1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
5  * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
6  * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are met:
10  *
11  * a) Redistributions of source code must retain the above copyright notice,
12  *    this list of conditions and the following disclaimer.
13  *
14  * b) Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in
16  *    the documentation and/or other materials provided with the distribution.
17  *
18  * c) Neither the name of Cisco Systems, Inc. nor the names of its
19  *    contributors may be used to endorse or promote products derived
20  *    from this software without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
24  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32  * THE POSSIBILITY OF SUCH DAMAGE.
33  */
34 
35 #ifdef __FreeBSD__
36 #include <sys/cdefs.h>
37 __FBSDID("$FreeBSD: head/sys/netinet/sctp_output.c 361243 2020-05-19 07:23:35Z tuexen $");
38 #endif
39 
40 #include <netinet/sctp_os.h>
41 #ifdef __FreeBSD__
42 #include <sys/proc.h>
43 #endif
44 #include <netinet/sctp_var.h>
45 #include <netinet/sctp_sysctl.h>
46 #include <netinet/sctp_header.h>
47 #include <netinet/sctp_pcb.h>
48 #include <netinet/sctputil.h>
49 #include <netinet/sctp_output.h>
50 #include <netinet/sctp_uio.h>
51 #include <netinet/sctputil.h>
52 #include <netinet/sctp_auth.h>
53 #include <netinet/sctp_timer.h>
54 #include <netinet/sctp_asconf.h>
55 #include <netinet/sctp_indata.h>
56 #include <netinet/sctp_bsd_addr.h>
57 #include <netinet/sctp_input.h>
58 #include <netinet/sctp_crc32.h>
59 #if defined(__FreeBSD__)
60 #include <netinet/sctp_kdtrace.h>
61 #endif
62 #if defined(__Userspace_os_Linux)
63 #define __FAVOR_BSD    /* (on Ubuntu at least) enables UDP header field names like BSD in RFC 768 */
64 #endif
65 #if defined(INET) || defined(INET6)
66 #if !defined(__Userspace_os_Windows)
67 #include <netinet/udp.h>
68 #endif
69 #endif
70 #if defined(__APPLE__)
71 #include <netinet/in.h>
72 #endif
73 #if defined(__FreeBSD__)
74 #if defined(__FreeBSD__) && __FreeBSD_version >= 800000
75 #include <netinet/udp_var.h>
76 #endif
77 #include <machine/in_cksum.h>
78 #endif
79 #if defined(__Userspace__) && defined(INET6)
80 #include <netinet6/sctp6_var.h>
81 #endif
82 
83 #if defined(__APPLE__)
84 #define APPLE_FILE_NO 3
85 #endif
86 
87 #if defined(__APPLE__)
88 #if !(defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD))
89 #define SCTP_MAX_LINKHDR 16
90 #endif
91 #endif
92 
93 #define SCTP_MAX_GAPS_INARRAY 4
94 struct sack_track {
95 	uint8_t right_edge;	/* mergable on the right edge */
96 	uint8_t left_edge;	/* mergable on the left edge */
97 	uint8_t num_entries;
98 	uint8_t spare;
99 	struct sctp_gap_ack_block gaps[SCTP_MAX_GAPS_INARRAY];
100 };
101 
102 const struct sack_track sack_array[256] = {
103 	{0, 0, 0, 0,		/* 0x00 */
104 		{{0, 0},
105 		{0, 0},
106 		{0, 0},
107 		{0, 0}
108 		}
109 	},
110 	{1, 0, 1, 0,		/* 0x01 */
111 		{{0, 0},
112 		{0, 0},
113 		{0, 0},
114 		{0, 0}
115 		}
116 	},
117 	{0, 0, 1, 0,		/* 0x02 */
118 		{{1, 1},
119 		{0, 0},
120 		{0, 0},
121 		{0, 0}
122 		}
123 	},
124 	{1, 0, 1, 0,		/* 0x03 */
125 		{{0, 1},
126 		{0, 0},
127 		{0, 0},
128 		{0, 0}
129 		}
130 	},
131 	{0, 0, 1, 0,		/* 0x04 */
132 		{{2, 2},
133 		{0, 0},
134 		{0, 0},
135 		{0, 0}
136 		}
137 	},
138 	{1, 0, 2, 0,		/* 0x05 */
139 		{{0, 0},
140 		{2, 2},
141 		{0, 0},
142 		{0, 0}
143 		}
144 	},
145 	{0, 0, 1, 0,		/* 0x06 */
146 		{{1, 2},
147 		{0, 0},
148 		{0, 0},
149 		{0, 0}
150 		}
151 	},
152 	{1, 0, 1, 0,		/* 0x07 */
153 		{{0, 2},
154 		{0, 0},
155 		{0, 0},
156 		{0, 0}
157 		}
158 	},
159 	{0, 0, 1, 0,		/* 0x08 */
160 		{{3, 3},
161 		{0, 0},
162 		{0, 0},
163 		{0, 0}
164 		}
165 	},
166 	{1, 0, 2, 0,		/* 0x09 */
167 		{{0, 0},
168 		{3, 3},
169 		{0, 0},
170 		{0, 0}
171 		}
172 	},
173 	{0, 0, 2, 0,		/* 0x0a */
174 		{{1, 1},
175 		{3, 3},
176 		{0, 0},
177 		{0, 0}
178 		}
179 	},
180 	{1, 0, 2, 0,		/* 0x0b */
181 		{{0, 1},
182 		{3, 3},
183 		{0, 0},
184 		{0, 0}
185 		}
186 	},
187 	{0, 0, 1, 0,		/* 0x0c */
188 		{{2, 3},
189 		{0, 0},
190 		{0, 0},
191 		{0, 0}
192 		}
193 	},
194 	{1, 0, 2, 0,		/* 0x0d */
195 		{{0, 0},
196 		{2, 3},
197 		{0, 0},
198 		{0, 0}
199 		}
200 	},
201 	{0, 0, 1, 0,		/* 0x0e */
202 		{{1, 3},
203 		{0, 0},
204 		{0, 0},
205 		{0, 0}
206 		}
207 	},
208 	{1, 0, 1, 0,		/* 0x0f */
209 		{{0, 3},
210 		{0, 0},
211 		{0, 0},
212 		{0, 0}
213 		}
214 	},
215 	{0, 0, 1, 0,		/* 0x10 */
216 		{{4, 4},
217 		{0, 0},
218 		{0, 0},
219 		{0, 0}
220 		}
221 	},
222 	{1, 0, 2, 0,		/* 0x11 */
223 		{{0, 0},
224 		{4, 4},
225 		{0, 0},
226 		{0, 0}
227 		}
228 	},
229 	{0, 0, 2, 0,		/* 0x12 */
230 		{{1, 1},
231 		{4, 4},
232 		{0, 0},
233 		{0, 0}
234 		}
235 	},
236 	{1, 0, 2, 0,		/* 0x13 */
237 		{{0, 1},
238 		{4, 4},
239 		{0, 0},
240 		{0, 0}
241 		}
242 	},
243 	{0, 0, 2, 0,		/* 0x14 */
244 		{{2, 2},
245 		{4, 4},
246 		{0, 0},
247 		{0, 0}
248 		}
249 	},
250 	{1, 0, 3, 0,		/* 0x15 */
251 		{{0, 0},
252 		{2, 2},
253 		{4, 4},
254 		{0, 0}
255 		}
256 	},
257 	{0, 0, 2, 0,		/* 0x16 */
258 		{{1, 2},
259 		{4, 4},
260 		{0, 0},
261 		{0, 0}
262 		}
263 	},
264 	{1, 0, 2, 0,		/* 0x17 */
265 		{{0, 2},
266 		{4, 4},
267 		{0, 0},
268 		{0, 0}
269 		}
270 	},
271 	{0, 0, 1, 0,		/* 0x18 */
272 		{{3, 4},
273 		{0, 0},
274 		{0, 0},
275 		{0, 0}
276 		}
277 	},
278 	{1, 0, 2, 0,		/* 0x19 */
279 		{{0, 0},
280 		{3, 4},
281 		{0, 0},
282 		{0, 0}
283 		}
284 	},
285 	{0, 0, 2, 0,		/* 0x1a */
286 		{{1, 1},
287 		{3, 4},
288 		{0, 0},
289 		{0, 0}
290 		}
291 	},
292 	{1, 0, 2, 0,		/* 0x1b */
293 		{{0, 1},
294 		{3, 4},
295 		{0, 0},
296 		{0, 0}
297 		}
298 	},
299 	{0, 0, 1, 0,		/* 0x1c */
300 		{{2, 4},
301 		{0, 0},
302 		{0, 0},
303 		{0, 0}
304 		}
305 	},
306 	{1, 0, 2, 0,		/* 0x1d */
307 		{{0, 0},
308 		{2, 4},
309 		{0, 0},
310 		{0, 0}
311 		}
312 	},
313 	{0, 0, 1, 0,		/* 0x1e */
314 		{{1, 4},
315 		{0, 0},
316 		{0, 0},
317 		{0, 0}
318 		}
319 	},
320 	{1, 0, 1, 0,		/* 0x1f */
321 		{{0, 4},
322 		{0, 0},
323 		{0, 0},
324 		{0, 0}
325 		}
326 	},
327 	{0, 0, 1, 0,		/* 0x20 */
328 		{{5, 5},
329 		{0, 0},
330 		{0, 0},
331 		{0, 0}
332 		}
333 	},
334 	{1, 0, 2, 0,		/* 0x21 */
335 		{{0, 0},
336 		{5, 5},
337 		{0, 0},
338 		{0, 0}
339 		}
340 	},
341 	{0, 0, 2, 0,		/* 0x22 */
342 		{{1, 1},
343 		{5, 5},
344 		{0, 0},
345 		{0, 0}
346 		}
347 	},
348 	{1, 0, 2, 0,		/* 0x23 */
349 		{{0, 1},
350 		{5, 5},
351 		{0, 0},
352 		{0, 0}
353 		}
354 	},
355 	{0, 0, 2, 0,		/* 0x24 */
356 		{{2, 2},
357 		{5, 5},
358 		{0, 0},
359 		{0, 0}
360 		}
361 	},
362 	{1, 0, 3, 0,		/* 0x25 */
363 		{{0, 0},
364 		{2, 2},
365 		{5, 5},
366 		{0, 0}
367 		}
368 	},
369 	{0, 0, 2, 0,		/* 0x26 */
370 		{{1, 2},
371 		{5, 5},
372 		{0, 0},
373 		{0, 0}
374 		}
375 	},
376 	{1, 0, 2, 0,		/* 0x27 */
377 		{{0, 2},
378 		{5, 5},
379 		{0, 0},
380 		{0, 0}
381 		}
382 	},
383 	{0, 0, 2, 0,		/* 0x28 */
384 		{{3, 3},
385 		{5, 5},
386 		{0, 0},
387 		{0, 0}
388 		}
389 	},
390 	{1, 0, 3, 0,		/* 0x29 */
391 		{{0, 0},
392 		{3, 3},
393 		{5, 5},
394 		{0, 0}
395 		}
396 	},
397 	{0, 0, 3, 0,		/* 0x2a */
398 		{{1, 1},
399 		{3, 3},
400 		{5, 5},
401 		{0, 0}
402 		}
403 	},
404 	{1, 0, 3, 0,		/* 0x2b */
405 		{{0, 1},
406 		{3, 3},
407 		{5, 5},
408 		{0, 0}
409 		}
410 	},
411 	{0, 0, 2, 0,		/* 0x2c */
412 		{{2, 3},
413 		{5, 5},
414 		{0, 0},
415 		{0, 0}
416 		}
417 	},
418 	{1, 0, 3, 0,		/* 0x2d */
419 		{{0, 0},
420 		{2, 3},
421 		{5, 5},
422 		{0, 0}
423 		}
424 	},
425 	{0, 0, 2, 0,		/* 0x2e */
426 		{{1, 3},
427 		{5, 5},
428 		{0, 0},
429 		{0, 0}
430 		}
431 	},
432 	{1, 0, 2, 0,		/* 0x2f */
433 		{{0, 3},
434 		{5, 5},
435 		{0, 0},
436 		{0, 0}
437 		}
438 	},
439 	{0, 0, 1, 0,		/* 0x30 */
440 		{{4, 5},
441 		{0, 0},
442 		{0, 0},
443 		{0, 0}
444 		}
445 	},
446 	{1, 0, 2, 0,		/* 0x31 */
447 		{{0, 0},
448 		{4, 5},
449 		{0, 0},
450 		{0, 0}
451 		}
452 	},
453 	{0, 0, 2, 0,		/* 0x32 */
454 		{{1, 1},
455 		{4, 5},
456 		{0, 0},
457 		{0, 0}
458 		}
459 	},
460 	{1, 0, 2, 0,		/* 0x33 */
461 		{{0, 1},
462 		{4, 5},
463 		{0, 0},
464 		{0, 0}
465 		}
466 	},
467 	{0, 0, 2, 0,		/* 0x34 */
468 		{{2, 2},
469 		{4, 5},
470 		{0, 0},
471 		{0, 0}
472 		}
473 	},
474 	{1, 0, 3, 0,		/* 0x35 */
475 		{{0, 0},
476 		{2, 2},
477 		{4, 5},
478 		{0, 0}
479 		}
480 	},
481 	{0, 0, 2, 0,		/* 0x36 */
482 		{{1, 2},
483 		{4, 5},
484 		{0, 0},
485 		{0, 0}
486 		}
487 	},
488 	{1, 0, 2, 0,		/* 0x37 */
489 		{{0, 2},
490 		{4, 5},
491 		{0, 0},
492 		{0, 0}
493 		}
494 	},
495 	{0, 0, 1, 0,		/* 0x38 */
496 		{{3, 5},
497 		{0, 0},
498 		{0, 0},
499 		{0, 0}
500 		}
501 	},
502 	{1, 0, 2, 0,		/* 0x39 */
503 		{{0, 0},
504 		{3, 5},
505 		{0, 0},
506 		{0, 0}
507 		}
508 	},
509 	{0, 0, 2, 0,		/* 0x3a */
510 		{{1, 1},
511 		{3, 5},
512 		{0, 0},
513 		{0, 0}
514 		}
515 	},
516 	{1, 0, 2, 0,		/* 0x3b */
517 		{{0, 1},
518 		{3, 5},
519 		{0, 0},
520 		{0, 0}
521 		}
522 	},
523 	{0, 0, 1, 0,		/* 0x3c */
524 		{{2, 5},
525 		{0, 0},
526 		{0, 0},
527 		{0, 0}
528 		}
529 	},
530 	{1, 0, 2, 0,		/* 0x3d */
531 		{{0, 0},
532 		{2, 5},
533 		{0, 0},
534 		{0, 0}
535 		}
536 	},
537 	{0, 0, 1, 0,		/* 0x3e */
538 		{{1, 5},
539 		{0, 0},
540 		{0, 0},
541 		{0, 0}
542 		}
543 	},
544 	{1, 0, 1, 0,		/* 0x3f */
545 		{{0, 5},
546 		{0, 0},
547 		{0, 0},
548 		{0, 0}
549 		}
550 	},
551 	{0, 0, 1, 0,		/* 0x40 */
552 		{{6, 6},
553 		{0, 0},
554 		{0, 0},
555 		{0, 0}
556 		}
557 	},
558 	{1, 0, 2, 0,		/* 0x41 */
559 		{{0, 0},
560 		{6, 6},
561 		{0, 0},
562 		{0, 0}
563 		}
564 	},
565 	{0, 0, 2, 0,		/* 0x42 */
566 		{{1, 1},
567 		{6, 6},
568 		{0, 0},
569 		{0, 0}
570 		}
571 	},
572 	{1, 0, 2, 0,		/* 0x43 */
573 		{{0, 1},
574 		{6, 6},
575 		{0, 0},
576 		{0, 0}
577 		}
578 	},
579 	{0, 0, 2, 0,		/* 0x44 */
580 		{{2, 2},
581 		{6, 6},
582 		{0, 0},
583 		{0, 0}
584 		}
585 	},
586 	{1, 0, 3, 0,		/* 0x45 */
587 		{{0, 0},
588 		{2, 2},
589 		{6, 6},
590 		{0, 0}
591 		}
592 	},
593 	{0, 0, 2, 0,		/* 0x46 */
594 		{{1, 2},
595 		{6, 6},
596 		{0, 0},
597 		{0, 0}
598 		}
599 	},
600 	{1, 0, 2, 0,		/* 0x47 */
601 		{{0, 2},
602 		{6, 6},
603 		{0, 0},
604 		{0, 0}
605 		}
606 	},
607 	{0, 0, 2, 0,		/* 0x48 */
608 		{{3, 3},
609 		{6, 6},
610 		{0, 0},
611 		{0, 0}
612 		}
613 	},
614 	{1, 0, 3, 0,		/* 0x49 */
615 		{{0, 0},
616 		{3, 3},
617 		{6, 6},
618 		{0, 0}
619 		}
620 	},
621 	{0, 0, 3, 0,		/* 0x4a */
622 		{{1, 1},
623 		{3, 3},
624 		{6, 6},
625 		{0, 0}
626 		}
627 	},
628 	{1, 0, 3, 0,		/* 0x4b */
629 		{{0, 1},
630 		{3, 3},
631 		{6, 6},
632 		{0, 0}
633 		}
634 	},
635 	{0, 0, 2, 0,		/* 0x4c */
636 		{{2, 3},
637 		{6, 6},
638 		{0, 0},
639 		{0, 0}
640 		}
641 	},
642 	{1, 0, 3, 0,		/* 0x4d */
643 		{{0, 0},
644 		{2, 3},
645 		{6, 6},
646 		{0, 0}
647 		}
648 	},
649 	{0, 0, 2, 0,		/* 0x4e */
650 		{{1, 3},
651 		{6, 6},
652 		{0, 0},
653 		{0, 0}
654 		}
655 	},
656 	{1, 0, 2, 0,		/* 0x4f */
657 		{{0, 3},
658 		{6, 6},
659 		{0, 0},
660 		{0, 0}
661 		}
662 	},
663 	{0, 0, 2, 0,		/* 0x50 */
664 		{{4, 4},
665 		{6, 6},
666 		{0, 0},
667 		{0, 0}
668 		}
669 	},
670 	{1, 0, 3, 0,		/* 0x51 */
671 		{{0, 0},
672 		{4, 4},
673 		{6, 6},
674 		{0, 0}
675 		}
676 	},
677 	{0, 0, 3, 0,		/* 0x52 */
678 		{{1, 1},
679 		{4, 4},
680 		{6, 6},
681 		{0, 0}
682 		}
683 	},
684 	{1, 0, 3, 0,		/* 0x53 */
685 		{{0, 1},
686 		{4, 4},
687 		{6, 6},
688 		{0, 0}
689 		}
690 	},
691 	{0, 0, 3, 0,		/* 0x54 */
692 		{{2, 2},
693 		{4, 4},
694 		{6, 6},
695 		{0, 0}
696 		}
697 	},
698 	{1, 0, 4, 0,		/* 0x55 */
699 		{{0, 0},
700 		{2, 2},
701 		{4, 4},
702 		{6, 6}
703 		}
704 	},
705 	{0, 0, 3, 0,		/* 0x56 */
706 		{{1, 2},
707 		{4, 4},
708 		{6, 6},
709 		{0, 0}
710 		}
711 	},
712 	{1, 0, 3, 0,		/* 0x57 */
713 		{{0, 2},
714 		{4, 4},
715 		{6, 6},
716 		{0, 0}
717 		}
718 	},
719 	{0, 0, 2, 0,		/* 0x58 */
720 		{{3, 4},
721 		{6, 6},
722 		{0, 0},
723 		{0, 0}
724 		}
725 	},
726 	{1, 0, 3, 0,		/* 0x59 */
727 		{{0, 0},
728 		{3, 4},
729 		{6, 6},
730 		{0, 0}
731 		}
732 	},
733 	{0, 0, 3, 0,		/* 0x5a */
734 		{{1, 1},
735 		{3, 4},
736 		{6, 6},
737 		{0, 0}
738 		}
739 	},
740 	{1, 0, 3, 0,		/* 0x5b */
741 		{{0, 1},
742 		{3, 4},
743 		{6, 6},
744 		{0, 0}
745 		}
746 	},
747 	{0, 0, 2, 0,		/* 0x5c */
748 		{{2, 4},
749 		{6, 6},
750 		{0, 0},
751 		{0, 0}
752 		}
753 	},
754 	{1, 0, 3, 0,		/* 0x5d */
755 		{{0, 0},
756 		{2, 4},
757 		{6, 6},
758 		{0, 0}
759 		}
760 	},
761 	{0, 0, 2, 0,		/* 0x5e */
762 		{{1, 4},
763 		{6, 6},
764 		{0, 0},
765 		{0, 0}
766 		}
767 	},
768 	{1, 0, 2, 0,		/* 0x5f */
769 		{{0, 4},
770 		{6, 6},
771 		{0, 0},
772 		{0, 0}
773 		}
774 	},
775 	{0, 0, 1, 0,		/* 0x60 */
776 		{{5, 6},
777 		{0, 0},
778 		{0, 0},
779 		{0, 0}
780 		}
781 	},
782 	{1, 0, 2, 0,		/* 0x61 */
783 		{{0, 0},
784 		{5, 6},
785 		{0, 0},
786 		{0, 0}
787 		}
788 	},
789 	{0, 0, 2, 0,		/* 0x62 */
790 		{{1, 1},
791 		{5, 6},
792 		{0, 0},
793 		{0, 0}
794 		}
795 	},
796 	{1, 0, 2, 0,		/* 0x63 */
797 		{{0, 1},
798 		{5, 6},
799 		{0, 0},
800 		{0, 0}
801 		}
802 	},
803 	{0, 0, 2, 0,		/* 0x64 */
804 		{{2, 2},
805 		{5, 6},
806 		{0, 0},
807 		{0, 0}
808 		}
809 	},
810 	{1, 0, 3, 0,		/* 0x65 */
811 		{{0, 0},
812 		{2, 2},
813 		{5, 6},
814 		{0, 0}
815 		}
816 	},
817 	{0, 0, 2, 0,		/* 0x66 */
818 		{{1, 2},
819 		{5, 6},
820 		{0, 0},
821 		{0, 0}
822 		}
823 	},
824 	{1, 0, 2, 0,		/* 0x67 */
825 		{{0, 2},
826 		{5, 6},
827 		{0, 0},
828 		{0, 0}
829 		}
830 	},
831 	{0, 0, 2, 0,		/* 0x68 */
832 		{{3, 3},
833 		{5, 6},
834 		{0, 0},
835 		{0, 0}
836 		}
837 	},
838 	{1, 0, 3, 0,		/* 0x69 */
839 		{{0, 0},
840 		{3, 3},
841 		{5, 6},
842 		{0, 0}
843 		}
844 	},
845 	{0, 0, 3, 0,		/* 0x6a */
846 		{{1, 1},
847 		{3, 3},
848 		{5, 6},
849 		{0, 0}
850 		}
851 	},
852 	{1, 0, 3, 0,		/* 0x6b */
853 		{{0, 1},
854 		{3, 3},
855 		{5, 6},
856 		{0, 0}
857 		}
858 	},
859 	{0, 0, 2, 0,		/* 0x6c */
860 		{{2, 3},
861 		{5, 6},
862 		{0, 0},
863 		{0, 0}
864 		}
865 	},
866 	{1, 0, 3, 0,		/* 0x6d */
867 		{{0, 0},
868 		{2, 3},
869 		{5, 6},
870 		{0, 0}
871 		}
872 	},
873 	{0, 0, 2, 0,		/* 0x6e */
874 		{{1, 3},
875 		{5, 6},
876 		{0, 0},
877 		{0, 0}
878 		}
879 	},
880 	{1, 0, 2, 0,		/* 0x6f */
881 		{{0, 3},
882 		{5, 6},
883 		{0, 0},
884 		{0, 0}
885 		}
886 	},
887 	{0, 0, 1, 0,		/* 0x70 */
888 		{{4, 6},
889 		{0, 0},
890 		{0, 0},
891 		{0, 0}
892 		}
893 	},
894 	{1, 0, 2, 0,		/* 0x71 */
895 		{{0, 0},
896 		{4, 6},
897 		{0, 0},
898 		{0, 0}
899 		}
900 	},
901 	{0, 0, 2, 0,		/* 0x72 */
902 		{{1, 1},
903 		{4, 6},
904 		{0, 0},
905 		{0, 0}
906 		}
907 	},
908 	{1, 0, 2, 0,		/* 0x73 */
909 		{{0, 1},
910 		{4, 6},
911 		{0, 0},
912 		{0, 0}
913 		}
914 	},
915 	{0, 0, 2, 0,		/* 0x74 */
916 		{{2, 2},
917 		{4, 6},
918 		{0, 0},
919 		{0, 0}
920 		}
921 	},
922 	{1, 0, 3, 0,		/* 0x75 */
923 		{{0, 0},
924 		{2, 2},
925 		{4, 6},
926 		{0, 0}
927 		}
928 	},
929 	{0, 0, 2, 0,		/* 0x76 */
930 		{{1, 2},
931 		{4, 6},
932 		{0, 0},
933 		{0, 0}
934 		}
935 	},
936 	{1, 0, 2, 0,		/* 0x77 */
937 		{{0, 2},
938 		{4, 6},
939 		{0, 0},
940 		{0, 0}
941 		}
942 	},
943 	{0, 0, 1, 0,		/* 0x78 */
944 		{{3, 6},
945 		{0, 0},
946 		{0, 0},
947 		{0, 0}
948 		}
949 	},
950 	{1, 0, 2, 0,		/* 0x79 */
951 		{{0, 0},
952 		{3, 6},
953 		{0, 0},
954 		{0, 0}
955 		}
956 	},
957 	{0, 0, 2, 0,		/* 0x7a */
958 		{{1, 1},
959 		{3, 6},
960 		{0, 0},
961 		{0, 0}
962 		}
963 	},
964 	{1, 0, 2, 0,		/* 0x7b */
965 		{{0, 1},
966 		{3, 6},
967 		{0, 0},
968 		{0, 0}
969 		}
970 	},
971 	{0, 0, 1, 0,		/* 0x7c */
972 		{{2, 6},
973 		{0, 0},
974 		{0, 0},
975 		{0, 0}
976 		}
977 	},
978 	{1, 0, 2, 0,		/* 0x7d */
979 		{{0, 0},
980 		{2, 6},
981 		{0, 0},
982 		{0, 0}
983 		}
984 	},
985 	{0, 0, 1, 0,		/* 0x7e */
986 		{{1, 6},
987 		{0, 0},
988 		{0, 0},
989 		{0, 0}
990 		}
991 	},
992 	{1, 0, 1, 0,		/* 0x7f */
993 		{{0, 6},
994 		{0, 0},
995 		{0, 0},
996 		{0, 0}
997 		}
998 	},
999 	{0, 1, 1, 0,		/* 0x80 */
1000 		{{7, 7},
1001 		{0, 0},
1002 		{0, 0},
1003 		{0, 0}
1004 		}
1005 	},
1006 	{1, 1, 2, 0,		/* 0x81 */
1007 		{{0, 0},
1008 		{7, 7},
1009 		{0, 0},
1010 		{0, 0}
1011 		}
1012 	},
1013 	{0, 1, 2, 0,		/* 0x82 */
1014 		{{1, 1},
1015 		{7, 7},
1016 		{0, 0},
1017 		{0, 0}
1018 		}
1019 	},
1020 	{1, 1, 2, 0,		/* 0x83 */
1021 		{{0, 1},
1022 		{7, 7},
1023 		{0, 0},
1024 		{0, 0}
1025 		}
1026 	},
1027 	{0, 1, 2, 0,		/* 0x84 */
1028 		{{2, 2},
1029 		{7, 7},
1030 		{0, 0},
1031 		{0, 0}
1032 		}
1033 	},
1034 	{1, 1, 3, 0,		/* 0x85 */
1035 		{{0, 0},
1036 		{2, 2},
1037 		{7, 7},
1038 		{0, 0}
1039 		}
1040 	},
1041 	{0, 1, 2, 0,		/* 0x86 */
1042 		{{1, 2},
1043 		{7, 7},
1044 		{0, 0},
1045 		{0, 0}
1046 		}
1047 	},
1048 	{1, 1, 2, 0,		/* 0x87 */
1049 		{{0, 2},
1050 		{7, 7},
1051 		{0, 0},
1052 		{0, 0}
1053 		}
1054 	},
1055 	{0, 1, 2, 0,		/* 0x88 */
1056 		{{3, 3},
1057 		{7, 7},
1058 		{0, 0},
1059 		{0, 0}
1060 		}
1061 	},
1062 	{1, 1, 3, 0,		/* 0x89 */
1063 		{{0, 0},
1064 		{3, 3},
1065 		{7, 7},
1066 		{0, 0}
1067 		}
1068 	},
1069 	{0, 1, 3, 0,		/* 0x8a */
1070 		{{1, 1},
1071 		{3, 3},
1072 		{7, 7},
1073 		{0, 0}
1074 		}
1075 	},
1076 	{1, 1, 3, 0,		/* 0x8b */
1077 		{{0, 1},
1078 		{3, 3},
1079 		{7, 7},
1080 		{0, 0}
1081 		}
1082 	},
1083 	{0, 1, 2, 0,		/* 0x8c */
1084 		{{2, 3},
1085 		{7, 7},
1086 		{0, 0},
1087 		{0, 0}
1088 		}
1089 	},
1090 	{1, 1, 3, 0,		/* 0x8d */
1091 		{{0, 0},
1092 		{2, 3},
1093 		{7, 7},
1094 		{0, 0}
1095 		}
1096 	},
1097 	{0, 1, 2, 0,		/* 0x8e */
1098 		{{1, 3},
1099 		{7, 7},
1100 		{0, 0},
1101 		{0, 0}
1102 		}
1103 	},
1104 	{1, 1, 2, 0,		/* 0x8f */
1105 		{{0, 3},
1106 		{7, 7},
1107 		{0, 0},
1108 		{0, 0}
1109 		}
1110 	},
1111 	{0, 1, 2, 0,		/* 0x90 */
1112 		{{4, 4},
1113 		{7, 7},
1114 		{0, 0},
1115 		{0, 0}
1116 		}
1117 	},
1118 	{1, 1, 3, 0,		/* 0x91 */
1119 		{{0, 0},
1120 		{4, 4},
1121 		{7, 7},
1122 		{0, 0}
1123 		}
1124 	},
1125 	{0, 1, 3, 0,		/* 0x92 */
1126 		{{1, 1},
1127 		{4, 4},
1128 		{7, 7},
1129 		{0, 0}
1130 		}
1131 	},
1132 	{1, 1, 3, 0,		/* 0x93 */
1133 		{{0, 1},
1134 		{4, 4},
1135 		{7, 7},
1136 		{0, 0}
1137 		}
1138 	},
1139 	{0, 1, 3, 0,		/* 0x94 */
1140 		{{2, 2},
1141 		{4, 4},
1142 		{7, 7},
1143 		{0, 0}
1144 		}
1145 	},
1146 	{1, 1, 4, 0,		/* 0x95 */
1147 		{{0, 0},
1148 		{2, 2},
1149 		{4, 4},
1150 		{7, 7}
1151 		}
1152 	},
1153 	{0, 1, 3, 0,		/* 0x96 */
1154 		{{1, 2},
1155 		{4, 4},
1156 		{7, 7},
1157 		{0, 0}
1158 		}
1159 	},
1160 	{1, 1, 3, 0,		/* 0x97 */
1161 		{{0, 2},
1162 		{4, 4},
1163 		{7, 7},
1164 		{0, 0}
1165 		}
1166 	},
1167 	{0, 1, 2, 0,		/* 0x98 */
1168 		{{3, 4},
1169 		{7, 7},
1170 		{0, 0},
1171 		{0, 0}
1172 		}
1173 	},
1174 	{1, 1, 3, 0,		/* 0x99 */
1175 		{{0, 0},
1176 		{3, 4},
1177 		{7, 7},
1178 		{0, 0}
1179 		}
1180 	},
1181 	{0, 1, 3, 0,		/* 0x9a */
1182 		{{1, 1},
1183 		{3, 4},
1184 		{7, 7},
1185 		{0, 0}
1186 		}
1187 	},
1188 	{1, 1, 3, 0,		/* 0x9b */
1189 		{{0, 1},
1190 		{3, 4},
1191 		{7, 7},
1192 		{0, 0}
1193 		}
1194 	},
1195 	{0, 1, 2, 0,		/* 0x9c */
1196 		{{2, 4},
1197 		{7, 7},
1198 		{0, 0},
1199 		{0, 0}
1200 		}
1201 	},
1202 	{1, 1, 3, 0,		/* 0x9d */
1203 		{{0, 0},
1204 		{2, 4},
1205 		{7, 7},
1206 		{0, 0}
1207 		}
1208 	},
1209 	{0, 1, 2, 0,		/* 0x9e */
1210 		{{1, 4},
1211 		{7, 7},
1212 		{0, 0},
1213 		{0, 0}
1214 		}
1215 	},
1216 	{1, 1, 2, 0,		/* 0x9f */
1217 		{{0, 4},
1218 		{7, 7},
1219 		{0, 0},
1220 		{0, 0}
1221 		}
1222 	},
1223 	{0, 1, 2, 0,		/* 0xa0 */
1224 		{{5, 5},
1225 		{7, 7},
1226 		{0, 0},
1227 		{0, 0}
1228 		}
1229 	},
1230 	{1, 1, 3, 0,		/* 0xa1 */
1231 		{{0, 0},
1232 		{5, 5},
1233 		{7, 7},
1234 		{0, 0}
1235 		}
1236 	},
1237 	{0, 1, 3, 0,		/* 0xa2 */
1238 		{{1, 1},
1239 		{5, 5},
1240 		{7, 7},
1241 		{0, 0}
1242 		}
1243 	},
1244 	{1, 1, 3, 0,		/* 0xa3 */
1245 		{{0, 1},
1246 		{5, 5},
1247 		{7, 7},
1248 		{0, 0}
1249 		}
1250 	},
1251 	{0, 1, 3, 0,		/* 0xa4 */
1252 		{{2, 2},
1253 		{5, 5},
1254 		{7, 7},
1255 		{0, 0}
1256 		}
1257 	},
1258 	{1, 1, 4, 0,		/* 0xa5 */
1259 		{{0, 0},
1260 		{2, 2},
1261 		{5, 5},
1262 		{7, 7}
1263 		}
1264 	},
1265 	{0, 1, 3, 0,		/* 0xa6 */
1266 		{{1, 2},
1267 		{5, 5},
1268 		{7, 7},
1269 		{0, 0}
1270 		}
1271 	},
1272 	{1, 1, 3, 0,		/* 0xa7 */
1273 		{{0, 2},
1274 		{5, 5},
1275 		{7, 7},
1276 		{0, 0}
1277 		}
1278 	},
1279 	{0, 1, 3, 0,		/* 0xa8 */
1280 		{{3, 3},
1281 		{5, 5},
1282 		{7, 7},
1283 		{0, 0}
1284 		}
1285 	},
1286 	{1, 1, 4, 0,		/* 0xa9 */
1287 		{{0, 0},
1288 		{3, 3},
1289 		{5, 5},
1290 		{7, 7}
1291 		}
1292 	},
1293 	{0, 1, 4, 0,		/* 0xaa */
1294 		{{1, 1},
1295 		{3, 3},
1296 		{5, 5},
1297 		{7, 7}
1298 		}
1299 	},
1300 	{1, 1, 4, 0,		/* 0xab */
1301 		{{0, 1},
1302 		{3, 3},
1303 		{5, 5},
1304 		{7, 7}
1305 		}
1306 	},
1307 	{0, 1, 3, 0,		/* 0xac */
1308 		{{2, 3},
1309 		{5, 5},
1310 		{7, 7},
1311 		{0, 0}
1312 		}
1313 	},
1314 	{1, 1, 4, 0,		/* 0xad */
1315 		{{0, 0},
1316 		{2, 3},
1317 		{5, 5},
1318 		{7, 7}
1319 		}
1320 	},
1321 	{0, 1, 3, 0,		/* 0xae */
1322 		{{1, 3},
1323 		{5, 5},
1324 		{7, 7},
1325 		{0, 0}
1326 		}
1327 	},
1328 	{1, 1, 3, 0,		/* 0xaf */
1329 		{{0, 3},
1330 		{5, 5},
1331 		{7, 7},
1332 		{0, 0}
1333 		}
1334 	},
1335 	{0, 1, 2, 0,		/* 0xb0 */
1336 		{{4, 5},
1337 		{7, 7},
1338 		{0, 0},
1339 		{0, 0}
1340 		}
1341 	},
1342 	{1, 1, 3, 0,		/* 0xb1 */
1343 		{{0, 0},
1344 		{4, 5},
1345 		{7, 7},
1346 		{0, 0}
1347 		}
1348 	},
1349 	{0, 1, 3, 0,		/* 0xb2 */
1350 		{{1, 1},
1351 		{4, 5},
1352 		{7, 7},
1353 		{0, 0}
1354 		}
1355 	},
1356 	{1, 1, 3, 0,		/* 0xb3 */
1357 		{{0, 1},
1358 		{4, 5},
1359 		{7, 7},
1360 		{0, 0}
1361 		}
1362 	},
1363 	{0, 1, 3, 0,		/* 0xb4 */
1364 		{{2, 2},
1365 		{4, 5},
1366 		{7, 7},
1367 		{0, 0}
1368 		}
1369 	},
1370 	{1, 1, 4, 0,		/* 0xb5 */
1371 		{{0, 0},
1372 		{2, 2},
1373 		{4, 5},
1374 		{7, 7}
1375 		}
1376 	},
1377 	{0, 1, 3, 0,		/* 0xb6 */
1378 		{{1, 2},
1379 		{4, 5},
1380 		{7, 7},
1381 		{0, 0}
1382 		}
1383 	},
1384 	{1, 1, 3, 0,		/* 0xb7 */
1385 		{{0, 2},
1386 		{4, 5},
1387 		{7, 7},
1388 		{0, 0}
1389 		}
1390 	},
1391 	{0, 1, 2, 0,		/* 0xb8 */
1392 		{{3, 5},
1393 		{7, 7},
1394 		{0, 0},
1395 		{0, 0}
1396 		}
1397 	},
1398 	{1, 1, 3, 0,		/* 0xb9 */
1399 		{{0, 0},
1400 		{3, 5},
1401 		{7, 7},
1402 		{0, 0}
1403 		}
1404 	},
1405 	{0, 1, 3, 0,		/* 0xba */
1406 		{{1, 1},
1407 		{3, 5},
1408 		{7, 7},
1409 		{0, 0}
1410 		}
1411 	},
1412 	{1, 1, 3, 0,		/* 0xbb */
1413 		{{0, 1},
1414 		{3, 5},
1415 		{7, 7},
1416 		{0, 0}
1417 		}
1418 	},
1419 	{0, 1, 2, 0,		/* 0xbc */
1420 		{{2, 5},
1421 		{7, 7},
1422 		{0, 0},
1423 		{0, 0}
1424 		}
1425 	},
1426 	{1, 1, 3, 0,		/* 0xbd */
1427 		{{0, 0},
1428 		{2, 5},
1429 		{7, 7},
1430 		{0, 0}
1431 		}
1432 	},
1433 	{0, 1, 2, 0,		/* 0xbe */
1434 		{{1, 5},
1435 		{7, 7},
1436 		{0, 0},
1437 		{0, 0}
1438 		}
1439 	},
1440 	{1, 1, 2, 0,		/* 0xbf */
1441 		{{0, 5},
1442 		{7, 7},
1443 		{0, 0},
1444 		{0, 0}
1445 		}
1446 	},
1447 	{0, 1, 1, 0,		/* 0xc0 */
1448 		{{6, 7},
1449 		{0, 0},
1450 		{0, 0},
1451 		{0, 0}
1452 		}
1453 	},
1454 	{1, 1, 2, 0,		/* 0xc1 */
1455 		{{0, 0},
1456 		{6, 7},
1457 		{0, 0},
1458 		{0, 0}
1459 		}
1460 	},
1461 	{0, 1, 2, 0,		/* 0xc2 */
1462 		{{1, 1},
1463 		{6, 7},
1464 		{0, 0},
1465 		{0, 0}
1466 		}
1467 	},
1468 	{1, 1, 2, 0,		/* 0xc3 */
1469 		{{0, 1},
1470 		{6, 7},
1471 		{0, 0},
1472 		{0, 0}
1473 		}
1474 	},
1475 	{0, 1, 2, 0,		/* 0xc4 */
1476 		{{2, 2},
1477 		{6, 7},
1478 		{0, 0},
1479 		{0, 0}
1480 		}
1481 	},
1482 	{1, 1, 3, 0,		/* 0xc5 */
1483 		{{0, 0},
1484 		{2, 2},
1485 		{6, 7},
1486 		{0, 0}
1487 		}
1488 	},
1489 	{0, 1, 2, 0,		/* 0xc6 */
1490 		{{1, 2},
1491 		{6, 7},
1492 		{0, 0},
1493 		{0, 0}
1494 		}
1495 	},
1496 	{1, 1, 2, 0,		/* 0xc7 */
1497 		{{0, 2},
1498 		{6, 7},
1499 		{0, 0},
1500 		{0, 0}
1501 		}
1502 	},
1503 	{0, 1, 2, 0,		/* 0xc8 */
1504 		{{3, 3},
1505 		{6, 7},
1506 		{0, 0},
1507 		{0, 0}
1508 		}
1509 	},
1510 	{1, 1, 3, 0,		/* 0xc9 */
1511 		{{0, 0},
1512 		{3, 3},
1513 		{6, 7},
1514 		{0, 0}
1515 		}
1516 	},
1517 	{0, 1, 3, 0,		/* 0xca */
1518 		{{1, 1},
1519 		{3, 3},
1520 		{6, 7},
1521 		{0, 0}
1522 		}
1523 	},
1524 	{1, 1, 3, 0,		/* 0xcb */
1525 		{{0, 1},
1526 		{3, 3},
1527 		{6, 7},
1528 		{0, 0}
1529 		}
1530 	},
1531 	{0, 1, 2, 0,		/* 0xcc */
1532 		{{2, 3},
1533 		{6, 7},
1534 		{0, 0},
1535 		{0, 0}
1536 		}
1537 	},
1538 	{1, 1, 3, 0,		/* 0xcd */
1539 		{{0, 0},
1540 		{2, 3},
1541 		{6, 7},
1542 		{0, 0}
1543 		}
1544 	},
1545 	{0, 1, 2, 0,		/* 0xce */
1546 		{{1, 3},
1547 		{6, 7},
1548 		{0, 0},
1549 		{0, 0}
1550 		}
1551 	},
1552 	{1, 1, 2, 0,		/* 0xcf */
1553 		{{0, 3},
1554 		{6, 7},
1555 		{0, 0},
1556 		{0, 0}
1557 		}
1558 	},
1559 	{0, 1, 2, 0,		/* 0xd0 */
1560 		{{4, 4},
1561 		{6, 7},
1562 		{0, 0},
1563 		{0, 0}
1564 		}
1565 	},
1566 	{1, 1, 3, 0,		/* 0xd1 */
1567 		{{0, 0},
1568 		{4, 4},
1569 		{6, 7},
1570 		{0, 0}
1571 		}
1572 	},
1573 	{0, 1, 3, 0,		/* 0xd2 */
1574 		{{1, 1},
1575 		{4, 4},
1576 		{6, 7},
1577 		{0, 0}
1578 		}
1579 	},
1580 	{1, 1, 3, 0,		/* 0xd3 */
1581 		{{0, 1},
1582 		{4, 4},
1583 		{6, 7},
1584 		{0, 0}
1585 		}
1586 	},
1587 	{0, 1, 3, 0,		/* 0xd4 */
1588 		{{2, 2},
1589 		{4, 4},
1590 		{6, 7},
1591 		{0, 0}
1592 		}
1593 	},
1594 	{1, 1, 4, 0,		/* 0xd5 */
1595 		{{0, 0},
1596 		{2, 2},
1597 		{4, 4},
1598 		{6, 7}
1599 		}
1600 	},
1601 	{0, 1, 3, 0,		/* 0xd6 */
1602 		{{1, 2},
1603 		{4, 4},
1604 		{6, 7},
1605 		{0, 0}
1606 		}
1607 	},
1608 	{1, 1, 3, 0,		/* 0xd7 */
1609 		{{0, 2},
1610 		{4, 4},
1611 		{6, 7},
1612 		{0, 0}
1613 		}
1614 	},
1615 	{0, 1, 2, 0,		/* 0xd8 */
1616 		{{3, 4},
1617 		{6, 7},
1618 		{0, 0},
1619 		{0, 0}
1620 		}
1621 	},
1622 	{1, 1, 3, 0,		/* 0xd9 */
1623 		{{0, 0},
1624 		{3, 4},
1625 		{6, 7},
1626 		{0, 0}
1627 		}
1628 	},
1629 	{0, 1, 3, 0,		/* 0xda */
1630 		{{1, 1},
1631 		{3, 4},
1632 		{6, 7},
1633 		{0, 0}
1634 		}
1635 	},
1636 	{1, 1, 3, 0,		/* 0xdb */
1637 		{{0, 1},
1638 		{3, 4},
1639 		{6, 7},
1640 		{0, 0}
1641 		}
1642 	},
1643 	{0, 1, 2, 0,		/* 0xdc */
1644 		{{2, 4},
1645 		{6, 7},
1646 		{0, 0},
1647 		{0, 0}
1648 		}
1649 	},
1650 	{1, 1, 3, 0,		/* 0xdd */
1651 		{{0, 0},
1652 		{2, 4},
1653 		{6, 7},
1654 		{0, 0}
1655 		}
1656 	},
1657 	{0, 1, 2, 0,		/* 0xde */
1658 		{{1, 4},
1659 		{6, 7},
1660 		{0, 0},
1661 		{0, 0}
1662 		}
1663 	},
1664 	{1, 1, 2, 0,		/* 0xdf */
1665 		{{0, 4},
1666 		{6, 7},
1667 		{0, 0},
1668 		{0, 0}
1669 		}
1670 	},
1671 	{0, 1, 1, 0,		/* 0xe0 */
1672 		{{5, 7},
1673 		{0, 0},
1674 		{0, 0},
1675 		{0, 0}
1676 		}
1677 	},
1678 	{1, 1, 2, 0,		/* 0xe1 */
1679 		{{0, 0},
1680 		{5, 7},
1681 		{0, 0},
1682 		{0, 0}
1683 		}
1684 	},
1685 	{0, 1, 2, 0,		/* 0xe2 */
1686 		{{1, 1},
1687 		{5, 7},
1688 		{0, 0},
1689 		{0, 0}
1690 		}
1691 	},
1692 	{1, 1, 2, 0,		/* 0xe3 */
1693 		{{0, 1},
1694 		{5, 7},
1695 		{0, 0},
1696 		{0, 0}
1697 		}
1698 	},
1699 	{0, 1, 2, 0,		/* 0xe4 */
1700 		{{2, 2},
1701 		{5, 7},
1702 		{0, 0},
1703 		{0, 0}
1704 		}
1705 	},
1706 	{1, 1, 3, 0,		/* 0xe5 */
1707 		{{0, 0},
1708 		{2, 2},
1709 		{5, 7},
1710 		{0, 0}
1711 		}
1712 	},
1713 	{0, 1, 2, 0,		/* 0xe6 */
1714 		{{1, 2},
1715 		{5, 7},
1716 		{0, 0},
1717 		{0, 0}
1718 		}
1719 	},
1720 	{1, 1, 2, 0,		/* 0xe7 */
1721 		{{0, 2},
1722 		{5, 7},
1723 		{0, 0},
1724 		{0, 0}
1725 		}
1726 	},
1727 	{0, 1, 2, 0,		/* 0xe8 */
1728 		{{3, 3},
1729 		{5, 7},
1730 		{0, 0},
1731 		{0, 0}
1732 		}
1733 	},
1734 	{1, 1, 3, 0,		/* 0xe9 */
1735 		{{0, 0},
1736 		{3, 3},
1737 		{5, 7},
1738 		{0, 0}
1739 		}
1740 	},
1741 	{0, 1, 3, 0,		/* 0xea */
1742 		{{1, 1},
1743 		{3, 3},
1744 		{5, 7},
1745 		{0, 0}
1746 		}
1747 	},
1748 	{1, 1, 3, 0,		/* 0xeb */
1749 		{{0, 1},
1750 		{3, 3},
1751 		{5, 7},
1752 		{0, 0}
1753 		}
1754 	},
1755 	{0, 1, 2, 0,		/* 0xec */
1756 		{{2, 3},
1757 		{5, 7},
1758 		{0, 0},
1759 		{0, 0}
1760 		}
1761 	},
1762 	{1, 1, 3, 0,		/* 0xed */
1763 		{{0, 0},
1764 		{2, 3},
1765 		{5, 7},
1766 		{0, 0}
1767 		}
1768 	},
1769 	{0, 1, 2, 0,		/* 0xee */
1770 		{{1, 3},
1771 		{5, 7},
1772 		{0, 0},
1773 		{0, 0}
1774 		}
1775 	},
1776 	{1, 1, 2, 0,		/* 0xef */
1777 		{{0, 3},
1778 		{5, 7},
1779 		{0, 0},
1780 		{0, 0}
1781 		}
1782 	},
1783 	{0, 1, 1, 0,		/* 0xf0 */
1784 		{{4, 7},
1785 		{0, 0},
1786 		{0, 0},
1787 		{0, 0}
1788 		}
1789 	},
1790 	{1, 1, 2, 0,		/* 0xf1 */
1791 		{{0, 0},
1792 		{4, 7},
1793 		{0, 0},
1794 		{0, 0}
1795 		}
1796 	},
1797 	{0, 1, 2, 0,		/* 0xf2 */
1798 		{{1, 1},
1799 		{4, 7},
1800 		{0, 0},
1801 		{0, 0}
1802 		}
1803 	},
1804 	{1, 1, 2, 0,		/* 0xf3 */
1805 		{{0, 1},
1806 		{4, 7},
1807 		{0, 0},
1808 		{0, 0}
1809 		}
1810 	},
1811 	{0, 1, 2, 0,		/* 0xf4 */
1812 		{{2, 2},
1813 		{4, 7},
1814 		{0, 0},
1815 		{0, 0}
1816 		}
1817 	},
1818 	{1, 1, 3, 0,		/* 0xf5 */
1819 		{{0, 0},
1820 		{2, 2},
1821 		{4, 7},
1822 		{0, 0}
1823 		}
1824 	},
1825 	{0, 1, 2, 0,		/* 0xf6 */
1826 		{{1, 2},
1827 		{4, 7},
1828 		{0, 0},
1829 		{0, 0}
1830 		}
1831 	},
1832 	{1, 1, 2, 0,		/* 0xf7 */
1833 		{{0, 2},
1834 		{4, 7},
1835 		{0, 0},
1836 		{0, 0}
1837 		}
1838 	},
1839 	{0, 1, 1, 0,		/* 0xf8 */
1840 		{{3, 7},
1841 		{0, 0},
1842 		{0, 0},
1843 		{0, 0}
1844 		}
1845 	},
1846 	{1, 1, 2, 0,		/* 0xf9 */
1847 		{{0, 0},
1848 		{3, 7},
1849 		{0, 0},
1850 		{0, 0}
1851 		}
1852 	},
1853 	{0, 1, 2, 0,		/* 0xfa */
1854 		{{1, 1},
1855 		{3, 7},
1856 		{0, 0},
1857 		{0, 0}
1858 		}
1859 	},
1860 	{1, 1, 2, 0,		/* 0xfb */
1861 		{{0, 1},
1862 		{3, 7},
1863 		{0, 0},
1864 		{0, 0}
1865 		}
1866 	},
1867 	{0, 1, 1, 0,		/* 0xfc */
1868 		{{2, 7},
1869 		{0, 0},
1870 		{0, 0},
1871 		{0, 0}
1872 		}
1873 	},
1874 	{1, 1, 2, 0,		/* 0xfd */
1875 		{{0, 0},
1876 		{2, 7},
1877 		{0, 0},
1878 		{0, 0}
1879 		}
1880 	},
1881 	{0, 1, 1, 0,		/* 0xfe */
1882 		{{1, 7},
1883 		{0, 0},
1884 		{0, 0},
1885 		{0, 0}
1886 		}
1887 	},
1888 	{1, 1, 1, 0,		/* 0xff */
1889 		{{0, 7},
1890 		{0, 0},
1891 		{0, 0},
1892 		{0, 0}
1893 		}
1894 	}
1895 };
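/*
 * Editorial note, not part of the original source: sack_array is a 256-entry
 * lookup table indexed by one byte of received-TSN bits.  Each entry
 * pre-computes the gap ack blocks for that bit pattern: num_entries says how
 * many of the four gaps[] slots are valid, right_edge is set when bit 0 of
 * the index is set (the first run is mergable with the preceding byte) and
 * left_edge when bit 7 is set (the last run is mergable with the following
 * byte).  A minimal sketch of one entry, under that interpretation:
 */
#if 0
	/* Hypothetical reading of the 0x2a entry shown above: */
	uint8_t bits = 0x2a;	/* binary 00101010: bits 1, 3 and 5 set */
	const struct sack_track *t = &sack_array[bits];
	/*
	 * t->num_entries is 3 and t->gaps[] holds {1,1}, {3,3} and {5,5}:
	 * three one-TSN gap blocks at offsets 1, 3 and 5 within this byte.
	 * Neither right_edge nor left_edge is set, so none of the runs can
	 * be merged with a run from an adjacent byte.
	 */
#endif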
1896 
1897 
1898 int
1899 sctp_is_address_in_scope(struct sctp_ifa *ifa,
1900                          struct sctp_scoping *scope,
1901                          int do_update)
1902 {
1903 	if ((scope->loopback_scope == 0) &&
1904 	    (ifa->ifn_p) && SCTP_IFN_IS_IFT_LOOP(ifa->ifn_p)) {
1905 		/*
1906 		 * skip loopback if not in scope
1907 		 */
1908 		return (0);
1909 	}
1910 	switch (ifa->address.sa.sa_family) {
1911 #ifdef INET
1912 	case AF_INET:
1913 		if (scope->ipv4_addr_legal) {
1914 			struct sockaddr_in *sin;
1915 
1916 			sin = &ifa->address.sin;
1917 			if (sin->sin_addr.s_addr == 0) {
1918 				/* not in scope, unspecified */
1919 				return (0);
1920 			}
1921 			if ((scope->ipv4_local_scope == 0) &&
1922 			    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
1923 				/* private address not in scope */
1924 				return (0);
1925 			}
1926 		} else {
1927 			return (0);
1928 		}
1929 		break;
1930 #endif
1931 #ifdef INET6
1932 	case AF_INET6:
1933 		if (scope->ipv6_addr_legal) {
1934 			struct sockaddr_in6 *sin6;
1935 
1936 #if !defined(__Panda__)
1937 			/* Must update the flags,  bummer, which
1938 			 * means any IFA locks must now be applied HERE <->
1939 			 */
1940 			if (do_update) {
1941 				sctp_gather_internal_ifa_flags(ifa);
1942 			}
1943 #endif
1944 			if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
1945 				return (0);
1946 			}
1947 			/* ok to use deprecated addresses? */
1948 			sin6 = &ifa->address.sin6;
1949 			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
1950 				/* skip unspecified addresses */
1951 				return (0);
1952 			}
1953 			if (		/* (local_scope == 0) && */
1954 			    (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr))) {
1955 				return (0);
1956 			}
1957 			if ((scope->site_scope == 0) &&
1958 			    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
1959 				return (0);
1960 			}
1961 		} else {
1962 			return (0);
1963 		}
1964 		break;
1965 #endif
1966 #if defined(__Userspace__)
1967 	case AF_CONN:
1968 		if (!scope->conn_addr_legal) {
1969 			return (0);
1970 		}
1971 		break;
1972 #endif
1973 	default:
1974 		return (0);
1975 	}
1976 	return (1);
1977 }
1978 
1979 static struct mbuf *
1980 sctp_add_addr_to_mbuf(struct mbuf *m, struct sctp_ifa *ifa, uint16_t *len)
1981 {
1982 #if defined(INET) || defined(INET6)
1983 	struct sctp_paramhdr *paramh;
1984 	struct mbuf *mret;
1985 	uint16_t plen;
1986 #endif
1987 
1988 	switch (ifa->address.sa.sa_family) {
1989 #ifdef INET
1990 	case AF_INET:
1991 		plen = (uint16_t)sizeof(struct sctp_ipv4addr_param);
1992 		break;
1993 #endif
1994 #ifdef INET6
1995 	case AF_INET6:
1996 		plen = (uint16_t)sizeof(struct sctp_ipv6addr_param);
1997 		break;
1998 #endif
1999 	default:
2000 		return (m);
2001 	}
2002 #if defined(INET) || defined(INET6)
2003 	if (M_TRAILINGSPACE(m) >= plen) {
2004 		/* easy side we just drop it on the end */
2005 		paramh = (struct sctp_paramhdr *)(SCTP_BUF_AT(m, SCTP_BUF_LEN(m)));
2006 		mret = m;
2007 	} else {
2008 		/* Need more space */
2009 		mret = m;
2010 		while (SCTP_BUF_NEXT(mret) != NULL) {
2011 			mret = SCTP_BUF_NEXT(mret);
2012 		}
2013 		SCTP_BUF_NEXT(mret) = sctp_get_mbuf_for_msg(plen, 0, M_NOWAIT, 1, MT_DATA);
2014 		if (SCTP_BUF_NEXT(mret) == NULL) {
2015 			/* We are hosed, can't add more addresses */
2016 			return (m);
2017 		}
2018 		mret = SCTP_BUF_NEXT(mret);
2019 		paramh = mtod(mret, struct sctp_paramhdr *);
2020 	}
2021 	/* now add the parameter */
2022 	switch (ifa->address.sa.sa_family) {
2023 #ifdef INET
2024 	case AF_INET:
2025 	{
2026 		struct sctp_ipv4addr_param *ipv4p;
2027 		struct sockaddr_in *sin;
2028 
2029 		sin = &ifa->address.sin;
2030 		ipv4p = (struct sctp_ipv4addr_param *)paramh;
2031 		paramh->param_type = htons(SCTP_IPV4_ADDRESS);
2032 		paramh->param_length = htons(plen);
2033 		ipv4p->addr = sin->sin_addr.s_addr;
2034 		SCTP_BUF_LEN(mret) += plen;
2035 		break;
2036 	}
2037 #endif
2038 #ifdef INET6
2039 	case AF_INET6:
2040 	{
2041 		struct sctp_ipv6addr_param *ipv6p;
2042 		struct sockaddr_in6 *sin6;
2043 
2044 		sin6 = &ifa->address.sin6;
2045 		ipv6p = (struct sctp_ipv6addr_param *)paramh;
2046 		paramh->param_type = htons(SCTP_IPV6_ADDRESS);
2047 		paramh->param_length = htons(plen);
2048 		memcpy(ipv6p->addr, &sin6->sin6_addr,
2049 		    sizeof(ipv6p->addr));
2050 #if defined(SCTP_EMBEDDED_V6_SCOPE)
2051 		/* clear embedded scope in the address */
2052 		in6_clearscope((struct in6_addr *)ipv6p->addr);
2053 #endif
2054 		SCTP_BUF_LEN(mret) += plen;
2055 		break;
2056 	}
2057 #endif
2058 	default:
2059 		return (m);
2060 	}
2061 	if (len != NULL) {
2062 		*len += plen;
2063 	}
2064 	return (mret);
2065 #endif
2066 }
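/*
 * Editorial note, not part of the original source: the helper above appends
 * one address parameter TLV to the chunk under construction.  Judging from
 * the plen values used, an IPv4 ifa adds a struct sctp_ipv4addr_param (type
 * SCTP_IPV4_ADDRESS, the 4-byte address after the parameter header) and an
 * IPv6 ifa adds a struct sctp_ipv6addr_param (type SCTP_IPV6_ADDRESS, the
 * 16-byte address, with any embedded scope cleared first).  If the current
 * mbuf lacks trailing space, a fresh MT_DATA mbuf is chained on and the TLV
 * is written there instead.
 */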
2067 
2068 
2069 struct mbuf *
2070 sctp_add_addresses_to_i_ia(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2071                            struct sctp_scoping *scope,
2072 			   struct mbuf *m_at, int cnt_inits_to,
2073 			   uint16_t *padding_len, uint16_t *chunk_len)
2074 {
2075 	struct sctp_vrf *vrf = NULL;
2076 	int cnt, limit_out = 0, total_count;
2077 	uint32_t vrf_id;
2078 
2079 	vrf_id = inp->def_vrf_id;
2080 	SCTP_IPI_ADDR_RLOCK();
2081 	vrf = sctp_find_vrf(vrf_id);
2082 	if (vrf == NULL) {
2083 		SCTP_IPI_ADDR_RUNLOCK();
2084 		return (m_at);
2085 	}
2086 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
2087 		struct sctp_ifa *sctp_ifap;
2088 		struct sctp_ifn *sctp_ifnp;
2089 
2090 		cnt = cnt_inits_to;
2091 		if (vrf->total_ifa_count > SCTP_COUNT_LIMIT) {
2092 			limit_out = 1;
2093 			cnt = SCTP_ADDRESS_LIMIT;
2094 			goto skip_count;
2095 		}
2096 		LIST_FOREACH(sctp_ifnp, &vrf->ifnlist, next_ifn) {
2097 			if ((scope->loopback_scope == 0) &&
2098 			    SCTP_IFN_IS_IFT_LOOP(sctp_ifnp)) {
2099 				/*
2100 				 * Skip loopback devices if loopback_scope
2101 				 * not set
2102 				 */
2103 				continue;
2104 			}
2105 			LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) {
2106 #if defined(__FreeBSD__)
2107 #ifdef INET
2108 				if ((sctp_ifap->address.sa.sa_family == AF_INET) &&
2109 				    (prison_check_ip4(inp->ip_inp.inp.inp_cred,
2110 				                      &sctp_ifap->address.sin.sin_addr) != 0)) {
2111 					continue;
2112 				}
2113 #endif
2114 #ifdef INET6
2115 				if ((sctp_ifap->address.sa.sa_family == AF_INET6) &&
2116 				    (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2117 				                      &sctp_ifap->address.sin6.sin6_addr) != 0)) {
2118 					continue;
2119 				}
2120 #endif
2121 #endif
2122 				if (sctp_is_addr_restricted(stcb, sctp_ifap)) {
2123 					continue;
2124 				}
2125 #if defined(__Userspace__)
2126 				if (sctp_ifap->address.sa.sa_family == AF_CONN) {
2127 					continue;
2128 				}
2129 #endif
2130 				if (sctp_is_address_in_scope(sctp_ifap, scope, 1) == 0) {
2131 					continue;
2132 				}
2133 				cnt++;
2134 				if (cnt > SCTP_ADDRESS_LIMIT) {
2135 					break;
2136 				}
2137 			}
2138 			if (cnt > SCTP_ADDRESS_LIMIT) {
2139 				break;
2140 			}
2141 		}
2142 	skip_count:
2143 		if (cnt > 1) {
2144 			total_count = 0;
2145 			LIST_FOREACH(sctp_ifnp, &vrf->ifnlist, next_ifn) {
2146 				cnt = 0;
2147 				if ((scope->loopback_scope == 0) &&
2148 				    SCTP_IFN_IS_IFT_LOOP(sctp_ifnp)) {
2149 					/*
2150 					 * Skip loopback devices if
2151 					 * loopback_scope not set
2152 					 */
2153 					continue;
2154 				}
2155 				LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) {
2156 #if defined(__FreeBSD__)
2157 #ifdef INET
2158 					if ((sctp_ifap->address.sa.sa_family == AF_INET) &&
2159 					    (prison_check_ip4(inp->ip_inp.inp.inp_cred,
2160 					                      &sctp_ifap->address.sin.sin_addr) != 0)) {
2161 						continue;
2162 					}
2163 #endif
2164 #ifdef INET6
2165 					if ((sctp_ifap->address.sa.sa_family == AF_INET6) &&
2166 					    (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2167 					                      &sctp_ifap->address.sin6.sin6_addr) != 0)) {
2168 						continue;
2169 					}
2170 #endif
2171 #endif
2172 					if (sctp_is_addr_restricted(stcb, sctp_ifap)) {
2173 						continue;
2174 					}
2175 #if defined(__Userspace__)
2176 					if (sctp_ifap->address.sa.sa_family == AF_CONN) {
2177 						continue;
2178 					}
2179 #endif
2180 					if (sctp_is_address_in_scope(sctp_ifap,
2181 								     scope, 0) == 0) {
2182 						continue;
2183 					}
2184 					if ((chunk_len != NULL) &&
2185 					    (padding_len != NULL) &&
2186 					    (*padding_len > 0)) {
2187 						memset(mtod(m_at, caddr_t) + *chunk_len, 0, *padding_len);
2188 						SCTP_BUF_LEN(m_at) += *padding_len;
2189 						*chunk_len += *padding_len;
2190 						*padding_len = 0;
2191 					}
2192 					m_at = sctp_add_addr_to_mbuf(m_at, sctp_ifap, chunk_len);
2193 					if (limit_out) {
2194 						cnt++;
2195 						total_count++;
2196 						if (cnt >= 2) {
2197 							/* two from each address */
2198 							break;
2199 						}
2200 						if (total_count > SCTP_ADDRESS_LIMIT) {
2201 							/* No more addresses */
2202 							break;
2203 						}
2204 					}
2205 				}
2206 			}
2207 		}
2208 	} else {
2209 		struct sctp_laddr *laddr;
2210 
2211 		cnt = cnt_inits_to;
2212 		/* First, how many ? */
2213 		LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
2214 			if (laddr->ifa == NULL) {
2215 				continue;
2216 			}
2217 			if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED)
2218 				/* Address being deleted by the system, don't
2219 				 * list.
2220 				 */
2221 				continue;
2222 			if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2223 				/* Address being deleted on this ep,
2224 				 * don't list.
2225 				 */
2226 				continue;
2227 			}
2228 #if defined(__Userspace__)
2229 			if (laddr->ifa->address.sa.sa_family == AF_CONN) {
2230 				continue;
2231 			}
2232 #endif
2233 			if (sctp_is_address_in_scope(laddr->ifa,
2234 						     scope, 1) == 0) {
2235 				continue;
2236 			}
2237 			cnt++;
2238 		}
2239 		/*
2240 		 * To get through a NAT we only list addresses if we have
2241 		 * more than one. That way if you just bind a single address
2242 		 * we let the source of the init dictate our address.
2243 		 */
2244 		if (cnt > 1) {
2245 			cnt = cnt_inits_to;
2246 			LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
2247 				if (laddr->ifa == NULL) {
2248 					continue;
2249 				}
2250 				if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED) {
2251 					continue;
2252 				}
2253 #if defined(__Userspace__)
2254 				if (laddr->ifa->address.sa.sa_family == AF_CONN) {
2255 					continue;
2256 				}
2257 #endif
2258 				if (sctp_is_address_in_scope(laddr->ifa,
2259 							     scope, 0) == 0) {
2260 					continue;
2261 				}
2262 				if ((chunk_len != NULL) &&
2263 				    (padding_len != NULL) &&
2264 				    (*padding_len > 0)) {
2265 					memset(mtod(m_at, caddr_t) + *chunk_len, 0, *padding_len);
2266 					SCTP_BUF_LEN(m_at) += *padding_len;
2267 					*chunk_len += *padding_len;
2268 					*padding_len = 0;
2269 				}
2270 				m_at = sctp_add_addr_to_mbuf(m_at, laddr->ifa, chunk_len);
2271 				cnt++;
2272 				if (cnt >= SCTP_ADDRESS_LIMIT) {
2273 					break;
2274 				}
2275 			}
2276 		}
2277 	}
2278 	SCTP_IPI_ADDR_RUNLOCK();
2279 	return (m_at);
2280 }
2281 
2282 static struct sctp_ifa *
2283 sctp_is_ifa_addr_preferred(struct sctp_ifa *ifa,
2284 			   uint8_t dest_is_loop,
2285 			   uint8_t dest_is_priv,
2286 			   sa_family_t fam)
2287 {
2288 	uint8_t dest_is_global = 0;
2289 	/* dest_is_priv is true if destination is a private address */
2290 	/* dest_is_loop is true if destination is a loopback address */
2291 
2292 	/**
2293 	 * Here we determine if it's a preferred address. A preferred address
2294 	 * means it is the same scope or a higher scope than the destination.
2295 	 * L = loopback, P = private, G = global
2296 	 * -----------------------------------------
2297 	 *    src    |  dest | result
2298 	 *  ----------------------------------------
2299 	 *     L     |    L  |    yes
2300 	 *  -----------------------------------------
2301 	 *     P     |    L  |    yes-v4 no-v6
2302 	 *  -----------------------------------------
2303 	 *     G     |    L  |    yes-v4 no-v6
2304 	 *  -----------------------------------------
2305 	 *     L     |    P  |    no
2306 	 *  -----------------------------------------
2307 	 *     P     |    P  |    yes
2308 	 *  -----------------------------------------
2309 	 *     G     |    P  |    no
2310 	 *   -----------------------------------------
2311 	 *     L     |    G  |    no
2312 	 *   -----------------------------------------
2313 	 *     P     |    G  |    no
2314 	 *    -----------------------------------------
2315 	 *     G     |    G  |    yes
2316 	 *    -----------------------------------------
2317 	 */
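	/*
	 * Illustrative reading of the table above (editorial note, not part
	 * of the original source): a private source paired with a global
	 * destination falls on the "P | G | no" row and is rejected at the
	 * "NO:7" check below, while a global source with a global destination
	 * falls on "G | G | yes" and passes every check.
	 */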
2318 
2319 	if (ifa->address.sa.sa_family != fam) {
2320 		/* forget mis-matched family */
2321 		return (NULL);
2322 	}
2323 	if ((dest_is_priv == 0) && (dest_is_loop == 0)) {
2324 		dest_is_global = 1;
2325 	}
2326 	SCTPDBG(SCTP_DEBUG_OUTPUT2, "Is destination preferred:");
2327 	SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &ifa->address.sa);
2328 	/* Ok the address may be ok */
2329 #ifdef INET6
2330 	if (fam == AF_INET6) {
2331 		/* ok to use deprecated addresses? no, let's not! */
2332 		if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
2333 			SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:1\n");
2334 			return (NULL);
2335 		}
2336 		if (ifa->src_is_priv && !ifa->src_is_loop) {
2337 			if (dest_is_loop) {
2338 				SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:2\n");
2339 				return (NULL);
2340 			}
2341 		}
2342 		if (ifa->src_is_glob) {
2343 			if (dest_is_loop) {
2344 				SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:3\n");
2345 				return (NULL);
2346 			}
2347 		}
2348 	}
2349 #endif
2350 	/* Now that we know what is what, implement our table.
2351 	 * This could in theory be done slicker (it used to be), but this
2352 	 * is straightforward and easier to validate :-)
2353 	 */
2354 	SCTPDBG(SCTP_DEBUG_OUTPUT3, "src_loop:%d src_priv:%d src_glob:%d\n",
2355 		ifa->src_is_loop, ifa->src_is_priv, ifa->src_is_glob);
2356 	SCTPDBG(SCTP_DEBUG_OUTPUT3, "dest_loop:%d dest_priv:%d dest_glob:%d\n",
2357 		dest_is_loop, dest_is_priv, dest_is_global);
2358 
2359 	if ((ifa->src_is_loop) && (dest_is_priv)) {
2360 		SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:4\n");
2361 		return (NULL);
2362 	}
2363 	if ((ifa->src_is_glob) && (dest_is_priv)) {
2364 		SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:5\n");
2365 		return (NULL);
2366 	}
2367 	if ((ifa->src_is_loop) && (dest_is_global)) {
2368 		SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:6\n");
2369 		return (NULL);
2370 	}
2371 	if ((ifa->src_is_priv) && (dest_is_global)) {
2372 		SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:7\n");
2373 		return (NULL);
2374 	}
2375 	SCTPDBG(SCTP_DEBUG_OUTPUT3, "YES\n");
2376 	/* it's a preferred address */
2377 	return (ifa);
2378 }
2379 
2380 static struct sctp_ifa *
2381 sctp_is_ifa_addr_acceptable(struct sctp_ifa *ifa,
2382 			    uint8_t dest_is_loop,
2383 			    uint8_t dest_is_priv,
2384 			    sa_family_t fam)
2385 {
2386 	uint8_t dest_is_global = 0;
2387 
2388 	/**
2389 	 * Here we determine if it's an acceptable address. An acceptable
2390 	 * address means it is the same scope or a higher scope, but we can
2391 	 * allow for NAT, which means it's ok to have a global dest and a
2392 	 * private src.
2393 	 *
2394 	 * L = loopback, P = private, G = global
2395 	 * -----------------------------------------
2396 	 *  src    |  dest | result
2397 	 * -----------------------------------------
2398 	 *   L     |   L   |    yes
2399 	 *  -----------------------------------------
2400 	 *   P     |   L   |    yes-v4 no-v6
2401 	 *  -----------------------------------------
2402 	 *   G     |   L   |    yes
2403 	 * -----------------------------------------
2404 	 *   L     |   P   |    no
2405 	 * -----------------------------------------
2406 	 *   P     |   P   |    yes
2407 	 * -----------------------------------------
2408 	 *   G     |   P   |    yes - May not work
2409 	 * -----------------------------------------
2410 	 *   L     |   G   |    no
2411 	 * -----------------------------------------
2412 	 *   P     |   G   |    yes - May not work
2413 	 * -----------------------------------------
2414 	 *   G     |   G   |    yes
2415 	 * -----------------------------------------
2416 	 */
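	/*
	 * Editorial note, not part of the original source: compared with the
	 * "preferred" table in sctp_is_ifa_addr_preferred(), the "G | P" and
	 * "P | G" rows flip from "no" to "yes - May not work" here.  That is
	 * the NAT allowance described above: a private source may still be
	 * offered for a global destination (and vice versa) when the callers
	 * fall back from preferred to merely acceptable addresses.
	 */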
2417 
2418 	if (ifa->address.sa.sa_family != fam) {
2419 		/* forget non matching family */
2420 		SCTPDBG(SCTP_DEBUG_OUTPUT3, "ifa_fam:%d fam:%d\n",
2421 			ifa->address.sa.sa_family, fam);
2422 		return (NULL);
2423 	}
2424 	/* Ok the address may be ok */
2425 	SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, &ifa->address.sa);
2426 	SCTPDBG(SCTP_DEBUG_OUTPUT3, "dst_is_loop:%d dest_is_priv:%d\n",
2427 		dest_is_loop, dest_is_priv);
2428 	if ((dest_is_loop == 0) && (dest_is_priv == 0)) {
2429 		dest_is_global = 1;
2430 	}
2431 #ifdef INET6
2432 	if (fam == AF_INET6) {
2433 		/* ok to use deprecated addresses? */
2434 		if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
2435 			return (NULL);
2436 		}
2437 		if (ifa->src_is_priv) {
2438 			/* Special case, linklocal to loop */
2439 			if (dest_is_loop)
2440 				return (NULL);
2441 		}
2442 	}
2443 #endif
2444 	/*
2445 	 * Now that we know what is what, implement our table.
2446 	 * This could in theory be done slicker (it used to be), but this
2447 	 * is straightforward and easier to validate :-)
2448 	 */
2449 	SCTPDBG(SCTP_DEBUG_OUTPUT3, "ifa->src_is_loop:%d dest_is_priv:%d\n",
2450 		ifa->src_is_loop,
2451 		dest_is_priv);
2452 	if ((ifa->src_is_loop == 1) && (dest_is_priv)) {
2453 		return (NULL);
2454 	}
2455 	SCTPDBG(SCTP_DEBUG_OUTPUT3, "ifa->src_is_loop:%d dest_is_glob:%d\n",
2456 		ifa->src_is_loop,
2457 		dest_is_global);
2458 	if ((ifa->src_is_loop == 1) && (dest_is_global)) {
2459 		return (NULL);
2460 	}
2461 	SCTPDBG(SCTP_DEBUG_OUTPUT3, "address is acceptable\n");
2462 	/* it's an acceptable address */
2463 	return (ifa);
2464 }
2465 
2466 int
2467 sctp_is_addr_restricted(struct sctp_tcb *stcb, struct sctp_ifa *ifa)
2468 {
2469 	struct sctp_laddr *laddr;
2470 
2471 	if (stcb == NULL) {
2472 		/* There are no restrictions, no TCB :-) */
2473 		return (0);
2474 	}
2475 	LIST_FOREACH(laddr, &stcb->asoc.sctp_restricted_addrs, sctp_nxt_addr) {
2476 		if (laddr->ifa == NULL) {
2477 			SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n",
2478 				__func__);
2479 			continue;
2480 		}
2481 		if (laddr->ifa == ifa) {
2482 			/* Yes it is on the list */
2483 			return (1);
2484 		}
2485 	}
2486 	return (0);
2487 }
2488 
2489 
2490 int
2491 sctp_is_addr_in_ep(struct sctp_inpcb *inp, struct sctp_ifa *ifa)
2492 {
2493 	struct sctp_laddr *laddr;
2494 
2495 	if (ifa == NULL)
2496 		return (0);
2497 	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
2498 		if (laddr->ifa == NULL) {
2499 			SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n",
2500 				__func__);
2501 			continue;
2502 		}
2503 		if ((laddr->ifa == ifa) && laddr->action == 0)
2504 			/* same pointer */
2505 			return (1);
2506 	}
2507 	return (0);
2508 }
2509 
2510 
2511 
2512 static struct sctp_ifa *
2513 sctp_choose_boundspecific_inp(struct sctp_inpcb *inp,
2514 			      sctp_route_t *ro,
2515 			      uint32_t vrf_id,
2516 			      int non_asoc_addr_ok,
2517 			      uint8_t dest_is_priv,
2518 			      uint8_t dest_is_loop,
2519 			      sa_family_t fam)
2520 {
2521 	struct sctp_laddr *laddr, *starting_point;
2522 	void *ifn;
2523 	int resettotop = 0;
2524 	struct sctp_ifn *sctp_ifn;
2525 	struct sctp_ifa *sctp_ifa, *sifa;
2526 	struct sctp_vrf *vrf;
2527 	uint32_t ifn_index;
2528 
2529 	vrf = sctp_find_vrf(vrf_id);
2530 	if (vrf == NULL)
2531 		return (NULL);
2532 
2533 	ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
2534 	ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
2535 	sctp_ifn = sctp_find_ifn(ifn, ifn_index);
2536 	/*
2537 	 * first question: is the ifn we will emit on in our list?  If so, we
2538 	 * want such an address. Note that we first look for a
2539 	 * preferred address.
2540 	 */
2541 	if (sctp_ifn) {
2542 		/* is a preferred one on the interface we route out? */
2543 		LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
2544 #if defined(__FreeBSD__)
2545 #ifdef INET
2546 			if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
2547 			    (prison_check_ip4(inp->ip_inp.inp.inp_cred,
2548 			                      &sctp_ifa->address.sin.sin_addr) != 0)) {
2549 				continue;
2550 			}
2551 #endif
2552 #ifdef INET6
2553 			if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
2554 			    (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2555 			                      &sctp_ifa->address.sin6.sin6_addr) != 0)) {
2556 				continue;
2557 			}
2558 #endif
2559 #endif
2560 			if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
2561 			    (non_asoc_addr_ok == 0))
2562 				continue;
2563 			sifa = sctp_is_ifa_addr_preferred(sctp_ifa,
2564 							  dest_is_loop,
2565 							  dest_is_priv, fam);
2566 			if (sifa == NULL)
2567 				continue;
2568 			if (sctp_is_addr_in_ep(inp, sifa)) {
2569 				atomic_add_int(&sifa->refcount, 1);
2570 				return (sifa);
2571 			}
2572 		}
2573 	}
2574 	/*
2575 	 * ok, now we need to find one on the list of addresses.
2576 	 * We can't get one on the emitting interface, so let's first find
2577 	 * a preferred one. If not that, then an acceptable one; otherwise
2578 	 * we return NULL.
2579 	 */
2580 	starting_point = inp->next_addr_touse;
2581  once_again:
2582 	if (inp->next_addr_touse == NULL) {
2583 		inp->next_addr_touse = LIST_FIRST(&inp->sctp_addr_list);
2584 		resettotop = 1;
2585 	}
2586 	for (laddr = inp->next_addr_touse; laddr;
2587 	     laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
2588 		if (laddr->ifa == NULL) {
2589 			/* address has been removed */
2590 			continue;
2591 		}
2592 		if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2593 			/* address is being deleted */
2594 			continue;
2595 		}
2596 		sifa = sctp_is_ifa_addr_preferred(laddr->ifa, dest_is_loop,
2597 						  dest_is_priv, fam);
2598 		if (sifa == NULL)
2599 			continue;
2600 		atomic_add_int(&sifa->refcount, 1);
2601 		return (sifa);
2602 	}
2603 	if (resettotop == 0) {
2604 		inp->next_addr_touse = NULL;
2605 		goto once_again;
2606 	}
2607 
2608 	inp->next_addr_touse = starting_point;
2609 	resettotop = 0;
2610  once_again_too:
2611 	if (inp->next_addr_touse == NULL) {
2612 		inp->next_addr_touse = LIST_FIRST(&inp->sctp_addr_list);
2613 		resettotop = 1;
2614 	}
2615 
2616 	/* ok, what about an acceptable address in the inp */
2617 	for (laddr = inp->next_addr_touse; laddr;
2618 	     laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
2619 		if (laddr->ifa == NULL) {
2620 			/* address has been removed */
2621 			continue;
2622 		}
2623 		if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2624 			/* address is being deleted */
2625 			continue;
2626 		}
2627 		sifa = sctp_is_ifa_addr_acceptable(laddr->ifa, dest_is_loop,
2628 						   dest_is_priv, fam);
2629 		if (sifa == NULL)
2630 			continue;
2631 		atomic_add_int(&sifa->refcount, 1);
2632 		return (sifa);
2633 	}
2634 	if (resettotop == 0) {
2635 		inp->next_addr_touse = NULL;
2636 		goto once_again_too;
2637 	}
2638 
2639 	/*
2640 	 * no address bound can be a source for the destination; we are
2641 	 * in trouble
2642 	 */
2643 	return (NULL);
2644 }
2645 
2646 
2647 
2648 static struct sctp_ifa *
2649 sctp_choose_boundspecific_stcb(struct sctp_inpcb *inp,
2650 			       struct sctp_tcb *stcb,
2651 			       sctp_route_t *ro,
2652 			       uint32_t vrf_id,
2653 			       uint8_t dest_is_priv,
2654 			       uint8_t dest_is_loop,
2655 			       int non_asoc_addr_ok,
2656 			       sa_family_t fam)
2657 {
2658 	struct sctp_laddr *laddr, *starting_point;
2659 	void *ifn;
2660 	struct sctp_ifn *sctp_ifn;
2661 	struct sctp_ifa *sctp_ifa, *sifa;
2662 	uint8_t start_at_beginning = 0;
2663 	struct sctp_vrf *vrf;
2664 	uint32_t ifn_index;
2665 
2666 	/*
2667 	 * first question: is the ifn we will emit on in our list?  If so, we
2668 	 * want that one.
2669 	 */
2670 	vrf = sctp_find_vrf(vrf_id);
2671 	if (vrf == NULL)
2672 		return (NULL);
2673 
2674 	ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
2675 	ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
2676 	sctp_ifn = sctp_find_ifn(ifn, ifn_index);
2677 
2678 	/*
2679 	 * first question, is the ifn we will emit on in our list?  If so,
2680 	 * we want that one. First we look for a preferred. Second, we go
2681 	 * for an acceptable.
2682 	 */
2683 	if (sctp_ifn) {
2684 		/* first try for a preferred address on the ep */
2685 		LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
2686 #if defined(__FreeBSD__)
2687 #ifdef INET
2688 			if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
2689 			    (prison_check_ip4(inp->ip_inp.inp.inp_cred,
2690 			                      &sctp_ifa->address.sin.sin_addr) != 0)) {
2691 				continue;
2692 			}
2693 #endif
2694 #ifdef INET6
2695 			if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
2696 			    (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2697 			                      &sctp_ifa->address.sin6.sin6_addr) != 0)) {
2698 				continue;
2699 			}
2700 #endif
2701 #endif
2702 			if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0))
2703 				continue;
2704 			if (sctp_is_addr_in_ep(inp, sctp_ifa)) {
2705 				sifa = sctp_is_ifa_addr_preferred(sctp_ifa, dest_is_loop, dest_is_priv, fam);
2706 				if (sifa == NULL)
2707 					continue;
2708 				if (((non_asoc_addr_ok == 0) &&
2709 				     (sctp_is_addr_restricted(stcb, sifa))) ||
2710 				    (non_asoc_addr_ok &&
2711 				     (sctp_is_addr_restricted(stcb, sifa)) &&
2712 				     (!sctp_is_addr_pending(stcb, sifa)))) {
2713 					/* on the no-no list */
2714 					continue;
2715 				}
2716 				atomic_add_int(&sifa->refcount, 1);
2717 				return (sifa);
2718 			}
2719 		}
2720 		/* next try for an acceptable address on the ep */
2721 		LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
2722 #if defined(__FreeBSD__)
2723 #ifdef INET
2724 			if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
2725 			    (prison_check_ip4(inp->ip_inp.inp.inp_cred,
2726 			                      &sctp_ifa->address.sin.sin_addr) != 0)) {
2727 				continue;
2728 			}
2729 #endif
2730 #ifdef INET6
2731 			if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
2732 			    (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2733 			                      &sctp_ifa->address.sin6.sin6_addr) != 0)) {
2734 				continue;
2735 			}
2736 #endif
2737 #endif
2738 			if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0))
2739 				continue;
2740 			if (sctp_is_addr_in_ep(inp, sctp_ifa)) {
2741 				sifa = sctp_is_ifa_addr_acceptable(sctp_ifa, dest_is_loop, dest_is_priv, fam);
2742 				if (sifa == NULL)
2743 					continue;
2744 				if (((non_asoc_addr_ok == 0) &&
2745 				     (sctp_is_addr_restricted(stcb, sifa))) ||
2746 				    (non_asoc_addr_ok &&
2747 				     (sctp_is_addr_restricted(stcb, sifa)) &&
2748 				     (!sctp_is_addr_pending(stcb, sifa)))) {
2749 					/* on the no-no list */
2750 					continue;
2751 				}
2752 				atomic_add_int(&sifa->refcount, 1);
2753 				return (sifa);
2754 			}
2755 		}
2756 
2757 	}
2758 	/*
2759 	 * if we can't find one like that then we must look at all
2760 	 * addresses bound to pick one at first preferable then
2761 	 * secondly acceptable.
2762 	 */
2763 	starting_point = stcb->asoc.last_used_address;
2764  sctp_from_the_top:
2765 	if (stcb->asoc.last_used_address == NULL) {
2766 		start_at_beginning = 1;
2767 		stcb->asoc.last_used_address = LIST_FIRST(&inp->sctp_addr_list);
2768 	}
2769 	/* search beginning with the last used address */
2770 	for (laddr = stcb->asoc.last_used_address; laddr;
2771 	     laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
2772 		if (laddr->ifa == NULL) {
2773 			/* address has been removed */
2774 			continue;
2775 		}
2776 		if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2777 			/* address is being deleted */
2778 			continue;
2779 		}
2780 		sifa = sctp_is_ifa_addr_preferred(laddr->ifa, dest_is_loop, dest_is_priv, fam);
2781 		if (sifa == NULL)
2782 			continue;
2783 		if (((non_asoc_addr_ok == 0) &&
2784 		     (sctp_is_addr_restricted(stcb, sifa))) ||
2785 		    (non_asoc_addr_ok &&
2786 		     (sctp_is_addr_restricted(stcb, sifa)) &&
2787 		     (!sctp_is_addr_pending(stcb, sifa)))) {
2788 			/* on the no-no list */
2789 			continue;
2790 		}
2791 		stcb->asoc.last_used_address = laddr;
2792 		atomic_add_int(&sifa->refcount, 1);
2793 		return (sifa);
2794 	}
2795 	if (start_at_beginning == 0) {
2796 		stcb->asoc.last_used_address = NULL;
2797 		goto sctp_from_the_top;
2798 	}
2799 	/* now try for any higher scope than the destination */
2800 	stcb->asoc.last_used_address = starting_point;
2801 	start_at_beginning = 0;
2802  sctp_from_the_top2:
2803 	if (stcb->asoc.last_used_address == NULL) {
2804 		start_at_beginning = 1;
2805 		stcb->asoc.last_used_address = LIST_FIRST(&inp->sctp_addr_list);
2806 	}
2807 	/* search beginning with the last used address */
2808 	for (laddr = stcb->asoc.last_used_address; laddr;
2809 	     laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
2810 		if (laddr->ifa == NULL) {
2811 			/* address has been removed */
2812 			continue;
2813 		}
2814 		if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2815 			/* address is being deleted */
2816 			continue;
2817 		}
2818 		sifa = sctp_is_ifa_addr_acceptable(laddr->ifa, dest_is_loop,
2819 						   dest_is_priv, fam);
2820 		if (sifa == NULL)
2821 			continue;
2822 		if (((non_asoc_addr_ok == 0) &&
2823 		     (sctp_is_addr_restricted(stcb, sifa))) ||
2824 		    (non_asoc_addr_ok &&
2825 		     (sctp_is_addr_restricted(stcb, sifa)) &&
2826 		     (!sctp_is_addr_pending(stcb, sifa)))) {
2827 			/* on the no-no list */
2828 			continue;
2829 		}
2830 		stcb->asoc.last_used_address = laddr;
2831 		atomic_add_int(&sifa->refcount, 1);
2832 		return (sifa);
2833 	}
2834 	if (start_at_beginning == 0) {
2835 		stcb->asoc.last_used_address = NULL;
2836 		goto sctp_from_the_top2;
2837 	}
2838 	return (NULL);
2839 }
2840 
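/*
 * A summary of the helper below: walk ifn's address list and return the
 * addr_wanted-th (0-based) preferred source address for the given
 * destination scope, skipping jailed, deferred, wrong-scope, and
 * restricted entries; link-local scope and mobility next-hop checks are
 * applied where relevant. Returns NULL if not enough eligible addresses
 * exist. Callers are expected to take the refcount on the returned ifa.
 */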
2841 static struct sctp_ifa *
2842 sctp_select_nth_preferred_addr_from_ifn_boundall(struct sctp_ifn *ifn,
2843 #if defined(__FreeBSD__)
2844                                                  struct sctp_inpcb *inp,
2845 #else
2846                                                  struct sctp_inpcb *inp SCTP_UNUSED,
2847 #endif
2848 						 struct sctp_tcb *stcb,
2849 						 int non_asoc_addr_ok,
2850 						 uint8_t dest_is_loop,
2851 						 uint8_t dest_is_priv,
2852 						 int addr_wanted,
2853 						 sa_family_t fam,
2854 						 sctp_route_t *ro
2855 						 )
2856 {
2857 	struct sctp_ifa *ifa, *sifa;
2858 	int num_eligible_addr = 0;
2859 #ifdef INET6
2860 #ifdef SCTP_EMBEDDED_V6_SCOPE
2861 	struct sockaddr_in6 sin6, lsa6;
2862 
2863 	if (fam == AF_INET6) {
2864 		memcpy(&sin6, &ro->ro_dst, sizeof(struct sockaddr_in6));
2865 #ifdef SCTP_KAME
2866 		(void)sa6_recoverscope(&sin6);
2867 #else
2868 		(void)in6_recoverscope(&sin6, &sin6.sin6_addr, NULL);
2869 #endif  /* SCTP_KAME */
2870 	}
2871 #endif  /* SCTP_EMBEDDED_V6_SCOPE */
2872 #endif	/* INET6 */
2873 	LIST_FOREACH(ifa, &ifn->ifalist, next_ifa) {
2874 #if defined(__FreeBSD__)
2875 #ifdef INET
2876 		if ((ifa->address.sa.sa_family == AF_INET) &&
2877 		    (prison_check_ip4(inp->ip_inp.inp.inp_cred,
2878 		                      &ifa->address.sin.sin_addr) != 0)) {
2879 			continue;
2880 		}
2881 #endif
2882 #ifdef INET6
2883 		if ((ifa->address.sa.sa_family == AF_INET6) &&
2884 		    (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2885 		                      &ifa->address.sin6.sin6_addr) != 0)) {
2886 			continue;
2887 		}
2888 #endif
2889 #endif
2890 		if ((ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
2891 		    (non_asoc_addr_ok == 0))
2892 			continue;
2893 		sifa = sctp_is_ifa_addr_preferred(ifa, dest_is_loop,
2894 						  dest_is_priv, fam);
2895 		if (sifa == NULL)
2896 			continue;
2897 #ifdef INET6
2898 		if (fam == AF_INET6 &&
2899 		    dest_is_loop &&
2900 		    sifa->src_is_loop && sifa->src_is_priv) {
2901 			/* Don't allow fe80::1 to be a source on loopback ::1; we don't
2902 			 * list it to the peer, so we would get an abort.
2903 			 */
2904 			continue;
2905 		}
2906 #ifdef SCTP_EMBEDDED_V6_SCOPE
2907 		if (fam == AF_INET6 &&
2908 		    IN6_IS_ADDR_LINKLOCAL(&sifa->address.sin6.sin6_addr) &&
2909 		    IN6_IS_ADDR_LINKLOCAL(&sin6.sin6_addr)) {
2910 			/* link-local <-> link-local must belong to the same scope. */
2911 			memcpy(&lsa6, &sifa->address.sin6, sizeof(struct sockaddr_in6));
2912 #ifdef SCTP_KAME
2913 			(void)sa6_recoverscope(&lsa6);
2914 #else
2915 			(void)in6_recoverscope(&lsa6, &lsa6.sin6_addr, NULL);
2916 #endif  /* SCTP_KAME */
2917 			if (sin6.sin6_scope_id != lsa6.sin6_scope_id) {
2918 				continue;
2919 			}
2920 		}
2921 #endif  /* SCTP_EMBEDDED_V6_SCOPE */
2922 #endif	/* INET6 */
2923 
2924 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Userspace__)
2925 		/* Check whether the IPv6 address matches the next-hop.
2926 		   In the mobility case, the old IPv6 address may not have been
2927 		   deleted from the interface, so the interface carries both the
2928 		   previous and the new addresses. We should use the one
2929 		   corresponding to the next-hop. (by micchie)
2930 		 */
2931 #ifdef INET6
2932 		if (stcb && fam == AF_INET6 &&
2933 		    sctp_is_mobility_feature_on(stcb->sctp_ep, SCTP_MOBILITY_BASE)) {
2934 			if (sctp_v6src_match_nexthop(&sifa->address.sin6, ro)
2935 			    == 0) {
2936 				continue;
2937 			}
2938 		}
2939 #endif
2940 #ifdef INET
2941 		/* Avoid topologically incorrect IPv4 address */
2942 		if (stcb && fam == AF_INET &&
2943 		    sctp_is_mobility_feature_on(stcb->sctp_ep, SCTP_MOBILITY_BASE)) {
2944 			if (sctp_v4src_match_nexthop(sifa, ro) == 0) {
2945 				continue;
2946 			}
2947 		}
2948 #endif
2949 #endif
2950 		if (stcb) {
2951 			if (sctp_is_address_in_scope(ifa, &stcb->asoc.scope, 0) == 0) {
2952 				continue;
2953 			}
2954 			if (((non_asoc_addr_ok == 0) &&
2955 			     (sctp_is_addr_restricted(stcb, sifa))) ||
2956 			    (non_asoc_addr_ok &&
2957 			     (sctp_is_addr_restricted(stcb, sifa)) &&
2958 			     (!sctp_is_addr_pending(stcb, sifa)))) {
2959 				/*
2960 				 * It is restricted for some reason..
2961 				 * probably not yet added.
2962 				 */
2963 				continue;
2964 			}
2965 		}
2966 		if (num_eligible_addr >= addr_wanted) {
2967 			return (sifa);
2968 		}
2969 		num_eligible_addr++;
2970 	}
2971 	return (NULL);
2972 }
2973 
2974 
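/*
 * A summary of the helper below: count how many addresses on ifn would
 * be eligible preferred source addresses for this destination, applying
 * the jail, defer-use, preference, scope, and restriction filters.
 */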
2975 static int
2976 sctp_count_num_preferred_boundall(struct sctp_ifn *ifn,
2977 #if defined(__FreeBSD__)
2978                                   struct sctp_inpcb *inp,
2979 #else
2980                                   struct sctp_inpcb *inp SCTP_UNUSED,
2981 #endif
2982 				  struct sctp_tcb *stcb,
2983 				  int non_asoc_addr_ok,
2984 				  uint8_t dest_is_loop,
2985 				  uint8_t dest_is_priv,
2986 				  sa_family_t fam)
2987 {
2988 	struct sctp_ifa *ifa, *sifa;
2989 	int num_eligible_addr = 0;
2990 
2991 	LIST_FOREACH(ifa, &ifn->ifalist, next_ifa) {
2992 #if defined(__FreeBSD__)
2993 #ifdef INET
2994 		if ((ifa->address.sa.sa_family == AF_INET) &&
2995 		    (prison_check_ip4(inp->ip_inp.inp.inp_cred,
2996 		                      &ifa->address.sin.sin_addr) != 0)) {
2997 			continue;
2998 		}
2999 #endif
3000 #ifdef INET6
3001 		if ((ifa->address.sa.sa_family == AF_INET6) &&
3002 		    (stcb != NULL) &&
3003 		    (prison_check_ip6(inp->ip_inp.inp.inp_cred,
3004 		                      &ifa->address.sin6.sin6_addr) != 0)) {
3005 			continue;
3006 		}
3007 #endif
3008 #endif
3009 		if ((ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
3010 		    (non_asoc_addr_ok == 0)) {
3011 			continue;
3012 		}
3013 		sifa = sctp_is_ifa_addr_preferred(ifa, dest_is_loop,
3014 						  dest_is_priv, fam);
3015 		if (sifa == NULL) {
3016 			continue;
3017 		}
3018 		if (stcb) {
3019 			if (sctp_is_address_in_scope(ifa, &stcb->asoc.scope, 0) == 0) {
3020 				continue;
3021 			}
3022 			if (((non_asoc_addr_ok == 0) &&
3023 			     (sctp_is_addr_restricted(stcb, sifa))) ||
3024 			    (non_asoc_addr_ok &&
3025 			     (sctp_is_addr_restricted(stcb, sifa)) &&
3026 			     (!sctp_is_addr_pending(stcb, sifa)))) {
3027 				/*
3028 				 * It is restricted for some reason..
3029 				 * probably not yet added.
3030 				 */
3031 				continue;
3032 			}
3033 		}
3034 		num_eligible_addr++;
3035 	}
3036 	return (num_eligible_addr);
3037 }
3038 
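/*
 * A summary of the logic below: for a bound-all endpoint, source
 * selection proceeds in stages. Plan A uses a preferred address on the
 * emit interface (rotating via net->indx_of_eligible_next_to_use);
 * plan B looks for a preferred address on any other interface; plan C
 * falls back to an acceptable address on the emit interface; plan D
 * scans all interfaces for any acceptable address, optionally retrying
 * with IPv4 private addresses allowed.
 */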
3039 static struct sctp_ifa *
3040 sctp_choose_boundall(struct sctp_inpcb *inp,
3041                      struct sctp_tcb *stcb,
3042 		     struct sctp_nets *net,
3043 		     sctp_route_t *ro,
3044 		     uint32_t vrf_id,
3045 		     uint8_t dest_is_priv,
3046 		     uint8_t dest_is_loop,
3047 		     int non_asoc_addr_ok,
3048 		     sa_family_t fam)
3049 {
3050 	int cur_addr_num = 0, num_preferred = 0;
3051 	void *ifn;
3052 	struct sctp_ifn *sctp_ifn, *looked_at = NULL, *emit_ifn;
3053 	struct sctp_ifa *sctp_ifa, *sifa;
3054 	uint32_t ifn_index;
3055 	struct sctp_vrf *vrf;
3056 #ifdef INET
3057 	int retried = 0;
3058 #endif
3059 
3060 	/*-
3061 	 * For boundall we can use any address in the association.
3062 	 * If non_asoc_addr_ok is set we can use any address (at least in
3063 	 * theory). So we look for preferred addresses first. If we find one,
3064 	 * we use it. Otherwise we next try to get an address on the
3065 	 * interface, which we should be able to do (unless non_asoc_addr_ok
3066 	 * is false and we are routed out that way). In these cases where we
3067 	 * can't use the address of the interface we go through all the
3068 	 * ifn's looking for an address we can use and fill that in. Punting
3069 	 * means we send back address 0, which will probably cause problems
3070 	 * actually since then IP will fill in the address of the route ifn,
3071 	 * which means we probably already rejected it.. i.e. here comes an
3072 	 * abort :-<.
3073 	 */
3074 	vrf = sctp_find_vrf(vrf_id);
3075 	if (vrf == NULL)
3076 		return (NULL);
3077 
3078 	ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
3079 	ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
3080 	SCTPDBG(SCTP_DEBUG_OUTPUT2,"ifn from route:%p ifn_index:%d\n", ifn, ifn_index);
3081 	emit_ifn = looked_at = sctp_ifn = sctp_find_ifn(ifn, ifn_index);
3082 	if (sctp_ifn == NULL) {
3083 		/* ?? We don't have this guy ?? */
3084 		SCTPDBG(SCTP_DEBUG_OUTPUT2,"No ifn emit interface?\n");
3085 		goto bound_all_plan_b;
3086 	}
3087 	SCTPDBG(SCTP_DEBUG_OUTPUT2,"ifn_index:%d name:%s is emit interface\n",
3088 		ifn_index, sctp_ifn->ifn_name);
3089 
3090 	if (net) {
3091 		cur_addr_num = net->indx_of_eligible_next_to_use;
3092 	}
3093 	num_preferred = sctp_count_num_preferred_boundall(sctp_ifn,
3094 							  inp, stcb,
3095 							  non_asoc_addr_ok,
3096 							  dest_is_loop,
3097 							  dest_is_priv, fam);
3098 	SCTPDBG(SCTP_DEBUG_OUTPUT2, "Found %d preferred source addresses for intf:%s\n",
3099 		num_preferred, sctp_ifn->ifn_name);
3100 	if (num_preferred == 0) {
3101 		/*
3102 		 * no eligible addresses, we must use some other interface
3103 		 * address if we can find one.
3104 		 */
3105 		goto bound_all_plan_b;
3106 	}
3107 	/*
3108 	 * Ok we have num_preferred set with how many we can use, this
3109 	 * may vary from call to call due to addresses being deprecated
3110 	 * etc..
3111 	 */
3112 	if (cur_addr_num >= num_preferred) {
3113 		cur_addr_num = 0;
3114 	}
3115 	/*
3116 	 * select the nth address from the list (where cur_addr_num is the
3117 	 * nth) and 0 is the first one, 1 is the second one etc...
3118 	 */
3119 	SCTPDBG(SCTP_DEBUG_OUTPUT2, "cur_addr_num:%d\n", cur_addr_num);
3120 
3121 	sctp_ifa = sctp_select_nth_preferred_addr_from_ifn_boundall(sctp_ifn, inp, stcb, non_asoc_addr_ok, dest_is_loop,
3122                                                                     dest_is_priv, cur_addr_num, fam, ro);
3123 
3124 	/* if sctp_ifa is NULL, something changed; fall back to plan b. */
3125 	if (sctp_ifa) {
3126 		atomic_add_int(&sctp_ifa->refcount, 1);
3127 		if (net) {
3128 			/* save off the index of the next one we will want */
3129 			net->indx_of_eligible_next_to_use = cur_addr_num + 1;
3130 		}
3131 		return (sctp_ifa);
3132 	}
3133 	/*
3134 	 * plan_b: Look at all interfaces and find a preferred address. If
3135 	 * no preferred fall through to plan_c.
3136 	 */
3137  bound_all_plan_b:
3138 	SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan B\n");
3139 	LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
3140 		SCTPDBG(SCTP_DEBUG_OUTPUT2, "Examine interface %s\n",
3141 			sctp_ifn->ifn_name);
3142 		if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
3143 			/* wrong base scope */
3144 			SCTPDBG(SCTP_DEBUG_OUTPUT2, "skip\n");
3145 			continue;
3146 		}
3147 		if ((sctp_ifn == looked_at) && looked_at) {
3148 			/* already looked at this guy */
3149 			SCTPDBG(SCTP_DEBUG_OUTPUT2, "already seen\n");
3150 			continue;
3151 		}
3152 		num_preferred = sctp_count_num_preferred_boundall(sctp_ifn, inp, stcb, non_asoc_addr_ok,
3153                                                                   dest_is_loop, dest_is_priv, fam);
3154 		SCTPDBG(SCTP_DEBUG_OUTPUT2,
3155 			"Found ifn:%p %d preferred source addresses\n",
3156 			ifn, num_preferred);
3157 		if (num_preferred == 0) {
3158 			/* None on this interface. */
3159 			SCTPDBG(SCTP_DEBUG_OUTPUT2, "No preferred -- skipping to next\n");
3160 			continue;
3161 		}
3162 		SCTPDBG(SCTP_DEBUG_OUTPUT2,
3163 			"num preferred:%d on interface:%p cur_addr_num:%d\n",
3164 			num_preferred, (void *)sctp_ifn, cur_addr_num);
3165 
3166 		/*
3167 		 * Ok we have num_preferred set with how many we can
3168 		 * use, this may vary from call to call due to addresses
3169 		 * being deprecated etc..
3170 		 */
3171 		if (cur_addr_num >= num_preferred) {
3172 			cur_addr_num = 0;
3173 		}
3174 		sifa = sctp_select_nth_preferred_addr_from_ifn_boundall(sctp_ifn, inp, stcb, non_asoc_addr_ok, dest_is_loop,
3175                                                                         dest_is_priv, cur_addr_num, fam, ro);
3176 		if (sifa == NULL)
3177 			continue;
3178 		if (net) {
3179 			net->indx_of_eligible_next_to_use = cur_addr_num + 1;
3180 			SCTPDBG(SCTP_DEBUG_OUTPUT2, "we selected %d\n",
3181 				cur_addr_num);
3182 			SCTPDBG(SCTP_DEBUG_OUTPUT2, "Source:");
3183 			SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &sifa->address.sa);
3184 			SCTPDBG(SCTP_DEBUG_OUTPUT2, "Dest:");
3185 			SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &net->ro._l_addr.sa);
3186 		}
3187 		atomic_add_int(&sifa->refcount, 1);
3188 		return (sifa);
3189 	}
3190 #ifdef INET
3191 again_with_private_addresses_allowed:
3192 #endif
3193 	/* plan_c: do we have an acceptable address on the emit interface */
3194 	sifa = NULL;
3195 	SCTPDBG(SCTP_DEBUG_OUTPUT2,"Trying Plan C: find acceptable on interface\n");
3196 	if (emit_ifn == NULL) {
3197 		SCTPDBG(SCTP_DEBUG_OUTPUT2,"Jump to Plan D - no emit_ifn\n");
3198 		goto plan_d;
3199 	}
3200 	LIST_FOREACH(sctp_ifa, &emit_ifn->ifalist, next_ifa) {
3201 		SCTPDBG(SCTP_DEBUG_OUTPUT2, "ifa:%p\n", (void *)sctp_ifa);
3202 #if defined(__FreeBSD__)
3203 #ifdef INET
3204 		if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
3205 		    (prison_check_ip4(inp->ip_inp.inp.inp_cred,
3206 		                      &sctp_ifa->address.sin.sin_addr) != 0)) {
3207 			SCTPDBG(SCTP_DEBUG_OUTPUT2,"Jailed\n");
3208 			continue;
3209 		}
3210 #endif
3211 #ifdef INET6
3212 		if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
3213 		    (prison_check_ip6(inp->ip_inp.inp.inp_cred,
3214 		                      &sctp_ifa->address.sin6.sin6_addr) != 0)) {
3215 			SCTPDBG(SCTP_DEBUG_OUTPUT2,"Jailed\n");
3216 			continue;
3217 		}
3218 #endif
3219 #endif
3220 		if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
3221 		    (non_asoc_addr_ok == 0)) {
3222 			SCTPDBG(SCTP_DEBUG_OUTPUT2,"Defer\n");
3223 			continue;
3224 		}
3225 		sifa = sctp_is_ifa_addr_acceptable(sctp_ifa, dest_is_loop,
3226 						   dest_is_priv, fam);
3227 		if (sifa == NULL) {
3228 			SCTPDBG(SCTP_DEBUG_OUTPUT2, "IFA not acceptable\n");
3229 			continue;
3230 		}
3231 		if (stcb) {
3232 			if (sctp_is_address_in_scope(sifa, &stcb->asoc.scope, 0) == 0) {
3233 				SCTPDBG(SCTP_DEBUG_OUTPUT2, "NOT in scope\n");
3234 				sifa = NULL;
3235 				continue;
3236 			}
3237 			if (((non_asoc_addr_ok == 0) &&
3238 			     (sctp_is_addr_restricted(stcb, sifa))) ||
3239 			    (non_asoc_addr_ok &&
3240 			     (sctp_is_addr_restricted(stcb, sifa)) &&
3241 			     (!sctp_is_addr_pending(stcb, sifa)))) {
3242 				/*
3243 				 * It is restricted for some
3244 				 * reason.. probably not yet added.
3245 				 */
3246 				SCTPDBG(SCTP_DEBUG_OUTPUT2, "Its restricted\n");
3247 				sifa = NULL;
3248 				continue;
3249 			}
3250 		}
3251 		atomic_add_int(&sifa->refcount, 1);
3252 		goto out;
3253 	}
3254  plan_d:
3255 	/*
3256 	 * plan_d: We are in trouble. No preferred address on the emit
3257 	 * interface, and no preferred address on any other interface.
3258 	 * Go out and see if we can find an acceptable address somewhere
3259 	 * amongst all interfaces.
3260 	 */
3261 	SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan D looked_at is %p\n", (void *)looked_at);
3262 	LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
3263 		if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
3264 			/* wrong base scope */
3265 			continue;
3266 		}
3267 		LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
3268 #if defined(__FreeBSD__)
3269 #ifdef INET
3270 			if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
3271 			    (prison_check_ip4(inp->ip_inp.inp.inp_cred,
3272 			                      &sctp_ifa->address.sin.sin_addr) != 0)) {
3273 				continue;
3274 			}
3275 #endif
3276 #ifdef INET6
3277 			if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
3278 			    (prison_check_ip6(inp->ip_inp.inp.inp_cred,
3279 			                      &sctp_ifa->address.sin6.sin6_addr) != 0)) {
3280 				continue;
3281 			}
3282 #endif
3283 #endif
3284 			if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
3285 			    (non_asoc_addr_ok == 0))
3286 				continue;
3287 			sifa = sctp_is_ifa_addr_acceptable(sctp_ifa,
3288 							   dest_is_loop,
3289 							   dest_is_priv, fam);
3290 			if (sifa == NULL)
3291 				continue;
3292 			if (stcb) {
3293 				if (sctp_is_address_in_scope(sifa, &stcb->asoc.scope, 0) == 0) {
3294 					sifa = NULL;
3295 					continue;
3296 				}
3297 				if (((non_asoc_addr_ok == 0) &&
3298 				     (sctp_is_addr_restricted(stcb, sifa))) ||
3299 				    (non_asoc_addr_ok &&
3300 				     (sctp_is_addr_restricted(stcb, sifa)) &&
3301 				     (!sctp_is_addr_pending(stcb, sifa)))) {
3302 					/*
3303 					 * It is restricted for some
3304 					 * reason.. probably not yet added.
3305 					 */
3306 					sifa = NULL;
3307 					continue;
3308 				}
3309 			}
3310 			goto out;
3311 		}
3312 	}
3313 #ifdef INET
3314 	if (stcb) {
3315 		if ((retried == 0) && (stcb->asoc.scope.ipv4_local_scope == 0)) {
3316 			stcb->asoc.scope.ipv4_local_scope = 1;
3317 			retried = 1;
3318 			goto again_with_private_addresses_allowed;
3319 		} else if (retried == 1) {
3320 			stcb->asoc.scope.ipv4_local_scope = 0;
3321 		}
3322 	}
3323 #endif
3324 out:
3325 #ifdef INET
3326 	if (sifa) {
3327 		if (retried == 1) {
3328 			LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
3329 				if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
3330 					/* wrong base scope */
3331 					continue;
3332 				}
3333 				LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
3334 					struct sctp_ifa *tmp_sifa;
3335 
3336 #if defined(__FreeBSD__)
3337 #ifdef INET
3338 					if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
3339 					    (prison_check_ip4(inp->ip_inp.inp.inp_cred,
3340 					                      &sctp_ifa->address.sin.sin_addr) != 0)) {
3341 						continue;
3342 					}
3343 #endif
3344 #ifdef INET6
3345 					if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
3346 					    (prison_check_ip6(inp->ip_inp.inp.inp_cred,
3347 					                      &sctp_ifa->address.sin6.sin6_addr) != 0)) {
3348 						continue;
3349 					}
3350 #endif
3351 #endif
3352 					if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
3353 					    (non_asoc_addr_ok == 0))
3354 						continue;
3355 					tmp_sifa = sctp_is_ifa_addr_acceptable(sctp_ifa,
3356 					                                       dest_is_loop,
3357 					                                       dest_is_priv, fam);
3358 					if (tmp_sifa == NULL) {
3359 						continue;
3360 					}
3361 					if (tmp_sifa == sifa) {
3362 						continue;
3363 					}
3364 					if (stcb) {
3365 						if (sctp_is_address_in_scope(tmp_sifa,
3366 						                             &stcb->asoc.scope, 0) == 0) {
3367 							continue;
3368 						}
3369 						if (((non_asoc_addr_ok == 0) &&
3370 						     (sctp_is_addr_restricted(stcb, tmp_sifa))) ||
3371 						    (non_asoc_addr_ok &&
3372 						     (sctp_is_addr_restricted(stcb, tmp_sifa)) &&
3373 						     (!sctp_is_addr_pending(stcb, tmp_sifa)))) {
3374 							/*
3375 							 * It is restricted for some
3376 							 * reason.. probably not yet added.
3377 							 */
3378 							continue;
3379 						}
3380 					}
3381 					if ((tmp_sifa->address.sin.sin_family == AF_INET) &&
3382 					    (IN4_ISPRIVATE_ADDRESS(&(tmp_sifa->address.sin.sin_addr)))) {
3383 						sctp_add_local_addr_restricted(stcb, tmp_sifa);
3384 					}
3385 				}
3386 			}
3387 		}
3388 		atomic_add_int(&sifa->refcount, 1);
3389 	}
3390 #endif
3391 	return (sifa);
3392 }
3393 
3394 
3395 
3396 /* tcb may be NULL */
3397 struct sctp_ifa *
3398 sctp_source_address_selection(struct sctp_inpcb *inp,
3399 			      struct sctp_tcb *stcb,
3400 			      sctp_route_t *ro,
3401 			      struct sctp_nets *net,
3402 			      int non_asoc_addr_ok, uint32_t vrf_id)
3403 {
3404 	struct sctp_ifa *answer;
3405 	uint8_t dest_is_priv, dest_is_loop;
3406 	sa_family_t fam;
3407 #ifdef INET
3408 	struct sockaddr_in *to = (struct sockaddr_in *)&ro->ro_dst;
3409 #endif
3410 #ifdef INET6
3411 	struct sockaddr_in6 *to6 = (struct sockaddr_in6 *)&ro->ro_dst;
3412 #endif
3413 
3414 	/**
3415 	 * Rules:
3416 	 * - Find the route if needed, cache if I can.
3417 	 * - Look at the interface address in the route. Is it in the bound
3418 	 *   list? If so we have the best source.
3419 	 * - If not we must rotate amongst the addresses.
3420 	 *
3421 	 * Caveats and issues
3422 	 *
3423 	 * Do we need to pay attention to scope. We can have a private address
3424 	 * or a global address we are sourcing or sending to. So if we draw
3425 	 * it out
3426 	 * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
3427 	 * For V4
3428 	 * ------------------------------------------
3429 	 *      source     *      dest  *  result
3430 	 * -----------------------------------------
3431 	 * <a>  Private    *    Global  *  NAT
3432 	 * -----------------------------------------
3433 	 * <b>  Private    *    Private *  No problem
3434 	 * -----------------------------------------
3435 	 * <c>  Global     *    Private *  Huh, How will this work?
3436 	 * -----------------------------------------
3437 	 * <d>  Global     *    Global  *  No Problem
3438 	 *------------------------------------------
3439 	 * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
3440 	 * For V6
3441 	 *------------------------------------------
3442 	 *      source     *      dest  *  result
3443 	 * -----------------------------------------
3444 	 * <a>  Linklocal  *    Global  *
3445 	 * -----------------------------------------
3446 	 * <b>  Linklocal  * Linklocal  *  No problem
3447 	 * -----------------------------------------
3448 	 * <c>  Global     * Linklocal  *  Huh, How will this work?
3449 	 * -----------------------------------------
3450 	 * <d>  Global     *    Global  *  No Problem
3451 	 *------------------------------------------
3452 	 * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
3453 	 *
3454 	 * And then we add to that what happens if there are multiple addresses
3455 	 * assigned to an interface. Remember the ifa on an ifn is a linked
3456 	 * list of addresses. So one interface can have more than one IP
3457 	 * address. What happens if we have both a private and a global
3458 	 * address? Do we then use the context of the destination to sort out
3459 	 * which one is best? And what about NATs: sending P->G may get you a
3460 	 * NAT translation, or should you select the G that's on the interface
3461 	 * in preference?
3462 	 *
3463 	 * Decisions:
3464 	 *
3465 	 * - count the number of addresses on the interface.
3466 	 * - if it is one, no problem except case <c>.
3467 	 *   For <a> we will assume a NAT out there.
3468 	 * - if there are more than one, then we need to worry about scope P
3469 	 *   or G. We should prefer G -> G and P -> P if possible.
3470 	 *   Then as a secondary fall back to mixed types G->P being a last
3471 	 *   ditch one.
3472 	 * - The above all works for bound all, but bound specific we need to
3473 	 *   use the same concept but instead only consider the bound
3474 	 *   addresses. If the bound set is NOT assigned to the interface then
3475 	 *   we must use rotation amongst the bound addresses..
3476 	 */
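	/*
	 * In practice the work is delegated below: find/cache a route,
	 * classify the destination scope (loopback / private / global),
	 * then call sctp_choose_boundall() for bound-all endpoints or one
	 * of the sctp_choose_boundspecific_*() routines otherwise.
	 */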
3477 #if defined(__FreeBSD__)
3478 	if (ro->ro_nh == NULL) {
3479 #else
3480 	if (ro->ro_rt == NULL) {
3481 #endif
3482 		/*
3483 		 * Need a route to cache.
3484 		 */
3485 		SCTP_RTALLOC(ro, vrf_id, inp->fibnum);
3486 	}
3487 #if defined(__FreeBSD__)
3488 	if (ro->ro_nh == NULL) {
3489 #else
3490 	if (ro->ro_rt == NULL) {
3491 #endif
3492 		return (NULL);
3493 	}
3494 #if defined(__Userspace_os_Windows)
3495 	/* On Windows the sa_family is U_SHORT or ADDRESS_FAMILY */
3496 	fam = (sa_family_t)ro->ro_dst.sa_family;
3497 #else
3498 	fam = ro->ro_dst.sa_family;
3499 #endif
3500 	dest_is_priv = dest_is_loop = 0;
3501 	/* Setup our scopes for the destination */
3502 	switch (fam) {
3503 #ifdef INET
3504 	case AF_INET:
3505 		/* Scope based on outbound address */
3506 		if (IN4_ISLOOPBACK_ADDRESS(&to->sin_addr)) {
3507 			dest_is_loop = 1;
3508 			if (net != NULL) {
3509 				/* mark it as local */
3510 				net->addr_is_local = 1;
3511 			}
3512 		} else if ((IN4_ISPRIVATE_ADDRESS(&to->sin_addr))) {
3513 			dest_is_priv = 1;
3514 		}
3515 		break;
3516 #endif
3517 #ifdef INET6
3518 	case AF_INET6:
3519 		/* Scope based on outbound address */
3520 #if defined(__Userspace_os_Windows)
3521 		if (IN6_IS_ADDR_LOOPBACK(&to6->sin6_addr)) {
3522 #else
3523 		if (IN6_IS_ADDR_LOOPBACK(&to6->sin6_addr) ||
3524 		    SCTP_ROUTE_IS_REAL_LOOP(ro)) {
3525 #endif
3526 			/*
3527 			 * If the address is a loopback address, which
3528 			 * consists of "::1" OR "fe80::1%lo0", we are loopback
3529 			 * scope. But we don't use dest_is_priv (link local
3530 			 * addresses).
3531 			 */
3532 			dest_is_loop = 1;
3533 			if (net != NULL) {
3534 				/* mark it as local */
3535 				net->addr_is_local = 1;
3536 			}
3537 		} else if (IN6_IS_ADDR_LINKLOCAL(&to6->sin6_addr)) {
3538 			dest_is_priv = 1;
3539 		}
3540 		break;
3541 #endif
3542 	}
3543 	SCTPDBG(SCTP_DEBUG_OUTPUT2, "Select source addr for:");
3544 	SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)&ro->ro_dst);
3545 	SCTP_IPI_ADDR_RLOCK();
3546 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
3547 		/*
3548 		 * Bound all case
3549 		 */
3550 		answer = sctp_choose_boundall(inp, stcb, net, ro, vrf_id,
3551 					      dest_is_priv, dest_is_loop,
3552 					      non_asoc_addr_ok, fam);
3553 		SCTP_IPI_ADDR_RUNLOCK();
3554 		return (answer);
3555 	}
3556 	/*
3557 	 * Subset bound case
3558 	 */
3559 	if (stcb) {
3560 		answer = sctp_choose_boundspecific_stcb(inp, stcb, ro,
3561 							vrf_id,	dest_is_priv,
3562 							dest_is_loop,
3563 							non_asoc_addr_ok, fam);
3564 	} else {
3565 		answer = sctp_choose_boundspecific_inp(inp, ro, vrf_id,
3566 						       non_asoc_addr_ok,
3567 						       dest_is_priv,
3568 						       dest_is_loop, fam);
3569 	}
3570 	SCTP_IPI_ADDR_RUNLOCK();
3571 	return (answer);
3572 }
3573 
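/*
 * A summary of the parser below: walk the cmsg headers in the control
 * mbuf; if c_type is found at IPPROTO_SCTP level, copy its data into
 * *data and return 1. As a compatibility path, when c_type is
 * SCTP_SNDRCV, an sctp_sndrcvinfo is assembled from any SCTP_SNDINFO,
 * SCTP_PRINFO, and SCTP_AUTHINFO cmsgs encountered. If a malformed cmsg
 * is hit, parsing stops and the result found so far is returned.
 */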
3574 static int
3575 sctp_find_cmsg(int c_type, void *data, struct mbuf *control, size_t cpsize)
3576 {
3577 #if defined(__Userspace_os_Windows)
3578 	WSACMSGHDR cmh;
3579 #else
3580 	struct cmsghdr cmh;
3581 #endif
3582 	struct sctp_sndinfo sndinfo;
3583 	struct sctp_prinfo prinfo;
3584 	struct sctp_authinfo authinfo;
3585 	int tot_len, rem_len, cmsg_data_len, cmsg_data_off, off;
3586 	int found;
3587 
3588 	/*
3589 	 * Independent of how many mbufs, find the c_type inside the control
3590 	 * structure and copy out the data.
3591 	 */
3592 	found = 0;
3593 	tot_len = SCTP_BUF_LEN(control);
3594 	for (off = 0; off < tot_len; off += CMSG_ALIGN(cmh.cmsg_len)) {
3595 		rem_len = tot_len - off;
3596 		if (rem_len < (int)CMSG_ALIGN(sizeof(cmh))) {
3597 			/* There is not enough room for one more. */
3598 			return (found);
3599 		}
3600 		m_copydata(control, off, sizeof(cmh), (caddr_t)&cmh);
3601 		if (cmh.cmsg_len < CMSG_ALIGN(sizeof(cmh))) {
3602 			/* We don't have a complete CMSG header. */
3603 			return (found);
3604 		}
3605 		if ((cmh.cmsg_len > INT_MAX) || ((int)cmh.cmsg_len > rem_len)) {
3606 			/* We don't have the complete CMSG. */
3607 			return (found);
3608 		}
3609 		cmsg_data_len = (int)cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh));
3610 		cmsg_data_off = off + CMSG_ALIGN(sizeof(cmh));
3611 		if ((cmh.cmsg_level == IPPROTO_SCTP) &&
3612 		    ((c_type == cmh.cmsg_type) ||
3613 		     ((c_type == SCTP_SNDRCV) &&
3614 		      ((cmh.cmsg_type == SCTP_SNDINFO) ||
3615 		       (cmh.cmsg_type == SCTP_PRINFO) ||
3616 		       (cmh.cmsg_type == SCTP_AUTHINFO))))) {
3617 			if (c_type == cmh.cmsg_type) {
3618 				if (cpsize > INT_MAX) {
3619 					return (found);
3620 				}
3621 				if (cmsg_data_len < (int)cpsize) {
3622 					return (found);
3623 				}
3624 				/* It is exactly what we want. Copy it out. */
3625 				m_copydata(control, cmsg_data_off, (int)cpsize, (caddr_t)data);
3626 				return (1);
3627 			} else {
3628 				struct sctp_sndrcvinfo *sndrcvinfo;
3629 
3630 				sndrcvinfo = (struct sctp_sndrcvinfo *)data;
3631 				if (found == 0) {
3632 					if (cpsize < sizeof(struct sctp_sndrcvinfo)) {
3633 						return (found);
3634 					}
3635 					memset(sndrcvinfo, 0, sizeof(struct sctp_sndrcvinfo));
3636 				}
3637 				switch (cmh.cmsg_type) {
3638 				case SCTP_SNDINFO:
3639 					if (cmsg_data_len < (int)sizeof(struct sctp_sndinfo)) {
3640 						return (found);
3641 					}
3642 					m_copydata(control, cmsg_data_off, sizeof(struct sctp_sndinfo), (caddr_t)&sndinfo);
3643 					sndrcvinfo->sinfo_stream = sndinfo.snd_sid;
3644 					sndrcvinfo->sinfo_flags = sndinfo.snd_flags;
3645 					sndrcvinfo->sinfo_ppid = sndinfo.snd_ppid;
3646 					sndrcvinfo->sinfo_context = sndinfo.snd_context;
3647 					sndrcvinfo->sinfo_assoc_id = sndinfo.snd_assoc_id;
3648 					break;
3649 				case SCTP_PRINFO:
3650 					if (cmsg_data_len < (int)sizeof(struct sctp_prinfo)) {
3651 						return (found);
3652 					}
3653 					m_copydata(control, cmsg_data_off, sizeof(struct sctp_prinfo), (caddr_t)&prinfo);
3654 					if (prinfo.pr_policy != SCTP_PR_SCTP_NONE) {
3655 						sndrcvinfo->sinfo_timetolive = prinfo.pr_value;
3656 					} else {
3657 						sndrcvinfo->sinfo_timetolive = 0;
3658 					}
3659 					sndrcvinfo->sinfo_flags |= prinfo.pr_policy;
3660 					break;
3661 				case SCTP_AUTHINFO:
3662 					if (cmsg_data_len < (int)sizeof(struct sctp_authinfo)) {
3663 						return (found);
3664 					}
3665 					m_copydata(control, cmsg_data_off, sizeof(struct sctp_authinfo), (caddr_t)&authinfo);
3666 					sndrcvinfo->sinfo_keynumber_valid = 1;
3667 					sndrcvinfo->sinfo_keynumber = authinfo.auth_keynumber;
3668 					break;
3669 				default:
3670 					return (found);
3671 				}
3672 				found = 1;
3673 			}
3674 		}
3675 	}
3676 	return (found);
3677 }
3678 
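/*
 * A summary of the routine below: scan the control cmsgs supplied with
 * an implicit association setup. SCTP_INIT adjusts the association's
 * stream, retransmission, and timeout defaults (growing the outgoing
 * stream array if needed); SCTP_DSTADDRV4/SCTP_DSTADDRV6 add extra
 * remote addresses to the tcb. On a malformed cmsg or failure, *error
 * is set and 1 is returned; otherwise 0.
 */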
3679 static int
3680 sctp_process_cmsgs_for_init(struct sctp_tcb *stcb, struct mbuf *control, int *error)
3681 {
3682 #if defined(__Userspace_os_Windows)
3683 	WSACMSGHDR cmh;
3684 #else
3685 	struct cmsghdr cmh;
3686 #endif
3687 	struct sctp_initmsg initmsg;
3688 #ifdef INET
3689 	struct sockaddr_in sin;
3690 #endif
3691 #ifdef INET6
3692 	struct sockaddr_in6 sin6;
3693 #endif
3694 	int tot_len, rem_len, cmsg_data_len, cmsg_data_off, off;
3695 
3696 	tot_len = SCTP_BUF_LEN(control);
3697 	for (off = 0; off < tot_len; off += CMSG_ALIGN(cmh.cmsg_len)) {
3698 		rem_len = tot_len - off;
3699 		if (rem_len < (int)CMSG_ALIGN(sizeof(cmh))) {
3700 			/* There is not enough room for one more. */
3701 			*error = EINVAL;
3702 			return (1);
3703 		}
3704 		m_copydata(control, off, sizeof(cmh), (caddr_t)&cmh);
3705 		if (cmh.cmsg_len < CMSG_ALIGN(sizeof(cmh))) {
3706 			/* We don't have a complete CMSG header. */
3707 			*error = EINVAL;
3708 			return (1);
3709 		}
3710 		if ((cmh.cmsg_len > INT_MAX) || ((int)cmh.cmsg_len > rem_len)) {
3711 			/* We don't have the complete CMSG. */
3712 			*error = EINVAL;
3713 			return (1);
3714 		}
3715 		cmsg_data_len = (int)cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh));
3716 		cmsg_data_off = off + CMSG_ALIGN(sizeof(cmh));
3717 		if (cmh.cmsg_level == IPPROTO_SCTP) {
3718 			switch (cmh.cmsg_type) {
3719 			case SCTP_INIT:
3720 				if (cmsg_data_len < (int)sizeof(struct sctp_initmsg)) {
3721 					*error = EINVAL;
3722 					return (1);
3723 				}
3724 				m_copydata(control, cmsg_data_off, sizeof(struct sctp_initmsg), (caddr_t)&initmsg);
3725 				if (initmsg.sinit_max_attempts)
3726 					stcb->asoc.max_init_times = initmsg.sinit_max_attempts;
3727 				if (initmsg.sinit_num_ostreams)
3728 					stcb->asoc.pre_open_streams = initmsg.sinit_num_ostreams;
3729 				if (initmsg.sinit_max_instreams)
3730 					stcb->asoc.max_inbound_streams = initmsg.sinit_max_instreams;
3731 				if (initmsg.sinit_max_init_timeo)
3732 					stcb->asoc.initial_init_rto_max = initmsg.sinit_max_init_timeo;
3733 				if (stcb->asoc.streamoutcnt < stcb->asoc.pre_open_streams) {
3734 					struct sctp_stream_out *tmp_str;
3735 					unsigned int i;
3736 #if defined(SCTP_DETAILED_STR_STATS)
3737 					int j;
3738 #endif
3739 
3740 					/* Default is NOT correct */
3741 					SCTPDBG(SCTP_DEBUG_OUTPUT1, "Ok, default:%d pre_open:%d\n",
3742 						stcb->asoc.streamoutcnt, stcb->asoc.pre_open_streams);
3743 					SCTP_TCB_UNLOCK(stcb);
3744 					SCTP_MALLOC(tmp_str,
3745 					            struct sctp_stream_out *,
3746 					            (stcb->asoc.pre_open_streams * sizeof(struct sctp_stream_out)),
3747 					            SCTP_M_STRMO);
3748 					SCTP_TCB_LOCK(stcb);
3749 					if (tmp_str != NULL) {
3750 						SCTP_FREE(stcb->asoc.strmout, SCTP_M_STRMO);
3751 						stcb->asoc.strmout = tmp_str;
3752 						stcb->asoc.strm_realoutsize = stcb->asoc.streamoutcnt = stcb->asoc.pre_open_streams;
3753 					} else {
3754 						stcb->asoc.pre_open_streams = stcb->asoc.streamoutcnt;
3755 					}
3756 					for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
3757 						TAILQ_INIT(&stcb->asoc.strmout[i].outqueue);
3758 						stcb->asoc.strmout[i].chunks_on_queues = 0;
3759 						stcb->asoc.strmout[i].next_mid_ordered = 0;
3760 						stcb->asoc.strmout[i].next_mid_unordered = 0;
3761 #if defined(SCTP_DETAILED_STR_STATS)
3762 						for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
3763 							stcb->asoc.strmout[i].abandoned_sent[j] = 0;
3764 							stcb->asoc.strmout[i].abandoned_unsent[j] = 0;
3765 						}
3766 #else
3767 						stcb->asoc.strmout[i].abandoned_sent[0] = 0;
3768 						stcb->asoc.strmout[i].abandoned_unsent[0] = 0;
3769 #endif
3770 						stcb->asoc.strmout[i].sid = i;
3771 						stcb->asoc.strmout[i].last_msg_incomplete = 0;
3772 						stcb->asoc.strmout[i].state = SCTP_STREAM_OPENING;
3773 						stcb->asoc.ss_functions.sctp_ss_init_stream(stcb, &stcb->asoc.strmout[i], NULL);
3774 					}
3775 				}
3776 				break;
3777 #ifdef INET
3778 			case SCTP_DSTADDRV4:
3779 				if (cmsg_data_len < (int)sizeof(struct in_addr)) {
3780 					*error = EINVAL;
3781 					return (1);
3782 				}
3783 				memset(&sin, 0, sizeof(struct sockaddr_in));
3784 				sin.sin_family = AF_INET;
3785 #ifdef HAVE_SIN_LEN
3786 				sin.sin_len = sizeof(struct sockaddr_in);
3787 #endif
3788 				sin.sin_port = stcb->rport;
3789 				m_copydata(control, cmsg_data_off, sizeof(struct in_addr), (caddr_t)&sin.sin_addr);
3790 				if ((sin.sin_addr.s_addr == INADDR_ANY) ||
3791 				    (sin.sin_addr.s_addr == INADDR_BROADCAST) ||
3792 				    IN_MULTICAST(ntohl(sin.sin_addr.s_addr))) {
3793 					*error = EINVAL;
3794 					return (1);
3795 				}
3796 				if (sctp_add_remote_addr(stcb, (struct sockaddr *)&sin, NULL, stcb->asoc.port,
3797 				                         SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
3798 					*error = ENOBUFS;
3799 					return (1);
3800 				}
3801 				break;
3802 #endif
3803 #ifdef INET6
3804 			case SCTP_DSTADDRV6:
3805 				if (cmsg_data_len < (int)sizeof(struct in6_addr)) {
3806 					*error = EINVAL;
3807 					return (1);
3808 				}
3809 				memset(&sin6, 0, sizeof(struct sockaddr_in6));
3810 				sin6.sin6_family = AF_INET6;
3811 #ifdef HAVE_SIN6_LEN
3812 				sin6.sin6_len = sizeof(struct sockaddr_in6);
3813 #endif
3814 				sin6.sin6_port = stcb->rport;
3815 				m_copydata(control, cmsg_data_off, sizeof(struct in6_addr), (caddr_t)&sin6.sin6_addr);
3816 				if (IN6_IS_ADDR_UNSPECIFIED(&sin6.sin6_addr) ||
3817 				    IN6_IS_ADDR_MULTICAST(&sin6.sin6_addr)) {
3818 					*error = EINVAL;
3819 					return (1);
3820 				}
3821 #ifdef INET
3822 				if (IN6_IS_ADDR_V4MAPPED(&sin6.sin6_addr)) {
3823 					in6_sin6_2_sin(&sin, &sin6);
3824 					if ((sin.sin_addr.s_addr == INADDR_ANY) ||
3825 					    (sin.sin_addr.s_addr == INADDR_BROADCAST) ||
3826 					    IN_MULTICAST(ntohl(sin.sin_addr.s_addr))) {
3827 						*error = EINVAL;
3828 						return (1);
3829 					}
3830 					if (sctp_add_remote_addr(stcb, (struct sockaddr *)&sin, NULL, stcb->asoc.port,
3831 					                         SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
3832 						*error = ENOBUFS;
3833 						return (1);
3834 					}
3835 				} else
3836 #endif
3837 					if (sctp_add_remote_addr(stcb, (struct sockaddr *)&sin6, NULL, stcb->asoc.port,
3838 					                         SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
3839 						*error = ENOBUFS;
3840 						return (1);
3841 					}
3842 				break;
3843 #endif
3844 			default:
3845 				break;
3846 			}
3847 		}
3848 	}
3849 	return (0);
3850 }
3851 
3852 #if defined(INET) || defined(INET6)
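/*
 * A summary of the routine below: look through the control cmsgs for
 * SCTP_DSTADDRV4/SCTP_DSTADDRV6 destination addresses and return the
 * first existing association matching one of them; *error is set to
 * EINVAL on a malformed cmsg.
 */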
3853 static struct sctp_tcb *
3854 sctp_findassociation_cmsgs(struct sctp_inpcb **inp_p,
3855                            uint16_t port,
3856                            struct mbuf *control,
3857                            struct sctp_nets **net_p,
3858                            int *error)
3859 {
3860 #if defined(__Userspace_os_Windows)
3861 	WSACMSGHDR cmh;
3862 #else
3863 	struct cmsghdr cmh;
3864 #endif
3865 	struct sctp_tcb *stcb;
3866 	struct sockaddr *addr;
3867 #ifdef INET
3868 	struct sockaddr_in sin;
3869 #endif
3870 #ifdef INET6
3871 	struct sockaddr_in6 sin6;
3872 #endif
3873 	int tot_len, rem_len, cmsg_data_len, cmsg_data_off, off;
3874 
3875 	tot_len = SCTP_BUF_LEN(control);
3876 	for (off = 0; off < tot_len; off += CMSG_ALIGN(cmh.cmsg_len)) {
3877 		rem_len = tot_len - off;
3878 		if (rem_len < (int)CMSG_ALIGN(sizeof(cmh))) {
3879 			/* There is not enough room for one more. */
3880 			*error = EINVAL;
3881 			return (NULL);
3882 		}
3883 		m_copydata(control, off, sizeof(cmh), (caddr_t)&cmh);
3884 		if (cmh.cmsg_len < CMSG_ALIGN(sizeof(cmh))) {
3885 			/* We don't have a complete CMSG header. */
3886 			*error = EINVAL;
3887 			return (NULL);
3888 		}
3889 		if ((cmh.cmsg_len > INT_MAX) || ((int)cmh.cmsg_len > rem_len)) {
3890 			/* We don't have the complete CMSG. */
3891 			*error = EINVAL;
3892 			return (NULL);
3893 		}
3894 		cmsg_data_len = (int)cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh));
3895 		cmsg_data_off = off + CMSG_ALIGN(sizeof(cmh));
3896 		if (cmh.cmsg_level == IPPROTO_SCTP) {
3897 			switch (cmh.cmsg_type) {
3898 #ifdef INET
3899 			case SCTP_DSTADDRV4:
3900 				if (cmsg_data_len < (int)sizeof(struct in_addr)) {
3901 					*error = EINVAL;
3902 					return (NULL);
3903 				}
3904 				memset(&sin, 0, sizeof(struct sockaddr_in));
3905 				sin.sin_family = AF_INET;
3906 #ifdef HAVE_SIN_LEN
3907 				sin.sin_len = sizeof(struct sockaddr_in);
3908 #endif
3909 				sin.sin_port = port;
3910 				m_copydata(control, cmsg_data_off, sizeof(struct in_addr), (caddr_t)&sin.sin_addr);
3911 				addr = (struct sockaddr *)&sin;
3912 				break;
3913 #endif
3914 #ifdef INET6
3915 			case SCTP_DSTADDRV6:
3916 				if (cmsg_data_len < (int)sizeof(struct in6_addr)) {
3917 					*error = EINVAL;
3918 					return (NULL);
3919 				}
3920 				memset(&sin6, 0, sizeof(struct sockaddr_in6));
3921 				sin6.sin6_family = AF_INET6;
3922 #ifdef HAVE_SIN6_LEN
3923 				sin6.sin6_len = sizeof(struct sockaddr_in6);
3924 #endif
3925 				sin6.sin6_port = port;
3926 				m_copydata(control, cmsg_data_off, sizeof(struct in6_addr), (caddr_t)&sin6.sin6_addr);
3927 #ifdef INET
3928 				if (IN6_IS_ADDR_V4MAPPED(&sin6.sin6_addr)) {
3929 					in6_sin6_2_sin(&sin, &sin6);
3930 					addr = (struct sockaddr *)&sin;
3931 				} else
3932 #endif
3933 					addr = (struct sockaddr *)&sin6;
3934 				break;
3935 #endif
3936 			default:
3937 				addr = NULL;
3938 				break;
3939 			}
3940 			if (addr) {
3941 				stcb = sctp_findassociation_ep_addr(inp_p, addr, net_p, NULL, NULL);
3942 				if (stcb != NULL) {
3943 					return (stcb);
3944 				}
3945 			}
3946 		}
3947 	}
3948 	return (NULL);
3949 }
3950 #endif
3951 
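/*
 * A summary of the builder below: the state cookie parameter is
 * assembled as an mbuf chain consisting of a parameter header plus the
 * state cookie, followed by copies of the received INIT and of our
 * INIT-ACK, and terminated by SCTP_SIGNATURE_SIZE bytes of zeroed space
 * for the HMAC; *signature is pointed at that space so the caller can
 * fill it in once the cookie is complete.
 */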
3952 static struct mbuf *
3953 sctp_add_cookie(struct mbuf *init, int init_offset,
3954     struct mbuf *initack, int initack_offset, struct sctp_state_cookie *stc_in, uint8_t **signature)
3955 {
3956 	struct mbuf *copy_init, *copy_initack, *m_at, *sig, *mret;
3957 	struct sctp_state_cookie *stc;
3958 	struct sctp_paramhdr *ph;
3959 	uint8_t *foo;
3960 	int sig_offset;
3961 	uint16_t cookie_sz;
3962 
3963 	mret = sctp_get_mbuf_for_msg((sizeof(struct sctp_state_cookie) +
3964 				      sizeof(struct sctp_paramhdr)), 0,
3965 				     M_NOWAIT, 1, MT_DATA);
3966 	if (mret == NULL) {
3967 		return (NULL);
3968 	}
3969 	copy_init = SCTP_M_COPYM(init, init_offset, M_COPYALL, M_NOWAIT);
3970 	if (copy_init == NULL) {
3971 		sctp_m_freem(mret);
3972 		return (NULL);
3973 	}
3974 #ifdef SCTP_MBUF_LOGGING
3975 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
3976 		sctp_log_mbc(copy_init, SCTP_MBUF_ICOPY);
3977 	}
3978 #endif
3979 	copy_initack = SCTP_M_COPYM(initack, initack_offset, M_COPYALL,
3980 	    M_NOWAIT);
3981 	if (copy_initack == NULL) {
3982 		sctp_m_freem(mret);
3983 		sctp_m_freem(copy_init);
3984 		return (NULL);
3985 	}
3986 #ifdef SCTP_MBUF_LOGGING
3987 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
3988 		sctp_log_mbc(copy_initack, SCTP_MBUF_ICOPY);
3989 	}
3990 #endif
3991 	/* easy side we just drop it on the end */
3992 	ph = mtod(mret, struct sctp_paramhdr *);
3993 	SCTP_BUF_LEN(mret) = sizeof(struct sctp_state_cookie) +
3994 	    sizeof(struct sctp_paramhdr);
3995 	stc = (struct sctp_state_cookie *)((caddr_t)ph +
3996 	    sizeof(struct sctp_paramhdr));
3997 	ph->param_type = htons(SCTP_STATE_COOKIE);
3998 	ph->param_length = 0;	/* fill in at the end */
3999 	/* Fill in the stc cookie data */
4000 	memcpy(stc, stc_in, sizeof(struct sctp_state_cookie));
4001 
4002 	/* tack the INIT and then the INIT-ACK onto the chain */
4003 	cookie_sz = 0;
4004 	for (m_at = mret; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
4005 		cookie_sz += SCTP_BUF_LEN(m_at);
4006 		if (SCTP_BUF_NEXT(m_at) == NULL) {
4007 			SCTP_BUF_NEXT(m_at) = copy_init;
4008 			break;
4009 		}
4010 	}
4011 	for (m_at = copy_init; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
4012 		cookie_sz += SCTP_BUF_LEN(m_at);
4013 		if (SCTP_BUF_NEXT(m_at) == NULL) {
4014 			SCTP_BUF_NEXT(m_at) = copy_initack;
4015 			break;
4016 		}
4017 	}
4018 	for (m_at = copy_initack; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
4019 		cookie_sz += SCTP_BUF_LEN(m_at);
4020 		if (SCTP_BUF_NEXT(m_at) == NULL) {
4021 			break;
4022 		}
4023 	}
4024 	sig = sctp_get_mbuf_for_msg(SCTP_SECRET_SIZE, 0, M_NOWAIT, 1, MT_DATA);
4025 	if (sig == NULL) {
4026 		/* no space, so free the entire chain */
4027 		sctp_m_freem(mret);
4028 		return (NULL);
4029 	}
4030 	SCTP_BUF_LEN(sig) = 0;
4031 	SCTP_BUF_NEXT(m_at) = sig;
4032 	sig_offset = 0;
4033 	foo = (uint8_t *) (mtod(sig, caddr_t) + sig_offset);
4034 	memset(foo, 0, SCTP_SIGNATURE_SIZE);
4035 	*signature = foo;
4036 	SCTP_BUF_LEN(sig) += SCTP_SIGNATURE_SIZE;
4037 	cookie_sz += SCTP_SIGNATURE_SIZE;
4038 	ph->param_length = htons(cookie_sz);
4039 	return (mret);
4040 }
4041 
4042 
4043 static uint8_t
4044 sctp_get_ect(struct sctp_tcb *stcb)
4045 {
4046 	if ((stcb != NULL) && (stcb->asoc.ecn_supported == 1)) {
4047 		return (SCTP_ECT0_BIT);
4048 	} else {
4049 		return (0);
4050 	}
4051 }
4052 
4053 #if defined(INET) || defined(INET6)
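/*
 * A summary of the handler below: called when source address selection
 * or routing fails for a packet. If the destination was confirmed and
 * reachable, notify the ULP that the interface is down and clear the
 * reachable/PF flags; if it was the primary destination, switch
 * stcb->asoc.alternate to another net and invalidate the cached source
 * address.
 */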
4054 static void
4055 sctp_handle_no_route(struct sctp_tcb *stcb,
4056                      struct sctp_nets *net,
4057                      int so_locked)
4058 {
4059 	SCTPDBG(SCTP_DEBUG_OUTPUT1, "dropped packet - no valid source addr\n");
4060 
4061 	if (net) {
4062 		SCTPDBG(SCTP_DEBUG_OUTPUT1, "Destination was ");
4063 		SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT1, &net->ro._l_addr.sa);
4064 		if (net->dest_state & SCTP_ADDR_CONFIRMED) {
4065 			if ((net->dest_state & SCTP_ADDR_REACHABLE) && stcb) {
4066 				SCTPDBG(SCTP_DEBUG_OUTPUT1, "no route takes interface %p down\n", (void *)net);
4067 				sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN,
4068 			                        stcb, 0,
4069 			                        (void *)net,
4070 			                        so_locked);
4071 				net->dest_state &= ~SCTP_ADDR_REACHABLE;
4072 				net->dest_state &= ~SCTP_ADDR_PF;
4073 			}
4074 		}
4075 		if (stcb) {
4076 			if (net == stcb->asoc.primary_destination) {
4077 				/* need a new primary */
4078 				struct sctp_nets *alt;
4079 
4080 				alt = sctp_find_alternate_net(stcb, net, 0);
4081 				if (alt != net) {
4082 					if (stcb->asoc.alternate) {
4083 						sctp_free_remote_addr(stcb->asoc.alternate);
4084 					}
4085 					stcb->asoc.alternate = alt;
4086 					atomic_add_int(&stcb->asoc.alternate->ref_count, 1);
4087 					if (net->ro._s_addr) {
4088 						sctp_free_ifa(net->ro._s_addr);
4089 						net->ro._s_addr = NULL;
4090 					}
4091 					net->src_addr_selected = 0;
4092 				}
4093 			}
4094 		}
4095 	}
4096 }
4097 #endif
4098 
4099 static int
4100 sctp_lowlevel_chunk_output(struct sctp_inpcb *inp,
4101     struct sctp_tcb *stcb,	/* may be NULL */
4102     struct sctp_nets *net,
4103     struct sockaddr *to,
4104     struct mbuf *m,
4105     uint32_t auth_offset,
4106     struct sctp_auth_chunk *auth,
4107     uint16_t auth_keyid,
4108     int nofragment_flag,
4109     int ecn_ok,
4110     int out_of_asoc_ok,
4111     uint16_t src_port,
4112     uint16_t dest_port,
4113     uint32_t v_tag,
4114     uint16_t port,
4115     union sctp_sockstore *over_addr,
4116 #if defined(__FreeBSD__)
4117     uint8_t mflowtype, uint32_t mflowid,
4118 #endif
4119 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4120     int so_locked SCTP_UNUSED
4121 #else
4122     int so_locked
4123 #endif
4124     )
4125 /* nofragment_flag to tell if IP_DF should be set (IPv4 only) */
4126 {
4127 	/**
4128 	 * Given a mbuf chain (via SCTP_BUF_NEXT()) that holds a packet header
4129 	 * WITH an SCTPHDR but no IP header, endpoint inp and sa structure:
4130 	 * - fill in the HMAC digest of any AUTH chunk in the packet.
4131 	 * - calculate and fill in the SCTP checksum.
4132 	 * - prepend an IP address header.
4133 	 * - if boundall use INADDR_ANY.
4134 	 * - if boundspecific do source address selection.
4135 	 * - set fragmentation option for ipV4.
4136 	 * - On return from IP output, check/adjust mtu size of output
4137 	 *   interface and smallest_mtu size as well.
4138 	 */
4139 	/* Will need ifdefs around this */
4140 #ifdef __Panda__
4141 	pakhandle_type o_pak;
4142 #endif
4143 	struct mbuf *newm;
4144 	struct sctphdr *sctphdr;
4145 	int packet_length;
4146 	int ret;
4147 #if defined(INET) || defined(INET6)
4148 	uint32_t vrf_id;
4149 #endif
4150 #if defined(INET) || defined(INET6)
4151 #if !defined(__Panda__)
4152 	struct mbuf *o_pak;
4153 #endif
4154 	sctp_route_t *ro = NULL;
4155 	struct udphdr *udp = NULL;
4156 #endif
4157 	uint8_t tos_value;
4158 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4159 	struct socket *so = NULL;
4160 #endif
4161 
4162 #if defined(__APPLE__)
4163 	if (so_locked) {
4164 		sctp_lock_assert(SCTP_INP_SO(inp));
4165 		SCTP_TCB_LOCK_ASSERT(stcb);
4166 	} else {
4167 		sctp_unlock_assert(SCTP_INP_SO(inp));
4168 	}
4169 #endif
4170 	if ((net) && (net->dest_state & SCTP_ADDR_OUT_OF_SCOPE)) {
4171 		SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
4172 		sctp_m_freem(m);
4173 		return (EFAULT);
4174 	}
4175 #if defined(INET) || defined(INET6)
4176 	if (stcb) {
4177 		vrf_id = stcb->asoc.vrf_id;
4178 	} else {
4179 		vrf_id = inp->def_vrf_id;
4180 	}
4181 #endif
4182 	/* fill in the HMAC digest for any AUTH chunk in the packet */
4183 	if ((auth != NULL) && (stcb != NULL)) {
4184 		sctp_fill_hmac_digest_m(m, auth_offset, auth, stcb, auth_keyid);
4185 	}
4186 
4187 	if (net) {
4188 		tos_value = net->dscp;
4189 	} else if (stcb) {
4190 		tos_value = stcb->asoc.default_dscp;
4191 	} else {
4192 		tos_value = inp->sctp_ep.default_dscp;
4193 	}
4194 
4195 	switch (to->sa_family) {
4196 #ifdef INET
4197 	case AF_INET:
4198 	{
4199 		struct ip *ip = NULL;
4200 		sctp_route_t iproute;
4201 		int len;
4202 
4203 		len = SCTP_MIN_V4_OVERHEAD;
4204 		if (port) {
4205 			len += sizeof(struct udphdr);
4206 		}
4207 		newm = sctp_get_mbuf_for_msg(len, 1, M_NOWAIT, 1, MT_DATA);
4208 		if (newm == NULL) {
4209 			sctp_m_freem(m);
4210 			SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
4211 			return (ENOMEM);
4212 		}
4213 		SCTP_ALIGN_TO_END(newm, len);
4214 		SCTP_BUF_LEN(newm) = len;
4215 		SCTP_BUF_NEXT(newm) = m;
4216 		m = newm;
4217 #if defined(__FreeBSD__)
4218 		if (net != NULL) {
4219 			m->m_pkthdr.flowid = net->flowid;
4220 			M_HASHTYPE_SET(m, net->flowtype);
4221 		} else {
4222 			m->m_pkthdr.flowid = mflowid;
4223 			M_HASHTYPE_SET(m, mflowtype);
4224  		}
4225 #endif
4226 		packet_length = sctp_calculate_len(m);
4227 		ip = mtod(m, struct ip *);
4228 		ip->ip_v = IPVERSION;
4229 		ip->ip_hl = (sizeof(struct ip) >> 2);
4230 		if (tos_value == 0) {
4231 			/*
4232 			 * This means especially, that it is not set at the
4233 			 * SCTP layer. So use the value from the IP layer.
4234 			 */
4235 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Panda__) || defined(__Windows__) || defined(__Userspace__)
4236 			tos_value = inp->ip_inp.inp.inp_ip_tos;
4237 #else
4238 			tos_value = inp->inp_ip_tos;
4239 #endif
4240 		}
4241 		tos_value &= 0xfc;
4242 		if (ecn_ok) {
4243 			tos_value |= sctp_get_ect(stcb);
4244 		}
4245 		if ((nofragment_flag) && (port == 0)) {
4246 #if defined(__FreeBSD__)
4247 #if __FreeBSD_version >= 1000000
4248 			ip->ip_off = htons(IP_DF);
4249 #else
4250 			ip->ip_off = IP_DF;
4251 #endif
4252 #elif defined(WITH_CONVERT_IP_OFF) || defined(__APPLE__) || defined(__Userspace_os_Darwin)
4253 			ip->ip_off = IP_DF;
4254 #else
4255 			ip->ip_off = htons(IP_DF);
4256 #endif
4257 		} else {
4258 #if defined(__FreeBSD__) && __FreeBSD_version >= 1000000
4259 			ip->ip_off = htons(0);
4260 #else
4261 			ip->ip_off = 0;
4262 #endif
4263 		}
4264 #if defined(__FreeBSD__)
4265 		/* FreeBSD has a function for ip_id's */
4266 		ip_fillid(ip);
4267 #elif defined(__APPLE__)
4268 #if RANDOM_IP_ID
4269 		ip->ip_id = ip_randomid();
4270 #else
4271 		ip->ip_id = htons(ip_id++);
4272 #endif
4273 #elif defined(__Userspace__)
4274 		ip->ip_id = htons(SCTP_IP_ID(inp)++);
4275 #else
4276 		ip->ip_id = SCTP_IP_ID(inp)++;
4277 #endif
4278 
4279 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Panda__) || defined(__Windows__) || defined(__Userspace__)
4280 		ip->ip_ttl = inp->ip_inp.inp.inp_ip_ttl;
4281 #else
4282 		ip->ip_ttl = inp->inp_ip_ttl;
4283 #endif
4284 #if defined(__FreeBSD__) && __FreeBSD_version >= 1000000
4285 		ip->ip_len = htons(packet_length);
4286 #else
4287 		ip->ip_len = packet_length;
4288 #endif
4289 		ip->ip_tos = tos_value;
4290 		if (port) {
4291 			ip->ip_p = IPPROTO_UDP;
4292 		} else {
4293 			ip->ip_p = IPPROTO_SCTP;
4294 		}
4295 		ip->ip_sum = 0;
4296 		if (net == NULL) {
4297 			ro = &iproute;
4298 			memset(&iproute, 0, sizeof(iproute));
4299 #ifdef HAVE_SA_LEN
4300 			memcpy(&ro->ro_dst, to, to->sa_len);
4301 #else
4302 			memcpy(&ro->ro_dst, to, sizeof(struct sockaddr_in));
4303 #endif
4304 		} else {
4305 			ro = (sctp_route_t *)&net->ro;
4306 		}
4307 		/* Now the address selection part */
4308 		ip->ip_dst.s_addr = ((struct sockaddr_in *)to)->sin_addr.s_addr;
4309 
4310 		/* call the routine to select the src address */
4311 		if (net && out_of_asoc_ok == 0) {
4312 			if (net->ro._s_addr && (net->ro._s_addr->localifa_flags & (SCTP_BEING_DELETED|SCTP_ADDR_IFA_UNUSEABLE))) {
4313 				sctp_free_ifa(net->ro._s_addr);
4314 				net->ro._s_addr = NULL;
4315 				net->src_addr_selected = 0;
4316 #if defined(__FreeBSD__)
4317 				RO_NHFREE(ro);
4318 #else
4319 				if (ro->ro_rt) {
4320 					RTFREE(ro->ro_rt);
4321 					ro->ro_rt = NULL;
4322 				}
4323 #endif
4324 			}
4325 			if (net->src_addr_selected == 0) {
4326 				/* Cache the source address */
4327 				net->ro._s_addr = sctp_source_address_selection(inp,stcb,
4328 										ro, net, 0,
4329 										vrf_id);
4330 				net->src_addr_selected = 1;
4331 			}
4332 			if (net->ro._s_addr == NULL) {
4333 				/* No route to host */
4334 				net->src_addr_selected = 0;
4335 				sctp_handle_no_route(stcb, net, so_locked);
4336 				SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4337 				sctp_m_freem(m);
4338 				return (EHOSTUNREACH);
4339 			}
4340 			ip->ip_src = net->ro._s_addr->address.sin.sin_addr;
4341 		} else {
4342 			if (over_addr == NULL) {
4343 				struct sctp_ifa *_lsrc;
4344 
4345 				_lsrc = sctp_source_address_selection(inp, stcb, ro,
4346 				                                      net,
4347 				                                      out_of_asoc_ok,
4348 				                                      vrf_id);
4349 				if (_lsrc == NULL) {
4350 					sctp_handle_no_route(stcb, net, so_locked);
4351 					SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4352 					sctp_m_freem(m);
4353 					return (EHOSTUNREACH);
4354 				}
4355 				ip->ip_src = _lsrc->address.sin.sin_addr;
4356 				sctp_free_ifa(_lsrc);
4357 			} else {
4358 				ip->ip_src = over_addr->sin.sin_addr;
4359 				SCTP_RTALLOC(ro, vrf_id, inp->fibnum);
4360 			}
4361 		}
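		/*
		 * A non-zero port means the packet is UDP-encapsulated
		 * (SCTP over UDP, RFC 6951): insert a UDP header between the
		 * IP header and the common SCTP header, using the configured
		 * tunneling port as the source port.
		 */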
4362 		if (port) {
4363 			if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) {
4364 				sctp_handle_no_route(stcb, net, so_locked);
4365 				SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4366 				sctp_m_freem(m);
4367 				return (EHOSTUNREACH);
4368 			}
4369 			udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip));
4370 			udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
4371 			udp->uh_dport = port;
4372 			udp->uh_ulen = htons((uint16_t)(packet_length - sizeof(struct ip)));
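			/*
			 * Seed uh_sum with the pseudo-header checksum when
			 * UDP checksums are in use (V_udp_cksum on newer
			 * FreeBSD); it is completed later via
			 * SCTP_ENABLE_UDP_CSUM(). Otherwise leave it zero.
			 */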
4373 #if !defined(__Windows__) && !defined(__Userspace__)
4374 #if defined(__FreeBSD__) && ((__FreeBSD_version > 803000 && __FreeBSD_version < 900000) || __FreeBSD_version > 900000)
4375 			if (V_udp_cksum) {
4376 				udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
4377 			} else {
4378 				udp->uh_sum = 0;
4379 			}
4380 #else
4381 			udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
4382 #endif
4383 #else
4384 			udp->uh_sum = 0;
4385 #endif
4386 			sctphdr = (struct sctphdr *)((caddr_t)udp + sizeof(struct udphdr));
4387 		} else {
4388 			sctphdr = (struct sctphdr *)((caddr_t)ip + sizeof(struct ip));
4389 		}
4390 
4391 		sctphdr->src_port = src_port;
4392 		sctphdr->dest_port = dest_port;
4393 		sctphdr->v_tag = v_tag;
4394 		sctphdr->checksum = 0;
4395 
4396 		/*
4397 		 * If source address selection fails and we find no route
4398 		 * then the ip_output should fail as well with a
4399 		 * NO_ROUTE_TO_HOST type error. We probably should catch
4400 		 * that somewhere and abort the association right away
4401 		 * (assuming this is an INIT being sent).
4402 		 */
4403 #if defined(__FreeBSD__)
4404 		if (ro->ro_nh == NULL) {
4405 #else
4406 		if (ro->ro_rt == NULL) {
4407 #endif
4408 			/*
4409 			 * src addr selection failed to find a route (or
4410 			 * valid source addr), so we can't get there from
4411 			 * here (yet)!
4412 			 */
4413 			sctp_handle_no_route(stcb, net, so_locked);
4414 			SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4415 			sctp_m_freem(m);
4416 			return (EHOSTUNREACH);
4417 		}
4418 		if (ro != &iproute) {
4419 			memcpy(&iproute, ro, sizeof(*ro));
4420 		}
4421 		SCTPDBG(SCTP_DEBUG_OUTPUT3, "Calling ipv4 output routine from low level src addr:%x\n",
4422 			(uint32_t) (ntohl(ip->ip_src.s_addr)));
4423 		SCTPDBG(SCTP_DEBUG_OUTPUT3, "Destination is %x\n",
4424 			(uint32_t)(ntohl(ip->ip_dst.s_addr)));
4425 #if defined(__FreeBSD__)
4426 		SCTPDBG(SCTP_DEBUG_OUTPUT3, "RTP route is %p through\n",
4427 			(void *)ro->ro_nh);
4428 #else
4429 		SCTPDBG(SCTP_DEBUG_OUTPUT3, "RTP route is %p through\n",
4430 			(void *)ro->ro_rt);
4431 #endif
4432 
4433 		if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
4434 			/* failed to prepend data, give up */
4435 			SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
4436 			sctp_m_freem(m);
4437 			return (ENOMEM);
4438 		}
4439 		SCTP_ATTACH_CHAIN(o_pak, m, packet_length);
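		/*
		 * CRC32c handling: for UDP-encapsulated packets the checksum
		 * is always computed in software here; for plain SCTP on
		 * newer FreeBSD it is requested via csum_flags so that it can
		 * be offloaded to the NIC or computed later by the stack.
		 */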
4440 		if (port) {
4441 			sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip) + sizeof(struct udphdr));
4442 			SCTP_STAT_INCR(sctps_sendswcrc);
4443 #if !defined(__Windows__) && !defined(__Userspace__)
4444 #if defined(__FreeBSD__) && ((__FreeBSD_version > 803000 && __FreeBSD_version < 900000) || __FreeBSD_version > 900000)
4445 			if (V_udp_cksum) {
4446 				SCTP_ENABLE_UDP_CSUM(o_pak);
4447 			}
4448 #else
4449 			SCTP_ENABLE_UDP_CSUM(o_pak);
4450 #endif
4451 #endif
4452 		} else {
4453 #if defined(__FreeBSD__) && __FreeBSD_version >= 800000
4454 			m->m_pkthdr.csum_flags = CSUM_SCTP;
4455 			m->m_pkthdr.csum_data = offsetof(struct sctphdr, checksum);
4456 			SCTP_STAT_INCR(sctps_sendhwcrc);
4457 #else
4458 			if (!(SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback) &&
4459 			      (stcb) && (stcb->asoc.scope.loopback_scope))) {
4460 				sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip));
4461 				SCTP_STAT_INCR(sctps_sendswcrc);
4462 			} else {
4463 				SCTP_STAT_INCR(sctps_sendhwcrc);
4464 			}
4465 #endif
4466 		}
4467 #ifdef SCTP_PACKET_LOGGING
4468 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
4469 			sctp_packet_log(o_pak);
4470 #endif
4471 		/* send it out.  table id is taken from stcb */
4472 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4473 		if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4474 			so = SCTP_INP_SO(inp);
4475 			SCTP_SOCKET_UNLOCK(so, 0);
4476 		}
4477 #endif
4478 #if defined(__FreeBSD__)
4479 		SCTP_PROBE5(send, NULL, stcb, ip, stcb, sctphdr);
4480 #endif
4481 		SCTP_IP_OUTPUT(ret, o_pak, ro, stcb, vrf_id);
4482 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4483 		if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4484 			atomic_add_int(&stcb->asoc.refcnt, 1);
4485 			SCTP_TCB_UNLOCK(stcb);
4486 			SCTP_SOCKET_LOCK(so, 0);
4487 			SCTP_TCB_LOCK(stcb);
4488 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
4489 		}
4490 #endif
4491 #if defined(__FreeBSD__)
4492 		if (port) {
4493 			UDPSTAT_INC(udps_opackets);
4494 		}
4495 #endif
4496 		SCTP_STAT_INCR(sctps_sendpackets);
4497 		SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
4498 		if (ret)
4499 			SCTP_STAT_INCR(sctps_senderrors);
4500 
4501 		SCTPDBG(SCTP_DEBUG_OUTPUT3, "IP output returns %d\n", ret);
4502 		if (net == NULL) {
4503 			/* free temporary routes */
4504 #if defined(__FreeBSD__)
4505 			RO_NHFREE(ro);
4506 #else
4507 			if (ro->ro_rt) {
4508 				RTFREE(ro->ro_rt);
4509 				ro->ro_rt = NULL;
4510 			}
4511 #endif
4512 		} else {
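			/*
			 * PMTU check: if the route reports a smaller MTU,
			 * shrink net->mtu (and the association's smallest MTU
			 * if needed); if the route has been freed, clear the
			 * cached source address.
			 */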
4513 #if defined(__FreeBSD__)
4514 			if ((ro->ro_nh != NULL) && (net->ro._s_addr) &&
4515 #else
4516 			if ((ro->ro_rt != NULL) && (net->ro._s_addr) &&
4517 #endif
4518 			    ((net->dest_state & SCTP_ADDR_NO_PMTUD) == 0)) {
4519 				uint32_t mtu;
4520 
4521 #if defined(__FreeBSD__)
4522 				mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_nh);
4523 #else
4524 				mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_rt);
4525 #endif
4526 				if (mtu > 0) {
4527 					if (net->port) {
4528 						mtu -= sizeof(struct udphdr);
4529 					}
4530 					if (mtu < net->mtu) {
4531 						if ((stcb != NULL) && (stcb->asoc.smallest_mtu > mtu)) {
4532 							sctp_mtu_size_reset(inp, &stcb->asoc, mtu);
4533 						}
4534 						net->mtu = mtu;
4535 					}
4536 				}
4537 #if defined(__FreeBSD__)
4538 			} else if (ro->ro_nh == NULL) {
4539 #else
4540 			} else if (ro->ro_rt == NULL) {
4541 #endif
4542 				/* route was freed */
4543 				if (net->ro._s_addr &&
4544 				    net->src_addr_selected) {
4545 					sctp_free_ifa(net->ro._s_addr);
4546 					net->ro._s_addr = NULL;
4547 				}
4548 				net->src_addr_selected = 0;
4549 			}
4550 		}
4551 		return (ret);
4552 	}
4553 #endif
4554 #ifdef INET6
4555 	case AF_INET6:
4556 	{
4557 		uint32_t flowlabel, flowinfo;
4558 		struct ip6_hdr *ip6h;
4559 		struct route_in6 ip6route;
4560 #if !(defined(__Panda__) || defined(__Userspace__))
4561 		struct ifnet *ifp;
4562 #endif
4563 		struct sockaddr_in6 *sin6, tmp, *lsa6, lsa6_tmp;
4564 		int prev_scope = 0;
4565 #ifdef SCTP_EMBEDDED_V6_SCOPE
4566 		struct sockaddr_in6 lsa6_storage;
4567 		int error;
4568 #endif
4569 		u_short prev_port = 0;
4570 		int len;
4571 
4572 		if (net) {
4573 			flowlabel = net->flowlabel;
4574 		} else if (stcb) {
4575 			flowlabel = stcb->asoc.default_flowlabel;
4576 		} else {
4577 			flowlabel = inp->sctp_ep.default_flowlabel;
4578 		}
4579 		if (flowlabel == 0) {
4580 			/*
4581 			 * In particular, this means it was not set at the
4582 			 * SCTP layer, so use the value from the IP layer.
4583 			 */
4584 #if defined(__APPLE__) && (!defined(APPLE_LEOPARD) && !defined(APPLE_SNOWLEOPARD) && !defined(APPLE_LION) && !defined(APPLE_MOUNTAINLION))
4585 			flowlabel = ntohl(inp->ip_inp.inp.inp_flow);
4586 #else
4587 			flowlabel = ntohl(((struct inpcb *)inp)->inp_flow);
4588 #endif
4589 		}
4590 		flowlabel &= 0x000fffff;
4591 		len = SCTP_MIN_OVERHEAD;
4592 		if (port) {
4593 			len += sizeof(struct udphdr);
4594 		}
4595 		newm = sctp_get_mbuf_for_msg(len, 1, M_NOWAIT, 1, MT_DATA);
4596 		if (newm == NULL) {
4597 			sctp_m_freem(m);
4598 			SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
4599 			return (ENOMEM);
4600 		}
4601 		SCTP_ALIGN_TO_END(newm, len);
4602 		SCTP_BUF_LEN(newm) = len;
4603 		SCTP_BUF_NEXT(newm) = m;
4604 		m = newm;
4605 #if defined(__FreeBSD__)
4606 		if (net != NULL) {
4607 			m->m_pkthdr.flowid = net->flowid;
4608 			M_HASHTYPE_SET(m, net->flowtype);
4609 		} else {
4610 			m->m_pkthdr.flowid = mflowid;
4611 			M_HASHTYPE_SET(m, mflowtype);
4612 		}
4613 #endif
4614 		packet_length = sctp_calculate_len(m);
4615 
4616 		ip6h = mtod(m, struct ip6_hdr *);
4617 		/* protect *sin6 from overwrite */
4618 		sin6 = (struct sockaddr_in6 *)to;
4619 		tmp = *sin6;
4620 		sin6 = &tmp;
4621 
4622 #ifdef SCTP_EMBEDDED_V6_SCOPE
4623 		/* KAME hack: embed scopeid */
4624 #if defined(__APPLE__)
4625 #if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD)
4626 		if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL) != 0)
4627 #else
4628 		if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL, NULL) != 0)
4629 #endif
4630 #elif defined(SCTP_KAME)
4631 		if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0)
4632 #else
4633 		if (in6_embedscope(&sin6->sin6_addr, sin6) != 0)
4634 #endif
4635 		{
4636 			SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
4637 			sctp_m_freem(m);
4638 			return (EINVAL);
4639 		}
4640 #endif /* SCTP_EMBEDDED_V6_SCOPE */
4641 		if (net == NULL) {
4642 			memset(&ip6route, 0, sizeof(ip6route));
4643 			ro = (sctp_route_t *)&ip6route;
4644 #ifdef HAVE_SIN6_LEN
4645 			memcpy(&ro->ro_dst, sin6, sin6->sin6_len);
4646 #else
4647 			memcpy(&ro->ro_dst, sin6, sizeof(struct sockaddr_in6));
4648 #endif
4649 		} else {
4650 			ro = (sctp_route_t *)&net->ro;
4651 		}
4652 		/*
4653 		 * We assume here that inp_flow is in host byte order within
4654 		 * the TCB!
4655 		 */
4656 		if (tos_value == 0) {
4657 			/*
4658 			 * In particular, this means it was not set at the
4659 			 * SCTP layer, so use the value from the IP layer.
4660 			 */
4661 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Panda__) || defined(__Windows__) || defined(__Userspace__)
4662 #if defined(__APPLE__) && (!defined(APPLE_LEOPARD) && !defined(APPLE_SNOWLEOPARD) && !defined(APPLE_LION) && !defined(APPLE_MOUNTAINLION))
4663 			tos_value = (ntohl(inp->ip_inp.inp.inp_flow) >> 20) & 0xff;
4664 #else
4665 			tos_value = (ntohl(((struct inpcb *)inp)->inp_flow) >> 20) & 0xff;
4666 #endif
4667 #endif
4668 		}
4669 		tos_value &= 0xfc;
4670 		if (ecn_ok) {
4671 			tos_value |= sctp_get_ect(stcb);
4672 		}
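		/*
		 * Build the ip6_flow word: IP version (6) in the top four
		 * bits, the traffic class (tos_value) in the next eight, and
		 * the 20-bit flow label in the low-order bits.
		 */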
4673 		flowinfo = 0x06;
4674 		flowinfo <<= 8;
4675 		flowinfo |= tos_value;
4676 		flowinfo <<= 20;
4677 		flowinfo |= flowlabel;
4678 		ip6h->ip6_flow = htonl(flowinfo);
4679 		if (port) {
4680 			ip6h->ip6_nxt = IPPROTO_UDP;
4681 		} else {
4682 			ip6h->ip6_nxt = IPPROTO_SCTP;
4683 		}
4684 		ip6h->ip6_plen = htons((uint16_t)(packet_length - sizeof(struct ip6_hdr)));
4685 		ip6h->ip6_dst = sin6->sin6_addr;
4686 
4687 		/*
4688 		 * Do the source address selection here: we can only reuse the
4689 		 * KAME src-addr-sel to a limited degree, since we can try its
4690 		 * selection but the result may not be a bound address.
4691 		 */
4692 		memset(&lsa6_tmp, 0, sizeof(lsa6_tmp));
4693 		lsa6_tmp.sin6_family = AF_INET6;
4694 #ifdef HAVE_SIN6_LEN
4695 		lsa6_tmp.sin6_len = sizeof(lsa6_tmp);
4696 #endif
4697 		lsa6 = &lsa6_tmp;
4698 		if (net && out_of_asoc_ok == 0) {
4699 			if (net->ro._s_addr && (net->ro._s_addr->localifa_flags & (SCTP_BEING_DELETED|SCTP_ADDR_IFA_UNUSEABLE))) {
4700 				sctp_free_ifa(net->ro._s_addr);
4701 				net->ro._s_addr = NULL;
4702 				net->src_addr_selected = 0;
4703 #if defined(__FreeBSD__)
4704 				RO_NHFREE(ro);
4705 #else
4706 				if (ro->ro_rt) {
4707 					RTFREE(ro->ro_rt);
4708 					ro->ro_rt = NULL;
4709 				}
4710 #endif
4711 			}
4712 			if (net->src_addr_selected == 0) {
4713 #ifdef SCTP_EMBEDDED_V6_SCOPE
4714 				sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
4715 				/* KAME hack: embed scopeid */
4716 #if defined(__APPLE__)
4717 #if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD)
4718 				if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL) != 0)
4719 #else
4720 				if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL, NULL) != 0)
4721 #endif
4722 #elif defined(SCTP_KAME)
4723 				if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0)
4724 #else
4725 				if (in6_embedscope(&sin6->sin6_addr, sin6) != 0)
4726 #endif
4727 				{
4728 					SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
4729 					sctp_m_freem(m);
4730 					return (EINVAL);
4731 				}
4732 #endif /* SCTP_EMBEDDED_V6_SCOPE */
4733 				/* Cache the source address */
4734 				net->ro._s_addr = sctp_source_address_selection(inp,
4735 										stcb,
4736 										ro,
4737 										net,
4738 										0,
4739 										vrf_id);
4740 #ifdef SCTP_EMBEDDED_V6_SCOPE
4741 #ifdef SCTP_KAME
4742 				(void)sa6_recoverscope(sin6);
4743 #else
4744 				(void)in6_recoverscope(sin6, &sin6->sin6_addr, NULL);
4745 #endif	/* SCTP_KAME */
4746 #endif	/* SCTP_EMBEDDED_V6_SCOPE */
4747 				net->src_addr_selected = 1;
4748 			}
4749 			if (net->ro._s_addr == NULL) {
4750 				SCTPDBG(SCTP_DEBUG_OUTPUT3, "V6:No route to host\n");
4751 				net->src_addr_selected = 0;
4752 				sctp_handle_no_route(stcb, net, so_locked);
4753 				SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4754 				sctp_m_freem(m);
4755 				return (EHOSTUNREACH);
4756 			}
4757 			lsa6->sin6_addr = net->ro._s_addr->address.sin6.sin6_addr;
4758 		} else {
4759 #ifdef SCTP_EMBEDDED_V6_SCOPE
4760 			sin6 = (struct sockaddr_in6 *)&ro->ro_dst;
4761 			/* KAME hack: embed scopeid */
4762 #if defined(__APPLE__)
4763 #if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD)
4764 			if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL) != 0)
4765 #else
4766 			if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL, NULL) != 0)
4767 #endif
4768 #elif defined(SCTP_KAME)
4769 			if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0)
4770 #else
4771 			if (in6_embedscope(&sin6->sin6_addr, sin6) != 0)
4772 #endif
4773 			  {
4774 				SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
4775 				sctp_m_freem(m);
4776 				return (EINVAL);
4777 			  }
4778 #endif /* SCTP_EMBEDDED_V6_SCOPE */
4779 			if (over_addr == NULL) {
4780 				struct sctp_ifa *_lsrc;
4781 
4782 				_lsrc = sctp_source_address_selection(inp, stcb, ro,
4783 				                                      net,
4784 				                                      out_of_asoc_ok,
4785 				                                      vrf_id);
4786 				if (_lsrc == NULL) {
4787 					sctp_handle_no_route(stcb, net, so_locked);
4788 					SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4789 					sctp_m_freem(m);
4790 					return (EHOSTUNREACH);
4791 				}
4792 				lsa6->sin6_addr = _lsrc->address.sin6.sin6_addr;
4793 				sctp_free_ifa(_lsrc);
4794 			} else {
4795 				lsa6->sin6_addr = over_addr->sin6.sin6_addr;
4796 				SCTP_RTALLOC(ro, vrf_id, inp->fibnum);
4797 			}
4798 #ifdef SCTP_EMBEDDED_V6_SCOPE
4799 #ifdef SCTP_KAME
4800 			(void)sa6_recoverscope(sin6);
4801 #else
4802 			(void)in6_recoverscope(sin6, &sin6->sin6_addr, NULL);
4803 #endif	/* SCTP_KAME */
4804 #endif	/* SCTP_EMBEDDED_V6_SCOPE */
4805 		}
4806 		lsa6->sin6_port = inp->sctp_lport;
4807 
4808 #if defined(__FreeBSD__)
4809 		if (ro->ro_nh == NULL) {
4810 #else
4811 		if (ro->ro_rt == NULL) {
4812 #endif
4813 			/*
4814 			 * src addr selection failed to find a route (or
4815 			 * valid source addr), so we can't get there from
4816 			 * here!
4817 			 */
4818 			sctp_handle_no_route(stcb, net, so_locked);
4819 			SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4820 			sctp_m_freem(m);
4821 			return (EHOSTUNREACH);
4822 		}
4823 #ifndef SCOPEDROUTING
4824 #ifdef SCTP_EMBEDDED_V6_SCOPE
4825 		/*
4826 		 * XXX: sa6 may not have a valid sin6_scope_id in the
4827 		 * non-SCOPEDROUTING case.
4828 		 */
4829 		memset(&lsa6_storage, 0, sizeof(lsa6_storage));
4830 		lsa6_storage.sin6_family = AF_INET6;
4831 #ifdef HAVE_SIN6_LEN
4832 		lsa6_storage.sin6_len = sizeof(lsa6_storage);
4833 #endif
4834 #ifdef SCTP_KAME
4835 		lsa6_storage.sin6_addr = lsa6->sin6_addr;
4836 		if ((error = sa6_recoverscope(&lsa6_storage)) != 0) {
4837 #else
4838 		if ((error = in6_recoverscope(&lsa6_storage, &lsa6->sin6_addr,
4839 		    NULL)) != 0) {
4840 #endif				/* SCTP_KAME */
4841 			SCTPDBG(SCTP_DEBUG_OUTPUT3, "recover scope fails error %d\n", error);
4842 			sctp_m_freem(m);
4843 			return (error);
4844 		}
4845 		/* XXX */
4846 		lsa6_storage.sin6_addr = lsa6->sin6_addr;
4847 		lsa6_storage.sin6_port = inp->sctp_lport;
4848 		lsa6 = &lsa6_storage;
4849 #endif /* SCTP_EMBEDDED_V6_SCOPE */
4850 #endif /* SCOPEDROUTING */
4851 		ip6h->ip6_src = lsa6->sin6_addr;
4852 
4853 		if (port) {
4854 			if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) {
4855 				sctp_handle_no_route(stcb, net, so_locked);
4856 				SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4857 				sctp_m_freem(m);
4858 				return (EHOSTUNREACH);
4859 			}
4860 			udp = (struct udphdr *)((caddr_t)ip6h + sizeof(struct ip6_hdr));
4861 			udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
4862 			udp->uh_dport = port;
4863 			udp->uh_ulen = htons((uint16_t)(packet_length - sizeof(struct ip6_hdr)));
4864 			udp->uh_sum = 0;
4865 			sctphdr = (struct sctphdr *)((caddr_t)udp + sizeof(struct udphdr));
4866 		} else {
4867 			sctphdr = (struct sctphdr *)((caddr_t)ip6h + sizeof(struct ip6_hdr));
4868 		}
4869 
4870 		sctphdr->src_port = src_port;
4871 		sctphdr->dest_port = dest_port;
4872 		sctphdr->v_tag = v_tag;
4873 		sctphdr->checksum = 0;
4874 
4875 		/*
4876 		 * We set the hop limit now since there is a good chance
4877 		 * that our ro pointer is now filled
4878 		 */
4879 		ip6h->ip6_hlim = SCTP_GET_HLIM(inp, ro);
4880 #if !(defined(__Panda__) || defined(__Userspace__))
4881 		ifp = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
4882 #endif
4883 
4884 #ifdef SCTP_DEBUG
4885 		/* Copy to be sure something bad is not happening */
4886 		sin6->sin6_addr = ip6h->ip6_dst;
4887 		lsa6->sin6_addr = ip6h->ip6_src;
4888 #endif
4889 
4890 		SCTPDBG(SCTP_DEBUG_OUTPUT3, "Calling ipv6 output routine from low level\n");
4891 		SCTPDBG(SCTP_DEBUG_OUTPUT3, "src: ");
4892 		SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, (struct sockaddr *)lsa6);
4893 		SCTPDBG(SCTP_DEBUG_OUTPUT3, "dst: ");
4894 		SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, (struct sockaddr *)sin6);
4895 		if (net) {
4896 			sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
4897 			/* preserve the port and scope for link local send */
4898 			prev_scope = sin6->sin6_scope_id;
4899 			prev_port = sin6->sin6_port;
4900 		}
4901 
4902 		if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
4903 			/* failed to prepend data, give up */
4904 			sctp_m_freem(m);
4905 			SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
4906 			return (ENOMEM);
4907 		}
4908 		SCTP_ATTACH_CHAIN(o_pak, m, packet_length);
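		/*
		 * For UDP-encapsulated packets compute the CRC32c in
		 * software; the UDP checksum over IPv6 is mandatory, so it is
		 * computed with in6_cksum() and a zero result is mapped to
		 * 0xffff.
		 */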
4909 		if (port) {
4910 			sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip6_hdr) + sizeof(struct udphdr));
4911 			SCTP_STAT_INCR(sctps_sendswcrc);
4912 #if defined(__Windows__)
4913 			udp->uh_sum = 0;
4914 #elif !defined(__Userspace__)
4915 			if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), packet_length - sizeof(struct ip6_hdr))) == 0) {
4916 				udp->uh_sum = 0xffff;
4917 			}
4918 #endif
4919 		} else {
4920 #if defined(__FreeBSD__) && __FreeBSD_version >= 800000
4921 #if __FreeBSD_version < 900000
4922 			sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip6_hdr));
4923 			SCTP_STAT_INCR(sctps_sendswcrc);
4924 #else
4925 #if __FreeBSD_version > 901000
4926 			m->m_pkthdr.csum_flags = CSUM_SCTP_IPV6;
4927 #else
4928 			m->m_pkthdr.csum_flags = CSUM_SCTP;
4929 #endif
4930 			m->m_pkthdr.csum_data = offsetof(struct sctphdr, checksum);
4931 			SCTP_STAT_INCR(sctps_sendhwcrc);
4932 #endif
4933 #else
4934 			if (!(SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback) &&
4935 			      (stcb) && (stcb->asoc.scope.loopback_scope))) {
4936 				sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip6_hdr));
4937 				SCTP_STAT_INCR(sctps_sendswcrc);
4938 			} else {
4939 				SCTP_STAT_INCR(sctps_sendhwcrc);
4940 			}
4941 #endif
4942 		}
4943 		/* send it out. table id is taken from stcb */
4944 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4945 		if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4946 			so = SCTP_INP_SO(inp);
4947 			SCTP_SOCKET_UNLOCK(so, 0);
4948 		}
4949 #endif
4950 #ifdef SCTP_PACKET_LOGGING
4951 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
4952 			sctp_packet_log(o_pak);
4953 #endif
4954 #if !(defined(__Panda__) || defined(__Userspace__))
4955 #if defined(__FreeBSD__)
4956 		SCTP_PROBE5(send, NULL, stcb, ip6h, stcb, sctphdr);
4957 #endif
4958 		SCTP_IP6_OUTPUT(ret, o_pak, (struct route_in6 *)ro, &ifp, stcb, vrf_id);
4959 #else
4960 		SCTP_IP6_OUTPUT(ret, o_pak, (struct route_in6 *)ro, NULL, stcb, vrf_id);
4961 #endif
4962 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4963 		if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4964 			atomic_add_int(&stcb->asoc.refcnt, 1);
4965 			SCTP_TCB_UNLOCK(stcb);
4966 			SCTP_SOCKET_LOCK(so, 0);
4967 			SCTP_TCB_LOCK(stcb);
4968 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
4969 		}
4970 #endif
4971 		if (net) {
4972 			/* for link local this must be done */
4973 			sin6->sin6_scope_id = prev_scope;
4974 			sin6->sin6_port = prev_port;
4975 		}
4976 		SCTPDBG(SCTP_DEBUG_OUTPUT3, "return from send is %d\n", ret);
4977 #if defined(__FreeBSD__)
4978 		if (port) {
4979 			UDPSTAT_INC(udps_opackets);
4980 		}
4981 #endif
4982 		SCTP_STAT_INCR(sctps_sendpackets);
4983 		SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
4984 		if (ret) {
4985 			SCTP_STAT_INCR(sctps_senderrors);
4986 		}
4987 		if (net == NULL) {
4988 			/* Now if we had a temp route free it */
4989 #if defined(__FreeBSD__)
4990 			RO_NHFREE(ro);
4991 #else
4992 			if (ro->ro_rt) {
4993 				RTFREE(ro->ro_rt);
4994 				ro->ro_rt = NULL;
4995 			}
4996 #endif
4997 		} else {
4998 			/* PMTU check versus smallest asoc MTU goes here */
4999 #if defined(__FreeBSD__)
5000 			if (ro->ro_nh == NULL) {
5001 #else
5002 			if (ro->ro_rt == NULL) {
5003 #endif
5004 				/* Route was freed */
5005 				if (net->ro._s_addr &&
5006 				    net->src_addr_selected) {
5007 					sctp_free_ifa(net->ro._s_addr);
5008 					net->ro._s_addr = NULL;
5009 				}
5010 				net->src_addr_selected = 0;
5011 			}
5012 #if defined(__FreeBSD__)
5013 			if ((ro->ro_nh != NULL) && (net->ro._s_addr) &&
5014 #else
5015 			if ((ro->ro_rt != NULL) && (net->ro._s_addr) &&
5016 #endif
5017 			    ((net->dest_state & SCTP_ADDR_NO_PMTUD) == 0)) {
5018 				uint32_t mtu;
5019 
5020 #if defined(__FreeBSD__)
5021 				mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_nh);
5022 #else
5023 				mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_rt);
5024 #endif
5025 				if (mtu > 0) {
5026 					if (net->port) {
5027 						mtu -= sizeof(struct udphdr);
5028 					}
5029 					if (mtu < net->mtu) {
5030 						if ((stcb != NULL) && (stcb->asoc.smallest_mtu > mtu)) {
5031 							sctp_mtu_size_reset(inp, &stcb->asoc, mtu);
5032 						}
5033 						net->mtu = mtu;
5034 					}
5035 				}
5036 			}
5037 #if !defined(__Panda__) && !defined(__Userspace__)
5038 			else if (ifp) {
5039 #if defined(__Windows__)
5040 #define ND_IFINFO(ifp)	(ifp)
5041 #define linkmtu		if_mtu
5042 #endif
5043 				if (ND_IFINFO(ifp)->linkmtu &&
5044 				    (stcb->asoc.smallest_mtu > ND_IFINFO(ifp)->linkmtu)) {
5045 					sctp_mtu_size_reset(inp,
5046 					    &stcb->asoc,
5047 					    ND_IFINFO(ifp)->linkmtu);
5048 				}
5049 			}
5050 #endif
5051 		}
5052 		return (ret);
5053 	}
5054 #endif
5055 #if defined(__Userspace__)
5056 	case AF_CONN:
5057 	{
5058 		char *buffer;
5059 		struct sockaddr_conn *sconn;
5060 		int len;
5061 
5062 		sconn = (struct sockaddr_conn *)to;
5063 		len = sizeof(struct sctphdr);
5064 		newm = sctp_get_mbuf_for_msg(len, 1, M_NOWAIT, 1, MT_DATA);
5065 		if (newm == NULL) {
5066 			sctp_m_freem(m);
5067 			SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
5068 			return (ENOMEM);
5069 		}
5070 		SCTP_ALIGN_TO_END(newm, len);
5071 		SCTP_BUF_LEN(newm) = len;
5072 		SCTP_BUF_NEXT(newm) = m;
5073 		m = newm;
5074 		packet_length = sctp_calculate_len(m);
5075 		sctphdr = mtod(m, struct sctphdr *);
5076 		sctphdr->src_port = src_port;
5077 		sctphdr->dest_port = dest_port;
5078 		sctphdr->v_tag = v_tag;
5079 		sctphdr->checksum = 0;
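		/*
		 * For the userland AF_CONN transport the CRC32c is computed
		 * in software unless the application has indicated that it
		 * (or the lower layer) takes care of the checksum.
		 */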
5080 		if (SCTP_BASE_VAR(crc32c_offloaded) == 0) {
5081 			sctphdr->checksum = sctp_calculate_cksum(m, 0);
5082 			SCTP_STAT_INCR(sctps_sendswcrc);
5083 		} else {
5084 			SCTP_STAT_INCR(sctps_sendhwcrc);
5085 		}
5086 		if (tos_value == 0) {
5087 			tos_value = inp->ip_inp.inp.inp_ip_tos;
5088 		}
5089 		tos_value &= 0xfc;
5090 		if (ecn_ok) {
5091 			tos_value |= sctp_get_ect(stcb);
5092 		}
5093 		/* Don't alloc/free for each packet */
5094 		if ((buffer = malloc(packet_length)) != NULL) {
5095 			m_copydata(m, 0, packet_length, buffer);
5096 			ret = SCTP_BASE_VAR(conn_output)(sconn->sconn_addr, buffer, packet_length, tos_value, nofragment_flag);
5097 			free(buffer);
5098 		} else {
5099 			ret = ENOMEM;
5100 		}
5101 		sctp_m_freem(m);
5102 		return (ret);
5103 	}
5104 #endif
5105 	default:
5106 		SCTPDBG(SCTP_DEBUG_OUTPUT1, "Unknown protocol (TSNH) type %d\n",
5107 		        ((struct sockaddr *)to)->sa_family);
5108 		sctp_m_freem(m);
5109 		SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
5110 		return (EFAULT);
5111 	}
5112 }
5113 
5114 
5115 void
5116 sctp_send_initiate(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int so_locked
5117 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
5118     SCTP_UNUSED
5119 #endif
5120     )
5121 {
5122 	struct mbuf *m, *m_last;
5123 	struct sctp_nets *net;
5124 	struct sctp_init_chunk *init;
5125 	struct sctp_supported_addr_param *sup_addr;
5126 	struct sctp_adaptation_layer_indication *ali;
5127 	struct sctp_supported_chunk_types_param *pr_supported;
5128 	struct sctp_paramhdr *ph;
5129 	int cnt_inits_to = 0;
5130 	int error;
5131 	uint16_t num_ext, chunk_len, padding_len, parameter_len;
5132 
5133 #if defined(__APPLE__)
5134 	if (so_locked) {
5135 		sctp_lock_assert(SCTP_INP_SO(inp));
5136 	} else {
5137 		sctp_unlock_assert(SCTP_INP_SO(inp));
5138 	}
5139 #endif
5140 	/* An INIT always goes to the primary (and usually the only) address. */
5141 	net = stcb->asoc.primary_destination;
5142 	if (net == NULL) {
5143 		net = TAILQ_FIRST(&stcb->asoc.nets);
5144 		if (net == NULL) {
5145 			/* TSNH */
5146 			return;
5147 		}
5148 		/* we confirm any address we send an INIT to */
5149 		net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
5150 		(void)sctp_set_primary_addr(stcb, NULL, net);
5151 	} else {
5152 		/* we confirm any address we send an INIT to */
5153 		net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
5154 	}
5155 	SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT\n");
5156 #ifdef INET6
5157 	if (net->ro._l_addr.sa.sa_family == AF_INET6) {
5158 		/*
5159 		 * special hook: if we are sending to a link-local address, it
5160 		 * will not show up in our private address count.
5161 		 */
5162 		if (IN6_IS_ADDR_LINKLOCAL(&net->ro._l_addr.sin6.sin6_addr))
5163 			cnt_inits_to = 1;
5164 	}
5165 #endif
5166 	if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5167 		/* This case should not happen */
5168 		SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - failed timer?\n");
5169 		return;
5170 	}
5171 	/* start the INIT timer */
5172 	sctp_timer_start(SCTP_TIMER_TYPE_INIT, inp, stcb, net);
5173 
5174 	m = sctp_get_mbuf_for_msg(MCLBYTES, 1, M_NOWAIT, 1, MT_DATA);
5175 	if (m == NULL) {
5176 		/* No memory, INIT timer will re-attempt. */
5177 		SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - mbuf?\n");
5178 		return;
5179 	}
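	/*
	 * Build the INIT chunk directly in the mbuf: parameters are appended
	 * one after another, chunk_len tracks the running chunk length and
	 * padding_len any pending padding needed to keep parameters on a
	 * 4-byte boundary.
	 */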
5180 	chunk_len = (uint16_t)sizeof(struct sctp_init_chunk);
5181 	padding_len = 0;
5182 	/* Now lets put the chunk header in place */
5183 	init = mtod(m, struct sctp_init_chunk *);
5184 	/* now the chunk header */
5185 	init->ch.chunk_type = SCTP_INITIATION;
5186 	init->ch.chunk_flags = 0;
5187 	/* fill in later from mbuf we build */
5188 	init->ch.chunk_length = 0;
5189 	/* place in my tag */
5190 	init->init.initiate_tag = htonl(stcb->asoc.my_vtag);
5191 	/* set up some of the credits. */
5192 	init->init.a_rwnd = htonl(max(inp->sctp_socket?SCTP_SB_LIMIT_RCV(inp->sctp_socket):0,
5193 	                              SCTP_MINIMAL_RWND));
5194 	init->init.num_outbound_streams = htons(stcb->asoc.pre_open_streams);
5195 	init->init.num_inbound_streams = htons(stcb->asoc.max_inbound_streams);
5196 	init->init.initial_tsn = htonl(stcb->asoc.init_seq_number);
5197 
5198 	/* Adaptation layer indication parameter */
5199 	if (inp->sctp_ep.adaptation_layer_indicator_provided) {
5200 		parameter_len = (uint16_t)sizeof(struct sctp_adaptation_layer_indication);
5201 		ali = (struct sctp_adaptation_layer_indication *)(mtod(m, caddr_t) + chunk_len);
5202 		ali->ph.param_type = htons(SCTP_ULP_ADAPTATION);
5203 		ali->ph.param_length = htons(parameter_len);
5204 		ali->indication = htonl(inp->sctp_ep.adaptation_layer_indicator);
5205 		chunk_len += parameter_len;
5206 	}
5207 
5208 	/* ECN parameter */
5209 	if (stcb->asoc.ecn_supported == 1) {
5210 		parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
5211 		ph = (struct sctp_paramhdr *)(mtod(m, caddr_t) + chunk_len);
5212 		ph->param_type = htons(SCTP_ECN_CAPABLE);
5213 		ph->param_length = htons(parameter_len);
5214 		chunk_len += parameter_len;
5215 	}
5216 
5217 	/* PR-SCTP supported parameter */
5218 	if (stcb->asoc.prsctp_supported == 1) {
5219 		parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
5220 		ph = (struct sctp_paramhdr *)(mtod(m, caddr_t) + chunk_len);
5221 		ph->param_type = htons(SCTP_PRSCTP_SUPPORTED);
5222 		ph->param_length = htons(parameter_len);
5223 		chunk_len += parameter_len;
5224 	}
5225 
5226 	/* Add NAT friendly parameter. */
5227 	if (SCTP_BASE_SYSCTL(sctp_inits_include_nat_friendly)) {
5228 		parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
5229 		ph = (struct sctp_paramhdr *)(mtod(m, caddr_t) + chunk_len);
5230 		ph->param_type = htons(SCTP_HAS_NAT_SUPPORT);
5231 		ph->param_length = htons(parameter_len);
5232 		chunk_len += parameter_len;
5233 	}
5234 
5235 	/* And now tell the peer which extensions we support */
5236 	num_ext = 0;
5237 	pr_supported = (struct sctp_supported_chunk_types_param *)(mtod(m, caddr_t) + chunk_len);
5238 	if (stcb->asoc.prsctp_supported == 1) {
5239 		pr_supported->chunk_types[num_ext++] = SCTP_FORWARD_CUM_TSN;
5240 		if (stcb->asoc.idata_supported) {
5241 			pr_supported->chunk_types[num_ext++] = SCTP_IFORWARD_CUM_TSN;
5242 		}
5243 	}
5244 	if (stcb->asoc.auth_supported == 1) {
5245 		pr_supported->chunk_types[num_ext++] = SCTP_AUTHENTICATION;
5246 	}
5247 	if (stcb->asoc.asconf_supported == 1) {
5248 		pr_supported->chunk_types[num_ext++] = SCTP_ASCONF;
5249 		pr_supported->chunk_types[num_ext++] = SCTP_ASCONF_ACK;
5250 	}
5251 	if (stcb->asoc.reconfig_supported == 1) {
5252 		pr_supported->chunk_types[num_ext++] = SCTP_STREAM_RESET;
5253 	}
5254 	if (stcb->asoc.idata_supported) {
5255 		pr_supported->chunk_types[num_ext++] = SCTP_IDATA;
5256 	}
5257 	if (stcb->asoc.nrsack_supported == 1) {
5258 		pr_supported->chunk_types[num_ext++] = SCTP_NR_SELECTIVE_ACK;
5259 	}
5260 	if (stcb->asoc.pktdrop_supported == 1) {
5261 		pr_supported->chunk_types[num_ext++] = SCTP_PACKET_DROPPED;
5262 	}
5263 	if (num_ext > 0) {
5264 		parameter_len = (uint16_t)sizeof(struct sctp_supported_chunk_types_param) + num_ext;
5265 		pr_supported->ph.param_type = htons(SCTP_SUPPORTED_CHUNK_EXT);
5266 		pr_supported->ph.param_length = htons(parameter_len);
5267 		padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
5268 		chunk_len += parameter_len;
5269 	}
5270 	/* add authentication parameters */
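	/*
	 * SCTP authentication (RFC 4895): a RANDOM parameter, the list of
	 * supported HMAC algorithms, and the list of chunk types that must be
	 * authenticated.
	 */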
5271 	if (stcb->asoc.auth_supported) {
5272 		/* attach RANDOM parameter, if available */
5273 		if (stcb->asoc.authinfo.random != NULL) {
5274 			struct sctp_auth_random *randp;
5275 
5276 			if (padding_len > 0) {
5277 				memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
5278 				chunk_len += padding_len;
5279 				padding_len = 0;
5280 			}
5281 			randp = (struct sctp_auth_random *)(mtod(m, caddr_t) + chunk_len);
5282 			parameter_len = (uint16_t)sizeof(struct sctp_auth_random) + stcb->asoc.authinfo.random_len;
5283 			/* random key already contains the header */
5284 			memcpy(randp, stcb->asoc.authinfo.random->key, parameter_len);
5285 			padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
5286 			chunk_len += parameter_len;
5287 		}
5288 		/* add HMAC_ALGO parameter */
5289 		if (stcb->asoc.local_hmacs != NULL) {
5290 			struct sctp_auth_hmac_algo *hmacs;
5291 
5292 			if (padding_len > 0) {
5293 				memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
5294 				chunk_len += padding_len;
5295 				padding_len = 0;
5296 			}
5297 			hmacs = (struct sctp_auth_hmac_algo *)(mtod(m, caddr_t) + chunk_len);
5298 			parameter_len = (uint16_t)(sizeof(struct sctp_auth_hmac_algo) +
5299 			                           stcb->asoc.local_hmacs->num_algo * sizeof(uint16_t));
5300 			hmacs->ph.param_type = htons(SCTP_HMAC_LIST);
5301 			hmacs->ph.param_length = htons(parameter_len);
5302 			sctp_serialize_hmaclist(stcb->asoc.local_hmacs, (uint8_t *)hmacs->hmac_ids);
5303 			padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
5304 			chunk_len += parameter_len;
5305 		}
5306 		/* add CHUNKS parameter */
5307 		if (stcb->asoc.local_auth_chunks != NULL) {
5308 			struct sctp_auth_chunk_list *chunks;
5309 
5310 			if (padding_len > 0) {
5311 				memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
5312 				chunk_len += padding_len;
5313 				padding_len = 0;
5314 			}
5315 			chunks = (struct sctp_auth_chunk_list *)(mtod(m, caddr_t) + chunk_len);
5316 			parameter_len = (uint16_t)(sizeof(struct sctp_auth_chunk_list) +
5317 			                           sctp_auth_get_chklist_size(stcb->asoc.local_auth_chunks));
5318 			chunks->ph.param_type = htons(SCTP_CHUNK_LIST);
5319 			chunks->ph.param_length = htons(parameter_len);
5320 			sctp_serialize_auth_chunks(stcb->asoc.local_auth_chunks, chunks->chunk_types);
5321 			padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
5322 			chunk_len += parameter_len;
5323 		}
5324 	}
5325 
5326 	/* now any cookie time extensions */
5327 	if (stcb->asoc.cookie_preserve_req) {
5328 		struct sctp_cookie_perserve_param *cookie_preserve;
5329 
5330 		if (padding_len > 0) {
5331 			memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
5332 			chunk_len += padding_len;
5333 			padding_len = 0;
5334 		}
5335 		parameter_len = (uint16_t)sizeof(struct sctp_cookie_perserve_param);
5336 		cookie_preserve = (struct sctp_cookie_perserve_param *)(mtod(m, caddr_t) + chunk_len);
5337 		cookie_preserve->ph.param_type = htons(SCTP_COOKIE_PRESERVE);
5338 		cookie_preserve->ph.param_length = htons(parameter_len);
5339 		cookie_preserve->time = htonl(stcb->asoc.cookie_preserve_req);
5340 		stcb->asoc.cookie_preserve_req = 0;
5341 		chunk_len += parameter_len;
5342 	}
5343 
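	/*
	 * Supported Address Types parameter: advertise IPv4 and/or IPv6
	 * depending on which address families are legal for this
	 * association's scope.
	 */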
5344 	if (stcb->asoc.scope.ipv4_addr_legal || stcb->asoc.scope.ipv6_addr_legal) {
5345 		uint8_t i;
5346 
5347 		if (padding_len > 0) {
5348 			memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
5349 			chunk_len += padding_len;
5350 			padding_len = 0;
5351 		}
5352 		parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
5353 		if (stcb->asoc.scope.ipv4_addr_legal) {
5354 			parameter_len += (uint16_t)sizeof(uint16_t);
5355 		}
5356 		if (stcb->asoc.scope.ipv6_addr_legal) {
5357 			parameter_len += (uint16_t)sizeof(uint16_t);
5358 		}
5359 		sup_addr = (struct sctp_supported_addr_param *)(mtod(m, caddr_t) + chunk_len);
5360 		sup_addr->ph.param_type = htons(SCTP_SUPPORTED_ADDRTYPE);
5361 		sup_addr->ph.param_length = htons(parameter_len);
5362 		i = 0;
5363 		if (stcb->asoc.scope.ipv4_addr_legal) {
5364 			sup_addr->addr_type[i++] = htons(SCTP_IPV4_ADDRESS);
5365 		}
5366 		if (stcb->asoc.scope.ipv6_addr_legal) {
5367 			sup_addr->addr_type[i++] = htons(SCTP_IPV6_ADDRESS);
5368 		}
5369 		padding_len = 4 - 2 * i;
5370 		chunk_len += parameter_len;
5371 	}
5372 
5373 	SCTP_BUF_LEN(m) = chunk_len;
5374 	/* now the addresses */
5375 	/* To optimize this we could put the scoping stuff
5376 	 * into a structure and remove the individual uint8's from
5377 	 * the assoc structure. Then we could just sifa in the
5378 	 * address within the stcb. But for now this is a quick
5379 	 * hack to get the address stuff teased apart.
5380 	 */
5381 	m_last = sctp_add_addresses_to_i_ia(inp, stcb, &stcb->asoc.scope,
5382 	                                    m, cnt_inits_to,
5383 	                                    &padding_len, &chunk_len);
5384 
5385 	init->ch.chunk_length = htons(chunk_len);
5386 	if (padding_len > 0) {
5387 		if (sctp_add_pad_tombuf(m_last, padding_len) == NULL) {
5388 			sctp_m_freem(m);
5389 			return;
5390 		}
5391 	}
5392 	SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - calls lowlevel_output\n");
5393 	if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
5394 	                                        (struct sockaddr *)&net->ro._l_addr,
5395 	                                        m, 0, NULL, 0, 0, 0, 0,
5396 	                                        inp->sctp_lport, stcb->rport, htonl(0),
5397 	                                        net->port, NULL,
5398 #if defined(__FreeBSD__)
5399 	                                        0, 0,
5400 #endif
5401 	                                        so_locked))) {
5402 		SCTPDBG(SCTP_DEBUG_OUTPUT4, "Gak send error %d\n", error);
5403 		if (error == ENOBUFS) {
5404 			stcb->asoc.ifp_had_enobuf = 1;
5405 			SCTP_STAT_INCR(sctps_lowlevelerr);
5406 		}
5407 	} else {
5408 		stcb->asoc.ifp_had_enobuf = 0;
5409 	}
5410 	SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
5411 	(void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
5412 }
5413 
5414 struct mbuf *
5415 sctp_arethere_unrecognized_parameters(struct mbuf *in_initpkt,
5416                                       int param_offset, int *abort_processing,
5417                                       struct sctp_chunkhdr *cp,
5418                                       int *nat_friendly,
5419                                       int *cookie_found)
5420 {
5421 	/*
5422 	 * Given an mbuf containing an INIT or INIT-ACK, with param_offset
5423 	 * pointing to the beginning of the parameters (i.e. iphlen +
5424 	 * sizeof(struct sctp_init_msg)), parse through the parameters to the
5425 	 * end of the mbuf, verifying that all parameters are known.
5426 	 *
5427 	 * For unknown parameters, build and return an mbuf with
5428 	 * UNRECOGNIZED_PARAMETER error causes. If the parameter flags indicate
5429 	 * that processing must stop, stop and set *abort_processing to 1.
5430 	 *
5431 	 * By having param_offset be pre-set to where parameters begin it is
5432 	 * hoped that this routine may be reused in the future by new
5433 	 * features.
5434 	 */
5435 	struct sctp_paramhdr *phdr, params;
5436 
5437 	struct mbuf *mat, *m_tmp, *op_err, *op_err_last;
5438 	int at, limit, pad_needed;
5439 	uint16_t ptype, plen, padded_size;
5440 
5441 	*abort_processing = 0;
5442 	if (cookie_found != NULL) {
5443 		*cookie_found = 0;
5444 	}
5445 	mat = in_initpkt;
5446 	limit = ntohs(cp->chunk_length) - sizeof(struct sctp_init_chunk);
5447 	at = param_offset;
5448 	op_err = NULL;
5449 	op_err_last = NULL;
5450 	pad_needed = 0;
5451 	SCTPDBG(SCTP_DEBUG_OUTPUT1, "Check for unrecognized param's\n");
5452 	phdr = sctp_get_next_param(mat, at, &params, sizeof(params));
5453 	while ((phdr != NULL) && ((size_t)limit >= sizeof(struct sctp_paramhdr))) {
5454 		ptype = ntohs(phdr->param_type);
5455 		plen = ntohs(phdr->param_length);
5456 		if ((plen > limit) || (plen < sizeof(struct sctp_paramhdr))) {
5457 			/* malformed parameter */
5458 			SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error %d\n", plen);
5459 			goto invalid_size;
5460 		}
5461 		limit -= SCTP_SIZE32(plen);
5462 		/*-
5463 		 * All parameters for all chunks that we know/understand are
5464 		 * listed here. We process them elsewhere and take the
5465 		 * appropriate stop action based on the upper bits. However, this
5466 		 * is the generic routine that processors can call to get back
5467 		 * an operr to either incorporate (INIT-ACK) or send.
5468 		 */
5469 		padded_size = SCTP_SIZE32(plen);
5470 		switch (ptype) {
5471 			/* Param's with variable size */
5472 		case SCTP_HEARTBEAT_INFO:
5473 		case SCTP_UNRECOG_PARAM:
5474 		case SCTP_ERROR_CAUSE_IND:
5475 			/* ok skip fwd */
5476 			at += padded_size;
5477 			break;
5478 		case SCTP_STATE_COOKIE:
5479 			if (cookie_found != NULL) {
5480 				*cookie_found = 1;
5481 			}
5482 			at += padded_size;
5483 			break;
5484 			/* Param's with variable size within a range */
5485 		case SCTP_CHUNK_LIST:
5486 		case SCTP_SUPPORTED_CHUNK_EXT:
5487 			if (padded_size > (sizeof(struct sctp_supported_chunk_types_param) + (sizeof(uint8_t) * SCTP_MAX_SUPPORTED_EXT))) {
5488 				SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error chklist %d\n", plen);
5489 				goto invalid_size;
5490 			}
5491 			at += padded_size;
5492 			break;
5493 		case SCTP_SUPPORTED_ADDRTYPE:
5494 			if (padded_size > SCTP_MAX_ADDR_PARAMS_SIZE) {
5495 				SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error supaddrtype %d\n", plen);
5496 				goto invalid_size;
5497 			}
5498 			at += padded_size;
5499 			break;
5500 		case SCTP_RANDOM:
5501 			if (padded_size > (sizeof(struct sctp_auth_random) + SCTP_RANDOM_MAX_SIZE)) {
5502 				SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error random %d\n", plen);
5503 				goto invalid_size;
5504 			}
5505 			at += padded_size;
5506 			break;
5507 		case SCTP_SET_PRIM_ADDR:
5508 		case SCTP_DEL_IP_ADDRESS:
5509 		case SCTP_ADD_IP_ADDRESS:
5510 			if ((padded_size != sizeof(struct sctp_asconf_addrv4_param)) &&
5511 			    (padded_size != sizeof(struct sctp_asconf_addr_param))) {
5512 				SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error setprim %d\n", plen);
5513 				goto invalid_size;
5514 			}
5515 			at += padded_size;
5516 			break;
5517 			/* Param's with a fixed size */
5518 		case SCTP_IPV4_ADDRESS:
5519 			if (padded_size != sizeof(struct sctp_ipv4addr_param)) {
5520 				SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ipv4 addr %d\n", plen);
5521 				goto invalid_size;
5522 			}
5523 			at += padded_size;
5524 			break;
5525 		case SCTP_IPV6_ADDRESS:
5526 			if (padded_size != sizeof(struct sctp_ipv6addr_param)) {
5527 				SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ipv6 addr %d\n", plen);
5528 				goto invalid_size;
5529 			}
5530 			at += padded_size;
5531 			break;
5532 		case SCTP_COOKIE_PRESERVE:
5533 			if (padded_size != sizeof(struct sctp_cookie_perserve_param)) {
5534 				SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error cookie-preserve %d\n", plen);
5535 				goto invalid_size;
5536 			}
5537 			at += padded_size;
5538 			break;
5539 		case SCTP_HAS_NAT_SUPPORT:
5540 			*nat_friendly = 1;
5541 			/* fall through */
5542 		case SCTP_PRSCTP_SUPPORTED:
5543 			if (padded_size != sizeof(struct sctp_paramhdr)) {
5544 				SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error prsctp/nat support %d\n", plen);
5545 				goto invalid_size;
5546 			}
5547 			at += padded_size;
5548 			break;
5549 		case SCTP_ECN_CAPABLE:
5550 			if (padded_size != sizeof(struct sctp_paramhdr)) {
5551 				SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ecn %d\n", plen);
5552 				goto invalid_size;
5553 			}
5554 			at += padded_size;
5555 			break;
5556 		case SCTP_ULP_ADAPTATION:
5557 			if (padded_size != sizeof(struct sctp_adaptation_layer_indication)) {
5558 				SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error adaptation %d\n", plen);
5559 				goto invalid_size;
5560 			}
5561 			at += padded_size;
5562 			break;
5563 		case SCTP_SUCCESS_REPORT:
5564 			if (padded_size != sizeof(struct sctp_asconf_paramhdr)) {
5565 				SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error success %d\n", plen);
5566 				goto invalid_size;
5567 			}
5568 			at += padded_size;
5569 			break;
5570 		case SCTP_HOSTNAME_ADDRESS:
5571 		{
5572 			/* Hostname parameters are deprecated. */
5573 			struct sctp_gen_error_cause *cause;
5574 			int l_len;
5575 
5576 			SCTPDBG(SCTP_DEBUG_OUTPUT1, "Can't handle hostname addresses.. abort processing\n");
5577 			*abort_processing = 1;
5578 			sctp_m_freem(op_err);
5579 			op_err = NULL;
5580 			op_err_last = NULL;
5581 #ifdef INET6
5582 			l_len = SCTP_MIN_OVERHEAD;
5583 #else
5584 			l_len = SCTP_MIN_V4_OVERHEAD;
5585 #endif
5586 			l_len += sizeof(struct sctp_chunkhdr);
5587 			l_len += sizeof(struct sctp_gen_error_cause);
5588 			op_err = sctp_get_mbuf_for_msg(l_len, 0, M_NOWAIT, 1, MT_DATA);
5589 			if (op_err != NULL) {
5590 				/*
5591 				 * Pre-reserve space for IP, SCTP, and
5592 				 * chunk header.
5593 				 */
5594 #ifdef INET6
5595 				SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
5596 #else
5597 				SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
5598 #endif
5599 				SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
5600 				SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
5601 				SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause);
5602 				cause = mtod(op_err, struct sctp_gen_error_cause *);
5603 				cause->code = htons(SCTP_CAUSE_UNRESOLVABLE_ADDR);
5604 				cause->length = htons((uint16_t)(sizeof(struct sctp_gen_error_cause) + plen));
5605 				SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(mat, at, plen, M_NOWAIT);
5606 				if (SCTP_BUF_NEXT(op_err) == NULL) {
5607 					sctp_m_freem(op_err);
5608 					op_err = NULL;
5609 					op_err_last = NULL;
5610 				}
5611 			}
5612 			return (op_err);
5613 			break;
5614 		}
5615 		default:
5616 			/*
5617 			 * We do not recognize the parameter; figure out what
5618 			 * to do with it.
5619 			 */
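			/*
			 * Per RFC 4960, the two high-order bits of the
			 * parameter type encode the action for an unknown
			 * parameter: bit 0x8000 set means skip it and continue
			 * processing, bit 0x4000 set means report it back in
			 * an Unrecognized Parameter error cause.
			 */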
5620 			SCTPDBG(SCTP_DEBUG_OUTPUT1, "Hit default param %x\n", ptype);
5621 			if ((ptype & 0x4000) == 0x4000) {
5622 				/* Report bit is set?? */
5623 				SCTPDBG(SCTP_DEBUG_OUTPUT1, "report op err\n");
5624 				if (op_err == NULL) {
5625 					int l_len;
5626 					/* Ok need to try to get an mbuf */
5627 #ifdef INET6
5628 					l_len = SCTP_MIN_OVERHEAD;
5629 #else
5630 					l_len = SCTP_MIN_V4_OVERHEAD;
5631 #endif
5632 					l_len += sizeof(struct sctp_chunkhdr);
5633 					l_len += sizeof(struct sctp_paramhdr);
5634 					op_err = sctp_get_mbuf_for_msg(l_len, 0, M_NOWAIT, 1, MT_DATA);
5635 					if (op_err) {
5636 						SCTP_BUF_LEN(op_err) = 0;
5637 #ifdef INET6
5638 						SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
5639 #else
5640 						SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
5641 #endif
5642 						SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
5643 						SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
5644 						op_err_last = op_err;
5645 					}
5646 				}
5647 				if (op_err != NULL) {
5648 					/* If we have space */
5649 					struct sctp_paramhdr *param;
5650 
5651 					if (pad_needed > 0) {
5652 						op_err_last = sctp_add_pad_tombuf(op_err_last, pad_needed);
5653 					}
5654 					if (op_err_last == NULL) {
5655 						sctp_m_freem(op_err);
5656 						op_err = NULL;
5657 						op_err_last = NULL;
5658 						goto more_processing;
5659 					}
5660 					if (M_TRAILINGSPACE(op_err_last) < (int)sizeof(struct sctp_paramhdr)) {
5661 						m_tmp = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_NOWAIT, 1, MT_DATA);
5662 						if (m_tmp == NULL) {
5663 							sctp_m_freem(op_err);
5664 							op_err = NULL;
5665 							op_err_last = NULL;
5666 							goto more_processing;
5667 						}
5668 						SCTP_BUF_LEN(m_tmp) = 0;
5669 						SCTP_BUF_NEXT(m_tmp) = NULL;
5670 						SCTP_BUF_NEXT(op_err_last) = m_tmp;
5671 						op_err_last = m_tmp;
5672 					}
5673 					param = (struct sctp_paramhdr *)(mtod(op_err_last, caddr_t) + SCTP_BUF_LEN(op_err_last));
5674 					param->param_type = htons(SCTP_UNRECOG_PARAM);
5675 					param->param_length = htons((uint16_t)sizeof(struct sctp_paramhdr) + plen);
5676 					SCTP_BUF_LEN(op_err_last) += sizeof(struct sctp_paramhdr);
5677 					SCTP_BUF_NEXT(op_err_last) = SCTP_M_COPYM(mat, at, plen, M_NOWAIT);
5678 					if (SCTP_BUF_NEXT(op_err_last) == NULL) {
5679 						sctp_m_freem(op_err);
5680 						op_err = NULL;
5681 						op_err_last = NULL;
5682 						goto more_processing;
5683 					} else {
5684 						while (SCTP_BUF_NEXT(op_err_last) != NULL) {
5685 							op_err_last = SCTP_BUF_NEXT(op_err_last);
5686 						}
5687 					}
5688 					if (plen % 4 != 0) {
5689 						pad_needed = 4 - (plen % 4);
5690 					} else {
5691 						pad_needed = 0;
5692 					}
5693 				}
5694 			}
5695 		more_processing:
5696 			if ((ptype & 0x8000) == 0x0000) {
5697 				SCTPDBG(SCTP_DEBUG_OUTPUT1, "stop proc\n");
5698 				return (op_err);
5699 			} else {
5700 				/* skip this chunk and continue processing */
5701 				SCTPDBG(SCTP_DEBUG_OUTPUT1, "move on\n");
5702 				at += SCTP_SIZE32(plen);
5703 			}
5704 			break;
5705 
5706 		}
5707 		phdr = sctp_get_next_param(mat, at, &params, sizeof(params));
5708 	}
5709 	return (op_err);
5710  invalid_size:
5711 	SCTPDBG(SCTP_DEBUG_OUTPUT1, "abort flag set\n");
5712 	*abort_processing = 1;
5713 	sctp_m_freem(op_err);
5714 	op_err = NULL;
5715 	op_err_last = NULL;
5716 	if (phdr != NULL) {
5717 		struct sctp_paramhdr *param;
5718 		int l_len;
5719 #ifdef INET6
5720 		l_len = SCTP_MIN_OVERHEAD;
5721 #else
5722 		l_len = SCTP_MIN_V4_OVERHEAD;
5723 #endif
5724 		l_len += sizeof(struct sctp_chunkhdr);
5725 		l_len += (2 * sizeof(struct sctp_paramhdr));
5726 		op_err = sctp_get_mbuf_for_msg(l_len, 0, M_NOWAIT, 1, MT_DATA);
5727 		if (op_err) {
5728 			SCTP_BUF_LEN(op_err) = 0;
5729 #ifdef INET6
5730 			SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
5731 #else
5732 			SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
5733 #endif
5734 			SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
5735 			SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
5736 			SCTP_BUF_LEN(op_err) = 2 * sizeof(struct sctp_paramhdr);
5737 			param = mtod(op_err, struct sctp_paramhdr *);
5738 			param->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
5739 			param->param_length = htons(2 * sizeof(struct sctp_paramhdr));
5740 			param++;
5741 			param->param_type = htons(ptype);
5742 			param->param_length = htons(plen);
5743 		}
5744 	}
5745 	return (op_err);
5746 }
5747 
5748 static int
5749 sctp_are_there_new_addresses(struct sctp_association *asoc,
5750     struct mbuf *in_initpkt, int offset, struct sockaddr *src)
5751 {
5752 	/*
5753 	 * Given an INIT packet, look through the packet to verify that there
5754 	 * are NO new addresses. As we go through the parameters, add reports
5755 	 * of any un-understood parameters that require an error. Also, we
5756 	 * must return (1) to drop the packet if we see an un-understood
5757 	 * parameter that tells us to drop the chunk.
5758 	 */
5759 	struct sockaddr *sa_touse;
5760 	struct sockaddr *sa;
5761 	struct sctp_paramhdr *phdr, params;
5762 	uint16_t ptype, plen;
5763 	uint8_t fnd;
5764 	struct sctp_nets *net;
5765 	int check_src;
5766 #ifdef INET
5767 	struct sockaddr_in sin4, *sa4;
5768 #endif
5769 #ifdef INET6
5770 	struct sockaddr_in6 sin6, *sa6;
5771 #endif
5772 #if defined(__Userspace__)
5773 	struct sockaddr_conn *sac;
5774 #endif
5775 
5776 #ifdef INET
5777 	memset(&sin4, 0, sizeof(sin4));
5778 	sin4.sin_family = AF_INET;
5779 #ifdef HAVE_SIN_LEN
5780 	sin4.sin_len = sizeof(sin4);
5781 #endif
5782 #endif
5783 #ifdef INET6
5784 	memset(&sin6, 0, sizeof(sin6));
5785 	sin6.sin6_family = AF_INET6;
5786 #ifdef HAVE_SIN6_LEN
5787 	sin6.sin6_len = sizeof(sin6);
5788 #endif
5789 #endif
5790 	/* First what about the src address of the pkt ? */
5791 	check_src = 0;
5792 	switch (src->sa_family) {
5793 #ifdef INET
5794 	case AF_INET:
5795 		if (asoc->scope.ipv4_addr_legal) {
5796 			check_src = 1;
5797 		}
5798 		break;
5799 #endif
5800 #ifdef INET6
5801 	case AF_INET6:
5802 		if (asoc->scope.ipv6_addr_legal) {
5803 			check_src = 1;
5804 		}
5805 		break;
5806 #endif
5807 #if defined(__Userspace__)
5808 	case AF_CONN:
5809 		if (asoc->scope.conn_addr_legal) {
5810 			check_src = 1;
5811 		}
5812 		break;
5813 #endif
5814 	default:
5815 		/* TSNH */
5816 		break;
5817 	}
5818 	if (check_src) {
5819 		fnd = 0;
5820 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5821 			sa = (struct sockaddr *)&net->ro._l_addr;
5822 			if (sa->sa_family == src->sa_family) {
5823 #ifdef INET
5824 				if (sa->sa_family == AF_INET) {
5825 					struct sockaddr_in *src4;
5826 
5827 					sa4 = (struct sockaddr_in *)sa;
5828 					src4 = (struct sockaddr_in *)src;
5829 					if (sa4->sin_addr.s_addr == src4->sin_addr.s_addr) {
5830 						fnd = 1;
5831 						break;
5832 					}
5833 				}
5834 #endif
5835 #ifdef INET6
5836 				if (sa->sa_family == AF_INET6) {
5837 					struct sockaddr_in6 *src6;
5838 
5839 					sa6 = (struct sockaddr_in6 *)sa;
5840 					src6 = (struct sockaddr_in6 *)src;
5841 					if (SCTP6_ARE_ADDR_EQUAL(sa6, src6)) {
5842 						fnd = 1;
5843 						break;
5844 					}
5845 				}
5846 #endif
5847 #if defined(__Userspace__)
5848 				if (sa->sa_family == AF_CONN) {
5849 					struct sockaddr_conn *srcc;
5850 
5851 					sac = (struct sockaddr_conn *)sa;
5852 					srcc = (struct sockaddr_conn *)src;
5853 					if (sac->sconn_addr == srcc->sconn_addr) {
5854 						fnd = 1;
5855 						break;
5856 					}
5857 				}
5858 #endif
5859 			}
5860 		}
5861 		if (fnd == 0) {
5862 			/* New address added! no need to look further. */
5863 			return (1);
5864 		}
5865 	}
5866 	/* OK so far; let's munge through the rest of the packet */
5867 	offset += sizeof(struct sctp_init_chunk);
5868 	phdr = sctp_get_next_param(in_initpkt, offset, &params, sizeof(params));
5869 	while (phdr) {
5870 		sa_touse = NULL;
5871 		ptype = ntohs(phdr->param_type);
5872 		plen = ntohs(phdr->param_length);
5873 		switch (ptype) {
5874 #ifdef INET
5875 		case SCTP_IPV4_ADDRESS:
5876 		{
5877 			struct sctp_ipv4addr_param *p4, p4_buf;
5878 
5879 			if (plen != sizeof(struct sctp_ipv4addr_param)) {
5880 				return (1);
5881 			}
5882 			phdr = sctp_get_next_param(in_initpkt, offset,
5883 			    (struct sctp_paramhdr *)&p4_buf, sizeof(p4_buf));
5884 			if (phdr == NULL) {
5885 				return (1);
5886 			}
5887 			if (asoc->scope.ipv4_addr_legal) {
5888 				p4 = (struct sctp_ipv4addr_param *)phdr;
5889 				sin4.sin_addr.s_addr = p4->addr;
5890 				sa_touse = (struct sockaddr *)&sin4;
5891 			}
5892 			break;
5893 		}
5894 #endif
5895 #ifdef INET6
5896 		case SCTP_IPV6_ADDRESS:
5897 		{
5898 			struct sctp_ipv6addr_param *p6, p6_buf;
5899 
5900 			if (plen != sizeof(struct sctp_ipv6addr_param)) {
5901 				return (1);
5902 			}
5903 			phdr = sctp_get_next_param(in_initpkt, offset,
5904 			    (struct sctp_paramhdr *)&p6_buf, sizeof(p6_buf));
5905 			if (phdr == NULL) {
5906 				return (1);
5907 			}
5908 			if (asoc->scope.ipv6_addr_legal) {
5909 				p6 = (struct sctp_ipv6addr_param *)phdr;
5910 				memcpy((caddr_t)&sin6.sin6_addr, p6->addr,
5911 				       sizeof(p6->addr));
5912 				sa_touse = (struct sockaddr *)&sin6;
5913 			}
5914 			break;
5915 		}
5916 #endif
5917 		default:
5918 			sa_touse = NULL;
5919 			break;
5920 		}
5921 		if (sa_touse) {
5922 			/* ok, sa_touse points to one to check */
5923 			fnd = 0;
5924 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5925 				sa = (struct sockaddr *)&net->ro._l_addr;
5926 				if (sa->sa_family != sa_touse->sa_family) {
5927 					continue;
5928 				}
5929 #ifdef INET
5930 				if (sa->sa_family == AF_INET) {
5931 					sa4 = (struct sockaddr_in *)sa;
5932 					if (sa4->sin_addr.s_addr ==
5933 					    sin4.sin_addr.s_addr) {
5934 						fnd = 1;
5935 						break;
5936 					}
5937 				}
5938 #endif
5939 #ifdef INET6
5940 				if (sa->sa_family == AF_INET6) {
5941 					sa6 = (struct sockaddr_in6 *)sa;
5942 					if (SCTP6_ARE_ADDR_EQUAL(
5943 					    sa6, &sin6)) {
5944 						fnd = 1;
5945 						break;
5946 					}
5947 				}
5948 #endif
5949 			}
5950 			if (!fnd) {
5951 				/* New addr added! no need to look further */
5952 				return (1);
5953 			}
5954 		}
5955 		offset += SCTP_SIZE32(plen);
5956 		phdr = sctp_get_next_param(in_initpkt, offset, &params, sizeof(params));
5957 	}
5958 	return (0);
5959 }
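/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * the parameter walk above steps through the INIT's TLVs in 4-byte
 * multiples, because every SCTP parameter is padded to a 32-bit boundary
 * on the wire even though param_length excludes that padding.  A minimal,
 * hypothetical walker using only helpers that appear in this file:
 */
#if 0	/* example only, never compiled */
static int
example_walk_params(struct mbuf *pkt, int offset, int limit)
{
	struct sctp_paramhdr hdr, *phdr;
	uint16_t plen;
	int cnt = 0;

	phdr = sctp_get_next_param(pkt, offset, &hdr, sizeof(hdr));
	while ((phdr != NULL) && (offset < limit)) {
		plen = ntohs(phdr->param_length);
		if (plen < sizeof(struct sctp_paramhdr)) {
			break;		/* malformed parameter */
		}
		cnt++;
		offset += SCTP_SIZE32(plen);	/* step over the padding too */
		phdr = sctp_get_next_param(pkt, offset, &hdr, sizeof(hdr));
	}
	return (cnt);
}
#endif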
5960 
5961 /*
5962  * Given an MBUF chain that was sent into us containing an INIT, build an
5963  * INIT-ACK with COOKIE and send it back. We assume that the in_initpkt has
5964  * done a pullup to include the IPv6/IPv4 header, the SCTP header, and the
5965  * initial part of the INIT message (i.e. the struct sctp_init_msg).
5966  */
5967 void
5968 sctp_send_initiate_ack(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
5969                        struct sctp_nets *src_net, struct mbuf *init_pkt,
5970                        int iphlen, int offset,
5971                        struct sockaddr *src, struct sockaddr *dst,
5972                        struct sctphdr *sh, struct sctp_init_chunk *init_chk,
5973 #if defined(__FreeBSD__)
5974 		       uint8_t mflowtype, uint32_t mflowid,
5975 #endif
5976                        uint32_t vrf_id, uint16_t port)
5977 {
5978 	struct sctp_association *asoc;
5979 	struct mbuf *m, *m_tmp, *m_last, *m_cookie, *op_err;
5980 	struct sctp_init_ack_chunk *initack;
5981 	struct sctp_adaptation_layer_indication *ali;
5982 	struct sctp_supported_chunk_types_param *pr_supported;
5983 	struct sctp_paramhdr *ph;
5984 	union sctp_sockstore *over_addr;
5985 	struct sctp_scoping scp;
5986 	struct timeval now;
5987 #ifdef INET
5988 	struct sockaddr_in *dst4 = (struct sockaddr_in *)dst;
5989 	struct sockaddr_in *src4 = (struct sockaddr_in *)src;
5990 	struct sockaddr_in *sin;
5991 #endif
5992 #ifdef INET6
5993 	struct sockaddr_in6 *dst6 = (struct sockaddr_in6 *)dst;
5994 	struct sockaddr_in6 *src6 = (struct sockaddr_in6 *)src;
5995 	struct sockaddr_in6 *sin6;
5996 #endif
5997 #if defined(__Userspace__)
5998 	struct sockaddr_conn *dstconn = (struct sockaddr_conn *)dst;
5999 	struct sockaddr_conn *srcconn = (struct sockaddr_conn *)src;
6000 	struct sockaddr_conn *sconn;
6001 #endif
6002 	struct sockaddr *to;
6003 	struct sctp_state_cookie stc;
6004 	struct sctp_nets *net = NULL;
6005 	uint8_t *signature = NULL;
6006 	int cnt_inits_to = 0;
6007 	uint16_t his_limit, i_want;
6008 	int abort_flag;
6009 	int nat_friendly = 0;
6010 	int error;
6011 	struct socket *so;
6012 	uint16_t num_ext, chunk_len, padding_len, parameter_len;
6013 
6014 	if (stcb) {
6015 		asoc = &stcb->asoc;
6016 	} else {
6017 		asoc = NULL;
6018 	}
6019 	if ((asoc != NULL) &&
6020 	    (SCTP_GET_STATE(stcb) != SCTP_STATE_COOKIE_WAIT)) {
6021 		if (sctp_are_there_new_addresses(asoc, init_pkt, offset, src)) {
6022 			/*
6023 			 * new addresses, out of here in non-cookie-wait states
6024 			 *
6025 			 * Send an ABORT, without the new address error cause.
6026 			 * This looks no different than if no listener
6027 			 * was present.
6028 			 */
6029 			op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
6030 			                             "Address added");
6031 			sctp_send_abort(init_pkt, iphlen, src, dst, sh, 0, op_err,
6032 #if defined(__FreeBSD__)
6033 			                mflowtype, mflowid, inp->fibnum,
6034 #endif
6035 			                vrf_id, port);
6036 			return;
6037 		}
6038 		if (src_net != NULL && (src_net->port != port)) {
6039 			/*
6040 			 * change of remote encapsulation port, out of here in
6041 			 * non-cookie-wait states
6042 			 *
6043 			 * Send an ABORT, without a specific error cause.
6044 			 * This looks no different than if no listener
6045 			 * was present.
6046 			 */
6047 			op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
6048 			                             "Remote encapsulation port changed");
6049 			sctp_send_abort(init_pkt, iphlen, src, dst, sh, 0, op_err,
6050 #if defined(__FreeBSD__)
6051 			                mflowtype, mflowid, inp->fibnum,
6052 #endif
6053 			                vrf_id, port);
6054 			return;
6055 		}
6056 	}
6057 	abort_flag = 0;
6058 	op_err = sctp_arethere_unrecognized_parameters(init_pkt,
6059 	                                               (offset + sizeof(struct sctp_init_chunk)),
6060 	                                               &abort_flag,
6061 	                                               (struct sctp_chunkhdr *)init_chk,
6062 	                                               &nat_friendly, NULL);
6063 	if (abort_flag) {
6064 	do_a_abort:
6065 		if (op_err == NULL) {
6066 			char msg[SCTP_DIAG_INFO_LEN];
6067 
6068 			SCTP_SNPRINTF(msg, sizeof(msg), "%s:%d at %s", __FILE__, __LINE__, __func__);
6069 			op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
6070 			                             msg);
6071 		}
6072 		sctp_send_abort(init_pkt, iphlen, src, dst, sh,
6073 				init_chk->init.initiate_tag, op_err,
6074 #if defined(__FreeBSD__)
6075 		                mflowtype, mflowid, inp->fibnum,
6076 #endif
6077 		                vrf_id, port);
6078 		return;
6079 	}
6080 	m = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
6081 	if (m == NULL) {
6082 		/* No memory, INIT timer will re-attempt. */
6083 		sctp_m_freem(op_err);
6084 		return;
6085 	}
6086 	chunk_len = (uint16_t)sizeof(struct sctp_init_ack_chunk);
6087 	padding_len = 0;
6088 
6089 	/*
6090 	 * We might not overwrite the identification[] completely and on
6091 	 * some platforms time_entered will contain some padding.
6092 	 * Therefore zero out the cookie to avoid putting
6093 	 * uninitialized memory on the wire.
6094 	 */
6095 	memset(&stc, 0, sizeof(struct sctp_state_cookie));
6096 
6097 	/* the time I built cookie */
6098 	(void)SCTP_GETTIME_TIMEVAL(&now);
6099 	stc.time_entered.tv_sec = now.tv_sec;
6100 	stc.time_entered.tv_usec = now.tv_usec;
6101 
6102 	/* populate any tie tags */
6103 	if (asoc != NULL) {
6104 		/* unlock before tag selections */
6105 		stc.tie_tag_my_vtag = asoc->my_vtag_nonce;
6106 		stc.tie_tag_peer_vtag = asoc->peer_vtag_nonce;
6107 		stc.cookie_life = asoc->cookie_life;
6108 		net = asoc->primary_destination;
6109 	} else {
6110 		stc.tie_tag_my_vtag = 0;
6111 		stc.tie_tag_peer_vtag = 0;
6112 		/* life I will award this cookie */
6113 		stc.cookie_life = inp->sctp_ep.def_cookie_life;
6114 	}
6115 
6116 	/* copy in the ports for later check */
6117 	stc.myport = sh->dest_port;
6118 	stc.peerport = sh->src_port;
6119 
6120 	/*
6121 	 * If we wanted to honor cookie life extensions, we would add to
6122 	 * stc.cookie_life. For now we should NOT honor any extension
6123 	 */
6124 	stc.site_scope = stc.local_scope = stc.loopback_scope = 0;
6125 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
6126 		stc.ipv6_addr_legal = 1;
6127 		if (SCTP_IPV6_V6ONLY(inp)) {
6128 			stc.ipv4_addr_legal = 0;
6129 		} else {
6130 			stc.ipv4_addr_legal = 1;
6131 		}
6132 #if defined(__Userspace__)
6133 		stc.conn_addr_legal = 0;
6134 #endif
6135 	} else {
6136 		stc.ipv6_addr_legal = 0;
6137 #if defined(__Userspace__)
6138 		if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_CONN) {
6139 			stc.conn_addr_legal = 1;
6140 			stc.ipv4_addr_legal = 0;
6141 		} else {
6142 			stc.conn_addr_legal = 0;
6143 			stc.ipv4_addr_legal = 1;
6144 		}
6145 #else
6146 		stc.ipv4_addr_legal = 1;
6147 #endif
6148 	}
6149 	stc.ipv4_scope = 0;
6150 	if (net == NULL) {
6151 		to = src;
6152 		switch (dst->sa_family) {
6153 #ifdef INET
6154 		case AF_INET:
6155 		{
6156 			/* lookup address */
6157 			stc.address[0] = src4->sin_addr.s_addr;
6158 			stc.address[1] = 0;
6159 			stc.address[2] = 0;
6160 			stc.address[3] = 0;
6161 			stc.addr_type = SCTP_IPV4_ADDRESS;
6162 			/* local from address */
6163 			stc.laddress[0] = dst4->sin_addr.s_addr;
6164 			stc.laddress[1] = 0;
6165 			stc.laddress[2] = 0;
6166 			stc.laddress[3] = 0;
6167 			stc.laddr_type = SCTP_IPV4_ADDRESS;
6168 			/* scope_id is only for v6 */
6169 			stc.scope_id = 0;
6170 			if ((IN4_ISPRIVATE_ADDRESS(&src4->sin_addr)) ||
6171 			    (IN4_ISPRIVATE_ADDRESS(&dst4->sin_addr))){
6172 				stc.ipv4_scope = 1;
6173 			}
6174 			/* Must use the address in this case */
6175 			if (sctp_is_address_on_local_host(src, vrf_id)) {
6176 				stc.loopback_scope = 1;
6177 				stc.ipv4_scope = 1;
6178 				stc.site_scope = 1;
6179 				stc.local_scope = 0;
6180 			}
6181 			break;
6182 		}
6183 #endif
6184 #ifdef INET6
6185 		case AF_INET6:
6186 		{
6187 			stc.addr_type = SCTP_IPV6_ADDRESS;
6188 			memcpy(&stc.address, &src6->sin6_addr, sizeof(struct in6_addr));
6189 #if defined(__FreeBSD__) && (((__FreeBSD_version < 900000) && (__FreeBSD_version >= 804000)) || (__FreeBSD_version > 900000))
6190 			stc.scope_id = ntohs(in6_getscope(&src6->sin6_addr));
6191 #else
6192 			stc.scope_id = 0;
6193 #endif
6194 			if (sctp_is_address_on_local_host(src, vrf_id)) {
6195 				stc.loopback_scope = 1;
6196 				stc.local_scope = 0;
6197 				stc.site_scope = 1;
6198 				stc.ipv4_scope = 1;
6199 			} else if (IN6_IS_ADDR_LINKLOCAL(&src6->sin6_addr) ||
6200 			           IN6_IS_ADDR_LINKLOCAL(&dst6->sin6_addr)) {
6201 				/*
6202 				 * If the new destination or source is
6203 				 * LINK_LOCAL, we must have both site and
6204 				 * local scope in common. Don't set local
6205 				 * scope though, since we must depend on the
6206 				 * source being added implicitly. We cannot
6207 				 * assume, just because we share one link,
6208 				 * that all links are common.
6209 				 */
6210 #if defined(__APPLE__)
6211 				/* Mac OS X currently doesn't have in6_getscope() */
6212 				stc.scope_id = src6->sin6_addr.s6_addr16[1];
6213 #endif
6214 				stc.local_scope = 0;
6215 				stc.site_scope = 1;
6216 				stc.ipv4_scope = 1;
6217 				/*
6218 				 * We start counting the private address
6219 				 * stuff at 1, since the link-local address
6220 				 * we source from won't show up in our
6221 				 * scoped count.
6222 				 */
6223 				cnt_inits_to = 1;
6224 				/* pull out the scope_id from incoming pkt */
6225 			} else if (IN6_IS_ADDR_SITELOCAL(&src6->sin6_addr) ||
6226 			           IN6_IS_ADDR_SITELOCAL(&dst6->sin6_addr)) {
6227 				/*
6228 				 * If the new destination or source is
6229 				 * SITE_LOCAL then we must have site scope in
6230 				 * common.
6231 				 */
6232 				stc.site_scope = 1;
6233 			}
6234 			memcpy(&stc.laddress, &dst6->sin6_addr, sizeof(struct in6_addr));
6235 			stc.laddr_type = SCTP_IPV6_ADDRESS;
6236 			break;
6237 		}
6238 #endif
6239 #if defined(__Userspace__)
6240 		case AF_CONN:
6241 		{
6242 			/* lookup address */
6243 			stc.address[0] = 0;
6244 			stc.address[1] = 0;
6245 			stc.address[2] = 0;
6246 			stc.address[3] = 0;
6247 			memcpy(&stc.address, &srcconn->sconn_addr, sizeof(void *));
6248 			stc.addr_type = SCTP_CONN_ADDRESS;
6249 			/* local from address */
6250 			stc.laddress[0] = 0;
6251 			stc.laddress[1] = 0;
6252 			stc.laddress[2] = 0;
6253 			stc.laddress[3] = 0;
6254 			memcpy(&stc.laddress, &dstconn->sconn_addr, sizeof(void *));
6255 			stc.laddr_type = SCTP_CONN_ADDRESS;
6256 			/* scope_id is only for v6 */
6257 			stc.scope_id = 0;
6258 			break;
6259 		}
6260 #endif
6261 		default:
6262 			/* TSNH */
6263 			goto do_a_abort;
6264 			break;
6265 		}
6266 	} else {
6267 		/* set the scope per the existing tcb */
6268 
6269 #ifdef INET6
6270 		struct sctp_nets *lnet;
6271 #endif
6272 
6273 		stc.loopback_scope = asoc->scope.loopback_scope;
6274 		stc.ipv4_scope = asoc->scope.ipv4_local_scope;
6275 		stc.site_scope = asoc->scope.site_scope;
6276 		stc.local_scope = asoc->scope.local_scope;
6277 #ifdef INET6
6278 		/* Why do we not consider IPv4 LL addresses? */
6279 		TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) {
6280 			if (lnet->ro._l_addr.sin6.sin6_family == AF_INET6) {
6281 				if (IN6_IS_ADDR_LINKLOCAL(&lnet->ro._l_addr.sin6.sin6_addr)) {
6282 					/*
6283 					 * if we have a LL address, start
6284 					 * counting at 1.
6285 					 */
6286 					cnt_inits_to = 1;
6287 				}
6288 			}
6289 		}
6290 #endif
6291 		/* use the net pointer */
6292 		to = (struct sockaddr *)&net->ro._l_addr;
6293 		switch (to->sa_family) {
6294 #ifdef INET
6295 		case AF_INET:
6296 			sin = (struct sockaddr_in *)to;
6297 			stc.address[0] = sin->sin_addr.s_addr;
6298 			stc.address[1] = 0;
6299 			stc.address[2] = 0;
6300 			stc.address[3] = 0;
6301 			stc.addr_type = SCTP_IPV4_ADDRESS;
6302 			if (net->src_addr_selected == 0) {
6303 				/*
6304 				 * Strange case here; the INIT should have
6305 				 * done the selection.
6306 				 */
6307 				net->ro._s_addr = sctp_source_address_selection(inp,
6308 										stcb, (sctp_route_t *)&net->ro,
6309 										net, 0, vrf_id);
6310 				if (net->ro._s_addr == NULL) {
6311 					sctp_m_freem(op_err);
6312 					sctp_m_freem(m);
6313 					return;
6314 				}
6315 
6316 				net->src_addr_selected = 1;
6317 
6318 			}
6319 			stc.laddress[0] = net->ro._s_addr->address.sin.sin_addr.s_addr;
6320 			stc.laddress[1] = 0;
6321 			stc.laddress[2] = 0;
6322 			stc.laddress[3] = 0;
6323 			stc.laddr_type = SCTP_IPV4_ADDRESS;
6324 			/* scope_id is only for v6 */
6325 			stc.scope_id = 0;
6326 			break;
6327 #endif
6328 #ifdef INET6
6329 		case AF_INET6:
6330 			sin6 = (struct sockaddr_in6 *)to;
6331 			memcpy(&stc.address, &sin6->sin6_addr,
6332 			       sizeof(struct in6_addr));
6333 			stc.addr_type = SCTP_IPV6_ADDRESS;
6334 			stc.scope_id = sin6->sin6_scope_id;
6335 			if (net->src_addr_selected == 0) {
6336 				/*
6337 				 * strange case here, the INIT should have
6338 				 * done the selection.
6339 				 */
6340 				net->ro._s_addr = sctp_source_address_selection(inp,
6341 										stcb, (sctp_route_t *)&net->ro,
6342 										net, 0, vrf_id);
6343 				if (net->ro._s_addr == NULL) {
6344 					sctp_m_freem(op_err);
6345 					sctp_m_freem(m);
6346 					return;
6347 				}
6348 
6349 				net->src_addr_selected = 1;
6350 			}
6351 			memcpy(&stc.laddress, &net->ro._s_addr->address.sin6.sin6_addr,
6352 			       sizeof(struct in6_addr));
6353 			stc.laddr_type = SCTP_IPV6_ADDRESS;
6354 			break;
6355 #endif
6356 #if defined(__Userspace__)
6357 		case AF_CONN:
6358 			sconn = (struct sockaddr_conn *)to;
6359 			stc.address[0] = 0;
6360 			stc.address[1] = 0;
6361 			stc.address[2] = 0;
6362 			stc.address[3] = 0;
6363 			memcpy(&stc.address, &sconn->sconn_addr, sizeof(void *));
6364 			stc.addr_type = SCTP_CONN_ADDRESS;
6365 			stc.laddress[0] = 0;
6366 			stc.laddress[1] = 0;
6367 			stc.laddress[2] = 0;
6368 			stc.laddress[3] = 0;
6369 			memcpy(&stc.laddress, &sconn->sconn_addr, sizeof(void *));
6370 			stc.laddr_type = SCTP_CONN_ADDRESS;
6371 			stc.scope_id = 0;
6372 			break;
6373 #endif
6374 		}
6375 	}
6376 	/* Now let's put the SCTP header in place */
6377 	initack = mtod(m, struct sctp_init_ack_chunk *);
6378 	/* Save it off for quick ref */
6379 	stc.peers_vtag = ntohl(init_chk->init.initiate_tag);
6380 	/* who are we */
6381 	memcpy(stc.identification, SCTP_VERSION_STRING,
6382 	       min(strlen(SCTP_VERSION_STRING), sizeof(stc.identification)));
6383 	memset(stc.reserved, 0, SCTP_RESERVE_SPACE);
6384 	/* now the chunk header */
6385 	initack->ch.chunk_type = SCTP_INITIATION_ACK;
6386 	initack->ch.chunk_flags = 0;
6387 	/* fill in later from mbuf we build */
6388 	initack->ch.chunk_length = 0;
6389 	/* place in my tag */
6390 	if ((asoc != NULL) &&
6391 	    ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
6392 	     (SCTP_GET_STATE(stcb) == SCTP_STATE_INUSE) ||
6393 	     (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED))) {
6394 		/* re-use the v-tags and init-seq here */
6395 		initack->init.initiate_tag = htonl(asoc->my_vtag);
6396 		initack->init.initial_tsn = htonl(asoc->init_seq_number);
6397 	} else {
6398 		uint32_t vtag, itsn;
6399 
6400 		if (asoc) {
6401 			atomic_add_int(&asoc->refcnt, 1);
6402 			SCTP_TCB_UNLOCK(stcb);
6403 		new_tag:
6404 			vtag = sctp_select_a_tag(inp, inp->sctp_lport, sh->src_port, 1);
6405 			if ((asoc->peer_supports_nat)  && (vtag == asoc->my_vtag)) {
6406 				/* Got a duplicate vtag from a peer behind a
6407 				 * NAT; make sure we don't use it.
6408 				 */
6409 				goto new_tag;
6410 			}
6411 			initack->init.initiate_tag = htonl(vtag);
6412 			/* get a TSN to use too */
6413 			itsn = sctp_select_initial_TSN(&inp->sctp_ep);
6414 			initack->init.initial_tsn = htonl(itsn);
6415 			SCTP_TCB_LOCK(stcb);
6416 			atomic_add_int(&asoc->refcnt, -1);
6417 		} else {
6418 			SCTP_INP_INCR_REF(inp);
6419 			SCTP_INP_RUNLOCK(inp);
6420 			vtag = sctp_select_a_tag(inp, inp->sctp_lport, sh->src_port, 1);
6421 			initack->init.initiate_tag = htonl(vtag);
6422 			/* get a TSN to use too */
6423 			initack->init.initial_tsn = htonl(sctp_select_initial_TSN(&inp->sctp_ep));
6424 			SCTP_INP_RLOCK(inp);
6425 			SCTP_INP_DECR_REF(inp);
6426 		}
6427 	}
6428 	/* save away my tag too */
6429 	stc.my_vtag = initack->init.initiate_tag;
6430 
6431 	/* set up some of the credits. */
6432 	so = inp->sctp_socket;
6433 	if (so == NULL) {
6434 		/* memory problem */
6435 		sctp_m_freem(op_err);
6436 		sctp_m_freem(m);
6437 		return;
6438 	} else {
6439 		initack->init.a_rwnd = htonl(max(SCTP_SB_LIMIT_RCV(so), SCTP_MINIMAL_RWND));
6440 	}
6441 	/* set what I want */
6442 	his_limit = ntohs(init_chk->init.num_inbound_streams);
6443 	/* choose what I want */
6444 	if (asoc != NULL) {
6445 		if (asoc->streamoutcnt > asoc->pre_open_streams) {
6446 			i_want = asoc->streamoutcnt;
6447 		} else {
6448 			i_want = asoc->pre_open_streams;
6449 		}
6450 	} else {
6451 		i_want = inp->sctp_ep.pre_open_stream_count;
6452 	}
6453 	if (his_limit < i_want) {
6454 		/* I Want more :< */
6455 		initack->init.num_outbound_streams = init_chk->init.num_inbound_streams;
6456 	} else {
6457 		/* I can have what I want :> */
6458 		initack->init.num_outbound_streams = htons(i_want);
6459 	}
6460 	/* tell him his limit. */
6461 	initack->init.num_inbound_streams =
6462 		htons(inp->sctp_ep.max_open_streams_intome);
6463 
6464 	/* adaptation layer indication parameter */
6465 	if (inp->sctp_ep.adaptation_layer_indicator_provided) {
6466 		parameter_len = (uint16_t)sizeof(struct sctp_adaptation_layer_indication);
6467 		ali = (struct sctp_adaptation_layer_indication *)(mtod(m, caddr_t) + chunk_len);
6468 		ali->ph.param_type = htons(SCTP_ULP_ADAPTATION);
6469 		ali->ph.param_length = htons(parameter_len);
6470 		ali->indication = htonl(inp->sctp_ep.adaptation_layer_indicator);
6471 		chunk_len += parameter_len;
6472 	}
6473 
6474 	/* ECN parameter */
6475 	if (((asoc != NULL) && (asoc->ecn_supported == 1)) ||
6476 	    ((asoc == NULL) && (inp->ecn_supported == 1))) {
6477 		parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
6478 		ph = (struct sctp_paramhdr *)(mtod(m, caddr_t) + chunk_len);
6479 		ph->param_type = htons(SCTP_ECN_CAPABLE);
6480 		ph->param_length = htons(parameter_len);
6481 		chunk_len += parameter_len;
6482 	}
6483 
6484 	/* PR-SCTP supported parameter */
6485 	if (((asoc != NULL) && (asoc->prsctp_supported == 1)) ||
6486 	    ((asoc == NULL) && (inp->prsctp_supported == 1))) {
6487 		parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
6488 		ph = (struct sctp_paramhdr *)(mtod(m, caddr_t) + chunk_len);
6489 		ph->param_type = htons(SCTP_PRSCTP_SUPPORTED);
6490 		ph->param_length = htons(parameter_len);
6491 		chunk_len += parameter_len;
6492 	}
6493 
6494 	/* Add NAT friendly parameter */
6495 	if (nat_friendly) {
6496 		parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
6497 		ph = (struct sctp_paramhdr *)(mtod(m, caddr_t) + chunk_len);
6498 		ph->param_type = htons(SCTP_HAS_NAT_SUPPORT);
6499 		ph->param_length = htons(parameter_len);
6500 		chunk_len += parameter_len;
6501 	}
6502 
6503 	/* And now tell the peer which extensions we support */
6504 	num_ext = 0;
6505 	pr_supported = (struct sctp_supported_chunk_types_param *)(mtod(m, caddr_t) + chunk_len);
6506 	if (((asoc != NULL) && (asoc->prsctp_supported == 1)) ||
6507 	    ((asoc == NULL) && (inp->prsctp_supported == 1))) {
6508 		pr_supported->chunk_types[num_ext++] = SCTP_FORWARD_CUM_TSN;
6509 		if (((asoc != NULL) && (asoc->idata_supported == 1)) ||
6510 		    ((asoc == NULL) && (inp->idata_supported == 1))) {
6511 			pr_supported->chunk_types[num_ext++] = SCTP_IFORWARD_CUM_TSN;
6512 		}
6513 	}
6514 	if (((asoc != NULL) && (asoc->auth_supported == 1)) ||
6515 	    ((asoc == NULL) && (inp->auth_supported == 1))) {
6516 		pr_supported->chunk_types[num_ext++] = SCTP_AUTHENTICATION;
6517 	}
6518 	if (((asoc != NULL) && (asoc->asconf_supported == 1)) ||
6519 	    ((asoc == NULL) && (inp->asconf_supported == 1))) {
6520 		pr_supported->chunk_types[num_ext++] = SCTP_ASCONF;
6521 		pr_supported->chunk_types[num_ext++] = SCTP_ASCONF_ACK;
6522 	}
6523 	if (((asoc != NULL) && (asoc->reconfig_supported == 1)) ||
6524 	    ((asoc == NULL) && (inp->reconfig_supported == 1))) {
6525 		pr_supported->chunk_types[num_ext++] = SCTP_STREAM_RESET;
6526 	}
6527 	if (((asoc != NULL) && (asoc->idata_supported == 1)) ||
6528 	    ((asoc == NULL) && (inp->idata_supported == 1))) {
6529 		pr_supported->chunk_types[num_ext++] = SCTP_IDATA;
6530 	}
6531 	if (((asoc != NULL) && (asoc->nrsack_supported == 1)) ||
6532 	    ((asoc == NULL) && (inp->nrsack_supported == 1))) {
6533 		pr_supported->chunk_types[num_ext++] = SCTP_NR_SELECTIVE_ACK;
6534 	}
6535 	if (((asoc != NULL) && (asoc->pktdrop_supported == 1)) ||
6536 	    ((asoc == NULL) && (inp->pktdrop_supported == 1))) {
6537 		pr_supported->chunk_types[num_ext++] = SCTP_PACKET_DROPPED;
6538 	}
6539 	if (num_ext > 0) {
6540 		parameter_len = (uint16_t)sizeof(struct sctp_supported_chunk_types_param) + num_ext;
6541 		pr_supported->ph.param_type = htons(SCTP_SUPPORTED_CHUNK_EXT);
6542 		pr_supported->ph.param_length = htons(parameter_len);
6543 		padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
6544 		chunk_len += parameter_len;
6545 	}
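	/*
	 * Illustrative note (editorial addition): SCTP_SIZE32() rounds a
	 * length up to the next multiple of 4, so the padding a parameter
	 * needs is SCTP_SIZE32(len) - len.  For example, parameter_len = 9
	 * gives SCTP_SIZE32(9) = 12 and padding_len = 3.  The padding bytes
	 * themselves are only written (and zeroed) immediately before the
	 * next parameter is appended, or by the final pad handling below.
	 */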
6546 
6547 	/* add authentication parameters */
6548 	if (((asoc != NULL) && (asoc->auth_supported == 1)) ||
6549 	    ((asoc == NULL) && (inp->auth_supported == 1))) {
6550 		struct sctp_auth_random *randp;
6551 		struct sctp_auth_hmac_algo *hmacs;
6552 		struct sctp_auth_chunk_list *chunks;
6553 
6554 		if (padding_len > 0) {
6555 			memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
6556 			chunk_len += padding_len;
6557 			padding_len = 0;
6558 		}
6559 		/* generate and add RANDOM parameter */
6560 		randp = (struct sctp_auth_random *)(mtod(m, caddr_t) + chunk_len);
6561 		parameter_len = (uint16_t)sizeof(struct sctp_auth_random) +
6562 		                SCTP_AUTH_RANDOM_SIZE_DEFAULT;
6563 		randp->ph.param_type = htons(SCTP_RANDOM);
6564 		randp->ph.param_length = htons(parameter_len);
6565 		SCTP_READ_RANDOM(randp->random_data, SCTP_AUTH_RANDOM_SIZE_DEFAULT);
6566 		padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
6567 		chunk_len += parameter_len;
6568 
6569 		if (padding_len > 0) {
6570 			memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
6571 			chunk_len += padding_len;
6572 			padding_len = 0;
6573 		}
6574 		/* add HMAC_ALGO parameter */
6575 		hmacs = (struct sctp_auth_hmac_algo *)(mtod(m, caddr_t) + chunk_len);
6576 		parameter_len = (uint16_t)sizeof(struct sctp_auth_hmac_algo) +
6577 		                sctp_serialize_hmaclist(inp->sctp_ep.local_hmacs,
6578 		                                        (uint8_t *)hmacs->hmac_ids);
6579 		hmacs->ph.param_type = htons(SCTP_HMAC_LIST);
6580 		hmacs->ph.param_length = htons(parameter_len);
6581 		padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
6582 		chunk_len += parameter_len;
6583 
6584 		if (padding_len > 0) {
6585 			memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
6586 			chunk_len += padding_len;
6587 			padding_len = 0;
6588 		}
6589 		/* add CHUNKS parameter */
6590 		chunks = (struct sctp_auth_chunk_list *)(mtod(m, caddr_t) + chunk_len);
6591 		parameter_len = (uint16_t)sizeof(struct sctp_auth_chunk_list) +
6592 		                sctp_serialize_auth_chunks(inp->sctp_ep.local_auth_chunks,
6593 		                                           chunks->chunk_types);
6594 		chunks->ph.param_type = htons(SCTP_CHUNK_LIST);
6595 		chunks->ph.param_length = htons(parameter_len);
6596 		padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
6597 		chunk_len += parameter_len;
6598 	}
6599 	SCTP_BUF_LEN(m) = chunk_len;
6600 	m_last = m;
6601 	/* now the addresses */
6602 	/* To optimize this we could put the scoping stuff
6603 	 * into a structure and remove the individual uint8_t's from
6604 	 * the stc structure. Then we could just set the
6605 	 * address within the stc... but for now this is a quick
6606 	 * hack to get the address stuff teased apart.
6607 	 */
6608 	scp.ipv4_addr_legal = stc.ipv4_addr_legal;
6609 	scp.ipv6_addr_legal = stc.ipv6_addr_legal;
6610 #if defined(__Userspace__)
6611 	scp.conn_addr_legal = stc.conn_addr_legal;
6612 #endif
6613 	scp.loopback_scope = stc.loopback_scope;
6614 	scp.ipv4_local_scope = stc.ipv4_scope;
6615 	scp.local_scope = stc.local_scope;
6616 	scp.site_scope = stc.site_scope;
6617 	m_last = sctp_add_addresses_to_i_ia(inp, stcb, &scp, m_last,
6618 	                                    cnt_inits_to,
6619 	                                    &padding_len, &chunk_len);
6620 	/* padding_len can only be positive if no addresses have been added */
6621 	if (padding_len > 0) {
6622 		memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
6623 		chunk_len += padding_len;
6624 		SCTP_BUF_LEN(m) += padding_len;
6625 		padding_len = 0;
6626 	}
6627 
6628 	/* tack on the operational error if present */
6629 	if (op_err) {
6630 		parameter_len = 0;
6631 		for (m_tmp = op_err; m_tmp != NULL; m_tmp = SCTP_BUF_NEXT(m_tmp)) {
6632 			parameter_len += SCTP_BUF_LEN(m_tmp);
6633 		}
6634 		padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
6635 		SCTP_BUF_NEXT(m_last) = op_err;
6636 		while (SCTP_BUF_NEXT(m_last) != NULL) {
6637 			m_last = SCTP_BUF_NEXT(m_last);
6638 		}
6639 		chunk_len += parameter_len;
6640 	}
6641 	if (padding_len > 0) {
6642 		m_last = sctp_add_pad_tombuf(m_last, padding_len);
6643 		if (m_last == NULL) {
6644 			/* Houston we have a problem, no space */
6645 			sctp_m_freem(m);
6646 			return;
6647 		}
6648 		chunk_len += padding_len;
6649 		padding_len = 0;
6650 	}
6651 	/* Now we must build a cookie */
6652 	m_cookie = sctp_add_cookie(init_pkt, offset, m, 0, &stc, &signature);
6653 	if (m_cookie == NULL) {
6654 		/* memory problem */
6655 		sctp_m_freem(m);
6656 		return;
6657 	}
6658 	/* Now append the cookie to the end and update the space/size */
6659 	SCTP_BUF_NEXT(m_last) = m_cookie;
6660 	parameter_len = 0;
6661 	for (m_tmp = m_cookie; m_tmp != NULL; m_tmp = SCTP_BUF_NEXT(m_tmp)) {
6662 		parameter_len += SCTP_BUF_LEN(m_tmp);
6663 		if (SCTP_BUF_NEXT(m_tmp) == NULL) {
6664 			m_last = m_tmp;
6665 		}
6666 	}
6667 	padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
6668 	chunk_len += parameter_len;
6669 
6670 	/* Place in the size, but we don't include
6671 	 * the last pad (if any) in the INIT-ACK.
6672 	 */
6673 	initack->ch.chunk_length = htons(chunk_len);
6674 
6675 	/* Time to sign the cookie. We don't sign over the cookie
6676 	 * signature itself, thus we set the trailer.
6677 	 */
6678 	(void)sctp_hmac_m(SCTP_HMAC,
6679 			  (uint8_t *)inp->sctp_ep.secret_key[(int)(inp->sctp_ep.current_secret_number)],
6680 			  SCTP_SECRET_SIZE, m_cookie, sizeof(struct sctp_paramhdr),
6681 			  (uint8_t *)signature, SCTP_SIGNATURE_SIZE);
6682 	/*
6683 	 * We pass 0 here to NOT set IP_DF if it's IPv4; we ignore the return
6684 	 * here since the timer will drive a retransmission.
6685 	 */
6686 	if (padding_len > 0) {
6687 		if (sctp_add_pad_tombuf(m_last, padding_len) == NULL) {
6688 			sctp_m_freem(m);
6689 			return;
6690 		}
6691 	}
6692 	if (stc.loopback_scope) {
6693 		over_addr = (union sctp_sockstore *)dst;
6694 	} else {
6695 		over_addr = NULL;
6696 	}
6697 
6698 	if ((error = sctp_lowlevel_chunk_output(inp, NULL, NULL, to, m, 0, NULL, 0, 0,
6699 	                                        0, 0,
6700 	                                        inp->sctp_lport, sh->src_port, init_chk->init.initiate_tag,
6701 	                                        port, over_addr,
6702 #if defined(__FreeBSD__)
6703 	                                        mflowtype, mflowid,
6704 #endif
6705 	                                        SCTP_SO_NOT_LOCKED))) {
6706 		SCTPDBG(SCTP_DEBUG_OUTPUT4, "Gak send error %d\n", error);
6707 		if (error == ENOBUFS) {
6708 			if (asoc != NULL) {
6709 				asoc->ifp_had_enobuf = 1;
6710 			}
6711 			SCTP_STAT_INCR(sctps_lowlevelerr);
6712 		}
6713 	} else {
6714 		if (asoc != NULL) {
6715 			asoc->ifp_had_enobuf = 0;
6716 		}
6717 	}
6718 	SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
6719 }
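/*
 * Editorial sketch (not part of the original source): the INIT-ACK built
 * above is laid out roughly as
 *
 *	INIT-ACK chunk header (chunk_length filled in at the end)
 *	  (optional) adaptation layer indication
 *	  (optional) ECN, PR-SCTP, NAT-friendly parameter headers
 *	  (optional) supported chunk extensions
 *	  (optional) RANDOM / HMAC-ALGO / CHUNKS authentication parameters
 *	  local addresses (sctp_add_addresses_to_i_ia)
 *	  (optional) unrecognized-parameter error causes (op_err)
 *	  STATE-COOKIE parameter, signed last; the signature bytes are not
 *	  covered by the HMAC
 *
 * with every parameter padded to a 4-byte boundary and ch.chunk_length set
 * to the unpadded total.
 */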
6720 
6721 
6722 static void
6723 sctp_prune_prsctp(struct sctp_tcb *stcb,
6724     struct sctp_association *asoc,
6725     struct sctp_sndrcvinfo *srcv,
6726     int dataout)
6727 {
6728 	int freed_spc = 0;
6729 	struct sctp_tmit_chunk *chk, *nchk;
6730 
6731 	SCTP_TCB_LOCK_ASSERT(stcb);
6732 	if ((asoc->prsctp_supported) &&
6733 	    (asoc->sent_queue_cnt_removeable > 0)) {
6734 		TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
6735 			/*
6736 			 * Look for chunks marked with the PR_SCTP flag AND
6737 			 * the buffer space flag. If the one being sent is of
6738 			 * equal or greater priority, then purge the old one
6739 			 * and free some space.
6740 			 */
6741 			if (PR_SCTP_BUF_ENABLED(chk->flags)) {
6742 				/*
6743 				 * This one is PR-SCTP AND buffer space
6744 				 * limited type
6745 				 */
6746 				if (chk->rec.data.timetodrop.tv_sec > (long)srcv->sinfo_timetolive) {
6747 					/*
6748 					 * Lower numbers equate to higher
6749 					 * priority. So if the one we are
6750 					 * looking at has a larger value
6751 					 * (lower priority), we want to drop
6752 					 * the data and NOT retransmit it.
6753 					 */
6754 					if (chk->data) {
6755 						/*
6756 						 * We release the book_size
6757 						 * if the mbuf is here
6758 						 */
6759 						int ret_spc;
6760 						uint8_t sent;
6761 
6762 						if (chk->sent > SCTP_DATAGRAM_UNSENT)
6763 							sent = 1;
6764 						else
6765 							sent = 0;
6766 						ret_spc = sctp_release_pr_sctp_chunk(stcb, chk,
6767 						    sent,
6768 						    SCTP_SO_LOCKED);
6769 						freed_spc += ret_spc;
6770 						if (freed_spc >= dataout) {
6771 							return;
6772 						}
6773 					}	/* if chunk was present */
6774 				}	/* if of sufficient priority */
6775 			}	/* if chunk has enabled */
6776 		}		/* tailqforeach */
6777 
6778 		TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
6779 			/* Here we must move to the sent queue and mark */
6780 			if (PR_SCTP_BUF_ENABLED(chk->flags)) {
6781 				if (chk->rec.data.timetodrop.tv_sec > (long)srcv->sinfo_timetolive) {
6782 					if (chk->data) {
6783 						/*
6784 						 * We release the book_size
6785 						 * if the mbuf is here
6786 						 */
6787 						int ret_spc;
6788 
6789 						ret_spc = sctp_release_pr_sctp_chunk(stcb, chk,
6790 						    0, SCTP_SO_LOCKED);
6791 
6792 						freed_spc += ret_spc;
6793 						if (freed_spc >= dataout) {
6794 							return;
6795 						}
6796 					}	/* end if chk->data */
6797 				}	/* end if right class */
6798 			}	/* end if chk pr-sctp */
6799 		}		/* tailqforeachsafe (chk) */
6800 	}			/* if enabled in asoc */
6801 }
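/*
 * Editorial note (illustrative): pruning walks the sent_queue first and
 * only then the send_queue, so data that is already in flight is released
 * before unsent data is dropped, and it returns as soon as freed_spc
 * covers the "dataout" bytes the new send needs.
 */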
6802 
6803 int
6804 sctp_get_frag_point(struct sctp_tcb *stcb,
6805     struct sctp_association *asoc)
6806 {
6807 	int siz, ovh;
6808 
6809 	/*
6810 	 * For endpoints that have both v6 and v4 addresses we must reserve
6811 	 * room for the IPv6 header; for those that are only dealing with v4
6812 	 * we use a larger frag point.
6813 	 */
6814 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
6815 		ovh = SCTP_MIN_OVERHEAD;
6816 	} else {
6817 #if defined(__Userspace__)
6818 		if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_CONN) {
6819 			ovh = sizeof(struct sctphdr);
6820 		} else {
6821 			ovh = SCTP_MIN_V4_OVERHEAD;
6822 		}
6823 #else
6824 		ovh = SCTP_MIN_V4_OVERHEAD;
6825 #endif
6826 	}
6827 	ovh += SCTP_DATA_CHUNK_OVERHEAD(stcb);
6828 	if (stcb->asoc.sctp_frag_point > asoc->smallest_mtu)
6829 		siz = asoc->smallest_mtu - ovh;
6830 	else
6831 		siz = (stcb->asoc.sctp_frag_point - ovh);
6832 	/*
6833 	 * if (siz > (MCLBYTES-sizeof(struct sctp_data_chunk))) {
6834 	 */
6835 	/* A data chunk MUST fit in a cluster */
6836 	/* siz = (MCLBYTES - sizeof(struct sctp_data_chunk)); */
6837 	/* } */
6838 
6839 	/* adjust for an AUTH chunk if DATA requires auth */
6840 	if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks))
6841 		siz -= sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
6842 
6843 	if (siz % 4) {
6844 		/* make it an even word boundary please */
6845 		siz -= (siz % 4);
6846 	}
6847 	return (siz);
6848 }
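/*
 * Worked example (editorial addition, values assumed for illustration):
 * for a v4-only FreeBSD endpoint with smallest_mtu = 1500 and a configured
 * sctp_frag_point larger than the MTU, the fragment size is
 *
 *	siz = 1500 - SCTP_MIN_V4_OVERHEAD - SCTP_DATA_CHUNK_OVERHEAD(stcb)
 *
 * reduced further by the AUTH chunk length if DATA must be authenticated,
 * and finally rounded down to a multiple of 4 by the "siz % 4" check.
 */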
6849 
6850 static void
6851 sctp_set_prsctp_policy(struct sctp_stream_queue_pending *sp)
6852 {
6853 	/*
6854 	 * We assume that the user wants PR_SCTP_TTL if the user
6855 	 * provides a positive lifetime but does not specify any
6856 	 * PR_SCTP policy.
6857 	 */
6858 	if (PR_SCTP_ENABLED(sp->sinfo_flags)) {
6859 		sp->act_flags |= PR_SCTP_POLICY(sp->sinfo_flags);
6860 	} else if (sp->timetolive > 0) {
6861 		sp->sinfo_flags |= SCTP_PR_SCTP_TTL;
6862 		sp->act_flags |= PR_SCTP_POLICY(sp->sinfo_flags);
6863 	} else {
6864 		return;
6865 	}
6866 	switch (PR_SCTP_POLICY(sp->sinfo_flags)) {
6867 	case CHUNK_FLAGS_PR_SCTP_BUF:
6868 		/*
6869 		 * Time to live is a priority stored in tv_sec when
6870 		 * doing buffer-space-based dropping.
6871 		 */
6872 		sp->ts.tv_sec = sp->timetolive;
6873 		sp->ts.tv_usec = 0;
6874 		break;
6875 	case CHUNK_FLAGS_PR_SCTP_TTL:
6876 	{
6877 		struct timeval tv;
6878 		(void)SCTP_GETTIME_TIMEVAL(&sp->ts);
6879 		tv.tv_sec = sp->timetolive / 1000;
6880 		tv.tv_usec = (sp->timetolive * 1000) % 1000000;
6881 		/* TODO sctp_constants.h needs alternative time macros when
6882 		 *  _KERNEL is undefined.
6883 		 */
6884 #ifndef __FreeBSD__
6885 		timeradd(&sp->ts, &tv, &sp->ts);
6886 #else
6887 		timevaladd(&sp->ts, &tv);
6888 #endif
6889 	}
6890 		break;
6891 	case CHUNK_FLAGS_PR_SCTP_RTX:
6892 		/*
6893 		 * Time to live is the number of retransmissions
6894 		 * stored in tv_sec.
6895 		 */
6896 		sp->ts.tv_sec = sp->timetolive;
6897 		sp->ts.tv_usec = 0;
6898 		break;
6899 	default:
6900 		SCTPDBG(SCTP_DEBUG_USRREQ1,
6901 			"Unknown PR_SCTP policy %u.\n",
6902 			PR_SCTP_POLICY(sp->sinfo_flags));
6903 		break;
6904 	}
6905 }
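/*
 * Worked example (editorial addition): for PR_SCTP_TTL the lifetime is
 * given in milliseconds, so timetolive = 1500 becomes
 *
 *	tv.tv_sec  = 1500 / 1000             = 1
 *	tv.tv_usec = (1500 * 1000) % 1000000 = 500000
 *
 * which is added to the current time in sp->ts to form the absolute drop
 * deadline.  For the BUF and RTX policies the raw value is kept in
 * ts.tv_sec as a priority or retransmission limit, respectively.
 */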
6906 
6907 static int
6908 sctp_msg_append(struct sctp_tcb *stcb,
6909 		struct sctp_nets *net,
6910 		struct mbuf *m,
6911 		struct sctp_sndrcvinfo *srcv, int hold_stcb_lock)
6912 {
6913 	int error = 0;
6914 	struct mbuf *at;
6915 	struct sctp_stream_queue_pending *sp = NULL;
6916 	struct sctp_stream_out *strm;
6917 
6918 	/* Given an mbuf chain, put it
6919 	 * into the association send queue and
6920 	 * place it on the wheel
6921 	 */
6922 	if (srcv->sinfo_stream >= stcb->asoc.streamoutcnt) {
6923 		/* Invalid stream number */
6924 		SCTP_LTRACE_ERR_RET_PKT(m, NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
6925 		error = EINVAL;
6926 		goto out_now;
6927 	}
6928 	if ((stcb->asoc.stream_locked) &&
6929 	    (stcb->asoc.stream_locked_on != srcv->sinfo_stream)) {
6930 		SCTP_LTRACE_ERR_RET_PKT(m, NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
6931 		error = EINVAL;
6932 		goto out_now;
6933 	}
6934 	strm = &stcb->asoc.strmout[srcv->sinfo_stream];
6935 	/* Now can we send this? */
6936 	if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) ||
6937 	    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
6938 	    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
6939 	    (stcb->asoc.state & SCTP_STATE_SHUTDOWN_PENDING)) {
6940 		/* got data while shutting down */
6941 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
6942 		error = ECONNRESET;
6943 		goto out_now;
6944 	}
6945 	sctp_alloc_a_strmoq(stcb, sp);
6946 	if (sp == NULL) {
6947 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
6948 		error = ENOMEM;
6949 		goto out_now;
6950 	}
6951 	sp->sinfo_flags = srcv->sinfo_flags;
6952 	sp->timetolive = srcv->sinfo_timetolive;
6953 	sp->ppid = srcv->sinfo_ppid;
6954 	sp->context = srcv->sinfo_context;
6955 	sp->fsn = 0;
6956 	if (sp->sinfo_flags & SCTP_ADDR_OVER) {
6957 		sp->net = net;
6958 		atomic_add_int(&sp->net->ref_count, 1);
6959 	} else {
6960 		sp->net = NULL;
6961 	}
6962 	(void)SCTP_GETTIME_TIMEVAL(&sp->ts);
6963 	sp->sid = srcv->sinfo_stream;
6964 	sp->msg_is_complete = 1;
6965 	sp->sender_all_done = 1;
6966 	sp->some_taken = 0;
6967 	sp->data = m;
6968 	sp->tail_mbuf = NULL;
6969 	sctp_set_prsctp_policy(sp);
6970 	/* We could in theory (for sendall) pass the length
6971 	 * in, but we would still have to hunt through the
6972 	 * chain since we need to set up the tail_mbuf.
6973 	 */
6974 	sp->length = 0;
6975 	for (at = m; at; at = SCTP_BUF_NEXT(at)) {
6976 		if (SCTP_BUF_NEXT(at) == NULL)
6977 			sp->tail_mbuf = at;
6978 		sp->length += SCTP_BUF_LEN(at);
6979 	}
6980 	if (srcv->sinfo_keynumber_valid) {
6981 		sp->auth_keyid = srcv->sinfo_keynumber;
6982 	} else {
6983 		sp->auth_keyid = stcb->asoc.authinfo.active_keyid;
6984 	}
6985 	if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks)) {
6986 		sctp_auth_key_acquire(stcb, sp->auth_keyid);
6987 		sp->holds_key_ref = 1;
6988 	}
6989 	if (hold_stcb_lock == 0) {
6990 		SCTP_TCB_SEND_LOCK(stcb);
6991 	}
6992 	sctp_snd_sb_alloc(stcb, sp->length);
6993 	atomic_add_int(&stcb->asoc.stream_queue_cnt, 1);
6994 	TAILQ_INSERT_TAIL(&strm->outqueue, sp, next);
6995 	stcb->asoc.ss_functions.sctp_ss_add_to_stream(stcb, &stcb->asoc, strm, sp, 1);
6996 	m = NULL;
6997 	if (hold_stcb_lock == 0) {
6998 		SCTP_TCB_SEND_UNLOCK(stcb);
6999 	}
7000 out_now:
7001 	if (m) {
7002 		sctp_m_freem(m);
7003 	}
7004 	return (error);
7005 }
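/*
 * Editorial note (illustrative): on success sctp_msg_append() consumes the
 * mbuf chain - it is hung off a stream_queue_pending entry, charged to the
 * send buffer via sctp_snd_sb_alloc(), and handed to the stream scheduler
 * through sctp_ss_add_to_stream(); the local "m" is set to NULL so the
 * out_now path does not free it.  On any error the chain is freed here and
 * an errno-style value is returned to the caller.
 */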
7006 
7007 
7008 static struct mbuf *
7009 sctp_copy_mbufchain(struct mbuf *clonechain,
7010 		    struct mbuf *outchain,
7011 		    struct mbuf **endofchain,
7012 		    int can_take_mbuf,
7013 		    int sizeofcpy,
7014 		    uint8_t copy_by_ref)
7015 {
7016 	struct mbuf *m;
7017 	struct mbuf *appendchain;
7018 	caddr_t cp;
7019 	int len;
7020 
7021 	if (endofchain == NULL) {
7022 		/* error */
7023 	error_out:
7024 		if (outchain)
7025 			sctp_m_freem(outchain);
7026 		return (NULL);
7027 	}
7028 	if (can_take_mbuf) {
7029 		appendchain = clonechain;
7030 	} else {
7031 		if (!copy_by_ref &&
7032 #if defined(__Panda__)
7033 		    0
7034 #else
7035 		    (sizeofcpy <= (int)((((SCTP_BASE_SYSCTL(sctp_mbuf_threshold_count) - 1) * MLEN) + MHLEN)))
7036 #endif
7037 		    ) {
7038 			/* It's not in a cluster */
7039 			if (*endofchain == NULL) {
7040 				/* let's get an mbuf cluster */
7041 				if (outchain == NULL) {
7042 					/* This is the general case */
7043 				new_mbuf:
7044 					outchain = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_HEADER);
7045 					if (outchain == NULL) {
7046 						goto error_out;
7047 					}
7048 					SCTP_BUF_LEN(outchain) = 0;
7049 					*endofchain = outchain;
7050 					/* get the prepend space */
7051 					SCTP_BUF_RESV_UF(outchain, (SCTP_FIRST_MBUF_RESV+4));
7052 				} else {
7053 					/* We really should not get a NULL in endofchain */
7054 					/* find end */
7055 					m = outchain;
7056 					while (m) {
7057 						if (SCTP_BUF_NEXT(m) == NULL) {
7058 							*endofchain = m;
7059 							break;
7060 						}
7061 						m = SCTP_BUF_NEXT(m);
7062 					}
7063 					/* sanity */
7064 					if (*endofchain == NULL) {
7065 						/* huh, TSNH XXX maybe we should panic */
7066 						sctp_m_freem(outchain);
7067 						goto new_mbuf;
7068 					}
7069 				}
7070 				/* get the new end of length */
7071 				len = (int)M_TRAILINGSPACE(*endofchain);
7072 			} else {
7073 				/* how much is left at the end? */
7074 				len = (int)M_TRAILINGSPACE(*endofchain);
7075 			}
7076 			/* Find the end of the data, for appending */
7077 			cp = (mtod((*endofchain), caddr_t) + SCTP_BUF_LEN((*endofchain)));
7078 
7079 			/* Now lets copy it out */
7080 			if (len >= sizeofcpy) {
7081 				/* It all fits, copy it in */
7082 				m_copydata(clonechain, 0, sizeofcpy, cp);
7083 				SCTP_BUF_LEN((*endofchain)) += sizeofcpy;
7084 			} else {
7085 				/* fill up the end of the chain */
7086 				if (len > 0) {
7087 					m_copydata(clonechain, 0, len, cp);
7088 					SCTP_BUF_LEN((*endofchain)) += len;
7089 					/* now we need another one */
7090 					sizeofcpy -= len;
7091 				}
7092 				m = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_HEADER);
7093 				if (m == NULL) {
7094 					/* We failed */
7095 					goto error_out;
7096 				}
7097 				SCTP_BUF_NEXT((*endofchain)) = m;
7098 				*endofchain = m;
7099 				cp = mtod((*endofchain), caddr_t);
7100 				m_copydata(clonechain, len, sizeofcpy, cp);
7101 				SCTP_BUF_LEN((*endofchain)) += sizeofcpy;
7102 			}
7103 			return (outchain);
7104 		} else {
7105 			/* copy the old-fashioned way */
7106 			appendchain = SCTP_M_COPYM(clonechain, 0, M_COPYALL, M_NOWAIT);
7107 #ifdef SCTP_MBUF_LOGGING
7108 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
7109 				sctp_log_mbc(appendchain, SCTP_MBUF_ICOPY);
7110 			}
7111 #endif
7112 		}
7113 	}
7114 	if (appendchain == NULL) {
7115 		/* error */
7116 		if (outchain)
7117 			sctp_m_freem(outchain);
7118 		return (NULL);
7119 	}
7120 	if (outchain) {
7121 		/* tack on to the end */
7122 		if (*endofchain != NULL) {
7123 			SCTP_BUF_NEXT(((*endofchain))) = appendchain;
7124 		} else {
7125 			m = outchain;
7126 			while (m) {
7127 				if (SCTP_BUF_NEXT(m) == NULL) {
7128 					SCTP_BUF_NEXT(m) = appendchain;
7129 					break;
7130 				}
7131 				m = SCTP_BUF_NEXT(m);
7132 			}
7133 		}
7134 		/*
7135 		 * save off the end and update the end-chain
7136 		 * position
7137 		 */
7138 		m = appendchain;
7139 		while (m) {
7140 			if (SCTP_BUF_NEXT(m) == NULL) {
7141 				*endofchain = m;
7142 				break;
7143 			}
7144 			m = SCTP_BUF_NEXT(m);
7145 		}
7146 		return (outchain);
7147 	} else {
7148 		/* save off the end and update the end-chain position */
7149 		m = appendchain;
7150 		while (m) {
7151 			if (SCTP_BUF_NEXT(m) == NULL) {
7152 				*endofchain = m;
7153 				break;
7154 			}
7155 			m = SCTP_BUF_NEXT(m);
7156 		}
7157 		return (appendchain);
7158 	}
7159 }
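/*
 * Editorial note (illustrative, not part of the original source): the
 * copy-by-value path above is only taken when copy_by_ref is off and the
 * payload fits the configured threshold, i.e. roughly
 *
 *	sizeofcpy <= (sctp_mbuf_threshold_count - 1) * MLEN + MHLEN
 *
 * in which case the data is appended into the trailing space of the
 * existing chain.  Larger payloads (or copy_by_ref chains) are duplicated
 * with SCTP_M_COPYM(), which shares clusters by reference instead of
 * copying the data bytes.
 */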
7160 
7161 static int
7162 sctp_med_chunk_output(struct sctp_inpcb *inp,
7163 		      struct sctp_tcb *stcb,
7164 		      struct sctp_association *asoc,
7165 		      int *num_out,
7166 		      int *reason_code,
7167 		      int control_only, int from_where,
7168 		      struct timeval *now, int *now_filled, int frag_point, int so_locked
7169 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
7170 		      SCTP_UNUSED
7171 #endif
7172                       );
7173 
7174 static void
7175 sctp_sendall_iterator(struct sctp_inpcb *inp, struct sctp_tcb *stcb, void *ptr,
7176     uint32_t val SCTP_UNUSED)
7177 {
7178 	struct sctp_copy_all *ca;
7179 	struct mbuf *m;
7180 	int ret = 0;
7181 	int added_control = 0;
7182 	int un_sent, do_chunk_output = 1;
7183 	struct sctp_association *asoc;
7184 	struct sctp_nets *net;
7185 
7186 	ca = (struct sctp_copy_all *)ptr;
7187 	if (ca->m == NULL) {
7188 		return;
7189 	}
7190 	if (ca->inp != inp) {
7191 		/* TSNH */
7192 		return;
7193 	}
7194 	if (ca->sndlen > 0) {
7195 		m = SCTP_M_COPYM(ca->m, 0, M_COPYALL, M_NOWAIT);
7196 		if (m == NULL) {
7197 			/* can't copy so we are done */
7198 			ca->cnt_failed++;
7199 			return;
7200 		}
7201 #ifdef SCTP_MBUF_LOGGING
7202 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
7203 			sctp_log_mbc(m, SCTP_MBUF_ICOPY);
7204 		}
7205 #endif
7206 	} else {
7207 		m = NULL;
7208 	}
7209 	SCTP_TCB_LOCK_ASSERT(stcb);
7210 	if (stcb->asoc.alternate) {
7211 		net = stcb->asoc.alternate;
7212 	} else {
7213 		net = stcb->asoc.primary_destination;
7214 	}
7215 	if (ca->sndrcv.sinfo_flags & SCTP_ABORT) {
7216 		/* Abort this assoc with m as the user defined reason */
7217 		if (m != NULL) {
7218 			SCTP_BUF_PREPEND(m, sizeof(struct sctp_paramhdr), M_NOWAIT);
7219 		} else {
7220 			m = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
7221 			                          0, M_NOWAIT, 1, MT_DATA);
7222 			SCTP_BUF_LEN(m) = sizeof(struct sctp_paramhdr);
7223 		}
7224 		if (m != NULL) {
7225 			struct sctp_paramhdr *ph;
7226 
7227 			ph = mtod(m, struct sctp_paramhdr *);
7228 			ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
7229 			ph->param_length = htons((uint16_t)(sizeof(struct sctp_paramhdr) + ca->sndlen));
7230 		}
7231 		/* We add one here to keep the assoc from
7232 		 * disappearing on us.
7233 		 */
7234 		atomic_add_int(&stcb->asoc.refcnt, 1);
7235 		sctp_abort_an_association(inp, stcb, m, SCTP_SO_NOT_LOCKED);
7236 		/* sctp_abort_an_association calls sctp_free_asoc(),
7237 		 * which will NOT free the association because we
7238 		 * incremented the refcnt. We do this to prevent it
7239 		 * from being freed and things getting tricky, since
7240 		 * (from free_asoc) we could end up calling inpcb_free,
7241 		 * which would take a recursive lock on the iterator
7242 		 * lock. As a consequence the stcb comes back to us
7243 		 * unlocked; since free_asoc returns with either no TCB
7244 		 * or the TCB unlocked, we must relock so we can unlock
7245 		 * in the iterator timer. :-0
7246 		 */
7247 		SCTP_TCB_LOCK(stcb);
7248 		atomic_add_int(&stcb->asoc.refcnt, -1);
7249 		goto no_chunk_output;
7250 	} else {
7251 		if (m) {
7252 			ret = sctp_msg_append(stcb, net, m,
7253 					      &ca->sndrcv, 1);
7254 		}
7255 		asoc = &stcb->asoc;
7256 		if (ca->sndrcv.sinfo_flags & SCTP_EOF) {
7257 			/* shutdown this assoc */
7258 			if (TAILQ_EMPTY(&asoc->send_queue) &&
7259 			    TAILQ_EMPTY(&asoc->sent_queue) &&
7260 			    sctp_is_there_unsent_data(stcb, SCTP_SO_NOT_LOCKED) == 0) {
7261 				if ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete)(stcb, asoc)) {
7262 					goto abort_anyway;
7263 				}
7264 				/* there is nothing queued to send, so I'm done... */
7265 				if ((SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_SENT) &&
7266 				    (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
7267 				    (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
7268 					/* only send SHUTDOWN the first time through */
7269 					if (SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) {
7270 						SCTP_STAT_DECR_GAUGE32(sctps_currestab);
7271 					}
7272 					SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
7273 					sctp_stop_timers_for_shutdown(stcb);
7274 					sctp_send_shutdown(stcb, net);
7275 					sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb,
7276 							 net);
7277 					sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
7278 					                 NULL);
7279 					added_control = 1;
7280 					do_chunk_output = 0;
7281 				}
7282 			} else {
7283 				/*
7284 				 * we still got (or just got) data to send, so set
7285 				 * SHUTDOWN_PENDING
7286 				 */
7287 				/*
7288 				 * XXX sockets draft says that SCTP_EOF should be
7289 				 * sent with no data.  currently, we will allow user
7290 				 * data to be sent first and move to
7291 				 * SHUTDOWN-PENDING
7292 				 */
7293 				if ((SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_SENT) &&
7294 				    (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
7295 				    (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
7296 					if ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete)(stcb, asoc)) {
7297 						SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT);
7298 					}
7299 					SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_SHUTDOWN_PENDING);
7300 					if (TAILQ_EMPTY(&asoc->send_queue) &&
7301 					    TAILQ_EMPTY(&asoc->sent_queue) &&
7302 					    (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
7303 						struct mbuf *op_err;
7304 						char msg[SCTP_DIAG_INFO_LEN];
7305 
7306 					abort_anyway:
7307 						SCTP_SNPRINTF(msg, sizeof(msg),
7308 						              "%s:%d at %s", __FILE__, __LINE__, __func__);
7309 						op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
7310 						                             msg);
7311 						atomic_add_int(&stcb->asoc.refcnt, 1);
7312 						sctp_abort_an_association(stcb->sctp_ep, stcb,
7313 									  op_err, SCTP_SO_NOT_LOCKED);
7314 						atomic_add_int(&stcb->asoc.refcnt, -1);
7315 						goto no_chunk_output;
7316 					}
7317 					sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
7318 					                 NULL);
7319 				}
7320 			}
7321 
7322 		}
7323 	}
7324 	un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
7325 		   (stcb->asoc.stream_queue_cnt * SCTP_DATA_CHUNK_OVERHEAD(stcb)));
7326 
7327 	if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
7328 	    (stcb->asoc.total_flight > 0) &&
7329 	    (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))) {
7330 		do_chunk_output = 0;
7331 	}
7332 	if (do_chunk_output)
7333 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_NOT_LOCKED);
7334 	else if (added_control) {
7335 		int num_out, reason, now_filled = 0;
7336 		struct timeval now;
7337 		int frag_point;
7338 
7339 		frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
7340 		(void)sctp_med_chunk_output(inp, stcb, &stcb->asoc, &num_out,
7341 				      &reason, 1, 1, &now, &now_filled, frag_point, SCTP_SO_NOT_LOCKED);
7342 	}
7343  no_chunk_output:
7344 	if (ret) {
7345 		ca->cnt_failed++;
7346 	} else {
7347 		ca->cnt_sent++;
7348 	}
7349 }
7350 
7351 static void
7352 sctp_sendall_completes(void *ptr, uint32_t val SCTP_UNUSED)
7353 {
7354 	struct sctp_copy_all *ca;
7355 
7356 	ca = (struct sctp_copy_all *)ptr;
7357 	/*
7358 	 * Do a notify here? Kacheong suggests that the notify be done at
7359 	 * send time, so you would push up a notification if any send
7360 	 * failed. It is unclear whether this is feasible, since the only
7361 	 * failures we have are "memory" related, and if you cannot get an
7362 	 * mbuf to send the data you surely can't get an mbuf to send up a
7363 	 * notification that you can't send the data :->
7364 	 */
7365 
7366 	/* now free everything */
7367 	if (ca->inp) {
7368 		/* Let's clear the flag to allow others to run. */
7369 		ca->inp->sctp_flags &= ~SCTP_PCB_FLAGS_SND_ITERATOR_UP;
7370 	}
7371 	sctp_m_freem(ca->m);
7372 	SCTP_FREE(ca, SCTP_M_COPYAL);
7373 }
7374 
7375 static struct mbuf *
7376 sctp_copy_out_all(struct uio *uio, ssize_t len)
7377 {
7378 	struct mbuf *ret, *at;
7379 	ssize_t left, willcpy, cancpy, error;
7380 
7381 	ret = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_WAITOK, 1, MT_DATA);
7382 	if (ret == NULL) {
7383 		/* TSNH */
7384 		return (NULL);
7385 	}
7386 	left = len;
7387 	SCTP_BUF_LEN(ret) = 0;
7388 	/* save space for the data chunk header */
7389 	cancpy = (int)M_TRAILINGSPACE(ret);
7390 	willcpy = min(cancpy, left);
7391 	at = ret;
7392 	while (left > 0) {
7393 		/* Align data to the end */
7394 		error = uiomove(mtod(at, caddr_t), (int)willcpy, uio);
7395 		if (error) {
7396 	err_out_now:
7397 			sctp_m_freem(at);
7398 			return (NULL);
7399 		}
7400 		SCTP_BUF_LEN(at) = (int)willcpy;
7401 		SCTP_BUF_NEXT_PKT(at) = SCTP_BUF_NEXT(at) = 0;
7402 		left -= willcpy;
7403 		if (left > 0) {
7404 			SCTP_BUF_NEXT(at) = sctp_get_mbuf_for_msg((unsigned int)left, 0, M_WAITOK, 1, MT_DATA);
7405 			if (SCTP_BUF_NEXT(at) == NULL) {
7406 				goto err_out_now;
7407 			}
7408 			at = SCTP_BUF_NEXT(at);
7409 			SCTP_BUF_LEN(at) = 0;
7410 			cancpy = (int)M_TRAILINGSPACE(at);
7411 			willcpy = min(cancpy, left);
7412 		}
7413 	}
7414 	return (ret);
7415 }
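/*
 * Editorial note (illustrative): sctp_copy_out_all() is used by the
 * SCTP_SENDALL path to snapshot the user data once; that snapshot is then
 * cloned per association by sctp_sendall_iterator() via SCTP_M_COPYM().
 * The first mbuf is requested at cluster size (MCLBYTES), each follow-up
 * allocation is sized to whatever data is still left, and uiomove() fills
 * every buffer up to its trailing space.
 */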
7416 
7417 static int
7418 sctp_sendall(struct sctp_inpcb *inp, struct uio *uio, struct mbuf *m,
7419     struct sctp_sndrcvinfo *srcv)
7420 {
7421 	int ret;
7422 	struct sctp_copy_all *ca;
7423 
7424 	if (inp->sctp_flags & SCTP_PCB_FLAGS_SND_ITERATOR_UP) {
7425 		/* There is another. */
7426 		return (EBUSY);
7427 	}
7428 #if defined(__APPLE__)
7429 #if defined(APPLE_LEOPARD)
7430 	if (uio->uio_resid > SCTP_BASE_SYSCTL(sctp_sendall_limit)) {
7431 #else
7432 	if (uio_resid(uio) > SCTP_BASE_SYSCTL(sctp_sendall_limit)) {
7433 #endif
7434 #else
7435 	if (uio->uio_resid > (ssize_t)SCTP_BASE_SYSCTL(sctp_sendall_limit)) {
7436 #endif
7437 		/* You must not be larger than the limit! */
7438 		return (EMSGSIZE);
7439 	}
7440 	SCTP_MALLOC(ca, struct sctp_copy_all *, sizeof(struct sctp_copy_all),
7441 		    SCTP_M_COPYAL);
7442 	if (ca == NULL) {
7443 		sctp_m_freem(m);
7444 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
7445 		return (ENOMEM);
7446 	}
7447 	memset(ca, 0, sizeof(struct sctp_copy_all));
7448 
7449 	ca->inp = inp;
7450 	if (srcv) {
7451 		memcpy(&ca->sndrcv, srcv, sizeof(struct sctp_nonpad_sndrcvinfo));
7452 	}
7453 	/*
7454 	 * take off the sendall flag, it would be bad if we failed to do
7455 	 * this :-0
7456 	 */
7457 	ca->sndrcv.sinfo_flags &= ~SCTP_SENDALL;
7458 	/* get length and mbuf chain */
7459 	if (uio) {
7460 #if defined(__APPLE__)
7461 #if defined(APPLE_LEOPARD)
7462 		ca->sndlen = uio->uio_resid;
7463 #else
7464 		ca->sndlen = uio_resid(uio);
7465 #endif
7466 #else
7467 		ca->sndlen = uio->uio_resid;
7468 #endif
7469 #if defined(__APPLE__)
7470 		SCTP_SOCKET_UNLOCK(SCTP_INP_SO(inp), 0);
7471 #endif
7472 		ca->m = sctp_copy_out_all(uio, ca->sndlen);
7473 #if defined(__APPLE__)
7474 		SCTP_SOCKET_LOCK(SCTP_INP_SO(inp), 0);
7475 #endif
7476 		if (ca->m == NULL) {
7477 			SCTP_FREE(ca, SCTP_M_COPYAL);
7478 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
7479 			return (ENOMEM);
7480 		}
7481 	} else {
7482 		/* Gather the length of the send */
7483 		struct mbuf *mat;
7484 
7485 		ca->sndlen = 0;
7486 		for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
7487 			ca->sndlen += SCTP_BUF_LEN(mat);
7488 		}
7489 	}
7490 	inp->sctp_flags |= SCTP_PCB_FLAGS_SND_ITERATOR_UP;
7491 	ret = sctp_initiate_iterator(NULL, sctp_sendall_iterator, NULL,
7492 				     SCTP_PCB_ANY_FLAGS, SCTP_PCB_ANY_FEATURES,
7493 				     SCTP_ASOC_ANY_STATE,
7494 				     (void *)ca, 0,
7495 				     sctp_sendall_completes, inp, 1);
7496 	if (ret) {
7497 		inp->sctp_flags &= ~SCTP_PCB_FLAGS_SND_ITERATOR_UP;
7498 		SCTP_FREE(ca, SCTP_M_COPYAL);
7499 		SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EFAULT);
7500 		return (EFAULT);
7501 	}
7502 	return (0);
7503 }
7504 
7505 
7506 void
7507 sctp_toss_old_cookies(struct sctp_tcb *stcb, struct sctp_association *asoc)
7508 {
7509 	struct sctp_tmit_chunk *chk, *nchk;
7510 
7511 	TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) {
7512 		if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
7513 			TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
7514 			asoc->ctrl_queue_cnt--;
7515 			if (chk->data) {
7516 				sctp_m_freem(chk->data);
7517 				chk->data = NULL;
7518 			}
7519 			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
7520 		}
7521 	}
7522 }
7523 
7524 void
7525 sctp_toss_old_asconf(struct sctp_tcb *stcb)
7526 {
7527 	struct sctp_association *asoc;
7528 	struct sctp_tmit_chunk *chk, *nchk;
7529 	struct sctp_asconf_chunk *acp;
7530 
7531 	asoc = &stcb->asoc;
7532 	TAILQ_FOREACH_SAFE(chk, &asoc->asconf_send_queue, sctp_next, nchk) {
7533 		/* find SCTP_ASCONF chunk in queue */
7534 		if (chk->rec.chunk_id.id == SCTP_ASCONF) {
7535 			if (chk->data) {
7536 				acp = mtod(chk->data, struct sctp_asconf_chunk *);
7537 				if (SCTP_TSN_GT(ntohl(acp->serial_number), asoc->asconf_seq_out_acked)) {
7538 					/* Not Acked yet */
7539 					break;
7540 				}
7541 			}
7542 			TAILQ_REMOVE(&asoc->asconf_send_queue, chk, sctp_next);
7543 			asoc->ctrl_queue_cnt--;
7544 			if (chk->data) {
7545 				sctp_m_freem(chk->data);
7546 				chk->data = NULL;
7547 			}
7548 			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
7549 		}
7550 	}
7551 }
7552 
7553 
7554 static void
7555 sctp_clean_up_datalist(struct sctp_tcb *stcb,
7556     struct sctp_association *asoc,
7557     struct sctp_tmit_chunk **data_list,
7558     int bundle_at,
7559     struct sctp_nets *net)
7560 {
7561 	int i;
7562 	struct sctp_tmit_chunk *tp1;
7563 
7564 	for (i = 0; i < bundle_at; i++) {
7565 		/* off of the send queue */
7566 		TAILQ_REMOVE(&asoc->send_queue, data_list[i], sctp_next);
7567 		asoc->send_queue_cnt--;
7568 		if (i > 0) {
7569 			/*
7570 			 * For any chunk other than chunk 0, zap the time.
7571 			 * Chunk 0 gets zapped or set based on whether an
7572 			 * RTO measurement is needed.
7573 			 */
7574 			data_list[i]->do_rtt = 0;
7575 		}
7576 		/* record time */
7577 		data_list[i]->sent_rcv_time = net->last_sent_time;
7578 		data_list[i]->rec.data.cwnd_at_send = net->cwnd;
7579 		data_list[i]->rec.data.fast_retran_tsn = data_list[i]->rec.data.tsn;
7580 		if (data_list[i]->whoTo == NULL) {
7581 			data_list[i]->whoTo = net;
7582 			atomic_add_int(&net->ref_count, 1);
7583 		}
7584 		/* on to the sent queue */
7585 		tp1 = TAILQ_LAST(&asoc->sent_queue, sctpchunk_listhead);
7586 		if ((tp1) && SCTP_TSN_GT(tp1->rec.data.tsn, data_list[i]->rec.data.tsn)) {
7587 			struct sctp_tmit_chunk *tpp;
7588 
7589 			/* need to move back */
7590 		back_up_more:
7591 			tpp = TAILQ_PREV(tp1, sctpchunk_listhead, sctp_next);
7592 			if (tpp == NULL) {
7593 				TAILQ_INSERT_BEFORE(tp1, data_list[i], sctp_next);
7594 				goto all_done;
7595 			}
7596 			tp1 = tpp;
7597 			if (SCTP_TSN_GT(tp1->rec.data.tsn, data_list[i]->rec.data.tsn)) {
7598 				goto back_up_more;
7599 			}
7600 			TAILQ_INSERT_AFTER(&asoc->sent_queue, tp1, data_list[i], sctp_next);
7601 		} else {
7602 			TAILQ_INSERT_TAIL(&asoc->sent_queue,
7603 					  data_list[i],
7604 					  sctp_next);
7605 		}
7606 	all_done:
7607 		/* This does not lower until the cum-ack passes it */
7608 		asoc->sent_queue_cnt++;
7609 		if ((asoc->peers_rwnd <= 0) &&
7610 		    (asoc->total_flight == 0) &&
7611 		    (bundle_at == 1)) {
7612 			/* Mark the chunk as being a window probe */
7613 			SCTP_STAT_INCR(sctps_windowprobed);
7614 		}
7615 #ifdef SCTP_AUDITING_ENABLED
7616 		sctp_audit_log(0xC2, 3);
7617 #endif
7618 		data_list[i]->sent = SCTP_DATAGRAM_SENT;
7619 		data_list[i]->snd_count = 1;
7620 		data_list[i]->rec.data.chunk_was_revoked = 0;
7621 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
7622 			sctp_misc_ints(SCTP_FLIGHT_LOG_UP,
7623 				       data_list[i]->whoTo->flight_size,
7624 				       data_list[i]->book_size,
7625 				       (uint32_t)(uintptr_t)data_list[i]->whoTo,
7626 				       data_list[i]->rec.data.tsn);
7627 		}
7628 		sctp_flight_size_increase(data_list[i]);
7629 		sctp_total_flight_increase(stcb, data_list[i]);
7630 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
7631 			sctp_log_rwnd(SCTP_DECREASE_PEER_RWND,
7632 			      asoc->peers_rwnd, data_list[i]->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
7633 		}
7634 		asoc->peers_rwnd = sctp_sbspace_sub(asoc->peers_rwnd,
7635 						    (uint32_t) (data_list[i]->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)));
7636 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
7637 			/* SWS sender side engages */
7638 			asoc->peers_rwnd = 0;
7639 		}
7640 	}
7641 	if (asoc->cc_functions.sctp_cwnd_update_packet_transmitted) {
7642 		(*asoc->cc_functions.sctp_cwnd_update_packet_transmitted)(stcb, net);
7643 	}
7644 }
7645 
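/*
 * Sweep the control send queue and free any one-shot control chunks
 * (SACKs, heartbeats, shutdowns, etc.) left behind; chunks with their
 * own timers (e.g. COOKIE-ECHO) are left alone, and a stream-reset
 * chunk is only kept if it is the active request.
 */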
7646 static void
7647 sctp_clean_up_ctl(struct sctp_tcb *stcb, struct sctp_association *asoc, int so_locked
7648 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
7649 	SCTP_UNUSED
7650 #endif
7651 )
7652 {
7653 	struct sctp_tmit_chunk *chk, *nchk;
7654 
7655 	TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) {
7656 		if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
7657 		    (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK) ||	/* EY */
7658 		    (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) ||
7659 		    (chk->rec.chunk_id.id == SCTP_HEARTBEAT_ACK) ||
7660 		    (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) ||
7661 		    (chk->rec.chunk_id.id == SCTP_SHUTDOWN) ||
7662 		    (chk->rec.chunk_id.id == SCTP_SHUTDOWN_ACK) ||
7663 		    (chk->rec.chunk_id.id == SCTP_OPERATION_ERROR) ||
7664 		    (chk->rec.chunk_id.id == SCTP_PACKET_DROPPED) ||
7665 		    (chk->rec.chunk_id.id == SCTP_COOKIE_ACK) ||
7666 		    (chk->rec.chunk_id.id == SCTP_ECN_CWR) ||
7667 		    (chk->rec.chunk_id.id == SCTP_ASCONF_ACK)) {
7668 			/* Stray chunks must be cleaned up */
7669 	clean_up_anyway:
7670 			TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
7671 			asoc->ctrl_queue_cnt--;
7672 			if (chk->data) {
7673 				sctp_m_freem(chk->data);
7674 				chk->data = NULL;
7675 			}
7676 			if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) {
7677 				asoc->fwd_tsn_cnt--;
7678 			}
7679 			sctp_free_a_chunk(stcb, chk, so_locked);
7680 		} else if (chk->rec.chunk_id.id == SCTP_STREAM_RESET) {
7681 			/* special handling, we must look into the param */
7682 			if (chk != asoc->str_reset) {
7683 				goto clean_up_anyway;
7684 			}
7685 		}
7686 	}
7687 }
7688 
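/*
 * Decide how many bytes of an incomplete message may be moved out now.
 * Returns the number of bytes that may be taken, or 0 if the message
 * should not be split at this point.
 */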
7689 static uint32_t
7690 sctp_can_we_split_this(struct sctp_tcb *stcb, uint32_t length,
7691                        uint32_t space_left, uint32_t frag_point, int eeor_on)
7692 {
7693 	/* Make a decision on if I should split a
7694 	 * msg into multiple parts. This is only asked of
7695 	 * incomplete messages.
7696 	 */
7697 	if (eeor_on) {
7698 		/* If we are doing EEOR we need to always send
7699 		 * it if it's the entire thing, since it might
7700 		 * be all the guy is putting in the hopper.
7701 		 */
7702 		if (space_left >= length) {
7703 			/*-
7704 			 * If we have data outstanding,
7705 			 * we get another chance when the sack
7706 			 * arrives to transmit - wait for more data
7707 			 */
7708 			if (stcb->asoc.total_flight == 0) {
7709 				/* If nothing is in flight, we zero
7710 				 * the packet counter.
7711 				 */
7712 				return (length);
7713 			}
7714 			return (0);
7715 
7716 		} else {
7717 			/* You can fill the rest */
7718 			return (space_left);
7719 		}
7720 	}
7721 	/*-
7722 	 * For those strange folk that make the send buffer
7723 	 * smaller than our fragmentation point, we can't
7724 	 * get a full msg in so we have to allow splitting.
7725 	 */
7726 	if (SCTP_SB_LIMIT_SND(stcb->sctp_socket) < frag_point) {
7727 		return (length);
7728 	}
7729 	if ((length <= space_left) ||
7730 	    ((length - space_left) < SCTP_BASE_SYSCTL(sctp_min_residual))) {
7731 		/* Sub-optimal residual, don't split in non-eeor mode. */
7732 		return (0);
7733 	}
7734 	/* If we reach here, length is larger
7735 	 * than space_left. Do we wish to split
7736 	 * it for the sake of packing the packet together?
7737 	 */
7738 	if (space_left >= min(SCTP_BASE_SYSCTL(sctp_min_split_point), frag_point)) {
7739 		/* It's ok to split it */
7740 		return (min(space_left, frag_point));
7741 	}
7742 	/* Nope, can't split */
7743 	return (0);
7744 }
7745 
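/*
 * Pull up to space_left bytes of the next pending message off the given
 * stream queue, wrap them in a freshly built DATA (or I-DATA) chunk and
 * append it to the association's send queue. Returns the number of bytes
 * moved; *giveup or *bail is set when the caller should stop filling.
 */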
7746 static uint32_t
7747 sctp_move_to_outqueue(struct sctp_tcb *stcb,
7748                       struct sctp_stream_out *strq,
7749                       uint32_t space_left,
7750                       uint32_t frag_point,
7751                       int *giveup,
7752                       int eeor_mode,
7753                       int *bail,
7754                       int so_locked
7755 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
7756                       SCTP_UNUSED
7757 #endif
7758 	)
7759 {
7760 	/* Move from the stream to the send_queue keeping track of the total */
7761 	struct sctp_association *asoc;
7762 	struct sctp_stream_queue_pending *sp;
7763 	struct sctp_tmit_chunk *chk;
7764 	struct sctp_data_chunk *dchkh=NULL;
7765 	struct sctp_idata_chunk *ndchkh=NULL;
7766 	uint32_t to_move, length;
7767 	int leading;
7768 	uint8_t rcv_flags = 0;
7769 	uint8_t some_taken;
7770 	uint8_t send_lock_up = 0;
7771 
7772 	SCTP_TCB_LOCK_ASSERT(stcb);
7773 	asoc = &stcb->asoc;
7774 one_more_time:
7775 	/*sa_ignore FREED_MEMORY*/
7776 	sp = TAILQ_FIRST(&strq->outqueue);
7777 	if (sp == NULL) {
7778 		if (send_lock_up == 0) {
7779 			SCTP_TCB_SEND_LOCK(stcb);
7780 			send_lock_up = 1;
7781 		}
7782 		sp = TAILQ_FIRST(&strq->outqueue);
7783 		if (sp) {
7784 			goto one_more_time;
7785 		}
7786 		if ((sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_EXPLICIT_EOR) == 0) &&
7787 		    (stcb->asoc.idata_supported == 0) &&
7788 		    (strq->last_msg_incomplete)) {
7789 			SCTP_PRINTF("Huh? Stream:%d lm_in_c=%d but queue is NULL\n",
7790 			            strq->sid,
7791 			            strq->last_msg_incomplete);
7792 			strq->last_msg_incomplete = 0;
7793 		}
7794 		to_move = 0;
7795 		if (send_lock_up) {
7796 			SCTP_TCB_SEND_UNLOCK(stcb);
7797 			send_lock_up = 0;
7798 		}
7799 		goto out_of;
7800 	}
7801 	if ((sp->msg_is_complete) && (sp->length == 0)) {
7802 		if (sp->sender_all_done) {
7803 			/* We are doing deferred cleanup. Last
7804 			 * time through when we took all the data
7805 			 * the sender_all_done was not set.
7806 			 */
7807 			if ((sp->put_last_out == 0) && (sp->discard_rest == 0)) {
7808 				SCTP_PRINTF("Gak, put out entire msg with NO end!-1\n");
7809 				SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d send_lock:%d\n",
7810 				            sp->sender_all_done,
7811 				            sp->length,
7812 				            sp->msg_is_complete,
7813 				            sp->put_last_out,
7814 				            send_lock_up);
7815 			}
7816 			if ((TAILQ_NEXT(sp, next) == NULL) && (send_lock_up  == 0)) {
7817 				SCTP_TCB_SEND_LOCK(stcb);
7818 				send_lock_up = 1;
7819 			}
7820 			atomic_subtract_int(&asoc->stream_queue_cnt, 1);
7821 			TAILQ_REMOVE(&strq->outqueue, sp, next);
7822 			stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, strq, sp, send_lock_up);
7823 			if ((strq->state == SCTP_STREAM_RESET_PENDING) &&
7824 			    (strq->chunks_on_queues == 0) &&
7825 			    TAILQ_EMPTY(&strq->outqueue)) {
7826 				stcb->asoc.trigger_reset = 1;
7827 			}
7828 			if (sp->net) {
7829 				sctp_free_remote_addr(sp->net);
7830 				sp->net = NULL;
7831 			}
7832 			if (sp->data) {
7833 				sctp_m_freem(sp->data);
7834 				sp->data = NULL;
7835 			}
7836 			sctp_free_a_strmoq(stcb, sp, so_locked);
7837 			/* we can't be locked to it */
7838 			if (send_lock_up) {
7839 				SCTP_TCB_SEND_UNLOCK(stcb);
7840 				send_lock_up = 0;
7841 			}
7842 			/* back to get the next msg */
7843 			goto one_more_time;
7844 		} else {
7845 			/* sender just finished this but
7846 			 * still holds a reference
7847 			 */
7848 			*giveup = 1;
7849 			to_move = 0;
7850 			goto out_of;
7851 		}
7852 	} else {
7853 		/* is there some to get */
7854 		if (sp->length == 0) {
7855 			/* no */
7856 			*giveup = 1;
7857 			to_move = 0;
7858 			goto out_of;
7859 		} else if (sp->discard_rest) {
7860 			if (send_lock_up == 0) {
7861 				SCTP_TCB_SEND_LOCK(stcb);
7862 				send_lock_up = 1;
7863 			}
7864 			/* Whack down the size */
7865 			atomic_subtract_int(&stcb->asoc.total_output_queue_size, sp->length);
7866 			if ((stcb->sctp_socket != NULL) &&
7867 			    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
7868 			     (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
7869 				atomic_subtract_int(&stcb->sctp_socket->so_snd.sb_cc, sp->length);
7870 			}
7871 			if (sp->data) {
7872 				sctp_m_freem(sp->data);
7873 				sp->data = NULL;
7874 				sp->tail_mbuf = NULL;
7875 			}
7876 			sp->length = 0;
7877 			sp->some_taken = 1;
7878 			*giveup = 1;
7879 			to_move = 0;
7880 			goto out_of;
7881 		}
7882 	}
7883 	some_taken = sp->some_taken;
7884 re_look:
7885 	length = sp->length;
7886 	if (sp->msg_is_complete) {
7887 		/* The message is complete */
7888 		to_move = min(length, frag_point);
7889 		if (to_move == length) {
7890 			/* All of it fits in the MTU */
7891 			if (sp->some_taken) {
7892 				rcv_flags |= SCTP_DATA_LAST_FRAG;
7893 			} else {
7894 				rcv_flags |= SCTP_DATA_NOT_FRAG;
7895 			}
7896 			sp->put_last_out = 1;
7897 			if (sp->sinfo_flags & SCTP_SACK_IMMEDIATELY) {
7898 				rcv_flags |= SCTP_DATA_SACK_IMMEDIATELY;
7899 			}
7900 		} else {
7901 			/* Not all of it fits, we fragment */
7902 			if (sp->some_taken == 0) {
7903 				rcv_flags |= SCTP_DATA_FIRST_FRAG;
7904 			}
7905 			sp->some_taken = 1;
7906 		}
7907 	} else {
7908 		to_move = sctp_can_we_split_this(stcb, length, space_left, frag_point, eeor_mode);
7909 		if (to_move) {
7910 			/*-
7911 			 * We use a snapshot of length in case it
7912 			 * is expanding during the compare.
7913 			 */
7914 			uint32_t llen;
7915 
7916 			llen = length;
7917 			if (to_move >= llen) {
7918 				to_move = llen;
7919 				if (send_lock_up == 0) {
7920 					/*-
7921 					 * We are taking all of an incomplete msg
7922 					 * thus we need a send lock.
7923 					 */
7924 					SCTP_TCB_SEND_LOCK(stcb);
7925 					send_lock_up = 1;
7926 					if (sp->msg_is_complete) {
7927 						/* the sender finished the msg */
7928 						goto re_look;
7929 					}
7930 				}
7931 			}
7932 			if (sp->some_taken == 0) {
7933 				rcv_flags |= SCTP_DATA_FIRST_FRAG;
7934 				sp->some_taken = 1;
7935 			}
7936 		} else {
7937 			/* Nothing to take. */
7938 			*giveup = 1;
7939 			to_move = 0;
7940 			goto out_of;
7941 		}
7942 	}
7943 
7944 	/* If we reach here, we can copy out a chunk */
7945 	sctp_alloc_a_chunk(stcb, chk);
7946 	if (chk == NULL) {
7947 		/* No chunk memory */
7948 		*giveup = 1;
7949 		to_move = 0;
7950 		goto out_of;
7951 	}
7952 	/* Setup for unordered if needed by looking
7953 	 * at the user sent info flags.
7954 	 */
7955 	if (sp->sinfo_flags & SCTP_UNORDERED) {
7956 		rcv_flags |= SCTP_DATA_UNORDERED;
7957 	}
7958 	if (SCTP_BASE_SYSCTL(sctp_enable_sack_immediately) &&
7959 	    (sp->sinfo_flags & SCTP_EOF) == SCTP_EOF) {
7960 		rcv_flags |= SCTP_DATA_SACK_IMMEDIATELY;
7961 	}
7962 	/* clear out the chunk before setting up */
7963 	memset(chk, 0, sizeof(*chk));
7964 	chk->rec.data.rcv_flags = rcv_flags;
7965 
7966 	if (to_move >= length) {
7967 		/* we think we can steal the whole thing */
7968 		if ((sp->sender_all_done == 0) && (send_lock_up == 0)) {
7969 			SCTP_TCB_SEND_LOCK(stcb);
7970 			send_lock_up = 1;
7971 		}
7972 		if (to_move < sp->length) {
7973 			/* bail, it changed */
7974 			goto dont_do_it;
7975 		}
7976 		chk->data = sp->data;
7977 		chk->last_mbuf = sp->tail_mbuf;
7978 		/* register the stealing */
7979 		sp->data = sp->tail_mbuf = NULL;
7980 	} else {
7981 		struct mbuf *m;
7982 	dont_do_it:
7983 		chk->data = SCTP_M_COPYM(sp->data, 0, to_move, M_NOWAIT);
7984 		chk->last_mbuf = NULL;
7985 		if (chk->data == NULL) {
7986 			sp->some_taken = some_taken;
7987 			sctp_free_a_chunk(stcb, chk, so_locked);
7988 			*bail = 1;
7989 			to_move = 0;
7990 			goto out_of;
7991 		}
7992 #ifdef SCTP_MBUF_LOGGING
7993 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
7994 			sctp_log_mbc(chk->data, SCTP_MBUF_ICOPY);
7995 		}
7996 #endif
7997 		/* Pull off the data */
7998 		m_adj(sp->data, to_move);
7999 		/* Now lets work our way down and compact it */
8000 		m = sp->data;
8001 		while (m && (SCTP_BUF_LEN(m) == 0)) {
8002 			sp->data  = SCTP_BUF_NEXT(m);
8003 			SCTP_BUF_NEXT(m) = NULL;
8004 			if (sp->tail_mbuf == m) {
8005 				/*-
8006 				 * Freeing tail? TSNH since
8007 				 * we supposedly were taking less
8008 				 * than the sp->length.
8009 				 */
8010 #ifdef INVARIANTS
8011 				panic("Huh, freeing tail? - TSNH");
8012 #else
8013 				SCTP_PRINTF("Huh, freeing tail? - TSNH\n");
8014 				sp->tail_mbuf = sp->data = NULL;
8015 				sp->length = 0;
8016 #endif
8017 
8018 			}
8019 			sctp_m_free(m);
8020 			m = sp->data;
8021 		}
8022 	}
8023 	if (SCTP_BUF_IS_EXTENDED(chk->data)) {
8024 		chk->copy_by_ref = 1;
8025 	} else {
8026 		chk->copy_by_ref = 0;
8027 	}
8028 	/* Get last_mbuf and count the mbuf usage.
8029 	 * This is ugly, but hopefully it's only one mbuf.
8030 	 */
8031 	if (chk->last_mbuf == NULL) {
8032 		chk->last_mbuf = chk->data;
8033 		while (SCTP_BUF_NEXT(chk->last_mbuf) != NULL) {
8034 			chk->last_mbuf = SCTP_BUF_NEXT(chk->last_mbuf);
8035 		}
8036 	}
8037 
8038 	if (to_move > length) {
8039 		/*- This should not happen either
8040 		 * since we always lower to_move to the size
8041 		 * of sp->length if it's larger.
8042 		 */
8043 #ifdef INVARIANTS
8044 		panic("Huh, how can to_move be larger?");
8045 #else
8046 		SCTP_PRINTF("Huh, how can to_move be larger?\n");
8047 		sp->length = 0;
8048 #endif
8049 	} else {
8050 		atomic_subtract_int(&sp->length, to_move);
8051 	}
8052 	leading = SCTP_DATA_CHUNK_OVERHEAD(stcb);
8053 	if (M_LEADINGSPACE(chk->data) < leading) {
8054 		/* Not enough room for a chunk header, get some */
8055 		struct mbuf *m;
8056 
8057 		m = sctp_get_mbuf_for_msg(1, 0, M_NOWAIT, 1, MT_DATA);
8058 		if (m == NULL) {
8059 			/*
8060 			 * we're in trouble here. _PREPEND below will free
8061 			 * all the data if there is no leading space, so we
8062 			 * must put the data back and restore.
8063 			 */
8064 			if (send_lock_up == 0) {
8065 				SCTP_TCB_SEND_LOCK(stcb);
8066 				send_lock_up = 1;
8067 			}
8068 			if (sp->data == NULL) {
8069 				/* unsteal the data */
8070 				sp->data = chk->data;
8071 				sp->tail_mbuf = chk->last_mbuf;
8072 			} else {
8073 				struct mbuf *m_tmp;
8074 				/* reassemble the data */
8075 				m_tmp = sp->data;
8076 				sp->data = chk->data;
8077 				SCTP_BUF_NEXT(chk->last_mbuf) = m_tmp;
8078 			}
8079 			sp->some_taken = some_taken;
8080 			atomic_add_int(&sp->length, to_move);
8081 			chk->data = NULL;
8082 			*bail = 1;
8083 			sctp_free_a_chunk(stcb, chk, so_locked);
8084 			to_move = 0;
8085 			goto out_of;
8086 		} else {
8087 			SCTP_BUF_LEN(m) = 0;
8088 			SCTP_BUF_NEXT(m) = chk->data;
8089 			chk->data = m;
8090 			M_ALIGN(chk->data, 4);
8091 		}
8092 	}
8093 	SCTP_BUF_PREPEND(chk->data, SCTP_DATA_CHUNK_OVERHEAD(stcb), M_NOWAIT);
8094 	if (chk->data == NULL) {
8095 		/* HELP, TSNH since we assured it would not above? */
8096 #ifdef INVARIANTS
8097 		panic("prepend fails HELP?");
8098 #else
8099 		SCTP_PRINTF("prepend fails HELP?\n");
8100 		sctp_free_a_chunk(stcb, chk, so_locked);
8101 #endif
8102 		*bail = 1;
8103 		to_move = 0;
8104 		goto out_of;
8105 	}
8106 	sctp_snd_sb_alloc(stcb, SCTP_DATA_CHUNK_OVERHEAD(stcb));
8107 	chk->book_size = chk->send_size = (uint16_t)(to_move + SCTP_DATA_CHUNK_OVERHEAD(stcb));
8108 	chk->book_size_scale = 0;
8109 	chk->sent = SCTP_DATAGRAM_UNSENT;
8110 
8111 	chk->flags = 0;
8112 	chk->asoc = &stcb->asoc;
8113 	chk->pad_inplace = 0;
8114 	chk->no_fr_allowed = 0;
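	/*
	 * Assign the message identifier. With plain DATA chunks only
	 * ordered messages carry an SSN; with I-DATA both ordered and
	 * unordered messages get their own MID sequence.
	 */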
8115 	if (stcb->asoc.idata_supported == 0) {
8116 		if (rcv_flags & SCTP_DATA_UNORDERED) {
8117 			/* Just use 0. The receiver ignores the values. */
8118 			chk->rec.data.mid = 0;
8119 		} else {
8120 			chk->rec.data.mid = strq->next_mid_ordered;
8121 			if (rcv_flags & SCTP_DATA_LAST_FRAG) {
8122 				strq->next_mid_ordered++;
8123 			}
8124 		}
8125 	} else {
8126 		if (rcv_flags & SCTP_DATA_UNORDERED) {
8127 			chk->rec.data.mid = strq->next_mid_unordered;
8128 			if (rcv_flags & SCTP_DATA_LAST_FRAG) {
8129 				strq->next_mid_unordered++;
8130 			}
8131 		} else {
8132 			chk->rec.data.mid = strq->next_mid_ordered;
8133 			if (rcv_flags & SCTP_DATA_LAST_FRAG) {
8134 				strq->next_mid_ordered++;
8135 			}
8136 		}
8137 	}
8138 	chk->rec.data.sid = sp->sid;
8139 	chk->rec.data.ppid = sp->ppid;
8140 	chk->rec.data.context = sp->context;
8141 	chk->rec.data.doing_fast_retransmit = 0;
8142 
8143 	chk->rec.data.timetodrop = sp->ts;
8144 	chk->flags = sp->act_flags;
8145 
8146 	if (sp->net) {
8147 		chk->whoTo = sp->net;
8148 		atomic_add_int(&chk->whoTo->ref_count, 1);
8149 	} else
8150 		chk->whoTo = NULL;
8151 
8152 	if (sp->holds_key_ref) {
8153 		chk->auth_keyid = sp->auth_keyid;
8154 		sctp_auth_key_acquire(stcb, chk->auth_keyid);
8155 		chk->holds_key_ref = 1;
8156 	}
8157 #if defined(__FreeBSD__) || defined(__Panda__)
8158 	chk->rec.data.tsn = atomic_fetchadd_int(&asoc->sending_seq, 1);
8159 #else
8160 	chk->rec.data.tsn = asoc->sending_seq++;
8161 #endif
8162 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_AT_SEND_2_OUTQ) {
8163 		sctp_misc_ints(SCTP_STRMOUT_LOG_SEND,
8164 		               (uint32_t)(uintptr_t)stcb, sp->length,
8165 		               (uint32_t)((chk->rec.data.sid << 16) | (0x0000ffff & chk->rec.data.mid)),
8166 		               chk->rec.data.tsn);
8167 	}
8168 	if (stcb->asoc.idata_supported == 0) {
8169 		dchkh = mtod(chk->data, struct sctp_data_chunk *);
8170 	} else {
8171 		ndchkh = mtod(chk->data, struct sctp_idata_chunk *);
8172 	}
8173 	/*
8174 	 * Put the rest of the things in place now. Size was done
8175 	 * earlier in previous loop prior to padding.
8176 	 */
8177 
8178 #ifdef SCTP_ASOCLOG_OF_TSNS
8179 	SCTP_TCB_LOCK_ASSERT(stcb);
8180 	if (asoc->tsn_out_at >= SCTP_TSN_LOG_SIZE) {
8181 		asoc->tsn_out_at = 0;
8182 		asoc->tsn_out_wrapped = 1;
8183 	}
8184 	asoc->out_tsnlog[asoc->tsn_out_at].tsn = chk->rec.data.tsn;
8185 	asoc->out_tsnlog[asoc->tsn_out_at].strm = chk->rec.data.sid;
8186 	asoc->out_tsnlog[asoc->tsn_out_at].seq = chk->rec.data.mid;
8187 	asoc->out_tsnlog[asoc->tsn_out_at].sz = chk->send_size;
8188 	asoc->out_tsnlog[asoc->tsn_out_at].flgs = chk->rec.data.rcv_flags;
8189 	asoc->out_tsnlog[asoc->tsn_out_at].stcb = (void *)stcb;
8190 	asoc->out_tsnlog[asoc->tsn_out_at].in_pos = asoc->tsn_out_at;
8191 	asoc->out_tsnlog[asoc->tsn_out_at].in_out = 2;
8192 	asoc->tsn_out_at++;
8193 #endif
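	/* Fill in the wire-format DATA or I-DATA chunk header just prepended. */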
8194 	if (stcb->asoc.idata_supported == 0) {
8195 		dchkh->ch.chunk_type = SCTP_DATA;
8196 		dchkh->ch.chunk_flags = chk->rec.data.rcv_flags;
8197 		dchkh->dp.tsn = htonl(chk->rec.data.tsn);
8198 		dchkh->dp.sid = htons(strq->sid);
8199 		dchkh->dp.ssn = htons((uint16_t)chk->rec.data.mid);
8200 		dchkh->dp.ppid = chk->rec.data.ppid;
8201 		dchkh->ch.chunk_length = htons(chk->send_size);
8202 	} else {
8203 		ndchkh->ch.chunk_type = SCTP_IDATA;
8204 		ndchkh->ch.chunk_flags = chk->rec.data.rcv_flags;
8205 		ndchkh->dp.tsn = htonl(chk->rec.data.tsn);
8206 		ndchkh->dp.sid = htons(strq->sid);
8207 		ndchkh->dp.reserved = htons(0);
8208 		ndchkh->dp.mid = htonl(chk->rec.data.mid);
8209 		if (sp->fsn == 0)
8210 			ndchkh->dp.ppid_fsn.ppid = chk->rec.data.ppid;
8211 		else
8212 			ndchkh->dp.ppid_fsn.fsn = htonl(sp->fsn);
8213 		sp->fsn++;
8214 		ndchkh->ch.chunk_length = htons(chk->send_size);
8215 	}
8216 	/* Now advance the chk->send_size by the actual pad needed. */
8217 	if (chk->send_size < SCTP_SIZE32(chk->book_size)) {
8218 		/* need a pad */
8219 		struct mbuf *lm;
8220 		int pads;
8221 
8222 		pads = SCTP_SIZE32(chk->book_size) - chk->send_size;
8223 		lm = sctp_pad_lastmbuf(chk->data, pads, chk->last_mbuf);
8224 		if (lm != NULL) {
8225 			chk->last_mbuf = lm;
8226 			chk->pad_inplace = 1;
8227 		}
8228 		chk->send_size += pads;
8229 	}
8230 	if (PR_SCTP_ENABLED(chk->flags)) {
8231 		asoc->pr_sctp_cnt++;
8232 	}
8233 	if (sp->msg_is_complete && (sp->length == 0) && (sp->sender_all_done)) {
8234 		/* All done pull and kill the message */
8235 		if (sp->put_last_out == 0) {
8236 			SCTP_PRINTF("Gak, put out entire msg with NO end!-2\n");
8237 			SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d send_lock:%d\n",
8238 			            sp->sender_all_done,
8239 			            sp->length,
8240 			            sp->msg_is_complete,
8241 			            sp->put_last_out,
8242 			            send_lock_up);
8243 		}
8244 		if ((send_lock_up == 0) && (TAILQ_NEXT(sp, next) == NULL)) {
8245 			SCTP_TCB_SEND_LOCK(stcb);
8246 			send_lock_up = 1;
8247 		}
8248 		atomic_subtract_int(&asoc->stream_queue_cnt, 1);
8249 		TAILQ_REMOVE(&strq->outqueue, sp, next);
8250 		stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, strq, sp, send_lock_up);
8251 		if ((strq->state == SCTP_STREAM_RESET_PENDING) &&
8252 		    (strq->chunks_on_queues == 0) &&
8253 		    TAILQ_EMPTY(&strq->outqueue)) {
8254 			stcb->asoc.trigger_reset = 1;
8255 		}
8256 		if (sp->net) {
8257 			sctp_free_remote_addr(sp->net);
8258 			sp->net = NULL;
8259 		}
8260 		if (sp->data) {
8261 			sctp_m_freem(sp->data);
8262 			sp->data = NULL;
8263 		}
8264 		sctp_free_a_strmoq(stcb, sp, so_locked);
8265 	}
8266 	asoc->chunks_on_out_queue++;
8267 	strq->chunks_on_queues++;
8268 	TAILQ_INSERT_TAIL(&asoc->send_queue, chk, sctp_next);
8269 	asoc->send_queue_cnt++;
8270 out_of:
8271 	if (send_lock_up) {
8272 		SCTP_TCB_SEND_UNLOCK(stcb);
8273 	}
8274 	return (to_move);
8275 }
8276 
8277 
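/*
 * Fill the send queue for this net: keep asking the stream scheduler for
 * the next stream and moving data from it until roughly an MTU's worth of
 * space has been used up or the scheduler/mover signals there is nothing
 * more to take.
 */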
8278 static void
8279 sctp_fill_outqueue(struct sctp_tcb *stcb,
8280     struct sctp_nets *net, int frag_point, int eeor_mode, int *quit_now, int so_locked
8281 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
8282 	SCTP_UNUSED
8283 #endif
8284 )
8285 {
8286 	struct sctp_association *asoc;
8287 	struct sctp_stream_out *strq;
8288 	uint32_t space_left, moved, total_moved;
8289 	int bail, giveup;
8290 
8291 	SCTP_TCB_LOCK_ASSERT(stcb);
8292 	asoc = &stcb->asoc;
8293 	total_moved = 0;
8294 	switch (net->ro._l_addr.sa.sa_family) {
8295 #ifdef INET
8296 		case AF_INET:
8297 			space_left = net->mtu - SCTP_MIN_V4_OVERHEAD;
8298 			break;
8299 #endif
8300 #ifdef INET6
8301 		case AF_INET6:
8302 			space_left = net->mtu - SCTP_MIN_OVERHEAD;
8303 			break;
8304 #endif
8305 #if defined(__Userspace__)
8306 		case AF_CONN:
8307 			space_left = net->mtu - sizeof(struct sctphdr);
8308 			break;
8309 #endif
8310 		default:
8311 			/* TSNH */
8312 			space_left = net->mtu;
8313 			break;
8314 	}
8315 	/* Need an allowance for the data chunk header too */
8316 	space_left -= SCTP_DATA_CHUNK_OVERHEAD(stcb);
8317 
8318 	/* must make even word boundary */
8319 	space_left &= 0xfffffffc;
8320 	strq = stcb->asoc.ss_functions.sctp_ss_select_stream(stcb, net, asoc);
8321 	giveup = 0;
8322 	bail = 0;
8323 	while ((space_left > 0) && (strq != NULL)) {
8324 		moved = sctp_move_to_outqueue(stcb, strq, space_left, frag_point,
8325 		                              &giveup, eeor_mode, &bail, so_locked);
8326 		stcb->asoc.ss_functions.sctp_ss_scheduled(stcb, net, asoc, strq, moved);
8327 		if ((giveup != 0) || (bail != 0)) {
8328 			break;
8329 		}
8330 		strq = stcb->asoc.ss_functions.sctp_ss_select_stream(stcb, net, asoc);
8331 		total_moved += moved;
8332 		if (space_left >= moved) {
8333 			space_left -= moved;
8334 		} else {
8335 			space_left = 0;
8336 		}
8337 		if (space_left >= SCTP_DATA_CHUNK_OVERHEAD(stcb)) {
8338 			space_left -= SCTP_DATA_CHUNK_OVERHEAD(stcb);
8339 		} else {
8340 			space_left = 0;
8341 		}
8342 		space_left &= 0xfffffffc;
8343 	}
8344 	if (bail != 0)
8345 		*quit_now = 1;
8346 
8347 	stcb->asoc.ss_functions.sctp_ss_packet_done(stcb, net, asoc);
8348 
8349 	if (total_moved == 0) {
8350 		if ((stcb->asoc.sctp_cmt_on_off == 0) &&
8351 		    (net == stcb->asoc.primary_destination)) {
8352 			/* ran dry for primary network net */
8353 			SCTP_STAT_INCR(sctps_primary_randry);
8354 		} else if (stcb->asoc.sctp_cmt_on_off > 0) {
8355 			/* ran dry with CMT on */
8356 			SCTP_STAT_INCR(sctps_cmt_randry);
8357 		}
8358 	}
8359 }
8360 
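/*
 * Mark every queued ECN-ECHO chunk as unsent so it will be (re)transmitted
 * on the next output pass.
 */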
8361 void
8362 sctp_fix_ecn_echo(struct sctp_association *asoc)
8363 {
8364 	struct sctp_tmit_chunk *chk;
8365 
8366 	TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
8367 		if (chk->rec.chunk_id.id == SCTP_ECN_ECHO) {
8368 			chk->sent = SCTP_DATAGRAM_UNSENT;
8369 		}
8370 	}
8371 }
8372 
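/*
 * Detach the given net from any queued stream data or send-queue chunks
 * that were bound to it, so they can be re-routed to another destination.
 */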
8373 void
8374 sctp_move_chunks_from_net(struct sctp_tcb *stcb, struct sctp_nets *net)
8375 {
8376 	struct sctp_association *asoc;
8377 	struct sctp_tmit_chunk *chk;
8378 	struct sctp_stream_queue_pending *sp;
8379 	unsigned int i;
8380 
8381 	if (net == NULL) {
8382 		return;
8383 	}
8384 	asoc = &stcb->asoc;
8385 	for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
8386 		TAILQ_FOREACH(sp, &stcb->asoc.strmout[i].outqueue, next) {
8387 			if (sp->net == net) {
8388 				sctp_free_remote_addr(sp->net);
8389 				sp->net = NULL;
8390 			}
8391 		}
8392 	}
8393 	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
8394 		if (chk->whoTo == net) {
8395 			sctp_free_remote_addr(chk->whoTo);
8396 			chk->whoTo = NULL;
8397 		}
8398 	}
8399 }
8400 
8401 int
8402 sctp_med_chunk_output(struct sctp_inpcb *inp,
8403 		      struct sctp_tcb *stcb,
8404 		      struct sctp_association *asoc,
8405 		      int *num_out,
8406 		      int *reason_code,
8407 		      int control_only, int from_where,
8408 		      struct timeval *now, int *now_filled, int frag_point, int so_locked
8409 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
8410 		      SCTP_UNUSED
8411 #endif
8412 	)
8413 {
8414 	/**
8415 	 * Ok this is the generic chunk service queue. we must do the
8416 	 * following:
8417 	 * - Service the stream queue that is next, moving any
8418 	 *   message (note I must get a complete message i.e. FIRST/MIDDLE and
8419 	 *   LAST to the out queue in one pass) and assigning TSN's. This
8420 	 *   only applies though if the peer does not support NDATA. For NDATA
8421 	 *   chunks its ok to not send the entire message ;-)
8422 	 * - Check to see if the cwnd/rwnd allows any output, if so we go ahead and
8423 	 *   formulate and send the low level chunks. Making sure to combine
8424 	 *   any control in the control chunk queue also.
8425 	 */
8426 	struct sctp_nets *net, *start_at, *sack_goes_to = NULL, *old_start_at = NULL;
8427 	struct mbuf *outchain, *endoutchain;
8428 	struct sctp_tmit_chunk *chk, *nchk;
8429 
8430 	/* temp arrays for unlinking */
8431 	struct sctp_tmit_chunk *data_list[SCTP_MAX_DATA_BUNDLING];
8432 	int no_fragmentflg, error;
8433 	unsigned int max_rwnd_per_dest, max_send_per_dest;
8434 	int one_chunk, hbflag, skip_data_for_this_net;
8435 	int asconf, cookie, no_out_cnt;
8436 	int bundle_at, ctl_cnt, no_data_chunks, eeor_mode;
8437 	unsigned int mtu, r_mtu, omtu, mx_mtu, to_out;
8438 	int tsns_sent = 0;
8439 	uint32_t auth_offset;
8440 	struct sctp_auth_chunk *auth;
8441 	uint16_t auth_keyid;
8442 	int override_ok = 1;
8443 	int skip_fill_up = 0;
8444 	int data_auth_reqd = 0;
8445 	/* JRS 5/14/07 - Add flag for whether a heartbeat is sent to
8446 	   the destination. */
8447 	int quit_now = 0;
8448 
8449 #if defined(__APPLE__)
8450 	if (so_locked) {
8451 		sctp_lock_assert(SCTP_INP_SO(inp));
8452 	} else {
8453 		sctp_unlock_assert(SCTP_INP_SO(inp));
8454 	}
8455 #endif
8456 	*num_out = 0;
8457 	*reason_code = 0;
8458 	auth_keyid = stcb->asoc.authinfo.active_keyid;
8459 	if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
8460 	    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
8461 	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR))) {
8462 		eeor_mode = 1;
8463 	} else {
8464 		eeor_mode = 0;
8465 	}
8466 	ctl_cnt = no_out_cnt = asconf = cookie = 0;
8467 	/*
8468 	 * First lets prime the pump. For each destination, if there is room
8469 	 * in the flight size, attempt to pull an MTU's worth out of the
8470 	 * stream queues into the general send_queue
8471 	 */
8472 #ifdef SCTP_AUDITING_ENABLED
8473 	sctp_audit_log(0xC2, 2);
8474 #endif
8475 	SCTP_TCB_LOCK_ASSERT(stcb);
8476 	hbflag = 0;
8477 	if (control_only)
8478 		no_data_chunks = 1;
8479 	else
8480 		no_data_chunks = 0;
8481 
8482 	/* Nothing possible to send? */
8483 	if ((TAILQ_EMPTY(&asoc->control_send_queue) ||
8484 	     (asoc->ctrl_queue_cnt == stcb->asoc.ecn_echo_cnt_onq)) &&
8485 	    TAILQ_EMPTY(&asoc->asconf_send_queue) &&
8486 	    TAILQ_EMPTY(&asoc->send_queue) &&
8487 	    sctp_is_there_unsent_data(stcb, so_locked) == 0) {
8488 	nothing_to_send:
8489 		*reason_code = 9;
8490 		return (0);
8491 	}
8492 	if (asoc->peers_rwnd == 0) {
8493 		/* No room in peers rwnd */
8494 		*reason_code = 1;
8495 		if (asoc->total_flight > 0) {
8496 			/* we are allowed one chunk in flight */
8497 			no_data_chunks = 1;
8498 		}
8499 	}
8500 	if (stcb->asoc.ecn_echo_cnt_onq) {
8501 		/* Record where a sack goes, if any */
8502 		if (no_data_chunks &&
8503 		    (asoc->ctrl_queue_cnt == stcb->asoc.ecn_echo_cnt_onq)) {
8504 			/* Nothing but ECNe to send - we don't do that */
8505 			goto nothing_to_send;
8506 		}
8507 		TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
8508 			if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
8509 			    (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK)) {
8510 				sack_goes_to = chk->whoTo;
8511 				break;
8512 			}
8513 		}
8514 	}
8515 	max_rwnd_per_dest = ((asoc->peers_rwnd + asoc->total_flight) / asoc->numnets);
8516 	if (stcb->sctp_socket)
8517 		max_send_per_dest = SCTP_SB_LIMIT_SND(stcb->sctp_socket) / asoc->numnets;
8518 	else
8519 		max_send_per_dest = 0;
8520 	if (no_data_chunks == 0) {
8521 		/* How many non-directed chunks are there? */
8522 		TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
8523 			if (chk->whoTo == NULL) {
8524 				/* We already have non-directed
8525 				 * chunks on the queue, no need
8526 				 * to do a fill-up.
8527 				 */
8528 				skip_fill_up = 1;
8529 				break;
8530 			}
8531 		}
8532 
8533 	}
8534 	if ((no_data_chunks == 0) &&
8535 	    (skip_fill_up == 0) &&
8536 	    (!stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc))) {
8537 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
8538 			/*
8539 			 * This for loop we are in takes in
8540 			 * each net: if it's got space in cwnd and
8541 			 * has data sent to it (when CMT is off) then it
8542 			 * calls sctp_fill_outqueue for the net. This gets
8543 			 * data on the send queue for that network.
8544 			 *
8545 			 * In sctp_fill_outqueue TSN's are assigned and
8546 			 * data is copied out of the stream buffers. Note
8547 			 * mostly copy by reference (we hope).
8548 			 */
8549 			net->window_probe = 0;
8550 			if ((net != stcb->asoc.alternate) &&
8551 			    ((net->dest_state & SCTP_ADDR_PF) ||
8552 			     (!(net->dest_state & SCTP_ADDR_REACHABLE)) ||
8553 			     (net->dest_state & SCTP_ADDR_UNCONFIRMED))) {
8554 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
8555 					sctp_log_cwnd(stcb, net, 1,
8556 						      SCTP_CWND_LOG_FILL_OUTQ_CALLED);
8557 				}
8558 				continue;
8559 			}
8560 			if ((stcb->asoc.cc_functions.sctp_cwnd_new_transmission_begins) &&
8561 			    (net->flight_size == 0)) {
8562 				(*stcb->asoc.cc_functions.sctp_cwnd_new_transmission_begins)(stcb, net);
8563 			}
8564 			if (net->flight_size >= net->cwnd) {
8565 				/* skip this network, no room - can't fill */
8566 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
8567 					sctp_log_cwnd(stcb, net, 3,
8568 						      SCTP_CWND_LOG_FILL_OUTQ_CALLED);
8569 				}
8570 				continue;
8571 			}
8572 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
8573 				sctp_log_cwnd(stcb, net, 4, SCTP_CWND_LOG_FILL_OUTQ_CALLED);
8574 			}
8575 			sctp_fill_outqueue(stcb, net, frag_point, eeor_mode, &quit_now, so_locked);
8576 			if (quit_now) {
8577 				/* memory alloc failure */
8578 				no_data_chunks = 1;
8579 				break;
8580 			}
8581 		}
8582 	}
8583 	/* now service each destination and send out what we can for it */
8584 	/* Nothing to send? */
8585 	if (TAILQ_EMPTY(&asoc->control_send_queue) &&
8586 	    TAILQ_EMPTY(&asoc->asconf_send_queue) &&
8587 	    TAILQ_EMPTY(&asoc->send_queue)) {
8588 		*reason_code = 8;
8589 		return (0);
8590 	}
8591 
8592 	if (asoc->sctp_cmt_on_off > 0) {
8593 		/* get the last start point */
8594 		start_at = asoc->last_net_cmt_send_started;
8595 		if (start_at == NULL) {
8596 			/* null so to beginning */
8597 			start_at = TAILQ_FIRST(&asoc->nets);
8598 		} else {
8599 			start_at = TAILQ_NEXT(asoc->last_net_cmt_send_started, sctp_next);
8600 			if (start_at == NULL) {
8601 				start_at = TAILQ_FIRST(&asoc->nets);
8602 			}
8603 		}
8604 		asoc->last_net_cmt_send_started = start_at;
8605 	} else {
8606 		start_at = TAILQ_FIRST(&asoc->nets);
8607 	}
8608 	TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
8609 		if (chk->whoTo == NULL) {
8610 			if (asoc->alternate) {
8611 				chk->whoTo = asoc->alternate;
8612 			} else {
8613 				chk->whoTo = asoc->primary_destination;
8614 			}
8615 			atomic_add_int(&chk->whoTo->ref_count, 1);
8616 		}
8617 	}
8618 	old_start_at = NULL;
8619 again_one_more_time:
8620 	for (net = start_at ; net != NULL; net = TAILQ_NEXT(net, sctp_next)) {
8621 		/* how much can we send? */
8622 		/* SCTPDBG("Examine for sending net:%x\n", (uint32_t)net); */
8623 		if (old_start_at && (old_start_at == net)) {
8624 			/* through the list completely. */
8625 			break;
8626 		}
8627 		tsns_sent = 0xa;
8628 		if (TAILQ_EMPTY(&asoc->control_send_queue) &&
8629 		    TAILQ_EMPTY(&asoc->asconf_send_queue) &&
8630 		    (net->flight_size >= net->cwnd)) {
8631 			/* Nothing on control or asconf and flight is full, we can skip
8632 			 * even in the CMT case.
8633 			 */
8634 			continue;
8635 		}
8636 		bundle_at = 0;
8637 		endoutchain = outchain = NULL;
8638 		auth = NULL;
8639 		auth_offset = 0;
8640 		no_fragmentflg = 1;
8641 		one_chunk = 0;
8642 		if (net->dest_state & SCTP_ADDR_UNCONFIRMED) {
8643 			skip_data_for_this_net = 1;
8644 		} else {
8645 			skip_data_for_this_net = 0;
8646 		}
8647 		switch (((struct sockaddr *)&net->ro._l_addr)->sa_family) {
8648 #ifdef INET
8649 		case AF_INET:
8650 			mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
8651 			break;
8652 #endif
8653 #ifdef INET6
8654 		case AF_INET6:
8655 			mtu = net->mtu - SCTP_MIN_OVERHEAD;
8656 			break;
8657 #endif
8658 #if defined(__Userspace__)
8659 		case AF_CONN:
8660 			mtu = net->mtu - sizeof(struct sctphdr);
8661 			break;
8662 #endif
8663 		default:
8664 			/* TSNH */
8665 			mtu = net->mtu;
8666 			break;
8667 		}
8668 		mx_mtu = mtu;
8669 		to_out = 0;
8670 		if (mtu > asoc->peers_rwnd) {
8671 			if (asoc->total_flight > 0) {
8672 				/* We have a packet in flight somewhere */
8673 				r_mtu = asoc->peers_rwnd;
8674 			} else {
8675 				/* We are always allowed to send one MTU out */
8676 				one_chunk = 1;
8677 				r_mtu = mtu;
8678 			}
8679 		} else {
8680 			r_mtu = mtu;
8681 		}
8682 		error = 0;
8683 		/************************/
8684 		/* ASCONF transmission */
8685 		/************************/
8686 		/* Now first lets go through the asconf queue */
8687 		TAILQ_FOREACH_SAFE(chk, &asoc->asconf_send_queue, sctp_next, nchk) {
8688 			if (chk->rec.chunk_id.id != SCTP_ASCONF) {
8689 				continue;
8690 			}
8691 			if (chk->whoTo == NULL) {
8692 				if (asoc->alternate == NULL) {
8693 					if (asoc->primary_destination != net) {
8694 						break;
8695 					}
8696 				} else {
8697 					if (asoc->alternate != net) {
8698 						break;
8699 					}
8700 				}
8701 			} else {
8702 				if (chk->whoTo != net) {
8703 					break;
8704 				}
8705 			}
8706 			if (chk->data == NULL) {
8707 				break;
8708 			}
8709 			if (chk->sent != SCTP_DATAGRAM_UNSENT &&
8710 			    chk->sent != SCTP_DATAGRAM_RESEND) {
8711 				break;
8712 			}
8713 			/*
8714 			 * if no AUTH is yet included and this chunk
8715 			 * requires it, make sure to account for it.  We
8716 			 * don't apply the size until the AUTH chunk is
8717 			 * actually added below in case there is no room for
8718 			 * this chunk. NOTE: we overload the use of "omtu"
8719 			 * here
8720 			 */
8721 			if ((auth == NULL) &&
8722 			    sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
8723 							stcb->asoc.peer_auth_chunks)) {
8724 				omtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
8725 			} else
8726 				omtu = 0;
8727 			/* Here we do NOT factor the r_mtu */
8728 			if ((chk->send_size < (int)(mtu - omtu)) ||
8729 			    (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
8730 				/*
8731 				 * We probably should glom the mbuf chain
8732 				 * from the chk->data for control but the
8733 				 * problem is it becomes yet one more level
8734 				 * of tracking to do if for some reason
8735 				 * output fails. Then I have got to
8736 				 * reconstruct the merged control chain.. el
8737 				 * yucko.. for now we take the easy way and
8738 				 * do the copy
8739 				 */
8740 				/*
8741 				 * Add an AUTH chunk, if chunk requires it
8742 				 * save the offset into the chain for AUTH
8743 				 */
8744 				if ((auth == NULL) &&
8745 				    (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
8746 								 stcb->asoc.peer_auth_chunks))) {
8747 					outchain = sctp_add_auth_chunk(outchain,
8748 								       &endoutchain,
8749 								       &auth,
8750 								       &auth_offset,
8751 								       stcb,
8752 								       chk->rec.chunk_id.id);
8753 					SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8754 				}
8755 				outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain,
8756 							       (int)chk->rec.chunk_id.can_take_data,
8757 							       chk->send_size, chk->copy_by_ref);
8758 				if (outchain == NULL) {
8759 					*reason_code = 8;
8760 					SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
8761 					return (ENOMEM);
8762 				}
8763 				SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8764 				/* update our MTU size */
8765 				if (mtu > (chk->send_size + omtu))
8766 					mtu -= (chk->send_size + omtu);
8767 				else
8768 					mtu = 0;
8769 				to_out += (chk->send_size + omtu);
8770 				/* Do clear IP_DF ? */
8771 				if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
8772 					no_fragmentflg = 0;
8773 				}
8774 				if (chk->rec.chunk_id.can_take_data)
8775 					chk->data = NULL;
8776 				/*
8777 				 * set hb flag since we can
8778 				 * use these for RTO
8779 				 */
8780 				hbflag = 1;
8781 				asconf = 1;
8782 				/*
8783 				 * should sysctl this: don't
8784 				 * bundle data with ASCONF
8785 				 * since it requires AUTH
8786 				 */
8787 				no_data_chunks = 1;
8788 				chk->sent = SCTP_DATAGRAM_SENT;
8789 				if (chk->whoTo == NULL) {
8790 					chk->whoTo = net;
8791 					atomic_add_int(&net->ref_count, 1);
8792 				}
8793 				chk->snd_count++;
8794 				if (mtu == 0) {
8795 					/*
8796 					 * Ok we are out of room but we can
8797 					 * output without affecting the
8798 					 * flight size since this little guy
8799 					 * is a control only packet.
8800 					 */
8801 					sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, net);
8802 					/*
8803 					 * do NOT clear the asconf
8804 					 * flag as it is used to do
8805 					 * appropriate source address
8806 					 * selection.
8807 					 */
8808 					if (*now_filled == 0) {
8809 						(void)SCTP_GETTIME_TIMEVAL(now);
8810 						*now_filled = 1;
8811 					}
8812 					net->last_sent_time = *now;
8813 					hbflag = 0;
8814 					if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
8815 					                                        (struct sockaddr *)&net->ro._l_addr,
8816 					                                        outchain, auth_offset, auth,
8817 					                                        stcb->asoc.authinfo.active_keyid,
8818 					                                        no_fragmentflg, 0, asconf,
8819 					                                        inp->sctp_lport, stcb->rport,
8820 					                                        htonl(stcb->asoc.peer_vtag),
8821 					                                        net->port, NULL,
8822 #if defined(__FreeBSD__)
8823 					                                        0, 0,
8824 #endif
8825 					                                        so_locked))) {
8826 						/* error, we could not output */
8827 						SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
8828 						if (from_where == 0) {
8829 							SCTP_STAT_INCR(sctps_lowlevelerrusr);
8830 						}
8831 						if (error == ENOBUFS) {
8832 							asoc->ifp_had_enobuf = 1;
8833 							SCTP_STAT_INCR(sctps_lowlevelerr);
8834 						}
8835 						/* error, could not output */
8836 						if (error == EHOSTUNREACH) {
8837 							/*
8838 							 * Destination went
8839 							 * unreachable
8840 							 * during this send
8841 							 */
8842 							sctp_move_chunks_from_net(stcb, net);
8843 						}
8844 						*reason_code = 7;
8845 						break;
8846 					} else {
8847 						asoc->ifp_had_enobuf = 0;
8848 					}
8849 					/*
8850 					 * increase the number we sent, if a
8851 					 * cookie is sent we don't tell them
8852 					 * any was sent out.
8853 					 */
8854 					outchain = endoutchain = NULL;
8855 					auth = NULL;
8856 					auth_offset = 0;
8857 					if (!no_out_cnt)
8858 						*num_out += ctl_cnt;
8859 					/* recalc a clean slate and setup */
8860 					switch (net->ro._l_addr.sa.sa_family) {
8861 #ifdef INET
8862 						case AF_INET:
8863 							mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
8864 							break;
8865 #endif
8866 #ifdef INET6
8867 						case AF_INET6:
8868 							mtu = net->mtu - SCTP_MIN_OVERHEAD;
8869 							break;
8870 #endif
8871 #if defined(__Userspace__)
8872 						case AF_CONN:
8873 							mtu = net->mtu - sizeof(struct sctphdr);
8874 							break;
8875 #endif
8876 						default:
8877 							/* TSNH */
8878 							mtu = net->mtu;
8879 							break;
8880 					}
8881 					to_out = 0;
8882 					no_fragmentflg = 1;
8883 				}
8884 			}
8885 		}
8886 		if (error != 0) {
8887 			/* try next net */
8888 			continue;
8889 		}
8890 		/************************/
8891 		/* Control transmission */
8892 		/************************/
8893 		/* Now first lets go through the control queue */
8894 		TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) {
8895 			if ((sack_goes_to) &&
8896 			    (chk->rec.chunk_id.id == SCTP_ECN_ECHO) &&
8897 			    (chk->whoTo != sack_goes_to)) {
8898 				/*
8899 				 * if we have a sack in queue, and we are looking at an
8900 				 * ecn echo that is NOT queued to where the sack is going..
8901 				 */
8902 				if (chk->whoTo == net) {
8903 					/* Don't transmit it to where its going (current net) */
8904 					continue;
8905 				} else if (sack_goes_to == net) {
8906 					/* But do transmit it to this address */
8907 					goto skip_net_check;
8908 				}
8909 			}
8910 			if (chk->whoTo == NULL) {
8911 				if (asoc->alternate == NULL) {
8912 					if (asoc->primary_destination != net) {
8913 						continue;
8914 					}
8915 				} else {
8916 					if (asoc->alternate != net) {
8917 						continue;
8918 					}
8919 				}
8920 			} else {
8921 				if (chk->whoTo != net) {
8922 					continue;
8923 				}
8924 			}
8925 		skip_net_check:
8926 			if (chk->data == NULL) {
8927 				continue;
8928 			}
8929 			if (chk->sent != SCTP_DATAGRAM_UNSENT) {
8930 				/*
8931 				 * It must be unsent. Cookies and ASCONFs
8932 				 * hang around, but their timers will force
8933 				 * them out when marked for resend.
8934 				 */
8935 				continue;
8936 			}
8937 			/*
8938 			 * if no AUTH is yet included and this chunk
8939 			 * requires it, make sure to account for it.  We
8940 			 * don't apply the size until the AUTH chunk is
8941 			 * actually added below in case there is no room for
8942 			 * this chunk. NOTE: we overload the use of "omtu"
8943 			 * here
8944 			 */
8945 			if ((auth == NULL) &&
8946 			    sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
8947 							stcb->asoc.peer_auth_chunks)) {
8948 				omtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
8949 			} else
8950 				omtu = 0;
8951 			/* Here we do NOT factor the r_mtu */
8952 			if ((chk->send_size <= (int)(mtu - omtu)) ||
8953 			    (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
8954 				/*
8955 				 * We probably should glom the mbuf chain
8956 				 * from the chk->data for control but the
8957 				 * problem is it becomes yet one more level
8958 				 * of tracking to do if for some reason
8959 				 * output fails. Then I have got to
8960 				 * reconstruct the merged control chain.. el
8961 				 * yucko.. for now we take the easy way and
8962 				 * do the copy
8963 				 */
8964 				/*
8965 				 * Add an AUTH chunk, if chunk requires it
8966 				 * save the offset into the chain for AUTH
8967 				 */
8968 				if ((auth == NULL) &&
8969 				    (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
8970 								 stcb->asoc.peer_auth_chunks))) {
8971 					outchain = sctp_add_auth_chunk(outchain,
8972 								       &endoutchain,
8973 								       &auth,
8974 								       &auth_offset,
8975 								       stcb,
8976 								       chk->rec.chunk_id.id);
8977 					SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8978 				}
8979 				outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain,
8980 							       (int)chk->rec.chunk_id.can_take_data,
8981 							       chk->send_size, chk->copy_by_ref);
8982 				if (outchain == NULL) {
8983 					*reason_code = 8;
8984 					SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
8985 					return (ENOMEM);
8986 				}
8987 				SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8988 				/* update our MTU size */
8989 				if (mtu > (chk->send_size + omtu))
8990 					mtu -= (chk->send_size + omtu);
8991 				else
8992 					mtu = 0;
8993 				to_out += (chk->send_size + omtu);
8994 				/* Do clear IP_DF ? */
8995 				if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
8996 					no_fragmentflg = 0;
8997 				}
8998 				if (chk->rec.chunk_id.can_take_data)
8999 					chk->data = NULL;
9000 				/* Mark things to be removed, if needed */
9001 				if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
9002 				    (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK) || /* EY */
9003 				    (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) ||
9004 				    (chk->rec.chunk_id.id == SCTP_HEARTBEAT_ACK) ||
9005 				    (chk->rec.chunk_id.id == SCTP_SHUTDOWN) ||
9006 				    (chk->rec.chunk_id.id == SCTP_SHUTDOWN_ACK) ||
9007 				    (chk->rec.chunk_id.id == SCTP_OPERATION_ERROR) ||
9008 				    (chk->rec.chunk_id.id == SCTP_COOKIE_ACK) ||
9009 				    (chk->rec.chunk_id.id == SCTP_ECN_CWR) ||
9010 				    (chk->rec.chunk_id.id == SCTP_PACKET_DROPPED) ||
9011 				    (chk->rec.chunk_id.id == SCTP_ASCONF_ACK)) {
9012 					if (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) {
9013 						hbflag = 1;
9014 					}
9015 					/* remove these chunks at the end */
9016 					if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
9017 					    (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK)) {
9018 						/* turn off the timer */
9019 						if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
9020 							sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
9021 							                inp, stcb, NULL,
9022 							                SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_1);
9023 						}
9024 					}
9025 					ctl_cnt++;
9026 				} else {
9027 					/*
9028 					 * Other chunks, since they have
9029 					 * timers running (i.e. COOKIE)
9030 					 * we just "trust" that it
9031 					 * gets sent or retransmitted.
9032 					 */
9033 					ctl_cnt++;
9034 					if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
9035 						cookie = 1;
9036 						no_out_cnt = 1;
9037 					} else if (chk->rec.chunk_id.id == SCTP_ECN_ECHO) {
9038 						/*
9039 						 * Increment the ECNE send count here;
9040 						 * this means we may be over-zealous in
9041 						 * our counting if the send fails, but it's
9042 						 * the best place to do it (we used to do
9043 						 * it in the queue of the chunk, but that did
9044 						 * not tell how many times it was sent).
9045 						 */
9046 						SCTP_STAT_INCR(sctps_sendecne);
9047 					}
9048 					chk->sent = SCTP_DATAGRAM_SENT;
9049 					if (chk->whoTo == NULL) {
9050 						chk->whoTo = net;
9051 						atomic_add_int(&net->ref_count, 1);
9052 					}
9053 					chk->snd_count++;
9054 				}
9055 				if (mtu == 0) {
9056 					/*
9057 					 * Ok we are out of room but we can
9058 					 * output without affecting the
9059 					 * flight size since this little guy
9060 					 * is a control only packet.
9061 					 */
9062 					if (asconf) {
9063 						sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, net);
9064 						/*
9065 						 * do NOT clear the asconf
9066 						 * flag as it is used to do
9067 						 * appropriate source address
9068 						 * selection.
9069 						 */
9070 					}
9071 					if (cookie) {
9072 						sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net);
9073 						cookie = 0;
9074 					}
9075 					/* Only HB or ASCONF advances time */
9076 					if (hbflag) {
9077 						if (*now_filled == 0) {
9078 							(void)SCTP_GETTIME_TIMEVAL(now);
9079 							*now_filled = 1;
9080 						}
9081 						net->last_sent_time = *now;
9082 						hbflag = 0;
9083 					}
9084 					if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
9085 					                                        (struct sockaddr *)&net->ro._l_addr,
9086 					                                        outchain,
9087 					                                        auth_offset, auth,
9088 					                                        stcb->asoc.authinfo.active_keyid,
9089 					                                        no_fragmentflg, 0, asconf,
9090 					                                        inp->sctp_lport, stcb->rport,
9091 					                                        htonl(stcb->asoc.peer_vtag),
9092 					                                        net->port, NULL,
9093 #if defined(__FreeBSD__)
9094 					                                        0, 0,
9095 #endif
9096 					                                        so_locked))) {
9097 						/* error, we could not output */
9098 						SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
9099 						if (from_where == 0) {
9100 							SCTP_STAT_INCR(sctps_lowlevelerrusr);
9101 						}
9102 						if (error == ENOBUFS) {
9103 							asoc->ifp_had_enobuf = 1;
9104 							SCTP_STAT_INCR(sctps_lowlevelerr);
9105 						}
9106 						if (error == EHOSTUNREACH) {
9107 							/*
9108 							 * Destination went
9109 							 * unreachable
9110 							 * during this send
9111 							 */
9112 							sctp_move_chunks_from_net(stcb, net);
9113 						}
9114 						*reason_code = 7;
9115 						break;
9116 					} else {
9117 						asoc->ifp_had_enobuf = 0;
9118 					}
9119 					/*
9120 					 * increase the number we sent, if a
9121 					 * cookie is sent we don't tell them
9122 					 * any was sent out.
9123 					 */
9124 					outchain = endoutchain = NULL;
9125 					auth = NULL;
9126 					auth_offset = 0;
9127 					if (!no_out_cnt)
9128 						*num_out += ctl_cnt;
9129 					/* recalc a clean slate and setup */
9130 					switch (net->ro._l_addr.sa.sa_family) {
9131 #ifdef INET
9132 						case AF_INET:
9133 							mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
9134 							break;
9135 #endif
9136 #ifdef INET6
9137 						case AF_INET6:
9138 							mtu = net->mtu - SCTP_MIN_OVERHEAD;
9139 							break;
9140 #endif
9141 #if defined(__Userspace__)
9142 						case AF_CONN:
9143 							mtu = net->mtu - sizeof(struct sctphdr);
9144 							break;
9145 #endif
9146 						default:
9147 							/* TSNH */
9148 							mtu = net->mtu;
9149 							break;
9150 					}
9151 					to_out = 0;
9152 					no_fragmentflg = 1;
9153 				}
9154 			}
9155 		}
9156 		if (error != 0) {
9157 			/* try next net */
9158 			continue;
9159 		}
9160 		/* JRI: if dest is in PF state, do not send data to it */
9161 		if ((asoc->sctp_cmt_on_off > 0) &&
9162 		    (net != stcb->asoc.alternate) &&
9163 		    (net->dest_state & SCTP_ADDR_PF)) {
9164 			goto no_data_fill;
9165 		}
9166 		if (net->flight_size >= net->cwnd) {
9167 			goto no_data_fill;
9168 		}
9169 		if ((asoc->sctp_cmt_on_off > 0) &&
9170 		    (SCTP_BASE_SYSCTL(sctp_buffer_splitting) & SCTP_RECV_BUFFER_SPLITTING) &&
9171 		    (net->flight_size > max_rwnd_per_dest)) {
9172 			goto no_data_fill;
9173 		}
9174 		/*
9175 		 * We need a specific accounting for the usage of the
9176 		 * send buffer. We also need to check the number of messages
9177 		 * per net. For now, this is better than nothing and it
9178 		 * is disabled by default...
9179 		 */
9180 		if ((asoc->sctp_cmt_on_off > 0) &&
9181 		    (SCTP_BASE_SYSCTL(sctp_buffer_splitting) & SCTP_SEND_BUFFER_SPLITTING) &&
9182 		    (max_send_per_dest > 0) &&
9183 		    (net->flight_size > max_send_per_dest)) {
9184 			goto no_data_fill;
9185 		}
9186 		/*********************/
9187 		/* Data transmission */
9188 		/*********************/
9189 		/*
9190 		 * if AUTH for DATA is required and no AUTH has been added
9191 		 * yet, account for this in the mtu now... if no data can be
9192 		 * bundled, this adjustment won't matter anyways since the
9193 		 * packet will be going out...
9194 		 */
9195 		data_auth_reqd = sctp_auth_is_required_chunk(SCTP_DATA,
9196 							     stcb->asoc.peer_auth_chunks);
9197 		if (data_auth_reqd && (auth == NULL)) {
9198 			mtu -= sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
9199 		}
9200 		/* now lets add any data within the MTU constraints */
9201 		switch (((struct sockaddr *)&net->ro._l_addr)->sa_family) {
9202 #ifdef INET
9203 		case AF_INET:
9204 			if (net->mtu > SCTP_MIN_V4_OVERHEAD)
9205 				omtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
9206 			else
9207 				omtu = 0;
9208 			break;
9209 #endif
9210 #ifdef INET6
9211 		case AF_INET6:
9212 			if (net->mtu > SCTP_MIN_OVERHEAD)
9213 				omtu = net->mtu - SCTP_MIN_OVERHEAD;
9214 			else
9215 				omtu = 0;
9216 			break;
9217 #endif
9218 #if defined(__Userspace__)
9219 		case AF_CONN:
9220 			if (net->mtu > sizeof(struct sctphdr)) {
9221 				omtu = net->mtu - sizeof(struct sctphdr);
9222 			} else {
9223 				omtu = 0;
9224 			}
9225 			break;
9226 #endif
9227 		default:
9228 			/* TSNH */
9229 			omtu = 0;
9230 			break;
9231 		}
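		/*
		 * Sketch of the constants involved (see the header definitions
		 * for the authoritative values): SCTP_MIN_V4_OVERHEAD is the
		 * IPv4 header plus the SCTP common header, SCTP_MIN_OVERHEAD is
		 * the IPv6 header plus the SCTP common header, so omtu is the
		 * room left for chunks in one unfragmented packet on this path.
		 */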
9232 		if ((((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
9233 		      (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
9234 		     (skip_data_for_this_net == 0)) ||
9235 		    (cookie)) {
9236 			TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
9237 				if (no_data_chunks) {
9238 					/* let only control go out */
9239 					*reason_code = 1;
9240 					break;
9241 				}
9242 				if (net->flight_size >= net->cwnd) {
9243 					/* skip this net, no room for data */
9244 					*reason_code = 2;
9245 					break;
9246 				}
9247 				if ((chk->whoTo != NULL) &&
9248 				    (chk->whoTo != net)) {
9249 					/* Don't send the chunk on this net */
9250 					continue;
9251 				}
9252 
9253 				if (asoc->sctp_cmt_on_off == 0) {
9254 					if ((asoc->alternate) &&
9255 					    (asoc->alternate != net) &&
9256 					    (chk->whoTo == NULL)) {
9257 						continue;
9258 					} else if ((net != asoc->primary_destination) &&
9259 						   (asoc->alternate == NULL) &&
9260 						   (chk->whoTo == NULL)) {
9261 						continue;
9262 					}
9263 				}
9264 				if ((chk->send_size > omtu) && ((chk->flags & CHUNK_FLAGS_FRAGMENT_OK) == 0)) {
9265 					/*-
9266 					 * Strange: we have a chunk that is
9267 					 * too big for its destination and
9268 					 * yet the fragment-ok flag is not
9269 					 * set. Something went wrong when the
9270 					 * PMTU changed and we did not mark
9271 					 * this chunk for some reason. Fix
9272 					 * it here by letting IP fragment
9273 					 * it for now and print a warning.
9274 					 * This really should not
9275 					 * happen ...
9276 					 */
9277 					SCTP_PRINTF("Warning chunk of %d bytes > mtu:%d and yet PMTU disc missed\n",
9278 						    chk->send_size, mtu);
9279 					chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
9280 				}
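				/*
				 * If the association is shutting down and the
				 * sctp_enable_sack_immediately sysctl is on, set the
				 * I-bit (SACK-IMMEDIATELY) on the DATA chunk so the
				 * peer SACKs it right away instead of delaying,
				 * which speeds up the pending shutdown.
				 */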
9281 				if (SCTP_BASE_SYSCTL(sctp_enable_sack_immediately) &&
9282 				    (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) {
9283 					struct sctp_data_chunk *dchkh;
9284 
9285 					dchkh = mtod(chk->data, struct sctp_data_chunk *);
9286 					dchkh->ch.chunk_flags |= SCTP_DATA_SACK_IMMEDIATELY;
9287 				}
9288 				if (((chk->send_size <= mtu) && (chk->send_size <= r_mtu)) ||
9289 				    ((chk->flags & CHUNK_FLAGS_FRAGMENT_OK) && (chk->send_size <= asoc->peers_rwnd))) {
9290 					/* ok we will add this one */
9291 
9292 					/*
9293 					 * Add an AUTH chunk if the chunk
9294 					 * requires it, and save the offset
9295 					 * into the chain for AUTH
9296 					 */
9297 					if (data_auth_reqd) {
9298 						if (auth == NULL) {
9299 							outchain = sctp_add_auth_chunk(outchain,
9300 										       &endoutchain,
9301 										       &auth,
9302 										       &auth_offset,
9303 										       stcb,
9304 										       SCTP_DATA);
9305 							auth_keyid = chk->auth_keyid;
9306 							override_ok = 0;
9307 							SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
9308 						} else if (override_ok) {
9309 							/* use this data's keyid */
9310 							auth_keyid = chk->auth_keyid;
9311 							override_ok = 0;
9312 						} else if (auth_keyid != chk->auth_keyid) {
9313 							/* different keyid, so done bundling */
9314 							break;
9315 						}
9316 					}
9317 					outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain, 0,
9318 								       chk->send_size, chk->copy_by_ref);
9319 					if (outchain == NULL) {
9320 						SCTPDBG(SCTP_DEBUG_OUTPUT3, "No memory?\n");
9321 						if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
9322 							sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
9323 						}
9324 						*reason_code = 3;
9325 						SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
9326 						return (ENOMEM);
9327 					}
9328 					/* update our MTU size */
9329 					/* Do clear IP_DF ? */
9330 					if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
9331 						no_fragmentflg = 0;
9332 					}
9333 					/* unsigned subtraction of mtu */
9334 					if (mtu > chk->send_size)
9335 						mtu -= chk->send_size;
9336 					else
9337 						mtu = 0;
9338 					/* unsigned subtraction of r_mtu */
9339 					if (r_mtu > chk->send_size)
9340 						r_mtu -= chk->send_size;
9341 					else
9342 						r_mtu = 0;
9343 
9344 					to_out += chk->send_size;
9345 					if ((to_out > mx_mtu) && no_fragmentflg) {
9346 #ifdef INVARIANTS
9347 						panic("Exceeding mtu of %d out size is %d", mx_mtu, to_out);
9348 #else
9349 						SCTP_PRINTF("Exceeding mtu of %d out size is %d\n",
9350 							    mx_mtu, to_out);
9351 #endif
9352 					}
9353 					chk->window_probe = 0;
9354 					data_list[bundle_at++] = chk;
9355 					if (bundle_at >= SCTP_MAX_DATA_BUNDLING) {
9356 						break;
9357 					}
9358 					if (chk->sent == SCTP_DATAGRAM_UNSENT) {
9359 						if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
9360 							SCTP_STAT_INCR_COUNTER64(sctps_outorderchunks);
9361 						} else {
9362 							SCTP_STAT_INCR_COUNTER64(sctps_outunorderchunks);
9363 						}
9364 						if (((chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) == SCTP_DATA_LAST_FRAG) &&
9365 						    ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0))
9366 							/* Count the number of user messages that were
9367 							 * fragmented; we do this by counting only when
9368 							 * we see a LAST fragment.
9369 							 */
9370 							SCTP_STAT_INCR_COUNTER64(sctps_fragusrmsgs);
9371 					}
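					/*
					 * If either remaining budget (mtu or r_mtu) is
					 * exhausted, or we are limited to a single chunk,
					 * stop bundling. When that single chunk goes out
					 * with nothing else in flight it serves as a
					 * window probe, so mark the chunk and the net.
					 */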
9372 					if ((mtu == 0) || (r_mtu == 0) || (one_chunk)) {
9373 						if ((one_chunk) && (stcb->asoc.total_flight == 0)) {
9374 							data_list[0]->window_probe = 1;
9375 							net->window_probe = 1;
9376 						}
9377 						break;
9378 					}
9379 				} else {
9380 					/*
9381 					 * Chunks must be sent in order of
9382 					 * their TSNs (on a given network).
9383 					 */
9384 					break;
9385 				}
9386 			}	/* for (chunk gather loop for this net) */
9387 		}		/* if asoc.state OPEN */
9388 	no_data_fill:
9389 		/* Is there something to send for this destination? */
9390 		if (outchain) {
9391 			/* We may need to start a control timer or two */
9392 			if (asconf) {
9393 				sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp,
9394 						 stcb, net);
9395 				/*
9396 				 * do NOT clear the asconf flag as it is used
9397 				 * to do appropriate source address selection.
9398 				 */
9399 			}
9400 			if (cookie) {
9401 				sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net);
9402 				cookie = 0;
9403 			}
9404 			/* must start a send timer if data is being sent */
9405 			if (bundle_at && (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer))) {
9406 				/*
9407 				 * No timer running on this destination,
9408 				 * so (re)start it.
9409 				 */
9410 				sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
9411 			}
9412 			if (bundle_at || hbflag) {
9413 				/* For data/asconf and hb set time */
9414 				if (*now_filled == 0) {
9415 					(void)SCTP_GETTIME_TIMEVAL(now);
9416 					*now_filled = 1;
9417 				}
9418 				net->last_sent_time = *now;
9419 			}
9420 			/* Now send it, if there is anything to send :> */
9421 			if ((error = sctp_lowlevel_chunk_output(inp,
9422 			                                        stcb,
9423 			                                        net,
9424 			                                        (struct sockaddr *)&net->ro._l_addr,
9425 			                                        outchain,
9426 			                                        auth_offset,
9427 			                                        auth,
9428 			                                        auth_keyid,
9429 			                                        no_fragmentflg,
9430 			                                        bundle_at,
9431 			                                        asconf,
9432 			                                        inp->sctp_lport, stcb->rport,
9433 			                                        htonl(stcb->asoc.peer_vtag),
9434 			                                        net->port, NULL,
9435 #if defined(__FreeBSD__)
9436 			                                        0, 0,
9437 #endif
9438 			                                        so_locked))) {
9439 				/* error, we could not output */
9440 				SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
9441 				if (from_where == 0) {
9442 					SCTP_STAT_INCR(sctps_lowlevelerrusr);
9443 				}
9444 				if (error == ENOBUFS) {
9445 					asoc->ifp_had_enobuf = 1;
9446 					SCTP_STAT_INCR(sctps_lowlevelerr);
9447 				}
9448 				if (error == EHOSTUNREACH) {
9449 					/*
9450 					 * Destination went unreachable
9451 					 * during this send
9452 					 */
9453 					sctp_move_chunks_from_net(stcb, net);
9454 				}
9455 				*reason_code = 6;
9456 				/*-
9457 				 * Reset these out of paranoia. As far as
9458 				 * I can tell the continue takes us back to
9459 				 * the top of the for loop, but just to make
9460 				 * sure, reset them again here.
9461 				 */
9462 				ctl_cnt = bundle_at = 0;
9463 				continue; /* This takes us back to the for() for the nets. */
9464 			} else {
9465 				asoc->ifp_had_enobuf = 0;
9466 			}
9467 			endoutchain = NULL;
9468 			auth = NULL;
9469 			auth_offset = 0;
9470 			if (!no_out_cnt) {
9471 				*num_out += (ctl_cnt + bundle_at);
9472 			}
9473 			if (bundle_at) {
9474 				/* setup for a RTO measurement */
9475 				tsns_sent = data_list[0]->rec.data.tsn;
9476 				/* fill time if not already filled */
9477 				if (*now_filled == 0) {
9478 					(void)SCTP_GETTIME_TIMEVAL(&asoc->time_last_sent);
9479 					*now_filled = 1;
9480 					*now = asoc->time_last_sent;
9481 				} else {
9482 					asoc->time_last_sent = *now;
9483 				}
9484 				if (net->rto_needed) {
9485 					data_list[0]->do_rtt = 1;
9486 					net->rto_needed = 0;
9487 				}
9488 				SCTP_STAT_INCR_BY(sctps_senddata, bundle_at);
9489 				sctp_clean_up_datalist(stcb, asoc, data_list, bundle_at, net);
9490 			}
9491 			if (one_chunk) {
9492 				break;
9493 			}
9494 		}
9495 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
9496 			sctp_log_cwnd(stcb, net, tsns_sent, SCTP_CWND_LOG_FROM_SEND);
9497 		}
9498 	}
9499 	if (old_start_at == NULL) {
9500 		old_start_at = start_at;
9501 		start_at = TAILQ_FIRST(&asoc->nets);
9502 		if (old_start_at)
9503 			goto again_one_more_time;
9504 	}
9505 
9506 	/*
9507 	 * At the end there should be no non-timed chunks hanging on this
9508 	 * queue.
9509 	 */
9510 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
9511 		sctp_log_cwnd(stcb, net, *num_out, SCTP_CWND_LOG_FROM_SEND);
9512 	}
9513 	if ((*num_out == 0) && (*reason_code == 0)) {
9514 		*reason_code = 4;
9515 	} else {
9516 		*reason_code = 5;
9517 	}
9518 	sctp_clean_up_ctl(stcb, asoc, so_locked);
9519 	return (0);
9520 }
9521 
9522 void
9523 sctp_queue_op_err(struct sctp_tcb *stcb, struct mbuf *op_err)
9524 {
9525 	/*-
9526 	 * Prepend an OPERATION_ERROR chunk header and put the chunk at the
9527 	 * end of the control chunk queue.
9528 	 */
9529 	struct sctp_chunkhdr *hdr;
9530 	struct sctp_tmit_chunk *chk;
9531 	struct mbuf *mat, *last_mbuf;
9532 	uint32_t chunk_length;
9533 	uint16_t padding_length;
9534 
9535 	SCTP_TCB_LOCK_ASSERT(stcb);
9536 	SCTP_BUF_PREPEND(op_err, sizeof(struct sctp_chunkhdr), M_NOWAIT);
9537 	if (op_err == NULL) {
9538 		return;
9539 	}
9540 	last_mbuf = NULL;
9541 	chunk_length = 0;
9542 	for (mat = op_err; mat != NULL; mat = SCTP_BUF_NEXT(mat)) {
9543 		chunk_length += SCTP_BUF_LEN(mat);
9544 		if (SCTP_BUF_NEXT(mat) == NULL) {
9545 			last_mbuf = mat;
9546 		}
9547 	}
9548 	if (chunk_length > SCTP_MAX_CHUNK_LENGTH) {
9549 		sctp_m_freem(op_err);
9550 		return;
9551 	}
9552 	padding_length = chunk_length % 4;
9553 	if (padding_length != 0) {
9554 		padding_length = 4 - padding_length;
9555 	}
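	/*
	 * Chunks must end on a 4-byte boundary, so pad the cause data if
	 * needed. Worked example: chunk_length = 10 gives
	 * padding_length = 4 - (10 % 4) = 2 bytes of padding.
	 */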
9556 	if (padding_length != 0) {
9557 		if (sctp_add_pad_tombuf(last_mbuf, padding_length) == NULL) {
9558 			sctp_m_freem(op_err);
9559 			return;
9560 		}
9561 	}
9562 	sctp_alloc_a_chunk(stcb, chk);
9563 	if (chk == NULL) {
9564 		/* no memory */
9565 		sctp_m_freem(op_err);
9566 		return;
9567 	}
9568 	chk->copy_by_ref = 0;
9569 	chk->rec.chunk_id.id = SCTP_OPERATION_ERROR;
9570 	chk->rec.chunk_id.can_take_data = 0;
9571 	chk->flags = 0;
9572 	chk->send_size = (uint16_t)chunk_length;
9573 	chk->sent = SCTP_DATAGRAM_UNSENT;
9574 	chk->snd_count = 0;
9575 	chk->asoc = &stcb->asoc;
9576 	chk->data = op_err;
9577 	chk->whoTo = NULL;
9578 	hdr = mtod(op_err, struct sctp_chunkhdr *);
9579 	hdr->chunk_type = SCTP_OPERATION_ERROR;
9580 	hdr->chunk_flags = 0;
9581 	hdr->chunk_length = htons(chk->send_size);
9582 	TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9583 	chk->asoc->ctrl_queue_cnt++;
9584 }
9585 
9586 int
9587 sctp_send_cookie_echo(struct mbuf *m,
9588     int offset, int limit,
9589     struct sctp_tcb *stcb,
9590     struct sctp_nets *net)
9591 {
9592 	/*-
9593 	 * pull out the cookie and put it at the front of the control chunk
9594 	 * queue.
9595 	 */
9596 	int at;
9597 	struct mbuf *cookie;
9598 	struct sctp_paramhdr param, *phdr;
9599 	struct sctp_chunkhdr *hdr;
9600 	struct sctp_tmit_chunk *chk;
9601 	uint16_t ptype, plen;
9602 
9603 	SCTP_TCB_LOCK_ASSERT(stcb);
9604 	/* First find the cookie in the param area */
9605 	cookie = NULL;
9606 	at = offset + sizeof(struct sctp_init_chunk);
9607 	for (;;) {
9608 		phdr = sctp_get_next_param(m, at, &param, sizeof(param));
9609 		if (phdr == NULL) {
9610 			return (-3);
9611 		}
9612 		ptype = ntohs(phdr->param_type);
9613 		plen = ntohs(phdr->param_length);
9614 		if (plen < sizeof(struct sctp_paramhdr)) {
9615 			return (-6);
9616 		}
9617 		if (ptype == SCTP_STATE_COOKIE) {
9618 			int pad;
9619 
9620 			/* found the cookie */
9621 			if (at + plen > limit) {
9622 				return (-7);
9623 			}
9624 			cookie = SCTP_M_COPYM(m, at, plen, M_NOWAIT);
9625 			if (cookie == NULL) {
9626 				/* No memory */
9627 				return (-2);
9628 			}
9629 			if ((pad = (plen % 4)) > 0) {
9630 				pad = 4 - pad;
9631 			}
9632 			if (pad > 0) {
9633 				if (sctp_pad_lastmbuf(cookie, pad, NULL) == NULL) {
9634 					return (-8);
9635 				}
9636 			}
9637 #ifdef SCTP_MBUF_LOGGING
9638 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
9639 				sctp_log_mbc(cookie, SCTP_MBUF_ICOPY);
9640 			}
9641 #endif
9642 			break;
9643 		}
9644 		at += SCTP_SIZE32(plen);
9645 	}
9646 	/* OK, we got the cookie; change it into a COOKIE ECHO chunk. */
9647 	/* First, change the header from parameter to chunk. */
9648 	hdr = mtod(cookie, struct sctp_chunkhdr *);
9649 	hdr->chunk_type = SCTP_COOKIE_ECHO;
9650 	hdr->chunk_flags = 0;
9651 	/* get the chunk stuff now and place it in the FRONT of the queue */
9652 	sctp_alloc_a_chunk(stcb, chk);
9653 	if (chk == NULL) {
9654 		/* no memory */
9655 		sctp_m_freem(cookie);
9656 		return (-5);
9657 	}
9658 	chk->copy_by_ref = 0;
9659 	chk->rec.chunk_id.id = SCTP_COOKIE_ECHO;
9660 	chk->rec.chunk_id.can_take_data = 0;
9661 	chk->flags = CHUNK_FLAGS_FRAGMENT_OK;
9662 	chk->send_size = SCTP_SIZE32(plen);
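	/* SCTP_SIZE32() rounds plen up to the next multiple of 4, e.g. a
	 * 262-byte cookie parameter yields a 264-byte COOKIE ECHO chunk. */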
9663 	chk->sent = SCTP_DATAGRAM_UNSENT;
9664 	chk->snd_count = 0;
9665 	chk->asoc = &stcb->asoc;
9666 	chk->data = cookie;
9667 	chk->whoTo = net;
9668 	atomic_add_int(&chk->whoTo->ref_count, 1);
9669 	TAILQ_INSERT_HEAD(&chk->asoc->control_send_queue, chk, sctp_next);
9670 	chk->asoc->ctrl_queue_cnt++;
9671 	return (0);
9672 }
9673 
9674 void
9675 sctp_send_heartbeat_ack(struct sctp_tcb *stcb,
9676     struct mbuf *m,
9677     int offset,
9678     int chk_length,
9679     struct sctp_nets *net)
9680 {
9681 	/*
9682 	 * Take a HEARTBEAT request, turn it into a HEARTBEAT-ACK, and queue it.
9683 	 */
9684 	struct mbuf *outchain;
9685 	struct sctp_chunkhdr *chdr;
9686 	struct sctp_tmit_chunk *chk;
9687 
9688 	if (net == NULL)
9689 		/* must have a net pointer */
9690 		return;
9691 
9692 	outchain = SCTP_M_COPYM(m, offset, chk_length, M_NOWAIT);
9693 	if (outchain == NULL) {
9694 		/* gak out of memory */
9695 		return;
9696 	}
9697 #ifdef SCTP_MBUF_LOGGING
9698 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
9699 		sctp_log_mbc(outchain, SCTP_MBUF_ICOPY);
9700 	}
9701 #endif
9702 	chdr = mtod(outchain, struct sctp_chunkhdr *);
9703 	chdr->chunk_type = SCTP_HEARTBEAT_ACK;
9704 	chdr->chunk_flags = 0;
9705 	if (chk_length % 4 != 0) {
9706 		sctp_pad_lastmbuf(outchain, 4 - (chk_length % 4), NULL);
9707 	}
9708 	sctp_alloc_a_chunk(stcb, chk);
9709 	if (chk == NULL) {
9710 		/* no memory */
9711 		sctp_m_freem(outchain);
9712 		return;
9713 	}
9714 	chk->copy_by_ref = 0;
9715 	chk->rec.chunk_id.id = SCTP_HEARTBEAT_ACK;
9716 	chk->rec.chunk_id.can_take_data = 1;
9717 	chk->flags = 0;
9718 	chk->send_size = chk_length;
9719 	chk->sent = SCTP_DATAGRAM_UNSENT;
9720 	chk->snd_count = 0;
9721 	chk->asoc = &stcb->asoc;
9722 	chk->data = outchain;
9723 	chk->whoTo = net;
9724 	atomic_add_int(&chk->whoTo->ref_count, 1);
9725 	TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9726 	chk->asoc->ctrl_queue_cnt++;
9727 }
9728 
9729 void
9730 sctp_send_cookie_ack(struct sctp_tcb *stcb)
9731 {
9732 	/* formulate and queue a cookie-ack back to sender */
9733 	struct mbuf *cookie_ack;
9734 	struct sctp_chunkhdr *hdr;
9735 	struct sctp_tmit_chunk *chk;
9736 
9737 	SCTP_TCB_LOCK_ASSERT(stcb);
9738 
9739 	cookie_ack = sctp_get_mbuf_for_msg(sizeof(struct sctp_chunkhdr), 0, M_NOWAIT, 1, MT_HEADER);
9740 	if (cookie_ack == NULL) {
9741 		/* no mbuf's */
9742 		return;
9743 	}
9744 	SCTP_BUF_RESV_UF(cookie_ack, SCTP_MIN_OVERHEAD);
9745 	sctp_alloc_a_chunk(stcb, chk);
9746 	if (chk == NULL) {
9747 		/* no memory */
9748 		sctp_m_freem(cookie_ack);
9749 		return;
9750 	}
9751 	chk->copy_by_ref = 0;
9752 	chk->rec.chunk_id.id = SCTP_COOKIE_ACK;
9753 	chk->rec.chunk_id.can_take_data = 1;
9754 	chk->flags = 0;
9755 	chk->send_size = sizeof(struct sctp_chunkhdr);
9756 	chk->sent = SCTP_DATAGRAM_UNSENT;
9757 	chk->snd_count = 0;
9758 	chk->asoc = &stcb->asoc;
9759 	chk->data = cookie_ack;
9760 	if (chk->asoc->last_control_chunk_from != NULL) {
9761 		chk->whoTo = chk->asoc->last_control_chunk_from;
9762 		atomic_add_int(&chk->whoTo->ref_count, 1);
9763 	} else {
9764 		chk->whoTo = NULL;
9765 	}
9766 	hdr = mtod(cookie_ack, struct sctp_chunkhdr *);
9767 	hdr->chunk_type = SCTP_COOKIE_ACK;
9768 	hdr->chunk_flags = 0;
9769 	hdr->chunk_length = htons(chk->send_size);
9770 	SCTP_BUF_LEN(cookie_ack) = chk->send_size;
9771 	TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9772 	chk->asoc->ctrl_queue_cnt++;
9773 	return;
9774 }
9775 
9776 
9777 void
9778 sctp_send_shutdown_ack(struct sctp_tcb *stcb, struct sctp_nets *net)
9779 {
9780 	/* formulate and queue a SHUTDOWN-ACK back to the sender */
9781 	struct mbuf *m_shutdown_ack;
9782 	struct sctp_shutdown_ack_chunk *ack_cp;
9783 	struct sctp_tmit_chunk *chk;
9784 
9785 	m_shutdown_ack = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_ack_chunk), 0, M_NOWAIT, 1, MT_HEADER);
9786 	if (m_shutdown_ack == NULL) {
9787 		/* no mbuf's */
9788 		return;
9789 	}
9790 	SCTP_BUF_RESV_UF(m_shutdown_ack, SCTP_MIN_OVERHEAD);
9791 	sctp_alloc_a_chunk(stcb, chk);
9792 	if (chk == NULL) {
9793 		/* no memory */
9794 		sctp_m_freem(m_shutdown_ack);
9795 		return;
9796 	}
9797 	chk->copy_by_ref = 0;
9798 	chk->rec.chunk_id.id = SCTP_SHUTDOWN_ACK;
9799 	chk->rec.chunk_id.can_take_data = 1;
9800 	chk->flags = 0;
9801 	chk->send_size = sizeof(struct sctp_chunkhdr);
9802 	chk->sent = SCTP_DATAGRAM_UNSENT;
9803 	chk->snd_count = 0;
9804 	chk->asoc = &stcb->asoc;
9805 	chk->data = m_shutdown_ack;
9806 	chk->whoTo = net;
9807 	if (chk->whoTo) {
9808 		atomic_add_int(&chk->whoTo->ref_count, 1);
9809 	}
9810 	ack_cp = mtod(m_shutdown_ack, struct sctp_shutdown_ack_chunk *);
9811 	ack_cp->ch.chunk_type = SCTP_SHUTDOWN_ACK;
9812 	ack_cp->ch.chunk_flags = 0;
9813 	ack_cp->ch.chunk_length = htons(chk->send_size);
9814 	SCTP_BUF_LEN(m_shutdown_ack) = chk->send_size;
9815 	TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9816 	chk->asoc->ctrl_queue_cnt++;
9817 	return;
9818 }
9819 
9820 void
9821 sctp_send_shutdown(struct sctp_tcb *stcb, struct sctp_nets *net)
9822 {
9823 	/* formulate and queue a SHUTDOWN to the sender */
9824 	struct mbuf *m_shutdown;
9825 	struct sctp_shutdown_chunk *shutdown_cp;
9826 	struct sctp_tmit_chunk *chk;
9827 
9828 	TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
9829 		if (chk->rec.chunk_id.id == SCTP_SHUTDOWN) {
9830 			/* We already have a SHUTDOWN queued. Reuse it. */
9831 			if (chk->whoTo) {
9832 				sctp_free_remote_addr(chk->whoTo);
9833 				chk->whoTo = NULL;
9834 			}
9835 			break;
9836 		}
9837 	}
9838 	if (chk == NULL) {
9839 		m_shutdown = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_chunk), 0, M_NOWAIT, 1, MT_HEADER);
9840 		if (m_shutdown == NULL) {
9841 			/* no mbuf's */
9842 			return;
9843 		}
9844 		SCTP_BUF_RESV_UF(m_shutdown, SCTP_MIN_OVERHEAD);
9845 		sctp_alloc_a_chunk(stcb, chk);
9846 		if (chk == NULL) {
9847 			/* no memory */
9848 			sctp_m_freem(m_shutdown);
9849 			return;
9850 		}
9851 		chk->copy_by_ref = 0;
9852 		chk->rec.chunk_id.id = SCTP_SHUTDOWN;
9853 		chk->rec.chunk_id.can_take_data = 1;
9854 		chk->flags = 0;
9855 		chk->send_size = sizeof(struct sctp_shutdown_chunk);
9856 		chk->sent = SCTP_DATAGRAM_UNSENT;
9857 		chk->snd_count = 0;
9858 		chk->asoc = &stcb->asoc;
9859 		chk->data = m_shutdown;
9860 		chk->whoTo = net;
9861 		if (chk->whoTo) {
9862 			atomic_add_int(&chk->whoTo->ref_count, 1);
9863 		}
9864 		shutdown_cp = mtod(m_shutdown, struct sctp_shutdown_chunk *);
9865 		shutdown_cp->ch.chunk_type = SCTP_SHUTDOWN;
9866 		shutdown_cp->ch.chunk_flags = 0;
9867 		shutdown_cp->ch.chunk_length = htons(chk->send_size);
9868 		shutdown_cp->cumulative_tsn_ack = htonl(stcb->asoc.cumulative_tsn);
9869 		SCTP_BUF_LEN(m_shutdown) = chk->send_size;
9870 		TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9871 		chk->asoc->ctrl_queue_cnt++;
9872 	} else {
9873 		TAILQ_REMOVE(&stcb->asoc.control_send_queue, chk, sctp_next);
9874 		chk->whoTo = net;
9875 		if (chk->whoTo) {
9876 			atomic_add_int(&chk->whoTo->ref_count, 1);
9877 		}
9878 		shutdown_cp = mtod(chk->data, struct sctp_shutdown_chunk *);
9879 		shutdown_cp->cumulative_tsn_ack = htonl(stcb->asoc.cumulative_tsn);
9880 		TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
9881 	}
9882 	return;
9883 }
9884 
9885 void
9886 sctp_send_asconf(struct sctp_tcb *stcb, struct sctp_nets *net, int addr_locked)
9887 {
9888 	/*
9889 	 * formulate and queue an ASCONF to the peer.
9890 	 * ASCONF parameters should be queued on the assoc queue.
9891 	 */
9892 	struct sctp_tmit_chunk *chk;
9893 	struct mbuf *m_asconf;
9894 	int len;
9895 
9896 	SCTP_TCB_LOCK_ASSERT(stcb);
9897 
9898 	if ((!TAILQ_EMPTY(&stcb->asoc.asconf_send_queue)) &&
9899 	    (!sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_MULTIPLE_ASCONFS))) {
9900 		/* can't send a new one if there is one in flight already */
9901 		return;
9902 	}
9903 
9904 	/* compose an ASCONF chunk, maximum length is PMTU */
9905 	m_asconf = sctp_compose_asconf(stcb, &len, addr_locked);
9906 	if (m_asconf == NULL) {
9907 		return;
9908 	}
9909 
9910 	sctp_alloc_a_chunk(stcb, chk);
9911 	if (chk == NULL) {
9912 		/* no memory */
9913 		sctp_m_freem(m_asconf);
9914 		return;
9915 	}
9916 
9917 	chk->copy_by_ref = 0;
9918 	chk->rec.chunk_id.id = SCTP_ASCONF;
9919 	chk->rec.chunk_id.can_take_data = 0;
9920 	chk->flags = CHUNK_FLAGS_FRAGMENT_OK;
9921 	chk->data = m_asconf;
9922 	chk->send_size = len;
9923 	chk->sent = SCTP_DATAGRAM_UNSENT;
9924 	chk->snd_count = 0;
9925 	chk->asoc = &stcb->asoc;
9926 	chk->whoTo = net;
9927 	if (chk->whoTo) {
9928 		atomic_add_int(&chk->whoTo->ref_count, 1);
9929 	}
9930 	TAILQ_INSERT_TAIL(&chk->asoc->asconf_send_queue, chk, sctp_next);
9931 	chk->asoc->ctrl_queue_cnt++;
9932 	return;
9933 }
9934 
9935 void
9936 sctp_send_asconf_ack(struct sctp_tcb *stcb)
9937 {
9938 	/*
9939 	 * Formulate and queue an ASCONF-ACK back to the sender.
9940 	 * The ASCONF-ACK must be stored in the tcb.
9941 	 */
9942 	struct sctp_tmit_chunk *chk;
9943 	struct sctp_asconf_ack *ack, *latest_ack;
9944 	struct mbuf *m_ack;
9945 	struct sctp_nets *net = NULL;
9946 
9947 	SCTP_TCB_LOCK_ASSERT(stcb);
9948 	/* Get the latest ASCONF-ACK */
9949 	latest_ack = TAILQ_LAST(&stcb->asoc.asconf_ack_sent, sctp_asconf_ackhead);
9950 	if (latest_ack == NULL) {
9951 		return;
9952 	}
9953 	if (latest_ack->last_sent_to != NULL &&
9954 	    latest_ack->last_sent_to == stcb->asoc.last_control_chunk_from) {
9955 		/* we're doing a retransmission */
9956 		net = sctp_find_alternate_net(stcb, stcb->asoc.last_control_chunk_from, 0);
9957 		if (net == NULL) {
9958 			/* no alternate */
9959 			if (stcb->asoc.last_control_chunk_from == NULL) {
9960 				if (stcb->asoc.alternate) {
9961 					net = stcb->asoc.alternate;
9962 				} else {
9963 					net = stcb->asoc.primary_destination;
9964 				}
9965 			} else {
9966 				net = stcb->asoc.last_control_chunk_from;
9967 			}
9968 		}
9969 	} else {
9970 		/* normal case */
9971 		if (stcb->asoc.last_control_chunk_from == NULL) {
9972 			if (stcb->asoc.alternate) {
9973 				net = stcb->asoc.alternate;
9974 			} else {
9975 				net = stcb->asoc.primary_destination;
9976 			}
9977 		} else {
9978 			net = stcb->asoc.last_control_chunk_from;
9979 		}
9980 	}
9981 	latest_ack->last_sent_to = net;
9982 
9983 	TAILQ_FOREACH(ack, &stcb->asoc.asconf_ack_sent, next) {
9984 		if (ack->data == NULL) {
9985 			continue;
9986 		}
9987 
9988 		/* copy the asconf_ack */
9989 		m_ack = SCTP_M_COPYM(ack->data, 0, M_COPYALL, M_NOWAIT);
9990 		if (m_ack == NULL) {
9991 			/* couldn't copy it */
9992 			return;
9993 		}
9994 #ifdef SCTP_MBUF_LOGGING
9995 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
9996 			sctp_log_mbc(m_ack, SCTP_MBUF_ICOPY);
9997 		}
9998 #endif
9999 
10000 		sctp_alloc_a_chunk(stcb, chk);
10001 		if (chk == NULL) {
10002 			/* no memory */
10003 			if (m_ack)
10004 				sctp_m_freem(m_ack);
10005 			return;
10006 		}
10007 		chk->copy_by_ref = 0;
10008 		chk->rec.chunk_id.id = SCTP_ASCONF_ACK;
10009 		chk->rec.chunk_id.can_take_data = 1;
10010 		chk->flags = CHUNK_FLAGS_FRAGMENT_OK;
10011 		chk->whoTo = net;
10012 		if (chk->whoTo) {
10013 			atomic_add_int(&chk->whoTo->ref_count, 1);
10014 		}
10015 		chk->data = m_ack;
10016 		chk->send_size = ack->len;
10017 		chk->sent = SCTP_DATAGRAM_UNSENT;
10018 		chk->snd_count = 0;
10019 		chk->asoc = &stcb->asoc;
10020 
10021 		TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
10022 		chk->asoc->ctrl_queue_cnt++;
10023 	}
10024 	return;
10025 }
10026 
10027 
10028 static int
10029 sctp_chunk_retransmission(struct sctp_inpcb *inp,
10030     struct sctp_tcb *stcb,
10031     struct sctp_association *asoc,
10032     int *cnt_out, struct timeval *now, int *now_filled, int *fr_done, int so_locked
10033 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
10034     SCTP_UNUSED
10035 #endif
10036     )
10037 {
10038 	/*-
10039 	 * Send out one MTU of retransmission. If fast_retransmit is
10040 	 * happening we ignore the cwnd. Otherwise we obey the cwnd and
10041 	 * rwnd. A COOKIE or ASCONF in the control chunk queue is
10042 	 * retransmitted by itself.
10043 	 *
10044 	 * For data chunks we pick out the lowest TSNs in the sent_queue
10045 	 * marked for resend and bundle them together (up to an MTU of the
10046 	 * destination). The address to send to should have been
10047 	 * selected/changed where the retransmission was marked (i.e. in the
10048 	 * FR or T3-timeout routines).
10049 	 */
10050 	struct sctp_tmit_chunk *data_list[SCTP_MAX_DATA_BUNDLING];
10051 	struct sctp_tmit_chunk *chk, *fwd;
10052 	struct mbuf *m, *endofchain;
10053 	struct sctp_nets *net = NULL;
10054 	uint32_t tsns_sent = 0;
10055 	int no_fragmentflg, bundle_at, cnt_thru;
10056 	unsigned int mtu;
10057 	int error, i, one_chunk, fwd_tsn, ctl_cnt, tmr_started;
10058 	struct sctp_auth_chunk *auth = NULL;
10059 	uint32_t auth_offset = 0;
10060 	uint16_t auth_keyid;
10061 	int override_ok = 1;
10062 	int data_auth_reqd = 0;
10063 	uint32_t dmtu = 0;
10064 
10065 #if defined(__APPLE__)
10066 	if (so_locked) {
10067 		sctp_lock_assert(SCTP_INP_SO(inp));
10068 	} else {
10069 		sctp_unlock_assert(SCTP_INP_SO(inp));
10070 	}
10071 #endif
10072 	SCTP_TCB_LOCK_ASSERT(stcb);
10073 	tmr_started = ctl_cnt = bundle_at = error = 0;
10074 	no_fragmentflg = 1;
10075 	fwd_tsn = 0;
10076 	*cnt_out = 0;
10077 	fwd = NULL;
10078 	endofchain = m = NULL;
10079 	auth_keyid = stcb->asoc.authinfo.active_keyid;
10080 #ifdef SCTP_AUDITING_ENABLED
10081 	sctp_audit_log(0xC3, 1);
10082 #endif
10083 	if ((TAILQ_EMPTY(&asoc->sent_queue)) &&
10084 	    (TAILQ_EMPTY(&asoc->control_send_queue))) {
10085 		SCTPDBG(SCTP_DEBUG_OUTPUT1,"SCTP hits empty queue with cnt set to %d?\n",
10086 			asoc->sent_queue_retran_cnt);
10087 		asoc->sent_queue_cnt = 0;
10088 		asoc->sent_queue_cnt_removeable = 0;
10089 		/* send back 0/0 so we enter normal transmission */
10090 		*cnt_out = 0;
10091 		return (0);
10092 	}
10093 	TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
10094 		if ((chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) ||
10095 		    (chk->rec.chunk_id.id == SCTP_STREAM_RESET) ||
10096 		    (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN)) {
10097 			if (chk->sent != SCTP_DATAGRAM_RESEND) {
10098 				continue;
10099 			}
10100 			if (chk->rec.chunk_id.id == SCTP_STREAM_RESET) {
10101 				if (chk != asoc->str_reset) {
10102 					/*
10103 					 * not eligible for retransmission
10104 					 * if it's not ours
10105 					 */
10106 					continue;
10107 				}
10108 			}
10109 			ctl_cnt++;
10110 			if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) {
10111 				fwd_tsn = 1;
10112 			}
10113 			/*
10114 			 * Add an AUTH chunk if the chunk requires it, and
10115 			 * save the offset into the chain for AUTH
10116 			 */
10117 			if ((auth == NULL) &&
10118 			    (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
10119 							 stcb->asoc.peer_auth_chunks))) {
10120 				m = sctp_add_auth_chunk(m, &endofchain,
10121 							&auth, &auth_offset,
10122 							stcb,
10123 							chk->rec.chunk_id.id);
10124 				SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
10125 			}
10126 			m = sctp_copy_mbufchain(chk->data, m, &endofchain, 0, chk->send_size, chk->copy_by_ref);
10127 			break;
10128 		}
10129 	}
10130 	one_chunk = 0;
10131 	cnt_thru = 0;
10132 	/* do we have control chunks to retransmit? */
10133 	if (m != NULL) {
10134 		/* Start a timer no matter if we succeed or fail */
10135 		if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
10136 			sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, chk->whoTo);
10137 		} else if (chk->rec.chunk_id.id == SCTP_ASCONF)
10138 			sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, chk->whoTo);
10139 		chk->snd_count++;	/* update our count */
10140 		if ((error = sctp_lowlevel_chunk_output(inp, stcb, chk->whoTo,
10141 		                                        (struct sockaddr *)&chk->whoTo->ro._l_addr, m,
10142 		                                        auth_offset, auth, stcb->asoc.authinfo.active_keyid,
10143 		                                        no_fragmentflg, 0, 0,
10144 		                                        inp->sctp_lport, stcb->rport, htonl(stcb->asoc.peer_vtag),
10145 		                                        chk->whoTo->port, NULL,
10146 #if defined(__FreeBSD__)
10147 		                                        0, 0,
10148 #endif
10149 		                                        so_locked))) {
10150 			SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
10151 			if (error == ENOBUFS) {
10152 				asoc->ifp_had_enobuf = 1;
10153 				SCTP_STAT_INCR(sctps_lowlevelerr);
10154 			}
10155 			return (error);
10156 		} else {
10157 			asoc->ifp_had_enobuf = 0;
10158 		}
10159 		endofchain = NULL;
10160 		auth = NULL;
10161 		auth_offset = 0;
10162 		/*
10163 		 * We don't want to mark the net's last_sent_time here, since
10164 		 * that is used for HBs and retransmissions cannot be used to measure RTT.
10165 		 */
10166 		/* (void)SCTP_GETTIME_TIMEVAL(&chk->whoTo->last_sent_time); */
10167 		*cnt_out += 1;
10168 		chk->sent = SCTP_DATAGRAM_SENT;
10169 		sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
10170 		if (fwd_tsn == 0) {
10171 			return (0);
10172 		} else {
10173 			/* Clean up the fwd-tsn list */
10174 			sctp_clean_up_ctl(stcb, asoc, so_locked);
10175 			return (0);
10176 		}
10177 	}
10178 	/*
10179 	 * Ok, it is just data retransmission we need to do or that and a
10180 	 * fwd-tsn with it all.
10181 	 */
10182 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
10183 		return (SCTP_RETRAN_DONE);
10184 	}
10185 	if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED) ||
10186 	    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT)) {
10187 		/* not yet open, resend the cookie and that is it */
10188 		return (1);
10189 	}
10190 #ifdef SCTP_AUDITING_ENABLED
10191 	sctp_auditing(20, inp, stcb, NULL);
10192 #endif
10193 	data_auth_reqd = sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks);
10194 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
10195 		if (chk->sent != SCTP_DATAGRAM_RESEND) {
10196 			/* No, not sent to this net or not ready for rtx */
10197 			continue;
10198 		}
10199 		if (chk->data == NULL) {
10200 			SCTP_PRINTF("TSN:%x chk->snd_count:%d chk->sent:%d can't retran - no data\n",
10201 			            chk->rec.data.tsn, chk->snd_count, chk->sent);
10202 			continue;
10203 		}
10204 		if ((SCTP_BASE_SYSCTL(sctp_max_retran_chunk)) &&
10205 		    (chk->snd_count >= SCTP_BASE_SYSCTL(sctp_max_retran_chunk))) {
10206 			struct mbuf *op_err;
10207 			char msg[SCTP_DIAG_INFO_LEN];
10208 
10209 			SCTP_SNPRINTF(msg, sizeof(msg), "TSN %8.8x retransmitted %d times, giving up",
10210 			              chk->rec.data.tsn, chk->snd_count);
10211 			op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
10212 			                             msg);
10213 			atomic_add_int(&stcb->asoc.refcnt, 1);
10214 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err,
10215 			                          so_locked);
10216 			SCTP_TCB_LOCK(stcb);
10217 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
10218 			return (SCTP_RETRAN_EXIT);
10219 		}
10220 		/* pick up the net */
10221 		net = chk->whoTo;
10222 		switch (net->ro._l_addr.sa.sa_family) {
10223 #ifdef INET
10224 			case AF_INET:
10225 				mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
10226 				break;
10227 #endif
10228 #ifdef INET6
10229 			case AF_INET6:
10230 				mtu = net->mtu - SCTP_MIN_OVERHEAD;
10231 				break;
10232 #endif
10233 #if defined(__Userspace__)
10234 			case AF_CONN:
10235 				mtu = net->mtu - sizeof(struct sctphdr);
10236 				break;
10237 #endif
10238 			default:
10239 				/* TSNH */
10240 				mtu = net->mtu;
10241 				break;
10242 		}
10243 
10244 		if ((asoc->peers_rwnd < mtu) && (asoc->total_flight > 0)) {
10245 			/* No room in peers rwnd */
10246 			uint32_t tsn;
10247 
10248 			tsn = asoc->last_acked_seq + 1;
10249 			if (tsn == chk->rec.data.tsn) {
10250 				/*
10251 				 * We make a special exception for this
10252 				 * case: the peer has no rwnd but is missing
10253 				 * the lowest chunk, which is probably what
10254 				 * is holding up the rwnd.
10255 				 */
10256 				goto one_chunk_around;
10257 			}
10258 			return (1);
10259 		}
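		/*
		 * Retransmitting into a closed or nearly closed window: limit
		 * ourselves to one chunk and, if nothing is in flight, treat
		 * that chunk as a window probe.
		 */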
10260 	one_chunk_around:
10261 		if (asoc->peers_rwnd < mtu) {
10262 			one_chunk = 1;
10263 			if ((asoc->peers_rwnd == 0) &&
10264 			    (asoc->total_flight == 0)) {
10265 				chk->window_probe = 1;
10266 				chk->whoTo->window_probe = 1;
10267 			}
10268 		}
10269 #ifdef SCTP_AUDITING_ENABLED
10270 		sctp_audit_log(0xC3, 2);
10271 #endif
10272 		bundle_at = 0;
10273 		m = NULL;
10274 		net->fast_retran_ip = 0;
10275 		if (chk->rec.data.doing_fast_retransmit == 0) {
10276 			/*
10277 			 * If no FR is in progress, skip destinations that
10278 			 * have flight_size >= cwnd.
10279 			 */
10280 			if (net->flight_size >= net->cwnd) {
10281 				continue;
10282 			}
10283 		} else {
10284 			/*
10285 			 * Mark the destination net to have FR recovery
10286 			 * limits put on it.
10287 			 */
10288 			*fr_done = 1;
10289 			net->fast_retran_ip = 1;
10290 		}
10291 
10292 		/*
10293 		 * if no AUTH is yet included and this chunk requires it,
10294 		 * make sure to account for it.  We don't apply the size
10295 		 * until the AUTH chunk is actually added below in case
10296 		 * there is no room for this chunk.
10297 		 */
10298 		if (data_auth_reqd && (auth == NULL)) {
10299 			dmtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
10300 		} else
10301 			dmtu = 0;
10302 
10303 		if ((chk->send_size <= (mtu - dmtu)) ||
10304 		    (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
10305 			/* ok we will add this one */
10306 			if (data_auth_reqd) {
10307 				if (auth == NULL) {
10308 					m = sctp_add_auth_chunk(m,
10309 								&endofchain,
10310 								&auth,
10311 								&auth_offset,
10312 								stcb,
10313 								SCTP_DATA);
10314 					auth_keyid = chk->auth_keyid;
10315 					override_ok = 0;
10316 					SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
10317 				} else if (override_ok) {
10318 					auth_keyid = chk->auth_keyid;
10319 					override_ok = 0;
10320 				} else if (chk->auth_keyid != auth_keyid) {
10321 					/* different keyid, so done bundling */
10322 					break;
10323 				}
10324 			}
10325 			m = sctp_copy_mbufchain(chk->data, m, &endofchain, 0, chk->send_size, chk->copy_by_ref);
10326 			if (m == NULL) {
10327 				SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
10328 				return (ENOMEM);
10329 			}
10330 			/* Do clear IP_DF ? */
10331 			if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
10332 				no_fragmentflg = 0;
10333 			}
10334 			/* update our MTU size */
10335 			if (mtu > (chk->send_size + dmtu))
10336 				mtu -= (chk->send_size + dmtu);
10337 			else
10338 				mtu = 0;
10339 			data_list[bundle_at++] = chk;
10340 			if (one_chunk && (asoc->total_flight <= 0)) {
10341 				SCTP_STAT_INCR(sctps_windowprobed);
10342 			}
10343 		}
10344 		if (one_chunk == 0) {
10345 			/*
10346 			 * Now, are there any more chunks forward of chk to
10347 			 * pick up?
10348 			 */
10349 			for (fwd = TAILQ_NEXT(chk, sctp_next); fwd != NULL; fwd = TAILQ_NEXT(fwd, sctp_next)) {
10350 				if (fwd->sent != SCTP_DATAGRAM_RESEND) {
10351 					/* Nope, not for retran */
10352 					continue;
10353 				}
10354 				if (fwd->whoTo != net) {
10355 					/* Nope, not the net in question */
10356 					continue;
10357 				}
10358 				if (data_auth_reqd && (auth == NULL)) {
10359 					dmtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
10360 				} else
10361 					dmtu = 0;
10362 				if (fwd->send_size <= (mtu - dmtu)) {
10363 					if (data_auth_reqd) {
10364 						if (auth == NULL) {
10365 							m = sctp_add_auth_chunk(m,
10366 										&endofchain,
10367 										&auth,
10368 										&auth_offset,
10369 										stcb,
10370 										SCTP_DATA);
10371 							auth_keyid = fwd->auth_keyid;
10372 							override_ok = 0;
10373 							SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
10374 						} else if (override_ok) {
10375 							auth_keyid = fwd->auth_keyid;
10376 							override_ok = 0;
10377 						} else if (fwd->auth_keyid != auth_keyid) {
10378 							/* different keyid, so done bundling */
10379 							break;
10380 						}
10381 					}
10382 					m = sctp_copy_mbufchain(fwd->data, m, &endofchain, 0, fwd->send_size, fwd->copy_by_ref);
10383 					if (m == NULL) {
10384 						SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
10385 						return (ENOMEM);
10386 					}
10387 					/* Do clear IP_DF ? */
10388 					if (fwd->flags & CHUNK_FLAGS_FRAGMENT_OK) {
10389 						no_fragmentflg = 0;
10390 					}
10391 					/* update our MTU size */
10392 					if (mtu > (fwd->send_size + dmtu))
10393 						mtu -= (fwd->send_size + dmtu);
10394 					else
10395 						mtu = 0;
10396 					data_list[bundle_at++] = fwd;
10397 					if (bundle_at >= SCTP_MAX_DATA_BUNDLING) {
10398 						break;
10399 					}
10400 				} else {
10401 					/* can't fit so we are done */
10402 					break;
10403 				}
10404 			}
10405 		}
10406 		/* Is there something to send for this destination? */
10407 		if (m) {
10408 			/*
10409 			 * No matter whether we fail or succeed we should start a
10410 			 * timer. A failure is like a lost IP packet :-)
10411 			 */
10412 			if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
10413 				/*
10414 				 * No timer running on this destination,
10415 				 * so (re)start it.
10416 				 */
10417 				sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
10418 				tmr_started = 1;
10419 			}
10420 			/* Now lets send it, if there is anything to send :> */
10421 			if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
10422 			                                        (struct sockaddr *)&net->ro._l_addr, m,
10423 			                                        auth_offset, auth, auth_keyid,
10424 			                                        no_fragmentflg, 0, 0,
10425 			                                        inp->sctp_lport, stcb->rport, htonl(stcb->asoc.peer_vtag),
10426 			                                        net->port, NULL,
10427 #if defined(__FreeBSD__)
10428 			                                        0, 0,
10429 #endif
10430 			                                        so_locked))) {
10431 				/* error, we could not output */
10432 				SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
10433 				if (error == ENOBUFS) {
10434 					asoc->ifp_had_enobuf = 1;
10435 					SCTP_STAT_INCR(sctps_lowlevelerr);
10436 				}
10437 				return (error);
10438 			} else {
10439 				asoc->ifp_had_enobuf = 0;
10440 			}
10441 			endofchain = NULL;
10442 			auth = NULL;
10443 			auth_offset = 0;
10444 			/* For HB's */
10445 			/*
10446 			 * We don't want to mark the net's last_sent_time here,
10447 			 * since that is used for HBs and retransmissions cannot
10448 			 * be used to measure RTT.
10449 			 */
10450 			/* (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time); */
10451 
10452 			/* For auto-close */
10453 			cnt_thru++;
10454 			if (*now_filled == 0) {
10455 				(void)SCTP_GETTIME_TIMEVAL(&asoc->time_last_sent);
10456 				*now = asoc->time_last_sent;
10457 				*now_filled = 1;
10458 			} else {
10459 				asoc->time_last_sent = *now;
10460 			}
10461 			*cnt_out += bundle_at;
10462 #ifdef SCTP_AUDITING_ENABLED
10463 			sctp_audit_log(0xC4, bundle_at);
10464 #endif
10465 			if (bundle_at) {
10466 				tsns_sent = data_list[0]->rec.data.tsn;
10467 			}
10468 			for (i = 0; i < bundle_at; i++) {
10469 				SCTP_STAT_INCR(sctps_sendretransdata);
10470 				data_list[i]->sent = SCTP_DATAGRAM_SENT;
10471 				/*
10472 				 * When we retransmit a chunk that was
10473 				 * revoked, we clear the revoked flag,
10474 				 * since this flag dictates whether we
10475 				 * subtracted it from the flight size.
10476 				 */
10477 				if (data_list[i]->rec.data.chunk_was_revoked) {
10478 					/* Deflate the cwnd */
10479 					data_list[i]->whoTo->cwnd -= data_list[i]->book_size;
10480 					data_list[i]->rec.data.chunk_was_revoked = 0;
10481 				}
10482 				data_list[i]->snd_count++;
10483 				sctp_ucount_decr(asoc->sent_queue_retran_cnt);
10484 				/* record the time */
10485 				data_list[i]->sent_rcv_time = asoc->time_last_sent;
10486 				if (data_list[i]->book_size_scale) {
10487 					/*
10488 					 * need to double the book size on
10489 					 * this one
10490 					 */
10491 					data_list[i]->book_size_scale = 0;
10492 					/* Since we double the book size, we must
10493 					 * also double the output queue size, since it
10494 					 * gets shrunk by this amount when we free.
10495 					 */
10496 					atomic_add_int(&((asoc)->total_output_queue_size),data_list[i]->book_size);
10497 					data_list[i]->book_size *= 2;
10498 
10499 
10500 				} else {
10501 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
10502 						sctp_log_rwnd(SCTP_DECREASE_PEER_RWND,
10503 						      asoc->peers_rwnd, data_list[i]->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
10504 					}
10505 					asoc->peers_rwnd = sctp_sbspace_sub(asoc->peers_rwnd,
10506 									    (uint32_t) (data_list[i]->send_size +
10507 											SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)));
10508 				}
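				/*
				 * Note: our view of the peer's rwnd just shrank by the
				 * chunk size plus the configurable per-chunk overhead
				 * allowance (sctp_peer_chunk_oh); sctp_sbspace_sub() is
				 * assumed to clamp the result at zero rather than wrap.
				 */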
10509 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
10510 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_RSND,
10511 						       data_list[i]->whoTo->flight_size,
10512 						       data_list[i]->book_size,
10513 						       (uint32_t)(uintptr_t)data_list[i]->whoTo,
10514 						       data_list[i]->rec.data.tsn);
10515 				}
10516 				sctp_flight_size_increase(data_list[i]);
10517 				sctp_total_flight_increase(stcb, data_list[i]);
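				/*
				 * Sender-side silly window syndrome (SWS) avoidance:
				 * once the peer's advertised window drops below our
				 * configured sws_sender threshold, treat it as zero so
				 * we do not dribble out tiny packets.
				 */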
10518 				if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
10519 					/* SWS sender side engages */
10520 					asoc->peers_rwnd = 0;
10521 				}
10522 				if ((i == 0) &&
10523 				    (data_list[i]->rec.data.doing_fast_retransmit)) {
10524 					SCTP_STAT_INCR(sctps_sendfastretrans);
10525 					if ((data_list[i] == TAILQ_FIRST(&asoc->sent_queue)) &&
10526 					    (tmr_started == 0)) {
10527 						/*-
10528 						 * OK, we just fast-retransmitted
10529 						 * the lowest TSN, i.e. the
10530 						 * first on the list. In
10531 						 * this case we want to give
10532 						 * some more time to get a
10533 						 * SACK back without the
10534 						 * T3 timer expiring.
10535 						 */
10536 						sctp_timer_stop(SCTP_TIMER_TYPE_SEND, inp, stcb, net,
10537 						                SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_2);
10538 						sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
10539 					}
10540 				}
10541 			}
10542 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
10543 				sctp_log_cwnd(stcb, net, tsns_sent, SCTP_CWND_LOG_FROM_RESEND);
10544 			}
10545 #ifdef SCTP_AUDITING_ENABLED
10546 			sctp_auditing(21, inp, stcb, NULL);
10547 #endif
10548 		} else {
10549 			/* None will fit */
10550 			return (1);
10551 		}
10552 		if (asoc->sent_queue_retran_cnt <= 0) {
10553 			/* all done we have no more to retran */
10554 			asoc->sent_queue_retran_cnt = 0;
10555 			break;
10556 		}
10557 		if (one_chunk) {
10558 			/* No more room in rwnd */
10559 			return (1);
10560 		}
10561 		/* stop the for loop here. we sent out a packet */
10562 		break;
10563 	}
10564 	return (0);
10565 }
10566 
10567 static void
10568 sctp_timer_validation(struct sctp_inpcb *inp,
10569     struct sctp_tcb *stcb,
10570     struct sctp_association *asoc)
10571 {
10572 	struct sctp_nets *net;
10573 
10574 	/* Validate that a timer is running somewhere */
10575 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
10576 		if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
10577 			/* Here is a timer */
10578 			return;
10579 		}
10580 	}
10581 	SCTP_TCB_LOCK_ASSERT(stcb);
10582 	/* Gak, we did not have a timer somewhere */
10583 	SCTPDBG(SCTP_DEBUG_OUTPUT3, "Deadlock avoided starting timer on a dest at retran\n");
10584 	if (asoc->alternate) {
10585 		sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, asoc->alternate);
10586 	} else {
10587 		sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, asoc->primary_destination);
10588 	}
10589 	return;
10590 }
10591 
10592 void
10593 sctp_chunk_output(struct sctp_inpcb *inp,
10594     struct sctp_tcb *stcb,
10595     int from_where,
10596     int so_locked
10597 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
10598     SCTP_UNUSED
10599 #endif
10600     )
10601 {
10602 	/*-
10603 	 * This is the generic chunk service queue. We must do the
10604 	 * following:
10605 	 * - See if there are retransmits pending; if so we must
10606 	 *   do these first.
10607 	 * - Service the stream queue that is next, moving any
10608 	 *   message (note: a complete message, i.e. FIRST/MIDDLE
10609 	 *   and LAST, must go to the out queue in one pass) and assigning
10610 	 *   TSNs.
10611 	 * - Check whether the cwnd/rwnd allows any output; if so,
10612 	 *   formulate and send the low-level chunks, making sure
10613 	 *   to also bundle any control in the control chunk queue.
10614 	 */
10615 	struct sctp_association *asoc;
10616 	struct sctp_nets *net;
10617 	int error = 0, num_out, tot_out = 0, ret = 0, reason_code;
10618 	unsigned int burst_cnt = 0;
10619 	struct timeval now;
10620 	int now_filled = 0;
10621 	int nagle_on;
10622 	int frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
10623 	int un_sent = 0;
10624 	int fr_done;
10625 	unsigned int tot_frs = 0;
10626 
10627 #if defined(__APPLE__)
10628 	if (so_locked) {
10629 		sctp_lock_assert(SCTP_INP_SO(inp));
10630 	} else {
10631 		sctp_unlock_assert(SCTP_INP_SO(inp));
10632 	}
10633 #endif
10634 	asoc = &stcb->asoc;
10635 do_it_again:
10636 	/* The Nagle algorithm is only applied when handling a send call. */
10637 	if (from_where == SCTP_OUTPUT_FROM_USR_SEND) {
10638 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NODELAY)) {
10639 			nagle_on = 0;
10640 		} else {
10641 			nagle_on = 1;
10642 		}
10643 	} else {
10644 		nagle_on = 0;
10645 	}
10646 	SCTP_TCB_LOCK_ASSERT(stcb);
10647 
10648 	un_sent = (stcb->asoc.total_output_queue_size - stcb->asoc.total_flight);
10649 
10650 	if ((un_sent <= 0) &&
10651 	    (TAILQ_EMPTY(&asoc->control_send_queue)) &&
10652 	    (TAILQ_EMPTY(&asoc->asconf_send_queue)) &&
10653 	    (asoc->sent_queue_retran_cnt == 0) &&
10654 	    (asoc->trigger_reset == 0)) {
10655 		/* Nothing to do unless there is something left to be sent */
10656 		return;
10657 	}
10658 	/* If we have something to send (data or control) AND
10659 	 * a SACK timer is running, piggy-back the SACK.
10660 	 */
10661 	if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
10662 		sctp_send_sack(stcb, so_locked);
10663 		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
10664 		                SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_3);
10665 	}
10666 	while (asoc->sent_queue_retran_cnt) {
10667 		/*-
10668 		 * Ok, it is retransmission time only, we send out only ONE
10669 		 * packet with a single call off to the retran code.
10670 		 */
10671 		if (from_where == SCTP_OUTPUT_FROM_COOKIE_ACK) {
10672 			/*-
10673 			 * Special hook for handling cookies discarded
10674 			 * by the peer that carried data. Send the cookie-ack only,
10675 			 * and then the next call will get the retransmissions.
10676 			 */
10677 			(void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1,
10678 						    from_where,
10679 						    &now, &now_filled, frag_point, so_locked);
10680 			return;
10681 		} else if (from_where != SCTP_OUTPUT_FROM_HB_TMR) {
10682 			/* if its not from a HB then do it */
10683 			fr_done = 0;
10684 			ret = sctp_chunk_retransmission(inp, stcb, asoc, &num_out, &now, &now_filled, &fr_done, so_locked);
10685 			if (fr_done) {
10686 				tot_frs++;
10687 			}
10688 		} else {
10689 			/*
10690 			 * it's from any other place; we don't allow
10691 			 * retransmission output here (only control)
10692 			 */
10693 			ret = 1;
10694 		}
10695 		if (ret > 0) {
10696 			/* Can't send anymore */
10697 			/*-
10698 			 * Now let's push out control by calling med-level
10699 			 * output once. This assures that we WILL send HBs
10700 			 * if any are queued too.
10701 			 */
10702 			(void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1,
10703 						    from_where,
10704 						    &now, &now_filled, frag_point, so_locked);
10705 #ifdef SCTP_AUDITING_ENABLED
10706 			sctp_auditing(8, inp, stcb, NULL);
10707 #endif
10708 			sctp_timer_validation(inp, stcb, asoc);
10709 			return;
10710 		}
10711 		if (ret < 0) {
10712 			/*-
10713 			 * The count was off... retran is not happening, so do
10714 			 * the normal retransmission.
10715 			 */
10716 #ifdef SCTP_AUDITING_ENABLED
10717 			sctp_auditing(9, inp, stcb, NULL);
10718 #endif
10719 			if (ret == SCTP_RETRAN_EXIT) {
10720 				return;
10721 			}
10722 			break;
10723 		}
10724 		if (from_where == SCTP_OUTPUT_FROM_T3) {
10725 			/* Only one transmission allowed out of a timeout */
10726 #ifdef SCTP_AUDITING_ENABLED
10727 			sctp_auditing(10, inp, stcb, NULL);
10728 #endif
10729 			/* Push out any control */
10730 			(void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1, from_where,
10731 						    &now, &now_filled, frag_point, so_locked);
10732 			return;
10733 		}
10734 		if ((asoc->fr_max_burst > 0) && (tot_frs >= asoc->fr_max_burst)) {
10735 			/* Hit FR burst limit */
10736 			return;
10737 		}
10738 		if ((num_out == 0) && (ret == 0)) {
10739 			/* No more retrans to send */
10740 			break;
10741 		}
10742 	}
10743 #ifdef SCTP_AUDITING_ENABLED
10744 	sctp_auditing(12, inp, stcb, NULL);
10745 #endif
10746 	/* Check for bad destinations, if they exist move chunks around. */
10747 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
10748 		if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
10749 			/*-
10750 			 * If possible, move things off of this address; we
10751 			 * still may send below due to the dormant state, but
10752 			 * we try to find an alternate address to send to,
10753 			 * and if we have one, we move all queued data on the
10754 			 * out wheel to this alternate address.
10755 			 */
10756 			if (net->ref_count > 1)
10757 				sctp_move_chunks_from_net(stcb, net);
10758 		} else {
10759 			/*-
10760 			 * if ((asoc->sat_network) || (net->addr_is_local))
10761 			 * { burst_limit = asoc->max_burst *
10762 			 * SCTP_SAT_NETWORK_BURST_INCR; }
10763 			 */
10764 			if (asoc->max_burst > 0) {
10765 				if (SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst)) {
10766 					if ((net->flight_size + (asoc->max_burst * net->mtu)) < net->cwnd) {
10767 						/* JRS - Use the congestion control given in the congestion control module */
10768 						asoc->cc_functions.sctp_cwnd_update_after_output(stcb, net, asoc->max_burst);
10769 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
10770 							sctp_log_maxburst(stcb, net, 0, asoc->max_burst, SCTP_MAX_BURST_APPLIED);
10771 						}
10772 						SCTP_STAT_INCR(sctps_maxburstqueued);
10773 					}
10774 					net->fast_retran_ip = 0;
10775 				} else {
10776 					if (net->flight_size == 0) {
10777 						/* Should be decaying the cwnd here */
10778 						;
10779 					}
10780 				}
10781 			}
10782 		}
10783 
10784 	}
10785 	burst_cnt = 0;
10786 	do {
10787 		error = sctp_med_chunk_output(inp, stcb, asoc, &num_out,
10788 					      &reason_code, 0, from_where,
10789 					      &now, &now_filled, frag_point, so_locked);
10790 		if (error) {
10791 			SCTPDBG(SCTP_DEBUG_OUTPUT1, "Error %d was returned from med-c-op\n", error);
10792 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
10793 				sctp_log_maxburst(stcb, asoc->primary_destination, error, burst_cnt, SCTP_MAX_BURST_ERROR_STOP);
10794 			}
10795 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
10796 				sctp_log_cwnd(stcb, NULL, error, SCTP_SEND_NOW_COMPLETES);
10797 				sctp_log_cwnd(stcb, NULL, 0xdeadbeef, SCTP_SEND_NOW_COMPLETES);
10798 			}
10799 			break;
10800 		}
10801 		SCTPDBG(SCTP_DEBUG_OUTPUT3, "m-c-o put out %d\n", num_out);
10802 
10803 		tot_out += num_out;
10804 		burst_cnt++;
10805 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
10806 			sctp_log_cwnd(stcb, NULL, num_out, SCTP_SEND_NOW_COMPLETES);
10807 			if (num_out == 0) {
10808 				sctp_log_cwnd(stcb, NULL, reason_code, SCTP_SEND_NOW_COMPLETES);
10809 			}
10810 		}
10811 		if (nagle_on) {
10812 			/*
10813 			 * When the Nagle algorithm is used, look at how much
10814 			 * is unsent; if that is smaller than an MTU and we
10815 			 * have data in flight, we stop, except when we are
10816 			 * handling a fragmented user message.
10817 			 */
10818 			un_sent = stcb->asoc.total_output_queue_size - stcb->asoc.total_flight;
10819 			if ((un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD)) &&
10820 			    (stcb->asoc.total_flight > 0)) {
10821 /*	&&		     sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR))) {*/
10822 				break;
10823 			}
10824 		}
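		/*
		 * Illustrative Nagle example (assumed numbers): with a smallest
		 * path MTU of 1500 bytes and SCTP_MIN_OVERHEAD of 52, anything
		 * under roughly 1448 unsent bytes while data is still in flight
		 * makes us wait for a SACK instead of emitting a small packet.
		 */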
10825 		if (TAILQ_EMPTY(&asoc->control_send_queue) &&
10826 		    TAILQ_EMPTY(&asoc->send_queue) &&
10827 		    sctp_is_there_unsent_data(stcb, so_locked) == 0) {
10828 			/* Nothing left to send */
10829 			break;
10830 		}
10831 		if ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) <= 0) {
10832 			/* Nothing left to send */
10833 			break;
10834 		}
10835 	} while (num_out &&
10836 	         ((asoc->max_burst == 0) ||
10837 		  SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst) ||
10838 		  (burst_cnt < asoc->max_burst)));
10839 
10840 	if (SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst) == 0) {
10841 		if ((asoc->max_burst > 0) && (burst_cnt >= asoc->max_burst)) {
10842 			SCTP_STAT_INCR(sctps_maxburstqueued);
10843 			asoc->burst_limit_applied = 1;
10844 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
10845 				sctp_log_maxburst(stcb, asoc->primary_destination, 0, burst_cnt, SCTP_MAX_BURST_APPLIED);
10846 			}
10847 		} else {
10848 			asoc->burst_limit_applied = 0;
10849 		}
10850 	}
10851 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
10852 		sctp_log_cwnd(stcb, NULL, tot_out, SCTP_SEND_NOW_COMPLETES);
10853 	}
10854 	SCTPDBG(SCTP_DEBUG_OUTPUT1, "Ok, we have put out %d chunks\n",
10855 		tot_out);
10856 
10857 	/*-
10858 	 * Now we need to clean up the control chunk chain if an ECNE is on
10859 	 * it. It must be marked as UNSENT again so the next call will
10860 	 * continue to send it until we get a CWR, which removes it.
10861 	 */
10862 	if (stcb->asoc.ecn_echo_cnt_onq)
10863 		sctp_fix_ecn_echo(asoc);
10864 
10865 	if (stcb->asoc.trigger_reset) {
10866 		if (sctp_send_stream_reset_out_if_possible(stcb, so_locked) == 0)  {
10867 			goto do_it_again;
10868 		}
10869 	}
10870 	return;
10871 }
10872 
10873 
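/*
 * Socket-layer send entry point: after sanity-checking the endpoint and
 * its socket, the mbuf chain (and any control data) is handed off to
 * sctp_sosend() with the caller's flags.
 */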
10874 int
10875 sctp_output(
10876 	struct sctp_inpcb *inp,
10877 #if defined(__Panda__)
10878 	pakhandle_type m,
10879 #else
10880 	struct mbuf *m,
10881 #endif
10882 	struct sockaddr *addr,
10883 #if defined(__Panda__)
10884 	pakhandle_type control,
10885 #else
10886 	struct mbuf *control,
10887 #endif
10888 #if defined(__FreeBSD__) && __FreeBSD_version >= 500000
10889 	struct thread *p,
10890 #elif defined(__Windows__)
10891 	PKTHREAD p,
10892 #else
10893 #if defined(__APPLE__)
10894 	struct proc *p SCTP_UNUSED,
10895 #else
10896 	struct proc *p,
10897 #endif
10898 #endif
10899 	int flags)
10900 {
10901 	if (inp == NULL) {
10902 		SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
10903 		return (EINVAL);
10904 	}
10905 
10906 	if (inp->sctp_socket == NULL) {
10907 		SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
10908 		return (EINVAL);
10909 	}
10910 	return (sctp_sosend(inp->sctp_socket,
10911 			    addr,
10912 			    (struct uio *)NULL,
10913 			    m,
10914 			    control,
10915 #if defined(__APPLE__) || defined(__Panda__)
10916 			    flags
10917 #else
10918 			    flags, p
10919 #endif
10920 			));
10921 }
10922 
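/*
 * Queue (or refresh) a FORWARD-TSN / I-FORWARD-TSN chunk telling the peer
 * to move its cumulative ack point past abandoned TSNs.  An existing
 * chunk found on the control queue is reused; otherwise a new one is
 * allocated.  The stream/sequence entries are filled in from the skipped
 * chunks on the sent queue, trimmed so the chunk fits within one MTU.
 */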
10923 void
10924 send_forward_tsn(struct sctp_tcb *stcb,
10925 		 struct sctp_association *asoc)
10926 {
10927 	struct sctp_tmit_chunk *chk, *at, *tp1, *last;
10928 	struct sctp_forward_tsn_chunk *fwdtsn;
10929 	struct sctp_strseq *strseq;
10930 	struct sctp_strseq_mid *strseq_m;
10931 	uint32_t advance_peer_ack_point;
10932 	unsigned int cnt_of_space, i, ovh;
10933 	unsigned int space_needed;
10934 	unsigned int cnt_of_skipped = 0;
10935 
10936 	SCTP_TCB_LOCK_ASSERT(stcb);
10937 	TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
10938 		if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) {
10939 			/* mark it as unsent */
10940 			chk->sent = SCTP_DATAGRAM_UNSENT;
10941 			chk->snd_count = 0;
10942 			/* Do we correct its output location? */
10943 			if (chk->whoTo) {
10944 				sctp_free_remote_addr(chk->whoTo);
10945 				chk->whoTo = NULL;
10946 			}
10947 			goto sctp_fill_in_rest;
10948 		}
10949 	}
10950 	/* Ok if we reach here we must build one */
10951 	sctp_alloc_a_chunk(stcb, chk);
10952 	if (chk == NULL) {
10953 		return;
10954 	}
10955 	asoc->fwd_tsn_cnt++;
10956 	chk->copy_by_ref = 0;
10957 	/*
10958 	 * We don't do the old thing here since
10959 	 * this is used not for on-wire but to
10960 	 * tell if we are sending a fwd-tsn by
10961 	 * the stack during output. And whether it is
10962 	 * an IFORWARD or a FORWARD, it is a fwd-tsn.
10963 	 */
10964 	chk->rec.chunk_id.id = SCTP_FORWARD_CUM_TSN;
10965 	chk->rec.chunk_id.can_take_data = 0;
10966 	chk->flags = 0;
10967 	chk->asoc = asoc;
10968 	chk->whoTo = NULL;
10969 	chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
10970 	if (chk->data == NULL) {
10971 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
10972 		return;
10973 	}
10974 	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
10975 	chk->sent = SCTP_DATAGRAM_UNSENT;
10976 	chk->snd_count = 0;
10977 	TAILQ_INSERT_TAIL(&asoc->control_send_queue, chk, sctp_next);
10978 	asoc->ctrl_queue_cnt++;
10979 sctp_fill_in_rest:
10980 	/*-
10981 	 * Here we go through and fill out the part that deals with
10982 	 * stream/seq of the ones we skip.
10983 	 */
10984 	SCTP_BUF_LEN(chk->data) = 0;
10985 	TAILQ_FOREACH(at, &asoc->sent_queue, sctp_next) {
10986 		if ((at->sent != SCTP_FORWARD_TSN_SKIP) &&
10987 		    (at->sent != SCTP_DATAGRAM_NR_ACKED)) {
10988 			/* no more to look at */
10989 			break;
10990 		}
10991 		if (!asoc->idata_supported && (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED)) {
10992 			/* We don't report these */
10993 			continue;
10994 		}
10995 		cnt_of_skipped++;
10996 	}
10997 	if (asoc->idata_supported) {
10998 		space_needed = (sizeof(struct sctp_forward_tsn_chunk) +
10999 		                (cnt_of_skipped * sizeof(struct sctp_strseq_mid)));
11000 	} else {
11001 		space_needed = (sizeof(struct sctp_forward_tsn_chunk) +
11002 		                (cnt_of_skipped * sizeof(struct sctp_strseq)));
11003 	}
11004 	cnt_of_space = (unsigned int)M_TRAILINGSPACE(chk->data);
11005 
11006 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
11007 		ovh = SCTP_MIN_OVERHEAD;
11008 	} else {
11009 		ovh = SCTP_MIN_V4_OVERHEAD;
11010 	}
11011 	if (cnt_of_space > (asoc->smallest_mtu - ovh)) {
11012 		/* trim to an MTU size */
11013 		cnt_of_space = asoc->smallest_mtu - ovh;
11014 	}
11015 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
11016 		sctp_misc_ints(SCTP_FWD_TSN_CHECK,
11017 			       0xff, 0, cnt_of_skipped,
11018 			       asoc->advanced_peer_ack_point);
11019 	}
11020 	advance_peer_ack_point = asoc->advanced_peer_ack_point;
11021 	if (cnt_of_space < space_needed) {
11022 		/*-
11023 		 * ok we must trim down the chunk by lowering the
11024 		 * advance peer ack point.
11025 		 */
11026 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
11027 			sctp_misc_ints(SCTP_FWD_TSN_CHECK,
11028 				       0xff, 0xff, cnt_of_space,
11029 				       space_needed);
11030 		}
11031 		cnt_of_skipped = cnt_of_space - sizeof(struct sctp_forward_tsn_chunk);
11032 		if (asoc->idata_supported) {
11033 			cnt_of_skipped /= sizeof(struct sctp_strseq_mid);
11034 		} else {
11035 			cnt_of_skipped /= sizeof(struct sctp_strseq);
11036 		}
11037 		/*-
11038 		 * Go through and find the TSN that will be the one
11039 		 * we report.
11040 		 */
11041 		at = TAILQ_FIRST(&asoc->sent_queue);
11042 		if (at != NULL) {
11043 			for (i = 0; i < cnt_of_skipped; i++) {
11044 				tp1 = TAILQ_NEXT(at, sctp_next);
11045 				if (tp1 == NULL) {
11046 					break;
11047 				}
11048 				at = tp1;
11049 			}
11050 		}
11051 		if (at && SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
11052 			sctp_misc_ints(SCTP_FWD_TSN_CHECK,
11053 				       0xff, cnt_of_skipped, at->rec.data.tsn,
11054 				       asoc->advanced_peer_ack_point);
11055 		}
11056 		last = at;
11057 		/*-
11058 		 * last now points to the last one I can report; update the
11059 		 * peer ack point.
11060 		 */
11061 		if (last) {
11062 			advance_peer_ack_point = last->rec.data.tsn;
11063 		}
11064 		if (asoc->idata_supported) {
11065 			space_needed = sizeof(struct sctp_forward_tsn_chunk) +
11066 			               cnt_of_skipped * sizeof(struct sctp_strseq_mid);
11067 		} else {
11068 			space_needed = sizeof(struct sctp_forward_tsn_chunk) +
11069 			               cnt_of_skipped * sizeof(struct sctp_strseq);
11070 		}
11071 	}
11072 	chk->send_size = space_needed;
11073 	/* Setup the chunk */
11074 	fwdtsn = mtod(chk->data, struct sctp_forward_tsn_chunk *);
11075 	fwdtsn->ch.chunk_length = htons(chk->send_size);
11076 	fwdtsn->ch.chunk_flags = 0;
11077 	if (asoc->idata_supported) {
11078 		fwdtsn->ch.chunk_type = SCTP_IFORWARD_CUM_TSN;
11079 	} else {
11080 		fwdtsn->ch.chunk_type = SCTP_FORWARD_CUM_TSN;
11081 	}
11082 	fwdtsn->new_cumulative_tsn = htonl(advance_peer_ack_point);
11083 	SCTP_BUF_LEN(chk->data) = chk->send_size;
11084 	fwdtsn++;
11085 	/*-
11086 	 * Move pointer to after the fwdtsn and transfer to the
11087 	 * strseq pointer.
11088 	 */
11089 	if (asoc->idata_supported) {
11090 		strseq_m = (struct sctp_strseq_mid *)fwdtsn;
11091 		strseq = NULL;
11092 	} else {
11093 		strseq = (struct sctp_strseq *)fwdtsn;
11094 		strseq_m = NULL;
11095 	}
11096 	/*-
11097 	 * Now populate the strseq list. This is done blindly
11098 	 * without pulling out duplicate stream info. This is
11099 	 * inefficient but won't harm the process since the peer will
11100 	 * look at these in sequence and will thus release anything.
11101 	 * It could mean we exceed the PMTU and chop off some that
11102 	 * we could have included, but this is unlikely (e.g., 1432/4
11103 	 * would mean 300+ stream sequence numbers would have to be
11104 	 * reported in one FWD-TSN).  With a bit of work we could later
11105 	 * fix this to optimize and pull out duplicates, but that adds
11106 	 * more overhead.  So for now... not!
11107 	 */
11108 	i = 0;
11109 	TAILQ_FOREACH(at, &asoc->sent_queue, sctp_next) {
11110 		if (i >= cnt_of_skipped) {
11111 			break;
11112 		}
11113 		if (!asoc->idata_supported && (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED)) {
11114 			/* We don't report these */
11115 			continue;
11116 		}
11117 		if (at->rec.data.tsn == advance_peer_ack_point) {
11118 			at->rec.data.fwd_tsn_cnt = 0;
11119 		}
11120 		if (asoc->idata_supported) {
11121 			strseq_m->sid = htons(at->rec.data.sid);
11122 			if (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED) {
11123 				strseq_m->flags = htons(PR_SCTP_UNORDERED_FLAG);
11124 			} else {
11125 				strseq_m->flags = 0;
11126 			}
11127 			strseq_m->mid = htonl(at->rec.data.mid);
11128 			strseq_m++;
11129 		} else {
11130 			strseq->sid = htons(at->rec.data.sid);
11131 			strseq->ssn = htons((uint16_t)at->rec.data.mid);
11132 			strseq++;
11133 		}
11134 		i++;
11135 	}
11136 	return;
11137 }
11138 
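/*
 * Build a SACK or NR-SACK (depending on whether NR-SACK support was
 * negotiated with the peer) from the mapping arrays and queue it on the
 * control queue.  Gap ack blocks, NR gap blocks and duplicate TSNs are
 * appended as long as they fit within one MTU of the chosen destination.
 */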
11139 void
11140 sctp_send_sack(struct sctp_tcb *stcb, int so_locked
11141 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
11142 	SCTP_UNUSED
11143 #endif
11144 )
11145 {
11146 	/*-
11147 	 * Queue up a SACK or NR-SACK in the control queue.
11148 	 * We must first check to see if a SACK or NR-SACK is
11149 	 * somehow already on the control queue.
11150 	 * If so, we will take the old one off the queue and reuse it.
11151 	 */
11152 	struct sctp_association *asoc;
11153 	struct sctp_tmit_chunk *chk, *a_chk;
11154 	struct sctp_sack_chunk *sack;
11155 	struct sctp_nr_sack_chunk *nr_sack;
11156 	struct sctp_gap_ack_block *gap_descriptor;
11157 	const struct sack_track *selector;
11158 	int mergeable = 0;
11159 	int offset;
11160 	caddr_t limit;
11161 	uint32_t *dup;
11162 	int limit_reached = 0;
11163 	unsigned int i, siz, j;
11164 	unsigned int num_gap_blocks = 0, num_nr_gap_blocks = 0, space;
11165 	int num_dups = 0;
11166 	int space_req;
11167 	uint32_t highest_tsn;
11168 	uint8_t flags;
11169 	uint8_t type;
11170 	uint8_t tsn_map;
11171 
11172 	if (stcb->asoc.nrsack_supported == 1) {
11173 		type = SCTP_NR_SELECTIVE_ACK;
11174 	} else {
11175 		type = SCTP_SELECTIVE_ACK;
11176 	}
11177 	a_chk = NULL;
11178 	asoc = &stcb->asoc;
11179 	SCTP_TCB_LOCK_ASSERT(stcb);
11180 	if (asoc->last_data_chunk_from == NULL) {
11181 		/* Hmm we never received anything */
11182 		return;
11183 	}
11184 	sctp_slide_mapping_arrays(stcb);
11185 	sctp_set_rwnd(stcb, asoc);
11186 	TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
11187 		if (chk->rec.chunk_id.id == type) {
11188 			/* Hmm, found a sack already on queue, remove it */
11189 			TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
11190 			asoc->ctrl_queue_cnt--;
11191 			a_chk = chk;
11192 			if (a_chk->data) {
11193 				sctp_m_freem(a_chk->data);
11194 				a_chk->data = NULL;
11195 			}
11196 			if (a_chk->whoTo) {
11197 				sctp_free_remote_addr(a_chk->whoTo);
11198 				a_chk->whoTo = NULL;
11199 			}
11200 			break;
11201 		}
11202 	}
11203 	if (a_chk == NULL) {
11204 		sctp_alloc_a_chunk(stcb, a_chk);
11205 		if (a_chk == NULL) {
11206 			/* No memory so we drop the idea, and set a timer */
11207 			if (stcb->asoc.delayed_ack) {
11208 				sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
11209 				                stcb->sctp_ep, stcb, NULL,
11210 				                SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_4);
11211 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
11212 				    stcb->sctp_ep, stcb, NULL);
11213 			} else {
11214 				stcb->asoc.send_sack = 1;
11215 			}
11216 			return;
11217 		}
11218 		a_chk->copy_by_ref = 0;
11219 		a_chk->rec.chunk_id.id = type;
11220 		a_chk->rec.chunk_id.can_take_data = 1;
11221 	}
11222 	/* Clear our pkt counts */
11223 	asoc->data_pkts_seen = 0;
11224 
11225 	a_chk->flags = 0;
11226 	a_chk->asoc = asoc;
11227 	a_chk->snd_count = 0;
11228 	a_chk->send_size = 0;	/* fill in later */
11229 	a_chk->sent = SCTP_DATAGRAM_UNSENT;
11230 	a_chk->whoTo = NULL;
11231 
11232 	if (!(asoc->last_data_chunk_from->dest_state & SCTP_ADDR_REACHABLE)) {
11233 		/*-
11234 		 * Ok, the destination for the SACK is unreachable, lets see if
11235 		 * we can select an alternate to asoc->last_data_chunk_from
11236 		 */
11237 		a_chk->whoTo = sctp_find_alternate_net(stcb, asoc->last_data_chunk_from, 0);
11238 		if (a_chk->whoTo == NULL) {
11239 			/* Nope, no alternate */
11240 			a_chk->whoTo = asoc->last_data_chunk_from;
11241 		}
11242 	} else {
11243 		a_chk->whoTo = asoc->last_data_chunk_from;
11244 	}
11245 	if (a_chk->whoTo) {
11246 		atomic_add_int(&a_chk->whoTo->ref_count, 1);
11247 	}
11248 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_map, asoc->highest_tsn_inside_nr_map)) {
11249 		highest_tsn = asoc->highest_tsn_inside_map;
11250 	} else {
11251 		highest_tsn = asoc->highest_tsn_inside_nr_map;
11252 	}
11253 	if (highest_tsn == asoc->cumulative_tsn) {
11254 		/* no gaps */
11255 		if (type == SCTP_SELECTIVE_ACK) {
11256 			space_req = sizeof(struct sctp_sack_chunk);
11257 		} else {
11258 			space_req = sizeof(struct sctp_nr_sack_chunk);
11259 		}
11260 	} else {
11261 		/* gaps get a cluster */
11262 		space_req = MCLBYTES;
11263 	}
11264 	/* Ok, now let's formulate an mbuf with our SACK */
11265 	a_chk->data = sctp_get_mbuf_for_msg(space_req, 0, M_NOWAIT, 1, MT_DATA);
11266 	if ((a_chk->data == NULL) ||
11267 	    (a_chk->whoTo == NULL)) {
11268 		/* rats, no mbuf memory */
11269 		if (a_chk->data) {
11270 			/* was a problem with the destination */
11271 			sctp_m_freem(a_chk->data);
11272 			a_chk->data = NULL;
11273 		}
11274 		sctp_free_a_chunk(stcb, a_chk, so_locked);
11275 		/* sa_ignore NO_NULL_CHK */
11276 		if (stcb->asoc.delayed_ack) {
11277 			sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
11278 			                stcb->sctp_ep, stcb, NULL,
11279 			                SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_5);
11280 			sctp_timer_start(SCTP_TIMER_TYPE_RECV,
11281 			    stcb->sctp_ep, stcb, NULL);
11282 		} else {
11283 			stcb->asoc.send_sack = 1;
11284 		}
11285 		return;
11286 	}
11287 	/* ok, let's go through and fill it in */
11288 	SCTP_BUF_RESV_UF(a_chk->data, SCTP_MIN_OVERHEAD);
11289 	space = (unsigned int)M_TRAILINGSPACE(a_chk->data);
11290 	if (space > (a_chk->whoTo->mtu - SCTP_MIN_OVERHEAD)) {
11291 		space = (a_chk->whoTo->mtu - SCTP_MIN_OVERHEAD);
11292 	}
11293 	limit = mtod(a_chk->data, caddr_t);
11294 	limit += space;
11295 
11296 	flags = 0;
11297 
11298 	if ((asoc->sctp_cmt_on_off > 0) &&
11299 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
11300 		/*-
11301 		 * CMT DAC algorithm: If 2 (i.e., binary 10) packets have been
11302 		 * received, then set the high bit to 1, else 0. Reset
11303 		 * pkts_rcvd.
11304 		 */
11305 		flags |= (asoc->cmt_dac_pkts_rcvd << 6);
11306 		asoc->cmt_dac_pkts_rcvd = 0;
11307 	}
11308 #ifdef SCTP_ASOCLOG_OF_TSNS
11309 	stcb->asoc.cumack_logsnt[stcb->asoc.cumack_log_atsnt] = asoc->cumulative_tsn;
11310 	stcb->asoc.cumack_log_atsnt++;
11311 	if (stcb->asoc.cumack_log_atsnt >= SCTP_TSN_LOG_SIZE) {
11312 		stcb->asoc.cumack_log_atsnt = 0;
11313 	}
11314 #endif
11315 	/* reset the reader's interpretation */
11316 	stcb->freed_by_sorcv_sincelast = 0;
11317 
11318 	if (type == SCTP_SELECTIVE_ACK) {
11319 		sack = mtod(a_chk->data, struct sctp_sack_chunk *);
11320 		nr_sack = NULL;
11321 		gap_descriptor = (struct sctp_gap_ack_block *)((caddr_t)sack + sizeof(struct sctp_sack_chunk));
11322 		if (highest_tsn > asoc->mapping_array_base_tsn) {
11323 			siz = (((highest_tsn - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
11324 		} else {
11325 			siz = (((MAX_TSN - asoc->mapping_array_base_tsn) + 1) + highest_tsn + 7) / 8;
11326 		}
11327 	} else {
11328 		sack = NULL;
11329 		nr_sack = mtod(a_chk->data, struct sctp_nr_sack_chunk *);
11330 		gap_descriptor = (struct sctp_gap_ack_block *)((caddr_t)nr_sack + sizeof(struct sctp_nr_sack_chunk));
11331 		if (asoc->highest_tsn_inside_map > asoc->mapping_array_base_tsn) {
11332 			siz = (((asoc->highest_tsn_inside_map - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
11333 		} else {
11334 			siz = (((MAX_TSN - asoc->mapping_array_base_tsn) + 1) + asoc->highest_tsn_inside_map + 7) / 8;
11335 		}
11336 	}
11337 
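	/*
	 * Gap ack block boundaries in the SACK are carried as offsets from
	 * the cumulative TSN, so work out where the base of the mapping
	 * array lies relative to it before walking the map below.
	 */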
11338 	if (SCTP_TSN_GT(asoc->mapping_array_base_tsn, asoc->cumulative_tsn)) {
11339 		offset = 1;
11340 	} else {
11341 		offset = asoc->mapping_array_base_tsn - asoc->cumulative_tsn;
11342 	}
11343 	if (((type == SCTP_SELECTIVE_ACK) &&
11344 	     SCTP_TSN_GT(highest_tsn, asoc->cumulative_tsn)) ||
11345 	    ((type == SCTP_NR_SELECTIVE_ACK) &&
11346 	     SCTP_TSN_GT(asoc->highest_tsn_inside_map, asoc->cumulative_tsn))) {
11347 		/* we have a gap .. maybe */
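		/*
		 * Each byte of the mapping array is used as an index into
		 * the sack_array lookup table; the selected entry lists the
		 * gap runs within those 8 TSNs and whether a run touches the
		 * left or right edge of the byte, which lets runs in
		 * adjacent bytes be merged into a single gap block.
		 */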
11348 		for (i = 0; i < siz; i++) {
11349 			tsn_map = asoc->mapping_array[i];
11350 			if (type == SCTP_SELECTIVE_ACK) {
11351 				tsn_map |= asoc->nr_mapping_array[i];
11352 			}
11353 			if (i == 0) {
11354 				/*
11355 				 * Clear all bits corresponding to TSNs
11356 				 * smaller or equal to the cumulative TSN.
11357 				 */
11358 				tsn_map &= (~0U << (1 - offset));
11359 			}
11360 			selector = &sack_array[tsn_map];
11361 			if (mergeable && selector->right_edge) {
11362 				/*
11363 				 * Backup, left and right edges were ok to
11364 				 * merge.
11365 				 */
11366 				num_gap_blocks--;
11367 				gap_descriptor--;
11368 			}
11369 			if (selector->num_entries == 0)
11370 				mergeable = 0;
11371 			else {
11372 				for (j = 0; j < selector->num_entries; j++) {
11373 					if (mergeable && selector->right_edge) {
11374 						/*
11375 						 * do a merge by NOT setting
11376 						 * the left side
11377 						 */
11378 						mergeable = 0;
11379 					} else {
11380 						/*
11381 						 * no merge, set the left
11382 						 * side
11383 						 */
11384 						mergeable = 0;
11385 						gap_descriptor->start = htons((selector->gaps[j].start + offset));
11386 					}
11387 					gap_descriptor->end = htons((selector->gaps[j].end + offset));
11388 					num_gap_blocks++;
11389 					gap_descriptor++;
11390 					if (((caddr_t)gap_descriptor + sizeof(struct sctp_gap_ack_block)) > limit) {
11391 						/* no more room */
11392 						limit_reached = 1;
11393 						break;
11394 					}
11395 				}
11396 				if (selector->left_edge) {
11397 					mergeable = 1;
11398 				}
11399 			}
11400 			if (limit_reached) {
11401 				/* Reached the limit stop */
11402 				break;
11403 			}
11404 			offset += 8;
11405 		}
11406 	}
11407 	if ((type == SCTP_NR_SELECTIVE_ACK) &&
11408 	    (limit_reached == 0)) {
11409 
11410 		mergeable = 0;
11411 
11412 		if (asoc->highest_tsn_inside_nr_map > asoc->mapping_array_base_tsn) {
11413 			siz = (((asoc->highest_tsn_inside_nr_map - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
11414 		} else {
11415 			siz = (((MAX_TSN - asoc->mapping_array_base_tsn) + 1) + asoc->highest_tsn_inside_nr_map + 7) / 8;
11416 		}
11417 
11418 		if (SCTP_TSN_GT(asoc->mapping_array_base_tsn, asoc->cumulative_tsn)) {
11419 			offset = 1;
11420 		} else {
11421 			offset = asoc->mapping_array_base_tsn - asoc->cumulative_tsn;
11422 		}
11423 		if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->cumulative_tsn)) {
11424 			/* we have a gap .. maybe */
11425 			for (i = 0; i < siz; i++) {
11426 				tsn_map = asoc->nr_mapping_array[i];
11427 				if (i == 0) {
11428 					/*
11429 					 * Clear all bits corresponding to TSNs
11430 					 * smaller or equal to the cumulative TSN.
11431 					 */
11432 					tsn_map &= (~0U << (1 - offset));
11433 				}
11434 				selector = &sack_array[tsn_map];
11435 				if (mergeable && selector->right_edge) {
11436 					/*
11437 					 * Backup, left and right edges were ok to
11438 					 * merge.
11439 					 */
11440 					num_nr_gap_blocks--;
11441 					gap_descriptor--;
11442 				}
11443 				if (selector->num_entries == 0)
11444 					mergeable = 0;
11445 				else {
11446 					for (j = 0; j < selector->num_entries; j++) {
11447 						if (mergeable && selector->right_edge) {
11448 							/*
11449 							 * do a merge by NOT setting
11450 							 * the left side
11451 							 */
11452 							mergeable = 0;
11453 						} else {
11454 							/*
11455 							 * no merge, set the left
11456 							 * side
11457 							 */
11458 							mergeable = 0;
11459 							gap_descriptor->start = htons((selector->gaps[j].start + offset));
11460 						}
11461 						gap_descriptor->end = htons((selector->gaps[j].end + offset));
11462 						num_nr_gap_blocks++;
11463 						gap_descriptor++;
11464 						if (((caddr_t)gap_descriptor + sizeof(struct sctp_gap_ack_block)) > limit) {
11465 							/* no more room */
11466 							limit_reached = 1;
11467 							break;
11468 						}
11469 					}
11470 					if (selector->left_edge) {
11471 						mergeable = 1;
11472 					}
11473 				}
11474 				if (limit_reached) {
11475 					/* Reached the limit stop */
11476 					break;
11477 				}
11478 				offset += 8;
11479 			}
11480 		}
11481 	}
11482 	/* now we must add any dups we are going to report. */
11483 	if ((limit_reached == 0) && (asoc->numduptsns)) {
11484 		dup = (uint32_t *) gap_descriptor;
11485 		for (i = 0; i < asoc->numduptsns; i++) {
11486 			*dup = htonl(asoc->dup_tsns[i]);
11487 			dup++;
11488 			num_dups++;
11489 			if (((caddr_t)dup + sizeof(uint32_t)) > limit) {
11490 				/* no more room */
11491 				break;
11492 			}
11493 		}
11494 		asoc->numduptsns = 0;
11495 	}
11496 	/*
11497 	 * now that the chunk is prepared, queue it to the control chunk
11498 	 * queue.
11499 	 */
11500 	if (type == SCTP_SELECTIVE_ACK) {
11501 		a_chk->send_size = (uint16_t)(sizeof(struct sctp_sack_chunk) +
11502 		                              (num_gap_blocks + num_nr_gap_blocks) * sizeof(struct sctp_gap_ack_block) +
11503 		                              num_dups * sizeof(int32_t));
11504 		SCTP_BUF_LEN(a_chk->data) = a_chk->send_size;
11505 		sack->sack.cum_tsn_ack = htonl(asoc->cumulative_tsn);
11506 		sack->sack.a_rwnd = htonl(asoc->my_rwnd);
11507 		sack->sack.num_gap_ack_blks = htons(num_gap_blocks);
11508 		sack->sack.num_dup_tsns = htons(num_dups);
11509 		sack->ch.chunk_type = type;
11510 		sack->ch.chunk_flags = flags;
11511 		sack->ch.chunk_length = htons(a_chk->send_size);
11512 	} else {
11513 		a_chk->send_size = (uint16_t)(sizeof(struct sctp_nr_sack_chunk) +
11514 		                              (num_gap_blocks + num_nr_gap_blocks) * sizeof(struct sctp_gap_ack_block) +
11515 		                              num_dups * sizeof(int32_t));
11516 		SCTP_BUF_LEN(a_chk->data) = a_chk->send_size;
11517 		nr_sack->nr_sack.cum_tsn_ack = htonl(asoc->cumulative_tsn);
11518 		nr_sack->nr_sack.a_rwnd = htonl(asoc->my_rwnd);
11519 		nr_sack->nr_sack.num_gap_ack_blks = htons(num_gap_blocks);
11520 		nr_sack->nr_sack.num_nr_gap_ack_blks = htons(num_nr_gap_blocks);
11521 		nr_sack->nr_sack.num_dup_tsns = htons(num_dups);
11522 		nr_sack->nr_sack.reserved = 0;
11523 		nr_sack->ch.chunk_type = type;
11524 		nr_sack->ch.chunk_flags = flags;
11525 		nr_sack->ch.chunk_length = htons(a_chk->send_size);
11526 	}
11527 	TAILQ_INSERT_TAIL(&asoc->control_send_queue, a_chk, sctp_next);
11528 	asoc->my_last_reported_rwnd = asoc->my_rwnd;
11529 	asoc->ctrl_queue_cnt++;
11530 	asoc->send_sack = 0;
11531 	SCTP_STAT_INCR(sctps_sendsacks);
11532 	return;
11533 }
11534 
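/*
 * Build an ABORT chunk for an existing association, prepending an AUTH
 * chunk if the peer requires ABORT to be authenticated and appending any
 * error causes passed in via 'operr'.  The packet is sent immediately
 * through sctp_lowlevel_chunk_output() rather than being queued.
 */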
11535 void
11536 sctp_send_abort_tcb(struct sctp_tcb *stcb, struct mbuf *operr, int so_locked
11537 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
11538     SCTP_UNUSED
11539 #endif
11540     )
11541 {
11542 	struct mbuf *m_abort, *m, *m_last;
11543 	struct mbuf *m_out, *m_end = NULL;
11544 	struct sctp_abort_chunk *abort;
11545 	struct sctp_auth_chunk *auth = NULL;
11546 	struct sctp_nets *net;
11547 	uint32_t vtag;
11548 	uint32_t auth_offset = 0;
11549 	int error;
11550 	uint16_t cause_len, chunk_len, padding_len;
11551 
11552 #if defined(__APPLE__)
11553 	if (so_locked) {
11554 		sctp_lock_assert(SCTP_INP_SO(stcb->sctp_ep));
11555 	} else {
11556 		sctp_unlock_assert(SCTP_INP_SO(stcb->sctp_ep));
11557 	}
11558 #endif
11559 	SCTP_TCB_LOCK_ASSERT(stcb);
11560 	/*-
11561 	 * Add an AUTH chunk, if chunk requires it and save the offset into
11562 	 * the chain for AUTH
11563 	 */
11564 	if (sctp_auth_is_required_chunk(SCTP_ABORT_ASSOCIATION,
11565 	                                stcb->asoc.peer_auth_chunks)) {
11566 		m_out = sctp_add_auth_chunk(NULL, &m_end, &auth, &auth_offset,
11567 					    stcb, SCTP_ABORT_ASSOCIATION);
11568 		SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
11569 	} else {
11570 		m_out = NULL;
11571 	}
11572 	m_abort = sctp_get_mbuf_for_msg(sizeof(struct sctp_abort_chunk), 0, M_NOWAIT, 1, MT_HEADER);
11573 	if (m_abort == NULL) {
11574 		if (m_out) {
11575 			sctp_m_freem(m_out);
11576 		}
11577 		if (operr) {
11578 			sctp_m_freem(operr);
11579 		}
11580 		return;
11581 	}
11582 	/* link in any error */
11583 	SCTP_BUF_NEXT(m_abort) = operr;
11584 	cause_len = 0;
11585 	m_last = NULL;
11586 	for (m = operr; m; m = SCTP_BUF_NEXT(m)) {
11587 		cause_len += (uint16_t)SCTP_BUF_LEN(m);
11588 		if (SCTP_BUF_NEXT(m) == NULL) {
11589 			m_last = m;
11590 		}
11591 	}
11592 	SCTP_BUF_LEN(m_abort) = sizeof(struct sctp_abort_chunk);
11593 	chunk_len = (uint16_t)sizeof(struct sctp_abort_chunk) + cause_len;
11594 	padding_len = SCTP_SIZE32(chunk_len) - chunk_len;
11595 	if (m_out == NULL) {
11596 		/* NO Auth chunk prepended, so reserve space in front */
11597 		SCTP_BUF_RESV_UF(m_abort, SCTP_MIN_OVERHEAD);
11598 		m_out = m_abort;
11599 	} else {
11600 		/* Put AUTH chunk at the front of the chain */
11601 		SCTP_BUF_NEXT(m_end) = m_abort;
11602 	}
11603 	if (stcb->asoc.alternate) {
11604 		net = stcb->asoc.alternate;
11605 	} else {
11606 		net = stcb->asoc.primary_destination;
11607 	}
11608 	/* Fill in the ABORT chunk header. */
11609 	abort = mtod(m_abort, struct sctp_abort_chunk *);
11610 	abort->ch.chunk_type = SCTP_ABORT_ASSOCIATION;
11611 	if (stcb->asoc.peer_vtag == 0) {
11612 		/* This happens iff the assoc is in COOKIE-WAIT state. */
11613 		vtag = stcb->asoc.my_vtag;
11614 		abort->ch.chunk_flags = SCTP_HAD_NO_TCB;
11615 	} else {
11616 		vtag = stcb->asoc.peer_vtag;
11617 		abort->ch.chunk_flags = 0;
11618 	}
11619 	abort->ch.chunk_length = htons(chunk_len);
11620 	/* Add padding, if necessary. */
11621 	if (padding_len > 0) {
11622 		if ((m_last == NULL) ||
11623 		    (sctp_add_pad_tombuf(m_last, padding_len) == NULL)) {
11624 			sctp_m_freem(m_out);
11625 			return;
11626 		}
11627 	}
11628 	if ((error = sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb, net,
11629 	                                        (struct sockaddr *)&net->ro._l_addr,
11630 	                                        m_out, auth_offset, auth, stcb->asoc.authinfo.active_keyid, 1, 0, 0,
11631 	                                        stcb->sctp_ep->sctp_lport, stcb->rport, htonl(vtag),
11632 	                                        stcb->asoc.primary_destination->port, NULL,
11633 #if defined(__FreeBSD__)
11634 	                                        0, 0,
11635 #endif
11636 	                                        so_locked))) {
11637 		SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
11638 		if (error == ENOBUFS) {
11639 			stcb->asoc.ifp_had_enobuf = 1;
11640 			SCTP_STAT_INCR(sctps_lowlevelerr);
11641 		}
11642 	} else {
11643 		stcb->asoc.ifp_had_enobuf = 0;
11644 	}
11645 	SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
11646 }
11647 
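/*
 * Formulate and immediately send a SHUTDOWN-COMPLETE chunk to the given
 * destination.  When reflect_vtag is set, the chunk carries the
 * SCTP_HAD_NO_TCB flag (the T bit) and our own verification tag is used
 * instead of the peer's.
 */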
11648 void
11649 sctp_send_shutdown_complete(struct sctp_tcb *stcb,
11650                             struct sctp_nets *net,
11651                             int reflect_vtag)
11652 {
11653 	/* formulate and SEND a SHUTDOWN-COMPLETE */
11654 	struct mbuf *m_shutdown_comp;
11655 	struct sctp_shutdown_complete_chunk *shutdown_complete;
11656 	uint32_t vtag;
11657 	int error;
11658 	uint8_t flags;
11659 
11660 	m_shutdown_comp = sctp_get_mbuf_for_msg(sizeof(struct sctp_chunkhdr), 0, M_NOWAIT, 1, MT_HEADER);
11661 	if (m_shutdown_comp == NULL) {
11662 		/* no mbuf's */
11663 		/* no mbufs */
11664 	}
11665 	if (reflect_vtag) {
11666 		flags = SCTP_HAD_NO_TCB;
11667 		vtag = stcb->asoc.my_vtag;
11668 	} else {
11669 		flags = 0;
11670 		vtag = stcb->asoc.peer_vtag;
11671 	}
11672 	shutdown_complete = mtod(m_shutdown_comp, struct sctp_shutdown_complete_chunk *);
11673 	shutdown_complete->ch.chunk_type = SCTP_SHUTDOWN_COMPLETE;
11674 	shutdown_complete->ch.chunk_flags = flags;
11675 	shutdown_complete->ch.chunk_length = htons(sizeof(struct sctp_shutdown_complete_chunk));
11676 	SCTP_BUF_LEN(m_shutdown_comp) = sizeof(struct sctp_shutdown_complete_chunk);
11677 	if ((error = sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb, net,
11678 	                                        (struct sockaddr *)&net->ro._l_addr,
11679 	                                        m_shutdown_comp, 0, NULL, 0, 1, 0, 0,
11680 	                                        stcb->sctp_ep->sctp_lport, stcb->rport,
11681 	                                        htonl(vtag),
11682 	                                        net->port, NULL,
11683 #if defined(__FreeBSD__)
11684 	                                        0, 0,
11685 #endif
11686 	                                        SCTP_SO_NOT_LOCKED))) {
11687 		SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
11688 		if (error == ENOBUFS) {
11689 			stcb->asoc.ifp_had_enobuf = 1;
11690 			SCTP_STAT_INCR(sctps_lowlevelerr);
11691 		}
11692 	} else {
11693 		stcb->asoc.ifp_had_enobuf = 0;
11694 	}
11695 	SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
11696 	return;
11697 }
11698 
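/*
 * Common helper for replies generated without an association, such as the
 * SHUTDOWN-COMPLETE sent by sctp_send_shutdown_complete2() below.  It
 * builds a minimal IPv4/IPv6 (optionally UDP-encapsulated) packet back to
 * the source of the incoming packet, swapping addresses and ports, and
 * appends a chunk of the requested type plus an optional cause chain.
 */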
11699 #if defined(__FreeBSD__)
11700 static void
11701 sctp_send_resp_msg(struct sockaddr *src, struct sockaddr *dst,
11702                    struct sctphdr *sh, uint32_t vtag,
11703                    uint8_t type, struct mbuf *cause,
11704                    uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
11705                    uint32_t vrf_id, uint16_t port)
11706 #else
11707 static void
11708 sctp_send_resp_msg(struct sockaddr *src, struct sockaddr *dst,
11709                    struct sctphdr *sh, uint32_t vtag,
11710                    uint8_t type, struct mbuf *cause,
11711                    uint32_t vrf_id SCTP_UNUSED, uint16_t port)
11712 #endif
11713 {
11714 #ifdef __Panda__
11715 	pakhandle_type o_pak;
11716 #else
11717 	struct mbuf *o_pak;
11718 #endif
11719 	struct mbuf *mout;
11720 	struct sctphdr *shout;
11721 	struct sctp_chunkhdr *ch;
11722 #if defined(INET) || defined(INET6)
11723 	struct udphdr *udp;
11724 #endif
11725 	int ret, len, cause_len, padding_len;
11726 #ifdef INET
11727 #if defined(__APPLE__) || defined(__Panda__)
11728 	sctp_route_t ro;
11729 #endif
11730 	struct sockaddr_in *src_sin, *dst_sin;
11731 	struct ip *ip;
11732 #endif
11733 #ifdef INET6
11734 	struct sockaddr_in6 *src_sin6, *dst_sin6;
11735 	struct ip6_hdr *ip6;
11736 #endif
11737 
11738 	/* Compute the length of the cause and add final padding. */
11739 	cause_len = 0;
11740 	if (cause != NULL) {
11741 		struct mbuf *m_at, *m_last = NULL;
11742 
11743 		for (m_at = cause; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
11744 			if (SCTP_BUF_NEXT(m_at) == NULL)
11745 				m_last = m_at;
11746 			cause_len += SCTP_BUF_LEN(m_at);
11747 		}
11748 		padding_len = cause_len % 4;
11749 		if (padding_len != 0) {
11750 			padding_len = 4 - padding_len;
11751 		}
11752 		if (padding_len != 0) {
11753 			if (sctp_add_pad_tombuf(m_last, padding_len) == NULL) {
11754 				sctp_m_freem(cause);
11755 				return;
11756 			}
11757 		}
11758 	} else {
11759 		padding_len = 0;
11760 	}
11761 	/* Get an mbuf for the header. */
11762 	len = sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
11763 	switch (dst->sa_family) {
11764 #ifdef INET
11765 	case AF_INET:
11766 		len += sizeof(struct ip);
11767 		break;
11768 #endif
11769 #ifdef INET6
11770 	case AF_INET6:
11771 		len += sizeof(struct ip6_hdr);
11772 		break;
11773 #endif
11774 	default:
11775 		break;
11776 	}
11777 #if defined(INET) || defined(INET6)
11778 	if (port) {
11779 		len += sizeof(struct udphdr);
11780 	}
11781 #endif
11782 #if defined(__APPLE__)
11783 #if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD)
11784 	mout = sctp_get_mbuf_for_msg(len + max_linkhdr, 1, M_NOWAIT, 1, MT_DATA);
11785 #else
11786 	mout = sctp_get_mbuf_for_msg(len + SCTP_MAX_LINKHDR, 1, M_NOWAIT, 1, MT_DATA);
11787 #endif
11788 #else
11789 	mout = sctp_get_mbuf_for_msg(len + max_linkhdr, 1, M_NOWAIT, 1, MT_DATA);
11790 #endif
11791 	if (mout == NULL) {
11792 		if (cause) {
11793 			sctp_m_freem(cause);
11794 		}
11795 		return;
11796 	}
11797 #if defined(__APPLE__)
11798 #if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD)
11799 	SCTP_BUF_RESV_UF(mout, max_linkhdr);
11800 #else
11801 	SCTP_BUF_RESV_UF(mout, SCTP_MAX_LINKHDR);
11802 #endif
11803 #else
11804 	SCTP_BUF_RESV_UF(mout, max_linkhdr);
11805 #endif
11806 	SCTP_BUF_LEN(mout) = len;
11807 	SCTP_BUF_NEXT(mout) = cause;
11808 #if defined(__FreeBSD__)
11809 	M_SETFIB(mout, fibnum);
11810 	mout->m_pkthdr.flowid = mflowid;
11811 	M_HASHTYPE_SET(mout, mflowtype);
11812 #endif
11813 #ifdef INET
11814 	ip = NULL;
11815 #endif
11816 #ifdef INET6
11817 	ip6 = NULL;
11818 #endif
11819 	switch (dst->sa_family) {
11820 #ifdef INET
11821 	case AF_INET:
11822 		src_sin = (struct sockaddr_in *)src;
11823 		dst_sin = (struct sockaddr_in *)dst;
11824 		ip = mtod(mout, struct ip *);
11825 		ip->ip_v = IPVERSION;
11826 		ip->ip_hl = (sizeof(struct ip) >> 2);
11827 		ip->ip_tos = 0;
11828 #if defined(__FreeBSD__)
11829 #if __FreeBSD_version >= 1000000
11830 		ip->ip_off = htons(IP_DF);
11831 #else
11832 		ip->ip_off = IP_DF;
11833 #endif
11834 #elif defined(WITH_CONVERT_IP_OFF) || defined(__APPLE__) || defined(__Userspace_os_Darwin)
11835 		ip->ip_off = IP_DF;
11836 #else
11837 		ip->ip_off = htons(IP_DF);
11838 #endif
11839 #if defined(__FreeBSD__)
11840 		ip_fillid(ip);
11841 #elif defined(__APPLE__)
11842 #if RANDOM_IP_ID
11843 		ip->ip_id = ip_randomid();
11844 #else
11845 		ip->ip_id = htons(ip_id++);
11846 #endif
11847 #elif defined(__Userspace__)
11848 		ip->ip_id = htons(ip_id++);
11849 #else
11850 		ip->ip_id = ip_id++;
11851 #endif
11852 		ip->ip_ttl = MODULE_GLOBAL(ip_defttl);
11853 		if (port) {
11854 			ip->ip_p = IPPROTO_UDP;
11855 		} else {
11856 			ip->ip_p = IPPROTO_SCTP;
11857 		}
11858 		ip->ip_src.s_addr = dst_sin->sin_addr.s_addr;
11859 		ip->ip_dst.s_addr = src_sin->sin_addr.s_addr;
11860 		ip->ip_sum = 0;
11861 		len = sizeof(struct ip);
11862 		shout = (struct sctphdr *)((caddr_t)ip + len);
11863 		break;
11864 #endif
11865 #ifdef INET6
11866 	case AF_INET6:
11867 		src_sin6 = (struct sockaddr_in6 *)src;
11868 		dst_sin6 = (struct sockaddr_in6 *)dst;
11869 		ip6 = mtod(mout, struct ip6_hdr *);
11870 		ip6->ip6_flow = htonl(0x60000000);
11871 #if defined(__FreeBSD__)
11872 		if (V_ip6_auto_flowlabel) {
11873 			ip6->ip6_flow |= (htonl(ip6_randomflowlabel()) & IPV6_FLOWLABEL_MASK);
11874 		}
11875 #endif
11876 #if defined(__Userspace__)
11877 		ip6->ip6_hlim = IPv6_HOP_LIMIT;
11878 #else
11879 		ip6->ip6_hlim = MODULE_GLOBAL(ip6_defhlim);
11880 #endif
11881 		if (port) {
11882 			ip6->ip6_nxt = IPPROTO_UDP;
11883 		} else {
11884 			ip6->ip6_nxt = IPPROTO_SCTP;
11885 		}
11886 		ip6->ip6_src = dst_sin6->sin6_addr;
11887 		ip6->ip6_dst = src_sin6->sin6_addr;
11888 		len = sizeof(struct ip6_hdr);
11889 		shout = (struct sctphdr *)((caddr_t)ip6 + len);
11890 		break;
11891 #endif
11892 	default:
11893 		len = 0;
11894 		shout = mtod(mout, struct sctphdr *);
11895 		break;
11896 	}
11897 #if defined(INET) || defined(INET6)
11898 	if (port) {
11899 		if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) {
11900 			sctp_m_freem(mout);
11901 			return;
11902 		}
11903 		udp = (struct udphdr *)shout;
11904 		udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
11905 		udp->uh_dport = port;
11906 		udp->uh_sum = 0;
11907 		udp->uh_ulen = htons((uint16_t)(sizeof(struct udphdr) +
11908 		                                sizeof(struct sctphdr) +
11909 		                                sizeof(struct sctp_chunkhdr) +
11910 		                                cause_len + padding_len));
11911 		len += sizeof(struct udphdr);
11912 		shout = (struct sctphdr *)((caddr_t)shout + sizeof(struct udphdr));
11913 	} else {
11914 		udp = NULL;
11915 	}
11916 #endif
11917 	shout->src_port = sh->dest_port;
11918 	shout->dest_port = sh->src_port;
11919 	shout->checksum = 0;
11920 	if (vtag) {
11921 		shout->v_tag = htonl(vtag);
11922 	} else {
11923 		shout->v_tag = sh->v_tag;
11924 	}
11925 	len += sizeof(struct sctphdr);
11926 	ch = (struct sctp_chunkhdr *)((caddr_t)shout + sizeof(struct sctphdr));
11927 	ch->chunk_type = type;
11928 	if (vtag) {
11929 		ch->chunk_flags = 0;
11930 	} else {
11931 		ch->chunk_flags = SCTP_HAD_NO_TCB;
11932 	}
11933 	ch->chunk_length = htons((uint16_t)(sizeof(struct sctp_chunkhdr) + cause_len));
11934 	len += sizeof(struct sctp_chunkhdr);
11935 	len += cause_len + padding_len;
11936 
11937 	if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
11938 		sctp_m_freem(mout);
11939 		return;
11940 	}
11941 	SCTP_ATTACH_CHAIN(o_pak, mout, len);
11942 	switch (dst->sa_family) {
11943 #ifdef INET
11944 	case AF_INET:
11945 #if defined(__APPLE__) || defined(__Panda__)
11946 		/* zap the stack pointer to the route */
11947 		memset(&ro, 0, sizeof(sctp_route_t));
11948 #if defined(__Panda__)
11949 		ro._l_addr.sa.sa_family = AF_INET;
11950 #endif
11951 #endif
11952 		if (port) {
11953 #if !defined(__Windows__) && !defined(__Userspace__)
11954 #if defined(__FreeBSD__) && ((__FreeBSD_version > 803000 && __FreeBSD_version < 900000) || __FreeBSD_version > 900000)
11955 			if (V_udp_cksum) {
11956 				udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
11957 			} else {
11958 				udp->uh_sum = 0;
11959 			}
11960 #else
11961 			udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
11962 #endif
11963 #else
11964 			udp->uh_sum = 0;
11965 #endif
11966 		}
11967 #if defined(__FreeBSD__)
11968 #if __FreeBSD_version >= 1000000
11969 		ip->ip_len = htons(len);
11970 #else
11971 		ip->ip_len = len;
11972 #endif
11973 #elif defined(__APPLE__) || defined(__Userspace__)
11974 		ip->ip_len = len;
11975 #else
11976 		ip->ip_len = htons(len);
11977 #endif
11978 		if (port) {
11979 			shout->checksum = sctp_calculate_cksum(mout, sizeof(struct ip) + sizeof(struct udphdr));
11980 			SCTP_STAT_INCR(sctps_sendswcrc);
11981 #if !defined(__Windows__) && !defined(__Userspace__)
11982 #if defined(__FreeBSD__) && ((__FreeBSD_version > 803000 && __FreeBSD_version < 900000) || __FreeBSD_version > 900000)
11983 			if (V_udp_cksum) {
11984 				SCTP_ENABLE_UDP_CSUM(o_pak);
11985 			}
11986 #else
11987 			SCTP_ENABLE_UDP_CSUM(o_pak);
11988 #endif
11989 #endif
11990 		} else {
11991 #if defined(__FreeBSD__) && __FreeBSD_version >= 800000
11992 			mout->m_pkthdr.csum_flags = CSUM_SCTP;
11993 			mout->m_pkthdr.csum_data = offsetof(struct sctphdr, checksum);
11994 			SCTP_STAT_INCR(sctps_sendhwcrc);
11995 #else
11996 			shout->checksum = sctp_calculate_cksum(mout, sizeof(struct ip));
11997 			SCTP_STAT_INCR(sctps_sendswcrc);
11998 #endif
11999 		}
12000 #ifdef SCTP_PACKET_LOGGING
12001 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) {
12002 			sctp_packet_log(o_pak);
12003 		}
12004 #endif
12005 #if defined(__APPLE__) || defined(__Panda__)
12006 		SCTP_IP_OUTPUT(ret, o_pak, &ro, NULL, vrf_id);
12007 		/* Free the route if we got one back */
12008 		if (ro.ro_rt) {
12009 			RTFREE(ro.ro_rt);
12010 			ro.ro_rt = NULL;
12011 		}
12012 #else
12013 #if defined(__FreeBSD__)
12014 		SCTP_PROBE5(send, NULL, NULL, ip, NULL, shout);
12015 #endif
12016 		SCTP_IP_OUTPUT(ret, o_pak, NULL, NULL, vrf_id);
12017 #endif
12018 		break;
12019 #endif
12020 #ifdef INET6
12021 	case AF_INET6:
12022 		ip6->ip6_plen = htons((uint16_t)(len - sizeof(struct ip6_hdr)));
12023 		if (port) {
12024 			shout->checksum = sctp_calculate_cksum(mout, sizeof(struct ip6_hdr) + sizeof(struct udphdr));
12025 			SCTP_STAT_INCR(sctps_sendswcrc);
12026 #if defined(__Windows__)
12027 			udp->uh_sum = 0;
12028 #elif !defined(__Userspace__)
12029 			if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), len - sizeof(struct ip6_hdr))) == 0) {
12030 				udp->uh_sum = 0xffff;
12031 			}
12032 #endif
12033 		} else {
12034 #if defined(__FreeBSD__) && __FreeBSD_version >= 900000
12035 #if __FreeBSD_version > 901000
12036 			mout->m_pkthdr.csum_flags = CSUM_SCTP_IPV6;
12037 #else
12038 			mout->m_pkthdr.csum_flags = CSUM_SCTP;
12039 #endif
12040 			mout->m_pkthdr.csum_data = offsetof(struct sctphdr, checksum);
12041 			SCTP_STAT_INCR(sctps_sendhwcrc);
12042 #else
12043 			shout->checksum = sctp_calculate_cksum(mout, sizeof(struct ip6_hdr));
12044 			SCTP_STAT_INCR(sctps_sendswcrc);
12045 #endif
12046 		}
12047 #ifdef SCTP_PACKET_LOGGING
12048 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) {
12049 			sctp_packet_log(o_pak);
12050 		}
12051 #endif
12052 #if defined(__FreeBSD__)
12053 		SCTP_PROBE5(send, NULL, NULL, ip6, NULL, shout);
12054 #endif
12055 		SCTP_IP6_OUTPUT(ret, o_pak, NULL, NULL, NULL, vrf_id);
12056 		break;
12057 #endif
12058 #if defined(__Userspace__)
12059 	case AF_CONN:
12060 	{
12061 		char *buffer;
12062 		struct sockaddr_conn *sconn;
12063 
12064 		sconn = (struct sockaddr_conn *)src;
12065 		if (SCTP_BASE_VAR(crc32c_offloaded) == 0) {
12066 			shout->checksum = sctp_calculate_cksum(mout, 0);
12067 			SCTP_STAT_INCR(sctps_sendswcrc);
12068 		} else {
12069 			SCTP_STAT_INCR(sctps_sendhwcrc);
12070 		}
12071 #ifdef SCTP_PACKET_LOGGING
12072 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) {
12073 			sctp_packet_log(mout);
12074 		}
12075 #endif
12076 		/* Don't alloc/free for each packet */
12077 		if ((buffer = malloc(len)) != NULL) {
12078 			m_copydata(mout, 0, len, buffer);
12079 			ret = SCTP_BASE_VAR(conn_output)(sconn->sconn_addr, buffer, len, 0, 0);
12080 			free(buffer);
12081 		} else {
12082 			ret = ENOMEM;
12083 		}
12084 		sctp_m_freem(mout);
12085 		break;
12086 	}
12087 #endif
12088 	default:
12089 		SCTPDBG(SCTP_DEBUG_OUTPUT1, "Unknown protocol (TSNH) type %d\n",
12090 		        dst->sa_family);
12091 		sctp_m_freem(mout);
12092 		SCTP_LTRACE_ERR_RET_PKT(mout, NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EFAULT);
12093 		return;
12094 	}
12095 	SCTPDBG(SCTP_DEBUG_OUTPUT3, "return from send is %d\n", ret);
12096 #if defined(__FreeBSD__)
12097 	if (port) {
12098 		UDPSTAT_INC(udps_opackets);
12099 	}
12100 #endif
12101 	SCTP_STAT_INCR(sctps_sendpackets);
12102 	SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
12103 	SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
12104 	if (ret) {
12105 		SCTP_STAT_INCR(sctps_senderrors);
12106 	}
12107 	return;
12108 }
12109 
12110 void
12111 sctp_send_shutdown_complete2(struct sockaddr *src, struct sockaddr *dst,
12112                              struct sctphdr *sh,
12113 #if defined(__FreeBSD__)
12114                              uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
12115 #endif
12116                              uint32_t vrf_id, uint16_t port)
12117 {
12118 	sctp_send_resp_msg(src, dst, sh, 0, SCTP_SHUTDOWN_COMPLETE, NULL,
12119 #if defined(__FreeBSD__)
12120 	                   mflowtype, mflowid, fibnum,
12121 #endif
12122 	                   vrf_id, port);
12123 }
12124 
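/*
 * Queue a HEARTBEAT request to the given destination.  The heartbeat info
 * parameter carries the current time and the destination address, plus
 * random values (drawn only for unconfirmed addresses) that are saved on
 * the net so the corresponding HEARTBEAT-ACK can be validated.
 */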
12125 void
12126 sctp_send_hb(struct sctp_tcb *stcb, struct sctp_nets *net, int so_locked
12127 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
12128 	SCTP_UNUSED
12129 #endif
12130 )
12131 {
12132 	struct sctp_tmit_chunk *chk;
12133 	struct sctp_heartbeat_chunk *hb;
12134 	struct timeval now;
12135 
12136 	SCTP_TCB_LOCK_ASSERT(stcb);
12137 	if (net == NULL) {
12138 		return;
12139 	}
12140 	(void)SCTP_GETTIME_TIMEVAL(&now);
12141 	switch (net->ro._l_addr.sa.sa_family) {
12142 #ifdef INET
12143 	case AF_INET:
12144 		break;
12145 #endif
12146 #ifdef INET6
12147 	case AF_INET6:
12148 		break;
12149 #endif
12150 #if defined(__Userspace__)
12151 	case AF_CONN:
12152 		break;
12153 #endif
12154 	default:
12155 		return;
12156 	}
12157 	sctp_alloc_a_chunk(stcb, chk);
12158 	if (chk == NULL) {
12159 		SCTPDBG(SCTP_DEBUG_OUTPUT4, "Gak, can't get a chunk for hb\n");
12160 		return;
12161 	}
12162 
12163 	chk->copy_by_ref = 0;
12164 	chk->rec.chunk_id.id = SCTP_HEARTBEAT_REQUEST;
12165 	chk->rec.chunk_id.can_take_data = 1;
12166 	chk->flags = 0;
12167 	chk->asoc = &stcb->asoc;
12168 	chk->send_size = sizeof(struct sctp_heartbeat_chunk);
12169 
12170 	chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_NOWAIT, 1, MT_HEADER);
12171 	if (chk->data == NULL) {
12172 		sctp_free_a_chunk(stcb, chk, so_locked);
12173 		return;
12174 	}
12175 	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
12176 	SCTP_BUF_LEN(chk->data) = chk->send_size;
12177 	chk->sent = SCTP_DATAGRAM_UNSENT;
12178 	chk->snd_count = 0;
12179 	chk->whoTo = net;
12180 	atomic_add_int(&chk->whoTo->ref_count, 1);
12181 	/* Now we have an mbuf that we can fill in with the details */
12182 	hb = mtod(chk->data, struct sctp_heartbeat_chunk *);
12183 	memset(hb, 0, sizeof(struct sctp_heartbeat_chunk));
12184 	/* fill out chunk header */
12185 	hb->ch.chunk_type = SCTP_HEARTBEAT_REQUEST;
12186 	hb->ch.chunk_flags = 0;
12187 	hb->ch.chunk_length = htons(chk->send_size);
12188 	/* Fill out hb parameter */
12189 	hb->heartbeat.hb_info.ph.param_type = htons(SCTP_HEARTBEAT_INFO);
12190 	hb->heartbeat.hb_info.ph.param_length = htons(sizeof(struct sctp_heartbeat_info_param));
12191 	hb->heartbeat.hb_info.time_value_1 = now.tv_sec;
12192 	hb->heartbeat.hb_info.time_value_2 = now.tv_usec;
12193 	/* Did our user request this one? Put it in. */
12194 	hb->heartbeat.hb_info.addr_family = (uint8_t)net->ro._l_addr.sa.sa_family;
12195 #ifdef HAVE_SA_LEN
12196 	hb->heartbeat.hb_info.addr_len = net->ro._l_addr.sa.sa_len;
12197 #else
12198 	switch (net->ro._l_addr.sa.sa_family) {
12199 #ifdef INET
12200 	case AF_INET:
12201 		hb->heartbeat.hb_info.addr_len = sizeof(struct sockaddr_in);
12202 		break;
12203 #endif
12204 #ifdef INET6
12205 	case AF_INET6:
12206 		hb->heartbeat.hb_info.addr_len = sizeof(struct sockaddr_in6);
12207 		break;
12208 #endif
12209 #if defined(__Userspace__)
12210 	case AF_CONN:
12211 		hb->heartbeat.hb_info.addr_len = sizeof(struct sockaddr_conn);
12212 		break;
12213 #endif
12214 	default:
12215 		hb->heartbeat.hb_info.addr_len = 0;
12216 		break;
12217 	}
12218 #endif
12219 	if (net->dest_state & SCTP_ADDR_UNCONFIRMED) {
12220 		/*
12221 		 * we only take from the entropy pool if the address is not
12222 		 * confirmed.
12223 		 */
12224 		net->heartbeat_random1 = hb->heartbeat.hb_info.random_value1 = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
12225 		net->heartbeat_random2 = hb->heartbeat.hb_info.random_value2 = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
12226 	} else {
12227 		net->heartbeat_random1 = hb->heartbeat.hb_info.random_value1 = 0;
12228 		net->heartbeat_random2 = hb->heartbeat.hb_info.random_value2 = 0;
12229 	}
12230 	switch (net->ro._l_addr.sa.sa_family) {
12231 #ifdef INET
12232 	case AF_INET:
12233 		memcpy(hb->heartbeat.hb_info.address,
12234 		       &net->ro._l_addr.sin.sin_addr,
12235 		       sizeof(net->ro._l_addr.sin.sin_addr));
12236 		break;
12237 #endif
12238 #ifdef INET6
12239 	case AF_INET6:
12240 		memcpy(hb->heartbeat.hb_info.address,
12241 		       &net->ro._l_addr.sin6.sin6_addr,
12242 		       sizeof(net->ro._l_addr.sin6.sin6_addr));
12243 		break;
12244 #endif
12245 #if defined(__Userspace__)
12246 	case AF_CONN:
12247 		memcpy(hb->heartbeat.hb_info.address,
12248 		       &net->ro._l_addr.sconn.sconn_addr,
12249 		       sizeof(net->ro._l_addr.sconn.sconn_addr));
12250 		break;
12251 #endif
12252 	default:
12253 		if (chk->data) {
12254 			sctp_m_freem(chk->data);
12255 			chk->data = NULL;
12256 		}
12257 		sctp_free_a_chunk(stcb, chk, so_locked);
12258 		return;
12259 		break;
12260 	}
12261 	net->hb_responded = 0;
12262 	TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
12263 	stcb->asoc.ctrl_queue_cnt++;
12264 	SCTP_STAT_INCR(sctps_sendheartbeat);
12265 	return;
12266 }
12267 
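/*
 * Queue an ECN-ECHO for the given destination.  If one is already pending
 * for that destination it is updated in place: the reported TSN is raised
 * if needed and the packets-since-CWR counter is incremented.
 */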
12268 void
12269 sctp_send_ecn_echo(struct sctp_tcb *stcb, struct sctp_nets *net,
12270 		   uint32_t high_tsn)
12271 {
12272 	struct sctp_association *asoc;
12273 	struct sctp_ecne_chunk *ecne;
12274 	struct sctp_tmit_chunk *chk;
12275 
12276 	if (net == NULL) {
12277 		return;
12278 	}
12279 	asoc = &stcb->asoc;
12280 	SCTP_TCB_LOCK_ASSERT(stcb);
12281 	TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
12282 		if ((chk->rec.chunk_id.id == SCTP_ECN_ECHO) && (net == chk->whoTo)) {
12283 			/* found a previous ECN_ECHO; update it if needed */
12284 			uint32_t cnt, ctsn;
12285 			ecne = mtod(chk->data, struct sctp_ecne_chunk *);
12286 			ctsn = ntohl(ecne->tsn);
12287 			if (SCTP_TSN_GT(high_tsn, ctsn)) {
12288 				ecne->tsn = htonl(high_tsn);
12289 				SCTP_STAT_INCR(sctps_queue_upd_ecne);
12290 			}
12291 			cnt = ntohl(ecne->num_pkts_since_cwr);
12292 			cnt++;
12293 			ecne->num_pkts_since_cwr = htonl(cnt);
12294 			return;
12295 		}
12296 	}
12297 	/* nope could not find one to update so we must build one */
12298 	sctp_alloc_a_chunk(stcb, chk);
12299 	if (chk == NULL) {
12300 		return;
12301 	}
12302 	SCTP_STAT_INCR(sctps_queue_upd_ecne);
12303 	chk->copy_by_ref = 0;
12304 	chk->rec.chunk_id.id = SCTP_ECN_ECHO;
12305 	chk->rec.chunk_id.can_take_data = 0;
12306 	chk->flags = 0;
12307 	chk->asoc = &stcb->asoc;
12308 	chk->send_size = sizeof(struct sctp_ecne_chunk);
12309 	chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_NOWAIT, 1, MT_HEADER);
12310 	if (chk->data == NULL) {
12311 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
12312 		return;
12313 	}
12314 	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
12315 	SCTP_BUF_LEN(chk->data) = chk->send_size;
12316 	chk->sent = SCTP_DATAGRAM_UNSENT;
12317 	chk->snd_count = 0;
12318 	chk->whoTo = net;
12319 	atomic_add_int(&chk->whoTo->ref_count, 1);
12320 
12321 	stcb->asoc.ecn_echo_cnt_onq++;
12322 	ecne = mtod(chk->data, struct sctp_ecne_chunk *);
12323 	ecne->ch.chunk_type = SCTP_ECN_ECHO;
12324 	ecne->ch.chunk_flags = 0;
12325 	ecne->ch.chunk_length = htons(sizeof(struct sctp_ecne_chunk));
12326 	ecne->tsn = htonl(high_tsn);
12327 	ecne->num_pkts_since_cwr = htonl(1);
12328 	TAILQ_INSERT_HEAD(&stcb->asoc.control_send_queue, chk, sctp_next);
12329 	asoc->ctrl_queue_cnt++;
12330 }
12331 
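/*
 * Queue a PACKET-DROPPED report carrying (a possibly truncated copy of)
 * the received packet, provided the peer advertised PKTDROP support and
 * the packet does not contain an ABORT, INIT-ACK or another
 * PACKET-DROPPED chunk.
 */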
12332 void
12333 sctp_send_packet_dropped(struct sctp_tcb *stcb, struct sctp_nets *net,
12334     struct mbuf *m, int len, int iphlen, int bad_crc)
12335 {
12336 	struct sctp_association *asoc;
12337 	struct sctp_pktdrop_chunk *drp;
12338 	struct sctp_tmit_chunk *chk;
12339 	uint8_t *datap;
12340 	int was_trunc = 0;
12341 	int fullsz = 0;
12342 	long spc;
12343 	int offset;
12344 	struct sctp_chunkhdr *ch, chunk_buf;
12345 	unsigned int chk_length;
12346 
12347 	if (!stcb) {
12348 		return;
12349 	}
12350 	asoc = &stcb->asoc;
12351 	SCTP_TCB_LOCK_ASSERT(stcb);
12352 	if (asoc->pktdrop_supported == 0) {
12353 		/*-
12354 		 * peer must declare support before I send one.
12355 		 */
12356 		return;
12357 	}
12358 	if (stcb->sctp_socket == NULL) {
12359 		return;
12360 	}
12361 	sctp_alloc_a_chunk(stcb, chk);
12362 	if (chk == NULL) {
12363 		return;
12364 	}
12365 	chk->copy_by_ref = 0;
12366 	chk->rec.chunk_id.id = SCTP_PACKET_DROPPED;
12367 	chk->rec.chunk_id.can_take_data = 1;
12368 	chk->flags = 0;
12369 	len -= iphlen;
12370 	chk->send_size = len;
12371 	/* Validate that we do not have an ABORT in here. */
12372 	offset = iphlen + sizeof(struct sctphdr);
12373 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
12374 						   sizeof(*ch), (uint8_t *) & chunk_buf);
12375 	while (ch != NULL) {
12376 		chk_length = ntohs(ch->chunk_length);
12377 		if (chk_length < sizeof(*ch)) {
12378 			/* break to abort land */
12379 			break;
12380 		}
12381 		switch (ch->chunk_type) {
12382 		case SCTP_PACKET_DROPPED:
12383 		case SCTP_ABORT_ASSOCIATION:
12384 		case SCTP_INITIATION_ACK:
12385 			/*
12386 			 * We don't respond with a PKT-DROP to an ABORT
12387 			 * or PKT-DROP. We also do not respond to an
12388 			 * INIT-ACK, because we can't know if the initiation
12389 			 * tag is correct or not.
12390 			 */
12391 			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
12392 			return;
12393 		default:
12394 			break;
12395 		}
12396 		offset += SCTP_SIZE32(chk_length);
12397 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
12398 		    sizeof(*ch), (uint8_t *) & chunk_buf);
12399 	}
12400 
12401 	if ((len + SCTP_MAX_OVERHEAD + sizeof(struct sctp_pktdrop_chunk)) >
12402 	    min(stcb->asoc.smallest_mtu, MCLBYTES)) {
12403 		/* only send 1 mtu worth, trim off the
12404 		 * excess on the end.
12405 		 */
12406 		fullsz = len;
12407 		len = min(stcb->asoc.smallest_mtu, MCLBYTES) - SCTP_MAX_OVERHEAD;
12408 		was_trunc = 1;
12409 	}
12410 	chk->asoc = &stcb->asoc;
12411 	chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
12412 	if (chk->data == NULL) {
12413 jump_out:
12414 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
12415 		return;
12416 	}
12417 	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
12418 	drp = mtod(chk->data, struct sctp_pktdrop_chunk *);
12419 	if (drp == NULL) {
12420 		sctp_m_freem(chk->data);
12421 		chk->data = NULL;
12422 		goto jump_out;
12423 	}
12424 	chk->book_size = SCTP_SIZE32((chk->send_size + sizeof(struct sctp_pktdrop_chunk) +
12425 	    sizeof(struct sctphdr) + SCTP_MED_OVERHEAD));
12426 	chk->book_size_scale = 0;
12427 	if (was_trunc) {
12428 		drp->ch.chunk_flags = SCTP_PACKET_TRUNCATED;
12429 		drp->trunc_len = htons(fullsz);
12430 		/* Len is already adjusted to size minus overhead above;
12431 		 * take out the pkt_drop chunk itself from it.
12432 		 */
12433 		chk->send_size = (uint16_t)(len - sizeof(struct sctp_pktdrop_chunk));
12434 		len = chk->send_size;
12435 	} else {
12436 		/* no truncation needed */
12437 		drp->ch.chunk_flags = 0;
12438 		drp->trunc_len = htons(0);
12439 	}
12440 	if (bad_crc) {
12441 		drp->ch.chunk_flags |= SCTP_BADCRC;
12442 	}
12443 	chk->send_size += sizeof(struct sctp_pktdrop_chunk);
12444 	SCTP_BUF_LEN(chk->data) = chk->send_size;
12445 	chk->sent = SCTP_DATAGRAM_UNSENT;
12446 	chk->snd_count = 0;
12447 	if (net) {
12448 		/* we should hit here */
12449 		chk->whoTo = net;
12450 		atomic_add_int(&chk->whoTo->ref_count, 1);
12451 	} else {
12452 		chk->whoTo = NULL;
12453 	}
12454 	drp->ch.chunk_type = SCTP_PACKET_DROPPED;
12455 	drp->ch.chunk_length = htons(chk->send_size);
12456 	spc = SCTP_SB_LIMIT_RCV(stcb->sctp_socket);
12457 	if (spc < 0) {
12458 		spc = 0;
12459 	}
12460 	drp->bottle_bw = htonl(spc);
12461 	if (asoc->my_rwnd) {
12462 		drp->current_onq = htonl(asoc->size_on_reasm_queue +
12463 		    asoc->size_on_all_streams +
12464 		    asoc->my_rwnd_control_len +
12465 		    stcb->sctp_socket->so_rcv.sb_cc);
12466 	} else {
12467 		/*-
12468 		 * If my rwnd is 0, possibly from mbuf depletion as well as
12469 		 * space used, tell the peer there is NO space, i.e., onq == bw.
12470 		 */
12471 		drp->current_onq = htonl(spc);
12472 	}
12473 	drp->reserved = 0;
12474 	datap = drp->data;
12475 	m_copydata(m, iphlen, len, (caddr_t)datap);
12476 	TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
12477 	asoc->ctrl_queue_cnt++;
12478 }
12479 
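/*
 * Queue a CWR for the given destination, or update an already queued one
 * so that only a single CWR per destination is outstanding at a time.
 */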
12480 void
12481 sctp_send_cwr(struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t high_tsn, uint8_t override)
12482 {
12483 	struct sctp_association *asoc;
12484 	struct sctp_cwr_chunk *cwr;
12485 	struct sctp_tmit_chunk *chk;
12486 
12487 	SCTP_TCB_LOCK_ASSERT(stcb);
12488 	if (net == NULL) {
12489 		return;
12490 	}
12491 	asoc = &stcb->asoc;
12492 	TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
12493 		if ((chk->rec.chunk_id.id == SCTP_ECN_CWR) && (net == chk->whoTo)) {
12494 			/* found a previous CWR queued to the same destination; update it if needed */
12495 			uint32_t ctsn;
12496 			cwr = mtod(chk->data, struct sctp_cwr_chunk *);
12497 			ctsn = ntohl(cwr->tsn);
12498 			if (SCTP_TSN_GT(high_tsn, ctsn)) {
12499 				cwr->tsn = htonl(high_tsn);
12500 			}
12501 			if (override & SCTP_CWR_REDUCE_OVERRIDE) {
12502 				/* Make sure override is carried */
12503 				cwr->ch.chunk_flags |= SCTP_CWR_REDUCE_OVERRIDE;
12504 			}
12505 			return;
12506 		}
12507 	}
12508 	sctp_alloc_a_chunk(stcb, chk);
12509 	if (chk == NULL) {
12510 		return;
12511 	}
12512 	chk->copy_by_ref = 0;
12513 	chk->rec.chunk_id.id = SCTP_ECN_CWR;
12514 	chk->rec.chunk_id.can_take_data = 1;
12515 	chk->flags = 0;
12516 	chk->asoc = &stcb->asoc;
12517 	chk->send_size = sizeof(struct sctp_cwr_chunk);
12518 	chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_NOWAIT, 1, MT_HEADER);
12519 	if (chk->data == NULL) {
12520 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
12521 		return;
12522 	}
12523 	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
12524 	SCTP_BUF_LEN(chk->data) = chk->send_size;
12525 	chk->sent = SCTP_DATAGRAM_UNSENT;
12526 	chk->snd_count = 0;
12527 	chk->whoTo = net;
12528 	atomic_add_int(&chk->whoTo->ref_count, 1);
12529 	cwr = mtod(chk->data, struct sctp_cwr_chunk *);
12530 	cwr->ch.chunk_type = SCTP_ECN_CWR;
12531 	cwr->ch.chunk_flags = override;
12532 	cwr->ch.chunk_length = htons(sizeof(struct sctp_cwr_chunk));
12533 	cwr->tsn = htonl(high_tsn);
12534 	TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
12535 	asoc->ctrl_queue_cnt++;
12536 }
12537 
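/*
 * Append an Outgoing SSN Reset Request parameter to the stream-reset
 * chunk in chk->data.  Only streams that are RESET_PENDING with nothing
 * left on their queues are listed (an empty list means all streams);
 * each listed stream is moved to RESET_IN_FLIGHT.  Returns 1 if a
 * parameter was added, 0 if no stream was eligible.
 */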
12538 static int
12539 sctp_add_stream_reset_out(struct sctp_tcb *stcb, struct sctp_tmit_chunk *chk,
12540                           uint32_t seq, uint32_t resp_seq, uint32_t last_sent)
12541 {
12542 	uint16_t len, old_len, i;
12543 	struct sctp_stream_reset_out_request *req_out;
12544 	struct sctp_chunkhdr *ch;
12545 	int at;
12546 	int number_entries = 0;
12547 
12548 	ch = mtod(chk->data, struct sctp_chunkhdr *);
12549 	old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
12550 	/* get to new offset for the param. */
12551 	req_out = (struct sctp_stream_reset_out_request *)((caddr_t)ch + len);
12552 	/* now how long will this param be? */
12553 	for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
12554 		if ((stcb->asoc.strmout[i].state == SCTP_STREAM_RESET_PENDING) &&
12555 		    (stcb->asoc.strmout[i].chunks_on_queues == 0) &&
12556 		    TAILQ_EMPTY(&stcb->asoc.strmout[i].outqueue)) {
12557 			number_entries++;
12558 		}
12559 	}
12560 	if (number_entries == 0) {
12561 		return (0);
12562 	}
12563 	if (number_entries == stcb->asoc.streamoutcnt) {
12564 		number_entries = 0;
12565 	}
12566 	if (number_entries > SCTP_MAX_STREAMS_AT_ONCE_RESET) {
12567 		number_entries = SCTP_MAX_STREAMS_AT_ONCE_RESET;
12568 	}
12569 	len = (uint16_t)(sizeof(struct sctp_stream_reset_out_request) + (sizeof(uint16_t) * number_entries));
12570 	req_out->ph.param_type = htons(SCTP_STR_RESET_OUT_REQUEST);
12571 	req_out->ph.param_length = htons(len);
12572 	req_out->request_seq = htonl(seq);
12573 	req_out->response_seq = htonl(resp_seq);
12574 	req_out->send_reset_at_tsn = htonl(last_sent);
12575 	at = 0;
12576 	if (number_entries) {
12577 		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
12578 			if ((stcb->asoc.strmout[i].state == SCTP_STREAM_RESET_PENDING) &&
12579 			    (stcb->asoc.strmout[i].chunks_on_queues == 0) &&
12580 			    TAILQ_EMPTY(&stcb->asoc.strmout[i].outqueue)) {
12581 				req_out->list_of_streams[at] = htons(i);
12582 				at++;
12583 				stcb->asoc.strmout[i].state = SCTP_STREAM_RESET_IN_FLIGHT;
12584 				if (at >= number_entries) {
12585 					break;
12586 				}
12587 			}
12588 		}
12589 	} else {
12590 		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
12591 			stcb->asoc.strmout[i].state = SCTP_STREAM_RESET_IN_FLIGHT;
12592 		}
12593 	}
12594 	if (SCTP_SIZE32(len) > len) {
12595 		/*-
12596 		 * Need to worry about the pad we may end up adding to the
12597 		 * end. This is easy since the struct is either aligned to 4
12598 		 * bytes or 2 bytes off.
12599 		 */
12600 		req_out->list_of_streams[number_entries] = 0;
12601 	}
12602 	/* now fix the chunk length */
12603 	ch->chunk_length = htons(len + old_len);
12604 	chk->book_size = len + old_len;
12605 	chk->book_size_scale = 0;
12606 	chk->send_size = SCTP_SIZE32(chk->book_size);
12607 	SCTP_BUF_LEN(chk->data) = chk->send_size;
12608 	return (1);
12609 }
12610 
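/*
 * Append an Incoming SSN Reset Request parameter to the chunk in
 * chk->data, asking the peer to reset the listed streams in its
 * outgoing direction (an empty list means all streams).
 */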
12611 static void
12612 sctp_add_stream_reset_in(struct sctp_tmit_chunk *chk,
12613                          int number_entries, uint16_t *list,
12614                          uint32_t seq)
12615 {
12616 	uint16_t len, old_len, i;
12617 	struct sctp_stream_reset_in_request *req_in;
12618 	struct sctp_chunkhdr *ch;
12619 
12620 	ch = mtod(chk->data, struct sctp_chunkhdr *);
12621 	old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
12622 
12623 	/* get to new offset for the param. */
12624 	req_in = (struct sctp_stream_reset_in_request *)((caddr_t)ch + len);
12625 	/* now how long will this param be? */
12626 	len = (uint16_t)(sizeof(struct sctp_stream_reset_in_request) + (sizeof(uint16_t) * number_entries));
12627 	req_in->ph.param_type = htons(SCTP_STR_RESET_IN_REQUEST);
12628 	req_in->ph.param_length = htons(len);
12629 	req_in->request_seq = htonl(seq);
12630 	if (number_entries) {
12631 		for (i = 0; i < number_entries; i++) {
12632 			req_in->list_of_streams[i] = htons(list[i]);
12633 		}
12634 	}
12635 	if (SCTP_SIZE32(len) > len) {
12636 		/*-
12637 		 * Need to worry about the pad we may end up adding to the
12638 		 * end. This is easy since the struct is either aligned to 4
12639 		 * bytes or 2 bytes off.
12640 		 */
12641 		req_in->list_of_streams[number_entries] = 0;
12642 	}
12643 	/* now fix the chunk length */
12644 	ch->chunk_length = htons(len + old_len);
12645 	chk->book_size = len + old_len;
12646 	chk->book_size_scale = 0;
12647 	chk->send_size = SCTP_SIZE32(chk->book_size);
12648 	SCTP_BUF_LEN(chk->data) = chk->send_size;
12649 	return;
12650 }
12651 
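/*
 * Append an SSN/TSN Reset Request parameter to the chunk in chk->data.
 * This request carries no stream list; it always applies to all streams.
 */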
12652 static void
12653 sctp_add_stream_reset_tsn(struct sctp_tmit_chunk *chk,
12654                           uint32_t seq)
12655 {
12656 	uint16_t len, old_len;
12657 	struct sctp_stream_reset_tsn_request *req_tsn;
12658 	struct sctp_chunkhdr *ch;
12659 
12660 	ch = mtod(chk->data, struct sctp_chunkhdr *);
12661 	old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
12662 
12663 	/* get to new offset for the param. */
12664 	req_tsn = (struct sctp_stream_reset_tsn_request *)((caddr_t)ch + len);
12665 	/* now how long will this param be? */
12666 	len = sizeof(struct sctp_stream_reset_tsn_request);
12667 	req_tsn->ph.param_type = htons(SCTP_STR_RESET_TSN_REQUEST);
12668 	req_tsn->ph.param_length = htons(len);
12669 	req_tsn->request_seq = htonl(seq);
12670 
12671 	/* now fix the chunk length */
12672 	ch->chunk_length = htons(len + old_len);
12673 	chk->send_size = len + old_len;
12674 	chk->book_size = SCTP_SIZE32(chk->send_size);
12675 	chk->book_size_scale = 0;
12676 	SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
12677 	return;
12678 }
12679 
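/*
 * Append a Re-configuration Response parameter carrying 'result' for the
 * peer's request 'resp_seq' to the chunk in chk->data.
 */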
12680 void
12681 sctp_add_stream_reset_result(struct sctp_tmit_chunk *chk,
12682                              uint32_t resp_seq, uint32_t result)
12683 {
12684 	uint16_t len, old_len;
12685 	struct sctp_stream_reset_response *resp;
12686 	struct sctp_chunkhdr *ch;
12687 
12688 	ch = mtod(chk->data, struct sctp_chunkhdr *);
12689 	old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
12690 
12691 	/* get to new offset for the param. */
12692 	resp = (struct sctp_stream_reset_response *)((caddr_t)ch + len);
12693 	/* now how long will this param be? */
12694 	len = sizeof(struct sctp_stream_reset_response);
12695 	resp->ph.param_type = htons(SCTP_STR_RESET_RESPONSE);
12696 	resp->ph.param_length = htons(len);
12697 	resp->response_seq = htonl(resp_seq);
12698 	resp->result = htonl(result);
12699 
12700 	/* now fix the chunk length */
12701 	ch->chunk_length = htons(len + old_len);
12702 	chk->book_size = len + old_len;
12703 	chk->book_size_scale = 0;
12704 	chk->send_size = SCTP_SIZE32(chk->book_size);
12705 	SCTP_BUF_LEN(chk->data) = chk->send_size;
12706 	return;
12707 }
12708 
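/*
 * Send a response to a peer stream-reset request whose handling was
 * deferred.  Record the new result first, so a retransmitted request
 * gets the same answer, then (unless a request of our own is still
 * outstanding) build a fresh STREAM_RESET chunk containing only the
 * response and queue it on the control queue.
 */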
12709 void
12710 sctp_send_deferred_reset_response(struct sctp_tcb *stcb,
12711 				 struct sctp_stream_reset_list *ent,
12712 				 int response)
12713 {
12714 	struct sctp_association *asoc;
12715 	struct sctp_tmit_chunk *chk;
12716 	struct sctp_chunkhdr *ch;
12717 
12718 	asoc = &stcb->asoc;
12719 
12720 	/*
12721 	 * Reset our last reset action to the new one, i.e. the response
12722 	 * (PERFORMED probably). This assures that if we fail to send, a
12723 	 * retransmission from the peer will get the new response.
12724 	 */
12725 	asoc->last_reset_action[0] = response;
12726 	if (asoc->stream_reset_outstanding) {
12727 		return;
12728 	}
12729 	sctp_alloc_a_chunk(stcb, chk);
12730 	if (chk == NULL) {
12731 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12732 		return;
12733 	}
12734 	chk->copy_by_ref = 0;
12735 	chk->rec.chunk_id.id = SCTP_STREAM_RESET;
12736 	chk->rec.chunk_id.can_take_data = 0;
12737 	chk->flags = 0;
12738 	chk->asoc = &stcb->asoc;
12739 	chk->book_size = sizeof(struct sctp_chunkhdr);
12740 	chk->send_size = SCTP_SIZE32(chk->book_size);
12741 	chk->book_size_scale = 0;
12742 	chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
12743 	if (chk->data == NULL) {
12744 		sctp_free_a_chunk(stcb, chk, SCTP_SO_LOCKED);
12745 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12746 		return;
12747 	}
12748 	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
12749 	/* setup chunk parameters */
12750 	chk->sent = SCTP_DATAGRAM_UNSENT;
12751 	chk->snd_count = 0;
12752 	if (stcb->asoc.alternate) {
12753 		chk->whoTo = stcb->asoc.alternate;
12754 	} else {
12755 		chk->whoTo = stcb->asoc.primary_destination;
12756 	}
12757 	ch = mtod(chk->data, struct sctp_chunkhdr *);
12758 	ch->chunk_type = SCTP_STREAM_RESET;
12759 	ch->chunk_flags = 0;
12760 	ch->chunk_length = htons(chk->book_size);
12761 	atomic_add_int(&chk->whoTo->ref_count, 1);
12762 	SCTP_BUF_LEN(chk->data) = chk->send_size;
12763 	sctp_add_stream_reset_result(chk, ent->seq, response);
12764 	/* insert the chunk for sending */
12765 	TAILQ_INSERT_TAIL(&asoc->control_send_queue,
12766 			  chk,
12767 			  sctp_next);
12768 	asoc->ctrl_queue_cnt++;
12769 }
12770 
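/*
 * Append a Re-configuration Response parameter that also carries the
 * sender's and receiver's next TSNs, used when answering an SSN/TSN
 * reset request.
 */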
12771 void
12772 sctp_add_stream_reset_result_tsn(struct sctp_tmit_chunk *chk,
12773                                  uint32_t resp_seq, uint32_t result,
12774                                  uint32_t send_una, uint32_t recv_next)
12775 {
12776 	uint16_t len, old_len;
12777 	struct sctp_stream_reset_response_tsn *resp;
12778 	struct sctp_chunkhdr *ch;
12779 
12780 	ch = mtod(chk->data, struct sctp_chunkhdr *);
12781 	old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
12782 
12783 	/* get to new offset for the param. */
12784 	resp = (struct sctp_stream_reset_response_tsn *)((caddr_t)ch + len);
12785 	/* now how long will this param be? */
12786 	len = sizeof(struct sctp_stream_reset_response_tsn);
12787 	resp->ph.param_type = htons(SCTP_STR_RESET_RESPONSE);
12788 	resp->ph.param_length = htons(len);
12789 	resp->response_seq = htonl(resp_seq);
12790 	resp->result = htonl(result);
12791 	resp->senders_next_tsn = htonl(send_una);
12792 	resp->receivers_next_tsn = htonl(recv_next);
12793 
12794 	/* now fix the chunk length */
12795 	ch->chunk_length = htons(len + old_len);
12796 	chk->book_size = len + old_len;
12797 	chk->send_size = SCTP_SIZE32(chk->book_size);
12798 	chk->book_size_scale = 0;
12799 	SCTP_BUF_LEN(chk->data) = chk->send_size;
12800 	return;
12801 }
12802 
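/*
 * Append an Add Outgoing Streams Request parameter asking for 'adding'
 * additional outgoing streams.
 */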
12803 static void
12804 sctp_add_an_out_stream(struct sctp_tmit_chunk *chk,
12805 		       uint32_t seq,
12806 		       uint16_t adding)
12807 {
12808 	uint16_t len, old_len;
12809 	struct sctp_chunkhdr *ch;
12810 	struct sctp_stream_reset_add_strm *addstr;
12811 
12812 	ch = mtod(chk->data, struct sctp_chunkhdr *);
12813 	old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
12814 
12815 	/* get to new offset for the param. */
12816 	addstr = (struct sctp_stream_reset_add_strm *)((caddr_t)ch + len);
12817 	/* now how long will this param be? */
12818 	len = sizeof(struct sctp_stream_reset_add_strm);
12819 
12820 	/* Fill it out. */
12821 	addstr->ph.param_type = htons(SCTP_STR_RESET_ADD_OUT_STREAMS);
12822 	addstr->ph.param_length = htons(len);
12823 	addstr->request_seq = htonl(seq);
12824 	addstr->number_of_streams = htons(adding);
12825 	addstr->reserved = 0;
12826 
12827 	/* now fix the chunk length */
12828 	ch->chunk_length = htons(len + old_len);
12829 	chk->send_size = len + old_len;
12830 	chk->book_size = SCTP_SIZE32(chk->send_size);
12831 	chk->book_size_scale = 0;
12832 	SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
12833 	return;
12834 }
12835 
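/*
 * Append an Add Incoming Streams Request parameter asking the peer to
 * grow its outgoing (our incoming) stream count by 'adding'.
 */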
12836 static void
12837 sctp_add_an_in_stream(struct sctp_tmit_chunk *chk,
12838                       uint32_t seq,
12839                       uint16_t adding)
12840 {
12841 	uint16_t len, old_len;
12842 	struct sctp_chunkhdr *ch;
12843 	struct sctp_stream_reset_add_strm *addstr;
12844 
12845 	ch = mtod(chk->data, struct sctp_chunkhdr *);
12846 	old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
12847 
12848 	/* get to new offset for the param. */
12849 	addstr = (struct sctp_stream_reset_add_strm *)((caddr_t)ch + len);
12850 	/* now how long will this param be? */
12851 	len = sizeof(struct sctp_stream_reset_add_strm);
12852 	/* Fill it out. */
12853 	addstr->ph.param_type = htons(SCTP_STR_RESET_ADD_IN_STREAMS);
12854 	addstr->ph.param_length = htons(len);
12855 	addstr->request_seq = htonl(seq);
12856 	addstr->number_of_streams = htons(adding);
12857 	addstr->reserved = 0;
12858 
12859 	/* now fix the chunk length */
12860 	ch->chunk_length = htons(len + old_len);
12861 	chk->send_size = len + old_len;
12862 	chk->book_size = SCTP_SIZE32(chk->send_size);
12863 	chk->book_size_scale = 0;
12864 	SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
12865 	return;
12866 }
12867 
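/*
 * Build and queue a STREAM_RESET (RE-CONFIG) chunk that contains only an
 * outgoing reset request, used when a previously deferred reset can now
 * be triggered.  Returns 0 on success, EALREADY if a request is still
 * outstanding, ENOMEM on allocation failure, or ENOENT if no stream is
 * currently eligible for reset.
 */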
12868 int
12869 sctp_send_stream_reset_out_if_possible(struct sctp_tcb *stcb, int so_locked)
12870 {
12871 	struct sctp_association *asoc;
12872 	struct sctp_tmit_chunk *chk;
12873 	struct sctp_chunkhdr *ch;
12874 	uint32_t seq;
12875 
12876 	asoc = &stcb->asoc;
12877 	asoc->trigger_reset = 0;
12878 	if (asoc->stream_reset_outstanding) {
12879 		return (EALREADY);
12880 	}
12881 	sctp_alloc_a_chunk(stcb, chk);
12882 	if (chk == NULL) {
12883 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12884 		return (ENOMEM);
12885 	}
12886 	chk->copy_by_ref = 0;
12887 	chk->rec.chunk_id.id = SCTP_STREAM_RESET;
12888 	chk->rec.chunk_id.can_take_data = 0;
12889 	chk->flags = 0;
12890 	chk->asoc = &stcb->asoc;
12891 	chk->book_size = sizeof(struct sctp_chunkhdr);
12892 	chk->send_size = SCTP_SIZE32(chk->book_size);
12893 	chk->book_size_scale = 0;
12894 	chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
12895 	if (chk->data == NULL) {
12896 		sctp_free_a_chunk(stcb, chk, so_locked);
12897 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12898 		return (ENOMEM);
12899 	}
12900 	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
12901 
12902 	/* setup chunk parameters */
12903 	chk->sent = SCTP_DATAGRAM_UNSENT;
12904 	chk->snd_count = 0;
12905 	if (stcb->asoc.alternate) {
12906 		chk->whoTo = stcb->asoc.alternate;
12907 	} else {
12908 		chk->whoTo = stcb->asoc.primary_destination;
12909 	}
12910 	ch = mtod(chk->data, struct sctp_chunkhdr *);
12911 	ch->chunk_type = SCTP_STREAM_RESET;
12912 	ch->chunk_flags = 0;
12913 	ch->chunk_length = htons(chk->book_size);
12914 	atomic_add_int(&chk->whoTo->ref_count, 1);
12915 	SCTP_BUF_LEN(chk->data) = chk->send_size;
12916 	seq = stcb->asoc.str_reset_seq_out;
12917 	if (sctp_add_stream_reset_out(stcb, chk, seq, (stcb->asoc.str_reset_seq_in - 1), (stcb->asoc.sending_seq - 1))) {
12918 		seq++;
12919 		asoc->stream_reset_outstanding++;
12920 	} else {
12921 		m_freem(chk->data);
12922 		chk->data = NULL;
12923 		sctp_free_a_chunk(stcb, chk, so_locked);
12924 		return (ENOENT);
12925 	}
12926 	asoc->str_reset = chk;
12927 	/* insert the chunk for sending */
12928 	TAILQ_INSERT_TAIL(&asoc->control_send_queue,
12929 			  chk,
12930 			  sctp_next);
12931 	asoc->ctrl_queue_cnt++;
12932 
12933 	if (stcb->asoc.send_sack) {
12934 		sctp_send_sack(stcb, so_locked);
12935 	}
12936 	sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo);
12937 	return (0);
12938 }
12939 
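/*
 * Build and queue a RE-CONFIG chunk for the requested operations: an
 * incoming SSN reset for the streams in 'list', an SSN/TSN reset, and/or
 * requests to add 'adding_o' outgoing and 'adding_i' incoming streams.
 * When an incoming reset is requested, an outgoing reset request is
 * piggybacked for any local streams pending reset.  Only one RE-CONFIG
 * may be outstanding at a time (EBUSY otherwise), and the outgoing
 * stream array is grown here if more streams are being added than are
 * currently allocated.  Typically reached from the stream-reconfiguration
 * socket options (e.g. SCTP_RESET_STREAMS, SCTP_ADD_STREAMS).
 */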
12940 int
12941 sctp_send_str_reset_req(struct sctp_tcb *stcb,
12942                         uint16_t number_entries, uint16_t *list,
12943                         uint8_t send_in_req,
12944                         uint8_t send_tsn_req,
12945                         uint8_t add_stream,
12946                         uint16_t adding_o,
12947                         uint16_t adding_i, uint8_t peer_asked)
12948 {
12949 	struct sctp_association *asoc;
12950 	struct sctp_tmit_chunk *chk;
12951 	struct sctp_chunkhdr *ch;
12952 	int can_send_out_req = 0;
12953 	uint32_t seq;
12954 
12955 	asoc = &stcb->asoc;
12956 	if (asoc->stream_reset_outstanding) {
12957 		/*-
12958 		 * Already one pending, must get ACK back to clear the flag.
12959 		 */
12960 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EBUSY);
12961 		return (EBUSY);
12962 	}
12963 	if ((send_in_req == 0) && (send_tsn_req == 0) &&
12964 	    (add_stream == 0)) {
12965 		/* nothing to do */
12966 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12967 		return (EINVAL);
12968 	}
12969 	if (send_tsn_req && send_in_req) {
12970 		/* error, can't do that */
12971 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12972 		return (EINVAL);
12973 	} else if (send_in_req) {
12974 		can_send_out_req = 1;
12975 	}
12976 	if (number_entries > (MCLBYTES -
12977 	                      SCTP_MIN_OVERHEAD -
12978 	                      sizeof(struct sctp_chunkhdr) -
12979 	                      sizeof(struct sctp_stream_reset_out_request)) /
12980 	                     sizeof(uint16_t)) {
12981 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12982 		return (ENOMEM);
12983 	}
12984 	sctp_alloc_a_chunk(stcb, chk);
12985 	if (chk == NULL) {
12986 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12987 		return (ENOMEM);
12988 	}
12989 	chk->copy_by_ref = 0;
12990 	chk->rec.chunk_id.id = SCTP_STREAM_RESET;
12991 	chk->rec.chunk_id.can_take_data = 0;
12992 	chk->flags = 0;
12993 	chk->asoc = &stcb->asoc;
12994 	chk->book_size = sizeof(struct sctp_chunkhdr);
12995 	chk->send_size = SCTP_SIZE32(chk->book_size);
12996 	chk->book_size_scale = 0;
12997 	chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
12998 	if (chk->data == NULL) {
12999 		sctp_free_a_chunk(stcb, chk, SCTP_SO_LOCKED);
13000 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
13001 		return (ENOMEM);
13002 	}
13003 	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
13004 
13005 	/* setup chunk parameters */
13006 	chk->sent = SCTP_DATAGRAM_UNSENT;
13007 	chk->snd_count = 0;
13008 	if (stcb->asoc.alternate) {
13009 		chk->whoTo = stcb->asoc.alternate;
13010 	} else {
13011 		chk->whoTo = stcb->asoc.primary_destination;
13012 	}
13013 	atomic_add_int(&chk->whoTo->ref_count, 1);
13014 	ch = mtod(chk->data, struct sctp_chunkhdr *);
13015 	ch->chunk_type = SCTP_STREAM_RESET;
13016 	ch->chunk_flags = 0;
13017 	ch->chunk_length = htons(chk->book_size);
13018 	SCTP_BUF_LEN(chk->data) = chk->send_size;
13019 
13020 	seq = stcb->asoc.str_reset_seq_out;
13021 	if (can_send_out_req) {
13022 		int ret;
13023 	        ret = sctp_add_stream_reset_out(stcb, chk, seq, (stcb->asoc.str_reset_seq_in - 1), (stcb->asoc.sending_seq - 1));
13024 		if (ret) {
13025 			seq++;
13026 			asoc->stream_reset_outstanding++;
13027 		}
13028 	}
13029 	if ((add_stream & 1) &&
13030 	    ((stcb->asoc.strm_realoutsize - stcb->asoc.streamoutcnt) < adding_o)) {
13031 		/* Need to allocate more */
13032 		struct sctp_stream_out *oldstream;
13033 		struct sctp_stream_queue_pending *sp, *nsp;
13034 		int i;
13035 #if defined(SCTP_DETAILED_STR_STATS)
13036 		int j;
13037 #endif
13038 
13039 		oldstream = stcb->asoc.strmout;
13040 		/* get some more */
13041 		SCTP_MALLOC(stcb->asoc.strmout, struct sctp_stream_out *,
13042 			    (stcb->asoc.streamoutcnt + adding_o) * sizeof(struct sctp_stream_out),
13043 			    SCTP_M_STRMO);
13044 		if (stcb->asoc.strmout == NULL) {
13045 			uint8_t x;
13046 			stcb->asoc.strmout = oldstream;
13047 			/* Turn off the bit */
13048 			x = add_stream & 0xfe;
13049 			add_stream = x;
13050 			goto skip_stuff;
13051 		}
13052 		/* Ok now we proceed with copying the old out stuff and
13053 		 * initializing the new stuff.
13054 		 */
13055 		SCTP_TCB_SEND_LOCK(stcb);
13056 		stcb->asoc.ss_functions.sctp_ss_clear(stcb, &stcb->asoc, 0, 1);
13057 		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
13058 			TAILQ_INIT(&stcb->asoc.strmout[i].outqueue);
13059 			stcb->asoc.strmout[i].chunks_on_queues = oldstream[i].chunks_on_queues;
13060 			stcb->asoc.strmout[i].next_mid_ordered = oldstream[i].next_mid_ordered;
13061 			stcb->asoc.strmout[i].next_mid_unordered = oldstream[i].next_mid_unordered;
13062 			stcb->asoc.strmout[i].last_msg_incomplete = oldstream[i].last_msg_incomplete;
13063 			stcb->asoc.strmout[i].sid = i;
13064 			stcb->asoc.strmout[i].state = oldstream[i].state;
13065 			/* FIX ME FIX ME */
13066 			/* This should be a SS_COPY operation FIX ME STREAM SCHEDULER EXPERT */
13067 			stcb->asoc.ss_functions.sctp_ss_init_stream(stcb, &stcb->asoc.strmout[i], &oldstream[i]);
13068 			/* now anything on those queues? */
13069 			TAILQ_FOREACH_SAFE(sp, &oldstream[i].outqueue, next, nsp) {
13070 				TAILQ_REMOVE(&oldstream[i].outqueue, sp, next);
13071 				TAILQ_INSERT_TAIL(&stcb->asoc.strmout[i].outqueue, sp, next);
13072 			}
13073 
13074 		}
13075 		/* now the new streams */
13076 		stcb->asoc.ss_functions.sctp_ss_init(stcb, &stcb->asoc, 1);
13077 		for (i = stcb->asoc.streamoutcnt; i < (stcb->asoc.streamoutcnt + adding_o); i++) {
13078 			TAILQ_INIT(&stcb->asoc.strmout[i].outqueue);
13079 			stcb->asoc.strmout[i].chunks_on_queues = 0;
13080 #if defined(SCTP_DETAILED_STR_STATS)
13081 			for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
13082 				stcb->asoc.strmout[i].abandoned_sent[j] = 0;
13083 				stcb->asoc.strmout[i].abandoned_unsent[j] = 0;
13084 			}
13085 #else
13086 			stcb->asoc.strmout[i].abandoned_sent[0] = 0;
13087 			stcb->asoc.strmout[i].abandoned_unsent[0] = 0;
13088 #endif
13089 			stcb->asoc.strmout[i].next_mid_ordered = 0;
13090 			stcb->asoc.strmout[i].next_mid_unordered = 0;
13091 			stcb->asoc.strmout[i].sid = i;
13092 			stcb->asoc.strmout[i].last_msg_incomplete = 0;
13093 			stcb->asoc.ss_functions.sctp_ss_init_stream(stcb, &stcb->asoc.strmout[i], NULL);
13094 			stcb->asoc.strmout[i].state = SCTP_STREAM_CLOSED;
13095 		}
13096 		stcb->asoc.strm_realoutsize = stcb->asoc.streamoutcnt + adding_o;
13097 		SCTP_FREE(oldstream, SCTP_M_STRMO);
13098 		SCTP_TCB_SEND_UNLOCK(stcb);
13099 	}
13100 skip_stuff:
13101 	if ((add_stream & 1) && (adding_o > 0)) {
13102 		asoc->strm_pending_add_size = adding_o;
13103 		asoc->peer_req_out = peer_asked;
13104 		sctp_add_an_out_stream(chk, seq, adding_o);
13105 		seq++;
13106 		asoc->stream_reset_outstanding++;
13107 	}
13108 	if ((add_stream & 2) && (adding_i > 0)) {
13109 		sctp_add_an_in_stream(chk, seq, adding_i);
13110 		seq++;
13111 		asoc->stream_reset_outstanding++;
13112 	}
13113 	if (send_in_req) {
13114 		sctp_add_stream_reset_in(chk, number_entries, list, seq);
13115 		seq++;
13116 		asoc->stream_reset_outstanding++;
13117 	}
13118 	if (send_tsn_req) {
13119 		sctp_add_stream_reset_tsn(chk, seq);
13120 		asoc->stream_reset_outstanding++;
13121 	}
13122 	asoc->str_reset = chk;
13123 	/* insert the chunk for sending */
13124 	TAILQ_INSERT_TAIL(&asoc->control_send_queue,
13125 			  chk,
13126 			  sctp_next);
13127 	asoc->ctrl_queue_cnt++;
13128 	if (stcb->asoc.send_sack) {
13129 		sctp_send_sack(stcb, SCTP_SO_LOCKED);
13130 	}
13131 	sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo);
13132 	return (0);
13133 }
13134 
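/*
 * Send an ABORT in response to a packet for which no association exists,
 * unless that packet itself contains an ABORT chunk (never answer an
 * ABORT with an ABORT).  The response packet is built and sent by
 * sctp_send_resp_msg().
 */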
13135 void
13136 sctp_send_abort(struct mbuf *m, int iphlen, struct sockaddr *src, struct sockaddr *dst,
13137                 struct sctphdr *sh, uint32_t vtag, struct mbuf *cause,
13138 #if defined(__FreeBSD__)
13139                 uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
13140 #endif
13141                 uint32_t vrf_id, uint16_t port)
13142 {
13143 	/* Don't respond to an ABORT with an ABORT. */
13144 	if (sctp_is_there_an_abort_here(m, iphlen, &vtag)) {
13145 		if (cause)
13146 			sctp_m_freem(cause);
13147 		return;
13148 	}
13149 	sctp_send_resp_msg(src, dst, sh, vtag, SCTP_ABORT_ASSOCIATION, cause,
13150 #if defined(__FreeBSD__)
13151 	                   mflowtype, mflowid, fibnum,
13152 #endif
13153 	                   vrf_id, port);
13154 	return;
13155 }
13156 
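/*
 * Send an OPERATION-ERROR chunk carrying 'cause' to the given address,
 * again via sctp_send_resp_msg().
 */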
13157 void
13158 sctp_send_operr_to(struct sockaddr *src, struct sockaddr *dst,
13159                    struct sctphdr *sh, uint32_t vtag, struct mbuf *cause,
13160 #if defined(__FreeBSD__)
13161                    uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
13162 #endif
13163                    uint32_t vrf_id, uint16_t port)
13164 {
13165 	sctp_send_resp_msg(src, dst, sh, vtag, SCTP_OPERATION_ERROR, cause,
13166 #if defined(__FreeBSD__)
13167 	                   mflowtype, mflowid, fibnum,
13168 #endif
13169 	                   vrf_id, port);
13170 	return;
13171 }
13172 
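/*
 * Copy up to max_send_len bytes of user data from the uio into a fresh
 * mbuf chain.  On newer FreeBSD and userspace builds this is just
 * m_uiotombuf(); otherwise the chain is built mbuf by mbuf with
 * uiomove().  *sndout is advanced by the number of bytes copied and
 * *new_tail points at the last mbuf of the chain.
 */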
13173 static struct mbuf *
13174 sctp_copy_resume(struct uio *uio,
13175 		 int max_send_len,
13176 #if (defined(__FreeBSD__) && __FreeBSD_version > 602000) || defined(__Userspace__)
13177 		 int user_marks_eor,
13178 #endif
13179 		 int *error,
13180 		 uint32_t *sndout,
13181 		 struct mbuf **new_tail)
13182 {
13183 #if defined(__Panda__)
13184 	struct mbuf *m;
13185 
13186 	m = m_uiotombuf(uio, M_WAITOK, max_send_len, 0,
13187 			(user_marks_eor ? M_EOR : 0));
13188 	if (m == NULL) {
13189 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
13190 		*error = ENOBUFS;
13191 	} else {
13192 		*sndout = m_length(m, NULL);
13193 		*new_tail = m_last(m);
13194 	}
13195 	return (m);
13196 #elif defined(__FreeBSD__) && __FreeBSD_version > 602000 || defined(__Userspace__)
13197 	struct mbuf *m;
13198 
13199 	m = m_uiotombuf(uio, M_WAITOK, max_send_len, 0,
13200 		(M_PKTHDR | (user_marks_eor ? M_EOR : 0)));
13201 	if (m == NULL) {
13202 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
13203 		*error = ENOBUFS;
13204 	} else {
13205 		*sndout = m_length(m, NULL);
13206 		*new_tail = m_last(m);
13207 	}
13208 	return (m);
13209 #else
13210 	int left, cancpy, willcpy;
13211 	struct mbuf *m, *head;
13212 
13213 #if defined(__APPLE__)
13214 #if defined(APPLE_LEOPARD)
13215 	left = (int)min(uio->uio_resid, max_send_len);
13216 #else
13217 	left = (int)min(uio_resid(uio), max_send_len);
13218 #endif
13219 #else
13220 	left = (int)min(uio->uio_resid, max_send_len);
13221 #endif
13222 	/* Always get a header just in case */
13223 	head = sctp_get_mbuf_for_msg(left, 0, M_WAITOK, 0, MT_DATA);
13224 	if (head == NULL) {
13225 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
13226 		*error = ENOBUFS;
13227 		return (NULL);
13228 	}
13229 	cancpy = (int)M_TRAILINGSPACE(head);
13230 	willcpy = min(cancpy, left);
13231 	*error = uiomove(mtod(head, caddr_t), willcpy, uio);
13232 	if (*error) {
13233 		sctp_m_freem(head);
13234 		return (NULL);
13235 	}
13236 	*sndout += willcpy;
13237 	left -= willcpy;
13238 	SCTP_BUF_LEN(head) = willcpy;
13239 	m = head;
13240 	*new_tail = head;
13241 	while (left > 0) {
13242 		/* move in user data */
13243 		SCTP_BUF_NEXT(m) = sctp_get_mbuf_for_msg(left, 0, M_WAITOK, 0, MT_DATA);
13244 		if (SCTP_BUF_NEXT(m) == NULL) {
13245 			sctp_m_freem(head);
13246 			*new_tail = NULL;
13247 			SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
13248 			*error = ENOBUFS;
13249 			return (NULL);
13250 		}
13251 		m = SCTP_BUF_NEXT(m);
13252 		cancpy = (int)M_TRAILINGSPACE(m);
13253 		willcpy = min(cancpy, left);
13254 		*error = uiomove(mtod(m, caddr_t), willcpy, uio);
13255 		if (*error) {
13256 			sctp_m_freem(head);
13257 			*new_tail = NULL;
13258 			SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EFAULT);
13259 			*error = EFAULT;
13260 			return (NULL);
13261 		}
13262 		SCTP_BUF_LEN(m) = willcpy;
13263 		left -= willcpy;
13264 		*sndout += willcpy;
13265 		*new_tail = m;
13266 		if (left == 0) {
13267 			SCTP_BUF_NEXT(m) = NULL;
13268 		}
13269 	}
13270 	return (head);
13271 #endif
13272 }
13273 
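/*
 * Copy sp->length bytes of user data into sp->data, reserving
 * resv_upfront bytes of leading space in the first mbuf, and remember
 * the tail mbuf for later appends.
 */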
13274 static int
13275 sctp_copy_one(struct sctp_stream_queue_pending *sp,
13276               struct uio *uio,
13277               int resv_upfront)
13278 {
13279 #if defined(__Panda__)
13280 	sp->data = m_uiotombuf(uio, M_WAITOK, sp->length,
13281 	                       resv_upfront, 0);
13282 	if (sp->data == NULL) {
13283 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
13284 		return (ENOBUFS);
13285 	}
13286 
13287 	sp->tail_mbuf = m_last(sp->data);
13288 	return (0);
13289 #elif defined(__FreeBSD__) && __FreeBSD_version > 602000 || defined(__Userspace__)
13290 	sp->data = m_uiotombuf(uio, M_WAITOK, sp->length,
13291 	                       resv_upfront, 0);
13292 	if (sp->data == NULL) {
13293 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
13294 		return (ENOBUFS);
13295 	}
13296 
13297 	sp->tail_mbuf = m_last(sp->data);
13298 	return (0);
13299 #else
13300 	int left;
13301 	int cancpy, willcpy, error;
13302 	struct mbuf *m, *head;
13303 	int cpsz = 0;
13304 
13305 	/* First one gets a header */
13306 	left = sp->length;
13307 	head = m = sctp_get_mbuf_for_msg((left + resv_upfront), 0, M_WAITOK, 0, MT_DATA);
13308 	if (m == NULL) {
13309 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
13310 		return (ENOBUFS);
13311 	}
13312 	/*-
13313 	 * Add this one for m in now, that way if the alloc fails we won't
13314 	 * have a bad cnt.
13315 	 */
13316 	SCTP_BUF_RESV_UF(m, resv_upfront);
13317 	cancpy = (int)M_TRAILINGSPACE(m);
13318 	willcpy = min(cancpy, left);
13319 	while (left > 0) {
13320 		/* move in user data */
13321 		error = uiomove(mtod(m, caddr_t), willcpy, uio);
13322 		if (error) {
13323 			sctp_m_freem(head);
13324 			return (error);
13325 		}
13326 		SCTP_BUF_LEN(m) = willcpy;
13327 		left -= willcpy;
13328 		cpsz += willcpy;
13329 		if (left > 0) {
13330 			SCTP_BUF_NEXT(m) = sctp_get_mbuf_for_msg(left, 0, M_WAITOK, 0, MT_DATA);
13331 			if (SCTP_BUF_NEXT(m) == NULL) {
13332 				/*
13333 				 * the head goes back to caller, he can free
13334 				 * the rest
13335 				 */
13336 				sctp_m_freem(head);
13337 				SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
13338 				return (ENOBUFS);
13339 			}
13340 			m = SCTP_BUF_NEXT(m);
13341 			cancpy = (int)M_TRAILINGSPACE(m);
13342 			willcpy = min(cancpy, left);
13343 		} else {
13344 			sp->tail_mbuf = m;
13345 			SCTP_BUF_NEXT(m) = NULL;
13346 		}
13347 	}
13348 	sp->data = head;
13349 	sp->length = cpsz;
13350 	return (0);
13351 #endif
13352 }
13353 
13354 
13355 
13356 static struct sctp_stream_queue_pending *
13357 sctp_copy_it_in(struct sctp_tcb *stcb,
13358     struct sctp_association *asoc,
13359     struct sctp_sndrcvinfo *srcv,
13360     struct uio *uio,
13361     struct sctp_nets *net,
13362     ssize_t max_send_len,
13363     int user_marks_eor,
13364     int *error)
13365 
13366 {
13367 	/*-
13368 	 * This routine must be very careful in its work. Protocol
13369 	 * processing is up and running so care must be taken to spl...()
13370 	 * when you need to do something that may affect the stcb/asoc. The
13371 	 * sb is locked however. When data is copied the protocol processing
13372 	 * should be enabled since this is a slower operation...
13373 	 */
13374 	struct sctp_stream_queue_pending *sp = NULL;
13375 	int resv_in_first;
13376 
13377 	*error = 0;
13378 	/* Now can we send this? */
13379 	if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) ||
13380 	    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
13381 	    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
13382 	    (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) {
13383 		/* got data while shutting down */
13384 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
13385 		*error = ECONNRESET;
13386 		goto out_now;
13387 	}
13388 	sctp_alloc_a_strmoq(stcb, sp);
13389 	if (sp == NULL) {
13390 		SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
13391 		*error = ENOMEM;
13392 		goto out_now;
13393 	}
13394 	sp->act_flags = 0;
13395 	sp->sender_all_done = 0;
13396 	sp->sinfo_flags = srcv->sinfo_flags;
13397 	sp->timetolive = srcv->sinfo_timetolive;
13398 	sp->ppid = srcv->sinfo_ppid;
13399 	sp->context = srcv->sinfo_context;
13400 	sp->fsn = 0;
13401 	(void)SCTP_GETTIME_TIMEVAL(&sp->ts);
13402 
13403 	sp->sid = srcv->sinfo_stream;
13404 #if defined(__APPLE__)
13405 #if defined(APPLE_LEOPARD)
13406 	sp->length = (uint32_t)min(uio->uio_resid, max_send_len);
13407 #else
13408 	sp->length = (uint32_t)min(uio_resid(uio), max_send_len);
13409 #endif
13410 #else
13411 	sp->length = (uint32_t)min(uio->uio_resid, max_send_len);
13412 #endif
13413 #if defined(__APPLE__)
13414 #if defined(APPLE_LEOPARD)
13415 	if ((sp->length == (uint32_t)uio->uio_resid) &&
13416 #else
13417 	if ((sp->length == (uint32_t)uio_resid(uio)) &&
13418 #endif
13419 #else
13420 	if ((sp->length == (uint32_t)uio->uio_resid) &&
13421 #endif
13422 	    ((user_marks_eor == 0) ||
13423 	     (srcv->sinfo_flags & SCTP_EOF) ||
13424 	     (user_marks_eor && (srcv->sinfo_flags & SCTP_EOR)))) {
13425 		sp->msg_is_complete = 1;
13426 	} else {
13427 		sp->msg_is_complete = 0;
13428 	}
13429 	sp->sender_all_done = 0;
13430 	sp->some_taken = 0;
13431 	sp->put_last_out = 0;
13432 	resv_in_first = SCTP_DATA_CHUNK_OVERHEAD(stcb);
13433 	sp->data = sp->tail_mbuf = NULL;
13434 	if (sp->length == 0) {
13435 		goto skip_copy;
13436 	}
13437 	if (srcv->sinfo_keynumber_valid) {
13438 		sp->auth_keyid = srcv->sinfo_keynumber;
13439 	} else {
13440 		sp->auth_keyid = stcb->asoc.authinfo.active_keyid;
13441 	}
13442 	if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks)) {
13443 		sctp_auth_key_acquire(stcb, sp->auth_keyid);
13444 		sp->holds_key_ref = 1;
13445 	}
13446 #if defined(__APPLE__)
13447 	SCTP_SOCKET_UNLOCK(SCTP_INP_SO(stcb->sctp_ep), 0);
13448 #endif
13449 	*error = sctp_copy_one(sp, uio, resv_in_first);
13450 #if defined(__APPLE__)
13451 	SCTP_SOCKET_LOCK(SCTP_INP_SO(stcb->sctp_ep), 0);
13452 #endif
13453  skip_copy:
13454 	if (*error) {
13455 #if defined(__Userspace__)
13456 		SCTP_TCB_LOCK(stcb);
13457 #endif
13458 		sctp_free_a_strmoq(stcb, sp, SCTP_SO_LOCKED);
13459 #if defined(__Userspace__)
13460 		SCTP_TCB_UNLOCK(stcb);
13461 #endif
13462 		sp = NULL;
13463 	} else {
13464 		if (sp->sinfo_flags & SCTP_ADDR_OVER) {
13465 			sp->net = net;
13466 			atomic_add_int(&sp->net->ref_count, 1);
13467 		} else {
13468 			sp->net = NULL;
13469 		}
13470 		sctp_set_prsctp_policy(sp);
13471 	}
13472 out_now:
13473 	return (sp);
13474 }
13475 
13476 
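/*
 * Socket-layer send hook: pull an sndrcvinfo out of the control mbufs if
 * one was supplied, map IPv4-mapped IPv6 addresses back to AF_INET, and
 * hand everything to sctp_lower_sosend().
 */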
13477 int
13478 sctp_sosend(struct socket *so,
13479             struct sockaddr *addr,
13480             struct uio *uio,
13481 #ifdef __Panda__
13482             pakhandle_type top,
13483             pakhandle_type icontrol,
13484 #else
13485             struct mbuf *top,
13486             struct mbuf *control,
13487 #endif
13488 #if defined(__APPLE__) || defined(__Panda__)
13489             int flags
13490 #else
13491             int flags,
13492 #if defined(__FreeBSD__) && __FreeBSD_version >= 500000
13493             struct thread *p
13494 #elif defined(__Windows__)
13495             PKTHREAD p
13496 #else
13497 #if defined(__Userspace__)
13498             /*
13499 	     * proc is a dummy in __Userspace__ and will not be passed
13500 	     * to sctp_lower_sosend
13501 	     */
13502 #endif
13503             struct proc *p
13504 #endif
13505 #endif
13506 )
13507 {
13508 #ifdef __Panda__
13509 	struct mbuf *control = NULL;
13510 #endif
13511 #if defined(__APPLE__)
13512 	struct proc *p = current_proc();
13513 #endif
13514 	int error, use_sndinfo = 0;
13515 	struct sctp_sndrcvinfo sndrcvninfo;
13516 	struct sockaddr *addr_to_use;
13517 #if defined(INET) && defined(INET6)
13518 	struct sockaddr_in sin;
13519 #endif
13520 
13521 #if defined(__APPLE__)
13522 	SCTP_SOCKET_LOCK(so, 1);
13523 #endif
13524 #ifdef __Panda__
13525 	control = SCTP_HEADER_TO_CHAIN(icontrol);
13526 #endif
13527 	if (control) {
13528 		/* process cmsg snd/rcv info (maybe an assoc-id) */
13529 		if (sctp_find_cmsg(SCTP_SNDRCV, (void *)&sndrcvninfo, control,
13530 		    sizeof(sndrcvninfo))) {
13531 			/* got one */
13532 			use_sndinfo = 1;
13533 		}
13534 	}
13535 	addr_to_use = addr;
13536 #if defined(INET) && defined(INET6)
13537 	if ((addr) && (addr->sa_family == AF_INET6)) {
13538 		struct sockaddr_in6 *sin6;
13539 
13540 		sin6 = (struct sockaddr_in6 *)addr;
13541 		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
13542 			in6_sin6_2_sin(&sin, sin6);
13543 			addr_to_use = (struct sockaddr *)&sin;
13544 		}
13545 	}
13546 #endif
13547 	error = sctp_lower_sosend(so, addr_to_use, uio, top,
13548 #ifdef __Panda__
13549 				  icontrol,
13550 #else
13551 				  control,
13552 #endif
13553 				  flags,
13554 				  use_sndinfo ? &sndrcvninfo: NULL
13555 #if !(defined(__Panda__) || defined(__Userspace__))
13556 				  , p
13557 #endif
13558 		);
13559 #if defined(__APPLE__)
13560 	SCTP_SOCKET_UNLOCK(so, 1);
13561 #endif
13562 	return (error);
13563 }
13564 
13565 
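/*
 * The workhorse behind all SCTP send calls: validates the destination
 * address and send flags, locates (or, for implicit sends, creates) the
 * association, enforces the non-blocking limits, handles SCTP_ABORT and
 * SCTP_EOF, and then queues the user data for transmission.
 */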
13566 int
13567 sctp_lower_sosend(struct socket *so,
13568                   struct sockaddr *addr,
13569                   struct uio *uio,
13570 #ifdef __Panda__
13571                   pakhandle_type i_pak,
13572                   pakhandle_type i_control,
13573 #else
13574                   struct mbuf *i_pak,
13575                   struct mbuf *control,
13576 #endif
13577                   int flags,
13578                   struct sctp_sndrcvinfo *srcv
13579 #if !(defined( __Panda__) || defined(__Userspace__))
13580                   ,
13581 #if defined(__FreeBSD__) && __FreeBSD_version >= 500000
13582                   struct thread *p
13583 #elif defined(__Windows__)
13584                   PKTHREAD p
13585 #else
13586                   struct proc *p
13587 #endif
13588 #endif
13589 	)
13590 {
13591 #if defined(__FreeBSD__)
13592 	struct epoch_tracker et;
13593 #endif
13594 	ssize_t sndlen = 0, max_len, local_add_more;
13595 	int error, len;
13596 	struct mbuf *top = NULL;
13597 #ifdef __Panda__
13598 	struct mbuf *control = NULL;
13599 #endif
13600 	int queue_only = 0, queue_only_for_init = 0;
13601 	int free_cnt_applied = 0;
13602 	int un_sent;
13603 	int now_filled = 0;
13604 	unsigned int inqueue_bytes = 0;
13605 	struct sctp_block_entry be;
13606 	struct sctp_inpcb *inp;
13607 	struct sctp_tcb *stcb = NULL;
13608 	struct timeval now;
13609 	struct sctp_nets *net;
13610 	struct sctp_association *asoc;
13611 	struct sctp_inpcb *t_inp;
13612 	int user_marks_eor;
13613 	int create_lock_applied = 0;
13614 	int nagle_applies = 0;
13615 	int some_on_control = 0;
13616 	int got_all_of_the_send = 0;
13617 	int hold_tcblock = 0;
13618 	int non_blocking = 0;
13619 	ssize_t local_soresv = 0;
13620 	uint16_t port;
13621 	uint16_t sinfo_flags;
13622 	sctp_assoc_t sinfo_assoc_id;
13623 
13624 	error = 0;
13625 	net = NULL;
13626 	stcb = NULL;
13627 	asoc = NULL;
13628 
13629 #if defined(__APPLE__)
13630 	sctp_lock_assert(so);
13631 #endif
13632 	t_inp = inp = (struct sctp_inpcb *)so->so_pcb;
13633 	if (inp == NULL) {
13634 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13635 		error = EINVAL;
13636 		if (i_pak) {
13637 			SCTP_RELEASE_PKT(i_pak);
13638 		}
13639 		return (error);
13640 	}
13641 	if ((uio == NULL) && (i_pak == NULL)) {
13642 		SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13643 		return (EINVAL);
13644 	}
13645 	user_marks_eor = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
13646 	atomic_add_int(&inp->total_sends, 1);
13647 	if (uio) {
13648 #if defined(__APPLE__)
13649 #if defined(APPLE_LEOPARD)
13650 		if (uio->uio_resid < 0) {
13651 #else
13652 		if (uio_resid(uio) < 0) {
13653 #endif
13654 #else
13655 		if (uio->uio_resid < 0) {
13656 #endif
13657 			SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13658 			return (EINVAL);
13659 		}
13660 #if defined(__APPLE__)
13661 #if defined(APPLE_LEOPARD)
13662 		sndlen = uio->uio_resid;
13663 #else
13664 		sndlen = uio_resid(uio);
13665 #endif
13666 #else
13667 		sndlen = uio->uio_resid;
13668 #endif
13669 	} else {
13670 		top = SCTP_HEADER_TO_CHAIN(i_pak);
13671 #ifdef __Panda__
13672 		/*-
13673 		 * app len indicates the datalen, dgsize for cases
13674 		 * of SCTP_EOF/ABORT will not have the right len
13675 		 */
13676 		sndlen = SCTP_APP_DATA_LEN(i_pak);
13677 		/*-
13678 		 * Set the particle len also to zero to match
13679 		 * up with app len. We only have one particle
13680 		 * if app len is zero for Panda. This is ensured
13681 		 * in the socket lib
13682 		 */
13683 		if (sndlen == 0) {
13684 			SCTP_BUF_LEN(top)  = 0;
13685 		}
13686 		/*-
13687 		 * We delink the chain from header, but keep
13688 		 * the header around as we will need it in
13689 		 * EAGAIN case
13690 		 */
13691 		SCTP_DETACH_HEADER_FROM_CHAIN(i_pak);
13692 #else
13693 		sndlen = SCTP_HEADER_LEN(i_pak);
13694 #endif
13695 	}
13696 	SCTPDBG(SCTP_DEBUG_OUTPUT1, "Send called addr:%p send length %zd\n",
13697 	        (void *)addr,
13698 	        sndlen);
13699 #ifdef __Panda__
13700 	if (i_control) {
13701 		control = SCTP_HEADER_TO_CHAIN(i_control);
13702 	}
13703 #endif
13704 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
13705 	    SCTP_IS_LISTENING(inp)) {
13706 		/* The listener can NOT send */
13707 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOTCONN);
13708 		error = ENOTCONN;
13709 		goto out_unlocked;
13710 	}
13711 	/**
13712 	 * Pre-screen address: if one is given, its length field
13713 	 * must be set correctly!
13714 	 */
13715 	if (addr) {
13716 		union sctp_sockstore *raddr = (union sctp_sockstore *)addr;
13717 		switch (raddr->sa.sa_family) {
13718 #ifdef INET
13719 		case AF_INET:
13720 #ifdef HAVE_SIN_LEN
13721 			if (raddr->sin.sin_len != sizeof(struct sockaddr_in)) {
13722 				SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13723 				error = EINVAL;
13724 				goto out_unlocked;
13725 			}
13726 #endif
13727 			port = raddr->sin.sin_port;
13728 			break;
13729 #endif
13730 #ifdef INET6
13731 		case AF_INET6:
13732 #ifdef HAVE_SIN6_LEN
13733 			if (raddr->sin6.sin6_len != sizeof(struct sockaddr_in6)) {
13734 				SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13735 				error = EINVAL;
13736 				goto out_unlocked;
13737 			}
13738 #endif
13739 			port = raddr->sin6.sin6_port;
13740 			break;
13741 #endif
13742 #if defined(__Userspace__)
13743 		case AF_CONN:
13744 #ifdef HAVE_SCONN_LEN
13745 			if (raddr->sconn.sconn_len != sizeof(struct sockaddr_conn)) {
13746 				SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13747 				error = EINVAL;
13748 				goto out_unlocked;
13749 			}
13750 #endif
13751 			port = raddr->sconn.sconn_port;
13752 			break;
13753 #endif
13754 		default:
13755 			SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EAFNOSUPPORT);
13756 			error = EAFNOSUPPORT;
13757 			goto out_unlocked;
13758 		}
13759 	} else
13760 		port = 0;
13761 
13762 	if (srcv) {
13763 		sinfo_flags = srcv->sinfo_flags;
13764 		sinfo_assoc_id = srcv->sinfo_assoc_id;
13765 		if (INVALID_SINFO_FLAG(sinfo_flags) ||
13766 		    PR_SCTP_INVALID_POLICY(sinfo_flags)) {
13767 			SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13768 			error = EINVAL;
13769 			goto out_unlocked;
13770 		}
13771 		if (srcv->sinfo_flags)
13772 			SCTP_STAT_INCR(sctps_sends_with_flags);
13773 	} else {
13774 		sinfo_flags = inp->def_send.sinfo_flags;
13775 		sinfo_assoc_id = inp->def_send.sinfo_assoc_id;
13776 	}
13777 #if defined(__FreeBSD__)
13778 	if (flags & MSG_EOR) {
13779 		sinfo_flags |= SCTP_EOR;
13780 	}
13781 	if (flags & MSG_EOF) {
13782 		sinfo_flags |= SCTP_EOF;
13783 	}
13784 #endif
13785 	if (sinfo_flags & SCTP_SENDALL) {
13786 		/* it's a sendall */
13787 		error = sctp_sendall(inp, uio, top, srcv);
13788 		top = NULL;
13789 		goto out_unlocked;
13790 	}
13791 	if ((sinfo_flags & SCTP_ADDR_OVER) && (addr == NULL)) {
13792 		SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13793 		error = EINVAL;
13794 		goto out_unlocked;
13795 	}
13796 	/* now we must find the assoc */
13797 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) ||
13798 	    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
13799 		SCTP_INP_RLOCK(inp);
13800 		stcb = LIST_FIRST(&inp->sctp_asoc_list);
13801 		if (stcb) {
13802 			SCTP_TCB_LOCK(stcb);
13803 			hold_tcblock = 1;
13804 		}
13805 		SCTP_INP_RUNLOCK(inp);
13806 	} else if (sinfo_assoc_id) {
13807 		stcb = sctp_findassociation_ep_asocid(inp, sinfo_assoc_id, 1);
13808 		if (stcb != NULL) {
13809 			hold_tcblock = 1;
13810 		}
13811 	} else if (addr) {
13812 		/*-
13813 		 * Since we did not use findep we must
13814 		 * increment it, and if we don't find a tcb
13815 		 * decrement it.
13816 		 */
13817 		SCTP_INP_WLOCK(inp);
13818 		SCTP_INP_INCR_REF(inp);
13819 		SCTP_INP_WUNLOCK(inp);
13820 		stcb = sctp_findassociation_ep_addr(&t_inp, addr, &net, NULL, NULL);
13821 		if (stcb == NULL) {
13822 			SCTP_INP_WLOCK(inp);
13823 			SCTP_INP_DECR_REF(inp);
13824 			SCTP_INP_WUNLOCK(inp);
13825 		} else {
13826 			hold_tcblock = 1;
13827 		}
13828 	}
13829 	if ((stcb == NULL) && (addr)) {
13830 		/* Possible implicit send? */
13831 		SCTP_ASOC_CREATE_LOCK(inp);
13832 		create_lock_applied = 1;
13833 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
13834 		    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
13835 			/* Should I really unlock ? */
13836 			SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13837 			error = EINVAL;
13838 			goto out_unlocked;
13839 
13840 		}
13841 		if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) &&
13842 		    (addr->sa_family == AF_INET6)) {
13843 			SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13844 			error = EINVAL;
13845 			goto out_unlocked;
13846 		}
13847 		SCTP_INP_WLOCK(inp);
13848 		SCTP_INP_INCR_REF(inp);
13849 		SCTP_INP_WUNLOCK(inp);
13850 		/* With the lock applied look again */
13851 		stcb = sctp_findassociation_ep_addr(&t_inp, addr, &net, NULL, NULL);
13852 #if defined(INET) || defined(INET6)
13853 		if ((stcb == NULL) && (control != NULL) && (port > 0)) {
13854 			stcb = sctp_findassociation_cmsgs(&t_inp, port, control, &net, &error);
13855 		}
13856 #endif
13857 		if (stcb == NULL) {
13858 			SCTP_INP_WLOCK(inp);
13859 			SCTP_INP_DECR_REF(inp);
13860 			SCTP_INP_WUNLOCK(inp);
13861 		} else {
13862 			hold_tcblock = 1;
13863 		}
13864 		if (error) {
13865 			goto out_unlocked;
13866 		}
13867 		if (t_inp != inp) {
13868 			SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOTCONN);
13869 			error = ENOTCONN;
13870 			goto out_unlocked;
13871 		}
13872 	}
13873 	if (stcb == NULL) {
13874 		if (addr == NULL) {
13875 			SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOENT);
13876 			error = ENOENT;
13877 			goto out_unlocked;
13878 		} else {
13879 			/* We must go ahead and start the INIT process */
13880 			uint32_t vrf_id;
13881 
13882 			if ((sinfo_flags & SCTP_ABORT) ||
13883 			    ((sinfo_flags & SCTP_EOF) && (sndlen == 0))) {
13884 				/*-
13885 				 * User asks to abort a non-existent assoc,
13886 				 * or EOF a non-existent assoc with no data
13887 				 */
13888 				SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOENT);
13889 				error = ENOENT;
13890 				goto out_unlocked;
13891 			}
13892 			/* get an asoc/stcb struct */
13893 			vrf_id = inp->def_vrf_id;
13894 #ifdef INVARIANTS
13895 			if (create_lock_applied == 0) {
13896 				panic("Error, should hold create lock and I don't?");
13897 			}
13898 #endif
13899 			stcb = sctp_aloc_assoc(inp, addr, &error, 0, vrf_id,
13900 			                       inp->sctp_ep.pre_open_stream_count,
13901 			                       inp->sctp_ep.port,
13902 #if !(defined( __Panda__) || defined(__Userspace__))
13903 			                       p,
13904 #else
13905 			                       (struct proc *)NULL,
13906 #endif
13907 			                       SCTP_INITIALIZE_AUTH_PARAMS);
13908 			if (stcb == NULL) {
13909 				/* Error is setup for us in the call */
13910 				goto out_unlocked;
13911 			}
13912 			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
13913 				stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
13914 				/* Set the connected flag so we can queue data */
13915 				soisconnecting(so);
13916 			}
13917 			hold_tcblock = 1;
13918 			if (create_lock_applied) {
13919 				SCTP_ASOC_CREATE_UNLOCK(inp);
13920 				create_lock_applied = 0;
13921 			} else {
13922 				SCTP_PRINTF("Huh-3? create lock should have been on??\n");
13923 			}
13924 			/* Turn on queue only flag to prevent data from being sent */
13925 			queue_only = 1;
13926 			asoc = &stcb->asoc;
13927 			SCTP_SET_STATE(stcb, SCTP_STATE_COOKIE_WAIT);
13928 			(void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
13929 
13930 			if (control) {
13931 				if (sctp_process_cmsgs_for_init(stcb, control, &error)) {
13932 					sctp_free_assoc(inp, stcb, SCTP_PCBFREE_FORCE,
13933 					                SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_6);
13934 					hold_tcblock = 0;
13935 					stcb = NULL;
13936 					goto out_unlocked;
13937 				}
13938 			}
13939 			/* out with the INIT */
13940 			queue_only_for_init = 1;
13941 			/*-
13942 			 * we may want to dig in after this call and adjust the MTU
13943 			 * value. It defaulted to 1500 (constant) but the ro
13944 			 * structure may now have an update and thus we may need to
13945 			 * change it BEFORE we append the message.
13946 			 */
13947 		}
13948 	} else
13949 		asoc = &stcb->asoc;
13950 	if (srcv == NULL) {
13951 		srcv = (struct sctp_sndrcvinfo *)&asoc->def_send;
13952 		sinfo_flags = srcv->sinfo_flags;
13953 #if defined(__FreeBSD__)
13954 		if (flags & MSG_EOR) {
13955 			sinfo_flags |= SCTP_EOR;
13956 		}
13957 		if (flags & MSG_EOF) {
13958 			sinfo_flags |= SCTP_EOF;
13959 		}
13960 #endif
13961 	}
13962 	if (sinfo_flags & SCTP_ADDR_OVER) {
13963 		if (addr)
13964 			net = sctp_findnet(stcb, addr);
13965 		else
13966 			net = NULL;
13967 		if ((net == NULL) ||
13968 		    ((port != 0) && (port != stcb->rport))) {
13969 			SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13970 			error = EINVAL;
13971 			goto out_unlocked;
13972 		}
13973 	} else {
13974 		if (stcb->asoc.alternate) {
13975 			net = stcb->asoc.alternate;
13976 		} else {
13977 			net = stcb->asoc.primary_destination;
13978 		}
13979 	}
13980 	atomic_add_int(&stcb->total_sends, 1);
13981 	/* Keep the stcb from being freed under our feet */
13982 	atomic_add_int(&asoc->refcnt, 1);
13983 	free_cnt_applied = 1;
13984 
13985 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NO_FRAGMENT)) {
13986 		if (sndlen > (ssize_t)asoc->smallest_mtu) {
13987 			SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE);
13988 			error = EMSGSIZE;
13989 			goto out_unlocked;
13990 		}
13991 	}
13992 #if defined(__Userspace__)
13993 	if (inp->recv_callback) {
13994 		non_blocking = 1;
13995 	}
13996 #endif
13997 	if (SCTP_SO_IS_NBIO(so)
13998 #if defined(__FreeBSD__) && __FreeBSD_version >= 500000
13999 	     || (flags & (MSG_NBIO | MSG_DONTWAIT)) != 0
14000 #endif
14001 	    ) {
14002 		non_blocking = 1;
14003 	}
14004 	/* would we block? */
14005 	if (non_blocking) {
14006 		ssize_t amount;
14007 
14008 		if (hold_tcblock == 0) {
14009 			SCTP_TCB_LOCK(stcb);
14010 			hold_tcblock = 1;
14011 		}
14012 		inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * SCTP_DATA_CHUNK_OVERHEAD(stcb));
14013 		if (user_marks_eor == 0) {
14014 			amount = sndlen;
14015 		} else {
14016 			amount = 1;
14017 		}
14018 		if ((SCTP_SB_LIMIT_SND(so) <  (amount + inqueue_bytes + stcb->asoc.sb_send_resv)) ||
14019 		    (stcb->asoc.chunks_on_out_queue >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) {
14020 			SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EWOULDBLOCK);
14021 			if (sndlen > (ssize_t)SCTP_SB_LIMIT_SND(so))
14022 				error = EMSGSIZE;
14023 			else
14024 				error = EWOULDBLOCK;
14025 			goto out_unlocked;
14026 		}
14027 		stcb->asoc.sb_send_resv += (uint32_t)sndlen;
14028 		SCTP_TCB_UNLOCK(stcb);
14029 		hold_tcblock = 0;
14030 	} else {
14031 		atomic_add_int(&stcb->asoc.sb_send_resv, sndlen);
14032 	}
14033 	local_soresv = sndlen;
14034 	if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
14035 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
14036 		error = ECONNRESET;
14037 		goto out_unlocked;
14038 	}
14039 	if (create_lock_applied) {
14040 		SCTP_ASOC_CREATE_UNLOCK(inp);
14041 		create_lock_applied = 0;
14042 	}
14043 	/* Is the stream no. valid? */
14044 	if (srcv->sinfo_stream >= asoc->streamoutcnt) {
14045 		/* Invalid stream number */
14046 		SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
14047 		error = EINVAL;
14048 		goto out_unlocked;
14049 	}
14050 	if ((asoc->strmout[srcv->sinfo_stream].state != SCTP_STREAM_OPEN) &&
14051 	    (asoc->strmout[srcv->sinfo_stream].state != SCTP_STREAM_OPENING)) {
14052 		/*
14053 		 * Can't queue any data while stream reset is underway.
14054 		 */
14055 		if (asoc->strmout[srcv->sinfo_stream].state > SCTP_STREAM_OPEN) {
14056 			error = EAGAIN;
14057 		} else {
14058 			error = EINVAL;
14059 		}
14060 		SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, error);
14061 		goto out_unlocked;
14062 	}
14063 	if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
14064 	    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
14065 		queue_only = 1;
14066 	}
14067 	/* we are now done with all control */
14068 	if (control) {
14069 		sctp_m_freem(control);
14070 		control = NULL;
14071 	}
14072 	if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) ||
14073 	    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
14074 	    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
14075 	    (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) {
14076 		if (sinfo_flags & SCTP_ABORT) {
14077 			;
14078 		} else {
14079 			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
14080 			error = ECONNRESET;
14081 			goto out_unlocked;
14082 		}
14083 	}
14084 	/* Ok, we will attempt a msgsnd :> */
14085 #if !(defined(__Panda__) || defined(__Windows__) || defined(__Userspace__))
14086 	if (p) {
14087 #if defined(__FreeBSD__) && __FreeBSD_version >= 603000
14088 		p->td_ru.ru_msgsnd++;
14089 #elif defined(__FreeBSD__) && __FreeBSD_version >= 500000
14090 		p->td_proc->p_stats->p_ru.ru_msgsnd++;
14091 #else
14092 		p->p_stats->p_ru.ru_msgsnd++;
14093 #endif
14094 	}
14095 #endif
14096 	/* Are we aborting? */
14097 	if (sinfo_flags & SCTP_ABORT) {
14098 		struct mbuf *mm;
14099 		ssize_t tot_demand, tot_out = 0, max_out;
14100 
14101 		SCTP_STAT_INCR(sctps_sends_with_abort);
14102 		if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
14103 		    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
14104 			/* It has to be up before we abort */
14105 			/* how big is the user initiated abort? */
14106 			SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
14107 			error = EINVAL;
14108 			goto out;
14109 		}
14110 		if (hold_tcblock) {
14111 			SCTP_TCB_UNLOCK(stcb);
14112 			hold_tcblock = 0;
14113 		}
14114 		if (top) {
14115 			struct mbuf *cntm = NULL;
14116 
14117 			mm = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_WAITOK, 1, MT_DATA);
14118 			if (sndlen != 0) {
14119 				for (cntm = top; cntm; cntm = SCTP_BUF_NEXT(cntm)) {
14120 					tot_out += SCTP_BUF_LEN(cntm);
14121 				}
14122 			}
14123 		} else {
14124 			/* Must fit in an MTU */
14125 			tot_out = sndlen;
14126 			tot_demand = (tot_out + sizeof(struct sctp_paramhdr));
14127 			if (tot_demand > SCTP_DEFAULT_ADD_MORE) {
14128 				/* Too big */
14129 				SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE);
14130 				error = EMSGSIZE;
14131 				goto out;
14132 			}
14133 			mm = sctp_get_mbuf_for_msg((unsigned int)tot_demand, 0, M_WAITOK, 1, MT_DATA);
14134 		}
14135 		if (mm == NULL) {
14136 			SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
14137 			error = ENOMEM;
14138 			goto out;
14139 		}
14140 		max_out = asoc->smallest_mtu - sizeof(struct sctp_paramhdr);
14141 		max_out -= sizeof(struct sctp_abort_msg);
14142 		if (tot_out > max_out) {
14143 			tot_out = max_out;
14144 		}
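		/*
		 * Lay out the User-Initiated Abort cause: a parameter header
		 * followed by up to max_out bytes of the user's message,
		 * taken either from the uio or from the supplied mbuf chain.
		 */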
14145 		if (mm) {
14146 			struct sctp_paramhdr *ph;
14147 
14148 			/* now move forward the data pointer */
14149 			ph = mtod(mm, struct sctp_paramhdr *);
14150 			ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
14151 			ph->param_length = htons((uint16_t)(sizeof(struct sctp_paramhdr) + tot_out));
14152 			ph++;
14153 			SCTP_BUF_LEN(mm) = (int)(tot_out + sizeof(struct sctp_paramhdr));
14154 			if (top == NULL) {
14155 #if defined(__APPLE__)
14156 				SCTP_SOCKET_UNLOCK(so, 0);
14157 #endif
14158 				error = uiomove((caddr_t)ph, (int)tot_out, uio);
14159 #if defined(__APPLE__)
14160 				SCTP_SOCKET_LOCK(so, 0);
14161 #endif
14162 				if (error) {
14163 					/*-
14164 					 * If we can't copy in the user's data,
14165 					 * we still abort; we just don't get to
14166 					 * include the user's note.
14167 					 */
14168 					sctp_m_freem(mm);
14169 					mm = NULL;
14170 				}
14171 			} else {
14172 				if (sndlen != 0) {
14173 					SCTP_BUF_NEXT(mm) = top;
14174 				}
14175 			}
14176 		}
14177 		if (hold_tcblock == 0) {
14178 			SCTP_TCB_LOCK(stcb);
14179 		}
14180 		atomic_add_int(&stcb->asoc.refcnt, -1);
14181 		free_cnt_applied = 0;
14182 		/* release this lock, otherwise we hang on ourselves */
14183 #if defined(__FreeBSD__)
14184 		NET_EPOCH_ENTER(et);
14185 #endif
14186 		sctp_abort_an_association(stcb->sctp_ep, stcb, mm, SCTP_SO_LOCKED);
14187 #if defined(__FreeBSD__)
14188 		NET_EPOCH_EXIT(et);
14189 #endif
14190 		/* the association has been aborted; clear stcb so we do not touch or unlock it on the way out */
14191 		hold_tcblock = 0;
14192 		stcb = NULL;
14193 		/* In this case top is already chained to mm, so clear it to
14194 		 * avoid a double free: we free top below when it is still
14195 		 * non-NULL, and the driver frees the chain after sending
14196 		 * the packet out.
14197 		 */
14198 		if (sndlen != 0) {
14199 			top = NULL;
14200 		}
14201 		goto out_unlocked;
14202 	}
14203 	/* Calculate the maximum we can send */
14204 	inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * SCTP_DATA_CHUNK_OVERHEAD(stcb));
14205 	if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes) {
14206 		max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
14207 	} else {
14208 		max_len = 0;
14209 	}
14210 	if (hold_tcblock) {
14211 		SCTP_TCB_UNLOCK(stcb);
14212 		hold_tcblock = 0;
14213 	}
14214 	if (asoc->strmout == NULL) {
14215 		/* huh? software error */
14216 		SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
14217 		error = EFAULT;
14218 		goto out_unlocked;
14219 	}
14220 
14221 	/* Unless E_EOR mode is on, we must make a send FIT in one call. */
14222 	if ((user_marks_eor == 0) &&
14223 	    (sndlen > (ssize_t)SCTP_SB_LIMIT_SND(stcb->sctp_socket))) {
14224 		/* It will NEVER fit */
14225 		SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE);
14226 		error = EMSGSIZE;
14227 		goto out_unlocked;
14228 	}
14229 	if ((uio == NULL) && user_marks_eor) {
14230 		/*-
14231 		 * We do not support eeor mode for
14232 		 * sending with mbuf chains (like sendfile).
14233 		 */
14234 		SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
14235 		error = EINVAL;
14236 		goto out_unlocked;
14237 	}
14238 
14239 	if (user_marks_eor) {
14240 		local_add_more = (ssize_t)min(SCTP_SB_LIMIT_SND(so), SCTP_BASE_SYSCTL(sctp_add_more_threshold));
14241 	} else {
14242 		/*-
14243 		 * For non-eeor the whole message must fit in
14244 		 * the socket send buffer.
14245 		 */
14246 		local_add_more = sndlen;
14247 	}
14248 	len = 0;
14249 	if (non_blocking) {
14250 		goto skip_preblock;
14251 	}
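	/*
	 * Pre-block: if the message cannot currently fit in the send buffer,
	 * or too many chunks are already queued, sleep on so_snd until SACK
	 * processing frees enough space or an error/abort wakes us up.
	 */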
14252 	if (((max_len <= local_add_more) &&
14253 	     ((ssize_t)SCTP_SB_LIMIT_SND(so) >= local_add_more)) ||
14254 	    (max_len == 0) ||
14255 	    ((stcb->asoc.chunks_on_out_queue+stcb->asoc.stream_queue_cnt) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) {
14256 		/* No room right now! */
14257 		SOCKBUF_LOCK(&so->so_snd);
14258 		inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * SCTP_DATA_CHUNK_OVERHEAD(stcb));
14259 		while ((SCTP_SB_LIMIT_SND(so) < (inqueue_bytes + local_add_more)) ||
14260 		       ((stcb->asoc.stream_queue_cnt + stcb->asoc.chunks_on_out_queue) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) {
14261 			SCTPDBG(SCTP_DEBUG_OUTPUT1,"pre_block limit:%u <(inq:%d + %zd) || (%d+%d > %d)\n",
14262 			        (unsigned int)SCTP_SB_LIMIT_SND(so),
14263 			        inqueue_bytes,
14264 			        local_add_more,
14265 			        stcb->asoc.stream_queue_cnt,
14266 			        stcb->asoc.chunks_on_out_queue,
14267 			        SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue));
14268 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
14269 				sctp_log_block(SCTP_BLOCK_LOG_INTO_BLKA, asoc, sndlen);
14270 			}
14271 			be.error = 0;
14272 #if !defined(__Panda__) && !defined(__Windows__)
14273 			stcb->block_entry = &be;
14274 #endif
14275 			error = sbwait(&so->so_snd);
14276 			stcb->block_entry = NULL;
14277 			if (error || so->so_error || be.error) {
14278 				if (error == 0) {
14279 					if (so->so_error)
14280 						error = so->so_error;
14281 					if (be.error) {
14282 						error = be.error;
14283 					}
14284 				}
14285 				SOCKBUF_UNLOCK(&so->so_snd);
14286 				goto out_unlocked;
14287 			}
14288 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
14289 				sctp_log_block(SCTP_BLOCK_LOG_OUTOF_BLK,
14290 				               asoc, stcb->asoc.total_output_queue_size);
14291 			}
14292 			if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
14293 				SOCKBUF_UNLOCK(&so->so_snd);
14294 				goto out_unlocked;
14295 			}
14296 			inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * SCTP_DATA_CHUNK_OVERHEAD(stcb));
14297 		}
14298 		if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes) {
14299 			max_len = SCTP_SB_LIMIT_SND(so) -  inqueue_bytes;
14300 		} else {
14301 			max_len = 0;
14302 		}
14303 		SOCKBUF_UNLOCK(&so->so_snd);
14304 	}
14305 
14306 skip_preblock:
14307 	if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
14308 		goto out_unlocked;
14309 	}
14310 #if defined(__APPLE__)
14311 	error = sblock(&so->so_snd, SBLOCKWAIT(flags));
14312 #endif
14313 	/* sndlen covers the mbuf case,
14314 	 * uio_resid covers the non-mbuf case.
14315 	 * NOTE: uio will be NULL when top (an mbuf chain) is passed.
14316 	 */
14317 	if (sndlen == 0) {
14318 		if (sinfo_flags & SCTP_EOF) {
14319 			got_all_of_the_send = 1;
14320 			goto dataless_eof;
14321 		} else {
14322 			SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
14323 			error = EINVAL;
14324 			goto out;
14325 		}
14326 	}
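	/*
	 * top == NULL means the data comes from the caller's uio; copy it in
	 * piecewise and append it to a pending entry on the selected stream.
	 * Otherwise the caller handed us a ready-made mbuf chain.
	 */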
14327 	if (top == NULL) {
14328 		struct sctp_stream_queue_pending *sp;
14329 		struct sctp_stream_out *strm;
14330 		uint32_t sndout;
14331 
14332 		SCTP_TCB_SEND_LOCK(stcb);
14333 		if ((asoc->stream_locked) &&
14334 		    (asoc->stream_locked_on  != srcv->sinfo_stream)) {
14335 			SCTP_TCB_SEND_UNLOCK(stcb);
14336 			SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
14337 			error = EINVAL;
14338 			goto out;
14339 		}
14340 		SCTP_TCB_SEND_UNLOCK(stcb);
14341 
14342 		strm = &stcb->asoc.strmout[srcv->sinfo_stream];
14343 		if (strm->last_msg_incomplete == 0) {
14344 		do_a_copy_in:
14345 			sp = sctp_copy_it_in(stcb, asoc, srcv, uio, net, max_len, user_marks_eor, &error);
14346 			if (error) {
14347 				goto out;
14348 			}
14349 			SCTP_TCB_SEND_LOCK(stcb);
14350 			if (sp->msg_is_complete) {
14351 				strm->last_msg_incomplete = 0;
14352 				asoc->stream_locked = 0;
14353 			} else {
14354 				/* We just got locked to this stream in
14355 				 * case the copy was interrupted.
14356 				 */
14357 				strm->last_msg_incomplete = 1;
14358 				if (stcb->asoc.idata_supported == 0) {
14359 					asoc->stream_locked = 1;
14360 					asoc->stream_locked_on  = srcv->sinfo_stream;
14361 				}
14362 				sp->sender_all_done = 0;
14363 			}
14364 			sctp_snd_sb_alloc(stcb, sp->length);
14365 			atomic_add_int(&asoc->stream_queue_cnt, 1);
14366 			if (sinfo_flags & SCTP_UNORDERED) {
14367 				SCTP_STAT_INCR(sctps_sends_with_unord);
14368 			}
14369 			TAILQ_INSERT_TAIL(&strm->outqueue, sp, next);
14370 			stcb->asoc.ss_functions.sctp_ss_add_to_stream(stcb, asoc, strm, sp, 1);
14371 			SCTP_TCB_SEND_UNLOCK(stcb);
14372 		} else {
14373 			SCTP_TCB_SEND_LOCK(stcb);
14374 			sp = TAILQ_LAST(&strm->outqueue, sctp_streamhead);
14375 			SCTP_TCB_SEND_UNLOCK(stcb);
14376 			if (sp == NULL) {
14377 				/* ???? Huh ??? last msg is gone */
14378 #ifdef INVARIANTS
14379 				panic("Warning: Last msg marked incomplete, yet nothing left?");
14380 #else
14381 				SCTP_PRINTF("Warning: Last msg marked incomplete, yet nothing left?\n");
14382 				strm->last_msg_incomplete = 0;
14383 #endif
14384 				goto do_a_copy_in;
14385 
14386 			}
14387 		}
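		/*
		 * Copy loop: keep filling the pending message from the uio,
		 * blocking for send-buffer space as needed, until all of the
		 * user's data has been copied in.
		 */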
14388 #if defined(__APPLE__)
14389 #if defined(APPLE_LEOPARD)
14390 		while (uio->uio_resid > 0) {
14391 #else
14392 		while (uio_resid(uio) > 0) {
14393 #endif
14394 #else
14395 		while (uio->uio_resid > 0) {
14396 #endif
14397 			/* How much room do we have? */
14398 			struct mbuf *new_tail, *mm;
14399 
14400 			inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * SCTP_DATA_CHUNK_OVERHEAD(stcb));
14401 			if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes)
14402 				max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
14403 			else
14404 				max_len = 0;
14405 
14406 			if ((max_len > (ssize_t)SCTP_BASE_SYSCTL(sctp_add_more_threshold)) ||
14407 			    (max_len && (SCTP_SB_LIMIT_SND(so) < SCTP_BASE_SYSCTL(sctp_add_more_threshold))) ||
14408 #if defined(__APPLE__)
14409 #if defined(APPLE_LEOPARD)
14410 			    (uio->uio_resid && (uio->uio_resid <= max_len))) {
14411 #else
14412 			    (uio_resid(uio) && (uio_resid(uio) <= max_len))) {
14413 #endif
14414 #else
14415 			    (uio->uio_resid && (uio->uio_resid <= max_len))) {
14416 #endif
14417 				sndout = 0;
14418 				new_tail = NULL;
14419 				if (hold_tcblock) {
14420 					SCTP_TCB_UNLOCK(stcb);
14421 					hold_tcblock = 0;
14422 				}
14423 #if defined(__APPLE__)
14424 				SCTP_SOCKET_UNLOCK(so, 0);
14425 #endif
14426 #if (defined(__FreeBSD__) && __FreeBSD_version > 602000) || defined(__Userspace__)
14427 				mm = sctp_copy_resume(uio, (int)max_len, user_marks_eor, &error, &sndout, &new_tail);
14428 #else
14429 				mm = sctp_copy_resume(uio, (int)max_len, &error, &sndout, &new_tail);
14430 #endif
14431 #if defined(__APPLE__)
14432 				SCTP_SOCKET_LOCK(so, 0);
14433 #endif
14434 				if ((mm == NULL) || error) {
14435 					if (mm) {
14436 						sctp_m_freem(mm);
14437 					}
14438 					goto out;
14439 				}
14440 				/* Update the mbuf and count */
14441 				SCTP_TCB_SEND_LOCK(stcb);
14442 				if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
14443 					/* we need to get out.
14444 					 * Peer probably aborted.
14445 					 */
14446 					sctp_m_freem(mm);
14447 					if (stcb->asoc.state & SCTP_PCB_FLAGS_WAS_ABORTED) {
14448 						SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
14449 						error = ECONNRESET;
14450 					}
14451 					SCTP_TCB_SEND_UNLOCK(stcb);
14452 					goto out;
14453 				}
14454 				if (sp->tail_mbuf) {
14455 					/* tack it to the end */
14456 					SCTP_BUF_NEXT(sp->tail_mbuf) = mm;
14457 					sp->tail_mbuf = new_tail;
14458 				} else {
14459 					/* A stolen mbuf */
14460 					sp->data = mm;
14461 					sp->tail_mbuf = new_tail;
14462 				}
14463 				sctp_snd_sb_alloc(stcb, sndout);
14464 				atomic_add_int(&sp->length, sndout);
14465 				len += sndout;
14466 				if (sinfo_flags & SCTP_SACK_IMMEDIATELY) {
14467 					sp->sinfo_flags |= SCTP_SACK_IMMEDIATELY;
14468 				}
14469 
14470 				/* Did we reach EOR? */
14471 #if defined(__APPLE__)
14472 #if defined(APPLE_LEOPARD)
14473 				if ((uio->uio_resid == 0) &&
14474 #else
14475 				if ((uio_resid(uio) == 0) &&
14476 #endif
14477 #else
14478 				if ((uio->uio_resid == 0) &&
14479 #endif
14480 				    ((user_marks_eor == 0) ||
14481 				     (sinfo_flags & SCTP_EOF) ||
14482 				     (user_marks_eor && (sinfo_flags & SCTP_EOR)))) {
14483 					sp->msg_is_complete = 1;
14484 				} else {
14485 					sp->msg_is_complete = 0;
14486 				}
14487 				SCTP_TCB_SEND_UNLOCK(stcb);
14488 			}
14489 #if defined(__APPLE__)
14490 #if defined(APPLE_LEOPARD)
14491 			if (uio->uio_resid == 0) {
14492 #else
14493 			if (uio_resid(uio) == 0) {
14494 #endif
14495 #else
14496 			if (uio->uio_resid == 0) {
14497 #endif
14498 				/* got it all? */
14499 				continue;
14500 			}
14501 			/* PR-SCTP? */
14502 			if ((asoc->prsctp_supported) && (asoc->sent_queue_cnt_removeable > 0)) {
14503 				/* This is ugly but we must ensure the locking order */
14504 				if (hold_tcblock == 0) {
14505 					SCTP_TCB_LOCK(stcb);
14506 					hold_tcblock = 1;
14507 				}
14508 				sctp_prune_prsctp(stcb, asoc, srcv, (int)sndlen);
14509 				inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * SCTP_DATA_CHUNK_OVERHEAD(stcb));
14510 				if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes)
14511 					max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
14512 				else
14513 					max_len = 0;
14514 				if (max_len > 0) {
14515 					continue;
14516 				}
14517 				SCTP_TCB_UNLOCK(stcb);
14518 				hold_tcblock = 0;
14519 			}
14520 			/* wait for space now */
14521 			if (non_blocking) {
14522 				/* Non-blocking I/O is in effect, so bail out instead of waiting */
14523 				goto skip_out_eof;
14524 			}
14525 			/* What about the INIT, send it maybe */
14526 			if (queue_only_for_init) {
14527 				if (hold_tcblock == 0) {
14528 					SCTP_TCB_LOCK(stcb);
14529 					hold_tcblock = 1;
14530 				}
14531 				if (SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) {
14532 					/* a collision took us forward? */
14533 					queue_only = 0;
14534 				} else {
14535 #if defined(__FreeBSD__)
14536 					NET_EPOCH_ENTER(et);
14537 #endif
14538 					sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
14539 #if defined(__FreeBSD__)
14540 					NET_EPOCH_EXIT(et);
14541 #endif
14542 					SCTP_SET_STATE(stcb, SCTP_STATE_COOKIE_WAIT);
14543 					queue_only = 1;
14544 				}
14545 			}
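			/*
			 * Throttle: a full congestion window (with CMT off) or
			 * a recent ENOBUFS from the interface with enough data
			 * still in flight means we only queue for now.
			 */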
14546 			if ((net->flight_size > net->cwnd) &&
14547 			    (asoc->sctp_cmt_on_off == 0)) {
14548 				SCTP_STAT_INCR(sctps_send_cwnd_avoid);
14549 				queue_only = 1;
14550 			} else if (asoc->ifp_had_enobuf) {
14551 				SCTP_STAT_INCR(sctps_ifnomemqueued);
14552 				if (net->flight_size > (2 * net->mtu)) {
14553 					queue_only = 1;
14554 				}
14555 				asoc->ifp_had_enobuf = 0;
14556 			}
14557 			un_sent = stcb->asoc.total_output_queue_size - stcb->asoc.total_flight;
14558 			if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
14559 			    (stcb->asoc.total_flight > 0) &&
14560 			    (stcb->asoc.stream_queue_cnt < SCTP_MAX_DATA_BUNDLING) &&
14561 			    (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))) {
14562 
14563 				/*-
14564 				 * Ok, Nagle is set on and we have data outstanding.
14565 				 * Don't send anything and let SACKs drive out the
14566 				 * data unless we have a "full" segment to send.
14567 				 */
14568 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
14569 					sctp_log_nagle_event(stcb, SCTP_NAGLE_APPLIED);
14570 				}
14571 				SCTP_STAT_INCR(sctps_naglequeued);
14572 				nagle_applies = 1;
14573 			} else {
14574 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
14575 					if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY))
14576 						sctp_log_nagle_event(stcb, SCTP_NAGLE_SKIPPED);
14577 				}
14578 				SCTP_STAT_INCR(sctps_naglesent);
14579 				nagle_applies = 0;
14580 			}
14581 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
14582 
14583 				sctp_misc_ints(SCTP_CWNDLOG_PRESEND, queue_only_for_init, queue_only,
14584 					       nagle_applies, un_sent);
14585 				sctp_misc_ints(SCTP_CWNDLOG_PRESEND, stcb->asoc.total_output_queue_size,
14586 					       stcb->asoc.total_flight,
14587 					       stcb->asoc.chunks_on_out_queue, stcb->asoc.total_flight_count);
14588 			}
14589 			if (queue_only_for_init)
14590 				queue_only_for_init = 0;
14591 			if ((queue_only == 0) && (nagle_applies == 0)) {
14592 				/*-
14593 				 * Need to start chunk output
14594 				 * before blocking. Note that if
14595 				 * the lock is already held, input
14596 				 * processing from the network is
14597 				 * running and will drive output for us.
14598 				 */
14599 #if defined(__FreeBSD__)
14600 				NET_EPOCH_ENTER(et);
14601 #endif
14602 				if (hold_tcblock == 0) {
14603 					if (SCTP_TCB_TRYLOCK(stcb)) {
14604 						hold_tcblock = 1;
14605 						sctp_chunk_output(inp,
14606 								  stcb,
14607 								  SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
14608 					}
14609 				} else {
14610 					sctp_chunk_output(inp,
14611 							  stcb,
14612 							  SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
14613 				}
14614 #if defined(__FreeBSD__)
14615 				NET_EPOCH_EXIT(et);
14616 #endif
14617 			}
14618 			if (hold_tcblock == 1) {
14619 				SCTP_TCB_UNLOCK(stcb);
14620 				hold_tcblock = 0;
14621 			}
14622 			SOCKBUF_LOCK(&so->so_snd);
14623 			/*-
14624 			 * This is a bit strange, but I think it will
14625 			 * work. The total_output_queue_size is locked and
14626 			 * protected by the TCB_LOCK, which we just released.
14627 			 * There is a race that can occur between releasing it
14628 			 * above, and me getting the socket lock, where sacks
14629 			 * come in but we have not put the SB_WAIT on the
14630 			 * so_snd buffer to get the wakeup. After the LOCK
14631 			 * is applied the sack_processing will also need to
14632 			 * LOCK the so->so_snd to do the actual sowwakeup(). So
14633 			 * once we have the socket buffer lock if we recheck the
14634 			 * size we KNOW we will get to sleep safely with the
14635 			 * wakeup flag in place.
14636 			 */
14637 			inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * SCTP_DATA_CHUNK_OVERHEAD(stcb));
14638 			if (SCTP_SB_LIMIT_SND(so) <= (inqueue_bytes +
14639 						      min(SCTP_BASE_SYSCTL(sctp_add_more_threshold), SCTP_SB_LIMIT_SND(so)))) {
14640 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
14641 #if defined(__APPLE__)
14642 #if defined(APPLE_LEOPARD)
14643 					sctp_log_block(SCTP_BLOCK_LOG_INTO_BLK,
14644 						       asoc, uio->uio_resid);
14645 #else
14646 					sctp_log_block(SCTP_BLOCK_LOG_INTO_BLK,
14647 						       asoc, uio_resid(uio));
14648 #endif
14649 #else
14650 					sctp_log_block(SCTP_BLOCK_LOG_INTO_BLK,
14651 						       asoc, uio->uio_resid);
14652 #endif
14653 				}
14654 				be.error = 0;
14655 #if !defined(__Panda__) && !defined(__Windows__)
14656 				stcb->block_entry = &be;
14657 #endif
14658 #if defined(__APPLE__)
14659 				sbunlock(&so->so_snd, 1);
14660 #endif
14661 				error = sbwait(&so->so_snd);
14662 				stcb->block_entry = NULL;
14663 
14664 				if (error || so->so_error || be.error) {
14665 					if (error == 0) {
14666 						if (so->so_error)
14667 							error = so->so_error;
14668 						if (be.error) {
14669 							error = be.error;
14670 						}
14671 					}
14672 					SOCKBUF_UNLOCK(&so->so_snd);
14673 					goto out_unlocked;
14674 				}
14675 
14676 #if defined(__APPLE__)
14677 				error = sblock(&so->so_snd, SBLOCKWAIT(flags));
14678 #endif
14679 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
14680 					sctp_log_block(SCTP_BLOCK_LOG_OUTOF_BLK,
14681 						       asoc, stcb->asoc.total_output_queue_size);
14682 				}
14683 			}
14684 			SOCKBUF_UNLOCK(&so->so_snd);
14685 			if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
14686 				goto out_unlocked;
14687 			}
14688 		}
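		/*
		 * Done copying: record whether the last message on this stream
		 * is still incomplete so a later send call can resume it.
		 */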
14689 		SCTP_TCB_SEND_LOCK(stcb);
14690 		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
14691 			SCTP_TCB_SEND_UNLOCK(stcb);
14692 			goto out_unlocked;
14693 		}
14694 		if (sp) {
14695 			if (sp->msg_is_complete == 0) {
14696 				strm->last_msg_incomplete = 1;
14697 				if (stcb->asoc.idata_supported == 0) {
14698 					asoc->stream_locked = 1;
14699 					asoc->stream_locked_on  = srcv->sinfo_stream;
14700 				}
14701 			} else {
14702 				sp->sender_all_done = 1;
14703 				strm->last_msg_incomplete = 0;
14704 				asoc->stream_locked = 0;
14705 			}
14706 		} else {
14707 			SCTP_PRINTF("Huh no sp TSNH?\n");
14708 			strm->last_msg_incomplete = 0;
14709 			asoc->stream_locked = 0;
14710 		}
14711 		SCTP_TCB_SEND_UNLOCK(stcb);
14712 #if defined(__APPLE__)
14713 #if defined(APPLE_LEOPARD)
14714 		if (uio->uio_resid == 0) {
14715 #else
14716 		if (uio_resid(uio) == 0) {
14717 #endif
14718 #else
14719 		if (uio->uio_resid == 0) {
14720 #endif
14721 			got_all_of_the_send = 1;
14722 		}
14723 	} else {
14724 		/* We send in a 0, since we do NOT have any locks */
14725 		error = sctp_msg_append(stcb, net, top, srcv, 0);
14726 		top = NULL;
14727 		if (sinfo_flags & SCTP_EOF) {
14728 			/*
14729 			 * This should only happen for Panda for the mbuf
14730 			 * send case, which does NOT yet support EEOR mode.
14731 			 * Thus, we can just set this flag to do the proper
14732 			 * EOF handling.
14733 			 */
14734 			got_all_of_the_send = 1;
14735 		}
14736 	}
14737 	if (error) {
14738 		goto out;
14739 	}
14740 dataless_eof:
14741 	/* Handle SCTP_EOF: start the shutdown sequence once all data is queued */
14742 	if ((sinfo_flags & SCTP_EOF) &&
14743 	    (got_all_of_the_send == 1)) {
14744 		SCTP_STAT_INCR(sctps_sends_with_eof);
14745 		error = 0;
14746 		if (hold_tcblock == 0) {
14747 			SCTP_TCB_LOCK(stcb);
14748 			hold_tcblock = 1;
14749 		}
14750 		if (TAILQ_EMPTY(&asoc->send_queue) &&
14751 		    TAILQ_EMPTY(&asoc->sent_queue) &&
14752 		    sctp_is_there_unsent_data(stcb, SCTP_SO_LOCKED) == 0) {
14753 			if ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete)(stcb, asoc)) {
14754 				goto abort_anyway;
14755 			}
14756 			/* there is nothing queued to send, so I'm done... */
14757 			if ((SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_SENT) &&
14758 			    (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
14759 			    (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
14760 				struct sctp_nets *netp;
14761 
14762 				/* only send SHUTDOWN the first time through */
14763 				if (SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) {
14764 					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
14765 				}
14766 				SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
14767 				sctp_stop_timers_for_shutdown(stcb);
14768 				if (stcb->asoc.alternate) {
14769 					netp = stcb->asoc.alternate;
14770 				} else {
14771 					netp = stcb->asoc.primary_destination;
14772 				}
14773 				sctp_send_shutdown(stcb, netp);
14774 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb,
14775 				                 netp);
14776 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
14777 				                 NULL);
14778 			}
14779 		} else {
14780 			/*-
14781 			 * we still got (or just got) data to send, so set
14782 			 * SHUTDOWN_PENDING
14783 			 */
14784 			/*-
14785 			 * XXX sockets draft says that SCTP_EOF should be
14786 			 * sent with no data.  Currently, we allow user
14787 			 * data to be sent first and then move to
14788 			 * SHUTDOWN-PENDING.
14789 			 */
14790 			if ((SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_SENT) &&
14791 			    (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
14792 			    (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
14793 				if (hold_tcblock == 0) {
14794 					SCTP_TCB_LOCK(stcb);
14795 					hold_tcblock = 1;
14796 				}
14797 				if ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete)(stcb, asoc)) {
14798 					SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT);
14799 				}
14800 				SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_SHUTDOWN_PENDING);
14801 				if (TAILQ_EMPTY(&asoc->send_queue) &&
14802 				    TAILQ_EMPTY(&asoc->sent_queue) &&
14803 				    (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
14804 					struct mbuf *op_err;
14805 					char msg[SCTP_DIAG_INFO_LEN];
14806 
14807 				abort_anyway:
14808 					if (free_cnt_applied) {
14809 						atomic_add_int(&stcb->asoc.refcnt, -1);
14810 						free_cnt_applied = 0;
14811 					}
14812 					SCTP_SNPRINTF(msg, sizeof(msg),
14813 					              "%s:%d at %s", __FILE__, __LINE__, __func__);
14814 					op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
14815 					                             msg);
14816 #if defined(__FreeBSD__)
14817 					NET_EPOCH_ENTER(et);
14818 #endif
14819 					sctp_abort_an_association(stcb->sctp_ep, stcb,
14820 					                          op_err, SCTP_SO_LOCKED);
14821 #if defined(__FreeBSD__)
14822 					NET_EPOCH_EXIT(et);
14823 #endif
14824 					/* the association has been aborted; clear stcb so we do not touch or unlock it on the way out */
14825 					hold_tcblock = 0;
14826 					stcb = NULL;
14827 					goto out;
14828 				}
14829 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
14830 				                 NULL);
14831 				sctp_feature_off(inp, SCTP_PCB_FLAGS_NODELAY);
14832 			}
14833 		}
14834 	}
14835 skip_out_eof:
14836 	if (!TAILQ_EMPTY(&stcb->asoc.control_send_queue)) {
14837 		some_on_control = 1;
14838 	}
14839 	if (queue_only_for_init) {
14840 		if (hold_tcblock == 0) {
14841 			SCTP_TCB_LOCK(stcb);
14842 			hold_tcblock = 1;
14843 		}
14844 		if (SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) {
14845 			/* a collision took us forward? */
14846 			queue_only = 0;
14847 		} else {
14848 #if defined(__FreeBSD__)
14849 			NET_EPOCH_ENTER(et);
14850 #endif
14851 			sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
14852 #if defined(__FreeBSD__)
14853 			NET_EPOCH_EXIT(et);
14854 #endif
14855 			SCTP_SET_STATE(stcb, SCTP_STATE_COOKIE_WAIT);
14856 			queue_only = 1;
14857 		}
14858 	}
14859 	if ((net->flight_size > net->cwnd) &&
14860 	    (stcb->asoc.sctp_cmt_on_off == 0)) {
14861 		SCTP_STAT_INCR(sctps_send_cwnd_avoid);
14862 		queue_only = 1;
14863 	} else if (asoc->ifp_had_enobuf) {
14864 		SCTP_STAT_INCR(sctps_ifnomemqueued);
14865 		if (net->flight_size > (2 * net->mtu)) {
14866 			queue_only = 1;
14867 		}
14868 		asoc->ifp_had_enobuf = 0;
14869 	}
14870 	un_sent = stcb->asoc.total_output_queue_size - stcb->asoc.total_flight;
14871 	if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
14872 	    (stcb->asoc.total_flight > 0) &&
14873 	    (stcb->asoc.stream_queue_cnt < SCTP_MAX_DATA_BUNDLING) &&
14874 	    (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))) {
14875 		/*-
14876 		 * Ok, Nagle is set on and we have data outstanding.
14877 		 * Don't send anything and let SACKs drive out the
14878 		 * data unless we have a "full" segment to send.
14879 		 */
14880 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
14881 			sctp_log_nagle_event(stcb, SCTP_NAGLE_APPLIED);
14882 		}
14883 		SCTP_STAT_INCR(sctps_naglequeued);
14884 		nagle_applies = 1;
14885 	} else {
14886 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
14887 			if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY))
14888 				sctp_log_nagle_event(stcb, SCTP_NAGLE_SKIPPED);
14889 		}
14890 		SCTP_STAT_INCR(sctps_naglesent);
14891 		nagle_applies = 0;
14892 	}
14893 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
14894 		sctp_misc_ints(SCTP_CWNDLOG_PRESEND, queue_only_for_init, queue_only,
14895 		               nagle_applies, un_sent);
14896 		sctp_misc_ints(SCTP_CWNDLOG_PRESEND, stcb->asoc.total_output_queue_size,
14897 		               stcb->asoc.total_flight,
14898 		               stcb->asoc.chunks_on_out_queue, stcb->asoc.total_flight_count);
14899 	}
14900 #if defined(__FreeBSD__)
14901 	NET_EPOCH_ENTER(et);
14902 #endif
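	/*
	 * Final transmit decision: start chunk output now unless Nagle or the
	 * checks above said to queue only; if the peer's rwnd is zero and
	 * nothing is in flight, send a window probe; otherwise flush any
	 * pending control chunks.
	 */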
14903 	if ((queue_only == 0) && (nagle_applies == 0) && (stcb->asoc.peers_rwnd && un_sent)) {
14904 		/* we can attempt to send too. */
14905 		if (hold_tcblock == 0) {
14906 			/* If there is activity recv'ing sacks no need to send */
14907 			if (SCTP_TCB_TRYLOCK(stcb)) {
14908 				sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
14909 				hold_tcblock = 1;
14910 			}
14911 		} else {
14912 			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
14913 		}
14914 	} else if ((queue_only == 0) &&
14915 	           (stcb->asoc.peers_rwnd == 0) &&
14916 	           (stcb->asoc.total_flight == 0)) {
14917 		/* We get to have a probe outstanding */
14918 		if (hold_tcblock == 0) {
14919 			hold_tcblock = 1;
14920 			SCTP_TCB_LOCK(stcb);
14921 		}
14922 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
14923 	} else if (some_on_control) {
14924 		int num_out, reason, frag_point;
14925 
14926 		/* Here we do control only */
14927 		if (hold_tcblock == 0) {
14928 			hold_tcblock = 1;
14929 			SCTP_TCB_LOCK(stcb);
14930 		}
14931 		frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
14932 		(void)sctp_med_chunk_output(inp, stcb, &stcb->asoc, &num_out,
14933 		                            &reason, 1, 1, &now, &now_filled, frag_point, SCTP_SO_LOCKED);
14934 	}
14935 #if defined(__FreeBSD__)
14936 	NET_EPOCH_EXIT(et);
14937 #endif
14938 	SCTPDBG(SCTP_DEBUG_OUTPUT1, "USR Send complete qo:%d prw:%d unsent:%d tf:%d cooq:%d toqs:%d err:%d\n",
14939 	        queue_only, stcb->asoc.peers_rwnd, un_sent,
14940 		stcb->asoc.total_flight, stcb->asoc.chunks_on_out_queue,
14941 	        stcb->asoc.total_output_queue_size, error);
14942 
14943 out:
14944 #if defined(__APPLE__)
14945 	sbunlock(&so->so_snd, 1);
14946 #endif
14947 out_unlocked:
14948 
14949 	if (local_soresv && stcb) {
14950 		atomic_subtract_int(&stcb->asoc.sb_send_resv, sndlen);
14951 	}
14952 	if (create_lock_applied) {
14953 		SCTP_ASOC_CREATE_UNLOCK(inp);
14954 	}
14955 	if ((stcb) && hold_tcblock) {
14956 		SCTP_TCB_UNLOCK(stcb);
14957 	}
14958 	if (stcb && free_cnt_applied) {
14959 		atomic_add_int(&stcb->asoc.refcnt, -1);
14960 	}
14961 #ifdef INVARIANTS
14962 #if defined(__FreeBSD__)
14963 	if (stcb) {
14964 		if (mtx_owned(&stcb->tcb_mtx)) {
14965 			panic("Leaving with tcb mtx owned?");
14966 		}
14967 		if (mtx_owned(&stcb->tcb_send_mtx)) {
14968 			panic("Leaving with tcb send mtx owned?");
14969 		}
14970 	}
14971 #endif
14972 #endif
14973 #ifdef __Panda__
14974 	/*
14975 	 * Handle the EAGAIN/ENOMEM cases to reattach the pak header
14976 	 * to particle when pak is passed in, so that caller
14977 	 * can try again with this pak
14978 	 *
14979 	 * NOTE: For other cases, including success case,
14980 	 * we simply want to return the header back to free
14981 	 * pool
14982 	 */
14983 	if (top) {
14984 		if ((error == EAGAIN) || (error == ENOMEM)) {
14985 			SCTP_ATTACH_CHAIN(i_pak, top, sndlen);
14986 			top = NULL;
14987 		} else {
14988 			(void)SCTP_RELEASE_HEADER(i_pak);
14989 		}
14990 	} else {
14991 		/* This is to handle cases when top has
14992 		 * been reset to NULL but pak might not
14993 		 * be freed
14994 		 */
14995 		if (i_pak) {
14996 			(void)SCTP_RELEASE_HEADER(i_pak);
14997 		}
14998 	}
14999 #endif
15000 	if (top) {
15001 		sctp_m_freem(top);
15002 	}
15003 	if (control) {
15004 		sctp_m_freem(control);
15005 	}
15006 	return (error);
15007 }
15008 
15009 
15010 /*
15011  * generate an AUTHentication chunk, if required
15012  */
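/*
 * The AUTH chunk is appended at *m_end of the outgoing chain (or starts a
 * new chain), *offset is set to where it begins so the HMAC can be computed
 * and filled in when the packet is actually sent, and *auth_ret points at
 * the chunk itself.  A caller building an outgoing packet does roughly:
 *
 *	outchain = sctp_add_auth_chunk(outchain, &endoutchain, &auth,
 *	                               &auth_offset, stcb, chunk_type);
 *
 * (illustrative only; the local variable names here are hypothetical).
 */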
15013 struct mbuf *
15014 sctp_add_auth_chunk(struct mbuf *m, struct mbuf **m_end,
15015     struct sctp_auth_chunk **auth_ret, uint32_t * offset,
15016     struct sctp_tcb *stcb, uint8_t chunk)
15017 {
15018 	struct mbuf *m_auth;
15019 	struct sctp_auth_chunk *auth;
15020 	int chunk_len;
15021 	struct mbuf *cn;
15022 
15023 	if ((m_end == NULL) || (auth_ret == NULL) || (offset == NULL) ||
15024 	    (stcb == NULL))
15025 		return (m);
15026 
15027 	if (stcb->asoc.auth_supported == 0) {
15028 		return (m);
15029 	}
15030 	/* does the requested chunk require auth? */
15031 	if (!sctp_auth_is_required_chunk(chunk, stcb->asoc.peer_auth_chunks)) {
15032 		return (m);
15033 	}
15034 	m_auth = sctp_get_mbuf_for_msg(sizeof(*auth), 0, M_NOWAIT, 1, MT_HEADER);
15035 	if (m_auth == NULL) {
15036 		/* no mbufs available */
15037 		return (m);
15038 	}
15039 	/* reserve some space if this will be the first mbuf */
15040 	if (m == NULL)
15041 		SCTP_BUF_RESV_UF(m_auth, SCTP_MIN_OVERHEAD);
15042 	/* fill in the AUTH chunk details */
15043 	auth = mtod(m_auth, struct sctp_auth_chunk *);
15044 	memset(auth, 0, sizeof(*auth));
15045 	auth->ch.chunk_type = SCTP_AUTHENTICATION;
15046 	auth->ch.chunk_flags = 0;
15047 	chunk_len = sizeof(*auth) +
15048 	    sctp_get_hmac_digest_len(stcb->asoc.peer_hmac_id);
15049 	auth->ch.chunk_length = htons(chunk_len);
15050 	auth->hmac_id = htons(stcb->asoc.peer_hmac_id);
15051 	/* key id and hmac digest will be computed and filled in upon send */
15052 
15053 	/* save the offset where the auth was inserted into the chain */
15054 	*offset = 0;
15055 	for (cn = m; cn; cn = SCTP_BUF_NEXT(cn)) {
15056 		*offset += SCTP_BUF_LEN(cn);
15057 	}
15058 
15059 	/* update length and return pointer to the auth chunk */
15060 	SCTP_BUF_LEN(m_auth) = chunk_len;
15061 	m = sctp_copy_mbufchain(m_auth, m, m_end, 1, chunk_len, 0);
15062 	if (auth_ret != NULL)
15063 		*auth_ret = auth;
15064 
15065 	return (m);
15066 }
15067 
15068 #if defined(__FreeBSD__)  || defined(__APPLE__)
15069 #ifdef INET6
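/*
 * Return 1 if the IPv6 source address src6 lies within an on-link prefix
 * whose advertising router matches the next hop of the route ro, else 0.
 */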
15070 int
15071 sctp_v6src_match_nexthop(struct sockaddr_in6 *src6, sctp_route_t *ro)
15072 {
15073 	struct nd_prefix *pfx = NULL;
15074 	struct nd_pfxrouter *pfxrtr = NULL;
15075 	struct sockaddr_in6 gw6;
15076 
15077 #if defined(__FreeBSD__)
15078 	if (ro == NULL || ro->ro_nh == NULL || src6->sin6_family != AF_INET6)
15079 #else
15080 	if (ro == NULL || ro->ro_rt == NULL || src6->sin6_family != AF_INET6)
15081 #endif
15082 		return (0);
15083 
15084 	/* get prefix entry of address */
15085 #if defined(__FreeBSD__)
15086 	ND6_RLOCK();
15087 #endif
15088 	LIST_FOREACH(pfx, &MODULE_GLOBAL(nd_prefix), ndpr_entry) {
15089 		if (pfx->ndpr_stateflags & NDPRF_DETACHED)
15090 			continue;
15091 		if (IN6_ARE_MASKED_ADDR_EQUAL(&pfx->ndpr_prefix.sin6_addr,
15092 		    &src6->sin6_addr, &pfx->ndpr_mask))
15093 			break;
15094 	}
15095 	/* no prefix entry in the prefix list */
15096 	if (pfx == NULL) {
15097 #if defined(__FreeBSD__)
15098 		ND6_RUNLOCK();
15099 #endif
15100 		SCTPDBG(SCTP_DEBUG_OUTPUT2, "No prefix entry for ");
15101 		SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)src6);
15102 		return (0);
15103 	}
15104 
15105 	SCTPDBG(SCTP_DEBUG_OUTPUT2, "v6src_match_nexthop(), Prefix entry is ");
15106 	SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)src6);
15107 
15108 	/* search installed gateway from prefix entry */
15109 	LIST_FOREACH(pfxrtr, &pfx->ndpr_advrtrs, pfr_entry) {
15110 		memset(&gw6, 0, sizeof(struct sockaddr_in6));
15111 		gw6.sin6_family = AF_INET6;
15112 #ifdef HAVE_SIN6_LEN
15113 		gw6.sin6_len = sizeof(struct sockaddr_in6);
15114 #endif
15115 		memcpy(&gw6.sin6_addr, &pfxrtr->router->rtaddr,
15116 		    sizeof(struct in6_addr));
15117 		SCTPDBG(SCTP_DEBUG_OUTPUT2, "prefix router is ");
15118 		SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)&gw6);
15119 		SCTPDBG(SCTP_DEBUG_OUTPUT2, "installed router is ");
15120 #if defined(__FreeBSD__)
15121 		SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &ro->ro_nh->gw_sa);
15122 #else
15123 		SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, ro->ro_rt->rt_gateway);
15124 #endif
15125 #if defined(__FreeBSD__)
15126 		if (sctp_cmpaddr((struct sockaddr *)&gw6, &ro->ro_nh->gw_sa)) {
15127 			ND6_RUNLOCK();
15128 #else
15129 		if (sctp_cmpaddr((struct sockaddr *)&gw6, ro->ro_rt->rt_gateway)) {
15130 #endif
15131 			SCTPDBG(SCTP_DEBUG_OUTPUT2, "pfxrouter is installed\n");
15132 			return (1);
15133 		}
15134 	}
15135 #if defined(__FreeBSD__)
15136 	ND6_RUNLOCK();
15137 #endif
15138 	SCTPDBG(SCTP_DEBUG_OUTPUT2, "pfxrouter is not installed\n");
15139 	return (0);
15140 }
15141 #endif
15142 
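/*
 * Return 1 if the IPv4 source address and the route's next hop gateway are
 * on the same subnet (as determined by the source interface's netmask),
 * else 0.
 */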
15143 int
15144 sctp_v4src_match_nexthop(struct sctp_ifa *sifa, sctp_route_t *ro)
15145 {
15146 #ifdef INET
15147 	struct sockaddr_in *sin, *mask;
15148 	struct ifaddr *ifa;
15149 	struct in_addr srcnetaddr, gwnetaddr;
15150 
15151 #if defined(__FreeBSD__)
15152 	if (ro == NULL || ro->ro_nh == NULL ||
15153 #else
15154 	if (ro == NULL || ro->ro_rt == NULL ||
15155 #endif
15156 	    sifa->address.sa.sa_family != AF_INET) {
15157 		return (0);
15158 	}
15159 	ifa = (struct ifaddr *)sifa->ifa;
15160 	mask = (struct sockaddr_in *)(ifa->ifa_netmask);
15161 	sin = &sifa->address.sin;
15162 	srcnetaddr.s_addr = (sin->sin_addr.s_addr & mask->sin_addr.s_addr);
15163 	SCTPDBG(SCTP_DEBUG_OUTPUT1, "match_nexthop4: src address is ");
15164 	SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &sifa->address.sa);
15165 	SCTPDBG(SCTP_DEBUG_OUTPUT1, "network address is %x\n", srcnetaddr.s_addr);
15166 
15167 #if defined(__FreeBSD__)
15168 	sin = &ro->ro_nh->gw4_sa;
15169 #else
15170 	sin = (struct sockaddr_in *)ro->ro_rt->rt_gateway;
15171 #endif
15172 	gwnetaddr.s_addr = (sin->sin_addr.s_addr & mask->sin_addr.s_addr);
15173 	SCTPDBG(SCTP_DEBUG_OUTPUT1, "match_nexthop4: nexthop is ");
15174 #if defined(__FreeBSD__)
15175 	SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &ro->ro_nh->gw_sa);
15176 #else
15177 	SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, ro->ro_rt->rt_gateway);
15178 #endif
15179 	SCTPDBG(SCTP_DEBUG_OUTPUT1, "network address is %x\n", gwnetaddr.s_addr);
15180 	if (srcnetaddr.s_addr == gwnetaddr.s_addr) {
15181 		return (1);
15182 	}
15183 #endif
15184 	return (0);
15185 }
15186 #elif defined(__Userspace__)
15187 /* TODO __Userspace__ versions of sctp_vXsrc_match_nexthop(). */
15188 int
15189 sctp_v6src_match_nexthop(struct sockaddr_in6 *src6, sctp_route_t *ro)
15190 {
15191     return (0);
15192 }
15193 int
15194 sctp_v4src_match_nexthop(struct sctp_ifa *sifa, sctp_route_t *ro)
15195 {
15196     return (0);
15197 }
15198 
15199 #endif
15200