1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
5  * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
6  * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are met:
10  *
11  * a) Redistributions of source code must retain the above copyright notice,
12  *    this list of conditions and the following disclaimer.
13  *
14  * b) Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in
16  *    the documentation and/or other materials provided with the distribution.
17  *
18  * c) Neither the name of Cisco Systems, Inc. nor the names of its
19  *    contributors may be used to endorse or promote products derived
20  *    from this software without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
24  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32  * THE POSSIBILITY OF SUCH DAMAGE.
33  */
34 
35 #if defined(__FreeBSD__) && !defined(__Userspace__)
36 #include <sys/cdefs.h>
37 __FBSDID("$FreeBSD: head/sys/netinet/sctp_output.c 362178 2020-06-14 16:05:08Z tuexen $");
38 #endif
39 
40 #include <netinet/sctp_os.h>
41 #if defined(__FreeBSD__) && !defined(__Userspace__)
42 #include <sys/proc.h>
43 #endif
44 #include <netinet/sctp_var.h>
45 #include <netinet/sctp_sysctl.h>
46 #include <netinet/sctp_header.h>
47 #include <netinet/sctp_pcb.h>
48 #include <netinet/sctputil.h>
49 #include <netinet/sctp_output.h>
50 #include <netinet/sctp_uio.h>
51 #include <netinet/sctputil.h>
52 #include <netinet/sctp_auth.h>
53 #include <netinet/sctp_timer.h>
54 #include <netinet/sctp_asconf.h>
55 #include <netinet/sctp_indata.h>
56 #include <netinet/sctp_bsd_addr.h>
57 #include <netinet/sctp_input.h>
58 #include <netinet/sctp_crc32.h>
59 #if defined(__FreeBSD__) && !defined(__Userspace__)
60 #include <netinet/sctp_kdtrace.h>
61 #endif
62 #if defined(__linux__)
63 #define __FAVOR_BSD    /* enables BSD-style UDP header field names (as in RFC 768), at least on Ubuntu */
64 #endif
65 #if defined(INET) || defined(INET6)
66 #if !defined(_WIN32)
67 #include <netinet/udp.h>
68 #endif
69 #endif
70 #if !defined(__Userspace__)
71 #if defined(__APPLE__)
72 #include <netinet/in.h>
73 #endif
74 #if defined(__FreeBSD__) && !defined(__Userspace__)
75 #include <netinet/udp_var.h>
76 #include <machine/in_cksum.h>
77 #endif
78 #endif
79 #if defined(__Userspace__) && defined(INET6)
80 #include <netinet6/sctp6_var.h>
81 #endif
82 
83 #if defined(__APPLE__) && !defined(__Userspace__)
84 #if !(defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD))
85 #define SCTP_MAX_LINKHDR 16
86 #endif
87 #endif
88 
89 #define SCTP_MAX_GAPS_INARRAY 4
90 struct sack_track {
91 	uint8_t right_edge;	/* mergeable on the right edge */
92 	uint8_t left_edge;	/* mergeable on the left edge */
93 	uint8_t num_entries;
94 	uint8_t spare;
95 	struct sctp_gap_ack_block gaps[SCTP_MAX_GAPS_INARRAY];
96 };
97 
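/*
 * Precomputed lookup table, indexed by an 8-bit slice of received-TSN
 * flags (e.g. one byte of the SACK mapping array).  gaps[] lists the
 * {start, end} bit offsets of each run of set bits in the index byte
 * and num_entries is the number of such runs; right_edge is set when
 * bit 0 is set (the first run may merge with the preceding byte) and
 * left_edge when bit 7 is set (the last run may merge with the
 * following byte).  For example, index 0x29 = 00101001 has bits 0, 3
 * and 5 set, giving {1, 0, 3, 0, {{0, 0}, {3, 3}, {5, 5}, {0, 0}}}.
 */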
98 const struct sack_track sack_array[256] = {
99 	{0, 0, 0, 0,		/* 0x00 */
100 		{{0, 0},
101 		{0, 0},
102 		{0, 0},
103 		{0, 0}
104 		}
105 	},
106 	{1, 0, 1, 0,		/* 0x01 */
107 		{{0, 0},
108 		{0, 0},
109 		{0, 0},
110 		{0, 0}
111 		}
112 	},
113 	{0, 0, 1, 0,		/* 0x02 */
114 		{{1, 1},
115 		{0, 0},
116 		{0, 0},
117 		{0, 0}
118 		}
119 	},
120 	{1, 0, 1, 0,		/* 0x03 */
121 		{{0, 1},
122 		{0, 0},
123 		{0, 0},
124 		{0, 0}
125 		}
126 	},
127 	{0, 0, 1, 0,		/* 0x04 */
128 		{{2, 2},
129 		{0, 0},
130 		{0, 0},
131 		{0, 0}
132 		}
133 	},
134 	{1, 0, 2, 0,		/* 0x05 */
135 		{{0, 0},
136 		{2, 2},
137 		{0, 0},
138 		{0, 0}
139 		}
140 	},
141 	{0, 0, 1, 0,		/* 0x06 */
142 		{{1, 2},
143 		{0, 0},
144 		{0, 0},
145 		{0, 0}
146 		}
147 	},
148 	{1, 0, 1, 0,		/* 0x07 */
149 		{{0, 2},
150 		{0, 0},
151 		{0, 0},
152 		{0, 0}
153 		}
154 	},
155 	{0, 0, 1, 0,		/* 0x08 */
156 		{{3, 3},
157 		{0, 0},
158 		{0, 0},
159 		{0, 0}
160 		}
161 	},
162 	{1, 0, 2, 0,		/* 0x09 */
163 		{{0, 0},
164 		{3, 3},
165 		{0, 0},
166 		{0, 0}
167 		}
168 	},
169 	{0, 0, 2, 0,		/* 0x0a */
170 		{{1, 1},
171 		{3, 3},
172 		{0, 0},
173 		{0, 0}
174 		}
175 	},
176 	{1, 0, 2, 0,		/* 0x0b */
177 		{{0, 1},
178 		{3, 3},
179 		{0, 0},
180 		{0, 0}
181 		}
182 	},
183 	{0, 0, 1, 0,		/* 0x0c */
184 		{{2, 3},
185 		{0, 0},
186 		{0, 0},
187 		{0, 0}
188 		}
189 	},
190 	{1, 0, 2, 0,		/* 0x0d */
191 		{{0, 0},
192 		{2, 3},
193 		{0, 0},
194 		{0, 0}
195 		}
196 	},
197 	{0, 0, 1, 0,		/* 0x0e */
198 		{{1, 3},
199 		{0, 0},
200 		{0, 0},
201 		{0, 0}
202 		}
203 	},
204 	{1, 0, 1, 0,		/* 0x0f */
205 		{{0, 3},
206 		{0, 0},
207 		{0, 0},
208 		{0, 0}
209 		}
210 	},
211 	{0, 0, 1, 0,		/* 0x10 */
212 		{{4, 4},
213 		{0, 0},
214 		{0, 0},
215 		{0, 0}
216 		}
217 	},
218 	{1, 0, 2, 0,		/* 0x11 */
219 		{{0, 0},
220 		{4, 4},
221 		{0, 0},
222 		{0, 0}
223 		}
224 	},
225 	{0, 0, 2, 0,		/* 0x12 */
226 		{{1, 1},
227 		{4, 4},
228 		{0, 0},
229 		{0, 0}
230 		}
231 	},
232 	{1, 0, 2, 0,		/* 0x13 */
233 		{{0, 1},
234 		{4, 4},
235 		{0, 0},
236 		{0, 0}
237 		}
238 	},
239 	{0, 0, 2, 0,		/* 0x14 */
240 		{{2, 2},
241 		{4, 4},
242 		{0, 0},
243 		{0, 0}
244 		}
245 	},
246 	{1, 0, 3, 0,		/* 0x15 */
247 		{{0, 0},
248 		{2, 2},
249 		{4, 4},
250 		{0, 0}
251 		}
252 	},
253 	{0, 0, 2, 0,		/* 0x16 */
254 		{{1, 2},
255 		{4, 4},
256 		{0, 0},
257 		{0, 0}
258 		}
259 	},
260 	{1, 0, 2, 0,		/* 0x17 */
261 		{{0, 2},
262 		{4, 4},
263 		{0, 0},
264 		{0, 0}
265 		}
266 	},
267 	{0, 0, 1, 0,		/* 0x18 */
268 		{{3, 4},
269 		{0, 0},
270 		{0, 0},
271 		{0, 0}
272 		}
273 	},
274 	{1, 0, 2, 0,		/* 0x19 */
275 		{{0, 0},
276 		{3, 4},
277 		{0, 0},
278 		{0, 0}
279 		}
280 	},
281 	{0, 0, 2, 0,		/* 0x1a */
282 		{{1, 1},
283 		{3, 4},
284 		{0, 0},
285 		{0, 0}
286 		}
287 	},
288 	{1, 0, 2, 0,		/* 0x1b */
289 		{{0, 1},
290 		{3, 4},
291 		{0, 0},
292 		{0, 0}
293 		}
294 	},
295 	{0, 0, 1, 0,		/* 0x1c */
296 		{{2, 4},
297 		{0, 0},
298 		{0, 0},
299 		{0, 0}
300 		}
301 	},
302 	{1, 0, 2, 0,		/* 0x1d */
303 		{{0, 0},
304 		{2, 4},
305 		{0, 0},
306 		{0, 0}
307 		}
308 	},
309 	{0, 0, 1, 0,		/* 0x1e */
310 		{{1, 4},
311 		{0, 0},
312 		{0, 0},
313 		{0, 0}
314 		}
315 	},
316 	{1, 0, 1, 0,		/* 0x1f */
317 		{{0, 4},
318 		{0, 0},
319 		{0, 0},
320 		{0, 0}
321 		}
322 	},
323 	{0, 0, 1, 0,		/* 0x20 */
324 		{{5, 5},
325 		{0, 0},
326 		{0, 0},
327 		{0, 0}
328 		}
329 	},
330 	{1, 0, 2, 0,		/* 0x21 */
331 		{{0, 0},
332 		{5, 5},
333 		{0, 0},
334 		{0, 0}
335 		}
336 	},
337 	{0, 0, 2, 0,		/* 0x22 */
338 		{{1, 1},
339 		{5, 5},
340 		{0, 0},
341 		{0, 0}
342 		}
343 	},
344 	{1, 0, 2, 0,		/* 0x23 */
345 		{{0, 1},
346 		{5, 5},
347 		{0, 0},
348 		{0, 0}
349 		}
350 	},
351 	{0, 0, 2, 0,		/* 0x24 */
352 		{{2, 2},
353 		{5, 5},
354 		{0, 0},
355 		{0, 0}
356 		}
357 	},
358 	{1, 0, 3, 0,		/* 0x25 */
359 		{{0, 0},
360 		{2, 2},
361 		{5, 5},
362 		{0, 0}
363 		}
364 	},
365 	{0, 0, 2, 0,		/* 0x26 */
366 		{{1, 2},
367 		{5, 5},
368 		{0, 0},
369 		{0, 0}
370 		}
371 	},
372 	{1, 0, 2, 0,		/* 0x27 */
373 		{{0, 2},
374 		{5, 5},
375 		{0, 0},
376 		{0, 0}
377 		}
378 	},
379 	{0, 0, 2, 0,		/* 0x28 */
380 		{{3, 3},
381 		{5, 5},
382 		{0, 0},
383 		{0, 0}
384 		}
385 	},
386 	{1, 0, 3, 0,		/* 0x29 */
387 		{{0, 0},
388 		{3, 3},
389 		{5, 5},
390 		{0, 0}
391 		}
392 	},
393 	{0, 0, 3, 0,		/* 0x2a */
394 		{{1, 1},
395 		{3, 3},
396 		{5, 5},
397 		{0, 0}
398 		}
399 	},
400 	{1, 0, 3, 0,		/* 0x2b */
401 		{{0, 1},
402 		{3, 3},
403 		{5, 5},
404 		{0, 0}
405 		}
406 	},
407 	{0, 0, 2, 0,		/* 0x2c */
408 		{{2, 3},
409 		{5, 5},
410 		{0, 0},
411 		{0, 0}
412 		}
413 	},
414 	{1, 0, 3, 0,		/* 0x2d */
415 		{{0, 0},
416 		{2, 3},
417 		{5, 5},
418 		{0, 0}
419 		}
420 	},
421 	{0, 0, 2, 0,		/* 0x2e */
422 		{{1, 3},
423 		{5, 5},
424 		{0, 0},
425 		{0, 0}
426 		}
427 	},
428 	{1, 0, 2, 0,		/* 0x2f */
429 		{{0, 3},
430 		{5, 5},
431 		{0, 0},
432 		{0, 0}
433 		}
434 	},
435 	{0, 0, 1, 0,		/* 0x30 */
436 		{{4, 5},
437 		{0, 0},
438 		{0, 0},
439 		{0, 0}
440 		}
441 	},
442 	{1, 0, 2, 0,		/* 0x31 */
443 		{{0, 0},
444 		{4, 5},
445 		{0, 0},
446 		{0, 0}
447 		}
448 	},
449 	{0, 0, 2, 0,		/* 0x32 */
450 		{{1, 1},
451 		{4, 5},
452 		{0, 0},
453 		{0, 0}
454 		}
455 	},
456 	{1, 0, 2, 0,		/* 0x33 */
457 		{{0, 1},
458 		{4, 5},
459 		{0, 0},
460 		{0, 0}
461 		}
462 	},
463 	{0, 0, 2, 0,		/* 0x34 */
464 		{{2, 2},
465 		{4, 5},
466 		{0, 0},
467 		{0, 0}
468 		}
469 	},
470 	{1, 0, 3, 0,		/* 0x35 */
471 		{{0, 0},
472 		{2, 2},
473 		{4, 5},
474 		{0, 0}
475 		}
476 	},
477 	{0, 0, 2, 0,		/* 0x36 */
478 		{{1, 2},
479 		{4, 5},
480 		{0, 0},
481 		{0, 0}
482 		}
483 	},
484 	{1, 0, 2, 0,		/* 0x37 */
485 		{{0, 2},
486 		{4, 5},
487 		{0, 0},
488 		{0, 0}
489 		}
490 	},
491 	{0, 0, 1, 0,		/* 0x38 */
492 		{{3, 5},
493 		{0, 0},
494 		{0, 0},
495 		{0, 0}
496 		}
497 	},
498 	{1, 0, 2, 0,		/* 0x39 */
499 		{{0, 0},
500 		{3, 5},
501 		{0, 0},
502 		{0, 0}
503 		}
504 	},
505 	{0, 0, 2, 0,		/* 0x3a */
506 		{{1, 1},
507 		{3, 5},
508 		{0, 0},
509 		{0, 0}
510 		}
511 	},
512 	{1, 0, 2, 0,		/* 0x3b */
513 		{{0, 1},
514 		{3, 5},
515 		{0, 0},
516 		{0, 0}
517 		}
518 	},
519 	{0, 0, 1, 0,		/* 0x3c */
520 		{{2, 5},
521 		{0, 0},
522 		{0, 0},
523 		{0, 0}
524 		}
525 	},
526 	{1, 0, 2, 0,		/* 0x3d */
527 		{{0, 0},
528 		{2, 5},
529 		{0, 0},
530 		{0, 0}
531 		}
532 	},
533 	{0, 0, 1, 0,		/* 0x3e */
534 		{{1, 5},
535 		{0, 0},
536 		{0, 0},
537 		{0, 0}
538 		}
539 	},
540 	{1, 0, 1, 0,		/* 0x3f */
541 		{{0, 5},
542 		{0, 0},
543 		{0, 0},
544 		{0, 0}
545 		}
546 	},
547 	{0, 0, 1, 0,		/* 0x40 */
548 		{{6, 6},
549 		{0, 0},
550 		{0, 0},
551 		{0, 0}
552 		}
553 	},
554 	{1, 0, 2, 0,		/* 0x41 */
555 		{{0, 0},
556 		{6, 6},
557 		{0, 0},
558 		{0, 0}
559 		}
560 	},
561 	{0, 0, 2, 0,		/* 0x42 */
562 		{{1, 1},
563 		{6, 6},
564 		{0, 0},
565 		{0, 0}
566 		}
567 	},
568 	{1, 0, 2, 0,		/* 0x43 */
569 		{{0, 1},
570 		{6, 6},
571 		{0, 0},
572 		{0, 0}
573 		}
574 	},
575 	{0, 0, 2, 0,		/* 0x44 */
576 		{{2, 2},
577 		{6, 6},
578 		{0, 0},
579 		{0, 0}
580 		}
581 	},
582 	{1, 0, 3, 0,		/* 0x45 */
583 		{{0, 0},
584 		{2, 2},
585 		{6, 6},
586 		{0, 0}
587 		}
588 	},
589 	{0, 0, 2, 0,		/* 0x46 */
590 		{{1, 2},
591 		{6, 6},
592 		{0, 0},
593 		{0, 0}
594 		}
595 	},
596 	{1, 0, 2, 0,		/* 0x47 */
597 		{{0, 2},
598 		{6, 6},
599 		{0, 0},
600 		{0, 0}
601 		}
602 	},
603 	{0, 0, 2, 0,		/* 0x48 */
604 		{{3, 3},
605 		{6, 6},
606 		{0, 0},
607 		{0, 0}
608 		}
609 	},
610 	{1, 0, 3, 0,		/* 0x49 */
611 		{{0, 0},
612 		{3, 3},
613 		{6, 6},
614 		{0, 0}
615 		}
616 	},
617 	{0, 0, 3, 0,		/* 0x4a */
618 		{{1, 1},
619 		{3, 3},
620 		{6, 6},
621 		{0, 0}
622 		}
623 	},
624 	{1, 0, 3, 0,		/* 0x4b */
625 		{{0, 1},
626 		{3, 3},
627 		{6, 6},
628 		{0, 0}
629 		}
630 	},
631 	{0, 0, 2, 0,		/* 0x4c */
632 		{{2, 3},
633 		{6, 6},
634 		{0, 0},
635 		{0, 0}
636 		}
637 	},
638 	{1, 0, 3, 0,		/* 0x4d */
639 		{{0, 0},
640 		{2, 3},
641 		{6, 6},
642 		{0, 0}
643 		}
644 	},
645 	{0, 0, 2, 0,		/* 0x4e */
646 		{{1, 3},
647 		{6, 6},
648 		{0, 0},
649 		{0, 0}
650 		}
651 	},
652 	{1, 0, 2, 0,		/* 0x4f */
653 		{{0, 3},
654 		{6, 6},
655 		{0, 0},
656 		{0, 0}
657 		}
658 	},
659 	{0, 0, 2, 0,		/* 0x50 */
660 		{{4, 4},
661 		{6, 6},
662 		{0, 0},
663 		{0, 0}
664 		}
665 	},
666 	{1, 0, 3, 0,		/* 0x51 */
667 		{{0, 0},
668 		{4, 4},
669 		{6, 6},
670 		{0, 0}
671 		}
672 	},
673 	{0, 0, 3, 0,		/* 0x52 */
674 		{{1, 1},
675 		{4, 4},
676 		{6, 6},
677 		{0, 0}
678 		}
679 	},
680 	{1, 0, 3, 0,		/* 0x53 */
681 		{{0, 1},
682 		{4, 4},
683 		{6, 6},
684 		{0, 0}
685 		}
686 	},
687 	{0, 0, 3, 0,		/* 0x54 */
688 		{{2, 2},
689 		{4, 4},
690 		{6, 6},
691 		{0, 0}
692 		}
693 	},
694 	{1, 0, 4, 0,		/* 0x55 */
695 		{{0, 0},
696 		{2, 2},
697 		{4, 4},
698 		{6, 6}
699 		}
700 	},
701 	{0, 0, 3, 0,		/* 0x56 */
702 		{{1, 2},
703 		{4, 4},
704 		{6, 6},
705 		{0, 0}
706 		}
707 	},
708 	{1, 0, 3, 0,		/* 0x57 */
709 		{{0, 2},
710 		{4, 4},
711 		{6, 6},
712 		{0, 0}
713 		}
714 	},
715 	{0, 0, 2, 0,		/* 0x58 */
716 		{{3, 4},
717 		{6, 6},
718 		{0, 0},
719 		{0, 0}
720 		}
721 	},
722 	{1, 0, 3, 0,		/* 0x59 */
723 		{{0, 0},
724 		{3, 4},
725 		{6, 6},
726 		{0, 0}
727 		}
728 	},
729 	{0, 0, 3, 0,		/* 0x5a */
730 		{{1, 1},
731 		{3, 4},
732 		{6, 6},
733 		{0, 0}
734 		}
735 	},
736 	{1, 0, 3, 0,		/* 0x5b */
737 		{{0, 1},
738 		{3, 4},
739 		{6, 6},
740 		{0, 0}
741 		}
742 	},
743 	{0, 0, 2, 0,		/* 0x5c */
744 		{{2, 4},
745 		{6, 6},
746 		{0, 0},
747 		{0, 0}
748 		}
749 	},
750 	{1, 0, 3, 0,		/* 0x5d */
751 		{{0, 0},
752 		{2, 4},
753 		{6, 6},
754 		{0, 0}
755 		}
756 	},
757 	{0, 0, 2, 0,		/* 0x5e */
758 		{{1, 4},
759 		{6, 6},
760 		{0, 0},
761 		{0, 0}
762 		}
763 	},
764 	{1, 0, 2, 0,		/* 0x5f */
765 		{{0, 4},
766 		{6, 6},
767 		{0, 0},
768 		{0, 0}
769 		}
770 	},
771 	{0, 0, 1, 0,		/* 0x60 */
772 		{{5, 6},
773 		{0, 0},
774 		{0, 0},
775 		{0, 0}
776 		}
777 	},
778 	{1, 0, 2, 0,		/* 0x61 */
779 		{{0, 0},
780 		{5, 6},
781 		{0, 0},
782 		{0, 0}
783 		}
784 	},
785 	{0, 0, 2, 0,		/* 0x62 */
786 		{{1, 1},
787 		{5, 6},
788 		{0, 0},
789 		{0, 0}
790 		}
791 	},
792 	{1, 0, 2, 0,		/* 0x63 */
793 		{{0, 1},
794 		{5, 6},
795 		{0, 0},
796 		{0, 0}
797 		}
798 	},
799 	{0, 0, 2, 0,		/* 0x64 */
800 		{{2, 2},
801 		{5, 6},
802 		{0, 0},
803 		{0, 0}
804 		}
805 	},
806 	{1, 0, 3, 0,		/* 0x65 */
807 		{{0, 0},
808 		{2, 2},
809 		{5, 6},
810 		{0, 0}
811 		}
812 	},
813 	{0, 0, 2, 0,		/* 0x66 */
814 		{{1, 2},
815 		{5, 6},
816 		{0, 0},
817 		{0, 0}
818 		}
819 	},
820 	{1, 0, 2, 0,		/* 0x67 */
821 		{{0, 2},
822 		{5, 6},
823 		{0, 0},
824 		{0, 0}
825 		}
826 	},
827 	{0, 0, 2, 0,		/* 0x68 */
828 		{{3, 3},
829 		{5, 6},
830 		{0, 0},
831 		{0, 0}
832 		}
833 	},
834 	{1, 0, 3, 0,		/* 0x69 */
835 		{{0, 0},
836 		{3, 3},
837 		{5, 6},
838 		{0, 0}
839 		}
840 	},
841 	{0, 0, 3, 0,		/* 0x6a */
842 		{{1, 1},
843 		{3, 3},
844 		{5, 6},
845 		{0, 0}
846 		}
847 	},
848 	{1, 0, 3, 0,		/* 0x6b */
849 		{{0, 1},
850 		{3, 3},
851 		{5, 6},
852 		{0, 0}
853 		}
854 	},
855 	{0, 0, 2, 0,		/* 0x6c */
856 		{{2, 3},
857 		{5, 6},
858 		{0, 0},
859 		{0, 0}
860 		}
861 	},
862 	{1, 0, 3, 0,		/* 0x6d */
863 		{{0, 0},
864 		{2, 3},
865 		{5, 6},
866 		{0, 0}
867 		}
868 	},
869 	{0, 0, 2, 0,		/* 0x6e */
870 		{{1, 3},
871 		{5, 6},
872 		{0, 0},
873 		{0, 0}
874 		}
875 	},
876 	{1, 0, 2, 0,		/* 0x6f */
877 		{{0, 3},
878 		{5, 6},
879 		{0, 0},
880 		{0, 0}
881 		}
882 	},
883 	{0, 0, 1, 0,		/* 0x70 */
884 		{{4, 6},
885 		{0, 0},
886 		{0, 0},
887 		{0, 0}
888 		}
889 	},
890 	{1, 0, 2, 0,		/* 0x71 */
891 		{{0, 0},
892 		{4, 6},
893 		{0, 0},
894 		{0, 0}
895 		}
896 	},
897 	{0, 0, 2, 0,		/* 0x72 */
898 		{{1, 1},
899 		{4, 6},
900 		{0, 0},
901 		{0, 0}
902 		}
903 	},
904 	{1, 0, 2, 0,		/* 0x73 */
905 		{{0, 1},
906 		{4, 6},
907 		{0, 0},
908 		{0, 0}
909 		}
910 	},
911 	{0, 0, 2, 0,		/* 0x74 */
912 		{{2, 2},
913 		{4, 6},
914 		{0, 0},
915 		{0, 0}
916 		}
917 	},
918 	{1, 0, 3, 0,		/* 0x75 */
919 		{{0, 0},
920 		{2, 2},
921 		{4, 6},
922 		{0, 0}
923 		}
924 	},
925 	{0, 0, 2, 0,		/* 0x76 */
926 		{{1, 2},
927 		{4, 6},
928 		{0, 0},
929 		{0, 0}
930 		}
931 	},
932 	{1, 0, 2, 0,		/* 0x77 */
933 		{{0, 2},
934 		{4, 6},
935 		{0, 0},
936 		{0, 0}
937 		}
938 	},
939 	{0, 0, 1, 0,		/* 0x78 */
940 		{{3, 6},
941 		{0, 0},
942 		{0, 0},
943 		{0, 0}
944 		}
945 	},
946 	{1, 0, 2, 0,		/* 0x79 */
947 		{{0, 0},
948 		{3, 6},
949 		{0, 0},
950 		{0, 0}
951 		}
952 	},
953 	{0, 0, 2, 0,		/* 0x7a */
954 		{{1, 1},
955 		{3, 6},
956 		{0, 0},
957 		{0, 0}
958 		}
959 	},
960 	{1, 0, 2, 0,		/* 0x7b */
961 		{{0, 1},
962 		{3, 6},
963 		{0, 0},
964 		{0, 0}
965 		}
966 	},
967 	{0, 0, 1, 0,		/* 0x7c */
968 		{{2, 6},
969 		{0, 0},
970 		{0, 0},
971 		{0, 0}
972 		}
973 	},
974 	{1, 0, 2, 0,		/* 0x7d */
975 		{{0, 0},
976 		{2, 6},
977 		{0, 0},
978 		{0, 0}
979 		}
980 	},
981 	{0, 0, 1, 0,		/* 0x7e */
982 		{{1, 6},
983 		{0, 0},
984 		{0, 0},
985 		{0, 0}
986 		}
987 	},
988 	{1, 0, 1, 0,		/* 0x7f */
989 		{{0, 6},
990 		{0, 0},
991 		{0, 0},
992 		{0, 0}
993 		}
994 	},
995 	{0, 1, 1, 0,		/* 0x80 */
996 		{{7, 7},
997 		{0, 0},
998 		{0, 0},
999 		{0, 0}
1000 		}
1001 	},
1002 	{1, 1, 2, 0,		/* 0x81 */
1003 		{{0, 0},
1004 		{7, 7},
1005 		{0, 0},
1006 		{0, 0}
1007 		}
1008 	},
1009 	{0, 1, 2, 0,		/* 0x82 */
1010 		{{1, 1},
1011 		{7, 7},
1012 		{0, 0},
1013 		{0, 0}
1014 		}
1015 	},
1016 	{1, 1, 2, 0,		/* 0x83 */
1017 		{{0, 1},
1018 		{7, 7},
1019 		{0, 0},
1020 		{0, 0}
1021 		}
1022 	},
1023 	{0, 1, 2, 0,		/* 0x84 */
1024 		{{2, 2},
1025 		{7, 7},
1026 		{0, 0},
1027 		{0, 0}
1028 		}
1029 	},
1030 	{1, 1, 3, 0,		/* 0x85 */
1031 		{{0, 0},
1032 		{2, 2},
1033 		{7, 7},
1034 		{0, 0}
1035 		}
1036 	},
1037 	{0, 1, 2, 0,		/* 0x86 */
1038 		{{1, 2},
1039 		{7, 7},
1040 		{0, 0},
1041 		{0, 0}
1042 		}
1043 	},
1044 	{1, 1, 2, 0,		/* 0x87 */
1045 		{{0, 2},
1046 		{7, 7},
1047 		{0, 0},
1048 		{0, 0}
1049 		}
1050 	},
1051 	{0, 1, 2, 0,		/* 0x88 */
1052 		{{3, 3},
1053 		{7, 7},
1054 		{0, 0},
1055 		{0, 0}
1056 		}
1057 	},
1058 	{1, 1, 3, 0,		/* 0x89 */
1059 		{{0, 0},
1060 		{3, 3},
1061 		{7, 7},
1062 		{0, 0}
1063 		}
1064 	},
1065 	{0, 1, 3, 0,		/* 0x8a */
1066 		{{1, 1},
1067 		{3, 3},
1068 		{7, 7},
1069 		{0, 0}
1070 		}
1071 	},
1072 	{1, 1, 3, 0,		/* 0x8b */
1073 		{{0, 1},
1074 		{3, 3},
1075 		{7, 7},
1076 		{0, 0}
1077 		}
1078 	},
1079 	{0, 1, 2, 0,		/* 0x8c */
1080 		{{2, 3},
1081 		{7, 7},
1082 		{0, 0},
1083 		{0, 0}
1084 		}
1085 	},
1086 	{1, 1, 3, 0,		/* 0x8d */
1087 		{{0, 0},
1088 		{2, 3},
1089 		{7, 7},
1090 		{0, 0}
1091 		}
1092 	},
1093 	{0, 1, 2, 0,		/* 0x8e */
1094 		{{1, 3},
1095 		{7, 7},
1096 		{0, 0},
1097 		{0, 0}
1098 		}
1099 	},
1100 	{1, 1, 2, 0,		/* 0x8f */
1101 		{{0, 3},
1102 		{7, 7},
1103 		{0, 0},
1104 		{0, 0}
1105 		}
1106 	},
1107 	{0, 1, 2, 0,		/* 0x90 */
1108 		{{4, 4},
1109 		{7, 7},
1110 		{0, 0},
1111 		{0, 0}
1112 		}
1113 	},
1114 	{1, 1, 3, 0,		/* 0x91 */
1115 		{{0, 0},
1116 		{4, 4},
1117 		{7, 7},
1118 		{0, 0}
1119 		}
1120 	},
1121 	{0, 1, 3, 0,		/* 0x92 */
1122 		{{1, 1},
1123 		{4, 4},
1124 		{7, 7},
1125 		{0, 0}
1126 		}
1127 	},
1128 	{1, 1, 3, 0,		/* 0x93 */
1129 		{{0, 1},
1130 		{4, 4},
1131 		{7, 7},
1132 		{0, 0}
1133 		}
1134 	},
1135 	{0, 1, 3, 0,		/* 0x94 */
1136 		{{2, 2},
1137 		{4, 4},
1138 		{7, 7},
1139 		{0, 0}
1140 		}
1141 	},
1142 	{1, 1, 4, 0,		/* 0x95 */
1143 		{{0, 0},
1144 		{2, 2},
1145 		{4, 4},
1146 		{7, 7}
1147 		}
1148 	},
1149 	{0, 1, 3, 0,		/* 0x96 */
1150 		{{1, 2},
1151 		{4, 4},
1152 		{7, 7},
1153 		{0, 0}
1154 		}
1155 	},
1156 	{1, 1, 3, 0,		/* 0x97 */
1157 		{{0, 2},
1158 		{4, 4},
1159 		{7, 7},
1160 		{0, 0}
1161 		}
1162 	},
1163 	{0, 1, 2, 0,		/* 0x98 */
1164 		{{3, 4},
1165 		{7, 7},
1166 		{0, 0},
1167 		{0, 0}
1168 		}
1169 	},
1170 	{1, 1, 3, 0,		/* 0x99 */
1171 		{{0, 0},
1172 		{3, 4},
1173 		{7, 7},
1174 		{0, 0}
1175 		}
1176 	},
1177 	{0, 1, 3, 0,		/* 0x9a */
1178 		{{1, 1},
1179 		{3, 4},
1180 		{7, 7},
1181 		{0, 0}
1182 		}
1183 	},
1184 	{1, 1, 3, 0,		/* 0x9b */
1185 		{{0, 1},
1186 		{3, 4},
1187 		{7, 7},
1188 		{0, 0}
1189 		}
1190 	},
1191 	{0, 1, 2, 0,		/* 0x9c */
1192 		{{2, 4},
1193 		{7, 7},
1194 		{0, 0},
1195 		{0, 0}
1196 		}
1197 	},
1198 	{1, 1, 3, 0,		/* 0x9d */
1199 		{{0, 0},
1200 		{2, 4},
1201 		{7, 7},
1202 		{0, 0}
1203 		}
1204 	},
1205 	{0, 1, 2, 0,		/* 0x9e */
1206 		{{1, 4},
1207 		{7, 7},
1208 		{0, 0},
1209 		{0, 0}
1210 		}
1211 	},
1212 	{1, 1, 2, 0,		/* 0x9f */
1213 		{{0, 4},
1214 		{7, 7},
1215 		{0, 0},
1216 		{0, 0}
1217 		}
1218 	},
1219 	{0, 1, 2, 0,		/* 0xa0 */
1220 		{{5, 5},
1221 		{7, 7},
1222 		{0, 0},
1223 		{0, 0}
1224 		}
1225 	},
1226 	{1, 1, 3, 0,		/* 0xa1 */
1227 		{{0, 0},
1228 		{5, 5},
1229 		{7, 7},
1230 		{0, 0}
1231 		}
1232 	},
1233 	{0, 1, 3, 0,		/* 0xa2 */
1234 		{{1, 1},
1235 		{5, 5},
1236 		{7, 7},
1237 		{0, 0}
1238 		}
1239 	},
1240 	{1, 1, 3, 0,		/* 0xa3 */
1241 		{{0, 1},
1242 		{5, 5},
1243 		{7, 7},
1244 		{0, 0}
1245 		}
1246 	},
1247 	{0, 1, 3, 0,		/* 0xa4 */
1248 		{{2, 2},
1249 		{5, 5},
1250 		{7, 7},
1251 		{0, 0}
1252 		}
1253 	},
1254 	{1, 1, 4, 0,		/* 0xa5 */
1255 		{{0, 0},
1256 		{2, 2},
1257 		{5, 5},
1258 		{7, 7}
1259 		}
1260 	},
1261 	{0, 1, 3, 0,		/* 0xa6 */
1262 		{{1, 2},
1263 		{5, 5},
1264 		{7, 7},
1265 		{0, 0}
1266 		}
1267 	},
1268 	{1, 1, 3, 0,		/* 0xa7 */
1269 		{{0, 2},
1270 		{5, 5},
1271 		{7, 7},
1272 		{0, 0}
1273 		}
1274 	},
1275 	{0, 1, 3, 0,		/* 0xa8 */
1276 		{{3, 3},
1277 		{5, 5},
1278 		{7, 7},
1279 		{0, 0}
1280 		}
1281 	},
1282 	{1, 1, 4, 0,		/* 0xa9 */
1283 		{{0, 0},
1284 		{3, 3},
1285 		{5, 5},
1286 		{7, 7}
1287 		}
1288 	},
1289 	{0, 1, 4, 0,		/* 0xaa */
1290 		{{1, 1},
1291 		{3, 3},
1292 		{5, 5},
1293 		{7, 7}
1294 		}
1295 	},
1296 	{1, 1, 4, 0,		/* 0xab */
1297 		{{0, 1},
1298 		{3, 3},
1299 		{5, 5},
1300 		{7, 7}
1301 		}
1302 	},
1303 	{0, 1, 3, 0,		/* 0xac */
1304 		{{2, 3},
1305 		{5, 5},
1306 		{7, 7},
1307 		{0, 0}
1308 		}
1309 	},
1310 	{1, 1, 4, 0,		/* 0xad */
1311 		{{0, 0},
1312 		{2, 3},
1313 		{5, 5},
1314 		{7, 7}
1315 		}
1316 	},
1317 	{0, 1, 3, 0,		/* 0xae */
1318 		{{1, 3},
1319 		{5, 5},
1320 		{7, 7},
1321 		{0, 0}
1322 		}
1323 	},
1324 	{1, 1, 3, 0,		/* 0xaf */
1325 		{{0, 3},
1326 		{5, 5},
1327 		{7, 7},
1328 		{0, 0}
1329 		}
1330 	},
1331 	{0, 1, 2, 0,		/* 0xb0 */
1332 		{{4, 5},
1333 		{7, 7},
1334 		{0, 0},
1335 		{0, 0}
1336 		}
1337 	},
1338 	{1, 1, 3, 0,		/* 0xb1 */
1339 		{{0, 0},
1340 		{4, 5},
1341 		{7, 7},
1342 		{0, 0}
1343 		}
1344 	},
1345 	{0, 1, 3, 0,		/* 0xb2 */
1346 		{{1, 1},
1347 		{4, 5},
1348 		{7, 7},
1349 		{0, 0}
1350 		}
1351 	},
1352 	{1, 1, 3, 0,		/* 0xb3 */
1353 		{{0, 1},
1354 		{4, 5},
1355 		{7, 7},
1356 		{0, 0}
1357 		}
1358 	},
1359 	{0, 1, 3, 0,		/* 0xb4 */
1360 		{{2, 2},
1361 		{4, 5},
1362 		{7, 7},
1363 		{0, 0}
1364 		}
1365 	},
1366 	{1, 1, 4, 0,		/* 0xb5 */
1367 		{{0, 0},
1368 		{2, 2},
1369 		{4, 5},
1370 		{7, 7}
1371 		}
1372 	},
1373 	{0, 1, 3, 0,		/* 0xb6 */
1374 		{{1, 2},
1375 		{4, 5},
1376 		{7, 7},
1377 		{0, 0}
1378 		}
1379 	},
1380 	{1, 1, 3, 0,		/* 0xb7 */
1381 		{{0, 2},
1382 		{4, 5},
1383 		{7, 7},
1384 		{0, 0}
1385 		}
1386 	},
1387 	{0, 1, 2, 0,		/* 0xb8 */
1388 		{{3, 5},
1389 		{7, 7},
1390 		{0, 0},
1391 		{0, 0}
1392 		}
1393 	},
1394 	{1, 1, 3, 0,		/* 0xb9 */
1395 		{{0, 0},
1396 		{3, 5},
1397 		{7, 7},
1398 		{0, 0}
1399 		}
1400 	},
1401 	{0, 1, 3, 0,		/* 0xba */
1402 		{{1, 1},
1403 		{3, 5},
1404 		{7, 7},
1405 		{0, 0}
1406 		}
1407 	},
1408 	{1, 1, 3, 0,		/* 0xbb */
1409 		{{0, 1},
1410 		{3, 5},
1411 		{7, 7},
1412 		{0, 0}
1413 		}
1414 	},
1415 	{0, 1, 2, 0,		/* 0xbc */
1416 		{{2, 5},
1417 		{7, 7},
1418 		{0, 0},
1419 		{0, 0}
1420 		}
1421 	},
1422 	{1, 1, 3, 0,		/* 0xbd */
1423 		{{0, 0},
1424 		{2, 5},
1425 		{7, 7},
1426 		{0, 0}
1427 		}
1428 	},
1429 	{0, 1, 2, 0,		/* 0xbe */
1430 		{{1, 5},
1431 		{7, 7},
1432 		{0, 0},
1433 		{0, 0}
1434 		}
1435 	},
1436 	{1, 1, 2, 0,		/* 0xbf */
1437 		{{0, 5},
1438 		{7, 7},
1439 		{0, 0},
1440 		{0, 0}
1441 		}
1442 	},
1443 	{0, 1, 1, 0,		/* 0xc0 */
1444 		{{6, 7},
1445 		{0, 0},
1446 		{0, 0},
1447 		{0, 0}
1448 		}
1449 	},
1450 	{1, 1, 2, 0,		/* 0xc1 */
1451 		{{0, 0},
1452 		{6, 7},
1453 		{0, 0},
1454 		{0, 0}
1455 		}
1456 	},
1457 	{0, 1, 2, 0,		/* 0xc2 */
1458 		{{1, 1},
1459 		{6, 7},
1460 		{0, 0},
1461 		{0, 0}
1462 		}
1463 	},
1464 	{1, 1, 2, 0,		/* 0xc3 */
1465 		{{0, 1},
1466 		{6, 7},
1467 		{0, 0},
1468 		{0, 0}
1469 		}
1470 	},
1471 	{0, 1, 2, 0,		/* 0xc4 */
1472 		{{2, 2},
1473 		{6, 7},
1474 		{0, 0},
1475 		{0, 0}
1476 		}
1477 	},
1478 	{1, 1, 3, 0,		/* 0xc5 */
1479 		{{0, 0},
1480 		{2, 2},
1481 		{6, 7},
1482 		{0, 0}
1483 		}
1484 	},
1485 	{0, 1, 2, 0,		/* 0xc6 */
1486 		{{1, 2},
1487 		{6, 7},
1488 		{0, 0},
1489 		{0, 0}
1490 		}
1491 	},
1492 	{1, 1, 2, 0,		/* 0xc7 */
1493 		{{0, 2},
1494 		{6, 7},
1495 		{0, 0},
1496 		{0, 0}
1497 		}
1498 	},
1499 	{0, 1, 2, 0,		/* 0xc8 */
1500 		{{3, 3},
1501 		{6, 7},
1502 		{0, 0},
1503 		{0, 0}
1504 		}
1505 	},
1506 	{1, 1, 3, 0,		/* 0xc9 */
1507 		{{0, 0},
1508 		{3, 3},
1509 		{6, 7},
1510 		{0, 0}
1511 		}
1512 	},
1513 	{0, 1, 3, 0,		/* 0xca */
1514 		{{1, 1},
1515 		{3, 3},
1516 		{6, 7},
1517 		{0, 0}
1518 		}
1519 	},
1520 	{1, 1, 3, 0,		/* 0xcb */
1521 		{{0, 1},
1522 		{3, 3},
1523 		{6, 7},
1524 		{0, 0}
1525 		}
1526 	},
1527 	{0, 1, 2, 0,		/* 0xcc */
1528 		{{2, 3},
1529 		{6, 7},
1530 		{0, 0},
1531 		{0, 0}
1532 		}
1533 	},
1534 	{1, 1, 3, 0,		/* 0xcd */
1535 		{{0, 0},
1536 		{2, 3},
1537 		{6, 7},
1538 		{0, 0}
1539 		}
1540 	},
1541 	{0, 1, 2, 0,		/* 0xce */
1542 		{{1, 3},
1543 		{6, 7},
1544 		{0, 0},
1545 		{0, 0}
1546 		}
1547 	},
1548 	{1, 1, 2, 0,		/* 0xcf */
1549 		{{0, 3},
1550 		{6, 7},
1551 		{0, 0},
1552 		{0, 0}
1553 		}
1554 	},
1555 	{0, 1, 2, 0,		/* 0xd0 */
1556 		{{4, 4},
1557 		{6, 7},
1558 		{0, 0},
1559 		{0, 0}
1560 		}
1561 	},
1562 	{1, 1, 3, 0,		/* 0xd1 */
1563 		{{0, 0},
1564 		{4, 4},
1565 		{6, 7},
1566 		{0, 0}
1567 		}
1568 	},
1569 	{0, 1, 3, 0,		/* 0xd2 */
1570 		{{1, 1},
1571 		{4, 4},
1572 		{6, 7},
1573 		{0, 0}
1574 		}
1575 	},
1576 	{1, 1, 3, 0,		/* 0xd3 */
1577 		{{0, 1},
1578 		{4, 4},
1579 		{6, 7},
1580 		{0, 0}
1581 		}
1582 	},
1583 	{0, 1, 3, 0,		/* 0xd4 */
1584 		{{2, 2},
1585 		{4, 4},
1586 		{6, 7},
1587 		{0, 0}
1588 		}
1589 	},
1590 	{1, 1, 4, 0,		/* 0xd5 */
1591 		{{0, 0},
1592 		{2, 2},
1593 		{4, 4},
1594 		{6, 7}
1595 		}
1596 	},
1597 	{0, 1, 3, 0,		/* 0xd6 */
1598 		{{1, 2},
1599 		{4, 4},
1600 		{6, 7},
1601 		{0, 0}
1602 		}
1603 	},
1604 	{1, 1, 3, 0,		/* 0xd7 */
1605 		{{0, 2},
1606 		{4, 4},
1607 		{6, 7},
1608 		{0, 0}
1609 		}
1610 	},
1611 	{0, 1, 2, 0,		/* 0xd8 */
1612 		{{3, 4},
1613 		{6, 7},
1614 		{0, 0},
1615 		{0, 0}
1616 		}
1617 	},
1618 	{1, 1, 3, 0,		/* 0xd9 */
1619 		{{0, 0},
1620 		{3, 4},
1621 		{6, 7},
1622 		{0, 0}
1623 		}
1624 	},
1625 	{0, 1, 3, 0,		/* 0xda */
1626 		{{1, 1},
1627 		{3, 4},
1628 		{6, 7},
1629 		{0, 0}
1630 		}
1631 	},
1632 	{1, 1, 3, 0,		/* 0xdb */
1633 		{{0, 1},
1634 		{3, 4},
1635 		{6, 7},
1636 		{0, 0}
1637 		}
1638 	},
1639 	{0, 1, 2, 0,		/* 0xdc */
1640 		{{2, 4},
1641 		{6, 7},
1642 		{0, 0},
1643 		{0, 0}
1644 		}
1645 	},
1646 	{1, 1, 3, 0,		/* 0xdd */
1647 		{{0, 0},
1648 		{2, 4},
1649 		{6, 7},
1650 		{0, 0}
1651 		}
1652 	},
1653 	{0, 1, 2, 0,		/* 0xde */
1654 		{{1, 4},
1655 		{6, 7},
1656 		{0, 0},
1657 		{0, 0}
1658 		}
1659 	},
1660 	{1, 1, 2, 0,		/* 0xdf */
1661 		{{0, 4},
1662 		{6, 7},
1663 		{0, 0},
1664 		{0, 0}
1665 		}
1666 	},
1667 	{0, 1, 1, 0,		/* 0xe0 */
1668 		{{5, 7},
1669 		{0, 0},
1670 		{0, 0},
1671 		{0, 0}
1672 		}
1673 	},
1674 	{1, 1, 2, 0,		/* 0xe1 */
1675 		{{0, 0},
1676 		{5, 7},
1677 		{0, 0},
1678 		{0, 0}
1679 		}
1680 	},
1681 	{0, 1, 2, 0,		/* 0xe2 */
1682 		{{1, 1},
1683 		{5, 7},
1684 		{0, 0},
1685 		{0, 0}
1686 		}
1687 	},
1688 	{1, 1, 2, 0,		/* 0xe3 */
1689 		{{0, 1},
1690 		{5, 7},
1691 		{0, 0},
1692 		{0, 0}
1693 		}
1694 	},
1695 	{0, 1, 2, 0,		/* 0xe4 */
1696 		{{2, 2},
1697 		{5, 7},
1698 		{0, 0},
1699 		{0, 0}
1700 		}
1701 	},
1702 	{1, 1, 3, 0,		/* 0xe5 */
1703 		{{0, 0},
1704 		{2, 2},
1705 		{5, 7},
1706 		{0, 0}
1707 		}
1708 	},
1709 	{0, 1, 2, 0,		/* 0xe6 */
1710 		{{1, 2},
1711 		{5, 7},
1712 		{0, 0},
1713 		{0, 0}
1714 		}
1715 	},
1716 	{1, 1, 2, 0,		/* 0xe7 */
1717 		{{0, 2},
1718 		{5, 7},
1719 		{0, 0},
1720 		{0, 0}
1721 		}
1722 	},
1723 	{0, 1, 2, 0,		/* 0xe8 */
1724 		{{3, 3},
1725 		{5, 7},
1726 		{0, 0},
1727 		{0, 0}
1728 		}
1729 	},
1730 	{1, 1, 3, 0,		/* 0xe9 */
1731 		{{0, 0},
1732 		{3, 3},
1733 		{5, 7},
1734 		{0, 0}
1735 		}
1736 	},
1737 	{0, 1, 3, 0,		/* 0xea */
1738 		{{1, 1},
1739 		{3, 3},
1740 		{5, 7},
1741 		{0, 0}
1742 		}
1743 	},
1744 	{1, 1, 3, 0,		/* 0xeb */
1745 		{{0, 1},
1746 		{3, 3},
1747 		{5, 7},
1748 		{0, 0}
1749 		}
1750 	},
1751 	{0, 1, 2, 0,		/* 0xec */
1752 		{{2, 3},
1753 		{5, 7},
1754 		{0, 0},
1755 		{0, 0}
1756 		}
1757 	},
1758 	{1, 1, 3, 0,		/* 0xed */
1759 		{{0, 0},
1760 		{2, 3},
1761 		{5, 7},
1762 		{0, 0}
1763 		}
1764 	},
1765 	{0, 1, 2, 0,		/* 0xee */
1766 		{{1, 3},
1767 		{5, 7},
1768 		{0, 0},
1769 		{0, 0}
1770 		}
1771 	},
1772 	{1, 1, 2, 0,		/* 0xef */
1773 		{{0, 3},
1774 		{5, 7},
1775 		{0, 0},
1776 		{0, 0}
1777 		}
1778 	},
1779 	{0, 1, 1, 0,		/* 0xf0 */
1780 		{{4, 7},
1781 		{0, 0},
1782 		{0, 0},
1783 		{0, 0}
1784 		}
1785 	},
1786 	{1, 1, 2, 0,		/* 0xf1 */
1787 		{{0, 0},
1788 		{4, 7},
1789 		{0, 0},
1790 		{0, 0}
1791 		}
1792 	},
1793 	{0, 1, 2, 0,		/* 0xf2 */
1794 		{{1, 1},
1795 		{4, 7},
1796 		{0, 0},
1797 		{0, 0}
1798 		}
1799 	},
1800 	{1, 1, 2, 0,		/* 0xf3 */
1801 		{{0, 1},
1802 		{4, 7},
1803 		{0, 0},
1804 		{0, 0}
1805 		}
1806 	},
1807 	{0, 1, 2, 0,		/* 0xf4 */
1808 		{{2, 2},
1809 		{4, 7},
1810 		{0, 0},
1811 		{0, 0}
1812 		}
1813 	},
1814 	{1, 1, 3, 0,		/* 0xf5 */
1815 		{{0, 0},
1816 		{2, 2},
1817 		{4, 7},
1818 		{0, 0}
1819 		}
1820 	},
1821 	{0, 1, 2, 0,		/* 0xf6 */
1822 		{{1, 2},
1823 		{4, 7},
1824 		{0, 0},
1825 		{0, 0}
1826 		}
1827 	},
1828 	{1, 1, 2, 0,		/* 0xf7 */
1829 		{{0, 2},
1830 		{4, 7},
1831 		{0, 0},
1832 		{0, 0}
1833 		}
1834 	},
1835 	{0, 1, 1, 0,		/* 0xf8 */
1836 		{{3, 7},
1837 		{0, 0},
1838 		{0, 0},
1839 		{0, 0}
1840 		}
1841 	},
1842 	{1, 1, 2, 0,		/* 0xf9 */
1843 		{{0, 0},
1844 		{3, 7},
1845 		{0, 0},
1846 		{0, 0}
1847 		}
1848 	},
1849 	{0, 1, 2, 0,		/* 0xfa */
1850 		{{1, 1},
1851 		{3, 7},
1852 		{0, 0},
1853 		{0, 0}
1854 		}
1855 	},
1856 	{1, 1, 2, 0,		/* 0xfb */
1857 		{{0, 1},
1858 		{3, 7},
1859 		{0, 0},
1860 		{0, 0}
1861 		}
1862 	},
1863 	{0, 1, 1, 0,		/* 0xfc */
1864 		{{2, 7},
1865 		{0, 0},
1866 		{0, 0},
1867 		{0, 0}
1868 		}
1869 	},
1870 	{1, 1, 2, 0,		/* 0xfd */
1871 		{{0, 0},
1872 		{2, 7},
1873 		{0, 0},
1874 		{0, 0}
1875 		}
1876 	},
1877 	{0, 1, 1, 0,		/* 0xfe */
1878 		{{1, 7},
1879 		{0, 0},
1880 		{0, 0},
1881 		{0, 0}
1882 		}
1883 	},
1884 	{1, 1, 1, 0,		/* 0xff */
1885 		{{0, 7},
1886 		{0, 0},
1887 		{0, 0},
1888 		{0, 0}
1889 		}
1890 	}
1891 };
1892 
1893 
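/*
 * Return 1 if ifa may be used under the given scoping rules, 0 if it
 * must be skipped (e.g. loopback, private, unspecified, link-local or
 * site-local addresses that the caller has scoped out).  When
 * do_update is set, the IPv6 interface-address flags are refreshed
 * before they are checked.
 */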
1894 int
1895 sctp_is_address_in_scope(struct sctp_ifa *ifa,
1896                          struct sctp_scoping *scope,
1897                          int do_update)
1898 {
1899 	if ((scope->loopback_scope == 0) &&
1900 	    (ifa->ifn_p) && SCTP_IFN_IS_IFT_LOOP(ifa->ifn_p)) {
1901 		/*
1902 		 * skip loopback if not in scope
1903 		 */
1904 		return (0);
1905 	}
1906 	switch (ifa->address.sa.sa_family) {
1907 #ifdef INET
1908 	case AF_INET:
1909 		if (scope->ipv4_addr_legal) {
1910 			struct sockaddr_in *sin;
1911 
1912 			sin = &ifa->address.sin;
1913 			if (sin->sin_addr.s_addr == 0) {
1914 				/* not in scope, unspecified */
1915 				return (0);
1916 			}
1917 			if ((scope->ipv4_local_scope == 0) &&
1918 			    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
1919 				/* private address not in scope */
1920 				return (0);
1921 			}
1922 		} else {
1923 			return (0);
1924 		}
1925 		break;
1926 #endif
1927 #ifdef INET6
1928 	case AF_INET6:
1929 		if (scope->ipv6_addr_legal) {
1930 			struct sockaddr_in6 *sin6;
1931 
1932 			/* Must update the flags; unfortunately, this
1933 			 * means any IFA locks must now be applied HERE.
1934 			 */
1935 			if (do_update) {
1936 				sctp_gather_internal_ifa_flags(ifa);
1937 			}
1938 			if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
1939 				return (0);
1940 			}
1941 			/* ok to use deprecated addresses? */
1942 			sin6 = &ifa->address.sin6;
1943 			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
1944 				/* skip unspecified addresses */
1945 				return (0);
1946 			}
1947 			if (		/* (local_scope == 0) && */
1948 			    (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr))) {
1949 				return (0);
1950 			}
1951 			if ((scope->site_scope == 0) &&
1952 			    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
1953 				return (0);
1954 			}
1955 		} else {
1956 			return (0);
1957 		}
1958 		break;
1959 #endif
1960 #if defined(__Userspace__)
1961 	case AF_CONN:
1962 		if (!scope->conn_addr_legal) {
1963 			return (0);
1964 		}
1965 		break;
1966 #endif
1967 	default:
1968 		return (0);
1969 	}
1970 	return (1);
1971 }
1972 
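/*
 * Append an IPv4 or IPv6 address parameter (TLV) for ifa to the mbuf
 * chain m, allocating a new mbuf when the current one has no trailing
 * space.  On success the mbuf the parameter landed in is returned and,
 * if len is non-NULL, *len is increased by the parameter length; on
 * failure m is returned unchanged.
 */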
1973 static struct mbuf *
1974 sctp_add_addr_to_mbuf(struct mbuf *m, struct sctp_ifa *ifa, uint16_t *len)
1975 {
1976 #if defined(INET) || defined(INET6)
1977 	struct sctp_paramhdr *paramh;
1978 	struct mbuf *mret;
1979 	uint16_t plen;
1980 #endif
1981 
1982 	switch (ifa->address.sa.sa_family) {
1983 #ifdef INET
1984 	case AF_INET:
1985 		plen = (uint16_t)sizeof(struct sctp_ipv4addr_param);
1986 		break;
1987 #endif
1988 #ifdef INET6
1989 	case AF_INET6:
1990 		plen = (uint16_t)sizeof(struct sctp_ipv6addr_param);
1991 		break;
1992 #endif
1993 	default:
1994 		return (m);
1995 	}
1996 #if defined(INET) || defined(INET6)
1997 	if (M_TRAILINGSPACE(m) >= plen) {
1998 		/* easy case, we just drop it on the end */
1999 		paramh = (struct sctp_paramhdr *)(SCTP_BUF_AT(m, SCTP_BUF_LEN(m)));
2000 		mret = m;
2001 	} else {
2002 		/* Need more space */
2003 		mret = m;
2004 		while (SCTP_BUF_NEXT(mret) != NULL) {
2005 			mret = SCTP_BUF_NEXT(mret);
2006 		}
2007 		SCTP_BUF_NEXT(mret) = sctp_get_mbuf_for_msg(plen, 0, M_NOWAIT, 1, MT_DATA);
2008 		if (SCTP_BUF_NEXT(mret) == NULL) {
2009 			/* We are hosed, can't add more addresses */
2010 			return (m);
2011 		}
2012 		mret = SCTP_BUF_NEXT(mret);
2013 		paramh = mtod(mret, struct sctp_paramhdr *);
2014 	}
2015 	/* now add the parameter */
2016 	switch (ifa->address.sa.sa_family) {
2017 #ifdef INET
2018 	case AF_INET:
2019 	{
2020 		struct sctp_ipv4addr_param *ipv4p;
2021 		struct sockaddr_in *sin;
2022 
2023 		sin = &ifa->address.sin;
2024 		ipv4p = (struct sctp_ipv4addr_param *)paramh;
2025 		paramh->param_type = htons(SCTP_IPV4_ADDRESS);
2026 		paramh->param_length = htons(plen);
2027 		ipv4p->addr = sin->sin_addr.s_addr;
2028 		SCTP_BUF_LEN(mret) += plen;
2029 		break;
2030 	}
2031 #endif
2032 #ifdef INET6
2033 	case AF_INET6:
2034 	{
2035 		struct sctp_ipv6addr_param *ipv6p;
2036 		struct sockaddr_in6 *sin6;
2037 
2038 		sin6 = &ifa->address.sin6;
2039 		ipv6p = (struct sctp_ipv6addr_param *)paramh;
2040 		paramh->param_type = htons(SCTP_IPV6_ADDRESS);
2041 		paramh->param_length = htons(plen);
2042 		memcpy(ipv6p->addr, &sin6->sin6_addr,
2043 		    sizeof(ipv6p->addr));
2044 #if defined(SCTP_EMBEDDED_V6_SCOPE)
2045 		/* clear embedded scope in the address */
2046 		in6_clearscope((struct in6_addr *)ipv6p->addr);
2047 #endif
2048 		SCTP_BUF_LEN(mret) += plen;
2049 		break;
2050 	}
2051 #endif
2052 	default:
2053 		return (m);
2054 	}
2055 	if (len != NULL) {
2056 		*len += plen;
2057 	}
2058 	return (mret);
2059 #endif
2060 }
2061 
2062 
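/*
 * Add the local address parameters to the INIT/INIT-ACK chunk held in
 * m_at.  For a bound-all endpoint the VRF's interface addresses are
 * walked (subject to scoping, jail and SCTP_ADDRESS_LIMIT checks); for
 * a bound-specific endpoint the endpoint's address list is used, and
 * addresses are only listed when more than one is bound (see the NAT
 * note below).  *chunk_len and *padding_len, when supplied, are kept
 * up to date as parameters are appended.
 */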
2063 struct mbuf *
2064 sctp_add_addresses_to_i_ia(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2065                            struct sctp_scoping *scope,
2066 			   struct mbuf *m_at, int cnt_inits_to,
2067 			   uint16_t *padding_len, uint16_t *chunk_len)
2068 {
2069 	struct sctp_vrf *vrf = NULL;
2070 	int cnt, limit_out = 0, total_count;
2071 	uint32_t vrf_id;
2072 
2073 	vrf_id = inp->def_vrf_id;
2074 	SCTP_IPI_ADDR_RLOCK();
2075 	vrf = sctp_find_vrf(vrf_id);
2076 	if (vrf == NULL) {
2077 		SCTP_IPI_ADDR_RUNLOCK();
2078 		return (m_at);
2079 	}
2080 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
2081 		struct sctp_ifa *sctp_ifap;
2082 		struct sctp_ifn *sctp_ifnp;
2083 
2084 		cnt = cnt_inits_to;
2085 		if (vrf->total_ifa_count > SCTP_COUNT_LIMIT) {
2086 			limit_out = 1;
2087 			cnt = SCTP_ADDRESS_LIMIT;
2088 			goto skip_count;
2089 		}
2090 		LIST_FOREACH(sctp_ifnp, &vrf->ifnlist, next_ifn) {
2091 			if ((scope->loopback_scope == 0) &&
2092 			    SCTP_IFN_IS_IFT_LOOP(sctp_ifnp)) {
2093 				/*
2094 				 * Skip loopback devices if loopback_scope
2095 				 * not set
2096 				 */
2097 				continue;
2098 			}
2099 			LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) {
2100 #if defined(__FreeBSD__) && !defined(__Userspace__)
2101 #ifdef INET
2102 				if ((sctp_ifap->address.sa.sa_family == AF_INET) &&
2103 				    (prison_check_ip4(inp->ip_inp.inp.inp_cred,
2104 				                      &sctp_ifap->address.sin.sin_addr) != 0)) {
2105 					continue;
2106 				}
2107 #endif
2108 #ifdef INET6
2109 				if ((sctp_ifap->address.sa.sa_family == AF_INET6) &&
2110 				    (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2111 				                      &sctp_ifap->address.sin6.sin6_addr) != 0)) {
2112 					continue;
2113 				}
2114 #endif
2115 #endif
2116 				if (sctp_is_addr_restricted(stcb, sctp_ifap)) {
2117 					continue;
2118 				}
2119 #if defined(__Userspace__)
2120 				if (sctp_ifap->address.sa.sa_family == AF_CONN) {
2121 					continue;
2122 				}
2123 #endif
2124 				if (sctp_is_address_in_scope(sctp_ifap, scope, 1) == 0) {
2125 					continue;
2126 				}
2127 				cnt++;
2128 				if (cnt > SCTP_ADDRESS_LIMIT) {
2129 					break;
2130 				}
2131 			}
2132 			if (cnt > SCTP_ADDRESS_LIMIT) {
2133 				break;
2134 			}
2135 		}
2136 	skip_count:
2137 		if (cnt > 1) {
2138 			total_count = 0;
2139 			LIST_FOREACH(sctp_ifnp, &vrf->ifnlist, next_ifn) {
2140 				cnt = 0;
2141 				if ((scope->loopback_scope == 0) &&
2142 				    SCTP_IFN_IS_IFT_LOOP(sctp_ifnp)) {
2143 					/*
2144 					 * Skip loopback devices if
2145 					 * loopback_scope not set
2146 					 */
2147 					continue;
2148 				}
2149 				LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) {
2150 #if defined(__FreeBSD__) && !defined(__Userspace__)
2151 #ifdef INET
2152 					if ((sctp_ifap->address.sa.sa_family == AF_INET) &&
2153 					    (prison_check_ip4(inp->ip_inp.inp.inp_cred,
2154 					                      &sctp_ifap->address.sin.sin_addr) != 0)) {
2155 						continue;
2156 					}
2157 #endif
2158 #ifdef INET6
2159 					if ((sctp_ifap->address.sa.sa_family == AF_INET6) &&
2160 					    (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2161 					                      &sctp_ifap->address.sin6.sin6_addr) != 0)) {
2162 						continue;
2163 					}
2164 #endif
2165 #endif
2166 					if (sctp_is_addr_restricted(stcb, sctp_ifap)) {
2167 						continue;
2168 					}
2169 #if defined(__Userspace__)
2170 					if (sctp_ifap->address.sa.sa_family == AF_CONN) {
2171 						continue;
2172 					}
2173 #endif
2174 					if (sctp_is_address_in_scope(sctp_ifap,
2175 								     scope, 0) == 0) {
2176 						continue;
2177 					}
2178 					if ((chunk_len != NULL) &&
2179 					    (padding_len != NULL) &&
2180 					    (*padding_len > 0)) {
2181 						memset(mtod(m_at, caddr_t) + *chunk_len, 0, *padding_len);
2182 						SCTP_BUF_LEN(m_at) += *padding_len;
2183 						*chunk_len += *padding_len;
2184 						*padding_len = 0;
2185 					}
2186 					m_at = sctp_add_addr_to_mbuf(m_at, sctp_ifap, chunk_len);
2187 					if (limit_out) {
2188 						cnt++;
2189 						total_count++;
2190 						if (cnt >= 2) {
2191 							/* two addresses from each interface */
2192 							break;
2193 						}
2194 						if (total_count > SCTP_ADDRESS_LIMIT) {
2195 							/* No more addresses */
2196 							break;
2197 						}
2198 					}
2199 				}
2200 			}
2201 		}
2202 	} else {
2203 		struct sctp_laddr *laddr;
2204 
2205 		cnt = cnt_inits_to;
2206 		/* First, how many ? */
2207 		LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
2208 			if (laddr->ifa == NULL) {
2209 				continue;
2210 			}
2211 			if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED)
2212 				/* Address being deleted by the system, don't
2213 				 * list.
2214 				 */
2215 				continue;
2216 			if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2217 				/* Address being deleted on this ep,
2218 				 * don't list.
2219 				 */
2220 				continue;
2221 			}
2222 #if defined(__Userspace__)
2223 			if (laddr->ifa->address.sa.sa_family == AF_CONN) {
2224 				continue;
2225 			}
2226 #endif
2227 			if (sctp_is_address_in_scope(laddr->ifa,
2228 						     scope, 1) == 0) {
2229 				continue;
2230 			}
2231 			cnt++;
2232 		}
2233 		/*
2234 		 * To get through a NAT we only list addresses if we have
2235 		 * more than one. That way if you just bind a single address
2236 		 * we let the source of the init dictate our address.
2237 		 */
2238 		if (cnt > 1) {
2239 			cnt = cnt_inits_to;
2240 			LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
2241 				if (laddr->ifa == NULL) {
2242 					continue;
2243 				}
2244 				if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED) {
2245 					continue;
2246 				}
2247 #if defined(__Userspace__)
2248 				if (laddr->ifa->address.sa.sa_family == AF_CONN) {
2249 					continue;
2250 				}
2251 #endif
2252 				if (sctp_is_address_in_scope(laddr->ifa,
2253 							     scope, 0) == 0) {
2254 					continue;
2255 				}
2256 				if ((chunk_len != NULL) &&
2257 				    (padding_len != NULL) &&
2258 				    (*padding_len > 0)) {
2259 					memset(mtod(m_at, caddr_t) + *chunk_len, 0, *padding_len);
2260 					SCTP_BUF_LEN(m_at) += *padding_len;
2261 					*chunk_len += *padding_len;
2262 					*padding_len = 0;
2263 				}
2264 				m_at = sctp_add_addr_to_mbuf(m_at, laddr->ifa, chunk_len);
2265 				cnt++;
2266 				if (cnt >= SCTP_ADDRESS_LIMIT) {
2267 					break;
2268 				}
2269 			}
2270 		}
2271 	}
2272 	SCTP_IPI_ADDR_RUNLOCK();
2273 	return (m_at);
2274 }
2275 
2276 static struct sctp_ifa *
2277 sctp_is_ifa_addr_preferred(struct sctp_ifa *ifa,
2278 			   uint8_t dest_is_loop,
2279 			   uint8_t dest_is_priv,
2280 			   sa_family_t fam)
2281 {
2282 	uint8_t dest_is_global = 0;
2283 	/* dest_is_priv is true if destination is a private address */
2284 	/* dest_is_loop is true if destination is a loopback address */
2285 
2286 	/**
2287 	 * Here we determine if it's a preferred address. A preferred address
2288 	 * means it is the same scope or higher scope than the destination.
2289 	 * L = loopback, P = private, G = global
2290 	 * -----------------------------------------
2291 	 *    src    |  dest | result
2292 	 *  ----------------------------------------
2293 	 *     L     |    L  |    yes
2294 	 *  -----------------------------------------
2295 	 *     P     |    L  |    yes-v4 no-v6
2296 	 *  -----------------------------------------
2297 	 *     G     |    L  |    yes-v4 no-v6
2298 	 *  -----------------------------------------
2299 	 *     L     |    P  |    no
2300 	 *  -----------------------------------------
2301 	 *     P     |    P  |    yes
2302 	 *  -----------------------------------------
2303 	 *     G     |    P  |    no
2304 	 *   -----------------------------------------
2305 	 *     L     |    G  |    no
2306 	 *   -----------------------------------------
2307 	 *     P     |    G  |    no
2308 	 *    -----------------------------------------
2309 	 *     G     |    G  |    yes
2310 	 *    -----------------------------------------
2311 	 */
2312 
2313 	if (ifa->address.sa.sa_family != fam) {
2314 		/* forget mis-matched family */
2315 		return (NULL);
2316 	}
2317 	if ((dest_is_priv == 0) && (dest_is_loop == 0)) {
2318 		dest_is_global = 1;
2319 	}
2320 	SCTPDBG(SCTP_DEBUG_OUTPUT2, "Is destination preferred:");
2321 	SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &ifa->address.sa);
2322 	/* Ok the address may be ok */
2323 #ifdef INET6
2324 	if (fam == AF_INET6) {
2325 		/* ok to use deprecated addresses? no, let's not! */
2326 		if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
2327 			SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:1\n");
2328 			return (NULL);
2329 		}
2330 		if (ifa->src_is_priv && !ifa->src_is_loop) {
2331 			if (dest_is_loop) {
2332 				SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:2\n");
2333 				return (NULL);
2334 			}
2335 		}
2336 		if (ifa->src_is_glob) {
2337 			if (dest_is_loop) {
2338 				SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:3\n");
2339 				return (NULL);
2340 			}
2341 		}
2342 	}
2343 #endif
2344 	/* Now that we know what is what, implement our table.
2345 	 * This could in theory be done slicker (it used to be), but this
2346 	 * is straightforward and easier to validate :-)
2347 	 */
2348 	SCTPDBG(SCTP_DEBUG_OUTPUT3, "src_loop:%d src_priv:%d src_glob:%d\n",
2349 		ifa->src_is_loop, ifa->src_is_priv, ifa->src_is_glob);
2350 	SCTPDBG(SCTP_DEBUG_OUTPUT3, "dest_loop:%d dest_priv:%d dest_glob:%d\n",
2351 		dest_is_loop, dest_is_priv, dest_is_global);
2352 
2353 	if ((ifa->src_is_loop) && (dest_is_priv)) {
2354 		SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:4\n");
2355 		return (NULL);
2356 	}
2357 	if ((ifa->src_is_glob) && (dest_is_priv)) {
2358 		SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:5\n");
2359 		return (NULL);
2360 	}
2361 	if ((ifa->src_is_loop) && (dest_is_global)) {
2362 		SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:6\n");
2363 		return (NULL);
2364 	}
2365 	if ((ifa->src_is_priv) && (dest_is_global)) {
2366 		SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:7\n");
2367 		return (NULL);
2368 	}
2369 	SCTPDBG(SCTP_DEBUG_OUTPUT3, "YES\n");
2370 	/* it's a preferred address */
2371 	return (ifa);
2372 }
2373 
2374 static struct sctp_ifa *
2375 sctp_is_ifa_addr_acceptable(struct sctp_ifa *ifa,
2376 			    uint8_t dest_is_loop,
2377 			    uint8_t dest_is_priv,
2378 			    sa_family_t fam)
2379 {
2380 	uint8_t dest_is_global = 0;
2381 
2382 	/**
2383 	 * Here we determine if it's an acceptable address. An acceptable
2384 	 * address means it is the same scope or higher scope, but we can
2385 	 * allow for NAT, which means it's ok to have a global dest and a
2386 	 * private src.
2387 	 *
2388 	 * L = loopback, P = private, G = global
2389 	 * -----------------------------------------
2390 	 *  src    |  dest | result
2391 	 * -----------------------------------------
2392 	 *   L     |   L   |    yes
2393 	 *  -----------------------------------------
2394 	 *   P     |   L   |    yes-v4 no-v6
2395 	 *  -----------------------------------------
2396 	 *   G     |   L   |    yes
2397 	 * -----------------------------------------
2398 	 *   L     |   P   |    no
2399 	 * -----------------------------------------
2400 	 *   P     |   P   |    yes
2401 	 * -----------------------------------------
2402 	 *   G     |   P   |    yes - May not work
2403 	 * -----------------------------------------
2404 	 *   L     |   G   |    no
2405 	 * -----------------------------------------
2406 	 *   P     |   G   |    yes - May not work
2407 	 * -----------------------------------------
2408 	 *   G     |   G   |    yes
2409 	 * -----------------------------------------
2410 	 */
2411 
2412 	if (ifa->address.sa.sa_family != fam) {
2413 		/* forget non-matching family */
2414 		SCTPDBG(SCTP_DEBUG_OUTPUT3, "ifa_fam:%d fam:%d\n",
2415 			ifa->address.sa.sa_family, fam);
2416 		return (NULL);
2417 	}
2418 	/* Ok the address may be ok */
2419 	SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, &ifa->address.sa);
2420 	SCTPDBG(SCTP_DEBUG_OUTPUT3, "dst_is_loop:%d dest_is_priv:%d\n",
2421 		dest_is_loop, dest_is_priv);
2422 	if ((dest_is_loop == 0) && (dest_is_priv == 0)) {
2423 		dest_is_global = 1;
2424 	}
2425 #ifdef INET6
2426 	if (fam == AF_INET6) {
2427 		/* ok to use deprecated addresses? */
2428 		if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
2429 			return (NULL);
2430 		}
2431 		if (ifa->src_is_priv) {
2432 			/* Special case, linklocal to loop */
2433 			if (dest_is_loop)
2434 				return (NULL);
2435 		}
2436 	}
2437 #endif
2438 	/*
2439 	 * Now that we know what is what, implement our table.
2440 	 * This could in theory be done slicker (it used to be), but this
2441 	 * is straightforward and easier to validate :-)
2442 	 */
2443 	SCTPDBG(SCTP_DEBUG_OUTPUT3, "ifa->src_is_loop:%d dest_is_priv:%d\n",
2444 		ifa->src_is_loop,
2445 		dest_is_priv);
2446 	if ((ifa->src_is_loop == 1) && (dest_is_priv)) {
2447 		return (NULL);
2448 	}
2449 	SCTPDBG(SCTP_DEBUG_OUTPUT3, "ifa->src_is_loop:%d dest_is_glob:%d\n",
2450 		ifa->src_is_loop,
2451 		dest_is_global);
2452 	if ((ifa->src_is_loop == 1) && (dest_is_global)) {
2453 		return (NULL);
2454 	}
2455 	SCTPDBG(SCTP_DEBUG_OUTPUT3, "address is acceptable\n");
2456 	/* it's an acceptable address */
2457 	return (ifa);
2458 }
2459 
2460 int
2461 sctp_is_addr_restricted(struct sctp_tcb *stcb, struct sctp_ifa *ifa)
2462 {
2463 	struct sctp_laddr *laddr;
2464 
2465 	if (stcb == NULL) {
2466 		/* There are no restrictions, no TCB :-) */
2467 		return (0);
2468 	}
2469 	LIST_FOREACH(laddr, &stcb->asoc.sctp_restricted_addrs, sctp_nxt_addr) {
2470 		if (laddr->ifa == NULL) {
2471 			SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n",
2472 				__func__);
2473 			continue;
2474 		}
2475 		if (laddr->ifa == ifa) {
2476 			/* Yes it is on the list */
2477 			return (1);
2478 		}
2479 	}
2480 	return (0);
2481 }
2482 
2483 
2484 int
2485 sctp_is_addr_in_ep(struct sctp_inpcb *inp, struct sctp_ifa *ifa)
2486 {
2487 	struct sctp_laddr *laddr;
2488 
2489 	if (ifa == NULL)
2490 		return (0);
2491 	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
2492 		if (laddr->ifa == NULL) {
2493 			SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n",
2494 				__func__);
2495 			continue;
2496 		}
2497 		if ((laddr->ifa == ifa) && laddr->action == 0)
2498 			/* same pointer */
2499 			return (1);
2500 	}
2501 	return (0);
2502 }
2503 
2504 
2505 
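/*
 * Source address selection for a bound-specific (non-bound-all)
 * endpoint: prefer a bound address on the interface the route will
 * emit on, then any preferred bound address, then any acceptable bound
 * address.  The returned ifa has its refcount bumped; NULL means no
 * bound address can serve as a source.
 */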
2506 static struct sctp_ifa *
2507 sctp_choose_boundspecific_inp(struct sctp_inpcb *inp,
2508 			      sctp_route_t *ro,
2509 			      uint32_t vrf_id,
2510 			      int non_asoc_addr_ok,
2511 			      uint8_t dest_is_priv,
2512 			      uint8_t dest_is_loop,
2513 			      sa_family_t fam)
2514 {
2515 	struct sctp_laddr *laddr, *starting_point;
2516 	void *ifn;
2517 	int resettotop = 0;
2518 	struct sctp_ifn *sctp_ifn;
2519 	struct sctp_ifa *sctp_ifa, *sifa;
2520 	struct sctp_vrf *vrf;
2521 	uint32_t ifn_index;
2522 
2523 	vrf = sctp_find_vrf(vrf_id);
2524 	if (vrf == NULL)
2525 		return (NULL);
2526 
2527 	ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
2528 	ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
2529 	sctp_ifn = sctp_find_ifn(ifn, ifn_index);
2530 	/*
2531 	 * First question: is the ifn we will emit on in our list? If so, we
2532 	 * want such an address. Note that we first look for a
2533 	 * preferred address.
2534 	 */
2535 	if (sctp_ifn) {
2536 		/* is a preferred one on the interface we route out? */
2537 		LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
2538 #if defined(__FreeBSD__) && !defined(__Userspace__)
2539 #ifdef INET
2540 			if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
2541 			    (prison_check_ip4(inp->ip_inp.inp.inp_cred,
2542 			                      &sctp_ifa->address.sin.sin_addr) != 0)) {
2543 				continue;
2544 			}
2545 #endif
2546 #ifdef INET6
2547 			if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
2548 			    (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2549 			                      &sctp_ifa->address.sin6.sin6_addr) != 0)) {
2550 				continue;
2551 			}
2552 #endif
2553 #endif
2554 			if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
2555 			    (non_asoc_addr_ok == 0))
2556 				continue;
2557 			sifa = sctp_is_ifa_addr_preferred(sctp_ifa,
2558 							  dest_is_loop,
2559 							  dest_is_priv, fam);
2560 			if (sifa == NULL)
2561 				continue;
2562 			if (sctp_is_addr_in_ep(inp, sifa)) {
2563 				atomic_add_int(&sifa->refcount, 1);
2564 				return (sifa);
2565 			}
2566 		}
2567 	}
2568 	/*
2569 	 * Ok, now we need to find one on the list of bound addresses.
2570 	 * We can't get one on the emitting interface, so let's first look
2571 	 * for a preferred one; failing that, an acceptable one; otherwise
2572 	 * we return NULL.
2573 	 */
2574 	starting_point = inp->next_addr_touse;
2575  once_again:
2576 	if (inp->next_addr_touse == NULL) {
2577 		inp->next_addr_touse = LIST_FIRST(&inp->sctp_addr_list);
2578 		resettotop = 1;
2579 	}
2580 	for (laddr = inp->next_addr_touse; laddr;
2581 	     laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
2582 		if (laddr->ifa == NULL) {
2583 			/* address has been removed */
2584 			continue;
2585 		}
2586 		if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2587 			/* address is being deleted */
2588 			continue;
2589 		}
2590 		sifa = sctp_is_ifa_addr_preferred(laddr->ifa, dest_is_loop,
2591 						  dest_is_priv, fam);
2592 		if (sifa == NULL)
2593 			continue;
2594 		atomic_add_int(&sifa->refcount, 1);
2595 		return (sifa);
2596 	}
2597 	if (resettotop == 0) {
2598 		inp->next_addr_touse = NULL;
2599 		goto once_again;
2600 	}
2601 
2602 	inp->next_addr_touse = starting_point;
2603 	resettotop = 0;
2604  once_again_too:
2605 	if (inp->next_addr_touse == NULL) {
2606 		inp->next_addr_touse = LIST_FIRST(&inp->sctp_addr_list);
2607 		resettotop = 1;
2608 	}
2609 
2610 	/* ok, what about an acceptable address in the inp */
2611 	for (laddr = inp->next_addr_touse; laddr;
2612 	     laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
2613 		if (laddr->ifa == NULL) {
2614 			/* address has been removed */
2615 			continue;
2616 		}
2617 		if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2618 			/* address is being deleted */
2619 			continue;
2620 		}
2621 		sifa = sctp_is_ifa_addr_acceptable(laddr->ifa, dest_is_loop,
2622 						   dest_is_priv, fam);
2623 		if (sifa == NULL)
2624 			continue;
2625 		atomic_add_int(&sifa->refcount, 1);
2626 		return (sifa);
2627 	}
2628 	if (resettotop == 0) {
2629 		inp->next_addr_touse = NULL;
2630 		goto once_again_too;
2631 	}
2632 
2633 	/*
2634 	 * No bound address can be a source for the destination; we are in
2635 	 * trouble.
2636 	 */
2637 	return (NULL);
2638 }
2639 
2640 
2641 
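/*
 * Like sctp_choose_boundspecific_inp(), but for an existing
 * association: addresses on the association's restricted list are
 * skipped (unless non_asoc_addr_ok is set and the address is still
 * pending), and the walk over the bound list resumes at
 * asoc.last_used_address so selections rotate.
 */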
2642 static struct sctp_ifa *
2643 sctp_choose_boundspecific_stcb(struct sctp_inpcb *inp,
2644 			       struct sctp_tcb *stcb,
2645 			       sctp_route_t *ro,
2646 			       uint32_t vrf_id,
2647 			       uint8_t dest_is_priv,
2648 			       uint8_t dest_is_loop,
2649 			       int non_asoc_addr_ok,
2650 			       sa_family_t fam)
2651 {
2652 	struct sctp_laddr *laddr, *starting_point;
2653 	void *ifn;
2654 	struct sctp_ifn *sctp_ifn;
2655 	struct sctp_ifa *sctp_ifa, *sifa;
2656 	uint8_t start_at_beginning = 0;
2657 	struct sctp_vrf *vrf;
2658 	uint32_t ifn_index;
2659 
2660 	/*
2661 	 * first question, is the ifn we will emit on in our list, if so, we
2662 	 * want that one.
2663 	 */
2664 	vrf = sctp_find_vrf(vrf_id);
2665 	if (vrf == NULL)
2666 		return (NULL);
2667 
2668 	ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
2669 	ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
2670 	sctp_ifn = sctp_find_ifn(ifn, ifn_index);
2671 
2672 	/*
2673 	 * first question, is the ifn we will emit on in our list?  If so,
2674 	 * we want that one. First we look for a preferred. Second, we go
2675 	 * for an acceptable.
2676 	 */
2677 	if (sctp_ifn) {
2678 		/* first try for a preferred address on the ep */
2679 		LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
2680 #if defined(__FreeBSD__) && !defined(__Userspace__)
2681 #ifdef INET
2682 			if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
2683 			    (prison_check_ip4(inp->ip_inp.inp.inp_cred,
2684 			                      &sctp_ifa->address.sin.sin_addr) != 0)) {
2685 				continue;
2686 			}
2687 #endif
2688 #ifdef INET6
2689 			if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
2690 			    (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2691 			                      &sctp_ifa->address.sin6.sin6_addr) != 0)) {
2692 				continue;
2693 			}
2694 #endif
2695 #endif
2696 			if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0))
2697 				continue;
2698 			if (sctp_is_addr_in_ep(inp, sctp_ifa)) {
2699 				sifa = sctp_is_ifa_addr_preferred(sctp_ifa, dest_is_loop, dest_is_priv, fam);
2700 				if (sifa == NULL)
2701 					continue;
2702 				if (((non_asoc_addr_ok == 0) &&
2703 				     (sctp_is_addr_restricted(stcb, sifa))) ||
2704 				    (non_asoc_addr_ok &&
2705 				     (sctp_is_addr_restricted(stcb, sifa)) &&
2706 				     (!sctp_is_addr_pending(stcb, sifa)))) {
2707 					/* on the no-no list */
2708 					continue;
2709 				}
2710 				atomic_add_int(&sifa->refcount, 1);
2711 				return (sifa);
2712 			}
2713 		}
2714 		/* next try for an acceptable address on the ep */
2715 		LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
2716 #if defined(__FreeBSD__) && !defined(__Userspace__)
2717 #ifdef INET
2718 			if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
2719 			    (prison_check_ip4(inp->ip_inp.inp.inp_cred,
2720 			                      &sctp_ifa->address.sin.sin_addr) != 0)) {
2721 				continue;
2722 			}
2723 #endif
2724 #ifdef INET6
2725 			if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
2726 			    (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2727 			                      &sctp_ifa->address.sin6.sin6_addr) != 0)) {
2728 				continue;
2729 			}
2730 #endif
2731 #endif
2732 			if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0))
2733 				continue;
2734 			if (sctp_is_addr_in_ep(inp, sctp_ifa)) {
2735 				sifa = sctp_is_ifa_addr_acceptable(sctp_ifa, dest_is_loop, dest_is_priv, fam);
2736 				if (sifa == NULL)
2737 					continue;
2738 				if (((non_asoc_addr_ok == 0) &&
2739 				     (sctp_is_addr_restricted(stcb, sifa))) ||
2740 				    (non_asoc_addr_ok &&
2741 				     (sctp_is_addr_restricted(stcb, sifa)) &&
2742 				     (!sctp_is_addr_pending(stcb, sifa)))) {
2743 					/* on the no-no list */
2744 					continue;
2745 				}
2746 				atomic_add_int(&sifa->refcount, 1);
2747 				return (sifa);
2748 			}
2749 		}
2750 
2751 	}
2752 	/*
2753 	 * If we can't find one like that, then we must look at all the
2754 	 * bound addresses and pick one: first a preferred one, then
2755 	 * an acceptable one.
2756 	 */
2757 	starting_point = stcb->asoc.last_used_address;
2758  sctp_from_the_top:
2759 	if (stcb->asoc.last_used_address == NULL) {
2760 		start_at_beginning = 1;
2761 		stcb->asoc.last_used_address = LIST_FIRST(&inp->sctp_addr_list);
2762 	}
2763 	/* search beginning with the last used address */
2764 	for (laddr = stcb->asoc.last_used_address; laddr;
2765 	     laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
2766 		if (laddr->ifa == NULL) {
2767 			/* address has been removed */
2768 			continue;
2769 		}
2770 		if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2771 			/* address is being deleted */
2772 			continue;
2773 		}
2774 		sifa = sctp_is_ifa_addr_preferred(laddr->ifa, dest_is_loop, dest_is_priv, fam);
2775 		if (sifa == NULL)
2776 			continue;
2777 		if (((non_asoc_addr_ok == 0) &&
2778 		     (sctp_is_addr_restricted(stcb, sifa))) ||
2779 		    (non_asoc_addr_ok &&
2780 		     (sctp_is_addr_restricted(stcb, sifa)) &&
2781 		     (!sctp_is_addr_pending(stcb, sifa)))) {
2782 			/* on the no-no list */
2783 			continue;
2784 		}
2785 		stcb->asoc.last_used_address = laddr;
2786 		atomic_add_int(&sifa->refcount, 1);
2787 		return (sifa);
2788 	}
2789 	if (start_at_beginning == 0) {
2790 		stcb->asoc.last_used_address = NULL;
2791 		goto sctp_from_the_top;
2792 	}
2793 	/* now try for any higher scope than the destination */
2794 	stcb->asoc.last_used_address = starting_point;
2795 	start_at_beginning = 0;
2796  sctp_from_the_top2:
2797 	if (stcb->asoc.last_used_address == NULL) {
2798 		start_at_beginning = 1;
2799 		stcb->asoc.last_used_address = LIST_FIRST(&inp->sctp_addr_list);
2800 	}
2801 	/* search beginning with the last used address */
2802 	for (laddr = stcb->asoc.last_used_address; laddr;
2803 	     laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
2804 		if (laddr->ifa == NULL) {
2805 			/* address has been removed */
2806 			continue;
2807 		}
2808 		if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2809 			/* address is being deleted */
2810 			continue;
2811 		}
2812 		sifa = sctp_is_ifa_addr_acceptable(laddr->ifa, dest_is_loop,
2813 						   dest_is_priv, fam);
2814 		if (sifa == NULL)
2815 			continue;
2816 		if (((non_asoc_addr_ok == 0) &&
2817 		     (sctp_is_addr_restricted(stcb, sifa))) ||
2818 		    (non_asoc_addr_ok &&
2819 		     (sctp_is_addr_restricted(stcb, sifa)) &&
2820 		     (!sctp_is_addr_pending(stcb, sifa)))) {
2821 			/* on the no-no list */
2822 			continue;
2823 		}
2824 		stcb->asoc.last_used_address = laddr;
2825 		atomic_add_int(&sifa->refcount, 1);
2826 		return (sifa);
2827 	}
2828 	if (start_at_beginning == 0) {
2829 		stcb->asoc.last_used_address = NULL;
2830 		goto sctp_from_the_top2;
2831 	}
2832 	return (NULL);
2833 }
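/*
 * Editor's note: for the subset-bound association case above, the selection
 * order is (1) a preferred address on the emit interface that is also bound
 * to the endpoint, (2) an acceptable address on that interface, (3) a
 * rotation over the bound list starting at asoc.last_used_address looking
 * for a preferred address, and (4) the same rotation accepting a merely
 * acceptable address.  Restricted addresses are skipped unless
 * non_asoc_addr_ok is set and the address is still pending.
 */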
2834 
2835 static struct sctp_ifa *
2836 sctp_select_nth_preferred_addr_from_ifn_boundall(struct sctp_ifn *ifn,
2837 #if defined(__FreeBSD__) && !defined(__Userspace__)
2838                                                  struct sctp_inpcb *inp,
2839 #else
2840                                                  struct sctp_inpcb *inp SCTP_UNUSED,
2841 #endif
2842 						 struct sctp_tcb *stcb,
2843 						 int non_asoc_addr_ok,
2844 						 uint8_t dest_is_loop,
2845 						 uint8_t dest_is_priv,
2846 						 int addr_wanted,
2847 						 sa_family_t fam,
2848 						 sctp_route_t *ro
2849 						 )
2850 {
2851 	struct sctp_ifa *ifa, *sifa;
2852 	int num_eligible_addr = 0;
2853 #ifdef INET6
2854 #ifdef SCTP_EMBEDDED_V6_SCOPE
2855 	struct sockaddr_in6 sin6, lsa6;
2856 
2857 	if (fam == AF_INET6) {
2858 		memcpy(&sin6, &ro->ro_dst, sizeof(struct sockaddr_in6));
2859 #ifdef SCTP_KAME
2860 		(void)sa6_recoverscope(&sin6);
2861 #else
2862 		(void)in6_recoverscope(&sin6, &sin6.sin6_addr, NULL);
2863 #endif  /* SCTP_KAME */
2864 	}
2865 #endif  /* SCTP_EMBEDDED_V6_SCOPE */
2866 #endif	/* INET6 */
2867 	LIST_FOREACH(ifa, &ifn->ifalist, next_ifa) {
2868 #if defined(__FreeBSD__) && !defined(__Userspace__)
2869 #ifdef INET
2870 		if ((ifa->address.sa.sa_family == AF_INET) &&
2871 		    (prison_check_ip4(inp->ip_inp.inp.inp_cred,
2872 		                      &ifa->address.sin.sin_addr) != 0)) {
2873 			continue;
2874 		}
2875 #endif
2876 #ifdef INET6
2877 		if ((ifa->address.sa.sa_family == AF_INET6) &&
2878 		    (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2879 		                      &ifa->address.sin6.sin6_addr) != 0)) {
2880 			continue;
2881 		}
2882 #endif
2883 #endif
2884 		if ((ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
2885 		    (non_asoc_addr_ok == 0))
2886 			continue;
2887 		sifa = sctp_is_ifa_addr_preferred(ifa, dest_is_loop,
2888 						  dest_is_priv, fam);
2889 		if (sifa == NULL)
2890 			continue;
2891 #ifdef INET6
2892 		if (fam == AF_INET6 &&
2893 		    dest_is_loop &&
2894 		    sifa->src_is_loop && sifa->src_is_priv) {
2895 			/* Don't allow fe80::1 to be a source on loopback ::1; we don't
2896 			 * list it to the peer, so using it would get us an abort.
2897 			 */
2898 			continue;
2899 		}
2900 #ifdef SCTP_EMBEDDED_V6_SCOPE
2901 		if (fam == AF_INET6 &&
2902 		    IN6_IS_ADDR_LINKLOCAL(&sifa->address.sin6.sin6_addr) &&
2903 		    IN6_IS_ADDR_LINKLOCAL(&sin6.sin6_addr)) {
2904 			/* link-local <-> link-local must belong to the same scope. */
2905 			memcpy(&lsa6, &sifa->address.sin6, sizeof(struct sockaddr_in6));
2906 #ifdef SCTP_KAME
2907 			(void)sa6_recoverscope(&lsa6);
2908 #else
2909 			(void)in6_recoverscope(&lsa6, &lsa6.sin6_addr, NULL);
2910 #endif  /* SCTP_KAME */
2911 			if (sin6.sin6_scope_id != lsa6.sin6_scope_id) {
2912 				continue;
2913 			}
2914 		}
2915 #endif  /* SCTP_EMBEDDED_V6_SCOPE */
2916 #endif	/* INET6 */
2917 
2918 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Userspace__)
2919 		/* Check if the IPv6 address matches the next-hop.
2920 		   In the mobility case, the old IPv6 address may not have been
2921 		   deleted from the interface, so the interface carries both the
2922 		   previous and the new address.  We should use the one
2923 		   corresponding to the next-hop.  (by micchie)
2924 		 */
2925 #ifdef INET6
2926 		if (stcb && fam == AF_INET6 &&
2927 		    sctp_is_mobility_feature_on(stcb->sctp_ep, SCTP_MOBILITY_BASE)) {
2928 			if (sctp_v6src_match_nexthop(&sifa->address.sin6, ro)
2929 			    == 0) {
2930 				continue;
2931 			}
2932 		}
2933 #endif
2934 #ifdef INET
2935 		/* Avoid topologically incorrect IPv4 address */
2936 		if (stcb && fam == AF_INET &&
2937 		    sctp_is_mobility_feature_on(stcb->sctp_ep, SCTP_MOBILITY_BASE)) {
2938 			if (sctp_v4src_match_nexthop(sifa, ro) == 0) {
2939 				continue;
2940 			}
2941 		}
2942 #endif
2943 #endif
2944 		if (stcb) {
2945 			if (sctp_is_address_in_scope(ifa, &stcb->asoc.scope, 0) == 0) {
2946 				continue;
2947 			}
2948 			if (((non_asoc_addr_ok == 0) &&
2949 			     (sctp_is_addr_restricted(stcb, sifa))) ||
2950 			    (non_asoc_addr_ok &&
2951 			     (sctp_is_addr_restricted(stcb, sifa)) &&
2952 			     (!sctp_is_addr_pending(stcb, sifa)))) {
2953 				/*
2954 				 * It is restricted for some reason..
2955 				 * probably not yet added.
2956 				 */
2957 				continue;
2958 			}
2959 		}
2960 		if (num_eligible_addr >= addr_wanted) {
2961 			return (sifa);
2962 		}
2963 		num_eligible_addr++;
2964 	}
2965 	return (NULL);
2966 }
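/*
 * Editor's note: the helper above returns the addr_wanted-th (0-based)
 * address on the given ifn that survives all of the filters: the jail
 * checks, SCTP_ADDR_DEFER_USE, the "preferred" test, the IPv6 link-local
 * scope match, the optional mobility next-hop match, and the
 * per-association scope/restriction checks.  It does not take a reference;
 * the caller bumps the refcount itself.
 */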
2967 
2968 
2969 static int
2970 sctp_count_num_preferred_boundall(struct sctp_ifn *ifn,
2971 #if defined(__FreeBSD__) && !defined(__Userspace__)
2972                                   struct sctp_inpcb *inp,
2973 #else
2974                                   struct sctp_inpcb *inp SCTP_UNUSED,
2975 #endif
2976 				  struct sctp_tcb *stcb,
2977 				  int non_asoc_addr_ok,
2978 				  uint8_t dest_is_loop,
2979 				  uint8_t dest_is_priv,
2980 				  sa_family_t fam)
2981 {
2982 	struct sctp_ifa *ifa, *sifa;
2983 	int num_eligible_addr = 0;
2984 
2985 	LIST_FOREACH(ifa, &ifn->ifalist, next_ifa) {
2986 #if defined(__FreeBSD__) && !defined(__Userspace__)
2987 #ifdef INET
2988 		if ((ifa->address.sa.sa_family == AF_INET) &&
2989 		    (prison_check_ip4(inp->ip_inp.inp.inp_cred,
2990 		                      &ifa->address.sin.sin_addr) != 0)) {
2991 			continue;
2992 		}
2993 #endif
2994 #ifdef INET6
2995 		if ((ifa->address.sa.sa_family == AF_INET6) &&
2996 		    (stcb != NULL) &&
2997 		    (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2998 		                      &ifa->address.sin6.sin6_addr) != 0)) {
2999 			continue;
3000 		}
3001 #endif
3002 #endif
3003 		if ((ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
3004 		    (non_asoc_addr_ok == 0)) {
3005 			continue;
3006 		}
3007 		sifa = sctp_is_ifa_addr_preferred(ifa, dest_is_loop,
3008 						  dest_is_priv, fam);
3009 		if (sifa == NULL) {
3010 			continue;
3011 		}
3012 		if (stcb) {
3013 			if (sctp_is_address_in_scope(ifa, &stcb->asoc.scope, 0) == 0) {
3014 				continue;
3015 			}
3016 			if (((non_asoc_addr_ok == 0) &&
3017 			     (sctp_is_addr_restricted(stcb, sifa))) ||
3018 			    (non_asoc_addr_ok &&
3019 			     (sctp_is_addr_restricted(stcb, sifa)) &&
3020 			     (!sctp_is_addr_pending(stcb, sifa)))) {
3021 				/*
3022 				 * It is restricted for some reason..
3023 				 * probably not yet added.
3024 				 */
3025 				continue;
3026 			}
3027 		}
3028 		num_eligible_addr++;
3029 	}
3030 	return (num_eligible_addr);
3031 }
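/*
 * Editor's note: this counter is the companion of
 * sctp_select_nth_preferred_addr_from_ifn_boundall() above; the two are used
 * together by sctp_choose_boundall() to round-robin over the preferred
 * addresses of an interface (count, clamp cur_addr_num, pick the nth, then
 * remember cur_addr_num + 1 in net->indx_of_eligible_next_to_use).  The
 * eligibility filters of the two routines are meant to line up, otherwise
 * the rotation index could point past the real number of candidates.
 */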
3032 
3033 static struct sctp_ifa *
3034 sctp_choose_boundall(struct sctp_inpcb *inp,
3035                      struct sctp_tcb *stcb,
3036 		     struct sctp_nets *net,
3037 		     sctp_route_t *ro,
3038 		     uint32_t vrf_id,
3039 		     uint8_t dest_is_priv,
3040 		     uint8_t dest_is_loop,
3041 		     int non_asoc_addr_ok,
3042 		     sa_family_t fam)
3043 {
3044 	int cur_addr_num = 0, num_preferred = 0;
3045 	void *ifn;
3046 	struct sctp_ifn *sctp_ifn, *looked_at = NULL, *emit_ifn;
3047 	struct sctp_ifa *sctp_ifa, *sifa;
3048 	uint32_t ifn_index;
3049 	struct sctp_vrf *vrf;
3050 #ifdef INET
3051 	int retried = 0;
3052 #endif
3053 
3054 	/*-
3055 	 * For boundall we can use any address in the association.
3056 	 * If non_asoc_addr_ok is set we can use any address (at least in
3057 	 * theory). So we look for preferred addresses first. If we find one,
3058 	 * we use it. Otherwise we next try to get an address on the
3059 	 * interface, which we should be able to do (unless non_asoc_addr_ok
3060 	 * is false and we are routed out that way). In these cases where we
3061 	 * can't use the address of the interface we go through all the
3062 	 * ifn's looking for an address we can use and fill that in. Punting
3063 	 * means we send back address 0, which will probably cause problems,
3064 	 * since IP will then fill in the address of the route ifn, which
3065 	 * we have probably already rejected.. i.e. here comes an
3066 	 * abort :-<.
3067 	 */
3068 	vrf = sctp_find_vrf(vrf_id);
3069 	if (vrf == NULL)
3070 		return (NULL);
3071 
3072 	ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
3073 	ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
3074 	SCTPDBG(SCTP_DEBUG_OUTPUT2,"ifn from route:%p ifn_index:%d\n", ifn, ifn_index);
3075 	emit_ifn = looked_at = sctp_ifn = sctp_find_ifn(ifn, ifn_index);
3076 	if (sctp_ifn == NULL) {
3077 		/* ?? We don't have this guy ?? */
3078 		SCTPDBG(SCTP_DEBUG_OUTPUT2,"No ifn emit interface?\n");
3079 		goto bound_all_plan_b;
3080 	}
3081 	SCTPDBG(SCTP_DEBUG_OUTPUT2,"ifn_index:%d name:%s is emit interface\n",
3082 		ifn_index, sctp_ifn->ifn_name);
3083 
3084 	if (net) {
3085 		cur_addr_num = net->indx_of_eligible_next_to_use;
3086 	}
3087 	num_preferred = sctp_count_num_preferred_boundall(sctp_ifn,
3088 							  inp, stcb,
3089 							  non_asoc_addr_ok,
3090 							  dest_is_loop,
3091 							  dest_is_priv, fam);
3092 	SCTPDBG(SCTP_DEBUG_OUTPUT2, "Found %d preferred source addresses for intf:%s\n",
3093 		num_preferred, sctp_ifn->ifn_name);
3094 	if (num_preferred == 0) {
3095 		/*
3096 		 * no eligible addresses, we must use some other interface
3097 		 * address if we can find one.
3098 		 */
3099 		goto bound_all_plan_b;
3100 	}
3101 	/*
3102 	 * Ok, we have num_preferred set with how many we can use; this
3103 	 * may vary from call to call due to addresses being deprecated,
3104 	 * etc.
3105 	 */
3106 	if (cur_addr_num >= num_preferred) {
3107 		cur_addr_num = 0;
3108 	}
3109 	/*
3110 	 * select the nth address from the list (where cur_addr_num is the
3111 	 * nth) and 0 is the first one, 1 is the second one etc...
3112 	 */
3113 	SCTPDBG(SCTP_DEBUG_OUTPUT2, "cur_addr_num:%d\n", cur_addr_num);
3114 
3115 	sctp_ifa = sctp_select_nth_preferred_addr_from_ifn_boundall(sctp_ifn, inp, stcb, non_asoc_addr_ok, dest_is_loop,
3116                                                                     dest_is_priv, cur_addr_num, fam, ro);
3117 
3118 	/* If sctp_ifa is NULL, something changed; fall back to plan B. */
3119 	if (sctp_ifa) {
3120 		atomic_add_int(&sctp_ifa->refcount, 1);
3121 		if (net) {
3122 			/* save off where the next one we will want */
3123 			net->indx_of_eligible_next_to_use = cur_addr_num + 1;
3124 		}
3125 		return (sctp_ifa);
3126 	}
3127 	/*
3128 	 * plan_b: Look at all interfaces and find a preferred address. If
3129 	 * no preferred address is found, fall through to plan_c.
3130 	 */
3131  bound_all_plan_b:
3132 	SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan B\n");
3133 	LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
3134 		SCTPDBG(SCTP_DEBUG_OUTPUT2, "Examine interface %s\n",
3135 			sctp_ifn->ifn_name);
3136 		if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
3137 			/* wrong base scope */
3138 			SCTPDBG(SCTP_DEBUG_OUTPUT2, "skip\n");
3139 			continue;
3140 		}
3141 		if ((sctp_ifn == looked_at) && looked_at) {
3142 			/* already looked at this guy */
3143 			SCTPDBG(SCTP_DEBUG_OUTPUT2, "already seen\n");
3144 			continue;
3145 		}
3146 		num_preferred = sctp_count_num_preferred_boundall(sctp_ifn, inp, stcb, non_asoc_addr_ok,
3147                                                                   dest_is_loop, dest_is_priv, fam);
3148 		SCTPDBG(SCTP_DEBUG_OUTPUT2,
3149 			"Found ifn:%p %d preferred source addresses\n",
3150 			ifn, num_preferred);
3151 		if (num_preferred == 0) {
3152 			/* None on this interface. */
3153 			SCTPDBG(SCTP_DEBUG_OUTPUT2, "No preferred -- skipping to next\n");
3154 			continue;
3155 		}
3156 		SCTPDBG(SCTP_DEBUG_OUTPUT2,
3157 			"num preferred:%d on interface:%p cur_addr_num:%d\n",
3158 			num_preferred, (void *)sctp_ifn, cur_addr_num);
3159 
3160 		/*
3161 		 * Ok, we have num_preferred set with how many we can
3162 		 * use; this may vary from call to call due to addresses
3163 		 * being deprecated, etc.
3164 		 */
3165 		if (cur_addr_num >= num_preferred) {
3166 			cur_addr_num = 0;
3167 		}
3168 		sifa = sctp_select_nth_preferred_addr_from_ifn_boundall(sctp_ifn, inp, stcb, non_asoc_addr_ok, dest_is_loop,
3169                                                                         dest_is_priv, cur_addr_num, fam, ro);
3170 		if (sifa == NULL)
3171 			continue;
3172 		if (net) {
3173 			net->indx_of_eligible_next_to_use = cur_addr_num + 1;
3174 			SCTPDBG(SCTP_DEBUG_OUTPUT2, "we selected %d\n",
3175 				cur_addr_num);
3176 			SCTPDBG(SCTP_DEBUG_OUTPUT2, "Source:");
3177 			SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &sifa->address.sa);
3178 			SCTPDBG(SCTP_DEBUG_OUTPUT2, "Dest:");
3179 			SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &net->ro._l_addr.sa);
3180 		}
3181 		atomic_add_int(&sifa->refcount, 1);
3182 		return (sifa);
3183 	}
3184 #ifdef INET
3185 again_with_private_addresses_allowed:
3186 #endif
3187 	/* plan_c: do we have an acceptable address on the emit interface */
3188 	sifa = NULL;
3189 	SCTPDBG(SCTP_DEBUG_OUTPUT2,"Trying Plan C: find acceptable on interface\n");
3190 	if (emit_ifn == NULL) {
3191 		SCTPDBG(SCTP_DEBUG_OUTPUT2,"Jump to Plan D - no emit_ifn\n");
3192 		goto plan_d;
3193 	}
3194 	LIST_FOREACH(sctp_ifa, &emit_ifn->ifalist, next_ifa) {
3195 		SCTPDBG(SCTP_DEBUG_OUTPUT2, "ifa:%p\n", (void *)sctp_ifa);
3196 #if defined(__FreeBSD__) && !defined(__Userspace__)
3197 #ifdef INET
3198 		if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
3199 		    (prison_check_ip4(inp->ip_inp.inp.inp_cred,
3200 		                      &sctp_ifa->address.sin.sin_addr) != 0)) {
3201 			SCTPDBG(SCTP_DEBUG_OUTPUT2,"Jailed\n");
3202 			continue;
3203 		}
3204 #endif
3205 #ifdef INET6
3206 		if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
3207 		    (prison_check_ip6(inp->ip_inp.inp.inp_cred,
3208 		                      &sctp_ifa->address.sin6.sin6_addr) != 0)) {
3209 			SCTPDBG(SCTP_DEBUG_OUTPUT2,"Jailed\n");
3210 			continue;
3211 		}
3212 #endif
3213 #endif
3214 		if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
3215 		    (non_asoc_addr_ok == 0)) {
3216 			SCTPDBG(SCTP_DEBUG_OUTPUT2,"Defer\n");
3217 			continue;
3218 		}
3219 		sifa = sctp_is_ifa_addr_acceptable(sctp_ifa, dest_is_loop,
3220 						   dest_is_priv, fam);
3221 		if (sifa == NULL) {
3222 			SCTPDBG(SCTP_DEBUG_OUTPUT2, "IFA not acceptable\n");
3223 			continue;
3224 		}
3225 		if (stcb) {
3226 			if (sctp_is_address_in_scope(sifa, &stcb->asoc.scope, 0) == 0) {
3227 				SCTPDBG(SCTP_DEBUG_OUTPUT2, "NOT in scope\n");
3228 				sifa = NULL;
3229 				continue;
3230 			}
3231 			if (((non_asoc_addr_ok == 0) &&
3232 			     (sctp_is_addr_restricted(stcb, sifa))) ||
3233 			    (non_asoc_addr_ok &&
3234 			     (sctp_is_addr_restricted(stcb, sifa)) &&
3235 			     (!sctp_is_addr_pending(stcb, sifa)))) {
3236 				/*
3237 				 * It is restricted for some
3238 				 * reason.. probably not yet added.
3239 				 */
3240 				SCTPDBG(SCTP_DEBUG_OUTPUT2, "Its restricted\n");
3241 				sifa = NULL;
3242 				continue;
3243 			}
3244 		}
3245 		atomic_add_int(&sifa->refcount, 1);
3246 		goto out;
3247 	}
3248  plan_d:
3249 	/*
3250 	 * plan_d: We are in trouble. No preferred address on the emit
3251 	 * interface, and not even a preferred address on any other
3252 	 * interface. Go out and see if we can find an acceptable address
3253 	 * somewhere amongst all interfaces.
3254 	 */
3255 	SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan D looked_at is %p\n", (void *)looked_at);
3256 	LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
3257 		if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
3258 			/* wrong base scope */
3259 			continue;
3260 		}
3261 		LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
3262 #if defined(__FreeBSD__) && !defined(__Userspace__)
3263 #ifdef INET
3264 			if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
3265 			    (prison_check_ip4(inp->ip_inp.inp.inp_cred,
3266 			                      &sctp_ifa->address.sin.sin_addr) != 0)) {
3267 				continue;
3268 			}
3269 #endif
3270 #ifdef INET6
3271 			if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
3272 			    (prison_check_ip6(inp->ip_inp.inp.inp_cred,
3273 			                      &sctp_ifa->address.sin6.sin6_addr) != 0)) {
3274 				continue;
3275 			}
3276 #endif
3277 #endif
3278 			if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
3279 			    (non_asoc_addr_ok == 0))
3280 				continue;
3281 			sifa = sctp_is_ifa_addr_acceptable(sctp_ifa,
3282 							   dest_is_loop,
3283 							   dest_is_priv, fam);
3284 			if (sifa == NULL)
3285 				continue;
3286 			if (stcb) {
3287 				if (sctp_is_address_in_scope(sifa, &stcb->asoc.scope, 0) == 0) {
3288 					sifa = NULL;
3289 					continue;
3290 				}
3291 				if (((non_asoc_addr_ok == 0) &&
3292 				     (sctp_is_addr_restricted(stcb, sifa))) ||
3293 				    (non_asoc_addr_ok &&
3294 				     (sctp_is_addr_restricted(stcb, sifa)) &&
3295 				     (!sctp_is_addr_pending(stcb, sifa)))) {
3296 					/*
3297 					 * It is restricted for some
3298 					 * reason.. probably not yet added.
3299 					 */
3300 					sifa = NULL;
3301 					continue;
3302 				}
3303 			}
3304 			goto out;
3305 		}
3306 	}
3307 #ifdef INET
3308 	if (stcb) {
3309 		if ((retried == 0) && (stcb->asoc.scope.ipv4_local_scope == 0)) {
3310 			stcb->asoc.scope.ipv4_local_scope = 1;
3311 			retried = 1;
3312 			goto again_with_private_addresses_allowed;
3313 		} else if (retried == 1) {
3314 			stcb->asoc.scope.ipv4_local_scope = 0;
3315 		}
3316 	}
3317 #endif
3318 out:
3319 #ifdef INET
3320 	if (sifa) {
3321 		if (retried == 1) {
3322 			LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
3323 				if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
3324 					/* wrong base scope */
3325 					continue;
3326 				}
3327 				LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
3328 					struct sctp_ifa *tmp_sifa;
3329 
3330 #if defined(__FreeBSD__) && !defined(__Userspace__)
3331 #ifdef INET
3332 					if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
3333 					    (prison_check_ip4(inp->ip_inp.inp.inp_cred,
3334 					                      &sctp_ifa->address.sin.sin_addr) != 0)) {
3335 						continue;
3336 					}
3337 #endif
3338 #ifdef INET6
3339 					if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
3340 					    (prison_check_ip6(inp->ip_inp.inp.inp_cred,
3341 					                      &sctp_ifa->address.sin6.sin6_addr) != 0)) {
3342 						continue;
3343 					}
3344 #endif
3345 #endif
3346 					if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
3347 					    (non_asoc_addr_ok == 0))
3348 						continue;
3349 					tmp_sifa = sctp_is_ifa_addr_acceptable(sctp_ifa,
3350 					                                       dest_is_loop,
3351 					                                       dest_is_priv, fam);
3352 					if (tmp_sifa == NULL) {
3353 						continue;
3354 					}
3355 					if (tmp_sifa == sifa) {
3356 						continue;
3357 					}
3358 					if (stcb) {
3359 						if (sctp_is_address_in_scope(tmp_sifa,
3360 						                             &stcb->asoc.scope, 0) == 0) {
3361 							continue;
3362 						}
3363 						if (((non_asoc_addr_ok == 0) &&
3364 						     (sctp_is_addr_restricted(stcb, tmp_sifa))) ||
3365 						    (non_asoc_addr_ok &&
3366 						     (sctp_is_addr_restricted(stcb, tmp_sifa)) &&
3367 						     (!sctp_is_addr_pending(stcb, tmp_sifa)))) {
3368 							/*
3369 							 * It is restricted for some
3370 							 * reason.. probably not yet added.
3371 							 */
3372 							continue;
3373 						}
3374 					}
3375 					if ((tmp_sifa->address.sin.sin_family == AF_INET) &&
3376 					    (IN4_ISPRIVATE_ADDRESS(&(tmp_sifa->address.sin.sin_addr)))) {
3377 						sctp_add_local_addr_restricted(stcb, tmp_sifa);
3378 					}
3379 				}
3380 			}
3381 		}
3382 		atomic_add_int(&sifa->refcount, 1);
3383 	}
3384 #endif
3385 	return (sifa);
3386 }
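/*
 * Editor's note: the bound-all selection above falls through four plans:
 * plan A uses a preferred address on the route's emit interface (rotating
 * via net->indx_of_eligible_next_to_use), plan B looks for a preferred
 * address on any other interface in the VRF, plan C accepts an acceptable
 * address on the emit interface, and plan D accepts an acceptable address
 * anywhere.  For IPv4 there is one final retry that temporarily enables
 * ipv4_local_scope; other private addresses that become visible that way
 * are added to the association's restricted list before returning.
 */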
3387 
3388 
3389 
3390 /* tcb may be NULL */
3391 struct sctp_ifa *
3392 sctp_source_address_selection(struct sctp_inpcb *inp,
3393 			      struct sctp_tcb *stcb,
3394 			      sctp_route_t *ro,
3395 			      struct sctp_nets *net,
3396 			      int non_asoc_addr_ok, uint32_t vrf_id)
3397 {
3398 	struct sctp_ifa *answer;
3399 	uint8_t dest_is_priv, dest_is_loop;
3400 	sa_family_t fam;
3401 #ifdef INET
3402 	struct sockaddr_in *to = (struct sockaddr_in *)&ro->ro_dst;
3403 #endif
3404 #ifdef INET6
3405 	struct sockaddr_in6 *to6 = (struct sockaddr_in6 *)&ro->ro_dst;
3406 #endif
3407 
3408 	/**
3409 	 * Rules:
3410 	 * - Find the route if needed, cache if I can.
3411 	 * - Look at interface address in route, Is it in the bound list. If so we
3412 	 *   have the best source.
3413 	 * - If not we must rotate amongst the addresses.
3414 	 *
3415 	 * Caveats and issues
3416 	 *
3417 	 * Do we need to pay attention to scope? We can have a private address
3418 	 * or a global address that we are sourcing from or sending to. So if
3419 	 * we draw it out:
3420 	 * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
3421 	 * For V4
3422 	 * ------------------------------------------
3423 	 *      source     *      dest  *  result
3424 	 * -----------------------------------------
3425 	 * <a>  Private    *    Global  *  NAT
3426 	 * -----------------------------------------
3427 	 * <b>  Private    *    Private *  No problem
3428 	 * -----------------------------------------
3429 	 * <c>  Global     *    Private *  Huh, How will this work?
3430 	 * -----------------------------------------
3431 	 * <d>  Global     *    Global  *  No Problem
3432 	 *------------------------------------------
3433 	 * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
3434 	 * For V6
3435 	 *------------------------------------------
3436 	 *      source     *      dest  *  result
3437 	 * -----------------------------------------
3438 	 * <a>  Linklocal  *    Global  *
3439 	 * -----------------------------------------
3440 	 * <b>  Linklocal  * Linklocal  *  No problem
3441 	 * -----------------------------------------
3442 	 * <c>  Global     * Linklocal  *  Huh, How will this work?
3443 	 * -----------------------------------------
3444 	 * <d>  Global     *    Global  *  No Problem
3445 	 *------------------------------------------
3446 	 * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
3447 	 *
3448 	 * And then we add to that what happens if there are multiple addresses
3449 	 * assigned to an interface. Remember the ifa on an ifn is a linked
3450 	 * list of addresses. So one interface can have more than one IP
3451 	 * address. What happens if we have both a private and a global
3452 	 * address? Do we then use the context of the destination to sort out
3453 	 * which one is best? And what about NATs? Sending P->G may get you a
3454 	 * NAT translation, or should you select, in preference, the G that is
3455 	 * on the interface?
3456 	 *
3457 	 * Decisions:
3458 	 *
3459 	 * - count the number of addresses on the interface.
3460 	 * - if it is one, no problem except case <c>.
3461 	 *   For <a> we will assume a NAT out there.
3462 	 * - if there is more than one, then we need to worry about scope P
3463 	 *   or G. We should prefer G -> G and P -> P if possible.
3464 	 *   Then, as a secondary choice, fall back to mixed types, with G->P
3465 	 *   being a last-ditch one.
3466 	 * - The above all works for bound-all, but for bound-specific we need
3467 	 *   to use the same concept, only considering the bound
3468 	 *   addresses. If the bound set is NOT assigned to the interface then
3469 	 *   we must use rotation amongst the bound addresses.
3470 	 */
3471 #if defined(__FreeBSD__) && !defined(__Userspace__)
3472 	if (ro->ro_nh == NULL) {
3473 #else
3474 	if (ro->ro_rt == NULL) {
3475 #endif
3476 		/*
3477 		 * Need a route to cache.
3478 		 */
3479 		SCTP_RTALLOC(ro, vrf_id, inp->fibnum);
3480 	}
3481 #if defined(__FreeBSD__) && !defined(__Userspace__)
3482 	if (ro->ro_nh == NULL) {
3483 #else
3484 	if (ro->ro_rt == NULL) {
3485 #endif
3486 		return (NULL);
3487 	}
3488 #if defined(_WIN32)
3489 	/* On Windows the sa_family is U_SHORT or ADDRESS_FAMILY */
3490 	fam = (sa_family_t)ro->ro_dst.sa_family;
3491 #else
3492 	fam = ro->ro_dst.sa_family;
3493 #endif
3494 	dest_is_priv = dest_is_loop = 0;
3495 	/* Setup our scopes for the destination */
3496 	switch (fam) {
3497 #ifdef INET
3498 	case AF_INET:
3499 		/* Scope based on outbound address */
3500 		if (IN4_ISLOOPBACK_ADDRESS(&to->sin_addr)) {
3501 			dest_is_loop = 1;
3502 			if (net != NULL) {
3503 				/* mark it as local */
3504 				net->addr_is_local = 1;
3505 			}
3506 		} else if ((IN4_ISPRIVATE_ADDRESS(&to->sin_addr))) {
3507 			dest_is_priv = 1;
3508 		}
3509 		break;
3510 #endif
3511 #ifdef INET6
3512 	case AF_INET6:
3513 		/* Scope based on outbound address */
3514 #if defined(_WIN32)
3515 		if (IN6_IS_ADDR_LOOPBACK(&to6->sin6_addr)) {
3516 #else
3517 		if (IN6_IS_ADDR_LOOPBACK(&to6->sin6_addr) ||
3518 		    SCTP_ROUTE_IS_REAL_LOOP(ro)) {
3519 #endif
3520 			/*
3521 			 * If the address is a loopback address, which
3522 			 * consists of "::1" OR "fe80::1%lo0", we are in
3523 			 * loopback scope. But we don't set dest_is_priv
3524 			 * (link-local addresses).
3525 			 */
3526 			dest_is_loop = 1;
3527 			if (net != NULL) {
3528 				/* mark it as local */
3529 				net->addr_is_local = 1;
3530 			}
3531 		} else if (IN6_IS_ADDR_LINKLOCAL(&to6->sin6_addr)) {
3532 			dest_is_priv = 1;
3533 		}
3534 		break;
3535 #endif
3536 	}
3537 	SCTPDBG(SCTP_DEBUG_OUTPUT2, "Select source addr for:");
3538 	SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)&ro->ro_dst);
3539 	SCTP_IPI_ADDR_RLOCK();
3540 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
3541 		/*
3542 		 * Bound all case
3543 		 */
3544 		answer = sctp_choose_boundall(inp, stcb, net, ro, vrf_id,
3545 					      dest_is_priv, dest_is_loop,
3546 					      non_asoc_addr_ok, fam);
3547 		SCTP_IPI_ADDR_RUNLOCK();
3548 		return (answer);
3549 	}
3550 	/*
3551 	 * Subset bound case
3552 	 */
3553 	if (stcb) {
3554 		answer = sctp_choose_boundspecific_stcb(inp, stcb, ro,
3555 							vrf_id,	dest_is_priv,
3556 							dest_is_loop,
3557 							non_asoc_addr_ok, fam);
3558 	} else {
3559 		answer = sctp_choose_boundspecific_inp(inp, ro, vrf_id,
3560 						       non_asoc_addr_ok,
3561 						       dest_is_priv,
3562 						       dest_is_loop, fam);
3563 	}
3564 	SCTP_IPI_ADDR_RUNLOCK();
3565 	return (answer);
3566 }
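/*
 * Editor's sketch: the typical caller pattern for
 * sctp_source_address_selection(), condensed from the IPv4 leg of
 * sctp_lowlevel_chunk_output() further down in this file.  All names come
 * from that code; the block is illustrative only and not compiled.
 */
#if 0
	if (net != NULL && out_of_asoc_ok == 0) {
		if (net->src_addr_selected == 0) {
			/* cache the chosen source address on the net */
			net->ro._s_addr = sctp_source_address_selection(inp,
			    stcb, ro, net, 0, vrf_id);
			net->src_addr_selected = 1;
		}
		if (net->ro._s_addr == NULL) {
			/* no usable source address / no route */
			return (EHOSTUNREACH);
		}
		ip->ip_src = net->ro._s_addr->address.sin.sin_addr;
	} else {
		struct sctp_ifa *_lsrc;

		_lsrc = sctp_source_address_selection(inp, stcb, ro, net,
		    out_of_asoc_ok, vrf_id);
		if (_lsrc == NULL) {
			return (EHOSTUNREACH);
		}
		ip->ip_src = _lsrc->address.sin.sin_addr;
		sctp_free_ifa(_lsrc);	/* drop the reference we were given */
	}
#endif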
3567 
3568 static int
3569 sctp_find_cmsg(int c_type, void *data, struct mbuf *control, size_t cpsize)
3570 {
3571 #if defined(_WIN32)
3572 	WSACMSGHDR cmh;
3573 #else
3574 	struct cmsghdr cmh;
3575 #endif
3576 	struct sctp_sndinfo sndinfo;
3577 	struct sctp_prinfo prinfo;
3578 	struct sctp_authinfo authinfo;
3579 	int tot_len, rem_len, cmsg_data_len, cmsg_data_off, off;
3580 	int found;
3581 
3582 	/*
3583 	 * Independent of how many mbufs, find the c_type inside the control
3584 	 * structure and copy out the data.
3585 	 */
3586 	found = 0;
3587 	tot_len = SCTP_BUF_LEN(control);
3588 	for (off = 0; off < tot_len; off += CMSG_ALIGN(cmh.cmsg_len)) {
3589 		rem_len = tot_len - off;
3590 		if (rem_len < (int)CMSG_ALIGN(sizeof(cmh))) {
3591 			/* There is not enough room for one more. */
3592 			return (found);
3593 		}
3594 		m_copydata(control, off, sizeof(cmh), (caddr_t)&cmh);
3595 		if (cmh.cmsg_len < CMSG_ALIGN(sizeof(cmh))) {
3596 			/* We don't have a complete CMSG header. */
3597 			return (found);
3598 		}
3599 		if ((cmh.cmsg_len > INT_MAX) || ((int)cmh.cmsg_len > rem_len)) {
3600 			/* We don't have the complete CMSG. */
3601 			return (found);
3602 		}
3603 		cmsg_data_len = (int)cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh));
3604 		cmsg_data_off = off + CMSG_ALIGN(sizeof(cmh));
3605 		if ((cmh.cmsg_level == IPPROTO_SCTP) &&
3606 		    ((c_type == cmh.cmsg_type) ||
3607 		     ((c_type == SCTP_SNDRCV) &&
3608 		      ((cmh.cmsg_type == SCTP_SNDINFO) ||
3609 		       (cmh.cmsg_type == SCTP_PRINFO) ||
3610 		       (cmh.cmsg_type == SCTP_AUTHINFO))))) {
3611 			if (c_type == cmh.cmsg_type) {
3612 				if (cpsize > INT_MAX) {
3613 					return (found);
3614 				}
3615 				if (cmsg_data_len < (int)cpsize) {
3616 					return (found);
3617 				}
3618 				/* It is exactly what we want. Copy it out. */
3619 				m_copydata(control, cmsg_data_off, (int)cpsize, (caddr_t)data);
3620 				return (1);
3621 			} else {
3622 				struct sctp_sndrcvinfo *sndrcvinfo;
3623 
3624 				sndrcvinfo = (struct sctp_sndrcvinfo *)data;
3625 				if (found == 0) {
3626 					if (cpsize < sizeof(struct sctp_sndrcvinfo)) {
3627 						return (found);
3628 					}
3629 					memset(sndrcvinfo, 0, sizeof(struct sctp_sndrcvinfo));
3630 				}
3631 				switch (cmh.cmsg_type) {
3632 				case SCTP_SNDINFO:
3633 					if (cmsg_data_len < (int)sizeof(struct sctp_sndinfo)) {
3634 						return (found);
3635 					}
3636 					m_copydata(control, cmsg_data_off, sizeof(struct sctp_sndinfo), (caddr_t)&sndinfo);
3637 					sndrcvinfo->sinfo_stream = sndinfo.snd_sid;
3638 					sndrcvinfo->sinfo_flags = sndinfo.snd_flags;
3639 					sndrcvinfo->sinfo_ppid = sndinfo.snd_ppid;
3640 					sndrcvinfo->sinfo_context = sndinfo.snd_context;
3641 					sndrcvinfo->sinfo_assoc_id = sndinfo.snd_assoc_id;
3642 					break;
3643 				case SCTP_PRINFO:
3644 					if (cmsg_data_len < (int)sizeof(struct sctp_prinfo)) {
3645 						return (found);
3646 					}
3647 					m_copydata(control, cmsg_data_off, sizeof(struct sctp_prinfo), (caddr_t)&prinfo);
3648 					if (prinfo.pr_policy != SCTP_PR_SCTP_NONE) {
3649 						sndrcvinfo->sinfo_timetolive = prinfo.pr_value;
3650 					} else {
3651 						sndrcvinfo->sinfo_timetolive = 0;
3652 					}
3653 					sndrcvinfo->sinfo_flags |= prinfo.pr_policy;
3654 					break;
3655 				case SCTP_AUTHINFO:
3656 					if (cmsg_data_len < (int)sizeof(struct sctp_authinfo)) {
3657 						return (found);
3658 					}
3659 					m_copydata(control, cmsg_data_off, sizeof(struct sctp_authinfo), (caddr_t)&authinfo);
3660 					sndrcvinfo->sinfo_keynumber_valid = 1;
3661 					sndrcvinfo->sinfo_keynumber = authinfo.auth_keynumber;
3662 					break;
3663 				default:
3664 					return (found);
3665 				}
3666 				found = 1;
3667 			}
3668 		}
3669 	}
3670 	return (found);
3671 }
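/*
 * Editor's sketch (an assumption about the userspace side, not compiled
 * here): what a sender typically places in msg_control so that the walker
 * above finds an SCTP_SNDINFO cmsg at level IPPROTO_SCTP.  The field names
 * match the copies performed above.
 */
#if 0
	struct sctp_sndinfo snd;
	struct msghdr msg;
	struct cmsghdr *scmsg;
	char cbuf[CMSG_SPACE(sizeof(struct sctp_sndinfo))];

	memset(&snd, 0, sizeof(snd));
	snd.snd_sid = 1;		/* stream number */
	snd.snd_ppid = htonl(42);	/* opaque to the stack */

	memset(&msg, 0, sizeof(msg));
	msg.msg_control = cbuf;
	msg.msg_controllen = sizeof(cbuf);
	scmsg = CMSG_FIRSTHDR(&msg);
	scmsg->cmsg_level = IPPROTO_SCTP;
	scmsg->cmsg_type = SCTP_SNDINFO;
	scmsg->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndinfo));
	memcpy(CMSG_DATA(scmsg), &snd, sizeof(snd));
	/* ...fill in msg_iov and call sendmsg() as usual... */
#endif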
3672 
3673 static int
3674 sctp_process_cmsgs_for_init(struct sctp_tcb *stcb, struct mbuf *control, int *error)
3675 {
3676 #if defined(_WIN32)
3677 	WSACMSGHDR cmh;
3678 #else
3679 	struct cmsghdr cmh;
3680 #endif
3681 	struct sctp_initmsg initmsg;
3682 #ifdef INET
3683 	struct sockaddr_in sin;
3684 #endif
3685 #ifdef INET6
3686 	struct sockaddr_in6 sin6;
3687 #endif
3688 	int tot_len, rem_len, cmsg_data_len, cmsg_data_off, off;
3689 
3690 	tot_len = SCTP_BUF_LEN(control);
3691 	for (off = 0; off < tot_len; off += CMSG_ALIGN(cmh.cmsg_len)) {
3692 		rem_len = tot_len - off;
3693 		if (rem_len < (int)CMSG_ALIGN(sizeof(cmh))) {
3694 			/* There is not enough room for one more. */
3695 			*error = EINVAL;
3696 			return (1);
3697 		}
3698 		m_copydata(control, off, sizeof(cmh), (caddr_t)&cmh);
3699 		if (cmh.cmsg_len < CMSG_ALIGN(sizeof(cmh))) {
3700 			/* We don't have a complete CMSG header. */
3701 			*error = EINVAL;
3702 			return (1);
3703 		}
3704 		if ((cmh.cmsg_len > INT_MAX) || ((int)cmh.cmsg_len > rem_len)) {
3705 			/* We don't have the complete CMSG. */
3706 			*error = EINVAL;
3707 			return (1);
3708 		}
3709 		cmsg_data_len = (int)cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh));
3710 		cmsg_data_off = off + CMSG_ALIGN(sizeof(cmh));
3711 		if (cmh.cmsg_level == IPPROTO_SCTP) {
3712 			switch (cmh.cmsg_type) {
3713 			case SCTP_INIT:
3714 				if (cmsg_data_len < (int)sizeof(struct sctp_initmsg)) {
3715 					*error = EINVAL;
3716 					return (1);
3717 				}
3718 				m_copydata(control, cmsg_data_off, sizeof(struct sctp_initmsg), (caddr_t)&initmsg);
3719 				if (initmsg.sinit_max_attempts)
3720 					stcb->asoc.max_init_times = initmsg.sinit_max_attempts;
3721 				if (initmsg.sinit_num_ostreams)
3722 					stcb->asoc.pre_open_streams = initmsg.sinit_num_ostreams;
3723 				if (initmsg.sinit_max_instreams)
3724 					stcb->asoc.max_inbound_streams = initmsg.sinit_max_instreams;
3725 				if (initmsg.sinit_max_init_timeo)
3726 					stcb->asoc.initial_init_rto_max = initmsg.sinit_max_init_timeo;
3727 				if (stcb->asoc.streamoutcnt < stcb->asoc.pre_open_streams) {
3728 					struct sctp_stream_out *tmp_str;
3729 					unsigned int i;
3730 #if defined(SCTP_DETAILED_STR_STATS)
3731 					int j;
3732 #endif
3733 
3734 					/* Default is NOT correct */
3735 					SCTPDBG(SCTP_DEBUG_OUTPUT1, "Ok, default:%d pre_open:%d\n",
3736 						stcb->asoc.streamoutcnt, stcb->asoc.pre_open_streams);
3737 					SCTP_TCB_UNLOCK(stcb);
3738 					SCTP_MALLOC(tmp_str,
3739 					            struct sctp_stream_out *,
3740 					            (stcb->asoc.pre_open_streams * sizeof(struct sctp_stream_out)),
3741 					            SCTP_M_STRMO);
3742 					SCTP_TCB_LOCK(stcb);
3743 					if (tmp_str != NULL) {
3744 						SCTP_FREE(stcb->asoc.strmout, SCTP_M_STRMO);
3745 						stcb->asoc.strmout = tmp_str;
3746 						stcb->asoc.strm_realoutsize = stcb->asoc.streamoutcnt = stcb->asoc.pre_open_streams;
3747 					} else {
3748 						stcb->asoc.pre_open_streams = stcb->asoc.streamoutcnt;
3749 					}
3750 					for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
3751 						TAILQ_INIT(&stcb->asoc.strmout[i].outqueue);
3752 						stcb->asoc.strmout[i].chunks_on_queues = 0;
3753 						stcb->asoc.strmout[i].next_mid_ordered = 0;
3754 						stcb->asoc.strmout[i].next_mid_unordered = 0;
3755 #if defined(SCTP_DETAILED_STR_STATS)
3756 						for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
3757 							stcb->asoc.strmout[i].abandoned_sent[j] = 0;
3758 							stcb->asoc.strmout[i].abandoned_unsent[j] = 0;
3759 						}
3760 #else
3761 						stcb->asoc.strmout[i].abandoned_sent[0] = 0;
3762 						stcb->asoc.strmout[i].abandoned_unsent[0] = 0;
3763 #endif
3764 						stcb->asoc.strmout[i].sid = i;
3765 						stcb->asoc.strmout[i].last_msg_incomplete = 0;
3766 						stcb->asoc.strmout[i].state = SCTP_STREAM_OPENING;
3767 						stcb->asoc.ss_functions.sctp_ss_init_stream(stcb, &stcb->asoc.strmout[i], NULL);
3768 					}
3769 				}
3770 				break;
3771 #ifdef INET
3772 			case SCTP_DSTADDRV4:
3773 				if (cmsg_data_len < (int)sizeof(struct in_addr)) {
3774 					*error = EINVAL;
3775 					return (1);
3776 				}
3777 				memset(&sin, 0, sizeof(struct sockaddr_in));
3778 				sin.sin_family = AF_INET;
3779 #ifdef HAVE_SIN_LEN
3780 				sin.sin_len = sizeof(struct sockaddr_in);
3781 #endif
3782 				sin.sin_port = stcb->rport;
3783 				m_copydata(control, cmsg_data_off, sizeof(struct in_addr), (caddr_t)&sin.sin_addr);
3784 				if ((sin.sin_addr.s_addr == INADDR_ANY) ||
3785 				    (sin.sin_addr.s_addr == INADDR_BROADCAST) ||
3786 				    IN_MULTICAST(ntohl(sin.sin_addr.s_addr))) {
3787 					*error = EINVAL;
3788 					return (1);
3789 				}
3790 				if (sctp_add_remote_addr(stcb, (struct sockaddr *)&sin, NULL, stcb->asoc.port,
3791 				                         SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
3792 					*error = ENOBUFS;
3793 					return (1);
3794 				}
3795 				break;
3796 #endif
3797 #ifdef INET6
3798 			case SCTP_DSTADDRV6:
3799 				if (cmsg_data_len < (int)sizeof(struct in6_addr)) {
3800 					*error = EINVAL;
3801 					return (1);
3802 				}
3803 				memset(&sin6, 0, sizeof(struct sockaddr_in6));
3804 				sin6.sin6_family = AF_INET6;
3805 #ifdef HAVE_SIN6_LEN
3806 				sin6.sin6_len = sizeof(struct sockaddr_in6);
3807 #endif
3808 				sin6.sin6_port = stcb->rport;
3809 				m_copydata(control, cmsg_data_off, sizeof(struct in6_addr), (caddr_t)&sin6.sin6_addr);
3810 				if (IN6_IS_ADDR_UNSPECIFIED(&sin6.sin6_addr) ||
3811 				    IN6_IS_ADDR_MULTICAST(&sin6.sin6_addr)) {
3812 					*error = EINVAL;
3813 					return (1);
3814 				}
3815 #ifdef INET
3816 				if (IN6_IS_ADDR_V4MAPPED(&sin6.sin6_addr)) {
3817 					in6_sin6_2_sin(&sin, &sin6);
3818 					if ((sin.sin_addr.s_addr == INADDR_ANY) ||
3819 					    (sin.sin_addr.s_addr == INADDR_BROADCAST) ||
3820 					    IN_MULTICAST(ntohl(sin.sin_addr.s_addr))) {
3821 						*error = EINVAL;
3822 						return (1);
3823 					}
3824 					if (sctp_add_remote_addr(stcb, (struct sockaddr *)&sin, NULL, stcb->asoc.port,
3825 					                         SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
3826 						*error = ENOBUFS;
3827 						return (1);
3828 					}
3829 				} else
3830 #endif
3831 					if (sctp_add_remote_addr(stcb, (struct sockaddr *)&sin6, NULL, stcb->asoc.port,
3832 					                         SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
3833 						*error = ENOBUFS;
3834 						return (1);
3835 					}
3836 				break;
3837 #endif
3838 			default:
3839 				break;
3840 			}
3841 		}
3842 	}
3843 	return (0);
3844 }
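/*
 * Editor's note: the routine above is used when an implicit association
 * setup carries ancillary data.  An SCTP_INIT cmsg can override the stream
 * counts and the INIT timers (growing asoc.strmout when pre_open_streams
 * exceeds the current streamoutcnt), while SCTP_DSTADDRV4/SCTP_DSTADDRV6
 * cmsgs add further remote addresses to the tcb; wildcard, broadcast and
 * multicast destinations are rejected with EINVAL.
 */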
3845 
3846 #if defined(INET) || defined(INET6)
3847 static struct sctp_tcb *
3848 sctp_findassociation_cmsgs(struct sctp_inpcb **inp_p,
3849                            uint16_t port,
3850                            struct mbuf *control,
3851                            struct sctp_nets **net_p,
3852                            int *error)
3853 {
3854 #if defined(_WIN32)
3855 	WSACMSGHDR cmh;
3856 #else
3857 	struct cmsghdr cmh;
3858 #endif
3859 	struct sctp_tcb *stcb;
3860 	struct sockaddr *addr;
3861 #ifdef INET
3862 	struct sockaddr_in sin;
3863 #endif
3864 #ifdef INET6
3865 	struct sockaddr_in6 sin6;
3866 #endif
3867 	int tot_len, rem_len, cmsg_data_len, cmsg_data_off, off;
3868 
3869 	tot_len = SCTP_BUF_LEN(control);
3870 	for (off = 0; off < tot_len; off += CMSG_ALIGN(cmh.cmsg_len)) {
3871 		rem_len = tot_len - off;
3872 		if (rem_len < (int)CMSG_ALIGN(sizeof(cmh))) {
3873 			/* There is not enough room for one more. */
3874 			*error = EINVAL;
3875 			return (NULL);
3876 		}
3877 		m_copydata(control, off, sizeof(cmh), (caddr_t)&cmh);
3878 		if (cmh.cmsg_len < CMSG_ALIGN(sizeof(cmh))) {
3879 			/* We don't have a complete CMSG header. */
3880 			*error = EINVAL;
3881 			return (NULL);
3882 		}
3883 		if ((cmh.cmsg_len > INT_MAX) || ((int)cmh.cmsg_len > rem_len)) {
3884 			/* We don't have the complete CMSG. */
3885 			*error = EINVAL;
3886 			return (NULL);
3887 		}
3888 		cmsg_data_len = (int)cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh));
3889 		cmsg_data_off = off + CMSG_ALIGN(sizeof(cmh));
3890 		if (cmh.cmsg_level == IPPROTO_SCTP) {
3891 			switch (cmh.cmsg_type) {
3892 #ifdef INET
3893 			case SCTP_DSTADDRV4:
3894 				if (cmsg_data_len < (int)sizeof(struct in_addr)) {
3895 					*error = EINVAL;
3896 					return (NULL);
3897 				}
3898 				memset(&sin, 0, sizeof(struct sockaddr_in));
3899 				sin.sin_family = AF_INET;
3900 #ifdef HAVE_SIN_LEN
3901 				sin.sin_len = sizeof(struct sockaddr_in);
3902 #endif
3903 				sin.sin_port = port;
3904 				m_copydata(control, cmsg_data_off, sizeof(struct in_addr), (caddr_t)&sin.sin_addr);
3905 				addr = (struct sockaddr *)&sin;
3906 				break;
3907 #endif
3908 #ifdef INET6
3909 			case SCTP_DSTADDRV6:
3910 				if (cmsg_data_len < (int)sizeof(struct in6_addr)) {
3911 					*error = EINVAL;
3912 					return (NULL);
3913 				}
3914 				memset(&sin6, 0, sizeof(struct sockaddr_in6));
3915 				sin6.sin6_family = AF_INET6;
3916 #ifdef HAVE_SIN6_LEN
3917 				sin6.sin6_len = sizeof(struct sockaddr_in6);
3918 #endif
3919 				sin6.sin6_port = port;
3920 				m_copydata(control, cmsg_data_off, sizeof(struct in6_addr), (caddr_t)&sin6.sin6_addr);
3921 #ifdef INET
3922 				if (IN6_IS_ADDR_V4MAPPED(&sin6.sin6_addr)) {
3923 					in6_sin6_2_sin(&sin, &sin6);
3924 					addr = (struct sockaddr *)&sin;
3925 				} else
3926 #endif
3927 					addr = (struct sockaddr *)&sin6;
3928 				break;
3929 #endif
3930 			default:
3931 				addr = NULL;
3932 				break;
3933 			}
3934 			if (addr) {
3935 				stcb = sctp_findassociation_ep_addr(inp_p, addr, net_p, NULL, NULL);
3936 				if (stcb != NULL) {
3937 					return (stcb);
3938 				}
3939 			}
3940 		}
3941 	}
3942 	return (NULL);
3943 }
3944 #endif
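/*
 * Editor's note: sctp_findassociation_cmsgs() above resolves any
 * SCTP_DSTADDRV4/SCTP_DSTADDRV6 cmsg to an existing association via
 * sctp_findassociation_ep_addr(), converting an IPv4-mapped IPv6 address
 * back to plain IPv4 first; the first match wins.
 */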
3945 
3946 static struct mbuf *
3947 sctp_add_cookie(struct mbuf *init, int init_offset,
3948     struct mbuf *initack, int initack_offset, struct sctp_state_cookie *stc_in, uint8_t **signature)
3949 {
3950 	struct mbuf *copy_init, *copy_initack, *m_at, *sig, *mret;
3951 	struct sctp_state_cookie *stc;
3952 	struct sctp_paramhdr *ph;
3953 	uint16_t cookie_sz;
3954 
3955 	mret = sctp_get_mbuf_for_msg((sizeof(struct sctp_state_cookie) +
3956 				      sizeof(struct sctp_paramhdr)), 0,
3957 				     M_NOWAIT, 1, MT_DATA);
3958 	if (mret == NULL) {
3959 		return (NULL);
3960 	}
3961 	copy_init = SCTP_M_COPYM(init, init_offset, M_COPYALL, M_NOWAIT);
3962 	if (copy_init == NULL) {
3963 		sctp_m_freem(mret);
3964 		return (NULL);
3965 	}
3966 #ifdef SCTP_MBUF_LOGGING
3967 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
3968 		sctp_log_mbc(copy_init, SCTP_MBUF_ICOPY);
3969 	}
3970 #endif
3971 	copy_initack = SCTP_M_COPYM(initack, initack_offset, M_COPYALL,
3972 	    M_NOWAIT);
3973 	if (copy_initack == NULL) {
3974 		sctp_m_freem(mret);
3975 		sctp_m_freem(copy_init);
3976 		return (NULL);
3977 	}
3978 #ifdef SCTP_MBUF_LOGGING
3979 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
3980 		sctp_log_mbc(copy_initack, SCTP_MBUF_ICOPY);
3981 	}
3982 #endif
3983 	/* easy side: we just drop it on the end */
3984 	ph = mtod(mret, struct sctp_paramhdr *);
3985 	SCTP_BUF_LEN(mret) = sizeof(struct sctp_state_cookie) +
3986 	    sizeof(struct sctp_paramhdr);
3987 	stc = (struct sctp_state_cookie *)((caddr_t)ph +
3988 	    sizeof(struct sctp_paramhdr));
3989 	ph->param_type = htons(SCTP_STATE_COOKIE);
3990 	ph->param_length = 0;	/* fill in at the end */
3991 	/* Fill in the stc cookie data */
3992 	memcpy(stc, stc_in, sizeof(struct sctp_state_cookie));
3993 
3994 	/* tack the INIT and then the INIT-ACK onto the chain */
3995 	cookie_sz = 0;
3996 	for (m_at = mret; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
3997 		cookie_sz += SCTP_BUF_LEN(m_at);
3998 		if (SCTP_BUF_NEXT(m_at) == NULL) {
3999 			SCTP_BUF_NEXT(m_at) = copy_init;
4000 			break;
4001 		}
4002 	}
4003 	for (m_at = copy_init; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
4004 		cookie_sz += SCTP_BUF_LEN(m_at);
4005 		if (SCTP_BUF_NEXT(m_at) == NULL) {
4006 			SCTP_BUF_NEXT(m_at) = copy_initack;
4007 			break;
4008 		}
4009 	}
4010 	for (m_at = copy_initack; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
4011 		cookie_sz += SCTP_BUF_LEN(m_at);
4012 		if (SCTP_BUF_NEXT(m_at) == NULL) {
4013 			break;
4014 		}
4015 	}
4016 	sig = sctp_get_mbuf_for_msg(SCTP_SIGNATURE_SIZE, 0, M_NOWAIT, 1, MT_DATA);
4017 	if (sig == NULL) {
4018 		/* no space, so free the entire chain */
4019 		sctp_m_freem(mret);
4020 		return (NULL);
4021 	}
4022 	SCTP_BUF_NEXT(m_at) = sig;
4023 	SCTP_BUF_LEN(sig) = SCTP_SIGNATURE_SIZE;
4024 	cookie_sz += SCTP_SIGNATURE_SIZE;
4025 	ph->param_length = htons(cookie_sz);
4026 	*signature = (uint8_t *)mtod(sig, caddr_t);
4027 	memset(*signature, 0, SCTP_SIGNATURE_SIZE);
4028 	return (mret);
4029 }
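/*
 * Editor's note: the state cookie built above is laid out as a parameter
 * header of type SCTP_STATE_COOKIE, followed by the sctp_state_cookie data,
 * a copy of the peer's INIT, a copy of our INIT-ACK, and finally
 * SCTP_SIGNATURE_SIZE bytes that are zeroed here and filled in later via
 * the *signature pointer; param_length is set to cover the whole chain.
 */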
4030 
4031 static uint8_t
4032 sctp_get_ect(struct sctp_tcb *stcb)
4033 {
4034 	if ((stcb != NULL) && (stcb->asoc.ecn_supported == 1)) {
4035 		return (SCTP_ECT0_BIT);
4036 	} else {
4037 		return (0);
4038 	}
4039 }
4040 
4041 #if defined(INET) || defined(INET6)
4042 static void
4043 sctp_handle_no_route(struct sctp_tcb *stcb,
4044                      struct sctp_nets *net,
4045                      int so_locked)
4046 {
4047 	SCTPDBG(SCTP_DEBUG_OUTPUT1, "dropped packet - no valid source addr\n");
4048 
4049 	if (net) {
4050 		SCTPDBG(SCTP_DEBUG_OUTPUT1, "Destination was ");
4051 		SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT1, &net->ro._l_addr.sa);
4052 		if (net->dest_state & SCTP_ADDR_CONFIRMED) {
4053 			if ((net->dest_state & SCTP_ADDR_REACHABLE) && stcb) {
4054 				SCTPDBG(SCTP_DEBUG_OUTPUT1, "no route takes interface %p down\n", (void *)net);
4055 				sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN,
4056 			                        stcb, 0,
4057 			                        (void *)net,
4058 			                        so_locked);
4059 				net->dest_state &= ~SCTP_ADDR_REACHABLE;
4060 				net->dest_state &= ~SCTP_ADDR_PF;
4061 			}
4062 		}
4063 		if (stcb) {
4064 			if (net == stcb->asoc.primary_destination) {
4065 				/* need a new primary */
4066 				struct sctp_nets *alt;
4067 
4068 				alt = sctp_find_alternate_net(stcb, net, 0);
4069 				if (alt != net) {
4070 					if (stcb->asoc.alternate) {
4071 						sctp_free_remote_addr(stcb->asoc.alternate);
4072 					}
4073 					stcb->asoc.alternate = alt;
4074 					atomic_add_int(&stcb->asoc.alternate->ref_count, 1);
4075 					if (net->ro._s_addr) {
4076 						sctp_free_ifa(net->ro._s_addr);
4077 						net->ro._s_addr = NULL;
4078 					}
4079 					net->src_addr_selected = 0;
4080 				}
4081 			}
4082 		}
4083 	}
4084 }
4085 #endif
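/*
 * Editor's note: sctp_handle_no_route() above is the common cleanup when
 * source-address selection fails: it notifies the ULP that the interface is
 * down, clears SCTP_ADDR_REACHABLE/SCTP_ADDR_PF on the net, and, if the
 * failing net was the primary destination, installs an alternate and drops
 * the cached source address so it gets re-selected later.
 */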
4086 
4087 static int
4088 sctp_lowlevel_chunk_output(struct sctp_inpcb *inp,
4089     struct sctp_tcb *stcb,	/* may be NULL */
4090     struct sctp_nets *net,
4091     struct sockaddr *to,
4092     struct mbuf *m,
4093     uint32_t auth_offset,
4094     struct sctp_auth_chunk *auth,
4095     uint16_t auth_keyid,
4096     int nofragment_flag,
4097     int ecn_ok,
4098     int out_of_asoc_ok,
4099     uint16_t src_port,
4100     uint16_t dest_port,
4101     uint32_t v_tag,
4102     uint16_t port,
4103     union sctp_sockstore *over_addr,
4104 #if defined(__FreeBSD__) && !defined(__Userspace__)
4105     uint8_t mflowtype, uint32_t mflowid,
4106 #endif
4107 int so_locked)
4108 /* nofragment_flag to tell if IP_DF should be set (IPv4 only) */
4109 {
4110 	/**
4111 	 * Given a mbuf chain (via SCTP_BUF_NEXT()) that holds a packet header
4112 	 * WITH an SCTPHDR but no IP header, endpoint inp and sa structure:
4113 	 * - fill in the HMAC digest of any AUTH chunk in the packet.
4114 	 * - calculate and fill in the SCTP checksum.
4115 	 * - prepend an IP header.
4116 	 * - if bound-all, use INADDR_ANY.
4117 	 * - if bound-specific, do source address selection.
4118 	 * - set the fragmentation option for IPv4.
4119 	 * - On return from IP output, check/adjust mtu size of output
4120 	 *   interface and smallest_mtu size as well.
4121 	 */
4122 	/* Will need ifdefs around this */
4123 	struct mbuf *newm;
4124 	struct sctphdr *sctphdr;
4125 	int packet_length;
4126 	int ret;
4127 #if defined(INET) || defined(INET6)
4128 	uint32_t vrf_id;
4129 #endif
4130 #if defined(INET) || defined(INET6)
4131 	struct mbuf *o_pak;
4132 	sctp_route_t *ro = NULL;
4133 	struct udphdr *udp = NULL;
4134 #endif
4135 	uint8_t tos_value;
4136 #if defined(__APPLE__) && !defined(__Userspace__)
4137 	struct socket *so = NULL;
4138 #endif
4139 
4140 #if defined(__APPLE__) && !defined(__Userspace__)
4141 	if (so_locked) {
4142 		sctp_lock_assert(SCTP_INP_SO(inp));
4143 		SCTP_TCB_LOCK_ASSERT(stcb);
4144 	} else {
4145 		sctp_unlock_assert(SCTP_INP_SO(inp));
4146 	}
4147 #endif
4148 	if ((net) && (net->dest_state & SCTP_ADDR_OUT_OF_SCOPE)) {
4149 		SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
4150 		sctp_m_freem(m);
4151 		return (EFAULT);
4152 	}
4153 #if defined(INET) || defined(INET6)
4154 	if (stcb) {
4155 		vrf_id = stcb->asoc.vrf_id;
4156 	} else {
4157 		vrf_id = inp->def_vrf_id;
4158 	}
4159 #endif
4160 	/* fill in the HMAC digest for any AUTH chunk in the packet */
4161 	if ((auth != NULL) && (stcb != NULL)) {
4162 		sctp_fill_hmac_digest_m(m, auth_offset, auth, stcb, auth_keyid);
4163 	}
4164 
4165 	if (net) {
4166 		tos_value = net->dscp;
4167 	} else if (stcb) {
4168 		tos_value = stcb->asoc.default_dscp;
4169 	} else {
4170 		tos_value = inp->sctp_ep.default_dscp;
4171 	}
4172 
4173 	switch (to->sa_family) {
4174 #ifdef INET
4175 	case AF_INET:
4176 	{
4177 		struct ip *ip = NULL;
4178 		sctp_route_t iproute;
4179 		int len;
4180 
4181 		len = SCTP_MIN_V4_OVERHEAD;
4182 		if (port) {
4183 			len += sizeof(struct udphdr);
4184 		}
4185 		newm = sctp_get_mbuf_for_msg(len, 1, M_NOWAIT, 1, MT_DATA);
4186 		if (newm == NULL) {
4187 			sctp_m_freem(m);
4188 			SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
4189 			return (ENOMEM);
4190 		}
4191 		SCTP_ALIGN_TO_END(newm, len);
4192 		SCTP_BUF_LEN(newm) = len;
4193 		SCTP_BUF_NEXT(newm) = m;
4194 		m = newm;
4195 #if defined(__FreeBSD__) && !defined(__Userspace__)
4196 		if (net != NULL) {
4197 			m->m_pkthdr.flowid = net->flowid;
4198 			M_HASHTYPE_SET(m, net->flowtype);
4199 		} else {
4200 			m->m_pkthdr.flowid = mflowid;
4201 			M_HASHTYPE_SET(m, mflowtype);
4202  		}
4203 #endif
4204 		packet_length = sctp_calculate_len(m);
4205 		ip = mtod(m, struct ip *);
4206 		ip->ip_v = IPVERSION;
4207 		ip->ip_hl = (sizeof(struct ip) >> 2);
4208 		if (tos_value == 0) {
4209 			/*
4210 			 * This means especially, that it is not set at the
4211 			 * SCTP layer. So use the value from the IP layer.
4212 			 */
4213 			tos_value = inp->ip_inp.inp.inp_ip_tos;
4214 		}
4215 		tos_value &= 0xfc;
4216 		if (ecn_ok) {
4217 			tos_value |= sctp_get_ect(stcb);
4218 		}
4219 		if ((nofragment_flag) && (port == 0)) {
4220 #if defined(__FreeBSD__) && !defined(__Userspace__)
4221 			ip->ip_off = htons(IP_DF);
4222 #elif defined(WITH_CONVERT_IP_OFF) || defined(__APPLE__)
4223 			ip->ip_off = IP_DF;
4224 #else
4225 			ip->ip_off = htons(IP_DF);
4226 #endif
4227 		} else {
4228 #if defined(__FreeBSD__) && !defined(__Userspace__)
4229 			ip->ip_off = htons(0);
4230 #else
4231 			ip->ip_off = 0;
4232 #endif
4233 		}
4234 #if defined(__Userspace__)
4235 		ip->ip_id = htons(SCTP_IP_ID(inp)++);
4236 #elif defined(__FreeBSD__)
4237 		/* FreeBSD has a function for ip_id's */
4238 		ip_fillid(ip);
4239 #elif defined(__APPLE__)
4240 #if RANDOM_IP_ID
4241 		ip->ip_id = ip_randomid();
4242 #else
4243 		ip->ip_id = htons(ip_id++);
4244 #endif
4245 #else
4246 		ip->ip_id = SCTP_IP_ID(inp)++;
4247 #endif
4248 
4249 		ip->ip_ttl = inp->ip_inp.inp.inp_ip_ttl;
4250 #if defined(__FreeBSD__) && !defined(__Userspace__)
4251 		ip->ip_len = htons(packet_length);
4252 #else
4253 		ip->ip_len = packet_length;
4254 #endif
4255 		ip->ip_tos = tos_value;
4256 		if (port) {
4257 			ip->ip_p = IPPROTO_UDP;
4258 		} else {
4259 			ip->ip_p = IPPROTO_SCTP;
4260 		}
4261 		ip->ip_sum = 0;
4262 		if (net == NULL) {
4263 			ro = &iproute;
4264 			memset(&iproute, 0, sizeof(iproute));
4265 #ifdef HAVE_SA_LEN
4266 			memcpy(&ro->ro_dst, to, to->sa_len);
4267 #else
4268 			memcpy(&ro->ro_dst, to, sizeof(struct sockaddr_in));
4269 #endif
4270 		} else {
4271 			ro = (sctp_route_t *)&net->ro;
4272 		}
4273 		/* Now the address selection part */
4274 		ip->ip_dst.s_addr = ((struct sockaddr_in *)to)->sin_addr.s_addr;
4275 
4276 		/* call the routine to select the src address */
4277 		if (net && out_of_asoc_ok == 0) {
4278 			if (net->ro._s_addr && (net->ro._s_addr->localifa_flags & (SCTP_BEING_DELETED|SCTP_ADDR_IFA_UNUSEABLE))) {
4279 				sctp_free_ifa(net->ro._s_addr);
4280 				net->ro._s_addr = NULL;
4281 				net->src_addr_selected = 0;
4282 #if defined(__FreeBSD__) && !defined(__Userspace__)
4283 				RO_NHFREE(ro);
4284 #else
4285 				if (ro->ro_rt) {
4286 					RTFREE(ro->ro_rt);
4287 					ro->ro_rt = NULL;
4288 				}
4289 #endif
4290 			}
4291 			if (net->src_addr_selected == 0) {
4292 				/* Cache the source address */
4293 				net->ro._s_addr = sctp_source_address_selection(inp,stcb,
4294 										ro, net, 0,
4295 										vrf_id);
4296 				net->src_addr_selected = 1;
4297 			}
4298 			if (net->ro._s_addr == NULL) {
4299 				/* No route to host */
4300 				net->src_addr_selected = 0;
4301 				sctp_handle_no_route(stcb, net, so_locked);
4302 				SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4303 				sctp_m_freem(m);
4304 				return (EHOSTUNREACH);
4305 			}
4306 			ip->ip_src = net->ro._s_addr->address.sin.sin_addr;
4307 		} else {
4308 			if (over_addr == NULL) {
4309 				struct sctp_ifa *_lsrc;
4310 
4311 				_lsrc = sctp_source_address_selection(inp, stcb, ro,
4312 				                                      net,
4313 				                                      out_of_asoc_ok,
4314 				                                      vrf_id);
4315 				if (_lsrc == NULL) {
4316 					sctp_handle_no_route(stcb, net, so_locked);
4317 					SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4318 					sctp_m_freem(m);
4319 					return (EHOSTUNREACH);
4320 				}
4321 				ip->ip_src = _lsrc->address.sin.sin_addr;
4322 				sctp_free_ifa(_lsrc);
4323 			} else {
4324 				ip->ip_src = over_addr->sin.sin_addr;
4325 				SCTP_RTALLOC(ro, vrf_id, inp->fibnum);
4326 			}
4327 		}
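		/*
		 * If UDP encapsulation is in use (RFC 6951), insert a UDP
		 * header between the IP header and the SCTP common header,
		 * using the configured tunneling port as the source port.
		 */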
4328 		if (port) {
4329 			if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) {
4330 				sctp_handle_no_route(stcb, net, so_locked);
4331 				SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4332 				sctp_m_freem(m);
4333 				return (EHOSTUNREACH);
4334 			}
4335 			udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip));
4336 			udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
4337 			udp->uh_dport = port;
4338 			udp->uh_ulen = htons((uint16_t)(packet_length - sizeof(struct ip)));
4339 #if !defined(__Userspace__)
4340 #if defined(__FreeBSD__)
4341 			if (V_udp_cksum) {
4342 				udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
4343 			} else {
4344 				udp->uh_sum = 0;
4345 			}
4346 #else
4347 			udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
4348 #endif
4349 #else
4350 			udp->uh_sum = 0;
4351 #endif
4352 			sctphdr = (struct sctphdr *)((caddr_t)udp + sizeof(struct udphdr));
4353 		} else {
4354 			sctphdr = (struct sctphdr *)((caddr_t)ip + sizeof(struct ip));
4355 		}
4356 
4357 		sctphdr->src_port = src_port;
4358 		sctphdr->dest_port = dest_port;
4359 		sctphdr->v_tag = v_tag;
4360 		sctphdr->checksum = 0;
4361 
4362 		/*
4363 		 * If source address selection fails and we find no route
4364 		 * then the ip_output should fail as well with a
4365 		 * NO_ROUTE_TO_HOST type error. We probably should catch
4366 		 * that somewhere and abort the association right away
4367 		 * (assuming this is an INIT being sent).
4368 		 */
4369 #if defined(__FreeBSD__) && !defined(__Userspace__)
4370 		if (ro->ro_nh == NULL) {
4371 #else
4372 		if (ro->ro_rt == NULL) {
4373 #endif
4374 			/*
4375 			 * src addr selection failed to find a route (or
4376 			 * valid source addr), so we can't get there from
4377 			 * here (yet)!
4378 			 */
4379 			sctp_handle_no_route(stcb, net, so_locked);
4380 			SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4381 			sctp_m_freem(m);
4382 			return (EHOSTUNREACH);
4383 		}
4384 		if (ro != &iproute) {
4385 			memcpy(&iproute, ro, sizeof(*ro));
4386 		}
4387 		SCTPDBG(SCTP_DEBUG_OUTPUT3, "Calling ipv4 output routine from low level src addr:%x\n",
4388 			(uint32_t) (ntohl(ip->ip_src.s_addr)));
4389 		SCTPDBG(SCTP_DEBUG_OUTPUT3, "Destination is %x\n",
4390 			(uint32_t)(ntohl(ip->ip_dst.s_addr)));
4391 #if defined(__FreeBSD__) && !defined(__Userspace__)
4392 		SCTPDBG(SCTP_DEBUG_OUTPUT3, "RTP route is %p through\n",
4393 			(void *)ro->ro_nh);
4394 #else
4395 		SCTPDBG(SCTP_DEBUG_OUTPUT3, "RTP route is %p through\n",
4396 			(void *)ro->ro_rt);
4397 #endif
4398 
4399 		if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
4400 			/* failed to prepend data, give up */
4401 			SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
4402 			sctp_m_freem(m);
4403 			return (ENOMEM);
4404 		}
4405 		SCTP_ATTACH_CHAIN(o_pak, m, packet_length);
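		/*
		 * CRC32c handling: for UDP-encapsulated packets the checksum
		 * is always computed in software; otherwise FreeBSD requests
		 * offload via the mbuf checksum flags, and other platforms
		 * fall back to a software CRC unless checksums are suppressed
		 * on loopback.
		 */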
4406 		if (port) {
4407 			sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip) + sizeof(struct udphdr));
4408 			SCTP_STAT_INCR(sctps_sendswcrc);
4409 #if !defined(__Userspace__)
4410 #if defined(__FreeBSD__)
4411 			if (V_udp_cksum) {
4412 				SCTP_ENABLE_UDP_CSUM(o_pak);
4413 			}
4414 #else
4415 			SCTP_ENABLE_UDP_CSUM(o_pak);
4416 #endif
4417 #endif
4418 		} else {
4419 #if defined(__FreeBSD__) && !defined(__Userspace__)
4420 			m->m_pkthdr.csum_flags = CSUM_SCTP;
4421 			m->m_pkthdr.csum_data = offsetof(struct sctphdr, checksum);
4422 			SCTP_STAT_INCR(sctps_sendhwcrc);
4423 #else
4424 			if (!(SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback) &&
4425 			      (stcb) && (stcb->asoc.scope.loopback_scope))) {
4426 				sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip));
4427 				SCTP_STAT_INCR(sctps_sendswcrc);
4428 			} else {
4429 				SCTP_STAT_INCR(sctps_sendhwcrc);
4430 			}
4431 #endif
4432 		}
4433 #ifdef SCTP_PACKET_LOGGING
4434 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
4435 			sctp_packet_log(o_pak);
4436 #endif
4437 		/* send it out.  table id is taken from stcb */
4438 #if defined(__APPLE__) && !defined(__Userspace__)
4439 		if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4440 			so = SCTP_INP_SO(inp);
4441 			SCTP_SOCKET_UNLOCK(so, 0);
4442 		}
4443 #endif
4444 #if defined(__FreeBSD__) && !defined(__Userspace__)
4445 		SCTP_PROBE5(send, NULL, stcb, ip, stcb, sctphdr);
4446 #endif
4447 		SCTP_IP_OUTPUT(ret, o_pak, ro, stcb, vrf_id);
4448 #if defined(__APPLE__) && !defined(__Userspace__)
4449 		if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4450 			atomic_add_int(&stcb->asoc.refcnt, 1);
4451 			SCTP_TCB_UNLOCK(stcb);
4452 			SCTP_SOCKET_LOCK(so, 0);
4453 			SCTP_TCB_LOCK(stcb);
4454 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
4455 		}
4456 #endif
4457 #if defined(__FreeBSD__) && !defined(__Userspace__)
4458 		if (port) {
4459 			UDPSTAT_INC(udps_opackets);
4460 		}
4461 #endif
4462 		SCTP_STAT_INCR(sctps_sendpackets);
4463 		SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
4464 		if (ret)
4465 			SCTP_STAT_INCR(sctps_senderrors);
4466 
4467 		SCTPDBG(SCTP_DEBUG_OUTPUT3, "IP output returns %d\n", ret);
4468 		if (net == NULL) {
4469 			/* free temporary routes */
4470 #if defined(__FreeBSD__) && !defined(__Userspace__)
4471 			RO_NHFREE(ro);
4472 #else
4473 			if (ro->ro_rt) {
4474 				RTFREE(ro->ro_rt);
4475 				ro->ro_rt = NULL;
4476 			}
4477 #endif
4478 		} else {
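			/* PMTU check versus smallest asoc MTU goes here */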
4479 #if defined(__FreeBSD__) && !defined(__Userspace__)
4480 			if ((ro->ro_nh != NULL) && (net->ro._s_addr) &&
4481 #else
4482 			if ((ro->ro_rt != NULL) && (net->ro._s_addr) &&
4483 #endif
4484 			    ((net->dest_state & SCTP_ADDR_NO_PMTUD) == 0)) {
4485 				uint32_t mtu;
4486 
4487 #if defined(__FreeBSD__) && !defined(__Userspace__)
4488 				mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_nh);
4489 #else
4490 				mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_rt);
4491 #endif
4492 				if (mtu > 0) {
4493 					if (net->port) {
4494 						mtu -= sizeof(struct udphdr);
4495 					}
4496 					if (mtu < net->mtu) {
4497 						if ((stcb != NULL) && (stcb->asoc.smallest_mtu > mtu)) {
4498 							sctp_mtu_size_reset(inp, &stcb->asoc, mtu);
4499 						}
4500 						net->mtu = mtu;
4501 					}
4502 				}
4503 #if defined(__FreeBSD__) && !defined(__Userspace__)
4504 			} else if (ro->ro_nh == NULL) {
4505 #else
4506 			} else if (ro->ro_rt == NULL) {
4507 #endif
4508 				/* route was freed */
4509 				if (net->ro._s_addr &&
4510 				    net->src_addr_selected) {
4511 					sctp_free_ifa(net->ro._s_addr);
4512 					net->ro._s_addr = NULL;
4513 				}
4514 				net->src_addr_selected = 0;
4515 			}
4516 		}
4517 		return (ret);
4518 	}
4519 #endif
4520 #ifdef INET6
4521 	case AF_INET6:
4522 	{
4523 		uint32_t flowlabel, flowinfo;
4524 		struct ip6_hdr *ip6h;
4525 		struct route_in6 ip6route;
4526 #if !defined(__Userspace__)
4527 		struct ifnet *ifp;
4528 #endif
4529 		struct sockaddr_in6 *sin6, tmp, *lsa6, lsa6_tmp;
4530 		int prev_scope = 0;
4531 #ifdef SCTP_EMBEDDED_V6_SCOPE
4532 		struct sockaddr_in6 lsa6_storage;
4533 		int error;
4534 #endif
4535 		u_short prev_port = 0;
4536 		int len;
4537 
4538 		if (net) {
4539 			flowlabel = net->flowlabel;
4540 		} else if (stcb) {
4541 			flowlabel = stcb->asoc.default_flowlabel;
4542 		} else {
4543 			flowlabel = inp->sctp_ep.default_flowlabel;
4544 		}
4545 		if (flowlabel == 0) {
4546 			/*
4547 			 * In particular, this means it is not set at the
4548 			 * SCTP layer, so use the value from the IP layer.
4549 			 */
4550 #if defined(__APPLE__)  && !defined(__Userspace__) && (!defined(APPLE_LEOPARD) && !defined(APPLE_SNOWLEOPARD) && !defined(APPLE_LION) && !defined(APPLE_MOUNTAINLION))
4551 			flowlabel = ntohl(inp->ip_inp.inp.inp_flow);
4552 #else
4553 			flowlabel = ntohl(((struct inpcb *)inp)->inp_flow);
4554 #endif
4555 		}
4556 		flowlabel &= 0x000fffff;
4557 		len = SCTP_MIN_OVERHEAD;
4558 		if (port) {
4559 			len += sizeof(struct udphdr);
4560 		}
4561 		newm = sctp_get_mbuf_for_msg(len, 1, M_NOWAIT, 1, MT_DATA);
4562 		if (newm == NULL) {
4563 			sctp_m_freem(m);
4564 			SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
4565 			return (ENOMEM);
4566 		}
4567 		SCTP_ALIGN_TO_END(newm, len);
4568 		SCTP_BUF_LEN(newm) = len;
4569 		SCTP_BUF_NEXT(newm) = m;
4570 		m = newm;
4571 #if defined(__FreeBSD__) && !defined(__Userspace__)
4572 		if (net != NULL) {
4573 			m->m_pkthdr.flowid = net->flowid;
4574 			M_HASHTYPE_SET(m, net->flowtype);
4575 		} else {
4576 			m->m_pkthdr.flowid = mflowid;
4577 			M_HASHTYPE_SET(m, mflowtype);
4578 		}
4579 #endif
4580 		packet_length = sctp_calculate_len(m);
4581 
4582 		ip6h = mtod(m, struct ip6_hdr *);
4583 		/* protect *sin6 from overwrite */
4584 		sin6 = (struct sockaddr_in6 *)to;
4585 		tmp = *sin6;
4586 		sin6 = &tmp;
4587 
4588 #ifdef SCTP_EMBEDDED_V6_SCOPE
4589 		/* KAME hack: embed scopeid */
4590 #if defined(__APPLE__) && !defined(__Userspace__)
4591 #if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD)
4592 		if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL) != 0)
4593 #else
4594 		if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL, NULL) != 0)
4595 #endif
4596 #elif defined(SCTP_KAME)
4597 		if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0)
4598 #else
4599 		if (in6_embedscope(&sin6->sin6_addr, sin6) != 0)
4600 #endif
4601 		{
4602 			SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
4603 			sctp_m_freem(m);
4604 			return (EINVAL);
4605 		}
4606 #endif /* SCTP_EMBEDDED_V6_SCOPE */
4607 		if (net == NULL) {
4608 			memset(&ip6route, 0, sizeof(ip6route));
4609 			ro = (sctp_route_t *)&ip6route;
4610 #ifdef HAVE_SIN6_LEN
4611 			memcpy(&ro->ro_dst, sin6, sin6->sin6_len);
4612 #else
4613 			memcpy(&ro->ro_dst, sin6, sizeof(struct sockaddr_in6));
4614 #endif
4615 		} else {
4616 			ro = (sctp_route_t *)&net->ro;
4617 		}
4618 		/*
4619 		 * We assume here that inp_flow is in host byte order within
4620 		 * the TCB!
4621 		 */
4622 		if (tos_value == 0) {
4623 			/*
4624 			 * In particular, this means it is not set at the
4625 			 * SCTP layer, so use the value from the IP layer.
4626 			 */
4627 #if defined(__APPLE__)  && !defined(__Userspace__) && (!defined(APPLE_LEOPARD) && !defined(APPLE_SNOWLEOPARD) && !defined(APPLE_LION) && !defined(APPLE_MOUNTAINLION))
4628 			tos_value = (ntohl(inp->ip_inp.inp.inp_flow) >> 20) & 0xff;
4629 #else
4630 			tos_value = (ntohl(((struct inpcb *)inp)->inp_flow) >> 20) & 0xff;
4631 #endif
4632 		}
4633 		tos_value &= 0xfc;
4634 		if (ecn_ok) {
4635 			tos_value |= sctp_get_ect(stcb);
4636 		}
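		/*
		 * Build the IPv6 flow word: version 6 in the top nibble,
		 * the traffic class (DSCP/ECN) in the next 8 bits, and the
		 * 20-bit flow label in the low-order bits.
		 */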
4637 		flowinfo = 0x06;
4638 		flowinfo <<= 8;
4639 		flowinfo |= tos_value;
4640 		flowinfo <<= 20;
4641 		flowinfo |= flowlabel;
4642 		ip6h->ip6_flow = htonl(flowinfo);
4643 		if (port) {
4644 			ip6h->ip6_nxt = IPPROTO_UDP;
4645 		} else {
4646 			ip6h->ip6_nxt = IPPROTO_SCTP;
4647 		}
4648 		ip6h->ip6_plen = htons((uint16_t)(packet_length - sizeof(struct ip6_hdr)));
4649 		ip6h->ip6_dst = sin6->sin6_addr;
4650 
4651 		/*
4652 		 * Source address selection: we can only reuse the KAME
4653 		 * src-addr-sel code to a limited degree, since we can try
4654 		 * its selection but the chosen address may not be bound.
4655 		 */
4656 		memset(&lsa6_tmp, 0, sizeof(lsa6_tmp));
4657 		lsa6_tmp.sin6_family = AF_INET6;
4658 #ifdef HAVE_SIN6_LEN
4659 		lsa6_tmp.sin6_len = sizeof(lsa6_tmp);
4660 #endif
4661 		lsa6 = &lsa6_tmp;
4662 		if (net && out_of_asoc_ok == 0) {
4663 			if (net->ro._s_addr && (net->ro._s_addr->localifa_flags & (SCTP_BEING_DELETED|SCTP_ADDR_IFA_UNUSEABLE))) {
4664 				sctp_free_ifa(net->ro._s_addr);
4665 				net->ro._s_addr = NULL;
4666 				net->src_addr_selected = 0;
4667 #if defined(__FreeBSD__) && !defined(__Userspace__)
4668 				RO_NHFREE(ro);
4669 #else
4670 				if (ro->ro_rt) {
4671 					RTFREE(ro->ro_rt);
4672 					ro->ro_rt = NULL;
4673 				}
4674 #endif
4675 			}
4676 			if (net->src_addr_selected == 0) {
4677 #ifdef SCTP_EMBEDDED_V6_SCOPE
4678 				sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
4679 				/* KAME hack: embed scopeid */
4680 #if defined(__APPLE__) && !defined(__Userspace__)
4681 #if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD)
4682 				if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL) != 0)
4683 #else
4684 				if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL, NULL) != 0)
4685 #endif
4686 #elif defined(SCTP_KAME)
4687 				if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0)
4688 #else
4689 				if (in6_embedscope(&sin6->sin6_addr, sin6) != 0)
4690 #endif
4691 				{
4692 					SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
4693 					sctp_m_freem(m);
4694 					return (EINVAL);
4695 				}
4696 #endif /* SCTP_EMBEDDED_V6_SCOPE */
4697 				/* Cache the source address */
4698 				net->ro._s_addr = sctp_source_address_selection(inp,
4699 										stcb,
4700 										ro,
4701 										net,
4702 										0,
4703 										vrf_id);
4704 #ifdef SCTP_EMBEDDED_V6_SCOPE
4705 #ifdef SCTP_KAME
4706 				(void)sa6_recoverscope(sin6);
4707 #else
4708 				(void)in6_recoverscope(sin6, &sin6->sin6_addr, NULL);
4709 #endif	/* SCTP_KAME */
4710 #endif	/* SCTP_EMBEDDED_V6_SCOPE */
4711 				net->src_addr_selected = 1;
4712 			}
4713 			if (net->ro._s_addr == NULL) {
4714 				SCTPDBG(SCTP_DEBUG_OUTPUT3, "V6:No route to host\n");
4715 				net->src_addr_selected = 0;
4716 				sctp_handle_no_route(stcb, net, so_locked);
4717 				SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4718 				sctp_m_freem(m);
4719 				return (EHOSTUNREACH);
4720 			}
4721 			lsa6->sin6_addr = net->ro._s_addr->address.sin6.sin6_addr;
4722 		} else {
4723 #ifdef SCTP_EMBEDDED_V6_SCOPE
4724 			sin6 = (struct sockaddr_in6 *)&ro->ro_dst;
4725 			/* KAME hack: embed scopeid */
4726 #if defined(__APPLE__) && !defined(__Userspace__)
4727 #if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD)
4728 			if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL) != 0)
4729 #else
4730 			if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL, NULL) != 0)
4731 #endif
4732 #elif defined(SCTP_KAME)
4733 			if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0)
4734 #else
4735 			if (in6_embedscope(&sin6->sin6_addr, sin6) != 0)
4736 #endif
4737 			  {
4738 				SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
4739 				sctp_m_freem(m);
4740 				return (EINVAL);
4741 			  }
4742 #endif /* SCTP_EMBEDDED_V6_SCOPE */
4743 			if (over_addr == NULL) {
4744 				struct sctp_ifa *_lsrc;
4745 
4746 				_lsrc = sctp_source_address_selection(inp, stcb, ro,
4747 				                                      net,
4748 				                                      out_of_asoc_ok,
4749 				                                      vrf_id);
4750 				if (_lsrc == NULL) {
4751 					sctp_handle_no_route(stcb, net, so_locked);
4752 					SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4753 					sctp_m_freem(m);
4754 					return (EHOSTUNREACH);
4755 				}
4756 				lsa6->sin6_addr = _lsrc->address.sin6.sin6_addr;
4757 				sctp_free_ifa(_lsrc);
4758 			} else {
4759 				lsa6->sin6_addr = over_addr->sin6.sin6_addr;
4760 				SCTP_RTALLOC(ro, vrf_id, inp->fibnum);
4761 			}
4762 #ifdef SCTP_EMBEDDED_V6_SCOPE
4763 #ifdef SCTP_KAME
4764 			(void)sa6_recoverscope(sin6);
4765 #else
4766 			(void)in6_recoverscope(sin6, &sin6->sin6_addr, NULL);
4767 #endif	/* SCTP_KAME */
4768 #endif	/* SCTP_EMBEDDED_V6_SCOPE */
4769 		}
4770 		lsa6->sin6_port = inp->sctp_lport;
4771 
4772 #if defined(__FreeBSD__) && !defined(__Userspace__)
4773 		if (ro->ro_nh == NULL) {
4774 #else
4775 		if (ro->ro_rt == NULL) {
4776 #endif
4777 			/*
4778 			 * src addr selection failed to find a route (or
4779 			 * valid source addr), so we can't get there from
4780 			 * here!
4781 			 */
4782 			sctp_handle_no_route(stcb, net, so_locked);
4783 			SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4784 			sctp_m_freem(m);
4785 			return (EHOSTUNREACH);
4786 		}
4787 #ifndef SCOPEDROUTING
4788 #ifdef SCTP_EMBEDDED_V6_SCOPE
4789 		/*
4790 		 * XXX: sa6 may not have a valid sin6_scope_id in the
4791 		 * non-SCOPEDROUTING case.
4792 		 */
4793 		memset(&lsa6_storage, 0, sizeof(lsa6_storage));
4794 		lsa6_storage.sin6_family = AF_INET6;
4795 #ifdef HAVE_SIN6_LEN
4796 		lsa6_storage.sin6_len = sizeof(lsa6_storage);
4797 #endif
4798 #ifdef SCTP_KAME
4799 		lsa6_storage.sin6_addr = lsa6->sin6_addr;
4800 		if ((error = sa6_recoverscope(&lsa6_storage)) != 0) {
4801 #else
4802 		if ((error = in6_recoverscope(&lsa6_storage, &lsa6->sin6_addr,
4803 		    NULL)) != 0) {
4804 #endif				/* SCTP_KAME */
4805 			SCTPDBG(SCTP_DEBUG_OUTPUT3, "recover scope fails error %d\n", error);
4806 			sctp_m_freem(m);
4807 			return (error);
4808 		}
4809 		/* XXX */
4810 		lsa6_storage.sin6_addr = lsa6->sin6_addr;
4811 		lsa6_storage.sin6_port = inp->sctp_lport;
4812 		lsa6 = &lsa6_storage;
4813 #endif /* SCTP_EMBEDDED_V6_SCOPE */
4814 #endif /* SCOPEDROUTING */
4815 		ip6h->ip6_src = lsa6->sin6_addr;
4816 
4817 		if (port) {
4818 			if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) {
4819 				sctp_handle_no_route(stcb, net, so_locked);
4820 				SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4821 				sctp_m_freem(m);
4822 				return (EHOSTUNREACH);
4823 			}
4824 			udp = (struct udphdr *)((caddr_t)ip6h + sizeof(struct ip6_hdr));
4825 			udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
4826 			udp->uh_dport = port;
4827 			udp->uh_ulen = htons((uint16_t)(packet_length - sizeof(struct ip6_hdr)));
4828 			udp->uh_sum = 0;
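			/*
			 * The UDP checksum over IPv6 is filled in after the
			 * full chain is attached below, or left at zero where
			 * that is not supported.
			 */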
4829 			sctphdr = (struct sctphdr *)((caddr_t)udp + sizeof(struct udphdr));
4830 		} else {
4831 			sctphdr = (struct sctphdr *)((caddr_t)ip6h + sizeof(struct ip6_hdr));
4832 		}
4833 
4834 		sctphdr->src_port = src_port;
4835 		sctphdr->dest_port = dest_port;
4836 		sctphdr->v_tag = v_tag;
4837 		sctphdr->checksum = 0;
4838 
4839 		/*
4840 		 * We set the hop limit now since there is a good chance
4841 		 * that our ro pointer is now filled
4842 		 */
4843 		ip6h->ip6_hlim = SCTP_GET_HLIM(inp, ro);
4844 #if !defined(__Userspace__)
4845 		ifp = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
4846 #endif
4847 
4848 #ifdef SCTP_DEBUG
4849 		/* Copy to be sure something bad is not happening */
4850 		sin6->sin6_addr = ip6h->ip6_dst;
4851 		lsa6->sin6_addr = ip6h->ip6_src;
4852 #endif
4853 
4854 		SCTPDBG(SCTP_DEBUG_OUTPUT3, "Calling ipv6 output routine from low level\n");
4855 		SCTPDBG(SCTP_DEBUG_OUTPUT3, "src: ");
4856 		SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, (struct sockaddr *)lsa6);
4857 		SCTPDBG(SCTP_DEBUG_OUTPUT3, "dst: ");
4858 		SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, (struct sockaddr *)sin6);
4859 		if (net) {
4860 			sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
4861 			/* preserve the port and scope for link local send */
4862 			prev_scope = sin6->sin6_scope_id;
4863 			prev_port = sin6->sin6_port;
4864 		}
4865 
4866 		if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
4867 			/* failed to prepend data, give up */
4868 			sctp_m_freem(m);
4869 			SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
4870 			return (ENOMEM);
4871 		}
4872 		SCTP_ATTACH_CHAIN(o_pak, m, packet_length);
4873 		if (port) {
4874 			sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip6_hdr) + sizeof(struct udphdr));
4875 			SCTP_STAT_INCR(sctps_sendswcrc);
4876 #if !defined(__Userspace__)
4877 #if defined(_WIN32)
4878 			udp->uh_sum = 0;
4879 #else
4880 			if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), packet_length - sizeof(struct ip6_hdr))) == 0) {
4881 				udp->uh_sum = 0xffff;
4882 			}
4883 #endif
4884 #endif
4885 		} else {
4886 #if defined(__FreeBSD__) && !defined(__Userspace__)
4887 			m->m_pkthdr.csum_flags = CSUM_SCTP_IPV6;
4888 			m->m_pkthdr.csum_data = offsetof(struct sctphdr, checksum);
4889 			SCTP_STAT_INCR(sctps_sendhwcrc);
4890 #else
4891 			if (!(SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback) &&
4892 			      (stcb) && (stcb->asoc.scope.loopback_scope))) {
4893 				sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip6_hdr));
4894 				SCTP_STAT_INCR(sctps_sendswcrc);
4895 			} else {
4896 				SCTP_STAT_INCR(sctps_sendhwcrc);
4897 			}
4898 #endif
4899 		}
4900 		/* send it out. table id is taken from stcb */
4901 #if defined(__APPLE__) && !defined(__Userspace__)
4902 		if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4903 			so = SCTP_INP_SO(inp);
4904 			SCTP_SOCKET_UNLOCK(so, 0);
4905 		}
4906 #endif
4907 #ifdef SCTP_PACKET_LOGGING
4908 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
4909 			sctp_packet_log(o_pak);
4910 #endif
4911 #if !defined(__Userspace__)
4912 #if defined(__FreeBSD__)
4913 		SCTP_PROBE5(send, NULL, stcb, ip6h, stcb, sctphdr);
4914 #endif
4915 		SCTP_IP6_OUTPUT(ret, o_pak, (struct route_in6 *)ro, &ifp, stcb, vrf_id);
4916 #else
4917 		SCTP_IP6_OUTPUT(ret, o_pak, (struct route_in6 *)ro, NULL, stcb, vrf_id);
4918 #endif
4919 #if defined(__APPLE__) && !defined(__Userspace__)
4920 		if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4921 			atomic_add_int(&stcb->asoc.refcnt, 1);
4922 			SCTP_TCB_UNLOCK(stcb);
4923 			SCTP_SOCKET_LOCK(so, 0);
4924 			SCTP_TCB_LOCK(stcb);
4925 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
4926 		}
4927 #endif
4928 		if (net) {
4929 			/* for link local this must be done */
4930 			sin6->sin6_scope_id = prev_scope;
4931 			sin6->sin6_port = prev_port;
4932 		}
4933 		SCTPDBG(SCTP_DEBUG_OUTPUT3, "return from send is %d\n", ret);
4934 #if defined(__FreeBSD__) && !defined(__Userspace__)
4935 		if (port) {
4936 			UDPSTAT_INC(udps_opackets);
4937 		}
4938 #endif
4939 		SCTP_STAT_INCR(sctps_sendpackets);
4940 		SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
4941 		if (ret) {
4942 			SCTP_STAT_INCR(sctps_senderrors);
4943 		}
4944 		if (net == NULL) {
4945 			/* Now if we had a temp route free it */
4946 #if defined(__FreeBSD__) && !defined(__Userspace__)
4947 			RO_NHFREE(ro);
4948 #else
4949 			if (ro->ro_rt) {
4950 				RTFREE(ro->ro_rt);
4951 				ro->ro_rt = NULL;
4952 			}
4953 #endif
4954 		} else {
4955 			/* PMTU check versus smallest asoc MTU goes here */
4956 #if defined(__FreeBSD__) && !defined(__Userspace__)
4957 			if (ro->ro_nh == NULL) {
4958 #else
4959 			if (ro->ro_rt == NULL) {
4960 #endif
4961 				/* Route was freed */
4962 				if (net->ro._s_addr &&
4963 				    net->src_addr_selected) {
4964 					sctp_free_ifa(net->ro._s_addr);
4965 					net->ro._s_addr = NULL;
4966 				}
4967 				net->src_addr_selected = 0;
4968 			}
4969 #if defined(__FreeBSD__) && !defined(__Userspace__)
4970 			if ((ro->ro_nh != NULL) && (net->ro._s_addr) &&
4971 #else
4972 			if ((ro->ro_rt != NULL) && (net->ro._s_addr) &&
4973 #endif
4974 			    ((net->dest_state & SCTP_ADDR_NO_PMTUD) == 0)) {
4975 				uint32_t mtu;
4976 
4977 #if defined(__FreeBSD__) && !defined(__Userspace__)
4978 				mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_nh);
4979 #else
4980 				mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_rt);
4981 #endif
4982 				if (mtu > 0) {
4983 					if (net->port) {
4984 						mtu -= sizeof(struct udphdr);
4985 					}
4986 					if (mtu < net->mtu) {
4987 						if ((stcb != NULL) && (stcb->asoc.smallest_mtu > mtu)) {
4988 							sctp_mtu_size_reset(inp, &stcb->asoc, mtu);
4989 						}
4990 						net->mtu = mtu;
4991 					}
4992 				}
4993 			}
4994 #if !defined(__Userspace__)
4995 			else if (ifp) {
4996 #if defined(_WIN32)
4997 #define ND_IFINFO(ifp)	(ifp)
4998 #define linkmtu		if_mtu
4999 #endif
5000 				if (ND_IFINFO(ifp)->linkmtu &&
5001 				    (stcb->asoc.smallest_mtu > ND_IFINFO(ifp)->linkmtu)) {
5002 					sctp_mtu_size_reset(inp,
5003 					    &stcb->asoc,
5004 					    ND_IFINFO(ifp)->linkmtu);
5005 				}
5006 			}
5007 #endif
5008 		}
5009 		return (ret);
5010 	}
5011 #endif
5012 #if defined(__Userspace__)
5013 	case AF_CONN:
5014 	{
5015 		char *buffer;
5016 		struct sockaddr_conn *sconn;
5017 		int len;
5018 
5019 		sconn = (struct sockaddr_conn *)to;
5020 		len = sizeof(struct sctphdr);
5021 		newm = sctp_get_mbuf_for_msg(len, 1, M_NOWAIT, 1, MT_DATA);
5022 		if (newm == NULL) {
5023 			sctp_m_freem(m);
5024 			SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
5025 			return (ENOMEM);
5026 		}
5027 		SCTP_ALIGN_TO_END(newm, len);
5028 		SCTP_BUF_LEN(newm) = len;
5029 		SCTP_BUF_NEXT(newm) = m;
5030 		m = newm;
5031 		packet_length = sctp_calculate_len(m);
5032 		sctphdr = mtod(m, struct sctphdr *);
5033 		sctphdr->src_port = src_port;
5034 		sctphdr->dest_port = dest_port;
5035 		sctphdr->v_tag = v_tag;
5036 		sctphdr->checksum = 0;
5037 		if (SCTP_BASE_VAR(crc32c_offloaded) == 0) {
5038 			sctphdr->checksum = sctp_calculate_cksum(m, 0);
5039 			SCTP_STAT_INCR(sctps_sendswcrc);
5040 		} else {
5041 			SCTP_STAT_INCR(sctps_sendhwcrc);
5042 		}
5043 		if (tos_value == 0) {
5044 			tos_value = inp->ip_inp.inp.inp_ip_tos;
5045 		}
5046 		tos_value &= 0xfc;
5047 		if (ecn_ok) {
5048 			tos_value |= sctp_get_ect(stcb);
5049 		}
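		/*
		 * For AF_CONN the packet is flattened into a contiguous
		 * buffer and handed to the application-supplied conn_output
		 * callback together with the TOS and fragmentation flag.
		 */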
5050 		/* Don't alloc/free for each packet */
5051 		if ((buffer = malloc(packet_length)) != NULL) {
5052 			m_copydata(m, 0, packet_length, buffer);
5053 			ret = SCTP_BASE_VAR(conn_output)(sconn->sconn_addr, buffer, packet_length, tos_value, nofragment_flag);
5054 			free(buffer);
5055 		} else {
5056 			ret = ENOMEM;
5057 		}
5058 		sctp_m_freem(m);
5059 		return (ret);
5060 	}
5061 #endif
5062 	default:
5063 		SCTPDBG(SCTP_DEBUG_OUTPUT1, "Unknown protocol (TSNH) type %d\n",
5064 		        ((struct sockaddr *)to)->sa_family);
5065 		sctp_m_freem(m);
5066 		SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
5067 		return (EFAULT);
5068 	}
5069 }
5070 
5071 
5072 void
5073 sctp_send_initiate(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int so_locked)
5074 {
5075 	struct mbuf *m, *m_last;
5076 	struct sctp_nets *net;
5077 	struct sctp_init_chunk *init;
5078 	struct sctp_supported_addr_param *sup_addr;
5079 	struct sctp_adaptation_layer_indication *ali;
5080 	struct sctp_supported_chunk_types_param *pr_supported;
5081 	struct sctp_paramhdr *ph;
5082 	int cnt_inits_to = 0;
5083 	int error;
5084 	uint16_t num_ext, chunk_len, padding_len, parameter_len;
5085 
5086 #if defined(__APPLE__) && !defined(__Userspace__)
5087 	if (so_locked) {
5088 		sctp_lock_assert(SCTP_INP_SO(inp));
5089 	} else {
5090 		sctp_unlock_assert(SCTP_INP_SO(inp));
5091 	}
5092 #endif
5093 	/* An INIT always goes to the primary (and usually only) address */
5094 	net = stcb->asoc.primary_destination;
5095 	if (net == NULL) {
5096 		net = TAILQ_FIRST(&stcb->asoc.nets);
5097 		if (net == NULL) {
5098 			/* TSNH */
5099 			return;
5100 		}
5101 		/* we confirm any address we send an INIT to */
5102 		net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
5103 		(void)sctp_set_primary_addr(stcb, NULL, net);
5104 	} else {
5105 		/* we confirm any address we send an INIT to */
5106 		net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
5107 	}
5108 	SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT\n");
5109 #ifdef INET6
5110 	if (net->ro._l_addr.sa.sa_family == AF_INET6) {
5111 		/*
5112 		 * Special hook: if we are sending to a link-local address,
5113 		 * it will not show up in our private address count.
5114 		 */
5115 		if (IN6_IS_ADDR_LINKLOCAL(&net->ro._l_addr.sin6.sin6_addr))
5116 			cnt_inits_to = 1;
5117 	}
5118 #endif
5119 	if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5120 		/* This case should not happen */
5121 		SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - failed timer?\n");
5122 		return;
5123 	}
5124 	/* start the INIT timer */
5125 	sctp_timer_start(SCTP_TIMER_TYPE_INIT, inp, stcb, net);
5126 
5127 	m = sctp_get_mbuf_for_msg(MCLBYTES, 1, M_NOWAIT, 1, MT_DATA);
5128 	if (m == NULL) {
5129 		/* No memory, INIT timer will re-attempt. */
5130 		SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - mbuf?\n");
5131 		return;
5132 	}
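	/*
	 * Build the INIT chunk parameter by parameter, tracking chunk_len
	 * and padding_len as each parameter is appended; parameters are
	 * padded to a 4-byte boundary as required.
	 */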
5133 	chunk_len = (uint16_t)sizeof(struct sctp_init_chunk);
5134 	padding_len = 0;
5135 	/* Now let's put the chunk header in place */
5136 	init = mtod(m, struct sctp_init_chunk *);
5137 	/* now the chunk header */
5138 	init->ch.chunk_type = SCTP_INITIATION;
5139 	init->ch.chunk_flags = 0;
5140 	/* fill in later from mbuf we build */
5141 	init->ch.chunk_length = 0;
5142 	/* place in my tag */
5143 	init->init.initiate_tag = htonl(stcb->asoc.my_vtag);
5144 	/* set up some of the credits. */
5145 	init->init.a_rwnd = htonl(max(inp->sctp_socket?SCTP_SB_LIMIT_RCV(inp->sctp_socket):0,
5146 	                              SCTP_MINIMAL_RWND));
5147 	init->init.num_outbound_streams = htons(stcb->asoc.pre_open_streams);
5148 	init->init.num_inbound_streams = htons(stcb->asoc.max_inbound_streams);
5149 	init->init.initial_tsn = htonl(stcb->asoc.init_seq_number);
5150 
5151 	/* Adaptation layer indication parameter */
5152 	if (inp->sctp_ep.adaptation_layer_indicator_provided) {
5153 		parameter_len = (uint16_t)sizeof(struct sctp_adaptation_layer_indication);
5154 		ali = (struct sctp_adaptation_layer_indication *)(mtod(m, caddr_t) + chunk_len);
5155 		ali->ph.param_type = htons(SCTP_ULP_ADAPTATION);
5156 		ali->ph.param_length = htons(parameter_len);
5157 		ali->indication = htonl(inp->sctp_ep.adaptation_layer_indicator);
5158 		chunk_len += parameter_len;
5159 	}
5160 
5161 	/* ECN parameter */
5162 	if (stcb->asoc.ecn_supported == 1) {
5163 		parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
5164 		ph = (struct sctp_paramhdr *)(mtod(m, caddr_t) + chunk_len);
5165 		ph->param_type = htons(SCTP_ECN_CAPABLE);
5166 		ph->param_length = htons(parameter_len);
5167 		chunk_len += parameter_len;
5168 	}
5169 
5170 	/* PR-SCTP supported parameter */
5171 	if (stcb->asoc.prsctp_supported == 1) {
5172 		parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
5173 		ph = (struct sctp_paramhdr *)(mtod(m, caddr_t) + chunk_len);
5174 		ph->param_type = htons(SCTP_PRSCTP_SUPPORTED);
5175 		ph->param_length = htons(parameter_len);
5176 		chunk_len += parameter_len;
5177 	}
5178 
5179 	/* Add NAT friendly parameter. */
5180 	if (SCTP_BASE_SYSCTL(sctp_inits_include_nat_friendly)) {
5181 		parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
5182 		ph = (struct sctp_paramhdr *)(mtod(m, caddr_t) + chunk_len);
5183 		ph->param_type = htons(SCTP_HAS_NAT_SUPPORT);
5184 		ph->param_length = htons(parameter_len);
5185 		chunk_len += parameter_len;
5186 	}
5187 
5188 	/* And now tell the peer which extensions we support */
5189 	num_ext = 0;
5190 	pr_supported = (struct sctp_supported_chunk_types_param *)(mtod(m, caddr_t) + chunk_len);
5191 	if (stcb->asoc.prsctp_supported == 1) {
5192 		pr_supported->chunk_types[num_ext++] = SCTP_FORWARD_CUM_TSN;
5193 		if (stcb->asoc.idata_supported) {
5194 			pr_supported->chunk_types[num_ext++] = SCTP_IFORWARD_CUM_TSN;
5195 		}
5196 	}
5197 	if (stcb->asoc.auth_supported == 1) {
5198 		pr_supported->chunk_types[num_ext++] = SCTP_AUTHENTICATION;
5199 	}
5200 	if (stcb->asoc.asconf_supported == 1) {
5201 		pr_supported->chunk_types[num_ext++] = SCTP_ASCONF;
5202 		pr_supported->chunk_types[num_ext++] = SCTP_ASCONF_ACK;
5203 	}
5204 	if (stcb->asoc.reconfig_supported == 1) {
5205 		pr_supported->chunk_types[num_ext++] = SCTP_STREAM_RESET;
5206 	}
5207 	if (stcb->asoc.idata_supported) {
5208 		pr_supported->chunk_types[num_ext++] = SCTP_IDATA;
5209 	}
5210 	if (stcb->asoc.nrsack_supported == 1) {
5211 		pr_supported->chunk_types[num_ext++] = SCTP_NR_SELECTIVE_ACK;
5212 	}
5213 	if (stcb->asoc.pktdrop_supported == 1) {
5214 		pr_supported->chunk_types[num_ext++] = SCTP_PACKET_DROPPED;
5215 	}
5216 	if (num_ext > 0) {
5217 		parameter_len = (uint16_t)sizeof(struct sctp_supported_chunk_types_param) + num_ext;
5218 		pr_supported->ph.param_type = htons(SCTP_SUPPORTED_CHUNK_EXT);
5219 		pr_supported->ph.param_length = htons(parameter_len);
5220 		padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
5221 		chunk_len += parameter_len;
5222 	}
5223 	/* add authentication parameters */
5224 	if (stcb->asoc.auth_supported) {
5225 		/* attach RANDOM parameter, if available */
5226 		if (stcb->asoc.authinfo.random != NULL) {
5227 			struct sctp_auth_random *randp;
5228 
5229 			if (padding_len > 0) {
5230 				memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
5231 				chunk_len += padding_len;
5232 				padding_len = 0;
5233 			}
5234 			randp = (struct sctp_auth_random *)(mtod(m, caddr_t) + chunk_len);
5235 			parameter_len = (uint16_t)sizeof(struct sctp_auth_random) + stcb->asoc.authinfo.random_len;
5236 			/* random key already contains the header */
5237 			memcpy(randp, stcb->asoc.authinfo.random->key, parameter_len);
5238 			padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
5239 			chunk_len += parameter_len;
5240 		}
5241 		/* add HMAC_ALGO parameter */
5242 		if (stcb->asoc.local_hmacs != NULL) {
5243 			struct sctp_auth_hmac_algo *hmacs;
5244 
5245 			if (padding_len > 0) {
5246 				memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
5247 				chunk_len += padding_len;
5248 				padding_len = 0;
5249 			}
5250 			hmacs = (struct sctp_auth_hmac_algo *)(mtod(m, caddr_t) + chunk_len);
5251 			parameter_len = (uint16_t)(sizeof(struct sctp_auth_hmac_algo) +
5252 			                           stcb->asoc.local_hmacs->num_algo * sizeof(uint16_t));
5253 			hmacs->ph.param_type = htons(SCTP_HMAC_LIST);
5254 			hmacs->ph.param_length = htons(parameter_len);
5255 			sctp_serialize_hmaclist(stcb->asoc.local_hmacs, (uint8_t *)hmacs->hmac_ids);
5256 			padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
5257 			chunk_len += parameter_len;
5258 		}
5259 		/* add CHUNKS parameter */
5260 		if (stcb->asoc.local_auth_chunks != NULL) {
5261 			struct sctp_auth_chunk_list *chunks;
5262 
5263 			if (padding_len > 0) {
5264 				memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
5265 				chunk_len += padding_len;
5266 				padding_len = 0;
5267 			}
5268 			chunks = (struct sctp_auth_chunk_list *)(mtod(m, caddr_t) + chunk_len);
5269 			parameter_len = (uint16_t)(sizeof(struct sctp_auth_chunk_list) +
5270 			                           sctp_auth_get_chklist_size(stcb->asoc.local_auth_chunks));
5271 			chunks->ph.param_type = htons(SCTP_CHUNK_LIST);
5272 			chunks->ph.param_length = htons(parameter_len);
5273 			sctp_serialize_auth_chunks(stcb->asoc.local_auth_chunks, chunks->chunk_types);
5274 			padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
5275 			chunk_len += parameter_len;
5276 		}
5277 	}
5278 
5279 	/* now any cookie time extensions */
5280 	if (stcb->asoc.cookie_preserve_req) {
5281 		struct sctp_cookie_perserve_param *cookie_preserve;
5282 
5283 		if (padding_len > 0) {
5284 			memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
5285 			chunk_len += padding_len;
5286 			padding_len = 0;
5287 		}
5288 		parameter_len = (uint16_t)sizeof(struct sctp_cookie_perserve_param);
5289 		cookie_preserve = (struct sctp_cookie_perserve_param *)(mtod(m, caddr_t) + chunk_len);
5290 		cookie_preserve->ph.param_type = htons(SCTP_COOKIE_PRESERVE);
5291 		cookie_preserve->ph.param_length = htons(parameter_len);
5292 		cookie_preserve->time = htonl(stcb->asoc.cookie_preserve_req);
5293 		stcb->asoc.cookie_preserve_req = 0;
5294 		chunk_len += parameter_len;
5295 	}
5296 
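	/*
	 * Supported address types parameter: advertise IPv4 and/or IPv6
	 * depending on which address families are legal for this
	 * association's scope.
	 */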
5297 	if (stcb->asoc.scope.ipv4_addr_legal || stcb->asoc.scope.ipv6_addr_legal) {
5298 		uint8_t i;
5299 
5300 		if (padding_len > 0) {
5301 			memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
5302 			chunk_len += padding_len;
5303 			padding_len = 0;
5304 		}
5305 		parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
5306 		if (stcb->asoc.scope.ipv4_addr_legal) {
5307 			parameter_len += (uint16_t)sizeof(uint16_t);
5308 		}
5309 		if (stcb->asoc.scope.ipv6_addr_legal) {
5310 			parameter_len += (uint16_t)sizeof(uint16_t);
5311 		}
5312 		sup_addr = (struct sctp_supported_addr_param *)(mtod(m, caddr_t) + chunk_len);
5313 		sup_addr->ph.param_type = htons(SCTP_SUPPORTED_ADDRTYPE);
5314 		sup_addr->ph.param_length = htons(parameter_len);
5315 		i = 0;
5316 		if (stcb->asoc.scope.ipv4_addr_legal) {
5317 			sup_addr->addr_type[i++] = htons(SCTP_IPV4_ADDRESS);
5318 		}
5319 		if (stcb->asoc.scope.ipv6_addr_legal) {
5320 			sup_addr->addr_type[i++] = htons(SCTP_IPV6_ADDRESS);
5321 		}
5322 		padding_len = 4 - 2 * i;
5323 		chunk_len += parameter_len;
5324 	}
5325 
5326 	SCTP_BUF_LEN(m) = chunk_len;
5327 	/* now the addresses */
5328 	/* To optimize this we could put the scoping information
5329 	 * into a structure and remove the individual uint8_t's from
5330 	 * the assoc structure. Then we could just use the sctp_ifa
5331 	 * for the address within the stcb. But for now this is a
5332 	 * quick hack to get the address handling teased apart.
5333 	 */
5334 	m_last = sctp_add_addresses_to_i_ia(inp, stcb, &stcb->asoc.scope,
5335 	                                    m, cnt_inits_to,
5336 	                                    &padding_len, &chunk_len);
5337 
5338 	init->ch.chunk_length = htons(chunk_len);
5339 	if (padding_len > 0) {
5340 		if (sctp_add_pad_tombuf(m_last, padding_len) == NULL) {
5341 			sctp_m_freem(m);
5342 			return;
5343 		}
5344 	}
5345 	SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - calls lowlevel_output\n");
5346 	if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
5347 	                                        (struct sockaddr *)&net->ro._l_addr,
5348 	                                        m, 0, NULL, 0, 0, 0, 0,
5349 	                                        inp->sctp_lport, stcb->rport, htonl(0),
5350 	                                        net->port, NULL,
5351 #if defined(__FreeBSD__) && !defined(__Userspace__)
5352 	                                        0, 0,
5353 #endif
5354 	                                        so_locked))) {
5355 		SCTPDBG(SCTP_DEBUG_OUTPUT4, "Gak send error %d\n", error);
5356 		if (error == ENOBUFS) {
5357 			stcb->asoc.ifp_had_enobuf = 1;
5358 			SCTP_STAT_INCR(sctps_lowlevelerr);
5359 		}
5360 	} else {
5361 		stcb->asoc.ifp_had_enobuf = 0;
5362 	}
5363 	SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
5364 	(void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
5365 }
5366 
5367 struct mbuf *
5368 sctp_arethere_unrecognized_parameters(struct mbuf *in_initpkt,
5369                                       int param_offset, int *abort_processing,
5370                                       struct sctp_chunkhdr *cp,
5371                                       int *nat_friendly,
5372                                       int *cookie_found)
5373 {
5374 	/*
5375 	 * Given an mbuf containing an INIT or INIT-ACK, with param_offset
5376 	 * pointing at the beginning of the parameters (i.e. iphlen +
5377 	 * sizeof(struct sctp_init_msg)), parse through the parameters to
5378 	 * the end of the mbuf, verifying that all parameters are known.
5379 	 *
5380 	 * For unknown parameters, build and return an mbuf carrying
5381 	 * UNRECOGNIZED_PARAMETER errors. If the flags indicate to stop
5382 	 * processing this chunk, stop and set *abort_processing to 1.
5383 	 *
5384 	 * By having param_offset pre-set to where the parameters begin,
5385 	 * it is hoped that this routine may be reused in the future by
5386 	 * new features.
5387 	 */
5388 	struct sctp_paramhdr *phdr, params;
5389 
5390 	struct mbuf *mat, *m_tmp, *op_err, *op_err_last;
5391 	int at, limit, pad_needed;
5392 	uint16_t ptype, plen, padded_size;
5393 
5394 	*abort_processing = 0;
5395 	if (cookie_found != NULL) {
5396 		*cookie_found = 0;
5397 	}
5398 	mat = in_initpkt;
5399 	limit = ntohs(cp->chunk_length) - sizeof(struct sctp_init_chunk);
5400 	at = param_offset;
5401 	op_err = NULL;
5402 	op_err_last = NULL;
5403 	pad_needed = 0;
5404 	SCTPDBG(SCTP_DEBUG_OUTPUT1, "Check for unrecognized params\n");
5405 	phdr = sctp_get_next_param(mat, at, &params, sizeof(params));
5406 	while ((phdr != NULL) && ((size_t)limit >= sizeof(struct sctp_paramhdr))) {
5407 		ptype = ntohs(phdr->param_type);
5408 		plen = ntohs(phdr->param_length);
5409 		if ((plen > limit) || (plen < sizeof(struct sctp_paramhdr))) {
5410 			/* wacked parameter */
5411 			SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error %d\n", plen);
5412 			goto invalid_size;
5413 		}
5414 		limit -= SCTP_SIZE32(plen);
5415 		/*-
5416 		 * All parameters for all chunks that we know/understand are
5417 		 * listed here. We process them in other places and take the
5418 		 * appropriate stop action per the upper bits. However, this is
5419 		 * the generic routine processors can call to get back an operr
5420 		 * to either incorporate (init-ack) or send.
5421 		 */
5422 		padded_size = SCTP_SIZE32(plen);
5423 		switch (ptype) {
5424 			/* Params with variable size */
5425 		case SCTP_HEARTBEAT_INFO:
5426 		case SCTP_UNRECOG_PARAM:
5427 		case SCTP_ERROR_CAUSE_IND:
5428 			/* ok skip fwd */
5429 			at += padded_size;
5430 			break;
5431 		case SCTP_STATE_COOKIE:
5432 			if (cookie_found != NULL) {
5433 				*cookie_found = 1;
5434 			}
5435 			at += padded_size;
5436 			break;
5437 			/* Params with variable size within a range */
5438 		case SCTP_CHUNK_LIST:
5439 		case SCTP_SUPPORTED_CHUNK_EXT:
5440 			if (padded_size > (sizeof(struct sctp_supported_chunk_types_param) + (sizeof(uint8_t) * SCTP_MAX_SUPPORTED_EXT))) {
5441 				SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error chklist %d\n", plen);
5442 				goto invalid_size;
5443 			}
5444 			at += padded_size;
5445 			break;
5446 		case SCTP_SUPPORTED_ADDRTYPE:
5447 			if (padded_size > SCTP_MAX_ADDR_PARAMS_SIZE) {
5448 				SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error supaddrtype %d\n", plen);
5449 				goto invalid_size;
5450 			}
5451 			at += padded_size;
5452 			break;
5453 		case SCTP_RANDOM:
5454 			if (padded_size > (sizeof(struct sctp_auth_random) + SCTP_RANDOM_MAX_SIZE)) {
5455 				SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error random %d\n", plen);
5456 				goto invalid_size;
5457 			}
5458 			at += padded_size;
5459 			break;
5460 		case SCTP_SET_PRIM_ADDR:
5461 		case SCTP_DEL_IP_ADDRESS:
5462 		case SCTP_ADD_IP_ADDRESS:
5463 			if ((padded_size != sizeof(struct sctp_asconf_addrv4_param)) &&
5464 			    (padded_size != sizeof(struct sctp_asconf_addr_param))) {
5465 				SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error setprim %d\n", plen);
5466 				goto invalid_size;
5467 			}
5468 			at += padded_size;
5469 			break;
5470 			/* Params with a fixed size */
5471 		case SCTP_IPV4_ADDRESS:
5472 			if (padded_size != sizeof(struct sctp_ipv4addr_param)) {
5473 				SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ipv4 addr %d\n", plen);
5474 				goto invalid_size;
5475 			}
5476 			at += padded_size;
5477 			break;
5478 		case SCTP_IPV6_ADDRESS:
5479 			if (padded_size != sizeof(struct sctp_ipv6addr_param)) {
5480 				SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ipv6 addr %d\n", plen);
5481 				goto invalid_size;
5482 			}
5483 			at += padded_size;
5484 			break;
5485 		case SCTP_COOKIE_PRESERVE:
5486 			if (padded_size != sizeof(struct sctp_cookie_perserve_param)) {
5487 				SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error cookie-preserve %d\n", plen);
5488 				goto invalid_size;
5489 			}
5490 			at += padded_size;
5491 			break;
5492 		case SCTP_HAS_NAT_SUPPORT:
5493 			*nat_friendly = 1;
5494 			/* fall through */
5495 		case SCTP_PRSCTP_SUPPORTED:
5496 			if (padded_size != sizeof(struct sctp_paramhdr)) {
5497 				SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error prsctp/nat support %d\n", plen);
5498 				goto invalid_size;
5499 			}
5500 			at += padded_size;
5501 			break;
5502 		case SCTP_ECN_CAPABLE:
5503 			if (padded_size != sizeof(struct sctp_paramhdr)) {
5504 				SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ecn %d\n", plen);
5505 				goto invalid_size;
5506 			}
5507 			at += padded_size;
5508 			break;
5509 		case SCTP_ULP_ADAPTATION:
5510 			if (padded_size != sizeof(struct sctp_adaptation_layer_indication)) {
5511 				SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error adaptation %d\n", plen);
5512 				goto invalid_size;
5513 			}
5514 			at += padded_size;
5515 			break;
5516 		case SCTP_SUCCESS_REPORT:
5517 			if (padded_size != sizeof(struct sctp_asconf_paramhdr)) {
5518 				SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error success %d\n", plen);
5519 				goto invalid_size;
5520 			}
5521 			at += padded_size;
5522 			break;
5523 		case SCTP_HOSTNAME_ADDRESS:
5524 		{
5525 			/* Hostname parameters are deprecated. */
5526 			struct sctp_gen_error_cause *cause;
5527 			int l_len;
5528 
5529 			SCTPDBG(SCTP_DEBUG_OUTPUT1, "Can't handle hostname addresses.. abort processing\n");
5530 			*abort_processing = 1;
5531 			sctp_m_freem(op_err);
5532 			op_err = NULL;
5533 			op_err_last = NULL;
5534 #ifdef INET6
5535 			l_len = SCTP_MIN_OVERHEAD;
5536 #else
5537 			l_len = SCTP_MIN_V4_OVERHEAD;
5538 #endif
5539 			l_len += sizeof(struct sctp_chunkhdr);
5540 			l_len += sizeof(struct sctp_gen_error_cause);
5541 			op_err = sctp_get_mbuf_for_msg(l_len, 0, M_NOWAIT, 1, MT_DATA);
5542 			if (op_err != NULL) {
5543 				/*
5544 				 * Pre-reserve space for IP, SCTP, and
5545 				 * chunk header.
5546 				 */
5547 #ifdef INET6
5548 				SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
5549 #else
5550 				SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
5551 #endif
5552 				SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
5553 				SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
5554 				SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause);
5555 				cause = mtod(op_err, struct sctp_gen_error_cause *);
5556 				cause->code = htons(SCTP_CAUSE_UNRESOLVABLE_ADDR);
5557 				cause->length = htons((uint16_t)(sizeof(struct sctp_gen_error_cause) + plen));
5558 				SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(mat, at, plen, M_NOWAIT);
5559 				if (SCTP_BUF_NEXT(op_err) == NULL) {
5560 					sctp_m_freem(op_err);
5561 					op_err = NULL;
5562 					op_err_last = NULL;
5563 				}
5564 			}
5565 			return (op_err);
5566 		}
5567 		default:
5568 			/*
5569 			 * We do not recognize the parameter; figure out what
5570 			 * to do based on the upper bits of its type.
5571 			 */
5572 			SCTPDBG(SCTP_DEBUG_OUTPUT1, "Hit default param %x\n", ptype);
5573 			if ((ptype & 0x4000) == 0x4000) {
5574 				/* Report bit is set?? */
5575 				SCTPDBG(SCTP_DEBUG_OUTPUT1, "report op err\n");
5576 				if (op_err == NULL) {
5577 					int l_len;
5578 					/* Ok need to try to get an mbuf */
5579 #ifdef INET6
5580 					l_len = SCTP_MIN_OVERHEAD;
5581 #else
5582 					l_len = SCTP_MIN_V4_OVERHEAD;
5583 #endif
5584 					l_len += sizeof(struct sctp_chunkhdr);
5585 					l_len += sizeof(struct sctp_paramhdr);
5586 					op_err = sctp_get_mbuf_for_msg(l_len, 0, M_NOWAIT, 1, MT_DATA);
5587 					if (op_err) {
5588 						SCTP_BUF_LEN(op_err) = 0;
5589 #ifdef INET6
5590 						SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
5591 #else
5592 						SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
5593 #endif
5594 						SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
5595 						SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
5596 						op_err_last = op_err;
5597 					}
5598 				}
5599 				if (op_err != NULL) {
5600 					/* If we have space */
5601 					struct sctp_paramhdr *param;
5602 
5603 					if (pad_needed > 0) {
5604 						op_err_last = sctp_add_pad_tombuf(op_err_last, pad_needed);
5605 					}
5606 					if (op_err_last == NULL) {
5607 						sctp_m_freem(op_err);
5608 						op_err = NULL;
5609 						op_err_last = NULL;
5610 						goto more_processing;
5611 					}
5612 					if (M_TRAILINGSPACE(op_err_last) < (int)sizeof(struct sctp_paramhdr)) {
5613 						m_tmp = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_NOWAIT, 1, MT_DATA);
5614 						if (m_tmp == NULL) {
5615 							sctp_m_freem(op_err);
5616 							op_err = NULL;
5617 							op_err_last = NULL;
5618 							goto more_processing;
5619 						}
5620 						SCTP_BUF_LEN(m_tmp) = 0;
5621 						SCTP_BUF_NEXT(m_tmp) = NULL;
5622 						SCTP_BUF_NEXT(op_err_last) = m_tmp;
5623 						op_err_last = m_tmp;
5624 					}
5625 					param = (struct sctp_paramhdr *)(mtod(op_err_last, caddr_t) + SCTP_BUF_LEN(op_err_last));
5626 					param->param_type = htons(SCTP_UNRECOG_PARAM);
5627 					param->param_length = htons((uint16_t)sizeof(struct sctp_paramhdr) + plen);
5628 					SCTP_BUF_LEN(op_err_last) += sizeof(struct sctp_paramhdr);
5629 					SCTP_BUF_NEXT(op_err_last) = SCTP_M_COPYM(mat, at, plen, M_NOWAIT);
5630 					if (SCTP_BUF_NEXT(op_err_last) == NULL) {
5631 						sctp_m_freem(op_err);
5632 						op_err = NULL;
5633 						op_err_last = NULL;
5634 						goto more_processing;
5635 					} else {
5636 						while (SCTP_BUF_NEXT(op_err_last) != NULL) {
5637 							op_err_last = SCTP_BUF_NEXT(op_err_last);
5638 						}
5639 					}
5640 					if (plen % 4 != 0) {
5641 						pad_needed = 4 - (plen % 4);
5642 					} else {
5643 						pad_needed = 0;
5644 					}
5645 				}
5646 			}
5647 		more_processing:
5648 			if ((ptype & 0x8000) == 0x0000) {
5649 				SCTPDBG(SCTP_DEBUG_OUTPUT1, "stop proc\n");
5650 				return (op_err);
5651 			} else {
5652 				/* skip this parameter and continue processing */
5653 				SCTPDBG(SCTP_DEBUG_OUTPUT1, "move on\n");
5654 				at += SCTP_SIZE32(plen);
5655 			}
5656 			break;
5657 
5658 		}
5659 		phdr = sctp_get_next_param(mat, at, &params, sizeof(params));
5660 	}
5661 	return (op_err);
5662  invalid_size:
5663 	SCTPDBG(SCTP_DEBUG_OUTPUT1, "abort flag set\n");
5664 	*abort_processing = 1;
5665 	sctp_m_freem(op_err);
5666 	op_err = NULL;
5667 	op_err_last = NULL;
5668 	if (phdr != NULL) {
5669 		struct sctp_paramhdr *param;
5670 		int l_len;
5671 #ifdef INET6
5672 		l_len = SCTP_MIN_OVERHEAD;
5673 #else
5674 		l_len = SCTP_MIN_V4_OVERHEAD;
5675 #endif
5676 		l_len += sizeof(struct sctp_chunkhdr);
5677 		l_len += (2 * sizeof(struct sctp_paramhdr));
5678 		op_err = sctp_get_mbuf_for_msg(l_len, 0, M_NOWAIT, 1, MT_DATA);
5679 		if (op_err) {
5680 			SCTP_BUF_LEN(op_err) = 0;
5681 #ifdef INET6
5682 			SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
5683 #else
5684 			SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
5685 #endif
5686 			SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
5687 			SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
5688 			SCTP_BUF_LEN(op_err) = 2 * sizeof(struct sctp_paramhdr);
5689 			param = mtod(op_err, struct sctp_paramhdr *);
5690 			param->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
5691 			param->param_length = htons(2 * sizeof(struct sctp_paramhdr));
5692 			param++;
5693 			param->param_type = htons(ptype);
5694 			param->param_length = htons(plen);
5695 		}
5696 	}
5697 	return (op_err);
5698 }
5699 
5700 static int
5701 sctp_are_there_new_addresses(struct sctp_association *asoc,
5702     struct mbuf *in_initpkt, int offset, struct sockaddr *src)
5703 {
5704 	/*
5705 	 * Given an INIT packet, look through the packet to verify that
5706 	 * there are NO new addresses. As we go through the parameters, add
5707 	 * reports of any not-understood parameters that require an error.
5708 	 * Also, we must return (1) to drop the packet if we see a
5709 	 * not-understood parameter that tells us to drop the chunk.
5710 	 */
5711 	struct sockaddr *sa_touse;
5712 	struct sockaddr *sa;
5713 	struct sctp_paramhdr *phdr, params;
5714 	uint16_t ptype, plen;
5715 	uint8_t fnd;
5716 	struct sctp_nets *net;
5717 	int check_src;
5718 #ifdef INET
5719 	struct sockaddr_in sin4, *sa4;
5720 #endif
5721 #ifdef INET6
5722 	struct sockaddr_in6 sin6, *sa6;
5723 #endif
5724 #if defined(__Userspace__)
5725 	struct sockaddr_conn *sac;
5726 #endif
5727 
5728 #ifdef INET
5729 	memset(&sin4, 0, sizeof(sin4));
5730 	sin4.sin_family = AF_INET;
5731 #ifdef HAVE_SIN_LEN
5732 	sin4.sin_len = sizeof(sin4);
5733 #endif
5734 #endif
5735 #ifdef INET6
5736 	memset(&sin6, 0, sizeof(sin6));
5737 	sin6.sin6_family = AF_INET6;
5738 #ifdef HAVE_SIN6_LEN
5739 	sin6.sin6_len = sizeof(sin6);
5740 #endif
5741 #endif
5742 	/* First, what about the src address of the packet? */
5743 	check_src = 0;
5744 	switch (src->sa_family) {
5745 #ifdef INET
5746 	case AF_INET:
5747 		if (asoc->scope.ipv4_addr_legal) {
5748 			check_src = 1;
5749 		}
5750 		break;
5751 #endif
5752 #ifdef INET6
5753 	case AF_INET6:
5754 		if (asoc->scope.ipv6_addr_legal) {
5755 			check_src = 1;
5756 		}
5757 		break;
5758 #endif
5759 #if defined(__Userspace__)
5760 	case AF_CONN:
5761 		if (asoc->scope.conn_addr_legal) {
5762 			check_src = 1;
5763 		}
5764 		break;
5765 #endif
5766 	default:
5767 		/* TSNH */
5768 		break;
5769 	}
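	/*
	 * If the packet's source address family is usable within this
	 * association's scope, make sure the source address itself is
	 * already known; a previously unseen source counts as a new
	 * address.
	 */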
5770 	if (check_src) {
5771 		fnd = 0;
5772 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5773 			sa = (struct sockaddr *)&net->ro._l_addr;
5774 			if (sa->sa_family == src->sa_family) {
5775 #ifdef INET
5776 				if (sa->sa_family == AF_INET) {
5777 					struct sockaddr_in *src4;
5778 
5779 					sa4 = (struct sockaddr_in *)sa;
5780 					src4 = (struct sockaddr_in *)src;
5781 					if (sa4->sin_addr.s_addr == src4->sin_addr.s_addr) {
5782 						fnd = 1;
5783 						break;
5784 					}
5785 				}
5786 #endif
5787 #ifdef INET6
5788 				if (sa->sa_family == AF_INET6) {
5789 					struct sockaddr_in6 *src6;
5790 
5791 					sa6 = (struct sockaddr_in6 *)sa;
5792 					src6 = (struct sockaddr_in6 *)src;
5793 					if (SCTP6_ARE_ADDR_EQUAL(sa6, src6)) {
5794 						fnd = 1;
5795 						break;
5796 					}
5797 				}
5798 #endif
5799 #if defined(__Userspace__)
5800 				if (sa->sa_family == AF_CONN) {
5801 					struct sockaddr_conn *srcc;
5802 
5803 					sac = (struct sockaddr_conn *)sa;
5804 					srcc = (struct sockaddr_conn *)src;
5805 					if (sac->sconn_addr == srcc->sconn_addr) {
5806 						fnd = 1;
5807 						break;
5808 					}
5809 				}
5810 #endif
5811 			}
5812 		}
5813 		if (fnd == 0) {
5814 			/* New address added! no need to look further. */
5815 			return (1);
5816 		}
5817 	}
5818 	/* Ok, so far so good; let's munge through the rest of the packet */
5819 	offset += sizeof(struct sctp_init_chunk);
5820 	phdr = sctp_get_next_param(in_initpkt, offset, &params, sizeof(params));
5821 	while (phdr) {
5822 		sa_touse = NULL;
5823 		ptype = ntohs(phdr->param_type);
5824 		plen = ntohs(phdr->param_length);
5825 		switch (ptype) {
5826 #ifdef INET
5827 		case SCTP_IPV4_ADDRESS:
5828 		{
5829 			struct sctp_ipv4addr_param *p4, p4_buf;
5830 
5831 			if (plen != sizeof(struct sctp_ipv4addr_param)) {
5832 				return (1);
5833 			}
5834 			phdr = sctp_get_next_param(in_initpkt, offset,
5835 			    (struct sctp_paramhdr *)&p4_buf, sizeof(p4_buf));
5836 			if (phdr == NULL) {
5837 				return (1);
5838 			}
5839 			if (asoc->scope.ipv4_addr_legal) {
5840 				p4 = (struct sctp_ipv4addr_param *)phdr;
5841 				sin4.sin_addr.s_addr = p4->addr;
5842 				sa_touse = (struct sockaddr *)&sin4;
5843 			}
5844 			break;
5845 		}
5846 #endif
5847 #ifdef INET6
5848 		case SCTP_IPV6_ADDRESS:
5849 		{
5850 			struct sctp_ipv6addr_param *p6, p6_buf;
5851 
5852 			if (plen != sizeof(struct sctp_ipv6addr_param)) {
5853 				return (1);
5854 			}
5855 			phdr = sctp_get_next_param(in_initpkt, offset,
5856 			    (struct sctp_paramhdr *)&p6_buf, sizeof(p6_buf));
5857 			if (phdr == NULL) {
5858 				return (1);
5859 			}
5860 			if (asoc->scope.ipv6_addr_legal) {
5861 				p6 = (struct sctp_ipv6addr_param *)phdr;
5862 				memcpy((caddr_t)&sin6.sin6_addr, p6->addr,
5863 				       sizeof(p6->addr));
5864 				sa_touse = (struct sockaddr *)&sin6;
5865 			}
5866 			break;
5867 		}
5868 #endif
5869 		default:
5870 			sa_touse = NULL;
5871 			break;
5872 		}
5873 		if (sa_touse) {
5874 			/* ok, sa_touse points to one to check */
5875 			fnd = 0;
5876 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5877 				sa = (struct sockaddr *)&net->ro._l_addr;
5878 				if (sa->sa_family != sa_touse->sa_family) {
5879 					continue;
5880 				}
5881 #ifdef INET
5882 				if (sa->sa_family == AF_INET) {
5883 					sa4 = (struct sockaddr_in *)sa;
5884 					if (sa4->sin_addr.s_addr ==
5885 					    sin4.sin_addr.s_addr) {
5886 						fnd = 1;
5887 						break;
5888 					}
5889 				}
5890 #endif
5891 #ifdef INET6
5892 				if (sa->sa_family == AF_INET6) {
5893 					sa6 = (struct sockaddr_in6 *)sa;
5894 					if (SCTP6_ARE_ADDR_EQUAL(
5895 					    sa6, &sin6)) {
5896 						fnd = 1;
5897 						break;
5898 					}
5899 				}
5900 #endif
5901 			}
5902 			if (!fnd) {
5903 				/* New addr added! no need to look further */
5904 				return (1);
5905 			}
5906 		}
5907 		offset += SCTP_SIZE32(plen);
5908 		phdr = sctp_get_next_param(in_initpkt, offset, &params, sizeof(params));
5909 	}
5910 	return (0);
5911 }
5912 
5913 /*
5914  * Given an mbuf chain that was sent into us containing an INIT, build an
5915  * INIT-ACK with a COOKIE and send it back. We assume that the in_initpkt has
5916  * done a pullup to include the IPv4/IPv6 header, the SCTP header and the
5917  * initial part of the INIT message (i.e. the struct sctp_init_msg).
5918  */
5919 void
5920 sctp_send_initiate_ack(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
5921                        struct sctp_nets *src_net, struct mbuf *init_pkt,
5922                        int iphlen, int offset,
5923                        struct sockaddr *src, struct sockaddr *dst,
5924                        struct sctphdr *sh, struct sctp_init_chunk *init_chk,
5925 #if defined(__FreeBSD__) && !defined(__Userspace__)
5926 		       uint8_t mflowtype, uint32_t mflowid,
5927 #endif
5928                        uint32_t vrf_id, uint16_t port)
5929 {
5930 	struct sctp_association *asoc;
5931 	struct mbuf *m, *m_tmp, *m_last, *m_cookie, *op_err;
5932 	struct sctp_init_ack_chunk *initack;
5933 	struct sctp_adaptation_layer_indication *ali;
5934 	struct sctp_supported_chunk_types_param *pr_supported;
5935 	struct sctp_paramhdr *ph;
5936 	union sctp_sockstore *over_addr;
5937 	struct sctp_scoping scp;
5938 	struct timeval now;
5939 #ifdef INET
5940 	struct sockaddr_in *dst4 = (struct sockaddr_in *)dst;
5941 	struct sockaddr_in *src4 = (struct sockaddr_in *)src;
5942 	struct sockaddr_in *sin;
5943 #endif
5944 #ifdef INET6
5945 	struct sockaddr_in6 *dst6 = (struct sockaddr_in6 *)dst;
5946 	struct sockaddr_in6 *src6 = (struct sockaddr_in6 *)src;
5947 	struct sockaddr_in6 *sin6;
5948 #endif
5949 #if defined(__Userspace__)
5950 	struct sockaddr_conn *dstconn = (struct sockaddr_conn *)dst;
5951 	struct sockaddr_conn *srcconn = (struct sockaddr_conn *)src;
5952 	struct sockaddr_conn *sconn;
5953 #endif
5954 	struct sockaddr *to;
5955 	struct sctp_state_cookie stc;
5956 	struct sctp_nets *net = NULL;
5957 	uint8_t *signature = NULL;
5958 	int cnt_inits_to = 0;
5959 	uint16_t his_limit, i_want;
5960 	int abort_flag;
5961 	int nat_friendly = 0;
5962 	int error;
5963 	struct socket *so;
5964 	uint16_t num_ext, chunk_len, padding_len, parameter_len;
5965 
5966 	if (stcb) {
5967 		asoc = &stcb->asoc;
5968 	} else {
5969 		asoc = NULL;
5970 	}
5971 	if ((asoc != NULL) &&
5972 	    (SCTP_GET_STATE(stcb) != SCTP_STATE_COOKIE_WAIT)) {
5973 		if (sctp_are_there_new_addresses(asoc, init_pkt, offset, src)) {
5974 			/*
5975 			 * new addresses, out of here in non-cookie-wait states
5976 			 *
5977 			 * Send an ABORT, without the new address error cause.
5978 			 * This looks no different than if no listener
5979 			 * was present.
5980 			 */
5981 			op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
5982 			                             "Address added");
5983 			sctp_send_abort(init_pkt, iphlen, src, dst, sh, 0, op_err,
5984 #if defined(__FreeBSD__) && !defined(__Userspace__)
5985 			                mflowtype, mflowid, inp->fibnum,
5986 #endif
5987 			                vrf_id, port);
5988 			return;
5989 		}
5990 		if (src_net != NULL && (src_net->port != port)) {
5991 			/*
5992 			 * change of remote encapsulation port, out of here in
5993 			 * non-cookie-wait states
5994 			 *
5995 			 * Send an ABORT, without a specific error cause.
5996 			 * This looks no different than if no listener
5997 			 * was present.
5998 			 */
5999 			op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
6000 			                             "Remote encapsulation port changed");
6001 			sctp_send_abort(init_pkt, iphlen, src, dst, sh, 0, op_err,
6002 #if defined(__FreeBSD__) && !defined(__Userspace__)
6003 			                mflowtype, mflowid, inp->fibnum,
6004 #endif
6005 			                vrf_id, port);
6006 			return;
6007 		}
6008 	}
6009 	abort_flag = 0;
6010 	op_err = sctp_arethere_unrecognized_parameters(init_pkt,
6011 	                                               (offset + sizeof(struct sctp_init_chunk)),
6012 	                                               &abort_flag,
6013 	                                               (struct sctp_chunkhdr *)init_chk,
6014 	                                               &nat_friendly, NULL);
6015 	if (abort_flag) {
6016 	do_a_abort:
6017 		if (op_err == NULL) {
6018 			char msg[SCTP_DIAG_INFO_LEN];
6019 
6020 			SCTP_SNPRINTF(msg, sizeof(msg), "%s:%d at %s", __FILE__, __LINE__, __func__);
6021 			op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
6022 			                             msg);
6023 		}
6024 		sctp_send_abort(init_pkt, iphlen, src, dst, sh,
6025 				init_chk->init.initiate_tag, op_err,
6026 #if defined(__FreeBSD__) && !defined(__Userspace__)
6027 		                mflowtype, mflowid, inp->fibnum,
6028 #endif
6029 		                vrf_id, port);
6030 		return;
6031 	}
6032 	m = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
6033 	if (m == NULL) {
6034 		/* No memory, INIT timer will re-attempt. */
6035 		sctp_m_freem(op_err);
6036 		return;
6037 	}
6038 	chunk_len = (uint16_t)sizeof(struct sctp_init_ack_chunk);
6039 	padding_len = 0;
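	/*
	 * From here on, chunk_len tracks how many bytes of the INIT-ACK chunk
	 * have been written into the first mbuf, while padding_len carries the
	 * pad (to a 4-byte boundary) still owed before the next parameter may
	 * be appended.
	 */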
6040 
6041 	/*
6042 	 * We might not overwrite the identification[] completely and on
6043 	 * some platforms time_entered will contain some padding.
6044 	 * Therefore zero out the cookie to avoid putting
6045 	 * uninitialized memory on the wire.
6046 	 */
6047 	memset(&stc, 0, sizeof(struct sctp_state_cookie));
6048 
6049 	/* the time I built cookie */
6050 	(void)SCTP_GETTIME_TIMEVAL(&now);
6051 	stc.time_entered.tv_sec = now.tv_sec;
6052 	stc.time_entered.tv_usec = now.tv_usec;
6053 
6054 	/* populate any tie tags */
6055 	if (asoc != NULL) {
6056 		/* unlock before tag selections */
6057 		stc.tie_tag_my_vtag = asoc->my_vtag_nonce;
6058 		stc.tie_tag_peer_vtag = asoc->peer_vtag_nonce;
6059 		stc.cookie_life = asoc->cookie_life;
6060 		net = asoc->primary_destination;
6061 	} else {
6062 		stc.tie_tag_my_vtag = 0;
6063 		stc.tie_tag_peer_vtag = 0;
6064 		/* life I will award this cookie */
6065 		stc.cookie_life = inp->sctp_ep.def_cookie_life;
6066 	}
6067 
6068 	/* copy in the ports for later check */
6069 	stc.myport = sh->dest_port;
6070 	stc.peerport = sh->src_port;
6071 
6072 	/*
6073 	 * If we wanted to honor cookie life extensions, we would add to
6074 	 * stc.cookie_life. For now we should NOT honor any extension
6075 	 */
6076 	stc.site_scope = stc.local_scope = stc.loopback_scope = 0;
6077 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
6078 		stc.ipv6_addr_legal = 1;
6079 		if (SCTP_IPV6_V6ONLY(inp)) {
6080 			stc.ipv4_addr_legal = 0;
6081 		} else {
6082 			stc.ipv4_addr_legal = 1;
6083 		}
6084 #if defined(__Userspace__)
6085 		stc.conn_addr_legal = 0;
6086 #endif
6087 	} else {
6088 		stc.ipv6_addr_legal = 0;
6089 #if defined(__Userspace__)
6090 		if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_CONN) {
6091 			stc.conn_addr_legal = 1;
6092 			stc.ipv4_addr_legal = 0;
6093 		} else {
6094 			stc.conn_addr_legal = 0;
6095 			stc.ipv4_addr_legal = 1;
6096 		}
6097 #else
6098 		stc.ipv4_addr_legal = 1;
6099 #endif
6100 	}
6101 	stc.ipv4_scope = 0;
6102 	if (net == NULL) {
6103 		to = src;
6104 		switch (dst->sa_family) {
6105 #ifdef INET
6106 		case AF_INET:
6107 		{
6108 			/* lookup address */
6109 			stc.address[0] = src4->sin_addr.s_addr;
6110 			stc.address[1] = 0;
6111 			stc.address[2] = 0;
6112 			stc.address[3] = 0;
6113 			stc.addr_type = SCTP_IPV4_ADDRESS;
6114 			/* local from address */
6115 			stc.laddress[0] = dst4->sin_addr.s_addr;
6116 			stc.laddress[1] = 0;
6117 			stc.laddress[2] = 0;
6118 			stc.laddress[3] = 0;
6119 			stc.laddr_type = SCTP_IPV4_ADDRESS;
6120 			/* scope_id is only for v6 */
6121 			stc.scope_id = 0;
6122 			if ((IN4_ISPRIVATE_ADDRESS(&src4->sin_addr)) ||
6123 			    (IN4_ISPRIVATE_ADDRESS(&dst4->sin_addr))){
6124 				stc.ipv4_scope = 1;
6125 			}
6126 			/* Must use the address in this case */
6127 			if (sctp_is_address_on_local_host(src, vrf_id)) {
6128 				stc.loopback_scope = 1;
6129 				stc.ipv4_scope = 1;
6130 				stc.site_scope = 1;
6131 				stc.local_scope = 0;
6132 			}
6133 			break;
6134 		}
6135 #endif
6136 #ifdef INET6
6137 		case AF_INET6:
6138 		{
6139 			stc.addr_type = SCTP_IPV6_ADDRESS;
6140 			memcpy(&stc.address, &src6->sin6_addr, sizeof(struct in6_addr));
6141 #if defined(__FreeBSD__) && !defined(__Userspace__)
6142 			stc.scope_id = ntohs(in6_getscope(&src6->sin6_addr));
6143 #else
6144 			stc.scope_id = 0;
6145 #endif
6146 			if (sctp_is_address_on_local_host(src, vrf_id)) {
6147 				stc.loopback_scope = 1;
6148 				stc.local_scope = 0;
6149 				stc.site_scope = 1;
6150 				stc.ipv4_scope = 1;
6151 			} else if (IN6_IS_ADDR_LINKLOCAL(&src6->sin6_addr) ||
6152 			           IN6_IS_ADDR_LINKLOCAL(&dst6->sin6_addr)) {
6153 				/*
6154 				 * If the new destination or source is
6155 				 * LINK_LOCAL, we must have both site and
6156 				 * local scope in common. Don't set local scope
6157 				 * though, since we must depend on the source
6158 				 * to be added implicitly. Sharing one link
6159 				 * does not assure us that all other links
6160 				 * are common.
6161 				 */
6162 #if defined(__APPLE__) && !defined(__Userspace__)
6163 				/* Mac OS X currently doesn't have in6_getscope() */
6164 				stc.scope_id = src6->sin6_addr.s6_addr16[1];
6165 #endif
6166 				stc.local_scope = 0;
6167 				stc.site_scope = 1;
6168 				stc.ipv4_scope = 1;
6169 				/*
6170 				 * We start counting for the private address
6171 				 * scoping at 1, since the link-local address
6172 				 * we source from won't show up in our scoped
6173 				 * count.
6174 				 */
6175 				cnt_inits_to = 1;
6176 				/* pull out the scope_id from incoming pkt */
6177 			} else if (IN6_IS_ADDR_SITELOCAL(&src6->sin6_addr) ||
6178 			           IN6_IS_ADDR_SITELOCAL(&dst6->sin6_addr)) {
6179 				/*
6180 				 * If the new destination or source is
6181 				 * SITE_LOCAL then we must have site scope in
6182 				 * common.
6183 				 */
6184 				stc.site_scope = 1;
6185 			}
6186 			memcpy(&stc.laddress, &dst6->sin6_addr, sizeof(struct in6_addr));
6187 			stc.laddr_type = SCTP_IPV6_ADDRESS;
6188 			break;
6189 		}
6190 #endif
6191 #if defined(__Userspace__)
6192 		case AF_CONN:
6193 		{
6194 			/* lookup address */
6195 			stc.address[0] = 0;
6196 			stc.address[1] = 0;
6197 			stc.address[2] = 0;
6198 			stc.address[3] = 0;
6199 			memcpy(&stc.address, &srcconn->sconn_addr, sizeof(void *));
6200 			stc.addr_type = SCTP_CONN_ADDRESS;
6201 			/* local from address */
6202 			stc.laddress[0] = 0;
6203 			stc.laddress[1] = 0;
6204 			stc.laddress[2] = 0;
6205 			stc.laddress[3] = 0;
6206 			memcpy(&stc.laddress, &dstconn->sconn_addr, sizeof(void *));
6207 			stc.laddr_type = SCTP_CONN_ADDRESS;
6208 			/* scope_id is only for v6 */
6209 			stc.scope_id = 0;
6210 			break;
6211 		}
6212 #endif
6213 		default:
6214 			/* TSNH */
6215 			goto do_a_abort;
6216 			break;
6217 		}
6218 	} else {
6219 		/* set the scope per the existing tcb */
6220 
6221 #ifdef INET6
6222 		struct sctp_nets *lnet;
6223 #endif
6224 
6225 		stc.loopback_scope = asoc->scope.loopback_scope;
6226 		stc.ipv4_scope = asoc->scope.ipv4_local_scope;
6227 		stc.site_scope = asoc->scope.site_scope;
6228 		stc.local_scope = asoc->scope.local_scope;
6229 #ifdef INET6
6230 		/* Why do we not consider IPv4 LL addresses? */
6231 		TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) {
6232 			if (lnet->ro._l_addr.sin6.sin6_family == AF_INET6) {
6233 				if (IN6_IS_ADDR_LINKLOCAL(&lnet->ro._l_addr.sin6.sin6_addr)) {
6234 					/*
6235 					 * if we have a LL address, start
6236 					 * counting at 1.
6237 					 */
6238 					cnt_inits_to = 1;
6239 				}
6240 			}
6241 		}
6242 #endif
6243 		/* use the net pointer */
6244 		to = (struct sockaddr *)&net->ro._l_addr;
6245 		switch (to->sa_family) {
6246 #ifdef INET
6247 		case AF_INET:
6248 			sin = (struct sockaddr_in *)to;
6249 			stc.address[0] = sin->sin_addr.s_addr;
6250 			stc.address[1] = 0;
6251 			stc.address[2] = 0;
6252 			stc.address[3] = 0;
6253 			stc.addr_type = SCTP_IPV4_ADDRESS;
6254 			if (net->src_addr_selected == 0) {
6255 				/*
6256 				 * strange case here, the INIT should have
6257 				 * done the selection.
6258 				 */
6259 				net->ro._s_addr = sctp_source_address_selection(inp,
6260 										stcb, (sctp_route_t *)&net->ro,
6261 										net, 0, vrf_id);
6262 				if (net->ro._s_addr == NULL) {
6263 					sctp_m_freem(op_err);
6264 					sctp_m_freem(m);
6265 					return;
6266 				}
6267 
6268 				net->src_addr_selected = 1;
6269 
6270 			}
6271 			stc.laddress[0] = net->ro._s_addr->address.sin.sin_addr.s_addr;
6272 			stc.laddress[1] = 0;
6273 			stc.laddress[2] = 0;
6274 			stc.laddress[3] = 0;
6275 			stc.laddr_type = SCTP_IPV4_ADDRESS;
6276 			/* scope_id is only for v6 */
6277 			stc.scope_id = 0;
6278 			break;
6279 #endif
6280 #ifdef INET6
6281 		case AF_INET6:
6282 			sin6 = (struct sockaddr_in6 *)to;
6283 			memcpy(&stc.address, &sin6->sin6_addr,
6284 			       sizeof(struct in6_addr));
6285 			stc.addr_type = SCTP_IPV6_ADDRESS;
6286 			stc.scope_id = sin6->sin6_scope_id;
6287 			if (net->src_addr_selected == 0) {
6288 				/*
6289 				 * strange case here, the INIT should have
6290 				 * done the selection.
6291 				 */
6292 				net->ro._s_addr = sctp_source_address_selection(inp,
6293 										stcb, (sctp_route_t *)&net->ro,
6294 										net, 0, vrf_id);
6295 				if (net->ro._s_addr == NULL) {
6296 					sctp_m_freem(op_err);
6297 					sctp_m_freem(m);
6298 					return;
6299 				}
6300 
6301 				net->src_addr_selected = 1;
6302 			}
6303 			memcpy(&stc.laddress, &net->ro._s_addr->address.sin6.sin6_addr,
6304 			       sizeof(struct in6_addr));
6305 			stc.laddr_type = SCTP_IPV6_ADDRESS;
6306 			break;
6307 #endif
6308 #if defined(__Userspace__)
6309 		case AF_CONN:
6310 			sconn = (struct sockaddr_conn *)to;
6311 			stc.address[0] = 0;
6312 			stc.address[1] = 0;
6313 			stc.address[2] = 0;
6314 			stc.address[3] = 0;
6315 			memcpy(&stc.address, &sconn->sconn_addr, sizeof(void *));
6316 			stc.addr_type = SCTP_CONN_ADDRESS;
6317 			stc.laddress[0] = 0;
6318 			stc.laddress[1] = 0;
6319 			stc.laddress[2] = 0;
6320 			stc.laddress[3] = 0;
6321 			memcpy(&stc.laddress, &sconn->sconn_addr, sizeof(void *));
6322 			stc.laddr_type = SCTP_CONN_ADDRESS;
6323 			stc.scope_id = 0;
6324 			break;
6325 #endif
6326 		}
6327 	}
6328 	/* Now lets put the SCTP header in place */
6329 	initack = mtod(m, struct sctp_init_ack_chunk *);
6330 	/* Save it off for quick ref */
6331 	stc.peers_vtag = ntohl(init_chk->init.initiate_tag);
6332 	/* who are we */
6333 	memcpy(stc.identification, SCTP_VERSION_STRING,
6334 	       min(strlen(SCTP_VERSION_STRING), sizeof(stc.identification)));
6335 	memset(stc.reserved, 0, SCTP_RESERVE_SPACE);
6336 	/* now the chunk header */
6337 	initack->ch.chunk_type = SCTP_INITIATION_ACK;
6338 	initack->ch.chunk_flags = 0;
6339 	/* fill in later from mbuf we build */
6340 	initack->ch.chunk_length = 0;
6341 	/* place in my tag */
6342 	if ((asoc != NULL) &&
6343 	    ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
6344 	     (SCTP_GET_STATE(stcb) == SCTP_STATE_INUSE) ||
6345 	     (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED))) {
6346 		/* re-use the v-tags and init-seq here */
6347 		initack->init.initiate_tag = htonl(asoc->my_vtag);
6348 		initack->init.initial_tsn = htonl(asoc->init_seq_number);
6349 	} else {
6350 		uint32_t vtag, itsn;
6351 
6352 		if (asoc) {
6353 			atomic_add_int(&asoc->refcnt, 1);
6354 			SCTP_TCB_UNLOCK(stcb);
6355 		new_tag:
6356 			vtag = sctp_select_a_tag(inp, inp->sctp_lport, sh->src_port, 1);
6357 			if ((asoc->peer_supports_nat)  && (vtag == asoc->my_vtag)) {
6358 				/* Got a duplicate vtag from some peer behind a NAT;
6359 				 * make sure we don't use it.
6360 				 */
6361 				goto new_tag;
6362 			}
6363 			initack->init.initiate_tag = htonl(vtag);
6364 			/* get a TSN to use too */
6365 			itsn = sctp_select_initial_TSN(&inp->sctp_ep);
6366 			initack->init.initial_tsn = htonl(itsn);
6367 			SCTP_TCB_LOCK(stcb);
6368 			atomic_add_int(&asoc->refcnt, -1);
6369 		} else {
6370 			SCTP_INP_INCR_REF(inp);
6371 			SCTP_INP_RUNLOCK(inp);
6372 			vtag = sctp_select_a_tag(inp, inp->sctp_lport, sh->src_port, 1);
6373 			initack->init.initiate_tag = htonl(vtag);
6374 			/* get a TSN to use too */
6375 			initack->init.initial_tsn = htonl(sctp_select_initial_TSN(&inp->sctp_ep));
6376 			SCTP_INP_RLOCK(inp);
6377 			SCTP_INP_DECR_REF(inp);
6378 		}
6379 	}
6380 	/* save away my tag too */
6381 	stc.my_vtag = initack->init.initiate_tag;
6382 
6383 	/* set up some of the credits. */
6384 	so = inp->sctp_socket;
6385 	if (so == NULL) {
6386 		/* no socket, can't continue */
6387 		sctp_m_freem(op_err);
6388 		sctp_m_freem(m);
6389 		return;
6390 	} else {
6391 		initack->init.a_rwnd = htonl(max(SCTP_SB_LIMIT_RCV(so), SCTP_MINIMAL_RWND));
6392 	}
6393 	/* set what I want */
6394 	his_limit = ntohs(init_chk->init.num_inbound_streams);
6395 	/* choose what I want */
6396 	if (asoc != NULL) {
6397 		if (asoc->streamoutcnt > asoc->pre_open_streams) {
6398 			i_want = asoc->streamoutcnt;
6399 		} else {
6400 			i_want = asoc->pre_open_streams;
6401 		}
6402 	} else {
6403 		i_want = inp->sctp_ep.pre_open_stream_count;
6404 	}
6405 	if (his_limit < i_want) {
6406 		/* I Want more :< */
6407 		initack->init.num_outbound_streams = init_chk->init.num_inbound_streams;
6408 	} else {
6409 		/* I can have what I want :> */
6410 		initack->init.num_outbound_streams = htons(i_want);
6411 	}
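	/*
	 * Example: if the peer's INIT advertised 10 inbound streams and we
	 * want 16 outbound, we offer only 10; if we want 8, we offer 8.
	 */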
6412 	/* tell him his limit. */
6413 	initack->init.num_inbound_streams =
6414 		htons(inp->sctp_ep.max_open_streams_intome);
6415 
6416 	/* adaptation layer indication parameter */
6417 	if (inp->sctp_ep.adaptation_layer_indicator_provided) {
6418 		parameter_len = (uint16_t)sizeof(struct sctp_adaptation_layer_indication);
6419 		ali = (struct sctp_adaptation_layer_indication *)(mtod(m, caddr_t) + chunk_len);
6420 		ali->ph.param_type = htons(SCTP_ULP_ADAPTATION);
6421 		ali->ph.param_length = htons(parameter_len);
6422 		ali->indication = htonl(inp->sctp_ep.adaptation_layer_indicator);
6423 		chunk_len += parameter_len;
6424 	}
6425 
6426 	/* ECN parameter */
6427 	if (((asoc != NULL) && (asoc->ecn_supported == 1)) ||
6428 	    ((asoc == NULL) && (inp->ecn_supported == 1))) {
6429 		parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
6430 		ph = (struct sctp_paramhdr *)(mtod(m, caddr_t) + chunk_len);
6431 		ph->param_type = htons(SCTP_ECN_CAPABLE);
6432 		ph->param_length = htons(parameter_len);
6433 		chunk_len += parameter_len;
6434 	}
6435 
6436 	/* PR-SCTP supported parameter */
6437 	if (((asoc != NULL) && (asoc->prsctp_supported == 1)) ||
6438 	    ((asoc == NULL) && (inp->prsctp_supported == 1))) {
6439 		parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
6440 		ph = (struct sctp_paramhdr *)(mtod(m, caddr_t) + chunk_len);
6441 		ph->param_type = htons(SCTP_PRSCTP_SUPPORTED);
6442 		ph->param_length = htons(parameter_len);
6443 		chunk_len += parameter_len;
6444 	}
6445 
6446 	/* Add NAT friendly parameter */
6447 	if (nat_friendly) {
6448 		parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
6449 		ph = (struct sctp_paramhdr *)(mtod(m, caddr_t) + chunk_len);
6450 		ph->param_type = htons(SCTP_HAS_NAT_SUPPORT);
6451 		ph->param_length = htons(parameter_len);
6452 		chunk_len += parameter_len;
6453 	}
6454 
6455 	/* And now tell the peer which extensions we support */
6456 	num_ext = 0;
6457 	pr_supported = (struct sctp_supported_chunk_types_param *)(mtod(m, caddr_t) + chunk_len);
6458 	if (((asoc != NULL) && (asoc->prsctp_supported == 1)) ||
6459 	    ((asoc == NULL) && (inp->prsctp_supported == 1))) {
6460 		pr_supported->chunk_types[num_ext++] = SCTP_FORWARD_CUM_TSN;
6461 		if (((asoc != NULL) && (asoc->idata_supported == 1)) ||
6462 		    ((asoc == NULL) && (inp->idata_supported == 1))) {
6463 			pr_supported->chunk_types[num_ext++] = SCTP_IFORWARD_CUM_TSN;
6464 		}
6465 	}
6466 	if (((asoc != NULL) && (asoc->auth_supported == 1)) ||
6467 	    ((asoc == NULL) && (inp->auth_supported == 1))) {
6468 		pr_supported->chunk_types[num_ext++] = SCTP_AUTHENTICATION;
6469 	}
6470 	if (((asoc != NULL) && (asoc->asconf_supported == 1)) ||
6471 	    ((asoc == NULL) && (inp->asconf_supported == 1))) {
6472 		pr_supported->chunk_types[num_ext++] = SCTP_ASCONF;
6473 		pr_supported->chunk_types[num_ext++] = SCTP_ASCONF_ACK;
6474 	}
6475 	if (((asoc != NULL) && (asoc->reconfig_supported == 1)) ||
6476 	    ((asoc == NULL) && (inp->reconfig_supported == 1))) {
6477 		pr_supported->chunk_types[num_ext++] = SCTP_STREAM_RESET;
6478 	}
6479 	if (((asoc != NULL) && (asoc->idata_supported == 1)) ||
6480 	    ((asoc == NULL) && (inp->idata_supported == 1))) {
6481 		pr_supported->chunk_types[num_ext++] = SCTP_IDATA;
6482 	}
6483 	if (((asoc != NULL) && (asoc->nrsack_supported == 1)) ||
6484 	    ((asoc == NULL) && (inp->nrsack_supported == 1))) {
6485 		pr_supported->chunk_types[num_ext++] = SCTP_NR_SELECTIVE_ACK;
6486 	}
6487 	if (((asoc != NULL) && (asoc->pktdrop_supported == 1)) ||
6488 	    ((asoc == NULL) && (inp->pktdrop_supported == 1))) {
6489 		pr_supported->chunk_types[num_ext++] = SCTP_PACKET_DROPPED;
6490 	}
6491 	if (num_ext > 0) {
6492 		parameter_len = (uint16_t)sizeof(struct sctp_supported_chunk_types_param) + num_ext;
6493 		pr_supported->ph.param_type = htons(SCTP_SUPPORTED_CHUNK_EXT);
6494 		pr_supported->ph.param_length = htons(parameter_len);
6495 		padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
6496 		chunk_len += parameter_len;
6497 	}
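	/*
	 * Illustration (assuming the fixed part of this parameter is just its
	 * 4-byte header): listing five chunk types gives parameter_len = 9,
	 * which SCTP_SIZE32 rounds up to 12, so 3 bytes of padding are owed
	 * before the next parameter is written.
	 */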
6498 
6499 	/* add authentication parameters */
6500 	if (((asoc != NULL) && (asoc->auth_supported == 1)) ||
6501 	    ((asoc == NULL) && (inp->auth_supported == 1))) {
6502 		struct sctp_auth_random *randp;
6503 		struct sctp_auth_hmac_algo *hmacs;
6504 		struct sctp_auth_chunk_list *chunks;
6505 
6506 		if (padding_len > 0) {
6507 			memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
6508 			chunk_len += padding_len;
6509 			padding_len = 0;
6510 		}
6511 		/* generate and add RANDOM parameter */
6512 		randp = (struct sctp_auth_random *)(mtod(m, caddr_t) + chunk_len);
6513 		parameter_len = (uint16_t)sizeof(struct sctp_auth_random) +
6514 		                SCTP_AUTH_RANDOM_SIZE_DEFAULT;
6515 		randp->ph.param_type = htons(SCTP_RANDOM);
6516 		randp->ph.param_length = htons(parameter_len);
6517 		SCTP_READ_RANDOM(randp->random_data, SCTP_AUTH_RANDOM_SIZE_DEFAULT);
6518 		padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
6519 		chunk_len += parameter_len;
6520 
6521 		if (padding_len > 0) {
6522 			memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
6523 			chunk_len += padding_len;
6524 			padding_len = 0;
6525 		}
6526 		/* add HMAC_ALGO parameter */
6527 		hmacs = (struct sctp_auth_hmac_algo *)(mtod(m, caddr_t) + chunk_len);
6528 		parameter_len = (uint16_t)sizeof(struct sctp_auth_hmac_algo) +
6529 		                sctp_serialize_hmaclist(inp->sctp_ep.local_hmacs,
6530 		                                        (uint8_t *)hmacs->hmac_ids);
6531 		hmacs->ph.param_type = htons(SCTP_HMAC_LIST);
6532 		hmacs->ph.param_length = htons(parameter_len);
6533 		padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
6534 		chunk_len += parameter_len;
6535 
6536 		if (padding_len > 0) {
6537 			memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
6538 			chunk_len += padding_len;
6539 			padding_len = 0;
6540 		}
6541 		/* add CHUNKS parameter */
6542 		chunks = (struct sctp_auth_chunk_list *)(mtod(m, caddr_t) + chunk_len);
6543 		parameter_len = (uint16_t)sizeof(struct sctp_auth_chunk_list) +
6544 		                sctp_serialize_auth_chunks(inp->sctp_ep.local_auth_chunks,
6545 		                                           chunks->chunk_types);
6546 		chunks->ph.param_type = htons(SCTP_CHUNK_LIST);
6547 		chunks->ph.param_length = htons(parameter_len);
6548 		padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
6549 		chunk_len += parameter_len;
6550 	}
6551 	SCTP_BUF_LEN(m) = chunk_len;
6552 	m_last = m;
6553 	/* now the addresses */
6554 	/* To optimize this we could put the scoping fields
6555 	 * into a structure and remove the individual uint8's from
6556 	 * the stc structure. Then we could just copy the scoping
6557 	 * along with the address within the stc... but for now this
6558 	 * is a quick hack to get the address handling teased apart.
6559 	 */
6560 	scp.ipv4_addr_legal = stc.ipv4_addr_legal;
6561 	scp.ipv6_addr_legal = stc.ipv6_addr_legal;
6562 #if defined(__Userspace__)
6563 	scp.conn_addr_legal = stc.conn_addr_legal;
6564 #endif
6565 	scp.loopback_scope = stc.loopback_scope;
6566 	scp.ipv4_local_scope = stc.ipv4_scope;
6567 	scp.local_scope = stc.local_scope;
6568 	scp.site_scope = stc.site_scope;
6569 	m_last = sctp_add_addresses_to_i_ia(inp, stcb, &scp, m_last,
6570 	                                    cnt_inits_to,
6571 	                                    &padding_len, &chunk_len);
6572 	/* padding_len can only be positive if no addresses have been added */
6573 	if (padding_len > 0) {
6574 		memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
6575 		chunk_len += padding_len;
6576 		SCTP_BUF_LEN(m) += padding_len;
6577 		padding_len = 0;
6578 	}
6579 
6580 	/* tack on the operational error if present */
6581 	if (op_err) {
6582 		parameter_len = 0;
6583 		for (m_tmp = op_err; m_tmp != NULL; m_tmp = SCTP_BUF_NEXT(m_tmp)) {
6584 			parameter_len += SCTP_BUF_LEN(m_tmp);
6585 		}
6586 		padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
6587 		SCTP_BUF_NEXT(m_last) = op_err;
6588 		while (SCTP_BUF_NEXT(m_last) != NULL) {
6589 			m_last = SCTP_BUF_NEXT(m_last);
6590 		}
6591 		chunk_len += parameter_len;
6592 	}
6593 	if (padding_len > 0) {
6594 		m_last = sctp_add_pad_tombuf(m_last, padding_len);
6595 		if (m_last == NULL) {
6596 			/* Houston we have a problem, no space */
6597 			sctp_m_freem(m);
6598 			return;
6599 		}
6600 		chunk_len += padding_len;
6601 		padding_len = 0;
6602 	}
6603 	/* Now we must build a cookie */
6604 	m_cookie = sctp_add_cookie(init_pkt, offset, m, 0, &stc, &signature);
6605 	if (m_cookie == NULL) {
6606 		/* memory problem */
6607 		sctp_m_freem(m);
6608 		return;
6609 	}
6610 	/* Now append the cookie to the end and update the space/size */
6611 	SCTP_BUF_NEXT(m_last) = m_cookie;
6612 	parameter_len = 0;
6613 	for (m_tmp = m_cookie; m_tmp != NULL; m_tmp = SCTP_BUF_NEXT(m_tmp)) {
6614 		parameter_len += SCTP_BUF_LEN(m_tmp);
6615 		if (SCTP_BUF_NEXT(m_tmp) == NULL) {
6616 			m_last = m_tmp;
6617 		}
6618 	}
6619 	padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
6620 	chunk_len += parameter_len;
6621 
6622 	/* Place in the size, but we don't include
6623 	 * the last pad (if any) in the INIT-ACK.
6624 	 */
6625 	initack->ch.chunk_length = htons(chunk_len);
6626 
6627 	/* Time to sign the cookie. We don't sign over the cookie
6628 	 * signature itself, though, thus we set a trailer.
6629 	 */
6630 	(void)sctp_hmac_m(SCTP_HMAC,
6631 			  (uint8_t *)inp->sctp_ep.secret_key[(int)(inp->sctp_ep.current_secret_number)],
6632 			  SCTP_SECRET_SIZE, m_cookie, sizeof(struct sctp_paramhdr),
6633 			  (uint8_t *)signature, SCTP_SIGNATURE_SIZE);
6634 #if defined(__Userspace__)
6635 	/*
6636 	 * Don't put AF_CONN addresses on the wire, in case this is critical
6637 	 * for the application. However, they are protected by the HMAC and
6638 	 * need to be reconstructed before checking the HMAC.
6639 	 * Clearing is only done in the mbuf chain, since the local stc is
6640 	 * not used anymore.
6641 	 */
6642 	if (stc.addr_type == SCTP_CONN_ADDRESS) {
6643 		const void *p = NULL;
6644 
6645 		m_copyback(m_cookie, sizeof(struct sctp_paramhdr) + offsetof(struct sctp_state_cookie, address),
6646 		           (int)sizeof(void *), (caddr_t)&p);
6647 	}
6648 	if (stc.laddr_type == SCTP_CONN_ADDRESS) {
6649 		const void *p = NULL;
6650 
6651 		m_copyback(m_cookie, sizeof(struct sctp_paramhdr) + offsetof(struct sctp_state_cookie, laddress),
6652 		           (int)sizeof(void *), (caddr_t)&p);
6653 	}
6654 #endif
6655 	/*
6656 	 * We pass 0 here to NOT set IP_DF if it is IPv4; we ignore the return
6657 	 * here since the timer will drive a retransmission.
6658 	 */
6659 	if (padding_len > 0) {
6660 		if (sctp_add_pad_tombuf(m_last, padding_len) == NULL) {
6661 			sctp_m_freem(m);
6662 			return;
6663 		}
6664 	}
6665 	if (stc.loopback_scope) {
6666 		over_addr = (union sctp_sockstore *)dst;
6667 	} else {
6668 		over_addr = NULL;
6669 	}
6670 
6671 	if ((error = sctp_lowlevel_chunk_output(inp, NULL, NULL, to, m, 0, NULL, 0, 0,
6672 	                                        0, 0,
6673 	                                        inp->sctp_lport, sh->src_port, init_chk->init.initiate_tag,
6674 	                                        port, over_addr,
6675 #if defined(__FreeBSD__) && !defined(__Userspace__)
6676 	                                        mflowtype, mflowid,
6677 #endif
6678 	                                        SCTP_SO_NOT_LOCKED))) {
6679 		SCTPDBG(SCTP_DEBUG_OUTPUT4, "Gak send error %d\n", error);
6680 		if (error == ENOBUFS) {
6681 			if (asoc != NULL) {
6682 				asoc->ifp_had_enobuf = 1;
6683 			}
6684 			SCTP_STAT_INCR(sctps_lowlevelerr);
6685 		}
6686 	} else {
6687 		if (asoc != NULL) {
6688 			asoc->ifp_had_enobuf = 0;
6689 		}
6690 	}
6691 	SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
6692 }
6693 
6694 
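/*
 * Reclaim buffer space for a new send by dropping already-queued PR-SCTP
 * "buffer limited" chunks of lower priority, stopping once at least
 * 'dataout' bytes have been freed.
 */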
6695 static void
6696 sctp_prune_prsctp(struct sctp_tcb *stcb,
6697     struct sctp_association *asoc,
6698     struct sctp_sndrcvinfo *srcv,
6699     int dataout)
6700 {
6701 	int freed_spc = 0;
6702 	struct sctp_tmit_chunk *chk, *nchk;
6703 
6704 	SCTP_TCB_LOCK_ASSERT(stcb);
6705 	if ((asoc->prsctp_supported) &&
6706 	    (asoc->sent_queue_cnt_removeable > 0)) {
6707 		TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
6708 			/*
6709 			 * Look for chunks marked with the PR_SCTP flag AND
6710 			 * the buffer space flag. If the one being sent is of
6711 			 * equal or greater priority, then purge the old one
6712 			 * and free some space.
6713 			 */
6714 			if (PR_SCTP_BUF_ENABLED(chk->flags)) {
6715 				/*
6716 				 * This one is PR-SCTP AND buffer space
6717 				 * limited type
6718 				 */
6719 				if (chk->rec.data.timetodrop.tv_sec > (long)srcv->sinfo_timetolive) {
6720 					/*
6721 					 * Lower numbers equate to higher
6722 					 * priority. So if the one we are
6723 					 * looking at has a larger value
6724 					 * (lower priority), we want to drop
6725 					 * the data and NOT retransmit it.
6726 					 */
6727 					if (chk->data) {
6728 						/*
6729 						 * We release the book_size
6730 						 * if the mbuf is here
6731 						 */
6732 						int ret_spc;
6733 						uint8_t sent;
6734 
6735 						if (chk->sent > SCTP_DATAGRAM_UNSENT)
6736 							sent = 1;
6737 						else
6738 							sent = 0;
6739 						ret_spc = sctp_release_pr_sctp_chunk(stcb, chk,
6740 						    sent,
6741 						    SCTP_SO_LOCKED);
6742 						freed_spc += ret_spc;
6743 						if (freed_spc >= dataout) {
6744 							return;
6745 						}
6746 					}	/* if chunk was present */
6747 				}	/* if of sufficient priority */
6748 			}	/* if chunk has enabled */
6749 		}		/* tailqforeach */
6750 
6751 		TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
6752 			/* Here we must move to the sent queue and mark */
6753 			if (PR_SCTP_BUF_ENABLED(chk->flags)) {
6754 				if (chk->rec.data.timetodrop.tv_sec > (long)srcv->sinfo_timetolive) {
6755 					if (chk->data) {
6756 						/*
6757 						 * We release the book_size
6758 						 * if the mbuf is here
6759 						 */
6760 						int ret_spc;
6761 
6762 						ret_spc = sctp_release_pr_sctp_chunk(stcb, chk,
6763 						    0, SCTP_SO_LOCKED);
6764 
6765 						freed_spc += ret_spc;
6766 						if (freed_spc >= dataout) {
6767 							return;
6768 						}
6769 					}	/* end if chk->data */
6770 				}	/* end if right class */
6771 			}	/* end if chk pr-sctp */
6772 		}		/* tailqforeachsafe (chk) */
6773 	}			/* if enabled in asoc */
6774 }
6775 
6776 int
6777 sctp_get_frag_point(struct sctp_tcb *stcb,
6778     struct sctp_association *asoc)
6779 {
6780 	int siz, ovh;
6781 
6782 	/*
6783 	 * For endpoints that have both v6 and v4 addresses we must reserve
6784 	 * room for the IPv6 header; for those that are only dealing with v4
6785 	 * we use a larger frag point.
6786 	 */
6787 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
6788 		ovh = SCTP_MIN_OVERHEAD;
6789 	} else {
6790 #if defined(__Userspace__)
6791 		if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_CONN) {
6792 			ovh = sizeof(struct sctphdr);
6793 		} else {
6794 			ovh = SCTP_MIN_V4_OVERHEAD;
6795 		}
6796 #else
6797 		ovh = SCTP_MIN_V4_OVERHEAD;
6798 #endif
6799 	}
6800 	ovh += SCTP_DATA_CHUNK_OVERHEAD(stcb);
6801 	if (stcb->asoc.sctp_frag_point > asoc->smallest_mtu)
6802 		siz = asoc->smallest_mtu - ovh;
6803 	else
6804 		siz = (stcb->asoc.sctp_frag_point - ovh);
6805 	/*
6806 	 * if (siz > (MCLBYTES-sizeof(struct sctp_data_chunk))) {
6807 	 */
6808 	/* A data chunk MUST fit in a cluster */
6809 	/* siz = (MCLBYTES - sizeof(struct sctp_data_chunk)); */
6810 	/* } */
6811 
6812 	/* adjust for an AUTH chunk if DATA requires auth */
6813 	if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks))
6814 		siz -= sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
6815 
6816 	if (siz % 4) {
6817 		/* make it an even word boundary please */
6818 		siz -= (siz % 4);
6819 	}
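	/*
	 * Illustration (the exact values depend on the overhead macros used
	 * above): with a 1500-byte path MTU, plain IPv4 plus SCTP common
	 * header overhead and a regular DATA chunk header, siz works out to
	 * roughly 1500 - 32 - 16 = 1452, already a multiple of 4.
	 */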
6820 	return (siz);
6821 }
6822 
6823 static void
6824 sctp_set_prsctp_policy(struct sctp_stream_queue_pending *sp)
6825 {
6826 	/*
6827 	 * We assume that the user wants PR_SCTP_TTL if the user
6828 	 * provides a positive lifetime but does not specify any
6829 	 * PR_SCTP policy.
6830 	 */
6831 	if (PR_SCTP_ENABLED(sp->sinfo_flags)) {
6832 		sp->act_flags |= PR_SCTP_POLICY(sp->sinfo_flags);
6833 	} else if (sp->timetolive > 0) {
6834 		sp->sinfo_flags |= SCTP_PR_SCTP_TTL;
6835 		sp->act_flags |= PR_SCTP_POLICY(sp->sinfo_flags);
6836 	} else {
6837 		return;
6838 	}
6839 	switch (PR_SCTP_POLICY(sp->sinfo_flags)) {
6840 	case CHUNK_FLAGS_PR_SCTP_BUF:
6841 		/*
6842 		 * Time to live is a priority stored in tv_sec when
6843 		 * doing the buffer drop thing.
6844 		 */
6845 		sp->ts.tv_sec = sp->timetolive;
6846 		sp->ts.tv_usec = 0;
6847 		break;
6848 	case CHUNK_FLAGS_PR_SCTP_TTL:
6849 	{
6850 		struct timeval tv;
6851 		(void)SCTP_GETTIME_TIMEVAL(&sp->ts);
6852 		tv.tv_sec = sp->timetolive / 1000;
6853 		tv.tv_usec = (sp->timetolive * 1000) % 1000000;
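		/*
		 * timetolive is in milliseconds; e.g. 1500 ms becomes
		 * tv = { 1, 500000 }, which is then added to the current time.
		 */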
6854 		/* TODO sctp_constants.h needs alternative time macros when
6855 		 *  _KERNEL is undefined.
6856 		 */
6857 #if !(defined(__FreeBSD__) && !defined(__Userspace__))
6858 		timeradd(&sp->ts, &tv, &sp->ts);
6859 #else
6860 		timevaladd(&sp->ts, &tv);
6861 #endif
6862 	}
6863 		break;
6864 	case CHUNK_FLAGS_PR_SCTP_RTX:
6865 		/*
6866 		 * Time to live is the number of retransmissions
6867 		 * stored in tv_sec.
6868 		 */
6869 		sp->ts.tv_sec = sp->timetolive;
6870 		sp->ts.tv_usec = 0;
6871 		break;
6872 	default:
6873 		SCTPDBG(SCTP_DEBUG_USRREQ1,
6874 			"Unknown PR_SCTP policy %u.\n",
6875 			PR_SCTP_POLICY(sp->sinfo_flags));
6876 		break;
6877 	}
6878 }
6879 
6880 static int
6881 sctp_msg_append(struct sctp_tcb *stcb,
6882 		struct sctp_nets *net,
6883 		struct mbuf *m,
6884 		struct sctp_sndrcvinfo *srcv, int hold_stcb_lock)
6885 {
6886 	int error = 0;
6887 	struct mbuf *at;
6888 	struct sctp_stream_queue_pending *sp = NULL;
6889 	struct sctp_stream_out *strm;
6890 
6891 	/* Given an mbuf chain, put it
6892 	 * into the association send queue and
6893 	 * place it on the wheel
6894 	 */
6895 	if (srcv->sinfo_stream >= stcb->asoc.streamoutcnt) {
6896 		/* Invalid stream number */
6897 		SCTP_LTRACE_ERR_RET_PKT(m, NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
6898 		error = EINVAL;
6899 		goto out_now;
6900 	}
6901 	if ((stcb->asoc.stream_locked) &&
6902 	    (stcb->asoc.stream_locked_on != srcv->sinfo_stream)) {
6903 		SCTP_LTRACE_ERR_RET_PKT(m, NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
6904 		error = EINVAL;
6905 		goto out_now;
6906 	}
6907 	strm = &stcb->asoc.strmout[srcv->sinfo_stream];
6908 	/* Now can we send this? */
6909 	if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) ||
6910 	    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
6911 	    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
6912 	    (stcb->asoc.state & SCTP_STATE_SHUTDOWN_PENDING)) {
6913 		/* got data while shutting down */
6914 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
6915 		error = ECONNRESET;
6916 		goto out_now;
6917 	}
6918 	sctp_alloc_a_strmoq(stcb, sp);
6919 	if (sp == NULL) {
6920 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
6921 		error = ENOMEM;
6922 		goto out_now;
6923 	}
6924 	sp->sinfo_flags = srcv->sinfo_flags;
6925 	sp->timetolive = srcv->sinfo_timetolive;
6926 	sp->ppid = srcv->sinfo_ppid;
6927 	sp->context = srcv->sinfo_context;
6928 	sp->fsn = 0;
6929 	if (sp->sinfo_flags & SCTP_ADDR_OVER) {
6930 		sp->net = net;
6931 		atomic_add_int(&sp->net->ref_count, 1);
6932 	} else {
6933 		sp->net = NULL;
6934 	}
6935 	(void)SCTP_GETTIME_TIMEVAL(&sp->ts);
6936 	sp->sid = srcv->sinfo_stream;
6937 	sp->msg_is_complete = 1;
6938 	sp->sender_all_done = 1;
6939 	sp->some_taken = 0;
6940 	sp->data = m;
6941 	sp->tail_mbuf = NULL;
6942 	sctp_set_prsctp_policy(sp);
6943 	/* We could in theory (for sendall) pass the length
6944 	 * in, but we would still have to hunt through the
6945 	 * chain since we need to set up the tail_mbuf.
6946 	 */
6947 	sp->length = 0;
6948 	for (at = m; at; at = SCTP_BUF_NEXT(at)) {
6949 		if (SCTP_BUF_NEXT(at) == NULL)
6950 			sp->tail_mbuf = at;
6951 		sp->length += SCTP_BUF_LEN(at);
6952 	}
6953 	if (srcv->sinfo_keynumber_valid) {
6954 		sp->auth_keyid = srcv->sinfo_keynumber;
6955 	} else {
6956 		sp->auth_keyid = stcb->asoc.authinfo.active_keyid;
6957 	}
6958 	if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks)) {
6959 		sctp_auth_key_acquire(stcb, sp->auth_keyid);
6960 		sp->holds_key_ref = 1;
6961 	}
6962 	if (hold_stcb_lock == 0) {
6963 		SCTP_TCB_SEND_LOCK(stcb);
6964 	}
6965 	sctp_snd_sb_alloc(stcb, sp->length);
6966 	atomic_add_int(&stcb->asoc.stream_queue_cnt, 1);
6967 	TAILQ_INSERT_TAIL(&strm->outqueue, sp, next);
6968 	stcb->asoc.ss_functions.sctp_ss_add_to_stream(stcb, &stcb->asoc, strm, sp, 1);
6969 	m = NULL;
6970 	if (hold_stcb_lock == 0) {
6971 		SCTP_TCB_SEND_UNLOCK(stcb);
6972 	}
6973 out_now:
6974 	if (m) {
6975 		sctp_m_freem(m);
6976 	}
6977 	return (error);
6978 }
6979 
6980 
6981 static struct mbuf *
6982 sctp_copy_mbufchain(struct mbuf *clonechain,
6983 		    struct mbuf *outchain,
6984 		    struct mbuf **endofchain,
6985 		    int can_take_mbuf,
6986 		    int sizeofcpy,
6987 		    uint8_t copy_by_ref)
6988 {
6989 	struct mbuf *m;
6990 	struct mbuf *appendchain;
6991 	caddr_t cp;
6992 	int len;
6993 
6994 	if (endofchain == NULL) {
6995 		/* error */
6996 	error_out:
6997 		if (outchain)
6998 			sctp_m_freem(outchain);
6999 		return (NULL);
7000 	}
7001 	if (can_take_mbuf) {
7002 		appendchain = clonechain;
7003 	} else {
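		/*
		 * For small payloads (below the sysctl-tunable mbuf threshold),
		 * and when copying by reference was not requested, it is
		 * cheaper to copy the bytes into the trailing space of the
		 * output chain than to clone the source mbufs.
		 */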
7004 		if (!copy_by_ref &&
7005 		    (sizeofcpy <= (int)((((SCTP_BASE_SYSCTL(sctp_mbuf_threshold_count) - 1) * MLEN) + MHLEN)))) {
7006 			/* It's not in a cluster */
7007 			if (*endofchain == NULL) {
7008 				/* lets get a mbuf cluster */
7009 				if (outchain == NULL) {
7010 					/* This is the general case */
7011 				new_mbuf:
7012 					outchain = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_HEADER);
7013 					if (outchain == NULL) {
7014 						goto error_out;
7015 					}
7016 					SCTP_BUF_LEN(outchain) = 0;
7017 					*endofchain = outchain;
7018 					/* get the prepend space */
7019 					SCTP_BUF_RESV_UF(outchain, (SCTP_FIRST_MBUF_RESV+4));
7020 				} else {
7021 					/* We really should not get a NULL in endofchain */
7022 					/* find end */
7023 					m = outchain;
7024 					while (m) {
7025 						if (SCTP_BUF_NEXT(m) == NULL) {
7026 							*endofchain = m;
7027 							break;
7028 						}
7029 						m = SCTP_BUF_NEXT(m);
7030 					}
7031 					/* sanity */
7032 					if (*endofchain == NULL) {
7033 						/* huh, TSNH XXX maybe we should panic */
7034 						sctp_m_freem(outchain);
7035 						goto new_mbuf;
7036 					}
7037 				}
7038 				/* get the new end of length */
7039 				len = (int)M_TRAILINGSPACE(*endofchain);
7040 			} else {
7041 				/* how much is left at the end? */
7042 				len = (int)M_TRAILINGSPACE(*endofchain);
7043 			}
7044 			/* Find the end of the data, for appending */
7045 			cp = (mtod((*endofchain), caddr_t) + SCTP_BUF_LEN((*endofchain)));
7046 
7047 			/* Now lets copy it out */
7048 			if (len >= sizeofcpy) {
7049 				/* It all fits, copy it in */
7050 				m_copydata(clonechain, 0, sizeofcpy, cp);
7051 				SCTP_BUF_LEN((*endofchain)) += sizeofcpy;
7052 			} else {
7053 				/* fill up the end of the chain */
7054 				if (len > 0) {
7055 					m_copydata(clonechain, 0, len, cp);
7056 					SCTP_BUF_LEN((*endofchain)) += len;
7057 					/* now we need another one */
7058 					sizeofcpy -= len;
7059 				}
7060 				m = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_HEADER);
7061 				if (m == NULL) {
7062 					/* We failed */
7063 					goto error_out;
7064 				}
7065 				SCTP_BUF_NEXT((*endofchain)) = m;
7066 				*endofchain = m;
7067 				cp = mtod((*endofchain), caddr_t);
7068 				m_copydata(clonechain, len, sizeofcpy, cp);
7069 				SCTP_BUF_LEN((*endofchain)) += sizeofcpy;
7070 			}
7071 			return (outchain);
7072 		} else {
7073 			/* copy the old fashion way */
7074 			appendchain = SCTP_M_COPYM(clonechain, 0, M_COPYALL, M_NOWAIT);
7075 #ifdef SCTP_MBUF_LOGGING
7076 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
7077 				sctp_log_mbc(appendchain, SCTP_MBUF_ICOPY);
7078 			}
7079 #endif
7080 		}
7081 	}
7082 	if (appendchain == NULL) {
7083 		/* error */
7084 		if (outchain)
7085 			sctp_m_freem(outchain);
7086 		return (NULL);
7087 	}
7088 	if (outchain) {
7089 		/* tack on to the end */
7090 		if (*endofchain != NULL) {
7091 			SCTP_BUF_NEXT(((*endofchain))) = appendchain;
7092 		} else {
7093 			m = outchain;
7094 			while (m) {
7095 				if (SCTP_BUF_NEXT(m) == NULL) {
7096 					SCTP_BUF_NEXT(m) = appendchain;
7097 					break;
7098 				}
7099 				m = SCTP_BUF_NEXT(m);
7100 			}
7101 		}
7102 		/*
7103 		 * save off the end and update the end-chain
7104 		 * position
7105 		 */
7106 		m = appendchain;
7107 		while (m) {
7108 			if (SCTP_BUF_NEXT(m) == NULL) {
7109 				*endofchain = m;
7110 				break;
7111 			}
7112 			m = SCTP_BUF_NEXT(m);
7113 		}
7114 		return (outchain);
7115 	} else {
7116 		/* save off the end and update the end-chain position */
7117 		m = appendchain;
7118 		while (m) {
7119 			if (SCTP_BUF_NEXT(m) == NULL) {
7120 				*endofchain = m;
7121 				break;
7122 			}
7123 			m = SCTP_BUF_NEXT(m);
7124 		}
7125 		return (appendchain);
7126 	}
7127 }
7128 
7129 static int
7130 sctp_med_chunk_output(struct sctp_inpcb *inp,
7131 		      struct sctp_tcb *stcb,
7132 		      struct sctp_association *asoc,
7133 		      int *num_out,
7134 		      int *reason_code,
7135 		      int control_only, int from_where,
7136 		      struct timeval *now, int *now_filled, int frag_point, int so_locked);
7137 
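/*
 * Iterator callback for SCTP_SENDALL: it runs once per association on the
 * endpoint, takes a private copy of the user's message from the
 * sctp_copy_all descriptor and either appends it to that association's
 * send queue or, if SCTP_ABORT was requested, uses it as the abort reason.
 */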
7138 static void
7139 sctp_sendall_iterator(struct sctp_inpcb *inp, struct sctp_tcb *stcb, void *ptr,
7140     uint32_t val SCTP_UNUSED)
7141 {
7142 	struct sctp_copy_all *ca;
7143 	struct mbuf *m;
7144 	int ret = 0;
7145 	int added_control = 0;
7146 	int un_sent, do_chunk_output = 1;
7147 	struct sctp_association *asoc;
7148 	struct sctp_nets *net;
7149 
7150 	ca = (struct sctp_copy_all *)ptr;
7151 	if (ca->m == NULL) {
7152 		return;
7153 	}
7154 	if (ca->inp != inp) {
7155 		/* TSNH */
7156 		return;
7157 	}
7158 	if (ca->sndlen > 0) {
7159 		m = SCTP_M_COPYM(ca->m, 0, M_COPYALL, M_NOWAIT);
7160 		if (m == NULL) {
7161 			/* can't copy so we are done */
7162 			ca->cnt_failed++;
7163 			return;
7164 		}
7165 #ifdef SCTP_MBUF_LOGGING
7166 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
7167 			sctp_log_mbc(m, SCTP_MBUF_ICOPY);
7168 		}
7169 #endif
7170 	} else {
7171 		m = NULL;
7172 	}
7173 	SCTP_TCB_LOCK_ASSERT(stcb);
7174 	if (stcb->asoc.alternate) {
7175 		net = stcb->asoc.alternate;
7176 	} else {
7177 		net = stcb->asoc.primary_destination;
7178 	}
7179 	if (ca->sndrcv.sinfo_flags & SCTP_ABORT) {
7180 		/* Abort this assoc with m as the user defined reason */
7181 		if (m != NULL) {
7182 			SCTP_BUF_PREPEND(m, sizeof(struct sctp_paramhdr), M_NOWAIT);
7183 		} else {
7184 			m = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
7185 			                          0, M_NOWAIT, 1, MT_DATA);
7186 			SCTP_BUF_LEN(m) = sizeof(struct sctp_paramhdr);
7187 		}
7188 		if (m != NULL) {
7189 			struct sctp_paramhdr *ph;
7190 
7191 			ph = mtod(m, struct sctp_paramhdr *);
7192 			ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
7193 			ph->param_length = htons((uint16_t)(sizeof(struct sctp_paramhdr) + ca->sndlen));
7194 		}
7195 		/* We add one here to keep the assoc from
7196 		 * disappearing on us.
7197 		 */
7198 		atomic_add_int(&stcb->asoc.refcnt, 1);
7199 		sctp_abort_an_association(inp, stcb, m, SCTP_SO_NOT_LOCKED);
7200 		/* sctp_abort_an_association calls sctp_free_asoc();
7201 		 * freeing the association will NOT free the stcb, since
7202 		 * we incremented the refcnt. We do this to prevent it
7203 		 * from being freed and things getting tricky, since we
7204 		 * could end up (from free_asoc) calling inpcb_free,
7205 		 * which would make a recursive lock call on the
7206 		 * iterator lock. But as a consequence of that, the
7207 		 * stcb comes back to us unlocked: since free_asoc
7208 		 * returns with either no TCB or the TCB unlocked, we
7209 		 * must relock so we can unlock in the iterator timer :-0
7210 		 */
7211 		SCTP_TCB_LOCK(stcb);
7212 		atomic_add_int(&stcb->asoc.refcnt, -1);
7213 		goto no_chunk_output;
7214 	} else {
7215 		if (m) {
7216 			ret = sctp_msg_append(stcb, net, m,
7217 					      &ca->sndrcv, 1);
7218 		}
7219 		asoc = &stcb->asoc;
7220 		if (ca->sndrcv.sinfo_flags & SCTP_EOF) {
7221 			/* shutdown this assoc */
7222 			if (TAILQ_EMPTY(&asoc->send_queue) &&
7223 			    TAILQ_EMPTY(&asoc->sent_queue) &&
7224 			    sctp_is_there_unsent_data(stcb, SCTP_SO_NOT_LOCKED) == 0) {
7225 				if ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete)(stcb, asoc)) {
7226 					goto abort_anyway;
7227 				}
7228 				/* there is nothing queued to send, so I'm done... */
7229 				if ((SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_SENT) &&
7230 				    (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
7231 				    (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
7232 					/* only send SHUTDOWN the first time through */
7233 					if (SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) {
7234 						SCTP_STAT_DECR_GAUGE32(sctps_currestab);
7235 					}
7236 					SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
7237 					sctp_stop_timers_for_shutdown(stcb);
7238 					sctp_send_shutdown(stcb, net);
7239 					sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb,
7240 							 net);
7241 					sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
7242 					                 NULL);
7243 					added_control = 1;
7244 					do_chunk_output = 0;
7245 				}
7246 			} else {
7247 				/*
7248 				 * we still got (or just got) data to send, so set
7249 				 * SHUTDOWN_PENDING
7250 				 */
7251 				/*
7252 				 * XXX sockets draft says that SCTP_EOF should be
7253 				 * sent with no data.  currently, we will allow user
7254 				 * data to be sent first and move to
7255 				 * SHUTDOWN-PENDING
7256 				 */
7257 				if ((SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_SENT) &&
7258 				    (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
7259 				    (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
7260 					if ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete)(stcb, asoc)) {
7261 						SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT);
7262 					}
7263 					SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_SHUTDOWN_PENDING);
7264 					if (TAILQ_EMPTY(&asoc->send_queue) &&
7265 					    TAILQ_EMPTY(&asoc->sent_queue) &&
7266 					    (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
7267 						struct mbuf *op_err;
7268 						char msg[SCTP_DIAG_INFO_LEN];
7269 
7270 					abort_anyway:
7271 						SCTP_SNPRINTF(msg, sizeof(msg),
7272 						              "%s:%d at %s", __FILE__, __LINE__, __func__);
7273 						op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
7274 						                             msg);
7275 						atomic_add_int(&stcb->asoc.refcnt, 1);
7276 						sctp_abort_an_association(stcb->sctp_ep, stcb,
7277 									  op_err, SCTP_SO_NOT_LOCKED);
7278 						atomic_add_int(&stcb->asoc.refcnt, -1);
7279 						goto no_chunk_output;
7280 					}
7281 					sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
7282 					                 NULL);
7283 				}
7284 			}
7285 
7286 		}
7287 	}
7288 	un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
7289 		   (stcb->asoc.stream_queue_cnt * SCTP_DATA_CHUNK_OVERHEAD(stcb)));
7290 
7291 	if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
7292 	    (stcb->asoc.total_flight > 0) &&
7293 	    (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))) {
7294 		do_chunk_output = 0;
7295 	}
7296 	if (do_chunk_output)
7297 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_NOT_LOCKED);
7298 	else if (added_control) {
7299 		int num_out, reason, now_filled = 0;
7300 		struct timeval now;
7301 		int frag_point;
7302 
7303 		frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
7304 		(void)sctp_med_chunk_output(inp, stcb, &stcb->asoc, &num_out,
7305 				      &reason, 1, 1, &now, &now_filled, frag_point, SCTP_SO_NOT_LOCKED);
7306 	}
7307  no_chunk_output:
7308 	if (ret) {
7309 		ca->cnt_failed++;
7310 	} else {
7311 		ca->cnt_sent++;
7312 	}
7313 }
7314 
7315 static void
7316 sctp_sendall_completes(void *ptr, uint32_t val SCTP_UNUSED)
7317 {
7318 	struct sctp_copy_all *ca;
7319 
7320 	ca = (struct sctp_copy_all *)ptr;
7321 	/*
7322 	 * Do a notify here? Kacheong suggests that the notify be done at
7323 	 * the send time... so you would push up a notification if any send
7324 	 * failed. Don't know if this is feasible, since the only failures we
7325 	 * have are "memory" related, and if you cannot get an mbuf to send
7326 	 * the data you surely can't get an mbuf to send up to notify the
7327 	 * user that you can't send the data :->
7328 	 */
7329 
7330 	/* now free everything */
7331 	if (ca->inp) {
7332 		/* Lets clear the flag to allow others to run. */
7333 		ca->inp->sctp_flags &= ~SCTP_PCB_FLAGS_SND_ITERATOR_UP;
7334 	}
7335 	sctp_m_freem(ca->m);
7336 	SCTP_FREE(ca, SCTP_M_COPYAL);
7337 }
7338 
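/*
 * Copy 'len' bytes from the user's uio into a freshly allocated mbuf chain.
 * The sendall path copies from this master chain again for every
 * association, so the user data is read from userland only once.
 */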
7339 static struct mbuf *
7340 sctp_copy_out_all(struct uio *uio, ssize_t len)
7341 {
7342 	struct mbuf *ret, *at;
7343 	ssize_t left, willcpy, cancpy, error;
7344 
7345 	ret = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_WAITOK, 1, MT_DATA);
7346 	if (ret == NULL) {
7347 		/* TSNH */
7348 		return (NULL);
7349 	}
7350 	left = len;
7351 	SCTP_BUF_LEN(ret) = 0;
7352 	/* save space for the data chunk header */
7353 	cancpy = (int)M_TRAILINGSPACE(ret);
7354 	willcpy = min(cancpy, left);
7355 	at = ret;
7356 	while (left > 0) {
7357 		/* Align data to the end */
7358 		error = uiomove(mtod(at, caddr_t), (int)willcpy, uio);
7359 		if (error) {
7360 	err_out_now:
7361 			sctp_m_freem(at);
7362 			return (NULL);
7363 		}
7364 		SCTP_BUF_LEN(at) = (int)willcpy;
7365 		SCTP_BUF_NEXT_PKT(at) = SCTP_BUF_NEXT(at) = 0;
7366 		left -= willcpy;
7367 		if (left > 0) {
7368 			SCTP_BUF_NEXT(at) = sctp_get_mbuf_for_msg((unsigned int)left, 0, M_WAITOK, 1, MT_DATA);
7369 			if (SCTP_BUF_NEXT(at) == NULL) {
7370 				goto err_out_now;
7371 			}
7372 			at = SCTP_BUF_NEXT(at);
7373 			SCTP_BUF_LEN(at) = 0;
7374 			cancpy = (int)M_TRAILINGSPACE(at);
7375 			willcpy = min(cancpy, left);
7376 		}
7377 	}
7378 	return (ret);
7379 }
7380 
7381 static int
7382 sctp_sendall(struct sctp_inpcb *inp, struct uio *uio, struct mbuf *m,
7383     struct sctp_sndrcvinfo *srcv)
7384 {
7385 	int ret;
7386 	struct sctp_copy_all *ca;
7387 
7388 	if (inp->sctp_flags & SCTP_PCB_FLAGS_SND_ITERATOR_UP) {
7389 		/* There is another. */
7390 		return (EBUSY);
7391 	}
7392 #if defined(__APPLE__) && !defined(__Userspace__)
7393 #if defined(APPLE_LEOPARD)
7394 	if (uio->uio_resid > SCTP_BASE_SYSCTL(sctp_sendall_limit)) {
7395 #else
7396 	if (uio_resid(uio) > SCTP_BASE_SYSCTL(sctp_sendall_limit)) {
7397 #endif
7398 #else
7399 	if (uio->uio_resid > (ssize_t)SCTP_BASE_SYSCTL(sctp_sendall_limit)) {
7400 #endif
7401 		/* You must not be larger than the limit! */
7402 		return (EMSGSIZE);
7403 	}
7404 	SCTP_MALLOC(ca, struct sctp_copy_all *, sizeof(struct sctp_copy_all),
7405 		    SCTP_M_COPYAL);
7406 	if (ca == NULL) {
7407 		sctp_m_freem(m);
7408 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
7409 		return (ENOMEM);
7410 	}
7411 	memset(ca, 0, sizeof(struct sctp_copy_all));
7412 
7413 	ca->inp = inp;
7414 	if (srcv) {
7415 		memcpy(&ca->sndrcv, srcv, sizeof(struct sctp_nonpad_sndrcvinfo));
7416 	}
7417 	/*
7418 	 * take off the sendall flag, it would be bad if we failed to do
7419 	 * this :-0
7420 	 */
7421 	ca->sndrcv.sinfo_flags &= ~SCTP_SENDALL;
7422 	/* get length and mbuf chain */
7423 	if (uio) {
7424 #if defined(__APPLE__) && !defined(__Userspace__)
7425 #if defined(APPLE_LEOPARD)
7426 		ca->sndlen = uio->uio_resid;
7427 #else
7428 		ca->sndlen = uio_resid(uio);
7429 #endif
7430 #else
7431 		ca->sndlen = uio->uio_resid;
7432 #endif
7433 #if defined(__APPLE__) && !defined(__Userspace__)
7434 		SCTP_SOCKET_UNLOCK(SCTP_INP_SO(inp), 0);
7435 #endif
7436 		ca->m = sctp_copy_out_all(uio, ca->sndlen);
7437 #if defined(__APPLE__) && !defined(__Userspace__)
7438 		SCTP_SOCKET_LOCK(SCTP_INP_SO(inp), 0);
7439 #endif
7440 		if (ca->m == NULL) {
7441 			SCTP_FREE(ca, SCTP_M_COPYAL);
7442 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
7443 			return (ENOMEM);
7444 		}
7445 	} else {
7446 		/* Gather the length of the send */
7447 		struct mbuf *mat;
7448 
7449 		ca->sndlen = 0;
7450 		for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
7451 			ca->sndlen += SCTP_BUF_LEN(mat);
7452 		}
7453 	}
7454 	inp->sctp_flags |= SCTP_PCB_FLAGS_SND_ITERATOR_UP;
7455 	ret = sctp_initiate_iterator(NULL, sctp_sendall_iterator, NULL,
7456 				     SCTP_PCB_ANY_FLAGS, SCTP_PCB_ANY_FEATURES,
7457 				     SCTP_ASOC_ANY_STATE,
7458 				     (void *)ca, 0,
7459 				     sctp_sendall_completes, inp, 1);
7460 	if (ret) {
7461 		inp->sctp_flags &= ~SCTP_PCB_FLAGS_SND_ITERATOR_UP;
7462 		SCTP_FREE(ca, SCTP_M_COPYAL);
7463 		SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EFAULT);
7464 		return (EFAULT);
7465 	}
7466 	return (0);
7467 }
7468 
7469 
7470 void
7471 sctp_toss_old_cookies(struct sctp_tcb *stcb, struct sctp_association *asoc)
7472 {
7473 	struct sctp_tmit_chunk *chk, *nchk;
7474 
7475 	TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) {
7476 		if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
7477 			TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
7478 			asoc->ctrl_queue_cnt--;
7479 			if (chk->data) {
7480 				sctp_m_freem(chk->data);
7481 				chk->data = NULL;
7482 			}
7483 			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
7484 		}
7485 	}
7486 }
7487 
7488 void
7489 sctp_toss_old_asconf(struct sctp_tcb *stcb)
7490 {
7491 	struct sctp_association *asoc;
7492 	struct sctp_tmit_chunk *chk, *nchk;
7493 	struct sctp_asconf_chunk *acp;
7494 
7495 	asoc = &stcb->asoc;
7496 	TAILQ_FOREACH_SAFE(chk, &asoc->asconf_send_queue, sctp_next, nchk) {
7497 		/* find SCTP_ASCONF chunk in queue */
7498 		if (chk->rec.chunk_id.id == SCTP_ASCONF) {
7499 			if (chk->data) {
7500 				acp = mtod(chk->data, struct sctp_asconf_chunk *);
7501 				if (SCTP_TSN_GT(ntohl(acp->serial_number), asoc->asconf_seq_out_acked)) {
7502 					/* Not Acked yet */
7503 					break;
7504 				}
7505 			}
7506 			TAILQ_REMOVE(&asoc->asconf_send_queue, chk, sctp_next);
7507 			asoc->ctrl_queue_cnt--;
7508 			if (chk->data) {
7509 				sctp_m_freem(chk->data);
7510 				chk->data = NULL;
7511 			}
7512 			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
7513 		}
7514 	}
7515 }
7516 
7517 
7518 static void
7519 sctp_clean_up_datalist(struct sctp_tcb *stcb,
7520     struct sctp_association *asoc,
7521     struct sctp_tmit_chunk **data_list,
7522     int bundle_at,
7523     struct sctp_nets *net)
7524 {
7525 	int i;
7526 	struct sctp_tmit_chunk *tp1;
7527 
7528 	for (i = 0; i < bundle_at; i++) {
7529 		/* off of the send queue */
7530 		TAILQ_REMOVE(&asoc->send_queue, data_list[i], sctp_next);
7531 		asoc->send_queue_cnt--;
7532 		if (i > 0) {
7533 			/*
7534 			 * For any chunk other than chunk 0, clear the RTT
7535 			 * timing flag; chunk 0 keeps or clears it based on
7536 			 * whether an RTO measurement is needed.
7537 			 */
7538 			data_list[i]->do_rtt = 0;
7539 		}
7540 		/* record time */
7541 		data_list[i]->sent_rcv_time = net->last_sent_time;
7542 		data_list[i]->rec.data.cwnd_at_send = net->cwnd;
7543 		data_list[i]->rec.data.fast_retran_tsn = data_list[i]->rec.data.tsn;
7544 		if (data_list[i]->whoTo == NULL) {
7545 			data_list[i]->whoTo = net;
7546 			atomic_add_int(&net->ref_count, 1);
7547 		}
7548 		/* on to the sent queue */
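		/*
		 * Keep the sent_queue in TSN order: start at the tail and
		 * walk backwards until a chunk with a smaller TSN is found,
		 * then insert after it. Normally the new TSN is the largest,
		 * so the plain tail append is the common case.
		 */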
7549 		tp1 = TAILQ_LAST(&asoc->sent_queue, sctpchunk_listhead);
7550 		if ((tp1) && SCTP_TSN_GT(tp1->rec.data.tsn, data_list[i]->rec.data.tsn)) {
7551 			struct sctp_tmit_chunk *tpp;
7552 
7553 			/* need to move back */
7554 		back_up_more:
7555 			tpp = TAILQ_PREV(tp1, sctpchunk_listhead, sctp_next);
7556 			if (tpp == NULL) {
7557 				TAILQ_INSERT_BEFORE(tp1, data_list[i], sctp_next);
7558 				goto all_done;
7559 			}
7560 			tp1 = tpp;
7561 			if (SCTP_TSN_GT(tp1->rec.data.tsn, data_list[i]->rec.data.tsn)) {
7562 				goto back_up_more;
7563 			}
7564 			TAILQ_INSERT_AFTER(&asoc->sent_queue, tp1, data_list[i], sctp_next);
7565 		} else {
7566 			TAILQ_INSERT_TAIL(&asoc->sent_queue,
7567 					  data_list[i],
7568 					  sctp_next);
7569 		}
7570 	all_done:
7571 		/* This does not lower until the cum-ack passes it */
7572 		asoc->sent_queue_cnt++;
7573 		if ((asoc->peers_rwnd <= 0) &&
7574 		    (asoc->total_flight == 0) &&
7575 		    (bundle_at == 1)) {
7576 			/* Mark the chunk as being a window probe */
7577 			SCTP_STAT_INCR(sctps_windowprobed);
7578 		}
7579 #ifdef SCTP_AUDITING_ENABLED
7580 		sctp_audit_log(0xC2, 3);
7581 #endif
7582 		data_list[i]->sent = SCTP_DATAGRAM_SENT;
7583 		data_list[i]->snd_count = 1;
7584 		data_list[i]->rec.data.chunk_was_revoked = 0;
7585 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
7586 			sctp_misc_ints(SCTP_FLIGHT_LOG_UP,
7587 				       data_list[i]->whoTo->flight_size,
7588 				       data_list[i]->book_size,
7589 				       (uint32_t)(uintptr_t)data_list[i]->whoTo,
7590 				       data_list[i]->rec.data.tsn);
7591 		}
7592 		sctp_flight_size_increase(data_list[i]);
7593 		sctp_total_flight_increase(stcb, data_list[i]);
7594 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
7595 			sctp_log_rwnd(SCTP_DECREASE_PEER_RWND,
7596 			      asoc->peers_rwnd, data_list[i]->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
7597 		}
7598 		asoc->peers_rwnd = sctp_sbspace_sub(asoc->peers_rwnd,
7599 						    (uint32_t) (data_list[i]->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)));
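		/*
		 * Sender-side Silly Window Syndrome avoidance: once the
		 * peer's advertised window drops below the configured
		 * sctp_sws_sender threshold, treat it as zero so we stop
		 * dribbling out tiny chunks.
		 */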
7600 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
7601 			/* SWS sender side engages */
7602 			asoc->peers_rwnd = 0;
7603 		}
7604 	}
7605 	if (asoc->cc_functions.sctp_cwnd_update_packet_transmitted) {
7606 		(*asoc->cc_functions.sctp_cwnd_update_packet_transmitted)(stcb, net);
7607 	}
7608 }
7609 
7610 static void
7611 sctp_clean_up_ctl(struct sctp_tcb *stcb, struct sctp_association *asoc, int so_locked)
7612 {
7613 	struct sctp_tmit_chunk *chk, *nchk;
7614 
7615 	TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) {
7616 		if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
7617 		    (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK) ||	/* EY */
7618 		    (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) ||
7619 		    (chk->rec.chunk_id.id == SCTP_HEARTBEAT_ACK) ||
7620 		    (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) ||
7621 		    (chk->rec.chunk_id.id == SCTP_SHUTDOWN) ||
7622 		    (chk->rec.chunk_id.id == SCTP_SHUTDOWN_ACK) ||
7623 		    (chk->rec.chunk_id.id == SCTP_OPERATION_ERROR) ||
7624 		    (chk->rec.chunk_id.id == SCTP_PACKET_DROPPED) ||
7625 		    (chk->rec.chunk_id.id == SCTP_COOKIE_ACK) ||
7626 		    (chk->rec.chunk_id.id == SCTP_ECN_CWR) ||
7627 		    (chk->rec.chunk_id.id == SCTP_ASCONF_ACK)) {
7628 			/* Stray chunks must be cleaned up */
7629 	clean_up_anyway:
7630 			TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
7631 			asoc->ctrl_queue_cnt--;
7632 			if (chk->data) {
7633 				sctp_m_freem(chk->data);
7634 				chk->data = NULL;
7635 			}
7636 			if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) {
7637 				asoc->fwd_tsn_cnt--;
7638 			}
7639 			sctp_free_a_chunk(stcb, chk, so_locked);
7640 		} else if (chk->rec.chunk_id.id == SCTP_STREAM_RESET) {
7641 			/* special handling, we must look into the param */
7642 			if (chk != asoc->str_reset) {
7643 				goto clean_up_anyway;
7644 			}
7645 		}
7646 	}
7647 }
7648 
7649 static uint32_t
7650 sctp_can_we_split_this(struct sctp_tcb *stcb, uint32_t length,
7651                        uint32_t space_left, uint32_t frag_point, int eeor_on)
7652 {
7653 	/* Make a decision on whether we should split a
7654 	 * msg into multiple parts. This is only asked of
7655 	 * incomplete messages.
7656 	 */
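	/*
	 * For example, in non-EEOR mode with length = 4000, space_left =
	 * 1200 and frag_point = 1200, the checks below (assuming the
	 * min-residual and min-split-point sysctls permit it) return
	 * min(1200, 1200) = 1200 and the caller moves a 1200-byte
	 * fragment of the message.
	 */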
7657 	if (eeor_on) {
7658 		/* If we are doing EEOR we always need to send
7659 		 * it if it's the entire thing, since it might
7660 		 * be all the sender is putting in the hopper.
7661 		 */
7662 		if (space_left >= length) {
7663 			/*-
7664 			 * If we have data outstanding,
7665 			 * we get another chance when the sack
7666 			 * arrives to transmit - wait for more data
7667 			 */
7668 			if (stcb->asoc.total_flight == 0) {
7669 				/* Nothing is in flight, so send
7670 				 * the whole message out now.
7671 				 */
7672 				return (length);
7673 			}
7674 			return (0);
7675 
7676 		} else {
7677 			/* You can fill the rest */
7678 			return (space_left);
7679 		}
7680 	}
7681 	/*-
7682 	 * For those strange folk that make the send buffer
7683 	 * smaller than our fragmentation point, we can't
7684 	 * get a full msg in so we have to allow splitting.
7685 	 */
7686 	if (SCTP_SB_LIMIT_SND(stcb->sctp_socket) < frag_point) {
7687 		return (length);
7688 	}
7689 	if ((length <= space_left) ||
7690 	    ((length - space_left) < SCTP_BASE_SYSCTL(sctp_min_residual))) {
7691 		/* Sub-optimal residual, don't split in non-eeor mode. */
7692 		return (0);
7693 	}
7694 	/* If we reach here, length is larger
7695 	 * than the space_left. Do we wish to split
7696 	 * it for the sake of filling out the packet?
7697 	 */
7698 	if (space_left >= min(SCTP_BASE_SYSCTL(sctp_min_split_point), frag_point)) {
7699 		/* It's ok to split it */
7700 		return (min(space_left, frag_point));
7701 	}
7702 	/* Nope, can't split */
7703 	return (0);
7704 }
7705 
7706 static uint32_t
7707 sctp_move_to_outqueue(struct sctp_tcb *stcb,
7708                       struct sctp_stream_out *strq,
7709                       uint32_t space_left,
7710                       uint32_t frag_point,
7711                       int *giveup,
7712                       int eeor_mode,
7713                       int *bail,
7714                       int so_locked)
7715 {
7716 	/* Move from the stream to the send_queue keeping track of the total */
7717 	struct sctp_association *asoc;
7718 	struct sctp_stream_queue_pending *sp;
7719 	struct sctp_tmit_chunk *chk;
7720 	struct sctp_data_chunk *dchkh = NULL;
7721 	struct sctp_idata_chunk *ndchkh = NULL;
7722 	uint32_t to_move, length;
7723 	int leading;
7724 	uint8_t rcv_flags = 0;
7725 	uint8_t some_taken;
7726 	uint8_t send_lock_up = 0;
7727 
7728 	SCTP_TCB_LOCK_ASSERT(stcb);
7729 	asoc = &stcb->asoc;
7730 one_more_time:
7731 	/*sa_ignore FREED_MEMORY*/
7732 	sp = TAILQ_FIRST(&strq->outqueue);
7733 	if (sp == NULL) {
7734 		if (send_lock_up == 0) {
7735 			SCTP_TCB_SEND_LOCK(stcb);
7736 			send_lock_up = 1;
7737 		}
7738 		sp = TAILQ_FIRST(&strq->outqueue);
7739 		if (sp) {
7740 			goto one_more_time;
7741 		}
7742 		if ((sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_EXPLICIT_EOR) == 0) &&
7743 		    (stcb->asoc.idata_supported == 0) &&
7744 		    (strq->last_msg_incomplete)) {
7745 			SCTP_PRINTF("Huh? Stream:%d lm_in_c=%d but queue is NULL\n",
7746 			            strq->sid,
7747 			            strq->last_msg_incomplete);
7748 			strq->last_msg_incomplete = 0;
7749 		}
7750 		to_move = 0;
7751 		if (send_lock_up) {
7752 			SCTP_TCB_SEND_UNLOCK(stcb);
7753 			send_lock_up = 0;
7754 		}
7755 		goto out_of;
7756 	}
7757 	if ((sp->msg_is_complete) && (sp->length == 0)) {
7758 		if (sp->sender_all_done) {
7759 			/* We are doing deferred cleanup. Last
7760 			 * time through when we took all the data
7761 			 * the sender_all_done was not set.
7762 			 */
7763 			if ((sp->put_last_out == 0) && (sp->discard_rest == 0)) {
7764 				SCTP_PRINTF("Gak, put out entire msg with NO end!-1\n");
7765 				SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d send_lock:%d\n",
7766 				            sp->sender_all_done,
7767 				            sp->length,
7768 				            sp->msg_is_complete,
7769 				            sp->put_last_out,
7770 				            send_lock_up);
7771 			}
7772 			if ((TAILQ_NEXT(sp, next) == NULL) && (send_lock_up  == 0)) {
7773 				SCTP_TCB_SEND_LOCK(stcb);
7774 				send_lock_up = 1;
7775 			}
7776 			atomic_subtract_int(&asoc->stream_queue_cnt, 1);
7777 			TAILQ_REMOVE(&strq->outqueue, sp, next);
7778 			stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, strq, sp, send_lock_up);
7779 			if ((strq->state == SCTP_STREAM_RESET_PENDING) &&
7780 			    (strq->chunks_on_queues == 0) &&
7781 			    TAILQ_EMPTY(&strq->outqueue)) {
7782 				stcb->asoc.trigger_reset = 1;
7783 			}
7784 			if (sp->net) {
7785 				sctp_free_remote_addr(sp->net);
7786 				sp->net = NULL;
7787 			}
7788 			if (sp->data) {
7789 				sctp_m_freem(sp->data);
7790 				sp->data = NULL;
7791 			}
7792 			sctp_free_a_strmoq(stcb, sp, so_locked);
7793 			/* we can't be locked to it */
7794 			if (send_lock_up) {
7795 				SCTP_TCB_SEND_UNLOCK(stcb);
7796 				send_lock_up = 0;
7797 			}
7798 			/* back to get the next msg */
7799 			goto one_more_time;
7800 		} else {
7801 			/* sender just finished this but
7802 			 * still holds a reference
7803 			 */
7804 			*giveup = 1;
7805 			to_move = 0;
7806 			goto out_of;
7807 		}
7808 	} else {
7809 		/* is there some to get */
7810 		if (sp->length == 0) {
7811 			/* no */
7812 			*giveup = 1;
7813 			to_move = 0;
7814 			goto out_of;
7815 		} else if (sp->discard_rest) {
7816 			if (send_lock_up == 0) {
7817 				SCTP_TCB_SEND_LOCK(stcb);
7818 				send_lock_up = 1;
7819 			}
7820 			/* Whack down the size */
7821 			atomic_subtract_int(&stcb->asoc.total_output_queue_size, sp->length);
7822 			if ((stcb->sctp_socket != NULL) &&
7823 			    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
7824 			     (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
7825 				atomic_subtract_int(&stcb->sctp_socket->so_snd.sb_cc, sp->length);
7826 			}
7827 			if (sp->data) {
7828 				sctp_m_freem(sp->data);
7829 				sp->data = NULL;
7830 				sp->tail_mbuf = NULL;
7831 			}
7832 			sp->length = 0;
7833 			sp->some_taken = 1;
7834 			*giveup = 1;
7835 			to_move = 0;
7836 			goto out_of;
7837 		}
7838 	}
7839 	some_taken = sp->some_taken;
7840 re_look:
7841 	length = sp->length;
7842 	if (sp->msg_is_complete) {
7843 		/* The message is complete */
7844 		to_move = min(length, frag_point);
7845 		if (to_move == length) {
7846 			/* All of it fits in the MTU */
7847 			if (sp->some_taken) {
7848 				rcv_flags |= SCTP_DATA_LAST_FRAG;
7849 			} else {
7850 				rcv_flags |= SCTP_DATA_NOT_FRAG;
7851 			}
7852 			sp->put_last_out = 1;
7853 			if (sp->sinfo_flags & SCTP_SACK_IMMEDIATELY) {
7854 				rcv_flags |= SCTP_DATA_SACK_IMMEDIATELY;
7855 			}
7856 		} else {
7857 			/* Not all of it fits, we fragment */
7858 			if (sp->some_taken == 0) {
7859 				rcv_flags |= SCTP_DATA_FIRST_FRAG;
7860 			}
7861 			sp->some_taken = 1;
7862 		}
7863 	} else {
7864 		to_move = sctp_can_we_split_this(stcb, length, space_left, frag_point, eeor_mode);
7865 		if (to_move) {
7866 			/*-
7867 			 * We use a snapshot of length in case it
7868 			 * is expanding during the compare.
7869 			 */
7870 			uint32_t llen;
7871 
7872 			llen = length;
7873 			if (to_move >= llen) {
7874 				to_move = llen;
7875 				if (send_lock_up == 0) {
7876 					/*-
7877 					 * We are taking all of an incomplete msg
7878 					 * thus we need a send lock.
7879 					 */
7880 					SCTP_TCB_SEND_LOCK(stcb);
7881 					send_lock_up = 1;
7882 					if (sp->msg_is_complete) {
7883 						/* the sender finished the msg */
7884 						goto re_look;
7885 					}
7886 				}
7887 			}
7888 			if (sp->some_taken == 0) {
7889 				rcv_flags |= SCTP_DATA_FIRST_FRAG;
7890 				sp->some_taken = 1;
7891 			}
7892 		} else {
7893 			/* Nothing to take. */
7894 			*giveup = 1;
7895 			to_move = 0;
7896 			goto out_of;
7897 		}
7898 	}
7899 
7900 	/* If we reach here, we can copy out a chunk */
7901 	sctp_alloc_a_chunk(stcb, chk);
7902 	if (chk == NULL) {
7903 		/* No chunk memory */
7904 		*giveup = 1;
7905 		to_move = 0;
7906 		goto out_of;
7907 	}
7908 	/* Set up for unordered delivery if needed by
7909 	 * looking at the user-sent info flags.
7910 	 */
7911 	if (sp->sinfo_flags & SCTP_UNORDERED) {
7912 		rcv_flags |= SCTP_DATA_UNORDERED;
7913 	}
7914 	if (SCTP_BASE_SYSCTL(sctp_enable_sack_immediately) &&
7915 	    (sp->sinfo_flags & SCTP_EOF) == SCTP_EOF) {
7916 		rcv_flags |= SCTP_DATA_SACK_IMMEDIATELY;
7917 	}
7918 	/* clear out the chunk before setting up */
7919 	memset(chk, 0, sizeof(*chk));
7920 	chk->rec.data.rcv_flags = rcv_flags;
7921 
7922 	if (to_move >= length) {
7923 		/* we think we can steal the whole thing */
7924 		if ((sp->sender_all_done == 0) && (send_lock_up == 0)) {
7925 			SCTP_TCB_SEND_LOCK(stcb);
7926 			send_lock_up = 1;
7927 		}
7928 		if (to_move < sp->length) {
7929 			/* bail, it changed */
7930 			goto dont_do_it;
7931 		}
7932 		chk->data = sp->data;
7933 		chk->last_mbuf = sp->tail_mbuf;
7934 		/* register the stealing */
7935 		sp->data = sp->tail_mbuf = NULL;
7936 	} else {
7937 		struct mbuf *m;
7938 	dont_do_it:
7939 		chk->data = SCTP_M_COPYM(sp->data, 0, to_move, M_NOWAIT);
7940 		chk->last_mbuf = NULL;
7941 		if (chk->data == NULL) {
7942 			sp->some_taken = some_taken;
7943 			sctp_free_a_chunk(stcb, chk, so_locked);
7944 			*bail = 1;
7945 			to_move = 0;
7946 			goto out_of;
7947 		}
7948 #ifdef SCTP_MBUF_LOGGING
7949 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
7950 			sctp_log_mbc(chk->data, SCTP_MBUF_ICOPY);
7951 		}
7952 #endif
7953 		/* Pull off the data */
7954 		m_adj(sp->data, to_move);
7955 		/* Now let's work our way down and compact it */
7956 		m = sp->data;
7957 		while (m && (SCTP_BUF_LEN(m) == 0)) {
7958 			sp->data  = SCTP_BUF_NEXT(m);
7959 			SCTP_BUF_NEXT(m) = NULL;
7960 			if (sp->tail_mbuf == m) {
7961 				/*-
7962 				 * Freeing tail? TSNH since
7963 				 * we supposedly were taking less
7964 				 * than the sp->length.
7965 				 */
7966 #ifdef INVARIANTS
7967 				panic("Huh, freeing tail? - TSNH");
7968 #else
7969 				SCTP_PRINTF("Huh, freeing tail? - TSNH\n");
7970 				sp->tail_mbuf = sp->data = NULL;
7971 				sp->length = 0;
7972 #endif
7973 
7974 			}
7975 			sctp_m_free(m);
7976 			m = sp->data;
7977 		}
7978 	}
7979 	if (SCTP_BUF_IS_EXTENDED(chk->data)) {
7980 		chk->copy_by_ref = 1;
7981 	} else {
7982 		chk->copy_by_ref = 0;
7983 	}
7984 	/* Get last_mbuf and count the mbuf usage.
7985 	 * This is ugly but hopefully it's only one mbuf.
7986 	 */
7987 	if (chk->last_mbuf == NULL) {
7988 		chk->last_mbuf = chk->data;
7989 		while (SCTP_BUF_NEXT(chk->last_mbuf) != NULL) {
7990 			chk->last_mbuf = SCTP_BUF_NEXT(chk->last_mbuf);
7991 		}
7992 	}
7993 
7994 	if (to_move > length) {
7995 		/*- This should not happen either,
7996 		 * since we always lower to_move to the size
7997 		 * of sp->length if it is larger.
7998 		 */
7999 #ifdef INVARIANTS
8000 		panic("Huh, how can to_move be larger?");
8001 #else
8002 		SCTP_PRINTF("Huh, how can to_move be larger?\n");
8003 		sp->length = 0;
8004 #endif
8005 	} else {
8006 		atomic_subtract_int(&sp->length, to_move);
8007 	}
8008 	leading = SCTP_DATA_CHUNK_OVERHEAD(stcb);
8009 	if (M_LEADINGSPACE(chk->data) < leading) {
8010 		/* Not enough room for a chunk header, get some */
8011 		struct mbuf *m;
8012 
8013 		m = sctp_get_mbuf_for_msg(1, 0, M_NOWAIT, 1, MT_DATA);
8014 		if (m == NULL) {
8015 			/*
8016 			 * we're in trouble here. _PREPEND below will free
8017 			 * all the data if there is no leading space, so we
8018 			 * must put the data back and restore.
8019 			 */
8020 			if (send_lock_up == 0) {
8021 				SCTP_TCB_SEND_LOCK(stcb);
8022 				send_lock_up = 1;
8023 			}
8024 			if (sp->data == NULL) {
8025 				/* unsteal the data */
8026 				sp->data = chk->data;
8027 				sp->tail_mbuf = chk->last_mbuf;
8028 			} else {
8029 				struct mbuf *m_tmp;
8030 				/* reassemble the data */
8031 				m_tmp = sp->data;
8032 				sp->data = chk->data;
8033 				SCTP_BUF_NEXT(chk->last_mbuf) = m_tmp;
8034 			}
8035 			sp->some_taken = some_taken;
8036 			atomic_add_int(&sp->length, to_move);
8037 			chk->data = NULL;
8038 			*bail = 1;
8039 			sctp_free_a_chunk(stcb, chk, so_locked);
8040 			to_move = 0;
8041 			goto out_of;
8042 		} else {
8043 			SCTP_BUF_LEN(m) = 0;
8044 			SCTP_BUF_NEXT(m) = chk->data;
8045 			chk->data = m;
8046 			M_ALIGN(chk->data, 4);
8047 		}
8048 	}
8049 	SCTP_BUF_PREPEND(chk->data, SCTP_DATA_CHUNK_OVERHEAD(stcb), M_NOWAIT);
8050 	if (chk->data == NULL) {
8051 		/* HELP, TSNH since we ensured above that it would not fail? */
8052 #ifdef INVARIANTS
8053 		panic("prepend fails HELP?");
8054 #else
8055 		SCTP_PRINTF("prepend fails HELP?\n");
8056 		sctp_free_a_chunk(stcb, chk, so_locked);
8057 #endif
8058 		*bail = 1;
8059 		to_move = 0;
8060 		goto out_of;
8061 	}
8062 	sctp_snd_sb_alloc(stcb, SCTP_DATA_CHUNK_OVERHEAD(stcb));
8063 	chk->book_size = chk->send_size = (uint16_t)(to_move + SCTP_DATA_CHUNK_OVERHEAD(stcb));
8064 	chk->book_size_scale = 0;
8065 	chk->sent = SCTP_DATAGRAM_UNSENT;
8066 
8067 	chk->flags = 0;
8068 	chk->asoc = &stcb->asoc;
8069 	chk->pad_inplace = 0;
8070 	chk->no_fr_allowed = 0;
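	/*
	 * Assign the message identifier: without I-DATA support only
	 * ordered messages carry a 16-bit SSN, advanced on the last
	 * fragment; with I-DATA a 32-bit MID is kept per stream,
	 * tracked separately for ordered and unordered messages.
	 */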
8071 	if (stcb->asoc.idata_supported == 0) {
8072 		if (rcv_flags & SCTP_DATA_UNORDERED) {
8073 			/* Just use 0. The receiver ignores the values. */
8074 			chk->rec.data.mid = 0;
8075 		} else {
8076 			chk->rec.data.mid = strq->next_mid_ordered;
8077 			if (rcv_flags & SCTP_DATA_LAST_FRAG) {
8078 				strq->next_mid_ordered++;
8079 			}
8080 		}
8081 	} else {
8082 		if (rcv_flags & SCTP_DATA_UNORDERED) {
8083 			chk->rec.data.mid = strq->next_mid_unordered;
8084 			if (rcv_flags & SCTP_DATA_LAST_FRAG) {
8085 				strq->next_mid_unordered++;
8086 			}
8087 		} else {
8088 			chk->rec.data.mid = strq->next_mid_ordered;
8089 			if (rcv_flags & SCTP_DATA_LAST_FRAG) {
8090 				strq->next_mid_ordered++;
8091 			}
8092 		}
8093 	}
8094 	chk->rec.data.sid = sp->sid;
8095 	chk->rec.data.ppid = sp->ppid;
8096 	chk->rec.data.context = sp->context;
8097 	chk->rec.data.doing_fast_retransmit = 0;
8098 
8099 	chk->rec.data.timetodrop = sp->ts;
8100 	chk->flags = sp->act_flags;
8101 
8102 	if (sp->net) {
8103 		chk->whoTo = sp->net;
8104 		atomic_add_int(&chk->whoTo->ref_count, 1);
8105 	} else
8106 		chk->whoTo = NULL;
8107 
8108 	if (sp->holds_key_ref) {
8109 		chk->auth_keyid = sp->auth_keyid;
8110 		sctp_auth_key_acquire(stcb, chk->auth_keyid);
8111 		chk->holds_key_ref = 1;
8112 	}
8113 #if defined(__FreeBSD__) && !defined(__Userspace__)
8114 	chk->rec.data.tsn = atomic_fetchadd_int(&asoc->sending_seq, 1);
8115 #else
8116 	chk->rec.data.tsn = asoc->sending_seq++;
8117 #endif
8118 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_AT_SEND_2_OUTQ) {
8119 		sctp_misc_ints(SCTP_STRMOUT_LOG_SEND,
8120 		               (uint32_t)(uintptr_t)stcb, sp->length,
8121 		               (uint32_t)((chk->rec.data.sid << 16) | (0x0000ffff & chk->rec.data.mid)),
8122 		               chk->rec.data.tsn);
8123 	}
8124 	if (stcb->asoc.idata_supported == 0) {
8125 		dchkh = mtod(chk->data, struct sctp_data_chunk *);
8126 	} else {
8127 		ndchkh = mtod(chk->data, struct sctp_idata_chunk *);
8128 	}
8129 	/*
8130 	 * Put the rest of the things in place now. The size was set up
8131 	 * earlier, in the previous loop, prior to padding.
8132 	 */
8133 
8134 #ifdef SCTP_ASOCLOG_OF_TSNS
8135 	SCTP_TCB_LOCK_ASSERT(stcb);
8136 	if (asoc->tsn_out_at >= SCTP_TSN_LOG_SIZE) {
8137 		asoc->tsn_out_at = 0;
8138 		asoc->tsn_out_wrapped = 1;
8139 	}
8140 	asoc->out_tsnlog[asoc->tsn_out_at].tsn = chk->rec.data.tsn;
8141 	asoc->out_tsnlog[asoc->tsn_out_at].strm = chk->rec.data.sid;
8142 	asoc->out_tsnlog[asoc->tsn_out_at].seq = chk->rec.data.mid;
8143 	asoc->out_tsnlog[asoc->tsn_out_at].sz = chk->send_size;
8144 	asoc->out_tsnlog[asoc->tsn_out_at].flgs = chk->rec.data.rcv_flags;
8145 	asoc->out_tsnlog[asoc->tsn_out_at].stcb = (void *)stcb;
8146 	asoc->out_tsnlog[asoc->tsn_out_at].in_pos = asoc->tsn_out_at;
8147 	asoc->out_tsnlog[asoc->tsn_out_at].in_out = 2;
8148 	asoc->tsn_out_at++;
8149 #endif
8150 	if (stcb->asoc.idata_supported == 0) {
8151 		dchkh->ch.chunk_type = SCTP_DATA;
8152 		dchkh->ch.chunk_flags = chk->rec.data.rcv_flags;
8153 		dchkh->dp.tsn = htonl(chk->rec.data.tsn);
8154 		dchkh->dp.sid = htons(strq->sid);
8155 		dchkh->dp.ssn = htons((uint16_t)chk->rec.data.mid);
8156 		dchkh->dp.ppid = chk->rec.data.ppid;
8157 		dchkh->ch.chunk_length = htons(chk->send_size);
8158 	} else {
8159 		ndchkh->ch.chunk_type = SCTP_IDATA;
8160 		ndchkh->ch.chunk_flags = chk->rec.data.rcv_flags;
8161 		ndchkh->dp.tsn = htonl(chk->rec.data.tsn);
8162 		ndchkh->dp.sid = htons(strq->sid);
8163 		ndchkh->dp.reserved = htons(0);
8164 		ndchkh->dp.mid = htonl(chk->rec.data.mid);
8165 		if (sp->fsn == 0)
8166 			ndchkh->dp.ppid_fsn.ppid = chk->rec.data.ppid;
8167 		else
8168 			ndchkh->dp.ppid_fsn.fsn = htonl(sp->fsn);
8169 		sp->fsn++;
8170 		ndchkh->ch.chunk_length = htons(chk->send_size);
8171 	}
8172 	/* Now advance the chk->send_size by the actual pad needed. */
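	/*
	 * SCTP_SIZE32() rounds book_size up to a multiple of 4, so e.g.
	 * a book_size of 33 is rounded to 36 and 3 pad bytes are
	 * appended to the last mbuf here.
	 */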
8173 	if (chk->send_size < SCTP_SIZE32(chk->book_size)) {
8174 		/* need a pad */
8175 		struct mbuf *lm;
8176 		int pads;
8177 
8178 		pads = SCTP_SIZE32(chk->book_size) - chk->send_size;
8179 		lm = sctp_pad_lastmbuf(chk->data, pads, chk->last_mbuf);
8180 		if (lm != NULL) {
8181 			chk->last_mbuf = lm;
8182 			chk->pad_inplace = 1;
8183 		}
8184 		chk->send_size += pads;
8185 	}
8186 	if (PR_SCTP_ENABLED(chk->flags)) {
8187 		asoc->pr_sctp_cnt++;
8188 	}
8189 	if (sp->msg_is_complete && (sp->length == 0) && (sp->sender_all_done)) {
8190 		/* All done, pull and kill the message */
8191 		if (sp->put_last_out == 0) {
8192 			SCTP_PRINTF("Gak, put out entire msg with NO end!-2\n");
8193 			SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d send_lock:%d\n",
8194 			            sp->sender_all_done,
8195 			            sp->length,
8196 			            sp->msg_is_complete,
8197 			            sp->put_last_out,
8198 			            send_lock_up);
8199 		}
8200 		if ((send_lock_up == 0) && (TAILQ_NEXT(sp, next) == NULL)) {
8201 			SCTP_TCB_SEND_LOCK(stcb);
8202 			send_lock_up = 1;
8203 		}
8204 		atomic_subtract_int(&asoc->stream_queue_cnt, 1);
8205 		TAILQ_REMOVE(&strq->outqueue, sp, next);
8206 		stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, strq, sp, send_lock_up);
8207 		if ((strq->state == SCTP_STREAM_RESET_PENDING) &&
8208 		    (strq->chunks_on_queues == 0) &&
8209 		    TAILQ_EMPTY(&strq->outqueue)) {
8210 			stcb->asoc.trigger_reset = 1;
8211 		}
8212 		if (sp->net) {
8213 			sctp_free_remote_addr(sp->net);
8214 			sp->net = NULL;
8215 		}
8216 		if (sp->data) {
8217 			sctp_m_freem(sp->data);
8218 			sp->data = NULL;
8219 		}
8220 		sctp_free_a_strmoq(stcb, sp, so_locked);
8221 	}
8222 	asoc->chunks_on_out_queue++;
8223 	strq->chunks_on_queues++;
8224 	TAILQ_INSERT_TAIL(&asoc->send_queue, chk, sctp_next);
8225 	asoc->send_queue_cnt++;
8226 out_of:
8227 	if (send_lock_up) {
8228 		SCTP_TCB_SEND_UNLOCK(stcb);
8229 	}
8230 	return (to_move);
8231 }
8232 
8233 
8234 static void
8235 sctp_fill_outqueue(struct sctp_tcb *stcb,
8236     struct sctp_nets *net, int frag_point, int eeor_mode, int *quit_now, int so_locked)
8237 {
8238 	struct sctp_association *asoc;
8239 	struct sctp_stream_out *strq;
8240 	uint32_t space_left, moved, total_moved;
8241 	int bail, giveup;
8242 
8243 	SCTP_TCB_LOCK_ASSERT(stcb);
8244 	asoc = &stcb->asoc;
8245 	total_moved = 0;
8246 	switch (net->ro._l_addr.sa.sa_family) {
8247 #ifdef INET
8248 		case AF_INET:
8249 			space_left = net->mtu - SCTP_MIN_V4_OVERHEAD;
8250 			break;
8251 #endif
8252 #ifdef INET6
8253 		case AF_INET6:
8254 			space_left = net->mtu - SCTP_MIN_OVERHEAD;
8255 			break;
8256 #endif
8257 #if defined(__Userspace__)
8258 		case AF_CONN:
8259 			space_left = net->mtu - sizeof(struct sctphdr);
8260 			break;
8261 #endif
8262 		default:
8263 			/* TSNH */
8264 			space_left = net->mtu;
8265 			break;
8266 	}
8267 	/* Need an allowance for the data chunk header too */
8268 	space_left -= SCTP_DATA_CHUNK_OVERHEAD(stcb);
8269 
8270 	/* must fall on an even word (4-byte) boundary */
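	/*
	 * Masking with 0xfffffffc clears the low two bits, rounding
	 * space_left down to a multiple of 4 (e.g. 1461 becomes 1460).
	 */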
8271 	space_left &= 0xfffffffc;
8272 	strq = stcb->asoc.ss_functions.sctp_ss_select_stream(stcb, net, asoc);
8273 	giveup = 0;
8274 	bail = 0;
8275 	while ((space_left > 0) && (strq != NULL)) {
8276 		moved = sctp_move_to_outqueue(stcb, strq, space_left, frag_point,
8277 		                              &giveup, eeor_mode, &bail, so_locked);
8278 		stcb->asoc.ss_functions.sctp_ss_scheduled(stcb, net, asoc, strq, moved);
8279 		if ((giveup != 0) || (bail != 0)) {
8280 			break;
8281 		}
8282 		strq = stcb->asoc.ss_functions.sctp_ss_select_stream(stcb, net, asoc);
8283 		total_moved += moved;
8284 		if (space_left >= moved) {
8285 			space_left -= moved;
8286 		} else {
8287 			space_left = 0;
8288 		}
8289 		if (space_left >= SCTP_DATA_CHUNK_OVERHEAD(stcb)) {
8290 			space_left -= SCTP_DATA_CHUNK_OVERHEAD(stcb);
8291 		} else {
8292 			space_left = 0;
8293 		}
8294 		space_left &= 0xfffffffc;
8295 	}
8296 	if (bail != 0)
8297 		*quit_now = 1;
8298 
8299 	stcb->asoc.ss_functions.sctp_ss_packet_done(stcb, net, asoc);
8300 
8301 	if (total_moved == 0) {
8302 		if ((stcb->asoc.sctp_cmt_on_off == 0) &&
8303 		    (net == stcb->asoc.primary_destination)) {
8304 			/* ran dry for the primary network (net) */
8305 			SCTP_STAT_INCR(sctps_primary_randry);
8306 		} else if (stcb->asoc.sctp_cmt_on_off > 0) {
8307 			/* ran dry with CMT on */
8308 			SCTP_STAT_INCR(sctps_cmt_randry);
8309 		}
8310 	}
8311 }
8312 
8313 void
8314 sctp_fix_ecn_echo(struct sctp_association *asoc)
8315 {
8316 	struct sctp_tmit_chunk *chk;
8317 
8318 	TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
8319 		if (chk->rec.chunk_id.id == SCTP_ECN_ECHO) {
8320 			chk->sent = SCTP_DATAGRAM_UNSENT;
8321 		}
8322 	}
8323 }
8324 
8325 void
8326 sctp_move_chunks_from_net(struct sctp_tcb *stcb, struct sctp_nets *net)
8327 {
8328 	struct sctp_association *asoc;
8329 	struct sctp_tmit_chunk *chk;
8330 	struct sctp_stream_queue_pending *sp;
8331 	unsigned int i;
8332 
8333 	if (net == NULL) {
8334 		return;
8335 	}
8336 	asoc = &stcb->asoc;
8337 	for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
8338 		TAILQ_FOREACH(sp, &stcb->asoc.strmout[i].outqueue, next) {
8339 			if (sp->net == net) {
8340 				sctp_free_remote_addr(sp->net);
8341 				sp->net = NULL;
8342 			}
8343 		}
8344 	}
8345 	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
8346 		if (chk->whoTo == net) {
8347 			sctp_free_remote_addr(chk->whoTo);
8348 			chk->whoTo = NULL;
8349 		}
8350 	}
8351 }
8352 
8353 int
8354 sctp_med_chunk_output(struct sctp_inpcb *inp,
8355 		      struct sctp_tcb *stcb,
8356 		      struct sctp_association *asoc,
8357 		      int *num_out,
8358 		      int *reason_code,
8359 		      int control_only, int from_where,
8360 		      struct timeval *now, int *now_filled, int frag_point, int so_locked)
8361 {
8362 	/**
8363 	 * Ok, this is the generic chunk service queue. We must do the
8364 	 * following:
8365 	 * - Service the stream queue that is next, moving any
8366 	 *   message (note we must get a complete message, i.e. FIRST/MIDDLE and
8367 	 *   LAST, to the out queue in one pass) and assigning TSNs. This
8368 	 *   only applies though if the peer does not support NDATA. For NDATA
8369 	 *   chunks it's ok to not send the entire message ;-)
8370 	 * - Check to see if the cwnd/rwnd allows any output; if so, go ahead,
8371 	 *   formulate and send the low level chunks, making sure to combine
8372 	 *   any control in the control chunk queue also.
8373 	 */
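	/*
	 * Rough order of operations below: prime each net's send_queue
	 * from the stream queues, then, per destination, transmit the
	 * ASCONF queue, then the control queue, and finally bundle DATA
	 * chunks subject to the MTU, rwnd and cwnd limits.
	 */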
8374 	struct sctp_nets *net, *start_at, *sack_goes_to = NULL, *old_start_at = NULL;
8375 	struct mbuf *outchain, *endoutchain;
8376 	struct sctp_tmit_chunk *chk, *nchk;
8377 
8378 	/* temp arrays for unlinking */
8379 	struct sctp_tmit_chunk *data_list[SCTP_MAX_DATA_BUNDLING];
8380 	int no_fragmentflg, error;
8381 	unsigned int max_rwnd_per_dest, max_send_per_dest;
8382 	int one_chunk, hbflag, skip_data_for_this_net;
8383 	int asconf, cookie, no_out_cnt;
8384 	int bundle_at, ctl_cnt, no_data_chunks, eeor_mode;
8385 	unsigned int mtu, r_mtu, omtu, mx_mtu, to_out;
8386 	int tsns_sent = 0;
8387 	uint32_t auth_offset;
8388 	struct sctp_auth_chunk *auth;
8389 	uint16_t auth_keyid;
8390 	int override_ok = 1;
8391 	int skip_fill_up = 0;
8392 	int data_auth_reqd = 0;
8393 	/* JRS 5/14/07 - Add flag for whether a heartbeat is sent to
8394 	   the destination. */
8395 	int quit_now = 0;
8396 
8397 #if defined(__APPLE__) && !defined(__Userspace__)
8398 	if (so_locked) {
8399 		sctp_lock_assert(SCTP_INP_SO(inp));
8400 	} else {
8401 		sctp_unlock_assert(SCTP_INP_SO(inp));
8402 	}
8403 #endif
8404 	*num_out = 0;
8405 	*reason_code = 0;
8406 	auth_keyid = stcb->asoc.authinfo.active_keyid;
8407 	if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
8408 	    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
8409 	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR))) {
8410 		eeor_mode = 1;
8411 	} else {
8412 		eeor_mode = 0;
8413 	}
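	/*
	 * eeor_mode is set while shutting down or when the application
	 * has enabled explicit EOR; in that mode partially filled
	 * messages may be pushed out (see sctp_can_we_split_this()).
	 */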
8414 	ctl_cnt = no_out_cnt = asconf = cookie = 0;
8415 	/*
8416 	 * First lets prime the pump. For each destination, if there is room
8417 	 * in the flight size, attempt to pull an MTU's worth out of the
8418 	 * stream queues into the general send_queue
8419 	 */
8420 #ifdef SCTP_AUDITING_ENABLED
8421 	sctp_audit_log(0xC2, 2);
8422 #endif
8423 	SCTP_TCB_LOCK_ASSERT(stcb);
8424 	hbflag = 0;
8425 	if (control_only)
8426 		no_data_chunks = 1;
8427 	else
8428 		no_data_chunks = 0;
8429 
8430 	/* Nothing possible to send? */
8431 	if ((TAILQ_EMPTY(&asoc->control_send_queue) ||
8432 	     (asoc->ctrl_queue_cnt == stcb->asoc.ecn_echo_cnt_onq)) &&
8433 	    TAILQ_EMPTY(&asoc->asconf_send_queue) &&
8434 	    TAILQ_EMPTY(&asoc->send_queue) &&
8435 	    sctp_is_there_unsent_data(stcb, so_locked) == 0) {
8436 	nothing_to_send:
8437 		*reason_code = 9;
8438 		return (0);
8439 	}
8440 	if (asoc->peers_rwnd == 0) {
8441 		/* No room in peers rwnd */
8442 		*reason_code = 1;
8443 		if (asoc->total_flight > 0) {
8444 			/* we are allowed one chunk in flight */
8445 			no_data_chunks = 1;
8446 		}
8447 	}
8448 	if (stcb->asoc.ecn_echo_cnt_onq) {
8449 		/* Record where a sack goes, if any */
8450 		if (no_data_chunks &&
8451 		    (asoc->ctrl_queue_cnt == stcb->asoc.ecn_echo_cnt_onq)) {
8452 			/* Nothing but ECNe to send - we don't do that */
8453 			goto nothing_to_send;
8454 		}
8455 		TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
8456 			if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
8457 			    (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK)) {
8458 				sack_goes_to = chk->whoTo;
8459 				break;
8460 			}
8461 		}
8462 	}
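	/*
	 * Per-destination caps used further below when buffer splitting
	 * is enabled: e.g. a peer rwnd of 60000 with 4000 bytes in
	 * flight over 2 nets yields max_rwnd_per_dest = 32000.
	 */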
8463 	max_rwnd_per_dest = ((asoc->peers_rwnd + asoc->total_flight) / asoc->numnets);
8464 	if (stcb->sctp_socket)
8465 		max_send_per_dest = SCTP_SB_LIMIT_SND(stcb->sctp_socket) / asoc->numnets;
8466 	else
8467 		max_send_per_dest = 0;
8468 	if (no_data_chunks == 0) {
8469 		/* How many non-directed chunks are there? */
8470 		TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
8471 			if (chk->whoTo == NULL) {
8472 				/* We already have non-directed
8473 				 * chunks on the queue, no need
8474 				 * to do a fill-up.
8475 				 */
8476 				skip_fill_up = 1;
8477 				break;
8478 			}
8479 		}
8480 
8481 	}
8482 	if ((no_data_chunks == 0) &&
8483 	    (skip_fill_up == 0) &&
8484 	    (!stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc))) {
8485 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
8486 			/*
8487 			 * This for loop walks each net; if the net has
8488 			 * got space in its cwnd and has data sent to it
8489 			 * (when CMT is off) then it calls
8490 			 * sctp_fill_outqueue for the net. This gets
8491 			 * data on the send queue for that network.
8492 			 *
8493 			 * In sctp_fill_outqueue TSN's are assigned and
8494 			 * data is copied out of the stream buffers. Note
8495 			 * mostly copy by reference (we hope).
8496 			 */
8497 			net->window_probe = 0;
8498 			if ((net != stcb->asoc.alternate) &&
8499 			    ((net->dest_state & SCTP_ADDR_PF) ||
8500 			     (!(net->dest_state & SCTP_ADDR_REACHABLE)) ||
8501 			     (net->dest_state & SCTP_ADDR_UNCONFIRMED))) {
8502 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
8503 					sctp_log_cwnd(stcb, net, 1,
8504 						      SCTP_CWND_LOG_FILL_OUTQ_CALLED);
8505 				}
8506 				continue;
8507 			}
8508 			if ((stcb->asoc.cc_functions.sctp_cwnd_new_transmission_begins) &&
8509 			    (net->flight_size == 0)) {
8510 				(*stcb->asoc.cc_functions.sctp_cwnd_new_transmission_begins)(stcb, net);
8511 			}
8512 			if (net->flight_size >= net->cwnd) {
8513 				/* skip this network, no room - can't fill */
8514 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
8515 					sctp_log_cwnd(stcb, net, 3,
8516 						      SCTP_CWND_LOG_FILL_OUTQ_CALLED);
8517 				}
8518 				continue;
8519 			}
8520 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
8521 				sctp_log_cwnd(stcb, net, 4, SCTP_CWND_LOG_FILL_OUTQ_CALLED);
8522 			}
8523 			sctp_fill_outqueue(stcb, net, frag_point, eeor_mode, &quit_now, so_locked);
8524 			if (quit_now) {
8525 				/* memory alloc failure */
8526 				no_data_chunks = 1;
8527 				break;
8528 			}
8529 		}
8530 	}
8531 	/* now service each destination and send out what we can for it */
8532 	/* Nothing to send? */
8533 	if (TAILQ_EMPTY(&asoc->control_send_queue) &&
8534 	    TAILQ_EMPTY(&asoc->asconf_send_queue) &&
8535 	    TAILQ_EMPTY(&asoc->send_queue)) {
8536 		*reason_code = 8;
8537 		return (0);
8538 	}
8539 
8540 	if (asoc->sctp_cmt_on_off > 0) {
8541 		/* get the last start point */
8542 		start_at = asoc->last_net_cmt_send_started;
8543 		if (start_at == NULL) {
8544 			/* NULL, so start at the beginning */
8545 			start_at = TAILQ_FIRST(&asoc->nets);
8546 		} else {
8547 			start_at = TAILQ_NEXT(asoc->last_net_cmt_send_started, sctp_next);
8548 			if (start_at == NULL) {
8549 				start_at = TAILQ_FIRST(&asoc->nets);
8550 			}
8551 		}
8552 		asoc->last_net_cmt_send_started = start_at;
8553 	} else {
8554 		start_at = TAILQ_FIRST(&asoc->nets);
8555 	}
8556 	TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
8557 		if (chk->whoTo == NULL) {
8558 			if (asoc->alternate) {
8559 				chk->whoTo = asoc->alternate;
8560 			} else {
8561 				chk->whoTo = asoc->primary_destination;
8562 			}
8563 			atomic_add_int(&chk->whoTo->ref_count, 1);
8564 		}
8565 	}
8566 	old_start_at = NULL;
8567 again_one_more_time:
8568 	for (net = start_at ; net != NULL; net = TAILQ_NEXT(net, sctp_next)) {
8569 		/* how much can we send? */
8570 		/* SCTPDBG("Examine for sending net:%x\n", (uint32_t)net); */
8571 		if (old_start_at && (old_start_at == net)) {
8572 			/* went through the list completely. */
8573 			break;
8574 		}
8575 		tsns_sent = 0xa;
8576 		if (TAILQ_EMPTY(&asoc->control_send_queue) &&
8577 		    TAILQ_EMPTY(&asoc->asconf_send_queue) &&
8578 		    (net->flight_size >= net->cwnd)) {
8579 			/* Nothing on control or asconf and flight is full, we can skip
8580 			 * even in the CMT case.
8581 			 */
8582 			continue;
8583 		}
8584 		bundle_at = 0;
8585 		endoutchain = outchain = NULL;
8586 		auth = NULL;
8587 		auth_offset = 0;
8588 		no_fragmentflg = 1;
8589 		one_chunk = 0;
8590 		if (net->dest_state & SCTP_ADDR_UNCONFIRMED) {
8591 			skip_data_for_this_net = 1;
8592 		} else {
8593 			skip_data_for_this_net = 0;
8594 		}
8595 		switch (((struct sockaddr *)&net->ro._l_addr)->sa_family) {
8596 #ifdef INET
8597 		case AF_INET:
8598 			mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
8599 			break;
8600 #endif
8601 #ifdef INET6
8602 		case AF_INET6:
8603 			mtu = net->mtu - SCTP_MIN_OVERHEAD;
8604 			break;
8605 #endif
8606 #if defined(__Userspace__)
8607 		case AF_CONN:
8608 			mtu = net->mtu - sizeof(struct sctphdr);
8609 			break;
8610 #endif
8611 		default:
8612 			/* TSNH */
8613 			mtu = net->mtu;
8614 			break;
8615 		}
8616 		mx_mtu = mtu;
8617 		to_out = 0;
8618 		if (mtu > asoc->peers_rwnd) {
8619 			if (asoc->total_flight > 0) {
8620 				/* We have a packet in flight somewhere */
8621 				r_mtu = asoc->peers_rwnd;
8622 			} else {
8623 				/* We are always allowed to send one MTU out */
8624 				one_chunk = 1;
8625 				r_mtu = mtu;
8626 			}
8627 		} else {
8628 			r_mtu = mtu;
8629 		}
8630 		error = 0;
8631 		/************************/
8632 		/* ASCONF transmission */
8633 		/************************/
8634 		/* Now first lets go through the asconf queue */
8635 		TAILQ_FOREACH_SAFE(chk, &asoc->asconf_send_queue, sctp_next, nchk) {
8636 			if (chk->rec.chunk_id.id != SCTP_ASCONF) {
8637 				continue;
8638 			}
8639 			if (chk->whoTo == NULL) {
8640 				if (asoc->alternate == NULL) {
8641 					if (asoc->primary_destination != net) {
8642 						break;
8643 					}
8644 				} else {
8645 					if (asoc->alternate != net) {
8646 						break;
8647 					}
8648 				}
8649 			} else {
8650 				if (chk->whoTo != net) {
8651 					break;
8652 				}
8653 			}
8654 			if (chk->data == NULL) {
8655 				break;
8656 			}
8657 			if (chk->sent != SCTP_DATAGRAM_UNSENT &&
8658 			    chk->sent != SCTP_DATAGRAM_RESEND) {
8659 				break;
8660 			}
8661 			/*
8662 			 * if no AUTH is yet included and this chunk
8663 			 * requires it, make sure to account for it.  We
8664 			 * don't apply the size until the AUTH chunk is
8665 			 * actually added below in case there is no room for
8666 			 * this chunk. NOTE: we overload the use of "omtu"
8667 			 * here
8668 			 */
8669 			if ((auth == NULL) &&
8670 			    sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
8671 							stcb->asoc.peer_auth_chunks)) {
8672 				omtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
8673 			} else
8674 				omtu = 0;
8675 			/* Here we do NOT factor the r_mtu */
8676 			if ((chk->send_size < (int)(mtu - omtu)) ||
8677 			    (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
8678 				/*
8679 				 * We probably should glom the mbuf chain
8680 				 * from the chk->data for control but the
8681 				 * problem is it becomes yet one more level
8682 				 * of tracking to do if for some reason
8683 				 * output fails. Then I have got to
8684 				 * reconstruct the merged control chain.. el
8685 				 * yucko.. for now we take the easy way and
8686 				 * do the copy
8687 				 */
8688 				/*
8689 				 * Add an AUTH chunk, if chunk requires it
8690 				 * save the offset into the chain for AUTH
8691 				 */
8692 				if ((auth == NULL) &&
8693 				    (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
8694 								 stcb->asoc.peer_auth_chunks))) {
8695 					outchain = sctp_add_auth_chunk(outchain,
8696 								       &endoutchain,
8697 								       &auth,
8698 								       &auth_offset,
8699 								       stcb,
8700 								       chk->rec.chunk_id.id);
8701 					SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8702 				}
8703 				outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain,
8704 							       (int)chk->rec.chunk_id.can_take_data,
8705 							       chk->send_size, chk->copy_by_ref);
8706 				if (outchain == NULL) {
8707 					*reason_code = 8;
8708 					SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
8709 					return (ENOMEM);
8710 				}
8711 				SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8712 				/* update our MTU size */
8713 				if (mtu > (chk->send_size + omtu))
8714 					mtu -= (chk->send_size + omtu);
8715 				else
8716 					mtu = 0;
8717 				to_out += (chk->send_size + omtu);
8718 				/* Do clear IP_DF ? */
8719 				if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
8720 					no_fragmentflg = 0;
8721 				}
8722 				if (chk->rec.chunk_id.can_take_data)
8723 					chk->data = NULL;
8724 				/*
8725 				 * set hb flag since we can
8726 				 * use these for RTO
8727 				 */
8728 				hbflag = 1;
8729 				asconf = 1;
8730 				/*
8731 				 * should sysctl this: don't
8732 				 * bundle data with ASCONF
8733 				 * since it requires AUTH
8734 				 */
8735 				no_data_chunks = 1;
8736 				chk->sent = SCTP_DATAGRAM_SENT;
8737 				if (chk->whoTo == NULL) {
8738 					chk->whoTo = net;
8739 					atomic_add_int(&net->ref_count, 1);
8740 				}
8741 				chk->snd_count++;
8742 				if (mtu == 0) {
8743 					/*
8744 					 * Ok we are out of room but we can
8745 					 * output without affecting the
8746 					 * flight size since this little guy
8747 					 * is a control only packet.
8748 					 */
8749 					sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, net);
8750 					/*
8751 					 * do NOT clear the asconf
8752 					 * flag as it is used to do
8753 					 * appropriate source address
8754 					 * selection.
8755 					 */
8756 					if (*now_filled == 0) {
8757 						(void)SCTP_GETTIME_TIMEVAL(now);
8758 						*now_filled = 1;
8759 					}
8760 					net->last_sent_time = *now;
8761 					hbflag = 0;
8762 					if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
8763 					                                        (struct sockaddr *)&net->ro._l_addr,
8764 					                                        outchain, auth_offset, auth,
8765 					                                        stcb->asoc.authinfo.active_keyid,
8766 					                                        no_fragmentflg, 0, asconf,
8767 					                                        inp->sctp_lport, stcb->rport,
8768 					                                        htonl(stcb->asoc.peer_vtag),
8769 					                                        net->port, NULL,
8770 #if defined(__FreeBSD__) && !defined(__Userspace__)
8771 					                                        0, 0,
8772 #endif
8773 					                                        so_locked))) {
8774 						/* error, we could not output */
8775 						SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
8776 						if (from_where == 0) {
8777 							SCTP_STAT_INCR(sctps_lowlevelerrusr);
8778 						}
8779 						if (error == ENOBUFS) {
8780 							asoc->ifp_had_enobuf = 1;
8781 							SCTP_STAT_INCR(sctps_lowlevelerr);
8782 						}
8783 						/* error, could not output */
8784 						if (error == EHOSTUNREACH) {
8785 							/*
8786 							 * Destination went
8787 							 * unreachable
8788 							 * during this send
8789 							 */
8790 							sctp_move_chunks_from_net(stcb, net);
8791 						}
8792 						*reason_code = 7;
8793 						break;
8794 					} else {
8795 						asoc->ifp_had_enobuf = 0;
8796 					}
8797 					/*
8798 					 * Increase the number we sent; if a
8799 					 * cookie is sent we don't tell them
8800 					 * any was sent out.
8801 					 */
8802 					outchain = endoutchain = NULL;
8803 					auth = NULL;
8804 					auth_offset = 0;
8805 					if (!no_out_cnt)
8806 						*num_out += ctl_cnt;
8807 					/* recalc a clean slate and setup */
8808 					switch (net->ro._l_addr.sa.sa_family) {
8809 #ifdef INET
8810 						case AF_INET:
8811 							mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
8812 							break;
8813 #endif
8814 #ifdef INET6
8815 						case AF_INET6:
8816 							mtu = net->mtu - SCTP_MIN_OVERHEAD;
8817 							break;
8818 #endif
8819 #if defined(__Userspace__)
8820 						case AF_CONN:
8821 							mtu = net->mtu - sizeof(struct sctphdr);
8822 							break;
8823 #endif
8824 						default:
8825 							/* TSNH */
8826 							mtu = net->mtu;
8827 							break;
8828 					}
8829 					to_out = 0;
8830 					no_fragmentflg = 1;
8831 				}
8832 			}
8833 		}
8834 		if (error != 0) {
8835 			/* try next net */
8836 			continue;
8837 		}
8838 		/************************/
8839 		/* Control transmission */
8840 		/************************/
8841 		/* Now first lets go through the control queue */
8842 		TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) {
8843 			if ((sack_goes_to) &&
8844 			    (chk->rec.chunk_id.id == SCTP_ECN_ECHO) &&
8845 			    (chk->whoTo != sack_goes_to)) {
8846 				/*
8847 				 * if we have a sack in queue, and we are looking at an
8848 				 * ecn echo that is NOT queued to where the sack is going..
8849 				 */
8850 				if (chk->whoTo == net) {
8851 					/* Don't transmit it to where its going (current net) */
8852 					continue;
8853 				} else if (sack_goes_to == net) {
8854 					/* But do transmit it to this address */
8855 					goto skip_net_check;
8856 				}
8857 			}
8858 			if (chk->whoTo == NULL) {
8859 				if (asoc->alternate == NULL) {
8860 					if (asoc->primary_destination != net) {
8861 						continue;
8862 					}
8863 				} else {
8864 					if (asoc->alternate != net) {
8865 						continue;
8866 					}
8867 				}
8868 			} else {
8869 				if (chk->whoTo != net) {
8870 					continue;
8871 				}
8872 			}
8873 		skip_net_check:
8874 			if (chk->data == NULL) {
8875 				continue;
8876 			}
8877 			if (chk->sent != SCTP_DATAGRAM_UNSENT) {
8878 				/*
8879 				 * It must be unsent. Cookies and ASCONF's
8880 				 * hang around, but their timers will force
8881 				 * them out when marked for resend.
8882 				 */
8883 				continue;
8884 			}
8885 			/*
8886 			 * if no AUTH is yet included and this chunk
8887 			 * requires it, make sure to account for it.  We
8888 			 * don't apply the size until the AUTH chunk is
8889 			 * actually added below in case there is no room for
8890 			 * this chunk. NOTE: we overload the use of "omtu"
8891 			 * here
8892 			 */
8893 			if ((auth == NULL) &&
8894 			    sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
8895 							stcb->asoc.peer_auth_chunks)) {
8896 				omtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
8897 			} else
8898 				omtu = 0;
8899 			/* Here we do NOT factor the r_mtu */
8900 			if ((chk->send_size <= (int)(mtu - omtu)) ||
8901 			    (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
8902 				/*
8903 				 * We probably should glom the mbuf chain
8904 				 * from the chk->data for control but the
8905 				 * problem is it becomes yet one more level
8906 				 * of tracking to do if for some reason
8907 				 * output fails. Then I have got to
8908 				 * reconstruct the merged control chain.. el
8909 				 * yucko.. for now we take the easy way and
8910 				 * do the copy
8911 				 */
8912 				/*
8913 				 * Add an AUTH chunk, if chunk requires it
8914 				 * save the offset into the chain for AUTH
8915 				 */
8916 				if ((auth == NULL) &&
8917 				    (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
8918 								 stcb->asoc.peer_auth_chunks))) {
8919 					outchain = sctp_add_auth_chunk(outchain,
8920 								       &endoutchain,
8921 								       &auth,
8922 								       &auth_offset,
8923 								       stcb,
8924 								       chk->rec.chunk_id.id);
8925 					SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8926 				}
8927 				outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain,
8928 							       (int)chk->rec.chunk_id.can_take_data,
8929 							       chk->send_size, chk->copy_by_ref);
8930 				if (outchain == NULL) {
8931 					*reason_code = 8;
8932 					SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
8933 					return (ENOMEM);
8934 				}
8935 				SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8936 				/* update our MTU size */
8937 				if (mtu > (chk->send_size + omtu))
8938 					mtu -= (chk->send_size + omtu);
8939 				else
8940 					mtu = 0;
8941 				to_out += (chk->send_size + omtu);
8942 				/* Do clear IP_DF ? */
8943 				if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
8944 					no_fragmentflg = 0;
8945 				}
8946 				if (chk->rec.chunk_id.can_take_data)
8947 					chk->data = NULL;
8948 				/* Mark things to be removed, if needed */
8949 				if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
8950 				    (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK) || /* EY */
8951 				    (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) ||
8952 				    (chk->rec.chunk_id.id == SCTP_HEARTBEAT_ACK) ||
8953 				    (chk->rec.chunk_id.id == SCTP_SHUTDOWN) ||
8954 				    (chk->rec.chunk_id.id == SCTP_SHUTDOWN_ACK) ||
8955 				    (chk->rec.chunk_id.id == SCTP_OPERATION_ERROR) ||
8956 				    (chk->rec.chunk_id.id == SCTP_COOKIE_ACK) ||
8957 				    (chk->rec.chunk_id.id == SCTP_ECN_CWR) ||
8958 				    (chk->rec.chunk_id.id == SCTP_PACKET_DROPPED) ||
8959 				    (chk->rec.chunk_id.id == SCTP_ASCONF_ACK)) {
8960 					if (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) {
8961 						hbflag = 1;
8962 					}
8963 					/* remove these chunks at the end */
8964 					if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
8965 					    (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK)) {
8966 						/* turn off the timer */
8967 						if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
8968 							sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
8969 							                inp, stcb, NULL,
8970 							                SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_1);
8971 						}
8972 					}
8973 					ctl_cnt++;
8974 				} else {
8975 					/*
8976 					 * Other chunks, since they have
8977 					 * timers running (i.e. COOKIE)
8978 					 * we just "trust" that it
8979 					 * gets sent or retransmitted.
8980 					 */
8981 					ctl_cnt++;
8982 					if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
8983 						cookie = 1;
8984 						no_out_cnt = 1;
8985 					} else if (chk->rec.chunk_id.id == SCTP_ECN_ECHO) {
8986 						/*
8987 						 * Increment the ECNE send count here;
8988 						 * this means we may be over-zealous in
8989 						 * our counting if the send fails, but it's
8990 						 * the best place to do it (we used to do
8991 						 * it in the queue of the chunk, but that did
8992 						 * not tell how many times it was sent).
8993 						 */
8994 						SCTP_STAT_INCR(sctps_sendecne);
8995 					}
8996 					chk->sent = SCTP_DATAGRAM_SENT;
8997 					if (chk->whoTo == NULL) {
8998 						chk->whoTo = net;
8999 						atomic_add_int(&net->ref_count, 1);
9000 					}
9001 					chk->snd_count++;
9002 				}
9003 				if (mtu == 0) {
9004 					/*
9005 					 * Ok we are out of room but we can
9006 					 * output without affecting the
9007 					 * flight size since this little guy
9008 					 * is a control only packet.
9009 					 */
9010 					if (asconf) {
9011 						sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, net);
9012 						/*
9013 						 * do NOT clear the asconf
9014 						 * flag as it is used to do
9015 						 * appropriate source address
9016 						 * selection.
9017 						 */
9018 					}
9019 					if (cookie) {
9020 						sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net);
9021 						cookie = 0;
9022 					}
9023 					/* Only HB or ASCONF advances time */
9024 					if (hbflag) {
9025 						if (*now_filled == 0) {
9026 							(void)SCTP_GETTIME_TIMEVAL(now);
9027 							*now_filled = 1;
9028 						}
9029 						net->last_sent_time = *now;
9030 						hbflag = 0;
9031 					}
9032 					if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
9033 					                                        (struct sockaddr *)&net->ro._l_addr,
9034 					                                        outchain,
9035 					                                        auth_offset, auth,
9036 					                                        stcb->asoc.authinfo.active_keyid,
9037 					                                        no_fragmentflg, 0, asconf,
9038 					                                        inp->sctp_lport, stcb->rport,
9039 					                                        htonl(stcb->asoc.peer_vtag),
9040 					                                        net->port, NULL,
9041 #if defined(__FreeBSD__) && !defined(__Userspace__)
9042 					                                        0, 0,
9043 #endif
9044 					                                        so_locked))) {
9045 						/* error, we could not output */
9046 						SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
9047 						if (from_where == 0) {
9048 							SCTP_STAT_INCR(sctps_lowlevelerrusr);
9049 						}
9050 						if (error == ENOBUFS) {
9051 							asoc->ifp_had_enobuf = 1;
9052 							SCTP_STAT_INCR(sctps_lowlevelerr);
9053 						}
9054 						if (error == EHOSTUNREACH) {
9055 							/*
9056 							 * Destination went
9057 							 * unreachable
9058 							 * during this send
9059 							 */
9060 							sctp_move_chunks_from_net(stcb, net);
9061 						}
9062 						*reason_code = 7;
9063 						break;
9064 					} else {
9065 						asoc->ifp_had_enobuf = 0;
9066 					}
9067 					/*
9068 					 * Increase the number we sent; if a
9069 					 * cookie is sent we don't tell them
9070 					 * any was sent out.
9071 					 */
9072 					outchain = endoutchain = NULL;
9073 					auth = NULL;
9074 					auth_offset = 0;
9075 					if (!no_out_cnt)
9076 						*num_out += ctl_cnt;
9077 					/* recalc a clean slate and setup */
9078 					switch (net->ro._l_addr.sa.sa_family) {
9079 #ifdef INET
9080 						case AF_INET:
9081 							mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
9082 							break;
9083 #endif
9084 #ifdef INET6
9085 						case AF_INET6:
9086 							mtu = net->mtu - SCTP_MIN_OVERHEAD;
9087 							break;
9088 #endif
9089 #if defined(__Userspace__)
9090 						case AF_CONN:
9091 							mtu = net->mtu - sizeof(struct sctphdr);
9092 							break;
9093 #endif
9094 						default:
9095 							/* TSNH */
9096 							mtu = net->mtu;
9097 							break;
9098 					}
9099 					to_out = 0;
9100 					no_fragmentflg = 1;
9101 				}
9102 			}
9103 		}
9104 		if (error != 0) {
9105 			/* try next net */
9106 			continue;
9107 		}
9108 		/* JRI: if dest is in PF state, do not send data to it */
9109 		if ((asoc->sctp_cmt_on_off > 0) &&
9110 		    (net != stcb->asoc.alternate) &&
9111 		    (net->dest_state & SCTP_ADDR_PF)) {
9112 			goto no_data_fill;
9113 		}
9114 		if (net->flight_size >= net->cwnd) {
9115 			goto no_data_fill;
9116 		}
9117 		if ((asoc->sctp_cmt_on_off > 0) &&
9118 		    (SCTP_BASE_SYSCTL(sctp_buffer_splitting) & SCTP_RECV_BUFFER_SPLITTING) &&
9119 		    (net->flight_size > max_rwnd_per_dest)) {
9120 			goto no_data_fill;
9121 		}
9122 		/*
9123 		 * We need a specific accounting for the usage of the
9124 		 * send buffer. We also need to check the number of messages
9125 		 * per net. For now, this is better than nothing and it is
9126 		 * disabled by default...
9127 		 */
9128 		if ((asoc->sctp_cmt_on_off > 0) &&
9129 		    (SCTP_BASE_SYSCTL(sctp_buffer_splitting) & SCTP_SEND_BUFFER_SPLITTING) &&
9130 		    (max_send_per_dest > 0) &&
9131 		    (net->flight_size > max_send_per_dest)) {
9132 			goto no_data_fill;
9133 		}
9134 		/*********************/
9135 		/* Data transmission */
9136 		/*********************/
9137 		/*
9138 		 * if AUTH for DATA is required and no AUTH has been added
9139 		 * yet, account for this in the mtu now... if no data can be
9140 		 * bundled, this adjustment won't matter anyway since the
9141 		 * packet will be going out...
9142 		 */
9143 		data_auth_reqd = sctp_auth_is_required_chunk(SCTP_DATA,
9144 							     stcb->asoc.peer_auth_chunks);
9145 		if (data_auth_reqd && (auth == NULL)) {
9146 			mtu -= sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
9147 		}
9148 		/* now lets add any data within the MTU constraints */
9149 		switch (((struct sockaddr *)&net->ro._l_addr)->sa_family) {
9150 #ifdef INET
9151 		case AF_INET:
9152 			if (net->mtu > SCTP_MIN_V4_OVERHEAD)
9153 				omtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
9154 			else
9155 				omtu = 0;
9156 			break;
9157 #endif
9158 #ifdef INET6
9159 		case AF_INET6:
9160 			if (net->mtu > SCTP_MIN_OVERHEAD)
9161 				omtu = net->mtu - SCTP_MIN_OVERHEAD;
9162 			else
9163 				omtu = 0;
9164 			break;
9165 #endif
9166 #if defined(__Userspace__)
9167 		case AF_CONN:
9168 			if (net->mtu > sizeof(struct sctphdr)) {
9169 				omtu = net->mtu - sizeof(struct sctphdr);
9170 			} else {
9171 				omtu = 0;
9172 			}
9173 			break;
9174 #endif
9175 		default:
9176 			/* TSNH */
9177 			omtu = 0;
9178 			break;
9179 		}
9180 		if ((((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
9181 		      (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
9182 		     (skip_data_for_this_net == 0)) ||
9183 		    (cookie)) {
9184 			TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
9185 				if (no_data_chunks) {
9186 					/* let only control go out */
9187 					*reason_code = 1;
9188 					break;
9189 				}
9190 				if (net->flight_size >= net->cwnd) {
9191 					/* skip this net, no room for data */
9192 					*reason_code = 2;
9193 					break;
9194 				}
9195 				if ((chk->whoTo != NULL) &&
9196 				    (chk->whoTo != net)) {
9197 					/* Don't send the chunk on this net */
9198 					continue;
9199 				}
9200 
9201 				if (asoc->sctp_cmt_on_off == 0) {
9202 					if ((asoc->alternate) &&
9203 					    (asoc->alternate != net) &&
9204 					    (chk->whoTo == NULL)) {
9205 						continue;
9206 					} else if ((net != asoc->primary_destination) &&
9207 						   (asoc->alternate == NULL) &&
9208 						   (chk->whoTo == NULL)) {
9209 						continue;
9210 					}
9211 				}
9212 				if ((chk->send_size > omtu) && ((chk->flags & CHUNK_FLAGS_FRAGMENT_OK) == 0)) {
9213 					/*-
9214 					 * Strange, we have a chunk that is
9215 					 * too big for its destination and
9216 					 * yet no fragment-ok flag.
9217 					 * Something went wrong when the
9218 					 * PMTU changed... we did not mark
9219 					 * this chunk for some reason? I
9220 					 * will fix it here by letting IP
9221 					 * fragment it for now and printing
9222 					 * a warning. This really should not
9223 					 * happen...
9224 					 */
9225 					SCTP_PRINTF("Warning chunk of %d bytes > mtu:%d and yet PMTU disc missed\n",
9226 						    chk->send_size, mtu);
9227 					chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
9228 				}
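				/*
				 * When shutdown is pending and the
				 * SACK-IMMEDIATELY extension is enabled,
				 * set the I-bit on outgoing DATA so the
				 * peer acks right away and the shutdown
				 * can complete sooner.
				 */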
9229 				if (SCTP_BASE_SYSCTL(sctp_enable_sack_immediately) &&
9230 				    (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) {
9231 					struct sctp_data_chunk *dchkh;
9232 
9233 					dchkh = mtod(chk->data, struct sctp_data_chunk *);
9234 					dchkh->ch.chunk_flags |= SCTP_DATA_SACK_IMMEDIATELY;
9235 				}
9236 				if (((chk->send_size <= mtu) && (chk->send_size <= r_mtu)) ||
9237 				    ((chk->flags & CHUNK_FLAGS_FRAGMENT_OK) && (chk->send_size <= asoc->peers_rwnd))) {
9238 					/* ok we will add this one */
9239 
9240 					/*
9241 					 * Add an AUTH chunk if the chunk
9242 					 * requires it, and save the offset
9243 					 * into the chain for AUTH.
9244 					 */
9245 					if (data_auth_reqd) {
9246 						if (auth == NULL) {
9247 							outchain = sctp_add_auth_chunk(outchain,
9248 										       &endoutchain,
9249 										       &auth,
9250 										       &auth_offset,
9251 										       stcb,
9252 										       SCTP_DATA);
9253 							auth_keyid = chk->auth_keyid;
9254 							override_ok = 0;
9255 							SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
9256 						} else if (override_ok) {
9257 							/* use this data's keyid */
9258 							auth_keyid = chk->auth_keyid;
9259 							override_ok = 0;
9260 						} else if (auth_keyid != chk->auth_keyid) {
9261 							/* different keyid, so done bundling */
9262 							break;
9263 						}
9264 					}
9265 					outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain, 0,
9266 								       chk->send_size, chk->copy_by_ref);
9267 					if (outchain == NULL) {
9268 						SCTPDBG(SCTP_DEBUG_OUTPUT3, "No memory?\n");
9269 						if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
9270 							sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
9271 						}
9272 						*reason_code = 3;
9273 						SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
9274 						return (ENOMEM);
9275 					}
9276 					/* update our MTU size */
9277 					/* Do clear IP_DF ? */
9278 					if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
9279 						no_fragmentflg = 0;
9280 					}
9281 					/* unsigned subtraction of mtu */
9282 					if (mtu > chk->send_size)
9283 						mtu -= chk->send_size;
9284 					else
9285 						mtu = 0;
9286 					/* unsigned subtraction of r_mtu */
9287 					if (r_mtu > chk->send_size)
9288 						r_mtu -= chk->send_size;
9289 					else
9290 						r_mtu = 0;
9291 
9292 					to_out += chk->send_size;
9293 					if ((to_out > mx_mtu) && no_fragmentflg) {
9294 #ifdef INVARIANTS
9295 						panic("Exceeding mtu of %d out size is %d", mx_mtu, to_out);
9296 #else
9297 						SCTP_PRINTF("Exceeding mtu of %d out size is %d\n",
9298 							    mx_mtu, to_out);
9299 #endif
9300 					}
9301 					chk->window_probe = 0;
9302 					data_list[bundle_at++] = chk;
9303 					if (bundle_at >= SCTP_MAX_DATA_BUNDLING) {
9304 						break;
9305 					}
9306 					if (chk->sent == SCTP_DATAGRAM_UNSENT) {
9307 						if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
9308 							SCTP_STAT_INCR_COUNTER64(sctps_outorderchunks);
9309 						} else {
9310 							SCTP_STAT_INCR_COUNTER64(sctps_outunorderchunks);
9311 						}
9312 						if (((chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) == SCTP_DATA_LAST_FRAG) &&
9313 						    ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0))
9314 							/* Count the number of user messages that were
9315 							 * fragmented; we do this by counting only when
9316 							 * we see a LAST fragment.
9317 							 */
9318 							SCTP_STAT_INCR_COUNTER64(sctps_fragusrmsgs);
9319 					}
9320 					if ((mtu == 0) || (r_mtu == 0) || (one_chunk)) {
9321 						if ((one_chunk) && (stcb->asoc.total_flight == 0)) {
9322 							data_list[0]->window_probe = 1;
9323 							net->window_probe = 1;
9324 						}
9325 						break;
9326 					}
9327 				} else {
9328 					/*
9329 					 * Must be sent in order of the
9330 					 * TSNs (on a network)
9331 					 */
9332 					break;
9333 				}
9334 			}	/* for (chunk gather loop for this net) */
9335 		}		/* if asoc.state OPEN */
9336 	no_data_fill:
9337 		/* Is there something to send for this destination? */
9338 		if (outchain) {
9339 			/* We may need to start a control timer or two */
9340 			if (asconf) {
9341 				sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp,
9342 						 stcb, net);
9343 				/*
9344 				 * do NOT clear the asconf flag as it is used
9345 				 * to do appropriate source address selection.
9346 				 */
9347 			}
9348 			if (cookie) {
9349 				sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net);
9350 				cookie = 0;
9351 			}
9352 			/* must start a send timer if data is being sent */
9353 			if (bundle_at && (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer))) {
9354 				/*
9355 				 * no timer running on this destination;
9356 				 * restart it.
9357 				 */
9358 				sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
9359 			}
9360 			if (bundle_at || hbflag) {
9361 				/* For data/asconf and hb set time */
9362 				if (*now_filled == 0) {
9363 					(void)SCTP_GETTIME_TIMEVAL(now);
9364 					*now_filled = 1;
9365 				}
9366 				net->last_sent_time = *now;
9367 			}
9368 			/* Now send it, if there is anything to send :> */
9369 			if ((error = sctp_lowlevel_chunk_output(inp,
9370 			                                        stcb,
9371 			                                        net,
9372 			                                        (struct sockaddr *)&net->ro._l_addr,
9373 			                                        outchain,
9374 			                                        auth_offset,
9375 			                                        auth,
9376 			                                        auth_keyid,
9377 			                                        no_fragmentflg,
9378 			                                        bundle_at,
9379 			                                        asconf,
9380 			                                        inp->sctp_lport, stcb->rport,
9381 			                                        htonl(stcb->asoc.peer_vtag),
9382 			                                        net->port, NULL,
9383 #if defined(__FreeBSD__) && !defined(__Userspace__)
9384 			                                        0, 0,
9385 #endif
9386 			                                        so_locked))) {
9387 				/* error, we could not output */
9388 				SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
9389 				if (from_where == 0) {
9390 					SCTP_STAT_INCR(sctps_lowlevelerrusr);
9391 				}
9392 				if (error == ENOBUFS) {
9393 					asoc->ifp_had_enobuf = 1;
9394 					SCTP_STAT_INCR(sctps_lowlevelerr);
9395 				}
9396 				if (error == EHOSTUNREACH) {
9397 					/*
9398 					 * Destination went unreachable
9399 					 * during this send
9400 					 */
9401 					sctp_move_chunks_from_net(stcb, net);
9402 				}
9403 				*reason_code = 6;
9404 				/*-
9405 				 * I add this line to be paranoid. As far as
9406 				 * I can tell the continue takes us back to
9407 				 * the top of the for loop, but just to make
9408 				 * sure I will reset these again here.
9409 				 */
9410 				ctl_cnt = bundle_at = 0;
9411 				continue; /* This takes us back to the for() for the nets. */
9412 			} else {
9413 				asoc->ifp_had_enobuf = 0;
9414 			}
9415 			endoutchain = NULL;
9416 			auth = NULL;
9417 			auth_offset = 0;
9418 			if (!no_out_cnt) {
9419 				*num_out += (ctl_cnt + bundle_at);
9420 			}
9421 			if (bundle_at) {
9422 				/* setup for a RTO measurement */
9423 				tsns_sent = data_list[0]->rec.data.tsn;
9424 				/* fill time if not already filled */
9425 				if (*now_filled == 0) {
9426 					(void)SCTP_GETTIME_TIMEVAL(&asoc->time_last_sent);
9427 					*now_filled = 1;
9428 					*now = asoc->time_last_sent;
9429 				} else {
9430 					asoc->time_last_sent = *now;
9431 				}
9432 				if (net->rto_needed) {
9433 					data_list[0]->do_rtt = 1;
9434 					net->rto_needed = 0;
9435 				}
9436 				SCTP_STAT_INCR_BY(sctps_senddata, bundle_at);
9437 				sctp_clean_up_datalist(stcb, asoc, data_list, bundle_at, net);
9438 			}
9439 			if (one_chunk) {
9440 				break;
9441 			}
9442 		}
9443 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
9444 			sctp_log_cwnd(stcb, net, tsns_sent, SCTP_CWND_LOG_FROM_SEND);
9445 		}
9446 	}
9447 	if (old_start_at == NULL) {
9448 		old_start_at = start_at;
9449 		start_at = TAILQ_FIRST(&asoc->nets);
9450 		if (old_start_at)
9451 			goto again_one_more_time;
9452 	}
9453 
9454 	/*
9455 	 * At the end there should be no NON timed chunks hanging on this
9456 	 * queue.
9457 	 */
9458 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
9459 		sctp_log_cwnd(stcb, net, *num_out, SCTP_CWND_LOG_FROM_SEND);
9460 	}
9461 	if ((*num_out == 0) && (*reason_code == 0)) {
9462 		*reason_code = 4;
9463 	} else {
9464 		*reason_code = 5;
9465 	}
9466 	sctp_clean_up_ctl(stcb, asoc, so_locked);
9467 	return (0);
9468 }
9469 
9470 void
9471 sctp_queue_op_err(struct sctp_tcb *stcb, struct mbuf *op_err)
9472 {
9473 	/*-
9474 	 * Prepend an OPERATION-ERROR chunk header and put it on the end of
9475 	 * the control chunk queue.
9476 	 */
9477 	struct sctp_chunkhdr *hdr;
9478 	struct sctp_tmit_chunk *chk;
9479 	struct mbuf *mat, *last_mbuf;
9480 	uint32_t chunk_length;
9481 	uint16_t padding_length;
9482 
9483 	SCTP_TCB_LOCK_ASSERT(stcb);
9484 	SCTP_BUF_PREPEND(op_err, sizeof(struct sctp_chunkhdr), M_NOWAIT);
9485 	if (op_err == NULL) {
9486 		return;
9487 	}
9488 	last_mbuf = NULL;
9489 	chunk_length = 0;
9490 	for (mat = op_err; mat != NULL; mat = SCTP_BUF_NEXT(mat)) {
9491 		chunk_length += SCTP_BUF_LEN(mat);
9492 		if (SCTP_BUF_NEXT(mat) == NULL) {
9493 			last_mbuf = mat;
9494 		}
9495 	}
9496 	if (chunk_length > SCTP_MAX_CHUNK_LENGTH) {
9497 		sctp_m_freem(op_err);
9498 		return;
9499 	}
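	/*
	 * Pad the chunk out to a 4-byte boundary (SCTP chunks must be a
	 * multiple of 4 bytes on the wire); e.g. a chunk_length of 6 gives
	 * padding_length = 4 - (6 % 4) = 2.
	 */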
9500 	padding_length = chunk_length % 4;
9501 	if (padding_length != 0) {
9502 		padding_length = 4 - padding_length;
9503 	}
9504 	if (padding_length != 0) {
9505 		if (sctp_add_pad_tombuf(last_mbuf, padding_length) == NULL) {
9506 			sctp_m_freem(op_err);
9507 			return;
9508 		}
9509 	}
9510 	sctp_alloc_a_chunk(stcb, chk);
9511 	if (chk == NULL) {
9512 		/* no memory */
9513 		sctp_m_freem(op_err);
9514 		return;
9515 	}
9516 	chk->copy_by_ref = 0;
9517 	chk->rec.chunk_id.id = SCTP_OPERATION_ERROR;
9518 	chk->rec.chunk_id.can_take_data = 0;
9519 	chk->flags = 0;
9520 	chk->send_size = (uint16_t)chunk_length;
9521 	chk->sent = SCTP_DATAGRAM_UNSENT;
9522 	chk->snd_count = 0;
9523 	chk->asoc = &stcb->asoc;
9524 	chk->data = op_err;
9525 	chk->whoTo = NULL;
9526 	hdr = mtod(op_err, struct sctp_chunkhdr *);
9527 	hdr->chunk_type = SCTP_OPERATION_ERROR;
9528 	hdr->chunk_flags = 0;
9529 	hdr->chunk_length = htons(chk->send_size);
9530 	TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9531 	chk->asoc->ctrl_queue_cnt++;
9532 }
9533 
9534 int
9535 sctp_send_cookie_echo(struct mbuf *m,
9536     int offset, int limit,
9537     struct sctp_tcb *stcb,
9538     struct sctp_nets *net)
9539 {
9540 	/*-
9541 	 * pull out the cookie and put it at the front of the control chunk
9542 	 * queue.
9543 	 */
9544 	int at;
9545 	struct mbuf *cookie;
9546 	struct sctp_paramhdr param, *phdr;
9547 	struct sctp_chunkhdr *hdr;
9548 	struct sctp_tmit_chunk *chk;
9549 	uint16_t ptype, plen;
9550 
9551 	SCTP_TCB_LOCK_ASSERT(stcb);
9552 	/* First find the cookie in the param area */
9553 	cookie = NULL;
9554 	at = offset + sizeof(struct sctp_init_chunk);
9555 	for (;;) {
9556 		phdr = sctp_get_next_param(m, at, &param, sizeof(param));
9557 		if (phdr == NULL) {
9558 			return (-3);
9559 		}
9560 		ptype = ntohs(phdr->param_type);
9561 		plen = ntohs(phdr->param_length);
9562 		if (plen < sizeof(struct sctp_paramhdr)) {
9563 			return (-6);
9564 		}
9565 		if (ptype == SCTP_STATE_COOKIE) {
9566 			int pad;
9567 
9568 			/* found the cookie */
9569 			if (at + plen > limit) {
9570 				return (-7);
9571 			}
9572 			cookie = SCTP_M_COPYM(m, at, plen, M_NOWAIT);
9573 			if (cookie == NULL) {
9574 				/* No memory */
9575 				return (-2);
9576 			}
9577 			if ((pad = (plen % 4)) > 0) {
9578 				pad = 4 - pad;
9579 			}
9580 			if (pad > 0) {
9581 				if (sctp_pad_lastmbuf(cookie, pad, NULL) == NULL) {
9582 					return (-8);
9583 				}
9584 			}
9585 #ifdef SCTP_MBUF_LOGGING
9586 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
9587 				sctp_log_mbc(cookie, SCTP_MBUF_ICOPY);
9588 			}
9589 #endif
9590 			break;
9591 		}
9592 		at += SCTP_SIZE32(plen);
9593 	}
9594 	/* ok, we got the cookie; let's change it into a COOKIE-ECHO chunk */
9595 	/* first the change from param to cookie */
9596 	hdr = mtod(cookie, struct sctp_chunkhdr *);
9597 	hdr->chunk_type = SCTP_COOKIE_ECHO;
9598 	hdr->chunk_flags = 0;
9599 	/* get the chunk stuff now and place it in the FRONT of the queue */
9600 	sctp_alloc_a_chunk(stcb, chk);
9601 	if (chk == NULL) {
9602 		/* no memory */
9603 		sctp_m_freem(cookie);
9604 		return (-5);
9605 	}
9606 	chk->copy_by_ref = 0;
9607 	chk->rec.chunk_id.id = SCTP_COOKIE_ECHO;
9608 	chk->rec.chunk_id.can_take_data = 0;
9609 	chk->flags = CHUNK_FLAGS_FRAGMENT_OK;
9610 	chk->send_size = SCTP_SIZE32(plen);
9611 	chk->sent = SCTP_DATAGRAM_UNSENT;
9612 	chk->snd_count = 0;
9613 	chk->asoc = &stcb->asoc;
9614 	chk->data = cookie;
9615 	chk->whoTo = net;
9616 	atomic_add_int(&chk->whoTo->ref_count, 1);
9617 	TAILQ_INSERT_HEAD(&chk->asoc->control_send_queue, chk, sctp_next);
9618 	chk->asoc->ctrl_queue_cnt++;
9619 	return (0);
9620 }
9621 
9622 void
9623 sctp_send_heartbeat_ack(struct sctp_tcb *stcb,
9624     struct mbuf *m,
9625     int offset,
9626     int chk_length,
9627     struct sctp_nets *net)
9628 {
9629 	/*
9630 	 * take a HEARTBEAT request, turn it into a HEARTBEAT-ACK, and queue it.
9631 	 */
9632 	struct mbuf *outchain;
9633 	struct sctp_chunkhdr *chdr;
9634 	struct sctp_tmit_chunk *chk;
9635 
9636 	if (net == NULL)
9637 		/* must have a net pointer */
9638 		return;
9639 
9640 	outchain = SCTP_M_COPYM(m, offset, chk_length, M_NOWAIT);
9641 	if (outchain == NULL) {
9642 		/* gak out of memory */
9643 		return;
9644 	}
9645 #ifdef SCTP_MBUF_LOGGING
9646 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
9647 		sctp_log_mbc(outchain, SCTP_MBUF_ICOPY);
9648 	}
9649 #endif
9650 	chdr = mtod(outchain, struct sctp_chunkhdr *);
9651 	chdr->chunk_type = SCTP_HEARTBEAT_ACK;
9652 	chdr->chunk_flags = 0;
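	/* Pad the copied heartbeat info out to a 4-byte boundary. */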
9653 	if (chk_length % 4 != 0) {
9654 		sctp_pad_lastmbuf(outchain, 4 - (chk_length % 4), NULL);
9655 	}
9656 	sctp_alloc_a_chunk(stcb, chk);
9657 	if (chk == NULL) {
9658 		/* no memory */
9659 		sctp_m_freem(outchain);
9660 		return;
9661 	}
9662 	chk->copy_by_ref = 0;
9663 	chk->rec.chunk_id.id = SCTP_HEARTBEAT_ACK;
9664 	chk->rec.chunk_id.can_take_data = 1;
9665 	chk->flags = 0;
9666 	chk->send_size = chk_length;
9667 	chk->sent = SCTP_DATAGRAM_UNSENT;
9668 	chk->snd_count = 0;
9669 	chk->asoc = &stcb->asoc;
9670 	chk->data = outchain;
9671 	chk->whoTo = net;
9672 	atomic_add_int(&chk->whoTo->ref_count, 1);
9673 	TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9674 	chk->asoc->ctrl_queue_cnt++;
9675 }
9676 
9677 void
9678 sctp_send_cookie_ack(struct sctp_tcb *stcb)
9679 {
9680 	/* formulate and queue a COOKIE-ACK back to the sender */
9681 	struct mbuf *cookie_ack;
9682 	struct sctp_chunkhdr *hdr;
9683 	struct sctp_tmit_chunk *chk;
9684 
9685 	SCTP_TCB_LOCK_ASSERT(stcb);
9686 
9687 	cookie_ack = sctp_get_mbuf_for_msg(sizeof(struct sctp_chunkhdr), 0, M_NOWAIT, 1, MT_HEADER);
9688 	if (cookie_ack == NULL) {
9689 		/* no mbuf's */
9690 		return;
9691 	}
9692 	SCTP_BUF_RESV_UF(cookie_ack, SCTP_MIN_OVERHEAD);
9693 	sctp_alloc_a_chunk(stcb, chk);
9694 	if (chk == NULL) {
9695 		/* no memory */
9696 		sctp_m_freem(cookie_ack);
9697 		return;
9698 	}
9699 	chk->copy_by_ref = 0;
9700 	chk->rec.chunk_id.id = SCTP_COOKIE_ACK;
9701 	chk->rec.chunk_id.can_take_data = 1;
9702 	chk->flags = 0;
9703 	chk->send_size = sizeof(struct sctp_chunkhdr);
9704 	chk->sent = SCTP_DATAGRAM_UNSENT;
9705 	chk->snd_count = 0;
9706 	chk->asoc = &stcb->asoc;
9707 	chk->data = cookie_ack;
9708 	if (chk->asoc->last_control_chunk_from != NULL) {
9709 		chk->whoTo = chk->asoc->last_control_chunk_from;
9710 		atomic_add_int(&chk->whoTo->ref_count, 1);
9711 	} else {
9712 		chk->whoTo = NULL;
9713 	}
9714 	hdr = mtod(cookie_ack, struct sctp_chunkhdr *);
9715 	hdr->chunk_type = SCTP_COOKIE_ACK;
9716 	hdr->chunk_flags = 0;
9717 	hdr->chunk_length = htons(chk->send_size);
9718 	SCTP_BUF_LEN(cookie_ack) = chk->send_size;
9719 	TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9720 	chk->asoc->ctrl_queue_cnt++;
9721 	return;
9722 }
9723 
9724 
9725 void
9726 sctp_send_shutdown_ack(struct sctp_tcb *stcb, struct sctp_nets *net)
9727 {
9728 	/* formulate and queue a SHUTDOWN-ACK back to the sender */
9729 	struct mbuf *m_shutdown_ack;
9730 	struct sctp_shutdown_ack_chunk *ack_cp;
9731 	struct sctp_tmit_chunk *chk;
9732 
9733 	m_shutdown_ack = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_ack_chunk), 0, M_NOWAIT, 1, MT_HEADER);
9734 	if (m_shutdown_ack == NULL) {
9735 		/* no mbuf's */
9736 		return;
9737 	}
9738 	SCTP_BUF_RESV_UF(m_shutdown_ack, SCTP_MIN_OVERHEAD);
9739 	sctp_alloc_a_chunk(stcb, chk);
9740 	if (chk == NULL) {
9741 		/* no memory */
9742 		sctp_m_freem(m_shutdown_ack);
9743 		return;
9744 	}
9745 	chk->copy_by_ref = 0;
9746 	chk->rec.chunk_id.id = SCTP_SHUTDOWN_ACK;
9747 	chk->rec.chunk_id.can_take_data = 1;
9748 	chk->flags = 0;
9749 	chk->send_size = sizeof(struct sctp_chunkhdr);
9750 	chk->sent = SCTP_DATAGRAM_UNSENT;
9751 	chk->snd_count = 0;
9752 	chk->asoc = &stcb->asoc;
9753 	chk->data = m_shutdown_ack;
9754 	chk->whoTo = net;
9755 	if (chk->whoTo) {
9756 		atomic_add_int(&chk->whoTo->ref_count, 1);
9757 	}
9758 	ack_cp = mtod(m_shutdown_ack, struct sctp_shutdown_ack_chunk *);
9759 	ack_cp->ch.chunk_type = SCTP_SHUTDOWN_ACK;
9760 	ack_cp->ch.chunk_flags = 0;
9761 	ack_cp->ch.chunk_length = htons(chk->send_size);
9762 	SCTP_BUF_LEN(m_shutdown_ack) = chk->send_size;
9763 	TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9764 	chk->asoc->ctrl_queue_cnt++;
9765 	return;
9766 }
9767 
9768 void
9769 sctp_send_shutdown(struct sctp_tcb *stcb, struct sctp_nets *net)
9770 {
9771 	/* formulate and queue a SHUTDOWN to the sender */
9772 	struct mbuf *m_shutdown;
9773 	struct sctp_shutdown_chunk *shutdown_cp;
9774 	struct sctp_tmit_chunk *chk;
9775 
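	/*
	 * If a SHUTDOWN chunk is already on the control queue, reuse it:
	 * drop its old destination, refresh the cumulative TSN ack and
	 * requeue it at the tail instead of allocating a new chunk.
	 */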
9776 	TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
9777 		if (chk->rec.chunk_id.id == SCTP_SHUTDOWN) {
9778 			/* We already have a SHUTDOWN queued. Reuse it. */
9779 			if (chk->whoTo) {
9780 				sctp_free_remote_addr(chk->whoTo);
9781 				chk->whoTo = NULL;
9782 			}
9783 			break;
9784 		}
9785 	}
9786 	if (chk == NULL) {
9787 		m_shutdown = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_chunk), 0, M_NOWAIT, 1, MT_HEADER);
9788 		if (m_shutdown == NULL) {
9789 			/* no mbuf's */
9790 			return;
9791 		}
9792 		SCTP_BUF_RESV_UF(m_shutdown, SCTP_MIN_OVERHEAD);
9793 		sctp_alloc_a_chunk(stcb, chk);
9794 		if (chk == NULL) {
9795 			/* no memory */
9796 			sctp_m_freem(m_shutdown);
9797 			return;
9798 		}
9799 		chk->copy_by_ref = 0;
9800 		chk->rec.chunk_id.id = SCTP_SHUTDOWN;
9801 		chk->rec.chunk_id.can_take_data = 1;
9802 		chk->flags = 0;
9803 		chk->send_size = sizeof(struct sctp_shutdown_chunk);
9804 		chk->sent = SCTP_DATAGRAM_UNSENT;
9805 		chk->snd_count = 0;
9806 		chk->asoc = &stcb->asoc;
9807 		chk->data = m_shutdown;
9808 		chk->whoTo = net;
9809 		if (chk->whoTo) {
9810 			atomic_add_int(&chk->whoTo->ref_count, 1);
9811 		}
9812 		shutdown_cp = mtod(m_shutdown, struct sctp_shutdown_chunk *);
9813 		shutdown_cp->ch.chunk_type = SCTP_SHUTDOWN;
9814 		shutdown_cp->ch.chunk_flags = 0;
9815 		shutdown_cp->ch.chunk_length = htons(chk->send_size);
9816 		shutdown_cp->cumulative_tsn_ack = htonl(stcb->asoc.cumulative_tsn);
9817 		SCTP_BUF_LEN(m_shutdown) = chk->send_size;
9818 		TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9819 		chk->asoc->ctrl_queue_cnt++;
9820 	} else {
9821 		TAILQ_REMOVE(&stcb->asoc.control_send_queue, chk, sctp_next);
9822 		chk->whoTo = net;
9823 		if (chk->whoTo) {
9824 			atomic_add_int(&chk->whoTo->ref_count, 1);
9825 		}
9826 		shutdown_cp = mtod(chk->data, struct sctp_shutdown_chunk *);
9827 		shutdown_cp->cumulative_tsn_ack = htonl(stcb->asoc.cumulative_tsn);
9828 		TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
9829 	}
9830 	return;
9831 }
9832 
9833 void
9834 sctp_send_asconf(struct sctp_tcb *stcb, struct sctp_nets *net, int addr_locked)
9835 {
9836 	/*
9837 	 * formulate and queue an ASCONF to the peer.
9838 	 * ASCONF parameters should be queued on the assoc queue.
9839 	 */
9840 	struct sctp_tmit_chunk *chk;
9841 	struct mbuf *m_asconf;
9842 	int len;
9843 
9844 	SCTP_TCB_LOCK_ASSERT(stcb);
9845 
9846 	if ((!TAILQ_EMPTY(&stcb->asoc.asconf_send_queue)) &&
9847 	    (!sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_MULTIPLE_ASCONFS))) {
9848 		/* can't send a new one if there is one in flight already */
9849 		return;
9850 	}
9851 
9852 	/* compose an ASCONF chunk, maximum length is PMTU */
9853 	m_asconf = sctp_compose_asconf(stcb, &len, addr_locked);
9854 	if (m_asconf == NULL) {
9855 		return;
9856 	}
9857 
9858 	sctp_alloc_a_chunk(stcb, chk);
9859 	if (chk == NULL) {
9860 		/* no memory */
9861 		sctp_m_freem(m_asconf);
9862 		return;
9863 	}
9864 
9865 	chk->copy_by_ref = 0;
9866 	chk->rec.chunk_id.id = SCTP_ASCONF;
9867 	chk->rec.chunk_id.can_take_data = 0;
9868 	chk->flags = CHUNK_FLAGS_FRAGMENT_OK;
9869 	chk->data = m_asconf;
9870 	chk->send_size = len;
9871 	chk->sent = SCTP_DATAGRAM_UNSENT;
9872 	chk->snd_count = 0;
9873 	chk->asoc = &stcb->asoc;
9874 	chk->whoTo = net;
9875 	if (chk->whoTo) {
9876 		atomic_add_int(&chk->whoTo->ref_count, 1);
9877 	}
9878 	TAILQ_INSERT_TAIL(&chk->asoc->asconf_send_queue, chk, sctp_next);
9879 	chk->asoc->ctrl_queue_cnt++;
9880 	return;
9881 }
9882 
9883 void
9884 sctp_send_asconf_ack(struct sctp_tcb *stcb)
9885 {
9886 	/*
9887 	 * formulate and queue an ASCONF-ACK back to the sender.
9888 	 * the ASCONF-ACK must be stored in the tcb.
9889 	 */
9890 	struct sctp_tmit_chunk *chk;
9891 	struct sctp_asconf_ack *ack, *latest_ack;
9892 	struct mbuf *m_ack;
9893 	struct sctp_nets *net = NULL;
9894 
9895 	SCTP_TCB_LOCK_ASSERT(stcb);
9896 	/* Get the latest ASCONF-ACK */
9897 	latest_ack = TAILQ_LAST(&stcb->asoc.asconf_ack_sent, sctp_asconf_ackhead);
9898 	if (latest_ack == NULL) {
9899 		return;
9900 	}
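	/*
	 * Pick a destination for the ACK: if the last ASCONF-ACK already
	 * went to the address this ASCONF came from, treat it as a
	 * retransmission and look for an alternate address first;
	 * otherwise reply towards the source of the last control chunk,
	 * falling back to the alternate or primary destination.
	 */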
9901 	if (latest_ack->last_sent_to != NULL &&
9902 	    latest_ack->last_sent_to == stcb->asoc.last_control_chunk_from) {
9903 		/* we're doing a retransmission */
9904 		net = sctp_find_alternate_net(stcb, stcb->asoc.last_control_chunk_from, 0);
9905 		if (net == NULL) {
9906 			/* no alternate */
9907 			if (stcb->asoc.last_control_chunk_from == NULL) {
9908 				if (stcb->asoc.alternate) {
9909 					net = stcb->asoc.alternate;
9910 				} else {
9911 					net = stcb->asoc.primary_destination;
9912 				}
9913 			} else {
9914 				net = stcb->asoc.last_control_chunk_from;
9915 			}
9916 		}
9917 	} else {
9918 		/* normal case */
9919 		if (stcb->asoc.last_control_chunk_from == NULL) {
9920 			if (stcb->asoc.alternate) {
9921 				net = stcb->asoc.alternate;
9922 			} else {
9923 				net = stcb->asoc.primary_destination;
9924 			}
9925 		} else {
9926 			net = stcb->asoc.last_control_chunk_from;
9927 		}
9928 	}
9929 	latest_ack->last_sent_to = net;
9930 
9931 	TAILQ_FOREACH(ack, &stcb->asoc.asconf_ack_sent, next) {
9932 		if (ack->data == NULL) {
9933 			continue;
9934 		}
9935 
9936 		/* copy the asconf_ack */
9937 		m_ack = SCTP_M_COPYM(ack->data, 0, M_COPYALL, M_NOWAIT);
9938 		if (m_ack == NULL) {
9939 			/* couldn't copy it */
9940 			return;
9941 		}
9942 #ifdef SCTP_MBUF_LOGGING
9943 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
9944 			sctp_log_mbc(m_ack, SCTP_MBUF_ICOPY);
9945 		}
9946 #endif
9947 
9948 		sctp_alloc_a_chunk(stcb, chk);
9949 		if (chk == NULL) {
9950 			/* no memory */
9951 			if (m_ack)
9952 				sctp_m_freem(m_ack);
9953 			return;
9954 		}
9955 		chk->copy_by_ref = 0;
9956 		chk->rec.chunk_id.id = SCTP_ASCONF_ACK;
9957 		chk->rec.chunk_id.can_take_data = 1;
9958 		chk->flags = CHUNK_FLAGS_FRAGMENT_OK;
9959 		chk->whoTo = net;
9960 		if (chk->whoTo) {
9961 			atomic_add_int(&chk->whoTo->ref_count, 1);
9962 		}
9963 		chk->data = m_ack;
9964 		chk->send_size = ack->len;
9965 		chk->sent = SCTP_DATAGRAM_UNSENT;
9966 		chk->snd_count = 0;
9967 		chk->asoc = &stcb->asoc;
9968 
9969 		TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9970 		chk->asoc->ctrl_queue_cnt++;
9971 	}
9972 	return;
9973 }
9974 
9975 
9976 static int
9977 sctp_chunk_retransmission(struct sctp_inpcb *inp,
9978     struct sctp_tcb *stcb,
9979     struct sctp_association *asoc,
9980     int *cnt_out, struct timeval *now, int *now_filled, int *fr_done, int so_locked)
9981 {
9982 	/*-
9983 	 * send out one MTU of retransmission. If fast_retransmit is
9984 	 * happening we ignore the cwnd. Otherwise we obey the cwnd and
9985 	 * rwnd. For a Cookie or Asconf in the control chunk queue we
9986 	 * retransmit them by themselves.
9987 	 *
9988 	 * For data chunks we will pick out the lowest TSNs in the sent_queue
9989 	 * marked for resend and bundle them all together (up to the MTU of the
9990 	 * destination). The address to send to should have been
9991 	 * selected/changed where the retransmission was marked (i.e. in the FR
9992 	 * or t3-timeout routines).
9993 	 */
9994 	struct sctp_tmit_chunk *data_list[SCTP_MAX_DATA_BUNDLING];
9995 	struct sctp_tmit_chunk *chk, *fwd;
9996 	struct mbuf *m, *endofchain;
9997 	struct sctp_nets *net = NULL;
9998 	uint32_t tsns_sent = 0;
9999 	int no_fragmentflg, bundle_at, cnt_thru;
10000 	unsigned int mtu;
10001 	int error, i, one_chunk, fwd_tsn, ctl_cnt, tmr_started;
10002 	struct sctp_auth_chunk *auth = NULL;
10003 	uint32_t auth_offset = 0;
10004 	uint16_t auth_keyid;
10005 	int override_ok = 1;
10006 	int data_auth_reqd = 0;
10007 	uint32_t dmtu = 0;
10008 
10009 #if defined(__APPLE__) && !defined(__Userspace__)
10010 	if (so_locked) {
10011 		sctp_lock_assert(SCTP_INP_SO(inp));
10012 	} else {
10013 		sctp_unlock_assert(SCTP_INP_SO(inp));
10014 	}
10015 #endif
10016 	SCTP_TCB_LOCK_ASSERT(stcb);
10017 	tmr_started = ctl_cnt = bundle_at = error = 0;
10018 	no_fragmentflg = 1;
10019 	fwd_tsn = 0;
10020 	*cnt_out = 0;
10021 	fwd = NULL;
10022 	endofchain = m = NULL;
10023 	auth_keyid = stcb->asoc.authinfo.active_keyid;
10024 #ifdef SCTP_AUDITING_ENABLED
10025 	sctp_audit_log(0xC3, 1);
10026 #endif
10027 	if ((TAILQ_EMPTY(&asoc->sent_queue)) &&
10028 	    (TAILQ_EMPTY(&asoc->control_send_queue))) {
10029 		SCTPDBG(SCTP_DEBUG_OUTPUT1,"SCTP hits empty queue with cnt set to %d?\n",
10030 			asoc->sent_queue_retran_cnt);
10031 		asoc->sent_queue_cnt = 0;
10032 		asoc->sent_queue_cnt_removeable = 0;
10033 		/* send back 0/0 so we enter normal transmission */
10034 		*cnt_out = 0;
10035 		return (0);
10036 	}
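	/*
	 * First see whether a COOKIE-ECHO, STREAM-RESET or FORWARD-TSN
	 * control chunk is marked for retransmission; control chunks are
	 * retransmitted on their own, one packet per call.
	 */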
10037 	TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
10038 		if ((chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) ||
10039 		    (chk->rec.chunk_id.id == SCTP_STREAM_RESET) ||
10040 		    (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN)) {
10041 			if (chk->sent != SCTP_DATAGRAM_RESEND) {
10042 				continue;
10043 			}
10044 			if (chk->rec.chunk_id.id == SCTP_STREAM_RESET) {
10045 				if (chk != asoc->str_reset) {
10046 					/*
10047 					 * not eligible for retran if it's
10048 					 * not ours
10049 					 */
10050 					continue;
10051 				}
10052 			}
10053 			ctl_cnt++;
10054 			if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) {
10055 				fwd_tsn = 1;
10056 			}
10057 			/*
10058 			 * Add an AUTH chunk if the chunk requires it, and
10059 			 * save the offset into the chain for AUTH.
10060 			 */
10061 			if ((auth == NULL) &&
10062 			    (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
10063 							 stcb->asoc.peer_auth_chunks))) {
10064 				m = sctp_add_auth_chunk(m, &endofchain,
10065 							&auth, &auth_offset,
10066 							stcb,
10067 							chk->rec.chunk_id.id);
10068 				SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
10069 			}
10070 			m = sctp_copy_mbufchain(chk->data, m, &endofchain, 0, chk->send_size, chk->copy_by_ref);
10071 			break;
10072 		}
10073 	}
10074 	one_chunk = 0;
10075 	cnt_thru = 0;
10076 	/* do we have control chunks to retransmit? */
10077 	if (m != NULL) {
10078 		/* Start a timer no matter if we succeed or fail */
10079 		if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
10080 			sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, chk->whoTo);
10081 		} else if (chk->rec.chunk_id.id == SCTP_ASCONF)
10082 			sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, chk->whoTo);
10083 		chk->snd_count++;	/* update our count */
10084 		if ((error = sctp_lowlevel_chunk_output(inp, stcb, chk->whoTo,
10085 		                                        (struct sockaddr *)&chk->whoTo->ro._l_addr, m,
10086 		                                        auth_offset, auth, stcb->asoc.authinfo.active_keyid,
10087 		                                        no_fragmentflg, 0, 0,
10088 		                                        inp->sctp_lport, stcb->rport, htonl(stcb->asoc.peer_vtag),
10089 		                                        chk->whoTo->port, NULL,
10090 #if defined(__FreeBSD__) && !defined(__Userspace__)
10091 		                                        0, 0,
10092 #endif
10093 		                                        so_locked))) {
10094 			SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
10095 			if (error == ENOBUFS) {
10096 				asoc->ifp_had_enobuf = 1;
10097 				SCTP_STAT_INCR(sctps_lowlevelerr);
10098 			}
10099 			return (error);
10100 		} else {
10101 			asoc->ifp_had_enobuf = 0;
10102 		}
10103 		endofchain = NULL;
10104 		auth = NULL;
10105 		auth_offset = 0;
10106 		/*
10107 		 * We don't want to mark the net->sent time here, since we
10108 		 * use it for HB and a retransmission cannot measure RTT.
10109 		 */
10110 		/* (void)SCTP_GETTIME_TIMEVAL(&chk->whoTo->last_sent_time); */
10111 		*cnt_out += 1;
10112 		chk->sent = SCTP_DATAGRAM_SENT;
10113 		sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
10114 		if (fwd_tsn == 0) {
10115 			return (0);
10116 		} else {
10117 			/* Clean up the fwd-tsn list */
10118 			sctp_clean_up_ctl(stcb, asoc, so_locked);
10119 			return (0);
10120 		}
10121 	}
10122 	/*
10123 	 * Ok, it is just data retransmission we need to do, or that plus
10124 	 * a fwd-tsn along with it.
10125 	 */
10126 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
10127 		return (SCTP_RETRAN_DONE);
10128 	}
10129 	if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED) ||
10130 	    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT)) {
10131 		/* not yet open, resend the cookie and that is it */
10132 		return (1);
10133 	}
10134 #ifdef SCTP_AUDITING_ENABLED
10135 	sctp_auditing(20, inp, stcb, NULL);
10136 #endif
10137 	data_auth_reqd = sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks);
10138 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
10139 		if (chk->sent != SCTP_DATAGRAM_RESEND) {
10140 			/* No, not sent to this net or not ready for rtx */
10141 			continue;
10142 		}
10143 		if (chk->data == NULL) {
10144 			SCTP_PRINTF("TSN:%x chk->snd_count:%d chk->sent:%d can't retran - no data\n",
10145 			            chk->rec.data.tsn, chk->snd_count, chk->sent);
10146 			continue;
10147 		}
10148 		if ((SCTP_BASE_SYSCTL(sctp_max_retran_chunk)) &&
10149 		    (chk->snd_count >= SCTP_BASE_SYSCTL(sctp_max_retran_chunk))) {
10150 			struct mbuf *op_err;
10151 			char msg[SCTP_DIAG_INFO_LEN];
10152 
10153 			SCTP_SNPRINTF(msg, sizeof(msg), "TSN %8.8x retransmitted %d times, giving up",
10154 			              chk->rec.data.tsn, chk->snd_count);
10155 			op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
10156 			                             msg);
10157 			atomic_add_int(&stcb->asoc.refcnt, 1);
10158 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err,
10159 			                          so_locked);
10160 			SCTP_TCB_LOCK(stcb);
10161 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
10162 			return (SCTP_RETRAN_EXIT);
10163 		}
10164 		/* pick up the net */
10165 		net = chk->whoTo;
10166 		switch (net->ro._l_addr.sa.sa_family) {
10167 #ifdef INET
10168 			case AF_INET:
10169 				mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
10170 				break;
10171 #endif
10172 #ifdef INET6
10173 			case AF_INET6:
10174 				mtu = net->mtu - SCTP_MIN_OVERHEAD;
10175 				break;
10176 #endif
10177 #if defined(__Userspace__)
10178 			case AF_CONN:
10179 				mtu = net->mtu - sizeof(struct sctphdr);
10180 				break;
10181 #endif
10182 			default:
10183 				/* TSNH */
10184 				mtu = net->mtu;
10185 				break;
10186 		}
10187 
10188 		if ((asoc->peers_rwnd < mtu) && (asoc->total_flight > 0)) {
10189 			/* No room in peers rwnd */
10190 			uint32_t tsn;
10191 
10192 			tsn = asoc->last_acked_seq + 1;
10193 			if (tsn == chk->rec.data.tsn) {
10194 				/*
10195 				 * we make a special exception for this
10196 				 * case. The peer has no rwnd but is missing
10197 				 * the lowest chunk, which is probably what
10198 				 * is holding up the rwnd.
10199 				 */
10200 				goto one_chunk_around;
10201 			}
10202 			return (1);
10203 		}
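		/*
		 * If the peer's rwnd is smaller than the MTU we send at most
		 * one chunk; with a zero rwnd and nothing in flight this
		 * single chunk becomes a window probe.
		 */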
10204 	one_chunk_around:
10205 		if (asoc->peers_rwnd < mtu) {
10206 			one_chunk = 1;
10207 			if ((asoc->peers_rwnd == 0) &&
10208 			    (asoc->total_flight == 0)) {
10209 				chk->window_probe = 1;
10210 				chk->whoTo->window_probe = 1;
10211 			}
10212 		}
10213 #ifdef SCTP_AUDITING_ENABLED
10214 		sctp_audit_log(0xC3, 2);
10215 #endif
10216 		bundle_at = 0;
10217 		m = NULL;
10218 		net->fast_retran_ip = 0;
10219 		if (chk->rec.data.doing_fast_retransmit == 0) {
10220 			/*
10221 			 * if no FR is in progress, skip destinations that
10222 			 * have flight_size >= cwnd.
10223 			 */
10224 			if (net->flight_size >= net->cwnd) {
10225 				continue;
10226 			}
10227 		} else {
10228 			/*
10229 			 * Mark the destination net to have FR recovery
10230 			 * limits put on it.
10231 			 */
10232 			*fr_done = 1;
10233 			net->fast_retran_ip = 1;
10234 		}
10235 
10236 		/*
10237 		 * if no AUTH is yet included and this chunk requires it,
10238 		 * make sure to account for it.  We don't apply the size
10239 		 * until the AUTH chunk is actually added below in case
10240 		 * there is no room for this chunk.
10241 		 */
10242 		if (data_auth_reqd && (auth == NULL)) {
10243 			dmtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
10244 		} else
10245 			dmtu = 0;
10246 
10247 		if ((chk->send_size <= (mtu - dmtu)) ||
10248 		    (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
10249 			/* ok we will add this one */
10250 			if (data_auth_reqd) {
10251 				if (auth == NULL) {
10252 					m = sctp_add_auth_chunk(m,
10253 								&endofchain,
10254 								&auth,
10255 								&auth_offset,
10256 								stcb,
10257 								SCTP_DATA);
10258 					auth_keyid = chk->auth_keyid;
10259 					override_ok = 0;
10260 					SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
10261 				} else if (override_ok) {
10262 					auth_keyid = chk->auth_keyid;
10263 					override_ok = 0;
10264 				} else if (chk->auth_keyid != auth_keyid) {
10265 					/* different keyid, so done bundling */
10266 					break;
10267 				}
10268 			}
10269 			m = sctp_copy_mbufchain(chk->data, m, &endofchain, 0, chk->send_size, chk->copy_by_ref);
10270 			if (m == NULL) {
10271 				SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
10272 				return (ENOMEM);
10273 			}
10274 			/* Do clear IP_DF ? */
10275 			if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
10276 				no_fragmentflg = 0;
10277 			}
10278 			/* update our MTU size */
10279 			if (mtu > (chk->send_size + dmtu))
10280 				mtu -= (chk->send_size + dmtu);
10281 			else
10282 				mtu = 0;
10283 			data_list[bundle_at++] = chk;
10284 			if (one_chunk && (asoc->total_flight <= 0)) {
10285 				SCTP_STAT_INCR(sctps_windowprobed);
10286 			}
10287 		}
10288 		if (one_chunk == 0) {
10289 			/*
10290 			 * now, are there any more chunks forward of chk on
10291 			 * the sent_queue to pick up?
10292 			 */
10293 			for (fwd = TAILQ_NEXT(chk, sctp_next); fwd != NULL; fwd = TAILQ_NEXT(fwd, sctp_next)) {
10294 				if (fwd->sent != SCTP_DATAGRAM_RESEND) {
10295 					/* Nope, not for retran */
10296 					continue;
10297 				}
10298 				if (fwd->whoTo != net) {
10299 					/* Nope, not the net in question */
10300 					continue;
10301 				}
10302 				if (data_auth_reqd && (auth == NULL)) {
10303 					dmtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
10304 				} else
10305 					dmtu = 0;
10306 				if (fwd->send_size <= (mtu - dmtu)) {
10307 					if (data_auth_reqd) {
10308 						if (auth == NULL) {
10309 							m = sctp_add_auth_chunk(m,
10310 										&endofchain,
10311 										&auth,
10312 										&auth_offset,
10313 										stcb,
10314 										SCTP_DATA);
10315 							auth_keyid = fwd->auth_keyid;
10316 							override_ok = 0;
10317 							SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
10318 						} else if (override_ok) {
10319 							auth_keyid = fwd->auth_keyid;
10320 							override_ok = 0;
10321 						} else if (fwd->auth_keyid != auth_keyid) {
10322 							/* different keyid, so done bundling */
10323 							break;
10324 						}
10325 					}
10326 					m = sctp_copy_mbufchain(fwd->data, m, &endofchain, 0, fwd->send_size, fwd->copy_by_ref);
10327 					if (m == NULL) {
10328 						SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
10329 						return (ENOMEM);
10330 					}
10331 					/* Do clear IP_DF ? */
10332 					if (fwd->flags & CHUNK_FLAGS_FRAGMENT_OK) {
10333 						no_fragmentflg = 0;
10334 					}
10335 					/* update our MTU size */
10336 					if (mtu > (fwd->send_size + dmtu))
10337 						mtu -= (fwd->send_size + dmtu);
10338 					else
10339 						mtu = 0;
10340 					data_list[bundle_at++] = fwd;
10341 					if (bundle_at >= SCTP_MAX_DATA_BUNDLING) {
10342 						break;
10343 					}
10344 				} else {
10345 					/* can't fit so we are done */
10346 					break;
10347 				}
10348 			}
10349 		}
10350 		/* Is there something to send for this destination? */
10351 		if (m) {
10352 			/*
10353 			 * No matter whether we fail or succeed we should
10354 			 * start a timer. A failure is like a lost IP packet :-)
10355 			 */
10356 			if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
10357 				/*
10358 				 * no timer running on this destination;
10359 				 * restart it.
10360 				 */
10361 				sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
10362 				tmr_started = 1;
10363 			}
10364 			/* Now let's send it, if there is anything to send :> */
10365 			if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
10366 			                                        (struct sockaddr *)&net->ro._l_addr, m,
10367 			                                        auth_offset, auth, auth_keyid,
10368 			                                        no_fragmentflg, 0, 0,
10369 			                                        inp->sctp_lport, stcb->rport, htonl(stcb->asoc.peer_vtag),
10370 			                                        net->port, NULL,
10371 #if defined(__FreeBSD__) && !defined(__Userspace__)
10372 			                                        0, 0,
10373 #endif
10374 			                                        so_locked))) {
10375 				/* error, we could not output */
10376 				SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
10377 				if (error == ENOBUFS) {
10378 					asoc->ifp_had_enobuf = 1;
10379 					SCTP_STAT_INCR(sctps_lowlevelerr);
10380 				}
10381 				return (error);
10382 			} else {
10383 				asoc->ifp_had_enobuf = 0;
10384 			}
10385 			endofchain = NULL;
10386 			auth = NULL;
10387 			auth_offset = 0;
10388 			/* For HB's */
10389 			/*
10390 			 * We don't want to mark the net->sent time here,
10391 			 * since we use it for HB and a retransmission
10392 			 * cannot measure RTT.
10393 			 */
10394 			/* (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time); */
10395 
10396 			/* For auto-close */
10397 			cnt_thru++;
10398 			if (*now_filled == 0) {
10399 				(void)SCTP_GETTIME_TIMEVAL(&asoc->time_last_sent);
10400 				*now = asoc->time_last_sent;
10401 				*now_filled = 1;
10402 			} else {
10403 				asoc->time_last_sent = *now;
10404 			}
10405 			*cnt_out += bundle_at;
10406 #ifdef SCTP_AUDITING_ENABLED
10407 			sctp_audit_log(0xC4, bundle_at);
10408 #endif
10409 			if (bundle_at) {
10410 				tsns_sent = data_list[0]->rec.data.tsn;
10411 			}
10412 			for (i = 0; i < bundle_at; i++) {
10413 				SCTP_STAT_INCR(sctps_sendretransdata);
10414 				data_list[i]->sent = SCTP_DATAGRAM_SENT;
10415 				/*
10416 				 * When we have revoked data and we
10417 				 * retransmit it, we clear the revoked
10418 				 * flag, since this flag dictates whether
10419 				 * we subtracted it from the flight size.
10420 				 */
10421 				if (data_list[i]->rec.data.chunk_was_revoked) {
10422 					/* Deflate the cwnd */
10423 					data_list[i]->whoTo->cwnd -= data_list[i]->book_size;
10424 					data_list[i]->rec.data.chunk_was_revoked = 0;
10425 				}
10426 				data_list[i]->snd_count++;
10427 				sctp_ucount_decr(asoc->sent_queue_retran_cnt);
10428 				/* record the time */
10429 				data_list[i]->sent_rcv_time = asoc->time_last_sent;
10430 				if (data_list[i]->book_size_scale) {
10431 					/*
10432 					 * need to double the book size on
10433 					 * this one
10434 					 */
10435 					data_list[i]->book_size_scale = 0;
10436 					/* Since we double the book size, we must
10437 					 * also double the output queue size, since it
10438 					 * gets shrunk by this amount when we free.
10439 					 */
10440 					atomic_add_int(&((asoc)->total_output_queue_size),data_list[i]->book_size);
10441 					data_list[i]->book_size *= 2;
10442 
10443 
10444 				} else {
10445 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
10446 						sctp_log_rwnd(SCTP_DECREASE_PEER_RWND,
10447 						      asoc->peers_rwnd, data_list[i]->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
10448 					}
10449 					asoc->peers_rwnd = sctp_sbspace_sub(asoc->peers_rwnd,
10450 									    (uint32_t) (data_list[i]->send_size +
10451 											SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)));
10452 				}
10453 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
10454 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_RSND,
10455 						       data_list[i]->whoTo->flight_size,
10456 						       data_list[i]->book_size,
10457 						       (uint32_t)(uintptr_t)data_list[i]->whoTo,
10458 						       data_list[i]->rec.data.tsn);
10459 				}
10460 				sctp_flight_size_increase(data_list[i]);
10461 				sctp_total_flight_increase(stcb, data_list[i]);
10462 				if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
10463 					/* SWS sender side engages */
10464 					asoc->peers_rwnd = 0;
10465 				}
10466 				if ((i == 0) &&
10467 				    (data_list[i]->rec.data.doing_fast_retransmit)) {
10468 					SCTP_STAT_INCR(sctps_sendfastretrans);
10469 					if ((data_list[i] == TAILQ_FIRST(&asoc->sent_queue)) &&
10470 					    (tmr_started == 0)) {
10471 						/*-
10472 						 * ok we just fast-retrans'd
10473 						 * the lowest TSN, i.e. the
10474 						 * first on the list. In
10475 						 * this case we want to give
10476 						 * some more time to get a
10477 						 * SACK back without the
10478 						 * T3 timer expiring.
10479 						 */
10480 						sctp_timer_stop(SCTP_TIMER_TYPE_SEND, inp, stcb, net,
10481 						                SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_2);
10482 						sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
10483 					}
10484 				}
10485 			}
10486 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
10487 				sctp_log_cwnd(stcb, net, tsns_sent, SCTP_CWND_LOG_FROM_RESEND);
10488 			}
10489 #ifdef SCTP_AUDITING_ENABLED
10490 			sctp_auditing(21, inp, stcb, NULL);
10491 #endif
10492 		} else {
10493 			/* None will fit */
10494 			return (1);
10495 		}
10496 		if (asoc->sent_queue_retran_cnt <= 0) {
10497 			/* all done we have no more to retran */
10498 			asoc->sent_queue_retran_cnt = 0;
10499 			break;
10500 		}
10501 		if (one_chunk) {
10502 			/* No more room in rwnd */
10503 			return (1);
10504 		}
10505 		/* stop the for loop here. we sent out a packet */
10506 		break;
10507 	}
10508 	return (0);
10509 }
10510 
10511 static void
10512 sctp_timer_validation(struct sctp_inpcb *inp,
10513     struct sctp_tcb *stcb,
10514     struct sctp_association *asoc)
10515 {
10516 	struct sctp_nets *net;
10517 
10518 	/* Validate that a timer is running somewhere */
10519 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
10520 		if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
10521 			/* Here is a timer */
10522 			return;
10523 		}
10524 	}
10525 	SCTP_TCB_LOCK_ASSERT(stcb);
10526 	/* Gak, we did not have a timer somewhere */
10527 	SCTPDBG(SCTP_DEBUG_OUTPUT3, "Deadlock avoided starting timer on a dest at retran\n");
10528 	if (asoc->alternate) {
10529 		sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, asoc->alternate);
10530 	} else {
10531 		sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, asoc->primary_destination);
10532 	}
10533 	return;
10534 }
10535 
10536 void
10537 sctp_chunk_output(struct sctp_inpcb *inp,
10538     struct sctp_tcb *stcb,
10539     int from_where,
10540     int so_locked)
10541 {
10542 	/*-
10543 	 * Ok this is the generic chunk service queue. We must do the
10544 	 * following:
10545 	 * - See if there are retransmits pending; if so we must
10546 	 *   do these first.
10547 	 * - Service the stream queue that is next, moving any
10548 	 *   message (note we must get a complete message, i.e.
10549 	 *   FIRST/MIDDLE and LAST, to the out queue in one pass) and
10550 	 *   assigning TSNs.
10551 	 * - Check to see if the cwnd/rwnd allows any output; if so,
10552 	 *   go ahead and formulate and send the low level chunks, making
10553 	 *   sure to combine any control in the control chunk queue also.
10554 	 */
10555 	struct sctp_association *asoc;
10556 	struct sctp_nets *net;
10557 	int error = 0, num_out, tot_out = 0, ret = 0, reason_code;
10558 	unsigned int burst_cnt = 0;
10559 	struct timeval now;
10560 	int now_filled = 0;
10561 	int nagle_on;
10562 	int frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
10563 	int un_sent = 0;
10564 	int fr_done;
10565 	unsigned int tot_frs = 0;
10566 
10567 #if defined(__APPLE__) && !defined(__Userspace__)
10568 	if (so_locked) {
10569 		sctp_lock_assert(SCTP_INP_SO(inp));
10570 	} else {
10571 		sctp_unlock_assert(SCTP_INP_SO(inp));
10572 	}
10573 #endif
10574 	asoc = &stcb->asoc;
10575 do_it_again:
10576 	/* The Nagle algorithm is only applied when handling a send call. */
10577 	if (from_where == SCTP_OUTPUT_FROM_USR_SEND) {
10578 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NODELAY)) {
10579 			nagle_on = 0;
10580 		} else {
10581 			nagle_on = 1;
10582 		}
10583 	} else {
10584 		nagle_on = 0;
10585 	}
10586 	SCTP_TCB_LOCK_ASSERT(stcb);
10587 
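	/*
	 * un_sent is the number of bytes queued on this association that
	 * are not yet in flight.  If nothing is unsent and there are no
	 * control/asconf chunks, retransmissions or pending stream resets,
	 * there is nothing to do.
	 */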
10588 	un_sent = (stcb->asoc.total_output_queue_size - stcb->asoc.total_flight);
10589 
10590 	if ((un_sent <= 0) &&
10591 	    (TAILQ_EMPTY(&asoc->control_send_queue)) &&
10592 	    (TAILQ_EMPTY(&asoc->asconf_send_queue)) &&
10593 	    (asoc->sent_queue_retran_cnt == 0) &&
10594 	    (asoc->trigger_reset == 0)) {
10595 		/* Nothing to do unless there is something to be sent left */
10596 		return;
10597 	}
10598 	/* If we have something to send (data or control) AND
10599 	 * a SACK timer is running, piggy-back the SACK.
10600 	 */
10601 	if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
10602 		sctp_send_sack(stcb, so_locked);
10603 		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
10604 		                SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_3);
10605 	}
10606 	while (asoc->sent_queue_retran_cnt) {
10607 		/*-
10608 		 * Ok, it is retransmission time only, we send out only ONE
10609 		 * packet with a single call off to the retran code.
10610 		 */
10611 		if (from_where == SCTP_OUTPUT_FROM_COOKIE_ACK) {
10612 			/*-
10613 			 * Special hook for handling cookies discarded
10614 			 * by the peer that carried data. Send the cookie-ack
10615 			 * only, and then the next call will get the retransmissions.
10616 			 */
10617 			(void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1,
10618 						    from_where,
10619 						    &now, &now_filled, frag_point, so_locked);
10620 			return;
10621 		} else if (from_where != SCTP_OUTPUT_FROM_HB_TMR) {
10622 			/* if its not from a HB then do it */
10623 			fr_done = 0;
10624 			ret = sctp_chunk_retransmission(inp, stcb, asoc, &num_out, &now, &now_filled, &fr_done, so_locked);
10625 			if (fr_done) {
10626 				tot_frs++;
10627 			}
10628 		} else {
10629 			/*
10630 			 * it's from any other place, we don't allow retran
10631 			 * output (only control)
10632 			 */
10633 			ret = 1;
10634 		}
10635 		if (ret > 0) {
10636 			/* Can't send anymore */
10637 			/*-
10638 			 * now let's push out control by calling med-level
10639 			 * output once. This assures that we WILL send HBs
10640 			 * if queued too.
10641 			 */
10642 			(void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1,
10643 						    from_where,
10644 						    &now, &now_filled, frag_point, so_locked);
10645 #ifdef SCTP_AUDITING_ENABLED
10646 			sctp_auditing(8, inp, stcb, NULL);
10647 #endif
10648 			sctp_timer_validation(inp, stcb, asoc);
10649 			return;
10650 		}
10651 		if (ret < 0) {
10652 			/*-
10653 			 * The count was off; retransmission is not happening,
10654 			 * so fall through to the normal transmission.
10655 			 */
10656 #ifdef SCTP_AUDITING_ENABLED
10657 			sctp_auditing(9, inp, stcb, NULL);
10658 #endif
10659 			if (ret == SCTP_RETRAN_EXIT) {
10660 				return;
10661 			}
10662 			break;
10663 		}
10664 		if (from_where == SCTP_OUTPUT_FROM_T3) {
10665 			/* Only one transmission allowed out of a timeout */
10666 #ifdef SCTP_AUDITING_ENABLED
10667 			sctp_auditing(10, inp, stcb, NULL);
10668 #endif
10669 			/* Push out any control */
10670 			(void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1, from_where,
10671 						    &now, &now_filled, frag_point, so_locked);
10672 			return;
10673 		}
10674 		if ((asoc->fr_max_burst > 0) && (tot_frs >= asoc->fr_max_burst)) {
10675 			/* Hit FR burst limit */
10676 			return;
10677 		}
10678 		if ((num_out == 0) && (ret == 0)) {
10679 			/* No more retrans to send */
10680 			break;
10681 		}
10682 	}
10683 #ifdef SCTP_AUDITING_ENABLED
10684 	sctp_auditing(12, inp, stcb, NULL);
10685 #endif
10686 	/* Check for bad destinations, if they exist move chunks around. */
10687 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
10688 		if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
10689 			/*-
10690 			 * If possible, move things off of this address. We
10691 			 * may still send below due to the dormant state, but
10692 			 * we try to find an alternate address to send to,
10693 			 * and if we have one we move all queued data on the
10694 			 * out wheel to that alternate address.
10695 			 */
10696 			if (net->ref_count > 1)
10697 				sctp_move_chunks_from_net(stcb, net);
10698 		} else {
10699 			/*-
10700 			 * if ((asoc->sat_network) || (net->addr_is_local))
10701 			 * { burst_limit = asoc->max_burst *
10702 			 * SCTP_SAT_NETWORK_BURST_INCR; }
10703 			 */
10704 			if (asoc->max_burst > 0) {
10705 				if (SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst)) {
10706 					if ((net->flight_size + (asoc->max_burst * net->mtu)) < net->cwnd) {
10707 						/* JRS - Use the congestion control given in the congestion control module */
10708 						asoc->cc_functions.sctp_cwnd_update_after_output(stcb, net, asoc->max_burst);
10709 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
10710 							sctp_log_maxburst(stcb, net, 0, asoc->max_burst, SCTP_MAX_BURST_APPLIED);
10711 						}
10712 						SCTP_STAT_INCR(sctps_maxburstqueued);
10713 					}
10714 					net->fast_retran_ip = 0;
10715 				} else {
10716 					if (net->flight_size == 0) {
10717 						/* Should be decaying the cwnd here */
10718 						;
10719 					}
10720 				}
10721 			}
10722 		}
10723 
10724 	}
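	/*
	 * Main output loop: keep calling the medium-level output routine
	 * until it puts out nothing, Nagle stops us, the queues drain, or
	 * the (non-cwnd-based) max-burst limit is hit.
	 */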
10725 	burst_cnt = 0;
10726 	do {
10727 		error = sctp_med_chunk_output(inp, stcb, asoc, &num_out,
10728 					      &reason_code, 0, from_where,
10729 					      &now, &now_filled, frag_point, so_locked);
10730 		if (error) {
10731 			SCTPDBG(SCTP_DEBUG_OUTPUT1, "Error %d was returned from med-c-op\n", error);
10732 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
10733 				sctp_log_maxburst(stcb, asoc->primary_destination, error, burst_cnt, SCTP_MAX_BURST_ERROR_STOP);
10734 			}
10735 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
10736 				sctp_log_cwnd(stcb, NULL, error, SCTP_SEND_NOW_COMPLETES);
10737 				sctp_log_cwnd(stcb, NULL, 0xdeadbeef, SCTP_SEND_NOW_COMPLETES);
10738 			}
10739 			break;
10740 		}
10741 		SCTPDBG(SCTP_DEBUG_OUTPUT3, "m-c-o put out %d\n", num_out);
10742 
10743 		tot_out += num_out;
10744 		burst_cnt++;
10745 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
10746 			sctp_log_cwnd(stcb, NULL, num_out, SCTP_SEND_NOW_COMPLETES);
10747 			if (num_out == 0) {
10748 				sctp_log_cwnd(stcb, NULL, reason_code, SCTP_SEND_NOW_COMPLETES);
10749 			}
10750 		}
10751 		if (nagle_on) {
10752 			/*
10753 			 * When the Nagle algorithm is used, look at how much
10754 			 * is unsent; if it is smaller than an MTU and we
10755 			 * have data in flight we stop, except if we are
10756 			 * handling a fragmented user message.
10757 			 */
10758 			un_sent = stcb->asoc.total_output_queue_size - stcb->asoc.total_flight;
10759 			if ((un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD)) &&
10760 			    (stcb->asoc.total_flight > 0)) {
10761 /*	&&		     sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR))) {*/
10762 				break;
10763 			}
10764 		}
10765 		if (TAILQ_EMPTY(&asoc->control_send_queue) &&
10766 		    TAILQ_EMPTY(&asoc->send_queue) &&
10767 		    sctp_is_there_unsent_data(stcb, so_locked) == 0) {
10768 			/* Nothing left to send */
10769 			break;
10770 		}
10771 		if ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) <= 0) {
10772 			/* Nothing left to send */
10773 			break;
10774 		}
10775 	} while (num_out &&
10776 	         ((asoc->max_burst == 0) ||
10777 		  SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst) ||
10778 		  (burst_cnt < asoc->max_burst)));
10779 
10780 	if (SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst) == 0) {
10781 		if ((asoc->max_burst > 0) && (burst_cnt >= asoc->max_burst)) {
10782 			SCTP_STAT_INCR(sctps_maxburstqueued);
10783 			asoc->burst_limit_applied = 1;
10784 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
10785 				sctp_log_maxburst(stcb, asoc->primary_destination, 0, burst_cnt, SCTP_MAX_BURST_APPLIED);
10786 			}
10787 		} else {
10788 			asoc->burst_limit_applied = 0;
10789 		}
10790 	}
10791 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
10792 		sctp_log_cwnd(stcb, NULL, tot_out, SCTP_SEND_NOW_COMPLETES);
10793 	}
10794 	SCTPDBG(SCTP_DEBUG_OUTPUT1, "Ok, we have put out %d chunks\n",
10795 		tot_out);
10796 
10797 	/*-
10798 	 * Now we need to clean up the control chunk chain if an ECNE is on
10799 	 * it. It must be marked as UNSENT again so the next call will
10800 	 * continue to send it until we get a CWR that removes it.
10801 	 */
10802 	if (stcb->asoc.ecn_echo_cnt_onq)
10803 		sctp_fix_ecn_echo(asoc);
10804 
10805 	if (stcb->asoc.trigger_reset) {
10806 		if (sctp_send_stream_reset_out_if_possible(stcb, so_locked) == 0)  {
10807 			goto do_it_again;
10808 		}
10809 	}
10810 	return;
10811 }
10812 
10813 
10814 int
10815 sctp_output(
10816 	struct sctp_inpcb *inp,
10817 	struct mbuf *m,
10818 	struct sockaddr *addr,
10819 	struct mbuf *control,
10820 #if defined(__FreeBSD__) && !defined(__Userspace__)
10821 	struct thread *p,
10822 #elif defined(_WIN32) && !defined(__Userspace__)
10823 	PKTHREAD p,
10824 #else
10825 #if defined(__APPLE__) && !defined(__Userspace__)
10826 	struct proc *p SCTP_UNUSED,
10827 #else
10828 	struct proc *p,
10829 #endif
10830 #endif
10831 	int flags)
10832 {
10833 	if (inp == NULL) {
10834 		SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
10835 		return (EINVAL);
10836 	}
10837 
10838 	if (inp->sctp_socket == NULL) {
10839 		SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
10840 		return (EINVAL);
10841 	}
10842 	return (sctp_sosend(inp->sctp_socket,
10843 			    addr,
10844 			    (struct uio *)NULL,
10845 			    m,
10846 			    control,
10847 #if defined(__APPLE__) && !defined(__Userspace__)
10848 			    flags
10849 #else
10850 			    flags, p
10851 #endif
10852 			));
10853 }
10854 
10855 void
10856 send_forward_tsn(struct sctp_tcb *stcb,
10857 		 struct sctp_association *asoc)
10858 {
10859 	struct sctp_tmit_chunk *chk, *at, *tp1, *last;
10860 	struct sctp_forward_tsn_chunk *fwdtsn;
10861 	struct sctp_strseq *strseq;
10862 	struct sctp_strseq_mid *strseq_m;
10863 	uint32_t advance_peer_ack_point;
10864 	unsigned int cnt_of_space, i, ovh;
10865 	unsigned int space_needed;
10866 	unsigned int cnt_of_skipped = 0;
10867 
10868 	SCTP_TCB_LOCK_ASSERT(stcb);
10869 	TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
10870 		if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) {
10871 			/* mark it as unsent */
10872 			chk->sent = SCTP_DATAGRAM_UNSENT;
10873 			chk->snd_count = 0;
10874 			/* Do we correct its output location? */
10875 			if (chk->whoTo) {
10876 				sctp_free_remote_addr(chk->whoTo);
10877 				chk->whoTo = NULL;
10878 			}
10879 			goto sctp_fill_in_rest;
10880 		}
10881 	}
10882 	/* Ok if we reach here we must build one */
10883 	sctp_alloc_a_chunk(stcb, chk);
10884 	if (chk == NULL) {
10885 		return;
10886 	}
10887 	asoc->fwd_tsn_cnt++;
10888 	chk->copy_by_ref = 0;
10889 	/*
10890 	 * We don't do the old thing here, since this is
10891 	 * not used on-wire but only to tell whether the
10892 	 * stack is sending a fwd-tsn during output. And
10893 	 * whether it is an IFORWARD or a FORWARD, it is
10894 	 * a fwd-tsn.
10895 	 */
10896 	chk->rec.chunk_id.id = SCTP_FORWARD_CUM_TSN;
10897 	chk->rec.chunk_id.can_take_data = 0;
10898 	chk->flags = 0;
10899 	chk->asoc = asoc;
10900 	chk->whoTo = NULL;
10901 	chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
10902 	if (chk->data == NULL) {
10903 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
10904 		return;
10905 	}
10906 	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
10907 	chk->sent = SCTP_DATAGRAM_UNSENT;
10908 	chk->snd_count = 0;
10909 	TAILQ_INSERT_TAIL(&asoc->control_send_queue, chk, sctp_next);
10910 	asoc->ctrl_queue_cnt++;
10911 sctp_fill_in_rest:
10912 	/*-
10913 	 * Here we go through and fill out the part that deals with
10914 	 * stream/seq of the ones we skip.
10915 	 */
10916 	SCTP_BUF_LEN(chk->data) = 0;
10917 	TAILQ_FOREACH(at, &asoc->sent_queue, sctp_next) {
10918 		if ((at->sent != SCTP_FORWARD_TSN_SKIP) &&
10919 		    (at->sent != SCTP_DATAGRAM_NR_ACKED)) {
10920 			/* no more to look at */
10921 			break;
10922 		}
10923 		if (!asoc->idata_supported && (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED)) {
10924 			/* We don't report these */
10925 			continue;
10926 		}
10927 		cnt_of_skipped++;
10928 	}
10929 	if (asoc->idata_supported) {
10930 		space_needed = (sizeof(struct sctp_forward_tsn_chunk) +
10931 		                (cnt_of_skipped * sizeof(struct sctp_strseq_mid)));
10932 	} else {
10933 		space_needed = (sizeof(struct sctp_forward_tsn_chunk) +
10934 		                (cnt_of_skipped * sizeof(struct sctp_strseq)));
10935 	}
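	/*
	 * Example (sizes for illustration only; the sizeof() terms above are
	 * what actually matters): skipping 10 chunks with I-DATA enabled
	 * needs sizeof(struct sctp_forward_tsn_chunk) +
	 * 10 * sizeof(struct sctp_strseq_mid), roughly 8 + 10 * 8 = 88 bytes;
	 * without I-DATA each entry costs sizeof(struct sctp_strseq) instead.
	 */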
10936 	cnt_of_space = (unsigned int)M_TRAILINGSPACE(chk->data);
10937 
10938 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
10939 		ovh = SCTP_MIN_OVERHEAD;
10940 	} else {
10941 		ovh = SCTP_MIN_V4_OVERHEAD;
10942 	}
10943 	if (cnt_of_space > (asoc->smallest_mtu - ovh)) {
10944 		/* trim to an MTU size */
10945 		cnt_of_space = asoc->smallest_mtu - ovh;
10946 	}
10947 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
10948 		sctp_misc_ints(SCTP_FWD_TSN_CHECK,
10949 			       0xff, 0, cnt_of_skipped,
10950 			       asoc->advanced_peer_ack_point);
10951 	}
10952 	advance_peer_ack_point = asoc->advanced_peer_ack_point;
10953 	if (cnt_of_space < space_needed) {
10954 		/*-
10955 		 * ok we must trim down the chunk by lowering the
10956 		 * advance peer ack point.
10957 		 */
10958 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
10959 			sctp_misc_ints(SCTP_FWD_TSN_CHECK,
10960 				       0xff, 0xff, cnt_of_space,
10961 				       space_needed);
10962 		}
10963 		cnt_of_skipped = cnt_of_space - sizeof(struct sctp_forward_tsn_chunk);
10964 		if (asoc->idata_supported) {
10965 			cnt_of_skipped /= sizeof(struct sctp_strseq_mid);
10966 		} else {
10967 			cnt_of_skipped /= sizeof(struct sctp_strseq);
10968 		}
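		/*
		 * Example of the trim: if only 512 bytes of space remain and
		 * I-DATA is off, cnt_of_skipped is recomputed as
		 * (512 - sizeof(struct sctp_forward_tsn_chunk)) /
		 * sizeof(struct sctp_strseq) entries, and below the advance
		 * peer ack point is pulled back to the last entry that still
		 * fits.
		 */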
10969 		/*-
10970 		 * Go through and find the TSN that will be the one
10971 		 * we report.
10972 		 */
10973 		at = TAILQ_FIRST(&asoc->sent_queue);
10974 		if (at != NULL) {
10975 			for (i = 0; i < cnt_of_skipped; i++) {
10976 				tp1 = TAILQ_NEXT(at, sctp_next);
10977 				if (tp1 == NULL) {
10978 					break;
10979 				}
10980 				at = tp1;
10981 			}
10982 		}
10983 		if (at && SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
10984 			sctp_misc_ints(SCTP_FWD_TSN_CHECK,
10985 				       0xff, cnt_of_skipped, at->rec.data.tsn,
10986 				       asoc->advanced_peer_ack_point);
10987 		}
10988 		last = at;
10989 		/*-
10990 		 * last now points to last one I can report, update
10991 		 * peer ack point
10992 		 */
10993 		if (last) {
10994 			advance_peer_ack_point = last->rec.data.tsn;
10995 		}
10996 		if (asoc->idata_supported) {
10997 			space_needed = sizeof(struct sctp_forward_tsn_chunk) +
10998 			               cnt_of_skipped * sizeof(struct sctp_strseq_mid);
10999 		} else {
11000 			space_needed = sizeof(struct sctp_forward_tsn_chunk) +
11001 			               cnt_of_skipped * sizeof(struct sctp_strseq);
11002 		}
11003 	}
11004 	chk->send_size = space_needed;
11005 	/* Setup the chunk */
11006 	fwdtsn = mtod(chk->data, struct sctp_forward_tsn_chunk *);
11007 	fwdtsn->ch.chunk_length = htons(chk->send_size);
11008 	fwdtsn->ch.chunk_flags = 0;
11009 	if (asoc->idata_supported) {
11010 		fwdtsn->ch.chunk_type = SCTP_IFORWARD_CUM_TSN;
11011 	} else {
11012 		fwdtsn->ch.chunk_type = SCTP_FORWARD_CUM_TSN;
11013 	}
11014 	fwdtsn->new_cumulative_tsn = htonl(advance_peer_ack_point);
11015 	SCTP_BUF_LEN(chk->data) = chk->send_size;
11016 	fwdtsn++;
11017 	/*-
11018 	 * Move pointer to after the fwdtsn and transfer to the
11019 	 * strseq pointer.
11020 	 */
11021 	if (asoc->idata_supported) {
11022 		strseq_m = (struct sctp_strseq_mid *)fwdtsn;
11023 		strseq = NULL;
11024 	} else {
11025 		strseq = (struct sctp_strseq *)fwdtsn;
11026 		strseq_m = NULL;
11027 	}
11028 	/*-
11029 	 * Now populate the strseq list. This is done blindly
11030 	 * without pulling out duplicate stream info. This is
11031 	 * inefficient but won't harm the process, since the peer
11032 	 * will look at these in sequence and will thus release
11033 	 * everything. It could mean we exceed the PMTU and chop
11034 	 * off some entries that we could have included, but this
11035 	 * is unlikely (1432/4 would mean 300+ stream/seq pairs
11036 	 * would have to be reported in one FWD-TSN). With a bit
11037 	 * of work we could later fix this to pull out duplicates,
11038 	 * but that adds more overhead. So for now... not!
11039 	 */
11040 	i = 0;
11041 	TAILQ_FOREACH(at, &asoc->sent_queue, sctp_next) {
11042 		if (i >= cnt_of_skipped) {
11043 			break;
11044 		}
11045 		if (!asoc->idata_supported && (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED)) {
11046 			/* We don't report these */
11047 			continue;
11048 		}
11049 		if (at->rec.data.tsn == advance_peer_ack_point) {
11050 			at->rec.data.fwd_tsn_cnt = 0;
11051 		}
11052 		if (asoc->idata_supported) {
11053 			strseq_m->sid = htons(at->rec.data.sid);
11054 			if (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED) {
11055 				strseq_m->flags = htons(PR_SCTP_UNORDERED_FLAG);
11056 			} else {
11057 				strseq_m->flags = 0;
11058 			}
11059 			strseq_m->mid = htonl(at->rec.data.mid);
11060 			strseq_m++;
11061 		} else {
11062 			strseq->sid = htons(at->rec.data.sid);
11063 			strseq->ssn = htons((uint16_t)at->rec.data.mid);
11064 			strseq++;
11065 		}
11066 		i++;
11067 	}
11068 	return;
11069 }
11070 
11071 void
11072 sctp_send_sack(struct sctp_tcb *stcb, int so_locked)
11073 {
11074 	/*-
11075 	 * Queue up a SACK or NR-SACK in the control queue.
11076 	 * We must first check to see if a SACK or NR-SACK is
11077 	 * somehow already on the control queue.
11078 	 * If so, we will take it and remove the old one.
11079 	 */
11080 	struct sctp_association *asoc;
11081 	struct sctp_tmit_chunk *chk, *a_chk;
11082 	struct sctp_sack_chunk *sack;
11083 	struct sctp_nr_sack_chunk *nr_sack;
11084 	struct sctp_gap_ack_block *gap_descriptor;
11085 	const struct sack_track *selector;
11086 	int mergeable = 0;
11087 	int offset;
11088 	caddr_t limit;
11089 	uint32_t *dup;
11090 	int limit_reached = 0;
11091 	unsigned int i, siz, j;
11092 	unsigned int num_gap_blocks = 0, num_nr_gap_blocks = 0, space;
11093 	int num_dups = 0;
11094 	int space_req;
11095 	uint32_t highest_tsn;
11096 	uint8_t flags;
11097 	uint8_t type;
11098 	uint8_t tsn_map;
11099 
11100 	if (stcb->asoc.nrsack_supported == 1) {
11101 		type = SCTP_NR_SELECTIVE_ACK;
11102 	} else {
11103 		type = SCTP_SELECTIVE_ACK;
11104 	}
11105 	a_chk = NULL;
11106 	asoc = &stcb->asoc;
11107 	SCTP_TCB_LOCK_ASSERT(stcb);
11108 	if (asoc->last_data_chunk_from == NULL) {
11109 		/* Hmm we never received anything */
11110 		return;
11111 	}
11112 	sctp_slide_mapping_arrays(stcb);
11113 	sctp_set_rwnd(stcb, asoc);
11114 	TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
11115 		if (chk->rec.chunk_id.id == type) {
11116 			/* Hmm, found a sack already on queue, remove it */
11117 			TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
11118 			asoc->ctrl_queue_cnt--;
11119 			a_chk = chk;
11120 			if (a_chk->data) {
11121 				sctp_m_freem(a_chk->data);
11122 				a_chk->data = NULL;
11123 			}
11124 			if (a_chk->whoTo) {
11125 				sctp_free_remote_addr(a_chk->whoTo);
11126 				a_chk->whoTo = NULL;
11127 			}
11128 			break;
11129 		}
11130 	}
11131 	if (a_chk == NULL) {
11132 		sctp_alloc_a_chunk(stcb, a_chk);
11133 		if (a_chk == NULL) {
11134 			/* No memory so we drop the idea, and set a timer */
11135 			if (stcb->asoc.delayed_ack) {
11136 				sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
11137 				                stcb->sctp_ep, stcb, NULL,
11138 				                SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_4);
11139 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
11140 				    stcb->sctp_ep, stcb, NULL);
11141 			} else {
11142 				stcb->asoc.send_sack = 1;
11143 			}
11144 			return;
11145 		}
11146 		a_chk->copy_by_ref = 0;
11147 		a_chk->rec.chunk_id.id = type;
11148 		a_chk->rec.chunk_id.can_take_data = 1;
11149 	}
11150 	/* Clear our pkt counts */
11151 	asoc->data_pkts_seen = 0;
11152 
11153 	a_chk->flags = 0;
11154 	a_chk->asoc = asoc;
11155 	a_chk->snd_count = 0;
11156 	a_chk->send_size = 0;	/* fill in later */
11157 	a_chk->sent = SCTP_DATAGRAM_UNSENT;
11158 	a_chk->whoTo = NULL;
11159 
11160 	if (!(asoc->last_data_chunk_from->dest_state & SCTP_ADDR_REACHABLE)) {
11161 		/*-
11162 		 * Ok, the destination for the SACK is unreachable; let's see if
11163 		 * we can select an alternate to asoc->last_data_chunk_from.
11164 		 */
11165 		a_chk->whoTo = sctp_find_alternate_net(stcb, asoc->last_data_chunk_from, 0);
11166 		if (a_chk->whoTo == NULL) {
11167 			/* Nope, no alternate */
11168 			a_chk->whoTo = asoc->last_data_chunk_from;
11169 		}
11170 	} else {
11171 		a_chk->whoTo = asoc->last_data_chunk_from;
11172 	}
11173 	if (a_chk->whoTo) {
11174 		atomic_add_int(&a_chk->whoTo->ref_count, 1);
11175 	}
11176 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_map, asoc->highest_tsn_inside_nr_map)) {
11177 		highest_tsn = asoc->highest_tsn_inside_map;
11178 	} else {
11179 		highest_tsn = asoc->highest_tsn_inside_nr_map;
11180 	}
11181 	if (highest_tsn == asoc->cumulative_tsn) {
11182 		/* no gaps */
11183 		if (type == SCTP_SELECTIVE_ACK) {
11184 			space_req = sizeof(struct sctp_sack_chunk);
11185 		} else {
11186 			space_req = sizeof(struct sctp_nr_sack_chunk);
11187 		}
11188 	} else {
11189 		/* gaps get a cluster */
11190 		space_req = MCLBYTES;
11191 	}
11192 	/* Ok, now let's formulate an mbuf with our SACK */
11193 	a_chk->data = sctp_get_mbuf_for_msg(space_req, 0, M_NOWAIT, 1, MT_DATA);
11194 	if ((a_chk->data == NULL) ||
11195 	    (a_chk->whoTo == NULL)) {
11196 		/* rats, no mbuf memory */
11197 		if (a_chk->data) {
11198 			/* was a problem with the destination */
11199 			sctp_m_freem(a_chk->data);
11200 			a_chk->data = NULL;
11201 		}
11202 		sctp_free_a_chunk(stcb, a_chk, so_locked);
11203 		/* sa_ignore NO_NULL_CHK */
11204 		if (stcb->asoc.delayed_ack) {
11205 			sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
11206 			                stcb->sctp_ep, stcb, NULL,
11207 			                SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_5);
11208 			sctp_timer_start(SCTP_TIMER_TYPE_RECV,
11209 			    stcb->sctp_ep, stcb, NULL);
11210 		} else {
11211 			stcb->asoc.send_sack = 1;
11212 		}
11213 		return;
11214 	}
11215 	/* ok, let's go through and fill it in */
11216 	SCTP_BUF_RESV_UF(a_chk->data, SCTP_MIN_OVERHEAD);
11217 	space = (unsigned int)M_TRAILINGSPACE(a_chk->data);
11218 	if (space > (a_chk->whoTo->mtu - SCTP_MIN_OVERHEAD)) {
11219 		space = (a_chk->whoTo->mtu - SCTP_MIN_OVERHEAD);
11220 	}
11221 	limit = mtod(a_chk->data, caddr_t);
11222 	limit += space;
11223 
11224 	flags = 0;
11225 
11226 	if ((asoc->sctp_cmt_on_off > 0) &&
11227 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
11228 		/*-
11229 		 * CMT DAC algorithm: If 2 (i.e., binary 10) packets have been
11230 		 * received, then set the high bit to 1, else 0. Reset
11231 		 * pkts_rcvd.
11232 		 */
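		/*
		 * Shifting the received-packet count left by 6 places its low
		 * two bits in the two most significant bits of the chunk
		 * flags; e.g. a count of 2 (binary 10) yields flags of 0x80,
		 * setting the high bit as described above.
		 */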
11233 		flags |= (asoc->cmt_dac_pkts_rcvd << 6);
11234 		asoc->cmt_dac_pkts_rcvd = 0;
11235 	}
11236 #ifdef SCTP_ASOCLOG_OF_TSNS
11237 	stcb->asoc.cumack_logsnt[stcb->asoc.cumack_log_atsnt] = asoc->cumulative_tsn;
11238 	stcb->asoc.cumack_log_atsnt++;
11239 	if (stcb->asoc.cumack_log_atsnt >= SCTP_TSN_LOG_SIZE) {
11240 		stcb->asoc.cumack_log_atsnt = 0;
11241 	}
11242 #endif
11243 	/* reset the reader's interpretation */
11244 	stcb->freed_by_sorcv_sincelast = 0;
11245 
11246 	if (type == SCTP_SELECTIVE_ACK) {
11247 		sack = mtod(a_chk->data, struct sctp_sack_chunk *);
11248 		nr_sack = NULL;
11249 		gap_descriptor = (struct sctp_gap_ack_block *)((caddr_t)sack + sizeof(struct sctp_sack_chunk));
11250 		if (highest_tsn > asoc->mapping_array_base_tsn) {
11251 			siz = (((highest_tsn - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
11252 		} else {
11253 			siz = (((MAX_TSN - asoc->mapping_array_base_tsn) + 1) + highest_tsn + 7) / 8;
11254 		}
11255 	} else {
11256 		sack = NULL;
11257 		nr_sack = mtod(a_chk->data, struct sctp_nr_sack_chunk *);
11258 		gap_descriptor = (struct sctp_gap_ack_block *)((caddr_t)nr_sack + sizeof(struct sctp_nr_sack_chunk));
11259 		if (asoc->highest_tsn_inside_map > asoc->mapping_array_base_tsn) {
11260 			siz = (((asoc->highest_tsn_inside_map - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
11261 		} else {
11262 			siz = (((MAX_TSN - asoc->mapping_array_base_tsn) + 1) + asoc->highest_tsn_inside_map + 7) / 8;
11263 		}
11264 	}
11265 
11266 	if (SCTP_TSN_GT(asoc->mapping_array_base_tsn, asoc->cumulative_tsn)) {
11267 		offset = 1;
11268 	} else {
11269 		offset = asoc->mapping_array_base_tsn - asoc->cumulative_tsn;
11270 	}
11271 	if (((type == SCTP_SELECTIVE_ACK) &&
11272 	     SCTP_TSN_GT(highest_tsn, asoc->cumulative_tsn)) ||
11273 	    ((type == SCTP_NR_SELECTIVE_ACK) &&
11274 	     SCTP_TSN_GT(asoc->highest_tsn_inside_map, asoc->cumulative_tsn))) {
11275 		/* we have a gap .. maybe */
11276 		for (i = 0; i < siz; i++) {
11277 			tsn_map = asoc->mapping_array[i];
11278 			if (type == SCTP_SELECTIVE_ACK) {
11279 				tsn_map |= asoc->nr_mapping_array[i];
11280 			}
11281 			if (i == 0) {
11282 				/*
11283 				 * Clear all bits corresponding to TSNs smaller
11284 				 * than or equal to the cumulative TSN.
11285 				 */
11286 				tsn_map &= (~0U << (1 - offset));
11287 			}
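			/*
			 * Worked example: with mapping_array_base_tsn = 100
			 * and cumulative_tsn = 102, offset is -2, so
			 * (1 - offset) is 3 and the mask clears the three low
			 * bits of the first byte, i.e. the bits for TSNs
			 * 100-102, which are already covered by the
			 * cumulative ack.
			 */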
11288 			selector = &sack_array[tsn_map];
11289 			if (mergeable && selector->right_edge) {
11290 				/*
11291 				 * Backup, left and right edges were ok to
11292 				 * merge.
11293 				 */
11294 				num_gap_blocks--;
11295 				gap_descriptor--;
11296 			}
11297 			if (selector->num_entries == 0)
11298 				mergeable = 0;
11299 			else {
11300 				for (j = 0; j < selector->num_entries; j++) {
11301 					if (mergeable && selector->right_edge) {
11302 						/*
11303 						 * do a merge by NOT setting
11304 						 * the left side
11305 						 */
11306 						mergeable = 0;
11307 					} else {
11308 						/*
11309 						 * no merge, set the left
11310 						 * side
11311 						 */
11312 						mergeable = 0;
11313 						gap_descriptor->start = htons((selector->gaps[j].start + offset));
11314 					}
11315 					gap_descriptor->end = htons((selector->gaps[j].end + offset));
11316 					num_gap_blocks++;
11317 					gap_descriptor++;
11318 					if (((caddr_t)gap_descriptor + sizeof(struct sctp_gap_ack_block)) > limit) {
11319 						/* no more room */
11320 						limit_reached = 1;
11321 						break;
11322 					}
11323 				}
11324 				if (selector->left_edge) {
11325 					mergeable = 1;
11326 				}
11327 			}
11328 			if (limit_reached) {
11329 				/* Reached the limit stop */
11330 				break;
11331 			}
11332 			offset += 8;
11333 		}
11334 	}
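	/*
	 * The loop above walks the mapping array one byte at a time and uses
	 * the precomputed sack_array[] lookup table to turn each 8-bit
	 * pattern into ready-made gap fragments; the mergeable/right_edge/
	 * left_edge handling joins a fragment that runs up to the end of one
	 * byte with one that starts at the beginning of the next, so a run of
	 * received TSNs crossing a byte boundary is reported as a single gap
	 * block rather than two.
	 */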
11335 	if ((type == SCTP_NR_SELECTIVE_ACK) &&
11336 	    (limit_reached == 0)) {
11337 
11338 		mergeable = 0;
11339 
11340 		if (asoc->highest_tsn_inside_nr_map > asoc->mapping_array_base_tsn) {
11341 			siz = (((asoc->highest_tsn_inside_nr_map - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
11342 		} else {
11343 			siz = (((MAX_TSN - asoc->mapping_array_base_tsn) + 1) + asoc->highest_tsn_inside_nr_map + 7) / 8;
11344 		}
11345 
11346 		if (SCTP_TSN_GT(asoc->mapping_array_base_tsn, asoc->cumulative_tsn)) {
11347 			offset = 1;
11348 		} else {
11349 			offset = asoc->mapping_array_base_tsn - asoc->cumulative_tsn;
11350 		}
11351 		if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->cumulative_tsn)) {
11352 			/* we have a gap .. maybe */
11353 			for (i = 0; i < siz; i++) {
11354 				tsn_map = asoc->nr_mapping_array[i];
11355 				if (i == 0) {
11356 					/*
11357 					 * Clear all bits corresponding to TSNs
11358 					 * smaller than or equal to the cumulative TSN.
11359 					 */
11360 					tsn_map &= (~0U << (1 - offset));
11361 				}
11362 				selector = &sack_array[tsn_map];
11363 				if (mergeable && selector->right_edge) {
11364 					/*
11365 					 * Backup, left and right edges were ok to
11366 					 * merge.
11367 					 */
11368 					num_nr_gap_blocks--;
11369 					gap_descriptor--;
11370 				}
11371 				if (selector->num_entries == 0)
11372 					mergeable = 0;
11373 				else {
11374 					for (j = 0; j < selector->num_entries; j++) {
11375 						if (mergeable && selector->right_edge) {
11376 							/*
11377 							 * do a merge by NOT setting
11378 							 * the left side
11379 							 */
11380 							mergeable = 0;
11381 						} else {
11382 							/*
11383 							 * no merge, set the left
11384 							 * side
11385 							 */
11386 							mergeable = 0;
11387 							gap_descriptor->start = htons((selector->gaps[j].start + offset));
11388 						}
11389 						gap_descriptor->end = htons((selector->gaps[j].end + offset));
11390 						num_nr_gap_blocks++;
11391 						gap_descriptor++;
11392 						if (((caddr_t)gap_descriptor + sizeof(struct sctp_gap_ack_block)) > limit) {
11393 							/* no more room */
11394 							limit_reached = 1;
11395 							break;
11396 						}
11397 					}
11398 					if (selector->left_edge) {
11399 						mergeable = 1;
11400 					}
11401 				}
11402 				if (limit_reached) {
11403 					/* Reached the limit stop */
11404 					break;
11405 				}
11406 				offset += 8;
11407 			}
11408 		}
11409 	}
11410 	/* now we must add any dups we are going to report. */
11411 	if ((limit_reached == 0) && (asoc->numduptsns)) {
11412 		dup = (uint32_t *) gap_descriptor;
11413 		for (i = 0; i < asoc->numduptsns; i++) {
11414 			*dup = htonl(asoc->dup_tsns[i]);
11415 			dup++;
11416 			num_dups++;
11417 			if (((caddr_t)dup + sizeof(uint32_t)) > limit) {
11418 				/* no more room */
11419 				break;
11420 			}
11421 		}
11422 		asoc->numduptsns = 0;
11423 	}
11424 	/*
11425 	 * now that the chunk is prepared queue it to the control chunk
11426 	 * queue.
11427 	 */
11428 	if (type == SCTP_SELECTIVE_ACK) {
11429 		a_chk->send_size = (uint16_t)(sizeof(struct sctp_sack_chunk) +
11430 		                              (num_gap_blocks + num_nr_gap_blocks) * sizeof(struct sctp_gap_ack_block) +
11431 		                              num_dups * sizeof(int32_t));
11432 		SCTP_BUF_LEN(a_chk->data) = a_chk->send_size;
11433 		sack->sack.cum_tsn_ack = htonl(asoc->cumulative_tsn);
11434 		sack->sack.a_rwnd = htonl(asoc->my_rwnd);
11435 		sack->sack.num_gap_ack_blks = htons(num_gap_blocks);
11436 		sack->sack.num_dup_tsns = htons(num_dups);
11437 		sack->ch.chunk_type = type;
11438 		sack->ch.chunk_flags = flags;
11439 		sack->ch.chunk_length = htons(a_chk->send_size);
11440 	} else {
11441 		a_chk->send_size = (uint16_t)(sizeof(struct sctp_nr_sack_chunk) +
11442 		                              (num_gap_blocks + num_nr_gap_blocks) * sizeof(struct sctp_gap_ack_block) +
11443 		                              num_dups * sizeof(int32_t));
11444 		SCTP_BUF_LEN(a_chk->data) = a_chk->send_size;
11445 		nr_sack->nr_sack.cum_tsn_ack = htonl(asoc->cumulative_tsn);
11446 		nr_sack->nr_sack.a_rwnd = htonl(asoc->my_rwnd);
11447 		nr_sack->nr_sack.num_gap_ack_blks = htons(num_gap_blocks);
11448 		nr_sack->nr_sack.num_nr_gap_ack_blks = htons(num_nr_gap_blocks);
11449 		nr_sack->nr_sack.num_dup_tsns = htons(num_dups);
11450 		nr_sack->nr_sack.reserved = 0;
11451 		nr_sack->ch.chunk_type = type;
11452 		nr_sack->ch.chunk_flags = flags;
11453 		nr_sack->ch.chunk_length = htons(a_chk->send_size);
11454 	}
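	/*
	 * Rough size check, for illustration only (assuming the standard
	 * on-wire layout): a plain SACK with two gap blocks and one
	 * duplicate TSN is sizeof(struct sctp_sack_chunk) +
	 * 2 * sizeof(struct sctp_gap_ack_block) + 4, i.e. 16 + 8 + 4 = 28
	 * bytes.
	 */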
11455 	TAILQ_INSERT_TAIL(&asoc->control_send_queue, a_chk, sctp_next);
11456 	asoc->my_last_reported_rwnd = asoc->my_rwnd;
11457 	asoc->ctrl_queue_cnt++;
11458 	asoc->send_sack = 0;
11459 	SCTP_STAT_INCR(sctps_sendsacks);
11460 	return;
11461 }
11462 
11463 void
11464 sctp_send_abort_tcb(struct sctp_tcb *stcb, struct mbuf *operr, int so_locked)
11465 {
11466 	struct mbuf *m_abort, *m, *m_last;
11467 	struct mbuf *m_out, *m_end = NULL;
11468 	struct sctp_abort_chunk *abort;
11469 	struct sctp_auth_chunk *auth = NULL;
11470 	struct sctp_nets *net;
11471 	uint32_t vtag;
11472 	uint32_t auth_offset = 0;
11473 	int error;
11474 	uint16_t cause_len, chunk_len, padding_len;
11475 
11476 #if defined(__APPLE__) && !defined(__Userspace__)
11477 	if (so_locked) {
11478 		sctp_lock_assert(SCTP_INP_SO(stcb->sctp_ep));
11479 	} else {
11480 		sctp_unlock_assert(SCTP_INP_SO(stcb->sctp_ep));
11481 	}
11482 #endif
11483 	SCTP_TCB_LOCK_ASSERT(stcb);
11484 	/*-
11485 	 * Add an AUTH chunk, if chunk requires it and save the offset into
11486 	 * the chain for AUTH
11487 	 */
11488 	if (sctp_auth_is_required_chunk(SCTP_ABORT_ASSOCIATION,
11489 	                                stcb->asoc.peer_auth_chunks)) {
11490 		m_out = sctp_add_auth_chunk(NULL, &m_end, &auth, &auth_offset,
11491 					    stcb, SCTP_ABORT_ASSOCIATION);
11492 		SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
11493 	} else {
11494 		m_out = NULL;
11495 	}
11496 	m_abort = sctp_get_mbuf_for_msg(sizeof(struct sctp_abort_chunk), 0, M_NOWAIT, 1, MT_HEADER);
11497 	if (m_abort == NULL) {
11498 		if (m_out) {
11499 			sctp_m_freem(m_out);
11500 		}
11501 		if (operr) {
11502 			sctp_m_freem(operr);
11503 		}
11504 		return;
11505 	}
11506 	/* link in any error */
11507 	SCTP_BUF_NEXT(m_abort) = operr;
11508 	cause_len = 0;
11509 	m_last = NULL;
11510 	for (m = operr; m; m = SCTP_BUF_NEXT(m)) {
11511 		cause_len += (uint16_t)SCTP_BUF_LEN(m);
11512 		if (SCTP_BUF_NEXT(m) == NULL) {
11513 			m_last = m;
11514 		}
11515 	}
11516 	SCTP_BUF_LEN(m_abort) = sizeof(struct sctp_abort_chunk);
11517 	chunk_len = (uint16_t)sizeof(struct sctp_abort_chunk) + cause_len;
11518 	padding_len = SCTP_SIZE32(chunk_len) - chunk_len;
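	/*
	 * chunk_len counts the 4-byte ABORT chunk header plus the error
	 * causes; e.g. a chunk_len of 14 is rounded up by SCTP_SIZE32() to
	 * 16, giving padding_len = 2.
	 */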
11519 	if (m_out == NULL) {
11520 		/* NO Auth chunk prepended, so reserve space in front */
11521 		SCTP_BUF_RESV_UF(m_abort, SCTP_MIN_OVERHEAD);
11522 		m_out = m_abort;
11523 	} else {
11524 		/* Put AUTH chunk at the front of the chain */
11525 		SCTP_BUF_NEXT(m_end) = m_abort;
11526 	}
11527 	if (stcb->asoc.alternate) {
11528 		net = stcb->asoc.alternate;
11529 	} else {
11530 		net = stcb->asoc.primary_destination;
11531 	}
11532 	/* Fill in the ABORT chunk header. */
11533 	abort = mtod(m_abort, struct sctp_abort_chunk *);
11534 	abort->ch.chunk_type = SCTP_ABORT_ASSOCIATION;
11535 	if (stcb->asoc.peer_vtag == 0) {
11536 		/* This happens iff the assoc is in COOKIE-WAIT state. */
11537 		vtag = stcb->asoc.my_vtag;
11538 		abort->ch.chunk_flags = SCTP_HAD_NO_TCB;
11539 	} else {
11540 		vtag = stcb->asoc.peer_vtag;
11541 		abort->ch.chunk_flags = 0;
11542 	}
11543 	abort->ch.chunk_length = htons(chunk_len);
11544 	/* Add padding, if necessary. */
11545 	if (padding_len > 0) {
11546 		if ((m_last == NULL) ||
11547 		    (sctp_add_pad_tombuf(m_last, padding_len) == NULL)) {
11548 			sctp_m_freem(m_out);
11549 			return;
11550 		}
11551 	}
11552 	if ((error = sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb, net,
11553 	                                        (struct sockaddr *)&net->ro._l_addr,
11554 	                                        m_out, auth_offset, auth, stcb->asoc.authinfo.active_keyid, 1, 0, 0,
11555 	                                        stcb->sctp_ep->sctp_lport, stcb->rport, htonl(vtag),
11556 	                                        stcb->asoc.primary_destination->port, NULL,
11557 #if defined(__FreeBSD__) && !defined(__Userspace__)
11558 	                                        0, 0,
11559 #endif
11560 	                                        so_locked))) {
11561 		SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
11562 		if (error == ENOBUFS) {
11563 			stcb->asoc.ifp_had_enobuf = 1;
11564 			SCTP_STAT_INCR(sctps_lowlevelerr);
11565 		}
11566 	} else {
11567 		stcb->asoc.ifp_had_enobuf = 0;
11568 	}
11569 	SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
11570 }
11571 
11572 void
11573 sctp_send_shutdown_complete(struct sctp_tcb *stcb,
11574                             struct sctp_nets *net,
11575                             int reflect_vtag)
11576 {
11577 	/* formulate and SEND a SHUTDOWN-COMPLETE */
11578 	struct mbuf *m_shutdown_comp;
11579 	struct sctp_shutdown_complete_chunk *shutdown_complete;
11580 	uint32_t vtag;
11581 	int error;
11582 	uint8_t flags;
11583 
11584 	m_shutdown_comp = sctp_get_mbuf_for_msg(sizeof(struct sctp_chunkhdr), 0, M_NOWAIT, 1, MT_HEADER);
11585 	if (m_shutdown_comp == NULL) {
11586 		/* no mbufs */
11587 		return;
11588 	}
11589 	if (reflect_vtag) {
11590 		flags = SCTP_HAD_NO_TCB;
11591 		vtag = stcb->asoc.my_vtag;
11592 	} else {
11593 		flags = 0;
11594 		vtag = stcb->asoc.peer_vtag;
11595 	}
11596 	shutdown_complete = mtod(m_shutdown_comp, struct sctp_shutdown_complete_chunk *);
11597 	shutdown_complete->ch.chunk_type = SCTP_SHUTDOWN_COMPLETE;
11598 	shutdown_complete->ch.chunk_flags = flags;
11599 	shutdown_complete->ch.chunk_length = htons(sizeof(struct sctp_shutdown_complete_chunk));
11600 	SCTP_BUF_LEN(m_shutdown_comp) = sizeof(struct sctp_shutdown_complete_chunk);
11601 	if ((error = sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb, net,
11602 	                                        (struct sockaddr *)&net->ro._l_addr,
11603 	                                        m_shutdown_comp, 0, NULL, 0, 1, 0, 0,
11604 	                                        stcb->sctp_ep->sctp_lport, stcb->rport,
11605 	                                        htonl(vtag),
11606 	                                        net->port, NULL,
11607 #if defined(__FreeBSD__) && !defined(__Userspace__)
11608 	                                        0, 0,
11609 #endif
11610 	                                        SCTP_SO_NOT_LOCKED))) {
11611 		SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
11612 		if (error == ENOBUFS) {
11613 			stcb->asoc.ifp_had_enobuf = 1;
11614 			SCTP_STAT_INCR(sctps_lowlevelerr);
11615 		}
11616 	} else {
11617 		stcb->asoc.ifp_had_enobuf = 0;
11618 	}
11619 	SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
11620 	return;
11621 }
11622 
11623 #if defined(__FreeBSD__) && !defined(__Userspace__)
11624 static void
11625 sctp_send_resp_msg(struct sockaddr *src, struct sockaddr *dst,
11626                    struct sctphdr *sh, uint32_t vtag,
11627                    uint8_t type, struct mbuf *cause,
11628                    uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
11629                    uint32_t vrf_id, uint16_t port)
11630 #else
11631 static void
11632 sctp_send_resp_msg(struct sockaddr *src, struct sockaddr *dst,
11633                    struct sctphdr *sh, uint32_t vtag,
11634                    uint8_t type, struct mbuf *cause,
11635                    uint32_t vrf_id SCTP_UNUSED, uint16_t port)
11636 #endif
11637 {
11638 	struct mbuf *o_pak;
11639 	struct mbuf *mout;
11640 	struct sctphdr *shout;
11641 	struct sctp_chunkhdr *ch;
11642 #if defined(INET) || defined(INET6)
11643 	struct udphdr *udp;
11644 #endif
11645 	int ret, len, cause_len, padding_len;
11646 #ifdef INET
11647 #if defined(__APPLE__) && !defined(__Userspace__)
11648 	sctp_route_t ro;
11649 #endif
11650 	struct sockaddr_in *src_sin, *dst_sin;
11651 	struct ip *ip;
11652 #endif
11653 #ifdef INET6
11654 	struct sockaddr_in6 *src_sin6, *dst_sin6;
11655 	struct ip6_hdr *ip6;
11656 #endif
11657 
11658 	/* Compute the length of the cause and add final padding. */
11659 	cause_len = 0;
11660 	if (cause != NULL) {
11661 		struct mbuf *m_at, *m_last = NULL;
11662 
11663 		for (m_at = cause; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
11664 			if (SCTP_BUF_NEXT(m_at) == NULL)
11665 				m_last = m_at;
11666 			cause_len += SCTP_BUF_LEN(m_at);
11667 		}
11668 		padding_len = cause_len % 4;
11669 		if (padding_len != 0) {
11670 			padding_len = 4 - padding_len;
11671 		}
11672 		if (padding_len != 0) {
11673 			if (sctp_add_pad_tombuf(m_last, padding_len) == NULL) {
11674 				sctp_m_freem(cause);
11675 				return;
11676 			}
11677 		}
11678 	} else {
11679 		padding_len = 0;
11680 	}
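	/*
	 * Example: a 6-byte cause needs padding_len = 4 - (6 % 4) = 2 so the
	 * enclosing chunk stays 32-bit aligned; a cause whose length is
	 * already a multiple of 4 needs no padding at all.
	 */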
11681 	/* Get an mbuf for the header. */
11682 	len = sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
11683 	switch (dst->sa_family) {
11684 #ifdef INET
11685 	case AF_INET:
11686 		len += sizeof(struct ip);
11687 		break;
11688 #endif
11689 #ifdef INET6
11690 	case AF_INET6:
11691 		len += sizeof(struct ip6_hdr);
11692 		break;
11693 #endif
11694 	default:
11695 		break;
11696 	}
11697 #if defined(INET) || defined(INET6)
11698 	if (port) {
11699 		len += sizeof(struct udphdr);
11700 	}
11701 #endif
11702 #if defined(__APPLE__) && !defined(__Userspace__)
11703 #if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD)
11704 	mout = sctp_get_mbuf_for_msg(len + max_linkhdr, 1, M_NOWAIT, 1, MT_DATA);
11705 #else
11706 	mout = sctp_get_mbuf_for_msg(len + SCTP_MAX_LINKHDR, 1, M_NOWAIT, 1, MT_DATA);
11707 #endif
11708 #else
11709 	mout = sctp_get_mbuf_for_msg(len + max_linkhdr, 1, M_NOWAIT, 1, MT_DATA);
11710 #endif
11711 	if (mout == NULL) {
11712 		if (cause) {
11713 			sctp_m_freem(cause);
11714 		}
11715 		return;
11716 	}
11717 #if defined(__APPLE__) && !defined(__Userspace__)
11718 #if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD)
11719 	SCTP_BUF_RESV_UF(mout, max_linkhdr);
11720 #else
11721 	SCTP_BUF_RESV_UF(mout, SCTP_MAX_LINKHDR);
11722 #endif
11723 #else
11724 	SCTP_BUF_RESV_UF(mout, max_linkhdr);
11725 #endif
11726 	SCTP_BUF_LEN(mout) = len;
11727 	SCTP_BUF_NEXT(mout) = cause;
11728 #if defined(__FreeBSD__) && !defined(__Userspace__)
11729 	M_SETFIB(mout, fibnum);
11730 	mout->m_pkthdr.flowid = mflowid;
11731 	M_HASHTYPE_SET(mout, mflowtype);
11732 #endif
11733 #ifdef INET
11734 	ip = NULL;
11735 #endif
11736 #ifdef INET6
11737 	ip6 = NULL;
11738 #endif
11739 	switch (dst->sa_family) {
11740 #ifdef INET
11741 	case AF_INET:
11742 		src_sin = (struct sockaddr_in *)src;
11743 		dst_sin = (struct sockaddr_in *)dst;
11744 		ip = mtod(mout, struct ip *);
11745 		ip->ip_v = IPVERSION;
11746 		ip->ip_hl = (sizeof(struct ip) >> 2);
11747 		ip->ip_tos = 0;
11748 #if defined(__FreeBSD__) && !defined(__Userspace__)
11749 		ip->ip_off = htons(IP_DF);
11750 #elif defined(WITH_CONVERT_IP_OFF) || defined(__APPLE__)
11751 		ip->ip_off = IP_DF;
11752 #else
11753 		ip->ip_off = htons(IP_DF);
11754 #endif
11755 #if defined(__Userspace__)
11756 		ip->ip_id = htons(ip_id++);
11757 #elif defined(__FreeBSD__)
11758 		ip_fillid(ip);
11759 #elif defined(__APPLE__)
11760 #if RANDOM_IP_ID
11761 		ip->ip_id = ip_randomid();
11762 #else
11763 		ip->ip_id = htons(ip_id++);
11764 #endif
11765 #else
11766 		ip->ip_id = ip_id++;
11767 #endif
11768 		ip->ip_ttl = MODULE_GLOBAL(ip_defttl);
11769 		if (port) {
11770 			ip->ip_p = IPPROTO_UDP;
11771 		} else {
11772 			ip->ip_p = IPPROTO_SCTP;
11773 		}
11774 		ip->ip_src.s_addr = dst_sin->sin_addr.s_addr;
11775 		ip->ip_dst.s_addr = src_sin->sin_addr.s_addr;
11776 		ip->ip_sum = 0;
11777 		len = sizeof(struct ip);
11778 		shout = (struct sctphdr *)((caddr_t)ip + len);
11779 		break;
11780 #endif
11781 #ifdef INET6
11782 	case AF_INET6:
11783 		src_sin6 = (struct sockaddr_in6 *)src;
11784 		dst_sin6 = (struct sockaddr_in6 *)dst;
11785 		ip6 = mtod(mout, struct ip6_hdr *);
11786 		ip6->ip6_flow = htonl(0x60000000);
11787 #if defined(__FreeBSD__) && !defined(__Userspace__)
11788 		if (V_ip6_auto_flowlabel) {
11789 			ip6->ip6_flow |= (htonl(ip6_randomflowlabel()) & IPV6_FLOWLABEL_MASK);
11790 		}
11791 #endif
11792 #if defined(__Userspace__)
11793 		ip6->ip6_hlim = IPv6_HOP_LIMIT;
11794 #else
11795 		ip6->ip6_hlim = MODULE_GLOBAL(ip6_defhlim);
11796 #endif
11797 		if (port) {
11798 			ip6->ip6_nxt = IPPROTO_UDP;
11799 		} else {
11800 			ip6->ip6_nxt = IPPROTO_SCTP;
11801 		}
11802 		ip6->ip6_src = dst_sin6->sin6_addr;
11803 		ip6->ip6_dst = src_sin6->sin6_addr;
11804 		len = sizeof(struct ip6_hdr);
11805 		shout = (struct sctphdr *)((caddr_t)ip6 + len);
11806 		break;
11807 #endif
11808 	default:
11809 		len = 0;
11810 		shout = mtod(mout, struct sctphdr *);
11811 		break;
11812 	}
11813 #if defined(INET) || defined(INET6)
11814 	if (port) {
11815 		if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) {
11816 			sctp_m_freem(mout);
11817 			return;
11818 		}
11819 		udp = (struct udphdr *)shout;
11820 		udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
11821 		udp->uh_dport = port;
11822 		udp->uh_sum = 0;
11823 		udp->uh_ulen = htons((uint16_t)(sizeof(struct udphdr) +
11824 		                                sizeof(struct sctphdr) +
11825 		                                sizeof(struct sctp_chunkhdr) +
11826 		                                cause_len + padding_len));
11827 		len += sizeof(struct udphdr);
11828 		shout = (struct sctphdr *)((caddr_t)shout + sizeof(struct udphdr));
11829 	} else {
11830 		udp = NULL;
11831 	}
11832 #endif
11833 	shout->src_port = sh->dest_port;
11834 	shout->dest_port = sh->src_port;
11835 	shout->checksum = 0;
11836 	if (vtag) {
11837 		shout->v_tag = htonl(vtag);
11838 	} else {
11839 		shout->v_tag = sh->v_tag;
11840 	}
11841 	len += sizeof(struct sctphdr);
11842 	ch = (struct sctp_chunkhdr *)((caddr_t)shout + sizeof(struct sctphdr));
11843 	ch->chunk_type = type;
11844 	if (vtag) {
11845 		ch->chunk_flags = 0;
11846 	} else {
11847 		ch->chunk_flags = SCTP_HAD_NO_TCB;
11848 	}
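	/*
	 * When no verification tag is known, the peer's own tag is reflected
	 * and SCTP_HAD_NO_TCB is set in the chunk flags; this corresponds to
	 * the T bit used by RFC 4960 for responses sent without an existing
	 * TCB.
	 */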
11849 	ch->chunk_length = htons((uint16_t)(sizeof(struct sctp_chunkhdr) + cause_len));
11850 	len += sizeof(struct sctp_chunkhdr);
11851 	len += cause_len + padding_len;
11852 
11853 	if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
11854 		sctp_m_freem(mout);
11855 		return;
11856 	}
11857 	SCTP_ATTACH_CHAIN(o_pak, mout, len);
11858 	switch (dst->sa_family) {
11859 #ifdef INET
11860 	case AF_INET:
11861 #if defined(__APPLE__) && !defined(__Userspace__)
11862 		/* zap the stack pointer to the route */
11863 		memset(&ro, 0, sizeof(sctp_route_t));
11864 #endif
11865 		if (port) {
11866 #if !defined(_WIN32) && !defined(__Userspace__)
11867 #if defined(__FreeBSD__)
11868 			if (V_udp_cksum) {
11869 				udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
11870 			} else {
11871 				udp->uh_sum = 0;
11872 			}
11873 #else
11874 			udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
11875 #endif
11876 #else
11877 			udp->uh_sum = 0;
11878 #endif
11879 		}
11880 #if defined(__FreeBSD__) && !defined(__Userspace__)
11881 		ip->ip_len = htons(len);
11882 #elif defined(__APPLE__) || defined(__Userspace__)
11883 		ip->ip_len = len;
11884 #else
11885 		ip->ip_len = htons(len);
11886 #endif
11887 		if (port) {
11888 			shout->checksum = sctp_calculate_cksum(mout, sizeof(struct ip) + sizeof(struct udphdr));
11889 			SCTP_STAT_INCR(sctps_sendswcrc);
11890 #if !defined(_WIN32) && !defined(__Userspace__)
11891 #if defined(__FreeBSD__)
11892 			if (V_udp_cksum) {
11893 				SCTP_ENABLE_UDP_CSUM(o_pak);
11894 			}
11895 #else
11896 			SCTP_ENABLE_UDP_CSUM(o_pak);
11897 #endif
11898 #endif
11899 		} else {
11900 #if defined(__FreeBSD__) && !defined(__Userspace__)
11901 			mout->m_pkthdr.csum_flags = CSUM_SCTP;
11902 			mout->m_pkthdr.csum_data = offsetof(struct sctphdr, checksum);
11903 			SCTP_STAT_INCR(sctps_sendhwcrc);
11904 #else
11905 			shout->checksum = sctp_calculate_cksum(mout, sizeof(struct ip));
11906 			SCTP_STAT_INCR(sctps_sendswcrc);
11907 #endif
11908 		}
11909 #ifdef SCTP_PACKET_LOGGING
11910 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) {
11911 			sctp_packet_log(o_pak);
11912 		}
11913 #endif
11914 #if defined(__APPLE__) && !defined(__Userspace__)
11915 		SCTP_IP_OUTPUT(ret, o_pak, &ro, NULL, vrf_id);
11916 		/* Free the route if we got one back */
11917 		if (ro.ro_rt) {
11918 			RTFREE(ro.ro_rt);
11919 			ro.ro_rt = NULL;
11920 		}
11921 #else
11922 #if defined(__FreeBSD__) && !defined(__Userspace__)
11923 		SCTP_PROBE5(send, NULL, NULL, ip, NULL, shout);
11924 #endif
11925 		SCTP_IP_OUTPUT(ret, o_pak, NULL, NULL, vrf_id);
11926 #endif
11927 		break;
11928 #endif
11929 #ifdef INET6
11930 	case AF_INET6:
11931 		ip6->ip6_plen = htons((uint16_t)(len - sizeof(struct ip6_hdr)));
11932 		if (port) {
11933 			shout->checksum = sctp_calculate_cksum(mout, sizeof(struct ip6_hdr) + sizeof(struct udphdr));
11934 			SCTP_STAT_INCR(sctps_sendswcrc);
11935 #if !defined(__Userspace__)
11936 #if defined(_WIN32)
11937 			udp->uh_sum = 0;
11938 #else
11939 			if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), len - sizeof(struct ip6_hdr))) == 0) {
11940 				udp->uh_sum = 0xffff;
11941 			}
11942 #endif
11943 #endif
11944 		} else {
11945 #if defined(__FreeBSD__) && !defined(__Userspace__)
11946 			mout->m_pkthdr.csum_flags = CSUM_SCTP_IPV6;
11947 			mout->m_pkthdr.csum_data = offsetof(struct sctphdr, checksum);
11948 			SCTP_STAT_INCR(sctps_sendhwcrc);
11949 #else
11950 			shout->checksum = sctp_calculate_cksum(mout, sizeof(struct ip6_hdr));
11951 			SCTP_STAT_INCR(sctps_sendswcrc);
11952 #endif
11953 		}
11954 #ifdef SCTP_PACKET_LOGGING
11955 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) {
11956 			sctp_packet_log(o_pak);
11957 		}
11958 #endif
11959 #if defined(__FreeBSD__) && !defined(__Userspace__)
11960 		SCTP_PROBE5(send, NULL, NULL, ip6, NULL, shout);
11961 #endif
11962 		SCTP_IP6_OUTPUT(ret, o_pak, NULL, NULL, NULL, vrf_id);
11963 		break;
11964 #endif
11965 #if defined(__Userspace__)
11966 	case AF_CONN:
11967 	{
11968 		char *buffer;
11969 		struct sockaddr_conn *sconn;
11970 
11971 		sconn = (struct sockaddr_conn *)src;
11972 		if (SCTP_BASE_VAR(crc32c_offloaded) == 0) {
11973 			shout->checksum = sctp_calculate_cksum(mout, 0);
11974 			SCTP_STAT_INCR(sctps_sendswcrc);
11975 		} else {
11976 			SCTP_STAT_INCR(sctps_sendhwcrc);
11977 		}
11978 #ifdef SCTP_PACKET_LOGGING
11979 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) {
11980 			sctp_packet_log(mout);
11981 		}
11982 #endif
11983 		/* Don't alloc/free for each packet */
11984 		if ((buffer = malloc(len)) != NULL) {
11985 			m_copydata(mout, 0, len, buffer);
11986 			ret = SCTP_BASE_VAR(conn_output)(sconn->sconn_addr, buffer, len, 0, 0);
11987 			free(buffer);
11988 		} else {
11989 			ret = ENOMEM;
11990 		}
11991 		sctp_m_freem(mout);
11992 		break;
11993 	}
11994 #endif
11995 	default:
11996 		SCTPDBG(SCTP_DEBUG_OUTPUT1, "Unknown protocol (TSNH) type %d\n",
11997 		        dst->sa_family);
11998 		sctp_m_freem(mout);
11999 		SCTP_LTRACE_ERR_RET_PKT(mout, NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EFAULT);
12000 		return;
12001 	}
12002 	SCTPDBG(SCTP_DEBUG_OUTPUT3, "return from send is %d\n", ret);
12003 #if defined(__FreeBSD__) && !defined(__Userspace__)
12004 	if (port) {
12005 		UDPSTAT_INC(udps_opackets);
12006 	}
12007 #endif
12008 	SCTP_STAT_INCR(sctps_sendpackets);
12009 	SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
12010 	SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
12011 	if (ret) {
12012 		SCTP_STAT_INCR(sctps_senderrors);
12013 	}
12014 	return;
12015 }
12016 
12017 void
12018 sctp_send_shutdown_complete2(struct sockaddr *src, struct sockaddr *dst,
12019                              struct sctphdr *sh,
12020 #if defined(__FreeBSD__) && !defined(__Userspace__)
12021                              uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
12022 #endif
12023                              uint32_t vrf_id, uint16_t port)
12024 {
12025 	sctp_send_resp_msg(src, dst, sh, 0, SCTP_SHUTDOWN_COMPLETE, NULL,
12026 #if defined(__FreeBSD__) && !defined(__Userspace__)
12027 	                   mflowtype, mflowid, fibnum,
12028 #endif
12029 	                   vrf_id, port);
12030 }
12031 
12032 void
12033 sctp_send_hb(struct sctp_tcb *stcb, struct sctp_nets *net, int so_locked)
12034 {
12035 	struct sctp_tmit_chunk *chk;
12036 	struct sctp_heartbeat_chunk *hb;
12037 	struct timeval now;
12038 
12039 	SCTP_TCB_LOCK_ASSERT(stcb);
12040 	if (net == NULL) {
12041 		return;
12042 	}
12043 	(void)SCTP_GETTIME_TIMEVAL(&now);
12044 	switch (net->ro._l_addr.sa.sa_family) {
12045 #ifdef INET
12046 	case AF_INET:
12047 		break;
12048 #endif
12049 #ifdef INET6
12050 	case AF_INET6:
12051 		break;
12052 #endif
12053 #if defined(__Userspace__)
12054 	case AF_CONN:
12055 		break;
12056 #endif
12057 	default:
12058 		return;
12059 	}
12060 	sctp_alloc_a_chunk(stcb, chk);
12061 	if (chk == NULL) {
12062 		SCTPDBG(SCTP_DEBUG_OUTPUT4, "Gak, can't get a chunk for hb\n");
12063 		return;
12064 	}
12065 
12066 	chk->copy_by_ref = 0;
12067 	chk->rec.chunk_id.id = SCTP_HEARTBEAT_REQUEST;
12068 	chk->rec.chunk_id.can_take_data = 1;
12069 	chk->flags = 0;
12070 	chk->asoc = &stcb->asoc;
12071 	chk->send_size = sizeof(struct sctp_heartbeat_chunk);
12072 
12073 	chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_NOWAIT, 1, MT_HEADER);
12074 	if (chk->data == NULL) {
12075 		sctp_free_a_chunk(stcb, chk, so_locked);
12076 		return;
12077 	}
12078 	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
12079 	SCTP_BUF_LEN(chk->data) = chk->send_size;
12080 	chk->sent = SCTP_DATAGRAM_UNSENT;
12081 	chk->snd_count = 0;
12082 	chk->whoTo = net;
12083 	atomic_add_int(&chk->whoTo->ref_count, 1);
12084 	/* Now we have an mbuf that we can fill in with the details */
12085 	hb = mtod(chk->data, struct sctp_heartbeat_chunk *);
12086 	memset(hb, 0, sizeof(struct sctp_heartbeat_chunk));
12087 	/* fill out chunk header */
12088 	hb->ch.chunk_type = SCTP_HEARTBEAT_REQUEST;
12089 	hb->ch.chunk_flags = 0;
12090 	hb->ch.chunk_length = htons(chk->send_size);
12091 	/* Fill out hb parameter */
12092 	hb->heartbeat.hb_info.ph.param_type = htons(SCTP_HEARTBEAT_INFO);
12093 	hb->heartbeat.hb_info.ph.param_length = htons(sizeof(struct sctp_heartbeat_info_param));
12094 	hb->heartbeat.hb_info.time_value_1 = now.tv_sec;
12095 	hb->heartbeat.hb_info.time_value_2 = now.tv_usec;
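	/*
	 * The timestamp stored in time_value_1/time_value_2 is echoed back
	 * in the HEARTBEAT-ACK, which lets the sender compute an RTT sample
	 * for this path when the ack arrives.
	 */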
12096 	/* Did our user request this one, put it in */
12097 	hb->heartbeat.hb_info.addr_family = (uint8_t)net->ro._l_addr.sa.sa_family;
12098 #ifdef HAVE_SA_LEN
12099 	hb->heartbeat.hb_info.addr_len = net->ro._l_addr.sa.sa_len;
12100 #else
12101 	switch (net->ro._l_addr.sa.sa_family) {
12102 #ifdef INET
12103 	case AF_INET:
12104 		hb->heartbeat.hb_info.addr_len = sizeof(struct sockaddr_in);
12105 		break;
12106 #endif
12107 #ifdef INET6
12108 	case AF_INET6:
12109 		hb->heartbeat.hb_info.addr_len = sizeof(struct sockaddr_in6);
12110 		break;
12111 #endif
12112 #if defined(__Userspace__)
12113 	case AF_CONN:
12114 		hb->heartbeat.hb_info.addr_len = sizeof(struct sockaddr_conn);
12115 		break;
12116 #endif
12117 	default:
12118 		hb->heartbeat.hb_info.addr_len = 0;
12119 		break;
12120 	}
12121 #endif
12122 	if (net->dest_state & SCTP_ADDR_UNCONFIRMED) {
12123 		/*
12124 		 * we only take from the entropy pool if the address is not
12125 		 * confirmed.
12126 		 */
12127 		net->heartbeat_random1 = hb->heartbeat.hb_info.random_value1 = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
12128 		net->heartbeat_random2 = hb->heartbeat.hb_info.random_value2 = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
12129 	} else {
12130 		net->heartbeat_random1 = hb->heartbeat.hb_info.random_value1 = 0;
12131 		net->heartbeat_random2 = hb->heartbeat.hb_info.random_value2 = 0;
12132 	}
12133 	switch (net->ro._l_addr.sa.sa_family) {
12134 #ifdef INET
12135 	case AF_INET:
12136 		memcpy(hb->heartbeat.hb_info.address,
12137 		       &net->ro._l_addr.sin.sin_addr,
12138 		       sizeof(net->ro._l_addr.sin.sin_addr));
12139 		break;
12140 #endif
12141 #ifdef INET6
12142 	case AF_INET6:
12143 		memcpy(hb->heartbeat.hb_info.address,
12144 		       &net->ro._l_addr.sin6.sin6_addr,
12145 		       sizeof(net->ro._l_addr.sin6.sin6_addr));
12146 		break;
12147 #endif
12148 #if defined(__Userspace__)
12149 	case AF_CONN:
12150 		memcpy(hb->heartbeat.hb_info.address,
12151 		       &net->ro._l_addr.sconn.sconn_addr,
12152 		       sizeof(net->ro._l_addr.sconn.sconn_addr));
12153 		break;
12154 #endif
12155 	default:
12156 		if (chk->data) {
12157 			sctp_m_freem(chk->data);
12158 			chk->data = NULL;
12159 		}
12160 		sctp_free_a_chunk(stcb, chk, so_locked);
12161 		return;
12162 		break;
12163 	}
12164 	net->hb_responded = 0;
12165 	TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
12166 	stcb->asoc.ctrl_queue_cnt++;
12167 	SCTP_STAT_INCR(sctps_sendheartbeat);
12168 	return;
12169 }
12170 
12171 void
12172 sctp_send_ecn_echo(struct sctp_tcb *stcb, struct sctp_nets *net,
12173 		   uint32_t high_tsn)
12174 {
12175 	struct sctp_association *asoc;
12176 	struct sctp_ecne_chunk *ecne;
12177 	struct sctp_tmit_chunk *chk;
12178 
12179 	if (net == NULL) {
12180 		return;
12181 	}
12182 	asoc = &stcb->asoc;
12183 	SCTP_TCB_LOCK_ASSERT(stcb);
12184 	TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
12185 		if ((chk->rec.chunk_id.id == SCTP_ECN_ECHO) && (net == chk->whoTo)) {
12186 			/* found a previous ECN_ECHO, update it if needed */
12187 			uint32_t cnt, ctsn;
12188 			ecne = mtod(chk->data, struct sctp_ecne_chunk *);
12189 			ctsn = ntohl(ecne->tsn);
12190 			if (SCTP_TSN_GT(high_tsn, ctsn)) {
12191 				ecne->tsn = htonl(high_tsn);
12192 				SCTP_STAT_INCR(sctps_queue_upd_ecne);
12193 			}
12194 			cnt = ntohl(ecne->num_pkts_since_cwr);
12195 			cnt++;
12196 			ecne->num_pkts_since_cwr = htonl(cnt);
12197 			return;
12198 		}
12199 	}
12200 	/* nope could not find one to update so we must build one */
12201 	sctp_alloc_a_chunk(stcb, chk);
12202 	if (chk == NULL) {
12203 		return;
12204 	}
12205 	SCTP_STAT_INCR(sctps_queue_upd_ecne);
12206 	chk->copy_by_ref = 0;
12207 	chk->rec.chunk_id.id = SCTP_ECN_ECHO;
12208 	chk->rec.chunk_id.can_take_data = 0;
12209 	chk->flags = 0;
12210 	chk->asoc = &stcb->asoc;
12211 	chk->send_size = sizeof(struct sctp_ecne_chunk);
12212 	chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_NOWAIT, 1, MT_HEADER);
12213 	if (chk->data == NULL) {
12214 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
12215 		return;
12216 	}
12217 	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
12218 	SCTP_BUF_LEN(chk->data) = chk->send_size;
12219 	chk->sent = SCTP_DATAGRAM_UNSENT;
12220 	chk->snd_count = 0;
12221 	chk->whoTo = net;
12222 	atomic_add_int(&chk->whoTo->ref_count, 1);
12223 
12224 	stcb->asoc.ecn_echo_cnt_onq++;
12225 	ecne = mtod(chk->data, struct sctp_ecne_chunk *);
12226 	ecne->ch.chunk_type = SCTP_ECN_ECHO;
12227 	ecne->ch.chunk_flags = 0;
12228 	ecne->ch.chunk_length = htons(sizeof(struct sctp_ecne_chunk));
12229 	ecne->tsn = htonl(high_tsn);
12230 	ecne->num_pkts_since_cwr = htonl(1);
12231 	TAILQ_INSERT_HEAD(&stcb->asoc.control_send_queue, chk, sctp_next);
12232 	asoc->ctrl_queue_cnt++;
12233 }
12234 
12235 void
12236 sctp_send_packet_dropped(struct sctp_tcb *stcb, struct sctp_nets *net,
12237     struct mbuf *m, int len, int iphlen, int bad_crc)
12238 {
12239 	struct sctp_association *asoc;
12240 	struct sctp_pktdrop_chunk *drp;
12241 	struct sctp_tmit_chunk *chk;
12242 	uint8_t *datap;
12243 	int was_trunc = 0;
12244 	int fullsz = 0;
12245 	long spc;
12246 	int offset;
12247 	struct sctp_chunkhdr *ch, chunk_buf;
12248 	unsigned int chk_length;
12249 
12250 	if (stcb == NULL) {
12251 		return;
12252 	}
12253 	asoc = &stcb->asoc;
12254 	SCTP_TCB_LOCK_ASSERT(stcb);
12255 	if (asoc->pktdrop_supported == 0) {
12256 		/*-
12257 		 * peer must declare support before I send one.
12258 		 */
12259 		return;
12260 	}
12261 	if (stcb->sctp_socket == NULL) {
12262 		return;
12263 	}
12264 	sctp_alloc_a_chunk(stcb, chk);
12265 	if (chk == NULL) {
12266 		return;
12267 	}
12268 	chk->copy_by_ref = 0;
12269 	chk->rec.chunk_id.id = SCTP_PACKET_DROPPED;
12270 	chk->rec.chunk_id.can_take_data = 1;
12271 	chk->flags = 0;
12272 	len -= iphlen;
12273 	chk->send_size = len;
12274 	/* Validate that we do not have an ABORT in here. */
12275 	offset = iphlen + sizeof(struct sctphdr);
12276 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
12277 						   sizeof(*ch), (uint8_t *) & chunk_buf);
12278 	while (ch != NULL) {
12279 		chk_length = ntohs(ch->chunk_length);
12280 		if (chk_length < sizeof(*ch)) {
12281 			/* break to abort land */
12282 			break;
12283 		}
12284 		switch (ch->chunk_type) {
12285 		case SCTP_PACKET_DROPPED:
12286 		case SCTP_ABORT_ASSOCIATION:
12287 		case SCTP_INITIATION_ACK:
12288 			/*
12289 			 * We don't respond with a PKT-DROP to an ABORT
12290 			 * or a PKT-DROP. We also do not respond to an
12291 			 * INIT-ACK, because we can't know if the initiation
12292 			 * tag is correct or not.
12293 			 */
12294 			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
12295 			return;
12296 		default:
12297 			break;
12298 		}
12299 		offset += SCTP_SIZE32(chk_length);
12300 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
12301 		    sizeof(*ch), (uint8_t *) & chunk_buf);
12302 	}
12303 
12304 	if ((len + SCTP_MAX_OVERHEAD + sizeof(struct sctp_pktdrop_chunk)) >
12305 	    min(stcb->asoc.smallest_mtu, MCLBYTES)) {
12306 		/*
12307 		 * Only send 1 MTU's worth; trim off the excess on the end.
12308 		 */
12309 		fullsz = len;
12310 		len = min(stcb->asoc.smallest_mtu, MCLBYTES) - SCTP_MAX_OVERHEAD;
12311 		was_trunc = 1;
12312 	}
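	/*
	 * Example (illustrative numbers): if the dropped packet carried 1400
	 * bytes beyond the IP header but only 1200 bytes fit once
	 * SCTP_MAX_OVERHEAD is subtracted from the smallest MTU, we copy
	 * only the first 1200 bytes, record the original 1400 in trunc_len
	 * below, and mark the chunk SCTP_PACKET_TRUNCATED.
	 */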
12313 	chk->asoc = &stcb->asoc;
12314 	chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
12315 	if (chk->data == NULL) {
12316 jump_out:
12317 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
12318 		return;
12319 	}
12320 	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
12321 	drp = mtod(chk->data, struct sctp_pktdrop_chunk *);
12322 	if (drp == NULL) {
12323 		sctp_m_freem(chk->data);
12324 		chk->data = NULL;
12325 		goto jump_out;
12326 	}
12327 	chk->book_size = SCTP_SIZE32((chk->send_size + sizeof(struct sctp_pktdrop_chunk) +
12328 	    sizeof(struct sctphdr) + SCTP_MED_OVERHEAD));
12329 	chk->book_size_scale = 0;
12330 	if (was_trunc) {
12331 		drp->ch.chunk_flags = SCTP_PACKET_TRUNCATED;
12332 		drp->trunc_len = htons(fullsz);
12333 		/* Len is already adjusted to size minus overhead above;
12334 		 * take out the pkt_drop chunk itself from it.
12335 		 */
12336 		chk->send_size = (uint16_t)(len - sizeof(struct sctp_pktdrop_chunk));
12337 		len = chk->send_size;
12338 	} else {
12339 		/* no truncation needed */
12340 		drp->ch.chunk_flags = 0;
12341 		drp->trunc_len = htons(0);
12342 	}
12343 	if (bad_crc) {
12344 		drp->ch.chunk_flags |= SCTP_BADCRC;
12345 	}
12346 	chk->send_size += sizeof(struct sctp_pktdrop_chunk);
12347 	SCTP_BUF_LEN(chk->data) = chk->send_size;
12348 	chk->sent = SCTP_DATAGRAM_UNSENT;
12349 	chk->snd_count = 0;
12350 	if (net) {
12351 		/* we should hit here */
12352 		chk->whoTo = net;
12353 		atomic_add_int(&chk->whoTo->ref_count, 1);
12354 	} else {
12355 		chk->whoTo = NULL;
12356 	}
12357 	drp->ch.chunk_type = SCTP_PACKET_DROPPED;
12358 	drp->ch.chunk_length = htons(chk->send_size);
12359 	spc = SCTP_SB_LIMIT_RCV(stcb->sctp_socket);
12360 	if (spc < 0) {
12361 		spc = 0;
12362 	}
12363 	drp->bottle_bw = htonl(spc);
12364 	if (asoc->my_rwnd) {
12365 		drp->current_onq = htonl(asoc->size_on_reasm_queue +
12366 		    asoc->size_on_all_streams +
12367 		    asoc->my_rwnd_control_len +
12368 		    stcb->sctp_socket->so_rcv.sb_cc);
12369 	} else {
12370 		/*-
12371 		 * If my rwnd is 0, possibly from mbuf depletion as well as
12372 		 * space used, tell the peer there is NO space aka onq == bw
12373 		 */
12374 		drp->current_onq = htonl(spc);
12375 	}
12376 	drp->reserved = 0;
12377 	datap = drp->data;
12378 	m_copydata(m, iphlen, len, (caddr_t)datap);
12379 	TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
12380 	asoc->ctrl_queue_cnt++;
12381 }
12382 
12383 void
12384 sctp_send_cwr(struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t high_tsn, uint8_t override)
12385 {
12386 	struct sctp_association *asoc;
12387 	struct sctp_cwr_chunk *cwr;
12388 	struct sctp_tmit_chunk *chk;
12389 
12390 	SCTP_TCB_LOCK_ASSERT(stcb);
12391 	if (net == NULL) {
12392 		return;
12393 	}
12394 	asoc = &stcb->asoc;
12395 	TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
12396 		if ((chk->rec.chunk_id.id == SCTP_ECN_CWR) && (net == chk->whoTo)) {
12397 			/* found a previous CWR queued to the same destination, update it if needed */
12398 			uint32_t ctsn;
12399 			cwr = mtod(chk->data, struct sctp_cwr_chunk *);
12400 			ctsn = ntohl(cwr->tsn);
12401 			if (SCTP_TSN_GT(high_tsn, ctsn)) {
12402 				cwr->tsn = htonl(high_tsn);
12403 			}
12404 			if (override & SCTP_CWR_REDUCE_OVERRIDE) {
12405 				/* Make sure override is carried */
12406 				cwr->ch.chunk_flags |= SCTP_CWR_REDUCE_OVERRIDE;
12407 			}
12408 			return;
12409 		}
12410 	}
12411 	sctp_alloc_a_chunk(stcb, chk);
12412 	if (chk == NULL) {
12413 		return;
12414 	}
12415 	chk->copy_by_ref = 0;
12416 	chk->rec.chunk_id.id = SCTP_ECN_CWR;
12417 	chk->rec.chunk_id.can_take_data = 1;
12418 	chk->flags = 0;
12419 	chk->asoc = &stcb->asoc;
12420 	chk->send_size = sizeof(struct sctp_cwr_chunk);
12421 	chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_NOWAIT, 1, MT_HEADER);
12422 	if (chk->data == NULL) {
12423 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
12424 		return;
12425 	}
12426 	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
12427 	SCTP_BUF_LEN(chk->data) = chk->send_size;
12428 	chk->sent = SCTP_DATAGRAM_UNSENT;
12429 	chk->snd_count = 0;
12430 	chk->whoTo = net;
12431 	atomic_add_int(&chk->whoTo->ref_count, 1);
12432 	cwr = mtod(chk->data, struct sctp_cwr_chunk *);
12433 	cwr->ch.chunk_type = SCTP_ECN_CWR;
12434 	cwr->ch.chunk_flags = override;
12435 	cwr->ch.chunk_length = htons(sizeof(struct sctp_cwr_chunk));
12436 	cwr->tsn = htonl(high_tsn);
12437 	TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
12438 	asoc->ctrl_queue_cnt++;
12439 }
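
/*
 * Illustrative sketch (not part of the SCTP stack): sctp_send_cwr() only
 * bumps the TSN in an already-queued CWR when SCTP_TSN_GT() says the new
 * value is logically newer; a plain ">" would misbehave when the 32-bit
 * TSN wraps. The helper below is a plausible equivalent of such a serial
 * comparison, not the project's actual macro; tsn_gt is a hypothetical name.
 */
#if 0
#include <stdbool.h>
#include <stdint.h>

/*
 * RFC 1982 style serial arithmetic: a is "after" b when the forward
 * distance from b to a, modulo 2^32, is less than 2^31.
 */
static bool
tsn_gt(uint32_t a, uint32_t b)
{
	return (a != b) && ((uint32_t)(a - b) < (1U << 31));
}

/* e.g. tsn_gt(0x00000002, 0xfffffffe) is true across the wrap. */
#endif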
12440 
12441 static int
12442 sctp_add_stream_reset_out(struct sctp_tcb *stcb, struct sctp_tmit_chunk *chk,
12443                           uint32_t seq, uint32_t resp_seq, uint32_t last_sent)
12444 {
12445 	uint16_t len, old_len, i;
12446 	struct sctp_stream_reset_out_request *req_out;
12447 	struct sctp_chunkhdr *ch;
12448 	int at;
12449 	int number_entries = 0;
12450 
12451 	ch = mtod(chk->data, struct sctp_chunkhdr *);
12452 	old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
12453 	/* get to new offset for the param. */
12454 	req_out = (struct sctp_stream_reset_out_request *)((caddr_t)ch + len);
12455 	/* now how long will this param be? */
12456 	for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
12457 		if ((stcb->asoc.strmout[i].state == SCTP_STREAM_RESET_PENDING) &&
12458 		    (stcb->asoc.strmout[i].chunks_on_queues == 0) &&
12459 		    TAILQ_EMPTY(&stcb->asoc.strmout[i].outqueue)) {
12460 			number_entries++;
12461 		}
12462 	}
12463 	if (number_entries == 0) {
12464 		return (0);
12465 	}
12466 	if (number_entries == stcb->asoc.streamoutcnt) {
12467 		number_entries = 0;
12468 	}
12469 	if (number_entries > SCTP_MAX_STREAMS_AT_ONCE_RESET) {
12470 		number_entries = SCTP_MAX_STREAMS_AT_ONCE_RESET;
12471 	}
12472 	len = (uint16_t)(sizeof(struct sctp_stream_reset_out_request) + (sizeof(uint16_t) * number_entries));
12473 	req_out->ph.param_type = htons(SCTP_STR_RESET_OUT_REQUEST);
12474 	req_out->ph.param_length = htons(len);
12475 	req_out->request_seq = htonl(seq);
12476 	req_out->response_seq = htonl(resp_seq);
12477 	req_out->send_reset_at_tsn = htonl(last_sent);
12478 	at = 0;
12479 	if (number_entries) {
12480 		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
12481 			if ((stcb->asoc.strmout[i].state == SCTP_STREAM_RESET_PENDING) &&
12482 			    (stcb->asoc.strmout[i].chunks_on_queues == 0) &&
12483 			    TAILQ_EMPTY(&stcb->asoc.strmout[i].outqueue)) {
12484 				req_out->list_of_streams[at] = htons(i);
12485 				at++;
12486 				stcb->asoc.strmout[i].state = SCTP_STREAM_RESET_IN_FLIGHT;
12487 				if (at >= number_entries) {
12488 					break;
12489 				}
12490 			}
12491 		}
12492 	} else {
12493 		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
12494 			stcb->asoc.strmout[i].state = SCTP_STREAM_RESET_IN_FLIGHT;
12495 		}
12496 	}
12497 	if (SCTP_SIZE32(len) > len) {
12498 		/*-
12499 		 * Need to worry about the pad we may end up adding to the
12500 		 * end. This is easy since the struct is either aligned to 4
12501 		 * bytes or 2 bytes off.
12502 		 */
12503 		req_out->list_of_streams[number_entries] = 0;
12504 	}
12505 	/* now fix the chunk length */
12506 	ch->chunk_length = htons(len + old_len);
12507 	chk->book_size = len + old_len;
12508 	chk->book_size_scale = 0;
12509 	chk->send_size = SCTP_SIZE32(chk->book_size);
12510 	SCTP_BUF_LEN(chk->data) = chk->send_size;
12511 	return (1);
12512 }
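
/*
 * Illustrative sketch (not part of the SCTP stack): the parameter length
 * computed above covers the fixed Outgoing SSN Reset Request header plus
 * one 16-bit stream number per entry, and the chunk is then padded to a
 * 4-byte boundary via SCTP_SIZE32(), which is why an extra zero stream
 * slot is written when the entry count is odd. ROUND_UP32 below is a
 * hypothetical stand-in for SCTP_SIZE32().
 */
#if 0
#include <stdio.h>

#define ROUND_UP32(x)	(((x) + 3U) & ~3U)

int
main(void)
{
	/* Fixed part of an Outgoing SSN Reset Request (RFC 6525, 4.1):
	 * param header (4) + request seq (4) + response seq (4) +
	 * sender's last assigned TSN (4) = 16 bytes. */
	const unsigned int fixed = 16;
	unsigned int entries;

	for (entries = 1; entries <= 4; entries++) {
		unsigned int len = fixed + entries * 2U; /* 2 bytes per stream id */

		printf("%u stream(s): param len %u, padded to %u\n",
		    entries, len, ROUND_UP32(len));
	}
	/* Odd entry counts leave len 2 bytes short of a 4-byte boundary,
	 * which is why the code above zeroes one extra list slot. */
	return (0);
}
#endif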
12513 
12514 static void
12515 sctp_add_stream_reset_in(struct sctp_tmit_chunk *chk,
12516                          int number_entries, uint16_t *list,
12517                          uint32_t seq)
12518 {
12519 	uint16_t len, old_len, i;
12520 	struct sctp_stream_reset_in_request *req_in;
12521 	struct sctp_chunkhdr *ch;
12522 
12523 	ch = mtod(chk->data, struct sctp_chunkhdr *);
12524 	old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
12525 
12526 	/* get to new offset for the param. */
12527 	req_in = (struct sctp_stream_reset_in_request *)((caddr_t)ch + len);
12528 	/* now how long will this param be? */
12529 	len = (uint16_t)(sizeof(struct sctp_stream_reset_in_request) + (sizeof(uint16_t) * number_entries));
12530 	req_in->ph.param_type = htons(SCTP_STR_RESET_IN_REQUEST);
12531 	req_in->ph.param_length = htons(len);
12532 	req_in->request_seq = htonl(seq);
12533 	if (number_entries) {
12534 		for (i = 0; i < number_entries; i++) {
12535 			req_in->list_of_streams[i] = htons(list[i]);
12536 		}
12537 	}
12538 	if (SCTP_SIZE32(len) > len) {
12539 		/*-
12540 		 * Need to worry about the pad we may end up adding to the
12541 		 * end. This is easy since the struct is either aligned to 4
12542 		 * bytes or 2 bytes off.
12543 		 */
12544 		req_in->list_of_streams[number_entries] = 0;
12545 	}
12546 	/* now fix the chunk length */
12547 	ch->chunk_length = htons(len + old_len);
12548 	chk->book_size = len + old_len;
12549 	chk->book_size_scale = 0;
12550 	chk->send_size = SCTP_SIZE32(chk->book_size);
12551 	SCTP_BUF_LEN(chk->data) = chk->send_size;
12552 	return;
12553 }
12554 
12555 static void
12556 sctp_add_stream_reset_tsn(struct sctp_tmit_chunk *chk,
12557                           uint32_t seq)
12558 {
12559 	uint16_t len, old_len;
12560 	struct sctp_stream_reset_tsn_request *req_tsn;
12561 	struct sctp_chunkhdr *ch;
12562 
12563 	ch = mtod(chk->data, struct sctp_chunkhdr *);
12564 	old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
12565 
12566 	/* get to new offset for the param. */
12567 	req_tsn = (struct sctp_stream_reset_tsn_request *)((caddr_t)ch + len);
12568 	/* now how long will this param be? */
12569 	len = sizeof(struct sctp_stream_reset_tsn_request);
12570 	req_tsn->ph.param_type = htons(SCTP_STR_RESET_TSN_REQUEST);
12571 	req_tsn->ph.param_length = htons(len);
12572 	req_tsn->request_seq = htonl(seq);
12573 
12574 	/* now fix the chunk length */
12575 	ch->chunk_length = htons(len + old_len);
12576 	chk->send_size = len + old_len;
12577 	chk->book_size = SCTP_SIZE32(chk->send_size);
12578 	chk->book_size_scale = 0;
12579 	SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
12580 	return;
12581 }
12582 
12583 void
12584 sctp_add_stream_reset_result(struct sctp_tmit_chunk *chk,
12585                              uint32_t resp_seq, uint32_t result)
12586 {
12587 	uint16_t len, old_len;
12588 	struct sctp_stream_reset_response *resp;
12589 	struct sctp_chunkhdr *ch;
12590 
12591 	ch = mtod(chk->data, struct sctp_chunkhdr *);
12592 	old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
12593 
12594 	/* get to new offset for the param. */
12595 	resp = (struct sctp_stream_reset_response *)((caddr_t)ch + len);
12596 	/* now how long will this param be? */
12597 	len = sizeof(struct sctp_stream_reset_response);
12598 	resp->ph.param_type = htons(SCTP_STR_RESET_RESPONSE);
12599 	resp->ph.param_length = htons(len);
12600 	resp->response_seq = htonl(resp_seq);
12601 	resp->result = htonl(result);
12602 
12603 	/* now fix the chunk length */
12604 	ch->chunk_length = htons(len + old_len);
12605 	chk->book_size = len + old_len;
12606 	chk->book_size_scale = 0;
12607 	chk->send_size = SCTP_SIZE32(chk->book_size);
12608 	SCTP_BUF_LEN(chk->data) = chk->send_size;
12609 	return;
12610 }
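
/*
 * Illustrative sketch (not part of the SCTP stack): the "result" value
 * written above is one of the Re-configuration Response result codes from
 * RFC 6525, Section 4.4. The enum below only restates those numeric values
 * for reference; the stack itself uses its own symbolic names, and the
 * name reconf_result is hypothetical.
 */
#if 0
enum reconf_result {
	RECONF_SUCCESS_NOTHING_TO_DO	= 0,
	RECONF_SUCCESS_PERFORMED	= 1,
	RECONF_DENIED			= 2,
	RECONF_ERROR_WRONG_SSN		= 3,
	RECONF_ERROR_IN_PROGRESS	= 4,	/* request already in progress */
	RECONF_ERROR_BAD_SEQNO		= 5,
	RECONF_IN_PROGRESS		= 6
};
#endif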
12611 
12612 void
12613 sctp_send_deferred_reset_response(struct sctp_tcb *stcb,
12614 				 struct sctp_stream_reset_list *ent,
12615 				 int response)
12616 {
12617 	struct sctp_association *asoc;
12618 	struct sctp_tmit_chunk *chk;
12619 	struct sctp_chunkhdr *ch;
12620 
12621 	asoc = &stcb->asoc;
12622 
12623 	/*
12624 	 * Record the new response (probably PERFORMED) as our last reset
12625 	 * action. This ensures that if we fail to send, a retransmission
12626 	 * from the peer will still get the new response.
12627 	 */
12628 	asoc->last_reset_action[0] = response;
12629 	if (asoc->stream_reset_outstanding) {
12630 		return;
12631 	}
12632 	sctp_alloc_a_chunk(stcb, chk);
12633 	if (chk == NULL) {
12634 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12635 		return;
12636 	}
12637 	chk->copy_by_ref = 0;
12638 	chk->rec.chunk_id.id = SCTP_STREAM_RESET;
12639 	chk->rec.chunk_id.can_take_data = 0;
12640 	chk->flags = 0;
12641 	chk->asoc = &stcb->asoc;
12642 	chk->book_size = sizeof(struct sctp_chunkhdr);
12643 	chk->send_size = SCTP_SIZE32(chk->book_size);
12644 	chk->book_size_scale = 0;
12645 	chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
12646 	if (chk->data == NULL) {
12647 		sctp_free_a_chunk(stcb, chk, SCTP_SO_LOCKED);
12648 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12649 		return;
12650 	}
12651 	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
12652 	/* setup chunk parameters */
12653 	chk->sent = SCTP_DATAGRAM_UNSENT;
12654 	chk->snd_count = 0;
12655 	if (stcb->asoc.alternate) {
12656 		chk->whoTo = stcb->asoc.alternate;
12657 	} else {
12658 		chk->whoTo = stcb->asoc.primary_destination;
12659 	}
12660 	ch = mtod(chk->data, struct sctp_chunkhdr *);
12661 	ch->chunk_type = SCTP_STREAM_RESET;
12662 	ch->chunk_flags = 0;
12663 	ch->chunk_length = htons(chk->book_size);
12664 	atomic_add_int(&chk->whoTo->ref_count, 1);
12665 	SCTP_BUF_LEN(chk->data) = chk->send_size;
12666 	sctp_add_stream_reset_result(chk, ent->seq, response);
12667 	/* insert the chunk for sending */
12668 	TAILQ_INSERT_TAIL(&asoc->control_send_queue,
12669 			  chk,
12670 			  sctp_next);
12671 	asoc->ctrl_queue_cnt++;
12672 }
12673 
12674 void
12675 sctp_add_stream_reset_result_tsn(struct sctp_tmit_chunk *chk,
12676                                  uint32_t resp_seq, uint32_t result,
12677                                  uint32_t send_una, uint32_t recv_next)
12678 {
12679 	uint16_t len, old_len;
12680 	struct sctp_stream_reset_response_tsn *resp;
12681 	struct sctp_chunkhdr *ch;
12682 
12683 	ch = mtod(chk->data, struct sctp_chunkhdr *);
12684 	old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
12685 
12686 	/* get to new offset for the param. */
12687 	resp = (struct sctp_stream_reset_response_tsn *)((caddr_t)ch + len);
12688 	/* now how long will this param be? */
12689 	len = sizeof(struct sctp_stream_reset_response_tsn);
12690 	resp->ph.param_type = htons(SCTP_STR_RESET_RESPONSE);
12691 	resp->ph.param_length = htons(len);
12692 	resp->response_seq = htonl(resp_seq);
12693 	resp->result = htonl(result);
12694 	resp->senders_next_tsn = htonl(send_una);
12695 	resp->receivers_next_tsn = htonl(recv_next);
12696 
12697 	/* now fix the chunk length */
12698 	ch->chunk_length = htons(len + old_len);
12699 	chk->book_size = len + old_len;
12700 	chk->send_size = SCTP_SIZE32(chk->book_size);
12701 	chk->book_size_scale = 0;
12702 	SCTP_BUF_LEN(chk->data) = chk->send_size;
12703 	return;
12704 }
12705 
12706 static void
12707 sctp_add_an_out_stream(struct sctp_tmit_chunk *chk,
12708 		       uint32_t seq,
12709 		       uint16_t adding)
12710 {
12711 	uint16_t len, old_len;
12712 	struct sctp_chunkhdr *ch;
12713 	struct sctp_stream_reset_add_strm *addstr;
12714 
12715 	ch = mtod(chk->data, struct sctp_chunkhdr *);
12716 	old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
12717 
12718 	/* get to new offset for the param. */
12719 	addstr = (struct sctp_stream_reset_add_strm *)((caddr_t)ch + len);
12720 	/* now how long will this param be? */
12721 	len = sizeof(struct sctp_stream_reset_add_strm);
12722 
12723 	/* Fill it out. */
12724 	addstr->ph.param_type = htons(SCTP_STR_RESET_ADD_OUT_STREAMS);
12725 	addstr->ph.param_length = htons(len);
12726 	addstr->request_seq = htonl(seq);
12727 	addstr->number_of_streams = htons(adding);
12728 	addstr->reserved = 0;
12729 
12730 	/* now fix the chunk length */
12731 	ch->chunk_length = htons(len + old_len);
12732 	chk->send_size = len + old_len;
12733 	chk->book_size = SCTP_SIZE32(chk->send_size);
12734 	chk->book_size_scale = 0;
12735 	SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
12736 	return;
12737 }
12738 
12739 static void
12740 sctp_add_an_in_stream(struct sctp_tmit_chunk *chk,
12741                       uint32_t seq,
12742                       uint16_t adding)
12743 {
12744 	uint16_t len, old_len;
12745 	struct sctp_chunkhdr *ch;
12746 	struct sctp_stream_reset_add_strm *addstr;
12747 
12748 	ch = mtod(chk->data, struct sctp_chunkhdr *);
12749 	old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
12750 
12751 	/* get to new offset for the param. */
12752 	addstr = (struct sctp_stream_reset_add_strm *)((caddr_t)ch + len);
12753 	/* now how long will this param be? */
12754 	len = sizeof(struct sctp_stream_reset_add_strm);
12755 	/* Fill it out. */
12756 	addstr->ph.param_type = htons(SCTP_STR_RESET_ADD_IN_STREAMS);
12757 	addstr->ph.param_length = htons(len);
12758 	addstr->request_seq = htonl(seq);
12759 	addstr->number_of_streams = htons(adding);
12760 	addstr->reserved = 0;
12761 
12762 	/* now fix the chunk length */
12763 	ch->chunk_length = htons(len + old_len);
12764 	chk->send_size = len + old_len;
12765 	chk->book_size = SCTP_SIZE32(chk->send_size);
12766 	chk->book_size_scale = 0;
12767 	SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
12768 	return;
12769 }
12770 
12771 int
12772 sctp_send_stream_reset_out_if_possible(struct sctp_tcb *stcb, int so_locked)
12773 {
12774 	struct sctp_association *asoc;
12775 	struct sctp_tmit_chunk *chk;
12776 	struct sctp_chunkhdr *ch;
12777 	uint32_t seq;
12778 
12779 	asoc = &stcb->asoc;
12780 	asoc->trigger_reset = 0;
12781 	if (asoc->stream_reset_outstanding) {
12782 		return (EALREADY);
12783 	}
12784 	sctp_alloc_a_chunk(stcb, chk);
12785 	if (chk == NULL) {
12786 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12787 		return (ENOMEM);
12788 	}
12789 	chk->copy_by_ref = 0;
12790 	chk->rec.chunk_id.id = SCTP_STREAM_RESET;
12791 	chk->rec.chunk_id.can_take_data = 0;
12792 	chk->flags = 0;
12793 	chk->asoc = &stcb->asoc;
12794 	chk->book_size = sizeof(struct sctp_chunkhdr);
12795 	chk->send_size = SCTP_SIZE32(chk->book_size);
12796 	chk->book_size_scale = 0;
12797 	chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
12798 	if (chk->data == NULL) {
12799 		sctp_free_a_chunk(stcb, chk, so_locked);
12800 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12801 		return (ENOMEM);
12802 	}
12803 	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
12804 
12805 	/* setup chunk parameters */
12806 	chk->sent = SCTP_DATAGRAM_UNSENT;
12807 	chk->snd_count = 0;
12808 	if (stcb->asoc.alternate) {
12809 		chk->whoTo = stcb->asoc.alternate;
12810 	} else {
12811 		chk->whoTo = stcb->asoc.primary_destination;
12812 	}
12813 	ch = mtod(chk->data, struct sctp_chunkhdr *);
12814 	ch->chunk_type = SCTP_STREAM_RESET;
12815 	ch->chunk_flags = 0;
12816 	ch->chunk_length = htons(chk->book_size);
12817 	atomic_add_int(&chk->whoTo->ref_count, 1);
12818 	SCTP_BUF_LEN(chk->data) = chk->send_size;
12819 	seq = stcb->asoc.str_reset_seq_out;
12820 	if (sctp_add_stream_reset_out(stcb, chk, seq, (stcb->asoc.str_reset_seq_in - 1), (stcb->asoc.sending_seq - 1))) {
12821 		seq++;
12822 		asoc->stream_reset_outstanding++;
12823 	} else {
12824 		m_freem(chk->data);
12825 		chk->data = NULL;
12826 		sctp_free_a_chunk(stcb, chk, so_locked);
12827 		return (ENOENT);
12828 	}
12829 	asoc->str_reset = chk;
12830 	/* insert the chunk for sending */
12831 	TAILQ_INSERT_TAIL(&asoc->control_send_queue,
12832 			  chk,
12833 			  sctp_next);
12834 	asoc->ctrl_queue_cnt++;
12835 
12836 	if (stcb->asoc.send_sack) {
12837 		sctp_send_sack(stcb, so_locked);
12838 	}
12839 	sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo);
12840 	return (0);
12841 }
12842 
12843 int
12844 sctp_send_str_reset_req(struct sctp_tcb *stcb,
12845                         uint16_t number_entries, uint16_t *list,
12846                         uint8_t send_in_req,
12847                         uint8_t send_tsn_req,
12848                         uint8_t add_stream,
12849                         uint16_t adding_o,
12850                         uint16_t adding_i, uint8_t peer_asked)
12851 {
12852 	struct sctp_association *asoc;
12853 	struct sctp_tmit_chunk *chk;
12854 	struct sctp_chunkhdr *ch;
12855 	int can_send_out_req = 0;
12856 	uint32_t seq;
12857 
12858 	asoc = &stcb->asoc;
12859 	if (asoc->stream_reset_outstanding) {
12860 		/*-
12861 		 * Already one pending, must get ACK back to clear the flag.
12862 		 */
12863 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EBUSY);
12864 		return (EBUSY);
12865 	}
12866 	if ((send_in_req == 0) && (send_tsn_req == 0) &&
12867 	    (add_stream == 0)) {
12868 		/* nothing to do */
12869 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12870 		return (EINVAL);
12871 	}
12872 	if (send_tsn_req && send_in_req) {
12873 		/* error, can't do that */
12874 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12875 		return (EINVAL);
12876 	} else if (send_in_req) {
12877 		can_send_out_req = 1;
12878 	}
12879 	if (number_entries > (MCLBYTES -
12880 	                      SCTP_MIN_OVERHEAD -
12881 	                      sizeof(struct sctp_chunkhdr) -
12882 	                      sizeof(struct sctp_stream_reset_out_request)) /
12883 	                     sizeof(uint16_t)) {
12884 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12885 		return (ENOMEM);
12886 	}
12887 	sctp_alloc_a_chunk(stcb, chk);
12888 	if (chk == NULL) {
12889 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12890 		return (ENOMEM);
12891 	}
12892 	chk->copy_by_ref = 0;
12893 	chk->rec.chunk_id.id = SCTP_STREAM_RESET;
12894 	chk->rec.chunk_id.can_take_data = 0;
12895 	chk->flags = 0;
12896 	chk->asoc = &stcb->asoc;
12897 	chk->book_size = sizeof(struct sctp_chunkhdr);
12898 	chk->send_size = SCTP_SIZE32(chk->book_size);
12899 	chk->book_size_scale = 0;
12900 	chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
12901 	if (chk->data == NULL) {
12902 		sctp_free_a_chunk(stcb, chk, SCTP_SO_LOCKED);
12903 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12904 		return (ENOMEM);
12905 	}
12906 	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
12907 
12908 	/* setup chunk parameters */
12909 	chk->sent = SCTP_DATAGRAM_UNSENT;
12910 	chk->snd_count = 0;
12911 	if (stcb->asoc.alternate) {
12912 		chk->whoTo = stcb->asoc.alternate;
12913 	} else {
12914 		chk->whoTo = stcb->asoc.primary_destination;
12915 	}
12916 	atomic_add_int(&chk->whoTo->ref_count, 1);
12917 	ch = mtod(chk->data, struct sctp_chunkhdr *);
12918 	ch->chunk_type = SCTP_STREAM_RESET;
12919 	ch->chunk_flags = 0;
12920 	ch->chunk_length = htons(chk->book_size);
12921 	SCTP_BUF_LEN(chk->data) = chk->send_size;
12922 
12923 	seq = stcb->asoc.str_reset_seq_out;
12924 	if (can_send_out_req) {
12925 		int ret;
12926 		ret = sctp_add_stream_reset_out(stcb, chk, seq, (stcb->asoc.str_reset_seq_in - 1), (stcb->asoc.sending_seq - 1));
12927 		if (ret) {
12928 			seq++;
12929 			asoc->stream_reset_outstanding++;
12930 		}
12931 	}
12932 	if ((add_stream & 1) &&
12933 	    ((stcb->asoc.strm_realoutsize - stcb->asoc.streamoutcnt) < adding_o)) {
12934 		/* Need to allocate more */
12935 		struct sctp_stream_out *oldstream;
12936 		struct sctp_stream_queue_pending *sp, *nsp;
12937 		int i;
12938 #if defined(SCTP_DETAILED_STR_STATS)
12939 		int j;
12940 #endif
12941 
12942 		oldstream = stcb->asoc.strmout;
12943 		/* get some more */
12944 		SCTP_MALLOC(stcb->asoc.strmout, struct sctp_stream_out *,
12945 			    (stcb->asoc.streamoutcnt + adding_o) * sizeof(struct sctp_stream_out),
12946 			    SCTP_M_STRMO);
12947 		if (stcb->asoc.strmout == NULL) {
12948 			uint8_t x;
12949 			stcb->asoc.strmout = oldstream;
12950 			/* Turn off the bit */
12951 			x = add_stream & 0xfe;
12952 			add_stream = x;
12953 			goto skip_stuff;
12954 		}
12955 		/* OK, now copy over the old outgoing stream state and
12956 		 * initialize the new streams.
12957 		 */
12958 		SCTP_TCB_SEND_LOCK(stcb);
12959 		stcb->asoc.ss_functions.sctp_ss_clear(stcb, &stcb->asoc, 0, 1);
12960 		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
12961 			TAILQ_INIT(&stcb->asoc.strmout[i].outqueue);
12962 			stcb->asoc.strmout[i].chunks_on_queues = oldstream[i].chunks_on_queues;
12963 			stcb->asoc.strmout[i].next_mid_ordered = oldstream[i].next_mid_ordered;
12964 			stcb->asoc.strmout[i].next_mid_unordered = oldstream[i].next_mid_unordered;
12965 			stcb->asoc.strmout[i].last_msg_incomplete = oldstream[i].last_msg_incomplete;
12966 			stcb->asoc.strmout[i].sid = i;
12967 			stcb->asoc.strmout[i].state = oldstream[i].state;
12968 			/* FIX ME FIX ME */
12969 			/* This should be a SS_COPY operation FIX ME STREAM SCHEDULER EXPERT */
12970 			stcb->asoc.ss_functions.sctp_ss_init_stream(stcb, &stcb->asoc.strmout[i], &oldstream[i]);
12971 			/* now anything on those queues? */
12972 			TAILQ_FOREACH_SAFE(sp, &oldstream[i].outqueue, next, nsp) {
12973 				TAILQ_REMOVE(&oldstream[i].outqueue, sp, next);
12974 				TAILQ_INSERT_TAIL(&stcb->asoc.strmout[i].outqueue, sp, next);
12975 			}
12976 
12977 		}
12978 		/* now the new streams */
12979 		stcb->asoc.ss_functions.sctp_ss_init(stcb, &stcb->asoc, 1);
12980 		for (i = stcb->asoc.streamoutcnt; i < (stcb->asoc.streamoutcnt + adding_o); i++) {
12981 			TAILQ_INIT(&stcb->asoc.strmout[i].outqueue);
12982 			stcb->asoc.strmout[i].chunks_on_queues = 0;
12983 #if defined(SCTP_DETAILED_STR_STATS)
12984 			for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
12985 				stcb->asoc.strmout[i].abandoned_sent[j] = 0;
12986 				stcb->asoc.strmout[i].abandoned_unsent[j] = 0;
12987 			}
12988 #else
12989 			stcb->asoc.strmout[i].abandoned_sent[0] = 0;
12990 			stcb->asoc.strmout[i].abandoned_unsent[0] = 0;
12991 #endif
12992 			stcb->asoc.strmout[i].next_mid_ordered = 0;
12993 			stcb->asoc.strmout[i].next_mid_unordered = 0;
12994 			stcb->asoc.strmout[i].sid = i;
12995 			stcb->asoc.strmout[i].last_msg_incomplete = 0;
12996 			stcb->asoc.ss_functions.sctp_ss_init_stream(stcb, &stcb->asoc.strmout[i], NULL);
12997 			stcb->asoc.strmout[i].state = SCTP_STREAM_CLOSED;
12998 		}
12999 		stcb->asoc.strm_realoutsize = stcb->asoc.streamoutcnt + adding_o;
13000 		SCTP_FREE(oldstream, SCTP_M_STRMO);
13001 		SCTP_TCB_SEND_UNLOCK(stcb);
13002 	}
13003 skip_stuff:
13004 	if ((add_stream & 1) && (adding_o > 0)) {
13005 		asoc->strm_pending_add_size = adding_o;
13006 		asoc->peer_req_out = peer_asked;
13007 		sctp_add_an_out_stream(chk, seq, adding_o);
13008 		seq++;
13009 		asoc->stream_reset_outstanding++;
13010 	}
13011 	if ((add_stream & 2) && (adding_i > 0)) {
13012 		sctp_add_an_in_stream(chk, seq, adding_i);
13013 		seq++;
13014 		asoc->stream_reset_outstanding++;
13015 	}
13016 	if (send_in_req) {
13017 		sctp_add_stream_reset_in(chk, number_entries, list, seq);
13018 		seq++;
13019 		asoc->stream_reset_outstanding++;
13020 	}
13021 	if (send_tsn_req) {
13022 		sctp_add_stream_reset_tsn(chk, seq);
13023 		asoc->stream_reset_outstanding++;
13024 	}
13025 	asoc->str_reset = chk;
13026 	/* insert the chunk for sending */
13027 	TAILQ_INSERT_TAIL(&asoc->control_send_queue,
13028 			  chk,
13029 			  sctp_next);
13030 	asoc->ctrl_queue_cnt++;
13031 	if (stcb->asoc.send_sack) {
13032 		sctp_send_sack(stcb, SCTP_SO_LOCKED);
13033 	}
13034 	sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo);
13035 	return (0);
13036 }
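
/*
 * Illustrative userland sketch (not part of the SCTP stack): requests
 * built by sctp_send_str_reset_req() are typically driven from the
 * RFC 6525 socket options. This assumes a platform whose <netinet/sctp.h>
 * exposes the RFC 6525, Section 6.3 structures and option names
 * (SCTP_RESET_STREAMS, SCTP_ADD_STREAMS); the helper names
 * reset_outgoing_stream() and add_outgoing_streams() are hypothetical.
 */
#if 0
#include <sys/socket.h>
#include <netinet/sctp.h>
#include <stdlib.h>
#include <string.h>

static int
reset_outgoing_stream(int sd, sctp_assoc_t assoc_id, uint16_t sid)
{
	struct sctp_reset_streams *srs;
	socklen_t len;
	int ret;

	len = sizeof(struct sctp_reset_streams) + sizeof(uint16_t);
	srs = calloc(1, len);
	if (srs == NULL)
		return (-1);
	srs->srs_assoc_id = assoc_id;
	srs->srs_flags = SCTP_STREAM_RESET_OUTGOING;
	srs->srs_number_streams = 1;	/* 0 would mean "all streams" */
	srs->srs_stream_list[0] = sid;
	ret = setsockopt(sd, IPPROTO_SCTP, SCTP_RESET_STREAMS, srs, len);
	free(srs);
	return (ret);
}

static int
add_outgoing_streams(int sd, sctp_assoc_t assoc_id, uint16_t how_many)
{
	struct sctp_add_streams sas;

	memset(&sas, 0, sizeof(sas));
	sas.sas_assoc_id = assoc_id;
	sas.sas_outstrms = how_many;
	return (setsockopt(sd, IPPROTO_SCTP, SCTP_ADD_STREAMS,
	    &sas, sizeof(sas)));
}
#endif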
13037 
13038 void
13039 sctp_send_abort(struct mbuf *m, int iphlen, struct sockaddr *src, struct sockaddr *dst,
13040                 struct sctphdr *sh, uint32_t vtag, struct mbuf *cause,
13041 #if defined(__FreeBSD__) && !defined(__Userspace__)
13042                 uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
13043 #endif
13044                 uint32_t vrf_id, uint16_t port)
13045 {
13046 	/* Don't respond to an ABORT with an ABORT. */
13047 	if (sctp_is_there_an_abort_here(m, iphlen, &vtag)) {
13048 		if (cause)
13049 			sctp_m_freem(cause);
13050 		return;
13051 	}
13052 	sctp_send_resp_msg(src, dst, sh, vtag, SCTP_ABORT_ASSOCIATION, cause,
13053 #if defined(__FreeBSD__) && !defined(__Userspace__)
13054 	                   mflowtype, mflowid, fibnum,
13055 #endif
13056 	                   vrf_id, port);
13057 	return;
13058 }
13059 
13060 void
13061 sctp_send_operr_to(struct sockaddr *src, struct sockaddr *dst,
13062                    struct sctphdr *sh, uint32_t vtag, struct mbuf *cause,
13063 #if defined(__FreeBSD__) && !defined(__Userspace__)
13064                    uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
13065 #endif
13066                    uint32_t vrf_id, uint16_t port)
13067 {
13068 	sctp_send_resp_msg(src, dst, sh, vtag, SCTP_OPERATION_ERROR, cause,
13069 #if defined(__FreeBSD__) && !defined(__Userspace__)
13070 	                   mflowtype, mflowid, fibnum,
13071 #endif
13072 	                   vrf_id, port);
13073 	return;
13074 }
13075 
13076 static struct mbuf *
13077 sctp_copy_resume(struct uio *uio,
13078 		 int max_send_len,
13079 #if defined(__FreeBSD__) || defined(__Userspace__)
13080 		 int user_marks_eor,
13081 #endif
13082 		 int *error,
13083 		 uint32_t *sndout,
13084 		 struct mbuf **new_tail)
13085 {
13086 #if defined(__FreeBSD__) || defined(__Userspace__)
13087 	struct mbuf *m;
13088 
13089 	m = m_uiotombuf(uio, M_WAITOK, max_send_len, 0,
13090 		(M_PKTHDR | (user_marks_eor ? M_EOR : 0)));
13091 	if (m == NULL) {
13092 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
13093 		*error = ENOBUFS;
13094 	} else {
13095 		*sndout = m_length(m, NULL);
13096 		*new_tail = m_last(m);
13097 	}
13098 	return (m);
13099 #else
13100 	int left, cancpy, willcpy;
13101 	struct mbuf *m, *head;
13102 
13103 #if defined(__APPLE__) && !defined(__Userspace__)
13104 #if defined(APPLE_LEOPARD)
13105 	left = (int)min(uio->uio_resid, max_send_len);
13106 #else
13107 	left = (int)min(uio_resid(uio), max_send_len);
13108 #endif
13109 #else
13110 	left = (int)min(uio->uio_resid, max_send_len);
13111 #endif
13112 	/* Always get a header just in case */
13113 	head = sctp_get_mbuf_for_msg(left, 0, M_WAITOK, 0, MT_DATA);
13114 	if (head == NULL) {
13115 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
13116 		*error = ENOBUFS;
13117 		return (NULL);
13118 	}
13119 	cancpy = (int)M_TRAILINGSPACE(head);
13120 	willcpy = min(cancpy, left);
13121 	*error = uiomove(mtod(head, caddr_t), willcpy, uio);
13122 	if (*error) {
13123 		sctp_m_freem(head);
13124 		return (NULL);
13125 	}
13126 	*sndout += willcpy;
13127 	left -= willcpy;
13128 	SCTP_BUF_LEN(head) = willcpy;
13129 	m = head;
13130 	*new_tail = head;
13131 	while (left > 0) {
13132 		/* move in user data */
13133 		SCTP_BUF_NEXT(m) = sctp_get_mbuf_for_msg(left, 0, M_WAITOK, 0, MT_DATA);
13134 		if (SCTP_BUF_NEXT(m) == NULL) {
13135 			sctp_m_freem(head);
13136 			*new_tail = NULL;
13137 			SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
13138 			*error = ENOBUFS;
13139 			return (NULL);
13140 		}
13141 		m = SCTP_BUF_NEXT(m);
13142 		cancpy = (int)M_TRAILINGSPACE(m);
13143 		willcpy = min(cancpy, left);
13144 		*error = uiomove(mtod(m, caddr_t), willcpy, uio);
13145 		if (*error) {
13146 			sctp_m_freem(head);
13147 			*new_tail = NULL;
13148 			SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EFAULT);
13149 			*error = EFAULT;
13150 			return (NULL);
13151 		}
13152 		SCTP_BUF_LEN(m) = willcpy;
13153 		left -= willcpy;
13154 		*sndout += willcpy;
13155 		*new_tail = m;
13156 		if (left == 0) {
13157 			SCTP_BUF_NEXT(m) = NULL;
13158 		}
13159 	}
13160 	return (head);
13161 #endif
13162 }
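
/*
 * Illustrative sketch (not part of the SCTP stack): the non-FreeBSD branch
 * of sctp_copy_resume() above copies user data into a chain of fixed-size
 * buffers, filling each one up to its trailing space before allocating the
 * next. The userland analogue below shows the same chunking pattern without
 * mbufs; struct seg and copy_into_chain() are hypothetical names.
 */
#if 0
#include <stdlib.h>
#include <string.h>

struct seg {
	struct seg *next;
	size_t len;
	char data[2048];
};

static struct seg *
copy_into_chain(const char *src, size_t left)
{
	struct seg *head, *m;

	head = m = calloc(1, sizeof(*m));
	if (head == NULL)
		return (NULL);
	for (;;) {
		size_t willcpy = left < sizeof(m->data) ? left : sizeof(m->data);

		memcpy(m->data, src, willcpy);
		m->len = willcpy;
		src += willcpy;
		left -= willcpy;
		if (left == 0)
			break;
		m->next = calloc(1, sizeof(*m));
		if (m->next == NULL) {
			/* free the partial chain on failure, as the code above does */
			while (head != NULL) {
				struct seg *n = head->next;

				free(head);
				head = n;
			}
			return (NULL);
		}
		m = m->next;
	}
	return (head);
}
#endif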
13163 
13164 static int
13165 sctp_copy_one(struct sctp_stream_queue_pending *sp,
13166               struct uio *uio,
13167               int resv_upfront)
13168 {
13169 #if defined(__FreeBSD__) || defined(__Userspace__)
13170 	sp->data = m_uiotombuf(uio, M_WAITOK, sp->length,
13171 	                       resv_upfront, 0);
13172 	if (sp->data == NULL) {
13173 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
13174 		return (ENOBUFS);
13175 	}
13176 
13177 	sp->tail_mbuf = m_last(sp->data);
13178 	return (0);
13179 #else
13180 	int left;
13181 	int cancpy, willcpy, error;
13182 	struct mbuf *m, *head;
13183 	int cpsz = 0;
13184 
13185 	/* First one gets a header */
13186 	left = sp->length;
13187 	head = m = sctp_get_mbuf_for_msg((left + resv_upfront), 0, M_WAITOK, 0, MT_DATA);
13188 	if (m == NULL) {
13189 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
13190 		return (ENOBUFS);
13191 	}
13192 	/*-
13193 	 * Account for this mbuf (m) now; that way, if a later allocation
13194 	 * fails we won't have a bad count.
13195 	 */
13196 	SCTP_BUF_RESV_UF(m, resv_upfront);
13197 	cancpy = (int)M_TRAILINGSPACE(m);
13198 	willcpy = min(cancpy, left);
13199 	while (left > 0) {
13200 		/* move in user data */
13201 		error = uiomove(mtod(m, caddr_t), willcpy, uio);
13202 		if (error) {
13203 			sctp_m_freem(head);
13204 			return (error);
13205 		}
13206 		SCTP_BUF_LEN(m) = willcpy;
13207 		left -= willcpy;
13208 		cpsz += willcpy;
13209 		if (left > 0) {
13210 			SCTP_BUF_NEXT(m) = sctp_get_mbuf_for_msg(left, 0, M_WAITOK, 0, MT_DATA);
13211 			if (SCTP_BUF_NEXT(m) == NULL) {
13212 				/*
13213 				 * the head goes back to the caller, who can
13214 				 * free the rest
13215 				 */
13216 				sctp_m_freem(head);
13217 				SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
13218 				return (ENOBUFS);
13219 			}
13220 			m = SCTP_BUF_NEXT(m);
13221 			cancpy = (int)M_TRAILINGSPACE(m);
13222 			willcpy = min(cancpy, left);
13223 		} else {
13224 			sp->tail_mbuf = m;
13225 			SCTP_BUF_NEXT(m) = NULL;
13226 		}
13227 	}
13228 	sp->data = head;
13229 	sp->length = cpsz;
13230 	return (0);
13231 #endif
13232 }
13233 
13234 
13235 
13236 static struct sctp_stream_queue_pending *
13237 sctp_copy_it_in(struct sctp_tcb *stcb,
13238     struct sctp_association *asoc,
13239     struct sctp_sndrcvinfo *srcv,
13240     struct uio *uio,
13241     struct sctp_nets *net,
13242     ssize_t max_send_len,
13243     int user_marks_eor,
13244     int *error)
13245 
13246 {
13247 	/*-
13248 	 * This routine must be very careful in its work. Protocol
13249 	 * processing is up and running, so care must be taken to spl...()
13250 	 * when you need to do something that may affect the stcb/asoc. The
13251 	 * sb is locked, however. While data is being copied, protocol
13252 	 * processing should stay enabled since the copy is a slower operation...
13253 	 */
13254 	struct sctp_stream_queue_pending *sp = NULL;
13255 	int resv_in_first;
13256 
13257 	*error = 0;
13258 	/* Now can we send this? */
13259 	if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) ||
13260 	    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
13261 	    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
13262 	    (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) {
13263 		/* got data while shutting down */
13264 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
13265 		*error = ECONNRESET;
13266 		goto out_now;
13267 	}
13268 	sctp_alloc_a_strmoq(stcb, sp);
13269 	if (sp == NULL) {
13270 		SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
13271 		*error = ENOMEM;
13272 		goto out_now;
13273 	}
13274 	sp->act_flags = 0;
13275 	sp->sender_all_done = 0;
13276 	sp->sinfo_flags = srcv->sinfo_flags;
13277 	sp->timetolive = srcv->sinfo_timetolive;
13278 	sp->ppid = srcv->sinfo_ppid;
13279 	sp->context = srcv->sinfo_context;
13280 	sp->fsn = 0;
13281 	(void)SCTP_GETTIME_TIMEVAL(&sp->ts);
13282 
13283 	sp->sid = srcv->sinfo_stream;
13284 #if defined(__APPLE__) && !defined(__Userspace__)
13285 #if defined(APPLE_LEOPARD)
13286 	sp->length = (uint32_t)min(uio->uio_resid, max_send_len);
13287 #else
13288 	sp->length = (uint32_t)min(uio_resid(uio), max_send_len);
13289 #endif
13290 #else
13291 	sp->length = (uint32_t)min(uio->uio_resid, max_send_len);
13292 #endif
13293 #if defined(__APPLE__) && !defined(__Userspace__)
13294 #if defined(APPLE_LEOPARD)
13295 	if ((sp->length == (uint32_t)uio->uio_resid) &&
13296 #else
13297 	if ((sp->length == (uint32_t)uio_resid(uio)) &&
13298 #endif
13299 #else
13300 	if ((sp->length == (uint32_t)uio->uio_resid) &&
13301 #endif
13302 	    ((user_marks_eor == 0) ||
13303 	     (srcv->sinfo_flags & SCTP_EOF) ||
13304 	     (user_marks_eor && (srcv->sinfo_flags & SCTP_EOR)))) {
13305 		sp->msg_is_complete = 1;
13306 	} else {
13307 		sp->msg_is_complete = 0;
13308 	}
13309 	sp->sender_all_done = 0;
13310 	sp->some_taken = 0;
13311 	sp->put_last_out = 0;
13312 	resv_in_first = SCTP_DATA_CHUNK_OVERHEAD(stcb);
13313 	sp->data = sp->tail_mbuf = NULL;
13314 	if (sp->length == 0) {
13315 		goto skip_copy;
13316 	}
13317 	if (srcv->sinfo_keynumber_valid) {
13318 		sp->auth_keyid = srcv->sinfo_keynumber;
13319 	} else {
13320 		sp->auth_keyid = stcb->asoc.authinfo.active_keyid;
13321 	}
13322 	if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks)) {
13323 		sctp_auth_key_acquire(stcb, sp->auth_keyid);
13324 		sp->holds_key_ref = 1;
13325 	}
13326 #if defined(__APPLE__) && !defined(__Userspace__)
13327 	SCTP_SOCKET_UNLOCK(SCTP_INP_SO(stcb->sctp_ep), 0);
13328 #endif
13329 	*error = sctp_copy_one(sp, uio, resv_in_first);
13330 #if defined(__APPLE__) && !defined(__Userspace__)
13331 	SCTP_SOCKET_LOCK(SCTP_INP_SO(stcb->sctp_ep), 0);
13332 #endif
13333  skip_copy:
13334 	if (*error) {
13335 #if defined(__Userspace__)
13336 		SCTP_TCB_LOCK(stcb);
13337 #endif
13338 		sctp_free_a_strmoq(stcb, sp, SCTP_SO_LOCKED);
13339 #if defined(__Userspace__)
13340 		SCTP_TCB_UNLOCK(stcb);
13341 #endif
13342 		sp = NULL;
13343 	} else {
13344 		if (sp->sinfo_flags & SCTP_ADDR_OVER) {
13345 			sp->net = net;
13346 			atomic_add_int(&sp->net->ref_count, 1);
13347 		} else {
13348 			sp->net = NULL;
13349 		}
13350 		sctp_set_prsctp_policy(sp);
13351 	}
13352 out_now:
13353 	return (sp);
13354 }
13355 
13356 
13357 int
13358 sctp_sosend(struct socket *so,
13359             struct sockaddr *addr,
13360             struct uio *uio,
13361             struct mbuf *top,
13362             struct mbuf *control,
13363 #if defined(__APPLE__) && !defined(__Userspace__)
13364             int flags
13365 #else
13366             int flags,
13367 #if defined(__FreeBSD__) && !defined(__Userspace__)
13368             struct thread *p
13369 #elif defined(_WIN32) && !defined(__Userspace__)
13370             PKTHREAD p
13371 #else
13372 #if defined(__Userspace__)
13373             /*
13374 	     * proc is a dummy in __Userspace__ and will not be passed
13375 	     * to sctp_lower_sosend
13376 	     */
13377 #endif
13378             struct proc *p
13379 #endif
13380 #endif
13381 )
13382 {
13383 #if defined(__APPLE__) && !defined(__Userspace__)
13384 	struct proc *p = current_proc();
13385 #endif
13386 	int error, use_sndinfo = 0;
13387 	struct sctp_sndrcvinfo sndrcvninfo;
13388 	struct sockaddr *addr_to_use;
13389 #if defined(INET) && defined(INET6)
13390 	struct sockaddr_in sin;
13391 #endif
13392 
13393 #if defined(__APPLE__) && !defined(__Userspace__)
13394 	SCTP_SOCKET_LOCK(so, 1);
13395 #endif
13396 	if (control) {
13397 		/* process cmsg snd/rcv info (maybe an assoc-id) */
13398 		if (sctp_find_cmsg(SCTP_SNDRCV, (void *)&sndrcvninfo, control,
13399 		    sizeof(sndrcvninfo))) {
13400 			/* got one */
13401 			use_sndinfo = 1;
13402 		}
13403 	}
13404 	addr_to_use = addr;
13405 #if defined(INET) && defined(INET6)
13406 	if ((addr) && (addr->sa_family == AF_INET6)) {
13407 		struct sockaddr_in6 *sin6;
13408 
13409 		sin6 = (struct sockaddr_in6 *)addr;
13410 		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
13411 			in6_sin6_2_sin(&sin, sin6);
13412 			addr_to_use = (struct sockaddr *)&sin;
13413 		}
13414 	}
13415 #endif
13416 	error = sctp_lower_sosend(so, addr_to_use, uio, top,
13417 				  control,
13418 				  flags,
13419 				  use_sndinfo ? &sndrcvninfo: NULL
13420 #if !defined(__Userspace__)
13421 				  , p
13422 #endif
13423 		);
13424 #if defined(__APPLE__) && !defined(__Userspace__)
13425 	SCTP_SOCKET_UNLOCK(so, 1);
13426 #endif
13427 	return (error);
13428 }
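
/*
 * Illustrative userland sketch (not part of the SCTP stack): sctp_sosend()
 * picks up per-message send parameters from an SCTP_SNDRCV ancillary data
 * item when one is present. This assumes the classic sctp_sndrcvinfo cmsg
 * interface from RFC 6458 (newer code would use SCTP_SNDINFO instead); the
 * helper name send_on_stream() is hypothetical.
 */
#if 0
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <netinet/in.h>
#include <netinet/sctp.h>
#include <arpa/inet.h>
#include <stdint.h>
#include <string.h>

static ssize_t
send_on_stream(int sd, const void *data, size_t len, uint16_t sid, uint32_t ppid)
{
	char cbuf[CMSG_SPACE(sizeof(struct sctp_sndrcvinfo))];
	struct sctp_sndrcvinfo *sinfo;
	struct cmsghdr *cmsg;
	struct msghdr msg;
	struct iovec iov;

	iov.iov_base = (void *)(uintptr_t)data;
	iov.iov_len = len;
	memset(&msg, 0, sizeof(msg));
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	memset(cbuf, 0, sizeof(cbuf));
	msg.msg_control = cbuf;
	msg.msg_controllen = sizeof(cbuf);
	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = IPPROTO_SCTP;
	cmsg->cmsg_type = SCTP_SNDRCV;
	cmsg->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
	sinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmsg);
	sinfo->sinfo_stream = sid;
	/* the ppid is opaque to SCTP; by convention apps pass it in network order */
	sinfo->sinfo_ppid = htonl(ppid);
	return (sendmsg(sd, &msg, 0));
}
#endif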
13429 
13430 
13431 int
13432 sctp_lower_sosend(struct socket *so,
13433                   struct sockaddr *addr,
13434                   struct uio *uio,
13435                   struct mbuf *i_pak,
13436                   struct mbuf *control,
13437                   int flags,
13438                   struct sctp_sndrcvinfo *srcv
13439 #if !defined(__Userspace__)
13440                   ,
13441 #if defined(__FreeBSD__)
13442                   struct thread *p
13443 #elif defined(_WIN32)
13444                   PKTHREAD p
13445 #else
13446                   struct proc *p
13447 #endif
13448 #endif
13449 	)
13450 {
13451 #if defined(__FreeBSD__) && !defined(__Userspace__)
13452 	struct epoch_tracker et;
13453 #endif
13454 	ssize_t sndlen = 0, max_len, local_add_more;
13455 	int error, len;
13456 	struct mbuf *top = NULL;
13457 	int queue_only = 0, queue_only_for_init = 0;
13458 	int free_cnt_applied = 0;
13459 	int un_sent;
13460 	int now_filled = 0;
13461 	unsigned int inqueue_bytes = 0;
13462 	struct sctp_block_entry be;
13463 	struct sctp_inpcb *inp;
13464 	struct sctp_tcb *stcb = NULL;
13465 	struct timeval now;
13466 	struct sctp_nets *net;
13467 	struct sctp_association *asoc;
13468 	struct sctp_inpcb *t_inp;
13469 	int user_marks_eor;
13470 	int create_lock_applied = 0;
13471 	int nagle_applies = 0;
13472 	int some_on_control = 0;
13473 	int got_all_of_the_send = 0;
13474 	int hold_tcblock = 0;
13475 	int non_blocking = 0;
13476 	ssize_t local_soresv = 0;
13477 	uint16_t port;
13478 	uint16_t sinfo_flags;
13479 	sctp_assoc_t sinfo_assoc_id;
13480 
13481 	error = 0;
13482 	net = NULL;
13483 	stcb = NULL;
13484 	asoc = NULL;
13485 
13486 #if defined(__APPLE__) && !defined(__Userspace__)
13487 	sctp_lock_assert(so);
13488 #endif
13489 	t_inp = inp = (struct sctp_inpcb *)so->so_pcb;
13490 	if (inp == NULL) {
13491 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13492 		error = EINVAL;
13493 		if (i_pak) {
13494 			SCTP_RELEASE_PKT(i_pak);
13495 		}
13496 		return (error);
13497 	}
13498 	if ((uio == NULL) && (i_pak == NULL)) {
13499 		SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13500 		return (EINVAL);
13501 	}
13502 	user_marks_eor = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
13503 	atomic_add_int(&inp->total_sends, 1);
13504 	if (uio) {
13505 #if defined(__APPLE__) && !defined(__Userspace__)
13506 #if defined(APPLE_LEOPARD)
13507 		if (uio->uio_resid < 0) {
13508 #else
13509 		if (uio_resid(uio) < 0) {
13510 #endif
13511 #else
13512 		if (uio->uio_resid < 0) {
13513 #endif
13514 			SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13515 			return (EINVAL);
13516 		}
13517 #if defined(__APPLE__) && !defined(__Userspace__)
13518 #if defined(APPLE_LEOPARD)
13519 		sndlen = uio->uio_resid;
13520 #else
13521 		sndlen = uio_resid(uio);
13522 #endif
13523 #else
13524 		sndlen = uio->uio_resid;
13525 #endif
13526 	} else {
13527 		top = SCTP_HEADER_TO_CHAIN(i_pak);
13528 		sndlen = SCTP_HEADER_LEN(i_pak);
13529 	}
13530 	SCTPDBG(SCTP_DEBUG_OUTPUT1, "Send called addr:%p send length %zd\n",
13531 	        (void *)addr,
13532 	        sndlen);
13533 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
13534 	    SCTP_IS_LISTENING(inp)) {
13535 		/* The listener can NOT send */
13536 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOTCONN);
13537 		error = ENOTCONN;
13538 		goto out_unlocked;
13539 	}
13540 	/*
13541 	 * Pre-screen the address; if one is given, its sin_len
13542 	 * must be set correctly!
13543 	 */
13544 	if (addr) {
13545 		union sctp_sockstore *raddr = (union sctp_sockstore *)addr;
13546 		switch (raddr->sa.sa_family) {
13547 #ifdef INET
13548 		case AF_INET:
13549 #ifdef HAVE_SIN_LEN
13550 			if (raddr->sin.sin_len != sizeof(struct sockaddr_in)) {
13551 				SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13552 				error = EINVAL;
13553 				goto out_unlocked;
13554 			}
13555 #endif
13556 			port = raddr->sin.sin_port;
13557 			break;
13558 #endif
13559 #ifdef INET6
13560 		case AF_INET6:
13561 #ifdef HAVE_SIN6_LEN
13562 			if (raddr->sin6.sin6_len != sizeof(struct sockaddr_in6)) {
13563 				SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13564 				error = EINVAL;
13565 				goto out_unlocked;
13566 			}
13567 #endif
13568 			port = raddr->sin6.sin6_port;
13569 			break;
13570 #endif
13571 #if defined(__Userspace__)
13572 		case AF_CONN:
13573 #ifdef HAVE_SCONN_LEN
13574 			if (raddr->sconn.sconn_len != sizeof(struct sockaddr_conn)) {
13575 				SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13576 				error = EINVAL;
13577 				goto out_unlocked;
13578 			}
13579 #endif
13580 			port = raddr->sconn.sconn_port;
13581 			break;
13582 #endif
13583 		default:
13584 			SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EAFNOSUPPORT);
13585 			error = EAFNOSUPPORT;
13586 			goto out_unlocked;
13587 		}
13588 	} else
13589 		port = 0;
13590 
13591 	if (srcv) {
13592 		sinfo_flags = srcv->sinfo_flags;
13593 		sinfo_assoc_id = srcv->sinfo_assoc_id;
13594 		if (INVALID_SINFO_FLAG(sinfo_flags) ||
13595 		    PR_SCTP_INVALID_POLICY(sinfo_flags)) {
13596 			SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13597 			error = EINVAL;
13598 			goto out_unlocked;
13599 		}
13600 		if (srcv->sinfo_flags)
13601 			SCTP_STAT_INCR(sctps_sends_with_flags);
13602 	} else {
13603 		sinfo_flags = inp->def_send.sinfo_flags;
13604 		sinfo_assoc_id = inp->def_send.sinfo_assoc_id;
13605 	}
13606 #if defined(__FreeBSD__) && !defined(__Userspace__)
13607 	if (flags & MSG_EOR) {
13608 		sinfo_flags |= SCTP_EOR;
13609 	}
13610 	if (flags & MSG_EOF) {
13611 		sinfo_flags |= SCTP_EOF;
13612 	}
13613 #endif
13614 	if (sinfo_flags & SCTP_SENDALL) {
13615 		/* it's a sendall */
13616 		error = sctp_sendall(inp, uio, top, srcv);
13617 		top = NULL;
13618 		goto out_unlocked;
13619 	}
13620 	if ((sinfo_flags & SCTP_ADDR_OVER) && (addr == NULL)) {
13621 		SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13622 		error = EINVAL;
13623 		goto out_unlocked;
13624 	}
13625 	/* now we must find the assoc */
13626 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) ||
13627 	    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
13628 		SCTP_INP_RLOCK(inp);
13629 		stcb = LIST_FIRST(&inp->sctp_asoc_list);
13630 		if (stcb) {
13631 			SCTP_TCB_LOCK(stcb);
13632 			hold_tcblock = 1;
13633 		}
13634 		SCTP_INP_RUNLOCK(inp);
13635 	} else if (sinfo_assoc_id) {
13636 		stcb = sctp_findassociation_ep_asocid(inp, sinfo_assoc_id, 1);
13637 		if (stcb != NULL) {
13638 			hold_tcblock = 1;
13639 		}
13640 	} else if (addr) {
13641 		/*-
13642 		 * Since we did not use findep we must increment the
13643 		 * inp ref count ourselves, and decrement it again if
13644 		 * we don't find a tcb.
13645 		 */
13646 		SCTP_INP_WLOCK(inp);
13647 		SCTP_INP_INCR_REF(inp);
13648 		SCTP_INP_WUNLOCK(inp);
13649 		stcb = sctp_findassociation_ep_addr(&t_inp, addr, &net, NULL, NULL);
13650 		if (stcb == NULL) {
13651 			SCTP_INP_WLOCK(inp);
13652 			SCTP_INP_DECR_REF(inp);
13653 			SCTP_INP_WUNLOCK(inp);
13654 		} else {
13655 			hold_tcblock = 1;
13656 		}
13657 	}
13658 	if ((stcb == NULL) && (addr)) {
13659 		/* Possible implicit send? */
13660 		SCTP_ASOC_CREATE_LOCK(inp);
13661 		create_lock_applied = 1;
13662 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
13663 		    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
13664 			/* Should I really unlock ? */
13665 			SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13666 			error = EINVAL;
13667 			goto out_unlocked;
13668 
13669 		}
13670 		if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) &&
13671 		    (addr->sa_family == AF_INET6)) {
13672 			SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13673 			error = EINVAL;
13674 			goto out_unlocked;
13675 		}
13676 		SCTP_INP_WLOCK(inp);
13677 		SCTP_INP_INCR_REF(inp);
13678 		SCTP_INP_WUNLOCK(inp);
13679 		/* With the lock applied look again */
13680 		stcb = sctp_findassociation_ep_addr(&t_inp, addr, &net, NULL, NULL);
13681 #if defined(INET) || defined(INET6)
13682 		if ((stcb == NULL) && (control != NULL) && (port > 0)) {
13683 			stcb = sctp_findassociation_cmsgs(&t_inp, port, control, &net, &error);
13684 		}
13685 #endif
13686 		if (stcb == NULL) {
13687 			SCTP_INP_WLOCK(inp);
13688 			SCTP_INP_DECR_REF(inp);
13689 			SCTP_INP_WUNLOCK(inp);
13690 		} else {
13691 			hold_tcblock = 1;
13692 		}
13693 		if (error) {
13694 			goto out_unlocked;
13695 		}
13696 		if (t_inp != inp) {
13697 			SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOTCONN);
13698 			error = ENOTCONN;
13699 			goto out_unlocked;
13700 		}
13701 	}
13702 	if (stcb == NULL) {
13703 		if (addr == NULL) {
13704 			SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOENT);
13705 			error = ENOENT;
13706 			goto out_unlocked;
13707 		} else {
13708 			/* We must go ahead and start the INIT process */
13709 			uint32_t vrf_id;
13710 
13711 			if ((sinfo_flags & SCTP_ABORT) ||
13712 			    ((sinfo_flags & SCTP_EOF) && (sndlen == 0))) {
13713 				/*-
13714 				 * User asks to abort a non-existent assoc,
13715 				 * or EOF a non-existent assoc with no data.
13716 				 */
13717 				SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOENT);
13718 				error = ENOENT;
13719 				goto out_unlocked;
13720 			}
13721 			/* get an asoc/stcb struct */
13722 			vrf_id = inp->def_vrf_id;
13723 #ifdef INVARIANTS
13724 			if (create_lock_applied == 0) {
13725 				panic("Error, should hold create lock and I don't?");
13726 			}
13727 #endif
13728 			stcb = sctp_aloc_assoc(inp, addr, &error, 0, vrf_id,
13729 			                       inp->sctp_ep.pre_open_stream_count,
13730 			                       inp->sctp_ep.port,
13731 #if !defined(__Userspace__)
13732 			                       p,
13733 #else
13734 			                       (struct proc *)NULL,
13735 #endif
13736 			                       SCTP_INITIALIZE_AUTH_PARAMS);
13737 			if (stcb == NULL) {
13738 				/* Error is setup for us in the call */
13739 				goto out_unlocked;
13740 			}
13741 			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
13742 				stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
13743 				/* Set the connected flag so we can queue data */
13744 				soisconnecting(so);
13745 			}
13746 			hold_tcblock = 1;
13747 			if (create_lock_applied) {
13748 				SCTP_ASOC_CREATE_UNLOCK(inp);
13749 				create_lock_applied = 0;
13750 			} else {
13751 				SCTP_PRINTF("Huh-3? create lock should have been on??\n");
13752 			}
13753 			/* Turn on the queue-only flag to prevent data from being sent */
13754 			queue_only = 1;
13755 			asoc = &stcb->asoc;
13756 			SCTP_SET_STATE(stcb, SCTP_STATE_COOKIE_WAIT);
13757 			(void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
13758 
13759 			if (control) {
13760 				if (sctp_process_cmsgs_for_init(stcb, control, &error)) {
13761 					sctp_free_assoc(inp, stcb, SCTP_PCBFREE_FORCE,
13762 					                SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_6);
13763 					hold_tcblock = 0;
13764 					stcb = NULL;
13765 					goto out_unlocked;
13766 				}
13767 			}
13768 			/* out with the INIT */
13769 			queue_only_for_init = 1;
13770 			/*-
13771 			 * we may want to dig in after this call and adjust the MTU
13772 			 * value. It defaulted to 1500 (constant) but the ro
13773 			 * structure may now have an update and thus we may need to
13774 			 * change it BEFORE we append the message.
13775 			 */
13776 		}
13777 	} else
13778 		asoc = &stcb->asoc;
13779 	if (srcv == NULL) {
13780 		srcv = (struct sctp_sndrcvinfo *)&asoc->def_send;
13781 		sinfo_flags = srcv->sinfo_flags;
13782 #if defined(__FreeBSD__) && !defined(__Userspace__)
13783 		if (flags & MSG_EOR) {
13784 			sinfo_flags |= SCTP_EOR;
13785 		}
13786 		if (flags & MSG_EOF) {
13787 			sinfo_flags |= SCTP_EOF;
13788 		}
13789 #endif
13790 	}
13791 	if (sinfo_flags & SCTP_ADDR_OVER) {
13792 		if (addr)
13793 			net = sctp_findnet(stcb, addr);
13794 		else
13795 			net = NULL;
13796 		if ((net == NULL) ||
13797 		    ((port != 0) && (port != stcb->rport))) {
13798 			SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13799 			error = EINVAL;
13800 			goto out_unlocked;
13801 		}
13802 	} else {
13803 		if (stcb->asoc.alternate) {
13804 			net = stcb->asoc.alternate;
13805 		} else {
13806 			net = stcb->asoc.primary_destination;
13807 		}
13808 	}
13809 	atomic_add_int(&stcb->total_sends, 1);
13810 	/* Keep the stcb from being freed under our feet */
13811 	atomic_add_int(&asoc->refcnt, 1);
13812 	free_cnt_applied = 1;
13813 
13814 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NO_FRAGMENT)) {
13815 		if (sndlen > (ssize_t)asoc->smallest_mtu) {
13816 			SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE);
13817 			error = EMSGSIZE;
13818 			goto out_unlocked;
13819 		}
13820 	}
13821 #if defined(__Userspace__)
13822 	if (inp->recv_callback) {
13823 		non_blocking = 1;
13824 	}
13825 #endif
13826 	if (SCTP_SO_IS_NBIO(so)
13827 #if defined(__FreeBSD__) && !defined(__Userspace__)
13828 	     || (flags & (MSG_NBIO | MSG_DONTWAIT)) != 0
13829 #endif
13830 	    ) {
13831 		non_blocking = 1;
13832 	}
13833 	/* would we block? */
13834 	if (non_blocking) {
13835 		ssize_t amount;
13836 
13837 		if (hold_tcblock == 0) {
13838 			SCTP_TCB_LOCK(stcb);
13839 			hold_tcblock = 1;
13840 		}
13841 		inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * SCTP_DATA_CHUNK_OVERHEAD(stcb));
13842 		if (user_marks_eor == 0) {
13843 			amount = sndlen;
13844 		} else {
13845 			amount = 1;
13846 		}
13847 		if ((SCTP_SB_LIMIT_SND(so) <  (amount + inqueue_bytes + stcb->asoc.sb_send_resv)) ||
13848 		    (stcb->asoc.chunks_on_out_queue >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) {
13849 			SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EWOULDBLOCK);
13850 			if (sndlen > (ssize_t)SCTP_SB_LIMIT_SND(so))
13851 				error = EMSGSIZE;
13852 			else
13853 				error = EWOULDBLOCK;
13854 			goto out_unlocked;
13855 		}
13856 		stcb->asoc.sb_send_resv += (uint32_t)sndlen;
13857 		SCTP_TCB_UNLOCK(stcb);
13858 		hold_tcblock = 0;
13859 	} else {
13860 		atomic_add_int(&stcb->asoc.sb_send_resv, sndlen);
13861 	}
13862 	local_soresv = sndlen;
13863 	if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
13864 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
13865 		error = ECONNRESET;
13866 		goto out_unlocked;
13867 	}
13868 	if (create_lock_applied) {
13869 		SCTP_ASOC_CREATE_UNLOCK(inp);
13870 		create_lock_applied = 0;
13871 	}
13872 	/* Is the stream no. valid? */
13873 	if (srcv->sinfo_stream >= asoc->streamoutcnt) {
13874 		/* Invalid stream number */
13875 		SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13876 		error = EINVAL;
13877 		goto out_unlocked;
13878 	}
13879 	if ((asoc->strmout[srcv->sinfo_stream].state != SCTP_STREAM_OPEN) &&
13880 	    (asoc->strmout[srcv->sinfo_stream].state != SCTP_STREAM_OPENING)) {
13881 		/*
13882 		 * Can't queue any data while stream reset is underway.
13883 		 */
13884 		if (asoc->strmout[srcv->sinfo_stream].state > SCTP_STREAM_OPEN) {
13885 			error = EAGAIN;
13886 		} else {
13887 			error = EINVAL;
13888 		}
13889 		SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, error);
13890 		goto out_unlocked;
13891 	}
13892 	if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
13893 	    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
13894 		queue_only = 1;
13895 	}
13896 	/* we are now done with all control */
13897 	if (control) {
13898 		sctp_m_freem(control);
13899 		control = NULL;
13900 	}
13901 	if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) ||
13902 	    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
13903 	    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
13904 	    (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) {
13905 		if (sinfo_flags & SCTP_ABORT) {
13906 			;
13907 		} else {
13908 			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
13909 			error = ECONNRESET;
13910 			goto out_unlocked;
13911 		}
13912 	}
13913 	/* Ok, we will attempt a msgsnd :> */
13914 #if !(defined(_WIN32) || defined(__Userspace__))
13915 	if (p) {
13916 #if defined(__FreeBSD__)
13917 		p->td_ru.ru_msgsnd++;
13918 #else
13919 		p->p_stats->p_ru.ru_msgsnd++;
13920 #endif
13921 	}
13922 #endif
13923 	/* Are we aborting? */
13924 	if (sinfo_flags & SCTP_ABORT) {
13925 		struct mbuf *mm;
13926 		ssize_t tot_demand, tot_out = 0, max_out;
13927 
13928 		SCTP_STAT_INCR(sctps_sends_with_abort);
13929 		if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
13930 		    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
13931 			/* It has to be up before we abort */
13932 			/* how big is the user initiated abort? */
13933 			SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13934 			error = EINVAL;
13935 			goto out;
13936 		}
13937 		if (hold_tcblock) {
13938 			SCTP_TCB_UNLOCK(stcb);
13939 			hold_tcblock = 0;
13940 		}
13941 		if (top) {
13942 			struct mbuf *cntm = NULL;
13943 
13944 			mm = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_WAITOK, 1, MT_DATA);
13945 			if (sndlen != 0) {
13946 				for (cntm = top; cntm; cntm = SCTP_BUF_NEXT(cntm)) {
13947 					tot_out += SCTP_BUF_LEN(cntm);
13948 				}
13949 			}
13950 		} else {
13951 			/* Must fit in an MTU */
13952 			tot_out = sndlen;
13953 			tot_demand = (tot_out + sizeof(struct sctp_paramhdr));
13954 			if (tot_demand > SCTP_DEFAULT_ADD_MORE) {
13955 				/* Too big */
13956 				SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE);
13957 				error = EMSGSIZE;
13958 				goto out;
13959 			}
13960 			mm = sctp_get_mbuf_for_msg((unsigned int)tot_demand, 0, M_WAITOK, 1, MT_DATA);
13961 		}
13962 		if (mm == NULL) {
13963 			SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
13964 			error = ENOMEM;
13965 			goto out;
13966 		}
13967 		max_out = asoc->smallest_mtu - sizeof(struct sctp_paramhdr);
13968 		max_out -= sizeof(struct sctp_abort_msg);
13969 		if (tot_out > max_out) {
13970 			tot_out = max_out;
13971 		}
13972 		if (mm) {
13973 			struct sctp_paramhdr *ph;
13974 
13975 			/* now move forward the data pointer */
13976 			ph = mtod(mm, struct sctp_paramhdr *);
13977 			ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
13978 			ph->param_length = htons((uint16_t)(sizeof(struct sctp_paramhdr) + tot_out));
13979 			ph++;
13980 			SCTP_BUF_LEN(mm) = (int)(tot_out + sizeof(struct sctp_paramhdr));
13981 			if (top == NULL) {
13982 #if defined(__APPLE__) && !defined(__Userspace__)
13983 				SCTP_SOCKET_UNLOCK(so, 0);
13984 #endif
13985 				error = uiomove((caddr_t)ph, (int)tot_out, uio);
13986 #if defined(__APPLE__) && !defined(__Userspace__)
13987 				SCTP_SOCKET_LOCK(so, 0);
13988 #endif
13989 				if (error) {
13990 					/*-
13991 					 * If we can't get the user's data here,
13992 					 * we still abort; we just don't get to
13993 					 * send the user's note. :-0
13994 					 */
13995 					sctp_m_freem(mm);
13996 					mm = NULL;
13997 				}
13998 			} else {
13999 				if (sndlen != 0) {
14000 					SCTP_BUF_NEXT(mm) = top;
14001 				}
14002 			}
14003 		}
14004 		if (hold_tcblock == 0) {
14005 			SCTP_TCB_LOCK(stcb);
14006 		}
14007 		atomic_add_int(&stcb->asoc.refcnt, -1);
14008 		free_cnt_applied = 0;
14009 		/* release this lock, otherwise we hang on ourselves */
14010 #if defined(__FreeBSD__) && !defined(__Userspace__)
14011 		NET_EPOCH_ENTER(et);
14012 #endif
14013 		sctp_abort_an_association(stcb->sctp_ep, stcb, mm, SCTP_SO_LOCKED);
14014 #if defined(__FreeBSD__) && !defined(__Userspace__)
14015 		NET_EPOCH_EXIT(et);
14016 #endif
14017 		/* now relock the stcb so everything is sane */
14018 		hold_tcblock = 0;
14019 		stcb = NULL;
14020 		/* In this case top is already chained to mm;
14021 		 * avoid a double free, since we free top below if
14022 		 * top != NULL and the driver would free it after
14023 		 * sending the packet out.
14024 		 */
14025 		if (sndlen != 0) {
14026 			top = NULL;
14027 		}
14028 		goto out_unlocked;
14029 	}
14030 	/* Calculate the maximum we can send */
14031 	inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * SCTP_DATA_CHUNK_OVERHEAD(stcb));
14032 	if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes) {
14033 		max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
14034 	} else {
14035 		max_len = 0;
14036 	}
14037 	if (hold_tcblock) {
14038 		SCTP_TCB_UNLOCK(stcb);
14039 		hold_tcblock = 0;
14040 	}
14041 	if (asoc->strmout == NULL) {
14042 		/* huh? software error */
14043 		SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
14044 		error = EFAULT;
14045 		goto out_unlocked;
14046 	}
14047 
14048 	/* Unless E_EOR mode is on, we must make a send FIT in one call. */
14049 	if ((user_marks_eor == 0) &&
14050 	    (sndlen > (ssize_t)SCTP_SB_LIMIT_SND(stcb->sctp_socket))) {
14051 		/* It will NEVER fit */
14052 		SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE);
14053 		error = EMSGSIZE;
14054 		goto out_unlocked;
14055 	}
14056 	if ((uio == NULL) && user_marks_eor) {
14057 		/*-
14058 		 * We do not support eeor mode for
14059 		 * sending with mbuf chains (like sendfile).
14060 		 */
14061 		SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
14062 		error = EINVAL;
14063 		goto out_unlocked;
14064 	}
14065 
14066 	if (user_marks_eor) {
14067 		local_add_more = (ssize_t)min(SCTP_SB_LIMIT_SND(so), SCTP_BASE_SYSCTL(sctp_add_more_threshold));
14068 	} else {
14069 		/*-
14070 		 * For non-eeor the whole message must fit in
14071 		 * the socket send buffer.
14072 		 */
14073 		local_add_more = sndlen;
14074 	}
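	/*
	 * local_add_more is how much free space we require before we stop
	 * blocking: with explicit EOR only the add-more threshold (capped
	 * by the send buffer size), otherwise the whole message, since a
	 * non-EOR send must fit in one call.
	 */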
14075 	len = 0;
14076 	if (non_blocking) {
14077 		goto skip_preblock;
14078 	}
14079 	if (((max_len <= local_add_more) &&
14080 	     ((ssize_t)SCTP_SB_LIMIT_SND(so) >= local_add_more)) ||
14081 	    (max_len == 0) ||
14082 	    ((stcb->asoc.chunks_on_out_queue+stcb->asoc.stream_queue_cnt) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) {
14083 		/* No room right now ! */
14084 		SOCKBUF_LOCK(&so->so_snd);
14085 		inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * SCTP_DATA_CHUNK_OVERHEAD(stcb));
14086 		while ((SCTP_SB_LIMIT_SND(so) < (inqueue_bytes + local_add_more)) ||
14087 		       ((stcb->asoc.stream_queue_cnt + stcb->asoc.chunks_on_out_queue) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) {
14088 			SCTPDBG(SCTP_DEBUG_OUTPUT1,"pre_block limit:%u <(inq:%d + %zd) || (%d+%d > %d)\n",
14089 			        (unsigned int)SCTP_SB_LIMIT_SND(so),
14090 			        inqueue_bytes,
14091 			        local_add_more,
14092 			        stcb->asoc.stream_queue_cnt,
14093 			        stcb->asoc.chunks_on_out_queue,
14094 			        SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue));
14095 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
14096 				sctp_log_block(SCTP_BLOCK_LOG_INTO_BLKA, asoc, sndlen);
14097 			}
14098 			be.error = 0;
14099 #if !(defined(_WIN32) && !defined(__Userspace__))
14100 			stcb->block_entry = &be;
14101 #endif
14102 			error = sbwait(&so->so_snd);
14103 			stcb->block_entry = NULL;
14104 			if (error || so->so_error || be.error) {
14105 				if (error == 0) {
14106 					if (so->so_error)
14107 						error = so->so_error;
14108 					if (be.error) {
14109 						error = be.error;
14110 					}
14111 				}
14112 				SOCKBUF_UNLOCK(&so->so_snd);
14113 				goto out_unlocked;
14114 			}
14115 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
14116 				sctp_log_block(SCTP_BLOCK_LOG_OUTOF_BLK,
14117 				               asoc, stcb->asoc.total_output_queue_size);
14118 			}
14119 			if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
14120 				SOCKBUF_UNLOCK(&so->so_snd);
14121 				goto out_unlocked;
14122 			}
14123 			inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * SCTP_DATA_CHUNK_OVERHEAD(stcb));
14124 		}
14125 		if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes) {
14126 			max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
14127 		} else {
14128 			max_len = 0;
14129 		}
14130 		SOCKBUF_UNLOCK(&so->so_snd);
14131 	}
14132 
14133 skip_preblock:
14134 	if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
14135 		goto out_unlocked;
14136 	}
14137 #if defined(__APPLE__) && !defined(__Userspace__)
14138 	error = sblock(&so->so_snd, SBLOCKWAIT(flags));
14139 #endif
14140 	/* sndlen covers the mbuf case,
14141 	 * uio_resid covers the non-mbuf case.
14142 	 * NOTE: uio will be NULL when top (an mbuf chain) is passed.
14143 	 */
14144 	if (sndlen == 0) {
14145 		if (sinfo_flags & SCTP_EOF) {
14146 			got_all_of_the_send = 1;
14147 			goto dataless_eof;
14148 		} else {
14149 			SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
14150 			error = EINVAL;
14151 			goto out;
14152 		}
14153 	}
14154 	if (top == NULL) {
14155 		struct sctp_stream_queue_pending *sp;
14156 		struct sctp_stream_out *strm;
14157 		uint32_t sndout;
14158 
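		/*
		 * If an earlier (interrupted) send left a partial message
		 * on some stream, we are locked to that stream: a send on
		 * a different stream is rejected until it completes.
		 */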
14159 		SCTP_TCB_SEND_LOCK(stcb);
14160 		if ((asoc->stream_locked) &&
14161 		    (asoc->stream_locked_on  != srcv->sinfo_stream)) {
14162 			SCTP_TCB_SEND_UNLOCK(stcb);
14163 			SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
14164 			error = EINVAL;
14165 			goto out;
14166 		}
14167 		SCTP_TCB_SEND_UNLOCK(stcb);
14168 
14169 		strm = &stcb->asoc.strmout[srcv->sinfo_stream];
14170 		if (strm->last_msg_incomplete == 0) {
14171 		do_a_copy_in:
14172 			sp = sctp_copy_it_in(stcb, asoc, srcv, uio, net, max_len, user_marks_eor, &error);
14173 			if (error) {
14174 				goto out;
14175 			}
14176 			SCTP_TCB_SEND_LOCK(stcb);
14177 			if (sp->msg_is_complete) {
14178 				strm->last_msg_incomplete = 0;
14179 				asoc->stream_locked = 0;
14180 			} else {
14181 				/* We are now locked to this message
14182 				 * in case the copy-in was interrupted.
14183 				 */
14184 				strm->last_msg_incomplete = 1;
14185 				if (stcb->asoc.idata_supported == 0) {
14186 					asoc->stream_locked = 1;
14187 					asoc->stream_locked_on  = srcv->sinfo_stream;
14188 				}
14189 				sp->sender_all_done = 0;
14190 			}
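			/*
			 * Charge the copied data to the send buffer and
			 * queue the new message on its stream so the
			 * stream scheduler can pick it up.
			 */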
14191 			sctp_snd_sb_alloc(stcb, sp->length);
14192 			atomic_add_int(&asoc->stream_queue_cnt, 1);
14193 			if (sinfo_flags & SCTP_UNORDERED) {
14194 				SCTP_STAT_INCR(sctps_sends_with_unord);
14195 			}
14196 			TAILQ_INSERT_TAIL(&strm->outqueue, sp, next);
14197 			stcb->asoc.ss_functions.sctp_ss_add_to_stream(stcb, asoc, strm, sp, 1);
14198 			SCTP_TCB_SEND_UNLOCK(stcb);
14199 		} else {
14200 			SCTP_TCB_SEND_LOCK(stcb);
14201 			sp = TAILQ_LAST(&strm->outqueue, sctp_streamhead);
14202 			SCTP_TCB_SEND_UNLOCK(stcb);
14203 			if (sp == NULL) {
14204 				/* ???? Huh ??? last msg is gone */
14205 #ifdef INVARIANTS
14206 				panic("Warning: Last msg marked incomplete, yet nothing left?");
14207 #else
14208 				SCTP_PRINTF("Warning: Last msg marked incomplete, yet nothing left?\n");
14209 				strm->last_msg_incomplete = 0;
14210 #endif
14211 				goto do_a_copy_in;
14212 
14213 			}
14214 		}
14215 #if defined(__APPLE__) && !defined(__Userspace__)
14216 #if defined(APPLE_LEOPARD)
14217 		while (uio->uio_resid > 0) {
14218 #else
14219 		while (uio_resid(uio) > 0) {
14220 #endif
14221 #else
14222 		while (uio->uio_resid > 0) {
14223 #endif
14224 			/* How much room do we have? */
14225 			struct mbuf *new_tail, *mm;
14226 
14227 			inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * SCTP_DATA_CHUNK_OVERHEAD(stcb));
14228 			if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes)
14229 				max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
14230 			else
14231 				max_len = 0;
14232 
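			/*
			 * Copy more data in now if (a) there is more room
			 * than the add-more threshold, (b) the send buffer
			 * itself is smaller than the threshold and some
			 * room exists, or (c) what is left of the message
			 * fits in the room available.
			 */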
14233 			if ((max_len > (ssize_t)SCTP_BASE_SYSCTL(sctp_add_more_threshold)) ||
14234 			    (max_len && (SCTP_SB_LIMIT_SND(so) < SCTP_BASE_SYSCTL(sctp_add_more_threshold))) ||
14235 #if defined(__APPLE__) && !defined(__Userspace__)
14236 #if defined(APPLE_LEOPARD)
14237 			    (uio->uio_resid && (uio->uio_resid <= max_len))) {
14238 #else
14239 			    (uio_resid(uio) && (uio_resid(uio) <= max_len))) {
14240 #endif
14241 #else
14242 			    (uio->uio_resid && (uio->uio_resid <= max_len))) {
14243 #endif
14244 				sndout = 0;
14245 				new_tail = NULL;
14246 				if (hold_tcblock) {
14247 					SCTP_TCB_UNLOCK(stcb);
14248 					hold_tcblock = 0;
14249 				}
14250 #if defined(__APPLE__) && !defined(__Userspace__)
14251 				SCTP_SOCKET_UNLOCK(so, 0);
14252 #endif
14253 #if defined(__FreeBSD__) || defined(__Userspace__)
14254 				mm = sctp_copy_resume(uio, (int)max_len, user_marks_eor, &error, &sndout, &new_tail);
14255 #else
14256 				mm = sctp_copy_resume(uio, (int)max_len, &error, &sndout, &new_tail);
14257 #endif
14258 #if defined(__APPLE__) && !defined(__Userspace__)
14259 				SCTP_SOCKET_LOCK(so, 0);
14260 #endif
14261 				if ((mm == NULL) || error) {
14262 					if (mm) {
14263 						sctp_m_freem(mm);
14264 					}
14265 					goto out;
14266 				}
14267 				/* Update the mbuf and count */
14268 				SCTP_TCB_SEND_LOCK(stcb);
14269 				if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
14270 					/* we need to get out.
14271 					 * Peer probably aborted.
14272 					 */
14273 					sctp_m_freem(mm);
14274 					if (stcb->asoc.state & SCTP_PCB_FLAGS_WAS_ABORTED) {
14275 						SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
14276 						error = ECONNRESET;
14277 					}
14278 					SCTP_TCB_SEND_UNLOCK(stcb);
14279 					goto out;
14280 				}
14281 				if (sp->tail_mbuf) {
14282 					/* tack it to the end */
14283 					SCTP_BUF_NEXT(sp->tail_mbuf) = mm;
14284 					sp->tail_mbuf = new_tail;
14285 				} else {
14286 					/* A stolen mbuf */
14287 					sp->data = mm;
14288 					sp->tail_mbuf = new_tail;
14289 				}
14290 				sctp_snd_sb_alloc(stcb, sndout);
14291 				atomic_add_int(&sp->length, sndout);
14292 				len += sndout;
14293 				if (sinfo_flags & SCTP_SACK_IMMEDIATELY) {
14294 					sp->sinfo_flags |= SCTP_SACK_IMMEDIATELY;
14295 				}
14296 
14297 				/* Did we reach EOR? */
14298 #if defined(__APPLE__) && !defined(__Userspace__)
14299 #if defined(APPLE_LEOPARD)
14300 				if ((uio->uio_resid == 0) &&
14301 #else
14302 				if ((uio_resid(uio) == 0) &&
14303 #endif
14304 #else
14305 				if ((uio->uio_resid == 0) &&
14306 #endif
14307 				    ((user_marks_eor == 0) ||
14308 				     (sinfo_flags & SCTP_EOF) ||
14309 				     (user_marks_eor && (sinfo_flags & SCTP_EOR)))) {
14310 					sp->msg_is_complete = 1;
14311 				} else {
14312 					sp->msg_is_complete = 0;
14313 				}
14314 				SCTP_TCB_SEND_UNLOCK(stcb);
14315 			}
14316 #if defined(__APPLE__) && !defined(__Userspace__)
14317 #if defined(APPLE_LEOPARD)
14318 			if (uio->uio_resid == 0) {
14319 #else
14320 			if (uio_resid(uio) == 0) {
14321 #endif
14322 #else
14323 			if (uio->uio_resid == 0) {
14324 #endif
14325 				/* got it all? */
14326 				continue;
14327 			}
14328 			/* PR-SCTP? */
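			/*
			 * With PR-SCTP we may be able to make room by
			 * pruning abandonable data already queued instead
			 * of blocking until the peer acks it.
			 */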
14329 			if ((asoc->prsctp_supported) && (asoc->sent_queue_cnt_removeable > 0)) {
14330 				/* This is ugly but we must assure locking order */
14331 				if (hold_tcblock == 0) {
14332 					SCTP_TCB_LOCK(stcb);
14333 					hold_tcblock = 1;
14334 				}
14335 				sctp_prune_prsctp(stcb, asoc, srcv, (int)sndlen);
14336 				inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * SCTP_DATA_CHUNK_OVERHEAD(stcb));
14337 				if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes)
14338 					max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
14339 				else
14340 					max_len = 0;
14341 				if (max_len > 0) {
14342 					continue;
14343 				}
14344 				SCTP_TCB_UNLOCK(stcb);
14345 				hold_tcblock = 0;
14346 			}
14347 			/* wait for space now */
14348 			if (non_blocking) {
14349 				/* Non-blocking I/O: don't wait for space, just get out. */
14350 				goto skip_out_eof;
14351 			}
14352 			/* What about the INIT? Send it if it has not gone out yet. */
14353 			if (queue_only_for_init) {
14354 				if (hold_tcblock == 0) {
14355 					SCTP_TCB_LOCK(stcb);
14356 					hold_tcblock = 1;
14357 				}
14358 				if (SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) {
14359 					/* a collision took us forward? */
14360 					queue_only = 0;
14361 				} else {
14362 #if defined(__FreeBSD__) && !defined(__Userspace__)
14363 					NET_EPOCH_ENTER(et);
14364 #endif
14365 					sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
14366 #if defined(__FreeBSD__) && !defined(__Userspace__)
14367 					NET_EPOCH_EXIT(et);
14368 #endif
14369 					SCTP_SET_STATE(stcb, SCTP_STATE_COOKIE_WAIT);
14370 					queue_only = 1;
14371 				}
14372 			}
14373 			if ((net->flight_size > net->cwnd) &&
14374 			    (asoc->sctp_cmt_on_off == 0)) {
14375 				SCTP_STAT_INCR(sctps_send_cwnd_avoid);
14376 				queue_only = 1;
14377 			} else if (asoc->ifp_had_enobuf) {
14378 				SCTP_STAT_INCR(sctps_ifnomemqueued);
14379 				if (net->flight_size > (2 * net->mtu)) {
14380 					queue_only = 1;
14381 				}
14382 				asoc->ifp_had_enobuf = 0;
14383 			}
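			/*
			 * At this point queue_only reflects congestion
			 * (flight above cwnd without CMT) or a recent
			 * ENOBUFS from the interface; next decide whether
			 * Nagle keeps us from sending a small amount.
			 */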
14384 			un_sent = stcb->asoc.total_output_queue_size - stcb->asoc.total_flight;
14385 			if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
14386 			    (stcb->asoc.total_flight > 0) &&
14387 			    (stcb->asoc.stream_queue_cnt < SCTP_MAX_DATA_BUNDLING) &&
14388 			    (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))) {
14389 
14390 				/*-
14391 				 * Ok, Nagle is set on and we have data outstanding.
14392 				 * Don't send anything and let SACKs drive out the
14393 				 * data unless we have a "full" segment to send.
14394 				 */
14395 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
14396 					sctp_log_nagle_event(stcb, SCTP_NAGLE_APPLIED);
14397 				}
14398 				SCTP_STAT_INCR(sctps_naglequeued);
14399 				nagle_applies = 1;
14400 			} else {
14401 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
14402 					if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY))
14403 						sctp_log_nagle_event(stcb, SCTP_NAGLE_SKIPPED);
14404 				}
14405 				SCTP_STAT_INCR(sctps_naglesent);
14406 				nagle_applies = 0;
14407 			}
14408 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
14409 
14410 				sctp_misc_ints(SCTP_CWNDLOG_PRESEND, queue_only_for_init, queue_only,
14411 					       nagle_applies, un_sent);
14412 				sctp_misc_ints(SCTP_CWNDLOG_PRESEND, stcb->asoc.total_output_queue_size,
14413 					       stcb->asoc.total_flight,
14414 					       stcb->asoc.chunks_on_out_queue, stcb->asoc.total_flight_count);
14415 			}
14416 			if (queue_only_for_init)
14417 				queue_only_for_init = 0;
14418 			if ((queue_only == 0) && (nagle_applies == 0)) {
14419 				/*-
14420 				 * Need to start chunk output before
14421 				 * blocking. Note that if the TCB lock
14422 				 * is already held, input from the
14423 				 * network is in progress and we do
14424 				 * not need to start output ourselves.
14425 				 */
14426 #if defined(__FreeBSD__) && !defined(__Userspace__)
14427 				NET_EPOCH_ENTER(et);
14428 #endif
14429 				if (hold_tcblock == 0) {
14430 					if (SCTP_TCB_TRYLOCK(stcb)) {
14431 						hold_tcblock = 1;
14432 						sctp_chunk_output(inp,
14433 								  stcb,
14434 								  SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
14435 					}
14436 				} else {
14437 					sctp_chunk_output(inp,
14438 							  stcb,
14439 							  SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
14440 				}
14441 #if defined(__FreeBSD__) && !defined(__Userspace__)
14442 				NET_EPOCH_EXIT(et);
14443 #endif
14444 			}
14445 			if (hold_tcblock == 1) {
14446 				SCTP_TCB_UNLOCK(stcb);
14447 				hold_tcblock = 0;
14448 			}
14449 			SOCKBUF_LOCK(&so->so_snd);
14450 			/*-
14451 			 * This is a bit strange, but it works. The
14452 			 * total_output_queue_size is protected by the
14453 			 * TCB_LOCK, which we just released. There is a
14454 			 * race between releasing it above and taking the
14455 			 * socket lock here, in which SACKs can come in
14456 			 * before we have put SB_WAIT on the so_snd buffer
14457 			 * to get the wakeup. After the lock is applied,
14458 			 * SACK processing also needs to lock so->so_snd
14459 			 * to do the actual sowwakeup(), so once we hold
14460 			 * the socket buffer lock and recheck the size, we
14461 			 * know we will get to sleep safely with the
14462 			 * wakeup flag in place.
14463 			 */
14464 			inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * SCTP_DATA_CHUNK_OVERHEAD(stcb));
14465 			if (SCTP_SB_LIMIT_SND(so) <= (inqueue_bytes +
14466 						      min(SCTP_BASE_SYSCTL(sctp_add_more_threshold), SCTP_SB_LIMIT_SND(so)))) {
14467 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
14468 #if defined(__APPLE__) && !defined(__Userspace__)
14469 #if defined(APPLE_LEOPARD)
14470 					sctp_log_block(SCTP_BLOCK_LOG_INTO_BLK,
14471 						       asoc, uio->uio_resid);
14472 #else
14473 					sctp_log_block(SCTP_BLOCK_LOG_INTO_BLK,
14474 						       asoc, uio_resid(uio));
14475 #endif
14476 #else
14477 					sctp_log_block(SCTP_BLOCK_LOG_INTO_BLK,
14478 						       asoc, uio->uio_resid);
14479 #endif
14480 				}
14481 				be.error = 0;
14482 #if !(defined(_WIN32) && !defined(__Userspace__))
14483 				stcb->block_entry = &be;
14484 #endif
14485 #if defined(__APPLE__) && !defined(__Userspace__)
14486 				sbunlock(&so->so_snd, 1);
14487 #endif
14488 				error = sbwait(&so->so_snd);
14489 				stcb->block_entry = NULL;
14490 
14491 				if (error || so->so_error || be.error) {
14492 					if (error == 0) {
14493 						if (so->so_error)
14494 							error = so->so_error;
14495 						if (be.error) {
14496 							error = be.error;
14497 						}
14498 					}
14499 					SOCKBUF_UNLOCK(&so->so_snd);
14500 					goto out_unlocked;
14501 				}
14502 
14503 #if defined(__APPLE__) && !defined(__Userspace__)
14504 				error = sblock(&so->so_snd, SBLOCKWAIT(flags));
14505 #endif
14506 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
14507 					sctp_log_block(SCTP_BLOCK_LOG_OUTOF_BLK,
14508 						       asoc, stcb->asoc.total_output_queue_size);
14509 				}
14510 			}
14511 			SOCKBUF_UNLOCK(&so->so_snd);
14512 			if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
14513 				goto out_unlocked;
14514 			}
14515 		}
14516 		SCTP_TCB_SEND_LOCK(stcb);
14517 		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
14518 			SCTP_TCB_SEND_UNLOCK(stcb);
14519 			goto out_unlocked;
14520 		}
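		/*
		 * Record how this message ended: an incomplete message
		 * keeps the stream locked (when I-DATA is not in use) so
		 * that the next send must continue it; a complete one
		 * releases the lock and marks the sender done.
		 */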
14521 		if (sp) {
14522 			if (sp->msg_is_complete == 0) {
14523 				strm->last_msg_incomplete = 1;
14524 				if (stcb->asoc.idata_supported == 0) {
14525 					asoc->stream_locked = 1;
14526 					asoc->stream_locked_on  = srcv->sinfo_stream;
14527 				}
14528 			} else {
14529 				sp->sender_all_done = 1;
14530 				strm->last_msg_incomplete = 0;
14531 				asoc->stream_locked = 0;
14532 			}
14533 		} else {
14534 			SCTP_PRINTF("Huh no sp TSNH?\n");
14535 			strm->last_msg_incomplete = 0;
14536 			asoc->stream_locked = 0;
14537 		}
14538 		SCTP_TCB_SEND_UNLOCK(stcb);
14539 #if defined(__APPLE__) && !defined(__Userspace__)
14540 #if defined(APPLE_LEOPARD)
14541 		if (uio->uio_resid == 0) {
14542 #else
14543 		if (uio_resid(uio) == 0) {
14544 #endif
14545 #else
14546 		if (uio->uio_resid == 0) {
14547 #endif
14548 			got_all_of_the_send = 1;
14549 		}
14550 	} else {
14551 		/* We send in a 0, since we do NOT have any locks */
14552 		error = sctp_msg_append(stcb, net, top, srcv, 0);
14553 		top = NULL;
14554 		if (sinfo_flags & SCTP_EOF) {
14555 			got_all_of_the_send = 1;
14556 		}
14557 	}
14558 	if (error) {
14559 		goto out;
14560 	}
14561 dataless_eof:
14562 	/* EOF thing ? */
14563 	if ((sinfo_flags & SCTP_EOF) &&
14564 	    (got_all_of_the_send == 1)) {
14565 		SCTP_STAT_INCR(sctps_sends_with_eof);
14566 		error = 0;
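		/*
		 * SCTP_EOF with all data handed over: if nothing is left
		 * queued, start a graceful shutdown now; otherwise mark
		 * the association SHUTDOWN-PENDING so the shutdown goes
		 * out once the queues drain.
		 */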
14567 		if (hold_tcblock == 0) {
14568 			SCTP_TCB_LOCK(stcb);
14569 			hold_tcblock = 1;
14570 		}
14571 		if (TAILQ_EMPTY(&asoc->send_queue) &&
14572 		    TAILQ_EMPTY(&asoc->sent_queue) &&
14573 		    sctp_is_there_unsent_data(stcb, SCTP_SO_LOCKED) == 0) {
14574 			if ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete)(stcb, asoc)) {
14575 				goto abort_anyway;
14576 			}
14577 			/* there is nothing queued to send, so I'm done... */
14578 			if ((SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_SENT) &&
14579 			    (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
14580 			    (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
14581 				struct sctp_nets *netp;
14582 
14583 				/* only send SHUTDOWN the first time through */
14584 				if (SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) {
14585 					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
14586 				}
14587 				SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
14588 				sctp_stop_timers_for_shutdown(stcb);
14589 				if (stcb->asoc.alternate) {
14590 					netp = stcb->asoc.alternate;
14591 				} else {
14592 					netp = stcb->asoc.primary_destination;
14593 				}
14594 				sctp_send_shutdown(stcb, netp);
14595 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb,
14596 				                 netp);
14597 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
14598 				                 NULL);
14599 			}
14600 		} else {
14601 			/*-
14602 			 * we still got (or just got) data to send, so set
14603 			 * SHUTDOWN_PENDING
14604 			 */
14605 			/*-
14606 			 * XXX sockets draft says that SCTP_EOF should be
14607 			 * sent with no data.  currently, we will allow user
14608 			 * data to be sent first and move to
14609 			 * SHUTDOWN-PENDING
14610 			 */
14611 			if ((SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_SENT) &&
14612 			    (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
14613 			    (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
14614 				if (hold_tcblock == 0) {
14615 					SCTP_TCB_LOCK(stcb);
14616 					hold_tcblock = 1;
14617 				}
14618 				if ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete)(stcb, asoc)) {
14619 					SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT);
14620 				}
14621 				SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_SHUTDOWN_PENDING);
14622 				if (TAILQ_EMPTY(&asoc->send_queue) &&
14623 				    TAILQ_EMPTY(&asoc->sent_queue) &&
14624 				    (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
14625 					struct mbuf *op_err;
14626 					char msg[SCTP_DIAG_INFO_LEN];
14627 
14628 				abort_anyway:
14629 					if (free_cnt_applied) {
14630 						atomic_add_int(&stcb->asoc.refcnt, -1);
14631 						free_cnt_applied = 0;
14632 					}
14633 					SCTP_SNPRINTF(msg, sizeof(msg),
14634 					              "%s:%d at %s", __FILE__, __LINE__, __func__);
14635 					op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
14636 					                             msg);
14637 #if defined(__FreeBSD__) && !defined(__Userspace__)
14638 					NET_EPOCH_ENTER(et);
14639 #endif
14640 					sctp_abort_an_association(stcb->sctp_ep, stcb,
14641 					                          op_err, SCTP_SO_LOCKED);
14642 #if defined(__FreeBSD__) && !defined(__Userspace__)
14643 					NET_EPOCH_EXIT(et);
14644 #endif
14645 					/* the stcb is gone after the abort; drop our references */
14646 					hold_tcblock = 0;
14647 					stcb = NULL;
14648 					goto out;
14649 				}
14650 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
14651 				                 NULL);
14652 				sctp_feature_off(inp, SCTP_PCB_FLAGS_NODELAY);
14653 			}
14654 		}
14655 	}
14656 skip_out_eof:
14657 	if (!TAILQ_EMPTY(&stcb->asoc.control_send_queue)) {
14658 		some_on_control = 1;
14659 	}
14660 	if (queue_only_for_init) {
14661 		if (hold_tcblock == 0) {
14662 			SCTP_TCB_LOCK(stcb);
14663 			hold_tcblock = 1;
14664 		}
14665 		if (SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) {
14666 			/* a collision took us forward? */
14667 			queue_only = 0;
14668 		} else {
14669 #if defined(__FreeBSD__) && !defined(__Userspace__)
14670 			NET_EPOCH_ENTER(et);
14671 #endif
14672 			sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
14673 #if defined(__FreeBSD__) && !defined(__Userspace__)
14674 			NET_EPOCH_EXIT(et);
14675 #endif
14676 			SCTP_SET_STATE(stcb, SCTP_STATE_COOKIE_WAIT);
14677 			queue_only = 1;
14678 		}
14679 	}
14680 	if ((net->flight_size > net->cwnd) &&
14681 	    (stcb->asoc.sctp_cmt_on_off == 0)) {
14682 		SCTP_STAT_INCR(sctps_send_cwnd_avoid);
14683 		queue_only = 1;
14684 	} else if (asoc->ifp_had_enobuf) {
14685 		SCTP_STAT_INCR(sctps_ifnomemqueued);
14686 		if (net->flight_size > (2 * net->mtu)) {
14687 			queue_only = 1;
14688 		}
14689 		asoc->ifp_had_enobuf = 0;
14690 	}
14691 	un_sent = stcb->asoc.total_output_queue_size - stcb->asoc.total_flight;
14692 	if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
14693 	    (stcb->asoc.total_flight > 0) &&
14694 	    (stcb->asoc.stream_queue_cnt < SCTP_MAX_DATA_BUNDLING) &&
14695 	    (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))) {
14696 		/*-
14697 		 * Ok, Nagle is set on and we have data outstanding.
14698 		 * Don't send anything and let SACKs drive out the
14699 		 * data unless we have a "full" segment to send.
14700 		 */
14701 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
14702 			sctp_log_nagle_event(stcb, SCTP_NAGLE_APPLIED);
14703 		}
14704 		SCTP_STAT_INCR(sctps_naglequeued);
14705 		nagle_applies = 1;
14706 	} else {
14707 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
14708 			if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY))
14709 				sctp_log_nagle_event(stcb, SCTP_NAGLE_SKIPPED);
14710 		}
14711 		SCTP_STAT_INCR(sctps_naglesent);
14712 		nagle_applies = 0;
14713 	}
14714 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
14715 		sctp_misc_ints(SCTP_CWNDLOG_PRESEND, queue_only_for_init, queue_only,
14716 		               nagle_applies, un_sent);
14717 		sctp_misc_ints(SCTP_CWNDLOG_PRESEND, stcb->asoc.total_output_queue_size,
14718 		               stcb->asoc.total_flight,
14719 		               stcb->asoc.chunks_on_out_queue, stcb->asoc.total_flight_count);
14720 	}
14721 #if defined(__FreeBSD__) && !defined(__Userspace__)
14722 	NET_EPOCH_ENTER(et);
14723 #endif
14724 	if ((queue_only == 0) && (nagle_applies == 0) && (stcb->asoc.peers_rwnd && un_sent)) {
14725 		/* we can attempt to send too. */
14726 		if (hold_tcblock == 0) {
14727 			/* If there is activity recv'ing sacks no need to send */
14728 			if (SCTP_TCB_TRYLOCK(stcb)) {
14729 				sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
14730 				hold_tcblock = 1;
14731 			}
14732 		} else {
14733 			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
14734 		}
14735 	} else if ((queue_only == 0) &&
14736 	           (stcb->asoc.peers_rwnd == 0) &&
14737 	           (stcb->asoc.total_flight == 0)) {
14738 		/* We get to have a probe outstanding */
14739 		if (hold_tcblock == 0) {
14740 			hold_tcblock = 1;
14741 			SCTP_TCB_LOCK(stcb);
14742 		}
14743 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
14744 	} else if (some_on_control) {
14745 		int num_out, reason, frag_point;
14746 
14747 		/* Here we do control only */
14748 		if (hold_tcblock == 0) {
14749 			hold_tcblock = 1;
14750 			SCTP_TCB_LOCK(stcb);
14751 		}
14752 		frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
14753 		(void)sctp_med_chunk_output(inp, stcb, &stcb->asoc, &num_out,
14754 		                            &reason, 1, 1, &now, &now_filled, frag_point, SCTP_SO_LOCKED);
14755 	}
14756 #if defined(__FreeBSD__) && !defined(__Userspace__)
14757 	NET_EPOCH_EXIT(et);
14758 #endif
14759 	SCTPDBG(SCTP_DEBUG_OUTPUT1, "USR Send complete qo:%d prw:%d unsent:%d tf:%d cooq:%d toqs:%d err:%d\n",
14760 	        queue_only, stcb->asoc.peers_rwnd, un_sent,
14761 		stcb->asoc.total_flight, stcb->asoc.chunks_on_out_queue,
14762 	        stcb->asoc.total_output_queue_size, error);
14763 
14764 out:
14765 #if defined(__APPLE__) && !defined(__Userspace__)
14766 	sbunlock(&so->so_snd, 1);
14767 #endif
14768 out_unlocked:
14769 
14770 	if (local_soresv && stcb) {
14771 		atomic_subtract_int(&stcb->asoc.sb_send_resv, sndlen);
14772 	}
14773 	if (create_lock_applied) {
14774 		SCTP_ASOC_CREATE_UNLOCK(inp);
14775 	}
14776 	if ((stcb) && hold_tcblock) {
14777 		SCTP_TCB_UNLOCK(stcb);
14778 	}
14779 	if (stcb && free_cnt_applied) {
14780 		atomic_add_int(&stcb->asoc.refcnt, -1);
14781 	}
14782 #ifdef INVARIANTS
14783 #if defined(__FreeBSD__) && !defined(__Userspace__)
14784 	if (stcb) {
14785 		if (mtx_owned(&stcb->tcb_mtx)) {
14786 			panic("Leaving with tcb mtx owned?");
14787 		}
14788 		if (mtx_owned(&stcb->tcb_send_mtx)) {
14789 			panic("Leaving with tcb send mtx owned?");
14790 		}
14791 	}
14792 #endif
14793 #endif
14794 	if (top) {
14795 		sctp_m_freem(top);
14796 	}
14797 	if (control) {
14798 		sctp_m_freem(control);
14799 	}
14800 	return (error);
14801 }
14802 
14803 
14804 /*
14805  * generate an AUTHentication chunk, if required
14806  */
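/*
 * On return *auth_ret points at the AUTH chunk, *offset is its byte offset
 * within the chain and *m_end tracks the new end of the chain; the HMAC
 * itself is filled in later, at send time.  A caller elsewhere in this file
 * would typically use it roughly like (sketch, variable names illustrative):
 *
 *	outchain = sctp_add_auth_chunk(outchain, &endofchain, &auth,
 *	                               &auth_offset, stcb, chunk_type);
 */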
14807 struct mbuf *
14808 sctp_add_auth_chunk(struct mbuf *m, struct mbuf **m_end,
14809     struct sctp_auth_chunk **auth_ret, uint32_t *offset,
14810     struct sctp_tcb *stcb, uint8_t chunk)
14811 {
14812 	struct mbuf *m_auth;
14813 	struct sctp_auth_chunk *auth;
14814 	int chunk_len;
14815 	struct mbuf *cn;
14816 
14817 	if ((m_end == NULL) || (auth_ret == NULL) || (offset == NULL) ||
14818 	    (stcb == NULL))
14819 		return (m);
14820 
14821 	if (stcb->asoc.auth_supported == 0) {
14822 		return (m);
14823 	}
14824 	/* does the requested chunk require auth? */
14825 	if (!sctp_auth_is_required_chunk(chunk, stcb->asoc.peer_auth_chunks)) {
14826 		return (m);
14827 	}
14828 	m_auth = sctp_get_mbuf_for_msg(sizeof(*auth), 0, M_NOWAIT, 1, MT_HEADER);
14829 	if (m_auth == NULL) {
14830 		/* no mbufs available */
14831 		return (m);
14832 	}
14833 	/* reserve some space if this will be the first mbuf */
14834 	if (m == NULL)
14835 		SCTP_BUF_RESV_UF(m_auth, SCTP_MIN_OVERHEAD);
14836 	/* fill in the AUTH chunk details */
14837 	auth = mtod(m_auth, struct sctp_auth_chunk *);
14838 	memset(auth, 0, sizeof(*auth));
14839 	auth->ch.chunk_type = SCTP_AUTHENTICATION;
14840 	auth->ch.chunk_flags = 0;
14841 	chunk_len = sizeof(*auth) +
14842 	    sctp_get_hmac_digest_len(stcb->asoc.peer_hmac_id);
14843 	auth->ch.chunk_length = htons(chunk_len);
14844 	auth->hmac_id = htons(stcb->asoc.peer_hmac_id);
14845 	/* key id and hmac digest will be computed and filled in upon send */
14846 
14847 	/* save the offset where the auth was inserted into the chain */
14848 	*offset = 0;
14849 	for (cn = m; cn; cn = SCTP_BUF_NEXT(cn)) {
14850 		*offset += SCTP_BUF_LEN(cn);
14851 	}
14852 
14853 	/* update length and return pointer to the auth chunk */
14854 	SCTP_BUF_LEN(m_auth) = chunk_len;
14855 	m = sctp_copy_mbufchain(m_auth, m, m_end, 1, chunk_len, 0);
14856 	if (auth_ret != NULL)
14857 		*auth_ret = auth;
14858 
14859 	return (m);
14860 }
14861 
14862 #if (defined(__FreeBSD__) || defined(__APPLE__)) && !defined(__Userspace__)
14863 #ifdef INET6
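/*
 * Return 1 if the gateway of the cached route is one of the routers that
 * advertised the prefix covering src6 (so the source address belongs to
 * that next hop), 0 otherwise.
 */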
14864 int
14865 sctp_v6src_match_nexthop(struct sockaddr_in6 *src6, sctp_route_t *ro)
14866 {
14867 	struct nd_prefix *pfx = NULL;
14868 	struct nd_pfxrouter *pfxrtr = NULL;
14869 	struct sockaddr_in6 gw6;
14870 
14871 #if defined(__FreeBSD__)
14872 	if (ro == NULL || ro->ro_nh == NULL || src6->sin6_family != AF_INET6)
14873 #else
14874 	if (ro == NULL || ro->ro_rt == NULL || src6->sin6_family != AF_INET6)
14875 #endif
14876 		return (0);
14877 
14878 	/* get prefix entry of address */
14879 #if defined(__FreeBSD__)
14880 	ND6_RLOCK();
14881 #endif
14882 	LIST_FOREACH(pfx, &MODULE_GLOBAL(nd_prefix), ndpr_entry) {
14883 		if (pfx->ndpr_stateflags & NDPRF_DETACHED)
14884 			continue;
14885 		if (IN6_ARE_MASKED_ADDR_EQUAL(&pfx->ndpr_prefix.sin6_addr,
14886 		    &src6->sin6_addr, &pfx->ndpr_mask))
14887 			break;
14888 	}
14889 	/* no prefix entry in the prefix list */
14890 	if (pfx == NULL) {
14891 #if defined(__FreeBSD__)
14892 		ND6_RUNLOCK();
14893 #endif
14894 		SCTPDBG(SCTP_DEBUG_OUTPUT2, "No prefix entry for ");
14895 		SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)src6);
14896 		return (0);
14897 	}
14898 
14899 	SCTPDBG(SCTP_DEBUG_OUTPUT2, "v6src_match_nexthop(), Prefix entry is ");
14900 	SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)src6);
14901 
14902 	/* search installed gateway from prefix entry */
14903 	LIST_FOREACH(pfxrtr, &pfx->ndpr_advrtrs, pfr_entry) {
14904 		memset(&gw6, 0, sizeof(struct sockaddr_in6));
14905 		gw6.sin6_family = AF_INET6;
14906 #ifdef HAVE_SIN6_LEN
14907 		gw6.sin6_len = sizeof(struct sockaddr_in6);
14908 #endif
14909 		memcpy(&gw6.sin6_addr, &pfxrtr->router->rtaddr,
14910 		    sizeof(struct in6_addr));
14911 		SCTPDBG(SCTP_DEBUG_OUTPUT2, "prefix router is ");
14912 		SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)&gw6);
14913 		SCTPDBG(SCTP_DEBUG_OUTPUT2, "installed router is ");
14914 #if defined(__FreeBSD__)
14915 		SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &ro->ro_nh->gw_sa);
14916 #else
14917 		SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, ro->ro_rt->rt_gateway);
14918 #endif
14919 #if defined(__FreeBSD__)
14920 		if (sctp_cmpaddr((struct sockaddr *)&gw6, &ro->ro_nh->gw_sa)) {
14921 			ND6_RUNLOCK();
14922 #else
14923 		if (sctp_cmpaddr((struct sockaddr *)&gw6, ro->ro_rt->rt_gateway)) {
14924 #endif
14925 			SCTPDBG(SCTP_DEBUG_OUTPUT2, "pfxrouter is installed\n");
14926 			return (1);
14927 		}
14928 	}
14929 #if defined(__FreeBSD__)
14930 	ND6_RUNLOCK();
14931 #endif
14932 	SCTPDBG(SCTP_DEBUG_OUTPUT2, "pfxrouter is not installed\n");
14933 	return (0);
14934 }
14935 #endif
14936 
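/*
 * Return 1 if the source address and the route's gateway are on the same
 * subnet (using the source interface's netmask), 0 otherwise.
 */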
14937 int
14938 sctp_v4src_match_nexthop(struct sctp_ifa *sifa, sctp_route_t *ro)
14939 {
14940 #ifdef INET
14941 	struct sockaddr_in *sin, *mask;
14942 	struct ifaddr *ifa;
14943 	struct in_addr srcnetaddr, gwnetaddr;
14944 
14945 #if defined(__FreeBSD__)
14946 	if (ro == NULL || ro->ro_nh == NULL ||
14947 #else
14948 	if (ro == NULL || ro->ro_rt == NULL ||
14949 #endif
14950 	    sifa->address.sa.sa_family != AF_INET) {
14951 		return (0);
14952 	}
14953 	ifa = (struct ifaddr *)sifa->ifa;
14954 	mask = (struct sockaddr_in *)(ifa->ifa_netmask);
14955 	sin = &sifa->address.sin;
14956 	srcnetaddr.s_addr = (sin->sin_addr.s_addr & mask->sin_addr.s_addr);
14957 	SCTPDBG(SCTP_DEBUG_OUTPUT1, "match_nexthop4: src address is ");
14958 	SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &sifa->address.sa);
14959 	SCTPDBG(SCTP_DEBUG_OUTPUT1, "network address is %x\n", srcnetaddr.s_addr);
14960 
14961 #if defined(__FreeBSD__)
14962 	sin = &ro->ro_nh->gw4_sa;
14963 #else
14964 	sin = (struct sockaddr_in *)ro->ro_rt->rt_gateway;
14965 #endif
14966 	gwnetaddr.s_addr = (sin->sin_addr.s_addr & mask->sin_addr.s_addr);
14967 	SCTPDBG(SCTP_DEBUG_OUTPUT1, "match_nexthop4: nexthop is ");
14968 #if defined(__FreeBSD__)
14969 	SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &ro->ro_nh->gw_sa);
14970 #else
14971 	SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, ro->ro_rt->rt_gateway);
14972 #endif
14973 	SCTPDBG(SCTP_DEBUG_OUTPUT1, "network address is %x\n", gwnetaddr.s_addr);
14974 	if (srcnetaddr.s_addr == gwnetaddr.s_addr) {
14975 		return (1);
14976 	}
14977 #endif
14978 	return (0);
14979 }
14980 #elif defined(__Userspace__)
14981 /* TODO __Userspace__ versions of sctp_vXsrc_match_nexthop(). */
14982 int
14983 sctp_v6src_match_nexthop(struct sockaddr_in6 *src6, sctp_route_t *ro)
14984 {
14985 	return (0);
14986 }
14987 int
14988 sctp_v4src_match_nexthop(struct sctp_ifa *sifa, sctp_route_t *ro)
14989 {
14990 	return (0);
14991 }
14992 
14993 #endif
14994