1 /*-
2  * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
3  * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
4  * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * a) Redistributions of source code must retain the above copyright notice,
10  *    this list of conditions and the following disclaimer.
11  *
12  * b) Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in
14  *    the documentation and/or other materials provided with the distribution.
15  *
16  * c) Neither the name of Cisco Systems, Inc. nor the names of its
17  *    contributors may be used to endorse or promote products derived
18  *    from this software without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30  * THE POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 #ifdef __FreeBSD__
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD: head/sys/netinet/sctputil.c 264701 2014-04-20 18:15:23Z tuexen $");
36 #endif
37 
38 #include <netinet/sctp_os.h>
39 #include <netinet/sctp_pcb.h>
40 #include <netinet/sctputil.h>
41 #include <netinet/sctp_var.h>
42 #include <netinet/sctp_sysctl.h>
43 #ifdef INET6
44 #if defined(__Userspace__) || defined(__FreeBSD__)
45 #include <netinet6/sctp6_var.h>
46 #endif
47 #endif
48 #include <netinet/sctp_header.h>
49 #include <netinet/sctp_output.h>
50 #include <netinet/sctp_uio.h>
51 #include <netinet/sctp_timer.h>
52 #include <netinet/sctp_indata.h> /* for sctp_deliver_data() */
53 #include <netinet/sctp_auth.h>
54 #include <netinet/sctp_asconf.h>
55 #include <netinet/sctp_bsd_addr.h>
56 #if defined(__Userspace__)
57 #include <netinet/sctp_constants.h>
58 #endif
59 #if defined(__FreeBSD__)
60 #include <netinet/udp.h>
61 #include <netinet/udp_var.h>
62 #include <sys/proc.h>
63 #endif
64 
65 #if defined(__APPLE__)
66 #define APPLE_FILE_NO 8
67 #endif
68 
69 #if defined(__Windows__)
70 #if !defined(SCTP_LOCAL_TRACE_BUF)
71 #include "eventrace_netinet.h"
72 #include "sctputil.tmh" /* this is the file that will be auto generated */
73 #endif
74 #else
75 #ifndef KTR_SCTP
76 #define KTR_SCTP KTR_SUBSYS
77 #endif
78 #endif
79 
80 extern struct sctp_cc_functions sctp_cc_functions[];
81 extern struct sctp_ss_functions sctp_ss_functions[];
82 
83 void
84 sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr)
85 {
86 #if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
87 	struct sctp_cwnd_log sctp_clog;
88 
89 	sctp_clog.x.sb.stcb = stcb;
90 	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
91 	if (stcb)
92 		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
93 	else
94 		sctp_clog.x.sb.stcb_sbcc = 0;
95 	sctp_clog.x.sb.incr = incr;
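	/*
	 * The x member of struct sctp_cwnd_log is a union, so the
	 * misc.log1..log4 words handed to SCTP_CTR6 below alias the sb
	 * fields just filled in; the other log helpers in this file use
	 * the same pattern.
	 */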
96 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
97 	     SCTP_LOG_EVENT_SB,
98 	     from,
99 	     sctp_clog.x.misc.log1,
100 	     sctp_clog.x.misc.log2,
101 	     sctp_clog.x.misc.log3,
102 	     sctp_clog.x.misc.log4);
103 #endif
104 }
105 
106 void
107 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
108 {
109 #if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
110 	struct sctp_cwnd_log sctp_clog;
111 
112 	sctp_clog.x.close.inp = (void *)inp;
113 	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
114 	if (stcb) {
115 		sctp_clog.x.close.stcb = (void *)stcb;
116 		sctp_clog.x.close.state = (uint16_t)stcb->asoc.state;
117 	} else {
118 		sctp_clog.x.close.stcb = 0;
119 		sctp_clog.x.close.state = 0;
120 	}
121 	sctp_clog.x.close.loc = loc;
122 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
123 	     SCTP_LOG_EVENT_CLOSE,
124 	     0,
125 	     sctp_clog.x.misc.log1,
126 	     sctp_clog.x.misc.log2,
127 	     sctp_clog.x.misc.log3,
128 	     sctp_clog.x.misc.log4);
129 #endif
130 }
131 
132 void
133 rto_logging(struct sctp_nets *net, int from)
134 {
135 #if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
136 	struct sctp_cwnd_log sctp_clog;
137 
138 	memset(&sctp_clog, 0, sizeof(sctp_clog));
139 	sctp_clog.x.rto.net = (void *) net;
140 	sctp_clog.x.rto.rtt = net->rtt / 1000;
141 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
142 	     SCTP_LOG_EVENT_RTT,
143 	     from,
144 	     sctp_clog.x.misc.log1,
145 	     sctp_clog.x.misc.log2,
146 	     sctp_clog.x.misc.log3,
147 	     sctp_clog.x.misc.log4);
148 #endif
149 }
150 
151 void
152 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
153 {
154 #if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
155 	struct sctp_cwnd_log sctp_clog;
156 
157 	sctp_clog.x.strlog.stcb = stcb;
158 	sctp_clog.x.strlog.n_tsn = tsn;
159 	sctp_clog.x.strlog.n_sseq = sseq;
160 	sctp_clog.x.strlog.e_tsn = 0;
161 	sctp_clog.x.strlog.e_sseq = 0;
162 	sctp_clog.x.strlog.strm = stream;
163 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
164 	     SCTP_LOG_EVENT_STRM,
165 	     from,
166 	     sctp_clog.x.misc.log1,
167 	     sctp_clog.x.misc.log2,
168 	     sctp_clog.x.misc.log3,
169 	     sctp_clog.x.misc.log4);
170 #endif
171 }
172 
173 void
174 sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
175 {
176 #if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
177 	struct sctp_cwnd_log sctp_clog;
178 
179 	sctp_clog.x.nagle.stcb = (void *)stcb;
180 	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
181 	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
182 	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
183 	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
184 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
185 	     SCTP_LOG_EVENT_NAGLE,
186 	     action,
187 	     sctp_clog.x.misc.log1,
188 	     sctp_clog.x.misc.log2,
189 	     sctp_clog.x.misc.log3,
190 	     sctp_clog.x.misc.log4);
191 #endif
192 }
193 
194 void
195 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
196 {
197 #if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
198 	struct sctp_cwnd_log sctp_clog;
199 
200 	sctp_clog.x.sack.cumack = cumack;
201 	sctp_clog.x.sack.oldcumack = old_cumack;
202 	sctp_clog.x.sack.tsn = tsn;
203 	sctp_clog.x.sack.numGaps = gaps;
204 	sctp_clog.x.sack.numDups = dups;
205 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
206 	     SCTP_LOG_EVENT_SACK,
207 	     from,
208 	     sctp_clog.x.misc.log1,
209 	     sctp_clog.x.misc.log2,
210 	     sctp_clog.x.misc.log3,
211 	     sctp_clog.x.misc.log4);
212 #endif
213 }
214 
215 void
216 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
217 {
218 #if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
219 	struct sctp_cwnd_log sctp_clog;
220 
221 	memset(&sctp_clog, 0, sizeof(sctp_clog));
222 	sctp_clog.x.map.base = map;
223 	sctp_clog.x.map.cum = cum;
224 	sctp_clog.x.map.high = high;
225 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
226 	     SCTP_LOG_EVENT_MAP,
227 	     from,
228 	     sctp_clog.x.misc.log1,
229 	     sctp_clog.x.misc.log2,
230 	     sctp_clog.x.misc.log3,
231 	     sctp_clog.x.misc.log4);
232 #endif
233 }
234 
235 void
236 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from)
237 {
238 #if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
239 	struct sctp_cwnd_log sctp_clog;
240 
241 	memset(&sctp_clog, 0, sizeof(sctp_clog));
242 	sctp_clog.x.fr.largest_tsn = biggest_tsn;
243 	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
244 	sctp_clog.x.fr.tsn = tsn;
245 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
246 	     SCTP_LOG_EVENT_FR,
247 	     from,
248 	     sctp_clog.x.misc.log1,
249 	     sctp_clog.x.misc.log2,
250 	     sctp_clog.x.misc.log3,
251 	     sctp_clog.x.misc.log4);
252 #endif
253 }
254 
255 void
256 sctp_log_mb(struct mbuf *m, int from)
257 {
258 #if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
259 	struct sctp_cwnd_log sctp_clog;
260 
261 	sctp_clog.x.mb.mp = m;
262 	sctp_clog.x.mb.mbuf_flags = (uint8_t)(SCTP_BUF_GET_FLAGS(m));
263 	sctp_clog.x.mb.size = (uint16_t)(SCTP_BUF_LEN(m));
264 	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
265 	if (SCTP_BUF_IS_EXTENDED(m)) {
266 		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
267 #if defined(__APPLE__)
268 		/* APPLE does not use a ref_cnt, but a forward/backward ref queue */
269 #else
270 		sctp_clog.x.mb.refcnt = (uint8_t)(SCTP_BUF_EXTEND_REFCNT(m));
271 #endif
272 	} else {
273 		sctp_clog.x.mb.ext = 0;
274 		sctp_clog.x.mb.refcnt = 0;
275 	}
276 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
277 	     SCTP_LOG_EVENT_MBUF,
278 	     from,
279 	     sctp_clog.x.misc.log1,
280 	     sctp_clog.x.misc.log2,
281 	     sctp_clog.x.misc.log3,
282 	     sctp_clog.x.misc.log4);
283 #endif
284 }
285 
286 void
287 sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from)
288 {
289 #if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
290 	struct sctp_cwnd_log sctp_clog;
291 
292 	if (control == NULL) {
293 		SCTP_PRINTF("Gak log of NULL?\n");
294 		return;
295 	}
296 	sctp_clog.x.strlog.stcb = control->stcb;
297 	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
298 	sctp_clog.x.strlog.n_sseq = control->sinfo_ssn;
299 	sctp_clog.x.strlog.strm = control->sinfo_stream;
300 	if (poschk != NULL) {
301 		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
302 		sctp_clog.x.strlog.e_sseq = poschk->sinfo_ssn;
303 	} else {
304 		sctp_clog.x.strlog.e_tsn = 0;
305 		sctp_clog.x.strlog.e_sseq = 0;
306 	}
307 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
308 	     SCTP_LOG_EVENT_STRM,
309 	     from,
310 	     sctp_clog.x.misc.log1,
311 	     sctp_clog.x.misc.log2,
312 	     sctp_clog.x.misc.log3,
313 	     sctp_clog.x.misc.log4);
314 #endif
315 }
316 
317 void
318 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
319 {
320 #if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
321 	struct sctp_cwnd_log sctp_clog;
322 
323 	sctp_clog.x.cwnd.net = net;
324 	if (stcb->asoc.send_queue_cnt > 255)
325 		sctp_clog.x.cwnd.cnt_in_send = 255;
326 	else
327 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
328 	if (stcb->asoc.stream_queue_cnt > 255)
329 		sctp_clog.x.cwnd.cnt_in_str = 255;
330 	else
331 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
332 
333 	if (net) {
334 		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
335 		sctp_clog.x.cwnd.inflight = net->flight_size;
336 		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
337 		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
338 		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
339 	}
340 	if (SCTP_CWNDLOG_PRESEND == from) {
341 		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
342 	}
343 	sctp_clog.x.cwnd.cwnd_augment = augment;
344 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
345 	     SCTP_LOG_EVENT_CWND,
346 	     from,
347 	     sctp_clog.x.misc.log1,
348 	     sctp_clog.x.misc.log2,
349 	     sctp_clog.x.misc.log3,
350 	     sctp_clog.x.misc.log4);
351 #endif
352 }
353 
354 #ifndef __APPLE__
355 void
356 sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
357 {
358 #if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
359 	struct sctp_cwnd_log sctp_clog;
360 
361 	memset(&sctp_clog, 0, sizeof(sctp_clog));
362 	if (inp) {
363 		sctp_clog.x.lock.sock = (void *) inp->sctp_socket;
364 
365 	} else {
366 		sctp_clog.x.lock.sock = (void *) NULL;
367 	}
368 	sctp_clog.x.lock.inp = (void *) inp;
369 #if (defined(__FreeBSD__) && __FreeBSD_version >= 503000) || (defined(__APPLE__))
370 	if (stcb) {
371 		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
372 	} else {
373 		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
374 	}
375 	if (inp) {
376 		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
377 		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
378 	} else {
379 		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
380 		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
381 	}
382 #if (defined(__FreeBSD__) && __FreeBSD_version <= 602000)
383 	sctp_clog.x.lock.info_lock = mtx_owned(&SCTP_BASE_INFO(ipi_ep_mtx));
384 #else
385 	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
386 #endif
387 	if (inp && (inp->sctp_socket)) {
388 		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
389 		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
390 		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
391 	} else {
392 		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
393 		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
394 		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
395 	}
396 #endif
397 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
398 	     SCTP_LOG_LOCK_EVENT,
399 	     from,
400 	     sctp_clog.x.misc.log1,
401 	     sctp_clog.x.misc.log2,
402 	     sctp_clog.x.misc.log3,
403 	     sctp_clog.x.misc.log4);
404 #endif
405 }
406 #endif
407 
408 void
409 sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
410 {
411 #if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
412 	struct sctp_cwnd_log sctp_clog;
413 
414 	memset(&sctp_clog, 0, sizeof(sctp_clog));
415 	sctp_clog.x.cwnd.net = net;
416 	sctp_clog.x.cwnd.cwnd_new_value = error;
417 	sctp_clog.x.cwnd.inflight = net->flight_size;
418 	sctp_clog.x.cwnd.cwnd_augment = burst;
419 	if (stcb->asoc.send_queue_cnt > 255)
420 		sctp_clog.x.cwnd.cnt_in_send = 255;
421 	else
422 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
423 	if (stcb->asoc.stream_queue_cnt > 255)
424 		sctp_clog.x.cwnd.cnt_in_str = 255;
425 	else
426 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
427 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
428 	     SCTP_LOG_EVENT_MAXBURST,
429 	     from,
430 	     sctp_clog.x.misc.log1,
431 	     sctp_clog.x.misc.log2,
432 	     sctp_clog.x.misc.log3,
433 	     sctp_clog.x.misc.log4);
434 #endif
435 }
436 
437 void
438 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
439 {
440 #if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
441 	struct sctp_cwnd_log sctp_clog;
442 
443 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
444 	sctp_clog.x.rwnd.send_size = snd_size;
445 	sctp_clog.x.rwnd.overhead = overhead;
446 	sctp_clog.x.rwnd.new_rwnd = 0;
447 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
448 	     SCTP_LOG_EVENT_RWND,
449 	     from,
450 	     sctp_clog.x.misc.log1,
451 	     sctp_clog.x.misc.log2,
452 	     sctp_clog.x.misc.log3,
453 	     sctp_clog.x.misc.log4);
454 #endif
455 }
456 
457 void
458 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
459 {
460 #if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
461 	struct sctp_cwnd_log sctp_clog;
462 
463 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
464 	sctp_clog.x.rwnd.send_size = flight_size;
465 	sctp_clog.x.rwnd.overhead = overhead;
466 	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
467 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
468 	     SCTP_LOG_EVENT_RWND,
469 	     from,
470 	     sctp_clog.x.misc.log1,
471 	     sctp_clog.x.misc.log2,
472 	     sctp_clog.x.misc.log3,
473 	     sctp_clog.x.misc.log4);
474 #endif
475 }
476 
477 void
478 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
479 {
480 #if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
481 	struct sctp_cwnd_log sctp_clog;
482 
483 	sctp_clog.x.mbcnt.total_queue_size = total_oq;
484 	sctp_clog.x.mbcnt.size_change = book;
485 	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
486 	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
487 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
488 	     SCTP_LOG_EVENT_MBCNT,
489 	     from,
490 	     sctp_clog.x.misc.log1,
491 	     sctp_clog.x.misc.log2,
492 	     sctp_clog.x.misc.log3,
493 	     sctp_clog.x.misc.log4);
494 #endif
495 }
496 
497 void
498 sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
499 {
500 #if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
501 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
502 	     SCTP_LOG_MISC_EVENT,
503 	     from,
504 	     a, b, c, d);
505 #endif
506 }
507 
508 void
509 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
510 {
511 #if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
512 	struct sctp_cwnd_log sctp_clog;
513 
514 	sctp_clog.x.wake.stcb = (void *)stcb;
515 	sctp_clog.x.wake.wake_cnt = wake_cnt;
516 	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
517 	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
518 	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
519 
520 	if (stcb->asoc.stream_queue_cnt < 0xff)
521 		sctp_clog.x.wake.stream_qcnt = (uint8_t) stcb->asoc.stream_queue_cnt;
522 	else
523 		sctp_clog.x.wake.stream_qcnt = 0xff;
524 
525 	if (stcb->asoc.chunks_on_out_queue < 0xff)
526 		sctp_clog.x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue;
527 	else
528 		sctp_clog.x.wake.chunks_on_oque = 0xff;
529 
530 	sctp_clog.x.wake.sctpflags = 0;
531 	/* set in the deferred mode stuff */
532 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
533 		sctp_clog.x.wake.sctpflags |= 1;
534 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
535 		sctp_clog.x.wake.sctpflags |= 2;
536 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
537 		sctp_clog.x.wake.sctpflags |= 4;
538 	/* what about the sb */
539 	if (stcb->sctp_socket) {
540 		struct socket *so = stcb->sctp_socket;
541 
542 		sctp_clog.x.wake.sbflags = (uint8_t)((so->so_snd.sb_flags & 0x00ff));
543 	} else {
544 		sctp_clog.x.wake.sbflags = 0xff;
545 	}
546 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
547 	     SCTP_LOG_EVENT_WAKE,
548 	     from,
549 	     sctp_clog.x.misc.log1,
550 	     sctp_clog.x.misc.log2,
551 	     sctp_clog.x.misc.log3,
552 	     sctp_clog.x.misc.log4);
553 #endif
554 }
555 
556 void
557 sctp_log_block(uint8_t from, struct sctp_association *asoc, int sendlen)
558 {
559 #if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
560 	struct sctp_cwnd_log sctp_clog;
561 
562 	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
563 	sctp_clog.x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt);
564 	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
565 	sctp_clog.x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt;
566 	sctp_clog.x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue;
567 	sctp_clog.x.blk.flight_size = (uint16_t) (asoc->total_flight/1024);
568 	sctp_clog.x.blk.sndlen = sendlen;
569 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
570 	     SCTP_LOG_EVENT_BLOCK,
571 	     from,
572 	     sctp_clog.x.misc.log1,
573 	     sctp_clog.x.misc.log2,
574 	     sctp_clog.x.misc.log3,
575 	     sctp_clog.x.misc.log4);
576 #endif
577 }
578 
579 int
580 sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
581 {
582 	/* May need to fix this if ktrdump does not work */
583 	return (0);
584 }
585 
586 #ifdef SCTP_AUDITING_ENABLED
587 uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
588 static int sctp_audit_indx = 0;
589 
590 static
591 void
592 sctp_print_audit_report(void)
593 {
594 	int i;
595 	int cnt;
596 
597 	cnt = 0;
598 	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
599 		if ((sctp_audit_data[i][0] == 0xe0) &&
600 		    (sctp_audit_data[i][1] == 0x01)) {
601 			cnt = 0;
602 			SCTP_PRINTF("\n");
603 		} else if (sctp_audit_data[i][0] == 0xf0) {
604 			cnt = 0;
605 			SCTP_PRINTF("\n");
606 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
607 		    (sctp_audit_data[i][1] == 0x01)) {
608 			SCTP_PRINTF("\n");
609 			cnt = 0;
610 		}
611 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
612 			    (uint32_t) sctp_audit_data[i][1]);
613 		cnt++;
614 		if ((cnt % 14) == 0)
615 			SCTP_PRINTF("\n");
616 	}
617 	for (i = 0; i < sctp_audit_indx; i++) {
618 		if ((sctp_audit_data[i][0] == 0xe0) &&
619 		    (sctp_audit_data[i][1] == 0x01)) {
620 			cnt = 0;
621 			SCTP_PRINTF("\n");
622 		} else if (sctp_audit_data[i][0] == 0xf0) {
623 			cnt = 0;
624 			SCTP_PRINTF("\n");
625 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
626 		    (sctp_audit_data[i][1] == 0x01)) {
627 			SCTP_PRINTF("\n");
628 			cnt = 0;
629 		}
630 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
631 			    (uint32_t) sctp_audit_data[i][1]);
632 		cnt++;
633 		if ((cnt % 14) == 0)
634 			SCTP_PRINTF("\n");
635 	}
636 	SCTP_PRINTF("\n");
637 }
638 
639 void
640 sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
641     struct sctp_nets *net)
642 {
643 	int resend_cnt, tot_out, rep, tot_book_cnt;
644 	struct sctp_nets *lnet;
645 	struct sctp_tmit_chunk *chk;
646 
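	/*
	 * sctp_audit_data is a ring buffer of two-byte records: 0xAA marks
	 * a normal entry (caller id in the second byte), 0xAF marks a
	 * missing pointer or a detected inconsistency, and 0xA1/0xA2 record
	 * the retransmit count before and after correction.
	 */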
647 	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
648 	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
649 	sctp_audit_indx++;
650 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
651 		sctp_audit_indx = 0;
652 	}
653 	if (inp == NULL) {
654 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
655 		sctp_audit_data[sctp_audit_indx][1] = 0x01;
656 		sctp_audit_indx++;
657 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
658 			sctp_audit_indx = 0;
659 		}
660 		return;
661 	}
662 	if (stcb == NULL) {
663 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
664 		sctp_audit_data[sctp_audit_indx][1] = 0x02;
665 		sctp_audit_indx++;
666 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
667 			sctp_audit_indx = 0;
668 		}
669 		return;
670 	}
671 	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
672 	sctp_audit_data[sctp_audit_indx][1] =
673 	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
674 	sctp_audit_indx++;
675 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
676 		sctp_audit_indx = 0;
677 	}
678 	rep = 0;
679 	tot_book_cnt = 0;
680 	resend_cnt = tot_out = 0;
681 	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
682 		if (chk->sent == SCTP_DATAGRAM_RESEND) {
683 			resend_cnt++;
684 		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
685 			tot_out += chk->book_size;
686 			tot_book_cnt++;
687 		}
688 	}
689 	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
690 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
691 		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
692 		sctp_audit_indx++;
693 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
694 			sctp_audit_indx = 0;
695 		}
696 		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
697 			    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
698 		rep = 1;
699 		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
700 		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
701 		sctp_audit_data[sctp_audit_indx][1] =
702 		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
703 		sctp_audit_indx++;
704 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
705 			sctp_audit_indx = 0;
706 		}
707 	}
708 	if (tot_out != stcb->asoc.total_flight) {
709 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
710 		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
711 		sctp_audit_indx++;
712 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
713 			sctp_audit_indx = 0;
714 		}
715 		rep = 1;
716 		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
717 			    (int)stcb->asoc.total_flight);
718 		stcb->asoc.total_flight = tot_out;
719 	}
720 	if (tot_book_cnt != stcb->asoc.total_flight_count) {
721 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
722 		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
723 		sctp_audit_indx++;
724 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
725 			sctp_audit_indx = 0;
726 		}
727 		rep = 1;
728 		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);
729 
730 		stcb->asoc.total_flight_count = tot_book_cnt;
731 	}
732 	tot_out = 0;
733 	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
734 		tot_out += lnet->flight_size;
735 	}
736 	if (tot_out != stcb->asoc.total_flight) {
737 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
738 		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
739 		sctp_audit_indx++;
740 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
741 			sctp_audit_indx = 0;
742 		}
743 		rep = 1;
744 		SCTP_PRINTF("real flight:%d net total was %d\n",
745 			    stcb->asoc.total_flight, tot_out);
746 		/* now corrective action */
747 		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
748 
749 			tot_out = 0;
750 			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
751 				if ((chk->whoTo == lnet) &&
752 				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
753 					tot_out += chk->book_size;
754 				}
755 			}
756 			if (lnet->flight_size != tot_out) {
757 				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
758 					    (void *)lnet, lnet->flight_size,
759 					    tot_out);
760 				lnet->flight_size = tot_out;
761 			}
762 		}
763 	}
764 	if (rep) {
765 		sctp_print_audit_report();
766 	}
767 }
768 
769 void
770 sctp_audit_log(uint8_t ev, uint8_t fd)
771 {
772 
773 	sctp_audit_data[sctp_audit_indx][0] = ev;
774 	sctp_audit_data[sctp_audit_indx][1] = fd;
775 	sctp_audit_indx++;
776 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
777 		sctp_audit_indx = 0;
778 	}
779 }
780 
781 #endif
782 
783 /*
784  * sctp_stop_timers_for_shutdown() should be called
785  * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
786  * state to make sure that all timers are stopped.
787  */
788 void
789 sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
790 {
791 	struct sctp_association *asoc;
792 	struct sctp_nets *net;
793 
794 	asoc = &stcb->asoc;
795 
796 	(void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
797 	(void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
798 	(void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
799 	(void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
800 	(void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
801 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
802 		(void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
803 		(void)SCTP_OS_TIMER_STOP(&net->hb_timer.timer);
804 	}
805 }
806 
807 /*
808  * a list of sizes based on typical mtu's, used only if next hop size not
809  * returned.
810  */
811 static uint32_t sctp_mtu_sizes[] = {
812 	68,
813 	296,
814 	508,
815 	512,
816 	544,
817 	576,
818 	1006,
819 	1492,
820 	1500,
821 	1536,
822 	2002,
823 	2048,
824 	4352,
825 	4464,
826 	8166,
827 	17914,
828 	32000,
829 	65535
830 };
831 
832 /*
833  * Return the largest MTU smaller than val. If there is no
834  * entry, just return val.
835  */
836 uint32_t
837 sctp_get_prev_mtu(uint32_t val)
838 {
839 	uint32_t i;
840 
841 	if (val <= sctp_mtu_sizes[0]) {
842 		return (val);
843 	}
844 	for (i = 1; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
845 		if (val <= sctp_mtu_sizes[i]) {
846 			break;
847 		}
848 	}
849 	return (sctp_mtu_sizes[i - 1]);
850 }
851 
852 /*
853  * Return the smallest MTU larger than val. If there is no
854  * entry, just return val.
855  */
856 uint32_t
857 sctp_get_next_mtu(uint32_t val)
858 {
859 	/* select another MTU that is just bigger than this one */
860 	uint32_t i;
861 
862 	for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
863 		if (val < sctp_mtu_sizes[i]) {
864 			return (sctp_mtu_sizes[i]);
865 		}
866 	}
867 	return (val);
868 }
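/*
 * Example: with the sctp_mtu_sizes table above, sctp_get_prev_mtu(1400)
 * returns 1006 and sctp_get_next_mtu(1400) returns 1492, while a value of
 * 65535 or larger is returned unchanged by sctp_get_next_mtu.
 */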
869 
870 void
871 sctp_fill_random_store(struct sctp_pcb *m)
872 {
873 	/*
874 	 * Here we use the MD5/SHA-1 HMAC to hash our stored random numbers
875 	 * together with our counter. The result becomes the new pool of
876 	 * random numbers that we hand out. Note that we do no locking to
877 	 * protect this. That is OK, since concurrent callers only stir
878 	 * more entropy into the random store, which is what we want. There
879 	 * is a danger that two callers draw the same random numbers, but
880 	 * that is OK too, since those values are random as well :->
881 	 */
882 	m->store_at = 0;
883 	(void)sctp_hmac(SCTP_HMAC, (uint8_t *)m->random_numbers,
884 	    sizeof(m->random_numbers), (uint8_t *)&m->random_counter,
885 	    sizeof(m->random_counter), (uint8_t *)m->random_store);
886 	m->random_counter++;
887 }
888 
889 uint32_t
890 sctp_select_initial_TSN(struct sctp_pcb *inp)
891 {
892 	/*
893 	 * A true implementation should use a random selection process to
894 	 * get the initial sequence number (TSN), using RFC 1750 as a good
895 	 * guideline.
896 	 */
897 	uint32_t x, *xp;
898 	uint8_t *p;
899 	int store_at, new_store;
900 
901 	if (inp->initial_sequence_debug != 0) {
902 		uint32_t ret;
903 
904 		ret = inp->initial_sequence_debug;
905 		inp->initial_sequence_debug++;
906 		return (ret);
907 	}
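	/*
	 * Reserve four bytes of the random store with a lock-free
	 * compare-and-swap on store_at; on a race we simply retry, and
	 * whoever wraps the index back to 0 refills the store.
	 */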
908  retry:
909 	store_at = inp->store_at;
910 	new_store = store_at + sizeof(uint32_t);
911 	if (new_store >= (SCTP_SIGNATURE_SIZE-3)) {
912 		new_store = 0;
913 	}
914 	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
915 		goto retry;
916 	}
917 	if (new_store == 0) {
918 		/* Refill the random store */
919 		sctp_fill_random_store(inp);
920 	}
921 	p = &inp->random_store[store_at];
922 	xp = (uint32_t *)p;
923 	x = *xp;
924 	return (x);
925 }
926 
927 uint32_t
928 sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
929 {
930 	uint32_t x;
931 	struct timeval now;
932 
933 	if (check) {
934 		(void)SCTP_GETTIME_TIMEVAL(&now);
935 	}
936 	for (;;) {
937 		x = sctp_select_initial_TSN(&inp->sctp_ep);
938 		if (x == 0) {
939 			/* we never use 0 */
940 			continue;
941 		}
942 		if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
943 			break;
944 		}
945 	}
946 	return (x);
947 }
948 
949 int
950 sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
951                uint32_t override_tag, uint32_t vrf_id)
952 {
953 	struct sctp_association *asoc;
954 	/*
955 	 * Anything set to zero is taken care of by the allocation routine's
956 	 * bzero
957 	 */
958 
959 	/*
960 	 * Up front, select what scoping to apply to the addresses I tell my
961 	 * peer. Not sure what to do with these right now; we will need to
962 	 * come up with a way to set them. We may need to pass them through
963 	 * from the caller in the sctp_aloc_assoc() function.
964 	 */
965 	int i;
966 
967 	asoc = &stcb->asoc;
968 	/* init all variables to a known value. */
969 	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE);
970 	asoc->max_burst = inp->sctp_ep.max_burst;
971 	asoc->fr_max_burst = inp->sctp_ep.fr_max_burst;
972 	asoc->heart_beat_delay = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
973 	asoc->cookie_life = inp->sctp_ep.def_cookie_life;
974 	asoc->sctp_cmt_on_off = inp->sctp_cmt_on_off;
975 	asoc->ecn_allowed = inp->sctp_ecn_enable;
976 	asoc->sctp_nr_sack_on_off = (uint8_t)SCTP_BASE_SYSCTL(sctp_nr_sack_on_off);
977 	asoc->sctp_cmt_pf = (uint8_t)0;
978 	asoc->sctp_frag_point = inp->sctp_frag_point;
979 	asoc->sctp_features = inp->sctp_features;
980 	asoc->default_dscp = inp->sctp_ep.default_dscp;
981 #ifdef INET6
982 	if (inp->sctp_ep.default_flowlabel) {
983 		asoc->default_flowlabel = inp->sctp_ep.default_flowlabel;
984 	} else {
985 		if (inp->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
986 			asoc->default_flowlabel = sctp_select_initial_TSN(&inp->sctp_ep);
987 			asoc->default_flowlabel &= 0x000fffff;
988 			asoc->default_flowlabel |= 0x80000000;
989 		} else {
990 			asoc->default_flowlabel = 0;
991 		}
992 	}
993 #endif
994 	asoc->sb_send_resv = 0;
995 	if (override_tag) {
996 		asoc->my_vtag = override_tag;
997 	} else {
998 		asoc->my_vtag = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport,  1);
999 	}
1000 	/* Get the nonce tags */
1001 	asoc->my_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
1002 	asoc->peer_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
1003 	asoc->vrf_id = vrf_id;
1004 
1005 #ifdef SCTP_ASOCLOG_OF_TSNS
1006 	asoc->tsn_in_at = 0;
1007  	asoc->tsn_out_at = 0;
1008 	asoc->tsn_in_wrapped = 0;
1009 	asoc->tsn_out_wrapped = 0;
1010 	asoc->cumack_log_at = 0;
1011 	asoc->cumack_log_atsnt = 0;
1012 #endif
1013 #ifdef SCTP_FS_SPEC_LOG
1014 	asoc->fs_index = 0;
1015 #endif
1016 	asoc->refcnt = 0;
1017 	asoc->assoc_up_sent = 0;
1018 	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
1019 	    sctp_select_initial_TSN(&inp->sctp_ep);
1020 	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
1021 	/* we are optimistic here */
1022 	asoc->peer_supports_pktdrop = 1;
1023 	asoc->peer_supports_nat = 0;
1024 	asoc->sent_queue_retran_cnt = 0;
1025 
1026 	/* for CMT */
1027         asoc->last_net_cmt_send_started = NULL;
1028 
1029 	/* This will need to be adjusted */
1030 	asoc->last_acked_seq = asoc->init_seq_number - 1;
1031 	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
1032 	asoc->asconf_seq_in = asoc->last_acked_seq;
1033 
1034 	/* here we are different, we hold the next one we expect */
1035 	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
1036 
1037 	asoc->initial_init_rto_max = inp->sctp_ep.initial_init_rto_max;
1038 	asoc->initial_rto = inp->sctp_ep.initial_rto;
1039 
1040 	asoc->max_init_times = inp->sctp_ep.max_init_times;
1041 	asoc->max_send_times = inp->sctp_ep.max_send_times;
1042 	asoc->def_net_failure = inp->sctp_ep.def_net_failure;
1043 	asoc->def_net_pf_threshold = inp->sctp_ep.def_net_pf_threshold;
1044 	asoc->free_chunk_cnt = 0;
1045 
1046 	asoc->iam_blocking = 0;
1047 	asoc->context = inp->sctp_context;
1048 	asoc->local_strreset_support = inp->local_strreset_support;
1049 	asoc->def_send = inp->def_send;
1050 	asoc->delayed_ack = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
1051 	asoc->sack_freq = inp->sctp_ep.sctp_sack_freq;
1052 	asoc->pr_sctp_cnt = 0;
1053 	asoc->total_output_queue_size = 0;
1054 
1055 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1056 		asoc->scope.ipv6_addr_legal = 1;
1057 		if (SCTP_IPV6_V6ONLY(inp) == 0) {
1058 			asoc->scope.ipv4_addr_legal = 1;
1059 		} else {
1060 			asoc->scope.ipv4_addr_legal = 0;
1061 		}
1062 #if defined(__Userspace__)
1063 			asoc->scope.conn_addr_legal = 0;
1064 #endif
1065 	} else {
1066 		asoc->scope.ipv6_addr_legal = 0;
1067 #if defined(__Userspace__)
1068 		if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_CONN) {
1069 			asoc->scope.conn_addr_legal = 1;
1070 			asoc->scope.ipv4_addr_legal = 0;
1071 		} else {
1072 			asoc->scope.conn_addr_legal = 0;
1073 			asoc->scope.ipv4_addr_legal = 1;
1074 		}
1075 #else
1076 		asoc->scope.ipv4_addr_legal = 1;
1077 #endif
1078 	}
1079 
1080 	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), SCTP_MINIMAL_RWND);
1081 	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(inp->sctp_socket);
1082 
1083 	asoc->smallest_mtu = inp->sctp_frag_point;
1084 	asoc->minrto = inp->sctp_ep.sctp_minrto;
1085 	asoc->maxrto = inp->sctp_ep.sctp_maxrto;
1086 
1087 	asoc->locked_on_sending = NULL;
1088 	asoc->stream_locked_on = 0;
1089 	asoc->ecn_echo_cnt_onq = 0;
1090 	asoc->stream_locked = 0;
1091 
1092 	asoc->send_sack = 1;
1093 
1094 	LIST_INIT(&asoc->sctp_restricted_addrs);
1095 
1096 	TAILQ_INIT(&asoc->nets);
1097 	TAILQ_INIT(&asoc->pending_reply_queue);
1098 	TAILQ_INIT(&asoc->asconf_ack_sent);
1099 	/* Set up to fill the HB random cache at the first HB */
1100 	asoc->hb_random_idx = 4;
1101 
1102 	asoc->sctp_autoclose_ticks = inp->sctp_ep.auto_close_time;
1103 
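	/*
	 * The endpoint's default congestion-control and stream-scheduling
	 * module ids index directly into the sctp_cc_functions[] and
	 * sctp_ss_functions[] tables declared extern at the top of this
	 * file.
	 */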
1104 	stcb->asoc.congestion_control_module = inp->sctp_ep.sctp_default_cc_module;
1105 	stcb->asoc.cc_functions = sctp_cc_functions[inp->sctp_ep.sctp_default_cc_module];
1106 
1107 	stcb->asoc.stream_scheduling_module = inp->sctp_ep.sctp_default_ss_module;
1108 	stcb->asoc.ss_functions = sctp_ss_functions[inp->sctp_ep.sctp_default_ss_module];
1109 
1110 	/*
1111 	 * Now the stream parameters, here we allocate space for all streams
1112 	 * that we request by default.
1113 	 */
1114 	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
1115 	    inp->sctp_ep.pre_open_stream_count;
1116 	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1117 		    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1118 		    SCTP_M_STRMO);
1119 	if (asoc->strmout == NULL) {
1120 		/* big trouble no memory */
1121 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1122 		return (ENOMEM);
1123 	}
1124 	for (i = 0; i < asoc->streamoutcnt; i++) {
1125 		/*
1126 		 * The inbound side must be set to 0xffff. Also NOTE: when we
1127 		 * get the INIT-ACK back (for the INIT sender) we MUST reduce
1128 		 * the count (streamoutcnt), but first check whether we sent
1129 		 * to any of the upper streams that were dropped (if some
1130 		 * were). Those that were dropped must be reported to the
1131 		 * upper layer as failed to send.
1132 		 */
1133 		asoc->strmout[i].next_sequence_send = 0x0;
1134 		TAILQ_INIT(&asoc->strmout[i].outqueue);
1135 		asoc->strmout[i].chunks_on_queues = 0;
1136 		asoc->strmout[i].stream_no = i;
1137 		asoc->strmout[i].last_msg_incomplete = 0;
1138 		asoc->ss_functions.sctp_ss_init_stream(&asoc->strmout[i], NULL);
1139 	}
1140 	asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);
1141 
1142 	/* Now the mapping array */
1143 	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1144 	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1145 		    SCTP_M_MAP);
1146 	if (asoc->mapping_array == NULL) {
1147 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1148 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1149 		return (ENOMEM);
1150 	}
1151 	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1152 	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
1153 	    SCTP_M_MAP);
1154 	if (asoc->nr_mapping_array == NULL) {
1155 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1156 		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1157 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1158 		return (ENOMEM);
1159 	}
1160 	memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
1161 
1162 	/* Now the init of the other outqueues */
1163 	TAILQ_INIT(&asoc->free_chunks);
1164 	TAILQ_INIT(&asoc->control_send_queue);
1165 	TAILQ_INIT(&asoc->asconf_send_queue);
1166 	TAILQ_INIT(&asoc->send_queue);
1167 	TAILQ_INIT(&asoc->sent_queue);
1168 	TAILQ_INIT(&asoc->reasmqueue);
1169 	TAILQ_INIT(&asoc->resetHead);
1170 	asoc->max_inbound_streams = inp->sctp_ep.max_open_streams_intome;
1171 	TAILQ_INIT(&asoc->asconf_queue);
1172 	/* authentication fields */
1173 	asoc->authinfo.random = NULL;
1174 	asoc->authinfo.active_keyid = 0;
1175 	asoc->authinfo.assoc_key = NULL;
1176 	asoc->authinfo.assoc_keyid = 0;
1177 	asoc->authinfo.recv_key = NULL;
1178 	asoc->authinfo.recv_keyid = 0;
1179 	LIST_INIT(&asoc->shared_keys);
1180 	asoc->marked_retrans = 0;
1181 	asoc->port = inp->sctp_ep.port;
1182 	asoc->timoinit = 0;
1183 	asoc->timodata = 0;
1184 	asoc->timosack = 0;
1185 	asoc->timoshutdown = 0;
1186 	asoc->timoheartbeat = 0;
1187 	asoc->timocookie = 0;
1188 	asoc->timoshutdownack = 0;
1189 	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1190 	asoc->discontinuity_time = asoc->start_time;
1191 	/* sa_ignore MEMLEAK {memory is put in the assoc mapping array and freed later when
1192 	 * the association is freed.
1193 	 */
1194 	return (0);
1195 }
1196 
1197 void
1198 sctp_print_mapping_array(struct sctp_association *asoc)
1199 {
1200 	unsigned int i, limit;
1201 
1202 	SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
1203 	            asoc->mapping_array_size,
1204 	            asoc->mapping_array_base_tsn,
1205 	            asoc->cumulative_tsn,
1206 	            asoc->highest_tsn_inside_map,
1207 	            asoc->highest_tsn_inside_nr_map);
1208 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1209 		if (asoc->mapping_array[limit - 1] != 0) {
1210 			break;
1211 		}
1212 	}
1213 	SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1214 	for (i = 0; i < limit; i++) {
1215 		SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1216 	}
1217 	if (limit % 16)
1218 		SCTP_PRINTF("\n");
1219 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1220 		if (asoc->nr_mapping_array[limit - 1]) {
1221 			break;
1222 		}
1223 	}
1224 	SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1225 	for (i = 0; i < limit; i++) {
1226 		SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ': '\n');
1227 	}
1228 	if (limit % 16)
1229 		SCTP_PRINTF("\n");
1230 }
1231 
1232 int
1233 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1234 {
1235 	/* mapping array needs to grow */
1236 	uint8_t *new_array1, *new_array2;
1237 	uint32_t new_size;
1238 
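	/*
	 * Grow by enough whole bytes to cover "needed" additional TSNs
	 * (one bit per TSN, rounded up), plus SCTP_MAPPING_ARRAY_INCR
	 * bytes of headroom.
	 */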
1239 	new_size = asoc->mapping_array_size + ((needed+7)/8 + SCTP_MAPPING_ARRAY_INCR);
1240 	SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
1241 	SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
1242 	if ((new_array1 == NULL) || (new_array2 == NULL)) {
1243 		/* can't get more, forget it */
1244 		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
1245 		if (new_array1) {
1246 			SCTP_FREE(new_array1, SCTP_M_MAP);
1247 		}
1248 		if (new_array2) {
1249 			SCTP_FREE(new_array2, SCTP_M_MAP);
1250 		}
1251 		return (-1);
1252 	}
1253 	memset(new_array1, 0, new_size);
1254 	memset(new_array2, 0, new_size);
1255 	memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
1256 	memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
1257 	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1258 	SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
1259 	asoc->mapping_array = new_array1;
1260 	asoc->nr_mapping_array = new_array2;
1261 	asoc->mapping_array_size = new_size;
1262 	return (0);
1263 }
1264 
1265 
1266 static void
1267 sctp_iterator_work(struct sctp_iterator *it)
1268 {
1269 	int iteration_count = 0;
1270 	int inp_skip = 0;
1271 	int first_in = 1;
1272 	struct sctp_inpcb *tinp;
1273 
1274 	SCTP_INP_INFO_RLOCK();
1275 	SCTP_ITERATOR_LOCK();
1276  	if (it->inp) {
1277 		SCTP_INP_RLOCK(it->inp);
1278 		SCTP_INP_DECR_REF(it->inp);
1279 	}
1280 	if (it->inp == NULL) {
1281 		/* iterator is complete */
1282 done_with_iterator:
1283 		SCTP_ITERATOR_UNLOCK();
1284 		SCTP_INP_INFO_RUNLOCK();
1285 		if (it->function_atend != NULL) {
1286 			(*it->function_atend) (it->pointer, it->val);
1287 		}
1288 		SCTP_FREE(it, SCTP_M_ITER);
1289 		return;
1290 	}
1291 select_a_new_ep:
1292 	if (first_in) {
1293 		first_in = 0;
1294 	} else {
1295 		SCTP_INP_RLOCK(it->inp);
1296 	}
1297 	while (((it->pcb_flags) &&
1298 		((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
1299 	       ((it->pcb_features) &&
1300 		((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
1301 		/* endpoint flags or features don't match, so keep looking */
1302 		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
1303 			SCTP_INP_RUNLOCK(it->inp);
1304 			goto done_with_iterator;
1305 		}
1306 		tinp = it->inp;
1307 		it->inp = LIST_NEXT(it->inp, sctp_list);
1308 		SCTP_INP_RUNLOCK(tinp);
1309 		if (it->inp == NULL) {
1310 			goto done_with_iterator;
1311 		}
1312 		SCTP_INP_RLOCK(it->inp);
1313 	}
1314 	/* now go through each assoc which is in the desired state */
1315 	if (it->done_current_ep == 0) {
1316 		if (it->function_inp != NULL)
1317 			inp_skip = (*it->function_inp)(it->inp, it->pointer, it->val);
1318 		it->done_current_ep = 1;
1319 	}
1320 	if (it->stcb == NULL) {
1321 		/* run the per instance function */
1322 		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
1323 	}
1324 	if ((inp_skip) || it->stcb == NULL) {
1325 		if (it->function_inp_end != NULL) {
1326 			inp_skip = (*it->function_inp_end)(it->inp,
1327 							   it->pointer,
1328 							   it->val);
1329 		}
1330 		SCTP_INP_RUNLOCK(it->inp);
1331 		goto no_stcb;
1332 	}
1333 	while (it->stcb) {
1334 		SCTP_TCB_LOCK(it->stcb);
1335 		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
1336 			/* not in the right state... keep looking */
1337 			SCTP_TCB_UNLOCK(it->stcb);
1338 			goto next_assoc;
1339 		}
1340 		/* see if we have hit the iterator's per-pass limit */
1341 		iteration_count++;
1342 		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
1343 			/* Pause to let others grab the lock */
1344 			atomic_add_int(&it->stcb->asoc.refcnt, 1);
1345 			SCTP_TCB_UNLOCK(it->stcb);
1346 			SCTP_INP_INCR_REF(it->inp);
1347 			SCTP_INP_RUNLOCK(it->inp);
1348 			SCTP_ITERATOR_UNLOCK();
1349 			SCTP_INP_INFO_RUNLOCK();
1350 			SCTP_INP_INFO_RLOCK();
1351 			SCTP_ITERATOR_LOCK();
1352 			if (sctp_it_ctl.iterator_flags) {
1353 				/* We won't be staying here */
1354 				SCTP_INP_DECR_REF(it->inp);
1355 				atomic_add_int(&it->stcb->asoc.refcnt, -1);
1356 #if !defined(__FreeBSD__)
1357 				if (sctp_it_ctl.iterator_flags &
1358 				   SCTP_ITERATOR_MUST_EXIT) {
1359 					goto done_with_iterator;
1360 				}
1361 #endif
1362 				if (sctp_it_ctl.iterator_flags &
1363 				   SCTP_ITERATOR_STOP_CUR_IT) {
1364 					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
1365 					goto done_with_iterator;
1366 				}
1367 				if (sctp_it_ctl.iterator_flags &
1368 				   SCTP_ITERATOR_STOP_CUR_INP) {
1369 					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
1370 					goto no_stcb;
1371 				}
1372 				/* If we reach here, we have an unknown flag */
1373 				SCTP_PRINTF("Unknown it ctl flag %x\n",
1374 					    sctp_it_ctl.iterator_flags);
1375 				sctp_it_ctl.iterator_flags = 0;
1376 			}
1377 			SCTP_INP_RLOCK(it->inp);
1378 			SCTP_INP_DECR_REF(it->inp);
1379 			SCTP_TCB_LOCK(it->stcb);
1380 			atomic_add_int(&it->stcb->asoc.refcnt, -1);
1381 			iteration_count = 0;
1382 		}
1383 
1384 		/* run function on this one */
1385 		(*it->function_assoc)(it->inp, it->stcb, it->pointer, it->val);
1386 
1387 		/*
1388 		 * we lie here, it really needs to have its own type but
1389 		 * first I must verify that this won't affect things :-0
1390 		 */
1391 		if (it->no_chunk_output == 0)
1392 			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1393 
1394 		SCTP_TCB_UNLOCK(it->stcb);
1395 	next_assoc:
1396 		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
1397 		if (it->stcb == NULL) {
1398 			/* Run last function */
1399 			if (it->function_inp_end != NULL) {
1400 				inp_skip = (*it->function_inp_end)(it->inp,
1401 								   it->pointer,
1402 								   it->val);
1403 			}
1404 		}
1405 	}
1406 	SCTP_INP_RUNLOCK(it->inp);
1407  no_stcb:
1408 	/* done with all assocs on this endpoint, move on to next endpoint */
1409 	it->done_current_ep = 0;
1410 	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
1411 		it->inp = NULL;
1412 	} else {
1413 		it->inp = LIST_NEXT(it->inp, sctp_list);
1414 	}
1415 	if (it->inp == NULL) {
1416 		goto done_with_iterator;
1417 	}
1418 	goto select_a_new_ep;
1419 }
1420 
1421 void
1422 sctp_iterator_worker(void)
1423 {
1424 	struct sctp_iterator *it, *nit;
1425 
1426 	/* This function is called with the WQ lock in place */
1427 
1428 	sctp_it_ctl.iterator_running = 1;
1429 	TAILQ_FOREACH_SAFE(it, &sctp_it_ctl.iteratorhead, sctp_nxt_itr, nit) {
1430 		sctp_it_ctl.cur_it = it;
1431 		/* now let's work on this one */
1432 		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
1433 		SCTP_IPI_ITERATOR_WQ_UNLOCK();
1434 #if defined(__FreeBSD__) && __FreeBSD_version >= 801000
1435 		CURVNET_SET(it->vn);
1436 #endif
1437 		sctp_iterator_work(it);
1438 		sctp_it_ctl.cur_it = NULL;
1439 #if defined(__FreeBSD__) && __FreeBSD_version >= 801000
1440 		CURVNET_RESTORE();
1441 #endif
1442 		SCTP_IPI_ITERATOR_WQ_LOCK();
1443 #if !defined(__FreeBSD__)
1444 		if (sctp_it_ctl.iterator_flags & SCTP_ITERATOR_MUST_EXIT) {
1445 			break;
1446 		}
1447 #endif
1448 	        /*sa_ignore FREED_MEMORY*/
1449 	}
1450 	sctp_it_ctl.iterator_running = 0;
1451 	return;
1452 }
1453 
1454 
1455 static void
1456 sctp_handle_addr_wq(void)
1457 {
1458 	/* deal with the ADDR wq from the rtsock calls */
1459 	struct sctp_laddr *wi, *nwi;
1460 	struct sctp_asconf_iterator *asc;
1461 
1462 	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1463 		    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
1464 	if (asc == NULL) {
1465 		/* Try later, no memory */
1466 		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1467 				 (struct sctp_inpcb *)NULL,
1468 				 (struct sctp_tcb *)NULL,
1469 				 (struct sctp_nets *)NULL);
1470 		return;
1471 	}
1472 	LIST_INIT(&asc->list_of_work);
1473 	asc->cnt = 0;
1474 
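	/*
	 * Drain the address work queue into a private list under the WQ
	 * lock, then hand that list to an asconf iterator that processes
	 * it against the matching endpoints.
	 */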
1475 	SCTP_WQ_ADDR_LOCK();
1476 	LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
1477 		LIST_REMOVE(wi, sctp_nxt_addr);
1478 		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1479 		asc->cnt++;
1480 	}
1481 	SCTP_WQ_ADDR_UNLOCK();
1482 
1483 	if (asc->cnt == 0) {
1484 		SCTP_FREE(asc, SCTP_M_ASC_IT);
1485 	} else {
1486 		(void)sctp_initiate_iterator(sctp_asconf_iterator_ep,
1487 					     sctp_asconf_iterator_stcb,
1488 					     NULL, /* No ep end for boundall */
1489 					     SCTP_PCB_FLAGS_BOUNDALL,
1490 					     SCTP_PCB_ANY_FEATURES,
1491 					     SCTP_ASOC_ANY_STATE,
1492 					     (void *)asc, 0,
1493 					     sctp_asconf_iterator_end, NULL, 0);
1494 	}
1495 }
1496 
1497 void
1498 sctp_timeout_handler(void *t)
1499 {
1500 	struct sctp_inpcb *inp;
1501 	struct sctp_tcb *stcb;
1502 	struct sctp_nets *net;
1503 	struct sctp_timer *tmr;
1504 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1505 	struct socket *so;
1506 #endif
1507 	int did_output, type;
1508 
1509 	tmr = (struct sctp_timer *)t;
1510 	inp = (struct sctp_inpcb *)tmr->ep;
1511 	stcb = (struct sctp_tcb *)tmr->tcb;
1512 	net = (struct sctp_nets *)tmr->net;
1513 #if defined(__FreeBSD__) && __FreeBSD_version >= 801000
1514 	CURVNET_SET((struct vnet *)tmr->vnet);
1515 #endif
1516 	did_output = 1;
1517 
1518 #ifdef SCTP_AUDITING_ENABLED
1519 	sctp_audit_log(0xF0, (uint8_t) tmr->type);
1520 	sctp_auditing(3, inp, stcb, net);
1521 #endif
1522 
1523 	/* sanity checks... */
1524 	if (tmr->self != (void *)tmr) {
1525 		/*
1526 		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
1527 		 *             (void *)tmr);
1528 		 */
1529 #if defined(__FreeBSD__) && __FreeBSD_version >= 801000
1530 		CURVNET_RESTORE();
1531 #endif
1532 		return;
1533 	}
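	/*
	 * stopped_from is a debugging breadcrumb: each sanity check below
	 * stamps a distinct 0xa00x code, so a stale or stuck timer can be
	 * traced to the last check it passed.
	 */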
1534 	tmr->stopped_from = 0xa001;
1535 	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
1536 		/*
1537 		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
1538 		 * tmr->type);
1539 		 */
1540 #if defined(__FreeBSD__) && __FreeBSD_version >= 801000
1541 		CURVNET_RESTORE();
1542 #endif
1543 		return;
1544 	}
1545 	tmr->stopped_from = 0xa002;
1546 	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
1547 #if defined(__FreeBSD__) && __FreeBSD_version >= 801000
1548 		CURVNET_RESTORE();
1549 #endif
1550 		return;
1551 	}
1552 	/* if this is an iterator timeout, get the struct and clear inp */
1553 	tmr->stopped_from = 0xa003;
1554 	type = tmr->type;
1555 	if (inp) {
1556 		SCTP_INP_INCR_REF(inp);
1557 		if ((inp->sctp_socket == NULL) &&
1558 		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
1559 		     (tmr->type != SCTP_TIMER_TYPE_INIT) &&
1560 		     (tmr->type != SCTP_TIMER_TYPE_SEND) &&
1561 		     (tmr->type != SCTP_TIMER_TYPE_RECV) &&
1562 		     (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
1563 		     (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
1564 		     (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
1565 		     (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
1566 		     (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))
1567 			) {
1568 			SCTP_INP_DECR_REF(inp);
1569 #if defined(__FreeBSD__) && __FreeBSD_version >= 801000
1570 			CURVNET_RESTORE();
1571 #endif
1572 			return;
1573 		}
1574 	}
1575 	tmr->stopped_from = 0xa004;
1576 	if (stcb) {
1577 		atomic_add_int(&stcb->asoc.refcnt, 1);
1578 		if (stcb->asoc.state == 0) {
1579 			atomic_add_int(&stcb->asoc.refcnt, -1);
1580 			if (inp) {
1581 				SCTP_INP_DECR_REF(inp);
1582 			}
1583 #if defined(__FreeBSD__) && __FreeBSD_version >= 801000
1584 			CURVNET_RESTORE();
1585 #endif
1586 			return;
1587 		}
1588 	}
1589 	tmr->stopped_from = 0xa005;
1590 	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", tmr->type);
1591 	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
1592 		if (inp) {
1593 			SCTP_INP_DECR_REF(inp);
1594 		}
1595 		if (stcb) {
1596 			atomic_add_int(&stcb->asoc.refcnt, -1);
1597 		}
1598 #if defined(__FreeBSD__) && __FreeBSD_version >= 801000
1599 		CURVNET_RESTORE();
1600 #endif
1601 		return;
1602 	}
1603 	tmr->stopped_from = 0xa006;
1604 
1605 	if (stcb) {
1606 		SCTP_TCB_LOCK(stcb);
1607 		atomic_add_int(&stcb->asoc.refcnt, -1);
1608 		if ((tmr->type != SCTP_TIMER_TYPE_ASOCKILL) &&
1609 		    ((stcb->asoc.state == 0) ||
1610 		     (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
1611 			SCTP_TCB_UNLOCK(stcb);
1612 			if (inp) {
1613 				SCTP_INP_DECR_REF(inp);
1614 			}
1615 #if defined(__FreeBSD__) && __FreeBSD_version >= 801000
1616 			CURVNET_RESTORE();
1617 #endif
1618 			return;
1619 		}
1620 	}
1621 	/* record in stopped_from which timeout occurred */
1622 	tmr->stopped_from = tmr->type;
1623 
1624 	/* mark as being serviced now */
1625 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
1626 		/*
1627 		 * Callout has been rescheduled.
1628 		 */
1629 		goto get_out;
1630 	}
1631 	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
1632 		/*
1633 		 * Not active, so no action.
1634 		 */
1635 		goto get_out;
1636 	}
1637 	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);
1638 
1639 	/* call the handler for the appropriate timer type */
1640 	switch (tmr->type) {
1641 	case SCTP_TIMER_TYPE_ZERO_COPY:
1642 		if (inp == NULL) {
1643 			break;
1644 		}
1645 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
1646 			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
1647 		}
1648 		break;
1649 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
1650 		if (inp == NULL) {
1651 			break;
1652 		}
1653 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
1654 			SCTP_ZERO_COPY_SENDQ_EVENT(inp, inp->sctp_socket);
1655 		}
1656 		break;
1657 	case SCTP_TIMER_TYPE_ADDR_WQ:
1658 		sctp_handle_addr_wq();
1659 		break;
1660 	case SCTP_TIMER_TYPE_SEND:
1661 		if ((stcb == NULL) || (inp == NULL)) {
1662 			break;
1663 		}
1664 		SCTP_STAT_INCR(sctps_timodata);
1665 		stcb->asoc.timodata++;
1666 		stcb->asoc.num_send_timers_up--;
1667 		if (stcb->asoc.num_send_timers_up < 0) {
1668 			stcb->asoc.num_send_timers_up = 0;
1669 		}
1670 		SCTP_TCB_LOCK_ASSERT(stcb);
1671 		if (sctp_t3rxt_timer(inp, stcb, net)) {
1672 			/* no need to unlock on tcb, it's gone */
1673 
1674 			goto out_decr;
1675 		}
1676 		SCTP_TCB_LOCK_ASSERT(stcb);
1677 #ifdef SCTP_AUDITING_ENABLED
1678 		sctp_auditing(4, inp, stcb, net);
1679 #endif
1680 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1681 		if ((stcb->asoc.num_send_timers_up == 0) &&
1682 		    (stcb->asoc.sent_queue_cnt > 0)) {
1683 			struct sctp_tmit_chunk *chk;
1684 
1685 			/*
1686 			 * Safeguard: if there is something on the sent queue
1687 			 * but no timers are running, something is wrong, so
1688 			 * we start a timer on the first chunk on the sent
1689 			 * queue, on whatever net it was sent to.
1690 			 */
1691 			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
1692 			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
1693 			    chk->whoTo);
1694 		}
1695 		break;
1696 	case SCTP_TIMER_TYPE_INIT:
1697 		if ((stcb == NULL) || (inp == NULL)) {
1698 			break;
1699 		}
1700 		SCTP_STAT_INCR(sctps_timoinit);
1701 		stcb->asoc.timoinit++;
1702 		if (sctp_t1init_timer(inp, stcb, net)) {
1703 			/* no need to unlock on tcb, it's gone */
1704 			goto out_decr;
1705 		}
1706 		/* We do output but not here */
1707 		did_output = 0;
1708 		break;
1709 	case SCTP_TIMER_TYPE_RECV:
1710 		if ((stcb == NULL) || (inp == NULL)) {
1711 			break;
1712 		}
1713 		SCTP_STAT_INCR(sctps_timosack);
1714 		stcb->asoc.timosack++;
1715 		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
1716 #ifdef SCTP_AUDITING_ENABLED
1717 		sctp_auditing(4, inp, stcb, net);
1718 #endif
1719 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
1720 		break;
1721 	case SCTP_TIMER_TYPE_SHUTDOWN:
1722 		if ((stcb == NULL) || (inp == NULL)) {
1723 			break;
1724 		}
1725 		if (sctp_shutdown_timer(inp, stcb, net)) {
1726 			/* no need to unlock on tcb, it's gone */
1727 			goto out_decr;
1728 		}
1729 		SCTP_STAT_INCR(sctps_timoshutdown);
1730 		stcb->asoc.timoshutdown++;
1731 #ifdef SCTP_AUDITING_ENABLED
1732 		sctp_auditing(4, inp, stcb, net);
1733 #endif
1734 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
1735 		break;
1736 	case SCTP_TIMER_TYPE_HEARTBEAT:
1737 		if ((stcb == NULL) || (inp == NULL) || (net == NULL)) {
1738 			break;
1739 		}
1740 		SCTP_STAT_INCR(sctps_timoheartbeat);
1741 		stcb->asoc.timoheartbeat++;
1742 		if (sctp_heartbeat_timer(inp, stcb, net)) {
1743 			/* no need to unlock on tcb, it's gone */
1744 			goto out_decr;
1745 		}
1746 #ifdef SCTP_AUDITING_ENABLED
1747 		sctp_auditing(4, inp, stcb, net);
1748 #endif
1749 		if (!(net->dest_state & SCTP_ADDR_NOHB)) {
1750 			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
1751 			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
1752 		}
1753 		break;
1754 	case SCTP_TIMER_TYPE_COOKIE:
1755 		if ((stcb == NULL) || (inp == NULL)) {
1756 			break;
1757 		}
1758 
1759 		if (sctp_cookie_timer(inp, stcb, net)) {
1760 			/* no need to unlock on tcb, it's gone */
1761 			goto out_decr;
1762 		}
1763 		SCTP_STAT_INCR(sctps_timocookie);
1764 		stcb->asoc.timocookie++;
1765 #ifdef SCTP_AUDITING_ENABLED
1766 		sctp_auditing(4, inp, stcb, net);
1767 #endif
1768 		/*
1769 		 * We consider T3 and Cookie timer pretty much the same with
1770 		 * respect to where from in chunk_output.
1771 		 */
1772 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1773 		break;
1774 	case SCTP_TIMER_TYPE_NEWCOOKIE:
1775 		{
1776 			struct timeval tv;
1777 			int i, secret;
1778 			if (inp == NULL) {
1779 				break;
1780 			}
1781 			SCTP_STAT_INCR(sctps_timosecret);
1782 			(void)SCTP_GETTIME_TIMEVAL(&tv);
1783 			SCTP_INP_WLOCK(inp);
1784 			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
1785 			inp->sctp_ep.last_secret_number =
1786 			    inp->sctp_ep.current_secret_number;
1787 			inp->sctp_ep.current_secret_number++;
1788 			if (inp->sctp_ep.current_secret_number >=
1789 			    SCTP_HOW_MANY_SECRETS) {
1790 				inp->sctp_ep.current_secret_number = 0;
1791 			}
1792 			secret = (int)inp->sctp_ep.current_secret_number;
1793 			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
1794 				inp->sctp_ep.secret_key[secret][i] =
1795 				    sctp_select_initial_TSN(&inp->sctp_ep);
1796 			}
1797 			SCTP_INP_WUNLOCK(inp);
1798 			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
1799 		}
1800 		did_output = 0;
1801 		break;
1802 	case SCTP_TIMER_TYPE_PATHMTURAISE:
1803 		if ((stcb == NULL) || (inp == NULL)) {
1804 			break;
1805 		}
1806 		SCTP_STAT_INCR(sctps_timopathmtu);
1807 		sctp_pathmtu_timer(inp, stcb, net);
1808 		did_output = 0;
1809 		break;
1810 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
1811 		if ((stcb == NULL) || (inp == NULL)) {
1812 			break;
1813 		}
1814 		if (sctp_shutdownack_timer(inp, stcb, net)) {
1815 			/* no need to unlock on tcb, it's gone */
1816 			goto out_decr;
1817 		}
1818 		SCTP_STAT_INCR(sctps_timoshutdownack);
1819  		stcb->asoc.timoshutdownack++;
1820 #ifdef SCTP_AUDITING_ENABLED
1821 		sctp_auditing(4, inp, stcb, net);
1822 #endif
1823 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
1824 		break;
1825 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
1826 		if ((stcb == NULL) || (inp == NULL)) {
1827 			break;
1828 		}
1829 		SCTP_STAT_INCR(sctps_timoshutdownguard);
1830 		sctp_abort_an_association(inp, stcb, NULL, SCTP_SO_NOT_LOCKED);
1831 		/* no need to unlock on tcb, it's gone */
1832 		goto out_decr;
1833 
1834 	case SCTP_TIMER_TYPE_STRRESET:
1835 		if ((stcb == NULL) || (inp == NULL)) {
1836 			break;
1837 		}
1838 		if (sctp_strreset_timer(inp, stcb, net)) {
1839 			/* no need to unlock on tcb, it's gone */
1840 			goto out_decr;
1841 		}
1842 		SCTP_STAT_INCR(sctps_timostrmrst);
1843 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
1844 		break;
1845 	case SCTP_TIMER_TYPE_ASCONF:
1846 		if ((stcb == NULL) || (inp == NULL)) {
1847 			break;
1848 		}
1849 		if (sctp_asconf_timer(inp, stcb, net)) {
1850 			/* no need to unlock on tcb, it's gone */
1851 			goto out_decr;
1852 		}
1853 		SCTP_STAT_INCR(sctps_timoasconf);
1854 #ifdef SCTP_AUDITING_ENABLED
1855 		sctp_auditing(4, inp, stcb, net);
1856 #endif
1857 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
1858 		break;
1859 	case SCTP_TIMER_TYPE_PRIM_DELETED:
1860 		if ((stcb == NULL) || (inp == NULL)) {
1861 			break;
1862 		}
1863 		sctp_delete_prim_timer(inp, stcb, net);
1864 		SCTP_STAT_INCR(sctps_timodelprim);
1865 		break;
1866 
1867 	case SCTP_TIMER_TYPE_AUTOCLOSE:
1868 		if ((stcb == NULL) || (inp == NULL)) {
1869 			break;
1870 		}
1871 		SCTP_STAT_INCR(sctps_timoautoclose);
1872 		sctp_autoclose_timer(inp, stcb, net);
1873 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
1874 		did_output = 0;
1875 		break;
1876 	case SCTP_TIMER_TYPE_ASOCKILL:
1877 		if ((stcb == NULL) || (inp == NULL)) {
1878 			break;
1879 		}
1880 		SCTP_STAT_INCR(sctps_timoassockill);
1881 		/* Can we free it yet? */
1882 		SCTP_INP_DECR_REF(inp);
1883 		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, SCTP_FROM_SCTPUTIL+SCTP_LOC_1);
1884 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1885 		so = SCTP_INP_SO(inp);
1886 		atomic_add_int(&stcb->asoc.refcnt, 1);
1887 		SCTP_TCB_UNLOCK(stcb);
1888 		SCTP_SOCKET_LOCK(so, 1);
1889 		SCTP_TCB_LOCK(stcb);
1890 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
1891 #endif
1892 		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL+SCTP_LOC_2);
1893 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1894 		SCTP_SOCKET_UNLOCK(so, 1);
1895 #endif
1896 		/*
1897 		 * sctp_free_assoc() always unlocks (or destroys) the lock,
1898 		 * so prevent a duplicate unlock or unlocking a freed mutex.
1899 		 */
1900 		stcb = NULL;
1901 		goto out_no_decr;
1902 	case SCTP_TIMER_TYPE_INPKILL:
1903 		SCTP_STAT_INCR(sctps_timoinpkill);
1904 		if (inp == NULL) {
1905 			break;
1906 		}
1907 		/*
1908 		 * special case, take away our increment since WE are the
1909 		 * killer
1910 		 */
1911 		SCTP_INP_DECR_REF(inp);
1912 		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, SCTP_FROM_SCTPUTIL+SCTP_LOC_3);
1913 #if defined(__APPLE__)
1914 		SCTP_SOCKET_LOCK(SCTP_INP_SO(inp), 1);
1915 #endif
1916 		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
1917 				SCTP_CALLED_FROM_INPKILL_TIMER);
1918 #if defined(__APPLE__)
1919 		SCTP_SOCKET_UNLOCK(SCTP_INP_SO(inp), 1);
1920 #endif
1921 		inp = NULL;
1922 		goto out_no_decr;
1923 	default:
1924 		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
1925 			tmr->type);
1926 		break;
1927 	}
1928 #ifdef SCTP_AUDITING_ENABLED
1929 	sctp_audit_log(0xF1, (uint8_t) tmr->type);
1930 	if (inp)
1931 		sctp_auditing(5, inp, stcb, net);
1932 #endif
1933 	if ((did_output) && stcb) {
1934 		/*
1935 		 * Now we need to clean up the control chunk chain if an
1936 		 * ECNE is on it. It must be marked as UNSENT again so next
1937 		 * call will continue to send it until such time that we get
1938 		 * a CWR, to remove it. It is, however, unlikely that we
1939 		 * will find an ECN echo on the chain.
1940 		 */
1941 		sctp_fix_ecn_echo(&stcb->asoc);
1942 	}
1943 get_out:
1944 	if (stcb) {
1945 		SCTP_TCB_UNLOCK(stcb);
1946 	}
1947 
1948 out_decr:
1949 	if (inp) {
1950 		SCTP_INP_DECR_REF(inp);
1951 	}
1952 
1953 out_no_decr:
1954 	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type %d)\n",
1955 			  type);
1956 #if defined(__FreeBSD__) && __FreeBSD_version >= 801000
1957 	CURVNET_RESTORE();
1958 #endif
1959 }
1960 
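/*
 * Start the timer of type t_type for the given endpoint, association and/or
 * destination.  Which arguments are required depends on the timer type; the
 * timeout is computed per type (typically from the destination's RTO or an
 * endpoint default) and the callout is armed via SCTP_OS_TIMER_START, unless
 * a callout is already pending on the selected sctp_timer slot.
 */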
1961 void
1962 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1963     struct sctp_nets *net)
1964 {
1965 	uint32_t to_ticks;
1966 	struct sctp_timer *tmr;
1967 
1968 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
1969 		return;
1970 
1971 	tmr = NULL;
1972 	if (stcb) {
1973 		SCTP_TCB_LOCK_ASSERT(stcb);
1974 	}
1975 	switch (t_type) {
1976 	case SCTP_TIMER_TYPE_ZERO_COPY:
1977 		tmr = &inp->sctp_ep.zero_copy_timer;
1978 		to_ticks = SCTP_ZERO_COPY_TICK_DELAY;
1979 		break;
1980 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
1981 		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
1982 		to_ticks = SCTP_ZERO_COPY_SENDQ_TICK_DELAY;
1983 		break;
1984 	case SCTP_TIMER_TYPE_ADDR_WQ:
1985 		/* Only 1 tick away :-) */
1986 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
1987 		to_ticks = SCTP_ADDRESS_TICK_DELAY;
1988 		break;
1989 	case SCTP_TIMER_TYPE_SEND:
1990 		/* Here we use the RTO timer */
1991 		{
1992 			int rto_val;
1993 
1994 			if ((stcb == NULL) || (net == NULL)) {
1995 				return;
1996 			}
1997 			tmr = &net->rxt_timer;
1998 			if (net->RTO == 0) {
1999 				rto_val = stcb->asoc.initial_rto;
2000 			} else {
2001 				rto_val = net->RTO;
2002 			}
2003 			to_ticks = MSEC_TO_TICKS(rto_val);
2004 		}
2005 		break;
2006 	case SCTP_TIMER_TYPE_INIT:
2007 		/*
2008 		 * Here we use the INIT timer default, usually about 1
2009 		 * minute.
2010 		 */
2011 		if ((stcb == NULL) || (net == NULL)) {
2012 			return;
2013 		}
2014 		tmr = &net->rxt_timer;
2015 		if (net->RTO == 0) {
2016 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2017 		} else {
2018 			to_ticks = MSEC_TO_TICKS(net->RTO);
2019 		}
2020 		break;
2021 	case SCTP_TIMER_TYPE_RECV:
2022 		/*
2023 		 * Here we use the Delayed-Ack timer value from the inp,
2024 		 * usually about 200ms.
2025 		 */
2026 		if (stcb == NULL) {
2027 			return;
2028 		}
2029 		tmr = &stcb->asoc.dack_timer;
2030 		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
2031 		break;
2032 	case SCTP_TIMER_TYPE_SHUTDOWN:
2033 		/* Here we use the RTO of the destination. */
2034 		if ((stcb == NULL) || (net == NULL)) {
2035 			return;
2036 		}
2037 		if (net->RTO == 0) {
2038 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2039 		} else {
2040 			to_ticks = MSEC_TO_TICKS(net->RTO);
2041 		}
2042 		tmr = &net->rxt_timer;
2043 		break;
2044 	case SCTP_TIMER_TYPE_HEARTBEAT:
2045 		/*
2046 		 * The net is used here so that we can add in the RTO, even
2047 		 * though we use a different timer. We also add the HB delay
2048 		 * PLUS a random jitter.
2049 		 */
2050 		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
2051 			return;
2052 		} else {
2053 			uint32_t rndval;
2054 			uint32_t jitter;
2055 
2056 			if ((net->dest_state & SCTP_ADDR_NOHB) &&
2057 			    !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) {
2058 				return;
2059 			}
2060 			if (net->RTO == 0) {
2061 				to_ticks = stcb->asoc.initial_rto;
2062 			} else {
2063 				to_ticks = net->RTO;
2064 			}
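			/*
			 * Pick a random jitter in [0, RTO) and fold it into
			 * an offset of roughly +/- RTO/2 around the base RTO.
			 */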
2065 			rndval = sctp_select_initial_TSN(&inp->sctp_ep);
2066 			jitter = rndval % to_ticks;
2067 			if (jitter >= (to_ticks >> 1)) {
2068 				to_ticks = to_ticks + (jitter - (to_ticks >> 1));
2069 			} else {
2070 				to_ticks = to_ticks - jitter;
2071 			}
2072 			if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
2073 			    !(net->dest_state & SCTP_ADDR_PF)) {
2074 				to_ticks += net->heart_beat_delay;
2075 			}
2076 			/*
2077 			 * Now we must convert to_ticks, which is currently
2078 			 * in ms, to actual ticks.
2079 			 */
2080 			to_ticks = MSEC_TO_TICKS(to_ticks);
2081 			tmr = &net->hb_timer;
2082 		}
2083 		break;
2084 	case SCTP_TIMER_TYPE_COOKIE:
2085 		/*
2086 		 * Here we can use the RTO timer from the network since one
2087 		 * RTT was complete. If a retransmission happened then we
2088 		 * will be using the RTO initial value.
2089 		 */
2090 		if ((stcb == NULL) || (net == NULL)) {
2091 			return;
2092 		}
2093 		if (net->RTO == 0) {
2094 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2095 		} else {
2096 			to_ticks = MSEC_TO_TICKS(net->RTO);
2097 		}
2098 		tmr = &net->rxt_timer;
2099 		break;
2100 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2101 		/*
2102 		 * Nothing needed but the endpoint here; usually about 60
2103 		 * minutes.
2104 		 */
2105 		if (inp == NULL) {
2106 			return;
2107 		}
2108 		tmr = &inp->sctp_ep.signature_change;
2109 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
2110 		break;
2111 	case SCTP_TIMER_TYPE_ASOCKILL:
2112 		if (stcb == NULL) {
2113 			return;
2114 		}
2115 		tmr = &stcb->asoc.strreset_timer;
2116 		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
2117 		break;
2118 	case SCTP_TIMER_TYPE_INPKILL:
2119 		/*
2120 		 * The inp is set up to die. We re-use the signature_change
2121 		 * timer since that has stopped and we are in the GONE
2122 		 * state.
2123 		 */
2124 		if (inp == NULL) {
2125 			return;
2126 		}
2127 		tmr = &inp->sctp_ep.signature_change;
2128 		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
2129 		break;
2130 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2131 		/*
2132 		 * Here we use the value found in the EP for PMTU, usually
2133 		 * about 10 minutes.
2134 		 */
2135 		if ((stcb == NULL) || (inp == NULL)) {
2136 			return;
2137 		}
2138 		if (net == NULL) {
2139 			return;
2140 		}
2141 		if (net->dest_state & SCTP_ADDR_NO_PMTUD) {
2142 			return;
2143 		}
2144 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
2145 		tmr = &net->pmtu_timer;
2146 		break;
2147 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2148 		/* Here we use the RTO of the destination */
2149 		if ((stcb == NULL) || (net == NULL)) {
2150 			return;
2151 		}
2152 		if (net->RTO == 0) {
2153 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2154 		} else {
2155 			to_ticks = MSEC_TO_TICKS(net->RTO);
2156 		}
2157 		tmr = &net->rxt_timer;
2158 		break;
2159 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2160 		/*
2161 		 * Here we use the endpoint's shutdown guard timer, usually
2162 		 * about 3 minutes.
2163 		 */
2164 		if ((inp == NULL) || (stcb == NULL)) {
2165 			return;
2166 		}
2167 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
2168 		tmr = &stcb->asoc.shut_guard_timer;
2169 		break;
2170 	case SCTP_TIMER_TYPE_STRRESET:
2171 		/*
2172 		 * Here the timer comes from the stcb but its value is from
2173 		 * the net's RTO.
2174 		 */
2175 		if ((stcb == NULL) || (net == NULL)) {
2176 			return;
2177 		}
2178 		if (net->RTO == 0) {
2179 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2180 		} else {
2181 			to_ticks = MSEC_TO_TICKS(net->RTO);
2182 		}
2183 		tmr = &stcb->asoc.strreset_timer;
2184 		break;
2185 	case SCTP_TIMER_TYPE_ASCONF:
2186 		/*
2187 		 * Here the timer comes from the stcb but its value is from
2188 		 * the net's RTO.
2189 		 */
2190 		if ((stcb == NULL) || (net == NULL)) {
2191 			return;
2192 		}
2193 		if (net->RTO == 0) {
2194 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2195 		} else {
2196 			to_ticks = MSEC_TO_TICKS(net->RTO);
2197 		}
2198 		tmr = &stcb->asoc.asconf_timer;
2199 		break;
2200 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2201 		if ((stcb == NULL) || (net != NULL)) {
2202 			return;
2203 		}
2204 		to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2205 		tmr = &stcb->asoc.delete_prim_timer;
2206 		break;
2207 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2208 		if (stcb == NULL) {
2209 			return;
2210 		}
2211 		if (stcb->asoc.sctp_autoclose_ticks == 0) {
2212 			/*
2213 			 * Really an error since stcb is NOT set to
2214 			 * autoclose
2215 			 */
2216 			return;
2217 		}
2218 		to_ticks = stcb->asoc.sctp_autoclose_ticks;
2219 		tmr = &stcb->asoc.autoclose_timer;
2220 		break;
2221 	default:
2222 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2223 			__FUNCTION__, t_type);
2224 		return;
2225 		break;
2226 	}
2227 	if ((to_ticks <= 0) || (tmr == NULL)) {
2228 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
2229 			__FUNCTION__, t_type, to_ticks, (void *)tmr);
2230 		return;
2231 	}
2232 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
2233 		/*
2234 		 * We do NOT allow you to have it already running. If it
2235 		 * is, we leave the current one up unchanged.
2236 		 */
2237 		return;
2238 	}
2239 	/* At this point we can proceed */
2240 	if (t_type == SCTP_TIMER_TYPE_SEND) {
2241 		stcb->asoc.num_send_timers_up++;
2242 	}
2243 	tmr->stopped_from = 0;
2244 	tmr->type = t_type;
2245 	tmr->ep = (void *)inp;
2246 	tmr->tcb = (void *)stcb;
2247 	tmr->net = (void *)net;
2248 	tmr->self = (void *)tmr;
2249 #if defined(__FreeBSD__) && __FreeBSD_version >= 800000
2250 	tmr->vnet = (void *)curvnet;
2251 #endif
2252 #ifndef __Panda__
2253 	tmr->ticks = sctp_get_tick_count();
2254 #endif
2255 	(void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
2256 	return;
2257 }
2258 
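/*
 * Stop the timer of type t_type.  The timer slot is located the same way as
 * in sctp_timer_start(); if the slot is currently owned by a different timer
 * type (shared storage), the running callout is left untouched.
 */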
2259 void
2260 sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2261     struct sctp_nets *net, uint32_t from)
2262 {
2263 	struct sctp_timer *tmr;
2264 
2265 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
2266 	    (inp == NULL))
2267 		return;
2268 
2269 	tmr = NULL;
2270 	if (stcb) {
2271 		SCTP_TCB_LOCK_ASSERT(stcb);
2272 	}
2273 	switch (t_type) {
2274 	case SCTP_TIMER_TYPE_ZERO_COPY:
2275 		tmr = &inp->sctp_ep.zero_copy_timer;
2276 		break;
2277 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
2278 		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
2279 		break;
2280 	case SCTP_TIMER_TYPE_ADDR_WQ:
2281 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
2282 		break;
2283 	case SCTP_TIMER_TYPE_SEND:
2284 		if ((stcb == NULL) || (net == NULL)) {
2285 			return;
2286 		}
2287 		tmr = &net->rxt_timer;
2288 		break;
2289 	case SCTP_TIMER_TYPE_INIT:
2290 		if ((stcb == NULL) || (net == NULL)) {
2291 			return;
2292 		}
2293 		tmr = &net->rxt_timer;
2294 		break;
2295 	case SCTP_TIMER_TYPE_RECV:
2296 		if (stcb == NULL) {
2297 			return;
2298 		}
2299 		tmr = &stcb->asoc.dack_timer;
2300 		break;
2301 	case SCTP_TIMER_TYPE_SHUTDOWN:
2302 		if ((stcb == NULL) || (net == NULL)) {
2303 			return;
2304 		}
2305 		tmr = &net->rxt_timer;
2306 		break;
2307 	case SCTP_TIMER_TYPE_HEARTBEAT:
2308 		if ((stcb == NULL) || (net == NULL)) {
2309 			return;
2310 		}
2311 		tmr = &net->hb_timer;
2312 		break;
2313 	case SCTP_TIMER_TYPE_COOKIE:
2314 		if ((stcb == NULL) || (net == NULL)) {
2315 			return;
2316 		}
2317 		tmr = &net->rxt_timer;
2318 		break;
2319 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2320 		/* nothing needed but the endpoint here */
2321 		tmr = &inp->sctp_ep.signature_change;
2322 		/*
2323 		 * We re-use the newcookie timer for the INP kill timer. We
2324 		 * must ensure that we do not kill it by accident.
2325 		 */
2326 		break;
2327 	case SCTP_TIMER_TYPE_ASOCKILL:
2328 		/*
2329 		 * Stop the asoc kill timer.
2330 		 */
2331 		if (stcb == NULL) {
2332 			return;
2333 		}
2334 		tmr = &stcb->asoc.strreset_timer;
2335 		break;
2336 
2337 	case SCTP_TIMER_TYPE_INPKILL:
2338 		/*
2339 		 * The inp is set up to die. We re-use the signature_change
2340 		 * timer since that has stopped and we are in the GONE
2341 		 * state.
2342 		 */
2343 		tmr = &inp->sctp_ep.signature_change;
2344 		break;
2345 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2346 		if ((stcb == NULL) || (net == NULL)) {
2347 			return;
2348 		}
2349 		tmr = &net->pmtu_timer;
2350 		break;
2351 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2352 		if ((stcb == NULL) || (net == NULL)) {
2353 			return;
2354 		}
2355 		tmr = &net->rxt_timer;
2356 		break;
2357 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2358 		if (stcb == NULL) {
2359 			return;
2360 		}
2361 		tmr = &stcb->asoc.shut_guard_timer;
2362 		break;
2363 	case SCTP_TIMER_TYPE_STRRESET:
2364 		if (stcb == NULL) {
2365 			return;
2366 		}
2367 		tmr = &stcb->asoc.strreset_timer;
2368 		break;
2369 	case SCTP_TIMER_TYPE_ASCONF:
2370 		if (stcb == NULL) {
2371 			return;
2372 		}
2373 		tmr = &stcb->asoc.asconf_timer;
2374 		break;
2375 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2376 		if (stcb == NULL) {
2377 			return;
2378 		}
2379 		tmr = &stcb->asoc.delete_prim_timer;
2380 		break;
2381 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2382 		if (stcb == NULL) {
2383 			return;
2384 		}
2385 		tmr = &stcb->asoc.autoclose_timer;
2386 		break;
2387 	default:
2388 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2389 			__FUNCTION__, t_type);
2390 		break;
2391 	}
2392 	if (tmr == NULL) {
2393 		return;
2394 	}
2395 	if ((tmr->type != t_type) && tmr->type) {
2396 		/*
2397 		 * OK, we have a timer that is under joint use, perhaps the
2398 		 * cookie timer sharing storage with the SEND timer. We are
2399 		 * therefore NOT running the timer that the caller wants
2400 		 * stopped, so just return.
2401 		 */
2402 		return;
2403 	}
2404 	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
2405 		stcb->asoc.num_send_timers_up--;
2406 		if (stcb->asoc.num_send_timers_up < 0) {
2407 			stcb->asoc.num_send_timers_up = 0;
2408 		}
2409 	}
2410 	tmr->self = NULL;
2411 	tmr->stopped_from = from;
2412 	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
2413 	return;
2414 }
2415 
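/* Return the total number of data bytes in the given mbuf chain. */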
2416 uint32_t
2417 sctp_calculate_len(struct mbuf *m)
2418 {
2419 	uint32_t tlen = 0;
2420 	struct mbuf *at;
2421 
2422 	at = m;
2423 	while (at) {
2424 		tlen += SCTP_BUF_LEN(at);
2425 		at = SCTP_BUF_NEXT(at);
2426 	}
2427 	return (tlen);
2428 }
2429 
2430 void
2431 sctp_mtu_size_reset(struct sctp_inpcb *inp,
2432     struct sctp_association *asoc, uint32_t mtu)
2433 {
2434 	/*
2435 	 * Reset the P-MTU size on this association. This involves changing
2436 	 * the asoc MTU and going through ANY chunk+overhead larger than mtu
2437 	 * to allow the DF flag to be cleared.
2438 	 */
2439 	struct sctp_tmit_chunk *chk;
2440 	unsigned int eff_mtu, ovh;
2441 
2442 	asoc->smallest_mtu = mtu;
2443 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2444 		ovh = SCTP_MIN_OVERHEAD;
2445 	} else {
2446 		ovh = SCTP_MIN_V4_OVERHEAD;
2447 	}
2448 	eff_mtu = mtu - ovh;
2449 	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2450 		if (chk->send_size > eff_mtu) {
2451 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2452 		}
2453 	}
2454 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2455 		if (chk->send_size > eff_mtu) {
2456 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2457 		}
2458 	}
2459 }
2460 
2461 
2462 /*
2463  * Given an association and the starting time of the current RTT period,
2464  * return the RTO in msecs. net should point to the current network.
2465  */
2466 
2467 uint32_t
2468 sctp_calculate_rto(struct sctp_tcb *stcb,
2469 		   struct sctp_association *asoc,
2470 		   struct sctp_nets *net,
2471 		   struct timeval *told,
2472 		   int safe, int rtt_from_sack)
2473 {
2474 	/*-
2475 	 * Given an association and the starting time of the current RTT
2476 	 * period (in *told), return the RTO in msecs.
2477 	 */
2478 	int32_t rtt; /* RTT in ms */
2479 	uint32_t new_rto;
2480 	int first_measure = 0;
2481 	struct timeval now, then, *old;
2482 
2483 	/* Copy it out for sparc64 */
2484 	if (safe == sctp_align_unsafe_makecopy) {
2485 		old = &then;
2486 		memcpy(&then, told, sizeof(struct timeval));
2487 	} else if (safe == sctp_align_safe_nocopy) {
2488 		old = told;
2489 	} else {
2490 		/* error */
2491 		SCTP_PRINTF("Huh, bad rto calc call\n");
2492 		return (0);
2493 	}
2494 	/************************/
2495 	/* 1. calculate new RTT */
2496 	/************************/
2497 	/* get the current time */
2498 	if (stcb->asoc.use_precise_time) {
2499 		(void)SCTP_GETPTIME_TIMEVAL(&now);
2500 	} else {
2501 		(void)SCTP_GETTIME_TIMEVAL(&now);
2502 	}
2503 	timevalsub(&now, old);
2504 	/* store the current RTT in us */
2505 	net->rtt = (uint64_t)1000000 * (uint64_t)now.tv_sec +
2506 	           (uint64_t)now.tv_usec;
2507 	/* compute the RTT in ms */
2508 	rtt = net->rtt / 1000;
2509 	if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) {
2510 		/* Tell the CC module that a new update has just occurred from a sack */
2511 		(*asoc->cc_functions.sctp_rtt_calculated)(stcb, net, &now);
2512 	}
2513 	/* Do we need to determine the LAN type? We do this only
2514 	 * on SACKs, i.e. when the RTT is determined from data, not
2515 	 * from non-data (HB/INIT->INITACK).
2516 	 */
2517 	if ((rtt_from_sack == SCTP_RTT_FROM_DATA) &&
2518 	    (net->lan_type == SCTP_LAN_UNKNOWN)) {
2519 		if (net->rtt > SCTP_LOCAL_LAN_RTT) {
2520 			net->lan_type = SCTP_LAN_INTERNET;
2521 		} else {
2522 			net->lan_type = SCTP_LAN_LOCAL;
2523 		}
2524 	}
2525 
2526 	/***************************/
2527 	/* 2. update RTTVAR & SRTT */
2528 	/***************************/
2529 	/*-
2530 	 * Compute the scaled average lastsa and the
2531 	 * scaled variance lastsv as described in Van Jacobson's
2532 	 * paper "Congestion Avoidance and Control", Annex A.
2533 	 *
2534 	 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt
2535 	 * (net->lastsv >> SCTP_RTT_VAR_SHIFT) is the rttvar
2536 	 */
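	/*
	 * In unscaled terms the update below is the classic
	 *   srtt   += (rtt - srtt) / 2^SCTP_RTT_SHIFT
	 *   rttvar += (|rtt - srtt| - rttvar) / 2^SCTP_RTT_VAR_SHIFT
	 * carried out on the scaled values lastsa and lastsv so that the
	 * divisions become shifts.
	 */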
2537 	if (net->RTO_measured) {
2538 		rtt -= (net->lastsa >> SCTP_RTT_SHIFT);
2539 		net->lastsa += rtt;
2540 		if (rtt < 0) {
2541 			rtt = -rtt;
2542 		}
2543 		rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);
2544 		net->lastsv += rtt;
2545 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2546 			rto_logging(net, SCTP_LOG_RTTVAR);
2547 		}
2548 	} else {
2549 		/* First RTO measurement */
2550 		net->RTO_measured = 1;
2551 		first_measure = 1;
2552 		net->lastsa = rtt << SCTP_RTT_SHIFT;
2553 		net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT;
2554 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2555 			rto_logging(net, SCTP_LOG_INITIAL_RTT);
2556 		}
2557 	}
2558 	if (net->lastsv == 0) {
2559 		net->lastsv = SCTP_CLOCK_GRANULARITY;
2560 	}
2561 	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
2562 	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
2563 	    (stcb->asoc.sat_network_lockout == 0)) {
2564 		stcb->asoc.sat_network = 1;
2565 	} else if ((!first_measure) && stcb->asoc.sat_network) {
2566 		stcb->asoc.sat_network = 0;
2567 		stcb->asoc.sat_network_lockout = 1;
2568 	}
2569  	/* bound it, per C6/C7 in Section 5.3.1 */
2570  	if (new_rto < stcb->asoc.minrto) {
2571 		new_rto = stcb->asoc.minrto;
2572 	}
2573 	if (new_rto > stcb->asoc.maxrto) {
2574 		new_rto = stcb->asoc.maxrto;
2575 	}
2576 	/* we are now returning the RTO */
2577  	return (new_rto);
2578 }
2579 
2580 /*
2581  * return a pointer to a contiguous piece of data from the given mbuf chain
2582  * starting at 'off' for 'len' bytes.  If the desired piece spans more than
2583  * one mbuf, a copy is made at 'ptr'. The caller must ensure that the buffer
2584  * size is >= 'len'. Returns NULL if there aren't 'len' bytes in the chain.
2585  */
2586 caddr_t
2587 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr)
2588 {
2589 	uint32_t count;
2590 	uint8_t *ptr;
2591 
2592 	ptr = in_ptr;
2593 	if ((off < 0) || (len <= 0))
2594 		return (NULL);
2595 
2596 	/* find the desired start location */
2597 	while ((m != NULL) && (off > 0)) {
2598 		if (off < SCTP_BUF_LEN(m))
2599 			break;
2600 		off -= SCTP_BUF_LEN(m);
2601 		m = SCTP_BUF_NEXT(m);
2602 	}
2603 	if (m == NULL)
2604 		return (NULL);
2605 
2606 	/* is the current mbuf large enough (eg. contiguous)? */
2607 	if ((SCTP_BUF_LEN(m) - off) >= len) {
2608 		return (mtod(m, caddr_t) + off);
2609 	} else {
2610 		/* else, it spans more than one mbuf, so save a temp copy... */
2611 		while ((m != NULL) && (len > 0)) {
2612 			count = min(SCTP_BUF_LEN(m) - off, len);
2613 			bcopy(mtod(m, caddr_t) + off, ptr, count);
2614 			len -= count;
2615 			ptr += count;
2616 			off = 0;
2617 			m = SCTP_BUF_NEXT(m);
2618 		}
2619 		if ((m == NULL) && (len > 0))
2620 			return (NULL);
2621 		else
2622 			return ((caddr_t)in_ptr);
2623 	}
2624 }
2625 
2626 
2627 
2628 struct sctp_paramhdr *
2629 sctp_get_next_param(struct mbuf *m,
2630     int offset,
2631     struct sctp_paramhdr *pull,
2632     int pull_limit)
2633 {
2634 	/* This just provides a typed signature to Peter's Pull routine */
2635 	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
2636 	    (uint8_t *) pull));
2637 }
2638 
2639 
2640 int
2641 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
2642 {
2643 	/*
2644 	 * add padlen bytes of 0 filled padding to the end of the mbuf. If
2645 	 * padlen is > 3 this routine will fail.
2646 	 */
2647 	uint8_t *dp;
2648 	int i;
2649 
2650 	if (padlen > 3) {
2651 		SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
2652 		return (ENOBUFS);
2653 	}
2654 	if (padlen <= M_TRAILINGSPACE(m)) {
2655 		/*
2656 		 * The easy way. We hope the majority of the time we hit
2657 		 * here :)
2658 		 */
2659 		dp = (uint8_t *) (mtod(m, caddr_t) + SCTP_BUF_LEN(m));
2660 		SCTP_BUF_LEN(m) += padlen;
2661 	} else {
2662 		/* Hard way we must grow the mbuf */
2663 		struct mbuf *tmp;
2664 
2665 		tmp = sctp_get_mbuf_for_msg(padlen, 0, M_NOWAIT, 1, MT_DATA);
2666 		if (tmp == NULL) {
2667 			/* Out of space GAK! we are in big trouble. */
2668 			SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
2669 			return (ENOBUFS);
2670 		}
2671 		/* setup and insert in middle */
2672 		SCTP_BUF_LEN(tmp) = padlen;
2673 		SCTP_BUF_NEXT(tmp) = NULL;
2674 		SCTP_BUF_NEXT(m) = tmp;
2675 		dp = mtod(tmp, uint8_t *);
2676 	}
2677 	/* zero out the pad */
2678 	for (i = 0; i < padlen; i++) {
2679 		*dp = 0;
2680 		dp++;
2681 	}
2682 	return (0);
2683 }
2684 
2685 int
2686 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
2687 {
2688 	/* find the last mbuf in chain and pad it */
2689 	struct mbuf *m_at;
2690 
2691 	if (last_mbuf) {
2692 		return (sctp_add_pad_tombuf(last_mbuf, padval));
2693 	} else {
2694 		for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
2695 			if (SCTP_BUF_NEXT(m_at) == NULL) {
2696 				return (sctp_add_pad_tombuf(m_at, padval));
2697 			}
2698 		}
2699 	}
2700 	SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
2701 	return (EFAULT);
2702 }
2703 
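/*
 * Queue an SCTP_ASSOC_CHANGE notification on the socket read queue when
 * association events are enabled.  For 1-to-1 style sockets, a COMM_LOST or
 * CANT_STR_ASSOC additionally sets so_error and marks the socket as unable
 * to receive; any sleepers are then woken up.
 */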
2704 static void
2705 sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb,
2706     uint16_t error, struct sctp_abort_chunk *abort, uint8_t from_peer, int so_locked
2707 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
2708     SCTP_UNUSED
2709 #endif
2710     )
2711 {
2712 	struct mbuf *m_notify;
2713 	struct sctp_assoc_change *sac;
2714 	struct sctp_queued_to_read *control;
2715 	size_t notif_len, abort_len;
2716 	unsigned int i;
2717 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2718 	struct socket *so;
2719 #endif
2720 
2721 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
2722 		notif_len = sizeof(struct sctp_assoc_change);
2723 		if (abort != NULL) {
2724 			abort_len = ntohs(abort->ch.chunk_length);
2725 		} else {
2726 			abort_len = 0;
2727 		}
2728 		if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
2729 			notif_len += SCTP_ASSOC_SUPPORTS_MAX;
2730 		} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
2731 			notif_len += abort_len;
2732 		}
2733 		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
2734 		if (m_notify == NULL) {
2735 			/* Retry with smaller value. */
2736 			notif_len = sizeof(struct sctp_assoc_change);
2737 			m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
2738 			if (m_notify == NULL) {
2739 				goto set_error;
2740 			}
2741 		}
2742 		SCTP_BUF_NEXT(m_notify) = NULL;
2743 		sac = mtod(m_notify, struct sctp_assoc_change *);
2744 		sac->sac_type = SCTP_ASSOC_CHANGE;
2745 		sac->sac_flags = 0;
2746 		sac->sac_length = sizeof(struct sctp_assoc_change);
2747 		sac->sac_state = state;
2748 		sac->sac_error = error;
2749 		/* XXX verify these stream counts */
2750 		sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
2751 		sac->sac_inbound_streams = stcb->asoc.streamincnt;
2752 		sac->sac_assoc_id = sctp_get_associd(stcb);
2753 		if (notif_len > sizeof(struct sctp_assoc_change)) {
2754 			if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
2755 				i = 0;
2756 				if (stcb->asoc.peer_supports_prsctp) {
2757 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR;
2758 				}
2759 				if (stcb->asoc.peer_supports_auth) {
2760 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH;
2761 				}
2762 				if (stcb->asoc.peer_supports_asconf) {
2763 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF;
2764 				}
2765 				sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF;
2766 				if (stcb->asoc.peer_supports_strreset) {
2767 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG;
2768 				}
2769 				sac->sac_length += i;
2770 			} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
2771 				memcpy(sac->sac_info, abort, abort_len);
2772 				sac->sac_length += abort_len;
2773 			}
2774 		}
2775 		SCTP_BUF_LEN(m_notify) = sac->sac_length;
2776 		control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2777 		                                 0, 0, stcb->asoc.context, 0, 0, 0,
2778 		                                 m_notify);
2779 		if (control != NULL) {
2780 			control->length = SCTP_BUF_LEN(m_notify);
2781 			/* not that we need this */
2782 			control->tail_mbuf = m_notify;
2783 			control->spec_flags = M_NOTIFICATION;
2784 			sctp_add_to_readq(stcb->sctp_ep, stcb,
2785 			                  control,
2786 			                  &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD,
2787 			                  so_locked);
2788 		} else {
2789 			sctp_m_freem(m_notify);
2790 		}
2791 	}
2792 	/*
2793 	 * For 1-to-1 style sockets, we send up an error when an ABORT
2794 	 * comes in.
2795 	 */
2796 set_error:
2797 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2798 	     (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
2799 	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
2800 		SOCK_LOCK(stcb->sctp_socket);
2801 		if (from_peer) {
2802 			if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
2803 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
2804 				stcb->sctp_socket->so_error = ECONNREFUSED;
2805 			} else {
2806 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
2807 				stcb->sctp_socket->so_error = ECONNRESET;
2808 			}
2809 		} else {
2810 			if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) ||
2811 			    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED)) {
2812 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ETIMEDOUT);
2813 				stcb->sctp_socket->so_error = ETIMEDOUT;
2814 			} else {
2815 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED);
2816 				stcb->sctp_socket->so_error = ECONNABORTED;
2817 			}
2818 		}
2819 	}
2820 	/* Wake ANY sleepers */
2821 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2822 	so = SCTP_INP_SO(stcb->sctp_ep);
2823 	if (!so_locked) {
2824 		atomic_add_int(&stcb->asoc.refcnt, 1);
2825 		SCTP_TCB_UNLOCK(stcb);
2826 		SCTP_SOCKET_LOCK(so, 1);
2827 		SCTP_TCB_LOCK(stcb);
2828 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
2829 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
2830 			SCTP_SOCKET_UNLOCK(so, 1);
2831 			return;
2832 		}
2833 	}
2834 #endif
2835 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2836 	     (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
2837 	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
2838 #if defined(__APPLE__)
2839 		socantrcvmore(stcb->sctp_socket);
2840 #else
2841 		socantrcvmore_locked(stcb->sctp_socket);
2842 #endif
2843 	}
2844 	sorwakeup(stcb->sctp_socket);
2845 	sowwakeup(stcb->sctp_socket);
2846 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2847 	if (!so_locked) {
2848 		SCTP_SOCKET_UNLOCK(so, 1);
2849 	}
2850 #endif
2851 }
2852 
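/*
 * Queue an SCTP_PEER_ADDR_CHANGE notification for the peer address 'sa'
 * with the given state and error, if peer address events are enabled.
 */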
2853 static void
2854 sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
2855     struct sockaddr *sa, uint32_t error)
2856 {
2857 	struct mbuf *m_notify;
2858 	struct sctp_paddr_change *spc;
2859 	struct sctp_queued_to_read *control;
2860 
2861 	if ((stcb == NULL) ||
2862 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
2863 		/* event not enabled */
2864 		return;
2865 	}
2866 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_NOWAIT, 1, MT_DATA);
2867 	if (m_notify == NULL)
2868 		return;
2869 	SCTP_BUF_LEN(m_notify) = 0;
2870 	spc = mtod(m_notify, struct sctp_paddr_change *);
2871 	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
2872 	spc->spc_flags = 0;
2873 	spc->spc_length = sizeof(struct sctp_paddr_change);
2874 	switch (sa->sa_family) {
2875 #ifdef INET
2876 	case AF_INET:
2877 		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
2878 		break;
2879 #endif
2880 #ifdef INET6
2881 	case AF_INET6:
2882 	{
2883 #ifdef SCTP_EMBEDDED_V6_SCOPE
2884 		struct sockaddr_in6 *sin6;
2885 #endif /* SCTP_EMBEDDED_V6_SCOPE */
2886 		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));
2887 
2888 #ifdef SCTP_EMBEDDED_V6_SCOPE
2889 		sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
2890 		if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
2891 			if (sin6->sin6_scope_id == 0) {
2892 				/* recover scope_id for user */
2893 #ifdef SCTP_KAME
2894 		 		(void)sa6_recoverscope(sin6);
2895 #else
2896 				(void)in6_recoverscope(sin6, &sin6->sin6_addr,
2897 						       NULL);
2898 #endif
2899 			} else {
2900 				/* clear embedded scope_id for user */
2901 				in6_clearscope(&sin6->sin6_addr);
2902 			}
2903 		}
2904 #endif /* SCTP_EMBEDDED_V6_SCOPE */
2905 		break;
2906 	}
2907 #endif
2908 #if defined(__Userspace__)
2909 	case AF_CONN:
2910 		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_conn));
2911 		break;
2912 #endif
2913 	default:
2914 		/* TSNH */
2915 		break;
2916 	}
2917 	spc->spc_state = state;
2918 	spc->spc_error = error;
2919 	spc->spc_assoc_id = sctp_get_associd(stcb);
2920 
2921 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
2922 	SCTP_BUF_NEXT(m_notify) = NULL;
2923 
2924 	/* append to socket */
2925 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2926 	                                 0, 0, stcb->asoc.context, 0, 0, 0,
2927 	                                 m_notify);
2928 	if (control == NULL) {
2929 		/* no memory */
2930 		sctp_m_freem(m_notify);
2931 		return;
2932 	}
2933 	control->length = SCTP_BUF_LEN(m_notify);
2934 	control->spec_flags = M_NOTIFICATION;
2935 	/* not that we need this */
2936 	control->tail_mbuf = m_notify;
2937 	sctp_add_to_readq(stcb->sctp_ep, stcb,
2938 	                  control,
2939 	                  &stcb->sctp_socket->so_rcv, 1,
2940 	                  SCTP_READ_LOCK_NOT_HELD,
2941 	                  SCTP_SO_NOT_LOCKED);
2942 }
2943 
2944 
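/*
 * Queue an SCTP_SEND_FAILED (or SCTP_SEND_FAILED_EVENT) notification for a
 * chunk that could not be delivered, handing the user data back to the
 * application after stripping the DATA chunk header.
 */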
2945 static void
2946 sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error,
2947     struct sctp_tmit_chunk *chk, int so_locked
2948 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
2949     SCTP_UNUSED
2950 #endif
2951     )
2952 {
2953 	struct mbuf *m_notify;
2954 	struct sctp_send_failed *ssf;
2955 	struct sctp_send_failed_event *ssfe;
2956 	struct sctp_queued_to_read *control;
2957 	int length;
2958 
2959 	if ((stcb == NULL) ||
2960 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
2961 	     sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
2962 		/* event not enabled */
2963 		return;
2964 	}
2965 
2966 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
2967 		length = sizeof(struct sctp_send_failed_event);
2968 	} else {
2969 		length = sizeof(struct sctp_send_failed);
2970 	}
2971 	m_notify = sctp_get_mbuf_for_msg(length, 0, M_NOWAIT, 1, MT_DATA);
2972 	if (m_notify == NULL)
2973 		/* no space left */
2974 		return;
2975 	length += chk->send_size;
2976 	length -= sizeof(struct sctp_data_chunk);
2977 	SCTP_BUF_LEN(m_notify) = 0;
2978 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
2979 		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
2980 		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
2981 		if (sent) {
2982 			ssfe->ssfe_flags = SCTP_DATA_SENT;
2983 		} else {
2984 			ssfe->ssfe_flags = SCTP_DATA_UNSENT;
2985 		}
2986 		ssfe->ssfe_length = length;
2987 		ssfe->ssfe_error = error;
2988 		/* not exactly what the user sent in, but should be close :) */
2989 		bzero(&ssfe->ssfe_info, sizeof(ssfe->ssfe_info));
2990 		ssfe->ssfe_info.snd_sid = chk->rec.data.stream_number;
2991 		ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags;
2992 		ssfe->ssfe_info.snd_ppid = chk->rec.data.payloadtype;
2993 		ssfe->ssfe_info.snd_context = chk->rec.data.context;
2994 		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
2995 		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
2996 		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed_event);
2997 	} else {
2998 		ssf = mtod(m_notify, struct sctp_send_failed *);
2999 		ssf->ssf_type = SCTP_SEND_FAILED;
3000 		if (sent) {
3001 			ssf->ssf_flags = SCTP_DATA_SENT;
3002 		} else {
3003 			ssf->ssf_flags = SCTP_DATA_UNSENT;
3004 		}
3005 		ssf->ssf_length = length;
3006 		ssf->ssf_error = error;
3007 		/* not exactly what the user sent in, but should be close :) */
3008 		bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
3009 		ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number;
3010 		ssf->ssf_info.sinfo_ssn = chk->rec.data.stream_seq;
3011 		ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
3012 		ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype;
3013 		ssf->ssf_info.sinfo_context = chk->rec.data.context;
3014 		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
3015 		ssf->ssf_assoc_id = sctp_get_associd(stcb);
3016 		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
3017 	}
3018 	if (chk->data) {
3019 		/*
3020 		 * trim off the SCTP chunk header (it should
3021 		 * be there)
3022 		 */
3023 		if (chk->send_size >= sizeof(struct sctp_data_chunk)) {
3024 			m_adj(chk->data, sizeof(struct sctp_data_chunk));
3025 			sctp_mbuf_crush(chk->data);
3026 			chk->send_size -= sizeof(struct sctp_data_chunk);
3027 		}
3028 	}
3029 	SCTP_BUF_NEXT(m_notify) = chk->data;
3030 	/* Steal off the mbuf */
3031 	chk->data = NULL;
3032 	/*
3033 	 * For this case, we check the actual socket buffer, since the assoc
3034 	 * is going away and we don't want to overfill the socket buffer for
3035 	 * a non-reader.
3036 	 */
3037 	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3038 		sctp_m_freem(m_notify);
3039 		return;
3040 	}
3041 	/* append to socket */
3042 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3043 	                                 0, 0, stcb->asoc.context, 0, 0, 0,
3044 	                                 m_notify);
3045 	if (control == NULL) {
3046 		/* no memory */
3047 		sctp_m_freem(m_notify);
3048 		return;
3049 	}
3050 	control->spec_flags = M_NOTIFICATION;
3051 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3052 	                  control,
3053 	                  &stcb->sctp_socket->so_rcv, 1,
3054 	                  SCTP_READ_LOCK_NOT_HELD,
3055 	                  so_locked);
3056 }
3057 
3058 
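/*
 * As sctp_notify_send_failed(), but for a stream queue entry whose data was
 * never built into a DATA chunk.
 */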
3059 static void
3060 sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
3061 			 struct sctp_stream_queue_pending *sp, int so_locked
3062 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3063                          SCTP_UNUSED
3064 #endif
3065                          )
3066 {
3067 	struct mbuf *m_notify;
3068 	struct sctp_send_failed *ssf;
3069 	struct sctp_send_failed_event *ssfe;
3070 	struct sctp_queued_to_read *control;
3071 	int length;
3072 
3073 	if ((stcb == NULL) ||
3074 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
3075 	     sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
3076 		/* event not enabled */
3077 		return;
3078 	}
3079 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
3080 		length = sizeof(struct sctp_send_failed_event);
3081 	} else {
3082 		length = sizeof(struct sctp_send_failed);
3083 	}
3084 	m_notify = sctp_get_mbuf_for_msg(length, 0, M_NOWAIT, 1, MT_DATA);
3085 	if (m_notify == NULL) {
3086 		/* no space left */
3087 		return;
3088 	}
3089 	length += sp->length;
3090 	SCTP_BUF_LEN(m_notify) = 0;
3091 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
3092 		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
3093 		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
3094 		ssfe->ssfe_flags = SCTP_DATA_UNSENT;
3095 		ssfe->ssfe_length = length;
3096 		ssfe->ssfe_error = error;
3097 		/* not exactly what the user sent in, but should be close :) */
3098 		bzero(&ssfe->ssfe_info, sizeof(ssfe->ssfe_info));
3099 		ssfe->ssfe_info.snd_sid = sp->stream;
3100 		if (sp->some_taken) {
3101 			ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG;
3102 		} else {
3103 			ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG;
3104 		}
3105 		ssfe->ssfe_info.snd_ppid = sp->ppid;
3106 		ssfe->ssfe_info.snd_context = sp->context;
3107 		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
3108 		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
3109 		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed_event);
3110 	} else {
3111 		ssf = mtod(m_notify, struct sctp_send_failed *);
3112 		ssf->ssf_type = SCTP_SEND_FAILED;
3113 		ssf->ssf_flags = SCTP_DATA_UNSENT;
3114 		ssf->ssf_length = length;
3115 		ssf->ssf_error = error;
3116 		/* not exactly what the user sent in, but should be close :) */
3117 		bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
3118 		ssf->ssf_info.sinfo_stream = sp->stream;
3119 		ssf->ssf_info.sinfo_ssn = 0;
3120 		if (sp->some_taken) {
3121 			ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
3122 		} else {
3123 			ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
3124 		}
3125 		ssf->ssf_info.sinfo_ppid = sp->ppid;
3126 		ssf->ssf_info.sinfo_context = sp->context;
3127 		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
3128 		ssf->ssf_assoc_id = sctp_get_associd(stcb);
3129 		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
3130 	}
3131 	SCTP_BUF_NEXT(m_notify) = sp->data;
3132 
3133 	/* Steal off the mbuf */
3134 	sp->data = NULL;
3135 	/*
3136 	 * For this case, we check the actual socket buffer, since the assoc
3137 	 * is going away and we don't want to overfill the socket buffer for
3138 	 * a non-reader.
3139 	 */
3140 	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3141 		sctp_m_freem(m_notify);
3142 		return;
3143 	}
3144 	/* append to socket */
3145 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3146 	                                 0, 0, stcb->asoc.context, 0, 0, 0,
3147 	                                 m_notify);
3148 	if (control == NULL) {
3149 		/* no memory */
3150 		sctp_m_freem(m_notify);
3151 		return;
3152 	}
3153 	control->spec_flags = M_NOTIFICATION;
3154 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3155 	    control,
3156 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3157 }
3158 
3159 
3160 
3161 static void
3162 sctp_notify_adaptation_layer(struct sctp_tcb *stcb)
3163 {
3164 	struct mbuf *m_notify;
3165 	struct sctp_adaptation_event *sai;
3166 	struct sctp_queued_to_read *control;
3167 
3168 	if ((stcb == NULL) ||
3169 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
3170 		/* event not enabled */
3171 		return;
3172 	}
3173 
3174 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaptation_event), 0, M_NOWAIT, 1, MT_DATA);
3175 	if (m_notify == NULL)
3176 		/* no space left */
3177 		return;
3178 	SCTP_BUF_LEN(m_notify) = 0;
3179 	sai = mtod(m_notify, struct sctp_adaptation_event *);
3180 	sai->sai_type = SCTP_ADAPTATION_INDICATION;
3181 	sai->sai_flags = 0;
3182 	sai->sai_length = sizeof(struct sctp_adaptation_event);
3183 	sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
3184 	sai->sai_assoc_id = sctp_get_associd(stcb);
3185 
3186 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
3187 	SCTP_BUF_NEXT(m_notify) = NULL;
3188 
3189 	/* append to socket */
3190 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3191 	                                 0, 0, stcb->asoc.context, 0, 0, 0,
3192 	                                 m_notify);
3193 	if (control == NULL) {
3194 		/* no memory */
3195 		sctp_m_freem(m_notify);
3196 		return;
3197 	}
3198 	control->length = SCTP_BUF_LEN(m_notify);
3199 	control->spec_flags = M_NOTIFICATION;
3200 	/* not that we need this */
3201 	control->tail_mbuf = m_notify;
3202 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3203 	    control,
3204 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3205 }
3206 
3207 /* This always must be called with the read-queue LOCKED in the INP */
3208 static void
3209 sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
3210 					uint32_t val, int so_locked
3211 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3212                              SCTP_UNUSED
3213 #endif
3214                                         )
3215 {
3216 	struct mbuf *m_notify;
3217 	struct sctp_pdapi_event *pdapi;
3218 	struct sctp_queued_to_read *control;
3219 	struct sockbuf *sb;
3220 
3221 	if ((stcb == NULL) ||
3222 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) {
3223 		/* event not enabled */
3224 		return;
3225 	}
3226 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
3227 		return;
3228 	}
3229 
3230 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_NOWAIT, 1, MT_DATA);
3231 	if (m_notify == NULL)
3232 		/* no space left */
3233 		return;
3234 	SCTP_BUF_LEN(m_notify) = 0;
3235 	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
3236 	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
3237 	pdapi->pdapi_flags = 0;
3238 	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
3239 	pdapi->pdapi_indication = error;
3240 	pdapi->pdapi_stream = (val >> 16);
3241 	pdapi->pdapi_seq = (val & 0x0000ffff);
3242 	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);
3243 
3244 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
3245 	SCTP_BUF_NEXT(m_notify) = NULL;
3246 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3247 					 0, 0, stcb->asoc.context, 0, 0, 0,
3248 					 m_notify);
3249 	if (control == NULL) {
3250 		/* no memory */
3251 		sctp_m_freem(m_notify);
3252 		return;
3253 	}
3254 	control->spec_flags = M_NOTIFICATION;
3255 	control->length = SCTP_BUF_LEN(m_notify);
3256 	/* not that we need this */
3257 	control->tail_mbuf = m_notify;
3258 	control->held_length = 0;
3259 	control->length = 0;
3260 	sb = &stcb->sctp_socket->so_rcv;
3261 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3262 		sctp_sblog(sb, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
3263 	}
3264 	sctp_sballoc(stcb, sb, m_notify);
3265 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3266 		sctp_sblog(sb, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBRESULT, 0);
3267 	}
3268 	atomic_add_int(&control->length, SCTP_BUF_LEN(m_notify));
3269 	control->end_added = 1;
3270 	if (stcb->asoc.control_pdapi)
3271 		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi,  control, next);
3272 	else {
3273 		/* we really should not see this case */
3274 		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
3275 	}
3276 	if (stcb->sctp_ep && stcb->sctp_socket) {
3277 		/* This should always be the case */
3278 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3279 		struct socket *so;
3280 
3281 		so = SCTP_INP_SO(stcb->sctp_ep);
3282 		if (!so_locked) {
3283 			atomic_add_int(&stcb->asoc.refcnt, 1);
3284 			SCTP_TCB_UNLOCK(stcb);
3285 			SCTP_SOCKET_LOCK(so, 1);
3286 			SCTP_TCB_LOCK(stcb);
3287 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
3288 			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
3289 				SCTP_SOCKET_UNLOCK(so, 1);
3290 				return;
3291 			}
3292 		}
3293 #endif
3294 		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
3295 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3296 		if (!so_locked) {
3297 			SCTP_SOCKET_UNLOCK(so, 1);
3298 		}
3299 #endif
3300 	}
3301 }
3302 
3303 static void
3304 sctp_notify_shutdown_event(struct sctp_tcb *stcb)
3305 {
3306 	struct mbuf *m_notify;
3307 	struct sctp_shutdown_event *sse;
3308 	struct sctp_queued_to_read *control;
3309 
3310 	/*
3311 	 * For TCP model AND UDP connected sockets we will send an error up
3312 	 * when a SHUTDOWN completes.
3313 	 */
3314 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
3315 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
3316 		/* mark socket closed for read/write and wakeup! */
3317 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3318 		struct socket *so;
3319 
3320 		so = SCTP_INP_SO(stcb->sctp_ep);
3321 		atomic_add_int(&stcb->asoc.refcnt, 1);
3322 		SCTP_TCB_UNLOCK(stcb);
3323 		SCTP_SOCKET_LOCK(so, 1);
3324 		SCTP_TCB_LOCK(stcb);
3325 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
3326 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
3327 			SCTP_SOCKET_UNLOCK(so, 1);
3328 			return;
3329 		}
3330 #endif
3331 		socantsendmore(stcb->sctp_socket);
3332 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3333 		SCTP_SOCKET_UNLOCK(so, 1);
3334 #endif
3335 	}
3336 	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
3337 		/* event not enabled */
3338 		return;
3339 	}
3340 
3341 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_NOWAIT, 1, MT_DATA);
3342 	if (m_notify == NULL)
3343 		/* no space left */
3344 		return;
3345 	sse = mtod(m_notify, struct sctp_shutdown_event *);
3346 	sse->sse_type = SCTP_SHUTDOWN_EVENT;
3347 	sse->sse_flags = 0;
3348 	sse->sse_length = sizeof(struct sctp_shutdown_event);
3349 	sse->sse_assoc_id = sctp_get_associd(stcb);
3350 
3351 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
3352 	SCTP_BUF_NEXT(m_notify) = NULL;
3353 
3354 	/* append to socket */
3355 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3356 	                                 0, 0, stcb->asoc.context, 0, 0, 0,
3357 	                                 m_notify);
3358 	if (control == NULL) {
3359 		/* no memory */
3360 		sctp_m_freem(m_notify);
3361 		return;
3362 	}
3363 	control->spec_flags = M_NOTIFICATION;
3364 	control->length = SCTP_BUF_LEN(m_notify);
3365 	/* not that we need this */
3366 	control->tail_mbuf = m_notify;
3367 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3368 	    control,
3369 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3370 }
3371 
3372 static void
3373 sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
3374                              int so_locked
3375 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3376                              SCTP_UNUSED
3377 #endif
3378                              )
3379 {
3380 	struct mbuf *m_notify;
3381 	struct sctp_sender_dry_event *event;
3382 	struct sctp_queued_to_read *control;
3383 
3384 	if ((stcb == NULL) ||
3385 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) {
3386 		/* event not enabled */
3387 		return;
3388 	}
3389 
3390 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_NOWAIT, 1, MT_DATA);
3391 	if (m_notify == NULL) {
3392 		/* no space left */
3393 		return;
3394 	}
3395 	SCTP_BUF_LEN(m_notify) = 0;
3396 	event = mtod(m_notify, struct sctp_sender_dry_event *);
3397 	event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
3398 	event->sender_dry_flags = 0;
3399 	event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
3400 	event->sender_dry_assoc_id = sctp_get_associd(stcb);
3401 
3402 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
3403 	SCTP_BUF_NEXT(m_notify) = NULL;
3404 
3405 	/* append to socket */
3406 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3407 	                                 0, 0, stcb->asoc.context, 0, 0, 0,
3408 	                                 m_notify);
3409 	if (control == NULL) {
3410 		/* no memory */
3411 		sctp_m_freem(m_notify);
3412 		return;
3413 	}
3414 	control->length = SCTP_BUF_LEN(m_notify);
3415 	control->spec_flags = M_NOTIFICATION;
3416 	/* not that we need this */
3417 	control->tail_mbuf = m_notify;
3418 	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
3419 	                  &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3420 }
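/*
 * The SCTP_PCB_FLAGS_DRYEVNT check above corresponds to the application
 * subscribing to SCTP_SENDER_DRY_EVENT.  A minimal userland sketch,
 * assuming the RFC 6458 socket API from <netinet/sctp.h> (fd and the
 * error handling are illustrative, not part of this file):
 *
 *	struct sctp_event ev;
 *
 *	memset(&ev, 0, sizeof(ev));
 *	ev.se_assoc_id = SCTP_FUTURE_ASSOC;
 *	ev.se_type = SCTP_SENDER_DRY_EVENT;
 *	ev.se_on = 1;
 *	if (setsockopt(fd, IPPROTO_SCTP, SCTP_EVENT, &ev, sizeof(ev)) < 0)
 *		perror("setsockopt(SCTP_EVENT)");
 */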
3421 
3422 
3423 void
3424 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag)
3425 {
3426 	struct mbuf *m_notify;
3427 	struct sctp_queued_to_read *control;
3428 	struct sctp_stream_change_event *stradd;
3429 	int len;
3430 
3431 	if ((stcb == NULL) ||
3432 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT))) {
3433 		/* event not enabled */
3434 		return;
3435 	}
3436 	if ((stcb->asoc.peer_req_out) && flag) {
3437 		/* Peer made the request, don't tell the local user */
3438 		stcb->asoc.peer_req_out = 0;
3439 		return;
3440 	}
3441 	stcb->asoc.peer_req_out = 0;
3442 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
3443 	if (m_notify == NULL)
3444 		/* no space left */
3445 		return;
3446 	SCTP_BUF_LEN(m_notify) = 0;
3447 	len = sizeof(struct sctp_stream_change_event);
3448 	if (len > M_TRAILINGSPACE(m_notify)) {
3449 		/* never enough room */
3450 		sctp_m_freem(m_notify);
3451 		return;
3452 	}
3453 	stradd = mtod(m_notify, struct sctp_stream_change_event *);
3454 	stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT;
3455 	stradd->strchange_flags = flag;
3456 	stradd->strchange_length = len;
3457 	stradd->strchange_assoc_id = sctp_get_associd(stcb);
3458 	stradd->strchange_instrms = numberin;
3459 	stradd->strchange_outstrms = numberout;
3460 	SCTP_BUF_LEN(m_notify) = len;
3461 	SCTP_BUF_NEXT(m_notify) = NULL;
3462 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3463 		/* no space */
3464 		sctp_m_freem(m_notify);
3465 		return;
3466 	}
3467 	/* append to socket */
3468 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3469 	                                 0, 0, stcb->asoc.context, 0, 0, 0,
3470 	                                 m_notify);
3471 	if (control == NULL) {
3472 		/* no memory */
3473 		sctp_m_freem(m_notify);
3474 		return;
3475 	}
3476 	control->spec_flags = M_NOTIFICATION;
3477 	control->length = SCTP_BUF_LEN(m_notify);
3478 	/* not that we need this */
3479 	control->tail_mbuf = m_notify;
3480 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3481 	    control,
3482 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3483 }
3484 
3485 void
3486 sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag)
3487 {
3488 	struct mbuf *m_notify;
3489 	struct sctp_queued_to_read *control;
3490 	struct sctp_assoc_reset_event *strasoc;
3491 	int len;
3492 
3493 	if ((stcb == NULL) ||
3494 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT))) {
3495 		/* event not enabled */
3496 		return;
3497 	}
3498 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
3499 	if (m_notify == NULL)
3500 		/* no space left */
3501 		return;
3502 	SCTP_BUF_LEN(m_notify) = 0;
3503 	len = sizeof(struct sctp_assoc_reset_event);
3504 	if (len > M_TRAILINGSPACE(m_notify)) {
3505 		/* never enough room */
3506 		sctp_m_freem(m_notify);
3507 		return;
3508 	}
3509 	strasoc = mtod(m_notify, struct sctp_assoc_reset_event  *);
3510 	strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT;
3511 	strasoc->assocreset_flags = flag;
3512 	strasoc->assocreset_length = len;
3513 	strasoc->assocreset_assoc_id= sctp_get_associd(stcb);
3514 	strasoc->assocreset_local_tsn = sending_tsn;
3515 	strasoc->assocreset_remote_tsn = recv_tsn;
3516 	SCTP_BUF_LEN(m_notify) = len;
3517 	SCTP_BUF_NEXT(m_notify) = NULL;
3518 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3519 		/* no space */
3520 		sctp_m_freem(m_notify);
3521 		return;
3522 	}
3523 	/* append to socket */
3524 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3525 	                                 0, 0, stcb->asoc.context, 0, 0, 0,
3526 	                                 m_notify);
3527 	if (control == NULL) {
3528 		/* no memory */
3529 		sctp_m_freem(m_notify);
3530 		return;
3531 	}
3532 	control->spec_flags = M_NOTIFICATION;
3533 	control->length = SCTP_BUF_LEN(m_notify);
3534 	/* not that we need this */
3535 	control->tail_mbuf = m_notify;
3536 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3537 	    control,
3538 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3539 }
3540 
3541 
3542 
3543 static void
3544 sctp_notify_stream_reset(struct sctp_tcb *stcb,
3545     int number_entries, uint16_t * list, int flag)
3546 {
3547 	struct mbuf *m_notify;
3548 	struct sctp_queued_to_read *control;
3549 	struct sctp_stream_reset_event *strreset;
3550 	int len;
3551 
3552 	if ((stcb == NULL) ||
3553 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) {
3554 		/* event not enabled */
3555 		return;
3556 	}
3557 
3558 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
3559 	if (m_notify == NULL)
3560 		/* no space left */
3561 		return;
3562 	SCTP_BUF_LEN(m_notify) = 0;
3563 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3564 	if (len > M_TRAILINGSPACE(m_notify)) {
3565 		/* never enough room */
3566 		sctp_m_freem(m_notify);
3567 		return;
3568 	}
3569 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3570 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3571 	strreset->strreset_flags = flag;
3572 	strreset->strreset_length = len;
3573 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3574 	if (number_entries) {
3575 		int i;
3576 
3577 		for (i = 0; i < number_entries; i++) {
3578 			strreset->strreset_stream_list[i] = ntohs(list[i]);
3579 		}
3580 	}
3581 	SCTP_BUF_LEN(m_notify) = len;
3582 	SCTP_BUF_NEXT(m_notify) = NULL;
3583 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3584 		/* no space */
3585 		sctp_m_freem(m_notify);
3586 		return;
3587 	}
3588 	/* append to socket */
3589 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3590 	                                 0, 0, stcb->asoc.context, 0, 0, 0,
3591 	                                 m_notify);
3592 	if (control == NULL) {
3593 		/* no memory */
3594 		sctp_m_freem(m_notify);
3595 		return;
3596 	}
3597 	control->spec_flags = M_NOTIFICATION;
3598 	control->length = SCTP_BUF_LEN(m_notify);
3599 	/* not that we need this */
3600 	control->tail_mbuf = m_notify;
3601 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3602 	                  control,
3603 	                  &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3604 }
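/*
 * On the receiving socket the event built above is delivered as a
 * struct sctp_stream_reset_event; the number of entries in
 * strreset_stream_list follows from strreset_length exactly as it is
 * computed here.  A hedged userland sketch (buf holds a notification
 * read from the socket; names outside this file are assumptions):
 *
 *	struct sctp_stream_reset_event *strrst;
 *	unsigned int i, n;
 *
 *	strrst = (struct sctp_stream_reset_event *)buf;
 *	n = (strrst->strreset_length - sizeof(*strrst)) / sizeof(uint16_t);
 *	for (i = 0; i < n; i++)
 *		printf("stream %u affected\n", strrst->strreset_stream_list[i]);
 */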
3605 
3606 
3607 static void
3608 sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk)
3609 {
3610 	struct mbuf *m_notify;
3611 	struct sctp_remote_error *sre;
3612 	struct sctp_queued_to_read *control;
3613 	size_t notif_len, chunk_len;
3614 
3615 	if ((stcb == NULL) ||
3616 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) {
3617 		return;
3618 	}
3619 	if (chunk != NULL) {
3620 		chunk_len = ntohs(chunk->ch.chunk_length);
3621 	} else {
3622 		chunk_len = 0;
3623 	}
3624 	notif_len = sizeof(struct sctp_remote_error) + chunk_len;
3625 	m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
3626 	if (m_notify == NULL) {
3627 		/* Retry with smaller value. */
3628 		notif_len = sizeof(struct sctp_remote_error);
3629 		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
3630 		if (m_notify == NULL) {
3631 			return;
3632 		}
3633 	}
3634 	SCTP_BUF_NEXT(m_notify) = NULL;
3635 	sre = mtod(m_notify, struct sctp_remote_error *);
3636 	sre->sre_type = SCTP_REMOTE_ERROR;
3637 	sre->sre_flags = 0;
3638 	sre->sre_length = sizeof(struct sctp_remote_error);
3639 	sre->sre_error = error;
3640 	sre->sre_assoc_id = sctp_get_associd(stcb);
3641 	if (notif_len > sizeof(struct sctp_remote_error)) {
3642 		memcpy(sre->sre_data, chunk, chunk_len);
3643 		sre->sre_length += chunk_len;
3644 	}
3645 	SCTP_BUF_LEN(m_notify) = sre->sre_length;
3646 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3647 	                                 0, 0, stcb->asoc.context, 0, 0, 0,
3648 	                                 m_notify);
3649 	if (control != NULL) {
3650 		control->length = SCTP_BUF_LEN(m_notify);
3651 		/* not that we need this */
3652 		control->tail_mbuf = m_notify;
3653 		control->spec_flags = M_NOTIFICATION;
3654 		sctp_add_to_readq(stcb->sctp_ep, stcb,
3655 		                  control,
3656 		                  &stcb->sctp_socket->so_rcv, 1,
3657 				  SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3658 	} else {
3659 		sctp_m_freem(m_notify);
3660 	}
3661 }
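/*
 * Notifications such as the one queued above arrive on the socket with
 * MSG_NOTIFICATION set and are dispatched on sn_header.sn_type.  A hedged
 * userland sketch, assuming the RFC 6458 union sctp_notification layout
 * (buf and msg_flags come from the application's receive call):
 *
 *	union sctp_notification *snp;
 *
 *	if (msg_flags & MSG_NOTIFICATION) {
 *		snp = (union sctp_notification *)buf;
 *		switch (snp->sn_header.sn_type) {
 *		case SCTP_REMOTE_ERROR:
 *			printf("remote error, cause code %u\n",
 *			       snp->sn_remote_error.sre_error);
 *			break;
 *		default:
 *			break;
 *		}
 *	}
 */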
3662 
3663 
3664 void
3665 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
3666     uint32_t error, void *data, int so_locked
3667 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3668     SCTP_UNUSED
3669 #endif
3670     )
3671 {
3672 	if ((stcb == NULL) ||
3673 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3674 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3675 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3676 		/* If the socket is gone we are out of here */
3677 		return;
3678 	}
3679 #if (defined(__FreeBSD__) && __FreeBSD_version > 500000) || defined(__Windows__)
3680 	if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
3681 #else
3682 	if (stcb->sctp_socket->so_state & SS_CANTRCVMORE) {
3683 #endif
3684 		return;
3685 	}
3686 #if defined(__APPLE__)
3687 	if (so_locked) {
3688 		sctp_lock_assert(SCTP_INP_SO(stcb->sctp_ep));
3689 	} else {
3690 		sctp_unlock_assert(SCTP_INP_SO(stcb->sctp_ep));
3691 	}
3692 #endif
3693 	if ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) ||
3694 	    (stcb->asoc.state &  SCTP_STATE_COOKIE_ECHOED)) {
3695 		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
3696 		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
3697 		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
3698 			/* Don't report these in front states */
3699 			return;
3700 		}
3701 	}
3702 	switch (notification) {
3703 	case SCTP_NOTIFY_ASSOC_UP:
3704 		if (stcb->asoc.assoc_up_sent == 0) {
3705 			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, 0, so_locked);
3706 			stcb->asoc.assoc_up_sent = 1;
3707 		}
3708 		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
3709 			sctp_notify_adaptation_layer(stcb);
3710 		}
3711 		if (stcb->asoc.peer_supports_auth == 0) {
3712 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3713 			                NULL, so_locked);
3714 		}
3715 		break;
3716 	case SCTP_NOTIFY_ASSOC_DOWN:
3717 		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, 0, so_locked);
3718 #if defined(__Userspace__)
3719 		if (stcb->sctp_ep->recv_callback) {
3720 			if (stcb->sctp_socket) {
3721 				union sctp_sockstore addr;
3722 				struct sctp_rcvinfo rcv;
3723 
3724 				memset(&addr, 0, sizeof(union sctp_sockstore));
3725 				memset(&rcv, 0, sizeof(struct sctp_rcvinfo));
3726 				atomic_add_int(&stcb->asoc.refcnt, 1);
3727 				SCTP_TCB_UNLOCK(stcb);
3728 				stcb->sctp_ep->recv_callback(stcb->sctp_socket, addr, NULL, 0, rcv, 0, stcb->sctp_ep->ulp_info);
3729 				SCTP_TCB_LOCK(stcb);
3730 				atomic_subtract_int(&stcb->asoc.refcnt, 1);
3731 			}
3732 		}
3733 #endif
3734 		break;
3735 	case SCTP_NOTIFY_INTERFACE_DOWN:
3736 		{
3737 			struct sctp_nets *net;
3738 
3739 			net = (struct sctp_nets *)data;
3740 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
3741 			    (struct sockaddr *)&net->ro._l_addr, error);
3742 			break;
3743 		}
3744 	case SCTP_NOTIFY_INTERFACE_UP:
3745 		{
3746 			struct sctp_nets *net;
3747 
3748 			net = (struct sctp_nets *)data;
3749 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
3750 			    (struct sockaddr *)&net->ro._l_addr, error);
3751 			break;
3752 		}
3753 	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
3754 		{
3755 			struct sctp_nets *net;
3756 
3757 			net = (struct sctp_nets *)data;
3758 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
3759 			    (struct sockaddr *)&net->ro._l_addr, error);
3760 			break;
3761 		}
3762 	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
3763 		sctp_notify_send_failed2(stcb, error,
3764 		                         (struct sctp_stream_queue_pending *)data, so_locked);
3765 		break;
3766 	case SCTP_NOTIFY_SENT_DG_FAIL:
3767 		sctp_notify_send_failed(stcb, 1, error,
3768 		    (struct sctp_tmit_chunk *)data, so_locked);
3769 		break;
3770 	case SCTP_NOTIFY_UNSENT_DG_FAIL:
3771 		sctp_notify_send_failed(stcb, 0, error,
3772 		                        (struct sctp_tmit_chunk *)data, so_locked);
3773 		break;
3774 	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
3775 		{
3776 			uint32_t val;
3777 			val = *((uint32_t *)data);
3778 
3779 			sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
3780 		break;
3781 		}
3782 	case SCTP_NOTIFY_ASSOC_LOC_ABORTED:
3783 		if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
3784 		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
3785 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 0, so_locked);
3786 		} else {
3787 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 0, so_locked);
3788 		}
3789 		break;
3790 	case SCTP_NOTIFY_ASSOC_REM_ABORTED:
3791 		if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
3792 		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
3793 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 1, so_locked);
3794 		} else {
3795 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 1, so_locked);
3796 		}
3797 		break;
3798 	case SCTP_NOTIFY_ASSOC_RESTART:
3799 		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, 0, so_locked);
3800 		if (stcb->asoc.peer_supports_auth == 0) {
3801 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3802 			                NULL, so_locked);
3803 		}
3804 		break;
3805 	case SCTP_NOTIFY_STR_RESET_SEND:
3806 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_OUTGOING_SSN);
3807 		break;
3808 	case SCTP_NOTIFY_STR_RESET_RECV:
3809 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_INCOMING);
3810 		break;
3811 	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
3812 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
3813 		                         (SCTP_STREAM_RESET_OUTGOING_SSN|SCTP_STREAM_RESET_FAILED));
3814 		break;
3815 	case SCTP_NOTIFY_STR_RESET_DENIED_OUT:
3816 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
3817 		                         (SCTP_STREAM_RESET_OUTGOING_SSN|SCTP_STREAM_RESET_DENIED));
3818 		break;
3819 	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
3820 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
3821 		                         (SCTP_STREAM_RESET_INCOMING|SCTP_STREAM_RESET_FAILED));
3822 		break;
3823 	case SCTP_NOTIFY_STR_RESET_DENIED_IN:
3824 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
3825 		                         (SCTP_STREAM_RESET_INCOMING|SCTP_STREAM_RESET_DENIED));
3826 		break;
3827 	case SCTP_NOTIFY_ASCONF_ADD_IP:
3828 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
3829 		    error);
3830 		break;
3831 	case SCTP_NOTIFY_ASCONF_DELETE_IP:
3832 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
3833 		                             error);
3834 		break;
3835 	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
3836 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
3837 		                             error);
3838 		break;
3839 	case SCTP_NOTIFY_PEER_SHUTDOWN:
3840 		sctp_notify_shutdown_event(stcb);
3841 		break;
3842 	case SCTP_NOTIFY_AUTH_NEW_KEY:
3843 		sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error,
3844 		                           (uint16_t)(uintptr_t)data,
3845 		                           so_locked);
3846 		break;
3847 	case SCTP_NOTIFY_AUTH_FREE_KEY:
3848 		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
3849 		                           (uint16_t)(uintptr_t)data,
3850 		                           so_locked);
3851 		break;
3852 	case SCTP_NOTIFY_NO_PEER_AUTH:
3853 		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
3854 		                           (uint16_t)(uintptr_t)data,
3855 		                           so_locked);
3856 		break;
3857 	case SCTP_NOTIFY_SENDER_DRY:
3858 		sctp_notify_sender_dry_event(stcb, so_locked);
3859 		break;
3860 	case SCTP_NOTIFY_REMOTE_ERROR:
3861 		sctp_notify_remote_error(stcb, error, data);
3862 		break;
3863 	default:
3864 		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
3865 			__FUNCTION__, notification, notification);
3866 		break;
3867 	}			/* end switch */
3868 }
3869 
3870 void
3871 sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int holds_lock, int so_locked
3872 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3873     SCTP_UNUSED
3874 #endif
3875     )
3876 {
3877 	struct sctp_association *asoc;
3878 	struct sctp_stream_out *outs;
3879 	struct sctp_tmit_chunk *chk, *nchk;
3880 	struct sctp_stream_queue_pending *sp, *nsp;
3881 	int i;
3882 
3883 	if (stcb == NULL) {
3884 		return;
3885 	}
3886 	asoc = &stcb->asoc;
3887 	if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) {
3888 		/* already being freed */
3889 		return;
3890 	}
3891 #if defined(__APPLE__)
3892 	if (so_locked) {
3893 		sctp_lock_assert(SCTP_INP_SO(stcb->sctp_ep));
3894 	} else {
3895 		sctp_unlock_assert(SCTP_INP_SO(stcb->sctp_ep));
3896 	}
3897 #endif
3898 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3899 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3900 	    (asoc->state & SCTP_STATE_CLOSED_SOCKET)) {
3901 		return;
3902 	}
3903 	/* now go through all the gunk, freeing chunks */
3904 	if (holds_lock == 0) {
3905 		SCTP_TCB_SEND_LOCK(stcb);
3906 	}
3907 	/* sent queue SHOULD be empty */
3908 	TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
3909 		TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
3910 		asoc->sent_queue_cnt--;
3911 		if (chk->sent != SCTP_DATAGRAM_NR_ACKED) {
3912 			if (asoc->strmout[chk->rec.data.stream_number].chunks_on_queues > 0) {
3913 				asoc->strmout[chk->rec.data.stream_number].chunks_on_queues--;
3914 #ifdef INVARIANTS
3915 			} else {
3916 				panic("No chunks on the queues for sid %u.", chk->rec.data.stream_number);
3917 #endif
3918 			}
3919 		}
3920 		if (chk->data != NULL) {
3921 			sctp_free_bufspace(stcb, asoc, chk, 1);
3922 			sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb,
3923 			                error, chk, so_locked);
3924 			if (chk->data) {
3925 				sctp_m_freem(chk->data);
3926 				chk->data = NULL;
3927 			}
3928 		}
3929 		sctp_free_a_chunk(stcb, chk, so_locked);
3930 		/*sa_ignore FREED_MEMORY*/
3931 	}
3932 	/* pending send queue SHOULD be empty */
3933 	TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
3934 		TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
3935 		asoc->send_queue_cnt--;
3936 		if (asoc->strmout[chk->rec.data.stream_number].chunks_on_queues > 0) {
3937 			asoc->strmout[chk->rec.data.stream_number].chunks_on_queues--;
3938 #ifdef INVARIANTS
3939 		} else {
3940 			panic("No chunks on the queues for sid %u.", chk->rec.data.stream_number);
3941 #endif
3942 		}
3943 		if (chk->data != NULL) {
3944 			sctp_free_bufspace(stcb, asoc, chk, 1);
3945 			sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb,
3946 			                error, chk, so_locked);
3947 			if (chk->data) {
3948 				sctp_m_freem(chk->data);
3949 				chk->data = NULL;
3950 			}
3951 		}
3952 		sctp_free_a_chunk(stcb, chk, so_locked);
3953 		/*sa_ignore FREED_MEMORY*/
3954 	}
3955 	for (i = 0; i < asoc->streamoutcnt; i++) {
3956 		/* For each stream */
3957 		outs = &asoc->strmout[i];
3958 		/* clean up any sends there */
3959 		asoc->locked_on_sending = NULL;
3960 		TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
3961 			asoc->stream_queue_cnt--;
3962 			TAILQ_REMOVE(&outs->outqueue, sp, next);
3963 			sctp_free_spbufspace(stcb, asoc, sp);
3964 			if (sp->data) {
3965 				sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
3966 						error, (void *)sp, so_locked);
3967 				if (sp->data) {
3968 					sctp_m_freem(sp->data);
3969 					sp->data = NULL;
3970 					sp->tail_mbuf = NULL;
3971 					sp->length = 0;
3972 				}
3973 			}
3974 			if (sp->net) {
3975 				sctp_free_remote_addr(sp->net);
3976 				sp->net = NULL;
3977 			}
3978 			/* Free the chunk */
3979 			sctp_free_a_strmoq(stcb, sp, so_locked);
3980 			/*sa_ignore FREED_MEMORY*/
3981 		}
3982 	}
3983 
3984 	if (holds_lock == 0) {
3985 		SCTP_TCB_SEND_UNLOCK(stcb);
3986 	}
3987 }
3988 
3989 void
3990 sctp_abort_notification(struct sctp_tcb *stcb, uint8_t from_peer, uint16_t error,
3991 			struct sctp_abort_chunk *abort, int so_locked
3992 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3993     SCTP_UNUSED
3994 #endif
3995     )
3996 {
3997 	if (stcb == NULL) {
3998 		return;
3999 	}
4000 #if defined(__APPLE__)
4001 	if (so_locked) {
4002 		sctp_lock_assert(SCTP_INP_SO(stcb->sctp_ep));
4003 	} else {
4004 		sctp_unlock_assert(SCTP_INP_SO(stcb->sctp_ep));
4005 	}
4006 #endif
4007 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
4008 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
4009 	     (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
4010 		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
4011 	}
4012 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
4013 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
4014 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
4015 		return;
4016 	}
4017 	/* Tell them we lost the asoc */
4018 	sctp_report_all_outbound(stcb, error, 1, so_locked);
4019 	if (from_peer) {
4020 		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked);
4021 	} else {
4022 		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked);
4023 	}
4024 }
4025 
4026 void
4027 sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
4028                        struct mbuf *m, int iphlen,
4029                        struct sockaddr *src, struct sockaddr *dst,
4030                        struct sctphdr *sh, struct mbuf *op_err,
4031 #if defined(__FreeBSD__)
4032                        uint8_t use_mflowid, uint32_t mflowid,
4033 #endif
4034                        uint32_t vrf_id, uint16_t port)
4035 {
4036 	uint32_t vtag;
4037 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4038 	struct socket *so;
4039 #endif
4040 
4041 	vtag = 0;
4042 	if (stcb != NULL) {
4043 		/* We have a TCB to abort, send notification too */
4044 		vtag = stcb->asoc.peer_vtag;
4045 		sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED);
4046 		/* get the assoc vrf id and table id */
4047 		vrf_id = stcb->asoc.vrf_id;
4048 		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
4049 	}
4050 	sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err,
4051 #if defined(__FreeBSD__)
4052 	                use_mflowid, mflowid,
4053 #endif
4054 	                vrf_id, port);
4055 	if (stcb != NULL) {
4056 		/* Ok, now lets free it */
4057 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4058 		so = SCTP_INP_SO(inp);
4059 		atomic_add_int(&stcb->asoc.refcnt, 1);
4060 		SCTP_TCB_UNLOCK(stcb);
4061 		SCTP_SOCKET_LOCK(so, 1);
4062 		SCTP_TCB_LOCK(stcb);
4063 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4064 #endif
4065 		SCTP_STAT_INCR_COUNTER32(sctps_aborted);
4066 		if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
4067 		    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4068 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4069 		}
4070 		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL+SCTP_LOC_4);
4071 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4072 		SCTP_SOCKET_UNLOCK(so, 1);
4073 #endif
4074 	}
4075 }
4076 #ifdef SCTP_ASOCLOG_OF_TSNS
4077 void
4078 sctp_print_out_track_log(struct sctp_tcb *stcb)
4079 {
4080 #ifdef NOSIY_PRINTS
4081 	int i;
4082 	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
4083 	SCTP_PRINTF("IN bound TSN log-aaa\n");
4084 	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
4085 		SCTP_PRINTF("None rcvd\n");
4086 		goto none_in;
4087 	}
4088 	if (stcb->asoc.tsn_in_wrapped) {
4089 		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
4090 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4091 				    stcb->asoc.in_tsnlog[i].tsn,
4092 				    stcb->asoc.in_tsnlog[i].strm,
4093 				    stcb->asoc.in_tsnlog[i].seq,
4094 				    stcb->asoc.in_tsnlog[i].flgs,
4095 				    stcb->asoc.in_tsnlog[i].sz);
4096 		}
4097 	}
4098 	if (stcb->asoc.tsn_in_at) {
4099 		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
4100 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4101 				    stcb->asoc.in_tsnlog[i].tsn,
4102 				    stcb->asoc.in_tsnlog[i].strm,
4103 				    stcb->asoc.in_tsnlog[i].seq,
4104 				    stcb->asoc.in_tsnlog[i].flgs,
4105 				    stcb->asoc.in_tsnlog[i].sz);
4106 		}
4107 	}
4108  none_in:
4109 	SCTP_PRINTF("OUT bound TSN log-aaa\n");
4110 	if ((stcb->asoc.tsn_out_at == 0) &&
4111 	    (stcb->asoc.tsn_out_wrapped == 0)) {
4112 		SCTP_PRINTF("None sent\n");
4113 	}
4114 	if (stcb->asoc.tsn_out_wrapped) {
4115 		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
4116 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4117 				    stcb->asoc.out_tsnlog[i].tsn,
4118 				    stcb->asoc.out_tsnlog[i].strm,
4119 				    stcb->asoc.out_tsnlog[i].seq,
4120 				    stcb->asoc.out_tsnlog[i].flgs,
4121 				    stcb->asoc.out_tsnlog[i].sz);
4122 		}
4123 	}
4124 	if (stcb->asoc.tsn_out_at) {
4125 		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
4126 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4127 				    stcb->asoc.out_tsnlog[i].tsn,
4128 				    stcb->asoc.out_tsnlog[i].strm,
4129 				    stcb->asoc.out_tsnlog[i].seq,
4130 				    stcb->asoc.out_tsnlog[i].flgs,
4131 				    stcb->asoc.out_tsnlog[i].sz);
4132 		}
4133 	}
4134 #endif
4135 }
4136 #endif
4137 
4138 void
4139 sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
4140                           struct mbuf *op_err,
4141                           int so_locked
4142 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4143                           SCTP_UNUSED
4144 #endif
4145 )
4146 {
4147 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4148 	struct socket *so;
4149 #endif
4150 
4151 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4152 	so = SCTP_INP_SO(inp);
4153 #endif
4154 #if defined(__APPLE__)
4155 	if (so_locked) {
4156 		sctp_lock_assert(SCTP_INP_SO(inp));
4157 	} else {
4158 		sctp_unlock_assert(SCTP_INP_SO(inp));
4159 	}
4160 #endif
4161 	if (stcb == NULL) {
4162 		/* Got to have a TCB */
4163 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4164 			if (LIST_EMPTY(&inp->sctp_asoc_list)) {
4165 #if defined(__APPLE__)
4166 				if (!so_locked) {
4167 					SCTP_SOCKET_LOCK(so, 1);
4168 				}
4169 #endif
4170 				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
4171 						SCTP_CALLED_DIRECTLY_NOCMPSET);
4172 #if defined(__APPLE__)
4173 				if (!so_locked) {
4174 					SCTP_SOCKET_UNLOCK(so, 1);
4175 				}
4176 #endif
4177 			}
4178 		}
4179 		return;
4180 	} else {
4181 		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
4182 	}
4183 	/* notify the ulp */
4184 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
4185 		sctp_abort_notification(stcb, 0, 0, NULL, so_locked);
4186 	}
4187 	/* notify the peer */
4188 	sctp_send_abort_tcb(stcb, op_err, so_locked);
4189 	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
4190 	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
4191 	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4192 		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4193 	}
4194 	/* now free the asoc */
4195 #ifdef SCTP_ASOCLOG_OF_TSNS
4196 	sctp_print_out_track_log(stcb);
4197 #endif
4198 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4199 	if (!so_locked) {
4200 		atomic_add_int(&stcb->asoc.refcnt, 1);
4201 		SCTP_TCB_UNLOCK(stcb);
4202 		SCTP_SOCKET_LOCK(so, 1);
4203 		SCTP_TCB_LOCK(stcb);
4204 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4205 	}
4206 #endif
4207 	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL+SCTP_LOC_5);
4208 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4209 	if (!so_locked) {
4210 		SCTP_SOCKET_UNLOCK(so, 1);
4211 	}
4212 #endif
4213 }
4214 
4215 void
4216 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset,
4217                  struct sockaddr *src, struct sockaddr *dst,
4218                  struct sctphdr *sh, struct sctp_inpcb *inp,
4219                  struct mbuf *cause,
4220 #if defined(__FreeBSD__)
4221                  uint8_t use_mflowid, uint32_t mflowid,
4222 #endif
4223                  uint32_t vrf_id, uint16_t port)
4224 {
4225 	struct sctp_chunkhdr *ch, chunk_buf;
4226 	unsigned int chk_length;
4227 	int contains_init_chunk;
4228 
4229 	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
4230 	/* Generate a TO address for future reference */
4231 	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
4232 		if (LIST_EMPTY(&inp->sctp_asoc_list)) {
4233 #if defined(__APPLE__)
4234 			SCTP_SOCKET_LOCK(SCTP_INP_SO(inp), 1);
4235 #endif
4236 			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
4237 					SCTP_CALLED_DIRECTLY_NOCMPSET);
4238 #if defined(__APPLE__)
4239 			SCTP_SOCKET_UNLOCK(SCTP_INP_SO(inp), 1);
4240 #endif
4241 		}
4242 	}
4243 	contains_init_chunk = 0;
4244 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4245 	    sizeof(*ch), (uint8_t *) & chunk_buf);
4246 	while (ch != NULL) {
4247 		chk_length = ntohs(ch->chunk_length);
4248 		if (chk_length < sizeof(*ch)) {
4249 			/* break to abort land */
4250 			break;
4251 		}
4252 		switch (ch->chunk_type) {
4253 		case SCTP_INIT:
4254 			contains_init_chunk = 1;
4255 			break;
4256 		case SCTP_PACKET_DROPPED:
4257 			/* we don't respond to pkt-dropped */
4258 			return;
4259 		case SCTP_ABORT_ASSOCIATION:
4260 			/* we don't respond with an ABORT to an ABORT */
4261 			return;
4262 		case SCTP_SHUTDOWN_COMPLETE:
4263 			/*
4264 			 * we ignore it since we are not waiting for it and
4265 			 * peer is gone
4266 			 */
4267 			return;
4268 		case SCTP_SHUTDOWN_ACK:
4269 			sctp_send_shutdown_complete2(src, dst, sh,
4270 #if defined(__FreeBSD__)
4271 			                             use_mflowid, mflowid,
4272 #endif
4273 			                             vrf_id, port);
4274 			return;
4275 		default:
4276 			break;
4277 		}
4278 		offset += SCTP_SIZE32(chk_length);
4279 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4280 		    sizeof(*ch), (uint8_t *) & chunk_buf);
4281 	}
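	/*
	 * Respond with an ABORT unless the blackhole sysctl suppresses it:
	 * sctp_blackhole == 0 always responds, == 1 responds only when the
	 * packet did not contain an INIT chunk, and any other value stays
	 * silent.
	 */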
4282 	if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) ||
4283 	    ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) &&
4284 	     (contains_init_chunk == 0))) {
4285 		sctp_send_abort(m, iphlen, src, dst, sh, 0, cause,
4286 #if defined(__FreeBSD__)
4287 		                use_mflowid, mflowid,
4288 #endif
4289 		                vrf_id, port);
4290 	}
4291 }
4292 
4293 /*
4294  * check the inbound datagram to make sure there is not an abort inside it,
4295  * if there is return 1, else return 0.
4296  */
4297 int
4298 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill)
4299 {
4300 	struct sctp_chunkhdr *ch;
4301 	struct sctp_init_chunk *init_chk, chunk_buf;
4302 	int offset;
4303 	unsigned int chk_length;
4304 
4305 	offset = iphlen + sizeof(struct sctphdr);
4306 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
4307 	    (uint8_t *) & chunk_buf);
4308 	while (ch != NULL) {
4309 		chk_length = ntohs(ch->chunk_length);
4310 		if (chk_length < sizeof(*ch)) {
4311 			/* packet is probably corrupt */
4312 			break;
4313 		}
4314 		/* we seem to be ok, is it an abort? */
4315 		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
4316 			/* yep, tell them */
4317 			return (1);
4318 		}
4319 		if (ch->chunk_type == SCTP_INITIATION) {
4320 			/* need to update the Vtag */
4321 			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
4322 			    offset, sizeof(*init_chk), (uint8_t *) & chunk_buf);
4323 			if (init_chk != NULL) {
4324 				*vtagfill = ntohl(init_chk->init.initiate_tag);
4325 			}
4326 		}
4327 		/* Nope, move to the next chunk */
4328 		offset += SCTP_SIZE32(chk_length);
4329 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4330 		    sizeof(*ch), (uint8_t *) & chunk_buf);
4331 	}
4332 	return (0);
4333 }
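/*
 * A minimal caller sketch (the surrounding receive path is hypothetical,
 * not part of this file): drop packets that carry an ABORT instead of
 * replying to them, and pick up the peer's initiate tag when the packet
 * carried an INIT.
 *
 *	uint32_t vtag = 0;
 *
 *	if (sctp_is_there_an_abort_here(m, iphlen, &vtag)) {
 *		sctp_m_freem(m);
 *		return;
 *	}
 */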
4334 
4335 /*
4336  * currently (2/02), ifa_addr embeds scope_id's and doesn't have sin6_scope_id
4337  * set (i.e. it's 0), so create this function to compare link-local scopes
4338  */
4339 #ifdef INET6
4340 uint32_t
4341 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
4342 {
4343 #if defined(__Userspace__)
4344     /*__Userspace__ Returning 1 here always */
4345 #endif
4346 #if defined(SCTP_EMBEDDED_V6_SCOPE)
4347 	struct sockaddr_in6 a, b;
4348 
4349 	/* save copies */
4350 	a = *addr1;
4351 	b = *addr2;
4352 
4353 	if (a.sin6_scope_id == 0)
4354 #ifdef SCTP_KAME
4355 		if (sa6_recoverscope(&a)) {
4356 #else
4357 		if (in6_recoverscope(&a, &a.sin6_addr, NULL)) {
4358 #endif				/* SCTP_KAME */
4359 			/* can't get scope, so can't match */
4360 			return (0);
4361 		}
4362 	if (b.sin6_scope_id == 0)
4363 #ifdef SCTP_KAME
4364 		if (sa6_recoverscope(&b)) {
4365 #else
4366 		if (in6_recoverscope(&b, &b.sin6_addr, NULL)) {
4367 #endif				/* SCTP_KAME */
4368 			/* can't get scope, so can't match */
4369 			return (0);
4370 		}
4371 	if (a.sin6_scope_id != b.sin6_scope_id)
4372 		return (0);
4373 #else
4374 	if (addr1->sin6_scope_id != addr2->sin6_scope_id)
4375 		return (0);
4376 #endif /* SCTP_EMBEDDED_V6_SCOPE */
4377 
4378 	return (1);
4379 }
4380 
4381 #if defined(SCTP_EMBEDDED_V6_SCOPE)
4382 /*
4383  * returns a sockaddr_in6 with embedded scope recovered and removed
4384  */
4385 struct sockaddr_in6 *
4386 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
4387 {
4388 	/* check and strip embedded scope junk */
4389 	if (addr->sin6_family == AF_INET6) {
4390 		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
4391 			if (addr->sin6_scope_id == 0) {
4392 				*store = *addr;
4393 #ifdef SCTP_KAME
4394 				if (!sa6_recoverscope(store)) {
4395 #else
4396 				if (!in6_recoverscope(store, &store->sin6_addr,
4397 				    NULL)) {
4398 #endif /* SCTP_KAME */
4399 					/* use the recovered scope */
4400 					addr = store;
4401 				}
4402 			} else {
4403 				/* else, return the original "to" addr */
4404 				in6_clearscope(&addr->sin6_addr);
4405 			}
4406 		}
4407 	}
4408 	return (addr);
4409 }
4410 #endif /* SCTP_EMBEDDED_V6_SCOPE */
4411 #endif
4412 
4413 /*
4414  * are the two addresses the same?  currently a "scopeless" check; returns 1
4415  * if same, 0 if not
4416  */
4417 int
4418 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
4419 {
4420 
4421 	/* must be valid */
4422 	if (sa1 == NULL || sa2 == NULL)
4423 		return (0);
4424 
4425 	/* must be the same family */
4426 	if (sa1->sa_family != sa2->sa_family)
4427 		return (0);
4428 
4429 	switch (sa1->sa_family) {
4430 #ifdef INET6
4431 	case AF_INET6:
4432 	{
4433 		/* IPv6 addresses */
4434 		struct sockaddr_in6 *sin6_1, *sin6_2;
4435 
4436 		sin6_1 = (struct sockaddr_in6 *)sa1;
4437 		sin6_2 = (struct sockaddr_in6 *)sa2;
4438 		return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
4439 		    sin6_2));
4440 	}
4441 #endif
4442 #ifdef INET
4443 	case AF_INET:
4444 	{
4445 		/* IPv4 addresses */
4446 		struct sockaddr_in *sin_1, *sin_2;
4447 
4448 		sin_1 = (struct sockaddr_in *)sa1;
4449 		sin_2 = (struct sockaddr_in *)sa2;
4450 		return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
4451 	}
4452 #endif
4453 #if defined(__Userspace__)
4454 	case AF_CONN:
4455 	{
4456 		struct sockaddr_conn *sconn_1, *sconn_2;
4457 
4458 		sconn_1 = (struct sockaddr_conn *)sa1;
4459 		sconn_2 = (struct sockaddr_conn *)sa2;
4460 		return (sconn_1->sconn_addr == sconn_2->sconn_addr);
4461 	}
4462 #endif
4463 	default:
4464 		/* we don't do these... */
4465 		return (0);
4466 	}
4467 }
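/*
 * Usage sketch (addresses below are illustrative): for AF_INET only the
 * s_addr fields are compared, so sctp_cmpaddr() returns 1 for a and b
 * below even though their ports differ.
 *
 *	struct sockaddr_in a, b;
 *	int result;
 *
 *	memset(&a, 0, sizeof(a));
 *	memset(&b, 0, sizeof(b));
 *	a.sin_family = b.sin_family = AF_INET;
 *	a.sin_addr.s_addr = b.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
 *	a.sin_port = htons(5001);
 *	b.sin_port = htons(5002);
 *	result = sctp_cmpaddr((struct sockaddr *)&a, (struct sockaddr *)&b);
 */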
4468 
4469 void
4470 sctp_print_address(struct sockaddr *sa)
4471 {
4472 #ifdef INET6
4473 #if defined(__FreeBSD__) && __FreeBSD_version >= 700000
4474 	char ip6buf[INET6_ADDRSTRLEN];
4475 #endif
4476 #endif
4477 
4478 	switch (sa->sa_family) {
4479 #ifdef INET6
4480 	case AF_INET6:
4481 	{
4482 		struct sockaddr_in6 *sin6;
4483 
4484 		sin6 = (struct sockaddr_in6 *)sa;
4485 #if defined(__Userspace__)
4486 		SCTP_PRINTF("IPv6 address: %x:%x:%x:%x:%x:%x:%x:%x:port:%d scope:%u\n",
4487 			    ntohs(sin6->sin6_addr.s6_addr16[0]),
4488 			    ntohs(sin6->sin6_addr.s6_addr16[1]),
4489 			    ntohs(sin6->sin6_addr.s6_addr16[2]),
4490 			    ntohs(sin6->sin6_addr.s6_addr16[3]),
4491 			    ntohs(sin6->sin6_addr.s6_addr16[4]),
4492 			    ntohs(sin6->sin6_addr.s6_addr16[5]),
4493 			    ntohs(sin6->sin6_addr.s6_addr16[6]),
4494 			    ntohs(sin6->sin6_addr.s6_addr16[7]),
4495 			    ntohs(sin6->sin6_port),
4496 			    sin6->sin6_scope_id);
4497 #else
4498 #if defined(__FreeBSD__) && __FreeBSD_version >= 700000
4499 		SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
4500 			    ip6_sprintf(ip6buf, &sin6->sin6_addr),
4501 			    ntohs(sin6->sin6_port),
4502 			    sin6->sin6_scope_id);
4503 #else
4504 		SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
4505 			    ip6_sprintf(&sin6->sin6_addr),
4506 			    ntohs(sin6->sin6_port),
4507 			    sin6->sin6_scope_id);
4508 #endif
4509 #endif
4510 		break;
4511 	}
4512 #endif
4513 #ifdef INET
4514 	case AF_INET:
4515 	{
4516 		struct sockaddr_in *sin;
4517 		unsigned char *p;
4518 
4519 		sin = (struct sockaddr_in *)sa;
4520 		p = (unsigned char *)&sin->sin_addr;
4521 		SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
4522 			    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
4523 		break;
4524 	}
4525 #endif
4526 #if defined(__Userspace__)
4527 	case AF_CONN:
4528 	{
4529 		struct sockaddr_conn *sconn;
4530 
4531 		sconn = (struct sockaddr_conn *)sa;
4532 		SCTP_PRINTF("AF_CONN address: %p\n", sconn->sconn_addr);
4533 		break;
4534 	}
4535 #endif
4536 	default:
4537 		SCTP_PRINTF("?\n");
4538 		break;
4539 	}
4540 }
4541 
4542 void
4543 sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
4544     struct sctp_inpcb *new_inp,
4545     struct sctp_tcb *stcb,
4546     int waitflags)
4547 {
4548 	/*
4549 	 * go through our old INP and pull off any control structures that
4550 	 * belong to stcb and move them to the new inp.
4551 	 */
4552 	struct socket *old_so, *new_so;
4553 	struct sctp_queued_to_read *control, *nctl;
4554 	struct sctp_readhead tmp_queue;
4555 	struct mbuf *m;
4556 	int error = 0;
4557 
4558 	old_so = old_inp->sctp_socket;
4559 	new_so = new_inp->sctp_socket;
4560 	TAILQ_INIT(&tmp_queue);
4561 #if defined(__FreeBSD__) && __FreeBSD_version < 700000
4562 	SOCKBUF_LOCK(&(old_so->so_rcv));
4563 #endif
4564 #if defined(__FreeBSD__) || defined(__APPLE__)
4565 	error = sblock(&old_so->so_rcv, waitflags);
4566 #endif
4567 #if defined(__FreeBSD__) && __FreeBSD_version < 700000
4568 	SOCKBUF_UNLOCK(&(old_so->so_rcv));
4569 #endif
4570 	if (error) {
4571 		/* Gak, can't get the sblock, we have a problem.
4572 		 * Data will be left stranded and we
4573 		 * don't dare look at it since the
4574 		 * other thread may be reading something.
4575 		 * Oh well, it's a screwed up app that does
4576 		 * a peeloff OR an accept while reading
4577 		 * from the main socket... actually it's
4578 		 * only the peeloff() case, since I think
4579 		 * read will fail on a listening socket.
4580 		 */
4581 		return;
4582 	}
4583 	/* lock the socket buffers */
4584 	SCTP_INP_READ_LOCK(old_inp);
4585 	TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) {
4586 		/* Pull off all for our target stcb */
4587 		if (control->stcb == stcb) {
4588 			/* remove it we want it */
4589 			TAILQ_REMOVE(&old_inp->read_queue, control, next);
4590 			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
4591 			m = control->data;
4592 			while (m) {
4593 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4594 					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBFREE,SCTP_BUF_LEN(m));
4595 				}
4596 				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
4597 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4598 					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBRESULT, 0);
4599 				}
4600 				m = SCTP_BUF_NEXT(m);
4601 			}
4602 		}
4603 	}
4604 	SCTP_INP_READ_UNLOCK(old_inp);
4605 	/* Remove the sb-lock on the old socket */
4606 #if defined(__FreeBSD__) && __FreeBSD_version < 700000
4607 	SOCKBUF_LOCK(&(old_so->so_rcv));
4608 #endif
4609 #if defined(__APPLE__)
4610 	sbunlock(&old_so->so_rcv, 1);
4611 #endif
4612 
4613 #if defined(__FreeBSD__)
4614 	sbunlock(&old_so->so_rcv);
4615 #endif
4616 #if defined(__FreeBSD__) && __FreeBSD_version < 700000
4617 	SOCKBUF_UNLOCK(&(old_so->so_rcv));
4618 #endif
4619 	/* Now we move them over to the new socket buffer */
4620 	SCTP_INP_READ_LOCK(new_inp);
4621 	TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) {
4622 		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
4623 		m = control->data;
4624 		while (m) {
4625 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4626 				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
4627 			}
4628 			sctp_sballoc(stcb, &new_so->so_rcv, m);
4629 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4630 				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBRESULT, 0);
4631 			}
4632 			m = SCTP_BUF_NEXT(m);
4633 		}
4634 	}
4635 	SCTP_INP_READ_UNLOCK(new_inp);
4636 }
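/*
 * This migration backs the peel-off path: read-queue entries that belong
 * to one association move from the original one-to-many socket to the
 * newly created one.  A hedged userland view (RFC 6458 sctp_peeloff();
 * fd and assoc_id are illustrative):
 *
 *	int peeled_fd;
 *
 *	peeled_fd = sctp_peeloff(fd, assoc_id);
 *	if (peeled_fd < 0)
 *		perror("sctp_peeloff");
 */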
4637 
4638 void
4639 sctp_add_to_readq(struct sctp_inpcb *inp,
4640     struct sctp_tcb *stcb,
4641     struct sctp_queued_to_read *control,
4642     struct sockbuf *sb,
4643     int end,
4644     int inp_read_lock_held,
4645     int so_locked
4646 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4647     SCTP_UNUSED
4648 #endif
4649     )
4650 {
4651 	/*
4652 	 * Here we must place the control on the end of the socket read
4653 	 * queue AND increment sb_cc so that select will work properly on
4654 	 * read.
4655 	 */
4656 	struct mbuf *m, *prev = NULL;
4657 
4658 	if (inp == NULL) {
4659 		/* Gak, TSNH!! */
4660 #ifdef INVARIANTS
4661 		panic("Gak, inp NULL on add_to_readq");
4662 #endif
4663 		return;
4664 	}
4665 #if defined(__APPLE__)
4666 	if (so_locked) {
4667 		sctp_lock_assert(SCTP_INP_SO(inp));
4668 	} else {
4669 		sctp_unlock_assert(SCTP_INP_SO(inp));
4670 	}
4671 #endif
4672 	if (inp_read_lock_held == 0)
4673 		SCTP_INP_READ_LOCK(inp);
4674 	if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
4675 		sctp_free_remote_addr(control->whoFrom);
4676 		if (control->data) {
4677 			sctp_m_freem(control->data);
4678 			control->data = NULL;
4679 		}
4680 		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), control);
4681 		if (inp_read_lock_held == 0)
4682 			SCTP_INP_READ_UNLOCK(inp);
4683 		return;
4684 	}
4685 	if (!(control->spec_flags & M_NOTIFICATION)) {
4686 		atomic_add_int(&inp->total_recvs, 1);
4687 		if (!control->do_not_ref_stcb) {
4688 			atomic_add_int(&stcb->total_recvs, 1);
4689 		}
4690 	}
4691 	m = control->data;
4692 	control->held_length = 0;
4693 	control->length = 0;
4694 	while (m) {
4695 		if (SCTP_BUF_LEN(m) == 0) {
4696 			/* Skip mbufs with NO length */
4697 			if (prev == NULL) {
4698 				/* First one */
4699 				control->data = sctp_m_free(m);
4700 				m = control->data;
4701 			} else {
4702 				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
4703 				m = SCTP_BUF_NEXT(prev);
4704 			}
4705 			if (m == NULL) {
4706 				control->tail_mbuf = prev;
4707 			}
4708 			continue;
4709 		}
4710 		prev = m;
4711 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4712 			sctp_sblog(sb, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
4713 		}
4714 		sctp_sballoc(stcb, sb, m);
4715 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4716 			sctp_sblog(sb, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBRESULT, 0);
4717 		}
4718 		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
4719 		m = SCTP_BUF_NEXT(m);
4720 	}
4721 	if (prev != NULL) {
4722 		control->tail_mbuf = prev;
4723 	} else {
4724 		/* Everything got collapsed out?? */
4725 		sctp_free_remote_addr(control->whoFrom);
4726 		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), control);
4727 		if (inp_read_lock_held == 0)
4728 			SCTP_INP_READ_UNLOCK(inp);
4729 		return;
4730 	}
4731 	if (end) {
4732 		control->end_added = 1;
4733 	}
4734 #if defined(__Userspace__)
4735 	if (inp->recv_callback) {
4736 		if (inp_read_lock_held == 0)
4737 			SCTP_INP_READ_UNLOCK(inp);
4738 		if (control->end_added == 1) {
4739 			struct socket *so;
4740 			struct mbuf *m;
4741 			char *buffer;
4742 			struct sctp_rcvinfo rcv;
4743 			union sctp_sockstore addr;
4744 			int flags;
4745 
4746 			if ((buffer = malloc(control->length)) == NULL) {
4747 				return;
4748 			}
4749 			so = stcb->sctp_socket;
4750 			for (m = control->data; m; m = SCTP_BUF_NEXT(m)) {
4751 				sctp_sbfree(control, control->stcb, &so->so_rcv, m);
4752 			}
4753 			atomic_add_int(&stcb->asoc.refcnt, 1);
4754 			SCTP_TCB_UNLOCK(stcb);
4755 			m_copydata(control->data, 0, control->length, buffer);
4756 			memset(&rcv, 0, sizeof(struct sctp_rcvinfo));
4757 			rcv.rcv_sid = control->sinfo_stream;
4758 			rcv.rcv_ssn = control->sinfo_ssn;
4759 			rcv.rcv_flags = control->sinfo_flags;
4760 			rcv.rcv_ppid = control->sinfo_ppid;
4761 			rcv.rcv_tsn = control->sinfo_tsn;
4762 			rcv.rcv_cumtsn = control->sinfo_cumtsn;
4763 			rcv.rcv_context = control->sinfo_context;
4764 			rcv.rcv_assoc_id = control->sinfo_assoc_id;
4765 			memset(&addr, 0, sizeof(union sctp_sockstore));
4766 			switch (control->whoFrom->ro._l_addr.sa.sa_family) {
4767 #ifdef INET
4768 			case AF_INET:
4769 				addr.sin = control->whoFrom->ro._l_addr.sin;
4770 				break;
4771 #endif
4772 #ifdef INET6
4773 			case AF_INET6:
4774 				addr.sin6 = control->whoFrom->ro._l_addr.sin6;
4775 				break;
4776 #endif
4777 			case AF_CONN:
4778 				addr.sconn = control->whoFrom->ro._l_addr.sconn;
4779 				break;
4780 			default:
4781 				addr.sa = control->whoFrom->ro._l_addr.sa;
4782 				break;
4783 			}
4784 			flags = MSG_EOR;
4785 			if (control->spec_flags & M_NOTIFICATION) {
4786 				flags |= MSG_NOTIFICATION;
4787 			}
4788 			inp->recv_callback(so, addr, buffer, control->length, rcv, flags, inp->ulp_info);
4789 			SCTP_TCB_LOCK(stcb);
4790 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
4791 			sctp_free_remote_addr(control->whoFrom);
4792 			control->whoFrom = NULL;
4793 			sctp_m_freem(control->data);
4794 			control->data = NULL;
4795 			control->length = 0;
4796 			sctp_free_a_readq(stcb, control);
4797 		}
4798 		return;
4799 	}
4800 #endif
4801 	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
4802 	if (inp_read_lock_held == 0)
4803 		SCTP_INP_READ_UNLOCK(inp);
4804 	if (inp && inp->sctp_socket) {
4805 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
4806 			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
4807 		} else {
4808 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4809 			struct socket *so;
4810 
4811 			so = SCTP_INP_SO(inp);
4812 			if (!so_locked) {
4813 				if (stcb) {
4814 					atomic_add_int(&stcb->asoc.refcnt, 1);
4815 					SCTP_TCB_UNLOCK(stcb);
4816 				}
4817 				SCTP_SOCKET_LOCK(so, 1);
4818 				if (stcb) {
4819 					SCTP_TCB_LOCK(stcb);
4820 					atomic_subtract_int(&stcb->asoc.refcnt, 1);
4821 				}
4822 				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4823 					SCTP_SOCKET_UNLOCK(so, 1);
4824 					return;
4825 				}
4826 			}
4827 #endif
4828 			sctp_sorwakeup(inp, inp->sctp_socket);
4829 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4830 			if (!so_locked) {
4831 				SCTP_SOCKET_UNLOCK(so, 1);
4832 			}
4833 #endif
4834 		}
4835 	}
4836 }
4837 
4838 
4839 int
4840 sctp_append_to_readq(struct sctp_inpcb *inp,
4841     struct sctp_tcb *stcb,
4842     struct sctp_queued_to_read *control,
4843     struct mbuf *m,
4844     int end,
4845     int ctls_cumack,
4846     struct sockbuf *sb)
4847 {
4848 	/*
4849 	 * A partial delivery API event is underway. OR we are appending on
4850 	 * the reassembly queue.
4851 	 *
4852 	 * If PDAPI this means we need to add m to the end of the data.
4853 	 * Increase the length in the control AND increment the sb_cc.
4854 	 * Otherwise sb is NULL and all we need to do is put it at the end
4855 	 * of the mbuf chain.
4856 	 */
4857 	int len = 0;
4858 	struct mbuf *mm, *tail = NULL, *prev = NULL;
4859 
4860 	if (inp) {
4861 		SCTP_INP_READ_LOCK(inp);
4862 	}
4863 	if (control == NULL) {
4864 	get_out:
4865 		if (inp) {
4866 			SCTP_INP_READ_UNLOCK(inp);
4867 		}
4868 		return (-1);
4869 	}
4870 	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ)) {
4871 		SCTP_INP_READ_UNLOCK(inp);
4872 		return (0);
4873 	}
4874 	if (control->end_added) {
4875 		/* huh this one is complete? */
4876 		goto get_out;
4877 	}
4878 	mm = m;
4879 	if (mm == NULL) {
4880 		goto get_out;
4881 	}
4882 
4883 	while (mm) {
4884 		if (SCTP_BUF_LEN(mm) == 0) {
4885 			/* Skip mbufs with NO length */
4886 			if (prev == NULL) {
4887 				/* First one */
4888 				m = sctp_m_free(mm);
4889 				mm = m;
4890 			} else {
4891 				SCTP_BUF_NEXT(prev) = sctp_m_free(mm);
4892 				mm = SCTP_BUF_NEXT(prev);
4893 			}
4894 			continue;
4895 		}
4896 		prev = mm;
4897 		len += SCTP_BUF_LEN(mm);
4898 		if (sb) {
4899 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4900 				sctp_sblog(sb, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(mm));
4901 			}
4902 			sctp_sballoc(stcb, sb, mm);
4903 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4904 				sctp_sblog(sb, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBRESULT, 0);
4905 			}
4906 		}
4907 		mm = SCTP_BUF_NEXT(mm);
4908 	}
4909 	if (prev) {
4910 		tail = prev;
4911 	} else {
4912 		/* Really there should always be a prev */
4913 		if (m == NULL) {
4914 			/* Huh nothing left? */
4915 #ifdef INVARIANTS
4916 			panic("Nothing left to add?");
4917 #else
4918 			goto get_out;
4919 #endif
4920 		}
4921 		tail = m;
4922 	}
4923 	if (control->tail_mbuf) {
4924 		/* append */
4925 		SCTP_BUF_NEXT(control->tail_mbuf) = m;
4926 		control->tail_mbuf = tail;
4927 	} else {
4928 		/* nothing there */
4929 #ifdef INVARIANTS
4930 		if (control->data != NULL) {
4931 			panic("This should NOT happen");
4932 		}
4933 #endif
4934 		control->data = m;
4935 		control->tail_mbuf = tail;
4936 	}
4937 	atomic_add_int(&control->length, len);
4938 	if (end) {
4939 		/* message is complete */
4940 		if (stcb && (control == stcb->asoc.control_pdapi)) {
4941 			stcb->asoc.control_pdapi = NULL;
4942 		}
4943 		control->held_length = 0;
4944 		control->end_added = 1;
4945 	}
4946 	if (stcb == NULL) {
4947 		control->do_not_ref_stcb = 1;
4948 	}
4949 	/*
4950 	 * When we are appending in partial delivery, the cum-ack is used
4951 	 * for the actual pd-api highest tsn on this mbuf. The true cum-ack
4952 	 * is populated in the outbound sinfo structure from the true cumack
4953 	 * if the association exists...
4954 	 */
4955 	control->sinfo_tsn = control->sinfo_cumtsn = ctls_cumack;
4956 #if defined(__Userspace__)
4957 	if (inp->recv_callback) {
4958 		uint32_t pd_point, length;
4959 
4960 		length = control->length;
4961 		if (stcb != NULL && stcb->sctp_socket != NULL) {
4962 			pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
4963 			               stcb->sctp_ep->partial_delivery_point);
4964 		} else {
4965 			pd_point = inp->partial_delivery_point;
4966 		}
4967 		if ((control->end_added == 1) || (length >= pd_point)) {
4968 			struct socket *so;
4969 			char *buffer;
4970 			struct sctp_rcvinfo rcv;
4971 			union sctp_sockstore addr;
4972 			int flags;
4973 
4974 			if ((buffer = malloc(control->length)) == NULL) {
4975 				return (-1);
4976 			}
4977 			so = stcb->sctp_socket;
4978 			for (m = control->data; m; m = SCTP_BUF_NEXT(m)) {
4979 				sctp_sbfree(control, control->stcb, &so->so_rcv, m);
4980 			}
4981 			m_copydata(control->data, 0, control->length, buffer);
4982 			memset(&rcv, 0, sizeof(struct sctp_rcvinfo));
4983 			rcv.rcv_sid = control->sinfo_stream;
4984 			rcv.rcv_ssn = control->sinfo_ssn;
4985 			rcv.rcv_flags = control->sinfo_flags;
4986 			rcv.rcv_ppid = control->sinfo_ppid;
4987 			rcv.rcv_tsn = control->sinfo_tsn;
4988 			rcv.rcv_cumtsn = control->sinfo_cumtsn;
4989 			rcv.rcv_context = control->sinfo_context;
4990 			rcv.rcv_assoc_id = control->sinfo_assoc_id;
4991 			memset(&addr, 0, sizeof(union sctp_sockstore));
4992 			switch (control->whoFrom->ro._l_addr.sa.sa_family) {
4993 #ifdef INET
4994 			case AF_INET:
4995 				addr.sin = control->whoFrom->ro._l_addr.sin;
4996 				break;
4997 #endif
4998 #ifdef INET6
4999 			case AF_INET6:
5000 				addr.sin6 = control->whoFrom->ro._l_addr.sin6;
5001 				break;
5002 #endif
5003 			case AF_CONN:
5004 				addr.sconn = control->whoFrom->ro._l_addr.sconn;
5005 				break;
5006 			default:
5007 				addr.sa = control->whoFrom->ro._l_addr.sa;
5008 				break;
5009 			}
5010 			flags = 0;
5011 			if (control->end_added == 1) {
5012 				flags |= MSG_EOR;
5013 			}
5014 			if (control->spec_flags & M_NOTIFICATION) {
5015 				flags |= MSG_NOTIFICATION;
5016 			}
5017 			sctp_m_freem(control->data);
5018 			control->data = NULL;
5019 			control->tail_mbuf = NULL;
5020 			control->length = 0;
5021 			if (control->end_added) {
5022 				sctp_free_remote_addr(control->whoFrom);
5023 				control->whoFrom = NULL;
5024 				sctp_free_a_readq(stcb, control);
5025 			} else {
5026 				control->some_taken = 1;
5027 			}
5028 			atomic_add_int(&stcb->asoc.refcnt, 1);
5029 			SCTP_TCB_UNLOCK(stcb);
5030 			inp->recv_callback(so, addr, buffer, length, rcv, flags, inp->ulp_info);
5031 			SCTP_TCB_LOCK(stcb);
5032 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
5033 		}
5034 		if (inp)
5035 			SCTP_INP_READ_UNLOCK(inp);
5036 		return (0);
5037 	}
5038 #endif
5039 	if (inp) {
5040 		SCTP_INP_READ_UNLOCK(inp);
5041 	}
5042 	if (inp && inp->sctp_socket) {
5043 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
5044 			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
5045 		} else {
5046 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5047 			struct socket *so;
5048 
5049 			so = SCTP_INP_SO(inp);
5050 			if (stcb) {
5051 				atomic_add_int(&stcb->asoc.refcnt, 1);
5052 				SCTP_TCB_UNLOCK(stcb);
5053 			}
5054 			SCTP_SOCKET_LOCK(so, 1);
5055 			if (stcb) {
5056 				SCTP_TCB_LOCK(stcb);
5057 				atomic_subtract_int(&stcb->asoc.refcnt, 1);
5058 			}
5059 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5060 				SCTP_SOCKET_UNLOCK(so, 1);
5061 				return (0);
5062 			}
5063 #endif
5064 			sctp_sorwakeup(inp, inp->sctp_socket);
5065 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5066 			SCTP_SOCKET_UNLOCK(so, 1);
5067 #endif
5068 		}
5069 	}
5070 	return (0);
5071 }
5072 
5073 
5074 
5075 /*************HOLD THIS COMMENT FOR PATCH FILE OF
5076  *************ALTERNATE ROUTING CODE
5077  */
5078 
5079 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
5080  *************ALTERNATE ROUTING CODE
5081  */
5082 
5083 struct mbuf *
5084 sctp_generate_cause(uint16_t code, char *info)
5085 {
5086 	struct mbuf *m;
5087 	struct sctp_gen_error_cause *cause;
5088 	size_t info_len, len;
5089 
5090 	if ((code == 0) || (info == NULL)) {
5091 		return (NULL);
5092 	}
5093 	info_len = strlen(info);
5094 	len = sizeof(struct sctp_paramhdr) + info_len;
5095 	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
5096 	if (m != NULL) {
5097 		SCTP_BUF_LEN(m) = len;
5098 		cause = mtod(m, struct sctp_gen_error_cause *);
5099 		cause->code = htons(code);
5100 		cause->length = htons((uint16_t)len);
5101 		memcpy(cause->info, info, info_len);
5102 	}
5103 	return (m);
5104 }
5105 
5106 struct mbuf *
5107 sctp_generate_no_user_data_cause(uint32_t tsn)
5108 {
5109 	struct mbuf *m;
5110 	struct sctp_error_no_user_data *no_user_data_cause;
5111 	size_t len;
5112 
5113 	len = sizeof(struct sctp_error_no_user_data);
5114 	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
5115 	if (m != NULL) {
5116 		SCTP_BUF_LEN(m) = len;
5117 		no_user_data_cause = mtod(m, struct sctp_error_no_user_data *);
5118 		no_user_data_cause->cause.code = htons(SCTP_CAUSE_NO_USER_DATA);
5119 		no_user_data_cause->cause.length = htons((uint16_t)len);
5120 		no_user_data_cause->tsn = tsn; /* tsn is passed in as NBO */
5121 	}
5122 	return (m);
5123 }
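
/*
 * Illustrative sketch: how a caller might use the cause-generation helpers
 * above.  The wrapper name and the local buffer size are hypothetical; only
 * sctp_generate_cause() and the standard SCTP_CAUSE_PROTOCOL_VIOLATION cause
 * code are taken as given.  sctp_generate_no_user_data_cause() follows the
 * same pattern, taking the TSN already in network byte order.
 */
#if 0
static struct mbuf *
example_build_violation_cause(void)
{
	char msg[64];	/* hypothetical diagnostic buffer */
	struct mbuf *op_err;

	snprintf(msg, sizeof(msg), "example: unexpected chunk length");
	op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
	if (op_err == NULL) {
		/* mbuf allocation failed; the caller just sends no cause. */
		return (NULL);
	}
	/* op_err would normally be handed to an ABORT/ERROR send routine. */
	return (op_err);
}
#endif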
5124 
5125 #ifdef SCTP_MBCNT_LOGGING
5126 void
5127 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
5128     struct sctp_tmit_chunk *tp1, int chk_cnt)
5129 {
5130 	if (tp1->data == NULL) {
5131 		return;
5132 	}
5133 	asoc->chunks_on_out_queue -= chk_cnt;
5134 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
5135 		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
5136 			       asoc->total_output_queue_size,
5137 			       tp1->book_size,
5138 			       0,
5139 			       tp1->mbcnt);
5140 	}
5141 	if (asoc->total_output_queue_size >= tp1->book_size) {
5142 		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
5143 	} else {
5144 		asoc->total_output_queue_size = 0;
5145 	}
5146 
5147 	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
5148 				  ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
5149 		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
5150 			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
5151 		} else {
5152 			stcb->sctp_socket->so_snd.sb_cc = 0;
5153 
5154 		}
5155 	}
5156 }
5157 
5158 #endif
5159 
5160 int
5161 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
5162 			   uint8_t sent, int so_locked
5163 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
5164 			   SCTP_UNUSED
5165 #endif
5166 	)
5167 {
5168 	struct sctp_stream_out *strq;
5169 	struct sctp_tmit_chunk *chk = NULL, *tp2;
5170 	struct sctp_stream_queue_pending *sp;
5171 	uint16_t stream = 0, seq = 0;
5172 	uint8_t foundeom = 0;
5173 	int ret_sz = 0;
5174 	int notdone;
5175 	int do_wakeup_routine = 0;
5176 #if defined(__APPLE__)
5177 	if (so_locked) {
5178 		sctp_lock_assert(SCTP_INP_SO(stcb->sctp_ep));
5179 	} else {
5180 		sctp_unlock_assert(SCTP_INP_SO(stcb->sctp_ep));
5181 	}
5182 #endif
5183 	stream = tp1->rec.data.stream_number;
5184 	seq = tp1->rec.data.stream_seq;
5185 	do {
5186 		ret_sz += tp1->book_size;
5187 		if (tp1->data != NULL) {
5188 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5189 				sctp_flight_size_decrease(tp1);
5190 				sctp_total_flight_decrease(stcb, tp1);
5191 			}
5192 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
5193 			stcb->asoc.peers_rwnd += tp1->send_size;
5194 			stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
5195 			if (sent) {
5196 				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
5197 			} else {
5198 				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
5199 			}
5200 			if (tp1->data) {
5201 				sctp_m_freem(tp1->data);
5202 				tp1->data = NULL;
5203 			}
5204 			do_wakeup_routine = 1;
5205 			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
5206 				stcb->asoc.sent_queue_cnt_removeable--;
5207 			}
5208 		}
5209 		tp1->sent = SCTP_FORWARD_TSN_SKIP;
5210 		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
5211 		    SCTP_DATA_NOT_FRAG) {
5212 			/* not frag'ed, we are done */
5213 			notdone = 0;
5214 			foundeom = 1;
5215 		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
5216 			/* end of frag, we are done */
5217 			notdone = 0;
5218 			foundeom = 1;
5219 		} else {
5220 			/*
5221 			 * Its a begin or middle piece, we must mark all of
5222 			 * It's a begin or middle piece, we must mark all of
5223 			 */
5224 			notdone = 1;
5225 			tp1 = TAILQ_NEXT(tp1, sctp_next);
5226 		}
5227 	} while (tp1 && notdone);
5228 	if (foundeom == 0) {
5229 		/*
5230 		 * The multi-part message was scattered across the send and
5231 		 * sent queue.
5232 		 */
5233 		TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) {
5234 			if ((tp1->rec.data.stream_number != stream) ||
5235 		   	    (tp1->rec.data.stream_seq != seq)) {
5236 				break;
5237 			}
5238 			/* save to chk in case we have some on stream out
5239 			 * queue. If so and we have an un-transmitted one
5240 			 * we don't have to fudge the TSN.
5241 			 */
5242 			chk = tp1;
5243 			ret_sz += tp1->book_size;
5244 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
5245 			if (sent) {
5246 				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
5247 			} else {
5248 				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
5249 			}
5250 			if (tp1->data) {
5251 				sctp_m_freem(tp1->data);
5252 				tp1->data = NULL;
5253 			}
5254 			/* No flight involved here; book the size to 0 */
5255 			tp1->book_size = 0;
5256 			if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
5257 				foundeom = 1;
5258 			}
5259 			do_wakeup_routine = 1;
5260 			tp1->sent = SCTP_FORWARD_TSN_SKIP;
5261 			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
5262 			/* on to the sent queue so we can wait for it to be passed by. */
5263 			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
5264 					  sctp_next);
5265 			stcb->asoc.send_queue_cnt--;
5266 			stcb->asoc.sent_queue_cnt++;
5267 		}
5268 	}
5269 	if (foundeom == 0) {
5270 		/*
5271 		 * Still no EOM found. That means there
5272 		 * is still data left on the stream out queue.
5273 		 */
5274 		SCTP_TCB_SEND_LOCK(stcb);
5275 		strq = &stcb->asoc.strmout[stream];
5276 		sp = TAILQ_FIRST(&strq->outqueue);
5277 		if (sp != NULL) {
5278 			sp->discard_rest = 1;
5279 			/*
5280 			 * We may need to put a chunk on the
5281 			 * queue that holds the TSN that
5282 			 * would have been sent with the LAST
5283 			 * bit.
5284 			 */
5285 			if (chk == NULL) {
5286 				/* Yep, we have to */
5287 				sctp_alloc_a_chunk(stcb, chk);
5288 				if (chk == NULL) {
5289 					/* we are hosed. All we can
5290 					 * do is nothing.. which will
5291 					 * cause an abort if the peer is
5292 					 * paying attention.
5293 					 */
5294 					goto oh_well;
5295 				}
5296 				memset(chk, 0, sizeof(*chk));
5297 				chk->rec.data.rcv_flags = SCTP_DATA_LAST_FRAG;
5298 				chk->sent = SCTP_FORWARD_TSN_SKIP;
5299 				chk->asoc = &stcb->asoc;
5300 				chk->rec.data.stream_seq = strq->next_sequence_send;
5301 				chk->rec.data.stream_number = sp->stream;
5302 				chk->rec.data.payloadtype = sp->ppid;
5303 				chk->rec.data.context = sp->context;
5304 				chk->flags = sp->act_flags;
5305 				if (sp->net)
5306 					chk->whoTo = sp->net;
5307 				else
5308 					chk->whoTo = stcb->asoc.primary_destination;
5309 				atomic_add_int(&chk->whoTo->ref_count, 1);
5310 #if defined(__FreeBSD__) || defined(__Panda__)
5311 				chk->rec.data.TSN_seq = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
5312 #else
5313 				chk->rec.data.TSN_seq = stcb->asoc.sending_seq++;
5314 #endif
5315 				stcb->asoc.pr_sctp_cnt++;
5316 				TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
5317 				stcb->asoc.sent_queue_cnt++;
5318 				stcb->asoc.pr_sctp_cnt++;
5319 			} else {
5320 				chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
5321 			}
5322 			strq->next_sequence_send++;
5323 		oh_well:
5324 			if (sp->data) {
5325 				/* Pull any data to free up the SB and
5326 				 * allow the sender to "add more" while we
5327 				 * throw this data away.
5328 				 */
5329 				sctp_free_spbufspace(stcb, &stcb->asoc, sp);
5330 				ret_sz += sp->length;
5331 				do_wakeup_routine = 1;
5332 				sp->some_taken = 1;
5333 				sctp_m_freem(sp->data);
5334 				sp->data = NULL;
5335 				sp->tail_mbuf = NULL;
5336 				sp->length = 0;
5337 			}
5338 		}
5339 		SCTP_TCB_SEND_UNLOCK(stcb);
5340 	}
5341 	if (do_wakeup_routine) {
5342 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5343 		struct socket *so;
5344 
5345 		so = SCTP_INP_SO(stcb->sctp_ep);
5346 		if (!so_locked) {
5347 			atomic_add_int(&stcb->asoc.refcnt, 1);
5348 			SCTP_TCB_UNLOCK(stcb);
5349 			SCTP_SOCKET_LOCK(so, 1);
5350 			SCTP_TCB_LOCK(stcb);
5351 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
5352 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
5353 				/* assoc was freed while we were unlocked */
5354 				SCTP_SOCKET_UNLOCK(so, 1);
5355 				return (ret_sz);
5356 			}
5357 		}
5358 #endif
5359 		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
5360 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5361 		if (!so_locked) {
5362 			SCTP_SOCKET_UNLOCK(so, 1);
5363 		}
5364 #endif
5365 	}
5366 	return (ret_sz);
5367 }
5368 
5369 /*
5370  * Checks to see if the given address, addr, is one that is currently known
5371  * by the kernel.  Note: it can't distinguish the same address on multiple
5372  * interfaces and doesn't handle multiple addresses with different zone/scope
5373  * ids.  Note: ifa_ifwithaddr() compares the entire sockaddr struct.
5374  */
5375 struct sctp_ifa *
5376 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
5377 		    int holds_lock)
5378 {
5379 	struct sctp_laddr *laddr;
5380 
5381 	if (holds_lock == 0) {
5382 		SCTP_INP_RLOCK(inp);
5383 	}
5384 
5385 	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
5386 		if (laddr->ifa == NULL)
5387 			continue;
5388 		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
5389 			continue;
5390 #ifdef INET
5391 		if (addr->sa_family == AF_INET) {
5392 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
5393 			    laddr->ifa->address.sin.sin_addr.s_addr) {
5394 				/* found him. */
5395 				if (holds_lock == 0) {
5396 					SCTP_INP_RUNLOCK(inp);
5397 				}
5398 				return (laddr->ifa);
5399 				break;
5400 			}
5401 		}
5402 #endif
5403 #ifdef INET6
5404 		if (addr->sa_family == AF_INET6) {
5405 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
5406 						 &laddr->ifa->address.sin6)) {
5407 				/* found him. */
5408 				if (holds_lock == 0) {
5409 					SCTP_INP_RUNLOCK(inp);
5410 				}
5411 				return (laddr->ifa);
5412 				break;
5413 			}
5414 		}
5415 #endif
5416 #if defined(__Userspace__)
5417 		if (addr->sa_family == AF_CONN) {
5418 			if (((struct sockaddr_conn *)addr)->sconn_addr == laddr->ifa->address.sconn.sconn_addr) {
5419 				/* found him. */
5420 				if (holds_lock == 0) {
5421 					SCTP_INP_RUNLOCK(inp);
5422 				}
5423 				return (laddr->ifa);
5424 				break;
5425 			}
5426 		}
5427 #endif
5428 	}
5429 	if (holds_lock == 0) {
5430 		SCTP_INP_RUNLOCK(inp);
5431 	}
5432 	return (NULL);
5433 }
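
/*
 * Illustrative sketch: checking whether a local address is already known to
 * an endpoint by using sctp_find_ifa_in_ep() above.  The wrapper name is
 * hypothetical; passing holds_lock = 0 lets the helper take SCTP_INP_RLOCK()
 * itself.
 */
#if 0
static int
example_ep_owns_address(struct sctp_inpcb *inp, struct sockaddr *addr)
{
	struct sctp_ifa *ifa;

	ifa = sctp_find_ifa_in_ep(inp, addr, 0);
	return (ifa != NULL);
}
#endif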
5434 
5435 uint32_t
5436 sctp_get_ifa_hash_val(struct sockaddr *addr)
5437 {
5438 	switch (addr->sa_family) {
5439 #ifdef INET
5440 	case AF_INET:
5441 	{
5442 		struct sockaddr_in *sin;
5443 
5444 		sin = (struct sockaddr_in *)addr;
5445 		return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16));
5446 	}
5447 #endif
5448 #ifdef INET6
5449 	case AF_INET6:
5450 	{
5451 		struct sockaddr_in6 *sin6;
5452 		uint32_t hash_of_addr;
5453 
5454 		sin6 = (struct sockaddr_in6 *)addr;
5455 #if !defined(__Windows__) && !defined(__Userspace_os_FreeBSD) && !defined(__Userspace_os_Darwin) && !defined(__Userspace_os_Windows)
5456 		hash_of_addr = (sin6->sin6_addr.s6_addr32[0] +
5457 				sin6->sin6_addr.s6_addr32[1] +
5458 				sin6->sin6_addr.s6_addr32[2] +
5459 				sin6->sin6_addr.s6_addr32[3]);
5460 #else
5461 		hash_of_addr = (((uint32_t *)&sin6->sin6_addr)[0] +
5462 				((uint32_t *)&sin6->sin6_addr)[1] +
5463 				((uint32_t *)&sin6->sin6_addr)[2] +
5464 				((uint32_t *)&sin6->sin6_addr)[3]);
5465 #endif
5466 		hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16));
5467 		return (hash_of_addr);
5468 	}
5469 #endif
5470 #if defined(__Userspace__)
5471 	case AF_CONN:
5472 	{
5473 		struct sockaddr_conn *sconn;
5474 		uintptr_t temp;
5475 
5476 		sconn = (struct sockaddr_conn *)addr;
5477 		temp = (uintptr_t)sconn->sconn_addr;
5478 		return ((uint32_t)(temp ^ (temp >> 16)));
5479 	}
5480 #endif
5481 	default:
5482 		break;
5483 	}
5484 	return (0);
5485 }
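
/*
 * Illustrative sketch: how the hash value above is folded into a bucket of
 * the per-VRF address hash, mirroring the lookup done by
 * sctp_find_ifa_by_addr() below.  The wrapper name is hypothetical.
 */
#if 0
static struct sctp_ifalist *
example_addr_bucket(struct sctp_vrf *vrf, struct sockaddr *addr)
{
	uint32_t hash_of_addr;

	hash_of_addr = sctp_get_ifa_hash_val(addr);
	/* vrf_addr_hashmark is used as a mask over the hash value. */
	return (&vrf->vrf_addr_hash[hash_of_addr & vrf->vrf_addr_hashmark]);
}
#endif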
5486 
5487 struct sctp_ifa *
5488 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
5489 {
5490 	struct sctp_ifa *sctp_ifap;
5491 	struct sctp_vrf *vrf;
5492 	struct sctp_ifalist *hash_head;
5493 	uint32_t hash_of_addr;
5494 
5495 	if (holds_lock == 0)
5496 		SCTP_IPI_ADDR_RLOCK();
5497 
5498 	vrf = sctp_find_vrf(vrf_id);
5499 	if (vrf == NULL) {
5500 	stage_right:
5501 		if (holds_lock == 0)
5502 			SCTP_IPI_ADDR_RUNLOCK();
5503 		return (NULL);
5504 	}
5505 
5506 	hash_of_addr = sctp_get_ifa_hash_val(addr);
5507 
5508 	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
5509 	if (hash_head == NULL) {
5510 		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
5511 			    hash_of_addr, (uint32_t)vrf->vrf_addr_hashmark,
5512 			    (uint32_t)(hash_of_addr & vrf->vrf_addr_hashmark));
5513 		sctp_print_address(addr);
5514 		SCTP_PRINTF("No such bucket for address\n");
5515 		if (holds_lock == 0)
5516 			SCTP_IPI_ADDR_RUNLOCK();
5517 
5518 		return (NULL);
5519 	}
5520 	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
5521 		if (sctp_ifap == NULL) {
5522 #ifdef INVARIANTS
5523 			panic("Huh LIST_FOREACH corrupt");
5524 		        goto stage_right;
5525 #else
5526 			SCTP_PRINTF("LIST corrupt of sctp_ifap's?\n");
5527 			goto stage_right;
5528 #endif
5529 		}
5530 		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
5531 			continue;
5532 #ifdef INET
5533 		if (addr->sa_family == AF_INET) {
5534 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
5535 			    sctp_ifap->address.sin.sin_addr.s_addr) {
5536 				/* found him. */
5537 				if (holds_lock == 0)
5538 					SCTP_IPI_ADDR_RUNLOCK();
5539 				return (sctp_ifap);
5540 				break;
5541 			}
5542 		}
5543 #endif
5544 #ifdef INET6
5545 		if (addr->sa_family == AF_INET6) {
5546 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
5547 						 &sctp_ifap->address.sin6)) {
5548 				/* found him. */
5549 				if (holds_lock == 0)
5550 					SCTP_IPI_ADDR_RUNLOCK();
5551 				return (sctp_ifap);
5552 				break;
5553 			}
5554 		}
5555 #endif
5556 #if defined(__Userspace__)
5557 		if (addr->sa_family == AF_CONN) {
5558 			if (((struct sockaddr_conn *)addr)->sconn_addr == sctp_ifap->address.sconn.sconn_addr) {
5559 				/* found him. */
5560 				if (holds_lock == 0)
5561 					SCTP_IPI_ADDR_RUNLOCK();
5562 				return (sctp_ifap);
5563 				break;
5564 			}
5565 		}
5566 #endif
5567 	}
5568 	if (holds_lock == 0)
5569 		SCTP_IPI_ADDR_RUNLOCK();
5570 	return (NULL);
5571 }
5572 
5573 static void
5574 sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t *freed_so_far, int hold_rlock,
5575 	       uint32_t rwnd_req)
5576 {
5577 	/* User pulled some data, do we need a rwnd update? */
5578 	int r_unlocked = 0;
5579 	uint32_t dif, rwnd;
5580 	struct socket *so = NULL;
5581 
5582 	if (stcb == NULL)
5583 		return;
5584 
5585 	atomic_add_int(&stcb->asoc.refcnt, 1);
5586 
5587 	if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED |
5588 				SCTP_STATE_SHUTDOWN_RECEIVED |
5589 				SCTP_STATE_SHUTDOWN_ACK_SENT)) {
5590 		/* Pre-check: if we are freeing, no update is needed */
5591 		goto no_lock;
5592 	}
5593 	SCTP_INP_INCR_REF(stcb->sctp_ep);
5594 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5595 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5596 		goto out;
5597 	}
5598 	so = stcb->sctp_socket;
5599 	if (so == NULL) {
5600 		goto out;
5601 	}
5602 	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
5603 	/* Have you freed enough to warrant a look? */
5604 	*freed_so_far = 0;
5605 	/* Yep, it's worth a look and the lock overhead */
5606 
5607 	/* Figure out what the rwnd would be */
5608 	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
5609 	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
5610 		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
5611 	} else {
5612 		dif = 0;
5613 	}
5614 	if (dif >= rwnd_req) {
5615 		if (hold_rlock) {
5616 			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5617 			r_unlocked = 1;
5618 		}
5619 		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5620 			/*
5621 			 * One last check before we possibly proceed. There
5622 			 * is a race where the association is about to be
5623 			 * freed but has not yet reached the gate; bail out.
5624 			 */
5625 			goto out;
5626 		}
5627 		SCTP_TCB_LOCK(stcb);
5628 		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5629 			/* No reports here */
5630 			SCTP_TCB_UNLOCK(stcb);
5631 			goto out;
5632 		}
5633 		SCTP_STAT_INCR(sctps_wu_sacks_sent);
5634 		sctp_send_sack(stcb, SCTP_SO_LOCKED);
5635 
5636 		sctp_chunk_output(stcb->sctp_ep, stcb,
5637 				  SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
5638 		/* make sure no timer is running */
5639 		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTPUTIL+SCTP_LOC_6);
5640 		SCTP_TCB_UNLOCK(stcb);
5641 	} else {
5642 		/* Update how much we have pending */
5643 		stcb->freed_by_sorcv_sincelast = dif;
5644 	}
5645  out:
5646 	if (so && r_unlocked && hold_rlock) {
5647 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5648 	}
5649 
5650 	SCTP_INP_DECR_REF(stcb->sctp_ep);
5651  no_lock:
5652 	atomic_add_int(&stcb->asoc.refcnt, -1);
5653 	return;
5654 }
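
/*
 * Illustrative sketch of the core test applied by sctp_user_rcvd() above: a
 * window-update SACK is only worth the locking overhead once the receive
 * window has re-opened by at least rwnd_req bytes since the last value
 * reported to the peer.  The helper name is hypothetical.
 */
#if 0
static int
example_should_send_window_update(uint32_t rwnd,
    uint32_t my_last_reported_rwnd, uint32_t rwnd_req)
{
	uint32_t dif;

	if (rwnd >= my_last_reported_rwnd) {
		dif = rwnd - my_last_reported_rwnd;
	} else {
		dif = 0;
	}
	return (dif >= rwnd_req);
}
#endif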
5655 
5656 int
5657 sctp_sorecvmsg(struct socket *so,
5658     struct uio *uio,
5659     struct mbuf **mp,
5660     struct sockaddr *from,
5661     int fromlen,
5662     int *msg_flags,
5663     struct sctp_sndrcvinfo *sinfo,
5664     int filling_sinfo)
5665 {
5666 	/*
5667 	 * MSG flags we will look at: MSG_DONTWAIT - non-blocking IO;
5668 	 * MSG_PEEK - look, don't touch (only valid without an mbuf copy,
5669 	 * i.e. mp == NULL, so uio is the copy method to userland); MSG_WAITALL - ??
5670 	 * On the way out we may set any combination of:
5671 	 * MSG_NOTIFICATION and MSG_EOR.
5672 	 *
5673 	 */
5674 	struct sctp_inpcb *inp = NULL;
5675 	int my_len = 0;
5676 	int cp_len = 0, error = 0;
5677 	struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
5678 	struct mbuf *m = NULL;
5679 	struct sctp_tcb *stcb = NULL;
5680 	int wakeup_read_socket = 0;
5681 	int freecnt_applied = 0;
5682 	int out_flags = 0, in_flags = 0;
5683 	int block_allowed = 1;
5684 	uint32_t freed_so_far = 0;
5685 	uint32_t copied_so_far = 0;
5686 	int in_eeor_mode = 0;
5687 	int no_rcv_needed = 0;
5688 	uint32_t rwnd_req = 0;
5689 	int hold_sblock = 0;
5690 	int hold_rlock = 0;
5691 	int slen = 0;
5692 	uint32_t held_length = 0;
5693 #if defined(__FreeBSD__) && __FreeBSD_version >= 700000
5694 	int sockbuf_lock = 0;
5695 #endif
5696 
5697 	if (uio == NULL) {
5698 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5699 		return (EINVAL);
5700 	}
5701 
5702 	if (msg_flags) {
5703 		in_flags = *msg_flags;
5704 		if (in_flags & MSG_PEEK)
5705 			SCTP_STAT_INCR(sctps_read_peeks);
5706 	} else {
5707 		in_flags = 0;
5708 	}
5709 #if defined(__APPLE__)
5710 #if defined(APPLE_LEOPARD)
5711 	slen = uio->uio_resid;
5712 #else
5713 	slen = uio_resid(uio);
5714 #endif
5715 #else
5716 	slen = uio->uio_resid;
5717 #endif
5718 
5719 	/* Pull in and set up our int flags */
5720 	if (in_flags & MSG_OOB) {
5721 		/* Out-of-band data is NOT supported */
5722 		return (EOPNOTSUPP);
5723 	}
5724 	if ((in_flags & MSG_PEEK) && (mp != NULL)) {
5725 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5726 		return (EINVAL);
5727 	}
5728 	if ((in_flags & (MSG_DONTWAIT
5729 #if defined(__FreeBSD__) && __FreeBSD_version > 500000
5730 			 | MSG_NBIO
5731 #endif
5732 		     )) ||
5733 	    SCTP_SO_IS_NBIO(so)) {
5734 		block_allowed = 0;
5735 	}
5736 	/* setup the endpoint */
5737 	inp = (struct sctp_inpcb *)so->so_pcb;
5738 	if (inp == NULL) {
5739 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
5740 		return (EFAULT);
5741 	}
5742 	rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
5743 	/* Must be at least an MTU's worth */
5744 	if (rwnd_req < SCTP_MIN_RWND)
5745 		rwnd_req = SCTP_MIN_RWND;
5746 	in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
5747 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5748 #if defined(__APPLE__)
5749 #if defined(APPLE_LEOPARD)
5750 		sctp_misc_ints(SCTP_SORECV_ENTER,
5751 			       rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio->uio_resid);
5752 #else
5753 		sctp_misc_ints(SCTP_SORECV_ENTER,
5754 			       rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio_resid(uio));
5755 #endif
5756 #else
5757 		sctp_misc_ints(SCTP_SORECV_ENTER,
5758 			       rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio->uio_resid);
5759 #endif
5760 	}
5761 #if (defined(__FreeBSD__) && __FreeBSD_version < 700000) || defined(__Userspace__)
5762 	SOCKBUF_LOCK(&so->so_rcv);
5763 	hold_sblock = 1;
5764 #endif
5765 	if (SCTP_BASE_SYSCTL(sctp_logging_level) &SCTP_RECV_RWND_LOGGING_ENABLE) {
5766 #if defined(__APPLE__)
5767 #if defined(APPLE_LEOPARD)
5768 		sctp_misc_ints(SCTP_SORECV_ENTERPL,
5769 			       rwnd_req, block_allowed, so->so_rcv.sb_cc, uio->uio_resid);
5770 #else
5771 		sctp_misc_ints(SCTP_SORECV_ENTERPL,
5772 			       rwnd_req, block_allowed, so->so_rcv.sb_cc, uio_resid(uio));
5773 #endif
5774 #else
5775 		sctp_misc_ints(SCTP_SORECV_ENTERPL,
5776 			       rwnd_req, block_allowed, so->so_rcv.sb_cc, uio->uio_resid);
5777 #endif
5778 	}
5779 
5780 #if defined(__APPLE__)
5781 	error = sblock(&so->so_rcv, SBLOCKWAIT(in_flags));
5782 #endif
5783 
5784 #if defined(__FreeBSD__)
5785 	error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
5786 #endif
5787 	if (error) {
5788 		goto release_unlocked;
5789 	}
5790 #if defined(__FreeBSD__) && __FreeBSD_version >= 700000
5791         sockbuf_lock = 1;
5792 #endif
5793  restart:
5794 #if (defined(__FreeBSD__) && __FreeBSD_version < 700000) || defined(__Userspace__)
5795 	if (hold_sblock == 0) {
5796 		SOCKBUF_LOCK(&so->so_rcv);
5797 		hold_sblock = 1;
5798 	}
5799 #endif
5800 #if defined(__APPLE__)
5801 	sbunlock(&so->so_rcv, 1);
5802 #endif
5803 
5804 #if defined(__FreeBSD__) && __FreeBSD_version < 700000
5805 	sbunlock(&so->so_rcv);
5806 #endif
5807 
5808  restart_nosblocks:
5809 	if (hold_sblock == 0) {
5810 		SOCKBUF_LOCK(&so->so_rcv);
5811 		hold_sblock = 1;
5812 	}
5813 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5814 	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5815 		goto out;
5816 	}
5817 #if (defined(__FreeBSD__) && __FreeBSD_version > 500000) || defined(__Windows__)
5818 	if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) {
5819 #else
5820 	if ((so->so_state & SS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) {
5821 #endif
5822 		if (so->so_error) {
5823 			error = so->so_error;
5824 			if ((in_flags & MSG_PEEK) == 0)
5825 				so->so_error = 0;
5826 			goto out;
5827 		} else {
5828 			if (so->so_rcv.sb_cc == 0) {
5829 				/* indicate EOF */
5830 				error = 0;
5831 				goto out;
5832 			}
5833 		}
5834 	}
5835 	if ((so->so_rcv.sb_cc <= held_length) && block_allowed) {
5836 		/* we need to wait for data */
5837 		if ((so->so_rcv.sb_cc == 0) &&
5838 		    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5839 		     (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
5840 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5841 				/* For the active open side, clear flags for re-use;
5842 				 * a passive open is blocked by connect.
5843 				 */
5844 				if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5845 					/* You were aborted, passive side always hits here */
5846 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5847 					error = ECONNRESET;
5848 				}
5849 				so->so_state &= ~(SS_ISCONNECTING |
5850 						  SS_ISDISCONNECTING |
5851 						  SS_ISCONFIRMING |
5852 						  SS_ISCONNECTED);
5853 				if (error == 0) {
5854 					if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5855 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5856 						error = ENOTCONN;
5857 					}
5858 				}
5859 				goto out;
5860 			}
5861 		}
5862 		error = sbwait(&so->so_rcv);
5863 		if (error) {
5864 			goto out;
5865 		}
5866 		held_length = 0;
5867 		goto restart_nosblocks;
5868 	} else if (so->so_rcv.sb_cc == 0) {
5869 		if (so->so_error) {
5870 			error = so->so_error;
5871 			if ((in_flags & MSG_PEEK) == 0)
5872 				so->so_error = 0;
5873 		} else {
5874 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5875 			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5876 				if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5877 					/* For the active open side, clear flags for re-use;
5878 					 * a passive open is blocked by connect.
5879 					 */
5880 					if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5881 						/* You were aborted, passive side always hits here */
5882 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5883 						error = ECONNRESET;
5884 					}
5885 					so->so_state &= ~(SS_ISCONNECTING |
5886 							  SS_ISDISCONNECTING |
5887 							  SS_ISCONFIRMING |
5888 							  SS_ISCONNECTED);
5889 					if (error == 0) {
5890 						if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5891 							SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5892 							error = ENOTCONN;
5893 						}
5894 					}
5895 					goto out;
5896 				}
5897 			}
5898 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
5899 			error = EWOULDBLOCK;
5900 		}
5901 		goto out;
5902 	}
5903 	if (hold_sblock == 1) {
5904 		SOCKBUF_UNLOCK(&so->so_rcv);
5905 		hold_sblock = 0;
5906 	}
5907 #if defined(__APPLE__)
5908 	error = sblock(&so->so_rcv, SBLOCKWAIT(in_flags));
5909 #endif
5910 #if defined(__FreeBSD__) && __FreeBSD_version < 700000
5911 	error = sblock(&so->so_rcv, (block_allowed ? M_WAITOK : 0));
5912 #endif
5913 	/* we possibly have data we can read */
5914 	/*sa_ignore FREED_MEMORY*/
5915 	control = TAILQ_FIRST(&inp->read_queue);
5916 	if (control == NULL) {
5917 		/* This could be happening since
5918 		 * the appender did the increment but as not
5919 		 * the appender did the increment but has not
5920 		 * yet done the tailq insert onto the read_queue
5921 		if (hold_rlock == 0) {
5922 			SCTP_INP_READ_LOCK(inp);
5923 		}
5924 		control = TAILQ_FIRST(&inp->read_queue);
5925 		if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
5926 #ifdef INVARIANTS
5927 			panic("Huh, its non zero and nothing on control?");
5928 #endif
5929 			so->so_rcv.sb_cc = 0;
5930 		}
5931 		SCTP_INP_READ_UNLOCK(inp);
5932 		hold_rlock = 0;
5933 		goto restart;
5934 	}
5935 
5936 	if ((control->length == 0) &&
5937 	    (control->do_not_ref_stcb)) {
5938 		/* Clean-up code for freeing an assoc that left behind a pdapi;
5939 		 * maybe a peer in EEOR mode that just closed after sending and
5940 		 * never indicated an EOR.
5941 		 */
5942 		if (hold_rlock == 0) {
5943 			hold_rlock = 1;
5944 			SCTP_INP_READ_LOCK(inp);
5945 		}
5946 		control->held_length = 0;
5947 		if (control->data) {
5948 			/* Hmm, there is data here; fix it up */
5949 			struct mbuf *m_tmp;
5950 			int cnt = 0;
5951 			m_tmp = control->data;
5952 			while (m_tmp) {
5953 				cnt += SCTP_BUF_LEN(m_tmp);
5954 				if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5955 					control->tail_mbuf = m_tmp;
5956 					control->end_added = 1;
5957 				}
5958 				m_tmp = SCTP_BUF_NEXT(m_tmp);
5959 			}
5960 			control->length = cnt;
5961 		} else {
5962 			/* remove it */
5963 			TAILQ_REMOVE(&inp->read_queue, control, next);
5964 			/* Add back any hidden data */
5965 			sctp_free_remote_addr(control->whoFrom);
5966 			sctp_free_a_readq(stcb, control);
5967 		}
5968 		if (hold_rlock) {
5969 			hold_rlock = 0;
5970 			SCTP_INP_READ_UNLOCK(inp);
5971 		}
5972 		goto restart;
5973 	}
5974 	if ((control->length == 0) &&
5975 	    (control->end_added == 1)) {
5976 		/* Do we also need to check for (control->pdapi_aborted == 1)? */
5977 		if (hold_rlock == 0) {
5978 			hold_rlock = 1;
5979 			SCTP_INP_READ_LOCK(inp);
5980 		}
5981 		TAILQ_REMOVE(&inp->read_queue, control, next);
5982 		if (control->data) {
5983 #ifdef INVARIANTS
5984 			panic("control->data not null but control->length == 0");
5985 #else
5986 			SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n");
5987 			sctp_m_freem(control->data);
5988 			control->data = NULL;
5989 #endif
5990 		}
5991 		if (control->aux_data) {
5992 			sctp_m_free (control->aux_data);
5993 			control->aux_data = NULL;
5994 		}
5995 		sctp_free_remote_addr(control->whoFrom);
5996 		sctp_free_a_readq(stcb, control);
5997 		if (hold_rlock) {
5998 			hold_rlock = 0;
5999 			SCTP_INP_READ_UNLOCK(inp);
6000 		}
6001 		goto restart;
6002 	}
6003 	if (control->length == 0) {
6004 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
6005 		    (filling_sinfo)) {
6006 			/* find a more suitable one than this */
6007 			ctl = TAILQ_NEXT(control, next);
6008 			while (ctl) {
6009 				if ((ctl->stcb != control->stcb) && (ctl->length) &&
6010 				    (ctl->some_taken ||
6011 				     (ctl->spec_flags & M_NOTIFICATION) ||
6012 				     ((ctl->do_not_ref_stcb == 0) &&
6013 				      (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
6014 					) {
6015 					/*-
6016 					 * If we have a different TCB next, and there is data
6017 					 * present, and we have already taken some (pdapi), OR we can
6018 					 * ref the tcb and no delivery has started on this stream, we
6019 					 * take it. Note we allow a notification on a different
6020 					 * assoc to be delivered.
6021 					 */
6022 					control = ctl;
6023 					goto found_one;
6024 				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
6025 					   (ctl->length) &&
6026 					   ((ctl->some_taken) ||
6027 					    ((ctl->do_not_ref_stcb == 0) &&
6028 					     ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
6029 					     (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) {
6030 					/*-
6031 					 * If we have the same tcb, and there is data present, and we
6032 					 * have the stream interleave feature present, then if we have
6033 					 * taken some (pdapi) or we can refer to that tcb AND we have
6034 					 * not started a delivery for this stream, we can take it.
6035 					 * Note we do NOT allow a notification on the same assoc to
6036 					 * be delivered.
6037 					 */
6038 					control = ctl;
6039 					goto found_one;
6040 				}
6041 				ctl = TAILQ_NEXT(ctl, next);
6042 			}
6043 		}
6044 		/*
6045 		 * if we reach here, no suitable replacement is available
6046 		 * <or> fragment interleave is NOT on. So stuff the sb_cc
6047 		 * into our held count, and it's time to sleep again.
6048 		 */
6049 		held_length = so->so_rcv.sb_cc;
6050 		control->held_length = so->so_rcv.sb_cc;
6051 		goto restart;
6052 	}
6053 	/* Clear the held length since there is something to read */
6054 	control->held_length = 0;
6055 	if (hold_rlock) {
6056 		SCTP_INP_READ_UNLOCK(inp);
6057 		hold_rlock = 0;
6058 	}
6059  found_one:
6060 	/*
6061 	 * If we reach here, control has some data for us to read off.
6062 	 * Note that stcb COULD be NULL.
6063 	 */
6064 	control->some_taken++;
6065 	if (hold_sblock) {
6066 		SOCKBUF_UNLOCK(&so->so_rcv);
6067 		hold_sblock = 0;
6068 	}
6069 	stcb = control->stcb;
6070 	if (stcb) {
6071 		if ((control->do_not_ref_stcb == 0) &&
6072 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
6073 			if (freecnt_applied == 0)
6074 				stcb = NULL;
6075 		} else if (control->do_not_ref_stcb == 0) {
6076 			/* you can't free it on me please */
6077 			/*
6078 			 * The lock on the socket buffer protects us so the
6079 			 * free code will stop. But since we used the socketbuf
6080 			 * lock and the sender uses the tcb_lock to increment,
6081 			 * we need to use the atomic add to the refcnt
6082 			 */
6083 			if (freecnt_applied) {
6084 #ifdef INVARIANTS
6085 				panic("refcnt already incremented");
6086 #else
6087 				SCTP_PRINTF("refcnt already incremented?\n");
6088 #endif
6089 			} else {
6090 				atomic_add_int(&stcb->asoc.refcnt, 1);
6091 				freecnt_applied = 1;
6092 			}
6093 			/*
6094 			 * Setup to remember how much we have not yet told
6095 			 * the peer our rwnd has opened up. Note we grab
6096 			 * the value from the tcb from last time.
6097 			 * Note too that sack sending clears this when a sack
6098 			 * is sent, which is fine. Once we hit the rwnd_req,
6099 			 * we then will go to the sctp_user_rcvd() that will
6100 			 * not lock until it KNOWs it MUST send a WUP-SACK.
6101 			 */
6102 			freed_so_far = stcb->freed_by_sorcv_sincelast;
6103 			stcb->freed_by_sorcv_sincelast = 0;
6104 		}
6105         }
6106 	if (stcb &&
6107 	    ((control->spec_flags & M_NOTIFICATION) == 0) &&
6108 	    control->do_not_ref_stcb == 0) {
6109 		stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
6110 	}
6111 
6112 	/* First lets get off the sinfo and sockaddr info */
6113 	if ((sinfo) && filling_sinfo) {
6114 		memcpy(sinfo, control, sizeof(struct sctp_nonpad_sndrcvinfo));
6115 		nxt = TAILQ_NEXT(control, next);
6116 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
6117 		    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
6118 			struct sctp_extrcvinfo *s_extra;
6119 			s_extra = (struct sctp_extrcvinfo *)sinfo;
6120 			if ((nxt) &&
6121 			    (nxt->length)) {
6122 				s_extra->sreinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
6123 				if (nxt->sinfo_flags & SCTP_UNORDERED) {
6124 					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
6125 				}
6126 				if (nxt->spec_flags & M_NOTIFICATION) {
6127 					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
6128 				}
6129 				s_extra->sreinfo_next_aid = nxt->sinfo_assoc_id;
6130 				s_extra->sreinfo_next_length = nxt->length;
6131 				s_extra->sreinfo_next_ppid = nxt->sinfo_ppid;
6132 				s_extra->sreinfo_next_stream = nxt->sinfo_stream;
6133 				if (nxt->tail_mbuf != NULL) {
6134 					if (nxt->end_added) {
6135 						s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
6136 					}
6137 				}
6138 			} else {
6139 				/* we explicitly zero this, since the memcpy picked up
6140 				 * other things beyond the older sinfo_ fields
6141 				 * that are on the control structure
6142 				 */
6143 				nxt = NULL;
6144 				s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
6145 				s_extra->sreinfo_next_aid = 0;
6146 				s_extra->sreinfo_next_length = 0;
6147 				s_extra->sreinfo_next_ppid = 0;
6148 				s_extra->sreinfo_next_stream = 0;
6149 			}
6150 		}
6151 		/*
6152 		 * update off the real current cum-ack, if we have an stcb.
6153 		 */
6154 		if ((control->do_not_ref_stcb == 0) && stcb)
6155 			sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
6156 		/*
6157 		 * mask off the high bits, we keep the actual chunk bits in
6158 		 * there.
6159 		 */
6160 		sinfo->sinfo_flags &= 0x00ff;
6161 		if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
6162 			sinfo->sinfo_flags |= SCTP_UNORDERED;
6163 		}
6164 	}
6165 #ifdef SCTP_ASOCLOG_OF_TSNS
6166 	{
6167 		int index, newindex;
6168 		struct sctp_pcbtsn_rlog *entry;
6169 		do {
6170 			index = inp->readlog_index;
6171 			newindex = index + 1;
6172 			if (newindex >= SCTP_READ_LOG_SIZE) {
6173 				newindex = 0;
6174 			}
6175 		} while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
6176 		entry = &inp->readlog[index];
6177 		entry->vtag = control->sinfo_assoc_id;
6178 		entry->strm = control->sinfo_stream;
6179 		entry->seq = control->sinfo_ssn;
6180 		entry->sz = control->length;
6181 		entry->flgs = control->sinfo_flags;
6182 	}
6183 #endif
6184 	if (fromlen && from) {
6185 #ifdef HAVE_SA_LEN
6186 		cp_len = min((size_t)fromlen, (size_t)control->whoFrom->ro._l_addr.sa.sa_len);
6187 #endif
6188 		switch (control->whoFrom->ro._l_addr.sa.sa_family) {
6189 #ifdef INET6
6190 			case AF_INET6:
6191 #ifndef HAVE_SA_LEN
6192 				cp_len = min((size_t)fromlen, sizeof(struct sockaddr_in6));
6193 #endif
6194 				((struct sockaddr_in6 *)from)->sin6_port = control->port_from;
6195 				break;
6196 #endif
6197 #ifdef INET
6198 			case AF_INET:
6199 #ifndef HAVE_SA_LEN
6200 				cp_len = min((size_t)fromlen, sizeof(struct sockaddr_in));
6201 #endif
6202 				((struct sockaddr_in *)from)->sin_port = control->port_from;
6203 				break;
6204 #endif
6205 #if defined(__Userspace__)
6206 			case AF_CONN:
6207 #ifndef HAVE_SA_LEN
6208 				cp_len = min((size_t)fromlen, sizeof(struct sockaddr_conn));
6209 #endif
6210 				((struct sockaddr_conn *)from)->sconn_port = control->port_from;
6211 				break;
6212 #endif
6213 			default:
6214 #ifndef HAVE_SA_LEN
6215 				cp_len = min((size_t)fromlen, sizeof(struct sockaddr));
6216 #endif
6217 				break;
6218 		}
6219 		memcpy(from, &control->whoFrom->ro._l_addr, cp_len);
6220 
6221 #if defined(INET) && defined(INET6)
6222 		if ((sctp_is_feature_on(inp,SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) &&
6223 		    (from->sa_family == AF_INET) &&
6224 		    ((size_t)fromlen >= sizeof(struct sockaddr_in6))) {
6225 			struct sockaddr_in *sin;
6226 			struct sockaddr_in6 sin6;
6227 
6228 			sin = (struct sockaddr_in *)from;
6229 			bzero(&sin6, sizeof(sin6));
6230 			sin6.sin6_family = AF_INET6;
6231 #ifdef HAVE_SIN6_LEN
6232 			sin6.sin6_len = sizeof(struct sockaddr_in6);
6233 #endif
6234 #if defined(__Userspace_os_FreeBSD) || defined(__Userspace_os_Darwin) || defined(__Userspace_os_Windows)
6235 			((uint32_t *)&sin6.sin6_addr)[2] = htonl(0xffff);
6236 			bcopy(&sin->sin_addr,
6237 			      &(((uint32_t *)&sin6.sin6_addr)[3]),
6238 			      sizeof(uint32_t));
6239 #elif defined(__Windows__)
6240 			((uint32_t *)&sin6.sin6_addr)[2] = htonl(0xffff);
6241 			bcopy(&sin->sin_addr,
6242 			      &((uint32_t *)&sin6.sin6_addr)[3],
6243 			      sizeof(uint32_t));
6244 #else
6245 			sin6.sin6_addr.s6_addr32[2] = htonl(0xffff);
6246 			bcopy(&sin->sin_addr,
6247 			      &sin6.sin6_addr.s6_addr32[3],
6248 			      sizeof(sin6.sin6_addr.s6_addr32[3]));
6249 #endif
6250 			sin6.sin6_port = sin->sin_port;
6251 			memcpy(from, &sin6, sizeof(struct sockaddr_in6));
6252 		}
6253 #endif
6254 #if defined(SCTP_EMBEDDED_V6_SCOPE)
6255 #ifdef INET6
6256 		{
6257 			struct sockaddr_in6 lsa6, *from6;
6258 
6259 			from6 = (struct sockaddr_in6 *)from;
6260 			sctp_recover_scope_mac(from6, (&lsa6));
6261 		}
6262 #endif
6263 #endif
6264 	}
6265 	/* now copy out what data we can */
6266 	if (mp == NULL) {
6267 		/* copy out each mbuf in the chain up to length */
6268 	get_more_data:
6269 		m = control->data;
6270 		while (m) {
6271 			/* Move out all we can */
6272 #if defined(__APPLE__)
6273 #if defined(APPLE_LEOPARD)
6274 			cp_len = (int)uio->uio_resid;
6275 #else
6276 			cp_len = (int)uio_resid(uio);
6277 #endif
6278 #else
6279 			cp_len = (int)uio->uio_resid;
6280 #endif
6281 			my_len = (int)SCTP_BUF_LEN(m);
6282 			if (cp_len > my_len) {
6283 				/* not enough in this buf */
6284 				cp_len = my_len;
6285 			}
6286 			if (hold_rlock) {
6287 				SCTP_INP_READ_UNLOCK(inp);
6288 				hold_rlock = 0;
6289 			}
6290 #if defined(__APPLE__)
6291 			SCTP_SOCKET_UNLOCK(so, 0);
6292 #endif
6293 			if (cp_len > 0)
6294 				error = uiomove(mtod(m, char *), cp_len, uio);
6295 #if defined(__APPLE__)
6296 			SCTP_SOCKET_LOCK(so, 0);
6297 #endif
6298 			/* re-read */
6299 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
6300 				goto release;
6301 			}
6302 
6303 			if ((control->do_not_ref_stcb == 0) && stcb &&
6304 			    stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
6305 				no_rcv_needed = 1;
6306 			}
6307 			if (error) {
6308 				/* error we are out of here */
6309 				goto release;
6310 			}
6311 			if ((SCTP_BUF_NEXT(m) == NULL) &&
6312 			    (cp_len >= SCTP_BUF_LEN(m)) &&
6313 			    ((control->end_added == 0) ||
6314 			     (control->end_added &&
6315 			      (TAILQ_NEXT(control, next) == NULL)))
6316 				) {
6317 				SCTP_INP_READ_LOCK(inp);
6318 				hold_rlock = 1;
6319 			}
6320 			if (cp_len == SCTP_BUF_LEN(m)) {
6321 				if ((SCTP_BUF_NEXT(m)== NULL) &&
6322 				    (control->end_added)) {
6323 					out_flags |= MSG_EOR;
6324 					if ((control->do_not_ref_stcb == 0)  &&
6325 					    (control->stcb != NULL) &&
6326 					    ((control->spec_flags & M_NOTIFICATION) == 0))
6327 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6328 				}
6329 				if (control->spec_flags & M_NOTIFICATION) {
6330 					out_flags |= MSG_NOTIFICATION;
6331 				}
6332 				/* we ate up the mbuf */
6333 				if (in_flags & MSG_PEEK) {
6334 					/* just looking */
6335 					m = SCTP_BUF_NEXT(m);
6336 					copied_so_far += cp_len;
6337 				} else {
6338 					/* dispose of the mbuf */
6339 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6340 						sctp_sblog(&so->so_rcv,
6341 						   control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
6342 					}
6343 					sctp_sbfree(control, stcb, &so->so_rcv, m);
6344 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6345 						sctp_sblog(&so->so_rcv,
6346 						   control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBRESULT, 0);
6347 					}
6348 					copied_so_far += cp_len;
6349 					freed_so_far += cp_len;
6350 					freed_so_far += MSIZE;
6351 					atomic_subtract_int(&control->length, cp_len);
6352 					control->data = sctp_m_free(m);
6353 					m = control->data;
6354 					/* been through it all; we must hold the sb lock, so it is ok to null the tail */
6355 					if (control->data == NULL) {
6356 #ifdef INVARIANTS
6357 #if !defined(__APPLE__)
6358 						if ((control->end_added == 0) ||
6359 						    (TAILQ_NEXT(control, next) == NULL)) {
6360 							/* If the end is not added, OR the
6361 							 * next is NULL, we MUST have the lock.
6362 							 */
6363 							if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
6364 								panic("Hmm we don't own the lock?");
6365 							}
6366 						}
6367 #endif
6368 #endif
6369 						control->tail_mbuf = NULL;
6370 #ifdef INVARIANTS
6371 						if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
6372 							panic("end_added, nothing left and no MSG_EOR");
6373 						}
6374 #endif
6375 					}
6376 				}
6377 			} else {
6378 				/* Do we need to trim the mbuf? */
6379 				if (control->spec_flags & M_NOTIFICATION) {
6380 					out_flags |= MSG_NOTIFICATION;
6381 				}
6382 				if ((in_flags & MSG_PEEK) == 0) {
6383 					SCTP_BUF_RESV_UF(m, cp_len);
6384 					SCTP_BUF_LEN(m) -= cp_len;
6385 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6386 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBFREE, cp_len);
6387 					}
6388 					atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
6389 					if ((control->do_not_ref_stcb == 0) &&
6390 					    stcb) {
6391 						atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
6392 					}
6393 					copied_so_far += cp_len;
6394 					freed_so_far += cp_len;
6395 					freed_so_far += MSIZE;
6396 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6397 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb?NULL:stcb,
6398 							   SCTP_LOG_SBRESULT, 0);
6399 					}
6400 					atomic_subtract_int(&control->length, cp_len);
6401 				} else {
6402 					copied_so_far += cp_len;
6403 				}
6404 			}
6405 #if defined(__APPLE__)
6406 #if defined(APPLE_LEOPARD)
6407 			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
6408 #else
6409 			if ((out_flags & MSG_EOR) || (uio_resid(uio) == 0)) {
6410 #endif
6411 #else
6412 			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
6413 #endif
6414 				break;
6415 			}
6416 			if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
6417 			    (control->do_not_ref_stcb == 0) &&
6418 			    (freed_so_far >= rwnd_req)) {
6419 				sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6420 			}
6421 		} /* end while(m) */
6422 		/*
6423 		 * At this point we have looked at it all and we either have
6424 		 * a MSG_EOR/or read all the user wants... <OR>
6425 		 * control->length == 0.
6426 		 */
6427 		if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
6428 			/* we are done with this control */
6429 			if (control->length == 0) {
6430 				if (control->data) {
6431 #ifdef INVARIANTS
6432 					panic("control->data not null at read eor?");
6433 #else
6434 					SCTP_PRINTF("Strange, data left in the control buffer. Invariants would panic?\n");
6435 					sctp_m_freem(control->data);
6436 					control->data = NULL;
6437 #endif
6438 				}
6439 			done_with_control:
6440 				if (TAILQ_NEXT(control, next) == NULL) {
6441 					/* If we don't have a next we need a
6442 					 * lock; if there is a next, the interrupt
6443 					 * is filling ahead of us and we don't
6444 					 * need a lock to remove this entry
6445 					 * (which is the head of the queue).
6446 					 */
6447 					if (hold_rlock == 0) {
6448 						SCTP_INP_READ_LOCK(inp);
6449 						hold_rlock = 1;
6450 					}
6451 				}
6452 				TAILQ_REMOVE(&inp->read_queue, control, next);
6453 				/* Add back any hidden data */
6454 				if (control->held_length) {
6455 					held_length = 0;
6456 					control->held_length = 0;
6457 					wakeup_read_socket = 1;
6458 				}
6459 				if (control->aux_data) {
6460 					sctp_m_free (control->aux_data);
6461 					control->aux_data = NULL;
6462 				}
6463 				no_rcv_needed = control->do_not_ref_stcb;
6464 				sctp_free_remote_addr(control->whoFrom);
6465 				control->data = NULL;
6466 				sctp_free_a_readq(stcb, control);
6467 				control = NULL;
6468 				if ((freed_so_far >= rwnd_req) &&
6469 				    (no_rcv_needed == 0))
6470 					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6471 
6472 			} else {
6473 				/*
6474 				 * The user did not read all of this
6475 				 * message, turn off the returned MSG_EOR
6476 				 * since we are leaving more behind on the
6477 				 * control to read.
6478 				 */
6479 #ifdef INVARIANTS
6480 				if (control->end_added &&
6481 				    (control->data == NULL) &&
6482 				    (control->tail_mbuf == NULL)) {
6483 					panic("Gak, control->length is corrupt?");
6484 				}
6485 #endif
6486 				no_rcv_needed = control->do_not_ref_stcb;
6487 				out_flags &= ~MSG_EOR;
6488 			}
6489 		}
6490 		if (out_flags & MSG_EOR) {
6491 			goto release;
6492 		}
6493 #if defined(__APPLE__)
6494 #if defined(APPLE_LEOPARD)
6495 		if ((uio->uio_resid == 0) ||
6496 #else
6497 		if ((uio_resid(uio) == 0) ||
6498 #endif
6499 #else
6500 		if ((uio->uio_resid == 0) ||
6501 #endif
6502 		    ((in_eeor_mode) &&
6503 		     (copied_so_far >= (uint32_t)max(so->so_rcv.sb_lowat, 1)))) {
6504 			goto release;
6505 		}
6506 		/*
6507 		 * If I hit here, the receiver wants more and this message is
6508 		 * NOT done (pd-api). So two questions: can we block? If not,
6509 		 * we are done. Did the user NOT set MSG_WAITALL?
6510 		 */
6511 		if (block_allowed == 0) {
6512 			goto release;
6513 		}
6514 		/*
6515 		 * We need to wait for more data. A few things: - We don't
6516 		 * sbunlock() so we don't get someone else reading. - We
6517 		 * must be sure to account for the case where what is added
6518 		 * is NOT to our control when we wake up.
6519 		 */
6520 
6521 		/* Do we need to tell the transport a rwnd update might be
6522 		 * needed before we go to sleep?
6523 		 */
6524 		if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
6525 		    ((freed_so_far >= rwnd_req) &&
6526 		     (control->do_not_ref_stcb == 0) &&
6527 		     (no_rcv_needed == 0))) {
6528 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6529 		}
6530 	wait_some_more:
6531 #if (defined(__FreeBSD__) && __FreeBSD_version > 500000) || defined(__Windows__)
6532 		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
6533 			goto release;
6534 		}
6535 #else
6536 		if (so->so_state & SS_CANTRCVMORE) {
6537 			goto release;
6538 		}
6539 #endif
6540 
6541 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
6542 			goto release;
6543 
6544 		if (hold_rlock == 1) {
6545 			SCTP_INP_READ_UNLOCK(inp);
6546 			hold_rlock = 0;
6547 		}
6548 		if (hold_sblock == 0) {
6549 			SOCKBUF_LOCK(&so->so_rcv);
6550 			hold_sblock = 1;
6551 		}
6552 		if ((copied_so_far) && (control->length == 0) &&
6553 		    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) {
6554 			goto release;
6555 		}
6556 #if defined(__APPLE__)
6557 		sbunlock(&so->so_rcv, 1);
6558 #endif
6559 		if (so->so_rcv.sb_cc <= control->held_length) {
6560 			error = sbwait(&so->so_rcv);
6561 			if (error) {
6562 #if defined(__FreeBSD__)
6563 				goto release;
6564 #else
6565 				goto release_unlocked;
6566 #endif
6567 			}
6568 			control->held_length = 0;
6569 		}
6570 #if defined(__APPLE__)
6571 		error = sblock(&so->so_rcv, SBLOCKWAIT(in_flags));
6572 #endif
6573 		if (hold_sblock) {
6574 			SOCKBUF_UNLOCK(&so->so_rcv);
6575 			hold_sblock = 0;
6576 		}
6577 		if (control->length == 0) {
6578 			/* still nothing here */
6579 			if (control->end_added == 1) {
6580 				/* the peer aborted, or is done, i.e. did a shutdown */
6581 				out_flags |= MSG_EOR;
6582 				if (control->pdapi_aborted) {
6583 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
6584 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6585 
6586 					out_flags |= MSG_TRUNC;
6587 				} else {
6588 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
6589 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6590 				}
6591 				goto done_with_control;
6592 			}
6593 			if (so->so_rcv.sb_cc > held_length) {
6594 				control->held_length = so->so_rcv.sb_cc;
6595 				held_length = 0;
6596 			}
6597 			goto wait_some_more;
6598 		} else if (control->data == NULL) {
6599 			/* we must re-sync since data
6600 			 * is probably being added
6601 			 */
6602 			SCTP_INP_READ_LOCK(inp);
6603 			if ((control->length > 0) && (control->data == NULL)) {
6604 				/* big trouble: we have the lock and it's corrupt? */
6605 #ifdef INVARIANTS
6606 				panic ("Impossible data==NULL length !=0");
6607 #endif
6608 				out_flags |= MSG_EOR;
6609 				out_flags |= MSG_TRUNC;
6610 				control->length = 0;
6611 				SCTP_INP_READ_UNLOCK(inp);
6612 				goto done_with_control;
6613 			}
6614 			SCTP_INP_READ_UNLOCK(inp);
6615 			/* We will fall around to get more data */
6616 		}
6617 		goto get_more_data;
6618 	} else {
6619 		/*-
6620 		 * Give caller back the mbuf chain,
6621 		 * store in uio_resid the length
6622 		 */
6623 		wakeup_read_socket = 0;
6624 		if ((control->end_added == 0) ||
6625 		    (TAILQ_NEXT(control, next) == NULL)) {
6626 			/* Need to get rlock */
6627 			if (hold_rlock == 0) {
6628 				SCTP_INP_READ_LOCK(inp);
6629 				hold_rlock = 1;
6630 			}
6631 		}
6632 		if (control->end_added) {
6633 			out_flags |= MSG_EOR;
6634 			if ((control->do_not_ref_stcb == 0) &&
6635 			    (control->stcb != NULL) &&
6636 			    ((control->spec_flags & M_NOTIFICATION) == 0))
6637 				control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6638 		}
6639 		if (control->spec_flags & M_NOTIFICATION) {
6640 			out_flags |= MSG_NOTIFICATION;
6641 		}
6642 #if defined(__APPLE__)
6643 #if defined(APPLE_LEOPARD)
6644 		uio->uio_resid = control->length;
6645 #else
6646 		uio_setresid(uio, control->length);
6647 #endif
6648 #else
6649 		uio->uio_resid = control->length;
6650 #endif
6651 		*mp = control->data;
6652 		m = control->data;
6653 		while (m) {
6654 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6655 				sctp_sblog(&so->so_rcv,
6656 				   control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
6657 			}
6658 			sctp_sbfree(control, stcb, &so->so_rcv, m);
6659 			freed_so_far += SCTP_BUF_LEN(m);
6660 			freed_so_far += MSIZE;
6661 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6662 				sctp_sblog(&so->so_rcv,
6663 				   control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBRESULT, 0);
6664 			}
6665 			m = SCTP_BUF_NEXT(m);
6666 		}
6667 		control->data = control->tail_mbuf = NULL;
6668 		control->length = 0;
6669 		if (out_flags & MSG_EOR) {
6670 			/* Done with this control */
6671 			goto done_with_control;
6672 		}
6673 	}
6674  release:
6675 	if (hold_rlock == 1) {
6676 		SCTP_INP_READ_UNLOCK(inp);
6677 		hold_rlock = 0;
6678 	}
6679 #if (defined(__FreeBSD__) && __FreeBSD_version < 700000) || defined(__Userspace__)
6680 	if (hold_sblock == 0) {
6681 		SOCKBUF_LOCK(&so->so_rcv);
6682 		hold_sblock = 1;
6683 	}
6684 #else
6685 	if (hold_sblock == 1) {
6686 		SOCKBUF_UNLOCK(&so->so_rcv);
6687 		hold_sblock = 0;
6688 	}
6689 #endif
6690 #if defined(__APPLE__)
6691 	sbunlock(&so->so_rcv, 1);
6692 #endif
6693 
6694 #if defined(__FreeBSD__)
6695 	sbunlock(&so->so_rcv);
6696 #if defined(__FreeBSD__) && __FreeBSD_version >= 700000
6697 	sockbuf_lock = 0;
6698 #endif
6699 #endif
6700 
6701  release_unlocked:
6702 	if (hold_sblock) {
6703 		SOCKBUF_UNLOCK(&so->so_rcv);
6704 		hold_sblock = 0;
6705 	}
6706 	if ((stcb) && (in_flags & MSG_PEEK) == 0) {
6707 		if ((freed_so_far >= rwnd_req) &&
6708 		    (control && (control->do_not_ref_stcb == 0)) &&
6709 		    (no_rcv_needed == 0))
6710 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6711 	}
6712  out:
6713 	if (msg_flags) {
6714 		*msg_flags = out_flags;
6715 	}
6716 	if (((out_flags & MSG_EOR) == 0) &&
6717 	    ((in_flags & MSG_PEEK) == 0) &&
6718 	    (sinfo) &&
6719 	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
6720 	     sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) {
6721 		struct sctp_extrcvinfo *s_extra;
6722 		s_extra = (struct sctp_extrcvinfo *)sinfo;
6723 		s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
6724 	}
6725 	if (hold_rlock == 1) {
6726 		SCTP_INP_READ_UNLOCK(inp);
6727 	}
6728 	if (hold_sblock) {
6729 		SOCKBUF_UNLOCK(&so->so_rcv);
6730 	}
6731 #if defined(__FreeBSD__) && __FreeBSD_version >= 700000
6732 	if (sockbuf_lock) {
6733 		sbunlock(&so->so_rcv);
6734 	}
6735 #endif
6736 
6737 	if (freecnt_applied) {
6738 		/*
6739 		 * The lock on the socket buffer protects us, so the free
6740 		 * code will stop. But since we used the socket-buffer lock
6741 		 * while the sender uses the tcb_lock to increment, we need
6742 		 * an atomic add when dropping the refcnt.
6743 		 */
6744 		if (stcb == NULL) {
6745 #ifdef INVARIANTS
6746 			panic("stcb for refcnt has gone NULL?");
6747 			goto stage_left;
6748 #else
6749 			goto stage_left;
6750 #endif
6751 		}
6752 		atomic_add_int(&stcb->asoc.refcnt, -1);
6753 		/* Save the value back for next time */
6754 		stcb->freed_by_sorcv_sincelast = freed_so_far;
6755 	}
6756 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
6757 		if (stcb) {
6758 			sctp_misc_ints(SCTP_SORECV_DONE,
6759 				       freed_so_far,
6760 #if defined(__APPLE__)
6761 #if defined(APPLE_LEOPARD)
6762 				       ((uio) ? (slen - uio->uio_resid) : slen),
6763 #else
6764 				       ((uio) ? (slen - uio_resid(uio)) : slen),
6765 #endif
6766 #else
6767 				       ((uio) ? (slen - uio->uio_resid) : slen),
6768 #endif
6769 				       stcb->asoc.my_rwnd,
6770 				       so->so_rcv.sb_cc);
6771 		} else {
6772 			sctp_misc_ints(SCTP_SORECV_DONE,
6773 				       freed_so_far,
6774 #if defined(__APPLE__)
6775 #if defined(APPLE_LEOPARD)
6776 				       ((uio) ? (slen - uio->uio_resid) : slen),
6777 #else
6778 				       ((uio) ? (slen - uio_resid(uio)) : slen),
6779 #endif
6780 #else
6781 				       ((uio) ? (slen - uio->uio_resid) : slen),
6782 #endif
6783 				       0,
6784 				       so->so_rcv.sb_cc);
6785 		}
6786 	}
6787  stage_left:
6788 	if (wakeup_read_socket) {
6789 		sctp_sorwakeup(inp, so);
6790 	}
6791 	return (error);
6792 }
6793 
6794 
6795 #ifdef SCTP_MBUF_LOGGING
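/*
 * Logging wrappers around m_free()/m_freem(): when mbuf logging is enabled,
 * each mbuf carrying external storage is noted before it is released.  Only
 * compiled when SCTP_MBUF_LOGGING is defined.
 */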
6796 struct mbuf *
6797 sctp_m_free(struct mbuf *m)
6798 {
6799 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6800 		if (SCTP_BUF_IS_EXTENDED(m)) {
6801 			sctp_log_mb(m, SCTP_MBUF_IFREE);
6802 		}
6803 	}
6804 	return (m_free(m));
6805 }
6806 
6807 void sctp_m_freem(struct mbuf *mb)
6808 {
6809 	while (mb != NULL)
6810 		mb = sctp_m_free(mb);
6811 }
6812 
6813 #endif
6814 
6815 int
6816 sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
6817 {
6818 	/* Given a local address, request a peer-set-primary
6819 	 * for every association that holds the address.
6820 	 */
6821 	struct sctp_ifa *ifa;
6822 	struct sctp_laddr *wi;
6823 
6824 	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
6825 	if (ifa == NULL) {
6826 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
6827 		return (EADDRNOTAVAIL);
6828 	}
6829 	/* Now that we have the ifa we must awaken the
6830 	 * iterator with this message.
6831 	 */
6832 	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
6833 	if (wi == NULL) {
6834 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
6835 		return (ENOMEM);
6836 	}
6837 	/* Now incr the count and init the wi structure */
6838 	SCTP_INCR_LADDR_COUNT();
6839 	bzero(wi, sizeof(*wi));
6840 	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
6841 	wi->ifa = ifa;
6842 	wi->action = SCTP_SET_PRIM_ADDR;
6843 	atomic_add_int(&ifa->refcount, 1);
6844 
6845 	/* Now add it to the work queue */
6846 	SCTP_WQ_ADDR_LOCK();
6847 	/*
6848 	 * Should this really be a tailq? As it is we will process the
6849 	 * newest first :-0
6850 	 */
6851 	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
6852 	SCTP_WQ_ADDR_UNLOCK();
6853 	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
6854 			 (struct sctp_inpcb *)NULL,
6855 			 (struct sctp_tcb *)NULL,
6856 			 (struct sctp_nets *)NULL);
6857 	return (0);
6858 }
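
/*
 * Usage sketch (hypothetical caller, not part of this file): to ask every
 * peer to switch its primary path to a given local IPv4 address, one could do
 *
 *	struct sockaddr_in sin;
 *
 *	memset(&sin, 0, sizeof(sin));
 *	sin.sin_family = AF_INET;
 *	sin.sin_len = sizeof(sin);
 *	sin.sin_addr = local_addr;	(an address already bound on this host)
 *	(void)sctp_dynamic_set_primary((struct sockaddr *)&sin, SCTP_DEFAULT_VRFID);
 *
 * which queues an SCTP_SET_PRIM_ADDR work item and starts the ADDR_WQ timer;
 * the address work-queue handler then issues the peer-set-primary requests
 * asynchronously.
 */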
6859 
6860 #if defined(__Userspace__)
6861 /* no sctp_soreceive for __Userspace__ now */
6862 #endif
6863 
6864 #if !defined(__Userspace__)
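/*
 * Protocol soreceive() entry point: wraps sctp_sorecvmsg(), optionally
 * returning the peer address through *psa and, when the application enabled
 * any of the receive-info socket options and passed a control buffer, a cmsg
 * chain built by sctp_build_ctl_nchunk() through *controlp.
 */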
6865 int
6866 sctp_soreceive(	struct socket *so,
6867 		struct sockaddr **psa,
6868 		struct uio *uio,
6869 		struct mbuf **mp0,
6870 		struct mbuf **controlp,
6871 		int *flagsp)
6872 {
6873 	int error, fromlen;
6874 	uint8_t sockbuf[256];
6875 	struct sockaddr *from;
6876 	struct sctp_extrcvinfo sinfo;
6877 	int filling_sinfo = 1;
6878 	struct sctp_inpcb *inp;
6879 
6880 	inp = (struct sctp_inpcb *)so->so_pcb;
6881 	/* pickup the assoc we are reading from */
6882 	if (inp == NULL) {
6883 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6884 		return (EINVAL);
6885 	}
6886 	if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
6887 	     sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
6888 	     sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) ||
6889 	    (controlp == NULL)) {
6890 		/* user does not want the sndrcv ctl */
6891 		filling_sinfo = 0;
6892 	}
6893 	if (psa) {
6894 		from = (struct sockaddr *)sockbuf;
6895 		fromlen = sizeof(sockbuf);
6896 #ifdef HAVE_SA_LEN
6897 		from->sa_len = 0;
6898 #endif
6899 	} else {
6900 		from = NULL;
6901 		fromlen = 0;
6902 	}
6903 
6904 #if defined(__APPLE__)
6905 	SCTP_SOCKET_LOCK(so, 1);
6906 #endif
6907 	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, flagsp,
6908 	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
6909 	if ((controlp) && (filling_sinfo)) {
6910 		/* copy back the sinfo in a CMSG format */
6911 		if (filling_sinfo)
6912 			*controlp = sctp_build_ctl_nchunk(inp,
6913 			                                  (struct sctp_sndrcvinfo *)&sinfo);
6914 		else
6915 			*controlp = NULL;
6916 	}
6917 	if (psa) {
6918 		/* copy back the address info */
6919 #ifdef HAVE_SA_LEN
6920 		if (from && from->sa_len) {
6921 #else
6922 		if (from) {
6923 #endif
6924 #if (defined(__FreeBSD__) && __FreeBSD_version > 500000) || defined(__Windows__)
6925 			*psa = sodupsockaddr(from, M_NOWAIT);
6926 #else
6927 			*psa = dup_sockaddr(from, mp0 == 0);
6928 #endif
6929 		} else {
6930 			*psa = NULL;
6931 		}
6932 	}
6933 #if defined(__APPLE__)
6934 	SCTP_SOCKET_UNLOCK(so, 1);
6935 #endif
6936 	return (error);
6937 }
6938 
6939 
6940 #if (defined(__FreeBSD__) && __FreeBSD_version < 603000) || defined(__Windows__)
6941 /*
6942  * General routine to allocate a hash table with control of memory flags.
6943  * The system hashinit_flags() is available in FreeBSD 7.0 and beyond.
6944  */
6945 void *
6946 sctp_hashinit_flags(int elements, struct malloc_type *type,
6947                     u_long *hashmask, int flags)
6948 {
6949 	long hashsize;
6950 	LIST_HEAD(generic, generic) *hashtbl;
6951 	int i;
6952 
6953 
6954 	if (elements <= 0) {
6955 #ifdef INVARIANTS
6956 		panic("hashinit: bad elements");
6957 #else
6958 		SCTP_PRINTF("hashinit: bad elements?");
6959 		elements = 1;
6960 #endif
6961 	}
6962 	for (hashsize = 1; hashsize <= elements; hashsize <<= 1)
6963 		continue;
6964 	hashsize >>= 1;
6965 	if (flags & HASH_WAITOK)
6966 		hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl), type, M_WAITOK);
6967 	else if (flags & HASH_NOWAIT)
6968 		hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl), type, M_NOWAIT);
6969 	else {
6970 #ifdef INVARIANTS
6971 		panic("flag incorrect in hashinit_flags");
6972 #else
6973 		return (NULL);
6974 #endif
6975 	}
6976 
6977 	/* no memory? */
6978 	if (hashtbl == NULL)
6979 		return (NULL);
6980 
6981 	for (i = 0; i < hashsize; i++)
6982 		LIST_INIT(&hashtbl[i]);
6983 	*hashmask = hashsize - 1;
6984 	return (hashtbl);
6985 }
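/*
 * Sizing note: the loop above picks the largest power of two that does not
 * exceed 'elements', so (purely as an illustration) elements == 32 yields a
 * 32-bucket table with *hashmask == 31, while elements == 31 yields 16
 * buckets with *hashmask == 15.  Callers index buckets with (hash & *hashmask).
 */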
6986 #endif
6987 
6988 #else /*  __Userspace__ ifdef above sctp_soreceive */
6989 /*
6990  * __Userspace__: define sctp_hashinit_flags() and sctp_hashdestroy() for userland.
6991  * NOTE: We don't want multiple definitions here, so the sctp_hashinit_flags() above
6992  * for __FreeBSD__ must be excluded.
6993  *
6994  */
6995 
6996 void *
6997 sctp_hashinit_flags(int elements, struct malloc_type *type,
6998                     u_long *hashmask, int flags)
6999 {
7000 	long hashsize;
7001 	LIST_HEAD(generic, generic) *hashtbl;
7002 	int i;
7003 
7004 	if (elements <= 0) {
7005 		SCTP_PRINTF("hashinit: bad elements?");
7006 #ifdef INVARIANTS
7007 		return (NULL);
7008 #else
7009 		elements = 1;
7010 #endif
7011 	}
7012 	for (hashsize = 1; hashsize <= elements; hashsize <<= 1)
7013 		continue;
7014 	hashsize >>= 1;
7015 	/* Cannot use MALLOC here because it has to be declared or defined
7016 	 * using MALLOC_DECLARE or MALLOC_DEFINE first. */
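	/* In userland both HASH_WAITOK and HASH_NOWAIT end up in plain
	 * malloc(); the distinction only matters for a kernel allocator. */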
7017 	if (flags & HASH_WAITOK)
7018 		hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl));
7019 	else if (flags & HASH_NOWAIT)
7020 		hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl));
7021 	else {
7022 #ifdef INVARIANTS
7023 		SCTP_PRINTF("flag incorrect in hashinit_flags.\n");
7024 #endif
7025 		return (NULL);
7026 	}
7027 
7028 	/* no memory? */
7029 	if (hashtbl == NULL)
7030 		return (NULL);
7031 
7032 	for (i = 0; i < hashsize; i++)
7033 		LIST_INIT(&hashtbl[i]);
7034 	*hashmask = hashsize - 1;
7035 	return (hashtbl);
7036 }
7037 
7038 
7039 void
7040 sctp_hashdestroy(void *vhashtbl, struct malloc_type *type, u_long hashmask)
7041 {
7042 	LIST_HEAD(generic, generic) *hashtbl, *hp;
7043 
7044 	hashtbl = vhashtbl;
7045 	for (hp = hashtbl; hp <= &hashtbl[hashmask]; hp++)
7046 		if (!LIST_EMPTY(hp)) {
7047 			SCTP_PRINTF("hashdestroy: hash not empty.\n");
7048 			return;
7049 		}
7050 	FREE(hashtbl, type);
7051 }
7052 
7053 
7054 void
7055 sctp_hashfreedestroy(void *vhashtbl, struct malloc_type *type, u_long hashmask)
7056 {
7057 	LIST_HEAD(generic, generic) *hashtbl/*, *hp*/;
7058 	/*
7059 	LIST_ENTRY(type) *start, *temp;
7060 	 */
7061 	hashtbl = vhashtbl;
7062 	/* Apparently temp is not dynamically allocated, so attempts to
7063 	   free it result in an error.
7064 	for (hp = hashtbl; hp <= &hashtbl[hashmask]; hp++)
7065 		if (!LIST_EMPTY(hp)) {
7066 			start = LIST_FIRST(hp);
7067 			while (start != NULL) {
7068 				temp = start;
7069 				start = start->le_next;
7070 				SCTP_PRINTF("%s: %p \n", __func__, (void *)temp);
7071 				FREE(temp, type);
7072 			}
7073 		}
7074 	 */
7075 	FREE(hashtbl, type);
7076 }
7077 
7078 
7079 #endif
7080 
7081 
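/*
 * sctp_connectx() helper: walk the packed array of sockaddrs and add each
 * usable one to the association as a confirmed remote address.  Wildcard,
 * broadcast and multicast addresses are rejected; on any failure the
 * association is freed, *error is set and the number added so far is returned.
 */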
7082 int
7083 sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
7084 			 int totaddr, int *error)
7085 {
7086 	int added = 0;
7087 	int i;
7088 	struct sctp_inpcb *inp;
7089 	struct sockaddr *sa;
7090 	size_t incr = 0;
7091 #ifdef INET
7092 	struct sockaddr_in *sin;
7093 #endif
7094 #ifdef INET6
7095 	struct sockaddr_in6 *sin6;
7096 #endif
7097 
7098 	sa = addr;
7099 	inp = stcb->sctp_ep;
7100 	*error = 0;
7101 	for (i = 0; i < totaddr; i++) {
7102 		switch (sa->sa_family) {
7103 #ifdef INET
7104 		case AF_INET:
7105 			incr = sizeof(struct sockaddr_in);
7106 			sin = (struct sockaddr_in *)sa;
7107 			if ((sin->sin_addr.s_addr == INADDR_ANY) ||
7108 			    (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
7109 			    IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) {
7110 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7111 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ+SCTP_LOC_7);
7112 				*error = EINVAL;
7113 				goto out_now;
7114 			}
7115 			if (sctp_add_remote_addr(stcb, sa, NULL, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
7116 				/* assoc gone, no unlock needed */
7117 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
7118 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ+SCTP_LOC_7);
7119 				*error = ENOBUFS;
7120 				goto out_now;
7121 			}
7122 			added++;
7123 			break;
7124 #endif
7125 #ifdef INET6
7126 		case AF_INET6:
7127 			incr = sizeof(struct sockaddr_in6);
7128 			sin6 = (struct sockaddr_in6 *)sa;
7129 			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) ||
7130 			    IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
7131 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7132 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ+SCTP_LOC_8);
7133 				*error = EINVAL;
7134 				goto out_now;
7135 			}
7136 			if (sctp_add_remote_addr(stcb, sa, NULL, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
7137 				/* assoc gone, no unlock needed */
7138 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
7139 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ+SCTP_LOC_8);
7140 				*error = ENOBUFS;
7141 				goto out_now;
7142 			}
7143 			added++;
7144 			break;
7145 #endif
7146 #if defined(__Userspace__)
7147 		case AF_CONN:
7148 			incr = sizeof(struct sockaddr_in6);
7149 			if (sctp_add_remote_addr(stcb, sa, NULL, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
7150 				/* assoc gone, no unlock needed */
7151 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
7152 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ+SCTP_LOC_8);
7153 				*error = ENOBUFS;
7154 				goto out_now;
7155 			}
7156 			added++;
7157 			break;
7158 #endif
7159 		default:
7160 			break;
7161 		}
7162 		sa = (struct sockaddr *)((caddr_t)sa + incr);
7163 	}
7164  out_now:
7165 	return (added);
7166 }
7167 
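/*
 * sctp_connectx() helper: validate and count the packed address list,
 * tallying IPv4 and IPv6 entries, rejecting v4-mapped IPv6 addresses and bad
 * sa_len values, and returning an existing stcb if any of the addresses
 * already belongs to an association on this endpoint.
 */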
7168 struct sctp_tcb *
7169 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
7170 			  int *totaddr, int *num_v4, int *num_v6, int *error,
7171 			  int limit, int *bad_addr)
7172 {
7173 	struct sockaddr *sa;
7174 	struct sctp_tcb *stcb = NULL;
7175 	size_t incr, at, i;
7176 	at = incr = 0;
7177 	sa = addr;
7178 
7179 	*error = *num_v6 = *num_v4 = 0;
7180 	/* account and validate addresses */
7181 	for (i = 0; i < (size_t)*totaddr; i++) {
7182 		switch (sa->sa_family) {
7183 #ifdef INET
7184 		case AF_INET:
7185 			(*num_v4) += 1;
7186 			incr = sizeof(struct sockaddr_in);
7187 #ifdef HAVE_SA_LEN
7188 			if (sa->sa_len != incr) {
7189 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7190 				*error = EINVAL;
7191 				*bad_addr = 1;
7192 				return (NULL);
7193 			}
7194 #endif
7195 			break;
7196 #endif
7197 #ifdef INET6
7198 		case AF_INET6:
7199 		{
7200 			struct sockaddr_in6 *sin6;
7201 
7202 			sin6 = (struct sockaddr_in6 *)sa;
7203 			if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
7204 				/* Must be non-mapped for connectx */
7205 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7206 				*error = EINVAL;
7207 				*bad_addr = 1;
7208 				return (NULL);
7209 			}
7210 			(*num_v6) += 1;
7211 			incr = sizeof(struct sockaddr_in6);
7212 #ifdef HAVE_SA_LEN
7213 			if (sa->sa_len != incr) {
7214 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7215 				*error = EINVAL;
7216 				*bad_addr = 1;
7217 				return (NULL);
7218 			}
7219 #endif
7220 			break;
7221 		}
7222 #endif
7223 		default:
7224 			*totaddr = i;
7225 			/* we are done */
7226 			break;
7227 		}
7228 		if (i == (size_t)*totaddr) {
7229 			break;
7230 		}
7231 		SCTP_INP_INCR_REF(inp);
7232 		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
7233 		if (stcb != NULL) {
7234 			/* Already have or am bringing up an association */
7235 			return (stcb);
7236 		} else {
7237 			SCTP_INP_DECR_REF(inp);
7238 		}
7239 		if ((at + incr) > (size_t)limit) {
7240 			*totaddr = i;
7241 			break;
7242 		}
7243 		sa = (struct sockaddr *)((caddr_t)sa + incr);
7244 	}
7245 	return ((struct sctp_tcb *)NULL);
7246 }
7247 
7248 /*
7249  * sctp_bindx(ADD) for one address.
7250  * assumes all arguments are valid/checked by caller.
7251  */
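/*
 * If the endpoint is still unbound this becomes a full sctp_inpcb_bind();
 * otherwise the address is added to the already-bound endpoint through
 * sctp_addr_mgmt_ep_sa(), after checking that any non-zero port matches the
 * endpoint's local port.
 */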
7252 void
7253 sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
7254 		       struct sockaddr *sa, sctp_assoc_t assoc_id,
7255 		       uint32_t vrf_id, int *error, void *p)
7256 {
7257 	struct sockaddr *addr_touse;
7258 #ifdef INET6
7259 	struct sockaddr_in sin;
7260 #endif
7261 #ifdef SCTP_MVRF
7262 	int i, fnd = 0;
7263 #endif
7264 
7265 	/* see if we're bound all already! */
7266 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
7267 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7268 		*error = EINVAL;
7269 		return;
7270 	}
7271 #ifdef SCTP_MVRF
7272 	/* Is the VRF one we have? */
7273 	for (i = 0; i < inp->num_vrfs; i++) {
7274 		if (vrf_id == inp->m_vrf_ids[i]) {
7275 			fnd = 1;
7276 			break;
7277 		}
7278 	}
7279 	if (!fnd) {
7280 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7281 		*error = EINVAL;
7282 		return;
7283 	}
7284 #endif
7285 	addr_touse = sa;
7286 #ifdef INET6
7287 	if (sa->sa_family == AF_INET6) {
7288 		struct sockaddr_in6 *sin6;
7289 #ifdef HAVE_SA_LEN
7290 		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
7291 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7292 			*error = EINVAL;
7293 			return;
7294 		}
7295 #endif
7296 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
7297 			/* can only bind v6 on PF_INET6 sockets */
7298 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7299 			*error = EINVAL;
7300 			return;
7301 		}
7302 		sin6 = (struct sockaddr_in6 *)addr_touse;
7303 		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
7304 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
7305 			    SCTP_IPV6_V6ONLY(inp)) {
7306 				/* can't bind v4-mapped on PF_INET sockets */
7307 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7308 				*error = EINVAL;
7309 				return;
7310 			}
7311 			in6_sin6_2_sin(&sin, sin6);
7312 			addr_touse = (struct sockaddr *)&sin;
7313 		}
7314 	}
7315 #endif
7316 #ifdef INET
7317 	if (sa->sa_family == AF_INET) {
7318 #ifdef HAVE_SA_LEN
7319 		if (sa->sa_len != sizeof(struct sockaddr_in)) {
7320 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7321 			*error = EINVAL;
7322 			return;
7323 		}
7324 #endif
7325 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
7326 		    SCTP_IPV6_V6ONLY(inp)) {
7327 			/* can't bind v4 on PF_INET sockets */
7328 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7329 			*error = EINVAL;
7330 			return;
7331 		}
7332 	}
7333 #endif
7334 	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
7335 #if !(defined(__Panda__) || defined(__Windows__))
7336 		if (p == NULL) {
7337 			/* Can't get proc for Net/Open BSD */
7338 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7339 			*error = EINVAL;
7340 			return;
7341 		}
7342 #endif
7343 		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
7344 		return;
7345 	}
7346 	/*
7347 	 * No locks required here since bind and mgmt_ep_sa
7348 	 * all do their own locking. If we do something for
7349 	 * the FIX: below we may need to lock in that case.
7350 	 */
7351 	if (assoc_id == 0) {
7352 		/* add the address */
7353 		struct sctp_inpcb *lep;
7354 		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;
7355 
7356 		/* validate the incoming port */
7357 		if ((lsin->sin_port != 0) &&
7358 		    (lsin->sin_port != inp->sctp_lport)) {
7359 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7360 			*error = EINVAL;
7361 			return;
7362 		} else {
7363 			/* user specified 0 port, set it to existing port */
7364 			lsin->sin_port = inp->sctp_lport;
7365 		}
7366 
7367 		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
7368 		if (lep != NULL) {
7369 			/*
7370 			 * We must decrement the refcount
7371 			 * since we have the ep already and
7372 			 * are binding. No remove going on
7373 			 * here.
7374 			 */
7375 			SCTP_INP_DECR_REF(lep);
7376 		}
7377 		if (lep == inp) {
7378 			/* already bound to it.. ok */
7379 			return;
7380 		} else if (lep == NULL) {
7381 			((struct sockaddr_in *)addr_touse)->sin_port = 0;
7382 			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
7383 						      SCTP_ADD_IP_ADDRESS,
7384 						      vrf_id, NULL);
7385 		} else {
7386 			*error = EADDRINUSE;
7387 		}
7388 		if (*error)
7389 			return;
7390 	} else {
7391 		/*
7392 		 * FIX: decide whether we allow assoc based
7393 		 * bindx
7394 		 */
7395 	}
7396 }
7397 
7398 /*
7399  * sctp_bindx(DELETE) for one address.
7400  * assumes all arguments are valid/checked by caller.
7401  */
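/*
 * The delete path only needs sctp_addr_mgmt_ep_sa(SCTP_DEL_IP_ADDRESS); the
 * same address-family, sa_len and v4-mapped checks as the ADD case are
 * applied first.
 */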
7402 void
7403 sctp_bindx_delete_address(struct sctp_inpcb *inp,
7404 			  struct sockaddr *sa, sctp_assoc_t assoc_id,
7405 			  uint32_t vrf_id, int *error)
7406 {
7407 	struct sockaddr *addr_touse;
7408 #ifdef INET6
7409 	struct sockaddr_in sin;
7410 #endif
7411 #ifdef SCTP_MVRF
7412 	int i, fnd = 0;
7413 #endif
7414 
7415 	/* see if we're bound all already! */
7416 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
7417 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7418 		*error = EINVAL;
7419 		return;
7420 	}
7421 #ifdef SCTP_MVRF
7422 	/* Is the VRF one we have? */
7423 	for (i = 0; i < inp->num_vrfs; i++) {
7424 		if (vrf_id == inp->m_vrf_ids[i]) {
7425 			fnd = 1;
7426 			break;
7427 		}
7428 	}
7429 	if (!fnd) {
7430 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7431 		*error = EINVAL;
7432 		return;
7433 	}
7434 #endif
7435 	addr_touse = sa;
7436 #ifdef INET6
7437 	if (sa->sa_family == AF_INET6) {
7438 		struct sockaddr_in6 *sin6;
7439 #ifdef HAVE_SA_LEN
7440 		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
7441 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7442 			*error = EINVAL;
7443 			return;
7444 		}
7445 #endif
7446 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
7447 			/* can only bind v6 on PF_INET6 sockets */
7448 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7449 			*error = EINVAL;
7450 			return;
7451 		}
7452 		sin6 = (struct sockaddr_in6 *)addr_touse;
7453 		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
7454 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
7455 			    SCTP_IPV6_V6ONLY(inp)) {
7456 				/* can't bind mapped-v4 on PF_INET sockets */
7457 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7458 				*error = EINVAL;
7459 				return;
7460 			}
7461 			in6_sin6_2_sin(&sin, sin6);
7462 			addr_touse = (struct sockaddr *)&sin;
7463 		}
7464 	}
7465 #endif
7466 #ifdef INET
7467 	if (sa->sa_family == AF_INET) {
7468 #ifdef HAVE_SA_LEN
7469 		if (sa->sa_len != sizeof(struct sockaddr_in)) {
7470 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7471 			*error = EINVAL;
7472 			return;
7473 		}
7474 #endif
7475 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
7476 		    SCTP_IPV6_V6ONLY(inp)) {
7477 			/* can't bind v4 on PF_INET sockets */
7478 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7479 			*error = EINVAL;
7480 			return;
7481 		}
7482 	}
7483 #endif
7484 	/*
7485 	 * No lock required mgmt_ep_sa does its own locking.
7486 	 * If the FIX: below is ever changed we may need to
7487 	 * lock before calling association level binding.
7488 	 */
7489 	if (assoc_id == 0) {
7490 		/* delete the address */
7491 		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
7492 					      SCTP_DEL_IP_ADDRESS,
7493 					      vrf_id, NULL);
7494 	} else {
7495 		/*
7496 		 * FIX: decide whether we allow assoc based
7497 		 * bindx
7498 		 */
7499 	}
7500 }
7501 
7502 /*
7503  * returns the valid local address count for an assoc, taking into account
7504  * all scoping rules
7505  */
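/*
 * For a bound-all endpoint this walks every address on the association's VRF
 * and applies the association's scope flags; for a subset-bound endpoint it
 * simply counts the endpoint's address list, skipping restricted addresses.
 */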
7506 int
7507 sctp_local_addr_count(struct sctp_tcb *stcb)
7508 {
7509 	int loopback_scope;
7510 #if defined(INET)
7511 	int ipv4_local_scope, ipv4_addr_legal;
7512 #endif
7513 #if defined (INET6)
7514 	int local_scope, site_scope, ipv6_addr_legal;
7515 #endif
7516 #if defined(__Userspace__)
7517 	int conn_addr_legal;
7518 #endif
7519 	struct sctp_vrf *vrf;
7520 	struct sctp_ifn *sctp_ifn;
7521 	struct sctp_ifa *sctp_ifa;
7522 	int count = 0;
7523 
7524 	/* Turn on all the appropriate scopes */
7525 	loopback_scope = stcb->asoc.scope.loopback_scope;
7526 #if defined(INET)
7527 	ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope;
7528 	ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal;
7529 #endif
7530 #if defined(INET6)
7531 	local_scope = stcb->asoc.scope.local_scope;
7532 	site_scope = stcb->asoc.scope.site_scope;
7533 	ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal;
7534 #endif
7535 #if defined(__Userspace__)
7536 	conn_addr_legal = stcb->asoc.scope.conn_addr_legal;
7537 #endif
7538 	SCTP_IPI_ADDR_RLOCK();
7539 	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
7540 	if (vrf == NULL) {
7541 		/* no vrf, no addresses */
7542 		SCTP_IPI_ADDR_RUNLOCK();
7543 		return (0);
7544 	}
7545 
7546 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
7547 		/*
7548 		 * bound all case: go through all ifns on the vrf
7549 		 */
7550 		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
7551 			if ((loopback_scope == 0) &&
7552 			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
7553 				continue;
7554 			}
7555 			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
7556 				if (sctp_is_addr_restricted(stcb, sctp_ifa))
7557 					continue;
7558 				switch (sctp_ifa->address.sa.sa_family) {
7559 #ifdef INET
7560 				case AF_INET:
7561 					if (ipv4_addr_legal) {
7562 						struct sockaddr_in *sin;
7563 
7564 						sin = (struct sockaddr_in *)&sctp_ifa->address.sa;
7565 						if (sin->sin_addr.s_addr == 0) {
7566 							/* skip unspecified addrs */
7567 							continue;
7568 						}
7569 						if ((ipv4_local_scope == 0) &&
7570 						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
7571 							continue;
7572 						}
7573 						/* count this one */
7574 						count++;
7575 					} else {
7576 						continue;
7577 					}
7578 					break;
7579 #endif
7580 #ifdef INET6
7581 				case AF_INET6:
7582 					if (ipv6_addr_legal) {
7583 						struct sockaddr_in6 *sin6;
7584 
7585 #if defined(SCTP_EMBEDDED_V6_SCOPE) && !defined(SCTP_KAME)
7586 						struct sockaddr_in6 lsa6;
7587 #endif
7588 						sin6 = (struct sockaddr_in6 *)&sctp_ifa->address.sa;
7589 						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
7590 							continue;
7591 						}
7592 						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
7593 							if (local_scope == 0)
7594 								continue;
7595 #if defined(SCTP_EMBEDDED_V6_SCOPE)
7596 							if (sin6->sin6_scope_id == 0) {
7597 #ifdef SCTP_KAME
7598 								if (sa6_recoverscope(sin6) != 0)
7599 									/*
7600 									 * bad link
7601 									 * local
7602 									 * address
7603 									 */
7604 									continue;
7605 #else
7606 								lsa6 = *sin6;
7607 								if (in6_recoverscope(&lsa6,
7608 								                     &lsa6.sin6_addr,
7609 								                     NULL))
7610 									/*
7611 									 * bad link
7612 									 * local
7613 									 * address
7614 									 */
7615 									continue;
7616 								sin6 = &lsa6;
7617 #endif /* SCTP_KAME */
7618 							}
7619 #endif /* SCTP_EMBEDDED_V6_SCOPE */
7620 						}
7621 						if ((site_scope == 0) &&
7622 						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
7623 							continue;
7624 						}
7625 						/* count this one */
7626 						count++;
7627 					}
7628 					break;
7629 #endif
7630 #if defined(__Userspace__)
7631 				case AF_CONN:
7632 					if (conn_addr_legal) {
7633 						count++;
7634 					}
7635 					break;
7636 #endif
7637 				default:
7638 					/* TSNH */
7639 					break;
7640 				}
7641 			}
7642 		}
7643 	} else {
7644 		/*
7645 		 * subset bound case
7646 		 */
7647 		struct sctp_laddr *laddr;
7648 		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
7649 			     sctp_nxt_addr) {
7650 			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
7651 				continue;
7652 			}
7653 			/* count this one */
7654 			count++;
7655 		}
7656 	}
7657 	SCTP_IPI_ADDR_RUNLOCK();
7658 	return (count);
7659 }
7660 
7661 #if defined(SCTP_LOCAL_TRACE_BUF)
7662 
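/*
 * Append one entry to the local trace ring: atomically claim the next slot
 * (wrapping at SCTP_MAX_LOGGING_SIZE) and record a cycle-count timestamp, the
 * subsystem id and the six caller-supplied parameters.
 */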
7663 void
7664 sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
7665 {
7666 	uint32_t saveindex, newindex;
7667 
7668 #if defined(__Windows__)
7669 	if (SCTP_BASE_SYSCTL(sctp_log) == NULL) {
7670 		return;
7671 	}
7672 	do {
7673 		saveindex = SCTP_BASE_SYSCTL(sctp_log)->index;
7674 		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
7675 			newindex = 1;
7676 		} else {
7677 			newindex = saveindex + 1;
7678 		}
7679 	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log)->index, saveindex, newindex) == 0);
7680 	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
7681 		saveindex = 0;
7682 	}
7683 	SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
7684 	SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].subsys = subsys;
7685 	SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].params[0] = a;
7686 	SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].params[1] = b;
7687 	SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].params[2] = c;
7688 	SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].params[3] = d;
7689 	SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].params[4] = e;
7690 	SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].params[5] = f;
7691 #else
7692 	do {
7693 		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
7694 		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
7695 			newindex = 1;
7696 		} else {
7697 			newindex = saveindex + 1;
7698 		}
7699 	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
7700 	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
7701 		saveindex = 0;
7702 	}
7703 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
7704 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
7705 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
7706 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
7707 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
7708 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
7709 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
7710 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
7711 #endif
7712 }
7713 
7714 #endif
7715 #if defined(__FreeBSD__)
7716 #if __FreeBSD_version >= 800044
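/*
 * Tunneling callback handed to udp_set_kernel_tunneling(): strip the UDP
 * header in front of the encapsulated SCTP packet, splice the payload back
 * onto the mbuf holding the IP header, fix up the IP/IPv6 length field and
 * feed the packet to the normal SCTP input path along with the UDP source
 * port.
 */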
7717 static void
7718 sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *ignored)
7719 {
7720 	struct ip *iph;
7721 #ifdef INET6
7722 	struct ip6_hdr *ip6;
7723 #endif
7724 	struct mbuf *sp, *last;
7725 	struct udphdr *uhdr;
7726 	uint16_t port;
7727 
7728 	if ((m->m_flags & M_PKTHDR) == 0) {
7729 		/* Can't handle one that is not a pkt hdr */
7730 		goto out;
7731 	}
7732 	/* Pull the src port */
7733 	iph = mtod(m, struct ip *);
7734 	uhdr = (struct udphdr *)((caddr_t)iph + off);
7735 	port = uhdr->uh_sport;
7736 	/* Split out the mbuf chain. Leave the
7737 	 * IP header in m, place the
7738 	 * rest in sp.
7739 	 */
7740 	sp = m_split(m, off, M_NOWAIT);
7741 	if (sp == NULL) {
7742 		/* Gak, drop packet, we can't do a split */
7743 		goto out;
7744 	}
7745 	if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) {
7746 		/* Gak, packet can't have an SCTP header in it - too small */
7747 		m_freem(sp);
7748 		goto out;
7749 	}
7750 	/* Now pull up the UDP header and SCTP header together */
7751 	sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr));
7752 	if (sp == NULL) {
7753 		/* Gak pullup failed */
7754 		goto out;
7755 	}
7756 	/* Trim out the UDP header */
7757 	m_adj(sp, sizeof(struct udphdr));
7758 
7759 	/* Now reconstruct the mbuf chain */
7760 	for (last = m; last->m_next; last = last->m_next);
7761 	last->m_next = sp;
7762 	m->m_pkthdr.len += sp->m_pkthdr.len;
7763 	iph = mtod(m, struct ip *);
7764 	switch (iph->ip_v) {
7765 #ifdef INET
7766 	case IPVERSION:
7767 #if __FreeBSD_version >= 1000000
7768 		iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr));
7769 #else
7770 		iph->ip_len -= sizeof(struct udphdr);
7771 #endif
7772 		sctp_input_with_port(m, off, port);
7773 		break;
7774 #endif
7775 #ifdef INET6
7776 	case IPV6_VERSION >> 4:
7777 		ip6 = mtod(m, struct ip6_hdr *);
7778 		ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr));
7779 		sctp6_input_with_port(&m, &off, port);
7780 		break;
7781 #endif
7782 	default:
7783 		goto out;
7784 		break;
7785 	}
7786 	return;
7787  out:
7788 	m_freem(m);
7789 }
7790 #endif
7791 
7792 void
7793 sctp_over_udp_stop(void)
7794 {
7795 	/*
7796 	 * This function assumes the sysctl caller holds sctp_sysctl_info_lock() for writing!
7797 	 */
7798 #ifdef INET
7799 	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
7800 		soclose(SCTP_BASE_INFO(udp4_tun_socket));
7801 		SCTP_BASE_INFO(udp4_tun_socket) = NULL;
7802 	}
7803 #endif
7804 #ifdef INET6
7805 	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
7806 		soclose(SCTP_BASE_INFO(udp6_tun_socket));
7807 		SCTP_BASE_INFO(udp6_tun_socket) = NULL;
7808 	}
7809 #endif
7810 }
7811 
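/*
 * Create the kernel UDP tunneling sockets (IPv4 and/or IPv6), register
 * sctp_recv_udp_tunneled_packet() as their tunneling hook and bind them to
 * the configured sctp_udp_tunneling_port.  Any failure tears everything down
 * again via sctp_over_udp_stop().
 */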
7812 int
7813 sctp_over_udp_start(void)
7814 {
7815 #if __FreeBSD_version >= 800044
7816 	uint16_t port;
7817 	int ret;
7818 #ifdef INET
7819 	struct sockaddr_in sin;
7820 #endif
7821 #ifdef INET6
7822 	struct sockaddr_in6 sin6;
7823 #endif
7824 	/*
7825 	 * This function assumes the sysctl caller holds sctp_sysctl_info_lock() for writing!
7826 	 */
7827 	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
7828 	if (ntohs(port) == 0) {
7829 		/* Must have a port set */
7830 		return (EINVAL);
7831 	}
7832 #ifdef INET
7833 	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
7834 		/* Already running -- must stop first */
7835 		return (EALREADY);
7836 	}
7837 #endif
7838 #ifdef INET6
7839 	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
7840 		/* Already running -- must stop first */
7841 		return (EALREADY);
7842 	}
7843 #endif
7844 #ifdef INET
7845 	if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket),
7846 	                    SOCK_DGRAM, IPPROTO_UDP,
7847 	                    curthread->td_ucred, curthread))) {
7848 		sctp_over_udp_stop();
7849 		return (ret);
7850 	}
7851 	/* Call the special UDP hook. */
7852 	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket),
7853 	                                    sctp_recv_udp_tunneled_packet))) {
7854 		sctp_over_udp_stop();
7855 		return (ret);
7856 	}
7857 	/* Ok, we have a socket, bind it to the port. */
7858 	memset(&sin, 0, sizeof(struct sockaddr_in));
7859 	sin.sin_len = sizeof(struct sockaddr_in);
7860 	sin.sin_family = AF_INET;
7861 	sin.sin_port = htons(port);
7862 	if ((ret = sobind(SCTP_BASE_INFO(udp4_tun_socket),
7863 	                  (struct sockaddr *)&sin, curthread))) {
7864 		sctp_over_udp_stop();
7865 		return (ret);
7866 	}
7867 #endif
7868 #ifdef INET6
7869 	if ((ret = socreate(PF_INET6, &SCTP_BASE_INFO(udp6_tun_socket),
7870 	                    SOCK_DGRAM, IPPROTO_UDP,
7871 	                    curthread->td_ucred, curthread))) {
7872 		sctp_over_udp_stop();
7873 		return (ret);
7874 	}
7875 	/* Call the special UDP hook. */
7876 	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp6_tun_socket),
7877 	                                    sctp_recv_udp_tunneled_packet))) {
7878 		sctp_over_udp_stop();
7879 		return (ret);
7880 	}
7881 	/* Ok, we have a socket, bind it to the port. */
7882 	memset(&sin6, 0, sizeof(struct sockaddr_in6));
7883 	sin6.sin6_len = sizeof(struct sockaddr_in6);
7884 	sin6.sin6_family = AF_INET6;
7885 	sin6.sin6_port = htons(port);
7886 	if ((ret = sobind(SCTP_BASE_INFO(udp6_tun_socket),
7887 	                  (struct sockaddr *)&sin6, curthread))) {
7888 		sctp_over_udp_stop();
7889 		return (ret);
7890 	}
7891 #endif
7892 	return (0);
7893 #else
7894 	return (ENOTSUP);
7895 #endif
7896 }
7897 #endif
7898