1 /*-
2 * SPDX-License-Identifier: BSD-3-Clause
3 *
4 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
5 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
6 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * a) Redistributions of source code must retain the above copyright notice,
12 * this list of conditions and the following disclaimer.
13 *
14 * b) Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in
16 * the documentation and/or other materials provided with the distribution.
17 *
18 * c) Neither the name of Cisco Systems, Inc. nor the names of its
19 * contributors may be used to endorse or promote products derived
20 * from this software without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
24 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32 * THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35 #if defined(__FreeBSD__) && !defined(__Userspace__)
36 #include <sys/cdefs.h>
37 __FBSDID("$FreeBSD: head/sys/netinet/sctputil.c 363323 2020-07-19 12:34:19Z tuexen $");
38 #endif
39
40 #include <netinet/sctp_os.h>
41 #include <netinet/sctp_pcb.h>
42 #include <netinet/sctputil.h>
43 #include <netinet/sctp_var.h>
44 #include <netinet/sctp_sysctl.h>
45 #ifdef INET6
46 #if defined(__Userspace__) || defined(__FreeBSD__)
47 #include <netinet6/sctp6_var.h>
48 #endif
49 #endif
50 #include <netinet/sctp_header.h>
51 #include <netinet/sctp_output.h>
52 #include <netinet/sctp_uio.h>
53 #include <netinet/sctp_timer.h>
54 #include <netinet/sctp_indata.h>
55 #include <netinet/sctp_auth.h>
56 #include <netinet/sctp_asconf.h>
57 #include <netinet/sctp_bsd_addr.h>
58 #if defined(__Userspace__)
59 #include <netinet/sctp_constants.h>
60 #endif
61 #if defined(__FreeBSD__) && !defined(__Userspace__)
62 #include <netinet/sctp_kdtrace.h>
63 #if defined(INET6) || defined(INET)
64 #include <netinet/tcp_var.h>
65 #endif
66 #include <netinet/udp.h>
67 #include <netinet/udp_var.h>
68 #include <sys/proc.h>
69 #ifdef INET6
70 #include <netinet/icmp6.h>
71 #endif
72 #endif
73
74 #if defined(_WIN32) && !defined(__Userspace__)
75 #if !defined(SCTP_LOCAL_TRACE_BUF)
76 #include "eventrace_netinet.h"
77 #include "sctputil.tmh" /* this is the file that will be auto generated */
78 #endif
79 #else
80 #ifndef KTR_SCTP
81 #define KTR_SCTP KTR_SUBSYS
82 #endif
83 #endif
84
85 extern const struct sctp_cc_functions sctp_cc_functions[];
86 extern const struct sctp_ss_functions sctp_ss_functions[];
87
88 void
89 sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr)
90 {
91 #if defined(SCTP_LOCAL_TRACE_BUF)
92 struct sctp_cwnd_log sctp_clog;
93
94 sctp_clog.x.sb.stcb = stcb;
95 sctp_clog.x.sb.so_sbcc = sb->sb_cc;
96 if (stcb)
97 sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
98 else
99 sctp_clog.x.sb.stcb_sbcc = 0;
100 sctp_clog.x.sb.incr = incr;
101 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
102 SCTP_LOG_EVENT_SB,
103 from,
104 sctp_clog.x.misc.log1,
105 sctp_clog.x.misc.log2,
106 sctp_clog.x.misc.log3,
107 sctp_clog.x.misc.log4);
108 #endif
109 }
110
111 void
112 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
113 {
114 #if defined(SCTP_LOCAL_TRACE_BUF)
115 struct sctp_cwnd_log sctp_clog;
116
117 sctp_clog.x.close.inp = (void *)inp;
118 sctp_clog.x.close.sctp_flags = inp->sctp_flags;
119 if (stcb) {
120 sctp_clog.x.close.stcb = (void *)stcb;
121 sctp_clog.x.close.state = (uint16_t)stcb->asoc.state;
122 } else {
123 sctp_clog.x.close.stcb = 0;
124 sctp_clog.x.close.state = 0;
125 }
126 sctp_clog.x.close.loc = loc;
127 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
128 SCTP_LOG_EVENT_CLOSE,
129 0,
130 sctp_clog.x.misc.log1,
131 sctp_clog.x.misc.log2,
132 sctp_clog.x.misc.log3,
133 sctp_clog.x.misc.log4);
134 #endif
135 }
136
137 void
138 rto_logging(struct sctp_nets *net, int from)
139 {
140 #if defined(SCTP_LOCAL_TRACE_BUF)
141 struct sctp_cwnd_log sctp_clog;
142
143 memset(&sctp_clog, 0, sizeof(sctp_clog));
144 sctp_clog.x.rto.net = (void *) net;
145 sctp_clog.x.rto.rtt = net->rtt / 1000;
146 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
147 SCTP_LOG_EVENT_RTT,
148 from,
149 sctp_clog.x.misc.log1,
150 sctp_clog.x.misc.log2,
151 sctp_clog.x.misc.log3,
152 sctp_clog.x.misc.log4);
153 #endif
154 }
155
156 void
157 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
158 {
159 #if defined(SCTP_LOCAL_TRACE_BUF)
160 struct sctp_cwnd_log sctp_clog;
161
162 sctp_clog.x.strlog.stcb = stcb;
163 sctp_clog.x.strlog.n_tsn = tsn;
164 sctp_clog.x.strlog.n_sseq = sseq;
165 sctp_clog.x.strlog.e_tsn = 0;
166 sctp_clog.x.strlog.e_sseq = 0;
167 sctp_clog.x.strlog.strm = stream;
168 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
169 SCTP_LOG_EVENT_STRM,
170 from,
171 sctp_clog.x.misc.log1,
172 sctp_clog.x.misc.log2,
173 sctp_clog.x.misc.log3,
174 sctp_clog.x.misc.log4);
175 #endif
176 }
177
178 void
179 sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
180 {
181 #if defined(SCTP_LOCAL_TRACE_BUF)
182 struct sctp_cwnd_log sctp_clog;
183
184 sctp_clog.x.nagle.stcb = (void *)stcb;
185 sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
186 sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
187 sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
188 sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
189 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
190 SCTP_LOG_EVENT_NAGLE,
191 action,
192 sctp_clog.x.misc.log1,
193 sctp_clog.x.misc.log2,
194 sctp_clog.x.misc.log3,
195 sctp_clog.x.misc.log4);
196 #endif
197 }
198
199 void
200 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
201 {
202 #if defined(SCTP_LOCAL_TRACE_BUF)
203 struct sctp_cwnd_log sctp_clog;
204
205 sctp_clog.x.sack.cumack = cumack;
206 sctp_clog.x.sack.oldcumack = old_cumack;
207 sctp_clog.x.sack.tsn = tsn;
208 sctp_clog.x.sack.numGaps = gaps;
209 sctp_clog.x.sack.numDups = dups;
210 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
211 SCTP_LOG_EVENT_SACK,
212 from,
213 sctp_clog.x.misc.log1,
214 sctp_clog.x.misc.log2,
215 sctp_clog.x.misc.log3,
216 sctp_clog.x.misc.log4);
217 #endif
218 }
219
220 void
221 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
222 {
223 #if defined(SCTP_LOCAL_TRACE_BUF)
224 struct sctp_cwnd_log sctp_clog;
225
226 memset(&sctp_clog, 0, sizeof(sctp_clog));
227 sctp_clog.x.map.base = map;
228 sctp_clog.x.map.cum = cum;
229 sctp_clog.x.map.high = high;
230 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
231 SCTP_LOG_EVENT_MAP,
232 from,
233 sctp_clog.x.misc.log1,
234 sctp_clog.x.misc.log2,
235 sctp_clog.x.misc.log3,
236 sctp_clog.x.misc.log4);
237 #endif
238 }
239
240 void
241 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from)
242 {
243 #if defined(SCTP_LOCAL_TRACE_BUF)
244 struct sctp_cwnd_log sctp_clog;
245
246 memset(&sctp_clog, 0, sizeof(sctp_clog));
247 sctp_clog.x.fr.largest_tsn = biggest_tsn;
248 sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
249 sctp_clog.x.fr.tsn = tsn;
250 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
251 SCTP_LOG_EVENT_FR,
252 from,
253 sctp_clog.x.misc.log1,
254 sctp_clog.x.misc.log2,
255 sctp_clog.x.misc.log3,
256 sctp_clog.x.misc.log4);
257 #endif
258 }
259
260 #ifdef SCTP_MBUF_LOGGING
261 void
262 sctp_log_mb(struct mbuf *m, int from)
263 {
264 #if defined(SCTP_LOCAL_TRACE_BUF)
265 struct sctp_cwnd_log sctp_clog;
266
267 sctp_clog.x.mb.mp = m;
268 sctp_clog.x.mb.mbuf_flags = (uint8_t)(SCTP_BUF_GET_FLAGS(m));
269 sctp_clog.x.mb.size = (uint16_t)(SCTP_BUF_LEN(m));
270 sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
271 if (SCTP_BUF_IS_EXTENDED(m)) {
272 sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
273 #if defined(__APPLE__) && !defined(__Userspace__)
274 /* APPLE does not use a ref_cnt, but a forward/backward ref queue */
275 #else
276 sctp_clog.x.mb.refcnt = (uint8_t)(SCTP_BUF_EXTEND_REFCNT(m));
277 #endif
278 } else {
279 sctp_clog.x.mb.ext = 0;
280 sctp_clog.x.mb.refcnt = 0;
281 }
282 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
283 SCTP_LOG_EVENT_MBUF,
284 from,
285 sctp_clog.x.misc.log1,
286 sctp_clog.x.misc.log2,
287 sctp_clog.x.misc.log3,
288 sctp_clog.x.misc.log4);
289 #endif
290 }
291
292 void
293 sctp_log_mbc(struct mbuf *m, int from)
294 {
295 struct mbuf *mat;
296
297 for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
298 sctp_log_mb(mat, from);
299 }
300 }
301 #endif
302
303 void
304 sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from)
305 {
306 #if defined(SCTP_LOCAL_TRACE_BUF)
307 struct sctp_cwnd_log sctp_clog;
308
309 if (control == NULL) {
310 SCTP_PRINTF("Gak log of NULL?\n");
311 return;
312 }
313 sctp_clog.x.strlog.stcb = control->stcb;
314 sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
315 sctp_clog.x.strlog.n_sseq = (uint16_t)control->mid;
316 sctp_clog.x.strlog.strm = control->sinfo_stream;
317 if (poschk != NULL) {
318 sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
319 sctp_clog.x.strlog.e_sseq = (uint16_t)poschk->mid;
320 } else {
321 sctp_clog.x.strlog.e_tsn = 0;
322 sctp_clog.x.strlog.e_sseq = 0;
323 }
324 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
325 SCTP_LOG_EVENT_STRM,
326 from,
327 sctp_clog.x.misc.log1,
328 sctp_clog.x.misc.log2,
329 sctp_clog.x.misc.log3,
330 sctp_clog.x.misc.log4);
331 #endif
332 }
333
334 void
335 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
336 {
337 #if defined(SCTP_LOCAL_TRACE_BUF)
338 struct sctp_cwnd_log sctp_clog;
339
340 sctp_clog.x.cwnd.net = net;
341 if (stcb->asoc.send_queue_cnt > 255)
342 sctp_clog.x.cwnd.cnt_in_send = 255;
343 else
344 sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
345 if (stcb->asoc.stream_queue_cnt > 255)
346 sctp_clog.x.cwnd.cnt_in_str = 255;
347 else
348 sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
349
350 if (net) {
351 sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
352 sctp_clog.x.cwnd.inflight = net->flight_size;
353 sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
354 sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
355 sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
356 }
357 if (SCTP_CWNDLOG_PRESEND == from) {
358 sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
359 }
360 sctp_clog.x.cwnd.cwnd_augment = augment;
361 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
362 SCTP_LOG_EVENT_CWND,
363 from,
364 sctp_clog.x.misc.log1,
365 sctp_clog.x.misc.log2,
366 sctp_clog.x.misc.log3,
367 sctp_clog.x.misc.log4);
368 #endif
369 }
370
371 #if !defined(__APPLE__) && !defined(__Userspace__)
372 void
373 sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
374 {
375 #if defined(SCTP_LOCAL_TRACE_BUF)
376 struct sctp_cwnd_log sctp_clog;
377
378 memset(&sctp_clog, 0, sizeof(sctp_clog));
379 if (inp) {
380 sctp_clog.x.lock.sock = (void *) inp->sctp_socket;
381
382 } else {
383 sctp_clog.x.lock.sock = (void *) NULL;
384 }
385 sctp_clog.x.lock.inp = (void *) inp;
386 #if defined(__FreeBSD__)
387 if (stcb) {
388 sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
389 } else {
390 sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
391 }
392 if (inp) {
393 sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
394 sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
395 } else {
396 sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
397 sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
398 }
399 sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
400 if (inp && (inp->sctp_socket)) {
401 sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
402 sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
403 sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
404 } else {
405 sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
406 sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
407 sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
408 }
409 #endif
410 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
411 SCTP_LOG_LOCK_EVENT,
412 from,
413 sctp_clog.x.misc.log1,
414 sctp_clog.x.misc.log2,
415 sctp_clog.x.misc.log3,
416 sctp_clog.x.misc.log4);
417 #endif
418 }
419 #endif
420
421 void
422 sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
423 {
424 #if defined(SCTP_LOCAL_TRACE_BUF)
425 struct sctp_cwnd_log sctp_clog;
426
427 memset(&sctp_clog, 0, sizeof(sctp_clog));
428 sctp_clog.x.cwnd.net = net;
429 sctp_clog.x.cwnd.cwnd_new_value = error;
430 sctp_clog.x.cwnd.inflight = net->flight_size;
431 sctp_clog.x.cwnd.cwnd_augment = burst;
432 if (stcb->asoc.send_queue_cnt > 255)
433 sctp_clog.x.cwnd.cnt_in_send = 255;
434 else
435 sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
436 if (stcb->asoc.stream_queue_cnt > 255)
437 sctp_clog.x.cwnd.cnt_in_str = 255;
438 else
439 sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
440 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
441 SCTP_LOG_EVENT_MAXBURST,
442 from,
443 sctp_clog.x.misc.log1,
444 sctp_clog.x.misc.log2,
445 sctp_clog.x.misc.log3,
446 sctp_clog.x.misc.log4);
447 #endif
448 }
449
450 void
451 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
452 {
453 #if defined(SCTP_LOCAL_TRACE_BUF)
454 struct sctp_cwnd_log sctp_clog;
455
456 sctp_clog.x.rwnd.rwnd = peers_rwnd;
457 sctp_clog.x.rwnd.send_size = snd_size;
458 sctp_clog.x.rwnd.overhead = overhead;
459 sctp_clog.x.rwnd.new_rwnd = 0;
460 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
461 SCTP_LOG_EVENT_RWND,
462 from,
463 sctp_clog.x.misc.log1,
464 sctp_clog.x.misc.log2,
465 sctp_clog.x.misc.log3,
466 sctp_clog.x.misc.log4);
467 #endif
468 }
469
470 void
471 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
472 {
473 #if defined(SCTP_LOCAL_TRACE_BUF)
474 struct sctp_cwnd_log sctp_clog;
475
476 sctp_clog.x.rwnd.rwnd = peers_rwnd;
477 sctp_clog.x.rwnd.send_size = flight_size;
478 sctp_clog.x.rwnd.overhead = overhead;
479 sctp_clog.x.rwnd.new_rwnd = a_rwndval;
480 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
481 SCTP_LOG_EVENT_RWND,
482 from,
483 sctp_clog.x.misc.log1,
484 sctp_clog.x.misc.log2,
485 sctp_clog.x.misc.log3,
486 sctp_clog.x.misc.log4);
487 #endif
488 }
489
490 #ifdef SCTP_MBCNT_LOGGING
491 static void
492 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
493 {
494 #if defined(SCTP_LOCAL_TRACE_BUF)
495 struct sctp_cwnd_log sctp_clog;
496
497 sctp_clog.x.mbcnt.total_queue_size = total_oq;
498 sctp_clog.x.mbcnt.size_change = book;
499 sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
500 sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
501 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
502 SCTP_LOG_EVENT_MBCNT,
503 from,
504 sctp_clog.x.misc.log1,
505 sctp_clog.x.misc.log2,
506 sctp_clog.x.misc.log3,
507 sctp_clog.x.misc.log4);
508 #endif
509 }
510 #endif
511
512 void
513 sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
514 {
515 #if defined(SCTP_LOCAL_TRACE_BUF)
516 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
517 SCTP_LOG_MISC_EVENT,
518 from,
519 a, b, c, d);
520 #endif
521 }
522
523 void
524 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
525 {
526 #if defined(SCTP_LOCAL_TRACE_BUF)
527 struct sctp_cwnd_log sctp_clog;
528
529 sctp_clog.x.wake.stcb = (void *)stcb;
530 sctp_clog.x.wake.wake_cnt = wake_cnt;
531 sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
532 sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
533 sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
534
535 if (stcb->asoc.stream_queue_cnt < 0xff)
536 sctp_clog.x.wake.stream_qcnt = (uint8_t) stcb->asoc.stream_queue_cnt;
537 else
538 sctp_clog.x.wake.stream_qcnt = 0xff;
539
540 if (stcb->asoc.chunks_on_out_queue < 0xff)
541 sctp_clog.x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue;
542 else
543 sctp_clog.x.wake.chunks_on_oque = 0xff;
544
545 sctp_clog.x.wake.sctpflags = 0;
546 /* set in the deferred mode stuff */
547 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
548 sctp_clog.x.wake.sctpflags |= 1;
549 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
550 sctp_clog.x.wake.sctpflags |= 2;
551 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
552 sctp_clog.x.wake.sctpflags |= 4;
553 /* what about the sb */
554 if (stcb->sctp_socket) {
555 struct socket *so = stcb->sctp_socket;
556
557 sctp_clog.x.wake.sbflags = (uint8_t)((so->so_snd.sb_flags & 0x00ff));
558 } else {
559 sctp_clog.x.wake.sbflags = 0xff;
560 }
561 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
562 SCTP_LOG_EVENT_WAKE,
563 from,
564 sctp_clog.x.misc.log1,
565 sctp_clog.x.misc.log2,
566 sctp_clog.x.misc.log3,
567 sctp_clog.x.misc.log4);
568 #endif
569 }
570
571 void
572 sctp_log_block(uint8_t from, struct sctp_association *asoc, ssize_t sendlen)
573 {
574 #if defined(SCTP_LOCAL_TRACE_BUF)
575 struct sctp_cwnd_log sctp_clog;
576
577 sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
578 sctp_clog.x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt);
579 sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
580 sctp_clog.x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt;
581 sctp_clog.x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue;
582 sctp_clog.x.blk.flight_size = (uint16_t) (asoc->total_flight/1024);
583 sctp_clog.x.blk.sndlen = (uint32_t)sendlen;
584 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
585 SCTP_LOG_EVENT_BLOCK,
586 from,
587 sctp_clog.x.misc.log1,
588 sctp_clog.x.misc.log2,
589 sctp_clog.x.misc.log3,
590 sctp_clog.x.misc.log4);
591 #endif
592 }
593
594 int
595 sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
596 {
597 /* May need to fix this if ktrdump does not work */
598 return (0);
599 }
600
601 #ifdef SCTP_AUDITING_ENABLED
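/*
 * The audit trace below is a circular buffer of (event, detail) byte
 * pairs; sctp_audit_indx points at the next slot to fill and wraps at
 * SCTP_AUDIT_SIZE (see sctp_audit_log() and sctp_auditing()).
 */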
602 uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
603 static int sctp_audit_indx = 0;
604
605 static
606 void
607 sctp_print_audit_report(void)
608 {
609 int i;
610 int cnt;
611
612 cnt = 0;
613 for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
614 if ((sctp_audit_data[i][0] == 0xe0) &&
615 (sctp_audit_data[i][1] == 0x01)) {
616 cnt = 0;
617 SCTP_PRINTF("\n");
618 } else if (sctp_audit_data[i][0] == 0xf0) {
619 cnt = 0;
620 SCTP_PRINTF("\n");
621 } else if ((sctp_audit_data[i][0] == 0xc0) &&
622 (sctp_audit_data[i][1] == 0x01)) {
623 SCTP_PRINTF("\n");
624 cnt = 0;
625 }
626 SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
627 (uint32_t) sctp_audit_data[i][1]);
628 cnt++;
629 if ((cnt % 14) == 0)
630 SCTP_PRINTF("\n");
631 }
632 for (i = 0; i < sctp_audit_indx; i++) {
633 if ((sctp_audit_data[i][0] == 0xe0) &&
634 (sctp_audit_data[i][1] == 0x01)) {
635 cnt = 0;
636 SCTP_PRINTF("\n");
637 } else if (sctp_audit_data[i][0] == 0xf0) {
638 cnt = 0;
639 SCTP_PRINTF("\n");
640 } else if ((sctp_audit_data[i][0] == 0xc0) &&
641 (sctp_audit_data[i][1] == 0x01)) {
642 SCTP_PRINTF("\n");
643 cnt = 0;
644 }
645 SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
646 (uint32_t) sctp_audit_data[i][1]);
647 cnt++;
648 if ((cnt % 14) == 0)
649 SCTP_PRINTF("\n");
650 }
651 SCTP_PRINTF("\n");
652 }
653
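/*
 * Record an audit event and cross-check the association's bookkeeping:
 * the retransmission count, the total flight size/count, and each
 * destination's flight size are recomputed from the sent queue and
 * corrected when they disagree, and an audit report is printed if any
 * mismatch was found.
 */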
654 void
655 sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
656 struct sctp_nets *net)
657 {
658 int resend_cnt, tot_out, rep, tot_book_cnt;
659 struct sctp_nets *lnet;
660 struct sctp_tmit_chunk *chk;
661
662 sctp_audit_data[sctp_audit_indx][0] = 0xAA;
663 sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
664 sctp_audit_indx++;
665 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
666 sctp_audit_indx = 0;
667 }
668 if (inp == NULL) {
669 sctp_audit_data[sctp_audit_indx][0] = 0xAF;
670 sctp_audit_data[sctp_audit_indx][1] = 0x01;
671 sctp_audit_indx++;
672 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
673 sctp_audit_indx = 0;
674 }
675 return;
676 }
677 if (stcb == NULL) {
678 sctp_audit_data[sctp_audit_indx][0] = 0xAF;
679 sctp_audit_data[sctp_audit_indx][1] = 0x02;
680 sctp_audit_indx++;
681 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
682 sctp_audit_indx = 0;
683 }
684 return;
685 }
686 sctp_audit_data[sctp_audit_indx][0] = 0xA1;
687 sctp_audit_data[sctp_audit_indx][1] =
688 (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
689 sctp_audit_indx++;
690 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
691 sctp_audit_indx = 0;
692 }
693 rep = 0;
694 tot_book_cnt = 0;
695 resend_cnt = tot_out = 0;
696 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
697 if (chk->sent == SCTP_DATAGRAM_RESEND) {
698 resend_cnt++;
699 } else if (chk->sent < SCTP_DATAGRAM_RESEND) {
700 tot_out += chk->book_size;
701 tot_book_cnt++;
702 }
703 }
704 if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
705 sctp_audit_data[sctp_audit_indx][0] = 0xAF;
706 sctp_audit_data[sctp_audit_indx][1] = 0xA1;
707 sctp_audit_indx++;
708 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
709 sctp_audit_indx = 0;
710 }
711 SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
712 resend_cnt, stcb->asoc.sent_queue_retran_cnt);
713 rep = 1;
714 stcb->asoc.sent_queue_retran_cnt = resend_cnt;
715 sctp_audit_data[sctp_audit_indx][0] = 0xA2;
716 sctp_audit_data[sctp_audit_indx][1] =
717 (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
718 sctp_audit_indx++;
719 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
720 sctp_audit_indx = 0;
721 }
722 }
723 if (tot_out != stcb->asoc.total_flight) {
724 sctp_audit_data[sctp_audit_indx][0] = 0xAF;
725 sctp_audit_data[sctp_audit_indx][1] = 0xA2;
726 sctp_audit_indx++;
727 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
728 sctp_audit_indx = 0;
729 }
730 rep = 1;
731 SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
732 (int)stcb->asoc.total_flight);
733 stcb->asoc.total_flight = tot_out;
734 }
735 if (tot_book_cnt != stcb->asoc.total_flight_count) {
736 sctp_audit_data[sctp_audit_indx][0] = 0xAF;
737 sctp_audit_data[sctp_audit_indx][1] = 0xA5;
738 sctp_audit_indx++;
739 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
740 sctp_audit_indx = 0;
741 }
742 rep = 1;
743 SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);
744
745 stcb->asoc.total_flight_count = tot_book_cnt;
746 }
747 tot_out = 0;
748 TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
749 tot_out += lnet->flight_size;
750 }
751 if (tot_out != stcb->asoc.total_flight) {
752 sctp_audit_data[sctp_audit_indx][0] = 0xAF;
753 sctp_audit_data[sctp_audit_indx][1] = 0xA3;
754 sctp_audit_indx++;
755 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
756 sctp_audit_indx = 0;
757 }
758 rep = 1;
759 SCTP_PRINTF("real flight:%d net total was %d\n",
760 stcb->asoc.total_flight, tot_out);
761 /* now corrective action */
762 TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
763
764 tot_out = 0;
765 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
766 if ((chk->whoTo == lnet) &&
767 (chk->sent < SCTP_DATAGRAM_RESEND)) {
768 tot_out += chk->book_size;
769 }
770 }
771 if (lnet->flight_size != tot_out) {
772 SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
773 (void *)lnet, lnet->flight_size,
774 tot_out);
775 lnet->flight_size = tot_out;
776 }
777 }
778 }
779 if (rep) {
780 sctp_print_audit_report();
781 }
782 }
783
784 void
785 sctp_audit_log(uint8_t ev, uint8_t fd)
786 {
787
788 sctp_audit_data[sctp_audit_indx][0] = ev;
789 sctp_audit_data[sctp_audit_indx][1] = fd;
790 sctp_audit_indx++;
791 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
792 sctp_audit_indx = 0;
793 }
794 }
795
796 #endif
797
798 /*
799 * The conversion from time to ticks and vice versa is done by rounding
800 * upwards. This way we can test in the code that the time is positive and
801 * know that this corresponds to a positive number of ticks.
802 */
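/*
 * Illustrative example (assuming hz = 250): sctp_msecs_to_ticks(10) yields
 * (10 * 250 + 999) / 1000 = 3 ticks rather than 2, and sctp_ticks_to_msecs(3)
 * yields (3 * 1000 + 249) / 250 = 12 msecs, so a positive input can never
 * round down to zero.
 */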
803
804 uint32_t
805 sctp_msecs_to_ticks(uint32_t msecs)
806 {
807 uint64_t temp;
808 uint32_t ticks;
809
810 if (hz == 1000) {
811 ticks = msecs;
812 } else {
813 temp = (((uint64_t)msecs * hz) + 999) / 1000;
814 if (temp > UINT32_MAX) {
815 ticks = UINT32_MAX;
816 } else {
817 ticks = (uint32_t)temp;
818 }
819 }
820 return (ticks);
821 }
822
823 uint32_t
824 sctp_ticks_to_msecs(uint32_t ticks)
825 {
826 uint64_t temp;
827 uint32_t msecs;
828
829 if (hz == 1000) {
830 msecs = ticks;
831 } else {
832 temp = (((uint64_t)ticks * 1000) + (hz - 1)) / hz;
833 if (temp > UINT32_MAX) {
834 msecs = UINT32_MAX;
835 } else {
836 msecs = (uint32_t)temp;
837 }
838 }
839 return (msecs);
840 }
841
842 uint32_t
843 sctp_secs_to_ticks(uint32_t secs)
844 {
845 uint64_t temp;
846 uint32_t ticks;
847
848 temp = (uint64_t)secs * hz;
849 if (temp > UINT32_MAX) {
850 ticks = UINT32_MAX;
851 } else {
852 ticks = (uint32_t)temp;
853 }
854 return (ticks);
855 }
856
857 uint32_t
858 sctp_ticks_to_secs(uint32_t ticks)
859 {
860 uint64_t temp;
861 uint32_t secs;
862
863 temp = ((uint64_t)ticks + (hz - 1)) / hz;
864 if (temp > UINT32_MAX) {
865 secs = UINT32_MAX;
866 } else {
867 secs = (uint32_t)temp;
868 }
869 return (secs);
870 }
871
872 /*
873 * sctp_stop_timers_for_shutdown() should be called
874 * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
875 * state to make sure that all timers are stopped.
876 */
877 void
878 sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
879 {
880 struct sctp_inpcb *inp;
881 struct sctp_nets *net;
882
883 inp = stcb->sctp_ep;
884
885 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, inp, stcb, NULL,
886 SCTP_FROM_SCTPUTIL + SCTP_LOC_12);
887 sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, inp, stcb, NULL,
888 SCTP_FROM_SCTPUTIL + SCTP_LOC_13);
889 sctp_timer_stop(SCTP_TIMER_TYPE_ASCONF, inp, stcb, NULL,
890 SCTP_FROM_SCTPUTIL + SCTP_LOC_14);
891 sctp_timer_stop(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL,
892 SCTP_FROM_SCTPUTIL + SCTP_LOC_15);
893 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
894 sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
895 SCTP_FROM_SCTPUTIL + SCTP_LOC_16);
896 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
897 SCTP_FROM_SCTPUTIL + SCTP_LOC_17);
898 }
899 }
900
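/*
 * Stop all timers belonging to the association: the per-association
 * timers (optionally including the ASOCKILL timer) and the
 * per-destination timers on every net.
 */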
901 void
902 sctp_stop_association_timers(struct sctp_tcb *stcb, bool stop_assoc_kill_timer)
903 {
904 struct sctp_inpcb *inp;
905 struct sctp_nets *net;
906
907 inp = stcb->sctp_ep;
908 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, inp, stcb, NULL,
909 SCTP_FROM_SCTPUTIL + SCTP_LOC_18);
910 sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, inp, stcb, NULL,
911 SCTP_FROM_SCTPUTIL + SCTP_LOC_19);
912 if (stop_assoc_kill_timer) {
913 sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL,
914 SCTP_FROM_SCTPUTIL + SCTP_LOC_20);
915 }
916 sctp_timer_stop(SCTP_TIMER_TYPE_ASCONF, inp, stcb, NULL,
917 SCTP_FROM_SCTPUTIL + SCTP_LOC_21);
918 sctp_timer_stop(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL,
919 SCTP_FROM_SCTPUTIL + SCTP_LOC_22);
920 sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNGUARD, inp, stcb, NULL,
921 SCTP_FROM_SCTPUTIL + SCTP_LOC_23);
922 /* Mobility adaptation */
923 sctp_timer_stop(SCTP_TIMER_TYPE_PRIM_DELETED, inp, stcb, NULL,
924 SCTP_FROM_SCTPUTIL + SCTP_LOC_24);
925 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
926 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, inp, stcb, net,
927 SCTP_FROM_SCTPUTIL + SCTP_LOC_25);
928 sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net,
929 SCTP_FROM_SCTPUTIL + SCTP_LOC_26);
930 sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, inp, stcb, net,
931 SCTP_FROM_SCTPUTIL + SCTP_LOC_27);
932 sctp_timer_stop(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net,
933 SCTP_FROM_SCTPUTIL + SCTP_LOC_28);
934 sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNACK, inp, stcb, net,
935 SCTP_FROM_SCTPUTIL + SCTP_LOC_29);
936 sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
937 SCTP_FROM_SCTPUTIL + SCTP_LOC_30);
938 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
939 SCTP_FROM_SCTPUTIL + SCTP_LOC_31);
940 }
941 }
942
943 /*
944 * A list of sizes based on typical MTUs, used only if the next hop size is
945 * not returned. These values MUST be multiples of 4 and MUST be ordered.
946 */
947 static uint32_t sctp_mtu_sizes[] = {
948 68,
949 296,
950 508,
951 512,
952 544,
953 576,
954 1004,
955 1492,
956 1500,
957 1536,
958 2000,
959 2048,
960 4352,
961 4464,
962 8168,
963 17912,
964 32000,
965 65532
966 };
967
968 /*
969 * Return the largest MTU in sctp_mtu_sizes smaller than val.
970 * If val is smaller than the minimum, just return the largest
971 * multiple of 4 smaller or equal to val.
972 * Ensure that the result is a multiple of 4.
973 */
974 uint32_t
975 sctp_get_prev_mtu(uint32_t val)
976 {
977 uint32_t i;
978
979 val &= 0xfffffffc;
980 if (val <= sctp_mtu_sizes[0]) {
981 return (val);
982 }
983 for (i = 1; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
984 if (val <= sctp_mtu_sizes[i]) {
985 break;
986 }
987 }
988 KASSERT((sctp_mtu_sizes[i - 1] & 0x00000003) == 0,
989 ("sctp_mtu_sizes[%u] not a multiple of 4", i - 1));
990 return (sctp_mtu_sizes[i - 1]);
991 }
992
993 /*
994 * Return the smallest MTU in sctp_mtu_sizes larger than val.
995 * If val is larger than the maximum, just return the largest multiple of 4 smaller
996 * or equal to val.
997 * Ensure that the result is a multiple of 4.
998 */
999 uint32_t
1000 sctp_get_next_mtu(uint32_t val)
1001 {
1002 /* select another MTU that is just bigger than this one */
1003 uint32_t i;
1004
1005 val &= 0xfffffffc;
1006 for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
1007 if (val < sctp_mtu_sizes[i]) {
1008 KASSERT((sctp_mtu_sizes[i] & 0x00000003) == 0,
1009 ("sctp_mtu_sizes[%u] not a multiple of 4", i));
1010 return (sctp_mtu_sizes[i]);
1011 }
1012 }
1013 return (val);
1014 }
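/*
 * For example, given the table above, sctp_get_prev_mtu(1500) returns 1492
 * and sctp_get_next_mtu(1500) returns 1536, while values beyond the ends of
 * the table simply come back rounded down to a multiple of 4.
 */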
1015
1016 void
1017 sctp_fill_random_store(struct sctp_pcb *m)
1018 {
1019 /*
1020 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
1021 * our counter. The result becomes our good random numbers and we
1022 * then setup to give these out. Note that we do no locking to
1023 * protect this. This is ok, since if competing folks call this we
1024 * will get more gobbled gook in the random store which is what we
1025 * want. There is a danger that two guys will use the same random
1026 * numbers, but that's ok too since that is random as well :->
1027 */
1028 m->store_at = 0;
1029 #if defined(__Userspace__) && defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION)
1030 for (int i = 0; i < (int) (sizeof(m->random_store) / sizeof(m->random_store[0])); i++) {
1031 m->random_store[i] = (uint8_t) rand();
1032 }
1033 #else
1034 (void)sctp_hmac(SCTP_HMAC, (uint8_t *)m->random_numbers,
1035 sizeof(m->random_numbers), (uint8_t *)&m->random_counter,
1036 sizeof(m->random_counter), (uint8_t *)m->random_store);
1037 #endif
1038 m->random_counter++;
1039 }
1040
1041 uint32_t
1042 sctp_select_initial_TSN(struct sctp_pcb *inp)
1043 {
1044 /*
1045 * A true implementation should use a random selection process to get
1046 * the initial stream sequence number, using RFC1750 as a good
1047 * guideline
1048 */
1049 uint32_t x, *xp;
1050 uint8_t *p;
1051 int store_at, new_store;
1052
1053 if (inp->initial_sequence_debug != 0) {
1054 uint32_t ret;
1055
1056 ret = inp->initial_sequence_debug;
1057 inp->initial_sequence_debug++;
1058 return (ret);
1059 }
1060 retry:
1061 store_at = inp->store_at;
1062 new_store = store_at + sizeof(uint32_t);
1063 if (new_store >= (SCTP_SIGNATURE_SIZE-3)) {
1064 new_store = 0;
1065 }
1066 if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
1067 goto retry;
1068 }
1069 if (new_store == 0) {
1070 /* Refill the random store */
1071 sctp_fill_random_store(inp);
1072 }
1073 p = &inp->random_store[store_at];
1074 xp = (uint32_t *)p;
1075 x = *xp;
1076 return (x);
1077 }
1078
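/*
 * Select a verification tag: zero is never used, and when 'check' is set
 * each candidate is additionally validated with sctp_is_vtag_good()
 * before it is accepted.
 */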
1079 uint32_t
1080 sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
1081 {
1082 uint32_t x;
1083 struct timeval now;
1084
1085 if (check) {
1086 (void)SCTP_GETTIME_TIMEVAL(&now);
1087 }
1088 for (;;) {
1089 x = sctp_select_initial_TSN(&inp->sctp_ep);
1090 if (x == 0) {
1091 /* we never use 0 */
1092 continue;
1093 }
1094 if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
1095 break;
1096 }
1097 }
1098 return (x);
1099 }
1100
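/*
 * Map the kernel's internal association state (SCTP_STATE_*) to the
 * state values reported to applications, e.g. via the SCTP_STATUS
 * socket option.
 */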
1101 int32_t
1102 sctp_map_assoc_state(int kernel_state)
1103 {
1104 int32_t user_state;
1105
1106 if (kernel_state & SCTP_STATE_WAS_ABORTED) {
1107 user_state = SCTP_CLOSED;
1108 } else if (kernel_state & SCTP_STATE_SHUTDOWN_PENDING) {
1109 user_state = SCTP_SHUTDOWN_PENDING;
1110 } else {
1111 switch (kernel_state & SCTP_STATE_MASK) {
1112 case SCTP_STATE_EMPTY:
1113 user_state = SCTP_CLOSED;
1114 break;
1115 case SCTP_STATE_INUSE:
1116 user_state = SCTP_CLOSED;
1117 break;
1118 case SCTP_STATE_COOKIE_WAIT:
1119 user_state = SCTP_COOKIE_WAIT;
1120 break;
1121 case SCTP_STATE_COOKIE_ECHOED:
1122 user_state = SCTP_COOKIE_ECHOED;
1123 break;
1124 case SCTP_STATE_OPEN:
1125 user_state = SCTP_ESTABLISHED;
1126 break;
1127 case SCTP_STATE_SHUTDOWN_SENT:
1128 user_state = SCTP_SHUTDOWN_SENT;
1129 break;
1130 case SCTP_STATE_SHUTDOWN_RECEIVED:
1131 user_state = SCTP_SHUTDOWN_RECEIVED;
1132 break;
1133 case SCTP_STATE_SHUTDOWN_ACK_SENT:
1134 user_state = SCTP_SHUTDOWN_ACK_SENT;
1135 break;
1136 default:
1137 user_state = SCTP_CLOSED;
1138 break;
1139 }
1140 }
1141 return (user_state);
1142 }
1143
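/*
 * Initialize a freshly allocated association from the endpoint defaults:
 * tags and the initial TSN are selected, timer/queue/stream state is set
 * up, and the mapping arrays and outgoing stream array are allocated.
 * Returns 0 on success or ENOMEM if an allocation fails.
 */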
1144 int
1145 sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1146 uint32_t override_tag, uint32_t vrf_id, uint16_t o_strms)
1147 {
1148 struct sctp_association *asoc;
1149 /*
1150 * Anything set to zero is taken care of by the allocation routine's
1151 * bzero
1152 */
1153
1154 /*
1155 * Up front select what scoping to apply on addresses I tell my peer.
1156 * Not sure what to do with these right now, we will need to come up
1157 * with a way to set them. We may need to pass them through from the
1158 * caller in the sctp_aloc_assoc() function.
1159 */
1160 int i;
1161 #if defined(SCTP_DETAILED_STR_STATS)
1162 int j;
1163 #endif
1164
1165 asoc = &stcb->asoc;
1166 /* init all variables to a known value. */
1167 SCTP_SET_STATE(stcb, SCTP_STATE_INUSE);
1168 asoc->max_burst = inp->sctp_ep.max_burst;
1169 asoc->fr_max_burst = inp->sctp_ep.fr_max_burst;
1170 asoc->heart_beat_delay = sctp_ticks_to_msecs(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
1171 asoc->cookie_life = inp->sctp_ep.def_cookie_life;
1172 asoc->sctp_cmt_on_off = inp->sctp_cmt_on_off;
1173 asoc->ecn_supported = inp->ecn_supported;
1174 asoc->prsctp_supported = inp->prsctp_supported;
1175 asoc->auth_supported = inp->auth_supported;
1176 asoc->asconf_supported = inp->asconf_supported;
1177 asoc->reconfig_supported = inp->reconfig_supported;
1178 asoc->nrsack_supported = inp->nrsack_supported;
1179 asoc->pktdrop_supported = inp->pktdrop_supported;
1180 asoc->idata_supported = inp->idata_supported;
1181 asoc->sctp_cmt_pf = (uint8_t)0;
1182 asoc->sctp_frag_point = inp->sctp_frag_point;
1183 asoc->sctp_features = inp->sctp_features;
1184 asoc->default_dscp = inp->sctp_ep.default_dscp;
1185 asoc->max_cwnd = inp->max_cwnd;
1186 #ifdef INET6
1187 if (inp->sctp_ep.default_flowlabel) {
1188 asoc->default_flowlabel = inp->sctp_ep.default_flowlabel;
1189 } else {
1190 if (inp->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
1191 asoc->default_flowlabel = sctp_select_initial_TSN(&inp->sctp_ep);
1192 asoc->default_flowlabel &= 0x000fffff;
1193 asoc->default_flowlabel |= 0x80000000;
1194 } else {
1195 asoc->default_flowlabel = 0;
1196 }
1197 }
1198 #endif
1199 asoc->sb_send_resv = 0;
1200 if (override_tag) {
1201 asoc->my_vtag = override_tag;
1202 } else {
1203 asoc->my_vtag = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
1204 }
1205 /* Get the nonce tags */
1206 asoc->my_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
1207 asoc->peer_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
1208 asoc->vrf_id = vrf_id;
1209
1210 #ifdef SCTP_ASOCLOG_OF_TSNS
1211 asoc->tsn_in_at = 0;
1212 asoc->tsn_out_at = 0;
1213 asoc->tsn_in_wrapped = 0;
1214 asoc->tsn_out_wrapped = 0;
1215 asoc->cumack_log_at = 0;
1216 asoc->cumack_log_atsnt = 0;
1217 #endif
1218 #ifdef SCTP_FS_SPEC_LOG
1219 asoc->fs_index = 0;
1220 #endif
1221 asoc->refcnt = 0;
1222 asoc->assoc_up_sent = 0;
1223 asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
1224 sctp_select_initial_TSN(&inp->sctp_ep);
1225 asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
1226 /* we are optimistic here */
1227 asoc->peer_supports_nat = 0;
1228 asoc->sent_queue_retran_cnt = 0;
1229
1230 /* for CMT */
1231 asoc->last_net_cmt_send_started = NULL;
1232
1233 /* This will need to be adjusted */
1234 asoc->last_acked_seq = asoc->init_seq_number - 1;
1235 asoc->advanced_peer_ack_point = asoc->last_acked_seq;
1236 asoc->asconf_seq_in = asoc->last_acked_seq;
1237
1238 /* here we are different, we hold the next one we expect */
1239 asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
1240
1241 asoc->initial_init_rto_max = inp->sctp_ep.initial_init_rto_max;
1242 asoc->initial_rto = inp->sctp_ep.initial_rto;
1243
1244 asoc->default_mtu = inp->sctp_ep.default_mtu;
1245 asoc->max_init_times = inp->sctp_ep.max_init_times;
1246 asoc->max_send_times = inp->sctp_ep.max_send_times;
1247 asoc->def_net_failure = inp->sctp_ep.def_net_failure;
1248 asoc->def_net_pf_threshold = inp->sctp_ep.def_net_pf_threshold;
1249 asoc->free_chunk_cnt = 0;
1250
1251 asoc->iam_blocking = 0;
1252 asoc->context = inp->sctp_context;
1253 asoc->local_strreset_support = inp->local_strreset_support;
1254 asoc->def_send = inp->def_send;
1255 asoc->delayed_ack = sctp_ticks_to_msecs(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
1256 asoc->sack_freq = inp->sctp_ep.sctp_sack_freq;
1257 asoc->pr_sctp_cnt = 0;
1258 asoc->total_output_queue_size = 0;
1259
1260 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1261 asoc->scope.ipv6_addr_legal = 1;
1262 if (SCTP_IPV6_V6ONLY(inp) == 0) {
1263 asoc->scope.ipv4_addr_legal = 1;
1264 } else {
1265 asoc->scope.ipv4_addr_legal = 0;
1266 }
1267 #if defined(__Userspace__)
1268 asoc->scope.conn_addr_legal = 0;
1269 #endif
1270 } else {
1271 asoc->scope.ipv6_addr_legal = 0;
1272 #if defined(__Userspace__)
1273 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_CONN) {
1274 asoc->scope.conn_addr_legal = 1;
1275 asoc->scope.ipv4_addr_legal = 0;
1276 } else {
1277 asoc->scope.conn_addr_legal = 0;
1278 asoc->scope.ipv4_addr_legal = 1;
1279 }
1280 #else
1281 asoc->scope.ipv4_addr_legal = 1;
1282 #endif
1283 }
1284
1285 asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), SCTP_MINIMAL_RWND);
1286 asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(inp->sctp_socket);
1287
1288 asoc->smallest_mtu = inp->sctp_frag_point;
1289 asoc->minrto = inp->sctp_ep.sctp_minrto;
1290 asoc->maxrto = inp->sctp_ep.sctp_maxrto;
1291
1292 asoc->stream_locked_on = 0;
1293 asoc->ecn_echo_cnt_onq = 0;
1294 asoc->stream_locked = 0;
1295
1296 asoc->send_sack = 1;
1297
1298 LIST_INIT(&asoc->sctp_restricted_addrs);
1299
1300 TAILQ_INIT(&asoc->nets);
1301 TAILQ_INIT(&asoc->pending_reply_queue);
1302 TAILQ_INIT(&asoc->asconf_ack_sent);
1303 /* Setup to fill the hb random cache at first HB */
1304 asoc->hb_random_idx = 4;
1305
1306 asoc->sctp_autoclose_ticks = inp->sctp_ep.auto_close_time;
1307
1308 stcb->asoc.congestion_control_module = inp->sctp_ep.sctp_default_cc_module;
1309 stcb->asoc.cc_functions = sctp_cc_functions[inp->sctp_ep.sctp_default_cc_module];
1310
1311 stcb->asoc.stream_scheduling_module = inp->sctp_ep.sctp_default_ss_module;
1312 stcb->asoc.ss_functions = sctp_ss_functions[inp->sctp_ep.sctp_default_ss_module];
1313
1314 /*
1315 * Now the stream parameters, here we allocate space for all streams
1316 * that we request by default.
1317 */
1318 asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
1319 o_strms;
1320 SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1321 asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1322 SCTP_M_STRMO);
1323 if (asoc->strmout == NULL) {
1324 /* big trouble no memory */
1325 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1326 return (ENOMEM);
1327 }
1328 for (i = 0; i < asoc->streamoutcnt; i++) {
1329 /*
1330 * inbound side must be set to 0xffff, also NOTE when we get
1331 * the INIT-ACK back (for INIT sender) we MUST reduce the
1332 * count (streamoutcnt) but first check if we sent to any of
1333 * the upper streams that were dropped (if some were). Those
1334 * that were dropped must be notified to the upper layer as
1335 * failed to send.
1336 */
1337 asoc->strmout[i].next_mid_ordered = 0;
1338 asoc->strmout[i].next_mid_unordered = 0;
1339 TAILQ_INIT(&asoc->strmout[i].outqueue);
1340 asoc->strmout[i].chunks_on_queues = 0;
1341 #if defined(SCTP_DETAILED_STR_STATS)
1342 for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
1343 asoc->strmout[i].abandoned_sent[j] = 0;
1344 asoc->strmout[i].abandoned_unsent[j] = 0;
1345 }
1346 #else
1347 asoc->strmout[i].abandoned_sent[0] = 0;
1348 asoc->strmout[i].abandoned_unsent[0] = 0;
1349 #endif
1350 asoc->strmout[i].sid = i;
1351 asoc->strmout[i].last_msg_incomplete = 0;
1352 asoc->strmout[i].state = SCTP_STREAM_OPENING;
1353 asoc->ss_functions.sctp_ss_init_stream(stcb, &asoc->strmout[i], NULL);
1354 }
1355 asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);
1356
1357 /* Now the mapping array */
1358 asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1359 SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1360 SCTP_M_MAP);
1361 if (asoc->mapping_array == NULL) {
1362 SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1363 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1364 return (ENOMEM);
1365 }
1366 memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1367 SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
1368 SCTP_M_MAP);
1369 if (asoc->nr_mapping_array == NULL) {
1370 SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1371 SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1372 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1373 return (ENOMEM);
1374 }
1375 memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
1376
1377 /* Now the init of the other outqueues */
1378 TAILQ_INIT(&asoc->free_chunks);
1379 TAILQ_INIT(&asoc->control_send_queue);
1380 TAILQ_INIT(&asoc->asconf_send_queue);
1381 TAILQ_INIT(&asoc->send_queue);
1382 TAILQ_INIT(&asoc->sent_queue);
1383 TAILQ_INIT(&asoc->resetHead);
1384 asoc->max_inbound_streams = inp->sctp_ep.max_open_streams_intome;
1385 TAILQ_INIT(&asoc->asconf_queue);
1386 /* authentication fields */
1387 asoc->authinfo.random = NULL;
1388 asoc->authinfo.active_keyid = 0;
1389 asoc->authinfo.assoc_key = NULL;
1390 asoc->authinfo.assoc_keyid = 0;
1391 asoc->authinfo.recv_key = NULL;
1392 asoc->authinfo.recv_keyid = 0;
1393 LIST_INIT(&asoc->shared_keys);
1394 asoc->marked_retrans = 0;
1395 asoc->port = inp->sctp_ep.port;
1396 asoc->timoinit = 0;
1397 asoc->timodata = 0;
1398 asoc->timosack = 0;
1399 asoc->timoshutdown = 0;
1400 asoc->timoheartbeat = 0;
1401 asoc->timocookie = 0;
1402 asoc->timoshutdownack = 0;
1403 (void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1404 asoc->discontinuity_time = asoc->start_time;
1405 for (i = 0; i < SCTP_PR_SCTP_MAX + 1; i++) {
1406 asoc->abandoned_unsent[i] = 0;
1407 asoc->abandoned_sent[i] = 0;
1408 }
1409 /* sa_ignore MEMLEAK {memory is put in the assoc mapping array and freed later when
1410 * the association is freed.
1411 */
1412 return (0);
1413 }
1414
1415 void
1416 sctp_print_mapping_array(struct sctp_association *asoc)
1417 {
1418 unsigned int i, limit;
1419
1420 SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
1421 asoc->mapping_array_size,
1422 asoc->mapping_array_base_tsn,
1423 asoc->cumulative_tsn,
1424 asoc->highest_tsn_inside_map,
1425 asoc->highest_tsn_inside_nr_map);
1426 for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1427 if (asoc->mapping_array[limit - 1] != 0) {
1428 break;
1429 }
1430 }
1431 SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1432 for (i = 0; i < limit; i++) {
1433 SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1434 }
1435 if (limit % 16)
1436 SCTP_PRINTF("\n");
1437 for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1438 if (asoc->nr_mapping_array[limit - 1]) {
1439 break;
1440 }
1441 }
1442 SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1443 for (i = 0; i < limit; i++) {
1444 SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ': '\n');
1445 }
1446 if (limit % 16)
1447 SCTP_PRINTF("\n");
1448 }
1449
1450 int
1451 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1452 {
1453 /* mapping array needs to grow */
1454 uint8_t *new_array1, *new_array2;
1455 uint32_t new_size;
1456
1457 new_size = asoc->mapping_array_size + ((needed+7)/8 + SCTP_MAPPING_ARRAY_INCR);
1458 SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
1459 SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
1460 if ((new_array1 == NULL) || (new_array2 == NULL)) {
1461 /* can't get more, forget it */
1462 SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
1463 if (new_array1) {
1464 SCTP_FREE(new_array1, SCTP_M_MAP);
1465 }
1466 if (new_array2) {
1467 SCTP_FREE(new_array2, SCTP_M_MAP);
1468 }
1469 return (-1);
1470 }
1471 memset(new_array1, 0, new_size);
1472 memset(new_array2, 0, new_size);
1473 memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
1474 memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
1475 SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1476 SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
1477 asoc->mapping_array = new_array1;
1478 asoc->nr_mapping_array = new_array2;
1479 asoc->mapping_array_size = new_size;
1480 return (0);
1481 }
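/*
 * Sizing example (illustrative): if 'needed' is 20 more TSNs than the
 * current arrays cover, (20 + 7) / 8 = 3 additional bytes are requested,
 * plus the SCTP_MAPPING_ARRAY_INCR slack defined elsewhere; the renegable
 * and non-renegable arrays always grow together.
 */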
1482
1483
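/*
 * Core of the iterator: walk the endpoint list (or a single endpoint),
 * skip endpoints whose flags/features do not match, and invoke the
 * per-endpoint and per-association callbacks under the appropriate
 * locks, periodically dropping and re-acquiring them so other threads
 * can make progress.
 */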
1484 static void
1485 sctp_iterator_work(struct sctp_iterator *it)
1486 {
1487 #if defined(__FreeBSD__) && !defined(__Userspace__)
1488 struct epoch_tracker et;
1489 #endif
1490 struct sctp_inpcb *tinp;
1491 int iteration_count = 0;
1492 int inp_skip = 0;
1493 int first_in = 1;
1494
1495 #if defined(__FreeBSD__) && !defined(__Userspace__)
1496 NET_EPOCH_ENTER(et);
1497 #endif
1498 SCTP_INP_INFO_RLOCK();
1499 SCTP_ITERATOR_LOCK();
1500 sctp_it_ctl.cur_it = it;
1501 if (it->inp) {
1502 SCTP_INP_RLOCK(it->inp);
1503 SCTP_INP_DECR_REF(it->inp);
1504 }
1505 if (it->inp == NULL) {
1506 /* iterator is complete */
1507 done_with_iterator:
1508 sctp_it_ctl.cur_it = NULL;
1509 SCTP_ITERATOR_UNLOCK();
1510 SCTP_INP_INFO_RUNLOCK();
1511 if (it->function_atend != NULL) {
1512 (*it->function_atend) (it->pointer, it->val);
1513 }
1514 SCTP_FREE(it, SCTP_M_ITER);
1515 #if defined(__FreeBSD__) && !defined(__Userspace__)
1516 NET_EPOCH_EXIT(et);
1517 #endif
1518 return;
1519 }
1520 select_a_new_ep:
1521 if (first_in) {
1522 first_in = 0;
1523 } else {
1524 SCTP_INP_RLOCK(it->inp);
1525 }
1526 while (((it->pcb_flags) &&
1527 ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
1528 ((it->pcb_features) &&
1529 ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
1530 /* endpoint flags or features don't match, so keep looking */
1531 if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
1532 SCTP_INP_RUNLOCK(it->inp);
1533 goto done_with_iterator;
1534 }
1535 tinp = it->inp;
1536 it->inp = LIST_NEXT(it->inp, sctp_list);
1537 it->stcb = NULL;
1538 SCTP_INP_RUNLOCK(tinp);
1539 if (it->inp == NULL) {
1540 goto done_with_iterator;
1541 }
1542 SCTP_INP_RLOCK(it->inp);
1543 }
1544 /* now go through each assoc which is in the desired state */
1545 if (it->done_current_ep == 0) {
1546 if (it->function_inp != NULL)
1547 inp_skip = (*it->function_inp)(it->inp, it->pointer, it->val);
1548 it->done_current_ep = 1;
1549 }
1550 if (it->stcb == NULL) {
1551 /* run the per instance function */
1552 it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
1553 }
1554 if ((inp_skip) || it->stcb == NULL) {
1555 if (it->function_inp_end != NULL) {
1556 inp_skip = (*it->function_inp_end)(it->inp,
1557 it->pointer,
1558 it->val);
1559 }
1560 SCTP_INP_RUNLOCK(it->inp);
1561 goto no_stcb;
1562 }
1563 while (it->stcb) {
1564 SCTP_TCB_LOCK(it->stcb);
1565 if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
1566 /* not in the right state... keep looking */
1567 SCTP_TCB_UNLOCK(it->stcb);
1568 goto next_assoc;
1569 }
1570 /* see if we have limited out the iterator loop */
1571 iteration_count++;
1572 if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
1573 /* Pause to let others grab the lock */
1574 atomic_add_int(&it->stcb->asoc.refcnt, 1);
1575 SCTP_TCB_UNLOCK(it->stcb);
1576 SCTP_INP_INCR_REF(it->inp);
1577 SCTP_INP_RUNLOCK(it->inp);
1578 SCTP_ITERATOR_UNLOCK();
1579 SCTP_INP_INFO_RUNLOCK();
1580 SCTP_INP_INFO_RLOCK();
1581 SCTP_ITERATOR_LOCK();
1582 if (sctp_it_ctl.iterator_flags) {
1583 /* We won't be staying here */
1584 SCTP_INP_DECR_REF(it->inp);
1585 atomic_add_int(&it->stcb->asoc.refcnt, -1);
1586 #if !(defined(__FreeBSD__) && !defined(__Userspace__))
1587 if (sctp_it_ctl.iterator_flags &
1588 SCTP_ITERATOR_MUST_EXIT) {
1589 goto done_with_iterator;
1590 }
1591 #endif
1592 if (sctp_it_ctl.iterator_flags &
1593 SCTP_ITERATOR_STOP_CUR_IT) {
1594 sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
1595 goto done_with_iterator;
1596 }
1597 if (sctp_it_ctl.iterator_flags &
1598 SCTP_ITERATOR_STOP_CUR_INP) {
1599 sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
1600 goto no_stcb;
1601 }
1602 /* If we reach here huh? */
1603 SCTP_PRINTF("Unknown it ctl flag %x\n",
1604 sctp_it_ctl.iterator_flags);
1605 sctp_it_ctl.iterator_flags = 0;
1606 }
1607 SCTP_INP_RLOCK(it->inp);
1608 SCTP_INP_DECR_REF(it->inp);
1609 SCTP_TCB_LOCK(it->stcb);
1610 atomic_add_int(&it->stcb->asoc.refcnt, -1);
1611 iteration_count = 0;
1612 }
1613 KASSERT(it->inp == it->stcb->sctp_ep,
1614 ("%s: stcb %p does not belong to inp %p, but inp %p",
1615 __func__, it->stcb, it->inp, it->stcb->sctp_ep));
1616
1617 /* run function on this one */
1618 (*it->function_assoc)(it->inp, it->stcb, it->pointer, it->val);
1619
1620 /*
1621 * we lie here, it really needs to have its own type but
1622 * first I must verify that this won't affect things :-0
1623 */
1624 if (it->no_chunk_output == 0)
1625 sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1626
1627 SCTP_TCB_UNLOCK(it->stcb);
1628 next_assoc:
1629 it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
1630 if (it->stcb == NULL) {
1631 /* Run last function */
1632 if (it->function_inp_end != NULL) {
1633 inp_skip = (*it->function_inp_end)(it->inp,
1634 it->pointer,
1635 it->val);
1636 }
1637 }
1638 }
1639 SCTP_INP_RUNLOCK(it->inp);
1640 no_stcb:
1641 /* done with all assocs on this endpoint, move on to next endpoint */
1642 it->done_current_ep = 0;
1643 if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
1644 it->inp = NULL;
1645 } else {
1646 it->inp = LIST_NEXT(it->inp, sctp_list);
1647 }
1648 it->stcb = NULL;
1649 if (it->inp == NULL) {
1650 goto done_with_iterator;
1651 }
1652 goto select_a_new_ep;
1653 }
1654
1655 void
1656 sctp_iterator_worker(void)
1657 {
1658 struct sctp_iterator *it;
1659
1660 /* This function is called with the WQ lock in place */
1661 sctp_it_ctl.iterator_running = 1;
1662 while ((it = TAILQ_FIRST(&sctp_it_ctl.iteratorhead)) != NULL) {
1663 /* now lets work on this one */
1664 TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
1665 SCTP_IPI_ITERATOR_WQ_UNLOCK();
1666 #if defined(__FreeBSD__) && !defined(__Userspace__)
1667 CURVNET_SET(it->vn);
1668 #endif
1669 sctp_iterator_work(it);
1670 #if defined(__FreeBSD__) && !defined(__Userspace__)
1671 CURVNET_RESTORE();
1672 #endif
1673 SCTP_IPI_ITERATOR_WQ_LOCK();
1674 #if !defined(__FreeBSD__) && !defined(__Userspace__)
1675 if (sctp_it_ctl.iterator_flags & SCTP_ITERATOR_MUST_EXIT) {
1676 break;
1677 }
1678 #endif
1679 /*sa_ignore FREED_MEMORY*/
1680 }
1681 sctp_it_ctl.iterator_running = 0;
1682 return;
1683 }
1684
1685
1686 static void
1687 sctp_handle_addr_wq(void)
1688 {
1689 /* deal with the ADDR wq from the rtsock calls */
1690 struct sctp_laddr *wi, *nwi;
1691 struct sctp_asconf_iterator *asc;
1692
1693 SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1694 sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
1695 if (asc == NULL) {
1696 /* Try later, no memory */
1697 sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1698 (struct sctp_inpcb *)NULL,
1699 (struct sctp_tcb *)NULL,
1700 (struct sctp_nets *)NULL);
1701 return;
1702 }
1703 LIST_INIT(&asc->list_of_work);
1704 asc->cnt = 0;
1705
1706 LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
1707 LIST_REMOVE(wi, sctp_nxt_addr);
1708 LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1709 asc->cnt++;
1710 }
1711
1712 if (asc->cnt == 0) {
1713 SCTP_FREE(asc, SCTP_M_ASC_IT);
1714 } else {
1715 int ret;
1716
1717 ret = sctp_initiate_iterator(sctp_asconf_iterator_ep,
1718 sctp_asconf_iterator_stcb,
1719 NULL, /* No ep end for boundall */
1720 SCTP_PCB_FLAGS_BOUNDALL,
1721 SCTP_PCB_ANY_FEATURES,
1722 SCTP_ASOC_ANY_STATE,
1723 (void *)asc, 0,
1724 sctp_asconf_iterator_end, NULL, 0);
1725 if (ret) {
1726 SCTP_PRINTF("Failed to initiate iterator for handle_addr_wq\n");
1727 /* Freeing if we are stopping or put back on the addr_wq. */
1728 if (SCTP_BASE_VAR(sctp_pcb_initialized) == 0) {
1729 sctp_asconf_iterator_end(asc, 0);
1730 } else {
1731 LIST_FOREACH(wi, &asc->list_of_work, sctp_nxt_addr) {
1732 LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
1733 }
1734 SCTP_FREE(asc, SCTP_M_ASC_IT);
1735 }
1736 }
1737 }
1738 }
1739
1740 /*-
1741 * The following table shows which pointers for the inp, stcb, or net are
1742 * stored for each timer after it was started.
1743 *
1744 *|Name |Timer |inp |stcb|net |
1745 *|-----------------------------|-----------------------------|----|----|----|
1746 *|SCTP_TIMER_TYPE_SEND |net->rxt_timer |Yes |Yes |Yes |
1747 *|SCTP_TIMER_TYPE_INIT |net->rxt_timer |Yes |Yes |Yes |
1748 *|SCTP_TIMER_TYPE_RECV |stcb->asoc.dack_timer |Yes |Yes |No |
1749 *|SCTP_TIMER_TYPE_SHUTDOWN |net->rxt_timer |Yes |Yes |Yes |
1750 *|SCTP_TIMER_TYPE_HEARTBEAT |net->hb_timer |Yes |Yes |Yes |
1751 *|SCTP_TIMER_TYPE_COOKIE |net->rxt_timer |Yes |Yes |Yes |
1752 *|SCTP_TIMER_TYPE_NEWCOOKIE |inp->sctp_ep.signature_change|Yes |No |No |
1753 *|SCTP_TIMER_TYPE_PATHMTURAISE |net->pmtu_timer |Yes |Yes |Yes |
1754 *|SCTP_TIMER_TYPE_SHUTDOWNACK |net->rxt_timer |Yes |Yes |Yes |
1755 *|SCTP_TIMER_TYPE_ASCONF |stcb->asoc.asconf_timer |Yes |Yes |Yes |
1756 *|SCTP_TIMER_TYPE_SHUTDOWNGUARD|stcb->asoc.shut_guard_timer |Yes |Yes |No |
1757 *|SCTP_TIMER_TYPE_AUTOCLOSE |stcb->asoc.autoclose_timer |Yes |Yes |No |
1758 *|SCTP_TIMER_TYPE_STRRESET |stcb->asoc.strreset_timer |Yes |Yes |No |
1759 *|SCTP_TIMER_TYPE_INPKILL |inp->sctp_ep.signature_change|Yes |No |No |
1760 *|SCTP_TIMER_TYPE_ASOCKILL |stcb->asoc.strreset_timer |Yes |Yes |No |
1761 *|SCTP_TIMER_TYPE_ADDR_WQ |SCTP_BASE_INFO(addr_wq_timer)|No |No |No |
1762 *|SCTP_TIMER_TYPE_PRIM_DELETED |stcb->asoc.delete_prim_timer |Yes |Yes |No |
1763 */
1764
1765 void
1766 sctp_timeout_handler(void *t)
1767 {
1768 #if defined(__FreeBSD__) && !defined(__Userspace__)
1769 struct epoch_tracker et;
1770 #endif
1771 struct timeval tv;
1772 struct sctp_inpcb *inp;
1773 struct sctp_tcb *stcb;
1774 struct sctp_nets *net;
1775 struct sctp_timer *tmr;
1776 struct mbuf *op_err;
1777 #if defined(__APPLE__) && !defined(__Userspace__)
1778 struct socket *so;
1779 #endif
1780 #if defined(__Userspace__)
1781 struct socket *upcall_socket = NULL;
1782 #endif
1783 int type;
1784 int i, secret;
1785 bool did_output, released_asoc_reference;
1786
1787 /*
1788 * If inp, stcb or net are not NULL, then references to these were
1789 * added when the timer was started, and must be released before this
1790 * function returns.
1791 */
1792 tmr = (struct sctp_timer *)t;
1793 inp = (struct sctp_inpcb *)tmr->ep;
1794 stcb = (struct sctp_tcb *)tmr->tcb;
1795 net = (struct sctp_nets *)tmr->net;
1796 #if defined(__FreeBSD__) && !defined(__Userspace__)
1797 CURVNET_SET((struct vnet *)tmr->vnet);
1798 #endif
1799 	did_output = true;
1800 released_asoc_reference = false;
1801
1802 #ifdef SCTP_AUDITING_ENABLED
1803 sctp_audit_log(0xF0, (uint8_t) tmr->type);
1804 sctp_auditing(3, inp, stcb, net);
1805 #endif
1806
1807 /* sanity checks... */
1808 KASSERT(tmr->self == NULL || tmr->self == tmr,
1809 ("sctp_timeout_handler: tmr->self corrupted"));
1810 KASSERT(SCTP_IS_TIMER_TYPE_VALID(tmr->type),
1811 ("sctp_timeout_handler: invalid timer type %d", tmr->type));
1812 type = tmr->type;
1813 KASSERT(stcb == NULL || stcb->sctp_ep == inp,
1814 ("sctp_timeout_handler of type %d: inp = %p, stcb->sctp_ep %p",
1815 type, stcb, stcb->sctp_ep));
1816 tmr->stopped_from = 0xa001;
1817 if ((stcb != NULL) && (stcb->asoc.state == SCTP_STATE_EMPTY)) {
1818 SCTPDBG(SCTP_DEBUG_TIMER2,
1819 "Timer type %d handler exiting due to CLOSED association.\n",
1820 type);
1821 goto out_decr;
1822 }
1823 tmr->stopped_from = 0xa002;
1824 SCTPDBG(SCTP_DEBUG_TIMER2, "Timer type %d goes off.\n", type);
1825 if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
1826 SCTPDBG(SCTP_DEBUG_TIMER2,
1827 "Timer type %d handler exiting due to not being active.\n",
1828 type);
1829 goto out_decr;
1830 }
1831
1832 tmr->stopped_from = 0xa003;
1833 if (stcb) {
1834 SCTP_TCB_LOCK(stcb);
1835 /*
1836 * Release reference so that association can be freed if
1837 * necessary below.
1838 * This is safe now that we have acquired the lock.
1839 */
1840 atomic_add_int(&stcb->asoc.refcnt, -1);
1841 released_asoc_reference = true;
1842 if ((type != SCTP_TIMER_TYPE_ASOCKILL) &&
1843 ((stcb->asoc.state == SCTP_STATE_EMPTY) ||
1844 (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
1845 SCTPDBG(SCTP_DEBUG_TIMER2,
1846 "Timer type %d handler exiting due to CLOSED association.\n",
1847 type);
1848 goto out;
1849 }
1850 } else if (inp != NULL) {
1851 SCTP_INP_WLOCK(inp);
1852 } else {
1853 SCTP_WQ_ADDR_LOCK();
1854 }
1855
1856 /* Record in stopped_from which timeout occurred. */
1857 tmr->stopped_from = type;
1858 #if defined(__FreeBSD__) && !defined(__Userspace__)
1859 NET_EPOCH_ENTER(et);
1860 #endif
1861 /* mark as being serviced now */
1862 if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
1863 /*
1864 * Callout has been rescheduled.
1865 */
1866 goto out;
1867 }
1868 if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
1869 /*
1870 * Not active, so no action.
1871 */
1872 goto out;
1873 }
1874 SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);
1875
1876 #if defined(__Userspace__)
1877 if ((stcb != NULL) &&
1878 !(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) &&
1879 (stcb->sctp_socket != NULL)) {
1880 upcall_socket = stcb->sctp_socket;
1881 SOCK_LOCK(upcall_socket);
1882 soref(upcall_socket);
1883 SOCK_UNLOCK(upcall_socket);
1884 }
1885 #endif
1886 /* call the handler for the appropriate timer type */
1887 switch (type) {
1888 case SCTP_TIMER_TYPE_SEND:
1889 KASSERT(inp != NULL && stcb != NULL && net != NULL,
1890 ("timeout of type %d: inp = %p, stcb = %p, net = %p",
1891 type, inp, stcb, net));
1892 SCTP_STAT_INCR(sctps_timodata);
1893 stcb->asoc.timodata++;
1894 stcb->asoc.num_send_timers_up--;
1895 if (stcb->asoc.num_send_timers_up < 0) {
1896 stcb->asoc.num_send_timers_up = 0;
1897 }
1898 SCTP_TCB_LOCK_ASSERT(stcb);
1899 if (sctp_t3rxt_timer(inp, stcb, net)) {
1900 			/* no need to unlock on tcb, it's gone */
1901
1902 goto out_decr;
1903 }
1904 SCTP_TCB_LOCK_ASSERT(stcb);
1905 #ifdef SCTP_AUDITING_ENABLED
1906 sctp_auditing(4, inp, stcb, net);
1907 #endif
1908 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1909 did_output = true;
1910 if ((stcb->asoc.num_send_timers_up == 0) &&
1911 (stcb->asoc.sent_queue_cnt > 0)) {
1912 struct sctp_tmit_chunk *chk;
1913
1914 /*
1915 			 * Safeguard. If something is on the sent queue
1916 			 * somewhere but no timers are running, something is
1917 			 * wrong... so we start a timer on the first chunk
1918 			 * on the sent queue on whatever net it is sent to.
1919 */
1920 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
1921 if (chk->whoTo != NULL) {
1922 break;
1923 }
1924 }
1925 if (chk != NULL) {
1926 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, chk->whoTo);
1927 }
1928 }
1929 break;
1930 case SCTP_TIMER_TYPE_INIT:
1931 KASSERT(inp != NULL && stcb != NULL && net != NULL,
1932 ("timeout of type %d: inp = %p, stcb = %p, net = %p",
1933 type, inp, stcb, net));
1934 SCTP_STAT_INCR(sctps_timoinit);
1935 stcb->asoc.timoinit++;
1936 if (sctp_t1init_timer(inp, stcb, net)) {
1937 			/* no need to unlock on tcb, it's gone */
1938 goto out_decr;
1939 }
1940 did_output = false;
1941 break;
1942 case SCTP_TIMER_TYPE_RECV:
1943 KASSERT(inp != NULL && stcb != NULL && net == NULL,
1944 ("timeout of type %d: inp = %p, stcb = %p, net = %p",
1945 type, inp, stcb, net));
1946 SCTP_STAT_INCR(sctps_timosack);
1947 stcb->asoc.timosack++;
1948 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
1949 #ifdef SCTP_AUDITING_ENABLED
1950 sctp_auditing(4, inp, stcb, NULL);
1951 #endif
1952 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
1953 did_output = true;
1954 break;
1955 case SCTP_TIMER_TYPE_SHUTDOWN:
1956 KASSERT(inp != NULL && stcb != NULL && net != NULL,
1957 ("timeout of type %d: inp = %p, stcb = %p, net = %p",
1958 type, inp, stcb, net));
1959 SCTP_STAT_INCR(sctps_timoshutdown);
1960 stcb->asoc.timoshutdown++;
1961 if (sctp_shutdown_timer(inp, stcb, net)) {
1962 			/* no need to unlock on tcb, it's gone */
1963 goto out_decr;
1964 }
1965 #ifdef SCTP_AUDITING_ENABLED
1966 sctp_auditing(4, inp, stcb, net);
1967 #endif
1968 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
1969 did_output = true;
1970 break;
1971 case SCTP_TIMER_TYPE_HEARTBEAT:
1972 KASSERT(inp != NULL && stcb != NULL && net != NULL,
1973 ("timeout of type %d: inp = %p, stcb = %p, net = %p",
1974 type, inp, stcb, net));
1975 SCTP_STAT_INCR(sctps_timoheartbeat);
1976 stcb->asoc.timoheartbeat++;
1977 if (sctp_heartbeat_timer(inp, stcb, net)) {
1978 			/* no need to unlock on tcb, it's gone */
1979 goto out_decr;
1980 }
1981 #ifdef SCTP_AUDITING_ENABLED
1982 sctp_auditing(4, inp, stcb, net);
1983 #endif
1984 if (!(net->dest_state & SCTP_ADDR_NOHB)) {
1985 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
1986 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
1987 did_output = true;
1988 } else {
1989 did_output = false;
1990 }
1991 break;
1992 case SCTP_TIMER_TYPE_COOKIE:
1993 KASSERT(inp != NULL && stcb != NULL && net != NULL,
1994 ("timeout of type %d: inp = %p, stcb = %p, net = %p",
1995 type, inp, stcb, net));
1996 SCTP_STAT_INCR(sctps_timocookie);
1997 stcb->asoc.timocookie++;
1998 if (sctp_cookie_timer(inp, stcb, net)) {
1999 			/* no need to unlock on tcb, it's gone */
2000 goto out_decr;
2001 }
2002 #ifdef SCTP_AUDITING_ENABLED
2003 sctp_auditing(4, inp, stcb, net);
2004 #endif
2005 /*
2006 		 * We consider the T3 and Cookie timers pretty much the same
2007 		 * with respect to the "from" location passed to chunk_output.
2008 */
2009 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
2010 did_output = true;
2011 break;
2012 case SCTP_TIMER_TYPE_NEWCOOKIE:
2013 KASSERT(inp != NULL && stcb == NULL && net == NULL,
2014 ("timeout of type %d: inp = %p, stcb = %p, net = %p",
2015 type, inp, stcb, net));
2016 SCTP_STAT_INCR(sctps_timosecret);
2017 (void)SCTP_GETTIME_TIMEVAL(&tv);
2018 inp->sctp_ep.time_of_secret_change = tv.tv_sec;
2019 inp->sctp_ep.last_secret_number =
2020 inp->sctp_ep.current_secret_number;
2021 inp->sctp_ep.current_secret_number++;
2022 if (inp->sctp_ep.current_secret_number >=
2023 SCTP_HOW_MANY_SECRETS) {
2024 inp->sctp_ep.current_secret_number = 0;
2025 }
2026 secret = (int)inp->sctp_ep.current_secret_number;
2027 for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
2028 inp->sctp_ep.secret_key[secret][i] =
2029 sctp_select_initial_TSN(&inp->sctp_ep);
2030 }
2031 sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, NULL, NULL);
2032 did_output = false;
2033 break;
2034 case SCTP_TIMER_TYPE_PATHMTURAISE:
2035 KASSERT(inp != NULL && stcb != NULL && net != NULL,
2036 ("timeout of type %d: inp = %p, stcb = %p, net = %p",
2037 type, inp, stcb, net));
2038 SCTP_STAT_INCR(sctps_timopathmtu);
2039 sctp_pathmtu_timer(inp, stcb, net);
2040 did_output = false;
2041 break;
2042 case SCTP_TIMER_TYPE_SHUTDOWNACK:
2043 KASSERT(inp != NULL && stcb != NULL && net != NULL,
2044 ("timeout of type %d: inp = %p, stcb = %p, net = %p",
2045 type, inp, stcb, net));
2046 if (sctp_shutdownack_timer(inp, stcb, net)) {
2047 			/* no need to unlock on tcb, it's gone */
2048 goto out_decr;
2049 }
2050 SCTP_STAT_INCR(sctps_timoshutdownack);
2051 stcb->asoc.timoshutdownack++;
2052 #ifdef SCTP_AUDITING_ENABLED
2053 sctp_auditing(4, inp, stcb, net);
2054 #endif
2055 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
2056 did_output = true;
2057 break;
2058 case SCTP_TIMER_TYPE_ASCONF:
2059 KASSERT(inp != NULL && stcb != NULL && net != NULL,
2060 ("timeout of type %d: inp = %p, stcb = %p, net = %p",
2061 type, inp, stcb, net));
2062 SCTP_STAT_INCR(sctps_timoasconf);
2063 if (sctp_asconf_timer(inp, stcb, net)) {
2064 			/* no need to unlock on tcb, it's gone */
2065 goto out_decr;
2066 }
2067 #ifdef SCTP_AUDITING_ENABLED
2068 sctp_auditing(4, inp, stcb, net);
2069 #endif
2070 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
2071 did_output = true;
2072 break;
2073 case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2074 KASSERT(inp != NULL && stcb != NULL && net == NULL,
2075 ("timeout of type %d: inp = %p, stcb = %p, net = %p",
2076 type, inp, stcb, net));
2077 SCTP_STAT_INCR(sctps_timoshutdownguard);
2078 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
2079 "Shutdown guard timer expired");
2080 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2081 did_output = true;
2082 		/* no need to unlock on tcb, it's gone */
2083 goto out_decr;
2084 case SCTP_TIMER_TYPE_AUTOCLOSE:
2085 KASSERT(inp != NULL && stcb != NULL && net == NULL,
2086 ("timeout of type %d: inp = %p, stcb = %p, net = %p",
2087 type, inp, stcb, net));
2088 SCTP_STAT_INCR(sctps_timoautoclose);
2089 sctp_autoclose_timer(inp, stcb);
2090 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
2091 did_output = true;
2092 break;
2093 case SCTP_TIMER_TYPE_STRRESET:
2094 KASSERT(inp != NULL && stcb != NULL && net == NULL,
2095 ("timeout of type %d: inp = %p, stcb = %p, net = %p",
2096 type, inp, stcb, net));
2097 SCTP_STAT_INCR(sctps_timostrmrst);
2098 if (sctp_strreset_timer(inp, stcb)) {
2099 			/* no need to unlock on tcb, it's gone */
2100 goto out_decr;
2101 }
2102 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
2103 did_output = true;
2104 break;
2105 case SCTP_TIMER_TYPE_INPKILL:
2106 KASSERT(inp != NULL && stcb == NULL && net == NULL,
2107 ("timeout of type %d: inp = %p, stcb = %p, net = %p",
2108 type, inp, stcb, net));
2109 SCTP_STAT_INCR(sctps_timoinpkill);
2110 /*
2111 * special case, take away our increment since WE are the
2112 * killer
2113 */
2114 sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL,
2115 SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
2116 #if defined(__APPLE__) && !defined(__Userspace__)
2117 SCTP_SOCKET_LOCK(SCTP_INP_SO(inp), 1);
2118 #endif
2119 SCTP_INP_DECR_REF(inp);
2120 SCTP_INP_WUNLOCK(inp);
2121 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
2122 SCTP_CALLED_FROM_INPKILL_TIMER);
2123 #if defined(__APPLE__) && !defined(__Userspace__)
2124 SCTP_SOCKET_UNLOCK(SCTP_INP_SO(inp), 1);
2125 #endif
2126 inp = NULL;
2127 goto out_no_decr;
2128 case SCTP_TIMER_TYPE_ASOCKILL:
2129 KASSERT(inp != NULL && stcb != NULL && net == NULL,
2130 ("timeout of type %d: inp = %p, stcb = %p, net = %p",
2131 type, inp, stcb, net));
2132 SCTP_STAT_INCR(sctps_timoassockill);
2133 /* Can we free it yet? */
2134 SCTP_INP_DECR_REF(inp);
2135 sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL,
2136 SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
2137 #if defined(__APPLE__) && !defined(__Userspace__)
2138 so = SCTP_INP_SO(inp);
2139 atomic_add_int(&stcb->asoc.refcnt, 1);
2140 SCTP_TCB_UNLOCK(stcb);
2141 SCTP_SOCKET_LOCK(so, 1);
2142 SCTP_TCB_LOCK(stcb);
2143 atomic_subtract_int(&stcb->asoc.refcnt, 1);
2144 #endif
2145 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
2146 SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
2147 #if defined(__APPLE__) && !defined(__Userspace__)
2148 SCTP_SOCKET_UNLOCK(so, 1);
2149 #endif
2150 /*
2151 		 * free asoc, always unlocks (or destroys) so prevent
2152 * duplicate unlock or unlock of a free mtx :-0
2153 */
2154 stcb = NULL;
2155 goto out_no_decr;
2156 case SCTP_TIMER_TYPE_ADDR_WQ:
2157 KASSERT(inp == NULL && stcb == NULL && net == NULL,
2158 ("timeout of type %d: inp = %p, stcb = %p, net = %p",
2159 type, inp, stcb, net));
2160 sctp_handle_addr_wq();
2161 did_output = true;
2162 break;
2163 case SCTP_TIMER_TYPE_PRIM_DELETED:
2164 KASSERT(inp != NULL && stcb != NULL && net == NULL,
2165 ("timeout of type %d: inp = %p, stcb = %p, net = %p",
2166 type, inp, stcb, net));
2167 SCTP_STAT_INCR(sctps_timodelprim);
2168 sctp_delete_prim_timer(inp, stcb);
2169 did_output = false;
2170 break;
2171 default:
2172 #ifdef INVARIANTS
2173 panic("Unknown timer type %d", type);
2174 #else
2175 did_output = false;
2176 goto out;
2177 #endif
2178 }
2179 #ifdef SCTP_AUDITING_ENABLED
2180 sctp_audit_log(0xF1, (uint8_t) type);
2181 if (inp != NULL)
2182 sctp_auditing(5, inp, stcb, net);
2183 #endif
2184 	if (did_output && (stcb != NULL)) {
2185 /*
2186 * Now we need to clean up the control chunk chain if an
2187 		 * ECNE is on it. It must be marked as UNSENT again so the next
2188 		 * call will continue to send it until such time that we get
2189 		 * a CWR to remove it. It is, however, unlikely that we
2190 		 * will find an ECN echo on the chain.
2191 */
2192 sctp_fix_ecn_echo(&stcb->asoc);
2193 }
2194 out:
2195 if (stcb != NULL) {
2196 SCTP_TCB_UNLOCK(stcb);
2197 } else if (inp != NULL) {
2198 SCTP_INP_WUNLOCK(inp);
2199 } else {
2200 SCTP_WQ_ADDR_UNLOCK();
2201 }
2202
2203 out_decr:
2204 #if defined(__Userspace__)
2205 if (upcall_socket != NULL) {
2206 if ((upcall_socket->so_upcall != NULL) &&
2207 (upcall_socket->so_error != 0)) {
2208 (*upcall_socket->so_upcall)(upcall_socket, upcall_socket->so_upcallarg, M_NOWAIT);
2209 }
2210 ACCEPT_LOCK();
2211 SOCK_LOCK(upcall_socket);
2212 sorele(upcall_socket);
2213 }
2214 #endif
2215 /* These reference counts were incremented in sctp_timer_start(). */
2216 if (inp != NULL) {
2217 SCTP_INP_DECR_REF(inp);
2218 }
2219 if ((stcb != NULL) && !released_asoc_reference) {
2220 atomic_add_int(&stcb->asoc.refcnt, -1);
2221 }
2222 if (net != NULL) {
2223 sctp_free_remote_addr(net);
2224 }
2225 out_no_decr:
2226 SCTPDBG(SCTP_DEBUG_TIMER2, "Timer type %d handler finished.\n", type);
2227 #if defined(__FreeBSD__) && !defined(__Userspace__)
2228 CURVNET_RESTORE();
2229 NET_EPOCH_EXIT(et);
2230 #endif
2231 }
2232
2233 /*-
2234 * The following table shows which parameters must be provided
2235 * when calling sctp_timer_start(). For parameters not being
2236 * provided, NULL must be used.
2237 *
2238 * |Name |inp |stcb|net |
2239 * |-----------------------------|----|----|----|
2240 * |SCTP_TIMER_TYPE_SEND |Yes |Yes |Yes |
2241 * |SCTP_TIMER_TYPE_INIT |Yes |Yes |Yes |
2242 * |SCTP_TIMER_TYPE_RECV |Yes |Yes |No |
2243 * |SCTP_TIMER_TYPE_SHUTDOWN |Yes |Yes |Yes |
2244 * |SCTP_TIMER_TYPE_HEARTBEAT |Yes |Yes |Yes |
2245 * |SCTP_TIMER_TYPE_COOKIE |Yes |Yes |Yes |
2246 * |SCTP_TIMER_TYPE_NEWCOOKIE |Yes |No |No |
2247 * |SCTP_TIMER_TYPE_PATHMTURAISE |Yes |Yes |Yes |
2248 * |SCTP_TIMER_TYPE_SHUTDOWNACK |Yes |Yes |Yes |
2249 * |SCTP_TIMER_TYPE_ASCONF |Yes |Yes |Yes |
2250 * |SCTP_TIMER_TYPE_SHUTDOWNGUARD|Yes |Yes |No |
2251 * |SCTP_TIMER_TYPE_AUTOCLOSE |Yes |Yes |No |
2252 * |SCTP_TIMER_TYPE_STRRESET |Yes |Yes |Yes |
2253 * |SCTP_TIMER_TYPE_INPKILL |Yes |No |No |
2254 * |SCTP_TIMER_TYPE_ASOCKILL |Yes |Yes |No |
2255 * |SCTP_TIMER_TYPE_ADDR_WQ |No |No |No |
2256 * |SCTP_TIMER_TYPE_PRIM_DELETED |Yes |Yes |No |
2257 *
2258 */
2259
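/*
 * For example, the T3-rxt timer for a destination is (re)started with all
 * three pointers provided, as done in sctp_timeout_handler() above:
 *
 *   sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
 *
 * while the address work-queue timer takes no pointers at all, as done in
 * sctp_handle_addr_wq():
 *
 *   sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ, NULL, NULL, NULL);
 */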
2260 void
2261 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2262 struct sctp_nets *net)
2263 {
2264 struct sctp_timer *tmr;
2265 uint32_t to_ticks;
2266 uint32_t rndval, jitter;
2267
2268 KASSERT(stcb == NULL || stcb->sctp_ep == inp,
2269 ("sctp_timer_start of type %d: inp = %p, stcb->sctp_ep %p",
2270 t_type, stcb, stcb->sctp_ep));
2271 tmr = NULL;
2272 to_ticks = 0;
2273 if (stcb != NULL) {
2274 SCTP_TCB_LOCK_ASSERT(stcb);
2275 } else if (inp != NULL) {
2276 SCTP_INP_WLOCK_ASSERT(inp);
2277 } else {
2278 SCTP_WQ_ADDR_LOCK_ASSERT();
2279 }
2280 if (stcb != NULL) {
2281 /* Don't restart timer on association that's about to be killed. */
2282 if ((stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) &&
2283 (t_type != SCTP_TIMER_TYPE_ASOCKILL)) {
2284 SCTPDBG(SCTP_DEBUG_TIMER2,
2285 "Timer type %d not started: inp=%p, stcb=%p, net=%p (stcb deleted).\n",
2286 t_type, inp, stcb, net);
2287 return;
2288 }
2289 /* Don't restart timer on net that's been removed. */
2290 if (net != NULL && (net->dest_state & SCTP_ADDR_BEING_DELETED)) {
2291 SCTPDBG(SCTP_DEBUG_TIMER2,
2292 "Timer type %d not started: inp=%p, stcb=%p, net=%p (net deleted).\n",
2293 t_type, inp, stcb, net);
2294 return;
2295 }
2296 }
2297 switch (t_type) {
2298 case SCTP_TIMER_TYPE_SEND:
2299 /* Here we use the RTO timer. */
2300 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
2301 #ifdef INVARIANTS
2302 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2303 t_type, inp, stcb, net);
2304 #else
2305 return;
2306 #endif
2307 }
2308 tmr = &net->rxt_timer;
2309 if (net->RTO == 0) {
2310 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto);
2311 } else {
2312 to_ticks = sctp_msecs_to_ticks(net->RTO);
2313 }
2314 break;
2315 case SCTP_TIMER_TYPE_INIT:
2316 /*
2317 		 * Here we use the INIT timer default, usually about 1
2318 * second.
2319 */
2320 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
2321 #ifdef INVARIANTS
2322 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2323 t_type, inp, stcb, net);
2324 #else
2325 return;
2326 #endif
2327 }
2328 tmr = &net->rxt_timer;
2329 if (net->RTO == 0) {
2330 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto);
2331 } else {
2332 to_ticks = sctp_msecs_to_ticks(net->RTO);
2333 }
2334 break;
2335 case SCTP_TIMER_TYPE_RECV:
2336 /*
2337 * Here we use the Delayed-Ack timer value from the inp,
2338 		 * usually about 200 ms.
2339 */
2340 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
2341 #ifdef INVARIANTS
2342 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2343 t_type, inp, stcb, net);
2344 #else
2345 return;
2346 #endif
2347 }
2348 tmr = &stcb->asoc.dack_timer;
2349 to_ticks = sctp_msecs_to_ticks(stcb->asoc.delayed_ack);
2350 break;
2351 case SCTP_TIMER_TYPE_SHUTDOWN:
2352 /* Here we use the RTO of the destination. */
2353 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
2354 #ifdef INVARIANTS
2355 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2356 t_type, inp, stcb, net);
2357 #else
2358 return;
2359 #endif
2360 }
2361 tmr = &net->rxt_timer;
2362 if (net->RTO == 0) {
2363 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto);
2364 } else {
2365 to_ticks = sctp_msecs_to_ticks(net->RTO);
2366 }
2367 break;
2368 case SCTP_TIMER_TYPE_HEARTBEAT:
2369 /*
2370 		 * The net is used here so that we can add in the RTO, even
2371 		 * though we use a different timer. We also add the HB delay
2372 		 * PLUS a random jitter.
2373 */
2374 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
2375 #ifdef INVARIANTS
2376 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2377 t_type, inp, stcb, net);
2378 #else
2379 return;
2380 #endif
2381 }
2382 if ((net->dest_state & SCTP_ADDR_NOHB) &&
2383 !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) {
2384 SCTPDBG(SCTP_DEBUG_TIMER2,
2385 "Timer type %d not started: inp=%p, stcb=%p, net=%p.\n",
2386 t_type, inp, stcb, net);
2387 return;
2388 }
2389 tmr = &net->hb_timer;
2390 if (net->RTO == 0) {
2391 to_ticks = stcb->asoc.initial_rto;
2392 } else {
2393 to_ticks = net->RTO;
2394 }
2395 rndval = sctp_select_initial_TSN(&inp->sctp_ep);
2396 jitter = rndval % to_ticks;
2397 if (jitter >= (to_ticks >> 1)) {
2398 to_ticks = to_ticks + (jitter - (to_ticks >> 1));
2399 } else {
2400 to_ticks = to_ticks - jitter;
2401 }
2402 if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
2403 !(net->dest_state & SCTP_ADDR_PF)) {
2404 to_ticks += net->heart_beat_delay;
2405 }
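		/*
		 * Net effect of the computation above: to_ticks is roughly
		 * uniformly distributed in [RTO/2, 3*RTO/2) milliseconds,
		 * with net->heart_beat_delay added on top for confirmed
		 * destinations that are not in the PF state.
		 */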
2406 /*
2407 		 * Now we must convert to_ticks, which is currently in
2408 		 * ms, to ticks.
2409 */
2410 to_ticks = sctp_msecs_to_ticks(to_ticks);
2411 break;
2412 case SCTP_TIMER_TYPE_COOKIE:
2413 /*
2414 		 * Here we can use the RTO of the network, since one
2415 		 * RTT was completed. If a retransmission happened, then we
2416 		 * will be using the initial RTO value.
2417 */
2418 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
2419 #ifdef INVARIANTS
2420 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2421 t_type, inp, stcb, net);
2422 #else
2423 return;
2424 #endif
2425 }
2426 tmr = &net->rxt_timer;
2427 if (net->RTO == 0) {
2428 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto);
2429 } else {
2430 to_ticks = sctp_msecs_to_ticks(net->RTO);
2431 }
2432 break;
2433 case SCTP_TIMER_TYPE_NEWCOOKIE:
2434 /*
2435 		 * Nothing needed but the endpoint here, usually about 60
2436 * minutes.
2437 */
2438 if ((inp == NULL) || (stcb != NULL) || (net != NULL)) {
2439 #ifdef INVARIANTS
2440 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2441 t_type, inp, stcb, net);
2442 #else
2443 return;
2444 #endif
2445 }
2446 tmr = &inp->sctp_ep.signature_change;
2447 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
2448 break;
2449 case SCTP_TIMER_TYPE_PATHMTURAISE:
2450 /*
2451 		 * Here we use the value found in the EP for PMTUD, usually
2452 * about 10 minutes.
2453 */
2454 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
2455 #ifdef INVARIANTS
2456 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2457 t_type, inp, stcb, net);
2458 #else
2459 return;
2460 #endif
2461 }
2462 if (net->dest_state & SCTP_ADDR_NO_PMTUD) {
2463 SCTPDBG(SCTP_DEBUG_TIMER2,
2464 "Timer type %d not started: inp=%p, stcb=%p, net=%p.\n",
2465 t_type, inp, stcb, net);
2466 return;
2467 }
2468 tmr = &net->pmtu_timer;
2469 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
2470 break;
2471 case SCTP_TIMER_TYPE_SHUTDOWNACK:
2472 /* Here we use the RTO of the destination. */
2473 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
2474 #ifdef INVARIANTS
2475 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2476 t_type, inp, stcb, net);
2477 #else
2478 return;
2479 #endif
2480 }
2481 tmr = &net->rxt_timer;
2482 if (net->RTO == 0) {
2483 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto);
2484 } else {
2485 to_ticks = sctp_msecs_to_ticks(net->RTO);
2486 }
2487 break;
2488 case SCTP_TIMER_TYPE_ASCONF:
2489 /*
2490 * Here the timer comes from the stcb but its value is from
2491 * the net's RTO.
2492 */
2493 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
2494 #ifdef INVARIANTS
2495 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2496 t_type, inp, stcb, net);
2497 #else
2498 return;
2499 #endif
2500 }
2501 tmr = &stcb->asoc.asconf_timer;
2502 if (net->RTO == 0) {
2503 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto);
2504 } else {
2505 to_ticks = sctp_msecs_to_ticks(net->RTO);
2506 }
2507 break;
2508 case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2509 /*
2510 		 * Here we use the endpoint's shutdown guard timer, usually
2511 * about 3 minutes.
2512 */
2513 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
2514 #ifdef INVARIANTS
2515 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2516 t_type, inp, stcb, net);
2517 #else
2518 return;
2519 #endif
2520 }
2521 tmr = &stcb->asoc.shut_guard_timer;
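		/*
		 * If no tick value has been configured, the code below falls
		 * back to 5 * RTO.Max (capped to avoid overflowing the
		 * multiplication), which appears to follow the
		 * T5-shutdown-guard timer suggestion in RFC 4960.
		 */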
2522 if (inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN] == 0) {
2523 if (stcb->asoc.maxrto < UINT32_MAX / 5) {
2524 to_ticks = sctp_msecs_to_ticks(5 * stcb->asoc.maxrto);
2525 } else {
2526 to_ticks = sctp_msecs_to_ticks(UINT32_MAX);
2527 }
2528 } else {
2529 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
2530 }
2531 break;
2532 case SCTP_TIMER_TYPE_AUTOCLOSE:
2533 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
2534 #ifdef INVARIANTS
2535 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2536 t_type, inp, stcb, net);
2537 #else
2538 return;
2539 #endif
2540 }
2541 tmr = &stcb->asoc.autoclose_timer;
2542 to_ticks = stcb->asoc.sctp_autoclose_ticks;
2543 break;
2544 case SCTP_TIMER_TYPE_STRRESET:
2545 /*
2546 * Here the timer comes from the stcb but its value is from
2547 * the net's RTO.
2548 */
2549 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
2550 #ifdef INVARIANTS
2551 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2552 t_type, inp, stcb, net);
2553 #else
2554 return;
2555 #endif
2556 }
2557 tmr = &stcb->asoc.strreset_timer;
2558 if (net->RTO == 0) {
2559 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto);
2560 } else {
2561 to_ticks = sctp_msecs_to_ticks(net->RTO);
2562 }
2563 break;
2564 case SCTP_TIMER_TYPE_INPKILL:
2565 /*
2566 		 * The inp is set up to die. We re-use the signature_change
2567 * timer since that has stopped and we are in the GONE
2568 * state.
2569 */
2570 if ((inp == NULL) || (stcb != NULL) || (net != NULL)) {
2571 #ifdef INVARIANTS
2572 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2573 t_type, inp, stcb, net);
2574 #else
2575 return;
2576 #endif
2577 }
2578 tmr = &inp->sctp_ep.signature_change;
2579 to_ticks = sctp_msecs_to_ticks(SCTP_INP_KILL_TIMEOUT);
2580 break;
2581 case SCTP_TIMER_TYPE_ASOCKILL:
2582 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
2583 #ifdef INVARIANTS
2584 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2585 t_type, inp, stcb, net);
2586 #else
2587 return;
2588 #endif
2589 }
2590 tmr = &stcb->asoc.strreset_timer;
2591 to_ticks = sctp_msecs_to_ticks(SCTP_ASOC_KILL_TIMEOUT);
2592 break;
2593 case SCTP_TIMER_TYPE_ADDR_WQ:
2594 if ((inp != NULL) || (stcb != NULL) || (net != NULL)) {
2595 #ifdef INVARIANTS
2596 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2597 t_type, inp, stcb, net);
2598 #else
2599 return;
2600 #endif
2601 }
2602 /* Only 1 tick away :-) */
2603 tmr = &SCTP_BASE_INFO(addr_wq_timer);
2604 to_ticks = SCTP_ADDRESS_TICK_DELAY;
2605 break;
2606 case SCTP_TIMER_TYPE_PRIM_DELETED:
2607 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
2608 #ifdef INVARIANTS
2609 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2610 t_type, inp, stcb, net);
2611 #else
2612 return;
2613 #endif
2614 }
2615 tmr = &stcb->asoc.delete_prim_timer;
2616 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto);
2617 break;
2618 default:
2619 #ifdef INVARIANTS
2620 panic("Unknown timer type %d", t_type);
2621 #else
2622 return;
2623 #endif
2624 }
2625 KASSERT(tmr != NULL, ("tmr is NULL for timer type %d", t_type));
2626 KASSERT(to_ticks > 0, ("to_ticks == 0 for timer type %d", t_type));
2627 if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
2628 /*
2629 * We do NOT allow you to have it already running. If it is,
2630 * we leave the current one up unchanged.
2631 */
2632 SCTPDBG(SCTP_DEBUG_TIMER2,
2633 "Timer type %d already running: inp=%p, stcb=%p, net=%p.\n",
2634 t_type, inp, stcb, net);
2635 return;
2636 }
2637 /* At this point we can proceed. */
2638 if (t_type == SCTP_TIMER_TYPE_SEND) {
2639 stcb->asoc.num_send_timers_up++;
2640 }
2641 tmr->stopped_from = 0;
2642 tmr->type = t_type;
2643 tmr->ep = (void *)inp;
2644 tmr->tcb = (void *)stcb;
2645 if (t_type == SCTP_TIMER_TYPE_STRRESET) {
2646 tmr->net = NULL;
2647 } else {
2648 tmr->net = (void *)net;
2649 }
2650 tmr->self = (void *)tmr;
2651 #if defined(__FreeBSD__) && !defined(__Userspace__)
2652 tmr->vnet = (void *)curvnet;
2653 #endif
2654 tmr->ticks = sctp_get_tick_count();
2655 if (SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr) == 0) {
2656 SCTPDBG(SCTP_DEBUG_TIMER2,
2657 "Timer type %d started: ticks=%u, inp=%p, stcb=%p, net=%p.\n",
2658 t_type, to_ticks, inp, stcb, net);
2659 /*
2660 * If this is a newly scheduled callout, as opposed to a
2661 * rescheduled one, increment relevant reference counts.
2662 */
2663 if (tmr->ep != NULL) {
2664 SCTP_INP_INCR_REF(inp);
2665 }
2666 if (tmr->tcb != NULL) {
2667 atomic_add_int(&stcb->asoc.refcnt, 1);
2668 }
2669 if (tmr->net != NULL) {
2670 atomic_add_int(&net->ref_count, 1);
2671 }
2672 } else {
2673 /*
2674 * This should not happen, since we checked for pending
2675 * above.
2676 */
2677 SCTPDBG(SCTP_DEBUG_TIMER2,
2678 "Timer type %d restarted: ticks=%u, inp=%p, stcb=%p, net=%p.\n",
2679 t_type, to_ticks, inp, stcb, net);
2680 }
2681 return;
2682 }
2683
2684 /*-
2685 * The following table shows which parameters must be provided
2686 * when calling sctp_timer_stop(). For parameters not being
2687 * provided, NULL must be used.
2688 *
2689 * |Name |inp |stcb|net |
2690 * |-----------------------------|----|----|----|
2691 * |SCTP_TIMER_TYPE_SEND |Yes |Yes |Yes |
2692 * |SCTP_TIMER_TYPE_INIT |Yes |Yes |Yes |
2693 * |SCTP_TIMER_TYPE_RECV |Yes |Yes |No |
2694 * |SCTP_TIMER_TYPE_SHUTDOWN |Yes |Yes |Yes |
2695 * |SCTP_TIMER_TYPE_HEARTBEAT |Yes |Yes |Yes |
2696 * |SCTP_TIMER_TYPE_COOKIE |Yes |Yes |Yes |
2697 * |SCTP_TIMER_TYPE_NEWCOOKIE |Yes |No |No |
2698 * |SCTP_TIMER_TYPE_PATHMTURAISE |Yes |Yes |Yes |
2699 * |SCTP_TIMER_TYPE_SHUTDOWNACK |Yes |Yes |Yes |
2700 * |SCTP_TIMER_TYPE_ASCONF |Yes |Yes |No |
2701 * |SCTP_TIMER_TYPE_SHUTDOWNGUARD|Yes |Yes |No |
2702 * |SCTP_TIMER_TYPE_AUTOCLOSE |Yes |Yes |No |
2703 * |SCTP_TIMER_TYPE_STRRESET |Yes |Yes |No |
2704 * |SCTP_TIMER_TYPE_INPKILL |Yes |No |No |
2705 * |SCTP_TIMER_TYPE_ASOCKILL |Yes |Yes |No |
2706 * |SCTP_TIMER_TYPE_ADDR_WQ |No |No |No |
2707 * |SCTP_TIMER_TYPE_PRIM_DELETED |Yes |Yes |No |
2708 *
2709 */
2710
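/*
 * For example, the INP-kill timer is stopped with only the inp provided, as
 * done in sctp_timeout_handler() above:
 *
 *   sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL,
 *                   SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
 */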
2711 void
2712 sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2713 struct sctp_nets *net, uint32_t from)
2714 {
2715 struct sctp_timer *tmr;
2716
2717 KASSERT(stcb == NULL || stcb->sctp_ep == inp,
2718 ("sctp_timer_stop of type %d: inp = %p, stcb->sctp_ep %p",
2719 t_type, stcb, stcb->sctp_ep));
2720 if (stcb != NULL) {
2721 SCTP_TCB_LOCK_ASSERT(stcb);
2722 } else if (inp != NULL) {
2723 SCTP_INP_WLOCK_ASSERT(inp);
2724 } else {
2725 SCTP_WQ_ADDR_LOCK_ASSERT();
2726 }
2727 tmr = NULL;
2728 switch (t_type) {
2729 case SCTP_TIMER_TYPE_SEND:
2730 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
2731 #ifdef INVARIANTS
2732 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2733 t_type, inp, stcb, net);
2734 #else
2735 return;
2736 #endif
2737 }
2738 tmr = &net->rxt_timer;
2739 break;
2740 case SCTP_TIMER_TYPE_INIT:
2741 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
2742 #ifdef INVARIANTS
2743 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2744 t_type, inp, stcb, net);
2745 #else
2746 return;
2747 #endif
2748 }
2749 tmr = &net->rxt_timer;
2750 break;
2751 case SCTP_TIMER_TYPE_RECV:
2752 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
2753 #ifdef INVARIANTS
2754 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2755 t_type, inp, stcb, net);
2756 #else
2757 return;
2758 #endif
2759 }
2760 tmr = &stcb->asoc.dack_timer;
2761 break;
2762 case SCTP_TIMER_TYPE_SHUTDOWN:
2763 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
2764 #ifdef INVARIANTS
2765 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2766 t_type, inp, stcb, net);
2767 #else
2768 return;
2769 #endif
2770 }
2771 tmr = &net->rxt_timer;
2772 break;
2773 case SCTP_TIMER_TYPE_HEARTBEAT:
2774 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
2775 #ifdef INVARIANTS
2776 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2777 t_type, inp, stcb, net);
2778 #else
2779 return;
2780 #endif
2781 }
2782 tmr = &net->hb_timer;
2783 break;
2784 case SCTP_TIMER_TYPE_COOKIE:
2785 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
2786 #ifdef INVARIANTS
2787 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2788 t_type, inp, stcb, net);
2789 #else
2790 return;
2791 #endif
2792 }
2793 tmr = &net->rxt_timer;
2794 break;
2795 case SCTP_TIMER_TYPE_NEWCOOKIE:
2796 if ((inp == NULL) || (stcb != NULL) || (net != NULL)) {
2797 #ifdef INVARIANTS
2798 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2799 t_type, inp, stcb, net);
2800 #else
2801 return;
2802 #endif
2803 }
2804 tmr = &inp->sctp_ep.signature_change;
2805 break;
2806 case SCTP_TIMER_TYPE_PATHMTURAISE:
2807 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
2808 #ifdef INVARIANTS
2809 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2810 t_type, inp, stcb, net);
2811 #else
2812 return;
2813 #endif
2814 }
2815 tmr = &net->pmtu_timer;
2816 break;
2817 case SCTP_TIMER_TYPE_SHUTDOWNACK:
2818 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
2819 #ifdef INVARIANTS
2820 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2821 t_type, inp, stcb, net);
2822 #else
2823 return;
2824 #endif
2825 }
2826 tmr = &net->rxt_timer;
2827 break;
2828 case SCTP_TIMER_TYPE_ASCONF:
2829 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
2830 #ifdef INVARIANTS
2831 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2832 t_type, inp, stcb, net);
2833 #else
2834 return;
2835 #endif
2836 }
2837 tmr = &stcb->asoc.asconf_timer;
2838 break;
2839 case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2840 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
2841 #ifdef INVARIANTS
2842 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2843 t_type, inp, stcb, net);
2844 #else
2845 return;
2846 #endif
2847 }
2848 tmr = &stcb->asoc.shut_guard_timer;
2849 break;
2850 case SCTP_TIMER_TYPE_AUTOCLOSE:
2851 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
2852 #ifdef INVARIANTS
2853 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2854 t_type, inp, stcb, net);
2855 #else
2856 return;
2857 #endif
2858 }
2859 tmr = &stcb->asoc.autoclose_timer;
2860 break;
2861 case SCTP_TIMER_TYPE_STRRESET:
2862 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
2863 #ifdef INVARIANTS
2864 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2865 t_type, inp, stcb, net);
2866 #else
2867 return;
2868 #endif
2869 }
2870 tmr = &stcb->asoc.strreset_timer;
2871 break;
2872 case SCTP_TIMER_TYPE_INPKILL:
2873 /*
2874 		 * The inp is set up to die. We re-use the signature_change
2875 * timer since that has stopped and we are in the GONE
2876 * state.
2877 */
2878 if ((inp == NULL) || (stcb != NULL) || (net != NULL)) {
2879 #ifdef INVARIANTS
2880 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2881 t_type, inp, stcb, net);
2882 #else
2883 return;
2884 #endif
2885 }
2886 tmr = &inp->sctp_ep.signature_change;
2887 break;
2888 case SCTP_TIMER_TYPE_ASOCKILL:
2889 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
2890 #ifdef INVARIANTS
2891 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2892 t_type, inp, stcb, net);
2893 #else
2894 return;
2895 #endif
2896 }
2897 tmr = &stcb->asoc.strreset_timer;
2898 break;
2899 case SCTP_TIMER_TYPE_ADDR_WQ:
2900 if ((inp != NULL) || (stcb != NULL) || (net != NULL)) {
2901 #ifdef INVARIANTS
2902 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2903 t_type, inp, stcb, net);
2904 #else
2905 return;
2906 #endif
2907 }
2908 tmr = &SCTP_BASE_INFO(addr_wq_timer);
2909 break;
2910 case SCTP_TIMER_TYPE_PRIM_DELETED:
2911 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
2912 #ifdef INVARIANTS
2913 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2914 t_type, inp, stcb, net);
2915 #else
2916 return;
2917 #endif
2918 }
2919 tmr = &stcb->asoc.delete_prim_timer;
2920 break;
2921 default:
2922 #ifdef INVARIANTS
2923 panic("Unknown timer type %d", t_type);
2924 #else
2925 return;
2926 #endif
2927 }
2928 KASSERT(tmr != NULL, ("tmr is NULL for timer type %d", t_type));
2929 if ((tmr->type != SCTP_TIMER_TYPE_NONE) &&
2930 (tmr->type != t_type)) {
2931 /*
2932 		 * OK, we have a timer that is under joint use, for example the
2933 		 * Cookie timer sharing its slot with the SEND timer. We are
2934 		 * therefore NOT running the timer that the caller wants
2935 		 * stopped, so just return.
2936 */
2937 SCTPDBG(SCTP_DEBUG_TIMER2,
2938 "Shared timer type %d not running: inp=%p, stcb=%p, net=%p.\n",
2939 t_type, inp, stcb, net);
2940 return;
2941 }
2942 if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
2943 stcb->asoc.num_send_timers_up--;
2944 if (stcb->asoc.num_send_timers_up < 0) {
2945 stcb->asoc.num_send_timers_up = 0;
2946 }
2947 }
2948 tmr->self = NULL;
2949 tmr->stopped_from = from;
2950 if (SCTP_OS_TIMER_STOP(&tmr->timer) == 1) {
2951 KASSERT(tmr->ep == inp,
2952 ("sctp_timer_stop of type %d: inp = %p, tmr->inp = %p",
2953 t_type, inp, tmr->ep));
2954 KASSERT(tmr->tcb == stcb,
2955 ("sctp_timer_stop of type %d: stcb = %p, tmr->stcb = %p",
2956 t_type, stcb, tmr->tcb));
2957 KASSERT(((t_type == SCTP_TIMER_TYPE_ASCONF) && (tmr->net != NULL)) ||
2958 ((t_type != SCTP_TIMER_TYPE_ASCONF) && (tmr->net == net)),
2959 ("sctp_timer_stop of type %d: net = %p, tmr->net = %p",
2960 t_type, net, tmr->net));
2961 SCTPDBG(SCTP_DEBUG_TIMER2,
2962 "Timer type %d stopped: inp=%p, stcb=%p, net=%p.\n",
2963 t_type, inp, stcb, net);
2964 /*
2965 * If the timer was actually stopped, decrement reference counts
2966 * that were incremented in sctp_timer_start().
2967 */
2968 if (tmr->ep != NULL) {
2969 SCTP_INP_DECR_REF(inp);
2970 tmr->ep = NULL;
2971 }
2972 if (tmr->tcb != NULL) {
2973 atomic_add_int(&stcb->asoc.refcnt, -1);
2974 tmr->tcb = NULL;
2975 }
2976 if (tmr->net != NULL) {
2977 /*
2978 * Can't use net, since it doesn't work for
2979 * SCTP_TIMER_TYPE_ASCONF.
2980 */
2981 sctp_free_remote_addr((struct sctp_nets *)tmr->net);
2982 tmr->net = NULL;
2983 }
2984 } else {
2985 SCTPDBG(SCTP_DEBUG_TIMER2,
2986 "Timer type %d not stopped: inp=%p, stcb=%p, net=%p.\n",
2987 t_type, inp, stcb, net);
2988 }
2989 return;
2990 }
2991
2992 uint32_t
2993 sctp_calculate_len(struct mbuf *m)
2994 {
2995 uint32_t tlen = 0;
2996 struct mbuf *at;
2997
2998 at = m;
2999 while (at) {
3000 tlen += SCTP_BUF_LEN(at);
3001 at = SCTP_BUF_NEXT(at);
3002 }
3003 return (tlen);
3004 }
3005
3006 void
3007 sctp_mtu_size_reset(struct sctp_inpcb *inp,
3008 struct sctp_association *asoc, uint32_t mtu)
3009 {
3010 /*
3011 	 * Reset the P-MTU size on this association. This involves changing
3012 	 * the asoc MTU and going through ANY chunk+overhead larger than mtu
3013 	 * to allow the DF flag to be cleared.
3014 */
3015 struct sctp_tmit_chunk *chk;
3016 unsigned int eff_mtu, ovh;
3017
3018 asoc->smallest_mtu = mtu;
3019 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
3020 ovh = SCTP_MIN_OVERHEAD;
3021 } else {
3022 ovh = SCTP_MIN_V4_OVERHEAD;
3023 }
3024 eff_mtu = mtu - ovh;
3025 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
3026 if (chk->send_size > eff_mtu) {
3027 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
3028 }
3029 }
3030 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3031 if (chk->send_size > eff_mtu) {
3032 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
3033 }
3034 }
3035 }
3036
3038 /*
3039 * Given an association and starting time of the current RTT period, update
3040 * RTO in number of msecs. net should point to the current network.
3041  * Return 1 if an RTO update was performed; return 0 if no update was
3042  * performed due to an invalid starting point.
3043 */
3044
3045 int
3046 sctp_calculate_rto(struct sctp_tcb *stcb,
3047 struct sctp_association *asoc,
3048 struct sctp_nets *net,
3049 struct timeval *old,
3050 int rtt_from_sack)
3051 {
3052 struct timeval now;
3053 uint64_t rtt_us; /* RTT in us */
3054 int32_t rtt; /* RTT in ms */
3055 uint32_t new_rto;
3056 int first_measure = 0;
3057
3058 /************************/
3059 /* 1. calculate new RTT */
3060 /************************/
3061 /* get the current time */
3062 if (stcb->asoc.use_precise_time) {
3063 (void)SCTP_GETPTIME_TIMEVAL(&now);
3064 } else {
3065 (void)SCTP_GETTIME_TIMEVAL(&now);
3066 }
3067 if ((old->tv_sec > now.tv_sec) ||
3068 ((old->tv_sec == now.tv_sec) && (old->tv_usec > now.tv_usec))) {
3069 /* The starting point is in the future. */
3070 return (0);
3071 }
3072 timevalsub(&now, old);
3073 rtt_us = (uint64_t)1000000 * (uint64_t)now.tv_sec + (uint64_t)now.tv_usec;
3074 if (rtt_us > SCTP_RTO_UPPER_BOUND * 1000) {
3075 /* The RTT is larger than a sane value. */
3076 return (0);
3077 }
3078 /* store the current RTT in us */
3079 net->rtt = rtt_us;
3080 /* compute rtt in ms */
3081 rtt = (int32_t)(net->rtt / 1000);
3082 if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) {
3083 /* Tell the CC module that a new update has just occurred from a sack */
3084 (*asoc->cc_functions.sctp_rtt_calculated)(stcb, net, &now);
3085 }
3086 	/* Do we need to determine the LAN type? We do this only
3087 	 * on SACKs, i.e. when the RTT is determined from data and
3088 	 * not from non-data (HB/INIT->INITACK).
3089 */
3090 if ((rtt_from_sack == SCTP_RTT_FROM_DATA) &&
3091 (net->lan_type == SCTP_LAN_UNKNOWN)) {
3092 if (net->rtt > SCTP_LOCAL_LAN_RTT) {
3093 net->lan_type = SCTP_LAN_INTERNET;
3094 } else {
3095 net->lan_type = SCTP_LAN_LOCAL;
3096 }
3097 }
3098
3099 /***************************/
3100 /* 2. update RTTVAR & SRTT */
3101 /***************************/
3102 /*-
3103 * Compute the scaled average lastsa and the
3104 	 * scaled variance lastsv as described in Van Jacobson's
3105 	 * paper "Congestion Avoidance and Control", Annex A.
3106 *
3107 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt
3108 * (net->lastsv >> SCTP_RTT_VAR_SHIFT) is the rttvar
3109 */
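	/*
	 * Roughly, assuming the usual shift values (SCTP_RTT_SHIFT == 3 and
	 * SCTP_RTT_VAR_SHIFT == 2), the update below amounts to
	 *
	 *   SRTT   += (RTT - SRTT) / 8
	 *   RTTVAR += (|RTT - SRTT| - RTTVAR) / 4
	 *   RTO     = SRTT + 4 * RTTVAR
	 *
	 * which corresponds to the RTO calculation rules in RFC 4960,
	 * Section 6.3.1.
	 */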
3110 if (net->RTO_measured) {
3111 rtt -= (net->lastsa >> SCTP_RTT_SHIFT);
3112 net->lastsa += rtt;
3113 if (rtt < 0) {
3114 rtt = -rtt;
3115 }
3116 rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);
3117 net->lastsv += rtt;
3118 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
3119 rto_logging(net, SCTP_LOG_RTTVAR);
3120 }
3121 } else {
3122 		/* First RTO measurement */
3123 net->RTO_measured = 1;
3124 first_measure = 1;
3125 net->lastsa = rtt << SCTP_RTT_SHIFT;
3126 net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT;
3127 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
3128 rto_logging(net, SCTP_LOG_INITIAL_RTT);
3129 }
3130 }
3131 if (net->lastsv == 0) {
3132 net->lastsv = SCTP_CLOCK_GRANULARITY;
3133 }
3134 new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
3135 if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
3136 (stcb->asoc.sat_network_lockout == 0)) {
3137 stcb->asoc.sat_network = 1;
3138 } else if ((!first_measure) && stcb->asoc.sat_network) {
3139 stcb->asoc.sat_network = 0;
3140 stcb->asoc.sat_network_lockout = 1;
3141 }
3142 /* bound it, per C6/C7 in Section 5.3.1 */
3143 if (new_rto < stcb->asoc.minrto) {
3144 new_rto = stcb->asoc.minrto;
3145 }
3146 if (new_rto > stcb->asoc.maxrto) {
3147 new_rto = stcb->asoc.maxrto;
3148 }
3149 net->RTO = new_rto;
3150 return (1);
3151 }
3152
3153 /*
3154  * Return a pointer to a contiguous piece of data from the given mbuf chain
3155  * starting at 'off' for 'len' bytes. If the desired piece spans more than
3156  * one mbuf, a copy is made at 'in_ptr'. The caller must ensure that the
3157  * buffer size is >= 'len'. Returns NULL if there aren't 'len' bytes in the chain.
3158 */
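/*
 * For example, a caller that only needs a parameter header typically passes a
 * small stack buffer of at least 'len' bytes (a sketch of the usual pattern):
 *
 *   struct sctp_paramhdr buf, *ph;
 *
 *   ph = (struct sctp_paramhdr *)sctp_m_getptr(m, offset, sizeof(buf),
 *                                              (uint8_t *)&buf);
 *
 * If the requested bytes are contiguous, the returned pointer references the
 * mbuf data directly; otherwise they have been copied into 'buf'.
 * sctp_get_next_param() below wraps exactly this pattern with a typed
 * signature.
 */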
3159 caddr_t
3160 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr)
3161 {
3162 uint32_t count;
3163 uint8_t *ptr;
3164
3165 ptr = in_ptr;
3166 if ((off < 0) || (len <= 0))
3167 return (NULL);
3168
3169 /* find the desired start location */
3170 while ((m != NULL) && (off > 0)) {
3171 if (off < SCTP_BUF_LEN(m))
3172 break;
3173 off -= SCTP_BUF_LEN(m);
3174 m = SCTP_BUF_NEXT(m);
3175 }
3176 if (m == NULL)
3177 return (NULL);
3178
3179 	/* is the current mbuf large enough (e.g. contiguous)? */
3180 if ((SCTP_BUF_LEN(m) - off) >= len) {
3181 return (mtod(m, caddr_t) + off);
3182 } else {
3183 /* else, it spans more than one mbuf, so save a temp copy... */
3184 while ((m != NULL) && (len > 0)) {
3185 count = min(SCTP_BUF_LEN(m) - off, len);
3186 memcpy(ptr, mtod(m, caddr_t) + off, count);
3187 len -= count;
3188 ptr += count;
3189 off = 0;
3190 m = SCTP_BUF_NEXT(m);
3191 }
3192 if ((m == NULL) && (len > 0))
3193 return (NULL);
3194 else
3195 return ((caddr_t)in_ptr);
3196 }
3197 }
3198
3201 struct sctp_paramhdr *
3202 sctp_get_next_param(struct mbuf *m,
3203 int offset,
3204 struct sctp_paramhdr *pull,
3205 int pull_limit)
3206 {
3207 /* This just provides a typed signature to Peter's Pull routine */
3208 return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
3209 (uint8_t *) pull));
3210 }
3211
3213 struct mbuf *
3214 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
3215 {
3216 struct mbuf *m_last;
3217 caddr_t dp;
3218
3219 if (padlen > 3) {
3220 return (NULL);
3221 }
3222 if (padlen <= M_TRAILINGSPACE(m)) {
3223 /*
3224 * The easy way. We hope the majority of the time we hit
3225 * here :)
3226 */
3227 m_last = m;
3228 } else {
3229 		/* Hard way: we must grow the mbuf chain */
3230 m_last = sctp_get_mbuf_for_msg(padlen, 0, M_NOWAIT, 1, MT_DATA);
3231 if (m_last == NULL) {
3232 return (NULL);
3233 }
3234 SCTP_BUF_LEN(m_last) = 0;
3235 SCTP_BUF_NEXT(m_last) = NULL;
3236 SCTP_BUF_NEXT(m) = m_last;
3237 }
3238 dp = mtod(m_last, caddr_t) + SCTP_BUF_LEN(m_last);
3239 SCTP_BUF_LEN(m_last) += padlen;
3240 memset(dp, 0, padlen);
3241 return (m_last);
3242 }
3243
3244 struct mbuf *
3245 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
3246 {
3247 /* find the last mbuf in chain and pad it */
3248 struct mbuf *m_at;
3249
3250 if (last_mbuf != NULL) {
3251 return (sctp_add_pad_tombuf(last_mbuf, padval));
3252 } else {
3253 for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
3254 if (SCTP_BUF_NEXT(m_at) == NULL) {
3255 return (sctp_add_pad_tombuf(m_at, padval));
3256 }
3257 }
3258 }
3259 return (NULL);
3260 }
3261
3262 static void
3263 sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb,
3264 uint16_t error, struct sctp_abort_chunk *abort, uint8_t from_peer, int so_locked)
3265 {
3266 struct mbuf *m_notify;
3267 struct sctp_assoc_change *sac;
3268 struct sctp_queued_to_read *control;
3269 unsigned int notif_len;
3270 uint16_t abort_len;
3271 unsigned int i;
3272 #if defined(__APPLE__) && !defined(__Userspace__)
3273 struct socket *so;
3274 #endif
3275
3276 if (stcb == NULL) {
3277 return;
3278 }
3279 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
3280 notif_len = (unsigned int)sizeof(struct sctp_assoc_change);
3281 if (abort != NULL) {
3282 abort_len = ntohs(abort->ch.chunk_length);
3283 /*
3284 			 * Only SCTP_CHUNK_BUFFER_SIZE bytes are guaranteed to be
3285 * contiguous.
3286 */
3287 if (abort_len > SCTP_CHUNK_BUFFER_SIZE) {
3288 abort_len = SCTP_CHUNK_BUFFER_SIZE;
3289 }
3290 } else {
3291 abort_len = 0;
3292 }
3293 if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
3294 notif_len += SCTP_ASSOC_SUPPORTS_MAX;
3295 } else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
3296 notif_len += abort_len;
3297 }
3298 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
3299 if (m_notify == NULL) {
3300 /* Retry with smaller value. */
3301 notif_len = (unsigned int)sizeof(struct sctp_assoc_change);
3302 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
3303 if (m_notify == NULL) {
3304 goto set_error;
3305 }
3306 }
3307 SCTP_BUF_NEXT(m_notify) = NULL;
3308 sac = mtod(m_notify, struct sctp_assoc_change *);
3309 memset(sac, 0, notif_len);
3310 sac->sac_type = SCTP_ASSOC_CHANGE;
3311 sac->sac_flags = 0;
3312 sac->sac_length = sizeof(struct sctp_assoc_change);
3313 sac->sac_state = state;
3314 sac->sac_error = error;
3315 /* XXX verify these stream counts */
3316 sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
3317 sac->sac_inbound_streams = stcb->asoc.streamincnt;
3318 sac->sac_assoc_id = sctp_get_associd(stcb);
3319 if (notif_len > sizeof(struct sctp_assoc_change)) {
3320 if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
3321 i = 0;
3322 if (stcb->asoc.prsctp_supported == 1) {
3323 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR;
3324 }
3325 if (stcb->asoc.auth_supported == 1) {
3326 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH;
3327 }
3328 if (stcb->asoc.asconf_supported == 1) {
3329 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF;
3330 }
3331 if (stcb->asoc.idata_supported == 1) {
3332 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_INTERLEAVING;
3333 }
3334 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF;
3335 if (stcb->asoc.reconfig_supported == 1) {
3336 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG;
3337 }
3338 sac->sac_length += i;
3339 } else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
3340 memcpy(sac->sac_info, abort, abort_len);
3341 sac->sac_length += abort_len;
3342 }
3343 }
3344 SCTP_BUF_LEN(m_notify) = sac->sac_length;
3345 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3346 0, 0, stcb->asoc.context, 0, 0, 0,
3347 m_notify);
3348 if (control != NULL) {
3349 control->length = SCTP_BUF_LEN(m_notify);
3350 control->spec_flags = M_NOTIFICATION;
3351 /* not that we need this */
3352 control->tail_mbuf = m_notify;
3353 sctp_add_to_readq(stcb->sctp_ep, stcb,
3354 control,
3355 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD,
3356 so_locked);
3357 } else {
3358 sctp_m_freem(m_notify);
3359 }
3360 }
3361 /*
3362 	 * For 1-to-1 style sockets, we send up an error when an ABORT
3363 * comes in.
3364 */
3365 set_error:
3366 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
3367 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
3368 ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
3369 SOCK_LOCK(stcb->sctp_socket);
3370 if (from_peer) {
3371 if (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) {
3372 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
3373 stcb->sctp_socket->so_error = ECONNREFUSED;
3374 } else {
3375 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
3376 stcb->sctp_socket->so_error = ECONNRESET;
3377 }
3378 } else {
3379 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
3380 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
3381 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ETIMEDOUT);
3382 stcb->sctp_socket->so_error = ETIMEDOUT;
3383 } else {
3384 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED);
3385 stcb->sctp_socket->so_error = ECONNABORTED;
3386 }
3387 }
3388 SOCK_UNLOCK(stcb->sctp_socket);
3389 }
3390 /* Wake ANY sleepers */
3391 #if defined(__APPLE__) && !defined(__Userspace__)
3392 so = SCTP_INP_SO(stcb->sctp_ep);
3393 if (!so_locked) {
3394 atomic_add_int(&stcb->asoc.refcnt, 1);
3395 SCTP_TCB_UNLOCK(stcb);
3396 SCTP_SOCKET_LOCK(so, 1);
3397 SCTP_TCB_LOCK(stcb);
3398 atomic_subtract_int(&stcb->asoc.refcnt, 1);
3399 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
3400 SCTP_SOCKET_UNLOCK(so, 1);
3401 return;
3402 }
3403 }
3404 #endif
3405 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
3406 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
3407 ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
3408 socantrcvmore(stcb->sctp_socket);
3409 }
3410 sorwakeup(stcb->sctp_socket);
3411 sowwakeup(stcb->sctp_socket);
3412 #if defined(__APPLE__) && !defined(__Userspace__)
3413 if (!so_locked) {
3414 SCTP_SOCKET_UNLOCK(so, 1);
3415 }
3416 #endif
3417 }
3418
3419 static void
3420 sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
3421 struct sockaddr *sa, uint32_t error, int so_locked)
3422 {
3423 struct mbuf *m_notify;
3424 struct sctp_paddr_change *spc;
3425 struct sctp_queued_to_read *control;
3426
3427 if ((stcb == NULL) ||
3428 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
3429 /* event not enabled */
3430 return;
3431 }
3432 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_NOWAIT, 1, MT_DATA);
3433 if (m_notify == NULL)
3434 return;
3435 SCTP_BUF_LEN(m_notify) = 0;
3436 spc = mtod(m_notify, struct sctp_paddr_change *);
3437 memset(spc, 0, sizeof(struct sctp_paddr_change));
3438 spc->spc_type = SCTP_PEER_ADDR_CHANGE;
3439 spc->spc_flags = 0;
3440 spc->spc_length = sizeof(struct sctp_paddr_change);
3441 switch (sa->sa_family) {
3442 #ifdef INET
3443 case AF_INET:
3444 #ifdef INET6
3445 if (sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
3446 in6_sin_2_v4mapsin6((struct sockaddr_in *)sa,
3447 (struct sockaddr_in6 *)&spc->spc_aaddr);
3448 } else {
3449 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
3450 }
3451 #else
3452 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
3453 #endif
3454 break;
3455 #endif
3456 #ifdef INET6
3457 case AF_INET6:
3458 {
3459 #ifdef SCTP_EMBEDDED_V6_SCOPE
3460 struct sockaddr_in6 *sin6;
3461 #endif /* SCTP_EMBEDDED_V6_SCOPE */
3462 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));
3463
3464 #ifdef SCTP_EMBEDDED_V6_SCOPE
3465 sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
3466 if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
3467 if (sin6->sin6_scope_id == 0) {
3468 /* recover scope_id for user */
3469 #ifdef SCTP_KAME
3470 (void)sa6_recoverscope(sin6);
3471 #else
3472 (void)in6_recoverscope(sin6, &sin6->sin6_addr,
3473 NULL);
3474 #endif
3475 } else {
3476 /* clear embedded scope_id for user */
3477 in6_clearscope(&sin6->sin6_addr);
3478 }
3479 }
3480 #endif /* SCTP_EMBEDDED_V6_SCOPE */
3481 break;
3482 }
3483 #endif
3484 #if defined(__Userspace__)
3485 case AF_CONN:
3486 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_conn));
3487 break;
3488 #endif
3489 default:
3490 /* TSNH */
3491 break;
3492 }
3493 spc->spc_state = state;
3494 spc->spc_error = error;
3495 spc->spc_assoc_id = sctp_get_associd(stcb);
3496
3497 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
3498 SCTP_BUF_NEXT(m_notify) = NULL;
3499
3500 /* append to socket */
3501 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3502 0, 0, stcb->asoc.context, 0, 0, 0,
3503 m_notify);
3504 if (control == NULL) {
3505 /* no memory */
3506 sctp_m_freem(m_notify);
3507 return;
3508 }
3509 control->length = SCTP_BUF_LEN(m_notify);
3510 control->spec_flags = M_NOTIFICATION;
3511 /* not that we need this */
3512 control->tail_mbuf = m_notify;
3513 sctp_add_to_readq(stcb->sctp_ep, stcb,
3514 control,
3515 &stcb->sctp_socket->so_rcv, 1,
3516 SCTP_READ_LOCK_NOT_HELD,
3517 so_locked);
3518 }
3519
3520
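/*
 * Queue an SCTP_SEND_FAILED / SCTP_SEND_FAILED_EVENT notification for a
 * chunk that could not be delivered, stripping the DATA/I-DATA chunk
 * header and any padding before handing the payload to the application.
 */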
3521 static void
3522 sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error,
3523 struct sctp_tmit_chunk *chk, int so_locked)
3524 {
3525 struct mbuf *m_notify;
3526 struct sctp_send_failed *ssf;
3527 struct sctp_send_failed_event *ssfe;
3528 struct sctp_queued_to_read *control;
3529 struct sctp_chunkhdr *chkhdr;
3530 int notifhdr_len, chk_len, chkhdr_len, padding_len, payload_len;
3531
3532 if ((stcb == NULL) ||
3533 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
3534 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
3535 /* event not enabled */
3536 return;
3537 }
3538
3539 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
3540 notifhdr_len = sizeof(struct sctp_send_failed_event);
3541 } else {
3542 notifhdr_len = sizeof(struct sctp_send_failed);
3543 }
3544 m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA);
3545 if (m_notify == NULL)
3546 /* no space left */
3547 return;
3548 SCTP_BUF_LEN(m_notify) = notifhdr_len;
3549 if (stcb->asoc.idata_supported) {
3550 chkhdr_len = sizeof(struct sctp_idata_chunk);
3551 } else {
3552 chkhdr_len = sizeof(struct sctp_data_chunk);
3553 }
3554 /* Use some defaults in case we can't access the chunk header */
3555 if (chk->send_size >= chkhdr_len) {
3556 payload_len = chk->send_size - chkhdr_len;
3557 } else {
3558 payload_len = 0;
3559 }
3560 padding_len = 0;
3561 if (chk->data != NULL) {
3562 chkhdr = mtod(chk->data, struct sctp_chunkhdr *);
3563 if (chkhdr != NULL) {
3564 chk_len = ntohs(chkhdr->chunk_length);
3565 if ((chk_len >= chkhdr_len) &&
3566 (chk->send_size >= chk_len) &&
3567 (chk->send_size - chk_len < 4)) {
3568 padding_len = chk->send_size - chk_len;
3569 payload_len = chk->send_size - chkhdr_len - padding_len;
3570 }
3571 }
3572 }
3573 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
3574 ssfe = mtod(m_notify, struct sctp_send_failed_event *);
3575 memset(ssfe, 0, notifhdr_len);
3576 ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
3577 if (sent) {
3578 ssfe->ssfe_flags = SCTP_DATA_SENT;
3579 } else {
3580 ssfe->ssfe_flags = SCTP_DATA_UNSENT;
3581 }
3582 ssfe->ssfe_length = (uint32_t)(notifhdr_len + payload_len);
3583 ssfe->ssfe_error = error;
3584 /* not exactly what the user sent in, but should be close :) */
3585 ssfe->ssfe_info.snd_sid = chk->rec.data.sid;
3586 ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags;
3587 ssfe->ssfe_info.snd_ppid = chk->rec.data.ppid;
3588 ssfe->ssfe_info.snd_context = chk->rec.data.context;
3589 ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
3590 ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
3591 } else {
3592 ssf = mtod(m_notify, struct sctp_send_failed *);
3593 memset(ssf, 0, notifhdr_len);
3594 ssf->ssf_type = SCTP_SEND_FAILED;
3595 if (sent) {
3596 ssf->ssf_flags = SCTP_DATA_SENT;
3597 } else {
3598 ssf->ssf_flags = SCTP_DATA_UNSENT;
3599 }
3600 ssf->ssf_length = (uint32_t)(notifhdr_len + payload_len);
3601 ssf->ssf_error = error;
3602 /* not exactly what the user sent in, but should be close :) */
3603 ssf->ssf_info.sinfo_stream = chk->rec.data.sid;
3604 ssf->ssf_info.sinfo_ssn = (uint16_t)chk->rec.data.mid;
3605 ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
3606 ssf->ssf_info.sinfo_ppid = chk->rec.data.ppid;
3607 ssf->ssf_info.sinfo_context = chk->rec.data.context;
3608 ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
3609 ssf->ssf_assoc_id = sctp_get_associd(stcb);
3610 }
3611 if (chk->data != NULL) {
3612 /* Trim off the sctp chunk header (it should be there) */
3613 if (chk->send_size == chkhdr_len + payload_len + padding_len) {
3614 m_adj(chk->data, chkhdr_len);
3615 m_adj(chk->data, -padding_len);
3616 sctp_mbuf_crush(chk->data);
3617 chk->send_size -= (chkhdr_len + padding_len);
3618 }
3619 }
3620 SCTP_BUF_NEXT(m_notify) = chk->data;
3621 /* Steal off the mbuf */
3622 chk->data = NULL;
3623 /*
3624 * For this case we check the actual socket buffer: since the assoc
3625 * is going away, we don't want to overfill the socket buffer for a
3626 * non-reader.
3627 */
3628 if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3629 sctp_m_freem(m_notify);
3630 return;
3631 }
3632 /* append to socket */
3633 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3634 0, 0, stcb->asoc.context, 0, 0, 0,
3635 m_notify);
3636 if (control == NULL) {
3637 /* no memory */
3638 sctp_m_freem(m_notify);
3639 return;
3640 }
3641 control->length = SCTP_BUF_LEN(m_notify);
3642 control->spec_flags = M_NOTIFICATION;
3643 /* not that we need this */
3644 control->tail_mbuf = m_notify;
3645 sctp_add_to_readq(stcb->sctp_ep, stcb,
3646 control,
3647 &stcb->sctp_socket->so_rcv, 1,
3648 SCTP_READ_LOCK_NOT_HELD,
3649 so_locked);
3650 }
3651
3652
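/*
 * Same as sctp_notify_send_failed(), but for messages still sitting on a
 * stream output queue; such data was never sent, so the notification is
 * always flagged SCTP_DATA_UNSENT.
 */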
3653 static void
3654 sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
3655 struct sctp_stream_queue_pending *sp, int so_locked)
3656 {
3657 struct mbuf *m_notify;
3658 struct sctp_send_failed *ssf;
3659 struct sctp_send_failed_event *ssfe;
3660 struct sctp_queued_to_read *control;
3661 int notifhdr_len;
3662
3663 if ((stcb == NULL) ||
3664 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
3665 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
3666 /* event not enabled */
3667 return;
3668 }
3669 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
3670 notifhdr_len = sizeof(struct sctp_send_failed_event);
3671 } else {
3672 notifhdr_len = sizeof(struct sctp_send_failed);
3673 }
3674 m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA);
3675 if (m_notify == NULL) {
3676 /* no space left */
3677 return;
3678 }
3679 SCTP_BUF_LEN(m_notify) = notifhdr_len;
3680 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
3681 ssfe = mtod(m_notify, struct sctp_send_failed_event *);
3682 memset(ssfe, 0, notifhdr_len);
3683 ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
3684 ssfe->ssfe_flags = SCTP_DATA_UNSENT;
3685 ssfe->ssfe_length = (uint32_t)(notifhdr_len + sp->length);
3686 ssfe->ssfe_error = error;
3687 /* not exactly what the user sent in, but should be close :) */
3688 ssfe->ssfe_info.snd_sid = sp->sid;
3689 if (sp->some_taken) {
3690 ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG;
3691 } else {
3692 ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG;
3693 }
3694 ssfe->ssfe_info.snd_ppid = sp->ppid;
3695 ssfe->ssfe_info.snd_context = sp->context;
3696 ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
3697 ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
3698 } else {
3699 ssf = mtod(m_notify, struct sctp_send_failed *);
3700 memset(ssf, 0, notifhdr_len);
3701 ssf->ssf_type = SCTP_SEND_FAILED;
3702 ssf->ssf_flags = SCTP_DATA_UNSENT;
3703 ssf->ssf_length = (uint32_t)(notifhdr_len + sp->length);
3704 ssf->ssf_error = error;
3705 /* not exactly what the user sent in, but should be close :) */
3706 ssf->ssf_info.sinfo_stream = sp->sid;
3707 ssf->ssf_info.sinfo_ssn = 0;
3708 if (sp->some_taken) {
3709 ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
3710 } else {
3711 ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
3712 }
3713 ssf->ssf_info.sinfo_ppid = sp->ppid;
3714 ssf->ssf_info.sinfo_context = sp->context;
3715 ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
3716 ssf->ssf_assoc_id = sctp_get_associd(stcb);
3717 }
3718 SCTP_BUF_NEXT(m_notify) = sp->data;
3719
3720 /* Steal off the mbuf */
3721 sp->data = NULL;
3722 /*
3723 * For this case we check the actual socket buffer: since the assoc
3724 * is going away, we don't want to overfill the socket buffer for a
3725 * non-reader.
3726 */
3727 if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3728 sctp_m_freem(m_notify);
3729 return;
3730 }
3731 /* append to socket */
3732 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3733 0, 0, stcb->asoc.context, 0, 0, 0,
3734 m_notify);
3735 if (control == NULL) {
3736 /* no memory */
3737 sctp_m_freem(m_notify);
3738 return;
3739 }
3740 control->length = SCTP_BUF_LEN(m_notify);
3741 control->spec_flags = M_NOTIFICATION;
3742 /* not that we need this */
3743 control->tail_mbuf = m_notify;
3744 sctp_add_to_readq(stcb->sctp_ep, stcb,
3745 control,
3746 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3747 }
3748
3749
3750
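/*
 * Deliver an SCTP_ADAPTATION_INDICATION carrying the peer's adaptation
 * layer indication, if the application enabled adaptation events.
 */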
3751 static void
3752 sctp_notify_adaptation_layer(struct sctp_tcb *stcb)
3753 {
3754 struct mbuf *m_notify;
3755 struct sctp_adaptation_event *sai;
3756 struct sctp_queued_to_read *control;
3757
3758 if ((stcb == NULL) ||
3759 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
3760 /* event not enabled */
3761 return;
3762 }
3763
3764 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaptation_event), 0, M_NOWAIT, 1, MT_DATA);
3765 if (m_notify == NULL)
3766 /* no space left */
3767 return;
3768 SCTP_BUF_LEN(m_notify) = 0;
3769 sai = mtod(m_notify, struct sctp_adaptation_event *);
3770 memset(sai, 0, sizeof(struct sctp_adaptation_event));
3771 sai->sai_type = SCTP_ADAPTATION_INDICATION;
3772 sai->sai_flags = 0;
3773 sai->sai_length = sizeof(struct sctp_adaptation_event);
3774 sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
3775 sai->sai_assoc_id = sctp_get_associd(stcb);
3776
3777 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
3778 SCTP_BUF_NEXT(m_notify) = NULL;
3779
3780 /* append to socket */
3781 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3782 0, 0, stcb->asoc.context, 0, 0, 0,
3783 m_notify);
3784 if (control == NULL) {
3785 /* no memory */
3786 sctp_m_freem(m_notify);
3787 return;
3788 }
3789 control->length = SCTP_BUF_LEN(m_notify);
3790 control->spec_flags = M_NOTIFICATION;
3791 /* not that we need this */
3792 control->tail_mbuf = m_notify;
3793 sctp_add_to_readq(stcb->sctp_ep, stcb,
3794 control,
3795 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3796 }
3797
3798 /* This always must be called with the read-queue LOCKED in the INP */
3799 static void
3800 sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
3801 uint32_t val, int so_locked)
3802 {
3803 struct mbuf *m_notify;
3804 struct sctp_pdapi_event *pdapi;
3805 struct sctp_queued_to_read *control;
3806 struct sockbuf *sb;
3807
3808 if ((stcb == NULL) ||
3809 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) {
3810 /* event not enabled */
3811 return;
3812 }
3813 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
3814 return;
3815 }
3816
3817 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_NOWAIT, 1, MT_DATA);
3818 if (m_notify == NULL)
3819 /* no space left */
3820 return;
3821 SCTP_BUF_LEN(m_notify) = 0;
3822 pdapi = mtod(m_notify, struct sctp_pdapi_event *);
3823 memset(pdapi, 0, sizeof(struct sctp_pdapi_event));
3824 pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
3825 pdapi->pdapi_flags = 0;
3826 pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
3827 pdapi->pdapi_indication = error;
3828 pdapi->pdapi_stream = (val >> 16);
3829 pdapi->pdapi_seq = (val & 0x0000ffff);
3830 pdapi->pdapi_assoc_id = sctp_get_associd(stcb);
3831
3832 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
3833 SCTP_BUF_NEXT(m_notify) = NULL;
3834 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3835 0, 0, stcb->asoc.context, 0, 0, 0,
3836 m_notify);
3837 if (control == NULL) {
3838 /* no memory */
3839 sctp_m_freem(m_notify);
3840 return;
3841 }
3842 control->length = SCTP_BUF_LEN(m_notify);
3843 control->spec_flags = M_NOTIFICATION;
3844 /* not that we need this */
3845 control->tail_mbuf = m_notify;
3846 sb = &stcb->sctp_socket->so_rcv;
3847 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3848 sctp_sblog(sb, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
3849 }
3850 sctp_sballoc(stcb, sb, m_notify);
3851 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3852 sctp_sblog(sb, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBRESULT, 0);
3853 }
3854 control->end_added = 1;
3855 if (stcb->asoc.control_pdapi)
3856 TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
3857 else {
3858 /* we really should not see this case */
3859 TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
3860 }
3861 if (stcb->sctp_ep && stcb->sctp_socket) {
3862 /* This should always be the case */
3863 #if defined(__APPLE__) && !defined(__Userspace__)
3864 struct socket *so;
3865
3866 so = SCTP_INP_SO(stcb->sctp_ep);
3867 if (!so_locked) {
3868 atomic_add_int(&stcb->asoc.refcnt, 1);
3869 SCTP_TCB_UNLOCK(stcb);
3870 SCTP_SOCKET_LOCK(so, 1);
3871 SCTP_TCB_LOCK(stcb);
3872 atomic_subtract_int(&stcb->asoc.refcnt, 1);
3873 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
3874 SCTP_SOCKET_UNLOCK(so, 1);
3875 return;
3876 }
3877 }
3878 #endif
3879 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
3880 #if defined(__APPLE__) && !defined(__Userspace__)
3881 if (!so_locked) {
3882 SCTP_SOCKET_UNLOCK(so, 1);
3883 }
3884 #endif
3885 }
3886 }
3887
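/*
 * Deliver an SCTP_SHUTDOWN_EVENT; for TCP model and connected UDP style
 * sockets the socket is also marked as unable to send more data.
 */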
3888 static void
3889 sctp_notify_shutdown_event(struct sctp_tcb *stcb)
3890 {
3891 struct mbuf *m_notify;
3892 struct sctp_shutdown_event *sse;
3893 struct sctp_queued_to_read *control;
3894
3895 /*
3896 * For TCP model AND UDP connected sockets we will send an error up
3897 * when a SHUTDOWN completes
3898 */
3899 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
3900 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
3901 /* mark socket closed for read/write and wakeup! */
3902 #if defined(__APPLE__) && !defined(__Userspace__)
3903 struct socket *so;
3904
3905 so = SCTP_INP_SO(stcb->sctp_ep);
3906 atomic_add_int(&stcb->asoc.refcnt, 1);
3907 SCTP_TCB_UNLOCK(stcb);
3908 SCTP_SOCKET_LOCK(so, 1);
3909 SCTP_TCB_LOCK(stcb);
3910 atomic_subtract_int(&stcb->asoc.refcnt, 1);
3911 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
3912 SCTP_SOCKET_UNLOCK(so, 1);
3913 return;
3914 }
3915 #endif
3916 socantsendmore(stcb->sctp_socket);
3917 #if defined(__APPLE__) && !defined(__Userspace__)
3918 SCTP_SOCKET_UNLOCK(so, 1);
3919 #endif
3920 }
3921 if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
3922 /* event not enabled */
3923 return;
3924 }
3925
3926 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_NOWAIT, 1, MT_DATA);
3927 if (m_notify == NULL)
3928 /* no space left */
3929 return;
3930 sse = mtod(m_notify, struct sctp_shutdown_event *);
3931 memset(sse, 0, sizeof(struct sctp_shutdown_event));
3932 sse->sse_type = SCTP_SHUTDOWN_EVENT;
3933 sse->sse_flags = 0;
3934 sse->sse_length = sizeof(struct sctp_shutdown_event);
3935 sse->sse_assoc_id = sctp_get_associd(stcb);
3936
3937 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
3938 SCTP_BUF_NEXT(m_notify) = NULL;
3939
3940 /* append to socket */
3941 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3942 0, 0, stcb->asoc.context, 0, 0, 0,
3943 m_notify);
3944 if (control == NULL) {
3945 /* no memory */
3946 sctp_m_freem(m_notify);
3947 return;
3948 }
3949 control->length = SCTP_BUF_LEN(m_notify);
3950 control->spec_flags = M_NOTIFICATION;
3951 /* not that we need this */
3952 control->tail_mbuf = m_notify;
3953 sctp_add_to_readq(stcb->sctp_ep, stcb,
3954 control,
3955 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3956 }
3957
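/*
 * Deliver an SCTP_SENDER_DRY_EVENT telling the application that no user
 * data is outstanding, if dry events are enabled.
 */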
3958 static void
3959 sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
3960 int so_locked)
3961 {
3962 struct mbuf *m_notify;
3963 struct sctp_sender_dry_event *event;
3964 struct sctp_queued_to_read *control;
3965
3966 if ((stcb == NULL) ||
3967 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) {
3968 /* event not enabled */
3969 return;
3970 }
3971
3972 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_NOWAIT, 1, MT_DATA);
3973 if (m_notify == NULL) {
3974 /* no space left */
3975 return;
3976 }
3977 SCTP_BUF_LEN(m_notify) = 0;
3978 event = mtod(m_notify, struct sctp_sender_dry_event *);
3979 memset(event, 0, sizeof(struct sctp_sender_dry_event));
3980 event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
3981 event->sender_dry_flags = 0;
3982 event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
3983 event->sender_dry_assoc_id = sctp_get_associd(stcb);
3984
3985 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
3986 SCTP_BUF_NEXT(m_notify) = NULL;
3987
3988 /* append to socket */
3989 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3990 0, 0, stcb->asoc.context, 0, 0, 0,
3991 m_notify);
3992 if (control == NULL) {
3993 /* no memory */
3994 sctp_m_freem(m_notify);
3995 return;
3996 }
3997 control->length = SCTP_BUF_LEN(m_notify);
3998 control->spec_flags = M_NOTIFICATION;
3999 /* not that we need this */
4000 control->tail_mbuf = m_notify;
4001 sctp_add_to_readq(stcb->sctp_ep, stcb, control,
4002 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
4003 }
4004
4005
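/*
 * Deliver an SCTP_STREAM_CHANGE_EVENT reporting the current number of
 * inbound and outbound streams after a stream add/change request.
 */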
4006 void
4007 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag)
4008 {
4009 struct mbuf *m_notify;
4010 struct sctp_queued_to_read *control;
4011 struct sctp_stream_change_event *stradd;
4012
4013 if ((stcb == NULL) ||
4014 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT))) {
4015 /* event not enabled */
4016 return;
4017 }
4018 if ((stcb->asoc.peer_req_out) && flag) {
4019 /* Peer made the request, don't tell the local user */
4020 stcb->asoc.peer_req_out = 0;
4021 return;
4022 }
4023 stcb->asoc.peer_req_out = 0;
4024 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_stream_change_event), 0, M_NOWAIT, 1, MT_DATA);
4025 if (m_notify == NULL)
4026 /* no space left */
4027 return;
4028 SCTP_BUF_LEN(m_notify) = 0;
4029 stradd = mtod(m_notify, struct sctp_stream_change_event *);
4030 memset(stradd, 0, sizeof(struct sctp_stream_change_event));
4031 stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT;
4032 stradd->strchange_flags = flag;
4033 stradd->strchange_length = sizeof(struct sctp_stream_change_event);
4034 stradd->strchange_assoc_id = sctp_get_associd(stcb);
4035 stradd->strchange_instrms = numberin;
4036 stradd->strchange_outstrms = numberout;
4037 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_stream_change_event);
4038 SCTP_BUF_NEXT(m_notify) = NULL;
4039 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
4040 /* no space */
4041 sctp_m_freem(m_notify);
4042 return;
4043 }
4044 /* append to socket */
4045 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
4046 0, 0, stcb->asoc.context, 0, 0, 0,
4047 m_notify);
4048 if (control == NULL) {
4049 /* no memory */
4050 sctp_m_freem(m_notify);
4051 return;
4052 }
4053 control->length = SCTP_BUF_LEN(m_notify);
4054 control->spec_flags = M_NOTIFICATION;
4055 /* not that we need this */
4056 control->tail_mbuf = m_notify;
4057 sctp_add_to_readq(stcb->sctp_ep, stcb,
4058 control,
4059 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
4060 }
4061
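/*
 * Deliver an SCTP_ASSOC_RESET_EVENT carrying the new local and remote
 * TSNs after an association (TSN) reset.
 */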
4062 void
4063 sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag)
4064 {
4065 struct mbuf *m_notify;
4066 struct sctp_queued_to_read *control;
4067 struct sctp_assoc_reset_event *strasoc;
4068
4069 if ((stcb == NULL) ||
4070 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT))) {
4071 /* event not enabled */
4072 return;
4073 }
4074 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_reset_event), 0, M_NOWAIT, 1, MT_DATA);
4075 if (m_notify == NULL)
4076 /* no space left */
4077 return;
4078 SCTP_BUF_LEN(m_notify) = 0;
4079 strasoc = mtod(m_notify, struct sctp_assoc_reset_event *);
4080 memset(strasoc, 0, sizeof(struct sctp_assoc_reset_event));
4081 strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT;
4082 strasoc->assocreset_flags = flag;
4083 strasoc->assocreset_length = sizeof(struct sctp_assoc_reset_event);
4084 strasoc->assocreset_assoc_id = sctp_get_associd(stcb);
4085 strasoc->assocreset_local_tsn = sending_tsn;
4086 strasoc->assocreset_remote_tsn = recv_tsn;
4087 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_reset_event);
4088 SCTP_BUF_NEXT(m_notify) = NULL;
4089 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
4090 /* no space */
4091 sctp_m_freem(m_notify);
4092 return;
4093 }
4094 /* append to socket */
4095 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
4096 0, 0, stcb->asoc.context, 0, 0, 0,
4097 m_notify);
4098 if (control == NULL) {
4099 /* no memory */
4100 sctp_m_freem(m_notify);
4101 return;
4102 }
4103 control->length = SCTP_BUF_LEN(m_notify);
4104 control->spec_flags = M_NOTIFICATION;
4105 /* not that we need this */
4106 control->tail_mbuf = m_notify;
4107 sctp_add_to_readq(stcb->sctp_ep, stcb,
4108 control,
4109 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
4110 }
4111
4112
4113
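/*
 * Deliver an SCTP_STREAM_RESET_EVENT listing the affected streams
 * (converted to host byte order) together with the result flags.
 */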
4114 static void
4115 sctp_notify_stream_reset(struct sctp_tcb *stcb,
4116 int number_entries, uint16_t * list, int flag)
4117 {
4118 struct mbuf *m_notify;
4119 struct sctp_queued_to_read *control;
4120 struct sctp_stream_reset_event *strreset;
4121 int len;
4122
4123 if ((stcb == NULL) ||
4124 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) {
4125 /* event not enabled */
4126 return;
4127 }
4128
4129 m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
4130 if (m_notify == NULL)
4131 /* no space left */
4132 return;
4133 SCTP_BUF_LEN(m_notify) = 0;
4134 len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
4135 if (len > M_TRAILINGSPACE(m_notify)) {
4136 /* never enough room */
4137 sctp_m_freem(m_notify);
4138 return;
4139 }
4140 strreset = mtod(m_notify, struct sctp_stream_reset_event *);
4141 memset(strreset, 0, len);
4142 strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
4143 strreset->strreset_flags = flag;
4144 strreset->strreset_length = len;
4145 strreset->strreset_assoc_id = sctp_get_associd(stcb);
4146 if (number_entries) {
4147 int i;
4148
4149 for (i = 0; i < number_entries; i++) {
4150 strreset->strreset_stream_list[i] = ntohs(list[i]);
4151 }
4152 }
4153 SCTP_BUF_LEN(m_notify) = len;
4154 SCTP_BUF_NEXT(m_notify) = NULL;
4155 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
4156 /* no space */
4157 sctp_m_freem(m_notify);
4158 return;
4159 }
4160 /* append to socket */
4161 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
4162 0, 0, stcb->asoc.context, 0, 0, 0,
4163 m_notify);
4164 if (control == NULL) {
4165 /* no memory */
4166 sctp_m_freem(m_notify);
4167 return;
4168 }
4169 control->length = SCTP_BUF_LEN(m_notify);
4170 control->spec_flags = M_NOTIFICATION;
4171 /* not that we need this */
4172 control->tail_mbuf = m_notify;
4173 sctp_add_to_readq(stcb->sctp_ep, stcb,
4174 control,
4175 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
4176 }
4177
4178
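/*
 * Deliver an SCTP_REMOTE_ERROR notification, copying at most
 * SCTP_CHUNK_BUFFER_SIZE bytes of the received ERROR chunk into it.
 */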
4179 static void
4180 sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk)
4181 {
4182 struct mbuf *m_notify;
4183 struct sctp_remote_error *sre;
4184 struct sctp_queued_to_read *control;
4185 unsigned int notif_len;
4186 uint16_t chunk_len;
4187
4188 if ((stcb == NULL) ||
4189 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) {
4190 return;
4191 }
4192 if (chunk != NULL) {
4193 chunk_len = ntohs(chunk->ch.chunk_length);
4194 /*
4195 * Only SCTP_CHUNK_BUFFER_SIZE bytes are guaranteed to be
4196 * contiguous.
4197 */
4198 if (chunk_len > SCTP_CHUNK_BUFFER_SIZE) {
4199 chunk_len = SCTP_CHUNK_BUFFER_SIZE;
4200 }
4201 } else {
4202 chunk_len = 0;
4203 }
4204 notif_len = (unsigned int)(sizeof(struct sctp_remote_error) + chunk_len);
4205 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
4206 if (m_notify == NULL) {
4207 /* Retry with smaller value. */
4208 notif_len = (unsigned int)sizeof(struct sctp_remote_error);
4209 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
4210 if (m_notify == NULL) {
4211 return;
4212 }
4213 }
4214 SCTP_BUF_NEXT(m_notify) = NULL;
4215 sre = mtod(m_notify, struct sctp_remote_error *);
4216 memset(sre, 0, notif_len);
4217 sre->sre_type = SCTP_REMOTE_ERROR;
4218 sre->sre_flags = 0;
4219 sre->sre_length = sizeof(struct sctp_remote_error);
4220 sre->sre_error = error;
4221 sre->sre_assoc_id = sctp_get_associd(stcb);
4222 if (notif_len > sizeof(struct sctp_remote_error)) {
4223 memcpy(sre->sre_data, chunk, chunk_len);
4224 sre->sre_length += chunk_len;
4225 }
4226 SCTP_BUF_LEN(m_notify) = sre->sre_length;
4227 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
4228 0, 0, stcb->asoc.context, 0, 0, 0,
4229 m_notify);
4230 if (control != NULL) {
4231 control->length = SCTP_BUF_LEN(m_notify);
4232 control->spec_flags = M_NOTIFICATION;
4233 /* not that we need this */
4234 control->tail_mbuf = m_notify;
4235 sctp_add_to_readq(stcb->sctp_ep, stcb,
4236 control,
4237 &stcb->sctp_socket->so_rcv, 1,
4238 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
4239 } else {
4240 sctp_m_freem(m_notify);
4241 }
4242 }
4243
4244
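/*
 * Central dispatcher for ULP notifications: maps an SCTP_NOTIFY_* code to
 * the matching notification routine above, after filtering out cases where
 * the socket is gone, can't receive anymore, or the association is still
 * in a front state.
 */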
4245 void
4246 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
4247 uint32_t error, void *data, int so_locked)
4248 {
4249 if ((stcb == NULL) ||
4250 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
4251 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
4252 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
4253 /* If the socket is gone we are out of here */
4254 return;
4255 }
4256 #if (defined(__FreeBSD__) || defined(_WIN32)) && !defined(__Userspace__)
4257 if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
4258 #else
4259 if (stcb->sctp_socket->so_state & SS_CANTRCVMORE) {
4260 #endif
4261 return;
4262 }
4263 #if defined(__APPLE__) && !defined(__Userspace__)
4264 if (so_locked) {
4265 sctp_lock_assert(SCTP_INP_SO(stcb->sctp_ep));
4266 } else {
4267 sctp_unlock_assert(SCTP_INP_SO(stcb->sctp_ep));
4268 }
4269 #endif
4270 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
4271 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
4272 if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
4273 (notification == SCTP_NOTIFY_INTERFACE_UP) ||
4274 (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
4275 /* Don't report these in front states */
4276 return;
4277 }
4278 }
4279 switch (notification) {
4280 case SCTP_NOTIFY_ASSOC_UP:
4281 if (stcb->asoc.assoc_up_sent == 0) {
4282 sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, 0, so_locked);
4283 stcb->asoc.assoc_up_sent = 1;
4284 }
4285 if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
4286 sctp_notify_adaptation_layer(stcb);
4287 }
4288 if (stcb->asoc.auth_supported == 0) {
4289 sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
4290 NULL, so_locked);
4291 }
4292 break;
4293 case SCTP_NOTIFY_ASSOC_DOWN:
4294 sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, 0, so_locked);
4295 #if defined(__Userspace__)
4296 if (stcb->sctp_ep->recv_callback) {
4297 if (stcb->sctp_socket) {
4298 union sctp_sockstore addr;
4299 struct sctp_rcvinfo rcv;
4300
4301 memset(&addr, 0, sizeof(union sctp_sockstore));
4302 memset(&rcv, 0, sizeof(struct sctp_rcvinfo));
4303 atomic_add_int(&stcb->asoc.refcnt, 1);
4304 SCTP_TCB_UNLOCK(stcb);
4305 stcb->sctp_ep->recv_callback(stcb->sctp_socket, addr, NULL, 0, rcv, 0, stcb->sctp_ep->ulp_info);
4306 SCTP_TCB_LOCK(stcb);
4307 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4308 }
4309 }
4310 #endif
4311 break;
4312 case SCTP_NOTIFY_INTERFACE_DOWN:
4313 {
4314 struct sctp_nets *net;
4315
4316 net = (struct sctp_nets *)data;
4317 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
4318 (struct sockaddr *)&net->ro._l_addr, error, so_locked);
4319 break;
4320 }
4321 case SCTP_NOTIFY_INTERFACE_UP:
4322 {
4323 struct sctp_nets *net;
4324
4325 net = (struct sctp_nets *)data;
4326 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
4327 (struct sockaddr *)&net->ro._l_addr, error, so_locked);
4328 break;
4329 }
4330 case SCTP_NOTIFY_INTERFACE_CONFIRMED:
4331 {
4332 struct sctp_nets *net;
4333
4334 net = (struct sctp_nets *)data;
4335 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
4336 (struct sockaddr *)&net->ro._l_addr, error, so_locked);
4337 break;
4338 }
4339 case SCTP_NOTIFY_SPECIAL_SP_FAIL:
4340 sctp_notify_send_failed2(stcb, error,
4341 (struct sctp_stream_queue_pending *)data, so_locked);
4342 break;
4343 case SCTP_NOTIFY_SENT_DG_FAIL:
4344 sctp_notify_send_failed(stcb, 1, error,
4345 (struct sctp_tmit_chunk *)data, so_locked);
4346 break;
4347 case SCTP_NOTIFY_UNSENT_DG_FAIL:
4348 sctp_notify_send_failed(stcb, 0, error,
4349 (struct sctp_tmit_chunk *)data, so_locked);
4350 break;
4351 case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
4352 {
4353 uint32_t val;
4354 val = *((uint32_t *)data);
4355
4356 sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
4357 break;
4358 }
4359 case SCTP_NOTIFY_ASSOC_LOC_ABORTED:
4360 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
4361 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
4362 sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 0, so_locked);
4363 } else {
4364 sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 0, so_locked);
4365 }
4366 break;
4367 case SCTP_NOTIFY_ASSOC_REM_ABORTED:
4368 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
4369 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
4370 sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 1, so_locked);
4371 } else {
4372 sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 1, so_locked);
4373 }
4374 break;
4375 case SCTP_NOTIFY_ASSOC_RESTART:
4376 sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, 0, so_locked);
4377 if (stcb->asoc.auth_supported == 0) {
4378 sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
4379 NULL, so_locked);
4380 }
4381 break;
4382 case SCTP_NOTIFY_STR_RESET_SEND:
4383 sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_OUTGOING_SSN);
4384 break;
4385 case SCTP_NOTIFY_STR_RESET_RECV:
4386 sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_INCOMING);
4387 break;
4388 case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
4389 sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
4390 (SCTP_STREAM_RESET_OUTGOING_SSN|SCTP_STREAM_RESET_FAILED));
4391 break;
4392 case SCTP_NOTIFY_STR_RESET_DENIED_OUT:
4393 sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
4394 (SCTP_STREAM_RESET_OUTGOING_SSN|SCTP_STREAM_RESET_DENIED));
4395 break;
4396 case SCTP_NOTIFY_STR_RESET_FAILED_IN:
4397 sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
4398 (SCTP_STREAM_RESET_INCOMING|SCTP_STREAM_RESET_FAILED));
4399 break;
4400 case SCTP_NOTIFY_STR_RESET_DENIED_IN:
4401 sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
4402 (SCTP_STREAM_RESET_INCOMING|SCTP_STREAM_RESET_DENIED));
4403 break;
4404 case SCTP_NOTIFY_ASCONF_ADD_IP:
4405 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
4406 error, so_locked);
4407 break;
4408 case SCTP_NOTIFY_ASCONF_DELETE_IP:
4409 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
4410 error, so_locked);
4411 break;
4412 case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
4413 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
4414 error, so_locked);
4415 break;
4416 case SCTP_NOTIFY_PEER_SHUTDOWN:
4417 sctp_notify_shutdown_event(stcb);
4418 break;
4419 case SCTP_NOTIFY_AUTH_NEW_KEY:
4420 sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error,
4421 (uint16_t)(uintptr_t)data,
4422 so_locked);
4423 break;
4424 case SCTP_NOTIFY_AUTH_FREE_KEY:
4425 sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
4426 (uint16_t)(uintptr_t)data,
4427 so_locked);
4428 break;
4429 case SCTP_NOTIFY_NO_PEER_AUTH:
4430 sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
4431 (uint16_t)(uintptr_t)data,
4432 so_locked);
4433 break;
4434 case SCTP_NOTIFY_SENDER_DRY:
4435 sctp_notify_sender_dry_event(stcb, so_locked);
4436 break;
4437 case SCTP_NOTIFY_REMOTE_ERROR:
4438 sctp_notify_remote_error(stcb, error, data);
4439 break;
4440 default:
4441 SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
4442 __func__, notification, notification);
4443 break;
4444 } /* end switch */
4445 }
4446
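/*
 * Flush all user data from the sent, send and per-stream output queues,
 * generating the appropriate send-failed notification for every chunk or
 * pending message that still has data attached.
 */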
4447 void
4448 sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int holds_lock, int so_locked)
4449 {
4450 struct sctp_association *asoc;
4451 struct sctp_stream_out *outs;
4452 struct sctp_tmit_chunk *chk, *nchk;
4453 struct sctp_stream_queue_pending *sp, *nsp;
4454 int i;
4455
4456 if (stcb == NULL) {
4457 return;
4458 }
4459 asoc = &stcb->asoc;
4460 if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) {
4461 /* already being freed */
4462 return;
4463 }
4464 #if defined(__APPLE__) && !defined(__Userspace__)
4465 if (so_locked) {
4466 sctp_lock_assert(SCTP_INP_SO(stcb->sctp_ep));
4467 } else {
4468 sctp_unlock_assert(SCTP_INP_SO(stcb->sctp_ep));
4469 }
4470 #endif
4471 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
4472 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
4473 (asoc->state & SCTP_STATE_CLOSED_SOCKET)) {
4474 return;
4475 }
4476 /* now go through all the gunk, freeing chunks */
4477 if (holds_lock == 0) {
4478 SCTP_TCB_SEND_LOCK(stcb);
4479 }
4480 /* sent queue SHOULD be empty */
4481 TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
4482 TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
4483 asoc->sent_queue_cnt--;
4484 if (chk->sent != SCTP_DATAGRAM_NR_ACKED) {
4485 if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
4486 asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
4487 #ifdef INVARIANTS
4488 } else {
4489 panic("No chunks on the queues for sid %u.", chk->rec.data.sid);
4490 #endif
4491 }
4492 }
4493 if (chk->data != NULL) {
4494 sctp_free_bufspace(stcb, asoc, chk, 1);
4495 sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb,
4496 error, chk, so_locked);
4497 if (chk->data) {
4498 sctp_m_freem(chk->data);
4499 chk->data = NULL;
4500 }
4501 }
4502 sctp_free_a_chunk(stcb, chk, so_locked);
4503 /*sa_ignore FREED_MEMORY*/
4504 }
4505 /* pending send queue SHOULD be empty */
4506 TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
4507 TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
4508 asoc->send_queue_cnt--;
4509 if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
4510 asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
4511 #ifdef INVARIANTS
4512 } else {
4513 panic("No chunks on the queues for sid %u.", chk->rec.data.sid);
4514 #endif
4515 }
4516 if (chk->data != NULL) {
4517 sctp_free_bufspace(stcb, asoc, chk, 1);
4518 sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb,
4519 error, chk, so_locked);
4520 if (chk->data) {
4521 sctp_m_freem(chk->data);
4522 chk->data = NULL;
4523 }
4524 }
4525 sctp_free_a_chunk(stcb, chk, so_locked);
4526 /*sa_ignore FREED_MEMORY*/
4527 }
4528 for (i = 0; i < asoc->streamoutcnt; i++) {
4529 /* For each stream */
4530 outs = &asoc->strmout[i];
4531 /* clean up any sends there */
4532 TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
4533 atomic_subtract_int(&asoc->stream_queue_cnt, 1);
4534 TAILQ_REMOVE(&outs->outqueue, sp, next);
4535 stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, outs, sp, 1);
4536 sctp_free_spbufspace(stcb, asoc, sp);
4537 if (sp->data) {
4538 sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
4539 error, (void *)sp, so_locked);
4540 if (sp->data) {
4541 sctp_m_freem(sp->data);
4542 sp->data = NULL;
4543 sp->tail_mbuf = NULL;
4544 sp->length = 0;
4545 }
4546 }
4547 if (sp->net) {
4548 sctp_free_remote_addr(sp->net);
4549 sp->net = NULL;
4550 }
4551 /* Free the chunk */
4552 sctp_free_a_strmoq(stcb, sp, so_locked);
4553 /*sa_ignore FREED_MEMORY*/
4554 }
4555 }
4556
4557 if (holds_lock == 0) {
4558 SCTP_TCB_SEND_UNLOCK(stcb);
4559 }
4560 }
4561
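/*
 * Report an aborted association to the ULP: flush all outbound data and
 * raise either a remote or a local abort notification, depending on who
 * tore the association down.
 */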
4562 void
4563 sctp_abort_notification(struct sctp_tcb *stcb, uint8_t from_peer, uint16_t error,
4564 struct sctp_abort_chunk *abort, int so_locked)
4565 {
4566 if (stcb == NULL) {
4567 return;
4568 }
4569 #if defined(__APPLE__) && !defined(__Userspace__)
4570 if (so_locked) {
4571 sctp_lock_assert(SCTP_INP_SO(stcb->sctp_ep));
4572 } else {
4573 sctp_unlock_assert(SCTP_INP_SO(stcb->sctp_ep));
4574 }
4575 #endif
4576 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
4577 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
4578 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
4579 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
4580 }
4581 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
4582 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
4583 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
4584 return;
4585 }
4586 /* Tell them we lost the asoc */
4587 sctp_report_all_outbound(stcb, error, 0, so_locked);
4588 if (from_peer) {
4589 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked);
4590 } else {
4591 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked);
4592 }
4593 }
4594
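/*
 * Send an ABORT in response to the given packet and, if a TCB exists,
 * notify the ULP and free the association.
 */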
4595 void
4596 sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
4597 struct mbuf *m, int iphlen,
4598 struct sockaddr *src, struct sockaddr *dst,
4599 struct sctphdr *sh, struct mbuf *op_err,
4600 #if defined(__FreeBSD__) && !defined(__Userspace__)
4601 uint8_t mflowtype, uint32_t mflowid,
4602 #endif
4603 uint32_t vrf_id, uint16_t port)
4604 {
4605 uint32_t vtag;
4606 #if defined(__APPLE__) && !defined(__Userspace__)
4607 struct socket *so;
4608 #endif
4609
4610 vtag = 0;
4611 if (stcb != NULL) {
4612 vtag = stcb->asoc.peer_vtag;
4613 vrf_id = stcb->asoc.vrf_id;
4614 }
4615 sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err,
4616 #if defined(__FreeBSD__) && !defined(__Userspace__)
4617 mflowtype, mflowid, inp->fibnum,
4618 #endif
4619 vrf_id, port);
4620 if (stcb != NULL) {
4621 /* We have a TCB to abort, send notification too */
4622 sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED);
4623 SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_WAS_ABORTED);
4624 /* Ok, now lets free it */
4625 #if defined(__APPLE__) && !defined(__Userspace__)
4626 so = SCTP_INP_SO(inp);
4627 atomic_add_int(&stcb->asoc.refcnt, 1);
4628 SCTP_TCB_UNLOCK(stcb);
4629 SCTP_SOCKET_LOCK(so, 1);
4630 SCTP_TCB_LOCK(stcb);
4631 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4632 #endif
4633 SCTP_STAT_INCR_COUNTER32(sctps_aborted);
4634 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
4635 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4636 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4637 }
4638 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
4639 SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
4640 #if defined(__APPLE__) && !defined(__Userspace__)
4641 SCTP_SOCKET_UNLOCK(so, 1);
4642 #endif
4643 }
4644 }
4645 #ifdef SCTP_ASOCLOG_OF_TSNS
4646 void
4647 sctp_print_out_track_log(struct sctp_tcb *stcb)
4648 {
4649 #ifdef NOISY_PRINTS
4650 int i;
4651 SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
4652 SCTP_PRINTF("IN bound TSN log-aaa\n");
4653 if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
4654 SCTP_PRINTF("None rcvd\n");
4655 goto none_in;
4656 }
4657 if (stcb->asoc.tsn_in_wrapped) {
4658 for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
4659 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4660 stcb->asoc.in_tsnlog[i].tsn,
4661 stcb->asoc.in_tsnlog[i].strm,
4662 stcb->asoc.in_tsnlog[i].seq,
4663 stcb->asoc.in_tsnlog[i].flgs,
4664 stcb->asoc.in_tsnlog[i].sz);
4665 }
4666 }
4667 if (stcb->asoc.tsn_in_at) {
4668 for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
4669 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4670 stcb->asoc.in_tsnlog[i].tsn,
4671 stcb->asoc.in_tsnlog[i].strm,
4672 stcb->asoc.in_tsnlog[i].seq,
4673 stcb->asoc.in_tsnlog[i].flgs,
4674 stcb->asoc.in_tsnlog[i].sz);
4675 }
4676 }
4677 none_in:
4678 SCTP_PRINTF("OUT bound TSN log-aaa\n");
4679 if ((stcb->asoc.tsn_out_at == 0) &&
4680 (stcb->asoc.tsn_out_wrapped == 0)) {
4681 SCTP_PRINTF("None sent\n");
4682 }
4683 if (stcb->asoc.tsn_out_wrapped) {
4684 for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
4685 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4686 stcb->asoc.out_tsnlog[i].tsn,
4687 stcb->asoc.out_tsnlog[i].strm,
4688 stcb->asoc.out_tsnlog[i].seq,
4689 stcb->asoc.out_tsnlog[i].flgs,
4690 stcb->asoc.out_tsnlog[i].sz);
4691 }
4692 }
4693 if (stcb->asoc.tsn_out_at) {
4694 for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
4695 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4696 stcb->asoc.out_tsnlog[i].tsn,
4697 stcb->asoc.out_tsnlog[i].strm,
4698 stcb->asoc.out_tsnlog[i].seq,
4699 stcb->asoc.out_tsnlog[i].flgs,
4700 stcb->asoc.out_tsnlog[i].sz);
4701 }
4702 }
4703 #endif
4704 }
4705 #endif
4706
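/*
 * Abort an existing association: send an ABORT chunk to the peer, notify
 * the ULP (unless the socket is already gone) and free the TCB.
 */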
4707 void
4708 sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
4709 struct mbuf *op_err,
4710 int so_locked)
4711 {
4712 #if defined(__APPLE__) && !defined(__Userspace__)
4713 struct socket *so;
4714 #endif
4715
4716 #if defined(__APPLE__) && !defined(__Userspace__)
4717 so = SCTP_INP_SO(inp);
4718 #endif
4719 #if defined(__APPLE__) && !defined(__Userspace__)
4720 if (so_locked) {
4721 sctp_lock_assert(SCTP_INP_SO(inp));
4722 } else {
4723 sctp_unlock_assert(SCTP_INP_SO(inp));
4724 }
4725 #endif
4726 if (stcb == NULL) {
4727 /* Got to have a TCB */
4728 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4729 if (LIST_EMPTY(&inp->sctp_asoc_list)) {
4730 #if defined(__APPLE__) && !defined(__Userspace__)
4731 if (!so_locked) {
4732 SCTP_SOCKET_LOCK(so, 1);
4733 }
4734 #endif
4735 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
4736 SCTP_CALLED_DIRECTLY_NOCMPSET);
4737 #if defined(__APPLE__) && !defined(__Userspace__)
4738 if (!so_locked) {
4739 SCTP_SOCKET_UNLOCK(so, 1);
4740 }
4741 #endif
4742 }
4743 }
4744 return;
4745 } else {
4746 SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_WAS_ABORTED);
4747 }
4748 /* notify the peer */
4749 sctp_send_abort_tcb(stcb, op_err, so_locked);
4750 SCTP_STAT_INCR_COUNTER32(sctps_aborted);
4751 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
4752 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4753 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4754 }
4755 /* notify the ulp */
4756 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
4757 sctp_abort_notification(stcb, 0, 0, NULL, so_locked);
4758 }
4759 /* now free the asoc */
4760 #ifdef SCTP_ASOCLOG_OF_TSNS
4761 sctp_print_out_track_log(stcb);
4762 #endif
4763 #if defined(__APPLE__) && !defined(__Userspace__)
4764 if (!so_locked) {
4765 atomic_add_int(&stcb->asoc.refcnt, 1);
4766 SCTP_TCB_UNLOCK(stcb);
4767 SCTP_SOCKET_LOCK(so, 1);
4768 SCTP_TCB_LOCK(stcb);
4769 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4770 }
4771 #endif
4772 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
4773 SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
4774 #if defined(__APPLE__) && !defined(__Userspace__)
4775 if (!so_locked) {
4776 SCTP_SOCKET_UNLOCK(so, 1);
4777 }
4778 #endif
4779 }
4780
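/*
 * Handle an "out of the blue" packet: walk its chunks and either stay
 * silent (ABORT, SHUTDOWN COMPLETE, packet dropped), answer a SHUTDOWN ACK
 * with a SHUTDOWN COMPLETE, or send an ABORT, subject to the blackhole
 * sysctl.
 */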
4781 void
4782 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset,
4783 struct sockaddr *src, struct sockaddr *dst,
4784 struct sctphdr *sh, struct sctp_inpcb *inp,
4785 struct mbuf *cause,
4786 #if defined(__FreeBSD__) && !defined(__Userspace__)
4787 uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
4788 #endif
4789 uint32_t vrf_id, uint16_t port)
4790 {
4791 struct sctp_chunkhdr *ch, chunk_buf;
4792 unsigned int chk_length;
4793 int contains_init_chunk;
4794
4795 SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
4796 /* Generate a TO address for future reference */
4797 if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
4798 if (LIST_EMPTY(&inp->sctp_asoc_list)) {
4799 #if defined(__APPLE__) && !defined(__Userspace__)
4800 SCTP_SOCKET_LOCK(SCTP_INP_SO(inp), 1);
4801 #endif
4802 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
4803 SCTP_CALLED_DIRECTLY_NOCMPSET);
4804 #if defined(__APPLE__) && !defined(__Userspace__)
4805 SCTP_SOCKET_UNLOCK(SCTP_INP_SO(inp), 1);
4806 #endif
4807 }
4808 }
4809 contains_init_chunk = 0;
4810 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4811 sizeof(*ch), (uint8_t *) & chunk_buf);
4812 while (ch != NULL) {
4813 chk_length = ntohs(ch->chunk_length);
4814 if (chk_length < sizeof(*ch)) {
4815 /* break to abort land */
4816 break;
4817 }
4818 switch (ch->chunk_type) {
4819 case SCTP_INIT:
4820 contains_init_chunk = 1;
4821 break;
4822 case SCTP_PACKET_DROPPED:
4823 /* we don't respond to pkt-dropped */
4824 return;
4825 case SCTP_ABORT_ASSOCIATION:
4826 /* we don't respond with an ABORT to an ABORT */
4827 return;
4828 case SCTP_SHUTDOWN_COMPLETE:
4829 /*
4830 * we ignore it since we are not waiting for it and
4831 * the peer is gone
4832 */
4833 return;
4834 case SCTP_SHUTDOWN_ACK:
4835 sctp_send_shutdown_complete2(src, dst, sh,
4836 #if defined(__FreeBSD__) && !defined(__Userspace__)
4837 mflowtype, mflowid, fibnum,
4838 #endif
4839 vrf_id, port);
4840 return;
4841 default:
4842 break;
4843 }
4844 offset += SCTP_SIZE32(chk_length);
4845 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4846 sizeof(*ch), (uint8_t *) & chunk_buf);
4847 }
4848 if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) ||
4849 ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) &&
4850 (contains_init_chunk == 0))) {
4851 sctp_send_abort(m, iphlen, src, dst, sh, 0, cause,
4852 #if defined(__FreeBSD__) && !defined(__Userspace__)
4853 mflowtype, mflowid, fibnum,
4854 #endif
4855 vrf_id, port);
4856 }
4857 }
4858
4859 /*
4860 * check the inbound datagram to make sure there is not an abort inside it;
4861 * if there is, return 1, else return 0.
4862 */
4863 int
4864 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill)
4865 {
4866 struct sctp_chunkhdr *ch;
4867 struct sctp_init_chunk *init_chk, chunk_buf;
4868 int offset;
4869 unsigned int chk_length;
4870
4871 offset = iphlen + sizeof(struct sctphdr);
4872 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
4873 (uint8_t *) & chunk_buf);
4874 while (ch != NULL) {
4875 chk_length = ntohs(ch->chunk_length);
4876 if (chk_length < sizeof(*ch)) {
4877 /* packet is probably corrupt */
4878 break;
4879 }
4880 /* we seem to be ok, is it an abort? */
4881 if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
4882 /* yep, tell them */
4883 return (1);
4884 }
4885 if (ch->chunk_type == SCTP_INITIATION) {
4886 /* need to update the Vtag */
4887 init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
4888 offset, sizeof(*init_chk), (uint8_t *) & chunk_buf);
4889 if (init_chk != NULL) {
4890 *vtagfill = ntohl(init_chk->init.initiate_tag);
4891 }
4892 }
4893 /* Nope, move to the next chunk */
4894 offset += SCTP_SIZE32(chk_length);
4895 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4896 sizeof(*ch), (uint8_t *) & chunk_buf);
4897 }
4898 return (0);
4899 }
4900
4901 /*
4902 * currently (2/02), ifa_addr embeds scope_ids and doesn't have sin6_scope_id
4903 * set (i.e., it's 0), so create this function to compare link-local scopes
4904 */
4905 #ifdef INET6
4906 uint32_t
4907 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
4908 {
4909 #if defined(__Userspace__)
4910 /*__Userspace__ Returning 1 here always */
4911 #endif
4912 #if defined(SCTP_EMBEDDED_V6_SCOPE)
4913 struct sockaddr_in6 a, b;
4914
4915 /* save copies */
4916 a = *addr1;
4917 b = *addr2;
4918
4919 if (a.sin6_scope_id == 0)
4920 #ifdef SCTP_KAME
4921 if (sa6_recoverscope(&a)) {
4922 #else
4923 if (in6_recoverscope(&a, &a.sin6_addr, NULL)) {
4924 #endif /* SCTP_KAME */
4925 /* can't get scope, so can't match */
4926 return (0);
4927 }
4928 if (b.sin6_scope_id == 0)
4929 #ifdef SCTP_KAME
4930 if (sa6_recoverscope(&b)) {
4931 #else
4932 if (in6_recoverscope(&b, &b.sin6_addr, NULL)) {
4933 #endif /* SCTP_KAME */
4934 /* can't get scope, so can't match */
4935 return (0);
4936 }
4937 if (a.sin6_scope_id != b.sin6_scope_id)
4938 return (0);
4939 #else
4940 if (addr1->sin6_scope_id != addr2->sin6_scope_id)
4941 return (0);
4942 #endif /* SCTP_EMBEDDED_V6_SCOPE */
4943
4944 return (1);
4945 }
4946
4947 #if defined(SCTP_EMBEDDED_V6_SCOPE)
4948 /*
4949 * returns a sockaddr_in6 with embedded scope recovered and removed
4950 */
4951 struct sockaddr_in6 *
4952 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
4953 {
4954 /* check and strip embedded scope junk */
4955 if (addr->sin6_family == AF_INET6) {
4956 if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
4957 if (addr->sin6_scope_id == 0) {
4958 *store = *addr;
4959 #ifdef SCTP_KAME
4960 if (!sa6_recoverscope(store)) {
4961 #else
4962 if (!in6_recoverscope(store, &store->sin6_addr,
4963 NULL)) {
4964 #endif /* SCTP_KAME */
4965 /* use the recovered scope */
4966 addr = store;
4967 }
4968 } else {
4969 /* else, return the original "to" addr */
4970 in6_clearscope(&addr->sin6_addr);
4971 }
4972 }
4973 }
4974 return (addr);
4975 }
4976 #endif /* SCTP_EMBEDDED_V6_SCOPE */
4977 #endif
4978
4979 /*
4980 * are the two addresses the same? currently a "scopeless" check returns: 1
4981 * if same, 0 if not
4982 */
4983 int
4984 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
4985 {
4986
4987 /* must be valid */
4988 if (sa1 == NULL || sa2 == NULL)
4989 return (0);
4990
4991 /* must be the same family */
4992 if (sa1->sa_family != sa2->sa_family)
4993 return (0);
4994
4995 switch (sa1->sa_family) {
4996 #ifdef INET6
4997 case AF_INET6:
4998 {
4999 /* IPv6 addresses */
5000 struct sockaddr_in6 *sin6_1, *sin6_2;
5001
5002 sin6_1 = (struct sockaddr_in6 *)sa1;
5003 sin6_2 = (struct sockaddr_in6 *)sa2;
5004 return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
5005 sin6_2));
5006 }
5007 #endif
5008 #ifdef INET
5009 case AF_INET:
5010 {
5011 /* IPv4 addresses */
5012 struct sockaddr_in *sin_1, *sin_2;
5013
5014 sin_1 = (struct sockaddr_in *)sa1;
5015 sin_2 = (struct sockaddr_in *)sa2;
5016 return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
5017 }
5018 #endif
5019 #if defined(__Userspace__)
5020 case AF_CONN:
5021 {
5022 struct sockaddr_conn *sconn_1, *sconn_2;
5023
5024 sconn_1 = (struct sockaddr_conn *)sa1;
5025 sconn_2 = (struct sockaddr_conn *)sa2;
5026 return (sconn_1->sconn_addr == sconn_2->sconn_addr);
5027 }
5028 #endif
5029 default:
5030 /* we don't do these... */
5031 return (0);
5032 }
5033 }
5034
5035 void
5036 sctp_print_address(struct sockaddr *sa)
5037 {
5038 #ifdef INET6
5039 #if defined(__FreeBSD__) && !defined(__Userspace__)
5040 char ip6buf[INET6_ADDRSTRLEN];
5041 #endif
5042 #endif
5043
5044 switch (sa->sa_family) {
5045 #ifdef INET6
5046 case AF_INET6:
5047 {
5048 struct sockaddr_in6 *sin6;
5049
5050 sin6 = (struct sockaddr_in6 *)sa;
5051 #if defined(__Userspace__)
5052 SCTP_PRINTF("IPv6 address: %x:%x:%x:%x:%x:%x:%x:%x:port:%d scope:%u\n",
5053 ntohs(sin6->sin6_addr.s6_addr16[0]),
5054 ntohs(sin6->sin6_addr.s6_addr16[1]),
5055 ntohs(sin6->sin6_addr.s6_addr16[2]),
5056 ntohs(sin6->sin6_addr.s6_addr16[3]),
5057 ntohs(sin6->sin6_addr.s6_addr16[4]),
5058 ntohs(sin6->sin6_addr.s6_addr16[5]),
5059 ntohs(sin6->sin6_addr.s6_addr16[6]),
5060 ntohs(sin6->sin6_addr.s6_addr16[7]),
5061 ntohs(sin6->sin6_port),
5062 sin6->sin6_scope_id);
5063 #else
5064 #if defined(__FreeBSD__) && !defined(__Userspace__)
5065 SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
5066 ip6_sprintf(ip6buf, &sin6->sin6_addr),
5067 ntohs(sin6->sin6_port),
5068 sin6->sin6_scope_id);
5069 #else
5070 SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
5071 ip6_sprintf(&sin6->sin6_addr),
5072 ntohs(sin6->sin6_port),
5073 sin6->sin6_scope_id);
5074 #endif
5075 #endif
5076 break;
5077 }
5078 #endif
5079 #ifdef INET
5080 case AF_INET:
5081 {
5082 struct sockaddr_in *sin;
5083 unsigned char *p;
5084
5085 sin = (struct sockaddr_in *)sa;
5086 p = (unsigned char *)&sin->sin_addr;
5087 SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
5088 p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
5089 break;
5090 }
5091 #endif
5092 #if defined(__Userspace__)
5093 case AF_CONN:
5094 {
5095 struct sockaddr_conn *sconn;
5096
5097 sconn = (struct sockaddr_conn *)sa;
5098 SCTP_PRINTF("AF_CONN address: %p\n", sconn->sconn_addr);
5099 break;
5100 }
5101 #endif
5102 default:
5103 SCTP_PRINTF("?\n");
5104 break;
5105 }
5106 }
5107
5108 void
5109 sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
5110 struct sctp_inpcb *new_inp,
5111 struct sctp_tcb *stcb,
5112 int waitflags)
5113 {
5114 /*
5115 * go through our old INP and pull off any control structures that
5116 * belong to stcb and move them to the new inp.
5117 */
5118 struct socket *old_so, *new_so;
5119 struct sctp_queued_to_read *control, *nctl;
5120 struct sctp_readhead tmp_queue;
5121 struct mbuf *m;
5122 #if (defined(__FreeBSD__) || defined(__APPLE__)) && !defined(__Userspace__)
5123 int error = 0;
5124 #endif
5125
5126 old_so = old_inp->sctp_socket;
5127 new_so = new_inp->sctp_socket;
5128 TAILQ_INIT(&tmp_queue);
5129 #if (defined(__FreeBSD__) || defined(__APPLE__)) && !defined(__Userspace__)
5130 error = sblock(&old_so->so_rcv, waitflags);
5131 if (error) {
5132 /* Gak, can't get sblock, we have a problem.
5133 * Data will be left stranded, and we
5134 * don't dare look at it since the
5135 * other thread may be reading something.
5136 * Oh well, it's a screwed up app that does
5137 * a peeloff OR an accept while reading
5138 * from the main socket... actually it's
5139 * only the peeloff() case, since I think
5140 * read will fail on a listening socket.
5141 */
5142 return;
5143 }
5144 #endif
5145 /* lock the socket buffers */
5146 SCTP_INP_READ_LOCK(old_inp);
5147 TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) {
5148 /* Pull off all for our target stcb */
5149 if (control->stcb == stcb) {
5150 /* remove it we want it */
5151 TAILQ_REMOVE(&old_inp->read_queue, control, next);
5152 TAILQ_INSERT_TAIL(&tmp_queue, control, next);
5153 m = control->data;
5154 while (m) {
5155 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5156 sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5157 }
5158 sctp_sbfree(control, stcb, &old_so->so_rcv, m);
5159 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5160 sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBRESULT, 0);
5161 }
5162 m = SCTP_BUF_NEXT(m);
5163 }
5164 }
5165 }
5166 SCTP_INP_READ_UNLOCK(old_inp);
5167 /* Remove the sb-lock on the old socket */
5168 #if defined(__APPLE__) && !defined(__Userspace__)
5169 sbunlock(&old_so->so_rcv, 1);
5170 #endif
5171
5172 #if defined(__FreeBSD__) && !defined(__Userspace__)
5173 sbunlock(&old_so->so_rcv);
5174 #endif
5175 /* Now we move them over to the new socket buffer */
5176 SCTP_INP_READ_LOCK(new_inp);
5177 TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) {
5178 TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
5179 m = control->data;
5180 while (m) {
5181 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5182 sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
5183 }
5184 sctp_sballoc(stcb, &new_so->so_rcv, m);
5185 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5186 sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBRESULT, 0);
5187 }
5188 m = SCTP_BUF_NEXT(m);
5189 }
5190 }
5191 SCTP_INP_READ_UNLOCK(new_inp);
5192 }
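/*
 * Note on the structure above (descriptive only): the migration is done in
 * two phases. First, every control belonging to stcb is unhooked from the
 * old endpoint onto a private tmp_queue while only the old endpoint's read
 * lock (and, where applicable, the old socket's sb-lock) is held, with
 * sctp_sblog()/sctp_sbfree() keeping the old receive buffer accounting
 * straight. Only afterwards is the new endpoint's read lock taken and the
 * controls appended with sctp_sballoc() charging the new socket, so the two
 * read locks are never held at the same time.
 */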
5193
5194 void
5195 sctp_wakeup_the_read_socket(struct sctp_inpcb *inp,
5196 struct sctp_tcb *stcb,
5197 int so_locked
5198 #if !(defined(__APPLE__) && !defined(__Userspace__))
5199 SCTP_UNUSED
5200 #endif
5201 )
5202 {
5203 if ((inp != NULL) && (inp->sctp_socket != NULL)) {
5204 #if defined(__APPLE__) && !defined(__Userspace__)
5205 struct socket *so;
5206
5207 so = SCTP_INP_SO(inp);
5208 if (!so_locked) {
5209 if (stcb) {
5210 atomic_add_int(&stcb->asoc.refcnt, 1);
5211 SCTP_TCB_UNLOCK(stcb);
5212 }
5213 SCTP_SOCKET_LOCK(so, 1);
5214 if (stcb) {
5215 SCTP_TCB_LOCK(stcb);
5216 atomic_subtract_int(&stcb->asoc.refcnt, 1);
5217 }
5218 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5219 SCTP_SOCKET_UNLOCK(so, 1);
5220 return;
5221 }
5222 }
5223 #endif
5224 sctp_sorwakeup(inp, inp->sctp_socket);
5225 #if defined(__APPLE__) && !defined(__Userspace__)
5226 if (!so_locked) {
5227 SCTP_SOCKET_UNLOCK(so, 1);
5228 }
5229 #endif
5230 }
5231 }
5232 #if defined(__Userspace__)
5233
5234 void
5235 sctp_invoke_recv_callback(struct sctp_inpcb *inp,
5236 struct sctp_tcb *stcb,
5237 struct sctp_queued_to_read *control,
5238 int inp_read_lock_held)
5239 {
5240 uint32_t pd_point, length;
5241
5242 if ((inp->recv_callback == NULL) ||
5243 (stcb == NULL) ||
5244 (stcb->sctp_socket == NULL)) {
5245 return;
5246 }
5247
5248 length = control->length;
5249 if (stcb != NULL && stcb->sctp_socket != NULL) {
5250 pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
5251 stcb->sctp_ep->partial_delivery_point);
5252 } else {
5253 pd_point = inp->partial_delivery_point;
5254 }
5255 if ((control->end_added == 1) || (length >= pd_point)) {
5256 struct socket *so;
5257 struct mbuf *m;
5258 char *buffer;
5259 struct sctp_rcvinfo rcv;
5260 union sctp_sockstore addr;
5261 int flags;
5262
5263 if ((buffer = malloc(length)) == NULL) {
5264 return;
5265 }
5266 if (inp_read_lock_held == 0) {
5267 SCTP_INP_READ_LOCK(inp);
5268 }
5269 so = stcb->sctp_socket;
5270 for (m = control->data; m; m = SCTP_BUF_NEXT(m)) {
5271 sctp_sbfree(control, control->stcb, &so->so_rcv, m);
5272 }
5273 m_copydata(control->data, 0, length, buffer);
5274 memset(&rcv, 0, sizeof(struct sctp_rcvinfo));
5275 rcv.rcv_sid = control->sinfo_stream;
5276 rcv.rcv_ssn = (uint16_t)control->mid;
5277 rcv.rcv_flags = control->sinfo_flags;
5278 rcv.rcv_ppid = control->sinfo_ppid;
5279 rcv.rcv_tsn = control->sinfo_tsn;
5280 rcv.rcv_cumtsn = control->sinfo_cumtsn;
5281 rcv.rcv_context = control->sinfo_context;
5282 rcv.rcv_assoc_id = control->sinfo_assoc_id;
5283 memset(&addr, 0, sizeof(union sctp_sockstore));
5284 switch (control->whoFrom->ro._l_addr.sa.sa_family) {
5285 #ifdef INET
5286 case AF_INET:
5287 addr.sin = control->whoFrom->ro._l_addr.sin;
5288 break;
5289 #endif
5290 #ifdef INET6
5291 case AF_INET6:
5292 addr.sin6 = control->whoFrom->ro._l_addr.sin6;
5293 break;
5294 #endif
5295 case AF_CONN:
5296 addr.sconn = control->whoFrom->ro._l_addr.sconn;
5297 break;
5298 default:
5299 addr.sa = control->whoFrom->ro._l_addr.sa;
5300 break;
5301 }
5302 flags = 0;
5303 if (control->end_added == 1) {
5304 flags |= MSG_EOR;
5305 }
5306 if (control->spec_flags & M_NOTIFICATION) {
5307 flags |= MSG_NOTIFICATION;
5308 }
5309 sctp_m_freem(control->data);
5310 control->data = NULL;
5311 control->tail_mbuf = NULL;
5312 control->length = 0;
5313 if (control->end_added) {
5314 TAILQ_REMOVE(&stcb->sctp_ep->read_queue, control, next);
5315 control->on_read_q = 0;
5316 sctp_free_remote_addr(control->whoFrom);
5317 control->whoFrom = NULL;
5318 sctp_free_a_readq(stcb, control);
5319 }
5320 atomic_add_int(&stcb->asoc.refcnt, 1);
5321 SCTP_TCB_UNLOCK(stcb);
5322 if (inp_read_lock_held == 0) {
5323 SCTP_INP_READ_UNLOCK(inp);
5324 }
5325 inp->recv_callback(so, addr, buffer, length, rcv, flags, inp->ulp_info);
5326 SCTP_TCB_LOCK(stcb);
5327 atomic_subtract_int(&stcb->asoc.refcnt, 1);
5328 }
5329 }
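/*
 * Illustrative sketch (assumption, not defined in this file): a userland
 * receive callback handed to the usrsctp front end is expected to line up
 * with the invocation above, roughly
 *
 *   static int
 *   my_recv_cb(struct socket *so, union sctp_sockstore addr, void *data,
 *              size_t datalen, struct sctp_rcvinfo rcv, int flags,
 *              void *ulp_info)
 *   {
 *           ... consume datalen bytes at data, then free(data) ...
 *           return (1);
 *   }
 *
 * The buffer passed as 'data' is the malloc()ed copy made above, so by the
 * usual usrsctp convention the callback (or the application code it calls)
 * is responsible for freeing it. The exact callback typedef and return
 * value semantics belong to the usrsctp API, not to this function.
 */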
5330 #endif
5331
5332 void
5333 sctp_add_to_readq(struct sctp_inpcb *inp,
5334 struct sctp_tcb *stcb,
5335 struct sctp_queued_to_read *control,
5336 struct sockbuf *sb,
5337 int end,
5338 int inp_read_lock_held,
5339 int so_locked)
5340 {
5341 /*
5342 * Here we must place the control on the end of the socket read
5343 * queue AND increment sb_cc so that select will work properly on
5344 * read.
5345 */
5346 struct mbuf *m, *prev = NULL;
5347
5348 if (inp == NULL) {
5349 /* Gak, TSNH (this should not happen)!! */
5350 #ifdef INVARIANTS
5351 panic("Gak, inp NULL on add_to_readq");
5352 #endif
5353 return;
5354 }
5355 #if defined(__APPLE__) && !defined(__Userspace__)
5356 if (so_locked) {
5357 sctp_lock_assert(SCTP_INP_SO(inp));
5358 } else {
5359 sctp_unlock_assert(SCTP_INP_SO(inp));
5360 }
5361 #endif
5362 if (inp_read_lock_held == 0)
5363 SCTP_INP_READ_LOCK(inp);
5364 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
5365 if (!control->on_strm_q) {
5366 sctp_free_remote_addr(control->whoFrom);
5367 if (control->data) {
5368 sctp_m_freem(control->data);
5369 control->data = NULL;
5370 }
5371 sctp_free_a_readq(stcb, control);
5372 }
5373 if (inp_read_lock_held == 0)
5374 SCTP_INP_READ_UNLOCK(inp);
5375 return;
5376 }
5377 if (!(control->spec_flags & M_NOTIFICATION)) {
5378 atomic_add_int(&inp->total_recvs, 1);
5379 if (!control->do_not_ref_stcb) {
5380 atomic_add_int(&stcb->total_recvs, 1);
5381 }
5382 }
5383 m = control->data;
5384 control->held_length = 0;
5385 control->length = 0;
5386 while (m) {
5387 if (SCTP_BUF_LEN(m) == 0) {
5388 /* Skip mbufs with NO length */
5389 if (prev == NULL) {
5390 /* First one */
5391 control->data = sctp_m_free(m);
5392 m = control->data;
5393 } else {
5394 SCTP_BUF_NEXT(prev) = sctp_m_free(m);
5395 m = SCTP_BUF_NEXT(prev);
5396 }
5397 if (m == NULL) {
5398 control->tail_mbuf = prev;
5399 }
5400 continue;
5401 }
5402 prev = m;
5403 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5404 sctp_sblog(sb, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
5405 }
5406 sctp_sballoc(stcb, sb, m);
5407 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5408 sctp_sblog(sb, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBRESULT, 0);
5409 }
5410 atomic_add_int(&control->length, SCTP_BUF_LEN(m));
5411 m = SCTP_BUF_NEXT(m);
5412 }
5413 if (prev != NULL) {
5414 control->tail_mbuf = prev;
5415 } else {
5416 /* Everything got collapsed out?? */
5417 if (!control->on_strm_q) {
5418 sctp_free_remote_addr(control->whoFrom);
5419 sctp_free_a_readq(stcb, control);
5420 }
5421 if (inp_read_lock_held == 0)
5422 SCTP_INP_READ_UNLOCK(inp);
5423 return;
5424 }
5425 if (end) {
5426 control->end_added = 1;
5427 }
5428 TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
5429 control->on_read_q = 1;
5430 if (inp_read_lock_held == 0)
5431 SCTP_INP_READ_UNLOCK(inp);
5432 #if defined(__Userspace__)
5433 sctp_invoke_recv_callback(inp, stcb, control, inp_read_lock_held);
5434 #endif
5435 if (inp && inp->sctp_socket) {
5436 sctp_wakeup_the_read_socket(inp, stcb, so_locked);
5437 }
5438 }
5439
5440 /*************HOLD THIS COMMENT FOR PATCH FILE OF
5441 *************ALTERNATE ROUTING CODE
5442 */
5443
5444 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
5445 *************ALTERNATE ROUTING CODE
5446 */
5447
5448 struct mbuf *
5449 sctp_generate_cause(uint16_t code, char *info)
5450 {
5451 struct mbuf *m;
5452 struct sctp_gen_error_cause *cause;
5453 size_t info_len;
5454 uint16_t len;
5455
5456 if ((code == 0) || (info == NULL)) {
5457 return (NULL);
5458 }
5459 info_len = strlen(info);
5460 if (info_len > (SCTP_MAX_CAUSE_LENGTH - sizeof(struct sctp_paramhdr))) {
5461 return (NULL);
5462 }
5463 len = (uint16_t)(sizeof(struct sctp_paramhdr) + info_len);
5464 m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
5465 if (m != NULL) {
5466 SCTP_BUF_LEN(m) = len;
5467 cause = mtod(m, struct sctp_gen_error_cause *);
5468 cause->code = htons(code);
5469 cause->length = htons(len);
5470 memcpy(cause->info, info, info_len);
5471 }
5472 return (m);
5473 }
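/*
 * Illustrative use of sctp_generate_cause() (the constant below exists in
 * sctp.h; the surrounding code is only a sketch):
 *
 *   op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION,
 *                                "some diagnostic text");
 *   ... attach op_err to an ABORT or ERROR chunk; a NULL return just
 *       means no cause could be built and has to be tolerated ...
 *
 * Note that the cause is not padded to a multiple of 4 bytes here; any
 * required padding is presumably added by the chunk builder that consumes
 * the mbuf.
 */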
5474
5475 struct mbuf *
5476 sctp_generate_no_user_data_cause(uint32_t tsn)
5477 {
5478 struct mbuf *m;
5479 struct sctp_error_no_user_data *no_user_data_cause;
5480 uint16_t len;
5481
5482 len = (uint16_t)sizeof(struct sctp_error_no_user_data);
5483 m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
5484 if (m != NULL) {
5485 SCTP_BUF_LEN(m) = len;
5486 no_user_data_cause = mtod(m, struct sctp_error_no_user_data *);
5487 no_user_data_cause->cause.code = htons(SCTP_CAUSE_NO_USER_DATA);
5488 no_user_data_cause->cause.length = htons(len);
5489 no_user_data_cause->tsn = htonl(tsn);
5490 }
5491 return (m);
5492 }
5493
5494 #ifdef SCTP_MBCNT_LOGGING
5495 void
5496 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
5497 struct sctp_tmit_chunk *tp1, int chk_cnt)
5498 {
5499 if (tp1->data == NULL) {
5500 return;
5501 }
5502 asoc->chunks_on_out_queue -= chk_cnt;
5503 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
5504 sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
5505 asoc->total_output_queue_size,
5506 tp1->book_size,
5507 0,
5508 tp1->mbcnt);
5509 }
5510 if (asoc->total_output_queue_size >= tp1->book_size) {
5511 atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
5512 } else {
5513 asoc->total_output_queue_size = 0;
5514 }
5515
5516 if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
5517 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
5518 if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
5519 stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
5520 } else {
5521 stcb->sctp_socket->so_snd.sb_cc = 0;
5522
5523 }
5524 }
5525 }
5526
5527 #endif
5528
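/*
 * sctp_release_pr_sctp_chunk() abandons a (possibly fragmented) PR-SCTP
 * message: every fragment of it found on the sent queue, the send queue
 * and, if need be, at the head of the stream out queue is freed and marked
 * SCTP_FORWARD_TSN_SKIP so that a later FORWARD-TSN can cover it, the
 * abandoned_sent or abandoned_unsent statistics are bumped as appropriate,
 * and the number of bytes released from the send buffer is returned to the
 * caller for its own accounting.
 */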
5529 int
5530 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
5531 uint8_t sent, int so_locked)
5532 {
5533 struct sctp_stream_out *strq;
5534 struct sctp_tmit_chunk *chk = NULL, *tp2;
5535 struct sctp_stream_queue_pending *sp;
5536 uint32_t mid;
5537 uint16_t sid;
5538 uint8_t foundeom = 0;
5539 int ret_sz = 0;
5540 int notdone;
5541 int do_wakeup_routine = 0;
5542
5543 #if defined(__APPLE__) && !defined(__Userspace__)
5544 if (so_locked) {
5545 sctp_lock_assert(SCTP_INP_SO(stcb->sctp_ep));
5546 } else {
5547 sctp_unlock_assert(SCTP_INP_SO(stcb->sctp_ep));
5548 }
5549 #endif
5550 sid = tp1->rec.data.sid;
5551 mid = tp1->rec.data.mid;
5552 if (sent || !(tp1->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) {
5553 stcb->asoc.abandoned_sent[0]++;
5554 stcb->asoc.abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
5555 stcb->asoc.strmout[sid].abandoned_sent[0]++;
5556 #if defined(SCTP_DETAILED_STR_STATS)
5557 stcb->asoc.strmout[sid].abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
5558 #endif
5559 } else {
5560 stcb->asoc.abandoned_unsent[0]++;
5561 stcb->asoc.abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
5562 stcb->asoc.strmout[sid].abandoned_unsent[0]++;
5563 #if defined(SCTP_DETAILED_STR_STATS)
5564 stcb->asoc.strmout[sid].abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
5565 #endif
5566 }
5567 do {
5568 ret_sz += tp1->book_size;
5569 if (tp1->data != NULL) {
5570 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5571 sctp_flight_size_decrease(tp1);
5572 sctp_total_flight_decrease(stcb, tp1);
5573 }
5574 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
5575 stcb->asoc.peers_rwnd += tp1->send_size;
5576 stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
5577 if (sent) {
5578 sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
5579 } else {
5580 sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
5581 }
5582 if (tp1->data) {
5583 sctp_m_freem(tp1->data);
5584 tp1->data = NULL;
5585 }
5586 do_wakeup_routine = 1;
5587 if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
5588 stcb->asoc.sent_queue_cnt_removeable--;
5589 }
5590 }
5591 tp1->sent = SCTP_FORWARD_TSN_SKIP;
5592 if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
5593 SCTP_DATA_NOT_FRAG) {
5594 /* not frag'ed, we are done */
5595 notdone = 0;
5596 foundeom = 1;
5597 } else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
5598 /* end of frag, we are done */
5599 notdone = 0;
5600 foundeom = 1;
5601 } else {
5602 /*
5603 * It's a begin or middle piece, we must mark all of
5604 * it
5605 */
5606 notdone = 1;
5607 tp1 = TAILQ_NEXT(tp1, sctp_next);
5608 }
5609 } while (tp1 && notdone);
5610 if (foundeom == 0) {
5611 /*
5612 * The multi-part message was scattered across the send and
5613 * sent queue.
5614 */
5615 TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) {
5616 if ((tp1->rec.data.sid != sid) ||
5617 (!SCTP_MID_EQ(stcb->asoc.idata_supported, tp1->rec.data.mid, mid))) {
5618 break;
5619 }
5620 /* save to chk in case we have some on stream out
5621 * queue. If so and we have an un-transmitted one
5622 * we don't have to fudge the TSN.
5623 */
5624 chk = tp1;
5625 ret_sz += tp1->book_size;
5626 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
5627 if (sent) {
5628 sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
5629 } else {
5630 sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
5631 }
5632 if (tp1->data) {
5633 sctp_m_freem(tp1->data);
5634 tp1->data = NULL;
5635 }
5636 /* No flight involved here; book the size to 0 */
5637 tp1->book_size = 0;
5638 if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
5639 foundeom = 1;
5640 }
5641 do_wakeup_routine = 1;
5642 tp1->sent = SCTP_FORWARD_TSN_SKIP;
5643 TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
5644 /* on to the sent queue so we can wait for it to be passed by. */
5645 TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
5646 sctp_next);
5647 stcb->asoc.send_queue_cnt--;
5648 stcb->asoc.sent_queue_cnt++;
5649 }
5650 }
5651 if (foundeom == 0) {
5652 /*
5653 * Still no eom found. That means there
5654 * is stuff left on the stream out queue.. yuck.
5655 */
5656 SCTP_TCB_SEND_LOCK(stcb);
5657 strq = &stcb->asoc.strmout[sid];
5658 sp = TAILQ_FIRST(&strq->outqueue);
5659 if (sp != NULL) {
5660 sp->discard_rest = 1;
5661 /*
5662 * We may need to put a chunk on the
5663 * queue that holds the TSN that
5664 * would have been sent with the LAST
5665 * bit.
5666 */
5667 if (chk == NULL) {
5668 /* Yep, we have to */
5669 sctp_alloc_a_chunk(stcb, chk);
5670 if (chk == NULL) {
5671 /* we are hosed. All we can
5672 * do is nothing.. which will
5673 * cause an abort if the peer is
5674 * paying attention.
5675 */
5676 goto oh_well;
5677 }
5678 memset(chk, 0, sizeof(*chk));
5679 chk->rec.data.rcv_flags = 0;
5680 chk->sent = SCTP_FORWARD_TSN_SKIP;
5681 chk->asoc = &stcb->asoc;
5682 if (stcb->asoc.idata_supported == 0) {
5683 if (sp->sinfo_flags & SCTP_UNORDERED) {
5684 chk->rec.data.mid = 0;
5685 } else {
5686 chk->rec.data.mid = strq->next_mid_ordered;
5687 }
5688 } else {
5689 if (sp->sinfo_flags & SCTP_UNORDERED) {
5690 chk->rec.data.mid = strq->next_mid_unordered;
5691 } else {
5692 chk->rec.data.mid = strq->next_mid_ordered;
5693 }
5694 }
5695 chk->rec.data.sid = sp->sid;
5696 chk->rec.data.ppid = sp->ppid;
5697 chk->rec.data.context = sp->context;
5698 chk->flags = sp->act_flags;
5699 chk->whoTo = NULL;
5700 #if defined(__FreeBSD__) && !defined(__Userspace__)
5701 chk->rec.data.tsn = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
5702 #else
5703 chk->rec.data.tsn = stcb->asoc.sending_seq++;
5704 #endif
5705 strq->chunks_on_queues++;
5706 TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
5707 stcb->asoc.sent_queue_cnt++;
5708 stcb->asoc.pr_sctp_cnt++;
5709 }
5710 chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
5711 if (sp->sinfo_flags & SCTP_UNORDERED) {
5712 chk->rec.data.rcv_flags |= SCTP_DATA_UNORDERED;
5713 }
5714 if (stcb->asoc.idata_supported == 0) {
5715 if ((sp->sinfo_flags & SCTP_UNORDERED) == 0) {
5716 strq->next_mid_ordered++;
5717 }
5718 } else {
5719 if (sp->sinfo_flags & SCTP_UNORDERED) {
5720 strq->next_mid_unordered++;
5721 } else {
5722 strq->next_mid_ordered++;
5723 }
5724 }
5725 oh_well:
5726 if (sp->data) {
5727 /* Pull any data to free up the SB and
5728 * allow the sender to "add more" while we
5729 * throw it away :-)
5730 */
5731 sctp_free_spbufspace(stcb, &stcb->asoc, sp);
5732 ret_sz += sp->length;
5733 do_wakeup_routine = 1;
5734 sp->some_taken = 1;
5735 sctp_m_freem(sp->data);
5736 sp->data = NULL;
5737 sp->tail_mbuf = NULL;
5738 sp->length = 0;
5739 }
5740 }
5741 SCTP_TCB_SEND_UNLOCK(stcb);
5742 }
5743 if (do_wakeup_routine) {
5744 #if defined(__APPLE__) && !defined(__Userspace__)
5745 struct socket *so;
5746
5747 so = SCTP_INP_SO(stcb->sctp_ep);
5748 if (!so_locked) {
5749 atomic_add_int(&stcb->asoc.refcnt, 1);
5750 SCTP_TCB_UNLOCK(stcb);
5751 SCTP_SOCKET_LOCK(so, 1);
5752 SCTP_TCB_LOCK(stcb);
5753 atomic_subtract_int(&stcb->asoc.refcnt, 1);
5754 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
5755 /* assoc was freed while we were unlocked */
5756 SCTP_SOCKET_UNLOCK(so, 1);
5757 return (ret_sz);
5758 }
5759 }
5760 #endif
5761 sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
5762 #if defined(__APPLE__) && !defined(__Userspace__)
5763 if (!so_locked) {
5764 SCTP_SOCKET_UNLOCK(so, 1);
5765 }
5766 #endif
5767 }
5768 return (ret_sz);
5769 }
5770
5771 /*
5772 * Checks to see if the given address, addr, is one that is currently known
5773 * by the kernel. Note: it can't distinguish the same address on multiple
5774 * interfaces and doesn't handle multiple addresses with different zone/scope
5775 * ids. Note: ifa_ifwithaddr() compares the entire sockaddr struct.
5776 */
5777 struct sctp_ifa *
5778 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
5779 int holds_lock)
5780 {
5781 struct sctp_laddr *laddr;
5782
5783 if (holds_lock == 0) {
5784 SCTP_INP_RLOCK(inp);
5785 }
5786
5787 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
5788 if (laddr->ifa == NULL)
5789 continue;
5790 if (addr->sa_family != laddr->ifa->address.sa.sa_family)
5791 continue;
5792 #ifdef INET
5793 if (addr->sa_family == AF_INET) {
5794 if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
5795 laddr->ifa->address.sin.sin_addr.s_addr) {
5796 /* found him. */
5797 break;
5798 }
5799 }
5800 #endif
5801 #ifdef INET6
5802 if (addr->sa_family == AF_INET6) {
5803 if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
5804 &laddr->ifa->address.sin6)) {
5805 /* found him. */
5806 break;
5807 }
5808 }
5809 #endif
5810 #if defined(__Userspace__)
5811 if (addr->sa_family == AF_CONN) {
5812 if (((struct sockaddr_conn *)addr)->sconn_addr == laddr->ifa->address.sconn.sconn_addr) {
5813 /* found him. */
5814 break;
5815 }
5816 }
5817 #endif
5818 }
5819 if (holds_lock == 0) {
5820 SCTP_INP_RUNLOCK(inp);
5821 }
5822 if (laddr != NULL) {
5823 return (laddr->ifa);
5824 } else {
5825 return (NULL);
5826 }
5827 }
5828
5829 uint32_t
5830 sctp_get_ifa_hash_val(struct sockaddr *addr)
5831 {
5832 switch (addr->sa_family) {
5833 #ifdef INET
5834 case AF_INET:
5835 {
5836 struct sockaddr_in *sin;
5837
5838 sin = (struct sockaddr_in *)addr;
5839 return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16));
5840 }
5841 #endif
5842 #ifdef INET6
5843 case AF_INET6:
5844 {
5845 struct sockaddr_in6 *sin6;
5846 uint32_t hash_of_addr;
5847
5848 sin6 = (struct sockaddr_in6 *)addr;
5849 #if !defined(_WIN32) && !(defined(__FreeBSD__) && defined(__Userspace__)) && !defined(__APPLE__)
5850 hash_of_addr = (sin6->sin6_addr.s6_addr32[0] +
5851 sin6->sin6_addr.s6_addr32[1] +
5852 sin6->sin6_addr.s6_addr32[2] +
5853 sin6->sin6_addr.s6_addr32[3]);
5854 #else
5855 hash_of_addr = (((uint32_t *)&sin6->sin6_addr)[0] +
5856 ((uint32_t *)&sin6->sin6_addr)[1] +
5857 ((uint32_t *)&sin6->sin6_addr)[2] +
5858 ((uint32_t *)&sin6->sin6_addr)[3]);
5859 #endif
5860 hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16));
5861 return (hash_of_addr);
5862 }
5863 #endif
5864 #if defined(__Userspace__)
5865 case AF_CONN:
5866 {
5867 struct sockaddr_conn *sconn;
5868 uintptr_t temp;
5869
5870 sconn = (struct sockaddr_conn *)addr;
5871 temp = (uintptr_t)sconn->sconn_addr;
5872 return ((uint32_t)(temp ^ (temp >> 16)));
5873 }
5874 #endif
5875 default:
5876 break;
5877 }
5878 return (0);
5879 }
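/*
 * Worked example (illustrative numbers): for an IPv4 address whose s_addr
 * reads as the 32-bit value 0xc0000201 (192.0.2.1 viewed big-endian) the
 * hash is
 *
 *   0xc0000201 ^ (0xc0000201 >> 16) = 0xc0000201 ^ 0x0000c000 = 0xc000c201
 *
 * and sctp_find_ifa_by_addr() below then picks the bucket
 * (hash & vrf->vrf_addr_hashmark). The IPv6 and AF_CONN cases fold more
 * words the same way; only a cheap, reasonably uniform spread is needed
 * here, not a strong hash.
 */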
5880
5881 struct sctp_ifa *
5882 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
5883 {
5884 struct sctp_ifa *sctp_ifap;
5885 struct sctp_vrf *vrf;
5886 struct sctp_ifalist *hash_head;
5887 uint32_t hash_of_addr;
5888
5889 if (holds_lock == 0) {
5890 SCTP_IPI_ADDR_RLOCK();
5891 } else {
5892 SCTP_IPI_ADDR_LOCK_ASSERT();
5893 }
5894
5895 vrf = sctp_find_vrf(vrf_id);
5896 if (vrf == NULL) {
5897 if (holds_lock == 0)
5898 SCTP_IPI_ADDR_RUNLOCK();
5899 return (NULL);
5900 }
5901
5902 hash_of_addr = sctp_get_ifa_hash_val(addr);
5903
5904 hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
5905 if (hash_head == NULL) {
5906 SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
5907 hash_of_addr, (uint32_t)vrf->vrf_addr_hashmark,
5908 (uint32_t)(hash_of_addr & vrf->vrf_addr_hashmark));
5909 sctp_print_address(addr);
5910 SCTP_PRINTF("No such bucket for address\n");
5911 if (holds_lock == 0)
5912 SCTP_IPI_ADDR_RUNLOCK();
5913
5914 return (NULL);
5915 }
5916 LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
5917 if (addr->sa_family != sctp_ifap->address.sa.sa_family)
5918 continue;
5919 #ifdef INET
5920 if (addr->sa_family == AF_INET) {
5921 if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
5922 sctp_ifap->address.sin.sin_addr.s_addr) {
5923 /* found him. */
5924 break;
5925 }
5926 }
5927 #endif
5928 #ifdef INET6
5929 if (addr->sa_family == AF_INET6) {
5930 if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
5931 &sctp_ifap->address.sin6)) {
5932 /* found him. */
5933 break;
5934 }
5935 }
5936 #endif
5937 #if defined(__Userspace__)
5938 if (addr->sa_family == AF_CONN) {
5939 if (((struct sockaddr_conn *)addr)->sconn_addr == sctp_ifap->address.sconn.sconn_addr) {
5940 /* found him. */
5941 break;
5942 }
5943 }
5944 #endif
5945 }
5946 if (holds_lock == 0)
5947 SCTP_IPI_ADDR_RUNLOCK();
5948 return (sctp_ifap);
5949 }
5950
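/*
 * sctp_user_rcvd() in a nutshell: with rwnd the freshly computed window and
 * last the value most recently advertised to the peer, it does roughly
 *
 *   dif = (rwnd >= last) ? (rwnd - last) : 0;
 *   if (dif >= rwnd_req)
 *           send a window-update SACK (and kick the output path);
 *   else
 *           stash dif in freed_by_sorcv_sincelast and wait for more reads;
 *
 * For example (illustrative numbers), last = 100000, rwnd = 140000 and
 * rwnd_req = 32768 give dif = 40000 >= 32768, so a SACK goes out right away.
 */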
5951 static void
5952 sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t *freed_so_far, int hold_rlock,
5953 uint32_t rwnd_req)
5954 {
5955 /* User pulled some data, do we need a rwnd update? */
5956 #if defined(__FreeBSD__) && !defined(__Userspace__)
5957 struct epoch_tracker et;
5958 #endif
5959 int r_unlocked = 0;
5960 uint32_t dif, rwnd;
5961 struct socket *so = NULL;
5962
5963 if (stcb == NULL)
5964 return;
5965
5966 atomic_add_int(&stcb->asoc.refcnt, 1);
5967
5968 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
5969 (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED | SCTP_STATE_SHUTDOWN_RECEIVED))) {
5970 /* Pre-check: if we are freeing, no update is needed */
5971 goto no_lock;
5972 }
5973 SCTP_INP_INCR_REF(stcb->sctp_ep);
5974 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5975 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5976 goto out;
5977 }
5978 so = stcb->sctp_socket;
5979 if (so == NULL) {
5980 goto out;
5981 }
5982 atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
5983 /* Have you freed enough to warrant a look? */
5984 *freed_so_far = 0;
5985 /* Yep, it's worth a look and the lock overhead */
5986
5987 /* Figure out what the rwnd would be */
5988 rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
5989 if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
5990 dif = rwnd - stcb->asoc.my_last_reported_rwnd;
5991 } else {
5992 dif = 0;
5993 }
5994 if (dif >= rwnd_req) {
5995 if (hold_rlock) {
5996 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5997 r_unlocked = 1;
5998 }
5999 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
6000 /*
6001 * One last check before we allow the guy possibly
6002 * to get in. There is a race where the guy has not
6003 * reached the gate; in that case we just skip the update.
6004 */
6005 goto out;
6006 }
6007 SCTP_TCB_LOCK(stcb);
6008 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
6009 /* No reports here */
6010 SCTP_TCB_UNLOCK(stcb);
6011 goto out;
6012 }
6013 SCTP_STAT_INCR(sctps_wu_sacks_sent);
6014 #if defined(__FreeBSD__) && !defined(__Userspace__)
6015 NET_EPOCH_ENTER(et);
6016 #endif
6017 sctp_send_sack(stcb, SCTP_SO_LOCKED);
6018
6019 sctp_chunk_output(stcb->sctp_ep, stcb,
6020 SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
6021 /* make sure no timer is running */
6022 #if defined(__FreeBSD__) && !defined(__Userspace__)
6023 NET_EPOCH_EXIT(et);
6024 #endif
6025 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
6026 SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
6027 SCTP_TCB_UNLOCK(stcb);
6028 } else {
6029 /* Update how much we have pending */
6030 stcb->freed_by_sorcv_sincelast = dif;
6031 }
6032 out:
6033 if (so && r_unlocked && hold_rlock) {
6034 SCTP_INP_READ_LOCK(stcb->sctp_ep);
6035 }
6036
6037 SCTP_INP_DECR_REF(stcb->sctp_ep);
6038 no_lock:
6039 atomic_add_int(&stcb->asoc.refcnt, -1);
6040 return;
6041 }
6042
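/*
 * Rough roadmap of sctp_sorecvmsg() (descriptive only): control flow cycles
 * through the labels restart/restart_nosblocks (sleep until the read queue
 * has something usable), found_one (commit to a control and fill in the
 * sinfo/from information), get_more_data (uiomove the mbuf chain out,
 * trimming partially copied mbufs), wait_some_more (block for the rest of a
 * partially delivered message) and done_with_control/release (tear the
 * control down and drop the locks), calling sctp_user_rcvd() along the way
 * so the peer's view of our rwnd keeps up with what the application has
 * actually consumed.
 */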
6043 int
6044 sctp_sorecvmsg(struct socket *so,
6045 struct uio *uio,
6046 struct mbuf **mp,
6047 struct sockaddr *from,
6048 int fromlen,
6049 int *msg_flags,
6050 struct sctp_sndrcvinfo *sinfo,
6051 int filling_sinfo)
6052 {
6053 /*
6054 * MSG flags we will look at: MSG_DONTWAIT - non-blocking I/O;
6055 * MSG_PEEK - look, don't touch :-D (only valid without an mbuf copy,
6056 * i.e. mp == NULL, so uio is the copy method to userland); MSG_WAITALL - ??
6057 * On the way out we may set any combination of:
6058 * MSG_NOTIFICATION and MSG_EOR.
6059 *
6060 */
6061 struct sctp_inpcb *inp = NULL;
6062 ssize_t my_len = 0;
6063 ssize_t cp_len = 0;
6064 int error = 0;
6065 struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
6066 struct mbuf *m = NULL;
6067 struct sctp_tcb *stcb = NULL;
6068 int wakeup_read_socket = 0;
6069 int freecnt_applied = 0;
6070 int out_flags = 0, in_flags = 0;
6071 int block_allowed = 1;
6072 uint32_t freed_so_far = 0;
6073 ssize_t copied_so_far = 0;
6074 int in_eeor_mode = 0;
6075 int no_rcv_needed = 0;
6076 uint32_t rwnd_req = 0;
6077 int hold_sblock = 0;
6078 int hold_rlock = 0;
6079 ssize_t slen = 0;
6080 uint32_t held_length = 0;
6081 #if defined(__FreeBSD__) && !defined(__Userspace__)
6082 int sockbuf_lock = 0;
6083 #endif
6084
6085 if (uio == NULL) {
6086 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6087 return (EINVAL);
6088 }
6089
6090 if (msg_flags) {
6091 in_flags = *msg_flags;
6092 if (in_flags & MSG_PEEK)
6093 SCTP_STAT_INCR(sctps_read_peeks);
6094 } else {
6095 in_flags = 0;
6096 }
6097 #if defined(__APPLE__) && !defined(__Userspace__)
6098 #if defined(APPLE_LEOPARD)
6099 slen = uio->uio_resid;
6100 #else
6101 slen = uio_resid(uio);
6102 #endif
6103 #else
6104 slen = uio->uio_resid;
6105 #endif
6106
6107 /* Pull in and set up our int flags */
6108 if (in_flags & MSG_OOB) {
6109 /* Out of band data is NOT supported */
6110 return (EOPNOTSUPP);
6111 }
6112 if ((in_flags & MSG_PEEK) && (mp != NULL)) {
6113 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6114 return (EINVAL);
6115 }
6116 if ((in_flags & (MSG_DONTWAIT
6117 #if defined(__FreeBSD__) && !defined(__Userspace__)
6118 | MSG_NBIO
6119 #endif
6120 )) ||
6121 SCTP_SO_IS_NBIO(so)) {
6122 block_allowed = 0;
6123 }
6124 /* setup the endpoint */
6125 inp = (struct sctp_inpcb *)so->so_pcb;
6126 if (inp == NULL) {
6127 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
6128 return (EFAULT);
6129 }
6130 rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
6131 /* Must be at least an MTU's worth */
6132 if (rwnd_req < SCTP_MIN_RWND)
6133 rwnd_req = SCTP_MIN_RWND;
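/* Illustrative sizing (the shift value is an assumption): with a 256 KB
 * receive buffer limit and SCTP_RWND_HIWAT_SHIFT of, say, 3 this yields a
 * rwnd_req of 32 KB, i.e. a window update is only considered after roughly
 * an eighth of the buffer has been drained, and never for less than
 * SCTP_MIN_RWND (about an MTU's worth).
 */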
6134 in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
6135 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
6136 #if defined(__APPLE__) && !defined(__Userspace__)
6137 #if defined(APPLE_LEOPARD)
6138 sctp_misc_ints(SCTP_SORECV_ENTER,
6139 rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio->uio_resid);
6140 #else
6141 sctp_misc_ints(SCTP_SORECV_ENTER,
6142 rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio_resid(uio));
6143 #endif
6144 #else
6145 sctp_misc_ints(SCTP_SORECV_ENTER,
6146 rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid);
6147 #endif
6148 }
6149 #if defined(__Userspace__)
6150 SOCKBUF_LOCK(&so->so_rcv);
6151 hold_sblock = 1;
6152 #endif
6153 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
6154 #if defined(__APPLE__) && !defined(__Userspace__)
6155 #if defined(APPLE_LEOPARD)
6156 sctp_misc_ints(SCTP_SORECV_ENTERPL,
6157 rwnd_req, block_allowed, so->so_rcv.sb_cc, uio->uio_resid);
6158 #else
6159 sctp_misc_ints(SCTP_SORECV_ENTERPL,
6160 rwnd_req, block_allowed, so->so_rcv.sb_cc, uio_resid(uio));
6161 #endif
6162 #else
6163 sctp_misc_ints(SCTP_SORECV_ENTERPL,
6164 rwnd_req, block_allowed, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid);
6165 #endif
6166 }
6167
6168 #if defined(__APPLE__) && !defined(__Userspace__)
6169 error = sblock(&so->so_rcv, SBLOCKWAIT(in_flags));
6170 #endif
6171
6172 #if defined(__FreeBSD__) && !defined(__Userspace__)
6173 error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
6174 #endif
6175 if (error) {
6176 goto release_unlocked;
6177 }
6178 #if defined(__FreeBSD__) && !defined(__Userspace__)
6179 sockbuf_lock = 1;
6180 #endif
6181 restart:
6182 #if defined(__Userspace__)
6183 if (hold_sblock == 0) {
6184 SOCKBUF_LOCK(&so->so_rcv);
6185 hold_sblock = 1;
6186 }
6187 #endif
6188 #if defined(__APPLE__) && !defined(__Userspace__)
6189 sbunlock(&so->so_rcv, 1);
6190 #endif
6191
6192 restart_nosblocks:
6193 if (hold_sblock == 0) {
6194 SOCKBUF_LOCK(&so->so_rcv);
6195 hold_sblock = 1;
6196 }
6197 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
6198 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
6199 goto out;
6200 }
6201 #if (defined(__FreeBSD__) || defined(_WIN32)) && !defined(__Userspace__)
6202 if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) {
6203 #else
6204 if ((so->so_state & SS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) {
6205 #endif
6206 if (so->so_error) {
6207 error = so->so_error;
6208 if ((in_flags & MSG_PEEK) == 0)
6209 so->so_error = 0;
6210 goto out;
6211 } else {
6212 if (so->so_rcv.sb_cc == 0) {
6213 /* indicate EOF */
6214 error = 0;
6215 goto out;
6216 }
6217 }
6218 }
6219 if (so->so_rcv.sb_cc <= held_length) {
6220 if (so->so_error) {
6221 error = so->so_error;
6222 if ((in_flags & MSG_PEEK) == 0) {
6223 so->so_error = 0;
6224 }
6225 goto out;
6226 }
6227 if ((so->so_rcv.sb_cc == 0) &&
6228 ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
6229 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
6230 if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
6231 /* For the active open side clear flags for re-use;
6232 * the passive open side is blocked by connect.
6233 */
6234 if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
6235 /* You were aborted, passive side always hits here */
6236 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
6237 error = ECONNRESET;
6238 }
6239 so->so_state &= ~(SS_ISCONNECTING |
6240 SS_ISDISCONNECTING |
6241 SS_ISCONFIRMING |
6242 SS_ISCONNECTED);
6243 if (error == 0) {
6244 if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
6245 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
6246 error = ENOTCONN;
6247 }
6248 }
6249 goto out;
6250 }
6251 }
6252 if (block_allowed) {
6253 error = sbwait(&so->so_rcv);
6254 if (error) {
6255 goto out;
6256 }
6257 held_length = 0;
6258 goto restart_nosblocks;
6259 } else {
6260 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
6261 error = EWOULDBLOCK;
6262 goto out;
6263 }
6264 }
6265 if (hold_sblock == 1) {
6266 SOCKBUF_UNLOCK(&so->so_rcv);
6267 hold_sblock = 0;
6268 }
6269 #if defined(__APPLE__) && !defined(__Userspace__)
6270 error = sblock(&so->so_rcv, SBLOCKWAIT(in_flags));
6271 #endif
6272 /* we possibly have data we can read */
6273 /*sa_ignore FREED_MEMORY*/
6274 control = TAILQ_FIRST(&inp->read_queue);
6275 if (control == NULL) {
6276 /* This could be happening since
6277 * the appender did the increment but has not
6278 * yet done the tailq insert onto the read_queue
6279 */
6280 if (hold_rlock == 0) {
6281 SCTP_INP_READ_LOCK(inp);
6282 }
6283 control = TAILQ_FIRST(&inp->read_queue);
6284 if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
6285 #ifdef INVARIANTS
6286 panic("Huh, it's non zero and nothing on control?");
6287 #endif
6288 so->so_rcv.sb_cc = 0;
6289 }
6290 SCTP_INP_READ_UNLOCK(inp);
6291 hold_rlock = 0;
6292 goto restart;
6293 }
6294
6295 if ((control->length == 0) &&
6296 (control->do_not_ref_stcb)) {
6297 /* Clean up code for freeing assoc that left behind a pdapi..
6298 * maybe a peer in EEOR that just closed after sending and
6299 * never indicated an EOR.
6300 */
6301 if (hold_rlock == 0) {
6302 hold_rlock = 1;
6303 SCTP_INP_READ_LOCK(inp);
6304 }
6305 control->held_length = 0;
6306 if (control->data) {
6307 /* Hmm there is data here .. fix */
6308 struct mbuf *m_tmp;
6309 int cnt = 0;
6310 m_tmp = control->data;
6311 while (m_tmp) {
6312 cnt += SCTP_BUF_LEN(m_tmp);
6313 if (SCTP_BUF_NEXT(m_tmp) == NULL) {
6314 control->tail_mbuf = m_tmp;
6315 control->end_added = 1;
6316 }
6317 m_tmp = SCTP_BUF_NEXT(m_tmp);
6318 }
6319 control->length = cnt;
6320 } else {
6321 /* remove it */
6322 TAILQ_REMOVE(&inp->read_queue, control, next);
6323 /* Add back any hidden data */
6324 sctp_free_remote_addr(control->whoFrom);
6325 sctp_free_a_readq(stcb, control);
6326 }
6327 if (hold_rlock) {
6328 hold_rlock = 0;
6329 SCTP_INP_READ_UNLOCK(inp);
6330 }
6331 goto restart;
6332 }
6333 if ((control->length == 0) &&
6334 (control->end_added == 1)) {
6335 /* Do we also need to check for (control->pdapi_aborted == 1)? */
6336 if (hold_rlock == 0) {
6337 hold_rlock = 1;
6338 SCTP_INP_READ_LOCK(inp);
6339 }
6340 TAILQ_REMOVE(&inp->read_queue, control, next);
6341 if (control->data) {
6342 #ifdef INVARIANTS
6343 panic("control->data not null but control->length == 0");
6344 #else
6345 SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n");
6346 sctp_m_freem(control->data);
6347 control->data = NULL;
6348 #endif
6349 }
6350 if (control->aux_data) {
6351 sctp_m_free (control->aux_data);
6352 control->aux_data = NULL;
6353 }
6354 #ifdef INVARIANTS
6355 if (control->on_strm_q) {
6356 panic("About to free ctl:%p so:%p and its in %d",
6357 control, so, control->on_strm_q);
6358 }
6359 #endif
6360 sctp_free_remote_addr(control->whoFrom);
6361 sctp_free_a_readq(stcb, control);
6362 if (hold_rlock) {
6363 hold_rlock = 0;
6364 SCTP_INP_READ_UNLOCK(inp);
6365 }
6366 goto restart;
6367 }
6368 if (control->length == 0) {
6369 if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
6370 (filling_sinfo)) {
6371 /* find a more suitable one than this */
6372 ctl = TAILQ_NEXT(control, next);
6373 while (ctl) {
6374 if ((ctl->stcb != control->stcb) && (ctl->length) &&
6375 (ctl->some_taken ||
6376 (ctl->spec_flags & M_NOTIFICATION) ||
6377 ((ctl->do_not_ref_stcb == 0) &&
6378 (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
6379 ) {
6380 /*-
6381 * If we have a different TCB next, and there is data
6382 * present, and either we have already taken some (pdapi), OR we can
6383 * ref the tcb and no delivery has started on this stream, we
6384 * take it. Note we allow a notification on a different
6385 * assoc to be delivered..
6386 */
6387 control = ctl;
6388 goto found_one;
6389 } else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
6390 (ctl->length) &&
6391 ((ctl->some_taken) ||
6392 ((ctl->do_not_ref_stcb == 0) &&
6393 ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
6394 (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) {
6395 /*-
6396 * If we have the same tcb, and there is data present, and we
6397 * have the strm interleave feature present. Then if we have
6398 * taken some (pdapi) or we can refer to that tcb AND we have
6399 * not started a delivery for this stream, we can take it.
6400 * Note we do NOT allow a notification on the same assoc to
6401 * be delivered.
6402 */
6403 control = ctl;
6404 goto found_one;
6405 }
6406 ctl = TAILQ_NEXT(ctl, next);
6407 }
6408 }
6409 /*
6410 * if we reach here, no suitable replacement is available
6411 * <or> fragment interleave is NOT on. So stuff the sb_cc
6412 * into our held count, and it's time to sleep again.
6413 */
6414 held_length = so->so_rcv.sb_cc;
6415 control->held_length = so->so_rcv.sb_cc;
6416 goto restart;
6417 }
6418 /* Clear the held length since there is something to read */
6419 control->held_length = 0;
6420 found_one:
6421 /*
6422 * If we reach here, control has some data for us to read off.
6423 * Note that stcb COULD be NULL.
6424 */
6425 if (hold_rlock == 0) {
6426 hold_rlock = 1;
6427 SCTP_INP_READ_LOCK(inp);
6428 }
6429 control->some_taken++;
6430 stcb = control->stcb;
6431 if (stcb) {
6432 if ((control->do_not_ref_stcb == 0) &&
6433 (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
6434 if (freecnt_applied == 0)
6435 stcb = NULL;
6436 } else if (control->do_not_ref_stcb == 0) {
6437 /* you can't free it on me please */
6438 /*
6439 * The lock on the socket buffer protects us so the
6440 * free code will stop. But since we used the socketbuf
6441 * lock and the sender uses the tcb_lock to increment,
6442 * we need to use the atomic add to the refcnt
6443 */
6444 if (freecnt_applied) {
6445 #ifdef INVARIANTS
6446 panic("refcnt already incremented");
6447 #else
6448 SCTP_PRINTF("refcnt already incremented?\n");
6449 #endif
6450 } else {
6451 atomic_add_int(&stcb->asoc.refcnt, 1);
6452 freecnt_applied = 1;
6453 }
6454 /*
6455 * Setup to remember how much we have not yet told
6456 * the peer our rwnd has opened up. Note we grab
6457 * the value from the tcb from last time.
6458 * Note too that sack sending clears this when a sack
6459 * is sent, which is fine. Once we hit the rwnd_req,
6460 * we then will go to the sctp_user_rcvd() that will
6461 * not lock until it KNOWs it MUST send a WUP-SACK.
6462 */
6463 freed_so_far = (uint32_t)stcb->freed_by_sorcv_sincelast;
6464 stcb->freed_by_sorcv_sincelast = 0;
6465 }
6466 }
6467 if (stcb &&
6468 ((control->spec_flags & M_NOTIFICATION) == 0) &&
6469 control->do_not_ref_stcb == 0) {
6470 stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
6471 }
6472
6473 /* First lets get off the sinfo and sockaddr info */
6474 if ((sinfo != NULL) && (filling_sinfo != 0)) {
6475 sinfo->sinfo_stream = control->sinfo_stream;
6476 sinfo->sinfo_ssn = (uint16_t)control->mid;
6477 sinfo->sinfo_flags = control->sinfo_flags;
6478 sinfo->sinfo_ppid = control->sinfo_ppid;
6479 sinfo->sinfo_context = control->sinfo_context;
6480 sinfo->sinfo_timetolive = control->sinfo_timetolive;
6481 sinfo->sinfo_tsn = control->sinfo_tsn;
6482 sinfo->sinfo_cumtsn = control->sinfo_cumtsn;
6483 sinfo->sinfo_assoc_id = control->sinfo_assoc_id;
6484 nxt = TAILQ_NEXT(control, next);
6485 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
6486 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
6487 struct sctp_extrcvinfo *s_extra;
6488 s_extra = (struct sctp_extrcvinfo *)sinfo;
6489 if ((nxt) &&
6490 (nxt->length)) {
6491 s_extra->serinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
6492 if (nxt->sinfo_flags & SCTP_UNORDERED) {
6493 s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
6494 }
6495 if (nxt->spec_flags & M_NOTIFICATION) {
6496 s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
6497 }
6498 s_extra->serinfo_next_aid = nxt->sinfo_assoc_id;
6499 s_extra->serinfo_next_length = nxt->length;
6500 s_extra->serinfo_next_ppid = nxt->sinfo_ppid;
6501 s_extra->serinfo_next_stream = nxt->sinfo_stream;
6502 if (nxt->tail_mbuf != NULL) {
6503 if (nxt->end_added) {
6504 s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
6505 }
6506 }
6507 } else {
6508 /* we explicitly 0 this, since the memcpy got
6509 * some other things beyond the older sinfo_
6510 * that is on the control's structure :-D
6511 */
6512 nxt = NULL;
6513 s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG;
6514 s_extra->serinfo_next_aid = 0;
6515 s_extra->serinfo_next_length = 0;
6516 s_extra->serinfo_next_ppid = 0;
6517 s_extra->serinfo_next_stream = 0;
6518 }
6519 }
6520 /*
6521 * update off the real current cum-ack, if we have an stcb.
6522 */
6523 if ((control->do_not_ref_stcb == 0) && stcb)
6524 sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
6525 /*
6526 * mask off the high bits, we keep the actual chunk bits in
6527 * there.
6528 */
6529 sinfo->sinfo_flags &= 0x00ff;
6530 if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
6531 sinfo->sinfo_flags |= SCTP_UNORDERED;
6532 }
6533 }
6534 #ifdef SCTP_ASOCLOG_OF_TSNS
6535 {
6536 int index, newindex;
6537 struct sctp_pcbtsn_rlog *entry;
6538 do {
6539 index = inp->readlog_index;
6540 newindex = index + 1;
6541 if (newindex >= SCTP_READ_LOG_SIZE) {
6542 newindex = 0;
6543 }
6544 } while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
6545 entry = &inp->readlog[index];
6546 entry->vtag = control->sinfo_assoc_id;
6547 entry->strm = control->sinfo_stream;
6548 entry->seq = (uint16_t)control->mid;
6549 entry->sz = control->length;
6550 entry->flgs = control->sinfo_flags;
6551 }
6552 #endif
6553 if ((fromlen > 0) && (from != NULL)) {
6554 union sctp_sockstore store;
6555 size_t len;
6556
6557 switch (control->whoFrom->ro._l_addr.sa.sa_family) {
6558 #ifdef INET6
6559 case AF_INET6:
6560 len = sizeof(struct sockaddr_in6);
6561 store.sin6 = control->whoFrom->ro._l_addr.sin6;
6562 store.sin6.sin6_port = control->port_from;
6563 break;
6564 #endif
6565 #ifdef INET
6566 case AF_INET:
6567 #ifdef INET6
6568 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
6569 len = sizeof(struct sockaddr_in6);
6570 in6_sin_2_v4mapsin6(&control->whoFrom->ro._l_addr.sin,
6571 &store.sin6);
6572 store.sin6.sin6_port = control->port_from;
6573 } else {
6574 len = sizeof(struct sockaddr_in);
6575 store.sin = control->whoFrom->ro._l_addr.sin;
6576 store.sin.sin_port = control->port_from;
6577 }
6578 #else
6579 len = sizeof(struct sockaddr_in);
6580 store.sin = control->whoFrom->ro._l_addr.sin;
6581 store.sin.sin_port = control->port_from;
6582 #endif
6583 break;
6584 #endif
6585 #if defined(__Userspace__)
6586 case AF_CONN:
6587 len = sizeof(struct sockaddr_conn);
6588 store.sconn = control->whoFrom->ro._l_addr.sconn;
6589 store.sconn.sconn_port = control->port_from;
6590 break;
6591 #endif
6592 default:
6593 len = 0;
6594 break;
6595 }
6596 memcpy(from, &store, min((size_t)fromlen, len));
6597 #if defined(SCTP_EMBEDDED_V6_SCOPE)
6598 #ifdef INET6
6599 {
6600 struct sockaddr_in6 lsa6, *from6;
6601
6602 from6 = (struct sockaddr_in6 *)from;
6603 sctp_recover_scope_mac(from6, (&lsa6));
6604 }
6605 #endif
6606 #endif
6607 }
6608 if (hold_rlock) {
6609 SCTP_INP_READ_UNLOCK(inp);
6610 hold_rlock = 0;
6611 }
6612 if (hold_sblock) {
6613 SOCKBUF_UNLOCK(&so->so_rcv);
6614 hold_sblock = 0;
6615 }
6616 /* now copy out what data we can */
6617 if (mp == NULL) {
6618 /* copy out each mbuf in the chain up to length */
6619 get_more_data:
6620 m = control->data;
6621 while (m) {
6622 /* Move out all we can */
6623 #if defined(__APPLE__) && !defined(__Userspace__)
6624 #if defined(APPLE_LEOPARD)
6625 cp_len = uio->uio_resid;
6626 #else
6627 cp_len = uio_resid(uio);
6628 #endif
6629 #else
6630 cp_len = uio->uio_resid;
6631 #endif
6632 my_len = SCTP_BUF_LEN(m);
6633 if (cp_len > my_len) {
6634 /* not enough in this buf */
6635 cp_len = my_len;
6636 }
6637 if (hold_rlock) {
6638 SCTP_INP_READ_UNLOCK(inp);
6639 hold_rlock = 0;
6640 }
6641 #if defined(__APPLE__) && !defined(__Userspace__)
6642 SCTP_SOCKET_UNLOCK(so, 0);
6643 #endif
6644 if (cp_len > 0)
6645 error = uiomove(mtod(m, char *), (int)cp_len, uio);
6646 #if defined(__APPLE__) && !defined(__Userspace__)
6647 SCTP_SOCKET_LOCK(so, 0);
6648 #endif
6649 /* re-read */
6650 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
6651 goto release;
6652 }
6653
6654 if ((control->do_not_ref_stcb == 0) && stcb &&
6655 stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
6656 no_rcv_needed = 1;
6657 }
6658 if (error) {
6659 /* error we are out of here */
6660 goto release;
6661 }
6662 SCTP_INP_READ_LOCK(inp);
6663 hold_rlock = 1;
6664 if (cp_len == SCTP_BUF_LEN(m)) {
6665 if ((SCTP_BUF_NEXT(m)== NULL) &&
6666 (control->end_added)) {
6667 out_flags |= MSG_EOR;
6668 if ((control->do_not_ref_stcb == 0) &&
6669 (control->stcb != NULL) &&
6670 ((control->spec_flags & M_NOTIFICATION) == 0))
6671 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6672 }
6673 if (control->spec_flags & M_NOTIFICATION) {
6674 out_flags |= MSG_NOTIFICATION;
6675 }
6676 /* we ate up the mbuf */
6677 if (in_flags & MSG_PEEK) {
6678 /* just looking */
6679 m = SCTP_BUF_NEXT(m);
6680 copied_so_far += cp_len;
6681 } else {
6682 /* dispose of the mbuf */
6683 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6684 sctp_sblog(&so->so_rcv,
6685 control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
6686 }
6687 sctp_sbfree(control, stcb, &so->so_rcv, m);
6688 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6689 sctp_sblog(&so->so_rcv,
6690 control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBRESULT, 0);
6691 }
6692 copied_so_far += cp_len;
6693 freed_so_far += (uint32_t)cp_len;
6694 freed_so_far += MSIZE;
6695 atomic_subtract_int(&control->length, cp_len);
6696 control->data = sctp_m_free(m);
6697 m = control->data;
6698 /* we have been through it all; we must hold the sb lock, so it is ok to null the tail */
6699 if (control->data == NULL) {
6700 #ifdef INVARIANTS
6701 #if defined(__FreeBSD__) && !defined(__Userspace__)
6702 if ((control->end_added == 0) ||
6703 (TAILQ_NEXT(control, next) == NULL)) {
6704 /* If the end is not added, OR there
6705 * is no next control, we MUST hold the lock.
6706 */
6707 if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
6708 panic("Hmm we don't own the lock?");
6709 }
6710 }
6711 #endif
6712 #endif
6713 control->tail_mbuf = NULL;
6714 #ifdef INVARIANTS
6715 if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
6716 panic("end_added, nothing left and no MSG_EOR");
6717 }
6718 #endif
6719 }
6720 }
6721 } else {
6722 /* Do we need to trim the mbuf? */
6723 if (control->spec_flags & M_NOTIFICATION) {
6724 out_flags |= MSG_NOTIFICATION;
6725 }
6726 if ((in_flags & MSG_PEEK) == 0) {
6727 SCTP_BUF_RESV_UF(m, cp_len);
6728 SCTP_BUF_LEN(m) -= (int)cp_len;
6729 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6730 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBFREE, (int)cp_len);
6731 }
6732 atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
6733 if ((control->do_not_ref_stcb == 0) &&
6734 stcb) {
6735 atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
6736 }
6737 copied_so_far += cp_len;
6738 freed_so_far += (uint32_t)cp_len;
6739 freed_so_far += MSIZE;
6740 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6741 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb?NULL:stcb,
6742 SCTP_LOG_SBRESULT, 0);
6743 }
6744 atomic_subtract_int(&control->length, cp_len);
6745 } else {
6746 copied_so_far += cp_len;
6747 }
6748 }
6749 #if defined(__APPLE__) && !defined(__Userspace__)
6750 #if defined(APPLE_LEOPARD)
6751 if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
6752 #else
6753 if ((out_flags & MSG_EOR) || (uio_resid(uio) == 0)) {
6754 #endif
6755 #else
6756 if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
6757 #endif
6758 break;
6759 }
6760 if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
6761 (control->do_not_ref_stcb == 0) &&
6762 (freed_so_far >= rwnd_req)) {
6763 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6764 }
6765 } /* end while(m) */
6766 /*
6767 * At this point we have looked at it all and we either have
6768 * a MSG_EOR, or we read all the user wants... <OR>
6769 * control->length == 0.
6770 */
6771 if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
6772 /* we are done with this control */
6773 if (control->length == 0) {
6774 if (control->data) {
6775 #ifdef INVARIANTS
6776 panic("control->data not null at read eor?");
6777 #else
6778 SCTP_PRINTF("Strange, data left in the control buffer .. invariants would panic?\n");
6779 sctp_m_freem(control->data);
6780 control->data = NULL;
6781 #endif
6782 }
6783 done_with_control:
6784 if (hold_rlock == 0) {
6785 SCTP_INP_READ_LOCK(inp);
6786 hold_rlock = 1;
6787 }
6788 TAILQ_REMOVE(&inp->read_queue, control, next);
6789 /* Add back any hidden data */
6790 if (control->held_length) {
6791 held_length = 0;
6792 control->held_length = 0;
6793 wakeup_read_socket = 1;
6794 }
6795 if (control->aux_data) {
6796 sctp_m_free (control->aux_data);
6797 control->aux_data = NULL;
6798 }
6799 no_rcv_needed = control->do_not_ref_stcb;
6800 sctp_free_remote_addr(control->whoFrom);
6801 control->data = NULL;
6802 #ifdef INVARIANTS
6803 if (control->on_strm_q) {
6804 panic("About to free ctl:%p so:%p and its in %d",
6805 control, so, control->on_strm_q);
6806 }
6807 #endif
6808 sctp_free_a_readq(stcb, control);
6809 control = NULL;
6810 if ((freed_so_far >= rwnd_req) &&
6811 (no_rcv_needed == 0))
6812 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6813
6814 } else {
6815 /*
6816 * The user did not read all of this
6817 * message, turn off the returned MSG_EOR
6818 * since we are leaving more behind on the
6819 * control to read.
6820 */
6821 #ifdef INVARIANTS
6822 if (control->end_added &&
6823 (control->data == NULL) &&
6824 (control->tail_mbuf == NULL)) {
6825 panic("Gak, control->length is corrupt?");
6826 }
6827 #endif
6828 no_rcv_needed = control->do_not_ref_stcb;
6829 out_flags &= ~MSG_EOR;
6830 }
6831 }
6832 if (out_flags & MSG_EOR) {
6833 goto release;
6834 }
6835 #if defined(__APPLE__) && !defined(__Userspace__)
6836 #if defined(APPLE_LEOPARD)
6837 if ((uio->uio_resid == 0) ||
6838 #else
6839 if ((uio_resid(uio) == 0) ||
6840 #endif
6841 #else
6842 if ((uio->uio_resid == 0) ||
6843 #endif
6844 ((in_eeor_mode) &&
6845 (copied_so_far >= max(so->so_rcv.sb_lowat, 1)))) {
6846 goto release;
6847 }
6848 /*
6849 * If I hit here the receiver wants more and this message is
6850 * NOT done (pd-api). So two questions: can we block? If not,
6851 * we are done. Did the user NOT set MSG_WAITALL?
6852 */
6853 if (block_allowed == 0) {
6854 goto release;
6855 }
6856 /*
6857 * We need to wait for more data; a few things: - We don't
6858 * sbunlock() so we don't get someone else reading. - We
6859 * must be sure to account for the case where what is added
6860 * is NOT to our control when we wakeup.
6861 */
6862
6863 /* Do we need to tell the transport a rwnd update might be
6864 * needed before we go to sleep?
6865 */
6866 if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
6867 ((freed_so_far >= rwnd_req) &&
6868 (control->do_not_ref_stcb == 0) &&
6869 (no_rcv_needed == 0))) {
6870 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6871 }
6872 wait_some_more:
6873 #if (defined(__FreeBSD__) || defined(_WIN32)) && !defined(__Userspace__)
6874 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
6875 goto release;
6876 }
6877 #else
6878 if (so->so_state & SS_CANTRCVMORE) {
6879 goto release;
6880 }
6881 #endif
6882
6883 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
6884 goto release;
6885
6886 if (hold_rlock == 1) {
6887 SCTP_INP_READ_UNLOCK(inp);
6888 hold_rlock = 0;
6889 }
6890 if (hold_sblock == 0) {
6891 SOCKBUF_LOCK(&so->so_rcv);
6892 hold_sblock = 1;
6893 }
6894 if ((copied_so_far) && (control->length == 0) &&
6895 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) {
6896 goto release;
6897 }
6898 #if defined(__APPLE__) && !defined(__Userspace__)
6899 sbunlock(&so->so_rcv, 1);
6900 #endif
6901 if (so->so_rcv.sb_cc <= control->held_length) {
6902 error = sbwait(&so->so_rcv);
6903 if (error) {
6904 #if defined(__FreeBSD__) && !defined(__Userspace__)
6905 goto release;
6906 #else
6907 goto release_unlocked;
6908 #endif
6909 }
6910 control->held_length = 0;
6911 }
6912 #if defined(__APPLE__) && !defined(__Userspace__)
6913 error = sblock(&so->so_rcv, SBLOCKWAIT(in_flags));
6914 #endif
6915 if (hold_sblock) {
6916 SOCKBUF_UNLOCK(&so->so_rcv);
6917 hold_sblock = 0;
6918 }
6919 if (control->length == 0) {
6920 /* still nothing here */
6921 if (control->end_added == 1) {
6922 /* he aborted, or is done, i.e. did a shutdown */
6923 out_flags |= MSG_EOR;
6924 if (control->pdapi_aborted) {
6925 if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
6926 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6927
6928 out_flags |= MSG_TRUNC;
6929 } else {
6930 if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
6931 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6932 }
6933 goto done_with_control;
6934 }
6935 if (so->so_rcv.sb_cc > held_length) {
6936 control->held_length = so->so_rcv.sb_cc;
6937 held_length = 0;
6938 }
6939 goto wait_some_more;
6940 } else if (control->data == NULL) {
6941 /* we must re-sync since data
6942 * is probably being added
6943 */
6944 SCTP_INP_READ_LOCK(inp);
6945 if ((control->length > 0) && (control->data == NULL)) {
6946 /* big trouble.. we have the lock and it's corrupt? */
6947 #ifdef INVARIANTS
6948 panic ("Impossible data==NULL length !=0");
6949 #endif
6950 out_flags |= MSG_EOR;
6951 out_flags |= MSG_TRUNC;
6952 control->length = 0;
6953 SCTP_INP_READ_UNLOCK(inp);
6954 goto done_with_control;
6955 }
6956 SCTP_INP_READ_UNLOCK(inp);
6957 /* We will fall around to get more data */
6958 }
6959 goto get_more_data;
6960 } else {
6961 /*-
6962 * Give caller back the mbuf chain,
6963 * store in uio_resid the length
6964 */
6965 wakeup_read_socket = 0;
6966 if ((control->end_added == 0) ||
6967 (TAILQ_NEXT(control, next) == NULL)) {
6968 /* Need to get rlock */
6969 if (hold_rlock == 0) {
6970 SCTP_INP_READ_LOCK(inp);
6971 hold_rlock = 1;
6972 }
6973 }
6974 if (control->end_added) {
6975 out_flags |= MSG_EOR;
6976 if ((control->do_not_ref_stcb == 0) &&
6977 (control->stcb != NULL) &&
6978 ((control->spec_flags & M_NOTIFICATION) == 0))
6979 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6980 }
6981 if (control->spec_flags & M_NOTIFICATION) {
6982 out_flags |= MSG_NOTIFICATION;
6983 }
6984 #if defined(__APPLE__) && !defined(__Userspace__)
6985 #if defined(APPLE_LEOPARD)
6986 uio->uio_resid = control->length;
6987 #else
6988 uio_setresid(uio, control->length);
6989 #endif
6990 #else
6991 uio->uio_resid = control->length;
6992 #endif
6993 *mp = control->data;
6994 m = control->data;
6995 while (m) {
6996 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6997 sctp_sblog(&so->so_rcv,
6998 control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
6999 }
7000 sctp_sbfree(control, stcb, &so->so_rcv, m);
7001 freed_so_far += (uint32_t)SCTP_BUF_LEN(m);
7002 freed_so_far += MSIZE;
7003 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
7004 sctp_sblog(&so->so_rcv,
7005 control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBRESULT, 0);
7006 }
7007 m = SCTP_BUF_NEXT(m);
7008 }
7009 control->data = control->tail_mbuf = NULL;
7010 control->length = 0;
7011 if (out_flags & MSG_EOR) {
7012 /* Done with this control */
7013 goto done_with_control;
7014 }
7015 }
7016 release:
7017 if (hold_rlock == 1) {
7018 SCTP_INP_READ_UNLOCK(inp);
7019 hold_rlock = 0;
7020 }
7021 #if defined(__Userspace__)
7022 if (hold_sblock == 0) {
7023 SOCKBUF_LOCK(&so->so_rcv);
7024 hold_sblock = 1;
7025 }
7026 #else
7027 if (hold_sblock == 1) {
7028 SOCKBUF_UNLOCK(&so->so_rcv);
7029 hold_sblock = 0;
7030 }
7031 #endif
7032 #if defined(__APPLE__) && !defined(__Userspace__)
7033 sbunlock(&so->so_rcv, 1);
7034 #endif
7035
7036 #if defined(__FreeBSD__) && !defined(__Userspace__)
7037 sbunlock(&so->so_rcv);
7038 sockbuf_lock = 0;
7039 #endif
7040
7041 release_unlocked:
7042 if (hold_sblock) {
7043 SOCKBUF_UNLOCK(&so->so_rcv);
7044 hold_sblock = 0;
7045 }
7046 if ((stcb) && (in_flags & MSG_PEEK) == 0) {
7047 if ((freed_so_far >= rwnd_req) &&
7048 (control && (control->do_not_ref_stcb == 0)) &&
7049 (no_rcv_needed == 0))
7050 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
7051 }
7052 out:
7053 if (msg_flags) {
7054 *msg_flags = out_flags;
7055 }
7056 if (((out_flags & MSG_EOR) == 0) &&
7057 ((in_flags & MSG_PEEK) == 0) &&
7058 (sinfo) &&
7059 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
7060 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) {
7061 struct sctp_extrcvinfo *s_extra;
7062 s_extra = (struct sctp_extrcvinfo *)sinfo;
7063 s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG;
7064 }
7065 if (hold_rlock == 1) {
7066 SCTP_INP_READ_UNLOCK(inp);
7067 }
7068 if (hold_sblock) {
7069 SOCKBUF_UNLOCK(&so->so_rcv);
7070 }
7071 #if defined(__FreeBSD__) && !defined(__Userspace__)
7072 if (sockbuf_lock) {
7073 sbunlock(&so->so_rcv);
7074 }
7075 #endif
7076
7077 if (freecnt_applied) {
7078 /*
7079 * The lock on the socket buffer protects us, so the free
7080 * code will stop. But since we used the socket buffer lock and
7081 * the sender uses the tcb_lock to increment, we need to use
7082 * an atomic add on the refcnt.
7083 */
7084 if (stcb == NULL) {
7085 #ifdef INVARIANTS
7086 panic("stcb for refcnt has gone NULL?");
7087 goto stage_left;
7088 #else
7089 goto stage_left;
7090 #endif
7091 }
7092 /* Save the value back for next time */
7093 stcb->freed_by_sorcv_sincelast = freed_so_far;
7094 atomic_add_int(&stcb->asoc.refcnt, -1);
7095 }
7096 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
7097 if (stcb) {
7098 sctp_misc_ints(SCTP_SORECV_DONE,
7099 freed_so_far,
7100 #if defined(__APPLE__) && !defined(__Userspace__)
7101 #if defined(APPLE_LEOPARD)
7102 ((uio) ? (slen - uio->uio_resid) : slen),
7103 #else
7104 ((uio) ? (slen - uio_resid(uio)) : slen),
7105 #endif
7106 #else
7107 (uint32_t)((uio) ? (slen - uio->uio_resid) : slen),
7108 #endif
7109 stcb->asoc.my_rwnd,
7110 so->so_rcv.sb_cc);
7111 } else {
7112 sctp_misc_ints(SCTP_SORECV_DONE,
7113 freed_so_far,
7114 #if defined(__APPLE__) && !defined(__Userspace__)
7115 #if defined(APPLE_LEOPARD)
7116 ((uio) ? (slen - uio->uio_resid) : slen),
7117 #else
7118 ((uio) ? (slen - uio_resid(uio)) : slen),
7119 #endif
7120 #else
7121 (uint32_t)((uio) ? (slen - uio->uio_resid) : slen),
7122 #endif
7123 0,
7124 so->so_rcv.sb_cc);
7125 }
7126 }
7127 stage_left:
7128 if (wakeup_read_socket) {
7129 sctp_sorwakeup(inp, so);
7130 }
7131 return (error);
7132 }
7133
7134
7135 #ifdef SCTP_MBUF_LOGGING
7136 struct mbuf *
7137 sctp_m_free(struct mbuf *m)
7138 {
7139 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
7140 sctp_log_mb(m, SCTP_MBUF_IFREE);
7141 }
7142 return (m_free(m));
7143 }
7144
7145 void
7146 sctp_m_freem(struct mbuf *mb)
7147 {
7148 while (mb != NULL)
7149 mb = sctp_m_free(mb);
7150 }
7151
7152 #endif
7153
7154 int
7155 sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
7156 {
7157 /* Given a local address, request a peer-set-primary
7158 * for all associations that hold the address.
7159 */
7160 struct sctp_ifa *ifa;
7161 struct sctp_laddr *wi;
7162
7163 ifa = sctp_find_ifa_by_addr(sa, vrf_id, SCTP_ADDR_NOT_LOCKED);
7164 if (ifa == NULL) {
7165 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
7166 return (EADDRNOTAVAIL);
7167 }
7168 /* Now that we have the ifa we must awaken the
7169 * iterator with this message.
7170 */
7171 wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
7172 if (wi == NULL) {
7173 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
7174 return (ENOMEM);
7175 }
7176 /* Now incr the count and init the wi structure */
7177 SCTP_INCR_LADDR_COUNT();
7178 memset(wi, 0, sizeof(*wi));
7179 (void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
7180 wi->ifa = ifa;
7181 wi->action = SCTP_SET_PRIM_ADDR;
7182 atomic_add_int(&ifa->refcount, 1);
7183
7184 /* Now add it to the work queue */
7185 SCTP_WQ_ADDR_LOCK();
7186 /*
7187 * Should this really be a tailq? As it is we will process the
7188 * newest first :-0
7189 */
7190 LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
7191 sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
7192 (struct sctp_inpcb *)NULL,
7193 (struct sctp_tcb *)NULL,
7194 (struct sctp_nets *)NULL);
7195 SCTP_WQ_ADDR_UNLOCK();
7196 return (0);
7197 }
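/*
 * Illustrative caller sketch (not part of the original code; everything but
 * sctp_dynamic_set_primary() and SCTP_DEFAULT_VRFID is a hypothetical value):
 * a configured local IPv4 address is handed in as a plain sockaddr, and the
 * queued work item later drives the peer-set-primary request for every
 * association holding that address.
 *
 *	struct sockaddr_in addr;
 *
 *	memset(&addr, 0, sizeof(addr));
 *	addr.sin_family = AF_INET;
 *	addr.sin_addr.s_addr = htonl(0xc0000201);	// 192.0.2.1, example address
 *	// sin_len must also be set on platforms with HAVE_SA_LEN
 *	if (sctp_dynamic_set_primary((struct sockaddr *)&addr,
 *	    SCTP_DEFAULT_VRFID) != 0) {
 *		// address is not local, or no memory for the work item
 *	}
 */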
7198
7199 #if defined(__Userspace__)
7200 /* no sctp_soreceive for __Userspace__ now */
7201 #endif
7202
7203 #if !defined(__Userspace__)
7204 int
7205 sctp_soreceive(struct socket *so,
7206 struct sockaddr **psa,
7207 struct uio *uio,
7208 struct mbuf **mp0,
7209 struct mbuf **controlp,
7210 int *flagsp)
7211 {
7212 int error, fromlen;
7213 uint8_t sockbuf[256];
7214 struct sockaddr *from;
7215 struct sctp_extrcvinfo sinfo;
7216 int filling_sinfo = 1;
7217 int flags;
7218 struct sctp_inpcb *inp;
7219
7220 inp = (struct sctp_inpcb *)so->so_pcb;
7221 /* pickup the assoc we are reading from */
7222 if (inp == NULL) {
7223 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7224 return (EINVAL);
7225 }
7226 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
7227 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
7228 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) ||
7229 (controlp == NULL)) {
7230 /* user does not want the sndrcv ctl */
7231 filling_sinfo = 0;
7232 }
7233 if (psa) {
7234 from = (struct sockaddr *)sockbuf;
7235 fromlen = sizeof(sockbuf);
7236 #ifdef HAVE_SA_LEN
7237 from->sa_len = 0;
7238 #endif
7239 } else {
7240 from = NULL;
7241 fromlen = 0;
7242 }
7243
7244 #if defined(__APPLE__) && !defined(__Userspace__)
7245 SCTP_SOCKET_LOCK(so, 1);
7246 #endif
7247 if (filling_sinfo) {
7248 memset(&sinfo, 0, sizeof(struct sctp_extrcvinfo));
7249 }
7250 if (flagsp != NULL) {
7251 flags = *flagsp;
7252 } else {
7253 flags = 0;
7254 }
7255 error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, &flags,
7256 (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
7257 if (flagsp != NULL) {
7258 *flagsp = flags;
7259 }
7260 if (controlp != NULL) {
7261 /* copy back the sinfo in a CMSG format */
7262 if (filling_sinfo && ((flags & MSG_NOTIFICATION) == 0)) {
7263 *controlp = sctp_build_ctl_nchunk(inp,
7264 (struct sctp_sndrcvinfo *)&sinfo);
7265 } else {
7266 *controlp = NULL;
7267 }
7268 }
7269 if (psa) {
7270 /* copy back the address info */
7271 #ifdef HAVE_SA_LEN
7272 if (from && from->sa_len) {
7273 #else
7274 if (from) {
7275 #endif
7276 #if (defined(__FreeBSD__) || defined(_WIN32)) && !defined(__Userspace__)
7277 *psa = sodupsockaddr(from, M_NOWAIT);
7278 #else
7279 *psa = dup_sockaddr(from, mp0 == 0);
7280 #endif
7281 } else {
7282 *psa = NULL;
7283 }
7284 }
7285 #if defined(__APPLE__) && !defined(__Userspace__)
7286 SCTP_SOCKET_UNLOCK(so, 1);
7287 #endif
7288 return (error);
7289 }
7290
7291
7292 #if defined(_WIN32) && !defined(__Userspace__)
7293 /*
7294 * General routine to allocate a hash table with control of memory flags.
7295 * Available in 7.0 and beyond.
7296 */
7297 void *
7298 sctp_hashinit_flags(int elements, struct malloc_type *type,
7299 u_long *hashmask, int flags)
7300 {
7301 long hashsize;
7302 LIST_HEAD(generic, generic) *hashtbl;
7303 int i;
7304
7305
7306 if (elements <= 0) {
7307 #ifdef INVARIANTS
7308 panic("hashinit: bad elements");
7309 #else
7310 SCTP_PRINTF("hashinit: bad elements?");
7311 elements = 1;
7312 #endif
7313 }
7314 for (hashsize = 1; hashsize <= elements; hashsize <<= 1)
7315 continue;
7316 hashsize >>= 1;
7317 if (flags & HASH_WAITOK)
7318 hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl), type, M_WAITOK);
7319 else if (flags & HASH_NOWAIT)
7320 hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl), type, M_NOWAIT);
7321 else {
7322 #ifdef INVARIANTS
7323 panic("flag incorrect in hashinit_flags");
7324 #else
7325 return (NULL);
7326 #endif
7327 }
7328
7329 /* no memory? */
7330 if (hashtbl == NULL)
7331 return (NULL);
7332
7333 for (i = 0; i < hashsize; i++)
7334 LIST_INIT(&hashtbl[i]);
7335 *hashmask = hashsize - 1;
7336 return (hashtbl);
7337 }
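/*
 * Sizing note (illustrative, not part of the original source): the loop in
 * sctp_hashinit_flags() rounds the table size down to the largest power of
 * two not exceeding 'elements', so the returned mask can be used as
 * 'hashval & *hashmask'. For example, elements = 100 walks hashsize through
 * 1, 2, ..., 128 and then shifts back to 64, giving *hashmask = 63;
 * elements = 64 also ends up with hashsize = 64 and *hashmask = 63.
 */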
7338 #endif
7339
7340 #else /* __Userspace__ ifdef above sctp_soreceive */
7341 /*
7342 * __Userspace__: Define sctp_hashinit_flags() and sctp_hashdestroy() for userland.
7343 * NOTE: We don't want multiple definitions here, so the sctp_hashinit_flags()
7344 * above for __FreeBSD__ must be excluded.
7345 *
7346 */
7347
7348 void *
7349 sctp_hashinit_flags(int elements, struct malloc_type *type,
7350 u_long *hashmask, int flags)
7351 {
7352 long hashsize;
7353 LIST_HEAD(generic, generic) *hashtbl;
7354 int i;
7355
7356 if (elements <= 0) {
7357 SCTP_PRINTF("hashinit: bad elements?");
7358 #ifdef INVARIANTS
7359 return (NULL);
7360 #else
7361 elements = 1;
7362 #endif
7363 }
7364 for (hashsize = 1; hashsize <= elements; hashsize <<= 1)
7365 continue;
7366 hashsize >>= 1;
7367 /* Cannot use MALLOC here because it has to be declared or defined
7368 using MALLOC_DECLARE or MALLOC_DEFINE first. */
7369 if (flags & HASH_WAITOK)
7370 hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl));
7371 else if (flags & HASH_NOWAIT)
7372 hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl));
7373 else {
7374 #ifdef INVARIANTS
7375 SCTP_PRINTF("flag incorrect in hashinit_flags.\n");
7376 #endif
7377 return (NULL);
7378 }
7379
7380 /* no memory? */
7381 if (hashtbl == NULL)
7382 return (NULL);
7383
7384 for (i = 0; i < hashsize; i++)
7385 LIST_INIT(&hashtbl[i]);
7386 *hashmask = hashsize - 1;
7387 return (hashtbl);
7388 }
7389
7390
7391 void
7392 sctp_hashdestroy(void *vhashtbl, struct malloc_type *type, u_long hashmask)
7393 {
7394 LIST_HEAD(generic, generic) *hashtbl, *hp;
7395
7396 hashtbl = vhashtbl;
7397 for (hp = hashtbl; hp <= &hashtbl[hashmask]; hp++)
7398 if (!LIST_EMPTY(hp)) {
7399 SCTP_PRINTF("hashdestroy: hash not empty.\n");
7400 return;
7401 }
7402 FREE(hashtbl, type);
7403 }
7404
7405
7406 void
7407 sctp_hashfreedestroy(void *vhashtbl, struct malloc_type *type, u_long hashmask)
7408 {
7409 LIST_HEAD(generic, generic) *hashtbl/*, *hp*/;
7410 /*
7411 LIST_ENTRY(type) *start, *temp;
7412 */
7413 hashtbl = vhashtbl;
7414 /* Apparently temp is not dynamically allocated, so attempting to
7415 free it results in an error.
7416 for (hp = hashtbl; hp <= &hashtbl[hashmask]; hp++)
7417 if (!LIST_EMPTY(hp)) {
7418 start = LIST_FIRST(hp);
7419 while (start != NULL) {
7420 temp = start;
7421 start = start->le_next;
7422 SCTP_PRINTF("%s: %p \n", __func__, (void *)temp);
7423 FREE(temp, type);
7424 }
7425 }
7426 */
7427 FREE(hashtbl, type);
7428 }
7429
7430
7431 #endif
7432
7433
7434 int
7435 sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
7436 int totaddr, int *error)
7437 {
7438 int added = 0;
7439 int i;
7440 struct sctp_inpcb *inp;
7441 struct sockaddr *sa;
7442 size_t incr = 0;
7443 #ifdef INET
7444 struct sockaddr_in *sin;
7445 #endif
7446 #ifdef INET6
7447 struct sockaddr_in6 *sin6;
7448 #endif
7449
7450 sa = addr;
7451 inp = stcb->sctp_ep;
7452 *error = 0;
7453 for (i = 0; i < totaddr; i++) {
7454 switch (sa->sa_family) {
7455 #ifdef INET
7456 case AF_INET:
7457 incr = sizeof(struct sockaddr_in);
7458 sin = (struct sockaddr_in *)sa;
7459 if ((sin->sin_addr.s_addr == INADDR_ANY) ||
7460 (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
7461 IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) {
7462 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7463 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
7464 SCTP_FROM_SCTPUTIL + SCTP_LOC_7);
7465 *error = EINVAL;
7466 goto out_now;
7467 }
7468 if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port,
7469 SCTP_DONOT_SETSCOPE,
7470 SCTP_ADDR_IS_CONFIRMED)) {
7471 /* assoc is gone, no unlock needed */
7472 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
7473 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
7474 SCTP_FROM_SCTPUTIL + SCTP_LOC_8);
7475 *error = ENOBUFS;
7476 goto out_now;
7477 }
7478 added++;
7479 break;
7480 #endif
7481 #ifdef INET6
7482 case AF_INET6:
7483 incr = sizeof(struct sockaddr_in6);
7484 sin6 = (struct sockaddr_in6 *)sa;
7485 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) ||
7486 IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
7487 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7488 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
7489 SCTP_FROM_SCTPUTIL + SCTP_LOC_9);
7490 *error = EINVAL;
7491 goto out_now;
7492 }
7493 if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port,
7494 SCTP_DONOT_SETSCOPE,
7495 SCTP_ADDR_IS_CONFIRMED)) {
7496 /* assoc is gone, no unlock needed */
7497 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
7498 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
7499 SCTP_FROM_SCTPUTIL + SCTP_LOC_10);
7500 *error = ENOBUFS;
7501 goto out_now;
7502 }
7503 added++;
7504 break;
7505 #endif
7506 #if defined(__Userspace__)
7507 case AF_CONN:
7508 incr = sizeof(struct sockaddr_conn);
7509 if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port,
7510 SCTP_DONOT_SETSCOPE,
7511 SCTP_ADDR_IS_CONFIRMED)) {
7512 /* assoc is gone, no unlock needed */
7513 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
7514 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
7515 SCTP_FROM_SCTPUTIL + SCTP_LOC_11);
7516 *error = ENOBUFS;
7517 goto out_now;
7518 }
7519 added++;
7520 break;
7521 #endif
7522 default:
7523 break;
7524 }
7525 sa = (struct sockaddr *)((caddr_t)sa + incr);
7526 }
7527 out_now:
7528 return (added);
7529 }
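/*
 * Illustrative note (not part of the original code): the address list walked
 * above is a packed array of sockaddrs, which is why the cursor advances by
 * the per-family size. A hypothetical caller packing two IPv4 peers could
 * build it as follows (sin_len would also be set where HAVE_SA_LEN applies):
 *
 *	struct sockaddr_in addrs[2];
 *
 *	memset(addrs, 0, sizeof(addrs));
 *	addrs[0].sin_family = addrs[1].sin_family = AF_INET;
 *	addrs[0].sin_port = addrs[1].sin_port = htons(9899);
 *	addrs[0].sin_addr.s_addr = htonl(0xc6336401);	// 198.51.100.1
 *	addrs[1].sin_addr.s_addr = htonl(0xc6336402);	// 198.51.100.2
 *	// then pass (struct sockaddr *)addrs with totaddr = 2 to this helper
 */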
7530
7531 int
7532 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
7533 unsigned int totaddr,
7534 unsigned int *num_v4, unsigned int *num_v6,
7535 unsigned int limit)
7536 {
7537 struct sockaddr *sa;
7538 struct sctp_tcb *stcb;
7539 unsigned int incr, at, i;
7540
7541 at = 0;
7542 sa = addr;
7543 *num_v6 = *num_v4 = 0;
7544 /* account and validate addresses */
7545 if (totaddr == 0) {
7546 return (EINVAL);
7547 }
7548 for (i = 0; i < totaddr; i++) {
7549 if (at + sizeof(struct sockaddr) > limit) {
7550 return (EINVAL);
7551 }
7552 switch (sa->sa_family) {
7553 #ifdef INET
7554 case AF_INET:
7555 incr = (unsigned int)sizeof(struct sockaddr_in);
7556 #ifdef HAVE_SA_LEN
7557 if (sa->sa_len != incr) {
7558 return (EINVAL);
7559 }
7560 #endif
7561 (*num_v4) += 1;
7562 break;
7563 #endif
7564 #ifdef INET6
7565 case AF_INET6:
7566 {
7567 struct sockaddr_in6 *sin6;
7568
7569 sin6 = (struct sockaddr_in6 *)sa;
7570 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
7571 /* Must be non-mapped for connectx */
7572 return (EINVAL);
7573 }
7574 incr = (unsigned int)sizeof(struct sockaddr_in6);
7575 #ifdef HAVE_SA_LEN
7576 if (sa->sa_len != incr) {
7577 return (EINVAL);
7578 }
7579 #endif
7580 (*num_v6) += 1;
7581 break;
7582 }
7583 #endif
7584 default:
7585 return (EINVAL);
7586 }
7587 if ((at + incr) > limit) {
7588 return (EINVAL);
7589 }
7590 SCTP_INP_INCR_REF(inp);
7591 stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
7592 if (stcb != NULL) {
7593 SCTP_TCB_UNLOCK(stcb);
7594 return (EALREADY);
7595 } else {
7596 SCTP_INP_DECR_REF(inp);
7597 }
7598 at += incr;
7599 sa = (struct sockaddr *)((caddr_t)sa + incr);
7600 }
7601 return (0);
7602 }
7603
7604 /*
7605 * sctp_bindx(ADD) for one address.
7606 * Assumes all arguments are valid/checked by the caller.
7607 */
7608 void
7609 sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
7610 struct sockaddr *sa, uint32_t vrf_id, int *error,
7611 void *p)
7612 {
7613 #if defined(INET) && defined(INET6)
7614 struct sockaddr_in sin;
7615 #endif
7616 #ifdef INET6
7617 struct sockaddr_in6 *sin6;
7618 #endif
7619 #ifdef INET
7620 struct sockaddr_in *sinp;
7621 #endif
7622 struct sockaddr *addr_to_use;
7623 struct sctp_inpcb *lep;
7624 #ifdef SCTP_MVRF
7625 int i;
7626 #endif
7627 uint16_t port;
7628
7629 /* see if we're bound all already! */
7630 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
7631 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7632 *error = EINVAL;
7633 return;
7634 }
7635 #ifdef SCTP_MVRF
7636 /* Is the VRF one we have? */
7637 for (i = 0; i < inp->num_vrfs; i++) {
7638 if (vrf_id == inp->m_vrf_ids[i]) {
7639 break;
7640 }
7641 }
7642 if (i == inp->num_vrfs) {
7643 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7644 *error = EINVAL;
7645 return;
7646 }
7647 #endif
7648 switch (sa->sa_family) {
7649 #ifdef INET6
7650 case AF_INET6:
7651 #ifdef HAVE_SA_LEN
7652 if (sa->sa_len != sizeof(struct sockaddr_in6)) {
7653 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7654 *error = EINVAL;
7655 return;
7656 }
7657 #endif
7658 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
7659 /* can only bind v6 on PF_INET6 sockets */
7660 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7661 *error = EINVAL;
7662 return;
7663 }
7664 sin6 = (struct sockaddr_in6 *)sa;
7665 port = sin6->sin6_port;
7666 #ifdef INET
7667 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
7668 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
7669 SCTP_IPV6_V6ONLY(inp)) {
7670 /* can't bind v4-mapped addrs on a v6-only socket */
7671 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7672 *error = EINVAL;
7673 return;
7674 }
7675 in6_sin6_2_sin(&sin, sin6);
7676 addr_to_use = (struct sockaddr *)&sin;
7677 } else {
7678 addr_to_use = sa;
7679 }
7680 #else
7681 addr_to_use = sa;
7682 #endif
7683 break;
7684 #endif
7685 #ifdef INET
7686 case AF_INET:
7687 #ifdef HAVE_SA_LEN
7688 if (sa->sa_len != sizeof(struct sockaddr_in)) {
7689 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7690 *error = EINVAL;
7691 return;
7692 }
7693 #endif
7694 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
7695 SCTP_IPV6_V6ONLY(inp)) {
7696 /* can't bind v4 addrs on a v6-only socket */
7697 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7698 *error = EINVAL;
7699 return;
7700 }
7701 sinp = (struct sockaddr_in *)sa;
7702 port = sinp->sin_port;
7703 addr_to_use = sa;
7704 break;
7705 #endif
7706 default:
7707 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7708 *error = EINVAL;
7709 return;
7710 }
7711 if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
7712 #if !(defined(_WIN32) || defined(__Userspace__))
7713 if (p == NULL) {
7714 /* Can't get proc for Net/Open BSD */
7715 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7716 *error = EINVAL;
7717 return;
7718 }
7719 #endif
7720 *error = sctp_inpcb_bind(so, addr_to_use, NULL, p);
7721 return;
7722 }
7723 /* Validate the incoming port. */
7724 if ((port != 0) && (port != inp->sctp_lport)) {
7725 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7726 *error = EINVAL;
7727 return;
7728 }
7729 lep = sctp_pcb_findep(addr_to_use, 1, 0, vrf_id);
7730 if (lep == NULL) {
7731 /* add the address */
7732 *error = sctp_addr_mgmt_ep_sa(inp, addr_to_use,
7733 SCTP_ADD_IP_ADDRESS, vrf_id);
7734 } else {
7735 if (lep != inp) {
7736 *error = EADDRINUSE;
7737 }
7738 SCTP_INP_DECR_REF(lep);
7739 }
7740 }
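/*
 * Usage sketch (illustrative, not part of the original code): from userland
 * this path is typically reached through the sctp_bindx(3) wrapper with
 * SCTP_BINDX_ADD_ADDR, which the setsockopt-based kernel entry point then
 * turns into one sctp_bindx_add_address() call per address, e.g.
 *
 *	struct sockaddr_in addr;	// one fully filled-in local address
 *
 *	if (sctp_bindx(fd, (struct sockaddr *)&addr, 1,
 *	    SCTP_BINDX_ADD_ADDR) < 0)
 *		// handle errno (EINVAL, EADDRINUSE, ...)
 */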
7741
7742 /*
7743 * sctp_bindx(DELETE) for one address.
7744 * Assumes all arguments are valid/checked by the caller.
7745 */
7746 void
7747 sctp_bindx_delete_address(struct sctp_inpcb *inp,
7748 struct sockaddr *sa, uint32_t vrf_id, int *error)
7749 {
7750 struct sockaddr *addr_to_use;
7751 #if defined(INET) && defined(INET6)
7752 struct sockaddr_in6 *sin6;
7753 struct sockaddr_in sin;
7754 #endif
7755 #ifdef SCTP_MVRF
7756 int i;
7757 #endif
7758
7759 /* see if we're bound all already! */
7760 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
7761 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7762 *error = EINVAL;
7763 return;
7764 }
7765 #ifdef SCTP_MVRF
7766 /* Is the VRF one we have? */
7767 for (i = 0; i < inp->num_vrfs; i++) {
7768 if (vrf_id == inp->m_vrf_ids[i]) {
7769 break;
7770 }
7771 }
7772 if (i == inp->num_vrfs) {
7773 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7774 *error = EINVAL;
7775 return;
7776 }
7777 #endif
7778 switch (sa->sa_family) {
7779 #ifdef INET6
7780 case AF_INET6:
7781 #ifdef HAVE_SA_LEN
7782 if (sa->sa_len != sizeof(struct sockaddr_in6)) {
7783 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7784 *error = EINVAL;
7785 return;
7786 }
7787 #endif
7788 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
7789 /* can only bind v6 on PF_INET6 sockets */
7790 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7791 *error = EINVAL;
7792 return;
7793 }
7794 #ifdef INET
7795 sin6 = (struct sockaddr_in6 *)sa;
7796 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
7797 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
7798 SCTP_IPV6_V6ONLY(inp)) {
7799 /* can't bind v4-mapped addrs on a v6-only socket */
7800 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7801 *error = EINVAL;
7802 return;
7803 }
7804 in6_sin6_2_sin(&sin, sin6);
7805 addr_to_use = (struct sockaddr *)&sin;
7806 } else {
7807 addr_to_use = sa;
7808 }
7809 #else
7810 addr_to_use = sa;
7811 #endif
7812 break;
7813 #endif
7814 #ifdef INET
7815 case AF_INET:
7816 #ifdef HAVE_SA_LEN
7817 if (sa->sa_len != sizeof(struct sockaddr_in)) {
7818 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7819 *error = EINVAL;
7820 return;
7821 }
7822 #endif
7823 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
7824 SCTP_IPV6_V6ONLY(inp)) {
7825 /* can't bind v4 addrs on a v6-only socket */
7826 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7827 *error = EINVAL;
7828 return;
7829 }
7830 addr_to_use = sa;
7831 break;
7832 #endif
7833 default:
7834 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7835 *error = EINVAL;
7836 return;
7837 }
7838 /* No lock required; sctp_addr_mgmt_ep_sa() does its own locking. */
7839 *error = sctp_addr_mgmt_ep_sa(inp, addr_to_use, SCTP_DEL_IP_ADDRESS,
7840 vrf_id);
7841 }
7842
7843 /*
7844 * returns the valid local address count for an assoc, taking into account
7845 * all scoping rules
7846 */
7847 int
7848 sctp_local_addr_count(struct sctp_tcb *stcb)
7849 {
7850 int loopback_scope;
7851 #if defined(INET)
7852 int ipv4_local_scope, ipv4_addr_legal;
7853 #endif
7854 #if defined(INET6)
7855 int local_scope, site_scope, ipv6_addr_legal;
7856 #endif
7857 #if defined(__Userspace__)
7858 int conn_addr_legal;
7859 #endif
7860 struct sctp_vrf *vrf;
7861 struct sctp_ifn *sctp_ifn;
7862 struct sctp_ifa *sctp_ifa;
7863 int count = 0;
7864
7865 /* Turn on all the appropriate scopes */
7866 loopback_scope = stcb->asoc.scope.loopback_scope;
7867 #if defined(INET)
7868 ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope;
7869 ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal;
7870 #endif
7871 #if defined(INET6)
7872 local_scope = stcb->asoc.scope.local_scope;
7873 site_scope = stcb->asoc.scope.site_scope;
7874 ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal;
7875 #endif
7876 #if defined(__Userspace__)
7877 conn_addr_legal = stcb->asoc.scope.conn_addr_legal;
7878 #endif
7879 SCTP_IPI_ADDR_RLOCK();
7880 vrf = sctp_find_vrf(stcb->asoc.vrf_id);
7881 if (vrf == NULL) {
7882 /* no vrf, no addresses */
7883 SCTP_IPI_ADDR_RUNLOCK();
7884 return (0);
7885 }
7886
7887 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
7888 /*
7889 * bound all case: go through all ifns on the vrf
7890 */
7891 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
7892 if ((loopback_scope == 0) &&
7893 SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
7894 continue;
7895 }
7896 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
7897 if (sctp_is_addr_restricted(stcb, sctp_ifa))
7898 continue;
7899 switch (sctp_ifa->address.sa.sa_family) {
7900 #ifdef INET
7901 case AF_INET:
7902 if (ipv4_addr_legal) {
7903 struct sockaddr_in *sin;
7904
7905 sin = &sctp_ifa->address.sin;
7906 if (sin->sin_addr.s_addr == 0) {
7907 /* skip unspecified addrs */
7908 continue;
7909 }
7910 #if defined(__FreeBSD__) && !defined(__Userspace__)
7911 if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred,
7912 &sin->sin_addr) != 0) {
7913 continue;
7914 }
7915 #endif
7916 if ((ipv4_local_scope == 0) &&
7917 (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
7918 continue;
7919 }
7920 /* count this one */
7921 count++;
7922 } else {
7923 continue;
7924 }
7925 break;
7926 #endif
7927 #ifdef INET6
7928 case AF_INET6:
7929 if (ipv6_addr_legal) {
7930 struct sockaddr_in6 *sin6;
7931
7932 #if defined(SCTP_EMBEDDED_V6_SCOPE) && !defined(SCTP_KAME)
7933 struct sockaddr_in6 lsa6;
7934 #endif
7935 sin6 = &sctp_ifa->address.sin6;
7936 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
7937 continue;
7938 }
7939 #if defined(__FreeBSD__) && !defined(__Userspace__)
7940 if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred,
7941 &sin6->sin6_addr) != 0) {
7942 continue;
7943 }
7944 #endif
7945 if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
7946 if (local_scope == 0)
7947 continue;
7948 #if defined(SCTP_EMBEDDED_V6_SCOPE)
7949 if (sin6->sin6_scope_id == 0) {
7950 #ifdef SCTP_KAME
7951 if (sa6_recoverscope(sin6) != 0)
7952 /*
7953 * bad link
7954 * local
7955 * address
7956 */
7957 continue;
7958 #else
7959 lsa6 = *sin6;
7960 if (in6_recoverscope(&lsa6,
7961 &lsa6.sin6_addr,
7962 NULL))
7963 /*
7964 * bad link
7965 * local
7966 * address
7967 */
7968 continue;
7969 sin6 = &lsa6;
7970 #endif /* SCTP_KAME */
7971 }
7972 #endif /* SCTP_EMBEDDED_V6_SCOPE */
7973 }
7974 if ((site_scope == 0) &&
7975 (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
7976 continue;
7977 }
7978 /* count this one */
7979 count++;
7980 }
7981 break;
7982 #endif
7983 #if defined(__Userspace__)
7984 case AF_CONN:
7985 if (conn_addr_legal) {
7986 count++;
7987 }
7988 break;
7989 #endif
7990 default:
7991 /* TSNH */
7992 break;
7993 }
7994 }
7995 }
7996 } else {
7997 /*
7998 * subset bound case
7999 */
8000 struct sctp_laddr *laddr;
8001 LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
8002 sctp_nxt_addr) {
8003 if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
8004 continue;
8005 }
8006 /* count this one */
8007 count++;
8008 }
8009 }
8010 SCTP_IPI_ADDR_RUNLOCK();
8011 return (count);
8012 }
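/*
 * Worked example (illustrative, not part of the original code): for a
 * bound-all endpoint on a host owning 127.0.0.1, 10.0.0.1 and 2001:db8::1,
 * an association whose scope has loopback_scope = 0 and ipv4_local_scope = 0
 * (with both address families legal, and none of the addresses restricted)
 * counts only 2001:db8::1, so sctp_local_addr_count() returns 1.
 */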
8013
8014 #if defined(SCTP_LOCAL_TRACE_BUF)
8015
8016 void
8017 sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
8018 {
8019 uint32_t saveindex, newindex;
8020
8021 #if defined(_WIN32) && !defined(__Userspace__)
8022 if (SCTP_BASE_SYSCTL(sctp_log) == NULL) {
8023 return;
8024 }
8025 do {
8026 saveindex = SCTP_BASE_SYSCTL(sctp_log)->index;
8027 if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
8028 newindex = 1;
8029 } else {
8030 newindex = saveindex + 1;
8031 }
8032 } while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log)->index, saveindex, newindex) == 0);
8033 if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
8034 saveindex = 0;
8035 }
8036 SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
8037 SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].subsys = subsys;
8038 SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].params[0] = a;
8039 SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].params[1] = b;
8040 SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].params[2] = c;
8041 SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].params[3] = d;
8042 SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].params[4] = e;
8043 SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].params[5] = f;
8044 #else
8045 do {
8046 saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
8047 if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
8048 newindex = 1;
8049 } else {
8050 newindex = saveindex + 1;
8051 }
8052 } while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
8053 if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
8054 saveindex = 0;
8055 }
8056 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
8057 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
8058 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
8059 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
8060 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
8061 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
8062 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
8063 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
8064 #endif
8065 }
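/*
 * Index handling note (illustrative, not part of the original source): each
 * successful atomic_cmpset_int() reserves exactly one slot, so concurrent
 * callers never write the same entry. Once the published index reaches
 * SCTP_MAX_LOGGING_SIZE, the next caller publishes 1 and writes entry[0],
 * which is how the trace buffer wraps around.
 */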
8066
8067 #endif
8068 #if defined(__FreeBSD__) && !defined(__Userspace__)
8069 static void
8070 sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *inp,
8071 const struct sockaddr *sa SCTP_UNUSED, void *ctx SCTP_UNUSED)
8072 {
8073 struct ip *iph;
8074 #ifdef INET6
8075 struct ip6_hdr *ip6;
8076 #endif
8077 struct mbuf *sp, *last;
8078 struct udphdr *uhdr;
8079 uint16_t port;
8080
8081 if ((m->m_flags & M_PKTHDR) == 0) {
8082 /* Can't handle one that is not a pkt hdr */
8083 goto out;
8084 }
8085 /* Pull the src port */
8086 iph = mtod(m, struct ip *);
8087 uhdr = (struct udphdr *)((caddr_t)iph + off);
8088 port = uhdr->uh_sport;
8089 /* Split out the mbuf chain. Leave the
8090 * IP header in m, place the
8091 * rest in the sp.
8092 */
8093 sp = m_split(m, off, M_NOWAIT);
8094 if (sp == NULL) {
8095 /* Gak, drop packet, we can't do a split */
8096 goto out;
8097 }
8098 if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) {
8099 /* Gak, packet can't have an SCTP header in it - too small */
8100 m_freem(sp);
8101 goto out;
8102 }
8103 /* Now pull up the UDP header and SCTP header together */
8104 sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr));
8105 if (sp == NULL) {
8106 /* Gak pullup failed */
8107 goto out;
8108 }
8109 /* Trim out the UDP header */
8110 m_adj(sp, sizeof(struct udphdr));
8111
8112 /* Now reconstruct the mbuf chain */
8113 for (last = m; last->m_next; last = last->m_next);
8114 last->m_next = sp;
8115 m->m_pkthdr.len += sp->m_pkthdr.len;
8116 /*
8117 * The CSUM_DATA_VALID flag indicates that the HW checked the
8118 * UDP checksum and it was valid.
8119 * Since CSUM_DATA_VALID == CSUM_SCTP_VALID this would imply that
8120 * the HW also verified the SCTP checksum. Therefore, clear the bit.
8121 */
8122 SCTPDBG(SCTP_DEBUG_CRCOFFLOAD,
8123 "sctp_recv_udp_tunneled_packet(): Packet of length %d received on %s with csum_flags 0x%b.\n",
8124 m->m_pkthdr.len,
8125 if_name(m->m_pkthdr.rcvif),
8126 (int)m->m_pkthdr.csum_flags, CSUM_BITS);
8127 m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID;
8128 iph = mtod(m, struct ip *);
8129 switch (iph->ip_v) {
8130 #ifdef INET
8131 case IPVERSION:
8132 iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr));
8133 sctp_input_with_port(m, off, port);
8134 break;
8135 #endif
8136 #ifdef INET6
8137 case IPV6_VERSION >> 4:
8138 ip6 = mtod(m, struct ip6_hdr *);
8139 ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr));
8140 sctp6_input_with_port(&m, &off, port);
8141 break;
8142 #endif
8143 default:
8144 goto out;
8145 break;
8146 }
8147 return;
8148 out:
8149 m_freem(m);
8150 }
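/*
 * Layout note (illustrative, not part of the original source): after the
 * m_split()/m_pullup()/m_adj() sequence above, the chain handed to the SCTP
 * input path looks like
 *
 *	m:  [ IP header, 'off' bytes ] -> sp: [ SCTP common header | chunks ]
 *
 * i.e. the UDP header has been stripped and ip_len/ip6_plen reduced by
 * sizeof(struct udphdr), so the packet parses as if it had arrived as
 * plain SCTP.
 */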
8151
8152 #ifdef INET
8153 static void
8154 sctp_recv_icmp_tunneled_packet(int cmd, struct sockaddr *sa, void *vip, void *ctx SCTP_UNUSED)
8155 {
8156 struct ip *outer_ip, *inner_ip;
8157 struct sctphdr *sh;
8158 struct icmp *icmp;
8159 struct udphdr *udp;
8160 struct sctp_inpcb *inp;
8161 struct sctp_tcb *stcb;
8162 struct sctp_nets *net;
8163 struct sctp_init_chunk *ch;
8164 struct sockaddr_in src, dst;
8165 uint8_t type, code;
8166
8167 inner_ip = (struct ip *)vip;
8168 icmp = (struct icmp *)((caddr_t)inner_ip -
8169 (sizeof(struct icmp) - sizeof(struct ip)));
8170 outer_ip = (struct ip *)((caddr_t)icmp - sizeof(struct ip));
8171 if (ntohs(outer_ip->ip_len) <
8172 sizeof(struct ip) + 8 + (inner_ip->ip_hl << 2) + sizeof(struct udphdr) + 8) {
8173 return;
8174 }
8175 udp = (struct udphdr *)((caddr_t)inner_ip + (inner_ip->ip_hl << 2));
8176 sh = (struct sctphdr *)(udp + 1);
8177 memset(&src, 0, sizeof(struct sockaddr_in));
8178 src.sin_family = AF_INET;
8179 #ifdef HAVE_SIN_LEN
8180 src.sin_len = sizeof(struct sockaddr_in);
8181 #endif
8182 src.sin_port = sh->src_port;
8183 src.sin_addr = inner_ip->ip_src;
8184 memset(&dst, 0, sizeof(struct sockaddr_in));
8185 dst.sin_family = AF_INET;
8186 #ifdef HAVE_SIN_LEN
8187 dst.sin_len = sizeof(struct sockaddr_in);
8188 #endif
8189 dst.sin_port = sh->dest_port;
8190 dst.sin_addr = inner_ip->ip_dst;
8191 /*
8192 * 'dst' holds the dest of the packet that failed to be sent.
8193 * 'src' holds our local endpoint address. Thus we reverse
8194 * the dst and the src in the lookup.
8195 */
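/*
 * Illustrative example (not part of the original comment): if the embedded
 * packet was one we sent from 192.0.2.1:9899 to 198.51.100.1:9899, then
 * 'src' holds 192.0.2.1 (our endpoint) and 'dst' holds 198.51.100.1 (the
 * peer), and the lookup below uses 198.51.100.1 as the remote address and
 * 192.0.2.1 as the local one.
 */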
8196 inp = NULL;
8197 net = NULL;
8198 stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst,
8199 (struct sockaddr *)&src,
8200 &inp, &net, 1,
8201 SCTP_DEFAULT_VRFID);
8202 if ((stcb != NULL) &&
8203 (net != NULL) &&
8204 (inp != NULL)) {
8205 /* Check the UDP port numbers */
8206 if ((udp->uh_dport != net->port) ||
8207 (udp->uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) {
8208 SCTP_TCB_UNLOCK(stcb);
8209 return;
8210 }
8211 /* Check the verification tag */
8212 if (ntohl(sh->v_tag) != 0) {
8213 /*
8214 * This must be the verification tag used
8215 * for sending out packets. We don't
8216 * consider packets reflecting the
8217 * verification tag.
8218 */
8219 if (ntohl(sh->v_tag) != stcb->asoc.peer_vtag) {
8220 SCTP_TCB_UNLOCK(stcb);
8221 return;
8222 }
8223 } else {
8224 if (ntohs(outer_ip->ip_len) >=
8225 sizeof(struct ip) +
8226 8 + (inner_ip->ip_hl << 2) + 8 + 20) {
8227 /*
8228 * In this case we can check if we
8229 * got an INIT chunk and if the
8230 * initiate tag matches.
8231 */
8232 ch = (struct sctp_init_chunk *)(sh + 1);
8233 if ((ch->ch.chunk_type != SCTP_INITIATION) ||
8234 (ntohl(ch->init.initiate_tag) != stcb->asoc.my_vtag)) {
8235 SCTP_TCB_UNLOCK(stcb);
8236 return;
8237 }
8238 } else {
8239 SCTP_TCB_UNLOCK(stcb);
8240 return;
8241 }
8242 }
8243 type = icmp->icmp_type;
8244 code = icmp->icmp_code;
8245 if ((type == ICMP_UNREACH) &&
8246 (code == ICMP_UNREACH_PORT)) {
8247 code = ICMP_UNREACH_PROTOCOL;
8248 }
8249 sctp_notify(inp, stcb, net, type, code,
8250 ntohs(inner_ip->ip_len),
8251 (uint32_t)ntohs(icmp->icmp_nextmtu));
8252 #if defined(__Userspace__)
8253 if (!(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) &&
8254 (stcb->sctp_socket != NULL)) {
8255 struct socket *upcall_socket;
8256
8257 upcall_socket = stcb->sctp_socket;
8258 SOCK_LOCK(upcall_socket);
8259 soref(upcall_socket);
8260 SOCK_UNLOCK(upcall_socket);
8261 if ((upcall_socket->so_upcall != NULL) &&
8262 (upcall_socket->so_error != 0)) {
8263 (*upcall_socket->so_upcall)(upcall_socket, upcall_socket->so_upcallarg, M_NOWAIT);
8264 }
8265 ACCEPT_LOCK();
8266 SOCK_LOCK(upcall_socket);
8267 sorele(upcall_socket);
8268 }
8269 #endif
8270 } else {
8271 if ((stcb == NULL) && (inp != NULL)) {
8272 /* reduce ref-count */
8273 SCTP_INP_WLOCK(inp);
8274 SCTP_INP_DECR_REF(inp);
8275 SCTP_INP_WUNLOCK(inp);
8276 }
8277 if (stcb) {
8278 SCTP_TCB_UNLOCK(stcb);
8279 }
8280 }
8281 return;
8282 }
8283 #endif
8284
8285 #ifdef INET6
8286 static void
8287 sctp_recv_icmp6_tunneled_packet(int cmd, struct sockaddr *sa, void *d, void *ctx SCTP_UNUSED)
8288 {
8289 struct ip6ctlparam *ip6cp;
8290 struct sctp_inpcb *inp;
8291 struct sctp_tcb *stcb;
8292 struct sctp_nets *net;
8293 struct sctphdr sh;
8294 struct udphdr udp;
8295 struct sockaddr_in6 src, dst;
8296 uint8_t type, code;
8297
8298 ip6cp = (struct ip6ctlparam *)d;
8299 /*
8300 * XXX: We assume that when IPV6 is non-NULL, M and OFF are
8301 * valid.
8302 */
8303 if (ip6cp->ip6c_m == NULL) {
8304 return;
8305 }
8306 /* Check if we can safely examine the ports and the
8307 * verification tag of the SCTP common header.
8308 */
8309 if (ip6cp->ip6c_m->m_pkthdr.len <
8310 ip6cp->ip6c_off + sizeof(struct udphdr) + offsetof(struct sctphdr, checksum)) {
8311 return;
8312 }
8313 /* Copy out the UDP header. */
8314 memset(&udp, 0, sizeof(struct udphdr));
8315 m_copydata(ip6cp->ip6c_m,
8316 ip6cp->ip6c_off,
8317 sizeof(struct udphdr),
8318 (caddr_t)&udp);
8319 /* Copy out the port numbers and the verification tag. */
8320 memset(&sh, 0, sizeof(struct sctphdr));
8321 m_copydata(ip6cp->ip6c_m,
8322 ip6cp->ip6c_off + sizeof(struct udphdr),
8323 sizeof(uint16_t) + sizeof(uint16_t) + sizeof(uint32_t),
8324 (caddr_t)&sh);
8325 memset(&src, 0, sizeof(struct sockaddr_in6));
8326 src.sin6_family = AF_INET6;
8327 #ifdef HAVE_SIN6_LEN
8328 src.sin6_len = sizeof(struct sockaddr_in6);
8329 #endif
8330 src.sin6_port = sh.src_port;
8331 src.sin6_addr = ip6cp->ip6c_ip6->ip6_src;
8332 #if defined(__FreeBSD__) && !defined(__Userspace__)
8333 if (in6_setscope(&src.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) {
8334 return;
8335 }
8336 #endif
8337 memset(&dst, 0, sizeof(struct sockaddr_in6));
8338 dst.sin6_family = AF_INET6;
8339 #ifdef HAVE_SIN6_LEN
8340 dst.sin6_len = sizeof(struct sockaddr_in6);
8341 #endif
8342 dst.sin6_port = sh.dest_port;
8343 dst.sin6_addr = ip6cp->ip6c_ip6->ip6_dst;
8344 #if defined(__FreeBSD__) && !defined(__Userspace__)
8345 if (in6_setscope(&dst.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) {
8346 return;
8347 }
8348 #endif
8349 inp = NULL;
8350 net = NULL;
8351 stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst,
8352 (struct sockaddr *)&src,
8353 &inp, &net, 1, SCTP_DEFAULT_VRFID);
8354 if ((stcb != NULL) &&
8355 (net != NULL) &&
8356 (inp != NULL)) {
8357 /* Check the UDP port numbers */
8358 if ((udp.uh_dport != net->port) ||
8359 (udp.uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) {
8360 SCTP_TCB_UNLOCK(stcb);
8361 return;
8362 }
8363 /* Check the verification tag */
8364 if (ntohl(sh.v_tag) != 0) {
8365 /*
8366 * This must be the verification tag used for
8367 * sending out packets. We don't consider
8368 * packets reflecting the verification tag.
8369 */
8370 if (ntohl(sh.v_tag) != stcb->asoc.peer_vtag) {
8371 SCTP_TCB_UNLOCK(stcb);
8372 return;
8373 }
8374 } else {
8375 #if defined(__FreeBSD__) && !defined(__Userspace__)
8376 if (ip6cp->ip6c_m->m_pkthdr.len >=
8377 ip6cp->ip6c_off + sizeof(struct udphdr) +
8378 sizeof(struct sctphdr) +
8379 sizeof(struct sctp_chunkhdr) +
8380 offsetof(struct sctp_init, a_rwnd)) {
8381 /*
8382 * In this case we can check if we
8383 * got an INIT chunk and if the
8384 * initiate tag matches.
8385 */
8386 uint32_t initiate_tag;
8387 uint8_t chunk_type;
8388
8389 m_copydata(ip6cp->ip6c_m,
8390 ip6cp->ip6c_off +
8391 sizeof(struct udphdr) +
8392 sizeof(struct sctphdr),
8393 sizeof(uint8_t),
8394 (caddr_t)&chunk_type);
8395 m_copydata(ip6cp->ip6c_m,
8396 ip6cp->ip6c_off +
8397 sizeof(struct udphdr) +
8398 sizeof(struct sctphdr) +
8399 sizeof(struct sctp_chunkhdr),
8400 sizeof(uint32_t),
8401 (caddr_t)&initiate_tag);
8402 if ((chunk_type != SCTP_INITIATION) ||
8403 (ntohl(initiate_tag) != stcb->asoc.my_vtag)) {
8404 SCTP_TCB_UNLOCK(stcb);
8405 return;
8406 }
8407 } else {
8408 SCTP_TCB_UNLOCK(stcb);
8409 return;
8410 }
8411 #else
8412 SCTP_TCB_UNLOCK(stcb);
8413 return;
8414 #endif
8415 }
8416 type = ip6cp->ip6c_icmp6->icmp6_type;
8417 code = ip6cp->ip6c_icmp6->icmp6_code;
8418 if ((type == ICMP6_DST_UNREACH) &&
8419 (code == ICMP6_DST_UNREACH_NOPORT)) {
8420 type = ICMP6_PARAM_PROB;
8421 code = ICMP6_PARAMPROB_NEXTHEADER;
8422 }
8423 sctp6_notify(inp, stcb, net, type, code,
8424 ntohl(ip6cp->ip6c_icmp6->icmp6_mtu));
8425 #if defined(__Userspace__)
8426 if (!(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) &&
8427 (stcb->sctp_socket != NULL)) {
8428 struct socket *upcall_socket;
8429
8430 upcall_socket = stcb->sctp_socket;
8431 SOCK_LOCK(upcall_socket);
8432 soref(upcall_socket);
8433 SOCK_UNLOCK(upcall_socket);
8434 if ((upcall_socket->so_upcall != NULL) &&
8435 (upcall_socket->so_error != 0)) {
8436 (*upcall_socket->so_upcall)(upcall_socket, upcall_socket->so_upcallarg, M_NOWAIT);
8437 }
8438 ACCEPT_LOCK();
8439 SOCK_LOCK(upcall_socket);
8440 sorele(upcall_socket);
8441 }
8442 #endif
8443 } else {
8444 if ((stcb == NULL) && (inp != NULL)) {
8445 /* reduce inp's ref-count */
8446 SCTP_INP_WLOCK(inp);
8447 SCTP_INP_DECR_REF(inp);
8448 SCTP_INP_WUNLOCK(inp);
8449 }
8450 if (stcb) {
8451 SCTP_TCB_UNLOCK(stcb);
8452 }
8453 }
8454 }
8455 #endif
8456
8457 void
8458 sctp_over_udp_stop(void)
8459 {
8460 /*
8461 * This function assumes the sysctl caller holds sctp_sysctl_info_lock() for writing.
8462 */
8463 #ifdef INET
8464 if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
8465 soclose(SCTP_BASE_INFO(udp4_tun_socket));
8466 SCTP_BASE_INFO(udp4_tun_socket) = NULL;
8467 }
8468 #endif
8469 #ifdef INET6
8470 if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
8471 soclose(SCTP_BASE_INFO(udp6_tun_socket));
8472 SCTP_BASE_INFO(udp6_tun_socket) = NULL;
8473 }
8474 #endif
8475 }
8476
8477 int
8478 sctp_over_udp_start(void)
8479 {
8480 uint16_t port;
8481 int ret;
8482 #ifdef INET
8483 struct sockaddr_in sin;
8484 #endif
8485 #ifdef INET6
8486 struct sockaddr_in6 sin6;
8487 #endif
8488 /*
8489 * This function assumes the sysctl caller holds sctp_sysctl_info_lock() for writing.
8490 */
8491 port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
8492 if (ntohs(port) == 0) {
8493 /* Must have a port set */
8494 return (EINVAL);
8495 }
8496 #ifdef INET
8497 if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
8498 /* Already running -- must stop first */
8499 return (EALREADY);
8500 }
8501 #endif
8502 #ifdef INET6
8503 if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
8504 /* Already running -- must stop first */
8505 return (EALREADY);
8506 }
8507 #endif
8508 #ifdef INET
8509 if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket),
8510 SOCK_DGRAM, IPPROTO_UDP,
8511 curthread->td_ucred, curthread))) {
8512 sctp_over_udp_stop();
8513 return (ret);
8514 }
8515 /* Call the special UDP hook. */
8516 if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket),
8517 sctp_recv_udp_tunneled_packet,
8518 sctp_recv_icmp_tunneled_packet,
8519 NULL))) {
8520 sctp_over_udp_stop();
8521 return (ret);
8522 }
8523 /* Ok, we have a socket, bind it to the port. */
8524 memset(&sin, 0, sizeof(struct sockaddr_in));
8525 sin.sin_len = sizeof(struct sockaddr_in);
8526 sin.sin_family = AF_INET;
8527 sin.sin_port = htons(port);
8528 if ((ret = sobind(SCTP_BASE_INFO(udp4_tun_socket),
8529 (struct sockaddr *)&sin, curthread))) {
8530 sctp_over_udp_stop();
8531 return (ret);
8532 }
8533 #endif
8534 #ifdef INET6
8535 if ((ret = socreate(PF_INET6, &SCTP_BASE_INFO(udp6_tun_socket),
8536 SOCK_DGRAM, IPPROTO_UDP,
8537 curthread->td_ucred, curthread))) {
8538 sctp_over_udp_stop();
8539 return (ret);
8540 }
8541 /* Call the special UDP hook. */
8542 if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp6_tun_socket),
8543 sctp_recv_udp_tunneled_packet,
8544 sctp_recv_icmp6_tunneled_packet,
8545 NULL))) {
8546 sctp_over_udp_stop();
8547 return (ret);
8548 }
8549 /* Ok, we have a socket, bind it to the port. */
8550 memset(&sin6, 0, sizeof(struct sockaddr_in6));
8551 sin6.sin6_len = sizeof(struct sockaddr_in6);
8552 sin6.sin6_family = AF_INET6;
8553 sin6.sin6_port = htons(port);
8554 if ((ret = sobind(SCTP_BASE_INFO(udp6_tun_socket),
8555 (struct sockaddr *)&sin6, curthread))) {
8556 sctp_over_udp_stop();
8557 return (ret);
8558 }
8559 #endif
8560 return (0);
8561 }
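/*
 * Illustrative note (not part of the original source): on FreeBSD this pair
 * of functions is normally driven by the UDP encapsulation port sysctl, e.g.
 *
 *	# sysctl net.inet.sctp.udp_tunneling_port=9899
 *
 * which (under the sysctl lock mentioned above) stops any running tunneling
 * sockets and, for a non-zero port, calls sctp_over_udp_start() so that
 * SCTP-over-UDP (RFC 6951) packets arriving on that port are handed to
 * sctp_recv_udp_tunneled_packet().
 */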
8562 #endif
8563
8564 /*
8565 * sctp_min_mtu() returns the minimum of all non-zero arguments.
8566 * If all arguments are zero, zero is returned.
8567 */
8568 uint32_t
8569 sctp_min_mtu(uint32_t mtu1, uint32_t mtu2, uint32_t mtu3)
8570 {
8571 if (mtu1 > 0) {
8572 if (mtu2 > 0) {
8573 if (mtu3 > 0) {
8574 return (min(mtu1, min(mtu2, mtu3)));
8575 } else {
8576 return (min(mtu1, mtu2));
8577 }
8578 } else {
8579 if (mtu3 > 0) {
8580 return (min(mtu1, mtu3));
8581 } else {
8582 return (mtu1);
8583 }
8584 }
8585 } else {
8586 if (mtu2 > 0) {
8587 if (mtu3 > 0) {
8588 return (min(mtu2, mtu3));
8589 } else {
8590 return (mtu2);
8591 }
8592 } else {
8593 return (mtu3);
8594 }
8595 }
8596 }
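/*
 * Worked examples (illustrative, not part of the original source):
 * sctp_min_mtu(1500, 0, 1280) returns 1280 (the zero argument is ignored),
 * sctp_min_mtu(1500, 0, 0) returns 1500, and sctp_min_mtu(0, 0, 0) returns 0.
 */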
8597
8598 #if defined(__FreeBSD__) && !defined(__Userspace__)
8599 void
8600 sctp_hc_set_mtu(union sctp_sockstore *addr, uint16_t fibnum, uint32_t mtu)
8601 {
8602 struct in_conninfo inc;
8603
8604 memset(&inc, 0, sizeof(struct in_conninfo));
8605 inc.inc_fibnum = fibnum;
8606 switch (addr->sa.sa_family) {
8607 #ifdef INET
8608 case AF_INET:
8609 inc.inc_faddr = addr->sin.sin_addr;
8610 break;
8611 #endif
8612 #ifdef INET6
8613 case AF_INET6:
8614 inc.inc_flags |= INC_ISIPV6;
8615 inc.inc6_faddr = addr->sin6.sin6_addr;
8616 break;
8617 #endif
8618 default:
8619 return;
8620 }
8621 tcp_hc_updatemtu(&inc, (u_long)mtu);
8622 }
8623
8624 uint32_t
8625 sctp_hc_get_mtu(union sctp_sockstore *addr, uint16_t fibnum)
8626 {
8627 struct in_conninfo inc;
8628
8629 memset(&inc, 0, sizeof(struct in_conninfo));
8630 inc.inc_fibnum = fibnum;
8631 switch (addr->sa.sa_family) {
8632 #ifdef INET
8633 case AF_INET:
8634 inc.inc_faddr = addr->sin.sin_addr;
8635 break;
8636 #endif
8637 #ifdef INET6
8638 case AF_INET6:
8639 inc.inc_flags |= INC_ISIPV6;
8640 inc.inc6_faddr = addr->sin6.sin6_addr;
8641 break;
8642 #endif
8643 default:
8644 return (0);
8645 }
8646 return ((uint32_t)tcp_hc_getmtu(&inc));
8647 }
8648 #endif
8649
8650 void
8651 sctp_set_state(struct sctp_tcb *stcb, int new_state)
8652 {
8653 #if defined(KDTRACE_HOOKS)
8654 int old_state = stcb->asoc.state;
8655 #endif
8656
8657 KASSERT((new_state & ~SCTP_STATE_MASK) == 0,
8658 ("sctp_set_state: Can't set substate (new_state = %x)",
8659 new_state));
8660 stcb->asoc.state = (stcb->asoc.state & ~SCTP_STATE_MASK) | new_state;
8661 if ((new_state == SCTP_STATE_SHUTDOWN_RECEIVED) ||
8662 (new_state == SCTP_STATE_SHUTDOWN_SENT) ||
8663 (new_state == SCTP_STATE_SHUTDOWN_ACK_SENT)) {
8664 SCTP_CLEAR_SUBSTATE(stcb, SCTP_STATE_SHUTDOWN_PENDING);
8665 }
8666 #if defined(KDTRACE_HOOKS)
8667 if (((old_state & SCTP_STATE_MASK) != new_state) &&
8668 !(((old_state & SCTP_STATE_MASK) == SCTP_STATE_EMPTY) &&
8669 (new_state == SCTP_STATE_INUSE))) {
8670 SCTP_PROBE6(state__change, NULL, stcb, NULL, stcb, NULL, old_state);
8671 }
8672 #endif
8673 }
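/*
 * Illustrative example (not part of the original source): calling
 * sctp_set_state(stcb, SCTP_STATE_SHUTDOWN_SENT) replaces only the bits
 * covered by SCTP_STATE_MASK, so substate flags such as
 * SCTP_STATE_ABOUT_TO_BE_FREED are preserved, while the
 * SCTP_STATE_SHUTDOWN_PENDING substate is cleared explicitly for the three
 * shutdown states handled above.
 */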
8674
8675 void
8676 sctp_add_substate(struct sctp_tcb *stcb, int substate)
8677 {
8678 #if defined(KDTRACE_HOOKS)
8679 int old_state = stcb->asoc.state;
8680 #endif
8681
8682 KASSERT((substate & SCTP_STATE_MASK) == 0,
8683 ("sctp_add_substate: Can't set state (substate = %x)",
8684 substate));
8685 stcb->asoc.state |= substate;
8686 #if defined(KDTRACE_HOOKS)
8687 if (((substate & SCTP_STATE_ABOUT_TO_BE_FREED) &&
8688 ((old_state & SCTP_STATE_ABOUT_TO_BE_FREED) == 0)) ||
8689 ((substate & SCTP_STATE_SHUTDOWN_PENDING) &&
8690 ((old_state & SCTP_STATE_SHUTDOWN_PENDING) == 0))) {
8691 SCTP_PROBE6(state__change, NULL, stcb, NULL, stcb, NULL, old_state);
8692 }
8693 #endif
8694 }
8695
8696