/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2011, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2011, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef __sctp_process_lock_h__
#define __sctp_process_lock_h__

/*
 * We still need to define four atomic functions, or their equivalents:
 * - atomic_add_int(&foo, val) - atomically add val to foo.
 * - atomic_fetchadd_int(&foo, val) - same as atomic_add_int(),
 *				      but the previous value is returned.
 * - atomic_subtract_int(&foo, val) - can be built from atomic_add_int().
 *
 * - atomic_cmpset_int(&foo, value, newvalue) - Sets foo to newvalue
 *					        if and only if foo equals
 *					        value. Returns non-zero on
 *					        success, 0 on failure.
 */
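
/*
 * Illustrative sketch only (not part of this header): with GCC/Clang the
 * four operations above could be mapped onto the __sync builtins.  The
 * mapping below is an assumption shown for illustration; the real
 * definitions live in the OS-adaptation layer.
 *
 *	#define atomic_add_int(p, v)       (void)__sync_fetch_and_add((p), (v))
 *	#define atomic_fetchadd_int(p, v)  __sync_fetch_and_add((p), (v))
 *	#define atomic_subtract_int(p, v)  (void)__sync_fetch_and_sub((p), (v))
 *	#define atomic_cmpset_int(p, o, n) __sync_bool_compare_and_swap((p), (o), (n))
 */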

#ifdef SCTP_PER_SOCKET_LOCKING
/*
 * per socket level locking
 */
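/*
 * In this mode a single lock at the socket level is assumed to serialize
 * all access to an endpoint, which is why the fine-grained INP/TCB macros
 * below can expand to nothing.
 */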

#if defined(__Userspace_os_Windows)
/* Lock for INFO stuff */
#define SCTP_INP_INFO_LOCK_INIT()
#define SCTP_INP_INFO_RLOCK()
#define SCTP_INP_INFO_RUNLOCK()
#define SCTP_INP_INFO_WLOCK()
#define SCTP_INP_INFO_WUNLOCK()
#define SCTP_INP_INFO_LOCK_DESTROY()
#define SCTP_IPI_COUNT_INIT()
#define SCTP_IPI_COUNT_DESTROY()
#else
#define SCTP_INP_INFO_LOCK_INIT()
#define SCTP_INP_INFO_RLOCK()
#define SCTP_INP_INFO_RUNLOCK()
#define SCTP_INP_INFO_WLOCK()
#define SCTP_INP_INFO_WUNLOCK()
#define SCTP_INP_INFO_LOCK_DESTROY()
#define SCTP_IPI_COUNT_INIT()
#define SCTP_IPI_COUNT_DESTROY()
#endif

#define SCTP_TCB_SEND_LOCK_INIT(_tcb)
#define SCTP_TCB_SEND_LOCK_DESTROY(_tcb)
#define SCTP_TCB_SEND_LOCK(_tcb)
#define SCTP_TCB_SEND_UNLOCK(_tcb)

/* Lock for INP */
#define SCTP_INP_LOCK_INIT(_inp)
#define SCTP_INP_LOCK_DESTROY(_inp)

#define SCTP_INP_RLOCK(_inp)
#define SCTP_INP_RUNLOCK(_inp)
#define SCTP_INP_WLOCK(_inp)
#define SCTP_INP_WUNLOCK(_inp)
#define SCTP_INP_RLOCK_ASSERT(_inp)
#define SCTP_INP_WLOCK_ASSERT(_inp)
#define SCTP_INP_INCR_REF(_inp)
#define SCTP_INP_DECR_REF(_inp)

#define SCTP_ASOC_CREATE_LOCK_INIT(_inp)
#define SCTP_ASOC_CREATE_LOCK_DESTROY(_inp)
#define SCTP_ASOC_CREATE_LOCK(_inp)
#define SCTP_ASOC_CREATE_UNLOCK(_inp)

#define SCTP_INP_READ_INIT(_inp)
#define SCTP_INP_READ_DESTROY(_inp)
#define SCTP_INP_READ_LOCK(_inp)
#define SCTP_INP_READ_UNLOCK(_inp)

/* Lock for TCB */
#define SCTP_TCB_LOCK_INIT(_tcb)
#define SCTP_TCB_LOCK_DESTROY(_tcb)
#define SCTP_TCB_LOCK(_tcb)
#define SCTP_TCB_TRYLOCK(_tcb) 1
#define SCTP_TCB_UNLOCK(_tcb)
#define SCTP_TCB_UNLOCK_IFOWNED(_tcb)
#define SCTP_TCB_LOCK_ASSERT(_tcb)

#else
/*
 * per tcb level locking
 */
#define SCTP_IPI_COUNT_INIT()

#if defined(__Userspace_os_Windows)
#define SCTP_WQ_ADDR_INIT() \
	InitializeCriticalSection(&SCTP_BASE_INFO(wq_addr_mtx))
#define SCTP_WQ_ADDR_DESTROY() \
	DeleteCriticalSection(&SCTP_BASE_INFO(wq_addr_mtx))
#define SCTP_WQ_ADDR_LOCK() \
	EnterCriticalSection(&SCTP_BASE_INFO(wq_addr_mtx))
#define SCTP_WQ_ADDR_UNLOCK() \
	LeaveCriticalSection(&SCTP_BASE_INFO(wq_addr_mtx))
#define SCTP_WQ_ADDR_LOCK_ASSERT()
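/*
 * No assertion here: a CRITICAL_SECTION is recursive, so
 * TryEnterCriticalSection() succeeds for the owning thread and cannot
 * emulate the pthread_mutex_trylock() == EBUSY check used further below.
 */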

#define SCTP_INP_INFO_LOCK_INIT() \
	InitializeCriticalSection(&SCTP_BASE_INFO(ipi_ep_mtx))
#define SCTP_INP_INFO_LOCK_DESTROY() \
	DeleteCriticalSection(&SCTP_BASE_INFO(ipi_ep_mtx))
#define SCTP_INP_INFO_RLOCK() \
	EnterCriticalSection(&SCTP_BASE_INFO(ipi_ep_mtx))
#define SCTP_INP_INFO_TRYLOCK() \
	TryEnterCriticalSection(&SCTP_BASE_INFO(ipi_ep_mtx))
#define SCTP_INP_INFO_WLOCK() \
	EnterCriticalSection(&SCTP_BASE_INFO(ipi_ep_mtx))
#define SCTP_INP_INFO_RUNLOCK() \
	LeaveCriticalSection(&SCTP_BASE_INFO(ipi_ep_mtx))
#define SCTP_INP_INFO_WUNLOCK() \
	LeaveCriticalSection(&SCTP_BASE_INFO(ipi_ep_mtx))
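/*
 * Both the read and the write variants above enter the same
 * CRITICAL_SECTION, so even concurrent "readers" exclude each other.  A
 * hedged sketch of real reader/writer semantics, assuming ipi_ep_mtx were
 * changed to a Vista-and-later SRWLOCK:
 *
 *	SRWLOCK lock = SRWLOCK_INIT;
 *	AcquireSRWLockShared(&lock);		// SCTP_INP_INFO_RLOCK()
 *	ReleaseSRWLockShared(&lock);		// SCTP_INP_INFO_RUNLOCK()
 *	AcquireSRWLockExclusive(&lock);		// SCTP_INP_INFO_WLOCK()
 *	ReleaseSRWLockExclusive(&lock);		// SCTP_INP_INFO_WUNLOCK()
 */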

#define SCTP_IP_PKTLOG_INIT() \
	InitializeCriticalSection(&SCTP_BASE_INFO(ipi_pktlog_mtx))
#define SCTP_IP_PKTLOG_DESTROY() \
	DeleteCriticalSection(&SCTP_BASE_INFO(ipi_pktlog_mtx))
#define SCTP_IP_PKTLOG_LOCK() \
	EnterCriticalSection(&SCTP_BASE_INFO(ipi_pktlog_mtx))
#define SCTP_IP_PKTLOG_UNLOCK() \
	LeaveCriticalSection(&SCTP_BASE_INFO(ipi_pktlog_mtx))

/*
 * The INP locks protect an SCTP endpoint: to change something at the
 * endpoint level, for example random_store or the cookie secrets, we take
 * the INP lock.
 */
#define SCTP_INP_READ_INIT(_inp) \
	InitializeCriticalSection(&(_inp)->inp_rdata_mtx)
#define SCTP_INP_READ_DESTROY(_inp) \
	DeleteCriticalSection(&(_inp)->inp_rdata_mtx)
#define SCTP_INP_READ_LOCK(_inp) \
	EnterCriticalSection(&(_inp)->inp_rdata_mtx)
#define SCTP_INP_READ_UNLOCK(_inp) \
	LeaveCriticalSection(&(_inp)->inp_rdata_mtx)

#define SCTP_INP_LOCK_INIT(_inp) \
	InitializeCriticalSection(&(_inp)->inp_mtx)
#define SCTP_INP_LOCK_DESTROY(_inp) \
	DeleteCriticalSection(&(_inp)->inp_mtx)
#ifdef SCTP_LOCK_LOGGING
#define SCTP_INP_RLOCK(_inp) do {						\
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE)	\
		sctp_log_lock(_inp, NULL, SCTP_LOG_LOCK_INP);			\
	EnterCriticalSection(&(_inp)->inp_mtx);					\
} while (0)
#define SCTP_INP_WLOCK(_inp) do {						\
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE)	\
		sctp_log_lock(_inp, NULL, SCTP_LOG_LOCK_INP);			\
	EnterCriticalSection(&(_inp)->inp_mtx);					\
} while (0)
#else
#define SCTP_INP_RLOCK(_inp) \
	EnterCriticalSection(&(_inp)->inp_mtx)
#define SCTP_INP_WLOCK(_inp) \
	EnterCriticalSection(&(_inp)->inp_mtx)
#endif
#define SCTP_INP_RLOCK_ASSERT(_inp)
#define SCTP_INP_WLOCK_ASSERT(_inp)

#define SCTP_TCB_SEND_LOCK_INIT(_tcb) \
	InitializeCriticalSection(&(_tcb)->tcb_send_mtx)
#define SCTP_TCB_SEND_LOCK_DESTROY(_tcb) \
	DeleteCriticalSection(&(_tcb)->tcb_send_mtx)
#define SCTP_TCB_SEND_LOCK(_tcb) \
	EnterCriticalSection(&(_tcb)->tcb_send_mtx)
#define SCTP_TCB_SEND_UNLOCK(_tcb) \
	LeaveCriticalSection(&(_tcb)->tcb_send_mtx)

#define SCTP_INP_INCR_REF(_inp) atomic_add_int(&((_inp)->refcount), 1)
#define SCTP_INP_DECR_REF(_inp) atomic_add_int(&((_inp)->refcount), -1)
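/*
 * atomic_add_int() itself is not defined in this header; on Windows the
 * userspace stack would typically map it onto the Interlocked family.  An
 * illustrative assumption, not the actual definition:
 *
 *	#define atomic_add_int(addr, incr) \
 *		InterlockedExchangeAdd((LONG volatile *)(addr), (incr))
 */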

#define SCTP_ASOC_CREATE_LOCK_INIT(_inp) \
	InitializeCriticalSection(&(_inp)->inp_create_mtx)
#define SCTP_ASOC_CREATE_LOCK_DESTROY(_inp) \
	DeleteCriticalSection(&(_inp)->inp_create_mtx)
#ifdef SCTP_LOCK_LOGGING
#define SCTP_ASOC_CREATE_LOCK(_inp) do {					\
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE)	\
		sctp_log_lock(_inp, NULL, SCTP_LOG_LOCK_CREATE);		\
	EnterCriticalSection(&(_inp)->inp_create_mtx);				\
} while (0)
#else
#define SCTP_ASOC_CREATE_LOCK(_inp) \
	EnterCriticalSection(&(_inp)->inp_create_mtx)
#endif

#define SCTP_INP_RUNLOCK(_inp) \
	LeaveCriticalSection(&(_inp)->inp_mtx)
#define SCTP_INP_WUNLOCK(_inp) \
	LeaveCriticalSection(&(_inp)->inp_mtx)
#define SCTP_ASOC_CREATE_UNLOCK(_inp) \
	LeaveCriticalSection(&(_inp)->inp_create_mtx)

/*
 * For the majority of things (once we have found the association) we will
 * lock the actual association mutex. This protects all the association
 * level queues and streams and such. We will need to lock the socket layer
 * when we stuff data up into the receiving sb_mb. I.e. we will need to do an
 * extra SOCKBUF_LOCK(&so->so_rcv) even though the association is locked.
 */

#define SCTP_TCB_LOCK_INIT(_tcb) \
	InitializeCriticalSection(&(_tcb)->tcb_mtx)
#define SCTP_TCB_LOCK_DESTROY(_tcb) \
	DeleteCriticalSection(&(_tcb)->tcb_mtx)
#ifdef SCTP_LOCK_LOGGING
#define SCTP_TCB_LOCK(_tcb) do {						\
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE)	\
		sctp_log_lock(_tcb->sctp_ep, _tcb, SCTP_LOG_LOCK_TCB);		\
	EnterCriticalSection(&(_tcb)->tcb_mtx);					\
} while (0)
#else
#define SCTP_TCB_LOCK(_tcb) \
	EnterCriticalSection(&(_tcb)->tcb_mtx)
#endif
#define SCTP_TCB_TRYLOCK(_tcb) (TryEnterCriticalSection(&(_tcb)->tcb_mtx))
#define SCTP_TCB_UNLOCK(_tcb) \
	LeaveCriticalSection(&(_tcb)->tcb_mtx)
#define SCTP_TCB_LOCK_ASSERT(_tcb)

#else /* all Userspaces except Windows */
#define SCTP_WQ_ADDR_INIT() \
	(void)pthread_mutex_init(&SCTP_BASE_INFO(wq_addr_mtx), &SCTP_BASE_VAR(mtx_attr))
#define SCTP_WQ_ADDR_DESTROY() \
	(void)pthread_mutex_destroy(&SCTP_BASE_INFO(wq_addr_mtx))
#ifdef INVARIANTS
#define SCTP_WQ_ADDR_LOCK() \
	KASSERT(pthread_mutex_lock(&SCTP_BASE_INFO(wq_addr_mtx)) == 0, ("%s: wq_addr_mtx already locked", __func__))
#define SCTP_WQ_ADDR_UNLOCK() \
	KASSERT(pthread_mutex_unlock(&SCTP_BASE_INFO(wq_addr_mtx)) == 0, ("%s: wq_addr_mtx not locked", __func__))
#else
#define SCTP_WQ_ADDR_LOCK() \
	(void)pthread_mutex_lock(&SCTP_BASE_INFO(wq_addr_mtx))
#define SCTP_WQ_ADDR_UNLOCK() \
	(void)pthread_mutex_unlock(&SCTP_BASE_INFO(wq_addr_mtx))
#endif
#define SCTP_WQ_ADDR_LOCK_ASSERT() \
	KASSERT(pthread_mutex_trylock(&SCTP_BASE_INFO(wq_addr_mtx)) == EBUSY, ("%s: wq_addr_mtx not locked", __func__))
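/*
 * The assertion above relies on pthread_mutex_trylock() returning EBUSY
 * for a mutex that is already held: it catches "not locked at all", but
 * for the default mutex type it cannot tell whether the calling thread or
 * another thread is the owner.
 */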

#define SCTP_INP_INFO_LOCK_INIT() \
	(void)pthread_mutex_init(&SCTP_BASE_INFO(ipi_ep_mtx), &SCTP_BASE_VAR(mtx_attr))
#define SCTP_INP_INFO_LOCK_DESTROY() \
	(void)pthread_mutex_destroy(&SCTP_BASE_INFO(ipi_ep_mtx))
#ifdef INVARIANTS
#define SCTP_INP_INFO_RLOCK() \
	KASSERT(pthread_mutex_lock(&SCTP_BASE_INFO(ipi_ep_mtx)) == 0, ("%s: ipi_ep_mtx already locked", __func__))
#define SCTP_INP_INFO_WLOCK() \
	KASSERT(pthread_mutex_lock(&SCTP_BASE_INFO(ipi_ep_mtx)) == 0, ("%s: ipi_ep_mtx already locked", __func__))
#define SCTP_INP_INFO_RUNLOCK() \
	KASSERT(pthread_mutex_unlock(&SCTP_BASE_INFO(ipi_ep_mtx)) == 0, ("%s: ipi_ep_mtx not locked", __func__))
#define SCTP_INP_INFO_WUNLOCK() \
	KASSERT(pthread_mutex_unlock(&SCTP_BASE_INFO(ipi_ep_mtx)) == 0, ("%s: ipi_ep_mtx not locked", __func__))
#else
#define SCTP_INP_INFO_RLOCK() \
	(void)pthread_mutex_lock(&SCTP_BASE_INFO(ipi_ep_mtx))
#define SCTP_INP_INFO_WLOCK() \
	(void)pthread_mutex_lock(&SCTP_BASE_INFO(ipi_ep_mtx))
#define SCTP_INP_INFO_RUNLOCK() \
	(void)pthread_mutex_unlock(&SCTP_BASE_INFO(ipi_ep_mtx))
#define SCTP_INP_INFO_WUNLOCK() \
	(void)pthread_mutex_unlock(&SCTP_BASE_INFO(ipi_ep_mtx))
#endif
#define SCTP_INP_INFO_TRYLOCK() \
	(!(pthread_mutex_trylock(&SCTP_BASE_INFO(ipi_ep_mtx))))
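/*
 * SCTP_INP_INFO_TRYLOCK() returns non-zero on success because
 * pthread_mutex_trylock() returns 0 when the lock is acquired.  As in the
 * Windows branch, the read and write variants share one exclusive mutex;
 * a hedged sketch of real reader/writer semantics, assuming ipi_ep_mtx
 * were a pthread_rwlock_t instead:
 *
 *	pthread_rwlock_t rwlock;
 *	(void)pthread_rwlock_init(&rwlock, NULL);
 *	(void)pthread_rwlock_rdlock(&rwlock);	// RLOCK: shared
 *	(void)pthread_rwlock_unlock(&rwlock);	// RUNLOCK
 *	(void)pthread_rwlock_wrlock(&rwlock);	// WLOCK: exclusive
 *	(void)pthread_rwlock_unlock(&rwlock);	// WUNLOCK
 */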

#define SCTP_IP_PKTLOG_INIT() \
	(void)pthread_mutex_init(&SCTP_BASE_INFO(ipi_pktlog_mtx), &SCTP_BASE_VAR(mtx_attr))
#define SCTP_IP_PKTLOG_DESTROY() \
	(void)pthread_mutex_destroy(&SCTP_BASE_INFO(ipi_pktlog_mtx))
#ifdef INVARIANTS
#define SCTP_IP_PKTLOG_LOCK() \
	KASSERT(pthread_mutex_lock(&SCTP_BASE_INFO(ipi_pktlog_mtx)) == 0, ("%s: ipi_pktlog_mtx already locked", __func__))
#define SCTP_IP_PKTLOG_UNLOCK() \
	KASSERT(pthread_mutex_unlock(&SCTP_BASE_INFO(ipi_pktlog_mtx)) == 0, ("%s: ipi_pktlog_mtx not locked", __func__))
#else
#define SCTP_IP_PKTLOG_LOCK() \
	(void)pthread_mutex_lock(&SCTP_BASE_INFO(ipi_pktlog_mtx))
#define SCTP_IP_PKTLOG_UNLOCK() \
	(void)pthread_mutex_unlock(&SCTP_BASE_INFO(ipi_pktlog_mtx))
#endif

/*
 * The INP locks protect an SCTP endpoint: to change something at the
 * endpoint level, for example random_store or the cookie secrets, we take
 * the INP lock.
 */
#define SCTP_INP_READ_INIT(_inp) \
	(void)pthread_mutex_init(&(_inp)->inp_rdata_mtx, &SCTP_BASE_VAR(mtx_attr))
#define SCTP_INP_READ_DESTROY(_inp) \
	(void)pthread_mutex_destroy(&(_inp)->inp_rdata_mtx)
#ifdef INVARIANTS
#define SCTP_INP_READ_LOCK(_inp) \
	KASSERT(pthread_mutex_lock(&(_inp)->inp_rdata_mtx) == 0, ("%s: inp_rdata_mtx already locked", __func__))
#define SCTP_INP_READ_UNLOCK(_inp) \
	KASSERT(pthread_mutex_unlock(&(_inp)->inp_rdata_mtx) == 0, ("%s: inp_rdata_mtx not locked", __func__))
#else
#define SCTP_INP_READ_LOCK(_inp) \
	(void)pthread_mutex_lock(&(_inp)->inp_rdata_mtx)
#define SCTP_INP_READ_UNLOCK(_inp) \
	(void)pthread_mutex_unlock(&(_inp)->inp_rdata_mtx)
#endif

#define SCTP_INP_LOCK_INIT(_inp) \
	(void)pthread_mutex_init(&(_inp)->inp_mtx, &SCTP_BASE_VAR(mtx_attr))
#define SCTP_INP_LOCK_DESTROY(_inp) \
	(void)pthread_mutex_destroy(&(_inp)->inp_mtx)
#ifdef INVARIANTS
#ifdef SCTP_LOCK_LOGGING
#define SCTP_INP_RLOCK(_inp) do {									\
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE)				\
		sctp_log_lock(_inp, NULL, SCTP_LOG_LOCK_INP);						\
	KASSERT(pthread_mutex_lock(&(_inp)->inp_mtx) == 0, ("%s: inp_mtx already locked", __func__));	\
} while (0)
#define SCTP_INP_WLOCK(_inp) do {									\
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE)				\
		sctp_log_lock(_inp, NULL, SCTP_LOG_LOCK_INP);						\
	KASSERT(pthread_mutex_lock(&(_inp)->inp_mtx) == 0, ("%s: inp_mtx already locked", __func__));	\
} while (0)
#else
#define SCTP_INP_RLOCK(_inp) \
	KASSERT(pthread_mutex_lock(&(_inp)->inp_mtx) == 0, ("%s: inp_mtx already locked", __func__))
#define SCTP_INP_WLOCK(_inp) \
	KASSERT(pthread_mutex_lock(&(_inp)->inp_mtx) == 0, ("%s: inp_mtx already locked", __func__))
#endif
#define SCTP_INP_RUNLOCK(_inp) \
	KASSERT(pthread_mutex_unlock(&(_inp)->inp_mtx) == 0, ("%s: inp_mtx not locked", __func__))
#define SCTP_INP_WUNLOCK(_inp) \
	KASSERT(pthread_mutex_unlock(&(_inp)->inp_mtx) == 0, ("%s: inp_mtx not locked", __func__))
#else
#ifdef SCTP_LOCK_LOGGING
#define SCTP_INP_RLOCK(_inp) do {						\
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE)	\
		sctp_log_lock(_inp, NULL, SCTP_LOG_LOCK_INP);			\
	(void)pthread_mutex_lock(&(_inp)->inp_mtx);				\
} while (0)
#define SCTP_INP_WLOCK(_inp) do {						\
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE)	\
		sctp_log_lock(_inp, NULL, SCTP_LOG_LOCK_INP);			\
	(void)pthread_mutex_lock(&(_inp)->inp_mtx);				\
} while (0)
#else
#define SCTP_INP_RLOCK(_inp) \
	(void)pthread_mutex_lock(&(_inp)->inp_mtx)
#define SCTP_INP_WLOCK(_inp) \
	(void)pthread_mutex_lock(&(_inp)->inp_mtx)
#endif
#define SCTP_INP_RUNLOCK(_inp) \
	(void)pthread_mutex_unlock(&(_inp)->inp_mtx)
#define SCTP_INP_WUNLOCK(_inp) \
	(void)pthread_mutex_unlock(&(_inp)->inp_mtx)
#endif
#define SCTP_INP_RLOCK_ASSERT(_inp) \
	KASSERT(pthread_mutex_trylock(&(_inp)->inp_mtx) == EBUSY, ("%s: inp_mtx not locked", __func__))
#define SCTP_INP_WLOCK_ASSERT(_inp) \
	KASSERT(pthread_mutex_trylock(&(_inp)->inp_mtx) == EBUSY, ("%s: inp_mtx not locked", __func__))
#define SCTP_INP_INCR_REF(_inp) atomic_add_int(&((_inp)->refcount), 1)
#define SCTP_INP_DECR_REF(_inp) atomic_add_int(&((_inp)->refcount), -1)

#define SCTP_TCB_SEND_LOCK_INIT(_tcb) \
	(void)pthread_mutex_init(&(_tcb)->tcb_send_mtx, &SCTP_BASE_VAR(mtx_attr))
#define SCTP_TCB_SEND_LOCK_DESTROY(_tcb) \
	(void)pthread_mutex_destroy(&(_tcb)->tcb_send_mtx)
#ifdef INVARIANTS
#define SCTP_TCB_SEND_LOCK(_tcb) \
	KASSERT(pthread_mutex_lock(&(_tcb)->tcb_send_mtx) == 0, ("%s: tcb_send_mtx already locked", __func__))
#define SCTP_TCB_SEND_UNLOCK(_tcb) \
	KASSERT(pthread_mutex_unlock(&(_tcb)->tcb_send_mtx) == 0, ("%s: tcb_send_mtx not locked", __func__))
#else
#define SCTP_TCB_SEND_LOCK(_tcb) \
	(void)pthread_mutex_lock(&(_tcb)->tcb_send_mtx)
#define SCTP_TCB_SEND_UNLOCK(_tcb) \
	(void)pthread_mutex_unlock(&(_tcb)->tcb_send_mtx)
#endif

#define SCTP_ASOC_CREATE_LOCK_INIT(_inp) \
	(void)pthread_mutex_init(&(_inp)->inp_create_mtx, &SCTP_BASE_VAR(mtx_attr))
#define SCTP_ASOC_CREATE_LOCK_DESTROY(_inp) \
	(void)pthread_mutex_destroy(&(_inp)->inp_create_mtx)
#ifdef INVARIANTS
#ifdef SCTP_LOCK_LOGGING
#define SCTP_ASOC_CREATE_LOCK(_inp) do {										\
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE)						\
		sctp_log_lock(_inp, NULL, SCTP_LOG_LOCK_CREATE);							\
	KASSERT(pthread_mutex_lock(&(_inp)->inp_create_mtx) == 0, ("%s: inp_create_mtx already locked", __func__));	\
} while (0)
#else
#define SCTP_ASOC_CREATE_LOCK(_inp) \
	KASSERT(pthread_mutex_lock(&(_inp)->inp_create_mtx) == 0, ("%s: inp_create_mtx already locked", __func__))
#endif
#define SCTP_ASOC_CREATE_UNLOCK(_inp) \
	KASSERT(pthread_mutex_unlock(&(_inp)->inp_create_mtx) == 0, ("%s: inp_create_mtx not locked", __func__))
#else
#ifdef SCTP_LOCK_LOGGING
#define SCTP_ASOC_CREATE_LOCK(_inp) do {					\
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE)	\
		sctp_log_lock(_inp, NULL, SCTP_LOG_LOCK_CREATE);		\
	(void)pthread_mutex_lock(&(_inp)->inp_create_mtx);			\
} while (0)
#else
#define SCTP_ASOC_CREATE_LOCK(_inp) \
	(void)pthread_mutex_lock(&(_inp)->inp_create_mtx)
#endif
#define SCTP_ASOC_CREATE_UNLOCK(_inp) \
	(void)pthread_mutex_unlock(&(_inp)->inp_create_mtx)
#endif
/*
 * For the majority of things (once we have found the association) we will
 * lock the actual association mutex. This protects all the association
 * level queues and streams and such. We will need to lock the socket layer
 * when we stuff data up into the receiving sb_mb. I.e. we will need to do an
 * extra SOCKBUF_LOCK(&so->so_rcv) even though the association is locked.
 */

#define SCTP_TCB_LOCK_INIT(_tcb) \
	(void)pthread_mutex_init(&(_tcb)->tcb_mtx, &SCTP_BASE_VAR(mtx_attr))
#define SCTP_TCB_LOCK_DESTROY(_tcb) \
	(void)pthread_mutex_destroy(&(_tcb)->tcb_mtx)
#ifdef INVARIANTS
#ifdef SCTP_LOCK_LOGGING
#define SCTP_TCB_LOCK(_tcb) do {									\
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE)				\
		sctp_log_lock(_tcb->sctp_ep, _tcb, SCTP_LOG_LOCK_TCB);					\
	KASSERT(pthread_mutex_lock(&(_tcb)->tcb_mtx) == 0, ("%s: tcb_mtx already locked", __func__));	\
} while (0)
#else
#define SCTP_TCB_LOCK(_tcb) \
	KASSERT(pthread_mutex_lock(&(_tcb)->tcb_mtx) == 0, ("%s: tcb_mtx already locked", __func__))
#endif
#define SCTP_TCB_UNLOCK(_tcb) \
	KASSERT(pthread_mutex_unlock(&(_tcb)->tcb_mtx) == 0, ("%s: tcb_mtx not locked", __func__))
#else
#ifdef SCTP_LOCK_LOGGING
#define SCTP_TCB_LOCK(_tcb) do {						\
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE)	\
		sctp_log_lock(_tcb->sctp_ep, _tcb, SCTP_LOG_LOCK_TCB);		\
	(void)pthread_mutex_lock(&(_tcb)->tcb_mtx);				\
} while (0)
#else
#define SCTP_TCB_LOCK(_tcb) \
	(void)pthread_mutex_lock(&(_tcb)->tcb_mtx)
#endif
#define SCTP_TCB_UNLOCK(_tcb) (void)pthread_mutex_unlock(&(_tcb)->tcb_mtx)
#endif
#define SCTP_TCB_LOCK_ASSERT(_tcb) \
	KASSERT(pthread_mutex_trylock(&(_tcb)->tcb_mtx) == EBUSY, ("%s: tcb_mtx not locked", __func__))
#define SCTP_TCB_TRYLOCK(_tcb) (!(pthread_mutex_trylock(&(_tcb)->tcb_mtx)))
#endif

#endif /* SCTP_PER_SOCKET_LOCKING */

/*
 * common locks
 */

/* Stubbed out so that shared code compiles. */
#define SCTP_INP_LOCK_CONTENDED(_inp) (0) /* Don't know if this is possible */
#define SCTP_INP_READ_CONTENDED(_inp) (0) /* Don't know if this is possible */
#define SCTP_ASOC_CREATE_LOCK_CONTENDED(_inp) (0) /* Don't know if this is possible */
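/*
 * Neither pthread mutexes nor CRITICAL_SECTIONs expose a portable way to
 * ask whether other threads are waiting on a lock, so the *_CONTENDED()
 * macros conservatively report "not contended"; callers should treat them
 * as a best-effort hint only.
 */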

/* socket locks */

#if defined(__Userspace_os_Windows)
#define SOCKBUF_LOCK_ASSERT(_so_buf)
#define SOCKBUF_LOCK(_so_buf) \
	EnterCriticalSection(&(_so_buf)->sb_mtx)
#define SOCKBUF_UNLOCK(_so_buf) \
	LeaveCriticalSection(&(_so_buf)->sb_mtx)
#define SOCK_LOCK(_so) \
	SOCKBUF_LOCK(&(_so)->so_rcv)
#define SOCK_UNLOCK(_so) \
	SOCKBUF_UNLOCK(&(_so)->so_rcv)
#else
#define SOCKBUF_LOCK_ASSERT(_so_buf) \
	KASSERT(pthread_mutex_trylock(SOCKBUF_MTX(_so_buf)) == EBUSY, ("%s: socket buffer not locked", __func__))
#ifdef INVARIANTS
#define SOCKBUF_LOCK(_so_buf) \
	KASSERT(pthread_mutex_lock(SOCKBUF_MTX(_so_buf)) == 0, ("%s: sockbuf_mtx already locked", __func__))
#define SOCKBUF_UNLOCK(_so_buf) \
	KASSERT(pthread_mutex_unlock(SOCKBUF_MTX(_so_buf)) == 0, ("%s: sockbuf_mtx not locked", __func__))
#else
#define SOCKBUF_LOCK(_so_buf) \
	(void)pthread_mutex_lock(SOCKBUF_MTX(_so_buf))
#define SOCKBUF_UNLOCK(_so_buf) \
	(void)pthread_mutex_unlock(SOCKBUF_MTX(_so_buf))
#endif
#define SOCK_LOCK(_so) \
	SOCKBUF_LOCK(&(_so)->so_rcv)
#define SOCK_UNLOCK(_so) \
	SOCKBUF_UNLOCK(&(_so)->so_rcv)
#endif
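/*
 * There is no distinct per-socket mutex in this port: SOCK_LOCK() simply
 * aliases the receive buffer's lock, so SOCK_LOCK(so) and
 * SOCKBUF_LOCK(&so->so_rcv) acquire the same underlying mutex.
 */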

#define SCTP_STATLOG_INIT_LOCK()
#define SCTP_STATLOG_LOCK()
#define SCTP_STATLOG_UNLOCK()
#define SCTP_STATLOG_DESTROY()

#if defined(__Userspace_os_Windows)
/* address list locks */
#define SCTP_IPI_ADDR_INIT() \
	InitializeCriticalSection(&SCTP_BASE_INFO(ipi_addr_mtx))
#define SCTP_IPI_ADDR_DESTROY() \
	DeleteCriticalSection(&SCTP_BASE_INFO(ipi_addr_mtx))
#define SCTP_IPI_ADDR_RLOCK() \
	EnterCriticalSection(&SCTP_BASE_INFO(ipi_addr_mtx))
#define SCTP_IPI_ADDR_RUNLOCK() \
	LeaveCriticalSection(&SCTP_BASE_INFO(ipi_addr_mtx))
#define SCTP_IPI_ADDR_WLOCK() \
	EnterCriticalSection(&SCTP_BASE_INFO(ipi_addr_mtx))
#define SCTP_IPI_ADDR_WUNLOCK() \
	LeaveCriticalSection(&SCTP_BASE_INFO(ipi_addr_mtx))

/* iterator locks */
#define SCTP_ITERATOR_LOCK_INIT() \
	InitializeCriticalSection(&sctp_it_ctl.it_mtx)
#define SCTP_ITERATOR_LOCK_DESTROY() \
	DeleteCriticalSection(&sctp_it_ctl.it_mtx)
#define SCTP_ITERATOR_LOCK() \
	EnterCriticalSection(&sctp_it_ctl.it_mtx)
#define SCTP_ITERATOR_UNLOCK() \
	LeaveCriticalSection(&sctp_it_ctl.it_mtx)

#define SCTP_IPI_ITERATOR_WQ_INIT() \
	InitializeCriticalSection(&sctp_it_ctl.ipi_iterator_wq_mtx)
#define SCTP_IPI_ITERATOR_WQ_DESTROY() \
	DeleteCriticalSection(&sctp_it_ctl.ipi_iterator_wq_mtx)
#define SCTP_IPI_ITERATOR_WQ_LOCK() \
	EnterCriticalSection(&sctp_it_ctl.ipi_iterator_wq_mtx)
#define SCTP_IPI_ITERATOR_WQ_UNLOCK() \
	LeaveCriticalSection(&sctp_it_ctl.ipi_iterator_wq_mtx)

#else /* end of __Userspace_os_Windows */
/* address list locks */
#define SCTP_IPI_ADDR_INIT() \
	(void)pthread_mutex_init(&SCTP_BASE_INFO(ipi_addr_mtx), &SCTP_BASE_VAR(mtx_attr))
#define SCTP_IPI_ADDR_DESTROY() \
	(void)pthread_mutex_destroy(&SCTP_BASE_INFO(ipi_addr_mtx))
#ifdef INVARIANTS
#define SCTP_IPI_ADDR_RLOCK() \
	KASSERT(pthread_mutex_lock(&SCTP_BASE_INFO(ipi_addr_mtx)) == 0, ("%s: ipi_addr_mtx already locked", __func__))
#define SCTP_IPI_ADDR_RUNLOCK() \
	KASSERT(pthread_mutex_unlock(&SCTP_BASE_INFO(ipi_addr_mtx)) == 0, ("%s: ipi_addr_mtx not locked", __func__))
#define SCTP_IPI_ADDR_WLOCK() \
	KASSERT(pthread_mutex_lock(&SCTP_BASE_INFO(ipi_addr_mtx)) == 0, ("%s: ipi_addr_mtx already locked", __func__))
#define SCTP_IPI_ADDR_WUNLOCK() \
	KASSERT(pthread_mutex_unlock(&SCTP_BASE_INFO(ipi_addr_mtx)) == 0, ("%s: ipi_addr_mtx not locked", __func__))
#else
#define SCTP_IPI_ADDR_RLOCK() \
	(void)pthread_mutex_lock(&SCTP_BASE_INFO(ipi_addr_mtx))
#define SCTP_IPI_ADDR_RUNLOCK() \
	(void)pthread_mutex_unlock(&SCTP_BASE_INFO(ipi_addr_mtx))
#define SCTP_IPI_ADDR_WLOCK() \
	(void)pthread_mutex_lock(&SCTP_BASE_INFO(ipi_addr_mtx))
#define SCTP_IPI_ADDR_WUNLOCK() \
	(void)pthread_mutex_unlock(&SCTP_BASE_INFO(ipi_addr_mtx))
#endif

/* iterator locks */
#define SCTP_ITERATOR_LOCK_INIT() \
	(void)pthread_mutex_init(&sctp_it_ctl.it_mtx, &SCTP_BASE_VAR(mtx_attr))
#define SCTP_ITERATOR_LOCK_DESTROY() \
	(void)pthread_mutex_destroy(&sctp_it_ctl.it_mtx)
#ifdef INVARIANTS
#define SCTP_ITERATOR_LOCK() \
	KASSERT(pthread_mutex_lock(&sctp_it_ctl.it_mtx) == 0, ("%s: it_mtx already locked", __func__))
#define SCTP_ITERATOR_UNLOCK() \
	KASSERT(pthread_mutex_unlock(&sctp_it_ctl.it_mtx) == 0, ("%s: it_mtx not locked", __func__))
#else
#define SCTP_ITERATOR_LOCK() \
	(void)pthread_mutex_lock(&sctp_it_ctl.it_mtx)
#define SCTP_ITERATOR_UNLOCK() \
	(void)pthread_mutex_unlock(&sctp_it_ctl.it_mtx)
#endif

#define SCTP_IPI_ITERATOR_WQ_INIT() \
	(void)pthread_mutex_init(&sctp_it_ctl.ipi_iterator_wq_mtx, &SCTP_BASE_VAR(mtx_attr))
#define SCTP_IPI_ITERATOR_WQ_DESTROY() \
	(void)pthread_mutex_destroy(&sctp_it_ctl.ipi_iterator_wq_mtx)
#ifdef INVARIANTS
#define SCTP_IPI_ITERATOR_WQ_LOCK() \
	KASSERT(pthread_mutex_lock(&sctp_it_ctl.ipi_iterator_wq_mtx) == 0, ("%s: ipi_iterator_wq_mtx already locked", __func__))
#define SCTP_IPI_ITERATOR_WQ_UNLOCK() \
	KASSERT(pthread_mutex_unlock(&sctp_it_ctl.ipi_iterator_wq_mtx) == 0, ("%s: ipi_iterator_wq_mtx not locked", __func__))
#else
#define SCTP_IPI_ITERATOR_WQ_LOCK() \
	(void)pthread_mutex_lock(&sctp_it_ctl.ipi_iterator_wq_mtx)
#define SCTP_IPI_ITERATOR_WQ_UNLOCK() \
	(void)pthread_mutex_unlock(&sctp_it_ctl.ipi_iterator_wq_mtx)
#endif
#endif

#define SCTP_INCR_EP_COUNT() \
	atomic_add_int(&SCTP_BASE_INFO(ipi_count_ep), 1)

#define SCTP_DECR_EP_COUNT() \
	atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_ep), 1)

#define SCTP_INCR_ASOC_COUNT() \
	atomic_add_int(&SCTP_BASE_INFO(ipi_count_asoc), 1)

#define SCTP_DECR_ASOC_COUNT() \
	atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_asoc), 1)

#define SCTP_INCR_LADDR_COUNT() \
	atomic_add_int(&SCTP_BASE_INFO(ipi_count_laddr), 1)

#define SCTP_DECR_LADDR_COUNT() \
	atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_laddr), 1)

#define SCTP_INCR_RADDR_COUNT() \
	atomic_add_int(&SCTP_BASE_INFO(ipi_count_raddr), 1)

#define SCTP_DECR_RADDR_COUNT() \
	atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_raddr), 1)

#define SCTP_INCR_CHK_COUNT() \
	atomic_add_int(&SCTP_BASE_INFO(ipi_count_chunk), 1)

#define SCTP_DECR_CHK_COUNT() \
	atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_chunk), 1)

#define SCTP_INCR_READQ_COUNT() \
	atomic_add_int(&SCTP_BASE_INFO(ipi_count_readq), 1)

#define SCTP_DECR_READQ_COUNT() \
	atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_readq), 1)

#define SCTP_INCR_STRMOQ_COUNT() \
	atomic_add_int(&SCTP_BASE_INFO(ipi_count_strmoq), 1)

#define SCTP_DECR_STRMOQ_COUNT() \
	atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_strmoq), 1)
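
/*
 * These counters track how many objects of each kind are live; they pair
 * with allocation and deallocation.  An illustrative (hypothetical) usage,
 * assuming the zone allocator macros used elsewhere in the stack:
 *
 *	stcb = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_asoc), struct sctp_tcb);
 *	if (stcb != NULL)
 *		SCTP_INCR_ASOC_COUNT();
 *	...
 *	SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_asoc), stcb);
 *	SCTP_DECR_ASOC_COUNT();
 */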

#endif /* __sctp_process_lock_h__ */