/*-
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2011, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2011, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef __sctp_process_lock_h__
#define __sctp_process_lock_h__

/*
 * The following atomic operations, or their equivalents, still need to
 * be provided:
 * - atomic_add_int(&foo, val) - atomically add val to foo
 * - atomic_fetchadd_int(&foo, val) - same as atomic_add_int(), but the
 *				      previous value of foo is returned
 * - atomic_subtract_int(&foo, val) - can be built from atomic_add_int()
 *
 * - atomic_cmpset_int(&foo, value, newvalue) - atomically sets foo to
 *					        newvalue if and only if
 *					        foo equals value; returns
 *					        non-zero on success, 0
 *					        otherwise.
 */
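
/*
 * Illustrative sketch only (not part of this header): on a userspace
 * platform these operations could, for example, be mapped to compiler
 * builtins. The names below mirror the list above, but this mapping is
 * an assumption for illustration, not the definition used by this stack.
 */
#if 0
#define atomic_add_int(addr, val)	(void)__sync_fetch_and_add((addr), (val))
#define atomic_fetchadd_int(addr, val)	__sync_fetch_and_add((addr), (val))
#define atomic_subtract_int(addr, val)	(void)__sync_fetch_and_sub((addr), (val))
/* non-zero on success, 0 otherwise */
#define atomic_cmpset_int(addr, oldval, newval) \
	__sync_bool_compare_and_swap((addr), (oldval), (newval))
#endif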

#ifdef SCTP_PER_SOCKET_LOCKING
/*
 * per socket level locking
 */

#if defined(__Userspace_os_Windows)
/* Lock for INFO stuff */
#define SCTP_INP_INFO_LOCK_INIT()
#define SCTP_INP_INFO_RLOCK()
#define SCTP_INP_INFO_RUNLOCK()
#define SCTP_INP_INFO_WLOCK()
#define SCTP_INP_INFO_WUNLOCK()
#define SCTP_INP_INFO_LOCK_DESTROY()
#define SCTP_IPI_COUNT_INIT()
#define SCTP_IPI_COUNT_DESTROY()
#else
#define SCTP_INP_INFO_LOCK_INIT()
#define SCTP_INP_INFO_RLOCK()
#define SCTP_INP_INFO_RUNLOCK()
#define SCTP_INP_INFO_WLOCK()
#define SCTP_INP_INFO_WUNLOCK()
#define SCTP_INP_INFO_LOCK_DESTROY()
#define SCTP_IPI_COUNT_INIT()
#define SCTP_IPI_COUNT_DESTROY()
#endif

#define SCTP_TCB_SEND_LOCK_INIT(_tcb)
#define SCTP_TCB_SEND_LOCK_DESTROY(_tcb)
#define SCTP_TCB_SEND_LOCK(_tcb)
#define SCTP_TCB_SEND_UNLOCK(_tcb)

/* Lock for INP */
#define SCTP_INP_LOCK_INIT(_inp)
#define SCTP_INP_LOCK_DESTROY(_inp)

#define SCTP_INP_RLOCK(_inp)
#define SCTP_INP_RUNLOCK(_inp)
#define SCTP_INP_WLOCK(_inp)
#define SCTP_INP_WUNLOCK(_inp)
#define SCTP_INP_INCR_REF(_inp)
#define SCTP_INP_DECR_REF(_inp)

#define SCTP_ASOC_CREATE_LOCK_INIT(_inp)
#define SCTP_ASOC_CREATE_LOCK_DESTROY(_inp)
#define SCTP_ASOC_CREATE_LOCK(_inp)
#define SCTP_ASOC_CREATE_UNLOCK(_inp)

#define SCTP_INP_READ_INIT(_inp)
#define SCTP_INP_READ_DESTROY(_inp)
#define SCTP_INP_READ_LOCK(_inp)
#define SCTP_INP_READ_UNLOCK(_inp)

/* Lock for TCB */
#define SCTP_TCB_LOCK_INIT(_tcb)
#define SCTP_TCB_LOCK_DESTROY(_tcb)
#define SCTP_TCB_LOCK(_tcb)
#define SCTP_TCB_TRYLOCK(_tcb) 1
#define SCTP_TCB_UNLOCK(_tcb)
#define SCTP_TCB_UNLOCK_IFOWNED(_tcb)
#define SCTP_TCB_LOCK_ASSERT(_tcb)

#else
/*
 * per tcb level locking
 */
#define SCTP_IPI_COUNT_INIT()

#if defined(__Userspace_os_Windows)
#define SCTP_WQ_ADDR_INIT() \
	InitializeCriticalSection(&SCTP_BASE_INFO(wq_addr_mtx))
#define SCTP_WQ_ADDR_DESTROY() \
	DeleteCriticalSection(&SCTP_BASE_INFO(wq_addr_mtx))
#define SCTP_WQ_ADDR_LOCK() \
	EnterCriticalSection(&SCTP_BASE_INFO(wq_addr_mtx))
#define SCTP_WQ_ADDR_UNLOCK() \
	LeaveCriticalSection(&SCTP_BASE_INFO(wq_addr_mtx))


#define SCTP_INP_INFO_LOCK_INIT() \
	InitializeCriticalSection(&SCTP_BASE_INFO(ipi_ep_mtx))
#define SCTP_INP_INFO_LOCK_DESTROY() \
	DeleteCriticalSection(&SCTP_BASE_INFO(ipi_ep_mtx))
#define SCTP_INP_INFO_RLOCK() \
	EnterCriticalSection(&SCTP_BASE_INFO(ipi_ep_mtx))
#define SCTP_INP_INFO_TRYLOCK() \
	TryEnterCriticalSection(&SCTP_BASE_INFO(ipi_ep_mtx))
#define SCTP_INP_INFO_WLOCK() \
	EnterCriticalSection(&SCTP_BASE_INFO(ipi_ep_mtx))
#define SCTP_INP_INFO_RUNLOCK() \
	LeaveCriticalSection(&SCTP_BASE_INFO(ipi_ep_mtx))
#define SCTP_INP_INFO_WUNLOCK() \
	LeaveCriticalSection(&SCTP_BASE_INFO(ipi_ep_mtx))

#define SCTP_IP_PKTLOG_INIT() \
	InitializeCriticalSection(&SCTP_BASE_INFO(ipi_pktlog_mtx))
#define SCTP_IP_PKTLOG_DESTROY() \
	DeleteCriticalSection(&SCTP_BASE_INFO(ipi_pktlog_mtx))
#define SCTP_IP_PKTLOG_LOCK() \
	EnterCriticalSection(&SCTP_BASE_INFO(ipi_pktlog_mtx))
#define SCTP_IP_PKTLOG_UNLOCK() \
	LeaveCriticalSection(&SCTP_BASE_INFO(ipi_pktlog_mtx))

/*
 * The INP locks are used to lock an SCTP endpoint. For example, if we
 * want to change something at the endpoint level, such as the random
 * store or the cookie secrets, we take the INP lock.
 */
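
/*
 * Illustrative sketch only (hypothetical caller, not part of this header):
 * an endpoint-level update would typically be bracketed by the write lock,
 * e.g.
 *
 *	SCTP_INP_WLOCK(inp);
 *	... modify endpoint-level state ...
 *	SCTP_INP_WUNLOCK(inp);
 */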
#define SCTP_INP_READ_INIT(_inp) \
	InitializeCriticalSection(&(_inp)->inp_rdata_mtx)
#define SCTP_INP_READ_DESTROY(_inp) \
	DeleteCriticalSection(&(_inp)->inp_rdata_mtx)
#define SCTP_INP_READ_LOCK(_inp) \
	EnterCriticalSection(&(_inp)->inp_rdata_mtx)
#define SCTP_INP_READ_UNLOCK(_inp) \
	LeaveCriticalSection(&(_inp)->inp_rdata_mtx)

#define SCTP_INP_LOCK_INIT(_inp) \
	InitializeCriticalSection(&(_inp)->inp_mtx)

#define SCTP_ASOC_CREATE_LOCK_INIT(_inp) \
	InitializeCriticalSection(&(_inp)->inp_create_mtx)

#define SCTP_INP_LOCK_DESTROY(_inp) \
	DeleteCriticalSection(&(_inp)->inp_mtx)

#define SCTP_ASOC_CREATE_LOCK_DESTROY(_inp) \
	DeleteCriticalSection(&(_inp)->inp_create_mtx)

#ifdef SCTP_LOCK_LOGGING
#define SCTP_INP_RLOCK(_inp)	do {					\
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) sctp_log_lock(_inp, (struct sctp_tcb *)NULL, SCTP_LOG_LOCK_INP); \
	EnterCriticalSection(&(_inp)->inp_mtx);				\
} while (0)

#define SCTP_INP_WLOCK(_inp)	do {					\
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) sctp_log_lock(_inp, (struct sctp_tcb *)NULL, SCTP_LOG_LOCK_INP); \
	EnterCriticalSection(&(_inp)->inp_mtx);				\
} while (0)
#else

#define SCTP_INP_RLOCK(_inp)	do {					\
	EnterCriticalSection(&(_inp)->inp_mtx);				\
} while (0)

#define SCTP_INP_WLOCK(_inp)	do {					\
	EnterCriticalSection(&(_inp)->inp_mtx);				\
} while (0)
#endif


#define SCTP_TCB_SEND_LOCK_INIT(_tcb) \
	InitializeCriticalSection(&(_tcb)->tcb_send_mtx)

#define SCTP_TCB_SEND_LOCK_DESTROY(_tcb) \
	DeleteCriticalSection(&(_tcb)->tcb_send_mtx)

#define SCTP_TCB_SEND_LOCK(_tcb) do { \
	EnterCriticalSection(&(_tcb)->tcb_send_mtx); \
} while (0)

#define SCTP_TCB_SEND_UNLOCK(_tcb) \
	LeaveCriticalSection(&(_tcb)->tcb_send_mtx)

#define SCTP_INP_INCR_REF(_inp) atomic_add_int(&((_inp)->refcount), 1)
#define SCTP_INP_DECR_REF(_inp) atomic_add_int(&((_inp)->refcount), -1)

#ifdef SCTP_LOCK_LOGGING
#define SCTP_ASOC_CREATE_LOCK(_inp) do {				\
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) sctp_log_lock(_inp, (struct sctp_tcb *)NULL, SCTP_LOG_LOCK_CREATE); \
	EnterCriticalSection(&(_inp)->inp_create_mtx);			\
} while (0)
#else
#define SCTP_ASOC_CREATE_LOCK(_inp) do {				\
	EnterCriticalSection(&(_inp)->inp_create_mtx);			\
} while (0)
#endif

#define SCTP_INP_RUNLOCK(_inp) \
	LeaveCriticalSection(&(_inp)->inp_mtx)
#define SCTP_INP_WUNLOCK(_inp) \
	LeaveCriticalSection(&(_inp)->inp_mtx)
#define SCTP_ASOC_CREATE_UNLOCK(_inp) \
	LeaveCriticalSection(&(_inp)->inp_create_mtx)

/*
 * For the majority of things (once we have found the association) we will
 * lock the actual association mutex. This will protect all the association
 * level queues and streams and such. We will need to lock the socket layer
 * when we stuff data up into the receiving sb_mb. I.e. we will need to do an
 * extra SOCKBUF_LOCK(&so->so_rcv) even though the association is locked.
 */
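
/*
 * Illustrative nesting only (hypothetical receive path, not part of this
 * header): the association lock is taken first and the receive socket
 * buffer lock is taken in addition when data is handed to the socket, e.g.
 *
 *	SCTP_TCB_LOCK(stcb);
 *	SOCKBUF_LOCK(&so->so_rcv);
 *	... append data for the application ...
 *	SOCKBUF_UNLOCK(&so->so_rcv);
 *	SCTP_TCB_UNLOCK(stcb);
 */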

#define SCTP_TCB_LOCK_INIT(_tcb) \
	InitializeCriticalSection(&(_tcb)->tcb_mtx)

#define SCTP_TCB_LOCK_DESTROY(_tcb) \
	DeleteCriticalSection(&(_tcb)->tcb_mtx)

#ifdef SCTP_LOCK_LOGGING
#define SCTP_TCB_LOCK(_tcb) do {					\
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) sctp_log_lock(_tcb->sctp_ep, _tcb, SCTP_LOG_LOCK_TCB); \
	EnterCriticalSection(&(_tcb)->tcb_mtx);				\
} while (0)

#else
#define SCTP_TCB_LOCK(_tcb) do {					\
	EnterCriticalSection(&(_tcb)->tcb_mtx);				\
} while (0)
#endif

#define SCTP_TCB_TRYLOCK(_tcb) (TryEnterCriticalSection(&(_tcb)->tcb_mtx))

#define SCTP_TCB_UNLOCK(_tcb) do { \
	LeaveCriticalSection(&(_tcb)->tcb_mtx); \
} while (0)

#define SCTP_TCB_LOCK_ASSERT(_tcb)

#else /* all Userspaces except Windows */
#define SCTP_WQ_ADDR_INIT() \
	(void)pthread_mutex_init(&SCTP_BASE_INFO(wq_addr_mtx), NULL)
#define SCTP_WQ_ADDR_DESTROY() \
	(void)pthread_mutex_destroy(&SCTP_BASE_INFO(wq_addr_mtx))
#define SCTP_WQ_ADDR_LOCK() \
	(void)pthread_mutex_lock(&SCTP_BASE_INFO(wq_addr_mtx))
#define SCTP_WQ_ADDR_UNLOCK() \
	(void)pthread_mutex_unlock(&SCTP_BASE_INFO(wq_addr_mtx))


#define SCTP_INP_INFO_LOCK_INIT() \
	(void)pthread_mutex_init(&SCTP_BASE_INFO(ipi_ep_mtx), NULL)
#define SCTP_INP_INFO_LOCK_DESTROY() \
	(void)pthread_mutex_destroy(&SCTP_BASE_INFO(ipi_ep_mtx))
#define SCTP_INP_INFO_RLOCK() \
	(void)pthread_mutex_lock(&SCTP_BASE_INFO(ipi_ep_mtx))
#define SCTP_INP_INFO_TRYLOCK() \
	(!(pthread_mutex_trylock(&SCTP_BASE_INFO(ipi_ep_mtx))))
#define SCTP_INP_INFO_WLOCK() \
	(void)pthread_mutex_lock(&SCTP_BASE_INFO(ipi_ep_mtx))
#define SCTP_INP_INFO_RUNLOCK() \
	(void)pthread_mutex_unlock(&SCTP_BASE_INFO(ipi_ep_mtx))
#define SCTP_INP_INFO_WUNLOCK() \
	(void)pthread_mutex_unlock(&SCTP_BASE_INFO(ipi_ep_mtx))

#define SCTP_IP_PKTLOG_INIT() \
	(void)pthread_mutex_init(&SCTP_BASE_INFO(ipi_pktlog_mtx), NULL)
#define SCTP_IP_PKTLOG_DESTROY() \
	(void)pthread_mutex_destroy(&SCTP_BASE_INFO(ipi_pktlog_mtx))
#define SCTP_IP_PKTLOG_LOCK() \
	(void)pthread_mutex_lock(&SCTP_BASE_INFO(ipi_pktlog_mtx))
#define SCTP_IP_PKTLOG_UNLOCK() \
	(void)pthread_mutex_unlock(&SCTP_BASE_INFO(ipi_pktlog_mtx))


/*
 * The INP locks are used to lock an SCTP endpoint. For example, if we
 * want to change something at the endpoint level, such as the random
 * store or the cookie secrets, we take the INP lock.
 */
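
/*
 * Illustrative sketch only (hypothetical reader; the field and entry names
 * used here are assumptions for illustration, not definitions from this
 * header): a walk of the endpoint's read queue would be bracketed by the
 * read-queue lock, e.g.
 *
 *	SCTP_INP_READ_LOCK(inp);
 *	TAILQ_FOREACH(control, &inp->read_queue, next) {
 *		... inspect queued data ...
 *	}
 *	SCTP_INP_READ_UNLOCK(inp);
 */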
#define SCTP_INP_READ_INIT(_inp) \
	(void)pthread_mutex_init(&(_inp)->inp_rdata_mtx, NULL)

#define SCTP_INP_READ_DESTROY(_inp) \
	(void)pthread_mutex_destroy(&(_inp)->inp_rdata_mtx)

#define SCTP_INP_READ_LOCK(_inp) do { \
	(void)pthread_mutex_lock(&(_inp)->inp_rdata_mtx); \
} while (0)

#define SCTP_INP_READ_UNLOCK(_inp) \
	(void)pthread_mutex_unlock(&(_inp)->inp_rdata_mtx)

#define SCTP_INP_LOCK_INIT(_inp) \
	(void)pthread_mutex_init(&(_inp)->inp_mtx, NULL)

#define SCTP_ASOC_CREATE_LOCK_INIT(_inp) \
	(void)pthread_mutex_init(&(_inp)->inp_create_mtx, NULL)

#define SCTP_INP_LOCK_DESTROY(_inp) \
	(void)pthread_mutex_destroy(&(_inp)->inp_mtx)

#define SCTP_ASOC_CREATE_LOCK_DESTROY(_inp) \
	(void)pthread_mutex_destroy(&(_inp)->inp_create_mtx)

#ifdef SCTP_LOCK_LOGGING
#define SCTP_INP_RLOCK(_inp)	do {					\
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) sctp_log_lock(_inp, (struct sctp_tcb *)NULL, SCTP_LOG_LOCK_INP); \
	(void)pthread_mutex_lock(&(_inp)->inp_mtx);			\
} while (0)

#define SCTP_INP_WLOCK(_inp)	do {					\
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) sctp_log_lock(_inp, (struct sctp_tcb *)NULL, SCTP_LOG_LOCK_INP); \
	(void)pthread_mutex_lock(&(_inp)->inp_mtx);			\
} while (0)

#else

#define SCTP_INP_RLOCK(_inp)	do {					\
	(void)pthread_mutex_lock(&(_inp)->inp_mtx);			\
} while (0)

#define SCTP_INP_WLOCK(_inp)	do {					\
	(void)pthread_mutex_lock(&(_inp)->inp_mtx);			\
} while (0)
#endif


#define SCTP_TCB_SEND_LOCK_INIT(_tcb) \
	(void)pthread_mutex_init(&(_tcb)->tcb_send_mtx, NULL)

#define SCTP_TCB_SEND_LOCK_DESTROY(_tcb) \
	(void)pthread_mutex_destroy(&(_tcb)->tcb_send_mtx)

#define SCTP_TCB_SEND_LOCK(_tcb) do { \
	(void)pthread_mutex_lock(&(_tcb)->tcb_send_mtx); \
} while (0)

#define SCTP_TCB_SEND_UNLOCK(_tcb) \
	(void)pthread_mutex_unlock(&(_tcb)->tcb_send_mtx)

#define SCTP_INP_INCR_REF(_inp) atomic_add_int(&((_inp)->refcount), 1)
#define SCTP_INP_DECR_REF(_inp) atomic_add_int(&((_inp)->refcount), -1)

#ifdef SCTP_LOCK_LOGGING
#define SCTP_ASOC_CREATE_LOCK(_inp) do {				\
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) sctp_log_lock(_inp, (struct sctp_tcb *)NULL, SCTP_LOG_LOCK_CREATE); \
	(void)pthread_mutex_lock(&(_inp)->inp_create_mtx);		\
} while (0)
#else
#define SCTP_ASOC_CREATE_LOCK(_inp) do {				\
	(void)pthread_mutex_lock(&(_inp)->inp_create_mtx);		\
} while (0)
#endif

#define SCTP_INP_RUNLOCK(_inp) \
	(void)pthread_mutex_unlock(&(_inp)->inp_mtx)
#define SCTP_INP_WUNLOCK(_inp) \
	(void)pthread_mutex_unlock(&(_inp)->inp_mtx)
#define SCTP_ASOC_CREATE_UNLOCK(_inp) \
	(void)pthread_mutex_unlock(&(_inp)->inp_create_mtx)

/*
 * For the majority of things (once we have found the association) we will
 * lock the actual association mutex. This will protect all the association
 * level queues and streams and such. We will need to lock the socket layer
 * when we stuff data up into the receiving sb_mb. I.e. we will need to do an
 * extra SOCKBUF_LOCK(&so->so_rcv) even though the association is locked.
 */

#define SCTP_TCB_LOCK_INIT(_tcb) \
	(void)pthread_mutex_init(&(_tcb)->tcb_mtx, NULL)

#define SCTP_TCB_LOCK_DESTROY(_tcb) \
	(void)pthread_mutex_destroy(&(_tcb)->tcb_mtx)

#ifdef SCTP_LOCK_LOGGING
#define SCTP_TCB_LOCK(_tcb) do {					\
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) sctp_log_lock(_tcb->sctp_ep, _tcb, SCTP_LOG_LOCK_TCB); \
	(void)pthread_mutex_lock(&(_tcb)->tcb_mtx);			\
} while (0)

#else
#define SCTP_TCB_LOCK(_tcb) do {					\
	(void)pthread_mutex_lock(&(_tcb)->tcb_mtx);			\
} while (0)
#endif

#define SCTP_TCB_TRYLOCK(_tcb) (!(pthread_mutex_trylock(&(_tcb)->tcb_mtx)))

#define SCTP_TCB_UNLOCK(_tcb) (void)pthread_mutex_unlock(&(_tcb)->tcb_mtx)
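
/*
 * Illustrative sketch only (hypothetical caller, not part of this header):
 * both the Windows and the pthread implementation of SCTP_TCB_TRYLOCK()
 * evaluate to non-zero on success, so conditional locking can be written
 * as, e.g.
 *
 *	if (SCTP_TCB_TRYLOCK(stcb)) {
 *		... work on the association ...
 *		SCTP_TCB_UNLOCK(stcb);
 *	}
 */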

#define SCTP_TCB_LOCK_ASSERT(_tcb)
#endif

#endif /* SCTP_PER_SOCKET_LOCKING */


/*
 * common locks
 */

/* copied over to compile */
#define SCTP_INP_LOCK_CONTENDED(_inp) (0) /* Don't know if this is possible */
#define SCTP_INP_READ_CONTENDED(_inp) (0) /* Don't know if this is possible */
#define SCTP_ASOC_CREATE_LOCK_CONTENDED(_inp) (0) /* Don't know if this is possible */


/* socket locks */

#if defined(__Userspace__)
#if defined(__Userspace_os_Windows)
#define SOCKBUF_LOCK_ASSERT(_so_buf)
#define SOCKBUF_LOCK(_so_buf) EnterCriticalSection(&(_so_buf)->sb_mtx)
#define SOCKBUF_UNLOCK(_so_buf) LeaveCriticalSection(&(_so_buf)->sb_mtx)
#define SOCK_LOCK(_so) SOCKBUF_LOCK(&(_so)->so_rcv)
#define SOCK_UNLOCK(_so) SOCKBUF_UNLOCK(&(_so)->so_rcv)
#else
#define SOCKBUF_LOCK_ASSERT(_so_buf) KASSERT(pthread_mutex_trylock(SOCKBUF_MTX(_so_buf)) == EBUSY, ("%s: socket buffer not locked", __func__))
#define SOCKBUF_LOCK(_so_buf) pthread_mutex_lock(SOCKBUF_MTX(_so_buf))
#define SOCKBUF_UNLOCK(_so_buf) pthread_mutex_unlock(SOCKBUF_MTX(_so_buf))
#define SOCK_LOCK(_so) SOCKBUF_LOCK(&(_so)->so_rcv)
#define SOCK_UNLOCK(_so) SOCKBUF_UNLOCK(&(_so)->so_rcv)
#endif
#else
#define SOCK_LOCK(_so)
#define SOCK_UNLOCK(_so)
#define SOCKBUF_LOCK(_so_buf)
#define SOCKBUF_UNLOCK(_so_buf)
#define SOCKBUF_LOCK_ASSERT(_so_buf)
#endif

#define SCTP_STATLOG_INIT_LOCK()
#define SCTP_STATLOG_LOCK()
#define SCTP_STATLOG_UNLOCK()
#define SCTP_STATLOG_DESTROY()

#if defined(__Userspace_os_Windows)
/* address list locks */
#define SCTP_IPI_ADDR_INIT() \
	InitializeCriticalSection(&SCTP_BASE_INFO(ipi_addr_mtx))
#define SCTP_IPI_ADDR_DESTROY() \
	DeleteCriticalSection(&SCTP_BASE_INFO(ipi_addr_mtx))

#define SCTP_IPI_ADDR_RLOCK()						\
	do {								\
		EnterCriticalSection(&SCTP_BASE_INFO(ipi_addr_mtx));	\
	} while (0)
#define SCTP_IPI_ADDR_RUNLOCK() \
	LeaveCriticalSection(&SCTP_BASE_INFO(ipi_addr_mtx))

#define SCTP_IPI_ADDR_WLOCK()						\
	do {								\
		EnterCriticalSection(&SCTP_BASE_INFO(ipi_addr_mtx));	\
	} while (0)
#define SCTP_IPI_ADDR_WUNLOCK() \
	LeaveCriticalSection(&SCTP_BASE_INFO(ipi_addr_mtx))


/* iterator locks */
#define SCTP_ITERATOR_LOCK_INIT() \
	InitializeCriticalSection(&sctp_it_ctl.it_mtx)

#define SCTP_ITERATOR_LOCK()						\
	do {								\
		EnterCriticalSection(&sctp_it_ctl.it_mtx);		\
	} while (0)

#define SCTP_ITERATOR_UNLOCK() \
	LeaveCriticalSection(&sctp_it_ctl.it_mtx)

#define SCTP_ITERATOR_LOCK_DESTROY() \
	DeleteCriticalSection(&sctp_it_ctl.it_mtx)


#define SCTP_IPI_ITERATOR_WQ_INIT() \
	InitializeCriticalSection(&sctp_it_ctl.ipi_iterator_wq_mtx)

#define SCTP_IPI_ITERATOR_WQ_DESTROY() \
	DeleteCriticalSection(&sctp_it_ctl.ipi_iterator_wq_mtx)

#define SCTP_IPI_ITERATOR_WQ_LOCK() \
	do { \
		EnterCriticalSection(&sctp_it_ctl.ipi_iterator_wq_mtx); \
	} while (0)

#define SCTP_IPI_ITERATOR_WQ_UNLOCK() \
	LeaveCriticalSection(&sctp_it_ctl.ipi_iterator_wq_mtx)

#else /* end of __Userspace_os_Windows */
/* address list locks */
#define SCTP_IPI_ADDR_INIT() \
	(void)pthread_mutex_init(&SCTP_BASE_INFO(ipi_addr_mtx), NULL)
#define SCTP_IPI_ADDR_DESTROY() \
	(void)pthread_mutex_destroy(&SCTP_BASE_INFO(ipi_addr_mtx))

#define SCTP_IPI_ADDR_RLOCK()						\
	do {								\
		(void)pthread_mutex_lock(&SCTP_BASE_INFO(ipi_addr_mtx)); \
	} while (0)
#define SCTP_IPI_ADDR_RUNLOCK() \
	(void)pthread_mutex_unlock(&SCTP_BASE_INFO(ipi_addr_mtx))

#define SCTP_IPI_ADDR_WLOCK()						\
	do {								\
		(void)pthread_mutex_lock(&SCTP_BASE_INFO(ipi_addr_mtx)); \
	} while (0)
#define SCTP_IPI_ADDR_WUNLOCK() \
	(void)pthread_mutex_unlock(&SCTP_BASE_INFO(ipi_addr_mtx))


/* iterator locks */
#define SCTP_ITERATOR_LOCK_INIT() \
	(void)pthread_mutex_init(&sctp_it_ctl.it_mtx, NULL)

#define SCTP_ITERATOR_LOCK()						\
	do {								\
		(void)pthread_mutex_lock(&sctp_it_ctl.it_mtx);		\
	} while (0)

#define SCTP_ITERATOR_UNLOCK() \
	(void)pthread_mutex_unlock(&sctp_it_ctl.it_mtx)

#define SCTP_ITERATOR_LOCK_DESTROY() \
	(void)pthread_mutex_destroy(&sctp_it_ctl.it_mtx)


#define SCTP_IPI_ITERATOR_WQ_INIT() \
	(void)pthread_mutex_init(&sctp_it_ctl.ipi_iterator_wq_mtx, NULL)

#define SCTP_IPI_ITERATOR_WQ_DESTROY() \
	(void)pthread_mutex_destroy(&sctp_it_ctl.ipi_iterator_wq_mtx)

#define SCTP_IPI_ITERATOR_WQ_LOCK() \
	do { \
		(void)pthread_mutex_lock(&sctp_it_ctl.ipi_iterator_wq_mtx); \
	} while (0)

#define SCTP_IPI_ITERATOR_WQ_UNLOCK() \
	(void)pthread_mutex_unlock(&sctp_it_ctl.ipi_iterator_wq_mtx)
#endif

#define SCTP_INCR_EP_COUNT() \
	do { \
		atomic_add_int(&SCTP_BASE_INFO(ipi_count_ep), 1); \
	} while (0)

#define SCTP_DECR_EP_COUNT() \
	do { \
		atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_ep), 1); \
	} while (0)

#define SCTP_INCR_ASOC_COUNT() \
	do { \
		atomic_add_int(&SCTP_BASE_INFO(ipi_count_asoc), 1); \
	} while (0)

#define SCTP_DECR_ASOC_COUNT() \
	do { \
		atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_asoc), 1); \
	} while (0)

#define SCTP_INCR_LADDR_COUNT() \
	do { \
		atomic_add_int(&SCTP_BASE_INFO(ipi_count_laddr), 1); \
	} while (0)

#define SCTP_DECR_LADDR_COUNT() \
	do { \
		atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_laddr), 1); \
	} while (0)

#define SCTP_INCR_RADDR_COUNT() \
	do { \
		atomic_add_int(&SCTP_BASE_INFO(ipi_count_raddr), 1); \
	} while (0)

#define SCTP_DECR_RADDR_COUNT() \
	do { \
		atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_raddr), 1); \
	} while (0)

#define SCTP_INCR_CHK_COUNT() \
	do { \
		atomic_add_int(&SCTP_BASE_INFO(ipi_count_chunk), 1); \
	} while (0)

#define SCTP_DECR_CHK_COUNT() \
	do { \
		atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_chunk), 1); \
	} while (0)

#define SCTP_INCR_READQ_COUNT() \
	do { \
		atomic_add_int(&SCTP_BASE_INFO(ipi_count_readq), 1); \
	} while (0)

#define SCTP_DECR_READQ_COUNT() \
	do { \
		atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_readq), 1); \
	} while (0)

#define SCTP_INCR_STRMOQ_COUNT() \
	do { \
		atomic_add_int(&SCTP_BASE_INFO(ipi_count_strmoq), 1); \
	} while (0)

#define SCTP_DECR_STRMOQ_COUNT() \
	do { \
		atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_strmoq), 1); \
	} while (0)

#endif