/*-
 * Copyright (c) 1982, 1986, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

/* __Userspace__ version of <sys/socketvar.h> goes here. */

#ifndef _USER_SOCKETVAR_H_
#define _USER_SOCKETVAR_H_

#if defined(__APPLE__)
#include <sys/types.h>
#include <unistd.h>
#endif

/* #include <sys/selinfo.h> */ /*__Userspace__ alternative?*/	/* for struct selinfo */
/* #include <sys/_lock.h>  was 0 byte file */
/* #include <sys/_mutex.h> was 0 byte file */
/* #include <sys/_sx.h> */ /*__Userspace__ alternative?*/
#if !defined(__DragonFly__) && !defined(__FreeBSD__) && !defined(__NetBSD__) && !defined(_WIN32) && !defined(__native_client__)
#include <sys/uio.h>
#endif
#define SOCK_MAXADDRLEN 255
#if !defined(MSG_NOTIFICATION)
#define MSG_NOTIFICATION 0x2000         /* SCTP notification */
#endif
#define SCTP_SO_LINGER     0x0001
#define SCTP_SO_ACCEPTCONN 0x0002
#define SS_CANTRCVMORE 0x020
#define SS_CANTSENDMORE 0x010

#if defined(__APPLE__) || defined(__DragonFly__) || defined(__FreeBSD__) || defined(__OpenBSD__) || defined(_WIN32) || defined(__native_client__)
#define UIO_MAXIOV 1024
#define ERESTART (-1)
#endif

#if !defined(__APPLE__) && !defined(__NetBSD__) && !defined(__OpenBSD__)
enum	uio_rw { UIO_READ, UIO_WRITE };
#endif

#if !defined(__NetBSD__) && !defined(__OpenBSD__)
/* Segment flag values. */
enum uio_seg {
	UIO_USERSPACE,		/* from user data space */
	UIO_SYSSPACE		/* from system space */
};
#endif

struct proc {
	int stub; /* struct proc is a dummy for __Userspace__ */
};

MALLOC_DECLARE(M_ACCF);
MALLOC_DECLARE(M_PCB);
MALLOC_DECLARE(M_SONAME);

/* __Userspace__ Are these all the fields we need?
 * Removing struct thread *uio_td;    owner field
 */
struct uio {
	struct	iovec *uio_iov;		/* scatter/gather list */
	int	uio_iovcnt;		/* length of scatter/gather list */
	off_t	uio_offset;		/* offset in target object */
	ssize_t	uio_resid;		/* remaining bytes to process */
	enum	uio_seg uio_segflg;	/* address space */
	enum	uio_rw uio_rw;		/* operation */
};
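
/*
 * Example: a minimal sketch of how a struct uio might be filled in for a
 * copy with uiomove() (declared later in this header).  The names "buf"
 * and "src" are illustrative only.
 *
 *	char buf[128];
 *	struct iovec iov = { buf, sizeof(buf) };
 *	struct uio auio;
 *
 *	auio.uio_iov = &iov;
 *	auio.uio_iovcnt = 1;
 *	auio.uio_offset = 0;
 *	auio.uio_resid = sizeof(buf);
 *	auio.uio_segflg = UIO_USERSPACE;
 *	auio.uio_rw = UIO_READ;
 *	int error = uiomove(src, (int)sizeof(buf), &auio);
 *
 * On success, uio_resid has been decremented by the number of bytes copied.
 */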


/* __Userspace__ */

/*
 * Kernel structure per socket.
 * Contains send and receive buffer queues,
 * handle on protocol and pointer to protocol
 * private data and error information.
 */
#if defined(_WIN32)
#define AF_ROUTE  17
#if !defined(__MINGW32__)
typedef __int32 pid_t;
#endif
typedef unsigned __int32 uid_t;
enum sigType {
	SIGNAL = 0,
	BROADCAST = 1,
	MAX_EVENTS = 2
};
#endif

/*-
 * Locking key to struct socket:
 * (a) constant after allocation, no locking required.
 * (b) locked by SOCK_LOCK(so).
 * (c) locked by SOCKBUF_LOCK(&so->so_rcv).
 * (d) locked by SOCKBUF_LOCK(&so->so_snd).
 * (e) locked by ACCEPT_LOCK().
 * (f) not locked since integer reads/writes are atomic.
 * (g) used only as a sleep/wakeup address, no value.
 * (h) locked by global mutex so_global_mtx.
 */
struct socket {
	int	so_count;		/* (b) reference count */
	short	so_type;		/* (a) generic type, see socket.h */
	short	so_options;		/* from socket call, see socket.h */
	short	so_linger;		/* time to linger while closing */
	short	so_state;		/* (b) internal state flags SS_* */
	int	so_qstate;		/* (e) internal state flags SQ_* */
	void	*so_pcb;		/* protocol control block */
	int	so_dom;			/* domain (address family) of the socket */
/*
 * Variables for connection queuing.
 * Socket where accepts occur is so_head in all subsidiary sockets.
 * If so_head is 0, socket is not related to an accept.
 * For head socket so_incomp queues partially completed connections,
 * while so_comp is a queue of connections ready to be accepted.
 * If a connection is aborted and it has so_head set, then
 * it has to be pulled out of either so_incomp or so_comp.
 * We allow connections to queue up based on current queue lengths
 * and limit on number of queued connections for this socket.
 */
	struct	socket *so_head;	/* (e) back pointer to listen socket */
	TAILQ_HEAD(, socket) so_incomp;	/* (e) queue of partial unaccepted connections */
	TAILQ_HEAD(, socket) so_comp;	/* (e) queue of complete unaccepted connections */
	TAILQ_ENTRY(socket) so_list;	/* (e) list of unaccepted connections */
	u_short	so_qlen;		/* (e) number of unaccepted connections */
	u_short	so_incqlen;		/* (e) number of unaccepted incomplete
					   connections */
	u_short	so_qlimit;		/* (e) max number queued connections */
	short	so_timeo;		/* (g) connection timeout */
	userland_cond_t timeo_cond;	/* condition variable used by wakeup() */

	u_short	so_error;		/* (f) error affecting connection */
	struct	sigio *so_sigio;	/* [sg] information for async I/O or
					   out of band data (SIGURG) */
	u_long	so_oobmark;		/* (c) chars to oob mark */
	TAILQ_HEAD(, aiocblist) so_aiojobq; /* AIO ops waiting on socket */
/*
 * Variables for socket buffering.
 */
	struct sockbuf {
		/* __Userspace__ Many of these fields may not be required
		 * for the SCTP stack.  The following are commented out.
		 * A pthread mutex and condition variable are included for
		 * use by sbwait(), sorwakeup() and sowwakeup().
		 */
		/* struct	selinfo sb_sel;*/ /* process selecting read/write */
		/* struct	mtx sb_mtx;*/	/* sockbuf lock */
		/* struct	sx sb_sx;*/	/* prevent I/O interlacing */
		userland_cond_t sb_cond; /* sockbuf condition variable */
		userland_mutex_t sb_mtx; /* sockbuf lock associated with sb_cond */
		short	sb_state;	/* (c/d) socket state on sockbuf */
#define	sb_startzero	sb_mb
		struct	mbuf *sb_mb;	/* (c/d) the mbuf chain */
		struct	mbuf *sb_mbtail; /* (c/d) the last mbuf in the chain */
		struct	mbuf *sb_lastrecord;	/* (c/d) first mbuf of last
						 * record in socket buffer */
		struct	mbuf *sb_sndptr; /* (c/d) pointer into mbuf chain */
		u_int	sb_sndptroff;	/* (c/d) byte offset of ptr into chain */
		u_int	sb_cc;		/* (c/d) actual chars in buffer */
		u_int	sb_hiwat;	/* (c/d) max actual char count */
		u_int	sb_mbcnt;	/* (c/d) chars of mbufs used */
		u_int	sb_mbmax;	/* (c/d) max chars of mbufs to use */
		u_int	sb_ctl;		/* (c/d) non-data chars in buffer */
		int	sb_lowat;	/* (c/d) low water mark */
		int	sb_timeo;	/* (c/d) timeout for read/write */
		short	sb_flags;	/* (c/d) flags, see below */
	} so_rcv, so_snd;
/*
 * Constants for socket buffer sizing.
 */
#define	SB_MAX		(256*1024)	/* default for max chars in sockbuf */
#define	SB_RAW		(64*1024*2)	/* aligning so->so_rcv.sb_hiwat with the
					   receive buffer size of a raw socket */
/*
 * Constants for sb_flags field of struct sockbuf.
 */
#define	SB_WAIT		0x04		/* someone is waiting for data/space */
#define	SB_SEL		0x08		/* someone is selecting */
#define	SB_ASYNC	0x10		/* ASYNC I/O, need signals */
#define	SB_UPCALL	0x20		/* someone wants an upcall */
#define	SB_NOINTR	0x40		/* operations not interruptible */
#define	SB_AIO		0x80		/* AIO operations queued */
#define	SB_KNOTE	0x100		/* kernel note attached */
#define	SB_AUTOSIZE	0x800		/* automatically size socket buffer */

	void	(*so_upcall)(struct socket *, void *, int);
	void	*so_upcallarg;
	struct	ucred *so_cred;		/* (a) user credentials */
	struct	label *so_label;	/* (b) MAC label for socket */
	struct	label *so_peerlabel;	/* (b) cached MAC label for peer */
	/* NB: generation count must not be first. */
	uint32_t so_gencnt;		/* (h) generation count */
	void	*so_emuldata;		/* (b) private data for emulators */
	struct so_accf {
		struct	accept_filter *so_accept_filter;
		void	*so_accept_filter_arg;	/* saved filter args */
		char	*so_accept_filter_str;	/* saved user args */
	} *so_accf;
};

#define SB_EMPTY_FIXUP(sb) do {						\
	if ((sb)->sb_mb == NULL) {					\
		(sb)->sb_mbtail = NULL;					\
		(sb)->sb_lastrecord = NULL;				\
	}								\
} while (/*CONSTCOND*/0)
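
/*
 * Example: a hedged sketch of when SB_EMPTY_FIXUP() is meant to run.  After
 * code dequeues mbufs from sb_mb under the sockbuf lock, the tail and
 * last-record pointers must be cleared if the chain became empty:
 *
 *	SOCKBUF_LOCK(&so->so_rcv);
 *	... detach mbufs from so->so_rcv.sb_mb and update sb_mb ...
 *	SB_EMPTY_FIXUP(&so->so_rcv);
 *	SOCKBUF_UNLOCK(&so->so_rcv);
 */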

/*
 * Global accept mutex to serialize access to accept queues and
 * fields associated with multiple sockets.  This allows us to
 * avoid defining a lock order between listen and accept sockets
 * until such time as it proves to be a good idea.
 */
#if defined(_WIN32)
extern userland_mutex_t accept_mtx;
extern userland_cond_t accept_cond;
#define ACCEPT_LOCK_ASSERT()
#define	ACCEPT_LOCK() do { \
	EnterCriticalSection(&accept_mtx); \
} while (0)
#define	ACCEPT_UNLOCK()	do { \
	LeaveCriticalSection(&accept_mtx); \
} while (0)
#define	ACCEPT_UNLOCK_ASSERT()
#else
extern userland_mutex_t accept_mtx;
extern userland_cond_t accept_cond;
#ifdef INVARIANTS
#define	ACCEPT_LOCK()	KASSERT(pthread_mutex_lock(&accept_mtx) == 0, ("%s: accept_mtx already locked", __func__))
#define	ACCEPT_UNLOCK()	KASSERT(pthread_mutex_unlock(&accept_mtx) == 0, ("%s: accept_mtx not locked", __func__))
#else
#define	ACCEPT_LOCK()	(void)pthread_mutex_lock(&accept_mtx)
#define	ACCEPT_UNLOCK()	(void)pthread_mutex_unlock(&accept_mtx)
#endif
#define	ACCEPT_LOCK_ASSERT() \
	KASSERT(pthread_mutex_trylock(&accept_mtx) == EBUSY, ("%s: accept_mtx not locked", __func__))
#define	ACCEPT_UNLOCK_ASSERT() do {						\
	KASSERT(pthread_mutex_trylock(&accept_mtx) == 0, ("%s: accept_mtx locked", __func__));	\
	(void)pthread_mutex_unlock(&accept_mtx);				\
} while (0)
#endif
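
/*
 * Example: a minimal sketch of the intended use of the accept queue macros
 * above, roughly as an accept() path might drain so_comp (error handling
 * omitted; variable names are illustrative):
 *
 *	ACCEPT_LOCK();
 *	while (TAILQ_EMPTY(&head->so_comp) && head->so_error == 0) {
 *		... wait on accept_cond / accept_mtx ...
 *	}
 *	so = TAILQ_FIRST(&head->so_comp);
 *	TAILQ_REMOVE(&head->so_comp, so, so_list);
 *	head->so_qlen--;
 *	so->so_qstate &= ~SQ_COMP;
 *	so->so_head = NULL;
 *	ACCEPT_UNLOCK();
 */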

/*
 * Per-socket buffer mutex used to protect most fields in the socket
 * buffer.
 */
#define	SOCKBUF_MTX(_sb) (&(_sb)->sb_mtx)
#if defined(_WIN32)
#define SOCKBUF_LOCK_INIT(_sb, _name) \
	InitializeCriticalSection(SOCKBUF_MTX(_sb))
#define SOCKBUF_LOCK_DESTROY(_sb) DeleteCriticalSection(SOCKBUF_MTX(_sb))
#define SOCKBUF_COND_INIT(_sb) InitializeConditionVariable((&(_sb)->sb_cond))
#define SOCKBUF_COND_DESTROY(_sb) DeleteConditionVariable((&(_sb)->sb_cond))
#define SOCK_COND_INIT(_so) InitializeConditionVariable((&(_so)->timeo_cond))
#define SOCK_COND_DESTROY(_so) DeleteConditionVariable((&(_so)->timeo_cond))
#define SOCK_COND(_so) (&(_so)->timeo_cond)
#else
#ifdef INVARIANTS
#define SOCKBUF_LOCK_INIT(_sb, _name) do {                                 \
	pthread_mutexattr_t mutex_attr;                                    \
	                                                                   \
	pthread_mutexattr_init(&mutex_attr);                               \
	pthread_mutexattr_settype(&mutex_attr, PTHREAD_MUTEX_ERRORCHECK);  \
	pthread_mutex_init(SOCKBUF_MTX(_sb), &mutex_attr);                 \
	pthread_mutexattr_destroy(&mutex_attr);                            \
} while (0)
#else
#define SOCKBUF_LOCK_INIT(_sb, _name) \
	pthread_mutex_init(SOCKBUF_MTX(_sb), NULL)
#endif
#define SOCKBUF_LOCK_DESTROY(_sb) pthread_mutex_destroy(SOCKBUF_MTX(_sb))
#define SOCKBUF_COND_INIT(_sb) pthread_cond_init((&(_sb)->sb_cond), NULL)
#define SOCKBUF_COND_DESTROY(_sb) pthread_cond_destroy((&(_sb)->sb_cond))
#define SOCK_COND_INIT(_so) pthread_cond_init((&(_so)->timeo_cond), NULL)
#define SOCK_COND_DESTROY(_so) pthread_cond_destroy((&(_so)->timeo_cond))
#define SOCK_COND(_so) (&(_so)->timeo_cond)
#endif
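
/*
 * Example: a minimal sketch of how the sockbuf mutex and condition variable
 * pair is expected to be used for an sbwait()-style wait for readable data
 * on the non-Windows build (the predicate shown is illustrative):
 *
 *	(void)pthread_mutex_lock(SOCKBUF_MTX(&so->so_rcv));
 *	while (so->so_rcv.sb_cc < (u_int)so->so_rcv.sb_lowat &&
 *	       (so->so_rcv.sb_state & SBS_CANTRCVMORE) == 0) {
 *		pthread_cond_wait(&so->so_rcv.sb_cond,
 *		                  SOCKBUF_MTX(&so->so_rcv));
 *	}
 *	(void)pthread_mutex_unlock(SOCKBUF_MTX(&so->so_rcv));
 */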
/*__Userspace__ SOCKBUF_LOCK(_sb) is now defined in netinet/sctp_process_lock.h */

/* #define	SOCKBUF_OWNED(_sb)		mtx_owned(SOCKBUF_MTX(_sb)) unused */
/*__Userspace__ SOCKBUF_UNLOCK(_sb) is now defined in netinet/sctp_process_lock.h */

/*__Userspace__ SOCKBUF_LOCK_ASSERT(_sb) is now defined in netinet/sctp_process_lock.h */

/* #define	SOCKBUF_UNLOCK_ASSERT(_sb)	mtx_assert(SOCKBUF_MTX(_sb), MA_NOTOWNED)   unused */

/*
 * Per-socket mutex: we reuse the receive socket buffer mutex for space
 * efficiency.  This decision should probably be revisited as we optimize
 * locking for the socket code.
 */
#define	SOCK_MTX(_so)			SOCKBUF_MTX(&(_so)->so_rcv)
/*__Userspace__ SOCK_LOCK(_so) is now defined in netinet/sctp_process_lock.h */

/* #define	SOCK_OWNED(_so)			SOCKBUF_OWNED(&(_so)->so_rcv) unused */
/*__Userspace__ SOCK_UNLOCK(_so) is now defined in netinet/sctp_process_lock.h */

#define	SOCK_LOCK_ASSERT(_so)		SOCKBUF_LOCK_ASSERT(&(_so)->so_rcv)

/*
 * Socket state bits.
 *
 * Historically, these bits were all kept in the so_state field.  For
 * locking reasons, they are now in multiple fields, as they are
 * locked differently.  so_state maintains basic socket state protected
 * by the socket lock.  so_qstate holds information about the socket
 * accept queues.  Each socket buffer also has a state field holding
 * information relevant to that socket buffer (can't send, can't
 * receive).  Many fields will be read without locks to improve
 * performance and avoid lock order issues.  However, this approach
 * must be used with caution.
 */
#define	SS_NOFDREF		0x0001	/* no file table ref any more */
#define	SS_ISCONNECTED		0x0002	/* socket connected to a peer */
#define	SS_ISCONNECTING		0x0004	/* in process of connecting to peer */
#define	SS_ISDISCONNECTING	0x0008	/* in process of disconnecting */
#define	SS_NBIO			0x0100	/* non-blocking ops */
#define	SS_ASYNC		0x0200	/* async i/o notify */
#define	SS_ISCONFIRMING		0x0400	/* deciding to accept connection req */
#define	SS_ISDISCONNECTED	0x2000	/* socket disconnected from peer */
/*
 * Protocols can mark a socket as SS_PROTOREF to indicate that, following
 * pru_detach, they still want the socket to persist, and will free it
 * themselves when they are done.  Protocols should only ever call sofree()
 * following setting this flag in pru_detach(), and never otherwise, as
 * sofree() bypasses socket reference counting.
 */
#define	SS_PROTOREF		0x4000	/* strong protocol reference */

/*
 * Socket state bits now stored in the socket buffer state field.
 */
#define	SBS_CANTSENDMORE	0x0010	/* can't send more data to peer */
#define	SBS_CANTRCVMORE		0x0020	/* can't receive more data from peer */
#define	SBS_RCVATMARK		0x0040	/* at mark on input */

/*
 * Socket state bits stored in so_qstate.
 */
#define	SQ_INCOMP		0x0800	/* unaccepted, incomplete connection */
#define	SQ_COMP			0x1000	/* unaccepted, complete connection */

/*
 * Socket event flags
 */
#define SCTP_EVENT_READ		0x0001	/* socket is readable */
#define SCTP_EVENT_WRITE	0x0002	/* socket is writeable */
#define SCTP_EVENT_ERROR	0x0004	/* socket has an error state */


/*-------------------------------------------------------------*/
/*-------------------------------------------------------------*/
/*                   __Userspace__                             */
/*-------------------------------------------------------------*/
/*-------------------------------------------------------------*/
/* This new __Userspace__ section is where portions of the _KERNEL block
 * above are copied in, avoiding having to port the entire thing at once.
 * For function prototypes, the full bodies are in user_socket.c.
 */

/* ---------------------------------------------------------- */
/* --- function prototypes (implemented in user_socket.c) --- */
/* ---------------------------------------------------------- */
void	soisconnecting(struct socket *so);
void	soisdisconnecting(struct socket *so);
void	soisconnected(struct socket *so);
struct socket *sonewconn(struct socket *head, int connstatus);
void	socantrcvmore(struct socket *so);
void	socantsendmore(struct socket *so);
void	sofree(struct socket *so);


/* -------------- */
/* --- macros --- */
/* -------------- */

#define	soref(so) do {							\
	SOCK_LOCK_ASSERT(so);						\
	++(so)->so_count;						\
} while (0)

#define	sorele(so) do {							\
	ACCEPT_LOCK_ASSERT();						\
	SOCK_LOCK_ASSERT(so);						\
	KASSERT((so)->so_count > 0, ("sorele"));			\
	if (--(so)->so_count == 0)					\
		sofree(so);						\
	else {								\
		SOCK_UNLOCK(so);					\
		ACCEPT_UNLOCK();					\
	}								\
} while (0)
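
/*
 * Example: a hedged sketch of soref()/sorele() pairing.  A caller that keeps
 * a socket pointer takes a reference under the socket lock and later drops
 * it with both the accept and socket locks held, so that sorele() can hand
 * the socket to sofree() if the count reaches zero:
 *
 *	SOCK_LOCK(so);
 *	soref(so);
 *	SOCK_UNLOCK(so);
 *	...
 *	ACCEPT_LOCK();
 *	SOCK_LOCK(so);
 *	sorele(so);	(drops both locks, or frees the socket)
 */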


/* replacing imin with min (user_environment.h) */
#define	sbspace(sb) \
    ((long) min((int)((sb)->sb_hiwat - (sb)->sb_cc), \
	(int)((sb)->sb_mbmax - (sb)->sb_mbcnt)))
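
/*
 * Example: with sb_hiwat = 262144, sb_cc = 1000, sb_mbmax = 524288 and
 * sb_mbcnt = 2048, sbspace() evaluates to min(261144, 522240) = 261144,
 * i.e. the number of additional bytes the socket buffer will accept.
 */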

/* do we have to send all at once on a socket? */
#define	sosendallatonce(so) \
    ((so)->so_proto->pr_flags & PR_ATOMIC)

/* can we read something from so? */
#define	soreadable(so) \
    ((int)((so)->so_rcv.sb_cc) >= (so)->so_rcv.sb_lowat || \
	((so)->so_rcv.sb_state & SBS_CANTRCVMORE) || \
	!TAILQ_EMPTY(&(so)->so_comp) || (so)->so_error)

#if 0 /* original */
#define PR_CONNREQUIRED 0x04  /* from sys/protosw.h "needed" for sowriteable */
#define	sowriteable(so) \
    ((sbspace(&(so)->so_snd) >= (so)->so_snd.sb_lowat && \
	(((so)->so_state&SS_ISCONNECTED) || \
	  ((so)->so_proto->pr_flags&PR_CONNREQUIRED)==0)) || \
     ((so)->so_snd.sb_state & SBS_CANTSENDMORE) || \
     (so)->so_error)
#else  /* line with PR_CONNREQUIRED removed */
/* can we write something to so? */
#define	sowriteable(so) \
    ((sbspace(&(so)->so_snd) >= (so)->so_snd.sb_lowat && \
      (((so)->so_state&SS_ISCONNECTED))) ||              \
     ((so)->so_snd.sb_state & SBS_CANTSENDMORE) || \
     (so)->so_error)
#endif

extern void solisten_proto(struct socket *so, int backlog);
extern int solisten_proto_check(struct socket *so);
extern int sctp_listen(struct socket *so, int backlog, struct proc *p);
extern void socantrcvmore_locked(struct socket *so);
extern int sctp_bind(struct socket *so, struct sockaddr *addr);
extern int sctp6_bind(struct socket *so, struct sockaddr *addr, void *proc);
extern int sctpconn_bind(struct socket *so, struct sockaddr *addr);
extern int sctp_accept(struct socket *so, struct sockaddr **addr);
extern int sctp_attach(struct socket *so, int proto, uint32_t vrf_id);
extern int sctp6_attach(struct socket *so, int proto, uint32_t vrf_id);
extern int sctp_abort(struct socket *so);
extern int sctp6_abort(struct socket *so);
extern void sctp_close(struct socket *so);
extern int soaccept(struct socket *so, struct sockaddr **nam);
extern int solisten(struct socket *so, int backlog);
extern int soreserve(struct socket *so, u_long sndcc, u_long rcvcc);
extern void sowakeup(struct socket *so, struct sockbuf *sb);
extern void wakeup(void *ident, struct socket *so); /*__Userspace__ */
extern int uiomove(void *cp, int n, struct uio *uio);
extern int sbwait(struct sockbuf *sb);
extern int sodisconnect(struct socket *so);
extern int soconnect(struct socket *so, struct sockaddr *nam);
extern int sctp_disconnect(struct socket *so);
extern int sctp_connect(struct socket *so, struct sockaddr *addr);
extern int sctp6_connect(struct socket *so, struct sockaddr *addr);
extern int sctpconn_connect(struct socket *so, struct sockaddr *addr);
extern void sctp_finish(void);

/* ------------------------------------------------ */
/* ----- macros copied from above ----- */
/* ------------------------------------------------ */

/*
 * Do we need to notify the other side when I/O is possible?
 */
#define	sb_notify(sb)	(((sb)->sb_flags & (SB_WAIT | SB_SEL | SB_ASYNC | \
    SB_UPCALL | SB_AIO | SB_KNOTE)) != 0)


/*
 * In sorwakeup() and sowwakeup(), acquire the socket buffer lock to
 * avoid a non-atomic test-and-wakeup.  However, sowakeup is
 * responsible for releasing the lock if it is called.  We unlock only
 * if we don't call into sowakeup.  If any code is introduced that
 * directly invokes the underlying sowakeup() primitives, it must
 * maintain the same semantics.
 */
#define	sorwakeup_locked(so) do {					\
	SOCKBUF_LOCK_ASSERT(&(so)->so_rcv);				\
	if (sb_notify(&(so)->so_rcv))					\
		sowakeup((so), &(so)->so_rcv);				\
	else								\
		SOCKBUF_UNLOCK(&(so)->so_rcv);				\
} while (0)

#define	sorwakeup(so) do {						\
	SOCKBUF_LOCK(&(so)->so_rcv);					\
	sorwakeup_locked(so);						\
} while (0)

#define	sowwakeup_locked(so) do {					\
	SOCKBUF_LOCK_ASSERT(&(so)->so_snd);				\
	if (sb_notify(&(so)->so_snd))					\
		sowakeup((so), &(so)->so_snd);				\
	else								\
		SOCKBUF_UNLOCK(&(so)->so_snd);				\
} while (0)

#define	sowwakeup(so) do {						\
	SOCKBUF_LOCK(&(so)->so_snd);					\
	sowwakeup_locked(so);						\
} while (0)
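
/*
 * Example: a hedged sketch of the two wakeup entry points.  A producer that
 * already appended data under the receive buffer lock calls the _locked
 * variant (which consumes the lock), while code that has not taken the lock
 * uses the plain wrapper:
 *
 *	SOCKBUF_LOCK(&so->so_rcv);
 *	... append an mbuf chain to so->so_rcv ...
 *	sorwakeup_locked(so);	(the lock is released by the macro)
 *
 *	sorwakeup(so);		(takes and releases the lock itself)
 */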

#endif /* !_USER_SOCKETVAR_H_ */