/*-
 * Copyright (c) 1982, 1986, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)socketvar.h	8.3 (Berkeley) 2/19/95
 * $FreeBSD: head/sys/sys/socketvar.h 167715 2007-03-19 18:35:13Z andre $
 */

#ifndef _SYS_SOCKETVAR_H_
#define _SYS_SOCKETVAR_H_

#include <sys/queue.h>			/* for TAILQ macros */
#include <sys/selinfo.h>		/* for struct selinfo */
#include <sys/_lock.h>
#include <sys/_mutex.h>

/*
 * Kernel structure per socket.
 * Contains send and receive buffer queues,
 * handle on protocol and pointer to protocol
 * private data and error information.
 */
typedef	u_quad_t so_gen_t;

/*-
 * Locking key to struct socket:
 * (a) constant after allocation, no locking required.
 * (b) locked by SOCK_LOCK(so).
 * (c) locked by SOCKBUF_LOCK(&so->so_rcv).
 * (d) locked by SOCKBUF_LOCK(&so->so_snd).
 * (e) locked by ACCEPT_LOCK().
 * (f) not locked since integer reads/writes are atomic.
 * (g) used only as a sleep/wakeup address, no value.
 * (h) locked by global mutex so_global_mtx.
 */
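
/*
 * Illustrative example of the key above (a sketch, not a complete code
 * path): a field keyed (b), such as so_state, is modified with the
 * socket lock held:
 *
 *	SOCK_LOCK(so);
 *	so->so_state |= SS_NBIO;
 *	SOCK_UNLOCK(so);
 *
 * Fields keyed (c) or (d) instead require SOCKBUF_LOCK() on the
 * receive or send buffer, respectively.
 */
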
struct socket {
	int	so_count;		/* (b) reference count */
	short	so_type;		/* (a) generic type, see socket.h */
	short	so_options;		/* from socket call, see socket.h */
	short	so_linger;		/* time to linger while closing */
	short	so_state;		/* (b) internal state flags SS_* */
	int	so_qstate;		/* (e) internal state flags SQ_* */
	void	*so_pcb;		/* protocol control block */
	struct	protosw *so_proto;	/* (a) protocol handle */
/*
 * Variables for connection queuing.
 * The socket on which accepts occur is pointed to by so_head in all
 * subsidiary sockets.  If so_head is NULL, the socket is not related
 * to an accept.
 * For a head socket, so_incomp queues partially completed connections,
 * while so_comp is a queue of connections ready to be accepted.
 * If a connection is aborted and it has so_head set, then
 * it has to be pulled out of either so_incomp or so_comp.
 * Connections are allowed to queue up based on the current queue
 * lengths and the limit on the number of queued connections for this
 * socket.
 */
	struct	socket *so_head;	/* (e) back pointer to listen socket */
	TAILQ_HEAD(, socket) so_incomp;	/* (e) queue of partial unaccepted connections */
	TAILQ_HEAD(, socket) so_comp;	/* (e) queue of complete unaccepted connections */
	TAILQ_ENTRY(socket) so_list;	/* (e) list of unaccepted connections */
	u_short	so_qlen;		/* (e) number of unaccepted connections */
	u_short	so_incqlen;		/* (e) number of unaccepted incomplete
					   connections */
	u_short	so_qlimit;		/* (e) max number queued connections */
	short	so_timeo;		/* (g) connection timeout */
	u_short	so_error;		/* (f) error affecting connection */
	struct	sigio *so_sigio;	/* [sg] information for async I/O or
					   out of band data (SIGURG) */
	u_long	so_oobmark;		/* (c) chars to oob mark */
	TAILQ_HEAD(, aiocblist) so_aiojobq; /* AIO ops waiting on socket */
/*
 * Variables for socket buffering.
 */
	struct sockbuf {
		struct	selinfo sb_sel;	/* process selecting read/write */
		struct	mtx sb_mtx;	/* sockbuf lock */
		short	sb_state;	/* (c/d) socket state on sockbuf */
#define	sb_startzero	sb_mb
		struct	mbuf *sb_mb;	/* (c/d) the mbuf chain */
		struct	mbuf *sb_mbtail; /* (c/d) the last mbuf in the chain */
		struct	mbuf *sb_lastrecord;	/* (c/d) first mbuf of last
						 * record in socket buffer */
		struct	mbuf *sb_sndptr; /* (c/d) pointer into mbuf chain */
		u_int	sb_sndptroff;	/* (c/d) byte offset of ptr into chain */
		u_int	sb_cc;		/* (c/d) actual chars in buffer */
		u_int	sb_hiwat;	/* (c/d) max actual char count */
		u_int	sb_mbcnt;	/* (c/d) chars of mbufs used */
		u_int	sb_mbmax;	/* (c/d) max chars of mbufs to use */
		u_int	sb_ctl;		/* (c/d) non-data chars in buffer */
		int	sb_lowat;	/* (c/d) low water mark */
		int	sb_timeo;	/* (c/d) timeout for read/write */
		short	sb_flags;	/* (c/d) flags, see below */
	} so_rcv, so_snd;
/*
 * Default limit on the number of characters buffered in a socket buffer.
 */
#define	SB_MAX		(256*1024)	/* default for max chars in sockbuf */
/*
 * Constants for sb_flags field of struct sockbuf.
 */
#define	SB_LOCK		0x01		/* lock on data queue */
#define	SB_WANT		0x02		/* someone is waiting to lock */
#define	SB_WAIT		0x04		/* someone is waiting for data/space */
#define	SB_SEL		0x08		/* someone is selecting */
#define	SB_ASYNC	0x10		/* ASYNC I/O, need signals */
#define	SB_UPCALL	0x20		/* someone wants an upcall */
#define	SB_NOINTR	0x40		/* operations not interruptible */
#define	SB_AIO		0x80		/* AIO operations queued */
#define	SB_KNOTE	0x100		/* kernel note attached */
#define	SB_AUTOSIZE	0x800		/* automatically size socket buffer */

	void	(*so_upcall)(struct socket *, void *, int);
	void	*so_upcallarg;
	struct	ucred *so_cred;		/* (a) user credentials */
	struct	label *so_label;	/* (b) MAC label for socket */
	struct	label *so_peerlabel;	/* (b) cached MAC label for peer */
	/* NB: generation count must not be first. */
	so_gen_t so_gencnt;		/* (h) generation count */
	void	*so_emuldata;		/* (b) private data for emulators */
	struct so_accf {
		struct	accept_filter *so_accept_filter;
		void	*so_accept_filter_arg;	/* saved filter args */
		char	*so_accept_filter_str;	/* saved user args */
	} *so_accf;
};

#define	SB_EMPTY_FIXUP(sb) do {						\
	if ((sb)->sb_mb == NULL) {					\
		(sb)->sb_mbtail = NULL;					\
		(sb)->sb_lastrecord = NULL;				\
	}								\
} while (/*CONSTCOND*/0)

/*
 * Global accept mutex to serialize access to accept queues and
 * fields associated with multiple sockets.  This allows us to
 * avoid defining a lock order between listen and accept sockets
 * until such time as it proves to be a good idea.
 */
extern struct mtx accept_mtx;
#define	ACCEPT_LOCK_ASSERT()		mtx_assert(&accept_mtx, MA_OWNED)
#define	ACCEPT_UNLOCK_ASSERT()		mtx_assert(&accept_mtx, MA_NOTOWNED)
#define	ACCEPT_LOCK()			mtx_lock(&accept_mtx)
#define	ACCEPT_UNLOCK()			mtx_unlock(&accept_mtx)
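
/*
 * Illustrative sketch only: fields keyed (e), such as the accept
 * queues, are manipulated with the accept mutex held, e.g. when a
 * connection moves from the incomplete to the complete queue:
 *
 *	ACCEPT_LOCK();
 *	TAILQ_REMOVE(&head->so_incomp, so, so_list);
 *	head->so_incqlen--;
 *	TAILQ_INSERT_TAIL(&head->so_comp, so, so_list);
 *	head->so_qlen++;
 *	so->so_qstate &= ~SQ_INCOMP;
 *	so->so_qstate |= SQ_COMP;
 *	ACCEPT_UNLOCK();
 */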

/*
 * Per-socket buffer mutex used to protect most fields in the socket
 * buffer.
 */
#define	SOCKBUF_MTX(_sb)		(&(_sb)->sb_mtx)
#define	SOCKBUF_LOCK_INIT(_sb, _name) \
	mtx_init(SOCKBUF_MTX(_sb), _name, NULL, MTX_DEF)
#define	SOCKBUF_LOCK_DESTROY(_sb)	mtx_destroy(SOCKBUF_MTX(_sb))
#define	SOCKBUF_LOCK(_sb)		mtx_lock(SOCKBUF_MTX(_sb))
#define	SOCKBUF_OWNED(_sb)		mtx_owned(SOCKBUF_MTX(_sb))
#define	SOCKBUF_UNLOCK(_sb)		mtx_unlock(SOCKBUF_MTX(_sb))
#define	SOCKBUF_LOCK_ASSERT(_sb)	mtx_assert(SOCKBUF_MTX(_sb), MA_OWNED)
#define	SOCKBUF_UNLOCK_ASSERT(_sb)	mtx_assert(SOCKBUF_MTX(_sb), MA_NOTOWNED)
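
/*
 * Illustrative sketch only: fields keyed (c)/(d) are read or written
 * with the corresponding socket buffer lock held:
 *
 *	SOCKBUF_LOCK(&so->so_rcv);
 *	if (so->so_rcv.sb_cc >= so->so_rcv.sb_lowat)
 *		... data is available to a reader ...
 *	SOCKBUF_UNLOCK(&so->so_rcv);
 */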

/*
 * Per-socket mutex: we reuse the receive socket buffer mutex for space
 * efficiency.  This decision should probably be revisited as we optimize
 * locking for the socket code.
 */
#define	SOCK_MTX(_so)			SOCKBUF_MTX(&(_so)->so_rcv)
#define	SOCK_LOCK(_so)			SOCKBUF_LOCK(&(_so)->so_rcv)
#define	SOCK_OWNED(_so)			SOCKBUF_OWNED(&(_so)->so_rcv)
#define	SOCK_UNLOCK(_so)		SOCKBUF_UNLOCK(&(_so)->so_rcv)
#define	SOCK_LOCK_ASSERT(_so)		SOCKBUF_LOCK_ASSERT(&(_so)->so_rcv)

/*
 * Socket state bits.
 *
 * Historically, these bits were all kept in the so_state field.  For
 * locking reasons, they are now in multiple fields, as they are
 * locked differently.  so_state maintains basic socket state protected
 * by the socket lock.  so_qstate holds information about the socket
 * accept queues.  Each socket buffer also has a state field holding
 * information relevant to that socket buffer (can't send, can't
 * receive).  Many fields will be read without locks to improve
 * performance and avoid lock order issues.  However, this approach
 * must be used with caution; an illustrative sketch follows the SQ_*
 * bits below.
 */
#define	SS_NOFDREF		0x0001	/* no file table ref any more */
#define	SS_ISCONNECTED		0x0002	/* socket connected to a peer */
#define	SS_ISCONNECTING		0x0004	/* in process of connecting to peer */
#define	SS_ISDISCONNECTING	0x0008	/* in process of disconnecting */
#define	SS_NBIO			0x0100	/* non-blocking ops */
#define	SS_ASYNC		0x0200	/* async i/o notify */
#define	SS_ISCONFIRMING		0x0400	/* deciding to accept connection req */
#define	SS_ISDISCONNECTED	0x2000	/* socket disconnected from peer */
/*
 * Protocols can mark a socket as SS_PROTOREF to indicate that, following
 * pru_detach, they still want the socket to persist, and will free it
 * themselves when they are done.  Protocols should only ever call sofree()
 * after setting this flag in pru_detach(), and never otherwise, as sofree()
 * bypasses socket reference counting.
 */
#define	SS_PROTOREF		0x4000	/* strong protocol reference */

/*
 * Socket state bits now stored in the socket buffer state field.
 */
#define	SBS_CANTSENDMORE	0x0010	/* can't send more data to peer */
#define	SBS_CANTRCVMORE		0x0020	/* can't receive more data from peer */
#define	SBS_RCVATMARK		0x0040	/* at mark on input */

/*
 * Socket state bits stored in so_qstate.
 */
#define	SQ_INCOMP		0x0800	/* unaccepted, incomplete connection */
#define	SQ_COMP			0x1000	/* unaccepted, complete connection */
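
/*
 * Illustrative sketch only: the split state is examined under the
 * lock appropriate to each field:
 *
 *	SOCK_LOCK(so);
 *	connected = (so->so_state & SS_ISCONNECTED) != 0;
 *	SOCK_UNLOCK(so);
 *
 *	SOCKBUF_LOCK(&so->so_rcv);
 *	eof = (so->so_rcv.sb_state & SBS_CANTRCVMORE) != 0;
 *	SOCKBUF_UNLOCK(&so->so_rcv);
 */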

/*
 * Externalized form of struct socket used by the sysctl(3) interface.
 */
struct xsocket {
	size_t	xso_len;	/* length of this structure */
	struct	socket *xso_so;	/* makes a convenient handle sometimes */
	short	so_type;
	short	so_options;
	short	so_linger;
	short	so_state;
	caddr_t	so_pcb;		/* another convenient handle */
	int	xso_protocol;
	int	xso_family;
	u_short	so_qlen;
	u_short	so_incqlen;
	u_short	so_qlimit;
	short	so_timeo;
	u_short	so_error;
	pid_t	so_pgid;
	u_long	so_oobmark;
	struct xsockbuf {
		u_int	sb_cc;
		u_int	sb_hiwat;
		u_int	sb_mbcnt;
		u_int	sb_mbmax;
		int	sb_lowat;
		int	sb_timeo;
		short	sb_flags;
	} so_rcv, so_snd;
	uid_t	so_uid;		/* XXX */
};
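
/*
 * Illustrative sketch only: the kernel fills in a struct xsocket with
 * sotoxsocket() (declared below) before copying it out, e.g. from a
 * sysctl handler ("req" here stands for the handler's sysctl request
 * argument):
 *
 *	struct xsocket xso;
 *
 *	sotoxsocket(so, &xso);
 *	error = SYSCTL_OUT(req, &xso, sizeof(xso));
 */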

#ifdef _KERNEL

/*
 * Macros for sockets and socket buffering.
 */

/*
 * Do we need to notify the other side when I/O is possible?
 */
#define	sb_notify(sb)	(((sb)->sb_flags & (SB_WAIT | SB_SEL | SB_ASYNC | \
    SB_UPCALL | SB_AIO | SB_KNOTE)) != 0)

/*
 * How much space is there in a socket buffer (so->so_snd or so->so_rcv)?
 * This is problematic if the fields are unsigned, as the space might
 * still be negative (cc > hiwat or mbcnt > mbmax).  Should detect
 * overflow and return 0.  Should use "lmin", but it doesn't exist now.
 */
#define	sbspace(sb) \
    ((long) imin((int)((sb)->sb_hiwat - (sb)->sb_cc), \
	 (int)((sb)->sb_mbmax - (sb)->sb_mbcnt)))
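
/*
 * For example (illustrative figures only): with sb_hiwat = 65536,
 * sb_cc = 1024, sb_mbmax = 262144 and sb_mbcnt = 2048, sbspace()
 * yields min(65536 - 1024, 262144 - 2048) = 64512.
 */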

/* do we have to send all at once on a socket? */
#define	sosendallatonce(so) \
    ((so)->so_proto->pr_flags & PR_ATOMIC)

/* can we read something from so? */
#define	soreadable(so) \
    ((so)->so_rcv.sb_cc >= (so)->so_rcv.sb_lowat || \
	((so)->so_rcv.sb_state & SBS_CANTRCVMORE) || \
	!TAILQ_EMPTY(&(so)->so_comp) || (so)->so_error)

/* can we write something to so? */
#define	sowriteable(so) \
    ((sbspace(&(so)->so_snd) >= (so)->so_snd.sb_lowat && \
	(((so)->so_state&SS_ISCONNECTED) || \
	  ((so)->so_proto->pr_flags&PR_CONNREQUIRED)==0)) || \
     ((so)->so_snd.sb_state & SBS_CANTSENDMORE) || \
     (so)->so_error)

/* adjust counters in sb reflecting allocation of m */
#define	sballoc(sb, m) { \
	(sb)->sb_cc += (m)->m_len; \
	if ((m)->m_type != MT_DATA && (m)->m_type != MT_OOBDATA) \
		(sb)->sb_ctl += (m)->m_len; \
	(sb)->sb_mbcnt += MSIZE; \
	if ((m)->m_flags & M_EXT) \
		(sb)->sb_mbcnt += (m)->m_ext.ext_size; \
}

/* adjust counters in sb reflecting freeing of m */
#define	sbfree(sb, m) { \
	(sb)->sb_cc -= (m)->m_len; \
	if ((m)->m_type != MT_DATA && (m)->m_type != MT_OOBDATA) \
		(sb)->sb_ctl -= (m)->m_len; \
	(sb)->sb_mbcnt -= MSIZE; \
	if ((m)->m_flags & M_EXT) \
		(sb)->sb_mbcnt -= (m)->m_ext.ext_size; \
	if ((sb)->sb_sndptr == (m)) { \
		(sb)->sb_sndptr = NULL; \
		(sb)->sb_sndptroff = 0; \
	} \
	if ((sb)->sb_sndptroff != 0) \
		(sb)->sb_sndptroff -= (m)->m_len; \
}

/*
 * Set lock on sockbuf sb; sleep if lock is already held.
 * Unless SB_NOINTR is set on sockbuf, sleep is interruptible.
 * Returns error without lock if sleep is interrupted.
 */
#define	sblock(sb, wf) ((sb)->sb_flags & SB_LOCK ? \
		(((wf) == M_WAITOK) ? sb_lock(sb) : EWOULDBLOCK) : \
		((sb)->sb_flags |= SB_LOCK), 0)

/* release lock on sockbuf sb */
#define	sbunlock(sb) do { \
	SOCKBUF_LOCK_ASSERT(sb); \
	(sb)->sb_flags &= ~SB_LOCK; \
	if ((sb)->sb_flags & SB_WANT) { \
		(sb)->sb_flags &= ~SB_WANT; \
		wakeup(&(sb)->sb_flags); \
	} \
} while (0)
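
/*
 * Illustrative sketch of the usual pairing, assuming the caller holds
 * the sockbuf lock across sblock() and sbunlock() (sb_flags is keyed
 * (c)/(d), and sbunlock() asserts the lock):
 *
 *	SOCKBUF_LOCK(&so->so_rcv);
 *	error = sblock(&so->so_rcv, M_WAITOK);
 *	if (error == 0) {
 *		... work on the receive buffer ...
 *		sbunlock(&so->so_rcv);
 *	}
 *	SOCKBUF_UNLOCK(&so->so_rcv);
 */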

/*
 * soref()/sorele() ref-count the socket structure.  Note that you must
 * still explicitly close the socket, but dropping the final reference
 * frees the structure.
 */
#define	soref(so) do {							\
	SOCK_LOCK_ASSERT(so);						\
	++(so)->so_count;						\
} while (0)

#define	sorele(so) do {							\
	ACCEPT_LOCK_ASSERT();						\
	SOCK_LOCK_ASSERT(so);						\
	if ((so)->so_count <= 0)					\
		panic("sorele");					\
	if (--(so)->so_count == 0)					\
		sofree(so);						\
	else {								\
		SOCK_UNLOCK(so);					\
		ACCEPT_UNLOCK();					\
	}								\
} while (0)
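
/*
 * Illustrative sketch only: taking and later dropping a reference.
 * Note that sorele() returns with both the socket lock and the accept
 * mutex released:
 *
 *	SOCK_LOCK(so);
 *	soref(so);
 *	SOCK_UNLOCK(so);
 *	... use the socket ...
 *	ACCEPT_LOCK();
 *	SOCK_LOCK(so);
 *	sorele(so);
 */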

#define	sotryfree(so) do {						\
	ACCEPT_LOCK_ASSERT();						\
	SOCK_LOCK_ASSERT(so);						\
	if ((so)->so_count == 0)					\
		sofree(so);						\
	else {								\
		SOCK_UNLOCK(so);					\
		ACCEPT_UNLOCK();					\
	}								\
} while(0)

/*
 * In sorwakeup() and sowwakeup(), acquire the socket buffer lock to
 * avoid a non-atomic test-and-wakeup.  However, sowakeup is
 * responsible for releasing the lock if it is called.  We unlock only
 * if we don't call into sowakeup.  If any code is introduced that
 * directly invokes the underlying sowakeup() primitives, it must
 * maintain the same semantics.
 */
#define	sorwakeup_locked(so) do {					\
	SOCKBUF_LOCK_ASSERT(&(so)->so_rcv);				\
	if (sb_notify(&(so)->so_rcv))					\
		sowakeup((so), &(so)->so_rcv);				\
	else								\
		SOCKBUF_UNLOCK(&(so)->so_rcv);				\
} while (0)

#define	sorwakeup(so) do {						\
	SOCKBUF_LOCK(&(so)->so_rcv);					\
	sorwakeup_locked(so);						\
} while (0)

#define	sowwakeup_locked(so) do {					\
	SOCKBUF_LOCK_ASSERT(&(so)->so_snd);				\
	if (sb_notify(&(so)->so_snd))					\
		sowakeup((so), &(so)->so_snd);				\
	else								\
		SOCKBUF_UNLOCK(&(so)->so_snd);				\
} while (0)

#define	sowwakeup(so) do {						\
	SOCKBUF_LOCK(&(so)->so_snd);					\
	sowwakeup_locked(so);						\
} while (0)
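
/*
 * Illustrative sketch only: a protocol delivering data typically
 * appends to the receive buffer and issues the wakeup while still
 * holding the sockbuf lock, letting sorwakeup_locked() hand the lock
 * off to sowakeup():
 *
 *	SOCKBUF_LOCK(&so->so_rcv);
 *	sbappendstream_locked(&so->so_rcv, m);
 *	sorwakeup_locked(so);
 */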

/*
 * Argument structure for sosetopt et seq.  This is in the KERNEL
 * section because it will never be visible to user code.
 */
enum sopt_dir { SOPT_GET, SOPT_SET };
struct sockopt {
	enum	sopt_dir sopt_dir; /* is this a get or a set? */
	int	sopt_level;	/* second arg of [gs]etsockopt */
	int	sopt_name;	/* third arg of [gs]etsockopt */
	void   *sopt_val;	/* fourth arg of [gs]etsockopt */
	size_t	sopt_valsize;	/* (almost) fifth arg of [gs]etsockopt */
	struct	thread *sopt_td; /* calling thread or null if kernel */
};
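
/*
 * Illustrative sketch only (option values chosen for the example): a
 * kernel caller can set a socket option through this structure:
 *
 *	struct sockopt sopt;
 *	int val = 1;
 *
 *	sopt.sopt_dir = SOPT_SET;
 *	sopt.sopt_level = SOL_SOCKET;
 *	sopt.sopt_name = SO_REUSEADDR;
 *	sopt.sopt_val = &val;
 *	sopt.sopt_valsize = sizeof(val);
 *	sopt.sopt_td = NULL;
 *	error = sosetopt(so, &sopt);
 *
 * so_setsockopt() below wraps this same pattern for in-kernel use.
 */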

struct accept_filter {
	char	accf_name[16];
	void	(*accf_callback)
		(struct socket *so, void *arg, int waitflag);
	void *	(*accf_create)
		(struct socket *so, char *arg);
	void	(*accf_destroy)
		(struct socket *so);
	SLIST_ENTRY(accept_filter) accf_next;
};
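
/*
 * Illustrative sketch only (the filter name and callback below are
 * hypothetical): a module registers an accept filter by filling in
 * this structure and calling accept_filt_add():
 *
 *	static struct accept_filter accf_example_filter = {
 *		"exampleready",
 *		accf_example_callback,
 *		NULL,
 *		NULL
 *	};
 *
 *	error = accept_filt_add(&accf_example_filter);
 */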

#ifdef MALLOC_DECLARE
MALLOC_DECLARE(M_ACCF);
MALLOC_DECLARE(M_PCB);
MALLOC_DECLARE(M_SONAME);
#endif

extern int	maxsockets;
extern u_long	sb_max;
extern struct uma_zone *socket_zone;
extern so_gen_t so_gencnt;

struct mbuf;
struct sockaddr;
struct ucred;
struct uio;

/*
 * From uipc_socket and friends
 */
int	do_getopt_accept_filter(struct socket *so, struct sockopt *sopt);
int	do_setopt_accept_filter(struct socket *so, struct sockopt *sopt);
int	so_setsockopt(struct socket *so, int level, int optname,
	    void *optval, size_t optlen);
int	sockargs(struct mbuf **mp, caddr_t buf, int buflen, int type);
int	getsockaddr(struct sockaddr **namp, caddr_t uaddr, size_t len);
void	sbappend(struct sockbuf *sb, struct mbuf *m);
void	sbappend_locked(struct sockbuf *sb, struct mbuf *m);
void	sbappendstream(struct sockbuf *sb, struct mbuf *m);
void	sbappendstream_locked(struct sockbuf *sb, struct mbuf *m);
int	sbappendaddr(struct sockbuf *sb, const struct sockaddr *asa,
	    struct mbuf *m0, struct mbuf *control);
int	sbappendaddr_locked(struct sockbuf *sb, const struct sockaddr *asa,
	    struct mbuf *m0, struct mbuf *control);
int	sbappendcontrol(struct sockbuf *sb, struct mbuf *m0,
	    struct mbuf *control);
int	sbappendcontrol_locked(struct sockbuf *sb, struct mbuf *m0,
	    struct mbuf *control);
void	sbappendrecord(struct sockbuf *sb, struct mbuf *m0);
void	sbappendrecord_locked(struct sockbuf *sb, struct mbuf *m0);
void	sbcheck(struct sockbuf *sb);
void	sbcompress(struct sockbuf *sb, struct mbuf *m, struct mbuf *n);
struct mbuf *
	sbcreatecontrol(caddr_t p, int size, int type, int level);
void	sbdestroy(struct sockbuf *sb, struct socket *so);
void	sbdrop(struct sockbuf *sb, int len);
void	sbdrop_locked(struct sockbuf *sb, int len);
void	sbdroprecord(struct sockbuf *sb);
void	sbdroprecord_locked(struct sockbuf *sb);
void	sbflush(struct sockbuf *sb);
void	sbflush_locked(struct sockbuf *sb);
void	sbrelease(struct sockbuf *sb, struct socket *so);
void	sbrelease_locked(struct sockbuf *sb, struct socket *so);
int	sbreserve(struct sockbuf *sb, u_long cc, struct socket *so,
	    struct thread *td);
int	sbreserve_locked(struct sockbuf *sb, u_long cc, struct socket *so,
	    struct thread *td);
struct mbuf *
	sbsndptr(struct sockbuf *sb, u_int off, u_int len, u_int *moff);
void	sbtoxsockbuf(struct sockbuf *sb, struct xsockbuf *xsb);
int	sbwait(struct sockbuf *sb);
int	sb_lock(struct sockbuf *sb);
void	soabort(struct socket *so);
int	soaccept(struct socket *so, struct sockaddr **nam);
int	socheckuid(struct socket *so, uid_t uid);
int	sobind(struct socket *so, struct sockaddr *nam, struct thread *td);
void	socantrcvmore(struct socket *so);
void	socantrcvmore_locked(struct socket *so);
void	socantsendmore(struct socket *so);
void	socantsendmore_locked(struct socket *so);
int	soclose(struct socket *so);
int	soconnect(struct socket *so, struct sockaddr *nam, struct thread *td);
int	soconnect2(struct socket *so1, struct socket *so2);
int	socow_setup(struct mbuf *m0, struct uio *uio);
int	socreate(int dom, struct socket **aso, int type, int proto,
	    struct ucred *cred, struct thread *td);
int	sodisconnect(struct socket *so);
struct	sockaddr *sodupsockaddr(const struct sockaddr *sa, int mflags);
void	sofree(struct socket *so);
int	sogetopt(struct socket *so, struct sockopt *sopt);
void	sohasoutofband(struct socket *so);
void	soisconnected(struct socket *so);
void	soisconnecting(struct socket *so);
void	soisdisconnected(struct socket *so);
void	soisdisconnecting(struct socket *so);
int	solisten(struct socket *so, int backlog, struct thread *td);
void	solisten_proto(struct socket *so, int backlog);
int	solisten_proto_check(struct socket *so);
struct socket *
	sonewconn(struct socket *head, int connstatus);
int	sooptcopyin(struct sockopt *sopt, void *buf, size_t len, size_t minlen);
int	sooptcopyout(struct sockopt *sopt, const void *buf, size_t len);

/* XXX; prepare mbuf for (__FreeBSD__ < 3) routines. */
int	soopt_getm(struct sockopt *sopt, struct mbuf **mp);
int	soopt_mcopyin(struct sockopt *sopt, struct mbuf *m);
int	soopt_mcopyout(struct sockopt *sopt, struct mbuf *m);

int	sopoll(struct socket *so, int events, struct ucred *active_cred,
	    struct thread *td);
int	sopoll_generic(struct socket *so, int events,
	    struct ucred *active_cred, struct thread *td);
int	soreceive(struct socket *so, struct sockaddr **paddr, struct uio *uio,
	    struct mbuf **mp0, struct mbuf **controlp, int *flagsp);
int	soreceive_generic(struct socket *so, struct sockaddr **paddr,
	    struct uio *uio, struct mbuf **mp0, struct mbuf **controlp,
	    int *flagsp);
int	soreserve(struct socket *so, u_long sndcc, u_long rcvcc);
void	sorflush(struct socket *so);
int	sosend(struct socket *so, struct sockaddr *addr, struct uio *uio,
	    struct mbuf *top, struct mbuf *control, int flags,
	    struct thread *td);
int	sosend_dgram(struct socket *so, struct sockaddr *addr,
	    struct uio *uio, struct mbuf *top, struct mbuf *control,
	    int flags, struct thread *td);
int	sosend_generic(struct socket *so, struct sockaddr *addr,
	    struct uio *uio, struct mbuf *top, struct mbuf *control,
	    int flags, struct thread *td);
int	sosetopt(struct socket *so, struct sockopt *sopt);
int	soshutdown(struct socket *so, int how);
void	sotoxsocket(struct socket *so, struct xsocket *xso);
void	sowakeup(struct socket *so, struct sockbuf *sb);

#ifdef SOCKBUF_DEBUG
void	sblastrecordchk(struct sockbuf *, const char *, int);
#define	SBLASTRECORDCHK(sb)	sblastrecordchk((sb), __FILE__, __LINE__)

void	sblastmbufchk(struct sockbuf *, const char *, int);
#define	SBLASTMBUFCHK(sb)	sblastmbufchk((sb), __FILE__, __LINE__)
#else
#define	SBLASTRECORDCHK(sb)      /* nothing */
#define	SBLASTMBUFCHK(sb)        /* nothing */
#endif /* SOCKBUF_DEBUG */

/*
 * Accept filter functions (duh).
 */
int	accept_filt_add(struct accept_filter *filt);
int	accept_filt_del(char *name);
struct	accept_filter *accept_filt_get(char *name);
#ifdef ACCEPT_FILTER_MOD
#ifdef SYSCTL_DECL
SYSCTL_DECL(_net_inet_accf);
#endif
int	accept_filt_generic_mod_event(module_t mod, int event, void *data);
#endif

#endif /* _KERNEL */

#endif /* !_SYS_SOCKETVAR_H_ */