/* socketvar.h — FreeBSD SVN revision 178888 */
/*-
 * Copyright (c) 1982, 1986, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
28 * 29 * @(#)socketvar.h 8.3 (Berkeley) 2/19/95 30 * $FreeBSD: head/sys/sys/socketvar.h 178888 2008-05-09 23:03:00Z julian $ 31 */ 32 33#ifndef _SYS_SOCKETVAR_H_ 34#define _SYS_SOCKETVAR_H_ 35 36#include <sys/queue.h> /* for TAILQ macros */ 37#include <sys/selinfo.h> /* for struct selinfo */ 38#include <sys/_lock.h> 39#include <sys/_mutex.h> 40#include <sys/_sx.h> 41 42/* 43 * Kernel structure per socket. 44 * Contains send and receive buffer queues, 45 * handle on protocol and pointer to protocol 46 * private data and error information. 47 */ 48typedef u_quad_t so_gen_t; 49 50/*- 51 * Locking key to struct socket: 52 * (a) constant after allocation, no locking required. 53 * (b) locked by SOCK_LOCK(so). 54 * (c) locked by SOCKBUF_LOCK(&so->so_rcv). 55 * (d) locked by SOCKBUF_LOCK(&so->so_snd). 56 * (e) locked by ACCEPT_LOCK(). 57 * (f) not locked since integer reads/writes are atomic. 58 * (g) used only as a sleep/wakeup address, no value. 59 * (h) locked by global mutex so_global_mtx. 60 */ 61struct socket { 62 int so_count; /* (b) reference count */ 63 short so_type; /* (a) generic type, see socket.h */ 64 short so_options; /* from socket call, see socket.h */ 65 short so_linger; /* time to linger while closing */ 66 short so_state; /* (b) internal state flags SS_* */ 67 int so_qstate; /* (e) internal state flags SQ_* */ 68 void *so_pcb; /* protocol control block */ 69 struct protosw *so_proto; /* (a) protocol handle */ 70/* 71 * Variables for connection queuing. 72 * Socket where accepts occur is so_head in all subsidiary sockets. 73 * If so_head is 0, socket is not related to an accept. 74 * For head socket so_incomp queues partially completed connections, 75 * while so_comp is a queue of connections ready to be accepted. 76 * If a connection is aborted and it has so_head set, then 77 * it has to be pulled out of either so_incomp or so_comp. 
78 * We allow connections to queue up based on current queue lengths 79 * and limit on number of queued connections for this socket. 80 */ 81 struct socket *so_head; /* (e) back pointer to listen socket */ 82 TAILQ_HEAD(, socket) so_incomp; /* (e) queue of partial unaccepted connections */ 83 TAILQ_HEAD(, socket) so_comp; /* (e) queue of complete unaccepted connections */ 84 TAILQ_ENTRY(socket) so_list; /* (e) list of unaccepted connections */ 85 u_short so_qlen; /* (e) number of unaccepted connections */ 86 u_short so_incqlen; /* (e) number of unaccepted incomplete 87 connections */ 88 u_short so_qlimit; /* (e) max number queued connections */ 89 short so_timeo; /* (g) connection timeout */ 90 u_short so_error; /* (f) error affecting connection */ 91 struct sigio *so_sigio; /* [sg] information for async I/O or 92 out of band data (SIGURG) */ 93 u_long so_oobmark; /* (c) chars to oob mark */ 94 TAILQ_HEAD(, aiocblist) so_aiojobq; /* AIO ops waiting on socket */ 95/* 96 * Variables for socket buffering. 
97 */ 98 struct sockbuf { 99 struct selinfo sb_sel; /* process selecting read/write */ 100 struct mtx sb_mtx; /* sockbuf lock */ 101 struct sx sb_sx; /* prevent I/O interlacing */ 102 short sb_state; /* (c/d) socket state on sockbuf */ 103#define sb_startzero sb_mb 104 struct mbuf *sb_mb; /* (c/d) the mbuf chain */ 105 struct mbuf *sb_mbtail; /* (c/d) the last mbuf in the chain */ 106 struct mbuf *sb_lastrecord; /* (c/d) first mbuf of last 107 * record in socket buffer */ 108 struct mbuf *sb_sndptr; /* (c/d) pointer into mbuf chain */ 109 u_int sb_sndptroff; /* (c/d) byte offset of ptr into chain */ 110 u_int sb_cc; /* (c/d) actual chars in buffer */ 111 u_int sb_hiwat; /* (c/d) max actual char count */ 112 u_int sb_mbcnt; /* (c/d) chars of mbufs used */ 113 u_int sb_mbmax; /* (c/d) max chars of mbufs to use */ 114 u_int sb_ctl; /* (c/d) non-data chars in buffer */ 115 int sb_lowat; /* (c/d) low water mark */ 116 int sb_timeo; /* (c/d) timeout for read/write */ 117 short sb_flags; /* (c/d) flags, see below */ 118 } so_rcv, so_snd; 119/* 120 * Constants for sb_flags field of struct sockbuf. 121 */ 122#define SB_MAX (256*1024) /* default for max chars in sockbuf */ 123/* 124 * Constants for sb_flags field of struct sockbuf. 
125 */ 126#define SB_WAIT 0x04 /* someone is waiting for data/space */ 127#define SB_SEL 0x08 /* someone is selecting */ 128#define SB_ASYNC 0x10 /* ASYNC I/O, need signals */ 129#define SB_UPCALL 0x20 /* someone wants an upcall */ 130#define SB_NOINTR 0x40 /* operations not interruptible */ 131#define SB_AIO 0x80 /* AIO operations queued */ 132#define SB_KNOTE 0x100 /* kernel note attached */ 133#define SB_NOCOALESCE 0x200 /* don't coalesce new data into existing mbufs */ 134#define SB_AUTOSIZE 0x800 /* automatically size socket buffer */ 135 136 void (*so_upcall)(struct socket *, void *, int); 137 void *so_upcallarg; 138 struct ucred *so_cred; /* (a) user credentials */ 139 struct label *so_label; /* (b) MAC label for socket */ 140 struct label *so_peerlabel; /* (b) cached MAC label for peer */ 141 /* NB: generation count must not be first. */ 142 so_gen_t so_gencnt; /* (h) generation count */ 143 void *so_emuldata; /* (b) private data for emulators */ 144 struct so_accf { 145 struct accept_filter *so_accept_filter; 146 void *so_accept_filter_arg; /* saved filter args */ 147 char *so_accept_filter_str; /* saved user args */ 148 } *so_accf; 149 int so_fibnum; /* routing domain for this socket */ 150}; 151 152#define SB_EMPTY_FIXUP(sb) do { \ 153 if ((sb)->sb_mb == NULL) { \ 154 (sb)->sb_mbtail = NULL; \ 155 (sb)->sb_lastrecord = NULL; \ 156 } \ 157} while (/*CONSTCOND*/0) 158 159/* 160 * Global accept mutex to serialize access to accept queues and 161 * fields associated with multiple sockets. This allows us to 162 * avoid defining a lock order between listen and accept sockets 163 * until such time as it proves to be a good idea. 
164 */ 165extern struct mtx accept_mtx; 166#define ACCEPT_LOCK_ASSERT() mtx_assert(&accept_mtx, MA_OWNED) 167#define ACCEPT_UNLOCK_ASSERT() mtx_assert(&accept_mtx, MA_NOTOWNED) 168#define ACCEPT_LOCK() mtx_lock(&accept_mtx) 169#define ACCEPT_UNLOCK() mtx_unlock(&accept_mtx) 170 171/* 172 * Per-socket buffer mutex used to protect most fields in the socket 173 * buffer. 174 */ 175#define SOCKBUF_MTX(_sb) (&(_sb)->sb_mtx) 176#define SOCKBUF_LOCK_INIT(_sb, _name) \ 177 mtx_init(SOCKBUF_MTX(_sb), _name, NULL, MTX_DEF) 178#define SOCKBUF_LOCK_DESTROY(_sb) mtx_destroy(SOCKBUF_MTX(_sb)) 179#define SOCKBUF_LOCK(_sb) mtx_lock(SOCKBUF_MTX(_sb)) 180#define SOCKBUF_OWNED(_sb) mtx_owned(SOCKBUF_MTX(_sb)) 181#define SOCKBUF_UNLOCK(_sb) mtx_unlock(SOCKBUF_MTX(_sb)) 182#define SOCKBUF_LOCK_ASSERT(_sb) mtx_assert(SOCKBUF_MTX(_sb), MA_OWNED) 183#define SOCKBUF_UNLOCK_ASSERT(_sb) mtx_assert(SOCKBUF_MTX(_sb), MA_NOTOWNED) 184 185/* 186 * Per-socket mutex: we reuse the receive socket buffer mutex for space 187 * efficiency. This decision should probably be revisited as we optimize 188 * locking for the socket code. 189 */ 190#define SOCK_MTX(_so) SOCKBUF_MTX(&(_so)->so_rcv) 191#define SOCK_LOCK(_so) SOCKBUF_LOCK(&(_so)->so_rcv) 192#define SOCK_OWNED(_so) SOCKBUF_OWNED(&(_so)->so_rcv) 193#define SOCK_UNLOCK(_so) SOCKBUF_UNLOCK(&(_so)->so_rcv) 194#define SOCK_LOCK_ASSERT(_so) SOCKBUF_LOCK_ASSERT(&(_so)->so_rcv) 195 196/* 197 * Socket state bits. 198 * 199 * Historically, this bits were all kept in the so_state field. For 200 * locking reasons, they are now in multiple fields, as they are 201 * locked differently. so_state maintains basic socket state protected 202 * by the socket lock. so_qstate holds information about the socket 203 * accept queues. Each socket buffer also has a state field holding 204 * information relevant to that socket buffer (can't send, rcv). Many 205 * fields will be read without locks to improve performance and avoid 206 * lock order issues. 
However, this approach must be used with caution. 207 */ 208#define SS_NOFDREF 0x0001 /* no file table ref any more */ 209#define SS_ISCONNECTED 0x0002 /* socket connected to a peer */ 210#define SS_ISCONNECTING 0x0004 /* in process of connecting to peer */ 211#define SS_ISDISCONNECTING 0x0008 /* in process of disconnecting */ 212#define SS_NBIO 0x0100 /* non-blocking ops */ 213#define SS_ASYNC 0x0200 /* async i/o notify */ 214#define SS_ISCONFIRMING 0x0400 /* deciding to accept connection req */ 215#define SS_ISDISCONNECTED 0x2000 /* socket disconnected from peer */ 216/* 217 * Protocols can mark a socket as SS_PROTOREF to indicate that, following 218 * pru_detach, they still want the socket to persist, and will free it 219 * themselves when they are done. Protocols should only ever call sofree() 220 * following setting this flag in pru_detach(), and never otherwise, as 221 * sofree() bypasses socket reference counting. 222 */ 223#define SS_PROTOREF 0x4000 /* strong protocol reference */ 224 225/* 226 * Socket state bits now stored in the socket buffer state field. 227 */ 228#define SBS_CANTSENDMORE 0x0010 /* can't send more data to peer */ 229#define SBS_CANTRCVMORE 0x0020 /* can't receive more data from peer */ 230#define SBS_RCVATMARK 0x0040 /* at mark on input */ 231 232/* 233 * Socket state bits stored in so_qstate. 234 */ 235#define SQ_INCOMP 0x0800 /* unaccepted, incomplete connection */ 236#define SQ_COMP 0x1000 /* unaccepted, complete connection */ 237 238/* 239 * Externalized form of struct socket used by the sysctl(3) interface. 
240 */ 241struct xsocket { 242 size_t xso_len; /* length of this structure */ 243 struct socket *xso_so; /* makes a convenient handle sometimes */ 244 short so_type; 245 short so_options; 246 short so_linger; 247 short so_state; 248 caddr_t so_pcb; /* another convenient handle */ 249 int xso_protocol; 250 int xso_family; 251 u_short so_qlen; 252 u_short so_incqlen; 253 u_short so_qlimit; 254 short so_timeo; 255 u_short so_error; 256 pid_t so_pgid; 257 u_long so_oobmark; 258 struct xsockbuf { 259 u_int sb_cc; 260 u_int sb_hiwat; 261 u_int sb_mbcnt; 262 u_int sb_mbmax; 263 int sb_lowat; 264 int sb_timeo; 265 short sb_flags; 266 } so_rcv, so_snd; 267 uid_t so_uid; /* XXX */ 268}; 269 270#ifdef _KERNEL 271 272/* 273 * Macros for sockets and socket buffering. 274 */ 275 276/* 277 * Flags to sblock(). 278 */ 279#define SBL_WAIT 0x00000001 /* Wait if not immediately available. */ 280#define SBL_NOINTR 0x00000002 /* Force non-interruptible sleep. */ 281#define SBL_VALID (SBL_WAIT | SBL_NOINTR) 282 283/* 284 * Do we need to notify the other side when I/O is possible? 285 */ 286#define sb_notify(sb) (((sb)->sb_flags & (SB_WAIT | SB_SEL | SB_ASYNC | \ 287 SB_UPCALL | SB_AIO | SB_KNOTE)) != 0) 288 289/* 290 * How much space is there in a socket buffer (so->so_snd or so->so_rcv)? 291 * This is problematical if the fields are unsigned, as the space might 292 * still be negative (cc > hiwat or mbcnt > mbmax). Should detect 293 * overflow and return 0. Should use "lmin" but it doesn't exist now. 294 */ 295#define sbspace(sb) \ 296 ((long) imin((int)((sb)->sb_hiwat - (sb)->sb_cc), \ 297 (int)((sb)->sb_mbmax - (sb)->sb_mbcnt))) 298 299/* do we have to send all at once on a socket? */ 300#define sosendallatonce(so) \ 301 ((so)->so_proto->pr_flags & PR_ATOMIC) 302 303/* can we read something from so? 
*/ 304#define soreadable(so) \ 305 ((so)->so_rcv.sb_cc >= (so)->so_rcv.sb_lowat || \ 306 ((so)->so_rcv.sb_state & SBS_CANTRCVMORE) || \ 307 !TAILQ_EMPTY(&(so)->so_comp) || (so)->so_error) 308 309/* can we write something to so? */ 310#define sowriteable(so) \ 311 ((sbspace(&(so)->so_snd) >= (so)->so_snd.sb_lowat && \ 312 (((so)->so_state&SS_ISCONNECTED) || \ 313 ((so)->so_proto->pr_flags&PR_CONNREQUIRED)==0)) || \ 314 ((so)->so_snd.sb_state & SBS_CANTSENDMORE) || \ 315 (so)->so_error) 316 317/* adjust counters in sb reflecting allocation of m */ 318#define sballoc(sb, m) { \ 319 (sb)->sb_cc += (m)->m_len; \ 320 if ((m)->m_type != MT_DATA && (m)->m_type != MT_OOBDATA) \ 321 (sb)->sb_ctl += (m)->m_len; \ 322 (sb)->sb_mbcnt += MSIZE; \ 323 if ((m)->m_flags & M_EXT) \ 324 (sb)->sb_mbcnt += (m)->m_ext.ext_size; \ 325} 326 327/* adjust counters in sb reflecting freeing of m */ 328#define sbfree(sb, m) { \ 329 (sb)->sb_cc -= (m)->m_len; \ 330 if ((m)->m_type != MT_DATA && (m)->m_type != MT_OOBDATA) \ 331 (sb)->sb_ctl -= (m)->m_len; \ 332 (sb)->sb_mbcnt -= MSIZE; \ 333 if ((m)->m_flags & M_EXT) \ 334 (sb)->sb_mbcnt -= (m)->m_ext.ext_size; \ 335 if ((sb)->sb_sndptr == (m)) { \ 336 (sb)->sb_sndptr = NULL; \ 337 (sb)->sb_sndptroff = 0; \ 338 } \ 339 if ((sb)->sb_sndptroff != 0) \ 340 (sb)->sb_sndptroff -= (m)->m_len; \ 341} 342 343/* 344 * soref()/sorele() ref-count the socket structure. Note that you must 345 * still explicitly close the socket, but the last ref count will free 346 * the structure. 
347 */ 348#define soref(so) do { \ 349 SOCK_LOCK_ASSERT(so); \ 350 ++(so)->so_count; \ 351} while (0) 352 353#define sorele(so) do { \ 354 ACCEPT_LOCK_ASSERT(); \ 355 SOCK_LOCK_ASSERT(so); \ 356 if ((so)->so_count <= 0) \ 357 panic("sorele"); \ 358 if (--(so)->so_count == 0) \ 359 sofree(so); \ 360 else { \ 361 SOCK_UNLOCK(so); \ 362 ACCEPT_UNLOCK(); \ 363 } \ 364} while (0) 365 366#define sotryfree(so) do { \ 367 ACCEPT_LOCK_ASSERT(); \ 368 SOCK_LOCK_ASSERT(so); \ 369 if ((so)->so_count == 0) \ 370 sofree(so); \ 371 else { \ 372 SOCK_UNLOCK(so); \ 373 ACCEPT_UNLOCK(); \ 374 } \ 375} while(0) 376 377/* 378 * In sorwakeup() and sowwakeup(), acquire the socket buffer lock to 379 * avoid a non-atomic test-and-wakeup. However, sowakeup is 380 * responsible for releasing the lock if it is called. We unlock only 381 * if we don't call into sowakeup. If any code is introduced that 382 * directly invokes the underlying sowakeup() primitives, it must 383 * maintain the same semantics. 384 */ 385#define sorwakeup_locked(so) do { \ 386 SOCKBUF_LOCK_ASSERT(&(so)->so_rcv); \ 387 if (sb_notify(&(so)->so_rcv)) \ 388 sowakeup((so), &(so)->so_rcv); \ 389 else \ 390 SOCKBUF_UNLOCK(&(so)->so_rcv); \ 391} while (0) 392 393#define sorwakeup(so) do { \ 394 SOCKBUF_LOCK(&(so)->so_rcv); \ 395 sorwakeup_locked(so); \ 396} while (0) 397 398#define sowwakeup_locked(so) do { \ 399 SOCKBUF_LOCK_ASSERT(&(so)->so_snd); \ 400 if (sb_notify(&(so)->so_snd)) \ 401 sowakeup((so), &(so)->so_snd); \ 402 else \ 403 SOCKBUF_UNLOCK(&(so)->so_snd); \ 404} while (0) 405 406#define sowwakeup(so) do { \ 407 SOCKBUF_LOCK(&(so)->so_snd); \ 408 sowwakeup_locked(so); \ 409} while (0) 410 411/* 412 * Argument structure for sosetopt et seq. This is in the KERNEL 413 * section because it will never be visible to user code. 414 */ 415enum sopt_dir { SOPT_GET, SOPT_SET }; 416struct sockopt { 417 enum sopt_dir sopt_dir; /* is this a get or a set? 
*/ 418 int sopt_level; /* second arg of [gs]etsockopt */ 419 int sopt_name; /* third arg of [gs]etsockopt */ 420 void *sopt_val; /* fourth arg of [gs]etsockopt */ 421 size_t sopt_valsize; /* (almost) fifth arg of [gs]etsockopt */ 422 struct thread *sopt_td; /* calling thread or null if kernel */ 423}; 424 425struct accept_filter { 426 char accf_name[16]; 427 void (*accf_callback) 428 (struct socket *so, void *arg, int waitflag); 429 void * (*accf_create) 430 (struct socket *so, char *arg); 431 void (*accf_destroy) 432 (struct socket *so); 433 SLIST_ENTRY(accept_filter) accf_next; 434}; 435 436#ifdef MALLOC_DECLARE 437MALLOC_DECLARE(M_ACCF); 438MALLOC_DECLARE(M_PCB); 439MALLOC_DECLARE(M_SONAME); 440#endif 441 442extern int maxsockets; 443extern u_long sb_max; 444extern struct uma_zone *socket_zone; 445extern so_gen_t so_gencnt; 446 447struct mbuf; 448struct sockaddr; 449struct ucred; 450struct uio; 451 452/* 453 * From uipc_socket and friends 454 */ 455int do_getopt_accept_filter(struct socket *so, struct sockopt *sopt); 456int do_setopt_accept_filter(struct socket *so, struct sockopt *sopt); 457int so_setsockopt(struct socket *so, int level, int optname, 458 void *optval, size_t optlen); 459int sockargs(struct mbuf **mp, caddr_t buf, int buflen, int type); 460int getsockaddr(struct sockaddr **namp, caddr_t uaddr, size_t len); 461void sbappend(struct sockbuf *sb, struct mbuf *m); 462void sbappend_locked(struct sockbuf *sb, struct mbuf *m); 463void sbappendstream(struct sockbuf *sb, struct mbuf *m); 464void sbappendstream_locked(struct sockbuf *sb, struct mbuf *m); 465int sbappendaddr(struct sockbuf *sb, const struct sockaddr *asa, 466 struct mbuf *m0, struct mbuf *control); 467int sbappendaddr_locked(struct sockbuf *sb, const struct sockaddr *asa, 468 struct mbuf *m0, struct mbuf *control); 469int sbappendcontrol(struct sockbuf *sb, struct mbuf *m0, 470 struct mbuf *control); 471int sbappendcontrol_locked(struct sockbuf *sb, struct mbuf *m0, 472 struct mbuf 
*control); 473void sbappendrecord(struct sockbuf *sb, struct mbuf *m0); 474void sbappendrecord_locked(struct sockbuf *sb, struct mbuf *m0); 475void sbcheck(struct sockbuf *sb); 476void sbcompress(struct sockbuf *sb, struct mbuf *m, struct mbuf *n); 477struct mbuf * 478 sbcreatecontrol(caddr_t p, int size, int type, int level); 479void sbdestroy(struct sockbuf *sb, struct socket *so); 480void sbdrop(struct sockbuf *sb, int len); 481void sbdrop_locked(struct sockbuf *sb, int len); 482void sbdroprecord(struct sockbuf *sb); 483void sbdroprecord_locked(struct sockbuf *sb); 484void sbflush(struct sockbuf *sb); 485void sbflush_locked(struct sockbuf *sb); 486void sbrelease(struct sockbuf *sb, struct socket *so); 487void sbrelease_internal(struct sockbuf *sb, struct socket *so); 488void sbrelease_locked(struct sockbuf *sb, struct socket *so); 489int sbreserve(struct sockbuf *sb, u_long cc, struct socket *so, 490 struct thread *td); 491int sbreserve_locked(struct sockbuf *sb, u_long cc, struct socket *so, 492 struct thread *td); 493struct mbuf * 494 sbsndptr(struct sockbuf *sb, u_int off, u_int len, u_int *moff); 495void sbtoxsockbuf(struct sockbuf *sb, struct xsockbuf *xsb); 496int sbwait(struct sockbuf *sb); 497int sblock(struct sockbuf *sb, int flags); 498void sbunlock(struct sockbuf *sb); 499void soabort(struct socket *so); 500int soaccept(struct socket *so, struct sockaddr **nam); 501int socheckuid(struct socket *so, uid_t uid); 502int sobind(struct socket *so, struct sockaddr *nam, struct thread *td); 503void socantrcvmore(struct socket *so); 504void socantrcvmore_locked(struct socket *so); 505void socantsendmore(struct socket *so); 506void socantsendmore_locked(struct socket *so); 507int soclose(struct socket *so); 508int soconnect(struct socket *so, struct sockaddr *nam, struct thread *td); 509int soconnect2(struct socket *so1, struct socket *so2); 510int socow_setup(struct mbuf *m0, struct uio *uio); 511int socreate(int dom, struct socket **aso, int type, int proto, 
512 struct ucred *cred, struct thread *td); 513int sodisconnect(struct socket *so); 514struct sockaddr *sodupsockaddr(const struct sockaddr *sa, int mflags); 515void sofree(struct socket *so); 516int sogetopt(struct socket *so, struct sockopt *sopt); 517void sohasoutofband(struct socket *so); 518void soisconnected(struct socket *so); 519void soisconnecting(struct socket *so); 520void soisdisconnected(struct socket *so); 521void soisdisconnecting(struct socket *so); 522int solisten(struct socket *so, int backlog, struct thread *td); 523void solisten_proto(struct socket *so, int backlog); 524int solisten_proto_check(struct socket *so); 525struct socket * 526 sonewconn(struct socket *head, int connstatus); 527int sooptcopyin(struct sockopt *sopt, void *buf, size_t len, size_t minlen); 528int sooptcopyout(struct sockopt *sopt, const void *buf, size_t len); 529 530/* XXX; prepare mbuf for (__FreeBSD__ < 3) routines. */ 531int soopt_getm(struct sockopt *sopt, struct mbuf **mp); 532int soopt_mcopyin(struct sockopt *sopt, struct mbuf *m); 533int soopt_mcopyout(struct sockopt *sopt, struct mbuf *m); 534 535int sopoll(struct socket *so, int events, struct ucred *active_cred, 536 struct thread *td); 537int sopoll_generic(struct socket *so, int events, 538 struct ucred *active_cred, struct thread *td); 539int soreceive(struct socket *so, struct sockaddr **paddr, struct uio *uio, 540 struct mbuf **mp0, struct mbuf **controlp, int *flagsp); 541int soreceive_generic(struct socket *so, struct sockaddr **paddr, 542 struct uio *uio, struct mbuf **mp0, struct mbuf **controlp, 543 int *flagsp); 544int soreserve(struct socket *so, u_long sndcc, u_long rcvcc); 545void sorflush(struct socket *so); 546int sosend(struct socket *so, struct sockaddr *addr, struct uio *uio, 547 struct mbuf *top, struct mbuf *control, int flags, 548 struct thread *td); 549int sosend_dgram(struct socket *so, struct sockaddr *addr, 550 struct uio *uio, struct mbuf *top, struct mbuf *control, 551 int flags, 
struct thread *td); 552int sosend_generic(struct socket *so, struct sockaddr *addr, 553 struct uio *uio, struct mbuf *top, struct mbuf *control, 554 int flags, struct thread *td); 555int sosetopt(struct socket *so, struct sockopt *sopt); 556int soshutdown(struct socket *so, int how); 557void sotoxsocket(struct socket *so, struct xsocket *xso); 558void sowakeup(struct socket *so, struct sockbuf *sb); 559int selsocket(struct socket *so, int events, struct timeval *tv, 560 struct thread *td); 561 562#ifdef SOCKBUF_DEBUG 563void sblastrecordchk(struct sockbuf *, const char *, int); 564#define SBLASTRECORDCHK(sb) sblastrecordchk((sb), __FILE__, __LINE__) 565 566void sblastmbufchk(struct sockbuf *, const char *, int); 567#define SBLASTMBUFCHK(sb) sblastmbufchk((sb), __FILE__, __LINE__) 568#else 569#define SBLASTRECORDCHK(sb) /* nothing */ 570#define SBLASTMBUFCHK(sb) /* nothing */ 571#endif /* SOCKBUF_DEBUG */ 572 573/* 574 * Accept filter functions (duh). 575 */ 576int accept_filt_add(struct accept_filter *filt); 577int accept_filt_del(char *name); 578struct accept_filter *accept_filt_get(char *name); 579#ifdef ACCEPT_FILTER_MOD 580#ifdef SYSCTL_DECL 581SYSCTL_DECL(_net_inet_accf); 582#endif 583int accept_filt_generic_mod_event(module_t mod, int event, void *data); 584#endif 585 586#endif /* _KERNEL */ 587 588#endif /* !_SYS_SOCKETVAR_H_ */ 589