uipc_socket.c revision 180198
/*-
 * Copyright (c) 1982, 1986, 1988, 1990, 1993
 *	The Regents of the University of California.
 * Copyright (c) 2004 The FreeBSD Foundation
 * Copyright (c) 2004-2007 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_socket.c	8.3 (Berkeley) 4/15/94
 */

/*
 * Comments on the socket life cycle:
 *
 * soalloc() sets up socket layer state for a socket, called only by
 * socreate() and sonewconn().  Socket layer private.
 *
 * sodealloc() tears down socket layer state for a socket, called only by
 * sofree() and sonewconn().  Socket layer private.
 *
 * pru_attach() associates protocol layer state with an allocated socket;
 * called only once, may fail, aborting socket allocation.  This is called
 * from socreate() and sonewconn().  Socket layer private.
 *
 * pru_detach() disassociates protocol layer state from an attached socket,
 * and will be called exactly once for sockets in which pru_attach() has
 * been successfully called.  If pru_attach() returned an error,
 * pru_detach() will not be called.  Socket layer private.
 *
 * pru_abort() and pru_close() notify the protocol layer that the last
 * consumer of a socket is starting to tear down the socket, and that the
 * protocol should terminate the connection.  Historically, pru_abort() also
 * detached protocol state from the socket state, but this is no longer the
 * case.
 *
 * socreate() creates a socket and attaches protocol state.  This is a public
 * interface that may be used by socket layer consumers to create new
 * sockets.
 *
 * sonewconn() creates a socket and attaches protocol state.  This is a
 * public interface that may be used by protocols to create new sockets when
 * a new connection is received and will be available for accept() on a
 * listen socket.
 *
 * soclose() destroys a socket after possibly waiting for it to disconnect.
 * This is a public interface that socket consumers should use to close and
 * release a socket when done with it.
 *
 * soabort() destroys a socket without waiting for it to disconnect (used
 * only for incoming connections that are already partially or fully
 * connected).  This is used internally by the socket layer when clearing
 * listen socket queues (due to overflow or close on the listen socket), but
 * is also a public interface protocols may use to abort connections in
 * their incomplete listen queues should they no longer be required.  Sockets
 * placed in completed connection listen queues should not be aborted for
 * reasons described in the comment above the soclose() implementation.  This
 * is not a general purpose close routine, and except in the specific
 * circumstances described here, should not be used.
 *
 * sofree() will free a socket and its protocol state if all references on
 * the socket have been released, and is the interface through which the
 * socket layer attempts to free a socket when a reference is removed.  This
 * is a socket layer private interface.
 *
 * NOTE: In addition to socreate() and soclose(), which provide a single
 * socket reference to the consumer to be managed as required, there are two
 * calls to explicitly manage socket references, soref() and sorele().
 * Currently, these are generally required only when transitioning a socket
 * from a listen queue to a file descriptor, in order to prevent garbage
 * collection of the socket at an untimely moment.  For a number of reasons,
 * these interfaces are not preferred, and should be avoided.
 */
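/*
 * Illustrative sketch (not part of this file, compiled out): how an
 * in-kernel consumer might drive the life cycle described above using only
 * the public interfaces.  The function name is hypothetical and error
 * handling is abbreviated.
 */
#if 0
static int
example_socket_lifecycle(struct sockaddr *sa, struct thread *td)
{
	struct socket *so;
	int error;

	/* socreate() returns a socket holding a single reference. */
	error = socreate(sa->sa_family, &so, SOCK_STREAM, 0,
	    td->td_ucred, td);
	if (error != 0)
		return (error);
	/* Protocol state was attached by pru_attach() inside socreate(). */
	error = soconnect(so, sa, td);
	/* soclose() releases the socreate() reference and may free the socket. */
	(void)soclose(so);
	return (error);
}
#endif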
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/uipc_socket.c 180198 2008-07-02 23:23:27Z rwatson $");

#include "opt_inet.h"
#include "opt_mac.h"
#include "opt_zero.h"
#include "opt_compat.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mac.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/domain.h>
#include <sys/file.h>			/* for struct knote */
#include <sys/kernel.h>
#include <sys/event.h>
#include <sys/eventhandler.h>
#include <sys/poll.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/resourcevar.h>
#include <net/route.h>
#include <sys/signalvar.h>
#include <sys/stat.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/uio.h>
#include <sys/jail.h>

#include <security/mac/mac_framework.h>

#include <vm/uma.h>

#ifdef COMPAT_IA32
#include <sys/mount.h>
#include <compat/freebsd32/freebsd32.h>

extern struct sysentvec ia32_freebsd_sysvec;
#endif

static int	soreceive_rcvoob(struct socket *so, struct uio *uio,
		    int flags);

static void	filt_sordetach(struct knote *kn);
static int	filt_soread(struct knote *kn, long hint);
static void	filt_sowdetach(struct knote *kn);
static int	filt_sowrite(struct knote *kn, long hint);
static int	filt_solisten(struct knote *kn, long hint);

static struct filterops solisten_filtops =
	{ 1, NULL, filt_sordetach, filt_solisten };
static struct filterops soread_filtops =
	{ 1, NULL, filt_sordetach, filt_soread };
static struct filterops sowrite_filtops =
	{ 1, NULL, filt_sowdetach, filt_sowrite };

uma_zone_t socket_zone;
so_gen_t so_gencnt;	/* generation count for sockets */

int maxsockets;

MALLOC_DEFINE(M_SONAME, "soname", "socket name");
MALLOC_DEFINE(M_PCB, "pcb", "protocol control block");

static int somaxconn = SOMAXCONN;
static int sysctl_somaxconn(SYSCTL_HANDLER_ARGS);
/* XXX: we don't have SYSCTL_USHORT */
SYSCTL_PROC(_kern_ipc, KIPC_SOMAXCONN, somaxconn, CTLTYPE_UINT | CTLFLAG_RW,
    0, sizeof(int), sysctl_somaxconn, "I",
    "Maximum pending socket connection queue size");
static int numopensockets;
SYSCTL_INT(_kern_ipc, OID_AUTO, numopensockets, CTLFLAG_RD,
    &numopensockets, 0, "Number of open sockets");
#ifdef ZERO_COPY_SOCKETS
/* These aren't static because they're used in other files. */
int so_zero_copy_send = 1;
int so_zero_copy_receive = 1;
SYSCTL_NODE(_kern_ipc, OID_AUTO, zero_copy, CTLFLAG_RD, 0,
    "Zero copy controls");
SYSCTL_INT(_kern_ipc_zero_copy, OID_AUTO, receive, CTLFLAG_RW,
    &so_zero_copy_receive, 0, "Enable zero copy receive");
SYSCTL_INT(_kern_ipc_zero_copy, OID_AUTO, send, CTLFLAG_RW,
    &so_zero_copy_send, 0, "Enable zero copy send");
#endif /* ZERO_COPY_SOCKETS */

/*
 * accept_mtx locks down per-socket fields relating to accept queues.  See
 * socketvar.h for an annotation of the protected fields of struct socket.
 */
struct mtx accept_mtx;
MTX_SYSINIT(accept_mtx, &accept_mtx, "accept", MTX_DEF);

/*
 * so_global_mtx protects so_gencnt, numopensockets, and the per-socket
 * so_gencnt field.
 */
static struct mtx so_global_mtx;
MTX_SYSINIT(so_global_mtx, &so_global_mtx, "so_glabel", MTX_DEF);

/*
 * General IPC sysctl name space, used by sockets and a variety of other IPC
 * types.
 */
SYSCTL_NODE(_kern, KERN_IPC, ipc, CTLFLAG_RW, 0, "IPC");

/*
 * Sysctl to get and set the maximum global sockets limit.  Notify protocols
 * of the change so that they can update their dependent limits as required.
 */
static int
sysctl_maxsockets(SYSCTL_HANDLER_ARGS)
{
	int error, newmaxsockets;

	newmaxsockets = maxsockets;
	error = sysctl_handle_int(oidp, &newmaxsockets, 0, req);
	if (error == 0 && req->newptr) {
		if (newmaxsockets > maxsockets) {
			maxsockets = newmaxsockets;
			if (maxsockets > ((maxfiles / 4) * 3)) {
				maxfiles = (maxsockets * 5) / 4;
				maxfilesperproc = (maxfiles * 9) / 10;
			}
			EVENTHANDLER_INVOKE(maxsockets_change);
		} else
			error = EINVAL;
	}
	return (error);
}

SYSCTL_PROC(_kern_ipc, OID_AUTO, maxsockets, CTLTYPE_INT|CTLFLAG_RW,
    &maxsockets, 0, sysctl_maxsockets, "IU",
    "Maximum number of sockets available");

/*
 * Initialise maxsockets.
 */
static void
init_maxsockets(void *ignored)
{

	TUNABLE_INT_FETCH("kern.ipc.maxsockets", &maxsockets);
	maxsockets = imax(maxsockets, imax(maxfiles, nmbclusters));
}
SYSINIT(param, SI_SUB_TUNABLES, SI_ORDER_ANY, init_maxsockets, NULL);
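/*
 * Illustrative sketch (userland, compiled out): exercising the
 * sysctl_maxsockets() handler above via sysctlbyname(3).  Note the handler
 * only accepts increases; attempts to lower the limit return EINVAL.
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	int cur, new;
	size_t len = sizeof(cur);

	if (sysctlbyname("kern.ipc.maxsockets", &cur, &len, NULL, 0) == -1)
		return (1);
	printf("kern.ipc.maxsockets: %d\n", cur);

	new = cur + 1024;		/* grow-only; shrinking fails */
	if (sysctlbyname("kern.ipc.maxsockets", NULL, NULL, &new,
	    sizeof(new)) == -1)
		perror("sysctlbyname");
	return (0);
}
#endif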
/*
 * Socket operation routines.  These routines are called by the routines in
 * sys_socket.c or from a system process, and implement the semantics of
 * socket operations by switching out to the protocol specific routines.
 */

/*
 * Get a socket structure from our zone, and initialize it.  Note that it
 * would probably be better to allocate socket and PCB at the same time, but
 * I'm not convinced that all the protocols can be easily modified to do
 * this.
 *
 * soalloc() returns a socket with a ref count of 0.
 */
static struct socket *
soalloc(void)
{
	struct socket *so;

	so = uma_zalloc(socket_zone, M_NOWAIT | M_ZERO);
	if (so == NULL)
		return (NULL);
#ifdef MAC
	if (mac_socket_init(so, M_NOWAIT) != 0) {
		uma_zfree(socket_zone, so);
		return (NULL);
	}
#endif
	SOCKBUF_LOCK_INIT(&so->so_snd, "so_snd");
	SOCKBUF_LOCK_INIT(&so->so_rcv, "so_rcv");
	sx_init(&so->so_snd.sb_sx, "so_snd_sx");
	sx_init(&so->so_rcv.sb_sx, "so_rcv_sx");
	TAILQ_INIT(&so->so_aiojobq);
	mtx_lock(&so_global_mtx);
	so->so_gencnt = ++so_gencnt;
	++numopensockets;
	mtx_unlock(&so_global_mtx);
	return (so);
}

/*
 * Free the storage associated with a socket at the socket layer, tear down
 * locks, labels, etc.  All protocol state is assumed already to have been
 * torn down (and possibly never set up) by the caller.
 */
static void
sodealloc(struct socket *so)
{

	KASSERT(so->so_count == 0, ("sodealloc(): so_count %d", so->so_count));
	KASSERT(so->so_pcb == NULL, ("sodealloc(): so_pcb != NULL"));

	mtx_lock(&so_global_mtx);
	so->so_gencnt = ++so_gencnt;
	--numopensockets;	/* Could be below, but faster here. */
	mtx_unlock(&so_global_mtx);
	if (so->so_rcv.sb_hiwat)
		(void)chgsbsize(so->so_cred->cr_uidinfo,
		    &so->so_rcv.sb_hiwat, 0, RLIM_INFINITY);
	if (so->so_snd.sb_hiwat)
		(void)chgsbsize(so->so_cred->cr_uidinfo,
		    &so->so_snd.sb_hiwat, 0, RLIM_INFINITY);
#ifdef INET
	/* Remove accept filter if one is present. */
	if (so->so_accf != NULL)
		do_setopt_accept_filter(so, NULL);
#endif
#ifdef MAC
	mac_socket_destroy(so);
#endif
	crfree(so->so_cred);
	sx_destroy(&so->so_snd.sb_sx);
	sx_destroy(&so->so_rcv.sb_sx);
	SOCKBUF_LOCK_DESTROY(&so->so_snd);
	SOCKBUF_LOCK_DESTROY(&so->so_rcv);
	uma_zfree(socket_zone, so);
}

/*
 * socreate() returns a socket with a ref count of 1.  The socket should be
 * closed with soclose().
 */
int
socreate(int dom, struct socket **aso, int type, int proto,
    struct ucred *cred, struct thread *td)
{
	struct protosw *prp;
	struct socket *so;
	int error;

	if (proto)
		prp = pffindproto(dom, proto, type);
	else
		prp = pffindtype(dom, type);

	if (prp == NULL || prp->pr_usrreqs->pru_attach == NULL ||
	    prp->pr_usrreqs->pru_attach == pru_attach_notsupp)
		return (EPROTONOSUPPORT);

	if (jailed(cred) && jail_socket_unixiproute_only &&
	    prp->pr_domain->dom_family != PF_LOCAL &&
	    prp->pr_domain->dom_family != PF_INET &&
	    prp->pr_domain->dom_family != PF_ROUTE) {
		return (EPROTONOSUPPORT);
	}

	if (prp->pr_type != type)
		return (EPROTOTYPE);
	so = soalloc();
	if (so == NULL)
		return (ENOBUFS);

	TAILQ_INIT(&so->so_incomp);
	TAILQ_INIT(&so->so_comp);
	so->so_type = type;
	so->so_cred = crhold(cred);
	if ((prp->pr_domain->dom_family == PF_INET) ||
	    (prp->pr_domain->dom_family == PF_ROUTE))
		so->so_fibnum = td->td_proc->p_fibnum;
	else
		so->so_fibnum = 0;
	so->so_proto = prp;
#ifdef MAC
	mac_socket_create(cred, so);
#endif
	knlist_init(&so->so_rcv.sb_sel.si_note, SOCKBUF_MTX(&so->so_rcv),
	    NULL, NULL, NULL);
	knlist_init(&so->so_snd.sb_sel.si_note, SOCKBUF_MTX(&so->so_snd),
	    NULL, NULL, NULL);
	so->so_count = 1;
	/*
	 * Auto-sizing of socket buffers is managed by the protocols and
	 * the appropriate flags must be set in the pru_attach function.
	 */
	error = (*prp->pr_usrreqs->pru_attach)(so, proto, td);
	if (error) {
		KASSERT(so->so_count == 1, ("socreate: so_count %d",
		    so->so_count));
		so->so_count = 0;
		sodealloc(so);
		return (error);
	}
	*aso = so;
	return (0);
}
#ifdef REGRESSION
static int regression_sonewconn_earlytest = 1;
SYSCTL_INT(_regression, OID_AUTO, sonewconn_earlytest, CTLFLAG_RW,
    &regression_sonewconn_earlytest, 0, "Perform early sonewconn limit test");
#endif

/*
 * When an attempt at a new connection is noted on a socket which accepts
 * connections, sonewconn is called.  If the connection is possible (subject
 * to space constraints, etc.) then we allocate a new structure, properly
 * linked into the data structure of the original socket, and return this.
 * Connstatus may be 0, or SS_ISCONFIRMING, or SS_ISCONNECTED.
 *
 * Note: the ref count on the socket is 0 on return.
 */
struct socket *
sonewconn(struct socket *head, int connstatus)
{
	struct socket *so;
	int over;

	ACCEPT_LOCK();
	over = (head->so_qlen > 3 * head->so_qlimit / 2);
	ACCEPT_UNLOCK();
#ifdef REGRESSION
	if (regression_sonewconn_earlytest && over)
#else
	if (over)
#endif
		return (NULL);
	so = soalloc();
	if (so == NULL)
		return (NULL);
	if ((head->so_options & SO_ACCEPTFILTER) != 0)
		connstatus = 0;
	so->so_head = head;
	so->so_type = head->so_type;
	so->so_options = head->so_options &~ SO_ACCEPTCONN;
	so->so_linger = head->so_linger;
	so->so_state = head->so_state | SS_NOFDREF;
	so->so_proto = head->so_proto;
	so->so_cred = crhold(head->so_cred);
#ifdef MAC
	SOCK_LOCK(head);
	mac_socket_newconn(head, so);
	SOCK_UNLOCK(head);
#endif
	knlist_init(&so->so_rcv.sb_sel.si_note, SOCKBUF_MTX(&so->so_rcv),
	    NULL, NULL, NULL);
	knlist_init(&so->so_snd.sb_sel.si_note, SOCKBUF_MTX(&so->so_snd),
	    NULL, NULL, NULL);
	if (soreserve(so, head->so_snd.sb_hiwat, head->so_rcv.sb_hiwat) ||
	    (*so->so_proto->pr_usrreqs->pru_attach)(so, 0, NULL)) {
		sodealloc(so);
		return (NULL);
	}
	so->so_rcv.sb_lowat = head->so_rcv.sb_lowat;
	so->so_snd.sb_lowat = head->so_snd.sb_lowat;
	so->so_rcv.sb_timeo = head->so_rcv.sb_timeo;
	so->so_snd.sb_timeo = head->so_snd.sb_timeo;
	so->so_rcv.sb_flags |= head->so_rcv.sb_flags & SB_AUTOSIZE;
	so->so_snd.sb_flags |= head->so_snd.sb_flags & SB_AUTOSIZE;
	so->so_state |= connstatus;
	ACCEPT_LOCK();
	if (connstatus) {
		TAILQ_INSERT_TAIL(&head->so_comp, so, so_list);
		so->so_qstate |= SQ_COMP;
		head->so_qlen++;
	} else {
		/*
		 * Keep removing sockets from the head until there's room for
		 * us to insert on the tail.  In pre-locking revisions, this
		 * was a simple if (), but as we could be racing with other
		 * threads and soabort() requires dropping locks, we must
		 * loop waiting for the condition to be true.
		 */
		while (head->so_incqlen > head->so_qlimit) {
			struct socket *sp;
			sp = TAILQ_FIRST(&head->so_incomp);
			TAILQ_REMOVE(&head->so_incomp, sp, so_list);
			head->so_incqlen--;
			sp->so_qstate &= ~SQ_INCOMP;
			sp->so_head = NULL;
			ACCEPT_UNLOCK();
			soabort(sp);
			ACCEPT_LOCK();
		}
		TAILQ_INSERT_TAIL(&head->so_incomp, so, so_list);
		so->so_qstate |= SQ_INCOMP;
		head->so_incqlen++;
	}
	ACCEPT_UNLOCK();
	if (connstatus) {
		sorwakeup(head);
		wakeup_one(&head->so_timeo);
	}
	return (so);
}
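/*
 * Illustrative sketch (hypothetical helper, compiled out): how a protocol
 * typically uses sonewconn() from its input path, per the comment above.
 * Passing a connstatus of 0 queues the new socket on the incomplete queue;
 * soisconnected() later moves it to the complete queue for accept().
 */
#if 0
static struct socket *
example_input_newconn(struct socket *head)
{
	struct socket *so;

	so = sonewconn(head, 0);
	if (so == NULL)
		return (NULL);	/* listen queue full or allocation failed */
	/* ... protocol handshake completes ... */
	soisconnected(so);
	return (so);
}
#endif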
int
sobind(struct socket *so, struct sockaddr *nam, struct thread *td)
{

	return ((*so->so_proto->pr_usrreqs->pru_bind)(so, nam, td));
}

/*
 * solisten() transitions a socket from a non-listening state to a listening
 * state, but can also be used to update the listen queue depth on an
 * existing listen socket.  The protocol will call back into the sockets
 * layer using solisten_proto_check() and solisten_proto() to check and set
 * socket-layer listen state.  Call backs are used so that the protocol can
 * acquire both protocol and socket layer locks in whatever order is required
 * by the protocol.
 *
 * Protocol implementors are advised to hold the socket lock across the
 * socket-layer test and set to avoid races at the socket layer.
 */
int
solisten(struct socket *so, int backlog, struct thread *td)
{

	return ((*so->so_proto->pr_usrreqs->pru_listen)(so, backlog, td));
}

int
solisten_proto_check(struct socket *so)
{

	SOCK_LOCK_ASSERT(so);

	if (so->so_state & (SS_ISCONNECTED | SS_ISCONNECTING |
	    SS_ISDISCONNECTING))
		return (EINVAL);
	return (0);
}

void
solisten_proto(struct socket *so, int backlog)
{

	SOCK_LOCK_ASSERT(so);

	if (backlog < 0 || backlog > somaxconn)
		backlog = somaxconn;
	so->so_qlimit = backlog;
	so->so_options |= SO_ACCEPTCONN;
}
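/*
 * Illustrative sketch (hypothetical function, compiled out): the check/set
 * callback pattern described above, as it might appear in a protocol's
 * pru_listen routine.  Real implementations (e.g. tcp_usr_listen()) also
 * hold their protocol locks across the two calls.
 */
#if 0
static int
example_pru_listen(struct socket *so, int backlog, struct thread *td)
{
	int error;

	SOCK_LOCK(so);
	error = solisten_proto_check(so);
	if (error == 0) {
		/* ... protocol-specific setup, e.g. implicit bind ... */
		solisten_proto(so, backlog);
	}
	SOCK_UNLOCK(so);
	return (error);
}
#endif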
/*
 * Attempt to free a socket.  This should really be sotryfree().
 *
 * sofree() will succeed if:
 *
 * - There are no outstanding file descriptor references or related consumers
 *   (so_count == 0).
 *
 * - The socket has been closed by user space, if ever open (SS_NOFDREF).
 *
 * - The protocol does not have an outstanding strong reference on the socket
 *   (SS_PROTOREF).
 *
 * - The socket is not in a completed connection queue, where a process has
 *   been notified that it is present.  If it is removed, the user process
 *   may block in accept() despite select() saying the socket was ready.
 *
 * Otherwise, it will quietly abort so that a future call to sofree(), when
 * conditions are right, can succeed.
 */
void
sofree(struct socket *so)
{
	struct protosw *pr = so->so_proto;
	struct socket *head;

	ACCEPT_LOCK_ASSERT();
	SOCK_LOCK_ASSERT(so);

	if ((so->so_state & SS_NOFDREF) == 0 || so->so_count != 0 ||
	    (so->so_state & SS_PROTOREF) || (so->so_qstate & SQ_COMP)) {
		SOCK_UNLOCK(so);
		ACCEPT_UNLOCK();
		return;
	}

	head = so->so_head;
	if (head != NULL) {
		KASSERT((so->so_qstate & SQ_COMP) != 0 ||
		    (so->so_qstate & SQ_INCOMP) != 0,
		    ("sofree: so_head != NULL, but neither SQ_COMP nor "
		    "SQ_INCOMP"));
		KASSERT((so->so_qstate & SQ_COMP) == 0 ||
		    (so->so_qstate & SQ_INCOMP) == 0,
		    ("sofree: so->so_qstate is SQ_COMP and also SQ_INCOMP"));
		TAILQ_REMOVE(&head->so_incomp, so, so_list);
		head->so_incqlen--;
		so->so_qstate &= ~SQ_INCOMP;
		so->so_head = NULL;
	}
	KASSERT((so->so_qstate & SQ_COMP) == 0 &&
	    (so->so_qstate & SQ_INCOMP) == 0,
	    ("sofree: so_head == NULL, but still SQ_COMP(%d) or SQ_INCOMP(%d)",
	    so->so_qstate & SQ_COMP, so->so_qstate & SQ_INCOMP));
	if (so->so_options & SO_ACCEPTCONN) {
		KASSERT((TAILQ_EMPTY(&so->so_comp)),
		    ("sofree: so_comp populated"));
		KASSERT((TAILQ_EMPTY(&so->so_incomp)),
		    ("sofree: so_incomp populated"));
	}
	SOCK_UNLOCK(so);
	ACCEPT_UNLOCK();

	if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose != NULL)
		(*pr->pr_domain->dom_dispose)(so->so_rcv.sb_mb);
	if (pr->pr_usrreqs->pru_detach != NULL)
		(*pr->pr_usrreqs->pru_detach)(so);

	/*
	 * From this point on, we assume that no other references to this
	 * socket exist anywhere else in the stack.  Therefore, no locks need
	 * to be acquired or held.
	 *
	 * We used to do a lot of socket buffer and socket locking here, as
	 * well as invoke sorflush() and perform wakeups.  The direct call to
	 * dom_dispose() and sbrelease_internal() are an inlining of what was
	 * necessary from sorflush().
	 *
	 * Notice that the socket buffer and kqueue state are torn down
	 * before calling pru_detach.  This means that protocols should not
	 * assume they can perform socket wakeups, etc., in their detach code.
	 */
	sbdestroy(&so->so_snd, so);
	sbdestroy(&so->so_rcv, so);
	knlist_destroy(&so->so_rcv.sb_sel.si_note);
	knlist_destroy(&so->so_snd.sb_sel.si_note);
	sodealloc(so);
}

/*
 * Close a socket on last file table reference removal.  Initiate disconnect
 * if connected.  Free socket when disconnect complete.
 *
 * This function will sorele() the socket.  Note that soclose() may be called
 * prior to the ref count reaching zero.  The actual socket structure will
 * not be freed until the ref count reaches zero.
 */
int
soclose(struct socket *so)
{
	int error = 0;

	KASSERT(!(so->so_state & SS_NOFDREF), ("soclose: SS_NOFDREF on enter"));

	funsetown(&so->so_sigio);
	if (so->so_state & SS_ISCONNECTED) {
		if ((so->so_state & SS_ISDISCONNECTING) == 0) {
			error = sodisconnect(so);
			if (error)
				goto drop;
		}
		if (so->so_options & SO_LINGER) {
			if ((so->so_state & SS_ISDISCONNECTING) &&
			    (so->so_state & SS_NBIO))
				goto drop;
			while (so->so_state & SS_ISCONNECTED) {
				error = tsleep(&so->so_timeo,
				    PSOCK | PCATCH, "soclos",
				    so->so_linger * hz);
				if (error)
					break;
			}
		}
	}

drop:
	if (so->so_proto->pr_usrreqs->pru_close != NULL)
		(*so->so_proto->pr_usrreqs->pru_close)(so);
	if (so->so_options & SO_ACCEPTCONN) {
		struct socket *sp;
		ACCEPT_LOCK();
		while ((sp = TAILQ_FIRST(&so->so_incomp)) != NULL) {
			TAILQ_REMOVE(&so->so_incomp, sp, so_list);
			so->so_incqlen--;
			sp->so_qstate &= ~SQ_INCOMP;
			sp->so_head = NULL;
			ACCEPT_UNLOCK();
			soabort(sp);
			ACCEPT_LOCK();
		}
		while ((sp = TAILQ_FIRST(&so->so_comp)) != NULL) {
			TAILQ_REMOVE(&so->so_comp, sp, so_list);
			so->so_qlen--;
			sp->so_qstate &= ~SQ_COMP;
			sp->so_head = NULL;
			ACCEPT_UNLOCK();
			soabort(sp);
			ACCEPT_LOCK();
		}
		ACCEPT_UNLOCK();
	}
	ACCEPT_LOCK();
	SOCK_LOCK(so);
	KASSERT((so->so_state & SS_NOFDREF) == 0, ("soclose: NOFDREF"));
	so->so_state |= SS_NOFDREF;
	sorele(so);
	return (error);
}

/*
 * soabort() is used to abruptly tear down a connection, such as when a
 * resource limit is reached (listen queue depth exceeded), or if a listen
 * socket is closed while there are sockets waiting to be accepted.
 *
 * This interface is tricky, because it is called on an unreferenced socket,
 * and must be called only by a thread that has actually removed the socket
 * from the listen queue it was on, or races with other threads are risked.
 *
 * This interface will call into the protocol code, so must not be called
 * with any socket locks held.  Protocols do call it while holding their own
 * recursible protocol mutexes, but this is something that should be subject
 * to review in the future.
 */
void
soabort(struct socket *so)
{

	/*
	 * In as much as is possible, assert that no references to this
	 * socket are held.  This is not quite the same as asserting that the
	 * current thread is responsible for arranging for no references, but
	 * is as close as we can get for now.
	 */
	KASSERT(so->so_count == 0, ("soabort: so_count"));
	KASSERT((so->so_state & SS_PROTOREF) == 0, ("soabort: SS_PROTOREF"));
	KASSERT(so->so_state & SS_NOFDREF, ("soabort: !SS_NOFDREF"));
	KASSERT((so->so_qstate & SQ_COMP) == 0, ("soabort: SQ_COMP"));
	KASSERT((so->so_qstate & SQ_INCOMP) == 0, ("soabort: SQ_INCOMP"));

	if (so->so_proto->pr_usrreqs->pru_abort != NULL)
		(*so->so_proto->pr_usrreqs->pru_abort)(so);
	ACCEPT_LOCK();
	SOCK_LOCK(so);
	sofree(so);
}
int
soaccept(struct socket *so, struct sockaddr **nam)
{
	int error;

	SOCK_LOCK(so);
	KASSERT((so->so_state & SS_NOFDREF) != 0, ("soaccept: !NOFDREF"));
	so->so_state &= ~SS_NOFDREF;
	SOCK_UNLOCK(so);
	error = (*so->so_proto->pr_usrreqs->pru_accept)(so, nam);
	return (error);
}

int
soconnect(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	int error;

	if (so->so_options & SO_ACCEPTCONN)
		return (EOPNOTSUPP);
	/*
	 * If protocol is connection-based, can only connect once.
	 * Otherwise, if connected, try to disconnect first.  This allows
	 * user to disconnect by connecting to, e.g., a null address.
	 */
	if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
	    ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
	    (error = sodisconnect(so)))) {
		error = EISCONN;
	} else {
		/*
		 * Prevent accumulated error from previous connection from
		 * biting us.
		 */
		so->so_error = 0;
		error = (*so->so_proto->pr_usrreqs->pru_connect)(so, nam, td);
	}

	return (error);
}

int
soconnect2(struct socket *so1, struct socket *so2)
{

	return ((*so1->so_proto->pr_usrreqs->pru_connect2)(so1, so2));
}

int
sodisconnect(struct socket *so)
{
	int error;

	if ((so->so_state & SS_ISCONNECTED) == 0)
		return (ENOTCONN);
	if (so->so_state & SS_ISDISCONNECTING)
		return (EALREADY);
	error = (*so->so_proto->pr_usrreqs->pru_disconnect)(so);
	return (error);
}

#ifdef ZERO_COPY_SOCKETS
struct so_zerocopy_stats {
	int size_ok;
	int align_ok;
	int found_ifp;
};
struct so_zerocopy_stats so_zerocp_stats = { 0, 0, 0 };
#include <netinet/in.h>
#include <net/route.h>
#include <netinet/in_pcb.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>

/*
 * sosend_copyin() is only used if zero copy sockets are enabled.  Otherwise
 * sosend_dgram() and sosend_generic() use m_uiotombuf().
 *
 * sosend_copyin() accepts a uio and prepares an mbuf chain holding part or
 * all of the data referenced by the uio.  If desired, it uses zero-copy.
 * *space will be updated to reflect data copied in.
 *
 * NB: If atomic I/O is requested, the caller must already have checked that
 * space can hold resid bytes.
 *
 * NB: In the event of an error, the caller may need to free the partial
 * chain pointed to by *mpp.  The contents of both *uio and *space may be
 * modified even in the case of an error.
 */
static int
sosend_copyin(struct uio *uio, struct mbuf **retmp, int atomic, long *space,
    int flags)
{
	struct mbuf *m, **mp, *top;
	long len, resid;
	int error;
#ifdef ZERO_COPY_SOCKETS
	int cow_send;
#endif

	*retmp = top = NULL;
	mp = &top;
	len = 0;
	resid = uio->uio_resid;
	error = 0;
	do {
#ifdef ZERO_COPY_SOCKETS
		cow_send = 0;
#endif /* ZERO_COPY_SOCKETS */
		if (resid >= MINCLSIZE) {
#ifdef ZERO_COPY_SOCKETS
			if (top == NULL) {
				m = m_gethdr(M_WAITOK, MT_DATA);
				m->m_pkthdr.len = 0;
				m->m_pkthdr.rcvif = NULL;
			} else
				m = m_get(M_WAITOK, MT_DATA);
			if (so_zero_copy_send &&
			    resid >= PAGE_SIZE &&
			    *space >= PAGE_SIZE &&
			    uio->uio_iov->iov_len >= PAGE_SIZE) {
				so_zerocp_stats.size_ok++;
				so_zerocp_stats.align_ok++;
				cow_send = socow_setup(m, uio);
				len = cow_send;
			}
			if (!cow_send) {
				m_clget(m, M_WAITOK);
				len = min(min(MCLBYTES, resid), *space);
			}
#else /* ZERO_COPY_SOCKETS */
			if (top == NULL) {
				m = m_getcl(M_WAIT, MT_DATA, M_PKTHDR);
				m->m_pkthdr.len = 0;
				m->m_pkthdr.rcvif = NULL;
			} else
				m = m_getcl(M_WAIT, MT_DATA, 0);
			len = min(min(MCLBYTES, resid), *space);
#endif /* ZERO_COPY_SOCKETS */
		} else {
			if (top == NULL) {
				m = m_gethdr(M_WAIT, MT_DATA);
				m->m_pkthdr.len = 0;
				m->m_pkthdr.rcvif = NULL;

				len = min(min(MHLEN, resid), *space);
				/*
				 * For datagram protocols, leave room
				 * for protocol headers in first mbuf.
				 */
				if (atomic && m && len < MHLEN)
					MH_ALIGN(m, len);
			} else {
				m = m_get(M_WAIT, MT_DATA);
				len = min(min(MLEN, resid), *space);
			}
		}
		if (m == NULL) {
			error = ENOBUFS;
			goto out;
		}

		*space -= len;
#ifdef ZERO_COPY_SOCKETS
		if (cow_send)
			error = 0;
		else
#endif /* ZERO_COPY_SOCKETS */
			error = uiomove(mtod(m, void *), (int)len, uio);
		resid = uio->uio_resid;
		m->m_len = len;
		*mp = m;
		top->m_pkthdr.len += len;
		if (error)
			goto out;
		mp = &m->m_next;
		if (resid <= 0) {
			if (flags & MSG_EOR)
				top->m_flags |= M_EOR;
			break;
		}
	} while (*space > 0 && atomic);
out:
	*retmp = top;
	return (error);
}
#endif /* ZERO_COPY_SOCKETS */

#define	SBLOCKWAIT(f)	(((f) & MSG_DONTWAIT) ? 0 : SBL_WAIT)

int
sosend_dgram(struct socket *so, struct sockaddr *addr, struct uio *uio,
    struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
{
	long space, resid;
	int clen = 0, error, dontroute;
#ifdef ZERO_COPY_SOCKETS
	int atomic = sosendallatonce(so) || top;
#endif

	KASSERT(so->so_type == SOCK_DGRAM, ("sodgram_send: !SOCK_DGRAM"));
	KASSERT(so->so_proto->pr_flags & PR_ATOMIC,
	    ("sodgram_send: !PR_ATOMIC"));

	if (uio != NULL)
		resid = uio->uio_resid;
	else
		resid = top->m_pkthdr.len;
	/*
	 * In theory resid should be unsigned.  However, space must be
	 * signed, as it might be less than 0 if we over-committed, and we
	 * must use a signed comparison of space and resid.  On the other
	 * hand, a negative resid causes us to loop sending 0-length
	 * segments to the protocol.
	 *
	 * Also check to make sure that MSG_EOR isn't used on SOCK_STREAM
	 * type sockets since that's an error.
	 */
	if (resid < 0) {
		error = EINVAL;
		goto out;
	}

	dontroute =
	    (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0;
	if (td != NULL)
		td->td_ru.ru_msgsnd++;
	if (control != NULL)
		clen = control->m_len;

	SOCKBUF_LOCK(&so->so_snd);
	if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
		SOCKBUF_UNLOCK(&so->so_snd);
		error = EPIPE;
		goto out;
	}
	if (so->so_error) {
		error = so->so_error;
		so->so_error = 0;
		SOCKBUF_UNLOCK(&so->so_snd);
		goto out;
	}
	if ((so->so_state & SS_ISCONNECTED) == 0) {
		/*
		 * `sendto' and `sendmsg' are allowed on a connection-based
		 * socket if it supports implied connect.  Return ENOTCONN if
		 * not connected and no address is supplied.
		 */
		if ((so->so_proto->pr_flags & PR_CONNREQUIRED) &&
		    (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) {
			if ((so->so_state & SS_ISCONFIRMING) == 0 &&
			    !(resid == 0 && clen != 0)) {
				SOCKBUF_UNLOCK(&so->so_snd);
				error = ENOTCONN;
				goto out;
			}
		} else if (addr == NULL) {
			if (so->so_proto->pr_flags & PR_CONNREQUIRED)
				error = ENOTCONN;
			else
				error = EDESTADDRREQ;
			SOCKBUF_UNLOCK(&so->so_snd);
			goto out;
		}
	}

	/*
	 * Do we need MSG_OOB support in SOCK_DGRAM?  Signs here may be a
	 * problem and need fixing.
	 */
	space = sbspace(&so->so_snd);
	if (flags & MSG_OOB)
		space += 1024;
	space -= clen;
	SOCKBUF_UNLOCK(&so->so_snd);
	if (resid > space) {
		error = EMSGSIZE;
		goto out;
	}
	if (uio == NULL) {
		resid = 0;
		if (flags & MSG_EOR)
			top->m_flags |= M_EOR;
	} else {
#ifdef ZERO_COPY_SOCKETS
		error = sosend_copyin(uio, &top, atomic, &space, flags);
		if (error)
			goto out;
#else
		/*
		 * Copy the data from userland into a mbuf chain.
		 * If no data is to be copied in, a single empty mbuf
		 * is returned.
		 */
		top = m_uiotombuf(uio, M_WAITOK, space, max_hdr,
		    (M_PKTHDR | ((flags & MSG_EOR) ? M_EOR : 0)));
		if (top == NULL) {
			error = EFAULT;	/* only possible error */
			goto out;
		}
		space -= resid - uio->uio_resid;
#endif
		resid = uio->uio_resid;
	}
	KASSERT(resid == 0, ("sosend_dgram: resid != 0"));
	/*
	 * XXXRW: Frobbing SO_DONTROUTE here is even worse without sblock
	 * than with.
	 */
	if (dontroute) {
		SOCK_LOCK(so);
		so->so_options |= SO_DONTROUTE;
		SOCK_UNLOCK(so);
	}
	/*
	 * XXX all the SBS_CANTSENDMORE checks previously done could be out
	 * of date.  We could have received a reset packet in an interrupt or
	 * maybe we slept while doing page faults in uiomove() etc.  We could
	 * probably recheck again inside the locking protection here, but
	 * there are probably other places that this also happens.  We must
	 * rethink this.
	 */
	error = (*so->so_proto->pr_usrreqs->pru_send)(so,
	    (flags & MSG_OOB) ? PRUS_OOB :
	/*
	 * If the user set MSG_EOF, the protocol understands this flag, and
	 * there is nothing left to send, then use PRU_SEND_EOF instead of
	 * PRU_SEND.
	 */
	    ((flags & MSG_EOF) &&
	     (so->so_proto->pr_flags & PR_IMPLOPCL) &&
	     (resid <= 0)) ?
		PRUS_EOF :
	    /* If there is more to send set PRUS_MORETOCOME */
	    (resid > 0 && space > 0) ? PRUS_MORETOCOME : 0,
	    top, addr, control, td);
	if (dontroute) {
		SOCK_LOCK(so);
		so->so_options &= ~SO_DONTROUTE;
		SOCK_UNLOCK(so);
	}
	clen = 0;
	control = NULL;
	top = NULL;
out:
	if (top != NULL)
		m_freem(top);
	if (control != NULL)
		m_freem(control);
	return (error);
}
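/*
 * Illustrative sketch (userland fragment, compiled out; `s' and `dst' are
 * assumed): the EMSGSIZE check in sosend_dgram() above rejects any datagram
 * larger than the free space in the send buffer rather than splitting it.
 */
#if 0
	char big[1024 * 1024];	/* far larger than a default send buffer */

	if (sendto(s, big, sizeof(big), 0,
	    (struct sockaddr *)&dst, sizeof(dst)) == -1 &&
	    errno == EMSGSIZE)
		warnx("datagram exceeds socket send buffer space");
#endif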
/*
 * Send on a socket.  If send must go all at once and message is larger than
 * send buffering, then hard error.  Lock against other senders.  If must go
 * all at once and not enough room now, then inform user that this would
 * block and do nothing.  Otherwise, if nonblocking, send as much as
 * possible.  The data to be sent is described by "uio" if nonzero, otherwise
 * by the mbuf chain "top" (which must be null if uio is not).  Data provided
 * in mbuf chain must be small enough to send all at once.
 *
 * Returns nonzero on error, timeout or signal; callers must check for short
 * counts if EINTR/ERESTART are returned.  Data and control buffers are freed
 * on return.
 */
int
sosend_generic(struct socket *so, struct sockaddr *addr, struct uio *uio,
    struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
{
	long space, resid;
	int clen = 0, error, dontroute;
	int atomic = sosendallatonce(so) || top;

	if (uio != NULL)
		resid = uio->uio_resid;
	else
		resid = top->m_pkthdr.len;
	/*
	 * In theory resid should be unsigned.  However, space must be
	 * signed, as it might be less than 0 if we over-committed, and we
	 * must use a signed comparison of space and resid.  On the other
	 * hand, a negative resid causes us to loop sending 0-length
	 * segments to the protocol.
	 *
	 * Also check to make sure that MSG_EOR isn't used on SOCK_STREAM
	 * type sockets since that's an error.
	 */
	if (resid < 0 || (so->so_type == SOCK_STREAM && (flags & MSG_EOR))) {
		error = EINVAL;
		goto out;
	}

	dontroute =
	    (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
	    (so->so_proto->pr_flags & PR_ATOMIC);
	if (td != NULL)
		td->td_ru.ru_msgsnd++;
	if (control != NULL)
		clen = control->m_len;

	error = sblock(&so->so_snd, SBLOCKWAIT(flags));
	if (error)
		goto out;

restart:
	do {
		SOCKBUF_LOCK(&so->so_snd);
		if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
			SOCKBUF_UNLOCK(&so->so_snd);
			error = EPIPE;
			goto release;
		}
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			SOCKBUF_UNLOCK(&so->so_snd);
			goto release;
		}
		if ((so->so_state & SS_ISCONNECTED) == 0) {
			/*
			 * `sendto' and `sendmsg' are allowed on a connection-
			 * based socket if it supports implied connect.
			 * Return ENOTCONN if not connected and no address is
			 * supplied.
			 */
			if ((so->so_proto->pr_flags & PR_CONNREQUIRED) &&
			    (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) {
				if ((so->so_state & SS_ISCONFIRMING) == 0 &&
				    !(resid == 0 && clen != 0)) {
					SOCKBUF_UNLOCK(&so->so_snd);
					error = ENOTCONN;
					goto release;
				}
			} else if (addr == NULL) {
				SOCKBUF_UNLOCK(&so->so_snd);
				if (so->so_proto->pr_flags & PR_CONNREQUIRED)
					error = ENOTCONN;
				else
					error = EDESTADDRREQ;
				goto release;
			}
		}
		space = sbspace(&so->so_snd);
		if (flags & MSG_OOB)
			space += 1024;
		if ((atomic && resid > so->so_snd.sb_hiwat) ||
		    clen > so->so_snd.sb_hiwat) {
			SOCKBUF_UNLOCK(&so->so_snd);
			error = EMSGSIZE;
			goto release;
		}
		if (space < resid + clen &&
		    (atomic || space < so->so_snd.sb_lowat || space < clen)) {
			if ((so->so_state & SS_NBIO) || (flags & MSG_NBIO)) {
				SOCKBUF_UNLOCK(&so->so_snd);
				error = EWOULDBLOCK;
				goto release;
			}
			error = sbwait(&so->so_snd);
			SOCKBUF_UNLOCK(&so->so_snd);
			if (error)
				goto release;
			goto restart;
		}
		SOCKBUF_UNLOCK(&so->so_snd);
		space -= clen;
		do {
			if (uio == NULL) {
				resid = 0;
				if (flags & MSG_EOR)
					top->m_flags |= M_EOR;
			} else {
#ifdef ZERO_COPY_SOCKETS
				error = sosend_copyin(uio, &top, atomic,
				    &space, flags);
				if (error != 0)
					goto release;
#else
				/*
				 * Copy the data from userland into a mbuf
				 * chain.  If no data is to be copied in,
				 * a single empty mbuf is returned.
				 */
				top = m_uiotombuf(uio, M_WAITOK, space,
				    (atomic ? max_hdr : 0),
				    (atomic ? M_PKTHDR : 0) |
				    ((flags & MSG_EOR) ? M_EOR : 0));
				if (top == NULL) {
					error = EFAULT;	/* only possible error */
					goto release;
				}
				space -= resid - uio->uio_resid;
#endif
				resid = uio->uio_resid;
			}
			if (dontroute) {
				SOCK_LOCK(so);
				so->so_options |= SO_DONTROUTE;
				SOCK_UNLOCK(so);
			}
			/*
			 * XXX all the SBS_CANTSENDMORE checks previously
			 * done could be out of date.  We could have received
			 * a reset packet in an interrupt or maybe we slept
			 * while doing page faults in uiomove() etc.  We
			 * could probably recheck again inside the locking
			 * protection here, but there are probably other
			 * places that this also happens.  We must rethink
			 * this.
			 */
			error = (*so->so_proto->pr_usrreqs->pru_send)(so,
			    (flags & MSG_OOB) ? PRUS_OOB :
			/*
			 * If the user set MSG_EOF, the protocol understands
			 * this flag, and there is nothing left to send, then
			 * use PRU_SEND_EOF instead of PRU_SEND.
			 */
			    ((flags & MSG_EOF) &&
			     (so->so_proto->pr_flags & PR_IMPLOPCL) &&
			     (resid <= 0)) ?
				PRUS_EOF :
			    /* If there is more to send set PRUS_MORETOCOME. */
			    (resid > 0 && space > 0) ? PRUS_MORETOCOME : 0,
			    top, addr, control, td);
			if (dontroute) {
				SOCK_LOCK(so);
				so->so_options &= ~SO_DONTROUTE;
				SOCK_UNLOCK(so);
			}
			clen = 0;
			control = NULL;
			top = NULL;
			if (error)
				goto release;
		} while (resid && space > 0);
	} while (resid);

release:
	sbunlock(&so->so_snd);
out:
	if (top != NULL)
		m_freem(top);
	if (control != NULL)
		m_freem(control);
	return (error);
}

int
sosend(struct socket *so, struct sockaddr *addr, struct uio *uio,
    struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
{

	/* XXXRW: Temporary debugging. */
	KASSERT(so->so_proto->pr_usrreqs->pru_sosend != sosend,
	    ("sosend: protocol calls sosend"));

	return (so->so_proto->pr_usrreqs->pru_sosend(so, addr, uio, top,
	    control, flags, td));
}
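/*
 * Illustrative sketch (hypothetical entry, compiled out): sosend() above
 * simply dispatches through pr_usrreqs, so a protocol selects its
 * implementation by pointing pru_sosend (and pru_soreceive) at the generic
 * or datagram-optimized routines; UDP, for example, uses sosend_dgram().
 */
#if 0
static struct pr_usrreqs example_usrreqs = {
	/* ... other methods elided ... */
	.pru_sosend =		sosend_dgram,
	.pru_soreceive =	soreceive_dgram,
};
#endif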
/*
 * The part of soreceive() that implements reading non-inline out-of-band
 * data from a socket.  For more complete comments, see soreceive(), from
 * which this code originated.
 *
 * Note that soreceive_rcvoob(), unlike the remainder of soreceive(), is
 * unable to return an mbuf chain to the caller.
 */
static int
soreceive_rcvoob(struct socket *so, struct uio *uio, int flags)
{
	struct protosw *pr = so->so_proto;
	struct mbuf *m;
	int error;

	KASSERT(flags & MSG_OOB, ("soreceive_rcvoob: (flags & MSG_OOB) == 0"));

	m = m_get(M_WAIT, MT_DATA);
	error = (*pr->pr_usrreqs->pru_rcvoob)(so, m, flags & MSG_PEEK);
	if (error)
		goto bad;
	do {
#ifdef ZERO_COPY_SOCKETS
		if (so_zero_copy_receive) {
			int disposable;

			if ((m->m_flags & M_EXT)
			    && (m->m_ext.ext_type == EXT_DISPOSABLE))
				disposable = 1;
			else
				disposable = 0;

			error = uiomoveco(mtod(m, void *),
			    min(uio->uio_resid, m->m_len),
			    uio, disposable);
		} else
#endif /* ZERO_COPY_SOCKETS */
			error = uiomove(mtod(m, void *),
			    (int) min(uio->uio_resid, m->m_len), uio);
		m = m_free(m);
	} while (uio->uio_resid && error == 0 && m);
bad:
	if (m != NULL)
		m_freem(m);
	return (error);
}
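/*
 * Illustrative sketch (userland fragment, compiled out; `s' is an assumed
 * connected stream socket): soreceive_rcvoob() is reached when a consumer
 * requests non-inline out-of-band data, e.g. the TCP urgent byte.
 */
#if 0
	char oobc;

	if (recv(s, &oobc, 1, MSG_OOB) == 1)	/* dispatches to pru_rcvoob */
		printf("urgent byte: 0x%02x\n", (unsigned char)oobc);
#endif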
/*
 * Following replacement or removal of the first mbuf on the first mbuf chain
 * of a socket buffer, push necessary state changes back into the socket
 * buffer so that other consumers see the values consistently.  'nextrecord'
 * is the caller's locally stored value of the original value of
 * sb->sb_mb->m_nextpkt which must be restored when the lead mbuf changes.
 * NOTE: 'nextrecord' may be NULL.
 */
static __inline void
sockbuf_pushsync(struct sockbuf *sb, struct mbuf *nextrecord)
{

	SOCKBUF_LOCK_ASSERT(sb);
	/*
	 * First, update for the new value of nextrecord.  If necessary, make
	 * it the first record.
	 */
	if (sb->sb_mb != NULL)
		sb->sb_mb->m_nextpkt = nextrecord;
	else
		sb->sb_mb = nextrecord;

	/*
	 * Now update any dependent socket buffer fields to reflect the new
	 * state.  This is an expanded inline of SB_EMPTY_FIXUP(), with the
	 * addition of a second clause that takes care of the case where
	 * sb_mb has been updated, but remains the last record.
	 */
	if (sb->sb_mb == NULL) {
		sb->sb_mbtail = NULL;
		sb->sb_lastrecord = NULL;
	} else if (sb->sb_mb->m_nextpkt == NULL)
		sb->sb_lastrecord = sb->sb_mb;
}


/*
 * Implement receive operations on a socket.  We depend on the way that
 * records are added to the sockbuf by sbappend.  In particular, each record
 * (mbufs linked through m_next) must begin with an address if the protocol
 * so specifies, followed by an optional mbuf or mbufs containing ancillary
 * data, and then zero or more mbufs of data.  In order to allow parallelism
 * between network receive and copying to user space, as well as avoid
 * sleeping with a mutex held, we release the socket buffer mutex during the
 * user space copy.  Although the sockbuf is locked, new data may still be
 * appended, and thus we must maintain consistency of the sockbuf during that
 * time.
 *
 * The caller may receive the data as a single mbuf chain by supplying an
 * mbuf **mp0 for use in returning the chain.  The uio is then used only for
 * the count in uio_resid.
 */
int
soreceive_generic(struct socket *so, struct sockaddr **psa, struct uio *uio,
    struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
{
	struct mbuf *m, **mp;
	int flags, len, error, offset;
	struct protosw *pr = so->so_proto;
	struct mbuf *nextrecord;
	int moff, type = 0;
	int orig_resid = uio->uio_resid;

	mp = mp0;
	if (psa != NULL)
		*psa = NULL;
	if (controlp != NULL)
		*controlp = NULL;
	if (flagsp != NULL)
		flags = *flagsp &~ MSG_EOR;
	else
		flags = 0;
	if (flags & MSG_OOB)
		return (soreceive_rcvoob(so, uio, flags));
	if (mp != NULL)
		*mp = NULL;
	if ((pr->pr_flags & PR_WANTRCVD) && (so->so_state & SS_ISCONFIRMING)
	    && uio->uio_resid)
		(*pr->pr_usrreqs->pru_rcvd)(so, 0);

	error = sblock(&so->so_rcv, SBLOCKWAIT(flags));
	if (error)
		return (error);

restart:
	SOCKBUF_LOCK(&so->so_rcv);
	m = so->so_rcv.sb_mb;
	/*
	 * If we have less data than requested, block awaiting more (subject
	 * to any timeout) if:
	 *   1. the current count is less than the low water mark, or
	 *   2. MSG_WAITALL is set, and it is possible to do the entire
	 *	receive operation at once if we block (resid <= hiwat), and
	 *   3. MSG_DONTWAIT is not set.
	 * If MSG_WAITALL is set but resid is larger than the receive buffer,
	 * we have to do the receive in sections, and thus risk returning a
	 * short count if a timeout or signal occurs after we start.
	 */
	if (m == NULL || (((flags & MSG_DONTWAIT) == 0 &&
	    so->so_rcv.sb_cc < uio->uio_resid) &&
	    (so->so_rcv.sb_cc < so->so_rcv.sb_lowat ||
	    ((flags & MSG_WAITALL) && uio->uio_resid <= so->so_rcv.sb_hiwat)) &&
	    m->m_nextpkt == NULL && (pr->pr_flags & PR_ATOMIC) == 0)) {
		KASSERT(m != NULL || !so->so_rcv.sb_cc,
		    ("receive: m == %p so->so_rcv.sb_cc == %u",
		    m, so->so_rcv.sb_cc));
		if (so->so_error) {
			if (m != NULL)
				goto dontblock;
			error = so->so_error;
			if ((flags & MSG_PEEK) == 0)
				so->so_error = 0;
			SOCKBUF_UNLOCK(&so->so_rcv);
			goto release;
		}
		SOCKBUF_LOCK_ASSERT(&so->so_rcv);
		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
			if (m == NULL) {
				SOCKBUF_UNLOCK(&so->so_rcv);
				goto release;
			} else
				goto dontblock;
		}
		for (; m != NULL; m = m->m_next)
			if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) {
				m = so->so_rcv.sb_mb;
				goto dontblock;
			}
		if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
		    (so->so_proto->pr_flags & PR_CONNREQUIRED)) {
			SOCKBUF_UNLOCK(&so->so_rcv);
			error = ENOTCONN;
			goto release;
		}
		if (uio->uio_resid == 0) {
			SOCKBUF_UNLOCK(&so->so_rcv);
			goto release;
		}
		if ((so->so_state & SS_NBIO) ||
		    (flags & (MSG_DONTWAIT|MSG_NBIO))) {
			SOCKBUF_UNLOCK(&so->so_rcv);
			error = EWOULDBLOCK;
			goto release;
		}
		SBLASTRECORDCHK(&so->so_rcv);
		SBLASTMBUFCHK(&so->so_rcv);
		error = sbwait(&so->so_rcv);
		SOCKBUF_UNLOCK(&so->so_rcv);
		if (error)
			goto release;
		goto restart;
	}
dontblock:
	/*
	 * From this point onward, we maintain 'nextrecord' as a cache of the
	 * pointer to the next record in the socket buffer.  We must keep the
	 * various socket buffer pointers and local stack versions of the
	 * pointers in sync, pushing out modifications before dropping the
	 * socket buffer mutex, and re-reading them when picking it up.
	 *
	 * Otherwise, we will race with the network stack appending new data
	 * or records onto the socket buffer by using inconsistent/stale
	 * versions of the field, possibly resulting in socket buffer
	 * corruption.
	 *
	 * By holding the high-level sblock(), we prevent simultaneous
	 * readers from pulling off the front of the socket buffer.
	 */
	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
	if (uio->uio_td)
		uio->uio_td->td_ru.ru_msgrcv++;
	KASSERT(m == so->so_rcv.sb_mb, ("soreceive: m != so->so_rcv.sb_mb"));
	SBLASTRECORDCHK(&so->so_rcv);
	SBLASTMBUFCHK(&so->so_rcv);
	nextrecord = m->m_nextpkt;
	if (pr->pr_flags & PR_ADDR) {
		KASSERT(m->m_type == MT_SONAME,
		    ("m->m_type == %d", m->m_type));
		orig_resid = 0;
		if (psa != NULL)
			*psa = sodupsockaddr(mtod(m, struct sockaddr *),
			    M_NOWAIT);
		if (flags & MSG_PEEK) {
			m = m->m_next;
		} else {
			sbfree(&so->so_rcv, m);
			so->so_rcv.sb_mb = m_free(m);
			m = so->so_rcv.sb_mb;
			sockbuf_pushsync(&so->so_rcv, nextrecord);
		}
	}

	/*
	 * Process one or more MT_CONTROL mbufs present before any data mbufs
	 * in the first mbuf chain on the socket buffer.  If MSG_PEEK, we
	 * just copy the data; if !MSG_PEEK, we call into the protocol to
	 * perform externalization (or freeing if controlp == NULL).
	 */
	if (m != NULL && m->m_type == MT_CONTROL) {
		struct mbuf *cm = NULL, *cmn;
		struct mbuf **cme = &cm;

		do {
			if (flags & MSG_PEEK) {
				if (controlp != NULL) {
					*controlp = m_copy(m, 0, m->m_len);
					controlp = &(*controlp)->m_next;
				}
				m = m->m_next;
			} else {
				sbfree(&so->so_rcv, m);
				so->so_rcv.sb_mb = m->m_next;
				m->m_next = NULL;
				*cme = m;
				cme = &(*cme)->m_next;
				m = so->so_rcv.sb_mb;
			}
		} while (m != NULL && m->m_type == MT_CONTROL);
		if ((flags & MSG_PEEK) == 0)
			sockbuf_pushsync(&so->so_rcv, nextrecord);
		while (cm != NULL) {
			cmn = cm->m_next;
			cm->m_next = NULL;
			if (pr->pr_domain->dom_externalize != NULL) {
				SOCKBUF_UNLOCK(&so->so_rcv);
				error = (*pr->pr_domain->dom_externalize)
				    (cm, controlp);
				SOCKBUF_LOCK(&so->so_rcv);
			} else if (controlp != NULL)
				*controlp = cm;
			else
				m_freem(cm);
			if (controlp != NULL) {
				orig_resid = 0;
				while (*controlp != NULL)
					controlp = &(*controlp)->m_next;
			}
			cm = cmn;
		}
		if (m != NULL)
			nextrecord = so->so_rcv.sb_mb->m_nextpkt;
		else
			nextrecord = so->so_rcv.sb_mb;
		orig_resid = 0;
	}
	if (m != NULL) {
		if ((flags & MSG_PEEK) == 0) {
			KASSERT(m->m_nextpkt == nextrecord,
			    ("soreceive: post-control, nextrecord !sync"));
			if (nextrecord == NULL) {
				KASSERT(so->so_rcv.sb_mb == m,
				    ("soreceive: post-control, sb_mb!=m"));
				KASSERT(so->so_rcv.sb_lastrecord == m,
				    ("soreceive: post-control, lastrecord!=m"));
			}
		}
		type = m->m_type;
		if (type == MT_OOBDATA)
			flags |= MSG_OOB;
	} else {
		if ((flags & MSG_PEEK) == 0) {
			KASSERT(so->so_rcv.sb_mb == nextrecord,
			    ("soreceive: sb_mb != nextrecord"));
			if (so->so_rcv.sb_mb == NULL) {
				KASSERT(so->so_rcv.sb_lastrecord == NULL,
				    ("soreceive: sb_lastrecord != NULL"));
			}
		}
	}
	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
	SBLASTRECORDCHK(&so->so_rcv);
	SBLASTMBUFCHK(&so->so_rcv);

	/*
	 * Now continue to read any data mbufs off of the head of the socket
	 * buffer until the read request is satisfied.  Note that 'type' is
	 * used to store the type of any mbuf reads that have happened so far
	 * such that soreceive() can stop reading if the type changes, which
	 * causes soreceive() to return only one of regular data and inline
	 * out-of-band data in a single socket receive operation.
	 */
	moff = 0;
	offset = 0;
	while (m != NULL && uio->uio_resid > 0 && error == 0) {
		/*
		 * If the type of mbuf has changed since the last mbuf
		 * examined ('type'), end the receive operation.
		 */
		SOCKBUF_LOCK_ASSERT(&so->so_rcv);
		if (m->m_type == MT_OOBDATA) {
			if (type != MT_OOBDATA)
				break;
		} else if (type == MT_OOBDATA)
			break;
		else
			KASSERT(m->m_type == MT_DATA,
			    ("m->m_type == %d", m->m_type));
		so->so_rcv.sb_state &= ~SBS_RCVATMARK;
		len = uio->uio_resid;
		if (so->so_oobmark && len > so->so_oobmark - offset)
			len = so->so_oobmark - offset;
		if (len > m->m_len - moff)
			len = m->m_len - moff;
		/*
		 * If mp is set, just pass back the mbufs.  Otherwise copy
		 * them out via the uio, then free.  Sockbuf must be
		 * consistent here (points to current mbuf, it points to next
		 * record) when we drop priority; we must note any additions
		 * to the sockbuf when we block interrupts again.
		 */
		if (mp == NULL) {
			SOCKBUF_LOCK_ASSERT(&so->so_rcv);
			SBLASTRECORDCHK(&so->so_rcv);
			SBLASTMBUFCHK(&so->so_rcv);
			SOCKBUF_UNLOCK(&so->so_rcv);
#ifdef ZERO_COPY_SOCKETS
			if (so_zero_copy_receive) {
				int disposable;

				if ((m->m_flags & M_EXT)
				    && (m->m_ext.ext_type == EXT_DISPOSABLE))
					disposable = 1;
				else
					disposable = 0;

				error = uiomoveco(mtod(m, char *) + moff,
				    (int)len, uio,
				    disposable);
			} else
#endif /* ZERO_COPY_SOCKETS */
				error = uiomove(mtod(m, char *) + moff,
				    (int)len, uio);
			SOCKBUF_LOCK(&so->so_rcv);
			if (error) {
				/*
				 * The MT_SONAME mbuf has already been removed
				 * from the record, so it is necessary to
				 * remove the data mbufs, if any, to preserve
				 * the invariant in the case of PR_ADDR that
				 * requires MT_SONAME mbufs at the head of
				 * each record.
				 */
				if (m && pr->pr_flags & PR_ATOMIC &&
				    ((flags & MSG_PEEK) == 0))
					(void)sbdroprecord_locked(&so->so_rcv);
				SOCKBUF_UNLOCK(&so->so_rcv);
				goto release;
			}
		} else
			uio->uio_resid -= len;
		SOCKBUF_LOCK_ASSERT(&so->so_rcv);
		if (len == m->m_len - moff) {
			if (m->m_flags & M_EOR)
				flags |= MSG_EOR;
			if (flags & MSG_PEEK) {
				m = m->m_next;
				moff = 0;
			} else {
				nextrecord = m->m_nextpkt;
				sbfree(&so->so_rcv, m);
				if (mp != NULL) {
					*mp = m;
					mp = &m->m_next;
					so->so_rcv.sb_mb = m = m->m_next;
					*mp = NULL;
				} else {
					so->so_rcv.sb_mb = m_free(m);
					m = so->so_rcv.sb_mb;
				}
				sockbuf_pushsync(&so->so_rcv, nextrecord);
				SBLASTRECORDCHK(&so->so_rcv);
				SBLASTMBUFCHK(&so->so_rcv);
			}
		} else {
			if (flags & MSG_PEEK)
				moff += len;
			else {
				if (mp != NULL) {
					int copy_flag;

					if (flags & MSG_DONTWAIT)
						copy_flag = M_DONTWAIT;
					else
						copy_flag = M_WAIT;
					if (copy_flag == M_WAIT)
						SOCKBUF_UNLOCK(&so->so_rcv);
					*mp = m_copym(m, 0, len, copy_flag);
					if (copy_flag == M_WAIT)
						SOCKBUF_LOCK(&so->so_rcv);
					if (*mp == NULL) {
						/*
						 * m_copym() couldn't
						 * allocate an mbuf.  Adjust
						 * uio_resid back (it was
						 * adjusted down by len
						 * bytes, which we didn't end
						 * up "copying" over).
						 */
						uio->uio_resid += len;
						break;
					}
				}
				m->m_data += len;
				m->m_len -= len;
				so->so_rcv.sb_cc -= len;
			}
		}
		SOCKBUF_LOCK_ASSERT(&so->so_rcv);
		if (so->so_oobmark) {
			if ((flags & MSG_PEEK) == 0) {
				so->so_oobmark -= len;
				if (so->so_oobmark == 0) {
					so->so_rcv.sb_state |= SBS_RCVATMARK;
					break;
				}
			} else {
				offset += len;
				if (offset == so->so_oobmark)
					break;
			}
		}
		if (flags & MSG_EOR)
			break;
		/*
		 * If the MSG_WAITALL flag is set (for non-atomic socket), we
		 * must not quit until "uio->uio_resid == 0" or an error
		 * termination.  If a signal/timeout occurs, return with a
		 * short count but without error.  Keep sockbuf locked
		 * against other readers.
		 */
		while (flags & MSG_WAITALL && m == NULL && uio->uio_resid > 0 &&
		    !sosendallatonce(so) && nextrecord == NULL) {
			SOCKBUF_LOCK_ASSERT(&so->so_rcv);
			if (so->so_error ||
			    so->so_rcv.sb_state & SBS_CANTRCVMORE)
				break;
			/*
			 * Notify the protocol that some data has been
			 * drained before blocking.
			 */
1780			 */
1781			if (pr->pr_flags & PR_WANTRCVD) {
1782				SOCKBUF_UNLOCK(&so->so_rcv);
1783				(*pr->pr_usrreqs->pru_rcvd)(so, flags);
1784				SOCKBUF_LOCK(&so->so_rcv);
1785			}
1786			SBLASTRECORDCHK(&so->so_rcv);
1787			SBLASTMBUFCHK(&so->so_rcv);
1788			error = sbwait(&so->so_rcv);
1789			if (error) {
1790				SOCKBUF_UNLOCK(&so->so_rcv);
1791				goto release;
1792			}
1793			m = so->so_rcv.sb_mb;
1794			if (m != NULL)
1795				nextrecord = m->m_nextpkt;
1796		}
1797	}
1798
1799	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1800	if (m != NULL && pr->pr_flags & PR_ATOMIC) {
1801		flags |= MSG_TRUNC;
1802		if ((flags & MSG_PEEK) == 0)
1803			(void) sbdroprecord_locked(&so->so_rcv);
1804	}
1805	if ((flags & MSG_PEEK) == 0) {
1806		if (m == NULL) {
1807			/*
1808			 * First part is an inline SB_EMPTY_FIXUP().  Second
1809			 * part makes sure sb_lastrecord is up-to-date if
1810			 * there is still data in the socket buffer.
1811			 */
1812			so->so_rcv.sb_mb = nextrecord;
1813			if (so->so_rcv.sb_mb == NULL) {
1814				so->so_rcv.sb_mbtail = NULL;
1815				so->so_rcv.sb_lastrecord = NULL;
1816			} else if (nextrecord->m_nextpkt == NULL)
1817				so->so_rcv.sb_lastrecord = nextrecord;
1818		}
1819		SBLASTRECORDCHK(&so->so_rcv);
1820		SBLASTMBUFCHK(&so->so_rcv);
1821		/*
1822		 * If soreceive() is being done from the socket callback,
1823		 * then we don't need to generate an ACK to the peer to update
1824		 * the window, since an ACK will be generated on return to TCP.
1825		 */
1826		if (!(flags & MSG_SOCALLBCK) &&
1827		    (pr->pr_flags & PR_WANTRCVD)) {
1828			SOCKBUF_UNLOCK(&so->so_rcv);
1829			(*pr->pr_usrreqs->pru_rcvd)(so, flags);
1830			SOCKBUF_LOCK(&so->so_rcv);
1831		}
1832	}
1833	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1834	if (orig_resid == uio->uio_resid && orig_resid &&
1835	    (flags & MSG_EOR) == 0 && (so->so_rcv.sb_state & SBS_CANTRCVMORE) == 0) {
1836		SOCKBUF_UNLOCK(&so->so_rcv);
1837		goto restart;
1838	}
1839	SOCKBUF_UNLOCK(&so->so_rcv);
1840
1841	if (flagsp != NULL)
1842		*flagsp |= flags;
1843 release:
1844	sbunlock(&so->so_rcv);
1845	return (error);
1846}
1847
1848 /*
1849  * Optimized version of soreceive() for simple datagram cases from userspace;
1850  * this is experimental, and while heavily tested, may contain errors.
1851  */
1852 int
1853 soreceive_dgram(struct socket *so, struct sockaddr **psa, struct uio *uio,
1854     struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
1855 {
1856	struct mbuf *m, *m2;
1857	int flags, len, error, offset;
1858	struct protosw *pr = so->so_proto;
1859	struct mbuf *nextrecord;
1860	int orig_resid = uio->uio_resid;
1861
1862	if (psa != NULL)
1863		*psa = NULL;
1864	if (controlp != NULL)
1865		*controlp = NULL;
1866	if (flagsp != NULL)
1867		flags = *flagsp &~ MSG_EOR;
1868	else
1869		flags = 0;
1870
1871	/*
1872	 * For any complicated cases, fall back to the full
1873	 * soreceive_generic().
1874	 */
1875	if (mp0 != NULL || (flags & MSG_PEEK) || (flags & MSG_OOB))
1876		return (soreceive_generic(so, psa, uio, mp0, controlp,
1877		    flagsp));
1878
1879	/*
1880	 * Enforce restrictions on use.
1881	 */
1882	KASSERT((pr->pr_flags & PR_WANTRCVD) == 0,
1883	    ("soreceive_dgram: wantrcvd"));
1884	KASSERT(pr->pr_flags & PR_ATOMIC, ("soreceive_dgram: !atomic"));
1885	KASSERT((so->so_rcv.sb_state & SBS_RCVATMARK) == 0,
1886	    ("soreceive_dgram: SBS_RCVATMARK"));
1887	KASSERT((so->so_proto->pr_flags & PR_CONNREQUIRED) == 0,
1888	    ("soreceive_dgram: PR_CONNREQUIRED"));
1889
1890 restart:
1891	SOCKBUF_LOCK(&so->so_rcv);
1892	m = so->so_rcv.sb_mb;
1893
1894	/*
1895	 * If we have less data than requested, block awaiting more (subject
1896	 * to any timeout) if:
1897	 *   1.
the current count is less than the low water mark, or 1898 * 2. MSG_WAITALL is set, and it is possible to do the entire 1899 * receive operation at once if we block (resid <= hiwat). 1900 * 3. MSG_DONTWAIT is not set 1901 * If MSG_WAITALL is set but resid is larger than the receive buffer, 1902 * we have to do the receive in sections, and thus risk returning a 1903 * short count if a timeout or signal occurs after we start. 1904 */ 1905 if (m == NULL) { 1906 KASSERT(m != NULL || !so->so_rcv.sb_cc, 1907 ("receive: m == %p so->so_rcv.sb_cc == %u", 1908 m, so->so_rcv.sb_cc)); 1909 if (so->so_error) { 1910 if (m != NULL) 1911 goto dontblock; 1912 error = so->so_error; 1913 so->so_error = 0; 1914 SOCKBUF_UNLOCK(&so->so_rcv); 1915 return (error); 1916 } 1917 SOCKBUF_LOCK_ASSERT(&so->so_rcv); 1918 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 1919 if (m == NULL) { 1920 SOCKBUF_UNLOCK(&so->so_rcv); 1921 return (0); 1922 } else 1923 goto dontblock; 1924 } 1925 if (uio->uio_resid == 0) { 1926 SOCKBUF_UNLOCK(&so->so_rcv); 1927 return (0); 1928 } 1929 if ((so->so_state & SS_NBIO) || 1930 (flags & (MSG_DONTWAIT|MSG_NBIO))) { 1931 SOCKBUF_UNLOCK(&so->so_rcv); 1932 error = EWOULDBLOCK; 1933 return (error); 1934 } 1935 SBLASTRECORDCHK(&so->so_rcv); 1936 SBLASTMBUFCHK(&so->so_rcv); 1937 1938 /* XXXRW: sbwait() may not be as happy without sblock(). */ 1939 error = sbwait(&so->so_rcv); 1940 SOCKBUF_UNLOCK(&so->so_rcv); 1941 if (error) 1942 return (error); 1943 goto restart; 1944 } 1945dontblock: 1946 /* 1947 * From this point onward, we maintain 'nextrecord' as a cache of the 1948 * pointer to the next record in the socket buffer. We must keep the 1949 * various socket buffer pointers and local stack versions of the 1950 * pointers in sync, pushing out modifications before dropping the 1951 * socket buffer mutex, and re-reading them when picking it up. 1952 * 1953 * Otherwise, we will race with the network stack appending new data 1954 * or records onto the socket buffer by using inconsistent/stale 1955 * versions of the field, possibly resulting in socket buffer 1956 * corruption. 1957 * 1958 * By holding the high-level sblock(), we prevent simultaneous 1959 * readers from pulling off the front of the socket buffer. 1960 */ 1961 SOCKBUF_LOCK_ASSERT(&so->so_rcv); 1962 if (uio->uio_td) 1963 uio->uio_td->td_ru.ru_msgrcv++; 1964 KASSERT(m == so->so_rcv.sb_mb, ("soreceive: m != so->so_rcv.sb_mb")); 1965 SBLASTRECORDCHK(&so->so_rcv); 1966 SBLASTMBUFCHK(&so->so_rcv); 1967 nextrecord = m->m_nextpkt; 1968 if (pr->pr_flags & PR_ADDR) { 1969 KASSERT(m->m_type == MT_SONAME, 1970 ("m->m_type == %d", m->m_type)); 1971 orig_resid = 0; 1972 if (psa != NULL) 1973 *psa = sodupsockaddr(mtod(m, struct sockaddr *), 1974 M_NOWAIT); 1975 sbfree(&so->so_rcv, m); 1976 so->so_rcv.sb_mb = m_free(m); 1977 m = so->so_rcv.sb_mb; 1978 sockbuf_pushsync(&so->so_rcv, nextrecord); 1979 } 1980 if (m == NULL) { 1981 /* XXXRW: Can this happen? 
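	 *
	 * Illustrative sketch, not from the original file: for PR_ADDR
	 * protocols the record begins with an MT_SONAME mbuf, which the
	 * code above duplicates into *psa via sodupsockaddr().  A
	 * hypothetical kernel consumer would release that copy with the
	 * M_SONAME malloc type:
	 */
#if 0	/* illustrative sketch only; the consumer function is hypothetical */
static int
example_recv_with_addr(struct socket *so, struct uio *uio)
{
	struct sockaddr *from = NULL;
	int flags = 0, error;

	error = soreceive(so, &from, uio, NULL, NULL, &flags);
	if (from != NULL) {
		/* ... inspect the peer address ... */
		free(from, M_SONAME);	/* allocated by sodupsockaddr() */
	}
	return (error);
}
#endif
	/*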
*/ 1982 SOCKBUF_UNLOCK(&so->so_rcv); 1983 return (0); 1984 } 1985 KASSERT(m->m_nextpkt == nextrecord, 1986 ("soreceive: post-control, nextrecord !sync")); 1987 if (nextrecord == NULL) { 1988 KASSERT(so->so_rcv.sb_mb == m, 1989 ("soreceive: post-control, sb_mb!=m")); 1990 KASSERT(so->so_rcv.sb_lastrecord == m, 1991 ("soreceive: post-control, lastrecord!=m")); 1992 } 1993 1994 SOCKBUF_LOCK_ASSERT(&so->so_rcv); 1995 SBLASTRECORDCHK(&so->so_rcv); 1996 SBLASTMBUFCHK(&so->so_rcv); 1997 KASSERT(m == so->so_rcv.sb_mb, ("soreceive_dgram: m not sb_mb")); 1998 KASSERT(so->so_rcv.sb_mb->m_nextpkt == nextrecord, 1999 ("soreceive_dgram: m_nextpkt != nextrecord")); 2000 2001 /* 2002 * Pull 'm' and its chain off the front of the packet queue. 2003 */ 2004 so->so_rcv.sb_mb = NULL; 2005 sockbuf_pushsync(&so->so_rcv, nextrecord); 2006 2007 /* 2008 * Walk 'm's chain and free that many bytes from the socket buffer. 2009 */ 2010 for (m2 = m; m2 != NULL; m2 = m2->m_next) 2011 sbfree(&so->so_rcv, m2); 2012 2013 /* 2014 * Do a few last checks before we let go of the lock. 2015 */ 2016 SBLASTRECORDCHK(&so->so_rcv); 2017 SBLASTMBUFCHK(&so->so_rcv); 2018 SOCKBUF_UNLOCK(&so->so_rcv); 2019 2020 /* 2021 * Packet to copyout() is now in 'm' and it is disconnected from the 2022 * queue. 2023 * 2024 * Process one or more MT_CONTROL mbufs present before any data mbufs 2025 * in the first mbuf chain on the socket buffer. If MSG_PEEK, we 2026 * just copy the data; if !MSG_PEEK, we call into the protocol to 2027 * perform externalization (or freeing if controlp == NULL). 2028 */ 2029 if (m->m_type == MT_CONTROL) { 2030 struct mbuf *cm = NULL, *cmn; 2031 struct mbuf **cme = &cm; 2032 2033 do { 2034 m2 = m->m_next; 2035 m->m_next = NULL; 2036 *cme = m; 2037 cme = &(*cme)->m_next; 2038 m = m2; 2039 } while (m != NULL && m->m_type == MT_CONTROL); 2040 while (cm != NULL) { 2041 cmn = cm->m_next; 2042 cm->m_next = NULL; 2043 if (pr->pr_domain->dom_externalize != NULL) { 2044 error = (*pr->pr_domain->dom_externalize) 2045 (cm, controlp); 2046 } else if (controlp != NULL) 2047 *controlp = cm; 2048 else 2049 m_freem(cm); 2050 if (controlp != NULL) { 2051 orig_resid = 0; 2052 while (*controlp != NULL) 2053 controlp = &(*controlp)->m_next; 2054 } 2055 cm = cmn; 2056 } 2057 orig_resid = 0; /* XXXRW: why this? */ 2058 } 2059 2060 KASSERT(m->m_type == MT_DATA, ("soreceive_dgram: !data")); 2061 2062 offset = 0; 2063 while (m != NULL && uio->uio_resid > 0) { 2064 len = uio->uio_resid; 2065 if (len > m->m_len) 2066 len = m->m_len; 2067 error = uiomove(mtod(m, char *), (int)len, uio); 2068 if (error) { 2069 m_freem(m); 2070 return (error); 2071 } 2072 m = m_free(m); 2073 } 2074 if (m != NULL && pr->pr_flags & PR_ATOMIC) 2075 flags |= MSG_TRUNC; 2076 m_freem(m); 2077 if (flagsp != NULL) 2078 *flagsp |= flags; 2079 return (0); 2080} 2081 2082int 2083soreceive(struct socket *so, struct sockaddr **psa, struct uio *uio, 2084 struct mbuf **mp0, struct mbuf **controlp, int *flagsp) 2085{ 2086 2087 /* XXXRW: Temporary debugging. 
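	 *
	 * Illustrative sketch, not from the original file: a datagram
	 * protocol opts into this fast path by pointing pru_soreceive
	 * at soreceive_dgram() in its pr_usrreqs, subject to the
	 * KASSERTed restrictions above.  An abbreviated, hypothetical
	 * table:
	 */
#if 0	/* illustrative sketch only; example_* entries are hypothetical */
struct pr_usrreqs example_dgram_usrreqs = {
	.pru_attach =		example_attach,
	.pru_send =		example_send,
	.pru_soreceive =	soreceive_dgram,	/* fast path above */
	.pru_sopoll =		sopoll_generic,
	/* ... remaining entry points ... */
};
#endif
	/*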
	 */
2088	KASSERT(so->so_proto->pr_usrreqs->pru_soreceive != soreceive,
2089	    ("soreceive: protocol calls soreceive"));
2090
2091	return (so->so_proto->pr_usrreqs->pru_soreceive(so, psa, uio, mp0,
2092	    controlp, flagsp));
2093}
2094
2095 int
2096 soshutdown(struct socket *so, int how)
2097 {
2098	struct protosw *pr = so->so_proto;
2099
2100	if (!(how == SHUT_RD || how == SHUT_WR || how == SHUT_RDWR))
2101		return (EINVAL);
2102	if (pr->pr_usrreqs->pru_flush != NULL) {
2103		(*pr->pr_usrreqs->pru_flush)(so, how);
2104	}
2105	if (how != SHUT_WR)
2106		sorflush(so);
2107	if (how != SHUT_RD)
2108		return ((*pr->pr_usrreqs->pru_shutdown)(so));
2109	return (0);
2110}
2111
2112 void
2113 sorflush(struct socket *so)
2114 {
2115	struct sockbuf *sb = &so->so_rcv;
2116	struct protosw *pr = so->so_proto;
2117	struct sockbuf asb;
2118
2119	/*
2120	 * In order to avoid calling dom_dispose with the socket buffer mutex
2121	 * held, and in order to generally avoid holding the lock for a long
2122	 * time, we make a copy of the socket buffer and clear the original
2123	 * (except locks, state).  The new socket buffer copy won't have
2124	 * initialized locks so we can only call routines that won't use or
2125	 * assert those locks.
2126	 *
2127	 * Dislodge threads currently blocked in receive and wait to acquire
2128	 * a lock against other simultaneous readers before clearing the
2129	 * socket buffer.  Don't let our acquire be interrupted by a signal
2130	 * despite any existing socket disposition on interruptible waiting.
2131	 */
2132	socantrcvmore(so);
2133	(void) sblock(sb, SBL_WAIT | SBL_NOINTR);
2134
2135	/*
2136	 * Invalidate/clear most of the sockbuf structure, but leave selinfo
2137	 * and mutex data unchanged.
2138	 */
2139	SOCKBUF_LOCK(sb);
2140	bzero(&asb, offsetof(struct sockbuf, sb_startzero));
2141	bcopy(&sb->sb_startzero, &asb.sb_startzero,
2142	    sizeof(*sb) - offsetof(struct sockbuf, sb_startzero));
2143	bzero(&sb->sb_startzero,
2144	    sizeof(*sb) - offsetof(struct sockbuf, sb_startzero));
2145	SOCKBUF_UNLOCK(sb);
2146	sbunlock(sb);
2147
2148	/*
2149	 * Dispose of special rights and flush the socket buffer.  Don't call
2150	 * any unsafe routines (that rely on locks being initialized) on asb.
2151	 */
2152	if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose != NULL)
2153		(*pr->pr_domain->dom_dispose)(asb.sb_mb);
2154	sbrelease_internal(&asb, so);
2155}
2156
2157 /*
2158  * Perhaps this routine, and sooptcopyout(), below, ought to come in an
2159  * additional variant to handle the case where the option value needs to be
2160  * some kind of integer, but not a specific size.  In addition to their use
2161  * here, these functions are also called by the protocol-level pr_ctloutput()
2162  * routines.
2163  */
2164 int
2165 sooptcopyin(struct sockopt *sopt, void *buf, size_t len, size_t minlen)
2166 {
2167	size_t valsize;
2168
2169	/*
2170	 * If the user gives us more than we wanted, we ignore it, but if we
2171	 * don't get the minimum length the caller wants, we return EINVAL.
2172	 * On success, sopt->sopt_valsize is set to however much we actually
2173	 * retrieved.
2174	 */
2175	if ((valsize = sopt->sopt_valsize) < minlen)
2176		return EINVAL;
2177	if (valsize > len)
2178		sopt->sopt_valsize = valsize = len;
2179
2180	if (sopt->sopt_td != NULL)
2181		return (copyin(sopt->sopt_val, buf, valsize));
2182
2183	bcopy(sopt->sopt_val, buf, valsize);
2184	return (0);
2185}
2186
2187 /*
2188  * Kernel version of setsockopt(2).
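  *
  * Illustrative sketch, not from the original file: a kernel consumer
  * might use the helper defined just below to set a boolean option,
  * with sopt_td left NULL so the value is treated as a kernel pointer:
  */
#if 0	/* illustrative sketch only */
static int
example_enable_keepalive(struct socket *so)
{
	int on = 1;

	return (so_setsockopt(so, SOL_SOCKET, SO_KEEPALIVE, &on,
	    sizeof(on)));
}
#endif
 /*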
2189 * 2190 * XXX: optlen is size_t, not socklen_t 2191 */ 2192int 2193so_setsockopt(struct socket *so, int level, int optname, void *optval, 2194 size_t optlen) 2195{ 2196 struct sockopt sopt; 2197 2198 sopt.sopt_level = level; 2199 sopt.sopt_name = optname; 2200 sopt.sopt_dir = SOPT_SET; 2201 sopt.sopt_val = optval; 2202 sopt.sopt_valsize = optlen; 2203 sopt.sopt_td = NULL; 2204 return (sosetopt(so, &sopt)); 2205} 2206 2207int 2208sosetopt(struct socket *so, struct sockopt *sopt) 2209{ 2210 int error, optval; 2211 struct linger l; 2212 struct timeval tv; 2213 u_long val; 2214#ifdef MAC 2215 struct mac extmac; 2216#endif 2217 2218 error = 0; 2219 if (sopt->sopt_level != SOL_SOCKET) { 2220 if (so->so_proto && so->so_proto->pr_ctloutput) 2221 return ((*so->so_proto->pr_ctloutput) 2222 (so, sopt)); 2223 error = ENOPROTOOPT; 2224 } else { 2225 switch (sopt->sopt_name) { 2226#ifdef INET 2227 case SO_ACCEPTFILTER: 2228 error = do_setopt_accept_filter(so, sopt); 2229 if (error) 2230 goto bad; 2231 break; 2232#endif 2233 case SO_LINGER: 2234 error = sooptcopyin(sopt, &l, sizeof l, sizeof l); 2235 if (error) 2236 goto bad; 2237 2238 SOCK_LOCK(so); 2239 so->so_linger = l.l_linger; 2240 if (l.l_onoff) 2241 so->so_options |= SO_LINGER; 2242 else 2243 so->so_options &= ~SO_LINGER; 2244 SOCK_UNLOCK(so); 2245 break; 2246 2247 case SO_DEBUG: 2248 case SO_KEEPALIVE: 2249 case SO_DONTROUTE: 2250 case SO_USELOOPBACK: 2251 case SO_BROADCAST: 2252 case SO_REUSEADDR: 2253 case SO_REUSEPORT: 2254 case SO_OOBINLINE: 2255 case SO_TIMESTAMP: 2256 case SO_BINTIME: 2257 case SO_NOSIGPIPE: 2258 error = sooptcopyin(sopt, &optval, sizeof optval, 2259 sizeof optval); 2260 if (error) 2261 goto bad; 2262 SOCK_LOCK(so); 2263 if (optval) 2264 so->so_options |= sopt->sopt_name; 2265 else 2266 so->so_options &= ~sopt->sopt_name; 2267 SOCK_UNLOCK(so); 2268 break; 2269 2270 case SO_SETFIB: 2271 error = sooptcopyin(sopt, &optval, sizeof optval, 2272 sizeof optval); 2273 if (optval < 1 || optval > rt_numfibs) { 2274 error = EINVAL; 2275 goto bad; 2276 } 2277 if ((so->so_proto->pr_domain->dom_family == PF_INET) || 2278 (so->so_proto->pr_domain->dom_family == PF_ROUTE)) { 2279 so->so_fibnum = optval; 2280 } else { 2281 so->so_fibnum = 0; 2282 } 2283 break; 2284 case SO_SNDBUF: 2285 case SO_RCVBUF: 2286 case SO_SNDLOWAT: 2287 case SO_RCVLOWAT: 2288 error = sooptcopyin(sopt, &optval, sizeof optval, 2289 sizeof optval); 2290 if (error) 2291 goto bad; 2292 2293 /* 2294 * Values < 1 make no sense for any of these options, 2295 * so disallow them. 2296 */ 2297 if (optval < 1) { 2298 error = EINVAL; 2299 goto bad; 2300 } 2301 2302 switch (sopt->sopt_name) { 2303 case SO_SNDBUF: 2304 case SO_RCVBUF: 2305 if (sbreserve(sopt->sopt_name == SO_SNDBUF ? 2306 &so->so_snd : &so->so_rcv, (u_long)optval, 2307 so, curthread) == 0) { 2308 error = ENOBUFS; 2309 goto bad; 2310 } 2311 (sopt->sopt_name == SO_SNDBUF ? &so->so_snd : 2312 &so->so_rcv)->sb_flags &= ~SB_AUTOSIZE; 2313 break; 2314 2315 /* 2316 * Make sure the low-water is never greater than the 2317 * high-water. 2318 */ 2319 case SO_SNDLOWAT: 2320 SOCKBUF_LOCK(&so->so_snd); 2321 so->so_snd.sb_lowat = 2322 (optval > so->so_snd.sb_hiwat) ? 2323 so->so_snd.sb_hiwat : optval; 2324 SOCKBUF_UNLOCK(&so->so_snd); 2325 break; 2326 case SO_RCVLOWAT: 2327 SOCKBUF_LOCK(&so->so_rcv); 2328 so->so_rcv.sb_lowat = 2329 (optval > so->so_rcv.sb_hiwat) ? 
2330				    so->so_rcv.sb_hiwat : optval;
2331				SOCKBUF_UNLOCK(&so->so_rcv);
2332				break;
2333			}
2334			break;
2335
2336		case SO_SNDTIMEO:
2337		case SO_RCVTIMEO:
2338 #ifdef COMPAT_IA32
2339			if (curthread->td_proc->p_sysent == &ia32_freebsd_sysvec) {
2340				struct timeval32 tv32;
2341
2342				error = sooptcopyin(sopt, &tv32, sizeof tv32,
2343				    sizeof tv32);
2344				CP(tv32, tv, tv_sec);
2345				CP(tv32, tv, tv_usec);
2346			} else
2347 #endif
2348				error = sooptcopyin(sopt, &tv, sizeof tv,
2349				    sizeof tv);
2350			if (error)
2351				goto bad;
2352
2353			/* assert(hz > 0); */
2354			if (tv.tv_sec < 0 || tv.tv_sec > INT_MAX / hz ||
2355			    tv.tv_usec < 0 || tv.tv_usec >= 1000000) {
2356				error = EDOM;
2357				goto bad;
2358			}
2359			/* assert(tick > 0); */
2360			/* assert(ULONG_MAX - INT_MAX >= 1000000); */
2361			val = (u_long)(tv.tv_sec * hz) + tv.tv_usec / tick;
2362			if (val > INT_MAX) {
2363				error = EDOM;
2364				goto bad;
2365			}
2366			if (val == 0 && tv.tv_usec != 0)
2367				val = 1;
2368
2369			switch (sopt->sopt_name) {
2370			case SO_SNDTIMEO:
2371				so->so_snd.sb_timeo = val;
2372				break;
2373			case SO_RCVTIMEO:
2374				so->so_rcv.sb_timeo = val;
2375				break;
2376			}
2377			break;
2378
2379		case SO_LABEL:
2380 #ifdef MAC
2381			error = sooptcopyin(sopt, &extmac, sizeof extmac,
2382			    sizeof extmac);
2383			if (error)
2384				goto bad;
2385			error = mac_setsockopt_label(sopt->sopt_td->td_ucred,
2386			    so, &extmac);
2387 #else
2388			error = EOPNOTSUPP;
2389 #endif
2390			break;
2391
2392		default:
2393			error = ENOPROTOOPT;
2394			break;
2395		}
2396		if (error == 0 && so->so_proto != NULL &&
2397		    so->so_proto->pr_ctloutput != NULL) {
2398			(void) ((*so->so_proto->pr_ctloutput)
2399			    (so, sopt));
2400		}
2401	}
2402 bad:
2403	return (error);
2404}
2405
2406 /*
2407  * Helper routine for getsockopt.
2408  */
2409 int
2410 sooptcopyout(struct sockopt *sopt, const void *buf, size_t len)
2411 {
2412	int error;
2413	size_t valsize;
2414
2415	error = 0;
2416
2417	/*
2418	 * Documented get behavior is that we always return a value, possibly
2419	 * truncated to fit in the user's buffer.  Traditional behavior is
2420	 * that we always tell the user precisely how much we copied, rather
2421	 * than something useful like the total amount we had available for
2422	 * her.  Note that this interface is not idempotent; the entire
2423	 * answer must be generated ahead of time.
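	 *
	 * Illustrative sketch, not from the original file: a
	 * protocol-level pr_ctloutput() GET handler might use
	 * sooptcopyout() as below; EXAMPLE_OPTION is a hypothetical
	 * option name:
	 */
#if 0	/* illustrative sketch only */
static int
example_ctloutput_get(struct socket *so, struct sockopt *sopt)
{
	int optval;

	switch (sopt->sopt_name) {
	case EXAMPLE_OPTION:
		optval = 42;
		/* Truncated to the caller's buffer, per the comment above. */
		return (sooptcopyout(sopt, &optval, sizeof(optval)));
	default:
		return (ENOPROTOOPT);
	}
}
#endif
	/*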
2424 */ 2425 valsize = min(len, sopt->sopt_valsize); 2426 sopt->sopt_valsize = valsize; 2427 if (sopt->sopt_val != NULL) { 2428 if (sopt->sopt_td != NULL) 2429 error = copyout(buf, sopt->sopt_val, valsize); 2430 else 2431 bcopy(buf, sopt->sopt_val, valsize); 2432 } 2433 return (error); 2434} 2435 2436int 2437sogetopt(struct socket *so, struct sockopt *sopt) 2438{ 2439 int error, optval; 2440 struct linger l; 2441 struct timeval tv; 2442#ifdef MAC 2443 struct mac extmac; 2444#endif 2445 2446 error = 0; 2447 if (sopt->sopt_level != SOL_SOCKET) { 2448 if (so->so_proto && so->so_proto->pr_ctloutput) { 2449 return ((*so->so_proto->pr_ctloutput) 2450 (so, sopt)); 2451 } else 2452 return (ENOPROTOOPT); 2453 } else { 2454 switch (sopt->sopt_name) { 2455#ifdef INET 2456 case SO_ACCEPTFILTER: 2457 error = do_getopt_accept_filter(so, sopt); 2458 break; 2459#endif 2460 case SO_LINGER: 2461 SOCK_LOCK(so); 2462 l.l_onoff = so->so_options & SO_LINGER; 2463 l.l_linger = so->so_linger; 2464 SOCK_UNLOCK(so); 2465 error = sooptcopyout(sopt, &l, sizeof l); 2466 break; 2467 2468 case SO_USELOOPBACK: 2469 case SO_DONTROUTE: 2470 case SO_DEBUG: 2471 case SO_KEEPALIVE: 2472 case SO_REUSEADDR: 2473 case SO_REUSEPORT: 2474 case SO_BROADCAST: 2475 case SO_OOBINLINE: 2476 case SO_ACCEPTCONN: 2477 case SO_TIMESTAMP: 2478 case SO_BINTIME: 2479 case SO_NOSIGPIPE: 2480 optval = so->so_options & sopt->sopt_name; 2481integer: 2482 error = sooptcopyout(sopt, &optval, sizeof optval); 2483 break; 2484 2485 case SO_TYPE: 2486 optval = so->so_type; 2487 goto integer; 2488 2489 case SO_ERROR: 2490 SOCK_LOCK(so); 2491 optval = so->so_error; 2492 so->so_error = 0; 2493 SOCK_UNLOCK(so); 2494 goto integer; 2495 2496 case SO_SNDBUF: 2497 optval = so->so_snd.sb_hiwat; 2498 goto integer; 2499 2500 case SO_RCVBUF: 2501 optval = so->so_rcv.sb_hiwat; 2502 goto integer; 2503 2504 case SO_SNDLOWAT: 2505 optval = so->so_snd.sb_lowat; 2506 goto integer; 2507 2508 case SO_RCVLOWAT: 2509 optval = so->so_rcv.sb_lowat; 2510 goto integer; 2511 2512 case SO_SNDTIMEO: 2513 case SO_RCVTIMEO: 2514 optval = (sopt->sopt_name == SO_SNDTIMEO ? 
2515 so->so_snd.sb_timeo : so->so_rcv.sb_timeo); 2516 2517 tv.tv_sec = optval / hz; 2518 tv.tv_usec = (optval % hz) * tick; 2519#ifdef COMPAT_IA32 2520 if (curthread->td_proc->p_sysent == &ia32_freebsd_sysvec) { 2521 struct timeval32 tv32; 2522 2523 CP(tv, tv32, tv_sec); 2524 CP(tv, tv32, tv_usec); 2525 error = sooptcopyout(sopt, &tv32, sizeof tv32); 2526 } else 2527#endif 2528 error = sooptcopyout(sopt, &tv, sizeof tv); 2529 break; 2530 2531 case SO_LABEL: 2532#ifdef MAC 2533 error = sooptcopyin(sopt, &extmac, sizeof(extmac), 2534 sizeof(extmac)); 2535 if (error) 2536 return (error); 2537 error = mac_getsockopt_label(sopt->sopt_td->td_ucred, 2538 so, &extmac); 2539 if (error) 2540 return (error); 2541 error = sooptcopyout(sopt, &extmac, sizeof extmac); 2542#else 2543 error = EOPNOTSUPP; 2544#endif 2545 break; 2546 2547 case SO_PEERLABEL: 2548#ifdef MAC 2549 error = sooptcopyin(sopt, &extmac, sizeof(extmac), 2550 sizeof(extmac)); 2551 if (error) 2552 return (error); 2553 error = mac_getsockopt_peerlabel( 2554 sopt->sopt_td->td_ucred, so, &extmac); 2555 if (error) 2556 return (error); 2557 error = sooptcopyout(sopt, &extmac, sizeof extmac); 2558#else 2559 error = EOPNOTSUPP; 2560#endif 2561 break; 2562 2563 case SO_LISTENQLIMIT: 2564 optval = so->so_qlimit; 2565 goto integer; 2566 2567 case SO_LISTENQLEN: 2568 optval = so->so_qlen; 2569 goto integer; 2570 2571 case SO_LISTENINCQLEN: 2572 optval = so->so_incqlen; 2573 goto integer; 2574 2575 default: 2576 error = ENOPROTOOPT; 2577 break; 2578 } 2579 return (error); 2580 } 2581} 2582 2583/* XXX; prepare mbuf for (__FreeBSD__ < 3) routines. */ 2584int 2585soopt_getm(struct sockopt *sopt, struct mbuf **mp) 2586{ 2587 struct mbuf *m, *m_prev; 2588 int sopt_size = sopt->sopt_valsize; 2589 2590 MGET(m, sopt->sopt_td ? M_WAIT : M_DONTWAIT, MT_DATA); 2591 if (m == NULL) 2592 return ENOBUFS; 2593 if (sopt_size > MLEN) { 2594 MCLGET(m, sopt->sopt_td ? M_WAIT : M_DONTWAIT); 2595 if ((m->m_flags & M_EXT) == 0) { 2596 m_free(m); 2597 return ENOBUFS; 2598 } 2599 m->m_len = min(MCLBYTES, sopt_size); 2600 } else { 2601 m->m_len = min(MLEN, sopt_size); 2602 } 2603 sopt_size -= m->m_len; 2604 *mp = m; 2605 m_prev = m; 2606 2607 while (sopt_size) { 2608 MGET(m, sopt->sopt_td ? M_WAIT : M_DONTWAIT, MT_DATA); 2609 if (m == NULL) { 2610 m_freem(*mp); 2611 return ENOBUFS; 2612 } 2613 if (sopt_size > MLEN) { 2614 MCLGET(m, sopt->sopt_td != NULL ? M_WAIT : 2615 M_DONTWAIT); 2616 if ((m->m_flags & M_EXT) == 0) { 2617 m_freem(m); 2618 m_freem(*mp); 2619 return ENOBUFS; 2620 } 2621 m->m_len = min(MCLBYTES, sopt_size); 2622 } else { 2623 m->m_len = min(MLEN, sopt_size); 2624 } 2625 sopt_size -= m->m_len; 2626 m_prev->m_next = m; 2627 m_prev = m; 2628 } 2629 return (0); 2630} 2631 2632/* XXX; copyin sopt data into mbuf chain for (__FreeBSD__ < 3) routines. 
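 *
 * Illustrative sketch, not from the original file: soopt_getm() above
 * sizes an mbuf chain to sopt_valsize, and soopt_mcopyin() below fills
 * it, freeing the chain itself on copyin failure.  The two are paired
 * roughly like this:
 */
#if 0	/* illustrative sketch only */
static int
example_sopt_to_mbufs(struct sockopt *sopt, struct mbuf **mp)
{
	int error;

	error = soopt_getm(sopt, mp);		/* allocate the chain */
	if (error != 0)
		return (error);
	return (soopt_mcopyin(sopt, *mp));	/* fill it from sopt_val */
}
#endif
/*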
 */
2633 int
2634 soopt_mcopyin(struct sockopt *sopt, struct mbuf *m)
2635 {
2636	struct mbuf *m0 = m;
2637
2638	if (sopt->sopt_val == NULL)
2639		return (0);
2640	while (m != NULL && sopt->sopt_valsize >= m->m_len) {
2641		if (sopt->sopt_td != NULL) {
2642			int error;
2643
2644			error = copyin(sopt->sopt_val, mtod(m, char *),
2645			    m->m_len);
2646			if (error != 0) {
2647				m_freem(m0);
2648				return(error);
2649			}
2650		} else
2651			bcopy(sopt->sopt_val, mtod(m, char *), m->m_len);
2652		sopt->sopt_valsize -= m->m_len;
2653		sopt->sopt_val = (char *)sopt->sopt_val + m->m_len;
2654		m = m->m_next;
2655	}
2656	if (m != NULL) /* should have been allocated with enough space at ip6_sooptmcopyin() */
2657		panic("ip6_sooptmcopyin");
2658	return (0);
2659}
2660
2661 /* XXX; copyout mbuf chain data into soopt for (__FreeBSD__ < 3) routines. */
2662 int
2663 soopt_mcopyout(struct sockopt *sopt, struct mbuf *m)
2664 {
2665	struct mbuf *m0 = m;
2666	size_t valsize = 0;
2667
2668	if (sopt->sopt_val == NULL)
2669		return (0);
2670	while (m != NULL && sopt->sopt_valsize >= m->m_len) {
2671		if (sopt->sopt_td != NULL) {
2672			int error;
2673
2674			error = copyout(mtod(m, char *), sopt->sopt_val,
2675			    m->m_len);
2676			if (error != 0) {
2677				m_freem(m0);
2678				return(error);
2679			}
2680		} else
2681			bcopy(mtod(m, char *), sopt->sopt_val, m->m_len);
2682		sopt->sopt_valsize -= m->m_len;
2683		sopt->sopt_val = (char *)sopt->sopt_val + m->m_len;
2684		valsize += m->m_len;
2685		m = m->m_next;
2686	}
2687	if (m != NULL) {
2688		/* the caller should have supplied a large enough buffer */
2689		m_freem(m0);
2690		return(EINVAL);
2691	}
2692	sopt->sopt_valsize = valsize;
2693	return (0);
2694}
2695
2696 /*
2697  * sohasoutofband(): protocol notifies socket layer of the arrival of new
2698  * out-of-band data, which will then notify socket consumers.
2699  */
2700 void
2701 sohasoutofband(struct socket *so)
2702 {
2703
2704	if (so->so_sigio != NULL)
2705		pgsigio(&so->so_sigio, SIGURG, 0);
2706	selwakeuppri(&so->so_rcv.sb_sel, PSOCK);
2707}
2708
2709 int
2710 sopoll(struct socket *so, int events, struct ucred *active_cred,
2711     struct thread *td)
2712 {
2713
2714	/* XXXRW: Temporary debugging.
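	 *
	 * Illustrative sketch, not from the original file: a protocol
	 * input path might note the out-of-band mark and then call
	 * sohasoutofband() above, roughly as TCP does for urgent data;
	 * the offset bookkeeping here is hypothetical:
	 */
#if 0	/* illustrative sketch only */
static void
example_input_urgent(struct socket *so, u_long mark_offset)
{

	so->so_oobmark = mark_offset;
	if (so->so_oobmark == 0) {
		SOCKBUF_LOCK(&so->so_rcv);
		so->so_rcv.sb_state |= SBS_RCVATMARK;
		SOCKBUF_UNLOCK(&so->so_rcv);
	}
	sohasoutofband(so);	/* SIGURG and select/poll wakeup */
}
#endif
	/*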
*/ 2715 KASSERT(so->so_proto->pr_usrreqs->pru_sopoll != sopoll, 2716 ("sopoll: protocol calls sopoll")); 2717 2718 return (so->so_proto->pr_usrreqs->pru_sopoll(so, events, active_cred, 2719 td)); 2720} 2721 2722int 2723sopoll_generic(struct socket *so, int events, struct ucred *active_cred, 2724 struct thread *td) 2725{ 2726 int revents = 0; 2727 2728 SOCKBUF_LOCK(&so->so_snd); 2729 SOCKBUF_LOCK(&so->so_rcv); 2730 if (events & (POLLIN | POLLRDNORM)) 2731 if (soreadable(so)) 2732 revents |= events & (POLLIN | POLLRDNORM); 2733 2734 if (events & POLLINIGNEOF) 2735 if (so->so_rcv.sb_cc >= so->so_rcv.sb_lowat || 2736 !TAILQ_EMPTY(&so->so_comp) || so->so_error) 2737 revents |= POLLINIGNEOF; 2738 2739 if (events & (POLLOUT | POLLWRNORM)) 2740 if (sowriteable(so)) 2741 revents |= events & (POLLOUT | POLLWRNORM); 2742 2743 if (events & (POLLPRI | POLLRDBAND)) 2744 if (so->so_oobmark || (so->so_rcv.sb_state & SBS_RCVATMARK)) 2745 revents |= events & (POLLPRI | POLLRDBAND); 2746 2747 if (revents == 0) { 2748 if (events & 2749 (POLLIN | POLLINIGNEOF | POLLPRI | POLLRDNORM | 2750 POLLRDBAND)) { 2751 selrecord(td, &so->so_rcv.sb_sel); 2752 so->so_rcv.sb_flags |= SB_SEL; 2753 } 2754 2755 if (events & (POLLOUT | POLLWRNORM)) { 2756 selrecord(td, &so->so_snd.sb_sel); 2757 so->so_snd.sb_flags |= SB_SEL; 2758 } 2759 } 2760 2761 SOCKBUF_UNLOCK(&so->so_rcv); 2762 SOCKBUF_UNLOCK(&so->so_snd); 2763 return (revents); 2764} 2765 2766int 2767soo_kqfilter(struct file *fp, struct knote *kn) 2768{ 2769 struct socket *so = kn->kn_fp->f_data; 2770 struct sockbuf *sb; 2771 2772 switch (kn->kn_filter) { 2773 case EVFILT_READ: 2774 if (so->so_options & SO_ACCEPTCONN) 2775 kn->kn_fop = &solisten_filtops; 2776 else 2777 kn->kn_fop = &soread_filtops; 2778 sb = &so->so_rcv; 2779 break; 2780 case EVFILT_WRITE: 2781 kn->kn_fop = &sowrite_filtops; 2782 sb = &so->so_snd; 2783 break; 2784 default: 2785 return (EINVAL); 2786 } 2787 2788 SOCKBUF_LOCK(sb); 2789 knlist_add(&sb->sb_sel.si_note, kn, 1); 2790 sb->sb_flags |= SB_KNOTE; 2791 SOCKBUF_UNLOCK(sb); 2792 return (0); 2793} 2794 2795/* 2796 * Some routines that return EOPNOTSUPP for entry points that are not 2797 * supported by a protocol. Fill in as needed. 
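 *
 * Illustrative sketch, not from the original file: a hypothetical
 * protocol with no connection-oriented entry points would plug the
 * stubs below into its pr_usrreqs rather than leaving NULL pointers:
 */
#if 0	/* illustrative sketch only; example_* entries are hypothetical */
struct pr_usrreqs example_minimal_usrreqs = {
	.pru_attach =	example_attach,
	.pru_detach =	example_detach,
	.pru_send =	example_send,
	.pru_accept =	pru_accept_notsupp,
	.pru_listen =	pru_listen_notsupp,
	.pru_rcvoob =	pru_rcvoob_notsupp,
	.pru_sense =	pru_sense_null,		/* harmless default */
};
#endif
/*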
2798 */ 2799int 2800pru_accept_notsupp(struct socket *so, struct sockaddr **nam) 2801{ 2802 2803 return EOPNOTSUPP; 2804} 2805 2806int 2807pru_attach_notsupp(struct socket *so, int proto, struct thread *td) 2808{ 2809 2810 return EOPNOTSUPP; 2811} 2812 2813int 2814pru_bind_notsupp(struct socket *so, struct sockaddr *nam, struct thread *td) 2815{ 2816 2817 return EOPNOTSUPP; 2818} 2819 2820int 2821pru_connect_notsupp(struct socket *so, struct sockaddr *nam, struct thread *td) 2822{ 2823 2824 return EOPNOTSUPP; 2825} 2826 2827int 2828pru_connect2_notsupp(struct socket *so1, struct socket *so2) 2829{ 2830 2831 return EOPNOTSUPP; 2832} 2833 2834int 2835pru_control_notsupp(struct socket *so, u_long cmd, caddr_t data, 2836 struct ifnet *ifp, struct thread *td) 2837{ 2838 2839 return EOPNOTSUPP; 2840} 2841 2842int 2843pru_disconnect_notsupp(struct socket *so) 2844{ 2845 2846 return EOPNOTSUPP; 2847} 2848 2849int 2850pru_listen_notsupp(struct socket *so, int backlog, struct thread *td) 2851{ 2852 2853 return EOPNOTSUPP; 2854} 2855 2856int 2857pru_peeraddr_notsupp(struct socket *so, struct sockaddr **nam) 2858{ 2859 2860 return EOPNOTSUPP; 2861} 2862 2863int 2864pru_rcvd_notsupp(struct socket *so, int flags) 2865{ 2866 2867 return EOPNOTSUPP; 2868} 2869 2870int 2871pru_rcvoob_notsupp(struct socket *so, struct mbuf *m, int flags) 2872{ 2873 2874 return EOPNOTSUPP; 2875} 2876 2877int 2878pru_send_notsupp(struct socket *so, int flags, struct mbuf *m, 2879 struct sockaddr *addr, struct mbuf *control, struct thread *td) 2880{ 2881 2882 return EOPNOTSUPP; 2883} 2884 2885/* 2886 * This isn't really a ``null'' operation, but it's the default one and 2887 * doesn't do anything destructive. 2888 */ 2889int 2890pru_sense_null(struct socket *so, struct stat *sb) 2891{ 2892 2893 sb->st_blksize = so->so_snd.sb_hiwat; 2894 return 0; 2895} 2896 2897int 2898pru_shutdown_notsupp(struct socket *so) 2899{ 2900 2901 return EOPNOTSUPP; 2902} 2903 2904int 2905pru_sockaddr_notsupp(struct socket *so, struct sockaddr **nam) 2906{ 2907 2908 return EOPNOTSUPP; 2909} 2910 2911int 2912pru_sosend_notsupp(struct socket *so, struct sockaddr *addr, struct uio *uio, 2913 struct mbuf *top, struct mbuf *control, int flags, struct thread *td) 2914{ 2915 2916 return EOPNOTSUPP; 2917} 2918 2919int 2920pru_soreceive_notsupp(struct socket *so, struct sockaddr **paddr, 2921 struct uio *uio, struct mbuf **mp0, struct mbuf **controlp, int *flagsp) 2922{ 2923 2924 return EOPNOTSUPP; 2925} 2926 2927int 2928pru_sopoll_notsupp(struct socket *so, int events, struct ucred *cred, 2929 struct thread *td) 2930{ 2931 2932 return EOPNOTSUPP; 2933} 2934 2935static void 2936filt_sordetach(struct knote *kn) 2937{ 2938 struct socket *so = kn->kn_fp->f_data; 2939 2940 SOCKBUF_LOCK(&so->so_rcv); 2941 knlist_remove(&so->so_rcv.sb_sel.si_note, kn, 1); 2942 if (knlist_empty(&so->so_rcv.sb_sel.si_note)) 2943 so->so_rcv.sb_flags &= ~SB_KNOTE; 2944 SOCKBUF_UNLOCK(&so->so_rcv); 2945} 2946 2947/*ARGSUSED*/ 2948static int 2949filt_soread(struct knote *kn, long hint) 2950{ 2951 struct socket *so; 2952 2953 so = kn->kn_fp->f_data; 2954 SOCKBUF_LOCK_ASSERT(&so->so_rcv); 2955 2956 kn->kn_data = so->so_rcv.sb_cc - so->so_rcv.sb_ctl; 2957 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 2958 kn->kn_flags |= EV_EOF; 2959 kn->kn_fflags = so->so_error; 2960 return (1); 2961 } else if (so->so_error) /* temporary udp error */ 2962 return (1); 2963 else if (kn->kn_sfflags & NOTE_LOWAT) 2964 return (kn->kn_data >= kn->kn_sdata); 2965 else 2966 return (so->so_rcv.sb_cc >= 
so->so_rcv.sb_lowat); 2967} 2968 2969static void 2970filt_sowdetach(struct knote *kn) 2971{ 2972 struct socket *so = kn->kn_fp->f_data; 2973 2974 SOCKBUF_LOCK(&so->so_snd); 2975 knlist_remove(&so->so_snd.sb_sel.si_note, kn, 1); 2976 if (knlist_empty(&so->so_snd.sb_sel.si_note)) 2977 so->so_snd.sb_flags &= ~SB_KNOTE; 2978 SOCKBUF_UNLOCK(&so->so_snd); 2979} 2980 2981/*ARGSUSED*/ 2982static int 2983filt_sowrite(struct knote *kn, long hint) 2984{ 2985 struct socket *so; 2986 2987 so = kn->kn_fp->f_data; 2988 SOCKBUF_LOCK_ASSERT(&so->so_snd); 2989 kn->kn_data = sbspace(&so->so_snd); 2990 if (so->so_snd.sb_state & SBS_CANTSENDMORE) { 2991 kn->kn_flags |= EV_EOF; 2992 kn->kn_fflags = so->so_error; 2993 return (1); 2994 } else if (so->so_error) /* temporary udp error */ 2995 return (1); 2996 else if (((so->so_state & SS_ISCONNECTED) == 0) && 2997 (so->so_proto->pr_flags & PR_CONNREQUIRED)) 2998 return (0); 2999 else if (kn->kn_sfflags & NOTE_LOWAT) 3000 return (kn->kn_data >= kn->kn_sdata); 3001 else 3002 return (kn->kn_data >= so->so_snd.sb_lowat); 3003} 3004 3005/*ARGSUSED*/ 3006static int 3007filt_solisten(struct knote *kn, long hint) 3008{ 3009 struct socket *so = kn->kn_fp->f_data; 3010 3011 kn->kn_data = so->so_qlen; 3012 return (! TAILQ_EMPTY(&so->so_comp)); 3013} 3014 3015int 3016socheckuid(struct socket *so, uid_t uid) 3017{ 3018 3019 if (so == NULL) 3020 return (EPERM); 3021 if (so->so_cred->cr_uid != uid) 3022 return (EPERM); 3023 return (0); 3024} 3025 3026static int 3027sysctl_somaxconn(SYSCTL_HANDLER_ARGS) 3028{ 3029 int error; 3030 int val; 3031 3032 val = somaxconn; 3033 error = sysctl_handle_int(oidp, &val, 0, req); 3034 if (error || !req->newptr ) 3035 return (error); 3036 3037 if (val < 1 || val > USHRT_MAX) 3038 return (EINVAL); 3039 3040 somaxconn = val; 3041 return (0); 3042} 3043 3044/* 3045 * These functions are used by protocols to notify the socket layer (and its 3046 * consumers) of state changes in the sockets driven by protocol-side events. 3047 */ 3048 3049/* 3050 * Procedures to manipulate state flags of socket and do appropriate wakeups. 3051 * 3052 * Normal sequence from the active (originating) side is that 3053 * soisconnecting() is called during processing of connect() call, resulting 3054 * in an eventual call to soisconnected() if/when the connection is 3055 * established. When the connection is torn down soisdisconnecting() is 3056 * called during processing of disconnect() call, and soisdisconnected() is 3057 * called when the connection to the peer is totally severed. The semantics 3058 * of these routines are such that connectionless protocols can call 3059 * soisconnected() and soisdisconnected() only, bypassing the in-progress 3060 * calls when setting up a ``connection'' takes no time. 3061 * 3062 * From the passive side, a socket is created with two queues of sockets: 3063 * so_incomp for connections in progress and so_comp for connections already 3064 * made and awaiting user acceptance. As a protocol is preparing incoming 3065 * connections, it creates a socket structure queued on so_incomp by calling 3066 * sonewconn(). When the connection is established, soisconnected() is 3067 * called, and transfers the socket structure to so_comp, making it available 3068 * to accept(). 3069 * 3070 * If a socket is closed with sockets on either so_incomp or so_comp, these 3071 * sockets are dropped. 3072 * 3073 * If higher-level protocols are implemented in the kernel, the wakeups done 3074 * here will sometimes cause software-interrupt process scheduling. 
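 *
 * Illustrative sketch, not from the original file: per the comment
 * above, a connectionless protocol may skip soisconnecting() entirely;
 * its connect entry point might look roughly like this, with
 * example_set_peer() hypothetical:
 */
#if 0	/* illustrative sketch only */
static int
example_pru_connect(struct socket *so, struct sockaddr *nam,
    struct thread *td)
{
	int error;

	error = example_set_peer(so, nam);
	if (error != 0)
		return (error);
	soisconnected(so);	/* no handshake: connected immediately */
	return (0);
}
#endif
/*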
3075 */ 3076void 3077soisconnecting(struct socket *so) 3078{ 3079 3080 SOCK_LOCK(so); 3081 so->so_state &= ~(SS_ISCONNECTED|SS_ISDISCONNECTING); 3082 so->so_state |= SS_ISCONNECTING; 3083 SOCK_UNLOCK(so); 3084} 3085 3086void 3087soisconnected(struct socket *so) 3088{ 3089 struct socket *head; 3090 3091 ACCEPT_LOCK(); 3092 SOCK_LOCK(so); 3093 so->so_state &= ~(SS_ISCONNECTING|SS_ISDISCONNECTING|SS_ISCONFIRMING); 3094 so->so_state |= SS_ISCONNECTED; 3095 head = so->so_head; 3096 if (head != NULL && (so->so_qstate & SQ_INCOMP)) { 3097 if ((so->so_options & SO_ACCEPTFILTER) == 0) { 3098 SOCK_UNLOCK(so); 3099 TAILQ_REMOVE(&head->so_incomp, so, so_list); 3100 head->so_incqlen--; 3101 so->so_qstate &= ~SQ_INCOMP; 3102 TAILQ_INSERT_TAIL(&head->so_comp, so, so_list); 3103 head->so_qlen++; 3104 so->so_qstate |= SQ_COMP; 3105 ACCEPT_UNLOCK(); 3106 sorwakeup(head); 3107 wakeup_one(&head->so_timeo); 3108 } else { 3109 ACCEPT_UNLOCK(); 3110 so->so_upcall = 3111 head->so_accf->so_accept_filter->accf_callback; 3112 so->so_upcallarg = head->so_accf->so_accept_filter_arg; 3113 so->so_rcv.sb_flags |= SB_UPCALL; 3114 so->so_options &= ~SO_ACCEPTFILTER; 3115 SOCK_UNLOCK(so); 3116 so->so_upcall(so, so->so_upcallarg, M_DONTWAIT); 3117 } 3118 return; 3119 } 3120 SOCK_UNLOCK(so); 3121 ACCEPT_UNLOCK(); 3122 wakeup(&so->so_timeo); 3123 sorwakeup(so); 3124 sowwakeup(so); 3125} 3126 3127void 3128soisdisconnecting(struct socket *so) 3129{ 3130 3131 /* 3132 * Note: This code assumes that SOCK_LOCK(so) and 3133 * SOCKBUF_LOCK(&so->so_rcv) are the same. 3134 */ 3135 SOCKBUF_LOCK(&so->so_rcv); 3136 so->so_state &= ~SS_ISCONNECTING; 3137 so->so_state |= SS_ISDISCONNECTING; 3138 so->so_rcv.sb_state |= SBS_CANTRCVMORE; 3139 sorwakeup_locked(so); 3140 SOCKBUF_LOCK(&so->so_snd); 3141 so->so_snd.sb_state |= SBS_CANTSENDMORE; 3142 sowwakeup_locked(so); 3143 wakeup(&so->so_timeo); 3144} 3145 3146void 3147soisdisconnected(struct socket *so) 3148{ 3149 3150 /* 3151 * Note: This code assumes that SOCK_LOCK(so) and 3152 * SOCKBUF_LOCK(&so->so_rcv) are the same. 3153 */ 3154 SOCKBUF_LOCK(&so->so_rcv); 3155 so->so_state &= ~(SS_ISCONNECTING|SS_ISCONNECTED|SS_ISDISCONNECTING); 3156 so->so_state |= SS_ISDISCONNECTED; 3157 so->so_rcv.sb_state |= SBS_CANTRCVMORE; 3158 sorwakeup_locked(so); 3159 SOCKBUF_LOCK(&so->so_snd); 3160 so->so_snd.sb_state |= SBS_CANTSENDMORE; 3161 sbdrop_locked(&so->so_snd, so->so_snd.sb_cc); 3162 sowwakeup_locked(so); 3163 wakeup(&so->so_timeo); 3164} 3165 3166/* 3167 * Make a copy of a sockaddr in a malloced buffer of type M_SONAME. 3168 */ 3169struct sockaddr * 3170sodupsockaddr(const struct sockaddr *sa, int mflags) 3171{ 3172 struct sockaddr *sa2; 3173 3174 sa2 = malloc(sa->sa_len, M_SONAME, mflags); 3175 if (sa2) 3176 bcopy(sa, sa2, sa->sa_len); 3177 return sa2; 3178} 3179 3180/* 3181 * Create an external-format (``xsocket'') structure using the information in 3182 * the kernel-format socket structure pointed to by so. This is done to 3183 * reduce the spew of irrelevant information over this interface, to isolate 3184 * user code from changes in the kernel structure, and potentially to provide 3185 * information-hiding if we decide that some of this information should be 3186 * hidden from users. 
3187 */ 3188void 3189sotoxsocket(struct socket *so, struct xsocket *xso) 3190{ 3191 3192 xso->xso_len = sizeof *xso; 3193 xso->xso_so = so; 3194 xso->so_type = so->so_type; 3195 xso->so_options = so->so_options; 3196 xso->so_linger = so->so_linger; 3197 xso->so_state = so->so_state; 3198 xso->so_pcb = so->so_pcb; 3199 xso->xso_protocol = so->so_proto->pr_protocol; 3200 xso->xso_family = so->so_proto->pr_domain->dom_family; 3201 xso->so_qlen = so->so_qlen; 3202 xso->so_incqlen = so->so_incqlen; 3203 xso->so_qlimit = so->so_qlimit; 3204 xso->so_timeo = so->so_timeo; 3205 xso->so_error = so->so_error; 3206 xso->so_pgid = so->so_sigio ? so->so_sigio->sio_pgid : 0; 3207 xso->so_oobmark = so->so_oobmark; 3208 sbtoxsockbuf(&so->so_snd, &xso->so_snd); 3209 sbtoxsockbuf(&so->so_rcv, &xso->so_rcv); 3210 xso->so_uid = so->so_cred->cr_uid; 3211} 3212
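
/*
 * Illustrative sketch, not from the original file: a monitoring
 * sysctl handler fragment might export each socket via sotoxsocket(),
 * which is how the xsocket format above typically reaches userland;
 * 'so', 'req', and 'error' are assumed from the surrounding handler.
 */
#if 0	/* illustrative sketch only */
	struct xsocket xso;

	sotoxsocket(so, &xso);
	error = SYSCTL_OUT(req, &xso, sizeof(xso));
#endif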