uipc_socket.c revision 142062
/*-
 * Copyright (c) 2004 The FreeBSD Foundation
 * Copyright (c) 2004-2005 Robert Watson
 * Copyright (c) 1982, 1986, 1988, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_socket.c	8.3 (Berkeley) 4/15/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/uipc_socket.c 142062 2005-02-18 19:15:22Z rwatson $");

#include "opt_inet.h"
#include "opt_mac.h"
#include "opt_zero.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mac.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/domain.h>
#include <sys/file.h>			/* for struct knote */
#include <sys/kernel.h>
#include <sys/event.h>
#include <sys/poll.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/uio.h>
#include <sys/jail.h>

#include <vm/uma.h>

static int	soreceive_rcvoob(struct socket *so, struct uio *uio,
		    int flags);

static void	filt_sordetach(struct knote *kn);
static int	filt_soread(struct knote *kn, long hint);
static void	filt_sowdetach(struct knote *kn);
static int	filt_sowrite(struct knote *kn, long hint);
static int	filt_solisten(struct knote *kn, long hint);

static struct filterops solisten_filtops =
	{ 1, NULL, filt_sordetach, filt_solisten };
static struct filterops soread_filtops =
	{ 1, NULL, filt_sordetach, filt_soread };
static struct filterops sowrite_filtops =
	{ 1, NULL, filt_sowdetach, filt_sowrite };

uma_zone_t socket_zone;
so_gen_t	so_gencnt;	/* generation count for sockets */

MALLOC_DEFINE(M_SONAME, "soname", "socket name");
MALLOC_DEFINE(M_PCB, "pcb", "protocol control block");

SYSCTL_DECL(_kern_ipc);

static int somaxconn = SOMAXCONN;
static int somaxconn_sysctl(SYSCTL_HANDLER_ARGS);
/* XXX: we don't have SYSCTL_USHORT */
SYSCTL_PROC(_kern_ipc, KIPC_SOMAXCONN, somaxconn, CTLTYPE_UINT | CTLFLAG_RW,
    0, sizeof(int), somaxconn_sysctl, "I", "Maximum pending socket connection "
    "queue size");
static int numopensockets;
SYSCTL_INT(_kern_ipc, OID_AUTO, numopensockets, CTLFLAG_RD,
    &numopensockets, 0, "Number of open sockets");
#ifdef ZERO_COPY_SOCKETS
/* These aren't static because they're used in other files. */
int so_zero_copy_send = 1;
int so_zero_copy_receive = 1;
SYSCTL_NODE(_kern_ipc, OID_AUTO, zero_copy, CTLFLAG_RD, 0,
    "Zero copy controls");
SYSCTL_INT(_kern_ipc_zero_copy, OID_AUTO, receive, CTLFLAG_RW,
    &so_zero_copy_receive, 0, "Enable zero copy receive");
SYSCTL_INT(_kern_ipc_zero_copy, OID_AUTO, send, CTLFLAG_RW,
    &so_zero_copy_send, 0, "Enable zero copy send");
#endif /* ZERO_COPY_SOCKETS */

/*
 * accept_mtx locks down per-socket fields relating to accept queues.  See
 * socketvar.h for an annotation of the protected fields of struct socket.
 */
struct mtx accept_mtx;
MTX_SYSINIT(accept_mtx, &accept_mtx, "accept", MTX_DEF);

/*
 * so_global_mtx protects so_gencnt, numopensockets, and the per-socket
 * so_gencnt field.
 */
static struct mtx so_global_mtx;
MTX_SYSINIT(so_global_mtx, &so_global_mtx, "so_glabel", MTX_DEF);

/*
 * Socket operation routines.  These routines are called by the routines in
 * sys_socket.c or from a system process, and implement the semantics of
 * socket operations by switching out to the protocol specific routines.
 */

/*
 * Get a socket structure from our zone, and initialize it.  Note that it
 * would probably be better to allocate the socket and PCB at the same time,
 * but I'm not convinced that all the protocols can be easily modified to do
 * this.
 *
 * soalloc() returns a socket with a ref count of 0.
 */
struct socket *
soalloc(int mflags)
{
	struct socket *so;

	so = uma_zalloc(socket_zone, mflags | M_ZERO);
	if (so != NULL) {
#ifdef MAC
		if (mac_init_socket(so, mflags) != 0) {
			uma_zfree(socket_zone, so);
			return (NULL);
		}
#endif
		SOCKBUF_LOCK_INIT(&so->so_snd, "so_snd");
		SOCKBUF_LOCK_INIT(&so->so_rcv, "so_rcv");
		/* sx_init(&so->so_sxlock, "socket sxlock"); */
		TAILQ_INIT(&so->so_aiojobq);
		mtx_lock(&so_global_mtx);
		so->so_gencnt = ++so_gencnt;
		++numopensockets;
		mtx_unlock(&so_global_mtx);
	}
	return (so);
}
/*
 * socreate returns a socket with a ref count of 1.  The socket should be
 * closed with soclose().
 */
int
socreate(dom, aso, type, proto, cred, td)
	int dom;
	struct socket **aso;
	int type;
	int proto;
	struct ucred *cred;
	struct thread *td;
{
	struct protosw *prp;
	struct socket *so;
	int error;

	if (proto)
		prp = pffindproto(dom, proto, type);
	else
		prp = pffindtype(dom, type);

	if (prp == NULL || prp->pr_usrreqs->pru_attach == NULL ||
	    prp->pr_usrreqs->pru_attach == pru_attach_notsupp)
		return (EPROTONOSUPPORT);

	if (jailed(cred) && jail_socket_unixiproute_only &&
	    prp->pr_domain->dom_family != PF_LOCAL &&
	    prp->pr_domain->dom_family != PF_INET &&
	    prp->pr_domain->dom_family != PF_ROUTE) {
		return (EPROTONOSUPPORT);
	}

	if (prp->pr_type != type)
		return (EPROTOTYPE);
	so = soalloc(M_WAITOK);
	if (so == NULL)
		return (ENOBUFS);

	TAILQ_INIT(&so->so_incomp);
	TAILQ_INIT(&so->so_comp);
	so->so_type = type;
	so->so_cred = crhold(cred);
	so->so_proto = prp;
#ifdef MAC
	mac_create_socket(cred, so);
#endif
	SOCK_LOCK(so);
	knlist_init(&so->so_rcv.sb_sel.si_note, SOCKBUF_MTX(&so->so_rcv));
	knlist_init(&so->so_snd.sb_sel.si_note, SOCKBUF_MTX(&so->so_snd));
	soref(so);
	SOCK_UNLOCK(so);
	error = (*prp->pr_usrreqs->pru_attach)(so, proto, td);
	if (error) {
		ACCEPT_LOCK();
		SOCK_LOCK(so);
		so->so_state |= SS_NOFDREF;
		sorele(so);
		return (error);
	}
	*aso = so;
	return (0);
}

int
sobind(so, nam, td)
	struct socket *so;
	struct sockaddr *nam;
	struct thread *td;
{

	return ((*so->so_proto->pr_usrreqs->pru_bind)(so, nam, td));
}

void
sodealloc(struct socket *so)
{

	KASSERT(so->so_count == 0, ("sodealloc(): so_count %d", so->so_count));
	mtx_lock(&so_global_mtx);
	so->so_gencnt = ++so_gencnt;
	mtx_unlock(&so_global_mtx);
	if (so->so_rcv.sb_hiwat)
		(void)chgsbsize(so->so_cred->cr_uidinfo,
		    &so->so_rcv.sb_hiwat, 0, RLIM_INFINITY);
	if (so->so_snd.sb_hiwat)
		(void)chgsbsize(so->so_cred->cr_uidinfo,
		    &so->so_snd.sb_hiwat, 0, RLIM_INFINITY);
#ifdef INET
	/* remove accept filter if one is present. */
	if (so->so_accf != NULL)
		do_setopt_accept_filter(so, NULL);
#endif
#ifdef MAC
	mac_destroy_socket(so);
#endif
	crfree(so->so_cred);
	SOCKBUF_LOCK_DESTROY(&so->so_snd);
	SOCKBUF_LOCK_DESTROY(&so->so_rcv);
	/* sx_destroy(&so->so_sxlock); */
	uma_zfree(socket_zone, so);
	mtx_lock(&so_global_mtx);
	--numopensockets;
	mtx_unlock(&so_global_mtx);
}

int
solisten(so, backlog, td)
	struct socket *so;
	int backlog;
	struct thread *td;
{
	int error;

	/*
	 * XXXRW: Ordering issue here -- perhaps we need to set
	 * SO_ACCEPTCONN before the call to pru_listen()?
	 * XXXRW: General atomic test-and-set concerns here also.
	 */
	if (so->so_state & (SS_ISCONNECTED | SS_ISCONNECTING |
			    SS_ISDISCONNECTING))
		return (EINVAL);
	error = (*so->so_proto->pr_usrreqs->pru_listen)(so, td);
	if (error)
		return (error);
	ACCEPT_LOCK();
	SOCK_LOCK(so);
	so->so_options |= SO_ACCEPTCONN;
	SOCK_UNLOCK(so);
	if (backlog < 0 || backlog > somaxconn)
		backlog = somaxconn;
	so->so_qlimit = backlog;
	ACCEPT_UNLOCK();
	return (0);
}
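/*
 * Illustrative sketch (hypothetical consumer, not compiled): a typical
 * in-kernel user of socreate()/solisten()/soclose() above.  The thread
 * and credential come from the caller; error handling is abbreviated.
 */
#if 0
static int
example_kernel_listen(struct thread *td, struct socket **sop)
{
	struct socket *so;
	int error;

	error = socreate(AF_INET, &so, SOCK_STREAM, IPPROTO_TCP,
	    td->td_ucred, td);
	if (error != 0)
		return (error);
	error = solisten(so, 8, td);	/* hypothetical backlog of 8 */
	if (error != 0) {
		(void)soclose(so);
		return (error);
	}
	*sop = so;
	return (0);
}
#endif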
/*
 * Attempt to free a socket.  This should really be sotryfree().
 *
 * We free the socket if the protocol is no longer interested in the socket,
 * there's no file descriptor reference, and the refcount is 0.  While the
 * calling macro sotryfree() tests the refcount, sofree() has to test it
 * again as it's possible to race with an accept()ing thread if the socket
 * is in a listen queue of a listen socket, as being in the listen queue
 * doesn't elevate the reference count.  sofree() acquires the accept mutex
 * early for this test in order to avoid that race.
 */
void
sofree(so)
	struct socket *so;
{
	struct socket *head;

	ACCEPT_LOCK_ASSERT();
	SOCK_LOCK_ASSERT(so);

	if (so->so_pcb != NULL || (so->so_state & SS_NOFDREF) == 0 ||
	    so->so_count != 0) {
		SOCK_UNLOCK(so);
		ACCEPT_UNLOCK();
		return;
	}

	head = so->so_head;
	if (head != NULL) {
		KASSERT((so->so_qstate & SQ_COMP) != 0 ||
		    (so->so_qstate & SQ_INCOMP) != 0,
		    ("sofree: so_head != NULL, but neither SQ_COMP nor "
		    "SQ_INCOMP"));
		KASSERT((so->so_qstate & SQ_COMP) == 0 ||
		    (so->so_qstate & SQ_INCOMP) == 0,
		    ("sofree: so->so_qstate is SQ_COMP and also SQ_INCOMP"));
		/*
		 * accept(2) is responsible for draining the completed
		 * connection queue and freeing those sockets, so we just
		 * return here if this socket is currently on the completed
		 * connection queue.  Otherwise, accept(2) may hang after
		 * select(2) has indicated that a listening socket was
		 * ready.  If it's an incomplete connection, we remove it
		 * from the queue and free it; otherwise, it won't be
		 * released until the listening socket is closed.
		 */
		if ((so->so_qstate & SQ_COMP) != 0) {
			SOCK_UNLOCK(so);
			ACCEPT_UNLOCK();
			return;
		}
		TAILQ_REMOVE(&head->so_incomp, so, so_list);
		head->so_incqlen--;
		so->so_qstate &= ~SQ_INCOMP;
		so->so_head = NULL;
	}
	KASSERT((so->so_qstate & SQ_COMP) == 0 &&
	    (so->so_qstate & SQ_INCOMP) == 0,
	    ("sofree: so_head == NULL, but still SQ_COMP(%d) or SQ_INCOMP(%d)",
	    so->so_qstate & SQ_COMP, so->so_qstate & SQ_INCOMP));
	SOCK_UNLOCK(so);
	ACCEPT_UNLOCK();
	SOCKBUF_LOCK(&so->so_snd);
	so->so_snd.sb_flags |= SB_NOINTR;
	(void)sblock(&so->so_snd, M_WAITOK);
	/*
	 * socantsendmore_locked() drops the socket buffer mutex so that it
	 * can safely perform wakeups.  Re-acquire the mutex before
	 * continuing.
	 */
	socantsendmore_locked(so);
	SOCKBUF_LOCK(&so->so_snd);
	sbunlock(&so->so_snd);
	sbrelease_locked(&so->so_snd, so);
	SOCKBUF_UNLOCK(&so->so_snd);
	sorflush(so);
	knlist_destroy(&so->so_rcv.sb_sel.si_note);
	knlist_destroy(&so->so_snd.sb_sel.si_note);
	sodealloc(so);
}
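/*
 * Illustrative sketch of the canonical release pattern used throughout
 * this file: both the accept mutex and the socket lock must be held
 * before sorele()/sotryfree(), which consume them.
 */
#if 0
	ACCEPT_LOCK();
	SOCK_LOCK(so);
	so->so_state |= SS_NOFDREF;
	sorele(so);		/* drops the ref; may call sofree() */
#endif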
/*
 * Close a socket on last file table reference removal.  Initiate
 * disconnect if connected.  Free socket when disconnect complete.
 *
 * This function will sorele() the socket.  Note that soclose() may be
 * called prior to the ref count reaching zero.  The actual socket
 * structure will not be freed until the ref count reaches zero.
 */
int
soclose(so)
	struct socket *so;
{
	int error = 0;

	KASSERT(!(so->so_state & SS_NOFDREF), ("soclose: SS_NOFDREF on enter"));

	funsetown(&so->so_sigio);
	if (so->so_options & SO_ACCEPTCONN) {
		struct socket *sp;
		ACCEPT_LOCK();
		while ((sp = TAILQ_FIRST(&so->so_incomp)) != NULL) {
			TAILQ_REMOVE(&so->so_incomp, sp, so_list);
			so->so_incqlen--;
			sp->so_qstate &= ~SQ_INCOMP;
			sp->so_head = NULL;
			ACCEPT_UNLOCK();
			(void) soabort(sp);
			ACCEPT_LOCK();
		}
		while ((sp = TAILQ_FIRST(&so->so_comp)) != NULL) {
			TAILQ_REMOVE(&so->so_comp, sp, so_list);
			so->so_qlen--;
			sp->so_qstate &= ~SQ_COMP;
			sp->so_head = NULL;
			ACCEPT_UNLOCK();
			(void) soabort(sp);
			ACCEPT_LOCK();
		}
		ACCEPT_UNLOCK();
	}
	if (so->so_pcb == NULL)
		goto discard;
	if (so->so_state & SS_ISCONNECTED) {
		if ((so->so_state & SS_ISDISCONNECTING) == 0) {
			error = sodisconnect(so);
			if (error)
				goto drop;
		}
		if (so->so_options & SO_LINGER) {
			if ((so->so_state & SS_ISDISCONNECTING) &&
			    (so->so_state & SS_NBIO))
				goto drop;
			while (so->so_state & SS_ISCONNECTED) {
				error = tsleep(&so->so_timeo,
				    PSOCK | PCATCH, "soclos", so->so_linger * hz);
				if (error)
					break;
			}
		}
	}
drop:
	if (so->so_pcb != NULL) {
		int error2 = (*so->so_proto->pr_usrreqs->pru_detach)(so);
		if (error == 0)
			error = error2;
	}
discard:
	ACCEPT_LOCK();
	SOCK_LOCK(so);
	KASSERT((so->so_state & SS_NOFDREF) == 0, ("soclose: NOFDREF"));
	so->so_state |= SS_NOFDREF;
	sorele(so);
	return (error);
}

/*
 * soabort() must not be called with any socket locks held, as it calls
 * into the protocol, which will call back into the socket code causing
 * it to acquire additional socket locks that may cause recursion or lock
 * order reversals.
 */
int
soabort(so)
	struct socket *so;
{
	int error;

	error = (*so->so_proto->pr_usrreqs->pru_abort)(so);
	if (error) {
		ACCEPT_LOCK();
		SOCK_LOCK(so);
		sotryfree(so);	/* note: does not decrement the ref count */
		return error;
	}
	return (0);
}
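/*
 * Illustrative note: soclose() above shows the expected calling pattern
 * for soabort() -- the accept mutex is dropped around each call, since
 * soabort() re-enters the protocol, which may itself acquire socket
 * locks.
 */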
int
soaccept(so, nam)
	struct socket *so;
	struct sockaddr **nam;
{
	int error;

	SOCK_LOCK(so);
	KASSERT((so->so_state & SS_NOFDREF) != 0, ("soaccept: !NOFDREF"));
	so->so_state &= ~SS_NOFDREF;
	SOCK_UNLOCK(so);
	error = (*so->so_proto->pr_usrreqs->pru_accept)(so, nam);
	return (error);
}

int
soconnect(so, nam, td)
	struct socket *so;
	struct sockaddr *nam;
	struct thread *td;
{
	int error;

	if (so->so_options & SO_ACCEPTCONN)
		return (EOPNOTSUPP);
	/*
	 * If protocol is connection-based, can only connect once.
	 * Otherwise, if connected, try to disconnect first.  This allows
	 * user to disconnect by connecting to, e.g., a null address.
	 */
	if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
	    ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
	    (error = sodisconnect(so)))) {
		error = EISCONN;
	} else {
		/*
		 * Prevent accumulated error from previous connection from
		 * biting us.
		 */
		so->so_error = 0;
		error = (*so->so_proto->pr_usrreqs->pru_connect)(so, nam, td);
	}

	return (error);
}

int
soconnect2(so1, so2)
	struct socket *so1;
	struct socket *so2;
{

	return ((*so1->so_proto->pr_usrreqs->pru_connect2)(so1, so2));
}

int
sodisconnect(so)
	struct socket *so;
{
	int error;

	if ((so->so_state & SS_ISCONNECTED) == 0)
		return (ENOTCONN);
	if (so->so_state & SS_ISDISCONNECTING)
		return (EALREADY);
	error = (*so->so_proto->pr_usrreqs->pru_disconnect)(so);
	return (error);
}

#define	SBLOCKWAIT(f)	(((f) & MSG_DONTWAIT) ? M_NOWAIT : M_WAITOK)
/*
 * Send on a socket.  If send must go all at once and message is larger
 * than send buffering, then hard error.  Lock against other senders.  If
 * must go all at once and not enough room now, then inform user that this
 * would block and do nothing.  Otherwise, if nonblocking, send as much as
 * possible.  The data to be sent is described by "uio" if nonzero,
 * otherwise by the mbuf chain "top" (which must be null if uio is not).
 * Data provided in mbuf chain must be small enough to send all at once.
 *
 * Returns nonzero on error, timeout or signal; callers must check for
 * short counts if EINTR/ERESTART are returned.  Data and control buffers
 * are freed on return.
 */

#ifdef ZERO_COPY_SOCKETS
struct so_zerocopy_stats{
	int size_ok;
	int align_ok;
	int found_ifp;
};
struct so_zerocopy_stats so_zerocp_stats = {0,0,0};
#include <netinet/in.h>
#include <net/route.h>
#include <netinet/in_pcb.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#endif /*ZERO_COPY_SOCKETS*/

int
sosend(so, addr, uio, top, control, flags, td)
	struct socket *so;
	struct sockaddr *addr;
	struct uio *uio;
	struct mbuf *top;
	struct mbuf *control;
	int flags;
	struct thread *td;
{
	struct mbuf **mp;
	struct mbuf *m;
	long space, len = 0, resid;
	int clen = 0, error, dontroute;
	int atomic = sosendallatonce(so) || top;
#ifdef ZERO_COPY_SOCKETS
	int cow_send;
#endif /* ZERO_COPY_SOCKETS */

	if (uio != NULL)
		resid = uio->uio_resid;
	else
		resid = top->m_pkthdr.len;
	/*
	 * In theory resid should be unsigned.  However, space must be
	 * signed, as it might be less than 0 if we over-committed, and we
	 * must use a signed comparison of space and resid.  On the other
	 * hand, a negative resid causes us to loop sending 0-length
	 * segments to the protocol.
	 *
	 * Also check to make sure that MSG_EOR isn't used on SOCK_STREAM
	 * type sockets since that's an error.
	 */
	if (resid < 0 || (so->so_type == SOCK_STREAM && (flags & MSG_EOR))) {
		error = EINVAL;
		goto out;
	}

	dontroute =
	    (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
	    (so->so_proto->pr_flags & PR_ATOMIC);
	if (td != NULL)
		td->td_proc->p_stats->p_ru.ru_msgsnd++;
	if (control != NULL)
		clen = control->m_len;
#define	snderr(errno)	{ error = (errno); goto release; }

	SOCKBUF_LOCK(&so->so_snd);
restart:
	SOCKBUF_LOCK_ASSERT(&so->so_snd);
	error = sblock(&so->so_snd, SBLOCKWAIT(flags));
	if (error)
		goto out_locked;
	do {
		SOCKBUF_LOCK_ASSERT(&so->so_snd);
		if (so->so_snd.sb_state & SBS_CANTSENDMORE)
			snderr(EPIPE);
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			goto release;
		}
		if ((so->so_state & SS_ISCONNECTED) == 0) {
			/*
			 * `sendto' and `sendmsg' are allowed on a connection-
			 * based socket if it supports implied connect.
			 * Return ENOTCONN if not connected and no address is
			 * supplied.
			 */
			if ((so->so_proto->pr_flags & PR_CONNREQUIRED) &&
			    (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) {
				if ((so->so_state & SS_ISCONFIRMING) == 0 &&
				    !(resid == 0 && clen != 0))
					snderr(ENOTCONN);
			} else if (addr == NULL)
				snderr(so->so_proto->pr_flags & PR_CONNREQUIRED ?
				    ENOTCONN : EDESTADDRREQ);
		}
		space = sbspace(&so->so_snd);
		if (flags & MSG_OOB)
			space += 1024;
		if ((atomic && resid > so->so_snd.sb_hiwat) ||
		    clen > so->so_snd.sb_hiwat)
			snderr(EMSGSIZE);
		if (space < resid + clen &&
		    (atomic || space < so->so_snd.sb_lowat || space < clen)) {
			if ((so->so_state & SS_NBIO) || (flags & MSG_NBIO))
				snderr(EWOULDBLOCK);
			sbunlock(&so->so_snd);
			error = sbwait(&so->so_snd);
			if (error)
				goto out_locked;
			goto restart;
		}
		SOCKBUF_UNLOCK(&so->so_snd);
		mp = &top;
		space -= clen;
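		/*
		 * The loop below packages the data into mbufs: allocate
		 * cluster-backed mbufs when at least MINCLSIZE bytes
		 * remain, copy from the uio (or consume the caller-supplied
		 * chain in "top"), and hand the chain to the protocol once
		 * send space is filled or, for atomic protocols, the whole
		 * request has been packaged.
		 */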
		do {
			if (uio == NULL) {
				/*
				 * Data is prepackaged in "top".
				 */
				resid = 0;
				if (flags & MSG_EOR)
					top->m_flags |= M_EOR;
			} else do {
#ifdef ZERO_COPY_SOCKETS
				cow_send = 0;
#endif /* ZERO_COPY_SOCKETS */
				if (resid >= MINCLSIZE) {
#ifdef ZERO_COPY_SOCKETS
					if (top == NULL) {
						MGETHDR(m, M_TRYWAIT, MT_DATA);
						if (m == NULL) {
							error = ENOBUFS;
							SOCKBUF_LOCK(&so->so_snd);
							goto release;
						}
						m->m_pkthdr.len = 0;
						m->m_pkthdr.rcvif = (struct ifnet *)0;
					} else {
						MGET(m, M_TRYWAIT, MT_DATA);
						if (m == NULL) {
							error = ENOBUFS;
							SOCKBUF_LOCK(&so->so_snd);
							goto release;
						}
					}
					if (so_zero_copy_send &&
					    resid >= PAGE_SIZE &&
					    space >= PAGE_SIZE &&
					    uio->uio_iov->iov_len >= PAGE_SIZE) {
						so_zerocp_stats.size_ok++;
						if (!((vm_offset_t)
						    uio->uio_iov->iov_base & PAGE_MASK)) {
							so_zerocp_stats.align_ok++;
							cow_send = socow_setup(m, uio);
						}
					}
					if (!cow_send) {
						MCLGET(m, M_TRYWAIT);
						if ((m->m_flags & M_EXT) == 0) {
							m_free(m);
							m = NULL;
						} else {
							len = min(min(MCLBYTES, resid), space);
						}
					} else
						len = PAGE_SIZE;
#else /* ZERO_COPY_SOCKETS */
					if (top == NULL) {
						m = m_getcl(M_TRYWAIT, MT_DATA, M_PKTHDR);
						m->m_pkthdr.len = 0;
						m->m_pkthdr.rcvif = (struct ifnet *)0;
					} else
						m = m_getcl(M_TRYWAIT, MT_DATA, 0);
					len = min(min(MCLBYTES, resid), space);
#endif /* ZERO_COPY_SOCKETS */
				} else {
					if (top == NULL) {
						m = m_gethdr(M_TRYWAIT, MT_DATA);
						m->m_pkthdr.len = 0;
						m->m_pkthdr.rcvif = (struct ifnet *)0;

						len = min(min(MHLEN, resid), space);
						/*
						 * For datagram protocols, leave room
						 * for protocol headers in first mbuf.
						 */
						if (atomic && m && len < MHLEN)
							MH_ALIGN(m, len);
					} else {
						m = m_get(M_TRYWAIT, MT_DATA);
						len = min(min(MLEN, resid), space);
					}
				}
				if (m == NULL) {
					error = ENOBUFS;
					SOCKBUF_LOCK(&so->so_snd);
					goto release;
				}

				space -= len;
#ifdef ZERO_COPY_SOCKETS
				if (cow_send)
					error = 0;
				else
#endif /* ZERO_COPY_SOCKETS */
				error = uiomove(mtod(m, void *), (int)len, uio);
				resid = uio->uio_resid;
				m->m_len = len;
				*mp = m;
				top->m_pkthdr.len += len;
				if (error) {
					SOCKBUF_LOCK(&so->so_snd);
					goto release;
				}
				mp = &m->m_next;
				if (resid <= 0) {
					if (flags & MSG_EOR)
						top->m_flags |= M_EOR;
					break;
				}
			} while (space > 0 && atomic);
			if (dontroute) {
				SOCK_LOCK(so);
				so->so_options |= SO_DONTROUTE;
				SOCK_UNLOCK(so);
			}
			/*
			 * XXX all the SBS_CANTSENDMORE checks previously
			 * done could be out of date.  We could have received
			 * a reset packet in an interrupt or maybe we slept
			 * while doing page faults in uiomove() etc.  We
			 * could probably recheck again inside the locking
			 * protection here, but there are probably other
			 * places that this also happens.  We must rethink
			 * this.
			 */
			error = (*so->so_proto->pr_usrreqs->pru_send)(so,
			    (flags & MSG_OOB) ? PRUS_OOB :
			/*
			 * If the user set MSG_EOF, the protocol understands
			 * this flag, and there is nothing left to send, then
			 * use PRU_SEND_EOF instead of PRU_SEND.
			 */
			    ((flags & MSG_EOF) &&
			     (so->so_proto->pr_flags & PR_IMPLOPCL) &&
			     (resid <= 0)) ?
				PRUS_EOF :
			/* If there is more to send set PRUS_MORETOCOME */
			    (resid > 0 && space > 0) ?
				PRUS_MORETOCOME : 0,
			    top, addr, control, td);
			if (dontroute) {
				SOCK_LOCK(so);
				so->so_options &= ~SO_DONTROUTE;
				SOCK_UNLOCK(so);
			}
			clen = 0;
			control = NULL;
			top = NULL;
			mp = &top;
			if (error) {
				SOCKBUF_LOCK(&so->so_snd);
				goto release;
			}
		} while (resid && space > 0);
		SOCKBUF_LOCK(&so->so_snd);
	} while (resid);

release:
	SOCKBUF_LOCK_ASSERT(&so->so_snd);
	sbunlock(&so->so_snd);
out_locked:
	SOCKBUF_LOCK_ASSERT(&so->so_snd);
	SOCKBUF_UNLOCK(&so->so_snd);
out:
	if (top != NULL)
		m_freem(top);
	if (control != NULL)
		m_freem(control);
	return (error);
}

/*
 * The part of soreceive() that implements reading non-inline out-of-band
 * data from a socket.  For more complete comments, see soreceive(), from
 * which this code originated.
 *
 * Note that soreceive_rcvoob(), unlike the remainder of soreceive(), is
 * unable to return an mbuf chain to the caller.
 */
static int
soreceive_rcvoob(so, uio, flags)
	struct socket *so;
	struct uio *uio;
	int flags;
{
	struct protosw *pr = so->so_proto;
	struct mbuf *m;
	int error;

	KASSERT(flags & MSG_OOB, ("soreceive_rcvoob: (flags & MSG_OOB) == 0"));

	m = m_get(M_TRYWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);
	error = (*pr->pr_usrreqs->pru_rcvoob)(so, m, flags & MSG_PEEK);
	if (error)
		goto bad;
	do {
#ifdef ZERO_COPY_SOCKETS
		if (so_zero_copy_receive) {
			int disposable;

			if ((m->m_flags & M_EXT)
			    && (m->m_ext.ext_type == EXT_DISPOSABLE))
				disposable = 1;
			else
				disposable = 0;

			error = uiomoveco(mtod(m, void *),
					  min(uio->uio_resid, m->m_len),
					  uio, disposable);
		} else
#endif /* ZERO_COPY_SOCKETS */
		error = uiomove(mtod(m, void *),
		    (int) min(uio->uio_resid, m->m_len), uio);
		m = m_free(m);
	} while (uio->uio_resid && error == 0 && m);
bad:
	if (m != NULL)
		m_freem(m);
	return (error);
}

/*
 * Following replacement or removal of the first mbuf on the first mbuf
 * chain of a socket buffer, push necessary state changes back into the
 * socket buffer so that other consumers see the values consistently.
 * 'nextrecord' is the caller's locally stored value of the original value
 * of sb->sb_mb->m_nextpkt which must be restored when the lead mbuf
 * changes.  NOTE: 'nextrecord' may be NULL.
 */
static __inline void
sockbuf_pushsync(struct sockbuf *sb, struct mbuf *nextrecord)
{

	SOCKBUF_LOCK_ASSERT(sb);
	/*
	 * First, update for the new value of nextrecord.  If necessary,
	 * make it the first record.
	 */
	if (sb->sb_mb != NULL)
		sb->sb_mb->m_nextpkt = nextrecord;
	else
		sb->sb_mb = nextrecord;

	/*
	 * Now update any dependent socket buffer fields to reflect the new
	 * state.  This is an expanded inline of SB_EMPTY_FIXUP(), with the
	 * addition of a second clause that takes care of the case where
	 * sb_mb has been updated, but remains the last record.
	 */
	if (sb->sb_mb == NULL) {
		sb->sb_mbtail = NULL;
		sb->sb_lastrecord = NULL;
	} else if (sb->sb_mb->m_nextpkt == NULL)
		sb->sb_lastrecord = sb->sb_mb;
}
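/*
 * Illustrative sketch (hypothetical helper, not compiled): how in-kernel
 * code commonly drives sosend() above with a uio describing a single
 * kernel buffer.
 */
#if 0
static int
example_sosend_kbuf(struct socket *so, void *buf, size_t len,
    struct thread *td)
{
	struct uio auio;
	struct iovec aiov;

	aiov.iov_base = buf;
	aiov.iov_len = len;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = 0;
	auio.uio_resid = len;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_WRITE;
	auio.uio_td = td;
	return (sosend(so, NULL, &auio, NULL, NULL, 0, td));
}
#endif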
/*
 * Implement receive operations on a socket.  We depend on the way that
 * records are added to the sockbuf by sbappend*.  In particular, each
 * record (mbufs linked through m_next) must begin with an address if the
 * protocol so specifies, followed by an optional mbuf or mbufs containing
 * ancillary data, and then zero or more mbufs of data.  In order to avoid
 * blocking network interrupts for the entire time here, we splx() while
 * doing the actual copy to user space.  Although the sockbuf is locked,
 * new data may still be appended, and thus we must maintain consistency
 * of the sockbuf during that time.
 *
 * The caller may receive the data as a single mbuf chain by supplying an
 * mbuf **mp0 for use in returning the chain.  The uio is then used only
 * for the count in uio_resid.
 */
int
soreceive(so, psa, uio, mp0, controlp, flagsp)
	struct socket *so;
	struct sockaddr **psa;
	struct uio *uio;
	struct mbuf **mp0;
	struct mbuf **controlp;
	int *flagsp;
{
	struct mbuf *m, **mp;
	int flags, len, error, offset;
	struct protosw *pr = so->so_proto;
	struct mbuf *nextrecord;
	int moff, type = 0;
	int orig_resid = uio->uio_resid;

	mp = mp0;
	if (psa != NULL)
		*psa = NULL;
	if (controlp != NULL)
		*controlp = NULL;
	if (flagsp != NULL)
		flags = *flagsp &~ MSG_EOR;
	else
		flags = 0;
	if (flags & MSG_OOB)
		return (soreceive_rcvoob(so, uio, flags));
	if (mp != NULL)
		*mp = NULL;
	if (so->so_state & SS_ISCONFIRMING && uio->uio_resid)
		(*pr->pr_usrreqs->pru_rcvd)(so, 0);

	SOCKBUF_LOCK(&so->so_rcv);
restart:
	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
	error = sblock(&so->so_rcv, SBLOCKWAIT(flags));
	if (error)
		goto out;

	m = so->so_rcv.sb_mb;
	/*
	 * If we have less data than requested, block awaiting more (subject
	 * to any timeout) if:
	 *   1. the current count is less than the low water mark, or
	 *   2. MSG_WAITALL is set, and it is possible to do the entire
	 *      receive operation at once if we block (resid <= hiwat), and
	 *   3. MSG_DONTWAIT is not set.
	 * If MSG_WAITALL is set but resid is larger than the receive
	 * buffer, we have to do the receive in sections, and thus risk
	 * returning a short count if a timeout or signal occurs after we
	 * start.
	 */
	if (m == NULL || (((flags & MSG_DONTWAIT) == 0 &&
	    so->so_rcv.sb_cc < uio->uio_resid) &&
	    (so->so_rcv.sb_cc < so->so_rcv.sb_lowat ||
	    ((flags & MSG_WAITALL) && uio->uio_resid <= so->so_rcv.sb_hiwat)) &&
	    m->m_nextpkt == NULL && (pr->pr_flags & PR_ATOMIC) == 0)) {
		KASSERT(m != NULL || !so->so_rcv.sb_cc,
		    ("receive: m == %p so->so_rcv.sb_cc == %u",
		    m, so->so_rcv.sb_cc));
		if (so->so_error) {
			if (m != NULL)
				goto dontblock;
			error = so->so_error;
			if ((flags & MSG_PEEK) == 0)
				so->so_error = 0;
			goto release;
		}
		SOCKBUF_LOCK_ASSERT(&so->so_rcv);
		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
			if (m)
				goto dontblock;
			else
				goto release;
		}
		for (; m != NULL; m = m->m_next)
			if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) {
				m = so->so_rcv.sb_mb;
				goto dontblock;
			}
		if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
		    (so->so_proto->pr_flags & PR_CONNREQUIRED)) {
			error = ENOTCONN;
			goto release;
		}
		if (uio->uio_resid == 0)
			goto release;
		if ((so->so_state & SS_NBIO) ||
		    (flags & (MSG_DONTWAIT|MSG_NBIO))) {
			error = EWOULDBLOCK;
			goto release;
		}
		SBLASTRECORDCHK(&so->so_rcv);
		SBLASTMBUFCHK(&so->so_rcv);
		sbunlock(&so->so_rcv);
		error = sbwait(&so->so_rcv);
		if (error)
			goto out;
		goto restart;
	}
dontblock:
	/*
	 * From this point onward, we maintain 'nextrecord' as a cache of the
	 * pointer to the next record in the socket buffer.  We must keep the
	 * various socket buffer pointers and local stack versions of the
	 * pointers in sync, pushing out modifications before dropping the
	 * socket buffer mutex, and re-reading them when picking it up.
	 *
	 * Otherwise, we will race with the network stack appending new data
	 * or records onto the socket buffer by using inconsistent/stale
	 * versions of the field, possibly resulting in socket buffer
	 * corruption.
	 *
	 * By holding the high-level sblock(), we prevent simultaneous
	 * readers from pulling off the front of the socket buffer.
	 */
	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
	if (uio->uio_td)
		uio->uio_td->td_proc->p_stats->p_ru.ru_msgrcv++;
	KASSERT(m == so->so_rcv.sb_mb, ("soreceive: m != so->so_rcv.sb_mb"));
	SBLASTRECORDCHK(&so->so_rcv);
	SBLASTMBUFCHK(&so->so_rcv);
	nextrecord = m->m_nextpkt;
	if (pr->pr_flags & PR_ADDR) {
		KASSERT(m->m_type == MT_SONAME,
		    ("m->m_type == %d", m->m_type));
		orig_resid = 0;
		if (psa != NULL)
			*psa = sodupsockaddr(mtod(m, struct sockaddr *),
			    M_NOWAIT);
		if (flags & MSG_PEEK) {
			m = m->m_next;
		} else {
			sbfree(&so->so_rcv, m);
			so->so_rcv.sb_mb = m_free(m);
			m = so->so_rcv.sb_mb;
			sockbuf_pushsync(&so->so_rcv, nextrecord);
		}
	}

	/*
	 * Process one or more MT_CONTROL mbufs present before any data mbufs
	 * in the first mbuf chain on the socket buffer.  If MSG_PEEK, we
	 * just copy the data; if !MSG_PEEK, we call into the protocol to
	 * perform externalization (or freeing if controlp == NULL).
	 */
	if (m != NULL && m->m_type == MT_CONTROL) {
		struct mbuf *cm = NULL, *cmn;
		struct mbuf **cme = &cm;

		do {
			if (flags & MSG_PEEK) {
				if (controlp != NULL) {
					*controlp = m_copy(m, 0, m->m_len);
					controlp = &(*controlp)->m_next;
				}
				m = m->m_next;
			} else {
				sbfree(&so->so_rcv, m);
				so->so_rcv.sb_mb = m->m_next;
				m->m_next = NULL;
				*cme = m;
				cme = &(*cme)->m_next;
				m = so->so_rcv.sb_mb;
			}
		} while (m != NULL && m->m_type == MT_CONTROL);
		if ((flags & MSG_PEEK) == 0)
			sockbuf_pushsync(&so->so_rcv, nextrecord);
		while (cm != NULL) {
			cmn = cm->m_next;
			cm->m_next = NULL;
			if (pr->pr_domain->dom_externalize != NULL) {
				SOCKBUF_UNLOCK(&so->so_rcv);
				error = (*pr->pr_domain->dom_externalize)
				    (cm, controlp);
				SOCKBUF_LOCK(&so->so_rcv);
			} else if (controlp != NULL)
				*controlp = cm;
			else
				m_freem(cm);
			if (controlp != NULL) {
				orig_resid = 0;
				while (*controlp != NULL)
					controlp = &(*controlp)->m_next;
			}
			cm = cmn;
		}
		nextrecord = so->so_rcv.sb_mb->m_nextpkt;
		orig_resid = 0;
	}
	if (m != NULL) {
		if ((flags & MSG_PEEK) == 0) {
			KASSERT(m->m_nextpkt == nextrecord,
			    ("soreceive: post-control, nextrecord !sync"));
			if (nextrecord == NULL) {
				KASSERT(so->so_rcv.sb_mb == m,
				    ("soreceive: post-control, sb_mb!=m"));
				KASSERT(so->so_rcv.sb_lastrecord == m,
				    ("soreceive: post-control, lastrecord!=m"));
			}
		}
		type = m->m_type;
		if (type == MT_OOBDATA)
			flags |= MSG_OOB;
	} else {
		if ((flags & MSG_PEEK) == 0) {
			KASSERT(so->so_rcv.sb_mb == nextrecord,
			    ("soreceive: sb_mb != nextrecord"));
			if (so->so_rcv.sb_mb == NULL) {
				KASSERT(so->so_rcv.sb_lastrecord == NULL,
				    ("soreceive: sb_lastrecord != NULL"));
			}
		}
	}
	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
	SBLASTRECORDCHK(&so->so_rcv);
	SBLASTMBUFCHK(&so->so_rcv);

	/*
	 * Now continue to read any data mbufs off of the head of the socket
	 * buffer until the read request is satisfied.  Note that 'type' is
	 * used to store the type of any mbuf reads that have happened so far
	 * such that soreceive() can stop reading if the type changes, which
	 * causes soreceive() to return only one of regular data and inline
	 * out-of-band data in a single socket receive operation.
	 */
	moff = 0;
	offset = 0;
	while (m != NULL && uio->uio_resid > 0 && error == 0) {
		/*
		 * If the type of mbuf has changed since the last mbuf
		 * examined ('type'), end the receive operation.
		 */
		SOCKBUF_LOCK_ASSERT(&so->so_rcv);
		if (m->m_type == MT_OOBDATA) {
			if (type != MT_OOBDATA)
				break;
		} else if (type == MT_OOBDATA)
			break;
		else
			KASSERT(m->m_type == MT_DATA || m->m_type == MT_HEADER,
			    ("m->m_type == %d", m->m_type));
		so->so_rcv.sb_state &= ~SBS_RCVATMARK;
		len = uio->uio_resid;
		if (so->so_oobmark && len > so->so_oobmark - offset)
			len = so->so_oobmark - offset;
		if (len > m->m_len - moff)
			len = m->m_len - moff;
		/*
		 * If mp is set, just pass back the mbufs.  Otherwise copy
		 * them out via the uio, then free.  The sockbuf must be
		 * consistent (sb_mb pointing to the current record, with
		 * m_nextpkt linking to the next) when we drop the lock;
		 * we must note any additions to the sockbuf when we block
		 * interrupts again.
		 */
		if (mp == NULL) {
			SOCKBUF_LOCK_ASSERT(&so->so_rcv);
			SBLASTRECORDCHK(&so->so_rcv);
			SBLASTMBUFCHK(&so->so_rcv);
			SOCKBUF_UNLOCK(&so->so_rcv);
#ifdef ZERO_COPY_SOCKETS
			if (so_zero_copy_receive) {
				int disposable;

				if ((m->m_flags & M_EXT)
				    && (m->m_ext.ext_type == EXT_DISPOSABLE))
					disposable = 1;
				else
					disposable = 0;

				error = uiomoveco(mtod(m, char *) + moff,
						  (int)len, uio,
						  disposable);
			} else
#endif /* ZERO_COPY_SOCKETS */
			error = uiomove(mtod(m, char *) + moff, (int)len, uio);
			SOCKBUF_LOCK(&so->so_rcv);
			if (error)
				goto release;
		} else
			uio->uio_resid -= len;
		SOCKBUF_LOCK_ASSERT(&so->so_rcv);
		if (len == m->m_len - moff) {
			if (m->m_flags & M_EOR)
				flags |= MSG_EOR;
			if (flags & MSG_PEEK) {
				m = m->m_next;
				moff = 0;
			} else {
				nextrecord = m->m_nextpkt;
				sbfree(&so->so_rcv, m);
				if (mp != NULL) {
					*mp = m;
					mp = &m->m_next;
					so->so_rcv.sb_mb = m = m->m_next;
					*mp = NULL;
				} else {
					so->so_rcv.sb_mb = m_free(m);
					m = so->so_rcv.sb_mb;
				}
				if (m != NULL) {
					m->m_nextpkt = nextrecord;
					if (nextrecord == NULL)
						so->so_rcv.sb_lastrecord = m;
				} else {
					so->so_rcv.sb_mb = nextrecord;
					SB_EMPTY_FIXUP(&so->so_rcv);
				}
				SBLASTRECORDCHK(&so->so_rcv);
				SBLASTMBUFCHK(&so->so_rcv);
			}
		} else {
			if (flags & MSG_PEEK)
				moff += len;
			else {
				if (mp != NULL) {
					int copy_flag;

					if (flags & MSG_DONTWAIT)
						copy_flag = M_DONTWAIT;
					else
						copy_flag = M_TRYWAIT;
					if (copy_flag == M_TRYWAIT)
						SOCKBUF_UNLOCK(&so->so_rcv);
					*mp = m_copym(m, 0, len, copy_flag);
					if (copy_flag == M_TRYWAIT)
						SOCKBUF_LOCK(&so->so_rcv);
					if (*mp == NULL) {
						/*
						 * m_copym() couldn't
						 * allocate an mbuf.  Adjust
						 * uio_resid back (it was
						 * adjusted down by len
						 * bytes, which we didn't end
						 * up "copying" over).
						 */
						uio->uio_resid += len;
						break;
					}
				}
				m->m_data += len;
				m->m_len -= len;
				so->so_rcv.sb_cc -= len;
			}
		}
		SOCKBUF_LOCK_ASSERT(&so->so_rcv);
		if (so->so_oobmark) {
			if ((flags & MSG_PEEK) == 0) {
				so->so_oobmark -= len;
				if (so->so_oobmark == 0) {
					so->so_rcv.sb_state |= SBS_RCVATMARK;
					break;
				}
			} else {
				offset += len;
				if (offset == so->so_oobmark)
					break;
			}
		}
		if (flags & MSG_EOR)
			break;
		/*
		 * If the MSG_WAITALL flag is set (for a non-atomic socket),
		 * we must not quit until "uio->uio_resid == 0" or an error
		 * termination.  If a signal/timeout occurs, return with a
		 * short count but without error.  Keep the sockbuf locked
		 * against other readers.
		 */
		while (flags & MSG_WAITALL && m == NULL && uio->uio_resid > 0 &&
		    !sosendallatonce(so) && nextrecord == NULL) {
			SOCKBUF_LOCK_ASSERT(&so->so_rcv);
			if (so->so_error || so->so_rcv.sb_state & SBS_CANTRCVMORE)
				break;
			/*
			 * Notify the protocol that some data has been
			 * drained before blocking.
			 */
			if (pr->pr_flags & PR_WANTRCVD && so->so_pcb != NULL) {
				SOCKBUF_UNLOCK(&so->so_rcv);
				(*pr->pr_usrreqs->pru_rcvd)(so, flags);
				SOCKBUF_LOCK(&so->so_rcv);
			}
			SBLASTRECORDCHK(&so->so_rcv);
			SBLASTMBUFCHK(&so->so_rcv);
			error = sbwait(&so->so_rcv);
			if (error)
				goto release;
			m = so->so_rcv.sb_mb;
			if (m != NULL)
				nextrecord = m->m_nextpkt;
		}
	}

	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
	if (m != NULL && pr->pr_flags & PR_ATOMIC) {
		flags |= MSG_TRUNC;
		if ((flags & MSG_PEEK) == 0)
			(void) sbdroprecord_locked(&so->so_rcv);
	}
	if ((flags & MSG_PEEK) == 0) {
		if (m == NULL) {
			/*
			 * First part is an inline SB_EMPTY_FIXUP().  Second
			 * part makes sure sb_lastrecord is up-to-date if
			 * there is still data in the socket buffer.
			 */
			so->so_rcv.sb_mb = nextrecord;
			if (so->so_rcv.sb_mb == NULL) {
				so->so_rcv.sb_mbtail = NULL;
				so->so_rcv.sb_lastrecord = NULL;
			} else if (nextrecord->m_nextpkt == NULL)
				so->so_rcv.sb_lastrecord = nextrecord;
		}
		SBLASTRECORDCHK(&so->so_rcv);
		SBLASTMBUFCHK(&so->so_rcv);
		/*
		 * If soreceive() is being done from the socket callback,
		 * then we don't need to generate an ACK to the peer to
		 * update the window, since the ACK will be generated on
		 * return to TCP.
		 */
		if (!(flags & MSG_SOCALLBCK) &&
		    (pr->pr_flags & PR_WANTRCVD) && so->so_pcb) {
			SOCKBUF_UNLOCK(&so->so_rcv);
			(*pr->pr_usrreqs->pru_rcvd)(so, flags);
			SOCKBUF_LOCK(&so->so_rcv);
		}
	}
	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
	if (orig_resid == uio->uio_resid && orig_resid &&
	    (flags & MSG_EOR) == 0 && (so->so_rcv.sb_state & SBS_CANTRCVMORE) == 0) {
		sbunlock(&so->so_rcv);
		goto restart;
	}

	if (flagsp != NULL)
		*flagsp |= flags;
release:
	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
	sbunlock(&so->so_rcv);
out:
	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
	SOCKBUF_UNLOCK(&so->so_rcv);
	return (error);
}

int
soshutdown(so, how)
	struct socket *so;
	int how;
{
	struct protosw *pr = so->so_proto;

	if (!(how == SHUT_RD || how == SHUT_WR || how == SHUT_RDWR))
		return (EINVAL);

	if (how != SHUT_WR)
		sorflush(so);
	if (how != SHUT_RD)
		return ((*pr->pr_usrreqs->pru_shutdown)(so));
	return (0);
}
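/*
 * Illustrative sketch (hypothetical helper, not compiled): a non-blocking
 * in-kernel read through soreceive() above, mirroring the sosend()
 * example earlier.
 */
#if 0
static int
example_soreceive_kbuf(struct socket *so, void *buf, size_t len,
    struct thread *td)
{
	struct uio auio;
	struct iovec aiov;
	int flags = MSG_DONTWAIT;

	aiov.iov_base = buf;
	aiov.iov_len = len;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = 0;
	auio.uio_resid = len;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_READ;
	auio.uio_td = td;
	return (soreceive(so, NULL, &auio, NULL, NULL, &flags));
}
#endif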
void
sorflush(so)
	struct socket *so;
{
	struct sockbuf *sb = &so->so_rcv;
	struct protosw *pr = so->so_proto;
	struct sockbuf asb;

	/*
	 * XXXRW: This is quite ugly.  Previously, this code made a copy of
	 * the socket buffer, then zero'd the original to clear the buffer
	 * fields.  However, with mutexes in the socket buffer, this causes
	 * problems.  We only clear the zeroable bits of the original;
	 * however, we have to initialize and destroy the mutex in the copy
	 * so that dom_dispose() and sbrelease() can lock it as needed.
	 */
	SOCKBUF_LOCK(sb);
	sb->sb_flags |= SB_NOINTR;
	(void) sblock(sb, M_WAITOK);
	/*
	 * socantrcvmore_locked() drops the socket buffer mutex so that it
	 * can safely perform wakeups.  Re-acquire the mutex before
	 * continuing.
	 */
	socantrcvmore_locked(so);
	SOCKBUF_LOCK(sb);
	sbunlock(sb);
	/*
	 * Invalidate/clear most of the sockbuf structure, but leave
	 * selinfo and mutex data unchanged.
	 */
	bzero(&asb, offsetof(struct sockbuf, sb_startzero));
	bcopy(&sb->sb_startzero, &asb.sb_startzero,
	    sizeof(*sb) - offsetof(struct sockbuf, sb_startzero));
	bzero(&sb->sb_startzero,
	    sizeof(*sb) - offsetof(struct sockbuf, sb_startzero));
	SOCKBUF_UNLOCK(sb);

	SOCKBUF_LOCK_INIT(&asb, "so_rcv");
	if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose != NULL)
		(*pr->pr_domain->dom_dispose)(asb.sb_mb);
	sbrelease(&asb, so);
	SOCKBUF_LOCK_DESTROY(&asb);
}

/*
 * Perhaps this routine, and sooptcopyout(), below, ought to come in an
 * additional variant to handle the case where the option value needs to
 * be some kind of integer, but not a specific size.  In addition to their
 * use here, these functions are also called by the protocol-level
 * pr_ctloutput() routines.
 */
int
sooptcopyin(sopt, buf, len, minlen)
	struct sockopt *sopt;
	void	*buf;
	size_t	len;
	size_t	minlen;
{
	size_t	valsize;

	/*
	 * If the user gives us more than we wanted, we ignore it, but if we
	 * don't get the minimum length the caller wants, we return EINVAL.
	 * On success, sopt->sopt_valsize is set to however much we actually
	 * retrieved.
	 */
	if ((valsize = sopt->sopt_valsize) < minlen)
		return EINVAL;
	if (valsize > len)
		sopt->sopt_valsize = valsize = len;

	if (sopt->sopt_td != NULL)
		return (copyin(sopt->sopt_val, buf, valsize));

	bcopy(sopt->sopt_val, buf, valsize);
	return 0;
}

/*
 * Kernel version of setsockopt(2).
 *
 * XXX: optlen is size_t, not socklen_t
 */
int
so_setsockopt(struct socket *so, int level, int optname, void *optval,
    size_t optlen)
{
	struct sockopt sopt;

	sopt.sopt_level = level;
	sopt.sopt_name = optname;
	sopt.sopt_dir = SOPT_SET;
	sopt.sopt_val = optval;
	sopt.sopt_valsize = optlen;
	sopt.sopt_td = NULL;
	return (sosetopt(so, &sopt));
}
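/*
 * Illustrative sketch: enabling a boolean option from kernel code via
 * so_setsockopt() above (fragment; "so" and "error" belong to the
 * hypothetical caller).
 */
#if 0
	int one = 1;

	error = so_setsockopt(so, SOL_SOCKET, SO_REUSEADDR, &one,
	    sizeof(one));
#endif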
int
sosetopt(so, sopt)
	struct socket *so;
	struct sockopt *sopt;
{
	int	error, optval;
	struct	linger l;
	struct	timeval tv;
	u_long  val;
#ifdef MAC
	struct mac extmac;
#endif

	error = 0;
	if (sopt->sopt_level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput)
			return ((*so->so_proto->pr_ctloutput)
				  (so, sopt));
		error = ENOPROTOOPT;
	} else {
		switch (sopt->sopt_name) {
#ifdef INET
		case SO_ACCEPTFILTER:
			error = do_setopt_accept_filter(so, sopt);
			if (error)
				goto bad;
			break;
#endif
		case SO_LINGER:
			error = sooptcopyin(sopt, &l, sizeof l, sizeof l);
			if (error)
				goto bad;

			SOCK_LOCK(so);
			so->so_linger = l.l_linger;
			if (l.l_onoff)
				so->so_options |= SO_LINGER;
			else
				so->so_options &= ~SO_LINGER;
			SOCK_UNLOCK(so);
			break;

		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_DONTROUTE:
		case SO_USELOOPBACK:
		case SO_BROADCAST:
		case SO_REUSEADDR:
		case SO_REUSEPORT:
		case SO_OOBINLINE:
		case SO_TIMESTAMP:
		case SO_BINTIME:
		case SO_NOSIGPIPE:
			error = sooptcopyin(sopt, &optval, sizeof optval,
					    sizeof optval);
			if (error)
				goto bad;
			SOCK_LOCK(so);
			if (optval)
				so->so_options |= sopt->sopt_name;
			else
				so->so_options &= ~sopt->sopt_name;
			SOCK_UNLOCK(so);
			break;

		case SO_SNDBUF:
		case SO_RCVBUF:
		case SO_SNDLOWAT:
		case SO_RCVLOWAT:
			error = sooptcopyin(sopt, &optval, sizeof optval,
					    sizeof optval);
			if (error)
				goto bad;

			/*
			 * Values < 1 make no sense for any of these
			 * options, so disallow them.
			 */
			if (optval < 1) {
				error = EINVAL;
				goto bad;
			}

			switch (sopt->sopt_name) {
			case SO_SNDBUF:
			case SO_RCVBUF:
				if (sbreserve(sopt->sopt_name == SO_SNDBUF ?
				    &so->so_snd : &so->so_rcv, (u_long)optval,
				    so, curthread) == 0) {
					error = ENOBUFS;
					goto bad;
				}
				break;

			/*
			 * Make sure the low-water is never greater than
			 * the high-water.
			 */
			case SO_SNDLOWAT:
				SOCKBUF_LOCK(&so->so_snd);
				so->so_snd.sb_lowat =
				    (optval > so->so_snd.sb_hiwat) ?
				    so->so_snd.sb_hiwat : optval;
				SOCKBUF_UNLOCK(&so->so_snd);
				break;
			case SO_RCVLOWAT:
				SOCKBUF_LOCK(&so->so_rcv);
				so->so_rcv.sb_lowat =
				    (optval > so->so_rcv.sb_hiwat) ?
				    so->so_rcv.sb_hiwat : optval;
				SOCKBUF_UNLOCK(&so->so_rcv);
				break;
			}
			break;

		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
			error = sooptcopyin(sopt, &tv, sizeof tv,
					    sizeof tv);
			if (error)
				goto bad;

			/* assert(hz > 0); */
			if (tv.tv_sec < 0 || tv.tv_sec > INT_MAX / hz ||
			    tv.tv_usec < 0 || tv.tv_usec >= 1000000) {
				error = EDOM;
				goto bad;
			}
			/* assert(tick > 0); */
			/* assert(ULONG_MAX - INT_MAX >= 1000000); */
			val = (u_long)(tv.tv_sec * hz) + tv.tv_usec / tick;
			if (val > INT_MAX) {
				error = EDOM;
				goto bad;
			}
			if (val == 0 && tv.tv_usec != 0)
				val = 1;

			switch (sopt->sopt_name) {
			case SO_SNDTIMEO:
				so->so_snd.sb_timeo = val;
				break;
			case SO_RCVTIMEO:
				so->so_rcv.sb_timeo = val;
				break;
			}
			break;
		case SO_LABEL:
#ifdef MAC
			error = sooptcopyin(sopt, &extmac, sizeof extmac,
			    sizeof extmac);
			if (error)
				goto bad;
			error = mac_setsockopt_label(sopt->sopt_td->td_ucred,
			    so, &extmac);
#else
			error = EOPNOTSUPP;
#endif
			break;
		default:
			error = ENOPROTOOPT;
			break;
		}
		if (error == 0 && so->so_proto != NULL &&
		    so->so_proto->pr_ctloutput != NULL) {
			(void) ((*so->so_proto->pr_ctloutput)
				  (so, sopt));
		}
	}
bad:
	return (error);
}
/* Helper routine for getsockopt. */
int
sooptcopyout(struct sockopt *sopt, const void *buf, size_t len)
{
	int	error;
	size_t	valsize;

	error = 0;

	/*
	 * Documented get behavior is that we always return a value,
	 * possibly truncated to fit in the user's buffer.  Traditional
	 * behavior is that we always tell the user precisely how much we
	 * copied, rather than something useful like the total amount we
	 * had available for her.  Note that this interface is not
	 * idempotent; the entire answer must be generated ahead of time.
	 */
	valsize = min(len, sopt->sopt_valsize);
	sopt->sopt_valsize = valsize;
	if (sopt->sopt_val != NULL) {
		if (sopt->sopt_td != NULL)
			error = copyout(buf, sopt->sopt_val, valsize);
		else
			bcopy(buf, sopt->sopt_val, valsize);
	}
	return error;
}
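/*
 * Illustrative sketch (hypothetical protocol code, not compiled): a
 * pr_ctloutput() handler built on sooptcopyin()/sooptcopyout() above,
 * handling a single made-up integer option FOO_OPT backed by
 * foo_opt_value.
 */
#if 0
static int foo_opt_value;

static int
foo_ctloutput(struct socket *so, struct sockopt *sopt)
{
	int error, optval;

	if (sopt->sopt_name != FOO_OPT)
		return (ENOPROTOOPT);
	if (sopt->sopt_dir == SOPT_SET) {
		error = sooptcopyin(sopt, &optval, sizeof(optval),
		    sizeof(optval));
		if (error == 0)
			foo_opt_value = optval;
	} else {
		optval = foo_opt_value;
		error = sooptcopyout(sopt, &optval, sizeof(optval));
	}
	return (error);
}
#endif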
int
sogetopt(so, sopt)
	struct socket *so;
	struct sockopt *sopt;
{
	int	error, optval;
	struct	linger l;
	struct	timeval tv;
#ifdef INET
	struct accept_filter_arg *afap;
#endif
#ifdef MAC
	struct mac extmac;
#endif

	error = 0;
	if (sopt->sopt_level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput) {
			return ((*so->so_proto->pr_ctloutput)
				  (so, sopt));
		} else
			return (ENOPROTOOPT);
	} else {
		switch (sopt->sopt_name) {
#ifdef INET
		case SO_ACCEPTFILTER:
			/* Unlocked read. */
			if ((so->so_options & SO_ACCEPTCONN) == 0)
				return (EINVAL);
			MALLOC(afap, struct accept_filter_arg *, sizeof(*afap),
			    M_TEMP, M_WAITOK | M_ZERO);
			SOCK_LOCK(so);
			if ((so->so_options & SO_ACCEPTFILTER) != 0) {
				strcpy(afap->af_name, so->so_accf->so_accept_filter->accf_name);
				if (so->so_accf->so_accept_filter_str != NULL)
					strcpy(afap->af_arg, so->so_accf->so_accept_filter_str);
			}
			SOCK_UNLOCK(so);
			error = sooptcopyout(sopt, afap, sizeof(*afap));
			FREE(afap, M_TEMP);
			break;
#endif

		case SO_LINGER:
			SOCK_LOCK(so);
			l.l_onoff = so->so_options & SO_LINGER;
			l.l_linger = so->so_linger;
			SOCK_UNLOCK(so);
			error = sooptcopyout(sopt, &l, sizeof l);
			break;

		case SO_USELOOPBACK:
		case SO_DONTROUTE:
		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_REUSEADDR:
		case SO_REUSEPORT:
		case SO_BROADCAST:
		case SO_OOBINLINE:
		case SO_TIMESTAMP:
		case SO_BINTIME:
		case SO_NOSIGPIPE:
			optval = so->so_options & sopt->sopt_name;
integer:
			error = sooptcopyout(sopt, &optval, sizeof optval);
			break;

		case SO_TYPE:
			optval = so->so_type;
			goto integer;

		case SO_ERROR:
			optval = so->so_error;
			so->so_error = 0;
			goto integer;

		case SO_SNDBUF:
			optval = so->so_snd.sb_hiwat;
			goto integer;

		case SO_RCVBUF:
			optval = so->so_rcv.sb_hiwat;
			goto integer;

		case SO_SNDLOWAT:
			optval = so->so_snd.sb_lowat;
			goto integer;

		case SO_RCVLOWAT:
			optval = so->so_rcv.sb_lowat;
			goto integer;

		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
			optval = (sopt->sopt_name == SO_SNDTIMEO ?
				  so->so_snd.sb_timeo : so->so_rcv.sb_timeo);

			tv.tv_sec = optval / hz;
			tv.tv_usec = (optval % hz) * tick;
			error = sooptcopyout(sopt, &tv, sizeof tv);
			break;
		case SO_LABEL:
#ifdef MAC
			error = sooptcopyin(sopt, &extmac, sizeof(extmac),
			    sizeof(extmac));
			if (error)
				return (error);
			error = mac_getsockopt_label(sopt->sopt_td->td_ucred,
			    so, &extmac);
			if (error)
				return (error);
			error = sooptcopyout(sopt, &extmac, sizeof extmac);
#else
			error = EOPNOTSUPP;
#endif
			break;
		case SO_PEERLABEL:
#ifdef MAC
			error = sooptcopyin(sopt, &extmac, sizeof(extmac),
			    sizeof(extmac));
			if (error)
				return (error);
			error = mac_getsockopt_peerlabel(
			    sopt->sopt_td->td_ucred, so, &extmac);
			if (error)
				return (error);
			error = sooptcopyout(sopt, &extmac, sizeof extmac);
#else
			error = EOPNOTSUPP;
#endif
			break;
		default:
			error = ENOPROTOOPT;
			break;
		}
		return (error);
	}
}

/* XXX; prepare mbuf for (__FreeBSD__ < 3) routines. */
int
soopt_getm(struct sockopt *sopt, struct mbuf **mp)
{
	struct mbuf *m, *m_prev;
	int sopt_size = sopt->sopt_valsize;

	MGET(m, sopt->sopt_td ? M_TRYWAIT : M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return ENOBUFS;
	if (sopt_size > MLEN) {
		MCLGET(m, sopt->sopt_td ? M_TRYWAIT : M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_free(m);
			return ENOBUFS;
		}
		m->m_len = min(MCLBYTES, sopt_size);
	} else {
		m->m_len = min(MLEN, sopt_size);
	}
	sopt_size -= m->m_len;
	*mp = m;
	m_prev = m;

	while (sopt_size) {
		MGET(m, sopt->sopt_td ? M_TRYWAIT : M_DONTWAIT, MT_DATA);
		if (m == NULL) {
			m_freem(*mp);
			return ENOBUFS;
		}
		if (sopt_size > MLEN) {
			MCLGET(m, sopt->sopt_td != NULL ? M_TRYWAIT :
			    M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				m_freem(m);
				m_freem(*mp);
				return ENOBUFS;
			}
			m->m_len = min(MCLBYTES, sopt_size);
		} else {
			m->m_len = min(MLEN, sopt_size);
		}
		sopt_size -= m->m_len;
		m_prev->m_next = m;
		m_prev = m;
	}
	return 0;
}

/* XXX; copyin sopt data into mbuf chain for (__FreeBSD__ < 3) routines. */
int
soopt_mcopyin(struct sockopt *sopt, struct mbuf *m)
{
	struct mbuf *m0 = m;

	if (sopt->sopt_val == NULL)
		return 0;
	while (m != NULL && sopt->sopt_valsize >= m->m_len) {
		if (sopt->sopt_td != NULL) {
			int error;

			error = copyin(sopt->sopt_val, mtod(m, char *),
				       m->m_len);
			if (error != 0) {
				m_freem(m0);
				return(error);
			}
		} else
			bcopy(sopt->sopt_val, mtod(m, char *), m->m_len);
		sopt->sopt_valsize -= m->m_len;
		sopt->sopt_val = (char *)sopt->sopt_val + m->m_len;
		m = m->m_next;
	}
	if (m != NULL)
		/* should have been allocated large enough at ip6_sooptmcopyin() */
		panic("ip6_sooptmcopyin");
	return 0;
}
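/*
 * Illustrative note on the compatibility helpers above and below: for a
 * "set" request, a caller allocates a chain with soopt_getm() and fills
 * it with soopt_mcopyin() before handing it to the protocol; for a "get"
 * request, the protocol builds a chain and soopt_mcopyout() copies it
 * back to the sockopt buffer.
 */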
/* XXX; copyout mbuf chain data into soopt for (__FreeBSD__ < 3) routines. */
int
soopt_mcopyout(struct sockopt *sopt, struct mbuf *m)
{
	struct mbuf *m0 = m;
	size_t valsize = 0;

	if (sopt->sopt_val == NULL)
		return 0;
	while (m != NULL && sopt->sopt_valsize >= m->m_len) {
		if (sopt->sopt_td != NULL) {
			int error;

			error = copyout(mtod(m, char *), sopt->sopt_val,
				       m->m_len);
			if (error != 0) {
				m_freem(m0);
				return(error);
			}
		} else
			bcopy(mtod(m, char *), sopt->sopt_val, m->m_len);
		sopt->sopt_valsize -= m->m_len;
		sopt->sopt_val = (char *)sopt->sopt_val + m->m_len;
		valsize += m->m_len;
		m = m->m_next;
	}
	if (m != NULL) {
		/* a large enough sockopt buffer should be provided by userland */
		m_freem(m0);
		return(EINVAL);
	}
	sopt->sopt_valsize = valsize;
	return 0;
}

void
sohasoutofband(so)
	struct socket *so;
{
	if (so->so_sigio != NULL)
		pgsigio(&so->so_sigio, SIGURG, 0);
	selwakeuppri(&so->so_rcv.sb_sel, PSOCK);
}

int
sopoll(struct socket *so, int events, struct ucred *active_cred,
    struct thread *td)
{
	int revents = 0;

	SOCKBUF_LOCK(&so->so_snd);
	SOCKBUF_LOCK(&so->so_rcv);
	if (events & (POLLIN | POLLRDNORM))
		if (soreadable(so))
			revents |= events & (POLLIN | POLLRDNORM);

	if (events & POLLINIGNEOF)
		if (so->so_rcv.sb_cc >= so->so_rcv.sb_lowat ||
		    !TAILQ_EMPTY(&so->so_comp) || so->so_error)
			revents |= POLLINIGNEOF;

	if (events & (POLLOUT | POLLWRNORM))
		if (sowriteable(so))
			revents |= events & (POLLOUT | POLLWRNORM);

	if (events & (POLLPRI | POLLRDBAND))
		if (so->so_oobmark || (so->so_rcv.sb_state & SBS_RCVATMARK))
			revents |= events & (POLLPRI | POLLRDBAND);

	if (revents == 0) {
		if (events &
		    (POLLIN | POLLINIGNEOF | POLLPRI | POLLRDNORM |
		     POLLRDBAND)) {
			selrecord(td, &so->so_rcv.sb_sel);
			so->so_rcv.sb_flags |= SB_SEL;
		}

		if (events & (POLLOUT | POLLWRNORM)) {
			selrecord(td, &so->so_snd.sb_sel);
			so->so_snd.sb_flags |= SB_SEL;
		}
	}

	SOCKBUF_UNLOCK(&so->so_rcv);
	SOCKBUF_UNLOCK(&so->so_snd);
	return (revents);
}

int
soo_kqfilter(struct file *fp, struct knote *kn)
{
	struct socket *so = kn->kn_fp->f_data;
	struct sockbuf *sb;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		if (so->so_options & SO_ACCEPTCONN)
			kn->kn_fop = &solisten_filtops;
		else
			kn->kn_fop = &soread_filtops;
		sb = &so->so_rcv;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &sowrite_filtops;
		sb = &so->so_snd;
		break;
	default:
		return (EINVAL);
	}

	SOCKBUF_LOCK(sb);
	knlist_add(&sb->sb_sel.si_note, kn, 1);
	sb->sb_flags |= SB_KNOTE;
	SOCKBUF_UNLOCK(sb);
	return (0);
}
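/*
 * Illustrative note: the filters below back EVFILT_READ/EVFILT_WRITE on
 * sockets, registered from userland along the lines of:
 *
 *	struct kevent kev;
 *
 *	EV_SET(&kev, sockfd, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 */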
static void
filt_sordetach(struct knote *kn)
{
	struct socket *so = kn->kn_fp->f_data;

	SOCKBUF_LOCK(&so->so_rcv);
	knlist_remove(&so->so_rcv.sb_sel.si_note, kn, 1);
	if (knlist_empty(&so->so_rcv.sb_sel.si_note))
		so->so_rcv.sb_flags &= ~SB_KNOTE;
	SOCKBUF_UNLOCK(&so->so_rcv);
}

/*ARGSUSED*/
static int
filt_soread(struct knote *kn, long hint)
{
	struct socket *so;

	so = kn->kn_fp->f_data;
	SOCKBUF_LOCK_ASSERT(&so->so_rcv);

	kn->kn_data = so->so_rcv.sb_cc - so->so_rcv.sb_ctl;
	if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
		kn->kn_flags |= EV_EOF;
		kn->kn_fflags = so->so_error;
		return (1);
	} else if (so->so_error)	/* temporary udp error */
		return (1);
	else if (kn->kn_sfflags & NOTE_LOWAT)
		return (kn->kn_data >= kn->kn_sdata);
	else
		return (so->so_rcv.sb_cc >= so->so_rcv.sb_lowat);
}

static void
filt_sowdetach(struct knote *kn)
{
	struct socket *so = kn->kn_fp->f_data;

	SOCKBUF_LOCK(&so->so_snd);
	knlist_remove(&so->so_snd.sb_sel.si_note, kn, 1);
	if (knlist_empty(&so->so_snd.sb_sel.si_note))
		so->so_snd.sb_flags &= ~SB_KNOTE;
	SOCKBUF_UNLOCK(&so->so_snd);
}

/*ARGSUSED*/
static int
filt_sowrite(struct knote *kn, long hint)
{
	struct socket *so;

	so = kn->kn_fp->f_data;
	SOCKBUF_LOCK_ASSERT(&so->so_snd);
	kn->kn_data = sbspace(&so->so_snd);
	if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
		kn->kn_flags |= EV_EOF;
		kn->kn_fflags = so->so_error;
		return (1);
	} else if (so->so_error)	/* temporary udp error */
		return (1);
	else if (((so->so_state & SS_ISCONNECTED) == 0) &&
	    (so->so_proto->pr_flags & PR_CONNREQUIRED))
		return (0);
	else if (kn->kn_sfflags & NOTE_LOWAT)
		return (kn->kn_data >= kn->kn_sdata);
	else
		return (kn->kn_data >= so->so_snd.sb_lowat);
}

/*ARGSUSED*/
static int
filt_solisten(struct knote *kn, long hint)
{
	struct socket *so = kn->kn_fp->f_data;

	kn->kn_data = so->so_qlen;
	return (! TAILQ_EMPTY(&so->so_comp));
}

int
socheckuid(struct socket *so, uid_t uid)
{

	if (so == NULL)
		return (EPERM);
	if (so->so_cred->cr_uid != uid)
		return (EPERM);
	return (0);
}

static int
somaxconn_sysctl(SYSCTL_HANDLER_ARGS)
{
	int error;
	int val;

	val = somaxconn;
	error = sysctl_handle_int(oidp, &val, sizeof(int), req);
	if (error || !req->newptr)
		return (error);

	if (val < 1 || val > USHRT_MAX)
		return (EINVAL);

	somaxconn = val;
	return (0);
}