uipc_socket.c revision 131145
/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_socket.c	8.3 (Berkeley) 4/15/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/uipc_socket.c 131145 2004-06-26 17:12:29Z rwatson $");

#include "opt_inet.h"
#include "opt_mac.h"
#include "opt_zero.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mac.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/domain.h>
#include <sys/file.h>			/* for struct knote */
#include <sys/kernel.h>
#include <sys/event.h>
#include <sys/poll.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/uio.h>
#include <sys/jail.h>

#include <vm/uma.h>


#ifdef INET
static int do_setopt_accept_filter(struct socket *so, struct sockopt *sopt);
#endif

static void filt_sordetach(struct knote *kn);
static int  filt_soread(struct knote *kn, long hint);
static void filt_sowdetach(struct knote *kn);
static int  filt_sowrite(struct knote *kn, long hint);
static int  filt_solisten(struct knote *kn, long hint);

static struct filterops solisten_filtops =
    { 1, NULL, filt_sordetach, filt_solisten };
static struct filterops soread_filtops =
    { 1, NULL, filt_sordetach, filt_soread };
static struct filterops sowrite_filtops =
    { 1, NULL, filt_sowdetach, filt_sowrite };

uma_zone_t socket_zone;
so_gen_t so_gencnt;	/* generation count for sockets */

MALLOC_DEFINE(M_SONAME, "soname", "socket name");
MALLOC_DEFINE(M_PCB, "pcb", "protocol control block");

SYSCTL_DECL(_kern_ipc);

static int somaxconn = SOMAXCONN;
SYSCTL_INT(_kern_ipc, KIPC_SOMAXCONN, somaxconn, CTLFLAG_RW,
    &somaxconn, 0, "Maximum pending socket connection queue size");
static int numopensockets;
SYSCTL_INT(_kern_ipc, OID_AUTO, numopensockets, CTLFLAG_RD,
    &numopensockets, 0, "Number of open sockets");
#ifdef ZERO_COPY_SOCKETS
/* These aren't static because they're used in other files. */
int so_zero_copy_send = 1;
int so_zero_copy_receive = 1;
SYSCTL_NODE(_kern_ipc, OID_AUTO, zero_copy, CTLFLAG_RD, 0,
    "Zero copy controls");
SYSCTL_INT(_kern_ipc_zero_copy, OID_AUTO, receive, CTLFLAG_RW,
    &so_zero_copy_receive, 0, "Enable zero copy receive");
SYSCTL_INT(_kern_ipc_zero_copy, OID_AUTO, send, CTLFLAG_RW,
    &so_zero_copy_send, 0, "Enable zero copy send");
#endif /* ZERO_COPY_SOCKETS */

struct mtx accept_mtx;
MTX_SYSINIT(accept_mtx, &accept_mtx, "accept", MTX_DEF);


/*
 * Socket operation routines.
 * These routines are called by the routines in
 * sys_socket.c or from a system process, and
 * implement the semantics of socket operations by
 * switching out to the protocol specific routines.
 */

/*
 * Get a socket structure from our zone, and initialize it.
 * Note that it would probably be better to allocate socket
 * and PCB at the same time, but I'm not convinced that all
 * the protocols can be easily modified to do this.
 *
 * soalloc() returns a socket with a ref count of 0.
 */
struct socket *
soalloc(int mflags)
{
    struct socket *so;
#ifdef MAC
    int error;
#endif

    so = uma_zalloc(socket_zone, mflags | M_ZERO);
    if (so != NULL) {
#ifdef MAC
        error = mac_init_socket(so, mflags);
        if (error != 0) {
            uma_zfree(socket_zone, so);
            so = NULL;
            return so;
        }
#endif
        SOCKBUF_LOCK_INIT(&so->so_snd, "so_snd");
        SOCKBUF_LOCK_INIT(&so->so_rcv, "so_rcv");
        /* XXX race condition for reentrant kernel */
        so->so_gencnt = ++so_gencnt;
        /* sx_init(&so->so_sxlock, "socket sxlock"); */
        TAILQ_INIT(&so->so_aiojobq);
        ++numopensockets;
    }
    return so;
}

/*
 * socreate returns a socket with a ref count of 1.  The socket should be
 * closed with soclose().
 */
int
socreate(dom, aso, type, proto, cred, td)
    int dom;
    struct socket **aso;
    int type;
    int proto;
    struct ucred *cred;
    struct thread *td;
{
    struct protosw *prp;
    struct socket *so;
    int error;

    if (proto)
        prp = pffindproto(dom, proto, type);
    else
        prp = pffindtype(dom, type);

    if (prp == NULL || prp->pr_usrreqs->pru_attach == NULL)
        return (EPROTONOSUPPORT);

    if (jailed(cred) && jail_socket_unixiproute_only &&
        prp->pr_domain->dom_family != PF_LOCAL &&
        prp->pr_domain->dom_family != PF_INET &&
        prp->pr_domain->dom_family != PF_ROUTE) {
        return (EPROTONOSUPPORT);
    }

    if (prp->pr_type != type)
        return (EPROTOTYPE);
    so = soalloc(M_WAITOK);
    if (so == NULL)
        return (ENOBUFS);

    TAILQ_INIT(&so->so_incomp);
    TAILQ_INIT(&so->so_comp);
    so->so_type = type;
    so->so_cred = crhold(cred);
    so->so_proto = prp;
#ifdef MAC
    mac_create_socket(cred, so);
#endif
    SOCK_LOCK(so);
    soref(so);
    SOCK_UNLOCK(so);
    error = (*prp->pr_usrreqs->pru_attach)(so, proto, td);
    if (error) {
        SOCK_LOCK(so);
        so->so_state |= SS_NOFDREF;
        sorele(so);
        return (error);
    }
    *aso = so;
    return (0);
}

int
sobind(so, nam, td)
    struct socket *so;
    struct sockaddr *nam;
    struct thread *td;
{

    return ((*so->so_proto->pr_usrreqs->pru_bind)(so, nam, td));
}

void
sodealloc(struct socket *so)
{

    KASSERT(so->so_count == 0, ("sodealloc(): so_count %d", so->so_count));
    so->so_gencnt = ++so_gencnt;
    if (so->so_rcv.sb_hiwat)
        (void)chgsbsize(so->so_cred->cr_uidinfo,
            &so->so_rcv.sb_hiwat, 0, RLIM_INFINITY);
    if (so->so_snd.sb_hiwat)
        (void)chgsbsize(so->so_cred->cr_uidinfo,
            &so->so_snd.sb_hiwat, 0, RLIM_INFINITY);
#ifdef INET
    /* remove accept filter if one is present. */
    if (so->so_accf != NULL)
        do_setopt_accept_filter(so, NULL);
#endif
#ifdef MAC
    mac_destroy_socket(so);
#endif
    crfree(so->so_cred);
    SOCKBUF_LOCK_DESTROY(&so->so_snd);
    SOCKBUF_LOCK_DESTROY(&so->so_rcv);
    /* sx_destroy(&so->so_sxlock); */
    uma_zfree(socket_zone, so);
    --numopensockets;
}

int
solisten(so, backlog, td)
    struct socket *so;
    int backlog;
    struct thread *td;
{
    int error;

    /*
     * XXXRW: Ordering issue here -- perhaps we need to set
     * SO_ACCEPTCONN before the call to pru_listen()?
     * XXXRW: General atomic test-and-set concerns here also.
     */
    if (so->so_state & (SS_ISCONNECTED | SS_ISCONNECTING |
        SS_ISDISCONNECTING))
        return (EINVAL);
    error = (*so->so_proto->pr_usrreqs->pru_listen)(so, td);
    if (error)
        return (error);
    ACCEPT_LOCK();
    if (TAILQ_EMPTY(&so->so_comp)) {
        SOCK_LOCK(so);
        so->so_options |= SO_ACCEPTCONN;
        SOCK_UNLOCK(so);
    }
    if (backlog < 0 || backlog > somaxconn)
        backlog = somaxconn;
    so->so_qlimit = backlog;
    ACCEPT_UNLOCK();
    return (0);
}

void
sofree(so)
    struct socket *so;
{
    struct socket *head;

    KASSERT(so->so_count == 0, ("socket %p so_count not 0", so));
    SOCK_LOCK_ASSERT(so);

    if (so->so_pcb != NULL || (so->so_state & SS_NOFDREF) == 0) {
        SOCK_UNLOCK(so);
        return;
    }

    SOCK_UNLOCK(so);
    ACCEPT_LOCK();
    head = so->so_head;
    if (head != NULL) {
        KASSERT((so->so_qstate & SQ_COMP) != 0 ||
            (so->so_qstate & SQ_INCOMP) != 0,
            ("sofree: so_head != NULL, but neither SQ_COMP nor "
            "SQ_INCOMP"));
        KASSERT((so->so_qstate & SQ_COMP) == 0 ||
            (so->so_qstate & SQ_INCOMP) == 0,
            ("sofree: so->so_qstate is SQ_COMP and also SQ_INCOMP"));
        /*
         * accept(2) is responsible for draining the completed
         * connection queue and freeing those sockets, so
         * we just return here if this socket is currently
         * on the completed connection queue.  Otherwise,
         * accept(2) may hang after select(2) has indicated
         * that a listening socket was ready.  If it's an
         * incomplete connection, we remove it from the queue
         * and free it; otherwise, it won't be released until
         * the listening socket is closed.
         */
        if ((so->so_qstate & SQ_COMP) != 0) {
            ACCEPT_UNLOCK();
            return;
        }
        TAILQ_REMOVE(&head->so_incomp, so, so_list);
        head->so_incqlen--;
        so->so_qstate &= ~SQ_INCOMP;
        so->so_head = NULL;
    }
    KASSERT((so->so_qstate & SQ_COMP) == 0 &&
        (so->so_qstate & SQ_INCOMP) == 0,
        ("sofree: so_head == NULL, but still SQ_COMP(%d) or SQ_INCOMP(%d)",
        so->so_qstate & SQ_COMP, so->so_qstate & SQ_INCOMP));
    ACCEPT_UNLOCK();
    SOCKBUF_LOCK(&so->so_snd);
    so->so_snd.sb_flags |= SB_NOINTR;
    (void)sblock(&so->so_snd, M_WAITOK);
    /*
     * socantsendmore_locked() drops the socket buffer mutex so that it
     * can safely perform wakeups.  Re-acquire the mutex before
     * continuing.
     */
    socantsendmore_locked(so);
    SOCKBUF_LOCK(&so->so_snd);
    sbunlock(&so->so_snd);
    sbrelease_locked(&so->so_snd, so);
    SOCKBUF_UNLOCK(&so->so_snd);
    sorflush(so);
    sodealloc(so);
}

/*
 * Close a socket on last file table reference removal.
 * Initiate disconnect if connected.
 * Free socket when disconnect complete.
 *
 * This function will sorele() the socket.  Note that soclose() may be
 * called prior to the ref count reaching zero.  The actual socket
 * structure will not be freed until the ref count reaches zero.
363 */ 364int 365soclose(so) 366 struct socket *so; 367{ 368 int error = 0; 369 370 funsetown(&so->so_sigio); 371 if (so->so_options & SO_ACCEPTCONN) { 372 struct socket *sp; 373 ACCEPT_LOCK(); 374 while ((sp = TAILQ_FIRST(&so->so_incomp)) != NULL) { 375 TAILQ_REMOVE(&so->so_incomp, sp, so_list); 376 so->so_incqlen--; 377 sp->so_qstate &= ~SQ_INCOMP; 378 sp->so_head = NULL; 379 ACCEPT_UNLOCK(); 380 (void) soabort(sp); 381 ACCEPT_LOCK(); 382 } 383 while ((sp = TAILQ_FIRST(&so->so_comp)) != NULL) { 384 TAILQ_REMOVE(&so->so_comp, sp, so_list); 385 so->so_qlen--; 386 sp->so_qstate &= ~SQ_COMP; 387 sp->so_head = NULL; 388 ACCEPT_UNLOCK(); 389 (void) soabort(sp); 390 ACCEPT_LOCK(); 391 } 392 ACCEPT_UNLOCK(); 393 } 394 if (so->so_pcb == NULL) 395 goto discard; 396 if (so->so_state & SS_ISCONNECTED) { 397 if ((so->so_state & SS_ISDISCONNECTING) == 0) { 398 error = sodisconnect(so); 399 if (error) 400 goto drop; 401 } 402 if (so->so_options & SO_LINGER) { 403 if ((so->so_state & SS_ISDISCONNECTING) && 404 (so->so_state & SS_NBIO)) 405 goto drop; 406 while (so->so_state & SS_ISCONNECTED) { 407 error = tsleep(&so->so_timeo, 408 PSOCK | PCATCH, "soclos", so->so_linger * hz); 409 if (error) 410 break; 411 } 412 } 413 } 414drop: 415 if (so->so_pcb != NULL) { 416 int error2 = (*so->so_proto->pr_usrreqs->pru_detach)(so); 417 if (error == 0) 418 error = error2; 419 } 420discard: 421 SOCK_LOCK(so); 422 KASSERT((so->so_state & SS_NOFDREF) == 0, ("soclose: NOFDREF")); 423 so->so_state |= SS_NOFDREF; 424 sorele(so); 425 return (error); 426} 427 428/* 429 * soabort() must not be called with any socket locks held, as it calls 430 * into the protocol, which will call back into the socket code causing 431 * it to acquire additional socket locks that may cause recursion or lock 432 * order reversals. 433 */ 434int 435soabort(so) 436 struct socket *so; 437{ 438 int error; 439 440 error = (*so->so_proto->pr_usrreqs->pru_abort)(so); 441 if (error) { 442 SOCK_LOCK(so); 443 sotryfree(so); /* note: does not decrement the ref count */ 444 return error; 445 } 446 return (0); 447} 448 449int 450soaccept(so, nam) 451 struct socket *so; 452 struct sockaddr **nam; 453{ 454 int error; 455 456 SOCK_LOCK(so); 457 KASSERT((so->so_state & SS_NOFDREF) != 0, ("soaccept: !NOFDREF")); 458 so->so_state &= ~SS_NOFDREF; 459 SOCK_UNLOCK(so); 460 error = (*so->so_proto->pr_usrreqs->pru_accept)(so, nam); 461 return (error); 462} 463 464int 465soconnect(so, nam, td) 466 struct socket *so; 467 struct sockaddr *nam; 468 struct thread *td; 469{ 470 int error; 471 472 if (so->so_options & SO_ACCEPTCONN) 473 return (EOPNOTSUPP); 474 /* 475 * If protocol is connection-based, can only connect once. 476 * Otherwise, if connected, try to disconnect first. 477 * This allows user to disconnect by connecting to, e.g., 478 * a null address. 
479 */ 480 if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) && 481 ((so->so_proto->pr_flags & PR_CONNREQUIRED) || 482 (error = sodisconnect(so)))) 483 error = EISCONN; 484 else 485 error = (*so->so_proto->pr_usrreqs->pru_connect)(so, nam, td); 486 return (error); 487} 488 489int 490soconnect2(so1, so2) 491 struct socket *so1; 492 struct socket *so2; 493{ 494 495 return ((*so1->so_proto->pr_usrreqs->pru_connect2)(so1, so2)); 496} 497 498int 499sodisconnect(so) 500 struct socket *so; 501{ 502 int error; 503 504 if ((so->so_state & SS_ISCONNECTED) == 0) 505 return (ENOTCONN); 506 if (so->so_state & SS_ISDISCONNECTING) 507 return (EALREADY); 508 error = (*so->so_proto->pr_usrreqs->pru_disconnect)(so); 509 return (error); 510} 511 512#define SBLOCKWAIT(f) (((f) & MSG_DONTWAIT) ? M_NOWAIT : M_WAITOK) 513/* 514 * Send on a socket. 515 * If send must go all at once and message is larger than 516 * send buffering, then hard error. 517 * Lock against other senders. 518 * If must go all at once and not enough room now, then 519 * inform user that this would block and do nothing. 520 * Otherwise, if nonblocking, send as much as possible. 521 * The data to be sent is described by "uio" if nonzero, 522 * otherwise by the mbuf chain "top" (which must be null 523 * if uio is not). Data provided in mbuf chain must be small 524 * enough to send all at once. 525 * 526 * Returns nonzero on error, timeout or signal; callers 527 * must check for short counts if EINTR/ERESTART are returned. 528 * Data and control buffers are freed on return. 529 */ 530 531#ifdef ZERO_COPY_SOCKETS 532struct so_zerocopy_stats{ 533 int size_ok; 534 int align_ok; 535 int found_ifp; 536}; 537struct so_zerocopy_stats so_zerocp_stats = {0,0,0}; 538#include <netinet/in.h> 539#include <net/route.h> 540#include <netinet/in_pcb.h> 541#include <vm/vm.h> 542#include <vm/vm_page.h> 543#include <vm/vm_object.h> 544#endif /*ZERO_COPY_SOCKETS*/ 545 546int 547sosend(so, addr, uio, top, control, flags, td) 548 struct socket *so; 549 struct sockaddr *addr; 550 struct uio *uio; 551 struct mbuf *top; 552 struct mbuf *control; 553 int flags; 554 struct thread *td; 555{ 556 struct mbuf **mp; 557 struct mbuf *m; 558 long space, len = 0, resid; 559 int clen = 0, error, dontroute; 560 int atomic = sosendallatonce(so) || top; 561#ifdef ZERO_COPY_SOCKETS 562 int cow_send; 563#endif /* ZERO_COPY_SOCKETS */ 564 565 if (uio != NULL) 566 resid = uio->uio_resid; 567 else 568 resid = top->m_pkthdr.len; 569 /* 570 * In theory resid should be unsigned. 571 * However, space must be signed, as it might be less than 0 572 * if we over-committed, and we must use a signed comparison 573 * of space and resid. On the other hand, a negative resid 574 * causes us to loop sending 0-length segments to the protocol. 575 * 576 * Also check to make sure that MSG_EOR isn't used on SOCK_STREAM 577 * type sockets since that's an error. 
578 */ 579 if (resid < 0 || (so->so_type == SOCK_STREAM && (flags & MSG_EOR))) { 580 error = EINVAL; 581 goto out; 582 } 583 584 dontroute = 585 (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 && 586 (so->so_proto->pr_flags & PR_ATOMIC); 587 if (td != NULL) 588 td->td_proc->p_stats->p_ru.ru_msgsnd++; 589 if (control != NULL) 590 clen = control->m_len; 591#define snderr(errno) { error = (errno); goto release; } 592 593 SOCKBUF_LOCK(&so->so_snd); 594restart: 595 SOCKBUF_LOCK_ASSERT(&so->so_snd); 596 error = sblock(&so->so_snd, SBLOCKWAIT(flags)); 597 if (error) 598 goto out_locked; 599 do { 600 SOCKBUF_LOCK_ASSERT(&so->so_snd); 601 if (so->so_snd.sb_state & SBS_CANTSENDMORE) 602 snderr(EPIPE); 603 if (so->so_error) { 604 error = so->so_error; 605 so->so_error = 0; 606 goto release; 607 } 608 if ((so->so_state & SS_ISCONNECTED) == 0) { 609 /* 610 * `sendto' and `sendmsg' is allowed on a connection- 611 * based socket if it supports implied connect. 612 * Return ENOTCONN if not connected and no address is 613 * supplied. 614 */ 615 if ((so->so_proto->pr_flags & PR_CONNREQUIRED) && 616 (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) { 617 if ((so->so_state & SS_ISCONFIRMING) == 0 && 618 !(resid == 0 && clen != 0)) 619 snderr(ENOTCONN); 620 } else if (addr == NULL) 621 snderr(so->so_proto->pr_flags & PR_CONNREQUIRED ? 622 ENOTCONN : EDESTADDRREQ); 623 } 624 space = sbspace(&so->so_snd); 625 if (flags & MSG_OOB) 626 space += 1024; 627 if ((atomic && resid > so->so_snd.sb_hiwat) || 628 clen > so->so_snd.sb_hiwat) 629 snderr(EMSGSIZE); 630 if (space < resid + clen && 631 (atomic || space < so->so_snd.sb_lowat || space < clen)) { 632 if ((so->so_state & SS_NBIO) || (flags & MSG_NBIO)) 633 snderr(EWOULDBLOCK); 634 sbunlock(&so->so_snd); 635 error = sbwait(&so->so_snd); 636 if (error) 637 goto out_locked; 638 goto restart; 639 } 640 SOCKBUF_UNLOCK(&so->so_snd); 641 mp = ⊤ 642 space -= clen; 643 do { 644 if (uio == NULL) { 645 /* 646 * Data is prepackaged in "top". 
647 */ 648 resid = 0; 649 if (flags & MSG_EOR) 650 top->m_flags |= M_EOR; 651 } else do { 652#ifdef ZERO_COPY_SOCKETS 653 cow_send = 0; 654#endif /* ZERO_COPY_SOCKETS */ 655 if (resid >= MINCLSIZE) { 656#ifdef ZERO_COPY_SOCKETS 657 if (top == NULL) { 658 MGETHDR(m, M_TRYWAIT, MT_DATA); 659 if (m == NULL) { 660 error = ENOBUFS; 661 SOCKBUF_LOCK(&so->so_snd); 662 goto release; 663 } 664 m->m_pkthdr.len = 0; 665 m->m_pkthdr.rcvif = (struct ifnet *)0; 666 } else { 667 MGET(m, M_TRYWAIT, MT_DATA); 668 if (m == NULL) { 669 error = ENOBUFS; 670 SOCKBUF_LOCK(&so->so_snd); 671 goto release; 672 } 673 } 674 if (so_zero_copy_send && 675 resid>=PAGE_SIZE && 676 space>=PAGE_SIZE && 677 uio->uio_iov->iov_len>=PAGE_SIZE) { 678 so_zerocp_stats.size_ok++; 679 if (!((vm_offset_t) 680 uio->uio_iov->iov_base & PAGE_MASK)){ 681 so_zerocp_stats.align_ok++; 682 cow_send = socow_setup(m, uio); 683 } 684 } 685 if (!cow_send) { 686 MCLGET(m, M_TRYWAIT); 687 if ((m->m_flags & M_EXT) == 0) { 688 m_free(m); 689 m = NULL; 690 } else { 691 len = min(min(MCLBYTES, resid), space); 692 } 693 } else 694 len = PAGE_SIZE; 695#else /* ZERO_COPY_SOCKETS */ 696 if (top == NULL) { 697 m = m_getcl(M_TRYWAIT, MT_DATA, M_PKTHDR); 698 m->m_pkthdr.len = 0; 699 m->m_pkthdr.rcvif = (struct ifnet *)0; 700 } else 701 m = m_getcl(M_TRYWAIT, MT_DATA, 0); 702 len = min(min(MCLBYTES, resid), space); 703#endif /* ZERO_COPY_SOCKETS */ 704 } else { 705 if (top == NULL) { 706 m = m_gethdr(M_TRYWAIT, MT_DATA); 707 m->m_pkthdr.len = 0; 708 m->m_pkthdr.rcvif = (struct ifnet *)0; 709 710 len = min(min(MHLEN, resid), space); 711 /* 712 * For datagram protocols, leave room 713 * for protocol headers in first mbuf. 714 */ 715 if (atomic && m && len < MHLEN) 716 MH_ALIGN(m, len); 717 } else { 718 m = m_get(M_TRYWAIT, MT_DATA); 719 len = min(min(MLEN, resid), space); 720 } 721 } 722 if (m == NULL) { 723 error = ENOBUFS; 724 SOCKBUF_LOCK(&so->so_snd); 725 goto release; 726 } 727 728 space -= len; 729#ifdef ZERO_COPY_SOCKETS 730 if (cow_send) 731 error = 0; 732 else 733#endif /* ZERO_COPY_SOCKETS */ 734 error = uiomove(mtod(m, void *), (int)len, uio); 735 resid = uio->uio_resid; 736 m->m_len = len; 737 *mp = m; 738 top->m_pkthdr.len += len; 739 if (error) { 740 SOCKBUF_LOCK(&so->so_snd); 741 goto release; 742 } 743 mp = &m->m_next; 744 if (resid <= 0) { 745 if (flags & MSG_EOR) 746 top->m_flags |= M_EOR; 747 break; 748 } 749 } while (space > 0 && atomic); 750 if (dontroute) { 751 SOCK_LOCK(so); 752 so->so_options |= SO_DONTROUTE; 753 SOCK_UNLOCK(so); 754 } 755 /* 756 * XXX all the SBS_CANTSENDMORE checks previously 757 * done could be out of date. We could have recieved 758 * a reset packet in an interrupt or maybe we slept 759 * while doing page faults in uiomove() etc. We could 760 * probably recheck again inside the splnet() protection 761 * here, but there are probably other places that this 762 * also happens. We must rethink this. 763 */ 764 error = (*so->so_proto->pr_usrreqs->pru_send)(so, 765 (flags & MSG_OOB) ? PRUS_OOB : 766 /* 767 * If the user set MSG_EOF, the protocol 768 * understands this flag and nothing left to 769 * send then use PRU_SEND_EOF instead of PRU_SEND. 770 */ 771 ((flags & MSG_EOF) && 772 (so->so_proto->pr_flags & PR_IMPLOPCL) && 773 (resid <= 0)) ? 774 PRUS_EOF : 775 /* If there is more to send set PRUS_MORETOCOME */ 776 (resid > 0 && space > 0) ? 
PRUS_MORETOCOME : 0, 777 top, addr, control, td); 778 if (dontroute) { 779 SOCK_LOCK(so); 780 so->so_options &= ~SO_DONTROUTE; 781 SOCK_UNLOCK(so); 782 } 783 clen = 0; 784 control = NULL; 785 top = NULL; 786 mp = ⊤ 787 if (error) { 788 SOCKBUF_LOCK(&so->so_snd); 789 goto release; 790 } 791 } while (resid && space > 0); 792 SOCKBUF_LOCK(&so->so_snd); 793 } while (resid); 794 795release: 796 SOCKBUF_LOCK_ASSERT(&so->so_snd); 797 sbunlock(&so->so_snd); 798out_locked: 799 SOCKBUF_LOCK_ASSERT(&so->so_snd); 800 SOCKBUF_UNLOCK(&so->so_snd); 801out: 802 if (top != NULL) 803 m_freem(top); 804 if (control != NULL) 805 m_freem(control); 806 return (error); 807} 808 809/* 810 * Implement receive operations on a socket. 811 * We depend on the way that records are added to the sockbuf 812 * by sbappend*. In particular, each record (mbufs linked through m_next) 813 * must begin with an address if the protocol so specifies, 814 * followed by an optional mbuf or mbufs containing ancillary data, 815 * and then zero or more mbufs of data. 816 * In order to avoid blocking network interrupts for the entire time here, 817 * we splx() while doing the actual copy to user space. 818 * Although the sockbuf is locked, new data may still be appended, 819 * and thus we must maintain consistency of the sockbuf during that time. 820 * 821 * The caller may receive the data as a single mbuf chain by supplying 822 * an mbuf **mp0 for use in returning the chain. The uio is then used 823 * only for the count in uio_resid. 824 */ 825int 826soreceive(so, psa, uio, mp0, controlp, flagsp) 827 struct socket *so; 828 struct sockaddr **psa; 829 struct uio *uio; 830 struct mbuf **mp0; 831 struct mbuf **controlp; 832 int *flagsp; 833{ 834 struct mbuf *m, **mp; 835 int flags, len, error, offset; 836 struct protosw *pr = so->so_proto; 837 struct mbuf *nextrecord; 838 int moff, type = 0; 839 int orig_resid = uio->uio_resid; 840 841 mp = mp0; 842 if (psa != NULL) 843 *psa = 0; 844 if (controlp != NULL) 845 *controlp = 0; 846 if (flagsp != NULL) 847 flags = *flagsp &~ MSG_EOR; 848 else 849 flags = 0; 850 if (flags & MSG_OOB) { 851 m = m_get(M_TRYWAIT, MT_DATA); 852 if (m == NULL) 853 return (ENOBUFS); 854 error = (*pr->pr_usrreqs->pru_rcvoob)(so, m, flags & MSG_PEEK); 855 if (error) 856 goto bad; 857 do { 858#ifdef ZERO_COPY_SOCKETS 859 if (so_zero_copy_receive) { 860 vm_page_t pg; 861 int disposable; 862 863 if ((m->m_flags & M_EXT) 864 && (m->m_ext.ext_type == EXT_DISPOSABLE)) 865 disposable = 1; 866 else 867 disposable = 0; 868 869 pg = PHYS_TO_VM_PAGE(vtophys(mtod(m, caddr_t))); 870 if (uio->uio_offset == -1) 871 uio->uio_offset =IDX_TO_OFF(pg->pindex); 872 873 error = uiomoveco(mtod(m, void *), 874 min(uio->uio_resid, m->m_len), 875 uio, pg->object, 876 disposable); 877 } else 878#endif /* ZERO_COPY_SOCKETS */ 879 error = uiomove(mtod(m, void *), 880 (int) min(uio->uio_resid, m->m_len), uio); 881 m = m_free(m); 882 } while (uio->uio_resid && error == 0 && m); 883bad: 884 if (m != NULL) 885 m_freem(m); 886 return (error); 887 } 888 if (mp != NULL) 889 *mp = NULL; 890 if (so->so_state & SS_ISCONFIRMING && uio->uio_resid) 891 (*pr->pr_usrreqs->pru_rcvd)(so, 0); 892 893 SOCKBUF_LOCK(&so->so_rcv); 894restart: 895 SOCKBUF_LOCK_ASSERT(&so->so_rcv); 896 error = sblock(&so->so_rcv, SBLOCKWAIT(flags)); 897 if (error) 898 goto out; 899 900 m = so->so_rcv.sb_mb; 901 /* 902 * If we have less data than requested, block awaiting more 903 * (subject to any timeout) if: 904 * 1. the current count is less than the low water mark, or 905 * 2. 
MSG_WAITALL is set, and it is possible to do the entire 906 * receive operation at once if we block (resid <= hiwat). 907 * 3. MSG_DONTWAIT is not set 908 * If MSG_WAITALL is set but resid is larger than the receive buffer, 909 * we have to do the receive in sections, and thus risk returning 910 * a short count if a timeout or signal occurs after we start. 911 */ 912 if (m == NULL || (((flags & MSG_DONTWAIT) == 0 && 913 so->so_rcv.sb_cc < uio->uio_resid) && 914 (so->so_rcv.sb_cc < so->so_rcv.sb_lowat || 915 ((flags & MSG_WAITALL) && uio->uio_resid <= so->so_rcv.sb_hiwat)) && 916 m->m_nextpkt == NULL && (pr->pr_flags & PR_ATOMIC) == 0)) { 917 KASSERT(m != NULL || !so->so_rcv.sb_cc, 918 ("receive: m == %p so->so_rcv.sb_cc == %u", 919 m, so->so_rcv.sb_cc)); 920 if (so->so_error) { 921 if (m != NULL) 922 goto dontblock; 923 error = so->so_error; 924 if ((flags & MSG_PEEK) == 0) 925 so->so_error = 0; 926 goto release; 927 } 928 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 929 if (m) 930 goto dontblock; 931 else 932 goto release; 933 } 934 for (; m != NULL; m = m->m_next) 935 if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) { 936 m = so->so_rcv.sb_mb; 937 goto dontblock; 938 } 939 if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 && 940 (so->so_proto->pr_flags & PR_CONNREQUIRED)) { 941 error = ENOTCONN; 942 goto release; 943 } 944 if (uio->uio_resid == 0) 945 goto release; 946 if ((so->so_state & SS_NBIO) || 947 (flags & (MSG_DONTWAIT|MSG_NBIO))) { 948 error = EWOULDBLOCK; 949 goto release; 950 } 951 SBLASTRECORDCHK(&so->so_rcv); 952 SBLASTMBUFCHK(&so->so_rcv); 953 sbunlock(&so->so_rcv); 954 error = sbwait(&so->so_rcv); 955 if (error) 956 goto out; 957 goto restart; 958 } 959dontblock: 960 SOCKBUF_LOCK_ASSERT(&so->so_rcv); 961 if (uio->uio_td) 962 uio->uio_td->td_proc->p_stats->p_ru.ru_msgrcv++; 963 SBLASTRECORDCHK(&so->so_rcv); 964 SBLASTMBUFCHK(&so->so_rcv); 965 nextrecord = m->m_nextpkt; 966 if (pr->pr_flags & PR_ADDR) { 967 KASSERT(m->m_type == MT_SONAME, 968 ("m->m_type == %d", m->m_type)); 969 orig_resid = 0; 970 if (psa != NULL) 971 *psa = sodupsockaddr(mtod(m, struct sockaddr *), 972 M_NOWAIT); 973 if (flags & MSG_PEEK) { 974 m = m->m_next; 975 } else { 976 sbfree(&so->so_rcv, m); 977 so->so_rcv.sb_mb = m_free(m); 978 m = so->so_rcv.sb_mb; 979 } 980 } 981 while (m != NULL && m->m_type == MT_CONTROL && error == 0) { 982 if (flags & MSG_PEEK) { 983 if (controlp != NULL) 984 *controlp = m_copy(m, 0, m->m_len); 985 m = m->m_next; 986 } else { 987 sbfree(&so->so_rcv, m); 988 so->so_rcv.sb_mb = m->m_next; 989 m->m_next = NULL; 990 if (pr->pr_domain->dom_externalize) { 991 SOCKBUF_UNLOCK(&so->so_rcv); 992 error = (*pr->pr_domain->dom_externalize) 993 (m, controlp); 994 SOCKBUF_LOCK(&so->so_rcv); 995 } else if (controlp != NULL) 996 *controlp = m; 997 else 998 m_freem(m); 999 m = so->so_rcv.sb_mb; 1000 } 1001 if (controlp != NULL) { 1002 orig_resid = 0; 1003 while (*controlp != NULL) 1004 controlp = &(*controlp)->m_next; 1005 } 1006 } 1007 if (m != NULL) { 1008 if ((flags & MSG_PEEK) == 0) { 1009 m->m_nextpkt = nextrecord; 1010 /* 1011 * If nextrecord == NULL (this is a single chain), 1012 * then sb_lastrecord may not be valid here if m 1013 * was changed earlier. 
1014 */ 1015 if (nextrecord == NULL) { 1016 KASSERT(so->so_rcv.sb_mb == m, 1017 ("receive tailq 1")); 1018 so->so_rcv.sb_lastrecord = m; 1019 } 1020 } 1021 type = m->m_type; 1022 if (type == MT_OOBDATA) 1023 flags |= MSG_OOB; 1024 } else { 1025 if ((flags & MSG_PEEK) == 0) { 1026 KASSERT(so->so_rcv.sb_mb == m,("receive tailq 2")); 1027 so->so_rcv.sb_mb = nextrecord; 1028 SB_EMPTY_FIXUP(&so->so_rcv); 1029 } 1030 } 1031 SOCKBUF_LOCK_ASSERT(&so->so_rcv); 1032 SBLASTRECORDCHK(&so->so_rcv); 1033 SBLASTMBUFCHK(&so->so_rcv); 1034 1035 moff = 0; 1036 offset = 0; 1037 while (m != NULL && uio->uio_resid > 0 && error == 0) { 1038 SOCKBUF_LOCK_ASSERT(&so->so_rcv); 1039 if (m->m_type == MT_OOBDATA) { 1040 if (type != MT_OOBDATA) 1041 break; 1042 } else if (type == MT_OOBDATA) 1043 break; 1044 else 1045 KASSERT(m->m_type == MT_DATA || m->m_type == MT_HEADER, 1046 ("m->m_type == %d", m->m_type)); 1047 so->so_rcv.sb_state &= ~SBS_RCVATMARK; 1048 len = uio->uio_resid; 1049 if (so->so_oobmark && len > so->so_oobmark - offset) 1050 len = so->so_oobmark - offset; 1051 if (len > m->m_len - moff) 1052 len = m->m_len - moff; 1053 /* 1054 * If mp is set, just pass back the mbufs. 1055 * Otherwise copy them out via the uio, then free. 1056 * Sockbuf must be consistent here (points to current mbuf, 1057 * it points to next record) when we drop priority; 1058 * we must note any additions to the sockbuf when we 1059 * block interrupts again. 1060 */ 1061 if (mp == NULL) { 1062 SOCKBUF_LOCK_ASSERT(&so->so_rcv); 1063 SBLASTRECORDCHK(&so->so_rcv); 1064 SBLASTMBUFCHK(&so->so_rcv); 1065 SOCKBUF_UNLOCK(&so->so_rcv); 1066#ifdef ZERO_COPY_SOCKETS 1067 if (so_zero_copy_receive) { 1068 vm_page_t pg; 1069 int disposable; 1070 1071 if ((m->m_flags & M_EXT) 1072 && (m->m_ext.ext_type == EXT_DISPOSABLE)) 1073 disposable = 1; 1074 else 1075 disposable = 0; 1076 1077 pg = PHYS_TO_VM_PAGE(vtophys(mtod(m, caddr_t) + 1078 moff)); 1079 1080 if (uio->uio_offset == -1) 1081 uio->uio_offset =IDX_TO_OFF(pg->pindex); 1082 1083 error = uiomoveco(mtod(m, char *) + moff, 1084 (int)len, uio,pg->object, 1085 disposable); 1086 } else 1087#endif /* ZERO_COPY_SOCKETS */ 1088 error = uiomove(mtod(m, char *) + moff, (int)len, uio); 1089 SOCKBUF_LOCK(&so->so_rcv); 1090 if (error) 1091 goto release; 1092 } else 1093 uio->uio_resid -= len; 1094 if (len == m->m_len - moff) { 1095 if (m->m_flags & M_EOR) 1096 flags |= MSG_EOR; 1097 if (flags & MSG_PEEK) { 1098 m = m->m_next; 1099 moff = 0; 1100 } else { 1101 nextrecord = m->m_nextpkt; 1102 sbfree(&so->so_rcv, m); 1103 if (mp != NULL) { 1104 *mp = m; 1105 mp = &m->m_next; 1106 so->so_rcv.sb_mb = m = m->m_next; 1107 *mp = NULL; 1108 } else { 1109 so->so_rcv.sb_mb = m_free(m); 1110 m = so->so_rcv.sb_mb; 1111 } 1112 if (m != NULL) { 1113 m->m_nextpkt = nextrecord; 1114 if (nextrecord == NULL) 1115 so->so_rcv.sb_lastrecord = m; 1116 } else { 1117 so->so_rcv.sb_mb = nextrecord; 1118 SB_EMPTY_FIXUP(&so->so_rcv); 1119 } 1120 SBLASTRECORDCHK(&so->so_rcv); 1121 SBLASTMBUFCHK(&so->so_rcv); 1122 } 1123 } else { 1124 if (flags & MSG_PEEK) 1125 moff += len; 1126 else { 1127 if (mp != NULL) 1128 *mp = m_copym(m, 0, len, M_TRYWAIT); 1129 m->m_data += len; 1130 m->m_len -= len; 1131 so->so_rcv.sb_cc -= len; 1132 } 1133 } 1134 if (so->so_oobmark) { 1135 if ((flags & MSG_PEEK) == 0) { 1136 so->so_oobmark -= len; 1137 if (so->so_oobmark == 0) { 1138 SOCKBUF_LOCK_ASSERT(&so->so_rcv); 1139 so->so_rcv.sb_state |= SBS_RCVATMARK; 1140 break; 1141 } 1142 } else { 1143 offset += len; 1144 if (offset == so->so_oobmark) 1145 break; 
1146 } 1147 } 1148 if (flags & MSG_EOR) 1149 break; 1150 /* 1151 * If the MSG_WAITALL flag is set (for non-atomic socket), 1152 * we must not quit until "uio->uio_resid == 0" or an error 1153 * termination. If a signal/timeout occurs, return 1154 * with a short count but without error. 1155 * Keep sockbuf locked against other readers. 1156 */ 1157 while (flags & MSG_WAITALL && m == NULL && uio->uio_resid > 0 && 1158 !sosendallatonce(so) && nextrecord == NULL) { 1159 SOCKBUF_LOCK_ASSERT(&so->so_rcv); 1160 if (so->so_error || so->so_rcv.sb_state & SBS_CANTRCVMORE) 1161 break; 1162 /* 1163 * Notify the protocol that some data has been 1164 * drained before blocking. 1165 */ 1166 if (pr->pr_flags & PR_WANTRCVD && so->so_pcb != NULL) { 1167 SOCKBUF_UNLOCK(&so->so_rcv); 1168 (*pr->pr_usrreqs->pru_rcvd)(so, flags); 1169 SOCKBUF_LOCK(&so->so_rcv); 1170 } 1171 SBLASTRECORDCHK(&so->so_rcv); 1172 SBLASTMBUFCHK(&so->so_rcv); 1173 error = sbwait(&so->so_rcv); 1174 if (error) 1175 goto release; 1176 m = so->so_rcv.sb_mb; 1177 if (m != NULL) 1178 nextrecord = m->m_nextpkt; 1179 } 1180 } 1181 1182 if (m != NULL && pr->pr_flags & PR_ATOMIC) { 1183 flags |= MSG_TRUNC; 1184 if ((flags & MSG_PEEK) == 0) { 1185 SOCKBUF_LOCK_ASSERT(&so->so_rcv); 1186 (void) sbdroprecord_locked(&so->so_rcv); 1187 } 1188 } 1189 if ((flags & MSG_PEEK) == 0) { 1190 if (m == NULL) { 1191 /* 1192 * First part is an inline SB_EMPTY_FIXUP(). Second 1193 * part makes sure sb_lastrecord is up-to-date if 1194 * there is still data in the socket buffer. 1195 */ 1196 so->so_rcv.sb_mb = nextrecord; 1197 if (so->so_rcv.sb_mb == NULL) { 1198 so->so_rcv.sb_mbtail = NULL; 1199 so->so_rcv.sb_lastrecord = NULL; 1200 } else if (nextrecord->m_nextpkt == NULL) 1201 so->so_rcv.sb_lastrecord = nextrecord; 1202 } 1203 SBLASTRECORDCHK(&so->so_rcv); 1204 SBLASTMBUFCHK(&so->so_rcv); 1205 if (pr->pr_flags & PR_WANTRCVD && so->so_pcb) { 1206 SOCKBUF_UNLOCK(&so->so_rcv); 1207 (*pr->pr_usrreqs->pru_rcvd)(so, flags); 1208 SOCKBUF_LOCK(&so->so_rcv); 1209 } 1210 } 1211 SOCKBUF_LOCK_ASSERT(&so->so_rcv); 1212 if (orig_resid == uio->uio_resid && orig_resid && 1213 (flags & MSG_EOR) == 0 && (so->so_rcv.sb_state & SBS_CANTRCVMORE) == 0) { 1214 sbunlock(&so->so_rcv); 1215 goto restart; 1216 } 1217 1218 if (flagsp != NULL) 1219 *flagsp |= flags; 1220release: 1221 SOCKBUF_LOCK_ASSERT(&so->so_rcv); 1222 sbunlock(&so->so_rcv); 1223out: 1224 SOCKBUF_UNLOCK(&so->so_rcv); 1225 return (error); 1226} 1227 1228int 1229soshutdown(so, how) 1230 struct socket *so; 1231 int how; 1232{ 1233 struct protosw *pr = so->so_proto; 1234 1235 if (!(how == SHUT_RD || how == SHUT_WR || how == SHUT_RDWR)) 1236 return (EINVAL); 1237 1238 if (how != SHUT_WR) 1239 sorflush(so); 1240 if (how != SHUT_RD) 1241 return ((*pr->pr_usrreqs->pru_shutdown)(so)); 1242 return (0); 1243} 1244 1245void 1246sorflush(so) 1247 struct socket *so; 1248{ 1249 struct sockbuf *sb = &so->so_rcv; 1250 struct protosw *pr = so->so_proto; 1251 struct sockbuf asb; 1252 1253 /* 1254 * XXXRW: This is quite ugly. The existing code made a copy of the 1255 * socket buffer, then zero'd the original to clear the buffer 1256 * fields. However, with mutexes in the socket buffer, this causes 1257 * problems. We only clear the zeroable bits of the original; 1258 * however, we have to initialize and destroy the mutex in the copy 1259 * so that dom_dispose() and sbrelease() can lock t as needed. 
1260 */ 1261 SOCKBUF_LOCK(sb); 1262 sb->sb_flags |= SB_NOINTR; 1263 (void) sblock(sb, M_WAITOK); 1264 /* 1265 * socantrcvmore_locked() drops the socket buffer mutex so that it 1266 * can safely perform wakeups. Re-acquire the mutex before 1267 * continuing. 1268 */ 1269 socantrcvmore_locked(so); 1270 SOCKBUF_LOCK(sb); 1271 sbunlock(sb); 1272 /* 1273 * Invalidate/clear most of the sockbuf structure, but leave 1274 * selinfo and mutex data unchanged. 1275 */ 1276 bzero(&asb, offsetof(struct sockbuf, sb_startzero)); 1277 bcopy(&sb->sb_startzero, &asb.sb_startzero, 1278 sizeof(*sb) - offsetof(struct sockbuf, sb_startzero)); 1279 bzero(&sb->sb_startzero, 1280 sizeof(*sb) - offsetof(struct sockbuf, sb_startzero)); 1281 SOCKBUF_UNLOCK(sb); 1282 1283 SOCKBUF_LOCK_INIT(&asb, "so_rcv"); 1284 if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose != NULL) 1285 (*pr->pr_domain->dom_dispose)(asb.sb_mb); 1286 sbrelease(&asb, so); 1287 SOCKBUF_LOCK_DESTROY(&asb); 1288} 1289 1290#ifdef INET 1291static int 1292do_setopt_accept_filter(so, sopt) 1293 struct socket *so; 1294 struct sockopt *sopt; 1295{ 1296 struct accept_filter_arg *afap = NULL; 1297 struct accept_filter *afp; 1298 struct so_accf *af = so->so_accf; 1299 int error = 0; 1300 1301 /* do not set/remove accept filters on non listen sockets */ 1302 if ((so->so_options & SO_ACCEPTCONN) == 0) { 1303 error = EINVAL; 1304 goto out; 1305 } 1306 1307 /* removing the filter */ 1308 if (sopt == NULL) { 1309 if (af != NULL) { 1310 if (af->so_accept_filter != NULL && 1311 af->so_accept_filter->accf_destroy != NULL) { 1312 af->so_accept_filter->accf_destroy(so); 1313 } 1314 if (af->so_accept_filter_str != NULL) { 1315 FREE(af->so_accept_filter_str, M_ACCF); 1316 } 1317 FREE(af, M_ACCF); 1318 so->so_accf = NULL; 1319 } 1320 so->so_options &= ~SO_ACCEPTFILTER; 1321 return (0); 1322 } 1323 /* adding a filter */ 1324 /* must remove previous filter first */ 1325 if (af != NULL) { 1326 error = EINVAL; 1327 goto out; 1328 } 1329 /* don't put large objects on the kernel stack */ 1330 MALLOC(afap, struct accept_filter_arg *, sizeof(*afap), M_TEMP, M_WAITOK); 1331 error = sooptcopyin(sopt, afap, sizeof *afap, sizeof *afap); 1332 afap->af_name[sizeof(afap->af_name)-1] = '\0'; 1333 afap->af_arg[sizeof(afap->af_arg)-1] = '\0'; 1334 if (error) 1335 goto out; 1336 afp = accept_filt_get(afap->af_name); 1337 if (afp == NULL) { 1338 error = ENOENT; 1339 goto out; 1340 } 1341 MALLOC(af, struct so_accf *, sizeof(*af), M_ACCF, M_WAITOK | M_ZERO); 1342 if (afp->accf_create != NULL) { 1343 if (afap->af_name[0] != '\0') { 1344 int len = strlen(afap->af_name) + 1; 1345 1346 MALLOC(af->so_accept_filter_str, char *, len, M_ACCF, M_WAITOK); 1347 strcpy(af->so_accept_filter_str, afap->af_name); 1348 } 1349 af->so_accept_filter_arg = afp->accf_create(so, afap->af_arg); 1350 if (af->so_accept_filter_arg == NULL) { 1351 FREE(af->so_accept_filter_str, M_ACCF); 1352 FREE(af, M_ACCF); 1353 so->so_accf = NULL; 1354 error = EINVAL; 1355 goto out; 1356 } 1357 } 1358 af->so_accept_filter = afp; 1359 so->so_accf = af; 1360 so->so_options |= SO_ACCEPTFILTER; 1361out: 1362 if (afap != NULL) 1363 FREE(afap, M_TEMP); 1364 return (error); 1365} 1366#endif /* INET */ 1367 1368/* 1369 * Perhaps this routine, and sooptcopyout(), below, ought to come in 1370 * an additional variant to handle the case where the option value needs 1371 * to be some kind of integer, but not a specific size. 
1372 * In addition to their use here, these functions are also called by the 1373 * protocol-level pr_ctloutput() routines. 1374 */ 1375int 1376sooptcopyin(sopt, buf, len, minlen) 1377 struct sockopt *sopt; 1378 void *buf; 1379 size_t len; 1380 size_t minlen; 1381{ 1382 size_t valsize; 1383 1384 /* 1385 * If the user gives us more than we wanted, we ignore it, 1386 * but if we don't get the minimum length the caller 1387 * wants, we return EINVAL. On success, sopt->sopt_valsize 1388 * is set to however much we actually retrieved. 1389 */ 1390 if ((valsize = sopt->sopt_valsize) < minlen) 1391 return EINVAL; 1392 if (valsize > len) 1393 sopt->sopt_valsize = valsize = len; 1394 1395 if (sopt->sopt_td != NULL) 1396 return (copyin(sopt->sopt_val, buf, valsize)); 1397 1398 bcopy(sopt->sopt_val, buf, valsize); 1399 return 0; 1400} 1401 1402int 1403sosetopt(so, sopt) 1404 struct socket *so; 1405 struct sockopt *sopt; 1406{ 1407 int error, optval; 1408 struct linger l; 1409 struct timeval tv; 1410 u_long val; 1411#ifdef MAC 1412 struct mac extmac; 1413#endif 1414 1415 error = 0; 1416 if (sopt->sopt_level != SOL_SOCKET) { 1417 if (so->so_proto && so->so_proto->pr_ctloutput) 1418 return ((*so->so_proto->pr_ctloutput) 1419 (so, sopt)); 1420 error = ENOPROTOOPT; 1421 } else { 1422 switch (sopt->sopt_name) { 1423#ifdef INET 1424 case SO_ACCEPTFILTER: 1425 error = do_setopt_accept_filter(so, sopt); 1426 if (error) 1427 goto bad; 1428 break; 1429#endif 1430 case SO_LINGER: 1431 error = sooptcopyin(sopt, &l, sizeof l, sizeof l); 1432 if (error) 1433 goto bad; 1434 1435 SOCK_LOCK(so); 1436 so->so_linger = l.l_linger; 1437 if (l.l_onoff) 1438 so->so_options |= SO_LINGER; 1439 else 1440 so->so_options &= ~SO_LINGER; 1441 SOCK_UNLOCK(so); 1442 break; 1443 1444 case SO_DEBUG: 1445 case SO_KEEPALIVE: 1446 case SO_DONTROUTE: 1447 case SO_USELOOPBACK: 1448 case SO_BROADCAST: 1449 case SO_REUSEADDR: 1450 case SO_REUSEPORT: 1451 case SO_OOBINLINE: 1452 case SO_TIMESTAMP: 1453 case SO_BINTIME: 1454 case SO_NOSIGPIPE: 1455 error = sooptcopyin(sopt, &optval, sizeof optval, 1456 sizeof optval); 1457 if (error) 1458 goto bad; 1459 SOCK_LOCK(so); 1460 if (optval) 1461 so->so_options |= sopt->sopt_name; 1462 else 1463 so->so_options &= ~sopt->sopt_name; 1464 SOCK_UNLOCK(so); 1465 break; 1466 1467 case SO_SNDBUF: 1468 case SO_RCVBUF: 1469 case SO_SNDLOWAT: 1470 case SO_RCVLOWAT: 1471 error = sooptcopyin(sopt, &optval, sizeof optval, 1472 sizeof optval); 1473 if (error) 1474 goto bad; 1475 1476 /* 1477 * Values < 1 make no sense for any of these 1478 * options, so disallow them. 1479 */ 1480 if (optval < 1) { 1481 error = EINVAL; 1482 goto bad; 1483 } 1484 1485 switch (sopt->sopt_name) { 1486 case SO_SNDBUF: 1487 case SO_RCVBUF: 1488 if (sbreserve(sopt->sopt_name == SO_SNDBUF ? 1489 &so->so_snd : &so->so_rcv, (u_long)optval, 1490 so, curthread) == 0) { 1491 error = ENOBUFS; 1492 goto bad; 1493 } 1494 break; 1495 1496 /* 1497 * Make sure the low-water is never greater than 1498 * the high-water. 1499 */ 1500 case SO_SNDLOWAT: 1501 SOCKBUF_LOCK(&so->so_snd); 1502 so->so_snd.sb_lowat = 1503 (optval > so->so_snd.sb_hiwat) ? 1504 so->so_snd.sb_hiwat : optval; 1505 SOCKBUF_UNLOCK(&so->so_snd); 1506 break; 1507 case SO_RCVLOWAT: 1508 SOCKBUF_LOCK(&so->so_rcv); 1509 so->so_rcv.sb_lowat = 1510 (optval > so->so_rcv.sb_hiwat) ? 
1511 so->so_rcv.sb_hiwat : optval; 1512 SOCKBUF_UNLOCK(&so->so_rcv); 1513 break; 1514 } 1515 break; 1516 1517 case SO_SNDTIMEO: 1518 case SO_RCVTIMEO: 1519 error = sooptcopyin(sopt, &tv, sizeof tv, 1520 sizeof tv); 1521 if (error) 1522 goto bad; 1523 1524 /* assert(hz > 0); */ 1525 if (tv.tv_sec < 0 || tv.tv_sec > SHRT_MAX / hz || 1526 tv.tv_usec < 0 || tv.tv_usec >= 1000000) { 1527 error = EDOM; 1528 goto bad; 1529 } 1530 /* assert(tick > 0); */ 1531 /* assert(ULONG_MAX - SHRT_MAX >= 1000000); */ 1532 val = (u_long)(tv.tv_sec * hz) + tv.tv_usec / tick; 1533 if (val > SHRT_MAX) { 1534 error = EDOM; 1535 goto bad; 1536 } 1537 if (val == 0 && tv.tv_usec != 0) 1538 val = 1; 1539 1540 switch (sopt->sopt_name) { 1541 case SO_SNDTIMEO: 1542 so->so_snd.sb_timeo = val; 1543 break; 1544 case SO_RCVTIMEO: 1545 so->so_rcv.sb_timeo = val; 1546 break; 1547 } 1548 break; 1549 case SO_LABEL: 1550#ifdef MAC 1551 error = sooptcopyin(sopt, &extmac, sizeof extmac, 1552 sizeof extmac); 1553 if (error) 1554 goto bad; 1555 error = mac_setsockopt_label(sopt->sopt_td->td_ucred, 1556 so, &extmac); 1557#else 1558 error = EOPNOTSUPP; 1559#endif 1560 break; 1561 default: 1562 error = ENOPROTOOPT; 1563 break; 1564 } 1565 if (error == 0 && so->so_proto != NULL && 1566 so->so_proto->pr_ctloutput != NULL) { 1567 (void) ((*so->so_proto->pr_ctloutput) 1568 (so, sopt)); 1569 } 1570 } 1571bad: 1572 return (error); 1573} 1574 1575/* Helper routine for getsockopt */ 1576int 1577sooptcopyout(struct sockopt *sopt, const void *buf, size_t len) 1578{ 1579 int error; 1580 size_t valsize; 1581 1582 error = 0; 1583 1584 /* 1585 * Documented get behavior is that we always return a value, 1586 * possibly truncated to fit in the user's buffer. 1587 * Traditional behavior is that we always tell the user 1588 * precisely how much we copied, rather than something useful 1589 * like the total amount we had available for her. 1590 * Note that this interface is not idempotent; the entire answer must 1591 * generated ahead of time. 1592 */ 1593 valsize = min(len, sopt->sopt_valsize); 1594 sopt->sopt_valsize = valsize; 1595 if (sopt->sopt_val != NULL) { 1596 if (sopt->sopt_td != NULL) 1597 error = copyout(buf, sopt->sopt_val, valsize); 1598 else 1599 bcopy(buf, sopt->sopt_val, valsize); 1600 } 1601 return error; 1602} 1603 1604int 1605sogetopt(so, sopt) 1606 struct socket *so; 1607 struct sockopt *sopt; 1608{ 1609 int error, optval; 1610 struct linger l; 1611 struct timeval tv; 1612#ifdef INET 1613 struct accept_filter_arg *afap; 1614#endif 1615#ifdef MAC 1616 struct mac extmac; 1617#endif 1618 1619 error = 0; 1620 if (sopt->sopt_level != SOL_SOCKET) { 1621 if (so->so_proto && so->so_proto->pr_ctloutput) { 1622 return ((*so->so_proto->pr_ctloutput) 1623 (so, sopt)); 1624 } else 1625 return (ENOPROTOOPT); 1626 } else { 1627 switch (sopt->sopt_name) { 1628#ifdef INET 1629 case SO_ACCEPTFILTER: 1630 if ((so->so_options & SO_ACCEPTCONN) == 0) 1631 return (EINVAL); 1632 MALLOC(afap, struct accept_filter_arg *, sizeof(*afap), 1633 M_TEMP, M_WAITOK | M_ZERO); 1634 if ((so->so_options & SO_ACCEPTFILTER) != 0) { 1635 strcpy(afap->af_name, so->so_accf->so_accept_filter->accf_name); 1636 if (so->so_accf->so_accept_filter_str != NULL) 1637 strcpy(afap->af_arg, so->so_accf->so_accept_filter_str); 1638 } 1639 error = sooptcopyout(sopt, afap, sizeof(*afap)); 1640 FREE(afap, M_TEMP); 1641 break; 1642#endif 1643 1644 case SO_LINGER: 1645 /* 1646 * XXXRW: We grab the lock here to get a consistent 1647 * snapshot of both fields. 
This may not really 1648 * be necessary. 1649 */ 1650 SOCK_LOCK(so); 1651 l.l_onoff = so->so_options & SO_LINGER; 1652 l.l_linger = so->so_linger; 1653 SOCK_UNLOCK(so); 1654 error = sooptcopyout(sopt, &l, sizeof l); 1655 break; 1656 1657 case SO_USELOOPBACK: 1658 case SO_DONTROUTE: 1659 case SO_DEBUG: 1660 case SO_KEEPALIVE: 1661 case SO_REUSEADDR: 1662 case SO_REUSEPORT: 1663 case SO_BROADCAST: 1664 case SO_OOBINLINE: 1665 case SO_TIMESTAMP: 1666 case SO_BINTIME: 1667 case SO_NOSIGPIPE: 1668 optval = so->so_options & sopt->sopt_name; 1669integer: 1670 error = sooptcopyout(sopt, &optval, sizeof optval); 1671 break; 1672 1673 case SO_TYPE: 1674 optval = so->so_type; 1675 goto integer; 1676 1677 case SO_ERROR: 1678 optval = so->so_error; 1679 so->so_error = 0; 1680 goto integer; 1681 1682 case SO_SNDBUF: 1683 optval = so->so_snd.sb_hiwat; 1684 goto integer; 1685 1686 case SO_RCVBUF: 1687 optval = so->so_rcv.sb_hiwat; 1688 goto integer; 1689 1690 case SO_SNDLOWAT: 1691 optval = so->so_snd.sb_lowat; 1692 goto integer; 1693 1694 case SO_RCVLOWAT: 1695 optval = so->so_rcv.sb_lowat; 1696 goto integer; 1697 1698 case SO_SNDTIMEO: 1699 case SO_RCVTIMEO: 1700 optval = (sopt->sopt_name == SO_SNDTIMEO ? 1701 so->so_snd.sb_timeo : so->so_rcv.sb_timeo); 1702 1703 tv.tv_sec = optval / hz; 1704 tv.tv_usec = (optval % hz) * tick; 1705 error = sooptcopyout(sopt, &tv, sizeof tv); 1706 break; 1707 case SO_LABEL: 1708#ifdef MAC 1709 error = sooptcopyin(sopt, &extmac, sizeof(extmac), 1710 sizeof(extmac)); 1711 if (error) 1712 return (error); 1713 error = mac_getsockopt_label(sopt->sopt_td->td_ucred, 1714 so, &extmac); 1715 if (error) 1716 return (error); 1717 error = sooptcopyout(sopt, &extmac, sizeof extmac); 1718#else 1719 error = EOPNOTSUPP; 1720#endif 1721 break; 1722 case SO_PEERLABEL: 1723#ifdef MAC 1724 error = sooptcopyin(sopt, &extmac, sizeof(extmac), 1725 sizeof(extmac)); 1726 if (error) 1727 return (error); 1728 error = mac_getsockopt_peerlabel( 1729 sopt->sopt_td->td_ucred, so, &extmac); 1730 if (error) 1731 return (error); 1732 error = sooptcopyout(sopt, &extmac, sizeof extmac); 1733#else 1734 error = EOPNOTSUPP; 1735#endif 1736 break; 1737 default: 1738 error = ENOPROTOOPT; 1739 break; 1740 } 1741 return (error); 1742 } 1743} 1744 1745/* XXX; prepare mbuf for (__FreeBSD__ < 3) routines. */ 1746int 1747soopt_getm(struct sockopt *sopt, struct mbuf **mp) 1748{ 1749 struct mbuf *m, *m_prev; 1750 int sopt_size = sopt->sopt_valsize; 1751 1752 MGET(m, sopt->sopt_td ? M_TRYWAIT : M_DONTWAIT, MT_DATA); 1753 if (m == NULL) 1754 return ENOBUFS; 1755 if (sopt_size > MLEN) { 1756 MCLGET(m, sopt->sopt_td ? M_TRYWAIT : M_DONTWAIT); 1757 if ((m->m_flags & M_EXT) == 0) { 1758 m_free(m); 1759 return ENOBUFS; 1760 } 1761 m->m_len = min(MCLBYTES, sopt_size); 1762 } else { 1763 m->m_len = min(MLEN, sopt_size); 1764 } 1765 sopt_size -= m->m_len; 1766 *mp = m; 1767 m_prev = m; 1768 1769 while (sopt_size) { 1770 MGET(m, sopt->sopt_td ? M_TRYWAIT : M_DONTWAIT, MT_DATA); 1771 if (m == NULL) { 1772 m_freem(*mp); 1773 return ENOBUFS; 1774 } 1775 if (sopt_size > MLEN) { 1776 MCLGET(m, sopt->sopt_td != NULL ? M_TRYWAIT : 1777 M_DONTWAIT); 1778 if ((m->m_flags & M_EXT) == 0) { 1779 m_freem(m); 1780 m_freem(*mp); 1781 return ENOBUFS; 1782 } 1783 m->m_len = min(MCLBYTES, sopt_size); 1784 } else { 1785 m->m_len = min(MLEN, sopt_size); 1786 } 1787 sopt_size -= m->m_len; 1788 m_prev->m_next = m; 1789 m_prev = m; 1790 } 1791 return 0; 1792} 1793 1794/* XXX; copyin sopt data into mbuf chain for (__FreeBSD__ < 3) routines. 
*/ 1795int 1796soopt_mcopyin(struct sockopt *sopt, struct mbuf *m) 1797{ 1798 struct mbuf *m0 = m; 1799 1800 if (sopt->sopt_val == NULL) 1801 return 0; 1802 while (m != NULL && sopt->sopt_valsize >= m->m_len) { 1803 if (sopt->sopt_td != NULL) { 1804 int error; 1805 1806 error = copyin(sopt->sopt_val, mtod(m, char *), 1807 m->m_len); 1808 if (error != 0) { 1809 m_freem(m0); 1810 return(error); 1811 } 1812 } else 1813 bcopy(sopt->sopt_val, mtod(m, char *), m->m_len); 1814 sopt->sopt_valsize -= m->m_len; 1815 sopt->sopt_val = (char *)sopt->sopt_val + m->m_len; 1816 m = m->m_next; 1817 } 1818 if (m != NULL) /* should be allocated enoughly at ip6_sooptmcopyin() */ 1819 panic("ip6_sooptmcopyin"); 1820 return 0; 1821} 1822 1823/* XXX; copyout mbuf chain data into soopt for (__FreeBSD__ < 3) routines. */ 1824int 1825soopt_mcopyout(struct sockopt *sopt, struct mbuf *m) 1826{ 1827 struct mbuf *m0 = m; 1828 size_t valsize = 0; 1829 1830 if (sopt->sopt_val == NULL) 1831 return 0; 1832 while (m != NULL && sopt->sopt_valsize >= m->m_len) { 1833 if (sopt->sopt_td != NULL) { 1834 int error; 1835 1836 error = copyout(mtod(m, char *), sopt->sopt_val, 1837 m->m_len); 1838 if (error != 0) { 1839 m_freem(m0); 1840 return(error); 1841 } 1842 } else 1843 bcopy(mtod(m, char *), sopt->sopt_val, m->m_len); 1844 sopt->sopt_valsize -= m->m_len; 1845 sopt->sopt_val = (char *)sopt->sopt_val + m->m_len; 1846 valsize += m->m_len; 1847 m = m->m_next; 1848 } 1849 if (m != NULL) { 1850 /* enough soopt buffer should be given from user-land */ 1851 m_freem(m0); 1852 return(EINVAL); 1853 } 1854 sopt->sopt_valsize = valsize; 1855 return 0; 1856} 1857 1858void 1859sohasoutofband(so) 1860 struct socket *so; 1861{ 1862 if (so->so_sigio != NULL) 1863 pgsigio(&so->so_sigio, SIGURG, 0); 1864 selwakeuppri(&so->so_rcv.sb_sel, PSOCK); 1865} 1866 1867int 1868sopoll(struct socket *so, int events, struct ucred *active_cred, 1869 struct thread *td) 1870{ 1871 int revents = 0; 1872 1873 if (events & (POLLIN | POLLRDNORM)) 1874 if (soreadable(so)) 1875 revents |= events & (POLLIN | POLLRDNORM); 1876 1877 if (events & POLLINIGNEOF) 1878 if (so->so_rcv.sb_cc >= so->so_rcv.sb_lowat || 1879 !TAILQ_EMPTY(&so->so_comp) || so->so_error) 1880 revents |= POLLINIGNEOF; 1881 1882 if (events & (POLLOUT | POLLWRNORM)) 1883 if (sowriteable(so)) 1884 revents |= events & (POLLOUT | POLLWRNORM); 1885 1886 if (events & (POLLPRI | POLLRDBAND)) 1887 if (so->so_oobmark || (so->so_rcv.sb_state & SBS_RCVATMARK)) 1888 revents |= events & (POLLPRI | POLLRDBAND); 1889 1890 if (revents == 0) { 1891 if (events & 1892 (POLLIN | POLLINIGNEOF | POLLPRI | POLLRDNORM | 1893 POLLRDBAND)) { 1894 SOCKBUF_LOCK(&so->so_rcv); 1895 selrecord(td, &so->so_rcv.sb_sel); 1896 so->so_rcv.sb_flags |= SB_SEL; 1897 SOCKBUF_UNLOCK(&so->so_rcv); 1898 } 1899 1900 if (events & (POLLOUT | POLLWRNORM)) { 1901 SOCKBUF_LOCK(&so->so_snd); 1902 selrecord(td, &so->so_snd.sb_sel); 1903 so->so_snd.sb_flags |= SB_SEL; 1904 SOCKBUF_UNLOCK(&so->so_snd); 1905 } 1906 } 1907 1908 return (revents); 1909} 1910 1911int 1912soo_kqfilter(struct file *fp, struct knote *kn) 1913{ 1914 struct socket *so = kn->kn_fp->f_data; 1915 struct sockbuf *sb; 1916 1917 switch (kn->kn_filter) { 1918 case EVFILT_READ: 1919 if (so->so_options & SO_ACCEPTCONN) 1920 kn->kn_fop = &solisten_filtops; 1921 else 1922 kn->kn_fop = &soread_filtops; 1923 sb = &so->so_rcv; 1924 break; 1925 case EVFILT_WRITE: 1926 kn->kn_fop = &sowrite_filtops; 1927 sb = &so->so_snd; 1928 break; 1929 default: 1930 return (1); 1931 } 1932 1933 
SOCKBUF_LOCK(sb);
    SLIST_INSERT_HEAD(&sb->sb_sel.si_note, kn, kn_selnext);
    sb->sb_flags |= SB_KNOTE;
    SOCKBUF_UNLOCK(sb);
    return (0);
}

static void
filt_sordetach(struct knote *kn)
{
    struct socket *so = kn->kn_fp->f_data;

    SOCKBUF_LOCK(&so->so_rcv);
    SLIST_REMOVE(&so->so_rcv.sb_sel.si_note, kn, knote, kn_selnext);
    if (SLIST_EMPTY(&so->so_rcv.sb_sel.si_note))
        so->so_rcv.sb_flags &= ~SB_KNOTE;
    SOCKBUF_UNLOCK(&so->so_rcv);
}

/*ARGSUSED*/
static int
filt_soread(struct knote *kn, long hint)
{
    struct socket *so = kn->kn_fp->f_data;
    int need_lock, result;

    /*
     * XXXRW: Conditional locking because filt_soread() can be called
     * either from KNOTE() in the socket context where the socket buffer
     * lock is already held, or from kqueue() itself.
     */
    need_lock = !SOCKBUF_OWNED(&so->so_rcv);
    if (need_lock)
        SOCKBUF_LOCK(&so->so_rcv);
    kn->kn_data = so->so_rcv.sb_cc - so->so_rcv.sb_ctl;
    if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
        kn->kn_flags |= EV_EOF;
        kn->kn_fflags = so->so_error;
        result = 1;
    } else if (so->so_error)	/* temporary udp error */
        result = 1;
    else if (kn->kn_sfflags & NOTE_LOWAT)
        result = (kn->kn_data >= kn->kn_sdata);
    else
        result = (so->so_rcv.sb_cc >= so->so_rcv.sb_lowat);
    if (need_lock)
        SOCKBUF_UNLOCK(&so->so_rcv);
    return (result);
}

static void
filt_sowdetach(struct knote *kn)
{
    struct socket *so = kn->kn_fp->f_data;

    SOCKBUF_LOCK(&so->so_snd);
    SLIST_REMOVE(&so->so_snd.sb_sel.si_note, kn, knote, kn_selnext);
    if (SLIST_EMPTY(&so->so_snd.sb_sel.si_note))
        so->so_snd.sb_flags &= ~SB_KNOTE;
    SOCKBUF_UNLOCK(&so->so_snd);
}

/*ARGSUSED*/
static int
filt_sowrite(struct knote *kn, long hint)
{
    struct socket *so = kn->kn_fp->f_data;
    int need_lock, result;

    /*
     * XXXRW: Conditional locking because filt_sowrite() can be called
     * either from KNOTE() in the socket context where the socket buffer
     * lock is already held, or from kqueue() itself.
     */
    need_lock = !SOCKBUF_OWNED(&so->so_snd);
    if (need_lock)
        SOCKBUF_LOCK(&so->so_snd);
    kn->kn_data = sbspace(&so->so_snd);
    if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
        kn->kn_flags |= EV_EOF;
        kn->kn_fflags = so->so_error;
        result = 1;
    } else if (so->so_error)	/* temporary udp error */
        result = 1;
    else if (((so->so_state & SS_ISCONNECTED) == 0) &&
        (so->so_proto->pr_flags & PR_CONNREQUIRED))
        result = 0;
    else if (kn->kn_sfflags & NOTE_LOWAT)
        result = (kn->kn_data >= kn->kn_sdata);
    else
        result = (kn->kn_data >= so->so_snd.sb_lowat);
    if (need_lock)
        SOCKBUF_UNLOCK(&so->so_snd);
    return (result);
}

/*ARGSUSED*/
static int
filt_solisten(struct knote *kn, long hint)
{
    struct socket *so = kn->kn_fp->f_data;

    kn->kn_data = so->so_qlen;
    return (! TAILQ_EMPTY(&so->so_comp));
}

int
socheckuid(struct socket *so, uid_t uid)
{

    if (so == NULL)
        return (EPERM);
    if (so->so_cred->cr_uid == uid)
        return (0);
    return (EPERM);
}