	.fo_flags = DFLAG_PASSABLE
};

static void filt_pipedetach(struct knote *kn);
static void filt_pipedetach_notsup(struct knote *kn);
static int filt_pipenotsup(struct knote *kn, long hint);
static int filt_piperead(struct knote *kn, long hint);
static int filt_pipewrite(struct knote *kn, long hint);

static struct filterops pipe_nfiltops = {
	.f_isfd = 1,
	.f_detach = filt_pipedetach_notsup,
	.f_event = filt_pipenotsup
};
static struct filterops pipe_rfiltops = {
	.f_isfd = 1,
	.f_detach = filt_pipedetach,
	.f_event = filt_piperead
};
static struct filterops pipe_wfiltops = {
	.f_isfd = 1,
	.f_detach = filt_pipedetach,
	.f_event = filt_pipewrite
};

/*
 * Default pipe buffer size(s), this can be kind-of large now because pipe
 * space is pageable.  The pipe code will try to maintain locality of
 * reference for performance reasons, so small amounts of outstanding I/O
 * will not wipe the cache.
 */
#define MINPIPESIZE (PIPE_SIZE/3)
#define MAXPIPESIZE (2*PIPE_SIZE/3)

static long amountpipekva;
static int pipefragretry;
static int pipeallocfail;
static int piperesizefail;
static int piperesizeallowed = 1;

SYSCTL_LONG(_kern_ipc, OID_AUTO, maxpipekva, CTLFLAG_RDTUN,
	   &maxpipekva, 0, "Pipe KVA limit");
SYSCTL_LONG(_kern_ipc, OID_AUTO, pipekva, CTLFLAG_RD,
	   &amountpipekva, 0, "Pipe KVA usage");
SYSCTL_INT(_kern_ipc, OID_AUTO, pipefragretry, CTLFLAG_RD,
	  &pipefragretry, 0, "Pipe allocation retries due to fragmentation");
SYSCTL_INT(_kern_ipc, OID_AUTO, pipeallocfail, CTLFLAG_RD,
	  &pipeallocfail, 0, "Pipe allocation failures");
SYSCTL_INT(_kern_ipc, OID_AUTO, piperesizefail, CTLFLAG_RD,
	  &piperesizefail, 0, "Pipe resize failures");
SYSCTL_INT(_kern_ipc, OID_AUTO, piperesizeallowed, CTLFLAG_RW,
	  &piperesizeallowed, 0, "Pipe resizing allowed");
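
/*
 * The knobs above are exported through sysctl(8); for instance, pipe
 * resizing can be turned off at runtime (illustration only, standard
 * sysctl usage):
 *
 *	# sysctl kern.ipc.piperesizeallowed=0
 *
 * kern.ipc.maxpipekva is CTLFLAG_RDTUN, i.e. read-only at runtime, so
 * it has to be set as a loader(8) tunable instead.
 */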

static void pipeinit(void *dummy __unused);
static void pipeclose(struct pipe *cpipe);
static void pipe_free_kmem(struct pipe *cpipe);
static int pipe_create(struct pipe *pipe, int backing);
static int pipe_paircreate(struct thread *td, struct pipepair **p_pp);
static __inline int pipelock(struct pipe *cpipe, int catch);
static __inline void pipeunlock(struct pipe *cpipe);
#ifndef PIPE_NODIRECT
static int pipe_build_write_buffer(struct pipe *wpipe, struct uio *uio);
static void pipe_destroy_write_buffer(struct pipe *wpipe);
static int pipe_direct_write(struct pipe *wpipe, struct uio *uio);
static void pipe_clone_write_buffer(struct pipe *wpipe);
#endif
static int pipespace(struct pipe *cpipe, int size);
static int pipespace_new(struct pipe *cpipe, int size);

static int pipe_zone_ctor(void *mem, int size, void *arg, int flags);
static int pipe_zone_init(void *mem, int size, int flags);
static void pipe_zone_fini(void *mem, int size);

static uma_zone_t pipe_zone;
static struct unrhdr *pipeino_unr;
static dev_t pipedev_ino;

SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_ANY, pipeinit, NULL);

static void
pipeinit(void *dummy __unused)
{

	pipe_zone = uma_zcreate("pipe", sizeof(struct pipepair),
	    pipe_zone_ctor, NULL, pipe_zone_init, pipe_zone_fini,
	    UMA_ALIGN_PTR, 0);
	KASSERT(pipe_zone != NULL, ("pipe_zone not initialized"));
	pipeino_unr = new_unrhdr(1, INT32_MAX, NULL);
	KASSERT(pipeino_unr != NULL, ("pipe fake inodes not initialized"));
	pipedev_ino = devfs_alloc_cdp_inode();
	KASSERT(pipedev_ino > 0, ("pipe dev inode not initialized"));
}

static int
pipe_zone_ctor(void *mem, int size, void *arg, int flags)
{
	struct pipepair *pp;
	struct pipe *rpipe, *wpipe;

	KASSERT(size == sizeof(*pp), ("pipe_zone_ctor: wrong size"));

	pp = (struct pipepair *)mem;

	/*
	 * We zero both pipe endpoints to make sure all the kmem pointers
	 * are NULL, flag fields are zero'd, etc.  We timestamp both
	 * endpoints with the same time.
	 */
	rpipe = &pp->pp_rpipe;
	bzero(rpipe, sizeof(*rpipe));
	vfs_timestamp(&rpipe->pipe_ctime);
	rpipe->pipe_atime = rpipe->pipe_mtime = rpipe->pipe_ctime;

	wpipe = &pp->pp_wpipe;
	bzero(wpipe, sizeof(*wpipe));
	wpipe->pipe_ctime = rpipe->pipe_ctime;
	wpipe->pipe_atime = wpipe->pipe_mtime = rpipe->pipe_ctime;

	rpipe->pipe_peer = wpipe;
	rpipe->pipe_pair = pp;
	wpipe->pipe_peer = rpipe;
	wpipe->pipe_pair = pp;

	/*
	 * Mark both endpoints as present; they will later get free'd
	 * one at a time.  When both are free'd, then the whole pair
	 * is released.
	 */
	rpipe->pipe_present = PIPE_ACTIVE;
	wpipe->pipe_present = PIPE_ACTIVE;

	/*
	 * Eventually, the MAC Framework may initialize the label
	 * in ctor or init, but for now we do it elsewhere to avoid
	 * blocking in ctor or init.
	 */
	pp->pp_label = NULL;

	return (0);
}

static int
pipe_zone_init(void *mem, int size, int flags)
{
	struct pipepair *pp;

	KASSERT(size == sizeof(*pp), ("pipe_zone_init: wrong size"));

	pp = (struct pipepair *)mem;

	mtx_init(&pp->pp_mtx, "pipe mutex", NULL, MTX_DEF | MTX_RECURSE);
	return (0);
}

static void
pipe_zone_fini(void *mem, int size)
{
	struct pipepair *pp;

	KASSERT(size == sizeof(*pp), ("pipe_zone_fini: wrong size"));

	pp = (struct pipepair *)mem;

	mtx_destroy(&pp->pp_mtx);
}
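
/*
 * Note the division of labour between the UMA callbacks above:
 * pipe_zone_init()/pipe_zone_fini() run when an item is imported into
 * or released from the zone, so the pair mutex is set up once and
 * survives cached reuse, while pipe_zone_ctor() runs on every
 * allocation and re-zeroes the endpoints; see uma(9).
 */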

static int
pipe_paircreate(struct thread *td, struct pipepair **p_pp)
{
	struct pipepair *pp;
	struct pipe *rpipe, *wpipe;
	int error;

	*p_pp = pp = uma_zalloc(pipe_zone, M_WAITOK);
#ifdef MAC
	/*
	 * The MAC label is shared between the connected endpoints.  As a
	 * result mac_pipe_init() and mac_pipe_create() are called once
	 * for the pair, and not on the endpoints.
	 */
	mac_pipe_init(pp);
	mac_pipe_create(td->td_ucred, pp);
#endif
	rpipe = &pp->pp_rpipe;
	wpipe = &pp->pp_wpipe;

	knlist_init_mtx(&rpipe->pipe_sel.si_note, PIPE_MTX(rpipe));
	knlist_init_mtx(&wpipe->pipe_sel.si_note, PIPE_MTX(wpipe));

	/* Only the forward direction pipe is backed by default */
	if ((error = pipe_create(rpipe, 1)) != 0 ||
	    (error = pipe_create(wpipe, 0)) != 0) {
		pipeclose(rpipe);
		pipeclose(wpipe);
		return (error);
	}

	rpipe->pipe_state |= PIPE_DIRECTOK;
	wpipe->pipe_state |= PIPE_DIRECTOK;
	return (0);
}

int
pipe_named_ctor(struct pipe **ppipe, struct thread *td)
{
	struct pipepair *pp;
	int error;

	error = pipe_paircreate(td, &pp);
	if (error != 0)
		return (error);
	pp->pp_rpipe.pipe_state |= PIPE_NAMED;
	*ppipe = &pp->pp_rpipe;
	return (0);
}

void
pipe_dtor(struct pipe *dpipe)
{
	ino_t ino;

	ino = dpipe->pipe_ino;
	funsetown(&dpipe->pipe_sigio);
	pipeclose(dpipe);
	if (dpipe->pipe_state & PIPE_NAMED) {
		dpipe = dpipe->pipe_peer;
		funsetown(&dpipe->pipe_sigio);
		pipeclose(dpipe);
	}
	if (ino != 0 && ino != (ino_t)-1)
		free_unr(pipeino_unr, ino);
}

/*
 * The pipe system call for the DTYPE_PIPE type of pipes.  If we fail, let
 * the zone pick up the pieces via pipeclose().
 */
int
kern_pipe(struct thread *td, int fildes[2])
{

	return (kern_pipe2(td, fildes, 0));
}

int
kern_pipe2(struct thread *td, int fildes[2], int flags)
{
	struct filedesc *fdp;
	struct file *rf, *wf;
	struct pipe *rpipe, *wpipe;
	struct pipepair *pp;
	int fd, fflags, error;

	fdp = td->td_proc->p_fd;
	error = pipe_paircreate(td, &pp);
	if (error != 0)
		return (error);
	rpipe = &pp->pp_rpipe;
	wpipe = &pp->pp_wpipe;
	error = falloc(td, &rf, &fd, flags);
	if (error) {
		pipeclose(rpipe);
		pipeclose(wpipe);
		return (error);
	}
	/* An extra reference on `rf' has been held for us by falloc(). */
	fildes[0] = fd;

	fflags = FREAD | FWRITE;
	if ((flags & O_NONBLOCK) != 0)
		fflags |= FNONBLOCK;

	/*
	 * Warning: once we've gotten past allocation of the fd for the
	 * read-side, we can only drop the read side via fdrop() in order
	 * to avoid races against processes which manage to dup() the read
	 * side while we are blocked trying to allocate the write side.
	 */
	finit(rf, fflags, DTYPE_PIPE, rpipe, &pipeops);
	error = falloc(td, &wf, &fd, flags);
	if (error) {
		fdclose(fdp, rf, fildes[0], td);
		fdrop(rf, td);
		/* rpipe has been closed by fdrop(). */
		pipeclose(wpipe);
		return (error);
	}
	/* An extra reference on `wf' has been held for us by falloc(). */
	finit(wf, fflags, DTYPE_PIPE, wpipe, &pipeops);
	fdrop(wf, td);
	fildes[1] = fd;
	fdrop(rf, td);

	return (0);
}
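
/*
 * From userland this path is reached through pipe(2) and pipe2(2).  A
 * minimal illustrative sketch (not part of the kernel source):
 *
 *	int fds[2];
 *
 *	if (pipe2(fds, O_CLOEXEC | O_NONBLOCK) == -1)
 *		err(1, "pipe2");
 *
 * fds[0] is the read end and fds[1] the write end; sys_pipe2() below
 * rejects any flag other than O_CLOEXEC and O_NONBLOCK with EINVAL.
 */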

/* ARGSUSED */
int
sys_pipe(struct thread *td, struct pipe_args *uap)
{
	int error;
	int fildes[2];

	error = kern_pipe(td, fildes);
	if (error)
		return (error);

	td->td_retval[0] = fildes[0];
	td->td_retval[1] = fildes[1];

	return (0);
}

int
sys_pipe2(struct thread *td, struct pipe2_args *uap)
{
	int error, fildes[2];

	if (uap->flags & ~(O_CLOEXEC | O_NONBLOCK))
		return (EINVAL);
	error = kern_pipe2(td, fildes, uap->flags);
	if (error)
		return (error);
	error = copyout(fildes, uap->fildes, 2 * sizeof(int));
	if (error) {
		(void)kern_close(td, fildes[0]);
		(void)kern_close(td, fildes[1]);
	}
	return (error);
}

/*
 * Allocate kva for pipe circular buffer; the space is pageable.
 * This routine will 'realloc' the size of a pipe safely; if it fails
 * it will retain the old buffer.
 * If it fails it will return ENOMEM.
 */
static int
pipespace_new(cpipe, size)
	struct pipe *cpipe;
	int size;
{
	caddr_t buffer;
	int error, cnt, firstseg;
	static int curfail = 0;
	static struct timeval lastfail;

	KASSERT(!mtx_owned(PIPE_MTX(cpipe)), ("pipespace: pipe mutex locked"));
	KASSERT(!(cpipe->pipe_state & PIPE_DIRECTW),
	    ("pipespace: resize of direct writes not allowed"));
retry:
	cnt = cpipe->pipe_buffer.cnt;
	if (cnt > size)
		size = cnt;

	size = round_page(size);
	buffer = (caddr_t) vm_map_min(pipe_map);

	error = vm_map_find(pipe_map, NULL, 0,
	    (vm_offset_t *) &buffer, size, 1,
	    VM_PROT_ALL, VM_PROT_ALL, 0);
	if (error != KERN_SUCCESS) {
		if ((cpipe->pipe_buffer.buffer == NULL) &&
		    (size > SMALL_PIPE_SIZE)) {
			size = SMALL_PIPE_SIZE;
			pipefragretry++;
			goto retry;
		}
		if (cpipe->pipe_buffer.buffer == NULL) {
			pipeallocfail++;
			if (ppsratecheck(&lastfail, &curfail, 1))
				printf("kern.ipc.maxpipekva exceeded; see tuning(7)\n");
		} else {
			piperesizefail++;
		}
		return (ENOMEM);
	}

	/* copy data, then free old resources if we're resizing */
	if (cnt > 0) {
		if (cpipe->pipe_buffer.in <= cpipe->pipe_buffer.out) {
			firstseg = cpipe->pipe_buffer.size - cpipe->pipe_buffer.out;
			bcopy(&cpipe->pipe_buffer.buffer[cpipe->pipe_buffer.out],
				buffer, firstseg);
			if ((cnt - firstseg) > 0)
				bcopy(cpipe->pipe_buffer.buffer, &buffer[firstseg],
					cpipe->pipe_buffer.in);
		} else {
			bcopy(&cpipe->pipe_buffer.buffer[cpipe->pipe_buffer.out],
				buffer, cnt);
		}
	}
	pipe_free_kmem(cpipe);
	cpipe->pipe_buffer.buffer = buffer;
	cpipe->pipe_buffer.size = size;
	cpipe->pipe_buffer.in = cnt;
	cpipe->pipe_buffer.out = 0;
	cpipe->pipe_buffer.cnt = cnt;
	atomic_add_long(&amountpipekva, cpipe->pipe_buffer.size);
	return (0);
}
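
/*
 * Worked example of the wraparound copy in pipespace_new() above, with
 * illustrative numbers: for size = 16384, out = 12288 and in = 2048,
 * the buffer has wrapped (in <= out) and holds cnt = 6144 bytes.
 * firstseg = 16384 - 12288 = 4096 bytes are copied from the tail of
 * the old buffer, then cnt - firstseg = 2048 bytes from its head.  In
 * the wrapped case in == cnt - firstseg always holds, which is why the
 * second bcopy() can use pipe_buffer.in as its length.
 */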

/*
 * Wrapper for pipespace_new() that performs locking assertions.
 */
static int
pipespace(cpipe, size)
	struct pipe *cpipe;
	int size;
{

	KASSERT(cpipe->pipe_state & PIPE_LOCKFL,
	    ("Unlocked pipe passed to pipespace"));
	return (pipespace_new(cpipe, size));
}

/*
 * lock a pipe for I/O, blocking other access
 */
static __inline int
pipelock(cpipe, catch)
	struct pipe *cpipe;
	int catch;
{
	int error;

	PIPE_LOCK_ASSERT(cpipe, MA_OWNED);
	while (cpipe->pipe_state & PIPE_LOCKFL) {
		cpipe->pipe_state |= PIPE_LWANT;
		error = msleep(cpipe, PIPE_MTX(cpipe),
		    catch ? (PRIBIO | PCATCH) : PRIBIO,
		    "pipelk", 0);
		if (error != 0)
			return (error);
	}
	cpipe->pipe_state |= PIPE_LOCKFL;
	return (0);
}

/*
 * unlock a pipe I/O lock
 */
static __inline void
pipeunlock(cpipe)
	struct pipe *cpipe;
{

	PIPE_LOCK_ASSERT(cpipe, MA_OWNED);
	KASSERT(cpipe->pipe_state & PIPE_LOCKFL,
	    ("Unlocked pipe passed to pipeunlock"));
	cpipe->pipe_state &= ~PIPE_LOCKFL;
	if (cpipe->pipe_state & PIPE_LWANT) {
		cpipe->pipe_state &= ~PIPE_LWANT;
		wakeup(cpipe);
	}
}

void
pipeselwakeup(cpipe)
	struct pipe *cpipe;
{

	PIPE_LOCK_ASSERT(cpipe, MA_OWNED);
	if (cpipe->pipe_state & PIPE_SEL) {
		selwakeuppri(&cpipe->pipe_sel, PSOCK);
		if (!SEL_WAITING(&cpipe->pipe_sel))
			cpipe->pipe_state &= ~PIPE_SEL;
	}
	if ((cpipe->pipe_state & PIPE_ASYNC) && cpipe->pipe_sigio)
		pgsigio(&cpipe->pipe_sigio, SIGIO, 0);
	KNOTE_LOCKED(&cpipe->pipe_sel.si_note, 0);
}
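
/*
 * Note that there are two levels of locking at work here: PIPE_MTX()
 * is the short-term pair mutex, while PIPE_LOCKFL (acquired through
 * pipelock()/pipeunlock() above) is a long-term I/O lock held across
 * sleeps and temporary mutex drops in the read and write paths below.
 */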

/*
 * Initialize and allocate VM and memory for pipe.  The structure
 * will start out zero'd from the ctor, so we just manage the kmem.
 */
static int
pipe_create(pipe, backing)
	struct pipe *pipe;
	int backing;
{
	int error;

	if (backing) {
		if (amountpipekva > maxpipekva / 2)
			error = pipespace_new(pipe, SMALL_PIPE_SIZE);
		else
			error = pipespace_new(pipe, PIPE_SIZE);
	} else {
		/* If we're not backing this pipe, no need to do anything. */
		error = 0;
	}
	pipe->pipe_ino = -1;
	return (error);
}

/* ARGSUSED */
static int
pipe_read(fp, uio, active_cred, flags, td)
	struct file *fp;
	struct uio *uio;
	struct ucred *active_cred;
	struct thread *td;
	int flags;
{
	struct pipe *rpipe;
	int error;
	int nread = 0;
	int size;

	rpipe = fp->f_data;
	PIPE_LOCK(rpipe);
	++rpipe->pipe_busy;
	error = pipelock(rpipe, 1);
	if (error)
		goto unlocked_error;

#ifdef MAC
	error = mac_pipe_check_read(active_cred, rpipe->pipe_pair);
	if (error)
		goto locked_error;
#endif
	if (amountpipekva > (3 * maxpipekva) / 4) {
		if (!(rpipe->pipe_state & PIPE_DIRECTW) &&
		    (rpipe->pipe_buffer.size > SMALL_PIPE_SIZE) &&
		    (rpipe->pipe_buffer.cnt <= SMALL_PIPE_SIZE) &&
		    (piperesizeallowed == 1)) {
			PIPE_UNLOCK(rpipe);
			pipespace(rpipe, SMALL_PIPE_SIZE);
			PIPE_LOCK(rpipe);
		}
	}

	while (uio->uio_resid) {
		/*
		 * normal pipe buffer receive
		 */
		if (rpipe->pipe_buffer.cnt > 0) {
			size = rpipe->pipe_buffer.size - rpipe->pipe_buffer.out;
			if (size > rpipe->pipe_buffer.cnt)
				size = rpipe->pipe_buffer.cnt;
			if (size > uio->uio_resid)
				size = uio->uio_resid;

			PIPE_UNLOCK(rpipe);
			error = uiomove(
			    &rpipe->pipe_buffer.buffer[rpipe->pipe_buffer.out],
			    size, uio);
			PIPE_LOCK(rpipe);
			if (error)
				break;

			rpipe->pipe_buffer.out += size;
			if (rpipe->pipe_buffer.out >= rpipe->pipe_buffer.size)
				rpipe->pipe_buffer.out = 0;

			rpipe->pipe_buffer.cnt -= size;

			/*
			 * If there is no more to read in the pipe, reset
			 * its pointers to the beginning.  This improves
			 * cache hit stats.
			 */
			if (rpipe->pipe_buffer.cnt == 0) {
				rpipe->pipe_buffer.in = 0;
				rpipe->pipe_buffer.out = 0;
			}
			nread += size;
#ifndef PIPE_NODIRECT
		/*
		 * Direct copy, bypassing a kernel buffer.
		 */
		} else if ((size = rpipe->pipe_map.cnt) &&
			   (rpipe->pipe_state & PIPE_DIRECTW)) {
			if (size > uio->uio_resid)
				size = (u_int) uio->uio_resid;

			PIPE_UNLOCK(rpipe);
			error = uiomove_fromphys(rpipe->pipe_map.ms,
			    rpipe->pipe_map.pos, size, uio);
			PIPE_LOCK(rpipe);
			if (error)
				break;
			nread += size;
			rpipe->pipe_map.pos += size;
			rpipe->pipe_map.cnt -= size;
			if (rpipe->pipe_map.cnt == 0) {
				rpipe->pipe_state &= ~(PIPE_DIRECTW|PIPE_WANTW);
				wakeup(rpipe);
			}
#endif
		} else {
			/*
			 * detect EOF condition
			 * read returns 0 on EOF, no need to set error
			 */
			if (rpipe->pipe_state & PIPE_EOF)
				break;

			/*
			 * If the "write-side" has been blocked, wake it up now.
			 */
			if (rpipe->pipe_state & PIPE_WANTW) {
				rpipe->pipe_state &= ~PIPE_WANTW;
				wakeup(rpipe);
			}

			/*
			 * Break if some data was read.
			 */
			if (nread > 0)
				break;

			/*
			 * Unlock the pipe buffer for our remaining processing.
			 * We will either break out with an error or we will
			 * sleep and relock to loop.
			 */
			pipeunlock(rpipe);

			/*
			 * Handle non-blocking mode operation or
			 * wait for more data.
			 */
			if (fp->f_flag & FNONBLOCK) {
				error = EAGAIN;
			} else {
				rpipe->pipe_state |= PIPE_WANTR;
				if ((error = msleep(rpipe, PIPE_MTX(rpipe),
				    PRIBIO | PCATCH,
				    "piperd", 0)) == 0)
					error = pipelock(rpipe, 1);
			}
			if (error)
				goto unlocked_error;
		}
	}
#ifdef MAC
locked_error:
#endif
	pipeunlock(rpipe);

	/* XXX: should probably do this before getting any locks. */
	if (error == 0)
		vfs_timestamp(&rpipe->pipe_atime);
unlocked_error:
	--rpipe->pipe_busy;

	/*
	 * PIPE_WANT processing only makes sense if pipe_busy is 0.
	 */
	if ((rpipe->pipe_busy == 0) && (rpipe->pipe_state & PIPE_WANT)) {
		rpipe->pipe_state &= ~(PIPE_WANT|PIPE_WANTW);
		wakeup(rpipe);
	} else if (rpipe->pipe_buffer.cnt < MINPIPESIZE) {
		/*
		 * Handle write blocking hysteresis.
		 */
		if (rpipe->pipe_state & PIPE_WANTW) {
			rpipe->pipe_state &= ~PIPE_WANTW;
			wakeup(rpipe);
		}
	}

	if ((rpipe->pipe_buffer.size - rpipe->pipe_buffer.cnt) >= PIPE_BUF)
		pipeselwakeup(rpipe);

	PIPE_UNLOCK(rpipe);
	return (error);
}

#ifndef PIPE_NODIRECT
/*
 * Map the sending process's buffer into kernel space and wire it.
 * This is similar to a physical write operation.
 */
static int
pipe_build_write_buffer(wpipe, uio)
	struct pipe *wpipe;
	struct uio *uio;
{
	u_int size;
	int i;

	PIPE_LOCK_ASSERT(wpipe, MA_NOTOWNED);
	KASSERT(wpipe->pipe_state & PIPE_DIRECTW,
	    ("Clone attempt on non-direct write pipe!"));

	if (uio->uio_iov->iov_len > wpipe->pipe_buffer.size)
		size = wpipe->pipe_buffer.size;
	else
		size = uio->uio_iov->iov_len;

	if ((i = vm_fault_quick_hold_pages(&curproc->p_vmspace->vm_map,
	    (vm_offset_t)uio->uio_iov->iov_base, size, VM_PROT_READ,
	    wpipe->pipe_map.ms, PIPENPAGES)) < 0)
		return (EFAULT);

/*
 * set up the control block
 */
	wpipe->pipe_map.npages = i;
	wpipe->pipe_map.pos =
	    ((vm_offset_t) uio->uio_iov->iov_base) & PAGE_MASK;
	wpipe->pipe_map.cnt = size;

/*
 * and update the uio data
 */

	uio->uio_iov->iov_len -= size;
	uio->uio_iov->iov_base = (char *)uio->uio_iov->iov_base + size;
	if (uio->uio_iov->iov_len == 0)
		uio->uio_iov++;
	uio->uio_resid -= size;
	uio->uio_offset += size;
	return (0);
}

/*
 * unmap and unwire the process buffer
 */
static void
pipe_destroy_write_buffer(wpipe)
	struct pipe *wpipe;
{

	PIPE_LOCK_ASSERT(wpipe, MA_OWNED);
	vm_page_unhold_pages(wpipe->pipe_map.ms, wpipe->pipe_map.npages);
	wpipe->pipe_map.npages = 0;
}
904 */ 905static void 906pipe_clone_write_buffer(wpipe) 907 struct pipe *wpipe; 908{ 909 struct uio uio; 910 struct iovec iov; 911 int size; 912 int pos; 913 914 PIPE_LOCK_ASSERT(wpipe, MA_OWNED); 915 size = wpipe->pipe_map.cnt; 916 pos = wpipe->pipe_map.pos; 917 918 wpipe->pipe_buffer.in = size; 919 wpipe->pipe_buffer.out = 0; 920 wpipe->pipe_buffer.cnt = size; 921 wpipe->pipe_state &= ~PIPE_DIRECTW; 922 923 PIPE_UNLOCK(wpipe); 924 iov.iov_base = wpipe->pipe_buffer.buffer; 925 iov.iov_len = size; 926 uio.uio_iov = &iov; 927 uio.uio_iovcnt = 1; 928 uio.uio_offset = 0; 929 uio.uio_resid = size; 930 uio.uio_segflg = UIO_SYSSPACE; 931 uio.uio_rw = UIO_READ; 932 uio.uio_td = curthread; 933 uiomove_fromphys(wpipe->pipe_map.ms, pos, size, &uio); 934 PIPE_LOCK(wpipe); 935 pipe_destroy_write_buffer(wpipe); 936} 937 938/* 939 * This implements the pipe buffer write mechanism. Note that only 940 * a direct write OR a normal pipe write can be pending at any given time. 941 * If there are any characters in the pipe buffer, the direct write will 942 * be deferred until the receiving process grabs all of the bytes from 943 * the pipe buffer. Then the direct mapping write is set-up. 944 */ 945static int 946pipe_direct_write(wpipe, uio) 947 struct pipe *wpipe; 948 struct uio *uio; 949{ 950 int error; 951 952retry: 953 PIPE_LOCK_ASSERT(wpipe, MA_OWNED); 954 error = pipelock(wpipe, 1); 955 if (wpipe->pipe_state & PIPE_EOF) 956 error = EPIPE; 957 if (error) { 958 pipeunlock(wpipe); 959 goto error1; 960 } 961 while (wpipe->pipe_state & PIPE_DIRECTW) { 962 if (wpipe->pipe_state & PIPE_WANTR) { 963 wpipe->pipe_state &= ~PIPE_WANTR; 964 wakeup(wpipe); 965 } 966 pipeselwakeup(wpipe); 967 wpipe->pipe_state |= PIPE_WANTW; 968 pipeunlock(wpipe); 969 error = msleep(wpipe, PIPE_MTX(wpipe), 970 PRIBIO | PCATCH, "pipdww", 0); 971 if (error) 972 goto error1; 973 else 974 goto retry; 975 } 976 wpipe->pipe_map.cnt = 0; /* transfer not ready yet */ 977 if (wpipe->pipe_buffer.cnt > 0) { 978 if (wpipe->pipe_state & PIPE_WANTR) { 979 wpipe->pipe_state &= ~PIPE_WANTR; 980 wakeup(wpipe); 981 } 982 pipeselwakeup(wpipe); 983 wpipe->pipe_state |= PIPE_WANTW; 984 pipeunlock(wpipe); 985 error = msleep(wpipe, PIPE_MTX(wpipe), 986 PRIBIO | PCATCH, "pipdwc", 0); 987 if (error) 988 goto error1; 989 else 990 goto retry; 991 } 992 993 wpipe->pipe_state |= PIPE_DIRECTW; 994 995 PIPE_UNLOCK(wpipe); 996 error = pipe_build_write_buffer(wpipe, uio); 997 PIPE_LOCK(wpipe); 998 if (error) { 999 wpipe->pipe_state &= ~PIPE_DIRECTW; 1000 pipeunlock(wpipe); 1001 goto error1; 1002 } 1003 1004 error = 0; 1005 while (!error && (wpipe->pipe_state & PIPE_DIRECTW)) { 1006 if (wpipe->pipe_state & PIPE_EOF) { 1007 pipe_destroy_write_buffer(wpipe); 1008 pipeselwakeup(wpipe); 1009 pipeunlock(wpipe); 1010 error = EPIPE; 1011 goto error1; 1012 } 1013 if (wpipe->pipe_state & PIPE_WANTR) { 1014 wpipe->pipe_state &= ~PIPE_WANTR; 1015 wakeup(wpipe); 1016 } 1017 pipeselwakeup(wpipe); 1018 wpipe->pipe_state |= PIPE_WANTW; 1019 pipeunlock(wpipe); 1020 error = msleep(wpipe, PIPE_MTX(wpipe), PRIBIO | PCATCH, 1021 "pipdwt", 0); 1022 pipelock(wpipe, 0); 1023 } 1024 1025 if (wpipe->pipe_state & PIPE_EOF) 1026 error = EPIPE; 1027 if (wpipe->pipe_state & PIPE_DIRECTW) { 1028 /* 1029 * this bit of trickery substitutes a kernel buffer for 1030 * the process that might be going away. 
1031 */ 1032 pipe_clone_write_buffer(wpipe); 1033 } else { 1034 pipe_destroy_write_buffer(wpipe); 1035 } 1036 pipeunlock(wpipe); 1037 return (error); 1038 1039error1: 1040 wakeup(wpipe); 1041 return (error); 1042} 1043#endif 1044 1045static int 1046pipe_write(fp, uio, active_cred, flags, td) 1047 struct file *fp; 1048 struct uio *uio; 1049 struct ucred *active_cred; 1050 struct thread *td; 1051 int flags; 1052{ 1053 int error = 0; 1054 int desiredsize; 1055 ssize_t orig_resid; 1056 struct pipe *wpipe, *rpipe; 1057 1058 rpipe = fp->f_data; 1059 wpipe = PIPE_PEER(rpipe); 1060 PIPE_LOCK(rpipe); 1061 error = pipelock(wpipe, 1); 1062 if (error) { 1063 PIPE_UNLOCK(rpipe); 1064 return (error); 1065 } 1066 /* 1067 * detect loss of pipe read side, issue SIGPIPE if lost. 1068 */ 1069 if (wpipe->pipe_present != PIPE_ACTIVE || 1070 (wpipe->pipe_state & PIPE_EOF)) { 1071 pipeunlock(wpipe); 1072 PIPE_UNLOCK(rpipe); 1073 return (EPIPE); 1074 } 1075#ifdef MAC 1076 error = mac_pipe_check_write(active_cred, wpipe->pipe_pair); 1077 if (error) { 1078 pipeunlock(wpipe); 1079 PIPE_UNLOCK(rpipe); 1080 return (error); 1081 } 1082#endif 1083 ++wpipe->pipe_busy; 1084 1085 /* Choose a larger size if it's advantageous */ 1086 desiredsize = max(SMALL_PIPE_SIZE, wpipe->pipe_buffer.size); 1087 while (desiredsize < wpipe->pipe_buffer.cnt + uio->uio_resid) { 1088 if (piperesizeallowed != 1) 1089 break; 1090 if (amountpipekva > maxpipekva / 2) 1091 break; 1092 if (desiredsize == BIG_PIPE_SIZE) 1093 break; 1094 desiredsize = desiredsize * 2; 1095 } 1096 1097 /* Choose a smaller size if we're in a OOM situation */ 1098 if ((amountpipekva > (3 * maxpipekva) / 4) && 1099 (wpipe->pipe_buffer.size > SMALL_PIPE_SIZE) && 1100 (wpipe->pipe_buffer.cnt <= SMALL_PIPE_SIZE) && 1101 (piperesizeallowed == 1)) 1102 desiredsize = SMALL_PIPE_SIZE; 1103 1104 /* Resize if the above determined that a new size was necessary */ 1105 if ((desiredsize != wpipe->pipe_buffer.size) && 1106 ((wpipe->pipe_state & PIPE_DIRECTW) == 0)) { 1107 PIPE_UNLOCK(wpipe); 1108 pipespace(wpipe, desiredsize); 1109 PIPE_LOCK(wpipe); 1110 } 1111 if (wpipe->pipe_buffer.size == 0) { 1112 /* 1113 * This can only happen for reverse direction use of pipes 1114 * in a complete OOM situation. 1115 */ 1116 error = ENOMEM; 1117 --wpipe->pipe_busy; 1118 pipeunlock(wpipe); 1119 PIPE_UNLOCK(wpipe); 1120 return (error); 1121 } 1122 1123 pipeunlock(wpipe); 1124 1125 orig_resid = uio->uio_resid; 1126 1127 while (uio->uio_resid) { 1128 int space; 1129 1130 pipelock(wpipe, 0); 1131 if (wpipe->pipe_state & PIPE_EOF) { 1132 pipeunlock(wpipe); 1133 error = EPIPE; 1134 break; 1135 } 1136#ifndef PIPE_NODIRECT 1137 /* 1138 * If the transfer is large, we can gain performance if 1139 * we do process-to-process copies directly. 1140 * If the write is non-blocking, we don't use the 1141 * direct write mechanism. 1142 * 1143 * The direct write mechanism will detect the reader going 1144 * away on us. 1145 */ 1146 if (uio->uio_segflg == UIO_USERSPACE && 1147 uio->uio_iov->iov_len >= PIPE_MINDIRECT && 1148 wpipe->pipe_buffer.size >= PIPE_MINDIRECT && 1149 (fp->f_flag & FNONBLOCK) == 0) { 1150 pipeunlock(wpipe); 1151 error = pipe_direct_write(wpipe, uio); 1152 if (error) 1153 break; 1154 continue; 1155 } 1156#endif 1157 1158 /* 1159 * Pipe buffered writes cannot be coincidental with 1160 * direct writes. We wait until the currently executing 1161 * direct write is completed before we start filling the 1162 * pipe buffer. We break out if a signal occurs or the 1163 * reader goes away. 
1164 */ 1165 if (wpipe->pipe_state & PIPE_DIRECTW) { 1166 if (wpipe->pipe_state & PIPE_WANTR) { 1167 wpipe->pipe_state &= ~PIPE_WANTR; 1168 wakeup(wpipe); 1169 } 1170 pipeselwakeup(wpipe); 1171 wpipe->pipe_state |= PIPE_WANTW; 1172 pipeunlock(wpipe); 1173 error = msleep(wpipe, PIPE_MTX(rpipe), PRIBIO | PCATCH, 1174 "pipbww", 0); 1175 if (error) 1176 break; 1177 else 1178 continue; 1179 } 1180 1181 space = wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt; 1182 1183 /* Writes of size <= PIPE_BUF must be atomic. */ 1184 if ((space < uio->uio_resid) && (orig_resid <= PIPE_BUF)) 1185 space = 0; 1186 1187 if (space > 0) { 1188 int size; /* Transfer size */ 1189 int segsize; /* first segment to transfer */ 1190 1191 /* 1192 * Transfer size is minimum of uio transfer 1193 * and free space in pipe buffer. 1194 */ 1195 if (space > uio->uio_resid) 1196 size = uio->uio_resid; 1197 else 1198 size = space; 1199 /* 1200 * First segment to transfer is minimum of 1201 * transfer size and contiguous space in 1202 * pipe buffer. If first segment to transfer 1203 * is less than the transfer size, we've got 1204 * a wraparound in the buffer. 1205 */ 1206 segsize = wpipe->pipe_buffer.size - 1207 wpipe->pipe_buffer.in; 1208 if (segsize > size) 1209 segsize = size; 1210 1211 /* Transfer first segment */ 1212 1213 PIPE_UNLOCK(rpipe); 1214 error = uiomove(&wpipe->pipe_buffer.buffer[wpipe->pipe_buffer.in], 1215 segsize, uio); 1216 PIPE_LOCK(rpipe); 1217 1218 if (error == 0 && segsize < size) { 1219 KASSERT(wpipe->pipe_buffer.in + segsize == 1220 wpipe->pipe_buffer.size, 1221 ("Pipe buffer wraparound disappeared")); 1222 /* 1223 * Transfer remaining part now, to 1224 * support atomic writes. Wraparound 1225 * happened. 1226 */ 1227 1228 PIPE_UNLOCK(rpipe); 1229 error = uiomove( 1230 &wpipe->pipe_buffer.buffer[0], 1231 size - segsize, uio); 1232 PIPE_LOCK(rpipe); 1233 } 1234 if (error == 0) { 1235 wpipe->pipe_buffer.in += size; 1236 if (wpipe->pipe_buffer.in >= 1237 wpipe->pipe_buffer.size) { 1238 KASSERT(wpipe->pipe_buffer.in == 1239 size - segsize + 1240 wpipe->pipe_buffer.size, 1241 ("Expected wraparound bad")); 1242 wpipe->pipe_buffer.in = size - segsize; 1243 } 1244 1245 wpipe->pipe_buffer.cnt += size; 1246 KASSERT(wpipe->pipe_buffer.cnt <= 1247 wpipe->pipe_buffer.size, 1248 ("Pipe buffer overflow")); 1249 } 1250 pipeunlock(wpipe); 1251 if (error != 0) 1252 break; 1253 } else { 1254 /* 1255 * If the "read-side" has been blocked, wake it up now. 1256 */ 1257 if (wpipe->pipe_state & PIPE_WANTR) { 1258 wpipe->pipe_state &= ~PIPE_WANTR; 1259 wakeup(wpipe); 1260 } 1261 1262 /* 1263 * don't block on non-blocking I/O 1264 */ 1265 if (fp->f_flag & FNONBLOCK) { 1266 error = EAGAIN; 1267 pipeunlock(wpipe); 1268 break; 1269 } 1270 1271 /* 1272 * We have no more space and have something to offer, 1273 * wake up select/poll. 1274 */ 1275 pipeselwakeup(wpipe); 1276 1277 wpipe->pipe_state |= PIPE_WANTW; 1278 pipeunlock(wpipe); 1279 error = msleep(wpipe, PIPE_MTX(rpipe), 1280 PRIBIO | PCATCH, "pipewr", 0); 1281 if (error != 0) 1282 break; 1283 } 1284 } 1285 1286 pipelock(wpipe, 0); 1287 --wpipe->pipe_busy; 1288 1289 if ((wpipe->pipe_busy == 0) && (wpipe->pipe_state & PIPE_WANT)) { 1290 wpipe->pipe_state &= ~(PIPE_WANT | PIPE_WANTR); 1291 wakeup(wpipe); 1292 } else if (wpipe->pipe_buffer.cnt > 0) { 1293 /* 1294 * If we have put any characters in the buffer, we wake up 1295 * the reader. 
1296 */ 1297 if (wpipe->pipe_state & PIPE_WANTR) { 1298 wpipe->pipe_state &= ~PIPE_WANTR; 1299 wakeup(wpipe); 1300 } 1301 } 1302 1303 /* 1304 * Don't return EPIPE if I/O was successful 1305 */ 1306 if ((wpipe->pipe_buffer.cnt == 0) && 1307 (uio->uio_resid == 0) && 1308 (error == EPIPE)) { 1309 error = 0; 1310 } 1311 1312 if (error == 0) 1313 vfs_timestamp(&wpipe->pipe_mtime); 1314 1315 /* 1316 * We have something to offer, 1317 * wake up select/poll. 1318 */ 1319 if (wpipe->pipe_buffer.cnt) 1320 pipeselwakeup(wpipe); 1321 1322 pipeunlock(wpipe); 1323 PIPE_UNLOCK(rpipe); 1324 return (error); 1325} 1326 1327/* ARGSUSED */ 1328static int 1329pipe_truncate(fp, length, active_cred, td) 1330 struct file *fp; 1331 off_t length; 1332 struct ucred *active_cred; 1333 struct thread *td; 1334{ 1335 1336 /* For named pipes call the vnode operation. */ 1337 if (fp->f_vnode != NULL) 1338 return (vnops.fo_truncate(fp, length, active_cred, td)); 1339 return (EINVAL); 1340} 1341 1342/* 1343 * we implement a very minimal set of ioctls for compatibility with sockets. 1344 */ 1345static int 1346pipe_ioctl(fp, cmd, data, active_cred, td) 1347 struct file *fp; 1348 u_long cmd; 1349 void *data; 1350 struct ucred *active_cred; 1351 struct thread *td; 1352{ 1353 struct pipe *mpipe = fp->f_data; 1354 int error; 1355 1356 PIPE_LOCK(mpipe); 1357 1358#ifdef MAC 1359 error = mac_pipe_check_ioctl(active_cred, mpipe->pipe_pair, cmd, data); 1360 if (error) { 1361 PIPE_UNLOCK(mpipe); 1362 return (error); 1363 } 1364#endif 1365 1366 error = 0; 1367 switch (cmd) { 1368 1369 case FIONBIO: 1370 break; 1371 1372 case FIOASYNC: 1373 if (*(int *)data) { 1374 mpipe->pipe_state |= PIPE_ASYNC; 1375 } else { 1376 mpipe->pipe_state &= ~PIPE_ASYNC; 1377 } 1378 break; 1379 1380 case FIONREAD: 1381 if (!(fp->f_flag & FREAD)) { 1382 *(int *)data = 0; 1383 PIPE_UNLOCK(mpipe); 1384 return (0); 1385 } 1386 if (mpipe->pipe_state & PIPE_DIRECTW) 1387 *(int *)data = mpipe->pipe_map.cnt; 1388 else 1389 *(int *)data = mpipe->pipe_buffer.cnt; 1390 break; 1391 1392 case FIOSETOWN: 1393 PIPE_UNLOCK(mpipe); 1394 error = fsetown(*(int *)data, &mpipe->pipe_sigio); 1395 goto out_unlocked; 1396 1397 case FIOGETOWN: 1398 *(int *)data = fgetown(&mpipe->pipe_sigio); 1399 break; 1400 1401 /* This is deprecated, FIOSETOWN should be used instead. */ 1402 case TIOCSPGRP: 1403 PIPE_UNLOCK(mpipe); 1404 error = fsetown(-(*(int *)data), &mpipe->pipe_sigio); 1405 goto out_unlocked; 1406 1407 /* This is deprecated, FIOGETOWN should be used instead. 
*/ 1408 case TIOCGPGRP: 1409 *(int *)data = -fgetown(&mpipe->pipe_sigio); 1410 break; 1411 1412 default: 1413 error = ENOTTY; 1414 break; 1415 } 1416 PIPE_UNLOCK(mpipe); 1417out_unlocked: 1418 return (error); 1419} 1420 1421static int 1422pipe_poll(fp, events, active_cred, td) 1423 struct file *fp; 1424 int events; 1425 struct ucred *active_cred; 1426 struct thread *td; 1427{ 1428 struct pipe *rpipe; 1429 struct pipe *wpipe; 1430 int levents, revents; 1431#ifdef MAC 1432 int error; 1433#endif 1434 1435 revents = 0; 1436 rpipe = fp->f_data; 1437 wpipe = PIPE_PEER(rpipe); 1438 PIPE_LOCK(rpipe); 1439#ifdef MAC 1440 error = mac_pipe_check_poll(active_cred, rpipe->pipe_pair); 1441 if (error) 1442 goto locked_error; 1443#endif 1444 if (fp->f_flag & FREAD && events & (POLLIN | POLLRDNORM)) 1445 if ((rpipe->pipe_state & PIPE_DIRECTW) || 1446 (rpipe->pipe_buffer.cnt > 0)) 1447 revents |= events & (POLLIN | POLLRDNORM); 1448 1449 if (fp->f_flag & FWRITE && events & (POLLOUT | POLLWRNORM)) 1450 if (wpipe->pipe_present != PIPE_ACTIVE || 1451 (wpipe->pipe_state & PIPE_EOF) || 1452 (((wpipe->pipe_state & PIPE_DIRECTW) == 0) && 1453 ((wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt) >= PIPE_BUF || 1454 wpipe->pipe_buffer.size == 0))) 1455 revents |= events & (POLLOUT | POLLWRNORM); 1456 1457 levents = events & 1458 (POLLIN | POLLINIGNEOF | POLLPRI | POLLRDNORM | POLLRDBAND); 1459 if (rpipe->pipe_state & PIPE_NAMED && fp->f_flag & FREAD && levents && 1460 fp->f_seqcount == rpipe->pipe_wgen) 1461 events |= POLLINIGNEOF; 1462 1463 if ((events & POLLINIGNEOF) == 0) { 1464 if (rpipe->pipe_state & PIPE_EOF) { 1465 revents |= (events & (POLLIN | POLLRDNORM)); 1466 if (wpipe->pipe_present != PIPE_ACTIVE || 1467 (wpipe->pipe_state & PIPE_EOF)) 1468 revents |= POLLHUP; 1469 } 1470 } 1471 1472 if (revents == 0) { 1473 if (fp->f_flag & FREAD && events & (POLLIN | POLLRDNORM)) { 1474 selrecord(td, &rpipe->pipe_sel); 1475 if (SEL_WAITING(&rpipe->pipe_sel)) 1476 rpipe->pipe_state |= PIPE_SEL; 1477 } 1478 1479 if (fp->f_flag & FWRITE && events & (POLLOUT | POLLWRNORM)) { 1480 selrecord(td, &wpipe->pipe_sel); 1481 if (SEL_WAITING(&wpipe->pipe_sel)) 1482 wpipe->pipe_state |= PIPE_SEL; 1483 } 1484 } 1485#ifdef MAC 1486locked_error: 1487#endif 1488 PIPE_UNLOCK(rpipe); 1489 1490 return (revents); 1491} 1492 1493/* 1494 * We shouldn't need locks here as we're doing a read and this should 1495 * be a natural race. 1496 */ 1497static int 1498pipe_stat(fp, ub, active_cred, td) 1499 struct file *fp; 1500 struct stat *ub; 1501 struct ucred *active_cred; 1502 struct thread *td; 1503{ 1504 struct pipe *pipe; 1505 int new_unr; 1506#ifdef MAC 1507 int error; 1508#endif 1509 1510 pipe = fp->f_data; 1511 PIPE_LOCK(pipe); 1512#ifdef MAC 1513 error = mac_pipe_check_stat(active_cred, pipe->pipe_pair); 1514 if (error) { 1515 PIPE_UNLOCK(pipe); 1516 return (error); 1517 } 1518#endif 1519 1520 /* For named pipes ask the underlying filesystem. */ 1521 if (pipe->pipe_state & PIPE_NAMED) { 1522 PIPE_UNLOCK(pipe); 1523 return (vnops.fo_stat(fp, ub, active_cred, td)); 1524 } 1525 1526 /* 1527 * Lazily allocate an inode number for the pipe. Most pipe 1528 * users do not call fstat(2) on the pipe, which means that 1529 * postponing the inode allocation until it is must be 1530 * returned to userland is useful. If alloc_unr failed, 1531 * assign st_ino zero instead of returning an error. 1532 * Special pipe_ino values: 1533 * -1 - not yet initialized; 1534 * 0 - alloc_unr failed, return 0 as st_ino forever. 
1535 */ 1536 if (pipe->pipe_ino == (ino_t)-1) { 1537 new_unr = alloc_unr(pipeino_unr); 1538 if (new_unr != -1) 1539 pipe->pipe_ino = new_unr; 1540 else 1541 pipe->pipe_ino = 0; 1542 } 1543 PIPE_UNLOCK(pipe); 1544 1545 bzero(ub, sizeof(*ub)); 1546 ub->st_mode = S_IFIFO; 1547 ub->st_blksize = PAGE_SIZE; 1548 if (pipe->pipe_state & PIPE_DIRECTW) 1549 ub->st_size = pipe->pipe_map.cnt; 1550 else 1551 ub->st_size = pipe->pipe_buffer.cnt; 1552 ub->st_blocks = (ub->st_size + ub->st_blksize - 1) / ub->st_blksize; 1553 ub->st_atim = pipe->pipe_atime; 1554 ub->st_mtim = pipe->pipe_mtime; 1555 ub->st_ctim = pipe->pipe_ctime; 1556 ub->st_uid = fp->f_cred->cr_uid; 1557 ub->st_gid = fp->f_cred->cr_gid; 1558 ub->st_dev = pipedev_ino; 1559 ub->st_ino = pipe->pipe_ino; 1560 /* 1561 * Left as 0: st_nlink, st_rdev, st_flags, st_gen. 1562 */ 1563 return (0); 1564} 1565 1566/* ARGSUSED */ 1567static int 1568pipe_close(fp, td) 1569 struct file *fp; 1570 struct thread *td; 1571{ 1572 1573 if (fp->f_vnode != NULL) 1574 return vnops.fo_close(fp, td); 1575 fp->f_ops = &badfileops; 1576 pipe_dtor(fp->f_data); 1577 fp->f_data = NULL; 1578 return (0); 1579} 1580 1581static int 1582pipe_chmod(struct file *fp, mode_t mode, struct ucred *active_cred, struct thread *td) 1583{ 1584 struct pipe *cpipe; 1585 int error; 1586 1587 cpipe = fp->f_data; 1588 if (cpipe->pipe_state & PIPE_NAMED) 1589 error = vn_chmod(fp, mode, active_cred, td); 1590 else 1591 error = invfo_chmod(fp, mode, active_cred, td); 1592 return (error); 1593} 1594 1595static int 1596pipe_chown(fp, uid, gid, active_cred, td) 1597 struct file *fp; 1598 uid_t uid; 1599 gid_t gid; 1600 struct ucred *active_cred; 1601 struct thread *td; 1602{ 1603 struct pipe *cpipe; 1604 int error; 1605 1606 cpipe = fp->f_data; 1607 if (cpipe->pipe_state & PIPE_NAMED) 1608 error = vn_chown(fp, uid, gid, active_cred, td); 1609 else 1610 error = invfo_chown(fp, uid, gid, active_cred, td); 1611 return (error); 1612} 1613 1614static void 1615pipe_free_kmem(cpipe) 1616 struct pipe *cpipe; 1617{ 1618 1619 KASSERT(!mtx_owned(PIPE_MTX(cpipe)), 1620 ("pipe_free_kmem: pipe mutex locked")); 1621 1622 if (cpipe->pipe_buffer.buffer != NULL) { 1623 atomic_subtract_long(&amountpipekva, cpipe->pipe_buffer.size); 1624 vm_map_remove(pipe_map, 1625 (vm_offset_t)cpipe->pipe_buffer.buffer, 1626 (vm_offset_t)cpipe->pipe_buffer.buffer + cpipe->pipe_buffer.size); 1627 cpipe->pipe_buffer.buffer = NULL; 1628 } 1629#ifndef PIPE_NODIRECT 1630 { 1631 cpipe->pipe_map.cnt = 0; 1632 cpipe->pipe_map.pos = 0; 1633 cpipe->pipe_map.npages = 0; 1634 } 1635#endif 1636} 1637 1638/* 1639 * shutdown the pipe 1640 */ 1641static void 1642pipeclose(cpipe) 1643 struct pipe *cpipe; 1644{ 1645 struct pipepair *pp; 1646 struct pipe *ppipe; 1647 1648 KASSERT(cpipe != NULL, ("pipeclose: cpipe == NULL")); 1649 1650 PIPE_LOCK(cpipe); 1651 pipelock(cpipe, 0); 1652 pp = cpipe->pipe_pair; 1653 1654 pipeselwakeup(cpipe); 1655 1656 /* 1657 * If the other side is blocked, wake it up saying that 1658 * we want to close it down. 1659 */ 1660 cpipe->pipe_state |= PIPE_EOF; 1661 while (cpipe->pipe_busy) { 1662 wakeup(cpipe); 1663 cpipe->pipe_state |= PIPE_WANT; 1664 pipeunlock(cpipe); 1665 msleep(cpipe, PIPE_MTX(cpipe), PRIBIO, "pipecl", 0); 1666 pipelock(cpipe, 0); 1667 } 1668 1669 1670 /* 1671 * Disconnect from peer, if any. 
1672 */ 1673 ppipe = cpipe->pipe_peer; 1674 if (ppipe->pipe_present == PIPE_ACTIVE) { 1675 pipeselwakeup(ppipe); 1676 1677 ppipe->pipe_state |= PIPE_EOF; 1678 wakeup(ppipe); 1679 KNOTE_LOCKED(&ppipe->pipe_sel.si_note, 0); 1680 } 1681 1682 /* 1683 * Mark this endpoint as free. Release kmem resources. We 1684 * don't mark this endpoint as unused until we've finished 1685 * doing that, or the pipe might disappear out from under 1686 * us. 1687 */ 1688 PIPE_UNLOCK(cpipe); 1689 pipe_free_kmem(cpipe); 1690 PIPE_LOCK(cpipe); 1691 cpipe->pipe_present = PIPE_CLOSING; 1692 pipeunlock(cpipe); 1693 1694 /* 1695 * knlist_clear() may sleep dropping the PIPE_MTX. Set the 1696 * PIPE_FINALIZED, that allows other end to free the 1697 * pipe_pair, only after the knotes are completely dismantled. 1698 */ 1699 knlist_clear(&cpipe->pipe_sel.si_note, 1); 1700 cpipe->pipe_present = PIPE_FINALIZED; 1701 seldrain(&cpipe->pipe_sel); 1702 knlist_destroy(&cpipe->pipe_sel.si_note); 1703 1704 /* 1705 * If both endpoints are now closed, release the memory for the 1706 * pipe pair. If not, unlock. 1707 */ 1708 if (ppipe->pipe_present == PIPE_FINALIZED) { 1709 PIPE_UNLOCK(cpipe); 1710#ifdef MAC 1711 mac_pipe_destroy(pp); 1712#endif 1713 uma_zfree(pipe_zone, cpipe->pipe_pair); 1714 } else 1715 PIPE_UNLOCK(cpipe); 1716} 1717 1718/*ARGSUSED*/ 1719static int 1720pipe_kqfilter(struct file *fp, struct knote *kn) 1721{ 1722 struct pipe *cpipe; 1723 1724 /* 1725 * If a filter is requested that is not supported by this file 1726 * descriptor, don't return an error, but also don't ever generate an 1727 * event. 1728 */ 1729 if ((kn->kn_filter == EVFILT_READ) && !(fp->f_flag & FREAD)) { 1730 kn->kn_fop = &pipe_nfiltops; 1731 return (0); 1732 } 1733 if ((kn->kn_filter == EVFILT_WRITE) && !(fp->f_flag & FWRITE)) { 1734 kn->kn_fop = &pipe_nfiltops; 1735 return (0); 1736 } 1737 cpipe = fp->f_data; 1738 PIPE_LOCK(cpipe); 1739 switch (kn->kn_filter) { 1740 case EVFILT_READ: 1741 kn->kn_fop = &pipe_rfiltops; 1742 break; 1743 case EVFILT_WRITE: 1744 kn->kn_fop = &pipe_wfiltops; 1745 if (cpipe->pipe_peer->pipe_present != PIPE_ACTIVE) { 1746 /* other end of pipe has been closed */ 1747 PIPE_UNLOCK(cpipe); 1748 return (EPIPE); 1749 } 1750 cpipe = PIPE_PEER(cpipe); 1751 break; 1752 default: 1753 PIPE_UNLOCK(cpipe); 1754 return (EINVAL); 1755 } 1756 1757 kn->kn_hook = cpipe; 1758 knlist_add(&cpipe->pipe_sel.si_note, kn, 1); 1759 PIPE_UNLOCK(cpipe); 1760 return (0); 1761} 1762 1763static void 1764filt_pipedetach(struct knote *kn) 1765{ 1766 struct pipe *cpipe = kn->kn_hook; 1767 1768 PIPE_LOCK(cpipe); 1769 knlist_remove(&cpipe->pipe_sel.si_note, kn, 1); 1770 PIPE_UNLOCK(cpipe); 1771} 1772 1773/*ARGSUSED*/ 1774static int 1775filt_piperead(struct knote *kn, long hint) 1776{ 1777 struct pipe *rpipe = kn->kn_hook; 1778 struct pipe *wpipe = rpipe->pipe_peer; 1779 int ret; 1780 1781 PIPE_LOCK(rpipe); 1782 kn->kn_data = rpipe->pipe_buffer.cnt; 1783 if ((kn->kn_data == 0) && (rpipe->pipe_state & PIPE_DIRECTW)) 1784 kn->kn_data = rpipe->pipe_map.cnt; 1785 1786 if ((rpipe->pipe_state & PIPE_EOF) || 1787 wpipe->pipe_present != PIPE_ACTIVE || 1788 (wpipe->pipe_state & PIPE_EOF)) { 1789 kn->kn_flags |= EV_EOF; 1790 PIPE_UNLOCK(rpipe); 1791 return (1); 1792 } 1793 ret = kn->kn_data > 0; 1794 PIPE_UNLOCK(rpipe); 1795 return ret; 1796} 1797 1798/*ARGSUSED*/ 1799static int 1800filt_pipewrite(struct knote *kn, long hint) 1801{ 1802 struct pipe *wpipe; 1803 1804 wpipe = kn->kn_hook; 1805 PIPE_LOCK(wpipe); 1806 if (wpipe->pipe_present != PIPE_ACTIVE || 1807 
(wpipe->pipe_state & PIPE_EOF)) { 1808 kn->kn_data = 0; 1809 kn->kn_flags |= EV_EOF; 1810 PIPE_UNLOCK(wpipe); 1811 return (1); 1812 } 1813 kn->kn_data = (wpipe->pipe_buffer.size > 0) ? 1814 (wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt) : PIPE_BUF; 1815 if (wpipe->pipe_state & PIPE_DIRECTW) 1816 kn->kn_data = 0; 1817 1818 PIPE_UNLOCK(wpipe); 1819 return (kn->kn_data >= PIPE_BUF); 1820} 1821 1822static void 1823filt_pipedetach_notsup(struct knote *kn) 1824{ 1825 1826} 1827 1828static int 1829filt_pipenotsup(struct knote *kn, long hint) 1830{ 1831 1832 return (0); 1833}
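
/*
 * The filterops above supply kqueue(2) EVFILT_READ/EVFILT_WRITE support
 * for pipes.  An illustrative userland sketch (assuming fds[] came from
 * a prior pipe(2) call):
 *
 *	struct kevent ev;
 *	int kq;
 *
 *	kq = kqueue();
 *	EV_SET(&ev, fds[0], EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	(void)kevent(kq, &ev, 1, NULL, 0, NULL);
 *
 * filt_piperead() then reports the readable byte count in kn_data and
 * sets EV_EOF once the write side has gone away.
 */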
| 168 .fo_flags = DFLAG_PASSABLE 169}; 170 171static void filt_pipedetach(struct knote *kn); 172static void filt_pipedetach_notsup(struct knote *kn); 173static int filt_pipenotsup(struct knote *kn, long hint); 174static int filt_piperead(struct knote *kn, long hint); 175static int filt_pipewrite(struct knote *kn, long hint); 176 177static struct filterops pipe_nfiltops = { 178 .f_isfd = 1, 179 .f_detach = filt_pipedetach_notsup, 180 .f_event = filt_pipenotsup 181}; 182static struct filterops pipe_rfiltops = { 183 .f_isfd = 1, 184 .f_detach = filt_pipedetach, 185 .f_event = filt_piperead 186}; 187static struct filterops pipe_wfiltops = { 188 .f_isfd = 1, 189 .f_detach = filt_pipedetach, 190 .f_event = filt_pipewrite 191}; 192 193/* 194 * Default pipe buffer size(s), this can be kind-of large now because pipe 195 * space is pageable. The pipe code will try to maintain locality of 196 * reference for performance reasons, so small amounts of outstanding I/O 197 * will not wipe the cache. 198 */ 199#define MINPIPESIZE (PIPE_SIZE/3) 200#define MAXPIPESIZE (2*PIPE_SIZE/3) 201 202static long amountpipekva; 203static int pipefragretry; 204static int pipeallocfail; 205static int piperesizefail; 206static int piperesizeallowed = 1; 207 208SYSCTL_LONG(_kern_ipc, OID_AUTO, maxpipekva, CTLFLAG_RDTUN, 209 &maxpipekva, 0, "Pipe KVA limit"); 210SYSCTL_LONG(_kern_ipc, OID_AUTO, pipekva, CTLFLAG_RD, 211 &amountpipekva, 0, "Pipe KVA usage"); 212SYSCTL_INT(_kern_ipc, OID_AUTO, pipefragretry, CTLFLAG_RD, 213 &pipefragretry, 0, "Pipe allocation retries due to fragmentation"); 214SYSCTL_INT(_kern_ipc, OID_AUTO, pipeallocfail, CTLFLAG_RD, 215 &pipeallocfail, 0, "Pipe allocation failures"); 216SYSCTL_INT(_kern_ipc, OID_AUTO, piperesizefail, CTLFLAG_RD, 217 &piperesizefail, 0, "Pipe resize failures"); 218SYSCTL_INT(_kern_ipc, OID_AUTO, piperesizeallowed, CTLFLAG_RW, 219 &piperesizeallowed, 0, "Pipe resizing allowed"); 220 221static void pipeinit(void *dummy __unused); 222static void pipeclose(struct pipe *cpipe); 223static void pipe_free_kmem(struct pipe *cpipe); 224static int pipe_create(struct pipe *pipe, int backing); 225static int pipe_paircreate(struct thread *td, struct pipepair **p_pp); 226static __inline int pipelock(struct pipe *cpipe, int catch); 227static __inline void pipeunlock(struct pipe *cpipe); 228#ifndef PIPE_NODIRECT 229static int pipe_build_write_buffer(struct pipe *wpipe, struct uio *uio); 230static void pipe_destroy_write_buffer(struct pipe *wpipe); 231static int pipe_direct_write(struct pipe *wpipe, struct uio *uio); 232static void pipe_clone_write_buffer(struct pipe *wpipe); 233#endif 234static int pipespace(struct pipe *cpipe, int size); 235static int pipespace_new(struct pipe *cpipe, int size); 236 237static int pipe_zone_ctor(void *mem, int size, void *arg, int flags); 238static int pipe_zone_init(void *mem, int size, int flags); 239static void pipe_zone_fini(void *mem, int size); 240 241static uma_zone_t pipe_zone; 242static struct unrhdr *pipeino_unr; 243static dev_t pipedev_ino; 244 245SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_ANY, pipeinit, NULL); 246 247static void 248pipeinit(void *dummy __unused) 249{ 250 251 pipe_zone = uma_zcreate("pipe", sizeof(struct pipepair), 252 pipe_zone_ctor, NULL, pipe_zone_init, pipe_zone_fini, 253 UMA_ALIGN_PTR, 0); 254 KASSERT(pipe_zone != NULL, ("pipe_zone not initialized")); 255 pipeino_unr = new_unrhdr(1, INT32_MAX, NULL); 256 KASSERT(pipeino_unr != NULL, ("pipe fake inodes not initialized")); 257 pipedev_ino = devfs_alloc_cdp_inode(); 258 
KASSERT(pipedev_ino > 0, ("pipe dev inode not initialized")); 259} 260 261static int 262pipe_zone_ctor(void *mem, int size, void *arg, int flags) 263{ 264 struct pipepair *pp; 265 struct pipe *rpipe, *wpipe; 266 267 KASSERT(size == sizeof(*pp), ("pipe_zone_ctor: wrong size")); 268 269 pp = (struct pipepair *)mem; 270 271 /* 272 * We zero both pipe endpoints to make sure all the kmem pointers 273 * are NULL, flag fields are zero'd, etc. We timestamp both 274 * endpoints with the same time. 275 */ 276 rpipe = &pp->pp_rpipe; 277 bzero(rpipe, sizeof(*rpipe)); 278 vfs_timestamp(&rpipe->pipe_ctime); 279 rpipe->pipe_atime = rpipe->pipe_mtime = rpipe->pipe_ctime; 280 281 wpipe = &pp->pp_wpipe; 282 bzero(wpipe, sizeof(*wpipe)); 283 wpipe->pipe_ctime = rpipe->pipe_ctime; 284 wpipe->pipe_atime = wpipe->pipe_mtime = rpipe->pipe_ctime; 285 286 rpipe->pipe_peer = wpipe; 287 rpipe->pipe_pair = pp; 288 wpipe->pipe_peer = rpipe; 289 wpipe->pipe_pair = pp; 290 291 /* 292 * Mark both endpoints as present; they will later get free'd 293 * one at a time. When both are free'd, then the whole pair 294 * is released. 295 */ 296 rpipe->pipe_present = PIPE_ACTIVE; 297 wpipe->pipe_present = PIPE_ACTIVE; 298 299 /* 300 * Eventually, the MAC Framework may initialize the label 301 * in ctor or init, but for now we do it elswhere to avoid 302 * blocking in ctor or init. 303 */ 304 pp->pp_label = NULL; 305 306 return (0); 307} 308 309static int 310pipe_zone_init(void *mem, int size, int flags) 311{ 312 struct pipepair *pp; 313 314 KASSERT(size == sizeof(*pp), ("pipe_zone_init: wrong size")); 315 316 pp = (struct pipepair *)mem; 317 318 mtx_init(&pp->pp_mtx, "pipe mutex", NULL, MTX_DEF | MTX_RECURSE); 319 return (0); 320} 321 322static void 323pipe_zone_fini(void *mem, int size) 324{ 325 struct pipepair *pp; 326 327 KASSERT(size == sizeof(*pp), ("pipe_zone_fini: wrong size")); 328 329 pp = (struct pipepair *)mem; 330 331 mtx_destroy(&pp->pp_mtx); 332} 333 334static int 335pipe_paircreate(struct thread *td, struct pipepair **p_pp) 336{ 337 struct pipepair *pp; 338 struct pipe *rpipe, *wpipe; 339 int error; 340 341 *p_pp = pp = uma_zalloc(pipe_zone, M_WAITOK); 342#ifdef MAC 343 /* 344 * The MAC label is shared between the connected endpoints. As a 345 * result mac_pipe_init() and mac_pipe_create() are called once 346 * for the pair, and not on the endpoints. 
347 */ 348 mac_pipe_init(pp); 349 mac_pipe_create(td->td_ucred, pp); 350#endif 351 rpipe = &pp->pp_rpipe; 352 wpipe = &pp->pp_wpipe; 353 354 knlist_init_mtx(&rpipe->pipe_sel.si_note, PIPE_MTX(rpipe)); 355 knlist_init_mtx(&wpipe->pipe_sel.si_note, PIPE_MTX(wpipe)); 356 357 /* Only the forward direction pipe is backed by default */ 358 if ((error = pipe_create(rpipe, 1)) != 0 || 359 (error = pipe_create(wpipe, 0)) != 0) { 360 pipeclose(rpipe); 361 pipeclose(wpipe); 362 return (error); 363 } 364 365 rpipe->pipe_state |= PIPE_DIRECTOK; 366 wpipe->pipe_state |= PIPE_DIRECTOK; 367 return (0); 368} 369 370int 371pipe_named_ctor(struct pipe **ppipe, struct thread *td) 372{ 373 struct pipepair *pp; 374 int error; 375 376 error = pipe_paircreate(td, &pp); 377 if (error != 0) 378 return (error); 379 pp->pp_rpipe.pipe_state |= PIPE_NAMED; 380 *ppipe = &pp->pp_rpipe; 381 return (0); 382} 383 384void 385pipe_dtor(struct pipe *dpipe) 386{ 387 ino_t ino; 388 389 ino = dpipe->pipe_ino; 390 funsetown(&dpipe->pipe_sigio); 391 pipeclose(dpipe); 392 if (dpipe->pipe_state & PIPE_NAMED) { 393 dpipe = dpipe->pipe_peer; 394 funsetown(&dpipe->pipe_sigio); 395 pipeclose(dpipe); 396 } 397 if (ino != 0 && ino != (ino_t)-1) 398 free_unr(pipeino_unr, ino); 399} 400 401/* 402 * The pipe system call for the DTYPE_PIPE type of pipes. If we fail, let 403 * the zone pick up the pieces via pipeclose(). 404 */ 405int 406kern_pipe(struct thread *td, int fildes[2]) 407{ 408 409 return (kern_pipe2(td, fildes, 0)); 410} 411 412int 413kern_pipe2(struct thread *td, int fildes[2], int flags) 414{ 415 struct filedesc *fdp; 416 struct file *rf, *wf; 417 struct pipe *rpipe, *wpipe; 418 struct pipepair *pp; 419 int fd, fflags, error; 420 421 fdp = td->td_proc->p_fd; 422 error = pipe_paircreate(td, &pp); 423 if (error != 0) 424 return (error); 425 rpipe = &pp->pp_rpipe; 426 wpipe = &pp->pp_wpipe; 427 error = falloc(td, &rf, &fd, flags); 428 if (error) { 429 pipeclose(rpipe); 430 pipeclose(wpipe); 431 return (error); 432 } 433 /* An extra reference on `rf' has been held for us by falloc(). */ 434 fildes[0] = fd; 435 436 fflags = FREAD | FWRITE; 437 if ((flags & O_NONBLOCK) != 0) 438 fflags |= FNONBLOCK; 439 440 /* 441 * Warning: once we've gotten past allocation of the fd for the 442 * read-side, we can only drop the read side via fdrop() in order 443 * to avoid races against processes which manage to dup() the read 444 * side while we are blocked trying to allocate the write side. 445 */ 446 finit(rf, fflags, DTYPE_PIPE, rpipe, &pipeops); 447 error = falloc(td, &wf, &fd, flags); 448 if (error) { 449 fdclose(fdp, rf, fildes[0], td); 450 fdrop(rf, td); 451 /* rpipe has been closed by fdrop(). */ 452 pipeclose(wpipe); 453 return (error); 454 } 455 /* An extra reference on `wf' has been held for us by falloc(). 
*/ 456 finit(wf, fflags, DTYPE_PIPE, wpipe, &pipeops); 457 fdrop(wf, td); 458 fildes[1] = fd; 459 fdrop(rf, td); 460 461 return (0); 462} 463 464/* ARGSUSED */ 465int 466sys_pipe(struct thread *td, struct pipe_args *uap) 467{ 468 int error; 469 int fildes[2]; 470 471 error = kern_pipe(td, fildes); 472 if (error) 473 return (error); 474 475 td->td_retval[0] = fildes[0]; 476 td->td_retval[1] = fildes[1]; 477 478 return (0); 479} 480 481int 482sys_pipe2(struct thread *td, struct pipe2_args *uap) 483{ 484 int error, fildes[2]; 485 486 if (uap->flags & ~(O_CLOEXEC | O_NONBLOCK)) 487 return (EINVAL); 488 error = kern_pipe2(td, fildes, uap->flags); 489 if (error) 490 return (error); 491 error = copyout(fildes, uap->fildes, 2 * sizeof(int)); 492 if (error) { 493 (void)kern_close(td, fildes[0]); 494 (void)kern_close(td, fildes[1]); 495 } 496 return (error); 497} 498 499/* 500 * Allocate kva for pipe circular buffer, the space is pageable 501 * This routine will 'realloc' the size of a pipe safely, if it fails 502 * it will retain the old buffer. 503 * If it fails it will return ENOMEM. 504 */ 505static int 506pipespace_new(cpipe, size) 507 struct pipe *cpipe; 508 int size; 509{ 510 caddr_t buffer; 511 int error, cnt, firstseg; 512 static int curfail = 0; 513 static struct timeval lastfail; 514 515 KASSERT(!mtx_owned(PIPE_MTX(cpipe)), ("pipespace: pipe mutex locked")); 516 KASSERT(!(cpipe->pipe_state & PIPE_DIRECTW), 517 ("pipespace: resize of direct writes not allowed")); 518retry: 519 cnt = cpipe->pipe_buffer.cnt; 520 if (cnt > size) 521 size = cnt; 522 523 size = round_page(size); 524 buffer = (caddr_t) vm_map_min(pipe_map); 525 526 error = vm_map_find(pipe_map, NULL, 0, 527 (vm_offset_t *) &buffer, size, 1, 528 VM_PROT_ALL, VM_PROT_ALL, 0); 529 if (error != KERN_SUCCESS) { 530 if ((cpipe->pipe_buffer.buffer == NULL) && 531 (size > SMALL_PIPE_SIZE)) { 532 size = SMALL_PIPE_SIZE; 533 pipefragretry++; 534 goto retry; 535 } 536 if (cpipe->pipe_buffer.buffer == NULL) { 537 pipeallocfail++; 538 if (ppsratecheck(&lastfail, &curfail, 1)) 539 printf("kern.ipc.maxpipekva exceeded; see tuning(7)\n"); 540 } else { 541 piperesizefail++; 542 } 543 return (ENOMEM); 544 } 545 546 /* copy data, then free old resources if we're resizing */ 547 if (cnt > 0) { 548 if (cpipe->pipe_buffer.in <= cpipe->pipe_buffer.out) { 549 firstseg = cpipe->pipe_buffer.size - cpipe->pipe_buffer.out; 550 bcopy(&cpipe->pipe_buffer.buffer[cpipe->pipe_buffer.out], 551 buffer, firstseg); 552 if ((cnt - firstseg) > 0) 553 bcopy(cpipe->pipe_buffer.buffer, &buffer[firstseg], 554 cpipe->pipe_buffer.in); 555 } else { 556 bcopy(&cpipe->pipe_buffer.buffer[cpipe->pipe_buffer.out], 557 buffer, cnt); 558 } 559 } 560 pipe_free_kmem(cpipe); 561 cpipe->pipe_buffer.buffer = buffer; 562 cpipe->pipe_buffer.size = size; 563 cpipe->pipe_buffer.in = cnt; 564 cpipe->pipe_buffer.out = 0; 565 cpipe->pipe_buffer.cnt = cnt; 566 atomic_add_long(&amountpipekva, cpipe->pipe_buffer.size); 567 return (0); 568} 569 570/* 571 * Wrapper for pipespace_new() that performs locking assertions. 
572 */ 573static int 574pipespace(cpipe, size) 575 struct pipe *cpipe; 576 int size; 577{ 578 579 KASSERT(cpipe->pipe_state & PIPE_LOCKFL, 580 ("Unlocked pipe passed to pipespace")); 581 return (pipespace_new(cpipe, size)); 582} 583 584/* 585 * lock a pipe for I/O, blocking other access 586 */ 587static __inline int 588pipelock(cpipe, catch) 589 struct pipe *cpipe; 590 int catch; 591{ 592 int error; 593 594 PIPE_LOCK_ASSERT(cpipe, MA_OWNED); 595 while (cpipe->pipe_state & PIPE_LOCKFL) { 596 cpipe->pipe_state |= PIPE_LWANT; 597 error = msleep(cpipe, PIPE_MTX(cpipe), 598 catch ? (PRIBIO | PCATCH) : PRIBIO, 599 "pipelk", 0); 600 if (error != 0) 601 return (error); 602 } 603 cpipe->pipe_state |= PIPE_LOCKFL; 604 return (0); 605} 606 607/* 608 * unlock a pipe I/O lock 609 */ 610static __inline void 611pipeunlock(cpipe) 612 struct pipe *cpipe; 613{ 614 615 PIPE_LOCK_ASSERT(cpipe, MA_OWNED); 616 KASSERT(cpipe->pipe_state & PIPE_LOCKFL, 617 ("Unlocked pipe passed to pipeunlock")); 618 cpipe->pipe_state &= ~PIPE_LOCKFL; 619 if (cpipe->pipe_state & PIPE_LWANT) { 620 cpipe->pipe_state &= ~PIPE_LWANT; 621 wakeup(cpipe); 622 } 623} 624 625void 626pipeselwakeup(cpipe) 627 struct pipe *cpipe; 628{ 629 630 PIPE_LOCK_ASSERT(cpipe, MA_OWNED); 631 if (cpipe->pipe_state & PIPE_SEL) { 632 selwakeuppri(&cpipe->pipe_sel, PSOCK); 633 if (!SEL_WAITING(&cpipe->pipe_sel)) 634 cpipe->pipe_state &= ~PIPE_SEL; 635 } 636 if ((cpipe->pipe_state & PIPE_ASYNC) && cpipe->pipe_sigio) 637 pgsigio(&cpipe->pipe_sigio, SIGIO, 0); 638 KNOTE_LOCKED(&cpipe->pipe_sel.si_note, 0); 639} 640 641/* 642 * Initialize and allocate VM and memory for pipe. The structure 643 * will start out zero'd from the ctor, so we just manage the kmem. 644 */ 645static int 646pipe_create(pipe, backing) 647 struct pipe *pipe; 648 int backing; 649{ 650 int error; 651 652 if (backing) { 653 if (amountpipekva > maxpipekva / 2) 654 error = pipespace_new(pipe, SMALL_PIPE_SIZE); 655 else 656 error = pipespace_new(pipe, PIPE_SIZE); 657 } else { 658 /* If we're not backing this pipe, no need to do anything. 
/*
 * Lock a pipe for I/O, blocking other access.
 */
static __inline int
pipelock(struct pipe *cpipe, int catch)
{
	int error;

	PIPE_LOCK_ASSERT(cpipe, MA_OWNED);
	while (cpipe->pipe_state & PIPE_LOCKFL) {
		cpipe->pipe_state |= PIPE_LWANT;
		error = msleep(cpipe, PIPE_MTX(cpipe),
		    catch ? (PRIBIO | PCATCH) : PRIBIO,
		    "pipelk", 0);
		if (error != 0)
			return (error);
	}
	cpipe->pipe_state |= PIPE_LOCKFL;
	return (0);
}

/*
 * Unlock a pipe I/O lock.
 */
static __inline void
pipeunlock(struct pipe *cpipe)
{

	PIPE_LOCK_ASSERT(cpipe, MA_OWNED);
	KASSERT(cpipe->pipe_state & PIPE_LOCKFL,
	    ("Unlocked pipe passed to pipeunlock"));
	cpipe->pipe_state &= ~PIPE_LOCKFL;
	if (cpipe->pipe_state & PIPE_LWANT) {
		cpipe->pipe_state &= ~PIPE_LWANT;
		wakeup(cpipe);
	}
}
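/*
 * Sketch of the locking pattern the two routines above assume (for
 * exposition only; the real callers are pipe_read() and pipe_write()
 * below).  The pipe mutex protects the state flags, while the longer
 * term PIPE_LOCKFL lock serializes I/O paths that must drop the mutex
 * while copying data.
 */
#if 0
	PIPE_LOCK(cpipe);		/* acquire the pair mutex */
	error = pipelock(cpipe, 1);	/* long-term I/O lock, catch signals */
	if (error == 0) {
		/* ... may drop/reacquire PIPE_MTX around uiomove() ... */
		pipeunlock(cpipe);	/* wakes any PIPE_LWANT sleepers */
	}
	PIPE_UNLOCK(cpipe);
#endif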
792 */ 793 if (fp->f_flag & FNONBLOCK) { 794 error = EAGAIN; 795 } else { 796 rpipe->pipe_state |= PIPE_WANTR; 797 if ((error = msleep(rpipe, PIPE_MTX(rpipe), 798 PRIBIO | PCATCH, 799 "piperd", 0)) == 0) 800 error = pipelock(rpipe, 1); 801 } 802 if (error) 803 goto unlocked_error; 804 } 805 } 806#ifdef MAC 807locked_error: 808#endif 809 pipeunlock(rpipe); 810 811 /* XXX: should probably do this before getting any locks. */ 812 if (error == 0) 813 vfs_timestamp(&rpipe->pipe_atime); 814unlocked_error: 815 --rpipe->pipe_busy; 816 817 /* 818 * PIPE_WANT processing only makes sense if pipe_busy is 0. 819 */ 820 if ((rpipe->pipe_busy == 0) && (rpipe->pipe_state & PIPE_WANT)) { 821 rpipe->pipe_state &= ~(PIPE_WANT|PIPE_WANTW); 822 wakeup(rpipe); 823 } else if (rpipe->pipe_buffer.cnt < MINPIPESIZE) { 824 /* 825 * Handle write blocking hysteresis. 826 */ 827 if (rpipe->pipe_state & PIPE_WANTW) { 828 rpipe->pipe_state &= ~PIPE_WANTW; 829 wakeup(rpipe); 830 } 831 } 832 833 if ((rpipe->pipe_buffer.size - rpipe->pipe_buffer.cnt) >= PIPE_BUF) 834 pipeselwakeup(rpipe); 835 836 PIPE_UNLOCK(rpipe); 837 return (error); 838} 839 840#ifndef PIPE_NODIRECT 841/* 842 * Map the sending processes' buffer into kernel space and wire it. 843 * This is similar to a physical write operation. 844 */ 845static int 846pipe_build_write_buffer(wpipe, uio) 847 struct pipe *wpipe; 848 struct uio *uio; 849{ 850 u_int size; 851 int i; 852 853 PIPE_LOCK_ASSERT(wpipe, MA_NOTOWNED); 854 KASSERT(wpipe->pipe_state & PIPE_DIRECTW, 855 ("Clone attempt on non-direct write pipe!")); 856 857 if (uio->uio_iov->iov_len > wpipe->pipe_buffer.size) 858 size = wpipe->pipe_buffer.size; 859 else 860 size = uio->uio_iov->iov_len; 861 862 if ((i = vm_fault_quick_hold_pages(&curproc->p_vmspace->vm_map, 863 (vm_offset_t)uio->uio_iov->iov_base, size, VM_PROT_READ, 864 wpipe->pipe_map.ms, PIPENPAGES)) < 0) 865 return (EFAULT); 866 867/* 868 * set up the control block 869 */ 870 wpipe->pipe_map.npages = i; 871 wpipe->pipe_map.pos = 872 ((vm_offset_t) uio->uio_iov->iov_base) & PAGE_MASK; 873 wpipe->pipe_map.cnt = size; 874 875/* 876 * and update the uio data 877 */ 878 879 uio->uio_iov->iov_len -= size; 880 uio->uio_iov->iov_base = (char *)uio->uio_iov->iov_base + size; 881 if (uio->uio_iov->iov_len == 0) 882 uio->uio_iov++; 883 uio->uio_resid -= size; 884 uio->uio_offset += size; 885 return (0); 886} 887 888/* 889 * unmap and unwire the process buffer 890 */ 891static void 892pipe_destroy_write_buffer(wpipe) 893 struct pipe *wpipe; 894{ 895 896 PIPE_LOCK_ASSERT(wpipe, MA_OWNED); 897 vm_page_unhold_pages(wpipe->pipe_map.ms, wpipe->pipe_map.npages); 898 wpipe->pipe_map.npages = 0; 899} 900 901/* 902 * In the case of a signal, the writing process might go away. This 903 * code copies the data into the circular buffer so that the source 904 * pages can be freed without loss of data. 
905 */ 906static void 907pipe_clone_write_buffer(wpipe) 908 struct pipe *wpipe; 909{ 910 struct uio uio; 911 struct iovec iov; 912 int size; 913 int pos; 914 915 PIPE_LOCK_ASSERT(wpipe, MA_OWNED); 916 size = wpipe->pipe_map.cnt; 917 pos = wpipe->pipe_map.pos; 918 919 wpipe->pipe_buffer.in = size; 920 wpipe->pipe_buffer.out = 0; 921 wpipe->pipe_buffer.cnt = size; 922 wpipe->pipe_state &= ~PIPE_DIRECTW; 923 924 PIPE_UNLOCK(wpipe); 925 iov.iov_base = wpipe->pipe_buffer.buffer; 926 iov.iov_len = size; 927 uio.uio_iov = &iov; 928 uio.uio_iovcnt = 1; 929 uio.uio_offset = 0; 930 uio.uio_resid = size; 931 uio.uio_segflg = UIO_SYSSPACE; 932 uio.uio_rw = UIO_READ; 933 uio.uio_td = curthread; 934 uiomove_fromphys(wpipe->pipe_map.ms, pos, size, &uio); 935 PIPE_LOCK(wpipe); 936 pipe_destroy_write_buffer(wpipe); 937} 938 939/* 940 * This implements the pipe buffer write mechanism. Note that only 941 * a direct write OR a normal pipe write can be pending at any given time. 942 * If there are any characters in the pipe buffer, the direct write will 943 * be deferred until the receiving process grabs all of the bytes from 944 * the pipe buffer. Then the direct mapping write is set-up. 945 */ 946static int 947pipe_direct_write(wpipe, uio) 948 struct pipe *wpipe; 949 struct uio *uio; 950{ 951 int error; 952 953retry: 954 PIPE_LOCK_ASSERT(wpipe, MA_OWNED); 955 error = pipelock(wpipe, 1); 956 if (wpipe->pipe_state & PIPE_EOF) 957 error = EPIPE; 958 if (error) { 959 pipeunlock(wpipe); 960 goto error1; 961 } 962 while (wpipe->pipe_state & PIPE_DIRECTW) { 963 if (wpipe->pipe_state & PIPE_WANTR) { 964 wpipe->pipe_state &= ~PIPE_WANTR; 965 wakeup(wpipe); 966 } 967 pipeselwakeup(wpipe); 968 wpipe->pipe_state |= PIPE_WANTW; 969 pipeunlock(wpipe); 970 error = msleep(wpipe, PIPE_MTX(wpipe), 971 PRIBIO | PCATCH, "pipdww", 0); 972 if (error) 973 goto error1; 974 else 975 goto retry; 976 } 977 wpipe->pipe_map.cnt = 0; /* transfer not ready yet */ 978 if (wpipe->pipe_buffer.cnt > 0) { 979 if (wpipe->pipe_state & PIPE_WANTR) { 980 wpipe->pipe_state &= ~PIPE_WANTR; 981 wakeup(wpipe); 982 } 983 pipeselwakeup(wpipe); 984 wpipe->pipe_state |= PIPE_WANTW; 985 pipeunlock(wpipe); 986 error = msleep(wpipe, PIPE_MTX(wpipe), 987 PRIBIO | PCATCH, "pipdwc", 0); 988 if (error) 989 goto error1; 990 else 991 goto retry; 992 } 993 994 wpipe->pipe_state |= PIPE_DIRECTW; 995 996 PIPE_UNLOCK(wpipe); 997 error = pipe_build_write_buffer(wpipe, uio); 998 PIPE_LOCK(wpipe); 999 if (error) { 1000 wpipe->pipe_state &= ~PIPE_DIRECTW; 1001 pipeunlock(wpipe); 1002 goto error1; 1003 } 1004 1005 error = 0; 1006 while (!error && (wpipe->pipe_state & PIPE_DIRECTW)) { 1007 if (wpipe->pipe_state & PIPE_EOF) { 1008 pipe_destroy_write_buffer(wpipe); 1009 pipeselwakeup(wpipe); 1010 pipeunlock(wpipe); 1011 error = EPIPE; 1012 goto error1; 1013 } 1014 if (wpipe->pipe_state & PIPE_WANTR) { 1015 wpipe->pipe_state &= ~PIPE_WANTR; 1016 wakeup(wpipe); 1017 } 1018 pipeselwakeup(wpipe); 1019 wpipe->pipe_state |= PIPE_WANTW; 1020 pipeunlock(wpipe); 1021 error = msleep(wpipe, PIPE_MTX(wpipe), PRIBIO | PCATCH, 1022 "pipdwt", 0); 1023 pipelock(wpipe, 0); 1024 } 1025 1026 if (wpipe->pipe_state & PIPE_EOF) 1027 error = EPIPE; 1028 if (wpipe->pipe_state & PIPE_DIRECTW) { 1029 /* 1030 * this bit of trickery substitutes a kernel buffer for 1031 * the process that might be going away. 
1032 */ 1033 pipe_clone_write_buffer(wpipe); 1034 } else { 1035 pipe_destroy_write_buffer(wpipe); 1036 } 1037 pipeunlock(wpipe); 1038 return (error); 1039 1040error1: 1041 wakeup(wpipe); 1042 return (error); 1043} 1044#endif 1045 1046static int 1047pipe_write(fp, uio, active_cred, flags, td) 1048 struct file *fp; 1049 struct uio *uio; 1050 struct ucred *active_cred; 1051 struct thread *td; 1052 int flags; 1053{ 1054 int error = 0; 1055 int desiredsize; 1056 ssize_t orig_resid; 1057 struct pipe *wpipe, *rpipe; 1058 1059 rpipe = fp->f_data; 1060 wpipe = PIPE_PEER(rpipe); 1061 PIPE_LOCK(rpipe); 1062 error = pipelock(wpipe, 1); 1063 if (error) { 1064 PIPE_UNLOCK(rpipe); 1065 return (error); 1066 } 1067 /* 1068 * detect loss of pipe read side, issue SIGPIPE if lost. 1069 */ 1070 if (wpipe->pipe_present != PIPE_ACTIVE || 1071 (wpipe->pipe_state & PIPE_EOF)) { 1072 pipeunlock(wpipe); 1073 PIPE_UNLOCK(rpipe); 1074 return (EPIPE); 1075 } 1076#ifdef MAC 1077 error = mac_pipe_check_write(active_cred, wpipe->pipe_pair); 1078 if (error) { 1079 pipeunlock(wpipe); 1080 PIPE_UNLOCK(rpipe); 1081 return (error); 1082 } 1083#endif 1084 ++wpipe->pipe_busy; 1085 1086 /* Choose a larger size if it's advantageous */ 1087 desiredsize = max(SMALL_PIPE_SIZE, wpipe->pipe_buffer.size); 1088 while (desiredsize < wpipe->pipe_buffer.cnt + uio->uio_resid) { 1089 if (piperesizeallowed != 1) 1090 break; 1091 if (amountpipekva > maxpipekva / 2) 1092 break; 1093 if (desiredsize == BIG_PIPE_SIZE) 1094 break; 1095 desiredsize = desiredsize * 2; 1096 } 1097 1098 /* Choose a smaller size if we're in a OOM situation */ 1099 if ((amountpipekva > (3 * maxpipekva) / 4) && 1100 (wpipe->pipe_buffer.size > SMALL_PIPE_SIZE) && 1101 (wpipe->pipe_buffer.cnt <= SMALL_PIPE_SIZE) && 1102 (piperesizeallowed == 1)) 1103 desiredsize = SMALL_PIPE_SIZE; 1104 1105 /* Resize if the above determined that a new size was necessary */ 1106 if ((desiredsize != wpipe->pipe_buffer.size) && 1107 ((wpipe->pipe_state & PIPE_DIRECTW) == 0)) { 1108 PIPE_UNLOCK(wpipe); 1109 pipespace(wpipe, desiredsize); 1110 PIPE_LOCK(wpipe); 1111 } 1112 if (wpipe->pipe_buffer.size == 0) { 1113 /* 1114 * This can only happen for reverse direction use of pipes 1115 * in a complete OOM situation. 1116 */ 1117 error = ENOMEM; 1118 --wpipe->pipe_busy; 1119 pipeunlock(wpipe); 1120 PIPE_UNLOCK(wpipe); 1121 return (error); 1122 } 1123 1124 pipeunlock(wpipe); 1125 1126 orig_resid = uio->uio_resid; 1127 1128 while (uio->uio_resid) { 1129 int space; 1130 1131 pipelock(wpipe, 0); 1132 if (wpipe->pipe_state & PIPE_EOF) { 1133 pipeunlock(wpipe); 1134 error = EPIPE; 1135 break; 1136 } 1137#ifndef PIPE_NODIRECT 1138 /* 1139 * If the transfer is large, we can gain performance if 1140 * we do process-to-process copies directly. 1141 * If the write is non-blocking, we don't use the 1142 * direct write mechanism. 1143 * 1144 * The direct write mechanism will detect the reader going 1145 * away on us. 1146 */ 1147 if (uio->uio_segflg == UIO_USERSPACE && 1148 uio->uio_iov->iov_len >= PIPE_MINDIRECT && 1149 wpipe->pipe_buffer.size >= PIPE_MINDIRECT && 1150 (fp->f_flag & FNONBLOCK) == 0) { 1151 pipeunlock(wpipe); 1152 error = pipe_direct_write(wpipe, uio); 1153 if (error) 1154 break; 1155 continue; 1156 } 1157#endif 1158 1159 /* 1160 * Pipe buffered writes cannot be coincidental with 1161 * direct writes. We wait until the currently executing 1162 * direct write is completed before we start filling the 1163 * pipe buffer. We break out if a signal occurs or the 1164 * reader goes away. 
1165 */ 1166 if (wpipe->pipe_state & PIPE_DIRECTW) { 1167 if (wpipe->pipe_state & PIPE_WANTR) { 1168 wpipe->pipe_state &= ~PIPE_WANTR; 1169 wakeup(wpipe); 1170 } 1171 pipeselwakeup(wpipe); 1172 wpipe->pipe_state |= PIPE_WANTW; 1173 pipeunlock(wpipe); 1174 error = msleep(wpipe, PIPE_MTX(rpipe), PRIBIO | PCATCH, 1175 "pipbww", 0); 1176 if (error) 1177 break; 1178 else 1179 continue; 1180 } 1181 1182 space = wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt; 1183 1184 /* Writes of size <= PIPE_BUF must be atomic. */ 1185 if ((space < uio->uio_resid) && (orig_resid <= PIPE_BUF)) 1186 space = 0; 1187 1188 if (space > 0) { 1189 int size; /* Transfer size */ 1190 int segsize; /* first segment to transfer */ 1191 1192 /* 1193 * Transfer size is minimum of uio transfer 1194 * and free space in pipe buffer. 1195 */ 1196 if (space > uio->uio_resid) 1197 size = uio->uio_resid; 1198 else 1199 size = space; 1200 /* 1201 * First segment to transfer is minimum of 1202 * transfer size and contiguous space in 1203 * pipe buffer. If first segment to transfer 1204 * is less than the transfer size, we've got 1205 * a wraparound in the buffer. 1206 */ 1207 segsize = wpipe->pipe_buffer.size - 1208 wpipe->pipe_buffer.in; 1209 if (segsize > size) 1210 segsize = size; 1211 1212 /* Transfer first segment */ 1213 1214 PIPE_UNLOCK(rpipe); 1215 error = uiomove(&wpipe->pipe_buffer.buffer[wpipe->pipe_buffer.in], 1216 segsize, uio); 1217 PIPE_LOCK(rpipe); 1218 1219 if (error == 0 && segsize < size) { 1220 KASSERT(wpipe->pipe_buffer.in + segsize == 1221 wpipe->pipe_buffer.size, 1222 ("Pipe buffer wraparound disappeared")); 1223 /* 1224 * Transfer remaining part now, to 1225 * support atomic writes. Wraparound 1226 * happened. 1227 */ 1228 1229 PIPE_UNLOCK(rpipe); 1230 error = uiomove( 1231 &wpipe->pipe_buffer.buffer[0], 1232 size - segsize, uio); 1233 PIPE_LOCK(rpipe); 1234 } 1235 if (error == 0) { 1236 wpipe->pipe_buffer.in += size; 1237 if (wpipe->pipe_buffer.in >= 1238 wpipe->pipe_buffer.size) { 1239 KASSERT(wpipe->pipe_buffer.in == 1240 size - segsize + 1241 wpipe->pipe_buffer.size, 1242 ("Expected wraparound bad")); 1243 wpipe->pipe_buffer.in = size - segsize; 1244 } 1245 1246 wpipe->pipe_buffer.cnt += size; 1247 KASSERT(wpipe->pipe_buffer.cnt <= 1248 wpipe->pipe_buffer.size, 1249 ("Pipe buffer overflow")); 1250 } 1251 pipeunlock(wpipe); 1252 if (error != 0) 1253 break; 1254 } else { 1255 /* 1256 * If the "read-side" has been blocked, wake it up now. 1257 */ 1258 if (wpipe->pipe_state & PIPE_WANTR) { 1259 wpipe->pipe_state &= ~PIPE_WANTR; 1260 wakeup(wpipe); 1261 } 1262 1263 /* 1264 * don't block on non-blocking I/O 1265 */ 1266 if (fp->f_flag & FNONBLOCK) { 1267 error = EAGAIN; 1268 pipeunlock(wpipe); 1269 break; 1270 } 1271 1272 /* 1273 * We have no more space and have something to offer, 1274 * wake up select/poll. 1275 */ 1276 pipeselwakeup(wpipe); 1277 1278 wpipe->pipe_state |= PIPE_WANTW; 1279 pipeunlock(wpipe); 1280 error = msleep(wpipe, PIPE_MTX(rpipe), 1281 PRIBIO | PCATCH, "pipewr", 0); 1282 if (error != 0) 1283 break; 1284 } 1285 } 1286 1287 pipelock(wpipe, 0); 1288 --wpipe->pipe_busy; 1289 1290 if ((wpipe->pipe_busy == 0) && (wpipe->pipe_state & PIPE_WANT)) { 1291 wpipe->pipe_state &= ~(PIPE_WANT | PIPE_WANTR); 1292 wakeup(wpipe); 1293 } else if (wpipe->pipe_buffer.cnt > 0) { 1294 /* 1295 * If we have put any characters in the buffer, we wake up 1296 * the reader. 
1297 */ 1298 if (wpipe->pipe_state & PIPE_WANTR) { 1299 wpipe->pipe_state &= ~PIPE_WANTR; 1300 wakeup(wpipe); 1301 } 1302 } 1303 1304 /* 1305 * Don't return EPIPE if I/O was successful 1306 */ 1307 if ((wpipe->pipe_buffer.cnt == 0) && 1308 (uio->uio_resid == 0) && 1309 (error == EPIPE)) { 1310 error = 0; 1311 } 1312 1313 if (error == 0) 1314 vfs_timestamp(&wpipe->pipe_mtime); 1315 1316 /* 1317 * We have something to offer, 1318 * wake up select/poll. 1319 */ 1320 if (wpipe->pipe_buffer.cnt) 1321 pipeselwakeup(wpipe); 1322 1323 pipeunlock(wpipe); 1324 PIPE_UNLOCK(rpipe); 1325 return (error); 1326} 1327 1328/* ARGSUSED */ 1329static int 1330pipe_truncate(fp, length, active_cred, td) 1331 struct file *fp; 1332 off_t length; 1333 struct ucred *active_cred; 1334 struct thread *td; 1335{ 1336 1337 /* For named pipes call the vnode operation. */ 1338 if (fp->f_vnode != NULL) 1339 return (vnops.fo_truncate(fp, length, active_cred, td)); 1340 return (EINVAL); 1341} 1342 1343/* 1344 * we implement a very minimal set of ioctls for compatibility with sockets. 1345 */ 1346static int 1347pipe_ioctl(fp, cmd, data, active_cred, td) 1348 struct file *fp; 1349 u_long cmd; 1350 void *data; 1351 struct ucred *active_cred; 1352 struct thread *td; 1353{ 1354 struct pipe *mpipe = fp->f_data; 1355 int error; 1356 1357 PIPE_LOCK(mpipe); 1358 1359#ifdef MAC 1360 error = mac_pipe_check_ioctl(active_cred, mpipe->pipe_pair, cmd, data); 1361 if (error) { 1362 PIPE_UNLOCK(mpipe); 1363 return (error); 1364 } 1365#endif 1366 1367 error = 0; 1368 switch (cmd) { 1369 1370 case FIONBIO: 1371 break; 1372 1373 case FIOASYNC: 1374 if (*(int *)data) { 1375 mpipe->pipe_state |= PIPE_ASYNC; 1376 } else { 1377 mpipe->pipe_state &= ~PIPE_ASYNC; 1378 } 1379 break; 1380 1381 case FIONREAD: 1382 if (!(fp->f_flag & FREAD)) { 1383 *(int *)data = 0; 1384 PIPE_UNLOCK(mpipe); 1385 return (0); 1386 } 1387 if (mpipe->pipe_state & PIPE_DIRECTW) 1388 *(int *)data = mpipe->pipe_map.cnt; 1389 else 1390 *(int *)data = mpipe->pipe_buffer.cnt; 1391 break; 1392 1393 case FIOSETOWN: 1394 PIPE_UNLOCK(mpipe); 1395 error = fsetown(*(int *)data, &mpipe->pipe_sigio); 1396 goto out_unlocked; 1397 1398 case FIOGETOWN: 1399 *(int *)data = fgetown(&mpipe->pipe_sigio); 1400 break; 1401 1402 /* This is deprecated, FIOSETOWN should be used instead. */ 1403 case TIOCSPGRP: 1404 PIPE_UNLOCK(mpipe); 1405 error = fsetown(-(*(int *)data), &mpipe->pipe_sigio); 1406 goto out_unlocked; 1407 1408 /* This is deprecated, FIOGETOWN should be used instead. 
/* ARGSUSED */
static int
pipe_truncate(struct file *fp, off_t length, struct ucred *active_cred,
    struct thread *td)
{

	/* For named pipes call the vnode operation. */
	if (fp->f_vnode != NULL)
		return (vnops.fo_truncate(fp, length, active_cred, td));
	return (EINVAL);
}

/*
 * We implement a very minimal set of ioctls for compatibility with sockets.
 */
static int
pipe_ioctl(struct file *fp, u_long cmd, void *data, struct ucred *active_cred,
    struct thread *td)
{
	struct pipe *mpipe = fp->f_data;
	int error;

	PIPE_LOCK(mpipe);

#ifdef MAC
	error = mac_pipe_check_ioctl(active_cred, mpipe->pipe_pair, cmd, data);
	if (error) {
		PIPE_UNLOCK(mpipe);
		return (error);
	}
#endif

	error = 0;
	switch (cmd) {

	case FIONBIO:
		break;

	case FIOASYNC:
		if (*(int *)data) {
			mpipe->pipe_state |= PIPE_ASYNC;
		} else {
			mpipe->pipe_state &= ~PIPE_ASYNC;
		}
		break;

	case FIONREAD:
		if (!(fp->f_flag & FREAD)) {
			*(int *)data = 0;
			PIPE_UNLOCK(mpipe);
			return (0);
		}
		if (mpipe->pipe_state & PIPE_DIRECTW)
			*(int *)data = mpipe->pipe_map.cnt;
		else
			*(int *)data = mpipe->pipe_buffer.cnt;
		break;

	case FIOSETOWN:
		PIPE_UNLOCK(mpipe);
		error = fsetown(*(int *)data, &mpipe->pipe_sigio);
		goto out_unlocked;

	case FIOGETOWN:
		*(int *)data = fgetown(&mpipe->pipe_sigio);
		break;

	/* This is deprecated, FIOSETOWN should be used instead. */
	case TIOCSPGRP:
		PIPE_UNLOCK(mpipe);
		error = fsetown(-(*(int *)data), &mpipe->pipe_sigio);
		goto out_unlocked;

	/* This is deprecated, FIOGETOWN should be used instead. */
	case TIOCGPGRP:
		*(int *)data = -fgetown(&mpipe->pipe_sigio);
		break;

	default:
		error = ENOTTY;
		break;
	}
	PIPE_UNLOCK(mpipe);
out_unlocked:
	return (error);
}

static int
pipe_poll(struct file *fp, int events, struct ucred *active_cred,
    struct thread *td)
{
	struct pipe *rpipe;
	struct pipe *wpipe;
	int levents, revents;
#ifdef MAC
	int error;
#endif

	revents = 0;
	rpipe = fp->f_data;
	wpipe = PIPE_PEER(rpipe);
	PIPE_LOCK(rpipe);
#ifdef MAC
	error = mac_pipe_check_poll(active_cred, rpipe->pipe_pair);
	if (error)
		goto locked_error;
#endif
	if (fp->f_flag & FREAD && events & (POLLIN | POLLRDNORM))
		if ((rpipe->pipe_state & PIPE_DIRECTW) ||
		    (rpipe->pipe_buffer.cnt > 0))
			revents |= events & (POLLIN | POLLRDNORM);

	if (fp->f_flag & FWRITE && events & (POLLOUT | POLLWRNORM))
		if (wpipe->pipe_present != PIPE_ACTIVE ||
		    (wpipe->pipe_state & PIPE_EOF) ||
		    (((wpipe->pipe_state & PIPE_DIRECTW) == 0) &&
		    ((wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt) >= PIPE_BUF ||
		    wpipe->pipe_buffer.size == 0)))
			revents |= events & (POLLOUT | POLLWRNORM);

	levents = events &
	    (POLLIN | POLLINIGNEOF | POLLPRI | POLLRDNORM | POLLRDBAND);
	if (rpipe->pipe_state & PIPE_NAMED && fp->f_flag & FREAD && levents &&
	    fp->f_seqcount == rpipe->pipe_wgen)
		events |= POLLINIGNEOF;

	if ((events & POLLINIGNEOF) == 0) {
		if (rpipe->pipe_state & PIPE_EOF) {
			revents |= (events & (POLLIN | POLLRDNORM));
			if (wpipe->pipe_present != PIPE_ACTIVE ||
			    (wpipe->pipe_state & PIPE_EOF))
				revents |= POLLHUP;
		}
	}

	if (revents == 0) {
		if (fp->f_flag & FREAD && events & (POLLIN | POLLRDNORM)) {
			selrecord(td, &rpipe->pipe_sel);
			if (SEL_WAITING(&rpipe->pipe_sel))
				rpipe->pipe_state |= PIPE_SEL;
		}

		if (fp->f_flag & FWRITE && events & (POLLOUT | POLLWRNORM)) {
			selrecord(td, &wpipe->pipe_sel);
			if (SEL_WAITING(&wpipe->pipe_sel))
				wpipe->pipe_state |= PIPE_SEL;
		}
	}
#ifdef MAC
locked_error:
#endif
	PIPE_UNLOCK(rpipe);

	return (revents);
}
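/*
 * Illustrative userland sketch (not part of this file): FIONREAD and
 * poll() on a pipe, as serviced by pipe_ioctl() and pipe_poll() above.
 */
#if 0
#include <sys/ioctl.h>
#include <err.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	struct pollfd pfd;
	int fds[2], nbytes;

	if (pipe(fds) == -1)
		err(1, "pipe");
	write(fds[1], "abc", 3);

	if (ioctl(fds[0], FIONREAD, &nbytes) == -1)
		err(1, "FIONREAD");
	printf("%d bytes buffered\n", nbytes);	/* 3 */

	pfd.fd = fds[0];
	pfd.events = POLLIN;
	if (poll(&pfd, 1, 0) > 0 && (pfd.revents & POLLIN))
		printf("pipe is readable\n");
	close(fds[0]);
	close(fds[1]);
	return (0);
}
#endif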
1536 */ 1537 if (pipe->pipe_ino == (ino_t)-1) { 1538 new_unr = alloc_unr(pipeino_unr); 1539 if (new_unr != -1) 1540 pipe->pipe_ino = new_unr; 1541 else 1542 pipe->pipe_ino = 0; 1543 } 1544 PIPE_UNLOCK(pipe); 1545 1546 bzero(ub, sizeof(*ub)); 1547 ub->st_mode = S_IFIFO; 1548 ub->st_blksize = PAGE_SIZE; 1549 if (pipe->pipe_state & PIPE_DIRECTW) 1550 ub->st_size = pipe->pipe_map.cnt; 1551 else 1552 ub->st_size = pipe->pipe_buffer.cnt; 1553 ub->st_blocks = (ub->st_size + ub->st_blksize - 1) / ub->st_blksize; 1554 ub->st_atim = pipe->pipe_atime; 1555 ub->st_mtim = pipe->pipe_mtime; 1556 ub->st_ctim = pipe->pipe_ctime; 1557 ub->st_uid = fp->f_cred->cr_uid; 1558 ub->st_gid = fp->f_cred->cr_gid; 1559 ub->st_dev = pipedev_ino; 1560 ub->st_ino = pipe->pipe_ino; 1561 /* 1562 * Left as 0: st_nlink, st_rdev, st_flags, st_gen. 1563 */ 1564 return (0); 1565} 1566 1567/* ARGSUSED */ 1568static int 1569pipe_close(fp, td) 1570 struct file *fp; 1571 struct thread *td; 1572{ 1573 1574 if (fp->f_vnode != NULL) 1575 return vnops.fo_close(fp, td); 1576 fp->f_ops = &badfileops; 1577 pipe_dtor(fp->f_data); 1578 fp->f_data = NULL; 1579 return (0); 1580} 1581 1582static int 1583pipe_chmod(struct file *fp, mode_t mode, struct ucred *active_cred, struct thread *td) 1584{ 1585 struct pipe *cpipe; 1586 int error; 1587 1588 cpipe = fp->f_data; 1589 if (cpipe->pipe_state & PIPE_NAMED) 1590 error = vn_chmod(fp, mode, active_cred, td); 1591 else 1592 error = invfo_chmod(fp, mode, active_cred, td); 1593 return (error); 1594} 1595 1596static int 1597pipe_chown(fp, uid, gid, active_cred, td) 1598 struct file *fp; 1599 uid_t uid; 1600 gid_t gid; 1601 struct ucred *active_cred; 1602 struct thread *td; 1603{ 1604 struct pipe *cpipe; 1605 int error; 1606 1607 cpipe = fp->f_data; 1608 if (cpipe->pipe_state & PIPE_NAMED) 1609 error = vn_chown(fp, uid, gid, active_cred, td); 1610 else 1611 error = invfo_chown(fp, uid, gid, active_cred, td); 1612 return (error); 1613} 1614 1615static void 1616pipe_free_kmem(cpipe) 1617 struct pipe *cpipe; 1618{ 1619 1620 KASSERT(!mtx_owned(PIPE_MTX(cpipe)), 1621 ("pipe_free_kmem: pipe mutex locked")); 1622 1623 if (cpipe->pipe_buffer.buffer != NULL) { 1624 atomic_subtract_long(&amountpipekva, cpipe->pipe_buffer.size); 1625 vm_map_remove(pipe_map, 1626 (vm_offset_t)cpipe->pipe_buffer.buffer, 1627 (vm_offset_t)cpipe->pipe_buffer.buffer + cpipe->pipe_buffer.size); 1628 cpipe->pipe_buffer.buffer = NULL; 1629 } 1630#ifndef PIPE_NODIRECT 1631 { 1632 cpipe->pipe_map.cnt = 0; 1633 cpipe->pipe_map.pos = 0; 1634 cpipe->pipe_map.npages = 0; 1635 } 1636#endif 1637} 1638 1639/* 1640 * shutdown the pipe 1641 */ 1642static void 1643pipeclose(cpipe) 1644 struct pipe *cpipe; 1645{ 1646 struct pipepair *pp; 1647 struct pipe *ppipe; 1648 1649 KASSERT(cpipe != NULL, ("pipeclose: cpipe == NULL")); 1650 1651 PIPE_LOCK(cpipe); 1652 pipelock(cpipe, 0); 1653 pp = cpipe->pipe_pair; 1654 1655 pipeselwakeup(cpipe); 1656 1657 /* 1658 * If the other side is blocked, wake it up saying that 1659 * we want to close it down. 1660 */ 1661 cpipe->pipe_state |= PIPE_EOF; 1662 while (cpipe->pipe_busy) { 1663 wakeup(cpipe); 1664 cpipe->pipe_state |= PIPE_WANT; 1665 pipeunlock(cpipe); 1666 msleep(cpipe, PIPE_MTX(cpipe), PRIBIO, "pipecl", 0); 1667 pipelock(cpipe, 0); 1668 } 1669 1670 1671 /* 1672 * Disconnect from peer, if any. 
1673 */ 1674 ppipe = cpipe->pipe_peer; 1675 if (ppipe->pipe_present == PIPE_ACTIVE) { 1676 pipeselwakeup(ppipe); 1677 1678 ppipe->pipe_state |= PIPE_EOF; 1679 wakeup(ppipe); 1680 KNOTE_LOCKED(&ppipe->pipe_sel.si_note, 0); 1681 } 1682 1683 /* 1684 * Mark this endpoint as free. Release kmem resources. We 1685 * don't mark this endpoint as unused until we've finished 1686 * doing that, or the pipe might disappear out from under 1687 * us. 1688 */ 1689 PIPE_UNLOCK(cpipe); 1690 pipe_free_kmem(cpipe); 1691 PIPE_LOCK(cpipe); 1692 cpipe->pipe_present = PIPE_CLOSING; 1693 pipeunlock(cpipe); 1694 1695 /* 1696 * knlist_clear() may sleep dropping the PIPE_MTX. Set the 1697 * PIPE_FINALIZED, that allows other end to free the 1698 * pipe_pair, only after the knotes are completely dismantled. 1699 */ 1700 knlist_clear(&cpipe->pipe_sel.si_note, 1); 1701 cpipe->pipe_present = PIPE_FINALIZED; 1702 seldrain(&cpipe->pipe_sel); 1703 knlist_destroy(&cpipe->pipe_sel.si_note); 1704 1705 /* 1706 * If both endpoints are now closed, release the memory for the 1707 * pipe pair. If not, unlock. 1708 */ 1709 if (ppipe->pipe_present == PIPE_FINALIZED) { 1710 PIPE_UNLOCK(cpipe); 1711#ifdef MAC 1712 mac_pipe_destroy(pp); 1713#endif 1714 uma_zfree(pipe_zone, cpipe->pipe_pair); 1715 } else 1716 PIPE_UNLOCK(cpipe); 1717} 1718 1719/*ARGSUSED*/ 1720static int 1721pipe_kqfilter(struct file *fp, struct knote *kn) 1722{ 1723 struct pipe *cpipe; 1724 1725 /* 1726 * If a filter is requested that is not supported by this file 1727 * descriptor, don't return an error, but also don't ever generate an 1728 * event. 1729 */ 1730 if ((kn->kn_filter == EVFILT_READ) && !(fp->f_flag & FREAD)) { 1731 kn->kn_fop = &pipe_nfiltops; 1732 return (0); 1733 } 1734 if ((kn->kn_filter == EVFILT_WRITE) && !(fp->f_flag & FWRITE)) { 1735 kn->kn_fop = &pipe_nfiltops; 1736 return (0); 1737 } 1738 cpipe = fp->f_data; 1739 PIPE_LOCK(cpipe); 1740 switch (kn->kn_filter) { 1741 case EVFILT_READ: 1742 kn->kn_fop = &pipe_rfiltops; 1743 break; 1744 case EVFILT_WRITE: 1745 kn->kn_fop = &pipe_wfiltops; 1746 if (cpipe->pipe_peer->pipe_present != PIPE_ACTIVE) { 1747 /* other end of pipe has been closed */ 1748 PIPE_UNLOCK(cpipe); 1749 return (EPIPE); 1750 } 1751 cpipe = PIPE_PEER(cpipe); 1752 break; 1753 default: 1754 PIPE_UNLOCK(cpipe); 1755 return (EINVAL); 1756 } 1757 1758 kn->kn_hook = cpipe; 1759 knlist_add(&cpipe->pipe_sel.si_note, kn, 1); 1760 PIPE_UNLOCK(cpipe); 1761 return (0); 1762} 1763 1764static void 1765filt_pipedetach(struct knote *kn) 1766{ 1767 struct pipe *cpipe = kn->kn_hook; 1768 1769 PIPE_LOCK(cpipe); 1770 knlist_remove(&cpipe->pipe_sel.si_note, kn, 1); 1771 PIPE_UNLOCK(cpipe); 1772} 1773 1774/*ARGSUSED*/ 1775static int 1776filt_piperead(struct knote *kn, long hint) 1777{ 1778 struct pipe *rpipe = kn->kn_hook; 1779 struct pipe *wpipe = rpipe->pipe_peer; 1780 int ret; 1781 1782 PIPE_LOCK(rpipe); 1783 kn->kn_data = rpipe->pipe_buffer.cnt; 1784 if ((kn->kn_data == 0) && (rpipe->pipe_state & PIPE_DIRECTW)) 1785 kn->kn_data = rpipe->pipe_map.cnt; 1786 1787 if ((rpipe->pipe_state & PIPE_EOF) || 1788 wpipe->pipe_present != PIPE_ACTIVE || 1789 (wpipe->pipe_state & PIPE_EOF)) { 1790 kn->kn_flags |= EV_EOF; 1791 PIPE_UNLOCK(rpipe); 1792 return (1); 1793 } 1794 ret = kn->kn_data > 0; 1795 PIPE_UNLOCK(rpipe); 1796 return ret; 1797} 1798 1799/*ARGSUSED*/ 1800static int 1801filt_pipewrite(struct knote *kn, long hint) 1802{ 1803 struct pipe *wpipe; 1804 1805 wpipe = kn->kn_hook; 1806 PIPE_LOCK(wpipe); 1807 if (wpipe->pipe_present != PIPE_ACTIVE || 1808 
/*ARGSUSED*/
static int
filt_pipewrite(struct knote *kn, long hint)
{
	struct pipe *wpipe;

	wpipe = kn->kn_hook;
	PIPE_LOCK(wpipe);
	if (wpipe->pipe_present != PIPE_ACTIVE ||
	    (wpipe->pipe_state & PIPE_EOF)) {
		kn->kn_data = 0;
		kn->kn_flags |= EV_EOF;
		PIPE_UNLOCK(wpipe);
		return (1);
	}
	kn->kn_data = (wpipe->pipe_buffer.size > 0) ?
	    (wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt) : PIPE_BUF;
	if (wpipe->pipe_state & PIPE_DIRECTW)
		kn->kn_data = 0;

	PIPE_UNLOCK(wpipe);
	return (kn->kn_data >= PIPE_BUF);
}

static void
filt_pipedetach_notsup(struct knote *kn)
{

}

static int
filt_pipenotsup(struct knote *kn, long hint)
{

	return (0);
}
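/*
 * Illustrative userland sketch (not part of this file): registering an
 * EVFILT_READ knote on a pipe; filt_piperead() above supplies kn_data
 * (bytes readable) and sets EV_EOF once the write side closes.
 */
#if 0
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <err.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	struct kevent kev;
	int fds[2], kq;

	if (pipe(fds) == -1 || (kq = kqueue()) == -1)
		err(1, "setup");
	EV_SET(&kev, fds[0], EVFILT_READ, EV_ADD, 0, 0, NULL);
	if (kevent(kq, &kev, 1, NULL, 0, NULL) == -1)
		err(1, "kevent register");

	write(fds[1], "ping", 4);
	if (kevent(kq, NULL, 0, &kev, 1, NULL) == 1)
		printf("readable, %jd bytes\n", (intmax_t)kev.data);
	close(fds[0]);
	close(fds[1]);
	close(kq);
	return (0);
}
#endif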