#include "opt_mac.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mac.h>
#include <sys/mutex.h>
#include <sys/ttycom.h>
#include <sys/stat.h>
#include <sys/malloc.h>
#include <sys/poll.h>
#include <sys/selinfo.h>
#include <sys/signalvar.h>
#include <sys/sysproto.h>
#include <sys/pipe.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/uio.h>
#include <sys/event.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/uma.h>

/*
 * Use this define if you want to disable *fancy* VM things.  Expect an
 * approximately 30% decrease in transfer rate.  This could be useful for
 * NetBSD or OpenBSD.
 */
/* #define PIPE_NODIRECT */

/*
 * interfaces to the outside world
 */
static fo_rdwr_t	pipe_read;
static fo_rdwr_t	pipe_write;
static fo_ioctl_t	pipe_ioctl;
static fo_poll_t	pipe_poll;
static fo_kqfilter_t	pipe_kqfilter;
static fo_stat_t	pipe_stat;
static fo_close_t	pipe_close;

static struct fileops pipeops = {
	pipe_read, pipe_write, pipe_ioctl, pipe_poll, pipe_kqfilter,
	pipe_stat, pipe_close, DFLAG_PASSABLE
};

static void	filt_pipedetach(struct knote *kn);
static int	filt_piperead(struct knote *kn, long hint);
static int	filt_pipewrite(struct knote *kn, long hint);

static struct filterops pipe_rfiltops =
	{ 1, NULL, filt_pipedetach, filt_piperead };
static struct filterops pipe_wfiltops =
	{ 1, NULL, filt_pipedetach, filt_pipewrite };

#define PIPE_GET_GIANT(pipe)						\
	do {								\
		KASSERT(((pipe)->pipe_state & PIPE_LOCKFL) != 0,	\
		    ("%s:%d PIPE_GET_GIANT: pipe not locked",		\
		     __FILE__, __LINE__));				\
		PIPE_UNLOCK(pipe);					\
		mtx_lock(&Giant);					\
	} while (0)

#define PIPE_DROP_GIANT(pipe)						\
	do {								\
		mtx_unlock(&Giant);					\
		PIPE_LOCK(pipe);					\
	} while (0)

/*
 * Default pipe buffer size(s); this can be kind-of large now because pipe
 * space is pageable.  The pipe code will try to maintain locality of
 * reference for performance reasons, so small amounts of outstanding I/O
 * will not wipe the cache.
 */
#define MINPIPESIZE (PIPE_SIZE/3)
#define MAXPIPESIZE (2*PIPE_SIZE/3)

/*
 * Maximum amount of kva for pipes -- this is kind-of a soft limit, but
 * is there so that on large systems, we don't exhaust it.
 */
#define MAXPIPEKVA (8*1024*1024)

/*
 * Limit for direct transfers; we cannot, of course, limit
 * the amount of kva for pipes in general though.
 */
#define LIMITPIPEKVA (16*1024*1024)

/*
 * Limit the number of "big" pipes
 */
#define LIMITBIGPIPES	32
static int nbigpipe;

static int amountpipekva;

static void pipeinit(void *dummy __unused);
static void pipeclose(struct pipe *cpipe);
static void pipe_free_kmem(struct pipe *cpipe);
static int pipe_create(struct pipe **cpipep);
static __inline int pipelock(struct pipe *cpipe, int catch);
static __inline void pipeunlock(struct pipe *cpipe);
static __inline void pipeselwakeup(struct pipe *cpipe);
#ifndef PIPE_NODIRECT
static int pipe_build_write_buffer(struct pipe *wpipe, struct uio *uio);
static void pipe_destroy_write_buffer(struct pipe *wpipe);
static int pipe_direct_write(struct pipe *wpipe, struct uio *uio);
static void pipe_clone_write_buffer(struct pipe *wpipe);
#endif
static int pipespace(struct pipe *cpipe, int size);

static uma_zone_t pipe_zone;

SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_ANY, pipeinit, NULL);

static void
pipeinit(void *dummy __unused)
{
	pipe_zone = uma_zcreate("PIPE", sizeof(struct pipe), NULL,
	    NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
}

/*
 * The pipe system call for the DTYPE_PIPE type of pipes
 */

/* ARGSUSED */
int
pipe(td, uap)
	struct thread *td;
	struct pipe_args /* {
		int	dummy;
	} */ *uap;
{
	struct filedesc *fdp = td->td_proc->p_fd;
	struct file *rf, *wf;
	struct pipe *rpipe, *wpipe;
	struct mtx *pmtx;
	int fd, error;

	KASSERT(pipe_zone != NULL, ("pipe_zone not initialized"));

	pmtx = malloc(sizeof(*pmtx), M_TEMP, M_WAITOK | M_ZERO);

	rpipe = wpipe = NULL;
	if (pipe_create(&rpipe) || pipe_create(&wpipe)) {
		pipeclose(rpipe);
		pipeclose(wpipe);
		free(pmtx, M_TEMP);
		return (ENFILE);
	}

	rpipe->pipe_state |= PIPE_DIRECTOK;
	wpipe->pipe_state |= PIPE_DIRECTOK;

	error = falloc(td, &rf, &fd);
	if (error) {
		pipeclose(rpipe);
		pipeclose(wpipe);
		free(pmtx, M_TEMP);
		return (error);
	}
	fhold(rf);
	td->td_retval[0] = fd;

	/*
	 * Warning: once we've gotten past allocation of the fd for the
	 * read-side, we can only drop the read side via fdrop() in order
	 * to avoid races against processes which manage to dup() the read
	 * side while we are blocked trying to allocate the write side.
	 */
	FILE_LOCK(rf);
	rf->f_flag = FREAD | FWRITE;
	rf->f_type = DTYPE_PIPE;
	rf->f_data = rpipe;
	rf->f_ops = &pipeops;
	FILE_UNLOCK(rf);
	error = falloc(td, &wf, &fd);
	if (error) {
		FILEDESC_LOCK(fdp);
		if (fdp->fd_ofiles[td->td_retval[0]] == rf) {
			fdp->fd_ofiles[td->td_retval[0]] = NULL;
			FILEDESC_UNLOCK(fdp);
			fdrop(rf, td);
		} else
			FILEDESC_UNLOCK(fdp);
		fdrop(rf, td);
		/* rpipe has been closed by fdrop(). */
		pipeclose(wpipe);
		free(pmtx, M_TEMP);
		return (error);
	}
	FILE_LOCK(wf);
	wf->f_flag = FREAD | FWRITE;
	wf->f_type = DTYPE_PIPE;
	wf->f_data = wpipe;
	wf->f_ops = &pipeops;
	FILE_UNLOCK(wf);
	td->td_retval[1] = fd;
	rpipe->pipe_peer = wpipe;
	wpipe->pipe_peer = rpipe;
#ifdef MAC
	/*
	 * struct pipe represents a pipe endpoint.  The MAC label is shared
	 * between the connected endpoints.  As a result mac_init_pipe() and
	 * mac_create_pipe() should only be called on one of the endpoints
	 * after they have been connected.
	 */
	mac_init_pipe(rpipe);
	mac_create_pipe(td->td_ucred, rpipe);
#endif
	mtx_init(pmtx, "pipe mutex", NULL, MTX_DEF | MTX_RECURSE);
	rpipe->pipe_mtxp = wpipe->pipe_mtxp = pmtx;
	fdrop(rf, td);

	return (0);
}
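/*
 * Illustrative only (not part of the kernel build): a minimal userland
 * sketch of how the fd pair returned by this system call is consumed.
 * fildes[0] is the read side and fildes[1] is the write side; the
 * function name below is arbitrary.
 */
#if 0
#include <unistd.h>

static void
pipe_usage_sketch(void)
{
	int fildes[2];
	char buf[5];

	if (pipe(fildes) == 0) {
		(void)write(fildes[1], "ping", 5);	/* write side */
		(void)read(fildes[0], buf, 5);		/* read side */
		(void)close(fildes[0]);
		(void)close(fildes[1]);
	}
}
#endif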
/*
 * Allocate kva for pipe circular buffer; the space is pageable.
 * This routine will 'realloc' the size of a pipe safely; if it fails
 * it will retain the old buffer.
 * If it fails it will return ENOMEM.
 */
static int
pipespace(cpipe, size)
	struct pipe *cpipe;
	int size;
{
	struct vm_object *object;
	caddr_t buffer;
	int npages, error;

	GIANT_REQUIRED;
	KASSERT(cpipe->pipe_mtxp == NULL || !mtx_owned(PIPE_MTX(cpipe)),
	    ("pipespace: pipe mutex locked"));

	npages = round_page(size)/PAGE_SIZE;
	/*
	 * Create an object; I don't like the idea of paging to/from
	 * kernel_object.
	 * XXX -- minor change needed here for NetBSD/OpenBSD VM systems.
	 */
	object = vm_object_allocate(OBJT_DEFAULT, npages);
	buffer = (caddr_t) vm_map_min(kernel_map);

	/*
	 * Insert the object into the kernel map, and allocate kva for it.
	 * The map entry is, by default, pageable.
	 * XXX -- minor change needed here for NetBSD/OpenBSD VM systems.
	 */
	error = vm_map_find(kernel_map, object, 0,
	    (vm_offset_t *) &buffer, size, 1,
	    VM_PROT_ALL, VM_PROT_ALL, 0);

	if (error != KERN_SUCCESS) {
		vm_object_deallocate(object);
		return (ENOMEM);
	}

	/* free old resources if we're resizing */
	pipe_free_kmem(cpipe);
	cpipe->pipe_buffer.object = object;
	cpipe->pipe_buffer.buffer = buffer;
	cpipe->pipe_buffer.size = size;
	cpipe->pipe_buffer.in = 0;
	cpipe->pipe_buffer.out = 0;
	cpipe->pipe_buffer.cnt = 0;
	atomic_add_int(&amountpipekva, cpipe->pipe_buffer.size);
	return (0);
}

/*
 * initialize and allocate VM and memory for pipe
 */
static int
pipe_create(cpipep)
	struct pipe **cpipep;
{
	struct pipe *cpipe;
	int error;

	*cpipep = uma_zalloc(pipe_zone, M_WAITOK);
	if (*cpipep == NULL)
		return (ENOMEM);

	cpipe = *cpipep;

	/* so pipespace()->pipe_free_kmem() doesn't follow junk pointer */
	cpipe->pipe_buffer.object = NULL;
#ifndef PIPE_NODIRECT
	cpipe->pipe_map.kva = 0;
#endif
	/*
	 * protect so pipeclose() doesn't follow a junk pointer
	 * if pipespace() fails.
	 */
	bzero(&cpipe->pipe_sel, sizeof(cpipe->pipe_sel));
	cpipe->pipe_state = 0;
	cpipe->pipe_peer = NULL;
	cpipe->pipe_busy = 0;

#ifndef PIPE_NODIRECT
	/*
	 * pipe data structure initializations to support direct pipe I/O
	 */
	cpipe->pipe_map.cnt = 0;
	cpipe->pipe_map.kva = 0;
	cpipe->pipe_map.pos = 0;
	cpipe->pipe_map.npages = 0;
	/* cpipe->pipe_map.ms[] = invalid */
#endif

	cpipe->pipe_mtxp = NULL;	/* avoid pipespace assertion */
	error = pipespace(cpipe, PIPE_SIZE);
	if (error)
		return (error);

	vfs_timestamp(&cpipe->pipe_ctime);
	cpipe->pipe_atime = cpipe->pipe_ctime;
	cpipe->pipe_mtime = cpipe->pipe_ctime;

	return (0);
}
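/*
 * Added commentary (not from the original source): two levels of locking
 * protect a pipe pair.  PIPE_LOCK()/PIPE_UNLOCK() take the short-term
 * mutex shared by both endpoints, while pipelock()/pipeunlock() below
 * set the long-term PIPE_LOCKFL flag, which is held across sleeps to
 * serialize a whole I/O operation.  A typical caller looks like:
 *
 *	PIPE_LOCK(cpipe);
 *	error = pipelock(cpipe, 1);	-- may sleep; 1 == catch signals
 *	... move data, dropping the mutex around uiomove() ...
 *	pipeunlock(cpipe);
 *	PIPE_UNLOCK(cpipe);
 */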
/*
 * lock a pipe for I/O, blocking other access
 */
static __inline int
pipelock(cpipe, catch)
	struct pipe *cpipe;
	int catch;
{
	int error;

	PIPE_LOCK_ASSERT(cpipe, MA_OWNED);
	while (cpipe->pipe_state & PIPE_LOCKFL) {
		cpipe->pipe_state |= PIPE_LWANT;
		error = msleep(cpipe, PIPE_MTX(cpipe),
		    catch ? (PRIBIO | PCATCH) : PRIBIO,
		    "pipelk", 0);
		if (error != 0)
			return (error);
	}
	cpipe->pipe_state |= PIPE_LOCKFL;
	return (0);
}

/*
 * unlock a pipe I/O lock
 */
static __inline void
pipeunlock(cpipe)
	struct pipe *cpipe;
{

	PIPE_LOCK_ASSERT(cpipe, MA_OWNED);
	cpipe->pipe_state &= ~PIPE_LOCKFL;
	if (cpipe->pipe_state & PIPE_LWANT) {
		cpipe->pipe_state &= ~PIPE_LWANT;
		wakeup(cpipe);
	}
}

static __inline void
pipeselwakeup(cpipe)
	struct pipe *cpipe;
{

	if (cpipe->pipe_state & PIPE_SEL) {
		cpipe->pipe_state &= ~PIPE_SEL;
		selwakeup(&cpipe->pipe_sel);
	}
	if ((cpipe->pipe_state & PIPE_ASYNC) && cpipe->pipe_sigio)
		pgsigio(&cpipe->pipe_sigio, SIGIO, 0);
	KNOTE(&cpipe->pipe_sel.si_note, 0);
}

/* ARGSUSED */
static int
pipe_read(fp, uio, active_cred, flags, td)
	struct file *fp;
	struct uio *uio;
	struct ucred *active_cred;
	struct thread *td;
	int flags;
{
	struct pipe *rpipe = fp->f_data;
	int error;
	int nread = 0;
	u_int size;

	PIPE_LOCK(rpipe);
	++rpipe->pipe_busy;
	error = pipelock(rpipe, 1);
	if (error)
		goto unlocked_error;

#ifdef MAC
	error = mac_check_pipe_read(active_cred, rpipe);
	if (error)
		goto locked_error;
#endif

	while (uio->uio_resid) {
		/*
		 * normal pipe buffer receive
		 */
		if (rpipe->pipe_buffer.cnt > 0) {
			size = rpipe->pipe_buffer.size - rpipe->pipe_buffer.out;
			if (size > rpipe->pipe_buffer.cnt)
				size = rpipe->pipe_buffer.cnt;
			if (size > (u_int) uio->uio_resid)
				size = (u_int) uio->uio_resid;

			PIPE_UNLOCK(rpipe);
			error = uiomove(
			    &rpipe->pipe_buffer.buffer[rpipe->pipe_buffer.out],
			    size, uio);
			PIPE_LOCK(rpipe);
			if (error)
				break;

			rpipe->pipe_buffer.out += size;
			if (rpipe->pipe_buffer.out >= rpipe->pipe_buffer.size)
				rpipe->pipe_buffer.out = 0;

			rpipe->pipe_buffer.cnt -= size;

			/*
			 * If there is no more to read in the pipe, reset
			 * its pointers to the beginning.  This improves
			 * cache hit stats.
			 */
			if (rpipe->pipe_buffer.cnt == 0) {
				rpipe->pipe_buffer.in = 0;
				rpipe->pipe_buffer.out = 0;
			}
			nread += size;
#ifndef PIPE_NODIRECT
		/*
		 * Direct copy, bypassing a kernel buffer.
		 */
		} else if ((size = rpipe->pipe_map.cnt) &&
			   (rpipe->pipe_state & PIPE_DIRECTW)) {
			caddr_t	va;
			if (size > (u_int) uio->uio_resid)
				size = (u_int) uio->uio_resid;

			va = (caddr_t) rpipe->pipe_map.kva +
			    rpipe->pipe_map.pos;
			PIPE_UNLOCK(rpipe);
			error = uiomove(va, size, uio);
			PIPE_LOCK(rpipe);
			if (error)
				break;
			nread += size;
			rpipe->pipe_map.pos += size;
			rpipe->pipe_map.cnt -= size;
			if (rpipe->pipe_map.cnt == 0) {
				rpipe->pipe_state &= ~PIPE_DIRECTW;
				wakeup(rpipe);
			}
#endif
		} else {
			/*
			 * detect EOF condition
			 * read returns 0 on EOF, no need to set error
			 */
			if (rpipe->pipe_state & PIPE_EOF)
				break;

			/*
			 * If the "write-side" has been blocked, wake it up now.
			 */
			if (rpipe->pipe_state & PIPE_WANTW) {
				rpipe->pipe_state &= ~PIPE_WANTW;
				wakeup(rpipe);
			}

			/*
			 * Break if some data was read.
			 */
			if (nread > 0)
				break;

			/*
			 * Unlock the pipe buffer for our remaining processing.
			 * We will either break out with an error or we will
			 * sleep and relock to loop.
			 */
			pipeunlock(rpipe);

			/*
			 * Handle non-blocking mode operation or
			 * wait for more data.
			 */
			if (fp->f_flag & FNONBLOCK) {
				error = EAGAIN;
			} else {
				rpipe->pipe_state |= PIPE_WANTR;
				if ((error = msleep(rpipe, PIPE_MTX(rpipe),
				    PRIBIO | PCATCH,
				    "piperd", 0)) == 0)
					error = pipelock(rpipe, 1);
			}
			if (error)
				goto unlocked_error;
		}
	}
#ifdef MAC
locked_error:
#endif
	pipeunlock(rpipe);

	/* XXX: should probably do this before getting any locks. */
	if (error == 0)
		vfs_timestamp(&rpipe->pipe_atime);
unlocked_error:
	--rpipe->pipe_busy;

	/*
	 * PIPE_WANT processing only makes sense if pipe_busy is 0.
	 */
	if ((rpipe->pipe_busy == 0) && (rpipe->pipe_state & PIPE_WANT)) {
		rpipe->pipe_state &= ~(PIPE_WANT|PIPE_WANTW);
		wakeup(rpipe);
	} else if (rpipe->pipe_buffer.cnt < MINPIPESIZE) {
		/*
		 * Handle write blocking hysteresis.
		 */
		if (rpipe->pipe_state & PIPE_WANTW) {
			rpipe->pipe_state &= ~PIPE_WANTW;
			wakeup(rpipe);
		}
	}

	if ((rpipe->pipe_buffer.size - rpipe->pipe_buffer.cnt) >= PIPE_BUF)
		pipeselwakeup(rpipe);

	PIPE_UNLOCK(rpipe);
	return (error);
}

#ifndef PIPE_NODIRECT
/*
 * Map the sending process's buffer into kernel space and wire it.
 * This is similar to a physical write operation.
 */
static int
pipe_build_write_buffer(wpipe, uio)
	struct pipe *wpipe;
	struct uio *uio;
{
	u_int size;
	int i;
	vm_offset_t addr, endaddr;
	vm_paddr_t paddr;

	GIANT_REQUIRED;
	PIPE_LOCK_ASSERT(wpipe, MA_NOTOWNED);

	size = (u_int) uio->uio_iov->iov_len;
	if (size > wpipe->pipe_buffer.size)
		size = wpipe->pipe_buffer.size;

	endaddr = round_page((vm_offset_t)uio->uio_iov->iov_base + size);
	addr = trunc_page((vm_offset_t)uio->uio_iov->iov_base);
	for (i = 0; addr < endaddr; addr += PAGE_SIZE, i++) {
		vm_page_t m;

		/*
		 * vm_fault_quick() can sleep.  Consequently,
		 * vm_page_lock_queues() and vm_page_unlock_queues()
		 * should not be performed outside of this loop.
		 */
		if (vm_fault_quick((caddr_t)addr, VM_PROT_READ) < 0 ||
		    (paddr = pmap_extract(vmspace_pmap(curproc->p_vmspace),
		     addr)) == 0) {
			int j;

			vm_page_lock_queues();
			for (j = 0; j < i; j++)
				vm_page_unwire(wpipe->pipe_map.ms[j], 1);
			vm_page_unlock_queues();
			return (EFAULT);
		}

		m = PHYS_TO_VM_PAGE(paddr);
		vm_page_lock_queues();
		vm_page_wire(m);
		vm_page_unlock_queues();
		wpipe->pipe_map.ms[i] = m;
	}

	/*
	 * set up the control block
	 */
	wpipe->pipe_map.npages = i;
	wpipe->pipe_map.pos =
	    ((vm_offset_t) uio->uio_iov->iov_base) & PAGE_MASK;
	wpipe->pipe_map.cnt = size;

	/*
	 * and map the buffer
	 */
	if (wpipe->pipe_map.kva == 0) {
		/*
		 * We need to allocate space for an extra page because the
		 * address range might (will) span pages at times.
		 */
		wpipe->pipe_map.kva = kmem_alloc_pageable(kernel_map,
		    wpipe->pipe_buffer.size + PAGE_SIZE);
		atomic_add_int(&amountpipekva,
		    wpipe->pipe_buffer.size + PAGE_SIZE);
	}
	pmap_qenter(wpipe->pipe_map.kva, wpipe->pipe_map.ms,
	    wpipe->pipe_map.npages);

	/*
	 * and update the uio data
	 */
	uio->uio_iov->iov_len -= size;
	uio->uio_iov->iov_base = (char *)uio->uio_iov->iov_base + size;
	if (uio->uio_iov->iov_len == 0)
		uio->uio_iov++;
	uio->uio_resid -= size;
	uio->uio_offset += size;
	return (0);
}

/*
 * unmap and unwire the process buffer
 */
static void
pipe_destroy_write_buffer(wpipe)
	struct pipe *wpipe;
{
	int i;

	GIANT_REQUIRED;
	PIPE_LOCK_ASSERT(wpipe, MA_NOTOWNED);

	if (wpipe->pipe_map.kva) {
		pmap_qremove(wpipe->pipe_map.kva, wpipe->pipe_map.npages);

		if (amountpipekva > MAXPIPEKVA) {
			vm_offset_t kva = wpipe->pipe_map.kva;
			wpipe->pipe_map.kva = 0;
			kmem_free(kernel_map, kva,
			    wpipe->pipe_buffer.size + PAGE_SIZE);
			atomic_subtract_int(&amountpipekva,
			    wpipe->pipe_buffer.size + PAGE_SIZE);
		}
	}
	vm_page_lock_queues();
	for (i = 0; i < wpipe->pipe_map.npages; i++)
		vm_page_unwire(wpipe->pipe_map.ms[i], 1);
	vm_page_unlock_queues();
	wpipe->pipe_map.npages = 0;
}

/*
 * In the case of a signal, the writing process might go away.  This
 * code copies the data into the circular buffer so that the source
 * pages can be freed without loss of data.
 */
static void
pipe_clone_write_buffer(wpipe)
	struct pipe *wpipe;
{
	int size;
	int pos;

	PIPE_LOCK_ASSERT(wpipe, MA_OWNED);
	size = wpipe->pipe_map.cnt;
	pos = wpipe->pipe_map.pos;

	wpipe->pipe_buffer.in = size;
	wpipe->pipe_buffer.out = 0;
	wpipe->pipe_buffer.cnt = size;
	wpipe->pipe_state &= ~PIPE_DIRECTW;

	PIPE_GET_GIANT(wpipe);
	bcopy((caddr_t) wpipe->pipe_map.kva + pos,
	    wpipe->pipe_buffer.buffer, size);
	pipe_destroy_write_buffer(wpipe);
	PIPE_DROP_GIANT(wpipe);
}
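/*
 * Added commentary (not from the original source): the direct-write
 * handshake implemented by the routines above and below is, roughly:
 *
 *	writer				reader
 *	------				------
 *	pipe_build_write_buffer()
 *	  wire + map user pages
 *	set PIPE_DIRECTW, sleep		copy from pipe_map.kva
 *					pipe_map.cnt reaches 0:
 *					clear PIPE_DIRECTW, wakeup
 *	pipe_destroy_write_buffer()
 *	  unmap + unwire pages
 *
 * If the writer is signalled while PIPE_DIRECTW is still set,
 * pipe_clone_write_buffer() snapshots the wired pages into the ordinary
 * circular buffer instead, so the user pages can be released safely.
 */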
/*
 * This implements the pipe buffer write mechanism.  Note that only
 * a direct write OR a normal pipe write can be pending at any given time.
 * If there are any characters in the pipe buffer, the direct write will
 * be deferred until the receiving process grabs all of the bytes from
 * the pipe buffer.  Then the direct mapping write is set up.
 */
static int
pipe_direct_write(wpipe, uio)
	struct pipe *wpipe;
	struct uio *uio;
{
	int error;

retry:
	PIPE_LOCK_ASSERT(wpipe, MA_OWNED);
	while (wpipe->pipe_state & PIPE_DIRECTW) {
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}
		wpipe->pipe_state |= PIPE_WANTW;
		error = msleep(wpipe, PIPE_MTX(wpipe),
		    PRIBIO | PCATCH, "pipdww", 0);
		if (error)
			goto error1;
		if (wpipe->pipe_state & PIPE_EOF) {
			error = EPIPE;
			goto error1;
		}
	}
	wpipe->pipe_map.cnt = 0;	/* transfer not ready yet */
	if (wpipe->pipe_buffer.cnt > 0) {
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}

		wpipe->pipe_state |= PIPE_WANTW;
		error = msleep(wpipe, PIPE_MTX(wpipe),
		    PRIBIO | PCATCH, "pipdwc", 0);
		if (error)
			goto error1;
		if (wpipe->pipe_state & PIPE_EOF) {
			error = EPIPE;
			goto error1;
		}
		goto retry;
	}

	wpipe->pipe_state |= PIPE_DIRECTW;

	pipelock(wpipe, 0);
	PIPE_GET_GIANT(wpipe);
	error = pipe_build_write_buffer(wpipe, uio);
	PIPE_DROP_GIANT(wpipe);
	pipeunlock(wpipe);
	if (error) {
		wpipe->pipe_state &= ~PIPE_DIRECTW;
		goto error1;
	}

	error = 0;
	while (!error && (wpipe->pipe_state & PIPE_DIRECTW)) {
		if (wpipe->pipe_state & PIPE_EOF) {
			pipelock(wpipe, 0);
			PIPE_GET_GIANT(wpipe);
			pipe_destroy_write_buffer(wpipe);
			PIPE_DROP_GIANT(wpipe);
			pipeselwakeup(wpipe);
			pipeunlock(wpipe);
			error = EPIPE;
			goto error1;
		}
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}
		pipeselwakeup(wpipe);
		error = msleep(wpipe, PIPE_MTX(wpipe), PRIBIO | PCATCH,
		    "pipdwt", 0);
	}

	pipelock(wpipe, 0);
	if (wpipe->pipe_state & PIPE_DIRECTW) {
		/*
		 * this bit of trickery substitutes a kernel buffer for
		 * the process that might be going away.
		 */
		pipe_clone_write_buffer(wpipe);
	} else {
		PIPE_GET_GIANT(wpipe);
		pipe_destroy_write_buffer(wpipe);
		PIPE_DROP_GIANT(wpipe);
	}
	pipeunlock(wpipe);
	return (error);

error1:
	wakeup(wpipe);
	return (error);
}
#endif

static int
pipe_write(fp, uio, active_cred, flags, td)
	struct file *fp;
	struct uio *uio;
	struct ucred *active_cred;
	struct thread *td;
	int flags;
{
	int error = 0;
	int orig_resid;
	struct pipe *wpipe, *rpipe;

	rpipe = fp->f_data;
	wpipe = rpipe->pipe_peer;

	PIPE_LOCK(rpipe);
	/*
	 * detect loss of pipe read side, issue SIGPIPE if lost.
	 */
	if ((wpipe == NULL) || (wpipe->pipe_state & PIPE_EOF)) {
		PIPE_UNLOCK(rpipe);
		return (EPIPE);
	}
#ifdef MAC
	error = mac_check_pipe_write(active_cred, wpipe);
	if (error) {
		PIPE_UNLOCK(rpipe);
		return (error);
	}
#endif
	++wpipe->pipe_busy;

	/*
	 * If it is advantageous to resize the pipe buffer, do
	 * so.
	 */
	if ((uio->uio_resid > PIPE_SIZE) &&
	    (nbigpipe < LIMITBIGPIPES) &&
	    (wpipe->pipe_state & PIPE_DIRECTW) == 0 &&
	    (wpipe->pipe_buffer.size <= PIPE_SIZE) &&
	    (wpipe->pipe_buffer.cnt == 0)) {

		if ((error = pipelock(wpipe, 1)) == 0) {
			PIPE_GET_GIANT(wpipe);
			if (pipespace(wpipe, BIG_PIPE_SIZE) == 0)
				nbigpipe++;
			PIPE_DROP_GIANT(wpipe);
			pipeunlock(wpipe);
		}
	}

	/*
	 * If an early error occurred, unbusy and return, waking up any
	 * pending readers.
	 */
	if (error) {
		--wpipe->pipe_busy;
		if ((wpipe->pipe_busy == 0) &&
		    (wpipe->pipe_state & PIPE_WANT)) {
			wpipe->pipe_state &= ~(PIPE_WANT | PIPE_WANTR);
			wakeup(wpipe);
		}
		PIPE_UNLOCK(rpipe);
		return (error);
	}

	orig_resid = uio->uio_resid;

	while (uio->uio_resid) {
		int space;

#ifndef PIPE_NODIRECT
		/*
		 * If the transfer is large, we can gain performance if
		 * we do process-to-process copies directly.
		 * If the write is non-blocking, we don't use the
		 * direct write mechanism.
		 *
		 * The direct write mechanism will detect the reader going
		 * away on us.
		 */
		if ((uio->uio_iov->iov_len >= PIPE_MINDIRECT) &&
		    (fp->f_flag & FNONBLOCK) == 0 &&
		    (wpipe->pipe_map.kva || (amountpipekva < LIMITPIPEKVA))) {
			error = pipe_direct_write(wpipe, uio);
			if (error)
				break;
			continue;
		}
#endif

		/*
		 * Pipe buffered writes cannot coincide with
		 * direct writes.  We wait until the currently executing
		 * direct write is completed before we start filling the
		 * pipe buffer.  We break out if a signal occurs or the
		 * reader goes away.
		 */
	retrywrite:
		while (wpipe->pipe_state & PIPE_DIRECTW) {
			if (wpipe->pipe_state & PIPE_WANTR) {
				wpipe->pipe_state &= ~PIPE_WANTR;
				wakeup(wpipe);
			}
			error = msleep(wpipe, PIPE_MTX(rpipe), PRIBIO | PCATCH,
			    "pipbww", 0);
			if (wpipe->pipe_state & PIPE_EOF)
				break;
			if (error)
				break;
		}
		if (wpipe->pipe_state & PIPE_EOF) {
			error = EPIPE;
			break;
		}

		space = wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt;

		/* Writes of size <= PIPE_BUF must be atomic. */
		if ((space < uio->uio_resid) && (orig_resid <= PIPE_BUF))
			space = 0;

		if (space > 0 && (wpipe->pipe_buffer.cnt < PIPE_SIZE)) {
			if ((error = pipelock(wpipe, 1)) == 0) {
				int size;	/* Transfer size */
				int segsize;	/* first segment to transfer */

				/*
				 * It is possible for a direct write to
				 * slip in on us... handle it here...
				 */
				if (wpipe->pipe_state & PIPE_DIRECTW) {
					pipeunlock(wpipe);
					goto retrywrite;
				}
				/*
				 * If a process blocked in uiomove, our
				 * value for space might be bad.
				 *
				 * XXX will we be ok if the reader has gone
				 * away here?
				 */
				if (space > wpipe->pipe_buffer.size -
				    wpipe->pipe_buffer.cnt) {
					pipeunlock(wpipe);
					goto retrywrite;
				}

				/*
				 * Transfer size is minimum of uio transfer
				 * and free space in pipe buffer.
				 */
				if (space > uio->uio_resid)
					size = uio->uio_resid;
				else
					size = space;
				/*
				 * First segment to transfer is minimum of
				 * transfer size and contiguous space in
				 * pipe buffer.  If first segment to transfer
				 * is less than the transfer size, we've got
				 * a wraparound in the buffer.
				 */
				segsize = wpipe->pipe_buffer.size -
				    wpipe->pipe_buffer.in;
				if (segsize > size)
					segsize = size;

				/* Transfer first segment */

				PIPE_UNLOCK(rpipe);
				error = uiomove(
				    &wpipe->pipe_buffer.buffer[wpipe->pipe_buffer.in],
				    segsize, uio);
				PIPE_LOCK(rpipe);

				if (error == 0 && segsize < size) {
					/*
					 * Transfer remaining part now, to
					 * support atomic writes.  Wraparound
					 * happened.
					 */
					if (wpipe->pipe_buffer.in + segsize !=
					    wpipe->pipe_buffer.size)
						panic("Expected pipe buffer "
						    "wraparound disappeared");

					PIPE_UNLOCK(rpipe);
					error = uiomove(
					    &wpipe->pipe_buffer.buffer[0],
					    size - segsize, uio);
					PIPE_LOCK(rpipe);
				}
				if (error == 0) {
					wpipe->pipe_buffer.in += size;
					if (wpipe->pipe_buffer.in >=
					    wpipe->pipe_buffer.size) {
						if (wpipe->pipe_buffer.in !=
						    size - segsize +
						    wpipe->pipe_buffer.size)
							panic("Expected "
							    "wraparound bad");
						wpipe->pipe_buffer.in = size -
						    segsize;
					}

					wpipe->pipe_buffer.cnt += size;
					if (wpipe->pipe_buffer.cnt >
					    wpipe->pipe_buffer.size)
						panic("Pipe buffer overflow");

				}
				pipeunlock(wpipe);
			}
			if (error)
				break;

		} else {
			/*
			 * If the "read-side" has been blocked, wake it up now.
			 */
			if (wpipe->pipe_state & PIPE_WANTR) {
				wpipe->pipe_state &= ~PIPE_WANTR;
				wakeup(wpipe);
			}

			/*
			 * don't block on non-blocking I/O
			 */
			if (fp->f_flag & FNONBLOCK) {
				error = EAGAIN;
				break;
			}

			/*
			 * We have no more space and have something to offer,
			 * wake up select/poll.
			 */
			pipeselwakeup(wpipe);

			wpipe->pipe_state |= PIPE_WANTW;
			error = msleep(wpipe, PIPE_MTX(rpipe),
			    PRIBIO | PCATCH, "pipewr", 0);
			if (error != 0)
				break;
			/*
			 * If read side wants to go away, we just issue a
			 * signal to ourselves.
			 */
			if (wpipe->pipe_state & PIPE_EOF) {
				error = EPIPE;
				break;
			}
		}
	}

	--wpipe->pipe_busy;

	if ((wpipe->pipe_busy == 0) && (wpipe->pipe_state & PIPE_WANT)) {
		wpipe->pipe_state &= ~(PIPE_WANT | PIPE_WANTR);
		wakeup(wpipe);
	} else if (wpipe->pipe_buffer.cnt > 0) {
		/*
		 * If we have put any characters in the buffer, we wake up
		 * the reader.
		 */
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}
	}

	/*
	 * Don't return EPIPE if I/O was successful
	 */
	if ((wpipe->pipe_buffer.cnt == 0) &&
	    (uio->uio_resid == 0) &&
	    (error == EPIPE)) {
		error = 0;
	}

	if (error == 0)
		vfs_timestamp(&wpipe->pipe_mtime);

	/*
	 * We have something to offer,
	 * wake up select/poll.
	 */
	if (wpipe->pipe_buffer.cnt)
		pipeselwakeup(wpipe);

	PIPE_UNLOCK(rpipe);
	return (error);
}
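/*
 * Added commentary (not from the original source): the "space = 0" clamp
 * in pipe_write() above is what enforces POSIX PIPE_BUF atomicity.  For
 * example, with a PIPE_BUF of 512 and 300 bytes free, a 400-byte write is
 * held back in its entirety (space is forced to 0 and the writer sleeps)
 * rather than being split 300/100, so concurrent small writers never see
 * their data interleaved.  Writes larger than PIPE_BUF may be split
 * across buffer fills.
 */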
/*
 * we implement a very minimal set of ioctls for compatibility with sockets.
 */
static int
pipe_ioctl(fp, cmd, data, active_cred, td)
	struct file *fp;
	u_long cmd;
	void *data;
	struct ucred *active_cred;
	struct thread *td;
{
	struct pipe *mpipe = fp->f_data;
#ifdef MAC
	int error;
#endif

	PIPE_LOCK(mpipe);

#ifdef MAC
	error = mac_check_pipe_ioctl(active_cred, mpipe, cmd, data);
	if (error) {
		PIPE_UNLOCK(mpipe);
		return (error);
	}
#endif

	switch (cmd) {

	case FIONBIO:
		PIPE_UNLOCK(mpipe);
		return (0);

	case FIOASYNC:
		if (*(int *)data) {
			mpipe->pipe_state |= PIPE_ASYNC;
		} else {
			mpipe->pipe_state &= ~PIPE_ASYNC;
		}
		PIPE_UNLOCK(mpipe);
		return (0);

	case FIONREAD:
		if (mpipe->pipe_state & PIPE_DIRECTW)
			*(int *)data = mpipe->pipe_map.cnt;
		else
			*(int *)data = mpipe->pipe_buffer.cnt;
		PIPE_UNLOCK(mpipe);
		return (0);

	case FIOSETOWN:
		PIPE_UNLOCK(mpipe);
		return (fsetown(*(int *)data, &mpipe->pipe_sigio));

	case FIOGETOWN:
		PIPE_UNLOCK(mpipe);
		*(int *)data = fgetown(&mpipe->pipe_sigio);
		return (0);

	/* This is deprecated, FIOSETOWN should be used instead. */
	case TIOCSPGRP:
		PIPE_UNLOCK(mpipe);
		return (fsetown(-(*(int *)data), &mpipe->pipe_sigio));

	/* This is deprecated, FIOGETOWN should be used instead. */
	case TIOCGPGRP:
		PIPE_UNLOCK(mpipe);
		*(int *)data = -fgetown(&mpipe->pipe_sigio);
		return (0);

	}
	PIPE_UNLOCK(mpipe);
	return (ENOTTY);
}

static int
pipe_poll(fp, events, active_cred, td)
	struct file *fp;
	int events;
	struct ucred *active_cred;
	struct thread *td;
{
	struct pipe *rpipe = fp->f_data;
	struct pipe *wpipe;
	int revents = 0;
#ifdef MAC
	int error;
#endif

	wpipe = rpipe->pipe_peer;
	PIPE_LOCK(rpipe);
#ifdef MAC
	error = mac_check_pipe_poll(active_cred, rpipe);
	if (error)
		goto locked_error;
#endif
	if (events & (POLLIN | POLLRDNORM))
		if ((rpipe->pipe_state & PIPE_DIRECTW) ||
		    (rpipe->pipe_buffer.cnt > 0) ||
		    (rpipe->pipe_state & PIPE_EOF))
			revents |= events & (POLLIN | POLLRDNORM);

	if (events & (POLLOUT | POLLWRNORM))
		if (wpipe == NULL || (wpipe->pipe_state & PIPE_EOF) ||
		    (((wpipe->pipe_state & PIPE_DIRECTW) == 0) &&
		     (wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt) >= PIPE_BUF))
			revents |= events & (POLLOUT | POLLWRNORM);

	if ((rpipe->pipe_state & PIPE_EOF) ||
	    (wpipe == NULL) ||
	    (wpipe->pipe_state & PIPE_EOF))
		revents |= POLLHUP;

	if (revents == 0) {
		if (events & (POLLIN | POLLRDNORM)) {
			selrecord(td, &rpipe->pipe_sel);
			rpipe->pipe_state |= PIPE_SEL;
		}

		if (events & (POLLOUT | POLLWRNORM)) {
			selrecord(td, &wpipe->pipe_sel);
			wpipe->pipe_state |= PIPE_SEL;
		}
	}
#ifdef MAC
locked_error:
#endif
	PIPE_UNLOCK(rpipe);

	return (revents);
}
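/*
 * Illustrative only (not part of the kernel build): a userland sketch of
 * how the poll logic above is typically exercised.  The fd passed in is
 * assumed to be the read side of a pipe; once the write side is closed,
 * poll() reports POLLHUP as computed above.
 */
#if 0
#include <poll.h>

static int
pipe_poll_sketch(int rfd)
{
	struct pollfd pfd;

	pfd.fd = rfd;
	pfd.events = POLLIN;
	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLHUP))
		return (1);	/* writer gone; drain remaining data, close */
	return (0);
}
#endif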
/*
 * We shouldn't need locks here as we're doing a read and this should
 * be a natural race.
 */
static int
pipe_stat(fp, ub, active_cred, td)
	struct file *fp;
	struct stat *ub;
	struct ucred *active_cred;
	struct thread *td;
{
	struct pipe *pipe = fp->f_data;
#ifdef MAC
	int error;

	PIPE_LOCK(pipe);
	error = mac_check_pipe_stat(active_cred, pipe);
	PIPE_UNLOCK(pipe);
	if (error)
		return (error);
#endif
	bzero(ub, sizeof(*ub));
	ub->st_mode = S_IFIFO;
	ub->st_blksize = pipe->pipe_buffer.size;
	ub->st_size = pipe->pipe_buffer.cnt;
	ub->st_blocks = (ub->st_size + ub->st_blksize - 1) / ub->st_blksize;
	ub->st_atimespec = pipe->pipe_atime;
	ub->st_mtimespec = pipe->pipe_mtime;
	ub->st_ctimespec = pipe->pipe_ctime;
	ub->st_uid = fp->f_cred->cr_uid;
	ub->st_gid = fp->f_cred->cr_gid;
	/*
	 * Left as 0: st_dev, st_ino, st_nlink, st_rdev, st_flags, st_gen.
	 * XXX (st_dev, st_ino) should be unique.
	 */
	return (0);
}

/* ARGSUSED */
static int
pipe_close(fp, td)
	struct file *fp;
	struct thread *td;
{
	struct pipe *cpipe = fp->f_data;

	fp->f_ops = &badfileops;
	fp->f_data = NULL;
	funsetown(&cpipe->pipe_sigio);
	pipeclose(cpipe);
	return (0);
}

static void
pipe_free_kmem(cpipe)
	struct pipe *cpipe;
{

	GIANT_REQUIRED;
	KASSERT(cpipe->pipe_mtxp == NULL || !mtx_owned(PIPE_MTX(cpipe)),
	    ("pipe_free_kmem: pipe mutex locked"));

	if (cpipe->pipe_buffer.buffer != NULL) {
		if (cpipe->pipe_buffer.size > PIPE_SIZE)
			--nbigpipe;
		atomic_subtract_int(&amountpipekva, cpipe->pipe_buffer.size);
		kmem_free(kernel_map,
		    (vm_offset_t)cpipe->pipe_buffer.buffer,
		    cpipe->pipe_buffer.size);
		cpipe->pipe_buffer.buffer = NULL;
	}
#ifndef PIPE_NODIRECT
	if (cpipe->pipe_map.kva != 0) {
		atomic_subtract_int(&amountpipekva,
		    cpipe->pipe_buffer.size + PAGE_SIZE);
		kmem_free(kernel_map,
		    cpipe->pipe_map.kva,
		    cpipe->pipe_buffer.size + PAGE_SIZE);
		cpipe->pipe_map.cnt = 0;
		cpipe->pipe_map.kva = 0;
		cpipe->pipe_map.pos = 0;
		cpipe->pipe_map.npages = 0;
	}
#endif
}

/*
 * shutdown the pipe
 */
static void
pipeclose(cpipe)
	struct pipe *cpipe;
{
	struct pipe *ppipe;
	int hadpeer;

	if (cpipe == NULL)
		return;

	hadpeer = 0;

	/* partially created pipes won't have a valid mutex. */
	if (PIPE_MTX(cpipe) != NULL)
		PIPE_LOCK(cpipe);

	pipeselwakeup(cpipe);

	/*
	 * If the other side is blocked, wake it up saying that
	 * we want to close it down.
	 */
	while (cpipe->pipe_busy) {
		wakeup(cpipe);
		cpipe->pipe_state |= PIPE_WANT | PIPE_EOF;
		msleep(cpipe, PIPE_MTX(cpipe), PRIBIO, "pipecl", 0);
	}

#ifdef MAC
	if (cpipe->pipe_label != NULL && cpipe->pipe_peer == NULL)
		mac_destroy_pipe(cpipe);
#endif

	/*
	 * Disconnect from peer
	 */
	if ((ppipe = cpipe->pipe_peer) != NULL) {
		hadpeer++;
		pipeselwakeup(ppipe);

		ppipe->pipe_state |= PIPE_EOF;
		wakeup(ppipe);
		KNOTE(&ppipe->pipe_sel.si_note, 0);
		ppipe->pipe_peer = NULL;
	}
	/*
	 * free resources
	 */
	if (PIPE_MTX(cpipe) != NULL) {
		PIPE_UNLOCK(cpipe);
		if (!hadpeer) {
			mtx_destroy(PIPE_MTX(cpipe));
			free(PIPE_MTX(cpipe), M_TEMP);
		}
	}
	mtx_lock(&Giant);
	pipe_free_kmem(cpipe);
	uma_zfree(pipe_zone, cpipe);
	mtx_unlock(&Giant);
}

/*ARGSUSED*/
static int
pipe_kqfilter(struct file *fp, struct knote *kn)
{
	struct pipe *cpipe;

	cpipe = kn->kn_fp->f_data;
	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &pipe_rfiltops;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &pipe_wfiltops;
		cpipe = cpipe->pipe_peer;
		if (cpipe == NULL)
			/* other end of pipe has been closed */
			return (EBADF);
		break;
	default:
		return (1);
	}
	kn->kn_hook = cpipe;

	PIPE_LOCK(cpipe);
	SLIST_INSERT_HEAD(&cpipe->pipe_sel.si_note, kn, kn_selnext);
	PIPE_UNLOCK(cpipe);
	return (0);
}

static void
filt_pipedetach(struct knote *kn)
{
	struct pipe *cpipe = (struct pipe *)kn->kn_hook;

	PIPE_LOCK(cpipe);
	SLIST_REMOVE(&cpipe->pipe_sel.si_note, kn, knote, kn_selnext);
	PIPE_UNLOCK(cpipe);
}

/*ARGSUSED*/
static int
filt_piperead(struct knote *kn, long hint)
{
	struct pipe *rpipe = kn->kn_fp->f_data;
	struct pipe *wpipe = rpipe->pipe_peer;

	PIPE_LOCK(rpipe);
	kn->kn_data = rpipe->pipe_buffer.cnt;
	if ((kn->kn_data == 0) && (rpipe->pipe_state & PIPE_DIRECTW))
		kn->kn_data = rpipe->pipe_map.cnt;

	if ((rpipe->pipe_state & PIPE_EOF) ||
	    (wpipe == NULL) || (wpipe->pipe_state & PIPE_EOF)) {
		kn->kn_flags |= EV_EOF;
		PIPE_UNLOCK(rpipe);
		return (1);
	}
	PIPE_UNLOCK(rpipe);
	return (kn->kn_data > 0);
}

/*ARGSUSED*/
static int
filt_pipewrite(struct knote *kn, long hint)
{
	struct pipe *rpipe = kn->kn_fp->f_data;
	struct pipe *wpipe = rpipe->pipe_peer;

	PIPE_LOCK(rpipe);
	if ((wpipe == NULL) || (wpipe->pipe_state & PIPE_EOF)) {
		kn->kn_data = 0;
		kn->kn_flags |= EV_EOF;
		PIPE_UNLOCK(rpipe);
		return (1);
	}
	kn->kn_data = wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt;
	if (wpipe->pipe_state & PIPE_DIRECTW)
		kn->kn_data = 0;

	PIPE_UNLOCK(rpipe);
	return (kn->kn_data >= PIPE_BUF);
}
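/*
 * Illustrative only (not part of the kernel build): a minimal userland
 * sketch of attaching the EVFILT_READ filter above to a pipe's read fd
 * with kqueue(2).
 */
#if 0
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>

static int
pipe_kqueue_sketch(int rfd)
{
	struct kevent kev;
	int kq, n;

	kq = kqueue();
	if (kq == -1)
		return (-1);
	EV_SET(&kev, rfd, EVFILT_READ, EV_ADD, 0, 0, NULL);
	/*
	 * kev.data on return carries kn_data as computed in
	 * filt_piperead(); EV_EOF is set once the write side is gone.
	 */
	n = kevent(kq, &kev, 1, &kev, 1, NULL);
	return (n);
}
#endif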