1/*
2 * Copyright (c) 1996 John S. Dyson
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice immediately at the beginning of the file, without modification,
10 * this list of conditions, and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. Absolutely no warranty of function or purpose is made by the author
15 * John S. Dyson.
16 * 4. Modifications may be freely made to this file if the above conditions
17 * are met.
18 *
19 * $FreeBSD: head/sys/kern/sys_pipe.c 116127 2003-06-09 21:57:48Z mux $
20 */
21
22/*
23 * This file contains a high-performance replacement for the socket-based
24 * pipes scheme originally used in FreeBSD/4.4Lite. It does not support
25 * all features of sockets, but does do everything that pipes normally
26 * do.
27 */
28
/*
 * This code has two modes of operation: a small write mode and a large
 * write mode.  The small write mode acts like conventional pipes with
 * a kernel buffer.  If the write is smaller than PIPE_MINDIRECT, then the
 * "normal" pipe buffering is done.  If the write is between PIPE_MINDIRECT
 * and PIPE_SIZE in size, the user buffer is fully mapped and wired into the
 * kernel, and the receiving process can copy the data directly from the
 * pages of the sending process.
 *
 * If the sending process receives a signal, it is possible that it will
 * go away, and certainly its address space can change, because control
 * is returned to the user-mode side.  In that case, the pipe code
 * arranges to copy the buffer supplied by the user process to a pageable
 * kernel buffer, and the receiving process will grab the data from the
 * pageable kernel buffer.  Since signals don't happen all that often,
 * the copy operation is normally avoided.
 *
 * The constant PIPE_MINDIRECT is chosen to make sure that buffering will
 * happen for small transfers so that the system will not spend all of
 * its time context switching.  PIPE_SIZE is constrained by the
 * amount of kernel virtual memory.
 */
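/*
 * Illustrative sketch only (not part of the implementation): the write
 * path chooses between the two modes roughly as follows; see pipe_write()
 * below for the exact conditions (kva limits, pending direct writes, etc.).
 *
 *	if (uio->uio_iov->iov_len >= PIPE_MINDIRECT &&
 *	    (fp->f_flag & FNONBLOCK) == 0)
 *		pipe_direct_write(wpipe, uio);	// wire and map user pages
 *	else
 *		copy into the pageable kernel ring buffer;
 */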
51
52#include "opt_mac.h"
53
54#include <sys/param.h>
55#include <sys/systm.h>
56#include <sys/fcntl.h>
57#include <sys/file.h>
58#include <sys/filedesc.h>
59#include <sys/filio.h>
60#include <sys/kernel.h>
61#include <sys/lock.h>
62#include <sys/mac.h>
63#include <sys/mutex.h>
64#include <sys/ttycom.h>
65#include <sys/stat.h>
66#include <sys/malloc.h>
67#include <sys/poll.h>
68#include <sys/selinfo.h>
69#include <sys/signalvar.h>
70#include <sys/sysproto.h>
71#include <sys/pipe.h>
72#include <sys/proc.h>
73#include <sys/vnode.h>
74#include <sys/uio.h>
75#include <sys/event.h>
76
77#include <vm/vm.h>
78#include <vm/vm_param.h>
79#include <vm/vm_object.h>
80#include <vm/vm_kern.h>
81#include <vm/vm_extern.h>
82#include <vm/pmap.h>
83#include <vm/vm_map.h>
84#include <vm/vm_page.h>
85#include <vm/uma.h>
86
87/*
88 * Use this define if you want to disable *fancy* VM things. Expect an
89 * approx 30% decrease in transfer rate. This could be useful for
90 * NetBSD or OpenBSD.
91 */
92/* #define PIPE_NODIRECT */
93
94/*
95 * interfaces to the outside world
96 */
97static fo_rdwr_t pipe_read;
98static fo_rdwr_t pipe_write;
99static fo_ioctl_t pipe_ioctl;
100static fo_poll_t pipe_poll;
101static fo_kqfilter_t pipe_kqfilter;
102static fo_stat_t pipe_stat;
103static fo_close_t pipe_close;
104
105static struct fileops pipeops = {
106 pipe_read, pipe_write, pipe_ioctl, pipe_poll, pipe_kqfilter,
107 pipe_stat, pipe_close, DFLAG_PASSABLE
108};
109
110static void filt_pipedetach(struct knote *kn);
111static int filt_piperead(struct knote *kn, long hint);
112static int filt_pipewrite(struct knote *kn, long hint);
113
114static struct filterops pipe_rfiltops =
115 { 1, NULL, filt_pipedetach, filt_piperead };
116static struct filterops pipe_wfiltops =
117 { 1, NULL, filt_pipedetach, filt_pipewrite };
118
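/*
 * PIPE_GET_GIANT() drops the pipe mutex and acquires Giant;
 * PIPE_DROP_GIANT() releases Giant and reacquires the pipe mutex.
 * The caller must already hold the long-term pipe lock (PIPE_LOCKFL)
 * so that the pipe state stays consistent while the mutex is dropped.
 */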
119#define PIPE_GET_GIANT(pipe) \
120 do { \
121 KASSERT(((pipe)->pipe_state & PIPE_LOCKFL) != 0, \
122 ("%s:%d PIPE_GET_GIANT: line pipe not locked", \
123 __FILE__, __LINE__)); \
124 PIPE_UNLOCK(pipe); \
125 mtx_lock(&Giant); \
126 } while (0)
127
128#define PIPE_DROP_GIANT(pipe) \
129 do { \
130 mtx_unlock(&Giant); \
131 PIPE_LOCK(pipe); \
132 } while (0)
133
/*
 * Default pipe buffer size(s); these can be fairly large now because pipe
 * space is pageable.  The pipe code will try to maintain locality of
 * reference for performance reasons, so small amounts of outstanding I/O
 * will not wipe the cache.
 */
140#define MINPIPESIZE (PIPE_SIZE/3)
141#define MAXPIPESIZE (2*PIPE_SIZE/3)
142
/*
 * Maximum amount of kva for pipes -- this is a soft limit, but
 * it is there so that we don't exhaust kva on large systems.
 */
147#define MAXPIPEKVA (8*1024*1024)
148
/*
 * Limit for direct transfers; we cannot, of course, limit
 * the amount of kva for pipes in general.
 */
154
155/*
156 * Limit the number of "big" pipes
157 */
158#define LIMITBIGPIPES 32
159static int nbigpipe;
160
161static int amountpipekva;
162
163static void pipeinit(void *dummy __unused);
164static void pipeclose(struct pipe *cpipe);
165static void pipe_free_kmem(struct pipe *cpipe);
166static int pipe_create(struct pipe **cpipep);
167static __inline int pipelock(struct pipe *cpipe, int catch);
168static __inline void pipeunlock(struct pipe *cpipe);
169static __inline void pipeselwakeup(struct pipe *cpipe);
170#ifndef PIPE_NODIRECT
171static int pipe_build_write_buffer(struct pipe *wpipe, struct uio *uio);
172static void pipe_destroy_write_buffer(struct pipe *wpipe);
173static int pipe_direct_write(struct pipe *wpipe, struct uio *uio);
174static void pipe_clone_write_buffer(struct pipe *wpipe);
175#endif
176static int pipespace(struct pipe *cpipe, int size);
177
178static uma_zone_t pipe_zone;
179
180SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_ANY, pipeinit, NULL);
181
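/*
 * Create the UMA zone from which struct pipe allocations are made; run
 * once at boot via the SYSINIT above.
 */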
182static void
183pipeinit(void *dummy __unused)
184{
185 pipe_zone = uma_zcreate("PIPE", sizeof(struct pipe), NULL,
186 NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
187}
188
189/*
190 * The pipe system call for the DTYPE_PIPE type of pipes
191 */
192
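/*
 * Illustrative userland usage (not part of this file): pipe(2) returns two
 * descriptors, the read end in fd[0] and the write end in fd[1].
 *
 *	int fd[2];
 *	char buf[2];
 *
 *	if (pipe(fd) == -1)
 *		err(1, "pipe");
 *	write(fd[1], "hi", 2);
 *	read(fd[0], buf, 2);
 */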
193/* ARGSUSED */
194int
195pipe(td, uap)
196 struct thread *td;
197 struct pipe_args /* {
198 int dummy;
199 } */ *uap;
200{
201 struct filedesc *fdp = td->td_proc->p_fd;
202 struct file *rf, *wf;
203 struct pipe *rpipe, *wpipe;
204 struct mtx *pmtx;
205 int fd, error;
206
207 KASSERT(pipe_zone != NULL, ("pipe_zone not initialized"));
208
209 pmtx = malloc(sizeof(*pmtx), M_TEMP, M_WAITOK | M_ZERO);
210
211 rpipe = wpipe = NULL;
212 if (pipe_create(&rpipe) || pipe_create(&wpipe)) {
213 pipeclose(rpipe);
214 pipeclose(wpipe);
215 free(pmtx, M_TEMP);
216 return (ENFILE);
217 }
218
219 rpipe->pipe_state |= PIPE_DIRECTOK;
220 wpipe->pipe_state |= PIPE_DIRECTOK;
221
222 error = falloc(td, &rf, &fd);
223 if (error) {
224 pipeclose(rpipe);
225 pipeclose(wpipe);
226 free(pmtx, M_TEMP);
227 return (error);
228 }
229 fhold(rf);
230 td->td_retval[0] = fd;
231
232 /*
233 * Warning: once we've gotten past allocation of the fd for the
234 * read-side, we can only drop the read side via fdrop() in order
235 * to avoid races against processes which manage to dup() the read
236 * side while we are blocked trying to allocate the write side.
237 */
238 FILE_LOCK(rf);
239 rf->f_flag = FREAD | FWRITE;
240 rf->f_type = DTYPE_PIPE;
241 rf->f_data = rpipe;
242 rf->f_ops = &pipeops;
243 FILE_UNLOCK(rf);
244 error = falloc(td, &wf, &fd);
245 if (error) {
246 FILEDESC_LOCK(fdp);
247 if (fdp->fd_ofiles[td->td_retval[0]] == rf) {
248 fdp->fd_ofiles[td->td_retval[0]] = NULL;
249 FILEDESC_UNLOCK(fdp);
250 fdrop(rf, td);
251 } else
252 FILEDESC_UNLOCK(fdp);
253 fdrop(rf, td);
254 /* rpipe has been closed by fdrop(). */
255 pipeclose(wpipe);
256 free(pmtx, M_TEMP);
257 return (error);
258 }
259 FILE_LOCK(wf);
260 wf->f_flag = FREAD | FWRITE;
261 wf->f_type = DTYPE_PIPE;
262 wf->f_data = wpipe;
263 wf->f_ops = &pipeops;
264 FILE_UNLOCK(wf);
265 td->td_retval[1] = fd;
266 rpipe->pipe_peer = wpipe;
267 wpipe->pipe_peer = rpipe;
268#ifdef MAC
269 /*
270 * struct pipe represents a pipe endpoint. The MAC label is shared
271 * between the connected endpoints. As a result mac_init_pipe() and
272 * mac_create_pipe() should only be called on one of the endpoints
273 * after they have been connected.
274 */
275 mac_init_pipe(rpipe);
276 mac_create_pipe(td->td_ucred, rpipe);
277#endif
278 mtx_init(pmtx, "pipe mutex", NULL, MTX_DEF | MTX_RECURSE);
279 rpipe->pipe_mtxp = wpipe->pipe_mtxp = pmtx;
280 fdrop(rf, td);
281
282 return (0);
283}
284
/*
 * Allocate kva for the pipe circular buffer; the space is pageable.
 * This routine will 'realloc' the size of a pipe safely: if it fails,
 * it will retain the old buffer and return ENOMEM.
 */
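/*
 * The buffer is backed by a default VM object mapped pageable into
 * kernel_map; the kva consumed is tracked in amountpipekva.
 */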
291static int
292pipespace(cpipe, size)
293 struct pipe *cpipe;
294 int size;
295{
296 struct vm_object *object;
297 caddr_t buffer;
298 int npages, error;
299
300 GIANT_REQUIRED;
301 KASSERT(cpipe->pipe_mtxp == NULL || !mtx_owned(PIPE_MTX(cpipe)),
302 ("pipespace: pipe mutex locked"));
303
304 npages = round_page(size)/PAGE_SIZE;
	/*
	 * Create an object; I don't like the idea of paging to/from
	 * kernel_object.
	 * XXX -- minor change needed here for NetBSD/OpenBSD VM systems.
	 */
310 object = vm_object_allocate(OBJT_DEFAULT, npages);
311 buffer = (caddr_t) vm_map_min(kernel_map);
312
313 /*
314 * Insert the object into the kernel map, and allocate kva for it.
315 * The map entry is, by default, pageable.
316 * XXX -- minor change needed here for NetBSD/OpenBSD VM systems.
317 */
318 error = vm_map_find(kernel_map, object, 0,
319 (vm_offset_t *) &buffer, size, 1,
320 VM_PROT_ALL, VM_PROT_ALL, 0);
321
322 if (error != KERN_SUCCESS) {
323 vm_object_deallocate(object);
324 return (ENOMEM);
325 }
326
327 /* free old resources if we're resizing */
328 pipe_free_kmem(cpipe);
329 cpipe->pipe_buffer.object = object;
330 cpipe->pipe_buffer.buffer = buffer;
331 cpipe->pipe_buffer.size = size;
332 cpipe->pipe_buffer.in = 0;
333 cpipe->pipe_buffer.out = 0;
334 cpipe->pipe_buffer.cnt = 0;
335 atomic_add_int(&amountpipekva, cpipe->pipe_buffer.size);
336 return (0);
337}
338
339/*
340 * initialize and allocate VM and memory for pipe
341 */
342static int
343pipe_create(cpipep)
344 struct pipe **cpipep;
345{
346 struct pipe *cpipe;
347 int error;
348
349 *cpipep = uma_zalloc(pipe_zone, M_WAITOK);
350 if (*cpipep == NULL)
351 return (ENOMEM);
352
353 cpipe = *cpipep;
354
355 /* so pipespace()->pipe_free_kmem() doesn't follow junk pointer */
356 cpipe->pipe_buffer.object = NULL;
357#ifndef PIPE_NODIRECT
358 cpipe->pipe_map.kva = 0;
359#endif
360 /*
361 * protect so pipeclose() doesn't follow a junk pointer
362 * if pipespace() fails.
363 */
364 bzero(&cpipe->pipe_sel, sizeof(cpipe->pipe_sel));
365 cpipe->pipe_state = 0;
366 cpipe->pipe_peer = NULL;
367 cpipe->pipe_busy = 0;
368
369#ifndef PIPE_NODIRECT
370 /*
371 * pipe data structure initializations to support direct pipe I/O
372 */
373 cpipe->pipe_map.cnt = 0;
374 cpipe->pipe_map.kva = 0;
375 cpipe->pipe_map.pos = 0;
376 cpipe->pipe_map.npages = 0;
377 /* cpipe->pipe_map.ms[] = invalid */
378#endif
379
380 cpipe->pipe_mtxp = NULL; /* avoid pipespace assertion */
381 error = pipespace(cpipe, PIPE_SIZE);
382 if (error)
383 return (error);
384
385 vfs_timestamp(&cpipe->pipe_ctime);
386 cpipe->pipe_atime = cpipe->pipe_ctime;
387 cpipe->pipe_mtime = cpipe->pipe_ctime;
388
389 return (0);
390}
391
392
393/*
394 * lock a pipe for I/O, blocking other access
395 */
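/*
 * PIPE_LOCKFL is a long-term sleepable lock, distinct from the pipe mutex;
 * it is held across operations (such as uiomove()) during which the mutex
 * must be dropped.
 */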
396static __inline int
397pipelock(cpipe, catch)
398 struct pipe *cpipe;
399 int catch;
400{
401 int error;
402
403 PIPE_LOCK_ASSERT(cpipe, MA_OWNED);
404 while (cpipe->pipe_state & PIPE_LOCKFL) {
405 cpipe->pipe_state |= PIPE_LWANT;
406 error = msleep(cpipe, PIPE_MTX(cpipe),
407 catch ? (PRIBIO | PCATCH) : PRIBIO,
408 "pipelk", 0);
409 if (error != 0)
410 return (error);
411 }
412 cpipe->pipe_state |= PIPE_LOCKFL;
413 return (0);
414}
415
416/*
417 * unlock a pipe I/O lock
418 */
419static __inline void
420pipeunlock(cpipe)
421 struct pipe *cpipe;
422{
423
424 PIPE_LOCK_ASSERT(cpipe, MA_OWNED);
425 cpipe->pipe_state &= ~PIPE_LOCKFL;
426 if (cpipe->pipe_state & PIPE_LWANT) {
427 cpipe->pipe_state &= ~PIPE_LWANT;
428 wakeup(cpipe);
429 }
430}
431
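/*
 * Notify everyone waiting for pipe events: select/poll sleepers, SIGIO
 * recipients for async I/O, and any attached kqueue filters.
 */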
432static __inline void
433pipeselwakeup(cpipe)
434 struct pipe *cpipe;
435{
436
437 if (cpipe->pipe_state & PIPE_SEL) {
438 cpipe->pipe_state &= ~PIPE_SEL;
439 selwakeup(&cpipe->pipe_sel);
440 }
441 if ((cpipe->pipe_state & PIPE_ASYNC) && cpipe->pipe_sigio)
442 pgsigio(&cpipe->pipe_sigio, SIGIO, 0);
443 KNOTE(&cpipe->pipe_sel.si_note, 0);
444}
445
446/* ARGSUSED */
447static int
448pipe_read(fp, uio, active_cred, flags, td)
449 struct file *fp;
450 struct uio *uio;
451 struct ucred *active_cred;
452 struct thread *td;
453 int flags;
454{
455 struct pipe *rpipe = fp->f_data;
456 int error;
457 int nread = 0;
458 u_int size;
459
460 PIPE_LOCK(rpipe);
461 ++rpipe->pipe_busy;
462 error = pipelock(rpipe, 1);
463 if (error)
464 goto unlocked_error;
465
466#ifdef MAC
467 error = mac_check_pipe_read(active_cred, rpipe);
468 if (error)
469 goto locked_error;
470#endif
471
472 while (uio->uio_resid) {
473 /*
474 * normal pipe buffer receive
475 */
476 if (rpipe->pipe_buffer.cnt > 0) {
477 size = rpipe->pipe_buffer.size - rpipe->pipe_buffer.out;
478 if (size > rpipe->pipe_buffer.cnt)
479 size = rpipe->pipe_buffer.cnt;
480 if (size > (u_int) uio->uio_resid)
481 size = (u_int) uio->uio_resid;
482
483 PIPE_UNLOCK(rpipe);
484 error = uiomove(
485 &rpipe->pipe_buffer.buffer[rpipe->pipe_buffer.out],
486 size, uio);
487 PIPE_LOCK(rpipe);
488 if (error)
489 break;
490
491 rpipe->pipe_buffer.out += size;
492 if (rpipe->pipe_buffer.out >= rpipe->pipe_buffer.size)
493 rpipe->pipe_buffer.out = 0;
494
495 rpipe->pipe_buffer.cnt -= size;
496
497 /*
498 * If there is no more to read in the pipe, reset
499 * its pointers to the beginning. This improves
500 * cache hit stats.
501 */
502 if (rpipe->pipe_buffer.cnt == 0) {
503 rpipe->pipe_buffer.in = 0;
504 rpipe->pipe_buffer.out = 0;
505 }
506 nread += size;
507#ifndef PIPE_NODIRECT
508 /*
509 * Direct copy, bypassing a kernel buffer.
510 */
511 } else if ((size = rpipe->pipe_map.cnt) &&
512 (rpipe->pipe_state & PIPE_DIRECTW)) {
513 caddr_t va;
514 if (size > (u_int) uio->uio_resid)
515 size = (u_int) uio->uio_resid;
516
517 va = (caddr_t) rpipe->pipe_map.kva +
518 rpipe->pipe_map.pos;
519 PIPE_UNLOCK(rpipe);
520 error = uiomove(va, size, uio);
521 PIPE_LOCK(rpipe);
522 if (error)
523 break;
524 nread += size;
525 rpipe->pipe_map.pos += size;
526 rpipe->pipe_map.cnt -= size;
527 if (rpipe->pipe_map.cnt == 0) {
528 rpipe->pipe_state &= ~PIPE_DIRECTW;
529 wakeup(rpipe);
530 }
531#endif
532 } else {
533 /*
534 * detect EOF condition
535 * read returns 0 on EOF, no need to set error
536 */
537 if (rpipe->pipe_state & PIPE_EOF)
538 break;
539
540 /*
541 * If the "write-side" has been blocked, wake it up now.
542 */
543 if (rpipe->pipe_state & PIPE_WANTW) {
544 rpipe->pipe_state &= ~PIPE_WANTW;
545 wakeup(rpipe);
546 }
547
548 /*
549 * Break if some data was read.
550 */
551 if (nread > 0)
552 break;
553
554 /*
555 * Unlock the pipe buffer for our remaining processing.
556 * We will either break out with an error or we will
557 * sleep and relock to loop.
558 */
559 pipeunlock(rpipe);
560
561 /*
562 * Handle non-blocking mode operation or
563 * wait for more data.
564 */
565 if (fp->f_flag & FNONBLOCK) {
566 error = EAGAIN;
567 } else {
568 rpipe->pipe_state |= PIPE_WANTR;
569 if ((error = msleep(rpipe, PIPE_MTX(rpipe),
570 PRIBIO | PCATCH,
571 "piperd", 0)) == 0)
572 error = pipelock(rpipe, 1);
573 }
574 if (error)
575 goto unlocked_error;
576 }
577 }
578#ifdef MAC
579locked_error:
580#endif
581 pipeunlock(rpipe);
582
583 /* XXX: should probably do this before getting any locks. */
584 if (error == 0)
585 vfs_timestamp(&rpipe->pipe_atime);
586unlocked_error:
587 --rpipe->pipe_busy;
588
589 /*
590 * PIPE_WANT processing only makes sense if pipe_busy is 0.
591 */
592 if ((rpipe->pipe_busy == 0) && (rpipe->pipe_state & PIPE_WANT)) {
593 rpipe->pipe_state &= ~(PIPE_WANT|PIPE_WANTW);
594 wakeup(rpipe);
595 } else if (rpipe->pipe_buffer.cnt < MINPIPESIZE) {
596 /*
597 * Handle write blocking hysteresis.
598 */
599 if (rpipe->pipe_state & PIPE_WANTW) {
600 rpipe->pipe_state &= ~PIPE_WANTW;
601 wakeup(rpipe);
602 }
603 }
604
605 if ((rpipe->pipe_buffer.size - rpipe->pipe_buffer.cnt) >= PIPE_BUF)
606 pipeselwakeup(rpipe);
607
608 PIPE_UNLOCK(rpipe);
609 return (error);
610}
611
612#ifndef PIPE_NODIRECT
/*
 * Map the sending process's buffer into kernel space and wire it.
 * This is similar to a physical write operation.
 */
617static int
618pipe_build_write_buffer(wpipe, uio)
619 struct pipe *wpipe;
620 struct uio *uio;
621{
622 u_int size;
623 int i;
624 vm_offset_t addr, endaddr;
625 vm_paddr_t paddr;
626
627 GIANT_REQUIRED;
628 PIPE_LOCK_ASSERT(wpipe, MA_NOTOWNED);
629
630 size = (u_int) uio->uio_iov->iov_len;
631 if (size > wpipe->pipe_buffer.size)
632 size = wpipe->pipe_buffer.size;
633
634 endaddr = round_page((vm_offset_t)uio->uio_iov->iov_base + size);
635 addr = trunc_page((vm_offset_t)uio->uio_iov->iov_base);
636 for (i = 0; addr < endaddr; addr += PAGE_SIZE, i++) {
637 vm_page_t m;
638
		/*
		 * vm_fault_quick() can sleep.  Consequently,
		 * vm_page_lock_queues() and vm_page_unlock_queues()
		 * should not be hoisted outside of this loop.
		 */
644 if (vm_fault_quick((caddr_t)addr, VM_PROT_READ) < 0 ||
645 (paddr = pmap_extract(vmspace_pmap(curproc->p_vmspace),
646 addr)) == 0) {
647 int j;
648
649 vm_page_lock_queues();
650 for (j = 0; j < i; j++)
651 vm_page_unwire(wpipe->pipe_map.ms[j], 1);
652 vm_page_unlock_queues();
653 return (EFAULT);
654 }
655
656 m = PHYS_TO_VM_PAGE(paddr);
657 vm_page_lock_queues();
658 vm_page_wire(m);
659 vm_page_unlock_queues();
660 wpipe->pipe_map.ms[i] = m;
661 }
662
663/*
664 * set up the control block
665 */
666 wpipe->pipe_map.npages = i;
667 wpipe->pipe_map.pos =
668 ((vm_offset_t) uio->uio_iov->iov_base) & PAGE_MASK;
669 wpipe->pipe_map.cnt = size;
670
671/*
672 * and map the buffer
673 */
674 if (wpipe->pipe_map.kva == 0) {
		/*
		 * We need to allocate space for an extra page because the
		 * address range is usually not page aligned and can span
		 * one more page than its size alone would suggest.
		 */
679 wpipe->pipe_map.kva = kmem_alloc_pageable(kernel_map,
680 wpipe->pipe_buffer.size + PAGE_SIZE);
681 atomic_add_int(&amountpipekva,
682 wpipe->pipe_buffer.size + PAGE_SIZE);
683 }
684 pmap_qenter(wpipe->pipe_map.kva, wpipe->pipe_map.ms,
685 wpipe->pipe_map.npages);
686
687/*
688 * and update the uio data
689 */
690
691 uio->uio_iov->iov_len -= size;
692 uio->uio_iov->iov_base = (char *)uio->uio_iov->iov_base + size;
693 if (uio->uio_iov->iov_len == 0)
694 uio->uio_iov++;
695 uio->uio_resid -= size;
696 uio->uio_offset += size;
697 return (0);
698}
699
700/*
701 * unmap and unwire the process buffer
702 */
703static void
704pipe_destroy_write_buffer(wpipe)
705 struct pipe *wpipe;
706{
707 int i;
708
709 GIANT_REQUIRED;
710 PIPE_LOCK_ASSERT(wpipe, MA_NOTOWNED);
711
712 if (wpipe->pipe_map.kva) {
713 pmap_qremove(wpipe->pipe_map.kva, wpipe->pipe_map.npages);
714
715 if (amountpipekva > MAXPIPEKVA) {
716 vm_offset_t kva = wpipe->pipe_map.kva;
717 wpipe->pipe_map.kva = 0;
718 kmem_free(kernel_map, kva,
719 wpipe->pipe_buffer.size + PAGE_SIZE);
720 atomic_subtract_int(&amountpipekva,
721 wpipe->pipe_buffer.size + PAGE_SIZE);
722 }
723 }
724 vm_page_lock_queues();
725 for (i = 0; i < wpipe->pipe_map.npages; i++)
726 vm_page_unwire(wpipe->pipe_map.ms[i], 1);
727 vm_page_unlock_queues();
728 wpipe->pipe_map.npages = 0;
729}
730
731/*
732 * In the case of a signal, the writing process might go away. This
733 * code copies the data into the circular buffer so that the source
734 * pages can be freed without loss of data.
735 */
736static void
737pipe_clone_write_buffer(wpipe)
738 struct pipe *wpipe;
739{
740 int size;
741 int pos;
742
743 PIPE_LOCK_ASSERT(wpipe, MA_OWNED);
744 size = wpipe->pipe_map.cnt;
745 pos = wpipe->pipe_map.pos;
746
747 wpipe->pipe_buffer.in = size;
748 wpipe->pipe_buffer.out = 0;
749 wpipe->pipe_buffer.cnt = size;
750 wpipe->pipe_state &= ~PIPE_DIRECTW;
751
752 PIPE_GET_GIANT(wpipe);
753 bcopy((caddr_t) wpipe->pipe_map.kva + pos,
754 wpipe->pipe_buffer.buffer, size);
755 pipe_destroy_write_buffer(wpipe);
756 PIPE_DROP_GIANT(wpipe);
757}
758
/*
 * This implements the direct pipe write mechanism.  Note that only
 * a direct write OR a normal pipe write can be pending at any given time.
 * If there are any characters in the pipe buffer, the direct write will
 * be deferred until the receiving process grabs all of the bytes from
 * the pipe buffer.  Then the direct mapping write is set up.
 */
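/*
 * The writer sets PIPE_DIRECTW, wires and maps its pages, and sleeps until
 * the reader has consumed pipe_map.cnt bytes; pipe_read() clears
 * PIPE_DIRECTW and wakes the writer once the mapped data is drained.
 */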
766static int
767pipe_direct_write(wpipe, uio)
768 struct pipe *wpipe;
769 struct uio *uio;
770{
771 int error;
772
773retry:
774 PIPE_LOCK_ASSERT(wpipe, MA_OWNED);
775 while (wpipe->pipe_state & PIPE_DIRECTW) {
776 if (wpipe->pipe_state & PIPE_WANTR) {
777 wpipe->pipe_state &= ~PIPE_WANTR;
778 wakeup(wpipe);
779 }
780 wpipe->pipe_state |= PIPE_WANTW;
781 error = msleep(wpipe, PIPE_MTX(wpipe),
782 PRIBIO | PCATCH, "pipdww", 0);
783 if (error)
784 goto error1;
785 if (wpipe->pipe_state & PIPE_EOF) {
786 error = EPIPE;
787 goto error1;
788 }
789 }
790 wpipe->pipe_map.cnt = 0; /* transfer not ready yet */
791 if (wpipe->pipe_buffer.cnt > 0) {
792 if (wpipe->pipe_state & PIPE_WANTR) {
793 wpipe->pipe_state &= ~PIPE_WANTR;
794 wakeup(wpipe);
795 }
796
797 wpipe->pipe_state |= PIPE_WANTW;
798 error = msleep(wpipe, PIPE_MTX(wpipe),
799 PRIBIO | PCATCH, "pipdwc", 0);
800 if (error)
801 goto error1;
802 if (wpipe->pipe_state & PIPE_EOF) {
803 error = EPIPE;
804 goto error1;
805 }
806 goto retry;
807 }
808
809 wpipe->pipe_state |= PIPE_DIRECTW;
810
811 pipelock(wpipe, 0);
812 PIPE_GET_GIANT(wpipe);
813 error = pipe_build_write_buffer(wpipe, uio);
814 PIPE_DROP_GIANT(wpipe);
815 pipeunlock(wpipe);
816 if (error) {
817 wpipe->pipe_state &= ~PIPE_DIRECTW;
818 goto error1;
819 }
820
821 error = 0;
822 while (!error && (wpipe->pipe_state & PIPE_DIRECTW)) {
823 if (wpipe->pipe_state & PIPE_EOF) {
824 pipelock(wpipe, 0);
825 PIPE_GET_GIANT(wpipe);
826 pipe_destroy_write_buffer(wpipe);
827 PIPE_DROP_GIANT(wpipe);
828 pipeselwakeup(wpipe);
829 pipeunlock(wpipe);
830 error = EPIPE;
831 goto error1;
832 }
833 if (wpipe->pipe_state & PIPE_WANTR) {
834 wpipe->pipe_state &= ~PIPE_WANTR;
835 wakeup(wpipe);
836 }
837 pipeselwakeup(wpipe);
838 error = msleep(wpipe, PIPE_MTX(wpipe), PRIBIO | PCATCH,
839 "pipdwt", 0);
840 }
841
842 pipelock(wpipe,0);
843 if (wpipe->pipe_state & PIPE_DIRECTW) {
844 /*
845 * this bit of trickery substitutes a kernel buffer for
846 * the process that might be going away.
847 */
848 pipe_clone_write_buffer(wpipe);
849 } else {
850 PIPE_GET_GIANT(wpipe);
851 pipe_destroy_write_buffer(wpipe);
852 PIPE_DROP_GIANT(wpipe);
853 }
854 pipeunlock(wpipe);
855 return (error);
856
857error1:
858 wakeup(wpipe);
859 return (error);
860}
861#endif
862
863static int
864pipe_write(fp, uio, active_cred, flags, td)
865 struct file *fp;
866 struct uio *uio;
867 struct ucred *active_cred;
868 struct thread *td;
869 int flags;
870{
871 int error = 0;
872 int orig_resid;
873 struct pipe *wpipe, *rpipe;
874
875 rpipe = fp->f_data;
876 wpipe = rpipe->pipe_peer;
877
878 PIPE_LOCK(rpipe);
	/*
	 * Detect loss of the pipe's read side; returning EPIPE here causes
	 * the caller to deliver SIGPIPE.
	 */
882 if ((wpipe == NULL) || (wpipe->pipe_state & PIPE_EOF)) {
883 PIPE_UNLOCK(rpipe);
884 return (EPIPE);
885 }
886#ifdef MAC
887 error = mac_check_pipe_write(active_cred, wpipe);
888 if (error) {
889 PIPE_UNLOCK(rpipe);
890 return (error);
891 }
892#endif
893 ++wpipe->pipe_busy;
894
895 /*
896 * If it is advantageous to resize the pipe buffer, do
897 * so.
898 */
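	/*
	 * We grow to BIG_PIPE_SIZE only when the write is larger than
	 * PIPE_SIZE, the number of big pipes is below LIMITBIGPIPES, no
	 * direct write is pending, and the buffer is still small and empty.
	 */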
899 if ((uio->uio_resid > PIPE_SIZE) &&
900 (nbigpipe < LIMITBIGPIPES) &&
901 (wpipe->pipe_state & PIPE_DIRECTW) == 0 &&
902 (wpipe->pipe_buffer.size <= PIPE_SIZE) &&
903 (wpipe->pipe_buffer.cnt == 0)) {
904
905 if ((error = pipelock(wpipe, 1)) == 0) {
906 PIPE_GET_GIANT(wpipe);
907 if (pipespace(wpipe, BIG_PIPE_SIZE) == 0)
908 nbigpipe++;
909 PIPE_DROP_GIANT(wpipe);
910 pipeunlock(wpipe);
911 }
912 }
913
	/*
	 * If an early error occurred, unbusy and return, waking up any
	 * pending readers.
	 */
918 if (error) {
919 --wpipe->pipe_busy;
920 if ((wpipe->pipe_busy == 0) &&
921 (wpipe->pipe_state & PIPE_WANT)) {
922 wpipe->pipe_state &= ~(PIPE_WANT | PIPE_WANTR);
923 wakeup(wpipe);
924 }
925 PIPE_UNLOCK(rpipe);
926 return(error);
927 }
928
929 orig_resid = uio->uio_resid;
930
931 while (uio->uio_resid) {
932 int space;
933
934#ifndef PIPE_NODIRECT
935 /*
936 * If the transfer is large, we can gain performance if
937 * we do process-to-process copies directly.
938 * If the write is non-blocking, we don't use the
939 * direct write mechanism.
940 *
941 * The direct write mechanism will detect the reader going
942 * away on us.
943 */
		if ((uio->uio_iov->iov_len >= PIPE_MINDIRECT) &&
		    (fp->f_flag & FNONBLOCK) == 0 &&
		    (wpipe->pipe_map.kva || (amountpipekva < LIMITPIPEKVA))) {
948 error = pipe_direct_write(wpipe, uio);
949 if (error)
950 break;
951 continue;
952 }
953#endif
954
		/*
		 * Pipe buffered writes cannot be coincident with
		 * direct writes.  We wait until the currently executing
		 * direct write is completed before we start filling the
		 * pipe buffer.  We break out if a signal occurs or the
		 * reader goes away.
		 */
962 retrywrite:
963 while (wpipe->pipe_state & PIPE_DIRECTW) {
964 if (wpipe->pipe_state & PIPE_WANTR) {
965 wpipe->pipe_state &= ~PIPE_WANTR;
966 wakeup(wpipe);
967 }
968 error = msleep(wpipe, PIPE_MTX(rpipe), PRIBIO | PCATCH,
969 "pipbww", 0);
970 if (wpipe->pipe_state & PIPE_EOF)
971 break;
972 if (error)
973 break;
974 }
975 if (wpipe->pipe_state & PIPE_EOF) {
976 error = EPIPE;
977 break;
978 }
979
980 space = wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt;
981
982 /* Writes of size <= PIPE_BUF must be atomic. */
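		/*
		 * For example, if only part of a <= PIPE_BUF write would
		 * fit, we wait for the buffer to drain rather than split
		 * the write.
		 */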
983 if ((space < uio->uio_resid) && (orig_resid <= PIPE_BUF))
984 space = 0;
985
986 if (space > 0 && (wpipe->pipe_buffer.cnt < PIPE_SIZE)) {
987 if ((error = pipelock(wpipe,1)) == 0) {
988 int size; /* Transfer size */
989 int segsize; /* first segment to transfer */
990
991 /*
992 * It is possible for a direct write to
993 * slip in on us... handle it here...
994 */
995 if (wpipe->pipe_state & PIPE_DIRECTW) {
996 pipeunlock(wpipe);
997 goto retrywrite;
998 }
999 /*
1000 * If a process blocked in uiomove, our
1001 * value for space might be bad.
1002 *
1003 * XXX will we be ok if the reader has gone
1004 * away here?
1005 */
1006 if (space > wpipe->pipe_buffer.size -
1007 wpipe->pipe_buffer.cnt) {
1008 pipeunlock(wpipe);
1009 goto retrywrite;
1010 }
1011
1012 /*
1013 * Transfer size is minimum of uio transfer
1014 * and free space in pipe buffer.
1015 */
1016 if (space > uio->uio_resid)
1017 size = uio->uio_resid;
1018 else
1019 size = space;
1020 /*
1021 * First segment to transfer is minimum of
1022 * transfer size and contiguous space in
1023 * pipe buffer. If first segment to transfer
1024 * is less than the transfer size, we've got
1025 * a wraparound in the buffer.
1026 */
1027 segsize = wpipe->pipe_buffer.size -
1028 wpipe->pipe_buffer.in;
1029 if (segsize > size)
1030 segsize = size;
1031
1032 /* Transfer first segment */
1033
1034 PIPE_UNLOCK(rpipe);
1035 error = uiomove(&wpipe->pipe_buffer.buffer[wpipe->pipe_buffer.in],
1036 segsize, uio);
1037 PIPE_LOCK(rpipe);
1038
1039 if (error == 0 && segsize < size) {
1040 /*
1041 * Transfer remaining part now, to
1042 * support atomic writes. Wraparound
1043 * happened.
1044 */
1045 if (wpipe->pipe_buffer.in + segsize !=
1046 wpipe->pipe_buffer.size)
1047 panic("Expected pipe buffer "
1048 "wraparound disappeared");
1049
1050 PIPE_UNLOCK(rpipe);
1051 error = uiomove(
1052 &wpipe->pipe_buffer.buffer[0],
1053 size - segsize, uio);
1054 PIPE_LOCK(rpipe);
1055 }
1056 if (error == 0) {
1057 wpipe->pipe_buffer.in += size;
1058 if (wpipe->pipe_buffer.in >=
1059 wpipe->pipe_buffer.size) {
1060 if (wpipe->pipe_buffer.in !=
1061 size - segsize +
1062 wpipe->pipe_buffer.size)
1063 panic("Expected "
1064 "wraparound bad");
1065 wpipe->pipe_buffer.in = size -
1066 segsize;
1067 }
1068
1069 wpipe->pipe_buffer.cnt += size;
1070 if (wpipe->pipe_buffer.cnt >
1071 wpipe->pipe_buffer.size)
1072 panic("Pipe buffer overflow");
1073
1074 }
1075 pipeunlock(wpipe);
1076 }
1077 if (error)
1078 break;
1079
1080 } else {
1081 /*
1082 * If the "read-side" has been blocked, wake it up now.
1083 */
1084 if (wpipe->pipe_state & PIPE_WANTR) {
1085 wpipe->pipe_state &= ~PIPE_WANTR;
1086 wakeup(wpipe);
1087 }
1088
1089 /*
1090 * don't block on non-blocking I/O
1091 */
1092 if (fp->f_flag & FNONBLOCK) {
1093 error = EAGAIN;
1094 break;
1095 }
1096
1097 /*
1098 * We have no more space and have something to offer,
1099 * wake up select/poll.
1100 */
1101 pipeselwakeup(wpipe);
1102
1103 wpipe->pipe_state |= PIPE_WANTW;
1104 error = msleep(wpipe, PIPE_MTX(rpipe),
1105 PRIBIO | PCATCH, "pipewr", 0);
1106 if (error != 0)
1107 break;
1108 /*
1109 * If read side wants to go away, we just issue a signal
1110 * to ourselves.
1111 */
1112 if (wpipe->pipe_state & PIPE_EOF) {
1113 error = EPIPE;
1114 break;
1115 }
1116 }
1117 }
1118
1119 --wpipe->pipe_busy;
1120
1121 if ((wpipe->pipe_busy == 0) && (wpipe->pipe_state & PIPE_WANT)) {
1122 wpipe->pipe_state &= ~(PIPE_WANT | PIPE_WANTR);
1123 wakeup(wpipe);
1124 } else if (wpipe->pipe_buffer.cnt > 0) {
1125 /*
1126 * If we have put any characters in the buffer, we wake up
1127 * the reader.
1128 */
1129 if (wpipe->pipe_state & PIPE_WANTR) {
1130 wpipe->pipe_state &= ~PIPE_WANTR;
1131 wakeup(wpipe);
1132 }
1133 }
1134
1135 /*
1136 * Don't return EPIPE if I/O was successful
1137 */
1138 if ((wpipe->pipe_buffer.cnt == 0) &&
1139 (uio->uio_resid == 0) &&
1140 (error == EPIPE)) {
1141 error = 0;
1142 }
1143
1144 if (error == 0)
1145 vfs_timestamp(&wpipe->pipe_mtime);
1146
1147 /*
1148 * We have something to offer,
1149 * wake up select/poll.
1150 */
1151 if (wpipe->pipe_buffer.cnt)
1152 pipeselwakeup(wpipe);
1153
1154 PIPE_UNLOCK(rpipe);
1155 return (error);
1156}
1157
1158/*
1159 * we implement a very minimal set of ioctls for compatibility with sockets.
1160 */
1161static int
1162pipe_ioctl(fp, cmd, data, active_cred, td)
1163 struct file *fp;
1164 u_long cmd;
1165 void *data;
1166 struct ucred *active_cred;
1167 struct thread *td;
1168{
1169 struct pipe *mpipe = fp->f_data;
1170#ifdef MAC
1171 int error;
1172#endif
1173
1174 PIPE_LOCK(mpipe);
1175
#ifdef MAC
	error = mac_check_pipe_ioctl(active_cred, mpipe, cmd, data);
	if (error) {
		/* Don't leak the pipe mutex on the error return. */
		PIPE_UNLOCK(mpipe);
		return (error);
	}
#endif
1181
1182 switch (cmd) {
1183
1184 case FIONBIO:
1185 PIPE_UNLOCK(mpipe);
1186 return (0);
1187
1188 case FIOASYNC:
1189 if (*(int *)data) {
1190 mpipe->pipe_state |= PIPE_ASYNC;
1191 } else {
1192 mpipe->pipe_state &= ~PIPE_ASYNC;
1193 }
1194 PIPE_UNLOCK(mpipe);
1195 return (0);
1196
1197 case FIONREAD:
1198 if (mpipe->pipe_state & PIPE_DIRECTW)
1199 *(int *)data = mpipe->pipe_map.cnt;
1200 else
1201 *(int *)data = mpipe->pipe_buffer.cnt;
1202 PIPE_UNLOCK(mpipe);
1203 return (0);
1204
1205 case FIOSETOWN:
1206 PIPE_UNLOCK(mpipe);
1207 return (fsetown(*(int *)data, &mpipe->pipe_sigio));
1208
1209 case FIOGETOWN:
1210 PIPE_UNLOCK(mpipe);
1211 *(int *)data = fgetown(&mpipe->pipe_sigio);
1212 return (0);
1213
1214 /* This is deprecated, FIOSETOWN should be used instead. */
1215 case TIOCSPGRP:
1216 PIPE_UNLOCK(mpipe);
1217 return (fsetown(-(*(int *)data), &mpipe->pipe_sigio));
1218
1219 /* This is deprecated, FIOGETOWN should be used instead. */
1220 case TIOCGPGRP:
1221 PIPE_UNLOCK(mpipe);
1222 *(int *)data = -fgetown(&mpipe->pipe_sigio);
1223 return (0);
1224
1225 }
1226 PIPE_UNLOCK(mpipe);
1227 return (ENOTTY);
1228}
1229
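/*
 * Poll support: the pipe is readable when buffered or direct-write data is
 * pending or EOF has been seen; it is writable when at least PIPE_BUF bytes
 * of buffer space are free and no direct write is pending, or when the other
 * end has gone away; POLLHUP is reported once either end has seen EOF.
 */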
1230static int
1231pipe_poll(fp, events, active_cred, td)
1232 struct file *fp;
1233 int events;
1234 struct ucred *active_cred;
1235 struct thread *td;
1236{
1237 struct pipe *rpipe = fp->f_data;
1238 struct pipe *wpipe;
1239 int revents = 0;
1240#ifdef MAC
1241 int error;
1242#endif
1243
1244 wpipe = rpipe->pipe_peer;
1245 PIPE_LOCK(rpipe);
1246#ifdef MAC
1247 error = mac_check_pipe_poll(active_cred, rpipe);
1248 if (error)
1249 goto locked_error;
1250#endif
1251 if (events & (POLLIN | POLLRDNORM))
1252 if ((rpipe->pipe_state & PIPE_DIRECTW) ||
1253 (rpipe->pipe_buffer.cnt > 0) ||
1254 (rpipe->pipe_state & PIPE_EOF))
1255 revents |= events & (POLLIN | POLLRDNORM);
1256
1257 if (events & (POLLOUT | POLLWRNORM))
1258 if (wpipe == NULL || (wpipe->pipe_state & PIPE_EOF) ||
1259 (((wpipe->pipe_state & PIPE_DIRECTW) == 0) &&
1260 (wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt) >= PIPE_BUF))
1261 revents |= events & (POLLOUT | POLLWRNORM);
1262
1263 if ((rpipe->pipe_state & PIPE_EOF) ||
1264 (wpipe == NULL) ||
1265 (wpipe->pipe_state & PIPE_EOF))
1266 revents |= POLLHUP;
1267
1268 if (revents == 0) {
1269 if (events & (POLLIN | POLLRDNORM)) {
1270 selrecord(td, &rpipe->pipe_sel);
1271 rpipe->pipe_state |= PIPE_SEL;
1272 }
1273
1274 if (events & (POLLOUT | POLLWRNORM)) {
1275 selrecord(td, &wpipe->pipe_sel);
1276 wpipe->pipe_state |= PIPE_SEL;
1277 }
1278 }
1279#ifdef MAC
1280locked_error:
1281#endif
1282 PIPE_UNLOCK(rpipe);
1283
1284 return (revents);
1285}
1286
1287/*
1288 * We shouldn't need locks here as we're doing a read and this should
1289 * be a natural race.
1290 */
1291static int
1292pipe_stat(fp, ub, active_cred, td)
1293 struct file *fp;
1294 struct stat *ub;
1295 struct ucred *active_cred;
1296 struct thread *td;
1297{
1298 struct pipe *pipe = fp->f_data;
1299#ifdef MAC
1300 int error;
1301
1302 PIPE_LOCK(pipe);
1303 error = mac_check_pipe_stat(active_cred, pipe);
1304 PIPE_UNLOCK(pipe);
1305 if (error)
1306 return (error);
1307#endif
1308 bzero(ub, sizeof(*ub));
1309 ub->st_mode = S_IFIFO;
1310 ub->st_blksize = pipe->pipe_buffer.size;
1311 ub->st_size = pipe->pipe_buffer.cnt;
1312 ub->st_blocks = (ub->st_size + ub->st_blksize - 1) / ub->st_blksize;
1313 ub->st_atimespec = pipe->pipe_atime;
1314 ub->st_mtimespec = pipe->pipe_mtime;
1315 ub->st_ctimespec = pipe->pipe_ctime;
1316 ub->st_uid = fp->f_cred->cr_uid;
1317 ub->st_gid = fp->f_cred->cr_gid;
1318 /*
1319 * Left as 0: st_dev, st_ino, st_nlink, st_rdev, st_flags, st_gen.
1320 * XXX (st_dev, st_ino) should be unique.
1321 */
1322 return (0);
1323}
1324
1325/* ARGSUSED */
1326static int
1327pipe_close(fp, td)
1328 struct file *fp;
1329 struct thread *td;
1330{
1331 struct pipe *cpipe = fp->f_data;
1332
1333 fp->f_ops = &badfileops;
1334 fp->f_data = NULL;
1335 funsetown(&cpipe->pipe_sigio);
1336 pipeclose(cpipe);
1337 return (0);
1338}
1339
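/*
 * Release the pipe's pageable ring buffer and any direct-write kva back to
 * kernel_map, and update the amountpipekva and nbigpipe accounting.
 */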
1340static void
1341pipe_free_kmem(cpipe)
1342 struct pipe *cpipe;
1343{
1344
1345 GIANT_REQUIRED;
1346 KASSERT(cpipe->pipe_mtxp == NULL || !mtx_owned(PIPE_MTX(cpipe)),
1347 ("pipespace: pipe mutex locked"));
1348
1349 if (cpipe->pipe_buffer.buffer != NULL) {
1350 if (cpipe->pipe_buffer.size > PIPE_SIZE)
1351 --nbigpipe;
1352 atomic_subtract_int(&amountpipekva, cpipe->pipe_buffer.size);
1353 kmem_free(kernel_map,
1354 (vm_offset_t)cpipe->pipe_buffer.buffer,
1355 cpipe->pipe_buffer.size);
1356 cpipe->pipe_buffer.buffer = NULL;
1357 }
1358#ifndef PIPE_NODIRECT
1359 if (cpipe->pipe_map.kva != 0) {
1360 atomic_subtract_int(&amountpipekva,
1361 cpipe->pipe_buffer.size + PAGE_SIZE);
1362 kmem_free(kernel_map,
1363 cpipe->pipe_map.kva,
1364 cpipe->pipe_buffer.size + PAGE_SIZE);
1365 cpipe->pipe_map.cnt = 0;
1366 cpipe->pipe_map.kva = 0;
1367 cpipe->pipe_map.pos = 0;
1368 cpipe->pipe_map.npages = 0;
1369 }
1370#endif
1371}
1372
1373/*
1374 * shutdown the pipe
1375 */
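/*
 * Wake up anyone sleeping on the pipe, mark EOF on the peer, and free the
 * buffers; the shared mutex is destroyed only when the second endpoint of
 * the pair is closed.
 */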
1376static void
1377pipeclose(cpipe)
1378 struct pipe *cpipe;
1379{
1380 struct pipe *ppipe;
1381 int hadpeer;
1382
1383 if (cpipe == NULL)
1384 return;
1385
1386 hadpeer = 0;
1387
1388 /* partially created pipes won't have a valid mutex. */
1389 if (PIPE_MTX(cpipe) != NULL)
1390 PIPE_LOCK(cpipe);
1391
1392 pipeselwakeup(cpipe);
1393
1394 /*
1395 * If the other side is blocked, wake it up saying that
1396 * we want to close it down.
1397 */
1398 while (cpipe->pipe_busy) {
1399 wakeup(cpipe);
1400 cpipe->pipe_state |= PIPE_WANT | PIPE_EOF;
1401 msleep(cpipe, PIPE_MTX(cpipe), PRIBIO, "pipecl", 0);
1402 }
1403
1404#ifdef MAC
1405 if (cpipe->pipe_label != NULL && cpipe->pipe_peer == NULL)
1406 mac_destroy_pipe(cpipe);
1407#endif
1408
1409 /*
1410 * Disconnect from peer
1411 */
1412 if ((ppipe = cpipe->pipe_peer) != NULL) {
1413 hadpeer++;
1414 pipeselwakeup(ppipe);
1415
1416 ppipe->pipe_state |= PIPE_EOF;
1417 wakeup(ppipe);
1418 KNOTE(&ppipe->pipe_sel.si_note, 0);
1419 ppipe->pipe_peer = NULL;
1420 }
1421 /*
1422 * free resources
1423 */
1424 if (PIPE_MTX(cpipe) != NULL) {
1425 PIPE_UNLOCK(cpipe);
1426 if (!hadpeer) {
1427 mtx_destroy(PIPE_MTX(cpipe));
1428 free(PIPE_MTX(cpipe), M_TEMP);
1429 }
1430 }
1431 mtx_lock(&Giant);
1432 pipe_free_kmem(cpipe);
1433 uma_zfree(pipe_zone, cpipe);
1434 mtx_unlock(&Giant);
1435}
1436
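/*
 * kqueue support: EVFILT_READ knotes attach to this endpoint, while
 * EVFILT_WRITE knotes attach to the peer, since that is where written data
 * is buffered and where wakeups for available space are posted.
 */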
1437/*ARGSUSED*/
1438static int
1439pipe_kqfilter(struct file *fp, struct knote *kn)
1440{
1441 struct pipe *cpipe;
1442
1443 cpipe = kn->kn_fp->f_data;
1444 switch (kn->kn_filter) {
1445 case EVFILT_READ:
1446 kn->kn_fop = &pipe_rfiltops;
1447 break;
1448 case EVFILT_WRITE:
1449 kn->kn_fop = &pipe_wfiltops;
1450 cpipe = cpipe->pipe_peer;
1451 if (cpipe == NULL)
1452 /* other end of pipe has been closed */
1453 return (EBADF);
1454 break;
1455 default:
1456 return (1);
1457 }
1458 kn->kn_hook = cpipe;
1459
1460 PIPE_LOCK(cpipe);
1461 SLIST_INSERT_HEAD(&cpipe->pipe_sel.si_note, kn, kn_selnext);
1462 PIPE_UNLOCK(cpipe);
1463 return (0);
1464}
1465
1466static void
1467filt_pipedetach(struct knote *kn)
1468{
1469 struct pipe *cpipe = (struct pipe *)kn->kn_hook;
1470
1471 PIPE_LOCK(cpipe);
1472 SLIST_REMOVE(&cpipe->pipe_sel.si_note, kn, knote, kn_selnext);
1473 PIPE_UNLOCK(cpipe);
1474}
1475
1476/*ARGSUSED*/
1477static int
1478filt_piperead(struct knote *kn, long hint)
1479{
1480 struct pipe *rpipe = kn->kn_fp->f_data;
1481 struct pipe *wpipe = rpipe->pipe_peer;
1482
1483 PIPE_LOCK(rpipe);
1484 kn->kn_data = rpipe->pipe_buffer.cnt;
1485 if ((kn->kn_data == 0) && (rpipe->pipe_state & PIPE_DIRECTW))
1486 kn->kn_data = rpipe->pipe_map.cnt;
1487
1488 if ((rpipe->pipe_state & PIPE_EOF) ||
1489 (wpipe == NULL) || (wpipe->pipe_state & PIPE_EOF)) {
1490 kn->kn_flags |= EV_EOF;
1491 PIPE_UNLOCK(rpipe);
1492 return (1);
1493 }
1494 PIPE_UNLOCK(rpipe);
1495 return (kn->kn_data > 0);
1496}
1497
1498/*ARGSUSED*/
1499static int
1500filt_pipewrite(struct knote *kn, long hint)
1501{
1502 struct pipe *rpipe = kn->kn_fp->f_data;
1503 struct pipe *wpipe = rpipe->pipe_peer;
1504
1505 PIPE_LOCK(rpipe);
1506 if ((wpipe == NULL) || (wpipe->pipe_state & PIPE_EOF)) {
1507 kn->kn_data = 0;
1508 kn->kn_flags |= EV_EOF;
1509 PIPE_UNLOCK(rpipe);
1510 return (1);
1511 }
1512 kn->kn_data = wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt;
1513 if (wpipe->pipe_state & PIPE_DIRECTW)
1514 kn->kn_data = 0;
1515
1516 PIPE_UNLOCK(rpipe);
1517 return (kn->kn_data >= PIPE_BUF);
1518}