sys_pipe.c revision 43623
/*
 * Copyright (c) 1996 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 * 4. Modifications may be freely made to this file if the above conditions
 *    are met.
 *
 * $Id: sys_pipe.c,v 1.49 1999/01/28 00:57:47 dillon Exp $
 */

/*
 * This file contains a high-performance replacement for the socket-based
 * pipes scheme originally used in FreeBSD/4.4Lite.  It does not support
 * all features of sockets, but it does everything that pipes normally
 * do.
 */

/*
 * This code has two modes of operation: a small write mode and a large
 * write mode.  The small write mode acts like conventional pipes with
 * a kernel buffer.  If a write is smaller than PIPE_MINDIRECT, the
 * "normal" pipe buffering is done.  If the write is between PIPE_MINDIRECT
 * and PIPE_SIZE in size, the sender's buffer is fully mapped and wired into
 * the kernel, and the receiving process copies directly from those pages in
 * the sending process.
 *
 * If the sending process receives a signal, it is possible that it will
 * go away, and certainly its address space can change, because control
 * is returned back to the user-mode side.  In that case, the pipe code
 * arranges to copy the buffer supplied by the user process to a pageable
 * kernel buffer, and the receiving process will grab the data from the
 * pageable kernel buffer.  Since signals don't happen all that often,
 * the copy operation is normally eliminated.
 *
 * The constant PIPE_MINDIRECT is chosen to make sure that buffering will
 * happen for small transfers so that the system will not spend all of
 * its time context switching.  PIPE_SIZE is constrained by the
 * amount of kernel virtual memory.
 */
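
/*
 * In outline, pipe_write() below chooses between the two modes roughly
 * like this (a simplified sketch; the authoritative tests live in
 * pipe_write() itself):
 *
 *	if (uio->uio_iov->iov_len >= PIPE_MINDIRECT &&
 *	    (fp->f_flag & FNONBLOCK) == 0 &&
 *	    (wpipe->pipe_map.kva || amountpipekva < LIMITPIPEKVA))
 *		error = pipe_direct_write(wpipe, uio);	-- page flipping
 *	else
 *		-- copy through the circular kernel buffer with uiomove()
 */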

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/ttycom.h>
#include <sys/stat.h>
#include <sys/poll.h>
#include <sys/select.h>
#include <sys/signalvar.h>
#include <sys/sysproto.h>
#include <sys/pipe.h>
#include <sys/uio.h>

#include <vm/vm.h>
#include <vm/vm_prot.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_zone.h>

/*
 * Use this define if you want to disable *fancy* VM things.  Expect
 * roughly a 30% decrease in transfer rate.  This could be useful for
 * NetBSD or OpenBSD.
 */
/* #define PIPE_NODIRECT */

/*
 * interfaces to the outside world
 */
static int pipe_read __P((struct file *fp, struct uio *uio,
		struct ucred *cred));
static int pipe_write __P((struct file *fp, struct uio *uio,
		struct ucred *cred));
static int pipe_close __P((struct file *fp, struct proc *p));
static int pipe_poll __P((struct file *fp, int events, struct ucred *cred,
		struct proc *p));
static int pipe_ioctl __P((struct file *fp, u_long cmd, caddr_t data, struct proc *p));

static struct fileops pipeops =
    { pipe_read, pipe_write, pipe_ioctl, pipe_poll, pipe_close };

/*
 * Default pipe buffer size(s); this can be fairly large now because pipe
 * space is pageable.  The pipe code will try to maintain locality of
 * reference for performance reasons, so small amounts of outstanding I/O
 * will not wipe the cache.
 */
#define MINPIPESIZE (PIPE_SIZE/3)
#define MAXPIPESIZE (2*PIPE_SIZE/3)
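
/*
 * MINPIPESIZE is the low watermark used by pipe_read() below: once the
 * amount left unread drops below it, a blocked writer is worth waking
 * up.  (With the usual 16K PIPE_SIZE that is roughly 5.3K.)
 */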

/*
 * Maximum amount of kva for pipes -- this is kind-of a soft limit, but
 * is there so that on large systems, we don't exhaust it.
 */
#define MAXPIPEKVA (8*1024*1024)

/*
 * Limit for direct transfers; we cannot, of course, limit
 * the amount of kva for pipes in general, though.
 */
#define LIMITPIPEKVA (16*1024*1024)
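
/*
 * The two limits cooperate as follows (see the code below): a new
 * direct-write mapping is only set up while amountpipekva is under
 * LIMITPIPEKVA (or if the pipe already owns a mapping), and
 * pipe_destroy_write_buffer() gives a pipe's kva back to the system
 * once amountpipekva has grown past MAXPIPEKVA.
 */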

/*
 * Limit the number of "big" pipes
 */
#define LIMITBIGPIPES	32
static int nbigpipe;

static int amountpipekva;

static void pipeclose __P((struct pipe *cpipe));
static void pipeinit __P((struct pipe *cpipe));
static __inline int pipelock __P((struct pipe *cpipe, int catch));
static __inline void pipeunlock __P((struct pipe *cpipe));
static __inline void pipeselwakeup __P((struct pipe *cpipe));
#ifndef PIPE_NODIRECT
static int pipe_build_write_buffer __P((struct pipe *wpipe, struct uio *uio));
static void pipe_destroy_write_buffer __P((struct pipe *wpipe));
static int pipe_direct_write __P((struct pipe *wpipe, struct uio *uio));
static void pipe_clone_write_buffer __P((struct pipe *wpipe));
#endif
static void pipespace __P((struct pipe *cpipe));

static vm_zone_t pipe_zone;

/*
 * The pipe system call for the DTYPE_PIPE type of pipes
 */

/* ARGSUSED */
int
pipe(p, uap)
	struct proc *p;
	struct pipe_args /* {
		int	dummy;
	} */ *uap;
{
	register struct filedesc *fdp = p->p_fd;
	struct file *rf, *wf;
	struct pipe *rpipe, *wpipe;
	int fd, error;

	if (pipe_zone == NULL)
		pipe_zone = zinit("PIPE", sizeof (struct pipe), 0, 0, 4);

	rpipe = zalloc(pipe_zone);
	pipeinit(rpipe);
	rpipe->pipe_state |= PIPE_DIRECTOK;
	wpipe = zalloc(pipe_zone);
	pipeinit(wpipe);
	wpipe->pipe_state |= PIPE_DIRECTOK;

	error = falloc(p, &rf, &fd);
	if (error)
		goto free2;
	p->p_retval[0] = fd;
	rf->f_flag = FREAD | FWRITE;
	rf->f_type = DTYPE_PIPE;
	rf->f_ops = &pipeops;
	rf->f_data = (caddr_t)rpipe;
	error = falloc(p, &wf, &fd);
	if (error)
		goto free3;
	wf->f_flag = FREAD | FWRITE;
	wf->f_type = DTYPE_PIPE;
	wf->f_ops = &pipeops;
	wf->f_data = (caddr_t)wpipe;
	p->p_retval[1] = fd;

	rpipe->pipe_peer = wpipe;
	wpipe->pipe_peer = rpipe;

	return (0);
free3:
	ffree(rf);
	fdp->fd_ofiles[p->p_retval[0]] = 0;
free2:
	pipeclose(wpipe);
	pipeclose(rpipe);
	return (error);
}
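
/*
 * From user mode this is the traditional pipe(2) interface; a minimal
 * sketch of its use (the fd pairing follows from the p_retval
 * assignments above):
 *
 *	int fds[2];
 *	char c;
 *
 *	if (pipe(fds) == 0) {
 *		write(fds[1], "x", 1);	-- fds[1] is the write end
 *		read(fds[0], &c, 1);	-- fds[0] is the read end
 *	}
 */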

/*
 * Allocate kva for the pipe circular buffer; the space is pageable.
 */
static void
pipespace(cpipe)
	struct pipe *cpipe;
{
	int npages, error;

	npages = round_page(cpipe->pipe_buffer.size)/PAGE_SIZE;
	/*
	 * Create an object, I don't like the idea of paging to/from
	 * kernel_object.
	 * XXX -- minor change needed here for NetBSD/OpenBSD VM systems.
	 */
	cpipe->pipe_buffer.object = vm_object_allocate(OBJT_DEFAULT, npages);
	cpipe->pipe_buffer.buffer = (caddr_t) vm_map_min(kernel_map);

	/*
	 * Insert the object into the kernel map, and allocate kva for it.
	 * The map entry is, by default, pageable.
	 * XXX -- minor change needed here for NetBSD/OpenBSD VM systems.
	 */
	error = vm_map_find(kernel_map, cpipe->pipe_buffer.object, 0,
		(vm_offset_t *) &cpipe->pipe_buffer.buffer,
		cpipe->pipe_buffer.size, 1,
		VM_PROT_ALL, VM_PROT_ALL, 0);

	if (error != KERN_SUCCESS)
		panic("pipespace: cannot allocate pipe -- out of kvm -- code = %d", error);
	amountpipekva += cpipe->pipe_buffer.size;
}
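
/*
 * For a sense of scale: with the usual 16K PIPE_SIZE and 4K pages
 * (both assumptions, not requirements), npages above works out to 4,
 * and each pipe end therefore costs 16K of pageable kva plus a VM
 * object to back it.
 */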

/*
 * initialize and allocate VM and memory for pipe
 */
static void
pipeinit(cpipe)
	struct pipe *cpipe;
{

	cpipe->pipe_buffer.in = 0;
	cpipe->pipe_buffer.out = 0;
	cpipe->pipe_buffer.cnt = 0;
	cpipe->pipe_buffer.size = PIPE_SIZE;

	/* Buffer kva gets dynamically allocated */
	cpipe->pipe_buffer.buffer = NULL;
	/* cpipe->pipe_buffer.object = invalid */

	cpipe->pipe_state = 0;
	cpipe->pipe_peer = NULL;
	cpipe->pipe_busy = 0;
	getnanotime(&cpipe->pipe_ctime);
	cpipe->pipe_atime = cpipe->pipe_ctime;
	cpipe->pipe_mtime = cpipe->pipe_ctime;
	bzero(&cpipe->pipe_sel, sizeof cpipe->pipe_sel);

#ifndef PIPE_NODIRECT
	/*
	 * pipe data structure initializations to support direct pipe I/O
	 */
	cpipe->pipe_map.cnt = 0;
	cpipe->pipe_map.kva = 0;
	cpipe->pipe_map.pos = 0;
	cpipe->pipe_map.npages = 0;
	/* cpipe->pipe_map.ms[] = invalid */
#endif
}

/*
 * lock a pipe for I/O, blocking other access
 */
static __inline int
pipelock(cpipe, catch)
	struct pipe *cpipe;
	int catch;
{
	int error;

	while (cpipe->pipe_state & PIPE_LOCK) {
		cpipe->pipe_state |= PIPE_LWANT;
		if ((error = tsleep(cpipe,
			catch ? (PRIBIO | PCATCH) : PRIBIO, "pipelk", 0)) != 0) {
			return error;
		}
	}
	cpipe->pipe_state |= PIPE_LOCK;
	return 0;
}

/*
 * unlock a pipe I/O lock
 */
static __inline void
pipeunlock(cpipe)
	struct pipe *cpipe;
{
	cpipe->pipe_state &= ~PIPE_LOCK;
	if (cpipe->pipe_state & PIPE_LWANT) {
		cpipe->pipe_state &= ~PIPE_LWANT;
		wakeup(cpipe);
	}
}
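
/*
 * Together these implement a tiny sleep lock: PIPE_LOCK is the lock
 * bit, PIPE_LWANT marks a waiter, and tsleep()/wakeup() on the pipe
 * address do the blocking.  When "catch" is set the sleep is signal-
 * interruptible and pipelock() returns the tsleep() error, which is
 * why callers passing catch=1 must be prepared for EINTR/ERESTART.
 */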

static __inline void
pipeselwakeup(cpipe)
	struct pipe *cpipe;
{
	if (cpipe->pipe_state & PIPE_SEL) {
		cpipe->pipe_state &= ~PIPE_SEL;
		selwakeup(&cpipe->pipe_sel);
	}
	if ((cpipe->pipe_state & PIPE_ASYNC) && cpipe->pipe_sigio)
		pgsigio(cpipe->pipe_sigio, SIGIO, 0);
}

/* ARGSUSED */
static int
pipe_read(fp, uio, cred)
	struct file *fp;
	struct uio *uio;
	struct ucred *cred;
{
	struct pipe *rpipe = (struct pipe *) fp->f_data;
	int error = 0;
	int nread = 0;
	u_int size;

	++rpipe->pipe_busy;
	while (uio->uio_resid) {
		/*
		 * normal pipe buffer receive
		 */
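		/*
		 * Each pass below copies at most one contiguous segment;
		 * a wrapped buffer therefore drains in two passes.  For
		 * example (illustrative numbers): with size 16384,
		 * out 16000 and cnt 1000, the first pass moves 384 bytes,
		 * out wraps to 0, and the next pass moves the remaining
		 * 616 bytes.
		 */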
		if (rpipe->pipe_buffer.cnt > 0) {
			size = rpipe->pipe_buffer.size - rpipe->pipe_buffer.out;
			if (size > rpipe->pipe_buffer.cnt)
				size = rpipe->pipe_buffer.cnt;
			if (size > (u_int) uio->uio_resid)
				size = (u_int) uio->uio_resid;
			if ((error = pipelock(rpipe, 1)) == 0) {
				error = uiomove(&rpipe->pipe_buffer.buffer[rpipe->pipe_buffer.out],
					size, uio);
				pipeunlock(rpipe);
			}
			if (error) {
				break;
			}
			rpipe->pipe_buffer.out += size;
			if (rpipe->pipe_buffer.out >= rpipe->pipe_buffer.size)
				rpipe->pipe_buffer.out = 0;

			rpipe->pipe_buffer.cnt -= size;
			nread += size;
#ifndef PIPE_NODIRECT
		/*
		 * Direct copy, bypassing a kernel buffer.
		 */
		} else if ((size = rpipe->pipe_map.cnt) &&
			(rpipe->pipe_state & PIPE_DIRECTW)) {
			caddr_t va;

			if (size > (u_int) uio->uio_resid)
				size = (u_int) uio->uio_resid;
			if ((error = pipelock(rpipe, 1)) == 0) {
				va = (caddr_t) rpipe->pipe_map.kva + rpipe->pipe_map.pos;
				error = uiomove(va, size, uio);
				pipeunlock(rpipe);
			}
			if (error)
				break;
			nread += size;
			rpipe->pipe_map.pos += size;
			rpipe->pipe_map.cnt -= size;
			if (rpipe->pipe_map.cnt == 0) {
				rpipe->pipe_state &= ~PIPE_DIRECTW;
				wakeup(rpipe);
			}
#endif
		} else {
			/*
			 * If there is no more to read in the pipe, reset
			 * its pointers to the beginning.  This improves
			 * cache hit stats.
			 *
			 * We get this over with now because it may block
			 * and cause the state to change out from under us,
			 * rather than have to re-test the state both before
			 * and after this fragment.
			 */
			if ((error = pipelock(rpipe, 1)) == 0) {
				if (rpipe->pipe_buffer.cnt == 0) {
					rpipe->pipe_buffer.in = 0;
					rpipe->pipe_buffer.out = 0;
				}
				pipeunlock(rpipe);

				/*
				 * If the pipe filled up due to pipelock
				 * blocking, loop back up.
				 */
				if (rpipe->pipe_buffer.cnt > 0)
					continue;
			}

			/*
			 * detect EOF condition
			 */
			if (rpipe->pipe_state & PIPE_EOF) {
				/* XXX error = ? */
				break;
			}

			/*
			 * If the "write-side" has been blocked, wake it up now.
			 */
			if (rpipe->pipe_state & PIPE_WANTW) {
				rpipe->pipe_state &= ~PIPE_WANTW;
				wakeup(rpipe);
			}

			/*
			 * break if error (signal via pipelock), or if some
			 * data was read
			 */
			if (error || nread > 0)
				break;

			/*
			 * Handle non-blocking mode operation
			 */
			if (fp->f_flag & FNONBLOCK) {
				error = EAGAIN;
				break;
			}

			/*
			 * Wait for more data
			 */
			rpipe->pipe_state |= PIPE_WANTR;
			if ((error = tsleep(rpipe, PRIBIO | PCATCH, "piperd", 0)) != 0) {
				break;
			}
		}
	}

	if (error == 0)
		getnanotime(&rpipe->pipe_atime);

	--rpipe->pipe_busy;
	if ((rpipe->pipe_busy == 0) && (rpipe->pipe_state & PIPE_WANT)) {
		rpipe->pipe_state &= ~(PIPE_WANT | PIPE_WANTW);
		wakeup(rpipe);
	} else if (rpipe->pipe_buffer.cnt < MINPIPESIZE) {
		/*
		 * If there is no more to read in the pipe, reset
		 * its pointers to the beginning.  This improves
		 * cache hit stats.
		 */
		if (rpipe->pipe_buffer.cnt == 0) {
			if ((error == 0) && (error = pipelock(rpipe, 1)) == 0) {
				rpipe->pipe_buffer.in = 0;
				rpipe->pipe_buffer.out = 0;
				pipeunlock(rpipe);
			}
		}

		/*
		 * If the "write-side" has been blocked, wake it up now.
		 */
		if (rpipe->pipe_state & PIPE_WANTW) {
			rpipe->pipe_state &= ~PIPE_WANTW;
			wakeup(rpipe);
		}
	}

	if ((rpipe->pipe_buffer.size - rpipe->pipe_buffer.cnt) >= PIPE_BUF)
		pipeselwakeup(rpipe);

	return error;
}

#ifndef PIPE_NODIRECT
/*
 * Map the sending process's buffer into kernel space and wire it.
 * This is similar to a physical write operation.
 */
static int
pipe_build_write_buffer(wpipe, uio)
	struct pipe *wpipe;
	struct uio *uio;
{
	u_int size;
	int i;
	vm_offset_t addr, endaddr, paddr;

	size = (u_int) uio->uio_iov->iov_len;
	if (size > wpipe->pipe_buffer.size)
		size = wpipe->pipe_buffer.size;

	endaddr = round_page((vm_offset_t)uio->uio_iov->iov_base + size);
	for (i = 0, addr = trunc_page((vm_offset_t)uio->uio_iov->iov_base);
		addr < endaddr;
		addr += PAGE_SIZE, i += 1) {

		vm_page_t m;

		vm_fault_quick((caddr_t) addr, VM_PROT_READ);
		paddr = pmap_kextract(addr);
		if (!paddr) {
			int j;

			for (j = 0; j < i; j++)
				vm_page_unwire(wpipe->pipe_map.ms[j], 1);
			return EFAULT;
		}

		m = PHYS_TO_VM_PAGE(paddr);
		vm_page_wire(m);
		wpipe->pipe_map.ms[i] = m;
	}

	/*
	 * set up the control block
	 */
	wpipe->pipe_map.npages = i;
	wpipe->pipe_map.pos = ((vm_offset_t) uio->uio_iov->iov_base) & PAGE_MASK;
	wpipe->pipe_map.cnt = size;

	/*
	 * and map the buffer
	 */
	if (wpipe->pipe_map.kva == 0) {
		/*
		 * We need to allocate space for an extra page because the
		 * address range might (will) span pages at times.
		 */
		wpipe->pipe_map.kva = kmem_alloc_pageable(kernel_map,
			wpipe->pipe_buffer.size + PAGE_SIZE);
		amountpipekva += wpipe->pipe_buffer.size + PAGE_SIZE;
	}
	pmap_qenter(wpipe->pipe_map.kva, wpipe->pipe_map.ms,
		wpipe->pipe_map.npages);

	/*
	 * and update the uio data
	 */
	uio->uio_iov->iov_len -= size;
	uio->uio_iov->iov_base += size;
	if (uio->uio_iov->iov_len == 0)
		uio->uio_iov++;
	uio->uio_resid -= size;
	uio->uio_offset += size;
	return 0;
}
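
/*
 * Illustrative page arithmetic (4K pages assumed): for an iov_base of
 * 0x1003 and a size of 8192, trunc_page() gives 0x1000 and round_page()
 * gives 0x4000, so three pages are wired and mapped, and pipe_map.pos
 * becomes 3 -- the data starts three bytes into the first mapped page.
 */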

/*
 * unmap and unwire the process buffer
 */
static void
pipe_destroy_write_buffer(wpipe)
	struct pipe *wpipe;
{
	int i;

	if (wpipe->pipe_map.kva) {
		pmap_qremove(wpipe->pipe_map.kva, wpipe->pipe_map.npages);

		if (amountpipekva > MAXPIPEKVA) {
			vm_offset_t kva = wpipe->pipe_map.kva;

			wpipe->pipe_map.kva = 0;
			kmem_free(kernel_map, kva,
				wpipe->pipe_buffer.size + PAGE_SIZE);
			amountpipekva -= wpipe->pipe_buffer.size + PAGE_SIZE;
		}
	}
	for (i = 0; i < wpipe->pipe_map.npages; i++)
		vm_page_unwire(wpipe->pipe_map.ms[i], 1);
}

/*
 * In the case of a signal, the writing process might go away.  This
 * code copies the data into the circular buffer so that the source
 * pages can be freed without loss of data.
 */
static void
pipe_clone_write_buffer(wpipe)
	struct pipe *wpipe;
{
	int size;
	int pos;

	size = wpipe->pipe_map.cnt;
	pos = wpipe->pipe_map.pos;
	bcopy((caddr_t) wpipe->pipe_map.kva + pos,
			(caddr_t) wpipe->pipe_buffer.buffer,
			size);

	wpipe->pipe_buffer.in = size;
	wpipe->pipe_buffer.out = 0;
	wpipe->pipe_buffer.cnt = size;
	wpipe->pipe_state &= ~PIPE_DIRECTW;

	pipe_destroy_write_buffer(wpipe);
}

/*
 * This implements the pipe buffer write mechanism.  Note that only
 * a direct write OR a normal pipe write can be pending at any given time.
 * If there are any characters in the pipe buffer, the direct write will
 * be deferred until the receiving process grabs all of the bytes from
 * the pipe buffer.  Then the direct mapping write is set up.
 */
static int
pipe_direct_write(wpipe, uio)
	struct pipe *wpipe;
	struct uio *uio;
{
	int error;

retry:
	while (wpipe->pipe_state & PIPE_DIRECTW) {
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}
		wpipe->pipe_state |= PIPE_WANTW;
		error = tsleep(wpipe,
				PRIBIO | PCATCH, "pipdww", 0);
		if (error)
			goto error1;
		if (wpipe->pipe_state & PIPE_EOF) {
			error = EPIPE;
			goto error1;
		}
	}
	wpipe->pipe_map.cnt = 0;	/* transfer not ready yet */
	if (wpipe->pipe_buffer.cnt > 0) {
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}

		wpipe->pipe_state |= PIPE_WANTW;
		error = tsleep(wpipe,
				PRIBIO | PCATCH, "pipdwc", 0);
		if (error)
			goto error1;
		if (wpipe->pipe_state & PIPE_EOF) {
			error = EPIPE;
			goto error1;
		}
		goto retry;
	}

	wpipe->pipe_state |= PIPE_DIRECTW;

	error = pipe_build_write_buffer(wpipe, uio);
	if (error) {
		wpipe->pipe_state &= ~PIPE_DIRECTW;
		goto error1;
	}

	error = 0;
	while (!error && (wpipe->pipe_state & PIPE_DIRECTW)) {
		if (wpipe->pipe_state & PIPE_EOF) {
			pipelock(wpipe, 0);
			pipe_destroy_write_buffer(wpipe);
			pipeunlock(wpipe);
			pipeselwakeup(wpipe);
			error = EPIPE;
			goto error1;
		}
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}
		pipeselwakeup(wpipe);
		error = tsleep(wpipe, PRIBIO | PCATCH, "pipdwt", 0);
	}

	pipelock(wpipe, 0);
	if (wpipe->pipe_state & PIPE_DIRECTW) {
		/*
		 * this bit of trickery substitutes a kernel buffer for
		 * the process that might be going away.
		 */
		pipe_clone_write_buffer(wpipe);
	} else {
		pipe_destroy_write_buffer(wpipe);
	}
	pipeunlock(wpipe);
	return error;

error1:
	wakeup(wpipe);
	return error;
}
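
/*
 * To recap the handshake: the writer waits out any previous direct
 * write and any buffered data, sets PIPE_DIRECTW and wires its pages;
 * the reader copies straight from the mapping and clears PIPE_DIRECTW
 * once pipe_map.cnt hits zero; the writer then unwires.  If the
 * writer's sleep is broken by a signal while PIPE_DIRECTW is still
 * set, pipe_clone_write_buffer() above first preserves the unread
 * bytes in the kernel buffer.
 */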
#endif

static int
pipe_write(fp, uio, cred)
	struct file *fp;
	struct uio *uio;
	struct ucred *cred;
{
	int error = 0;
	int orig_resid;
	struct pipe *wpipe, *rpipe;

	rpipe = (struct pipe *) fp->f_data;
	wpipe = rpipe->pipe_peer;

	/*
	 * detect loss of pipe read side, issue SIGPIPE if lost.
	 */
	if ((wpipe == NULL) || (wpipe->pipe_state & PIPE_EOF)) {
		return EPIPE;
	}

	/*
	 * If it is advantageous to resize the pipe buffer, do
	 * so.
	 */
	if ((uio->uio_resid > PIPE_SIZE) &&
		(nbigpipe < LIMITBIGPIPES) &&
		(wpipe->pipe_state & PIPE_DIRECTW) == 0 &&
		(wpipe->pipe_buffer.size <= PIPE_SIZE) &&
		(wpipe->pipe_buffer.cnt == 0)) {

		if (wpipe->pipe_buffer.buffer) {
			amountpipekva -= wpipe->pipe_buffer.size;
			kmem_free(kernel_map,
				(vm_offset_t)wpipe->pipe_buffer.buffer,
				wpipe->pipe_buffer.size);
		}

#ifndef PIPE_NODIRECT
		if (wpipe->pipe_map.kva) {
			amountpipekva -= wpipe->pipe_buffer.size + PAGE_SIZE;
			kmem_free(kernel_map,
				wpipe->pipe_map.kva,
				wpipe->pipe_buffer.size + PAGE_SIZE);
		}
#endif

		wpipe->pipe_buffer.in = 0;
		wpipe->pipe_buffer.out = 0;
		wpipe->pipe_buffer.cnt = 0;
		wpipe->pipe_buffer.size = BIG_PIPE_SIZE;
		wpipe->pipe_buffer.buffer = NULL;
		++nbigpipe;

#ifndef PIPE_NODIRECT
		wpipe->pipe_map.cnt = 0;
		wpipe->pipe_map.kva = 0;
		wpipe->pipe_map.pos = 0;
		wpipe->pipe_map.npages = 0;
#endif
	}

	if (wpipe->pipe_buffer.buffer == NULL) {
		if ((error = pipelock(wpipe, 1)) == 0) {
			pipespace(wpipe);
			pipeunlock(wpipe);
		} else {
			return error;
		}
	}

	++wpipe->pipe_busy;
	orig_resid = uio->uio_resid;
	while (uio->uio_resid) {
		int space;

#ifndef PIPE_NODIRECT
		/*
		 * If the transfer is large, we can gain performance if
		 * we do process-to-process copies directly.
		 * If the write is non-blocking, we don't use the
		 * direct write mechanism.
		 */
		if ((uio->uio_iov->iov_len >= PIPE_MINDIRECT) &&
		    (fp->f_flag & FNONBLOCK) == 0 &&
		    (wpipe->pipe_map.kva || (amountpipekva < LIMITPIPEKVA))) {
			error = pipe_direct_write(wpipe, uio);
			if (error) {
				break;
			}
			continue;
		}
#endif

		/*
		 * Pipe buffered writes cannot be coincident with
		 * direct writes.  We wait until the currently executing
		 * direct write is completed before we start filling the
		 * pipe buffer.
		 */
	retrywrite:
		while (wpipe->pipe_state & PIPE_DIRECTW) {
			if (wpipe->pipe_state & PIPE_WANTR) {
				wpipe->pipe_state &= ~PIPE_WANTR;
				wakeup(wpipe);
			}
			error = tsleep(wpipe,
					PRIBIO | PCATCH, "pipbww", 0);
			if (error)
				break;
		}

		space = wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt;

		/* Writes of size <= PIPE_BUF must be atomic. */
		/* XXX perhaps they need to be contiguous to be atomic? */
		if ((space < uio->uio_resid) && (orig_resid <= PIPE_BUF))
			space = 0;
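		/*
		 * For example (with the historical PIPE_BUF of 512): a
		 * 512-byte write finding only 100 bytes free sees space
		 * forced to 0 and sleeps below until all 512 bytes fit,
		 * so a small write is never interleaved with data from
		 * other writers.
		 */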

		if (space > 0 && (wpipe->pipe_buffer.cnt < PIPE_SIZE)) {
			/*
			 * This sets the maximum transfer to one contiguous
			 * segment of the buffer.
			 */
			int size = wpipe->pipe_buffer.size - wpipe->pipe_buffer.in;

			/*
			 * space is the size left in the buffer
			 */
			if (size > space)
				size = space;
			/*
			 * now limit it to the size of the uio transfer
			 */
			if (size > uio->uio_resid)
				size = uio->uio_resid;
			if ((error = pipelock(wpipe, 1)) == 0) {
				/*
				 * It is possible for a direct write to
				 * slip in on us... handle it here...
				 */
				if (wpipe->pipe_state & PIPE_DIRECTW) {
					pipeunlock(wpipe);
					goto retrywrite;
				}
				error = uiomove(&wpipe->pipe_buffer.buffer[wpipe->pipe_buffer.in],
					size, uio);
				pipeunlock(wpipe);
			}
			if (error)
				break;

			wpipe->pipe_buffer.in += size;
			if (wpipe->pipe_buffer.in >= wpipe->pipe_buffer.size)
				wpipe->pipe_buffer.in = 0;

			wpipe->pipe_buffer.cnt += size;
		} else {
			/*
			 * If the "read-side" has been blocked, wake it up now.
			 */
			if (wpipe->pipe_state & PIPE_WANTR) {
				wpipe->pipe_state &= ~PIPE_WANTR;
				wakeup(wpipe);
			}

			/*
			 * don't block on non-blocking I/O
			 */
			if (fp->f_flag & FNONBLOCK) {
				error = EAGAIN;
				break;
			}

			/*
			 * We have no more space and have something to offer,
			 * wake up select/poll.
			 */
			pipeselwakeup(wpipe);

			wpipe->pipe_state |= PIPE_WANTW;
			if ((error = tsleep(wpipe, (PRIBIO + 1) | PCATCH, "pipewr", 0)) != 0) {
				break;
			}
			/*
			 * If the read side wants to go away, we just issue
			 * a signal to ourselves.
			 */
			if (wpipe->pipe_state & PIPE_EOF) {
				error = EPIPE;
				break;
			}
		}
	}

	--wpipe->pipe_busy;
	if ((wpipe->pipe_busy == 0) &&
		(wpipe->pipe_state & PIPE_WANT)) {
		wpipe->pipe_state &= ~(PIPE_WANT | PIPE_WANTR);
		wakeup(wpipe);
	} else if (wpipe->pipe_buffer.cnt > 0) {
		/*
		 * If we have put any characters in the buffer, we wake up
		 * the reader.
		 */
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}
	}

	/*
	 * Don't return EPIPE if I/O was successful
	 */
	if ((wpipe->pipe_buffer.cnt == 0) &&
		(uio->uio_resid == 0) &&
		(error == EPIPE))
		error = 0;

	if (error == 0)
		getnanotime(&wpipe->pipe_mtime);

	/*
	 * We have something to offer,
	 * wake up select/poll.
	 */
	if (wpipe->pipe_buffer.cnt)
		pipeselwakeup(wpipe);

	return error;
}
936
937/*
938 * we implement a very minimal set of ioctls for compatibility with sockets.
939 */
940int
941pipe_ioctl(fp, cmd, data, p)
942	struct file *fp;
943	u_long cmd;
944	register caddr_t data;
945	struct proc *p;
946{
947	register struct pipe *mpipe = (struct pipe *)fp->f_data;
948
949	switch (cmd) {
950
951	case FIONBIO:
952		return (0);
953
954	case FIOASYNC:
955		if (*(int *)data) {
956			mpipe->pipe_state |= PIPE_ASYNC;
957		} else {
958			mpipe->pipe_state &= ~PIPE_ASYNC;
959		}
960		return (0);
961
962	case FIONREAD:
963		if (mpipe->pipe_state & PIPE_DIRECTW)
964			*(int *)data = mpipe->pipe_map.cnt;
965		else
966			*(int *)data = mpipe->pipe_buffer.cnt;
967		return (0);
968
969	case FIOSETOWN:
970		return (fsetown(*(int *)data, &mpipe->pipe_sigio));
971
972	case FIOGETOWN:
973		*(int *)data = fgetown(mpipe->pipe_sigio);
974		return (0);
975
976	/* This is deprecated, FIOSETOWN should be used instead. */
977	case TIOCSPGRP:
978		return (fsetown(-(*(int *)data), &mpipe->pipe_sigio));
979
980	/* This is deprecated, FIOGETOWN should be used instead. */
981	case TIOCGPGRP:
982		*(int *)data = -fgetown(mpipe->pipe_sigio);
983		return (0);
984
985	}
986	return (ENOTTY);
987}
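
/*
 * A minimal user-mode sketch of one of these, FIONREAD (assuming the
 * usual <sys/ioctl.h> wrapper and the fds[] pair from the pipe()
 * example above):
 *
 *	int nbytes;
 *
 *	if (ioctl(fds[0], FIONREAD, &nbytes) == 0)
 *		printf("%d bytes ready\n", nbytes);
 */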

int
pipe_poll(fp, events, cred, p)
	struct file *fp;
	int events;
	struct ucred *cred;
	struct proc *p;
{
	register struct pipe *rpipe = (struct pipe *)fp->f_data;
	struct pipe *wpipe;
	int revents = 0;

	wpipe = rpipe->pipe_peer;
	if (events & (POLLIN | POLLRDNORM))
		if ((rpipe->pipe_state & PIPE_DIRECTW) ||
		    (rpipe->pipe_buffer.cnt > 0) ||
		    (rpipe->pipe_state & PIPE_EOF))
			revents |= events & (POLLIN | POLLRDNORM);

	if (events & (POLLOUT | POLLWRNORM))
		if (wpipe == NULL || (wpipe->pipe_state & PIPE_EOF) ||
		    (((wpipe->pipe_state & PIPE_DIRECTW) == 0) &&
		     (wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt) >= PIPE_BUF))
			revents |= events & (POLLOUT | POLLWRNORM);

	if ((rpipe->pipe_state & PIPE_EOF) ||
	    (wpipe == NULL) ||
	    (wpipe->pipe_state & PIPE_EOF))
		revents |= POLLHUP;

	if (revents == 0) {
		if (events & (POLLIN | POLLRDNORM)) {
			selrecord(p, &rpipe->pipe_sel);
			rpipe->pipe_state |= PIPE_SEL;
		}

		if (events & (POLLOUT | POLLWRNORM)) {
			selrecord(p, &wpipe->pipe_sel);
			wpipe->pipe_state |= PIPE_SEL;
		}
	}

	return (revents);
}
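
/*
 * A user-mode sketch of the resulting semantics (INFTIM from <poll.h>):
 * POLLIN fires once data is buffered, a direct write is pending, or the
 * write side is gone; POLLOUT fires once at least PIPE_BUF bytes fit.
 *
 *	struct pollfd pfd = { fds[0], POLLIN, 0 };
 *
 *	if (poll(&pfd, 1, INFTIM) > 0 && (pfd.revents & POLLIN))
 *		-- data (or EOF) is available on the read end
 */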

int
pipe_stat(pipe, ub)
	register struct pipe *pipe;
	register struct stat *ub;
{
	bzero((caddr_t)ub, sizeof (*ub));
	ub->st_mode = S_IFIFO;
	ub->st_blksize = pipe->pipe_buffer.size;
	ub->st_size = pipe->pipe_buffer.cnt;
	ub->st_blocks = (ub->st_size + ub->st_blksize - 1) / ub->st_blksize;
	ub->st_atimespec = pipe->pipe_atime;
	ub->st_mtimespec = pipe->pipe_mtime;
	ub->st_ctimespec = pipe->pipe_ctime;
	/*
	 * Left as 0: st_dev, st_ino, st_nlink, st_uid, st_gid, st_rdev,
	 * st_flags, st_gen.
	 * XXX (st_dev, st_ino) should be unique.
	 */
	return 0;
}

/* ARGSUSED */
static int
pipe_close(fp, p)
	struct file *fp;
	struct proc *p;
{
	struct pipe *cpipe = (struct pipe *)fp->f_data;

	funsetown(cpipe->pipe_sigio);
	pipeclose(cpipe);
	fp->f_data = NULL;
	return 0;
}

/*
 * shut down the pipe
 */
static void
pipeclose(cpipe)
	struct pipe *cpipe;
{
	struct pipe *ppipe;

	if (cpipe) {
		pipeselwakeup(cpipe);

		/*
		 * If the other side is blocked, wake it up saying that
		 * we want to close it down.
		 */
		while (cpipe->pipe_busy) {
			wakeup(cpipe);
			cpipe->pipe_state |= PIPE_WANT | PIPE_EOF;
			tsleep(cpipe, PRIBIO, "pipecl", 0);
		}

		/*
		 * Disconnect from peer
		 */
		if ((ppipe = cpipe->pipe_peer) != NULL) {
			pipeselwakeup(ppipe);

			ppipe->pipe_state |= PIPE_EOF;
			wakeup(ppipe);
			ppipe->pipe_peer = NULL;
		}

		/*
		 * free resources
		 */
		if (cpipe->pipe_buffer.buffer) {
			if (cpipe->pipe_buffer.size > PIPE_SIZE)
				--nbigpipe;
			amountpipekva -= cpipe->pipe_buffer.size;
			kmem_free(kernel_map,
				(vm_offset_t)cpipe->pipe_buffer.buffer,
				cpipe->pipe_buffer.size);
		}
#ifndef PIPE_NODIRECT
		if (cpipe->pipe_map.kva) {
			amountpipekva -= cpipe->pipe_buffer.size + PAGE_SIZE;
			kmem_free(kernel_map,
				cpipe->pipe_map.kva,
				cpipe->pipe_buffer.size + PAGE_SIZE);
		}
#endif
		zfree(pipe_zone, cpipe);
	}
}
1123