kern_descrip.c revision 91406
1/*
2 * Copyright (c) 1982, 1986, 1989, 1991, 1993
3 *	The Regents of the University of California.  All rights reserved.
4 * (c) UNIX System Laboratories, Inc.
5 * All or some portions of this file are derived from material licensed
6 * to the University of California by American Telephone and Telegraph
7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8 * the permission of UNIX System Laboratories, Inc.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 *    notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 *    notice, this list of conditions and the following disclaimer in the
17 *    documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 *    must display the following acknowledgement:
20 *	This product includes software developed by the University of
21 *	California, Berkeley and its contributors.
22 * 4. Neither the name of the University nor the names of its contributors
23 *    may be used to endorse or promote products derived from this software
24 *    without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * SUCH DAMAGE.
37 *
38 *	@(#)kern_descrip.c	8.6 (Berkeley) 4/19/94
39 * $FreeBSD: head/sys/kern/kern_descrip.c 91406 2002-02-27 18:32:23Z jhb $
40 */
41
42#include "opt_compat.h"
43
44#include <sys/param.h>
45#include <sys/systm.h>
46#include <sys/lock.h>
47#include <sys/malloc.h>
48#include <sys/mutex.h>
49#include <sys/sysproto.h>
50#include <sys/conf.h>
51#include <sys/filedesc.h>
52#include <sys/kernel.h>
53#include <sys/sysctl.h>
54#include <sys/vnode.h>
55#include <sys/proc.h>
56#include <sys/file.h>
57#include <sys/stat.h>
58#include <sys/filio.h>
59#include <sys/fcntl.h>
60#include <sys/unistd.h>
61#include <sys/resourcevar.h>
62#include <sys/event.h>
63#include <sys/sx.h>
64#include <sys/socketvar.h>
65
66#include <machine/limits.h>
67
68#include <vm/vm.h>
69#include <vm/vm_extern.h>
70
/* Malloc types for descriptor tables, open-file structures and SIGIO state. */
static MALLOC_DEFINE(M_FILEDESC, "file desc", "Open file descriptor table");
MALLOC_DEFINE(M_FILE, "file", "Open file structure");
static MALLOC_DEFINE(M_SIGIO, "sigio", "sigio structures");

/* Forward declaration: open handler for the descriptor device (see cdevsw below). */
static	 d_open_t  fdopen;
/* NOTE(review): NUMFDESC is not referenced in this chunk — presumably sizes /dev/fd entries; confirm against fdopen/devfs code. */
#define NUMFDESC 64
77
/*
 * Character device switch for the descriptor device ("FD").  Only the
 * open entry point is implemented here; the remaining slots use the
 * stock no-op/unsupported stubs (noclose, noread, ...).
 */
#define CDEV_MAJOR 22
static struct cdevsw fildesc_cdevsw = {
	/* open */	fdopen,
	/* close */	noclose,
	/* read */	noread,
	/* write */	nowrite,
	/* ioctl */	noioctl,
	/* poll */	nopoll,
	/* mmap */	nommap,
	/* strategy */	nostrategy,
	/* name */	"FD",
	/* maj */	CDEV_MAJOR,
	/* dump */	nodump,
	/* psize */	nopsize,
	/* flags */	0,
};
94
/*
 * Local helper prototypes.  do_dup() is the shared back end for dup(),
 * dup2() and fcntl(F_DUPFD).  The badfo_* handlers back the badfileops
 * method vector installed on freshly allocated files (see falloc()).
 */
static int do_dup __P((struct filedesc *fdp, int old, int new, register_t *retval, struct thread *td));
static int badfo_readwrite __P((struct file *fp, struct uio *uio,
    struct ucred *cred, int flags, struct thread *td));
static int badfo_ioctl __P((struct file *fp, u_long com, caddr_t data,
    struct thread *td));
static int badfo_poll __P((struct file *fp, int events,
    struct ucred *cred, struct thread *td));
static int badfo_kqfilter __P((struct file *fp, struct knote *kn));
static int badfo_stat __P((struct file *fp, struct stat *sb, struct thread *td));
static int badfo_close __P((struct file *fp, struct thread *td));
105
/*
 * Descriptor management.
 */
struct filelist filehead;	/* head of list of open files */
int nfiles;			/* actual number of open files */
extern int cmask;		/* default creation mask, copied into new filedescs by fdinit() */
struct sx filelist_lock;	/* sx to protect filelist (and nfiles) */
113
114/*
115 * System calls on descriptors.
116 */
117#ifndef _SYS_SYSPROTO_H_
118struct getdtablesize_args {
119	int	dummy;
120};
121#endif
122/*
123 * MPSAFE
124 */
125/* ARGSUSED */
126int
127getdtablesize(td, uap)
128	struct thread *td;
129	struct getdtablesize_args *uap;
130{
131	struct proc *p = td->td_proc;
132
133	mtx_lock(&Giant);
134	td->td_retval[0] =
135	    min((int)p->p_rlimit[RLIMIT_NOFILE].rlim_cur, maxfilesperproc);
136	mtx_unlock(&Giant);
137	return (0);
138}
139
140/*
141 * Duplicate a file descriptor to a particular value.
142 *
143 * note: keep in mind that a potential race condition exists when closing
144 * descriptors from a shared descriptor table (via rfork).
145 */
#ifndef _SYS_SYSPROTO_H_
struct dup2_args {
	u_int	from;
	u_int	to;
};
#endif
/*
 * MPSAFE
 */
/* ARGSUSED */
int
dup2(td, uap)
	struct thread *td;
	struct dup2_args *uap;
{
	struct proc *p = td->td_proc;
	register struct filedesc *fdp = td->td_proc->p_fd;
	register u_int old = uap->from, new = uap->to;
	int i, error;

	FILEDESC_LOCK(fdp);
retry:
	/*
	 * The source descriptor must be open and the target must be
	 * below both the RLIMIT_NOFILE soft limit and maxfilesperproc.
	 */
	if (old >= fdp->fd_nfiles ||
	    fdp->fd_ofiles[old] == NULL ||
	    new >= p->p_rlimit[RLIMIT_NOFILE].rlim_cur ||
	    new >= maxfilesperproc) {
		FILEDESC_UNLOCK(fdp);
		return (EBADF);
	}
	/* Duplicating a descriptor onto itself is a no-op returning the fd. */
	if (old == new) {
		td->td_retval[0] = new;
		FILEDESC_UNLOCK(fdp);
		return (0);
	}
	/* Grow the descriptor table until slot 'new' exists. */
	if (new >= fdp->fd_nfiles) {
		if ((error = fdalloc(td, new, &i))) {
			FILEDESC_UNLOCK(fdp);
			return (error);
		}
		/*
		 * fdalloc() may block, retest everything.
		 */
		goto retry;
	}
	/* do_dup() consumes (unlocks) the filedesc lock. */
	error = do_dup(fdp, (int)old, (int)new, td->td_retval, td);
	return(error);
}
193
194/*
195 * Duplicate a file descriptor.
196 */
#ifndef _SYS_SYSPROTO_H_
struct dup_args {
	u_int	fd;
};
#endif
/*
 * MPSAFE
 */
/* ARGSUSED */
int
dup(td, uap)
	struct thread *td;
	struct dup_args *uap;
{
	register struct filedesc *fdp;
	u_int old;
	int new, error;

	old = uap->fd;
	fdp = td->td_proc->p_fd;
	FILEDESC_LOCK(fdp);
	/* The source descriptor must be open. */
	if (old >= fdp->fd_nfiles || fdp->fd_ofiles[old] == NULL) {
		FILEDESC_UNLOCK(fdp);
		return (EBADF);
	}
	/* Allocate the lowest available slot for the copy. */
	if ((error = fdalloc(td, 0, &new))) {
		FILEDESC_UNLOCK(fdp);
		return (error);
	}
	/* do_dup() consumes (unlocks) the filedesc lock. */
	error = do_dup(fdp, (int)old, new, td->td_retval, td);
	return (error);
}
229
230/*
231 * The file control system call.
232 */
#ifndef _SYS_SYSPROTO_H_
struct fcntl_args {
	int	fd;
	int	cmd;
	long	arg;
};
#endif
/*
 * MPSAFE
 */
/* ARGSUSED */
int
fcntl(td, uap)
	struct thread *td;
	register struct fcntl_args *uap;
{
	register struct proc *p = td->td_proc;
	register struct filedesc *fdp;
	register struct file *fp;
	register char *pop;		/* per-descriptor flag byte (UF_*) */
	struct vnode *vp;
	int i, tmp, error = 0, flg = F_POSIX;
	struct flock fl;
	u_int newmin;
	struct proc *leaderp;

	mtx_lock(&Giant);

	/* Translate fd to an open file; fp is NOT held yet at this point. */
	fdp = p->p_fd;
	FILEDESC_LOCK(fdp);
	if ((unsigned)uap->fd >= fdp->fd_nfiles ||
	    (fp = fdp->fd_ofiles[uap->fd]) == NULL) {
		FILEDESC_UNLOCK(fdp);
		error = EBADF;
		goto done2;
	}
	pop = &fdp->fd_ofileflags[uap->fd];

	/*
	 * Every case is responsible for releasing the filedesc lock,
	 * and for taking an fp reference (fhold) before any operation
	 * that may block.
	 */
	switch (uap->cmd) {
	case F_DUPFD:
		newmin = uap->arg;
		if (newmin >= p->p_rlimit[RLIMIT_NOFILE].rlim_cur ||
		    newmin >= maxfilesperproc) {
			FILEDESC_UNLOCK(fdp);
			error = EINVAL;
			break;
		}
		if ((error = fdalloc(td, newmin, &i))) {
			FILEDESC_UNLOCK(fdp);
			break;
		}
		/* do_dup() consumes (unlocks) the filedesc lock. */
		error = do_dup(fdp, uap->fd, i, td->td_retval, td);
		break;

	case F_GETFD:
		/* Bit 0 of the flag byte is the close-on-exec flag. */
		td->td_retval[0] = *pop & 1;
		FILEDESC_UNLOCK(fdp);
		break;

	case F_SETFD:
		*pop = (*pop &~ 1) | (uap->arg & 1);
		FILEDESC_UNLOCK(fdp);
		break;

	case F_GETFL:
		FILE_LOCK(fp);
		FILEDESC_UNLOCK(fdp);
		/* Convert kernel FREAD/FWRITE style flags back to O_* form. */
		td->td_retval[0] = OFLAGS(fp->f_flag);
		FILE_UNLOCK(fp);
		break;

	case F_SETFL:
		fhold(fp);
		FILEDESC_UNLOCK(fdp);
		fp->f_flag &= ~FCNTLFLAGS;
		fp->f_flag |= FFLAGS(uap->arg & ~O_ACCMODE) & FCNTLFLAGS;
		/* Push the new non-blocking state down to the object. */
		tmp = fp->f_flag & FNONBLOCK;
		error = fo_ioctl(fp, FIONBIO, (caddr_t)&tmp, td);
		if (error) {
			fdrop(fp, td);
			break;
		}
		/* Push the new async state; on failure, undo FIONBIO below. */
		tmp = fp->f_flag & FASYNC;
		error = fo_ioctl(fp, FIOASYNC, (caddr_t)&tmp, td);
		if (!error) {
			fdrop(fp, td);
			break;
		}
		/* FIOASYNC failed: roll back the non-blocking setting. */
		fp->f_flag &= ~FNONBLOCK;
		tmp = 0;
		(void)fo_ioctl(fp, FIONBIO, (caddr_t)&tmp, td);
		fdrop(fp, td);
		break;

	case F_GETOWN:
		fhold(fp);
		FILEDESC_UNLOCK(fdp);
		error = fo_ioctl(fp, FIOGETOWN, (caddr_t)td->td_retval, td);
		fdrop(fp, td);
		break;

	case F_SETOWN:
		fhold(fp);
		FILEDESC_UNLOCK(fdp);
		error = fo_ioctl(fp, FIOSETOWN, (caddr_t)&uap->arg, td);
		fdrop(fp, td);
		break;

	case F_SETLKW:
		flg |= F_WAIT;
		/* Fall into F_SETLK */

	case F_SETLK:
		/* Advisory locks only apply to vnodes. */
		if (fp->f_type != DTYPE_VNODE) {
			FILEDESC_UNLOCK(fdp);
			error = EBADF;
			break;
		}
		vp = (struct vnode *)fp->f_data;
		/*
		 * copyin/lockop may block
		 */
		fhold(fp);
		FILEDESC_UNLOCK(fdp);
		/* NOTE(review): redundant — vp was already assigned above. */
		vp = (struct vnode *)fp->f_data;

		/* Copy in the lock structure */
		error = copyin((caddr_t)(intptr_t)uap->arg, (caddr_t)&fl,
		    sizeof(fl));
		if (error) {
			fdrop(fp, td);
			break;
		}
		/* For SEEK_CUR, rebase l_start on f_offset, guarding overflow. */
		if (fl.l_whence == SEEK_CUR) {
			if (fp->f_offset < 0 ||
			    (fl.l_start > 0 &&
			     fp->f_offset > OFF_MAX - fl.l_start)) {
				fdrop(fp, td);
				error = EOVERFLOW;
				break;
			}
			fl.l_start += fp->f_offset;
		}

		/*
		 * Locks are owned by the process leader so that rfork()
		 * siblings share the same lock namespace.
		 */
		switch (fl.l_type) {
		case F_RDLCK:
			if ((fp->f_flag & FREAD) == 0) {
				error = EBADF;
				break;
			}
			PROC_LOCK(p);
			p->p_flag |= P_ADVLOCK;
			leaderp = p->p_leader;
			PROC_UNLOCK(p);
			error = VOP_ADVLOCK(vp, (caddr_t)leaderp, F_SETLK,
			    &fl, flg);
			break;
		case F_WRLCK:
			if ((fp->f_flag & FWRITE) == 0) {
				error = EBADF;
				break;
			}
			PROC_LOCK(p);
			p->p_flag |= P_ADVLOCK;
			leaderp = p->p_leader;
			PROC_UNLOCK(p);
			error = VOP_ADVLOCK(vp, (caddr_t)leaderp, F_SETLK,
			    &fl, flg);
			break;
		case F_UNLCK:
			PROC_LOCK(p);
			leaderp = p->p_leader;
			PROC_UNLOCK(p);
			error = VOP_ADVLOCK(vp, (caddr_t)leaderp, F_UNLCK,
				&fl, F_POSIX);
			break;
		default:
			error = EINVAL;
			break;
		}
		fdrop(fp, td);
		break;

	case F_GETLK:
		if (fp->f_type != DTYPE_VNODE) {
			FILEDESC_UNLOCK(fdp);
			error = EBADF;
			break;
		}
		vp = (struct vnode *)fp->f_data;
		/*
		 * copyin/lockop may block
		 */
		fhold(fp);
		FILEDESC_UNLOCK(fdp);
		/* NOTE(review): redundant — vp was already assigned above. */
		vp = (struct vnode *)fp->f_data;

		/* Copy in the lock structure */
		error = copyin((caddr_t)(intptr_t)uap->arg, (caddr_t)&fl,
		    sizeof(fl));
		if (error) {
			fdrop(fp, td);
			break;
		}
		if (fl.l_type != F_RDLCK && fl.l_type != F_WRLCK &&
		    fl.l_type != F_UNLCK) {
			fdrop(fp, td);
			error = EINVAL;
			break;
		}
		/* For SEEK_CUR, rebase l_start on f_offset, guarding overflow. */
		if (fl.l_whence == SEEK_CUR) {
			if ((fl.l_start > 0 &&
			     fp->f_offset > OFF_MAX - fl.l_start) ||
			    (fl.l_start < 0 &&
			     fp->f_offset < OFF_MIN - fl.l_start)) {
				fdrop(fp, td);
				error = EOVERFLOW;
				break;
			}
			fl.l_start += fp->f_offset;
		}
		error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_GETLK,
			    &fl, F_POSIX);
		fdrop(fp, td);
		/* Copy the (possibly updated) lock description back out. */
		if (error == 0) {
			error = copyout((caddr_t)&fl,
				    (caddr_t)(intptr_t)uap->arg, sizeof(fl));
		}
		break;
	default:
		FILEDESC_UNLOCK(fdp);
		error = EINVAL;
		break;
	}
done2:
	mtx_unlock(&Giant);
	return (error);
}
471
472/*
473 * Common code for dup, dup2, and fcntl(F_DUPFD).
474 * filedesc must be locked, but will be unlocked as a side effect.
475 */
static int
do_dup(fdp, old, new, retval, td)
	register struct filedesc *fdp;
	register int old, new;
	register_t *retval;
	struct thread *td;
{
	struct file *fp;
	struct file *delfp;

	/* Caller passes in a locked fdp; we always unlock it before return. */
	FILEDESC_LOCK_ASSERT(fdp, MA_OWNED);

	/*
	 * Save info on the descriptor being overwritten.  We have
	 * to do the unmap now, but we cannot close it without
	 * introducing an ownership race for the slot.
	 */
	delfp = fdp->fd_ofiles[new];
#if 0
	if (delfp && (fdp->fd_ofileflags[new] & UF_MAPPED))
		(void) munmapfd(td, new);
#endif

	/*
	 * Duplicate the source descriptor, update lastfile
	 */
	fp = fdp->fd_ofiles[old];
	fdp->fd_ofiles[new] = fp;
	/* The copy never inherits close-on-exec. */
	fdp->fd_ofileflags[new] = fdp->fd_ofileflags[old] &~ UF_EXCLOSE;
	fhold(fp);
	if (new > fdp->fd_lastfile)
		fdp->fd_lastfile = new;
	*retval = new;

	FILEDESC_UNLOCK(fdp);

	/*
	 * If we dup'd over a valid file, we now own the reference to it
	 * and must dispose of it using closef() semantics (as if a
	 * close() were performed on it).
	 */
	if (delfp) {
		/* closef() is called with Giant held. */
		mtx_lock(&Giant);
		(void) closef(delfp, td);
		mtx_unlock(&Giant);
	}
	return (0);
}
524
525/*
526 * If sigio is on the list associated with a process or process group,
527 * disable signalling from the device, remove sigio from the list and
528 * free sigio.
529 */
void
funsetown(sigio)
	struct sigio *sigio;
{
	int s;

	/* NULL means no owner is registered; nothing to do. */
	if (sigio == NULL)
		return;

	/*
	 * Clear the owner's back-pointer first (at splhigh) so the
	 * device no longer finds this sigio.
	 */
	s = splhigh();
	*(sigio->sio_myref) = NULL;
	splx(s);
	if ((sigio)->sio_pgid < 0) {
		/* Negative pgid: the registered owner is a process group. */
		struct pgrp *pg = (sigio)->sio_pgrp;
		PGRP_LOCK(pg);
		SLIST_REMOVE(&sigio->sio_pgrp->pg_sigiolst, sigio,
			     sigio, sio_pgsigio);
		PGRP_UNLOCK(pg);
	} else {
		/* Positive pgid: the registered owner is a single process. */
		struct proc *p = (sigio)->sio_proc;
		PROC_LOCK(p);
		SLIST_REMOVE(&sigio->sio_proc->p_sigiolst, sigio,
			     sigio, sio_pgsigio);
		PROC_UNLOCK(p);
	}
	crfree(sigio->sio_ucred);
	FREE(sigio, M_SIGIO);
}
558
559/* Free a list of sigio structures. */
void
funsetownlst(sigiolst)
	struct sigiolst *sigiolst;
{
	int s;
	struct sigio *sigio;
	struct proc *p;
	struct pgrp *pg;

	sigio = SLIST_FIRST(sigiolst);
	if (sigio == NULL)
		return;

	p = NULL;
	pg = NULL;

	/*
	 * Every entry of the list should belong
	 * to a single proc or pgrp.
	 */
	if (sigio->sio_pgid < 0) {
		pg = sigio->sio_pgrp;
		/* Caller must already hold the pgrp lock. */
		PGRP_LOCK_ASSERT(pg, MA_OWNED);
	} else /* if (sigio->sio_pgid > 0) */ {
		p = sigio->sio_proc;
		/* Caller must already hold the proc lock. */
		PROC_LOCK_ASSERT(p, MA_OWNED);
	}

	while ((sigio = SLIST_FIRST(sigiolst)) != NULL) {
		/* Detach the device back-pointer at splhigh, as in funsetown(). */
		s = splhigh();
		*(sigio->sio_myref) = NULL;
		splx(s);
		if (pg != NULL) {
			KASSERT(sigio->sio_pgid < 0, ("Proc sigio in pgrp sigio list"));
			KASSERT(sigio->sio_pgrp == pg, ("Bogus pgrp in sigio list"));
			SLIST_REMOVE(&pg->pg_sigiolst, sigio, sigio, sio_pgsigio);
			/*
			 * Drop the lock around crfree/FREE (which may block)
			 * and retake it before re-testing the list head.
			 */
			PGRP_UNLOCK(pg);
			crfree(sigio->sio_ucred);
			FREE(sigio, M_SIGIO);
			PGRP_LOCK(pg);
		} else /* if (p != NULL) */ {
			KASSERT(sigio->sio_pgid > 0, ("Pgrp sigio in proc sigio list"));
			KASSERT(sigio->sio_proc == p, ("Bogus proc in sigio list"));
			SLIST_REMOVE(&p->p_sigiolst, sigio, sigio, sio_pgsigio);
			/* Same drop/retake dance for the proc lock. */
			PROC_UNLOCK(p);
			crfree(sigio->sio_ucred);
			FREE(sigio, M_SIGIO);
			PROC_LOCK(p);
		}
	}
}
611
612/*
613 * This is common code for FIOSETOWN ioctl called by fcntl(fd, F_SETOWN, arg).
614 *
615 * After permission checking, add a sigio structure to the sigio list for
616 * the process or process group.
617 */
int
fsetown(pgid, sigiop)
	pid_t pgid;
	struct sigio **sigiop;
{
	struct proc *proc;
	struct pgrp *pgrp;
	struct sigio *sigio;
	int s, ret;

	/* pgid 0 simply clears any existing ownership. */
	if (pgid == 0) {
		funsetown(*sigiop);
		return (0);
	}

	ret = 0;

	/* Allocate and fill in the new sigio out of locks. */
	MALLOC(sigio, struct sigio *, sizeof(struct sigio), M_SIGIO, M_WAITOK);
	sigio->sio_pgid = pgid;
	sigio->sio_ucred = crhold(curthread->td_ucred);
	sigio->sio_myref = sigiop;

	PGRPSESS_SLOCK();
	if (pgid > 0) {
		/* Positive pgid targets a single process. */
		proc = pfind(pgid);
		if (proc == NULL) {
			ret = ESRCH;
			goto fail;
		}

		/*
		 * Policy - Don't allow a process to FSETOWN a process
		 * in another session.
		 *
		 * Remove this test to allow maximum flexibility or
		 * restrict FSETOWN to the current process or process
		 * group for maximum safety.
		 */
		/*
		 * NOTE(review): p_session is read after PROC_UNLOCK;
		 * presumably PGRPSESS_SLOCK keeps the session stable
		 * here — verify against the pgrp/session locking rules.
		 */
		PROC_UNLOCK(proc);
		if (proc->p_session != curthread->td_proc->p_session) {
			ret = EPERM;
			goto fail;
		}

		pgrp = NULL;
	} else /* if (pgid < 0) */ {
		/* Negative pgid targets the process group -pgid. */
		pgrp = pgfind(-pgid);
		if (pgrp == NULL) {
			ret = ESRCH;
			goto fail;
		}
		PGRP_UNLOCK(pgrp);

		/*
		 * Policy - Don't allow a process to FSETOWN a process
		 * in another session.
		 *
		 * Remove this test to allow maximum flexibility or
		 * restrict FSETOWN to the current process or process
		 * group for maximum safety.
		 */
		if (pgrp->pg_session != curthread->td_proc->p_session) {
			ret = EPERM;
			goto fail;
		}

		proc = NULL;
	}
	/* Displace any previous owner before installing the new one. */
	funsetown(*sigiop);
	if (pgid > 0) {
		PROC_LOCK(proc);
		SLIST_INSERT_HEAD(&proc->p_sigiolst, sigio, sio_pgsigio);
		sigio->sio_proc = proc;
		PROC_UNLOCK(proc);
	} else {
		PGRP_LOCK(pgrp);
		SLIST_INSERT_HEAD(&pgrp->pg_sigiolst, sigio, sio_pgsigio);
		sigio->sio_pgrp = pgrp;
		PGRP_UNLOCK(pgrp);
	}
	PGRPSESS_SUNLOCK();
	/* Publish the new sigio to the device at splhigh. */
	s = splhigh();
	*sigiop = sigio;
	splx(s);
	return (0);

fail:
	PGRPSESS_SUNLOCK();
	crfree(sigio->sio_ucred);
	FREE(sigio, M_SIGIO);
	return (ret);
}
711
712/*
713 * This is common code for FIOGETOWN ioctl called by fcntl(fd, F_GETOWN, arg).
714 */
715pid_t
716fgetown(sigio)
717	struct sigio *sigio;
718{
719	return (sigio != NULL ? sigio->sio_pgid : 0);
720}
721
722/*
723 * Close a file descriptor.
724 */
#ifndef _SYS_SYSPROTO_H_
struct close_args {
        int     fd;
};
#endif
/*
 * MPSAFE
 */
/* ARGSUSED */
int
close(td, uap)
	struct thread *td;
	struct close_args *uap;
{
	register struct filedesc *fdp;
	register struct file *fp;
	register int fd = uap->fd;
	int error = 0;

	mtx_lock(&Giant);
	fdp = td->td_proc->p_fd;
	FILEDESC_LOCK(fdp);
	if ((unsigned)fd >= fdp->fd_nfiles ||
	    (fp = fdp->fd_ofiles[fd]) == NULL) {
		FILEDESC_UNLOCK(fdp);
		error = EBADF;
		goto done2;
	}
#if 0
	if (fdp->fd_ofileflags[fd] & UF_MAPPED)
		(void) munmapfd(td, fd);
#endif
	/* Empty the slot while the table is locked. */
	fdp->fd_ofiles[fd] = NULL;
	fdp->fd_ofileflags[fd] = 0;

	/*
	 * we now hold the fp reference that used to be owned by the descriptor
	 * array.
	 */
	/* Shrink fd_lastfile past any trailing empty slots. */
	while (fdp->fd_lastfile > 0 && fdp->fd_ofiles[fdp->fd_lastfile] == NULL)
		fdp->fd_lastfile--;
	if (fd < fdp->fd_freefile)
		fdp->fd_freefile = fd;
	/* Detach any knotes attached to this fd; knote_fdclose() needs the lock dropped. */
	if (fd < fdp->fd_knlistsize) {
		FILEDESC_UNLOCK(fdp);
		knote_fdclose(td, fd);
	} else
		FILEDESC_UNLOCK(fdp);

	/* Release our reference; closef() runs the real close semantics. */
	error = closef(fp, td);
done2:
	mtx_unlock(&Giant);
	return(error);
}
779
780#if defined(COMPAT_43) || defined(COMPAT_SUNOS)
781/*
782 * Return status information about a file descriptor.
783 */
784#ifndef _SYS_SYSPROTO_H_
785struct ofstat_args {
786	int	fd;
787	struct	ostat *sb;
788};
789#endif
790/*
791 * MPSAFE
792 */
793/* ARGSUSED */
794int
795ofstat(td, uap)
796	struct thread *td;
797	register struct ofstat_args *uap;
798{
799	struct file *fp;
800	struct stat ub;
801	struct ostat oub;
802	int error;
803
804	mtx_lock(&Giant);
805	if ((error = fget(td, uap->fd, &fp)) != 0)
806		goto done2;
807	error = fo_stat(fp, &ub, td);
808	if (error == 0) {
809		cvtstat(&ub, &oub);
810		error = copyout((caddr_t)&oub, (caddr_t)uap->sb, sizeof (oub));
811	}
812	fdrop(fp, td);
813done2:
814	mtx_unlock(&Giant);
815	return (error);
816}
817#endif /* COMPAT_43 || COMPAT_SUNOS */
818
819/*
820 * Return status information about a file descriptor.
821 */
822#ifndef _SYS_SYSPROTO_H_
823struct fstat_args {
824	int	fd;
825	struct	stat *sb;
826};
827#endif
828/*
829 * MPSAFE
830 */
831/* ARGSUSED */
832int
833fstat(td, uap)
834	struct thread *td;
835	struct fstat_args *uap;
836{
837	struct file *fp;
838	struct stat ub;
839	int error;
840
841	mtx_lock(&Giant);
842	if ((error = fget(td, uap->fd, &fp)) != 0)
843		goto done2;
844	error = fo_stat(fp, &ub, td);
845	if (error == 0)
846		error = copyout((caddr_t)&ub, (caddr_t)uap->sb, sizeof (ub));
847	fdrop(fp, td);
848done2:
849	mtx_unlock(&Giant);
850	return (error);
851}
852
853/*
854 * Return status information about a file descriptor.
855 */
856#ifndef _SYS_SYSPROTO_H_
857struct nfstat_args {
858	int	fd;
859	struct	nstat *sb;
860};
861#endif
862/*
863 * MPSAFE
864 */
865/* ARGSUSED */
866int
867nfstat(td, uap)
868	struct thread *td;
869	register struct nfstat_args *uap;
870{
871	struct file *fp;
872	struct stat ub;
873	struct nstat nub;
874	int error;
875
876	mtx_lock(&Giant);
877	if ((error = fget(td, uap->fd, &fp)) != 0)
878		goto done2;
879	error = fo_stat(fp, &ub, td);
880	if (error == 0) {
881		cvtnstat(&ub, &nub);
882		error = copyout((caddr_t)&nub, (caddr_t)uap->sb, sizeof (nub));
883	}
884	fdrop(fp, td);
885done2:
886	mtx_unlock(&Giant);
887	return (error);
888}
889
890/*
891 * Return pathconf information about a file descriptor.
892 */
893#ifndef _SYS_SYSPROTO_H_
894struct fpathconf_args {
895	int	fd;
896	int	name;
897};
898#endif
899/*
900 * MPSAFE
901 */
902/* ARGSUSED */
903int
904fpathconf(td, uap)
905	struct thread *td;
906	register struct fpathconf_args *uap;
907{
908	struct file *fp;
909	struct vnode *vp;
910	int error;
911
912	if ((error = fget(td, uap->fd, &fp)) != 0)
913		return (error);
914
915	switch (fp->f_type) {
916	case DTYPE_PIPE:
917	case DTYPE_SOCKET:
918		if (uap->name != _PC_PIPE_BUF) {
919			error = EINVAL;
920		} else {
921			td->td_retval[0] = PIPE_BUF;
922			error = 0;
923		}
924		break;
925	case DTYPE_FIFO:
926	case DTYPE_VNODE:
927		vp = (struct vnode *)fp->f_data;
928		mtx_lock(&Giant);
929		error = VOP_PATHCONF(vp, uap->name, td->td_retval);
930		mtx_unlock(&Giant);
931		break;
932	default:
933		error = EOPNOTSUPP;
934		break;
935	}
936	fdrop(fp, td);
937	return(error);
938}
939
940/*
941 * Allocate a file descriptor for the process.
942 */
/* Count of ofile-array expansions, exported read-only as debug.fdexpand. */
static int fdexpand;
SYSCTL_INT(_debug, OID_AUTO, fdexpand, CTLFLAG_RD, &fdexpand, 0, "");
945
int
fdalloc(td, want, result)
	struct thread *td;
	int want;
	int *result;
{
	struct proc *p = td->td_proc;
	register struct filedesc *fdp = td->td_proc->p_fd;
	register int i;
	int lim, last, nfiles;
	struct file **newofile, **oldofile;
	char *newofileflags;

	/* Called and returns with fdp locked; may drop it while allocating. */
	FILEDESC_LOCK_ASSERT(fdp, MA_OWNED);

	/*
	 * Search for a free descriptor starting at the higher
	 * of want or fd_freefile.  If that fails, consider
	 * expanding the ofile array.
	 */
	lim = min((int)p->p_rlimit[RLIMIT_NOFILE].rlim_cur, maxfilesperproc);
	for (;;) {
		last = min(fdp->fd_nfiles, lim);
		if ((i = want) < fdp->fd_freefile)
			i = fdp->fd_freefile;
		for (; i < last; i++) {
			if (fdp->fd_ofiles[i] == NULL) {
				/* Claim the slot: clear flags, maintain hints. */
				fdp->fd_ofileflags[i] = 0;
				if (i > fdp->fd_lastfile)
					fdp->fd_lastfile = i;
				if (want <= fdp->fd_freefile)
					fdp->fd_freefile = i;
				*result = i;
				return (0);
			}
		}

		/*
		 * No space in current array.  Expand?
		 */
		if (fdp->fd_nfiles >= lim)
			return (EMFILE);
		/* Grow to NDEXTENT first, then double each time. */
		if (fdp->fd_nfiles < NDEXTENT)
			nfiles = NDEXTENT;
		else
			nfiles = 2 * fdp->fd_nfiles;
		/* Drop fdp lock around malloc; Giant protects the allocator here. */
		FILEDESC_UNLOCK(fdp);
		mtx_lock(&Giant);
		MALLOC(newofile, struct file **, nfiles * OFILESIZE,
		    M_FILEDESC, M_WAITOK);
		mtx_unlock(&Giant);
		FILEDESC_LOCK(fdp);

		/*
		 * deal with file-table extend race that might have occured
		 * when malloc was blocked.
		 */
		if (fdp->fd_nfiles >= nfiles) {
			/* Someone else already grew it at least this far. */
			FILEDESC_UNLOCK(fdp);
			mtx_lock(&Giant);
			FREE(newofile, M_FILEDESC);
			mtx_unlock(&Giant);
			FILEDESC_LOCK(fdp);
			continue;
		}
		/* Flag bytes live immediately after the pointer array. */
		newofileflags = (char *) &newofile[nfiles];
		/*
		 * Copy the existing ofile and ofileflags arrays
		 * and zero the new portion of each array.
		 */
		bcopy(fdp->fd_ofiles, newofile,
			(i = sizeof(struct file *) * fdp->fd_nfiles));
		bzero((char *)newofile + i, nfiles * sizeof(struct file *) - i);
		bcopy(fdp->fd_ofileflags, newofileflags,
			(i = sizeof(char) * fdp->fd_nfiles));
		bzero(newofileflags + i, nfiles * sizeof(char) - i);
		/* The initial NDFILE-sized table is embedded in filedesc0, not malloced. */
		if (fdp->fd_nfiles > NDFILE)
			oldofile = fdp->fd_ofiles;
		else
			oldofile = NULL;
		fdp->fd_ofiles = newofile;
		fdp->fd_ofileflags = newofileflags;
		fdp->fd_nfiles = nfiles;
		fdexpand++;
		if (oldofile != NULL) {
			FILEDESC_UNLOCK(fdp);
			mtx_lock(&Giant);
			FREE(oldofile, M_FILEDESC);
			mtx_unlock(&Giant);
			FILEDESC_LOCK(fdp);
		}
	}
	/* Not reached: the loop above only exits via return. */
	return (0);
}
1040
1041/*
1042 * Check to see whether n user file descriptors
1043 * are available to the process p.
1044 */
int
fdavail(td, n)
	struct thread *td;
	register int n;
{
	struct proc *p = td->td_proc;
	register struct filedesc *fdp = td->td_proc->p_fd;
	register struct file **fpp;
	register int i, lim, last;

	FILEDESC_LOCK_ASSERT(fdp, MA_OWNED);

	lim = min((int)p->p_rlimit[RLIMIT_NOFILE].rlim_cur, maxfilesperproc);
	/* Slots the table could still grow by all count as available. */
	if ((i = lim - fdp->fd_nfiles) > 0 && (n -= i) <= 0)
		return (1);

	/* Otherwise count empty slots from the free-slot hint up to the limit. */
	last = min(fdp->fd_nfiles, lim);
	fpp = &fdp->fd_ofiles[fdp->fd_freefile];
	for (i = last - fdp->fd_freefile; --i >= 0; fpp++) {
		if (*fpp == NULL && --n <= 0)
			return (1);
	}
	return (0);
}
1069
1070/*
1071 * Create a new open file structure and allocate
1072 * a file decriptor for the process that refers to it.
1073 */
int
falloc(td, resultfp, resultfd)
	register struct thread *td;
	struct file **resultfp;
	int *resultfd;
{
	struct proc *p = td->td_proc;
	register struct file *fp, *fq;
	int error, i;

	/* Enforce the global open-file limit and reserve a count. */
	sx_xlock(&filelist_lock);
	if (nfiles >= maxfiles) {
		sx_xunlock(&filelist_lock);
		tablefull("file");
		return (ENFILE);
	}
	nfiles++;
	sx_xunlock(&filelist_lock);
	/*
	 * Allocate a new file descriptor.
	 * If the process has file descriptor zero open, add to the list
	 * of open files at that point, otherwise put it at the front of
	 * the list of open files.
	 */
	MALLOC(fp, struct file *, sizeof(struct file), M_FILE, M_WAITOK | M_ZERO);

	/*
	 * wait until after malloc (which may have blocked) returns before
	 * allocating the slot, else a race might have shrunk it if we had
	 * allocated it before the malloc.
	 */
	FILEDESC_LOCK(p->p_fd);
	if ((error = fdalloc(td, 0, &i))) {
		/* Back out the global count and the allocation on failure. */
		FILEDESC_UNLOCK(p->p_fd);
		sx_xlock(&filelist_lock);
		nfiles--;
		sx_xunlock(&filelist_lock);
		FREE(fp, M_FILE);
		return (error);
	}
	/* Initialize the new file: one reference, caller's cred, bad ops until set. */
	fp->f_mtxp = mtx_pool_alloc();
	fp->f_gcflag = 0;
	fp->f_count = 1;
	fp->f_cred = crhold(td->td_ucred);
	fp->f_ops = &badfileops;
	fp->f_seqcount = 1;
	FILEDESC_UNLOCK(p->p_fd);
	/* Lock order: filelist_lock before the filedesc lock. */
	sx_xlock(&filelist_lock);
	FILEDESC_LOCK(p->p_fd);
	if ((fq = p->p_fd->fd_ofiles[0])) {
		LIST_INSERT_AFTER(fq, fp, f_list);
	} else {
		LIST_INSERT_HEAD(&filehead, fp, f_list);
	}
	p->p_fd->fd_ofiles[i] = fp;
	FILEDESC_UNLOCK(p->p_fd);
	sx_xunlock(&filelist_lock);
	if (resultfp)
		*resultfp = fp;
	if (resultfd)
		*resultfd = i;
	return (0);
}
1137
1138/*
1139 * Free a file descriptor.
1140 */
void
ffree(fp)
	register struct file *fp;
{

	/* Only files whose last reference is gone may be freed. */
	KASSERT((fp->f_count == 0), ("ffree: fp_fcount not 0!"));
	sx_xlock(&filelist_lock);
	LIST_REMOVE(fp, f_list);
	nfiles--;
	sx_xunlock(&filelist_lock);
	crfree(fp->f_cred);
	FREE(fp, M_FILE);
}
1154
1155/*
1156 * Build a new filedesc structure.
1157 */
struct filedesc *
fdinit(td)
	struct thread *td;
{
	register struct filedesc0 *newfdp;
	register struct filedesc *fdp = td->td_proc->p_fd;

	/* filedesc0 embeds the initial NDFILE-sized tables after the filedesc. */
	MALLOC(newfdp, struct filedesc0 *, sizeof(struct filedesc0),
	    M_FILEDESC, M_WAITOK | M_ZERO);
	mtx_init(&newfdp->fd_fd.fd_mtx, "filedesc structure", MTX_DEF);
	FILEDESC_LOCK(&newfdp->fd_fd);
	/* Inherit current, root and jail directories, taking vnode refs. */
	newfdp->fd_fd.fd_cdir = fdp->fd_cdir;
	if (newfdp->fd_fd.fd_cdir)
		VREF(newfdp->fd_fd.fd_cdir);
	newfdp->fd_fd.fd_rdir = fdp->fd_rdir;
	if (newfdp->fd_fd.fd_rdir)
		VREF(newfdp->fd_fd.fd_rdir);
	newfdp->fd_fd.fd_jdir = fdp->fd_jdir;
	if (newfdp->fd_fd.fd_jdir)
		VREF(newfdp->fd_fd.fd_jdir);

	/* Create the file descriptor table. */
	newfdp->fd_fd.fd_refcnt = 1;
	newfdp->fd_fd.fd_cmask = cmask;
	newfdp->fd_fd.fd_ofiles = newfdp->fd_dfiles;
	newfdp->fd_fd.fd_ofileflags = newfdp->fd_dfileflags;
	newfdp->fd_fd.fd_nfiles = NDFILE;
	/* -1 marks "no knote list allocated yet". */
	newfdp->fd_fd.fd_knlistsize = -1;
	FILEDESC_UNLOCK(&newfdp->fd_fd);

	return (&newfdp->fd_fd);
}
1190
1191/*
1192 * Share a filedesc structure.
1193 */
struct filedesc *
fdshare(p)
	struct proc *p;
{
	/* Sharing is just bumping the reference count on the existing table. */
	FILEDESC_LOCK(p->p_fd);
	p->p_fd->fd_refcnt++;
	FILEDESC_UNLOCK(p->p_fd);
	return (p->p_fd);
}
1203
1204/*
1205 * Copy a filedesc structure.
1206 */
struct filedesc *
fdcopy(td)
	struct thread *td;
{
	register struct filedesc *newfdp, *fdp = td->td_proc->p_fd;
	register struct file **fpp;
	register int i, j;

	/* Certain daemons might not have file descriptors. */
	if (fdp == NULL)
		return (NULL);

	/* Caller holds fdp locked; we drop/retake it around allocations. */
	FILEDESC_LOCK_ASSERT(fdp, MA_OWNED);

	FILEDESC_UNLOCK(fdp);
	MALLOC(newfdp, struct filedesc *, sizeof(struct filedesc0),
	    M_FILEDESC, M_WAITOK);
	FILEDESC_LOCK(fdp);
	/* Start from a byte copy of the parent, then fix up what must differ. */
	bcopy(fdp, newfdp, sizeof(struct filedesc));
	FILEDESC_UNLOCK(fdp);
	/* The copied mutex state is meaningless; re-create the lock. */
	bzero(&newfdp->fd_mtx, sizeof(newfdp->fd_mtx));
	mtx_init(&newfdp->fd_mtx, "filedesc structure", MTX_DEF);
	if (newfdp->fd_cdir)
		VREF(newfdp->fd_cdir);
	if (newfdp->fd_rdir)
		VREF(newfdp->fd_rdir);
	if (newfdp->fd_jdir)
		VREF(newfdp->fd_jdir);
	newfdp->fd_refcnt = 1;

	/*
	 * If the number of open files fits in the internal arrays
	 * of the open file structure, use them, otherwise allocate
	 * additional memory for the number of descriptors currently
	 * in use.
	 */
	FILEDESC_LOCK(fdp);
	newfdp->fd_lastfile = fdp->fd_lastfile;
	newfdp->fd_nfiles = fdp->fd_nfiles;
	if (newfdp->fd_lastfile < NDFILE) {
		/* Small table: point at the arrays embedded in filedesc0. */
		newfdp->fd_ofiles = ((struct filedesc0 *) newfdp)->fd_dfiles;
		newfdp->fd_ofileflags =
		    ((struct filedesc0 *) newfdp)->fd_dfileflags;
		i = NDFILE;
	} else {
		/*
		 * Compute the smallest multiple of NDEXTENT needed
		 * for the file descriptors currently in use,
		 * allowing the table to shrink.
		 */
retry:
		i = newfdp->fd_nfiles;
		while (i > 2 * NDEXTENT && i > newfdp->fd_lastfile * 2)
			i /= 2;
		/* malloc may block; recompute afterwards in case fdp grew. */
		FILEDESC_UNLOCK(fdp);
		MALLOC(newfdp->fd_ofiles, struct file **, i * OFILESIZE,
		    M_FILEDESC, M_WAITOK);
		FILEDESC_LOCK(fdp);
		newfdp->fd_lastfile = fdp->fd_lastfile;
		newfdp->fd_nfiles = fdp->fd_nfiles;
		j = newfdp->fd_nfiles;
		while (j > 2 * NDEXTENT && j > newfdp->fd_lastfile * 2)
			j /= 2;
		if (i != j) {
			/*
			 * The size of the original table has changed.
			 * Go over once again.
			 */
			FILEDESC_UNLOCK(fdp);
			FREE(newfdp->fd_ofiles, M_FILEDESC);
			FILEDESC_LOCK(fdp);
			newfdp->fd_lastfile = fdp->fd_lastfile;
			newfdp->fd_nfiles = fdp->fd_nfiles;
			goto retry;
		}
		/* Flag bytes follow the pointer array, as in fdalloc(). */
		newfdp->fd_ofileflags = (char *) &newfdp->fd_ofiles[i];
	}
	newfdp->fd_nfiles = i;
	bcopy(fdp->fd_ofiles, newfdp->fd_ofiles, i * sizeof(struct file **));
	bcopy(fdp->fd_ofileflags, newfdp->fd_ofileflags, i * sizeof(char));

	/*
	 * kq descriptors cannot be copied.
	 */
	if (newfdp->fd_knlistsize != -1) {
		/* Walk backwards, dropping kqueues and fixing freefile/lastfile. */
		fpp = &newfdp->fd_ofiles[newfdp->fd_lastfile];
		for (i = newfdp->fd_lastfile; i >= 0; i--, fpp--) {
			if (*fpp != NULL && (*fpp)->f_type == DTYPE_KQUEUE) {
				*fpp = NULL;
				if (i < newfdp->fd_freefile)
					newfdp->fd_freefile = i;
			}
			if (*fpp == NULL && i == newfdp->fd_lastfile && i > 0)
				newfdp->fd_lastfile--;
		}
		newfdp->fd_knlist = NULL;
		newfdp->fd_knlistsize = -1;
		newfdp->fd_knhash = NULL;
		newfdp->fd_knhashmask = 0;
	}

	/*
	 * Take a reference on every copied file.  Note the loop condition
	 * `i-- >= 0' visits fd_lastfile + 1 slots starting from slot 0.
	 */
	fpp = newfdp->fd_ofiles;
	for (i = newfdp->fd_lastfile; i-- >= 0; fpp++) {
		if (*fpp != NULL) {
			fhold(*fpp);
		}
	}
	return (newfdp);
}
1316
1317/*
1318 * Release a filedesc structure.
1319 */
void
fdfree(td)
	struct thread *td;
{
	register struct filedesc *fdp = td->td_proc->p_fd;
	struct file **fpp;
	register int i;

	/* Certain daemons might not have file descriptors. */
	if (fdp == NULL)
		return;

	FILEDESC_LOCK(fdp);
	if (--fdp->fd_refcnt > 0) {
		FILEDESC_UNLOCK(fdp);
		return;
	}
	/*
	 * We are the last reference to the structure, so we can
	 * safely assume it will not change out from under us.
	 */
	FILEDESC_UNLOCK(fdp);
	fpp = fdp->fd_ofiles;
	/* Close descriptors 0..fd_lastfile inclusive. */
	for (i = fdp->fd_lastfile; i-- >= 0; fpp++) {
		if (*fpp)
			(void) closef(*fpp, td);
	}
	/* The ofiles array is malloc'd only when it outgrew the inline one. */
	if (fdp->fd_nfiles > NDFILE)
		FREE(fdp->fd_ofiles, M_FILEDESC);
	/* Release directory vnode references taken at init/copy time. */
	if (fdp->fd_cdir)
		vrele(fdp->fd_cdir);
	if (fdp->fd_rdir)
		vrele(fdp->fd_rdir);
	if (fdp->fd_jdir)
		vrele(fdp->fd_jdir);
	/* kqueue bookkeeping, if any was ever allocated. */
	if (fdp->fd_knlist)
		FREE(fdp->fd_knlist, M_KQUEUE);
	if (fdp->fd_knhash)
		FREE(fdp->fd_knhash, M_KQUEUE);
	mtx_destroy(&fdp->fd_mtx);
	FREE(fdp, M_FILEDESC);
}
1362
/*
 * For setugid programs, we don't want people to use that setugidness
 * to generate error messages which write to a file which would
 * otherwise be off-limits to the process.
 *
 * This is a gross hack to plug the hole.  A better solution would involve
 * a special vop or other form of generalized access control mechanism.  We
 * go ahead and just reject all procfs file systems accesses as dangerous.
 *
 * Since setugidsafety calls this only for fd 0, 1 and 2, this check is
 * sufficient.  We also don't check for setugidness since we know we are.
 */
1375static int
1376is_unsafe(struct file *fp)
1377{
1378	if (fp->f_type == DTYPE_VNODE &&
1379	    ((struct vnode *)(fp->f_data))->v_tag == VT_PROCFS)
1380		return (1);
1381	return (0);
1382}
1383
1384/*
1385 * Make this setguid thing safe, if at all possible.
1386 */
void
setugidsafety(td)
	struct thread *td;
{
	struct filedesc *fdp = td->td_proc->p_fd;
	register int i;

	/* Certain daemons might not have file descriptors. */
	if (fdp == NULL)
		return;

	/*
	 * Note: fdp->fd_ofiles may be reallocated out from under us while
	 * we are blocked in a close.  Be careful!
	 */
	FILEDESC_LOCK(fdp);
	for (i = 0; i <= fdp->fd_lastfile; i++) {
		/* Only std{in,out,err} (fd 0..2) are inspected. */
		if (i > 2)
			break;
		if (fdp->fd_ofiles[i] && is_unsafe(fdp->fd_ofiles[i])) {
			struct file *fp;

#if 0
			if ((fdp->fd_ofileflags[i] & UF_MAPPED) != 0)
				(void) munmapfd(td, i);
#endif
			/* Detach knotes; the filedesc lock is dropped around it. */
			if (i < fdp->fd_knlistsize) {
				FILEDESC_UNLOCK(fdp);
				knote_fdclose(td, i);
				FILEDESC_LOCK(fdp);
			}
			/*
			 * NULL-out descriptor prior to close to avoid
			 * a race while close blocks.
			 */
			fp = fdp->fd_ofiles[i];
			fdp->fd_ofiles[i] = NULL;
			fdp->fd_ofileflags[i] = 0;
			if (i < fdp->fd_freefile)
				fdp->fd_freefile = i;
			FILEDESC_UNLOCK(fdp);
			(void) closef(fp, td);
			FILEDESC_LOCK(fdp);
		}
	}
	/* Pull fd_lastfile back over any descriptors closed above. */
	while (fdp->fd_lastfile > 0 && fdp->fd_ofiles[fdp->fd_lastfile] == NULL)
		fdp->fd_lastfile--;
	FILEDESC_UNLOCK(fdp);
}
1436
1437/*
1438 * Close any files on exec?
1439 */
void
fdcloseexec(td)
	struct thread *td;
{
	struct filedesc *fdp = td->td_proc->p_fd;
	register int i;

	/* Certain daemons might not have file descriptors. */
	if (fdp == NULL)
		return;

	FILEDESC_LOCK(fdp);

	/*
	 * We cannot cache fd_ofiles or fd_ofileflags since operations
	 * may block and rip them out from under us.
	 */
	for (i = 0; i <= fdp->fd_lastfile; i++) {
		/* Only descriptors marked close-on-exec are affected. */
		if (fdp->fd_ofiles[i] != NULL &&
		    (fdp->fd_ofileflags[i] & UF_EXCLOSE)) {
			struct file *fp;

#if 0
			if (fdp->fd_ofileflags[i] & UF_MAPPED)
				(void) munmapfd(td, i);
#endif
			/* Detach knotes; the filedesc lock is dropped around it. */
			if (i < fdp->fd_knlistsize) {
				FILEDESC_UNLOCK(fdp);
				knote_fdclose(td, i);
				FILEDESC_LOCK(fdp);
			}
			/*
			 * NULL-out descriptor prior to close to avoid
			 * a race while close blocks.
			 */
			fp = fdp->fd_ofiles[i];
			fdp->fd_ofiles[i] = NULL;
			fdp->fd_ofileflags[i] = 0;
			if (i < fdp->fd_freefile)
				fdp->fd_freefile = i;
			FILEDESC_UNLOCK(fdp);
			(void) closef(fp, td);
			FILEDESC_LOCK(fdp);
		}
	}
	/* Pull fd_lastfile back over any descriptors closed above. */
	while (fdp->fd_lastfile > 0 && fdp->fd_ofiles[fdp->fd_lastfile] == NULL)
		fdp->fd_lastfile--;
	FILEDESC_UNLOCK(fdp);
}
1489
1490/*
1491 * Internal form of close.
1492 * Decrement reference count on file structure.
1493 * Note: td may be NULL when closing a file
1494 * that was being passed in a message.
1495 */
int
closef(fp, td)
	register struct file *fp;
	register struct thread *td;
{
	struct vnode *vp;
	struct flock lf;

	if (fp == NULL)
		return (0);
	/*
	 * POSIX record locking dictates that any close releases ALL
	 * locks owned by this process.  This is handled by setting
	 * a flag in the unlock to free ONLY locks obeying POSIX
	 * semantics, and not to free BSD-style file locks.
	 * If the descriptor was in a message, POSIX-style locks
	 * aren't passed with the descriptor.
	 */
	if (td && (td->td_proc->p_flag & P_ADVLOCK) &&
	    fp->f_type == DTYPE_VNODE) {
		/* Unlock the whole range on behalf of the process leader. */
		lf.l_whence = SEEK_SET;
		lf.l_start = 0;
		lf.l_len = 0;
		lf.l_type = F_UNLCK;
		vp = (struct vnode *)fp->f_data;
		(void) VOP_ADVLOCK(vp, (caddr_t)td->td_proc->p_leader,
		    F_UNLCK, &lf, F_POSIX);
	}
	/* Drop our reference; the last reference actually closes the file. */
	return (fdrop(fp, td));
}
1526
1527/*
1528 * Drop reference on struct file passed in, may call closef if the
1529 * reference hits zero.
1530 */
int
fdrop(fp, td)
	struct file *fp;
	struct thread *td;
{
	int error;

	/* Take the file lock; fdrop_locked() consumes (unlocks) it. */
	FILE_LOCK(fp);
	error = fdrop_locked(fp, td);
	return (error);
}
1540
1541/*
1542 * Extract the file pointer associated with the specified descriptor for
1543 * the current user process.
1544 *
1545 * If the descriptor doesn't exist, EBADF is returned.
1546 *
1547 * If the descriptor exists but doesn't match 'flags' then
1548 * return EBADF for read attempts and EINVAL for write attempts.
1549 *
 * If 'hold' is set (non-zero) the file's refcount will be bumped on return.
 * It should be dropped with fdrop().
 * If it is not set, then the refcount will not be bumped however the
 * thread's filedesc struct will be returned locked (for fgetsock).
 *
 * If an error occurred the non-zero error is returned and *fpp is set to NULL.
 * Otherwise *fpp is set and zero is returned.
1557 */
static __inline
int
_fget(struct thread *td, int fd, struct file **fpp, int flags, int hold)
{
	struct filedesc *fdp;
	struct file *fp;

	*fpp = NULL;
	if (td == NULL || (fdp = td->td_proc->p_fd) == NULL)
		return(EBADF);
	FILEDESC_LOCK(fdp);
	/* Descriptors with badfileops installed count as closed. */
	if ((fp = fget_locked(fdp, fd)) == NULL || fp->f_ops == &badfileops) {
		FILEDESC_UNLOCK(fdp);
		return(EBADF);
	}

	/*
	 * Note: FREAD failures returns EBADF to maintain backwards
	 * compatibility with what routines returned before.
	 *
	 * Only one flag, or 0, may be specified.
	 */
	if (flags == FREAD && (fp->f_flag & FREAD) == 0) {
		FILEDESC_UNLOCK(fdp);
		return(EBADF);
	}
	if (flags == FWRITE && (fp->f_flag & FWRITE) == 0) {
		FILEDESC_UNLOCK(fdp);
		return(EINVAL);
	}
	/*
	 * With 'hold' we bump the refcount and unlock; without it the
	 * caller receives fp with fdp still locked and must unlock it
	 * (see _fgetvp()/fgetsock()).
	 */
	if (hold) {
		fhold(fp);
		FILEDESC_UNLOCK(fdp);
	}
	*fpp = fp;
	return(0);
}
1595
int
fget(struct thread *td, int fd, struct file **fpp)
{
	/* Held reference on success; no access-mode requirement. */
	return (_fget(td, fd, fpp, 0, 1));
}
1601
1602int
1603fget_read(struct thread *td, int fd, struct file **fpp)
1604{
1605    return(_fget(td, fd, fpp, FREAD, 1));
1606}
1607
1608int
1609fget_write(struct thread *td, int fd, struct file **fpp)
1610{
1611    return(_fget(td, fd, fpp, FWRITE, 1));
1612}
1613
1614/*
1615 * Like fget() but loads the underlying vnode, or returns an error if
1616 * the descriptor does not represent a vnode.  Note that pipes use vnodes
1617 * but never have VM objects (so VOP_GETVOBJECT() calls will return an
1618 * error).  The returned vnode will be vref()d.
1619 */
1620
1621static __inline
1622int
1623_fgetvp(struct thread *td, int fd, struct vnode **vpp, int flags)
1624{
1625	struct file *fp;
1626	int error;
1627
1628	*vpp = NULL;
1629	if ((error = _fget(td, fd, &fp, 0, 0)) != 0)
1630		return (error);
1631	if (fp->f_type != DTYPE_VNODE && fp->f_type != DTYPE_FIFO) {
1632		error = EINVAL;
1633	} else {
1634		*vpp = (struct vnode *)fp->f_data;
1635		vref(*vpp);
1636	}
1637	FILEDESC_UNLOCK(td->td_proc->p_fd);
1638	return (error);
1639}
1640
int
fgetvp(struct thread *td, int fd, struct vnode **vpp)
{
	/* No access-mode requirement. */
	return(_fgetvp(td, fd, vpp, 0));
}
1646
int
fgetvp_read(struct thread *td, int fd, struct vnode **vpp)
{
	/* Requests an FREAD access check (enforcement is in _fgetvp/_fget). */
	return(_fgetvp(td, fd, vpp, FREAD));
}
1652
int
fgetvp_write(struct thread *td, int fd, struct vnode **vpp)
{
	/* Requests an FWRITE access check (enforcement is in _fgetvp/_fget). */
	return(_fgetvp(td, fd, vpp, FWRITE));
}
1658
1659/*
1660 * Like fget() but loads the underlying socket, or returns an error if
1661 * the descriptor does not represent a socket.
1662 *
1663 * We bump the ref count on the returned socket.  XXX Also obtain the SX lock in
1664 * the future.
1665 */
int
fgetsock(struct thread *td, int fd, struct socket **spp, u_int *fflagp)
{
	struct file *fp;
	int error;

	*spp = NULL;
	if (fflagp)
		*fflagp = 0;
	/* hold == 0: on success _fget() returns with the filedesc locked. */
	if ((error = _fget(td, fd, &fp, 0, 0)) != 0)
		return (error);
	if (fp->f_type != DTYPE_SOCKET) {
		error = ENOTSOCK;
	} else {
		*spp = (struct socket *)fp->f_data;
		/* Report the file flags if the caller asked for them. */
		if (fflagp)
			*fflagp = fp->f_flag;
		soref(*spp);
	}
	FILEDESC_UNLOCK(td->td_proc->p_fd);
	return(error);
}
1688
1689/*
 * Drop the reference count on the socket and XXX release the SX lock in
1691 * the future.  The last reference closes the socket.
1692 */
void
fputsock(struct socket *so)
{
	/* Release the reference taken by fgetsock(). */
	sorele(so);
}
1698
1699/*
1700 * Drop reference on struct file passed in, may call closef if the
1701 * reference hits zero.
1702 * Expects struct file locked, and will unlock it.
1703 */
int
fdrop_locked(fp, td)
	struct file *fp;
	struct thread *td;
{
	struct flock lf;
	struct vnode *vp;
	int error;

	FILE_LOCK_ASSERT(fp, MA_OWNED);

	if (--fp->f_count > 0) {
		FILE_UNLOCK(fp);
		return (0);
	}
	if (fp->f_count < 0)
		panic("fdrop: count < 0");
	/* Last reference: release any flock()-style lock held via fp. */
	if ((fp->f_flag & FHASLOCK) && fp->f_type == DTYPE_VNODE) {
		lf.l_whence = SEEK_SET;
		lf.l_start = 0;
		lf.l_len = 0;
		lf.l_type = F_UNLCK;
		vp = (struct vnode *)fp->f_data;
		FILE_UNLOCK(fp);
		/* The file pointer itself serves as the flock owner id. */
		(void) VOP_ADVLOCK(vp, (caddr_t)fp, F_UNLCK, &lf, F_FLOCK);
	} else
		FILE_UNLOCK(fp);
	/* badfileops means nothing real to close (see _fget/badfileops). */
	if (fp->f_ops != &badfileops)
		error = fo_close(fp, td);
	else
		error = 0;
	ffree(fp);
	return (error);
}
1738
1739/*
1740 * Apply an advisory lock on a file descriptor.
1741 *
1742 * Just attempt to get a record lock of the requested type on
1743 * the entire file (l_whence = SEEK_SET, l_start = 0, l_len = 0).
1744 */
#ifndef _SYS_SYSPROTO_H_
struct flock_args {
	int	fd;	/* descriptor to operate on */
	int	how;	/* LOCK_SH/LOCK_EX/LOCK_UN, optionally | LOCK_NB */
};
#endif
1751/*
1752 * MPSAFE
1753 */
1754/* ARGSUSED */
int
flock(td, uap)
	struct thread *td;
	register struct flock_args *uap;
{
	struct file *fp;
	struct vnode *vp;
	struct flock lf;
	int error;

	if ((error = fget(td, uap->fd, &fp)) != 0)
		return (error);
	/* flock() only works on vnode-backed descriptors. */
	if (fp->f_type != DTYPE_VNODE) {
		fdrop(fp, td);
		return (EOPNOTSUPP);
	}

	mtx_lock(&Giant);
	vp = (struct vnode *)fp->f_data;
	/* All requests cover the entire file. */
	lf.l_whence = SEEK_SET;
	lf.l_start = 0;
	lf.l_len = 0;
	if (uap->how & LOCK_UN) {
		lf.l_type = F_UNLCK;
		FILE_LOCK(fp);
		fp->f_flag &= ~FHASLOCK;
		FILE_UNLOCK(fp);
		error = VOP_ADVLOCK(vp, (caddr_t)fp, F_UNLCK, &lf, F_FLOCK);
		goto done2;
	}
	if (uap->how & LOCK_EX)
		lf.l_type = F_WRLCK;
	else if (uap->how & LOCK_SH)
		lf.l_type = F_RDLCK;
	else {
		/* Neither shared, exclusive, nor unlock was requested. */
		error = EBADF;
		goto done2;
	}
	/* Mark the file so fdrop_locked() knows to release the lock. */
	FILE_LOCK(fp);
	fp->f_flag |= FHASLOCK;
	FILE_UNLOCK(fp);
	/* LOCK_NB means fail immediately rather than sleep (no F_WAIT). */
	error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf,
	    (uap->how & LOCK_NB) ? F_FLOCK : F_FLOCK | F_WAIT);
done2:
	fdrop(fp, td);
	mtx_unlock(&Giant);
	return (error);
}
1803
1804/*
1805 * File Descriptor pseudo-device driver (/dev/fd/).
1806 *
1807 * Opening minor device N dup()s the file (if any) connected to file
1808 * descriptor N belonging to the calling process.  Note that this driver
1809 * consists of only the ``open()'' routine, because all subsequent
1810 * references to this file will be direct to the other driver.
1811 */
1812/* ARGSUSED */
static int
fdopen(dev, mode, type, td)
	dev_t dev;
	int mode, type;
	struct thread *td;
{

	/*
	 * XXX Kludge: set curthread->td_dupfd to contain the value of
	 * the file descriptor being sought for duplication. The error
	 * return ensures that the vnode for this device will be released
	 * by vn_open. Open will detect this special error and take the
	 * actions in dupfdopen below. Other callers of vn_open or VOP_OPEN
	 * will simply report the error.
	 */
	td->td_dupfd = dev2unit(dev);
	return (ENODEV);
}
1831
1832/*
1833 * Duplicate the specified descriptor to a free descriptor.
1834 */
int
dupfdopen(td, fdp, indx, dfd, mode, error)
	struct thread *td;
	struct filedesc *fdp;
	int indx, dfd;
	int mode;
	int error;
{
	register struct file *wfp;
	struct file *fp;

	/*
	 * If the to-be-dup'd fd number is greater than the allowed number
	 * of file descriptors, or the fd to be dup'd has already been
	 * closed, then reject.
	 */
	FILEDESC_LOCK(fdp);
	if ((u_int)dfd >= fdp->fd_nfiles ||
	    (wfp = fdp->fd_ofiles[dfd]) == NULL) {
		FILEDESC_UNLOCK(fdp);
		return (EBADF);
	}

	/*
	 * There are two cases of interest here.
	 *
	 * For ENODEV simply dup (dfd) to file descriptor
	 * (indx) and return.
	 *
	 * For ENXIO steal away the file structure from (dfd) and
	 * store it in (indx).  (dfd) is effectively closed by
	 * this operation.
	 *
	 * Any other error code is just returned.
	 */
	switch (error) {
	case ENODEV:
		/*
		 * Check that the mode the file is being opened for is a
		 * subset of the mode of the existing descriptor.
		 */
		FILE_LOCK(wfp);
		if (((mode & (FREAD|FWRITE)) | wfp->f_flag) != wfp->f_flag) {
			FILE_UNLOCK(wfp);
			FILEDESC_UNLOCK(fdp);
			return (EACCES);
		}
		/* Remember whatever used to occupy slot indx. */
		fp = fdp->fd_ofiles[indx];
#if 0
		if (fp && fdp->fd_ofileflags[indx] & UF_MAPPED)
			(void) munmapfd(td, indx);
#endif
		fdp->fd_ofiles[indx] = wfp;
		fdp->fd_ofileflags[indx] = fdp->fd_ofileflags[dfd];
		fhold_locked(wfp);
		FILE_UNLOCK(wfp);
		if (indx > fdp->fd_lastfile)
			fdp->fd_lastfile = indx;
		/* fdrop_locked() below expects fp locked. */
		if (fp != NULL)
			FILE_LOCK(fp);
		FILEDESC_UNLOCK(fdp);
		/*
		 * We now own the reference to fp that the ofiles[] array
		 * used to own.  Release it.
		 */
		if (fp != NULL)
			fdrop_locked(fp, td);
		return (0);

	case ENXIO:
		/*
		 * Steal away the file pointer from dfd, and stuff it into indx.
		 */
		fp = fdp->fd_ofiles[indx];
#if 0
		if (fp && fdp->fd_ofileflags[indx] & UF_MAPPED)
			(void) munmapfd(td, indx);
#endif
		fdp->fd_ofiles[indx] = fdp->fd_ofiles[dfd];
		fdp->fd_ofiles[dfd] = NULL;
		fdp->fd_ofileflags[indx] = fdp->fd_ofileflags[dfd];
		fdp->fd_ofileflags[dfd] = 0;

		/*
		 * Complete the clean up of the filedesc structure by
		 * recomputing the various hints.
		 */
		if (indx > fdp->fd_lastfile) {
			fdp->fd_lastfile = indx;
		} else {
			/* dfd was emptied; lastfile/freefile may move down. */
			while (fdp->fd_lastfile > 0 &&
			   fdp->fd_ofiles[fdp->fd_lastfile] == NULL) {
				fdp->fd_lastfile--;
			}
			if (dfd < fdp->fd_freefile)
				fdp->fd_freefile = dfd;
		}
		/* fdrop_locked() below expects fp locked. */
		if (fp != NULL)
			FILE_LOCK(fp);
		FILEDESC_UNLOCK(fdp);

		/*
		 * We now own the reference to fp that the ofiles[] array
		 * used to own.  Release it.
		 */
		if (fp != NULL)
			fdrop_locked(fp, td);
		return (0);

	default:
		FILEDESC_UNLOCK(fdp);
		return (error);
	}
	/* NOTREACHED */
}
1950
1951/*
1952 * Get file structures.
1953 */
static int
sysctl_kern_file(SYSCTL_HANDLER_ARGS)
{
	int error;
	struct file *fp;

	sx_slock(&filelist_lock);
	/* Size probe: no destination buffer, report an estimate only. */
	if (!req->oldptr) {
		/*
		 * overestimate by 10 files
		 */
		error = SYSCTL_OUT(req, 0, sizeof(filehead) +
				   (nfiles + 10) * sizeof(struct file));
		sx_sunlock(&filelist_lock);
		return (error);
	}

	/* First copy out the list head itself... */
	error = SYSCTL_OUT(req, (caddr_t)&filehead, sizeof(filehead));
	if (error) {
		sx_sunlock(&filelist_lock);
		return (error);
	}

	/*
	 * followed by an array of file structures
	 */
	LIST_FOREACH(fp, &filehead, f_list) {
		error = SYSCTL_OUT(req, (caddr_t)fp, sizeof (struct file));
		if (error) {
			sx_sunlock(&filelist_lock);
			return (error);
		}
	}
	sx_sunlock(&filelist_lock);
	return (0);
}
1990
/* Export the whole file list (head + one struct file per open file). */
SYSCTL_PROC(_kern, KERN_FILE, file, CTLTYPE_OPAQUE|CTLFLAG_RD,
    0, 0, sysctl_kern_file, "S,file", "Entire file table");

SYSCTL_INT(_kern, KERN_MAXFILESPERPROC, maxfilesperproc, CTLFLAG_RW,
    &maxfilesperproc, 0, "Maximum files allowed open per process");

SYSCTL_INT(_kern, KERN_MAXFILES, maxfiles, CTLFLAG_RW,
    &maxfiles, 0, "Maximum number of files");

SYSCTL_INT(_kern, OID_AUTO, openfiles, CTLFLAG_RD,
    &nfiles, 0, "System-wide number of open files");
2002
static void
fildesc_drvinit(void *unused)
{
	dev_t dev;

	/* fd/0-2 double as the traditional stdin/stdout/stderr nodes. */
	dev = make_dev(&fildesc_cdevsw, 0, UID_BIN, GID_BIN, 0666, "fd/0");
	make_dev_alias(dev, "stdin");
	dev = make_dev(&fildesc_cdevsw, 1, UID_BIN, GID_BIN, 0666, "fd/1");
	make_dev_alias(dev, "stdout");
	dev = make_dev(&fildesc_cdevsw, 2, UID_BIN, GID_BIN, 0666, "fd/2");
	make_dev_alias(dev, "stderr");
	/* Without devfs, statically create the remaining fd/N nodes. */
	if (!devfs_present) {
		int fd;

		for (fd = 3; fd < NUMFDESC; fd++)
			make_dev(&fildesc_cdevsw, fd, UID_BIN, GID_BIN, 0666,
			    "fd/%d", fd);
	}
}
2022
/*
 * Ops vector installed on descriptors whose backing object is gone or
 * was never set up; read/write/ioctl/stat/close fail with EBADF, while
 * poll and kqfilter quietly report nothing.
 */
struct fileops badfileops = {
	badfo_readwrite,
	badfo_readwrite,
	badfo_ioctl,
	badfo_poll,
	badfo_kqfilter,
	badfo_stat,
	badfo_close
};
2032
static int
badfo_readwrite(fp, uio, cred, flags, td)
	struct file *fp;
	struct uio *uio;
	struct ucred *cred;
	struct thread *td;
	int flags;
{

	/* Reads and writes on a bad descriptor always fail. */
	return (EBADF);
}
2044
static int
badfo_ioctl(fp, com, data, td)
	struct file *fp;
	u_long com;
	caddr_t data;
	struct thread *td;
{

	/* No ioctl is valid on a bad descriptor. */
	return (EBADF);
}
2055
static int
badfo_poll(fp, events, cred, td)
	struct file *fp;
	int events;
	struct ucred *cred;
	struct thread *td;
{

	/* Report no events ready rather than an error. */
	return (0);
}
2066
static int
badfo_kqfilter(fp, kn)
	struct file *fp;
	struct knote *kn;
{

	/* Quietly accept: no filter is attached to a bad descriptor. */
	return (0);
}
2075
static int
badfo_stat(fp, sb, td)
	struct file *fp;
	struct stat *sb;
	struct thread *td;
{

	/* Nothing to stat behind a bad descriptor. */
	return (EBADF);
}
2085
static int
badfo_close(fp, td)
	struct file *fp;
	struct thread *td;
{

	/* Nothing to release; report the descriptor as bad. */
	return (EBADF);
}
2094
/* Create the /dev/fd/* nodes once device drivers are initialized. */
SYSINIT(fildescdev,SI_SUB_DRIVERS,SI_ORDER_MIDDLE+CDEV_MAJOR,
					fildesc_drvinit,NULL)

static void filelistinit __P((void *));
/* Initialize the global file-list lock early (lock setup stage). */
SYSINIT(select, SI_SUB_LOCK, SI_ORDER_FIRST, filelistinit, NULL)
2100
2101/* ARGSUSED*/
static void
filelistinit(dummy)
	void *dummy;
{
	/* sx lock guarding the global filehead list (see sysctl_kern_file). */
	sx_init(&filelist_lock, "filelist lock");
}
2108