kern_descrip.c revision 252436
1/*-
2 * Copyright (c) 1982, 1986, 1989, 1991, 1993
3 *	The Regents of the University of California.  All rights reserved.
4 * (c) UNIX System Laboratories, Inc.
5 * All or some portions of this file are derived from material licensed
6 * to the University of California by American Telephone and Telegraph
7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8 * the permission of UNIX System Laboratories, Inc.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 *    notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 *    notice, this list of conditions and the following disclaimer in the
17 *    documentation and/or other materials provided with the distribution.
18 * 4. Neither the name of the University nor the names of its contributors
19 *    may be used to endorse or promote products derived from this software
20 *    without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 *	@(#)kern_descrip.c	8.6 (Berkeley) 4/19/94
35 */
36
37#include <sys/cdefs.h>
38__FBSDID("$FreeBSD: head/sys/kern/kern_descrip.c 252436 2013-07-01 03:27:14Z trociny $");
39
40#include "opt_capsicum.h"
41#include "opt_compat.h"
42#include "opt_ddb.h"
43#include "opt_ktrace.h"
44#include "opt_procdesc.h"
45
46#include <sys/param.h>
47#include <sys/systm.h>
48
49#include <sys/capability.h>
50#include <sys/conf.h>
51#include <sys/domain.h>
52#include <sys/fcntl.h>
53#include <sys/file.h>
54#include <sys/filedesc.h>
55#include <sys/filio.h>
56#include <sys/jail.h>
57#include <sys/kernel.h>
58#include <sys/ksem.h>
59#include <sys/limits.h>
60#include <sys/lock.h>
61#include <sys/malloc.h>
62#include <sys/mman.h>
63#include <sys/mount.h>
64#include <sys/mqueue.h>
65#include <sys/mutex.h>
66#include <sys/namei.h>
67#include <sys/selinfo.h>
68#include <sys/pipe.h>
69#include <sys/priv.h>
70#include <sys/proc.h>
71#include <sys/procdesc.h>
72#include <sys/protosw.h>
73#include <sys/racct.h>
74#include <sys/resourcevar.h>
75#include <sys/sbuf.h>
76#include <sys/signalvar.h>
77#include <sys/socketvar.h>
78#include <sys/stat.h>
79#include <sys/sx.h>
80#include <sys/syscallsubr.h>
81#include <sys/sysctl.h>
82#include <sys/sysproto.h>
83#include <sys/tty.h>
84#include <sys/unistd.h>
85#include <sys/un.h>
86#include <sys/unpcb.h>
87#include <sys/user.h>
88#include <sys/vnode.h>
89#ifdef KTRACE
90#include <sys/ktrace.h>
91#endif
92
93#include <net/vnet.h>
94
95#include <netinet/in.h>
96#include <netinet/in_pcb.h>
97
98#include <security/audit/audit.h>
99
100#include <vm/uma.h>
101#include <vm/vm.h>
102
103#include <ddb/ddb.h>
104
105static MALLOC_DEFINE(M_FILEDESC, "filedesc", "Open file descriptor table");
106static MALLOC_DEFINE(M_FILEDESC_TO_LEADER, "filedesc_to_leader",
107    "file desc to leader structures");
108static MALLOC_DEFINE(M_SIGIO, "sigio", "sigio structures");
109MALLOC_DEFINE(M_FILECAPS, "filecaps", "descriptor capabilities");
110
111MALLOC_DECLARE(M_FADVISE);
112
113static uma_zone_t file_zone;
114
115void	(*ksem_info)(struct ksem *ks, char *path, size_t size, uint32_t *value);
116
117static int	closefp(struct filedesc *fdp, int fd, struct file *fp,
118		    struct thread *td, int holdleaders);
119static int	fd_first_free(struct filedesc *fdp, int low, int size);
120static int	fd_last_used(struct filedesc *fdp, int size);
121static void	fdgrowtable(struct filedesc *fdp, int nfd);
122static void	fdunused(struct filedesc *fdp, int fd);
123static void	fdused(struct filedesc *fdp, int fd);
124static int	fill_pipe_info(struct pipe *pi, struct kinfo_file *kif);
125static int	fill_procdesc_info(struct procdesc *pdp,
126		    struct kinfo_file *kif);
127static int	fill_pts_info(struct tty *tp, struct kinfo_file *kif);
128static int	fill_sem_info(struct file *fp, struct kinfo_file *kif);
129static int	fill_shm_info(struct file *fp, struct kinfo_file *kif);
130static int	fill_socket_info(struct socket *so, struct kinfo_file *kif);
131static int	fill_vnode_info(struct vnode *vp, struct kinfo_file *kif);
132
133/*
134 * Each process has:
135 *
136 * - An array of open file descriptors (fd_ofiles)
137 * - An array of file flags (fd_ofileflags)
138 * - A bitmap recording which descriptors are in use (fd_map)
139 *
140 * A process starts out with NDFILE descriptors.  The value of NDFILE has
141 * been selected based on the historical limit of 20 open files, and an
142 * assumption that the majority of processes, especially short-lived
143 * processes like shells, will never need more.
144 *
145 * If this initial allocation is exhausted, a larger descriptor table and
146 * map are allocated dynamically, and the pointers in the process's struct
147 * filedesc are updated to point to those.  This is repeated every time
148 * the process runs out of file descriptors (provided it hasn't hit its
149 * resource limit).
150 *
151 * Since threads may hold references to individual descriptor table
152 * entries, the tables are never freed.  Instead, they are placed on a
153 * linked list and freed only when the struct filedesc is released.
154 */
155#define NDFILE		20
156#define NDSLOTSIZE	sizeof(NDSLOTTYPE)
157#define	NDENTRIES	(NDSLOTSIZE * __CHAR_BIT)
158#define NDSLOT(x)	((x) / NDENTRIES)
159#define NDBIT(x)	((NDSLOTTYPE)1 << ((x) % NDENTRIES))
160#define	NDSLOTS(x)	(((x) + NDENTRIES - 1) / NDENTRIES)
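/*
 * For example, with a 64-bit NDSLOTTYPE (u_long on LP64 platforms),
 * NDENTRIES is 64: descriptor 70 is tracked in fd_map[NDSLOT(70)] ==
 * fd_map[1] by bit NDBIT(70) == 1 << 6, and a 70-entry table needs
 * NDSLOTS(70) == 2 map slots.
 */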
161
162/*
163 * SLIST entry used to keep track of ofiles which must be reclaimed when
164 * the process exits.
165 */
166struct freetable {
167	struct filedescent *ft_table;
168	SLIST_ENTRY(freetable) ft_next;
169};
170
171/*
172 * Initial allocation: a filedesc structure + the head of SLIST used to
173 * keep track of old ofiles + enough space for NDFILE descriptors.
174 */
175struct filedesc0 {
176	struct filedesc fd_fd;
177	SLIST_HEAD(, freetable) fd_free;
178	struct	filedescent fd_dfiles[NDFILE];
179	NDSLOTTYPE fd_dmap[NDSLOTS(NDFILE)];
180};
181
182/*
183 * Descriptor management.
184 */
185volatile int openfiles;			/* actual number of open files */
186struct mtx sigio_lock;		/* mtx to protect pointers to sigio */
187void (*mq_fdclose)(struct thread *td, int fd, struct file *fp);
188
189/* A mutex to protect the association between a proc and filedesc. */
190static struct mtx fdesc_mtx;
191
192/*
193 * If low >= size, just return low. Otherwise find the first zero bit in the
194 * given bitmap, starting at low and not exceeding size - 1. Return size if
195 * not found.
196 */
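/*
 * For example, with NDENTRIES == 64, fd_map[0] == 0x17 (descriptors 0-2
 * and 4 in use) and low == 3: the mask keeps only bits 3..63 of
 * ~fd_map[0], ffsl() finds bit 3 first, and the function returns 3.
 */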
197static int
198fd_first_free(struct filedesc *fdp, int low, int size)
199{
200	NDSLOTTYPE *map = fdp->fd_map;
201	NDSLOTTYPE mask;
202	int off, maxoff;
203
204	if (low >= size)
205		return (low);
206
207	off = NDSLOT(low);
208	if (low % NDENTRIES) {
209		mask = ~(~(NDSLOTTYPE)0 >> (NDENTRIES - (low % NDENTRIES)));
210		if ((mask &= ~map[off]) != 0UL)
211			return (off * NDENTRIES + ffsl(mask) - 1);
212		++off;
213	}
214	for (maxoff = NDSLOTS(size); off < maxoff; ++off)
215		if (map[off] != ~0UL)
216			return (off * NDENTRIES + ffsl(~map[off]) - 1);
217	return (size);
218}
219
220/*
221 * Find the highest non-zero bit in the given bitmap, starting at 0 and
222 * not exceeding size - 1. Return -1 if not found.
223 */
224static int
225fd_last_used(struct filedesc *fdp, int size)
226{
227	NDSLOTTYPE *map = fdp->fd_map;
228	NDSLOTTYPE mask;
229	int off, minoff;
230
231	off = NDSLOT(size);
232	if (size % NDENTRIES) {
233		mask = ~(~(NDSLOTTYPE)0 << (size % NDENTRIES));
234		if ((mask &= map[off]) != 0)
235			return (off * NDENTRIES + flsl(mask) - 1);
236		--off;
237	}
238	for (minoff = NDSLOT(0); off >= minoff; --off)
239		if (map[off] != 0)
240			return (off * NDENTRIES + flsl(map[off]) - 1);
241	return (-1);
242}
243
244static int
245fdisused(struct filedesc *fdp, int fd)
246{
247
248	FILEDESC_LOCK_ASSERT(fdp);
249
250	KASSERT(fd >= 0 && fd < fdp->fd_nfiles,
251	    ("file descriptor %d out of range (0, %d)", fd, fdp->fd_nfiles));
252
253	return ((fdp->fd_map[NDSLOT(fd)] & NDBIT(fd)) != 0);
254}
255
256/*
257 * Mark a file descriptor as used.
258 */
259static void
260fdused(struct filedesc *fdp, int fd)
261{
262
263	FILEDESC_XLOCK_ASSERT(fdp);
264
265	KASSERT(!fdisused(fdp, fd), ("fd=%d is already used", fd));
266
267	fdp->fd_map[NDSLOT(fd)] |= NDBIT(fd);
268	if (fd > fdp->fd_lastfile)
269		fdp->fd_lastfile = fd;
270	if (fd == fdp->fd_freefile)
271		fdp->fd_freefile = fd_first_free(fdp, fd, fdp->fd_nfiles);
272}
273
274/*
275 * Mark a file descriptor as unused.
276 */
277static void
278fdunused(struct filedesc *fdp, int fd)
279{
280
281	FILEDESC_XLOCK_ASSERT(fdp);
282
283	KASSERT(fdisused(fdp, fd), ("fd=%d is already unused", fd));
284	KASSERT(fdp->fd_ofiles[fd].fde_file == NULL,
285	    ("fd=%d is still in use", fd));
286
287	fdp->fd_map[NDSLOT(fd)] &= ~NDBIT(fd);
288	if (fd < fdp->fd_freefile)
289		fdp->fd_freefile = fd;
290	if (fd == fdp->fd_lastfile)
291		fdp->fd_lastfile = fd_last_used(fdp, fd);
292}
293
294/*
295 * Free a file descriptor.
296 */
297static inline void
298fdfree(struct filedesc *fdp, int fd)
299{
300	struct filedescent *fde;
301
302	fde = &fdp->fd_ofiles[fd];
303	filecaps_free(&fde->fde_caps);
304	bzero(fde, sizeof(*fde));
305	fdunused(fdp, fd);
306}
307
308/*
309 * System calls on descriptors.
310 */
311#ifndef _SYS_SYSPROTO_H_
312struct getdtablesize_args {
313	int	dummy;
314};
315#endif
316/* ARGSUSED */
317int
318sys_getdtablesize(struct thread *td, struct getdtablesize_args *uap)
319{
320	struct proc *p = td->td_proc;
321	uint64_t lim;
322
323	PROC_LOCK(p);
324	td->td_retval[0] =
325	    min((int)lim_cur(p, RLIMIT_NOFILE), maxfilesperproc);
326	lim = racct_get_limit(td->td_proc, RACCT_NOFILE);
327	PROC_UNLOCK(p);
328	if (lim < td->td_retval[0])
329		td->td_retval[0] = lim;
330	return (0);
331}
332
333/*
334 * Duplicate a file descriptor to a particular value.
335 *
336 * Note: keep in mind that a potential race condition exists when closing
337 * descriptors from a shared descriptor table (via rfork).
338 */
339#ifndef _SYS_SYSPROTO_H_
340struct dup2_args {
341	u_int	from;
342	u_int	to;
343};
344#endif
345/* ARGSUSED */
346int
347sys_dup2(struct thread *td, struct dup2_args *uap)
348{
349
350	return (do_dup(td, DUP_FIXED, (int)uap->from, (int)uap->to,
351		    td->td_retval));
352}
353
354/*
355 * Duplicate a file descriptor.
356 */
357#ifndef _SYS_SYSPROTO_H_
358struct dup_args {
359	u_int	fd;
360};
361#endif
362/* ARGSUSED */
363int
364sys_dup(struct thread *td, struct dup_args *uap)
365{
366
367	return (do_dup(td, 0, (int)uap->fd, 0, td->td_retval));
368}
369
370/*
371 * The file control system call.
372 */
373#ifndef _SYS_SYSPROTO_H_
374struct fcntl_args {
375	int	fd;
376	int	cmd;
377	long	arg;
378};
379#endif
380/* ARGSUSED */
381int
382sys_fcntl(struct thread *td, struct fcntl_args *uap)
383{
384	struct flock fl;
385	struct __oflock ofl;
386	intptr_t arg;
387	int error;
388	int cmd;
389
390	error = 0;
391	cmd = uap->cmd;
392	switch (uap->cmd) {
393	case F_OGETLK:
394	case F_OSETLK:
395	case F_OSETLKW:
396		/*
397		 * Convert old flock structure to new.
398		 */
399		error = copyin((void *)(intptr_t)uap->arg, &ofl, sizeof(ofl));
400		fl.l_start = ofl.l_start;
401		fl.l_len = ofl.l_len;
402		fl.l_pid = ofl.l_pid;
403		fl.l_type = ofl.l_type;
404		fl.l_whence = ofl.l_whence;
405		fl.l_sysid = 0;
406
407		switch (uap->cmd) {
408		case F_OGETLK:
409		    cmd = F_GETLK;
410		    break;
411		case F_OSETLK:
412		    cmd = F_SETLK;
413		    break;
414		case F_OSETLKW:
415		    cmd = F_SETLKW;
416		    break;
417		}
418		arg = (intptr_t)&fl;
419		break;
420        case F_GETLK:
421        case F_SETLK:
422        case F_SETLKW:
423	case F_SETLK_REMOTE:
424                error = copyin((void *)(intptr_t)uap->arg, &fl, sizeof(fl));
425                arg = (intptr_t)&fl;
426                break;
427	default:
428		arg = uap->arg;
429		break;
430	}
431	if (error)
432		return (error);
433	error = kern_fcntl(td, uap->fd, cmd, arg);
434	if (error)
435		return (error);
436	if (uap->cmd == F_OGETLK) {
437		ofl.l_start = fl.l_start;
438		ofl.l_len = fl.l_len;
439		ofl.l_pid = fl.l_pid;
440		ofl.l_type = fl.l_type;
441		ofl.l_whence = fl.l_whence;
442		error = copyout(&ofl, (void *)(intptr_t)uap->arg, sizeof(ofl));
443	} else if (uap->cmd == F_GETLK) {
444		error = copyout(&fl, (void *)(intptr_t)uap->arg, sizeof(fl));
445	}
446	return (error);
447}
448
449int
450kern_fcntl(struct thread *td, int fd, int cmd, intptr_t arg)
451{
452	struct filedesc *fdp;
453	struct flock *flp;
454	struct file *fp, *fp2;
455	struct filedescent *fde;
456	struct proc *p;
457	struct vnode *vp;
458	int error, flg, tmp;
459	u_int old, new;
460	uint64_t bsize;
461	off_t foffset;
462
463	error = 0;
464	flg = F_POSIX;
465	p = td->td_proc;
466	fdp = p->p_fd;
467
468	switch (cmd) {
469	case F_DUPFD:
470		tmp = arg;
471		error = do_dup(td, DUP_FCNTL, fd, tmp, td->td_retval);
472		break;
473
474	case F_DUPFD_CLOEXEC:
475		tmp = arg;
476		error = do_dup(td, DUP_FCNTL | DUP_CLOEXEC, fd, tmp,
477		    td->td_retval);
478		break;
479
480	case F_DUP2FD:
481		tmp = arg;
482		error = do_dup(td, DUP_FIXED, fd, tmp, td->td_retval);
483		break;
484
485	case F_DUP2FD_CLOEXEC:
486		tmp = arg;
487		error = do_dup(td, DUP_FIXED | DUP_CLOEXEC, fd, tmp,
488		    td->td_retval);
489		break;
490
491	case F_GETFD:
492		FILEDESC_SLOCK(fdp);
493		if ((fp = fget_locked(fdp, fd)) == NULL) {
494			FILEDESC_SUNLOCK(fdp);
495			error = EBADF;
496			break;
497		}
498		fde = &fdp->fd_ofiles[fd];
499		td->td_retval[0] =
500		    (fde->fde_flags & UF_EXCLOSE) ? FD_CLOEXEC : 0;
501		FILEDESC_SUNLOCK(fdp);
502		break;
503
504	case F_SETFD:
505		FILEDESC_XLOCK(fdp);
506		if ((fp = fget_locked(fdp, fd)) == NULL) {
507			FILEDESC_XUNLOCK(fdp);
508			error = EBADF;
509			break;
510		}
511		fde = &fdp->fd_ofiles[fd];
512		fde->fde_flags = (fde->fde_flags & ~UF_EXCLOSE) |
513		    (arg & FD_CLOEXEC ? UF_EXCLOSE : 0);
514		FILEDESC_XUNLOCK(fdp);
515		break;
516
517	case F_GETFL:
518		error = fget_unlocked(fdp, fd, CAP_FCNTL, F_GETFL, &fp, NULL);
519		if (error != 0)
520			break;
521		td->td_retval[0] = OFLAGS(fp->f_flag);
522		fdrop(fp, td);
523		break;
524
525	case F_SETFL:
526		error = fget_unlocked(fdp, fd, CAP_FCNTL, F_SETFL, &fp, NULL);
527		if (error != 0)
528			break;
529		do {
530			tmp = flg = fp->f_flag;
531			tmp &= ~FCNTLFLAGS;
532			tmp |= FFLAGS(arg & ~O_ACCMODE) & FCNTLFLAGS;
533		} while(atomic_cmpset_int(&fp->f_flag, flg, tmp) == 0);
534		tmp = fp->f_flag & FNONBLOCK;
535		error = fo_ioctl(fp, FIONBIO, &tmp, td->td_ucred, td);
536		if (error != 0) {
537			fdrop(fp, td);
538			break;
539		}
540		tmp = fp->f_flag & FASYNC;
541		error = fo_ioctl(fp, FIOASYNC, &tmp, td->td_ucred, td);
542		if (error == 0) {
543			fdrop(fp, td);
544			break;
545		}
546		atomic_clear_int(&fp->f_flag, FNONBLOCK);
547		tmp = 0;
548		(void)fo_ioctl(fp, FIONBIO, &tmp, td->td_ucred, td);
549		fdrop(fp, td);
550		break;
551
552	case F_GETOWN:
553		error = fget_unlocked(fdp, fd, CAP_FCNTL, F_GETOWN, &fp, NULL);
554		if (error != 0)
555			break;
556		error = fo_ioctl(fp, FIOGETOWN, &tmp, td->td_ucred, td);
557		if (error == 0)
558			td->td_retval[0] = tmp;
559		fdrop(fp, td);
560		break;
561
562	case F_SETOWN:
563		error = fget_unlocked(fdp, fd, CAP_FCNTL, F_SETOWN, &fp, NULL);
564		if (error != 0)
565			break;
566		tmp = arg;
567		error = fo_ioctl(fp, FIOSETOWN, &tmp, td->td_ucred, td);
568		fdrop(fp, td);
569		break;
570
571	case F_SETLK_REMOTE:
572		error = priv_check(td, PRIV_NFS_LOCKD);
573		if (error)
574			return (error);
575		flg = F_REMOTE;
576		goto do_setlk;
577
578	case F_SETLKW:
579		flg |= F_WAIT;
580		/* FALLTHROUGH F_SETLK */
581
582	case F_SETLK:
583	do_setlk:
584		error = fget_unlocked(fdp, fd, CAP_FLOCK, 0, &fp, NULL);
585		if (error != 0)
586			break;
587		if (fp->f_type != DTYPE_VNODE) {
588			error = EBADF;
589			fdrop(fp, td);
590			break;
591		}
592
593		flp = (struct flock *)arg;
594		if (flp->l_whence == SEEK_CUR) {
595			foffset = foffset_get(fp);
596			if (foffset < 0 ||
597			    (flp->l_start > 0 &&
598			     foffset > OFF_MAX - flp->l_start)) {
599				FILEDESC_SUNLOCK(fdp);
600				error = EOVERFLOW;
601				fdrop(fp, td);
602				break;
603			}
604			flp->l_start += foffset;
605		}
606
607		vp = fp->f_vnode;
608		switch (flp->l_type) {
609		case F_RDLCK:
610			if ((fp->f_flag & FREAD) == 0) {
611				error = EBADF;
612				break;
613			}
614			PROC_LOCK(p->p_leader);
615			p->p_leader->p_flag |= P_ADVLOCK;
616			PROC_UNLOCK(p->p_leader);
617			error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_SETLK,
618			    flp, flg);
619			break;
620		case F_WRLCK:
621			if ((fp->f_flag & FWRITE) == 0) {
622				error = EBADF;
623				break;
624			}
625			PROC_LOCK(p->p_leader);
626			p->p_leader->p_flag |= P_ADVLOCK;
627			PROC_UNLOCK(p->p_leader);
628			error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_SETLK,
629			    flp, flg);
630			break;
631		case F_UNLCK:
632			error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_UNLCK,
633			    flp, flg);
634			break;
635		case F_UNLCKSYS:
636			/*
637			 * Temporary api for testing remote lock
638			 * infrastructure.
639			 */
640			if (flg != F_REMOTE) {
641				error = EINVAL;
642				break;
643			}
644			error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader,
645			    F_UNLCKSYS, flp, flg);
646			break;
647		default:
648			error = EINVAL;
649			break;
650		}
651		if (error != 0 || flp->l_type == F_UNLCK ||
652		    flp->l_type == F_UNLCKSYS) {
653			fdrop(fp, td);
654			break;
655		}
656
657		/*
658		 * Check for a race with close.
659		 *
660		 * The vnode is now advisory locked (or unlocked, but this case
661		 * is not really important) as the caller requested.
662		 * We had to drop the filedesc lock, so we need to recheck if
663		 * the descriptor is still valid, because if it was closed
664		 * in the meantime we need to remove the advisory lock from the
665		 * vnode - close on any descriptor leading to an advisory
666		 * locked vnode removes that lock.
667		 * We will return 0 on purpose in that case, as the result of
668		 * successful advisory lock might have been externally visible
669		 * already. This is fine - effectively we pretend to the caller
670		 * that the closing thread was a bit slower and that the
671		 * advisory lock succeeded before the close.
672		 */
673		error = fget_unlocked(fdp, fd, 0, 0, &fp2, NULL);
674		if (error != 0) {
675			fdrop(fp, td);
676			break;
677		}
678		if (fp != fp2) {
679			flp->l_whence = SEEK_SET;
680			flp->l_start = 0;
681			flp->l_len = 0;
682			flp->l_type = F_UNLCK;
683			(void) VOP_ADVLOCK(vp, (caddr_t)p->p_leader,
684			    F_UNLCK, flp, F_POSIX);
685		}
686		fdrop(fp, td);
687		fdrop(fp2, td);
688		break;
689
690	case F_GETLK:
691		error = fget_unlocked(fdp, fd, CAP_FLOCK, 0, &fp, NULL);
692		if (error != 0)
693			break;
694		if (fp->f_type != DTYPE_VNODE) {
695			error = EBADF;
696			fdrop(fp, td);
697			break;
698		}
699		flp = (struct flock *)arg;
700		if (flp->l_type != F_RDLCK && flp->l_type != F_WRLCK &&
701		    flp->l_type != F_UNLCK) {
702			error = EINVAL;
703			fdrop(fp, td);
704			break;
705		}
706		if (flp->l_whence == SEEK_CUR) {
707			foffset = foffset_get(fp);
708			if ((flp->l_start > 0 &&
709			    foffset > OFF_MAX - flp->l_start) ||
710			    (flp->l_start < 0 &&
711			     foffset < OFF_MIN - flp->l_start)) {
712				FILEDESC_SUNLOCK(fdp);
713				error = EOVERFLOW;
714				fdrop(fp, td);
715				break;
716			}
717			flp->l_start += foffset;
718		}
719		vp = fp->f_vnode;
720		error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_GETLK, flp,
721		    F_POSIX);
722		fdrop(fp, td);
723		break;
724
725	case F_RDAHEAD:
726		arg = arg ? 128 * 1024: 0;
727		/* FALLTHROUGH */
728	case F_READAHEAD:
729		error = fget_unlocked(fdp, fd, 0, 0, &fp, NULL);
730		if (error != 0)
731			break;
732		if (fp->f_type != DTYPE_VNODE) {
733			fdrop(fp, td);
734			error = EBADF;
735			break;
736		}
737		if (arg >= 0) {
738			vp = fp->f_vnode;
739			error = vn_lock(vp, LK_SHARED);
740			if (error != 0) {
741				fdrop(fp, td);
742				break;
743			}
744			bsize = fp->f_vnode->v_mount->mnt_stat.f_iosize;
745			VOP_UNLOCK(vp, 0);
746			fp->f_seqcount = (arg + bsize - 1) / bsize;
747			do {
748				new = old = fp->f_flag;
749				new |= FRDAHEAD;
750			} while (!atomic_cmpset_rel_int(&fp->f_flag, old, new));
751		} else {
752			do {
753				new = old = fp->f_flag;
754				new &= ~FRDAHEAD;
755			} while (!atomic_cmpset_rel_int(&fp->f_flag, old, new));
756		}
757		fdrop(fp, td);
758		break;
759
760	default:
761		error = EINVAL;
762		break;
763	}
764	return (error);
765}
766
767/*
768 * Common code for dup, dup2, fcntl(F_DUPFD) and fcntl(F_DUP2FD).
769 */
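/*
 * In the flags argument, DUP_FIXED requests dup2()-style behaviour (the
 * caller chooses the new descriptor number), DUP_FCNTL requests F_DUPFD
 * error semantics (EINVAL rather than EBADF for an out-of-range target),
 * and DUP_CLOEXEC marks the new descriptor close-on-exec.
 */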
770int
771do_dup(struct thread *td, int flags, int old, int new,
772    register_t *retval)
773{
774	struct filedesc *fdp;
775	struct filedescent *oldfde, *newfde;
776	struct proc *p;
777	struct file *fp;
778	struct file *delfp;
779	int error, maxfd;
780
781	p = td->td_proc;
782	fdp = p->p_fd;
783
784	/*
785	 * Verify we have a valid descriptor to dup from and possibly to
786	 * dup to. Unlike dup() and dup2(), fcntl()'s F_DUPFD should
787	 * return EINVAL when the new descriptor is out of bounds.
788	 */
789	if (old < 0)
790		return (EBADF);
791	if (new < 0)
792		return (flags & DUP_FCNTL ? EINVAL : EBADF);
793	PROC_LOCK(p);
794	maxfd = min((int)lim_cur(p, RLIMIT_NOFILE), maxfilesperproc);
795	PROC_UNLOCK(p);
796	if (new >= maxfd)
797		return (flags & DUP_FCNTL ? EINVAL : EBADF);
798
799	FILEDESC_XLOCK(fdp);
800	if (fget_locked(fdp, old) == NULL) {
801		FILEDESC_XUNLOCK(fdp);
802		return (EBADF);
803	}
804	oldfde = &fdp->fd_ofiles[old];
805	if (flags & DUP_FIXED && old == new) {
806		*retval = new;
807		if (flags & DUP_CLOEXEC)
808			fdp->fd_ofiles[new].fde_flags |= UF_EXCLOSE;
809		FILEDESC_XUNLOCK(fdp);
810		return (0);
811	}
812	fp = oldfde->fde_file;
813	fhold(fp);
814
815	/*
816	 * If the caller specified a file descriptor, make sure the file
817	 * table is large enough to hold it, and grab it.  Otherwise, just
818	 * allocate a new descriptor the usual way.
819	 */
820	if (flags & DUP_FIXED) {
821		if (new >= fdp->fd_nfiles) {
822			/*
823			 * The resource limits are here instead of e.g.
824			 * fdalloc(), because the file descriptor table may be
825			 * shared between processes, so we can't really use
826			 * racct_add()/racct_sub().  Instead of counting the
827			 * number of actually allocated descriptors, just put
828			 * the limit on the size of the file descriptor table.
829			 */
830#ifdef RACCT
831			PROC_LOCK(p);
832			error = racct_set(p, RACCT_NOFILE, new + 1);
833			PROC_UNLOCK(p);
834			if (error != 0) {
835				FILEDESC_XUNLOCK(fdp);
836				fdrop(fp, td);
837				return (EMFILE);
838			}
839#endif
840			fdgrowtable(fdp, new + 1);
841			oldfde = &fdp->fd_ofiles[old];
842		}
843		newfde = &fdp->fd_ofiles[new];
844		if (newfde->fde_file == NULL)
845			fdused(fdp, new);
846	} else {
847		if ((error = fdalloc(td, new, &new)) != 0) {
848			FILEDESC_XUNLOCK(fdp);
849			fdrop(fp, td);
850			return (error);
851		}
852		newfde = &fdp->fd_ofiles[new];
853	}
854
855	KASSERT(fp == oldfde->fde_file, ("old fd has been modified"));
856	KASSERT(old != new, ("new fd is same as old"));
857
858	delfp = newfde->fde_file;
859
860	/*
861	 * Duplicate the source descriptor.
862	 */
863	*newfde = *oldfde;
864	filecaps_copy(&oldfde->fde_caps, &newfde->fde_caps);
865	if ((flags & DUP_CLOEXEC) != 0)
866		newfde->fde_flags = oldfde->fde_flags | UF_EXCLOSE;
867	else
868		newfde->fde_flags = oldfde->fde_flags & ~UF_EXCLOSE;
869	if (new > fdp->fd_lastfile)
870		fdp->fd_lastfile = new;
871	*retval = new;
872
873	if (delfp != NULL) {
874		(void) closefp(fdp, new, delfp, td, 1);
875		/* closefp() drops the FILEDESC lock for us. */
876	} else {
877		FILEDESC_XUNLOCK(fdp);
878	}
879
880	return (0);
881}
882
883/*
884 * If sigio is on the list associated with a process or process group,
885 * disable signalling from the device, remove sigio from the list and
886 * free sigio.
887 */
888void
889funsetown(struct sigio **sigiop)
890{
891	struct sigio *sigio;
892
893	SIGIO_LOCK();
894	sigio = *sigiop;
895	if (sigio == NULL) {
896		SIGIO_UNLOCK();
897		return;
898	}
899	*(sigio->sio_myref) = NULL;
900	if ((sigio)->sio_pgid < 0) {
901		struct pgrp *pg = (sigio)->sio_pgrp;
902		PGRP_LOCK(pg);
903		SLIST_REMOVE(&sigio->sio_pgrp->pg_sigiolst, sigio,
904			     sigio, sio_pgsigio);
905		PGRP_UNLOCK(pg);
906	} else {
907		struct proc *p = (sigio)->sio_proc;
908		PROC_LOCK(p);
909		SLIST_REMOVE(&sigio->sio_proc->p_sigiolst, sigio,
910			     sigio, sio_pgsigio);
911		PROC_UNLOCK(p);
912	}
913	SIGIO_UNLOCK();
914	crfree(sigio->sio_ucred);
915	free(sigio, M_SIGIO);
916}
917
918/*
919 * Free a list of sigio structures.
920 * We only need to lock the SIGIO_LOCK because we have made ourselves
921 * inaccessible to callers of fsetown and therefore do not need to lock
922 * the proc or pgrp struct for the list manipulation.
923 */
924void
925funsetownlst(struct sigiolst *sigiolst)
926{
927	struct proc *p;
928	struct pgrp *pg;
929	struct sigio *sigio;
930
931	sigio = SLIST_FIRST(sigiolst);
932	if (sigio == NULL)
933		return;
934	p = NULL;
935	pg = NULL;
936
937	/*
938	 * Every entry of the list should belong
939	 * to a single proc or pgrp.
940	 */
941	if (sigio->sio_pgid < 0) {
942		pg = sigio->sio_pgrp;
943		PGRP_LOCK_ASSERT(pg, MA_NOTOWNED);
944	} else /* if (sigio->sio_pgid > 0) */ {
945		p = sigio->sio_proc;
946		PROC_LOCK_ASSERT(p, MA_NOTOWNED);
947	}
948
949	SIGIO_LOCK();
950	while ((sigio = SLIST_FIRST(sigiolst)) != NULL) {
951		*(sigio->sio_myref) = NULL;
952		if (pg != NULL) {
953			KASSERT(sigio->sio_pgid < 0,
954			    ("Proc sigio in pgrp sigio list"));
955			KASSERT(sigio->sio_pgrp == pg,
956			    ("Bogus pgrp in sigio list"));
957			PGRP_LOCK(pg);
958			SLIST_REMOVE(&pg->pg_sigiolst, sigio, sigio,
959			    sio_pgsigio);
960			PGRP_UNLOCK(pg);
961		} else /* if (p != NULL) */ {
962			KASSERT(sigio->sio_pgid > 0,
963			    ("Pgrp sigio in proc sigio list"));
964			KASSERT(sigio->sio_proc == p,
965			    ("Bogus proc in sigio list"));
966			PROC_LOCK(p);
967			SLIST_REMOVE(&p->p_sigiolst, sigio, sigio,
968			    sio_pgsigio);
969			PROC_UNLOCK(p);
970		}
971		SIGIO_UNLOCK();
972		crfree(sigio->sio_ucred);
973		free(sigio, M_SIGIO);
974		SIGIO_LOCK();
975	}
976	SIGIO_UNLOCK();
977}
978
979/*
980 * This is common code for FIOSETOWN ioctl called by fcntl(fd, F_SETOWN, arg).
981 *
982 * After permission checking, add a sigio structure to the sigio list for
983 * the process or process group.
984 */
985int
986fsetown(pid_t pgid, struct sigio **sigiop)
987{
988	struct proc *proc;
989	struct pgrp *pgrp;
990	struct sigio *sigio;
991	int ret;
992
993	if (pgid == 0) {
994		funsetown(sigiop);
995		return (0);
996	}
997
998	ret = 0;
999
1000	/* Allocate and fill in the new sigio out of locks. */
1001	sigio = malloc(sizeof(struct sigio), M_SIGIO, M_WAITOK);
1002	sigio->sio_pgid = pgid;
1003	sigio->sio_ucred = crhold(curthread->td_ucred);
1004	sigio->sio_myref = sigiop;
1005
1006	sx_slock(&proctree_lock);
1007	if (pgid > 0) {
1008		proc = pfind(pgid);
1009		if (proc == NULL) {
1010			ret = ESRCH;
1011			goto fail;
1012		}
1013
1014		/*
1015		 * Policy - Don't allow a process to FSETOWN a process
1016		 * in another session.
1017		 *
1018		 * Remove this test to allow maximum flexibility or
1019		 * restrict FSETOWN to the current process or process
1020		 * group for maximum safety.
1021		 */
1022		PROC_UNLOCK(proc);
1023		if (proc->p_session != curthread->td_proc->p_session) {
1024			ret = EPERM;
1025			goto fail;
1026		}
1027
1028		pgrp = NULL;
1029	} else /* if (pgid < 0) */ {
1030		pgrp = pgfind(-pgid);
1031		if (pgrp == NULL) {
1032			ret = ESRCH;
1033			goto fail;
1034		}
1035		PGRP_UNLOCK(pgrp);
1036
1037		/*
1038		 * Policy - Don't allow a process to FSETOWN a process
1039		 * in another session.
1040		 *
1041		 * Remove this test to allow maximum flexibility or
1042		 * restrict FSETOWN to the current process or process
1043		 * group for maximum safety.
1044		 */
1045		if (pgrp->pg_session != curthread->td_proc->p_session) {
1046			ret = EPERM;
1047			goto fail;
1048		}
1049
1050		proc = NULL;
1051	}
1052	funsetown(sigiop);
1053	if (pgid > 0) {
1054		PROC_LOCK(proc);
1055		/*
1056		 * Since funsetownlst() is called without the proctree
1057		 * locked, we need to check for P_WEXIT.
1058		 * XXX: is ESRCH correct?
1059		 */
1060		if ((proc->p_flag & P_WEXIT) != 0) {
1061			PROC_UNLOCK(proc);
1062			ret = ESRCH;
1063			goto fail;
1064		}
1065		SLIST_INSERT_HEAD(&proc->p_sigiolst, sigio, sio_pgsigio);
1066		sigio->sio_proc = proc;
1067		PROC_UNLOCK(proc);
1068	} else {
1069		PGRP_LOCK(pgrp);
1070		SLIST_INSERT_HEAD(&pgrp->pg_sigiolst, sigio, sio_pgsigio);
1071		sigio->sio_pgrp = pgrp;
1072		PGRP_UNLOCK(pgrp);
1073	}
1074	sx_sunlock(&proctree_lock);
1075	SIGIO_LOCK();
1076	*sigiop = sigio;
1077	SIGIO_UNLOCK();
1078	return (0);
1079
1080fail:
1081	sx_sunlock(&proctree_lock);
1082	crfree(sigio->sio_ucred);
1083	free(sigio, M_SIGIO);
1084	return (ret);
1085}
1086
1087/*
1088 * This is common code for FIOGETOWN ioctl called by fcntl(fd, F_GETOWN, arg).
1089 */
1090pid_t
1091fgetown(sigiop)
1092	struct sigio **sigiop;
1093{
1094	pid_t pgid;
1095
1096	SIGIO_LOCK();
1097	pgid = (*sigiop != NULL) ? (*sigiop)->sio_pgid : 0;
1098	SIGIO_UNLOCK();
1099	return (pgid);
1100}
1101
1102/*
1103 * Function drops the filedesc lock on return.
1104 */
1105static int
1106closefp(struct filedesc *fdp, int fd, struct file *fp, struct thread *td,
1107    int holdleaders)
1108{
1109	int error;
1110
1111	FILEDESC_XLOCK_ASSERT(fdp);
1112
1113	if (holdleaders) {
1114		if (td->td_proc->p_fdtol != NULL) {
1115			/*
1116			 * Ask fdfree() to sleep to ensure that all relevant
1117			 * process leaders can be traversed in closef().
1118			 */
1119			fdp->fd_holdleaderscount++;
1120		} else {
1121			holdleaders = 0;
1122		}
1123	}
1124
1125	/*
1126	 * We now hold the fp reference that used to be owned by the
1127	 * descriptor array.  We have to unlock the FILEDESC *AFTER*
1128	 * knote_fdclose to prevent a race of the fd getting opened, a knote
1129	 * added, and deleting a knote for the new fd.
1130	 */
1131	knote_fdclose(td, fd);
1132
1133	/*
1134	 * We need to notify mqueue if the object is of type mqueue.
1135	 */
1136	if (fp->f_type == DTYPE_MQUEUE)
1137		mq_fdclose(td, fd, fp);
1138	FILEDESC_XUNLOCK(fdp);
1139
1140	error = closef(fp, td);
1141	if (holdleaders) {
1142		FILEDESC_XLOCK(fdp);
1143		fdp->fd_holdleaderscount--;
1144		if (fdp->fd_holdleaderscount == 0 &&
1145		    fdp->fd_holdleaderswakeup != 0) {
1146			fdp->fd_holdleaderswakeup = 0;
1147			wakeup(&fdp->fd_holdleaderscount);
1148		}
1149		FILEDESC_XUNLOCK(fdp);
1150	}
1151	return (error);
1152}
1153
1154/*
1155 * Close a file descriptor.
1156 */
1157#ifndef _SYS_SYSPROTO_H_
1158struct close_args {
1159	int     fd;
1160};
1161#endif
1162/* ARGSUSED */
1163int
1164sys_close(td, uap)
1165	struct thread *td;
1166	struct close_args *uap;
1167{
1168
1169	return (kern_close(td, uap->fd));
1170}
1171
1172int
1173kern_close(td, fd)
1174	struct thread *td;
1175	int fd;
1176{
1177	struct filedesc *fdp;
1178	struct file *fp;
1179
1180	fdp = td->td_proc->p_fd;
1181
1182	AUDIT_SYSCLOSE(td, fd);
1183
1184	FILEDESC_XLOCK(fdp);
1185	if ((fp = fget_locked(fdp, fd)) == NULL) {
1186		FILEDESC_XUNLOCK(fdp);
1187		return (EBADF);
1188	}
1189	fdfree(fdp, fd);
1190
1191	/* closefp() drops the FILEDESC lock for us. */
1192	return (closefp(fdp, fd, fp, td, 1));
1193}
1194
1195/*
1196 * Close all open file descriptors greater than or equal to lowfd.
1197 */
1198#ifndef _SYS_SYSPROTO_H_
1199struct closefrom_args {
1200	int	lowfd;
1201};
1202#endif
1203/* ARGSUSED */
1204int
1205sys_closefrom(struct thread *td, struct closefrom_args *uap)
1206{
1207	struct filedesc *fdp;
1208	int fd;
1209
1210	fdp = td->td_proc->p_fd;
1211	AUDIT_ARG_FD(uap->lowfd);
1212
1213	/*
1214	 * Treat negative starting file descriptor values identically to
1215	 * closefrom(0), which closes all files.
1216	 */
1217	if (uap->lowfd < 0)
1218		uap->lowfd = 0;
1219	FILEDESC_SLOCK(fdp);
1220	for (fd = uap->lowfd; fd < fdp->fd_nfiles; fd++) {
1221		if (fdp->fd_ofiles[fd].fde_file != NULL) {
1222			FILEDESC_SUNLOCK(fdp);
1223			(void)kern_close(td, fd);
1224			FILEDESC_SLOCK(fdp);
1225		}
1226	}
1227	FILEDESC_SUNLOCK(fdp);
1228	return (0);
1229}
1230
1231#if defined(COMPAT_43)
1232/*
1233 * Return status information about a file descriptor.
1234 */
1235#ifndef _SYS_SYSPROTO_H_
1236struct ofstat_args {
1237	int	fd;
1238	struct	ostat *sb;
1239};
1240#endif
1241/* ARGSUSED */
1242int
1243ofstat(struct thread *td, struct ofstat_args *uap)
1244{
1245	struct ostat oub;
1246	struct stat ub;
1247	int error;
1248
1249	error = kern_fstat(td, uap->fd, &ub);
1250	if (error == 0) {
1251		cvtstat(&ub, &oub);
1252		error = copyout(&oub, uap->sb, sizeof(oub));
1253	}
1254	return (error);
1255}
1256#endif /* COMPAT_43 */
1257
1258/*
1259 * Return status information about a file descriptor.
1260 */
1261#ifndef _SYS_SYSPROTO_H_
1262struct fstat_args {
1263	int	fd;
1264	struct	stat *sb;
1265};
1266#endif
1267/* ARGSUSED */
1268int
1269sys_fstat(struct thread *td, struct fstat_args *uap)
1270{
1271	struct stat ub;
1272	int error;
1273
1274	error = kern_fstat(td, uap->fd, &ub);
1275	if (error == 0)
1276		error = copyout(&ub, uap->sb, sizeof(ub));
1277	return (error);
1278}
1279
1280int
1281kern_fstat(struct thread *td, int fd, struct stat *sbp)
1282{
1283	struct file *fp;
1284	int error;
1285
1286	AUDIT_ARG_FD(fd);
1287
1288	if ((error = fget(td, fd, CAP_FSTAT, &fp)) != 0)
1289		return (error);
1290
1291	AUDIT_ARG_FILE(td->td_proc, fp);
1292
1293	error = fo_stat(fp, sbp, td->td_ucred, td);
1294	fdrop(fp, td);
1295#ifdef KTRACE
1296	if (error == 0 && KTRPOINT(td, KTR_STRUCT))
1297		ktrstat(sbp);
1298#endif
1299	return (error);
1300}
1301
1302/*
1303 * Return status information about a file descriptor.
1304 */
1305#ifndef _SYS_SYSPROTO_H_
1306struct nfstat_args {
1307	int	fd;
1308	struct	nstat *sb;
1309};
1310#endif
1311/* ARGSUSED */
1312int
1313sys_nfstat(struct thread *td, struct nfstat_args *uap)
1314{
1315	struct nstat nub;
1316	struct stat ub;
1317	int error;
1318
1319	error = kern_fstat(td, uap->fd, &ub);
1320	if (error == 0) {
1321		cvtnstat(&ub, &nub);
1322		error = copyout(&nub, uap->sb, sizeof(nub));
1323	}
1324	return (error);
1325}
1326
1327/*
1328 * Return pathconf information about a file descriptor.
1329 */
1330#ifndef _SYS_SYSPROTO_H_
1331struct fpathconf_args {
1332	int	fd;
1333	int	name;
1334};
1335#endif
1336/* ARGSUSED */
1337int
1338sys_fpathconf(struct thread *td, struct fpathconf_args *uap)
1339{
1340	struct file *fp;
1341	struct vnode *vp;
1342	int error;
1343
1344	if ((error = fget(td, uap->fd, CAP_FPATHCONF, &fp)) != 0)
1345		return (error);
1346
1347	/* If asynchronous I/O is available, it works for all descriptors. */
1348	if (uap->name == _PC_ASYNC_IO) {
1349		td->td_retval[0] = async_io_version;
1350		goto out;
1351	}
1352	vp = fp->f_vnode;
1353	if (vp != NULL) {
1354		vn_lock(vp, LK_SHARED | LK_RETRY);
1355		error = VOP_PATHCONF(vp, uap->name, td->td_retval);
1356		VOP_UNLOCK(vp, 0);
1357	} else if (fp->f_type == DTYPE_PIPE || fp->f_type == DTYPE_SOCKET) {
1358		if (uap->name != _PC_PIPE_BUF) {
1359			error = EINVAL;
1360		} else {
1361			td->td_retval[0] = PIPE_BUF;
1362			error = 0;
1363		}
1364	} else {
1365		error = EOPNOTSUPP;
1366	}
1367out:
1368	fdrop(fp, td);
1369	return (error);
1370}
1371
1372/*
1373 * Initialize filecaps structure.
1374 */
1375void
1376filecaps_init(struct filecaps *fcaps)
1377{
1378
1379	bzero(fcaps, sizeof(*fcaps));
1380	fcaps->fc_nioctls = -1;
1381}
1382
1383/*
1384 * Copy filecaps structure allocating memory for ioctls array if needed.
1385 */
1386void
1387filecaps_copy(const struct filecaps *src, struct filecaps *dst)
1388{
1389	size_t size;
1390
1391	*dst = *src;
1392	if (src->fc_ioctls != NULL) {
1393		KASSERT(src->fc_nioctls > 0,
1394		    ("fc_ioctls != NULL, but fc_nioctls=%hd", src->fc_nioctls));
1395
1396		size = sizeof(src->fc_ioctls[0]) * src->fc_nioctls;
1397		dst->fc_ioctls = malloc(size, M_FILECAPS, M_WAITOK);
1398		bcopy(src->fc_ioctls, dst->fc_ioctls, size);
1399	}
1400}
1401
1402/*
1403 * Move filecaps structure to the new place and clear the old place.
1404 */
1405void
1406filecaps_move(struct filecaps *src, struct filecaps *dst)
1407{
1408
1409	*dst = *src;
1410	bzero(src, sizeof(*src));
1411}
1412
1413/*
1414 * Fill the given filecaps structure with full rights.
1415 */
1416static void
1417filecaps_fill(struct filecaps *fcaps)
1418{
1419
1420	fcaps->fc_rights = CAP_ALL;
1421	fcaps->fc_ioctls = NULL;
1422	fcaps->fc_nioctls = -1;
1423	fcaps->fc_fcntls = CAP_FCNTL_ALL;
1424}
1425
1426/*
1427 * Free memory allocated within filecaps structure.
1428 */
1429void
1430filecaps_free(struct filecaps *fcaps)
1431{
1432
1433	free(fcaps->fc_ioctls, M_FILECAPS);
1434	bzero(fcaps, sizeof(*fcaps));
1435}
1436
1437/*
1438 * Validate the given filecaps structure.
1439 */
1440static void
1441filecaps_validate(const struct filecaps *fcaps, const char *func)
1442{
1443
1444	KASSERT((fcaps->fc_rights & ~CAP_MASK_VALID) == 0,
1445	    ("%s: invalid rights", func));
1446	KASSERT((fcaps->fc_fcntls & ~CAP_FCNTL_ALL) == 0,
1447	    ("%s: invalid fcntls", func));
1448	KASSERT(fcaps->fc_fcntls == 0 || (fcaps->fc_rights & CAP_FCNTL) != 0,
1449	    ("%s: fcntls without CAP_FCNTL", func));
1450	KASSERT(fcaps->fc_ioctls != NULL ? fcaps->fc_nioctls > 0 :
1451	    (fcaps->fc_nioctls == -1 || fcaps->fc_nioctls == 0),
1452	    ("%s: invalid ioctls", func));
1453	KASSERT(fcaps->fc_nioctls == 0 || (fcaps->fc_rights & CAP_IOCTL) != 0,
1454	    ("%s: ioctls without CAP_IOCTL", func));
1455}
1456
1457/*
1458 * Grow the file table to accommodate (at least) nfd descriptors.
1459 */
1460static void
1461fdgrowtable(struct filedesc *fdp, int nfd)
1462{
1463	struct filedesc0 *fdp0;
1464	struct freetable *ft;
1465	struct filedescent *ntable;
1466	struct filedescent *otable;
1467	int nnfiles, onfiles;
1468	NDSLOTTYPE *nmap, *omap;
1469
1470	FILEDESC_XLOCK_ASSERT(fdp);
1471
1472	KASSERT(fdp->fd_nfiles > 0, ("zero-length file table"));
1473
1474	/* save old values */
1475	onfiles = fdp->fd_nfiles;
1476	otable = fdp->fd_ofiles;
1477	omap = fdp->fd_map;
1478
1479	/* compute the size of the new table */
1480	nnfiles = NDSLOTS(nfd) * NDENTRIES; /* round up */
1481	if (nnfiles <= onfiles)
1482		/* the table is already large enough */
1483		return;
1484
1485	/*
1486	 * Allocate a new table and map.  We need enough space for the
1487	 * file entries themselves and the struct freetable we will use
1488	 * when we decommission the table and place it on the freelist.
1489	 * We place the struct freetable at the end of the allocation,
1490	 * directly after the file entries.
1491	 */
1492	ntable = malloc(nnfiles * sizeof(ntable[0]) + sizeof(struct freetable),
1493	    M_FILEDESC, M_ZERO | M_WAITOK);
1494	nmap = malloc(NDSLOTS(nnfiles) * NDSLOTSIZE, M_FILEDESC,
1495	    M_ZERO | M_WAITOK);
1496
1497	/* copy the old data over and point at the new tables */
1498	memcpy(ntable, otable, onfiles * sizeof(*otable));
1499	memcpy(nmap, omap, NDSLOTS(onfiles) * sizeof(*omap));
1500
1501	/* update the pointers and counters */
1502	fdp->fd_nfiles = nnfiles;
1504	fdp->fd_ofiles = ntable;
1505	fdp->fd_map = nmap;
1506
1507	/*
1508	 * Do not free the old file table, as some threads may still
1509	 * reference entries within it.  Instead, place it on a freelist
1510	 * which will be processed when the struct filedesc is released.
1511	 *
1512	 * Do, however, free the old map.
1513	 *
1514	 * Note that if onfiles == NDFILE, we're dealing with the original
1515	 * static allocation contained within (struct filedesc0 *)fdp,
1516	 * which must not be freed.
1517	 */
1518	if (onfiles > NDFILE) {
1519		ft = (struct freetable *)&otable[onfiles];
1520		fdp0 = (struct filedesc0 *)fdp;
1521		ft->ft_table = otable;
1522		SLIST_INSERT_HEAD(&fdp0->fd_free, ft, ft_next);
1523		free(omap, M_FILEDESC);
1524	}
1525}
1526
1527/*
1528 * Allocate a file descriptor for the process.
1529 */
1530int
1531fdalloc(struct thread *td, int minfd, int *result)
1532{
1533	struct proc *p = td->td_proc;
1534	struct filedesc *fdp = p->p_fd;
1535	int fd = -1, maxfd, allocfd;
1536#ifdef RACCT
1537	int error;
1538#endif
1539
1540	FILEDESC_XLOCK_ASSERT(fdp);
1541
1542	if (fdp->fd_freefile > minfd)
1543		minfd = fdp->fd_freefile;
1544
1545	PROC_LOCK(p);
1546	maxfd = min((int)lim_cur(p, RLIMIT_NOFILE), maxfilesperproc);
1547	PROC_UNLOCK(p);
1548
1549	/*
1550	 * Search the bitmap for a free descriptor starting at minfd.
1551	 * If none is found, grow the file table.
1552	 */
1553	fd = fd_first_free(fdp, minfd, fdp->fd_nfiles);
1554	if (fd >= maxfd)
1555		return (EMFILE);
1556	if (fd >= fdp->fd_nfiles) {
1557		allocfd = min(fd * 2, maxfd);
1558#ifdef RACCT
1559		PROC_LOCK(p);
1560		error = racct_set(p, RACCT_NOFILE, allocfd);
1561		PROC_UNLOCK(p);
1562		if (error != 0)
1563			return (EMFILE);
1564#endif
1565		/*
1566		 * fd is already equal to first free descriptor >= minfd, so
1567		 * we only need to grow the table and we are done.
1568		 */
1569		fdgrowtable(fdp, allocfd);
1570	}
1571
1572	/*
1573	 * Perform some sanity checks, then mark the file descriptor as
1574	 * used and return it to the caller.
1575	 */
1576	KASSERT(fd >= 0 && fd < min(maxfd, fdp->fd_nfiles),
1577	    ("invalid descriptor %d", fd));
1578	KASSERT(!fdisused(fdp, fd),
1579	    ("fd_first_free() returned non-free descriptor"));
1580	KASSERT(fdp->fd_ofiles[fd].fde_file == NULL,
1581	    ("file descriptor isn't free"));
1582	KASSERT(fdp->fd_ofiles[fd].fde_flags == 0, ("file flags are set"));
1583	fdused(fdp, fd);
1584	*result = fd;
1585	return (0);
1586}
1587
1588/*
1589 * Allocate n file descriptors for the process.
1590 */
1591int
1592fdallocn(struct thread *td, int minfd, int *fds, int n)
1593{
1594	struct proc *p = td->td_proc;
1595	struct filedesc *fdp = p->p_fd;
1596	int i;
1597
1598	FILEDESC_XLOCK_ASSERT(fdp);
1599
1600	if (!fdavail(td, n))
1601		return (EMFILE);
1602
1603	for (i = 0; i < n; i++)
1604		if (fdalloc(td, 0, &fds[i]) != 0)
1605			break;
1606
1607	if (i < n) {
1608		for (i--; i >= 0; i--)
1609			fdunused(fdp, fds[i]);
1610		return (EMFILE);
1611	}
1612
1613	return (0);
1614}
1615
1616/*
1617 * Check to see whether n user file descriptors are available to the
1618 * process.
1619 */
1620int
1621fdavail(struct thread *td, int n)
1622{
1623	struct proc *p = td->td_proc;
1624	struct filedesc *fdp = td->td_proc->p_fd;
1625	int i, lim, last;
1626
1627	FILEDESC_LOCK_ASSERT(fdp);
1628
1629	/*
1630	 * XXX: This is only called from uipc_usrreq.c:unp_externalize();
1631	 *      call racct_add() from there instead of dealing with containers
1632	 *      here.
1633	 */
1634	PROC_LOCK(p);
1635	lim = min((int)lim_cur(p, RLIMIT_NOFILE), maxfilesperproc);
1636	PROC_UNLOCK(p);
1637	if ((i = lim - fdp->fd_nfiles) > 0 && (n -= i) <= 0)
1638		return (1);
1639	last = min(fdp->fd_nfiles, lim);
1640	for (i = fdp->fd_freefile; i < last; i++) {
1641		if (fdp->fd_ofiles[i].fde_file == NULL && --n <= 0)
1642			return (1);
1643	}
1644	return (0);
1645}
1646
1647/*
1648 * Create a new open file structure and allocate a file descriptor for the
1649 * process that refers to it.  We add one reference to the file for the
1650 * descriptor table and one reference for resultfp.  This guards against us
1651 * being preempted and having the entry in the descriptor table closed after we
1652 * release the FILEDESC lock.
1653 */
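/*
 * A typical caller pattern (sketch): falloc(td, &fp, &fd, 0), initialize
 * the new object with finit(), return fd to userspace via td->td_retval[0],
 * and finally fdrop(fp, td) to release the local reference taken above.
 */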
1654int
1655falloc(struct thread *td, struct file **resultfp, int *resultfd, int flags)
1656{
1657	struct file *fp;
1658	int error, fd;
1659
1660	error = falloc_noinstall(td, &fp);
1661	if (error)
1662		return (error);		/* no reference held on error */
1663
1664	error = finstall(td, fp, &fd, flags, NULL);
1665	if (error) {
1666		fdrop(fp, td);		/* one reference (fp only) */
1667		return (error);
1668	}
1669
1670	if (resultfp != NULL)
1671		*resultfp = fp;		/* copy out result */
1672	else
1673		fdrop(fp, td);		/* release local reference */
1674
1675	if (resultfd != NULL)
1676		*resultfd = fd;
1677
1678	return (0);
1679}
1680
1681/*
1682 * Create a new open file structure without allocating a file descriptor.
1683 */
1684int
1685falloc_noinstall(struct thread *td, struct file **resultfp)
1686{
1687	struct file *fp;
1688	int maxuserfiles = maxfiles - (maxfiles / 20);
1689	static struct timeval lastfail;
1690	static int curfail;
1691
1692	KASSERT(resultfp != NULL, ("%s: resultfp == NULL", __func__));
1693
1694	if ((openfiles >= maxuserfiles &&
1695	    priv_check(td, PRIV_MAXFILES) != 0) ||
1696	    openfiles >= maxfiles) {
1697		if (ppsratecheck(&lastfail, &curfail, 1)) {
1698			printf("kern.maxfiles limit exceeded by uid %i, "
1699			    "please see tuning(7).\n", td->td_ucred->cr_ruid);
1700		}
1701		return (ENFILE);
1702	}
1703	atomic_add_int(&openfiles, 1);
1704	fp = uma_zalloc(file_zone, M_WAITOK | M_ZERO);
1705	refcount_init(&fp->f_count, 1);
1706	fp->f_cred = crhold(td->td_ucred);
1707	fp->f_ops = &badfileops;
1708	fp->f_data = NULL;
1709	fp->f_vnode = NULL;
1710	*resultfp = fp;
1711	return (0);
1712}
1713
1714/*
1715 * Install a file in a file descriptor table.
1716 */
1717int
1718finstall(struct thread *td, struct file *fp, int *fd, int flags,
1719    struct filecaps *fcaps)
1720{
1721	struct filedesc *fdp = td->td_proc->p_fd;
1722	struct filedescent *fde;
1723	int error;
1724
1725	KASSERT(fd != NULL, ("%s: fd == NULL", __func__));
1726	KASSERT(fp != NULL, ("%s: fp == NULL", __func__));
1727	if (fcaps != NULL)
1728		filecaps_validate(fcaps, __func__);
1729
1730	FILEDESC_XLOCK(fdp);
1731	if ((error = fdalloc(td, 0, fd))) {
1732		FILEDESC_XUNLOCK(fdp);
1733		return (error);
1734	}
1735	fhold(fp);
1736	fde = &fdp->fd_ofiles[*fd];
1737	fde->fde_file = fp;
1738	if ((flags & O_CLOEXEC) != 0)
1739		fde->fde_flags |= UF_EXCLOSE;
1740	if (fcaps != NULL)
1741		filecaps_move(fcaps, &fde->fde_caps);
1742	else
1743		filecaps_fill(&fde->fde_caps);
1744	FILEDESC_XUNLOCK(fdp);
1745	return (0);
1746}
1747
1748/*
1749 * Build a new filedesc structure from another.
1750 * Copy the current, root, and jail root vnode references.
1751 */
1752struct filedesc *
1753fdinit(struct filedesc *fdp)
1754{
1755	struct filedesc0 *newfdp;
1756
1757	newfdp = malloc(sizeof *newfdp, M_FILEDESC, M_WAITOK | M_ZERO);
1758	FILEDESC_LOCK_INIT(&newfdp->fd_fd);
1759	if (fdp != NULL) {
1760		FILEDESC_XLOCK(fdp);
1761		newfdp->fd_fd.fd_cdir = fdp->fd_cdir;
1762		if (newfdp->fd_fd.fd_cdir)
1763			VREF(newfdp->fd_fd.fd_cdir);
1764		newfdp->fd_fd.fd_rdir = fdp->fd_rdir;
1765		if (newfdp->fd_fd.fd_rdir)
1766			VREF(newfdp->fd_fd.fd_rdir);
1767		newfdp->fd_fd.fd_jdir = fdp->fd_jdir;
1768		if (newfdp->fd_fd.fd_jdir)
1769			VREF(newfdp->fd_fd.fd_jdir);
1770		FILEDESC_XUNLOCK(fdp);
1771	}
1772
1773	/* Create the file descriptor table. */
1774	newfdp->fd_fd.fd_refcnt = 1;
1775	newfdp->fd_fd.fd_holdcnt = 1;
1776	newfdp->fd_fd.fd_cmask = CMASK;
1777	newfdp->fd_fd.fd_ofiles = newfdp->fd_dfiles;
1778	newfdp->fd_fd.fd_nfiles = NDFILE;
1779	newfdp->fd_fd.fd_map = newfdp->fd_dmap;
1780	newfdp->fd_fd.fd_lastfile = -1;
1781	return (&newfdp->fd_fd);
1782}
1783
1784static struct filedesc *
1785fdhold(struct proc *p)
1786{
1787	struct filedesc *fdp;
1788
1789	mtx_lock(&fdesc_mtx);
1790	fdp = p->p_fd;
1791	if (fdp != NULL)
1792		fdp->fd_holdcnt++;
1793	mtx_unlock(&fdesc_mtx);
1794	return (fdp);
1795}
1796
1797static void
1798fddrop(struct filedesc *fdp)
1799{
1800	struct filedesc0 *fdp0;
1801	struct freetable *ft;
1802	int i;
1803
1804	mtx_lock(&fdesc_mtx);
1805	i = --fdp->fd_holdcnt;
1806	mtx_unlock(&fdesc_mtx);
1807	if (i > 0)
1808		return;
1809
1810	FILEDESC_LOCK_DESTROY(fdp);
1811	fdp0 = (struct filedesc0 *)fdp;
1812	while ((ft = SLIST_FIRST(&fdp0->fd_free)) != NULL) {
1813		SLIST_REMOVE_HEAD(&fdp0->fd_free, ft_next);
1814		free(ft->ft_table, M_FILEDESC);
1815	}
1816	free(fdp, M_FILEDESC);
1817}
1818
1819/*
1820 * Share a filedesc structure.
1821 */
1822struct filedesc *
1823fdshare(struct filedesc *fdp)
1824{
1825
1826	FILEDESC_XLOCK(fdp);
1827	fdp->fd_refcnt++;
1828	FILEDESC_XUNLOCK(fdp);
1829	return (fdp);
1830}
1831
1832/*
1833 * Unshare a filedesc structure, if necessary by making a copy
1834 */
1835void
1836fdunshare(struct proc *p, struct thread *td)
1837{
1838
1839	FILEDESC_XLOCK(p->p_fd);
1840	if (p->p_fd->fd_refcnt > 1) {
1841		struct filedesc *tmp;
1842
1843		FILEDESC_XUNLOCK(p->p_fd);
1844		tmp = fdcopy(p->p_fd);
1845		fdescfree(td);
1846		p->p_fd = tmp;
1847	} else
1848		FILEDESC_XUNLOCK(p->p_fd);
1849}
1850
1851/*
1852 * Copy a filedesc structure.  A NULL pointer in returns a NULL reference;
1853 * this is to ease callers, not catch errors.
1854 */
1855struct filedesc *
1856fdcopy(struct filedesc *fdp)
1857{
1858	struct filedesc *newfdp;
1859	struct filedescent *nfde, *ofde;
1860	int i;
1861
1862	/* Certain daemons might not have file descriptors. */
1863	if (fdp == NULL)
1864		return (NULL);
1865
1866	newfdp = fdinit(fdp);
1867	FILEDESC_SLOCK(fdp);
1868	while (fdp->fd_lastfile >= newfdp->fd_nfiles) {
1869		FILEDESC_SUNLOCK(fdp);
1870		FILEDESC_XLOCK(newfdp);
1871		fdgrowtable(newfdp, fdp->fd_lastfile + 1);
1872		FILEDESC_XUNLOCK(newfdp);
1873		FILEDESC_SLOCK(fdp);
1874	}
1875	/* copy all passable descriptors (i.e. not kqueue) */
1876	newfdp->fd_freefile = -1;
1877	for (i = 0; i <= fdp->fd_lastfile; ++i) {
1878		ofde = &fdp->fd_ofiles[i];
1879		if (fdisused(fdp, i) &&
1880		    (ofde->fde_file->f_ops->fo_flags & DFLAG_PASSABLE) &&
1881		    ofde->fde_file->f_ops != &badfileops) {
1882			nfde = &newfdp->fd_ofiles[i];
1883			*nfde = *ofde;
1884			filecaps_copy(&ofde->fde_caps, &nfde->fde_caps);
1885			fhold(nfde->fde_file);
1886			newfdp->fd_lastfile = i;
1887		} else {
1888			if (newfdp->fd_freefile == -1)
1889				newfdp->fd_freefile = i;
1890		}
1891	}
1892	newfdp->fd_cmask = fdp->fd_cmask;
1893	FILEDESC_SUNLOCK(fdp);
1894	FILEDESC_XLOCK(newfdp);
1895	for (i = 0; i <= newfdp->fd_lastfile; ++i) {
1896		if (newfdp->fd_ofiles[i].fde_file != NULL)
1897			fdused(newfdp, i);
1898	}
1899	if (newfdp->fd_freefile == -1)
1900		newfdp->fd_freefile = i;
1901	FILEDESC_XUNLOCK(newfdp);
1902	return (newfdp);
1903}
1904
1905/*
1906 * Release a filedesc structure.
1907 */
1908void
1909fdescfree(struct thread *td)
1910{
1911	struct filedesc *fdp;
1912	int i;
1913	struct filedesc_to_leader *fdtol;
1914	struct file *fp;
1915	struct vnode *cdir, *jdir, *rdir, *vp;
1916	struct flock lf;
1917
1918	/* Certain daemons might not have file descriptors. */
1919	fdp = td->td_proc->p_fd;
1920	if (fdp == NULL)
1921		return;
1922
1923#ifdef RACCT
1924	PROC_LOCK(td->td_proc);
1925	racct_set(td->td_proc, RACCT_NOFILE, 0);
1926	PROC_UNLOCK(td->td_proc);
1927#endif
1928
1929	/* Check for special need to clear POSIX style locks */
1930	fdtol = td->td_proc->p_fdtol;
1931	if (fdtol != NULL) {
1932		FILEDESC_XLOCK(fdp);
1933		KASSERT(fdtol->fdl_refcount > 0,
1934		    ("filedesc_to_refcount botch: fdl_refcount=%d",
1935		    fdtol->fdl_refcount));
1936		if (fdtol->fdl_refcount == 1 &&
1937		    (td->td_proc->p_leader->p_flag & P_ADVLOCK) != 0) {
1938			for (i = 0; i <= fdp->fd_lastfile; i++) {
1939				fp = fdp->fd_ofiles[i].fde_file;
1940				if (fp == NULL || fp->f_type != DTYPE_VNODE)
1941					continue;
1942				fhold(fp);
1943				FILEDESC_XUNLOCK(fdp);
1944				lf.l_whence = SEEK_SET;
1945				lf.l_start = 0;
1946				lf.l_len = 0;
1947				lf.l_type = F_UNLCK;
1948				vp = fp->f_vnode;
1949				(void) VOP_ADVLOCK(vp,
1950				    (caddr_t)td->td_proc->p_leader, F_UNLCK,
1951				    &lf, F_POSIX);
1952				FILEDESC_XLOCK(fdp);
1953				fdrop(fp, td);
1954			}
1955		}
1956	retry:
1957		if (fdtol->fdl_refcount == 1) {
1958			if (fdp->fd_holdleaderscount > 0 &&
1959			    (td->td_proc->p_leader->p_flag & P_ADVLOCK) != 0) {
1960				/*
1961				 * close() or do_dup() has cleared a reference
1962				 * in a shared file descriptor table.
1963				 */
1964				fdp->fd_holdleaderswakeup = 1;
1965				sx_sleep(&fdp->fd_holdleaderscount,
1966				    FILEDESC_LOCK(fdp), PLOCK, "fdlhold", 0);
1967				goto retry;
1968			}
1969			if (fdtol->fdl_holdcount > 0) {
1970				/*
1971				 * Ensure that fdtol->fdl_leader remains
1972				 * valid in closef().
1973				 */
1974				fdtol->fdl_wakeup = 1;
1975				sx_sleep(fdtol, FILEDESC_LOCK(fdp), PLOCK,
1976				    "fdlhold", 0);
1977				goto retry;
1978			}
1979		}
1980		fdtol->fdl_refcount--;
1981		if (fdtol->fdl_refcount == 0 &&
1982		    fdtol->fdl_holdcount == 0) {
1983			fdtol->fdl_next->fdl_prev = fdtol->fdl_prev;
1984			fdtol->fdl_prev->fdl_next = fdtol->fdl_next;
1985		} else
1986			fdtol = NULL;
1987		td->td_proc->p_fdtol = NULL;
1988		FILEDESC_XUNLOCK(fdp);
1989		if (fdtol != NULL)
1990			free(fdtol, M_FILEDESC_TO_LEADER);
1991	}
1992	FILEDESC_XLOCK(fdp);
1993	i = --fdp->fd_refcnt;
1994	FILEDESC_XUNLOCK(fdp);
1995	if (i > 0)
1996		return;
1997
1998	for (i = 0; i <= fdp->fd_lastfile; i++) {
1999		fp = fdp->fd_ofiles[i].fde_file;
2000		if (fp != NULL) {
2001			FILEDESC_XLOCK(fdp);
2002			fdfree(fdp, i);
2003			FILEDESC_XUNLOCK(fdp);
2004			(void) closef(fp, td);
2005		}
2006	}
2007	FILEDESC_XLOCK(fdp);
2008
2009	/* XXX This should happen earlier. */
2010	mtx_lock(&fdesc_mtx);
2011	td->td_proc->p_fd = NULL;
2012	mtx_unlock(&fdesc_mtx);
2013
2014	if (fdp->fd_nfiles > NDFILE)
2015		free(fdp->fd_ofiles, M_FILEDESC);
2016	if (NDSLOTS(fdp->fd_nfiles) > NDSLOTS(NDFILE))
2017		free(fdp->fd_map, M_FILEDESC);
2018
2019	fdp->fd_nfiles = 0;
2020
2021	cdir = fdp->fd_cdir;
2022	fdp->fd_cdir = NULL;
2023	rdir = fdp->fd_rdir;
2024	fdp->fd_rdir = NULL;
2025	jdir = fdp->fd_jdir;
2026	fdp->fd_jdir = NULL;
2027	FILEDESC_XUNLOCK(fdp);
2028
2029	if (cdir != NULL)
2030		vrele(cdir);
2031	if (rdir != NULL)
2032		vrele(rdir);
2033	if (jdir != NULL)
2034		vrele(jdir);
2035
2036	fddrop(fdp);
2037}
2038
2039/*
2040 * For setugid programs, we don't want people to use that setugidness
2041 * to generate error messages which write to a file that would otherwise
2042 * be off-limits to the process.  We check for filesystems where
2043 * the vnode can change out from under us after execve (like [lin]procfs).
2044 *
2045 * Since setugidsafety calls this only for fd 0, 1 and 2, this check is
2046 * sufficient.  We also don't check for setugidness since we know we are.
2047 */
2048static int
2049is_unsafe(struct file *fp)
2050{
2051	if (fp->f_type == DTYPE_VNODE) {
2052		struct vnode *vp = fp->f_vnode;
2053
2054		if ((vp->v_vflag & VV_PROCDEP) != 0)
2055			return (1);
2056	}
2057	return (0);
2058}
2059
2060/*
2061 * Make this setugid thing safe, if at all possible.
2062 */
2063void
2064setugidsafety(struct thread *td)
2065{
2066	struct filedesc *fdp;
2067	struct file *fp;
2068	int i;
2069
2070	/* Certain daemons might not have file descriptors. */
2071	fdp = td->td_proc->p_fd;
2072	if (fdp == NULL)
2073		return;
2074
2075	/*
2076	 * Note: fdp->fd_ofiles may be reallocated out from under us while
2077	 * we are blocked in a close.  Be careful!
2078	 */
2079	FILEDESC_XLOCK(fdp);
2080	for (i = 0; i <= fdp->fd_lastfile; i++) {
2081		if (i > 2)
2082			break;
2083		fp = fdp->fd_ofiles[i].fde_file;
2084		if (fp != NULL && is_unsafe(fp)) {
2085			knote_fdclose(td, i);
2086			/*
2087			 * NULL-out descriptor prior to close to avoid
2088			 * a race while close blocks.
2089			 */
2090			fdfree(fdp, i);
2091			FILEDESC_XUNLOCK(fdp);
2092			(void) closef(fp, td);
2093			FILEDESC_XLOCK(fdp);
2094		}
2095	}
2096	FILEDESC_XUNLOCK(fdp);
2097}
2098
2099/*
2100 * If a specific file object occupies a specific file descriptor, close the
2101 * file descriptor entry and drop a reference on the file object.  This is a
2102 * convenience function to handle a subsequent error in a function that calls
2103 * falloc(); it handles the race where another thread might have closed the
2104 * file descriptor out from under the thread creating the file object.
2105 */
2106void
2107fdclose(struct filedesc *fdp, struct file *fp, int idx, struct thread *td)
2108{
2109
2110	FILEDESC_XLOCK(fdp);
2111	if (fdp->fd_ofiles[idx].fde_file == fp) {
2112		fdfree(fdp, idx);
2113		FILEDESC_XUNLOCK(fdp);
2114		fdrop(fp, td);
2115	} else
2116		FILEDESC_XUNLOCK(fdp);
2117}
2118
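/*
 * Illustrative sketch only (not compiled): the error path described above,
 * as it would appear in a hypothetical caller of falloc().  The names
 * xxx_alloc(), xxxops and DTYPE_XXX are placeholders, not real kernel
 * symbols.
 *
 *	error = falloc(td, &fp, &fd, 0);
 *	if (error != 0)
 *		return (error);
 *	error = xxx_alloc(&data);		(may sleep; fd table unlocked)
 *	if (error != 0) {
 *		fdclose(td->td_proc->p_fd, fp, fd, td);
 *		fdrop(fp, td);			(undo falloc()'s reference)
 *		return (error);
 *	}
 *	finit(fp, FREAD | FWRITE, DTYPE_XXX, data, &xxxops);
 *	td->td_retval[0] = fd;
 *	fdrop(fp, td);
 *	return (0);
 */
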
2119/*
2120 * Close descriptors that must not survive exec (close-on-exec or mqueues).
2121 */
2122void
2123fdcloseexec(struct thread *td)
2124{
2125	struct filedesc *fdp;
2126	struct filedescent *fde;
2127	struct file *fp;
2128	int i;
2129
2130	/* Certain daemons might not have file descriptors. */
2131	fdp = td->td_proc->p_fd;
2132	if (fdp == NULL)
2133		return;
2134
2135	/*
2136	 * We cannot cache fd_ofiles since operations
2137	 * may block and rip them out from under us.
2138	 */
2139	FILEDESC_XLOCK(fdp);
2140	for (i = 0; i <= fdp->fd_lastfile; i++) {
2141		fde = &fdp->fd_ofiles[i];
2142		fp = fde->fde_file;
2143		if (fp != NULL && (fp->f_type == DTYPE_MQUEUE ||
2144		    (fde->fde_flags & UF_EXCLOSE))) {
2145			fdfree(fdp, i);
2146			(void) closefp(fdp, i, fp, td, 0);
2147			/* closefp() drops the FILEDESC lock. */
2148			FILEDESC_XLOCK(fdp);
2149		}
2150	}
2151	FILEDESC_XUNLOCK(fdp);
2152}
2153
2154/*
2155 * It is unsafe for set[ug]id processes to be started with file
2156 * descriptors 0..2 closed, as these descriptors are given implicit
2157 * significance in the Standard C library.  fdcheckstd() will create a
2158 * descriptor referencing /dev/null for each of stdin, stdout, and
2159 * stderr that is not already open.
2160 */
2161int
2162fdcheckstd(struct thread *td)
2163{
2164	struct filedesc *fdp;
2165	register_t retval, save;
2166	int i, error, devnull;
2167
2168	fdp = td->td_proc->p_fd;
2169	if (fdp == NULL)
2170		return (0);
2171	KASSERT(fdp->fd_refcnt == 1, ("the fdtable should not be shared"));
2172	devnull = -1;
2173	error = 0;
2174	for (i = 0; i < 3; i++) {
2175		if (fdp->fd_ofiles[i].fde_file != NULL)
2176			continue;
2177		if (devnull < 0) {
2178			save = td->td_retval[0];
2179			error = kern_open(td, "/dev/null", UIO_SYSSPACE,
2180			    O_RDWR, 0);
2181			devnull = td->td_retval[0];
2182			td->td_retval[0] = save;
2183			if (error)
2184				break;
2185			KASSERT(devnull == i, ("oof, we didn't get our fd"));
2186		} else {
2187			error = do_dup(td, DUP_FIXED, devnull, i, &retval);
2188			if (error != 0)
2189				break;
2190		}
2191	}
2192	return (error);
2193}
2194
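/*
 * Illustrative userland analogue (not compiled): the precaution fdcheckstd()
 * applies in the kernel can also be taken by a program before it relies on
 * the standard descriptors, by binding any closed slot among 0..2 to
 * /dev/null.
 *
 *	int fd;
 *
 *	while ((fd = open("/dev/null", O_RDWR)) >= 0 && fd <= 2)
 *		;			(each open lands on the lowest free fd)
 *	if (fd > 2)
 *		close(fd);		(all of 0..2 were already open)
 */
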
2195/*
2196 * Internal form of close.  Decrement reference count on file structure.
2197 * Note: td may be NULL when closing a file that was being passed in a
2198 * message.
2199 *
2200 * XXXRW: Giant is not required for the caller, but often will be held; this
2201 * makes it moderately likely that Giant will be recursed in the VFS case.
2202 */
2203int
2204closef(struct file *fp, struct thread *td)
2205{
2206	struct vnode *vp;
2207	struct flock lf;
2208	struct filedesc_to_leader *fdtol;
2209	struct filedesc *fdp;
2210
2211	/*
2212	 * POSIX record locking dictates that any close releases ALL
2213	 * locks owned by this process.  This is handled by setting
2214	 * a flag in the unlock to free ONLY locks obeying POSIX
2215	 * semantics, and not to free BSD-style file locks.
2216	 * If the descriptor was in a message, POSIX-style locks
2217	 * aren't passed with the descriptor, and the thread pointer
2218	 * will be NULL.  Callers should be careful only to pass a
2219	 * NULL thread pointer when there really is no owning
2220	 * context that might have locks, or the locks will be
2221	 * leaked.
2222	 */
2223	if (fp->f_type == DTYPE_VNODE && td != NULL) {
2224		vp = fp->f_vnode;
2225		if ((td->td_proc->p_leader->p_flag & P_ADVLOCK) != 0) {
2226			lf.l_whence = SEEK_SET;
2227			lf.l_start = 0;
2228			lf.l_len = 0;
2229			lf.l_type = F_UNLCK;
2230			(void) VOP_ADVLOCK(vp, (caddr_t)td->td_proc->p_leader,
2231			    F_UNLCK, &lf, F_POSIX);
2232		}
2233		fdtol = td->td_proc->p_fdtol;
2234		if (fdtol != NULL) {
2235			/*
2236			 * Handle special case where file descriptor table is
2237			 * shared between multiple process leaders.
2238			 */
2239			fdp = td->td_proc->p_fd;
2240			FILEDESC_XLOCK(fdp);
2241			for (fdtol = fdtol->fdl_next;
2242			     fdtol != td->td_proc->p_fdtol;
2243			     fdtol = fdtol->fdl_next) {
2244				if ((fdtol->fdl_leader->p_flag &
2245				     P_ADVLOCK) == 0)
2246					continue;
2247				fdtol->fdl_holdcount++;
2248				FILEDESC_XUNLOCK(fdp);
2249				lf.l_whence = SEEK_SET;
2250				lf.l_start = 0;
2251				lf.l_len = 0;
2252				lf.l_type = F_UNLCK;
2253				vp = fp->f_vnode;
2254				(void) VOP_ADVLOCK(vp,
2255				    (caddr_t)fdtol->fdl_leader, F_UNLCK, &lf,
2256				    F_POSIX);
2257				FILEDESC_XLOCK(fdp);
2258				fdtol->fdl_holdcount--;
2259				if (fdtol->fdl_holdcount == 0 &&
2260				    fdtol->fdl_wakeup != 0) {
2261					fdtol->fdl_wakeup = 0;
2262					wakeup(fdtol);
2263				}
2264			}
2265			FILEDESC_XUNLOCK(fdp);
2266		}
2267	}
2268	return (fdrop(fp, td));
2269}
2270
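/*
 * Illustrative userland sketch (not compiled) of the POSIX semantics noted
 * in closef() above: closing any descriptor for a file drops every POSIX
 * lock the process holds on that file, even locks taken through another
 * descriptor.  "path" is a placeholder.
 *
 *	struct flock fl = { .l_type = F_WRLCK, .l_whence = SEEK_SET };
 *	int fd1 = open(path, O_RDWR);
 *	int fd2 = open(path, O_RDONLY);	(second descriptor, same file)
 *
 *	fcntl(fd1, F_SETLKW, &fl);	(lock the whole file via fd1)
 *	close(fd2);			(releases the lock acquired via fd1)
 */
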
2271/*
2272 * Initialize the file pointer with the specified properties.
2273 *
2274 * The ops are set with release semantics to be certain that the flags, type,
2275 * and data are visible when ops is.  This is to prevent ops methods from being
2276 * called with bad data.
2277 */
2278void
2279finit(struct file *fp, u_int flag, short type, void *data, struct fileops *ops)
2280{
2281	fp->f_data = data;
2282	fp->f_flag = flag;
2283	fp->f_type = type;
2284	atomic_store_rel_ptr((volatile uintptr_t *)&fp->f_ops, (uintptr_t)ops);
2285}
2286
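/*
 * Illustrative sketch only (not compiled): because finit() publishes f_ops
 * with release semantics, a hypothetical file type should finish building
 * its private data before calling finit().  xxx_create(), DTYPE_XXX and
 * xxxops are placeholders.
 *
 *	data = xxx_create(td);		(fully construct the object first)
 *	finit(fp, FREAD | FWRITE, DTYPE_XXX, data, &xxxops);
 *					(a thread that later observes
 *					 fp->f_ops == &xxxops is guaranteed
 *					 to see the flag, type and data too)
 */
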
2287int
2288fget_unlocked(struct filedesc *fdp, int fd, cap_rights_t needrights,
2289    int needfcntl, struct file **fpp, cap_rights_t *haverightsp)
2290{
2291	struct file *fp;
2292	u_int count;
2293#ifdef CAPABILITIES
2294	cap_rights_t haverights;
2295	int error;
2296#endif
2297
2298	if (fd < 0 || fd >= fdp->fd_nfiles)
2299		return (EBADF);
2300	/*
2301	 * Fetch the descriptor locklessly.  We avoid fdrop() races by never
2302	 * raising a refcount that has already dropped to zero.  To accomplish
2303	 * this we use a cmpset loop rather than an atomic_add.  The descriptor
2304	 * must be re-verified once we acquire a reference to be certain
2305	 * that the identity is still correct and we did not lose a race
2306	 * due to preemption.
2307	 */
2308	for (;;) {
2309		fp = fdp->fd_ofiles[fd].fde_file;
2310		if (fp == NULL)
2311			return (EBADF);
2312#ifdef CAPABILITIES
2313		haverights = cap_rights(fdp, fd);
2314		error = cap_check(haverights, needrights);
2315		if (error != 0)
2316			return (error);
2317		if ((needrights & CAP_FCNTL) != 0) {
2318			error = cap_fcntl_check(fdp, fd, needfcntl);
2319			if (error != 0)
2320				return (error);
2321		}
2322#endif
2323		count = fp->f_count;
2324		if (count == 0)
2325			continue;
2326		/*
2327		 * Use an acquire barrier to prevent caching of fd_ofiles
2328		 * so it is refreshed for verification.
2329		 */
2330		if (atomic_cmpset_acq_int(&fp->f_count, count, count + 1) != 1)
2331			continue;
2332		if (fp == fdp->fd_ofiles[fd].fde_file)
2333			break;
2334		fdrop(fp, curthread);
2335	}
2336	*fpp = fp;
2337	if (haverightsp != NULL) {
2338#ifdef CAPABILITIES
2339		*haverightsp = haverights;
2340#else
2341		*haverightsp = CAP_ALL;
2342#endif
2343	}
2344	return (0);
2345}
2346
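/*
 * Illustrative sketch (not compiled) of the refcount rule used above: a
 * reference may only be gained while the count is already non-zero, so the
 * acquire must be a compare-and-set loop rather than a blind atomic
 * increment.  "obj" and its "refs" field are hypothetical; fget_unlocked()
 * itself retries rather than failing when it sees a zero count.
 *
 *	for (;;) {
 *		count = obj->refs;
 *		if (count == 0)
 *			return (NULL);		(object is being torn down)
 *		if (atomic_cmpset_acq_int(&obj->refs, count, count + 1))
 *			return (obj);		(reference safely acquired)
 *	}
 */
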
2347/*
2348 * Extract the file pointer associated with the specified descriptor for the
2349 * current user process.
2350 *
2351 * If the descriptor doesn't exist or doesn't match 'flags', EBADF is
2352 * returned.
2353 *
2354 * The file's rights will be checked against the capability rights mask.
2355 *
2356 * If an error occurred, the non-zero error is returned and *fpp is set to
2357 * NULL.  Otherwise *fpp is held and set and zero is returned.  Caller is
2358 * responsible for fdrop().
2359 */
2360static __inline int
2361_fget(struct thread *td, int fd, struct file **fpp, int flags,
2362    cap_rights_t needrights, u_char *maxprotp)
2363{
2364	struct filedesc *fdp;
2365	struct file *fp;
2366	cap_rights_t haverights;
2367	int error;
2368
2369	*fpp = NULL;
2370	if (td == NULL || (fdp = td->td_proc->p_fd) == NULL)
2371		return (EBADF);
2372	if (maxprotp != NULL)
2373		needrights |= CAP_MMAP;
2374	error = fget_unlocked(fdp, fd, needrights, 0, &fp, &haverights);
2375	if (error != 0)
2376		return (error);
2377	if (fp->f_ops == &badfileops) {
2378		fdrop(fp, td);
2379		return (EBADF);
2380	}
2381
2382#ifdef CAPABILITIES
2383	/*
2384	 * If requested, convert capability rights to access flags.
2385	 */
2386	if (maxprotp != NULL)
2387		*maxprotp = cap_rights_to_vmprot(haverights);
2388#else /* !CAPABILITIES */
2389	if (maxprotp != NULL)
2390		*maxprotp = VM_PROT_ALL;
2391#endif /* CAPABILITIES */
2392
2393	/*
2394	 * FREAD and FWRITE failure return EBADF as per POSIX.
2395	 */
2396	error = 0;
2397	switch (flags) {
2398	case FREAD:
2399	case FWRITE:
2400		if ((fp->f_flag & flags) == 0)
2401			error = EBADF;
2402		break;
2403	case FEXEC:
2404		if ((fp->f_flag & (FREAD | FEXEC)) == 0 ||
2405		    ((fp->f_flag & FWRITE) != 0))
2406			error = EBADF;
2407		break;
2408	case 0:
2409		break;
2410	default:
2411		KASSERT(0, ("wrong flags"));
2412	}
2413
2414	if (error != 0) {
2415		fdrop(fp, td);
2416		return (error);
2417	}
2418
2419	*fpp = fp;
2420	return (0);
2421}
2422
2423int
2424fget(struct thread *td, int fd, cap_rights_t rights, struct file **fpp)
2425{
2426
2427	return (_fget(td, fd, fpp, 0, rights, NULL));
2428}
2429
2430int
2431fget_mmap(struct thread *td, int fd, cap_rights_t rights, u_char *maxprotp,
2432    struct file **fpp)
2433{
2434
2435	return (_fget(td, fd, fpp, 0, rights, maxprotp));
2436}
2437
2438int
2439fget_read(struct thread *td, int fd, cap_rights_t rights, struct file **fpp)
2440{
2441
2442	return (_fget(td, fd, fpp, FREAD, rights, NULL));
2443}
2444
2445int
2446fget_write(struct thread *td, int fd, cap_rights_t rights, struct file **fpp)
2447{
2448
2449	return (_fget(td, fd, fpp, FWRITE, rights, NULL));
2450}
2451
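/*
 * Illustrative sketch only (not compiled): the usual pattern for the fget()
 * family in a descriptor-based service routine.  CAP_READ is used here only
 * as an example right; a real consumer requests whatever rights it actually
 * exercises.
 *
 *	error = fget_read(td, fd, CAP_READ, &fp);
 *	if (error != 0)
 *		return (error);
 *	error = fo_read(fp, uio, td->td_ucred, 0, td);
 *	fdrop(fp, td);			(always paired with a successful fget)
 *	return (error);
 */
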
2452/*
2453 * Like fget() but loads the underlying vnode, or returns an error if the
2454 * descriptor does not represent a vnode.  Note that pipes use vnodes but
2455 * never have VM objects.  The returned vnode will be vref()'d.
2456 *
2457 * XXX: what about the unused flags?
2458 */
2459static __inline int
2460_fgetvp(struct thread *td, int fd, int flags, cap_rights_t needrights,
2461    struct vnode **vpp)
2462{
2463	struct file *fp;
2464	int error;
2465
2466	*vpp = NULL;
2467	error = _fget(td, fd, &fp, flags, needrights, NULL);
2468	if (error)
2469		return (error);
2470	if (fp->f_vnode == NULL) {
2471		error = EINVAL;
2472	} else {
2473		*vpp = fp->f_vnode;
2474		vref(*vpp);
2475	}
2476	fdrop(fp, td);
2477
2478	return (error);
2479}
2480
2481int
2482fgetvp(struct thread *td, int fd, cap_rights_t rights, struct vnode **vpp)
2483{
2484
2485	return (_fgetvp(td, fd, 0, rights, vpp));
2486}
2487
2488int
2489fgetvp_rights(struct thread *td, int fd, cap_rights_t need,
2490    struct filecaps *havecaps, struct vnode **vpp)
2491{
2492	struct filedesc *fdp;
2493	struct file *fp;
2494#ifdef CAPABILITIES
2495	int error;
2496#endif
2497
2498	if (td == NULL || (fdp = td->td_proc->p_fd) == NULL)
2499		return (EBADF);
2500
2501	fp = fget_locked(fdp, fd);
2502	if (fp == NULL || fp->f_ops == &badfileops)
2503		return (EBADF);
2504
2505#ifdef CAPABILITIES
2506	error = cap_check(cap_rights(fdp, fd), need);
2507	if (error != 0)
2508		return (error);
2509#endif
2510
2511	if (fp->f_vnode == NULL)
2512		return (EINVAL);
2513
2514	*vpp = fp->f_vnode;
2515	vref(*vpp);
2516	filecaps_copy(&fdp->fd_ofiles[fd].fde_caps, havecaps);
2517
2518	return (0);
2519}
2520
2521int
2522fgetvp_read(struct thread *td, int fd, cap_rights_t rights, struct vnode **vpp)
2523{
2524
2525	return (_fgetvp(td, fd, FREAD, rights, vpp));
2526}
2527
2528int
2529fgetvp_exec(struct thread *td, int fd, cap_rights_t rights, struct vnode **vpp)
2530{
2531
2532	return (_fgetvp(td, fd, FEXEC, rights, vpp));
2533}
2534
2535#ifdef notyet
2536int
2537fgetvp_write(struct thread *td, int fd, cap_rights_t rights,
2538    struct vnode **vpp)
2539{
2540
2541	return (_fgetvp(td, fd, FWRITE, rights, vpp));
2542}
2543#endif
2544
2545/*
2546 * Like fget() but loads the underlying socket, or returns an error if the
2547 * descriptor does not represent a socket.
2548 *
2549 * We bump the ref count on the returned socket.  XXX Also obtain the SX lock
2550 * in the future.
2551 *
2552 * Note: fgetsock() and fputsock() are deprecated, as consumers should rely
2553 * on their file descriptor reference to prevent the socket from being free'd
2554 * during use.
2555 */
2556int
2557fgetsock(struct thread *td, int fd, cap_rights_t rights, struct socket **spp,
2558    u_int *fflagp)
2559{
2560	struct file *fp;
2561	int error;
2562
2563	*spp = NULL;
2564	if (fflagp != NULL)
2565		*fflagp = 0;
2566	if ((error = _fget(td, fd, &fp, 0, rights, NULL)) != 0)
2567		return (error);
2568	if (fp->f_type != DTYPE_SOCKET) {
2569		error = ENOTSOCK;
2570	} else {
2571		*spp = fp->f_data;
2572		if (fflagp)
2573			*fflagp = fp->f_flag;
2574		SOCK_LOCK(*spp);
2575		soref(*spp);
2576		SOCK_UNLOCK(*spp);
2577	}
2578	fdrop(fp, td);
2579
2580	return (error);
2581}
2582
2583/*
2584 * Drop the reference count on the socket and XXX release the SX lock in the
2585 * future.  The last reference closes the socket.
2586 *
2587 * Note: fputsock() is deprecated, see comment for fgetsock().
2588 */
2589void
2590fputsock(struct socket *so)
2591{
2592
2593	ACCEPT_LOCK();
2594	SOCK_LOCK(so);
2595	CURVNET_SET(so->so_vnet);
2596	sorele(so);
2597	CURVNET_RESTORE();
2598}
2599
2600/*
2601 * Handle the last reference to a file being closed.
2602 */
2603int
2604_fdrop(struct file *fp, struct thread *td)
2605{
2606	int error;
2607
2608	error = 0;
2609	if (fp->f_count != 0)
2610		panic("fdrop: count %d", fp->f_count);
2611	if (fp->f_ops != &badfileops)
2612		error = fo_close(fp, td);
2613	atomic_subtract_int(&openfiles, 1);
2614	crfree(fp->f_cred);
2615	free(fp->f_advice, M_FADVISE);
2616	uma_zfree(file_zone, fp);
2617
2618	return (error);
2619}
2620
2621/*
2622 * Apply an advisory lock on a file descriptor.
2623 *
2624 * Just attempt to get a record lock of the requested type on the entire file
2625 * (l_whence = SEEK_SET, l_start = 0, l_len = 0).
2626 */
2627#ifndef _SYS_SYSPROTO_H_
2628struct flock_args {
2629	int	fd;
2630	int	how;
2631};
2632#endif
2633/* ARGSUSED */
2634int
2635sys_flock(struct thread *td, struct flock_args *uap)
2636{
2637	struct file *fp;
2638	struct vnode *vp;
2639	struct flock lf;
2640	int error;
2641
2642	if ((error = fget(td, uap->fd, CAP_FLOCK, &fp)) != 0)
2643		return (error);
2644	if (fp->f_type != DTYPE_VNODE) {
2645		fdrop(fp, td);
2646		return (EOPNOTSUPP);
2647	}
2648
2649	vp = fp->f_vnode;
2650	lf.l_whence = SEEK_SET;
2651	lf.l_start = 0;
2652	lf.l_len = 0;
2653	if (uap->how & LOCK_UN) {
2654		lf.l_type = F_UNLCK;
2655		atomic_clear_int(&fp->f_flag, FHASLOCK);
2656		error = VOP_ADVLOCK(vp, (caddr_t)fp, F_UNLCK, &lf, F_FLOCK);
2657		goto done2;
2658	}
2659	if (uap->how & LOCK_EX)
2660		lf.l_type = F_WRLCK;
2661	else if (uap->how & LOCK_SH)
2662		lf.l_type = F_RDLCK;
2663	else {
2664		error = EBADF;
2665		goto done2;
2666	}
2667	atomic_set_int(&fp->f_flag, FHASLOCK);
2668	error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf,
2669	    (uap->how & LOCK_NB) ? F_FLOCK : F_FLOCK | F_WAIT);
2670done2:
2671	fdrop(fp, td);
2672	return (error);
2673}
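
/*
 * Illustrative userland sketch (not compiled) of the flock(2) interface
 * implemented above: a non-blocking exclusive lock, later released either
 * explicitly or by the last close of the file.
 *
 *	if (flock(fd, LOCK_EX | LOCK_NB) == -1) {
 *		if (errno == EWOULDBLOCK)
 *			return (-1);		(someone else holds the lock)
 *	}
 *	do_critical_work();			(placeholder)
 *	flock(fd, LOCK_UN);
 */
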
2674/*
2675 * Duplicate the specified descriptor to a free descriptor.
2676 */
2677int
2678dupfdopen(struct thread *td, struct filedesc *fdp, int dfd, int mode,
2679    int openerror, int *indxp)
2680{
2681	struct file *fp;
2682	int error, indx;
2683
2684	KASSERT(openerror == ENODEV || openerror == ENXIO,
2685	    ("unexpected error %d in %s", openerror, __func__));
2686
2687	/*
2688	 * If the to-be-dup'd fd number is greater than the allowed number
2689	 * of file descriptors, or the fd to be dup'd has already been
2690	 * closed, then reject.
2691	 */
2692	FILEDESC_XLOCK(fdp);
2693	if ((fp = fget_locked(fdp, dfd)) == NULL) {
2694		FILEDESC_XUNLOCK(fdp);
2695		return (EBADF);
2696	}
2697
2698	error = fdalloc(td, 0, &indx);
2699	if (error != 0) {
2700		FILEDESC_XUNLOCK(fdp);
2701		return (error);
2702	}
2703
2704	/*
2705	 * There are two cases of interest here.
2706	 *
2707	 * For ENODEV simply dup (dfd) to file descriptor (indx) and return.
2708	 *
2709	 * For ENXIO steal away the file structure from (dfd) and store it in
2710	 * (indx).  (dfd) is effectively closed by this operation.
2711	 */
2712	switch (openerror) {
2713	case ENODEV:
2714		/*
2715		 * Check that the mode the file is being opened for is a
2716		 * subset of the mode of the existing descriptor.
2717		 */
2718		if (((mode & (FREAD|FWRITE)) | fp->f_flag) != fp->f_flag) {
2719			fdunused(fdp, indx);
2720			FILEDESC_XUNLOCK(fdp);
2721			return (EACCES);
2722		}
2723		fhold(fp);
2724		fdp->fd_ofiles[indx] = fdp->fd_ofiles[dfd];
2725		filecaps_copy(&fdp->fd_ofiles[dfd].fde_caps,
2726		    &fdp->fd_ofiles[indx].fde_caps);
2727		break;
2728	case ENXIO:
2729		/*
2730		 * Steal away the file pointer from dfd and stuff it into indx.
2731		 */
2732		fdp->fd_ofiles[indx] = fdp->fd_ofiles[dfd];
2733		bzero(&fdp->fd_ofiles[dfd], sizeof(fdp->fd_ofiles[dfd]));
2734		fdunused(fdp, dfd);
2735		break;
2736	}
2737	FILEDESC_XUNLOCK(fdp);
2738	*indxp = indx;
2739	return (0);
2740}
2741
2742/*
2743 * Scan all active processes and prisons to see if any of them have a current
2744 * or root directory of `olddp'. If so, replace them with the new mount point.
2745 */
2746void
2747mountcheckdirs(struct vnode *olddp, struct vnode *newdp)
2748{
2749	struct filedesc *fdp;
2750	struct prison *pr;
2751	struct proc *p;
2752	int nrele;
2753
2754	if (vrefcnt(olddp) == 1)
2755		return;
2756	nrele = 0;
2757	sx_slock(&allproc_lock);
2758	FOREACH_PROC_IN_SYSTEM(p) {
2759		fdp = fdhold(p);
2760		if (fdp == NULL)
2761			continue;
2762		FILEDESC_XLOCK(fdp);
2763		if (fdp->fd_cdir == olddp) {
2764			vref(newdp);
2765			fdp->fd_cdir = newdp;
2766			nrele++;
2767		}
2768		if (fdp->fd_rdir == olddp) {
2769			vref(newdp);
2770			fdp->fd_rdir = newdp;
2771			nrele++;
2772		}
2773		if (fdp->fd_jdir == olddp) {
2774			vref(newdp);
2775			fdp->fd_jdir = newdp;
2776			nrele++;
2777		}
2778		FILEDESC_XUNLOCK(fdp);
2779		fddrop(fdp);
2780	}
2781	sx_sunlock(&allproc_lock);
2782	if (rootvnode == olddp) {
2783		vref(newdp);
2784		rootvnode = newdp;
2785		nrele++;
2786	}
2787	mtx_lock(&prison0.pr_mtx);
2788	if (prison0.pr_root == olddp) {
2789		vref(newdp);
2790		prison0.pr_root = newdp;
2791		nrele++;
2792	}
2793	mtx_unlock(&prison0.pr_mtx);
2794	sx_slock(&allprison_lock);
2795	TAILQ_FOREACH(pr, &allprison, pr_list) {
2796		mtx_lock(&pr->pr_mtx);
2797		if (pr->pr_root == olddp) {
2798			vref(newdp);
2799			pr->pr_root = newdp;
2800			nrele++;
2801		}
2802		mtx_unlock(&pr->pr_mtx);
2803	}
2804	sx_sunlock(&allprison_lock);
2805	while (nrele--)
2806		vrele(olddp);
2807}
2808
2809struct filedesc_to_leader *
2810filedesc_to_leader_alloc(struct filedesc_to_leader *old, struct filedesc *fdp, struct proc *leader)
2811{
2812	struct filedesc_to_leader *fdtol;
2813
2814	fdtol = malloc(sizeof(struct filedesc_to_leader),
2815	    M_FILEDESC_TO_LEADER,
2816	    M_WAITOK);
2817	fdtol->fdl_refcount = 1;
2818	fdtol->fdl_holdcount = 0;
2819	fdtol->fdl_wakeup = 0;
2820	fdtol->fdl_leader = leader;
2821	if (old != NULL) {
2822		FILEDESC_XLOCK(fdp);
2823		fdtol->fdl_next = old->fdl_next;
2824		fdtol->fdl_prev = old;
2825		old->fdl_next = fdtol;
2826		fdtol->fdl_next->fdl_prev = fdtol;
2827		FILEDESC_XUNLOCK(fdp);
2828	} else {
2829		fdtol->fdl_next = fdtol;
2830		fdtol->fdl_prev = fdtol;
2831	}
2832	return (fdtol);
2833}
2834
2835/*
2836 * Get file structures globally.
2837 */
2838static int
2839sysctl_kern_file(SYSCTL_HANDLER_ARGS)
2840{
2841	struct xfile xf;
2842	struct filedesc *fdp;
2843	struct file *fp;
2844	struct proc *p;
2845	int error, n;
2846
2847	error = sysctl_wire_old_buffer(req, 0);
2848	if (error != 0)
2849		return (error);
2850	if (req->oldptr == NULL) {
2851		n = 0;
2852		sx_slock(&allproc_lock);
2853		FOREACH_PROC_IN_SYSTEM(p) {
2854			if (p->p_state == PRS_NEW)
2855				continue;
2856			fdp = fdhold(p);
2857			if (fdp == NULL)
2858				continue;
2859			/* This overestimates for sparse tables. */
2860			if (fdp->fd_lastfile > 0)
2861				n += fdp->fd_lastfile;
2862			fddrop(fdp);
2863		}
2864		sx_sunlock(&allproc_lock);
2865		return (SYSCTL_OUT(req, 0, n * sizeof(xf)));
2866	}
2867	error = 0;
2868	bzero(&xf, sizeof(xf));
2869	xf.xf_size = sizeof(xf);
2870	sx_slock(&allproc_lock);
2871	FOREACH_PROC_IN_SYSTEM(p) {
2872		PROC_LOCK(p);
2873		if (p->p_state == PRS_NEW) {
2874			PROC_UNLOCK(p);
2875			continue;
2876		}
2877		if (p_cansee(req->td, p) != 0) {
2878			PROC_UNLOCK(p);
2879			continue;
2880		}
2881		xf.xf_pid = p->p_pid;
2882		xf.xf_uid = p->p_ucred->cr_uid;
2883		PROC_UNLOCK(p);
2884		fdp = fdhold(p);
2885		if (fdp == NULL)
2886			continue;
2887		FILEDESC_SLOCK(fdp);
2888		for (n = 0; fdp->fd_refcnt > 0 && n < fdp->fd_nfiles; ++n) {
2889			if ((fp = fdp->fd_ofiles[n].fde_file) == NULL)
2890				continue;
2891			xf.xf_fd = n;
2892			xf.xf_file = fp;
2893			xf.xf_data = fp->f_data;
2894			xf.xf_vnode = fp->f_vnode;
2895			xf.xf_type = fp->f_type;
2896			xf.xf_count = fp->f_count;
2897			xf.xf_msgcount = 0;
2898			xf.xf_offset = foffset_get(fp);
2899			xf.xf_flag = fp->f_flag;
2900			error = SYSCTL_OUT(req, &xf, sizeof(xf));
2901			if (error)
2902				break;
2903		}
2904		FILEDESC_SUNLOCK(fdp);
2905		fddrop(fdp);
2906		if (error)
2907			break;
2908	}
2909	sx_sunlock(&allproc_lock);
2910	return (error);
2911}
2912
2913SYSCTL_PROC(_kern, KERN_FILE, file, CTLTYPE_OPAQUE|CTLFLAG_RD,
2914    0, 0, sysctl_kern_file, "S,xfile", "Entire file table");
2915
2916#ifdef KINFO_OFILE_SIZE
2917CTASSERT(sizeof(struct kinfo_ofile) == KINFO_OFILE_SIZE);
2918#endif
2919
2920#ifdef COMPAT_FREEBSD7
2921static int
2922export_vnode_for_osysctl(struct vnode *vp, int type,
2923    struct kinfo_ofile *kif, struct filedesc *fdp, struct sysctl_req *req)
2924{
2925	int error;
2926	char *fullpath, *freepath;
2927
2928	bzero(kif, sizeof(*kif));
2929	kif->kf_structsize = sizeof(*kif);
2930
2931	vref(vp);
2932	kif->kf_fd = type;
2933	kif->kf_type = KF_TYPE_VNODE;
2934	/* This function only handles directories. */
2935	if (vp->v_type != VDIR) {
2936		vrele(vp);
2937		return (ENOTDIR);
2938	}
2939	kif->kf_vnode_type = KF_VTYPE_VDIR;
2940
2941	/*
2942	 * This is not a true file descriptor, so we set a bogus refcount
2943	 * and offset to indicate these fields should be ignored.
2944	 */
2945	kif->kf_ref_count = -1;
2946	kif->kf_offset = -1;
2947
2948	freepath = NULL;
2949	fullpath = "-";
2950	FILEDESC_SUNLOCK(fdp);
2951	vn_fullpath(curthread, vp, &fullpath, &freepath);
2952	vrele(vp);
2953	strlcpy(kif->kf_path, fullpath, sizeof(kif->kf_path));
2954	if (freepath != NULL)
2955		free(freepath, M_TEMP);
2956	error = SYSCTL_OUT(req, kif, sizeof(*kif));
2957	FILEDESC_SLOCK(fdp);
2958	return (error);
2959}
2960
2961/*
2962 * Get per-process file descriptors for use by procstat(1), et al.
2963 */
2964static int
2965sysctl_kern_proc_ofiledesc(SYSCTL_HANDLER_ARGS)
2966{
2967	char *fullpath, *freepath;
2968	struct kinfo_ofile *kif;
2969	struct filedesc *fdp;
2970	int error, i, *name;
2971	struct shmfd *shmfd;
2972	struct socket *so;
2973	struct vnode *vp;
2974	struct ksem *ks;
2975	struct file *fp;
2976	struct proc *p;
2977	struct tty *tp;
2978
2979	name = (int *)arg1;
2980	error = pget((pid_t)name[0], PGET_CANDEBUG, &p);
2981	if (error != 0)
2982		return (error);
2983	fdp = fdhold(p);
2984	PROC_UNLOCK(p);
2985	if (fdp == NULL)
2986		return (ENOENT);
2987	kif = malloc(sizeof(*kif), M_TEMP, M_WAITOK);
2988	FILEDESC_SLOCK(fdp);
2989	if (fdp->fd_cdir != NULL)
2990		export_vnode_for_osysctl(fdp->fd_cdir, KF_FD_TYPE_CWD, kif,
2991				fdp, req);
2992	if (fdp->fd_rdir != NULL)
2993		export_vnode_for_osysctl(fdp->fd_rdir, KF_FD_TYPE_ROOT, kif,
2994				fdp, req);
2995	if (fdp->fd_jdir != NULL)
2996		export_vnode_for_osysctl(fdp->fd_jdir, KF_FD_TYPE_JAIL, kif,
2997				fdp, req);
2998	for (i = 0; i < fdp->fd_nfiles; i++) {
2999		if ((fp = fdp->fd_ofiles[i].fde_file) == NULL)
3000			continue;
3001		bzero(kif, sizeof(*kif));
3002		kif->kf_structsize = sizeof(*kif);
3003		ks = NULL;
3004		vp = NULL;
3005		so = NULL;
3006		tp = NULL;
3007		shmfd = NULL;
3008		kif->kf_fd = i;
3009
3010		switch (fp->f_type) {
3011		case DTYPE_VNODE:
3012			kif->kf_type = KF_TYPE_VNODE;
3013			vp = fp->f_vnode;
3014			break;
3015
3016		case DTYPE_SOCKET:
3017			kif->kf_type = KF_TYPE_SOCKET;
3018			so = fp->f_data;
3019			break;
3020
3021		case DTYPE_PIPE:
3022			kif->kf_type = KF_TYPE_PIPE;
3023			break;
3024
3025		case DTYPE_FIFO:
3026			kif->kf_type = KF_TYPE_FIFO;
3027			vp = fp->f_vnode;
3028			break;
3029
3030		case DTYPE_KQUEUE:
3031			kif->kf_type = KF_TYPE_KQUEUE;
3032			break;
3033
3034		case DTYPE_CRYPTO:
3035			kif->kf_type = KF_TYPE_CRYPTO;
3036			break;
3037
3038		case DTYPE_MQUEUE:
3039			kif->kf_type = KF_TYPE_MQUEUE;
3040			break;
3041
3042		case DTYPE_SHM:
3043			kif->kf_type = KF_TYPE_SHM;
3044			shmfd = fp->f_data;
3045			break;
3046
3047		case DTYPE_SEM:
3048			kif->kf_type = KF_TYPE_SEM;
3049			ks = fp->f_data;
3050			break;
3051
3052		case DTYPE_PTS:
3053			kif->kf_type = KF_TYPE_PTS;
3054			tp = fp->f_data;
3055			break;
3056
3057#ifdef PROCDESC
3058		case DTYPE_PROCDESC:
3059			kif->kf_type = KF_TYPE_PROCDESC;
3060			break;
3061#endif
3062
3063		default:
3064			kif->kf_type = KF_TYPE_UNKNOWN;
3065			break;
3066		}
3067		kif->kf_ref_count = fp->f_count;
3068		if (fp->f_flag & FREAD)
3069			kif->kf_flags |= KF_FLAG_READ;
3070		if (fp->f_flag & FWRITE)
3071			kif->kf_flags |= KF_FLAG_WRITE;
3072		if (fp->f_flag & FAPPEND)
3073			kif->kf_flags |= KF_FLAG_APPEND;
3074		if (fp->f_flag & FASYNC)
3075			kif->kf_flags |= KF_FLAG_ASYNC;
3076		if (fp->f_flag & FFSYNC)
3077			kif->kf_flags |= KF_FLAG_FSYNC;
3078		if (fp->f_flag & FNONBLOCK)
3079			kif->kf_flags |= KF_FLAG_NONBLOCK;
3080		if (fp->f_flag & O_DIRECT)
3081			kif->kf_flags |= KF_FLAG_DIRECT;
3082		if (fp->f_flag & FHASLOCK)
3083			kif->kf_flags |= KF_FLAG_HASLOCK;
3084		kif->kf_offset = foffset_get(fp);
3085		if (vp != NULL) {
3086			vref(vp);
3087			switch (vp->v_type) {
3088			case VNON:
3089				kif->kf_vnode_type = KF_VTYPE_VNON;
3090				break;
3091			case VREG:
3092				kif->kf_vnode_type = KF_VTYPE_VREG;
3093				break;
3094			case VDIR:
3095				kif->kf_vnode_type = KF_VTYPE_VDIR;
3096				break;
3097			case VBLK:
3098				kif->kf_vnode_type = KF_VTYPE_VBLK;
3099				break;
3100			case VCHR:
3101				kif->kf_vnode_type = KF_VTYPE_VCHR;
3102				break;
3103			case VLNK:
3104				kif->kf_vnode_type = KF_VTYPE_VLNK;
3105				break;
3106			case VSOCK:
3107				kif->kf_vnode_type = KF_VTYPE_VSOCK;
3108				break;
3109			case VFIFO:
3110				kif->kf_vnode_type = KF_VTYPE_VFIFO;
3111				break;
3112			case VBAD:
3113				kif->kf_vnode_type = KF_VTYPE_VBAD;
3114				break;
3115			default:
3116				kif->kf_vnode_type = KF_VTYPE_UNKNOWN;
3117				break;
3118			}
3119			/*
3120			 * It is OK to drop the filedesc lock here as we will
3121			 * re-validate and re-evaluate its properties when
3122			 * the loop continues.
3123			 */
3124			freepath = NULL;
3125			fullpath = "-";
3126			FILEDESC_SUNLOCK(fdp);
3127			vn_fullpath(curthread, vp, &fullpath, &freepath);
3128			vrele(vp);
3129			strlcpy(kif->kf_path, fullpath,
3130			    sizeof(kif->kf_path));
3131			if (freepath != NULL)
3132				free(freepath, M_TEMP);
3133			FILEDESC_SLOCK(fdp);
3134		}
3135		if (so != NULL) {
3136			struct sockaddr *sa;
3137
3138			if (so->so_proto->pr_usrreqs->pru_sockaddr(so, &sa)
3139			    == 0 && sa->sa_len <= sizeof(kif->kf_sa_local)) {
3140				bcopy(sa, &kif->kf_sa_local, sa->sa_len);
3141				free(sa, M_SONAME);
3142			}
3143			if (so->so_proto->pr_usrreqs->pru_peeraddr(so, &sa)
3144			    == 0 && sa->sa_len <= sizeof(kif->kf_sa_peer)) {
3145				bcopy(sa, &kif->kf_sa_peer, sa->sa_len);
3146				free(sa, M_SONAME);
3147			}
3148			kif->kf_sock_domain =
3149			    so->so_proto->pr_domain->dom_family;
3150			kif->kf_sock_type = so->so_type;
3151			kif->kf_sock_protocol = so->so_proto->pr_protocol;
3152		}
3153		if (tp != NULL) {
3154			strlcpy(kif->kf_path, tty_devname(tp),
3155			    sizeof(kif->kf_path));
3156		}
3157		if (shmfd != NULL)
3158			shm_path(shmfd, kif->kf_path, sizeof(kif->kf_path));
3159		if (ks != NULL && ksem_info != NULL)
3160			ksem_info(ks, kif->kf_path, sizeof(kif->kf_path), NULL);
3161		error = SYSCTL_OUT(req, kif, sizeof(*kif));
3162		if (error)
3163			break;
3164	}
3165	FILEDESC_SUNLOCK(fdp);
3166	fddrop(fdp);
3167	free(kif, M_TEMP);
3168	return (0);
3169}
3170
3171static SYSCTL_NODE(_kern_proc, KERN_PROC_OFILEDESC, ofiledesc, CTLFLAG_RD,
3172    sysctl_kern_proc_ofiledesc, "Process ofiledesc entries");
3173#endif	/* COMPAT_FREEBSD7 */
3174
3175#ifdef KINFO_FILE_SIZE
3176CTASSERT(sizeof(struct kinfo_file) == KINFO_FILE_SIZE);
3177#endif
3178
3179struct export_fd_buf {
3180	struct filedesc		*fdp;
3181	struct sbuf 		*sb;
3182	ssize_t			remainder;
3183	struct kinfo_file	kif;
3184};
3185
3186static int
3187export_fd_to_sb(void *data, int type, int fd, int fflags, int refcnt,
3188    int64_t offset, cap_rights_t fd_cap_rights, struct export_fd_buf *efbuf)
3189{
3190	struct {
3191		int	fflag;
3192		int	kf_fflag;
3193	} fflags_table[] = {
3194		{ FAPPEND, KF_FLAG_APPEND },
3195		{ FASYNC, KF_FLAG_ASYNC },
3196		{ FFSYNC, KF_FLAG_FSYNC },
3197		{ FHASLOCK, KF_FLAG_HASLOCK },
3198		{ FNONBLOCK, KF_FLAG_NONBLOCK },
3199		{ FREAD, KF_FLAG_READ },
3200		{ FWRITE, KF_FLAG_WRITE },
3201		{ O_CREAT, KF_FLAG_CREAT },
3202		{ O_DIRECT, KF_FLAG_DIRECT },
3203		{ O_EXCL, KF_FLAG_EXCL },
3204		{ O_EXEC, KF_FLAG_EXEC },
3205		{ O_EXLOCK, KF_FLAG_EXLOCK },
3206		{ O_NOFOLLOW, KF_FLAG_NOFOLLOW },
3207		{ O_SHLOCK, KF_FLAG_SHLOCK },
3208		{ O_TRUNC, KF_FLAG_TRUNC }
3209	};
3210#define	NFFLAGS	(sizeof(fflags_table) / sizeof(*fflags_table))
3211	struct kinfo_file *kif;
3212	struct vnode *vp;
3213	int error, locked;
3214	unsigned int i;
3215
3216	if (efbuf->remainder == 0)
3217		return (0);
3218	kif = &efbuf->kif;
3219	bzero(kif, sizeof(*kif));
3220	locked = efbuf->fdp != NULL;
3221	switch (type) {
3222	case KF_TYPE_FIFO:
3223	case KF_TYPE_VNODE:
3224		if (locked) {
3225			FILEDESC_SUNLOCK(efbuf->fdp);
3226			locked = 0;
3227		}
3228		vp = (struct vnode *)data;
3229		error = fill_vnode_info(vp, kif);
3230		vrele(vp);
3231		break;
3232	case KF_TYPE_SOCKET:
3233		error = fill_socket_info((struct socket *)data, kif);
3234		break;
3235	case KF_TYPE_PIPE:
3236		error = fill_pipe_info((struct pipe *)data, kif);
3237		break;
3238	case KF_TYPE_PTS:
3239		error = fill_pts_info((struct tty *)data, kif);
3240		break;
3241	case KF_TYPE_PROCDESC:
3242		error = fill_procdesc_info((struct procdesc *)data, kif);
3243		break;
3244	case KF_TYPE_SEM:
3245		error = fill_sem_info((struct file *)data, kif);
3246		break;
3247	case KF_TYPE_SHM:
3248		error = fill_shm_info((struct file *)data, kif);
3249		break;
3250	default:
3251		error = 0;
3252	}
3253	if (error == 0)
3254		kif->kf_status |= KF_ATTR_VALID;
3255
3256	/*
3257	 * Translate file access flags.
3258	 */
3259	for (i = 0; i < NFFLAGS; i++)
3260		if (fflags & fflags_table[i].fflag)
3261			kif->kf_flags |= fflags_table[i].kf_fflag;
3262	kif->kf_cap_rights = fd_cap_rights;
3263	kif->kf_fd = fd;
3264	kif->kf_type = type;
3265	kif->kf_ref_count = refcnt;
3266	kif->kf_offset = offset;
3267	/* Pack record size down */
3268	kif->kf_structsize = offsetof(struct kinfo_file, kf_path) +
3269	    strlen(kif->kf_path) + 1;
3270	kif->kf_structsize = roundup(kif->kf_structsize, sizeof(uint64_t));
3271	if (efbuf->remainder != -1) {
3272		if (efbuf->remainder < kif->kf_structsize) {
3273			/* Terminate export. */
3274			efbuf->remainder = 0;
3275			if (efbuf->fdp != NULL && !locked)
3276				FILEDESC_SLOCK(efbuf->fdp);
3277			return (0);
3278		}
3279		efbuf->remainder -= kif->kf_structsize;
3280	}
3281	if (locked)
3282		FILEDESC_SUNLOCK(efbuf->fdp);
3283	error = sbuf_bcat(efbuf->sb, kif, kif->kf_structsize);
3284	if (efbuf->fdp != NULL)
3285		FILEDESC_SLOCK(efbuf->fdp);
3286	return (error);
3287}
3288
3289/*
3290 * Store a process's file descriptor information in an sbuf.
3291 *
3292 * Takes a locked proc as argument, and returns with the proc unlocked.
3293 */
3294int
3295kern_proc_filedesc_out(struct proc *p, struct sbuf *sb, ssize_t maxlen)
3296{
3297	struct file *fp;
3298	struct filedesc *fdp;
3299	struct export_fd_buf *efbuf;
3300	struct vnode *cttyvp, *textvp, *tracevp;
3301	int64_t offset;
3302	void *data;
3303	int error, i;
3304	int type, refcnt, fflags;
3305	cap_rights_t fd_cap_rights;
3306
3307	PROC_LOCK_ASSERT(p, MA_OWNED);
3308
3309	/* ktrace vnode */
3310	tracevp = p->p_tracevp;
3311	if (tracevp != NULL)
3312		vref(tracevp);
3313	/* text vnode */
3314	textvp = p->p_textvp;
3315	if (textvp != NULL)
3316		vref(textvp);
3317	/* Controlling tty. */
3318	cttyvp = NULL;
3319	if (p->p_pgrp != NULL && p->p_pgrp->pg_session != NULL) {
3320		cttyvp = p->p_pgrp->pg_session->s_ttyvp;
3321		if (cttyvp != NULL)
3322			vref(cttyvp);
3323	}
3324	fdp = fdhold(p);
3325	PROC_UNLOCK(p);
3326	efbuf = malloc(sizeof(*efbuf), M_TEMP, M_WAITOK);
3327	efbuf->fdp = NULL;
3328	efbuf->sb = sb;
3329	efbuf->remainder = maxlen;
3330	if (tracevp != NULL)
3331		export_fd_to_sb(tracevp, KF_TYPE_VNODE, KF_FD_TYPE_TRACE,
3332		    FREAD | FWRITE, -1, -1, 0, efbuf);
3333	if (textvp != NULL)
3334		export_fd_to_sb(textvp, KF_TYPE_VNODE, KF_FD_TYPE_TEXT,
3335		    FREAD, -1, -1, 0, efbuf);
3336	if (cttyvp != NULL)
3337		export_fd_to_sb(cttyvp, KF_TYPE_VNODE, KF_FD_TYPE_CTTY,
3338		    FREAD | FWRITE, -1, -1, 0, efbuf);
3339	error = 0;
3340	if (fdp == NULL)
3341		goto fail;
3342	efbuf->fdp = fdp;
3343	FILEDESC_SLOCK(fdp);
3344	/* working directory */
3345	if (fdp->fd_cdir != NULL) {
3346		vref(fdp->fd_cdir);
3347		data = fdp->fd_cdir;
3348		export_fd_to_sb(data, KF_TYPE_VNODE, KF_FD_TYPE_CWD,
3349		    FREAD, -1, -1, 0, efbuf);
3350	}
3351	/* root directory */
3352	if (fdp->fd_rdir != NULL) {
3353		vref(fdp->fd_rdir);
3354		data = fdp->fd_rdir;
3355		export_fd_to_sb(data, KF_TYPE_VNODE, KF_FD_TYPE_ROOT,
3356		    FREAD, -1, -1, 0, efbuf);
3357	}
3358	/* jail directory */
3359	if (fdp->fd_jdir != NULL) {
3360		vref(fdp->fd_jdir);
3361		data = fdp->fd_jdir;
3362		export_fd_to_sb(data, KF_TYPE_VNODE, KF_FD_TYPE_JAIL,
3363		    FREAD, -1, -1, 0, efbuf);
3364	}
3365	for (i = 0; i < fdp->fd_nfiles; i++) {
3366		if ((fp = fdp->fd_ofiles[i].fde_file) == NULL)
3367			continue;
3368		data = NULL;
3369#ifdef CAPABILITIES
3370		fd_cap_rights = cap_rights(fdp, i);
3371#else /* !CAPABILITIES */
3372		fd_cap_rights = 0;
3373#endif
3374		switch (fp->f_type) {
3375		case DTYPE_VNODE:
3376			type = KF_TYPE_VNODE;
3377			vref(fp->f_vnode);
3378			data = fp->f_vnode;
3379			break;
3380
3381		case DTYPE_SOCKET:
3382			type = KF_TYPE_SOCKET;
3383			data = fp->f_data;
3384			break;
3385
3386		case DTYPE_PIPE:
3387			type = KF_TYPE_PIPE;
3388			data = fp->f_data;
3389			break;
3390
3391		case DTYPE_FIFO:
3392			type = KF_TYPE_FIFO;
3393			vref(fp->f_vnode);
3394			data = fp->f_vnode;
3395			break;
3396
3397		case DTYPE_KQUEUE:
3398			type = KF_TYPE_KQUEUE;
3399			break;
3400
3401		case DTYPE_CRYPTO:
3402			type = KF_TYPE_CRYPTO;
3403			break;
3404
3405		case DTYPE_MQUEUE:
3406			type = KF_TYPE_MQUEUE;
3407			break;
3408
3409		case DTYPE_SHM:
3410			type = KF_TYPE_SHM;
3411			data = fp;
3412			break;
3413
3414		case DTYPE_SEM:
3415			type = KF_TYPE_SEM;
3416			data = fp;
3417			break;
3418
3419		case DTYPE_PTS:
3420			type = KF_TYPE_PTS;
3421			data = fp->f_data;
3422			break;
3423
3424#ifdef PROCDESC
3425		case DTYPE_PROCDESC:
3426			type = KF_TYPE_PROCDESC;
3427			data = fp->f_data;
3428			break;
3429#endif
3430
3431		default:
3432			type = KF_TYPE_UNKNOWN;
3433			break;
3434		}
3435		refcnt = fp->f_count;
3436		fflags = fp->f_flag;
3437		offset = foffset_get(fp);
3438
3439		/*
3440		 * Create sysctl entry.
3441		 * It is OK to drop the filedesc lock here as we will
3442		 * re-validate and re-evaluate its properties when
3443		 * the loop continues.
3444		 */
3445		error = export_fd_to_sb(data, type, i, fflags, refcnt,
3446		    offset, fd_cap_rights, efbuf);
3447		if (error)
3448			break;
3449	}
3450	FILEDESC_SUNLOCK(fdp);
3451	fddrop(fdp);
3452fail:
3453	free(efbuf, M_TEMP);
3454	return (error);
3455}
3456
3457#define FILEDESC_SBUF_SIZE	(sizeof(struct kinfo_file) * 5)
3458
3459/*
3460 * Get per-process file descriptors for use by procstat(1), et al.
3461 */
3462static int
3463sysctl_kern_proc_filedesc(SYSCTL_HANDLER_ARGS)
3464{
3465	struct sbuf sb;
3466	struct proc *p;
3467	ssize_t maxlen;
3468	int error, error2, *name;
3469
3470	name = (int *)arg1;
3471
3472	sbuf_new_for_sysctl(&sb, NULL, FILEDESC_SBUF_SIZE, req);
3473	error = pget((pid_t)name[0], PGET_CANDEBUG, &p);
3474	if (error != 0) {
3475		sbuf_delete(&sb);
3476		return (error);
3477	}
3478	maxlen = req->oldptr != NULL ? req->oldlen : -1;
3479	error = kern_proc_filedesc_out(p, &sb, maxlen);
3480	error2 = sbuf_finish(&sb);
3481	sbuf_delete(&sb);
3482	return (error != 0 ? error : error2);
3483}
3484
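/*
 * Illustrative userland sketch (not compiled): how a consumer such as
 * procstat(1) might walk the variable-length kinfo_file records produced
 * above, using the two-pass size probe and the kf_structsize field.  "pid"
 * is a placeholder.
 *
 *	int mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_FILEDESC, pid };
 *	size_t len = 0;
 *	char *buf, *p;
 *	struct kinfo_file *kif;
 *
 *	sysctl(mib, 4, NULL, &len, NULL, 0);	(size estimate)
 *	buf = malloc(len);
 *	sysctl(mib, 4, buf, &len, NULL, 0);
 *	for (p = buf; p < buf + len; p += kif->kf_structsize) {
 *		kif = (struct kinfo_file *)(void *)p;
 *		printf("%d %s\n", kif->kf_fd, kif->kf_path);
 *	}
 */
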
3485int
3486vntype_to_kinfo(int vtype)
3487{
3488	struct {
3489		int	vtype;
3490		int	kf_vtype;
3491	} vtypes_table[] = {
3492		{ VBAD, KF_VTYPE_VBAD },
3493		{ VBLK, KF_VTYPE_VBLK },
3494		{ VCHR, KF_VTYPE_VCHR },
3495		{ VDIR, KF_VTYPE_VDIR },
3496		{ VFIFO, KF_VTYPE_VFIFO },
3497		{ VLNK, KF_VTYPE_VLNK },
3498		{ VNON, KF_VTYPE_VNON },
3499		{ VREG, KF_VTYPE_VREG },
3500		{ VSOCK, KF_VTYPE_VSOCK }
3501	};
3502#define	NVTYPES	(sizeof(vtypes_table) / sizeof(*vtypes_table))
3503	unsigned int i;
3504
3505	/*
3506	 * Perform vtype translation.
3507	 */
3508	for (i = 0; i < NVTYPES; i++)
3509		if (vtypes_table[i].vtype == vtype)
3510			break;
3511	if (i < NVTYPES)
3512		return (vtypes_table[i].kf_vtype);
3513
3514	return (KF_VTYPE_UNKNOWN);
3515}
3516
3517static int
3518fill_vnode_info(struct vnode *vp, struct kinfo_file *kif)
3519{
3520	struct vattr va;
3521	char *fullpath, *freepath;
3522	int error;
3523
3524	if (vp == NULL)
3525		return (1);
3526	kif->kf_vnode_type = vntype_to_kinfo(vp->v_type);
3527	freepath = NULL;
3528	fullpath = "-";
3529	error = vn_fullpath(curthread, vp, &fullpath, &freepath);
3530	if (error == 0) {
3531		strlcpy(kif->kf_path, fullpath, sizeof(kif->kf_path));
3532	}
3533	if (freepath != NULL)
3534		free(freepath, M_TEMP);
3535
3536	/*
3537	 * Retrieve vnode attributes.
3538	 */
3539	va.va_fsid = VNOVAL;
3540	va.va_rdev = NODEV;
3541	vn_lock(vp, LK_SHARED | LK_RETRY);
3542	error = VOP_GETATTR(vp, &va, curthread->td_ucred);
3543	VOP_UNLOCK(vp, 0);
3544	if (error != 0)
3545		return (error);
3546	if (va.va_fsid != VNOVAL)
3547		kif->kf_un.kf_file.kf_file_fsid = va.va_fsid;
3548	else
3549		kif->kf_un.kf_file.kf_file_fsid =
3550		    vp->v_mount->mnt_stat.f_fsid.val[0];
3551	kif->kf_un.kf_file.kf_file_fileid = va.va_fileid;
3552	kif->kf_un.kf_file.kf_file_mode = MAKEIMODE(va.va_type, va.va_mode);
3553	kif->kf_un.kf_file.kf_file_size = va.va_size;
3554	kif->kf_un.kf_file.kf_file_rdev = va.va_rdev;
3555	return (0);
3556}
3557
3558static int
3559fill_socket_info(struct socket *so, struct kinfo_file *kif)
3560{
3561	struct sockaddr *sa;
3562	struct inpcb *inpcb;
3563	struct unpcb *unpcb;
3564	int error;
3565
3566	if (so == NULL)
3567		return (1);
3568	kif->kf_sock_domain = so->so_proto->pr_domain->dom_family;
3569	kif->kf_sock_type = so->so_type;
3570	kif->kf_sock_protocol = so->so_proto->pr_protocol;
3571	kif->kf_un.kf_sock.kf_sock_pcb = (uintptr_t)so->so_pcb;
3572	switch (kif->kf_sock_domain) {
3573	case AF_INET:
3574	case AF_INET6:
3575		if (kif->kf_sock_protocol == IPPROTO_TCP) {
3576			if (so->so_pcb != NULL) {
3577				inpcb = (struct inpcb *)(so->so_pcb);
3578				kif->kf_un.kf_sock.kf_sock_inpcb =
3579				    (uintptr_t)inpcb->inp_ppcb;
3580			}
3581		}
3582		break;
3583	case AF_UNIX:
3584		if (so->so_pcb != NULL) {
3585			unpcb = (struct unpcb *)(so->so_pcb);
3586			if (unpcb->unp_conn) {
3587				kif->kf_un.kf_sock.kf_sock_unpconn =
3588				    (uintptr_t)unpcb->unp_conn;
3589				kif->kf_un.kf_sock.kf_sock_rcv_sb_state =
3590				    so->so_rcv.sb_state;
3591				kif->kf_un.kf_sock.kf_sock_snd_sb_state =
3592				    so->so_snd.sb_state;
3593			}
3594		}
3595		break;
3596	}
3597	error = so->so_proto->pr_usrreqs->pru_sockaddr(so, &sa);
3598	if (error == 0 && sa->sa_len <= sizeof(kif->kf_sa_local)) {
3599		bcopy(sa, &kif->kf_sa_local, sa->sa_len);
3600		free(sa, M_SONAME);
3601	}
3602	error = so->so_proto->pr_usrreqs->pru_peeraddr(so, &sa);
3603	if (error == 0 && sa->sa_len <= sizeof(kif->kf_sa_peer)) {
3604		bcopy(sa, &kif->kf_sa_peer, sa->sa_len);
3605		free(sa, M_SONAME);
3606	}
3607	strncpy(kif->kf_path, so->so_proto->pr_domain->dom_name,
3608	    sizeof(kif->kf_path));
3609	return (0);
3610}
3611
3612static int
3613fill_pts_info(struct tty *tp, struct kinfo_file *kif)
3614{
3615
3616	if (tp == NULL)
3617		return (1);
3618	kif->kf_un.kf_pts.kf_pts_dev = tty_udev(tp);
3619	strlcpy(kif->kf_path, tty_devname(tp), sizeof(kif->kf_path));
3620	return (0);
3621}
3622
3623static int
3624fill_pipe_info(struct pipe *pi, struct kinfo_file *kif)
3625{
3626
3627	if (pi == NULL)
3628		return (1);
3629	kif->kf_un.kf_pipe.kf_pipe_addr = (uintptr_t)pi;
3630	kif->kf_un.kf_pipe.kf_pipe_peer = (uintptr_t)pi->pipe_peer;
3631	kif->kf_un.kf_pipe.kf_pipe_buffer_cnt = pi->pipe_buffer.cnt;
3632	return (0);
3633}
3634
3635static int
3636fill_procdesc_info(struct procdesc *pdp, struct kinfo_file *kif)
3637{
3638
3639	if (pdp == NULL)
3640		return (1);
3641	kif->kf_un.kf_proc.kf_pid = pdp->pd_pid;
3642	return (0);
3643}
3644
3645static int
3646fill_sem_info(struct file *fp, struct kinfo_file *kif)
3647{
3648	struct thread *td;
3649	struct stat sb;
3650
3651	td = curthread;
3652	if (fp->f_data == NULL)
3653		return (1);
3654	if (fo_stat(fp, &sb, td->td_ucred, td) != 0)
3655		return (1);
3656	if (ksem_info == NULL)
3657		return (1);
3658	ksem_info(fp->f_data, kif->kf_path, sizeof(kif->kf_path),
3659	    &kif->kf_un.kf_sem.kf_sem_value);
3660	kif->kf_un.kf_sem.kf_sem_mode = sb.st_mode;
3661	return (0);
3662}
3663
3664static int
3665fill_shm_info(struct file *fp, struct kinfo_file *kif)
3666{
3667	struct thread *td;
3668	struct stat sb;
3669
3670	td = curthread;
3671	if (fp->f_data == NULL)
3672		return (1);
3673	if (fo_stat(fp, &sb, td->td_ucred, td) != 0)
3674		return (1);
3675	shm_path(fp->f_data, kif->kf_path, sizeof(kif->kf_path));
3676	kif->kf_un.kf_file.kf_file_mode = sb.st_mode;
3677	kif->kf_un.kf_file.kf_file_size = sb.st_size;
3678	return (0);
3679}
3680
3681static SYSCTL_NODE(_kern_proc, KERN_PROC_FILEDESC, filedesc, CTLFLAG_RD,
3682    sysctl_kern_proc_filedesc, "Process filedesc entries");
3683
3684#ifdef DDB
3685/*
3686 * For the purposes of debugging, generate a human-readable string for the
3687 * file type.
3688 */
3689static const char *
3690file_type_to_name(short type)
3691{
3692
3693	switch (type) {
3694	case 0:
3695		return ("zero");
3696	case DTYPE_VNODE:
3697		return ("vnod");
3698	case DTYPE_SOCKET:
3699		return ("sock");
3700	case DTYPE_PIPE:
3701		return ("pipe");
3702	case DTYPE_FIFO:
3703		return ("fifo");
3704	case DTYPE_KQUEUE:
3705		return ("kque");
3706	case DTYPE_CRYPTO:
3707		return ("crpt");
3708	case DTYPE_MQUEUE:
3709		return ("mque");
3710	case DTYPE_SHM:
3711		return ("shm");
3712	case DTYPE_SEM:
3713		return ("ksem");
3714	default:
3715		return ("unkn");
3716	}
3717}
3718
3719/*
3720 * For the purposes of debugging, identify a process (if any, perhaps one of
3721 * many) that references the passed file in its file descriptor array. Return
3722 * NULL if none.
3723 */
3724static struct proc *
3725file_to_first_proc(struct file *fp)
3726{
3727	struct filedesc *fdp;
3728	struct proc *p;
3729	int n;
3730
3731	FOREACH_PROC_IN_SYSTEM(p) {
3732		if (p->p_state == PRS_NEW)
3733			continue;
3734		fdp = p->p_fd;
3735		if (fdp == NULL)
3736			continue;
3737		for (n = 0; n < fdp->fd_nfiles; n++) {
3738			if (fp == fdp->fd_ofiles[n].fde_file)
3739				return (p);
3740		}
3741	}
3742	return (NULL);
3743}
3744
3745static void
3746db_print_file(struct file *fp, int header)
3747{
3748	struct proc *p;
3749
3750	if (header)
3751		db_printf("%8s %4s %8s %8s %4s %5s %6s %8s %5s %12s\n",
3752		    "File", "Type", "Data", "Flag", "GCFl", "Count",
3753		    "MCount", "Vnode", "FPID", "FCmd");
3754	p = file_to_first_proc(fp);
3755	db_printf("%8p %4s %8p %08x %04x %5d %6d %8p %5d %12s\n", fp,
3756	    file_type_to_name(fp->f_type), fp->f_data, fp->f_flag,
3757	    0, fp->f_count, 0, fp->f_vnode,
3758	    p != NULL ? p->p_pid : -1, p != NULL ? p->p_comm : "-");
3759}
3760
3761DB_SHOW_COMMAND(file, db_show_file)
3762{
3763	struct file *fp;
3764
3765	if (!have_addr) {
3766		db_printf("usage: show file <addr>\n");
3767		return;
3768	}
3769	fp = (struct file *)addr;
3770	db_print_file(fp, 1);
3771}
3772
3773DB_SHOW_COMMAND(files, db_show_files)
3774{
3775	struct filedesc *fdp;
3776	struct file *fp;
3777	struct proc *p;
3778	int header;
3779	int n;
3780
3781	header = 1;
3782	FOREACH_PROC_IN_SYSTEM(p) {
3783		if (p->p_state == PRS_NEW)
3784			continue;
3785		if ((fdp = p->p_fd) == NULL)
3786			continue;
3787		for (n = 0; n < fdp->fd_nfiles; ++n) {
3788			if ((fp = fdp->fd_ofiles[n].fde_file) == NULL)
3789				continue;
3790			db_print_file(fp, header);
3791			header = 0;
3792		}
3793	}
3794}
3795#endif
3796
3797SYSCTL_INT(_kern, KERN_MAXFILESPERPROC, maxfilesperproc, CTLFLAG_RW,
3798    &maxfilesperproc, 0, "Maximum files allowed open per process");
3799
3800SYSCTL_INT(_kern, KERN_MAXFILES, maxfiles, CTLFLAG_RW,
3801    &maxfiles, 0, "Maximum number of files");
3802
3803SYSCTL_INT(_kern, OID_AUTO, openfiles, CTLFLAG_RD,
3804    __DEVOLATILE(int *, &openfiles), 0, "System-wide number of open files");
3805
3806/* ARGSUSED*/
3807static void
3808filelistinit(void *dummy)
3809{
3810
3811	file_zone = uma_zcreate("Files", sizeof(struct file), NULL, NULL,
3812	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
3813	mtx_init(&sigio_lock, "sigio lock", NULL, MTX_DEF);
3814	mtx_init(&fdesc_mtx, "fdesc", NULL, MTX_DEF);
3815}
3816SYSINIT(select, SI_SUB_LOCK, SI_ORDER_FIRST, filelistinit, NULL);
3817
3818/*-------------------------------------------------------------------*/
3819
3820static int
3821badfo_readwrite(struct file *fp, struct uio *uio, struct ucred *active_cred,
3822    int flags, struct thread *td)
3823{
3824
3825	return (EBADF);
3826}
3827
3828static int
3829badfo_truncate(struct file *fp, off_t length, struct ucred *active_cred,
3830    struct thread *td)
3831{
3832
3833	return (EINVAL);
3834}
3835
3836static int
3837badfo_ioctl(struct file *fp, u_long com, void *data, struct ucred *active_cred,
3838    struct thread *td)
3839{
3840
3841	return (EBADF);
3842}
3843
3844static int
3845badfo_poll(struct file *fp, int events, struct ucred *active_cred,
3846    struct thread *td)
3847{
3848
3849	return (0);
3850}
3851
3852static int
3853badfo_kqfilter(struct file *fp, struct knote *kn)
3854{
3855
3856	return (EBADF);
3857}
3858
3859static int
3860badfo_stat(struct file *fp, struct stat *sb, struct ucred *active_cred,
3861    struct thread *td)
3862{
3863
3864	return (EBADF);
3865}
3866
3867static int
3868badfo_close(struct file *fp, struct thread *td)
3869{
3870
3871	return (EBADF);
3872}
3873
3874static int
3875badfo_chmod(struct file *fp, mode_t mode, struct ucred *active_cred,
3876    struct thread *td)
3877{
3878
3879	return (EBADF);
3880}
3881
3882static int
3883badfo_chown(struct file *fp, uid_t uid, gid_t gid, struct ucred *active_cred,
3884    struct thread *td)
3885{
3886
3887	return (EBADF);
3888}
3889
3890struct fileops badfileops = {
3891	.fo_read = badfo_readwrite,
3892	.fo_write = badfo_readwrite,
3893	.fo_truncate = badfo_truncate,
3894	.fo_ioctl = badfo_ioctl,
3895	.fo_poll = badfo_poll,
3896	.fo_kqfilter = badfo_kqfilter,
3897	.fo_stat = badfo_stat,
3898	.fo_close = badfo_close,
3899	.fo_chmod = badfo_chmod,
3900	.fo_chown = badfo_chown,
3901};
3902
3903int
3904invfo_chmod(struct file *fp, mode_t mode, struct ucred *active_cred,
3905    struct thread *td)
3906{
3907
3908	return (EINVAL);
3909}
3910
3911int
3912invfo_chown(struct file *fp, uid_t uid, gid_t gid, struct ucred *active_cred,
3913    struct thread *td)
3914{
3915
3916	return (EINVAL);
3917}
3918
3919/*-------------------------------------------------------------------*/
3920
3921/*
3922 * File Descriptor pseudo-device driver (/dev/fd/).
3923 *
3924 * Opening minor device N dup()s the file (if any) connected to file
3925 * descriptor N belonging to the calling process.  Note that this driver
3926 * consists of only the ``open()'' routine, because all subsequent
3927 * references to this file will be direct to the other driver.
3928 *
3929 * XXX: we could give this one a cloning event handler if necessary.
3930 */
3931
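/*
 * Illustrative userland sketch (not compiled) of the behaviour described
 * above: opening /dev/fd/N yields a new descriptor referring to the same
 * open file as descriptor N of the calling process.
 *
 *	int fd = open("/dev/fd/0", O_RDONLY);
 *					(roughly equivalent to dup(0),
 *					 subject to the mode check in
 *					 dupfdopen() above)
 */
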
3932/* ARGSUSED */
3933static int
3934fdopen(struct cdev *dev, int mode, int type, struct thread *td)
3935{
3936
3937	/*
3938	 * XXX Kludge: set curthread->td_dupfd to contain the value of the
3939	 * file descriptor being sought for duplication.  The error
3940	 * return ensures that the vnode for this device will be released
3941	 * by vn_open.  Open will detect this special error and take the
3942	 * actions in dupfdopen() above.  Other callers of vn_open or VOP_OPEN
3943	 * will simply report the error.
3944	 */
3945	td->td_dupfd = dev2unit(dev);
3946	return (ENODEV);
3947}
3948
3949static struct cdevsw fildesc_cdevsw = {
3950	.d_version =	D_VERSION,
3951	.d_open =	fdopen,
3952	.d_name =	"FD",
3953};
3954
3955static void
3956fildesc_drvinit(void *unused)
3957{
3958	struct cdev *dev;
3959
3960	dev = make_dev_credf(MAKEDEV_ETERNAL, &fildesc_cdevsw, 0, NULL,
3961	    UID_ROOT, GID_WHEEL, 0666, "fd/0");
3962	make_dev_alias(dev, "stdin");
3963	dev = make_dev_credf(MAKEDEV_ETERNAL, &fildesc_cdevsw, 1, NULL,
3964	    UID_ROOT, GID_WHEEL, 0666, "fd/1");
3965	make_dev_alias(dev, "stdout");
3966	dev = make_dev_credf(MAKEDEV_ETERNAL, &fildesc_cdevsw, 2, NULL,
3967	    UID_ROOT, GID_WHEEL, 0666, "fd/2");
3968	make_dev_alias(dev, "stderr");
3969}
3970
3971SYSINIT(fildescdev, SI_SUB_DRIVERS, SI_ORDER_MIDDLE, fildesc_drvinit, NULL);
3972