kern_descrip.c revision 285963
1/*-
2 * Copyright (c) 1982, 1986, 1989, 1991, 1993
3 *	The Regents of the University of California.  All rights reserved.
4 * (c) UNIX System Laboratories, Inc.
5 * All or some portions of this file are derived from material licensed
6 * to the University of California by American Telephone and Telegraph
7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8 * the permission of UNIX System Laboratories, Inc.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 *    notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 *    notice, this list of conditions and the following disclaimer in the
17 *    documentation and/or other materials provided with the distribution.
18 * 4. Neither the name of the University nor the names of its contributors
19 *    may be used to endorse or promote products derived from this software
20 *    without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 *	@(#)kern_descrip.c	8.6 (Berkeley) 4/19/94
35 */
36
37#include <sys/cdefs.h>
38__FBSDID("$FreeBSD: stable/10/sys/kern/kern_descrip.c 285963 2015-07-28 16:39:36Z kib $");
39
40#include "opt_capsicum.h"
41#include "opt_compat.h"
42#include "opt_ddb.h"
43#include "opt_ktrace.h"
44#include "opt_procdesc.h"
45
46#include <sys/param.h>
47#include <sys/systm.h>
48
49#include <sys/capsicum.h>
50#include <sys/conf.h>
51#include <sys/domain.h>
52#include <sys/fcntl.h>
53#include <sys/file.h>
54#include <sys/filedesc.h>
55#include <sys/filio.h>
56#include <sys/jail.h>
57#include <sys/kernel.h>
58#include <sys/ksem.h>
59#include <sys/limits.h>
60#include <sys/lock.h>
61#include <sys/malloc.h>
62#include <sys/mman.h>
63#include <sys/mount.h>
64#include <sys/mqueue.h>
65#include <sys/mutex.h>
66#include <sys/namei.h>
67#include <sys/selinfo.h>
68#include <sys/pipe.h>
69#include <sys/priv.h>
70#include <sys/proc.h>
71#include <sys/procdesc.h>
72#include <sys/protosw.h>
73#include <sys/racct.h>
74#include <sys/resourcevar.h>
75#include <sys/sbuf.h>
76#include <sys/signalvar.h>
77#include <sys/socketvar.h>
78#include <sys/stat.h>
79#include <sys/sx.h>
80#include <sys/syscallsubr.h>
81#include <sys/sysctl.h>
82#include <sys/sysproto.h>
83#include <sys/tty.h>
84#include <sys/unistd.h>
85#include <sys/un.h>
86#include <sys/unpcb.h>
87#include <sys/user.h>
88#include <sys/vnode.h>
89#ifdef KTRACE
90#include <sys/ktrace.h>
91#endif
92
93#include <net/vnet.h>
94
95#include <netinet/in.h>
96#include <netinet/in_pcb.h>
97
98#include <security/audit/audit.h>
99
100#include <vm/uma.h>
101#include <vm/vm.h>
102
103#include <ddb/ddb.h>
104
105static MALLOC_DEFINE(M_FILEDESC, "filedesc", "Open file descriptor table");
106static MALLOC_DEFINE(M_FILEDESC_TO_LEADER, "filedesc_to_leader",
107    "file desc to leader structures");
108static MALLOC_DEFINE(M_SIGIO, "sigio", "sigio structures");
109MALLOC_DEFINE(M_FILECAPS, "filecaps", "descriptor capabilities");
110
111MALLOC_DECLARE(M_FADVISE);
112
113static uma_zone_t file_zone;
114
115void	(*ksem_info)(struct ksem *ks, char *path, size_t size, uint32_t *value);
116
117static int	closefp(struct filedesc *fdp, int fd, struct file *fp,
118		    struct thread *td, int holdleaders);
119static int	fd_first_free(struct filedesc *fdp, int low, int size);
120static int	fd_last_used(struct filedesc *fdp, int size);
121static void	fdgrowtable(struct filedesc *fdp, int nfd);
122static void	fdgrowtable_exp(struct filedesc *fdp, int nfd);
123static void	fdunused(struct filedesc *fdp, int fd);
124static void	fdused(struct filedesc *fdp, int fd);
125static int	fill_pipe_info(struct pipe *pi, struct kinfo_file *kif);
126static int	fill_procdesc_info(struct procdesc *pdp,
127		    struct kinfo_file *kif);
128static int	fill_pts_info(struct tty *tp, struct kinfo_file *kif);
129static int	fill_sem_info(struct file *fp, struct kinfo_file *kif);
130static int	fill_shm_info(struct file *fp, struct kinfo_file *kif);
131static int	fill_socket_info(struct socket *so, struct kinfo_file *kif);
132static int	fill_vnode_info(struct vnode *vp, struct kinfo_file *kif);
133static int	getmaxfd(struct proc *p);
134
135/*
136 * Each process has:
137 *
138 * - An array of open file descriptors (fd_ofiles)
139 * - An array of file flags (fd_ofileflags)
140 * - A bitmap recording which descriptors are in use (fd_map)
141 *
142 * A process starts out with NDFILE descriptors.  The value of NDFILE has
143 * been selected based on the historical limit of 20 open files, and an
144 * assumption that the majority of processes, especially short-lived
145 * processes like shells, will never need more.
146 *
147 * If this initial allocation is exhausted, a larger descriptor table and
148 * map are allocated dynamically, and the pointers in the process's struct
149 * filedesc are updated to point to those.  This is repeated every time
150 * the process runs out of file descriptors (provided it hasn't hit its
151 * resource limit).
152 *
153 * Since threads may hold references to individual descriptor table
154 * entries, the tables are never freed.  Instead, they are placed on a
155 * linked list and freed only when the struct filedesc is released.
156 */
157#define NDFILE		20
158#define NDSLOTSIZE	sizeof(NDSLOTTYPE)
159#define	NDENTRIES	(NDSLOTSIZE * __CHAR_BIT)
160#define NDSLOT(x)	((x) / NDENTRIES)
161#define NDBIT(x)	((NDSLOTTYPE)1 << ((x) % NDENTRIES))
162#define	NDSLOTS(x)	(((x) + NDENTRIES - 1) / NDENTRIES)
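/*
 * Illustrative example (not part of the original source): assuming a 64-bit
 * NDSLOTTYPE, NDENTRIES is 64, so descriptor 77 lives in map slot
 * NDSLOT(77) == 1 as bit NDBIT(77) == (1 << 13), and a table of 100
 * descriptors needs NDSLOTS(100) == 2 map slots.
 */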
163
164/*
165 * SLIST entry used to keep track of ofiles which must be reclaimed when
166 * the process exits.
167 */
168struct freetable {
169	struct filedescent *ft_table;
170	SLIST_ENTRY(freetable) ft_next;
171};
172
173/*
174 * Initial allocation: a filedesc structure + the head of SLIST used to
175 * keep track of old ofiles + enough space for NDFILE descriptors.
176 */
177struct filedesc0 {
178	struct filedesc fd_fd;
179	SLIST_HEAD(, freetable) fd_free;
180	struct	filedescent fd_dfiles[NDFILE];
181	NDSLOTTYPE fd_dmap[NDSLOTS(NDFILE)];
182};
183
184/*
185 * Descriptor management.
186 */
187volatile int openfiles;			/* actual number of open files */
188struct mtx sigio_lock;		/* mtx to protect pointers to sigio */
189void (*mq_fdclose)(struct thread *td, int fd, struct file *fp);
190
191/* A mutex to protect the association between a proc and filedesc. */
192static struct mtx fdesc_mtx;
193
194/*
195 * If low >= size, just return low. Otherwise find the first zero bit in the
196 * given bitmap, starting at low and not exceeding size - 1. Return size if
197 * not found.
198 */
199static int
200fd_first_free(struct filedesc *fdp, int low, int size)
201{
202	NDSLOTTYPE *map = fdp->fd_map;
203	NDSLOTTYPE mask;
204	int off, maxoff;
205
206	if (low >= size)
207		return (low);
208
209	off = NDSLOT(low);
210	if (low % NDENTRIES) {
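		/*
		 * The mask has ones at bit positions low % NDENTRIES and
		 * above, so ANDing it with ~map[off] keeps only the free
		 * entries at or after 'low' within this slot.
		 */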
211		mask = ~(~(NDSLOTTYPE)0 >> (NDENTRIES - (low % NDENTRIES)));
212		if ((mask &= ~map[off]) != 0UL)
213			return (off * NDENTRIES + ffsl(mask) - 1);
214		++off;
215	}
216	for (maxoff = NDSLOTS(size); off < maxoff; ++off)
217		if (map[off] != ~0UL)
218			return (off * NDENTRIES + ffsl(~map[off]) - 1);
219	return (size);
220}
221
222/*
223 * Find the highest non-zero bit in the given bitmap, starting at 0 and
224 * not exceeding size - 1. Return -1 if not found.
225 */
226static int
227fd_last_used(struct filedesc *fdp, int size)
228{
229	NDSLOTTYPE *map = fdp->fd_map;
230	NDSLOTTYPE mask;
231	int off, minoff;
232
233	off = NDSLOT(size);
234	if (size % NDENTRIES) {
235		mask = ~(~(NDSLOTTYPE)0 << (size % NDENTRIES));
236		if ((mask &= map[off]) != 0)
237			return (off * NDENTRIES + flsl(mask) - 1);
238		--off;
239	}
240	for (minoff = NDSLOT(0); off >= minoff; --off)
241		if (map[off] != 0)
242			return (off * NDENTRIES + flsl(map[off]) - 1);
243	return (-1);
244}
245
246static int
247fdisused(struct filedesc *fdp, int fd)
248{
249
250	FILEDESC_LOCK_ASSERT(fdp);
251
252	KASSERT(fd >= 0 && fd < fdp->fd_nfiles,
253	    ("file descriptor %d out of range (0, %d)", fd, fdp->fd_nfiles));
254
255	return ((fdp->fd_map[NDSLOT(fd)] & NDBIT(fd)) != 0);
256}
257
258/*
259 * Mark a file descriptor as used.
260 */
261static void
262fdused(struct filedesc *fdp, int fd)
263{
264
265	FILEDESC_XLOCK_ASSERT(fdp);
266
267	KASSERT(!fdisused(fdp, fd), ("fd=%d is already used", fd));
268
269	fdp->fd_map[NDSLOT(fd)] |= NDBIT(fd);
270	if (fd > fdp->fd_lastfile)
271		fdp->fd_lastfile = fd;
272	if (fd == fdp->fd_freefile)
273		fdp->fd_freefile = fd_first_free(fdp, fd, fdp->fd_nfiles);
274}
275
276/*
277 * Mark a file descriptor as unused.
278 */
279static void
280fdunused(struct filedesc *fdp, int fd)
281{
282
283	FILEDESC_XLOCK_ASSERT(fdp);
284
285	KASSERT(fdisused(fdp, fd), ("fd=%d is already unused", fd));
286	KASSERT(fdp->fd_ofiles[fd].fde_file == NULL,
287	    ("fd=%d is still in use", fd));
288
289	fdp->fd_map[NDSLOT(fd)] &= ~NDBIT(fd);
290	if (fd < fdp->fd_freefile)
291		fdp->fd_freefile = fd;
292	if (fd == fdp->fd_lastfile)
293		fdp->fd_lastfile = fd_last_used(fdp, fd);
294}
295
296/*
297 * Free a file descriptor.
298 *
299 * Avoid some work if fdp is about to be destroyed.
300 */
301static inline void
302_fdfree(struct filedesc *fdp, int fd, int last)
303{
304	struct filedescent *fde;
305
306	fde = &fdp->fd_ofiles[fd];
307#ifdef CAPABILITIES
308	if (!last)
309		seq_write_begin(&fde->fde_seq);
310#endif
311	filecaps_free(&fde->fde_caps);
312	if (last)
313		return;
314	bzero(fde, fde_change_size);
315	fdunused(fdp, fd);
316#ifdef CAPABILITIES
317	seq_write_end(&fde->fde_seq);
318#endif
319}
320
321static inline void
322fdfree(struct filedesc *fdp, int fd)
323{
324
325	_fdfree(fdp, fd, 0);
326}
327
328static inline void
329fdfree_last(struct filedesc *fdp, int fd)
330{
331
332	_fdfree(fdp, fd, 1);
333}
334
335/*
336 * System calls on descriptors.
337 */
338#ifndef _SYS_SYSPROTO_H_
339struct getdtablesize_args {
340	int	dummy;
341};
342#endif
343/* ARGSUSED */
344int
345sys_getdtablesize(struct thread *td, struct getdtablesize_args *uap)
346{
347	struct proc *p = td->td_proc;
348	uint64_t lim;
349
350	PROC_LOCK(p);
351	td->td_retval[0] =
352	    min((int)lim_cur(p, RLIMIT_NOFILE), maxfilesperproc);
353	lim = racct_get_limit(td->td_proc, RACCT_NOFILE);
354	PROC_UNLOCK(p);
355	if (lim < td->td_retval[0])
356		td->td_retval[0] = lim;
357	return (0);
358}
359
360/*
361 * Duplicate a file descriptor to a particular value.
362 *
363 * Note: keep in mind that a potential race condition exists when closing
364 * descriptors from a shared descriptor table (via rfork).
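 * For example, when the descriptor table is shared via rfork(2) without
 * RFFDG, another process may close either descriptor while the dup is
 * in progress.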
365 */
366#ifndef _SYS_SYSPROTO_H_
367struct dup2_args {
368	u_int	from;
369	u_int	to;
370};
371#endif
372/* ARGSUSED */
373int
374sys_dup2(struct thread *td, struct dup2_args *uap)
375{
376
377	return (do_dup(td, DUP_FIXED, (int)uap->from, (int)uap->to,
378		    td->td_retval));
379}
380
381/*
382 * Duplicate a file descriptor.
383 */
384#ifndef _SYS_SYSPROTO_H_
385struct dup_args {
386	u_int	fd;
387};
388#endif
389/* ARGSUSED */
390int
391sys_dup(struct thread *td, struct dup_args *uap)
392{
393
394	return (do_dup(td, 0, (int)uap->fd, 0, td->td_retval));
395}
396
397/*
398 * The file control system call.
399 */
400#ifndef _SYS_SYSPROTO_H_
401struct fcntl_args {
402	int	fd;
403	int	cmd;
404	long	arg;
405};
406#endif
407/* ARGSUSED */
408int
409sys_fcntl(struct thread *td, struct fcntl_args *uap)
410{
411
412	return (kern_fcntl_freebsd(td, uap->fd, uap->cmd, uap->arg));
413}
414
415int
416kern_fcntl_freebsd(struct thread *td, int fd, int cmd, long arg)
417{
418	struct flock fl;
419	struct __oflock ofl;
420	intptr_t arg1;
421	int error, newcmd;
422
423	error = 0;
424	newcmd = cmd;
425	switch (cmd) {
426	case F_OGETLK:
427	case F_OSETLK:
428	case F_OSETLKW:
429		/*
430		 * Convert old flock structure to new.
431		 */
432		error = copyin((void *)(intptr_t)arg, &ofl, sizeof(ofl));
433		fl.l_start = ofl.l_start;
434		fl.l_len = ofl.l_len;
435		fl.l_pid = ofl.l_pid;
436		fl.l_type = ofl.l_type;
437		fl.l_whence = ofl.l_whence;
438		fl.l_sysid = 0;
439
440		switch (cmd) {
441		case F_OGETLK:
442			newcmd = F_GETLK;
443			break;
444		case F_OSETLK:
445			newcmd = F_SETLK;
446			break;
447		case F_OSETLKW:
448			newcmd = F_SETLKW;
449			break;
450		}
451		arg1 = (intptr_t)&fl;
452		break;
453	case F_GETLK:
454	case F_SETLK:
455	case F_SETLKW:
456	case F_SETLK_REMOTE:
457		error = copyin((void *)(intptr_t)arg, &fl, sizeof(fl));
458		arg1 = (intptr_t)&fl;
459		break;
460	default:
461		arg1 = arg;
462		break;
463	}
464	if (error)
465		return (error);
466	error = kern_fcntl(td, fd, newcmd, arg1);
467	if (error)
468		return (error);
469	if (cmd == F_OGETLK) {
470		ofl.l_start = fl.l_start;
471		ofl.l_len = fl.l_len;
472		ofl.l_pid = fl.l_pid;
473		ofl.l_type = fl.l_type;
474		ofl.l_whence = fl.l_whence;
475		error = copyout(&ofl, (void *)(intptr_t)arg, sizeof(ofl));
476	} else if (cmd == F_GETLK) {
477		error = copyout(&fl, (void *)(intptr_t)arg, sizeof(fl));
478	}
479	return (error);
480}
481
482int
483kern_fcntl(struct thread *td, int fd, int cmd, intptr_t arg)
484{
485	struct filedesc *fdp;
486	struct flock *flp;
487	struct file *fp, *fp2;
488	struct filedescent *fde;
489	struct proc *p;
490	struct vnode *vp;
491	cap_rights_t rights;
492	int error, flg, tmp;
493	uint64_t bsize;
494	off_t foffset;
495
496	error = 0;
497	flg = F_POSIX;
498	p = td->td_proc;
499	fdp = p->p_fd;
500
501	switch (cmd) {
502	case F_DUPFD:
503		tmp = arg;
504		error = do_dup(td, DUP_FCNTL, fd, tmp, td->td_retval);
505		break;
506
507	case F_DUPFD_CLOEXEC:
508		tmp = arg;
509		error = do_dup(td, DUP_FCNTL | DUP_CLOEXEC, fd, tmp,
510		    td->td_retval);
511		break;
512
513	case F_DUP2FD:
514		tmp = arg;
515		error = do_dup(td, DUP_FIXED, fd, tmp, td->td_retval);
516		break;
517
518	case F_DUP2FD_CLOEXEC:
519		tmp = arg;
520		error = do_dup(td, DUP_FIXED | DUP_CLOEXEC, fd, tmp,
521		    td->td_retval);
522		break;
523
524	case F_GETFD:
525		FILEDESC_SLOCK(fdp);
526		if ((fp = fget_locked(fdp, fd)) == NULL) {
527			FILEDESC_SUNLOCK(fdp);
528			error = EBADF;
529			break;
530		}
531		fde = &fdp->fd_ofiles[fd];
532		td->td_retval[0] =
533		    (fde->fde_flags & UF_EXCLOSE) ? FD_CLOEXEC : 0;
534		FILEDESC_SUNLOCK(fdp);
535		break;
536
537	case F_SETFD:
538		FILEDESC_XLOCK(fdp);
539		if ((fp = fget_locked(fdp, fd)) == NULL) {
540			FILEDESC_XUNLOCK(fdp);
541			error = EBADF;
542			break;
543		}
544		fde = &fdp->fd_ofiles[fd];
545		fde->fde_flags = (fde->fde_flags & ~UF_EXCLOSE) |
546		    (arg & FD_CLOEXEC ? UF_EXCLOSE : 0);
547		FILEDESC_XUNLOCK(fdp);
548		break;
549
550	case F_GETFL:
551		error = fget_unlocked(fdp, fd,
552		    cap_rights_init(&rights, CAP_FCNTL), F_GETFL, &fp, NULL);
553		if (error != 0)
554			break;
555		td->td_retval[0] = OFLAGS(fp->f_flag);
556		fdrop(fp, td);
557		break;
558
559	case F_SETFL:
560		error = fget_unlocked(fdp, fd,
561		    cap_rights_init(&rights, CAP_FCNTL), F_SETFL, &fp, NULL);
562		if (error != 0)
563			break;
564		do {
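		/*
		 * Lock-free update of the FCNTLFLAGS portion of f_flag:
		 * recompute from the current value and retry the
		 * compare-and-swap until no other thread has modified
		 * f_flag in the meantime.
		 */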
565			tmp = flg = fp->f_flag;
566			tmp &= ~FCNTLFLAGS;
567			tmp |= FFLAGS(arg & ~O_ACCMODE) & FCNTLFLAGS;
568		} while(atomic_cmpset_int(&fp->f_flag, flg, tmp) == 0);
569		tmp = fp->f_flag & FNONBLOCK;
570		error = fo_ioctl(fp, FIONBIO, &tmp, td->td_ucred, td);
571		if (error != 0) {
572			fdrop(fp, td);
573			break;
574		}
575		tmp = fp->f_flag & FASYNC;
576		error = fo_ioctl(fp, FIOASYNC, &tmp, td->td_ucred, td);
577		if (error == 0) {
578			fdrop(fp, td);
579			break;
580		}
581		atomic_clear_int(&fp->f_flag, FNONBLOCK);
582		tmp = 0;
583		(void)fo_ioctl(fp, FIONBIO, &tmp, td->td_ucred, td);
584		fdrop(fp, td);
585		break;
586
587	case F_GETOWN:
588		error = fget_unlocked(fdp, fd,
589		    cap_rights_init(&rights, CAP_FCNTL), F_GETOWN, &fp, NULL);
590		if (error != 0)
591			break;
592		error = fo_ioctl(fp, FIOGETOWN, &tmp, td->td_ucred, td);
593		if (error == 0)
594			td->td_retval[0] = tmp;
595		fdrop(fp, td);
596		break;
597
598	case F_SETOWN:
599		error = fget_unlocked(fdp, fd,
600		    cap_rights_init(&rights, CAP_FCNTL), F_SETOWN, &fp, NULL);
601		if (error != 0)
602			break;
603		tmp = arg;
604		error = fo_ioctl(fp, FIOSETOWN, &tmp, td->td_ucred, td);
605		fdrop(fp, td);
606		break;
607
608	case F_SETLK_REMOTE:
609		error = priv_check(td, PRIV_NFS_LOCKD);
610		if (error)
611			return (error);
612		flg = F_REMOTE;
613		goto do_setlk;
614
615	case F_SETLKW:
616		flg |= F_WAIT;
617		/* FALLTHROUGH F_SETLK */
618
619	case F_SETLK:
620	do_setlk:
621		cap_rights_init(&rights, CAP_FLOCK);
622		error = fget_unlocked(fdp, fd, &rights, 0, &fp, NULL);
623		if (error != 0)
624			break;
625		if (fp->f_type != DTYPE_VNODE) {
626			error = EBADF;
627			fdrop(fp, td);
628			break;
629		}
630
631		flp = (struct flock *)arg;
632		if (flp->l_whence == SEEK_CUR) {
633			foffset = foffset_get(fp);
634			if (foffset < 0 ||
635			    (flp->l_start > 0 &&
636			     foffset > OFF_MAX - flp->l_start)) {
637				FILEDESC_SUNLOCK(fdp);
638				error = EOVERFLOW;
639				fdrop(fp, td);
640				break;
641			}
642			flp->l_start += foffset;
643		}
644
645		vp = fp->f_vnode;
646		switch (flp->l_type) {
647		case F_RDLCK:
648			if ((fp->f_flag & FREAD) == 0) {
649				error = EBADF;
650				break;
651			}
652			PROC_LOCK(p->p_leader);
653			p->p_leader->p_flag |= P_ADVLOCK;
654			PROC_UNLOCK(p->p_leader);
655			error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_SETLK,
656			    flp, flg);
657			break;
658		case F_WRLCK:
659			if ((fp->f_flag & FWRITE) == 0) {
660				error = EBADF;
661				break;
662			}
663			PROC_LOCK(p->p_leader);
664			p->p_leader->p_flag |= P_ADVLOCK;
665			PROC_UNLOCK(p->p_leader);
666			error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_SETLK,
667			    flp, flg);
668			break;
669		case F_UNLCK:
670			error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_UNLCK,
671			    flp, flg);
672			break;
673		case F_UNLCKSYS:
674			/*
675			 * Temporary api for testing remote lock
676			 * infrastructure.
677			 */
678			if (flg != F_REMOTE) {
679				error = EINVAL;
680				break;
681			}
682			error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader,
683			    F_UNLCKSYS, flp, flg);
684			break;
685		default:
686			error = EINVAL;
687			break;
688		}
689		if (error != 0 || flp->l_type == F_UNLCK ||
690		    flp->l_type == F_UNLCKSYS) {
691			fdrop(fp, td);
692			break;
693		}
694
695		/*
696		 * Check for a race with close.
697		 *
698		 * The vnode is now advisory locked (or unlocked, but this case
699		 * is not really important) as the caller requested.
700		 * We had to drop the filedesc lock, so we need to recheck if
701		 * the descriptor is still valid, because if it was closed
702		 * in the meantime we need to remove the advisory lock from the
703		 * vnode - a close on any descriptor leading to an advisory-locked
704		 * vnode removes that lock.
705		 * We will return 0 on purpose in that case, as the result of
706		 * successful advisory lock might have been externally visible
707		 * already. This is fine - effectively we pretend to the caller
708		 * that the closing thread was a bit slower and that the
709		 * advisory lock succeeded before the close.
710		 */
711		error = fget_unlocked(fdp, fd, &rights, 0, &fp2, NULL);
712		if (error != 0) {
713			fdrop(fp, td);
714			break;
715		}
716		if (fp != fp2) {
717			flp->l_whence = SEEK_SET;
718			flp->l_start = 0;
719			flp->l_len = 0;
720			flp->l_type = F_UNLCK;
721			(void) VOP_ADVLOCK(vp, (caddr_t)p->p_leader,
722			    F_UNLCK, flp, F_POSIX);
723		}
724		fdrop(fp, td);
725		fdrop(fp2, td);
726		break;
727
728	case F_GETLK:
729		error = fget_unlocked(fdp, fd,
730		    cap_rights_init(&rights, CAP_FLOCK), 0, &fp, NULL);
731		if (error != 0)
732			break;
733		if (fp->f_type != DTYPE_VNODE) {
734			error = EBADF;
735			fdrop(fp, td);
736			break;
737		}
738		flp = (struct flock *)arg;
739		if (flp->l_type != F_RDLCK && flp->l_type != F_WRLCK &&
740		    flp->l_type != F_UNLCK) {
741			error = EINVAL;
742			fdrop(fp, td);
743			break;
744		}
745		if (flp->l_whence == SEEK_CUR) {
746			foffset = foffset_get(fp);
747			if ((flp->l_start > 0 &&
748			    foffset > OFF_MAX - flp->l_start) ||
749			    (flp->l_start < 0 &&
750			    foffset < OFF_MIN - flp->l_start)) {
751				FILEDESC_SUNLOCK(fdp);
752				error = EOVERFLOW;
753				fdrop(fp, td);
754				break;
755			}
756			flp->l_start += foffset;
757		}
758		vp = fp->f_vnode;
759		error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_GETLK, flp,
760		    F_POSIX);
761		fdrop(fp, td);
762		break;
763
764	case F_RDAHEAD:
765		arg = arg ? 128 * 1024: 0;
766		/* FALLTHROUGH */
767	case F_READAHEAD:
768		error = fget_unlocked(fdp, fd, NULL, 0, &fp, NULL);
769		if (error != 0)
770			break;
771		if (fp->f_type != DTYPE_VNODE) {
772			fdrop(fp, td);
773			error = EBADF;
774			break;
775		}
776		vp = fp->f_vnode;
777		/*
778		 * Exclusive lock synchronizes against f_seqcount reads and
779		 * writes in sequential_heuristic().
780		 */
781		error = vn_lock(vp, LK_EXCLUSIVE);
782		if (error != 0) {
783			fdrop(fp, td);
784			break;
785		}
786		if (arg >= 0) {
787			bsize = fp->f_vnode->v_mount->mnt_stat.f_iosize;
788			fp->f_seqcount = (arg + bsize - 1) / bsize;
789			atomic_set_int(&fp->f_flag, FRDAHEAD);
790		} else {
791			atomic_clear_int(&fp->f_flag, FRDAHEAD);
792		}
793		VOP_UNLOCK(vp, 0);
794		fdrop(fp, td);
795		break;
796
797	default:
798		error = EINVAL;
799		break;
800	}
801	return (error);
802}
803
804static int
805getmaxfd(struct proc *p)
806{
807	int maxfd;
808
809	PROC_LOCK(p);
810	maxfd = min((int)lim_cur(p, RLIMIT_NOFILE), maxfilesperproc);
811	PROC_UNLOCK(p);
812
813	return (maxfd);
814}
815
816/*
817 * Common code for dup, dup2, fcntl(F_DUPFD) and fcntl(F_DUP2FD).
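 *
 * As used by the callers in this file: sys_dup() passes no flags,
 * sys_dup2() and F_DUP2FD pass DUP_FIXED (dup to exactly 'new'),
 * F_DUPFD passes DUP_FCNTL (so an out-of-range 'new' returns EINVAL
 * rather than EBADF), and the *_CLOEXEC variants additionally pass
 * DUP_CLOEXEC to set UF_EXCLOSE on the new descriptor.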
818 */
819int
820do_dup(struct thread *td, int flags, int old, int new,
821    register_t *retval)
822{
823	struct filedesc *fdp;
824	struct filedescent *oldfde, *newfde;
825	struct proc *p;
826	struct file *fp;
827	struct file *delfp;
828	int error, maxfd;
829
830	p = td->td_proc;
831	fdp = p->p_fd;
832
833	/*
834	 * Verify we have a valid descriptor to dup from and possibly to
835	 * dup to. Unlike dup() and dup2(), fcntl()'s F_DUPFD should
836	 * return EINVAL when the new descriptor is out of bounds.
837	 */
838	if (old < 0)
839		return (EBADF);
840	if (new < 0)
841		return (flags & DUP_FCNTL ? EINVAL : EBADF);
842	maxfd = getmaxfd(p);
843	if (new >= maxfd)
844		return (flags & DUP_FCNTL ? EINVAL : EBADF);
845
846	FILEDESC_XLOCK(fdp);
847	if (fget_locked(fdp, old) == NULL) {
848		FILEDESC_XUNLOCK(fdp);
849		return (EBADF);
850	}
851	oldfde = &fdp->fd_ofiles[old];
852	if (flags & DUP_FIXED && old == new) {
853		*retval = new;
854		if (flags & DUP_CLOEXEC)
855			fdp->fd_ofiles[new].fde_flags |= UF_EXCLOSE;
856		FILEDESC_XUNLOCK(fdp);
857		return (0);
858	}
859	fp = oldfde->fde_file;
860	fhold(fp);
861
862	/*
863	 * If the caller specified a file descriptor, make sure the file
864	 * table is large enough to hold it, and grab it.  Otherwise, just
865	 * allocate a new descriptor the usual way.
866	 */
867	if (flags & DUP_FIXED) {
868		if (new >= fdp->fd_nfiles) {
869			/*
870			 * The resource limits are here instead of e.g.
871			 * fdalloc(), because the file descriptor table may be
872			 * shared between processes, so we can't really use
873			 * racct_add()/racct_sub().  Instead of counting the
874			 * number of actually allocated descriptors, just put
875			 * the limit on the size of the file descriptor table.
876			 */
877#ifdef RACCT
878			if (racct_enable) {
879				PROC_LOCK(p);
880				error = racct_set(p, RACCT_NOFILE, new + 1);
881				PROC_UNLOCK(p);
882				if (error != 0) {
883					FILEDESC_XUNLOCK(fdp);
884					fdrop(fp, td);
885					return (EMFILE);
886				}
887			}
888#endif
889			fdgrowtable_exp(fdp, new + 1);
890			oldfde = &fdp->fd_ofiles[old];
891		}
892		newfde = &fdp->fd_ofiles[new];
893		if (newfde->fde_file == NULL)
894			fdused(fdp, new);
895	} else {
896		if ((error = fdalloc(td, new, &new)) != 0) {
897			FILEDESC_XUNLOCK(fdp);
898			fdrop(fp, td);
899			return (error);
900		}
901		newfde = &fdp->fd_ofiles[new];
902	}
903
904	KASSERT(fp == oldfde->fde_file, ("old fd has been modified"));
905	KASSERT(old != new, ("new fd is same as old"));
906
907	delfp = newfde->fde_file;
908
909	/*
910	 * Duplicate the source descriptor.
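	 *
	 * When CAPABILITIES is compiled in, the copy is bracketed by
	 * seq_write_begin()/seq_write_end() so that lock-free readers in
	 * fget_unlocked() can notice that the entry changed and retry.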
911	 */
912#ifdef CAPABILITIES
913	seq_write_begin(&newfde->fde_seq);
914#endif
915	filecaps_free(&newfde->fde_caps);
916	memcpy(newfde, oldfde, fde_change_size);
917	filecaps_copy(&oldfde->fde_caps, &newfde->fde_caps);
918	if ((flags & DUP_CLOEXEC) != 0)
919		newfde->fde_flags = oldfde->fde_flags | UF_EXCLOSE;
920	else
921		newfde->fde_flags = oldfde->fde_flags & ~UF_EXCLOSE;
922#ifdef CAPABILITIES
923	seq_write_end(&newfde->fde_seq);
924#endif
925	*retval = new;
926
927	if (delfp != NULL) {
928		(void) closefp(fdp, new, delfp, td, 1);
929		/* closefp() drops the FILEDESC lock for us. */
930	} else {
931		FILEDESC_XUNLOCK(fdp);
932	}
933
934	return (0);
935}
936
937/*
938 * If sigio is on the list associated with a process or process group,
939 * disable signalling from the device, remove sigio from the list and
940 * free sigio.
941 */
942void
943funsetown(struct sigio **sigiop)
944{
945	struct sigio *sigio;
946
947	SIGIO_LOCK();
948	sigio = *sigiop;
949	if (sigio == NULL) {
950		SIGIO_UNLOCK();
951		return;
952	}
953	*(sigio->sio_myref) = NULL;
954	if ((sigio)->sio_pgid < 0) {
955		struct pgrp *pg = (sigio)->sio_pgrp;
956		PGRP_LOCK(pg);
957		SLIST_REMOVE(&sigio->sio_pgrp->pg_sigiolst, sigio,
958			    sigio, sio_pgsigio);
959		PGRP_UNLOCK(pg);
960	} else {
961		struct proc *p = (sigio)->sio_proc;
962		PROC_LOCK(p);
963		SLIST_REMOVE(&sigio->sio_proc->p_sigiolst, sigio,
964			    sigio, sio_pgsigio);
965		PROC_UNLOCK(p);
966	}
967	SIGIO_UNLOCK();
968	crfree(sigio->sio_ucred);
969	free(sigio, M_SIGIO);
970}
971
972/*
973 * Free a list of sigio structures.
974 * We only need to take SIGIO_LOCK() because we have made ourselves
975 * inaccessible to callers of fsetown and therefore do not need to lock
976 * the proc or pgrp struct for the list manipulation.
977 */
978void
979funsetownlst(struct sigiolst *sigiolst)
980{
981	struct proc *p;
982	struct pgrp *pg;
983	struct sigio *sigio;
984
985	sigio = SLIST_FIRST(sigiolst);
986	if (sigio == NULL)
987		return;
988	p = NULL;
989	pg = NULL;
990
991	/*
992	 * Every entry of the list should belong
993	 * to a single proc or pgrp.
994	 */
995	if (sigio->sio_pgid < 0) {
996		pg = sigio->sio_pgrp;
997		PGRP_LOCK_ASSERT(pg, MA_NOTOWNED);
998	} else /* if (sigio->sio_pgid > 0) */ {
999		p = sigio->sio_proc;
1000		PROC_LOCK_ASSERT(p, MA_NOTOWNED);
1001	}
1002
1003	SIGIO_LOCK();
1004	while ((sigio = SLIST_FIRST(sigiolst)) != NULL) {
1005		*(sigio->sio_myref) = NULL;
1006		if (pg != NULL) {
1007			KASSERT(sigio->sio_pgid < 0,
1008			    ("Proc sigio in pgrp sigio list"));
1009			KASSERT(sigio->sio_pgrp == pg,
1010			    ("Bogus pgrp in sigio list"));
1011			PGRP_LOCK(pg);
1012			SLIST_REMOVE(&pg->pg_sigiolst, sigio, sigio,
1013			    sio_pgsigio);
1014			PGRP_UNLOCK(pg);
1015		} else /* if (p != NULL) */ {
1016			KASSERT(sigio->sio_pgid > 0,
1017			    ("Pgrp sigio in proc sigio list"));
1018			KASSERT(sigio->sio_proc == p,
1019			    ("Bogus proc in sigio list"));
1020			PROC_LOCK(p);
1021			SLIST_REMOVE(&p->p_sigiolst, sigio, sigio,
1022			    sio_pgsigio);
1023			PROC_UNLOCK(p);
1024		}
1025		SIGIO_UNLOCK();
1026		crfree(sigio->sio_ucred);
1027		free(sigio, M_SIGIO);
1028		SIGIO_LOCK();
1029	}
1030	SIGIO_UNLOCK();
1031}
1032
1033/*
1034 * This is common code for the FIOSETOWN ioctl, called by fcntl(fd, F_SETOWN, arg).
1035 *
1036 * After permission checking, add a sigio structure to the sigio list for
1037 * the process or process group.
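 *
 * The sign of pgid selects the target: a positive pgid names a process
 * (looked up with pfind()), a negative pgid names a process group
 * (looked up with pgfind(-pgid)), and a pgid of 0 simply clears the
 * current owner via funsetown().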
1038 */
1039int
1040fsetown(pid_t pgid, struct sigio **sigiop)
1041{
1042	struct proc *proc;
1043	struct pgrp *pgrp;
1044	struct sigio *sigio;
1045	int ret;
1046
1047	if (pgid == 0) {
1048		funsetown(sigiop);
1049		return (0);
1050	}
1051
1052	ret = 0;
1053
1054	/* Allocate and fill in the new sigio out of locks. */
1055	sigio = malloc(sizeof(struct sigio), M_SIGIO, M_WAITOK);
1056	sigio->sio_pgid = pgid;
1057	sigio->sio_ucred = crhold(curthread->td_ucred);
1058	sigio->sio_myref = sigiop;
1059
1060	sx_slock(&proctree_lock);
1061	if (pgid > 0) {
1062		proc = pfind(pgid);
1063		if (proc == NULL) {
1064			ret = ESRCH;
1065			goto fail;
1066		}
1067
1068		/*
1069		 * Policy - Don't allow a process to FSETOWN a process
1070		 * in another session.
1071		 *
1072		 * Remove this test to allow maximum flexibility or
1073		 * restrict FSETOWN to the current process or process
1074		 * group for maximum safety.
1075		 */
1076		PROC_UNLOCK(proc);
1077		if (proc->p_session != curthread->td_proc->p_session) {
1078			ret = EPERM;
1079			goto fail;
1080		}
1081
1082		pgrp = NULL;
1083	} else /* if (pgid < 0) */ {
1084		pgrp = pgfind(-pgid);
1085		if (pgrp == NULL) {
1086			ret = ESRCH;
1087			goto fail;
1088		}
1089		PGRP_UNLOCK(pgrp);
1090
1091		/*
1092		 * Policy - Don't allow a process to FSETOWN a process
1093		 * in another session.
1094		 *
1095		 * Remove this test to allow maximum flexibility or
1096		 * restrict FSETOWN to the current process or process
1097		 * group for maximum safety.
1098		 */
1099		if (pgrp->pg_session != curthread->td_proc->p_session) {
1100			ret = EPERM;
1101			goto fail;
1102		}
1103
1104		proc = NULL;
1105	}
1106	funsetown(sigiop);
1107	if (pgid > 0) {
1108		PROC_LOCK(proc);
1109		/*
1110		 * Since funsetownlst() is called without the proctree
1111		 * locked, we need to check for P_WEXIT.
1112		 * XXX: is ESRCH correct?
1113		 */
1114		if ((proc->p_flag & P_WEXIT) != 0) {
1115			PROC_UNLOCK(proc);
1116			ret = ESRCH;
1117			goto fail;
1118		}
1119		SLIST_INSERT_HEAD(&proc->p_sigiolst, sigio, sio_pgsigio);
1120		sigio->sio_proc = proc;
1121		PROC_UNLOCK(proc);
1122	} else {
1123		PGRP_LOCK(pgrp);
1124		SLIST_INSERT_HEAD(&pgrp->pg_sigiolst, sigio, sio_pgsigio);
1125		sigio->sio_pgrp = pgrp;
1126		PGRP_UNLOCK(pgrp);
1127	}
1128	sx_sunlock(&proctree_lock);
1129	SIGIO_LOCK();
1130	*sigiop = sigio;
1131	SIGIO_UNLOCK();
1132	return (0);
1133
1134fail:
1135	sx_sunlock(&proctree_lock);
1136	crfree(sigio->sio_ucred);
1137	free(sigio, M_SIGIO);
1138	return (ret);
1139}
1140
1141/*
1142 * This is common code for the FIOGETOWN ioctl, called by fcntl(fd, F_GETOWN, arg).
1143 */
1144pid_t
1145fgetown(struct sigio **sigiop)
1147{
1148	pid_t pgid;
1149
1150	SIGIO_LOCK();
1151	pgid = (*sigiop != NULL) ? (*sigiop)->sio_pgid : 0;
1152	SIGIO_UNLOCK();
1153	return (pgid);
1154}
1155
1156/*
1157 * Function drops the filedesc lock on return.
1158 */
1159static int
1160closefp(struct filedesc *fdp, int fd, struct file *fp, struct thread *td,
1161    int holdleaders)
1162{
1163	int error;
1164
1165	FILEDESC_XLOCK_ASSERT(fdp);
1166
1167	if (holdleaders) {
1168		if (td->td_proc->p_fdtol != NULL) {
1169			/*
1170			 * Ask fdfree() to sleep to ensure that all relevant
1171			 * process leaders can be traversed in closef().
1172			 */
1173			fdp->fd_holdleaderscount++;
1174		} else {
1175			holdleaders = 0;
1176		}
1177	}
1178
1179	/*
1180	 * We now hold the fp reference that used to be owned by the
1181	 * descriptor array.  We have to unlock the FILEDESC *AFTER*
1182	 * knote_fdclose to prevent a race where the fd is reopened, a knote
1183	 * is added, and the knote for the new fd is then deleted.
1184	 */
1185	knote_fdclose(td, fd);
1186
1187	/*
1188	 * We need to notify mqueue if the object is of type mqueue.
1189	 */
1190	if (fp->f_type == DTYPE_MQUEUE)
1191		mq_fdclose(td, fd, fp);
1192	FILEDESC_XUNLOCK(fdp);
1193
1194	error = closef(fp, td);
1195	if (holdleaders) {
1196		FILEDESC_XLOCK(fdp);
1197		fdp->fd_holdleaderscount--;
1198		if (fdp->fd_holdleaderscount == 0 &&
1199		    fdp->fd_holdleaderswakeup != 0) {
1200			fdp->fd_holdleaderswakeup = 0;
1201			wakeup(&fdp->fd_holdleaderscount);
1202		}
1203		FILEDESC_XUNLOCK(fdp);
1204	}
1205	return (error);
1206}
1207
1208/*
1209 * Close a file descriptor.
1210 */
1211#ifndef _SYS_SYSPROTO_H_
1212struct close_args {
1213	int     fd;
1214};
1215#endif
1216/* ARGSUSED */
1217int
1218sys_close(struct thread *td, struct close_args *uap)
1219{
1220
1221	return (kern_close(td, uap->fd));
1222}
1223
1224int
1225kern_close(struct thread *td, int fd)
1226{
1227	struct filedesc *fdp;
1228	struct file *fp;
1229
1230	fdp = td->td_proc->p_fd;
1231
1232	AUDIT_SYSCLOSE(td, fd);
1233
1234	FILEDESC_XLOCK(fdp);
1235	if ((fp = fget_locked(fdp, fd)) == NULL) {
1236		FILEDESC_XUNLOCK(fdp);
1237		return (EBADF);
1238	}
1239	fdfree(fdp, fd);
1240
1241	/* closefp() drops the FILEDESC lock for us. */
1242	return (closefp(fdp, fd, fp, td, 1));
1243}
1244
1245/*
1246 * Close open file descriptors.
1247 */
1248#ifndef _SYS_SYSPROTO_H_
1249struct closefrom_args {
1250	int	lowfd;
1251};
1252#endif
1253/* ARGSUSED */
1254int
1255sys_closefrom(struct thread *td, struct closefrom_args *uap)
1256{
1257	struct filedesc *fdp;
1258	int fd;
1259
1260	fdp = td->td_proc->p_fd;
1261	AUDIT_ARG_FD(uap->lowfd);
1262
1263	/*
1264	 * Treat negative starting file descriptor values identically to
1265	 * closefrom(0), which closes all files.
1266	 */
1267	if (uap->lowfd < 0)
1268		uap->lowfd = 0;
1269	FILEDESC_SLOCK(fdp);
1270	for (fd = uap->lowfd; fd <= fdp->fd_lastfile; fd++) {
1271		if (fdp->fd_ofiles[fd].fde_file != NULL) {
1272			FILEDESC_SUNLOCK(fdp);
1273			(void)kern_close(td, fd);
1274			FILEDESC_SLOCK(fdp);
1275		}
1276	}
1277	FILEDESC_SUNLOCK(fdp);
1278	return (0);
1279}
1280
1281#if defined(COMPAT_43)
1282/*
1283 * Return status information about a file descriptor.
1284 */
1285#ifndef _SYS_SYSPROTO_H_
1286struct ofstat_args {
1287	int	fd;
1288	struct	ostat *sb;
1289};
1290#endif
1291/* ARGSUSED */
1292int
1293ofstat(struct thread *td, struct ofstat_args *uap)
1294{
1295	struct ostat oub;
1296	struct stat ub;
1297	int error;
1298
1299	error = kern_fstat(td, uap->fd, &ub);
1300	if (error == 0) {
1301		cvtstat(&ub, &oub);
1302		error = copyout(&oub, uap->sb, sizeof(oub));
1303	}
1304	return (error);
1305}
1306#endif /* COMPAT_43 */
1307
1308/*
1309 * Return status information about a file descriptor.
1310 */
1311#ifndef _SYS_SYSPROTO_H_
1312struct fstat_args {
1313	int	fd;
1314	struct	stat *sb;
1315};
1316#endif
1317/* ARGSUSED */
1318int
1319sys_fstat(struct thread *td, struct fstat_args *uap)
1320{
1321	struct stat ub;
1322	int error;
1323
1324	error = kern_fstat(td, uap->fd, &ub);
1325	if (error == 0)
1326		error = copyout(&ub, uap->sb, sizeof(ub));
1327	return (error);
1328}
1329
1330int
1331kern_fstat(struct thread *td, int fd, struct stat *sbp)
1332{
1333	struct file *fp;
1334	cap_rights_t rights;
1335	int error;
1336
1337	AUDIT_ARG_FD(fd);
1338
1339	error = fget(td, fd, cap_rights_init(&rights, CAP_FSTAT), &fp);
1340	if (error != 0)
1341		return (error);
1342
1343	AUDIT_ARG_FILE(td->td_proc, fp);
1344
1345	error = fo_stat(fp, sbp, td->td_ucred, td);
1346	fdrop(fp, td);
1347#ifdef KTRACE
1348	if (error == 0 && KTRPOINT(td, KTR_STRUCT))
1349		ktrstat(sbp);
1350#endif
1351	return (error);
1352}
1353
1354/*
1355 * Return status information about a file descriptor.
1356 */
1357#ifndef _SYS_SYSPROTO_H_
1358struct nfstat_args {
1359	int	fd;
1360	struct	nstat *sb;
1361};
1362#endif
1363/* ARGSUSED */
1364int
1365sys_nfstat(struct thread *td, struct nfstat_args *uap)
1366{
1367	struct nstat nub;
1368	struct stat ub;
1369	int error;
1370
1371	error = kern_fstat(td, uap->fd, &ub);
1372	if (error == 0) {
1373		cvtnstat(&ub, &nub);
1374		error = copyout(&nub, uap->sb, sizeof(nub));
1375	}
1376	return (error);
1377}
1378
1379/*
1380 * Return pathconf information about a file descriptor.
1381 */
1382#ifndef _SYS_SYSPROTO_H_
1383struct fpathconf_args {
1384	int	fd;
1385	int	name;
1386};
1387#endif
1388/* ARGSUSED */
1389int
1390sys_fpathconf(struct thread *td, struct fpathconf_args *uap)
1391{
1392	struct file *fp;
1393	struct vnode *vp;
1394	cap_rights_t rights;
1395	int error;
1396
1397	error = fget(td, uap->fd, cap_rights_init(&rights, CAP_FPATHCONF), &fp);
1398	if (error != 0)
1399		return (error);
1400
1401	/* If asynchronous I/O is available, it works for all descriptors. */
1402	if (uap->name == _PC_ASYNC_IO) {
1403		td->td_retval[0] = async_io_version;
1404		goto out;
1405	}
1406	vp = fp->f_vnode;
1407	if (vp != NULL) {
1408		vn_lock(vp, LK_SHARED | LK_RETRY);
1409		error = VOP_PATHCONF(vp, uap->name, td->td_retval);
1410		VOP_UNLOCK(vp, 0);
1411	} else if (fp->f_type == DTYPE_PIPE || fp->f_type == DTYPE_SOCKET) {
1412		if (uap->name != _PC_PIPE_BUF) {
1413			error = EINVAL;
1414		} else {
1415			td->td_retval[0] = PIPE_BUF;
1416			error = 0;
1417		}
1418	} else {
1419		error = EOPNOTSUPP;
1420	}
1421out:
1422	fdrop(fp, td);
1423	return (error);
1424}
1425
1426/*
1427 * Initialize filecaps structure.
1428 */
1429void
1430filecaps_init(struct filecaps *fcaps)
1431{
1432
1433	bzero(fcaps, sizeof(*fcaps));
1434	fcaps->fc_nioctls = -1;
1435}
1436
1437/*
1438 * Copy filecaps structure allocating memory for ioctls array if needed.
1439 */
1440void
1441filecaps_copy(const struct filecaps *src, struct filecaps *dst)
1442{
1443	size_t size;
1444
1445	*dst = *src;
1446	if (src->fc_ioctls != NULL) {
1447		KASSERT(src->fc_nioctls > 0,
1448		    ("fc_ioctls != NULL, but fc_nioctls=%hd", src->fc_nioctls));
1449
1450		size = sizeof(src->fc_ioctls[0]) * src->fc_nioctls;
1451		dst->fc_ioctls = malloc(size, M_FILECAPS, M_WAITOK);
1452		bcopy(src->fc_ioctls, dst->fc_ioctls, size);
1453	}
1454}
1455
1456/*
1457 * Move filecaps structure to the new place and clear the old place.
1458 */
1459void
1460filecaps_move(struct filecaps *src, struct filecaps *dst)
1461{
1462
1463	*dst = *src;
1464	bzero(src, sizeof(*src));
1465}
1466
1467/*
1468 * Fill the given filecaps structure with full rights.
1469 */
1470static void
1471filecaps_fill(struct filecaps *fcaps)
1472{
1473
1474	CAP_ALL(&fcaps->fc_rights);
1475	fcaps->fc_ioctls = NULL;
1476	fcaps->fc_nioctls = -1;
1477	fcaps->fc_fcntls = CAP_FCNTL_ALL;
1478}
1479
1480/*
1481 * Free memory allocated within filecaps structure.
1482 */
1483void
1484filecaps_free(struct filecaps *fcaps)
1485{
1486
1487	free(fcaps->fc_ioctls, M_FILECAPS);
1488	bzero(fcaps, sizeof(*fcaps));
1489}
1490
1491/*
1492 * Validate the given filecaps structure.
1493 */
1494static void
1495filecaps_validate(const struct filecaps *fcaps, const char *func)
1496{
1497
1498	KASSERT(cap_rights_is_valid(&fcaps->fc_rights),
1499	    ("%s: invalid rights", func));
1500	KASSERT((fcaps->fc_fcntls & ~CAP_FCNTL_ALL) == 0,
1501	    ("%s: invalid fcntls", func));
1502	KASSERT(fcaps->fc_fcntls == 0 ||
1503	    cap_rights_is_set(&fcaps->fc_rights, CAP_FCNTL),
1504	    ("%s: fcntls without CAP_FCNTL", func));
1505	KASSERT(fcaps->fc_ioctls != NULL ? fcaps->fc_nioctls > 0 :
1506	    (fcaps->fc_nioctls == -1 || fcaps->fc_nioctls == 0),
1507	    ("%s: invalid ioctls", func));
1508	KASSERT(fcaps->fc_nioctls == 0 ||
1509	    cap_rights_is_set(&fcaps->fc_rights, CAP_IOCTL),
1510	    ("%s: ioctls without CAP_IOCTL", func));
1511}
1512
1513static void
1514fdgrowtable_exp(struct filedesc *fdp, int nfd)
1515{
1516	int nfd1;
1517
1518	FILEDESC_XLOCK_ASSERT(fdp);
1519
1520	nfd1 = fdp->fd_nfiles * 2;
1521	if (nfd1 < nfd)
1522		nfd1 = nfd;
1523	fdgrowtable(fdp, nfd1);
1524}
1525
1526/*
1527 * Grow the file table to accommodate (at least) nfd descriptors.
1528 */
1529static void
1530fdgrowtable(struct filedesc *fdp, int nfd)
1531{
1532	struct filedesc0 *fdp0;
1533	struct freetable *ft;
1534	struct filedescent *ntable;
1535	struct filedescent *otable;
1536	int nnfiles, onfiles;
1537	NDSLOTTYPE *nmap, *omap;
1538
1539	FILEDESC_XLOCK_ASSERT(fdp);
1540
1541	KASSERT(fdp->fd_nfiles > 0, ("zero-length file table"));
1542
1543	/* save old values */
1544	onfiles = fdp->fd_nfiles;
1545	otable = fdp->fd_ofiles;
1546	omap = fdp->fd_map;
1547
1548	/* compute the size of the new table */
1549	nnfiles = NDSLOTS(nfd) * NDENTRIES; /* round up */
1550	if (nnfiles <= onfiles)
1551		/* the table is already large enough */
1552		return;
1553
1554	/*
1555	 * Allocate a new table.  We need enough space for the
1556	 * file entries themselves and the struct freetable we will use
1557	 * when we decommission the table and place it on the freelist.
1558	 * We place the struct freetable in the middle so we don't have
1559	 * to worry about padding.
1560	 */
1561	ntable = malloc(nnfiles * sizeof(ntable[0]) + sizeof(struct freetable),
1562	    M_FILEDESC, M_ZERO | M_WAITOK);
1563	/* copy the old data over and point at the new tables */
1564	memcpy(ntable, otable, onfiles * sizeof(*otable));
1565	fdp->fd_ofiles = ntable;
1566
1567	/*
1568	 * Allocate a new map only if the old is not large enough.  It will
1569	 * grow at a slower rate than the table as it can map more
1570	 * entries than the table can hold.
1571	 */
1572	if (NDSLOTS(nnfiles) > NDSLOTS(onfiles)) {
1573		nmap = malloc(NDSLOTS(nnfiles) * NDSLOTSIZE, M_FILEDESC,
1574		    M_ZERO | M_WAITOK);
1575		/* copy over the old data and update the pointer */
1576		memcpy(nmap, omap, NDSLOTS(onfiles) * sizeof(*omap));
1577		fdp->fd_map = nmap;
1578	}
1579
1580	/*
1581	 * In order to have a valid pattern for fget_unlocked()
1582	 * fdp->fd_nfiles must be the last member to be updated, otherwise
1583	 * fget_unlocked() consumers may reference a new, higher value for
1584	 * fdp->fd_nfiles before accessing the fdp->fd_ofiles array,
1585	 * resulting in OOB accesses.
1586	 */
1587	atomic_store_rel_int(&fdp->fd_nfiles, nnfiles);
1588
1589	/*
1590	 * Do not free the old file table, as some threads may still
1591	 * reference entries within it.  Instead, place it on a freelist
1592	 * which will be processed when the struct filedesc is released.
1593	 *
1594	 * Note that if onfiles == NDFILE, we're dealing with the original
1595	 * static allocation contained within (struct filedesc0 *)fdp,
1596	 * which must not be freed.
1597	 */
1598	if (onfiles > NDFILE) {
1599		ft = (struct freetable *)&otable[onfiles];
1600		fdp0 = (struct filedesc0 *)fdp;
1601		ft->ft_table = otable;
1602		SLIST_INSERT_HEAD(&fdp0->fd_free, ft, ft_next);
1603	}
1604	/*
1605	 * The map does not have the same possibility of threads still
1606	 * holding references to it.  So always free it as long as it
1607	 * does not reference the original static allocation.
1608	 */
1609	if (NDSLOTS(onfiles) > NDSLOTS(NDFILE))
1610		free(omap, M_FILEDESC);
1611}
1612
1613/*
1614 * Allocate a file descriptor for the process.
1615 */
1616int
1617fdalloc(struct thread *td, int minfd, int *result)
1618{
1619	struct proc *p = td->td_proc;
1620	struct filedesc *fdp = p->p_fd;
1621	int fd = -1, maxfd, allocfd;
1622#ifdef RACCT
1623	int error;
1624#endif
1625
1626	FILEDESC_XLOCK_ASSERT(fdp);
1627
1628	if (fdp->fd_freefile > minfd)
1629		minfd = fdp->fd_freefile;
1630
1631	maxfd = getmaxfd(p);
1632
1633	/*
1634	 * Search the bitmap for a free descriptor starting at minfd.
1635	 * If none is found, grow the file table.
1636	 */
1637	fd = fd_first_free(fdp, minfd, fdp->fd_nfiles);
1638	if (fd >= maxfd)
1639		return (EMFILE);
1640	if (fd >= fdp->fd_nfiles) {
1641		allocfd = min(fd * 2, maxfd);
1642#ifdef RACCT
1643		if (racct_enable) {
1644			PROC_LOCK(p);
1645			error = racct_set(p, RACCT_NOFILE, allocfd);
1646			PROC_UNLOCK(p);
1647			if (error != 0)
1648				return (EMFILE);
1649		}
1650#endif
1651		/*
1652		 * fd is already equal to first free descriptor >= minfd, so
1653		 * we only need to grow the table and we are done.
1654		 */
1655		fdgrowtable_exp(fdp, allocfd);
1656	}
1657
1658	/*
1659	 * Perform some sanity checks, then mark the file descriptor as
1660	 * used and return it to the caller.
1661	 */
1662	KASSERT(fd >= 0 && fd < min(maxfd, fdp->fd_nfiles),
1663	    ("invalid descriptor %d", fd));
1664	KASSERT(!fdisused(fdp, fd),
1665	    ("fd_first_free() returned non-free descriptor"));
1666	KASSERT(fdp->fd_ofiles[fd].fde_file == NULL,
1667	    ("file descriptor isn't free"));
1668	KASSERT(fdp->fd_ofiles[fd].fde_flags == 0, ("file flags are set"));
1669	fdused(fdp, fd);
1670	*result = fd;
1671	return (0);
1672}
1673
1674/*
1675 * Allocate n file descriptors for the process.
1676 */
1677int
1678fdallocn(struct thread *td, int minfd, int *fds, int n)
1679{
1680	struct proc *p = td->td_proc;
1681	struct filedesc *fdp = p->p_fd;
1682	int i;
1683
1684	FILEDESC_XLOCK_ASSERT(fdp);
1685
1686	if (!fdavail(td, n))
1687		return (EMFILE);
1688
1689	for (i = 0; i < n; i++)
1690		if (fdalloc(td, 0, &fds[i]) != 0)
1691			break;
1692
1693	if (i < n) {
1694		for (i--; i >= 0; i--)
1695			fdunused(fdp, fds[i]);
1696		return (EMFILE);
1697	}
1698
1699	return (0);
1700}
1701
1702/*
1703 * Check to see whether n user file descriptors are available to the process
1704 * p.
1705 */
1706int
1707fdavail(struct thread *td, int n)
1708{
1709	struct proc *p = td->td_proc;
1710	struct filedesc *fdp = td->td_proc->p_fd;
1711	int i, lim, last;
1712
1713	FILEDESC_LOCK_ASSERT(fdp);
1714
1715	/*
1716	 * XXX: This is only called from uipc_usrreq.c:unp_externalize();
1717	 *      call racct_add() from there instead of dealing with containers
1718	 *      here.
1719	 */
1720	lim = getmaxfd(p);
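	/*
	 * If the table may still grow by at least n slots before hitting
	 * the per-process limit, the request can always be satisfied;
	 * otherwise count free slots in the existing table, but only up
	 * to that limit.
	 */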
1721	if ((i = lim - fdp->fd_nfiles) > 0 && (n -= i) <= 0)
1722		return (1);
1723	last = min(fdp->fd_nfiles, lim);
1724	for (i = fdp->fd_freefile; i < last; i++) {
1725		if (fdp->fd_ofiles[i].fde_file == NULL && --n <= 0)
1726			return (1);
1727	}
1728	return (0);
1729}
1730
1731/*
1732 * Create a new open file structure and allocate a file descriptor for the
1733 * process that refers to it.  We add one reference to the file for the
1734 * descriptor table and one reference for resultfp. This is to prevent us
1735 * being preempted and the entry in the descriptor table closed after we
1736 * release the FILEDESC lock.
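 *
 * A minimal usage sketch (illustrative only, not taken from this file;
 * initialization of the new open file is elided):
 *
 *	error = falloc(td, &fp, &fd, 0);
 *	if (error != 0)
 *		return (error);
 *	... set up fp, e.g. with finit() ...
 *	fdrop(fp, td);
 *	td->td_retval[0] = fd;
 *
 * The fdrop() releases only the local reference; the descriptor table
 * keeps the reference taken on its behalf.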
1737 */
1738int
1739falloc(struct thread *td, struct file **resultfp, int *resultfd, int flags)
1740{
1741	struct file *fp;
1742	int error, fd;
1743
1744	error = falloc_noinstall(td, &fp);
1745	if (error)
1746		return (error);		/* no reference held on error */
1747
1748	error = finstall(td, fp, &fd, flags, NULL);
1749	if (error) {
1750		fdrop(fp, td);		/* one reference (fp only) */
1751		return (error);
1752	}
1753
1754	if (resultfp != NULL)
1755		*resultfp = fp;		/* copy out result */
1756	else
1757		fdrop(fp, td);		/* release local reference */
1758
1759	if (resultfd != NULL)
1760		*resultfd = fd;
1761
1762	return (0);
1763}
1764
1765/*
1766 * Create a new open file structure without allocating a file descriptor.
1767 */
1768int
1769falloc_noinstall(struct thread *td, struct file **resultfp)
1770{
1771	struct file *fp;
1772	int maxuserfiles = maxfiles - (maxfiles / 20);
1773	static struct timeval lastfail;
1774	static int curfail;
1775
1776	KASSERT(resultfp != NULL, ("%s: resultfp == NULL", __func__));
1777
1778	if ((openfiles >= maxuserfiles &&
1779	    priv_check(td, PRIV_MAXFILES) != 0) ||
1780	    openfiles >= maxfiles) {
1781		if (ppsratecheck(&lastfail, &curfail, 1)) {
1782			printf("kern.maxfiles limit exceeded by uid %i, "
1783			    "please see tuning(7).\n", td->td_ucred->cr_ruid);
1784		}
1785		return (ENFILE);
1786	}
1787	atomic_add_int(&openfiles, 1);
1788	fp = uma_zalloc(file_zone, M_WAITOK | M_ZERO);
1789	refcount_init(&fp->f_count, 1);
1790	fp->f_cred = crhold(td->td_ucred);
1791	fp->f_ops = &badfileops;
1792	fp->f_data = NULL;
1793	fp->f_vnode = NULL;
1794	*resultfp = fp;
1795	return (0);
1796}
1797
1798/*
1799 * Install a file in a file descriptor table.
1800 */
1801int
1802finstall(struct thread *td, struct file *fp, int *fd, int flags,
1803    struct filecaps *fcaps)
1804{
1805	struct filedesc *fdp = td->td_proc->p_fd;
1806	struct filedescent *fde;
1807	int error;
1808
1809	KASSERT(fd != NULL, ("%s: fd == NULL", __func__));
1810	KASSERT(fp != NULL, ("%s: fp == NULL", __func__));
1811	if (fcaps != NULL)
1812		filecaps_validate(fcaps, __func__);
1813
1814	FILEDESC_XLOCK(fdp);
1815	if ((error = fdalloc(td, 0, fd))) {
1816		FILEDESC_XUNLOCK(fdp);
1817		return (error);
1818	}
1819	fhold(fp);
1820	fde = &fdp->fd_ofiles[*fd];
1821#ifdef CAPABILITIES
1822	seq_write_begin(&fde->fde_seq);
1823#endif
1824	fde->fde_file = fp;
1825	if ((flags & O_CLOEXEC) != 0)
1826		fde->fde_flags |= UF_EXCLOSE;
1827	if (fcaps != NULL)
1828		filecaps_move(fcaps, &fde->fde_caps);
1829	else
1830		filecaps_fill(&fde->fde_caps);
1831#ifdef CAPABILITIES
1832	seq_write_end(&fde->fde_seq);
1833#endif
1834	FILEDESC_XUNLOCK(fdp);
1835	return (0);
1836}
1837
1838/*
1839 * Build a new filedesc structure from another.
1840 * Copy the current, root, and jail root vnode references.
1841 */
1842struct filedesc *
1843fdinit(struct filedesc *fdp)
1844{
1845	struct filedesc0 *newfdp;
1846
1847	newfdp = malloc(sizeof *newfdp, M_FILEDESC, M_WAITOK | M_ZERO);
1848	FILEDESC_LOCK_INIT(&newfdp->fd_fd);
1849	if (fdp != NULL) {
1850		FILEDESC_SLOCK(fdp);
1851		newfdp->fd_fd.fd_cdir = fdp->fd_cdir;
1852		if (newfdp->fd_fd.fd_cdir)
1853			VREF(newfdp->fd_fd.fd_cdir);
1854		newfdp->fd_fd.fd_rdir = fdp->fd_rdir;
1855		if (newfdp->fd_fd.fd_rdir)
1856			VREF(newfdp->fd_fd.fd_rdir);
1857		newfdp->fd_fd.fd_jdir = fdp->fd_jdir;
1858		if (newfdp->fd_fd.fd_jdir)
1859			VREF(newfdp->fd_fd.fd_jdir);
1860		FILEDESC_SUNLOCK(fdp);
1861	}
1862
1863	/* Create the file descriptor table. */
1864	newfdp->fd_fd.fd_refcnt = 1;
1865	newfdp->fd_fd.fd_holdcnt = 1;
1866	newfdp->fd_fd.fd_cmask = CMASK;
1867	newfdp->fd_fd.fd_ofiles = newfdp->fd_dfiles;
1868	newfdp->fd_fd.fd_nfiles = NDFILE;
1869	newfdp->fd_fd.fd_map = newfdp->fd_dmap;
1870	newfdp->fd_fd.fd_lastfile = -1;
1871	return (&newfdp->fd_fd);
1872}
1873
1874static struct filedesc *
1875fdhold(struct proc *p)
1876{
1877	struct filedesc *fdp;
1878
1879	mtx_lock(&fdesc_mtx);
1880	fdp = p->p_fd;
1881	if (fdp != NULL)
1882		fdp->fd_holdcnt++;
1883	mtx_unlock(&fdesc_mtx);
1884	return (fdp);
1885}
1886
1887static void
1888fddrop(struct filedesc *fdp)
1889{
1890	struct filedesc0 *fdp0;
1891	struct freetable *ft;
1892	int i;
1893
1894	mtx_lock(&fdesc_mtx);
1895	i = --fdp->fd_holdcnt;
1896	mtx_unlock(&fdesc_mtx);
1897	if (i > 0)
1898		return;
1899
1900	FILEDESC_LOCK_DESTROY(fdp);
1901	fdp0 = (struct filedesc0 *)fdp;
1902	while ((ft = SLIST_FIRST(&fdp0->fd_free)) != NULL) {
1903		SLIST_REMOVE_HEAD(&fdp0->fd_free, ft_next);
1904		free(ft->ft_table, M_FILEDESC);
1905	}
1906	free(fdp, M_FILEDESC);
1907}
1908
1909/*
1910 * Share a filedesc structure.
1911 */
1912struct filedesc *
1913fdshare(struct filedesc *fdp)
1914{
1915
1916	FILEDESC_XLOCK(fdp);
1917	fdp->fd_refcnt++;
1918	FILEDESC_XUNLOCK(fdp);
1919	return (fdp);
1920}
1921
1922/*
1923 * Unshare a filedesc structure, if necessary by making a copy
1924 */
1925void
1926fdunshare(struct thread *td)
1927{
1928	struct filedesc *tmp;
1929	struct proc *p = td->td_proc;
1930
1931	if (p->p_fd->fd_refcnt == 1)
1932		return;
1933
1934	tmp = fdcopy(p->p_fd);
1935	fdescfree(td);
1936	p->p_fd = tmp;
1937}
1938
1939/*
1940 * Copy a filedesc structure.  A NULL pointer argument returns a NULL
1941 * reference; this is to ease callers, not to catch errors.
1942 */
1943struct filedesc *
1944fdcopy(struct filedesc *fdp)
1945{
1946	struct filedesc *newfdp;
1947	struct filedescent *nfde, *ofde;
1948	int i;
1949
1950	/* Certain daemons might not have file descriptors. */
1951	if (fdp == NULL)
1952		return (NULL);
1953
1954	newfdp = fdinit(fdp);
1955	FILEDESC_SLOCK(fdp);
1956	while (fdp->fd_lastfile >= newfdp->fd_nfiles) {
1957		FILEDESC_SUNLOCK(fdp);
1958		FILEDESC_XLOCK(newfdp);
1959		fdgrowtable(newfdp, fdp->fd_lastfile + 1);
1960		FILEDESC_XUNLOCK(newfdp);
1961		FILEDESC_SLOCK(fdp);
1962	}
1963	/* copy all passable descriptors (i.e. not kqueue) */
1964	newfdp->fd_freefile = -1;
1965	for (i = 0; i <= fdp->fd_lastfile; ++i) {
1966		ofde = &fdp->fd_ofiles[i];
1967		if (fdisused(fdp, i) &&
1968		    (ofde->fde_file->f_ops->fo_flags & DFLAG_PASSABLE) &&
1969		    ofde->fde_file->f_ops != &badfileops) {
1970			nfde = &newfdp->fd_ofiles[i];
1971			*nfde = *ofde;
1972			filecaps_copy(&ofde->fde_caps, &nfde->fde_caps);
1973			fhold(nfde->fde_file);
1974			newfdp->fd_lastfile = i;
1975		} else {
1976			if (newfdp->fd_freefile == -1)
1977				newfdp->fd_freefile = i;
1978		}
1979	}
1980	newfdp->fd_cmask = fdp->fd_cmask;
1981	FILEDESC_SUNLOCK(fdp);
1982	FILEDESC_XLOCK(newfdp);
1983	for (i = 0; i <= newfdp->fd_lastfile; ++i) {
1984		if (newfdp->fd_ofiles[i].fde_file != NULL)
1985			fdused(newfdp, i);
1986	}
1987	if (newfdp->fd_freefile == -1)
1988		newfdp->fd_freefile = i;
1989	FILEDESC_XUNLOCK(newfdp);
1990	return (newfdp);
1991}
1992
1993/*
1994 * Release a filedesc structure.
1995 */
1996void
1997fdescfree(struct thread *td)
1998{
1999	struct filedesc *fdp;
2000	int i;
2001	struct filedesc_to_leader *fdtol;
2002	struct file *fp;
2003	struct vnode *cdir, *jdir, *rdir, *vp;
2004	struct flock lf;
2005
2006	/* Certain daemons might not have file descriptors. */
2007	fdp = td->td_proc->p_fd;
2008	if (fdp == NULL)
2009		return;
2010
2011#ifdef RACCT
2012	if (racct_enable) {
2013		PROC_LOCK(td->td_proc);
2014		racct_set(td->td_proc, RACCT_NOFILE, 0);
2015		PROC_UNLOCK(td->td_proc);
2016	}
2017#endif
2018
2019	/* Check for special need to clear POSIX style locks */
2020	fdtol = td->td_proc->p_fdtol;
2021	if (fdtol != NULL) {
2022		FILEDESC_XLOCK(fdp);
2023		KASSERT(fdtol->fdl_refcount > 0,
2024		    ("filedesc_to_refcount botch: fdl_refcount=%d",
2025		    fdtol->fdl_refcount));
2026		if (fdtol->fdl_refcount == 1 &&
2027		    (td->td_proc->p_leader->p_flag & P_ADVLOCK) != 0) {
2028			for (i = 0; i <= fdp->fd_lastfile; i++) {
2029				fp = fdp->fd_ofiles[i].fde_file;
2030				if (fp == NULL || fp->f_type != DTYPE_VNODE)
2031					continue;
2032				fhold(fp);
2033				FILEDESC_XUNLOCK(fdp);
2034				lf.l_whence = SEEK_SET;
2035				lf.l_start = 0;
2036				lf.l_len = 0;
2037				lf.l_type = F_UNLCK;
2038				vp = fp->f_vnode;
2039				(void) VOP_ADVLOCK(vp,
2040				    (caddr_t)td->td_proc->p_leader, F_UNLCK,
2041				    &lf, F_POSIX);
2042				FILEDESC_XLOCK(fdp);
2043				fdrop(fp, td);
2044			}
2045		}
2046	retry:
2047		if (fdtol->fdl_refcount == 1) {
2048			if (fdp->fd_holdleaderscount > 0 &&
2049			    (td->td_proc->p_leader->p_flag & P_ADVLOCK) != 0) {
2050				/*
2051				 * close() or do_dup() has cleared a reference
2052				 * in a shared file descriptor table.
2053				 */
2054				fdp->fd_holdleaderswakeup = 1;
2055				sx_sleep(&fdp->fd_holdleaderscount,
2056				    FILEDESC_LOCK(fdp), PLOCK, "fdlhold", 0);
2057				goto retry;
2058			}
2059			if (fdtol->fdl_holdcount > 0) {
2060				/*
2061				 * Ensure that fdtol->fdl_leader remains
2062				 * valid in closef().
2063				 */
2064				fdtol->fdl_wakeup = 1;
2065				sx_sleep(fdtol, FILEDESC_LOCK(fdp), PLOCK,
2066				    "fdlhold", 0);
2067				goto retry;
2068			}
2069		}
2070		fdtol->fdl_refcount--;
2071		if (fdtol->fdl_refcount == 0 &&
2072		    fdtol->fdl_holdcount == 0) {
2073			fdtol->fdl_next->fdl_prev = fdtol->fdl_prev;
2074			fdtol->fdl_prev->fdl_next = fdtol->fdl_next;
2075		} else
2076			fdtol = NULL;
2077		td->td_proc->p_fdtol = NULL;
2078		FILEDESC_XUNLOCK(fdp);
2079		if (fdtol != NULL)
2080			free(fdtol, M_FILEDESC_TO_LEADER);
2081	}
2082
2083	mtx_lock(&fdesc_mtx);
2084	td->td_proc->p_fd = NULL;
2085	mtx_unlock(&fdesc_mtx);
2086
2087	FILEDESC_XLOCK(fdp);
2088	i = --fdp->fd_refcnt;
2089	if (i > 0) {
2090		FILEDESC_XUNLOCK(fdp);
2091		return;
2092	}
2093
2094	cdir = fdp->fd_cdir;
2095	fdp->fd_cdir = NULL;
2096	rdir = fdp->fd_rdir;
2097	fdp->fd_rdir = NULL;
2098	jdir = fdp->fd_jdir;
2099	fdp->fd_jdir = NULL;
2100	FILEDESC_XUNLOCK(fdp);
2101
2102	for (i = 0; i <= fdp->fd_lastfile; i++) {
2103		fp = fdp->fd_ofiles[i].fde_file;
2104		if (fp != NULL) {
2105			fdfree_last(fdp, i);
2106			(void) closef(fp, td);
2107		}
2108	}
2109
2110	if (fdp->fd_nfiles > NDFILE)
2111		free(fdp->fd_ofiles, M_FILEDESC);
2112	if (NDSLOTS(fdp->fd_nfiles) > NDSLOTS(NDFILE))
2113		free(fdp->fd_map, M_FILEDESC);
2114
2115	if (cdir != NULL)
2116		vrele(cdir);
2117	if (rdir != NULL)
2118		vrele(rdir);
2119	if (jdir != NULL)
2120		vrele(jdir);
2121
2122	fddrop(fdp);
2123}
2124
2125/*
2126 * For setugid programs, we don't want people to use that setugidness
2127 * to generate error messages which write to a file that would otherwise
2128 * be off-limits to the process.  We check for filesystems where
2129 * the vnode can change out from under us after execve (like [lin]procfs).
2130 *
2131 * Since setugidsafety calls this only for fd 0, 1 and 2, this check is
2132 * sufficient.  We also don't check for setugidness since we know we are.
2133 */
2134static int
2135is_unsafe(struct file *fp)
2136{
2137	if (fp->f_type == DTYPE_VNODE) {
2138		struct vnode *vp = fp->f_vnode;
2139
2140		if ((vp->v_vflag & VV_PROCDEP) != 0)
2141			return (1);
2142	}
2143	return (0);
2144}
2145
2146/*
2147 * Make this setugid thing safe, if at all possible.
2148 */
2149void
2150setugidsafety(struct thread *td)
2151{
2152	struct filedesc *fdp;
2153	struct file *fp;
2154	int i;
2155
2156	fdp = td->td_proc->p_fd;
2157	KASSERT(fdp->fd_refcnt == 1, ("the fdtable should not be shared"));
2158	FILEDESC_XLOCK(fdp);
2159	for (i = 0; i <= fdp->fd_lastfile; i++) {
2160		if (i > 2)
2161			break;
2162		fp = fdp->fd_ofiles[i].fde_file;
2163		if (fp != NULL && is_unsafe(fp)) {
2164			knote_fdclose(td, i);
2165			/*
2166			 * NULL-out descriptor prior to close to avoid
2167			 * a race while close blocks.
2168			 */
2169			fdfree(fdp, i);
2170			FILEDESC_XUNLOCK(fdp);
2171			(void) closef(fp, td);
2172			FILEDESC_XLOCK(fdp);
2173		}
2174	}
2175	FILEDESC_XUNLOCK(fdp);
2176}
2177
2178/*
2179 * If a specific file object occupies a specific file descriptor, close the
2180 * file descriptor entry and drop a reference on the file object.  This is a
2181 * convenience function to handle a subsequent error in a function that calls
2182 * falloc() that handles the race that another thread might have closed the
2183 * file descriptor out from under the thread creating the file object.
2184 */
2185void
2186fdclose(struct filedesc *fdp, struct file *fp, int idx, struct thread *td)
2187{
2188
2189	FILEDESC_XLOCK(fdp);
2190	if (fdp->fd_ofiles[idx].fde_file == fp) {
2191		fdfree(fdp, idx);
2192		FILEDESC_XUNLOCK(fdp);
2193		fdrop(fp, td);
2194	} else
2195		FILEDESC_XUNLOCK(fdp);
2196}
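
/*
 * Illustrative sketch (not part of the original source): a typical falloc()
 * consumer is assumed to unwind a failed setup roughly as follows, with
 * do_setup() standing in for whatever work failed after allocation:
 *
 *	error = falloc(td, &fp, &fd, 0);
 *	if (error != 0)
 *		return (error);
 *	if ((error = do_setup(fp)) != 0) {
 *		fdclose(fdp, fp, fd, td);
 *		fdrop(fp, td);
 *		return (error);
 *	}
 */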
2197
2198/*
2199 * Close any files marked close-on-exec, plus POSIX message queue descriptors.
2200 */
2201void
2202fdcloseexec(struct thread *td)
2203{
2204	struct filedesc *fdp;
2205	struct filedescent *fde;
2206	struct file *fp;
2207	int i;
2208
2209	fdp = td->td_proc->p_fd;
2210	KASSERT(fdp->fd_refcnt == 1, ("the fdtable should not be shared"));
2211	FILEDESC_XLOCK(fdp);
2212	for (i = 0; i <= fdp->fd_lastfile; i++) {
2213		fde = &fdp->fd_ofiles[i];
2214		fp = fde->fde_file;
2215		if (fp != NULL && (fp->f_type == DTYPE_MQUEUE ||
2216		    (fde->fde_flags & UF_EXCLOSE))) {
2217			fdfree(fdp, i);
2218			(void) closefp(fdp, i, fp, td, 0);
2219			/* closefp() drops the FILEDESC lock. */
2220			FILEDESC_XLOCK(fdp);
2221		}
2222	}
2223	FILEDESC_XUNLOCK(fdp);
2224}
2225
2226/*
2227 * It is unsafe for set[ug]id processes to be started with file
2228 * descriptors 0..2 closed, as these descriptors are given implicit
2229 * significance in the Standard C library.  fdcheckstd() will create a
2230 * descriptor referencing /dev/null for each of stdin, stdout, and
2231 * stderr that is not already open.
2232 */
2233int
2234fdcheckstd(struct thread *td)
2235{
2236	struct filedesc *fdp;
2237	register_t retval, save;
2238	int i, error, devnull;
2239
2240	fdp = td->td_proc->p_fd;
2241	KASSERT(fdp->fd_refcnt == 1, ("the fdtable should not be shared"));
2242	devnull = -1;
2243	error = 0;
2244	for (i = 0; i < 3; i++) {
2245		if (fdp->fd_ofiles[i].fde_file != NULL)
2246			continue;
2247		if (devnull < 0) {
2248			save = td->td_retval[0];
2249			error = kern_open(td, "/dev/null", UIO_SYSSPACE,
2250			    O_RDWR, 0);
2251			devnull = td->td_retval[0];
2252			td->td_retval[0] = save;
2253			if (error)
2254				break;
2255			KASSERT(devnull == i, ("oof, we didn't get our fd"));
2256		} else {
2257			error = do_dup(td, DUP_FIXED, devnull, i, &retval);
2258			if (error != 0)
2259				break;
2260		}
2261	}
2262	return (error);
2263}
2264
2265/*
2266 * Internal form of close.  Decrement reference count on file structure.
2267 * Note: td may be NULL when closing a file that was being passed in a
2268 * message.
2269 *
2270 * XXXRW: Giant is not required for the caller, but often will be held; this
2271 * makes it moderately likely the Giant will be recursed in the VFS case.
2272 */
2273int
2274closef(struct file *fp, struct thread *td)
2275{
2276	struct vnode *vp;
2277	struct flock lf;
2278	struct filedesc_to_leader *fdtol;
2279	struct filedesc *fdp;
2280
2281	/*
2282	 * POSIX record locking dictates that any close releases ALL
2283	 * locks owned by this process.  This is handled by setting
2284	 * a flag in the unlock to free ONLY locks obeying POSIX
2285	 * semantics, and not to free BSD-style file locks.
2286	 * If the descriptor was in a message, POSIX-style locks
2287	 * aren't passed with the descriptor, and the thread pointer
2288	 * will be NULL.  Callers should be careful only to pass a
2289	 * NULL thread pointer when there really is no owning
2290	 * context that might have locks, or the locks will be
2291	 * leaked.
2292	 */
2293	if (fp->f_type == DTYPE_VNODE && td != NULL) {
2294		vp = fp->f_vnode;
2295		if ((td->td_proc->p_leader->p_flag & P_ADVLOCK) != 0) {
2296			lf.l_whence = SEEK_SET;
2297			lf.l_start = 0;
2298			lf.l_len = 0;
2299			lf.l_type = F_UNLCK;
2300			(void) VOP_ADVLOCK(vp, (caddr_t)td->td_proc->p_leader,
2301			    F_UNLCK, &lf, F_POSIX);
2302		}
2303		fdtol = td->td_proc->p_fdtol;
2304		if (fdtol != NULL) {
2305			/*
2306			 * Handle special case where file descriptor table is
2307			 * shared between multiple process leaders.
2308			 */
2309			fdp = td->td_proc->p_fd;
2310			FILEDESC_XLOCK(fdp);
2311			for (fdtol = fdtol->fdl_next;
2312			    fdtol != td->td_proc->p_fdtol;
2313			    fdtol = fdtol->fdl_next) {
2314				if ((fdtol->fdl_leader->p_flag &
2315				    P_ADVLOCK) == 0)
2316					continue;
2317				fdtol->fdl_holdcount++;
2318				FILEDESC_XUNLOCK(fdp);
2319				lf.l_whence = SEEK_SET;
2320				lf.l_start = 0;
2321				lf.l_len = 0;
2322				lf.l_type = F_UNLCK;
2323				vp = fp->f_vnode;
2324				(void) VOP_ADVLOCK(vp,
2325				    (caddr_t)fdtol->fdl_leader, F_UNLCK, &lf,
2326				    F_POSIX);
2327				FILEDESC_XLOCK(fdp);
2328				fdtol->fdl_holdcount--;
2329				if (fdtol->fdl_holdcount == 0 &&
2330				    fdtol->fdl_wakeup != 0) {
2331					fdtol->fdl_wakeup = 0;
2332					wakeup(fdtol);
2333				}
2334			}
2335			FILEDESC_XUNLOCK(fdp);
2336		}
2337	}
2338	return (fdrop(fp, td));
2339}
2340
2341/*
2342 * Initialize the file pointer with the specified properties.
2343 *
2344 * The ops are set with release semantics to be certain that the flags, type,
2345 * and data are visible when ops is.  This is to prevent ops methods from being
2346 * called with bad data.
2347 */
2348void
2349finit(struct file *fp, u_int flag, short type, void *data, struct fileops *ops)
2350{
2351	fp->f_data = data;
2352	fp->f_flag = flag;
2353	fp->f_type = type;
2354	atomic_store_rel_ptr((volatile uintptr_t *)&fp->f_ops, (uintptr_t)ops);
2355}
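
/*
 * Usage sketch (illustrative, with hypothetical names): a subsystem that
 * allocated fp with falloc_noinstall() would typically publish its private
 * state and operations vector in one shot, e.g.
 *
 *	finit(fp, FREAD | FWRITE, DTYPE_PIPE, my_softc, &my_fileops);
 *
 * after which the release store above guarantees that f_data, f_flag and
 * f_type are visible to lockless readers by the time the new f_ops is.
 */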
2356
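/*
 * Lockless descriptor lookup.  On success *fpp is set to the file with an
 * extra reference taken without the filedesc lock; the caller must release
 * it with fdrop().  Under CAPABILITIES the rights of the descriptor are
 * checked against *needrightsp and optionally copied out via *haverightsp.
 */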
2357int
2358fget_unlocked(struct filedesc *fdp, int fd, cap_rights_t *needrightsp,
2359    int needfcntl, struct file **fpp, cap_rights_t *haverightsp)
2360{
2361#ifdef CAPABILITIES
2362	struct filedescent fde;
2363#endif
2364	struct file *fp;
2365	u_int count;
2366#ifdef CAPABILITIES
2367	seq_t seq;
2368	cap_rights_t haverights;
2369	int error;
2370#endif
2371
2372	/*
2373	 * Avoid read reordering: the acquire load of fd_nfiles keeps the
2374	 * accesses to fdp->fd_ofiles below from going out of bounds.
2375	 */
2376	if (fd < 0 || fd >= atomic_load_acq_int(&fdp->fd_nfiles))
2377		return (EBADF);
2378	/*
2379	 * Fetch the descriptor locklessly.  We avoid fdrop() races by
2380	 * never raising a refcount above 0.  To accomplish this we have
2381	 * to use a cmpset loop rather than an atomic_add.  The descriptor
2382	 * must be re-verified once we acquire a reference to be certain
2383	 * that the identity is still correct and we did not lose a race
2384	 * due to preemption.
2385	 */
2386	for (;;) {
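		/*
		 * Under CAPABILITIES the whole filedescent is snapshotted
		 * and then validated against the per-slot sequence counter,
		 * so the file pointer and its capability rights are read as
		 * a consistent pair.
		 */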
2387#ifdef CAPABILITIES
2388		seq = seq_read(fd_seq(fdp, fd));
2389		fde = fdp->fd_ofiles[fd];
2390		if (!seq_consistent(fd_seq(fdp, fd), seq)) {
2391			cpu_spinwait();
2392			continue;
2393		}
2394		fp = fde.fde_file;
2395#else
2396		fp = fdp->fd_ofiles[fd].fde_file;
2397#endif
2398		if (fp == NULL)
2399			return (EBADF);
2400#ifdef CAPABILITIES
2401		haverights = *cap_rights_fde(&fde);
2402		if (needrightsp != NULL) {
2403			error = cap_check(&haverights, needrightsp);
2404			if (error != 0)
2405				return (error);
2406			if (cap_rights_is_set(needrightsp, CAP_FCNTL)) {
2407				error = cap_fcntl_check_fde(&fde, needfcntl);
2408				if (error != 0)
2409					return (error);
2410			}
2411		}
2412#endif
2413		count = fp->f_count;
2414		if (count == 0)
2415			continue;
2416		/*
2417		 * Use an acquire barrier to prevent caching of fd_ofiles
2418		 * so it is refreshed for verification.
2419		 */
2420		if (atomic_cmpset_acq_int(&fp->f_count, count, count + 1) != 1)
2421			continue;
2422#ifdef	CAPABILITIES
2423		if (seq_consistent_nomb(fd_seq(fdp, fd), seq))
2424#else
2425		if (fp == fdp->fd_ofiles[fd].fde_file)
2426#endif
2427			break;
2428		fdrop(fp, curthread);
2429	}
2430	*fpp = fp;
2431	if (haverightsp != NULL) {
2432#ifdef CAPABILITIES
2433		*haverightsp = haverights;
2434#else
2435		CAP_ALL(haverightsp);
2436#endif
2437	}
2438	return (0);
2439}
2440
2441/*
2442 * Extract the file pointer associated with the specified descriptor for the
2443 * current user process.
2444 *
2445 * If the descriptor doesn't exist or doesn't match 'flags', EBADF is
2446 * returned.
2447 *
2448 * File's rights will be checked against the capability rights mask.
2449 *
2450 * If an error occurred, the non-zero error is returned and *fpp is set to
2451 * NULL.  Otherwise *fpp is held and set and zero is returned.  Caller is
2452 * responsible for fdrop().
2453 */
2454static __inline int
2455_fget(struct thread *td, int fd, struct file **fpp, int flags,
2456    cap_rights_t *needrightsp, u_char *maxprotp)
2457{
2458	struct filedesc *fdp;
2459	struct file *fp;
2460	cap_rights_t haverights, needrights;
2461	int error;
2462
2463	*fpp = NULL;
2464	if (td == NULL || (fdp = td->td_proc->p_fd) == NULL)
2465		return (EBADF);
2466	if (needrightsp != NULL)
2467		needrights = *needrightsp;
2468	else
2469		cap_rights_init(&needrights);
2470	if (maxprotp != NULL)
2471		cap_rights_set(&needrights, CAP_MMAP);
2472	error = fget_unlocked(fdp, fd, &needrights, 0, &fp, &haverights);
2473	if (error != 0)
2474		return (error);
2475	if (fp->f_ops == &badfileops) {
2476		fdrop(fp, td);
2477		return (EBADF);
2478	}
2479
2480#ifdef CAPABILITIES
2481	/*
2482	 * If requested, convert capability rights to access flags.
2483	 */
2484	if (maxprotp != NULL)
2485		*maxprotp = cap_rights_to_vmprot(&haverights);
2486#else /* !CAPABILITIES */
2487	if (maxprotp != NULL)
2488		*maxprotp = VM_PROT_ALL;
2489#endif /* CAPABILITIES */
2490
2491	/*
2492	 * FREAD and FWRITE failure return EBADF as per POSIX.
2493	 */
2494	error = 0;
2495	switch (flags) {
2496	case FREAD:
2497	case FWRITE:
2498		if ((fp->f_flag & flags) == 0)
2499			error = EBADF;
2500		break;
2501	case FEXEC:
2502		if ((fp->f_flag & (FREAD | FEXEC)) == 0 ||
2503		    ((fp->f_flag & FWRITE) != 0))
2504			error = EBADF;
2505		break;
2506	case 0:
2507		break;
2508	default:
2509		KASSERT(0, ("wrong flags"));
2510	}
2511
2512	if (error != 0) {
2513		fdrop(fp, td);
2514		return (error);
2515	}
2516
2517	*fpp = fp;
2518	return (0);
2519}
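
/*
 * Minimal caller sketch (illustrative only): the fget() wrappers below are
 * typically used in the following pattern, mirroring sys_flock() further
 * down in this file; CAP_READ is just an example right:
 *
 *	cap_rights_t rights;
 *	struct file *fp;
 *	int error;
 *
 *	error = fget(td, fd, cap_rights_init(&rights, CAP_READ), &fp);
 *	if (error != 0)
 *		return (error);
 *	...operate on fp...
 *	fdrop(fp, td);
 */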
2520
2521int
2522fget(struct thread *td, int fd, cap_rights_t *rightsp, struct file **fpp)
2523{
2524
2525	return (_fget(td, fd, fpp, 0, rightsp, NULL));
2526}
2527
2528int
2529fget_mmap(struct thread *td, int fd, cap_rights_t *rightsp, u_char *maxprotp,
2530    struct file **fpp)
2531{
2532
2533	return (_fget(td, fd, fpp, 0, rightsp, maxprotp));
2534}
2535
2536int
2537fget_read(struct thread *td, int fd, cap_rights_t *rightsp, struct file **fpp)
2538{
2539
2540	return (_fget(td, fd, fpp, FREAD, rightsp, NULL));
2541}
2542
2543int
2544fget_write(struct thread *td, int fd, cap_rights_t *rightsp, struct file **fpp)
2545{
2546
2547	return (_fget(td, fd, fpp, FWRITE, rightsp, NULL));
2548}
2549
2550/*
2551 * Like fget() but loads the underlying vnode, or returns an error if the
2552 * descriptor does not represent a vnode.  Note that pipes use vnodes but
2553 * never have VM objects.  The returned vnode will be vref()'d.
2554 *
2555 * XXX: what about the unused flags?
2556 */
2557static __inline int
2558_fgetvp(struct thread *td, int fd, int flags, cap_rights_t *needrightsp,
2559    struct vnode **vpp)
2560{
2561	struct file *fp;
2562	int error;
2563
2564	*vpp = NULL;
2565	error = _fget(td, fd, &fp, flags, needrightsp, NULL);
2566	if (error != 0)
2567		return (error);
2568	if (fp->f_vnode == NULL) {
2569		error = EINVAL;
2570	} else {
2571		*vpp = fp->f_vnode;
2572		vref(*vpp);
2573	}
2574	fdrop(fp, td);
2575
2576	return (error);
2577}
2578
2579int
2580fgetvp(struct thread *td, int fd, cap_rights_t *rightsp, struct vnode **vpp)
2581{
2582
2583	return (_fgetvp(td, fd, 0, rightsp, vpp));
2584}
2585
2586int
2587fgetvp_rights(struct thread *td, int fd, cap_rights_t *needrightsp,
2588    struct filecaps *havecaps, struct vnode **vpp)
2589{
2590	struct filedesc *fdp;
2591	struct file *fp;
2592#ifdef CAPABILITIES
2593	int error;
2594#endif
2595
2596	if (td == NULL || (fdp = td->td_proc->p_fd) == NULL)
2597		return (EBADF);
2598
2599	fp = fget_locked(fdp, fd);
2600	if (fp == NULL || fp->f_ops == &badfileops)
2601		return (EBADF);
2602
2603#ifdef CAPABILITIES
2604	if (needrightsp != NULL) {
2605		error = cap_check(cap_rights(fdp, fd), needrightsp);
2606		if (error != 0)
2607			return (error);
2608	}
2609#endif
2610
2611	if (fp->f_vnode == NULL)
2612		return (EINVAL);
2613
2614	*vpp = fp->f_vnode;
2615	vref(*vpp);
2616	filecaps_copy(&fdp->fd_ofiles[fd].fde_caps, havecaps);
2617
2618	return (0);
2619}
2620
2621int
2622fgetvp_read(struct thread *td, int fd, cap_rights_t *rightsp, struct vnode **vpp)
2623{
2624
2625	return (_fgetvp(td, fd, FREAD, rightsp, vpp));
2626}
2627
2628int
2629fgetvp_exec(struct thread *td, int fd, cap_rights_t *rightsp, struct vnode **vpp)
2630{
2631
2632	return (_fgetvp(td, fd, FEXEC, rightsp, vpp));
2633}
2634
2635#ifdef notyet
2636int
2637fgetvp_write(struct thread *td, int fd, cap_rights_t *rightsp,
2638    struct vnode **vpp)
2639{
2640
2641	return (_fgetvp(td, fd, FWRITE, rightsp, vpp));
2642}
2643#endif
2644
2645/*
2646 * Like fget() but loads the underlying socket, or returns an error if the
2647 * descriptor does not represent a socket.
2648 *
2649 * We bump the ref count on the returned socket.  XXX Also obtain the SX lock
2650 * in the future.
2651 *
2652 * Note: fgetsock() and fputsock() are deprecated, as consumers should rely
2653 * on their file descriptor reference to prevent the socket from being free'd
2654 * during use.
2655 */
2656int
2657fgetsock(struct thread *td, int fd, cap_rights_t *rightsp, struct socket **spp,
2658    u_int *fflagp)
2659{
2660	struct file *fp;
2661	int error;
2662
2663	*spp = NULL;
2664	if (fflagp != NULL)
2665		*fflagp = 0;
2666	if ((error = _fget(td, fd, &fp, 0, rightsp, NULL)) != 0)
2667		return (error);
2668	if (fp->f_type != DTYPE_SOCKET) {
2669		error = ENOTSOCK;
2670	} else {
2671		*spp = fp->f_data;
2672		if (fflagp)
2673			*fflagp = fp->f_flag;
2674		SOCK_LOCK(*spp);
2675		soref(*spp);
2676		SOCK_UNLOCK(*spp);
2677	}
2678	fdrop(fp, td);
2679
2680	return (error);
2681}
2682
2683/*
2684 * Drop the reference count on the socket and XXX release the SX lock in the
2685 * future.  The last reference closes the socket.
2686 *
2687 * Note: fputsock() is deprecated, see comment for fgetsock().
2688 */
2689void
2690fputsock(struct socket *so)
2691{
2692
2693	ACCEPT_LOCK();
2694	SOCK_LOCK(so);
2695	CURVNET_SET(so->so_vnet);
2696	sorele(so);
2697	CURVNET_RESTORE();
2698}
2699
2700/*
2701 * Handle the last reference to a file being closed.
2702 */
2703int
2704_fdrop(struct file *fp, struct thread *td)
2705{
2706	int error;
2707
2708	error = 0;
2709	if (fp->f_count != 0)
2710		panic("fdrop: count %d", fp->f_count);
2711	if (fp->f_ops != &badfileops)
2712		error = fo_close(fp, td);
2713	atomic_subtract_int(&openfiles, 1);
2714	crfree(fp->f_cred);
2715	free(fp->f_advice, M_FADVISE);
2716	uma_zfree(file_zone, fp);
2717
2718	return (error);
2719}
2720
2721/*
2722 * Apply an advisory lock on a file descriptor.
2723 *
2724 * Just attempt to get a record lock of the requested type on the entire file
2725 * (l_whence = SEEK_SET, l_start = 0, l_len = 0).
2726 */
2727#ifndef _SYS_SYSPROTO_H_
2728struct flock_args {
2729	int	fd;
2730	int	how;
2731};
2732#endif
2733/* ARGSUSED */
2734int
2735sys_flock(struct thread *td, struct flock_args *uap)
2736{
2737	struct file *fp;
2738	struct vnode *vp;
2739	struct flock lf;
2740	cap_rights_t rights;
2741	int error;
2742
2743	error = fget(td, uap->fd, cap_rights_init(&rights, CAP_FLOCK), &fp);
2744	if (error != 0)
2745		return (error);
2746	if (fp->f_type != DTYPE_VNODE) {
2747		fdrop(fp, td);
2748		return (EOPNOTSUPP);
2749	}
2750
2751	vp = fp->f_vnode;
2752	lf.l_whence = SEEK_SET;
2753	lf.l_start = 0;
2754	lf.l_len = 0;
2755	if (uap->how & LOCK_UN) {
2756		lf.l_type = F_UNLCK;
2757		atomic_clear_int(&fp->f_flag, FHASLOCK);
2758		error = VOP_ADVLOCK(vp, (caddr_t)fp, F_UNLCK, &lf, F_FLOCK);
2759		goto done2;
2760	}
2761	if (uap->how & LOCK_EX)
2762		lf.l_type = F_WRLCK;
2763	else if (uap->how & LOCK_SH)
2764		lf.l_type = F_RDLCK;
2765	else {
2766		error = EBADF;
2767		goto done2;
2768	}
2769	atomic_set_int(&fp->f_flag, FHASLOCK);
2770	error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf,
2771	    (uap->how & LOCK_NB) ? F_FLOCK : F_FLOCK | F_WAIT);
2772done2:
2773	fdrop(fp, td);
2774	return (error);
2775}

2776/*
2777 * Duplicate the specified descriptor to a free descriptor.
2778 */
2779int
2780dupfdopen(struct thread *td, struct filedesc *fdp, int dfd, int mode,
2781    int openerror, int *indxp)
2782{
2783	struct filedescent *newfde, *oldfde;
2784	struct file *fp;
2785	int error, indx;
2786
2787	KASSERT(openerror == ENODEV || openerror == ENXIO,
2788	    ("unexpected error %d in %s", openerror, __func__));
2789
2790	/*
2791	 * If the to-be-dup'd fd number is greater than the allowed number
2792	 * of file descriptors, or the fd to be dup'd has already been
2793	 * closed, then reject.
2794	 */
2795	FILEDESC_XLOCK(fdp);
2796	if ((fp = fget_locked(fdp, dfd)) == NULL) {
2797		FILEDESC_XUNLOCK(fdp);
2798		return (EBADF);
2799	}
2800
2801	error = fdalloc(td, 0, &indx);
2802	if (error != 0) {
2803		FILEDESC_XUNLOCK(fdp);
2804		return (error);
2805	}
2806
2807	/*
2808	 * There are two cases of interest here.
2809	 *
2810	 * For ENODEV simply dup (dfd) to file descriptor (indx) and return.
2811	 *
2812	 * For ENXIO steal away the file structure from (dfd) and store it in
2813	 * (indx).  (dfd) is effectively closed by this operation.
2814	 */
2815	switch (openerror) {
2816	case ENODEV:
2817		/*
2818		 * Check that the mode the file is being opened for is a
2819		 * subset of the mode of the existing descriptor.
2820		 */
2821		if (((mode & (FREAD|FWRITE)) | fp->f_flag) != fp->f_flag) {
2822			fdunused(fdp, indx);
2823			FILEDESC_XUNLOCK(fdp);
2824			return (EACCES);
2825		}
2826		fhold(fp);
2827		newfde = &fdp->fd_ofiles[indx];
2828		oldfde = &fdp->fd_ofiles[dfd];
2829#ifdef CAPABILITIES
2830		seq_write_begin(&newfde->fde_seq);
2831#endif
2832		memcpy(newfde, oldfde, fde_change_size);
2833		filecaps_copy(&oldfde->fde_caps, &newfde->fde_caps);
2834#ifdef CAPABILITIES
2835		seq_write_end(&newfde->fde_seq);
2836#endif
2837		break;
2838	case ENXIO:
2839		/*
2840		 * Steal away the file pointer from dfd and stuff it into indx.
2841		 */
2842		newfde = &fdp->fd_ofiles[indx];
2843		oldfde = &fdp->fd_ofiles[dfd];
2844#ifdef CAPABILITIES
2845		seq_write_begin(&newfde->fde_seq);
2846#endif
2847		memcpy(newfde, oldfde, fde_change_size);
2848		bzero(oldfde, fde_change_size);
2849		fdunused(fdp, dfd);
2850#ifdef CAPABILITIES
2851		seq_write_end(&newfde->fde_seq);
2852#endif
2853		break;
2854	}
2855	FILEDESC_XUNLOCK(fdp);
2856	*indxp = indx;
2857	return (0);
2858}
2859
2860/*
2861 * Scan all active processes and prisons to see if any of them have a current
2862 * or root directory of `olddp'. If so, replace them with the new mount point.
2863 */
2864void
2865mountcheckdirs(struct vnode *olddp, struct vnode *newdp)
2866{
2867	struct filedesc *fdp;
2868	struct prison *pr;
2869	struct proc *p;
2870	int nrele;
2871
2872	if (vrefcnt(olddp) == 1)
2873		return;
2874	nrele = 0;
2875	sx_slock(&allproc_lock);
2876	FOREACH_PROC_IN_SYSTEM(p) {
2877		fdp = fdhold(p);
2878		if (fdp == NULL)
2879			continue;
2880		FILEDESC_XLOCK(fdp);
2881		if (fdp->fd_cdir == olddp) {
2882			vref(newdp);
2883			fdp->fd_cdir = newdp;
2884			nrele++;
2885		}
2886		if (fdp->fd_rdir == olddp) {
2887			vref(newdp);
2888			fdp->fd_rdir = newdp;
2889			nrele++;
2890		}
2891		if (fdp->fd_jdir == olddp) {
2892			vref(newdp);
2893			fdp->fd_jdir = newdp;
2894			nrele++;
2895		}
2896		FILEDESC_XUNLOCK(fdp);
2897		fddrop(fdp);
2898	}
2899	sx_sunlock(&allproc_lock);
2900	if (rootvnode == olddp) {
2901		vref(newdp);
2902		rootvnode = newdp;
2903		nrele++;
2904	}
2905	mtx_lock(&prison0.pr_mtx);
2906	if (prison0.pr_root == olddp) {
2907		vref(newdp);
2908		prison0.pr_root = newdp;
2909		nrele++;
2910	}
2911	mtx_unlock(&prison0.pr_mtx);
2912	sx_slock(&allprison_lock);
2913	TAILQ_FOREACH(pr, &allprison, pr_list) {
2914		mtx_lock(&pr->pr_mtx);
2915		if (pr->pr_root == olddp) {
2916			vref(newdp);
2917			pr->pr_root = newdp;
2918			nrele++;
2919		}
2920		mtx_unlock(&pr->pr_mtx);
2921	}
2922	sx_sunlock(&allprison_lock);
2923	while (nrele--)
2924		vrele(olddp);
2925}
2926
2927struct filedesc_to_leader *
2928filedesc_to_leader_alloc(struct filedesc_to_leader *old, struct filedesc *fdp, struct proc *leader)
2929{
2930	struct filedesc_to_leader *fdtol;
2931
2932	fdtol = malloc(sizeof(struct filedesc_to_leader),
2933	    M_FILEDESC_TO_LEADER, M_WAITOK);
2934	fdtol->fdl_refcount = 1;
2935	fdtol->fdl_holdcount = 0;
2936	fdtol->fdl_wakeup = 0;
2937	fdtol->fdl_leader = leader;
2938	if (old != NULL) {
2939		FILEDESC_XLOCK(fdp);
2940		fdtol->fdl_next = old->fdl_next;
2941		fdtol->fdl_prev = old;
2942		old->fdl_next = fdtol;
2943		fdtol->fdl_next->fdl_prev = fdtol;
2944		FILEDESC_XUNLOCK(fdp);
2945	} else {
2946		fdtol->fdl_next = fdtol;
2947		fdtol->fdl_prev = fdtol;
2948	}
2949	return (fdtol);
2950}
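
/*
 * Note: filedesc_to_leader structures for one shared descriptor table are
 * kept on a circular doubly-linked list threaded through fdl_next/fdl_prev;
 * a leader with no peers simply points back at itself.
 */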
2951
2952/*
2953 * Get file structures globally.
2954 */
2955static int
2956sysctl_kern_file(SYSCTL_HANDLER_ARGS)
2957{
2958	struct xfile xf;
2959	struct filedesc *fdp;
2960	struct file *fp;
2961	struct proc *p;
2962	int error, n;
2963
2964	error = sysctl_wire_old_buffer(req, 0);
2965	if (error != 0)
2966		return (error);
2967	if (req->oldptr == NULL) {
2968		n = 0;
2969		sx_slock(&allproc_lock);
2970		FOREACH_PROC_IN_SYSTEM(p) {
2971			if (p->p_state == PRS_NEW)
2972				continue;
2973			fdp = fdhold(p);
2974			if (fdp == NULL)
2975				continue;
2976			/* This overestimates the count for sparse tables. */
2977			if (fdp->fd_lastfile > 0)
2978				n += fdp->fd_lastfile;
2979			fddrop(fdp);
2980		}
2981		sx_sunlock(&allproc_lock);
2982		return (SYSCTL_OUT(req, 0, n * sizeof(xf)));
2983	}
2984	error = 0;
2985	bzero(&xf, sizeof(xf));
2986	xf.xf_size = sizeof(xf);
2987	sx_slock(&allproc_lock);
2988	FOREACH_PROC_IN_SYSTEM(p) {
2989		PROC_LOCK(p);
2990		if (p->p_state == PRS_NEW) {
2991			PROC_UNLOCK(p);
2992			continue;
2993		}
2994		if (p_cansee(req->td, p) != 0) {
2995			PROC_UNLOCK(p);
2996			continue;
2997		}
2998		xf.xf_pid = p->p_pid;
2999		xf.xf_uid = p->p_ucred->cr_uid;
3000		PROC_UNLOCK(p);
3001		fdp = fdhold(p);
3002		if (fdp == NULL)
3003			continue;
3004		FILEDESC_SLOCK(fdp);
3005		for (n = 0; fdp->fd_refcnt > 0 && n <= fdp->fd_lastfile; ++n) {
3006			if ((fp = fdp->fd_ofiles[n].fde_file) == NULL)
3007				continue;
3008			xf.xf_fd = n;
3009			xf.xf_file = fp;
3010			xf.xf_data = fp->f_data;
3011			xf.xf_vnode = fp->f_vnode;
3012			xf.xf_type = fp->f_type;
3013			xf.xf_count = fp->f_count;
3014			xf.xf_msgcount = 0;
3015			xf.xf_offset = foffset_get(fp);
3016			xf.xf_flag = fp->f_flag;
3017			error = SYSCTL_OUT(req, &xf, sizeof(xf));
3018			if (error)
3019				break;
3020		}
3021		FILEDESC_SUNLOCK(fdp);
3022		fddrop(fdp);
3023		if (error)
3024			break;
3025	}
3026	sx_sunlock(&allproc_lock);
3027	return (error);
3028}
3029
3030SYSCTL_PROC(_kern, KERN_FILE, file, CTLTYPE_OPAQUE|CTLFLAG_RD|CTLFLAG_MPSAFE,
3031    0, 0, sysctl_kern_file, "S,xfile", "Entire file table");
3032
3033#ifdef KINFO_OFILE_SIZE
3034CTASSERT(sizeof(struct kinfo_ofile) == KINFO_OFILE_SIZE);
3035#endif
3036
3037#ifdef COMPAT_FREEBSD7
3038static int
3039export_vnode_for_osysctl(struct vnode *vp, int type,
3040    struct kinfo_ofile *kif, struct filedesc *fdp, struct sysctl_req *req)
3041{
3042	int error;
3043	char *fullpath, *freepath;
3044
3045	bzero(kif, sizeof(*kif));
3046	kif->kf_structsize = sizeof(*kif);
3047
3048	vref(vp);
3049	kif->kf_fd = type;
3050	kif->kf_type = KF_TYPE_VNODE;
3051	/* This function only handles directories. */
3052	if (vp->v_type != VDIR) {
3053		vrele(vp);
3054		return (ENOTDIR);
3055	}
3056	kif->kf_vnode_type = KF_VTYPE_VDIR;
3057
3058	/*
3059	 * This is not a true file descriptor, so we set a bogus refcount
3060	 * and offset to indicate these fields should be ignored.
3061	 */
3062	kif->kf_ref_count = -1;
3063	kif->kf_offset = -1;
3064
3065	freepath = NULL;
3066	fullpath = "-";
3067	FILEDESC_SUNLOCK(fdp);
3068	vn_fullpath(curthread, vp, &fullpath, &freepath);
3069	vrele(vp);
3070	strlcpy(kif->kf_path, fullpath, sizeof(kif->kf_path));
3071	if (freepath != NULL)
3072		free(freepath, M_TEMP);
3073	error = SYSCTL_OUT(req, kif, sizeof(*kif));
3074	FILEDESC_SLOCK(fdp);
3075	return (error);
3076}
3077
3078/*
3079 * Get per-process file descriptors for use by procstat(1), et al.
3080 */
3081static int
3082sysctl_kern_proc_ofiledesc(SYSCTL_HANDLER_ARGS)
3083{
3084	char *fullpath, *freepath;
3085	struct kinfo_ofile *kif;
3086	struct filedesc *fdp;
3087	int error, i, *name;
3088	struct shmfd *shmfd;
3089	struct socket *so;
3090	struct vnode *vp;
3091	struct ksem *ks;
3092	struct file *fp;
3093	struct proc *p;
3094	struct tty *tp;
3095
3096	name = (int *)arg1;
3097	error = pget((pid_t)name[0], PGET_CANDEBUG | PGET_NOTWEXIT, &p);
3098	if (error != 0)
3099		return (error);
3100	fdp = fdhold(p);
3101	PROC_UNLOCK(p);
3102	if (fdp == NULL)
3103		return (ENOENT);
3104	kif = malloc(sizeof(*kif), M_TEMP, M_WAITOK);
3105	FILEDESC_SLOCK(fdp);
3106	if (fdp->fd_cdir != NULL)
3107		export_vnode_for_osysctl(fdp->fd_cdir, KF_FD_TYPE_CWD, kif,
3108				fdp, req);
3109	if (fdp->fd_rdir != NULL)
3110		export_vnode_for_osysctl(fdp->fd_rdir, KF_FD_TYPE_ROOT, kif,
3111				fdp, req);
3112	if (fdp->fd_jdir != NULL)
3113		export_vnode_for_osysctl(fdp->fd_jdir, KF_FD_TYPE_JAIL, kif,
3114				fdp, req);
3115	for (i = 0; fdp->fd_refcnt > 0 && i <= fdp->fd_lastfile; i++) {
3116		if ((fp = fdp->fd_ofiles[i].fde_file) == NULL)
3117			continue;
3118		bzero(kif, sizeof(*kif));
3119		kif->kf_structsize = sizeof(*kif);
3120		ks = NULL;
3121		vp = NULL;
3122		so = NULL;
3123		tp = NULL;
3124		shmfd = NULL;
3125		kif->kf_fd = i;
3126
3127		switch (fp->f_type) {
3128		case DTYPE_VNODE:
3129			kif->kf_type = KF_TYPE_VNODE;
3130			vp = fp->f_vnode;
3131			break;
3132
3133		case DTYPE_SOCKET:
3134			kif->kf_type = KF_TYPE_SOCKET;
3135			so = fp->f_data;
3136			break;
3137
3138		case DTYPE_PIPE:
3139			kif->kf_type = KF_TYPE_PIPE;
3140			break;
3141
3142		case DTYPE_FIFO:
3143			kif->kf_type = KF_TYPE_FIFO;
3144			vp = fp->f_vnode;
3145			break;
3146
3147		case DTYPE_KQUEUE:
3148			kif->kf_type = KF_TYPE_KQUEUE;
3149			break;
3150
3151		case DTYPE_CRYPTO:
3152			kif->kf_type = KF_TYPE_CRYPTO;
3153			break;
3154
3155		case DTYPE_MQUEUE:
3156			kif->kf_type = KF_TYPE_MQUEUE;
3157			break;
3158
3159		case DTYPE_SHM:
3160			kif->kf_type = KF_TYPE_SHM;
3161			shmfd = fp->f_data;
3162			break;
3163
3164		case DTYPE_SEM:
3165			kif->kf_type = KF_TYPE_SEM;
3166			ks = fp->f_data;
3167			break;
3168
3169		case DTYPE_PTS:
3170			kif->kf_type = KF_TYPE_PTS;
3171			tp = fp->f_data;
3172			break;
3173
3174#ifdef PROCDESC
3175		case DTYPE_PROCDESC:
3176			kif->kf_type = KF_TYPE_PROCDESC;
3177			break;
3178#endif
3179
3180		default:
3181			kif->kf_type = KF_TYPE_UNKNOWN;
3182			break;
3183		}
3184		kif->kf_ref_count = fp->f_count;
3185		if (fp->f_flag & FREAD)
3186			kif->kf_flags |= KF_FLAG_READ;
3187		if (fp->f_flag & FWRITE)
3188			kif->kf_flags |= KF_FLAG_WRITE;
3189		if (fp->f_flag & FAPPEND)
3190			kif->kf_flags |= KF_FLAG_APPEND;
3191		if (fp->f_flag & FASYNC)
3192			kif->kf_flags |= KF_FLAG_ASYNC;
3193		if (fp->f_flag & FFSYNC)
3194			kif->kf_flags |= KF_FLAG_FSYNC;
3195		if (fp->f_flag & FNONBLOCK)
3196			kif->kf_flags |= KF_FLAG_NONBLOCK;
3197		if (fp->f_flag & O_DIRECT)
3198			kif->kf_flags |= KF_FLAG_DIRECT;
3199		if (fp->f_flag & FHASLOCK)
3200			kif->kf_flags |= KF_FLAG_HASLOCK;
3201		kif->kf_offset = foffset_get(fp);
3202		if (vp != NULL) {
3203			vref(vp);
3204			switch (vp->v_type) {
3205			case VNON:
3206				kif->kf_vnode_type = KF_VTYPE_VNON;
3207				break;
3208			case VREG:
3209				kif->kf_vnode_type = KF_VTYPE_VREG;
3210				break;
3211			case VDIR:
3212				kif->kf_vnode_type = KF_VTYPE_VDIR;
3213				break;
3214			case VBLK:
3215				kif->kf_vnode_type = KF_VTYPE_VBLK;
3216				break;
3217			case VCHR:
3218				kif->kf_vnode_type = KF_VTYPE_VCHR;
3219				break;
3220			case VLNK:
3221				kif->kf_vnode_type = KF_VTYPE_VLNK;
3222				break;
3223			case VSOCK:
3224				kif->kf_vnode_type = KF_VTYPE_VSOCK;
3225				break;
3226			case VFIFO:
3227				kif->kf_vnode_type = KF_VTYPE_VFIFO;
3228				break;
3229			case VBAD:
3230				kif->kf_vnode_type = KF_VTYPE_VBAD;
3231				break;
3232			default:
3233				kif->kf_vnode_type = KF_VTYPE_UNKNOWN;
3234				break;
3235			}
3236			/*
3237			 * It is OK to drop the filedesc lock here as we will
3238			 * re-validate and re-evaluate its properties when
3239			 * the loop continues.
3240			 */
3241			freepath = NULL;
3242			fullpath = "-";
3243			FILEDESC_SUNLOCK(fdp);
3244			vn_fullpath(curthread, vp, &fullpath, &freepath);
3245			vrele(vp);
3246			strlcpy(kif->kf_path, fullpath,
3247			    sizeof(kif->kf_path));
3248			if (freepath != NULL)
3249				free(freepath, M_TEMP);
3250			FILEDESC_SLOCK(fdp);
3251		}
3252		if (so != NULL) {
3253			struct sockaddr *sa;
3254
3255			if (so->so_proto->pr_usrreqs->pru_sockaddr(so, &sa)
3256			    == 0 && sa->sa_len <= sizeof(kif->kf_sa_local)) {
3257				bcopy(sa, &kif->kf_sa_local, sa->sa_len);
3258				free(sa, M_SONAME);
3259			}
3260			if (so->so_proto->pr_usrreqs->pru_peeraddr(so, &sa)
3261			    == 0 && sa->sa_len <= sizeof(kif->kf_sa_peer)) {
3262				bcopy(sa, &kif->kf_sa_peer, sa->sa_len);
3263				free(sa, M_SONAME);
3264			}
3265			kif->kf_sock_domain =
3266			    so->so_proto->pr_domain->dom_family;
3267			kif->kf_sock_type = so->so_type;
3268			kif->kf_sock_protocol = so->so_proto->pr_protocol;
3269		}
3270		if (tp != NULL) {
3271			strlcpy(kif->kf_path, tty_devname(tp),
3272			    sizeof(kif->kf_path));
3273		}
3274		if (shmfd != NULL)
3275			shm_path(shmfd, kif->kf_path, sizeof(kif->kf_path));
3276		if (ks != NULL && ksem_info != NULL)
3277			ksem_info(ks, kif->kf_path, sizeof(kif->kf_path), NULL);
3278		error = SYSCTL_OUT(req, kif, sizeof(*kif));
3279		if (error)
3280			break;
3281	}
3282	FILEDESC_SUNLOCK(fdp);
3283	fddrop(fdp);
3284	free(kif, M_TEMP);
3285	return (0);
3286}
3287
3288static SYSCTL_NODE(_kern_proc, KERN_PROC_OFILEDESC, ofiledesc,
3289    CTLFLAG_RD|CTLFLAG_MPSAFE, sysctl_kern_proc_ofiledesc,
3290    "Process ofiledesc entries");
3291#endif	/* COMPAT_FREEBSD7 */
3292
3293#ifdef KINFO_FILE_SIZE
3294CTASSERT(sizeof(struct kinfo_file) == KINFO_FILE_SIZE);
3295#endif
3296
3297struct export_fd_buf {
3298	struct filedesc		*fdp;
3299	struct sbuf 		*sb;
3300	ssize_t			remainder;
3301	struct kinfo_file	kif;
3302};
3303
3304static int
3305export_fd_to_sb(void *data, int type, int fd, int fflags, int refcnt,
3306    int64_t offset, cap_rights_t *rightsp, struct export_fd_buf *efbuf)
3307{
3308	struct {
3309		int	fflag;
3310		int	kf_fflag;
3311	} fflags_table[] = {
3312		{ FAPPEND, KF_FLAG_APPEND },
3313		{ FASYNC, KF_FLAG_ASYNC },
3314		{ FFSYNC, KF_FLAG_FSYNC },
3315		{ FHASLOCK, KF_FLAG_HASLOCK },
3316		{ FNONBLOCK, KF_FLAG_NONBLOCK },
3317		{ FREAD, KF_FLAG_READ },
3318		{ FWRITE, KF_FLAG_WRITE },
3319		{ O_CREAT, KF_FLAG_CREAT },
3320		{ O_DIRECT, KF_FLAG_DIRECT },
3321		{ O_EXCL, KF_FLAG_EXCL },
3322		{ O_EXEC, KF_FLAG_EXEC },
3323		{ O_EXLOCK, KF_FLAG_EXLOCK },
3324		{ O_NOFOLLOW, KF_FLAG_NOFOLLOW },
3325		{ O_SHLOCK, KF_FLAG_SHLOCK },
3326		{ O_TRUNC, KF_FLAG_TRUNC }
3327	};
3328#define	NFFLAGS	(sizeof(fflags_table) / sizeof(*fflags_table))
3329	struct kinfo_file *kif;
3330	struct vnode *vp;
3331	int error, locked;
3332	unsigned int i;
3333
3334	if (efbuf->remainder == 0)
3335		return (0);
3336	kif = &efbuf->kif;
3337	bzero(kif, sizeof(*kif));
3338	locked = efbuf->fdp != NULL;
3339	switch (type) {
3340	case KF_TYPE_FIFO:
3341	case KF_TYPE_VNODE:
3342		if (locked) {
3343			FILEDESC_SUNLOCK(efbuf->fdp);
3344			locked = 0;
3345		}
3346		vp = (struct vnode *)data;
3347		error = fill_vnode_info(vp, kif);
3348		vrele(vp);
3349		break;
3350	case KF_TYPE_SOCKET:
3351		error = fill_socket_info((struct socket *)data, kif);
3352		break;
3353	case KF_TYPE_PIPE:
3354		error = fill_pipe_info((struct pipe *)data, kif);
3355		break;
3356	case KF_TYPE_PTS:
3357		error = fill_pts_info((struct tty *)data, kif);
3358		break;
3359	case KF_TYPE_PROCDESC:
3360		error = fill_procdesc_info((struct procdesc *)data, kif);
3361		break;
3362	case KF_TYPE_SEM:
3363		error = fill_sem_info((struct file *)data, kif);
3364		break;
3365	case KF_TYPE_SHM:
3366		error = fill_shm_info((struct file *)data, kif);
3367		break;
3368	default:
3369		error = 0;
3370	}
3371	if (error == 0)
3372		kif->kf_status |= KF_ATTR_VALID;
3373
3374	/*
3375	 * Translate file access flags.
3376	 */
3377	for (i = 0; i < NFFLAGS; i++)
3378		if (fflags & fflags_table[i].fflag)
3379			kif->kf_flags |= fflags_table[i].kf_fflag;
3380	if (rightsp != NULL)
3381		kif->kf_cap_rights = *rightsp;
3382	else
3383		cap_rights_init(&kif->kf_cap_rights);
3384	kif->kf_fd = fd;
3385	kif->kf_type = type;
3386	kif->kf_ref_count = refcnt;
3387	kif->kf_offset = offset;
3388	/* Pack record size down */
3389	kif->kf_structsize = offsetof(struct kinfo_file, kf_path) +
3390	    strlen(kif->kf_path) + 1;
3391	kif->kf_structsize = roundup(kif->kf_structsize, sizeof(uint64_t));
3392	if (efbuf->remainder != -1) {
3393		if (efbuf->remainder < kif->kf_structsize) {
3394			/* Terminate export. */
3395			efbuf->remainder = 0;
3396			if (efbuf->fdp != NULL && !locked)
3397				FILEDESC_SLOCK(efbuf->fdp);
3398			return (0);
3399		}
3400		efbuf->remainder -= kif->kf_structsize;
3401	}
3402	if (locked)
3403		FILEDESC_SUNLOCK(efbuf->fdp);
3404	error = sbuf_bcat(efbuf->sb, kif, kif->kf_structsize) == 0 ? 0 : ENOMEM;
3405	if (efbuf->fdp != NULL)
3406		FILEDESC_SLOCK(efbuf->fdp);
3407	return (error);
3408}
3409
3410/*
3411 * Store a process's file descriptor information in an sbuf.
3412 *
3413 * Takes a locked proc as argument, and returns with the proc unlocked.
3414 */
3415int
3416kern_proc_filedesc_out(struct proc *p,  struct sbuf *sb, ssize_t maxlen)
3417{
3418	struct file *fp;
3419	struct filedesc *fdp;
3420	struct export_fd_buf *efbuf;
3421	struct vnode *cttyvp, *textvp, *tracevp;
3422	int64_t offset;
3423	void *data;
3424	int error, i;
3425	int type, refcnt, fflags;
3426	cap_rights_t rights;
3427
3428	PROC_LOCK_ASSERT(p, MA_OWNED);
3429
3430	/* ktrace vnode */
3431	tracevp = p->p_tracevp;
3432	if (tracevp != NULL)
3433		vref(tracevp);
3434	/* text vnode */
3435	textvp = p->p_textvp;
3436	if (textvp != NULL)
3437		vref(textvp);
3438	/* Controlling tty. */
3439	cttyvp = NULL;
3440	if (p->p_pgrp != NULL && p->p_pgrp->pg_session != NULL) {
3441		cttyvp = p->p_pgrp->pg_session->s_ttyvp;
3442		if (cttyvp != NULL)
3443			vref(cttyvp);
3444	}
3445	fdp = fdhold(p);
3446	PROC_UNLOCK(p);
3447	efbuf = malloc(sizeof(*efbuf), M_TEMP, M_WAITOK);
3448	efbuf->fdp = NULL;
3449	efbuf->sb = sb;
3450	efbuf->remainder = maxlen;
3451	if (tracevp != NULL)
3452		export_fd_to_sb(tracevp, KF_TYPE_VNODE, KF_FD_TYPE_TRACE,
3453		    FREAD | FWRITE, -1, -1, NULL, efbuf);
3454	if (textvp != NULL)
3455		export_fd_to_sb(textvp, KF_TYPE_VNODE, KF_FD_TYPE_TEXT,
3456		    FREAD, -1, -1, NULL, efbuf);
3457	if (cttyvp != NULL)
3458		export_fd_to_sb(cttyvp, KF_TYPE_VNODE, KF_FD_TYPE_CTTY,
3459		    FREAD | FWRITE, -1, -1, NULL, efbuf);
3460	error = 0;
3461	if (fdp == NULL)
3462		goto fail;
3463	efbuf->fdp = fdp;
3464	FILEDESC_SLOCK(fdp);
3465	/* working directory */
3466	if (fdp->fd_cdir != NULL) {
3467		vref(fdp->fd_cdir);
3468		data = fdp->fd_cdir;
3469		export_fd_to_sb(data, KF_TYPE_VNODE, KF_FD_TYPE_CWD,
3470		    FREAD, -1, -1, NULL, efbuf);
3471	}
3472	/* root directory */
3473	if (fdp->fd_rdir != NULL) {
3474		vref(fdp->fd_rdir);
3475		data = fdp->fd_rdir;
3476		export_fd_to_sb(data, KF_TYPE_VNODE, KF_FD_TYPE_ROOT,
3477		    FREAD, -1, -1, NULL, efbuf);
3478	}
3479	/* jail directory */
3480	if (fdp->fd_jdir != NULL) {
3481		vref(fdp->fd_jdir);
3482		data = fdp->fd_jdir;
3483		export_fd_to_sb(data, KF_TYPE_VNODE, KF_FD_TYPE_JAIL,
3484		    FREAD, -1, -1, NULL, efbuf);
3485	}
3486	for (i = 0; fdp->fd_refcnt > 0 && i <= fdp->fd_lastfile; i++) {
3487		if ((fp = fdp->fd_ofiles[i].fde_file) == NULL)
3488			continue;
3489		data = NULL;
3490#ifdef CAPABILITIES
3491		rights = *cap_rights(fdp, i);
3492#else /* !CAPABILITIES */
3493		cap_rights_init(&rights);
3494#endif
3495		switch (fp->f_type) {
3496		case DTYPE_VNODE:
3497			type = KF_TYPE_VNODE;
3498			vref(fp->f_vnode);
3499			data = fp->f_vnode;
3500			break;
3501
3502		case DTYPE_SOCKET:
3503			type = KF_TYPE_SOCKET;
3504			data = fp->f_data;
3505			break;
3506
3507		case DTYPE_PIPE:
3508			type = KF_TYPE_PIPE;
3509			data = fp->f_data;
3510			break;
3511
3512		case DTYPE_FIFO:
3513			type = KF_TYPE_FIFO;
3514			vref(fp->f_vnode);
3515			data = fp->f_vnode;
3516			break;
3517
3518		case DTYPE_KQUEUE:
3519			type = KF_TYPE_KQUEUE;
3520			break;
3521
3522		case DTYPE_CRYPTO:
3523			type = KF_TYPE_CRYPTO;
3524			break;
3525
3526		case DTYPE_MQUEUE:
3527			type = KF_TYPE_MQUEUE;
3528			break;
3529
3530		case DTYPE_SHM:
3531			type = KF_TYPE_SHM;
3532			data = fp;
3533			break;
3534
3535		case DTYPE_SEM:
3536			type = KF_TYPE_SEM;
3537			data = fp;
3538			break;
3539
3540		case DTYPE_PTS:
3541			type = KF_TYPE_PTS;
3542			data = fp->f_data;
3543			break;
3544
3545#ifdef PROCDESC
3546		case DTYPE_PROCDESC:
3547			type = KF_TYPE_PROCDESC;
3548			data = fp->f_data;
3549			break;
3550#endif
3551
3552		default:
3553			type = KF_TYPE_UNKNOWN;
3554			break;
3555		}
3556		refcnt = fp->f_count;
3557		fflags = fp->f_flag;
3558		offset = foffset_get(fp);
3559
3560		/*
3561		 * Create sysctl entry.
3562		 * It is OK to drop the filedesc lock here as we will
3563		 * re-validate and re-evaluate its properties when
3564		 * the loop continues.
3565		 */
3566		error = export_fd_to_sb(data, type, i, fflags, refcnt,
3567		    offset, &rights, efbuf);
3568		if (error != 0)
3569			break;
3570	}
3571	FILEDESC_SUNLOCK(fdp);
3572	fddrop(fdp);
3573fail:
3574	free(efbuf, M_TEMP);
3575	return (error);
3576}
3577
3578#define FILEDESC_SBUF_SIZE	(sizeof(struct kinfo_file) * 5)
3579
3580/*
3581 * Get per-process file descriptors for use by procstat(1), et al.
3582 */
3583static int
3584sysctl_kern_proc_filedesc(SYSCTL_HANDLER_ARGS)
3585{
3586	struct sbuf sb;
3587	struct proc *p;
3588	ssize_t maxlen;
3589	int error, error2, *name;
3590
3591	name = (int *)arg1;
3592
3593	sbuf_new_for_sysctl(&sb, NULL, FILEDESC_SBUF_SIZE, req);
3594	error = pget((pid_t)name[0], PGET_CANDEBUG | PGET_NOTWEXIT, &p);
3595	if (error != 0) {
3596		sbuf_delete(&sb);
3597		return (error);
3598	}
3599	maxlen = req->oldptr != NULL ? req->oldlen : -1;
3600	error = kern_proc_filedesc_out(p, &sb, maxlen);
3601	error2 = sbuf_finish(&sb);
3602	sbuf_delete(&sb);
3603	return (error != 0 ? error : error2);
3604}
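
/*
 * Userland sketch (illustrative, not part of this file): the records
 * emitted above are normally consumed through sysctl(3), roughly
 *
 *	int mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_FILEDESC, pid };
 *	size_t len = 0;
 *
 *	sysctl(mib, 4, NULL, &len, NULL, 0);	(size the buffer)
 *	buf = malloc(len);
 *	sysctl(mib, 4, buf, &len, NULL, 0);	(fetch kinfo_file records)
 *
 * and then walked entry by entry using each record's packed kf_structsize,
 * which is essentially how procstat(1) obtains its file descriptor data.
 */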
3605
3606int
3607vntype_to_kinfo(int vtype)
3608{
3609	struct {
3610		int	vtype;
3611		int	kf_vtype;
3612	} vtypes_table[] = {
3613		{ VBAD, KF_VTYPE_VBAD },
3614		{ VBLK, KF_VTYPE_VBLK },
3615		{ VCHR, KF_VTYPE_VCHR },
3616		{ VDIR, KF_VTYPE_VDIR },
3617		{ VFIFO, KF_VTYPE_VFIFO },
3618		{ VLNK, KF_VTYPE_VLNK },
3619		{ VNON, KF_VTYPE_VNON },
3620		{ VREG, KF_VTYPE_VREG },
3621		{ VSOCK, KF_VTYPE_VSOCK }
3622	};
3623#define	NVTYPES	(sizeof(vtypes_table) / sizeof(*vtypes_table))
3624	unsigned int i;
3625
3626	/*
3627	 * Perform vtype translation.
3628	 */
3629	for (i = 0; i < NVTYPES; i++)
3630		if (vtypes_table[i].vtype == vtype)
3631			break;
3632	if (i < NVTYPES)
3633		return (vtypes_table[i].kf_vtype);
3634
3635	return (KF_VTYPE_UNKNOWN);
3636}
3637
3638static int
3639fill_vnode_info(struct vnode *vp, struct kinfo_file *kif)
3640{
3641	struct vattr va;
3642	char *fullpath, *freepath;
3643	int error;
3644
3645	if (vp == NULL)
3646		return (1);
3647	kif->kf_vnode_type = vntype_to_kinfo(vp->v_type);
3648	freepath = NULL;
3649	fullpath = "-";
3650	error = vn_fullpath(curthread, vp, &fullpath, &freepath);
3651	if (error == 0) {
3652		strlcpy(kif->kf_path, fullpath, sizeof(kif->kf_path));
3653	}
3654	if (freepath != NULL)
3655		free(freepath, M_TEMP);
3656
3657	/*
3658	 * Retrieve vnode attributes.
3659	 */
3660	va.va_fsid = VNOVAL;
3661	va.va_rdev = NODEV;
3662	vn_lock(vp, LK_SHARED | LK_RETRY);
3663	error = VOP_GETATTR(vp, &va, curthread->td_ucred);
3664	VOP_UNLOCK(vp, 0);
3665	if (error != 0)
3666		return (error);
3667	if (va.va_fsid != VNOVAL)
3668		kif->kf_un.kf_file.kf_file_fsid = va.va_fsid;
3669	else
3670		kif->kf_un.kf_file.kf_file_fsid =
3671		    vp->v_mount->mnt_stat.f_fsid.val[0];
3672	kif->kf_un.kf_file.kf_file_fileid = va.va_fileid;
3673	kif->kf_un.kf_file.kf_file_mode = MAKEIMODE(va.va_type, va.va_mode);
3674	kif->kf_un.kf_file.kf_file_size = va.va_size;
3675	kif->kf_un.kf_file.kf_file_rdev = va.va_rdev;
3676	return (0);
3677}
3678
3679static int
3680fill_socket_info(struct socket *so, struct kinfo_file *kif)
3681{
3682	struct sockaddr *sa;
3683	struct inpcb *inpcb;
3684	struct unpcb *unpcb;
3685	int error;
3686
3687	if (so == NULL)
3688		return (1);
3689	kif->kf_sock_domain = so->so_proto->pr_domain->dom_family;
3690	kif->kf_sock_type = so->so_type;
3691	kif->kf_sock_protocol = so->so_proto->pr_protocol;
3692	kif->kf_un.kf_sock.kf_sock_pcb = (uintptr_t)so->so_pcb;
3693	switch(kif->kf_sock_domain) {
3694	case AF_INET:
3695	case AF_INET6:
3696		if (kif->kf_sock_protocol == IPPROTO_TCP) {
3697			if (so->so_pcb != NULL) {
3698				inpcb = (struct inpcb *)(so->so_pcb);
3699				kif->kf_un.kf_sock.kf_sock_inpcb =
3700				    (uintptr_t)inpcb->inp_ppcb;
3701			}
3702		}
3703		break;
3704	case AF_UNIX:
3705		if (so->so_pcb != NULL) {
3706			unpcb = (struct unpcb *)(so->so_pcb);
3707			if (unpcb->unp_conn) {
3708				kif->kf_un.kf_sock.kf_sock_unpconn =
3709				    (uintptr_t)unpcb->unp_conn;
3710				kif->kf_un.kf_sock.kf_sock_rcv_sb_state =
3711				    so->so_rcv.sb_state;
3712				kif->kf_un.kf_sock.kf_sock_snd_sb_state =
3713				    so->so_snd.sb_state;
3714			}
3715		}
3716		break;
3717	}
3718	error = so->so_proto->pr_usrreqs->pru_sockaddr(so, &sa);
3719	if (error == 0 && sa->sa_len <= sizeof(kif->kf_sa_local)) {
3720		bcopy(sa, &kif->kf_sa_local, sa->sa_len);
3721		free(sa, M_SONAME);
3722	}
3723	error = so->so_proto->pr_usrreqs->pru_peeraddr(so, &sa);
3724	if (error == 0 && sa->sa_len <= sizeof(kif->kf_sa_peer)) {
3725		bcopy(sa, &kif->kf_sa_peer, sa->sa_len);
3726		free(sa, M_SONAME);
3727	}
3728	strncpy(kif->kf_path, so->so_proto->pr_domain->dom_name,
3729	    sizeof(kif->kf_path));
3730	return (0);
3731}
3732
3733static int
3734fill_pts_info(struct tty *tp, struct kinfo_file *kif)
3735{
3736
3737	if (tp == NULL)
3738		return (1);
3739	kif->kf_un.kf_pts.kf_pts_dev = tty_udev(tp);
3740	strlcpy(kif->kf_path, tty_devname(tp), sizeof(kif->kf_path));
3741	return (0);
3742}
3743
3744static int
3745fill_pipe_info(struct pipe *pi, struct kinfo_file *kif)
3746{
3747
3748	if (pi == NULL)
3749		return (1);
3750	kif->kf_un.kf_pipe.kf_pipe_addr = (uintptr_t)pi;
3751	kif->kf_un.kf_pipe.kf_pipe_peer = (uintptr_t)pi->pipe_peer;
3752	kif->kf_un.kf_pipe.kf_pipe_buffer_cnt = pi->pipe_buffer.cnt;
3753	return (0);
3754}
3755
3756static int
3757fill_procdesc_info(struct procdesc *pdp, struct kinfo_file *kif)
3758{
3759
3760	if (pdp == NULL)
3761		return (1);
3762	kif->kf_un.kf_proc.kf_pid = pdp->pd_pid;
3763	return (0);
3764}
3765
3766static int
3767fill_sem_info(struct file *fp, struct kinfo_file *kif)
3768{
3769	struct thread *td;
3770	struct stat sb;
3771
3772	td = curthread;
3773	if (fp->f_data == NULL)
3774		return (1);
3775	if (fo_stat(fp, &sb, td->td_ucred, td) != 0)
3776		return (1);
3777	if (ksem_info == NULL)
3778		return (1);
3779	ksem_info(fp->f_data, kif->kf_path, sizeof(kif->kf_path),
3780	    &kif->kf_un.kf_sem.kf_sem_value);
3781	kif->kf_un.kf_sem.kf_sem_mode = sb.st_mode;
3782	return (0);
3783}
3784
3785static int
3786fill_shm_info(struct file *fp, struct kinfo_file *kif)
3787{
3788	struct thread *td;
3789	struct stat sb;
3790
3791	td = curthread;
3792	if (fp->f_data == NULL)
3793		return (1);
3794	if (fo_stat(fp, &sb, td->td_ucred, td) != 0)
3795		return (1);
3796	shm_path(fp->f_data, kif->kf_path, sizeof(kif->kf_path));
3797	kif->kf_un.kf_file.kf_file_mode = sb.st_mode;
3798	kif->kf_un.kf_file.kf_file_size = sb.st_size;
3799	return (0);
3800}
3801
3802static SYSCTL_NODE(_kern_proc, KERN_PROC_FILEDESC, filedesc,
3803    CTLFLAG_RD|CTLFLAG_MPSAFE, sysctl_kern_proc_filedesc,
3804    "Process filedesc entries");
3805
3806#ifdef DDB
3807/*
3808 * For the purposes of debugging, generate a human-readable string for the
3809 * file type.
3810 */
3811static const char *
3812file_type_to_name(short type)
3813{
3814
3815	switch (type) {
3816	case 0:
3817		return ("zero");
3818	case DTYPE_VNODE:
3819		return ("vnod");
3820	case DTYPE_SOCKET:
3821		return ("sock");
3822	case DTYPE_PIPE:
3823		return ("pipe");
3824	case DTYPE_FIFO:
3825		return ("fifo");
3826	case DTYPE_KQUEUE:
3827		return ("kque");
3828	case DTYPE_CRYPTO:
3829		return ("crpt");
3830	case DTYPE_MQUEUE:
3831		return ("mque");
3832	case DTYPE_SHM:
3833		return ("shm");
3834	case DTYPE_SEM:
3835		return ("ksem");
3836	default:
3837		return ("unkn");
3838	}
3839}
3840
3841/*
3842 * For the purposes of debugging, identify a process (if any, perhaps one of
3843 * many) that references the passed file in its file descriptor array. Return
3844 * NULL if none.
3845 */
3846static struct proc *
3847file_to_first_proc(struct file *fp)
3848{
3849	struct filedesc *fdp;
3850	struct proc *p;
3851	int n;
3852
3853	FOREACH_PROC_IN_SYSTEM(p) {
3854		if (p->p_state == PRS_NEW)
3855			continue;
3856		fdp = p->p_fd;
3857		if (fdp == NULL)
3858			continue;
3859		for (n = 0; n <= fdp->fd_lastfile; n++) {
3860			if (fp == fdp->fd_ofiles[n].fde_file)
3861				return (p);
3862		}
3863	}
3864	return (NULL);
3865}
3866
3867static void
3868db_print_file(struct file *fp, int header)
3869{
3870	struct proc *p;
3871
3872	if (header)
3873		db_printf("%8s %4s %8s %8s %4s %5s %6s %8s %5s %12s\n",
3874		    "File", "Type", "Data", "Flag", "GCFl", "Count",
3875		    "MCount", "Vnode", "FPID", "FCmd");
3876	p = file_to_first_proc(fp);
3877	db_printf("%8p %4s %8p %08x %04x %5d %6d %8p %5d %12s\n", fp,
3878	    file_type_to_name(fp->f_type), fp->f_data, fp->f_flag,
3879	    0, fp->f_count, 0, fp->f_vnode,
3880	    p != NULL ? p->p_pid : -1, p != NULL ? p->p_comm : "-");
3881}
3882
3883DB_SHOW_COMMAND(file, db_show_file)
3884{
3885	struct file *fp;
3886
3887	if (!have_addr) {
3888		db_printf("usage: show file <addr>\n");
3889		return;
3890	}
3891	fp = (struct file *)addr;
3892	db_print_file(fp, 1);
3893}
3894
3895DB_SHOW_COMMAND(files, db_show_files)
3896{
3897	struct filedesc *fdp;
3898	struct file *fp;
3899	struct proc *p;
3900	int header;
3901	int n;
3902
3903	header = 1;
3904	FOREACH_PROC_IN_SYSTEM(p) {
3905		if (p->p_state == PRS_NEW)
3906			continue;
3907		if ((fdp = p->p_fd) == NULL)
3908			continue;
3909		for (n = 0; n <= fdp->fd_lastfile; ++n) {
3910			if ((fp = fdp->fd_ofiles[n].fde_file) == NULL)
3911				continue;
3912			db_print_file(fp, header);
3913			header = 0;
3914		}
3915	}
3916}
3917#endif
3918
3919SYSCTL_INT(_kern, KERN_MAXFILESPERPROC, maxfilesperproc, CTLFLAG_RW,
3920    &maxfilesperproc, 0, "Maximum files allowed open per process");
3921
3922SYSCTL_INT(_kern, KERN_MAXFILES, maxfiles, CTLFLAG_RW,
3923    &maxfiles, 0, "Maximum number of files");
3924
3925SYSCTL_INT(_kern, OID_AUTO, openfiles, CTLFLAG_RD,
3926    __DEVOLATILE(int *, &openfiles), 0, "System-wide number of open files");
3927
3928/* ARGSUSED*/
3929static void
3930filelistinit(void *dummy)
3931{
3932
3933	file_zone = uma_zcreate("Files", sizeof(struct file), NULL, NULL,
3934	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
3935	mtx_init(&sigio_lock, "sigio lock", NULL, MTX_DEF);
3936	mtx_init(&fdesc_mtx, "fdesc", NULL, MTX_DEF);
3937}
3938SYSINIT(select, SI_SUB_LOCK, SI_ORDER_FIRST, filelistinit, NULL);
3939
3940/*-------------------------------------------------------------------*/
3941
3942static int
3943badfo_readwrite(struct file *fp, struct uio *uio, struct ucred *active_cred,
3944    int flags, struct thread *td)
3945{
3946
3947	return (EBADF);
3948}
3949
3950static int
3951badfo_truncate(struct file *fp, off_t length, struct ucred *active_cred,
3952    struct thread *td)
3953{
3954
3955	return (EINVAL);
3956}
3957
3958static int
3959badfo_ioctl(struct file *fp, u_long com, void *data, struct ucred *active_cred,
3960    struct thread *td)
3961{
3962
3963	return (EBADF);
3964}
3965
3966static int
3967badfo_poll(struct file *fp, int events, struct ucred *active_cred,
3968    struct thread *td)
3969{
3970
3971	return (0);
3972}
3973
3974static int
3975badfo_kqfilter(struct file *fp, struct knote *kn)
3976{
3977
3978	return (EBADF);
3979}
3980
3981static int
3982badfo_stat(struct file *fp, struct stat *sb, struct ucred *active_cred,
3983    struct thread *td)
3984{
3985
3986	return (EBADF);
3987}
3988
3989static int
3990badfo_close(struct file *fp, struct thread *td)
3991{
3992
3993	return (EBADF);
3994}
3995
3996static int
3997badfo_chmod(struct file *fp, mode_t mode, struct ucred *active_cred,
3998    struct thread *td)
3999{
4000
4001	return (EBADF);
4002}
4003
4004static int
4005badfo_chown(struct file *fp, uid_t uid, gid_t gid, struct ucred *active_cred,
4006    struct thread *td)
4007{
4008
4009	return (EBADF);
4010}
4011
4012static int
4013badfo_sendfile(struct file *fp, int sockfd, struct uio *hdr_uio,
4014    struct uio *trl_uio, off_t offset, size_t nbytes, off_t *sent, int flags,
4015    int kflags, struct thread *td)
4016{
4017
4018	return (EBADF);
4019}
4020
4021struct fileops badfileops = {
4022	.fo_read = badfo_readwrite,
4023	.fo_write = badfo_readwrite,
4024	.fo_truncate = badfo_truncate,
4025	.fo_ioctl = badfo_ioctl,
4026	.fo_poll = badfo_poll,
4027	.fo_kqfilter = badfo_kqfilter,
4028	.fo_stat = badfo_stat,
4029	.fo_close = badfo_close,
4030	.fo_chmod = badfo_chmod,
4031	.fo_chown = badfo_chown,
4032	.fo_sendfile = badfo_sendfile,
4033};
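
/*
 * Note: badfileops is the fileops vector a freshly allocated struct file
 * starts out with (see falloc_noinstall() earlier in this file) until
 * finit() publishes the real one; _fget() above rejects such files with
 * EBADF and _fdrop() skips the fo_close() call for them.
 */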
4034
4035int
4036invfo_chmod(struct file *fp, mode_t mode, struct ucred *active_cred,
4037    struct thread *td)
4038{
4039
4040	return (EINVAL);
4041}
4042
4043int
4044invfo_chown(struct file *fp, uid_t uid, gid_t gid, struct ucred *active_cred,
4045    struct thread *td)
4046{
4047
4048	return (EINVAL);
4049}
4050
4051int
4052invfo_sendfile(struct file *fp, int sockfd, struct uio *hdr_uio,
4053    struct uio *trl_uio, off_t offset, size_t nbytes, off_t *sent, int flags,
4054    int kflags, struct thread *td)
4055{
4056
4057	return (EINVAL);
4058}
4059
4060/*-------------------------------------------------------------------*/
4061
4062/*
4063 * File Descriptor pseudo-device driver (/dev/fd/).
4064 *
4065 * Opening minor device N dup()s the file (if any) connected to file
4066 * descriptor N belonging to the calling process.  Note that this driver
4067 * consists of only the ``open()'' routine, because all subsequent
4068 * references to this file will be direct to the other driver.
4069 *
4070 * XXX: we could give this one a cloning event handler if necessary.
4071 */
4072
4073/* ARGSUSED */
4074static int
4075fdopen(struct cdev *dev, int mode, int type, struct thread *td)
4076{
4077
4078	/*
4079	 * XXX Kludge: set curthread->td_dupfd to contain the value of the
4080	 * file descriptor being sought for duplication. The error
4081	 * return ensures that the vnode for this device will be released
4082	 * by vn_open. Open will detect this special error and take the
4083	 * actions in dupfdopen() above. Other callers of vn_open or VOP_OPEN
4084	 * will simply report the error.
4085	 */
4086	td->td_dupfd = dev2unit(dev);
4087	return (ENODEV);
4088}
4089
4090static struct cdevsw fildesc_cdevsw = {
4091	.d_version =	D_VERSION,
4092	.d_open =	fdopen,
4093	.d_name =	"FD",
4094};
4095
4096static void
4097fildesc_drvinit(void *unused)
4098{
4099	struct cdev *dev;
4100
4101	dev = make_dev_credf(MAKEDEV_ETERNAL, &fildesc_cdevsw, 0, NULL,
4102	    UID_ROOT, GID_WHEEL, 0666, "fd/0");
4103	make_dev_alias(dev, "stdin");
4104	dev = make_dev_credf(MAKEDEV_ETERNAL, &fildesc_cdevsw, 1, NULL,
4105	    UID_ROOT, GID_WHEEL, 0666, "fd/1");
4106	make_dev_alias(dev, "stdout");
4107	dev = make_dev_credf(MAKEDEV_ETERNAL, &fildesc_cdevsw, 2, NULL,
4108	    UID_ROOT, GID_WHEEL, 0666, "fd/2");
4109	make_dev_alias(dev, "stderr");
4110}
4111
4112SYSINIT(fildescdev, SI_SUB_DRIVERS, SI_ORDER_MIDDLE, fildesc_drvinit, NULL);
4113