/*	$OpenBSD: vfs_subr.c,v 1.248 2016/06/19 11:54:33 natano Exp $	*/
/*	$NetBSD: vfs_subr.c,v 1.53 1996/04/22 01:39:13 christos Exp $	*/

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.13 (Berkeley) 4/18/94
 */

/*
 * External virtual filesystem routines
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/mount.h>
#include <sys/time.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/vnode.h>
#include <sys/lock.h>
#include <sys/stat.h>
#include <sys/acct.h>
#include <sys/namei.h>
#include <sys/ucred.h>
#include <sys/buf.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/syscallargs.h>
#include <sys/pool.h>
#include <sys/tree.h>
#include <sys/specdev.h>

#include <netinet/in.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm_vnode.h>

#include "softraid.h"

void sr_shutdown(void);

enum vtype iftovt_tab[16] = {
	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
};

int	vttoif_tab[9] = {
	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
	S_IFSOCK, S_IFIFO, S_IFMT,
};

int prtactive = 0;		/* 1 => print out reclaim of active vnodes */
int suid_clear = 1;		/* 1 => clear SUID / SGID on owner change */

/*
 * Insq/Remq for the vnode usage lists.
 */
#define	bufinsvn(bp, dp)	LIST_INSERT_HEAD(dp, bp, b_vnbufs)
#define	bufremvn(bp) {							\
	LIST_REMOVE(bp, b_vnbufs);					\
	LIST_NEXT(bp, b_vnbufs) = NOLIST;				\
}

struct freelst vnode_hold_list;	/* list of vnodes referencing buffers */
struct freelst vnode_free_list;	/* vnode free list */

struct mntlist mountlist;	/* mounted filesystem list */

void	vclean(struct vnode *, int, struct proc *);

void insmntque(struct vnode *, struct mount *);
int getdevvp(dev_t, struct vnode **, enum vtype);

int vfs_hang_addrlist(struct mount *, struct netexport *,
				  struct export_args *);
int vfs_free_netcred(struct radix_node *, void *, u_int);
void vfs_free_addrlist(struct netexport *);
void vputonfreelist(struct vnode *);

int vflush_vnode(struct vnode *, void *);
int maxvnodes;

#ifdef DEBUG
void printlockedvnodes(void);
#endif

struct pool vnode_pool;
struct pool uvm_vnode_pool;

static int rb_buf_compare(struct buf *b1, struct buf *b2);
RB_GENERATE(buf_rb_bufs, buf, b_rbbufs, rb_buf_compare);

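/*
 * Compare two buffers by logical block number; this orders the
 * per-vnode red-black tree of buffers.
 */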
static int
rb_buf_compare(struct buf *b1, struct buf *b2)
{
	if (b1->b_lblkno < b2->b_lblkno)
		return (-1);
	if (b1->b_lblkno > b2->b_lblkno)
		return (1);
	return (0);
}

/*
 * Initialize the vnode management data structures.
 */
void
vntblinit(void)
{
	/* buffer cache may need a vnode for each buffer */
	maxvnodes = 2 * initialvnodes;
	pool_init(&vnode_pool, sizeof(struct vnode), 0, 0, PR_WAITOK,
	    "vnodes", NULL);
	pool_init(&uvm_vnode_pool, sizeof(struct uvm_vnode), 0, 0, PR_WAITOK,
	    "uvmvnodes", NULL);
	TAILQ_INIT(&vnode_hold_list);
	TAILQ_INIT(&vnode_free_list);
	TAILQ_INIT(&mountlist);
	/*
	 * Initialize the filesystem syncer.
	 */
	vn_initialize_syncerd();

	rn_init(sizeof(struct sockaddr_in));
}

/*
 * Mark a mount point as busy. Used to synchronize access and to delay
 * unmounting.
 *
 * Default behaviour is to attempt getting a READ lock and in case of an
 * ongoing unmount, to wait for it to finish and then return failure.
 */
int
vfs_busy(struct mount *mp, int flags)
{
	int rwflags = 0;

	/* new mountpoints need their lock initialised */
	if (mp->mnt_lock.rwl_name == NULL)
		rw_init(&mp->mnt_lock, "vfslock");

	if (flags & VB_WRITE)
		rwflags |= RW_WRITE;
	else
		rwflags |= RW_READ;

	if (flags & VB_WAIT)
		rwflags |= RW_SLEEPFAIL;
	else
		rwflags |= RW_NOSLEEP;

	if (rw_enter(&mp->mnt_lock, rwflags))
		return (EBUSY);

	return (0);
}

/*
 * Free a busy file system
 */
void
vfs_unbusy(struct mount *mp)
{
	rw_exit(&mp->mnt_lock);
}

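/*
 * Test whether a mount point is busy, without blocking on its lock.
 */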
int
vfs_isbusy(struct mount *mp)
{
	if (RWLOCK_OWNER(&mp->mnt_lock) > 0)
		return (1);
	else
		return (0);
}

/*
 * Lookup a filesystem type, and if found allocate and initialize
 * a mount structure for it.
 *
 * Devname is usually updated by mount(8) after booting.
 */
int
vfs_rootmountalloc(char *fstypename, char *devname, struct mount **mpp)
{
	struct vfsconf *vfsp;
	struct mount *mp;

	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
		if (!strcmp(vfsp->vfc_name, fstypename))
			break;
	if (vfsp == NULL)
		return (ENODEV);
	mp = malloc(sizeof(*mp), M_MOUNT, M_WAITOK|M_ZERO);
	(void)vfs_busy(mp, VB_READ|VB_NOWAIT);
	LIST_INIT(&mp->mnt_vnodelist);
	mp->mnt_vfc = vfsp;
	mp->mnt_op = vfsp->vfc_vfsops;
	mp->mnt_flag = MNT_RDONLY;
	mp->mnt_vnodecovered = NULLVP;
	vfsp->vfc_refcount++;
	mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;
	strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
	mp->mnt_stat.f_mntonname[0] = '/';
	copystr(devname, mp->mnt_stat.f_mntfromname, MNAMELEN, 0);
	copystr(devname, mp->mnt_stat.f_mntfromspec, MNAMELEN, 0);
	*mpp = mp;
	return (0);
}

/*
 * Lookup a mount point by filesystem identifier.
 */
struct mount *
vfs_getvfs(fsid_t *fsid)
{
	struct mount *mp;

	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
			return (mp);
		}
	}

	return (NULL);
}


/*
 * Get a new unique fsid
 */
void
vfs_getnewfsid(struct mount *mp)
{
	static u_short xxxfs_mntid;

	fsid_t tfsid;
	int mtype;

	mtype = mp->mnt_vfc->vfc_typenum;
	mp->mnt_stat.f_fsid.val[0] = makedev(nblkdev + mtype, 0);
	mp->mnt_stat.f_fsid.val[1] = mtype;
	if (xxxfs_mntid == 0)
		++xxxfs_mntid;
	tfsid.val[0] = makedev(nblkdev + mtype, xxxfs_mntid);
	tfsid.val[1] = mtype;
	if (!TAILQ_EMPTY(&mountlist)) {
		while (vfs_getvfs(&tfsid)) {
			tfsid.val[0]++;
			xxxfs_mntid++;
		}
	}
	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
}

/*
 * Set vnode attributes to VNOVAL
 */
void
vattr_null(struct vattr *vap)
{

	vap->va_type = VNON;
	/*
	 * Don't get fancy: u_quad_t = u_int = VNOVAL leaves the u_quad_t
	 * with 2^31-1 instead of 2^64-1.  Just write'm out and let
	 * the compiler do its job.
	 */
	vap->va_mode = VNOVAL;
	vap->va_nlink = VNOVAL;
	vap->va_uid = VNOVAL;
	vap->va_gid = VNOVAL;
	vap->va_fsid = VNOVAL;
	vap->va_fileid = VNOVAL;
	vap->va_size = VNOVAL;
	vap->va_blocksize = VNOVAL;
	vap->va_atime.tv_sec = VNOVAL;
	vap->va_atime.tv_nsec = VNOVAL;
	vap->va_mtime.tv_sec = VNOVAL;
	vap->va_mtime.tv_nsec = VNOVAL;
	vap->va_ctime.tv_sec = VNOVAL;
	vap->va_ctime.tv_nsec = VNOVAL;
	vap->va_gen = VNOVAL;
	vap->va_flags = VNOVAL;
	vap->va_rdev = VNOVAL;
	vap->va_bytes = VNOVAL;
	vap->va_filerev = VNOVAL;
	vap->va_vaflags = 0;
}

/*
 * Routines having to do with the management of the vnode table.
 */
long numvnodes;

/*
 * Return the next vnode from the free list.
 */
int
getnewvnode(enum vtagtype tag, struct mount *mp, struct vops *vops,
    struct vnode **vpp)
{
	struct proc *p = curproc;
	struct freelst *listhd;
	static int toggle;
	struct vnode *vp;
	int s;

	/*
	 * allow maxvnodes to increase if the buffer cache itself
	 * is big enough to justify it. (we don't shrink it ever)
	 */
	maxvnodes = maxvnodes < bcstats.numbufs ? bcstats.numbufs
	    : maxvnodes;

	/*
	 * We must choose whether to allocate a new vnode or recycle an
	 * existing one. The criterion for allocating a new one is that
	 * the total number of vnodes is less than the number desired or
	 * there are no vnodes on either free list. Generally we only
	 * want to recycle vnodes that have no buffers associated with
	 * them, so we look first on the vnode_free_list. If it is empty,
	 * we next consider vnodes with referencing buffers on the
	 * vnode_hold_list. The toggle ensures that half the time we
	 * will use a vnode from the vnode_hold_list, and half the time
	 * we will allocate a new one unless the list has grown to twice
	 * the desired size. We are reluctant to recycle vnodes from the
	 * vnode_hold_list because we will lose the identity of all their
	 * referencing buffers.
	 */
	toggle ^= 1;
	if (numvnodes / 2 > maxvnodes)
		toggle = 0;

	s = splbio();
	if ((numvnodes < maxvnodes) ||
	    ((TAILQ_FIRST(listhd = &vnode_free_list) == NULL) &&
	    ((TAILQ_FIRST(listhd = &vnode_hold_list) == NULL) || toggle))) {
		splx(s);
		vp = pool_get(&vnode_pool, PR_WAITOK | PR_ZERO);
		vp->v_uvm = pool_get(&uvm_vnode_pool, PR_WAITOK | PR_ZERO);
		vp->v_uvm->u_vnode = vp;
		RB_INIT(&vp->v_bufs_tree);
		RB_INIT(&vp->v_nc_tree);
		TAILQ_INIT(&vp->v_cache_dst);
		numvnodes++;
	} else {
		for (vp = TAILQ_FIRST(listhd); vp != NULLVP;
		    vp = TAILQ_NEXT(vp, v_freelist)) {
			if (VOP_ISLOCKED(vp) == 0)
				break;
		}
		/*
		 * Unless this is a bad time of the month, at most
		 * the first NCPUS items on the free list are
		 * locked, so this is close enough to being empty.
		 */
		if (vp == NULL) {
			splx(s);
			tablefull("vnode");
			*vpp = 0;
			return (ENFILE);
		}

#ifdef DIAGNOSTIC
		if (vp->v_usecount) {
			vprint("free vnode", vp);
			panic("free vnode isn't");
		}
#endif

		TAILQ_REMOVE(listhd, vp, v_freelist);
		vp->v_bioflag &= ~VBIOONFREELIST;
		splx(s);

		if (vp->v_type != VBAD)
			vgonel(vp, p);
#ifdef DIAGNOSTIC
		if (vp->v_data) {
			vprint("cleaned vnode", vp);
			panic("cleaned vnode isn't");
		}
		s = splbio();
		if (vp->v_numoutput)
			panic("Clean vnode has pending I/O's");
		splx(s);
#endif
		vp->v_flag = 0;
		vp->v_socket = 0;
	}
	cache_purge(vp);
	vp->v_type = VNON;
	vp->v_tag = tag;
	vp->v_op = vops;
	insmntque(vp, mp);
	*vpp = vp;
	vp->v_usecount = 1;
	vp->v_data = 0;
	return (0);
}

/*
 * Move a vnode from one mount queue to another.
 */
void
insmntque(struct vnode *vp, struct mount *mp)
{
	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mount != NULL)
		LIST_REMOVE(vp, v_mntvnodes);
	/*
	 * Insert into list of vnodes for the new mount point, if available.
	 */
	if ((vp->v_mount = mp) != NULL)
		LIST_INSERT_HEAD(&mp->mnt_vnodelist, vp, v_mntvnodes);
}

/*
 * Create a vnode for a block device.
 * Used for root filesystem, argdev, and swap areas.
 * Also used for memory file system special devices.
 */
int
bdevvp(dev_t dev, struct vnode **vpp)
{
	return (getdevvp(dev, vpp, VBLK));
}

/*
 * Create a vnode for a character device.
 * Used for console handling.
 */
int
cdevvp(dev_t dev, struct vnode **vpp)
{
	return (getdevvp(dev, vpp, VCHR));
}

/*
 * Create a vnode for a device.
 * Used by bdevvp (block device) for root file system etc.,
 * and by cdevvp (character device) for console.
 */
int
getdevvp(dev_t dev, struct vnode **vpp, enum vtype type)
{
	struct vnode *vp;
	struct vnode *nvp;
	int error;

	if (dev == NODEV) {
		*vpp = NULLVP;
		return (0);
	}
	error = getnewvnode(VT_NON, NULL, &spec_vops, &nvp);
	if (error) {
		*vpp = NULLVP;
		return (error);
	}
	vp = nvp;
	vp->v_type = type;
	if ((nvp = checkalias(vp, dev, NULL)) != 0) {
		vput(vp);
		vp = nvp;
	}
	if (vp->v_type == VCHR && cdevsw[major(vp->v_rdev)].d_type == D_TTY)
		vp->v_flag |= VISTTY;
	*vpp = vp;
	return (0);
}

/*
 * Check to see if the new vnode represents a special device
 * for which we already have a vnode (either because of
 * bdevvp() or because of a different vnode representing
 * the same block device). If such an alias exists, deallocate
 * the existing contents and return the aliased vnode. The
 * caller is responsible for filling it with its new contents.
 */
struct vnode *
checkalias(struct vnode *nvp, dev_t nvp_rdev, struct mount *mp)
{
	struct proc *p = curproc;
	struct vnode *vp;
	struct vnode **vpp;

	if (nvp->v_type != VBLK && nvp->v_type != VCHR)
		return (NULLVP);

	vpp = &speclisth[SPECHASH(nvp_rdev)];
loop:
	for (vp = *vpp; vp; vp = vp->v_specnext) {
		if (nvp_rdev != vp->v_rdev || nvp->v_type != vp->v_type) {
			continue;
		}
		/*
		 * Alias, but not in use, so flush it out.
		 */
		if (vp->v_usecount == 0) {
			vgonel(vp, p);
			goto loop;
		}
		if (vget(vp, LK_EXCLUSIVE, p)) {
			goto loop;
		}
		break;
	}

	/*
	 * Common case is actually in the if statement
	 */
	if (vp == NULL || !(vp->v_tag == VT_NON && vp->v_type == VBLK)) {
		nvp->v_specinfo = malloc(sizeof(struct specinfo), M_VNODE,
			M_WAITOK);
		nvp->v_rdev = nvp_rdev;
		nvp->v_hashchain = vpp;
		nvp->v_specnext = *vpp;
		nvp->v_specmountpoint = NULL;
		nvp->v_speclockf = NULL;
		nvp->v_specbitmap = NULL;
		if (nvp->v_type == VCHR &&
		    (cdevsw[major(nvp_rdev)].d_flags & D_CLONE) &&
		    (minor(nvp_rdev) >> CLONE_SHIFT == 0)) {
			if (vp != NULLVP)
				nvp->v_specbitmap = vp->v_specbitmap;
			else
				nvp->v_specbitmap = malloc(CLONE_MAPSZ,
				    M_VNODE, M_WAITOK | M_ZERO);
		}
		*vpp = nvp;
		if (vp != NULLVP) {
			nvp->v_flag |= VALIASED;
			vp->v_flag |= VALIASED;
			vput(vp);
		}
		return (NULLVP);
	}

	/*
	 * This code is the uncommon case. It is called in case
	 * we found an alias that was VT_NON && vtype of VBLK
	 * This means we found a block device that was created
	 * using bdevvp.
	 * An example of such a vnode is the root partition device vnode
	 * created in ffs_mountroot.
	 *
	 * The vnodes created by bdevvp should not be aliased (why?).
	 */

	VOP_UNLOCK(vp, p);
	vclean(vp, 0, p);
	vp->v_op = nvp->v_op;
	vp->v_tag = nvp->v_tag;
	nvp->v_type = VNON;
	insmntque(vp, mp);
	return (vp);
}

/*
 * Grab a particular vnode from the free list, increment its
 * reference count and lock it. If the vnode lock bit is set,
 * the vnode is being eliminated in vgone. In that case, we
 * cannot grab it, so the process is awakened when the
 * transition is completed, and an error code is returned to
 * indicate that the vnode is no longer usable, possibly
 * having been changed to a new file system type.
 */
int
vget(struct vnode *vp, int flags, struct proc *p)
{
	int error, s, onfreelist;

	/*
	 * If the vnode is in the process of being cleaned out for
	 * another use, we wait for the cleaning to finish and then
	 * return failure. Cleaning is determined by checking that
	 * the VXLOCK flag is set.
	 */

	if (vp->v_flag & VXLOCK) {
		if (flags & LK_NOWAIT) {
			return (EBUSY);
		}

		vp->v_flag |= VXWANT;
		tsleep(vp, PINOD, "vget", 0);
		return (ENOENT);
	}

	onfreelist = vp->v_bioflag & VBIOONFREELIST;
	if (vp->v_usecount == 0 && onfreelist) {
		s = splbio();
		if (vp->v_holdcnt > 0)
			TAILQ_REMOVE(&vnode_hold_list, vp, v_freelist);
		else
			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
		vp->v_bioflag &= ~VBIOONFREELIST;
		splx(s);
	}

	vp->v_usecount++;
	if (flags & LK_TYPE_MASK) {
		if ((error = vn_lock(vp, flags, p)) != 0) {
			vp->v_usecount--;
			if (vp->v_usecount == 0 && onfreelist)
				vputonfreelist(vp);
		}
		return (error);
	}

	return (0);
}


/* Vnode reference. */
void
vref(struct vnode *vp)
{
#ifdef DIAGNOSTIC
	if (vp->v_usecount == 0)
		panic("vref used where vget required");
	if (vp->v_type == VNON)
		panic("vref on a VNON vnode");
#endif
	vp->v_usecount++;
}

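/*
 * Put an unreferenced vnode on the appropriate free list: the hold
 * list if buffers still reference it, the plain free list otherwise.
 * Dead (VBAD) vnodes go to the head so they are recycled first.
 */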
void
vputonfreelist(struct vnode *vp)
{
	int s;
	struct freelst *lst;

	s = splbio();
#ifdef DIAGNOSTIC
	if (vp->v_usecount != 0)
		panic("Use count is not zero!");

	if (vp->v_bioflag & VBIOONFREELIST) {
		vprint("vnode already on free list: ", vp);
		panic("vnode already on free list");
	}
#endif

	vp->v_bioflag |= VBIOONFREELIST;

	if (vp->v_holdcnt > 0)
		lst = &vnode_hold_list;
	else
		lst = &vnode_free_list;

	if (vp->v_type == VBAD)
		TAILQ_INSERT_HEAD(lst, vp, v_freelist);
	else
		TAILQ_INSERT_TAIL(lst, vp, v_freelist);

	splx(s);
}

/*
 * vput(), just unlock and vrele()
 */
void
vput(struct vnode *vp)
{
	struct proc *p = curproc;

#ifdef DIAGNOSTIC
	if (vp == NULL)
		panic("vput: null vp");
#endif

#ifdef DIAGNOSTIC
	if (vp->v_usecount == 0) {
		vprint("vput: bad ref count", vp);
		panic("vput: ref cnt");
	}
#endif
	vp->v_usecount--;
	if (vp->v_usecount > 0) {
		VOP_UNLOCK(vp, p);
		return;
	}

#ifdef DIAGNOSTIC
	if (vp->v_writecount != 0) {
		vprint("vput: bad writecount", vp);
		panic("vput: v_writecount != 0");
	}
#endif

	VOP_INACTIVE(vp, p);

	if (vp->v_usecount == 0 && !(vp->v_bioflag & VBIOONFREELIST))
		vputonfreelist(vp);
}

/*
 * Vnode release - use for active VNODES.
 * If count drops to zero, call inactive routine and return to freelist.
 * Returns 0 if it did not sleep.
 */
int
vrele(struct vnode *vp)
{
	struct proc *p = curproc;

#ifdef DIAGNOSTIC
	if (vp == NULL)
		panic("vrele: null vp");
#endif
#ifdef DIAGNOSTIC
	if (vp->v_usecount == 0) {
		vprint("vrele: bad ref count", vp);
		panic("vrele: ref cnt");
	}
#endif
	vp->v_usecount--;
	if (vp->v_usecount > 0) {
		return (0);
	}

#ifdef DIAGNOSTIC
	if (vp->v_writecount != 0) {
		vprint("vrele: bad writecount", vp);
		panic("vrele: v_writecount != 0");
	}
#endif

	if (vn_lock(vp, LK_EXCLUSIVE, p)) {
#ifdef DIAGNOSTIC
		vprint("vrele: cannot lock", vp);
#endif
		return (1);
	}

	VOP_INACTIVE(vp, p);

	if (vp->v_usecount == 0 && !(vp->v_bioflag & VBIOONFREELIST))
		vputonfreelist(vp);
	return (1);
}

/* Page or buffer structure gets a reference. */
void
vhold(struct vnode *vp)
{
	/*
	 * If it is on the freelist and the hold count is currently
	 * zero, move it to the hold list.
	 */
	if ((vp->v_bioflag & VBIOONFREELIST) &&
	    vp->v_holdcnt == 0 && vp->v_usecount == 0) {
		TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
		TAILQ_INSERT_TAIL(&vnode_hold_list, vp, v_freelist);
	}
	vp->v_holdcnt++;
}

/* Lose interest in a vnode. */
void
vdrop(struct vnode *vp)
{
#ifdef DIAGNOSTIC
	if (vp->v_holdcnt == 0)
		panic("vdrop: zero holdcnt");
#endif

	vp->v_holdcnt--;

	/*
	 * If it is on the holdlist and the hold count drops to
	 * zero, move it to the free list.
	 */
	if ((vp->v_bioflag & VBIOONFREELIST) &&
	    vp->v_holdcnt == 0 && vp->v_usecount == 0) {
		TAILQ_REMOVE(&vnode_hold_list, vp, v_freelist);
		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
	}
}

/*
 * Remove any vnodes in the vnode table belonging to mount point mp.
 *
 * If MNT_NOFORCE is specified, there should not be any active ones,
 * return error if any are found (nb: this is a user error, not a
 * system error). If MNT_FORCE is specified, detach any active vnodes
 * that are found.
 */
#ifdef DEBUG
int busyprt = 0;	/* print out busy vnodes */
struct ctldebug debug1 = { "busyprt", &busyprt };
#endif

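/*
 * Call func() on every vnode hanging off the mount point. The scan is
 * restarted from the head of the list whenever a vnode turns out to
 * have moved to another mount underneath us; iteration stops at the
 * first non-zero return from func().
 */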
int
vfs_mount_foreach_vnode(struct mount *mp,
    int (*func)(struct vnode *, void *), void *arg) {
	struct vnode *vp, *nvp;
	int error = 0;

loop:
	for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = nvp) {
		if (vp->v_mount != mp)
			goto loop;
		nvp = LIST_NEXT(vp, v_mntvnodes);

		error = func(vp, arg);

		if (error != 0)
			break;
	}

	return (error);
}

struct vflush_args {
	struct vnode *skipvp;
	int busy;
	int flags;
};

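/*
 * Per-vnode callback for vflush(): skip, clean or force-close the
 * vnode according to the flags in the vflush_args, counting vnodes
 * that remain busy.
 */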
int
vflush_vnode(struct vnode *vp, void *arg) {
	struct vflush_args *va = arg;
	struct proc *p = curproc;

	if (vp == va->skipvp) {
		return (0);
	}

	if ((va->flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
		return (0);
	}

	/*
	 * If WRITECLOSE is set, only flush out regular file
	 * vnodes open for writing.
	 */
	if ((va->flags & WRITECLOSE) &&
	    (vp->v_writecount == 0 || vp->v_type != VREG)) {
		return (0);
	}

	/*
	 * With v_usecount == 0, all we need to do is clear
	 * out the vnode data structures and we are done.
	 */
	if (vp->v_usecount == 0) {
		vgonel(vp, p);
		return (0);
	}

	/*
	 * If FORCECLOSE is set, forcibly close the vnode.
	 * For block or character devices, revert to an
	 * anonymous device. For all other files, just kill them.
	 */
	if (va->flags & FORCECLOSE) {
		if (vp->v_type != VBLK && vp->v_type != VCHR) {
			vgonel(vp, p);
		} else {
			vclean(vp, 0, p);
			vp->v_op = &spec_vops;
			insmntque(vp, NULL);
		}
		return (0);
	}

#ifdef DEBUG
	if (busyprt)
		vprint("vflush: busy vnode", vp);
#endif
	va->busy++;
	return (0);
}

int
vflush(struct mount *mp, struct vnode *skipvp, int flags)
{
	struct vflush_args va;
	va.skipvp = skipvp;
	va.busy = 0;
	va.flags = flags;

	vfs_mount_foreach_vnode(mp, vflush_vnode, &va);

	if (va.busy)
		return (EBUSY);
	return (0);
}

/*
 * Disassociate the underlying file system from a vnode.
 */
void
vclean(struct vnode *vp, int flags, struct proc *p)
{
	int active;

	/*
	 * Check to see if the vnode is in use.
	 * If so we have to reference it before we clean it out
	 * so that its count cannot fall to zero and generate a
	 * race against ourselves to recycle it.
	 */
	if ((active = vp->v_usecount) != 0)
		vp->v_usecount++;

	/*
	 * Prevent the vnode from being recycled or
	 * brought into use while we clean it out.
	 */
	if (vp->v_flag & VXLOCK)
		panic("vclean: deadlock");
	vp->v_flag |= VXLOCK;
	/*
	 * Even if the count is zero, the VOP_INACTIVE routine may still
	 * have the object locked while it cleans it out. The VOP_LOCK
	 * ensures that the VOP_INACTIVE routine is done with its work.
	 * For active vnodes, it ensures that no other activity can
	 * occur while the underlying object is being cleaned out.
	 */
	VOP_LOCK(vp, LK_DRAIN | LK_EXCLUSIVE, p);

	/*
	 * Clean out any VM data associated with the vnode.
	 */
	uvm_vnp_terminate(vp);
	/*
	 * Clean out any buffers associated with the vnode.
	 */
	if (flags & DOCLOSE)
		vinvalbuf(vp, V_SAVE, NOCRED, p, 0, 0);
	/*
	 * If purging an active vnode, it must be closed and
	 * deactivated before being reclaimed. Note that the
	 * VOP_INACTIVE will unlock the vnode.
	 */
	if (active) {
		if (flags & DOCLOSE)
			VOP_CLOSE(vp, FNONBLOCK, NOCRED, p);
		VOP_INACTIVE(vp, p);
	} else {
		/*
		 * Any other processes trying to obtain this lock must first
		 * wait for VXLOCK to clear, then call the new lock operation.
		 */
		VOP_UNLOCK(vp, p);
	}

	/*
	 * Reclaim the vnode.
	 */
	if (VOP_RECLAIM(vp, p))
		panic("vclean: cannot reclaim");
	if (active) {
		vp->v_usecount--;
		if (vp->v_usecount == 0) {
			if (vp->v_holdcnt > 0)
				panic("vclean: not clean");
			vputonfreelist(vp);
		}
	}
	cache_purge(vp);

	/*
	 * Done with purge, notify sleepers of the grim news.
	 */
	vp->v_op = &dead_vops;
	VN_KNOTE(vp, NOTE_REVOKE);
	vp->v_tag = VT_NON;
	vp->v_flag &= ~VXLOCK;
#ifdef VFSLCKDEBUG
	vp->v_flag &= ~VLOCKSWORK;
#endif
	if (vp->v_flag & VXWANT) {
		vp->v_flag &= ~VXWANT;
		wakeup(vp);
	}
}

/*
 * Recycle an unused vnode to the front of the free list.
 */
int
vrecycle(struct vnode *vp, struct proc *p)
{
	if (vp->v_usecount == 0) {
		vgonel(vp, p);
		return (1);
	}
	return (0);
}

/*
 * Eliminate all activity associated with a vnode
 * in preparation for reuse.
 */
void
vgone(struct vnode *vp)
{
	struct proc *p = curproc;
	vgonel(vp, p);
}

/*
 * vgone, with struct proc.
 */
void
vgonel(struct vnode *vp, struct proc *p)
{
	struct vnode *vq;
	struct vnode *vx;

	/*
	 * If a vgone (or vclean) is already in progress,
	 * wait until it is done and return.
	 */
	if (vp->v_flag & VXLOCK) {
		vp->v_flag |= VXWANT;
		tsleep(vp, PINOD, "vgone", 0);
		return;
	}

	/*
	 * Clean out the filesystem specific data.
	 */
	vclean(vp, DOCLOSE, p);
	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mount != NULL)
		insmntque(vp, NULL);
	/*
	 * If special device, remove it from special device alias list
	 * if it is on one.
	 */
	if ((vp->v_type == VBLK || vp->v_type == VCHR) && vp->v_specinfo != 0) {
		if ((vp->v_flag & VALIASED) == 0 && vp->v_type == VCHR &&
		    (cdevsw[major(vp->v_rdev)].d_flags & D_CLONE) &&
		    (minor(vp->v_rdev) >> CLONE_SHIFT == 0)) {
			free(vp->v_specbitmap, M_VNODE, CLONE_MAPSZ);
		}
		if (*vp->v_hashchain == vp) {
			*vp->v_hashchain = vp->v_specnext;
		} else {
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_specnext != vp)
					continue;
				vq->v_specnext = vp->v_specnext;
				break;
			}
			if (vq == NULL)
				panic("missing bdev");
		}
		if (vp->v_flag & VALIASED) {
			vx = NULL;
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_rdev != vp->v_rdev ||
				    vq->v_type != vp->v_type)
					continue;
				if (vx)
					break;
				vx = vq;
			}
			if (vx == NULL)
				panic("missing alias");
			if (vq == NULL)
				vx->v_flag &= ~VALIASED;
			vp->v_flag &= ~VALIASED;
		}
		free(vp->v_specinfo, M_VNODE, sizeof(struct specinfo));
		vp->v_specinfo = NULL;
	}
	/*
	 * If it is on the freelist and not already at the head,
	 * move it to the head of the list.
	 */
	vp->v_type = VBAD;

	/*
	 * Move onto the free list, unless we were called from
	 * getnewvnode and we're not on any free list
	 */
	if (vp->v_usecount == 0 &&
	    (vp->v_bioflag & VBIOONFREELIST)) {
		int s;

		s = splbio();

		if (vp->v_holdcnt > 0)
			panic("vgonel: not clean");

		if (TAILQ_FIRST(&vnode_free_list) != vp) {
			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
			TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
		}
		splx(s);
	}
}

/*
 * Lookup a vnode by device number.
 */
int
vfinddev(dev_t dev, enum vtype type, struct vnode **vpp)
{
	struct vnode *vp;
	int rc = 0;

	for (vp = speclisth[SPECHASH(dev)]; vp; vp = vp->v_specnext) {
		if (dev != vp->v_rdev || type != vp->v_type)
			continue;
		*vpp = vp;
		rc = 1;
		break;
	}
	return (rc);
}

/*
 * Revoke all the vnodes corresponding to the specified minor number
 * range (endpoints inclusive) of the specified major.
 */
void
vdevgone(int maj, int minl, int minh, enum vtype type)
{
	struct vnode *vp;
	int mn;

	for (mn = minl; mn <= minh; mn++)
		if (vfinddev(makedev(maj, mn), type, &vp))
			VOP_REVOKE(vp, REVOKEALL);
}

/*
 * Calculate the total number of references to a special device.
 */
int
vcount(struct vnode *vp)
{
	struct vnode *vq, *vnext;
	int count;

loop:
	if ((vp->v_flag & VALIASED) == 0)
		return (vp->v_usecount);
	for (count = 0, vq = *vp->v_hashchain; vq; vq = vnext) {
		vnext = vq->v_specnext;
		if (vq->v_rdev != vp->v_rdev || vq->v_type != vp->v_type)
			continue;
		/*
		 * Alias, but not in use, so flush it out.
		 */
		if (vq->v_usecount == 0 && vq != vp) {
			vgone(vq);
			goto loop;
		}
		count += vq->v_usecount;
	}
	return (count);
}

#if defined(DEBUG) || defined(DIAGNOSTIC)
/*
 * Print out a description of a vnode.
 */
static char *typename[] =
   { "VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD" };

void
vprint(char *label, struct vnode *vp)
{
	char buf[64];

	if (label != NULL)
		printf("%s: ", label);
	printf("%p, type %s, use %u, write %u, hold %u,",
		vp, typename[vp->v_type], vp->v_usecount, vp->v_writecount,
		vp->v_holdcnt);
	buf[0] = '\0';
	if (vp->v_flag & VROOT)
		strlcat(buf, "|VROOT", sizeof buf);
	if (vp->v_flag & VTEXT)
		strlcat(buf, "|VTEXT", sizeof buf);
	if (vp->v_flag & VSYSTEM)
		strlcat(buf, "|VSYSTEM", sizeof buf);
	if (vp->v_flag & VXLOCK)
		strlcat(buf, "|VXLOCK", sizeof buf);
	if (vp->v_flag & VXWANT)
		strlcat(buf, "|VXWANT", sizeof buf);
	if (vp->v_bioflag & VBIOWAIT)
		strlcat(buf, "|VBIOWAIT", sizeof buf);
	if (vp->v_bioflag & VBIOONFREELIST)
		strlcat(buf, "|VBIOONFREELIST", sizeof buf);
	if (vp->v_bioflag & VBIOONSYNCLIST)
		strlcat(buf, "|VBIOONSYNCLIST", sizeof buf);
	if (vp->v_flag & VALIASED)
		strlcat(buf, "|VALIASED", sizeof buf);
	if (buf[0] != '\0')
		printf(" flags (%s)", &buf[1]);
	if (vp->v_data == NULL) {
		printf("\n");
	} else {
		printf("\n\t");
		VOP_PRINT(vp);
	}
}
#endif /* DEBUG || DIAGNOSTIC */

#ifdef DEBUG
/*
 * List all of the locked vnodes in the system.
 * Called when debugging the kernel.
 */
void
printlockedvnodes(void)
{
	struct mount *mp, *nmp;
	struct vnode *vp;

	printf("Locked vnodes\n");

	TAILQ_FOREACH_SAFE(mp, &mountlist, mnt_list, nmp) {
		if (vfs_busy(mp, VB_READ|VB_NOWAIT))
			continue;
		LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
			if (VOP_ISLOCKED(vp))
				vprint(NULL, vp);
		}
		vfs_unbusy(mp);
	}
}
#endif

/*
 * Top level filesystem related information gathering.
 */
int
vfs_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen, struct proc *p)
{
	struct vfsconf *vfsp, *tmpvfsp;
	int ret;

	/* all sysctl names at this level are at least name and field */
	if (namelen < 2)
		return (ENOTDIR);		/* overloaded */

	if (name[0] != VFS_GENERIC) {
		for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
			if (vfsp->vfc_typenum == name[0])
				break;

		if (vfsp == NULL)
			return (EOPNOTSUPP);

		return ((*vfsp->vfc_vfsops->vfs_sysctl)(&name[1], namelen - 1,
		    oldp, oldlenp, newp, newlen, p));
	}

	switch (name[1]) {
	case VFS_MAXTYPENUM:
		return (sysctl_rdint(oldp, oldlenp, newp, maxvfsconf));

	case VFS_CONF:
		if (namelen < 3)
			return (ENOTDIR);	/* overloaded */

		for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
			if (vfsp->vfc_typenum == name[2])
				break;

		if (vfsp == NULL)
			return (EOPNOTSUPP);

		/* Make a copy, clear out kernel pointers */
		tmpvfsp = malloc(sizeof(*tmpvfsp), M_TEMP, M_WAITOK);
		memcpy(tmpvfsp, vfsp, sizeof(*tmpvfsp));
		tmpvfsp->vfc_vfsops = NULL;
		tmpvfsp->vfc_next = NULL;

		ret = sysctl_rdstruct(oldp, oldlenp, newp, tmpvfsp,
		    sizeof(struct vfsconf));

		free(tmpvfsp, M_TEMP, sizeof(*tmpvfsp));
		return (ret);
	case VFS_BCACHESTAT:	/* buffer cache statistics */
		ret = sysctl_rdstruct(oldp, oldlenp, newp, &bcstats,
		    sizeof(struct bcachestats));
		return (ret);
	}
	return (EOPNOTSUPP);
}

/*
 * Check to see if a filesystem is mounted on a block device.
 */
int
vfs_mountedon(struct vnode *vp)
{
	struct vnode *vq;
	int error = 0;

	if (vp->v_specmountpoint != NULL)
		return (EBUSY);
	if (vp->v_flag & VALIASED) {
		for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
			if (vq->v_rdev != vp->v_rdev ||
			    vq->v_type != vp->v_type)
				continue;
			if (vq->v_specmountpoint != NULL) {
				error = EBUSY;
				break;
			}
		}
	}
	return (error);
}

/*
 * Build hash lists of net addresses and hang them off the mount point.
 * Called by ufs_mount() to set up the lists of export addresses.
 */
int
vfs_hang_addrlist(struct mount *mp, struct netexport *nep,
    struct export_args *argp)
{
	struct netcred *np;
	struct radix_node_head *rnh;
	int nplen, i;
	struct radix_node *rn;
	struct sockaddr *saddr, *smask = 0;
	int error;

	if (argp->ex_addrlen == 0) {
		if (mp->mnt_flag & MNT_DEFEXPORTED)
			return (EPERM);
		np = &nep->ne_defexported;
		/* fill in the kernel's ucred from userspace's xucred */
		if ((error = crfromxucred(&np->netc_anon, &argp->ex_anon)))
			return (error);
		mp->mnt_flag |= MNT_DEFEXPORTED;
		goto finish;
	}
	if (argp->ex_addrlen > MLEN || argp->ex_masklen > MLEN ||
	    argp->ex_addrlen < 0 || argp->ex_masklen < 0)
		return (EINVAL);
	nplen = sizeof(struct netcred) + argp->ex_addrlen + argp->ex_masklen;
	np = (struct netcred *)malloc(nplen, M_NETADDR, M_WAITOK|M_ZERO);
	saddr = (struct sockaddr *)(np + 1);
	error = copyin(argp->ex_addr, saddr, argp->ex_addrlen);
	if (error)
		goto out;
	if (saddr->sa_len > argp->ex_addrlen)
		saddr->sa_len = argp->ex_addrlen;
	if (argp->ex_masklen) {
		smask = (struct sockaddr *)((caddr_t)saddr + argp->ex_addrlen);
		error = copyin(argp->ex_mask, smask, argp->ex_masklen);
		if (error)
			goto out;
		if (smask->sa_len > argp->ex_masklen)
			smask->sa_len = argp->ex_masklen;
	}
	/* fill in the kernel's ucred from userspace's xucred */
	if ((error = crfromxucred(&np->netc_anon, &argp->ex_anon)))
		goto out;
	i = saddr->sa_family;
	switch (i) {
	case AF_INET:
		if ((rnh = nep->ne_rtable_inet) == NULL) {
			if (!rn_inithead((void **)&nep->ne_rtable_inet,
			    offsetof(struct sockaddr_in, sin_addr))) {
				error = ENOBUFS;
				goto out;
			}
			rnh = nep->ne_rtable_inet;
		}
		break;
	default:
		error = EINVAL;
		goto out;
	}
	rn = rn_addroute(saddr, smask, rnh, np->netc_rnodes, 0);
	if (rn == 0 || np != (struct netcred *)rn) { /* already exists */
		error = EPERM;
		goto out;
	}
finish:
	np->netc_exflags = argp->ex_flags;
	return (0);
out:
	free(np, M_NETADDR, nplen);
	return (error);
}

int
vfs_free_netcred(struct radix_node *rn, void *w, u_int id)
{
	struct radix_node_head *rnh = (struct radix_node_head *)w;

	rn_delete(rn->rn_key, rn->rn_mask, rnh, NULL);
	free(rn, M_NETADDR, 0);
	return (0);
}

/*
 * Free the net address hash lists that are hanging off the mount points.
 */
void
vfs_free_addrlist(struct netexport *nep)
{
	struct radix_node_head *rnh;

	if ((rnh = nep->ne_rtable_inet) != NULL) {
		rn_walktree(rnh, vfs_free_netcred, rnh);
		free(rnh, M_RTABLE, 0);
		nep->ne_rtable_inet = NULL;
	}
}

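/*
 * Update the export list of a mount point as requested by the
 * export_args flags: delete the current exports and/or hang a
 * new address list off the mount point.
 */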
int
vfs_export(struct mount *mp, struct netexport *nep, struct export_args *argp)
{
	int error;

	if (argp->ex_flags & MNT_DELEXPORT) {
		vfs_free_addrlist(nep);
		mp->mnt_flag &= ~(MNT_EXPORTED | MNT_DEFEXPORTED);
	}
	if (argp->ex_flags & MNT_EXPORTED) {
		if ((error = vfs_hang_addrlist(mp, nep, argp)) != 0)
			return (error);
		mp->mnt_flag |= MNT_EXPORTED;
	}
	return (0);
}

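/*
 * Look up the export credentials matching the address in nam,
 * falling back to the default export entry if one is defined.
 */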
struct netcred *
vfs_export_lookup(struct mount *mp, struct netexport *nep, struct mbuf *nam)
{
	struct netcred *np;
	struct radix_node_head *rnh;
	struct sockaddr *saddr;

	np = NULL;
	if (mp->mnt_flag & MNT_EXPORTED) {
		/*
		 * Lookup in the export list first.
		 */
		if (nam != NULL) {
			saddr = mtod(nam, struct sockaddr *);
			switch (saddr->sa_family) {
			case AF_INET:
				rnh = nep->ne_rtable_inet;
				break;
			default:
				rnh = NULL;
				break;
			}
			if (rnh != NULL)
				np = (struct netcred *)rn_match(saddr, rnh);
		}
		/*
		 * If no address match, use the default if it exists.
		 */
		if (np == NULL && mp->mnt_flag & MNT_DEFEXPORTED)
			np = &nep->ne_defexported;
	}
	return (np);
}

/*
 * Do the usual access checking.
 * file_mode, uid and gid are from the vnode in question,
 * while acc_mode and cred are from the VOP_ACCESS parameter list
 */
int
vaccess(enum vtype type, mode_t file_mode, uid_t uid, gid_t gid,
    mode_t acc_mode, struct ucred *cred)
{
	mode_t mask;

	/* User id 0 always gets read/write access. */
	if (cred->cr_uid == 0) {
		/* For VEXEC, at least one of the execute bits must be set. */
		if ((acc_mode & VEXEC) && type != VDIR &&
		    (file_mode & (S_IXUSR|S_IXGRP|S_IXOTH)) == 0)
			return EACCES;
		return 0;
	}

	mask = 0;

	/* Otherwise, check the owner. */
	if (cred->cr_uid == uid) {
		if (acc_mode & VEXEC)
			mask |= S_IXUSR;
		if (acc_mode & VREAD)
			mask |= S_IRUSR;
		if (acc_mode & VWRITE)
			mask |= S_IWUSR;
		return (file_mode & mask) == mask ? 0 : EACCES;
	}

	/* Otherwise, check the groups. */
	if (groupmember(gid, cred)) {
		if (acc_mode & VEXEC)
			mask |= S_IXGRP;
		if (acc_mode & VREAD)
			mask |= S_IRGRP;
		if (acc_mode & VWRITE)
			mask |= S_IWGRP;
		return (file_mode & mask) == mask ? 0 : EACCES;
	}

	/* Otherwise, check everyone else. */
	if (acc_mode & VEXEC)
		mask |= S_IXOTH;
	if (acc_mode & VREAD)
		mask |= S_IROTH;
	if (acc_mode & VWRITE)
		mask |= S_IWOTH;
	return (file_mode & mask) == mask ? 0 : EACCES;
}

/*
 * Unmount all file systems.
 * We traverse the list in reverse order under the assumption that doing so
 * will avoid needing to worry about dependencies.
 */
void
vfs_unmountall(void)
{
	struct mount *mp, *nmp;
	int allerror, error, again = 1;

retry:
	allerror = 0;
	TAILQ_FOREACH_REVERSE_SAFE(mp, &mountlist, mntlist, mnt_list, nmp) {
		if ((vfs_busy(mp, VB_WRITE|VB_NOWAIT)) != 0)
			continue;
		if ((error = dounmount(mp, MNT_FORCE, curproc, NULL)) != 0) {
			printf("unmount of %s failed with error %d\n",
			    mp->mnt_stat.f_mntonname, error);
			allerror = 1;
		}
	}

	if (allerror) {
		printf("WARNING: some file systems would not unmount\n");
		if (again) {
			printf("retrying\n");
			again = 0;
			goto retry;
		}
	}
}

/*
 * Sync and unmount file systems before shutting down.
 */
void
vfs_shutdown(void)
{
#ifdef ACCOUNTING
	acct_shutdown();
#endif

	/* XXX Should suspend scheduling. */
	(void) spl0();

	printf("syncing disks... ");

	if (panicstr == 0) {
		/* Sync before unmount, in case we hang on something. */
		sys_sync(&proc0, NULL, NULL);

		/* Unmount file systems. */
		vfs_unmountall();
	}

	if (vfs_syncwait(1))
		printf("giving up\n");
	else
		printf("done\n");

#if NSOFTRAID > 0
	sr_shutdown();
#endif
}

/*
 * perform sync() operation and wait for buffers to flush.
 * assumptions: called w/ scheduler disabled and physical io enabled
 * for now called at spl0() XXX
 */
int
vfs_syncwait(int verbose)
{
	struct buf *bp;
	int iter, nbusy, dcount, s;
	struct proc *p;
#ifdef MULTIPROCESSOR
	int hold_count;
#endif

	p = curproc ? curproc : &proc0;
	sys_sync(p, NULL, NULL);

	/* Wait for sync to finish. */
	dcount = 10000;
	for (iter = 0; iter < 20; iter++) {
		nbusy = 0;
		LIST_FOREACH(bp, &bufhead, b_list) {
			if ((bp->b_flags & (B_BUSY|B_INVAL|B_READ)) == B_BUSY)
				nbusy++;
			/*
			 * With soft updates, some buffers that are
			 * written will be remarked as dirty until other
			 * buffers are written.
			 */
			if (bp->b_flags & B_DELWRI) {
				s = splbio();
				bremfree(bp);
				buf_acquire(bp);
				splx(s);
				nbusy++;
				bawrite(bp);
				if (dcount-- <= 0) {
					if (verbose)
						printf("softdep ");
					return 1;
				}
			}
		}
		if (nbusy == 0)
			break;
		if (verbose)
			printf("%d ", nbusy);
#ifdef MULTIPROCESSOR
		if (__mp_lock_held(&kernel_lock))
			hold_count = __mp_release_all(&kernel_lock);
		else
			hold_count = 0;
#endif
		DELAY(40000 * iter);
#ifdef MULTIPROCESSOR
		if (hold_count)
			__mp_acquire_count(&kernel_lock, hold_count);
#endif
	}

	return nbusy;
}

/*
 * posix file system related system variables.
 */
int
fs_posix_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen, struct proc *p)
{
	/* all sysctl names at this level are terminal */
	if (namelen != 1)
		return (ENOTDIR);

	switch (name[0]) {
	case FS_POSIX_SETUID:
		if (newp && securelevel > 0)
			return (EPERM);
		return (sysctl_int(oldp, oldlenp, newp, newlen, &suid_clear));
	default:
		return (EOPNOTSUPP);
	}
	/* NOTREACHED */
}

/*
 * file system related system variables.
 */
int
fs_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen, struct proc *p)
{
	sysctlfn *fn;

	switch (name[0]) {
	case FS_POSIX:
		fn = fs_posix_sysctl;
		break;
	default:
		return (EOPNOTSUPP);
	}
	return (*fn)(name + 1, namelen - 1, oldp, oldlenp, newp, newlen, p);
}


/*
 * Routines dealing with vnodes and buffers
 */

/*
 * Wait for all outstanding I/Os to complete
 *
 * Manipulates v_numoutput. Must be called at splbio()
 */
int
vwaitforio(struct vnode *vp, int slpflag, char *wmesg, int timeo)
{
	int error = 0;

	splassert(IPL_BIO);

	while (vp->v_numoutput) {
		vp->v_bioflag |= VBIOWAIT;
		error = tsleep(&vp->v_numoutput,
		    slpflag | (PRIBIO + 1), wmesg, timeo);
		if (error)
			break;
	}

	return (error);
}

/*
 * Update outstanding I/O count and do wakeup if requested.
 *
 * Manipulates v_numoutput. Must be called at splbio()
 */
void
vwakeup(struct vnode *vp)
{
	splassert(IPL_BIO);

	if (vp != NULL) {
		if (vp->v_numoutput-- == 0)
			panic("vwakeup: neg numoutput");
		if ((vp->v_bioflag & VBIOWAIT) && vp->v_numoutput == 0) {
			vp->v_bioflag &= ~VBIOWAIT;
			wakeup(&vp->v_numoutput);
		}
	}
}

/*
 * Flush out and invalidate all buffers associated with a vnode.
 * Called with the underlying object locked.
 */
int
vinvalbuf(struct vnode *vp, int flags, struct ucred *cred, struct proc *p,
    int slpflag, int slptimeo)
{
	struct buf *bp;
	struct buf *nbp, *blist;
	int s, error;

#ifdef VFSLCKDEBUG
	if ((vp->v_flag & VLOCKSWORK) && !VOP_ISLOCKED(vp))
		panic("vinvalbuf(): vp isn't locked");
#endif

	if (flags & V_SAVE) {
		s = splbio();
		vwaitforio(vp, 0, "vinvalbuf", 0);
		if (!LIST_EMPTY(&vp->v_dirtyblkhd)) {
			splx(s);
			if ((error = VOP_FSYNC(vp, cred, MNT_WAIT, p)) != 0)
				return (error);
			s = splbio();
			if (vp->v_numoutput > 0 ||
			    !LIST_EMPTY(&vp->v_dirtyblkhd))
				panic("vinvalbuf: dirty bufs");
		}
		splx(s);
	}
loop:
	s = splbio();
	for (;;) {
		if ((blist = LIST_FIRST(&vp->v_cleanblkhd)) &&
		    (flags & V_SAVEMETA))
			while (blist && blist->b_lblkno < 0)
				blist = LIST_NEXT(blist, b_vnbufs);
		if (blist == NULL &&
		    (blist = LIST_FIRST(&vp->v_dirtyblkhd)) &&
		    (flags & V_SAVEMETA))
			while (blist && blist->b_lblkno < 0)
				blist = LIST_NEXT(blist, b_vnbufs);
		if (!blist)
			break;

		for (bp = blist; bp; bp = nbp) {
			nbp = LIST_NEXT(bp, b_vnbufs);
			if (flags & V_SAVEMETA && bp->b_lblkno < 0)
				continue;
			if (bp->b_flags & B_BUSY) {
				bp->b_flags |= B_WANTED;
				error = tsleep(bp, slpflag | (PRIBIO + 1),
				    "vinvalbuf", slptimeo);
				if (error) {
					splx(s);
					return (error);
				}
				break;
			}
			bremfree(bp);
			/*
			 * XXX Since there are no node locks for NFS, I believe
			 * there is a slight chance that a delayed write will
			 * occur while sleeping just above, so check for it.
			 */
			if ((bp->b_flags & B_DELWRI) && (flags & V_SAVE)) {
				buf_acquire(bp);
				splx(s);
				(void) VOP_BWRITE(bp);
				goto loop;
			}
			buf_acquire_nomap(bp);
			bp->b_flags |= B_INVAL;
			brelse(bp);
		}
	}
	if (!(flags & V_SAVEMETA) &&
	    (!LIST_EMPTY(&vp->v_dirtyblkhd) || !LIST_EMPTY(&vp->v_cleanblkhd)))
		panic("vinvalbuf: flush failed");
	splx(s);
	return (0);
}

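/*
 * Write out all dirty buffers associated with a vnode. If sync is set,
 * wait for the I/O to drain and rescan until the dirty list is empty.
 */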
void
vflushbuf(struct vnode *vp, int sync)
{
	struct buf *bp, *nbp;
	int s;

loop:
	s = splbio();
	for (bp = LIST_FIRST(&vp->v_dirtyblkhd); bp != NULL; bp = nbp) {
		nbp = LIST_NEXT(bp, b_vnbufs);
		if ((bp->b_flags & B_BUSY))
			continue;
		if ((bp->b_flags & B_DELWRI) == 0)
			panic("vflushbuf: not dirty");
		bremfree(bp);
		buf_acquire(bp);
		splx(s);
		/*
		 * Wait for I/O associated with indirect blocks to complete,
		 * since there is no way to quickly wait for them below.
		 */
		if (bp->b_vp == vp || sync == 0)
			(void) bawrite(bp);
		else
			(void) bwrite(bp);
		goto loop;
	}
	if (sync == 0) {
		splx(s);
		return;
	}
	vwaitforio(vp, 0, "vflushbuf", 0);
	if (!LIST_EMPTY(&vp->v_dirtyblkhd)) {
		splx(s);
#ifdef DIAGNOSTIC
		vprint("vflushbuf: dirty", vp);
#endif
		goto loop;
	}
	splx(s);
}

/*
 * Associate a buffer with a vnode.
 *
 * Manipulates buffer vnode queues. Must be called at splbio().
 */
void
bgetvp(struct vnode *vp, struct buf *bp)
{
	splassert(IPL_BIO);

	if (bp->b_vp)
		panic("bgetvp: not free");
	vhold(vp);
	bp->b_vp = vp;
	if (vp->v_type == VBLK || vp->v_type == VCHR)
		bp->b_dev = vp->v_rdev;
	else
		bp->b_dev = NODEV;
	/*
	 * Insert onto list for new vnode.
	 */
	bufinsvn(bp, &vp->v_cleanblkhd);
}

/*
 * Disassociate a buffer from a vnode.
 *
 * Manipulates vnode buffer queues. Must be called at splbio().
 */
void
brelvp(struct buf *bp)
{
	struct vnode *vp;

	splassert(IPL_BIO);

	if ((vp = bp->b_vp) == (struct vnode *) 0)
		panic("brelvp: NULL");
	/*
	 * Delete from old vnode list, if on one.
	 */
	if (LIST_NEXT(bp, b_vnbufs) != NOLIST)
		bufremvn(bp);
	if ((vp->v_bioflag & VBIOONSYNCLIST) &&
	    LIST_FIRST(&vp->v_dirtyblkhd) == NULL) {
		vp->v_bioflag &= ~VBIOONSYNCLIST;
		LIST_REMOVE(vp, v_synclist);
	}
	bp->b_vp = NULL;

	vdrop(vp);
}

/*
 * Replaces the current vnode associated with the buffer, if any,
 * with a new vnode.
 *
 * If an output I/O is pending on the buffer, the old vnode
 * I/O count is adjusted.
 *
 * Ignores vnode buffer queues. Must be called at splbio().
 */
void
buf_replacevnode(struct buf *bp, struct vnode *newvp)
{
	struct vnode *oldvp = bp->b_vp;

	splassert(IPL_BIO);

	if (oldvp)
		brelvp(bp);

	if ((bp->b_flags & (B_READ | B_DONE)) == 0) {
		newvp->v_numoutput++;	/* put it on swapdev */
		vwakeup(oldvp);
	}

	bgetvp(newvp, bp);
	bufremvn(bp);
}

/*
 * Used to assign buffers to the appropriate clean or dirty list on
 * the vnode and to add newly dirty vnodes to the appropriate
 * filesystem syncer list.
 *
 * Manipulates vnode buffer queues. Must be called at splbio().
 */
void
reassignbuf(struct buf *bp)
{
	struct buflists *listheadp;
	int delay;
	struct vnode *vp = bp->b_vp;

	splassert(IPL_BIO);

	/*
	 * Delete from old vnode list, if on one.
	 */
	if (LIST_NEXT(bp, b_vnbufs) != NOLIST)
		bufremvn(bp);

	/*
	 * If dirty, put on list of dirty buffers;
	 * otherwise insert onto list of clean buffers.
	 */
	if ((bp->b_flags & B_DELWRI) == 0) {
		listheadp = &vp->v_cleanblkhd;
		if ((vp->v_bioflag & VBIOONSYNCLIST) &&
		    LIST_FIRST(&vp->v_dirtyblkhd) == NULL) {
			vp->v_bioflag &= ~VBIOONSYNCLIST;
			LIST_REMOVE(vp, v_synclist);
		}
	} else {
		listheadp = &vp->v_dirtyblkhd;
		if ((vp->v_bioflag & VBIOONSYNCLIST) == 0) {
			switch (vp->v_type) {
			case VDIR:
				delay = syncdelay / 2;
				break;
			case VBLK:
				if (vp->v_specmountpoint != NULL) {
					delay = syncdelay / 3;
					break;
				}
				/* FALLTHROUGH */
			default:
				delay = syncdelay;
			}
			vn_syncer_add_to_worklist(vp, delay);
		}
	}
	bufinsvn(bp, listheadp);
}

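/*
 * Add a filesystem to the list of configured filesystems
 * and run its vfs_init() hook.
 */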
int
vfs_register(struct vfsconf *vfs)
{
	struct vfsconf *vfsp;
	struct vfsconf **vfspp;

#ifdef DIAGNOSTIC
	/* Paranoia? */
	if (vfs->vfc_refcount != 0)
		printf("vfs_register called with vfc_refcount > 0\n");
#endif

	/* Check if filesystem already known */
	for (vfspp = &vfsconf, vfsp = vfsconf; vfsp;
	    vfspp = &vfsp->vfc_next, vfsp = vfsp->vfc_next)
		if (strcmp(vfsp->vfc_name, vfs->vfc_name) == 0)
			return (EEXIST);

	if (vfs->vfc_typenum > maxvfsconf)
		maxvfsconf = vfs->vfc_typenum;

	vfs->vfc_next = NULL;

	/* Add to the end of the list */
	*vfspp = vfs;

	/* Call vfs_init() */
	if (vfs->vfc_vfsops->vfs_init)
		(*(vfs->vfc_vfsops->vfs_init))(vfs);

	return 0;
}

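/*
 * Remove a filesystem from the configured list, provided it is
 * no longer in use, and recompute maxvfsconf.
 */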
int
vfs_unregister(struct vfsconf *vfs)
{
	struct vfsconf *vfsp;
	struct vfsconf **vfspp;
	int maxtypenum;

	/* Find our vfsconf struct */
	for (vfspp = &vfsconf, vfsp = vfsconf; vfsp;
	    vfspp = &vfsp->vfc_next, vfsp = vfsp->vfc_next) {
		if (strcmp(vfsp->vfc_name, vfs->vfc_name) == 0)
			break;
	}

	if (!vfsp)			/* Not found */
		return (ENOENT);

	if (vfsp->vfc_refcount)		/* In use */
		return (EBUSY);

	/* Remove from list and free */
	*vfspp = vfsp->vfc_next;

	maxtypenum = 0;

	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
		if (vfsp->vfc_typenum > maxtypenum)
			maxtypenum = vfsp->vfc_typenum;

	maxvfsconf = maxtypenum;
	return 0;
}

/*
 * Check if vnode represents a disk device
 */
int
vn_isdisk(struct vnode *vp, int *errp)
{
	if (vp->v_type != VBLK && vp->v_type != VCHR)
		return (0);

	return (1);
}

#ifdef DDB
#include <machine/db_machdep.h>
#include <ddb/db_interface.h>

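/*
 * ddb(4) helper: print the contents of a buffer.
 */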
void
vfs_buf_print(void *b, int full,
    int (*pr)(const char *, ...) __attribute__((__format__(__kprintf__,1,2))))
{
	struct buf *bp = b;

	(*pr)("  vp %p lblkno 0x%llx blkno 0x%llx dev 0x%x\n"
	      "  proc %p error %d flags %lb\n",
	    bp->b_vp, (int64_t)bp->b_lblkno, (int64_t)bp->b_blkno, bp->b_dev,
	    bp->b_proc, bp->b_error, bp->b_flags, B_BITS);

	(*pr)("  bufsize 0x%lx bcount 0x%lx resid 0x%lx\n"
	      "  data %p saveaddr %p dep %p iodone %p\n",
	    bp->b_bufsize, bp->b_bcount, (long)bp->b_resid,
	    bp->b_data, bp->b_saveaddr,
	    LIST_FIRST(&bp->b_dep), bp->b_iodone);

	(*pr)("  dirty {off 0x%x end 0x%x} valid {off 0x%x end 0x%x}\n",
	    bp->b_dirtyoff, bp->b_dirtyend, bp->b_validoff, bp->b_validend);

#ifdef FFS_SOFTUPDATES
	if (full)
		softdep_print(bp, full, pr);
#endif
}

const char *vtypes[] = { VTYPE_NAMES };
const char *vtags[] = { VTAG_NAMES };

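/*
 * ddb(4) helper: print the contents of a vnode and, if asked,
 * its clean and dirty buffer lists.
 */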
void
vfs_vnode_print(void *v, int full,
    int (*pr)(const char *, ...) __attribute__((__format__(__kprintf__,1,2))))
{
	struct vnode *vp = v;

	(*pr)("tag %s(%d) type %s(%d) mount %p typedata %p\n",
	      vp->v_tag >= nitems(vtags)? "<unk>":vtags[vp->v_tag], vp->v_tag,
	      vp->v_type >= nitems(vtypes)? "<unk>":vtypes[vp->v_type],
	      vp->v_type, vp->v_mount, vp->v_mountedhere);

	(*pr)("data %p usecount %d writecount %d holdcnt %d numoutput %d\n",
	      vp->v_data, vp->v_usecount, vp->v_writecount,
	      vp->v_holdcnt, vp->v_numoutput);

	/* uvm_object_printit(&vp->v_uobj, full, pr); */

	if (full) {
		struct buf *bp;

		(*pr)("clean bufs:\n");
		LIST_FOREACH(bp, &vp->v_cleanblkhd, b_vnbufs) {
			(*pr)(" bp %p\n", bp);
			vfs_buf_print(bp, full, pr);
		}

		(*pr)("dirty bufs:\n");
		LIST_FOREACH(bp, &vp->v_dirtyblkhd, b_vnbufs) {
			(*pr)(" bp %p\n", bp);
			vfs_buf_print(bp, full, pr);
		}
	}
}

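/*
 * ddb(4) helper: print the contents of a mount structure and the
 * vnodes hanging off it.
 */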
void
vfs_mount_print(struct mount *mp, int full,
    int (*pr)(const char *, ...) __attribute__((__format__(__kprintf__,1,2))))
{
	struct vfsconf *vfc = mp->mnt_vfc;
	struct vnode *vp;
	int cnt = 0;

	(*pr)("flags %b\nvnodecovered %p syncer %p data %p\n",
	    mp->mnt_flag, MNT_BITS,
	    mp->mnt_vnodecovered, mp->mnt_syncer, mp->mnt_data);

	(*pr)("vfsconf: ops %p name \"%s\" num %d ref %d flags 0x%x\n",
	    vfc->vfc_vfsops, vfc->vfc_name, vfc->vfc_typenum,
	    vfc->vfc_refcount, vfc->vfc_flags);

	(*pr)("statvfs cache: bsize %x iosize %x\nblocks %llu free %llu avail %lld\n",
	    mp->mnt_stat.f_bsize, mp->mnt_stat.f_iosize, mp->mnt_stat.f_blocks,
	    mp->mnt_stat.f_bfree, mp->mnt_stat.f_bavail);

	(*pr)("  files %llu ffiles %llu favail %lld\n", mp->mnt_stat.f_files,
	    mp->mnt_stat.f_ffree, mp->mnt_stat.f_favail);

	(*pr)("  f_fsidx {0x%x, 0x%x} owner %u ctime 0x%llx\n",
	    mp->mnt_stat.f_fsid.val[0], mp->mnt_stat.f_fsid.val[1],
	    mp->mnt_stat.f_owner, mp->mnt_stat.f_ctime);

	(*pr)("  syncwrites %llu asyncwrites = %llu\n",
	    mp->mnt_stat.f_syncwrites, mp->mnt_stat.f_asyncwrites);

	(*pr)("  syncreads %llu asyncreads = %llu\n",
	    mp->mnt_stat.f_syncreads, mp->mnt_stat.f_asyncreads);

	(*pr)("  fstype \"%s\" mnton \"%s\" mntfrom \"%s\" mntspec \"%s\"\n",
	    mp->mnt_stat.f_fstypename, mp->mnt_stat.f_mntonname,
	    mp->mnt_stat.f_mntfromname, mp->mnt_stat.f_mntfromspec);

	(*pr)("locked vnodes:");
	/* XXX would take mountlist lock, except ddb has no context */
	LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes)
		if (VOP_ISLOCKED(vp)) {
			if (!LIST_NEXT(vp, v_mntvnodes))
				(*pr)(" %p", vp);
			else if (!(cnt++ % (72 / (sizeof(void *) * 2 + 4))))
				(*pr)("\n\t%p", vp);
			else
				(*pr)(", %p", vp);
		}
	(*pr)("\n");

	if (full) {
		(*pr)("all vnodes:\n\t");
		/* XXX would take mountlist lock, except ddb has no context */
		LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes)
			if (!LIST_NEXT(vp, v_mntvnodes))
				(*pr)(" %p", vp);
			else if (!(cnt++ % (72 / (sizeof(void *) * 2 + 4))))
				(*pr)(" %p,\n\t", vp);
			else
				(*pr)(" %p,", vp);
		(*pr)("\n");
	}
}
#endif /* DDB */

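/*
 * Copy the cached statfs information of a mount point into sbp,
 * refreshing the filesystem type name from the vfsconf entry.
 */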
void
copy_statfs_info(struct statfs *sbp, const struct mount *mp)
{
	const struct statfs *mbp;

	strncpy(sbp->f_fstypename, mp->mnt_vfc->vfc_name, MFSNAMELEN);

	if (sbp == (mbp = &mp->mnt_stat))
		return;

	sbp->f_fsid = mbp->f_fsid;
	sbp->f_owner = mbp->f_owner;
	sbp->f_flags = mbp->f_flags;
	sbp->f_syncwrites = mbp->f_syncwrites;
	sbp->f_asyncwrites = mbp->f_asyncwrites;
	sbp->f_syncreads = mbp->f_syncreads;
	sbp->f_asyncreads = mbp->f_asyncreads;
	sbp->f_namemax = mbp->f_namemax;
	memcpy(sbp->f_mntonname, mp->mnt_stat.f_mntonname, MNAMELEN);
	memcpy(sbp->f_mntfromname, mp->mnt_stat.f_mntfromname, MNAMELEN);
	memcpy(sbp->f_mntfromspec, mp->mnt_stat.f_mntfromspec, MNAMELEN);
	memcpy(&sbp->mount_info, &mp->mnt_stat.mount_info,
	    sizeof(union mount_info));
}