/*	$OpenBSD: vfs_subr.c,v 1.187 2010/06/29 04:09:32 tedu Exp $	*/
/*	$NetBSD: vfs_subr.c,v 1.53 1996/04/22 01:39:13 christos Exp $	*/

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.13 (Berkeley) 4/18/94
 */

/*
 * External virtual filesystem routines
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/time.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/stat.h>
#include <sys/namei.h>
#include <sys/ucred.h>
#include <sys/buf.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/domain.h>
#include <sys/mbuf.h>
#include <sys/syscallargs.h>
#include <sys/pool.h>
#include <sys/tree.h>

#include <uvm/uvm_extern.h>
#include <sys/sysctl.h>

#include <miscfs/specfs/specdev.h>

enum vtype iftovt_tab[16] = {
	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
};

int	vttoif_tab[9] = {
	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
	S_IFSOCK, S_IFIFO, S_IFMT,
};
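
/*
 * These tables back the IFTOVT() and VTTOIF() macros in <sys/vnode.h>,
 * which translate between stat-style mode bits and vnode types.  A
 * hypothetical sketch of a filesystem using them when filling in a
 * struct vattr from an on-disk inode mode:
 *
 *	vap->va_type = IFTOVT(mode);
 *	mode = VTTOIF(vap->va_type) | perms;
 */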

int doforce = 1;		/* 1 => permit forcible unmounting */
int prtactive = 0;		/* 1 => print out reclaim of active vnodes */
int suid_clear = 1;		/* 1 => clear SUID / SGID on owner change */

/*
 * Insq/Remq for the vnode usage lists.
 */
#define	bufinsvn(bp, dp)	LIST_INSERT_HEAD(dp, bp, b_vnbufs)
#define	bufremvn(bp) {							\
	LIST_REMOVE(bp, b_vnbufs);					\
	LIST_NEXT(bp, b_vnbufs) = NOLIST;				\
}

struct freelst vnode_hold_list;	/* list of vnodes referencing buffers */
struct freelst vnode_free_list;	/* vnode free list */

struct mntlist mountlist;	/* mounted filesystem list */

void	vclean(struct vnode *, int, struct proc *);
void	vhold(struct vnode *);
void	vdrop(struct vnode *);

void insmntque(struct vnode *, struct mount *);
int getdevvp(dev_t, struct vnode **, enum vtype);

int vfs_hang_addrlist(struct mount *, struct netexport *,
				  struct export_args *);
int vfs_free_netcred(struct radix_node *, void *, u_int);
void vfs_free_addrlist(struct netexport *);
void vputonfreelist(struct vnode *);

int vflush_vnode(struct vnode *, void *);
int maxvnodes;

#ifdef DEBUG
void printlockedvnodes(void);
#endif

struct pool vnode_pool;

static int rb_buf_compare(struct buf *b1, struct buf *b2);
RB_GENERATE(buf_rb_bufs, buf, b_rbbufs, rb_buf_compare);

static int
rb_buf_compare(struct buf *b1, struct buf *b2)
{
	if (b1->b_lblkno < b2->b_lblkno)
		return (-1);
	if (b1->b_lblkno > b2->b_lblkno)
		return (1);
	return (0);
}
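
/*
 * The per-vnode buffer tree (v_bufs_tree) is keyed on logical block
 * number via rb_buf_compare() above.  A hypothetical lookup sketch,
 * where lblkno stands for the block being sought and the caller is
 * already at splbio():
 *
 *	struct buf key, *bp;
 *
 *	key.b_lblkno = lblkno;
 *	bp = RB_FIND(buf_rb_bufs, &vp->v_bufs_tree, &key);
 */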

/*
 * Initialize the vnode management data structures.
 */
void
vntblinit(void)
{
	/* buffer cache may need a vnode for each buffer */
	maxvnodes = desiredvnodes;
	pool_init(&vnode_pool, sizeof(struct vnode), 0, 0, 0, "vnodes",
	    &pool_allocator_nointr);
	TAILQ_INIT(&vnode_hold_list);
	TAILQ_INIT(&vnode_free_list);
	CIRCLEQ_INIT(&mountlist);
	/*
	 * Initialize the filesystem syncer.
	 */
	vn_initialize_syncerd();
}

/*
 * Mark a mount point as busy. Used to synchronize access and to delay
 * unmounting.
 *
 * Default behaviour is to attempt getting a READ lock and in case of an
 * ongoing unmount, to wait for it to finish and then return failure.
 */
int
vfs_busy(struct mount *mp, int flags)
{
	int rwflags = 0;

	/* new mountpoints need their lock initialised */
	if (mp->mnt_lock.rwl_name == NULL)
		rw_init(&mp->mnt_lock, "vfslock");

	if (flags & VB_WRITE)
		rwflags |= RW_WRITE;
	else
		rwflags |= RW_READ;

	if (flags & VB_WAIT)
		rwflags |= RW_SLEEPFAIL;
	else
		rwflags |= RW_NOSLEEP;

	if (rw_enter(&mp->mnt_lock, rwflags))
		return (EBUSY);

	return (0);
}
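
/*
 * A sketch of the usual busy/unbusy bracket around a traversal of a
 * mount point, mirroring the callers later in this file:
 *
 *	if (vfs_busy(mp, VB_READ|VB_NOWAIT))
 *		return (EBUSY);
 *	... work on mp ...
 *	vfs_unbusy(mp);
 */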

/*
 * Free a busy file system
 */
void
vfs_unbusy(struct mount *mp)
{
	rw_exit(&mp->mnt_lock);
}

int
vfs_isbusy(struct mount *mp)
{
	if (RWLOCK_OWNER(&mp->mnt_lock) > 0)
		return (1);
	else
		return (0);
}

/*
 * Lookup a filesystem type, and if found allocate and initialize
 * a mount structure for it.
 *
 * Devname is usually updated by mount(8) after booting.
 */
int
vfs_rootmountalloc(char *fstypename, char *devname, struct mount **mpp)
{
	struct vfsconf *vfsp;
	struct mount *mp;

	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
		if (!strcmp(vfsp->vfc_name, fstypename))
			break;
	if (vfsp == NULL)
		return (ENODEV);
	mp = malloc(sizeof(struct mount), M_MOUNT, M_WAITOK|M_ZERO);
	(void)vfs_busy(mp, VB_READ|VB_NOWAIT);
	LIST_INIT(&mp->mnt_vnodelist);
	mp->mnt_vfc = vfsp;
	mp->mnt_op = vfsp->vfc_vfsops;
	mp->mnt_flag = MNT_RDONLY;
	mp->mnt_vnodecovered = NULLVP;
	vfsp->vfc_refcount++;
	mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;
	strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
	mp->mnt_stat.f_mntonname[0] = '/';
	(void)copystr(devname, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, 0);
	*mpp = mp;
	return (0);
}

/*
 * Lookup a mount point by filesystem identifier.
 */
struct mount *
vfs_getvfs(fsid_t *fsid)
{
	struct mount *mp;

	CIRCLEQ_FOREACH(mp, &mountlist, mnt_list) {
		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
			return (mp);
		}
	}

	return (NULL);
}


/*
 * Get a new unique fsid
 */
void
vfs_getnewfsid(struct mount *mp)
{
	static u_short xxxfs_mntid;

	fsid_t tfsid;
	int mtype;

	mtype = mp->mnt_vfc->vfc_typenum;
	mp->mnt_stat.f_fsid.val[0] = makedev(nblkdev + mtype, 0);
	mp->mnt_stat.f_fsid.val[1] = mtype;
	if (xxxfs_mntid == 0)
		++xxxfs_mntid;
	tfsid.val[0] = makedev(nblkdev + mtype, xxxfs_mntid);
	tfsid.val[1] = mtype;
	if (!CIRCLEQ_EMPTY(&mountlist)) {
		while (vfs_getvfs(&tfsid)) {
			tfsid.val[0]++;
			xxxfs_mntid++;
		}
	}
	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
}

/*
 * Set vnode attributes to VNOVAL
 */
void
vattr_null(struct vattr *vap)
{

	vap->va_type = VNON;
	/* XXX These next two used to be one line, but for a GCC bug. */
	vap->va_size = VNOVAL;
	vap->va_bytes = VNOVAL;
	vap->va_mode = vap->va_nlink = vap->va_uid = vap->va_gid =
		vap->va_fsid = vap->va_fileid =
		vap->va_blocksize = vap->va_rdev =
		vap->va_atime.tv_sec = vap->va_atime.tv_nsec =
		vap->va_mtime.tv_sec = vap->va_mtime.tv_nsec =
		vap->va_ctime.tv_sec = vap->va_ctime.tv_nsec =
		vap->va_flags = vap->va_gen = VNOVAL;
	vap->va_vaflags = 0;
}

/*
 * Routines having to do with the management of the vnode table.
 */
extern int (**dead_vnodeop_p)(void *);
long numvnodes;

/*
 * Return the next vnode from the free list.
 */
int
getnewvnode(enum vtagtype tag, struct mount *mp, int (**vops)(void *),
    struct vnode **vpp)
{
	struct proc *p = curproc;
	struct freelst *listhd;
	static int toggle;
	struct vnode *vp;
	int s;

	/*
	 * We must choose whether to allocate a new vnode or recycle an
	 * existing one. The criterion for allocating a new one is that
	 * the total number of vnodes is less than the number desired or
	 * there are no vnodes on either free list. Generally we only
	 * want to recycle vnodes that have no buffers associated with
	 * them, so we look first on the vnode_free_list. If it is empty,
	 * we next consider vnodes with referencing buffers on the
	 * vnode_hold_list. The toggle ensures that half the time we
	 * will use a buffer from the vnode_hold_list, and half the time
	 * we will allocate a new one unless the list has grown to twice
	 * the desired size. We are reticent to recycle vnodes from the
	 * vnode_hold_list because we will lose the identity of all its
	 * referencing buffers.
	 */
	toggle ^= 1;
	if (numvnodes > 2 * maxvnodes)
		toggle = 0;

	s = splbio();
	if ((numvnodes < maxvnodes) ||
	    ((TAILQ_FIRST(listhd = &vnode_free_list) == NULL) &&
	    ((TAILQ_FIRST(listhd = &vnode_hold_list) == NULL) || toggle))) {
		splx(s);
		vp = pool_get(&vnode_pool, PR_WAITOK | PR_ZERO);
		RB_INIT(&vp->v_bufs_tree);
		RB_INIT(&vp->v_nc_tree);
		TAILQ_INIT(&vp->v_cache_dst);
		numvnodes++;
	} else {
		for (vp = TAILQ_FIRST(listhd); vp != NULLVP;
		    vp = TAILQ_NEXT(vp, v_freelist)) {
			if (VOP_ISLOCKED(vp) == 0)
				break;
		}
		/*
		 * Unless this is a bad time of the month, at most
		 * the first NCPUS items on the free list are
		 * locked, so this is close enough to being empty.
		 */
		if (vp == NULL) {
			splx(s);
			tablefull("vnode");
			*vpp = 0;
			return (ENFILE);
		}

#ifdef DIAGNOSTIC
		if (vp->v_usecount) {
			vprint("free vnode", vp);
			panic("free vnode isn't");
		}
#endif

		TAILQ_REMOVE(listhd, vp, v_freelist);
		vp->v_bioflag &= ~VBIOONFREELIST;
		splx(s);

		if (vp->v_type != VBAD)
			vgonel(vp, p);
#ifdef DIAGNOSTIC
		if (vp->v_data) {
			vprint("cleaned vnode", vp);
			panic("cleaned vnode isn't");
		}
		s = splbio();
		if (vp->v_numoutput)
			panic("Clean vnode has pending I/O's");
		splx(s);
#endif
		vp->v_flag = 0;
		vp->v_socket = 0;
	}
	vp->v_type = VNON;
	cache_purge(vp);
	vp->v_tag = tag;
	vp->v_op = vops;
	insmntque(vp, mp);
	*vpp = vp;
	vp->v_usecount = 1;
	vp->v_data = 0;
	simple_lock_init(&vp->v_uvm.u_obj.vmobjlock);
	return (0);
}
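
/*
 * getdevvp() below is the minimal in-file caller; a hypothetical
 * filesystem allocates a fresh vnode the same way, with its own tag,
 * vnodeop vector and private data (ip stands for that private data):
 *
 *	error = getnewvnode(VT_NON, mp, spec_vnodeop_p, &vp);
 *	if (error)
 *		return (error);
 *	vp->v_data = ip;
 */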

/*
 * Move a vnode from one mount queue to another.
 */
void
insmntque(struct vnode *vp, struct mount *mp)
{
	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mount != NULL)
		LIST_REMOVE(vp, v_mntvnodes);
	/*
	 * Insert into list of vnodes for the new mount point, if available.
	 */
	if ((vp->v_mount = mp) != NULL)
		LIST_INSERT_HEAD(&mp->mnt_vnodelist, vp, v_mntvnodes);
}

/*
 * Create a vnode for a block device.
 * Used for root filesystem, argdev, and swap areas.
 * Also used for memory file system special devices.
 */
int
bdevvp(dev_t dev, struct vnode **vpp)
{
	return (getdevvp(dev, vpp, VBLK));
}

/*
 * Create a vnode for a character device.
 * Used for console handling.
 */
int
cdevvp(dev_t dev, struct vnode **vpp)
{
	return (getdevvp(dev, vpp, VCHR));
}
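
/*
 * A hypothetical sketch of the classic bdevvp() call made while
 * mounting the root filesystem (rootdev/rootvp as in ffs_mountroot):
 *
 *	if (bdevvp(rootdev, &rootvp))
 *		panic("can't setup bdevvp's");
 */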

/*
 * Create a vnode for a device.
 * Used by bdevvp (block device) for root file system etc.,
 * and by cdevvp (character device) for console.
 */
int
getdevvp(dev_t dev, struct vnode **vpp, enum vtype type)
{
	struct vnode *vp;
	struct vnode *nvp;
	int error;

	if (dev == NODEV) {
		*vpp = NULLVP;
		return (0);
	}
	error = getnewvnode(VT_NON, NULL, spec_vnodeop_p, &nvp);
	if (error) {
		*vpp = NULLVP;
		return (error);
	}
	vp = nvp;
	vp->v_type = type;
	if ((nvp = checkalias(vp, dev, NULL)) != 0) {
		vput(vp);
		vp = nvp;
	}
	*vpp = vp;
	return (0);
}

/*
 * Check to see if the new vnode represents a special device
 * for which we already have a vnode (either because of
 * bdevvp() or because of a different vnode representing
 * the same block device). If such an alias exists, deallocate
 * the existing contents and return the aliased vnode. The
 * caller is responsible for filling it with its new contents.
 */
struct vnode *
checkalias(struct vnode *nvp, dev_t nvp_rdev, struct mount *mp)
{
	struct proc *p = curproc;
	struct vnode *vp;
	struct vnode **vpp;

	if (nvp->v_type != VBLK && nvp->v_type != VCHR)
		return (NULLVP);

	vpp = &speclisth[SPECHASH(nvp_rdev)];
loop:
	for (vp = *vpp; vp; vp = vp->v_specnext) {
		if (nvp_rdev != vp->v_rdev || nvp->v_type != vp->v_type) {
			continue;
		}
		/*
		 * Alias, but not in use, so flush it out.
		 */
		if (vp->v_usecount == 0) {
			vgonel(vp, p);
			goto loop;
		}
		if (vget(vp, LK_EXCLUSIVE, p)) {
			goto loop;
		}
		break;
	}

	/*
	 * Common case is actually in the if statement
	 */
	if (vp == NULL || !(vp->v_tag == VT_NON && vp->v_type == VBLK)) {
		nvp->v_specinfo = malloc(sizeof(struct specinfo), M_VNODE,
			M_WAITOK);
		nvp->v_rdev = nvp_rdev;
		nvp->v_hashchain = vpp;
		nvp->v_specnext = *vpp;
		nvp->v_specmountpoint = NULL;
		nvp->v_speclockf = NULL;
		bzero(nvp->v_specbitmap, sizeof(nvp->v_specbitmap));
		*vpp = nvp;
		if (vp != NULLVP) {
			nvp->v_flag |= VALIASED;
			vp->v_flag |= VALIASED;
			vput(vp);
		}
		return (NULLVP);
	}

	/*
	 * This code is the uncommon case. It is called in case
	 * we found an alias that was VT_NON && vtype of VBLK
	 * This means we found a block device that was created
	 * using bdevvp.
	 * An example of such a vnode is the root partition device vnode
	 * created in ffs_mountroot.
	 *
	 * The vnodes created by bdevvp should not be aliased (why?).
	 */

	VOP_UNLOCK(vp, 0, p);
	vclean(vp, 0, p);
	vp->v_op = nvp->v_op;
	vp->v_tag = nvp->v_tag;
	nvp->v_type = VNON;
	insmntque(vp, mp);
	return (vp);
}

/*
 * Grab a particular vnode from the free list, increment its
 * reference count and lock it. If the vnode lock bit is set,
 * the vnode is being eliminated in vgone. In that case, we
 * cannot grab it, so the process is awakened when the
 * transition is completed, and an error code is returned to
 * indicate that the vnode is no longer usable, possibly
 * having been changed to a new file system type.
 */
int
vget(struct vnode *vp, int flags, struct proc *p)
{
	int error, s, onfreelist;

	/*
	 * If the vnode is in the process of being cleaned out for
	 * another use, we wait for the cleaning to finish and then
	 * return failure. Cleaning is determined by checking that
	 * the VXLOCK flag is set.
	 */

	if (vp->v_flag & VXLOCK) {
		if (flags & LK_NOWAIT) {
			return (EBUSY);
		}

		vp->v_flag |= VXWANT;
		tsleep(vp, PINOD, "vget", 0);
		return (ENOENT);
	}

	onfreelist = vp->v_bioflag & VBIOONFREELIST;
	if (vp->v_usecount == 0 && onfreelist) {
		s = splbio();
		if (vp->v_holdcnt > 0)
			TAILQ_REMOVE(&vnode_hold_list, vp, v_freelist);
		else
			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
		vp->v_bioflag &= ~VBIOONFREELIST;
		splx(s);
	}

	vp->v_usecount++;
	if (flags & LK_TYPE_MASK) {
		if ((error = vn_lock(vp, flags, p)) != 0) {
			vp->v_usecount--;
			if (vp->v_usecount == 0 && onfreelist)
				vputonfreelist(vp);
		}
		return (error);
	}

	return (0);
}
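
/*
 * A sketch of the usual vget()/vput() bracket around a locked use of
 * a vnode pulled from some cache (checkalias() above retries on
 * failure in just this way):
 *
 *	if (vget(vp, LK_EXCLUSIVE, p))
 *		goto retry;
 *	... use the locked vnode ...
 *	vput(vp);
 */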


/* Vnode reference. */
void
vref(struct vnode *vp)
{
#ifdef DIAGNOSTIC
	if (vp->v_usecount == 0)
		panic("vref used where vget required");
	if (vp->v_type == VNON)
		panic("vref on a VNON vnode");
#endif
	vp->v_usecount++;
}

void
vputonfreelist(struct vnode *vp)
{
	int s;
	struct freelst *lst;

	s = splbio();
#ifdef DIAGNOSTIC
	if (vp->v_usecount != 0)
		panic("Use count is not zero!");

	if (vp->v_bioflag & VBIOONFREELIST) {
		vprint("vnode already on free list: ", vp);
		panic("vnode already on free list");
	}
#endif

	vp->v_bioflag |= VBIOONFREELIST;

	if (vp->v_holdcnt > 0)
		lst = &vnode_hold_list;
	else
		lst = &vnode_free_list;

	if (vp->v_type == VBAD)
		TAILQ_INSERT_HEAD(lst, vp, v_freelist);
	else
		TAILQ_INSERT_TAIL(lst, vp, v_freelist);

	splx(s);
}

/*
 * vput(), just unlock and vrele()
 */
void
vput(struct vnode *vp)
{
	struct proc *p = curproc;

#ifdef DIAGNOSTIC
	if (vp == NULL)
		panic("vput: null vp");
#endif

#ifdef DIAGNOSTIC
	if (vp->v_usecount == 0) {
		vprint("vput: bad ref count", vp);
		panic("vput: ref cnt");
	}
#endif
	vp->v_usecount--;
	if (vp->v_usecount > 0) {
		VOP_UNLOCK(vp, 0, p);
		return;
	}

#ifdef DIAGNOSTIC
	if (vp->v_writecount != 0) {
		vprint("vput: bad writecount", vp);
		panic("vput: v_writecount != 0");
	}
#endif

	VOP_INACTIVE(vp, p);

	if (vp->v_usecount == 0 && !(vp->v_bioflag & VBIOONFREELIST))
		vputonfreelist(vp);
}

/*
 * Vnode release - use for active VNODES.
 * If count drops to zero, call inactive routine and return to freelist.
 * Returns 0 if it did not sleep.
 */
int
vrele(struct vnode *vp)
{
	struct proc *p = curproc;

#ifdef DIAGNOSTIC
	if (vp == NULL)
		panic("vrele: null vp");
#endif
#ifdef DIAGNOSTIC
	if (vp->v_usecount == 0) {
		vprint("vrele: bad ref count", vp);
		panic("vrele: ref cnt");
	}
#endif
	vp->v_usecount--;
	if (vp->v_usecount > 0) {
		return (0);
	}

#ifdef DIAGNOSTIC
	if (vp->v_writecount != 0) {
		vprint("vrele: bad writecount", vp);
		panic("vrele: v_writecount != 0");
	}
#endif

	if (vn_lock(vp, LK_EXCLUSIVE, p)) {
#ifdef DIAGNOSTIC
		vprint("vrele: cannot lock", vp);
#endif
		return (1);
	}

	VOP_INACTIVE(vp, p);

	if (vp->v_usecount == 0 && !(vp->v_bioflag & VBIOONFREELIST))
		vputonfreelist(vp);
	return (1);
}

/* Page or buffer structure gets a reference. */
void
vhold(struct vnode *vp)
{
	/*
	 * If it is on the freelist and the hold count is currently
	 * zero, move it to the hold list.
	 */
	if ((vp->v_bioflag & VBIOONFREELIST) &&
	    vp->v_holdcnt == 0 && vp->v_usecount == 0) {
		TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
		TAILQ_INSERT_TAIL(&vnode_hold_list, vp, v_freelist);
	}
	vp->v_holdcnt++;
}

/* Lose interest in a vnode. */
void
vdrop(struct vnode *vp)
{
#ifdef DIAGNOSTIC
	if (vp->v_holdcnt == 0)
		panic("vdrop: zero holdcnt");
#endif

	vp->v_holdcnt--;

	/*
	 * If it is on the holdlist and the hold count drops to
	 * zero, move it to the free list.
	 */
	if ((vp->v_bioflag & VBIOONFREELIST) &&
	    vp->v_holdcnt == 0 && vp->v_usecount == 0) {
		TAILQ_REMOVE(&vnode_hold_list, vp, v_freelist);
		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
	}
}

/*
 * Remove any vnodes in the vnode table belonging to mount point mp.
 *
 * If MNT_NOFORCE is specified, there should not be any active ones,
 * return error if any are found (nb: this is a user error, not a
 * system error). If MNT_FORCE is specified, detach any active vnodes
 * that are found.
 */
#ifdef DEBUG
int busyprt = 0;	/* print out busy vnodes */
struct ctldebug debug1 = { "busyprt", &busyprt };
#endif

int
vfs_mount_foreach_vnode(struct mount *mp,
    int (*func)(struct vnode *, void *), void *arg) {
	struct vnode *vp, *nvp;
	int error = 0;

loop:
	for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = nvp) {
		if (vp->v_mount != mp)
			goto loop;
		nvp = LIST_NEXT(vp, v_mntvnodes);

		error = func(vp, arg);

		if (error != 0)
			break;
	}

	return (error);
}

struct vflush_args {
	struct vnode *skipvp;
	int busy;
	int flags;
};

int
vflush_vnode(struct vnode *vp, void *arg) {
	struct vflush_args *va = arg;
	struct proc *p = curproc;

	if (vp == va->skipvp) {
		return (0);
	}

	if ((va->flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
		return (0);
	}

	/*
	 * If WRITECLOSE is set, only flush out regular file
	 * vnodes open for writing.
	 */
	if ((va->flags & WRITECLOSE) &&
	    (vp->v_writecount == 0 || vp->v_type != VREG)) {
		return (0);
	}

	/*
	 * With v_usecount == 0, all we need to do is clear
	 * out the vnode data structures and we are done.
	 */
	if (vp->v_usecount == 0) {
		vgonel(vp, p);
		return (0);
	}

	/*
	 * If FORCECLOSE is set, forcibly close the vnode.
	 * For block or character devices, revert to an
	 * anonymous device. For all other files, just kill them.
	 */
	if (va->flags & FORCECLOSE) {
		if (vp->v_type != VBLK && vp->v_type != VCHR) {
			vgonel(vp, p);
		} else {
			vclean(vp, 0, p);
			vp->v_op = spec_vnodeop_p;
			insmntque(vp, (struct mount *)0);
		}
		return (0);
	}

#ifdef DEBUG
	if (busyprt)
		vprint("vflush: busy vnode", vp);
#endif
	va->busy++;
	return (0);
}

int
vflush(struct mount *mp, struct vnode *skipvp, int flags)
{
	struct vflush_args va;
	va.skipvp = skipvp;
	va.busy = 0;
	va.flags = flags;

	vfs_mount_foreach_vnode(mp, vflush_vnode, &va);

	if (va.busy)
		return (EBUSY);
	return (0);
}
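
/*
 * A hypothetical sketch of the classic vflush() call made by a
 * filesystem's unmount routine, sparing system vnodes and forcing
 * the rest when MNT_FORCE was given:
 *
 *	flags = (mntflags & MNT_FORCE) ? FORCECLOSE : 0;
 *	error = vflush(mp, NULLVP, SKIPSYSTEM | flags);
 */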

/*
 * Disassociate the underlying file system from a vnode.
 */
void
vclean(struct vnode *vp, int flags, struct proc *p)
{
	int active;

	/*
	 * Check to see if the vnode is in use.
	 * If so we have to reference it before we clean it out
	 * so that its count cannot fall to zero and generate a
	 * race against ourselves to recycle it.
	 */
	if ((active = vp->v_usecount) != 0)
		vp->v_usecount++;

	/*
	 * Prevent the vnode from being recycled or
	 * brought into use while we clean it out.
	 */
	if (vp->v_flag & VXLOCK)
		panic("vclean: deadlock");
	vp->v_flag |= VXLOCK;
	/*
	 * Even if the count is zero, the VOP_INACTIVE routine may still
	 * have the object locked while it cleans it out. The VOP_LOCK
	 * ensures that the VOP_INACTIVE routine is done with its work.
	 * For active vnodes, it ensures that no other activity can
	 * occur while the underlying object is being cleaned out.
	 */
	VOP_LOCK(vp, LK_DRAIN, p);

	/*
	 * Clean out any VM data associated with the vnode.
	 */
	uvm_vnp_terminate(vp);
	/*
	 * Clean out any buffers associated with the vnode.
	 */
	if (flags & DOCLOSE)
		vinvalbuf(vp, V_SAVE, NOCRED, p, 0, 0);
	/*
	 * If purging an active vnode, it must be closed and
	 * deactivated before being reclaimed. Note that the
	 * VOP_INACTIVE will unlock the vnode
	 */
	if (active) {
		if (flags & DOCLOSE)
			VOP_CLOSE(vp, FNONBLOCK, NOCRED, p);
		VOP_INACTIVE(vp, p);
	} else {
		/*
		 * Any other processes trying to obtain this lock must first
		 * wait for VXLOCK to clear, then call the new lock operation.
		 */
		VOP_UNLOCK(vp, 0, p);
	}

	/*
	 * Reclaim the vnode.
	 */
	if (VOP_RECLAIM(vp, p))
		panic("vclean: cannot reclaim");
	if (active) {
		vp->v_usecount--;
		if (vp->v_usecount == 0) {
			if (vp->v_holdcnt > 0)
				panic("vclean: not clean");
			vputonfreelist(vp);
		}
	}
	cache_purge(vp);

	/*
	 * Done with purge, notify sleepers of the grim news.
	 */
	vp->v_op = dead_vnodeop_p;
	VN_KNOTE(vp, NOTE_REVOKE);
	vp->v_tag = VT_NON;
	vp->v_flag &= ~VXLOCK;
#ifdef VFSDEBUG
	vp->v_flag &= ~VLOCKSWORK;
#endif
	if (vp->v_flag & VXWANT) {
		vp->v_flag &= ~VXWANT;
		wakeup(vp);
	}
}

/*
 * Recycle an unused vnode to the front of the free list.
 */
int
vrecycle(struct vnode *vp, struct proc *p)
{
	if (vp->v_usecount == 0) {
		vgonel(vp, p);
		return (1);
	}
	return (0);
}

/*
 * Eliminate all activity associated with a vnode
 * in preparation for reuse.
 */
void
vgone(struct vnode *vp)
{
	struct proc *p = curproc;
	vgonel(vp, p);
}

/*
 * vgone, with struct proc.
 */
void
vgonel(struct vnode *vp, struct proc *p)
{
	struct vnode *vq;
	struct vnode *vx;

	/*
	 * If a vgone (or vclean) is already in progress,
	 * wait until it is done and return.
	 */
	if (vp->v_flag & VXLOCK) {
		vp->v_flag |= VXWANT;
		tsleep(vp, PINOD, "vgone", 0);
		return;
	}

	/*
	 * Clean out the filesystem specific data.
	 */
	vclean(vp, DOCLOSE, p);
	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mount != NULL)
		insmntque(vp, (struct mount *)0);
	/*
	 * If special device, remove it from special device alias list
	 * if it is on one.
	 */
	if ((vp->v_type == VBLK || vp->v_type == VCHR) && vp->v_specinfo != 0) {
		if (*vp->v_hashchain == vp) {
			*vp->v_hashchain = vp->v_specnext;
		} else {
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_specnext != vp)
					continue;
				vq->v_specnext = vp->v_specnext;
				break;
			}
			if (vq == NULL)
				panic("missing bdev");
		}
		if (vp->v_flag & VALIASED) {
			vx = NULL;
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_rdev != vp->v_rdev ||
				    vq->v_type != vp->v_type)
					continue;
				if (vx)
					break;
				vx = vq;
			}
			if (vx == NULL)
				panic("missing alias");
			if (vq == NULL)
				vx->v_flag &= ~VALIASED;
			vp->v_flag &= ~VALIASED;
		}
		free(vp->v_specinfo, M_VNODE);
		vp->v_specinfo = NULL;
	}
	/*
	 * If it is on the freelist and not already at the head,
	 * move it to the head of the list.
	 */
	vp->v_type = VBAD;

	/*
	 * Move onto the free list, unless we were called from
	 * getnewvnode and we're not on any free list
	 */
	if (vp->v_usecount == 0 &&
	    (vp->v_bioflag & VBIOONFREELIST)) {
		int s;

		s = splbio();

		if (vp->v_holdcnt > 0)
			panic("vgonel: not clean");

		if (TAILQ_FIRST(&vnode_free_list) != vp) {
			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
			TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
		}
		splx(s);
	}
}

/*
 * Lookup a vnode by device number.
 */
int
vfinddev(dev_t dev, enum vtype type, struct vnode **vpp)
{
	struct vnode *vp;
	int rc = 0;

	for (vp = speclisth[SPECHASH(dev)]; vp; vp = vp->v_specnext) {
		if (dev != vp->v_rdev || type != vp->v_type)
			continue;
		*vpp = vp;
		rc = 1;
		break;
	}
	return (rc);
}

/*
 * Revoke all the vnodes corresponding to the specified minor number
 * range (endpoints inclusive) of the specified major.
 */
void
vdevgone(int maj, int minl, int minh, enum vtype type)
{
	struct vnode *vp;
	int mn;

	for (mn = minl; mn <= minh; mn++)
		if (vfinddev(makedev(maj, mn), type, &vp))
			VOP_REVOKE(vp, REVOKEALL);
}
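
/*
 * A hypothetical sketch of a driver's detach routine revoking every
 * open instance of its device (maj and minor_max stand in for the
 * driver's own numbers):
 *
 *	vdevgone(maj, 0, minor_max, VBLK);
 *	vdevgone(maj, 0, minor_max, VCHR);
 */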

/*
 * Calculate the total number of references to a special device.
 */
int
vcount(struct vnode *vp)
{
	struct vnode *vq, *vnext;
	int count;

loop:
	if ((vp->v_flag & VALIASED) == 0)
		return (vp->v_usecount);
	for (count = 0, vq = *vp->v_hashchain; vq; vq = vnext) {
		vnext = vq->v_specnext;
		if (vq->v_rdev != vp->v_rdev || vq->v_type != vp->v_type)
			continue;
		/*
		 * Alias, but not in use, so flush it out.
		 */
		if (vq->v_usecount == 0 && vq != vp) {
			vgone(vq);
			goto loop;
		}
		count += vq->v_usecount;
	}
	return (count);
}

#if defined(DEBUG) || defined(DIAGNOSTIC)
/*
 * Print out a description of a vnode.
 */
static char *typename[] =
   { "VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD" };

void
vprint(char *label, struct vnode *vp)
{
	char buf[64];

	if (label != NULL)
		printf("%s: ", label);
	printf("%p, type %s, use %u, write %u, hold %u,",
		vp, typename[vp->v_type], vp->v_usecount, vp->v_writecount,
		vp->v_holdcnt);
	buf[0] = '\0';
	if (vp->v_flag & VROOT)
		strlcat(buf, "|VROOT", sizeof buf);
	if (vp->v_flag & VTEXT)
		strlcat(buf, "|VTEXT", sizeof buf);
	if (vp->v_flag & VSYSTEM)
		strlcat(buf, "|VSYSTEM", sizeof buf);
	if (vp->v_flag & VXLOCK)
		strlcat(buf, "|VXLOCK", sizeof buf);
	if (vp->v_flag & VXWANT)
		strlcat(buf, "|VXWANT", sizeof buf);
	if (vp->v_bioflag & VBIOWAIT)
		strlcat(buf, "|VBIOWAIT", sizeof buf);
	if (vp->v_bioflag & VBIOONFREELIST)
		strlcat(buf, "|VBIOONFREELIST", sizeof buf);
	if (vp->v_bioflag & VBIOONSYNCLIST)
		strlcat(buf, "|VBIOONSYNCLIST", sizeof buf);
	if (vp->v_flag & VALIASED)
		strlcat(buf, "|VALIASED", sizeof buf);
	if (buf[0] != '\0')
		printf(" flags (%s)", &buf[1]);
	if (vp->v_data == NULL) {
		printf("\n");
	} else {
		printf("\n\t");
		VOP_PRINT(vp);
	}
}
#endif /* DEBUG || DIAGNOSTIC */

#ifdef DEBUG
/*
 * List all of the locked vnodes in the system.
 * Called when debugging the kernel.
 */
void
printlockedvnodes(void)
{
	struct mount *mp, *nmp;
	struct vnode *vp;

	printf("Locked vnodes\n");

	for (mp = CIRCLEQ_FIRST(&mountlist); mp != CIRCLEQ_END(&mountlist);
	    mp = nmp) {
		if (vfs_busy(mp, VB_READ|VB_NOWAIT)) {
			nmp = CIRCLEQ_NEXT(mp, mnt_list);
			continue;
		}
		LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
			if (VOP_ISLOCKED(vp))
				vprint((char *)0, vp);
		}
		nmp = CIRCLEQ_NEXT(mp, mnt_list);
		vfs_unbusy(mp);
	}
}
#endif

/*
 * Top level filesystem related information gathering.
 */
int
vfs_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen, struct proc *p)
{
	struct vfsconf *vfsp, *tmpvfsp;
	int ret;

	/* all sysctl names at this level are at least name and field */
	if (namelen < 2)
		return (ENOTDIR);		/* overloaded */

	if (name[0] != VFS_GENERIC) {
		for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
			if (vfsp->vfc_typenum == name[0])
				break;

		if (vfsp == NULL)
			return (EOPNOTSUPP);

		return ((*vfsp->vfc_vfsops->vfs_sysctl)(&name[1], namelen - 1,
		    oldp, oldlenp, newp, newlen, p));
	}

	switch (name[1]) {
	case VFS_MAXTYPENUM:
		return (sysctl_rdint(oldp, oldlenp, newp, maxvfsconf));

	case VFS_CONF:
		if (namelen < 3)
			return (ENOTDIR);	/* overloaded */

		for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
			if (vfsp->vfc_typenum == name[2])
				break;

		if (vfsp == NULL)
			return (EOPNOTSUPP);

		/* Make a copy, clear out kernel pointers */
		tmpvfsp = malloc(sizeof(*tmpvfsp), M_TEMP, M_WAITOK);
		bcopy(vfsp, tmpvfsp, sizeof(*tmpvfsp));
		tmpvfsp->vfc_vfsops = NULL;
		tmpvfsp->vfc_next = NULL;

		ret = sysctl_rdstruct(oldp, oldlenp, newp, tmpvfsp,
		    sizeof(struct vfsconf));

		free(tmpvfsp, M_TEMP);
		return (ret);
	case VFS_BCACHESTAT:	/* buffer cache statistics */
		ret = sysctl_rdstruct(oldp, oldlenp, newp, &bcstats,
		    sizeof(struct bcachestats));
		return (ret);
	}
	return (EOPNOTSUPP);
}

int kinfo_vdebug = 1;
#define KINFO_VNODESLOP	10
/*
 * Dump vnode list (via sysctl).
 * Copyout address of vnode followed by vnode.
 */
/* ARGSUSED */
int
sysctl_vnode(char *where, size_t *sizep, struct proc *p)
{
	struct mount *mp, *nmp;
	struct vnode *vp, *nvp;
	char *bp = where, *savebp;
	char *ewhere;
	int error;

	if (where == NULL) {
		*sizep = (numvnodes + KINFO_VNODESLOP) * sizeof(struct e_vnode);
		return (0);
	}
	ewhere = where + *sizep;

	for (mp = CIRCLEQ_FIRST(&mountlist); mp != CIRCLEQ_END(&mountlist);
	    mp = nmp) {
		if (vfs_busy(mp, VB_READ|VB_NOWAIT)) {
			nmp = CIRCLEQ_NEXT(mp, mnt_list);
			continue;
		}
		savebp = bp;
again:
		for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL;
		    vp = nvp) {
			/*
			 * Check that the vp is still associated with
			 * this filesystem.  RACE: could have been
			 * recycled onto the same filesystem.
			 */
			if (vp->v_mount != mp) {
				if (kinfo_vdebug)
					printf("kinfo: vp changed\n");
				bp = savebp;
				goto again;
			}
			nvp = LIST_NEXT(vp, v_mntvnodes);
			if (bp + sizeof(struct e_vnode) > ewhere) {
				*sizep = bp - where;
				vfs_unbusy(mp);
				return (ENOMEM);
			}
			if ((error = copyout(&vp,
			    &((struct e_vnode *)bp)->vptr,
			    sizeof(struct vnode *))) ||
			   (error = copyout(vp,
			    &((struct e_vnode *)bp)->vnode,
			    sizeof(struct vnode)))) {
				vfs_unbusy(mp);
				return (error);
			}
			bp += sizeof(struct e_vnode);
		}

		nmp = CIRCLEQ_NEXT(mp, mnt_list);
		vfs_unbusy(mp);
	}

	*sizep = bp - where;

	return (0);
}

/*
 * Check to see if a filesystem is mounted on a block device.
 */
int
vfs_mountedon(struct vnode *vp)
{
	struct vnode *vq;
	int error = 0;

	if (vp->v_specmountpoint != NULL)
		return (EBUSY);
	if (vp->v_flag & VALIASED) {
		for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
			if (vq->v_rdev != vp->v_rdev ||
			    vq->v_type != vp->v_type)
				continue;
			if (vq->v_specmountpoint != NULL) {
				error = EBUSY;
				break;
			}
		}
	}
	return (error);
}

/*
 * Build hash lists of net addresses and hang them off the mount point.
 * Called by ufs_mount() to set up the lists of export addresses.
 */
int
vfs_hang_addrlist(struct mount *mp, struct netexport *nep,
    struct export_args *argp)
{
	struct netcred *np;
	struct radix_node_head *rnh;
	int i;
	struct radix_node *rn;
	struct sockaddr *saddr, *smask = 0;
	struct domain *dom;
	int error;

	if (argp->ex_addrlen == 0) {
		if (mp->mnt_flag & MNT_DEFEXPORTED)
			return (EPERM);
		np = &nep->ne_defexported;
		np->netc_exflags = argp->ex_flags;
		np->netc_anon = argp->ex_anon;
		np->netc_anon.cr_ref = 1;
		mp->mnt_flag |= MNT_DEFEXPORTED;
		return (0);
	}
	if (argp->ex_addrlen > MLEN || argp->ex_masklen > MLEN ||
	    argp->ex_addrlen < 0 || argp->ex_masklen < 0)
		return (EINVAL);
	i = sizeof(struct netcred) + argp->ex_addrlen + argp->ex_masklen;
	np = (struct netcred *)malloc(i, M_NETADDR, M_WAITOK|M_ZERO);
	saddr = (struct sockaddr *)(np + 1);
	error = copyin(argp->ex_addr, saddr, argp->ex_addrlen);
	if (error)
		goto out;
	if (saddr->sa_len > argp->ex_addrlen)
		saddr->sa_len = argp->ex_addrlen;
	if (argp->ex_masklen) {
		smask = (struct sockaddr *)((caddr_t)saddr + argp->ex_addrlen);
		error = copyin(argp->ex_mask, smask, argp->ex_masklen);
		if (error)
			goto out;
		if (smask->sa_len > argp->ex_masklen)
			smask->sa_len = argp->ex_masklen;
	}
	i = saddr->sa_family;
	if (i < 0 || i > AF_MAX) {
		error = EINVAL;
		goto out;
	}
	if ((rnh = nep->ne_rtable[i]) == 0) {
		/*
		 * Seems silly to initialize every AF when most are not
		 * used, do so on demand here
		 */
		for (dom = domains; dom; dom = dom->dom_next)
			if (dom->dom_family == i && dom->dom_rtattach) {
				dom->dom_rtattach((void **)&nep->ne_rtable[i],
					dom->dom_rtoffset);
				break;
			}
		if ((rnh = nep->ne_rtable[i]) == 0) {
			error = ENOBUFS;
			goto out;
		}
	}
	rn = (*rnh->rnh_addaddr)((caddr_t)saddr, (caddr_t)smask, rnh,
		np->netc_rnodes, 0);
	if (rn == 0 || np != (struct netcred *)rn) { /* already exists */
		error = EPERM;
		goto out;
	}
	np->netc_exflags = argp->ex_flags;
	np->netc_anon = argp->ex_anon;
	np->netc_anon.cr_ref = 1;
	return (0);
out:
	free(np, M_NETADDR);
	return (error);
}

/* ARGSUSED */
int
vfs_free_netcred(struct radix_node *rn, void *w, u_int id)
{
	struct radix_node_head *rnh = (struct radix_node_head *)w;

	(*rnh->rnh_deladdr)(rn->rn_key, rn->rn_mask, rnh, NULL);
	free(rn, M_NETADDR);
	return (0);
}

/*
 * Free the net address hash lists that are hanging off the mount points.
 */
void
vfs_free_addrlist(struct netexport *nep)
{
	int i;
	struct radix_node_head *rnh;

	for (i = 0; i <= AF_MAX; i++)
		if ((rnh = nep->ne_rtable[i]) != NULL) {
			(*rnh->rnh_walktree)(rnh, vfs_free_netcred, rnh);
			free(rnh, M_RTABLE);
			nep->ne_rtable[i] = 0;
		}
}

int
vfs_export(struct mount *mp, struct netexport *nep, struct export_args *argp)
{
	int error;

	if (argp->ex_flags & MNT_DELEXPORT) {
		vfs_free_addrlist(nep);
		mp->mnt_flag &= ~(MNT_EXPORTED | MNT_DEFEXPORTED);
	}
	if (argp->ex_flags & MNT_EXPORTED) {
		if ((error = vfs_hang_addrlist(mp, nep, argp)) != 0)
			return (error);
		mp->mnt_flag |= MNT_EXPORTED;
	}
	return (0);
}

struct netcred *
vfs_export_lookup(struct mount *mp, struct netexport *nep, struct mbuf *nam)
{
	struct netcred *np;
	struct radix_node_head *rnh;
	struct sockaddr *saddr;

	np = NULL;
	if (mp->mnt_flag & MNT_EXPORTED) {
		/*
		 * Lookup in the export list first.
		 */
		if (nam != NULL) {
			saddr = mtod(nam, struct sockaddr *);
			rnh = nep->ne_rtable[saddr->sa_family];
			if (rnh != NULL) {
				np = (struct netcred *)
					(*rnh->rnh_matchaddr)((caddr_t)saddr,
					    rnh);
				if (np && np->netc_rnodes->rn_flags & RNF_ROOT)
					np = NULL;
			}
		}
		/*
		 * If no address match, use the default if it exists.
		 */
		if (np == NULL && mp->mnt_flag & MNT_DEFEXPORTED)
			np = &nep->ne_defexported;
	}
	return (np);
}

/*
 * Do the usual access checking.
 * file_mode, uid and gid are from the vnode in question,
 * while acc_mode and cred are from the VOP_ACCESS parameter list
 */
int
vaccess(enum vtype type, mode_t file_mode, uid_t uid, gid_t gid,
    mode_t acc_mode, struct ucred *cred)
{
	mode_t mask;

	/* User id 0 always gets read/write access. */
	if (cred->cr_uid == 0) {
		/* For VEXEC, at least one of the execute bits must be set. */
		if ((acc_mode & VEXEC) && type != VDIR &&
		    (file_mode & (S_IXUSR|S_IXGRP|S_IXOTH)) == 0)
			return EACCES;
		return 0;
	}

	mask = 0;

	/* Otherwise, check the owner. */
	if (cred->cr_uid == uid) {
		if (acc_mode & VEXEC)
			mask |= S_IXUSR;
		if (acc_mode & VREAD)
			mask |= S_IRUSR;
		if (acc_mode & VWRITE)
			mask |= S_IWUSR;
		return (file_mode & mask) == mask ? 0 : EACCES;
	}

	/* Otherwise, check the groups. */
	if (cred->cr_gid == gid || groupmember(gid, cred)) {
		if (acc_mode & VEXEC)
			mask |= S_IXGRP;
		if (acc_mode & VREAD)
			mask |= S_IRGRP;
		if (acc_mode & VWRITE)
			mask |= S_IWGRP;
		return (file_mode & mask) == mask ? 0 : EACCES;
	}

	/* Otherwise, check everyone else. */
	if (acc_mode & VEXEC)
		mask |= S_IXOTH;
	if (acc_mode & VREAD)
		mask |= S_IROTH;
	if (acc_mode & VWRITE)
		mask |= S_IWOTH;
	return (file_mode & mask) == mask ? 0 : EACCES;
}
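
/*
 * A hypothetical sketch of a filesystem's VOP_ACCESS implementation
 * deferring to vaccess() with fields from its own inode (ip and ap
 * stand for the caller's inode and vop argument structures):
 *
 *	return (vaccess(vp->v_type, ip->i_mode & ALLPERMS,
 *	    ip->i_uid, ip->i_gid, ap->a_mode, ap->a_cred));
 */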

/*
 * Unmount all file systems.
 * We traverse the list in reverse order under the assumption that doing so
 * will avoid needing to worry about dependencies.
 */
void
vfs_unmountall(void)
{
	struct mount *mp, *nmp;
	int allerror, error, again = 1;

retry:
	allerror = 0;
	for (mp = CIRCLEQ_LAST(&mountlist); mp != CIRCLEQ_END(&mountlist);
	    mp = nmp) {
		nmp = CIRCLEQ_PREV(mp, mnt_list);
		if ((vfs_busy(mp, VB_WRITE|VB_NOWAIT)) != 0)
			continue;
		if ((error = dounmount(mp, MNT_FORCE, curproc, NULL)) != 0) {
			printf("unmount of %s failed with error %d\n",
			    mp->mnt_stat.f_mntonname, error);
			allerror = 1;
		}
	}

	if (allerror) {
		printf("WARNING: some file systems would not unmount\n");
		if (again) {
			printf("retrying\n");
			again = 0;
			goto retry;
		}
	}
}

/*
 * Sync and unmount file systems before shutting down.
 */
void
vfs_shutdown(void)
{
#ifdef ACCOUNTING
	extern void acct_shutdown(void);

	acct_shutdown();
#endif

	/* XXX Should suspend scheduling. */
	(void) spl0();

	printf("syncing disks... ");

	if (panicstr == 0) {
		/* Sync before unmount, in case we hang on something. */
		sys_sync(&proc0, (void *)0, (register_t *)0);

		/* Unmount file systems. */
		vfs_unmountall();
	}

	if (vfs_syncwait(1))
		printf("giving up\n");
	else
		printf("done\n");
}

/*
 * perform sync() operation and wait for buffers to flush.
 * assumptions: called w/ scheduler disabled and physical io enabled
 * for now called at spl0() XXX
 */
int
vfs_syncwait(int verbose)
{
	struct buf *bp;
	int iter, nbusy, dcount, s;
	struct proc *p;

	p = curproc ? curproc : &proc0;
	sys_sync(p, (void *)0, (register_t *)0);

	/* Wait for sync to finish. */
	dcount = 10000;
	for (iter = 0; iter < 20; iter++) {
		nbusy = 0;
		LIST_FOREACH(bp, &bufhead, b_list) {
			if ((bp->b_flags & (B_BUSY|B_INVAL|B_READ)) == B_BUSY)
				nbusy++;
			/*
			 * With soft updates, some buffers that are
			 * written will be remarked as dirty until other
			 * buffers are written.
			 */
			if (bp->b_flags & B_DELWRI) {
				s = splbio();
				bremfree(bp);
				buf_acquire(bp);
				splx(s);
				nbusy++;
				bawrite(bp);
				if (dcount-- <= 0) {
					if (verbose)
						printf("softdep ");
					return 1;
				}
			}
		}
		if (nbusy == 0)
			break;
		if (verbose)
			printf("%d ", nbusy);
		DELAY(40000 * iter);
	}

	return nbusy;
}

/*
 * posix file system related system variables.
 */
int
fs_posix_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen, struct proc *p)
{
	/* all sysctl names at this level are terminal */
	if (namelen != 1)
		return (ENOTDIR);

	switch (name[0]) {
	case FS_POSIX_SETUID:
		if (newp && securelevel > 0)
			return (EPERM);
		return (sysctl_int(oldp, oldlenp, newp, newlen, &suid_clear));
	default:
		return (EOPNOTSUPP);
	}
	/* NOTREACHED */
}

/*
 * file system related system variables.
 */
int
fs_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen, struct proc *p)
{
	sysctlfn *fn;

	switch (name[0]) {
	case FS_POSIX:
		fn = fs_posix_sysctl;
		break;
	default:
		return (EOPNOTSUPP);
	}
	return (*fn)(name + 1, namelen - 1, oldp, oldlenp, newp, newlen, p);
}


/*
 * Routines dealing with vnodes and buffers
 */

/*
 * Wait for all outstanding I/Os to complete
 *
 * Manipulates v_numoutput. Must be called at splbio()
 */
int
vwaitforio(struct vnode *vp, int slpflag, char *wmesg, int timeo)
{
	int error = 0;

	splassert(IPL_BIO);

	while (vp->v_numoutput) {
		vp->v_bioflag |= VBIOWAIT;
		error = tsleep(&vp->v_numoutput,
		    slpflag | (PRIBIO + 1), wmesg, timeo);
		if (error)
			break;
	}

	return (error);
}
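
/*
 * A sketch of draining a vnode's pending writes before tearing down
 * its buffers, as vinvalbuf() below does (the wmesg string is the
 * caller's choice):
 *
 *	s = splbio();
 *	vwaitforio(vp, 0, "drain", 0);
 *	splx(s);
 */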

/*
 * Update outstanding I/O count and do wakeup if requested.
 *
 * Manipulates v_numoutput. Must be called at splbio()
 */
void
vwakeup(struct vnode *vp)
{
	splassert(IPL_BIO);

	if (vp != NULL) {
		if (vp->v_numoutput-- == 0)
			panic("vwakeup: neg numoutput");
		if ((vp->v_bioflag & VBIOWAIT) && vp->v_numoutput == 0) {
			vp->v_bioflag &= ~VBIOWAIT;
			wakeup(&vp->v_numoutput);
		}
	}
}

/*
 * Flush out and invalidate all buffers associated with a vnode.
 * Called with the underlying object locked.
 */
int
vinvalbuf(struct vnode *vp, int flags, struct ucred *cred, struct proc *p,
    int slpflag, int slptimeo)
{
	struct buf *bp;
	struct buf *nbp, *blist;
	int s, error;

#ifdef VFSDEBUG
	if ((vp->v_flag & VLOCKSWORK) && !VOP_ISLOCKED(vp))
		panic("vinvalbuf(): vp isn't locked");
#endif

	if (flags & V_SAVE) {
		s = splbio();
		vwaitforio(vp, 0, "vinvalbuf", 0);
		if (!LIST_EMPTY(&vp->v_dirtyblkhd)) {
			splx(s);
			if ((error = VOP_FSYNC(vp, cred, MNT_WAIT, p)) != 0)
				return (error);
			s = splbio();
			if (vp->v_numoutput > 0 ||
			    !LIST_EMPTY(&vp->v_dirtyblkhd))
				panic("vinvalbuf: dirty bufs");
		}
		splx(s);
	}
loop:
	s = splbio();
	for (;;) {
		if ((blist = LIST_FIRST(&vp->v_cleanblkhd)) &&
		    (flags & V_SAVEMETA))
			while (blist && blist->b_lblkno < 0)
				blist = LIST_NEXT(blist, b_vnbufs);
		if (blist == NULL &&
		    (blist = LIST_FIRST(&vp->v_dirtyblkhd)) &&
		    (flags & V_SAVEMETA))
			while (blist && blist->b_lblkno < 0)
				blist = LIST_NEXT(blist, b_vnbufs);
		if (!blist)
			break;

		for (bp = blist; bp; bp = nbp) {
			nbp = LIST_NEXT(bp, b_vnbufs);
			if (flags & V_SAVEMETA && bp->b_lblkno < 0)
				continue;
			if (bp->b_flags & B_BUSY) {
				bp->b_flags |= B_WANTED;
				error = tsleep(bp, slpflag | (PRIBIO + 1),
				    "vinvalbuf", slptimeo);
				if (error) {
					splx(s);
					return (error);
				}
				break;
			}
			bremfree(bp);
			buf_acquire(bp);
			/*
			 * XXX Since there are no node locks for NFS, I believe
			 * there is a slight chance that a delayed write will
			 * occur while sleeping just above, so check for it.
			 */
			if ((bp->b_flags & B_DELWRI) && (flags & V_SAVE)) {
				splx(s);
				(void) VOP_BWRITE(bp);
				goto loop;
			}
			bp->b_flags |= B_INVAL;
			brelse(bp);
		}
	}
	if (!(flags & V_SAVEMETA) &&
	    (!LIST_EMPTY(&vp->v_dirtyblkhd) || !LIST_EMPTY(&vp->v_cleanblkhd)))
		panic("vinvalbuf: flush failed");
	splx(s);
	return (0);
}
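
/*
 * A sketch of the common write-back-then-invalidate call, as made by
 * vclean() above (cred here stands for the caller's credentials):
 *
 *	error = vinvalbuf(vp, V_SAVE, cred, p, 0, 0);
 */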

void
vflushbuf(struct vnode *vp, int sync)
{
	struct buf *bp, *nbp;
	int s;

loop:
	s = splbio();
	for (bp = LIST_FIRST(&vp->v_dirtyblkhd);
	    bp != LIST_END(&vp->v_dirtyblkhd); bp = nbp) {
		nbp = LIST_NEXT(bp, b_vnbufs);
		if ((bp->b_flags & B_BUSY))
			continue;
		if ((bp->b_flags & B_DELWRI) == 0)
			panic("vflushbuf: not dirty");
		bremfree(bp);
		buf_acquire(bp);
		splx(s);
		/*
		 * Wait for I/O associated with indirect blocks to complete,
		 * since there is no way to quickly wait for them below.
		 */
		if (bp->b_vp == vp || sync == 0)
			(void) bawrite(bp);
		else
			(void) bwrite(bp);
		goto loop;
	}
	if (sync == 0) {
		splx(s);
		return;
	}
	vwaitforio(vp, 0, "vflushbuf", 0);
	if (!LIST_EMPTY(&vp->v_dirtyblkhd)) {
		splx(s);
#ifdef DIAGNOSTIC
		vprint("vflushbuf: dirty", vp);
#endif
		goto loop;
	}
	splx(s);
}

/*
 * Associate a buffer with a vnode.
 *
 * Manipulates buffer vnode queues. Must be called at splbio().
 */
void
bgetvp(struct vnode *vp, struct buf *bp)
{
	splassert(IPL_BIO);

	if (bp->b_vp)
		panic("bgetvp: not free");
	vhold(vp);
	bp->b_vp = vp;
	if (vp->v_type == VBLK || vp->v_type == VCHR)
		bp->b_dev = vp->v_rdev;
	else
		bp->b_dev = NODEV;
	/*
	 * Insert onto list for new vnode.
	 */
	bufinsvn(bp, &vp->v_cleanblkhd);
}

/*
 * Disassociate a buffer from a vnode.
 *
 * Manipulates vnode buffer queues. Must be called at splbio().
 */
void
brelvp(struct buf *bp)
{
	struct vnode *vp;

	splassert(IPL_BIO);

	if ((vp = bp->b_vp) == (struct vnode *) 0)
		panic("brelvp: NULL");
	/*
	 * Delete from old vnode list, if on one.
	 */
	if (LIST_NEXT(bp, b_vnbufs) != NOLIST)
		bufremvn(bp);
	if ((vp->v_bioflag & VBIOONSYNCLIST) &&
	    LIST_FIRST(&vp->v_dirtyblkhd) == NULL) {
		vp->v_bioflag &= ~VBIOONSYNCLIST;
		LIST_REMOVE(vp, v_synclist);
	}
	bp->b_vp = NULL;

	vdrop(vp);
}

/*
 * Replaces the current vnode associated with the buffer, if any,
 * with a new vnode.
 *
 * If an output I/O is pending on the buffer, the old vnode
 * I/O count is adjusted.
 *
 * Ignores vnode buffer queues. Must be called at splbio().
 */
void
buf_replacevnode(struct buf *bp, struct vnode *newvp)
{
	struct vnode *oldvp = bp->b_vp;

	splassert(IPL_BIO);

	if (oldvp)
		brelvp(bp);

	if ((bp->b_flags & (B_READ | B_DONE)) == 0) {
		newvp->v_numoutput++;	/* put it on swapdev */
		vwakeup(oldvp);
	}

	bgetvp(newvp, bp);
	bufremvn(bp);
}

/*
 * Used to assign buffers to the appropriate clean or dirty list on
 * the vnode and to add newly dirty vnodes to the appropriate
 * filesystem syncer list.
 *
 * Manipulates vnode buffer queues. Must be called at splbio().
 */
void
reassignbuf(struct buf *bp)
{
	struct buflists *listheadp;
	int delay;
	struct vnode *vp = bp->b_vp;

	splassert(IPL_BIO);

	/*
	 * Delete from old vnode list, if on one.
	 */
	if (LIST_NEXT(bp, b_vnbufs) != NOLIST)
		bufremvn(bp);

	/*
	 * If dirty, put on list of dirty buffers;
	 * otherwise insert onto list of clean buffers.
	 */
	if ((bp->b_flags & B_DELWRI) == 0) {
		listheadp = &vp->v_cleanblkhd;
		if ((vp->v_bioflag & VBIOONSYNCLIST) &&
		    LIST_FIRST(&vp->v_dirtyblkhd) == NULL) {
			vp->v_bioflag &= ~VBIOONSYNCLIST;
			LIST_REMOVE(vp, v_synclist);
		}
	} else {
		listheadp = &vp->v_dirtyblkhd;
		if ((vp->v_bioflag & VBIOONSYNCLIST) == 0) {
			switch (vp->v_type) {
			case VDIR:
				delay = syncdelay / 2;
				break;
			case VBLK:
				if (vp->v_specmountpoint != NULL) {
					delay = syncdelay / 3;
					break;
				}
				/* FALLTHROUGH */
			default:
				delay = syncdelay;
			}
			vn_syncer_add_to_worklist(vp, delay);
		}
	}
	bufinsvn(bp, listheadp);
}

int
vfs_register(struct vfsconf *vfs)
{
	struct vfsconf *vfsp;
	struct vfsconf **vfspp;

#ifdef DIAGNOSTIC
	/* Paranoia? */
	if (vfs->vfc_refcount != 0)
		printf("vfs_register called with vfc_refcount > 0\n");
#endif

	/* Check if filesystem already known */
	for (vfspp = &vfsconf, vfsp = vfsconf; vfsp;
	    vfspp = &vfsp->vfc_next, vfsp = vfsp->vfc_next)
		if (strcmp(vfsp->vfc_name, vfs->vfc_name) == 0)
			return (EEXIST);

	if (vfs->vfc_typenum > maxvfsconf)
		maxvfsconf = vfs->vfc_typenum;

	vfs->vfc_next = NULL;

	/* Add to the end of the list */
	*vfspp = vfs;

	/* Call vfs_init() */
	if (vfs->vfc_vfsops->vfs_init)
		(*(vfs->vfc_vfsops->vfs_init))(vfs);

	return 0;
}
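
/*
 * A hypothetical sketch of registering a filesystem from a module
 * load routine ("myfs" names are the module's own, not kernel API):
 *
 *	extern struct vfsconf myfs_vfsconf;
 *
 *	if ((error = vfs_register(&myfs_vfsconf)) != 0)
 *		return (error);
 */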

int
vfs_unregister(struct vfsconf *vfs)
{
	struct vfsconf *vfsp;
	struct vfsconf **vfspp;
	int maxtypenum;

	/* Find our vfsconf struct */
	for (vfspp = &vfsconf, vfsp = vfsconf; vfsp;
	    vfspp = &vfsp->vfc_next, vfsp = vfsp->vfc_next) {
		if (strcmp(vfsp->vfc_name, vfs->vfc_name) == 0)
			break;
	}

	if (!vfsp)			/* Not found */
		return (ENOENT);

	if (vfsp->vfc_refcount)		/* In use */
		return (EBUSY);

	/* Remove from list and free */
	*vfspp = vfsp->vfc_next;

	maxtypenum = 0;

	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
		if (vfsp->vfc_typenum > maxtypenum)
			maxtypenum = vfsp->vfc_typenum;

	maxvfsconf = maxtypenum;
	return 0;
}

/*
 * Check if vnode represents a disk device
 */
int
vn_isdisk(struct vnode *vp, int *errp)
{
	if (vp->v_type != VBLK && vp->v_type != VCHR)
		return (0);

	return (1);
}

#ifdef DDB
#include <machine/db_machdep.h>
#include <ddb/db_interface.h>
#include <ddb/db_output.h>

void
vfs_buf_print(void *b, int full, int (*pr)(const char *, ...))
{
	struct buf *bp = b;

	(*pr)("  vp %p lblkno 0x%llx blkno 0x%llx dev 0x%x\n"
	      "  proc %p error %d flags %b\n",
	    bp->b_vp, (int64_t)bp->b_lblkno, (int64_t)bp->b_blkno, bp->b_dev,
	    bp->b_proc, bp->b_error, bp->b_flags, B_BITS);

	(*pr)("  bufsize 0x%lx bcount 0x%lx resid 0x%lx sync 0x%x\n"
	      "  data %p saveaddr %p dep %p iodone %p\n",
	    bp->b_bufsize, bp->b_bcount, (long)bp->b_resid, bp->b_synctime,
	    bp->b_data, bp->b_saveaddr, LIST_FIRST(&bp->b_dep), bp->b_iodone);

	(*pr)("  dirty {off 0x%x end 0x%x} valid {off 0x%x end 0x%x}\n",
	    bp->b_dirtyoff, bp->b_dirtyend, bp->b_validoff, bp->b_validend);

#ifdef FFS_SOFTUPDATES
	if (full)
		softdep_print(bp, full, pr);
#endif
}

const char *vtypes[] = { VTYPE_NAMES };
const char *vtags[] = { VTAG_NAMES };

void
vfs_vnode_print(void *v, int full, int (*pr)(const char *, ...))
{
	struct vnode *vp = v;

#define	NENTS(n)	(sizeof n / sizeof(n[0]))
	(*pr)("tag %s(%d) type %s(%d) mount %p typedata %p\n",
	      vp->v_tag > NENTS(vtags)? "<unk>":vtags[vp->v_tag], vp->v_tag,
	      vp->v_type > NENTS(vtypes)? "<unk>":vtypes[vp->v_type],
	      vp->v_type, vp->v_mount, vp->v_mountedhere);

	(*pr)("data %p usecount %d writecount %ld holdcnt %ld numoutput %d\n",
	      vp->v_data, vp->v_usecount, vp->v_writecount,
	      vp->v_holdcnt, vp->v_numoutput);

	/* uvm_object_printit(&vp->v_uobj, full, pr); */

	if (full) {
		struct buf *bp;

		(*pr)("clean bufs:\n");
		LIST_FOREACH(bp, &vp->v_cleanblkhd, b_vnbufs) {
			(*pr)(" bp %p\n", bp);
			vfs_buf_print(bp, full, pr);
		}

		(*pr)("dirty bufs:\n");
		LIST_FOREACH(bp, &vp->v_dirtyblkhd, b_vnbufs) {
			(*pr)(" bp %p\n", bp);
			vfs_buf_print(bp, full, pr);
		}
	}
}

void
vfs_mount_print(struct mount *mp, int full, int (*pr)(const char *, ...))
{
	struct vfsconf *vfc = mp->mnt_vfc;
	struct vnode *vp;
	int cnt = 0;

	(*pr)("flags %b\nvnodecovered %p syncer %p data %p\n",
	    mp->mnt_flag, MNT_BITS,
	    mp->mnt_vnodecovered, mp->mnt_syncer, mp->mnt_data);

	(*pr)("vfsconf: ops %p name \"%s\" num %d ref %d flags 0x%x\n",
	    vfc->vfc_vfsops, vfc->vfc_name, vfc->vfc_typenum,
	    vfc->vfc_refcount, vfc->vfc_flags);

	(*pr)("statvfs cache: bsize %x iosize %x\nblocks %llu free %llu avail %lld\n",
	    mp->mnt_stat.f_bsize, mp->mnt_stat.f_iosize, mp->mnt_stat.f_blocks,
	    mp->mnt_stat.f_bfree, mp->mnt_stat.f_bavail);

	(*pr)("  files %llu ffiles %llu favail %lld\n", mp->mnt_stat.f_files,
	    mp->mnt_stat.f_ffree, mp->mnt_stat.f_favail);

	(*pr)("  f_fsidx {0x%x, 0x%x} owner %u ctime 0x%x\n",
	    mp->mnt_stat.f_fsid.val[0], mp->mnt_stat.f_fsid.val[1],
	    mp->mnt_stat.f_owner, mp->mnt_stat.f_ctime);

	(*pr)("  syncwrites %llu asyncwrites = %llu\n",
	    mp->mnt_stat.f_syncwrites, mp->mnt_stat.f_asyncwrites);

	(*pr)("  syncreads %llu asyncreads = %llu\n",
	    mp->mnt_stat.f_syncreads, mp->mnt_stat.f_asyncreads);

	(*pr)("  fstype \"%s\" mnton \"%s\" mntfrom \"%s\"\n",
	    mp->mnt_stat.f_fstypename, mp->mnt_stat.f_mntonname,
	    mp->mnt_stat.f_mntfromname);

	(*pr)("locked vnodes:");
	/* XXX would take mountlist lock, except ddb has no context */
	LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes)
		if (VOP_ISLOCKED(vp)) {
			if (!LIST_NEXT(vp, v_mntvnodes))
				(*pr)(" %p", vp);
			else if (!(cnt++ % (72 / (sizeof(void *) * 2 + 4))))
				(*pr)("\n\t%p", vp);
			else
				(*pr)(", %p", vp);
		}
	(*pr)("\n");

	if (full) {
		(*pr)("all vnodes:\n\t");
		/* XXX would take mountlist lock, except ddb has no context */
		LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes)
			if (!LIST_NEXT(vp, v_mntvnodes))
				(*pr)(" %p", vp);
			else if (!(cnt++ % (72 / (sizeof(void *) * 2 + 4))))
				(*pr)(" %p,\n\t", vp);
			else
				(*pr)(" %p,", vp);
		(*pr)("\n");
	}
}
#endif /* DDB */

void
copy_statfs_info(struct statfs *sbp, const struct mount *mp)
{
	const struct statfs *mbp;

	strncpy(sbp->f_fstypename, mp->mnt_vfc->vfc_name, MFSNAMELEN);

	if (sbp == (mbp = &mp->mnt_stat))
		return;

	sbp->f_fsid = mbp->f_fsid;
	sbp->f_owner = mbp->f_owner;
	sbp->f_flags = mbp->f_flags;
	sbp->f_syncwrites = mbp->f_syncwrites;
	sbp->f_asyncwrites = mbp->f_asyncwrites;
	sbp->f_syncreads = mbp->f_syncreads;
	sbp->f_asyncreads = mbp->f_asyncreads;
	sbp->f_namemax = mbp->f_namemax;
	bcopy(mp->mnt_stat.f_mntonname, sbp->f_mntonname, MNAMELEN);
	bcopy(mp->mnt_stat.f_mntfromname, sbp->f_mntfromname, MNAMELEN);
	bcopy(&mp->mnt_stat.mount_info.ufs_args, &sbp->mount_info.ufs_args,
	    sizeof(struct ufs_args));
}
2297