/*	$OpenBSD: vfs_subr.c,v 1.144 2007/04/13 17:09:22 thib Exp $	*/
/*	$NetBSD: vfs_subr.c,v 1.53 1996/04/22 01:39:13 christos Exp $	*/

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.13 (Berkeley) 4/18/94
 */

/*
 * External virtual filesystem routines
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/time.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/stat.h>
#include <sys/namei.h>
#include <sys/ucred.h>
#include <sys/buf.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/domain.h>
#include <sys/mbuf.h>
#include <sys/syscallargs.h>
#include <sys/pool.h>

#include <uvm/uvm_extern.h>
#include <sys/sysctl.h>

#include <miscfs/specfs/specdev.h>

enum vtype iftovt_tab[16] = {
	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
};

int	vttoif_tab[9] = {
	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
	S_IFSOCK, S_IFIFO, S_IFMT,
};
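
/*
 * These tables back the IFTOVT() and VTTOIF() macros in <sys/vnode.h>,
 * which translate between inode mode bits and vnode types.  A sketch
 * of the mapping (illustrative only):
 *
 *	IFTOVT(S_IFDIR)	== iftovt_tab[S_IFDIR >> 12] == VDIR
 *	VTTOIF(VDIR)	== vttoif_tab[VDIR] == S_IFDIR
 */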

int doforce = 1;		/* 1 => permit forcible unmounting */
int prtactive = 0;		/* 1 => print out reclaim of active vnodes */
int suid_clear = 1;		/* 1 => clear SUID / SGID on owner change */

/*
 * Insq/Remq for the vnode usage lists.
 */
#define	bufinsvn(bp, dp)	LIST_INSERT_HEAD(dp, bp, b_vnbufs)
#define	bufremvn(bp) {							\
	LIST_REMOVE(bp, b_vnbufs);					\
	LIST_NEXT(bp, b_vnbufs) = NOLIST;				\
}

struct freelst vnode_hold_list;	/* list of vnodes referencing buffers */
struct freelst vnode_free_list;	/* vnode free list */

struct mntlist mountlist;	/* mounted filesystem list */
static struct simplelock mntid_slock;
struct simplelock mntvnode_slock;
struct simplelock vnode_free_list_slock;
struct simplelock spechash_slock;

void	vclean(struct vnode *, int, struct proc *);

void insmntque(struct vnode *, struct mount *);
int getdevvp(dev_t, struct vnode **, enum vtype);

int vfs_hang_addrlist(struct mount *, struct netexport *,
				  struct export_args *);
int vfs_free_netcred(struct radix_node *, void *);
void vfs_free_addrlist(struct netexport *);
void vputonfreelist(struct vnode *);

int vflush_vnode(struct vnode *, void *);

#ifdef DEBUG
void printlockedvnodes(void);
#endif

struct pool vnode_pool;
int desiredvnodes;

/*
 * Initialize the vnode management data structures.
 */
void
vntblinit(void)
{

	/* every buffer needs its vnode! */
	desiredvnodes = nbuf;
	pool_init(&vnode_pool, sizeof(struct vnode), 0, 0, 0, "vnodes",
	    &pool_allocator_nointr);
	simple_lock_init(&mntvnode_slock);
	simple_lock_init(&mntid_slock);
	simple_lock_init(&spechash_slock);
	TAILQ_INIT(&vnode_hold_list);
	TAILQ_INIT(&vnode_free_list);
	simple_lock_init(&vnode_free_list_slock);
	CIRCLEQ_INIT(&mountlist);
	/*
	 * Initialize the filesystem syncer.
	 */
	vn_initialize_syncerd();
}

/*
 * Mark a mount point as busy. Used to synchronize access and to delay
 * unmounting.
 *
 * The default behaviour is to attempt getting a READ lock; in case of an
 * ongoing unmount, wait for it to finish and then return failure.
 */
int
vfs_busy(struct mount *mp, int flags)
{
	int rwflags = 0;

	/* new mountpoints need their lock initialised */
	if (mp->mnt_lock.rwl_name == NULL)
		rw_init(&mp->mnt_lock, "vfslock");

	if (flags & VB_WRITE)
		rwflags |= RW_WRITE;
	else
		rwflags |= RW_READ;

	if (flags & VB_WAIT)
		rwflags |= RW_SLEEPFAIL;
	else
		rwflags |= RW_NOSLEEP;

	if (rw_enter(&mp->mnt_lock, rwflags))
		return (EBUSY);

	return (0);
}

/*
 * Free a busy file system
 */
void
vfs_unbusy(struct mount *mp)
{
	rw_exit(&mp->mnt_lock);
}
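
/*
 * Illustrative sketch of the busy/unbusy discipline (error handling
 * elided); see printlockedvnodes() below for a real caller:
 *
 *	if (vfs_busy(mp, VB_READ|VB_NOWAIT) == 0) {
 *		... examine the mount point ...
 *		vfs_unbusy(mp);
 *	}
 */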

int
vfs_isbusy(struct mount *mp)
{
	if (RWLOCK_OWNER(&mp->mnt_lock) > 0)
		return (1);
	else
		return (0);
}

/*
 * Lookup a filesystem type, and if found allocate and initialize
 * a mount structure for it.
 *
 * Devname is usually updated by mount(8) after booting.
 */
int
vfs_rootmountalloc(char *fstypename, char *devname, struct mount **mpp)
{
	struct vfsconf *vfsp;
	struct mount *mp;

	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
		if (!strcmp(vfsp->vfc_name, fstypename))
			break;
	if (vfsp == NULL)
		return (ENODEV);
	mp = malloc((u_long)sizeof(struct mount), M_MOUNT, M_WAITOK);
	bzero((char *)mp, (u_long)sizeof(struct mount));
	(void) vfs_busy(mp, VB_READ|VB_NOWAIT);
	LIST_INIT(&mp->mnt_vnodelist);
	mp->mnt_vfc = vfsp;
	mp->mnt_op = vfsp->vfc_vfsops;
	mp->mnt_flag = MNT_RDONLY;
	mp->mnt_vnodecovered = NULLVP;
	vfsp->vfc_refcount++;
	mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;
	strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
	mp->mnt_stat.f_mntonname[0] = '/';
	(void) copystr(devname, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, 0);
	*mpp = mp;
	return (0);
}

/*
 * Find an appropriate filesystem to use for the root. If a filesystem
 * has not been preselected, walk through the list of known filesystems
 * trying those that have mountroot routines, and try them until one
 * works or we have tried them all.
 */
int
vfs_mountroot(void)
{
	struct vfsconf *vfsp;
	int error;

	if (mountroot != NULL)
		return ((*mountroot)());
	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) {
		if (vfsp->vfc_mountroot == NULL)
			continue;
		if ((error = (*vfsp->vfc_mountroot)()) == 0)
			return (0);
		printf("%s_mountroot failed: %d\n", vfsp->vfc_name, error);
	}
	return (ENODEV);
}

/*
 * Lookup a mount point by filesystem identifier.
 */
struct mount *
vfs_getvfs(fsid_t *fsid)
{
	struct mount *mp;

	CIRCLEQ_FOREACH(mp, &mountlist, mnt_list) {
		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
			return (mp);
		}
	}

	return ((struct mount *)0);
}

/*
 * Get a new unique fsid
 */
void
vfs_getnewfsid(struct mount *mp)
{
	static u_short xxxfs_mntid;

	fsid_t tfsid;
	int mtype;

	simple_lock(&mntid_slock);
	mtype = mp->mnt_vfc->vfc_typenum;
	mp->mnt_stat.f_fsid.val[0] = makedev(nblkdev + mtype, 0);
	mp->mnt_stat.f_fsid.val[1] = mtype;
	if (xxxfs_mntid == 0)
		++xxxfs_mntid;
	tfsid.val[0] = makedev(nblkdev + mtype, xxxfs_mntid);
	tfsid.val[1] = mtype;
	if (!CIRCLEQ_EMPTY(&mountlist)) {
		while (vfs_getvfs(&tfsid)) {
			tfsid.val[0]++;
			xxxfs_mntid++;
		}
	}
	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
	simple_unlock(&mntid_slock);
}

/*
 * Make a 'unique' number from a mount type name.
 * Note that this is no longer used for ffs which
 * now has an on-disk filesystem id.
 */
long
makefstype(char *type)
{
	long rv;

	for (rv = 0; *type; type++) {
		rv <<= 2;
		rv ^= *type;
	}
	return rv;
}
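
/*
 * For illustration, the fold above shifts and XORs each byte of the
 * name, e.g. makefstype("ffs") == ((('f' << 2) ^ 'f') << 2) ^ 's'
 * == 0x78b.
 */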

/*
 * Set vnode attributes to VNOVAL
 */
void
vattr_null(struct vattr *vap)
{

	vap->va_type = VNON;
	/* XXX These next two used to be one line, but for a GCC bug. */
	vap->va_size = VNOVAL;
	vap->va_bytes = VNOVAL;
	vap->va_mode = vap->va_nlink = vap->va_uid = vap->va_gid =
		vap->va_fsid = vap->va_fileid =
		vap->va_blocksize = vap->va_rdev =
		vap->va_atime.tv_sec = vap->va_atime.tv_nsec =
		vap->va_mtime.tv_sec = vap->va_mtime.tv_nsec =
		vap->va_ctime.tv_sec = vap->va_ctime.tv_nsec =
		vap->va_flags = vap->va_gen = VNOVAL;
	vap->va_vaflags = 0;
}

/*
 * Routines having to do with the management of the vnode table.
 */
extern int (**dead_vnodeop_p)(void *);
long numvnodes;

/*
 * Return the next vnode from the free list.
 */
int
getnewvnode(enum vtagtype tag, struct mount *mp, int (**vops)(void *),
    struct vnode **vpp)
{
	struct proc *p = curproc;
	struct freelst *listhd;
	static int toggle;
	struct vnode *vp;
	int s;

	/*
	 * We must choose whether to allocate a new vnode or recycle an
	 * existing one. The criterion for allocating a new one is that
	 * the total number of vnodes is less than the number desired or
	 * there are no vnodes on either free list. Generally we only
	 * want to recycle vnodes that have no buffers associated with
	 * them, so we look first on the vnode_free_list. If it is empty,
	 * we next consider vnodes with referencing buffers on the
	 * vnode_hold_list. The toggle ensures that half the time we
	 * will use a vnode from the vnode_hold_list, and half the time
	 * we will allocate a new one unless the list has grown to twice
	 * the desired size. We are reluctant to recycle vnodes from the
	 * vnode_hold_list because we will lose the identity of all their
	 * referencing buffers.
	 */
	toggle ^= 1;
	if (numvnodes > 2 * desiredvnodes)
		toggle = 0;

	simple_lock(&vnode_free_list_slock);
	s = splbio();
	if ((numvnodes < desiredvnodes) ||
	    ((TAILQ_FIRST(listhd = &vnode_free_list) == NULL) &&
	    ((TAILQ_FIRST(listhd = &vnode_hold_list) == NULL) || toggle))) {
		splx(s);
		simple_unlock(&vnode_free_list_slock);
		vp = pool_get(&vnode_pool, PR_WAITOK);
		bzero((char *)vp, sizeof *vp);
		numvnodes++;
	} else {
		for (vp = TAILQ_FIRST(listhd); vp != NULLVP;
		    vp = TAILQ_NEXT(vp, v_freelist)) {
			if (VOP_ISLOCKED(vp) == 0)
				break;
		}
		/*
		 * Unless this is a bad time of the month, at most
		 * the first NCPUS items on the free list are
		 * locked, so this is close enough to being empty.
		 */
		if (vp == NULL) {
			splx(s);
			simple_unlock(&vnode_free_list_slock);
			tablefull("vnode");
			*vpp = 0;
			return (ENFILE);
		}

#ifdef DIAGNOSTIC
		if (vp->v_usecount) {
			vprint("free vnode", vp);
			panic("free vnode isn't");
		}
#endif

		TAILQ_REMOVE(listhd, vp, v_freelist);
		vp->v_bioflag &= ~VBIOONFREELIST;
		splx(s);

		simple_unlock(&vnode_free_list_slock);
		if (vp->v_type != VBAD)
			vgonel(vp, p);
#ifdef DIAGNOSTIC
		if (vp->v_data) {
			vprint("cleaned vnode", vp);
			panic("cleaned vnode isn't");
		}
		s = splbio();
		if (vp->v_numoutput)
			panic("Clean vnode has pending I/O's");
		splx(s);
#endif
		vp->v_flag = 0;
		vp->v_socket = 0;
	}
	vp->v_type = VNON;
	cache_purge(vp);
	vp->v_tag = tag;
	vp->v_op = vops;
	insmntque(vp, mp);
	*vpp = vp;
	vp->v_usecount = 1;
	vp->v_data = 0;
	simple_lock_init(&vp->v_uvm.u_obj.vmobjlock);
	return (0);
}
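
/*
 * Illustrative sketch of a typical caller (the tag and the vnode
 * operations vector are examples; each filesystem passes its own):
 *
 *	struct vnode *vp;
 *	int error;
 *
 *	error = getnewvnode(VT_UFS, mp, ffs_vnodeop_p, &vp);
 *	if (error)
 *		return (error);
 *	... hang fs-private state off vp->v_data ...
 */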

/*
 * Move a vnode from one mount queue to another.
 */
void
insmntque(struct vnode *vp, struct mount *mp)
{
	simple_lock(&mntvnode_slock);

	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mount != NULL)
		LIST_REMOVE(vp, v_mntvnodes);
	/*
	 * Insert into list of vnodes for the new mount point, if available.
	 */
	if ((vp->v_mount = mp) != NULL)
		LIST_INSERT_HEAD(&mp->mnt_vnodelist, vp, v_mntvnodes);

	simple_unlock(&mntvnode_slock);
}

/*
 * Create a vnode for a block device.
 * Used for root filesystem, argdev, and swap areas.
 * Also used for memory file system special devices.
 */
int
bdevvp(dev_t dev, struct vnode **vpp)
{
	return (getdevvp(dev, vpp, VBLK));
}

/*
 * Create a vnode for a character device.
 * Used for console handling.
 */
int
cdevvp(dev_t dev, struct vnode **vpp)
{
	return (getdevvp(dev, vpp, VCHR));
}

/*
 * Create a vnode for a device.
 * Used by bdevvp (block device) for root file system etc.,
 * and by cdevvp (character device) for console.
 */
int
getdevvp(dev_t dev, struct vnode **vpp, enum vtype type)
{
	struct vnode *vp;
	struct vnode *nvp;
	int error;

	if (dev == NODEV) {
		*vpp = NULLVP;
		return (0);
	}
	error = getnewvnode(VT_NON, NULL, spec_vnodeop_p, &nvp);
	if (error) {
		*vpp = NULLVP;
		return (error);
	}
	vp = nvp;
	vp->v_type = type;
	if ((nvp = checkalias(vp, dev, NULL)) != 0) {
		vput(vp);
		vp = nvp;
	}
	*vpp = vp;
	return (0);
}

/*
 * Check to see if the new vnode represents a special device
 * for which we already have a vnode (either because of
 * bdevvp() or because of a different vnode representing
 * the same block device). If such an alias exists, deallocate
 * the existing contents and return the aliased vnode. The
 * caller is responsible for filling it with its new contents.
 */
struct vnode *
checkalias(struct vnode *nvp, dev_t nvp_rdev, struct mount *mp)
{
	struct proc *p = curproc;
	struct vnode *vp;
	struct vnode **vpp;

	if (nvp->v_type != VBLK && nvp->v_type != VCHR)
		return (NULLVP);

	vpp = &speclisth[SPECHASH(nvp_rdev)];
loop:
	simple_lock(&spechash_slock);
	for (vp = *vpp; vp; vp = vp->v_specnext) {
		if (nvp_rdev != vp->v_rdev || nvp->v_type != vp->v_type) {
			continue;
		}
		/*
		 * Alias, but not in use, so flush it out.
		 */
		if (vp->v_usecount == 0) {
			simple_unlock(&spechash_slock);
			vgonel(vp, p);
			goto loop;
		}
		if (vget(vp, LK_EXCLUSIVE, p)) {
			simple_unlock(&spechash_slock);
			goto loop;
		}
		break;
	}

	/*
	 * The common case is handled by the if statement below.
	 */
	if (vp == NULL || !(vp->v_tag == VT_NON && vp->v_type == VBLK)) {
		MALLOC(nvp->v_specinfo, struct specinfo *,
			sizeof(struct specinfo), M_VNODE, M_WAITOK);
		nvp->v_rdev = nvp_rdev;
		nvp->v_hashchain = vpp;
		nvp->v_specnext = *vpp;
		nvp->v_specmountpoint = NULL;
		nvp->v_speclockf = NULL;
		bzero(nvp->v_specbitmap, sizeof(nvp->v_specbitmap));
		simple_unlock(&spechash_slock);
		*vpp = nvp;
		if (vp != NULLVP) {
			nvp->v_flag |= VALIASED;
			vp->v_flag |= VALIASED;
			vput(vp);
		}
		return (NULLVP);
	}

	/*
	 * This code handles the uncommon case: we found an alias
	 * whose tag is VT_NON and whose type is VBLK, i.e. a block
	 * device vnode that was created using bdevvp.
	 * An example of such a vnode is the root partition device vnode
	 * created in ffs_mountroot.
	 *
	 * The vnodes created by bdevvp should not be aliased (why?).
	 */

	simple_unlock(&spechash_slock);
	VOP_UNLOCK(vp, 0, p);
	vclean(vp, 0, p);
	vp->v_op = nvp->v_op;
	vp->v_tag = nvp->v_tag;
	nvp->v_type = VNON;
	insmntque(vp, mp);
	return (vp);
}

/*
 * Grab a particular vnode from the free list, increment its
 * reference count and lock it. If the vnode lock bit is set,
 * the vnode is being eliminated in vgone. In that case, we
 * cannot grab it, so the process is awakened when the
 * transition is completed, and an error code is returned to
 * indicate that the vnode is no longer usable, possibly
 * having been changed to a new file system type.
 */
int
vget(struct vnode *vp, int flags, struct proc *p)
{
	int error, s, onfreelist;

	/*
	 * If the vnode is in the process of being cleaned out for
	 * another use, we wait for the cleaning to finish and then
	 * return failure. Cleaning is determined by checking that
	 * the VXLOCK flag is set.
	 */

	if (vp->v_flag & VXLOCK) {
		if (flags & LK_NOWAIT) {
			return (EBUSY);
		}

		vp->v_flag |= VXWANT;
		ltsleep(vp, PINOD | PNORELOCK, "vget", 0, NULL);
		return (ENOENT);
	}

	onfreelist = vp->v_bioflag & VBIOONFREELIST;
	if (vp->v_usecount == 0 && onfreelist) {
		s = splbio();
		simple_lock(&vnode_free_list_slock);
		if (vp->v_holdcnt > 0)
			TAILQ_REMOVE(&vnode_hold_list, vp, v_freelist);
		else
			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
		simple_unlock(&vnode_free_list_slock);
		vp->v_bioflag &= ~VBIOONFREELIST;
		splx(s);
	}

	vp->v_usecount++;
	if (flags & LK_TYPE_MASK) {
		if ((error = vn_lock(vp, flags, p)) != 0) {
			vp->v_usecount--;
			if (vp->v_usecount == 0 && onfreelist)
				vputonfreelist(vp);
		}
		return (error);
	}

	return (0);
}
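
/*
 * Illustrative sketch of the reference discipline (error handling
 * elided): a reference taken with a lock type is dropped with vput(),
 * an unlocked reference with vrele():
 *
 *	if (vget(vp, LK_EXCLUSIVE, p) == 0) {
 *		... use the locked vnode ...
 *		vput(vp);
 *	}
 */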
#ifdef DIAGNOSTIC
/*
 * Vnode reference.
 */
void
vref(struct vnode *vp)
{
	if (vp->v_usecount == 0)
		panic("vref used where vget required");
	vp->v_usecount++;
}
#endif /* DIAGNOSTIC */

void
vputonfreelist(struct vnode *vp)
{
	int s;
	struct freelst *lst;

	s = splbio();
#ifdef DIAGNOSTIC
	if (vp->v_usecount != 0)
		panic("Use count is not zero!");

	if (vp->v_bioflag & VBIOONFREELIST) {
		vprint("vnode already on free list: ", vp);
		panic("vnode already on free list");
	}
#endif

	vp->v_bioflag |= VBIOONFREELIST;

	if (vp->v_holdcnt > 0)
		lst = &vnode_hold_list;
	else
		lst = &vnode_free_list;

	if (vp->v_type == VBAD)
		TAILQ_INSERT_HEAD(lst, vp, v_freelist);
	else
		TAILQ_INSERT_TAIL(lst, vp, v_freelist);

	splx(s);
}

/*
 * vput(), just unlock and vrele()
 */
void
vput(struct vnode *vp)
{
	struct proc *p = curproc;

#ifdef DIAGNOSTIC
	if (vp == NULL)
		panic("vput: null vp");
#endif

#ifdef DIAGNOSTIC
	if (vp->v_usecount == 0) {
		vprint("vput: bad ref count", vp);
		panic("vput: ref cnt");
	}
#endif
	vp->v_usecount--;
	if (vp->v_usecount > 0) {
		VOP_UNLOCK(vp, 0, p);
		return;
	}

#ifdef DIAGNOSTIC
	if (vp->v_writecount != 0) {
		vprint("vput: bad writecount", vp);
		panic("vput: v_writecount != 0");
	}
#endif

	VOP_INACTIVE(vp, p);

	if (vp->v_usecount == 0 && !(vp->v_bioflag & VBIOONFREELIST))
		vputonfreelist(vp);
}

/*
 * Vnode release - use for active VNODES.
 * If count drops to zero, call inactive routine and return to freelist.
 */
void
vrele(struct vnode *vp)
{
	struct proc *p = curproc;

#ifdef DIAGNOSTIC
	if (vp == NULL)
		panic("vrele: null vp");
#endif
#ifdef DIAGNOSTIC
	if (vp->v_usecount == 0) {
		vprint("vrele: bad ref count", vp);
		panic("vrele: ref cnt");
	}
#endif
	vp->v_usecount--;
	if (vp->v_usecount > 0) {
		return;
	}

#ifdef DIAGNOSTIC
	if (vp->v_writecount != 0) {
		vprint("vrele: bad writecount", vp);
		panic("vrele: v_writecount != 0");
	}
#endif

	if (vn_lock(vp, LK_EXCLUSIVE, p)) {
#ifdef DIAGNOSTIC
		vprint("vrele: cannot lock", vp);
#endif
		return;
	}

	VOP_INACTIVE(vp, p);

	if (vp->v_usecount == 0 && !(vp->v_bioflag & VBIOONFREELIST))
		vputonfreelist(vp);
}

void vhold(struct vnode *vp);

/*
 * Page or buffer structure gets a reference.
 */
void
vhold(struct vnode *vp)
{
	/*
	 * If it is on the freelist and the hold count is currently
	 * zero, move it to the hold list.
	 */
	if ((vp->v_bioflag & VBIOONFREELIST) &&
	    vp->v_holdcnt == 0 && vp->v_usecount == 0) {
		simple_lock(&vnode_free_list_slock);
		TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
		TAILQ_INSERT_TAIL(&vnode_hold_list, vp, v_freelist);
		simple_unlock(&vnode_free_list_slock);
	}
	vp->v_holdcnt++;
}

/*
 * Remove any vnodes in the vnode table belonging to mount point mp.
 *
 * If FORCECLOSE is not specified, there should not be any active ones;
 * return an error if any are found (nb: this is a user error, not a
 * system error). If FORCECLOSE is specified, detach any active vnodes
 * that are found.
 */
#ifdef DEBUG
int busyprt = 0;	/* print out busy vnodes */
struct ctldebug debug1 = { "busyprt", &busyprt };
#endif

int
vfs_mount_foreach_vnode(struct mount *mp,
    int (*func)(struct vnode *, void *), void *arg)
{
	struct vnode *vp, *nvp;
	int error = 0;

	simple_lock(&mntvnode_slock);
loop:
	for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = nvp) {
		if (vp->v_mount != mp)
			goto loop;
		nvp = LIST_NEXT(vp, v_mntvnodes);
		simple_unlock(&mntvnode_slock);

		error = func(vp, arg);

		simple_lock(&mntvnode_slock);

		if (error != 0)
			break;
	}
	simple_unlock(&mntvnode_slock);

	return (error);
}

struct vflush_args {
	struct vnode *skipvp;
	int busy;
	int flags;
};

int
vflush_vnode(struct vnode *vp, void *arg)
{
	struct vflush_args *va = arg;
	struct proc *p = curproc;

	if (vp == va->skipvp) {
		return (0);
	}

	if ((va->flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
		return (0);
	}

	/*
	 * If WRITECLOSE is set, only flush out regular file
	 * vnodes open for writing.
	 */
	if ((va->flags & WRITECLOSE) &&
	    (vp->v_writecount == 0 || vp->v_type != VREG)) {
		return (0);
	}

	/*
	 * With v_usecount == 0, all we need to do is clear
	 * out the vnode data structures and we are done.
	 */
	if (vp->v_usecount == 0) {
		vgonel(vp, p);
		return (0);
	}

	/*
	 * If FORCECLOSE is set, forcibly close the vnode.
	 * For block or character devices, revert to an
	 * anonymous device. For all other files, just kill them.
	 */
	if (va->flags & FORCECLOSE) {
		if (vp->v_type != VBLK && vp->v_type != VCHR) {
			vgonel(vp, p);
		} else {
			vclean(vp, 0, p);
			vp->v_op = spec_vnodeop_p;
			insmntque(vp, (struct mount *)0);
		}
		return (0);
	}

#ifdef DEBUG
	if (busyprt)
		vprint("vflush: busy vnode", vp);
#endif
	va->busy++;
	return (0);
}

int
vflush(struct mount *mp, struct vnode *skipvp, int flags)
{
	struct vflush_args va;
	va.skipvp = skipvp;
	va.busy = 0;
	va.flags = flags;

	vfs_mount_foreach_vnode(mp, vflush_vnode, &va);

	if (va.busy)
		return (EBUSY);
	return (0);
}

/*
 * Disassociate the underlying file system from a vnode.
 */
void
vclean(struct vnode *vp, int flags, struct proc *p)
{
	int active;

	/*
	 * Check to see if the vnode is in use.
	 * If so we have to reference it before we clean it out
	 * so that its count cannot fall to zero and generate a
	 * race against ourselves to recycle it.
	 */
	if ((active = vp->v_usecount) != 0)
		vp->v_usecount++;

	/*
	 * Prevent the vnode from being recycled or
	 * brought into use while we clean it out.
	 */
	if (vp->v_flag & VXLOCK)
		panic("vclean: deadlock");
	vp->v_flag |= VXLOCK;
	/*
	 * Even if the count is zero, the VOP_INACTIVE routine may still
	 * have the object locked while it cleans it out. The VOP_LOCK
	 * ensures that the VOP_INACTIVE routine is done with its work.
	 * For active vnodes, it ensures that no other activity can
	 * occur while the underlying object is being cleaned out.
	 */
	VOP_LOCK(vp, LK_DRAIN, p);

	/*
	 * Clean out any VM data associated with the vnode.
	 */
	uvm_vnp_terminate(vp);
	/*
	 * Clean out any buffers associated with the vnode.
	 */
	if (flags & DOCLOSE)
		vinvalbuf(vp, V_SAVE, NOCRED, p, 0, 0);
	/*
	 * If purging an active vnode, it must be closed and
	 * deactivated before being reclaimed. Note that the
	 * VOP_INACTIVE will unlock the vnode.
	 */
	if (active) {
		if (flags & DOCLOSE)
			VOP_CLOSE(vp, FNONBLOCK, NOCRED, p);
		VOP_INACTIVE(vp, p);
	} else {
		/*
		 * Any other processes trying to obtain this lock must first
		 * wait for VXLOCK to clear, then call the new lock operation.
		 */
		VOP_UNLOCK(vp, 0, p);
	}

	/*
	 * Reclaim the vnode.
	 */
	if (VOP_RECLAIM(vp, p))
		panic("vclean: cannot reclaim");
	if (active) {
		vp->v_usecount--;
		if (vp->v_usecount == 0) {
			if (vp->v_holdcnt > 0)
				panic("vclean: not clean");
			vputonfreelist(vp);
		}
	}
	cache_purge(vp);

	/*
	 * Done with purge, notify sleepers of the grim news.
	 */
	vp->v_op = dead_vnodeop_p;
	simple_lock(&vp->v_selectinfo.vsi_lock);
	VN_KNOTE(vp, NOTE_REVOKE);
	simple_unlock(&vp->v_selectinfo.vsi_lock);
	vp->v_tag = VT_NON;
	vp->v_flag &= ~VXLOCK;
#ifdef VFSDEBUG
	vp->v_flag &= ~VLOCKSWORK;
#endif
	if (vp->v_flag & VXWANT) {
		vp->v_flag &= ~VXWANT;
		wakeup(vp);
	}
}

/*
 * Recycle an unused vnode to the front of the free list.
 */
int
vrecycle(struct vnode *vp, struct proc *p)
{
	if (vp->v_usecount == 0) {
		vgonel(vp, p);
		return (1);
	}
	return (0);
}

/*
 * Eliminate all activity associated with a vnode
 * in preparation for reuse.
 */
void
vgone(struct vnode *vp)
{
	struct proc *p = curproc;
	vgonel(vp, p);
}

/*
 * vgone, with struct proc.
 */
void
vgonel(struct vnode *vp, struct proc *p)
{
	struct vnode *vq;
	struct vnode *vx;
	struct mount *mp;
	int flags;

	/*
	 * If a vgone (or vclean) is already in progress,
	 * wait until it is done and return.
	 */
	if (vp->v_flag & VXLOCK) {
		vp->v_flag |= VXWANT;
		ltsleep(vp, PINOD | PNORELOCK, "vgone", 0, NULL);
		return;
	}

	/*
	 * Clean out the filesystem specific data.
	 */
	vclean(vp, DOCLOSE, p);
	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mount != NULL)
		insmntque(vp, (struct mount *)0);
	/*
	 * If special device, remove it from special device alias list
	 * if it is on one.
	 */
	if ((vp->v_type == VBLK || vp->v_type == VCHR) && vp->v_specinfo != 0) {
		simple_lock(&spechash_slock);
		if (*vp->v_hashchain == vp) {
			*vp->v_hashchain = vp->v_specnext;
		} else {
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_specnext != vp)
					continue;
				vq->v_specnext = vp->v_specnext;
				break;
			}
			if (vq == NULL)
				panic("missing bdev");
		}
		if (vp->v_flag & VALIASED) {
			vx = NULL;
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_rdev != vp->v_rdev ||
				    vq->v_type != vp->v_type)
					continue;
				if (vx)
					break;
				vx = vq;
			}
			if (vx == NULL)
				panic("missing alias");
			if (vq == NULL)
				vx->v_flag &= ~VALIASED;
			vp->v_flag &= ~VALIASED;
		}
		simple_unlock(&spechash_slock);

		/*
		 * If we have a mount point associated with the vnode, we must
		 * flush it out now, so as not to leave a dangling zombie mount
		 * point lying around in VFS.
		 */
		mp = vp->v_specmountpoint;
		if (mp != NULL) {
			if (!vfs_busy(mp, VB_WRITE|VB_WAIT)) {
				flags = MNT_FORCE | MNT_DOOMED;
				dounmount(mp, flags, p, NULL);
			}
		}

		FREE(vp->v_specinfo, M_VNODE);
		vp->v_specinfo = NULL;
	}
	vp->v_type = VBAD;

	/*
	 * If it is on the freelist and not already at the head,
	 * move it to the head of the list. (When called from
	 * getnewvnode, the vnode is not on any free list and is
	 * left alone.)
	 */
	if (vp->v_usecount == 0 &&
	    (vp->v_bioflag & VBIOONFREELIST)) {
		int s;

		simple_lock(&vnode_free_list_slock);
		s = splbio();

		if (vp->v_holdcnt > 0)
			panic("vgonel: not clean");

		if (TAILQ_FIRST(&vnode_free_list) != vp) {
			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
			TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
		}
		splx(s);
		simple_unlock(&vnode_free_list_slock);
	}
}

/*
 * Lookup a vnode by device number.
 */
int
vfinddev(dev_t dev, enum vtype type, struct vnode **vpp)
{
	struct vnode *vp;
	int rc = 0;

	simple_lock(&spechash_slock);
	for (vp = speclisth[SPECHASH(dev)]; vp; vp = vp->v_specnext) {
		if (dev != vp->v_rdev || type != vp->v_type)
			continue;
		*vpp = vp;
		rc = 1;
		break;
	}
	simple_unlock(&spechash_slock);
	return (rc);
}

/*
 * Revoke all the vnodes corresponding to the specified minor number
 * range (endpoints inclusive) of the specified major.
 */
void
vdevgone(int maj, int minl, int minh, enum vtype type)
{
	struct vnode *vp;
	int mn;

	for (mn = minl; mn <= minh; mn++)
		if (vfinddev(makedev(maj, mn), type, &vp))
			VOP_REVOKE(vp, REVOKEALL);
}

/*
 * Calculate the total number of references to a special device.
 */
int
vcount(struct vnode *vp)
{
	struct vnode *vq, *vnext;
	int count;

loop:
	if ((vp->v_flag & VALIASED) == 0)
		return (vp->v_usecount);
	simple_lock(&spechash_slock);
	for (count = 0, vq = *vp->v_hashchain; vq; vq = vnext) {
		vnext = vq->v_specnext;
		if (vq->v_rdev != vp->v_rdev || vq->v_type != vp->v_type)
			continue;
		/*
		 * Alias, but not in use, so flush it out.
		 */
		if (vq->v_usecount == 0 && vq != vp) {
			simple_unlock(&spechash_slock);
			vgone(vq);
			goto loop;
		}
		count += vq->v_usecount;
	}
	simple_unlock(&spechash_slock);
	return (count);
}

#if defined(DEBUG) || defined(DIAGNOSTIC)
/*
 * Print out a description of a vnode.
 */
static char *typename[] =
   { "VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD" };

void
vprint(char *label, struct vnode *vp)
{
	char buf[64];

	if (label != NULL)
		printf("%s: ", label);
	printf("%p, type %s, use %u, write %u, hold %u,",
		vp, typename[vp->v_type], vp->v_usecount, vp->v_writecount,
		vp->v_holdcnt);
	buf[0] = '\0';
	if (vp->v_flag & VROOT)
		strlcat(buf, "|VROOT", sizeof buf);
	if (vp->v_flag & VTEXT)
		strlcat(buf, "|VTEXT", sizeof buf);
	if (vp->v_flag & VSYSTEM)
		strlcat(buf, "|VSYSTEM", sizeof buf);
	if (vp->v_flag & VXLOCK)
		strlcat(buf, "|VXLOCK", sizeof buf);
	if (vp->v_flag & VXWANT)
		strlcat(buf, "|VXWANT", sizeof buf);
	if (vp->v_bioflag & VBIOWAIT)
		strlcat(buf, "|VBIOWAIT", sizeof buf);
	if (vp->v_bioflag & VBIOONFREELIST)
		strlcat(buf, "|VBIOONFREELIST", sizeof buf);
	if (vp->v_bioflag & VBIOONSYNCLIST)
		strlcat(buf, "|VBIOONSYNCLIST", sizeof buf);
	if (vp->v_flag & VALIASED)
		strlcat(buf, "|VALIASED", sizeof buf);
	if (buf[0] != '\0')
		printf(" flags (%s)", &buf[1]);
	if (vp->v_data == NULL) {
		printf("\n");
	} else {
		printf("\n\t");
		VOP_PRINT(vp);
	}
}
#endif /* DEBUG || DIAGNOSTIC */

#ifdef DEBUG
/*
 * List all of the locked vnodes in the system.
 * Called when debugging the kernel.
 */
void
printlockedvnodes(void)
{
	struct mount *mp, *nmp;
	struct vnode *vp;

	printf("Locked vnodes\n");

	for (mp = CIRCLEQ_FIRST(&mountlist); mp != CIRCLEQ_END(&mountlist);
	    mp = nmp) {
		if (vfs_busy(mp, VB_READ|VB_NOWAIT)) {
			nmp = CIRCLEQ_NEXT(mp, mnt_list);
			continue;
		}
		LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
			if (VOP_ISLOCKED(vp))
				vprint((char *)0, vp);
		}
		nmp = CIRCLEQ_NEXT(mp, mnt_list);
		vfs_unbusy(mp);
	}
}
#endif

/*
 * Top level filesystem related information gathering.
 */
int
vfs_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen, struct proc *p)
{
	struct vfsconf *vfsp, *tmpvfsp;
	int ret;

	/* all sysctl names at this level are at least name and field */
	if (namelen < 2)
		return (ENOTDIR);		/* overloaded */

	if (name[0] != VFS_GENERIC) {
		for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
			if (vfsp->vfc_typenum == name[0])
				break;

		if (vfsp == NULL)
			return (EOPNOTSUPP);

		return ((*vfsp->vfc_vfsops->vfs_sysctl)(&name[1], namelen - 1,
		    oldp, oldlenp, newp, newlen, p));
	}

	switch (name[1]) {
	case VFS_MAXTYPENUM:
		return (sysctl_rdint(oldp, oldlenp, newp, maxvfsconf));

	case VFS_CONF:
		if (namelen < 3)
			return (ENOTDIR);	/* overloaded */

		for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
			if (vfsp->vfc_typenum == name[2])
				break;

		if (vfsp == NULL)
			return (EOPNOTSUPP);

		/* Make a copy, clear out kernel pointers */
		tmpvfsp = malloc(sizeof(*tmpvfsp), M_TEMP, M_WAITOK);
		bcopy(vfsp, tmpvfsp, sizeof(*tmpvfsp));
		tmpvfsp->vfc_vfsops = NULL;
		tmpvfsp->vfc_mountroot = NULL;
		tmpvfsp->vfc_next = NULL;

		ret = sysctl_rdstruct(oldp, oldlenp, newp, tmpvfsp,
		    sizeof(struct vfsconf));

		free(tmpvfsp, M_TEMP);
		return (ret);
	}

	return (EOPNOTSUPP);
}
1351
1352int kinfo_vdebug = 1;
1353int kinfo_vgetfailed;
1354#define KINFO_VNODESLOP	10
1355/*
1356 * Dump vnode list (via sysctl).
1357 * Copyout address of vnode followed by vnode.
1358 */
1359/* ARGSUSED */
1360int
1361sysctl_vnode(char *where, size_t *sizep, struct proc *p)
1362{
1363	struct mount *mp, *nmp;
1364	struct vnode *vp, *nvp;
1365	char *bp = where, *savebp;
1366	char *ewhere;
1367	int error;
1368
1369	if (where == NULL) {
1370		*sizep = (numvnodes + KINFO_VNODESLOP) * sizeof(struct e_vnode);
1371		return (0);
1372	}
1373	ewhere = where + *sizep;
1374
1375	for (mp = CIRCLEQ_FIRST(&mountlist); mp != CIRCLEQ_END(&mountlist);
1376	    mp = nmp) {
1377		if (vfs_busy(mp, VB_READ|VB_NOWAIT)) {
1378			nmp = CIRCLEQ_NEXT(mp, mnt_list);
1379			continue;
1380		}
1381		savebp = bp;
1382again:
1383		for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL;
1384		    vp = nvp) {
1385			/*
1386			 * Check that the vp is still associated with
1387			 * this filesystem.  RACE: could have been
1388			 * recycled onto the same filesystem.
1389			 */
1390			if (vp->v_mount != mp) {
1391				simple_unlock(&mntvnode_slock);
1392				if (kinfo_vdebug)
1393					printf("kinfo: vp changed\n");
1394				bp = savebp;
1395				goto again;
1396			}
1397			nvp = LIST_NEXT(vp, v_mntvnodes);
1398			if (bp + sizeof(struct e_vnode) > ewhere) {
1399				simple_unlock(&mntvnode_slock);
1400				*sizep = bp - where;
1401				vfs_unbusy(mp);
1402				return (ENOMEM);
1403			}
1404			if ((error = copyout(&vp,
1405			    &((struct e_vnode *)bp)->vptr,
1406			    sizeof(struct vnode *))) ||
1407			   (error = copyout(vp,
1408			    &((struct e_vnode *)bp)->vnode,
1409			    sizeof(struct vnode)))) {
1410				vfs_unbusy(mp);
1411				return (error);
1412			}
1413			bp += sizeof(struct e_vnode);
1414			simple_lock(&mntvnode_slock);
1415		}
1416
1417		simple_unlock(&mntvnode_slock);
1418		nmp = CIRCLEQ_NEXT(mp, mnt_list);
1419		vfs_unbusy(mp);
1420	}
1421
1422	*sizep = bp - where;
1423
1424	return (0);
1425}

/*
 * Check to see if a filesystem is mounted on a block device.
 */
int
vfs_mountedon(struct vnode *vp)
{
	struct vnode *vq;
	int error = 0;

	if (vp->v_specmountpoint != NULL)
		return (EBUSY);
	if (vp->v_flag & VALIASED) {
		simple_lock(&spechash_slock);
		for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
			if (vq->v_rdev != vp->v_rdev ||
			    vq->v_type != vp->v_type)
				continue;
			if (vq->v_specmountpoint != NULL) {
				error = EBUSY;
				break;
			}
		}
		simple_unlock(&spechash_slock);
	}
	return (error);
}

/*
 * Build hash lists of net addresses and hang them off the mount point.
 * Called by ufs_mount() to set up the lists of export addresses.
 */
int
vfs_hang_addrlist(struct mount *mp, struct netexport *nep,
    struct export_args *argp)
{
	struct netcred *np;
	struct radix_node_head *rnh;
	int i;
	struct radix_node *rn;
	struct sockaddr *saddr, *smask = 0;
	struct domain *dom;
	int error;

	if (argp->ex_addrlen == 0) {
		if (mp->mnt_flag & MNT_DEFEXPORTED)
			return (EPERM);
		np = &nep->ne_defexported;
		np->netc_exflags = argp->ex_flags;
		np->netc_anon = argp->ex_anon;
		np->netc_anon.cr_ref = 1;
		mp->mnt_flag |= MNT_DEFEXPORTED;
		return (0);
	}
	if (argp->ex_addrlen > MLEN || argp->ex_masklen > MLEN ||
	    argp->ex_addrlen < 0 || argp->ex_masklen < 0)
		return (EINVAL);
	i = sizeof(struct netcred) + argp->ex_addrlen + argp->ex_masklen;
	np = (struct netcred *)malloc(i, M_NETADDR, M_WAITOK);
	bzero(np, i);
	saddr = (struct sockaddr *)(np + 1);
	error = copyin(argp->ex_addr, saddr, argp->ex_addrlen);
	if (error)
		goto out;
	if (saddr->sa_len > argp->ex_addrlen)
		saddr->sa_len = argp->ex_addrlen;
	if (argp->ex_masklen) {
		smask = (struct sockaddr *)((caddr_t)saddr + argp->ex_addrlen);
		error = copyin(argp->ex_mask, smask, argp->ex_masklen);
		if (error)
			goto out;
		if (smask->sa_len > argp->ex_masklen)
			smask->sa_len = argp->ex_masklen;
	}
	i = saddr->sa_family;
	if (i < 0 || i > AF_MAX) {
		error = EINVAL;
		goto out;
	}
	if ((rnh = nep->ne_rtable[i]) == 0) {
		/*
		 * Seems silly to initialize every AF when most are not
		 * used; do so on demand here.
		 */
		for (dom = domains; dom; dom = dom->dom_next)
			if (dom->dom_family == i && dom->dom_rtattach) {
				dom->dom_rtattach((void **)&nep->ne_rtable[i],
					dom->dom_rtoffset);
				break;
			}
		if ((rnh = nep->ne_rtable[i]) == 0) {
			error = ENOBUFS;
			goto out;
		}
	}
	rn = (*rnh->rnh_addaddr)((caddr_t)saddr, (caddr_t)smask, rnh,
		np->netc_rnodes);
	if (rn == 0 || np != (struct netcred *)rn) { /* already exists */
		error = EPERM;
		goto out;
	}
	np->netc_exflags = argp->ex_flags;
	np->netc_anon = argp->ex_anon;
	np->netc_anon.cr_ref = 1;
	return (0);
out:
	free(np, M_NETADDR);
	return (error);
}

/* ARGSUSED */
int
vfs_free_netcred(struct radix_node *rn, void *w)
{
	struct radix_node_head *rnh = (struct radix_node_head *)w;

	(*rnh->rnh_deladdr)(rn->rn_key, rn->rn_mask, rnh, NULL);
	free(rn, M_NETADDR);
	return (0);
}

/*
 * Free the net address hash lists that are hanging off the mount points.
 */
void
vfs_free_addrlist(struct netexport *nep)
{
	int i;
	struct radix_node_head *rnh;

	for (i = 0; i <= AF_MAX; i++)
		if ((rnh = nep->ne_rtable[i]) != NULL) {
			(*rnh->rnh_walktree)(rnh, vfs_free_netcred, rnh);
			free(rnh, M_RTABLE);
			nep->ne_rtable[i] = 0;
		}
}

int
vfs_export(struct mount *mp, struct netexport *nep, struct export_args *argp)
{
	int error;

	if (argp->ex_flags & MNT_DELEXPORT) {
		vfs_free_addrlist(nep);
		mp->mnt_flag &= ~(MNT_EXPORTED | MNT_DEFEXPORTED);
	}
	if (argp->ex_flags & MNT_EXPORTED) {
		if ((error = vfs_hang_addrlist(mp, nep, argp)) != 0)
			return (error);
		mp->mnt_flag |= MNT_EXPORTED;
	}
	return (0);
}

struct netcred *
vfs_export_lookup(struct mount *mp, struct netexport *nep, struct mbuf *nam)
{
	struct netcred *np;
	struct radix_node_head *rnh;
	struct sockaddr *saddr;

	np = NULL;
	if (mp->mnt_flag & MNT_EXPORTED) {
		/*
		 * Lookup in the export list first.
		 */
		if (nam != NULL) {
			saddr = mtod(nam, struct sockaddr *);
			rnh = nep->ne_rtable[saddr->sa_family];
			if (rnh != NULL) {
				np = (struct netcred *)
					(*rnh->rnh_matchaddr)((caddr_t)saddr,
					    rnh);
				if (np && np->netc_rnodes->rn_flags & RNF_ROOT)
					np = NULL;
			}
		}
		/*
		 * If no address match, use the default if it exists.
		 */
		if (np == NULL && mp->mnt_flag & MNT_DEFEXPORTED)
			np = &nep->ne_defexported;
	}
	return (np);
}

/*
 * Do the usual access checking.
 * file_mode, uid and gid are from the vnode in question,
 * while acc_mode and cred are from the VOP_ACCESS parameter list.
 */
int
vaccess(mode_t file_mode, uid_t uid, gid_t gid, mode_t acc_mode,
    struct ucred *cred)
{
	mode_t mask;

	/* User id 0 always gets access. */
	if (cred->cr_uid == 0)
		return 0;

	mask = 0;

	/* Otherwise, check the owner. */
	if (cred->cr_uid == uid) {
		if (acc_mode & VEXEC)
			mask |= S_IXUSR;
		if (acc_mode & VREAD)
			mask |= S_IRUSR;
		if (acc_mode & VWRITE)
			mask |= S_IWUSR;
		return (file_mode & mask) == mask ? 0 : EACCES;
	}

	/* Otherwise, check the groups. */
	if (cred->cr_gid == gid || groupmember(gid, cred)) {
		if (acc_mode & VEXEC)
			mask |= S_IXGRP;
		if (acc_mode & VREAD)
			mask |= S_IRGRP;
		if (acc_mode & VWRITE)
			mask |= S_IWGRP;
		return (file_mode & mask) == mask ? 0 : EACCES;
	}

	/* Otherwise, check everyone else. */
	if (acc_mode & VEXEC)
		mask |= S_IXOTH;
	if (acc_mode & VREAD)
		mask |= S_IROTH;
	if (acc_mode & VWRITE)
		mask |= S_IWOTH;
	return (file_mode & mask) == mask ? 0 : EACCES;
}
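
/*
 * Worked example (illustrative): for a file with mode 0640,
 * vaccess(0640, uid, gid, VREAD, cred) returns 0 for the owner
 * (0640 & S_IRUSR) and for members of gid (0640 & S_IRGRP), but
 * EACCES for everyone else since 0640 & S_IROTH == 0; uid 0 always
 * gets access.
 */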

/*
 * Unmount all file systems.
 * We traverse the list in reverse order under the assumption that doing so
 * will avoid needing to worry about dependencies.
 */
void
vfs_unmountall(void)
{
	struct mount *mp, *nmp;
	int allerror, error, again = 1;

 retry:
	allerror = 0;
	for (mp = CIRCLEQ_LAST(&mountlist); mp != CIRCLEQ_END(&mountlist);
	    mp = nmp) {
		nmp = CIRCLEQ_PREV(mp, mnt_list);
		if ((vfs_busy(mp, VB_WRITE|VB_NOWAIT)) != 0)
			continue;
		if ((error = dounmount(mp, MNT_FORCE, curproc, NULL)) != 0) {
			printf("unmount of %s failed with error %d\n",
			    mp->mnt_stat.f_mntonname, error);
			allerror = 1;
		}
	}

	if (allerror) {
		printf("WARNING: some file systems would not unmount\n");
		if (again) {
			printf("retrying\n");
			again = 0;
			goto retry;
		}
	}
}

/*
 * Sync and unmount file systems before shutting down.
 */
void
vfs_shutdown(void)
{
#ifdef ACCOUNTING
	extern void acct_shutdown(void);

	acct_shutdown();
#endif

	/* XXX Should suspend scheduling. */
	(void) spl0();

	printf("syncing disks... ");

	if (panicstr == 0) {
		/* Sync before unmount, in case we hang on something. */
		sys_sync(&proc0, (void *)0, (register_t *)0);

		/* Unmount file systems. */
		vfs_unmountall();
	}

	if (vfs_syncwait(1))
		printf("giving up\n");
	else
		printf("done\n");
}

/*
 * Perform the sync() operation and wait for buffers to flush.
 * Assumptions: called with the scheduler disabled and physical I/O
 * enabled; for now called at spl0(). XXX
 */
int
vfs_syncwait(int verbose)
{
	struct buf *bp;
	int iter, nbusy, dcount, s;
	struct proc *p;

	p = curproc ? curproc : &proc0;
	sys_sync(p, (void *)0, (register_t *)0);

	/* Wait for sync to finish. */
	dcount = 10000;
	for (iter = 0; iter < 20; iter++) {
		nbusy = 0;
		for (bp = &buf[nbuf]; --bp >= buf; ) {
			if ((bp->b_flags & (B_BUSY|B_INVAL|B_READ)) == B_BUSY)
				nbusy++;
			/*
			 * With soft updates, some buffers that are
			 * written will be remarked as dirty until other
			 * buffers are written.
			 */
			if (bp->b_flags & B_DELWRI) {
				s = splbio();
				bremfree(bp);
				bp->b_flags |= B_BUSY;
				splx(s);
				nbusy++;
				bawrite(bp);
				if (dcount-- <= 0) {
					if (verbose)
						printf("softdep ");
					return 1;
				}
			}
		}
		if (nbusy == 0)
			break;
		if (verbose)
			printf("%d ", nbusy);
		DELAY(40000 * iter);
	}

	return nbusy;
}

/*
 * posix file system related system variables.
 */
int
fs_posix_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen, struct proc *p)
{
	/* all sysctl names at this level are terminal */
	if (namelen != 1)
		return (ENOTDIR);

	switch (name[0]) {
	case FS_POSIX_SETUID:
		if (newp && securelevel > 0)
			return (EPERM);
		return (sysctl_int(oldp, oldlenp, newp, newlen, &suid_clear));
	default:
		return (EOPNOTSUPP);
	}
	/* NOTREACHED */
}

/*
 * file system related system variables.
 */
int
fs_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen, struct proc *p)
{
	sysctlfn *fn;

	switch (name[0]) {
	case FS_POSIX:
		fn = fs_posix_sysctl;
		break;
	default:
		return (EOPNOTSUPP);
	}
	return (*fn)(name + 1, namelen - 1, oldp, oldlenp, newp, newlen, p);
}

/*
 * Routines dealing with vnodes and buffers
 */

/*
 * Wait for all outstanding I/Os to complete
 *
 * Manipulates v_numoutput. Must be called at splbio()
 */
int
vwaitforio(struct vnode *vp, int slpflag, char *wmesg, int timeo)
{
	int error = 0;

	splassert(IPL_BIO);

	while (vp->v_numoutput) {
		vp->v_bioflag |= VBIOWAIT;
		error = tsleep(&vp->v_numoutput,
		    slpflag | (PRIBIO + 1), wmesg, timeo);
		if (error)
			break;
	}

	return (error);
}
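
/*
 * Illustrative sketch of the required splbio() bracket (the wmesg
 * string is arbitrary); see vinvalbuf() below for a real caller:
 *
 *	s = splbio();
 *	error = vwaitforio(vp, 0, "vwait", 0);
 *	splx(s);
 */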

/*
 * Update outstanding I/O count and do wakeup if requested.
 *
 * Manipulates v_numoutput. Must be called at splbio()
 */
void
vwakeup(struct vnode *vp)
{
	splassert(IPL_BIO);

	if (vp != NULL) {
		if (vp->v_numoutput-- == 0)
			panic("vwakeup: neg numoutput");
		if ((vp->v_bioflag & VBIOWAIT) && vp->v_numoutput == 0) {
			vp->v_bioflag &= ~VBIOWAIT;
			wakeup(&vp->v_numoutput);
		}
	}
}

/*
 * Flush out and invalidate all buffers associated with a vnode.
 * Called with the underlying object locked.
 */
int
vinvalbuf(struct vnode *vp, int flags, struct ucred *cred, struct proc *p,
    int slpflag, int slptimeo)
{
	struct buf *bp;
	struct buf *nbp, *blist;
	int s, error;

#ifdef VFSDEBUG
	if ((vp->v_flag & VLOCKSWORK) && !VOP_ISLOCKED(vp))
		panic("vinvalbuf(): vp isn't locked");
#endif

	if (flags & V_SAVE) {
		s = splbio();
		vwaitforio(vp, 0, "vinvalbuf", 0);
		if (!LIST_EMPTY(&vp->v_dirtyblkhd)) {
			splx(s);
			if ((error = VOP_FSYNC(vp, cred, MNT_WAIT, p)) != 0)
				return (error);
			s = splbio();
			if (vp->v_numoutput > 0 ||
			    !LIST_EMPTY(&vp->v_dirtyblkhd))
				panic("vinvalbuf: dirty bufs");
		}
		splx(s);
	}
loop:
	s = splbio();
	for (;;) {
		if ((blist = LIST_FIRST(&vp->v_cleanblkhd)) &&
		    (flags & V_SAVEMETA))
			while (blist && blist->b_lblkno < 0)
				blist = LIST_NEXT(blist, b_vnbufs);
		if (blist == NULL &&
		    (blist = LIST_FIRST(&vp->v_dirtyblkhd)) &&
		    (flags & V_SAVEMETA))
			while (blist && blist->b_lblkno < 0)
				blist = LIST_NEXT(blist, b_vnbufs);
		if (!blist)
			break;

		for (bp = blist; bp; bp = nbp) {
			nbp = LIST_NEXT(bp, b_vnbufs);
			if (flags & V_SAVEMETA && bp->b_lblkno < 0)
				continue;
			if (bp->b_flags & B_BUSY) {
				bp->b_flags |= B_WANTED;
				error = tsleep(bp, slpflag | (PRIBIO + 1),
				    "vinvalbuf", slptimeo);
				if (error) {
					splx(s);
					return (error);
				}
				break;
			}
			bremfree(bp);
			bp->b_flags |= B_BUSY;
			/*
			 * XXX Since there are no node locks for NFS, I believe
			 * there is a slight chance that a delayed write will
			 * occur while sleeping just above, so check for it.
			 */
			if ((bp->b_flags & B_DELWRI) && (flags & V_SAVE)) {
				splx(s);
				(void) VOP_BWRITE(bp);
				goto loop;
			}
			bp->b_flags |= B_INVAL;
			brelse(bp);
		}
	}
	if (!(flags & V_SAVEMETA) &&
	    (!LIST_EMPTY(&vp->v_dirtyblkhd) || !LIST_EMPTY(&vp->v_cleanblkhd)))
		panic("vinvalbuf: flush failed");
	splx(s);
	return (0);
}

void
vflushbuf(struct vnode *vp, int sync)
{
	struct buf *bp, *nbp;
	int s;

loop:
	s = splbio();
	for (bp = LIST_FIRST(&vp->v_dirtyblkhd);
	    bp != LIST_END(&vp->v_dirtyblkhd); bp = nbp) {
		nbp = LIST_NEXT(bp, b_vnbufs);
		if ((bp->b_flags & B_BUSY))
			continue;
		if ((bp->b_flags & B_DELWRI) == 0)
			panic("vflushbuf: not dirty");
		bremfree(bp);
		bp->b_flags |= B_BUSY;
		splx(s);
		/*
		 * Wait for I/O associated with indirect blocks to complete,
		 * since there is no way to quickly wait for them below.
		 */
		if (bp->b_vp == vp || sync == 0)
			(void) bawrite(bp);
		else
			(void) bwrite(bp);
		goto loop;
	}
	if (sync == 0) {
		splx(s);
		return;
	}
	vwaitforio(vp, 0, "vflushbuf", 0);
	if (!LIST_EMPTY(&vp->v_dirtyblkhd)) {
		splx(s);
#ifdef DIAGNOSTIC
		vprint("vflushbuf: dirty", vp);
#endif
		goto loop;
	}
	splx(s);
}

/*
 * Associate a buffer with a vnode.
 *
 * Manipulates buffer vnode queues. Must be called at splbio().
 */
void
bgetvp(struct vnode *vp, struct buf *bp)
{
	splassert(IPL_BIO);

	if (bp->b_vp)
		panic("bgetvp: not free");
	vhold(vp);
	bp->b_vp = vp;
	if (vp->v_type == VBLK || vp->v_type == VCHR)
		bp->b_dev = vp->v_rdev;
	else
		bp->b_dev = NODEV;
	/*
	 * Insert onto list for new vnode.
	 */
	bufinsvn(bp, &vp->v_cleanblkhd);
}

/*
 * Disassociate a buffer from a vnode.
 *
 * Manipulates vnode buffer queues. Must be called at splbio().
 */
void
brelvp(struct buf *bp)
{
	struct vnode *vp;

	splassert(IPL_BIO);

	if ((vp = bp->b_vp) == (struct vnode *) 0)
		panic("brelvp: NULL");
	/*
	 * Delete from old vnode list, if on one.
	 */
	if (LIST_NEXT(bp, b_vnbufs) != NOLIST)
		bufremvn(bp);
	if ((vp->v_bioflag & VBIOONSYNCLIST) &&
	    LIST_FIRST(&vp->v_dirtyblkhd) == NULL) {
		vp->v_bioflag &= ~VBIOONSYNCLIST;
		LIST_REMOVE(vp, v_synclist);
	}
	bp->b_vp = (struct vnode *) 0;

#ifdef DIAGNOSTIC
	if (vp->v_holdcnt == 0)
		panic("brelvp: holdcnt");
#endif
	vp->v_holdcnt--;

	/*
	 * If it is on the holdlist and the hold count drops to
	 * zero, move it to the free list.
	 */
	if ((vp->v_bioflag & VBIOONFREELIST) &&
	    vp->v_holdcnt == 0 && vp->v_usecount == 0) {
		simple_lock(&vnode_free_list_slock);
		TAILQ_REMOVE(&vnode_hold_list, vp, v_freelist);
		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
		simple_unlock(&vnode_free_list_slock);
	}
}
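
/*
 * Illustrative sketch (simplified): buffer/vnode association is
 * manipulated at splbio(), pairing bgetvp() with brelvp():
 *
 *	s = splbio();
 *	bgetvp(vp, bp);		(buffer now holds a reference)
 *	...
 *	brelvp(bp);		(hold dropped again)
 *	splx(s);
 */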

/*
 * Replaces the current vnode associated with the buffer, if any,
 * with a new vnode.
 *
 * If an output I/O is pending on the buffer, the old vnode
 * I/O count is adjusted.
 *
 * Ignores vnode buffer queues. Must be called at splbio().
 */
void
buf_replacevnode(struct buf *bp, struct vnode *newvp)
{
	struct vnode *oldvp = bp->b_vp;

	splassert(IPL_BIO);

	if (oldvp)
		brelvp(bp);

	if ((bp->b_flags & (B_READ | B_DONE)) == 0) {
		newvp->v_numoutput++;	/* put it on swapdev */
		vwakeup(oldvp);
	}

	bgetvp(newvp, bp);
	bufremvn(bp);
}

/*
 * Used to assign buffers to the appropriate clean or dirty list on
 * the vnode and to add newly dirty vnodes to the appropriate
 * filesystem syncer list.
 *
 * Manipulates vnode buffer queues. Must be called at splbio().
 */
void
reassignbuf(struct buf *bp)
{
	struct buflists *listheadp;
	int delay;
	struct vnode *vp = bp->b_vp;

	splassert(IPL_BIO);

	/*
	 * Delete from old vnode list, if on one.
	 */
	if (LIST_NEXT(bp, b_vnbufs) != NOLIST)
		bufremvn(bp);

	/*
	 * If dirty, put on list of dirty buffers;
	 * otherwise insert onto list of clean buffers.
	 */
	if ((bp->b_flags & B_DELWRI) == 0) {
		listheadp = &vp->v_cleanblkhd;
		if ((vp->v_bioflag & VBIOONSYNCLIST) &&
		    LIST_FIRST(&vp->v_dirtyblkhd) == NULL) {
			vp->v_bioflag &= ~VBIOONSYNCLIST;
			LIST_REMOVE(vp, v_synclist);
		}
	} else {
		listheadp = &vp->v_dirtyblkhd;
		if ((vp->v_bioflag & VBIOONSYNCLIST) == 0) {
			switch (vp->v_type) {
			case VDIR:
				delay = syncdelay / 2;
				break;
			case VBLK:
				if (vp->v_specmountpoint != NULL) {
					delay = syncdelay / 3;
					break;
				}
				/* FALLTHROUGH */
			default:
				delay = syncdelay;
			}
			vn_syncer_add_to_worklist(vp, delay);
		}
	}
	bufinsvn(bp, listheadp);
}

int
vfs_register(struct vfsconf *vfs)
{
	struct vfsconf *vfsp;
	struct vfsconf **vfspp;

#ifdef DIAGNOSTIC
	/* Paranoia? */
	if (vfs->vfc_refcount != 0)
		printf("vfs_register called with vfc_refcount > 0\n");
#endif

	/* Check if filesystem already known */
	for (vfspp = &vfsconf, vfsp = vfsconf; vfsp;
	    vfspp = &vfsp->vfc_next, vfsp = vfsp->vfc_next)
		if (strcmp(vfsp->vfc_name, vfs->vfc_name) == 0)
			return (EEXIST);

	if (vfs->vfc_typenum > maxvfsconf)
		maxvfsconf = vfs->vfc_typenum;

	vfs->vfc_next = NULL;

	/* Add to the end of the list */
	*vfspp = vfs;

	/* Call vfs_init() */
	if (vfs->vfc_vfsops->vfs_init)
		(*(vfs->vfc_vfsops->vfs_init))(vfs);

	return 0;
}

int
vfs_unregister(struct vfsconf *vfs)
{
	struct vfsconf *vfsp;
	struct vfsconf **vfspp;
	int maxtypenum;

	/* Find our vfsconf struct */
	for (vfspp = &vfsconf, vfsp = vfsconf; vfsp;
	    vfspp = &vfsp->vfc_next, vfsp = vfsp->vfc_next) {
		if (strcmp(vfsp->vfc_name, vfs->vfc_name) == 0)
			break;
	}

	if (!vfsp)			/* Not found */
		return (ENOENT);

	if (vfsp->vfc_refcount)		/* In use */
		return (EBUSY);

	/* Remove from list and free */
	*vfspp = vfsp->vfc_next;

	maxtypenum = 0;

	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
		if (vfsp->vfc_typenum > maxtypenum)
			maxtypenum = vfsp->vfc_typenum;

	maxvfsconf = maxtypenum;
	return 0;
}

/*
 * Check if vnode represents a disk device
 */
int
vn_isdisk(struct vnode *vp, int *errp)
{
	if (vp->v_type != VBLK && vp->v_type != VCHR)
		return (0);

	return (1);
}

#ifdef DDB
#include <machine/db_machdep.h>
#include <ddb/db_interface.h>
#include <ddb/db_output.h>

void
vfs_buf_print(struct buf *bp, int full, int (*pr)(const char *, ...))
{

	(*pr)("  vp %p lblkno 0x%llx blkno 0x%llx dev 0x%x\n"
	      "  proc %p error %d flags %b\n",
	    bp->b_vp, (int64_t)bp->b_lblkno, (int64_t)bp->b_blkno, bp->b_dev,
	    bp->b_proc, bp->b_error, bp->b_flags, B_BITS);

	(*pr)("  bufsize 0x%lx bcount 0x%lx resid 0x%lx sync 0x%x\n"
	      "  data %p saveaddr %p dep %p iodone %p\n",
	    bp->b_bufsize, bp->b_bcount, (long)bp->b_resid, bp->b_synctime,
	    bp->b_data, bp->b_saveaddr, LIST_FIRST(&bp->b_dep), bp->b_iodone);

	(*pr)("  dirty {off 0x%x end 0x%x} valid {off 0x%x end 0x%x}\n",
	    bp->b_dirtyoff, bp->b_dirtyend, bp->b_validoff, bp->b_validend);

#ifdef FFS_SOFTUPDATES
	if (full)
		softdep_print(bp, full, pr);
#endif
}

const char *vtypes[] = { VTYPE_NAMES };
const char *vtags[] = { VTAG_NAMES };

void
vfs_vnode_print(struct vnode *vp, int full, int (*pr)(const char *, ...))
{

#define	NENTS(n)	(sizeof n / sizeof(n[0]))
	(*pr)("tag %s(%d) type %s(%d) mount %p typedata %p\n",
	      vp->v_tag >= NENTS(vtags) ? "<unk>" : vtags[vp->v_tag],
	      vp->v_tag,
	      vp->v_type >= NENTS(vtypes) ? "<unk>" : vtypes[vp->v_type],
	      vp->v_type, vp->v_mount, vp->v_mountedhere);

	(*pr)("data %p usecount %d writecount %ld holdcnt %ld numoutput %d\n",
	      vp->v_data, vp->v_usecount, vp->v_writecount,
	      vp->v_holdcnt, vp->v_numoutput);

	/* uvm_object_printit(&vp->v_uobj, full, pr); */

	if (full) {
		struct buf *bp;

		(*pr)("clean bufs:\n");
		LIST_FOREACH(bp, &vp->v_cleanblkhd, b_vnbufs) {
			(*pr)(" bp %p\n", bp);
			vfs_buf_print(bp, full, pr);
		}

		(*pr)("dirty bufs:\n");
		LIST_FOREACH(bp, &vp->v_dirtyblkhd, b_vnbufs) {
			(*pr)(" bp %p\n", bp);
			vfs_buf_print(bp, full, pr);
		}
	}
}

void
vfs_mount_print(struct mount *mp, int full, int (*pr)(const char *, ...))
{
	struct vfsconf *vfc = mp->mnt_vfc;
	struct vnode *vp;
	int cnt = 0;

	(*pr)("flags %b\nvnodecovered %p syncer %p data %p\n",
	    mp->mnt_flag, MNT_BITS,
	    mp->mnt_vnodecovered, mp->mnt_syncer, mp->mnt_data);

	(*pr)("vfsconf: ops %p name \"%s\" num %d ref %d flags 0x%x\n",
	    vfc->vfc_vfsops, vfc->vfc_name, vfc->vfc_typenum,
	    vfc->vfc_refcount, vfc->vfc_flags);

	(*pr)("statvfs cache: bsize %x iosize %x\nblocks %u free %u avail %u\n",
	    mp->mnt_stat.f_bsize, mp->mnt_stat.f_iosize, mp->mnt_stat.f_blocks,
	    mp->mnt_stat.f_bfree, mp->mnt_stat.f_bavail);

	(*pr)("  files %u ffiles %u\n", mp->mnt_stat.f_files,
	    mp->mnt_stat.f_ffree);

	(*pr)("  f_fsidx {0x%x, 0x%x} owner %u ctime 0x%x\n",
	    mp->mnt_stat.f_fsid.val[0], mp->mnt_stat.f_fsid.val[1],
	    mp->mnt_stat.f_owner, mp->mnt_stat.f_ctime);

	(*pr)("  syncwrites %lu asyncwrites = %lu\n",
	    mp->mnt_stat.f_syncwrites, mp->mnt_stat.f_asyncwrites);

	(*pr)("  fstype \"%s\" mnton \"%s\" mntfrom \"%s\"\n",
	    mp->mnt_stat.f_fstypename, mp->mnt_stat.f_mntonname,
	    mp->mnt_stat.f_mntfromname);

	(*pr)("locked vnodes:");
	/* XXX would take mountlist lock, except ddb has no context */
	LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes)
		if (VOP_ISLOCKED(vp)) {
			if (!LIST_NEXT(vp, v_mntvnodes))
				(*pr)(" %p", vp);
			else if (!(cnt++ % (72 / (sizeof(void *) * 2 + 4))))
				(*pr)("\n\t%p", vp);
			else
				(*pr)(", %p", vp);
		}
	(*pr)("\n");

	if (full) {
		(*pr)("all vnodes:\n\t");
		/* XXX would take mountlist lock, except ddb has no context */
		LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes)
			if (!LIST_NEXT(vp, v_mntvnodes))
				(*pr)(" %p", vp);
			else if (!(cnt++ % (72 / (sizeof(void *) * 2 + 4))))
				(*pr)(" %p,\n\t", vp);
			else
				(*pr)(" %p,", vp);
		(*pr)("\n");
	}
}
#endif /* DDB */
