/*	$OpenBSD: vfs_subr.c,v 1.116 2005/11/07 23:15:00 pedro Exp $	*/
/*	$NetBSD: vfs_subr.c,v 1.53 1996/04/22 01:39:13 christos Exp $	*/

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.13 (Berkeley) 4/18/94
 */

/*
 * External virtual filesystem routines
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/time.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/stat.h>
#include <sys/namei.h>
#include <sys/ucred.h>
#include <sys/buf.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/domain.h>
#include <sys/mbuf.h>
#include <sys/syscallargs.h>
#include <sys/pool.h>

#include <uvm/uvm_extern.h>
#include <sys/sysctl.h>

#include <miscfs/specfs/specdev.h>

enum vtype iftovt_tab[16] = {
	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
};

int	vttoif_tab[9] = {
	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
	S_IFSOCK, S_IFIFO, S_IFMT,
};
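
/*
 * These two tables back the IFTOVT() and VTTOIF() macros in
 * <sys/vnode.h>: iftovt_tab[] maps the S_IFMT bits of a mode_t
 * (shifted down 12) to a vtype, and vttoif_tab[] maps a vtype back
 * to the corresponding S_IF* bits.  For example, IFTOVT(S_IFDIR|0755)
 * yields VDIR and VTTOIF(VDIR) yields S_IFDIR.
 */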

int doforce = 1;		/* 1 => permit forcible unmounting */
int prtactive = 0;		/* 1 => print out reclaim of active vnodes */
int suid_clear = 1;		/* 1 => clear SUID / SGID on owner change */

/*
 * Insq/Remq for the vnode usage lists.
 */
#define	bufinsvn(bp, dp)	LIST_INSERT_HEAD(dp, bp, b_vnbufs)
#define	bufremvn(bp) {							\
	LIST_REMOVE(bp, b_vnbufs);					\
	LIST_NEXT(bp, b_vnbufs) = NOLIST;				\
}

struct freelst vnode_hold_list;	/* list of vnodes referencing buffers */
struct freelst vnode_free_list;	/* vnode free list */

struct mntlist mountlist;	/* mounted filesystem list */
struct simplelock mountlist_slock;
static struct simplelock mntid_slock;
struct simplelock mntvnode_slock;
struct simplelock vnode_free_list_slock;
struct simplelock spechash_slock;

void	vclean(struct vnode *, int, struct proc *);

void insmntque(struct vnode *, struct mount *);
int getdevvp(dev_t, struct vnode **, enum vtype);

int vfs_hang_addrlist(struct mount *, struct netexport *,
				  struct export_args *);
int vfs_free_netcred(struct radix_node *, void *);
void vfs_free_addrlist(struct netexport *);
void vputonfreelist(struct vnode *);

int vflush_vnode(struct vnode *, void *);

#ifdef DEBUG
void printlockedvnodes(void);
#endif

#define VN_KNOTE(vp, b) \
	KNOTE((struct klist *)&vp->v_selectinfo.vsi_selinfo.si_note, (b))

struct pool vnode_pool;

/*
 * Initialize the vnode management data structures.
 */
void
vntblinit(void)
{

	pool_init(&vnode_pool, sizeof(struct vnode), 0, 0, 0, "vnodes",
	    &pool_allocator_nointr);
	simple_lock_init(&mntvnode_slock);
	simple_lock_init(&mntid_slock);
	simple_lock_init(&spechash_slock);
	TAILQ_INIT(&vnode_hold_list);
	TAILQ_INIT(&vnode_free_list);
	simple_lock_init(&vnode_free_list_slock);
	CIRCLEQ_INIT(&mountlist);
	simple_lock_init(&mountlist_slock);
	/*
	 * Initialize the filesystem syncer.
	 */
	vn_initialize_syncerd();
}

/*
 * Mark a mount point as busy. Used to synchronize access and to delay
 * unmounting. The interlock is not released on failure.
 *
 * Historical behavior:
 *  - LK_NOWAIT means that we should just ignore the mount point if it's
 *    being unmounted.
 *  - no flags means that we should sleep on the mount point and then
 *    fail.
 */
int
vfs_busy(struct mount *mp, int flags, struct simplelock *interlkp,
    struct proc *p)
{
	int lkflags;

	switch (flags) {
	case LK_NOWAIT:
		lkflags = LK_SHARED|LK_NOWAIT;
		break;
	case 0:
		lkflags = LK_SHARED;
		break;
	default:
		lkflags = flags;
	}

	/*
	 * Always sleepfail. We will only sleep for an exclusive lock
	 * and the exclusive lock will only be acquired when unmounting.
	 */
	lkflags |= LK_SLEEPFAIL;

	if (interlkp)
		lkflags |= LK_INTERLOCK;
	if (lockmgr(&mp->mnt_lock, lkflags, interlkp, p))
		return (ENOENT);
	return (0);
}
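
/*
 * A typical caller brackets work that must not race with an unmount
 * between vfs_busy() and vfs_unbusy(), e.g. (sketch of the pattern
 * used by printlockedvnodes() and sysctl_vnode() below):
 *
 *	if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock, p))
 *		continue;		(being unmounted, skip it)
 *	...operate on mp...
 *	vfs_unbusy(mp, p);
 *
 * Because of LK_SLEEPFAIL, a sleeping caller never obtains the lock
 * after an unmount has held it exclusively; it wakes up and fails.
 */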


/*
 * Free a busy file system
 */
void
vfs_unbusy(struct mount *mp, struct proc *p)
{
	lockmgr(&mp->mnt_lock, LK_RELEASE, NULL, p);
}

int
vfs_isbusy(struct mount *mp)
{
	return (lockstatus(&mp->mnt_lock));
}

/*
 * Lookup a filesystem type, and if found allocate and initialize
 * a mount structure for it.
 *
 * Devname is usually updated by mount(8) after booting.
 */
int
vfs_rootmountalloc(char *fstypename, char *devname, struct mount **mpp)
{
	struct proc *p = curproc;
	struct vfsconf *vfsp;
	struct mount *mp;

	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
		if (!strcmp(vfsp->vfc_name, fstypename))
			break;
	if (vfsp == NULL)
		return (ENODEV);
	mp = malloc((u_long)sizeof(struct mount), M_MOUNT, M_WAITOK);
	bzero((char *)mp, (u_long)sizeof(struct mount));
	lockinit(&mp->mnt_lock, PVFS, "vfslock", 0, 0);
	(void)vfs_busy(mp, LK_NOWAIT, 0, p);
	LIST_INIT(&mp->mnt_vnodelist);
	mp->mnt_vfc = vfsp;
	mp->mnt_op = vfsp->vfc_vfsops;
	mp->mnt_flag = MNT_RDONLY;
	mp->mnt_vnodecovered = NULLVP;
	vfsp->vfc_refcount++;
	mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;
	strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
	mp->mnt_stat.f_mntonname[0] = '/';
	(void) copystr(devname, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, 0);
	*mpp = mp;
	return (0);
}

/*
 * Find an appropriate filesystem to use for the root. If a filesystem
 * has not been preselected, walk through the list of known filesystems
 * trying those that have mountroot routines, and try them until one
 * works or we have tried them all.
 */
int
vfs_mountroot(void)
{
	struct vfsconf *vfsp;
	int error;

	if (mountroot != NULL)
		return ((*mountroot)());
	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) {
		if (vfsp->vfc_mountroot == NULL)
			continue;
		if ((error = (*vfsp->vfc_mountroot)()) == 0)
			return (0);
		printf("%s_mountroot failed: %d\n", vfsp->vfc_name, error);
	}
	return (ENODEV);
}

/*
 * Lookup a mount point by filesystem identifier.
 */
struct mount *
vfs_getvfs(fsid_t *fsid)
{
	struct mount *mp;

	simple_lock(&mountlist_slock);
	CIRCLEQ_FOREACH(mp, &mountlist, mnt_list) {
		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
			simple_unlock(&mountlist_slock);
			return (mp);
		}
	}
	simple_unlock(&mountlist_slock);
	return ((struct mount *)0);
}


/*
 * Get a new unique fsid
 */
void
vfs_getnewfsid(struct mount *mp)
{
	static u_short xxxfs_mntid;

	fsid_t tfsid;
	int mtype;

	simple_lock(&mntid_slock);
	mtype = mp->mnt_vfc->vfc_typenum;
	mp->mnt_stat.f_fsid.val[0] = makedev(nblkdev + mtype, 0);
	mp->mnt_stat.f_fsid.val[1] = mtype;
	if (xxxfs_mntid == 0)
		++xxxfs_mntid;
	tfsid.val[0] = makedev(nblkdev + mtype, xxxfs_mntid);
	tfsid.val[1] = mtype;
	if (!CIRCLEQ_EMPTY(&mountlist)) {
		while (vfs_getvfs(&tfsid)) {
			tfsid.val[0]++;
			xxxfs_mntid++;
		}
	}
	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
	simple_unlock(&mntid_slock);
}

/*
 * Make a 'unique' number from a mount type name.
 * Note that this is no longer used for ffs which
 * now has an on-disk filesystem id.
 */
long
makefstype(char *type)
{
	long rv;

	for (rv = 0; *type; type++) {
		rv <<= 2;
		rv ^= *type;
	}
	return rv;
}
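
/*
 * Worked example: makefstype("nfs") computes
 * ((('n' << 2) ^ 'f') << 2) ^ 's'.  The result is only a stable tag
 * derived from the name, not a collision-free hash.
 */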

/*
 * Set vnode attributes to VNOVAL
 */
void
vattr_null(struct vattr *vap)
{

	vap->va_type = VNON;
	/* XXX These next two used to be one line, but for a GCC bug. */
	vap->va_size = VNOVAL;
	vap->va_bytes = VNOVAL;
	vap->va_mode = vap->va_nlink = vap->va_uid = vap->va_gid =
		vap->va_fsid = vap->va_fileid =
		vap->va_blocksize = vap->va_rdev =
		vap->va_atime.tv_sec = vap->va_atime.tv_nsec =
		vap->va_mtime.tv_sec = vap->va_mtime.tv_nsec =
		vap->va_ctime.tv_sec = vap->va_ctime.tv_nsec =
		vap->va_flags = vap->va_gen = VNOVAL;
	vap->va_vaflags = 0;
}

/*
 * Routines having to do with the management of the vnode table.
 */
extern int (**dead_vnodeop_p)(void *);
long numvnodes;

/*
 * Return the next vnode from the free list.
 */
int
getnewvnode(enum vtagtype tag, struct mount *mp, int (**vops)(void *),
    struct vnode **vpp)
{
	struct proc *p = curproc;
	struct freelst *listhd;
	static int toggle;
	struct vnode *vp;
	int s;

	/*
	 * We must choose whether to allocate a new vnode or recycle an
	 * existing one. The criterion for allocating a new one is that
	 * the total number of vnodes is less than the number desired or
	 * there are no vnodes on either free list. Generally we only
	 * want to recycle vnodes that have no buffers associated with
	 * them, so we look first on the vnode_free_list. If it is empty,
	 * we next consider vnodes with referencing buffers on the
	 * vnode_hold_list. The toggle ensures that half the time we
	 * will use a buffer from the vnode_hold_list, and half the time
	 * we will allocate a new one unless the list has grown to twice
	 * the desired size. We are reluctant to recycle vnodes from the
	 * vnode_hold_list because we will lose the identity of all its
	 * referencing buffers.
	 */
	toggle ^= 1;
	if (numvnodes > 2 * desiredvnodes)
		toggle = 0;

	simple_lock(&vnode_free_list_slock);
	s = splbio();
	if ((numvnodes < desiredvnodes) ||
	    ((TAILQ_FIRST(listhd = &vnode_free_list) == NULL) &&
	    ((TAILQ_FIRST(listhd = &vnode_hold_list) == NULL) || toggle))) {
		splx(s);
		simple_unlock(&vnode_free_list_slock);
		vp = pool_get(&vnode_pool, PR_WAITOK);
		bzero((char *)vp, sizeof *vp);
		simple_lock_init(&vp->v_interlock);
		numvnodes++;
	} else {
		for (vp = TAILQ_FIRST(listhd); vp != NULLVP;
		    vp = TAILQ_NEXT(vp, v_freelist)) {
			if (simple_lock_try(&vp->v_interlock)) {
				if (VOP_ISLOCKED(vp) == 0)
					break;
				else
					simple_unlock(&vp->v_interlock);
			}
		}
		/*
		 * Unless this is a bad time of the month, at most
		 * the first NCPUS items on the free list are
		 * locked, so this is close enough to being empty.
		 */
		if (vp == NULL) {
			splx(s);
			simple_unlock(&vnode_free_list_slock);
			tablefull("vnode");
			*vpp = 0;
			return (ENFILE);
		}
		if (vp->v_usecount) {
			vprint("free vnode", vp);
			panic("free vnode isn't");
		}

		TAILQ_REMOVE(listhd, vp, v_freelist);
		vp->v_bioflag &= ~VBIOONFREELIST;
		splx(s);

		simple_unlock(&vnode_free_list_slock);
		if (vp->v_type != VBAD)
			vgonel(vp, p);
		else
			simple_unlock(&vp->v_interlock);
#ifdef DIAGNOSTIC
		if (vp->v_data) {
			vprint("cleaned vnode", vp);
			panic("cleaned vnode isn't");
		}
		s = splbio();
		if (vp->v_numoutput)
			panic("Clean vnode has pending I/O's");
		splx(s);
#endif
		vp->v_flag = 0;
		vp->v_socket = 0;
	}
	vp->v_type = VNON;
	cache_purge(vp);
	vp->v_tag = tag;
	vp->v_op = vops;
	insmntque(vp, mp);
	*vpp = vp;
	vp->v_usecount = 1;
	vp->v_data = 0;
	simple_lock_init(&vp->v_uvm.u_obj.vmobjlock);
	return (0);
}
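
/*
 * For a concrete caller of getnewvnode(), see getdevvp() below, which
 * allocates a device vnode with spec_vnodeop_p as its operations
 * vector.  A filesystem's vget routine follows the same pattern
 * (hedged sketch; "myfs_vnodeop_p" and "data" are hypothetical):
 *
 *	error = getnewvnode(VT_NON, mp, myfs_vnodeop_p, &vp);
 *	if (error)
 *		return (error);
 *	vp->v_data = data;	(attach per-filesystem state)
 *
 * The vnode is returned with v_usecount == 1, so it must eventually
 * be released with vput() or vrele().
 */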

/*
 * Move a vnode from one mount queue to another.
 */
void
insmntque(struct vnode *vp, struct mount *mp)
{
	simple_lock(&mntvnode_slock);

	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mount != NULL)
		LIST_REMOVE(vp, v_mntvnodes);
	/*
	 * Insert into list of vnodes for the new mount point, if available.
	 */
	if ((vp->v_mount = mp) != NULL)
		LIST_INSERT_HEAD(&mp->mnt_vnodelist, vp, v_mntvnodes);

	simple_unlock(&mntvnode_slock);
}

/*
 * Create a vnode for a block device.
 * Used for root filesystem, argdev, and swap areas.
 * Also used for memory file system special devices.
 */
int
bdevvp(dev_t dev, struct vnode **vpp)
{
	return (getdevvp(dev, vpp, VBLK));
}

/*
 * Create a vnode for a character device.
 * Used for kernfs and some console handling.
 */
int
cdevvp(dev_t dev, struct vnode **vpp)
{
	return (getdevvp(dev, vpp, VCHR));
}

/*
 * Create a vnode for a device.
 * Used by bdevvp (block device) for root file system etc.,
 * and by cdevvp (character device) for console and kernfs.
 */
int
getdevvp(dev_t dev, struct vnode **vpp, enum vtype type)
{
	struct vnode *vp;
	struct vnode *nvp;
	int error;

	if (dev == NODEV) {
		*vpp = NULLVP;
		return (0);
	}
	error = getnewvnode(VT_NON, NULL, spec_vnodeop_p, &nvp);
	if (error) {
		*vpp = NULLVP;
		return (error);
	}
	vp = nvp;
	vp->v_type = type;
	if ((nvp = checkalias(vp, dev, NULL)) != 0) {
		vput(vp);
		vp = nvp;
	}
	*vpp = vp;
	return (0);
}

/*
 * Check to see if the new vnode represents a special device
 * for which we already have a vnode (either because of
 * bdevvp() or because of a different vnode representing
 * the same block device). If such an alias exists, deallocate
 * the existing contents and return the aliased vnode. The
 * caller is responsible for filling it with its new contents.
 */
struct vnode *
checkalias(struct vnode *nvp, dev_t nvp_rdev, struct mount *mp)
{
	struct proc *p = curproc;
	struct vnode *vp;
	struct vnode **vpp;

	if (nvp->v_type != VBLK && nvp->v_type != VCHR)
		return (NULLVP);

	vpp = &speclisth[SPECHASH(nvp_rdev)];
loop:
	simple_lock(&spechash_slock);
	for (vp = *vpp; vp; vp = vp->v_specnext) {
		simple_lock(&vp->v_interlock);
		if (nvp_rdev != vp->v_rdev || nvp->v_type != vp->v_type) {
			simple_unlock(&vp->v_interlock);
			continue;
		}
		/*
		 * Alias, but not in use, so flush it out.
		 */
		if (vp->v_usecount == 0) {
			simple_unlock(&spechash_slock);
			vgonel(vp, p);
			goto loop;
		}
		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, p)) {
			simple_unlock(&spechash_slock);
			goto loop;
		}
		break;
	}

	/*
	 * Common case is actually in the if statement
	 */
	if (vp == NULL || !(vp->v_tag == VT_NON && vp->v_type == VBLK)) {
		MALLOC(nvp->v_specinfo, struct specinfo *,
			sizeof(struct specinfo), M_VNODE, M_WAITOK);
		nvp->v_rdev = nvp_rdev;
		nvp->v_hashchain = vpp;
		nvp->v_specnext = *vpp;
		nvp->v_specmountpoint = NULL;
		nvp->v_speclockf = NULL;
		simple_unlock(&spechash_slock);
		*vpp = nvp;
		if (vp != NULLVP) {
			nvp->v_flag |= VALIASED;
			vp->v_flag |= VALIASED;
			vput(vp);
		}
		return (NULLVP);
	}

	/*
	 * This code is the uncommon case. It is called in case
	 * we found an alias that was VT_NON && vtype of VBLK.
	 * This means we found a block device that was created
	 * using bdevvp.
	 * An example of such a vnode is the root partition device vnode
	 * created in ffs_mountroot.
	 *
	 * The vnodes created by bdevvp should not be aliased (why?).
	 */

	simple_unlock(&spechash_slock);
	VOP_UNLOCK(vp, 0, p);
	simple_lock(&vp->v_interlock);
	vclean(vp, 0, p);
	vp->v_op = nvp->v_op;
	vp->v_tag = nvp->v_tag;
	nvp->v_type = VNON;
	insmntque(vp, mp);
	return (vp);
}

/*
 * Grab a particular vnode from the free list, increment its
 * reference count and lock it. If the vnode lock bit is set,
 * the vnode is being eliminated in vgone. In that case, we
 * cannot grab it, so the process is awakened when the
 * transition is completed, and an error code is returned to
 * indicate that the vnode is no longer usable, possibly
 * having been changed to a new file system type.
 */
int
vget(struct vnode *vp, int flags, struct proc *p)
{
	int error, s, onfreelist;

	/*
	 * If the vnode is in the process of being cleaned out for
	 * another use, we wait for the cleaning to finish and then
	 * return failure. Cleaning is determined by checking that
	 * the VXLOCK flag is set.
	 */
	if ((flags & LK_INTERLOCK) == 0) {
		simple_lock(&vp->v_interlock);
		flags |= LK_INTERLOCK;
	}

	if (vp->v_flag & VXLOCK) {
		if (flags & LK_NOWAIT) {
			simple_unlock(&vp->v_interlock);
			return (EBUSY);
		}

		vp->v_flag |= VXWANT;
		ltsleep(vp, PINOD | PNORELOCK, "vget", 0, &vp->v_interlock);
		return (ENOENT);
	}

	onfreelist = vp->v_bioflag & VBIOONFREELIST;
	if (vp->v_usecount == 0 && onfreelist) {
		s = splbio();
		simple_lock(&vnode_free_list_slock);
		if (vp->v_holdcnt > 0)
			TAILQ_REMOVE(&vnode_hold_list, vp, v_freelist);
		else
			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
		simple_unlock(&vnode_free_list_slock);
		vp->v_bioflag &= ~VBIOONFREELIST;
		splx(s);
	}

	vp->v_usecount++;
	if (flags & LK_TYPE_MASK) {
		if ((error = vn_lock(vp, flags, p)) != 0) {
			vp->v_usecount--;
			if (vp->v_usecount == 0 && onfreelist)
				vputonfreelist(vp);

			simple_unlock(&vp->v_interlock);
		}
		return (error);
	}

	simple_unlock(&vp->v_interlock);

	return (0);
}
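
/*
 * Every successful vget() must be balanced by a release, e.g.:
 *
 *	if (vget(vp, LK_EXCLUSIVE, p))
 *		goto retry;		(vnode was being recycled)
 *	...use the locked vnode...
 *	vput(vp);
 *
 * If no LK_TYPE_MASK lock flag was passed, the vnode comes back
 * referenced but unlocked and is dropped with vrele() instead.
 */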


#ifdef DIAGNOSTIC
/*
 * Vnode reference.
 */
void
vref(struct vnode *vp)
{
	simple_lock(&vp->v_interlock);
	if (vp->v_usecount == 0)
		panic("vref used where vget required");
	vp->v_usecount++;
	simple_unlock(&vp->v_interlock);
}
#endif /* DIAGNOSTIC */

void
vputonfreelist(struct vnode *vp)
{
	int s;
	struct freelst *lst;

	s = splbio();
#ifdef DIAGNOSTIC
	if (vp->v_usecount != 0)
		panic("Use count is not zero!");

	if (vp->v_bioflag & VBIOONFREELIST) {
		vprint("vnode already on free list: ", vp);
		panic("vnode already on free list");
	}
#endif

	vp->v_bioflag |= VBIOONFREELIST;

	if (vp->v_holdcnt > 0)
		lst = &vnode_hold_list;
	else
		lst = &vnode_free_list;

	if (vp->v_type == VBAD)
		TAILQ_INSERT_HEAD(lst, vp, v_freelist);
	else
		TAILQ_INSERT_TAIL(lst, vp, v_freelist);

	splx(s);
}

/*
 * vput(), just unlock and vrele()
 */
void
vput(struct vnode *vp)
{
	struct proc *p = curproc;

#ifdef DIAGNOSTIC
	if (vp == NULL)
		panic("vput: null vp");
#endif
	simple_lock(&vp->v_interlock);

#ifdef DIAGNOSTIC
	if (vp->v_usecount == 0) {
		vprint("vput: bad ref count", vp);
		panic("vput: ref cnt");
	}
#endif
	vp->v_usecount--;
	if (vp->v_usecount > 0) {
		simple_unlock(&vp->v_interlock);
		VOP_UNLOCK(vp, 0, p);
		return;
	}

#ifdef DIAGNOSTIC
	if (vp->v_writecount != 0) {
		vprint("vput: bad writecount", vp);
		panic("vput: v_writecount != 0");
	}
#endif
	simple_unlock(&vp->v_interlock);

	VOP_INACTIVE(vp, p);

	simple_lock(&vp->v_interlock);

	if (vp->v_usecount == 0)
		vputonfreelist(vp);

	simple_unlock(&vp->v_interlock);
}

/*
 * Vnode release - use for active VNODES.
 * If count drops to zero, call inactive routine and return to freelist.
 */
void
vrele(struct vnode *vp)
{
	struct proc *p = curproc;

#ifdef DIAGNOSTIC
	if (vp == NULL)
		panic("vrele: null vp");
#endif
	simple_lock(&vp->v_interlock);
#ifdef DIAGNOSTIC
	if (vp->v_usecount == 0) {
		vprint("vrele: bad ref count", vp);
		panic("vrele: ref cnt");
	}
#endif
	vp->v_usecount--;
	if (vp->v_usecount > 0) {
		simple_unlock(&vp->v_interlock);
		return;
	}

#ifdef DIAGNOSTIC
	if (vp->v_writecount != 0) {
		vprint("vrele: bad writecount", vp);
		panic("vrele: v_writecount != 0");
	}
#endif
	if (vn_lock(vp, LK_EXCLUSIVE|LK_INTERLOCK, p)) {
		vprint("vrele: cannot lock", vp);
		return;
	}

	VOP_INACTIVE(vp, p);

	simple_lock(&vp->v_interlock);

	if (vp->v_usecount == 0)
		vputonfreelist(vp);

	simple_unlock(&vp->v_interlock);
}
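
/*
 * Note the asymmetry between the two release paths above: vput()
 * expects the vnode locked and unlocks it, while vrele() expects it
 * unlocked and must take the lock itself before calling
 * VOP_INACTIVE() on the last reference.
 */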

void vhold(struct vnode *vp);

/*
 * Page or buffer structure gets a reference.
 */
void
vhold(struct vnode *vp)
{
	/*
	 * If it is on the freelist and the hold count is currently
	 * zero, move it to the hold list.
	 */
	simple_lock(&vp->v_interlock);
	if ((vp->v_bioflag & VBIOONFREELIST) &&
	    vp->v_holdcnt == 0 && vp->v_usecount == 0) {
		simple_lock(&vnode_free_list_slock);
		TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
		TAILQ_INSERT_TAIL(&vnode_hold_list, vp, v_freelist);
		simple_unlock(&vnode_free_list_slock);
	}
	vp->v_holdcnt++;
	simple_unlock(&vp->v_interlock);
}

/*
 * Remove any vnodes in the vnode table belonging to mount point mp.
 *
 * If MNT_NOFORCE is specified, there should not be any active ones,
 * return error if any are found (nb: this is a user error, not a
 * system error). If MNT_FORCE is specified, detach any active vnodes
 * that are found.
 */
#ifdef DEBUG
int busyprt = 0;	/* print out busy vnodes */
struct ctldebug debug1 = { "busyprt", &busyprt };
#endif

int
vfs_mount_foreach_vnode(struct mount *mp,
    int (*func)(struct vnode *, void *), void *arg)
{
	struct vnode *vp, *nvp;
	int error = 0;

	simple_lock(&mntvnode_slock);
loop:
	for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = nvp) {
		if (vp->v_mount != mp)
			goto loop;
		nvp = LIST_NEXT(vp, v_mntvnodes);
		simple_lock(&vp->v_interlock);
		simple_unlock(&mntvnode_slock);

		error = func(vp, arg);

		simple_lock(&mntvnode_slock);

		if (error != 0)
			break;
	}
	simple_unlock(&mntvnode_slock);

	return (error);
}

struct vflush_args {
	struct vnode *skipvp;
	int busy;
	int flags;
};

int
vflush_vnode(struct vnode *vp, void *arg)
{
	struct vflush_args *va = arg;
	struct proc *p = curproc;

	if (vp == va->skipvp) {
		simple_unlock(&vp->v_interlock);
		return (0);
	}

	if ((va->flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
		simple_unlock(&vp->v_interlock);
		return (0);
	}

	/*
	 * If WRITECLOSE is set, only flush out regular file
	 * vnodes open for writing.
	 */
	if ((va->flags & WRITECLOSE) &&
	    (vp->v_writecount == 0 || vp->v_type != VREG)) {
		simple_unlock(&vp->v_interlock);
		return (0);
	}

	/*
	 * With v_usecount == 0, all we need to do is clear
	 * out the vnode data structures and we are done.
	 */
	if (vp->v_usecount == 0) {
		vgonel(vp, p);
		return (0);
	}

	/*
	 * If FORCECLOSE is set, forcibly close the vnode.
	 * For block or character devices, revert to an
	 * anonymous device. For all other files, just kill them.
	 */
	if (va->flags & FORCECLOSE) {
		if (vp->v_type != VBLK && vp->v_type != VCHR) {
			vgonel(vp, p);
		} else {
			vclean(vp, 0, p);
			vp->v_op = spec_vnodeop_p;
			insmntque(vp, (struct mount *)0);
		}
		return (0);
	}

#ifdef DEBUG
	if (busyprt)
		vprint("vflush: busy vnode", vp);
#endif
	simple_unlock(&vp->v_interlock);
	va->busy++;
	return (0);
}

int
vflush(struct mount *mp, struct vnode *skipvp, int flags)
{
	struct vflush_args va;

	va.skipvp = skipvp;
	va.busy = 0;
	va.flags = flags;

	vfs_mount_foreach_vnode(mp, vflush_vnode, &va);

	if (va.busy)
		return (EBUSY);
	return (0);
}
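
/*
 * vflush() is normally called from a filesystem's unmount routine,
 * e.g. (sketch): vflush(mp, NULLVP, mntflags & MNT_FORCE ?
 * FORCECLOSE : 0).  The skipvp argument lets a caller keep one vnode,
 * such as a root or quota vnode it still holds, out of the flush.
 */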

/*
 * Disassociate the underlying file system from a vnode.
 * The vnode interlock is held on entry.
 */
void
vclean(struct vnode *vp, int flags, struct proc *p)
{
	int active;

	/*
	 * Check to see if the vnode is in use.
	 * If so we have to reference it before we clean it out
	 * so that its count cannot fall to zero and generate a
	 * race against ourselves to recycle it.
	 */
	if ((active = vp->v_usecount) != 0)
		vp->v_usecount++;

	/*
	 * Prevent the vnode from being recycled or
	 * brought into use while we clean it out.
	 */
	if (vp->v_flag & VXLOCK)
		panic("vclean: deadlock");
	vp->v_flag |= VXLOCK;
	/*
	 * Even if the count is zero, the VOP_INACTIVE routine may still
	 * have the object locked while it cleans it out. The VOP_LOCK
	 * ensures that the VOP_INACTIVE routine is done with its work.
	 * For active vnodes, it ensures that no other activity can
	 * occur while the underlying object is being cleaned out.
	 */
	VOP_LOCK(vp, LK_DRAIN | LK_INTERLOCK, p);

	/*
	 * Clean out any VM data associated with the vnode.
	 */
	uvm_vnp_terminate(vp);
	/*
	 * Clean out any buffers associated with the vnode.
	 */
	if (flags & DOCLOSE)
		vinvalbuf(vp, V_SAVE, NOCRED, p, 0, 0);
	/*
	 * If purging an active vnode, it must be closed and
	 * deactivated before being reclaimed. Note that the
	 * VOP_INACTIVE will unlock the vnode.
	 */
	if (active) {
		if (flags & DOCLOSE)
			VOP_CLOSE(vp, FNONBLOCK, NOCRED, p);
		VOP_INACTIVE(vp, p);
	} else {
		/*
		 * Any other processes trying to obtain this lock must first
		 * wait for VXLOCK to clear, then call the new lock operation.
		 */
		VOP_UNLOCK(vp, 0, p);
	}

	/*
	 * Reclaim the vnode.
	 */
	if (VOP_RECLAIM(vp, p))
		panic("vclean: cannot reclaim");
	if (active) {
		simple_lock(&vp->v_interlock);

		vp->v_usecount--;
		if (vp->v_usecount == 0) {
			if (vp->v_holdcnt > 0)
				panic("vclean: not clean");
			vputonfreelist(vp);
		}

		simple_unlock(&vp->v_interlock);
	}
	cache_purge(vp);

	/*
	 * Done with purge, notify sleepers of the grim news.
	 */
	vp->v_op = dead_vnodeop_p;
	simple_lock(&vp->v_selectinfo.vsi_lock);
	VN_KNOTE(vp, NOTE_REVOKE);
	simple_unlock(&vp->v_selectinfo.vsi_lock);
	vp->v_tag = VT_NON;
	vp->v_flag &= ~VXLOCK;
#ifdef VFSDEBUG
	vp->v_flag &= ~VLOCKSWORK;
#endif
	if (vp->v_flag & VXWANT) {
		vp->v_flag &= ~VXWANT;
		wakeup(vp);
	}
}

/*
 * Recycle an unused vnode to the front of the free list.
 * Release the passed interlock if the vnode will be recycled.
 */
int
vrecycle(struct vnode *vp, struct simplelock *inter_lkp, struct proc *p)
{
	simple_lock(&vp->v_interlock);
	if (vp->v_usecount == 0) {
		if (inter_lkp)
			simple_unlock(inter_lkp);
		vgonel(vp, p);
		return (1);
	}
	simple_unlock(&vp->v_interlock);
	return (0);
}

/*
 * Eliminate all activity associated with a vnode
 * in preparation for reuse.
 */
void
vgone(struct vnode *vp)
{
	struct proc *p = curproc;

	simple_lock(&vp->v_interlock);
	vgonel(vp, p);
}

/*
 * vgone, with the vp interlock held.
 */
void
vgonel(struct vnode *vp, struct proc *p)
{
	struct vnode *vq;
	struct vnode *vx;
	struct mount *mp;
	int flags;

	/*
	 * If a vgone (or vclean) is already in progress,
	 * wait until it is done and return.
	 */
	if (vp->v_flag & VXLOCK) {
		vp->v_flag |= VXWANT;
		ltsleep(vp, PINOD | PNORELOCK, "vgone", 0, &vp->v_interlock);
		return;
	}

	/*
	 * Clean out the filesystem specific data.
	 */
	vclean(vp, DOCLOSE, p);
	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mount != NULL)
		insmntque(vp, (struct mount *)0);
	/*
	 * If special device, remove it from special device alias list
	 * if it is on one.
	 */
	if ((vp->v_type == VBLK || vp->v_type == VCHR) && vp->v_specinfo != 0) {
		simple_lock(&spechash_slock);
		if (*vp->v_hashchain == vp) {
			*vp->v_hashchain = vp->v_specnext;
		} else {
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_specnext != vp)
					continue;
				vq->v_specnext = vp->v_specnext;
				break;
			}
			if (vq == NULL)
				panic("missing bdev");
		}
		if (vp->v_flag & VALIASED) {
			vx = NULL;
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_rdev != vp->v_rdev ||
				    vq->v_type != vp->v_type)
					continue;
				if (vx)
					break;
				vx = vq;
			}
			if (vx == NULL)
				panic("missing alias");
			if (vq == NULL)
				vx->v_flag &= ~VALIASED;
			vp->v_flag &= ~VALIASED;
		}
		simple_unlock(&spechash_slock);

		/*
		 * If we have a mount point associated with the vnode, we must
		 * flush it out now, so as not to leave a dangling zombie mount
		 * point lying around in VFS.
		 */
		mp = vp->v_specmountpoint;
		if (mp != NULL) {
			if (!vfs_busy(mp, LK_EXCLUSIVE, NULL, p)) {
				flags = MNT_FORCE | MNT_DOOMED;
				dounmount(mp, flags, p, NULL);
			}
		}

		FREE(vp->v_specinfo, M_VNODE);
		vp->v_specinfo = NULL;
	}
	/*
	 * If it is on the freelist and not already at the head,
	 * move it to the head of the list.
	 */
	vp->v_type = VBAD;

	/*
	 * Move onto the free list, unless we were called from
	 * getnewvnode and we're not on any free list.
	 */
	if (vp->v_usecount == 0 &&
	    (vp->v_bioflag & VBIOONFREELIST)) {
		int s;

		simple_lock(&vnode_free_list_slock);
		s = splbio();

		if (vp->v_holdcnt > 0)
			panic("vgonel: not clean");

		if (TAILQ_FIRST(&vnode_free_list) != vp) {
			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
			TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
		}
		splx(s);
		simple_unlock(&vnode_free_list_slock);
	}
}

/*
 * Lookup a vnode by device number.
 */
int
vfinddev(dev_t dev, enum vtype type, struct vnode **vpp)
{
	struct vnode *vp;
	int rc = 0;

	simple_lock(&spechash_slock);
	for (vp = speclisth[SPECHASH(dev)]; vp; vp = vp->v_specnext) {
		if (dev != vp->v_rdev || type != vp->v_type)
			continue;
		*vpp = vp;
		rc = 1;
		break;
	}
	simple_unlock(&spechash_slock);
	return (rc);
}

/*
 * Revoke all the vnodes corresponding to the specified minor number
 * range (endpoints inclusive) of the specified major.
 */
void
vdevgone(int maj, int minl, int minh, enum vtype type)
{
	struct vnode *vp;
	int mn;

	for (mn = minl; mn <= minh; mn++)
		if (vfinddev(makedev(maj, mn), type, &vp))
			VOP_REVOKE(vp, REVOKEALL);
}

/*
 * Calculate the total number of references to a special device.
 */
int
vcount(struct vnode *vp)
{
	struct vnode *vq, *vnext;
	int count;

loop:
	if ((vp->v_flag & VALIASED) == 0)
		return (vp->v_usecount);
	simple_lock(&spechash_slock);
	for (count = 0, vq = *vp->v_hashchain; vq; vq = vnext) {
		vnext = vq->v_specnext;
		if (vq->v_rdev != vp->v_rdev || vq->v_type != vp->v_type)
			continue;
		/*
		 * Alias, but not in use, so flush it out.
		 */
		if (vq->v_usecount == 0 && vq != vp) {
			simple_unlock(&spechash_slock);
			vgone(vq);
			goto loop;
		}
		count += vq->v_usecount;
	}
	simple_unlock(&spechash_slock);
	return (count);
}

/*
 * Print out a description of a vnode.
 */
static char *typename[] =
   { "VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD" };

void
vprint(char *label, struct vnode *vp)
{
	char buf[64];

	if (label != NULL)
		printf("%s: ", label);
	printf("type %s, usecount %u, writecount %u, holdcount %u,",
		typename[vp->v_type], vp->v_usecount, vp->v_writecount,
		vp->v_holdcnt);
	buf[0] = '\0';
	if (vp->v_flag & VROOT)
		strlcat(buf, "|VROOT", sizeof buf);
	if (vp->v_flag & VTEXT)
		strlcat(buf, "|VTEXT", sizeof buf);
	if (vp->v_flag & VSYSTEM)
		strlcat(buf, "|VSYSTEM", sizeof buf);
	if (vp->v_flag & VXLOCK)
		strlcat(buf, "|VXLOCK", sizeof buf);
	if (vp->v_flag & VXWANT)
		strlcat(buf, "|VXWANT", sizeof buf);
	if (vp->v_bioflag & VBIOWAIT)
		strlcat(buf, "|VBIOWAIT", sizeof buf);
	if (vp->v_bioflag & VBIOONFREELIST)
		strlcat(buf, "|VBIOONFREELIST", sizeof buf);
	if (vp->v_bioflag & VBIOONSYNCLIST)
		strlcat(buf, "|VBIOONSYNCLIST", sizeof buf);
	if (vp->v_flag & VALIASED)
		strlcat(buf, "|VALIASED", sizeof buf);
	if (buf[0] != '\0')
		printf(" flags (%s)", &buf[1]);
	if (vp->v_data == NULL) {
		printf("\n");
	} else {
		printf("\n\t");
		VOP_PRINT(vp);
	}
}

#ifdef DEBUG
/*
 * List all of the locked vnodes in the system.
 * Called when debugging the kernel.
 */
void
printlockedvnodes(void)
{
	struct proc *p = curproc;
	struct mount *mp, *nmp;
	struct vnode *vp;

	printf("Locked vnodes\n");
	simple_lock(&mountlist_slock);
	for (mp = CIRCLEQ_FIRST(&mountlist); mp != CIRCLEQ_END(&mountlist);
	    mp = nmp) {
		if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock, p)) {
			nmp = CIRCLEQ_NEXT(mp, mnt_list);
			continue;
		}
		LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
			if (VOP_ISLOCKED(vp))
				vprint((char *)0, vp);
		}
		simple_lock(&mountlist_slock);
		nmp = CIRCLEQ_NEXT(mp, mnt_list);
		vfs_unbusy(mp, p);
	}
	simple_unlock(&mountlist_slock);
}
#endif

/*
 * Top level filesystem related information gathering.
 */
int
vfs_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen, struct proc *p)
{
	struct vfsconf *vfsp;

	/* all sysctl names at this level are at least name and field */
	if (namelen < 2)
		return (ENOTDIR);		/* overloaded */
	if (name[0] != VFS_GENERIC) {
		for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
			if (vfsp->vfc_typenum == name[0])
				break;
		if (vfsp == NULL)
			return (EOPNOTSUPP);
		return ((*vfsp->vfc_vfsops->vfs_sysctl)(&name[1], namelen - 1,
		    oldp, oldlenp, newp, newlen, p));
	}
	switch (name[1]) {
	case VFS_MAXTYPENUM:
		return (sysctl_rdint(oldp, oldlenp, newp, maxvfsconf));
	case VFS_CONF:
		if (namelen < 3)
			return (ENOTDIR);	/* overloaded */
		for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
			if (vfsp->vfc_typenum == name[2])
				break;
		if (vfsp == NULL)
			return (EOPNOTSUPP);
		return (sysctl_rdstruct(oldp, oldlenp, newp, vfsp,
		    sizeof(struct vfsconf)));
	}
	return (EOPNOTSUPP);
}

int kinfo_vdebug = 1;
int kinfo_vgetfailed;
#define KINFO_VNODESLOP	10
/*
 * Dump vnode list (via sysctl).
 * Copyout address of vnode followed by vnode.
 */
/* ARGSUSED */
int
sysctl_vnode(char *where, size_t *sizep, struct proc *p)
{
	struct mount *mp, *nmp;
	struct vnode *vp, *nvp;
	char *bp = where, *savebp;
	char *ewhere;
	int error;

	if (where == NULL) {
		*sizep = (numvnodes + KINFO_VNODESLOP) * sizeof(struct e_vnode);
		return (0);
	}
	ewhere = where + *sizep;

	simple_lock(&mountlist_slock);
	for (mp = CIRCLEQ_FIRST(&mountlist); mp != CIRCLEQ_END(&mountlist);
	    mp = nmp) {
		if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock, p)) {
			nmp = CIRCLEQ_NEXT(mp, mnt_list);
			continue;
		}
		savebp = bp;
again:
		for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL;
		    vp = nvp) {
			/*
			 * Check that the vp is still associated with
			 * this filesystem.  RACE: could have been
			 * recycled onto the same filesystem.
			 */
			if (vp->v_mount != mp) {
				simple_unlock(&mntvnode_slock);
				if (kinfo_vdebug)
					printf("kinfo: vp changed\n");
				bp = savebp;
				goto again;
			}
			nvp = LIST_NEXT(vp, v_mntvnodes);
			if (bp + sizeof(struct e_vnode) > ewhere) {
				simple_unlock(&mntvnode_slock);
				*sizep = bp - where;
				vfs_unbusy(mp, p);
				return (ENOMEM);
			}
			if ((error = copyout(&vp,
			    &((struct e_vnode *)bp)->vptr,
			    sizeof(struct vnode *))) ||
			   (error = copyout(vp,
			    &((struct e_vnode *)bp)->vnode,
			    sizeof(struct vnode)))) {
				vfs_unbusy(mp, p);
				return (error);
			}
			bp += sizeof(struct e_vnode);
			simple_lock(&mntvnode_slock);
		}

		simple_unlock(&mntvnode_slock);
		simple_lock(&mountlist_slock);
		nmp = CIRCLEQ_NEXT(mp, mnt_list);
		vfs_unbusy(mp, p);
	}

	simple_unlock(&mountlist_slock);

	*sizep = bp - where;
	return (0);
}

/*
 * Check to see if a filesystem is mounted on a block device.
 */
int
vfs_mountedon(struct vnode *vp)
{
	struct vnode *vq;
	int error = 0;

	if (vp->v_specmountpoint != NULL)
		return (EBUSY);
	if (vp->v_flag & VALIASED) {
		simple_lock(&spechash_slock);
		for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
			if (vq->v_rdev != vp->v_rdev ||
			    vq->v_type != vp->v_type)
				continue;
			if (vq->v_specmountpoint != NULL) {
				error = EBUSY;
				break;
			}
		}
		simple_unlock(&spechash_slock);
	}
	return (error);
}

/*
 * Build hash lists of net addresses and hang them off the mount point.
 * Called by ufs_mount() to set up the lists of export addresses.
 */
int
vfs_hang_addrlist(struct mount *mp, struct netexport *nep,
    struct export_args *argp)
{
	struct netcred *np;
	struct radix_node_head *rnh;
	int i;
	struct radix_node *rn;
	struct sockaddr *saddr, *smask = 0;
	struct domain *dom;
	int error;

	if (argp->ex_addrlen == 0) {
		if (mp->mnt_flag & MNT_DEFEXPORTED)
			return (EPERM);
		np = &nep->ne_defexported;
		np->netc_exflags = argp->ex_flags;
		np->netc_anon = argp->ex_anon;
		np->netc_anon.cr_ref = 1;
		mp->mnt_flag |= MNT_DEFEXPORTED;
		return (0);
	}
	if (argp->ex_addrlen > MLEN || argp->ex_masklen > MLEN ||
	    argp->ex_addrlen < 0 || argp->ex_masklen < 0)
		return (EINVAL);
	i = sizeof(struct netcred) + argp->ex_addrlen + argp->ex_masklen;
	np = (struct netcred *)malloc(i, M_NETADDR, M_WAITOK);
	bzero(np, i);
	saddr = (struct sockaddr *)(np + 1);
	error = copyin(argp->ex_addr, saddr, argp->ex_addrlen);
	if (error)
		goto out;
	if (saddr->sa_len > argp->ex_addrlen)
		saddr->sa_len = argp->ex_addrlen;
	if (argp->ex_masklen) {
		smask = (struct sockaddr *)((caddr_t)saddr + argp->ex_addrlen);
		error = copyin(argp->ex_mask, smask, argp->ex_masklen);
		if (error)
			goto out;
		if (smask->sa_len > argp->ex_masklen)
			smask->sa_len = argp->ex_masklen;
	}
	i = saddr->sa_family;
	if (i < 0 || i > AF_MAX) {
		error = EINVAL;
		goto out;
	}
	if ((rnh = nep->ne_rtable[i]) == 0) {
		/*
		 * Seems silly to initialize every AF when most are not
		 * used, do so on demand here.
		 */
		for (dom = domains; dom; dom = dom->dom_next)
			if (dom->dom_family == i && dom->dom_rtattach) {
				dom->dom_rtattach((void **)&nep->ne_rtable[i],
					dom->dom_rtoffset);
				break;
			}
		if ((rnh = nep->ne_rtable[i]) == 0) {
			error = ENOBUFS;
			goto out;
		}
	}
	rn = (*rnh->rnh_addaddr)((caddr_t)saddr, (caddr_t)smask, rnh,
		np->netc_rnodes);
	if (rn == 0 || np != (struct netcred *)rn) { /* already exists */
		error = EPERM;
		goto out;
	}
	np->netc_exflags = argp->ex_flags;
	np->netc_anon = argp->ex_anon;
	np->netc_anon.cr_ref = 1;
	return (0);
out:
	free(np, M_NETADDR);
	return (error);
}

/* ARGSUSED */
int
vfs_free_netcred(struct radix_node *rn, void *w)
{
	struct radix_node_head *rnh = (struct radix_node_head *)w;

	(*rnh->rnh_deladdr)(rn->rn_key, rn->rn_mask, rnh, NULL);
	free(rn, M_NETADDR);
	return (0);
}

/*
 * Free the net address hash lists that are hanging off the mount points.
 */
void
vfs_free_addrlist(struct netexport *nep)
{
	int i;
	struct radix_node_head *rnh;

	for (i = 0; i <= AF_MAX; i++)
		if ((rnh = nep->ne_rtable[i]) != NULL) {
			(*rnh->rnh_walktree)(rnh, vfs_free_netcred, rnh);
			free(rnh, M_RTABLE);
			nep->ne_rtable[i] = 0;
		}
}

int
vfs_export(struct mount *mp, struct netexport *nep, struct export_args *argp)
{
	int error;

	if (argp->ex_flags & MNT_DELEXPORT) {
		vfs_free_addrlist(nep);
		mp->mnt_flag &= ~(MNT_EXPORTED | MNT_DEFEXPORTED);
	}
	if (argp->ex_flags & MNT_EXPORTED) {
		if ((error = vfs_hang_addrlist(mp, nep, argp)) != 0)
			return (error);
		mp->mnt_flag |= MNT_EXPORTED;
	}
	return (0);
}

struct netcred *
vfs_export_lookup(struct mount *mp, struct netexport *nep, struct mbuf *nam)
{
	struct netcred *np;
	struct radix_node_head *rnh;
	struct sockaddr *saddr;

	np = NULL;
	if (mp->mnt_flag & MNT_EXPORTED) {
		/*
		 * Lookup in the export list first.
		 */
		if (nam != NULL) {
			saddr = mtod(nam, struct sockaddr *);
			rnh = nep->ne_rtable[saddr->sa_family];
			if (rnh != NULL) {
				np = (struct netcred *)
					(*rnh->rnh_matchaddr)((caddr_t)saddr,
					    rnh);
				if (np && np->netc_rnodes->rn_flags & RNF_ROOT)
					np = NULL;
			}
		}
		/*
		 * If no address match, use the default if it exists.
		 */
		if (np == NULL && mp->mnt_flag & MNT_DEFEXPORTED)
			np = &nep->ne_defexported;
	}
	return (np);
}

/*
 * Do the usual access checking.
 * file_mode, uid and gid are from the vnode in question,
 * while acc_mode and cred are from the VOP_ACCESS parameter list.
 */
int
vaccess(mode_t file_mode, uid_t uid, gid_t gid, mode_t acc_mode,
    struct ucred *cred)
{
	mode_t mask;

	/* User id 0 always gets access. */
	if (cred->cr_uid == 0)
		return 0;

	mask = 0;

	/* Otherwise, check the owner. */
	if (cred->cr_uid == uid) {
		if (acc_mode & VEXEC)
			mask |= S_IXUSR;
		if (acc_mode & VREAD)
			mask |= S_IRUSR;
		if (acc_mode & VWRITE)
			mask |= S_IWUSR;
		return (file_mode & mask) == mask ? 0 : EACCES;
	}

	/* Otherwise, check the groups. */
	if (cred->cr_gid == gid || groupmember(gid, cred)) {
		if (acc_mode & VEXEC)
			mask |= S_IXGRP;
		if (acc_mode & VREAD)
			mask |= S_IRGRP;
		if (acc_mode & VWRITE)
			mask |= S_IWGRP;
		return (file_mode & mask) == mask ? 0 : EACCES;
	}

	/* Otherwise, check everyone else. */
	if (acc_mode & VEXEC)
		mask |= S_IXOTH;
	if (acc_mode & VREAD)
		mask |= S_IROTH;
	if (acc_mode & VWRITE)
		mask |= S_IWOTH;
	return (file_mode & mask) == mask ? 0 : EACCES;
}
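
/*
 * Worked example: file_mode 0640, owner uid 100, group gid 10.  A
 * credential with cr_uid 100 requesting VREAD|VWRITE builds mask
 * S_IRUSR|S_IWUSR (0600); 0640 & 0600 == 0600, so access is granted.
 * A mere group member requesting VWRITE builds S_IWGRP (0020);
 * 0640 & 0020 == 0, so EACCES is returned.
 */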

/*
 * Unmount all file systems.
 * We traverse the list in reverse order under the assumption that doing so
 * will avoid needing to worry about dependencies.
 */
void
vfs_unmountall(void)
{
	struct mount *mp, *nmp;
	int allerror, error, again = 1;
	struct proc *p = curproc;

 retry:
	allerror = 0;
	for (mp = CIRCLEQ_LAST(&mountlist); mp != CIRCLEQ_END(&mountlist);
	    mp = nmp) {
		nmp = CIRCLEQ_PREV(mp, mnt_list);
		if ((vfs_busy(mp, LK_EXCLUSIVE|LK_NOWAIT, NULL, p)) != 0)
			continue;
		if ((error = dounmount(mp, MNT_FORCE, curproc, NULL)) != 0) {
			printf("unmount of %s failed with error %d\n",
			    mp->mnt_stat.f_mntonname, error);
			allerror = 1;
		}
	}

	if (allerror) {
		printf("WARNING: some file systems would not unmount\n");
		if (again) {
			printf("retrying\n");
			again = 0;
			goto retry;
		}
	}
}

/*
 * Sync and unmount file systems before shutting down.
 */
void
vfs_shutdown(void)
{
#ifdef ACCOUNTING
	extern void acct_shutdown(void);

	acct_shutdown();
#endif

	/* XXX Should suspend scheduling. */
	(void) spl0();

	printf("syncing disks... ");

	if (panicstr == 0) {
		/* Sync before unmount, in case we hang on something. */
		sys_sync(&proc0, (void *)0, (register_t *)0);

		/* Unmount file systems. */
		vfs_unmountall();
	}

	if (vfs_syncwait(1))
		printf("giving up\n");
	else
		printf("done\n");
}

/*
 * Perform the sync() operation and wait for buffers to flush.
 * Assumptions: called with the scheduler disabled and physical I/O
 * enabled; for now called at spl0(). XXX
 */
int
vfs_syncwait(int verbose)
{
	struct buf *bp;
	int iter, nbusy, dcount, s;
	struct proc *p;

	p = curproc ? curproc : &proc0;
	sys_sync(p, (void *)0, (register_t *)0);

	/* Wait for sync to finish. */
	dcount = 10000;
	for (iter = 0; iter < 20; iter++) {
		nbusy = 0;
		for (bp = &buf[nbuf]; --bp >= buf; ) {
			if ((bp->b_flags & (B_BUSY|B_INVAL|B_READ)) == B_BUSY)
				nbusy++;
			/*
			 * With soft updates, some buffers that are
			 * written will be remarked as dirty until other
			 * buffers are written.
			 */
			if (bp->b_flags & B_DELWRI) {
				s = splbio();
				bremfree(bp);
				bp->b_flags |= B_BUSY;
				splx(s);
				nbusy++;
				bawrite(bp);
				if (dcount-- <= 0) {
					if (verbose)
						printf("softdep ");
					return 1;
				}
			}
		}
		if (nbusy == 0)
			break;
		if (verbose)
			printf("%d ", nbusy);
		DELAY(40000 * iter);
	}

	return nbusy;
}

/*
 * POSIX filesystem-related system variables.
 */
int
fs_posix_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen, struct proc *p)
{
	/* all sysctl names at this level are terminal */
	if (namelen != 1)
		return (ENOTDIR);

	switch (name[0]) {
	case FS_POSIX_SETUID:
		if (newp && securelevel > 0)
			return (EPERM);
		return (sysctl_int(oldp, oldlenp, newp, newlen, &suid_clear));
	default:
		return (EOPNOTSUPP);
	}
	/* NOTREACHED */
}

/*
 * Filesystem-related system variables.
 */
int
fs_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen, struct proc *p)
{
	sysctlfn *fn;

	switch (name[0]) {
	case FS_POSIX:
		fn = fs_posix_sysctl;
		break;
	default:
		return (EOPNOTSUPP);
	}
	return (*fn)(name + 1, namelen - 1, oldp, oldlenp, newp, newlen, p);
}


/*
 * Routines dealing with vnodes and buffers
 */

/*
 * Wait for all outstanding I/Os to complete
 *
 * Manipulates v_numoutput. Must be called at splbio()
 */
int
vwaitforio(struct vnode *vp, int slpflag, char *wmesg, int timeo)
{
	int error = 0;

	splassert(IPL_BIO);

	while (vp->v_numoutput) {
		vp->v_bioflag |= VBIOWAIT;
		error = tsleep(&vp->v_numoutput,
		    slpflag | (PRIBIO + 1), wmesg, timeo);
		if (error)
			break;
	}

	return (error);
}

/*
 * Update outstanding I/O count and do wakeup if requested.
 *
 * Manipulates v_numoutput. Must be called at splbio()
 */
void
vwakeup(struct vnode *vp)
{
	splassert(IPL_BIO);

	if (vp != NULL) {
		if (vp->v_numoutput-- == 0)
			panic("vwakeup: neg numoutput");
		if ((vp->v_bioflag & VBIOWAIT) && vp->v_numoutput == 0) {
			vp->v_bioflag &= ~VBIOWAIT;
			wakeup(&vp->v_numoutput);
		}
	}
}

/*
 * Flush out and invalidate all buffers associated with a vnode.
 * Called with the underlying object locked.
 */
int
vinvalbuf(struct vnode *vp, int flags, struct ucred *cred, struct proc *p,
    int slpflag, int slptimeo)
{
	struct buf *bp;
	struct buf *nbp, *blist;
	int s, error;

	if (flags & V_SAVE) {
		s = splbio();
		vwaitforio(vp, 0, "vinvalbuf", 0);
		if (!LIST_EMPTY(&vp->v_dirtyblkhd)) {
			splx(s);
			if ((error = VOP_FSYNC(vp, cred, MNT_WAIT, p)) != 0)
				return (error);
			s = splbio();
			if (vp->v_numoutput > 0 ||
			    !LIST_EMPTY(&vp->v_dirtyblkhd))
				panic("vinvalbuf: dirty bufs");
		}
		splx(s);
	}
loop:
	s = splbio();
	for (;;) {
		if ((blist = LIST_FIRST(&vp->v_cleanblkhd)) &&
		    (flags & V_SAVEMETA))
			while (blist && blist->b_lblkno < 0)
				blist = LIST_NEXT(blist, b_vnbufs);
		if (blist == NULL &&
		    (blist = LIST_FIRST(&vp->v_dirtyblkhd)) &&
		    (flags & V_SAVEMETA))
			while (blist && blist->b_lblkno < 0)
				blist = LIST_NEXT(blist, b_vnbufs);
		if (!blist)
			break;

		for (bp = blist; bp; bp = nbp) {
			nbp = LIST_NEXT(bp, b_vnbufs);
			if (flags & V_SAVEMETA && bp->b_lblkno < 0)
				continue;
			if (bp->b_flags & B_BUSY) {
				bp->b_flags |= B_WANTED;
				error = tsleep(bp, slpflag | (PRIBIO + 1),
				    "vinvalbuf", slptimeo);
				if (error) {
					splx(s);
					return (error);
				}
				break;
			}
			bremfree(bp);
			bp->b_flags |= B_BUSY;
			/*
			 * XXX Since there are no node locks for NFS, I believe
			 * there is a slight chance that a delayed write will
			 * occur while sleeping just above, so check for it.
			 */
			if ((bp->b_flags & B_DELWRI) && (flags & V_SAVE)) {
				splx(s);
				(void) VOP_BWRITE(bp);
				goto loop;
			}
			bp->b_flags |= B_INVAL;
			brelse(bp);
		}
	}
	if (!(flags & V_SAVEMETA) &&
	    (!LIST_EMPTY(&vp->v_dirtyblkhd) || !LIST_EMPTY(&vp->v_cleanblkhd)))
		panic("vinvalbuf: flush failed");
	splx(s);
	return (0);
}
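
/*
 * Typical uses: vclean() above passes V_SAVE so dirty buffers are
 * written back before a vnode is reclaimed, while a caller about to
 * destroy the underlying storage would pass flags 0 to discard
 * everything.  V_SAVEMETA spares the indirect-block buffers, i.e.
 * those with a negative logical block number.
 */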

void
vflushbuf(struct vnode *vp, int sync)
{
	struct buf *bp, *nbp;
	int s;

loop:
	s = splbio();
	for (bp = LIST_FIRST(&vp->v_dirtyblkhd);
	    bp != LIST_END(&vp->v_dirtyblkhd); bp = nbp) {
		nbp = LIST_NEXT(bp, b_vnbufs);
		if ((bp->b_flags & B_BUSY))
			continue;
		if ((bp->b_flags & B_DELWRI) == 0)
			panic("vflushbuf: not dirty");
		bremfree(bp);
		bp->b_flags |= B_BUSY;
		splx(s);
		/*
		 * Wait for I/O associated with indirect blocks to complete,
		 * since there is no way to quickly wait for them below.
		 */
		if (bp->b_vp == vp || sync == 0)
			(void) bawrite(bp);
		else
			(void) bwrite(bp);
		goto loop;
	}
	if (sync == 0) {
		splx(s);
		return;
	}
	vwaitforio(vp, 0, "vflushbuf", 0);
	if (!LIST_EMPTY(&vp->v_dirtyblkhd)) {
		splx(s);
		vprint("vflushbuf: dirty", vp);
		goto loop;
	}
	splx(s);
}

/*
 * Associate a buffer with a vnode.
 *
 * Manipulates buffer vnode queues. Must be called at splbio().
 */
void
bgetvp(struct vnode *vp, struct buf *bp)
{
	splassert(IPL_BIO);

	if (bp->b_vp)
		panic("bgetvp: not free");
	vhold(vp);
	bp->b_vp = vp;
	if (vp->v_type == VBLK || vp->v_type == VCHR)
		bp->b_dev = vp->v_rdev;
	else
		bp->b_dev = NODEV;
	/*
	 * Insert onto list for new vnode.
	 */
	bufinsvn(bp, &vp->v_cleanblkhd);
}

/*
 * Disassociate a buffer from a vnode.
 *
 * Manipulates vnode buffer queues. Must be called at splbio().
 */
void
brelvp(struct buf *bp)
{
	struct vnode *vp;

	splassert(IPL_BIO);

	if ((vp = bp->b_vp) == (struct vnode *) 0)
		panic("brelvp: NULL");
	/*
	 * Delete from old vnode list, if on one.
	 */
	if (LIST_NEXT(bp, b_vnbufs) != NOLIST)
		bufremvn(bp);
	if ((vp->v_bioflag & VBIOONSYNCLIST) &&
	    LIST_FIRST(&vp->v_dirtyblkhd) == NULL) {
		vp->v_bioflag &= ~VBIOONSYNCLIST;
		LIST_REMOVE(vp, v_synclist);
	}
	bp->b_vp = (struct vnode *) 0;

	simple_lock(&vp->v_interlock);
#ifdef DIAGNOSTIC
	if (vp->v_holdcnt == 0)
		panic("brelvp: holdcnt");
#endif
	vp->v_holdcnt--;

	/*
	 * If it is on the holdlist and the hold count drops to
	 * zero, move it to the free list.
	 */
	if ((vp->v_bioflag & VBIOONFREELIST) &&
	    vp->v_holdcnt == 0 && vp->v_usecount == 0) {
		simple_lock(&vnode_free_list_slock);
		TAILQ_REMOVE(&vnode_hold_list, vp, v_freelist);
		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
		simple_unlock(&vnode_free_list_slock);
	}
	simple_unlock(&vp->v_interlock);
}

/*
 * Replaces the current vnode associated with the buffer, if any,
 * with a new vnode.
 *
 * If an output I/O is pending on the buffer, the old vnode
 * I/O count is adjusted.
 *
 * Ignores vnode buffer queues. Must be called at splbio().
 */
void
buf_replacevnode(struct buf *bp, struct vnode *newvp)
{
	struct vnode *oldvp = bp->b_vp;

	splassert(IPL_BIO);

	if (oldvp)
		brelvp(bp);

	if ((bp->b_flags & (B_READ | B_DONE)) == 0) {
		newvp->v_numoutput++;	/* put it on swapdev */
		vwakeup(oldvp);
	}

	bgetvp(newvp, bp);
	bufremvn(bp);
}

/*
 * Used to assign buffers to the appropriate clean or dirty list on
 * the vnode and to add newly dirty vnodes to the appropriate
 * filesystem syncer list.
 *
 * Manipulates vnode buffer queues. Must be called at splbio().
 */
void
reassignbuf(struct buf *bp)
{
	struct buflists *listheadp;
	int delay;
	struct vnode *vp = bp->b_vp;

	splassert(IPL_BIO);

	/*
	 * Delete from old vnode list, if on one.
	 */
	if (LIST_NEXT(bp, b_vnbufs) != NOLIST)
		bufremvn(bp);

	/*
	 * If dirty, put on list of dirty buffers;
	 * otherwise insert onto list of clean buffers.
	 */
	if ((bp->b_flags & B_DELWRI) == 0) {
		listheadp = &vp->v_cleanblkhd;
		if ((vp->v_bioflag & VBIOONSYNCLIST) &&
		    LIST_FIRST(&vp->v_dirtyblkhd) == NULL) {
			vp->v_bioflag &= ~VBIOONSYNCLIST;
			LIST_REMOVE(vp, v_synclist);
		}
	} else {
		listheadp = &vp->v_dirtyblkhd;
		if ((vp->v_bioflag & VBIOONSYNCLIST) == 0) {
			switch (vp->v_type) {
			case VDIR:
				delay = syncdelay / 2;
				break;
			case VBLK:
				if (vp->v_specmountpoint != NULL) {
					delay = syncdelay / 3;
					break;
				}
				/* fall through */
			default:
				delay = syncdelay;
			}
			vn_syncer_add_to_worklist(vp, delay);
		}
	}
	bufinsvn(bp, listheadp);
}

int
vfs_register(struct vfsconf *vfs)
{
	struct vfsconf *vfsp;
	struct vfsconf **vfspp;

#ifdef DIAGNOSTIC
	/* Paranoia? */
	if (vfs->vfc_refcount != 0)
		printf("vfs_register called with vfc_refcount > 0\n");
#endif

	/* Check if filesystem already known */
	for (vfspp = &vfsconf, vfsp = vfsconf; vfsp;
	    vfspp = &vfsp->vfc_next, vfsp = vfsp->vfc_next)
		if (strcmp(vfsp->vfc_name, vfs->vfc_name) == 0)
			return (EEXIST);

	if (vfs->vfc_typenum > maxvfsconf)
		maxvfsconf = vfs->vfc_typenum;

	vfs->vfc_next = NULL;

	/* Add to the end of the list */
	*vfspp = vfs;

	/* Call vfs_init() */
	if (vfs->vfc_vfsops->vfs_init)
		(*(vfs->vfc_vfsops->vfs_init))(vfs);

	return 0;
}
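
/*
 * vfs_register() and vfs_unregister() are the hooks a loadable
 * filesystem module would use, e.g. (hedged sketch; "myfs_vfsconf"
 * is hypothetical): fill in a struct vfsconf with the filesystem's
 * name, type number and vfsops, call vfs_register(&myfs_vfsconf) at
 * attach time, and undo it with vfs_unregister() at detach.
 */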

int
vfs_unregister(struct vfsconf *vfs)
{
	struct vfsconf *vfsp;
	struct vfsconf **vfspp;
	int maxtypenum;

	/* Find our vfsconf struct */
	for (vfspp = &vfsconf, vfsp = vfsconf; vfsp;
	    vfspp = &vfsp->vfc_next, vfsp = vfsp->vfc_next) {
		if (strcmp(vfsp->vfc_name, vfs->vfc_name) == 0)
			break;
	}

	if (!vfsp)			/* Not found */
		return (ENOENT);

	if (vfsp->vfc_refcount)		/* In use */
		return (EBUSY);

	/* Remove from list and free */
	*vfspp = vfsp->vfc_next;

	maxtypenum = 0;

	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
		if (vfsp->vfc_typenum > maxtypenum)
			maxtypenum = vfsp->vfc_typenum;

	maxvfsconf = maxtypenum;
	return 0;
}

/*
 * Check if vnode represents a disk device
 */
int
vn_isdisk(struct vnode *vp, int *errp)
{
	if (vp->v_type != VBLK && vp->v_type != VCHR)
		return (0);

	return (1);
}