/*	$OpenBSD: vfs_subr.c,v 1.93 2003/05/13 09:31:06 naddy Exp $	*/
/*	$NetBSD: vfs_subr.c,v 1.53 1996/04/22 01:39:13 christos Exp $	*/

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.13 (Berkeley) 4/18/94
 */

/*
 * External virtual filesystem routines
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/time.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/stat.h>
#include <sys/namei.h>
#include <sys/ucred.h>
#include <sys/buf.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/domain.h>
#include <sys/mbuf.h>
#include <sys/syscallargs.h>
#include <sys/pool.h>

#include <uvm/uvm_extern.h>
#include <sys/sysctl.h>

#include <miscfs/specfs/specdev.h>

enum vtype iftovt_tab[16] = {
	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
};
int	vttoif_tab[9] = {
	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
	S_IFSOCK, S_IFIFO, S_IFMT,
};
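
/*
 * These tables back the IFTOVT() and VTTOIF() macros in <sys/vnode.h>,
 * converting between the S_IFMT file type bits (shifted down by 12)
 * and enum vtype.  For example, IFTOVT(S_IFDIR) indexes iftovt_tab[4]
 * (S_IFDIR == 0040000) and yields VDIR.
 */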

int doforce = 1;		/* 1 => permit forcible unmounting */
int prtactive = 0;		/* 1 => print out reclaim of active vnodes */
int suid_clear = 1;		/* 1 => clear SUID / SGID on owner change */

/*
 * Insq/Remq for the vnode usage lists.
 */
#define	bufinsvn(bp, dp)	LIST_INSERT_HEAD(dp, bp, b_vnbufs)
#define	bufremvn(bp) {							\
	LIST_REMOVE(bp, b_vnbufs);					\
	(bp)->b_vnbufs.le_next = NOLIST;				\
}

struct freelst vnode_hold_list;   /* list of vnodes referencing buffers */
struct freelst vnode_free_list;   /* vnode free list */

struct mntlist mountlist;			/* mounted filesystem list */
struct simplelock mountlist_slock;
static struct simplelock mntid_slock;
struct simplelock mntvnode_slock;
struct simplelock vnode_free_list_slock;
struct simplelock spechash_slock;

void	vclean(struct vnode *, int, struct proc *);

void insmntque(struct vnode *, struct mount *);
int getdevvp(dev_t, struct vnode **, enum vtype);

int vfs_hang_addrlist(struct mount *, struct netexport *,
    struct export_args *);
int vfs_free_netcred(struct radix_node *, void *);
void vfs_free_addrlist(struct netexport *);
static __inline__ void vputonfreelist(struct vnode *);

int vflush_vnode(struct vnode *, void *);

#ifdef DEBUG
void printlockedvnodes(void);
#endif

#define VN_KNOTE(vp, b) \
	KNOTE((struct klist *)&vp->v_selectinfo.vsi_selinfo.si_note, (b))

struct pool vnode_pool;

/*
 * Initialize the vnode management data structures.
 */
void
vntblinit()
{

	pool_init(&vnode_pool, sizeof(struct vnode), 0, 0, 0, "vnodes",
	    &pool_allocator_nointr);
	simple_lock_init(&mntvnode_slock);
	simple_lock_init(&mntid_slock);
	simple_lock_init(&spechash_slock);
	TAILQ_INIT(&vnode_hold_list);
	TAILQ_INIT(&vnode_free_list);
	simple_lock_init(&vnode_free_list_slock);
	CIRCLEQ_INIT(&mountlist);
	simple_lock_init(&mountlist_slock);
	/*
	 * Initialize the filesystem syncer.
	 */
	vn_initialize_syncerd();
}

/*
 * Mark a mount point as busy. Used to synchronize access and to delay
 * unmounting. Interlock is not released on failure.
 *
 * Historical behavior:
 *  - LK_NOWAIT means that we should just ignore the mount point if it's
 *    being unmounted.
 *  - no flags means that we should sleep on the mount point and then
 *    fail.
 */

int
vfs_busy(struct mount *mp, int flags, struct simplelock *interlkp,
    struct proc *p)
{
	int lkflags;

	switch (flags) {
	case LK_NOWAIT:
		lkflags = LK_SHARED|LK_NOWAIT;
		break;
	case 0:
		lkflags = LK_SHARED;
		break;
	default:
		lkflags = flags;
	}

	/*
	 * Always sleepfail. We will only sleep for an exclusive lock
	 * and the exclusive lock will only be acquired when unmounting.
	 */
	lkflags |= LK_SLEEPFAIL;

	if (interlkp)
		lkflags |= LK_INTERLOCK;
	if (lockmgr(&mp->mnt_lock, lkflags, interlkp, p))
		return (ENOENT);
	return (0);
}
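
/*
 * A minimal usage sketch (the same pattern the mount list walkers in
 * this file use): busy the mount point with the mount list lock as
 * interlock, work on it, then drop it with vfs_unbusy():
 *
 *	if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock, p) == 0) {
 *		...operate on mp...
 *		vfs_unbusy(mp, p);
 *	}
 */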

/*
 * Free a busy file system
 */
void
vfs_unbusy(struct mount *mp, struct proc *p)
{
	lockmgr(&mp->mnt_lock, LK_RELEASE, NULL, p);
}

int
vfs_isbusy(struct mount *mp)
{
	return (lockstatus(&mp->mnt_lock));
}

/*
 * Lookup a filesystem type, and if found allocate and initialize
 * a mount structure for it.
 *
 * Devname is usually updated by mount(8) after booting.
 */

int
vfs_rootmountalloc(fstypename, devname, mpp)
	char *fstypename;
	char *devname;
	struct mount **mpp;
{
	struct proc *p = curproc;	/* XXX */
	struct vfsconf *vfsp;
	struct mount *mp;

	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
		if (!strcmp(vfsp->vfc_name, fstypename))
			break;
	if (vfsp == NULL)
		return (ENODEV);
	mp = malloc((u_long)sizeof(struct mount), M_MOUNT, M_WAITOK);
	bzero((char *)mp, (u_long)sizeof(struct mount));
	lockinit(&mp->mnt_lock, PVFS, "vfslock", 0, 0);
	(void)vfs_busy(mp, LK_NOWAIT, 0, p);
	LIST_INIT(&mp->mnt_vnodelist);
	mp->mnt_vfc = vfsp;
	mp->mnt_op = vfsp->vfc_vfsops;
	mp->mnt_flag = MNT_RDONLY;
	mp->mnt_vnodecovered = NULLVP;
	vfsp->vfc_refcount++;
	mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;
	strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
	mp->mnt_stat.f_mntonname[0] = '/';
	(void) copystr(devname, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, 0);
	*mpp = mp;
	return (0);
}

/*
 * Find an appropriate filesystem to use for the root. If a filesystem
 * has not been preselected, walk through the list of known filesystems
 * trying those that have mountroot routines, and try them until one
 * works or we have tried them all.
 */
int
vfs_mountroot()
{
	struct vfsconf *vfsp;
	int error;

	if (mountroot != NULL)
		return ((*mountroot)());
	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) {
		if (vfsp->vfc_mountroot == NULL)
			continue;
		if ((error = (*vfsp->vfc_mountroot)()) == 0)
			return (0);
		printf("%s_mountroot failed: %d\n", vfsp->vfc_name, error);
	}
	return (ENODEV);
}

/*
 * Lookup a mount point by filesystem identifier.
 */
struct mount *
vfs_getvfs(fsid)
	fsid_t *fsid;
{
	register struct mount *mp;

	simple_lock(&mountlist_slock);
	CIRCLEQ_FOREACH(mp, &mountlist, mnt_list) {
		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
			simple_unlock(&mountlist_slock);
			return (mp);
		}
	}
	simple_unlock(&mountlist_slock);
	return ((struct mount *)0);
}

/*
 * Get a new unique fsid
 */
void
vfs_getnewfsid(mp)
	struct mount *mp;
{
	static u_short xxxfs_mntid;

	fsid_t tfsid;
	int mtype;

	simple_lock(&mntid_slock);
	mtype = mp->mnt_vfc->vfc_typenum;
	mp->mnt_stat.f_fsid.val[0] = makedev(nblkdev + mtype, 0);
	mp->mnt_stat.f_fsid.val[1] = mtype;
	if (xxxfs_mntid == 0)
		++xxxfs_mntid;
	tfsid.val[0] = makedev(nblkdev + mtype, xxxfs_mntid);
	tfsid.val[1] = mtype;
	if (!CIRCLEQ_EMPTY(&mountlist)) {
		while (vfs_getvfs(&tfsid)) {
			tfsid.val[0]++;
			xxxfs_mntid++;
		}
	}
	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
	simple_unlock(&mntid_slock);
}

/*
 * Make a 'unique' number from a mount type name.
 * Note that this is no longer used for ffs which
 * now has an on-disk filesystem id.
 */
long
makefstype(type)
	char *type;
{
	long rv;

	for (rv = 0; *type; type++) {
		rv <<= 2;
		rv ^= *type;
	}
	return rv;
}
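
/*
 * Worked example: for the string "ffs", rv evolves as 0x66 ('f'),
 * then 0x1fe, then 0x78b, so makefstype("ffs") returns 0x78b.
 */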

/*
 * Set vnode attributes to VNOVAL
 */
void
vattr_null(vap)
	register struct vattr *vap;
{

	vap->va_type = VNON;
	/* XXX These next two used to be one line, but for a GCC bug. */
	vap->va_size = VNOVAL;
	vap->va_bytes = VNOVAL;
	vap->va_mode = vap->va_nlink = vap->va_uid = vap->va_gid =
		vap->va_fsid = vap->va_fileid =
		vap->va_blocksize = vap->va_rdev =
		vap->va_atime.tv_sec = vap->va_atime.tv_nsec =
		vap->va_mtime.tv_sec = vap->va_mtime.tv_nsec =
		vap->va_ctime.tv_sec = vap->va_ctime.tv_nsec =
		vap->va_flags = vap->va_gen = VNOVAL;
	vap->va_vaflags = 0;
}

/*
 * Routines having to do with the management of the vnode table.
 */
extern int (**dead_vnodeop_p)(void *);
long numvnodes;

/*
 * Return the next vnode from the free list.
 */
int
getnewvnode(tag, mp, vops, vpp)
	enum vtagtype tag;
	struct mount *mp;
	int (**vops)(void *);
	struct vnode **vpp;
{
	struct proc *p = curproc;			/* XXX */
	struct freelst *listhd;
	static int toggle;
	struct vnode *vp;
	int s;

	/*
	 * We must choose whether to allocate a new vnode or recycle an
	 * existing one. The criterion for allocating a new one is that
	 * the total number of vnodes is less than the number desired or
	 * there are no vnodes on either free list. Generally we only
	 * want to recycle vnodes that have no buffers associated with
	 * them, so we look first on the vnode_free_list. If it is empty,
	 * we next consider vnodes with referencing buffers on the
	 * vnode_hold_list. The toggle ensures that half the time we
	 * will use a vnode from the vnode_hold_list, and half the time
	 * we will allocate a new one unless the list has grown to twice
	 * the desired size. We are reluctant to recycle vnodes from the
	 * vnode_hold_list because we will lose the identity of all its
	 * referencing buffers.
	 */
	toggle ^= 1;
	if (numvnodes > 2 * desiredvnodes)
		toggle = 0;

	simple_lock(&vnode_free_list_slock);
	s = splbio();
	if ((numvnodes < desiredvnodes) ||
	    ((TAILQ_FIRST(listhd = &vnode_free_list) == NULL) &&
	    ((TAILQ_FIRST(listhd = &vnode_hold_list) == NULL) || toggle))) {
		splx(s);
		simple_unlock(&vnode_free_list_slock);
		vp = pool_get(&vnode_pool, PR_WAITOK);
		bzero((char *)vp, sizeof *vp);
		simple_lock_init(&vp->v_interlock);
		numvnodes++;
	} else {
		for (vp = TAILQ_FIRST(listhd); vp != NULLVP;
		    vp = TAILQ_NEXT(vp, v_freelist)) {
			if (simple_lock_try(&vp->v_interlock)) {
				if ((vp->v_flag & VLAYER) == 0)
					break;
				if (VOP_ISLOCKED(vp) == 0)
					break;
				else
					simple_unlock(&vp->v_interlock);
			}
		}
		/*
		 * Unless this is a bad time of the month, at most
		 * the first NCPUS items on the free list are
		 * locked, so this is close enough to being empty.
		 */
		if (vp == NULL) {
			splx(s);
			simple_unlock(&vnode_free_list_slock);
			tablefull("vnode");
			*vpp = 0;
			return (ENFILE);
		}
		if (vp->v_usecount) {
			vprint("free vnode", vp);
			panic("free vnode isn't");
		}

		TAILQ_REMOVE(listhd, vp, v_freelist);
		vp->v_bioflag &= ~VBIOONFREELIST;
		splx(s);

		simple_unlock(&vnode_free_list_slock);
		if (vp->v_type != VBAD)
			vgonel(vp, p);
		else
			simple_unlock(&vp->v_interlock);
#ifdef DIAGNOSTIC
		if (vp->v_data) {
			vprint("cleaned vnode", vp);
			panic("cleaned vnode isn't");
		}
		s = splbio();
		if (vp->v_numoutput)
			panic("Clean vnode has pending I/O's");
		splx(s);
#endif
		vp->v_flag = 0;
		vp->v_socket = 0;
	}
	vp->v_type = VNON;
	cache_purge(vp);
	vp->v_vnlock = NULL;
	lockinit(&vp->v_lock, PVFS, "v_lock", 0, 0);
	vp->v_tag = tag;
	vp->v_op = vops;
	insmntque(vp, mp);
	*vpp = vp;
	vp->v_usecount = 1;
	vp->v_data = 0;
	simple_lock_init(&vp->v_uvm.u_obj.vmobjlock);
	return (0);
}

/*
 * Move a vnode from one mount queue to another.
 */
void
insmntque(vp, mp)
	register struct vnode *vp;
	register struct mount *mp;
{
	simple_lock(&mntvnode_slock);
	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mount != NULL)
		LIST_REMOVE(vp, v_mntvnodes);
	/*
	 * Insert into list of vnodes for the new mount point, if available.
	 */
	if ((vp->v_mount = mp) != NULL)
		LIST_INSERT_HEAD(&mp->mnt_vnodelist, vp, v_mntvnodes);
	simple_unlock(&mntvnode_slock);
}

/*
 * Create a vnode for a block device.
 * Used for root filesystem, argdev, and swap areas.
 * Also used for memory file system special devices.
 */
int
bdevvp(dev, vpp)
	dev_t dev;
	struct vnode **vpp;
{

	return (getdevvp(dev, vpp, VBLK));
}

/*
 * Create a vnode for a character device.
 * Used for kernfs and some console handling.
 */
int
cdevvp(dev, vpp)
	dev_t dev;
	struct vnode **vpp;
{

	return (getdevvp(dev, vpp, VCHR));
}

/*
 * Create a vnode for a device.
 * Used by bdevvp (block device) for root file system etc.,
 * and by cdevvp (character device) for console and kernfs.
 */
int
getdevvp(dev, vpp, type)
	dev_t dev;
	struct vnode **vpp;
	enum vtype type;
{
	register struct vnode *vp;
	struct vnode *nvp;
	int error;

	if (dev == NODEV) {
		*vpp = NULLVP;
		return (0);
	}
	error = getnewvnode(VT_NON, NULL, spec_vnodeop_p, &nvp);
	if (error) {
		*vpp = NULLVP;
		return (error);
	}
	vp = nvp;
	vp->v_type = type;
	if ((nvp = checkalias(vp, dev, NULL)) != 0) {
		vput(vp);
		vp = nvp;
	}
	*vpp = vp;
	return (0);
}
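
/*
 * Typical use, as a sketch: mounting the root file system creates the
 * root device vnode with
 *
 *	error = bdevvp(rootdev, &rootvp);
 *
 * which arrives here with type == VBLK and dev == rootdev.
 */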

/*
 * Check to see if the new vnode represents a special device
 * for which we already have a vnode (either because of
 * bdevvp() or because of a different vnode representing
 * the same block device). If such an alias exists, deallocate
 * the existing contents and return the aliased vnode. The
 * caller is responsible for filling it with its new contents.
 */
struct vnode *
checkalias(nvp, nvp_rdev, mp)
	register struct vnode *nvp;
	dev_t nvp_rdev;
	struct mount *mp;
{
	struct proc *p = curproc;
	register struct vnode *vp;
	struct vnode **vpp;

	if (nvp->v_type != VBLK && nvp->v_type != VCHR)
		return (NULLVP);

	vpp = &speclisth[SPECHASH(nvp_rdev)];
loop:
	simple_lock(&spechash_slock);
	for (vp = *vpp; vp; vp = vp->v_specnext) {
		simple_lock(&vp->v_interlock);
		if (nvp_rdev != vp->v_rdev || nvp->v_type != vp->v_type) {
			simple_unlock(&vp->v_interlock);
			continue;
		}
		/*
		 * Alias, but not in use, so flush it out.
		 */
		if (vp->v_usecount == 0) {
			simple_unlock(&spechash_slock);
			vgonel(vp, p);
			goto loop;
		}
		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, p)) {
			simple_unlock(&spechash_slock);
			goto loop;
		}
		break;
	}

	/*
	 * Common case is actually in the if statement
	 */
	if (vp == NULL || !(vp->v_tag == VT_NON && vp->v_type == VBLK)) {
		MALLOC(nvp->v_specinfo, struct specinfo *,
			sizeof(struct specinfo), M_VNODE, M_WAITOK);
		nvp->v_rdev = nvp_rdev;
		nvp->v_hashchain = vpp;
		nvp->v_specnext = *vpp;
		nvp->v_specmountpoint = NULL;
		nvp->v_speclockf = NULL;
		simple_unlock(&spechash_slock);
		*vpp = nvp;
		if (vp != NULLVP) {
			nvp->v_flag |= VALIASED;
			vp->v_flag |= VALIASED;
			vput(vp);
		}
		return (NULLVP);
	}

	/*
	 * This code is the uncommon case. It is called in case
	 * we found an alias that was VT_NON && vtype of VBLK
	 * This means we found a block device that was created
	 * using bdevvp.
	 * An example of such a vnode is the root partition device vnode
	 * created in ffs_mountroot.
	 *
	 * The vnodes created by bdevvp should not be aliased (why?).
	 */

	simple_unlock(&spechash_slock);
	VOP_UNLOCK(vp, 0, p);
	simple_lock(&vp->v_interlock);
	vclean(vp, 0, p);
	vp->v_vnlock = NULL;
	lockinit(&vp->v_lock, PVFS, "v_lock", 0, 0);
	vp->v_op = nvp->v_op;
	vp->v_tag = nvp->v_tag;
	nvp->v_type = VNON;
	insmntque(vp, mp);
	return (vp);
}

/*
 * Grab a particular vnode from the free list, increment its
 * reference count and lock it. The vnode lock bit is set if the
 * vnode is being eliminated in vgone. The process is awakened
 * when the transition is completed, and an error returned to
 * indicate that the vnode is no longer usable (possibly having
 * been changed to a new file system type).
 */
int
vget(vp, flags, p)
	struct vnode *vp;
	int flags;
	struct proc *p;
{
	int error;
	int s;

	/*
	 * If the vnode is in the process of being cleaned out for
	 * another use, we wait for the cleaning to finish and then
	 * return failure. Cleaning is determined by checking that
	 * the VXLOCK flag is set.
	 */
	if ((flags & LK_INTERLOCK) == 0) {
		simple_lock(&vp->v_interlock);
		flags |= LK_INTERLOCK;
	}
	if (vp->v_flag & VXLOCK) {
		vp->v_flag |= VXWANT;
		simple_unlock(&vp->v_interlock);
		tsleep((caddr_t)vp, PINOD, "vget", 0);
		return (ENOENT);
	}
	if (vp->v_usecount == 0 &&
	    (vp->v_bioflag & VBIOONFREELIST)) {
		s = splbio();
		simple_lock(&vnode_free_list_slock);
		if (vp->v_holdcnt > 0)
			TAILQ_REMOVE(&vnode_hold_list, vp, v_freelist);
		else
			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
		simple_unlock(&vnode_free_list_slock);
		vp->v_bioflag &= ~VBIOONFREELIST;
		splx(s);
	}
	vp->v_usecount++;
	if (flags & LK_TYPE_MASK) {
		if ((error = vn_lock(vp, flags, p)) != 0) {
			vp->v_usecount--;
			if (vp->v_usecount == 0)
				vputonfreelist(vp);

			simple_unlock(&vp->v_interlock);
		}
		return (error);
	}
	simple_unlock(&vp->v_interlock);
	return (0);
}
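
/*
 * Sketch of the usual reference pattern: a caller that finds a vnode
 * in a cache takes a locked reference with vget() and later drops
 * both the lock and the reference with vput():
 *
 *	if (vget(vp, LK_EXCLUSIVE, p) == 0) {
 *		...use the locked vnode...
 *		vput(vp);
 *	}
 */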

#ifdef DIAGNOSTIC
/*
 * Vnode reference.
 */
void
vref(vp)
	struct vnode *vp;
{
	simple_lock(&vp->v_interlock);
	if (vp->v_usecount == 0)
		panic("vref used where vget required");
	vp->v_usecount++;
	simple_unlock(&vp->v_interlock);
}
#endif /* DIAGNOSTIC */

static __inline__ void
vputonfreelist(vp)
	struct vnode *vp;
{
	int s;
	struct freelst *lst;

	s = splbio();
#ifdef DIAGNOSTIC
	if (vp->v_usecount != 0)
		panic("Use count is not zero!");

	if (vp->v_bioflag & VBIOONFREELIST) {
		vprint("vnode already on free list: ", vp);
		panic("vnode already on free list");
	}
#endif

	vp->v_bioflag |= VBIOONFREELIST;

	if (vp->v_holdcnt > 0)
		lst = &vnode_hold_list;
	else
		lst = &vnode_free_list;

	if (vp->v_type == VBAD)
		TAILQ_INSERT_HEAD(lst, vp, v_freelist);
	else
		TAILQ_INSERT_TAIL(lst, vp, v_freelist);

	splx(s);
}

/*
 * vput(), just unlock and vrele()
 */
void
vput(vp)
	register struct vnode *vp;
{
	struct proc *p = curproc;	/* XXX */

#ifdef DIAGNOSTIC
	if (vp == NULL)
		panic("vput: null vp");
#endif
	simple_lock(&vp->v_interlock);

#ifdef DIAGNOSTIC
	if (vp->v_usecount == 0) {
		vprint("vput: bad ref count", vp);
		panic("vput: ref cnt");
	}
#endif
	vp->v_usecount--;
	if (vp->v_usecount > 0) {
		simple_unlock(&vp->v_interlock);
		VOP_UNLOCK(vp, 0, p);
		return;
	}

#ifdef DIAGNOSTIC
	if (vp->v_writecount != 0) {
		vprint("vput: bad writecount", vp);
		panic("vput: v_writecount != 0");
	}
#endif
	vputonfreelist(vp);

	simple_unlock(&vp->v_interlock);

	VOP_INACTIVE(vp, p);
}

/*
 * Vnode release - use for active VNODES.
 * If count drops to zero, call inactive routine and return to freelist.
 */
void
vrele(vp)
	register struct vnode *vp;
{
	struct proc *p = curproc;	/* XXX */

#ifdef DIAGNOSTIC
	if (vp == NULL)
		panic("vrele: null vp");
#endif
	simple_lock(&vp->v_interlock);
#ifdef DIAGNOSTIC
	if (vp->v_usecount == 0) {
		vprint("vrele: bad ref count", vp);
		panic("vrele: ref cnt");
	}
#endif
	vp->v_usecount--;
	if (vp->v_usecount > 0) {
		simple_unlock(&vp->v_interlock);
		return;
	}

#ifdef DIAGNOSTIC
	if (vp->v_writecount != 0) {
		vprint("vrele: bad writecount", vp);
		panic("vrele: v_writecount != 0");
	}
#endif
	vputonfreelist(vp);

	if (vn_lock(vp, LK_EXCLUSIVE|LK_INTERLOCK, p) == 0)
		VOP_INACTIVE(vp, p);
}

void vhold(struct vnode *vp);

/*
 * Page or buffer structure gets a reference.
 */
void
vhold(vp)
	register struct vnode *vp;
{

	/*
	 * If it is on the freelist and the hold count is currently
	 * zero, move it to the hold list.
	 */
	simple_lock(&vp->v_interlock);
	if ((vp->v_bioflag & VBIOONFREELIST) &&
	    vp->v_holdcnt == 0 && vp->v_usecount == 0) {
		simple_lock(&vnode_free_list_slock);
		TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
		TAILQ_INSERT_TAIL(&vnode_hold_list, vp, v_freelist);
		simple_unlock(&vnode_free_list_slock);
	}
	vp->v_holdcnt++;
	simple_unlock(&vp->v_interlock);
}
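
/*
 * Every vhold() is balanced by a v_holdcnt decrement in brelvp()
 * below, which moves the vnode from the hold list back to the free
 * list once both the hold count and the use count reach zero.
 */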

/*
 * Remove any vnodes in the vnode table belonging to mount point mp.
 *
 * If MNT_NOFORCE is specified, there should not be any active ones,
 * return error if any are found (nb: this is a user error, not a
 * system error). If MNT_FORCE is specified, detach any active vnodes
 * that are found.
 */
#ifdef DEBUG
int busyprt = 0;	/* print out busy vnodes */
struct ctldebug debug1 = { "busyprt", &busyprt };
#endif

int
vfs_mount_foreach_vnode(struct mount *mp,
    int (*func)(struct vnode *, void *), void *arg)
{
	struct vnode *vp, *nvp;
	int error = 0;

	simple_lock(&mntvnode_slock);
loop:
	for (vp = mp->mnt_vnodelist.lh_first; vp; vp = nvp) {
		if (vp->v_mount != mp)
			goto loop;
		nvp = vp->v_mntvnodes.le_next;
		simple_lock(&vp->v_interlock);
		simple_unlock(&mntvnode_slock);

		error = func(vp, arg);

		simple_lock(&mntvnode_slock);

		if (error != 0)
			break;
	}
	simple_unlock(&mntvnode_slock);

	return (error);
}
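
/*
 * Callback contract, as used by vflush() below: each vnode is passed
 * to func with its v_interlock held, and func must release that
 * interlock (directly, or indirectly via vgonel()) before returning.
 * A nonzero return value stops the walk and is returned to the caller.
 */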

struct vflush_args {
	struct vnode *skipvp;
	int busy;
	int flags;
};

int
vflush_vnode(struct vnode *vp, void *arg)
{
	struct vflush_args *va = arg;
	struct proc *p = curproc;

	if (vp == va->skipvp) {
		simple_unlock(&vp->v_interlock);
		return (0);
	}

	if ((va->flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
		simple_unlock(&vp->v_interlock);
		return (0);
	}

	/*
	 * If WRITECLOSE is set, only flush out regular file
	 * vnodes open for writing.
	 */
	if ((va->flags & WRITECLOSE) &&
	    (vp->v_writecount == 0 || vp->v_type != VREG)) {
		simple_unlock(&vp->v_interlock);
		return (0);
	}

	/*
	 * With v_usecount == 0, all we need to do is clear
	 * out the vnode data structures and we are done.
	 */
	if (vp->v_usecount == 0) {
		vgonel(vp, p);
		return (0);
	}

	/*
	 * If FORCECLOSE is set, forcibly close the vnode.
	 * For block or character devices, revert to an
	 * anonymous device. For all other files, just kill them.
	 */
	if (va->flags & FORCECLOSE) {
		if (vp->v_type != VBLK && vp->v_type != VCHR) {
			vgonel(vp, p);
		} else {
			vclean(vp, 0, p);
			vp->v_op = spec_vnodeop_p;
			insmntque(vp, (struct mount *)0);
		}
		return (0);
	}

#ifdef DEBUG
	if (busyprt)
		vprint("vflush: busy vnode", vp);
#endif
	simple_unlock(&vp->v_interlock);
	va->busy++;
	return (0);
}

int
vflush(mp, skipvp, flags)
	struct mount *mp;
	struct vnode *skipvp;
	int flags;
{
	struct vflush_args va;

	va.skipvp = skipvp;
	va.busy = 0;
	va.flags = flags;

	vfs_mount_foreach_vnode(mp, vflush_vnode, &va);

	if (va.busy)
		return (EBUSY);
	return (0);
}

/*
 * Disassociate the underlying file system from a vnode.
 * The vnode interlock is held on entry.
 */
void
vclean(vp, flags, p)
	register struct vnode *vp;
	int flags;
	struct proc *p;
{
	int active;

	/*
	 * Check to see if the vnode is in use.
	 * If so we have to reference it before we clean it out
	 * so that its count cannot fall to zero and generate a
	 * race against ourselves to recycle it.
	 */
	if ((active = vp->v_usecount) != 0)
		vp->v_usecount++;

	/*
	 * Prevent the vnode from being recycled or
	 * brought into use while we clean it out.
	 */
	if (vp->v_flag & VXLOCK)
		panic("vclean: deadlock");
	vp->v_flag |= VXLOCK;
	/*
	 * Even if the count is zero, the VOP_INACTIVE routine may still
	 * have the object locked while it cleans it out. The VOP_LOCK
	 * ensures that the VOP_INACTIVE routine is done with its work.
	 * For active vnodes, it ensures that no other activity can
	 * occur while the underlying object is being cleaned out.
	 */
	VOP_LOCK(vp, LK_DRAIN | LK_INTERLOCK, p);

	/*
	 * Clean out any VM data associated with the vnode.
	 */
	uvm_vnp_terminate(vp);
	/*
	 * Clean out any buffers associated with the vnode.
	 */
	if (flags & DOCLOSE)
		vinvalbuf(vp, V_SAVE, NOCRED, p, 0, 0);
	/*
	 * If purging an active vnode, it must be closed and
	 * deactivated before being reclaimed. Note that the
	 * VOP_INACTIVE will unlock the vnode.
	 */
	if (active) {
		if (flags & DOCLOSE)
			VOP_CLOSE(vp, FNONBLOCK, NOCRED, p);
		VOP_INACTIVE(vp, p);
	} else {
		/*
		 * Any other processes trying to obtain this lock must first
		 * wait for VXLOCK to clear, then call the new lock operation.
		 */
		VOP_UNLOCK(vp, 0, p);
	}

	/*
	 * Reclaim the vnode.
	 */
	if (VOP_RECLAIM(vp, p))
		panic("vclean: cannot reclaim");
	if (active) {
		simple_lock(&vp->v_interlock);

		vp->v_usecount--;
		if (vp->v_usecount == 0) {
			if (vp->v_holdcnt > 0)
				panic("vclean: not clean");
			vputonfreelist(vp);
		}

		simple_unlock(&vp->v_interlock);
	}
	cache_purge(vp);

	/*
	 * Done with purge, notify sleepers of the grim news.
	 */
	vp->v_op = dead_vnodeop_p;
	simple_lock(&vp->v_selectinfo.vsi_lock);
	VN_KNOTE(vp, NOTE_REVOKE);
	simple_unlock(&vp->v_selectinfo.vsi_lock);
	vp->v_tag = VT_NON;
	vp->v_flag &= ~VXLOCK;
#ifdef DIAGNOSTIC
	vp->v_flag &= ~VLOCKSWORK;
#endif
	if (vp->v_flag & VXWANT) {
		vp->v_flag &= ~VXWANT;
		wakeup((caddr_t)vp);
	}
}

/*
 * Recycle an unused vnode to the front of the free list.
 * Release the passed interlock if the vnode will be recycled.
 */
int
vrecycle(vp, inter_lkp, p)
	struct vnode *vp;
	struct simplelock *inter_lkp;
	struct proc *p;
{

	simple_lock(&vp->v_interlock);
	if (vp->v_usecount == 0) {
		if (inter_lkp)
			simple_unlock(inter_lkp);
		vgonel(vp, p);
		return (1);
	}
	simple_unlock(&vp->v_interlock);
	return (0);
}

/*
 * Eliminate all activity associated with a vnode
 * in preparation for reuse.
 */
void
vgone(vp)
	register struct vnode *vp;
{
	struct proc *p = curproc;

	simple_lock(&vp->v_interlock);
	vgonel(vp, p);
}

/*
 * vgone, with the vp interlock held.
 */
void
vgonel(vp, p)
	struct vnode *vp;
	struct proc *p;
{
	register struct vnode *vq;
	struct vnode *vx;

	/*
	 * If a vgone (or vclean) is already in progress,
	 * wait until it is done and return.
	 */
	if (vp->v_flag & VXLOCK) {
		vp->v_flag |= VXWANT;
		simple_unlock(&vp->v_interlock);
		tsleep((caddr_t)vp, PINOD, "vgone", 0);
		return;
	}
	/*
	 * Clean out the filesystem specific data.
	 */
	vclean(vp, DOCLOSE, p);
	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mount != NULL)
		insmntque(vp, (struct mount *)0);
	/*
	 * If special device, remove it from special device alias list
	 * if it is on one.
	 */
	if ((vp->v_type == VBLK || vp->v_type == VCHR) && vp->v_specinfo != 0) {
		simple_lock(&spechash_slock);
		if (*vp->v_hashchain == vp) {
			*vp->v_hashchain = vp->v_specnext;
		} else {
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_specnext != vp)
					continue;
				vq->v_specnext = vp->v_specnext;
				break;
			}
			if (vq == NULL)
				panic("missing bdev");
		}
		if (vp->v_flag & VALIASED) {
			vx = NULL;
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_rdev != vp->v_rdev ||
				    vq->v_type != vp->v_type)
					continue;
				if (vx)
					break;
				vx = vq;
			}
			if (vx == NULL)
				panic("missing alias");
			if (vq == NULL)
				vx->v_flag &= ~VALIASED;
			vp->v_flag &= ~VALIASED;
		}
		simple_unlock(&spechash_slock);
		FREE(vp->v_specinfo, M_VNODE);
		vp->v_specinfo = NULL;
	}
	vp->v_type = VBAD;

	/*
	 * Move onto the free list, unless we were called from
	 * getnewvnode and we're not on any free list.  If it is
	 * already on the free list but not at the head, move it
	 * to the head of the list.
	 */
	if (vp->v_usecount == 0 &&
	    (vp->v_bioflag & VBIOONFREELIST)) {
		int s;

		simple_lock(&vnode_free_list_slock);
		s = splbio();

		if (vp->v_holdcnt > 0)
			panic("vgonel: not clean");

		if (TAILQ_FIRST(&vnode_free_list) != vp) {
			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
			TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
		}
		splx(s);
		simple_unlock(&vnode_free_list_slock);
	}
}

/*
 * Lookup a vnode by device number.
 */
int
vfinddev(dev, type, vpp)
	dev_t dev;
	enum vtype type;
	struct vnode **vpp;
{
	register struct vnode *vp;
	int rc = 0;

	simple_lock(&spechash_slock);
	for (vp = speclisth[SPECHASH(dev)]; vp; vp = vp->v_specnext) {
		if (dev != vp->v_rdev || type != vp->v_type)
			continue;
		*vpp = vp;
		rc = 1;
		break;
	}
	simple_unlock(&spechash_slock);
	return (rc);
}

/*
 * Revoke all the vnodes corresponding to the specified minor number
 * range (endpoints inclusive) of the specified major.
 */
void
vdevgone(maj, minl, minh, type)
	int maj, minl, minh;
	enum vtype type;
{
	struct vnode *vp;
	int mn;

	for (mn = minl; mn <= minh; mn++)
		if (vfinddev(makedev(maj, mn), type, &vp))
			VOP_REVOKE(vp, REVOKEALL);
}
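
/*
 * A sketch of the intended use (with hypothetical values): a disk
 * driver detaching unit 3 would revoke both flavors of its device
 * vnodes, using its block and character major numbers respectively:
 *
 *	vdevgone(bmaj, 3, 3, VBLK);
 *	vdevgone(cmaj, 3, 3, VCHR);
 */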

/*
 * Calculate the total number of references to a special device.
 */
int
vcount(vp)
	struct vnode *vp;
{
	struct vnode *vq, *vnext;
	int count;

loop:
	if ((vp->v_flag & VALIASED) == 0)
		return (vp->v_usecount);
	simple_lock(&spechash_slock);
	for (count = 0, vq = *vp->v_hashchain; vq; vq = vnext) {
		vnext = vq->v_specnext;
		if (vq->v_rdev != vp->v_rdev || vq->v_type != vp->v_type)
			continue;
		/*
		 * Alias, but not in use, so flush it out.
		 */
		if (vq->v_usecount == 0 && vq != vp) {
			simple_unlock(&spechash_slock);
			vgone(vq);
			goto loop;
		}
		count += vq->v_usecount;
	}
	simple_unlock(&spechash_slock);
	return (count);
}

/*
 * Print out a description of a vnode.
 */
static char *typename[] =
   { "VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD" };

void
vprint(label, vp)
	char *label;
	register struct vnode *vp;
{
	char buf[64];

	if (label != NULL)
		printf("%s: ", label);
	printf("type %s, usecount %u, writecount %u, holdcount %u,",
		typename[vp->v_type], vp->v_usecount, vp->v_writecount,
		vp->v_holdcnt);
	buf[0] = '\0';
	if (vp->v_flag & VROOT)
		strlcat(buf, "|VROOT", sizeof buf);
	if (vp->v_flag & VTEXT)
		strlcat(buf, "|VTEXT", sizeof buf);
	if (vp->v_flag & VSYSTEM)
		strlcat(buf, "|VSYSTEM", sizeof buf);
	if (vp->v_flag & VXLOCK)
		strlcat(buf, "|VXLOCK", sizeof buf);
	if (vp->v_flag & VXWANT)
		strlcat(buf, "|VXWANT", sizeof buf);
	if (vp->v_bioflag & VBIOWAIT)
		strlcat(buf, "|VBIOWAIT", sizeof buf);
	if (vp->v_flag & VALIASED)
		strlcat(buf, "|VALIASED", sizeof buf);
	if (buf[0] != '\0')
		printf(" flags (%s)", &buf[1]);
	if (vp->v_data == NULL) {
		printf("\n");
	} else {
		printf("\n\t");
		VOP_PRINT(vp);
	}
}

#ifdef DEBUG
/*
 * List all of the locked vnodes in the system.
 * Called when debugging the kernel.
 */
void
printlockedvnodes()
{
	struct proc *p = curproc;
	register struct mount *mp, *nmp;
	register struct vnode *vp;

	printf("Locked vnodes\n");
	simple_lock(&mountlist_slock);
	for (mp = CIRCLEQ_FIRST(&mountlist); mp != CIRCLEQ_END(&mountlist);
	    mp = nmp) {
		if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock, p)) {
			nmp = CIRCLEQ_NEXT(mp, mnt_list);
			continue;
		}
		for (vp = mp->mnt_vnodelist.lh_first; vp;
		    vp = vp->v_mntvnodes.le_next) {
			if (VOP_ISLOCKED(vp))
				vprint((char *)0, vp);
		}
		simple_lock(&mountlist_slock);
		nmp = CIRCLEQ_NEXT(mp, mnt_list);
		vfs_unbusy(mp, p);
	}
	simple_unlock(&mountlist_slock);
}
#endif

/*
 * Top level filesystem related information gathering.
 */
int
vfs_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
	int *name;
	u_int namelen;
	void *oldp;
	size_t *oldlenp;
	void *newp;
	size_t newlen;
	struct proc *p;
{
	struct vfsconf *vfsp;

	/* all sysctl names at this level are at least name and field */
	if (namelen < 2)
		return (ENOTDIR);		/* overloaded */
	if (name[0] != VFS_GENERIC) {
		for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
			if (vfsp->vfc_typenum == name[0])
				break;
		if (vfsp == NULL)
			return (EOPNOTSUPP);
		return ((*vfsp->vfc_vfsops->vfs_sysctl)(&name[1], namelen - 1,
		    oldp, oldlenp, newp, newlen, p));
	}
	switch (name[1]) {
	case VFS_MAXTYPENUM:
		return (sysctl_rdint(oldp, oldlenp, newp, maxvfsconf));
	case VFS_CONF:
		if (namelen < 3)
			return (ENOTDIR);	/* overloaded */
		for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
			if (vfsp->vfc_typenum == name[2])
				break;
		if (vfsp == NULL)
			return (EOPNOTSUPP);
		return (sysctl_rdstruct(oldp, oldlenp, newp, vfsp,
		    sizeof(struct vfsconf)));
	}
	return (EOPNOTSUPP);
}
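
/*
 * Reached from userland via sysctl(3) with a CTL_VFS mib: a name
 * vector such as { CTL_VFS, VFS_GENERIC, VFS_CONF, <typenum> }
 * (a sketch; see <sys/sysctl.h> for the exact names) arrives here
 * with name pointing at the VFS_GENERIC element and returns the
 * matching vfsconf record.
 */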

int kinfo_vdebug = 1;
int kinfo_vgetfailed;
#define KINFO_VNODESLOP	10
/*
 * Dump vnode list (via sysctl).
 * Copyout address of vnode followed by vnode.
 */
/* ARGSUSED */
int
sysctl_vnode(where, sizep, p)
	char *where;
	size_t *sizep;
	struct proc *p;
{
	register struct mount *mp, *nmp;
	struct vnode *vp, *nvp;
	register char *bp = where, *savebp;
	char *ewhere;
	int error;

	if (where == NULL) {
		*sizep = (numvnodes + KINFO_VNODESLOP) * sizeof(struct e_vnode);
		return (0);
	}
	ewhere = where + *sizep;

	simple_lock(&mountlist_slock);
	for (mp = CIRCLEQ_FIRST(&mountlist); mp != CIRCLEQ_END(&mountlist);
	    mp = nmp) {
		if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock, p)) {
			nmp = CIRCLEQ_NEXT(mp, mnt_list);
			continue;
		}
		savebp = bp;
again:
		simple_lock(&mntvnode_slock);
		for (vp = mp->mnt_vnodelist.lh_first; vp != NULL;
		    vp = nvp) {
			/*
			 * Check that the vp is still associated with
			 * this filesystem.  RACE: could have been
			 * recycled onto the same filesystem.
			 */
			if (vp->v_mount != mp) {
				simple_unlock(&mntvnode_slock);
				if (kinfo_vdebug)
					printf("kinfo: vp changed\n");
				bp = savebp;
				goto again;
			}
			nvp = vp->v_mntvnodes.le_next;
			if (bp + sizeof(struct e_vnode) > ewhere) {
				simple_unlock(&mntvnode_slock);
				*sizep = bp - where;
				vfs_unbusy(mp, p);
				return (ENOMEM);
			}
			simple_unlock(&mntvnode_slock);
			if ((error = copyout((caddr_t)&vp,
			    &((struct e_vnode *)bp)->vptr,
			    sizeof(struct vnode *))) ||
			    (error = copyout((caddr_t)vp,
			    &((struct e_vnode *)bp)->vnode,
			    sizeof(struct vnode)))) {
				vfs_unbusy(mp, p);
				return (error);
			}
			bp += sizeof(struct e_vnode);
			simple_lock(&mntvnode_slock);
		}

		simple_unlock(&mntvnode_slock);
		simple_lock(&mountlist_slock);
		nmp = CIRCLEQ_NEXT(mp, mnt_list);
		vfs_unbusy(mp, p);
	}

	simple_unlock(&mountlist_slock);

	*sizep = bp - where;
	return (0);
}

/*
 * Check to see if a filesystem is mounted on a block device.
 */
int
vfs_mountedon(vp)
	register struct vnode *vp;
{
	register struct vnode *vq;
	int error = 0;

	if (vp->v_specmountpoint != NULL)
		return (EBUSY);
	if (vp->v_flag & VALIASED) {
		simple_lock(&spechash_slock);
		for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
			if (vq->v_rdev != vp->v_rdev ||
			    vq->v_type != vp->v_type)
				continue;
			if (vq->v_specmountpoint != NULL) {
				error = EBUSY;
				break;
			}
		}
		simple_unlock(&spechash_slock);
	}
	return (error);
}

/*
 * Build hash lists of net addresses and hang them off the mount point.
 * Called by ufs_mount() to set up the lists of export addresses.
 */
int
vfs_hang_addrlist(mp, nep, argp)
	struct mount *mp;
	struct netexport *nep;
	struct export_args *argp;
{
	register struct netcred *np;
	register struct radix_node_head *rnh;
	register int i;
	struct radix_node *rn;
	struct sockaddr *saddr, *smask = 0;
	struct domain *dom;
	int error;

	if (argp->ex_addrlen == 0) {
		if (mp->mnt_flag & MNT_DEFEXPORTED)
			return (EPERM);
		np = &nep->ne_defexported;
		np->netc_exflags = argp->ex_flags;
		np->netc_anon = argp->ex_anon;
		np->netc_anon.cr_ref = 1;
		mp->mnt_flag |= MNT_DEFEXPORTED;
		return (0);
	}
	if (argp->ex_addrlen > MLEN)
		return (EINVAL);
	i = sizeof(struct netcred) + argp->ex_addrlen + argp->ex_masklen;
	np = (struct netcred *)malloc(i, M_NETADDR, M_WAITOK);
	bzero((caddr_t)np, i);
	saddr = (struct sockaddr *)(np + 1);
	error = copyin(argp->ex_addr, (caddr_t)saddr, argp->ex_addrlen);
	if (error)
		goto out;
	if (saddr->sa_len > argp->ex_addrlen)
		saddr->sa_len = argp->ex_addrlen;
	if (argp->ex_masklen) {
		smask = (struct sockaddr *)((caddr_t)saddr + argp->ex_addrlen);
		error = copyin(argp->ex_mask, (caddr_t)smask, argp->ex_masklen);
		if (error)
			goto out;
		if (smask->sa_len > argp->ex_masklen)
			smask->sa_len = argp->ex_masklen;
	}
	i = saddr->sa_family;
	if (i < 0 || i > AF_MAX) {
		error = EINVAL;
		goto out;
	}
	if ((rnh = nep->ne_rtable[i]) == 0) {
		/*
		 * Seems silly to initialize every AF when most are not
		 * used, do so on demand here.
		 */
		for (dom = domains; dom; dom = dom->dom_next)
			if (dom->dom_family == i && dom->dom_rtattach) {
				dom->dom_rtattach((void **)&nep->ne_rtable[i],
					dom->dom_rtoffset);
				break;
			}
		if ((rnh = nep->ne_rtable[i]) == 0) {
			error = ENOBUFS;
			goto out;
		}
	}
	rn = (*rnh->rnh_addaddr)((caddr_t)saddr, (caddr_t)smask, rnh,
		np->netc_rnodes);
	if (rn == 0 || np != (struct netcred *)rn) { /* already exists */
		error = EPERM;
		goto out;
	}
	np->netc_exflags = argp->ex_flags;
	np->netc_anon = argp->ex_anon;
	np->netc_anon.cr_ref = 1;
	return (0);
out:
	free(np, M_NETADDR);
	return (error);
}

/* ARGSUSED */
int
vfs_free_netcred(rn, w)
	struct radix_node *rn;
	void *w;
{
	register struct radix_node_head *rnh = (struct radix_node_head *)w;

	(*rnh->rnh_deladdr)(rn->rn_key, rn->rn_mask, rnh);
	free((caddr_t)rn, M_NETADDR);
	return (0);
}

/*
 * Free the net address hash lists that are hanging off the mount points.
 */
void
vfs_free_addrlist(nep)
	struct netexport *nep;
{
	register int i;
	register struct radix_node_head *rnh;

	for (i = 0; i <= AF_MAX; i++)
		if ((rnh = nep->ne_rtable[i]) != NULL) {
			(*rnh->rnh_walktree)(rnh, vfs_free_netcred, rnh);
			free((caddr_t)rnh, M_RTABLE);
			nep->ne_rtable[i] = 0;
		}
}

int
vfs_export(mp, nep, argp)
	struct mount *mp;
	struct netexport *nep;
	struct export_args *argp;
{
	int error;

	if (argp->ex_flags & MNT_DELEXPORT) {
		vfs_free_addrlist(nep);
		mp->mnt_flag &= ~(MNT_EXPORTED | MNT_DEFEXPORTED);
	}
	if (argp->ex_flags & MNT_EXPORTED) {
		if ((error = vfs_hang_addrlist(mp, nep, argp)) != 0)
			return (error);
		mp->mnt_flag |= MNT_EXPORTED;
	}
	return (0);
}

struct netcred *
vfs_export_lookup(mp, nep, nam)
	register struct mount *mp;
	struct netexport *nep;
	struct mbuf *nam;
{
	register struct netcred *np;
	register struct radix_node_head *rnh;
	struct sockaddr *saddr;

	np = NULL;
	if (mp->mnt_flag & MNT_EXPORTED) {
		/*
		 * Lookup in the export list first.
		 */
		if (nam != NULL) {
			saddr = mtod(nam, struct sockaddr *);
			rnh = nep->ne_rtable[saddr->sa_family];
			if (rnh != NULL) {
				np = (struct netcred *)
					(*rnh->rnh_matchaddr)((caddr_t)saddr,
					    rnh);
				if (np && np->netc_rnodes->rn_flags & RNF_ROOT)
					np = NULL;
			}
		}
		/*
		 * If no address match, use the default if it exists.
		 */
		if (np == NULL && mp->mnt_flag & MNT_DEFEXPORTED)
			np = &nep->ne_defexported;
	}
	return (np);
}

/*
 * Do the usual access checking.
 * file_mode, uid and gid are from the vnode in question,
 * while acc_mode and cred are from the VOP_ACCESS parameter list.
 */
int
vaccess(file_mode, uid, gid, acc_mode, cred)
	mode_t file_mode;
	uid_t uid;
	gid_t gid;
	mode_t acc_mode;
	struct ucred *cred;
{
	mode_t mask;

	/* User id 0 always gets access. */
	if (cred->cr_uid == 0)
		return 0;

	mask = 0;

	/* Otherwise, check the owner. */
	if (cred->cr_uid == uid) {
		if (acc_mode & VEXEC)
			mask |= S_IXUSR;
		if (acc_mode & VREAD)
			mask |= S_IRUSR;
		if (acc_mode & VWRITE)
			mask |= S_IWUSR;
		return (file_mode & mask) == mask ? 0 : EACCES;
	}

	/* Otherwise, check the groups. */
	if (cred->cr_gid == gid || groupmember(gid, cred)) {
		if (acc_mode & VEXEC)
			mask |= S_IXGRP;
		if (acc_mode & VREAD)
			mask |= S_IRGRP;
		if (acc_mode & VWRITE)
			mask |= S_IWGRP;
		return (file_mode & mask) == mask ? 0 : EACCES;
	}

	/* Otherwise, check everyone else. */
	if (acc_mode & VEXEC)
		mask |= S_IXOTH;
	if (acc_mode & VREAD)
		mask |= S_IROTH;
	if (acc_mode & VWRITE)
		mask |= S_IWOTH;
	return (file_mode & mask) == mask ? 0 : EACCES;
}

/*
 * Unmount all file systems.
 * We traverse the list in reverse order under the assumption that doing so
 * will avoid needing to worry about dependencies.
 */
void
vfs_unmountall(void)
{
	struct mount *mp, *nmp;
	int allerror, error, again = 1;
	struct proc *p = curproc;

 retry:
	allerror = 0;
	for (mp = CIRCLEQ_LAST(&mountlist); mp != CIRCLEQ_END(&mountlist);
	    mp = nmp) {
		nmp = CIRCLEQ_PREV(mp, mnt_list);
		if (vfs_busy(mp, LK_EXCLUSIVE|LK_NOWAIT, NULL, p) != 0)
			continue;
		if ((error = dounmount(mp, MNT_FORCE, p, NULL)) != 0) {
			printf("unmount of %s failed with error %d\n",
			    mp->mnt_stat.f_mntonname, error);
			allerror = 1;
		}
	}

	if (allerror) {
		printf("WARNING: some file systems would not unmount\n");
		if (again) {
			printf("retrying\n");
			again = 0;
			goto retry;
		}
	}
}

/*
 * Sync and unmount file systems before shutting down.
 */
void
vfs_shutdown()
{
	/* XXX Should suspend scheduling. */
	(void) spl0();

	printf("syncing disks... ");

	if (panicstr == 0) {
		/* Sync before unmount, in case we hang on something. */
		sys_sync(&proc0, (void *)0, (register_t *)0);

		/* Unmount file systems. */
		vfs_unmountall();
	}

	if (vfs_syncwait(1))
		printf("giving up\n");
	else
		printf("done\n");
}

/*
 * Perform a sync() operation and wait for buffers to flush.
 * Assumptions: called with the scheduler disabled and physical I/O
 * enabled; for now called at spl0().  XXX
 */
int
vfs_syncwait(verbose)
	int verbose;
{
	register struct buf *bp;
	int iter, nbusy, dcount, s;
	struct proc *p;

	p = curproc ? curproc : &proc0;
	sys_sync(p, (void *)0, (register_t *)0);

	/* Wait for sync to finish. */
	dcount = 10000;
	for (iter = 0; iter < 20; iter++) {
		nbusy = 0;
		for (bp = &buf[nbuf]; --bp >= buf; ) {
			if ((bp->b_flags & (B_BUSY|B_INVAL|B_READ)) == B_BUSY)
				nbusy++;
			/*
			 * With soft updates, some buffers that are
			 * written will be remarked as dirty until other
			 * buffers are written.
			 */
			if (bp->b_flags & B_DELWRI) {
				s = splbio();
				bremfree(bp);
				bp->b_flags |= B_BUSY;
				splx(s);
				nbusy++;
				bawrite(bp);
				if (dcount-- <= 0) {
					if (verbose)
						printf("softdep ");
					return 1;
				}
			}
		}
		if (nbusy == 0)
			break;
		if (verbose)
			printf("%d ", nbusy);
		DELAY(40000 * iter);
	}

	return nbusy;
}

/*
 * POSIX file system related system variables.
 */
int
fs_posix_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
	int *name;
	u_int namelen;
	void *oldp;
	size_t *oldlenp;
	void *newp;
	size_t newlen;
	struct proc *p;
{
	/* all sysctl names at this level are terminal */
	if (namelen != 1)
		return (ENOTDIR);

	switch (name[0]) {
	case FS_POSIX_SETUID:
		if (newp && securelevel > 0)
			return (EPERM);
		return (sysctl_int(oldp, oldlenp, newp, newlen, &suid_clear));
	default:
		return (EOPNOTSUPP);
	}
	/* NOTREACHED */
}

/*
 * File system related system variables.
 */
int
fs_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
	int *name;
	u_int namelen;
	void *oldp;
	size_t *oldlenp;
	void *newp;
	size_t newlen;
	struct proc *p;
{
	sysctlfn *fn;

	switch (name[0]) {
	case FS_POSIX:
		fn = fs_posix_sysctl;
		break;
	default:
		return (EOPNOTSUPP);
	}
	return (*fn)(name + 1, namelen - 1, oldp, oldlenp, newp, newlen, p);
}

/*
 * Routines dealing with vnodes and buffers
 */

/*
 * Wait for all outstanding I/Os to complete.
 *
 * Manipulates v_numoutput. Must be called at splbio().
 */
int
vwaitforio(vp, slpflag, wmesg, timeo)
	struct vnode *vp;
	int slpflag, timeo;
	char *wmesg;
{
	int error = 0;

	splassert(IPL_BIO);

	while (vp->v_numoutput) {
		vp->v_bioflag |= VBIOWAIT;
		error = tsleep((caddr_t)&vp->v_numoutput,
		    slpflag | (PRIBIO + 1), wmesg, timeo);
		if (error)
			break;
	}

	return (error);
}

/*
 * Update outstanding I/O count and do wakeup if requested.
 *
 * Manipulates v_numoutput. Must be called at splbio().
 */
void
vwakeup(vp)
	struct vnode *vp;
{
	splassert(IPL_BIO);

	if (vp != NULL) {
		if (vp->v_numoutput-- == 0)
			panic("vwakeup: neg numoutput");
		if ((vp->v_bioflag & VBIOWAIT) && vp->v_numoutput == 0) {
			vp->v_bioflag &= ~VBIOWAIT;
			wakeup((caddr_t)&vp->v_numoutput);
		}
	}
}

/*
 * Flush out and invalidate all buffers associated with a vnode.
 * Called with the underlying object locked.
 */
int
vinvalbuf(vp, flags, cred, p, slpflag, slptimeo)
	register struct vnode *vp;
	int flags;
	struct ucred *cred;
	struct proc *p;
	int slpflag, slptimeo;
{
	register struct buf *bp;
	struct buf *nbp, *blist;
	int s, error;

	if (flags & V_SAVE) {
		s = splbio();
		vwaitforio(vp, 0, "vinvalbuf", 0);
		if (vp->v_dirtyblkhd.lh_first != NULL) {
			splx(s);
			if ((error = VOP_FSYNC(vp, cred, MNT_WAIT, p)) != 0)
				return (error);
			s = splbio();
			if (vp->v_numoutput > 0 ||
			    vp->v_dirtyblkhd.lh_first != NULL)
				panic("vinvalbuf: dirty bufs");
		}
		splx(s);
	}
loop:
	s = splbio();
	for (;;) {
		if ((blist = vp->v_cleanblkhd.lh_first) &&
		    (flags & V_SAVEMETA))
			while (blist && blist->b_lblkno < 0)
				blist = blist->b_vnbufs.le_next;
		if (!blist && (blist = vp->v_dirtyblkhd.lh_first) &&
		    (flags & V_SAVEMETA))
			while (blist && blist->b_lblkno < 0)
				blist = blist->b_vnbufs.le_next;
		if (!blist)
			break;

		for (bp = blist; bp; bp = nbp) {
			nbp = bp->b_vnbufs.le_next;
			if (flags & V_SAVEMETA && bp->b_lblkno < 0)
				continue;
			if (bp->b_flags & B_BUSY) {
				bp->b_flags |= B_WANTED;
				error = tsleep((caddr_t)bp,
					slpflag | (PRIBIO + 1), "vinvalbuf",
					slptimeo);
				if (error) {
					splx(s);
					return (error);
				}
				break;
			}
			bremfree(bp);
			bp->b_flags |= B_BUSY;
			/*
			 * XXX Since there are no node locks for NFS, I believe
			 * there is a slight chance that a delayed write will
			 * occur while sleeping just above, so check for it.
			 */
			if ((bp->b_flags & B_DELWRI) && (flags & V_SAVE)) {
				splx(s);
				(void) VOP_BWRITE(bp);
				goto loop;
			}
			bp->b_flags |= B_INVAL;
			brelse(bp);
		}
	}
	if (!(flags & V_SAVEMETA) &&
	    (vp->v_dirtyblkhd.lh_first || vp->v_cleanblkhd.lh_first))
		panic("vinvalbuf: flush failed");
	splx(s);
	return (0);
}
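
/*
 * Flag summary: V_SAVE forces dirty buffers out via VOP_FSYNC()
 * before the purge, and V_SAVEMETA spares buffers with negative
 * logical block numbers (indirect block metadata).  vclean() above
 * uses vinvalbuf(vp, V_SAVE, ...) when purging a vnode with DOCLOSE.
 */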

void
vflushbuf(vp, sync)
	register struct vnode *vp;
	int sync;
{
	register struct buf *bp, *nbp;
	int s;

loop:
	s = splbio();
	for (bp = vp->v_dirtyblkhd.lh_first; bp; bp = nbp) {
		nbp = bp->b_vnbufs.le_next;
		if ((bp->b_flags & B_BUSY))
			continue;
		if ((bp->b_flags & B_DELWRI) == 0)
			panic("vflushbuf: not dirty");
		bremfree(bp);
		bp->b_flags |= B_BUSY;
		splx(s);
		/*
		 * Wait for I/O associated with indirect blocks to complete,
		 * since there is no way to quickly wait for them below.
		 */
		if (bp->b_vp == vp || sync == 0)
			(void) bawrite(bp);
		else
			(void) bwrite(bp);
		goto loop;
	}
	if (sync == 0) {
		splx(s);
		return;
	}
	vwaitforio(vp, 0, "vflushbuf", 0);
	if (vp->v_dirtyblkhd.lh_first != NULL) {
		splx(s);
		vprint("vflushbuf: dirty", vp);
		goto loop;
	}
	splx(s);
}

/*
 * Associate a buffer with a vnode.
 *
 * Manipulates buffer vnode queues. Must be called at splbio().
 */
void
bgetvp(vp, bp)
	register struct vnode *vp;
	register struct buf *bp;
{
	splassert(IPL_BIO);

	if (bp->b_vp)
		panic("bgetvp: not free");
	vhold(vp);
	bp->b_vp = vp;
	if (vp->v_type == VBLK || vp->v_type == VCHR)
		bp->b_dev = vp->v_rdev;
	else
		bp->b_dev = NODEV;
	/*
	 * Insert onto list for new vnode.
	 */
	bufinsvn(bp, &vp->v_cleanblkhd);
}

/*
 * Disassociate a buffer from a vnode.
 *
 * Manipulates vnode buffer queues. Must be called at splbio().
 */
void
brelvp(bp)
	register struct buf *bp;
{
	struct vnode *vp;

	splassert(IPL_BIO);

	if ((vp = bp->b_vp) == (struct vnode *) 0)
		panic("brelvp: NULL");
	/*
	 * Delete from old vnode list, if on one.
	 */
	if (bp->b_vnbufs.le_next != NOLIST)
		bufremvn(bp);
	if ((vp->v_bioflag & VBIOONSYNCLIST) &&
	    LIST_FIRST(&vp->v_dirtyblkhd) == NULL) {
		vp->v_bioflag &= ~VBIOONSYNCLIST;
		LIST_REMOVE(vp, v_synclist);
	}
	bp->b_vp = (struct vnode *) 0;

	simple_lock(&vp->v_interlock);
#ifdef DIAGNOSTIC
	if (vp->v_holdcnt == 0)
		panic("brelvp: holdcnt");
#endif
	vp->v_holdcnt--;

	/*
	 * If it is on the holdlist and the hold count drops to
	 * zero, move it to the free list.
	 */
	if ((vp->v_bioflag & VBIOONFREELIST) &&
	    vp->v_holdcnt == 0 && vp->v_usecount == 0) {
		simple_lock(&vnode_free_list_slock);
		TAILQ_REMOVE(&vnode_hold_list, vp, v_freelist);
		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
		simple_unlock(&vnode_free_list_slock);
	}
	simple_unlock(&vp->v_interlock);
}

/*
 * Replaces the current vnode associated with the buffer, if any,
 * with a new vnode.
 *
 * If an output I/O is pending on the buffer, the old vnode's
 * I/O count is adjusted.
 *
 * Ignores vnode buffer queues. Must be called at splbio().
 */
void
buf_replacevnode(bp, newvp)
	struct buf *bp;
	struct vnode *newvp;
{
	struct vnode *oldvp = bp->b_vp;

	splassert(IPL_BIO);

	if (oldvp)
		brelvp(bp);

	if ((bp->b_flags & (B_READ | B_DONE)) == 0) {
		newvp->v_numoutput++;	/* put it on swapdev */
		vwakeup(oldvp);
	}

	bgetvp(newvp, bp);
	bufremvn(bp);
}

/*
 * Used to assign buffers to the appropriate clean or dirty list on
 * the vnode and to add newly dirty vnodes to the appropriate
 * filesystem syncer list.
 *
 * Manipulates vnode buffer queues. Must be called at splbio().
 */
void
reassignbuf(bp)
	struct buf *bp;
{
	struct buflists *listheadp;
	int delay;
	struct vnode *vp = bp->b_vp;

	splassert(IPL_BIO);

	/*
	 * Delete from old vnode list, if on one.
	 */
	if (bp->b_vnbufs.le_next != NOLIST)
		bufremvn(bp);
	/*
	 * If dirty, put on list of dirty buffers;
	 * otherwise insert onto list of clean buffers.
	 */
	if ((bp->b_flags & B_DELWRI) == 0) {
		listheadp = &vp->v_cleanblkhd;
		if ((vp->v_bioflag & VBIOONSYNCLIST) &&
		    LIST_FIRST(&vp->v_dirtyblkhd) == NULL) {
			vp->v_bioflag &= ~VBIOONSYNCLIST;
			LIST_REMOVE(vp, v_synclist);
		}
	} else {
		listheadp = &vp->v_dirtyblkhd;
		if ((vp->v_bioflag & VBIOONSYNCLIST) == 0) {
			switch (vp->v_type) {
			case VDIR:
				delay = syncdelay / 2;
				break;
			case VBLK:
				if (vp->v_specmountpoint != NULL) {
					delay = syncdelay / 3;
					break;
				}
				/* fall through */
			default:
				delay = syncdelay;
			}
			vn_syncer_add_to_worklist(vp, delay);
		}
	}
	bufinsvn(bp, listheadp);
}

int
vfs_register(vfs)
	struct vfsconf *vfs;
{
	struct vfsconf *vfsp;
	struct vfsconf **vfspp;

#ifdef DIAGNOSTIC
	/* Paranoia? */
	if (vfs->vfc_refcount != 0)
		printf("vfs_register called with vfc_refcount > 0\n");
#endif

	/* Check if filesystem already known */
	for (vfspp = &vfsconf, vfsp = vfsconf; vfsp;
	    vfspp = &vfsp->vfc_next, vfsp = vfsp->vfc_next)
		if (strcmp(vfsp->vfc_name, vfs->vfc_name) == 0)
			return (EEXIST);

	if (vfs->vfc_typenum > maxvfsconf)
		maxvfsconf = vfs->vfc_typenum;

	vfs->vfc_next = NULL;

	/* Add to the end of the list */
	*vfspp = vfs;

	/* Call vfs_init() */
	if (vfs->vfc_vfsops->vfs_init)
		(*(vfs->vfc_vfsops->vfs_init))(vfs);

	return 0;
}

int
vfs_unregister(vfs)
	struct vfsconf *vfs;
{
	struct vfsconf *vfsp;
	struct vfsconf **vfspp;
	int maxtypenum;

	/* Find our vfsconf struct */
	for (vfspp = &vfsconf, vfsp = vfsconf; vfsp;
	    vfspp = &vfsp->vfc_next, vfsp = vfsp->vfc_next) {
		if (strcmp(vfsp->vfc_name, vfs->vfc_name) == 0)
			break;
	}

	if (!vfsp)			/* Not found */
		return (ENOENT);

	if (vfsp->vfc_refcount)		/* In use */
		return (EBUSY);

	/* Remove from list and free */
	*vfspp = vfsp->vfc_next;

	maxtypenum = 0;

	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
		if (vfsp->vfc_typenum > maxtypenum)
			maxtypenum = vfsp->vfc_typenum;

	maxvfsconf = maxtypenum;
	return 0;
}

/*
 * Check if vnode represents a disk device
 */
int
vn_isdisk(vp, errp)
	struct vnode *vp;
	int *errp;
{
	if (vp->v_type != VBLK && vp->v_type != VCHR)
		return (0);

	return (1);
}
2339