/*	$OpenBSD: vfs_subr.c,v 1.92 2003/05/13 02:30:01 tedu Exp $	*/
/*	$NetBSD: vfs_subr.c,v 1.53 1996/04/22 01:39:13 christos Exp $	*/

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.13 (Berkeley) 4/18/94
 */

/*
 * External virtual filesystem routines
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/time.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/stat.h>
#include <sys/namei.h>
#include <sys/ucred.h>
#include <sys/buf.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/domain.h>
#include <sys/mbuf.h>
#include <sys/syscallargs.h>
#include <sys/pool.h>

#include <uvm/uvm_extern.h>
#include <sys/sysctl.h>

#include <miscfs/specfs/specdev.h>

enum vtype iftovt_tab[16] = {
	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
};
int	vttoif_tab[9] = {
	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
	S_IFSOCK, S_IFIFO, S_IFMT,
};

int doforce = 1;		/* 1 => permit forcible unmounting */
int prtactive = 0;		/* 1 => print out reclaim of active vnodes */
int suid_clear = 1;		/* 1 => clear SUID / SGID on owner change */

/*
 * Insq/Remq for the vnode usage lists.
 */
#define	bufinsvn(bp, dp)	LIST_INSERT_HEAD(dp, bp, b_vnbufs)
#define	bufremvn(bp) {							\
	LIST_REMOVE(bp, b_vnbufs);					\
	(bp)->b_vnbufs.le_next = NOLIST;				\
}

struct freelst vnode_hold_list;   /* list of vnodes referencing buffers */
struct freelst vnode_free_list;   /* vnode free list */

struct mntlist mountlist;			/* mounted filesystem list */
struct simplelock mountlist_slock;
static struct simplelock mntid_slock;
struct simplelock mntvnode_slock;
struct simplelock vnode_free_list_slock;
struct simplelock spechash_slock;

void	vclean(struct vnode *, int, struct proc *);

void insmntque(struct vnode *, struct mount *);
int getdevvp(dev_t, struct vnode **, enum vtype);

int vfs_hang_addrlist(struct mount *, struct netexport *,
				  struct export_args *);
int vfs_free_netcred(struct radix_node *, void *);
void vfs_free_addrlist(struct netexport *);
static __inline__ void vputonfreelist(struct vnode *);

int vflush_vnode(struct vnode *, void *);

#ifdef DEBUG
void printlockedvnodes(void);
#endif

#define VN_KNOTE(vp, b) \
	KNOTE((struct klist *)&vp->v_selectinfo.vsi_selinfo.si_note, (b))

struct pool vnode_pool;

/*
 * Initialize the vnode management data structures.
 */
void
vntblinit()
{

	pool_init(&vnode_pool, sizeof(struct vnode), 0, 0, 0, "vnodes",
	    &pool_allocator_nointr);
	simple_lock_init(&mntvnode_slock);
	simple_lock_init(&mntid_slock);
	simple_lock_init(&spechash_slock);
	TAILQ_INIT(&vnode_hold_list);
	TAILQ_INIT(&vnode_free_list);
	simple_lock_init(&vnode_free_list_slock);
	CIRCLEQ_INIT(&mountlist);
	simple_lock_init(&mountlist_slock);
	/*
	 * Initialize the filesystem syncer.
	 */
	vn_initialize_syncerd();
}

/*
 * Mark a mount point as busy. Used to synchronize access and to delay
 * unmounting. Interlock is not released on failure.
 *
 * historical behavior:
 *  - LK_NOWAIT means that we should just ignore the mount point if it's
 *    being unmounted.
 *  - no flags means that we should sleep on the mountpoint and then
 *    fail.
 */

int
vfs_busy(struct mount *mp, int flags, struct simplelock *interlkp,
    struct proc *p)
{
	int lkflags;

	switch (flags) {
	case LK_NOWAIT:
		lkflags = LK_SHARED|LK_NOWAIT;
		break;
	case 0:
		lkflags = LK_SHARED;
		break;
	default:
		lkflags = flags;
	}

	/*
	 * Always sleepfail. We will only sleep for an exclusive lock
	 * and the exclusive lock will only be acquired when unmounting.
	 */
	lkflags |= LK_SLEEPFAIL;

	if (interlkp)
		lkflags |= LK_INTERLOCK;
	if (lockmgr(&mp->mnt_lock, lkflags, interlkp, p))
		return (ENOENT);
	return (0);
}


/*
 * Free a busy file system
 */
void
vfs_unbusy(struct mount *mp, struct proc *p)
{
	lockmgr(&mp->mnt_lock, LK_RELEASE, NULL, p);
}

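/*
 * Return non-zero if the mount point is busy, i.e. its lock is held.
 */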
int
vfs_isbusy(struct mount *mp)
{
	return (lockstatus(&mp->mnt_lock));
}

/*
 * Lookup a filesystem type, and if found allocate and initialize
 * a mount structure for it.
 *
 * Devname is usually updated by mount(8) after booting.
 */

int
vfs_rootmountalloc(fstypename, devname, mpp)
	char *fstypename;
	char *devname;
	struct mount **mpp;
{
	struct proc *p = curproc;	/* XXX */
	struct vfsconf *vfsp;
	struct mount *mp;

	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
		if (!strcmp(vfsp->vfc_name, fstypename))
			break;
	if (vfsp == NULL)
		return (ENODEV);
	mp = malloc((u_long)sizeof(struct mount), M_MOUNT, M_WAITOK);
	bzero((char *)mp, (u_long)sizeof(struct mount));
	lockinit(&mp->mnt_lock, PVFS, "vfslock", 0, 0);
	(void)vfs_busy(mp, LK_NOWAIT, 0, p);
	LIST_INIT(&mp->mnt_vnodelist);
	mp->mnt_vfc = vfsp;
	mp->mnt_op = vfsp->vfc_vfsops;
	mp->mnt_flag = MNT_RDONLY;
	mp->mnt_vnodecovered = NULLVP;
	vfsp->vfc_refcount++;
	mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;
	strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
	mp->mnt_stat.f_mntonname[0] = '/';
	(void) copystr(devname, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, 0);
	*mpp = mp;
	return (0);
}

/*
 * Find an appropriate filesystem to use for the root. If a filesystem
 * has not been preselected, walk through the list of known filesystems
 * trying those that have mountroot routines, and try them until one
 * works or we have tried them all.
 */
int
vfs_mountroot()
{
	struct vfsconf *vfsp;
	int error;

	if (mountroot != NULL)
		return ((*mountroot)());
	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) {
		if (vfsp->vfc_mountroot == NULL)
			continue;
		if ((error = (*vfsp->vfc_mountroot)()) == 0)
			return (0);
		printf("%s_mountroot failed: %d\n", vfsp->vfc_name, error);
	}
	return (ENODEV);
}

/*
 * Lookup a mount point by filesystem identifier.
 */
struct mount *
vfs_getvfs(fsid)
	fsid_t *fsid;
{
	register struct mount *mp;

	simple_lock(&mountlist_slock);
	CIRCLEQ_FOREACH(mp, &mountlist, mnt_list) {
		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
			simple_unlock(&mountlist_slock);
			return (mp);
		}
	}
	simple_unlock(&mountlist_slock);
	return ((struct mount *)0);
}


/*
 * Get a new unique fsid
 */
void
vfs_getnewfsid(mp)
	struct mount *mp;
{
	static u_short xxxfs_mntid;

	fsid_t tfsid;
	int mtype;

	simple_lock(&mntid_slock);
	mtype = mp->mnt_vfc->vfc_typenum;
	mp->mnt_stat.f_fsid.val[0] = makedev(nblkdev + mtype, 0);
	mp->mnt_stat.f_fsid.val[1] = mtype;
	if (xxxfs_mntid == 0)
		++xxxfs_mntid;
	tfsid.val[0] = makedev(nblkdev + mtype, xxxfs_mntid);
	tfsid.val[1] = mtype;
	if (!CIRCLEQ_EMPTY(&mountlist)) {
		while (vfs_getvfs(&tfsid)) {
			tfsid.val[0]++;
			xxxfs_mntid++;
		}
	}
	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
	simple_unlock(&mntid_slock);
}

/*
 * Make a 'unique' number from a mount type name.
 * Note that this is no longer used for ffs which
 * now has an on-disk filesystem id.
 */
long
makefstype(type)
	char *type;
{
	long rv;

	for (rv = 0; *type; type++) {
		rv <<= 2;
		rv ^= *type;
	}
	return rv;
}

/*
 * Set vnode attributes to VNOVAL
 */
void
vattr_null(vap)
	register struct vattr *vap;
{

	vap->va_type = VNON;
	/* XXX These next two used to be one line, but for a GCC bug. */
	vap->va_size = VNOVAL;
	vap->va_bytes = VNOVAL;
	vap->va_mode = vap->va_nlink = vap->va_uid = vap->va_gid =
		vap->va_fsid = vap->va_fileid =
		vap->va_blocksize = vap->va_rdev =
		vap->va_atime.tv_sec = vap->va_atime.tv_nsec =
		vap->va_mtime.tv_sec = vap->va_mtime.tv_nsec =
		vap->va_ctime.tv_sec = vap->va_ctime.tv_nsec =
		vap->va_flags = vap->va_gen = VNOVAL;
	vap->va_vaflags = 0;
}

/*
 * Routines having to do with the management of the vnode table.
 */
extern int (**dead_vnodeop_p)(void *);
long numvnodes;

/*
 * Return the next vnode from the free list.
 */
int
getnewvnode(tag, mp, vops, vpp)
	enum vtagtype tag;
	struct mount *mp;
	int (**vops)(void *);
	struct vnode **vpp;
{
	struct proc *p = curproc;			/* XXX */
	struct freelst *listhd;
	static int toggle;
	struct vnode *vp;
	int s;

	/*
	 * We must choose whether to allocate a new vnode or recycle an
	 * existing one. The criterion for allocating a new one is that
	 * the total number of vnodes is less than the number desired or
	 * there are no vnodes on either free list. Generally we only
	 * want to recycle vnodes that have no buffers associated with
	 * them, so we look first on the vnode_free_list. If it is empty,
	 * we next consider vnodes with referencing buffers on the
	 * vnode_hold_list. The toggle ensures that half the time we
	 * will use a buffer from the vnode_hold_list, and half the time
	 * we will allocate a new one unless the list has grown to twice
	 * the desired size. We are reticent to recycle vnodes from the
	 * vnode_hold_list because we will lose the identity of all its
	 * referencing buffers.
	 */
	toggle ^= 1;
	if (numvnodes > 2 * desiredvnodes)
		toggle = 0;

	simple_lock(&vnode_free_list_slock);
	s = splbio();
	if ((numvnodes < desiredvnodes) ||
	    ((TAILQ_FIRST(listhd = &vnode_free_list) == NULL) &&
	    ((TAILQ_FIRST(listhd = &vnode_hold_list) == NULL) || toggle))) {
		splx(s);
		simple_unlock(&vnode_free_list_slock);
		vp = pool_get(&vnode_pool, PR_WAITOK);
		bzero((char *)vp, sizeof *vp);
		simple_lock_init(&vp->v_interlock);
		numvnodes++;
	} else {
		for (vp = TAILQ_FIRST(listhd); vp != NULLVP;
		    vp = TAILQ_NEXT(vp, v_freelist)) {
			if (simple_lock_try(&vp->v_interlock)) {
				if (VOP_ISLOCKED(vp) == 0)
					break;
				else
					simple_unlock(&vp->v_interlock);
			}
		}
		/*
		 * Unless this is a bad time of the month, at most
		 * the first NCPUS items on the free list are
		 * locked, so this is close enough to being empty.
		 */
		if (vp == NULL) {
			splx(s);
			simple_unlock(&vnode_free_list_slock);
			tablefull("vnode");
			*vpp = 0;
			return (ENFILE);
		}
		if (vp->v_usecount) {
			vprint("free vnode", vp);
			panic("free vnode isn't");
		}

		TAILQ_REMOVE(listhd, vp, v_freelist);
		vp->v_bioflag &= ~VBIOONFREELIST;
		splx(s);

		simple_unlock(&vnode_free_list_slock);
		if (vp->v_type != VBAD)
			vgonel(vp, p);
		else
			simple_unlock(&vp->v_interlock);
#ifdef DIAGNOSTIC
		if (vp->v_data) {
			vprint("cleaned vnode", vp);
			panic("cleaned vnode isn't");
		}
		s = splbio();
		if (vp->v_numoutput)
			panic("Clean vnode has pending I/O's");
		splx(s);
#endif
		vp->v_flag = 0;
		vp->v_socket = 0;
	}
	vp->v_type = VNON;
	cache_purge(vp);
	vp->v_vnlock = NULL;
	lockinit(&vp->v_lock, PVFS, "v_lock", 0, 0);
	vp->v_tag = tag;
	vp->v_op = vops;
	insmntque(vp, mp);
	*vpp = vp;
	vp->v_usecount = 1;
	vp->v_data = 0;
	simple_lock_init(&vp->v_uvm.u_obj.vmobjlock);
	return (0);
}

/*
 * Move a vnode from one mount queue to another.
 */
void
insmntque(vp, mp)
	register struct vnode *vp;
	register struct mount *mp;
{
	simple_lock(&mntvnode_slock);
	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mount != NULL)
		LIST_REMOVE(vp, v_mntvnodes);
	/*
	 * Insert into list of vnodes for the new mount point, if available.
	 */
	if ((vp->v_mount = mp) != NULL)
		LIST_INSERT_HEAD(&mp->mnt_vnodelist, vp, v_mntvnodes);
	simple_unlock(&mntvnode_slock);
}


/*
 * Create a vnode for a block device.
 * Used for root filesystem, argdev, and swap areas.
 * Also used for memory file system special devices.
 */
int
bdevvp(dev, vpp)
	dev_t dev;
	struct vnode **vpp;
{

	return (getdevvp(dev, vpp, VBLK));
}

/*
 * Create a vnode for a character device.
 * Used for kernfs and some console handling.
 */
int
cdevvp(dev, vpp)
	dev_t dev;
	struct vnode **vpp;
{

	return (getdevvp(dev, vpp, VCHR));
}

/*
 * Create a vnode for a device.
 * Used by bdevvp (block device) for root file system etc.,
 * and by cdevvp (character device) for console and kernfs.
 */
int
getdevvp(dev, vpp, type)
	dev_t dev;
	struct vnode **vpp;
	enum vtype type;
{
	register struct vnode *vp;
	struct vnode *nvp;
	int error;

	if (dev == NODEV) {
		*vpp = NULLVP;
		return (0);
	}
	error = getnewvnode(VT_NON, NULL, spec_vnodeop_p, &nvp);
	if (error) {
		*vpp = NULLVP;
		return (error);
	}
	vp = nvp;
	vp->v_type = type;
	if ((nvp = checkalias(vp, dev, NULL)) != 0) {
		vput(vp);
		vp = nvp;
	}
	*vpp = vp;
	return (0);
}

/*
 * Check to see if the new vnode represents a special device
 * for which we already have a vnode (either because of
 * bdevvp() or because of a different vnode representing
 * the same block device). If such an alias exists, deallocate
 * the existing contents and return the aliased vnode. The
 * caller is responsible for filling it with its new contents.
 */
struct vnode *
checkalias(nvp, nvp_rdev, mp)
	register struct vnode *nvp;
	dev_t nvp_rdev;
	struct mount *mp;
{
	struct proc *p = curproc;
	register struct vnode *vp;
	struct vnode **vpp;

	if (nvp->v_type != VBLK && nvp->v_type != VCHR)
		return (NULLVP);

	vpp = &speclisth[SPECHASH(nvp_rdev)];
loop:
	simple_lock(&spechash_slock);
	for (vp = *vpp; vp; vp = vp->v_specnext) {
		simple_lock(&vp->v_interlock);
		if (nvp_rdev != vp->v_rdev || nvp->v_type != vp->v_type) {
			simple_unlock(&vp->v_interlock);
			continue;
		}
		/*
		 * Alias, but not in use, so flush it out.
		 */
		if (vp->v_usecount == 0) {
			simple_unlock(&spechash_slock);
			vgonel(vp, p);
			goto loop;
		}
		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, p)) {
			simple_unlock(&spechash_slock);
			goto loop;
		}
		break;
	}

	/*
	 * Common case is actually in the if statement
	 */
	if (vp == NULL || !(vp->v_tag == VT_NON && vp->v_type == VBLK)) {
		MALLOC(nvp->v_specinfo, struct specinfo *,
			sizeof(struct specinfo), M_VNODE, M_WAITOK);
		nvp->v_rdev = nvp_rdev;
		nvp->v_hashchain = vpp;
		nvp->v_specnext = *vpp;
		nvp->v_specmountpoint = NULL;
		nvp->v_speclockf = NULL;
		simple_unlock(&spechash_slock);
		*vpp = nvp;
		if (vp != NULLVP) {
			nvp->v_flag |= VALIASED;
			vp->v_flag |= VALIASED;
			vput(vp);
		}
		return (NULLVP);
	}

	/*
	 * This code is the uncommon case. It is called in case
	 * we found an alias that was VT_NON && vtype of VBLK.
	 * This means we found a block device that was created
	 * using bdevvp.
	 * An example of such a vnode is the root partition device vnode
	 * created in ffs_mountroot.
	 *
	 * The vnodes created by bdevvp should not be aliased (why?).
	 */

	simple_unlock(&spechash_slock);
	VOP_UNLOCK(vp, 0, p);
	simple_lock(&vp->v_interlock);
	vclean(vp, 0, p);
	vp->v_vnlock = NULL;
	lockinit(&vp->v_lock, PVFS, "v_lock", 0, 0);
	vp->v_op = nvp->v_op;
	vp->v_tag = nvp->v_tag;
	nvp->v_type = VNON;
	insmntque(vp, mp);
	return (vp);
}

/*
 * Grab a particular vnode from the free list, increment its
 * reference count and lock it. If the vnode lock bit is set, the
 * vnode is being eliminated in vgone. The process is awakened
 * when the transition is completed, and an error returned to
 * indicate that the vnode is no longer usable (possibly having
 * been changed to a new file system type).
 */
int
vget(vp, flags, p)
	struct vnode *vp;
	int flags;
	struct proc *p;
{
	int error;
	int s;

	/*
	 * If the vnode is in the process of being cleaned out for
	 * another use, we wait for the cleaning to finish and then
	 * return failure. Cleaning is determined by checking that
	 * the VXLOCK flag is set.
	 */
	if ((flags & LK_INTERLOCK) == 0) {
		simple_lock(&vp->v_interlock);
		flags |= LK_INTERLOCK;
	}
	if (vp->v_flag & VXLOCK) {
		vp->v_flag |= VXWANT;
		simple_unlock(&vp->v_interlock);
		tsleep((caddr_t)vp, PINOD, "vget", 0);
		return (ENOENT);
	}
	if (vp->v_usecount == 0 &&
	    (vp->v_bioflag & VBIOONFREELIST)) {
		s = splbio();
		simple_lock(&vnode_free_list_slock);
		if (vp->v_holdcnt > 0)
			TAILQ_REMOVE(&vnode_hold_list, vp, v_freelist);
		else
			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
		simple_unlock(&vnode_free_list_slock);
		vp->v_bioflag &= ~VBIOONFREELIST;
		splx(s);
	}
	vp->v_usecount++;
	if (flags & LK_TYPE_MASK) {
		if ((error = vn_lock(vp, flags, p)) != 0) {
			vp->v_usecount--;
			if (vp->v_usecount == 0)
				vputonfreelist(vp);

			simple_unlock(&vp->v_interlock);
		}
		return (error);
	}
	simple_unlock(&vp->v_interlock);
	return (0);
}


#ifdef DIAGNOSTIC
/*
 * Vnode reference.
 */
void
vref(vp)
	struct vnode *vp;
{
	simple_lock(&vp->v_interlock);
	if (vp->v_usecount == 0)
		panic("vref used where vget required");
	vp->v_usecount++;
	simple_unlock(&vp->v_interlock);
}
#endif /* DIAGNOSTIC */

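/*
 * Move an unreferenced vnode onto a free list: the hold list if buffers
 * still reference it (v_holdcnt > 0), the regular free list otherwise.
 * Called with the vnode interlock held.
 */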
static __inline__ void
vputonfreelist(vp)
	struct vnode *vp;
{
	int s;
	struct freelst *lst;

	s = splbio();
#ifdef DIAGNOSTIC
	if (vp->v_usecount != 0)
		panic("Use count is not zero!");

	if (vp->v_bioflag & VBIOONFREELIST) {
		vprint("vnode already on free list: ", vp);
		panic("vnode already on free list");
	}
#endif

	vp->v_bioflag |= VBIOONFREELIST;

	if (vp->v_holdcnt > 0)
		lst = &vnode_hold_list;
	else
		lst = &vnode_free_list;

	if (vp->v_type == VBAD)
		TAILQ_INSERT_HEAD(lst, vp, v_freelist);
	else
		TAILQ_INSERT_TAIL(lst, vp, v_freelist);

	splx(s);
}

/*
 * vput(), just unlock and vrele()
 */
void
vput(vp)
	register struct vnode *vp;
{
	struct proc *p = curproc;	/* XXX */

#ifdef DIAGNOSTIC
	if (vp == NULL)
		panic("vput: null vp");
#endif
	simple_lock(&vp->v_interlock);

#ifdef DIAGNOSTIC
	if (vp->v_usecount == 0) {
		vprint("vput: bad ref count", vp);
		panic("vput: ref cnt");
	}
#endif
	vp->v_usecount--;
	if (vp->v_usecount > 0) {
		simple_unlock(&vp->v_interlock);
		VOP_UNLOCK(vp, 0, p);
		return;
	}

#ifdef DIAGNOSTIC
	if (vp->v_writecount != 0) {
		vprint("vput: bad writecount", vp);
		panic("vput: v_writecount != 0");
	}
#endif
	vputonfreelist(vp);

	simple_unlock(&vp->v_interlock);

	VOP_INACTIVE(vp, p);
}

/*
 * Vnode release - use for active VNODES.
 * If count drops to zero, call inactive routine and return to freelist.
 */
void
vrele(vp)
	register struct vnode *vp;
{
	struct proc *p = curproc;	/* XXX */

#ifdef DIAGNOSTIC
	if (vp == NULL)
		panic("vrele: null vp");
#endif
	simple_lock(&vp->v_interlock);
#ifdef DIAGNOSTIC
	if (vp->v_usecount == 0) {
		vprint("vrele: bad ref count", vp);
		panic("vrele: ref cnt");
	}
#endif
	vp->v_usecount--;
	if (vp->v_usecount > 0) {
		simple_unlock(&vp->v_interlock);
		return;
	}

#ifdef DIAGNOSTIC
	if (vp->v_writecount != 0) {
		vprint("vrele: bad writecount", vp);
		panic("vrele: v_writecount != 0");
	}
#endif
	vputonfreelist(vp);

	if (vn_lock(vp, LK_EXCLUSIVE|LK_INTERLOCK, p) == 0)
		VOP_INACTIVE(vp, p);
}

void vhold(struct vnode *vp);

/*
 * Page or buffer structure gets a reference.
 */
void
vhold(vp)
	register struct vnode *vp;
{

	/*
	 * If it is on the freelist and the hold count is currently
	 * zero, move it to the hold list.
	 */
	simple_lock(&vp->v_interlock);
	if ((vp->v_bioflag & VBIOONFREELIST) &&
	    vp->v_holdcnt == 0 && vp->v_usecount == 0) {
		simple_lock(&vnode_free_list_slock);
		TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
		TAILQ_INSERT_TAIL(&vnode_hold_list, vp, v_freelist);
		simple_unlock(&vnode_free_list_slock);
	}
	vp->v_holdcnt++;
	simple_unlock(&vp->v_interlock);
}

/*
 * Remove any vnodes in the vnode table belonging to mount point mp.
 *
 * If MNT_NOFORCE is specified, there should not be any active ones,
 * return error if any are found (nb: this is a user error, not a
 * system error). If MNT_FORCE is specified, detach any active vnodes
 * that are found.
 */
#ifdef DEBUG
int busyprt = 0;	/* print out busy vnodes */
struct ctldebug debug1 = { "busyprt", &busyprt };
#endif

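/*
 * Call "func" for every vnode on the mount point's vnode list. Each
 * callback is entered with the vnode interlock held and is responsible
 * for releasing it. Iteration stops at the first non-zero return value,
 * which is passed back to the caller.
 */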
int
vfs_mount_foreach_vnode(struct mount *mp,
    int (*func)(struct vnode *, void *), void *arg)
{
	struct vnode *vp, *nvp;
	int error = 0;

	simple_lock(&mntvnode_slock);
loop:
	for (vp = mp->mnt_vnodelist.lh_first; vp; vp = nvp) {
		if (vp->v_mount != mp)
			goto loop;
		nvp = vp->v_mntvnodes.le_next;
		simple_lock(&vp->v_interlock);
		simple_unlock(&mntvnode_slock);

		error = func(vp, arg);

		simple_lock(&mntvnode_slock);

		if (error != 0)
			break;
	}
	simple_unlock(&mntvnode_slock);

	return (error);
}

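/*
 * Per-vnode state and callback for vflush(), run through
 * vfs_mount_foreach_vnode().
 */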
struct vflush_args {
	struct vnode *skipvp;
	int busy;
	int flags;
};

int
vflush_vnode(struct vnode *vp, void *arg)
{
	struct vflush_args *va = arg;
	struct proc *p = curproc;

	if (vp == va->skipvp) {
		simple_unlock(&vp->v_interlock);
		return (0);
	}

	if ((va->flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
		simple_unlock(&vp->v_interlock);
		return (0);
	}

	/*
	 * If WRITECLOSE is set, only flush out regular file
	 * vnodes open for writing.
	 */
	if ((va->flags & WRITECLOSE) &&
	    (vp->v_writecount == 0 || vp->v_type != VREG)) {
		simple_unlock(&vp->v_interlock);
		return (0);
	}

	/*
	 * With v_usecount == 0, all we need to do is clear
	 * out the vnode data structures and we are done.
	 */
	if (vp->v_usecount == 0) {
		vgonel(vp, p);
		return (0);
	}

	/*
	 * If FORCECLOSE is set, forcibly close the vnode.
	 * For block or character devices, revert to an
	 * anonymous device. For all other files, just kill them.
	 */
	if (va->flags & FORCECLOSE) {
		if (vp->v_type != VBLK && vp->v_type != VCHR) {
			vgonel(vp, p);
		} else {
			vclean(vp, 0, p);
			vp->v_op = spec_vnodeop_p;
			insmntque(vp, (struct mount *)0);
		}
		return (0);
	}

#ifdef DEBUG
	if (busyprt)
		vprint("vflush: busy vnode", vp);
#endif
	simple_unlock(&vp->v_interlock);
	va->busy++;
	return (0);
}

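/*
 * Flush the vnodes belonging to a mount point; see the comment block
 * above ("Remove any vnodes...") for the semantics. Returns EBUSY if
 * any vnode was busy and could not be flushed.
 */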
int
vflush(mp, skipvp, flags)
	struct mount *mp;
	struct vnode *skipvp;
	int flags;
{
	struct vflush_args va;

	va.skipvp = skipvp;
	va.busy = 0;
	va.flags = flags;

	vfs_mount_foreach_vnode(mp, vflush_vnode, &va);

	if (va.busy)
		return (EBUSY);
	return (0);
}

/*
 * Disassociate the underlying file system from a vnode.
 * The vnode interlock is held on entry.
 */
void
vclean(vp, flags, p)
	register struct vnode *vp;
	int flags;
	struct proc *p;
{
	int active;

	/*
	 * Check to see if the vnode is in use.
	 * If so we have to reference it before we clean it out
	 * so that its count cannot fall to zero and generate a
	 * race against ourselves to recycle it.
	 */
	if ((active = vp->v_usecount) != 0)
		vp->v_usecount++;

	/*
	 * Prevent the vnode from being recycled or
	 * brought into use while we clean it out.
	 */
	if (vp->v_flag & VXLOCK)
		panic("vclean: deadlock");
	vp->v_flag |= VXLOCK;
	/*
	 * Even if the count is zero, the VOP_INACTIVE routine may still
	 * have the object locked while it cleans it out. The VOP_LOCK
	 * ensures that the VOP_INACTIVE routine is done with its work.
	 * For active vnodes, it ensures that no other activity can
	 * occur while the underlying object is being cleaned out.
	 */
	VOP_LOCK(vp, LK_DRAIN | LK_INTERLOCK, p);

	/*
	 * Clean out any VM data associated with the vnode.
	 */
	uvm_vnp_terminate(vp);
	/*
	 * Clean out any buffers associated with the vnode.
	 */
	if (flags & DOCLOSE)
		vinvalbuf(vp, V_SAVE, NOCRED, p, 0, 0);
	/*
	 * If purging an active vnode, it must be closed and
	 * deactivated before being reclaimed. Note that the
	 * VOP_INACTIVE will unlock the vnode.
	 */
	if (active) {
		if (flags & DOCLOSE)
			VOP_CLOSE(vp, FNONBLOCK, NOCRED, p);
		VOP_INACTIVE(vp, p);
	} else {
		/*
		 * Any other processes trying to obtain this lock must first
		 * wait for VXLOCK to clear, then call the new lock operation.
		 */
		VOP_UNLOCK(vp, 0, p);
	}

	/*
	 * Reclaim the vnode.
	 */
	if (VOP_RECLAIM(vp, p))
		panic("vclean: cannot reclaim");
	if (active) {
		simple_lock(&vp->v_interlock);

		vp->v_usecount--;
		if (vp->v_usecount == 0) {
			if (vp->v_holdcnt > 0)
				panic("vclean: not clean");
			vputonfreelist(vp);
		}

		simple_unlock(&vp->v_interlock);
	}
	cache_purge(vp);

	/*
	 * Done with purge, notify sleepers of the grim news.
	 */
	vp->v_op = dead_vnodeop_p;
	simple_lock(&vp->v_selectinfo.vsi_lock);
	VN_KNOTE(vp, NOTE_REVOKE);
	simple_unlock(&vp->v_selectinfo.vsi_lock);
	vp->v_tag = VT_NON;
	vp->v_flag &= ~VXLOCK;
#ifdef DIAGNOSTIC
	vp->v_flag &= ~VLOCKSWORK;
#endif
	if (vp->v_flag & VXWANT) {
		vp->v_flag &= ~VXWANT;
		wakeup((caddr_t)vp);
	}
}

/*
 * Recycle an unused vnode to the front of the free list.
 * Release the passed interlock if the vnode will be recycled.
 */
int
vrecycle(vp, inter_lkp, p)
	struct vnode *vp;
	struct simplelock *inter_lkp;
	struct proc *p;
{

	simple_lock(&vp->v_interlock);
	if (vp->v_usecount == 0) {
		if (inter_lkp)
			simple_unlock(inter_lkp);
		vgonel(vp, p);
		return (1);
	}
	simple_unlock(&vp->v_interlock);
	return (0);
}

/*
 * Eliminate all activity associated with a vnode
 * in preparation for reuse.
 */
void
vgone(vp)
	register struct vnode *vp;
{
	struct proc *p = curproc;

	simple_lock(&vp->v_interlock);
	vgonel(vp, p);
}

/*
 * vgone, with the vp interlock held.
 */
void
vgonel(vp, p)
	struct vnode *vp;
	struct proc *p;
{
	register struct vnode *vq;
	struct vnode *vx;

	/*
	 * If a vgone (or vclean) is already in progress,
	 * wait until it is done and return.
	 */
	if (vp->v_flag & VXLOCK) {
		vp->v_flag |= VXWANT;
		simple_unlock(&vp->v_interlock);
		tsleep((caddr_t)vp, PINOD, "vgone", 0);
		return;
	}
	/*
	 * Clean out the filesystem specific data.
	 */
	vclean(vp, DOCLOSE, p);
	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mount != NULL)
		insmntque(vp, (struct mount *)0);
	/*
	 * If special device, remove it from special device alias list
	 * if it is on one.
	 */
	if ((vp->v_type == VBLK || vp->v_type == VCHR) && vp->v_specinfo != 0) {
		simple_lock(&spechash_slock);
		if (*vp->v_hashchain == vp) {
			*vp->v_hashchain = vp->v_specnext;
		} else {
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_specnext != vp)
					continue;
				vq->v_specnext = vp->v_specnext;
				break;
			}
			if (vq == NULL)
				panic("missing bdev");
		}
		if (vp->v_flag & VALIASED) {
			vx = NULL;
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_rdev != vp->v_rdev ||
				    vq->v_type != vp->v_type)
					continue;
				if (vx)
					break;
				vx = vq;
			}
			if (vx == NULL)
				panic("missing alias");
			if (vq == NULL)
				vx->v_flag &= ~VALIASED;
			vp->v_flag &= ~VALIASED;
		}
		simple_unlock(&spechash_slock);
		FREE(vp->v_specinfo, M_VNODE);
		vp->v_specinfo = NULL;
	}
	/*
	 * If it is on the freelist and not already at the head,
	 * move it to the head of the list.
	 */
	vp->v_type = VBAD;

	/*
	 * Move onto the free list, unless we were called from
	 * getnewvnode and we're not on any free list.
	 */
	if (vp->v_usecount == 0 &&
	    (vp->v_bioflag & VBIOONFREELIST)) {
		int s;

		simple_lock(&vnode_free_list_slock);
		s = splbio();

		if (vp->v_holdcnt > 0)
			panic("vgonel: not clean");

		if (TAILQ_FIRST(&vnode_free_list) != vp) {
			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
			TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
		}
		splx(s);
		simple_unlock(&vnode_free_list_slock);
	}
}

/*
 * Lookup a vnode by device number.
 */
int
vfinddev(dev, type, vpp)
	dev_t dev;
	enum vtype type;
	struct vnode **vpp;
{
	register struct vnode *vp;
	int rc = 0;

	simple_lock(&spechash_slock);
	for (vp = speclisth[SPECHASH(dev)]; vp; vp = vp->v_specnext) {
		if (dev != vp->v_rdev || type != vp->v_type)
			continue;
		*vpp = vp;
		rc = 1;
		break;
	}
	simple_unlock(&spechash_slock);
	return (rc);
}

/*
 * Revoke all the vnodes corresponding to the specified minor number
 * range (endpoints inclusive) of the specified major.
 */
void
vdevgone(maj, minl, minh, type)
	int maj, minl, minh;
	enum vtype type;
{
	struct vnode *vp;
	int mn;

	for (mn = minl; mn <= minh; mn++)
		if (vfinddev(makedev(maj, mn), type, &vp))
			VOP_REVOKE(vp, REVOKEALL);
}

/*
 * Calculate the total number of references to a special device.
 */
int
vcount(vp)
	struct vnode *vp;
{
	struct vnode *vq, *vnext;
	int count;

loop:
	if ((vp->v_flag & VALIASED) == 0)
		return (vp->v_usecount);
	simple_lock(&spechash_slock);
	for (count = 0, vq = *vp->v_hashchain; vq; vq = vnext) {
		vnext = vq->v_specnext;
		if (vq->v_rdev != vp->v_rdev || vq->v_type != vp->v_type)
			continue;
		/*
		 * Alias, but not in use, so flush it out.
		 */
		if (vq->v_usecount == 0 && vq != vp) {
			simple_unlock(&spechash_slock);
			vgone(vq);
			goto loop;
		}
		count += vq->v_usecount;
	}
	simple_unlock(&spechash_slock);
	return (count);
}

/*
 * Print out a description of a vnode.
 */
static char *typename[] =
   { "VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD" };

void
vprint(label, vp)
	char *label;
	register struct vnode *vp;
{
	char buf[64];

	if (label != NULL)
		printf("%s: ", label);
	printf("type %s, usecount %u, writecount %u, holdcount %u,",
		typename[vp->v_type], vp->v_usecount, vp->v_writecount,
		vp->v_holdcnt);
	buf[0] = '\0';
	if (vp->v_flag & VROOT)
		strlcat(buf, "|VROOT", sizeof buf);
	if (vp->v_flag & VTEXT)
		strlcat(buf, "|VTEXT", sizeof buf);
	if (vp->v_flag & VSYSTEM)
		strlcat(buf, "|VSYSTEM", sizeof buf);
	if (vp->v_flag & VXLOCK)
		strlcat(buf, "|VXLOCK", sizeof buf);
	if (vp->v_flag & VXWANT)
		strlcat(buf, "|VXWANT", sizeof buf);
	if (vp->v_bioflag & VBIOWAIT)
		strlcat(buf, "|VBIOWAIT", sizeof buf);
	if (vp->v_flag & VALIASED)
		strlcat(buf, "|VALIASED", sizeof buf);
	if (buf[0] != '\0')
		printf(" flags (%s)", &buf[1]);
	if (vp->v_data == NULL) {
		printf("\n");
	} else {
		printf("\n\t");
		VOP_PRINT(vp);
	}
}

#ifdef DEBUG
/*
 * List all of the locked vnodes in the system.
 * Called when debugging the kernel.
 */
void
printlockedvnodes()
{
	struct proc *p = curproc;
	register struct mount *mp, *nmp;
	register struct vnode *vp;

	printf("Locked vnodes\n");
	simple_lock(&mountlist_slock);
	for (mp = CIRCLEQ_FIRST(&mountlist); mp != CIRCLEQ_END(&mountlist);
	    mp = nmp) {
		if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock, p)) {
			nmp = CIRCLEQ_NEXT(mp, mnt_list);
			continue;
		}
		for (vp = mp->mnt_vnodelist.lh_first; vp;
		    vp = vp->v_mntvnodes.le_next) {
			if (VOP_ISLOCKED(vp))
				vprint((char *)0, vp);
		}
		simple_lock(&mountlist_slock);
		nmp = CIRCLEQ_NEXT(mp, mnt_list);
		vfs_unbusy(mp, p);
	}
	simple_unlock(&mountlist_slock);
}
#endif

/*
 * Top level filesystem related information gathering.
 */
int
vfs_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
	int *name;
	u_int namelen;
	void *oldp;
	size_t *oldlenp;
	void *newp;
	size_t newlen;
	struct proc *p;
{
	struct vfsconf *vfsp;

	/* all sysctl names at this level are at least name and field */
	if (namelen < 2)
		return (ENOTDIR);		/* overloaded */
	if (name[0] != VFS_GENERIC) {
		for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
			if (vfsp->vfc_typenum == name[0])
				break;
		if (vfsp == NULL)
			return (EOPNOTSUPP);
		return ((*vfsp->vfc_vfsops->vfs_sysctl)(&name[1], namelen - 1,
		    oldp, oldlenp, newp, newlen, p));
	}
	switch (name[1]) {
	case VFS_MAXTYPENUM:
		return (sysctl_rdint(oldp, oldlenp, newp, maxvfsconf));
	case VFS_CONF:
		if (namelen < 3)
			return (ENOTDIR);	/* overloaded */
		for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
			if (vfsp->vfc_typenum == name[2])
				break;
		if (vfsp == NULL)
			return (EOPNOTSUPP);
		return (sysctl_rdstruct(oldp, oldlenp, newp, vfsp,
		    sizeof(struct vfsconf)));
	}
	return (EOPNOTSUPP);
}

int kinfo_vdebug = 1;
int kinfo_vgetfailed;
#define KINFO_VNODESLOP	10
/*
 * Dump vnode list (via sysctl).
 * Copyout address of vnode followed by vnode.
 */
/* ARGSUSED */
int
sysctl_vnode(where, sizep, p)
	char *where;
	size_t *sizep;
	struct proc *p;
{
	register struct mount *mp, *nmp;
	struct vnode *vp, *nvp;
	register char *bp = where, *savebp;
	char *ewhere;
	int error;

	if (where == NULL) {
		*sizep = (numvnodes + KINFO_VNODESLOP) * sizeof(struct e_vnode);
		return (0);
	}
	ewhere = where + *sizep;

	simple_lock(&mountlist_slock);
	for (mp = CIRCLEQ_FIRST(&mountlist); mp != CIRCLEQ_END(&mountlist);
	    mp = nmp) {
		if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock, p)) {
			nmp = CIRCLEQ_NEXT(mp, mnt_list);
			continue;
		}
		savebp = bp;
again:
		for (vp = mp->mnt_vnodelist.lh_first; vp != NULL;
		    vp = nvp) {
			/*
			 * Check that the vp is still associated with
			 * this filesystem.  RACE: could have been
			 * recycled onto the same filesystem.
			 */
			if (vp->v_mount != mp) {
				simple_unlock(&mntvnode_slock);
				if (kinfo_vdebug)
					printf("kinfo: vp changed\n");
				bp = savebp;
				goto again;
			}
			nvp = vp->v_mntvnodes.le_next;
			if (bp + sizeof(struct e_vnode) > ewhere) {
				simple_unlock(&mntvnode_slock);
				*sizep = bp - where;
				vfs_unbusy(mp, p);
				return (ENOMEM);
			}
			if ((error = copyout((caddr_t)&vp,
			    &((struct e_vnode *)bp)->vptr,
			    sizeof(struct vnode *))) ||
			   (error = copyout((caddr_t)vp,
			    &((struct e_vnode *)bp)->vnode,
			    sizeof(struct vnode)))) {
				vfs_unbusy(mp, p);
				return (error);
			}
			bp += sizeof(struct e_vnode);
			simple_lock(&mntvnode_slock);
		}

		simple_unlock(&mntvnode_slock);
		simple_lock(&mountlist_slock);
		nmp = CIRCLEQ_NEXT(mp, mnt_list);
		vfs_unbusy(mp, p);
	}

	simple_unlock(&mountlist_slock);

	*sizep = bp - where;
	return (0);
}

/*
 * Check to see if a filesystem is mounted on a block device.
 */
int
vfs_mountedon(vp)
	register struct vnode *vp;
{
	register struct vnode *vq;
	int error = 0;

	if (vp->v_specmountpoint != NULL)
		return (EBUSY);
	if (vp->v_flag & VALIASED) {
		simple_lock(&spechash_slock);
		for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
			if (vq->v_rdev != vp->v_rdev ||
			    vq->v_type != vp->v_type)
				continue;
			if (vq->v_specmountpoint != NULL) {
				error = EBUSY;
				break;
			}
		}
		simple_unlock(&spechash_slock);
	}
	return (error);
}

/*
 * Build hash lists of net addresses and hang them off the mount point.
 * Called by ufs_mount() to set up the lists of export addresses.
 */
int
vfs_hang_addrlist(mp, nep, argp)
	struct mount *mp;
	struct netexport *nep;
	struct export_args *argp;
{
	register struct netcred *np;
	register struct radix_node_head *rnh;
	register int i;
	struct radix_node *rn;
	struct sockaddr *saddr, *smask = 0;
	struct domain *dom;
	int error;

	if (argp->ex_addrlen == 0) {
		if (mp->mnt_flag & MNT_DEFEXPORTED)
			return (EPERM);
		np = &nep->ne_defexported;
		np->netc_exflags = argp->ex_flags;
		np->netc_anon = argp->ex_anon;
		np->netc_anon.cr_ref = 1;
		mp->mnt_flag |= MNT_DEFEXPORTED;
		return (0);
	}
	if (argp->ex_addrlen > MLEN)
		return (EINVAL);
	i = sizeof(struct netcred) + argp->ex_addrlen + argp->ex_masklen;
	np = (struct netcred *)malloc(i, M_NETADDR, M_WAITOK);
	bzero((caddr_t)np, i);
	saddr = (struct sockaddr *)(np + 1);
	error = copyin(argp->ex_addr, (caddr_t)saddr, argp->ex_addrlen);
	if (error)
		goto out;
	if (saddr->sa_len > argp->ex_addrlen)
		saddr->sa_len = argp->ex_addrlen;
	if (argp->ex_masklen) {
		smask = (struct sockaddr *)((caddr_t)saddr + argp->ex_addrlen);
		error = copyin(argp->ex_mask, (caddr_t)smask, argp->ex_masklen);
		if (error)
			goto out;
		if (smask->sa_len > argp->ex_masklen)
			smask->sa_len = argp->ex_masklen;
	}
	i = saddr->sa_family;
	if (i < 0 || i > AF_MAX) {
		error = EINVAL;
		goto out;
	}
	if ((rnh = nep->ne_rtable[i]) == 0) {
		/*
		 * Seems silly to initialize every AF when most are not
		 * used, do so on demand here.
		 */
		for (dom = domains; dom; dom = dom->dom_next)
			if (dom->dom_family == i && dom->dom_rtattach) {
				dom->dom_rtattach((void **)&nep->ne_rtable[i],
					dom->dom_rtoffset);
				break;
			}
		if ((rnh = nep->ne_rtable[i]) == 0) {
			error = ENOBUFS;
			goto out;
		}
	}
	rn = (*rnh->rnh_addaddr)((caddr_t)saddr, (caddr_t)smask, rnh,
		np->netc_rnodes);
	if (rn == 0 || np != (struct netcred *)rn) { /* already exists */
		error = EPERM;
		goto out;
	}
	np->netc_exflags = argp->ex_flags;
	np->netc_anon = argp->ex_anon;
	np->netc_anon.cr_ref = 1;
	return (0);
out:
	free(np, M_NETADDR);
	return (error);
}

/* ARGSUSED */
int
vfs_free_netcred(rn, w)
	struct radix_node *rn;
	void *w;
{
	register struct radix_node_head *rnh = (struct radix_node_head *)w;

	(*rnh->rnh_deladdr)(rn->rn_key, rn->rn_mask, rnh);
	free((caddr_t)rn, M_NETADDR);
	return (0);
}

/*
 * Free the net address hash lists that are hanging off the mount points.
 */
void
vfs_free_addrlist(nep)
	struct netexport *nep;
{
	register int i;
	register struct radix_node_head *rnh;

	for (i = 0; i <= AF_MAX; i++)
		if ((rnh = nep->ne_rtable[i]) != NULL) {
			(*rnh->rnh_walktree)(rnh, vfs_free_netcred, rnh);
			free((caddr_t)rnh, M_RTABLE);
			nep->ne_rtable[i] = 0;
		}
}

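/*
 * Update the export list of a mount point as described by argp:
 * MNT_DELEXPORT clears the existing exports, MNT_EXPORTED installs
 * a new address list.
 */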
int
vfs_export(mp, nep, argp)
	struct mount *mp;
	struct netexport *nep;
	struct export_args *argp;
{
	int error;

	if (argp->ex_flags & MNT_DELEXPORT) {
		vfs_free_addrlist(nep);
		mp->mnt_flag &= ~(MNT_EXPORTED | MNT_DEFEXPORTED);
	}
	if (argp->ex_flags & MNT_EXPORTED) {
		if ((error = vfs_hang_addrlist(mp, nep, argp)) != 0)
			return (error);
		mp->mnt_flag |= MNT_EXPORTED;
	}
	return (0);
}

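/*
 * Look up the export credentials matching the address in nam,
 * falling back to the default export entry if no address matches.
 * Returns NULL if the file system is not exported to the address.
 */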
struct netcred *
vfs_export_lookup(mp, nep, nam)
	register struct mount *mp;
	struct netexport *nep;
	struct mbuf *nam;
{
	register struct netcred *np;
	register struct radix_node_head *rnh;
	struct sockaddr *saddr;

	np = NULL;
	if (mp->mnt_flag & MNT_EXPORTED) {
		/*
		 * Lookup in the export list first.
		 */
		if (nam != NULL) {
			saddr = mtod(nam, struct sockaddr *);
			rnh = nep->ne_rtable[saddr->sa_family];
			if (rnh != NULL) {
				np = (struct netcred *)
					(*rnh->rnh_matchaddr)((caddr_t)saddr,
					    rnh);
				if (np && np->netc_rnodes->rn_flags & RNF_ROOT)
					np = NULL;
			}
		}
		/*
		 * If no address match, use the default if it exists.
		 */
		if (np == NULL && mp->mnt_flag & MNT_DEFEXPORTED)
			np = &nep->ne_defexported;
	}
	return (np);
}

/*
 * Do the usual access checking.
 * file_mode, uid and gid are from the vnode in question,
 * while acc_mode and cred are from the VOP_ACCESS parameter list.
 */
int
vaccess(file_mode, uid, gid, acc_mode, cred)
	mode_t file_mode;
	uid_t uid;
	gid_t gid;
	mode_t acc_mode;
	struct ucred *cred;
{
	mode_t mask;

	/* User id 0 always gets access. */
	if (cred->cr_uid == 0)
		return 0;

	mask = 0;

	/* Otherwise, check the owner. */
	if (cred->cr_uid == uid) {
		if (acc_mode & VEXEC)
			mask |= S_IXUSR;
		if (acc_mode & VREAD)
			mask |= S_IRUSR;
		if (acc_mode & VWRITE)
			mask |= S_IWUSR;
		return (file_mode & mask) == mask ? 0 : EACCES;
	}

	/* Otherwise, check the groups. */
	if (cred->cr_gid == gid || groupmember(gid, cred)) {
		if (acc_mode & VEXEC)
			mask |= S_IXGRP;
		if (acc_mode & VREAD)
			mask |= S_IRGRP;
		if (acc_mode & VWRITE)
			mask |= S_IWGRP;
		return (file_mode & mask) == mask ? 0 : EACCES;
	}

	/* Otherwise, check everyone else. */
	if (acc_mode & VEXEC)
		mask |= S_IXOTH;
	if (acc_mode & VREAD)
		mask |= S_IROTH;
	if (acc_mode & VWRITE)
		mask |= S_IWOTH;
	return (file_mode & mask) == mask ? 0 : EACCES;
}

/*
 * Unmount all file systems.
 * We traverse the list in reverse order under the assumption that doing so
 * will avoid needing to worry about dependencies.
 */
void
vfs_unmountall(void)
{
	struct mount *mp, *nmp;
	int allerror, error, again = 1;
	struct proc *p = curproc;

retry:
	allerror = 0;
	for (mp = CIRCLEQ_LAST(&mountlist); mp != CIRCLEQ_END(&mountlist);
	    mp = nmp) {
		nmp = CIRCLEQ_PREV(mp, mnt_list);
		if ((vfs_busy(mp, LK_EXCLUSIVE|LK_NOWAIT, NULL, p)) != 0)
			continue;
		if ((error = dounmount(mp, MNT_FORCE, curproc, NULL)) != 0) {
			printf("unmount of %s failed with error %d\n",
			    mp->mnt_stat.f_mntonname, error);
			allerror = 1;
		}
	}

	if (allerror) {
		printf("WARNING: some file systems would not unmount\n");
		if (again) {
			printf("retrying\n");
			again = 0;
			goto retry;
		}
	}
}

/*
 * Sync and unmount file systems before shutting down.
 */
void
vfs_shutdown()
{
	/* XXX Should suspend scheduling. */
	(void) spl0();

	printf("syncing disks... ");

	if (panicstr == 0) {
		/* Sync before unmount, in case we hang on something. */
		sys_sync(&proc0, (void *)0, (register_t *)0);

		/* Unmount file systems. */
		vfs_unmountall();
	}

	if (vfs_syncwait(1))
		printf("giving up\n");
	else
		printf("done\n");
}

/*
 * Perform the sync() operation and wait for buffers to flush.
 * Assumptions: called with the scheduler disabled and physical I/O enabled;
 * for now called at spl0().  XXX
 */
int
vfs_syncwait(verbose)
	int verbose;
{
	register struct buf *bp;
	int iter, nbusy, dcount, s;
	struct proc *p;

	p = curproc ? curproc : &proc0;
	sys_sync(p, (void *)0, (register_t *)0);

	/* Wait for sync to finish. */
	dcount = 10000;
	for (iter = 0; iter < 20; iter++) {
		nbusy = 0;
		for (bp = &buf[nbuf]; --bp >= buf; ) {
			if ((bp->b_flags & (B_BUSY|B_INVAL|B_READ)) == B_BUSY)
				nbusy++;
			/*
			 * With soft updates, some buffers that are
			 * written will be remarked as dirty until other
			 * buffers are written.
			 */
			if (bp->b_flags & B_DELWRI) {
				s = splbio();
				bremfree(bp);
				bp->b_flags |= B_BUSY;
				splx(s);
				nbusy++;
				bawrite(bp);
				if (dcount-- <= 0) {
					if (verbose)
						printf("softdep ");
					return 1;
				}
			}
		}
		if (nbusy == 0)
			break;
		if (verbose)
			printf("%d ", nbusy);
		DELAY(40000 * iter);
	}

	return nbusy;
}

/*
 * posix file system related system variables.
 */
int
fs_posix_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
	int *name;
	u_int namelen;
	void *oldp;
	size_t *oldlenp;
	void *newp;
	size_t newlen;
	struct proc *p;
{
	/* all sysctl names at this level are terminal */
	if (namelen != 1)
		return (ENOTDIR);

	switch (name[0]) {
	case FS_POSIX_SETUID:
		if (newp && securelevel > 0)
			return (EPERM);
		return (sysctl_int(oldp, oldlenp, newp, newlen, &suid_clear));
	default:
		return (EOPNOTSUPP);
	}
	/* NOTREACHED */
}

/*
 * file system related system variables.
 */
int
fs_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
	int *name;
	u_int namelen;
	void *oldp;
	size_t *oldlenp;
	void *newp;
	size_t newlen;
	struct proc *p;
{
	sysctlfn *fn;

	switch (name[0]) {
	case FS_POSIX:
		fn = fs_posix_sysctl;
		break;
	default:
		return (EOPNOTSUPP);
	}
	return (*fn)(name + 1, namelen - 1, oldp, oldlenp, newp, newlen, p);
}


/*
 * Routines dealing with vnodes and buffers
 */

/*
 * Wait for all outstanding I/Os to complete
 *
 * Manipulates v_numoutput. Must be called at splbio()
 */
int
vwaitforio(vp, slpflag, wmesg, timeo)
	struct vnode *vp;
	int slpflag, timeo;
	char *wmesg;
{
	int error = 0;

	splassert(IPL_BIO);

	while (vp->v_numoutput) {
		vp->v_bioflag |= VBIOWAIT;
		error = tsleep((caddr_t)&vp->v_numoutput,
		    slpflag | (PRIBIO + 1), wmesg, timeo);
		if (error)
			break;
	}

	return (error);
}


/*
 * Update outstanding I/O count and do wakeup if requested.
 *
 * Manipulates v_numoutput. Must be called at splbio()
 */
void
vwakeup(vp)
	struct vnode *vp;
{
	splassert(IPL_BIO);

	if (vp != NULL) {
		if (vp->v_numoutput-- == 0)
			panic("vwakeup: neg numoutput");
		if ((vp->v_bioflag & VBIOWAIT) && vp->v_numoutput == 0) {
			vp->v_bioflag &= ~VBIOWAIT;
			wakeup((caddr_t)&vp->v_numoutput);
		}
	}
}

/*
 * Flush out and invalidate all buffers associated with a vnode.
 * Called with the underlying object locked.
 */
int
vinvalbuf(vp, flags, cred, p, slpflag, slptimeo)
	register struct vnode *vp;
	int flags;
	struct ucred *cred;
	struct proc *p;
	int slpflag, slptimeo;
{
	register struct buf *bp;
	struct buf *nbp, *blist;
	int s, error;

	if (flags & V_SAVE) {
		s = splbio();
		vwaitforio(vp, 0, "vinvalbuf", 0);
		if (vp->v_dirtyblkhd.lh_first != NULL) {
			splx(s);
			if ((error = VOP_FSYNC(vp, cred, MNT_WAIT, p)) != 0)
				return (error);
			s = splbio();
			if (vp->v_numoutput > 0 ||
			    vp->v_dirtyblkhd.lh_first != NULL)
				panic("vinvalbuf: dirty bufs");
		}
		splx(s);
	}
loop:
	s = splbio();
	for (;;) {
		if ((blist = vp->v_cleanblkhd.lh_first) &&
		    (flags & V_SAVEMETA))
			while (blist && blist->b_lblkno < 0)
				blist = blist->b_vnbufs.le_next;
		if (!blist && (blist = vp->v_dirtyblkhd.lh_first) &&
		    (flags & V_SAVEMETA))
			while (blist && blist->b_lblkno < 0)
				blist = blist->b_vnbufs.le_next;
		if (!blist)
			break;

		for (bp = blist; bp; bp = nbp) {
			nbp = bp->b_vnbufs.le_next;
			if (flags & V_SAVEMETA && bp->b_lblkno < 0)
				continue;
			if (bp->b_flags & B_BUSY) {
				bp->b_flags |= B_WANTED;
				error = tsleep((caddr_t)bp,
					slpflag | (PRIBIO + 1), "vinvalbuf",
					slptimeo);
				if (error) {
					splx(s);
					return (error);
				}
				break;
			}
			bremfree(bp);
			bp->b_flags |= B_BUSY;
			/*
			 * XXX Since there are no node locks for NFS, I believe
			 * there is a slight chance that a delayed write will
			 * occur while sleeping just above, so check for it.
			 */
			if ((bp->b_flags & B_DELWRI) && (flags & V_SAVE)) {
				splx(s);
				(void) VOP_BWRITE(bp);
				goto loop;
			}
			bp->b_flags |= B_INVAL;
			brelse(bp);
		}
	}
	if (!(flags & V_SAVEMETA) &&
	    (vp->v_dirtyblkhd.lh_first || vp->v_cleanblkhd.lh_first))
		panic("vinvalbuf: flush failed");
	splx(s);
	return (0);
}

void
vflushbuf(vp, sync)
	register struct vnode *vp;
	int sync;
{
	register struct buf *bp, *nbp;
	int s;

loop:
	s = splbio();
	for (bp = vp->v_dirtyblkhd.lh_first; bp; bp = nbp) {
		nbp = bp->b_vnbufs.le_next;
		if ((bp->b_flags & B_BUSY))
			continue;
		if ((bp->b_flags & B_DELWRI) == 0)
			panic("vflushbuf: not dirty");
		bremfree(bp);
		bp->b_flags |= B_BUSY;
		splx(s);
		/*
		 * Wait for I/O associated with indirect blocks to complete,
		 * since there is no way to quickly wait for them below.
		 */
		if (bp->b_vp == vp || sync == 0)
			(void) bawrite(bp);
		else
			(void) bwrite(bp);
		goto loop;
	}
	if (sync == 0) {
		splx(s);
		return;
	}
	vwaitforio(vp, 0, "vflushbuf", 0);
	if (vp->v_dirtyblkhd.lh_first != NULL) {
		splx(s);
		vprint("vflushbuf: dirty", vp);
		goto loop;
	}
	splx(s);
}

/*
 * Associate a buffer with a vnode.
 *
 * Manipulates buffer vnode queues. Must be called at splbio().
 */
void
bgetvp(vp, bp)
	register struct vnode *vp;
	register struct buf *bp;
{
	splassert(IPL_BIO);

	if (bp->b_vp)
		panic("bgetvp: not free");
	vhold(vp);
	bp->b_vp = vp;
	if (vp->v_type == VBLK || vp->v_type == VCHR)
		bp->b_dev = vp->v_rdev;
	else
		bp->b_dev = NODEV;
	/*
	 * Insert onto list for new vnode.
	 */
	bufinsvn(bp, &vp->v_cleanblkhd);
}

/*
 * Disassociate a buffer from a vnode.
 *
 * Manipulates vnode buffer queues. Must be called at splbio().
 */
void
brelvp(bp)
	register struct buf *bp;
{
	struct vnode *vp;

	splassert(IPL_BIO);

	if ((vp = bp->b_vp) == (struct vnode *) 0)
		panic("brelvp: NULL");
	/*
	 * Delete from old vnode list, if on one.
	 */
	if (bp->b_vnbufs.le_next != NOLIST)
		bufremvn(bp);
	if ((vp->v_bioflag & VBIOONSYNCLIST) &&
	    LIST_FIRST(&vp->v_dirtyblkhd) == NULL) {
		vp->v_bioflag &= ~VBIOONSYNCLIST;
		LIST_REMOVE(vp, v_synclist);
	}
	bp->b_vp = (struct vnode *) 0;

	simple_lock(&vp->v_interlock);
#ifdef DIAGNOSTIC
	if (vp->v_holdcnt == 0)
		panic("brelvp: holdcnt");
#endif
	vp->v_holdcnt--;

	/*
	 * If it is on the holdlist and the hold count drops to
	 * zero, move it to the free list.
	 */
	if ((vp->v_bioflag & VBIOONFREELIST) &&
	    vp->v_holdcnt == 0 && vp->v_usecount == 0) {
		simple_lock(&vnode_free_list_slock);
		TAILQ_REMOVE(&vnode_hold_list, vp, v_freelist);
		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
		simple_unlock(&vnode_free_list_slock);
	}
	simple_unlock(&vp->v_interlock);
}

/*
 * Replaces the current vnode associated with the buffer, if any,
 * with a new vnode.
 *
 * If an output I/O is pending on the buffer, the old vnode's
 * I/O count is adjusted.
 *
 * Ignores vnode buffer queues. Must be called at splbio().
 */
void
buf_replacevnode(bp, newvp)
	struct buf *bp;
	struct vnode *newvp;
{
	struct vnode *oldvp = bp->b_vp;

	splassert(IPL_BIO);

	if (oldvp)
		brelvp(bp);

	if ((bp->b_flags & (B_READ | B_DONE)) == 0) {
		newvp->v_numoutput++;	/* put it on swapdev */
		vwakeup(oldvp);
	}

	bgetvp(newvp, bp);
	bufremvn(bp);
}

/*
 * Used to assign buffers to the appropriate clean or dirty list on
 * the vnode and to add newly dirty vnodes to the appropriate
 * filesystem syncer list.
 *
 * Manipulates vnode buffer queues. Must be called at splbio().
 */
void
reassignbuf(bp)
	struct buf *bp;
{
	struct buflists *listheadp;
	int delay;
	struct vnode *vp = bp->b_vp;

	splassert(IPL_BIO);

	/*
	 * Delete from old vnode list, if on one.
	 */
	if (bp->b_vnbufs.le_next != NOLIST)
		bufremvn(bp);
	/*
	 * If dirty, put on list of dirty buffers;
	 * otherwise insert onto list of clean buffers.
	 */
	if ((bp->b_flags & B_DELWRI) == 0) {
		listheadp = &vp->v_cleanblkhd;
		if ((vp->v_bioflag & VBIOONSYNCLIST) &&
		    LIST_FIRST(&vp->v_dirtyblkhd) == NULL) {
			vp->v_bioflag &= ~VBIOONSYNCLIST;
			LIST_REMOVE(vp, v_synclist);
		}
	} else {
		listheadp = &vp->v_dirtyblkhd;
		if ((vp->v_bioflag & VBIOONSYNCLIST) == 0) {
			switch (vp->v_type) {
			case VDIR:
				delay = syncdelay / 2;
				break;
			case VBLK:
				if (vp->v_specmountpoint != NULL) {
					delay = syncdelay / 3;
					break;
				}
				/* fall through */
			default:
				delay = syncdelay;
			}
			vn_syncer_add_to_worklist(vp, delay);
		}
	}
	bufinsvn(bp, listheadp);
}

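/*
 * Register a new filesystem type at the end of the vfsconf list and
 * run its vfs_init() routine.
 */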
int
vfs_register(vfs)
	struct vfsconf *vfs;
{
	struct vfsconf *vfsp;
	struct vfsconf **vfspp;

#ifdef DIAGNOSTIC
	/* Paranoia? */
	if (vfs->vfc_refcount != 0)
		printf("vfs_register called with vfc_refcount > 0\n");
#endif

	/* Check if filesystem already known */
	for (vfspp = &vfsconf, vfsp = vfsconf; vfsp;
	    vfspp = &vfsp->vfc_next, vfsp = vfsp->vfc_next)
		if (strcmp(vfsp->vfc_name, vfs->vfc_name) == 0)
			return (EEXIST);

	if (vfs->vfc_typenum > maxvfsconf)
		maxvfsconf = vfs->vfc_typenum;

	vfs->vfc_next = NULL;

	/* Add to the end of the list */
	*vfspp = vfs;

	/* Call vfs_init() */
	if (vfs->vfc_vfsops->vfs_init)
		(*(vfs->vfc_vfsops->vfs_init))(vfs);

	return 0;
}

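/*
 * Remove a filesystem type from the vfsconf list. Fails with EBUSY
 * if the filesystem is still in use (vfc_refcount != 0).
 */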
int
vfs_unregister(vfs)
	struct vfsconf *vfs;
{
	struct vfsconf *vfsp;
	struct vfsconf **vfspp;
	int maxtypenum;

	/* Find our vfsconf struct */
	for (vfspp = &vfsconf, vfsp = vfsconf; vfsp;
	    vfspp = &vfsp->vfc_next, vfsp = vfsp->vfc_next) {
		if (strcmp(vfsp->vfc_name, vfs->vfc_name) == 0)
			break;
	}

	if (!vfsp)			/* Not found */
		return (ENOENT);

	if (vfsp->vfc_refcount)		/* In use */
		return (EBUSY);

	/* Remove from list and free */
	*vfspp = vfsp->vfc_next;

	maxtypenum = 0;

	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
		if (vfsp->vfc_typenum > maxtypenum)
			maxtypenum = vfsp->vfc_typenum;

	maxvfsconf = maxtypenum;
	return 0;
}

/*
 * Check if vnode represents a disk device
 */
int
vn_isdisk(vp, errp)
	struct vnode *vp;
	int *errp;
{
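	/* XXX errp is never filled in; any block or character vnode passes. */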
	if (vp->v_type != VBLK && vp->v_type != VCHR)
		return (0);

	return (1);
}
2337