/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.31 (Berkeley) 5/26/95
 * $FreeBSD: head/sys/kern/vfs_export.c 54444 1999-12-11 16:13:02Z eivind $
 */

/*
 * External virtual filesystem routines
 */
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/socket.h>
#include <sys/vnode.h>
#include <sys/stat.h>
#include <sys/buf.h>
#include <sys/domain.h>
#include <sys/dirent.h>
#include <sys/vmmeter.h>
#include <sys/conf.h>

#include <machine/limits.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>
#include <vm/vm_zone.h>
#include <sys/sysctl.h>

static MALLOC_DEFINE(M_NETADDR, "Export Host", "Export host address structure");

static void	insmntque __P((struct vnode *vp, struct mount *mp));
static void	vclean __P((struct vnode *vp, int flags, struct proc *p));
static void	vfree __P((struct vnode *));
static void	vgonel __P((struct vnode *vp, struct proc *p));
static unsigned long	numvnodes;
SYSCTL_INT(_debug, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0, "");

enum vtype iftovt_tab[16] = {
	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
};
int vttoif_tab[9] = {
	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
	S_IFSOCK, S_IFIFO, S_IFMT,
};
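/*
 * Illustrative note (not part of the original file): these tables back the
 * IFTOVT()/VTTOIF() conversion macros in <sys/vnode.h>, which index by the
 * file-type bits in the high nibble of st_mode.  For example, S_IFDIR is
 * 0040000, so iftovt_tab[(0040000 & S_IFMT) >> 12] == iftovt_tab[4] == VDIR,
 * and vttoif_tab[VDIR] == S_IFDIR going the other way.
 */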

static TAILQ_HEAD(freelst, vnode) vnode_free_list;	/* vnode free list */
struct tobefreelist vnode_tobefree_list;	/* vnodes pending move to the free list */

static u_long wantfreevnodes = 25;
SYSCTL_INT(_debug, OID_AUTO, wantfreevnodes, CTLFLAG_RW, &wantfreevnodes, 0, "");
static u_long freevnodes = 0;
SYSCTL_INT(_debug, OID_AUTO, freevnodes, CTLFLAG_RD, &freevnodes, 0, "");

static int reassignbufcalls;
SYSCTL_INT(_vfs, OID_AUTO, reassignbufcalls, CTLFLAG_RW, &reassignbufcalls, 0, "");
static int reassignbufloops;
SYSCTL_INT(_vfs, OID_AUTO, reassignbufloops, CTLFLAG_RW, &reassignbufloops, 0, "");
static int reassignbufsortgood;
SYSCTL_INT(_vfs, OID_AUTO, reassignbufsortgood, CTLFLAG_RW, &reassignbufsortgood, 0, "");
static int reassignbufsortbad;
SYSCTL_INT(_vfs, OID_AUTO, reassignbufsortbad, CTLFLAG_RW, &reassignbufsortbad, 0, "");
static int reassignbufmethod = 1;
SYSCTL_INT(_vfs, OID_AUTO, reassignbufmethod, CTLFLAG_RW, &reassignbufmethod, 0, "");

#ifdef ENABLE_VFS_IOOPT
int vfs_ioopt = 0;
SYSCTL_INT(_vfs, OID_AUTO, ioopt, CTLFLAG_RW, &vfs_ioopt, 0, "");
#endif

struct mntlist mountlist = TAILQ_HEAD_INITIALIZER(mountlist); /* mounted fs */
struct simplelock mountlist_slock;
struct simplelock mntvnode_slock;
int	nfs_mount_type = -1;
#ifndef NULL_SIMPLELOCKS
static struct simplelock mntid_slock;
static struct simplelock vnode_free_list_slock;
static struct simplelock spechash_slock;
#endif
struct nfs_public nfs_pub;	/* publicly exported FS */
static vm_zone_t vnode_zone;

/*
 * The workitem queue.
 */
#define SYNCER_MAXDELAY		32
static int syncer_maxdelay = SYNCER_MAXDELAY;	/* maximum delay time */
time_t syncdelay = 30;		/* max time to delay syncing data */
time_t filedelay = 30;		/* time to delay syncing files */
SYSCTL_INT(_kern, OID_AUTO, filedelay, CTLFLAG_RW, &filedelay, 0, "");
time_t dirdelay = 29;		/* time to delay syncing directories */
SYSCTL_INT(_kern, OID_AUTO, dirdelay, CTLFLAG_RW, &dirdelay, 0, "");
time_t metadelay = 28;		/* time to delay syncing metadata */
SYSCTL_INT(_kern, OID_AUTO, metadelay, CTLFLAG_RW, &metadelay, 0, "");
static int rushjob;			/* number of slots to run ASAP */
static int stat_rush_requests;	/* number of times I/O speeded up */
SYSCTL_INT(_debug, OID_AUTO, rush_requests, CTLFLAG_RW, &stat_rush_requests, 0, "");

static int syncer_delayno = 0;
static long syncer_mask;
LIST_HEAD(synclist, vnode);
static struct synclist *syncer_workitem_pending;

int desiredvnodes;
SYSCTL_INT(_kern, KERN_MAXVNODES, maxvnodes, CTLFLAG_RW,
    &desiredvnodes, 0, "Maximum number of vnodes");

static void	vfs_free_addrlist __P((struct netexport *nep));
static int	vfs_free_netcred __P((struct radix_node *rn, void *w));
static int	vfs_hang_addrlist __P((struct mount *mp, struct netexport *nep,
				       struct export_args *argp));

/*
 * Initialize the vnode management data structures.
 */
void
vntblinit()
{

	desiredvnodes = maxproc + cnt.v_page_count / 4;
	simple_lock_init(&mntvnode_slock);
	simple_lock_init(&mntid_slock);
	simple_lock_init(&spechash_slock);
	TAILQ_INIT(&vnode_free_list);
	TAILQ_INIT(&vnode_tobefree_list);
	simple_lock_init(&vnode_free_list_slock);
	vnode_zone = zinit("VNODE", sizeof (struct vnode), 0, 0, 5);
	/*
	 * Initialize the filesystem syncer.
	 */
	syncer_workitem_pending = hashinit(syncer_maxdelay, M_VNODE,
		&syncer_mask);
	syncer_maxdelay = syncer_mask + 1;
}
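/*
 * Illustrative example (not part of the original file, hypothetical values):
 * on a machine with maxproc = 532 and 16384 pages of physical memory, the
 * formula above sets desiredvnodes to 532 + 16384 / 4 = 4628.  The
 * kern.maxvnodes sysctl declared earlier can raise or lower this at run time.
 */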

/*
 * Mark a mount point as busy. Used to synchronize access and to delay
 * unmounting. Interlock is not released on failure.
 */
int
vfs_busy(mp, flags, interlkp, p)
	struct mount *mp;
	int flags;
	struct simplelock *interlkp;
	struct proc *p;
{
	int lkflags;

	if (mp->mnt_kern_flag & MNTK_UNMOUNT) {
		if (flags & LK_NOWAIT)
			return (ENOENT);
		mp->mnt_kern_flag |= MNTK_MWAIT;
		if (interlkp) {
			simple_unlock(interlkp);
		}
		/*
		 * Since all busy locks are shared except the exclusive
		 * lock granted when unmounting, the only place that a
		 * wakeup needs to be done is at the release of the
		 * exclusive lock at the end of dounmount.
		 */
		tsleep((caddr_t)mp, PVFS, "vfs_busy", 0);
		if (interlkp) {
			simple_lock(interlkp);
		}
		return (ENOENT);
	}
	lkflags = LK_SHARED | LK_NOPAUSE;
	if (interlkp)
		lkflags |= LK_INTERLOCK;
	if (lockmgr(&mp->mnt_lock, lkflags, interlkp, p))
		panic("vfs_busy: unexpected lock failure");
	return (0);
}

/*
 * Free a busy filesystem.
 */
void
vfs_unbusy(mp, p)
	struct mount *mp;
	struct proc *p;
{

	lockmgr(&mp->mnt_lock, LK_RELEASE, NULL, p);
}
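/*
 * Illustrative sketch (not part of the original file): callers that walk
 * the mount list pair vfs_busy()/vfs_unbusy() around each mount so that
 * the filesystem cannot be unmounted underneath them, skipping any mount
 * that is already being torn down:
 *
 *	simple_lock(&mountlist_slock);
 *	for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
 *		if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock, p)) {
 *			nmp = TAILQ_NEXT(mp, mnt_list);
 *			continue;
 *		}
 *		... work on mp ...
 *		simple_lock(&mountlist_slock);
 *		nmp = TAILQ_NEXT(mp, mnt_list);
 *		vfs_unbusy(mp, p);
 *	}
 *	simple_unlock(&mountlist_slock);
 *
 * This is the same pattern used by the lockedvnodes DDB command later in
 * this file.
 */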

/*
 * Lookup a filesystem type, and if found allocate and initialize
 * a mount structure for it.
 *
 * Devname is usually updated by mount(8) after booting.
 */
int
vfs_rootmountalloc(fstypename, devname, mpp)
	char *fstypename;
	char *devname;
	struct mount **mpp;
{
	struct proc *p = curproc;	/* XXX */
	struct vfsconf *vfsp;
	struct mount *mp;

	if (fstypename == NULL)
		return (ENODEV);
	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
		if (!strcmp(vfsp->vfc_name, fstypename))
			break;
	if (vfsp == NULL)
		return (ENODEV);
	mp = malloc((u_long)sizeof(struct mount), M_MOUNT, M_WAITOK);
	bzero((char *)mp, (u_long)sizeof(struct mount));
	lockinit(&mp->mnt_lock, PVFS, "vfslock", 0, LK_NOPAUSE);
	(void)vfs_busy(mp, LK_NOWAIT, 0, p);
	LIST_INIT(&mp->mnt_vnodelist);
	mp->mnt_vfc = vfsp;
	mp->mnt_op = vfsp->vfc_vfsops;
	mp->mnt_flag = MNT_RDONLY;
	mp->mnt_vnodecovered = NULLVP;
	vfsp->vfc_refcount++;
	mp->mnt_iosize_max = DFLTPHYS;
	mp->mnt_stat.f_type = vfsp->vfc_typenum;
	mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;
	strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
	mp->mnt_stat.f_mntonname[0] = '/';
	mp->mnt_stat.f_mntonname[1] = 0;
	(void) copystr(devname, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, 0);
	*mpp = mp;
	return (0);
}

/*
 * Find an appropriate filesystem to use for the root. If a filesystem
 * has not been preselected, walk through the list of known filesystems
 * trying those that have mountroot routines, and try them until one
 * works or we have tried them all.
 */
#ifdef notdef	/* XXX JH */
int
lite2_vfs_mountroot()
{
	struct vfsconf *vfsp;
	extern int (*lite2_mountroot) __P((void));
	int error;

	if (lite2_mountroot != NULL)
		return ((*lite2_mountroot)());
	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) {
		if (vfsp->vfc_mountroot == NULL)
			continue;
		if ((error = (*vfsp->vfc_mountroot)()) == 0)
			return (0);
		printf("%s_mountroot failed: %d\n", vfsp->vfc_name, error);
	}
	return (ENODEV);
}
#endif

/*
 * Lookup a mount point by filesystem identifier.
 */
struct mount *
vfs_getvfs(fsid)
	fsid_t *fsid;
{
	register struct mount *mp;

	simple_lock(&mountlist_slock);
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
			simple_unlock(&mountlist_slock);
			return (mp);
		}
	}
	simple_unlock(&mountlist_slock);
	return ((struct mount *) 0);
}

/*
 * Get a new unique fsid
 *
 * Keep in mind that several mounts may be running in parallel,
 * so always increment mntid_base even if lower numbers are available.
 */

static u_short mntid_base;

void
vfs_getnewfsid(mp)
	struct mount *mp;
{
	fsid_t tfsid;
	int mtype;

	simple_lock(&mntid_slock);

	mtype = mp->mnt_vfc->vfc_typenum;
	for (;;) {
		tfsid.val[0] = makeudev(255, mtype + (mntid_base << 16));
		tfsid.val[1] = mtype;
		++mntid_base;
		if (vfs_getvfs(&tfsid) == NULL)
			break;
	}

	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
	mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];

	simple_unlock(&mntid_slock);
}
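/*
 * Illustrative example (not part of the original file): for a filesystem
 * with vfc_typenum 1 and mntid_base 0, the loop above first tries
 * val[0] = makeudev(255, 1) with val[1] = 1.  If vfs_getvfs() reports a
 * collision, mntid_base is bumped and the high 16 bits of the minor
 * number change, e.g. makeudev(255, 1 + (1 << 16)), until a free fsid
 * is found.
 */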

/*
 * Knob to control the precision of file timestamps:
 *
 *   0 = seconds only; nanoseconds zeroed.
 *   1 = seconds and nanoseconds, accurate within 1/HZ.
 *   2 = seconds and nanoseconds, truncated to microseconds.
 * >=3 = seconds and nanoseconds, maximum precision.
 */
enum { TSP_SEC, TSP_HZ, TSP_USEC, TSP_NSEC };

static int timestamp_precision = TSP_SEC;
SYSCTL_INT(_vfs, OID_AUTO, timestamp_precision, CTLFLAG_RW,
    &timestamp_precision, 0, "");

/*
 * Get a current timestamp.
 */
void
vfs_timestamp(tsp)
	struct timespec *tsp;
{
	struct timeval tv;

	switch (timestamp_precision) {
	case TSP_SEC:
		tsp->tv_sec = time_second;
		tsp->tv_nsec = 0;
		break;
	case TSP_HZ:
		getnanotime(tsp);
		break;
	case TSP_USEC:
		microtime(&tv);
		TIMEVAL_TO_TIMESPEC(&tv, tsp);
		break;
	case TSP_NSEC:
	default:
		nanotime(tsp);
		break;
	}
}
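/*
 * Illustrative note (not part of the original file): the knob is exposed
 * as the vfs.timestamp_precision sysctl declared above, so for example
 * "sysctl -w vfs.timestamp_precision=3" switches file timestamps to full
 * nanotime() precision, at the cost of a more expensive clock read per
 * timestamp.
 */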

/*
 * Set vnode attributes to VNOVAL
 */
void
vattr_null(vap)
	register struct vattr *vap;
{

	vap->va_type = VNON;
	vap->va_size = VNOVAL;
	vap->va_bytes = VNOVAL;
	vap->va_mode = VNOVAL;
	vap->va_nlink = VNOVAL;
	vap->va_uid = VNOVAL;
	vap->va_gid = VNOVAL;
	vap->va_fsid = VNOVAL;
	vap->va_fileid = VNOVAL;
	vap->va_blocksize = VNOVAL;
	vap->va_rdev = VNOVAL;
	vap->va_atime.tv_sec = VNOVAL;
	vap->va_atime.tv_nsec = VNOVAL;
	vap->va_mtime.tv_sec = VNOVAL;
	vap->va_mtime.tv_nsec = VNOVAL;
	vap->va_ctime.tv_sec = VNOVAL;
	vap->va_ctime.tv_nsec = VNOVAL;
	vap->va_flags = VNOVAL;
	vap->va_gen = VNOVAL;
	vap->va_vaflags = 0;
}

/*
 * Routines having to do with the management of the vnode table.
 */
extern vop_t **dead_vnodeop_p;

/*
 * Return the next vnode from the free list.
 */
int
getnewvnode(tag, mp, vops, vpp)
	enum vtagtype tag;
	struct mount *mp;
	vop_t **vops;
	struct vnode **vpp;
{
	int s;
	struct proc *p = curproc;	/* XXX */
	struct vnode *vp, *tvp, *nvp;
	vm_object_t object;
	TAILQ_HEAD(freelst, vnode) vnode_tmp_list;

	/*
	 * We take the least recently used vnode from the freelist
	 * if we can get it and it has no cached pages, and no
	 * namecache entries reference it.  Otherwise we allocate
	 * a new vnode.
	 */

	s = splbio();
	simple_lock(&vnode_free_list_slock);
	TAILQ_INIT(&vnode_tmp_list);

	for (vp = TAILQ_FIRST(&vnode_tobefree_list); vp; vp = nvp) {
		nvp = TAILQ_NEXT(vp, v_freelist);
		TAILQ_REMOVE(&vnode_tobefree_list, vp, v_freelist);
		if (vp->v_flag & VAGE) {
			TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
		} else {
			TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
		}
		vp->v_flag &= ~(VTBFREE|VAGE);
		vp->v_flag |= VFREE;
		if (vp->v_usecount)
			panic("tobe free vnode isn't");
		freevnodes++;
	}

	if (wantfreevnodes && freevnodes < wantfreevnodes) {
		vp = NULL;
	} else if (!wantfreevnodes && freevnodes <= desiredvnodes) {
		/*
		 * XXX: this is only here to be backwards compatible
		 */
		vp = NULL;
	} else {
		for (vp = TAILQ_FIRST(&vnode_free_list); vp; vp = nvp) {
			nvp = TAILQ_NEXT(vp, v_freelist);
			if (!simple_lock_try(&vp->v_interlock))
				continue;
			if (vp->v_usecount)
				panic("free vnode isn't");

			object = vp->v_object;
			if (object && (object->resident_page_count || object->ref_count)) {
				printf("object inconsistent state: RPC: %d, RC: %d\n",
					object->resident_page_count, object->ref_count);
				/* Don't recycle if it's caching some pages */
				TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
				TAILQ_INSERT_TAIL(&vnode_tmp_list, vp, v_freelist);
				continue;
			} else if (LIST_FIRST(&vp->v_cache_src)) {
				/* Don't recycle if active in the namecache */
				simple_unlock(&vp->v_interlock);
				continue;
			} else {
				break;
			}
		}
	}

	for (tvp = TAILQ_FIRST(&vnode_tmp_list); tvp; tvp = nvp) {
		nvp = TAILQ_NEXT(tvp, v_freelist);
		TAILQ_REMOVE(&vnode_tmp_list, tvp, v_freelist);
		TAILQ_INSERT_TAIL(&vnode_free_list, tvp, v_freelist);
		simple_unlock(&tvp->v_interlock);
	}

	if (vp) {
		vp->v_flag |= VDOOMED;
		TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
		freevnodes--;
		simple_unlock(&vnode_free_list_slock);
		cache_purge(vp);
		vp->v_lease = NULL;
		if (vp->v_type != VBAD) {
			vgonel(vp, p);
		} else {
			simple_unlock(&vp->v_interlock);
		}

#ifdef INVARIANTS
		{
			int s;

			if (vp->v_data)
				panic("cleaned vnode isn't");
			s = splbio();
			if (vp->v_numoutput)
				panic("Clean vnode has pending I/O's");
			splx(s);
		}
#endif
		vp->v_flag = 0;
		vp->v_lastw = 0;
		vp->v_lasta = 0;
		vp->v_cstart = 0;
		vp->v_clen = 0;
		vp->v_socket = 0;
		vp->v_writecount = 0;	/* XXX */
	} else {
		simple_unlock(&vnode_free_list_slock);
		vp = (struct vnode *) zalloc(vnode_zone);
		bzero((char *) vp, sizeof *vp);
		simple_lock_init(&vp->v_interlock);
		vp->v_dd = vp;
		cache_purge(vp);
		LIST_INIT(&vp->v_cache_src);
		TAILQ_INIT(&vp->v_cache_dst);
		numvnodes++;
	}

	TAILQ_INIT(&vp->v_cleanblkhd);
	TAILQ_INIT(&vp->v_dirtyblkhd);
	vp->v_type = VNON;
	vp->v_tag = tag;
	vp->v_op = vops;
	insmntque(vp, mp);
	*vpp = vp;
	vp->v_usecount = 1;
	vp->v_data = 0;
	splx(s);

	vfs_object_create(vp, p, p->p_ucred);
	return (0);
}

/*
 * Move a vnode from one mount queue to another.
 */
static void
insmntque(vp, mp)
	register struct vnode *vp;
	register struct mount *mp;
{

	simple_lock(&mntvnode_slock);
	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mount != NULL)
		LIST_REMOVE(vp, v_mntvnodes);
	/*
	 * Insert into list of vnodes for the new mount point, if available.
	 */
	if ((vp->v_mount = mp) == NULL) {
		simple_unlock(&mntvnode_slock);
		return;
	}
	LIST_INSERT_HEAD(&mp->mnt_vnodelist, vp, v_mntvnodes);
	simple_unlock(&mntvnode_slock);
}

/*
 * Update outstanding I/O count and do wakeup if requested.
 */
void
vwakeup(bp)
	register struct buf *bp;
{
	register struct vnode *vp;

	bp->b_flags &= ~B_WRITEINPROG;
	if ((vp = bp->b_vp)) {
		vp->v_numoutput--;
		if (vp->v_numoutput < 0)
			panic("vwakeup: neg numoutput");
		if ((vp->v_numoutput == 0) && (vp->v_flag & VBWAIT)) {
			vp->v_flag &= ~VBWAIT;
			wakeup((caddr_t) &vp->v_numoutput);
		}
	}
}

/*
 * Flush out and invalidate all buffers associated with a vnode.
 * Called with the underlying object locked.
 */
int
vinvalbuf(vp, flags, cred, p, slpflag, slptimeo)
	register struct vnode *vp;
	int flags;
	struct ucred *cred;
	struct proc *p;
	int slpflag, slptimeo;
{
	register struct buf *bp;
	struct buf *nbp, *blist;
	int s, error;
	vm_object_t object;

	if (flags & V_SAVE) {
		s = splbio();
		while (vp->v_numoutput) {
			vp->v_flag |= VBWAIT;
			error = tsleep((caddr_t)&vp->v_numoutput,
			    slpflag | (PRIBIO + 1), "vinvlbuf", slptimeo);
			if (error) {
				splx(s);
				return (error);
			}
		}
		if (!TAILQ_EMPTY(&vp->v_dirtyblkhd)) {
			splx(s);
			if ((error = VOP_FSYNC(vp, cred, MNT_WAIT, p)) != 0)
				return (error);
			s = splbio();
			if (vp->v_numoutput > 0 ||
			    !TAILQ_EMPTY(&vp->v_dirtyblkhd))
				panic("vinvalbuf: dirty bufs");
		}
		splx(s);
	}
	s = splbio();
	for (;;) {
		blist = TAILQ_FIRST(&vp->v_cleanblkhd);
		if (!blist)
			blist = TAILQ_FIRST(&vp->v_dirtyblkhd);
		if (!blist)
			break;

		for (bp = blist; bp; bp = nbp) {
			nbp = TAILQ_NEXT(bp, b_vnbufs);
			if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
				error = BUF_TIMELOCK(bp,
				    LK_EXCLUSIVE | LK_SLEEPFAIL,
				    "vinvalbuf", slpflag, slptimeo);
				if (error == ENOLCK)
					break;
				splx(s);
				return (error);
			}
			/*
			 * XXX Since there are no node locks for NFS, I
			 * believe there is a slight chance that a delayed
			 * write will occur while sleeping just above, so
			 * check for it.  Note that vfs_bio_awrite expects
			 * buffers to reside on a queue, while VOP_BWRITE and
			 * brelse do not.
			 */
			if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) &&
				(flags & V_SAVE)) {

				if (bp->b_vp == vp) {
					if (bp->b_flags & B_CLUSTEROK) {
						BUF_UNLOCK(bp);
						vfs_bio_awrite(bp);
					} else {
						bremfree(bp);
						bp->b_flags |= B_ASYNC;
						VOP_BWRITE(bp->b_vp, bp);
					}
				} else {
					bremfree(bp);
					(void) VOP_BWRITE(bp->b_vp, bp);
				}
				break;
			}
			bremfree(bp);
			bp->b_flags |= (B_INVAL | B_NOCACHE | B_RELBUF);
			bp->b_flags &= ~B_ASYNC;
			brelse(bp);
		}
	}

	while (vp->v_numoutput > 0) {
		vp->v_flag |= VBWAIT;
		tsleep(&vp->v_numoutput, PVM, "vnvlbv", 0);
	}

	splx(s);

	/*
	 * Destroy the copy in the VM cache, too.
	 */
	simple_lock(&vp->v_interlock);
	object = vp->v_object;
	if (object != NULL) {
		vm_object_page_remove(object, 0, 0,
			(flags & V_SAVE) ? TRUE : FALSE);
	}
	simple_unlock(&vp->v_interlock);

	if (!TAILQ_EMPTY(&vp->v_dirtyblkhd) || !TAILQ_EMPTY(&vp->v_cleanblkhd))
		panic("vinvalbuf: flush failed");
	return (0);
}

/*
 * Truncate a file's buffer and pages to a specified length.  This
 * is in lieu of the old vinvalbuf mechanism, which performed unneeded
 * sync activity.
 */
int
vtruncbuf(vp, cred, p, length, blksize)
	register struct vnode *vp;
	struct ucred *cred;
	struct proc *p;
	off_t length;
	int blksize;
{
	register struct buf *bp;
	struct buf *nbp;
	int s, anyfreed;
	int trunclbn;

	/*
	 * Round up to the *next* lbn.
	 */
	trunclbn = (length + blksize - 1) / blksize;
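	/*
	 * Illustrative example (not part of the original file): with
	 * length = 10000 and blksize = 4096 this gives trunclbn =
	 * (10000 + 4095) / 4096 = 3, so logical blocks 0-2 survive and
	 * any buffer with b_lblkno >= 3 is invalidated below.
	 */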

	s = splbio();
restart:
	anyfreed = 1;
	for (;anyfreed;) {
		anyfreed = 0;
		for (bp = TAILQ_FIRST(&vp->v_cleanblkhd); bp; bp = nbp) {
			nbp = TAILQ_NEXT(bp, b_vnbufs);
			if (bp->b_lblkno >= trunclbn) {
				if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
					BUF_LOCK(bp, LK_EXCLUSIVE|LK_SLEEPFAIL);
					goto restart;
				} else {
					bremfree(bp);
					bp->b_flags |= (B_INVAL | B_RELBUF);
					bp->b_flags &= ~B_ASYNC;
					brelse(bp);
					anyfreed = 1;
				}
				if (nbp && (((nbp->b_xflags & B_VNCLEAN) == 0)||
					 (nbp->b_vp != vp) ||
					 (nbp->b_flags & B_DELWRI))) {
					goto restart;
				}
			}
		}

		for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
			nbp = TAILQ_NEXT(bp, b_vnbufs);
			if (bp->b_lblkno >= trunclbn) {
				if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
					BUF_LOCK(bp, LK_EXCLUSIVE|LK_SLEEPFAIL);
					goto restart;
				} else {
					bremfree(bp);
					bp->b_flags |= (B_INVAL | B_RELBUF);
					bp->b_flags &= ~B_ASYNC;
					brelse(bp);
					anyfreed = 1;
				}
				if (nbp && (((nbp->b_xflags & B_VNDIRTY) == 0)||
					 (nbp->b_vp != vp) ||
					 (nbp->b_flags & B_DELWRI) == 0)) {
					goto restart;
				}
			}
		}
	}

	if (length > 0) {
restartsync:
		for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
			nbp = TAILQ_NEXT(bp, b_vnbufs);
			if ((bp->b_flags & B_DELWRI) && (bp->b_lblkno < 0)) {
				if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
					BUF_LOCK(bp, LK_EXCLUSIVE|LK_SLEEPFAIL);
					goto restart;
				} else {
					bremfree(bp);
					if (bp->b_vp == vp) {
						bp->b_flags |= B_ASYNC;
					} else {
						bp->b_flags &= ~B_ASYNC;
					}
					VOP_BWRITE(bp->b_vp, bp);
				}
				goto restartsync;
			}

		}
	}

	while (vp->v_numoutput > 0) {
		vp->v_flag |= VBWAIT;
		tsleep(&vp->v_numoutput, PVM, "vbtrunc", 0);
	}

	splx(s);

	vnode_pager_setsize(vp, length);

	return (0);
}

/*
 * Associate a buffer with a vnode.
 */
void
bgetvp(vp, bp)
	register struct vnode *vp;
	register struct buf *bp;
{
	int s;

	KASSERT(bp->b_vp == NULL, ("bgetvp: not free"));

	vhold(vp);
	bp->b_vp = vp;
	bp->b_dev = vn_todev(vp);
	/*
	 * Insert onto list for new vnode.
	 */
	s = splbio();
	bp->b_xflags |= B_VNCLEAN;
	bp->b_xflags &= ~B_VNDIRTY;
	TAILQ_INSERT_TAIL(&vp->v_cleanblkhd, bp, b_vnbufs);
	splx(s);
}

/*
 * Disassociate a buffer from a vnode.
 */
void
brelvp(bp)
	register struct buf *bp;
{
	struct vnode *vp;
	struct buflists *listheadp;
	int s;

	KASSERT(bp->b_vp != NULL, ("brelvp: NULL"));

	/*
	 * Delete from old vnode list, if on one.
	 */
	vp = bp->b_vp;
	s = splbio();
	if (bp->b_xflags & (B_VNDIRTY|B_VNCLEAN)) {
		if (bp->b_xflags & B_VNDIRTY)
			listheadp = &vp->v_dirtyblkhd;
		else
			listheadp = &vp->v_cleanblkhd;
		TAILQ_REMOVE(listheadp, bp, b_vnbufs);
		bp->b_xflags &= ~(B_VNDIRTY|B_VNCLEAN);
	}
	if ((vp->v_flag & VONWORKLST) && TAILQ_EMPTY(&vp->v_dirtyblkhd)) {
		vp->v_flag &= ~VONWORKLST;
		LIST_REMOVE(vp, v_synclist);
	}
	splx(s);
	bp->b_vp = (struct vnode *) 0;
	vdrop(vp);
}

/*
 * The workitem queue.
 *
 * It is useful to delay writes of file data and filesystem metadata
 * for tens of seconds so that quickly created and deleted files need
 * not waste disk bandwidth being created and removed. To realize this,
 * we append vnodes to a "workitem" queue. When running with a soft
 * updates implementation, most pending metadata dependencies should
 * not wait for more than a few seconds. Thus, metadata on mounted
 * block devices is delayed only about half the time that file data
 * is delayed. Similarly, directory updates are more critical, so they
 * are delayed only about a third of the time that file data is delayed.
 * Thus, there are SYNCER_MAXDELAY queues that are processed round-robin
 * at a rate of one each second (driven off the filesystem syncer
 * process). The syncer_delayno variable indicates the next queue that
 * is to be processed. Items that need to be processed soon are placed
 * in this queue:
 *
 *	syncer_workitem_pending[syncer_delayno]
 *
 * A delay of fifteen seconds is done by placing the request fifteen
 * entries later in the queue:
 *
 *	syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask]
 *
 */
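/*
 * Illustrative example (not part of the original file): hashinit() in
 * vntblinit() sizes the table to a power of two, so with SYNCER_MAXDELAY
 * of 32, syncer_mask is 31 and "& syncer_mask" wraps the slot index.
 * If syncer_delayno is 30, a fifteen second delay lands in slot
 * (30 + 15) & 31 == 13.
 */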

/*
 * Add an item to the syncer work queue.
 */
static void
vn_syncer_add_to_worklist(struct vnode *vp, int delay)
{
	int s, slot;

	s = splbio();

	if (vp->v_flag & VONWORKLST) {
		LIST_REMOVE(vp, v_synclist);
	}

	if (delay > syncer_maxdelay - 2)
		delay = syncer_maxdelay - 2;
	slot = (syncer_delayno + delay) & syncer_mask;

	LIST_INSERT_HEAD(&syncer_workitem_pending[slot], vp, v_synclist);
	vp->v_flag |= VONWORKLST;
	splx(s);
}

struct  proc *updateproc;
static void sched_sync __P((void));
static struct kproc_desc up_kp = {
	"syncer",
	sched_sync,
	&updateproc
};
SYSINIT(syncer, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp)

/*
 * System filesystem synchronizer daemon.
 */
void
sched_sync(void)
{
	struct synclist *slp;
	struct vnode *vp;
	long starttime;
	int s;
	struct proc *p = updateproc;

	p->p_flag |= P_BUFEXHAUST;

	for (;;) {
		starttime = time_second;

		/*
		 * Push files whose dirty time has expired.  Be careful
		 * of interrupt race on slp queue.
		 */
		s = splbio();
		slp = &syncer_workitem_pending[syncer_delayno];
		syncer_delayno += 1;
		if (syncer_delayno == syncer_maxdelay)
			syncer_delayno = 0;
		splx(s);

		while ((vp = LIST_FIRST(slp)) != NULL) {
			if (VOP_ISLOCKED(vp, NULL) == 0) {
				vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
				(void) VOP_FSYNC(vp, p->p_ucred, MNT_LAZY, p);
				VOP_UNLOCK(vp, 0, p);
			}
			s = splbio();
			if (LIST_FIRST(slp) == vp) {
				/*
				 * Note: v_tag VT_VFS vps can remain on the
				 * worklist too with no dirty blocks, but
				 * since sync_fsync() moves it to a different
				 * slot we are safe.
				 */
				if (TAILQ_EMPTY(&vp->v_dirtyblkhd) &&
				    !vn_isdisk(vp))
					panic("sched_sync: fsync failed vp %p tag %d", vp, vp->v_tag);
				/*
				 * Put us back on the worklist.  The worklist
				 * routine will remove us from our current
				 * position and then add us back in at a later
				 * position.
				 */
				vn_syncer_add_to_worklist(vp, syncdelay);
			}
			splx(s);
		}

		/*
		 * Do soft update processing.
		 */
		if (bioops.io_sync)
			(*bioops.io_sync)(NULL);

		/*
		 * The variable rushjob allows the kernel to speed up the
		 * processing of the filesystem syncer process. A rushjob
		 * value of N tells the filesystem syncer to process the next
		 * N seconds worth of work on its queue ASAP. Currently rushjob
		 * is used by the soft update code to speed up the filesystem
		 * syncer process when the incore state is getting so far
		 * ahead of the disk that the kernel memory pool is being
		 * threatened with exhaustion.
		 */
		if (rushjob > 0) {
			rushjob -= 1;
			continue;
		}
		/*
		 * If it has taken us less than a second to process the
		 * current work, then wait. Otherwise start right over
		 * again. We can still lose time if any single round
		 * takes more than two seconds, but it does not really
		 * matter as we are just trying to generally pace the
		 * filesystem activity.
		 */
		if (time_second == starttime)
			tsleep(&lbolt, PPAUSE, "syncer", 0);
	}
}

/*
 * Request the syncer daemon to speed up its work.
 * We never push it to speed up more than half of its
 * normal turn time, otherwise it could take over the cpu.
 */
int
speedup_syncer()
{
	int s;

	s = splhigh();
	if (updateproc->p_wchan == &lbolt)
		setrunnable(updateproc);
	splx(s);
	if (rushjob < syncdelay / 2) {
		rushjob += 1;
		stat_rush_requests += 1;
		return (1);
	}
	return(0);
}
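/*
 * Illustrative example (not part of the original file): with the default
 * syncdelay of 30 seconds, rushjob is capped at 15.  Each successful
 * speedup_syncer() call lets sched_sync() skip its one second sleep once,
 * draining one extra queue slot per call.
 */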

/*
 * Associate a p-buffer with a vnode.
 *
 * Also sets B_PAGING flag to indicate that vnode is not fully associated
 * with the buffer.  i.e. the bp has not been linked into the vnode or
 * ref-counted.
 */
void
pbgetvp(vp, bp)
	register struct vnode *vp;
	register struct buf *bp;
{

	KASSERT(bp->b_vp == NULL, ("pbgetvp: not free"));

	bp->b_vp = vp;
	bp->b_flags |= B_PAGING;
	bp->b_dev = vn_todev(vp);
}

/*
 * Disassociate a p-buffer from a vnode.
 */
void
pbrelvp(bp)
	register struct buf *bp;
{

	KASSERT(bp->b_vp != NULL, ("pbrelvp: NULL"));

#if !defined(MAX_PERF)
	/* XXX REMOVE ME */
	if (bp->b_vnbufs.tqe_next != NULL) {
		panic(
		    "relpbuf(): b_vp was probably reassignbuf()d %p %x",
		    bp,
		    (int)bp->b_flags
		);
	}
#endif
	bp->b_vp = (struct vnode *) 0;
	bp->b_flags &= ~B_PAGING;
}

void
pbreassignbuf(bp, newvp)
	struct buf *bp;
	struct vnode *newvp;
{
#if !defined(MAX_PERF)
	if ((bp->b_flags & B_PAGING) == 0) {
		panic(
		    "pbreassignbuf() on non phys bp %p",
		    bp
		);
	}
#endif
	bp->b_vp = newvp;
}

/*
 * Reassign a buffer from one vnode to another.
 * Used to assign file specific control information
 * (indirect blocks) to the vnode to which they belong.
 */
void
reassignbuf(bp, newvp)
	register struct buf *bp;
	register struct vnode *newvp;
{
	struct buflists *listheadp;
	int delay;
	int s;

	if (newvp == NULL) {
		printf("reassignbuf: NULL");
		return;
	}
	++reassignbufcalls;

#if !defined(MAX_PERF)
	/*
	 * B_PAGING flagged buffers cannot be reassigned because their vp
	 * is not fully linked in.
	 */
	if (bp->b_flags & B_PAGING)
		panic("cannot reassign paging buffer");
#endif

	s = splbio();
	/*
	 * Delete from old vnode list, if on one.
	 */
	if (bp->b_xflags & (B_VNDIRTY|B_VNCLEAN)) {
		if (bp->b_xflags & B_VNDIRTY)
			listheadp = &bp->b_vp->v_dirtyblkhd;
		else
			listheadp = &bp->b_vp->v_cleanblkhd;
		TAILQ_REMOVE(listheadp, bp, b_vnbufs);
		bp->b_xflags &= ~(B_VNDIRTY|B_VNCLEAN);
		if (bp->b_vp != newvp) {
			vdrop(bp->b_vp);
			bp->b_vp = NULL;	/* for clarification */
		}
	}
	/*
	 * If dirty, put on list of dirty buffers; otherwise insert onto list
	 * of clean buffers.
	 */
	if (bp->b_flags & B_DELWRI) {
		struct buf *tbp;

		listheadp = &newvp->v_dirtyblkhd;
		if ((newvp->v_flag & VONWORKLST) == 0) {
			switch (newvp->v_type) {
			case VDIR:
				delay = dirdelay;
				break;
			case VCHR:
			case VBLK:
				if (newvp->v_specmountpoint != NULL) {
					delay = metadelay;
					break;
				}
				/* fall through */
			default:
				delay = filedelay;
			}
			vn_syncer_add_to_worklist(newvp, delay);
		}
		bp->b_xflags |= B_VNDIRTY;
		tbp = TAILQ_FIRST(listheadp);
		if (tbp == NULL ||
		    bp->b_lblkno == 0 ||
		    (bp->b_lblkno > 0 && bp->b_lblkno < tbp->b_lblkno)) {
			TAILQ_INSERT_HEAD(listheadp, bp, b_vnbufs);
			++reassignbufsortgood;
		} else if (bp->b_lblkno < 0) {
			TAILQ_INSERT_TAIL(listheadp, bp, b_vnbufs);
			++reassignbufsortgood;
		} else if (reassignbufmethod == 1) {
			/*
			 * New sorting algorithm, only handle sequential case,
			 * otherwise guess.
			 */
			if ((tbp = gbincore(newvp, bp->b_lblkno - 1)) != NULL &&
			    (tbp->b_xflags & B_VNDIRTY)) {
				TAILQ_INSERT_AFTER(listheadp, tbp, bp, b_vnbufs);
				++reassignbufsortgood;
			} else {
				TAILQ_INSERT_HEAD(listheadp, bp, b_vnbufs);
				++reassignbufsortbad;
			}
		} else {
			/*
			 * Old sorting algorithm, scan queue and insert
			 */
			struct buf *ttbp;
			while ((ttbp = TAILQ_NEXT(tbp, b_vnbufs)) &&
			    (ttbp->b_lblkno < bp->b_lblkno)) {
				++reassignbufloops;
				tbp = ttbp;
			}
			TAILQ_INSERT_AFTER(listheadp, tbp, bp, b_vnbufs);
		}
	} else {
		bp->b_xflags |= B_VNCLEAN;
		TAILQ_INSERT_TAIL(&newvp->v_cleanblkhd, bp, b_vnbufs);
		if ((newvp->v_flag & VONWORKLST) &&
		    TAILQ_EMPTY(&newvp->v_dirtyblkhd)) {
			newvp->v_flag &= ~VONWORKLST;
			LIST_REMOVE(newvp, v_synclist);
		}
	}
	if (bp->b_vp != newvp) {
		bp->b_vp = newvp;
		vhold(bp->b_vp);
	}
	splx(s);
}
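/*
 * Illustrative note (not part of the original file): the choice between
 * the two insertion strategies above is run-time tunable through the
 * vfs.reassignbufmethod sysctl declared at the top of this file; setting
 * it to 0 falls back to the linear scan, and the reassignbufsortgood,
 * reassignbufsortbad, and reassignbufloops counters make the two easy
 * to compare.
 */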

/*
 * Create a vnode for a block device.
 * Used for mounting the root file system.
 */
int
bdevvp(dev, vpp)
	dev_t dev;
	struct vnode **vpp;
{
	register struct vnode *vp;
	struct vnode *nvp;
	int error;

	if (dev == NODEV) {
		*vpp = NULLVP;
		return (ENXIO);
	}
	error = getnewvnode(VT_NON, (struct mount *)0, spec_vnodeop_p, &nvp);
	if (error) {
		*vpp = NULLVP;
		return (error);
	}
	vp = nvp;
	vp->v_type = VBLK;
	addalias(vp, dev);
	*vpp = vp;
	return (0);
}

/*
 * Add vnode to the alias list hung off the dev_t.
 *
 * The reason for this gunk is that multiple vnodes can reference
 * the same physical device, so checking vp->v_usecount to see
 * how many users there are is inadequate; the v_usecount for
 * the vnodes need to be accumulated.  vcount() does that.
 */
void
addaliasu(nvp, nvp_rdev)
	struct vnode *nvp;
	udev_t nvp_rdev;
{

	if (nvp->v_type != VBLK && nvp->v_type != VCHR)
		panic("addaliasu on non-special vnode");
	addalias(nvp, udev2dev(nvp_rdev, nvp->v_type == VBLK ? 1 : 0));
}

void
addalias(nvp, dev)
	struct vnode *nvp;
	dev_t dev;
{

	if (nvp->v_type != VBLK && nvp->v_type != VCHR)
		panic("addalias on non-special vnode");

	nvp->v_rdev = dev;
	simple_lock(&spechash_slock);
	SLIST_INSERT_HEAD(&dev->si_hlist, nvp, v_specnext);
	simple_unlock(&spechash_slock);
}

/*
 * Grab a particular vnode from the free list, increment its
 * reference count and lock it. The vnode lock bit is set if the
 * vnode is being eliminated in vgone. The process is awakened
 * when the transition is completed, and an error returned to
 * indicate that the vnode is no longer usable (possibly having
 * been changed to a new file system type).
 */
int
vget(vp, flags, p)
	register struct vnode *vp;
	int flags;
	struct proc *p;
{
	int error;

	/*
	 * If the vnode is in the process of being cleaned out for
	 * another use, we wait for the cleaning to finish and then
	 * return failure. Cleaning is determined by checking that
	 * the VXLOCK flag is set.
	 */
	if ((flags & LK_INTERLOCK) == 0) {
		simple_lock(&vp->v_interlock);
	}
	if (vp->v_flag & VXLOCK) {
		vp->v_flag |= VXWANT;
		simple_unlock(&vp->v_interlock);
		tsleep((caddr_t)vp, PINOD, "vget", 0);
		return (ENOENT);
	}

	vp->v_usecount++;

	if (VSHOULDBUSY(vp))
		vbusy(vp);
	if (flags & LK_TYPE_MASK) {
		if ((error = vn_lock(vp, flags | LK_INTERLOCK, p)) != 0) {
			/*
			 * must expand vrele here because we do not want
			 * to call VOP_INACTIVE if the reference count
			 * drops back to zero since it was never really
			 * active. We must remove it from the free list
			 * before sleeping so that multiple processes do
			 * not try to recycle it.
			 */
			simple_lock(&vp->v_interlock);
			vp->v_usecount--;
			if (VSHOULDFREE(vp))
				vfree(vp);
			simple_unlock(&vp->v_interlock);
		}
		return (error);
	}
	simple_unlock(&vp->v_interlock);
	return (0);
}

void
vref(struct vnode *vp)
{
	simple_lock(&vp->v_interlock);
	vp->v_usecount++;
	simple_unlock(&vp->v_interlock);
}

/*
 * Vnode put/release.
 * If count drops to zero, call inactive routine and return to freelist.
 */
void
vrele(vp)
	struct vnode *vp;
{
	struct proc *p = curproc;	/* XXX */

	KASSERT(vp != NULL, ("vrele: null vp"));

	simple_lock(&vp->v_interlock);

	if (vp->v_usecount > 1) {

		vp->v_usecount--;
		simple_unlock(&vp->v_interlock);

		return;
	}

	if (vp->v_usecount == 1) {

		vp->v_usecount--;
		if (VSHOULDFREE(vp))
			vfree(vp);
	/*
	 * If we are doing a vput, the node is already locked, and we must
	 * call VOP_INACTIVE with the node locked.  So, in the case of
	 * vrele, we explicitly lock the vnode before calling VOP_INACTIVE.
	 */
		if (vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK, p) == 0) {
			VOP_INACTIVE(vp, p);
		}

	} else {
#ifdef DIAGNOSTIC
		vprint("vrele: negative ref count", vp);
		simple_unlock(&vp->v_interlock);
#endif
		panic("vrele: negative ref cnt");
	}
}

void
vput(vp)
	struct vnode *vp;
{
	struct proc *p = curproc;	/* XXX */

	KASSERT(vp != NULL, ("vput: null vp"));

	simple_lock(&vp->v_interlock);

	if (vp->v_usecount > 1) {

		vp->v_usecount--;
		VOP_UNLOCK(vp, LK_INTERLOCK, p);
		return;

	}

	if (vp->v_usecount == 1) {

		vp->v_usecount--;
		if (VSHOULDFREE(vp))
			vfree(vp);
	/*
	 * If we are doing a vput, the node is already locked, and we must
	 * call VOP_INACTIVE with the node locked.  So, in the case of
	 * vrele, we explicitly lock the vnode before calling VOP_INACTIVE.
	 */
		simple_unlock(&vp->v_interlock);
		VOP_INACTIVE(vp, p);

	} else {
#ifdef DIAGNOSTIC
		vprint("vput: negative ref count", vp);
#endif
		panic("vput: negative ref cnt");
	}
}

/*
 * Somebody doesn't want the vnode recycled.
 */
void
vhold(vp)
	register struct vnode *vp;
{
	int s;

	s = splbio();
	vp->v_holdcnt++;
	if (VSHOULDBUSY(vp))
		vbusy(vp);
	splx(s);
}

/*
 * One less who cares about this vnode.
 */
void
vdrop(vp)
	register struct vnode *vp;
{
	int s;

	s = splbio();
	if (vp->v_holdcnt <= 0)
		panic("vdrop: holdcnt");
	vp->v_holdcnt--;
	if (VSHOULDFREE(vp))
		vfree(vp);
	splx(s);
}

/*
 * Remove any vnodes in the vnode table belonging to mount point mp.
 *
 * If MNT_NOFORCE is specified, there should not be any active ones,
 * return error if any are found (nb: this is a user error, not a
 * system error). If MNT_FORCE is specified, detach any active vnodes
 * that are found.
 */
#ifdef DIAGNOSTIC
static int busyprt = 0;		/* print out busy vnodes */
SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "");
#endif

int
vflush(mp, skipvp, flags)
	struct mount *mp;
	struct vnode *skipvp;
	int flags;
{
	struct proc *p = curproc;	/* XXX */
	struct vnode *vp, *nvp;
	int busy = 0;

	simple_lock(&mntvnode_slock);
loop:
	for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp; vp = nvp) {
		/*
		 * Make sure this vnode wasn't reclaimed in getnewvnode().
		 * Start over if it has (it won't be on the list anymore).
		 */
		if (vp->v_mount != mp)
			goto loop;
		nvp = LIST_NEXT(vp, v_mntvnodes);
		/*
		 * Skip over a selected vnode.
		 */
		if (vp == skipvp)
			continue;

		simple_lock(&vp->v_interlock);
		/*
		 * Skip over vnodes marked VSYSTEM.
		 */
		if ((flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
			simple_unlock(&vp->v_interlock);
			continue;
		}
		/*
		 * If WRITECLOSE is set, only flush out regular file vnodes
		 * open for writing.
		 */
		if ((flags & WRITECLOSE) &&
		    (vp->v_writecount == 0 || vp->v_type != VREG)) {
			simple_unlock(&vp->v_interlock);
			continue;
		}

		/*
		 * With v_usecount == 0, all we need to do is clear out the
		 * vnode data structures and we are done.
		 */
		if (vp->v_usecount == 0) {
			simple_unlock(&mntvnode_slock);
			vgonel(vp, p);
			simple_lock(&mntvnode_slock);
			continue;
		}

		/*
		 * If FORCECLOSE is set, forcibly close the vnode. For block
		 * or character devices, revert to an anonymous device. For
		 * all other files, just kill them.
		 */
		if (flags & FORCECLOSE) {
			simple_unlock(&mntvnode_slock);
			if (vp->v_type != VBLK && vp->v_type != VCHR) {
				vgonel(vp, p);
			} else {
				vclean(vp, 0, p);
				vp->v_op = spec_vnodeop_p;
				insmntque(vp, (struct mount *) 0);
			}
			simple_lock(&mntvnode_slock);
			continue;
		}
#ifdef DIAGNOSTIC
		if (busyprt)
			vprint("vflush: busy vnode", vp);
#endif
		simple_unlock(&vp->v_interlock);
		busy++;
	}
	simple_unlock(&mntvnode_slock);
	if (busy)
		return (EBUSY);
	return (0);
}

/*
 * Disassociate the underlying file system from a vnode.
 */
static void
vclean(vp, flags, p)
	struct vnode *vp;
	int flags;
	struct proc *p;
{
	int active;
	vm_object_t obj;

	/*
	 * Check to see if the vnode is in use. If so we have to reference it
	 * before we clean it out so that its count cannot fall to zero and
	 * generate a race against ourselves to recycle it.
	 */
	if ((active = vp->v_usecount))
		vp->v_usecount++;

	/*
	 * Prevent the vnode from being recycled or brought into use while we
	 * clean it out.
	 */
	if (vp->v_flag & VXLOCK)
		panic("vclean: deadlock");
	vp->v_flag |= VXLOCK;
	/*
	 * Even if the count is zero, the VOP_INACTIVE routine may still
	 * have the object locked while it cleans it out. The VOP_LOCK
	 * ensures that the VOP_INACTIVE routine is done with its work.
	 * For active vnodes, it ensures that no other activity can
	 * occur while the underlying object is being cleaned out.
	 */
	VOP_LOCK(vp, LK_DRAIN | LK_INTERLOCK, p);

	/*
	 * Clean out any buffers associated with the vnode.
	 */
	vinvalbuf(vp, V_SAVE, NOCRED, p, 0, 0);
	if ((obj = vp->v_object) != NULL) {
		if (obj->ref_count == 0) {
			/*
			 * vclean() may be called twice.  The first time removes the
			 * primary reference to the object, the second time goes
			 * one further and is a special-case to terminate the object.
			 */
			vm_object_terminate(obj);
		} else {
			/*
			 * Woe to the process that tries to page now :-).
			 */
			vm_pager_deallocate(obj);
		}
	}

	/*
	 * If purging an active vnode, it must be closed and
	 * deactivated before being reclaimed. Note that the
	 * VOP_INACTIVE will unlock the vnode.
	 */
	if (active) {
		if (flags & DOCLOSE)
			VOP_CLOSE(vp, FNONBLOCK, NOCRED, p);
		VOP_INACTIVE(vp, p);
	} else {
		/*
		 * Any other processes trying to obtain this lock must first
		 * wait for VXLOCK to clear, then call the new lock operation.
		 */
		VOP_UNLOCK(vp, 0, p);
	}
	/*
	 * Reclaim the vnode.
	 */
	if (VOP_RECLAIM(vp, p))
		panic("vclean: cannot reclaim");

	if (active)
		vrele(vp);

	cache_purge(vp);
	if (vp->v_vnlock) {
		FREE(vp->v_vnlock, M_VNODE);
		vp->v_vnlock = NULL;
	}

	if (VSHOULDFREE(vp))
		vfree(vp);

	/*
	 * Done with purge, notify sleepers of the grim news.
	 */
	vp->v_op = dead_vnodeop_p;
	vn_pollgone(vp);
	vp->v_tag = VT_NON;
	vp->v_flag &= ~VXLOCK;
	if (vp->v_flag & VXWANT) {
		vp->v_flag &= ~VXWANT;
		wakeup((caddr_t) vp);
	}
}

/*
 * Eliminate all activity associated with the requested vnode
 * and with all vnodes aliased to the requested vnode.
 */
int
vop_revoke(ap)
	struct vop_revoke_args /* {
		struct vnode *a_vp;
		int a_flags;
	} */ *ap;
{
	struct vnode *vp, *vq;
	dev_t dev;

	KASSERT((ap->a_flags & REVOKEALL) != 0, ("vop_revoke"));

	vp = ap->a_vp;
	/*
	 * If a vgone (or vclean) is already in progress,
	 * wait until it is done and return.
	 */
	if (vp->v_flag & VXLOCK) {
		vp->v_flag |= VXWANT;
		simple_unlock(&vp->v_interlock);
		tsleep((caddr_t)vp, PINOD, "vop_revokeall", 0);
		return (0);
	}
	dev = vp->v_rdev;
	for (;;) {
		simple_lock(&spechash_slock);
		vq = SLIST_FIRST(&dev->si_hlist);
		simple_unlock(&spechash_slock);
		if (!vq)
			break;
		vgone(vq);
	}
	return (0);
}

/*
 * Recycle an unused vnode to the front of the free list.
 * Release the passed interlock if the vnode will be recycled.
 */
int
vrecycle(vp, inter_lkp, p)
	struct vnode *vp;
	struct simplelock *inter_lkp;
	struct proc *p;
{

	simple_lock(&vp->v_interlock);
	if (vp->v_usecount == 0) {
		if (inter_lkp) {
			simple_unlock(inter_lkp);
		}
		vgonel(vp, p);
		return (1);
	}
	simple_unlock(&vp->v_interlock);
	return (0);
}

/*
 * Eliminate all activity associated with a vnode
 * in preparation for reuse.
 */
void
vgone(vp)
	register struct vnode *vp;
{
	struct proc *p = curproc;	/* XXX */

	simple_lock(&vp->v_interlock);
	vgonel(vp, p);
}

/*
 * vgone, with the vp interlock held.
 */
static void
vgonel(vp, p)
	struct vnode *vp;
	struct proc *p;
{
	int s;

	/*
	 * If a vgone (or vclean) is already in progress,
	 * wait until it is done and return.
	 */
	if (vp->v_flag & VXLOCK) {
		vp->v_flag |= VXWANT;
		simple_unlock(&vp->v_interlock);
		tsleep((caddr_t)vp, PINOD, "vgone", 0);
		return;
	}

	/*
	 * Clean out the filesystem specific data.
	 */
	vclean(vp, DOCLOSE, p);
	simple_lock(&vp->v_interlock);

	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mount != NULL)
		insmntque(vp, (struct mount *)0);
	/*
	 * If special device, remove it from special device alias list
	 * if it is on one.
	 */
	if ((vp->v_type == VBLK || vp->v_type == VCHR) && vp->v_rdev != NULL) {
		simple_lock(&spechash_slock);
		SLIST_REMOVE(&vp->v_hashchain, vp, vnode, v_specnext);
		freedev(vp->v_rdev);
		simple_unlock(&spechash_slock);
		vp->v_rdev = NULL;
	}

	/*
	 * If it is on the freelist and not already at the head,
	 * move it to the head of the list. The test of the back
	 * pointer and the reference count of zero is because
	 * it will be removed from the free list by getnewvnode,
	 * but will not have its reference count incremented until
	 * after calling vgone. If the reference count were
	 * incremented first, vgone would (incorrectly) try to
	 * close the previous instance of the underlying object.
	 */
	if (vp->v_usecount == 0 && !(vp->v_flag & VDOOMED)) {
		s = splbio();
		simple_lock(&vnode_free_list_slock);
		if (vp->v_flag & VFREE) {
			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
		} else if (vp->v_flag & VTBFREE) {
			TAILQ_REMOVE(&vnode_tobefree_list, vp, v_freelist);
			vp->v_flag &= ~VTBFREE;
			freevnodes++;
		} else
			freevnodes++;
		vp->v_flag |= VFREE;
		TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
		simple_unlock(&vnode_free_list_slock);
		splx(s);
	}

	vp->v_type = VBAD;
	simple_unlock(&vp->v_interlock);
}

/*
 * Lookup a vnode by device number.
 */
int
vfinddev(dev, type, vpp)
	dev_t dev;
	enum vtype type;
	struct vnode **vpp;
{
	struct vnode *vp;

	simple_lock(&spechash_slock);
	SLIST_FOREACH(vp, &dev->si_hlist, v_specnext) {
		if (type == vp->v_type) {
			*vpp = vp;
			simple_unlock(&spechash_slock);
			return (1);
		}
	}
	simple_unlock(&spechash_slock);
	return (0);
}

/*
 * Calculate the total number of references to a special device.
 */
int
vcount(vp)
	struct vnode *vp;
{
	struct vnode *vq;
	int count;

	count = 0;
	simple_lock(&spechash_slock);
	SLIST_FOREACH(vq, &vp->v_hashchain, v_specnext)
		count += vq->v_usecount;
	simple_unlock(&spechash_slock);
	return (count);
}
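/*
 * Illustrative example (not part of the original file): if a device is
 * aliased by two vnodes with v_usecount values of 1 and 2, vcount() on
 * either vnode returns 3.  This is the accumulated count the addalias()
 * comment above refers to, which callers such as device close routines
 * need when deciding whether a device is still in use.
 */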
1895
1896/*
1897 * Print out a description of a vnode.
1898 */
1899static char *typename[] =
1900{"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD"};
1901
1902void
1903vprint(label, vp)
1904	char *label;
1905	struct vnode *vp;
1906{
1907	char buf[96];
1908
1909	if (label != NULL)
1910		printf("%s: %p: ", label, (void *)vp);
1911	else
1912		printf("%p: ", (void *)vp);
1913	printf("type %s, usecount %d, writecount %d, refcount %d,",
1914	    typename[vp->v_type], vp->v_usecount, vp->v_writecount,
1915	    vp->v_holdcnt);
1916	buf[0] = '\0';
1917	if (vp->v_flag & VROOT)
1918		strcat(buf, "|VROOT");
1919	if (vp->v_flag & VTEXT)
1920		strcat(buf, "|VTEXT");
1921	if (vp->v_flag & VSYSTEM)
1922		strcat(buf, "|VSYSTEM");
1923	if (vp->v_flag & VXLOCK)
1924		strcat(buf, "|VXLOCK");
1925	if (vp->v_flag & VXWANT)
1926		strcat(buf, "|VXWANT");
1927	if (vp->v_flag & VBWAIT)
1928		strcat(buf, "|VBWAIT");
1929	if (vp->v_flag & VDOOMED)
1930		strcat(buf, "|VDOOMED");
1931	if (vp->v_flag & VFREE)
1932		strcat(buf, "|VFREE");
1933	if (vp->v_flag & VOBJBUF)
1934		strcat(buf, "|VOBJBUF");
1935	if (buf[0] != '\0')
1936		printf(" flags (%s)", &buf[1]);
1937	if (vp->v_data == NULL) {
1938		printf("\n");
1939	} else {
1940		printf("\n\t");
1941		VOP_PRINT(vp);
1942	}
1943}
1944
1945#ifdef DDB
1946#include <ddb/ddb.h>
1947/*
1948 * List all of the locked vnodes in the system.
1949 * Called when debugging the kernel.
1950 */
1951DB_SHOW_COMMAND(lockedvnodes, lockedvnodes)
1952{
1953	struct proc *p = curproc;	/* XXX */
1954	struct mount *mp, *nmp;
1955	struct vnode *vp;
1956
1957	printf("Locked vnodes\n");
1958	simple_lock(&mountlist_slock);
1959	for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
1960		if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock, p)) {
1961			nmp = TAILQ_NEXT(mp, mnt_list);
1962			continue;
1963		}
1964		LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
1965			if (VOP_ISLOCKED(vp, NULL))
1966				vprint((char *)0, vp);
1967		}
1968		simple_lock(&mountlist_slock);
1969		nmp = TAILQ_NEXT(mp, mnt_list);
1970		vfs_unbusy(mp, p);
1971	}
1972	simple_unlock(&mountlist_slock);
1973}
1974#endif
1975
1976/*
1977 * Top level filesystem related information gathering.
1978 */
1979static int	sysctl_ovfs_conf __P(SYSCTL_HANDLER_ARGS);
1980
1981static int
1982vfs_sysctl SYSCTL_HANDLER_ARGS
1983{
1984	int *name = (int *)arg1 - 1;	/* XXX */
1985	u_int namelen = arg2 + 1;	/* XXX */
1986	struct vfsconf *vfsp;
1987
1988#if 1 || defined(COMPAT_PRELITE2)
1989	/* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. */
1990	if (namelen == 1)
1991		return (sysctl_ovfs_conf(oidp, arg1, arg2, req));
1992#endif
1993
1994#ifdef notyet
1995	/* all sysctl names at this level are at least name and field */
1996	if (namelen < 2)
1997		return (ENOTDIR);		/* overloaded */
1998	if (name[0] != VFS_GENERIC) {
1999		for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
2000			if (vfsp->vfc_typenum == name[0])
2001				break;
2002		if (vfsp == NULL)
2003			return (EOPNOTSUPP);
2004		return ((*vfsp->vfc_vfsops->vfs_sysctl)(&name[1], namelen - 1,
2005		    oldp, oldlenp, newp, newlen, p));
2006	}
2007#endif
2008	switch (name[1]) {
2009	case VFS_MAXTYPENUM:
2010		if (namelen != 2)
2011			return (ENOTDIR);
2012		return (SYSCTL_OUT(req, &maxvfsconf, sizeof(int)));
2013	case VFS_CONF:
2014		if (namelen != 3)
2015			return (ENOTDIR);	/* overloaded */
2016		for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
2017			if (vfsp->vfc_typenum == name[2])
2018				break;
2019		if (vfsp == NULL)
2020			return (EOPNOTSUPP);
2021		return (SYSCTL_OUT(req, vfsp, sizeof *vfsp));
2022	}
2023	return (EOPNOTSUPP);
2024}
2025
2026SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD, vfs_sysctl,
2027	"Generic filesystem");
2028
2029#if 1 || defined(COMPAT_PRELITE2)
2030
2031static int
2032sysctl_ovfs_conf SYSCTL_HANDLER_ARGS
2033{
2034	int error;
2035	struct vfsconf *vfsp;
2036	struct ovfsconf ovfs;
2037
2038	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) {
2039		ovfs.vfc_vfsops = vfsp->vfc_vfsops;	/* XXX used as flag */
2040		strcpy(ovfs.vfc_name, vfsp->vfc_name);
2041		ovfs.vfc_index = vfsp->vfc_typenum;
2042		ovfs.vfc_refcount = vfsp->vfc_refcount;
2043		ovfs.vfc_flags = vfsp->vfc_flags;
2044		error = SYSCTL_OUT(req, &ovfs, sizeof ovfs);
2045		if (error)
2046			return error;
2047	}
2048	return 0;
2049}
2050
2051#endif /* 1 || COMPAT_PRELITE2 */
2052
2053#if 0
2054#define KINFO_VNODESLOP	10
2055/*
2056 * Dump vnode list (via sysctl).
2057 * Copyout address of vnode followed by vnode.
2058 */
2059/* ARGSUSED */
2060static int
2061sysctl_vnode SYSCTL_HANDLER_ARGS
2062{
2063	struct proc *p = curproc;	/* XXX */
2064	struct mount *mp, *nmp;
2065	struct vnode *nvp, *vp;
2066	int error;
2067
2068#define VPTRSZ	sizeof (struct vnode *)
2069#define VNODESZ	sizeof (struct vnode)
2070
2071	req->lock = 0;
2072	if (!req->oldptr) /* Make an estimate */
2073		return (SYSCTL_OUT(req, 0,
2074			(numvnodes + KINFO_VNODESLOP) * (VPTRSZ + VNODESZ)));
2075
2076	simple_lock(&mountlist_slock);
2077	for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
2078		if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock, p)) {
2079			nmp = TAILQ_NEXT(mp, mnt_list);
2080			continue;
2081		}
2082again:
2083		simple_lock(&mntvnode_slock);
2084		for (vp = LIST_FIRST(&mp->mnt_vnodelist);
2085		     vp != NULL;
2086		     vp = nvp) {
2087			/*
2088			 * Check that the vp is still associated with
2089			 * this filesystem.  RACE: could have been
2090			 * recycled onto the same filesystem.
2091			 */
2092			if (vp->v_mount != mp) {
2093				simple_unlock(&mntvnode_slock);
2094				goto again;
2095			}
2096			nvp = LIST_NEXT(vp, v_mntvnodes);
2097			simple_unlock(&mntvnode_slock);
2098			if ((error = SYSCTL_OUT(req, &vp, VPTRSZ)) ||
2099			    (error = SYSCTL_OUT(req, vp, VNODESZ)))
2100				return (error);
2101			simple_lock(&mntvnode_slock);
2102		}
2103		simple_unlock(&mntvnode_slock);
2104		simple_lock(&mountlist_slock);
2105		nmp = TAILQ_NEXT(mp, mnt_list);
2106		vfs_unbusy(mp, p);
2107	}
2108	simple_unlock(&mountlist_slock);
2109
2110	return (0);
2111}
2112#endif
2113
2114/*
2115 * XXX
2116 * Exporting the vnode list on large systems causes them to crash.
2117 * Exporting the vnode list on medium systems causes sysctl to coredump.
2118 */
2119#if 0
2120SYSCTL_PROC(_kern, KERN_VNODE, vnode, CTLTYPE_OPAQUE|CTLFLAG_RD,
2121	0, 0, sysctl_vnode, "S,vnode", "");
2122#endif
2123
2124/*
2125 * Check to see if a filesystem is mounted on a block device.
2126 */
2127int
2128vfs_mountedon(vp)
2129	struct vnode *vp;
2130{
2131
2132	if (vp->v_specmountpoint != NULL)
2133		return (EBUSY);
2134	return (0);
2135}
2136
2137/*
2138 * Unmount all filesystems. The list is traversed in reverse order
2139 * of mounting to avoid dependencies.
2140 */
2141void
2142vfs_unmountall()
2143{
2144	struct mount *mp;
2145	struct proc *p;
2146	int error;
2147
2148	if (curproc != NULL)
2149		p = curproc;
2150	else
2151		p = initproc;	/* XXX XXX should this be proc0? */
2152	/*
2153	 * Since this only runs when rebooting, it is not interlocked.
2154	 */
2155	while (!TAILQ_EMPTY(&mountlist)) {
2156		mp = TAILQ_LAST(&mountlist, mntlist);
2157		error = dounmount(mp, MNT_FORCE, p);
2158		if (error) {
2159			TAILQ_REMOVE(&mountlist, mp, mnt_list);
2160			printf("unmount of %s failed (",
2161			    mp->mnt_stat.f_mntonname);
2162			if (error == EBUSY)
2163				printf("BUSY)\n");
2164			else
2165				printf("%d)\n", error);
2166		} else {
2167			/* The unmount has removed mp from the mountlist */
2168		}
2169	}
2170}
2171
2172/*
2173 * Build hash lists of net addresses and hang them off the mount point.
2174 * Called by ufs_mount() to set up the lists of export addresses.
2175 */
2176static int
2177vfs_hang_addrlist(mp, nep, argp)
2178	struct mount *mp;
2179	struct netexport *nep;
2180	struct export_args *argp;
2181{
2182	register struct netcred *np;
2183	register struct radix_node_head *rnh;
2184	register int i;
2185	struct radix_node *rn;
2186	struct sockaddr *saddr, *smask = 0;
2187	struct domain *dom;
2188	int error;
2189
2190	if (argp->ex_addrlen == 0) {
2191		if (mp->mnt_flag & MNT_DEFEXPORTED)
2192			return (EPERM);
2193		np = &nep->ne_defexported;
2194		np->netc_exflags = argp->ex_flags;
2195		np->netc_anon = argp->ex_anon;
2196		np->netc_anon.cr_ref = 1;
2197		mp->mnt_flag |= MNT_DEFEXPORTED;
2198		return (0);
2199	}
2200	i = sizeof(struct netcred) + argp->ex_addrlen + argp->ex_masklen;
2201	np = (struct netcred *) malloc(i, M_NETADDR, M_WAITOK);
2202	bzero((caddr_t) np, i);
2203	saddr = (struct sockaddr *) (np + 1);
2204	if ((error = copyin(argp->ex_addr, (caddr_t) saddr, argp->ex_addrlen)))
2205		goto out;
2206	if (saddr->sa_len > argp->ex_addrlen)
2207		saddr->sa_len = argp->ex_addrlen;
2208	if (argp->ex_masklen) {
2209		smask = (struct sockaddr *) ((caddr_t) saddr + argp->ex_addrlen);
2210		error = copyin(argp->ex_mask, (caddr_t) smask, argp->ex_masklen);
2211		if (error)
2212			goto out;
2213		if (smask->sa_len > argp->ex_masklen)
2214			smask->sa_len = argp->ex_masklen;
2215	}
2216	i = saddr->sa_family;
2217	if ((rnh = nep->ne_rtable[i]) == 0) {
2218		/*
2219		 * Seems silly to initialize every AF when most are not used;
2220		 * do so on demand here.
2221		 */
2222		for (dom = domains; dom; dom = dom->dom_next)
2223			if (dom->dom_family == i && dom->dom_rtattach) {
2224				dom->dom_rtattach((void **) &nep->ne_rtable[i],
2225				    dom->dom_rtoffset);
2226				break;
2227			}
2228		if ((rnh = nep->ne_rtable[i]) == 0) {
2229			error = ENOBUFS;
2230			goto out;
2231		}
2232	}
2233	rn = (*rnh->rnh_addaddr) ((caddr_t) saddr, (caddr_t) smask, rnh,
2234	    np->netc_rnodes);
2235	if (rn == 0 || np != (struct netcred *) rn) {	/* already exists */
2236		error = EPERM;
2237		goto out;
2238	}
2239	np->netc_exflags = argp->ex_flags;
2240	np->netc_anon = argp->ex_anon;
2241	np->netc_anon.cr_ref = 1;
2242	return (0);
2243out:
2244	free(np, M_NETADDR);
2245	return (error);
2246}
2247
2248/* ARGSUSED */
2249static int
2250vfs_free_netcred(rn, w)
2251	struct radix_node *rn;
2252	void *w;
2253{
2254	register struct radix_node_head *rnh = (struct radix_node_head *) w;
2255
2256	(*rnh->rnh_deladdr) (rn->rn_key, rn->rn_mask, rnh);
2257	free((caddr_t) rn, M_NETADDR);
2258	return (0);
2259}
2260
2261/*
2262 * Free the net address hash lists that are hanging off the mount points.
2263 */
2264static void
2265vfs_free_addrlist(nep)
2266	struct netexport *nep;
2267{
2268	register int i;
2269	register struct radix_node_head *rnh;
2270
2271	for (i = 0; i <= AF_MAX; i++)
2272		if ((rnh = nep->ne_rtable[i])) {
2273			(*rnh->rnh_walktree) (rnh, vfs_free_netcred,
2274			    (caddr_t) rnh);
2275			free((caddr_t) rnh, M_RTABLE);
2276			nep->ne_rtable[i] = 0;
2277		}
2278}
2279
2280int
2281vfs_export(mp, nep, argp)
2282	struct mount *mp;
2283	struct netexport *nep;
2284	struct export_args *argp;
2285{
2286	int error;
2287
2288	if (argp->ex_flags & MNT_DELEXPORT) {
2289		if (mp->mnt_flag & MNT_EXPUBLIC) {
2290			vfs_setpublicfs(NULL, NULL, NULL);
2291			mp->mnt_flag &= ~MNT_EXPUBLIC;
2292		}
2293		vfs_free_addrlist(nep);
2294		mp->mnt_flag &= ~(MNT_EXPORTED | MNT_DEFEXPORTED);
2295	}
2296	if (argp->ex_flags & MNT_EXPORTED) {
2297		if (argp->ex_flags & MNT_EXPUBLIC) {
2298			if ((error = vfs_setpublicfs(mp, nep, argp)) != 0)
2299				return (error);
2300			mp->mnt_flag |= MNT_EXPUBLIC;
2301		}
2302		if ((error = vfs_hang_addrlist(mp, nep, argp)))
2303			return (error);
2304		mp->mnt_flag |= MNT_EXPORTED;
2305	}
2306	return (0);
2307}
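
/*
 * Illustrative sketch, not part of this file: how a filesystem's
 * mount path (ufs_mount() in the comment above) typically drives
 * vfs_export().  `nep' points at the filesystem's own struct
 * netexport, and ex_addr/ex_mask are user-space pointers filled in
 * by mountd(8) -- they are copyin()'d by vfs_hang_addrlist().
 * Variable names here are hypothetical.
 */
#if 0
	struct export_args ea;

	bzero(&ea, sizeof(ea));
	ea.ex_flags = MNT_EXPORTED;		/* add an export... */
	ea.ex_anon = anoncred;			/* cred for anonymous access */
	ea.ex_addr = uaddr;			/* user-space sockaddr */
	ea.ex_addrlen = sizeof(struct sockaddr_in);
	ea.ex_masklen = 0;			/* single host, no mask */
	if ((error = vfs_export(mp, nep, &ea)) != 0)
		return (error);

	ea.ex_flags = MNT_DELEXPORT;		/* ...or delete them all */
	error = vfs_export(mp, nep, &ea);
#endif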
2308
2310/*
2311 * Set the publicly exported filesystem (WebNFS). Currently, only
2312 * one public filesystem is allowed by the spec (RFC 2054 and 2055).
2313 */
2314int
2315vfs_setpublicfs(mp, nep, argp)
2316	struct mount *mp;
2317	struct netexport *nep;
2318	struct export_args *argp;
2319{
2320	int error;
2321	struct vnode *rvp;
2322	char *cp;
2323
2324	/*
2325	 * mp == NULL -> invalidate the current info, the FS is
2326	 * no longer exported. May be called from either vfs_export
2327	 * or unmount, so check if it hasn't already been done.
2328	 */
2329	if (mp == NULL) {
2330		if (nfs_pub.np_valid) {
2331			nfs_pub.np_valid = 0;
2332			if (nfs_pub.np_index != NULL) {
2333				FREE(nfs_pub.np_index, M_TEMP);
2334				nfs_pub.np_index = NULL;
2335			}
2336		}
2337		return (0);
2338	}
2339
2340	/*
2341	 * Only one allowed at a time.
2342	 */
2343	if (nfs_pub.np_valid != 0 && mp != nfs_pub.np_mount)
2344		return (EBUSY);
2345
2346	/*
2347	 * Get real filehandle for root of exported FS.
2348	 */
2349	bzero((caddr_t)&nfs_pub.np_handle, sizeof(nfs_pub.np_handle));
2350	nfs_pub.np_handle.fh_fsid = mp->mnt_stat.f_fsid;
2351
2352	if ((error = VFS_ROOT(mp, &rvp)))
2353		return (error);
2354
2355	if ((error = VFS_VPTOFH(rvp, &nfs_pub.np_handle.fh_fid)))
2356		return (error);
2357
2358	vput(rvp);
2359
2360	/*
2361	 * If an indexfile was specified, pull it in.
2362	 */
2363	if (argp->ex_indexfile != NULL) {
2364		MALLOC(nfs_pub.np_index, char *, MAXNAMLEN + 1, M_TEMP,
2365		    M_WAITOK);
2366		error = copyinstr(argp->ex_indexfile, nfs_pub.np_index,
2367		    MAXNAMLEN, (size_t *)0);
2368		if (!error) {
2369			/*
2370			 * Check for illegal filenames.
2371			 */
2372			for (cp = nfs_pub.np_index; *cp; cp++) {
2373				if (*cp == '/') {
2374					error = EINVAL;
2375					break;
2376				}
2377			}
2378		}
2379		if (error) {
2380			FREE(nfs_pub.np_index, M_TEMP);
2381			return (error);
2382		}
2383	}
2384
2385	nfs_pub.np_mount = mp;
2386	nfs_pub.np_valid = 1;
2387	return (0);
2388}
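
/*
 * Illustrative sketch, not part of this file: the WebNFS public
 * filesystem is requested through the same export_args mechanism,
 * by adding MNT_EXPUBLIC and (optionally) a user-space index file
 * name, which vfs_setpublicfs() copyinstr()'s above.  Variable
 * names are hypothetical.
 */
#if 0
	ea.ex_flags = MNT_EXPORTED | MNT_EXPUBLIC;
	ea.ex_indexfile = uindexpath;	/* e.g. "index.html" in user space */
	error = vfs_export(mp, nep, &ea);
#endif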
2389
2390struct netcred *
2391vfs_export_lookup(mp, nep, nam)
2392	register struct mount *mp;
2393	struct netexport *nep;
2394	struct sockaddr *nam;
2395{
2396	register struct netcred *np;
2397	register struct radix_node_head *rnh;
2398	struct sockaddr *saddr;
2399
2400	np = NULL;
2401	if (mp->mnt_flag & MNT_EXPORTED) {
2402		/*
2403		 * Lookup in the export list first.
2404		 */
2405		if (nam != NULL) {
2406			saddr = nam;
2407			rnh = nep->ne_rtable[saddr->sa_family];
2408			if (rnh != NULL) {
2409				np = (struct netcred *)
2410					(*rnh->rnh_matchaddr)((caddr_t)saddr,
2411							      rnh);
2412				if (np && np->netc_rnodes->rn_flags & RNF_ROOT)
2413					np = NULL;
2414			}
2415		}
2416		/*
2417		 * If no address match, use the default if it exists.
2418		 */
2419		if (np == NULL && mp->mnt_flag & MNT_DEFEXPORTED)
2420			np = &nep->ne_defexported;
2421	}
2422	return (np);
2423}
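
/*
 * Illustrative sketch, not part of this file: an NFS-style server
 * holding the mount point a file handle resolved to, and the address
 * the request arrived from, consults the export data roughly like
 * this (hypothetical variable names):
 */
#if 0
	struct netcred *np;

	np = vfs_export_lookup(mp, nep, nam);
	if (np == NULL)
		return (EACCES);		/* not exported to this host */
	exflags = np->netc_exflags;		/* e.g. MNT_EXRDONLY */
	anoncredp = &np->netc_anon;		/* cred for anonymous requests */
#endif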
2424
2425/*
2426 * Perform msync on all vnodes under a mount point.
2427 * The mount point must be locked.
2428 */
2429void
2430vfs_msync(struct mount *mp, int flags)
{
2431	struct vnode *vp, *nvp;
2432	struct vm_object *obj;
2433	int anyio, tries;
2434
2435	tries = 5;
2436loop:
2437	anyio = 0;
2438	for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = nvp) {
2439
2440		nvp = LIST_NEXT(vp, v_mntvnodes);
2441
2442		if (vp->v_mount != mp) {
2443			goto loop;
2444		}
2445
2446		if (vp->v_flag & VXLOCK)	/* XXX: what if MNT_WAIT? */
2447			continue;
2448
2449		if (flags != MNT_WAIT) {
2450			obj = vp->v_object;
2451			if (obj == NULL || (obj->flags & OBJ_MIGHTBEDIRTY) == 0)
2452				continue;
2453			if (VOP_ISLOCKED(vp, NULL))
2454				continue;
2455		}
2456
2457		simple_lock(&vp->v_interlock);
2458		if (vp->v_object &&
2459		   (vp->v_object->flags & OBJ_MIGHTBEDIRTY)) {
2460			if (!vget(vp,
2461				LK_INTERLOCK | LK_EXCLUSIVE | LK_RETRY | LK_NOOBJ, curproc)) {
2462				if (vp->v_object) {
2463					vm_object_page_clean(vp->v_object, 0, 0, flags == MNT_WAIT ? OBJPC_SYNC : 0);
2464					anyio = 1;
2465				}
2466				vput(vp);
2467			}
2468		} else {
2469			simple_unlock(&vp->v_interlock);
2470		}
2471	}
2472	if (anyio && (--tries > 0))
2473		goto loop;
2474}
2475
2476/*
2477 * Create the VM object needed for VMIO and mmap support.  This
2478 * is done for all VREG files in the system.  Some filesystems might
2479 * also gain the additional metadata buffering capability of the
2480 * VMIO code by making the device node VMIO-backed as well.
2481 *
2482 * vp must be locked when vfs_object_create is called.
2483 */
2484int
2485vfs_object_create(vp, p, cred)
2486	struct vnode *vp;
2487	struct proc *p;
2488	struct ucred *cred;
2489{
2490	struct vattr vat;
2491	vm_object_t object;
2492	int error = 0;
2493
2494	if (!vn_isdisk(vp) && vn_canvmio(vp) == FALSE)
2495		return 0;
2496
2497retry:
2498	if ((object = vp->v_object) == NULL) {
2499		if (vp->v_type == VREG || vp->v_type == VDIR) {
2500			if ((error = VOP_GETATTR(vp, &vat, cred, p)) != 0)
2501				goto retn;
2502			object = vnode_pager_alloc(vp, vat.va_size, 0, 0);
2503		} else if (devsw(vp->v_rdev) != NULL) {
2504			/*
2505			 * This simply allocates the biggest object possible
2506			 * for a disk vnode.  This should be fixed, but doesn't
2507			 * cause any problems (yet).
2508			 */
2509			object = vnode_pager_alloc(vp, IDX_TO_OFF(INT_MAX), 0, 0);
2510		} else {
2511			goto retn;
2512		}
2513		/*
2514		 * Dereference the reference we just created.  This assumes
2515		 * that the object is associated with the vp.
2516		 */
2517		object->ref_count--;
2518		vp->v_usecount--;
2519	} else {
2520		if (object->flags & OBJ_DEAD) {
2521			VOP_UNLOCK(vp, 0, p);
2522			tsleep(object, PVM, "vodead", 0);
2523			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
2524			goto retry;
2525		}
2526	}
2527
2528	KASSERT(vp->v_object != NULL, ("vfs_object_create: NULL object"));
2529	vp->v_flag |= VOBJBUF;
2530
2531retn:
2532	return error;
2533}
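
/*
 * Illustrative sketch, not part of this file: a typical caller (the
 * vnode open path) creates the object once it holds the vnode lock:
 */
#if 0
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
	error = vfs_object_create(vp, p, p->p_ucred);
	VOP_UNLOCK(vp, 0, p);
#endif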
2534
2535static void
2536vfree(vp)
2537	struct vnode *vp;
2538{
2539	int s;
2540
2541	s = splbio();
2542	simple_lock(&vnode_free_list_slock);
2543	if (vp->v_flag & VTBFREE) {
2544		TAILQ_REMOVE(&vnode_tobefree_list, vp, v_freelist);
2545		vp->v_flag &= ~VTBFREE;
2546	}
2547	if (vp->v_flag & VAGE) {
2548		TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
2549	} else {
2550		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
2551	}
2552	freevnodes++;
2553	simple_unlock(&vnode_free_list_slock);
2554	vp->v_flag &= ~VAGE;
2555	vp->v_flag |= VFREE;
2556	splx(s);
2557}
2558
2559void
2560vbusy(vp)
2561	struct vnode *vp;
2562{
2563	int s;
2564
2565	s = splbio();
2566	simple_lock(&vnode_free_list_slock);
2567	if (vp->v_flag & VTBFREE) {
2568		TAILQ_REMOVE(&vnode_tobefree_list, vp, v_freelist);
2569		vp->v_flag &= ~VTBFREE;
2570	} else {
2571		TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
2572		freevnodes--;
2573	}
2574	simple_unlock(&vnode_free_list_slock);
2575	vp->v_flag &= ~(VFREE|VAGE);
2576	splx(s);
2577}
2578
2579/*
2580 * Record a process's interest in events which might happen to
2581 * a vnode.  Because poll uses the historic select-style interface
2582 * internally, this routine serves as both the ``check for any
2583 * pending events'' and the ``record my interest in future events''
2584 * functions.  (These are done together, while the lock is held,
2585 * to avoid race conditions.)
2586 */
2587int
2588vn_pollrecord(vp, p, events)
2589	struct vnode *vp;
2590	struct proc *p;
2591	short events;
2592{
2593	simple_lock(&vp->v_pollinfo.vpi_lock);
2594	if (vp->v_pollinfo.vpi_revents & events) {
2595		/*
2596		 * This leaves events we are not interested
2597		 * in available for the other process which
2598		 * presumably had requested them
2599		 * (otherwise they would never have been
2600		 * recorded).
2601		 */
2602		events &= vp->v_pollinfo.vpi_revents;
2603		vp->v_pollinfo.vpi_revents &= ~events;
2604
2605		simple_unlock(&vp->v_pollinfo.vpi_lock);
2606		return events;
2607	}
2608	vp->v_pollinfo.vpi_events |= events;
2609	selrecord(p, &vp->v_pollinfo.vpi_selinfo);
2610	simple_unlock(&vp->v_pollinfo.vpi_lock);
2611	return 0;
2612}
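
/*
 * Illustrative sketch, not part of this file: a filesystem's
 * VOP_POLL entry can be little more than a call to vn_pollrecord()
 * (the routine name is hypothetical):
 */
#if 0
static int
myfs_poll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int a_events;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{

	return (vn_pollrecord(ap->a_vp, ap->a_p, ap->a_events));
}
#endif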
2613
2614/*
2615 * Note the occurrence of an event.  If the VN_POLLEVENT macro is used,
2616 * it is possible for us to miss an event due to race conditions, but
2617 * that condition is expected to be rare, so for the moment it is the
2618 * preferred interface.
2619 */
2620void
2621vn_pollevent(vp, events)
2622	struct vnode *vp;
2623	short events;
2624{
2625	simple_lock(&vp->v_pollinfo.vpi_lock);
2626	if (vp->v_pollinfo.vpi_events & events) {
2627		/*
2628		 * We clear vpi_events so that we don't
2629		 * call selwakeup() twice if two events are
2630		 * posted before the polling process(es) is
2631		 * awakened.  This also ensures that we take at
2632		 * most one selwakeup() if the polling process
2633		 * is no longer interested.  However, it does
2634		 * mean that only one event can be noticed at
2635		 * a time.  (Perhaps we should only clear those
2636		 * event bits which we note?) XXX
2637		 */
2638		vp->v_pollinfo.vpi_events = 0;	/* &= ~events ??? */
2639		vp->v_pollinfo.vpi_revents |= events;
2640		selwakeup(&vp->v_pollinfo.vpi_selinfo);
2641	}
2642	simple_unlock(&vp->v_pollinfo.vpi_lock);
2643}
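
/*
 * Illustrative sketch, not part of this file: the producing side
 * (e.g. a filesystem that has just appended data) reports events via
 * the VN_POLLEVENT() macro mentioned above, which skips the function
 * call and the lock when no interest has been recorded:
 */
#if 0
	VN_POLLEVENT(vp, POLLIN | POLLRDNORM);
#endif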
2644
2645/*
2646 * Wake up anyone polling on vp because it is being revoked.
2647 * This depends on dead_poll() returning POLLHUP for correct
2648 * behavior.
2649 */
2650void
2651vn_pollgone(vp)
2652	struct vnode *vp;
2653{
2654	simple_lock(&vp->v_pollinfo.vpi_lock);
2655	if (vp->v_pollinfo.vpi_events) {
2656		vp->v_pollinfo.vpi_events = 0;
2657		selwakeup(&vp->v_pollinfo.vpi_selinfo);
2658	}
2659	simple_unlock(&vp->v_pollinfo.vpi_lock);
2660}
2661
2662
2664/*
2665 * Routine to create and manage a filesystem syncer vnode.
2666 */
2667#define sync_close ((int (*) __P((struct  vop_close_args *)))nullop)
2668static int	sync_fsync __P((struct  vop_fsync_args *));
2669static int	sync_inactive __P((struct  vop_inactive_args *));
2670static int	sync_reclaim  __P((struct  vop_reclaim_args *));
2671#define sync_lock ((int (*) __P((struct  vop_lock_args *)))vop_nolock)
2672#define sync_unlock ((int (*) __P((struct  vop_unlock_args *)))vop_nounlock)
2673static int	sync_print __P((struct vop_print_args *));
2674#define sync_islocked ((int(*) __P((struct vop_islocked_args *)))vop_noislocked)
2675
2676static vop_t **sync_vnodeop_p;
2677static struct vnodeopv_entry_desc sync_vnodeop_entries[] = {
2678	{ &vop_default_desc,	(vop_t *) vop_eopnotsupp },
2679	{ &vop_close_desc,	(vop_t *) sync_close },		/* close */
2680	{ &vop_fsync_desc,	(vop_t *) sync_fsync },		/* fsync */
2681	{ &vop_inactive_desc,	(vop_t *) sync_inactive },	/* inactive */
2682	{ &vop_reclaim_desc,	(vop_t *) sync_reclaim },	/* reclaim */
2683	{ &vop_lock_desc,	(vop_t *) sync_lock },		/* lock */
2684	{ &vop_unlock_desc,	(vop_t *) sync_unlock },	/* unlock */
2685	{ &vop_print_desc,	(vop_t *) sync_print },		/* print */
2686	{ &vop_islocked_desc,	(vop_t *) sync_islocked },	/* islocked */
2687	{ NULL, NULL }
2688};
2689static struct vnodeopv_desc sync_vnodeop_opv_desc =
2690	{ &sync_vnodeop_p, sync_vnodeop_entries };
2691
2692VNODEOP_SET(sync_vnodeop_opv_desc);
2693
2694/*
2695 * Create a new filesystem syncer vnode for the specified mount point.
2696 */
2697int
2698vfs_allocate_syncvnode(mp)
2699	struct mount *mp;
2700{
2701	struct vnode *vp;
2702	static long start, incr, next;
2703	int error;
2704
2705	/* Allocate a new vnode */
2706	if ((error = getnewvnode(VT_VFS, mp, sync_vnodeop_p, &vp)) != 0) {
2707		mp->mnt_syncer = NULL;
2708		return (error);
2709	}
2710	vp->v_type = VNON;
2711	/*
2712	 * Place the vnode onto the syncer worklist. We attempt to
2713	 * scatter them about on the list so that they will go off
2714	 * at evenly distributed times even if all the filesystems
2715	 * are mounted at once.
2716	 */
2717	next += incr;
2718	if (next == 0 || next > syncer_maxdelay) {
2719		start /= 2;
2720		incr /= 2;
2721		if (start == 0) {
2722			start = syncer_maxdelay / 2;
2723			incr = syncer_maxdelay;
2724		}
2725		next = start;
2726	}
2727	vn_syncer_add_to_worklist(vp, syncdelay > 0 ? next % syncdelay : 0);
2728	mp->mnt_syncer = vp;
2729	return (0);
2730}
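
/*
 * For example, with syncer_maxdelay at its usual value of 32, the
 * successive values of `next' computed above are 16, 8, 24, 4, 12,
 * 20, 28, 2, ...: each halving of `incr' interleaves new mounts
 * between those already scheduled, keeping the worklist buckets
 * roughly evenly loaded however many filesystems arrive.
 */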
2731
2732/*
2733 * Do a lazy sync of the filesystem.
2734 */
2735static int
2736sync_fsync(ap)
2737	struct vop_fsync_args /* {
2738		struct vnode *a_vp;
2739		struct ucred *a_cred;
2740		int a_waitfor;
2741		struct proc *a_p;
2742	} */ *ap;
2743{
2744	struct vnode *syncvp = ap->a_vp;
2745	struct mount *mp = syncvp->v_mount;
2746	struct proc *p = ap->a_p;
2747	int asyncflag;
2748
2749	/*
2750	 * We only need to do something if this is a lazy evaluation.
2751	 */
2752	if (ap->a_waitfor != MNT_LAZY)
2753		return (0);
2754
2755	/*
2756	 * Move ourselves to the back of the sync list.
2757	 */
2758	vn_syncer_add_to_worklist(syncvp, syncdelay);
2759
2760	/*
2761	 * Walk the list of vnodes pushing all that are dirty and
2762	 * not already on the sync list.
2763	 */
2764	simple_lock(&mountlist_slock);
2765	if (vfs_busy(mp, LK_EXCLUSIVE | LK_NOWAIT, &mountlist_slock, p) != 0) {
2766		simple_unlock(&mountlist_slock);
2767		return (0);
2768	}
2769	asyncflag = mp->mnt_flag & MNT_ASYNC;
2770	mp->mnt_flag &= ~MNT_ASYNC;
2771	vfs_msync(mp, MNT_NOWAIT);
2772	VFS_SYNC(mp, MNT_LAZY, ap->a_cred, p);
2773	if (asyncflag)
2774		mp->mnt_flag |= MNT_ASYNC;
2775	vfs_unbusy(mp, p);
2776	return (0);
2777}
2778
2779/*
2780 * The syncer vnode is no longer referenced.
2781 */
2782static int
2783sync_inactive(ap)
2784	struct vop_inactive_args /* {
2785		struct vnode *a_vp;
2786		struct proc *a_p;
2787	} */ *ap;
2788{
2789
2790	vgone(ap->a_vp);
2791	return (0);
2792}
2793
2794/*
2795 * The syncer vnode is no longer needed and is being decommissioned.
2796 *
2797 * Modifications to the worklist must be protected at splbio().
2798 */
2799static int
2800sync_reclaim(ap)
2801	struct vop_reclaim_args /* {
2802		struct vnode *a_vp;
2803	} */ *ap;
2804{
2805	struct vnode *vp = ap->a_vp;
2806	int s;
2807
2808	s = splbio();
2809	vp->v_mount->mnt_syncer = NULL;
2810	if (vp->v_flag & VONWORKLST) {
2811		LIST_REMOVE(vp, v_synclist);
2812		vp->v_flag &= ~VONWORKLST;
2813	}
2814	splx(s);
2815
2816	return (0);
2817}
2818
2819/*
2820 * Print out a syncer vnode.
2821 */
2822static int
2823sync_print(ap)
2824	struct vop_print_args /* {
2825		struct vnode *a_vp;
2826	} */ *ap;
2827{
2828	struct vnode *vp = ap->a_vp;
2829
2830	printf("syncer vnode");
2831	if (vp->v_vnlock != NULL)
2832		lockmgr_printinfo(vp->v_vnlock);
2833	printf("\n");
2834	return (0);
2835}
2836
2837/*
2838 * Extract the dev_t from a VBLK or VCHR vnode.
2839 */
2840dev_t
2841vn_todev(vp)
2842	struct vnode *vp;
2843{
2844	if (vp->v_type != VBLK && vp->v_type != VCHR)
2845		return (NODEV);
2846	return (vp->v_rdev);
2847}
2848
2849/*
2850 * Check whether a vnode represents a disk device.
2851 */
2852int
2853vn_isdisk(vp)
2854	struct vnode *vp;
2855{
2856	if (vp->v_type != VBLK && vp->v_type != VCHR)
2857		return (0);
2858	if (!devsw(vp->v_rdev))
2859		return (0);
2860	if (!(devsw(vp->v_rdev)->d_flags & D_DISK))
2861		return (0);
2862	return (1);
2863}
2864
2865