vfs_subr.c revision 58132
1/*
2 * Copyright (c) 1989, 1993
3 *	The Regents of the University of California.  All rights reserved.
4 * (c) UNIX System Laboratories, Inc.
5 * All or some portions of this file are derived from material licensed
6 * to the University of California by American Telephone and Telegraph
7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8 * the permission of UNIX System Laboratories, Inc.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 *    notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 *    notice, this list of conditions and the following disclaimer in the
17 *    documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 *    must display the following acknowledgement:
20 *	This product includes software developed by the University of
21 *	California, Berkeley and its contributors.
22 * 4. Neither the name of the University nor the names of its contributors
23 *    may be used to endorse or promote products derived from this software
24 *    without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * SUCH DAMAGE.
37 *
38 *	@(#)vfs_subr.c	8.31 (Berkeley) 5/26/95
39 * $FreeBSD: head/sys/kern/vfs_subr.c 58132 2000-03-16 08:51:55Z phk $
40 */
41
42/*
43 * External virtual filesystem routines
44 */
45#include "opt_ddb.h"
46
47#include <sys/param.h>
48#include <sys/systm.h>
49#include <sys/buf.h>
50#include <sys/conf.h>
51#include <sys/dirent.h>
52#include <sys/domain.h>
53#include <sys/eventhandler.h>
54#include <sys/fcntl.h>
55#include <sys/kernel.h>
56#include <sys/kthread.h>
57#include <sys/malloc.h>
58#include <sys/mount.h>
59#include <sys/namei.h>
60#include <sys/proc.h>
61#include <sys/reboot.h>
62#include <sys/socket.h>
63#include <sys/stat.h>
64#include <sys/sysctl.h>
65#include <sys/vmmeter.h>
66#include <sys/vnode.h>
67
68#include <machine/limits.h>
69
70#include <vm/vm.h>
71#include <vm/vm_object.h>
72#include <vm/vm_extern.h>
73#include <vm/pmap.h>
74#include <vm/vm_map.h>
75#include <vm/vm_page.h>
76#include <vm/vm_pager.h>
77#include <vm/vnode_pager.h>
78#include <vm/vm_zone.h>
79
80static MALLOC_DEFINE(M_NETADDR, "Export Host", "Export host address structure");
81
82static void	insmntque __P((struct vnode *vp, struct mount *mp));
83static void	vclean __P((struct vnode *vp, int flags, struct proc *p));
84static void	vfree __P((struct vnode *));
85static unsigned long	numvnodes;
86SYSCTL_INT(_debug, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0, "");
87
88enum vtype iftovt_tab[16] = {
89	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
90	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
91};
92int vttoif_tab[9] = {
93	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
94	S_IFSOCK, S_IFIFO, S_IFMT,
95};
96
97static TAILQ_HEAD(freelst, vnode) vnode_free_list;	/* vnode free list */
98struct tobefreelist vnode_tobefree_list;	/* vnodes queued to be freed */
99
100static u_long wantfreevnodes = 25;
101SYSCTL_INT(_debug, OID_AUTO, wantfreevnodes, CTLFLAG_RW, &wantfreevnodes, 0, "");
102static u_long freevnodes = 0;
103SYSCTL_INT(_debug, OID_AUTO, freevnodes, CTLFLAG_RD, &freevnodes, 0, "");
104
105static int reassignbufcalls;
106SYSCTL_INT(_vfs, OID_AUTO, reassignbufcalls, CTLFLAG_RW, &reassignbufcalls, 0, "");
107static int reassignbufloops;
108SYSCTL_INT(_vfs, OID_AUTO, reassignbufloops, CTLFLAG_RW, &reassignbufloops, 0, "");
109static int reassignbufsortgood;
110SYSCTL_INT(_vfs, OID_AUTO, reassignbufsortgood, CTLFLAG_RW, &reassignbufsortgood, 0, "");
111static int reassignbufsortbad;
112SYSCTL_INT(_vfs, OID_AUTO, reassignbufsortbad, CTLFLAG_RW, &reassignbufsortbad, 0, "");
113static int reassignbufmethod = 1;
114SYSCTL_INT(_vfs, OID_AUTO, reassignbufmethod, CTLFLAG_RW, &reassignbufmethod, 0, "");
115
116#ifdef ENABLE_VFS_IOOPT
117int vfs_ioopt = 0;
118SYSCTL_INT(_vfs, OID_AUTO, ioopt, CTLFLAG_RW, &vfs_ioopt, 0, "");
119#endif
120
121struct mntlist mountlist = TAILQ_HEAD_INITIALIZER(mountlist); /* mounted fs */
122struct simplelock mountlist_slock;
123struct simplelock mntvnode_slock;
124int	nfs_mount_type = -1;
125#ifndef NULL_SIMPLELOCKS
126static struct simplelock mntid_slock;
127static struct simplelock vnode_free_list_slock;
128static struct simplelock spechash_slock;
129#endif
130struct nfs_public nfs_pub;	/* publicly exported FS */
131static vm_zone_t vnode_zone;
132
133/*
134 * The workitem queue.
135 */
136#define SYNCER_MAXDELAY		32
137static int syncer_maxdelay = SYNCER_MAXDELAY;	/* maximum delay time */
138time_t syncdelay = 30;		/* max time to delay syncing data */
139time_t filedelay = 30;		/* time to delay syncing files */
140SYSCTL_INT(_kern, OID_AUTO, filedelay, CTLFLAG_RW, &filedelay, 0, "");
141time_t dirdelay = 29;		/* time to delay syncing directories */
142SYSCTL_INT(_kern, OID_AUTO, dirdelay, CTLFLAG_RW, &dirdelay, 0, "");
143time_t metadelay = 28;		/* time to delay syncing metadata */
144SYSCTL_INT(_kern, OID_AUTO, metadelay, CTLFLAG_RW, &metadelay, 0, "");
145static int rushjob;			/* number of slots to run ASAP */
146static int stat_rush_requests;	/* number of times I/O speeded up */
147SYSCTL_INT(_debug, OID_AUTO, rush_requests, CTLFLAG_RW, &stat_rush_requests, 0, "");
148
149static int syncer_delayno = 0;
150static long syncer_mask;
151LIST_HEAD(synclist, vnode);
152static struct synclist *syncer_workitem_pending;
153
154int desiredvnodes;
155SYSCTL_INT(_kern, KERN_MAXVNODES, maxvnodes, CTLFLAG_RW,
156    &desiredvnodes, 0, "Maximum number of vnodes");
157
158static void	vfs_free_addrlist __P((struct netexport *nep));
159static int	vfs_free_netcred __P((struct radix_node *rn, void *w));
160static int	vfs_hang_addrlist __P((struct mount *mp, struct netexport *nep,
161				       struct export_args *argp));
162
163/*
164 * Initialize the vnode management data structures.
165 */
166void
167vntblinit()
168{
169
170	desiredvnodes = maxproc + cnt.v_page_count / 4;
171	simple_lock_init(&mntvnode_slock);
172	simple_lock_init(&mntid_slock);
173	simple_lock_init(&spechash_slock);
174	TAILQ_INIT(&vnode_free_list);
175	TAILQ_INIT(&vnode_tobefree_list);
176	simple_lock_init(&vnode_free_list_slock);
177	vnode_zone = zinit("VNODE", sizeof (struct vnode), 0, 0, 5);
178	/*
179	 * Initialize the filesystem syncer.
180	 */
181	syncer_workitem_pending = hashinit(syncer_maxdelay, M_VNODE,
182		&syncer_mask);
183	syncer_maxdelay = syncer_mask + 1;
184}
185
186/*
187 * Mark a mount point as busy. Used to synchronize access and to delay
188 * unmounting. Interlock is not released on failure.
189 */
190int
191vfs_busy(mp, flags, interlkp, p)
192	struct mount *mp;
193	int flags;
194	struct simplelock *interlkp;
195	struct proc *p;
196{
197	int lkflags;
198
199	if (mp->mnt_kern_flag & MNTK_UNMOUNT) {
200		if (flags & LK_NOWAIT)
201			return (ENOENT);
202		mp->mnt_kern_flag |= MNTK_MWAIT;
203		if (interlkp) {
204			simple_unlock(interlkp);
205		}
206		/*
207		 * Since all busy locks are shared except the exclusive
208		 * lock granted when unmounting, the only place that a
209		 * wakeup needs to be done is at the release of the
210		 * exclusive lock at the end of dounmount.
211		 */
212		tsleep((caddr_t)mp, PVFS, "vfs_busy", 0);
213		if (interlkp) {
214			simple_lock(interlkp);
215		}
216		return (ENOENT);
217	}
218	lkflags = LK_SHARED | LK_NOPAUSE;
219	if (interlkp)
220		lkflags |= LK_INTERLOCK;
221	if (lockmgr(&mp->mnt_lock, lkflags, interlkp, p))
222		panic("vfs_busy: unexpected lock failure");
223	return (0);
224}
225
226/*
227 * Free a busy filesystem.
228 */
229void
230vfs_unbusy(mp, p)
231	struct mount *mp;
232	struct proc *p;
233{
234
235	lockmgr(&mp->mnt_lock, LK_RELEASE, NULL, p);
236}
237
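/*
 * Illustrative sketch (not compiled): the usual pattern for walking the
 * mount list safely, as the lockedvnodes DDB command near the end of
 * this file does.  Each mount point is busied so it cannot be unmounted
 * underneath us; on success vfs_busy() consumes the interlock.
 */
#if 0
	struct mount *mp, *nmp;

	simple_lock(&mountlist_slock);
	for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
		if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock, p)) {
			nmp = TAILQ_NEXT(mp, mnt_list);
			continue;
		}
		/* ... work on mp ... */
		simple_lock(&mountlist_slock);
		nmp = TAILQ_NEXT(mp, mnt_list);
		vfs_unbusy(mp, p);
	}
	simple_unlock(&mountlist_slock);
#endif
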
238/*
239 * Lookup a filesystem type, and if found allocate and initialize
240 * a mount structure for it.
241 *
242 * Devname is usually updated by mount(8) after booting.
243 */
244int
245vfs_rootmountalloc(fstypename, devname, mpp)
246	char *fstypename;
247	char *devname;
248	struct mount **mpp;
249{
250	struct proc *p = curproc;	/* XXX */
251	struct vfsconf *vfsp;
252	struct mount *mp;
253
254	if (fstypename == NULL)
255		return (ENODEV);
256	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
257		if (!strcmp(vfsp->vfc_name, fstypename))
258			break;
259	if (vfsp == NULL)
260		return (ENODEV);
261	mp = malloc((u_long)sizeof(struct mount), M_MOUNT, M_WAITOK);
262	bzero((char *)mp, (u_long)sizeof(struct mount));
263	lockinit(&mp->mnt_lock, PVFS, "vfslock", 0, LK_NOPAUSE);
264	(void)vfs_busy(mp, LK_NOWAIT, 0, p);
265	LIST_INIT(&mp->mnt_vnodelist);
266	mp->mnt_vfc = vfsp;
267	mp->mnt_op = vfsp->vfc_vfsops;
268	mp->mnt_flag = MNT_RDONLY;
269	mp->mnt_vnodecovered = NULLVP;
270	vfsp->vfc_refcount++;
271	mp->mnt_iosize_max = DFLTPHYS;
272	mp->mnt_stat.f_type = vfsp->vfc_typenum;
273	mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;
274	strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
275	mp->mnt_stat.f_mntonname[0] = '/';
276	mp->mnt_stat.f_mntonname[1] = 0;
277	(void) copystr(devname, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, 0);
278	*mpp = mp;
279	return (0);
280}
281
282/*
283 * Find an appropriate filesystem to use for the root. If a filesystem
284 * has not been preselected, walk through the list of known filesystems
285 * trying those that have mountroot routines until one
286 * works or we have tried them all.
287 */
288#ifdef notdef	/* XXX JH */
289int
290lite2_vfs_mountroot()
291{
292	struct vfsconf *vfsp;
293	extern int (*lite2_mountroot) __P((void));
294	int error;
295
296	if (lite2_mountroot != NULL)
297		return ((*lite2_mountroot)());
298	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) {
299		if (vfsp->vfc_mountroot == NULL)
300			continue;
301		if ((error = (*vfsp->vfc_mountroot)()) == 0)
302			return (0);
303		printf("%s_mountroot failed: %d\n", vfsp->vfc_name, error);
304	}
305	return (ENODEV);
306}
307#endif
308
309/*
310 * Lookup a mount point by filesystem identifier.
311 */
312struct mount *
313vfs_getvfs(fsid)
314	fsid_t *fsid;
315{
316	register struct mount *mp;
317
318	simple_lock(&mountlist_slock);
319	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
320		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
321		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
322			simple_unlock(&mountlist_slock);
323			return (mp);
324		}
325	}
326	simple_unlock(&mountlist_slock);
327	return ((struct mount *) 0);
328}
329
330/*
331 * Get a new unique fsid.  Try to make its val[0] unique, since this value
332 * will be used to create fake device numbers for stat().  Also try (but
333 * not so hard) to make its val[0] unique mod 2^16, since some emulators only
334 * support 16-bit device numbers.  We end up with unique val[0]'s for the
335 * first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls.
336 *
337 * Keep in mind that several mounts may be running in parallel.  Starting
338 * the search one past where the previous search terminated is both a
339 * micro-optimization and a defense against returning the same fsid to
340 * different mounts.
341 */
342void
343vfs_getnewfsid(mp)
344	struct mount *mp;
345{
346	static u_int16_t mntid_base;
347	fsid_t tfsid;
348	int mtype;
349
350	simple_lock(&mntid_slock);
351	mtype = mp->mnt_vfc->vfc_typenum;
352	tfsid.val[1] = mtype;
353	mtype = (mtype & 0xFF) << 16;
354	for (;;) {
355		tfsid.val[0] = makeudev(255, mtype | mntid_base++);
356		if (vfs_getvfs(&tfsid) == NULL)
357			break;
358	}
359	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
360	mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
361	simple_unlock(&mntid_slock);
362}
363
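/*
 * Worked example (illustrative): for a filesystem with vfc_typenum 5,
 * the first mount gets tfsid.val[1] = 5 and tfsid.val[0] =
 * makeudev(255, (5 << 16) | 0).  Later mounts bump mntid_base, so the
 * low 16 bits of the minor number vary while bits 16-23 still encode
 * the filesystem type.
 */
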
364/*
365 * Knob to control the precision of file timestamps:
366 *
367 *   0 = seconds only; nanoseconds zeroed.
368 *   1 = seconds and nanoseconds, accurate within 1/HZ.
369 *   2 = seconds and nanoseconds, truncated to microseconds.
370 * >=3 = seconds and nanoseconds, maximum precision.
371 */
372enum { TSP_SEC, TSP_HZ, TSP_USEC, TSP_NSEC };
373
374static int timestamp_precision = TSP_SEC;
375SYSCTL_INT(_vfs, OID_AUTO, timestamp_precision, CTLFLAG_RW,
376    &timestamp_precision, 0, "");
377
378/*
379 * Get a current timestamp.
380 */
381void
382vfs_timestamp(tsp)
383	struct timespec *tsp;
384{
385	struct timeval tv;
386
387	switch (timestamp_precision) {
388	case TSP_SEC:
389		tsp->tv_sec = time_second;
390		tsp->tv_nsec = 0;
391		break;
392	case TSP_HZ:
393		getnanotime(tsp);
394		break;
395	case TSP_USEC:
396		microtime(&tv);
397		TIMEVAL_TO_TIMESPEC(&tv, tsp);
398		break;
399	case TSP_NSEC:
400	default:
401		nanotime(tsp);
402		break;
403	}
404}
405
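/*
 * Usage sketch (hypothetical fragment, not compiled): a filesystem
 * stamping an inode's modification time might do the following; the
 * resolution actually stored is governed by the vfs.timestamp_precision
 * sysctl above.  "ip" is an invented filesystem-private inode.
 */
#if 0
	struct timespec ts;

	vfs_timestamp(&ts);
	ip->i_mtime = ts.tv_sec;
	ip->i_mtimensec = ts.tv_nsec;
#endif
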
406/*
407 * Set vnode attributes to VNOVAL
408 */
409void
410vattr_null(vap)
411	register struct vattr *vap;
412{
413
414	vap->va_type = VNON;
415	vap->va_size = VNOVAL;
416	vap->va_bytes = VNOVAL;
417	vap->va_mode = VNOVAL;
418	vap->va_nlink = VNOVAL;
419	vap->va_uid = VNOVAL;
420	vap->va_gid = VNOVAL;
421	vap->va_fsid = VNOVAL;
422	vap->va_fileid = VNOVAL;
423	vap->va_blocksize = VNOVAL;
424	vap->va_rdev = VNOVAL;
425	vap->va_atime.tv_sec = VNOVAL;
426	vap->va_atime.tv_nsec = VNOVAL;
427	vap->va_mtime.tv_sec = VNOVAL;
428	vap->va_mtime.tv_nsec = VNOVAL;
429	vap->va_ctime.tv_sec = VNOVAL;
430	vap->va_ctime.tv_nsec = VNOVAL;
431	vap->va_flags = VNOVAL;
432	vap->va_gen = VNOVAL;
433	vap->va_vaflags = 0;
434}
435
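/*
 * Usage sketch (not compiled): callers normally clear a vattr with
 * vattr_null() and then set only the fields they mean to change, so
 * that VOP_SETATTR() can treat VNOVAL fields as "leave alone".
 * Truncating a file to zero length might look like:
 */
#if 0
	struct vattr va;

	vattr_null(&va);
	va.va_size = 0;
	error = VOP_SETATTR(vp, &va, p->p_ucred, p);
#endif
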
436/*
437 * Routines having to do with the management of the vnode table.
438 */
439extern vop_t **dead_vnodeop_p;
440
441/*
442 * Return the next vnode from the free list.
443 */
444int
445getnewvnode(tag, mp, vops, vpp)
446	enum vtagtype tag;
447	struct mount *mp;
448	vop_t **vops;
449	struct vnode **vpp;
450{
451	int s;
452	struct proc *p = curproc;	/* XXX */
453	struct vnode *vp, *tvp, *nvp;
454	vm_object_t object;
455	TAILQ_HEAD(freelst, vnode) vnode_tmp_list;
456
457	/*
458	 * We take the least recently used vnode from the freelist
459	 * if we can get it, it has no cached pages, and no
460	 * namecache entries point at it.
461	 * Otherwise we allocate a new vnode.
462	 */
463
464	s = splbio();
465	simple_lock(&vnode_free_list_slock);
466	TAILQ_INIT(&vnode_tmp_list);
467
468	for (vp = TAILQ_FIRST(&vnode_tobefree_list); vp; vp = nvp) {
469		nvp = TAILQ_NEXT(vp, v_freelist);
470		TAILQ_REMOVE(&vnode_tobefree_list, vp, v_freelist);
471		if (vp->v_flag & VAGE) {
472			TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
473		} else {
474			TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
475		}
476		vp->v_flag &= ~(VTBFREE|VAGE);
477		vp->v_flag |= VFREE;
478		if (vp->v_usecount)
479			panic("tobe free vnode isn't");
480		freevnodes++;
481	}
482
483	if (wantfreevnodes && freevnodes < wantfreevnodes) {
484		vp = NULL;
485	} else if (!wantfreevnodes && freevnodes <= desiredvnodes) {
486		/*
487		 * XXX: this is only here to be backwards compatible
488		 */
489		vp = NULL;
490	} else {
491		for (vp = TAILQ_FIRST(&vnode_free_list); vp; vp = nvp) {
492			nvp = TAILQ_NEXT(vp, v_freelist);
493			if (!simple_lock_try(&vp->v_interlock))
494				continue;
495			if (vp->v_usecount)
496				panic("free vnode isn't");
497
498			object = vp->v_object;
499			if (object && (object->resident_page_count || object->ref_count)) {
500			printf("object in inconsistent state: RPC: %d, RC: %d\n",
501					object->resident_page_count, object->ref_count);
502				/* Don't recycle if it's caching some pages */
503				TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
504				TAILQ_INSERT_TAIL(&vnode_tmp_list, vp, v_freelist);
505				continue;
506			} else if (LIST_FIRST(&vp->v_cache_src)) {
507				/* Don't recycle if active in the namecache */
508				simple_unlock(&vp->v_interlock);
509				continue;
510			} else {
511				break;
512			}
513		}
514	}
515
516	for (tvp = TAILQ_FIRST(&vnode_tmp_list); tvp; tvp = nvp) {
517		nvp = TAILQ_NEXT(tvp, v_freelist);
518		TAILQ_REMOVE(&vnode_tmp_list, tvp, v_freelist);
519		TAILQ_INSERT_TAIL(&vnode_free_list, tvp, v_freelist);
520		simple_unlock(&tvp->v_interlock);
521	}
522
523	if (vp) {
524		vp->v_flag |= VDOOMED;
525		TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
526		freevnodes--;
527		simple_unlock(&vnode_free_list_slock);
528		cache_purge(vp);
529		vp->v_lease = NULL;
530		if (vp->v_type != VBAD) {
531			vgonel(vp, p);
532		} else {
533			simple_unlock(&vp->v_interlock);
534		}
535
536#ifdef INVARIANTS
537		{
538			int s;
539
540			if (vp->v_data)
541				panic("cleaned vnode isn't");
542			s = splbio();
543			if (vp->v_numoutput)
544				panic("Clean vnode has pending I/O's");
545			splx(s);
546		}
547#endif
548		vp->v_flag = 0;
549		vp->v_lastw = 0;
550		vp->v_lasta = 0;
551		vp->v_cstart = 0;
552		vp->v_clen = 0;
553		vp->v_socket = 0;
554		vp->v_writecount = 0;	/* XXX */
555	} else {
556		simple_unlock(&vnode_free_list_slock);
557		vp = (struct vnode *) zalloc(vnode_zone);
558		bzero((char *) vp, sizeof *vp);
559		simple_lock_init(&vp->v_interlock);
560		vp->v_dd = vp;
561		cache_purge(vp);
562		LIST_INIT(&vp->v_cache_src);
563		TAILQ_INIT(&vp->v_cache_dst);
564		numvnodes++;
565	}
566
567	TAILQ_INIT(&vp->v_cleanblkhd);
568	TAILQ_INIT(&vp->v_dirtyblkhd);
569	vp->v_type = VNON;
570	vp->v_tag = tag;
571	vp->v_op = vops;
572	insmntque(vp, mp);
573	*vpp = vp;
574	vp->v_usecount = 1;
575	vp->v_data = 0;
576	splx(s);
577
578	vfs_object_create(vp, p, p->p_ucred);
579	return (0);
580}
581
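/*
 * Usage sketch (assumptions marked): a filesystem allocates vnodes
 * through getnewvnode() and then attaches its private data.  VT_UFS
 * and ffs_vnodeop_p are borrowed from FFS purely as a familiar
 * example; "ip" is hypothetical.
 */
#if 0
	struct vnode *vp;

	error = getnewvnode(VT_UFS, mp, ffs_vnodeop_p, &vp);
	if (error)
		return (error);
	vp->v_data = ip;
#endif
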
582/*
583 * Move a vnode from one mount queue to another.
584 */
585static void
586insmntque(vp, mp)
587	register struct vnode *vp;
588	register struct mount *mp;
589{
590
591	simple_lock(&mntvnode_slock);
592	/*
593	 * Delete from old mount point vnode list, if on one.
594	 */
595	if (vp->v_mount != NULL)
596		LIST_REMOVE(vp, v_mntvnodes);
597	/*
598	 * Insert into list of vnodes for the new mount point, if available.
599	 */
600	if ((vp->v_mount = mp) == NULL) {
601		simple_unlock(&mntvnode_slock);
602		return;
603	}
604	LIST_INSERT_HEAD(&mp->mnt_vnodelist, vp, v_mntvnodes);
605	simple_unlock(&mntvnode_slock);
606}
607
608/*
609 * Update outstanding I/O count and do wakeup if requested.
610 */
611void
612vwakeup(bp)
613	register struct buf *bp;
614{
615	register struct vnode *vp;
616
617	bp->b_flags &= ~B_WRITEINPROG;
618	if ((vp = bp->b_vp)) {
619		vp->v_numoutput--;
620		if (vp->v_numoutput < 0)
621			panic("vwakeup: neg numoutput");
622		if ((vp->v_numoutput == 0) && (vp->v_flag & VBWAIT)) {
623			vp->v_flag &= ~VBWAIT;
624			wakeup((caddr_t) &vp->v_numoutput);
625		}
626	}
627}
628
629/*
630 * Flush out and invalidate all buffers associated with a vnode.
631 * Called with the underlying object locked.
632 */
633int
634vinvalbuf(vp, flags, cred, p, slpflag, slptimeo)
635	register struct vnode *vp;
636	int flags;
637	struct ucred *cred;
638	struct proc *p;
639	int slpflag, slptimeo;
640{
641	register struct buf *bp;
642	struct buf *nbp, *blist;
643	int s, error;
644	vm_object_t object;
645
646	if (flags & V_SAVE) {
647		s = splbio();
648		while (vp->v_numoutput) {
649			vp->v_flag |= VBWAIT;
650			error = tsleep((caddr_t)&vp->v_numoutput,
651			    slpflag | (PRIBIO + 1), "vinvlbuf", slptimeo);
652			if (error) {
653				splx(s);
654				return (error);
655			}
656		}
657		if (!TAILQ_EMPTY(&vp->v_dirtyblkhd)) {
658			splx(s);
659			if ((error = VOP_FSYNC(vp, cred, MNT_WAIT, p)) != 0)
660				return (error);
661			s = splbio();
662			if (vp->v_numoutput > 0 ||
663			    !TAILQ_EMPTY(&vp->v_dirtyblkhd))
664				panic("vinvalbuf: dirty bufs");
665		}
666		splx(s);
667  	}
668	s = splbio();
669	for (;;) {
670		blist = TAILQ_FIRST(&vp->v_cleanblkhd);
671		if (!blist)
672			blist = TAILQ_FIRST(&vp->v_dirtyblkhd);
673		if (!blist)
674			break;
675
676		for (bp = blist; bp; bp = nbp) {
677			nbp = TAILQ_NEXT(bp, b_vnbufs);
678			if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
679				error = BUF_TIMELOCK(bp,
680				    LK_EXCLUSIVE | LK_SLEEPFAIL,
681				    "vinvalbuf", slpflag, slptimeo);
682				if (error == ENOLCK)
683					break;
684				splx(s);
685				return (error);
686			}
687			/*
688			 * XXX Since there are no node locks for NFS, I
689			 * believe there is a slight chance that a delayed
690			 * write will occur while sleeping just above, so
691			 * check for it.  Note that vfs_bio_awrite expects
692			 * buffers to reside on a queue, while VOP_BWRITE and
693			 * brelse do not.
694			 */
695			if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) &&
696				(flags & V_SAVE)) {
697
698				if (bp->b_vp == vp) {
699					if (bp->b_flags & B_CLUSTEROK) {
700						BUF_UNLOCK(bp);
701						vfs_bio_awrite(bp);
702					} else {
703						bremfree(bp);
704						bp->b_flags |= B_ASYNC;
705						VOP_BWRITE(bp->b_vp, bp);
706					}
707				} else {
708					bremfree(bp);
709					(void) VOP_BWRITE(bp->b_vp, bp);
710				}
711				break;
712			}
713			bremfree(bp);
714			bp->b_flags |= (B_INVAL | B_NOCACHE | B_RELBUF);
715			bp->b_flags &= ~B_ASYNC;
716			brelse(bp);
717		}
718	}
719
720	while (vp->v_numoutput > 0) {
721		vp->v_flag |= VBWAIT;
722		tsleep(&vp->v_numoutput, PVM, "vnvlbv", 0);
723	}
724
725	splx(s);
726
727	/*
728	 * Destroy the copy in the VM cache, too.
729	 */
730	simple_lock(&vp->v_interlock);
731	object = vp->v_object;
732	if (object != NULL) {
733		vm_object_page_remove(object, 0, 0,
734			(flags & V_SAVE) ? TRUE : FALSE);
735	}
736	simple_unlock(&vp->v_interlock);
737
738	if (!TAILQ_EMPTY(&vp->v_dirtyblkhd) || !TAILQ_EMPTY(&vp->v_cleanblkhd))
739		panic("vinvalbuf: flush failed");
740	return (0);
741}
742
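/*
 * Usage note (illustrative): V_SAVE writes dirty buffers out before
 * invalidating them, so callers about to tear down the backing object,
 * such as vclean() below, use
 *	vinvalbuf(vp, V_SAVE, NOCRED, p, 0, 0);
 * while callers that intend to discard the data outright pass 0 for
 * the flags.
 */
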
743/*
744 * Truncate a file's buffer and pages to a specified length.  This
745 * is in lieu of the old vinvalbuf mechanism, which performed unneeded
746 * sync activity.
747 */
748int
749vtruncbuf(vp, cred, p, length, blksize)
750	register struct vnode *vp;
751	struct ucred *cred;
752	struct proc *p;
753	off_t length;
754	int blksize;
755{
756	register struct buf *bp;
757	struct buf *nbp;
758	int s, anyfreed;
759	int trunclbn;
760
761	/*
762	 * Round up to the *next* lbn.
763	 */
764	trunclbn = (length + blksize - 1) / blksize;
765
766	s = splbio();
767restart:
768	anyfreed = 1;
769	for (;anyfreed;) {
770		anyfreed = 0;
771		for (bp = TAILQ_FIRST(&vp->v_cleanblkhd); bp; bp = nbp) {
772			nbp = TAILQ_NEXT(bp, b_vnbufs);
773			if (bp->b_lblkno >= trunclbn) {
774				if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
775					BUF_LOCK(bp, LK_EXCLUSIVE|LK_SLEEPFAIL);
776					goto restart;
777				} else {
778					bremfree(bp);
779					bp->b_flags |= (B_INVAL | B_RELBUF);
780					bp->b_flags &= ~B_ASYNC;
781					brelse(bp);
782					anyfreed = 1;
783				}
784				if (nbp &&
785				    (((nbp->b_xflags & BX_VNCLEAN) == 0) ||
786				    (nbp->b_vp != vp) ||
787				    (nbp->b_flags & B_DELWRI))) {
788					goto restart;
789				}
790			}
791		}
792
793		for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
794			nbp = TAILQ_NEXT(bp, b_vnbufs);
795			if (bp->b_lblkno >= trunclbn) {
796				if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
797					BUF_LOCK(bp, LK_EXCLUSIVE|LK_SLEEPFAIL);
798					goto restart;
799				} else {
800					bremfree(bp);
801					bp->b_flags |= (B_INVAL | B_RELBUF);
802					bp->b_flags &= ~B_ASYNC;
803					brelse(bp);
804					anyfreed = 1;
805				}
806				if (nbp &&
807				    (((nbp->b_xflags & BX_VNDIRTY) == 0) ||
808				    (nbp->b_vp != vp) ||
809				    (nbp->b_flags & B_DELWRI) == 0)) {
810					goto restart;
811				}
812			}
813		}
814	}
815
816	if (length > 0) {
817restartsync:
818		for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
819			nbp = TAILQ_NEXT(bp, b_vnbufs);
820			if ((bp->b_flags & B_DELWRI) && (bp->b_lblkno < 0)) {
821				if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
822					BUF_LOCK(bp, LK_EXCLUSIVE|LK_SLEEPFAIL);
823					goto restart;
824				} else {
825					bremfree(bp);
826					if (bp->b_vp == vp) {
827						bp->b_flags |= B_ASYNC;
828					} else {
829						bp->b_flags &= ~B_ASYNC;
830					}
831					VOP_BWRITE(bp->b_vp, bp);
832				}
833				goto restartsync;
834			}
835
836		}
837	}
838
839	while (vp->v_numoutput > 0) {
840		vp->v_flag |= VBWAIT;
841		tsleep(&vp->v_numoutput, PVM, "vbtrunc", 0);
842	}
843
844	splx(s);
845
846	vnode_pager_setsize(vp, length);
847
848	return (0);
849}
850
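/*
 * Worked example (illustrative): with blksize 8192, truncating to
 * length 10000 gives trunclbn = (10000 + 8191) / 8192 = 2, so the
 * buffer for lbn 1 (bytes 8192-16383, which still contains byte 9999)
 * survives while buffers at lbn 2 and beyond are invalidated.
 */
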
851/*
852 * Associate a buffer with a vnode.
853 */
854void
855bgetvp(vp, bp)
856	register struct vnode *vp;
857	register struct buf *bp;
858{
859	int s;
860
861	KASSERT(bp->b_vp == NULL, ("bgetvp: not free"));
862
863	vhold(vp);
864	bp->b_vp = vp;
865	bp->b_dev = vn_todev(vp);
866	/*
867	 * Insert onto list for new vnode.
868	 */
869	s = splbio();
870	bp->b_xflags |= BX_VNCLEAN;
871	bp->b_xflags &= ~BX_VNDIRTY;
872	TAILQ_INSERT_TAIL(&vp->v_cleanblkhd, bp, b_vnbufs);
873	splx(s);
874}
875
876/*
877 * Disassociate a buffer from a vnode.
878 */
879void
880brelvp(bp)
881	register struct buf *bp;
882{
883	struct vnode *vp;
884	struct buflists *listheadp;
885	int s;
886
887	KASSERT(bp->b_vp != NULL, ("brelvp: NULL"));
888
889	/*
890	 * Delete from old vnode list, if on one.
891	 */
892	vp = bp->b_vp;
893	s = splbio();
894	if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) {
895		if (bp->b_xflags & BX_VNDIRTY)
896			listheadp = &vp->v_dirtyblkhd;
897		else
898			listheadp = &vp->v_cleanblkhd;
899		TAILQ_REMOVE(listheadp, bp, b_vnbufs);
900		bp->b_xflags &= ~(BX_VNDIRTY | BX_VNCLEAN);
901	}
902	if ((vp->v_flag & VONWORKLST) && TAILQ_EMPTY(&vp->v_dirtyblkhd)) {
903		vp->v_flag &= ~VONWORKLST;
904		LIST_REMOVE(vp, v_synclist);
905	}
906	splx(s);
907	bp->b_vp = (struct vnode *) 0;
908	vdrop(vp);
909}
910
911/*
912 * The workitem queue.
913 *
914 * It is useful to delay writes of file data and filesystem metadata
915 * for tens of seconds so that quickly created and deleted files need
916 * not waste disk bandwidth being created and removed. To realize this,
917 * we append vnodes to a "workitem" queue. When running with a soft
918 * updates implementation, most pending metadata dependencies should
919 * not wait for more than a few seconds. Thus, metadata updates on
920 * mounted block devices are delayed only about half as long as file
921 * data. Similarly, directory updates are more critical, so they are
922 * delayed only about a third as long as file data. Thus, there are
923 * SYNCER_MAXDELAY queues that are processed round-robin at a rate of
924 * one each second (driven off the filesystem syncer process). The
925 * syncer_delayno variable indicates the next queue that is to be processed.
926 * Items that need to be processed soon are placed in this queue:
927 *
928 *	syncer_workitem_pending[syncer_delayno]
929 *
930 * A delay of fifteen seconds is done by placing the request fifteen
931 * entries later in the queue:
932 *
933 *	syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask]
934 *
935 */
936
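/*
 * Worked example (illustrative): with SYNCER_MAXDELAY = 32, hashinit()
 * leaves syncer_mask = 31.  If syncer_delayno is currently 20, adding
 * a vnode with a delay of 15 places it in slot (20 + 15) & 31 = 3;
 * the wheel simply wraps like a clock face.
 */
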
937/*
938 * Add an item to the syncer work queue.
939 */
940static void
941vn_syncer_add_to_worklist(struct vnode *vp, int delay)
942{
943	int s, slot;
944
945	s = splbio();
946
947	if (vp->v_flag & VONWORKLST) {
948		LIST_REMOVE(vp, v_synclist);
949	}
950
951	if (delay > syncer_maxdelay - 2)
952		delay = syncer_maxdelay - 2;
953	slot = (syncer_delayno + delay) & syncer_mask;
954
955	LIST_INSERT_HEAD(&syncer_workitem_pending[slot], vp, v_synclist);
956	vp->v_flag |= VONWORKLST;
957	splx(s);
958}
959
960struct  proc *updateproc;
961static void sched_sync __P((void));
962static struct kproc_desc up_kp = {
963	"syncer",
964	sched_sync,
965	&updateproc
966};
967SYSINIT(syncer, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp)
968
969/*
970 * System filesystem synchronizer daemon.
971 */
972void
973sched_sync(void)
974{
975	struct synclist *slp;
976	struct vnode *vp;
977	long starttime;
978	int s;
979	struct proc *p = updateproc;
980
981	EVENTHANDLER_REGISTER(shutdown_pre_sync, shutdown_kproc, p,
982	    SHUTDOWN_PRI_LAST);
983
984	for (;;) {
985		kproc_suspend_loop(p);
986
987		starttime = time_second;
988
989		/*
990		 * Push files whose dirty time has expired.  Be careful
991		 * of interrupt race on slp queue.
992		 */
993		s = splbio();
994		slp = &syncer_workitem_pending[syncer_delayno];
995		syncer_delayno += 1;
996		if (syncer_delayno == syncer_maxdelay)
997			syncer_delayno = 0;
998		splx(s);
999
1000		while ((vp = LIST_FIRST(slp)) != NULL) {
1001			if (VOP_ISLOCKED(vp, NULL) == 0) {
1002				vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
1003				(void) VOP_FSYNC(vp, p->p_ucred, MNT_LAZY, p);
1004				VOP_UNLOCK(vp, 0, p);
1005			}
1006			s = splbio();
1007			if (LIST_FIRST(slp) == vp) {
1008				/*
1009				 * Note: v_tag VT_VFS vps can remain on the
1010				 * worklist too with no dirty blocks, but
1011				 * since sync_fsync() moves it to a different
1012				 * since sync_fsync() moves them to a different
1013				 */
1014				if (TAILQ_EMPTY(&vp->v_dirtyblkhd) &&
1015				    !vn_isdisk(vp, NULL))
1016					panic("sched_sync: fsync failed vp %p tag %d", vp, vp->v_tag);
1017				/*
1018				 * Put us back on the worklist.  The worklist
1019				 * routine will remove us from our current
1020				 * position and then add us back in at a later
1021				 * position.
1022				 */
1023				vn_syncer_add_to_worklist(vp, syncdelay);
1024			}
1025			splx(s);
1026		}
1027
1028		/*
1029		 * Do soft update processing.
1030		 */
1031		if (bioops.io_sync)
1032			(*bioops.io_sync)(NULL);
1033
1034		/*
1035		 * The variable rushjob allows the kernel to speed up the
1036		 * processing of the filesystem syncer process. A rushjob
1037		 * value of N tells the filesystem syncer to process the next
1038		 * N seconds worth of work on its queue ASAP. Currently rushjob
1039		 * is used by the soft update code to speed up the filesystem
1040		 * syncer process when the incore state is getting so far
1041		 * ahead of the disk that the kernel memory pool is being
1042		 * threatened with exhaustion.
1043		 */
1044		if (rushjob > 0) {
1045			rushjob -= 1;
1046			continue;
1047		}
1048		/*
1049		 * If it has taken us less than a second to process the
1050		 * current work, then wait. Otherwise start right over
1051		 * again. We can still lose time if any single round
1052		 * takes more than two seconds, but it does not really
1053		 * matter as we are just trying to generally pace the
1054		 * filesystem activity.
1055		 */
1056		if (time_second == starttime)
1057			tsleep(&lbolt, PPAUSE, "syncer", 0);
1058	}
1059}
1060
1061/*
1062 * Request the syncer daemon to speed up its work.
1063 * We never push it to speed up more than half of its
1064 * normal turn time; otherwise it could take over the CPU.
1065 */
1066int
1067speedup_syncer()
1068{
1069	int s;
1070
1071	s = splhigh();
1072	if (updateproc->p_wchan == &lbolt)
1073		setrunnable(updateproc);
1074	splx(s);
1075	if (rushjob < syncdelay / 2) {
1076		rushjob += 1;
1077		stat_rush_requests += 1;
1078		return (1);
1079	}
1080	return(0);
1081}
1082
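/*
 * Illustrative note: with the default syncdelay of 30, at most
 * syncdelay / 2 = 15 rush requests can be outstanding.  Each one makes
 * sched_sync() skip its one-second sleep once, so the syncer works
 * through an extra second of the wheel per request.
 */
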
1083/*
1084 * Associate a p-buffer with a vnode.
1085 *
1086 * Also sets B_PAGING flag to indicate that vnode is not fully associated
1087 * with the buffer, i.e., the bp has not been linked into the vnode or
1088 * ref-counted.
1089 */
1090void
1091pbgetvp(vp, bp)
1092	register struct vnode *vp;
1093	register struct buf *bp;
1094{
1095
1096	KASSERT(bp->b_vp == NULL, ("pbgetvp: not free"));
1097
1098	bp->b_vp = vp;
1099	bp->b_flags |= B_PAGING;
1100	bp->b_dev = vn_todev(vp);
1101}
1102
1103/*
1104 * Disassociate a p-buffer from a vnode.
1105 */
1106void
1107pbrelvp(bp)
1108	register struct buf *bp;
1109{
1110
1111	KASSERT(bp->b_vp != NULL, ("pbrelvp: NULL"));
1112
1113	/* XXX REMOVE ME */
1114	if (bp->b_vnbufs.tqe_next != NULL) {
1115		panic(
1116		    "pbrelvp: b_vp was probably reassignbuf()d %p %x",
1117		    bp,
1118		    (int)bp->b_flags
1119		);
1120	}
1121	bp->b_vp = (struct vnode *) 0;
1122	bp->b_flags &= ~B_PAGING;
1123}
1124
1125void
1126pbreassignbuf(bp, newvp)
1127	struct buf *bp;
1128	struct vnode *newvp;
1129{
1130	if ((bp->b_flags & B_PAGING) == 0) {
1131		panic(
1132		    "pbreassignbuf() on non phys bp %p",
1133		    bp
1134		);
1135	}
1136	bp->b_vp = newvp;
1137}
1138
1139/*
1140 * Reassign a buffer from one vnode to another.
1141 * Used to assign file specific control information
1142 * (indirect blocks) to the vnode to which they belong.
1143 */
1144void
1145reassignbuf(bp, newvp)
1146	register struct buf *bp;
1147	register struct vnode *newvp;
1148{
1149	struct buflists *listheadp;
1150	int delay;
1151	int s;
1152
1153	if (newvp == NULL) {
1154		printf("reassignbuf: NULL\n");
1155		return;
1156	}
1157	++reassignbufcalls;
1158
1159	/*
1160	 * B_PAGING flagged buffers cannot be reassigned because their vp
1161	 * is not fully linked in.
1162	 */
1163	if (bp->b_flags & B_PAGING)
1164		panic("cannot reassign paging buffer");
1165
1166	s = splbio();
1167	/*
1168	 * Delete from old vnode list, if on one.
1169	 */
1170	if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) {
1171		if (bp->b_xflags & BX_VNDIRTY)
1172			listheadp = &bp->b_vp->v_dirtyblkhd;
1173		else
1174			listheadp = &bp->b_vp->v_cleanblkhd;
1175		TAILQ_REMOVE(listheadp, bp, b_vnbufs);
1176		bp->b_xflags &= ~(BX_VNDIRTY | BX_VNCLEAN);
1177		if (bp->b_vp != newvp) {
1178			vdrop(bp->b_vp);
1179			bp->b_vp = NULL;	/* for clarification */
1180		}
1181	}
1182	/*
1183	 * If dirty, put on list of dirty buffers; otherwise insert onto list
1184	 * of clean buffers.
1185	 */
1186	if (bp->b_flags & B_DELWRI) {
1187		struct buf *tbp;
1188
1189		listheadp = &newvp->v_dirtyblkhd;
1190		if ((newvp->v_flag & VONWORKLST) == 0) {
1191			switch (newvp->v_type) {
1192			case VDIR:
1193				delay = dirdelay;
1194				break;
1195			case VCHR:
1196			case VBLK:
1197				if (newvp->v_specmountpoint != NULL) {
1198					delay = metadelay;
1199					break;
1200				}
1201				/* fall through */
1202			default:
1203				delay = filedelay;
1204			}
1205			vn_syncer_add_to_worklist(newvp, delay);
1206		}
1207		bp->b_xflags |= BX_VNDIRTY;
1208		tbp = TAILQ_FIRST(listheadp);
1209		if (tbp == NULL ||
1210		    bp->b_lblkno == 0 ||
1211		    (bp->b_lblkno > 0 && tbp->b_lblkno < 0) ||
1212		    (bp->b_lblkno > 0 && bp->b_lblkno < tbp->b_lblkno)) {
1213			TAILQ_INSERT_HEAD(listheadp, bp, b_vnbufs);
1214			++reassignbufsortgood;
1215		} else if (bp->b_lblkno < 0) {
1216			TAILQ_INSERT_TAIL(listheadp, bp, b_vnbufs);
1217			++reassignbufsortgood;
1218		} else if (reassignbufmethod == 1) {
1219			/*
1220			 * New sorting algorithm, only handle sequential case,
1221			 * otherwise append to end (but before metadata)
1222			 */
1223			if ((tbp = gbincore(newvp, bp->b_lblkno - 1)) != NULL &&
1224			    (tbp->b_xflags & BX_VNDIRTY)) {
1225				/*
1226				 * Found the best place to insert the buffer
1227				 */
1228				TAILQ_INSERT_AFTER(listheadp, tbp, bp, b_vnbufs);
1229				++reassignbufsortgood;
1230			} else {
1231				/*
1232				 * Missed, append to end, but before meta-data.
1233				 * We know that the head buffer in the list is
1234				 * not meta-data due to prior conditionals.
1235				 *
1236				 * Indirect effects:  NFS second stage write
1237				 * tends to wind up here, giving maximum
1238				 * distance between the unstable write and the
1239				 * commit rpc.
1240				 */
1241				tbp = TAILQ_LAST(listheadp, buflists);
1242				while (tbp && tbp->b_lblkno < 0)
1243					tbp = TAILQ_PREV(tbp, buflists, b_vnbufs);
1244				TAILQ_INSERT_AFTER(listheadp, tbp, bp, b_vnbufs);
1245				++reassignbufsortbad;
1246			}
1247		} else {
1248			/*
1249			 * Old sorting algorithm, scan queue and insert
1250			 */
1251			struct buf *ttbp;
1252			while ((ttbp = TAILQ_NEXT(tbp, b_vnbufs)) &&
1253			    (ttbp->b_lblkno < bp->b_lblkno)) {
1254				++reassignbufloops;
1255				tbp = ttbp;
1256			}
1257			TAILQ_INSERT_AFTER(listheadp, tbp, bp, b_vnbufs);
1258		}
1259	} else {
1260		bp->b_xflags |= BX_VNCLEAN;
1261		TAILQ_INSERT_TAIL(&newvp->v_cleanblkhd, bp, b_vnbufs);
1262		if ((newvp->v_flag & VONWORKLST) &&
1263		    TAILQ_EMPTY(&newvp->v_dirtyblkhd)) {
1264			newvp->v_flag &= ~VONWORKLST;
1265			LIST_REMOVE(newvp, v_synclist);
1266		}
1267	}
1268	if (bp->b_vp != newvp) {
1269		bp->b_vp = newvp;
1270		vhold(bp->b_vp);
1271	}
1272	splx(s);
1273}
1274
1275/*
1276 * Create a vnode for a block device.
1277 * Used for mounting the root file system.
1278 */
1279int
1280bdevvp(dev, vpp)
1281	dev_t dev;
1282	struct vnode **vpp;
1283{
1284	register struct vnode *vp;
1285	struct vnode *nvp;
1286	int error;
1287
1288	if (dev == NODEV) {
1289		*vpp = NULLVP;
1290		return (ENXIO);
1291	}
1292	error = getnewvnode(VT_NON, (struct mount *)0, spec_vnodeop_p, &nvp);
1293	if (error) {
1294		*vpp = NULLVP;
1295		return (error);
1296	}
1297	vp = nvp;
1298	vp->v_type = VBLK;
1299	addalias(vp, dev);
1300	*vpp = vp;
1301	return (0);
1302}
1303
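/*
 * Usage sketch (assumes the machine-dependent code has set rootdev):
 * mounting the root filesystem typically begins with
 *	if (bdevvp(rootdev, &rootvp))
 *		panic("cannot set up bdevvp for root");
 * after which rootvp is handed to the filesystem's mount routine.
 */
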
1304/*
1305 * Add vnode to the alias list hung off the dev_t.
1306 *
1307 * The reason for this gunk is that multiple vnodes can reference
1308 * the same physical device, so checking vp->v_usecount to see
1309 * how many users there are is inadequate; the v_usecount for
1310 * the vnodes needs to be accumulated.  vcount() does that.
1311 */
1312void
1313addaliasu(nvp, nvp_rdev)
1314	struct vnode *nvp;
1315	udev_t nvp_rdev;
1316{
1317
1318	if (nvp->v_type != VBLK && nvp->v_type != VCHR)
1319		panic("addaliasu on non-special vnode");
1320	addalias(nvp, udev2dev(nvp_rdev, nvp->v_type == VBLK ? 1 : 0));
1321}
1322
1323void
1324addalias(nvp, dev)
1325	struct vnode *nvp;
1326	dev_t dev;
1327{
1328
1329	if (nvp->v_type != VBLK && nvp->v_type != VCHR)
1330		panic("addalias on non-special vnode");
1331
1332	nvp->v_rdev = dev;
1333	simple_lock(&spechash_slock);
1334	SLIST_INSERT_HEAD(&dev->si_hlist, nvp, v_specnext);
1335	simple_unlock(&spechash_slock);
1336}
1337
1338/*
1339 * Grab a particular vnode from the free list, increment its
1340 * reference count and lock it. The vnode lock bit is set if the
1341 * vnode is being eliminated in vgone. The process is awakened
1342 * when the transition is completed, and an error returned to
1343 * indicate that the vnode is no longer usable (possibly having
1344 * been changed to a new file system type).
1345 */
1346int
1347vget(vp, flags, p)
1348	register struct vnode *vp;
1349	int flags;
1350	struct proc *p;
1351{
1352	int error;
1353
1354	/*
1355	 * If the vnode is in the process of being cleaned out for
1356	 * another use, we wait for the cleaning to finish and then
1357	 * return failure. Cleaning is determined by checking that
1358	 * the VXLOCK flag is set.
1359	 */
1360	if ((flags & LK_INTERLOCK) == 0) {
1361		simple_lock(&vp->v_interlock);
1362	}
1363	if (vp->v_flag & VXLOCK) {
1364		vp->v_flag |= VXWANT;
1365		simple_unlock(&vp->v_interlock);
1366		tsleep((caddr_t)vp, PINOD, "vget", 0);
1367		return (ENOENT);
1368	}
1369
1370	vp->v_usecount++;
1371
1372	if (VSHOULDBUSY(vp))
1373		vbusy(vp);
1374	if (flags & LK_TYPE_MASK) {
1375		if ((error = vn_lock(vp, flags | LK_INTERLOCK, p)) != 0) {
1376			/*
1377			 * must expand vrele here because we do not want
1378			 * to call VOP_INACTIVE if the reference count
1379			 * drops back to zero since it was never really
1380			 * active. We must remove it from the free list
1381			 * before sleeping so that multiple processes do
1382			 * not try to recycle it.
1383			 */
1384			simple_lock(&vp->v_interlock);
1385			vp->v_usecount--;
1386			if (VSHOULDFREE(vp))
1387				vfree(vp);
1388			simple_unlock(&vp->v_interlock);
1389		}
1390		return (error);
1391	}
1392	simple_unlock(&vp->v_interlock);
1393	return (0);
1394}
1395
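/*
 * Usage sketch (not compiled): a typical consumer that wants a locked,
 * referenced vnode and drops both the lock and the reference with
 * vput() when done.
 */
#if 0
	if ((error = vget(vp, LK_EXCLUSIVE, p)) != 0)
		return (error);
	/* ... operate on the locked vnode ... */
	vput(vp);
#endif
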
1396void
1397vref(struct vnode *vp)
1398{
1399	simple_lock(&vp->v_interlock);
1400	vp->v_usecount++;
1401	simple_unlock(&vp->v_interlock);
1402}
1403
1404/*
1405 * Vnode put/release.
1406 * If count drops to zero, call inactive routine and return to freelist.
1407 */
1408void
1409vrele(vp)
1410	struct vnode *vp;
1411{
1412	struct proc *p = curproc;	/* XXX */
1413
1414	KASSERT(vp != NULL, ("vrele: null vp"));
1415
1416	simple_lock(&vp->v_interlock);
1417
1418	if (vp->v_usecount > 1) {
1419
1420		vp->v_usecount--;
1421		simple_unlock(&vp->v_interlock);
1422
1423		return;
1424	}
1425
1426	if (vp->v_usecount == 1) {
1427
1428		vp->v_usecount--;
1429		if (VSHOULDFREE(vp))
1430			vfree(vp);
1431	/*
1432	 * If we are doing a vput, the node is already locked, and we must
1433	 * call VOP_INACTIVE with the node locked.  So, in the case of
1434	 * vrele, we explicitly lock the vnode before calling VOP_INACTIVE.
1435	 */
1436		if (vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK, p) == 0) {
1437			VOP_INACTIVE(vp, p);
1438		}
1439
1440	} else {
1441#ifdef DIAGNOSTIC
1442		vprint("vrele: negative ref count", vp);
1443		simple_unlock(&vp->v_interlock);
1444#endif
1445		panic("vrele: negative ref cnt");
1446	}
1447}
1448
1449void
1450vput(vp)
1451	struct vnode *vp;
1452{
1453	struct proc *p = curproc;	/* XXX */
1454
1455	KASSERT(vp != NULL, ("vput: null vp"));
1456
1457	simple_lock(&vp->v_interlock);
1458
1459	if (vp->v_usecount > 1) {
1460
1461		vp->v_usecount--;
1462		VOP_UNLOCK(vp, LK_INTERLOCK, p);
1463		return;
1464
1465	}
1466
1467	if (vp->v_usecount == 1) {
1468
1469		vp->v_usecount--;
1470		if (VSHOULDFREE(vp))
1471			vfree(vp);
1472	/*
1473	 * If we are doing a vput, the node is already locked, and we must
1474	 * call VOP_INACTIVE with the node locked.  So, in the case of
1475	 * vrele, we explicitly lock the vnode before calling VOP_INACTIVE.
1476	 */
1477		simple_unlock(&vp->v_interlock);
1478		VOP_INACTIVE(vp, p);
1479
1480	} else {
1481#ifdef DIAGNOSTIC
1482		vprint("vput: negative ref count", vp);
1483#endif
1484		panic("vput: negative ref cnt");
1485	}
1486}
1487
1488/*
1489 * Somebody doesn't want the vnode recycled.
1490 */
1491void
1492vhold(vp)
1493	register struct vnode *vp;
1494{
1495	int s;
1496
1497  	s = splbio();
1498	vp->v_holdcnt++;
1499	if (VSHOULDBUSY(vp))
1500		vbusy(vp);
1501	splx(s);
1502}
1503
1504/*
1505 * One less who cares about this vnode.
1506 */
1507void
1508vdrop(vp)
1509	register struct vnode *vp;
1510{
1511	int s;
1512
1513	s = splbio();
1514	if (vp->v_holdcnt <= 0)
1515		panic("vdrop: holdcnt");
1516	vp->v_holdcnt--;
1517	if (VSHOULDFREE(vp))
1518		vfree(vp);
1519	splx(s);
1520}
1521
1522/*
1523 * Remove any vnodes in the vnode table belonging to mount point mp.
1524 *
1525 * If MNT_NOFORCE is specified, there should not be any active ones,
1526 * return error if any are found (nb: this is a user error, not a
1527 * system error). If MNT_FORCE is specified, detach any active vnodes
1528 * that are found.
1529 */
1530#ifdef DIAGNOSTIC
1531static int busyprt = 0;		/* print out busy vnodes */
1532SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "");
1533#endif
1534
1535int
1536vflush(mp, skipvp, flags)
1537	struct mount *mp;
1538	struct vnode *skipvp;
1539	int flags;
1540{
1541	struct proc *p = curproc;	/* XXX */
1542	struct vnode *vp, *nvp;
1543	int busy = 0;
1544
1545	simple_lock(&mntvnode_slock);
1546loop:
1547	for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp; vp = nvp) {
1548		/*
1549		 * Make sure this vnode wasn't reclaimed in getnewvnode().
1550		 * Start over if it has (it won't be on the list anymore).
1551		 * Start over if it was (it won't be on the list anymore).
1552		if (vp->v_mount != mp)
1553			goto loop;
1554		nvp = LIST_NEXT(vp, v_mntvnodes);
1555		/*
1556		 * Skip over a selected vnode.
1557		 */
1558		if (vp == skipvp)
1559			continue;
1560
1561		simple_lock(&vp->v_interlock);
1562		/*
1563		 * Skip over vnodes marked VSYSTEM.
1564		 */
1565		if ((flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
1566			simple_unlock(&vp->v_interlock);
1567			continue;
1568		}
1569		/*
1570		 * If WRITECLOSE is set, only flush out regular file vnodes
1571		 * open for writing.
1572		 */
1573		if ((flags & WRITECLOSE) &&
1574		    (vp->v_writecount == 0 || vp->v_type != VREG)) {
1575			simple_unlock(&vp->v_interlock);
1576			continue;
1577		}
1578
1579		/*
1580		 * With v_usecount == 0, all we need to do is clear out the
1581		 * vnode data structures and we are done.
1582		 */
1583		if (vp->v_usecount == 0) {
1584			simple_unlock(&mntvnode_slock);
1585			vgonel(vp, p);
1586			simple_lock(&mntvnode_slock);
1587			continue;
1588		}
1589
1590		/*
1591		 * If FORCECLOSE is set, forcibly close the vnode. For block
1592		 * or character devices, revert to an anonymous device. For
1593		 * all other files, just kill them.
1594		 */
1595		if (flags & FORCECLOSE) {
1596			simple_unlock(&mntvnode_slock);
1597			if (vp->v_type != VBLK && vp->v_type != VCHR) {
1598				vgonel(vp, p);
1599			} else {
1600				vclean(vp, 0, p);
1601				vp->v_op = spec_vnodeop_p;
1602				insmntque(vp, (struct mount *) 0);
1603			}
1604			simple_lock(&mntvnode_slock);
1605			continue;
1606		}
1607#ifdef DIAGNOSTIC
1608		if (busyprt)
1609			vprint("vflush: busy vnode", vp);
1610#endif
1611		simple_unlock(&vp->v_interlock);
1612		busy++;
1613	}
1614	simple_unlock(&mntvnode_slock);
1615	if (busy)
1616		return (EBUSY);
1617	return (0);
1618}
1619
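/*
 * Usage sketch (not compiled): a filesystem's unmount routine
 * typically flushes every vnode, translating MNT_FORCE into
 * FORCECLOSE:
 */
#if 0
	int flags = 0;

	if (mntflags & MNT_FORCE)
		flags |= FORCECLOSE;
	if ((error = vflush(mp, NULLVP, flags)) != 0)
		return (error);
#endif
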
1620/*
1621 * Disassociate the underlying file system from a vnode.
1622 */
1623static void
1624vclean(vp, flags, p)
1625	struct vnode *vp;
1626	int flags;
1627	struct proc *p;
1628{
1629	int active;
1630	vm_object_t obj;
1631
1632	/*
1633	 * Check to see if the vnode is in use. If so we have to reference it
1634	 * before we clean it out so that its count cannot fall to zero and
1635	 * generate a race against ourselves to recycle it.
1636	 */
1637	if ((active = vp->v_usecount))
1638		vp->v_usecount++;
1639
1640	/*
1641	 * Prevent the vnode from being recycled or brought into use while we
1642	 * clean it out.
1643	 */
1644	if (vp->v_flag & VXLOCK)
1645		panic("vclean: deadlock");
1646	vp->v_flag |= VXLOCK;
1647	/*
1648	 * Even if the count is zero, the VOP_INACTIVE routine may still
1649	 * have the object locked while it cleans it out. The VOP_LOCK
1650	 * ensures that the VOP_INACTIVE routine is done with its work.
1651	 * For active vnodes, it ensures that no other activity can
1652	 * occur while the underlying object is being cleaned out.
1653	 */
1654	VOP_LOCK(vp, LK_DRAIN | LK_INTERLOCK, p);
1655
1656	/*
1657	 * Clean out any buffers associated with the vnode.
1658	 */
1659	vinvalbuf(vp, V_SAVE, NOCRED, p, 0, 0);
1660	if ((obj = vp->v_object) != NULL) {
1661		if (obj->ref_count == 0) {
1662			/*
1663			 * vclean() may be called twice.  The first time removes the
1664			 * primary reference to the object, the second time goes
1665			 * one further and is a special-case to terminate the object.
1666			 */
1667			vm_object_terminate(obj);
1668		} else {
1669			/*
1670			 * Woe to the process that tries to page now :-).
1671			 */
1672			vm_pager_deallocate(obj);
1673		}
1674	}
1675
1676	/*
1677	 * If purging an active vnode, it must be closed and
1678	 * deactivated before being reclaimed. Note that the
1679	 * VOP_INACTIVE will unlock the vnode.
1680	 */
1681	if (active) {
1682		if (flags & DOCLOSE)
1683			VOP_CLOSE(vp, FNONBLOCK, NOCRED, p);
1684		VOP_INACTIVE(vp, p);
1685	} else {
1686		/*
1687		 * Any other processes trying to obtain this lock must first
1688		 * wait for VXLOCK to clear, then call the new lock operation.
1689		 */
1690		VOP_UNLOCK(vp, 0, p);
1691	}
1692	/*
1693	 * Reclaim the vnode.
1694	 */
1695	if (VOP_RECLAIM(vp, p))
1696		panic("vclean: cannot reclaim");
1697
1698	if (active) {
1699		/*
1700		 * Inline copy of vrele() since VOP_INACTIVE
1701		 * has already been called.
1702		 */
1703		simple_lock(&vp->v_interlock);
1704		if (--vp->v_usecount <= 0) {
1705#ifdef DIAGNOSTIC
1706			if (vp->v_usecount < 0 || vp->v_writecount != 0) {
1707				vprint("vclean: bad ref count", vp);
1708				panic("vclean: ref cnt");
1709			}
1710#endif
1711			vfree(vp);
1712		}
1713		simple_unlock(&vp->v_interlock);
1714	}
1715
1716	cache_purge(vp);
1717	if (vp->v_vnlock) {
1718		FREE(vp->v_vnlock, M_VNODE);
1719		vp->v_vnlock = NULL;
1720	}
1721
1722	if (VSHOULDFREE(vp))
1723		vfree(vp);
1724
1725	/*
1726	 * Done with purge, notify sleepers of the grim news.
1727	 */
1728	vp->v_op = dead_vnodeop_p;
1729	vn_pollgone(vp);
1730	vp->v_tag = VT_NON;
1731	vp->v_flag &= ~VXLOCK;
1732	if (vp->v_flag & VXWANT) {
1733		vp->v_flag &= ~VXWANT;
1734		wakeup((caddr_t) vp);
1735	}
1736}
1737
1738/*
1739 * Eliminate all activity associated with the requested vnode
1740 * and with all vnodes aliased to the requested vnode.
1741 */
1742int
1743vop_revoke(ap)
1744	struct vop_revoke_args /* {
1745		struct vnode *a_vp;
1746		int a_flags;
1747	} */ *ap;
1748{
1749	struct vnode *vp, *vq;
1750	dev_t dev;
1751
1752	KASSERT((ap->a_flags & REVOKEALL) != 0, ("vop_revoke"));
1753
1754	vp = ap->a_vp;
1755	/*
1756	 * If a vgone (or vclean) is already in progress,
1757	 * wait until it is done and return.
1758	 */
1759	if (vp->v_flag & VXLOCK) {
1760		vp->v_flag |= VXWANT;
1761		simple_unlock(&vp->v_interlock);
1762		tsleep((caddr_t)vp, PINOD, "vop_revokeall", 0);
1763		return (0);
1764	}
1765	dev = vp->v_rdev;
1766	for (;;) {
1767		simple_lock(&spechash_slock);
1768		vq = SLIST_FIRST(&dev->si_hlist);
1769		simple_unlock(&spechash_slock);
1770		if (!vq)
1771			break;
1772		vgone(vq);
1773	}
1774	return (0);
1775}
1776
1777/*
1778 * Recycle an unused vnode to the front of the free list.
1779 * Release the passed interlock if the vnode will be recycled.
1780 */
1781int
1782vrecycle(vp, inter_lkp, p)
1783	struct vnode *vp;
1784	struct simplelock *inter_lkp;
1785	struct proc *p;
1786{
1787
1788	simple_lock(&vp->v_interlock);
1789	if (vp->v_usecount == 0) {
1790		if (inter_lkp) {
1791			simple_unlock(inter_lkp);
1792		}
1793		vgonel(vp, p);
1794		return (1);
1795	}
1796	simple_unlock(&vp->v_interlock);
1797	return (0);
1798}
1799
1800/*
1801 * Eliminate all activity associated with a vnode
1802 * in preparation for reuse.
1803 */
1804void
1805vgone(vp)
1806	register struct vnode *vp;
1807{
1808	struct proc *p = curproc;	/* XXX */
1809
1810	simple_lock(&vp->v_interlock);
1811	vgonel(vp, p);
1812}
1813
1814/*
1815 * vgone, with the vp interlock held.
1816 */
1817void
1818vgonel(vp, p)
1819	struct vnode *vp;
1820	struct proc *p;
1821{
1822	int s;
1823
1824	/*
1825	 * If a vgone (or vclean) is already in progress,
1826	 * wait until it is done and return.
1827	 */
1828	if (vp->v_flag & VXLOCK) {
1829		vp->v_flag |= VXWANT;
1830		simple_unlock(&vp->v_interlock);
1831		tsleep((caddr_t)vp, PINOD, "vgone", 0);
1832		return;
1833	}
1834
1835	/*
1836	 * Clean out the filesystem specific data.
1837	 */
1838	vclean(vp, DOCLOSE, p);
1839	simple_lock(&vp->v_interlock);
1840
1841	/*
1842	 * Delete from old mount point vnode list, if on one.
1843	 */
1844	if (vp->v_mount != NULL)
1845		insmntque(vp, (struct mount *)0);
1846	/*
1847	 * If special device, remove it from special device alias list
1848	 * if it is on one.
1849	 */
1850	if ((vp->v_type == VBLK || vp->v_type == VCHR) && vp->v_rdev != NULL) {
1851		simple_lock(&spechash_slock);
1852		SLIST_REMOVE(&vp->v_hashchain, vp, vnode, v_specnext);
1853		freedev(vp->v_rdev);
1854		simple_unlock(&spechash_slock);
1855		vp->v_rdev = NULL;
1856	}
1857
1858	/*
1859	 * If it is on the freelist and not already at the head,
1860	 * move it to the head of the list. The test of the back
1861	 * pointer and the reference count of zero is because
1862	 * it will be removed from the free list by getnewvnode,
1863	 * but will not have its reference count incremented until
1864	 * after calling vgone. If the reference count were
1865	 * incremented first, vgone would (incorrectly) try to
1866	 * close the previous instance of the underlying object.
1867	 */
1868	if (vp->v_usecount == 0 && !(vp->v_flag & VDOOMED)) {
1869		s = splbio();
1870		simple_lock(&vnode_free_list_slock);
1871		if (vp->v_flag & VFREE) {
1872			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
1873		} else if (vp->v_flag & VTBFREE) {
1874			TAILQ_REMOVE(&vnode_tobefree_list, vp, v_freelist);
1875			vp->v_flag &= ~VTBFREE;
1876			freevnodes++;
1877		} else
1878			freevnodes++;
1879		vp->v_flag |= VFREE;
1880		TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
1881		simple_unlock(&vnode_free_list_slock);
1882		splx(s);
1883	}
1884
1885	vp->v_type = VBAD;
1886	simple_unlock(&vp->v_interlock);
1887}
1888
1889/*
1890 * Lookup a vnode by device number.
1891 */
1892int
1893vfinddev(dev, type, vpp)
1894	dev_t dev;
1895	enum vtype type;
1896	struct vnode **vpp;
1897{
1898	struct vnode *vp;
1899
1900	simple_lock(&spechash_slock);
1901	SLIST_FOREACH(vp, &dev->si_hlist, v_specnext) {
1902		if (type == vp->v_type) {
1903			*vpp = vp;
1904			simple_unlock(&spechash_slock);
1905			return (1);
1906		}
1907	}
1908	simple_unlock(&spechash_slock);
1909	return (0);
1910}
1911
1912/*
1913 * Calculate the total number of references to a special device.
1914 */
1915int
1916vcount(vp)
1917	struct vnode *vp;
1918{
1919	struct vnode *vq;
1920	int count;
1921
1922	count = 0;
1923	simple_lock(&spechash_slock);
1924	SLIST_FOREACH(vq, &vp->v_hashchain, v_specnext)
1925		count += vq->v_usecount;
1926	simple_unlock(&spechash_slock);
1927	return (count);
1928}
1929
1930/*
1931 * Same as above, but using the dev_t as the argument.
1932 */
1933
1934int
1935count_dev(dev)
1936	dev_t dev;
1937{
1938	struct vnode *vp;
1939
1940	vp = SLIST_FIRST(&dev->si_hlist);
1941	if (vp == NULL)
1942		return (0);
1943	return(vcount(vp));
1944}
1945
1946/*
1947 * Print out a description of a vnode.
1948 */
1949static char *typename[] =
1950{"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD"};
1951
1952void
1953vprint(label, vp)
1954	char *label;
1955	struct vnode *vp;
1956{
1957	char buf[96];
1958
1959	if (label != NULL)
1960		printf("%s: %p: ", label, (void *)vp);
1961	else
1962		printf("%p: ", (void *)vp);
1963	printf("type %s, usecount %d, writecount %d, refcount %d,",
1964	    typename[vp->v_type], vp->v_usecount, vp->v_writecount,
1965	    vp->v_holdcnt);
1966	buf[0] = '\0';
1967	if (vp->v_flag & VROOT)
1968		strcat(buf, "|VROOT");
1969	if (vp->v_flag & VTEXT)
1970		strcat(buf, "|VTEXT");
1971	if (vp->v_flag & VSYSTEM)
1972		strcat(buf, "|VSYSTEM");
1973	if (vp->v_flag & VXLOCK)
1974		strcat(buf, "|VXLOCK");
1975	if (vp->v_flag & VXWANT)
1976		strcat(buf, "|VXWANT");
1977	if (vp->v_flag & VBWAIT)
1978		strcat(buf, "|VBWAIT");
1979	if (vp->v_flag & VDOOMED)
1980		strcat(buf, "|VDOOMED");
1981	if (vp->v_flag & VFREE)
1982		strcat(buf, "|VFREE");
1983	if (vp->v_flag & VOBJBUF)
1984		strcat(buf, "|VOBJBUF");
1985	if (buf[0] != '\0')
1986		printf(" flags (%s)", &buf[1]);
1987	if (vp->v_data == NULL) {
1988		printf("\n");
1989	} else {
1990		printf("\n\t");
1991		VOP_PRINT(vp);
1992	}
1993}
1994
1995#ifdef DDB
1996#include <ddb/ddb.h>
1997/*
1998 * List all of the locked vnodes in the system.
1999 * Called when debugging the kernel.
2000 */
2001DB_SHOW_COMMAND(lockedvnodes, lockedvnodes)
2002{
2003	struct proc *p = curproc;	/* XXX */
2004	struct mount *mp, *nmp;
2005	struct vnode *vp;
2006
2007	printf("Locked vnodes\n");
2008	simple_lock(&mountlist_slock);
2009	for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
2010		if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock, p)) {
2011			nmp = TAILQ_NEXT(mp, mnt_list);
2012			continue;
2013		}
2014		LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
2015			if (VOP_ISLOCKED(vp, NULL))
2016				vprint((char *)0, vp);
2017		}
2018		simple_lock(&mountlist_slock);
2019		nmp = TAILQ_NEXT(mp, mnt_list);
2020		vfs_unbusy(mp, p);
2021	}
2022	simple_unlock(&mountlist_slock);
2023}
2024#endif
2025
2026/*
2027 * Top level filesystem related information gathering.
2028 */
2029static int	sysctl_ovfs_conf __P(SYSCTL_HANDLER_ARGS);
2030
2031static int
2032vfs_sysctl SYSCTL_HANDLER_ARGS
2033{
2034	int *name = (int *)arg1 - 1;	/* XXX */
2035	u_int namelen = arg2 + 1;	/* XXX */
2036	struct vfsconf *vfsp;
2037
2038#if 1 || defined(COMPAT_PRELITE2)
2039	/* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. */
2040	if (namelen == 1)
2041		return (sysctl_ovfs_conf(oidp, arg1, arg2, req));
2042#endif
2043
2044#ifdef notyet
2045	/* all sysctl names at this level are at least name and field */
2046	if (namelen < 2)
2047		return (ENOTDIR);		/* overloaded */
2048	if (name[0] != VFS_GENERIC) {
2049		for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
2050			if (vfsp->vfc_typenum == name[0])
2051				break;
2052		if (vfsp == NULL)
2053			return (EOPNOTSUPP);
2054		return ((*vfsp->vfc_vfsops->vfs_sysctl)(&name[1], namelen - 1,
2055		    oldp, oldlenp, newp, newlen, p));
2056	}
2057#endif
2058	switch (name[1]) {
2059	case VFS_MAXTYPENUM:
2060		if (namelen != 2)
2061			return (ENOTDIR);
2062		return (SYSCTL_OUT(req, &maxvfsconf, sizeof(int)));
2063	case VFS_CONF:
2064		if (namelen != 3)
2065			return (ENOTDIR);	/* overloaded */
2066		for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
2067			if (vfsp->vfc_typenum == name[2])
2068				break;
2069		if (vfsp == NULL)
2070			return (EOPNOTSUPP);
2071		return (SYSCTL_OUT(req, vfsp, sizeof *vfsp));
2072	}
2073	return (EOPNOTSUPP);
2074}
2075
2076SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD, vfs_sysctl,
2077	"Generic filesystem");
2078
2079#if 1 || defined(COMPAT_PRELITE2)
2080
2081static int
2082sysctl_ovfs_conf SYSCTL_HANDLER_ARGS
2083{
2084	int error;
2085	struct vfsconf *vfsp;
2086	struct ovfsconf ovfs;
2087
2088	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) {
2089		ovfs.vfc_vfsops = vfsp->vfc_vfsops;	/* XXX used as flag */
2090		strcpy(ovfs.vfc_name, vfsp->vfc_name);
2091		ovfs.vfc_index = vfsp->vfc_typenum;
2092		ovfs.vfc_refcount = vfsp->vfc_refcount;
2093		ovfs.vfc_flags = vfsp->vfc_flags;
2094		error = SYSCTL_OUT(req, &ovfs, sizeof ovfs);
2095		if (error)
2096			return error;
2097	}
2098	return 0;
2099}
2100
2101#endif /* 1 || COMPAT_PRELITE2 */
2102
2103#if 0
2104#define KINFO_VNODESLOP	10
2105/*
2106 * Dump vnode list (via sysctl).
2107 * Copyout address of vnode followed by vnode.
2108 */
2109/* ARGSUSED */
2110static int
2111sysctl_vnode SYSCTL_HANDLER_ARGS
2112{
2113	struct proc *p = curproc;	/* XXX */
2114	struct mount *mp, *nmp;
2115	struct vnode *nvp, *vp;
2116	int error;
2117
2118#define VPTRSZ	sizeof (struct vnode *)
2119#define VNODESZ	sizeof (struct vnode)
2120
2121	req->lock = 0;
2122	if (!req->oldptr) /* Make an estimate */
2123		return (SYSCTL_OUT(req, 0,
2124			(numvnodes + KINFO_VNODESLOP) * (VPTRSZ + VNODESZ)));
2125
2126	simple_lock(&mountlist_slock);
2127	for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
2128		if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock, p)) {
2129			nmp = TAILQ_NEXT(mp, mnt_list);
2130			continue;
2131		}
2132again:
2133		simple_lock(&mntvnode_slock);
2134		for (vp = LIST_FIRST(&mp->mnt_vnodelist);
2135		     vp != NULL;
2136		     vp = nvp) {
2137			/*
2138			 * Check that the vp is still associated with
2139			 * this filesystem.  RACE: could have been
2140			 * recycled onto the same filesystem.
2141			 */
2142			if (vp->v_mount != mp) {
2143				simple_unlock(&mntvnode_slock);
2144				goto again;
2145			}
2146			nvp = LIST_NEXT(vp, v_mntvnodes);
2147			simple_unlock(&mntvnode_slock);
2148			if ((error = SYSCTL_OUT(req, &vp, VPTRSZ)) ||
2149			    (error = SYSCTL_OUT(req, vp, VNODESZ)))
2150				return (error);
2151			simple_lock(&mntvnode_slock);
2152		}
2153		simple_unlock(&mntvnode_slock);
2154		simple_lock(&mountlist_slock);
2155		nmp = TAILQ_NEXT(mp, mnt_list);
2156		vfs_unbusy(mp, p);
2157	}
2158	simple_unlock(&mountlist_slock);
2159
2160	return (0);
2161}
2162#endif
2163
2164/*
2165 * XXX
2166 * Exporting the vnode list on large systems causes them to crash.
2167 * Exporting the vnode list on medium systems causes sysctl to coredump.
2168 */
2169#if 0
2170SYSCTL_PROC(_kern, KERN_VNODE, vnode, CTLTYPE_OPAQUE|CTLFLAG_RD,
2171	0, 0, sysctl_vnode, "S,vnode", "");
2172#endif
2173
2174/*
2175 * Check to see if a filesystem is mounted on a block device.
2176 */
2177int
2178vfs_mountedon(vp)
2179	struct vnode *vp;
2180{
2181
2182	if (vp->v_specmountpoint != NULL)
2183		return (EBUSY);
2184	return (0);
2185}
2186
2187/*
2188 * Unmount all filesystems. The list is traversed in reverse order
2189 * of mounting to avoid dependencies.
2190 */
2191void
2192vfs_unmountall()
2193{
2194	struct mount *mp;
2195	struct proc *p;
2196	int error;
2197
2198	if (curproc != NULL)
2199		p = curproc;
2200	else
2201		p = initproc;	/* XXX XXX should this be proc0? */
2202	/*
2203	 * Since this only runs when rebooting, it is not interlocked.
2204	 */
2205	while (!TAILQ_EMPTY(&mountlist)) {
2206		mp = TAILQ_LAST(&mountlist, mntlist);
2207		error = dounmount(mp, MNT_FORCE, p);
2208		if (error) {
2209			TAILQ_REMOVE(&mountlist, mp, mnt_list);
2210			printf("unmount of %s failed (",
2211			    mp->mnt_stat.f_mntonname);
2212			if (error == EBUSY)
2213				printf("BUSY)\n");
2214			else
2215				printf("%d)\n", error);
2216		} else {
2217			/* The unmount has removed mp from the mountlist */
2218		}
2219	}
2220}
2221
2222/*
2223 * Build hash lists of net addresses and hang them off the mount point.
2224 * Called by vfs_export() to set up the lists of export addresses.
2225 */
2226static int
2227vfs_hang_addrlist(mp, nep, argp)
2228	struct mount *mp;
2229	struct netexport *nep;
2230	struct export_args *argp;
2231{
2232	register struct netcred *np;
2233	register struct radix_node_head *rnh;
2234	register int i;
2235	struct radix_node *rn;
2236	struct sockaddr *saddr, *smask = 0;
2237	struct domain *dom;
2238	int error;
2239
2240	if (argp->ex_addrlen == 0) {
2241		if (mp->mnt_flag & MNT_DEFEXPORTED)
2242			return (EPERM);
2243		np = &nep->ne_defexported;
2244		np->netc_exflags = argp->ex_flags;
2245		np->netc_anon = argp->ex_anon;
2246		np->netc_anon.cr_ref = 1;
2247		mp->mnt_flag |= MNT_DEFEXPORTED;
2248		return (0);
2249	}
2250	i = sizeof(struct netcred) + argp->ex_addrlen + argp->ex_masklen;
2251	np = (struct netcred *) malloc(i, M_NETADDR, M_WAITOK);
2252	bzero((caddr_t) np, i);
2253	saddr = (struct sockaddr *) (np + 1);
2254	if ((error = copyin(argp->ex_addr, (caddr_t) saddr, argp->ex_addrlen)))
2255		goto out;
2256	if (saddr->sa_len > argp->ex_addrlen)
2257		saddr->sa_len = argp->ex_addrlen;
2258	if (argp->ex_masklen) {
2259		smask = (struct sockaddr *) ((caddr_t) saddr + argp->ex_addrlen);
2260		error = copyin(argp->ex_mask, (caddr_t) smask, argp->ex_masklen);
2261		if (error)
2262			goto out;
2263		if (smask->sa_len > argp->ex_masklen)
2264			smask->sa_len = argp->ex_masklen;
2265	}
2266	i = saddr->sa_family;
2267	if ((rnh = nep->ne_rtable[i]) == 0) {
2268		/*
2269		 * It seems silly to initialize every AF when most are not
2270		 * used; do so on demand here.
2271		 */
2272		for (dom = domains; dom; dom = dom->dom_next)
2273			if (dom->dom_family == i && dom->dom_rtattach) {
2274				dom->dom_rtattach((void **) &nep->ne_rtable[i],
2275				    dom->dom_rtoffset);
2276				break;
2277			}
2278		if ((rnh = nep->ne_rtable[i]) == 0) {
2279			error = ENOBUFS;
2280			goto out;
2281		}
2282	}
2283	rn = (*rnh->rnh_addaddr) ((caddr_t) saddr, (caddr_t) smask, rnh,
2284	    np->netc_rnodes);
2285	if (rn == 0 || np != (struct netcred *) rn) {	/* already exists */
2286		error = EPERM;
2287		goto out;
2288	}
2289	np->netc_exflags = argp->ex_flags;
2290	np->netc_anon = argp->ex_anon;
2291	np->netc_anon.cr_ref = 1;
2292	return (0);
2293out:
2294	free(np, M_NETADDR);
2295	return (error);
2296}
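
/*
 * Layout of the single allocation built above:
 *
 *	+----------------+----------------------+---------------------+
 *	| struct netcred | export address       | netmask (optional)  |
 *	+----------------+----------------------+---------------------+
 *	                   ex_addrlen bytes       ex_masklen bytes
 *
 * saddr and smask point into this one block, which is why the error
 * path can release everything with a single free(np, M_NETADDR).
 */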
2297
2298/* ARGSUSED */
2299static int
2300vfs_free_netcred(rn, w)
2301	struct radix_node *rn;
2302	void *w;
2303{
2304	register struct radix_node_head *rnh = (struct radix_node_head *) w;
2305
2306	(*rnh->rnh_deladdr) (rn->rn_key, rn->rn_mask, rnh);
2307	free((caddr_t) rn, M_NETADDR);
2308	return (0);
2309}
2310
2311/*
2312 * Free the net address hash lists that are hanging off the mount points.
2313 */
2314static void
2315vfs_free_addrlist(nep)
2316	struct netexport *nep;
2317{
2318	register int i;
2319	register struct radix_node_head *rnh;
2320
2321	for (i = 0; i <= AF_MAX; i++)
2322		if ((rnh = nep->ne_rtable[i])) {
2323			(*rnh->rnh_walktree) (rnh, vfs_free_netcred,
2324			    (caddr_t) rnh);
2325			free((caddr_t) rnh, M_RTABLE);
2326			nep->ne_rtable[i] = 0;
2327		}
2328}
2329
2330int
2331vfs_export(mp, nep, argp)
2332	struct mount *mp;
2333	struct netexport *nep;
2334	struct export_args *argp;
2335{
2336	int error;
2337
2338	if (argp->ex_flags & MNT_DELEXPORT) {
2339		if (mp->mnt_flag & MNT_EXPUBLIC) {
2340			vfs_setpublicfs(NULL, NULL, NULL);
2341			mp->mnt_flag &= ~MNT_EXPUBLIC;
2342		}
2343		vfs_free_addrlist(nep);
2344		mp->mnt_flag &= ~(MNT_EXPORTED | MNT_DEFEXPORTED);
2345	}
2346	if (argp->ex_flags & MNT_EXPORTED) {
2347		if (argp->ex_flags & MNT_EXPUBLIC) {
2348			if ((error = vfs_setpublicfs(mp, nep, argp)) != 0)
2349				return (error);
2350			mp->mnt_flag |= MNT_EXPUBLIC;
2351		}
2352		if ((error = vfs_hang_addrlist(mp, nep, argp)))
2353			return (error);
2354		mp->mnt_flag |= MNT_EXPORTED;
2355	}
2356	return (0);
2357}
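
/*
 * Usage sketch (illustrative; names as in ffs): a filesystem's mount
 * routine forwards export administration here when no special device
 * is named in an update, e.g.
 *
 *	if (args.fspec == 0)
 *		return (vfs_export(mp, &ump->um_export, &args.export));
 */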
2358
2359
2360/*
2361 * Set the publicly exported filesystem (WebNFS). Currently, only
2362 * one public filesystem is possible per the spec (RFC 2054 and 2055).
2363 */
2364int
2365vfs_setpublicfs(mp, nep, argp)
2366	struct mount *mp;
2367	struct netexport *nep;
2368	struct export_args *argp;
2369{
2370	int error;
2371	struct vnode *rvp;
2372	char *cp;
2373
2374	/*
2375	 * mp == NULL -> invalidate the current info; the FS is
2376	 * no longer exported.  May be called from either vfs_export
2377	 * or unmount, so check whether it has already been done.
2378	 */
2379	if (mp == NULL) {
2380		if (nfs_pub.np_valid) {
2381			nfs_pub.np_valid = 0;
2382			if (nfs_pub.np_index != NULL) {
2383				FREE(nfs_pub.np_index, M_TEMP);
2384				nfs_pub.np_index = NULL;
2385			}
2386		}
2387		return (0);
2388	}
2389
2390	/*
2391	 * Only one allowed at a time.
2392	 */
2393	if (nfs_pub.np_valid != 0 && mp != nfs_pub.np_mount)
2394		return (EBUSY);
2395
2396	/*
2397	 * Get real filehandle for root of exported FS.
2398	 */
2399	bzero((caddr_t)&nfs_pub.np_handle, sizeof(nfs_pub.np_handle));
2400	nfs_pub.np_handle.fh_fsid = mp->mnt_stat.f_fsid;
2401
2402	if ((error = VFS_ROOT(mp, &rvp)))
2403		return (error);
2404
2405	if ((error = VFS_VPTOFH(rvp, &nfs_pub.np_handle.fh_fid)))
2406		return (error);
2407
2408	vput(rvp);
2409
2410	/*
2411	 * If an indexfile was specified, pull it in.
2412	 */
2413	if (argp->ex_indexfile != NULL) {
2414		MALLOC(nfs_pub.np_index, char *, MAXNAMLEN + 1, M_TEMP,
2415		    M_WAITOK);
2416		error = copyinstr(argp->ex_indexfile, nfs_pub.np_index,
2417		    MAXNAMLEN, (size_t *)0);
2418		if (!error) {
2419			/*
2420			 * Check for illegal filenames.
2421			 */
2422			for (cp = nfs_pub.np_index; *cp; cp++) {
2423				if (*cp == '/') {
2424					error = EINVAL;
2425					break;
2426				}
2427			}
2428		}
2429		if (error) {
2430			FREE(nfs_pub.np_index, M_TEMP);
2431			return (error);
2432		}
2433	}
2434
2435	nfs_pub.np_mount = mp;
2436	nfs_pub.np_valid = 1;
2437	return (0);
2438}
2439
2440struct netcred *
2441vfs_export_lookup(mp, nep, nam)
2442	register struct mount *mp;
2443	struct netexport *nep;
2444	struct sockaddr *nam;
2445{
2446	register struct netcred *np;
2447	register struct radix_node_head *rnh;
2448	struct sockaddr *saddr;
2449
2450	np = NULL;
2451	if (mp->mnt_flag & MNT_EXPORTED) {
2452		/*
2453		 * Lookup in the export list first.
2454		 */
2455		if (nam != NULL) {
2456			saddr = nam;
2457			rnh = nep->ne_rtable[saddr->sa_family];
2458			if (rnh != NULL) {
2459				np = (struct netcred *)
2460					(*rnh->rnh_matchaddr)((caddr_t)saddr,
2461							      rnh);
2462				if (np && np->netc_rnodes->rn_flags & RNF_ROOT)
2463					np = NULL;
2464			}
2465		}
2466		/*
2467		 * If no address match, use the default if it exists.
2468		 */
2469		if (np == NULL && mp->mnt_flag & MNT_DEFEXPORTED)
2470			np = &nep->ne_defexported;
2471	}
2472	return (np);
2473}
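
/*
 * Consumer sketch (illustrative; this is roughly what the per-fs
 * export check for the NFS server does):
 *
 *	np = vfs_export_lookup(mp, &ump->um_export, nam);
 *	if (np == NULL)
 *		return (EACCES);
 *	*exflagsp = np->netc_exflags;
 *	*credanonp = &np->netc_anon;
 */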
2474
2475/*
2476 * Perform msync on all vnodes under a mount point.
2477 * The mount point must be locked.
2478 */
2479void
2480vfs_msync(struct mount *mp, int flags) {
2481	struct vnode *vp, *nvp;
2482	struct vm_object *obj;
2483	int anyio, tries;
2484
2485	tries = 5;
2486loop:
2487	anyio = 0;
2488	for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = nvp) {
2489
2490		nvp = LIST_NEXT(vp, v_mntvnodes);
2491
2492		if (vp->v_mount != mp) {
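		/*
		 * The vnode may have been recycled to another mount
		 * point while we held no reference on it; if so,
		 * rescan this filesystem from the beginning.
		 */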
2493			goto loop;
2494		}
2495
2496		if (vp->v_flag & VXLOCK)	/* XXX: what if MNT_WAIT? */
2497			continue;
2498
2499		if (flags != MNT_WAIT) {
2500			obj = vp->v_object;
2501			if (obj == NULL || (obj->flags & OBJ_MIGHTBEDIRTY) == 0)
2502				continue;
2503			if (VOP_ISLOCKED(vp, NULL))
2504				continue;
2505		}
2506
2507		simple_lock(&vp->v_interlock);
2508		if (vp->v_object &&
2509		   (vp->v_object->flags & OBJ_MIGHTBEDIRTY)) {
2510			if (!vget(vp,
2511				LK_INTERLOCK | LK_EXCLUSIVE | LK_RETRY | LK_NOOBJ, curproc)) {
2512				if (vp->v_object) {
2513					vm_object_page_clean(vp->v_object, 0, 0, flags == MNT_WAIT ? OBJPC_SYNC : OBJPC_NOSYNC);
2514					anyio = 1;
2515				}
2516				vput(vp);
2517			}
2518		} else {
2519			simple_unlock(&vp->v_interlock);
2520		}
2521	}
2522	if (anyio && (--tries > 0))
2523		goto loop;
2524}
2525
2526/*
2527 * Create the VM object needed for VMIO and mmap support.  This
2528 * is done for all VREG files in the system.  Some filesystems can
2529 * also take advantage of the additional metadata buffering of the
2530 * VMIO code by making the device node VMIO-backed as well.
2531 *
2532 * vp must be locked when vfs_object_create is called.
2533 */
2534int
2535vfs_object_create(vp, p, cred)
2536	struct vnode *vp;
2537	struct proc *p;
2538	struct ucred *cred;
2539{
2540	struct vattr vat;
2541	vm_object_t object;
2542	int error = 0;
2543
2544	if (!vn_isdisk(vp, NULL) && vn_canvmio(vp) == FALSE)
2545		return 0;
2546
2547retry:
2548	if ((object = vp->v_object) == NULL) {
2549		if (vp->v_type == VREG || vp->v_type == VDIR) {
2550			if ((error = VOP_GETATTR(vp, &vat, cred, p)) != 0)
2551				goto retn;
2552			object = vnode_pager_alloc(vp, vat.va_size, 0, 0);
2553		} else if (devsw(vp->v_rdev) != NULL) {
2554			/*
2555			 * This simply allocates the biggest object possible
2556			 * for a disk vnode.  This should be fixed, but doesn't
2557			 * cause any problems (yet).
2558			 */
2559			object = vnode_pager_alloc(vp, IDX_TO_OFF(INT_MAX), 0, 0);
2560		} else {
2561			goto retn;
2562		}
2563		/*
2564		 * Dereference the reference we just created.  This assumes
2565		 * that the object is associated with the vp.
2566		 */
2567		object->ref_count--;
2568		vp->v_usecount--;
2569	} else {
2570		if (object->flags & OBJ_DEAD) {
2571			VOP_UNLOCK(vp, 0, p);
2572			tsleep(object, PVM, "vodead", 0);
2573			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
2574			goto retry;
2575		}
2576	}
2577
2578	KASSERT(vp->v_object != NULL, ("vfs_object_create: NULL object"));
2579	vp->v_flag |= VOBJBUF;
2580
2581retn:
2582	return error;
2583}
2584
2585static void
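/*
 * Move a vnode onto the free list, removing it from the to-be-freed
 * list first if necessary.  Vnodes marked VAGE are inserted at the
 * head of the list so that they are reused soonest.
 */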
2586vfree(vp)
2587	struct vnode *vp;
2588{
2589	int s;
2590
2591	s = splbio();
2592	simple_lock(&vnode_free_list_slock);
2593	if (vp->v_flag & VTBFREE) {
2594		TAILQ_REMOVE(&vnode_tobefree_list, vp, v_freelist);
2595		vp->v_flag &= ~VTBFREE;
2596	}
2597	if (vp->v_flag & VAGE) {
2598		TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
2599	} else {
2600		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
2601	}
2602	freevnodes++;
2603	simple_unlock(&vnode_free_list_slock);
2604	vp->v_flag &= ~VAGE;
2605	vp->v_flag |= VFREE;
2606	splx(s);
2607}
2608
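/*
 * Take a vnode off the free (or to-be-freed) list, marking it busy.
 */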
2609void
2610vbusy(vp)
2611	struct vnode *vp;
2612{
2613	int s;
2614
2615	s = splbio();
2616	simple_lock(&vnode_free_list_slock);
2617	if (vp->v_flag & VTBFREE) {
2618		TAILQ_REMOVE(&vnode_tobefree_list, vp, v_freelist);
2619		vp->v_flag &= ~VTBFREE;
2620	} else {
2621		TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
2622		freevnodes--;
2623	}
2624	simple_unlock(&vnode_free_list_slock);
2625	vp->v_flag &= ~(VFREE|VAGE);
2626	splx(s);
2627}
2628
2629/*
2630 * Record a process's interest in events which might happen to
2631 * a vnode.  Because poll uses the historic select-style interface
2632 * internally, this routine serves as both the ``check for any
2633 * pending events'' and the ``record my interest in future events''
2634 * functions.  (These are done together, while the lock is held,
2635 * to avoid race conditions.)
2636 */
2637int
2638vn_pollrecord(vp, p, events)
2639	struct vnode *vp;
2640	struct proc *p;
2641	short events;
2642{
2643	simple_lock(&vp->v_pollinfo.vpi_lock);
2644	if (vp->v_pollinfo.vpi_revents & events) {
2645		/*
2646		 * This leaves events we are not interested
2647	 * in available for the other process,
2648	 * which presumably had requested them
2649		 * (otherwise they would never have been
2650		 * recorded).
2651		 */
2652		events &= vp->v_pollinfo.vpi_revents;
2653		vp->v_pollinfo.vpi_revents &= ~events;
2654
2655		simple_unlock(&vp->v_pollinfo.vpi_lock);
2656		return events;
2657	}
2658	vp->v_pollinfo.vpi_events |= events;
2659	selrecord(p, &vp->v_pollinfo.vpi_selinfo);
2660	simple_unlock(&vp->v_pollinfo.vpi_lock);
2661	return 0;
2662}
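
/*
 * Usage sketch (illustrative): a filesystem's VOP_POLL implementation
 * can simply be
 *
 *	return (vn_pollrecord(ap->a_vp, ap->a_p, ap->a_events));
 *
 * so the polling process is recorded before any later vn_pollevent()
 * wakeup.
 */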
2663
2664/*
2665 * Note the occurrence of an event.  If the VN_POLLEVENT macro is used,
2666 * it is possible for us to miss an event due to race conditions, but
2667 * that condition is expected to be rare, so for the moment it is the
2668 * preferred interface.
2669 */
2670void
2671vn_pollevent(vp, events)
2672	struct vnode *vp;
2673	short events;
2674{
2675	simple_lock(&vp->v_pollinfo.vpi_lock);
2676	if (vp->v_pollinfo.vpi_events & events) {
2677		/*
2678		 * We clear vpi_events so that we don't
2679		 * call selwakeup() twice if two events are
2680		 * posted before the polling process(es) is
2681		 * awakened.  This also ensures that we take at
2682		 * most one selwakeup() if the polling process
2683		 * is no longer interested.  However, it does
2684		 * mean that only one event can be noticed at
2685		 * a time.  (Perhaps we should only clear those
2686		 * event bits which we note?) XXX
2687		 */
2688		vp->v_pollinfo.vpi_events = 0;	/* &= ~events ??? */
2689		vp->v_pollinfo.vpi_revents |= events;
2690		selwakeup(&vp->v_pollinfo.vpi_selinfo);
2691	}
2692	simple_unlock(&vp->v_pollinfo.vpi_lock);
2693}
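
/*
 * Sketch (illustrative): code that just made data available on a
 * vnode posts the event through the wrapper macro, e.g.
 *
 *	VN_POLLEVENT(vp, POLLIN | POLLRDNORM);
 *
 * The macro tests vpi_events without taking vpi_lock, which is the
 * benign race alluded to above.
 */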
2694
2695/*
2696 * Wake up anyone polling on vp because it is being revoked.
2697 * This depends on dead_poll() returning POLLHUP for correct
2698 * behavior.
2699 */
2700void
2701vn_pollgone(vp)
2702	struct vnode *vp;
2703{
2704	simple_lock(&vp->v_pollinfo.vpi_lock);
2705	if (vp->v_pollinfo.vpi_events) {
2706		vp->v_pollinfo.vpi_events = 0;
2707		selwakeup(&vp->v_pollinfo.vpi_selinfo);
2708	}
2709	simple_unlock(&vp->v_pollinfo.vpi_lock);
2710}
2711
2712
2713
2714/*
2715 * Routine to create and manage a filesystem syncer vnode.
2716 */
2717#define sync_close ((int (*) __P((struct  vop_close_args *)))nullop)
2718static int	sync_fsync __P((struct  vop_fsync_args *));
2719static int	sync_inactive __P((struct  vop_inactive_args *));
2720static int	sync_reclaim  __P((struct  vop_reclaim_args *));
2721#define sync_lock ((int (*) __P((struct  vop_lock_args *)))vop_nolock)
2722#define sync_unlock ((int (*) __P((struct  vop_unlock_args *)))vop_nounlock)
2723static int	sync_print __P((struct vop_print_args *));
2724#define sync_islocked ((int(*) __P((struct vop_islocked_args *)))vop_noislocked)
2725
2726static vop_t **sync_vnodeop_p;
2727static struct vnodeopv_entry_desc sync_vnodeop_entries[] = {
2728	{ &vop_default_desc,	(vop_t *) vop_eopnotsupp },
2729	{ &vop_close_desc,	(vop_t *) sync_close },		/* close */
2730	{ &vop_fsync_desc,	(vop_t *) sync_fsync },		/* fsync */
2731	{ &vop_inactive_desc,	(vop_t *) sync_inactive },	/* inactive */
2732	{ &vop_reclaim_desc,	(vop_t *) sync_reclaim },	/* reclaim */
2733	{ &vop_lock_desc,	(vop_t *) sync_lock },		/* lock */
2734	{ &vop_unlock_desc,	(vop_t *) sync_unlock },	/* unlock */
2735	{ &vop_print_desc,	(vop_t *) sync_print },		/* print */
2736	{ &vop_islocked_desc,	(vop_t *) sync_islocked },	/* islocked */
2737	{ NULL, NULL }
2738};
2739static struct vnodeopv_desc sync_vnodeop_opv_desc =
2740	{ &sync_vnodeop_p, sync_vnodeop_entries };
2741
2742VNODEOP_SET(sync_vnodeop_opv_desc);
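
/*
 * Operations not listed in the table above (read, write, lookup, ...)
 * hit the default entry and return EOPNOTSUPP; the syncer vnode
 * exists only to be fsync'ed.
 */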
2743
2744/*
2745 * Create a new filesystem syncer vnode for the specified mount point.
2746 */
2747int
2748vfs_allocate_syncvnode(mp)
2749	struct mount *mp;
2750{
2751	struct vnode *vp;
2752	static long start, incr, next;
2753	int error;
2754
2755	/* Allocate a new vnode */
2756	if ((error = getnewvnode(VT_VFS, mp, sync_vnodeop_p, &vp)) != 0) {
2757		mp->mnt_syncer = NULL;
2758		return (error);
2759	}
2760	vp->v_type = VNON;
2761	/*
2762	 * Place the vnode onto the syncer worklist. We attempt to
2763	 * scatter them about on the list so that they will go off
2764	 * at evenly distributed times even if all the filesystems
2765	 * are mounted at once.
2766	 */
2767	next += incr;
2768	if (next == 0 || next > syncer_maxdelay) {
2769		start /= 2;
2770		incr /= 2;
2771		if (start == 0) {
2772			start = syncer_maxdelay / 2;
2773			incr = syncer_maxdelay;
2774		}
2775		next = start;
2776	}
2777	vn_syncer_add_to_worklist(vp, syncdelay > 0 ? next % syncdelay : 0);
2778	mp->mnt_syncer = vp;
2779	return (0);
2780}
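
/*
 * Worked example of the scattering above, assuming the default
 * syncer_maxdelay of 32 and syncdelay of 30: the first call sets
 * start = 16, incr = 32 and lands in slot 16; the second overflows
 * (16 + 32 > 32) and halves to start = 8, incr = 16, landing in
 * slot 8; later mounts get 24, 4, 12, 20, 28, ... so the slots
 * progressively bisect the syncer wheel.
 */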
2781
2782/*
2783 * Do a lazy sync of the filesystem.
2784 */
2785static int
2786sync_fsync(ap)
2787	struct vop_fsync_args /* {
2788		struct vnode *a_vp;
2789		struct ucred *a_cred;
2790		int a_waitfor;
2791		struct proc *a_p;
2792	} */ *ap;
2793{
2794	struct vnode *syncvp = ap->a_vp;
2795	struct mount *mp = syncvp->v_mount;
2796	struct proc *p = ap->a_p;
2797	int asyncflag;
2798
2799	/*
2800	 * We only need to do something if this is a lazy evaluation.
2801	 */
2802	if (ap->a_waitfor != MNT_LAZY)
2803		return (0);
2804
2805	/*
2806	 * Move ourselves to the back of the sync list.
2807	 */
2808	vn_syncer_add_to_worklist(syncvp, syncdelay);
2809
2810	/*
2811	 * Walk the list of vnodes pushing all that are dirty and
2812	 * not already on the sync list.
2813	 */
2814	simple_lock(&mountlist_slock);
2815	if (vfs_busy(mp, LK_EXCLUSIVE | LK_NOWAIT, &mountlist_slock, p) != 0) {
2816		simple_unlock(&mountlist_slock);
2817		return (0);
2818	}
2819	asyncflag = mp->mnt_flag & MNT_ASYNC;
2820	mp->mnt_flag &= ~MNT_ASYNC;
2821	vfs_msync(mp, MNT_NOWAIT);
2822	VFS_SYNC(mp, MNT_LAZY, ap->a_cred, p);
2823	if (asyncflag)
2824		mp->mnt_flag |= MNT_ASYNC;
2825	vfs_unbusy(mp, p);
2826	return (0);
2827}
2828
2829/*
2830 * The syncer vnode is no longer referenced.
2831 */
2832static int
2833sync_inactive(ap)
2834	struct vop_inactive_args /* {
2835		struct vnode *a_vp;
2836		struct proc *a_p;
2837	} */ *ap;
2838{
2839
2840	vgone(ap->a_vp);
2841	return (0);
2842}
2843
2844/*
2845 * The syncer vnode is no longer needed and is being decommissioned.
2846 *
2847 * Modifications to the worklist must be protected at splbio().
2848 */
2849static int
2850sync_reclaim(ap)
2851	struct vop_reclaim_args /* {
2852		struct vnode *a_vp;
2853	} */ *ap;
2854{
2855	struct vnode *vp = ap->a_vp;
2856	int s;
2857
2858	s = splbio();
2859	vp->v_mount->mnt_syncer = NULL;
2860	if (vp->v_flag & VONWORKLST) {
2861		LIST_REMOVE(vp, v_synclist);
2862		vp->v_flag &= ~VONWORKLST;
2863	}
2864	splx(s);
2865
2866	return (0);
2867}
2868
2869/*
2870 * Print out a syncer vnode.
2871 */
2872static int
2873sync_print(ap)
2874	struct vop_print_args /* {
2875		struct vnode *a_vp;
2876	} */ *ap;
2877{
2878	struct vnode *vp = ap->a_vp;
2879
2880	printf("syncer vnode");
2881	if (vp->v_vnlock != NULL)
2882		lockmgr_printinfo(vp->v_vnlock);
2883	printf("\n");
2884	return (0);
2885}
2886
2887/*
2888 * Extract the dev_t from a VBLK or VCHR vnode.
2889 */
2890dev_t
2891vn_todev(vp)
2892	struct vnode *vp;
2893{
2894	if (vp->v_type != VBLK && vp->v_type != VCHR)
2895		return (NODEV);
2896	return (vp->v_rdev);
2897}
2898
2899/*
2900 * Check whether a vnode represents a disk device.
2901 */
2902int
2903vn_isdisk(vp, errp)
2904	struct vnode *vp;
2905	int *errp;
2906{
2907	if (vp->v_type != VBLK && vp->v_type != VCHR) {
2908		if (errp != NULL)
2909			*errp = ENOTBLK;
2910		return (0);
2911	}
2912	if (!devsw(vp->v_rdev)) {
2913		if (errp != NULL)
2914			*errp = ENXIO;
2915		return (0);
2916	}
2917	if (!(devsw(vp->v_rdev)->d_flags & D_DISK)) {
2918		if (errp != NULL)
2919			*errp = ENOTBLK;
2920		return (0);
2921	}
2922	if (errp != NULL)
2923		*errp = 0;
2924	return (1);
2925}
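
/*
 * Callers commonly use the errp convention directly (illustrative):
 *
 *	if (!vn_isdisk(vp, &error))
 *		return (error);
 */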
2926
2927void
2928NDFREE(ndp, flags)
2929	struct nameidata *ndp;
2930	const uint flags;
2931{
2932	if (!(flags & NDF_NO_FREE_PNBUF) &&
2933	    (ndp->ni_cnd.cn_flags & HASBUF)) {
2934		zfree(namei_zone, ndp->ni_cnd.cn_pnbuf);
2935		ndp->ni_cnd.cn_flags &= ~HASBUF;
2936	}
2937	if (!(flags & NDF_NO_DVP_UNLOCK) &&
2938	    (ndp->ni_cnd.cn_flags & LOCKPARENT) &&
2939	    ndp->ni_dvp != ndp->ni_vp)
2940		VOP_UNLOCK(ndp->ni_dvp, 0, ndp->ni_cnd.cn_proc);
2941	if (!(flags & NDF_NO_DVP_RELE) &&
2942	    (ndp->ni_cnd.cn_flags & (LOCKPARENT|WANTPARENT))) {
2943		vrele(ndp->ni_dvp);
2944		ndp->ni_dvp = NULL;
2945	}
2946	if (!(flags & NDF_NO_VP_UNLOCK) &&
2947	    (ndp->ni_cnd.cn_flags & LOCKLEAF) && ndp->ni_vp)
2948		VOP_UNLOCK(ndp->ni_vp, 0, ndp->ni_cnd.cn_proc);
2949	if (!(flags & NDF_NO_VP_RELE) &&
2950	    ndp->ni_vp) {
2951		vrele(ndp->ni_vp);
2952		ndp->ni_vp = NULL;
2953	}
2954	if (!(flags & NDF_NO_STARTDIR_RELE) &&
2955	    (ndp->ni_cnd.cn_flags & SAVESTART)) {
2956		vrele(ndp->ni_startdir);
2957		ndp->ni_startdir = NULL;
2958	}
2959}
2960
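/*
 * Usage sketch (illustrative): most callers release only what they
 * do not intend to keep, e.g. just the pathname buffer:
 *
 *	NDFREE(&nd, NDF_ONLY_PNBUF);
 *
 * where NDF_ONLY_PNBUF is the convenience mask from sys/namei.h.
 */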