/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.31 (Berkeley) 5/26/95
 */

/*
 * External virtual filesystem routines
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/vfs_subr.c 120780 2003-10-05 05:35:41Z jeff $");

#include "opt_ddb.h"
#include "opt_mac.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/eventhandler.h>
#include <sys/extattr.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/mac.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>
#include <vm/uma.h>

static MALLOC_DEFINE(M_NETADDR, "Export Host", "Export host address structure");

static void	addalias(struct vnode *vp, dev_t nvp_rdev);
static void	insmntque(struct vnode *vp, struct mount *mp);
static void	vclean(struct vnode *vp, int flags, struct thread *td);
static void	vlruvp(struct vnode *vp);
static int	flushbuflist(struct buf *blist, int flags, struct vnode *vp,
		    int slpflag, int slptimeo, int *errorp);
static int	vtryrecycle(struct vnode *vp);
static void	vx_lock(struct vnode *vp);
static void	vx_unlock(struct vnode *vp);
static void	vgonechrl(struct vnode *vp, struct thread *td);


/*
 * Number of vnodes in existence.  Increased whenever getnewvnode()
 * allocates a new vnode, never decreased.
 */
static unsigned long	numvnodes;

SYSCTL_LONG(_vfs, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0, "");

/*
 * Conversion tables for conversion from vnode types to inode formats
 * and back.
 */
enum vtype iftovt_tab[16] = {
	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
};
int vttoif_tab[9] = {
	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
	S_IFSOCK, S_IFIFO, S_IFMT,
};

/*
 * List of vnodes that are ready for recycling.
 */
static TAILQ_HEAD(freelst, vnode) vnode_free_list;

/*
 * Minimum number of free vnodes.  If the free list holds fewer vnodes
 * than this, getnewvnode() will allocate a new vnode rather than recycle one.
 */
static u_long wantfreevnodes = 25;
SYSCTL_LONG(_vfs, OID_AUTO, wantfreevnodes, CTLFLAG_RW, &wantfreevnodes, 0, "");
/* Number of vnodes in the free list. */
static u_long freevnodes;
SYSCTL_LONG(_vfs, OID_AUTO, freevnodes, CTLFLAG_RD, &freevnodes, 0, "");

/*
 * Various variables used for debugging the new implementation of
 * reassignbuf().
 * XXX these are probably of (very) limited utility now.
 */
static int reassignbufcalls;
SYSCTL_INT(_vfs, OID_AUTO, reassignbufcalls, CTLFLAG_RW, &reassignbufcalls, 0, "");
static int nameileafonly;
SYSCTL_INT(_vfs, OID_AUTO, nameileafonly, CTLFLAG_RW, &nameileafonly, 0, "");

/*
 * Cache for the mount type id assigned to NFS.  This is used for
 * special checks in nfs/nfs_nqlease.c and vm/vnode_pager.c.
 */
int	nfs_mount_type = -1;

/* To keep more than one thread at a time from running vfs_getnewfsid */
static struct mtx mntid_mtx;

/*
 * Lock for any access to the following:
 *	vnode_free_list
 *	numvnodes
 *	freevnodes
 */
static struct mtx vnode_free_list_mtx;

/*
 * For any iteration/modification of dev->si_hlist (linked through
 * v_specnext)
 */
static struct mtx spechash_mtx;

/* Publicly exported FS */
struct nfs_public nfs_pub;

/* Zone for allocation of new vnodes - used exclusively by getnewvnode() */
static uma_zone_t vnode_zone;
static uma_zone_t vnodepoll_zone;

/* Set to 1 to print out reclaim of active vnodes */
int	prtactive;

/*
 * The workitem queue.
 *
 * It is useful to delay writes of file data and filesystem metadata
 * for tens of seconds so that quickly created and deleted files need
 * not waste disk bandwidth being created and removed. To realize this,
 * we append vnodes to a "workitem" queue. When running with a soft
 * updates implementation, most pending metadata dependencies should
 * not wait for more than a few seconds. Thus, metadata written to
 * mounted block devices is delayed only about half the time that file
 * data is delayed.
 * Similarly, directory updates are more critical, so are only delayed
 * about a third the time that file data is delayed. Thus, there are
 * SYNCER_MAXDELAY queues that are processed round-robin at a rate of
 * one each second (driven off the filesystem syncer process). The
 * syncer_delayno variable indicates the next queue that is to be processed.
 * Items that need to be processed soon are placed in this queue:
 *
 *	syncer_workitem_pending[syncer_delayno]
 *
 * A delay of fifteen seconds is done by placing the request fifteen
 * entries later in the queue:
 *
 *	syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask]
 *
 */
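
/*
 * Illustrative sketch (comment only, not compiled): the slot arithmetic
 * used by vn_syncer_add_to_worklist() below, given the declarations that
 * follow.  With the default SYNCER_MAXDELAY of 32, syncer_mask is 31, so
 * a fifteen second delay lands fifteen slots ahead of the slot currently
 * being drained:
 *
 *	if (delay > syncer_maxdelay - 2)
 *		delay = syncer_maxdelay - 2;
 *	slot = (syncer_delayno + delay) & syncer_mask;
 */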
static int syncer_delayno;
static long syncer_mask;
LIST_HEAD(synclist, vnode);
static struct synclist *syncer_workitem_pending;
/*
 * The sync_mtx protects:
 *	vp->v_synclist
 *	syncer_delayno
 *	syncer_workitem_pending
 *	rushjob
 */
static struct mtx sync_mtx;

#define SYNCER_MAXDELAY		32
static int syncer_maxdelay = SYNCER_MAXDELAY;	/* maximum delay time */
static int syncdelay = 30;		/* max time to delay syncing data */
static int filedelay = 30;		/* time to delay syncing files */
SYSCTL_INT(_kern, OID_AUTO, filedelay, CTLFLAG_RW, &filedelay, 0, "");
static int dirdelay = 29;		/* time to delay syncing directories */
SYSCTL_INT(_kern, OID_AUTO, dirdelay, CTLFLAG_RW, &dirdelay, 0, "");
static int metadelay = 28;		/* time to delay syncing metadata */
SYSCTL_INT(_kern, OID_AUTO, metadelay, CTLFLAG_RW, &metadelay, 0, "");
static int rushjob;		/* number of slots to run ASAP */
static int stat_rush_requests;	/* number of times I/O speeded up */
SYSCTL_INT(_debug, OID_AUTO, rush_requests, CTLFLAG_RW, &stat_rush_requests, 0, "");

/*
 * Number of vnodes we want to exist at any one time.  This is mostly used
 * to size hash tables in vnode-related code.  It is normally not used in
 * getnewvnode(), as wantfreevnodes is normally nonzero.
 *
 * XXX desiredvnodes is historical cruft and should not exist.
 */
int desiredvnodes;
SYSCTL_INT(_kern, KERN_MAXVNODES, maxvnodes, CTLFLAG_RW,
    &desiredvnodes, 0, "Maximum number of vnodes");
static int minvnodes;
SYSCTL_INT(_kern, OID_AUTO, minvnodes, CTLFLAG_RW,
    &minvnodes, 0, "Minimum number of vnodes");
static int vnlru_nowhere;
SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RW, &vnlru_nowhere, 0,
    "Number of times the vnlru process ran without success");

/* Hook for calling soft updates */
int (*softdep_process_worklist_hook)(struct mount *);

/*
 * This only exists to suppress warnings from unlocked specfs accesses.  It is
 * no longer ok to have an unlocked VFS.
 */
#define IGNORE_LOCK(vp) ((vp)->v_type == VCHR || (vp)->v_type == VBAD)

/* Print lock violations */
int vfs_badlock_print = 1;

/* Panic on violation */
int vfs_badlock_panic = 1;

/* Check for interlock across VOPs */
int vfs_badlock_mutex = 1;

static void
vfs_badlock(char *msg, char *str, struct vnode *vp)
{
	if (vfs_badlock_print)
		printf("%s: %p %s\n", str, vp, msg);
	if (vfs_badlock_panic)
		Debugger("Lock violation.\n");
}

void
assert_vi_unlocked(struct vnode *vp, char *str)
{
	if (vfs_badlock_mutex && mtx_owned(VI_MTX(vp)))
		vfs_badlock("interlock is locked but should not be", str, vp);
}

void
assert_vi_locked(struct vnode *vp, char *str)
{
	if (vfs_badlock_mutex && !mtx_owned(VI_MTX(vp)))
		vfs_badlock("interlock is not locked but should be", str, vp);
}

void
assert_vop_locked(struct vnode *vp, char *str)
{
	if (vp && !IGNORE_LOCK(vp) && !VOP_ISLOCKED(vp, NULL))
		vfs_badlock("is not locked but should be", str, vp);
}

void
assert_vop_unlocked(struct vnode *vp, char *str)
{
	if (vp && !IGNORE_LOCK(vp) &&
	    VOP_ISLOCKED(vp, curthread) == LK_EXCLUSIVE)
		vfs_badlock("is locked but should not be", str, vp);
}

void
assert_vop_elocked(struct vnode *vp, char *str)
{
	if (vp && !IGNORE_LOCK(vp) &&
	    VOP_ISLOCKED(vp, curthread) != LK_EXCLUSIVE)
		vfs_badlock("is not exclusive locked but should be", str, vp);
}

void
assert_vop_elocked_other(struct vnode *vp, char *str)
{
	if (vp && !IGNORE_LOCK(vp) &&
	    VOP_ISLOCKED(vp, curthread) != LK_EXCLOTHER)
		vfs_badlock("is not exclusive locked by another thread",
		    str, vp);
}

void
assert_vop_slocked(struct vnode *vp, char *str)
{
	if (vp && !IGNORE_LOCK(vp) &&
	    VOP_ISLOCKED(vp, curthread) != LK_SHARED)
		vfs_badlock("is not locked shared but should be", str, vp);
}

void
vop_rename_pre(void *ap)
{
	struct vop_rename_args *a = ap;

	if (a->a_tvp)
		ASSERT_VI_UNLOCKED(a->a_tvp, "VOP_RENAME");
	ASSERT_VI_UNLOCKED(a->a_tdvp, "VOP_RENAME");
	ASSERT_VI_UNLOCKED(a->a_fvp, "VOP_RENAME");
	ASSERT_VI_UNLOCKED(a->a_fdvp, "VOP_RENAME");

	/* Check the source (from) */
	if (a->a_tdvp != a->a_fdvp)
		ASSERT_VOP_UNLOCKED(a->a_fdvp, "vop_rename: fdvp locked.\n");
	if (a->a_tvp != a->a_fvp)
		ASSERT_VOP_UNLOCKED(a->a_fvp, "vop_rename: fvp locked.\n");

	/* Check the target */
	if (a->a_tvp)
		ASSERT_VOP_LOCKED(a->a_tvp, "vop_rename: tvp not locked.\n");

	ASSERT_VOP_LOCKED(a->a_tdvp, "vop_rename: tdvp not locked.\n");
}

void
vop_strategy_pre(void *ap)
{
	struct vop_strategy_args *a = ap;
	struct buf *bp;

	bp = a->a_bp;

	/*
	 * Cluster ops lock their component buffers but not the IO container.
	 */
	if ((bp->b_flags & B_CLUSTER) != 0)
		return;

	if (BUF_REFCNT(bp) < 1) {
		if (vfs_badlock_print)
			printf("VOP_STRATEGY: bp is not locked but should be.\n");
		if (vfs_badlock_panic)
			Debugger("Lock violation.\n");
	}
}

void
vop_lookup_pre(void *ap)
{
	struct vop_lookup_args *a = ap;
	struct vnode *dvp;

	dvp = a->a_dvp;

	ASSERT_VI_UNLOCKED(dvp, "VOP_LOOKUP");
	ASSERT_VOP_LOCKED(dvp, "VOP_LOOKUP");
}

void
vop_lookup_post(void *ap, int rc)
{
	struct vop_lookup_args *a = ap;
	struct componentname *cnp;
	struct vnode *dvp;
	struct vnode *vp;
	int flags;

	dvp = a->a_dvp;
	cnp = a->a_cnp;
	vp = *(a->a_vpp);
	flags = cnp->cn_flags;

	ASSERT_VI_UNLOCKED(dvp, "VOP_LOOKUP");
	/*
	 * If this is the last path component for this lookup and LOCKPARENT
	 * is set, or if there is an error, the directory has to be locked.
	 */
	if ((flags & LOCKPARENT) && (flags & ISLASTCN))
		ASSERT_VOP_LOCKED(dvp, "VOP_LOOKUP (LOCKPARENT)");
	else if (rc != 0)
		ASSERT_VOP_LOCKED(dvp, "VOP_LOOKUP (error)");
	else if (dvp != vp)
		ASSERT_VOP_UNLOCKED(dvp, "VOP_LOOKUP (dvp)");

	if (flags & PDIRUNLOCK)
		ASSERT_VOP_UNLOCKED(dvp, "VOP_LOOKUP (PDIRUNLOCK)");
}

void
vop_unlock_pre(void *ap)
{
	struct vop_unlock_args *a = ap;

	if (a->a_flags & LK_INTERLOCK)
		ASSERT_VI_LOCKED(a->a_vp, "VOP_UNLOCK");

	ASSERT_VOP_LOCKED(a->a_vp, "VOP_UNLOCK");
}

void
vop_unlock_post(void *ap, int rc)
{
	struct vop_unlock_args *a = ap;

	if (a->a_flags & LK_INTERLOCK)
		ASSERT_VI_UNLOCKED(a->a_vp, "VOP_UNLOCK");
}

void
vop_lock_pre(void *ap)
{
	struct vop_lock_args *a = ap;

	if ((a->a_flags & LK_INTERLOCK) == 0)
		ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK");
	else
		ASSERT_VI_LOCKED(a->a_vp, "VOP_LOCK");
}

void
vop_lock_post(void *ap, int rc)
{
	struct vop_lock_args *a;

	a = ap;

	ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK");
	if (rc == 0)
		ASSERT_VOP_LOCKED(a->a_vp, "VOP_LOCK");
}

void
v_addpollinfo(struct vnode *vp)
{
	vp->v_pollinfo = uma_zalloc(vnodepoll_zone, M_WAITOK);
	mtx_init(&vp->v_pollinfo->vpi_lock, "vnode pollinfo", NULL, MTX_DEF);
}

/*
 * Initialize the vnode management data structures.
 */
static void
vntblinit(void *dummy __unused)
{

	/*
	 * Desiredvnodes is a function of the physical memory size and
	 * the kernel's heap size.  Specifically, desiredvnodes scales
	 * in proportion to the physical memory size until two fifths
	 * of the kernel's heap size is consumed by vnodes and vm
	 * objects.
	 */
	desiredvnodes = min(maxproc + cnt.v_page_count / 4, 2 * vm_kmem_size /
	    (5 * (sizeof(struct vm_object) + sizeof(struct vnode))));
	minvnodes = desiredvnodes / 4;
	mtx_init(&mountlist_mtx, "mountlist", NULL, MTX_DEF);
	mtx_init(&mntvnode_mtx, "mntvnode", NULL, MTX_DEF);
	mtx_init(&mntid_mtx, "mntid", NULL, MTX_DEF);
	mtx_init(&spechash_mtx, "spechash", NULL, MTX_DEF);
	TAILQ_INIT(&vnode_free_list);
	mtx_init(&vnode_free_list_mtx, "vnode_free_list", NULL, MTX_DEF);
	vnode_zone = uma_zcreate("VNODE", sizeof (struct vnode), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	vnodepoll_zone = uma_zcreate("VNODEPOLL", sizeof (struct vpollinfo),
	      NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	/*
	 * Initialize the filesystem syncer.
	 */
	syncer_workitem_pending = hashinit(syncer_maxdelay, M_VNODE,
		&syncer_mask);
	syncer_maxdelay = syncer_mask + 1;
	mtx_init(&sync_mtx, "Syncer mtx", NULL, MTX_DEF);
}
SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_FIRST, vntblinit, NULL)
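
/*
 * Worked example of the sizing above, with hypothetical numbers: on a
 * machine with 131072 pages of physical memory and a 200MB kernel heap,
 * the first term is maxproc + 131072 / 4 and the second is
 * 2 * 209715200 / (5 * (sizeof(struct vm_object) + sizeof(struct vnode)));
 * assuming the two structures total roughly 400 bytes, that is about
 * 209715 vnodes, and the smaller of the two terms wins.
 */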


/*
 * Mark a mount point as busy. Used to synchronize access and to delay
 * unmounting. Interlock is not released on failure.
 */
int
vfs_busy(mp, flags, interlkp, td)
	struct mount *mp;
	int flags;
	struct mtx *interlkp;
	struct thread *td;
{
	int lkflags;

	if (mp->mnt_kern_flag & MNTK_UNMOUNT) {
		if (flags & LK_NOWAIT)
			return (ENOENT);
		mp->mnt_kern_flag |= MNTK_MWAIT;
		/*
		 * Since all busy locks are shared except the exclusive
		 * lock granted when unmounting, the only place that a
		 * wakeup needs to be done is at the release of the
		 * exclusive lock at the end of dounmount.
		 */
		msleep(mp, interlkp, PVFS, "vfs_busy", 0);
		return (ENOENT);
	}
	lkflags = LK_SHARED | LK_NOPAUSE;
	if (interlkp)
		lkflags |= LK_INTERLOCK;
	if (lockmgr(&mp->mnt_lock, lkflags, interlkp, td))
		panic("vfs_busy: unexpected lock failure");
	return (0);
}

/*
 * Free a busy filesystem.
 */
void
vfs_unbusy(mp, td)
	struct mount *mp;
	struct thread *td;
{

	lockmgr(&mp->mnt_lock, LK_RELEASE, NULL, td);
}
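
/*
 * Typical usage sketch (comment only): walkers of the mount list busy
 * each mount point before dereferencing it and unbusy it afterwards,
 * as vnlru_proc() below does:
 *
 *	if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, td) == 0) {
 *		... operate on mp ...
 *		vfs_unbusy(mp, td);
 *	}
 */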

/*
 * Lookup a mount point by filesystem identifier.
 */
struct mount *
vfs_getvfs(fsid)
	fsid_t *fsid;
{
	register struct mount *mp;

	mtx_lock(&mountlist_mtx);
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
			mtx_unlock(&mountlist_mtx);
			return (mp);
		}
	}
	mtx_unlock(&mountlist_mtx);
	return ((struct mount *) 0);
}

/*
 * Get a new unique fsid.  Try to make its val[0] unique, since this value
 * will be used to create fake device numbers for stat().  Also try (but
 * not so hard) to make its val[0] unique mod 2^16, since some emulators only
 * support 16-bit device numbers.  We end up with unique val[0]'s for the
 * first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls.
 *
 * Keep in mind that several mounts may be running in parallel.  Starting
 * the search one past where the previous search terminated is both a
 * micro-optimization and a defense against returning the same fsid to
 * different mounts.
 */
void
vfs_getnewfsid(mp)
	struct mount *mp;
{
	static u_int16_t mntid_base;
	fsid_t tfsid;
	int mtype;

	mtx_lock(&mntid_mtx);
	mtype = mp->mnt_vfc->vfc_typenum;
	tfsid.val[1] = mtype;
	mtype = (mtype & 0xFF) << 24;
	for (;;) {
		tfsid.val[0] = makeudev(255,
		    mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF));
		mntid_base++;
		if (vfs_getvfs(&tfsid) == NULL)
			break;
	}
	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
	mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
	mtx_unlock(&mntid_mtx);
}

/*
 * Knob to control the precision of file timestamps:
 *
 *   0 = seconds only; nanoseconds zeroed.
 *   1 = seconds and nanoseconds, accurate within 1/HZ.
 *   2 = seconds and nanoseconds, truncated to microseconds.
 * >=3 = seconds and nanoseconds, maximum precision.
 */
enum { TSP_SEC, TSP_HZ, TSP_USEC, TSP_NSEC };

static int timestamp_precision = TSP_SEC;
SYSCTL_INT(_vfs, OID_AUTO, timestamp_precision, CTLFLAG_RW,
    &timestamp_precision, 0, "");
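
/*
 * The knob is runtime-tunable, e.g. "sysctl vfs.timestamp_precision=3"
 * for full-precision stamps; the TSP_SEC default keeps vfs_timestamp()
 * below on the cheap time_second path.
 */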

/*
 * Get a current timestamp.
 */
void
vfs_timestamp(tsp)
	struct timespec *tsp;
{
	struct timeval tv;

	switch (timestamp_precision) {
	case TSP_SEC:
		tsp->tv_sec = time_second;
		tsp->tv_nsec = 0;
		break;
	case TSP_HZ:
		getnanotime(tsp);
		break;
	case TSP_USEC:
		microtime(&tv);
		TIMEVAL_TO_TIMESPEC(&tv, tsp);
		break;
	case TSP_NSEC:
	default:
		nanotime(tsp);
		break;
	}
}

/*
 * Set vnode attributes to VNOVAL
 */
void
vattr_null(vap)
	register struct vattr *vap;
{

	vap->va_type = VNON;
	vap->va_size = VNOVAL;
	vap->va_bytes = VNOVAL;
	vap->va_mode = VNOVAL;
	vap->va_nlink = VNOVAL;
	vap->va_uid = VNOVAL;
	vap->va_gid = VNOVAL;
	vap->va_fsid = VNOVAL;
	vap->va_fileid = VNOVAL;
	vap->va_blocksize = VNOVAL;
	vap->va_rdev = VNOVAL;
	vap->va_atime.tv_sec = VNOVAL;
	vap->va_atime.tv_nsec = VNOVAL;
	vap->va_mtime.tv_sec = VNOVAL;
	vap->va_mtime.tv_nsec = VNOVAL;
	vap->va_ctime.tv_sec = VNOVAL;
	vap->va_ctime.tv_nsec = VNOVAL;
	vap->va_birthtime.tv_sec = VNOVAL;
	vap->va_birthtime.tv_nsec = VNOVAL;
	vap->va_flags = VNOVAL;
	vap->va_gen = VNOVAL;
	vap->va_vaflags = 0;
}
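
/*
 * Usage sketch (comment only, error handling elided): a caller preparing
 * a VOP_SETATTR() resets every attribute and then fills in just the
 * fields it intends to change; VNOVAL fields are ignored:
 *
 *	struct vattr va;
 *
 *	vattr_null(&va);
 *	va.va_size = newsize;
 *	error = VOP_SETATTR(vp, &va, cred, td);
 */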

/*
 * This routine is called when we have too many vnodes.  It attempts
 * to free <count> vnodes and will potentially free vnodes that still
 * have VM backing store (VM backing store is typically the cause
 * of a vnode blowout so we want to do this).  Therefore, this operation
 * is not considered cheap.
 *
 * A number of conditions may prevent a vnode from being reclaimed:
 * the buffer cache may have references on the vnode, a directory
 * vnode may still have references due to the namei cache representing
 * underlying files, or the vnode may be in active use.  It is not
 * desirable to reuse such vnodes.  These conditions may cause the
 * number of vnodes to reach some minimum value regardless of what
 * you set kern.maxvnodes to.  Do not set kern.maxvnodes too low.
 */
static int
vlrureclaim(struct mount *mp)
{
	struct vnode *vp;
	int done;
	int trigger;
	int usevnodes;
	int count;

	/*
	 * Calculate the trigger point; don't allow user
	 * screwups to blow us up.  This prevents us from
	 * recycling vnodes with lots of resident pages.  We
	 * aren't trying to free memory, we are trying to
	 * free vnodes.
	 */
	usevnodes = desiredvnodes;
	if (usevnodes <= 0)
		usevnodes = 1;
	trigger = cnt.v_page_count * 2 / usevnodes;

	done = 0;
	mtx_lock(&mntvnode_mtx);
	count = mp->mnt_nvnodelistsize / 10 + 1;
	while (count && (vp = TAILQ_FIRST(&mp->mnt_nvnodelist)) != NULL) {
		TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
		TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);

		if (vp->v_type != VNON &&
		    vp->v_type != VBAD &&
		    VI_TRYLOCK(vp)) {
			if (VMIGHTFREE(vp) &&           /* critical path opt */
			    (vp->v_object == NULL ||
			    vp->v_object->resident_page_count < trigger)) {
				mtx_unlock(&mntvnode_mtx);
				vgonel(vp, curthread);
				done++;
				mtx_lock(&mntvnode_mtx);
			} else
				VI_UNLOCK(vp);
		}
		--count;
	}
	mtx_unlock(&mntvnode_mtx);
	return done;
}

/*
 * Attempt to recycle vnodes in a context that is always safe to block.
 * Calling vlrureclaim() from the bowels of filesystem code has some
 * interesting deadlock problems.
 */
static struct proc *vnlruproc;
static int vnlruproc_sig;

static void
vnlru_proc(void)
{
	struct mount *mp, *nmp;
	int done;
	struct proc *p = vnlruproc;
	struct thread *td = FIRST_THREAD_IN_PROC(p);	/* XXXKSE */

	mtx_lock(&Giant);

	EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, p,
	    SHUTDOWN_PRI_FIRST);

	for (;;) {
		kthread_suspend_check(p);
		mtx_lock(&vnode_free_list_mtx);
		if (numvnodes - freevnodes <= desiredvnodes * 9 / 10) {
			mtx_unlock(&vnode_free_list_mtx);
			vnlruproc_sig = 0;
			wakeup(&vnlruproc_sig);
			tsleep(vnlruproc, PVFS, "vlruwt", hz);
			continue;
		}
		mtx_unlock(&vnode_free_list_mtx);
		done = 0;
		mtx_lock(&mountlist_mtx);
		for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
			if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, td)) {
				nmp = TAILQ_NEXT(mp, mnt_list);
				continue;
			}
			done += vlrureclaim(mp);
			mtx_lock(&mountlist_mtx);
			nmp = TAILQ_NEXT(mp, mnt_list);
			vfs_unbusy(mp, td);
		}
		mtx_unlock(&mountlist_mtx);
		if (done == 0) {
#if 0
			/* These messages are temporary debugging aids */
			if (vnlru_nowhere < 5)
				printf("vnlru process getting nowhere..\n");
			else if (vnlru_nowhere == 5)
				printf("vnlru process messages stopped.\n");
#endif
			vnlru_nowhere++;
			tsleep(vnlruproc, PPAUSE, "vlrup", hz * 3);
		}
	}
}

static struct kproc_desc vnlru_kp = {
	"vnlru",
	vnlru_proc,
	&vnlruproc
};
SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &vnlru_kp)


/*
 * Routines having to do with the management of the vnode table.
 */

/*
 * Check to see if a free vnode can be recycled. If it can,
 * recycle it and return it with the vnode interlock held.
 */
static int
vtryrecycle(struct vnode *vp)
{
	struct thread *td = curthread;
	vm_object_t object;
	struct mount *vnmp;
	int error;

	/* Don't recycle if we can't get the interlock */
	if (!VI_TRYLOCK(vp))
		return (EWOULDBLOCK);
	/*
	 * This vnode may be found and locked via some other list; if so we
	 * can't recycle it yet.
	 */
	if (vn_lock(vp, LK_INTERLOCK | LK_EXCLUSIVE | LK_NOWAIT, td) != 0)
		return (EWOULDBLOCK);
	/*
	 * Don't recycle if its filesystem is being suspended.
	 */
	if (vn_start_write(vp, &vnmp, V_NOWAIT) != 0) {
		error = EBUSY;
		goto done;
	}

	/*
	 * Don't recycle if we still have cached pages.
	 */
	if (VOP_GETVOBJECT(vp, &object) == 0) {
		VM_OBJECT_LOCK(object);
		if (object->resident_page_count ||
		    object->ref_count) {
			VM_OBJECT_UNLOCK(object);
			error = EBUSY;
			goto done;
		}
		VM_OBJECT_UNLOCK(object);
	}
	if (LIST_FIRST(&vp->v_cache_src)) {
		/*
		 * note: nameileafonly sysctl is temporary,
		 * for debugging only, and will eventually be
		 * removed.
		 */
		if (nameileafonly > 0) {
			/*
			 * Do not reuse namei-cached directory
			 * vnodes that have cached
			 * subdirectories.
			 */
			if (cache_leaf_test(vp) < 0) {
				error = EISDIR;
				goto done;
			}
		} else if (nameileafonly < 0 ||
			    vmiodirenable == 0) {
			/*
			 * Do not reuse namei-cached directory
			 * vnodes if nameileafonly is -1 or
			 * if VMIO backing for directories is
			 * turned off (otherwise we reuse them
			 * too quickly).
			 */
			error = EBUSY;
			goto done;
		}
	}
	/*
	 * If we got this far, we need to acquire the interlock and see if
	 * anyone picked up this vnode from another list.  If not, we will
	 * mark it with XLOCK via vgonel() so that anyone who does find it
	 * will skip over it.
	 */
	VI_LOCK(vp);
	if (VSHOULDBUSY(vp) && (vp->v_iflag & VI_XLOCK) == 0) {
		VI_UNLOCK(vp);
		error = EBUSY;
		goto done;
	}
	mtx_lock(&vnode_free_list_mtx);
	TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
	vp->v_iflag &= ~VI_FREE;
	mtx_unlock(&vnode_free_list_mtx);
	vp->v_iflag |= VI_DOOMED;
	if (vp->v_type != VBAD) {
		VOP_UNLOCK(vp, 0, td);
		vgonel(vp, td);
		VI_LOCK(vp);
	} else
		VOP_UNLOCK(vp, 0, td);
	vn_finished_write(vnmp);
	return (0);
done:
	VOP_UNLOCK(vp, 0, td);
	return (error);
}

/*
 * Return the next vnode from the free list.
 */
int
getnewvnode(tag, mp, vops, vpp)
	const char *tag;
	struct mount *mp;
	vop_t **vops;
	struct vnode **vpp;
{
	struct vnode *vp = NULL;
	struct vpollinfo *pollinfo = NULL;

	mtx_lock(&vnode_free_list_mtx);

	/*
	 * Try to reuse vnodes if we hit the max.  This situation only
	 * occurs in certain large-memory (2G+) situations.  We cannot
	 * attempt to directly reclaim vnodes due to nasty recursion
	 * problems.
	 */
	while (numvnodes - freevnodes > desiredvnodes) {
		if (vnlruproc_sig == 0) {
			vnlruproc_sig = 1;      /* avoid unnecessary wakeups */
			wakeup(vnlruproc);
		}
		mtx_unlock(&vnode_free_list_mtx);
		tsleep(&vnlruproc_sig, PVFS, "vlruwk", hz);
		mtx_lock(&vnode_free_list_mtx);
	}

	/*
	 * Attempt to reuse a vnode already on the free list, allocating
	 * a new vnode if we can't find one or if we have not reached a
	 * good minimum for good LRU performance.
	 */

	if (freevnodes >= wantfreevnodes && numvnodes >= minvnodes) {
		int error;
		int count;

		for (count = 0; count < freevnodes; count++) {
			vp = TAILQ_FIRST(&vnode_free_list);

			KASSERT(vp->v_usecount == 0 &&
			    (vp->v_iflag & VI_DOINGINACT) == 0,
			    ("getnewvnode: free vnode isn't"));

			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
			TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
			mtx_unlock(&vnode_free_list_mtx);
			error = vtryrecycle(vp);
			mtx_lock(&vnode_free_list_mtx);
			if (error == 0)
				break;
			vp = NULL;
		}
	}
	if (vp) {
		freevnodes--;
		mtx_unlock(&vnode_free_list_mtx);

#ifdef INVARIANTS
		{
			if (vp->v_data)
				panic("cleaned vnode isn't");
			if (vp->v_numoutput)
				panic("Clean vnode has pending I/O's");
			if (vp->v_writecount != 0)
				panic("Non-zero write count");
		}
#endif
		if ((pollinfo = vp->v_pollinfo) != NULL) {
			/*
			 * To avoid lock order reversals, the call to
			 * uma_zfree() must be delayed until the vnode
			 * interlock is released.
			 */
			vp->v_pollinfo = NULL;
		}
#ifdef MAC
		mac_destroy_vnode(vp);
#endif
		vp->v_iflag = 0;
		vp->v_vflag = 0;
		vp->v_lastw = 0;
		vp->v_lasta = 0;
		vp->v_cstart = 0;
		vp->v_clen = 0;
		vp->v_socket = 0;
		lockdestroy(vp->v_vnlock);
		lockinit(vp->v_vnlock, PVFS, tag, VLKTIMEOUT, LK_NOPAUSE);
		KASSERT(vp->v_cleanbufcnt == 0, ("cleanbufcnt not 0"));
		KASSERT(vp->v_cleanblkroot == NULL, ("cleanblkroot not NULL"));
		KASSERT(vp->v_dirtybufcnt == 0, ("dirtybufcnt not 0"));
		KASSERT(vp->v_dirtyblkroot == NULL, ("dirtyblkroot not NULL"));
	} else {
		numvnodes++;
		mtx_unlock(&vnode_free_list_mtx);

		vp = (struct vnode *) uma_zalloc(vnode_zone, M_WAITOK|M_ZERO);
		mtx_init(&vp->v_interlock, "vnode interlock", NULL, MTX_DEF);
		VI_LOCK(vp);
		vp->v_dd = vp;
		vp->v_vnlock = &vp->v_lock;
		lockinit(vp->v_vnlock, PVFS, tag, VLKTIMEOUT, LK_NOPAUSE);
		cache_purge(vp);		/* Sets up v_id. */
		LIST_INIT(&vp->v_cache_src);
		TAILQ_INIT(&vp->v_cache_dst);
	}

	TAILQ_INIT(&vp->v_cleanblkhd);
	TAILQ_INIT(&vp->v_dirtyblkhd);
	vp->v_type = VNON;
	vp->v_tag = tag;
	vp->v_op = vops;
	*vpp = vp;
	vp->v_usecount = 1;
	vp->v_data = 0;
	vp->v_cachedid = -1;
	VI_UNLOCK(vp);
	if (pollinfo != NULL) {
		mtx_destroy(&pollinfo->vpi_lock);
		uma_zfree(vnodepoll_zone, pollinfo);
	}
#ifdef MAC
	mac_init_vnode(vp);
	if (mp != NULL && (mp->mnt_flag & MNT_MULTILABEL) == 0)
		mac_associate_vnode_singlelabel(mp, vp);
#endif
	insmntque(vp, mp);

	return (0);
}
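
/*
 * Usage sketch (comment only; "myfs" and ip are hypothetical): a
 * filesystem's vget/create path typically allocates the vnode, hangs
 * its private data off v_data, and locks it before use:
 *
 *	error = getnewvnode("myfs", mp, myfs_vnodeop_p, &vp);
 *	if (error == 0) {
 *		vp->v_data = ip;
 *		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
 *	}
 */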

/*
 * Move a vnode from one mount queue to another.
 */
static void
insmntque(vp, mp)
	register struct vnode *vp;
	register struct mount *mp;
{

	mtx_lock(&mntvnode_mtx);
	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mount != NULL) {
		KASSERT(vp->v_mount->mnt_nvnodelistsize > 0,
			("bad mount point vnode list size"));
		TAILQ_REMOVE(&vp->v_mount->mnt_nvnodelist, vp, v_nmntvnodes);
		vp->v_mount->mnt_nvnodelistsize--;
	}
	/*
	 * Insert into list of vnodes for the new mount point, if available.
	 */
	if ((vp->v_mount = mp) != NULL) {
		TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
		mp->mnt_nvnodelistsize++;
	}
	mtx_unlock(&mntvnode_mtx);
}

/*
 * Update outstanding I/O count and do wakeup if requested.
 */
void
vwakeup(bp)
	register struct buf *bp;
{
	register struct vnode *vp;

	bp->b_flags &= ~B_WRITEINPROG;
	if ((vp = bp->b_vp)) {
		VI_LOCK(vp);
		vp->v_numoutput--;
		if (vp->v_numoutput < 0)
			panic("vwakeup: neg numoutput");
		if ((vp->v_numoutput == 0) && (vp->v_iflag & VI_BWAIT)) {
			vp->v_iflag &= ~VI_BWAIT;
			wakeup(&vp->v_numoutput);
		}
		VI_UNLOCK(vp);
	}
}

/*
 * Flush out and invalidate all buffers associated with a vnode.
 * Called with the underlying object locked.
 */
int
vinvalbuf(vp, flags, cred, td, slpflag, slptimeo)
	struct vnode *vp;
	int flags;
	struct ucred *cred;
	struct thread *td;
	int slpflag, slptimeo;
{
	struct buf *blist;
	int error;
	vm_object_t object;

	GIANT_REQUIRED;

	ASSERT_VOP_LOCKED(vp, "vinvalbuf");

	VI_LOCK(vp);
	if (flags & V_SAVE) {
		while (vp->v_numoutput) {
			vp->v_iflag |= VI_BWAIT;
			error = msleep(&vp->v_numoutput, VI_MTX(vp),
			    slpflag | (PRIBIO + 1), "vinvlbuf", slptimeo);
			if (error) {
				VI_UNLOCK(vp);
				return (error);
			}
		}
		if (!TAILQ_EMPTY(&vp->v_dirtyblkhd)) {
			VI_UNLOCK(vp);
			if ((error = VOP_FSYNC(vp, cred, MNT_WAIT, td)) != 0)
				return (error);
			/*
			 * XXX We could save a lock/unlock if this was only
			 * enabled under INVARIANTS
			 */
			VI_LOCK(vp);
			if (vp->v_numoutput > 0 ||
			    !TAILQ_EMPTY(&vp->v_dirtyblkhd))
				panic("vinvalbuf: dirty bufs");
		}
	}
	/*
	 * If you alter this loop please notice that interlock is dropped and
	 * reacquired in flushbuflist.  Special care is needed to ensure that
	 * no race conditions occur from this.
	 */
	for (error = 0;;) {
		if ((blist = TAILQ_FIRST(&vp->v_cleanblkhd)) != 0 &&
		    flushbuflist(blist, flags, vp, slpflag, slptimeo, &error)) {
			if (error)
				break;
			continue;
		}
		if ((blist = TAILQ_FIRST(&vp->v_dirtyblkhd)) != 0 &&
		    flushbuflist(blist, flags, vp, slpflag, slptimeo, &error)) {
			if (error)
				break;
			continue;
		}
		break;
	}
	if (error) {
		VI_UNLOCK(vp);
		return (error);
	}

	/*
	 * Wait for I/O to complete.  XXX needs cleaning up.  The vnode can
	 * have write I/O in-progress but if there is a VM object then the
	 * VM object can also have read-I/O in-progress.
	 */
	do {
		while (vp->v_numoutput > 0) {
			vp->v_iflag |= VI_BWAIT;
			msleep(&vp->v_numoutput, VI_MTX(vp), PVM, "vnvlbv", 0);
		}
		VI_UNLOCK(vp);
		if (VOP_GETVOBJECT(vp, &object) == 0) {
			VM_OBJECT_LOCK(object);
			vm_object_pip_wait(object, "vnvlbx");
			VM_OBJECT_UNLOCK(object);
		}
		VI_LOCK(vp);
	} while (vp->v_numoutput > 0);
	VI_UNLOCK(vp);

	/*
	 * Destroy the copy in the VM cache, too.
	 */
	if (VOP_GETVOBJECT(vp, &object) == 0) {
		VM_OBJECT_LOCK(object);
		vm_object_page_remove(object, 0, 0,
			(flags & V_SAVE) ? TRUE : FALSE);
		VM_OBJECT_UNLOCK(object);
	}

#ifdef INVARIANTS
	VI_LOCK(vp);
	if ((flags & (V_ALT | V_NORMAL)) == 0 &&
	    (!TAILQ_EMPTY(&vp->v_dirtyblkhd) ||
	     !TAILQ_EMPTY(&vp->v_cleanblkhd)))
		panic("vinvalbuf: flush failed");
	VI_UNLOCK(vp);
#endif
	return (0);
}

/*
 * Flush out buffers on the specified list.
 *
 */
static int
flushbuflist(blist, flags, vp, slpflag, slptimeo, errorp)
	struct buf *blist;
	int flags;
	struct vnode *vp;
	int slpflag, slptimeo;
	int *errorp;
{
	struct buf *bp, *nbp;
	int found, error;

	ASSERT_VI_LOCKED(vp, "flushbuflist");

	for (found = 0, bp = blist; bp; bp = nbp) {
		nbp = TAILQ_NEXT(bp, b_vnbufs);
		if (((flags & V_NORMAL) && (bp->b_xflags & BX_ALTDATA)) ||
		    ((flags & V_ALT) && (bp->b_xflags & BX_ALTDATA) == 0)) {
			continue;
		}
		found += 1;
		error = BUF_TIMELOCK(bp,
		    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, VI_MTX(vp),
		    "flushbuf", slpflag, slptimeo);
		if (error) {
			if (error != ENOLCK)
				*errorp = error;
			goto done;
		}
		/*
		 * XXX Since there are no node locks for NFS, I
		 * believe there is a slight chance that a delayed
		 * write will occur while sleeping just above, so
		 * check for it.  Note that vfs_bio_awrite expects
		 * buffers to reside on a queue, while BUF_WRITE and
		 * brelse do not.
		 */
		if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) &&
			(flags & V_SAVE)) {

			if (bp->b_vp == vp) {
				if (bp->b_flags & B_CLUSTEROK) {
					vfs_bio_awrite(bp);
				} else {
					bremfree(bp);
					bp->b_flags |= B_ASYNC;
					BUF_WRITE(bp);
				}
			} else {
				bremfree(bp);
				(void) BUF_WRITE(bp);
			}
			goto done;
		}
		bremfree(bp);
		bp->b_flags |= (B_INVAL | B_NOCACHE | B_RELBUF);
		bp->b_flags &= ~B_ASYNC;
		brelse(bp);
		VI_LOCK(vp);
	}
	return (found);
done:
	VI_LOCK(vp);
	return (found);
}

/*
 * Truncate a file's buffer and pages to a specified length.  This
 * is in lieu of the old vinvalbuf mechanism, which performed unneeded
 * sync activity.
 */
int
vtruncbuf(vp, cred, td, length, blksize)
	register struct vnode *vp;
	struct ucred *cred;
	struct thread *td;
	off_t length;
	int blksize;
{
	register struct buf *bp;
	struct buf *nbp;
	int anyfreed;
	int trunclbn;

	/*
	 * Round up to the *next* lbn.
	 */
	trunclbn = (length + blksize - 1) / blksize;

	ASSERT_VOP_LOCKED(vp, "vtruncbuf");
restart:
	VI_LOCK(vp);
	anyfreed = 1;
	for (;anyfreed;) {
		anyfreed = 0;
		for (bp = TAILQ_FIRST(&vp->v_cleanblkhd); bp; bp = nbp) {
			nbp = TAILQ_NEXT(bp, b_vnbufs);
			if (bp->b_lblkno >= trunclbn) {
				if (BUF_LOCK(bp,
				    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
				    VI_MTX(vp)) == ENOLCK)
					goto restart;

				bremfree(bp);
				bp->b_flags |= (B_INVAL | B_RELBUF);
				bp->b_flags &= ~B_ASYNC;
				brelse(bp);
				anyfreed = 1;

				if (nbp &&
				    (((nbp->b_xflags & BX_VNCLEAN) == 0) ||
				    (nbp->b_vp != vp) ||
				    (nbp->b_flags & B_DELWRI))) {
					goto restart;
				}
				VI_LOCK(vp);
			}
		}

		for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
			nbp = TAILQ_NEXT(bp, b_vnbufs);
			if (bp->b_lblkno >= trunclbn) {
				if (BUF_LOCK(bp,
				    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
				    VI_MTX(vp)) == ENOLCK)
					goto restart;
				bremfree(bp);
				bp->b_flags |= (B_INVAL | B_RELBUF);
				bp->b_flags &= ~B_ASYNC;
				brelse(bp);
				anyfreed = 1;
				if (nbp &&
				    (((nbp->b_xflags & BX_VNDIRTY) == 0) ||
				    (nbp->b_vp != vp) ||
				    (nbp->b_flags & B_DELWRI) == 0)) {
					goto restart;
				}
				VI_LOCK(vp);
			}
		}
	}

	if (length > 0) {
restartsync:
		for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
			nbp = TAILQ_NEXT(bp, b_vnbufs);
			if (bp->b_lblkno > 0)
				continue;
			/*
			 * Since we hold the vnode lock this should only
			 * fail if we're racing with the buf daemon.
			 */
			if (BUF_LOCK(bp,
			    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
			    VI_MTX(vp)) == ENOLCK) {
				goto restart;
			}
			KASSERT((bp->b_flags & B_DELWRI),
			    ("buf(%p) on dirty queue without DELWRI.", bp));

			bremfree(bp);
			bawrite(bp);
			VI_LOCK(vp);
			goto restartsync;
		}
	}

	while (vp->v_numoutput > 0) {
		vp->v_iflag |= VI_BWAIT;
		msleep(&vp->v_numoutput, VI_MTX(vp), PVM, "vbtrunc", 0);
	}
	VI_UNLOCK(vp);
	vnode_pager_setsize(vp, length);

	return (0);
}
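
/*
 * Note on the rounding in vtruncbuf() above: trunclbn is the first
 * logical block lying entirely beyond the new length.  For example,
 * with blksize 512, truncating to length 513 gives
 * trunclbn = (513 + 511) / 512 = 2, so block 1 (which still holds byte
 * 512) survives while blocks 2 and up are invalidated.
 */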

/*
 * buf_splay() - splay tree core for the clean/dirty list of buffers in
 * 		 a vnode.
 *
 *	NOTE: We have to deal with the special case of a background bitmap
 *	buffer, a situation where two buffers will have the same logical
 *	block offset.  We want (1) only the foreground buffer to be accessed
 *	in a lookup and (2) must differentiate between the foreground and
 *	background buffer in the splay tree algorithm because the splay
 *	tree cannot normally handle multiple entities with the same 'index'.
 *	We accomplish this by adding differentiating flags to the splay tree's
 *	numerical domain.
 */
static
struct buf *
buf_splay(daddr_t lblkno, b_xflags_t xflags, struct buf *root)
{
	struct buf dummy;
	struct buf *lefttreemax, *righttreemin, *y;

	if (root == NULL)
		return (NULL);
	lefttreemax = righttreemin = &dummy;
	for (;;) {
		if (lblkno < root->b_lblkno ||
		    (lblkno == root->b_lblkno &&
		    (xflags & BX_BKGRDMARKER) < (root->b_xflags & BX_BKGRDMARKER))) {
			if ((y = root->b_left) == NULL)
				break;
			if (lblkno < y->b_lblkno) {
				/* Rotate right. */
				root->b_left = y->b_right;
				y->b_right = root;
				root = y;
				if ((y = root->b_left) == NULL)
					break;
			}
			/* Link into the new root's right tree. */
			righttreemin->b_left = root;
			righttreemin = root;
		} else if (lblkno > root->b_lblkno ||
		    (lblkno == root->b_lblkno &&
		    (xflags & BX_BKGRDMARKER) > (root->b_xflags & BX_BKGRDMARKER))) {
			if ((y = root->b_right) == NULL)
				break;
			if (lblkno > y->b_lblkno) {
				/* Rotate left. */
				root->b_right = y->b_left;
				y->b_left = root;
				root = y;
				if ((y = root->b_right) == NULL)
					break;
			}
			/* Link into the new root's left tree. */
			lefttreemax->b_right = root;
			lefttreemax = root;
		} else {
			break;
		}
		root = y;
	}
	/* Assemble the new root. */
	lefttreemax->b_right = root->b_left;
	righttreemin->b_left = root->b_right;
	root->b_left = dummy.b_right;
	root->b_right = dummy.b_left;
	return (root);
}
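
/*
 * In effect the splay key is the pair (b_lblkno, BX_BKGRDMARKER), so a
 * foreground buffer and its background bitmap shadow at the same logical
 * block sort adjacently but never compare equal.  A lookup that wants
 * the foreground buffer passes xflags of 0, as gbincore() does below:
 *
 *	bp = buf_splay(lblkno, 0, vp->v_cleanblkroot);
 */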

static
void
buf_vlist_remove(struct buf *bp)
{
	struct vnode *vp = bp->b_vp;
	struct buf *root;

	ASSERT_VI_LOCKED(vp, "buf_vlist_remove");
	if (bp->b_xflags & BX_VNDIRTY) {
		if (bp != vp->v_dirtyblkroot) {
			root = buf_splay(bp->b_lblkno, bp->b_xflags, vp->v_dirtyblkroot);
			KASSERT(root == bp, ("splay lookup failed during dirty remove"));
		}
		if (bp->b_left == NULL) {
			root = bp->b_right;
		} else {
			root = buf_splay(bp->b_lblkno, bp->b_xflags, bp->b_left);
			root->b_right = bp->b_right;
		}
		vp->v_dirtyblkroot = root;
		TAILQ_REMOVE(&vp->v_dirtyblkhd, bp, b_vnbufs);
		vp->v_dirtybufcnt--;
	} else {
		/* KASSERT(bp->b_xflags & BX_VNCLEAN, ("bp wasn't clean")); */
		if (bp != vp->v_cleanblkroot) {
			root = buf_splay(bp->b_lblkno, bp->b_xflags, vp->v_cleanblkroot);
			KASSERT(root == bp, ("splay lookup failed during clean remove"));
		}
		if (bp->b_left == NULL) {
			root = bp->b_right;
		} else {
			root = buf_splay(bp->b_lblkno, bp->b_xflags, bp->b_left);
			root->b_right = bp->b_right;
		}
		vp->v_cleanblkroot = root;
		TAILQ_REMOVE(&vp->v_cleanblkhd, bp, b_vnbufs);
		vp->v_cleanbufcnt--;
	}
	bp->b_xflags &= ~(BX_VNDIRTY | BX_VNCLEAN);
}

/*
 * Add the buffer to the sorted clean or dirty block list using a
 * splay tree algorithm.
 *
 * NOTE: xflags is passed as a constant, optimizing this inline function!
 */
static
void
buf_vlist_add(struct buf *bp, struct vnode *vp, b_xflags_t xflags)
{
	struct buf *root;

	ASSERT_VI_LOCKED(vp, "buf_vlist_add");
	bp->b_xflags |= xflags;
	if (xflags & BX_VNDIRTY) {
		root = buf_splay(bp->b_lblkno, bp->b_xflags, vp->v_dirtyblkroot);
		if (root == NULL) {
			bp->b_left = NULL;
			bp->b_right = NULL;
			TAILQ_INSERT_TAIL(&vp->v_dirtyblkhd, bp, b_vnbufs);
		} else if (bp->b_lblkno < root->b_lblkno ||
		    (bp->b_lblkno == root->b_lblkno &&
		    (bp->b_xflags & BX_BKGRDMARKER) < (root->b_xflags & BX_BKGRDMARKER))) {
			bp->b_left = root->b_left;
			bp->b_right = root;
			root->b_left = NULL;
			TAILQ_INSERT_BEFORE(root, bp, b_vnbufs);
		} else {
			bp->b_right = root->b_right;
			bp->b_left = root;
			root->b_right = NULL;
			TAILQ_INSERT_AFTER(&vp->v_dirtyblkhd,
			    root, bp, b_vnbufs);
		}
		vp->v_dirtybufcnt++;
		vp->v_dirtyblkroot = bp;
	} else {
		/* KASSERT(xflags & BX_VNCLEAN, ("xflags not clean")); */
		root = buf_splay(bp->b_lblkno, bp->b_xflags, vp->v_cleanblkroot);
		if (root == NULL) {
			bp->b_left = NULL;
			bp->b_right = NULL;
			TAILQ_INSERT_TAIL(&vp->v_cleanblkhd, bp, b_vnbufs);
		} else if (bp->b_lblkno < root->b_lblkno ||
		    (bp->b_lblkno == root->b_lblkno &&
		    (bp->b_xflags & BX_BKGRDMARKER) < (root->b_xflags & BX_BKGRDMARKER))) {
			bp->b_left = root->b_left;
			bp->b_right = root;
			root->b_left = NULL;
			TAILQ_INSERT_BEFORE(root, bp, b_vnbufs);
		} else {
			bp->b_right = root->b_right;
			bp->b_left = root;
			root->b_right = NULL;
			TAILQ_INSERT_AFTER(&vp->v_cleanblkhd,
			    root, bp, b_vnbufs);
		}
		vp->v_cleanbufcnt++;
		vp->v_cleanblkroot = bp;
	}
}

/*
 * Lookup a buffer using the splay tree.  Note that we specifically avoid
 * shadow buffers used in background bitmap writes.
 *
 * This code isn't quite as efficient as it could be because we are maintaining
 * two sorted lists and do not know which list the block resides in.
 *
 * During a "make buildworld" the desired buffer is found at one of
 * the roots more than 60% of the time.  Thus, checking both roots
 * before performing either splay eliminates unnecessary splays on the
 * first tree splayed.
 */
struct buf *
gbincore(struct vnode *vp, daddr_t lblkno)
{
	struct buf *bp;

	GIANT_REQUIRED;

	ASSERT_VI_LOCKED(vp, "gbincore");
	if ((bp = vp->v_cleanblkroot) != NULL &&
	    bp->b_lblkno == lblkno && !(bp->b_xflags & BX_BKGRDMARKER))
		return (bp);
	if ((bp = vp->v_dirtyblkroot) != NULL &&
	    bp->b_lblkno == lblkno && !(bp->b_xflags & BX_BKGRDMARKER))
		return (bp);
	if ((bp = vp->v_cleanblkroot) != NULL) {
		vp->v_cleanblkroot = bp = buf_splay(lblkno, 0, bp);
		if (bp->b_lblkno == lblkno && !(bp->b_xflags & BX_BKGRDMARKER))
			return (bp);
	}
	if ((bp = vp->v_dirtyblkroot) != NULL) {
		vp->v_dirtyblkroot = bp = buf_splay(lblkno, 0, bp);
		if (bp->b_lblkno == lblkno && !(bp->b_xflags & BX_BKGRDMARKER))
			return (bp);
	}
	return (NULL);
}

/*
 * Associate a buffer with a vnode.
 */
void
bgetvp(vp, bp)
	register struct vnode *vp;
	register struct buf *bp;
{
	KASSERT(bp->b_vp == NULL, ("bgetvp: not free"));

	KASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0,
	    ("bgetvp: bp already attached! %p", bp));

	ASSERT_VI_LOCKED(vp, "bgetvp");
	vholdl(vp);
	bp->b_vp = vp;
	bp->b_dev = vn_todev(vp);
	/*
	 * Insert onto list for new vnode.
	 */
	buf_vlist_add(bp, vp, BX_VNCLEAN);
}

/*
 * Disassociate a buffer from a vnode.
 */
void
brelvp(bp)
	register struct buf *bp;
{
	struct vnode *vp;

	KASSERT(bp->b_vp != NULL, ("brelvp: NULL"));

	/*
	 * Delete from old vnode list, if on one.
	 */
	vp = bp->b_vp;
	VI_LOCK(vp);
	if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN))
		buf_vlist_remove(bp);
	if ((vp->v_iflag & VI_ONWORKLST) && TAILQ_EMPTY(&vp->v_dirtyblkhd)) {
		vp->v_iflag &= ~VI_ONWORKLST;
		mtx_lock(&sync_mtx);
		LIST_REMOVE(vp, v_synclist);
		mtx_unlock(&sync_mtx);
	}
	vdropl(vp);
	bp->b_vp = (struct vnode *) 0;
	if (bp->b_object)
		bp->b_object = NULL;
	VI_UNLOCK(vp);
}

/*
 * Add an item to the syncer work queue.
 */
static void
vn_syncer_add_to_worklist(struct vnode *vp, int delay)
{
	int slot;

	ASSERT_VI_LOCKED(vp, "vn_syncer_add_to_worklist");

	mtx_lock(&sync_mtx);
	if (vp->v_iflag & VI_ONWORKLST)
		LIST_REMOVE(vp, v_synclist);
	else
		vp->v_iflag |= VI_ONWORKLST;

	if (delay > syncer_maxdelay - 2)
		delay = syncer_maxdelay - 2;
	slot = (syncer_delayno + delay) & syncer_mask;

	LIST_INSERT_HEAD(&syncer_workitem_pending[slot], vp, v_synclist);
	mtx_unlock(&sync_mtx);
}

struct  proc *updateproc;
static void sched_sync(void);
static struct kproc_desc up_kp = {
	"syncer",
	sched_sync,
	&updateproc
};
SYSINIT(syncer, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp)

/*
 * System filesystem synchronizer daemon.
 */
static void
sched_sync(void)
{
	struct synclist *next;
	struct synclist *slp;
	struct vnode *vp;
	struct mount *mp;
	long starttime;
	struct thread *td = FIRST_THREAD_IN_PROC(updateproc);  /* XXXKSE */

	mtx_lock(&Giant);

	EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, td->td_proc,
	    SHUTDOWN_PRI_LAST);

	for (;;) {
		kthread_suspend_check(td->td_proc);

		starttime = time_second;

		/*
		 * Push files whose dirty time has expired.  Be careful
		 * of interrupt race on slp queue.
		 */
		mtx_lock(&sync_mtx);
		slp = &syncer_workitem_pending[syncer_delayno];
		syncer_delayno += 1;
		if (syncer_delayno == syncer_maxdelay)
			syncer_delayno = 0;
		next = &syncer_workitem_pending[syncer_delayno];

		while ((vp = LIST_FIRST(slp)) != NULL) {
			if (VOP_ISLOCKED(vp, NULL) != 0 ||
			    vn_start_write(vp, &mp, V_NOWAIT) != 0) {
				LIST_REMOVE(vp, v_synclist);
				LIST_INSERT_HEAD(next, vp, v_synclist);
				continue;
			}
			if (VI_TRYLOCK(vp) == 0) {
				LIST_REMOVE(vp, v_synclist);
				LIST_INSERT_HEAD(next, vp, v_synclist);
				vn_finished_write(mp);
				continue;
			}
			/*
			 * We use vhold in case the vnode does not
			 * successfully sync.  vhold prevents the vnode from
			 * going away when we unlock the sync_mtx so that
			 * we can acquire the vnode interlock.
			 */
			vholdl(vp);
			mtx_unlock(&sync_mtx);
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY | LK_INTERLOCK, td);
			(void) VOP_FSYNC(vp, td->td_ucred, MNT_LAZY, td);
			VOP_UNLOCK(vp, 0, td);
			vn_finished_write(mp);
			VI_LOCK(vp);
			if ((vp->v_iflag & VI_ONWORKLST) != 0) {
				/*
				 * Put us back on the worklist.  The worklist
				 * routine will remove us from our current
				 * position and then add us back in at a later
				 * position.
				 */
				vn_syncer_add_to_worklist(vp, syncdelay);
			}
			vdropl(vp);
			VI_UNLOCK(vp);
			mtx_lock(&sync_mtx);
		}
		mtx_unlock(&sync_mtx);

		/*
		 * Do soft update processing.
		 */
		if (softdep_process_worklist_hook != NULL)
			(*softdep_process_worklist_hook)(NULL);

		/*
		 * The variable rushjob allows the kernel to speed up the
		 * processing of the filesystem syncer process. A rushjob
		 * value of N tells the filesystem syncer to process the next
		 * N seconds worth of work on its queue ASAP. Currently rushjob
		 * is used by the soft update code to speed up the filesystem
		 * syncer process when the incore state is getting so far
		 * ahead of the disk that the kernel memory pool is being
		 * threatened with exhaustion.
		 */
		mtx_lock(&sync_mtx);
		if (rushjob > 0) {
			rushjob -= 1;
			mtx_unlock(&sync_mtx);
			continue;
		}
		mtx_unlock(&sync_mtx);
		/*
		 * If it has taken us less than a second to process the
		 * current work, then wait. Otherwise start right over
		 * again. We can still lose time if any single round
		 * takes more than two seconds, but it does not really
		 * matter as we are just trying to generally pace the
		 * filesystem activity.
		 */
		if (time_second == starttime)
			tsleep(&lbolt, PPAUSE, "syncer", 0);
	}
}

/*
 * Request the syncer daemon to speed up its work.
 * We never push it to speed up more than half of its
 * normal turn time, otherwise it could take over the cpu.
 * XXXKSE  only one update?
 */
int
speedup_syncer()
{
	struct thread *td;
	int ret = 0;

	td = FIRST_THREAD_IN_PROC(updateproc);
	mtx_lock_spin(&sched_lock);
	if (td->td_wchan == &lbolt) {
		unsleep(td);
		TD_CLR_SLEEPING(td);
		setrunnable(td);
	}
	mtx_unlock_spin(&sched_lock);
	mtx_lock(&sync_mtx);
	if (rushjob < syncdelay / 2) {
		rushjob += 1;
		stat_rush_requests += 1;
		ret = 1;
	}
	mtx_unlock(&sync_mtx);
	return (ret);
}

/*
 * Associate a p-buffer with a vnode.
 *
 * Also sets B_PAGING flag to indicate that vnode is not fully associated
 * with the buffer.  i.e. the bp has not been linked into the vnode or
 * ref-counted.
 */
void
pbgetvp(vp, bp)
	register struct vnode *vp;
	register struct buf *bp;
{

	KASSERT(bp->b_vp == NULL, ("pbgetvp: not free"));

	bp->b_vp = vp;
	bp->b_flags |= B_PAGING;
	bp->b_dev = vn_todev(vp);
}

/*
 * Disassociate a p-buffer from a vnode.
 */
void
pbrelvp(bp)
	register struct buf *bp;
{

	KASSERT(bp->b_vp != NULL, ("pbrelvp: NULL"));

	/* XXX REMOVE ME */
	VI_LOCK(bp->b_vp);
	if (TAILQ_NEXT(bp, b_vnbufs) != NULL) {
		panic(
		    "pbrelvp(): b_vp was probably reassignbuf()d %p %x",
1862		    bp,
1863		    (int)bp->b_flags
1864		);
1865	}
1866	VI_UNLOCK(bp->b_vp);
1867	bp->b_vp = (struct vnode *) 0;
1868	bp->b_flags &= ~B_PAGING;
1869}
1870
1871/*
1872 * Reassign a buffer from one vnode to another.
1873 * Used to assign file specific control information
1874 * (indirect blocks) to the vnode to which they belong.
1875 */
1876void
1877reassignbuf(bp, newvp)
1878	register struct buf *bp;
1879	register struct vnode *newvp;
1880{
1881	struct vnode *vp;
1882	int delay;
1883
1884	if (newvp == NULL) {
1885		printf("reassignbuf: NULL");
1886		return;
1887	}
1888	vp = bp->b_vp;
1889	++reassignbufcalls;
1890
1891	/*
1892	 * B_PAGING flagged buffers cannot be reassigned because their vp
1893	 * is not fully linked in.
1894	 */
1895	if (bp->b_flags & B_PAGING)
1896		panic("cannot reassign paging buffer");
1897
1898	/*
1899	 * Delete from old vnode list, if on one.
1900	 */
1901	VI_LOCK(vp);
1902	if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) {
1903		buf_vlist_remove(bp);
1904		if (vp != newvp) {
1905			vdropl(bp->b_vp);
1906			bp->b_vp = NULL;	/* for clarification */
1907		}
1908	}
1909	if (vp != newvp) {
1910		VI_UNLOCK(vp);
1911		VI_LOCK(newvp);
1912	}
1913	/*
1914	 * If dirty, put on list of dirty buffers; otherwise insert onto list
1915	 * of clean buffers.
1916	 */
1917	if (bp->b_flags & B_DELWRI) {
1918		if ((newvp->v_iflag & VI_ONWORKLST) == 0) {
1919			switch (newvp->v_type) {
1920			case VDIR:
1921				delay = dirdelay;
1922				break;
1923			case VCHR:
1924				if (newvp->v_rdev->si_mountpoint != NULL) {
1925					delay = metadelay;
1926					break;
1927				}
1928				/* FALLTHROUGH */
1929			default:
1930				delay = filedelay;
1931			}
1932			vn_syncer_add_to_worklist(newvp, delay);
1933		}
1934		buf_vlist_add(bp, newvp, BX_VNDIRTY);
1935	} else {
1936		buf_vlist_add(bp, newvp, BX_VNCLEAN);
1937
1938		if ((newvp->v_iflag & VI_ONWORKLST) &&
1939		    TAILQ_EMPTY(&newvp->v_dirtyblkhd)) {
1940			mtx_lock(&sync_mtx);
1941			LIST_REMOVE(newvp, v_synclist);
1942			mtx_unlock(&sync_mtx);
1943			newvp->v_iflag &= ~VI_ONWORKLST;
1944		}
1945	}
1946	if (bp->b_vp != newvp) {
1947		bp->b_vp = newvp;
1948		vholdl(bp->b_vp);
1949	}
1950	VI_UNLOCK(newvp);
1951}
1952
1953/*
1954 * Create a vnode for a device.
1955 * Used for mounting the root filesystem.
1956 */
1957int
1958bdevvp(dev, vpp)
1959	dev_t dev;
1960	struct vnode **vpp;
1961{
1962	register struct vnode *vp;
1963	struct vnode *nvp;
1964	int error;
1965
1966	if (dev == NODEV) {
1967		*vpp = NULLVP;
1968		return (ENXIO);
1969	}
1970	if (vfinddev(dev, VCHR, vpp))
1971		return (0);
1972	error = getnewvnode("none", (struct mount *)0, spec_vnodeop_p, &nvp);
1973	if (error) {
1974		*vpp = NULLVP;
1975		return (error);
1976	}
1977	vp = nvp;
1978	vp->v_type = VCHR;
1979	addalias(vp, dev);
1980	*vpp = vp;
1981	return (0);
1982}
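
/*
 * Usage sketch (illustrative): root-mount code typically wraps the
 * boot-time device in a vnode like this, where `rootdev' is the dev_t
 * selected at boot:
 *
 *	if ((error = bdevvp(rootdev, &rootvp)) != 0)
 *		panic("cannot set up root bdevvp");
 */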
1983
1984static void
1985v_incr_usecount(struct vnode *vp, int delta)
1986{
1987	vp->v_usecount += delta;
1988	if (vp->v_type == VCHR && vp->v_rdev != NULL) {
1989		mtx_lock(&spechash_mtx);
1990		vp->v_rdev->si_usecount += delta;
1991		mtx_unlock(&spechash_mtx);
1992	}
1993}
1994
1995/*
1996 * Add vnode to the alias list hung off the dev_t.
1997 *
1998 * The reason for this gunk is that multiple vnodes can reference
1999 * the same physical device, so checking vp->v_usecount to see
2000 * how many users there are is inadequate; the v_usecount for
2001 * the vnodes need to be accumulated.  vcount() does that.
2002 */
2003struct vnode *
2004addaliasu(nvp, nvp_rdev)
2005	struct vnode *nvp;
2006	udev_t nvp_rdev;
2007{
2008	struct vnode *ovp;
2009	vop_t **ops;
2010	dev_t dev;
2011
2012	if (nvp->v_type == VBLK)
2013		return (nvp);
2014	if (nvp->v_type != VCHR)
2015		panic("addaliasu on non-special vnode");
2016	dev = udev2dev(nvp_rdev, 0);
2017	/*
2018	 * Check to see if we have a bdevvp vnode with no associated
2019	 * filesystem. If so, we want to associate the filesystem of
2020	 * the newly created vnode with the bdevvp vnode and
2021	 * discard the newly created vnode rather than leaving the
2022	 * bdevvp vnode lying around with no associated filesystem.
2023	 */
2024	if (vfinddev(dev, nvp->v_type, &ovp) == 0 || ovp->v_data != NULL) {
2025		addalias(nvp, dev);
2026		return (nvp);
2027	}
2028	/*
2029	 * Discard unneeded vnode, but save its node specific data.
2030	 * Note that if there is a lock, it is carried over in the
2031	 * node specific data to the replacement vnode.
2032	 */
2033	vref(ovp);
2034	ovp->v_data = nvp->v_data;
2035	ovp->v_tag = nvp->v_tag;
2036	nvp->v_data = NULL;
2037	lockdestroy(ovp->v_vnlock);
2038	lockinit(ovp->v_vnlock, PVFS, nvp->v_vnlock->lk_wmesg,
2039	    nvp->v_vnlock->lk_timo, nvp->v_vnlock->lk_flags & LK_EXTFLG_MASK);
2040	ops = ovp->v_op;
2041	ovp->v_op = nvp->v_op;
2042	if (VOP_ISLOCKED(nvp, curthread)) {
2043		VOP_UNLOCK(nvp, 0, curthread);
2044		vn_lock(ovp, LK_EXCLUSIVE | LK_RETRY, curthread);
2045	}
2046	nvp->v_op = ops;
2047	insmntque(ovp, nvp->v_mount);
2048	vrele(nvp);
2049	vgone(nvp);
2050	return (ovp);
2051}
2052
2053/* This is a local helper function that does the same as addaliasu, but
2054 * for a dev_t instead of a udev_t. */
2055static void
2056addalias(nvp, dev)
2057	struct vnode *nvp;
2058	dev_t dev;
2059{
2060
2061	KASSERT(nvp->v_type == VCHR, ("addalias on non-special vnode"));
2062	nvp->v_rdev = dev;
2063	VI_LOCK(nvp);
2064	mtx_lock(&spechash_mtx);
2065	SLIST_INSERT_HEAD(&dev->si_hlist, nvp, v_specnext);
2066	dev->si_usecount += nvp->v_usecount;
2067	mtx_unlock(&spechash_mtx);
2068	VI_UNLOCK(nvp);
2069}
2070
2071/*
2072 * Grab a particular vnode from the free list, increment its
2073 * reference count and lock it. The vnode lock bit is set if the
2074 * vnode is being eliminated in vgone. The process is awakened
2075 * when the transition is completed, and an error returned to
2076 * indicate that the vnode is no longer usable (possibly having
2077 * been changed to a new filesystem type).
2078 */
2079int
2080vget(vp, flags, td)
2081	register struct vnode *vp;
2082	int flags;
2083	struct thread *td;
2084{
2085	int error;
2086
2087	/*
2088	 * If the vnode is in the process of being cleaned out for
2089	 * another use, we wait for the cleaning to finish and then
2090	 * return failure. Cleaning is determined by checking that
2091	 * the VI_XLOCK flag is set.
2092	 */
2093	if ((flags & LK_INTERLOCK) == 0)
2094		VI_LOCK(vp);
2095	if ((vp->v_iflag & VI_XLOCK) && vp->v_vxproc != curthread) {
2096		if ((flags & LK_NOWAIT) == 0) {
2097			vp->v_iflag |= VI_XWANT;
2098			msleep(vp, VI_MTX(vp), PINOD | PDROP, "vget", 0);
2099		}
2100		return (ENOENT);
2101	}
2102
2103	v_incr_usecount(vp, 1);
2104
2105	if (VSHOULDBUSY(vp))
2106		vbusy(vp);
2107	if (flags & LK_TYPE_MASK) {
2108		if ((error = vn_lock(vp, flags | LK_INTERLOCK, td)) != 0) {
2109			/*
2110			 * must expand vrele here because we do not want
2111			 * to call VOP_INACTIVE if the reference count
2112			 * drops back to zero since it was never really
2113			 * active. We must remove it from the free list
2114			 * before sleeping so that multiple processes do
2115			 * not try to recycle it.
2116			 */
2117			VI_LOCK(vp);
2118			v_incr_usecount(vp, -1);
2119			if (VSHOULDFREE(vp))
2120				vfree(vp);
2121			else
2122				vlruvp(vp);
2123			VI_UNLOCK(vp);
2124		}
2125		return (error);
2126	}
2127	VI_UNLOCK(vp);
2128	return (0);
2129}
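
/*
 * Typical usage sketch (illustrative): take a reference and an
 * exclusive lock in one step, treating failure as "the vnode was
 * reclaimed underneath us" and restarting whatever lookup produced vp:
 *
 *	if ((error = vget(vp, LK_EXCLUSIVE, td)) != 0)
 *		goto restart;	(hypothetical label)
 *	... use the locked vnode ...
 *	vput(vp);
 */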
2130
2131/*
2132 * Increase the reference count of a vnode.
2133 */
2134void
2135vref(struct vnode *vp)
2136{
2137	VI_LOCK(vp);
2138	v_incr_usecount(vp, 1);
2139	VI_UNLOCK(vp);
2140}
2141
2142/*
2143 * Return reference count of a vnode.
2144 *
2145 * The results of this call are only guaranteed when some mechanism other
2146 * than the VI lock is used to stop other processes from gaining references
2147 * to the vnode.  This may be the case if the caller holds the only reference.
2148 * This is also useful when stale data is acceptable as race conditions may
2149 * be accounted for by some other means.
2150 */
2151int
2152vrefcnt(struct vnode *vp)
2153{
2154	int usecnt;
2155
2156	VI_LOCK(vp);
2157	usecnt = vp->v_usecount;
2158	VI_UNLOCK(vp);
2159
2160	return (usecnt);
2161}
2162
2164/*
2165 * Vnode put/release.
2166 * If count drops to zero, call inactive routine and return to freelist.
2167 */
2168void
2169vrele(vp)
2170	struct vnode *vp;
2171{
2172	struct thread *td = curthread;	/* XXX */
2173
2174	KASSERT(vp != NULL, ("vrele: null vp"));
2175
2176	VI_LOCK(vp);
2177
2178	/* Skip this v_writecount check if we're going to panic below. */
2179	KASSERT(vp->v_writecount < vp->v_usecount || vp->v_usecount < 1,
2180	    ("vrele: missed vn_close"));
2181
2182	if (vp->v_usecount > 1 || ((vp->v_iflag & VI_DOINGINACT) &&
2183	    vp->v_usecount == 1)) {
2184		v_incr_usecount(vp, -1);
2185		VI_UNLOCK(vp);
2186
2187		return;
2188	}
2189
2190	if (vp->v_usecount == 1) {
2191		v_incr_usecount(vp, -1);
2192		/*
2193		 * We must call VOP_INACTIVE with the node locked. Mark
2194		 * as VI_DOINGINACT to avoid recursion.
2195		 */
2196		if (vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK, td) == 0) {
2197			VI_LOCK(vp);
2198			vp->v_iflag |= VI_DOINGINACT;
2199			VI_UNLOCK(vp);
2200			VOP_INACTIVE(vp, td);
2201			VI_LOCK(vp);
2202			KASSERT(vp->v_iflag & VI_DOINGINACT,
2203			    ("vrele: lost VI_DOINGINACT"));
2204			vp->v_iflag &= ~VI_DOINGINACT;
2205		} else
2206			VI_LOCK(vp);
2207		if (VSHOULDFREE(vp))
2208			vfree(vp);
2209		else
2210			vlruvp(vp);
2211		VI_UNLOCK(vp);
2212
2213	} else {
2214#ifdef DIAGNOSTIC
2215		vprint("vrele: negative ref count", vp);
2216#endif
2217		VI_UNLOCK(vp);
2218		panic("vrele: negative ref cnt");
2219	}
2220}
2221
2222/*
2223 * Release an already locked vnode.  This gives the same effects as
2224 * unlock+vrele(), but takes less time and avoids releasing and
2225 * re-acquiring the lock (as vrele() acquires the lock internally).
2226 */
2227void
2228vput(vp)
2229	struct vnode *vp;
2230{
2231	struct thread *td = curthread;	/* XXX */
2232
2233	GIANT_REQUIRED;
2234
2235	KASSERT(vp != NULL, ("vput: null vp"));
2236	VI_LOCK(vp);
2237	/* Skip this v_writecount check if we're going to panic below. */
2238	KASSERT(vp->v_writecount < vp->v_usecount || vp->v_usecount < 1,
2239	    ("vput: missed vn_close"));
2240
2241	if (vp->v_usecount > 1 || ((vp->v_iflag & VI_DOINGINACT) &&
2242	    vp->v_usecount == 1)) {
2243		v_incr_usecount(vp, -1);
2244		VOP_UNLOCK(vp, LK_INTERLOCK, td);
2245		return;
2246	}
2247
2248	if (vp->v_usecount == 1) {
2249		v_incr_usecount(vp, -1);
2250		/*
2251		 * We must call VOP_INACTIVE with the node locked, so
2252		 * we just need to release the vnode mutex.  Mark it
2253		 * as VI_DOINGINACT to avoid recursion.
2254		 */
2255		vp->v_iflag |= VI_DOINGINACT;
2256		VI_UNLOCK(vp);
2257		VOP_INACTIVE(vp, td);
2258		VI_LOCK(vp);
2259		KASSERT(vp->v_iflag & VI_DOINGINACT,
2260		    ("vput: lost VI_DOINGINACT"));
2261		vp->v_iflag &= ~VI_DOINGINACT;
2262		if (VSHOULDFREE(vp))
2263			vfree(vp);
2264		else
2265			vlruvp(vp);
2266		VI_UNLOCK(vp);
2267
2268	} else {
2269#ifdef DIAGNOSTIC
2270		vprint("vput: negative ref count", vp);
2271#endif
2272		panic("vput: negative ref cnt");
2273	}
2274}
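
/*
 * Equivalence sketch, restating the comment above in code: for a
 * locked vnode,
 *
 *	vput(vp);
 *
 * has the same effect as
 *
 *	VOP_UNLOCK(vp, 0, td);
 *	vrele(vp);
 *
 * without vrele() having to re-acquire the lock internally.
 */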
2275
2276/*
2277 * Somebody doesn't want the vnode recycled.
2278 */
2279void
2280vhold(struct vnode *vp)
2281{
2282	VI_LOCK(vp);
2283	vholdl(vp);
2284	VI_UNLOCK(vp);
2285}
2286
2287void
2288vholdl(vp)
2289	register struct vnode *vp;
2290{
2291	vp->v_holdcnt++;
2292	if (VSHOULDBUSY(vp))
2293		vbusy(vp);
2294}
2295
2296/*
2297 * Note that there is one less holder who cares about this vnode.
2298 * vdrop() is the opposite of vhold().
2299 */
2300void
2301vdrop(struct vnode *vp)
2302{
2303	VI_LOCK(vp);
2304	vdropl(vp);
2305	VI_UNLOCK(vp);
2306}
2307
2308void
2309vdropl(vp)
2310	register struct vnode *vp;
2311{
2312	if (vp->v_holdcnt <= 0)
2313		panic("vdrop: holdcnt");
2314	vp->v_holdcnt--;
2315	if (VSHOULDFREE(vp))
2316		vfree(vp);
2317	else
2318		vlruvp(vp);
2319}
2320
2321/*
2322 * Remove any vnodes in the vnode table belonging to mount point mp.
2323 *
2324 * If FORCECLOSE is not specified, there should not be any active ones;
2325 * an error is returned if any are found (nb: this is a user error, not
2326 * a system error). If FORCECLOSE is specified, detach any active vnodes
2327 * that are found.
2328 *
2329 * If WRITECLOSE is set, only flush out regular file vnodes open for
2330 * writing.
2331 *
2332 * SKIPSYSTEM causes any vnodes marked VV_SYSTEM to be skipped.
2333 *
2334 * `rootrefs' specifies the base reference count for the root vnode
2335 * of this filesystem. The root vnode is considered busy if its
2336 * v_usecount exceeds this value. On a successful return, vflush()
2337 * will call vrele() on the root vnode exactly rootrefs times.
2338 * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must
2339 * be zero.
2340 */
2341#ifdef DIAGNOSTIC
2342static int busyprt = 0;		/* print out busy vnodes */
2343SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "");
2344#endif
2345
2346int
2347vflush(mp, rootrefs, flags)
2348	struct mount *mp;
2349	int rootrefs;
2350	int flags;
2351{
2352	struct thread *td = curthread;	/* XXX */
2353	struct vnode *vp, *nvp, *rootvp = NULL;
2354	struct vattr vattr;
2355	int busy = 0, error;
2356
2357	if (rootrefs > 0) {
2358		KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0,
2359		    ("vflush: bad args"));
2360		/*
2361		 * Get the filesystem root vnode. We can vput() it
2362		 * immediately, since with rootrefs > 0, it won't go away.
2363		 */
2364		if ((error = VFS_ROOT(mp, &rootvp)) != 0)
2365			return (error);
2366		vput(rootvp);
2367
2368	}
2369	mtx_lock(&mntvnode_mtx);
2370loop:
2371	for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist); vp; vp = nvp) {
2372		/*
2373		 * Make sure this vnode wasn't reclaimed in getnewvnode().
2374		 * Start over if it has (it won't be on the list anymore).
2375		 * Start over if it was (it won't be on the list anymore).
2376		if (vp->v_mount != mp)
2377			goto loop;
2378		nvp = TAILQ_NEXT(vp, v_nmntvnodes);
2379
2380		VI_LOCK(vp);
2381		mtx_unlock(&mntvnode_mtx);
2382		/*
2383		 * XXX Does not check vn_lock error.  Should restart loop if
2384		 * error == ENOENT.
2385		 */
2386		vn_lock(vp, LK_INTERLOCK | LK_EXCLUSIVE | LK_RETRY, td);
2387		/*
2388		 * This vnode could have been reclaimed while we were
2389		 * waiting for the lock since we are not holding a
2390		 * reference.
2391		 * Start over if the vnode was reclaimed.
2392		 */
2393		if (vp->v_mount != mp) {
2394			VOP_UNLOCK(vp, 0, td);
2395			mtx_lock(&mntvnode_mtx);
2396			goto loop;
2397		}
2398		/*
2399		 * Skip over vnodes marked VV_SYSTEM.
2400		 */
2401		if ((flags & SKIPSYSTEM) && (vp->v_vflag & VV_SYSTEM)) {
2402			VOP_UNLOCK(vp, 0, td);
2403			mtx_lock(&mntvnode_mtx);
2404			continue;
2405		}
2406		/*
2407		 * If WRITECLOSE is set, flush out unlinked but still open
2408		 * files (even if open only for reading) and regular file
2409		 * vnodes open for writing.
2410		 */
2411		if (flags & WRITECLOSE) {
2412			error = VOP_GETATTR(vp, &vattr, td->td_ucred, td);
2413			VI_LOCK(vp);
2414
2415			if ((vp->v_type == VNON ||
2416			    (error == 0 && vattr.va_nlink > 0)) &&
2417			    (vp->v_writecount == 0 || vp->v_type != VREG)) {
2418				VOP_UNLOCK(vp, LK_INTERLOCK, td);
2419				mtx_lock(&mntvnode_mtx);
2420				continue;
2421			}
2422		} else
2423			VI_LOCK(vp);
2424
2425		VOP_UNLOCK(vp, 0, td);
2426
2427		/*
2428		 * With v_usecount == 0, all we need to do is clear out the
2429		 * vnode data structures and we are done.
2430		 */
2431		if (vp->v_usecount == 0) {
2432			vgonel(vp, td);
2433			mtx_lock(&mntvnode_mtx);
2434			continue;
2435		}
2436
2437		/*
2438		 * If FORCECLOSE is set, forcibly close the vnode. For block
2439		 * or character devices, revert to an anonymous device. For
2440		 * all other files, just kill them.
2441		 */
2442		if (flags & FORCECLOSE) {
2443			if (vp->v_type != VCHR)
2444				vgonel(vp, td);
2445			else
2446				vgonechrl(vp, td);
2447			mtx_lock(&mntvnode_mtx);
2448			continue;
2449		}
2450#ifdef DIAGNOSTIC
2451		if (busyprt)
2452			vprint("vflush: busy vnode", vp);
2453#endif
2454		VI_UNLOCK(vp);
2455		mtx_lock(&mntvnode_mtx);
2456		busy++;
2457	}
2458	mtx_unlock(&mntvnode_mtx);
2459	if (rootrefs > 0 && (flags & FORCECLOSE) == 0) {
2460		/*
2461		 * If just the root vnode is busy, and if its refcount
2462		 * is equal to `rootrefs', then go ahead and kill it.
2463		 */
2464		VI_LOCK(rootvp);
2465		KASSERT(busy > 0, ("vflush: not busy"));
2466		KASSERT(rootvp->v_usecount >= rootrefs, ("vflush: rootrefs"));
2467		if (busy == 1 && rootvp->v_usecount == rootrefs) {
2468			vgonel(rootvp, td);
2469			busy = 0;
2470		} else
2471			VI_UNLOCK(rootvp);
2472	}
2473	if (busy)
2474		return (EBUSY);
2475	for (; rootrefs > 0; rootrefs--)
2476		vrele(rootvp);
2477	return (0);
2478}
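
/*
 * Caller sketch (a hedged example): a filesystem that keeps one private
 * long-term reference on its root vnode would unmount with
 *
 *	error = vflush(mp, 1, (mntflags & MNT_FORCE) ? FORCECLOSE : 0);
 *
 * and, on success, vflush() itself vrele()s that root reference once.
 */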
2479
2480/*
2481 * This moves a now (likely recyclable) vnode to the end of the
2482 * mountlist.  XXX However, it is temporarily disabled until we
2483 * can clean up ffs_sync() and friends, which have loop restart
2484 * conditions that this code causes to operate in O(N^2) time.
2485 */
2486static void
2487vlruvp(struct vnode *vp)
2488{
2489#if 0
2490	struct mount *mp;
2491
2492	if ((mp = vp->v_mount) != NULL) {
2493		mtx_lock(&mntvnode_mtx);
2494		TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
2495		TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
2496		mtx_unlock(&mntvnode_mtx);
2497	}
2498#endif
2499}
2500
2501static void
2502vx_lock(struct vnode *vp)
2503{
2504	ASSERT_VI_LOCKED(vp, "vx_lock");
2505
2506	/*
2507	 * Prevent the vnode from being recycled or brought into use while we
2508	 * clean it out.
2509	 */
2510	if (vp->v_iflag & VI_XLOCK)
2511		panic("vx_lock: deadlock");
2512	vp->v_iflag |= VI_XLOCK;
2513	vp->v_vxproc = curthread;
2514}
2515
2516static void
2517vx_unlock(struct vnode *vp)
2518{
2519	ASSERT_VI_LOCKED(vp, "vx_unlock");
2520	vp->v_iflag &= ~VI_XLOCK;
2521	vp->v_vxproc = NULL;
2522	if (vp->v_iflag & VI_XWANT) {
2523		vp->v_iflag &= ~VI_XWANT;
2524		wakeup(vp);
2525	}
2526}
2527
2529/*
2530 * Disassociate the underlying filesystem from a vnode.
2531 */
2532static void
2533vclean(vp, flags, td)
2534	struct vnode *vp;
2535	int flags;
2536	struct thread *td;
2537{
2538	int active;
2539
2540	ASSERT_VI_LOCKED(vp, "vclean");
2541	/*
2542	 * Check to see if the vnode is in use. If so we have to reference it
2543	 * before we clean it out so that its count cannot fall to zero and
2544	 * generate a race against ourselves to recycle it.
2545	 */
2546	if ((active = vp->v_usecount))
2547		v_incr_usecount(vp, 1);
2548
2549	/*
2550	 * Even if the count is zero, the VOP_INACTIVE routine may still
2551	 * have the object locked while it cleans it out. The VOP_LOCK
2552	 * ensures that the VOP_INACTIVE routine is done with its work.
2553	 * For active vnodes, it ensures that no other activity can
2554	 * occur while the underlying object is being cleaned out.
2555	 */
2556	VOP_LOCK(vp, LK_DRAIN | LK_INTERLOCK, td);
2557
2558	/*
2559	 * Clean out any buffers associated with the vnode.
2560	 * If the flush fails, just toss the buffers.
2561	 */
2562	if (flags & DOCLOSE) {
2563		struct buf *bp;
2564		bp = TAILQ_FIRST(&vp->v_dirtyblkhd);
2565		if (bp != NULL)
2566			(void) vn_write_suspend_wait(vp, NULL, V_WAIT);
2567		if (vinvalbuf(vp, V_SAVE, NOCRED, td, 0, 0) != 0)
2568			vinvalbuf(vp, 0, NOCRED, td, 0, 0);
2569	}
2570
2571	VOP_DESTROYVOBJECT(vp);
2572
2573	/*
2574	 * Any other processes trying to obtain this lock must first
2575	 * wait for VI_XLOCK to clear, then call the new lock operation.
2576	 */
2577	VOP_UNLOCK(vp, 0, td);
2578
2579	/*
2580	 * If purging an active vnode, it must be closed and
2581	 * deactivated before being reclaimed. Note that the
2582	 * VOP_INACTIVE will unlock the vnode.
2583	 */
2584	if (active) {
2585		if (flags & DOCLOSE)
2586			VOP_CLOSE(vp, FNONBLOCK, NOCRED, td);
2587		VI_LOCK(vp);
2588		if ((vp->v_iflag & VI_DOINGINACT) == 0) {
2589			vp->v_iflag |= VI_DOINGINACT;
2590			VI_UNLOCK(vp);
2591			if (vn_lock(vp, LK_EXCLUSIVE | LK_NOWAIT, td) != 0)
2592				panic("vclean: cannot relock.");
2593			VOP_INACTIVE(vp, td);
2594			VI_LOCK(vp);
2595			KASSERT(vp->v_iflag & VI_DOINGINACT,
2596			    ("vclean: lost VI_DOINGINACT"));
2597			vp->v_iflag &= ~VI_DOINGINACT;
2598		}
2599		VI_UNLOCK(vp);
2600	}
2601	/*
2602	 * Reclaim the vnode.
2603	 */
2604	if (VOP_RECLAIM(vp, td))
2605		panic("vclean: cannot reclaim");
2606
2607	if (active) {
2608		/*
2609		 * Inline copy of vrele() since VOP_INACTIVE
2610		 * has already been called.
2611		 */
2612		VI_LOCK(vp);
2613		v_incr_usecount(vp, -1);
2614		if (vp->v_usecount <= 0) {
2615#ifdef INVARIANTS
2616			if (vp->v_usecount < 0 || vp->v_writecount != 0) {
2617				vprint("vclean: bad ref count", vp);
2618				panic("vclean: ref cnt");
2619			}
2620#endif
2621			if (VSHOULDFREE(vp))
2622				vfree(vp);
2623		}
2624		VI_UNLOCK(vp);
2625	}
2626	/*
2627	 * Delete from old mount point vnode list.
2628	 */
2629	if (vp->v_mount != NULL)
2630		insmntque(vp, (struct mount *)0);
2631	cache_purge(vp);
2632	VI_LOCK(vp);
2633	if (VSHOULDFREE(vp))
2634		vfree(vp);
2635
2636	/*
2637	 * Done with purge, reset to the standard lock and
2638	 * notify sleepers of the grim news.
2639	 */
2640	vp->v_vnlock = &vp->v_lock;
2641	vp->v_op = dead_vnodeop_p;
2642	if (vp->v_pollinfo != NULL)
2643		vn_pollgone(vp);
2644	vp->v_tag = "none";
2645}
2646
2647/*
2648 * Eliminate all activity associated with the requested vnode
2649 * and with all vnodes aliased to the requested vnode.
2650 */
2651int
2652vop_revoke(ap)
2653	struct vop_revoke_args /* {
2654		struct vnode *a_vp;
2655		int a_flags;
2656	} */ *ap;
2657{
2658	struct vnode *vp, *vq;
2659	dev_t dev;
2660
2661	KASSERT((ap->a_flags & REVOKEALL) != 0, ("vop_revoke"));
2662	vp = ap->a_vp;
2663	KASSERT((vp->v_type == VCHR), ("vop_revoke: not VCHR"));
2664
2665	VI_LOCK(vp);
2666	/*
2667	 * If a vgone (or vclean) is already in progress,
2668	 * wait until it is done and return.
2669	 */
2670	if (vp->v_iflag & VI_XLOCK) {
2671		vp->v_iflag |= VI_XWANT;
2672		msleep(vp, VI_MTX(vp), PINOD | PDROP,
2673		    "vop_revokeall", 0);
2674		return (0);
2675	}
2676	VI_UNLOCK(vp);
2677	dev = vp->v_rdev;
2678	for (;;) {
2679		mtx_lock(&spechash_mtx);
2680		vq = SLIST_FIRST(&dev->si_hlist);
2681		mtx_unlock(&spechash_mtx);
2682		if (!vq)
2683			break;
2684		vgone(vq);
2685	}
2686	return (0);
2687}
2688
2689/*
2690 * Recycle an unused vnode to the front of the free list.
2691 * Release the passed interlock if the vnode will be recycled.
2692 */
2693int
2694vrecycle(vp, inter_lkp, td)
2695	struct vnode *vp;
2696	struct mtx *inter_lkp;
2697	struct thread *td;
2698{
2699
2700	VI_LOCK(vp);
2701	if (vp->v_usecount == 0) {
2702		if (inter_lkp) {
2703			mtx_unlock(inter_lkp);
2704		}
2705		vgonel(vp, td);
2706		return (1);
2707	}
2708	VI_UNLOCK(vp);
2709	return (0);
2710}
2711
2712/*
2713 * Eliminate all activity associated with a vnode
2714 * in preparation for reuse.
2715 */
2716void
2717vgone(vp)
2718	register struct vnode *vp;
2719{
2720	struct thread *td = curthread;	/* XXX */
2721
2722	VI_LOCK(vp);
2723	vgonel(vp, td);
2724}
2725
2726/*
2727 * Disassociate a character device from its underlying filesystem and
2728 * attach it to spec.  This is for use when the character device is
2729 * still active and the filesystem is going away.
2730 */
2731static void
2732vgonechrl(struct vnode *vp, struct thread *td)
2733{
2734	ASSERT_VI_LOCKED(vp, "vgonechrl");
2735	vx_lock(vp);
2736	vclean(vp, 0, td);
2737	vp->v_op = spec_vnodeop_p;
2738	vx_unlock(vp);
2739	VI_UNLOCK(vp);
2740}
2741/*
2742 * vgone, with the vp interlock held.
2743 */
2744void
2745vgonel(vp, td)
2746	struct vnode *vp;
2747	struct thread *td;
2748{
2749	/*
2750	 * If a vgone (or vclean) is already in progress,
2751	 * wait until it is done and return.
2752	 */
2753	ASSERT_VI_LOCKED(vp, "vgonel");
2754	if (vp->v_iflag & VI_XLOCK) {
2755		vp->v_iflag |= VI_XWANT;
2756		msleep(vp, VI_MTX(vp), PINOD | PDROP, "vgone", 0);
2757		return;
2758	}
2759	vx_lock(vp);
2760
2761	/*
2762	 * Clean out the filesystem specific data.
2763	 */
2764	vclean(vp, DOCLOSE, td);
2765	VI_UNLOCK(vp);
2766
2767	/*
2768	 * If special device, remove it from special device alias list
2769	 * if it is on one.
2770	 */
2771	VI_LOCK(vp);
2772	if (vp->v_type == VCHR && vp->v_rdev != NULL && vp->v_rdev != NODEV) {
2773		mtx_lock(&spechash_mtx);
2774		SLIST_REMOVE(&vp->v_rdev->si_hlist, vp, vnode, v_specnext);
2775		vp->v_rdev->si_usecount -= vp->v_usecount;
2776		mtx_unlock(&spechash_mtx);
2777		vp->v_rdev = NULL;
2778	}
2779
2780	/*
2781	 * If it is on the freelist and not already at the head,
2782	 * move it to the head of the list. The test of the
2783	 * VI_DOOMED flag and the reference count of zero is because
2784	 * it will be removed from the free list by getnewvnode,
2785	 * but will not have its reference count incremented until
2786	 * after calling vgone. If the reference count were
2787	 * incremented first, vgone would (incorrectly) try to
2788	 * close the previous instance of the underlying object.
2789	 */
2790	if (vp->v_usecount == 0 && !(vp->v_iflag & VI_DOOMED)) {
2791		mtx_lock(&vnode_free_list_mtx);
2792		if (vp->v_iflag & VI_FREE) {
2793			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
2794		} else {
2795			vp->v_iflag |= VI_FREE;
2796			freevnodes++;
2797		}
2798		TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
2799		mtx_unlock(&vnode_free_list_mtx);
2800	}
2801
2802	vp->v_type = VBAD;
2803	vx_unlock(vp);
2804	VI_UNLOCK(vp);
2805}
2806
2807/*
2808 * Lookup a vnode by device number.
2809 */
2810int
2811vfinddev(dev, type, vpp)
2812	dev_t dev;
2813	enum vtype type;
2814	struct vnode **vpp;
2815{
2816	struct vnode *vp;
2817
2818	mtx_lock(&spechash_mtx);
2819	SLIST_FOREACH(vp, &dev->si_hlist, v_specnext) {
2820		if (type == vp->v_type) {
2821			*vpp = vp;
2822			mtx_unlock(&spechash_mtx);
2823			return (1);
2824		}
2825	}
2826	mtx_unlock(&spechash_mtx);
2827	return (0);
2828}
2829
2830/*
2831 * Calculate the total number of references to a special device.
2832 */
2833int
2834vcount(vp)
2835	struct vnode *vp;
2836{
2837	int count;
2838
2839	mtx_lock(&spechash_mtx);
2840	count = vp->v_rdev->si_usecount;
2841	mtx_unlock(&spechash_mtx);
2842	return (count);
2843}
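
/*
 * Usage sketch (illustrative): a device close routine can use this to
 * decide whether it is handling the last close of the device, even
 * when several aliased vnodes reference it:
 *
 *	if (vcount(vp) > 1)
 *		return (0);	(someone else still has it open)
 */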
2844
2845/*
2846 * Same as above, but using the dev_t as argument
2847 */
2848int
2849count_dev(dev)
2850	dev_t dev;
2851{
2852	struct vnode *vp;
2853
2854	vp = SLIST_FIRST(&dev->si_hlist);
2855	if (vp == NULL)
2856		return (0);
2857	return(vcount(vp));
2858}
2859
2860/*
2861 * Print out a description of a vnode.
2862 */
2863static char *typename[] =
2864{"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD"};
2865
2866void
2867vprint(label, vp)
2868	char *label;
2869	struct vnode *vp;
2870{
2871	char buf[96];
2872
2873	if (label != NULL)
2874		printf("%s: %p: ", label, (void *)vp);
2875	else
2876		printf("%p: ", (void *)vp);
2877	printf("tag %s, type %s, usecount %d, writecount %d, refcount %d,",
2878	    vp->v_tag, typename[vp->v_type], vp->v_usecount,
2879	    vp->v_writecount, vp->v_holdcnt);
2880	buf[0] = '\0';
2881	if (vp->v_vflag & VV_ROOT)
2882		strcat(buf, "|VV_ROOT");
2883	if (vp->v_vflag & VV_TEXT)
2884		strcat(buf, "|VV_TEXT");
2885	if (vp->v_vflag & VV_SYSTEM)
2886		strcat(buf, "|VV_SYSTEM");
2887	if (vp->v_iflag & VI_XLOCK)
2888		strcat(buf, "|VI_XLOCK");
2889	if (vp->v_iflag & VI_XWANT)
2890		strcat(buf, "|VI_XWANT");
2891	if (vp->v_iflag & VI_BWAIT)
2892		strcat(buf, "|VI_BWAIT");
2893	if (vp->v_iflag & VI_DOOMED)
2894		strcat(buf, "|VI_DOOMED");
2895	if (vp->v_iflag & VI_FREE)
2896		strcat(buf, "|VI_FREE");
2897	if (vp->v_vflag & VV_OBJBUF)
2898		strcat(buf, "|VV_OBJBUF");
2899	if (buf[0] != '\0')
2900		printf(" flags (%s),", &buf[1]);
2901	lockmgr_printinfo(vp->v_vnlock);
2902	printf("\n");
2903	if (vp->v_data != NULL)
2904		VOP_PRINT(vp);
2905}
2906
2907#ifdef DDB
2908#include <ddb/ddb.h>
2909/*
2910 * List all of the locked vnodes in the system.
2911 * Called when debugging the kernel.
2912 */
2913DB_SHOW_COMMAND(lockedvnods, lockedvnodes)
2914{
2915	struct mount *mp, *nmp;
2916	struct vnode *vp;
2917
2918	/*
2919	 * Note: because this is DDB, we can't obey the locking semantics
2920	 * for these structures, which means we could catch an inconsistent
2921	 * state and dereference a nasty pointer.  Not much to be done
2922	 * about that.
2923	 */
2924	printf("Locked vnodes\n");
2925	for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
2926		nmp = TAILQ_NEXT(mp, mnt_list);
2927		TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
2928			if (VOP_ISLOCKED(vp, NULL))
2929				vprint(NULL, vp);
2930		}
2932	}
2933}
2934#endif
2935
2936/*
2937 * Fill in a struct xvfsconf based on a struct vfsconf.
2938 */
2939static void
2940vfsconf2x(struct vfsconf *vfsp, struct xvfsconf *xvfsp)
2941{
2942
2943	strcpy(xvfsp->vfc_name, vfsp->vfc_name);
2944	xvfsp->vfc_typenum = vfsp->vfc_typenum;
2945	xvfsp->vfc_refcount = vfsp->vfc_refcount;
2946	xvfsp->vfc_flags = vfsp->vfc_flags;
2947	/*
2948	 * These are unused in userland, we keep them
2949	 * to not break binary compatibility.
2950	 */
2951	xvfsp->vfc_vfsops = NULL;
2952	xvfsp->vfc_next = NULL;
2953}
2954
2955static int
2956sysctl_vfs_conflist(SYSCTL_HANDLER_ARGS)
2957{
2958	struct vfsconf *vfsp;
2959	struct xvfsconf *xvfsp;
2960	int cnt, error, i;
2961
2962	cnt = 0;
2963	for (vfsp = vfsconf; vfsp != NULL; vfsp = vfsp->vfc_next)
2964		cnt++;
2965	xvfsp = malloc(sizeof(struct xvfsconf) * cnt, M_TEMP, M_WAITOK);
2966	/*
2967	 * Handle the race that we will have here when struct vfsconf
2968	 * will be locked down by using both cnt and checking vfc_next
2969	 * against NULL to determine the end of the loop.  The race will
2970	 * happen because we will have to unlock before calling malloc().
2971	 * We are protected by Giant for now.
2972	 */
2973	i = 0;
2974	for (vfsp = vfsconf; vfsp != NULL && i < cnt; vfsp = vfsp->vfc_next) {
2975		vfsconf2x(vfsp, xvfsp + i);
2976		i++;
2977	}
2978	error = SYSCTL_OUT(req, xvfsp, sizeof(struct xvfsconf) * i);
2979	free(xvfsp, M_TEMP);
2980	return (error);
2981}
2982
2983SYSCTL_PROC(_vfs, OID_AUTO, conflist, CTLFLAG_RD, NULL, 0, sysctl_vfs_conflist,
2984    "S,xvfsconf", "List of all configured filesystems");
2985
2986/*
2987 * Top level filesystem related information gathering.
2988 */
2989static int	sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS);
2990
2991static int
2992vfs_sysctl(SYSCTL_HANDLER_ARGS)
2993{
2994	int *name = (int *)arg1 - 1;	/* XXX */
2995	u_int namelen = arg2 + 1;	/* XXX */
2996	struct vfsconf *vfsp;
2997	struct xvfsconf xvfsp;
2998
2999	printf("WARNING: userland calling deprecated sysctl, "
3000	    "please rebuild world\n");
3001
3002#if 1 || defined(COMPAT_PRELITE2)
3003	/* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. */
3004	if (namelen == 1)
3005		return (sysctl_ovfs_conf(oidp, arg1, arg2, req));
3006#endif
3007
3008	switch (name[1]) {
3009	case VFS_MAXTYPENUM:
3010		if (namelen != 2)
3011			return (ENOTDIR);
3012		return (SYSCTL_OUT(req, &maxvfsconf, sizeof(int)));
3013	case VFS_CONF:
3014		if (namelen != 3)
3015			return (ENOTDIR);	/* overloaded */
3016		for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
3017			if (vfsp->vfc_typenum == name[2])
3018				break;
3019		if (vfsp == NULL)
3020			return (EOPNOTSUPP);
3021		vfsconf2x(vfsp, &xvfsp);
3022		return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp)));
3023	}
3024	return (EOPNOTSUPP);
3025}
3026
3027SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD | CTLFLAG_SKIP, vfs_sysctl,
3028	"Generic filesystem");
3029
3030#if 1 || defined(COMPAT_PRELITE2)
3031
3032static int
3033sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS)
3034{
3035	int error;
3036	struct vfsconf *vfsp;
3037	struct ovfsconf ovfs;
3038
3039	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) {
3040		ovfs.vfc_vfsops = vfsp->vfc_vfsops;	/* XXX used as flag */
3041		strcpy(ovfs.vfc_name, vfsp->vfc_name);
3042		ovfs.vfc_index = vfsp->vfc_typenum;
3043		ovfs.vfc_refcount = vfsp->vfc_refcount;
3044		ovfs.vfc_flags = vfsp->vfc_flags;
3045		error = SYSCTL_OUT(req, &ovfs, sizeof ovfs);
3046		if (error)
3047			return (error);
3048	}
3049	return (0);
3050}
3051
3052#endif /* 1 || COMPAT_PRELITE2 */
3053
3054#define KINFO_VNODESLOP		10
3055#ifdef notyet
3056/*
3057 * Dump vnode list (via sysctl).
3058 */
3059/* ARGSUSED */
3060static int
3061sysctl_vnode(SYSCTL_HANDLER_ARGS)
3062{
3063	struct xvnode *xvn;
3064	struct thread *td = req->td;
3065	struct mount *mp;
3066	struct vnode *vp;
3067	int error, len, n;
3068
3069	/*
3070	 * Stale numvnodes access is not fatal here.
3071	 */
3072	req->lock = 0;
3073	len = (numvnodes + KINFO_VNODESLOP) * sizeof *xvn;
3074	if (!req->oldptr)
3075		/* Make an estimate */
3076		return (SYSCTL_OUT(req, 0, len));
3077
3078	sysctl_wire_old_buffer(req, 0);
3079	xvn = malloc(len, M_TEMP, M_ZERO | M_WAITOK);
3080	n = 0;
3081	mtx_lock(&mountlist_mtx);
3082	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
3083		if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, td))
3084			continue;
3085		mtx_lock(&mntvnode_mtx);
3086		TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
3087			if (n == len)
3088				break;
3089			vref(vp);
3090			xvn[n].xv_size = sizeof *xvn;
3091			xvn[n].xv_vnode = vp;
3092#define XV_COPY(field) xvn[n].xv_##field = vp->v_##field
3093			XV_COPY(usecount);
3094			XV_COPY(writecount);
3095			XV_COPY(holdcnt);
3096			XV_COPY(id);
3097			XV_COPY(mount);
3098			XV_COPY(numoutput);
3099			XV_COPY(type);
3100#undef XV_COPY
3101			xvn[n].xv_flag = vp->v_vflag;
3102
3103			switch (vp->v_type) {
3104			case VREG:
3105			case VDIR:
3106			case VLNK:
3107				xvn[n].xv_dev = vp->v_cachedfs;
3108				xvn[n].xv_ino = vp->v_cachedid;
3109				break;
3110			case VBLK:
3111			case VCHR:
3112				if (vp->v_rdev == NULL) {
3113					vrele(vp);
3114					continue;
3115				}
3116				xvn[n].xv_dev = dev2udev(vp->v_rdev);
3117				break;
3118			case VSOCK:
3119				xvn[n].xv_socket = vp->v_socket;
3120				break;
3121			case VFIFO:
3122				xvn[n].xv_fifo = vp->v_fifoinfo;
3123				break;
3124			case VNON:
3125			case VBAD:
3126			default:
3127				/* shouldn't happen? */
3128				vrele(vp);
3129				continue;
3130			}
3131			vrele(vp);
3132			++n;
3133		}
3134		mtx_unlock(&mntvnode_mtx);
3135		mtx_lock(&mountlist_mtx);
3136		vfs_unbusy(mp, td);
3137		if (n == len)
3138			break;
3139	}
3140	mtx_unlock(&mountlist_mtx);
3141
3142	error = SYSCTL_OUT(req, xvn, n * sizeof *xvn);
3143	free(xvn, M_TEMP);
3144	return (error);
3145}
3146
3147SYSCTL_PROC(_kern, KERN_VNODE, vnode, CTLTYPE_OPAQUE|CTLFLAG_RD,
3148	0, 0, sysctl_vnode, "S,xvnode", "");
3149#endif
3150
3151/*
3152 * Check to see if a filesystem is mounted on a block device.
3153 */
3154int
3155vfs_mountedon(vp)
3156	struct vnode *vp;
3157{
3158
3159	if (vp->v_rdev->si_mountpoint != NULL)
3160		return (EBUSY);
3161	return (0);
3162}
3163
3164/*
3165 * Unmount all filesystems. The list is traversed in reverse order
3166 * of mounting to avoid dependencies.
3167 */
3168void
3169vfs_unmountall()
3170{
3171	struct mount *mp;
3172	struct thread *td;
3173	int error;
3174
3175	if (curthread != NULL)
3176		td = curthread;
3177	else
3178		td = FIRST_THREAD_IN_PROC(initproc); /* XXX XXX proc0? */
3179	/*
3180	 * Since this only runs when rebooting, it is not interlocked.
3181	 */
3182	while (!TAILQ_EMPTY(&mountlist)) {
3183		mp = TAILQ_LAST(&mountlist, mntlist);
3184		error = dounmount(mp, MNT_FORCE, td);
3185		if (error) {
3186			TAILQ_REMOVE(&mountlist, mp, mnt_list);
3187			printf("unmount of %s failed (",
3188			    mp->mnt_stat.f_mntonname);
3189			if (error == EBUSY)
3190				printf("BUSY)\n");
3191			else
3192				printf("%d)\n", error);
3193		} else {
3194			/* The unmount has removed mp from the mountlist */
3195		}
3196	}
3197}
3198
3199/*
3200 * Perform msync on all vnodes under a mount point.
3201 * The mount point must be locked.
3202 */
3203void
3204vfs_msync(struct mount *mp, int flags)
3205{
3206	struct vnode *vp, *nvp;
3207	struct vm_object *obj;
3208	int tries;
3209
3210	GIANT_REQUIRED;
3211
3212	tries = 5;
3213	mtx_lock(&mntvnode_mtx);
3214loop:
3215	for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist); vp != NULL; vp = nvp) {
3216		if (vp->v_mount != mp) {
3217			if (--tries > 0)
3218				goto loop;
3219			break;
3220		}
3221		nvp = TAILQ_NEXT(vp, v_nmntvnodes);
3222
3223		VI_LOCK(vp);
3224		if (vp->v_iflag & VI_XLOCK) {
3225			VI_UNLOCK(vp);
3226			continue;
3227		}
3228
3229		if ((vp->v_iflag & VI_OBJDIRTY) &&
3230		    (flags == MNT_WAIT || VOP_ISLOCKED(vp, NULL) == 0)) {
3231			mtx_unlock(&mntvnode_mtx);
3232			if (!vget(vp,
3233			    LK_EXCLUSIVE | LK_RETRY | LK_INTERLOCK,
3234			    curthread)) {
3235				if (vp->v_vflag & VV_NOSYNC) {	/* unlinked */
3236					vput(vp);
3237					mtx_lock(&mntvnode_mtx);
3238					continue;
3239				}
3240
3241				if (VOP_GETVOBJECT(vp, &obj) == 0) {
3242					VM_OBJECT_LOCK(obj);
3243					vm_object_page_clean(obj, 0, 0,
3244					    flags == MNT_WAIT ?
3245					    OBJPC_SYNC : OBJPC_NOSYNC);
3246					VM_OBJECT_UNLOCK(obj);
3247				}
3248				vput(vp);
3249			}
3250			mtx_lock(&mntvnode_mtx);
3251			if (TAILQ_NEXT(vp, v_nmntvnodes) != nvp) {
3252				if (--tries > 0)
3253					goto loop;
3254				break;
3255			}
3256		} else
3257			VI_UNLOCK(vp);
3258	}
3259	mtx_unlock(&mntvnode_mtx);
3260}
3261
3262/*
3263 * Create the VM object needed for VMIO and mmap support.  This
3264 * is done for all VREG files in the system.  Some filesystems might
3265 * take advantage of the additional metadata buffering capability of
3266 * the VMIO code by making the device node VMIO mode also.
3267 *
3268 * vp must be locked when vfs_object_create is called.
3269 */
3270int
3271vfs_object_create(vp, td, cred)
3272	struct vnode *vp;
3273	struct thread *td;
3274	struct ucred *cred;
3275{
3276	GIANT_REQUIRED;
3277	return (VOP_CREATEVOBJECT(vp, cred, td));
3278}
3279
3280/*
3281 * Mark a vnode as free, putting it up for recycling.
3282 */
3283void
3284vfree(vp)
3285	struct vnode *vp;
3286{
3287	ASSERT_VI_LOCKED(vp, "vfree");
3288	mtx_lock(&vnode_free_list_mtx);
3289	KASSERT((vp->v_iflag & VI_FREE) == 0, ("vnode already free"));
3290	if (vp->v_iflag & VI_AGE) {
3291		TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
3292	} else {
3293		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
3294	}
3295	freevnodes++;
3296	mtx_unlock(&vnode_free_list_mtx);
3297	vp->v_iflag &= ~VI_AGE;
3298	vp->v_iflag |= VI_FREE;
3299}
3300
3301/*
3302 * Opposite of vfree() - mark a vnode as in use.
3303 */
3304void
3305vbusy(vp)
3306	struct vnode *vp;
3307{
3308	ASSERT_VI_LOCKED(vp, "vbusy");
3309	KASSERT((vp->v_iflag & VI_FREE) != 0, ("vnode not free"));
3310
3311	mtx_lock(&vnode_free_list_mtx);
3312	TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
3313	freevnodes--;
3314	mtx_unlock(&vnode_free_list_mtx);
3315
3316	vp->v_iflag &= ~(VI_FREE|VI_AGE);
3317}
3318
3319/*
3320 * Record a process's interest in events which might happen to
3321 * a vnode.  Because poll uses the historic select-style interface
3322 * internally, this routine serves as both the ``check for any
3323 * pending events'' and the ``record my interest in future events''
3324 * functions.  (These are done together, while the lock is held,
3325 * to avoid race conditions.)
3326 */
3327int
3328vn_pollrecord(vp, td, events)
3329	struct vnode *vp;
3330	struct thread *td;
3331	short events;
3332{
3333
3334	if (vp->v_pollinfo == NULL)
3335		v_addpollinfo(vp);
3336	mtx_lock(&vp->v_pollinfo->vpi_lock);
3337	if (vp->v_pollinfo->vpi_revents & events) {
3338		/*
3339		 * This leaves events we are not interested
3340		 * in available for the other process which
3341		 * presumably had requested them
3342		 * (otherwise they would never have been
3343		 * recorded).
3344		 */
3345		events &= vp->v_pollinfo->vpi_revents;
3346		vp->v_pollinfo->vpi_revents &= ~events;
3347
3348		mtx_unlock(&vp->v_pollinfo->vpi_lock);
3349		return (events);
3350	}
3351	vp->v_pollinfo->vpi_events |= events;
3352	selrecord(td, &vp->v_pollinfo->vpi_selinfo);
3353	mtx_unlock(&vp->v_pollinfo->vpi_lock);
3354	return (0);
3355}
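
/*
 * Usage sketch (an assumption about the caller, not code from this
 * file): a filesystem poll routine with no immediate events to report
 * simply forwards the request so interest is recorded:
 *
 *	return (vn_pollrecord(ap->a_vp, ap->a_td, ap->a_events));
 */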
3356
3357/*
3358 * Note the occurrence of an event.  If the VN_POLLEVENT macro is used,
3359 * it is possible for us to miss an event due to race conditions, but
3360 * that condition is expected to be rare, so for the moment it is the
3361 * preferred interface.
3362 */
3363void
3364vn_pollevent(vp, events)
3365	struct vnode *vp;
3366	short events;
3367{
3368
3369	if (vp->v_pollinfo == NULL)
3370		v_addpollinfo(vp);
3371	mtx_lock(&vp->v_pollinfo->vpi_lock);
3372	if (vp->v_pollinfo->vpi_events & events) {
3373		/*
3374		 * We clear vpi_events so that we don't
3375		 * call selwakeup() twice if two events are
3376		 * posted before the polling process(es) is
3377		 * awakened.  This also ensures that we take at
3378		 * most one selwakeup() if the polling process
3379		 * is no longer interested.  However, it does
3380		 * mean that only one event can be noticed at
3381		 * a time.  (Perhaps we should only clear those
3382		 * event bits which we note?) XXX
3383		 */
3384		vp->v_pollinfo->vpi_events = 0;	/* &= ~events ??? */
3385		vp->v_pollinfo->vpi_revents |= events;
3386		selwakeup(&vp->v_pollinfo->vpi_selinfo);
3387	}
3388	mtx_unlock(&vp->v_pollinfo->vpi_lock);
3389}
3390
3391/*
3392 * Wake up anyone polling on vp because it is being revoked.
3393 * This depends on dead_poll() returning POLLHUP for correct
3394 * behavior.
3395 */
3396void
3397vn_pollgone(vp)
3398	struct vnode *vp;
3399{
3400
3401	mtx_lock(&vp->v_pollinfo->vpi_lock);
3402	VN_KNOTE(vp, NOTE_REVOKE);
3403	if (vp->v_pollinfo->vpi_events) {
3404		vp->v_pollinfo->vpi_events = 0;
3405		selwakeup(&vp->v_pollinfo->vpi_selinfo);
3406	}
3407	mtx_unlock(&vp->v_pollinfo->vpi_lock);
3408}
3409
3412/*
3413 * Routine to create and manage a filesystem syncer vnode.
3414 */
3415#define sync_close ((int (*)(struct vop_close_args *))nullop)
3416static int	sync_fsync(struct vop_fsync_args *);
3417static int	sync_inactive(struct vop_inactive_args *);
3418static int	sync_reclaim(struct vop_reclaim_args *);
3419
3420static vop_t **sync_vnodeop_p;
3421static struct vnodeopv_entry_desc sync_vnodeop_entries[] = {
3422	{ &vop_default_desc,	(vop_t *) vop_eopnotsupp },
3423	{ &vop_close_desc,	(vop_t *) sync_close },		/* close */
3424	{ &vop_fsync_desc,	(vop_t *) sync_fsync },		/* fsync */
3425	{ &vop_inactive_desc,	(vop_t *) sync_inactive },	/* inactive */
3426	{ &vop_reclaim_desc,	(vop_t *) sync_reclaim },	/* reclaim */
3427	{ &vop_lock_desc,	(vop_t *) vop_stdlock },	/* lock */
3428	{ &vop_unlock_desc,	(vop_t *) vop_stdunlock },	/* unlock */
3429	{ &vop_islocked_desc,	(vop_t *) vop_stdislocked },	/* islocked */
3430	{ NULL, NULL }
3431};
3432static struct vnodeopv_desc sync_vnodeop_opv_desc =
3433	{ &sync_vnodeop_p, sync_vnodeop_entries };
3434
3435VNODEOP_SET(sync_vnodeop_opv_desc);
3436
3437/*
3438 * Create a new filesystem syncer vnode for the specified mount point.
3439 */
3440int
3441vfs_allocate_syncvnode(mp)
3442	struct mount *mp;
3443{
3444	struct vnode *vp;
3445	static long start, incr, next;
3446	int error;
3447
3448	/* Allocate a new vnode */
3449	if ((error = getnewvnode("syncer", mp, sync_vnodeop_p, &vp)) != 0) {
3450		mp->mnt_syncer = NULL;
3451		return (error);
3452	}
3453	vp->v_type = VNON;
3454	/*
3455	 * Place the vnode onto the syncer worklist. We attempt to
3456	 * scatter them about on the list so that they will go off
3457	 * at evenly distributed times even if all the filesystems
3458	 * are mounted at once.
3459	 */
3460	next += incr;
3461	if (next == 0 || next > syncer_maxdelay) {
3462		start /= 2;
3463		incr /= 2;
3464		if (start == 0) {
3465			start = syncer_maxdelay / 2;
3466			incr = syncer_maxdelay;
3467		}
3468		next = start;
3469	}
3470	VI_LOCK(vp);
3471	vn_syncer_add_to_worklist(vp, syncdelay > 0 ? next % syncdelay : 0);
3472	VI_UNLOCK(vp);
3473	mp->mnt_syncer = vp;
3474	return (0);
3475}
3476
3477/*
3478 * Do a lazy sync of the filesystem.
3479 */
3480static int
3481sync_fsync(ap)
3482	struct vop_fsync_args /* {
3483		struct vnode *a_vp;
3484		struct ucred *a_cred;
3485		int a_waitfor;
3486		struct thread *a_td;
3487	} */ *ap;
3488{
3489	struct vnode *syncvp = ap->a_vp;
3490	struct mount *mp = syncvp->v_mount;
3491	struct thread *td = ap->a_td;
3492	int error, asyncflag;
3493
3494	/*
3495	 * We only need to do something if this is a lazy evaluation.
3496	 */
3497	if (ap->a_waitfor != MNT_LAZY)
3498		return (0);
3499
3500	/*
3501	 * Move ourselves to the back of the sync list.
3502	 */
3503	VI_LOCK(syncvp);
3504	vn_syncer_add_to_worklist(syncvp, syncdelay);
3505	VI_UNLOCK(syncvp);
3506
3507	/*
3508	 * Walk the list of vnodes pushing all that are dirty and
3509	 * not already on the sync list.
3510	 */
3511	mtx_lock(&mountlist_mtx);
3512	if (vfs_busy(mp, LK_EXCLUSIVE | LK_NOWAIT, &mountlist_mtx, td) != 0) {
3513		mtx_unlock(&mountlist_mtx);
3514		return (0);
3515	}
3516	if (vn_start_write(NULL, &mp, V_NOWAIT) != 0) {
3517		vfs_unbusy(mp, td);
3518		return (0);
3519	}
3520	asyncflag = mp->mnt_flag & MNT_ASYNC;
3521	mp->mnt_flag &= ~MNT_ASYNC;
3522	vfs_msync(mp, MNT_NOWAIT);
3523	error = VFS_SYNC(mp, MNT_LAZY, ap->a_cred, td);
3524	if (asyncflag)
3525		mp->mnt_flag |= MNT_ASYNC;
3526	vn_finished_write(mp);
3527	vfs_unbusy(mp, td);
3528	return (error);
3529}
3530
3531/*
3532 * The syncer vnode is no longer referenced.
3533 */
3534static int
3535sync_inactive(ap)
3536	struct vop_inactive_args /* {
3537		struct vnode *a_vp;
3538		struct thread *a_td;
3539	} */ *ap;
3540{
3541
3542	VOP_UNLOCK(ap->a_vp, 0, ap->a_td);
3543	vgone(ap->a_vp);
3544	return (0);
3545}
3546
3547/*
3548 * The syncer vnode is no longer needed and is being decommissioned.
3549 *
3550 * Modifications to the worklist must be protected by sync_mtx.
3551 */
3552static int
3553sync_reclaim(ap)
3554	struct vop_reclaim_args /* {
3555		struct vnode *a_vp;
3556	} */ *ap;
3557{
3558	struct vnode *vp = ap->a_vp;
3559
3560	VI_LOCK(vp);
3561	vp->v_mount->mnt_syncer = NULL;
3562	if (vp->v_iflag & VI_ONWORKLST) {
3563		mtx_lock(&sync_mtx);
3564		LIST_REMOVE(vp, v_synclist);
3565		mtx_unlock(&sync_mtx);
3566		vp->v_iflag &= ~VI_ONWORKLST;
3567	}
3568	VI_UNLOCK(vp);
3569
3570	return (0);
3571}
3572
3573/*
3574 * Extract the dev_t from a VCHR.
3575 */
3576dev_t
3577vn_todev(vp)
3578	struct vnode *vp;
3579{
3580	if (vp->v_type != VCHR)
3581		return (NODEV);
3582	return (vp->v_rdev);
3583}
3584
3585/*
3586 * Check if the vnode represents a disk device.
3587 */
3588int
3589vn_isdisk(vp, errp)
3590	struct vnode *vp;
3591	int *errp;
3592{
3593	struct cdevsw *cdevsw;
3594
3595	if (vp->v_type != VCHR) {
3596		if (errp != NULL)
3597			*errp = ENOTBLK;
3598		return (0);
3599	}
3600	if (vp->v_rdev == NULL) {
3601		if (errp != NULL)
3602			*errp = ENXIO;
3603		return (0);
3604	}
3605	cdevsw = devsw(vp->v_rdev);
3606	if (cdevsw == NULL) {
3607		if (errp != NULL)
3608			*errp = ENXIO;
3609		return (0);
3610	}
3611	if (!(cdevsw->d_flags & D_DISK)) {
3612		if (errp != NULL)
3613			*errp = ENOTBLK;
3614		return (0);
3615	}
3616	if (errp != NULL)
3617		*errp = 0;
3618	return (1);
3619}
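
/*
 * Usage sketch (illustrative): mount code validating a device vnode
 * can test and pick up the errno in one call:
 *
 *	if (!vn_isdisk(devvp, &error))
 *		return (error);
 */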
3620
3621/*
3622 * Free data allocated by namei(); see namei(9) for details.
3623 */
3624void
3625NDFREE(ndp, flags)
3626     struct nameidata *ndp;
3627     const u_int flags;
3628{
3629	if (!(flags & NDF_NO_FREE_PNBUF) &&
3630	    (ndp->ni_cnd.cn_flags & HASBUF)) {
3631		uma_zfree(namei_zone, ndp->ni_cnd.cn_pnbuf);
3632		ndp->ni_cnd.cn_flags &= ~HASBUF;
3633	}
3634	if (!(flags & NDF_NO_DVP_UNLOCK) &&
3635	    (ndp->ni_cnd.cn_flags & LOCKPARENT) &&
3636	    ndp->ni_dvp != ndp->ni_vp)
3637		VOP_UNLOCK(ndp->ni_dvp, 0, ndp->ni_cnd.cn_thread);
3638	if (!(flags & NDF_NO_DVP_RELE) &&
3639	    (ndp->ni_cnd.cn_flags & (LOCKPARENT|WANTPARENT))) {
3640		vrele(ndp->ni_dvp);
3641		ndp->ni_dvp = NULL;
3642	}
3643	if (!(flags & NDF_NO_VP_UNLOCK) &&
3644	    (ndp->ni_cnd.cn_flags & LOCKLEAF) && ndp->ni_vp)
3645		VOP_UNLOCK(ndp->ni_vp, 0, ndp->ni_cnd.cn_thread);
3646	if (!(flags & NDF_NO_VP_RELE) &&
3647	    ndp->ni_vp) {
3648		vrele(ndp->ni_vp);
3649		ndp->ni_vp = NULL;
3650	}
3651	if (!(flags & NDF_NO_STARTDIR_RELE) &&
3652	    (ndp->ni_cnd.cn_flags & SAVESTART)) {
3653		vrele(ndp->ni_startdir);
3654		ndp->ni_startdir = NULL;
3655	}
3656}
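
/*
 * Usage sketch (illustrative): after a successful namei(), free just
 * the pathname buffer while keeping the vnode for further work:
 *
 *	NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_USERSPACE, path, td);
 *	if ((error = namei(&nd)) != 0)
 *		return (error);
 *	NDFREE(&nd, NDF_ONLY_PNBUF);
 *	... use nd.ni_vp, then vput(nd.ni_vp) ...
 */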
3657
3658/*
3659 * Common filesystem object access control check routine.  Accepts a
3660 * vnode's type, "mode", uid and gid, requested access mode, credentials,
3661 * and optional call-by-reference privused argument allowing vaccess()
3662 * to indicate to the caller whether privilege was used to satisfy the
3663 * request (obsoleted).  Returns 0 on success, or an errno on failure.
3664 */
3665int
3666vaccess(type, file_mode, file_uid, file_gid, acc_mode, cred, privused)
3667	enum vtype type;
3668	mode_t file_mode;
3669	uid_t file_uid;
3670	gid_t file_gid;
3671	mode_t acc_mode;
3672	struct ucred *cred;
3673	int *privused;
3674{
3675	mode_t dac_granted;
3676#ifdef CAPABILITIES
3677	mode_t cap_granted;
3678#endif
3679
3680	/*
3681	 * Look for a normal, non-privileged way to access the file/directory
3682	 * as requested.  If it exists, go with that.
3683	 */
3684
3685	if (privused != NULL)
3686		*privused = 0;
3687
3688	dac_granted = 0;
3689
3690	/* Check the owner. */
3691	if (cred->cr_uid == file_uid) {
3692		dac_granted |= VADMIN;
3693		if (file_mode & S_IXUSR)
3694			dac_granted |= VEXEC;
3695		if (file_mode & S_IRUSR)
3696			dac_granted |= VREAD;
3697		if (file_mode & S_IWUSR)
3698			dac_granted |= (VWRITE | VAPPEND);
3699
3700		if ((acc_mode & dac_granted) == acc_mode)
3701			return (0);
3702
3703		goto privcheck;
3704	}
3705
3706	/* Otherwise, check the groups (first match) */
3707	if (groupmember(file_gid, cred)) {
3708		if (file_mode & S_IXGRP)
3709			dac_granted |= VEXEC;
3710		if (file_mode & S_IRGRP)
3711			dac_granted |= VREAD;
3712		if (file_mode & S_IWGRP)
3713			dac_granted |= (VWRITE | VAPPEND);
3714
3715		if ((acc_mode & dac_granted) == acc_mode)
3716			return (0);
3717
3718		goto privcheck;
3719	}
3720
3721	/* Otherwise, check everyone else. */
3722	if (file_mode & S_IXOTH)
3723		dac_granted |= VEXEC;
3724	if (file_mode & S_IROTH)
3725		dac_granted |= VREAD;
3726	if (file_mode & S_IWOTH)
3727		dac_granted |= (VWRITE | VAPPEND);
3728	if ((acc_mode & dac_granted) == acc_mode)
3729		return (0);
3730
3731privcheck:
3732	if (!suser_cred(cred, PRISON_ROOT)) {
3733		/* XXX audit: privilege used */
3734		if (privused != NULL)
3735			*privused = 1;
3736		return (0);
3737	}
3738
3739#ifdef CAPABILITIES
3740	/*
3741	 * Build a capability mask to determine if the set of capabilities
3742	 * satisfies the requirements when combined with the granted mask
3743	 * from above.
3744	 * For each capability, if the capability is required, bitwise
3745	 * or the request type onto the cap_granted mask.
3746	 */
3747	cap_granted = 0;
3748
3749	if (type == VDIR) {
3750		/*
3751		 * For directories, use CAP_DAC_READ_SEARCH to satisfy
3752		 * VEXEC requests, instead of CAP_DAC_EXECUTE.
3753		 */
3754		if ((acc_mode & VEXEC) && ((dac_granted & VEXEC) == 0) &&
3755		    !cap_check(cred, NULL, CAP_DAC_READ_SEARCH, PRISON_ROOT))
3756			cap_granted |= VEXEC;
3757	} else {
3758		if ((acc_mode & VEXEC) && ((dac_granted & VEXEC) == 0) &&
3759		    !cap_check(cred, NULL, CAP_DAC_EXECUTE, PRISON_ROOT))
3760			cap_granted |= VEXEC;
3761	}
3762
3763	if ((acc_mode & VREAD) && ((dac_granted & VREAD) == 0) &&
3764	    !cap_check(cred, NULL, CAP_DAC_READ_SEARCH, PRISON_ROOT))
3765		cap_granted |= VREAD;
3766
3767	if ((acc_mode & VWRITE) && ((dac_granted & VWRITE) == 0) &&
3768	    !cap_check(cred, NULL, CAP_DAC_WRITE, PRISON_ROOT))
3769		cap_granted |= (VWRITE | VAPPEND);
3770
3771	if ((acc_mode & VADMIN) && ((dac_granted & VADMIN) == 0) &&
3772	    !cap_check(cred, NULL, CAP_FOWNER, PRISON_ROOT))
3773		cap_granted |= VADMIN;
3774
3775	if ((acc_mode & (cap_granted | dac_granted)) == acc_mode) {
3776		/* XXX audit: privilege used */
3777		if (privused != NULL)
3778			*privused = 1;
3779		return (0);
3780	}
3781#endif
3782
3783	return ((acc_mode & VADMIN) ? EPERM : EACCES);
3784}
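
/*
 * Caller sketch (illustrative; `ip' is a hypothetical in-memory
 * inode): a filesystem access VOP usually forwards its stored
 * identity directly, e.g.
 *
 *	return (vaccess(vp->v_type, ip->i_mode, ip->i_uid, ip->i_gid,
 *	    ap->a_mode, ap->a_cred, NULL));
 */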
3785
3786/*
3787 * Credential check based on process requesting service, and per-attribute
3788 * permissions.
3789 */
3790int
3791extattr_check_cred(struct vnode *vp, int attrnamespace,
3792    struct ucred *cred, struct thread *td, int access)
3793{
3794
3795	/*
3796	 * Kernel-invoked requests always succeed.
3797	 */
3798	if (cred == NOCRED)
3799		return (0);
3800
3801	/*
3802	 * Do not allow privileged processes in jail to directly
3803	 * manipulate system attributes.
3804	 *
3805	 * XXX What capability should apply here?
3806	 * Probably CAP_SYS_SETFFLAG.
3807	 */
3808	switch (attrnamespace) {
3809	case EXTATTR_NAMESPACE_SYSTEM:
3810		/* Potentially should be: return (EPERM); */
3811		return (suser_cred(cred, 0));
3812	case EXTATTR_NAMESPACE_USER:
3813		return (VOP_ACCESS(vp, access, cred, td));
3814	default:
3815		return (EPERM);
3816	}
3817}
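
/*
 * Caller sketch (illustrative): an extended attribute VOP would gate
 * the actual work on this check first, passing VREAD or VWRITE as
 * appropriate:
 *
 *	error = extattr_check_cred(vp, attrnamespace, cred, td, VWRITE);
 *	if (error)
 *		return (error);
 */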
3818