vfs_subr.c revision 137506
1/*
2 * Copyright (c) 1989, 1993
3 *	The Regents of the University of California.  All rights reserved.
4 * (c) UNIX System Laboratories, Inc.
5 * All or some portions of this file are derived from material licensed
6 * to the University of California by American Telephone and Telegraph
7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8 * the permission of UNIX System Laboratories, Inc.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 *    notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 *    notice, this list of conditions and the following disclaimer in the
17 *    documentation and/or other materials provided with the distribution.
18 * 4. Neither the name of the University nor the names of its contributors
19 *    may be used to endorse or promote products derived from this software
20 *    without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 *	@(#)vfs_subr.c	8.31 (Berkeley) 5/26/95
35 */
36
37/*
38 * External virtual filesystem routines
39 */
40
41#include <sys/cdefs.h>
42__FBSDID("$FreeBSD: head/sys/kern/vfs_subr.c 137506 2004-11-10 07:17:28Z phk $");
43
44#include "opt_ddb.h"
45#include "opt_mac.h"
46
47#include <sys/param.h>
48#include <sys/systm.h>
49#include <sys/bio.h>
50#include <sys/buf.h>
51#include <sys/conf.h>
52#include <sys/event.h>
53#include <sys/eventhandler.h>
54#include <sys/extattr.h>
55#include <sys/fcntl.h>
56#include <sys/kdb.h>
57#include <sys/kernel.h>
58#include <sys/kthread.h>
59#include <sys/mac.h>
60#include <sys/malloc.h>
61#include <sys/mount.h>
62#include <sys/namei.h>
63#include <sys/reboot.h>
64#include <sys/sleepqueue.h>
65#include <sys/stat.h>
66#include <sys/sysctl.h>
67#include <sys/syslog.h>
68#include <sys/vmmeter.h>
69#include <sys/vnode.h>
70
71#include <vm/vm.h>
72#include <vm/vm_object.h>
73#include <vm/vm_extern.h>
74#include <vm/pmap.h>
75#include <vm/vm_map.h>
76#include <vm/vm_page.h>
77#include <vm/vm_kern.h>
78#include <vm/uma.h>
79
80static MALLOC_DEFINE(M_NETADDR, "Export Host", "Export host address structure");
81
82static void	addalias(struct vnode *vp, struct cdev *nvp_rdev);
83static void	delmntque(struct vnode *vp);
84static void	insmntque(struct vnode *vp, struct mount *mp);
85static void	vclean(struct vnode *vp, int flags, struct thread *td);
86static void	vlruvp(struct vnode *vp);
87static int	flushbuflist(struct buf *blist, int flags, struct vnode *vp,
88		    int slpflag, int slptimeo, int *errorp);
89static void	syncer_shutdown(void *arg, int howto);
90static int	vtryrecycle(struct vnode *vp);
91static void	vx_lock(struct vnode *vp);
92static void	vx_unlock(struct vnode *vp);
93static void	vgonechrl(struct vnode *vp, struct thread *td);
94
95
96/*
97 * Number of vnodes in existence.  Increased whenever getnewvnode()
98 * allocates a new vnode, never decreased.
99 */
100static unsigned long	numvnodes;
101
102SYSCTL_LONG(_vfs, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0, "");
103
104/*
105 * Conversion tables for conversion from vnode types to inode formats
106 * and back.
107 */
108enum vtype iftovt_tab[16] = {
109	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
110	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
111};
112int vttoif_tab[9] = {
113	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
114	S_IFSOCK, S_IFIFO, S_IFMT,
115};
116
117/*
118 * List of vnodes that are ready for recycling.
119 */
120static TAILQ_HEAD(freelst, vnode) vnode_free_list;
121
122/*
123 * Minimum number of free vnodes.  If there are fewer free vnodes than this,
124 * getnewvnode() will return a newly allocated vnode.
125 */
126static u_long wantfreevnodes = 25;
127SYSCTL_LONG(_vfs, OID_AUTO, wantfreevnodes, CTLFLAG_RW, &wantfreevnodes, 0, "");
128/* Number of vnodes in the free list. */
129static u_long freevnodes;
130SYSCTL_LONG(_vfs, OID_AUTO, freevnodes, CTLFLAG_RD, &freevnodes, 0, "");
131
132/*
133 * Various variables used for debugging the new implementation of
134 * reassignbuf().
135 * XXX these are probably of (very) limited utility now.
136 */
137static int reassignbufcalls;
138SYSCTL_INT(_vfs, OID_AUTO, reassignbufcalls, CTLFLAG_RW, &reassignbufcalls, 0, "");
139static int nameileafonly;
140SYSCTL_INT(_vfs, OID_AUTO, nameileafonly, CTLFLAG_RW, &nameileafonly, 0, "");
141
142/*
143 * Cache for the mount type id assigned to NFS.  This is used for
144 * special checks in nfs/nfs_nqlease.c and vm/vnode_pager.c.
145 */
146int	nfs_mount_type = -1;
147
148/* To keep more than one thread at a time from running vfs_getnewfsid */
149static struct mtx mntid_mtx;
150
151/*
152 * Lock for any access to the following:
153 *	vnode_free_list
154 *	numvnodes
155 *	freevnodes
156 */
157static struct mtx vnode_free_list_mtx;
158
159/* Publicly exported FS */
160struct nfs_public nfs_pub;
161
162/* Zone for allocation of new vnodes - used exclusively by getnewvnode() */
163static uma_zone_t vnode_zone;
164static uma_zone_t vnodepoll_zone;
165
166/* Set to 1 to print out reclaim of active vnodes */
167int	prtactive;
168
169/*
170 * The workitem queue.
171 *
172 * It is useful to delay writes of file data and filesystem metadata
173 * for tens of seconds so that quickly created and deleted files need
174 * not waste disk bandwidth being created and removed. To realize this,
175 * we append vnodes to a "workitem" queue. When running with a soft
176 * updates implementation, most pending metadata dependencies should
177 * not wait for more than a few seconds. Thus, metadata on mounted block
178 * devices is delayed only about half the time that file data is delayed.
179 * Similarly, directory updates are more critical, so they are delayed only
180 * about a third the time that file data is delayed. Thus, there are
181 * SYNCER_MAXDELAY queues that are processed round-robin at a rate of
182 * one each second (driven off the filesystem syncer process). The
183 * syncer_delayno variable indicates the next queue that is to be processed.
184 * Items that need to be processed soon are placed in this queue:
185 *
186 *	syncer_workitem_pending[syncer_delayno]
187 *
188 * A delay of fifteen seconds is done by placing the request fifteen
189 * entries later in the queue:
190 *
191 *	syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask]
192 *
193 */
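/*
 * Worked example (illustrative addition, not in the original source):
 * with SYNCER_MAXDELAY of 32 the hashinit() call in vntblinit() yields a
 * syncer_mask of 31, so a 15 second delay requested while syncer_delayno
 * is 20 lands in slot (20 + 15) & 31 == 3; the worklist simply wraps.
 */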
194static int syncer_delayno;
195static long syncer_mask;
196LIST_HEAD(synclist, bufobj);
197static struct synclist *syncer_workitem_pending;
198/*
199 * The sync_mtx protects:
200 *	bo->bo_synclist
201 *	sync_vnode_count
202 *	syncer_delayno
203 *	syncer_state
204 *	syncer_workitem_pending
205 *	syncer_worklist_len
206 *	rushjob
207 */
208static struct mtx sync_mtx;
209
210#define SYNCER_MAXDELAY		32
211static int syncer_maxdelay = SYNCER_MAXDELAY;	/* maximum delay time */
212static int syncdelay = 30;		/* max time to delay syncing data */
213static int filedelay = 30;		/* time to delay syncing files */
214SYSCTL_INT(_kern, OID_AUTO, filedelay, CTLFLAG_RW, &filedelay, 0, "");
215static int dirdelay = 29;		/* time to delay syncing directories */
216SYSCTL_INT(_kern, OID_AUTO, dirdelay, CTLFLAG_RW, &dirdelay, 0, "");
217static int metadelay = 28;		/* time to delay syncing metadata */
218SYSCTL_INT(_kern, OID_AUTO, metadelay, CTLFLAG_RW, &metadelay, 0, "");
219static int rushjob;		/* number of slots to run ASAP */
220static int stat_rush_requests;	/* number of times I/O speeded up */
221SYSCTL_INT(_debug, OID_AUTO, rush_requests, CTLFLAG_RW, &stat_rush_requests, 0, "");
222
223/*
224 * When shutting down the syncer, run it at four times normal speed.
225 */
226#define SYNCER_SHUTDOWN_SPEEDUP		4
227static int sync_vnode_count;
228static int syncer_worklist_len;
229static enum { SYNCER_RUNNING, SYNCER_SHUTTING_DOWN, SYNCER_FINAL_DELAY }
230    syncer_state;
231
232/*
233 * Number of vnodes we want to exist at any one time.  This is mostly used
234 * to size hash tables in vnode-related code.  It is normally not used in
235 * getnewvnode(), as wantfreevnodes is normally nonzero.
236 *
237 * XXX desiredvnodes is historical cruft and should not exist.
238 */
239int desiredvnodes;
240SYSCTL_INT(_kern, KERN_MAXVNODES, maxvnodes, CTLFLAG_RW,
241    &desiredvnodes, 0, "Maximum number of vnodes");
242static int minvnodes;
243SYSCTL_INT(_kern, OID_AUTO, minvnodes, CTLFLAG_RW,
244    &minvnodes, 0, "Minimum number of vnodes");
245static int vnlru_nowhere;
246SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RW,
247    &vnlru_nowhere, 0, "Number of times the vnlru process ran without success");
248
249/* Hook for calling soft updates. */
250int (*softdep_process_worklist_hook)(struct mount *);
251
252/*
253 * Initialize the vnode management data structures.
254 */
255#ifndef	MAXVNODES_MAX
256#define	MAXVNODES_MAX	100000
257#endif
258static void
259vntblinit(void *dummy __unused)
260{
261
262	/*
263	 * Desiredvnodes is a function of the physical memory size and
264	 * the kernel's heap size.  Specifically, desiredvnodes scales
265	 * in proportion to the physical memory size until two fifths
266	 * of the kernel's heap size is consumed by vnodes and vm
267	 * objects.
268	 */
269	desiredvnodes = min(maxproc + cnt.v_page_count / 4, 2 * vm_kmem_size /
270	    (5 * (sizeof(struct vm_object) + sizeof(struct vnode))));
271	if (desiredvnodes > MAXVNODES_MAX) {
272		if (bootverbose)
273			printf("Reducing kern.maxvnodes %d -> %d\n",
274			    desiredvnodes, MAXVNODES_MAX);
275		desiredvnodes = MAXVNODES_MAX;
276	}
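	/*
	 * Sizing sketch (numbers invented for illustration): with 256k
	 * physical pages, a maxproc near 6000, a 200MB kernel heap and
	 * roughly 400 bytes per vnode/VM object pair, the first term is
	 * about 6000 + 65536 ~= 71500 and the second about
	 * 2 * 200MB / (5 * 400) ~= 209000, so the smaller page-count term
	 * is chosen and stays below the MAXVNODES_MAX clamp above.
	 */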
277	minvnodes = desiredvnodes / 4;
278	mtx_init(&mountlist_mtx, "mountlist", NULL, MTX_DEF);
279	mtx_init(&mntid_mtx, "mntid", NULL, MTX_DEF);
280	TAILQ_INIT(&vnode_free_list);
281	mtx_init(&vnode_free_list_mtx, "vnode_free_list", NULL, MTX_DEF);
282	vnode_zone = uma_zcreate("VNODE", sizeof (struct vnode), NULL, NULL,
283	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
284	vnodepoll_zone = uma_zcreate("VNODEPOLL", sizeof (struct vpollinfo),
285	      NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
286	/*
287	 * Initialize the filesystem syncer.
288	 */
289	syncer_workitem_pending = hashinit(syncer_maxdelay, M_VNODE,
290		&syncer_mask);
291	syncer_maxdelay = syncer_mask + 1;
292	mtx_init(&sync_mtx, "Syncer mtx", NULL, MTX_DEF);
293}
294SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_FIRST, vntblinit, NULL)
295
296
297/*
298 * Mark a mount point as busy. Used to synchronize access and to delay
299 * unmounting. Interlock is not released on failure.
300 */
301int
302vfs_busy(mp, flags, interlkp, td)
303	struct mount *mp;
304	int flags;
305	struct mtx *interlkp;
306	struct thread *td;
307{
308	int lkflags;
309
310	if (mp->mnt_kern_flag & MNTK_UNMOUNT) {
311		if (flags & LK_NOWAIT)
312			return (ENOENT);
313		mp->mnt_kern_flag |= MNTK_MWAIT;
314		/*
315		 * Since all busy locks are shared except the exclusive
316		 * lock granted when unmounting, the only place that a
317		 * wakeup needs to be done is at the release of the
318		 * exclusive lock at the end of dounmount.
319		 */
320		msleep(mp, interlkp, PVFS, "vfs_busy", 0);
321		return (ENOENT);
322	}
323	lkflags = LK_SHARED | LK_NOPAUSE;
324	if (interlkp)
325		lkflags |= LK_INTERLOCK;
326	if (lockmgr(&mp->mnt_lock, lkflags, interlkp, td))
327		panic("vfs_busy: unexpected lock failure");
328	return (0);
329}
330
331/*
332 * Free a busy filesystem.
333 */
334void
335vfs_unbusy(mp, td)
336	struct mount *mp;
337	struct thread *td;
338{
339
340	lockmgr(&mp->mnt_lock, LK_RELEASE, NULL, td);
341}
342
343/*
344 * Lookup a mount point by filesystem identifier.
345 */
346struct mount *
347vfs_getvfs(fsid)
348	fsid_t *fsid;
349{
350	register struct mount *mp;
351
352	mtx_lock(&mountlist_mtx);
353	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
354		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
355		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
356			mtx_unlock(&mountlist_mtx);
357			return (mp);
358		}
359	}
360	mtx_unlock(&mountlist_mtx);
361	return ((struct mount *) 0);
362}
363
364/*
365 * Check if a user can access privileged mount options.
366 */
367int
368vfs_suser(struct mount *mp, struct thread *td)
369{
370	int error;
371
372	if ((mp->mnt_flag & MNT_USER) == 0 ||
373	    mp->mnt_cred->cr_uid != td->td_ucred->cr_uid) {
374		if ((error = suser(td)) != 0)
375			return (error);
376	}
377	return (0);
378}
379
380/*
381 * Get a new unique fsid.  Try to make its val[0] unique, since this value
382 * will be used to create fake device numbers for stat().  Also try (but
383 * not so hard) to make its val[0] unique mod 2^16, since some emulators only
384 * support 16-bit device numbers.  We end up with unique val[0]'s for the
385 * first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls.
386 *
387 * Keep in mind that several mounts may be running in parallel.  Starting
388 * the search one past where the previous search terminated is both a
389 * micro-optimization and a defense against returning the same fsid to
390 * different mounts.
391 */
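/*
 * Encoding sketch (example values made up): for vfc_typenum 5 and
 * mntid_base 0x1234 the minor number passed to makedev(255, ...) below is
 * 0x05000000 | 0x00120000 | 0x34 - the type in bits 24-31, the high byte
 * of mntid_base in bits 16-23 and its low byte in bits 0-7 - which is why
 * only the first 2^8 values remain unique mod 2^16.
 */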
392void
393vfs_getnewfsid(mp)
394	struct mount *mp;
395{
396	static u_int16_t mntid_base;
397	fsid_t tfsid;
398	int mtype;
399
400	mtx_lock(&mntid_mtx);
401	mtype = mp->mnt_vfc->vfc_typenum;
402	tfsid.val[1] = mtype;
403	mtype = (mtype & 0xFF) << 24;
404	for (;;) {
405		tfsid.val[0] = makedev(255,
406		    mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF));
407		mntid_base++;
408		if (vfs_getvfs(&tfsid) == NULL)
409			break;
410	}
411	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
412	mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
413	mtx_unlock(&mntid_mtx);
414}
415
416/*
417 * Knob to control the precision of file timestamps:
418 *
419 *   0 = seconds only; nanoseconds zeroed.
420 *   1 = seconds and nanoseconds, accurate within 1/HZ.
421 *   2 = seconds and nanoseconds, truncated to microseconds.
422 * >=3 = seconds and nanoseconds, maximum precision.
423 */
424enum { TSP_SEC, TSP_HZ, TSP_USEC, TSP_NSEC };
425
426static int timestamp_precision = TSP_SEC;
427SYSCTL_INT(_vfs, OID_AUTO, timestamp_precision, CTLFLAG_RW,
428    &timestamp_precision, 0, "");
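/*
 * Usage note (an addition, not from the original source): the knob above
 * is exposed as the vfs.timestamp_precision sysctl, so for example
 * "sysctl vfs.timestamp_precision=3" selects full nanosecond timestamps
 * at a small extra cost per timestamp update.
 */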
429
430/*
431 * Get a current timestamp.
432 */
433void
434vfs_timestamp(tsp)
435	struct timespec *tsp;
436{
437	struct timeval tv;
438
439	switch (timestamp_precision) {
440	case TSP_SEC:
441		tsp->tv_sec = time_second;
442		tsp->tv_nsec = 0;
443		break;
444	case TSP_HZ:
445		getnanotime(tsp);
446		break;
447	case TSP_USEC:
448		microtime(&tv);
449		TIMEVAL_TO_TIMESPEC(&tv, tsp);
450		break;
451	case TSP_NSEC:
452	default:
453		nanotime(tsp);
454		break;
455	}
456}
457
458/*
459 * Set vnode attributes to VNOVAL
460 */
461void
462vattr_null(vap)
463	register struct vattr *vap;
464{
465
466	vap->va_type = VNON;
467	vap->va_size = VNOVAL;
468	vap->va_bytes = VNOVAL;
469	vap->va_mode = VNOVAL;
470	vap->va_nlink = VNOVAL;
471	vap->va_uid = VNOVAL;
472	vap->va_gid = VNOVAL;
473	vap->va_fsid = VNOVAL;
474	vap->va_fileid = VNOVAL;
475	vap->va_blocksize = VNOVAL;
476	vap->va_rdev = VNOVAL;
477	vap->va_atime.tv_sec = VNOVAL;
478	vap->va_atime.tv_nsec = VNOVAL;
479	vap->va_mtime.tv_sec = VNOVAL;
480	vap->va_mtime.tv_nsec = VNOVAL;
481	vap->va_ctime.tv_sec = VNOVAL;
482	vap->va_ctime.tv_nsec = VNOVAL;
483	vap->va_birthtime.tv_sec = VNOVAL;
484	vap->va_birthtime.tv_nsec = VNOVAL;
485	vap->va_flags = VNOVAL;
486	vap->va_gen = VNOVAL;
487	vap->va_vaflags = 0;
488}
489
490/*
491 * This routine is called when we have too many vnodes.  It attempts
492 * to free <count> vnodes and will potentially free vnodes that still
493 * have VM backing store (VM backing store is typically the cause
494 * of a vnode blowout so we want to do this).  Therefore, this operation
495 * is not considered cheap.
496 *
497 * A number of conditions may prevent a vnode from being reclaimed.
498 * The buffer cache may have references on the vnode, a directory
499 * vnode may still have references due to the namei cache representing
500 * underlying files, or the vnode may be in active use.   It is not
501 * desirable to reuse such vnodes.  These conditions may cause the
502 * number of vnodes to reach some minimum value regardless of what
503 * you set kern.maxvnodes to.  Do not set kern.maxvnodes too low.
504 */
505static int
506vlrureclaim(struct mount *mp)
507{
508	struct vnode *vp;
509	int done;
510	int trigger;
511	int usevnodes;
512	int count;
513
514	/*
515	 * Calculate the trigger point, don't allow user
516	 * screwups to blow us up.   This prevents us from
517	 * recycling vnodes with lots of resident pages.  We
518	 * aren't trying to free memory, we are trying to
519	 * free vnodes.
520	 */
521	usevnodes = desiredvnodes;
522	if (usevnodes <= 0)
523		usevnodes = 1;
524	trigger = cnt.v_page_count * 2 / usevnodes;
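	/*
	 * Example with invented numbers: 256k physical pages and a
	 * desiredvnodes of 70000 give a trigger of about 7, so vnodes
	 * caching more than a handful of resident pages are skipped.
	 */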
525
526	done = 0;
527	MNT_ILOCK(mp);
528	count = mp->mnt_nvnodelistsize / 10 + 1;
529	while (count && (vp = TAILQ_FIRST(&mp->mnt_nvnodelist)) != NULL) {
530		TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
531		TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
532
533		if (vp->v_type != VNON &&
534		    vp->v_type != VBAD &&
535		    VI_TRYLOCK(vp)) {
536			if (VMIGHTFREE(vp) &&           /* critical path opt */
537			    (vp->v_object == NULL ||
538			    vp->v_object->resident_page_count < trigger)) {
539				MNT_IUNLOCK(mp);
540				vgonel(vp, curthread);
541				done++;
542				MNT_ILOCK(mp);
543			} else
544				VI_UNLOCK(vp);
545		}
546		--count;
547	}
548	MNT_IUNLOCK(mp);
549	return done;
550}
551
552/*
553 * Attempt to recycle vnodes in a context that is always safe to block.
554 * Calling vlrureclaim() from the bowels of filesystem code has some
555 * interesting deadlock problems.
556 */
557static struct proc *vnlruproc;
558static int vnlruproc_sig;
559
560static void
561vnlru_proc(void)
562{
563	struct mount *mp, *nmp;
564	int done;
565	struct proc *p = vnlruproc;
566	struct thread *td = FIRST_THREAD_IN_PROC(p);
567
568	mtx_lock(&Giant);
569
570	EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, p,
571	    SHUTDOWN_PRI_FIRST);
572
573	for (;;) {
574		kthread_suspend_check(p);
575		mtx_lock(&vnode_free_list_mtx);
576		if (numvnodes - freevnodes <= desiredvnodes * 9 / 10) {
577			mtx_unlock(&vnode_free_list_mtx);
578			vnlruproc_sig = 0;
579			wakeup(&vnlruproc_sig);
580			tsleep(vnlruproc, PVFS, "vlruwt", hz);
581			continue;
582		}
583		mtx_unlock(&vnode_free_list_mtx);
584		done = 0;
585		mtx_lock(&mountlist_mtx);
586		for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
587			if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, td)) {
588				nmp = TAILQ_NEXT(mp, mnt_list);
589				continue;
590			}
591			done += vlrureclaim(mp);
592			mtx_lock(&mountlist_mtx);
593			nmp = TAILQ_NEXT(mp, mnt_list);
594			vfs_unbusy(mp, td);
595		}
596		mtx_unlock(&mountlist_mtx);
597		if (done == 0) {
598#if 0
599			/* These messages are temporary debugging aids */
600			if (vnlru_nowhere < 5)
601				printf("vnlru process getting nowhere..\n");
602			else if (vnlru_nowhere == 5)
603				printf("vnlru process messages stopped.\n");
604#endif
605			vnlru_nowhere++;
606			tsleep(vnlruproc, PPAUSE, "vlrup", hz * 3);
607		}
608	}
609}
610
611static struct kproc_desc vnlru_kp = {
612	"vnlru",
613	vnlru_proc,
614	&vnlruproc
615};
616SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &vnlru_kp)
617
618
619/*
620 * Routines having to do with the management of the vnode table.
621 */
622
623/*
624 * Check to see if a free vnode can be recycled. If it can,
625 * recycle it and return it with the vnode interlock held.
626 */
627static int
628vtryrecycle(struct vnode *vp)
629{
630	struct thread *td = curthread;
631	vm_object_t object;
632	struct mount *vnmp;
633	int error;
634
635	/* Don't recycle if we can't get the interlock */
636	if (!VI_TRYLOCK(vp))
637		return (EWOULDBLOCK);
638	/*
639	 * This vnode may be found and locked via some other list; if so, we
640	 * can't recycle it yet.
641	 */
642	if (vn_lock(vp, LK_INTERLOCK | LK_EXCLUSIVE | LK_NOWAIT, td) != 0)
643		return (EWOULDBLOCK);
644	/*
645	 * Don't recycle if its filesystem is being suspended.
646	 */
647	if (vn_start_write(vp, &vnmp, V_NOWAIT) != 0) {
648		VOP_UNLOCK(vp, 0, td);
649		return (EBUSY);
650	}
651
652	/*
653	 * Don't recycle if we still have cached pages.
654	 */
655	if (VOP_GETVOBJECT(vp, &object) == 0) {
656		VM_OBJECT_LOCK(object);
657		if (object->resident_page_count ||
658		    object->ref_count) {
659			VM_OBJECT_UNLOCK(object);
660			error = EBUSY;
661			goto done;
662		}
663		VM_OBJECT_UNLOCK(object);
664	}
665	if (LIST_FIRST(&vp->v_cache_src)) {
666		/*
667		 * note: nameileafonly sysctl is temporary,
668		 * for debugging only, and will eventually be
669		 * removed.
670		 */
671		if (nameileafonly > 0) {
672			/*
673			 * Do not reuse namei-cached directory
674			 * vnodes that have cached
675			 * subdirectories.
676			 */
677			if (cache_leaf_test(vp) < 0) {
678				error = EISDIR;
679				goto done;
680			}
681		} else if (nameileafonly < 0 ||
682			    vmiodirenable == 0) {
683			/*
684			 * Do not reuse namei-cached directory
685			 * vnodes if nameileafonly is -1 or
686			 * if VMIO backing for directories is
687			 * turned off (otherwise we reuse them
688			 * too quickly).
689			 */
690			error = EBUSY;
691			goto done;
692		}
693	}
694	/*
695	 * If we got this far, we need to acquire the interlock and see if
696	 * anyone picked up this vnode from another list.  If not, we will
697	 * mark it with XLOCK via vgonel() so that anyone who does find it
698	 * will skip over it.
699	 */
700	VI_LOCK(vp);
701	if (VSHOULDBUSY(vp) && (vp->v_iflag & VI_XLOCK) == 0) {
702		VI_UNLOCK(vp);
703		error = EBUSY;
704		goto done;
705	}
706	mtx_lock(&vnode_free_list_mtx);
707	TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
708	vp->v_iflag &= ~VI_FREE;
709	mtx_unlock(&vnode_free_list_mtx);
710	vp->v_iflag |= VI_DOOMED;
711	if ((vp->v_type != VBAD) || (vp->v_data != NULL)) {
712		VOP_UNLOCK(vp, 0, td);
713		vgonel(vp, td);
714		VI_LOCK(vp);
715	} else
716		VOP_UNLOCK(vp, 0, td);
717	vn_finished_write(vnmp);
718	return (0);
719done:
720	VOP_UNLOCK(vp, 0, td);
721	vn_finished_write(vnmp);
722	return (error);
723}
724
725/*
726 * Return the next vnode from the free list.
727 */
728int
729getnewvnode(tag, mp, vops, vpp)
730	const char *tag;
731	struct mount *mp;
732	vop_t **vops;
733	struct vnode **vpp;
734{
735	struct vnode *vp = NULL;
736	struct vpollinfo *pollinfo = NULL;
737	struct bufobj *bo;
738
739	mtx_lock(&vnode_free_list_mtx);
740
741	/*
742	 * Try to reuse vnodes if we hit the max.  This generally only
743	 * occurs in certain large-memory (2G+) situations.  We cannot
744	 * attempt to directly reclaim vnodes due to nasty recursion
745	 * problems.
746	 */
747	while (numvnodes - freevnodes > desiredvnodes) {
748		if (vnlruproc_sig == 0) {
749			vnlruproc_sig = 1;      /* avoid unnecessary wakeups */
750			wakeup(vnlruproc);
751		}
752		mtx_unlock(&vnode_free_list_mtx);
753		tsleep(&vnlruproc_sig, PVFS, "vlruwk", hz);
754		mtx_lock(&vnode_free_list_mtx);
755	}
756
757	/*
758	 * Attempt to reuse a vnode already on the free list, allocating
759	 * a new vnode if we can't find one or if we have not yet reached
760	 * the minimum number needed for good LRU performance.
761	 */
762
763	if (freevnodes >= wantfreevnodes && numvnodes >= minvnodes) {
764		int error;
765		int count;
766
767		for (count = 0; count < freevnodes; count++) {
768			vp = TAILQ_FIRST(&vnode_free_list);
769
770			KASSERT(vp->v_usecount == 0 &&
771			    (vp->v_iflag & VI_DOINGINACT) == 0,
772			    ("getnewvnode: free vnode isn't"));
773
774			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
775			TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
776			mtx_unlock(&vnode_free_list_mtx);
777			error = vtryrecycle(vp);
778			mtx_lock(&vnode_free_list_mtx);
779			if (error == 0)
780				break;
781			vp = NULL;
782		}
783	}
784	if (vp) {
785		freevnodes--;
786		bo = &vp->v_bufobj;
787		mtx_unlock(&vnode_free_list_mtx);
788
789#ifdef INVARIANTS
790		{
791			if (vp->v_data)
792				printf("cleaned vnode isn't, "
793				       "address %p, inode %p\n",
794				       vp, vp->v_data);
795			if (bo->bo_numoutput)
796				panic("Clean vnode has pending I/O's");
797			if (vp->v_writecount != 0)
798				panic("Non-zero write count");
799		}
800#endif
801		if ((pollinfo = vp->v_pollinfo) != NULL) {
802			/*
803			 * To avoid lock order reversals, the call to
804			 * uma_zfree() must be delayed until the vnode
805			 * interlock is released.
806			 */
807			vp->v_pollinfo = NULL;
808		}
809#ifdef MAC
810		mac_destroy_vnode(vp);
811#endif
812		vp->v_iflag = 0;
813		vp->v_vflag = 0;
814		vp->v_lastw = 0;
815		vp->v_lasta = 0;
816		vp->v_cstart = 0;
817		vp->v_clen = 0;
818		vp->v_socket = 0;
819		lockdestroy(vp->v_vnlock);
820		lockinit(vp->v_vnlock, PVFS, tag, VLKTIMEOUT, LK_NOPAUSE);
821		KASSERT(bo->bo_clean.bv_cnt == 0, ("cleanbufcnt not 0"));
822		KASSERT(bo->bo_clean.bv_root == NULL, ("cleanblkroot not NULL"));
823		KASSERT(bo->bo_dirty.bv_cnt == 0, ("dirtybufcnt not 0"));
824		KASSERT(bo->bo_dirty.bv_root == NULL, ("dirtyblkroot not NULL"));
825	} else {
826		numvnodes++;
827		mtx_unlock(&vnode_free_list_mtx);
828
829		vp = (struct vnode *) uma_zalloc(vnode_zone, M_WAITOK|M_ZERO);
830		mtx_init(&vp->v_interlock, "vnode interlock", NULL, MTX_DEF);
831		VI_LOCK(vp);
832		vp->v_dd = vp;
833		bo = &vp->v_bufobj;
834		bo->__bo_vnode = vp;
835		bo->bo_mtx = &vp->v_interlock;
836		vp->v_vnlock = &vp->v_lock;
837		lockinit(vp->v_vnlock, PVFS, tag, VLKTIMEOUT, LK_NOPAUSE);
838		cache_purge(vp);		/* Sets up v_id. */
839		LIST_INIT(&vp->v_cache_src);
840		TAILQ_INIT(&vp->v_cache_dst);
841	}
842
843	TAILQ_INIT(&bo->bo_clean.bv_hd);
844	TAILQ_INIT(&bo->bo_dirty.bv_hd);
845	bo->bo_ops = &buf_ops_bio;
846	bo->bo_private = vp;
847	vp->v_type = VNON;
848	vp->v_tag = tag;
849	vp->v_op = vops;
850	*vpp = vp;
851	vp->v_usecount = 1;
852	vp->v_data = 0;
853	vp->v_cachedid = -1;
854	VI_UNLOCK(vp);
855	if (pollinfo != NULL) {
856		knlist_destroy(&pollinfo->vpi_selinfo.si_note);
857		mtx_destroy(&pollinfo->vpi_lock);
858		uma_zfree(vnodepoll_zone, pollinfo);
859	}
860#ifdef MAC
861	mac_init_vnode(vp);
862	if (mp != NULL && (mp->mnt_flag & MNT_MULTILABEL) == 0)
863		mac_associate_vnode_singlelabel(mp, vp);
864	else if (mp == NULL)
865		printf("NULL mp in getnewvnode()\n");
866#endif
867	delmntque(vp);
868	if (mp != NULL) {
869		insmntque(vp, mp);
870		bo->bo_bsize = mp->mnt_stat.f_iosize;
871	}
872
873	return (0);
874}
875
876/*
877 * Delete from old mount point vnode list, if on one.
878 */
879static void
880delmntque(struct vnode *vp)
881{
882	struct mount *mp;
883
884	if (vp->v_mount == NULL)
885		return;
886	mp = vp->v_mount;
887	MNT_ILOCK(mp);
888	vp->v_mount = NULL;
889	KASSERT(mp->mnt_nvnodelistsize > 0,
890		("bad mount point vnode list size"));
891	TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
892	mp->mnt_nvnodelistsize--;
893	MNT_IUNLOCK(mp);
894}
895
896/*
897 * Insert into list of vnodes for the new mount point, if available.
898 */
899static void
900insmntque(struct vnode *vp, struct mount *mp)
901{
902
903	vp->v_mount = mp;
904	KASSERT(mp != NULL, ("Don't call insmntque(foo, NULL)"));
905	MNT_ILOCK(vp->v_mount);
906	TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
907	mp->mnt_nvnodelistsize++;
908	MNT_IUNLOCK(vp->v_mount);
909}
910
911/*
912 * Flush out and invalidate all buffers associated with a vnode.
913 * Called with the underlying object locked.
914 */
915int
916vinvalbuf(vp, flags, cred, td, slpflag, slptimeo)
917	struct vnode *vp;
918	int flags;
919	struct ucred *cred;
920	struct thread *td;
921	int slpflag, slptimeo;
922{
923	struct buf *blist;
924	int error;
925	vm_object_t object;
926	struct bufobj *bo;
927
928	GIANT_REQUIRED;
929
930	ASSERT_VOP_LOCKED(vp, "vinvalbuf");
931
932	bo = &vp->v_bufobj;
933	BO_LOCK(bo);
934	if (flags & V_SAVE) {
935		error = bufobj_wwait(bo, slpflag, slptimeo);
936		if (error) {
937			VI_UNLOCK(vp);
938			return (error);
939		}
940		if (bo->bo_dirty.bv_cnt > 0) {
941			VI_UNLOCK(vp);
942			if ((error = VOP_FSYNC(vp, cred, MNT_WAIT, td)) != 0)
943				return (error);
944			/*
945			 * XXX We could save a lock/unlock if this was only
946			 * enabled under INVARIANTS
947			 */
948			VI_LOCK(vp);
949			if (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0)
950				panic("vinvalbuf: dirty bufs");
951		}
952	}
953	/*
954	 * If you alter this loop please notice that interlock is dropped and
955	 * reacquired in flushbuflist.  Special care is needed to ensure that
956	 * no race conditions occur from this.
957	 */
958	for (error = 0;;) {
959		blist = TAILQ_FIRST(&vp->v_bufobj.bo_clean.bv_hd);
960		if (blist != NULL &&
961		    flushbuflist(blist, flags, vp, slpflag, slptimeo, &error)) {
962			if (error)
963				break;
964			continue;
965		}
966		blist = TAILQ_FIRST(&vp->v_bufobj.bo_dirty.bv_hd);
967		if (blist != NULL &&
968		    flushbuflist(blist, flags, vp, slpflag, slptimeo, &error)) {
969			if (error)
970				break;
971			continue;
972		}
973		break;
974	}
975	if (error) {
976		VI_UNLOCK(vp);
977		return (error);
978	}
979
980	/*
981	 * Wait for I/O to complete.  XXX needs cleaning up.  The vnode can
982	 * have write I/O in-progress but if there is a VM object then the
983	 * VM object can also have read-I/O in-progress.
984	 */
985	do {
986		bufobj_wwait(bo, 0, 0);
987		VI_UNLOCK(vp);
988		if (VOP_GETVOBJECT(vp, &object) == 0) {
989			VM_OBJECT_LOCK(object);
990			vm_object_pip_wait(object, "vnvlbx");
991			VM_OBJECT_UNLOCK(object);
992		}
993		VI_LOCK(vp);
994	} while (bo->bo_numoutput > 0);
995	VI_UNLOCK(vp);
996
997	/*
998	 * Destroy the copy in the VM cache, too.
999	 */
1000	if (VOP_GETVOBJECT(vp, &object) == 0) {
1001		VM_OBJECT_LOCK(object);
1002		vm_object_page_remove(object, 0, 0,
1003			(flags & V_SAVE) ? TRUE : FALSE);
1004		VM_OBJECT_UNLOCK(object);
1005	}
1006
1007#ifdef INVARIANTS
1008	VI_LOCK(vp);
1009	if ((flags & (V_ALT | V_NORMAL)) == 0 &&
1010	    (vp->v_bufobj.bo_dirty.bv_cnt > 0 ||
1011	     vp->v_bufobj.bo_clean.bv_cnt > 0))
1012		panic("vinvalbuf: flush failed");
1013	VI_UNLOCK(vp);
1014#endif
1015	return (0);
1016}
1017
1018/*
1019 * Flush out buffers on the specified list.
1020 *
1021 */
1022static int
1023flushbuflist(blist, flags, vp, slpflag, slptimeo, errorp)
1024	struct buf *blist;
1025	int flags;
1026	struct vnode *vp;
1027	int slpflag, slptimeo;
1028	int *errorp;
1029{
1030	struct buf *bp, *nbp;
1031	int found, error;
1032
1033	ASSERT_VI_LOCKED(vp, "flushbuflist");
1034
1035	for (found = 0, bp = blist; bp; bp = nbp) {
1036		nbp = TAILQ_NEXT(bp, b_bobufs);
1037		if (((flags & V_NORMAL) && (bp->b_xflags & BX_ALTDATA)) ||
1038		    ((flags & V_ALT) && (bp->b_xflags & BX_ALTDATA) == 0)) {
1039			continue;
1040		}
1041		found += 1;
1042		error = BUF_TIMELOCK(bp,
1043		    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, VI_MTX(vp),
1044		    "flushbuf", slpflag, slptimeo);
1045		if (error) {
1046			if (error != ENOLCK)
1047				*errorp = error;
1048			goto done;
1049		}
1050		/*
1051		 * XXX Since there are no node locks for NFS, I
1052		 * believe there is a slight chance that a delayed
1053		 * write will occur while sleeping just above, so
1054		 * check for it.  Note that vfs_bio_awrite expects
1055		 * buffers to reside on a queue, while bwrite and
1056		 * brelse do not.
1057		 */
1058		if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) &&
1059			(flags & V_SAVE)) {
1060
1061			if (bp->b_vp == vp) {
1062				if (bp->b_flags & B_CLUSTEROK) {
1063					vfs_bio_awrite(bp);
1064				} else {
1065					bremfree(bp);
1066					bp->b_flags |= B_ASYNC;
1067					bwrite(bp);
1068				}
1069			} else {
1070				bremfree(bp);
1071				(void) bwrite(bp);
1072			}
1073			goto done;
1074		}
1075		bremfree(bp);
1076		bp->b_flags |= (B_INVAL | B_NOCACHE | B_RELBUF);
1077		bp->b_flags &= ~B_ASYNC;
1078		brelse(bp);
1079		VI_LOCK(vp);
1080	}
1081	return (found);
1082done:
1083	VI_LOCK(vp);
1084	return (found);
1085}
1086
1087/*
1088 * Truncate a file's buffer and pages to a specified length.  This
1089 * is in lieu of the old vinvalbuf mechanism, which performed unneeded
1090 * sync activity.
1091 */
1092int
1093vtruncbuf(struct vnode *vp, struct ucred *cred, struct thread *td, off_t length, int blksize)
1094{
1095	struct buf *bp, *nbp;
1096	int anyfreed;
1097	int trunclbn;
1098	struct bufobj *bo;
1099
1100	/*
1101	 * Round up to the *next* lbn.
1102	 */
1103	trunclbn = (length + blksize - 1) / blksize;
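	/*
	 * Illustrative example: truncating to length 16500 with 16384-byte
	 * blocks gives trunclbn 2, so the loops below leave lblkno 0 and 1
	 * alone and invalidate buffers at lblkno 2 and beyond.
	 */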
1104
1105	ASSERT_VOP_LOCKED(vp, "vtruncbuf");
1106restart:
1107	VI_LOCK(vp);
1108	bo = &vp->v_bufobj;
1109	anyfreed = 1;
1110	for (;anyfreed;) {
1111		anyfreed = 0;
1112		TAILQ_FOREACH_SAFE(bp, &bo->bo_clean.bv_hd, b_bobufs, nbp) {
1113			if (bp->b_lblkno < trunclbn)
1114				continue;
1115			if (BUF_LOCK(bp,
1116			    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
1117			    VI_MTX(vp)) == ENOLCK)
1118				goto restart;
1119
1120			bremfree(bp);
1121			bp->b_flags |= (B_INVAL | B_RELBUF);
1122			bp->b_flags &= ~B_ASYNC;
1123			brelse(bp);
1124			anyfreed = 1;
1125
1126			if (nbp != NULL &&
1127			    (((nbp->b_xflags & BX_VNCLEAN) == 0) ||
1128			    (nbp->b_vp != vp) ||
1129			    (nbp->b_flags & B_DELWRI))) {
1130				goto restart;
1131			}
1132			VI_LOCK(vp);
1133		}
1134
1135		TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
1136			if (bp->b_lblkno < trunclbn)
1137				continue;
1138			if (BUF_LOCK(bp,
1139			    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
1140			    VI_MTX(vp)) == ENOLCK)
1141				goto restart;
1142			bremfree(bp);
1143			bp->b_flags |= (B_INVAL | B_RELBUF);
1144			bp->b_flags &= ~B_ASYNC;
1145			brelse(bp);
1146			anyfreed = 1;
1147			if (nbp != NULL &&
1148			    (((nbp->b_xflags & BX_VNDIRTY) == 0) ||
1149			    (nbp->b_vp != vp) ||
1150			    (nbp->b_flags & B_DELWRI) == 0)) {
1151				goto restart;
1152			}
1153			VI_LOCK(vp);
1154		}
1155	}
1156
1157	if (length > 0) {
1158restartsync:
1159		TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
1160			if (bp->b_lblkno > 0)
1161				continue;
1162			/*
1163			 * Since we hold the vnode lock this should only
1164			 * fail if we're racing with the buf daemon.
1165			 */
1166			if (BUF_LOCK(bp,
1167			    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
1168			    VI_MTX(vp)) == ENOLCK) {
1169				goto restart;
1170			}
1171			KASSERT((bp->b_flags & B_DELWRI),
1172			    ("buf(%p) on dirty queue without DELWRI", bp));
1173
1174			bremfree(bp);
1175			bawrite(bp);
1176			VI_LOCK(vp);
1177			goto restartsync;
1178		}
1179	}
1180
1181	bufobj_wwait(bo, 0, 0);
1182	VI_UNLOCK(vp);
1183	vnode_pager_setsize(vp, length);
1184
1185	return (0);
1186}
1187
1188/*
1189 * buf_splay() - splay tree core for the clean/dirty list of buffers in
1190 * 		 a vnode.
1191 *
1192 *	NOTE: We have to deal with the special case of a background bitmap
1193 *	buffer, a situation where two buffers will have the same logical
1194 *	block offset.  We want (1) only the foreground buffer to be accessed
1195 *	in a lookup and (2) must differentiate between the foreground and
1196 *	background buffer in the splay tree algorithm because the splay
1197 *	tree cannot normally handle multiple entities with the same 'index'.
1198 *	We accomplish this by adding differentiating flags to the splay tree's
1199 *	numerical domain.
1200 */
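/*
 * Concrete case (illustrative): a foreground buffer and its background
 * bitmap shadow may both sit at lblkno 8; only the shadow carries
 * BX_BKGRDMARKER, so the comparisons below use that bit as a tie breaker
 * and the two buffers never compare equal.
 */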
1201static
1202struct buf *
1203buf_splay(daddr_t lblkno, b_xflags_t xflags, struct buf *root)
1204{
1205	struct buf dummy;
1206	struct buf *lefttreemax, *righttreemin, *y;
1207
1208	if (root == NULL)
1209		return (NULL);
1210	lefttreemax = righttreemin = &dummy;
1211	for (;;) {
1212		if (lblkno < root->b_lblkno ||
1213		    (lblkno == root->b_lblkno &&
1214		    (xflags & BX_BKGRDMARKER) < (root->b_xflags & BX_BKGRDMARKER))) {
1215			if ((y = root->b_left) == NULL)
1216				break;
1217			if (lblkno < y->b_lblkno) {
1218				/* Rotate right. */
1219				root->b_left = y->b_right;
1220				y->b_right = root;
1221				root = y;
1222				if ((y = root->b_left) == NULL)
1223					break;
1224			}
1225			/* Link into the new root's right tree. */
1226			righttreemin->b_left = root;
1227			righttreemin = root;
1228		} else if (lblkno > root->b_lblkno ||
1229		    (lblkno == root->b_lblkno &&
1230		    (xflags & BX_BKGRDMARKER) > (root->b_xflags & BX_BKGRDMARKER))) {
1231			if ((y = root->b_right) == NULL)
1232				break;
1233			if (lblkno > y->b_lblkno) {
1234				/* Rotate left. */
1235				root->b_right = y->b_left;
1236				y->b_left = root;
1237				root = y;
1238				if ((y = root->b_right) == NULL)
1239					break;
1240			}
1241			/* Link into the new root's left tree. */
1242			lefttreemax->b_right = root;
1243			lefttreemax = root;
1244		} else {
1245			break;
1246		}
1247		root = y;
1248	}
1249	/* Assemble the new root. */
1250	lefttreemax->b_right = root->b_left;
1251	righttreemin->b_left = root->b_right;
1252	root->b_left = dummy.b_right;
1253	root->b_right = dummy.b_left;
1254	return (root);
1255}
1256
1257static void
1258buf_vlist_remove(struct buf *bp)
1259{
1260	struct buf *root;
1261	struct bufv *bv;
1262
1263	KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
1264	ASSERT_BO_LOCKED(bp->b_bufobj);
1265	if (bp->b_xflags & BX_VNDIRTY)
1266		bv = &bp->b_bufobj->bo_dirty;
1267	else
1268		bv = &bp->b_bufobj->bo_clean;
1269	if (bp != bv->bv_root) {
1270		root = buf_splay(bp->b_lblkno, bp->b_xflags, bv->bv_root);
1271		KASSERT(root == bp, ("splay lookup failed in remove"));
1272	}
1273	if (bp->b_left == NULL) {
1274		root = bp->b_right;
1275	} else {
1276		root = buf_splay(bp->b_lblkno, bp->b_xflags, bp->b_left);
1277		root->b_right = bp->b_right;
1278	}
1279	bv->bv_root = root;
1280	TAILQ_REMOVE(&bv->bv_hd, bp, b_bobufs);
1281	bv->bv_cnt--;
1282	bp->b_xflags &= ~(BX_VNDIRTY | BX_VNCLEAN);
1283}
1284
1285/*
1286 * Add the buffer to the sorted clean or dirty block list using a
1287 * splay tree algorithm.
1288 *
1289 * NOTE: xflags is passed as a constant, optimizing this inline function!
1290 */
1291static void
1292buf_vlist_add(struct buf *bp, struct bufobj *bo, b_xflags_t xflags)
1293{
1294	struct buf *root;
1295	struct bufv *bv;
1296
1297	ASSERT_BO_LOCKED(bo);
1298	bp->b_xflags |= xflags;
1299	if (xflags & BX_VNDIRTY)
1300		bv = &bo->bo_dirty;
1301	else
1302		bv = &bo->bo_clean;
1303
1304	root = buf_splay(bp->b_lblkno, bp->b_xflags, bv->bv_root);
1305	if (root == NULL) {
1306		bp->b_left = NULL;
1307		bp->b_right = NULL;
1308		TAILQ_INSERT_TAIL(&bv->bv_hd, bp, b_bobufs);
1309	} else if (bp->b_lblkno < root->b_lblkno ||
1310	    (bp->b_lblkno == root->b_lblkno &&
1311	    (bp->b_xflags & BX_BKGRDMARKER) < (root->b_xflags & BX_BKGRDMARKER))) {
1312		bp->b_left = root->b_left;
1313		bp->b_right = root;
1314		root->b_left = NULL;
1315		TAILQ_INSERT_BEFORE(root, bp, b_bobufs);
1316	} else {
1317		bp->b_right = root->b_right;
1318		bp->b_left = root;
1319		root->b_right = NULL;
1320		TAILQ_INSERT_AFTER(&bv->bv_hd, root, bp, b_bobufs);
1321	}
1322	bv->bv_cnt++;
1323	bv->bv_root = bp;
1324}
1325
1326/*
1327 * Lookup a buffer using the splay tree.  Note that we specifically avoid
1328 * shadow buffers used in background bitmap writes.
1329 *
1330 * This code isn't quite as efficient as it could be because we are maintaining
1331 * two sorted lists and do not know which list the block resides in.
1332 *
1333 * During a "make buildworld" the desired buffer is found at one of
1334 * the roots more than 60% of the time.  Thus, checking both roots
1335 * before performing either splay eliminates unnecessary splays on the
1336 * first tree splayed.
1337 */
1338struct buf *
1339gbincore(struct bufobj *bo, daddr_t lblkno)
1340{
1341	struct buf *bp;
1342
1343	GIANT_REQUIRED;
1344
1345	ASSERT_BO_LOCKED(bo);
1346	if ((bp = bo->bo_clean.bv_root) != NULL &&
1347	    bp->b_lblkno == lblkno && !(bp->b_xflags & BX_BKGRDMARKER))
1348		return (bp);
1349	if ((bp = bo->bo_dirty.bv_root) != NULL &&
1350	    bp->b_lblkno == lblkno && !(bp->b_xflags & BX_BKGRDMARKER))
1351		return (bp);
1352	if ((bp = bo->bo_clean.bv_root) != NULL) {
1353		bo->bo_clean.bv_root = bp = buf_splay(lblkno, 0, bp);
1354		if (bp->b_lblkno == lblkno && !(bp->b_xflags & BX_BKGRDMARKER))
1355			return (bp);
1356	}
1357	if ((bp = bo->bo_dirty.bv_root) != NULL) {
1358		bo->bo_dirty.bv_root = bp = buf_splay(lblkno, 0, bp);
1359		if (bp->b_lblkno == lblkno && !(bp->b_xflags & BX_BKGRDMARKER))
1360			return (bp);
1361	}
1362	return (NULL);
1363}
1364
1365/*
1366 * Associate a buffer with a vnode.
1367 */
1368void
1369bgetvp(struct vnode *vp, struct buf *bp)
1370{
1371
1372	KASSERT(bp->b_vp == NULL, ("bgetvp: not free"));
1373
1374	KASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0,
1375	    ("bgetvp: bp already attached! %p", bp));
1376
1377	ASSERT_VI_LOCKED(vp, "bgetvp");
1378	vholdl(vp);
1379	bp->b_vp = vp;
1380	bp->b_bufobj = &vp->v_bufobj;
1381	/*
1382	 * Insert onto list for new vnode.
1383	 */
1384	buf_vlist_add(bp, &vp->v_bufobj, BX_VNCLEAN);
1385}
1386
1387/*
1388 * Disassociate a buffer from a vnode.
1389 */
1390void
1391brelvp(struct buf *bp)
1392{
1393	struct bufobj *bo;
1394	struct vnode *vp;
1395
1396	KASSERT(bp->b_vp != NULL, ("brelvp: NULL"));
1397
1398	/*
1399	 * Delete from old vnode list, if on one.
1400	 */
1401	vp = bp->b_vp;		/* XXX */
1402	bo = bp->b_bufobj;
1403	BO_LOCK(bo);
1404	if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN))
1405		buf_vlist_remove(bp);
1406	if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) {
1407		bo->bo_flag &= ~BO_ONWORKLST;
1408		mtx_lock(&sync_mtx);
1409		LIST_REMOVE(bo, bo_synclist);
1410 		syncer_worklist_len--;
1411		mtx_unlock(&sync_mtx);
1412	}
1413	vdropl(vp);
1414	bp->b_vp = NULL;
1415	bp->b_bufobj = NULL;
1416	BO_UNLOCK(bo);
1417}
1418
1419/*
1420 * Add an item to the syncer work queue.
1421 */
1422static void
1423vn_syncer_add_to_worklist(struct bufobj *bo, int delay)
1424{
1425	int slot;
1426
1427	ASSERT_BO_LOCKED(bo);
1428
1429	mtx_lock(&sync_mtx);
1430	if (bo->bo_flag & BO_ONWORKLST)
1431		LIST_REMOVE(bo, bo_synclist);
1432	else {
1433		bo->bo_flag |= BO_ONWORKLST;
1434 		syncer_worklist_len++;
1435	}
1436
1437	if (delay > syncer_maxdelay - 2)
1438		delay = syncer_maxdelay - 2;
1439	slot = (syncer_delayno + delay) & syncer_mask;
1440
1441	LIST_INSERT_HEAD(&syncer_workitem_pending[slot], bo, bo_synclist);
1442	mtx_unlock(&sync_mtx);
1443}
1444
1445static int
1446sysctl_vfs_worklist_len(SYSCTL_HANDLER_ARGS)
1447{
1448	int error, len;
1449
1450	mtx_lock(&sync_mtx);
1451	len = syncer_worklist_len - sync_vnode_count;
1452	mtx_unlock(&sync_mtx);
1453	error = SYSCTL_OUT(req, &len, sizeof(len));
1454	return (error);
1455}
1456
1457SYSCTL_PROC(_vfs, OID_AUTO, worklist_len, CTLTYPE_INT | CTLFLAG_RD, NULL, 0,
1458    sysctl_vfs_worklist_len, "I", "Syncer thread worklist length");
1459
1460struct  proc *updateproc;
1461static void sched_sync(void);
1462static struct kproc_desc up_kp = {
1463	"syncer",
1464	sched_sync,
1465	&updateproc
1466};
1467SYSINIT(syncer, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp)
1468
1469/*
1470 * System filesystem synchronizer daemon.
1471 */
1472static void
1473sched_sync(void)
1474{
1475	struct synclist *next;
1476	struct synclist *slp;
1477	struct vnode *vp;
1478	struct bufobj *bo;
1479	struct mount *mp;
1480	long starttime;
1481	struct thread *td = FIRST_THREAD_IN_PROC(updateproc);
1482	static int dummychan;
1483	int last_work_seen;
1484	int net_worklist_len;
1485	int syncer_final_iter;
1486	int first_printf;
1487
1488	mtx_lock(&Giant);
1489	last_work_seen = 0;
1490	syncer_final_iter = 0;
1491	first_printf = 1;
1492	syncer_state = SYNCER_RUNNING;
1493	starttime = time_second;
1494
1495	EVENTHANDLER_REGISTER(shutdown_pre_sync, syncer_shutdown, td->td_proc,
1496	    SHUTDOWN_PRI_LAST);
1497
1498	for (;;) {
1499		mtx_lock(&sync_mtx);
1500		if (syncer_state == SYNCER_FINAL_DELAY &&
1501		    syncer_final_iter == 0) {
1502			mtx_unlock(&sync_mtx);
1503			kthread_suspend_check(td->td_proc);
1504			mtx_lock(&sync_mtx);
1505		}
1506		net_worklist_len = syncer_worklist_len - sync_vnode_count;
1507		if (syncer_state != SYNCER_RUNNING &&
1508		    starttime != time_second) {
1509			if (first_printf) {
1510				printf("\nSyncing disks, vnodes remaining...");
1511				first_printf = 0;
1512			}
1513			printf("%d ", net_worklist_len);
1514		}
1515		starttime = time_second;
1516
1517		/*
1518		 * Push files whose dirty time has expired.  Be careful
1519		 * of interrupt race on slp queue.
1520		 *
1521		 * Skip over empty worklist slots when shutting down.
1522		 */
1523		do {
1524			slp = &syncer_workitem_pending[syncer_delayno];
1525			syncer_delayno += 1;
1526			if (syncer_delayno == syncer_maxdelay)
1527				syncer_delayno = 0;
1528			next = &syncer_workitem_pending[syncer_delayno];
1529			/*
1530			 * If the worklist has wrapped since it
1531			 * was emptied of all but syncer vnodes,
1532			 * switch to the FINAL_DELAY state and run
1533			 * for one more second.
1534			 */
1535			if (syncer_state == SYNCER_SHUTTING_DOWN &&
1536			    net_worklist_len == 0 &&
1537			    last_work_seen == syncer_delayno) {
1538				syncer_state = SYNCER_FINAL_DELAY;
1539				syncer_final_iter = SYNCER_SHUTDOWN_SPEEDUP;
1540			}
1541		} while (syncer_state != SYNCER_RUNNING && LIST_EMPTY(slp) &&
1542		    syncer_worklist_len > 0);
1543
1544		/*
1545		 * Keep track of the last time there was anything
1546		 * on the worklist other than syncer vnodes.
1547		 * Return to the SHUTTING_DOWN state if any
1548		 * new work appears.
1549		 */
1550		if (net_worklist_len > 0 || syncer_state == SYNCER_RUNNING)
1551			last_work_seen = syncer_delayno;
1552		if (net_worklist_len > 0 && syncer_state == SYNCER_FINAL_DELAY)
1553			syncer_state = SYNCER_SHUTTING_DOWN;
1554		while ((bo = LIST_FIRST(slp)) != NULL) {
1555			vp = bo->__bo_vnode; 	/* XXX */
1556			if (VOP_ISLOCKED(vp, NULL) != 0 ||
1557			    vn_start_write(vp, &mp, V_NOWAIT) != 0) {
1558				LIST_REMOVE(bo, bo_synclist);
1559				LIST_INSERT_HEAD(next, bo, bo_synclist);
1560				continue;
1561			}
1562			if (VI_TRYLOCK(vp) == 0) {
1563				LIST_REMOVE(bo, bo_synclist);
1564				LIST_INSERT_HEAD(next, bo, bo_synclist);
1565				vn_finished_write(mp);
1566				continue;
1567			}
1568			/*
1569			 * We use vhold in case the vnode does not
1570			 * successfully sync.  vhold prevents the vnode from
1571			 * going away when we unlock the sync_mtx so that
1572			 * we can acquire the vnode interlock.
1573			 */
1574			vholdl(vp);
1575			mtx_unlock(&sync_mtx);
1576			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY | LK_INTERLOCK, td);
1577			(void) VOP_FSYNC(vp, td->td_ucred, MNT_LAZY, td);
1578			VOP_UNLOCK(vp, 0, td);
1579			vn_finished_write(mp);
1580			VI_LOCK(vp);
1581			if ((bo->bo_flag & BO_ONWORKLST) != 0) {
1582				/*
1583				 * Put us back on the worklist.  The worklist
1584				 * routine will remove us from our current
1585				 * position and then add us back in at a later
1586				 * position.
1587				 */
1588				vn_syncer_add_to_worklist(bo, syncdelay);
1589			}
1590			vdropl(vp);
1591			VI_UNLOCK(vp);
1592			mtx_lock(&sync_mtx);
1593		}
1594		if (syncer_state == SYNCER_FINAL_DELAY && syncer_final_iter > 0)
1595			syncer_final_iter--;
1596		mtx_unlock(&sync_mtx);
1597
1598		/*
1599		 * Do soft update processing.
1600		 */
1601		if (softdep_process_worklist_hook != NULL)
1602			(*softdep_process_worklist_hook)(NULL);
1603
1604		/*
1605		 * The variable rushjob allows the kernel to speed up the
1606		 * processing of the filesystem syncer process. A rushjob
1607		 * value of N tells the filesystem syncer to process the next
1608		 * N seconds worth of work on its queue ASAP. Currently rushjob
1609		 * is used by the soft update code to speed up the filesystem
1610		 * syncer process when the incore state is getting so far
1611		 * ahead of the disk that the kernel memory pool is being
1612		 * threatened with exhaustion.
1613		 */
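		/*
		 * For instance (illustrative), a rushjob of 3 lets the main
		 * loop skip its one second sleep and make three extra passes
		 * over the worklist back to back before pacing itself again.
		 */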
1614		mtx_lock(&sync_mtx);
1615		if (rushjob > 0) {
1616			rushjob -= 1;
1617			mtx_unlock(&sync_mtx);
1618			continue;
1619		}
1620		mtx_unlock(&sync_mtx);
1621		/*
1622		 * Just sleep for a short period of time between
1623		 * iterations when shutting down to allow some I/O
1624		 * to happen.
1625		 *
1626		 * If it has taken us less than a second to process the
1627		 * current work, then wait. Otherwise start right over
1628		 * again. We can still lose time if any single round
1629		 * takes more than two seconds, but it does not really
1630		 * matter as we are just trying to generally pace the
1631		 * filesystem activity.
1632		 */
1633		if (syncer_state != SYNCER_RUNNING)
1634			tsleep(&dummychan, PPAUSE, "syncfnl",
1635			    hz / SYNCER_SHUTDOWN_SPEEDUP);
1636		else if (time_second == starttime)
1637			tsleep(&lbolt, PPAUSE, "syncer", 0);
1638	}
1639}
1640
1641/*
1642 * Request the syncer daemon to speed up its work.
1643 * We never push it to speed up more than half of its
1644 * normal turn time, otherwise it could take over the cpu.
1645 */
1646int
1647speedup_syncer()
1648{
1649	struct thread *td;
1650	int ret = 0;
1651
1652	td = FIRST_THREAD_IN_PROC(updateproc);
1653	sleepq_remove(td, &lbolt);
1654	mtx_lock(&sync_mtx);
1655	if (rushjob < syncdelay / 2) {
1656		rushjob += 1;
1657		stat_rush_requests += 1;
1658		ret = 1;
1659	}
1660	mtx_unlock(&sync_mtx);
1661	return (ret);
1662}
1663
1664/*
1665 * Tell the syncer to speed up its work and run through its work
1666 * list several times, then tell it to shut down.
1667 */
1668static void
1669syncer_shutdown(void *arg, int howto)
1670{
1671	struct thread *td;
1672
1673	if (howto & RB_NOSYNC)
1674		return;
1675	td = FIRST_THREAD_IN_PROC(updateproc);
1676	sleepq_remove(td, &lbolt);
1677	mtx_lock(&sync_mtx);
1678	syncer_state = SYNCER_SHUTTING_DOWN;
1679	rushjob = 0;
1680	mtx_unlock(&sync_mtx);
1681	kproc_shutdown(arg, howto);
1682}
1683
1684/*
1685 * Associate a p-buffer with a vnode.
1686 *
1687 * Also sets B_PAGING flag to indicate that vnode is not fully associated
1688 * with the buffer.  i.e. the bp has not been linked into the vnode or
1689 * ref-counted.
1690 */
1691void
1692pbgetvp(vp, bp)
1693	register struct vnode *vp;
1694	register struct buf *bp;
1695{
1696
1697	KASSERT(bp->b_vp == NULL, ("pbgetvp: not free"));
1698
1699	bp->b_vp = vp;
1700	bp->b_flags |= B_PAGING;
1701	bp->b_bufobj = &vp->v_bufobj;
1702}
1703
1704/*
1705 * Disassociate a p-buffer from a vnode.
1706 */
1707void
1708pbrelvp(bp)
1709	register struct buf *bp;
1710{
1711
1712	KASSERT(bp->b_vp != NULL, ("pbrelvp: NULL"));
1713	KASSERT(bp->b_bufobj != NULL, ("pbrelvp: NULL bufobj"));
1714
1715	/* XXX REMOVE ME */
1716	BO_LOCK(bp->b_bufobj);
1717	if (TAILQ_NEXT(bp, b_bobufs) != NULL) {
1718		panic(
1719		    "relpbuf(): b_vp was probably reassignbuf()d %p %x",
1720		    bp,
1721		    (int)bp->b_flags
1722		);
1723	}
1724	BO_UNLOCK(bp->b_bufobj);
1725	bp->b_vp = NULL;
1726	bp->b_bufobj = NULL;
1727	bp->b_flags &= ~B_PAGING;
1728}
1729
1730/*
1731 * Reassign a buffer from one vnode to another.
1732 * Used to assign file specific control information
1733 * (indirect blocks) to the vnode to which they belong.
1734 */
1735void
1736reassignbuf(struct buf *bp)
1737{
1738	struct vnode *vp;
1739	struct bufobj *bo;
1740	int delay;
1741
1742	vp = bp->b_vp;
1743	bo = bp->b_bufobj;
1744	++reassignbufcalls;
1745
1746	/*
1747	 * B_PAGING flagged buffers cannot be reassigned because their vp
1748	 * is not fully linked in.
1749	 */
1750	if (bp->b_flags & B_PAGING)
1751		panic("cannot reassign paging buffer");
1752
1753	/*
1754	 * Delete from old vnode list, if on one.
1755	 */
1756	VI_LOCK(vp);
1757	if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN))
1758		buf_vlist_remove(bp);
1759	/*
1760	 * If dirty, put on list of dirty buffers; otherwise insert onto list
1761	 * of clean buffers.
1762	 */
1763	if (bp->b_flags & B_DELWRI) {
1764		if ((bo->bo_flag & BO_ONWORKLST) == 0) {
1765			switch (vp->v_type) {
1766			case VDIR:
1767				delay = dirdelay;
1768				break;
1769			case VCHR:
1770				delay = metadelay;
1771				break;
1772			default:
1773				delay = filedelay;
1774			}
1775			vn_syncer_add_to_worklist(bo, delay);
1776		}
1777		buf_vlist_add(bp, bo, BX_VNDIRTY);
1778	} else {
1779		buf_vlist_add(bp, bo, BX_VNCLEAN);
1780
1781		if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) {
1782			mtx_lock(&sync_mtx);
1783			LIST_REMOVE(bo, bo_synclist);
1784 			syncer_worklist_len--;
1785			mtx_unlock(&sync_mtx);
1786			bo->bo_flag &= ~BO_ONWORKLST;
1787		}
1788	}
1789	VI_UNLOCK(vp);
1790}
1791
1792/*
1793 * Create a vnode for a device.
1794 * Used for mounting the root filesystem.
1795 */
1796int
1797bdevvp(dev, vpp)
1798	struct cdev *dev;
1799	struct vnode **vpp;
1800{
1801	register struct vnode *vp;
1802	struct vnode *nvp;
1803	int error;
1804
1805	if (dev == NULL) {
1806		*vpp = NULLVP;
1807		return (ENXIO);
1808	}
1809	if (vfinddev(dev, vpp))
1810		return (0);
1811
1812	error = getnewvnode("none", (struct mount *)0, devfs_specop_p, &nvp);
1813	if (error) {
1814		*vpp = NULLVP;
1815		return (error);
1816	}
1817	vp = nvp;
1818	vp->v_type = VCHR;
1819	vp->v_bufobj.bo_bsize = DEV_BSIZE;
1820	addalias(vp, dev);
1821	*vpp = vp;
1822	return (0);
1823}
1824
1825static void
1826v_incr_usecount(struct vnode *vp, int delta)
1827{
1828
1829	vp->v_usecount += delta;
1830	if (vp->v_type == VCHR && vp->v_rdev != NULL) {
1831		dev_lock();
1832		vp->v_rdev->si_usecount += delta;
1833		dev_unlock();
1834	}
1835}
1836
1837/*
1838 * Add vnode to the alias list hung off the struct cdev *.
1839 *
1840 * The reason for this gunk is that multiple vnodes can reference
1841 * the same physical device, so checking vp->v_usecount to see
1842 * how many users there are is inadequate; the v_usecount values
1843 * of all the vnodes need to be accumulated.  vcount() does that.
1844 */
1845struct vnode *
1846addaliasu(nvp, nvp_rdev)
1847	struct vnode *nvp;
1848	dev_t nvp_rdev;
1849{
1850	struct vnode *ovp;
1851	vop_t **ops;
1852	struct cdev *dev;
1853
1854	if (nvp->v_type == VBLK)
1855		return (nvp);
1856	if (nvp->v_type != VCHR)
1857		panic("addaliasu on non-special vnode");
1858	dev = findcdev(nvp_rdev);
1859	if (dev == NULL)
1860		return (nvp);
1861	/*
1862	 * Check to see if we have a bdevvp vnode with no associated
1863	 * filesystem. If so, we want to associate the filesystem of
1864	 * the newly created vnode with the bdevvp vnode and
1865	 * discard the newly created vnode rather than leaving the
1866	 * bdevvp vnode lying around with no associated filesystem.
1867	 */
1868	if (vfinddev(dev, &ovp) == 0 || ovp->v_data != NULL) {
1869		addalias(nvp, dev);
1870		return (nvp);
1871	}
1872	/*
1873	 * Discard unneeded vnode, but save its node specific data.
1874	 * Note that if there is a lock, it is carried over in the
1875	 * node specific data to the replacement vnode.
1876	 */
1877	vref(ovp);
1878	ovp->v_data = nvp->v_data;
1879	ovp->v_tag = nvp->v_tag;
1880	nvp->v_data = NULL;
1881	lockdestroy(ovp->v_vnlock);
1882	lockinit(ovp->v_vnlock, PVFS, nvp->v_vnlock->lk_wmesg,
1883	    nvp->v_vnlock->lk_timo, nvp->v_vnlock->lk_flags & LK_EXTFLG_MASK);
1884	ops = ovp->v_op;
1885	ovp->v_op = nvp->v_op;
1886	if (VOP_ISLOCKED(nvp, curthread)) {
1887		VOP_UNLOCK(nvp, 0, curthread);
1888		vn_lock(ovp, LK_EXCLUSIVE | LK_RETRY, curthread);
1889	}
1890	nvp->v_op = ops;
1891	delmntque(ovp);
1892	insmntque(ovp, nvp->v_mount);
1893	vrele(nvp);
1894	vgone(nvp);
1895	return (ovp);
1896}
1897
1898/* This is a local helper function that does the same as addaliasu, but takes
1899 * a struct cdev * instead of a dev_t. */
1900static void
1901addalias(nvp, dev)
1902	struct vnode *nvp;
1903	struct cdev *dev;
1904{
1905
1906	KASSERT(nvp->v_type == VCHR, ("addalias on non-special vnode"));
1907	VI_LOCK(nvp);
1908	dev_lock();
1909	dev->si_refcount++;
1910	nvp->v_rdev = dev;
1911	SLIST_INSERT_HEAD(&dev->si_hlist, nvp, v_specnext);
1912	dev->si_usecount += nvp->v_usecount;
1913	dev_unlock();
1914	VI_UNLOCK(nvp);
1915}
1916
1917/*
1918 * Grab a particular vnode from the free list, increment its
1919 * reference count and lock it. The vnode lock bit is set if the
1920 * vnode is being eliminated in vgone. The process is awakened
1921 * when the transition is completed, and an error returned to
1922 * indicate that the vnode is no longer usable (possibly having
1923 * been changed to a new filesystem type).
1924 */
1925int
1926vget(vp, flags, td)
1927	register struct vnode *vp;
1928	int flags;
1929	struct thread *td;
1930{
1931	int error;
1932
1933	/*
1934	 * If the vnode is in the process of being cleaned out for
1935	 * another use, we wait for the cleaning to finish and then
1936	 * return failure. Cleaning is determined by checking that
1937	 * the VI_XLOCK flag is set.
1938	 */
1939	if ((flags & LK_INTERLOCK) == 0)
1940		VI_LOCK(vp);
1941	if (vp->v_iflag & VI_XLOCK && vp->v_vxthread != curthread) {
1942		if ((flags & LK_NOWAIT) == 0) {
1943			vp->v_iflag |= VI_XWANT;
1944			msleep(vp, VI_MTX(vp), PINOD | PDROP, "vget", 0);
1945			return (ENOENT);
1946		}
1947		VI_UNLOCK(vp);
1948		return (EBUSY);
1949	}
1950
1951	v_incr_usecount(vp, 1);
1952
1953	if (VSHOULDBUSY(vp))
1954		vbusy(vp);
1955	if (flags & LK_TYPE_MASK) {
1956		if ((error = vn_lock(vp, flags | LK_INTERLOCK, td)) != 0) {
1957			/*
1958			 * must expand vrele here because we do not want
1959			 * to call VOP_INACTIVE if the reference count
1960			 * drops back to zero since it was never really
1961			 * active. We must remove it from the free list
1962			 * before sleeping so that multiple processes do
1963			 * not try to recycle it.
1964			 */
1965			VI_LOCK(vp);
1966			v_incr_usecount(vp, -1);
1967			if (VSHOULDFREE(vp))
1968				vfree(vp);
1969			else
1970				vlruvp(vp);
1971			VI_UNLOCK(vp);
1972		}
1973		return (error);
1974	}
1975	VI_UNLOCK(vp);
1976	return (0);
1977}
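/*
 * Typical usage of vget() (an illustrative sketch only; hash_lock and
 * retry stand in for a filesystem's own lookup lock and restart label):
 * the caller finds vp under its own lock, takes the vnode interlock,
 * drops its lock and lets vget() reference and lock the vnode.  A
 * non-zero return means the vnode is being (or has been) reclaimed and
 * the lookup should be retried.
 *
 *	VI_LOCK(vp);
 *	mtx_unlock(&hash_lock);
 *	if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, td) != 0)
 *		goto retry;
 */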
1978
1979/*
1980 * Increase the reference count of a vnode.
1981 */
1982void
1983vref(struct vnode *vp)
1984{
1985
1986	VI_LOCK(vp);
1987	v_incr_usecount(vp, 1);
1988	VI_UNLOCK(vp);
1989}
1990
1991/*
1992 * Return reference count of a vnode.
1993 *
1994 * The results of this call are only guaranteed when some mechanism other
1995 * than the VI lock is used to stop other processes from gaining references
1996 * to the vnode.  This may be the case if the caller holds the only reference.
1997 * This is also useful when stale data is acceptable as race conditions may
1998 * be accounted for by some other means.
1999 */
2000int
2001vrefcnt(struct vnode *vp)
2002{
2003	int usecnt;
2004
2005	VI_LOCK(vp);
2006	usecnt = vp->v_usecount;
2007	VI_UNLOCK(vp);
2008
2009	return (usecnt);
2010}
2011
2012
2013/*
2014 * Vnode put/release.
2015 * If count drops to zero, call inactive routine and return to freelist.
2016 */
2017void
2018vrele(vp)
2019	struct vnode *vp;
2020{
2021	struct thread *td = curthread;	/* XXX */
2022
2023	GIANT_REQUIRED;
2024
2025	KASSERT(vp != NULL, ("vrele: null vp"));
2026
2027	VI_LOCK(vp);
2028
2029	/* Skip this v_writecount check if we're going to panic below. */
2030	KASSERT(vp->v_writecount < vp->v_usecount || vp->v_usecount < 1,
2031	    ("vrele: missed vn_close"));
2032
2033	if (vp->v_usecount > 1 || ((vp->v_iflag & VI_DOINGINACT) &&
2034	    vp->v_usecount == 1)) {
2035		v_incr_usecount(vp, -1);
2036		VI_UNLOCK(vp);
2037
2038		return;
2039	}
2040
2041	if (vp->v_usecount == 1) {
2042		v_incr_usecount(vp, -1);
2043		/*
2044		 * We must call VOP_INACTIVE with the node locked. Mark
2045		 * as VI_DOINGINACT to avoid recursion.
2046		 */
2047		if (vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK, td) == 0) {
2048			VI_LOCK(vp);
2049			vp->v_iflag |= VI_DOINGINACT;
2050			VI_UNLOCK(vp);
2051			VOP_INACTIVE(vp, td);
2052			VI_LOCK(vp);
2053			KASSERT(vp->v_iflag & VI_DOINGINACT,
2054			    ("vrele: lost VI_DOINGINACT"));
2055			vp->v_iflag &= ~VI_DOINGINACT;
2056		} else
2057			VI_LOCK(vp);
2058		if (VSHOULDFREE(vp))
2059			vfree(vp);
2060		else
2061			vlruvp(vp);
2062		VI_UNLOCK(vp);
2063
2064	} else {
2065#ifdef DIAGNOSTIC
2066		vprint("vrele: negative ref count", vp);
2067#endif
2068		VI_UNLOCK(vp);
2069		panic("vrele: negative ref cnt");
2070	}
2071}
2072
2073/*
2074 * Release an already locked vnode.  This gives the same effect as
2075 * unlock+vrele(), but takes less time and avoids releasing and
2076 * re-acquiring the lock (as vrele() acquires the lock internally).
2077 */
2078void
2079vput(vp)
2080	struct vnode *vp;
2081{
2082	struct thread *td = curthread;	/* XXX */
2083
2084	GIANT_REQUIRED;
2085
2086	KASSERT(vp != NULL, ("vput: null vp"));
2087	VI_LOCK(vp);
2088	/* Skip this v_writecount check if we're going to panic below. */
2089	KASSERT(vp->v_writecount < vp->v_usecount || vp->v_usecount < 1,
2090	    ("vput: missed vn_close"));
2091
2092	if (vp->v_usecount > 1 || ((vp->v_iflag & VI_DOINGINACT) &&
2093	    vp->v_usecount == 1)) {
2094		v_incr_usecount(vp, -1);
2095		VOP_UNLOCK(vp, LK_INTERLOCK, td);
2096		return;
2097	}
2098
2099	if (vp->v_usecount == 1) {
2100		v_incr_usecount(vp, -1);
2101		/*
2102		 * We must call VOP_INACTIVE with the node locked, so
2103		 * we just need to release the vnode mutex. Mark it
2104		 * as VI_DOINGINACT to avoid recursion.
2105		 */
2106		vp->v_iflag |= VI_DOINGINACT;
2107		VI_UNLOCK(vp);
2108		VOP_INACTIVE(vp, td);
2109		VI_LOCK(vp);
2110		KASSERT(vp->v_iflag & VI_DOINGINACT,
2111		    ("vput: lost VI_DOINGINACT"));
2112		vp->v_iflag &= ~VI_DOINGINACT;
2113		if (VSHOULDFREE(vp))
2114			vfree(vp);
2115		else
2116			vlruvp(vp);
2117		VI_UNLOCK(vp);
2118
2119	} else {
2120#ifdef DIAGNOSTIC
2121		vprint("vput: negative ref count", vp);
2122#endif
2123		panic("vput: negative ref cnt");
2124	}
2125}
2126
2127/*
2128 * Somebody doesn't want the vnode recycled.
2129 */
2130void
2131vhold(struct vnode *vp)
2132{
2133
2134	VI_LOCK(vp);
2135	vholdl(vp);
2136	VI_UNLOCK(vp);
2137}
2138
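/*
 * As vhold(), but with the vnode interlock already held; takes the
 * vnode off the free list if the new hold count requires it to stay
 * resident.
 */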
2139void
2140vholdl(struct vnode *vp)
2141{
2142
2143	vp->v_holdcnt++;
2144	if (VSHOULDBUSY(vp))
2145		vbusy(vp);
2146}
2147
2148/*
2149 * Note that there is one less who cares about this vnode.  vdrop() is the
2150 * opposite of vhold().
2151 */
2152void
2153vdrop(struct vnode *vp)
2154{
2155
2156	VI_LOCK(vp);
2157	vdropl(vp);
2158	VI_UNLOCK(vp);
2159}
2160
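/*
 * As vdrop(), but with the vnode interlock already held; once the
 * vnode is neither held nor referenced it is handed back to the free
 * list via vfree().
 */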
2161void
2162vdropl(vp)
2163	register struct vnode *vp;
2164{
2165
2166	if (vp->v_holdcnt <= 0)
2167		panic("vdrop: holdcnt");
2168	vp->v_holdcnt--;
2169	if (VSHOULDFREE(vp))
2170		vfree(vp);
2171	else
2172		vlruvp(vp);
2173}
2174
2175/*
2176 * Remove any vnodes in the vnode table belonging to mount point mp.
2177 *
2178 * If FORCECLOSE is not specified, there should not be any active ones,
2179 * return error if any are found (nb: this is a user error, not a
2180 * system error). If FORCECLOSE is specified, detach any active vnodes
2181 * that are found.
2182 *
2183 * If WRITECLOSE is set, only flush out regular file vnodes open for
2184 * writing.
2185 *
2186 * SKIPSYSTEM causes any vnodes marked VV_SYSTEM to be skipped.
2187 *
2188 * `rootrefs' specifies the base reference count for the root vnode
2189 * of this filesystem. The root vnode is considered busy if its
2190 * v_usecount exceeds this value. On a successful return, vflush()
2191 * will call vrele() on the root vnode exactly rootrefs times.
2192 * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must
2193 * be zero.
2194 */
2195#ifdef DIAGNOSTIC
2196static int busyprt = 0;		/* print out busy vnodes */
2197SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "");
2198#endif
2199
2200int
2201vflush(mp, rootrefs, flags, td)
2202	struct mount *mp;
2203	int rootrefs;
2204	int flags;
2205	struct thread *td;
2206{
2207	struct vnode *vp, *nvp, *rootvp = NULL;
2208	struct vattr vattr;
2209	int busy = 0, error;
2210
2211	if (rootrefs > 0) {
2212		KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0,
2213		    ("vflush: bad args"));
2214		/*
2215		 * Get the filesystem root vnode. We can vput() it
2216		 * immediately, since with rootrefs > 0, it won't go away.
2217		 */
2218		if ((error = VFS_ROOT(mp, &rootvp, td)) != 0)
2219			return (error);
2220		vput(rootvp);
2221
2222	}
2223	MNT_ILOCK(mp);
2224loop:
2225	MNT_VNODE_FOREACH(vp, mp, nvp) {
2226
2227		VI_LOCK(vp);
2228		MNT_IUNLOCK(mp);
2229		error = vn_lock(vp, LK_INTERLOCK | LK_EXCLUSIVE, td);
2230		if (error) {
2231			MNT_ILOCK(mp);
2232			goto loop;
2233		}
2234		/*
2235		 * Skip over vnodes marked VV_SYSTEM.
2236		 */
2237		if ((flags & SKIPSYSTEM) && (vp->v_vflag & VV_SYSTEM)) {
2238			VOP_UNLOCK(vp, 0, td);
2239			MNT_ILOCK(mp);
2240			continue;
2241		}
2242		/*
2243		 * If WRITECLOSE is set, flush out unlinked but still open
2244		 * files (even if open only for reading) and regular file
2245		 * vnodes open for writing.
2246		 */
2247		if (flags & WRITECLOSE) {
2248			error = VOP_GETATTR(vp, &vattr, td->td_ucred, td);
2249			VI_LOCK(vp);
2250
2251			if ((vp->v_type == VNON ||
2252			    (error == 0 && vattr.va_nlink > 0)) &&
2253			    (vp->v_writecount == 0 || vp->v_type != VREG)) {
2254				VOP_UNLOCK(vp, LK_INTERLOCK, td);
2255				MNT_ILOCK(mp);
2256				continue;
2257			}
2258		} else
2259			VI_LOCK(vp);
2260
2261		VOP_UNLOCK(vp, 0, td);
2262
2263		/*
2264		 * With v_usecount == 0, all we need to do is clear out the
2265		 * vnode data structures and we are done.
2266		 */
2267		if (vp->v_usecount == 0) {
2268			vgonel(vp, td);
2269			MNT_ILOCK(mp);
2270			continue;
2271		}
2272
2273		/*
2274		 * If FORCECLOSE is set, forcibly close the vnode. For block
2275		 * or character devices, revert to an anonymous device. For
2276		 * all other files, just kill them.
2277		 */
2278		if (flags & FORCECLOSE) {
2279			if (vp->v_type != VCHR)
2280				vgonel(vp, td);
2281			else
2282				vgonechrl(vp, td);
2283			MNT_ILOCK(mp);
2284			continue;
2285		}
2286#ifdef DIAGNOSTIC
2287		if (busyprt)
2288			vprint("vflush: busy vnode", vp);
2289#endif
2290		VI_UNLOCK(vp);
2291		MNT_ILOCK(mp);
2292		busy++;
2293	}
2294	MNT_IUNLOCK(mp);
2295	if (rootrefs > 0 && (flags & FORCECLOSE) == 0) {
2296		/*
2297		 * If just the root vnode is busy, and if its refcount
2298		 * is equal to `rootrefs', then go ahead and kill it.
2299		 */
2300		VI_LOCK(rootvp);
2301		KASSERT(busy > 0, ("vflush: not busy"));
2302		KASSERT(rootvp->v_usecount >= rootrefs,
2303		    ("vflush: usecount %d < rootrefs %d",
2304		     rootvp->v_usecount, rootrefs));
2305		if (busy == 1 && rootvp->v_usecount == rootrefs) {
2306			vgonel(rootvp, td);
2307			busy = 0;
2308		} else
2309			VI_UNLOCK(rootvp);
2310	}
2311	if (busy)
2312		return (EBUSY);
2313	for (; rootrefs > 0; rootrefs--)
2314		vrele(rootvp);
2315	return (0);
2316}
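/*
 * Typical usage of vflush() (an illustrative sketch only; details vary
 * by filesystem): an unmount routine flushes its vnodes before tearing
 * down per-mount state, passing FORCECLOSE only for forced unmounts.
 *
 *	flags = (mntflags & MNT_FORCE) ? FORCECLOSE : 0;
 *	if ((error = vflush(mp, 0, flags, td)) != 0)
 *		return (error);
 */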
2317
2318/*
2319 * This moves a now (likely recyclable) vnode to the end of the
2320 * mountlist.  XXX However, it is temporarily disabled until we
2321 * can clean up ffs_sync() and friends, which have loop restart
2322 * conditions that this code causes to operate in O(N^2) time.
2323 */
2324static void
2325vlruvp(struct vnode *vp)
2326{
2327#if 0
2328	struct mount *mp;
2329
2330	if ((mp = vp->v_mount) != NULL) {
2331		MNT_ILOCK(mp);
2332		TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
2333		TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
2334		MNT_IUNLOCK(mp);
2335	}
2336#endif
2337}
2338
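/*
 * vx_lock() marks a vnode as being cleaned out by setting VI_XLOCK and
 * recording the cleaning thread; vx_unlock() clears the flag and wakes
 * any threads that set VI_XWANT while waiting in vget() or vgonel()
 * for the cleaning to finish.
 */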
2339static void
2340vx_lock(struct vnode *vp)
2341{
2342
2343	ASSERT_VI_LOCKED(vp, "vx_lock");
2344
2345	/*
2346	 * Prevent the vnode from being recycled or brought into use while we
2347	 * clean it out.
2348	 */
2349	if (vp->v_iflag & VI_XLOCK)
2350		panic("vclean: deadlock");
2351	vp->v_iflag |= VI_XLOCK;
2352	vp->v_vxthread = curthread;
2353}
2354
2355static void
2356vx_unlock(struct vnode *vp)
2357{
2358	ASSERT_VI_LOCKED(vp, "vx_unlock");
2359	vp->v_iflag &= ~VI_XLOCK;
2360	vp->v_vxthread = NULL;
2361	if (vp->v_iflag & VI_XWANT) {
2362		vp->v_iflag &= ~VI_XWANT;
2363		wakeup(vp);
2364	}
2365}
2366
2367/*
2368 * Disassociate the underlying filesystem from a vnode.
2369 */
2370static void
2371vclean(vp, flags, td)
2372	struct vnode *vp;
2373	int flags;
2374	struct thread *td;
2375{
2376	int active;
2377
2378	ASSERT_VI_LOCKED(vp, "vclean");
2379	/*
2380	 * Check to see if the vnode is in use. If so we have to reference it
2381	 * before we clean it out so that its count cannot fall to zero and
2382	 * generate a race against ourselves to recycle it.
2383	 */
2384	if ((active = vp->v_usecount))
2385		v_incr_usecount(vp, 1);
2386
2387	/*
2388	 * Even if the count is zero, the VOP_INACTIVE routine may still
2389	 * have the object locked while it cleans it out. The VOP_LOCK
2390	 * ensures that the VOP_INACTIVE routine is done with its work.
2391	 * For active vnodes, it ensures that no other activity can
2392	 * occur while the underlying object is being cleaned out.
2393	 */
2394	VOP_LOCK(vp, LK_DRAIN | LK_INTERLOCK, td);
2395
2396	/*
2397	 * Clean out any buffers associated with the vnode.
2398	 * If the flush fails, just toss the buffers.
2399	 */
2400	if (flags & DOCLOSE) {
2401		struct buf *bp;
2402		bp = TAILQ_FIRST(&vp->v_bufobj.bo_dirty.bv_hd);
2403		if (bp != NULL)
2404			(void) vn_write_suspend_wait(vp, NULL, V_WAIT);
2405		if (vinvalbuf(vp, V_SAVE, NOCRED, td, 0, 0) != 0)
2406			vinvalbuf(vp, 0, NOCRED, td, 0, 0);
2407	}
2408
2409	VOP_DESTROYVOBJECT(vp);
2410
2411	/*
2412	 * Any other processes trying to obtain this lock must first
2413	 * wait for VI_XLOCK to clear, then call the new lock operation.
2414	 */
2415	VOP_UNLOCK(vp, 0, td);
2416
2417	/*
2418	 * If purging an active vnode, it must be closed and
2419	 * deactivated before being reclaimed. Note that the
2420	 * VOP_INACTIVE will unlock the vnode.
2421	 */
2422	if (active) {
2423		if (flags & DOCLOSE)
2424			VOP_CLOSE(vp, FNONBLOCK, NOCRED, td);
2425		VI_LOCK(vp);
2426		if ((vp->v_iflag & VI_DOINGINACT) == 0) {
2427			vp->v_iflag |= VI_DOINGINACT;
2428			VI_UNLOCK(vp);
2429			if (vn_lock(vp, LK_EXCLUSIVE | LK_NOWAIT, td) != 0)
2430				panic("vclean: cannot relock.");
2431			VOP_INACTIVE(vp, td);
2432			VI_LOCK(vp);
2433			KASSERT(vp->v_iflag & VI_DOINGINACT,
2434			    ("vclean: lost VI_DOINGINACT"));
2435			vp->v_iflag &= ~VI_DOINGINACT;
2436		}
2437		VI_UNLOCK(vp);
2438	}
2439	/*
2440	 * Reclaim the vnode.
2441	 */
2442	if (VOP_RECLAIM(vp, td))
2443		panic("vclean: cannot reclaim");
2444
2445	if (active) {
2446		/*
2447		 * Inline copy of vrele() since VOP_INACTIVE
2448		 * has already been called.
2449		 */
2450		VI_LOCK(vp);
2451		v_incr_usecount(vp, -1);
2452		if (vp->v_usecount <= 0) {
2453#ifdef INVARIANTS
2454			if (vp->v_usecount < 0 || vp->v_writecount != 0) {
2455				vprint("vclean: bad ref count", vp);
2456				panic("vclean: ref cnt");
2457			}
2458#endif
2459			if (VSHOULDFREE(vp))
2460				vfree(vp);
2461		}
2462		VI_UNLOCK(vp);
2463	}
2464	/*
2465	 * Delete from old mount point vnode list.
2466	 */
2467	delmntque(vp);
2468	cache_purge(vp);
2469	VI_LOCK(vp);
2470	if (VSHOULDFREE(vp))
2471		vfree(vp);
2472
2473	/*
2474	 * Done with purge, reset to the standard lock and
2475	 * notify sleepers of the grim news.
2476	 */
2477	vp->v_vnlock = &vp->v_lock;
2478	vp->v_op = dead_vnodeop_p;
2479	if (vp->v_pollinfo != NULL)
2480		vn_pollgone(vp);
2481	vp->v_tag = "none";
2482}
2483
2484/*
2485 * Eliminate all activity associated with the requested vnode
2486 * and with all vnodes aliased to the requested vnode.
2487 */
2488int
2489vop_revoke(ap)
2490	struct vop_revoke_args /* {
2491		struct vnode *a_vp;
2492		int a_flags;
2493	} */ *ap;
2494{
2495	struct vnode *vp, *vq;
2496	struct cdev *dev;
2497
2498	KASSERT((ap->a_flags & REVOKEALL) != 0, ("vop_revoke"));
2499	vp = ap->a_vp;
2500	KASSERT((vp->v_type == VCHR), ("vop_revoke: not VCHR"));
2501
2502	VI_LOCK(vp);
2503	/*
2504	 * If a vgone (or vclean) is already in progress,
2505	 * wait until it is done and return.
2506	 */
2507	if (vp->v_iflag & VI_XLOCK) {
2508		vp->v_iflag |= VI_XWANT;
2509		msleep(vp, VI_MTX(vp), PINOD | PDROP,
2510		    "vop_revokeall", 0);
2511		return (0);
2512	}
2513	VI_UNLOCK(vp);
2514	dev = vp->v_rdev;
2515	for (;;) {
2516		dev_lock();
2517		vq = SLIST_FIRST(&dev->si_hlist);
2518		dev_unlock();
2519		if (vq == NULL)
2520			break;
2521		vgone(vq);
2522	}
2523	return (0);
2524}
2525
2526/*
2527 * Recycle an unused vnode to the front of the free list.
2528 * Release the passed interlock if the vnode will be recycled.
2529 */
2530int
2531vrecycle(vp, inter_lkp, td)
2532	struct vnode *vp;
2533	struct mtx *inter_lkp;
2534	struct thread *td;
2535{
2536
2537	VI_LOCK(vp);
2538	if (vp->v_usecount == 0) {
2539		if (inter_lkp) {
2540			mtx_unlock(inter_lkp);
2541		}
2542		vgonel(vp, td);
2543		return (1);
2544	}
2545	VI_UNLOCK(vp);
2546	return (0);
2547}
2548
2549/*
2550 * Eliminate all activity associated with a vnode
2551 * in preparation for reuse.
2552 */
2553void
2554vgone(vp)
2555	register struct vnode *vp;
2556{
2557	struct thread *td = curthread;	/* XXX */
2558
2559	VI_LOCK(vp);
2560	vgonel(vp, td);
2561}
2562
2563/*
2564 * Disassociate a character device from its underlying filesystem and
2565 * attach it to spec.  This is for use when the chr device is still active
2566 * and the filesystem is going away.
2567 */
2568static void
2569vgonechrl(struct vnode *vp, struct thread *td)
2570{
2571	ASSERT_VI_LOCKED(vp, "vgonechrl");
2572	vx_lock(vp);
2573	/*
2574	 * This is a custom version of vclean() which does not tear down
2575	 * the bufs or vm objects held by this vnode.  This allows filesystems
2576	 * to continue using devices which were discovered via another
2577	 * filesystem that has been unmounted.
2578	 */
2579	if (vp->v_usecount != 0) {
2580		v_incr_usecount(vp, 1);
2581		/*
2582		 * Ensure that no other activity can occur while the
2583		 * underlying object is being cleaned out.
2584		 */
2585		VOP_LOCK(vp, LK_DRAIN | LK_INTERLOCK, td);
2586		/*
2587		 * Any other processes trying to obtain this lock must first
2588		 * wait for VI_XLOCK to clear, then call the new lock operation.
2589		 */
2590		VOP_UNLOCK(vp, 0, td);
2591		vp->v_vnlock = &vp->v_lock;
2592		vp->v_tag = "orphanchr";
2593		vp->v_op = devfs_specop_p;
2594		delmntque(vp);
2595		cache_purge(vp);
2596		vrele(vp);
2597		VI_LOCK(vp);
2598	} else
2599		vclean(vp, 0, td);
2600	vp->v_op = devfs_specop_p;
2601	vx_unlock(vp);
2602	VI_UNLOCK(vp);
2603}
2604
2605/*
2606 * vgone, with the vp interlock held.
2607 */
2608void
2609vgonel(vp, td)
2610	struct vnode *vp;
2611	struct thread *td;
2612{
2613	/*
2614	 * If a vgone (or vclean) is already in progress,
2615	 * wait until it is done and return.
2616	 */
2617	ASSERT_VI_LOCKED(vp, "vgonel");
2618	if (vp->v_iflag & VI_XLOCK) {
2619		vp->v_iflag |= VI_XWANT;
2620		msleep(vp, VI_MTX(vp), PINOD | PDROP, "vgone", 0);
2621		return;
2622	}
2623	vx_lock(vp);
2624
2625	/*
2626	 * Clean out the filesystem specific data.
2627	 */
2628	vclean(vp, DOCLOSE, td);
2629	VI_UNLOCK(vp);
2630
2631	/*
2632	 * If special device, remove it from special device alias list
2633	 * if it is on one.
2634	 */
2635	VI_LOCK(vp);
2636	if (vp->v_type == VCHR && vp->v_rdev != NULL)
2637		dev_rel(vp);
2638
2639	/*
2640	 * If it is on the freelist and not already at the head,
2641	 * move it to the head of the list. The test of the
2642	 * VDOOMED flag and the reference count of zero is because
2643	 * it will be removed from the free list by getnewvnode,
2644	 * but will not have its reference count incremented until
2645	 * after calling vgone. If the reference count were
2646	 * incremented first, vgone would (incorrectly) try to
2647	 * close the previous instance of the underlying object.
2648	 */
2649	if (vp->v_usecount == 0 && !(vp->v_iflag & VI_DOOMED)) {
2650		mtx_lock(&vnode_free_list_mtx);
2651		if (vp->v_iflag & VI_FREE) {
2652			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
2653		} else {
2654			vp->v_iflag |= VI_FREE;
2655			freevnodes++;
2656		}
2657		TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
2658		mtx_unlock(&vnode_free_list_mtx);
2659	}
2660
2661	vp->v_type = VBAD;
2662	vx_unlock(vp);
2663	VI_UNLOCK(vp);
2664}
2665
2666/*
2667 * Lookup a vnode by device number.
2668 */
2669int
2670vfinddev(dev, vpp)
2671	struct cdev *dev;
2672	struct vnode **vpp;
2673{
2674	struct vnode *vp;
2675
2676	dev_lock();
2677	SLIST_FOREACH(vp, &dev->si_hlist, v_specnext) {
2678		*vpp = vp;
2679		dev_unlock();
2680		return (1);
2681	}
2682	dev_unlock();
2683	return (0);
2684}
2685
2686/*
2687 * Calculate the total number of references to a special device.
2688 */
2689int
2690vcount(vp)
2691	struct vnode *vp;
2692{
2693	int count;
2694
2695	dev_lock();
2696	count = vp->v_rdev->si_usecount;
2697	dev_unlock();
2698	return (count);
2699}
2700
2701/*
2702 * Same as above, but using the struct cdev * as the argument.
2703 */
2704int
2705count_dev(dev)
2706	struct cdev *dev;
2707{
2708	int count;
2709
2710	dev_lock();
2711	count = dev->si_usecount;
2712	dev_unlock();
2713	return(count);
2714}
2715
2716/*
2717 * Print out a description of a vnode.
2718 */
2719static char *typename[] =
2720{"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD"};
2721
2722void
2723vprint(label, vp)
2724	char *label;
2725	struct vnode *vp;
2726{
2727	char buf[96];
2728
2729	if (label != NULL)
2730		printf("%s: %p: ", label, (void *)vp);
2731	else
2732		printf("%p: ", (void *)vp);
2733	printf("tag %s, type %s, usecount %d, writecount %d, refcount %d,",
2734	    vp->v_tag, typename[vp->v_type], vp->v_usecount,
2735	    vp->v_writecount, vp->v_holdcnt);
2736	buf[0] = '\0';
2737	if (vp->v_vflag & VV_ROOT)
2738		strcat(buf, "|VV_ROOT");
2739	if (vp->v_vflag & VV_TEXT)
2740		strcat(buf, "|VV_TEXT");
2741	if (vp->v_vflag & VV_SYSTEM)
2742		strcat(buf, "|VV_SYSTEM");
2743	if (vp->v_iflag & VI_XLOCK)
2744		strcat(buf, "|VI_XLOCK");
2745	if (vp->v_iflag & VI_XWANT)
2746		strcat(buf, "|VI_XWANT");
2747	if (vp->v_iflag & VI_DOOMED)
2748		strcat(buf, "|VI_DOOMED");
2749	if (vp->v_iflag & VI_FREE)
2750		strcat(buf, "|VI_FREE");
2751	if (vp->v_vflag & VV_OBJBUF)
2752		strcat(buf, "|VV_OBJBUF");
2753	if (buf[0] != '\0')
2754		printf(" flags (%s),", &buf[1]);
2755	lockmgr_printinfo(vp->v_vnlock);
2756	printf("\n");
2757	if (vp->v_data != NULL)
2758		VOP_PRINT(vp);
2759}
2760
2761#ifdef DDB
2762#include <ddb/ddb.h>
2763/*
2764 * List all of the locked vnodes in the system.
2765 * Called when debugging the kernel.
2766 */
2767DB_SHOW_COMMAND(lockedvnods, lockedvnodes)
2768{
2769	struct mount *mp, *nmp;
2770	struct vnode *vp;
2771
2772	/*
2773	 * Note: because this is DDB, we can't obey the locking semantics
2774	 * for these structures, which means we could catch an inconsistent
2775	 * state and dereference a nasty pointer.  Not much to be done
2776	 * about that.
2777	 */
2778	printf("Locked vnodes\n");
2779	for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
2780		nmp = TAILQ_NEXT(mp, mnt_list);
2781		TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
2782			if (VOP_ISLOCKED(vp, NULL))
2783				vprint(NULL, vp);
2784		}
2786	}
2787}
2788#endif
2789
2790/*
2791 * Fill in a struct xvfsconf based on a struct vfsconf.
2792 */
2793static void
2794vfsconf2x(struct vfsconf *vfsp, struct xvfsconf *xvfsp)
2795{
2796
2797	strcpy(xvfsp->vfc_name, vfsp->vfc_name);
2798	xvfsp->vfc_typenum = vfsp->vfc_typenum;
2799	xvfsp->vfc_refcount = vfsp->vfc_refcount;
2800	xvfsp->vfc_flags = vfsp->vfc_flags;
2801	/*
2802	 * These are unused in userland, we keep them
2803	 * to not break binary compatibility.
2804	 */
2805	xvfsp->vfc_vfsops = NULL;
2806	xvfsp->vfc_next = NULL;
2807}
2808
2809/*
2810 * Top level filesystem related information gathering.
2811 */
2812static int
2813sysctl_vfs_conflist(SYSCTL_HANDLER_ARGS)
2814{
2815	struct vfsconf *vfsp;
2816	struct xvfsconf xvfsp;
2817	int error;
2818
2819	error = 0;
2820	TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) {
2821		vfsconf2x(vfsp, &xvfsp);
2822		error = SYSCTL_OUT(req, &xvfsp, sizeof xvfsp);
2823		if (error)
2824			break;
2825	}
2826	return (error);
2827}
2828
2829SYSCTL_PROC(_vfs, OID_AUTO, conflist, CTLFLAG_RD, NULL, 0, sysctl_vfs_conflist,
2830    "S,xvfsconf", "List of all configured filesystems");
2831
2832#ifndef BURN_BRIDGES
2833static int	sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS);
2834
2835static int
2836vfs_sysctl(SYSCTL_HANDLER_ARGS)
2837{
2838	int *name = (int *)arg1 - 1;	/* XXX */
2839	u_int namelen = arg2 + 1;	/* XXX */
2840	struct vfsconf *vfsp;
2841	struct xvfsconf xvfsp;
2842
2843	printf("WARNING: userland calling deprecated sysctl, "
2844	    "please rebuild world\n");
2845
2846#if 1 || defined(COMPAT_PRELITE2)
2847	/* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. */
2848	if (namelen == 1)
2849		return (sysctl_ovfs_conf(oidp, arg1, arg2, req));
2850#endif
2851
2852	switch (name[1]) {
2853	case VFS_MAXTYPENUM:
2854		if (namelen != 2)
2855			return (ENOTDIR);
2856		return (SYSCTL_OUT(req, &maxvfsconf, sizeof(int)));
2857	case VFS_CONF:
2858		if (namelen != 3)
2859			return (ENOTDIR);	/* overloaded */
2860		TAILQ_FOREACH(vfsp, &vfsconf, vfc_list)
2861			if (vfsp->vfc_typenum == name[2])
2862				break;
2863		if (vfsp == NULL)
2864			return (EOPNOTSUPP);
2865		vfsconf2x(vfsp, &xvfsp);
2866		return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp)));
2867	}
2868	return (EOPNOTSUPP);
2869}
2870
2871SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD | CTLFLAG_SKIP, vfs_sysctl,
2872	"Generic filesystem");
2873
2874#if 1 || defined(COMPAT_PRELITE2)
2875
2876static int
2877sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS)
2878{
2879	int error;
2880	struct vfsconf *vfsp;
2881	struct ovfsconf ovfs;
2882
2883	TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) {
2884		ovfs.vfc_vfsops = vfsp->vfc_vfsops;	/* XXX used as flag */
2885		strcpy(ovfs.vfc_name, vfsp->vfc_name);
2886		ovfs.vfc_index = vfsp->vfc_typenum;
2887		ovfs.vfc_refcount = vfsp->vfc_refcount;
2888		ovfs.vfc_flags = vfsp->vfc_flags;
2889		error = SYSCTL_OUT(req, &ovfs, sizeof ovfs);
2890		if (error)
2891			return error;
2892	}
2893	return 0;
2894}
2895
2896#endif /* 1 || COMPAT_PRELITE2 */
2897#endif /* !BURN_BRIDGES */
2898
2899#define KINFO_VNODESLOP		10
2900#ifdef notyet
2901/*
2902 * Dump vnode list (via sysctl).
2903 */
2904/* ARGSUSED */
2905static int
2906sysctl_vnode(SYSCTL_HANDLER_ARGS)
2907{
2908	struct xvnode *xvn;
2909	struct thread *td = req->td;
2910	struct mount *mp;
2911	struct vnode *vp;
2912	int error, len, n;
2913
2914	/*
2915	 * Stale numvnodes access is not fatal here.
2916	 */
2917	req->lock = 0;
2918	len = (numvnodes + KINFO_VNODESLOP) * sizeof *xvn;
2919	if (!req->oldptr)
2920		/* Make an estimate */
2921		return (SYSCTL_OUT(req, 0, len));
2922
2923	error = sysctl_wire_old_buffer(req, 0);
2924	if (error != 0)
2925		return (error);
2926	xvn = malloc(len, M_TEMP, M_ZERO | M_WAITOK);
2927	n = 0;
2928	mtx_lock(&mountlist_mtx);
2929	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
2930		if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, td))
2931			continue;
2932		MNT_ILOCK(mp);
2933		TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
2934			if (n == len)
2935				break;
2936			vref(vp);
2937			xvn[n].xv_size = sizeof *xvn;
2938			xvn[n].xv_vnode = vp;
2939#define XV_COPY(field) xvn[n].xv_##field = vp->v_##field
2940			XV_COPY(usecount);
2941			XV_COPY(writecount);
2942			XV_COPY(holdcnt);
2943			XV_COPY(id);
2944			XV_COPY(mount);
2945			XV_COPY(numoutput);
2946			XV_COPY(type);
2947#undef XV_COPY
2948			xvn[n].xv_flag = vp->v_vflag;
2949
2950			switch (vp->v_type) {
2951			case VREG:
2952			case VDIR:
2953			case VLNK:
2954				xvn[n].xv_ino = vp->v_cachedid;
2955				break;
2956			case VBLK:
2957			case VCHR:
2958				if (vp->v_rdev == NULL) {
2959					vrele(vp);
2960					continue;
2961				}
2962				xvn[n].xv_dev = dev2udev(vp->v_rdev);
2963				break;
2964			case VSOCK:
2965				xvn[n].xv_socket = vp->v_socket;
2966				break;
2967			case VFIFO:
2968				xvn[n].xv_fifo = vp->v_fifoinfo;
2969				break;
2970			case VNON:
2971			case VBAD:
2972			default:
2973				/* shouldn't happen? */
2974				vrele(vp);
2975				continue;
2976			}
2977			vrele(vp);
2978			++n;
2979		}
2980		MNT_IUNLOCK(mp);
2981		mtx_lock(&mountlist_mtx);
2982		vfs_unbusy(mp, td);
2983		if (n == len)
2984			break;
2985	}
2986	mtx_unlock(&mountlist_mtx);
2987
2988	error = SYSCTL_OUT(req, xvn, n * sizeof *xvn);
2989	free(xvn, M_TEMP);
2990	return (error);
2991}
2992
2993SYSCTL_PROC(_kern, KERN_VNODE, vnode, CTLTYPE_OPAQUE|CTLFLAG_RD,
2994	0, 0, sysctl_vnode, "S,xvnode", "");
2995#endif
2996
2997/*
2998 * Unmount all filesystems. The list is traversed in reverse order
2999 * of mounting to avoid dependencies.
3000 */
3001void
3002vfs_unmountall()
3003{
3004	struct mount *mp;
3005	struct thread *td;
3006	int error;
3007
3008	if (curthread != NULL)
3009		td = curthread;
3010	else
3011		td = FIRST_THREAD_IN_PROC(initproc); /* XXX XXX proc0? */
3012	/*
3013	 * Since this only runs when rebooting, it is not interlocked.
3014	 */
3015	while(!TAILQ_EMPTY(&mountlist)) {
3016		mp = TAILQ_LAST(&mountlist, mntlist);
3017		error = dounmount(mp, MNT_FORCE, td);
3018		if (error) {
3019			TAILQ_REMOVE(&mountlist, mp, mnt_list);
3020			printf("unmount of %s failed (",
3021			    mp->mnt_stat.f_mntonname);
3022			if (error == EBUSY)
3023				printf("BUSY)\n");
3024			else
3025				printf("%d)\n", error);
3026		} else {
3027			/* The unmount has removed mp from the mountlist */
3028		}
3029	}
3030}
3031
3032/*
3033 * Perform msync on all vnodes under a mount point.
3034 * The mount point must be locked.
3035 */
3036void
3037vfs_msync(struct mount *mp, int flags)
3038{
3039	struct vnode *vp, *nvp;
3040	struct vm_object *obj;
3041	int tries;
3042
3043	GIANT_REQUIRED;
3044
3045	tries = 5;
3046	MNT_ILOCK(mp);
3047loop:
3048	TAILQ_FOREACH_SAFE(vp, &mp->mnt_nvnodelist, v_nmntvnodes, nvp) {
3049		if (vp->v_mount != mp) {
3050			if (--tries > 0)
3051				goto loop;
3052			break;
3053		}
3054
3055		VI_LOCK(vp);
3056		if (vp->v_iflag & VI_XLOCK) {
3057			VI_UNLOCK(vp);
3058			continue;
3059		}
3060
3061		if ((vp->v_iflag & VI_OBJDIRTY) &&
3062		    (flags == MNT_WAIT || VOP_ISLOCKED(vp, NULL) == 0)) {
3063			MNT_IUNLOCK(mp);
3064			if (!vget(vp,
3065			    LK_EXCLUSIVE | LK_RETRY | LK_INTERLOCK,
3066			    curthread)) {
3067				if (vp->v_vflag & VV_NOSYNC) {	/* unlinked */
3068					vput(vp);
3069					MNT_ILOCK(mp);
3070					continue;
3071				}
3072
3073				if (VOP_GETVOBJECT(vp, &obj) == 0) {
3074					VM_OBJECT_LOCK(obj);
3075					vm_object_page_clean(obj, 0, 0,
3076					    flags == MNT_WAIT ?
3077					    OBJPC_SYNC : OBJPC_NOSYNC);
3078					VM_OBJECT_UNLOCK(obj);
3079				}
3080				vput(vp);
3081			}
3082			MNT_ILOCK(mp);
3083			if (TAILQ_NEXT(vp, v_nmntvnodes) != nvp) {
3084				if (--tries > 0)
3085					goto loop;
3086				break;
3087			}
3088		} else
3089			VI_UNLOCK(vp);
3090	}
3091	MNT_IUNLOCK(mp);
3092}
3093
3094/*
3095 * Create the VM object needed for VMIO and mmap support.  This
3096 * is done for all VREG files in the system.  Some filesystems might
3097 * afford the additional metadata buffering capability of the
3098 * VMIO code by making the device node be VMIO mode also.
3099 *
3100 * vp must be locked when vfs_object_create is called.
3101 */
3102int
3103vfs_object_create(struct vnode *vp, struct thread *td, struct ucred *cred)
3104{
3105
3106	GIANT_REQUIRED;
3107	return (VOP_CREATEVOBJECT(vp, cred, td));
3108}
3109
3110/*
3111 * Mark a vnode as free, putting it up for recycling.
3112 */
3113void
3114vfree(struct vnode *vp)
3115{
3116
3117	ASSERT_VI_LOCKED(vp, "vfree");
3118	mtx_lock(&vnode_free_list_mtx);
3119	KASSERT((vp->v_iflag & VI_FREE) == 0, ("vnode already free"));
3120	if (vp->v_iflag & VI_AGE) {
3121		TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
3122	} else {
3123		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
3124	}
3125	freevnodes++;
3126	mtx_unlock(&vnode_free_list_mtx);
3127	vp->v_iflag &= ~VI_AGE;
3128	vp->v_iflag |= VI_FREE;
3129}
3130
3131/*
3132 * Opposite of vfree() - mark a vnode as in use.
3133 */
3134void
3135vbusy(struct vnode *vp)
3136{
3137
3138	ASSERT_VI_LOCKED(vp, "vbusy");
3139	KASSERT((vp->v_iflag & VI_FREE) != 0, ("vnode not free"));
3140
3141	mtx_lock(&vnode_free_list_mtx);
3142	TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
3143	freevnodes--;
3144	mtx_unlock(&vnode_free_list_mtx);
3145
3146	vp->v_iflag &= ~(VI_FREE|VI_AGE);
3147}
3148
3149/*
3150 * Initialize per-vnode helper structure to hold poll-related state.
3151 */
3152void
3153v_addpollinfo(struct vnode *vp)
3154{
3155	struct vpollinfo *vi;
3156
3157	vi = uma_zalloc(vnodepoll_zone, M_WAITOK);
3158	if (vp->v_pollinfo != NULL) {
3159		uma_zfree(vnodepoll_zone, vi);
3160		return;
3161	}
3162	vp->v_pollinfo = vi;
3163	mtx_init(&vp->v_pollinfo->vpi_lock, "vnode pollinfo", NULL, MTX_DEF);
3164	knlist_init(&vp->v_pollinfo->vpi_selinfo.si_note,
3165	    &vp->v_pollinfo->vpi_lock);
3166}
3167
3168/*
3169 * Record a process's interest in events which might happen to
3170 * a vnode.  Because poll uses the historic select-style interface
3171 * internally, this routine serves as both the ``check for any
3172 * pending events'' and the ``record my interest in future events''
3173 * functions.  (These are done together, while the lock is held,
3174 * to avoid race conditions.)
3175 */
3176int
3177vn_pollrecord(vp, td, events)
3178	struct vnode *vp;
3179	struct thread *td;
3180	short events;
3181{
3182
3183	if (vp->v_pollinfo == NULL)
3184		v_addpollinfo(vp);
3185	mtx_lock(&vp->v_pollinfo->vpi_lock);
3186	if (vp->v_pollinfo->vpi_revents & events) {
3187		/*
3188		 * This leaves events we are not interested
3189		 * in available for the other process
3190		 * which presumably had requested them
3191		 * (otherwise they would never have been
3192		 * recorded).
3193		 */
3194		events &= vp->v_pollinfo->vpi_revents;
3195		vp->v_pollinfo->vpi_revents &= ~events;
3196
3197		mtx_unlock(&vp->v_pollinfo->vpi_lock);
3198		return events;
3199	}
3200	vp->v_pollinfo->vpi_events |= events;
3201	selrecord(td, &vp->v_pollinfo->vpi_selinfo);
3202	mtx_unlock(&vp->v_pollinfo->vpi_lock);
3203	return 0;
3204}
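/*
 * Typical usage of vn_pollrecord() (an illustrative sketch only;
 * check_ready() stands in for whatever readiness test a filesystem's
 * VOP_POLL performs): events that cannot be reported immediately are
 * recorded here and delivered later by vn_pollevent().
 *
 *	if ((revents = check_ready(vp, events)) == 0)
 *		revents = vn_pollrecord(vp, td, events);
 *	return (revents);
 */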
3205
3206/*
3207 * Note the occurrence of an event.  If the VN_POLLEVENT macro is used,
3208 * it is possible for us to miss an event due to race conditions, but
3209 * that condition is expected to be rare, so for the moment it is the
3210 * preferred interface.
3211 */
3212void
3213vn_pollevent(vp, events)
3214	struct vnode *vp;
3215	short events;
3216{
3217
3218	if (vp->v_pollinfo == NULL)
3219		v_addpollinfo(vp);
3220	mtx_lock(&vp->v_pollinfo->vpi_lock);
3221	if (vp->v_pollinfo->vpi_events & events) {
3222		/*
3223		 * We clear vpi_events so that we don't
3224		 * call selwakeup() twice if two events are
3225		 * posted before the polling process(es) is
3226		 * awakened.  This also ensures that we take at
3227		 * most one selwakeup() if the polling process
3228		 * is no longer interested.  However, it does
3229		 * mean that only one event can be noticed at
3230		 * a time.  (Perhaps we should only clear those
3231		 * event bits which we note?) XXX
3232		 */
3233		vp->v_pollinfo->vpi_events = 0;	/* &= ~events ??? */
3234		vp->v_pollinfo->vpi_revents |= events;
3235		selwakeuppri(&vp->v_pollinfo->vpi_selinfo, PRIBIO);
3236	}
3237	mtx_unlock(&vp->v_pollinfo->vpi_lock);
3238}
3239
3240/*
3241 * Wake up anyone polling on vp because it is being revoked.
3242 * This depends on dead_poll() returning POLLHUP for correct
3243 * behavior.
3244 */
3245void
3246vn_pollgone(vp)
3247	struct vnode *vp;
3248{
3249
3250	mtx_lock(&vp->v_pollinfo->vpi_lock);
3251	VN_KNOTE_LOCKED(vp, NOTE_REVOKE);
3252	if (vp->v_pollinfo->vpi_events) {
3253		vp->v_pollinfo->vpi_events = 0;
3254		selwakeuppri(&vp->v_pollinfo->vpi_selinfo, PRIBIO);
3255	}
3256	mtx_unlock(&vp->v_pollinfo->vpi_lock);
3257}
3258
3259
3260
3261/*
3262 * Routine to create and manage a filesystem syncer vnode.
3263 */
3264#define sync_close ((int (*)(struct  vop_close_args *))nullop)
3265static int	sync_fsync(struct  vop_fsync_args *);
3266static int	sync_inactive(struct  vop_inactive_args *);
3267static int	sync_reclaim(struct  vop_reclaim_args *);
3268
3269static vop_t **sync_vnodeop_p;
3270static struct vnodeopv_entry_desc sync_vnodeop_entries[] = {
3271	{ &vop_default_desc,	(vop_t *) vop_eopnotsupp },
3272	{ &vop_close_desc,	(vop_t *) sync_close },		/* close */
3273	{ &vop_fsync_desc,	(vop_t *) sync_fsync },		/* fsync */
3274	{ &vop_inactive_desc,	(vop_t *) sync_inactive },	/* inactive */
3275	{ &vop_reclaim_desc,	(vop_t *) sync_reclaim },	/* reclaim */
3276	{ &vop_lock_desc,	(vop_t *) vop_stdlock },	/* lock */
3277	{ &vop_unlock_desc,	(vop_t *) vop_stdunlock },	/* unlock */
3278	{ &vop_islocked_desc,	(vop_t *) vop_stdislocked },	/* islocked */
3279	{ NULL, NULL }
3280};
3281static struct vnodeopv_desc sync_vnodeop_opv_desc =
3282	{ &sync_vnodeop_p, sync_vnodeop_entries };
3283
3284VNODEOP_SET(sync_vnodeop_opv_desc);
3285
3286/*
3287 * Create a new filesystem syncer vnode for the specified mount point.
3288 */
3289int
3290vfs_allocate_syncvnode(mp)
3291	struct mount *mp;
3292{
3293	struct vnode *vp;
3294	static long start, incr, next;
3295	int error;
3296
3297	/* Allocate a new vnode */
3298	if ((error = getnewvnode("syncer", mp, sync_vnodeop_p, &vp)) != 0) {
3299		mp->mnt_syncer = NULL;
3300		return (error);
3301	}
3302	vp->v_type = VNON;
3303	/*
3304	 * Place the vnode onto the syncer worklist. We attempt to
3305	 * scatter them about on the list so that they will go off
3306	 * at evenly distributed times even if all the filesystems
3307	 * are mounted at once.
3308	 */
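	/*
	 * With the default syncer_maxdelay of 32, the successive values
	 * of `next' come out as 16, 8, 24, 4, 12, 20, 28, 2, ... -- a
	 * bit-reversal style sequence that spreads the syncer vnodes
	 * across the delay wheel.
	 */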
3309	next += incr;
3310	if (next == 0 || next > syncer_maxdelay) {
3311		start /= 2;
3312		incr /= 2;
3313		if (start == 0) {
3314			start = syncer_maxdelay / 2;
3315			incr = syncer_maxdelay;
3316		}
3317		next = start;
3318	}
3319	VI_LOCK(vp);
3320	vn_syncer_add_to_worklist(&vp->v_bufobj,
3321	    syncdelay > 0 ? next % syncdelay : 0);
3322	/* XXX - vn_syncer_add_to_worklist() also grabs and drops sync_mtx. */
3323	mtx_lock(&sync_mtx);
3324	sync_vnode_count++;
3325	mtx_unlock(&sync_mtx);
3326	VI_UNLOCK(vp);
3327	mp->mnt_syncer = vp;
3328	return (0);
3329}
3330
3331/*
3332 * Do a lazy sync of the filesystem.
3333 */
3334static int
3335sync_fsync(ap)
3336	struct vop_fsync_args /* {
3337		struct vnode *a_vp;
3338		struct ucred *a_cred;
3339		int a_waitfor;
3340		struct thread *a_td;
3341	} */ *ap;
3342{
3343	struct vnode *syncvp = ap->a_vp;
3344	struct mount *mp = syncvp->v_mount;
3345	struct thread *td = ap->a_td;
3346	int error, asyncflag;
3347	struct bufobj *bo;
3348
3349	/*
3350	 * We only need to do something if this is a lazy evaluation.
3351	 */
3352	if (ap->a_waitfor != MNT_LAZY)
3353		return (0);
3354
3355	/*
3356	 * Move ourselves to the back of the sync list.
3357	 */
3358	bo = &syncvp->v_bufobj;
3359	BO_LOCK(bo);
3360	vn_syncer_add_to_worklist(bo, syncdelay);
3361	BO_UNLOCK(bo);
3362
3363	/*
3364	 * Walk the list of vnodes pushing all that are dirty and
3365	 * not already on the sync list.
3366	 */
3367	mtx_lock(&mountlist_mtx);
3368	if (vfs_busy(mp, LK_EXCLUSIVE | LK_NOWAIT, &mountlist_mtx, td) != 0) {
3369		mtx_unlock(&mountlist_mtx);
3370		return (0);
3371	}
3372	if (vn_start_write(NULL, &mp, V_NOWAIT) != 0) {
3373		vfs_unbusy(mp, td);
3374		return (0);
3375	}
3376	asyncflag = mp->mnt_flag & MNT_ASYNC;
3377	mp->mnt_flag &= ~MNT_ASYNC;
3378	vfs_msync(mp, MNT_NOWAIT);
3379	error = VFS_SYNC(mp, MNT_LAZY, ap->a_cred, td);
3380	if (asyncflag)
3381		mp->mnt_flag |= MNT_ASYNC;
3382	vn_finished_write(mp);
3383	vfs_unbusy(mp, td);
3384	return (error);
3385}
3386
3387/*
3388 * The syncer vnode is no longer referenced.
3389 */
3390static int
3391sync_inactive(ap)
3392	struct vop_inactive_args /* {
3393		struct vnode *a_vp;
3394		struct thread *a_td;
3395	} */ *ap;
3396{
3397
3398	VOP_UNLOCK(ap->a_vp, 0, ap->a_td);
3399	vgone(ap->a_vp);
3400	return (0);
3401}
3402
3403/*
3404 * The syncer vnode is no longer needed and is being decommissioned.
3405 *
3406 * Modifications to the worklist must be protected by sync_mtx.
3407 */
3408static int
3409sync_reclaim(ap)
3410	struct vop_reclaim_args /* {
3411		struct vnode *a_vp;
3412	} */ *ap;
3413{
3414	struct vnode *vp = ap->a_vp;
3415	struct bufobj *bo;
3416
3417	VI_LOCK(vp);
3418	bo = &vp->v_bufobj;
3419	vp->v_mount->mnt_syncer = NULL;
3420	if (bo->bo_flag & BO_ONWORKLST) {
3421		mtx_lock(&sync_mtx);
3422		LIST_REMOVE(bo, bo_synclist);
3423 		syncer_worklist_len--;
3424		sync_vnode_count--;
3425		mtx_unlock(&sync_mtx);
3426		bo->bo_flag &= ~BO_ONWORKLST;
3427	}
3428	VI_UNLOCK(vp);
3429
3430	return (0);
3431}
3432
3433/*
3434 * Check if vnode represents a disk device
3435 */
3436int
3437vn_isdisk(vp, errp)
3438	struct vnode *vp;
3439	int *errp;
3440{
3441	int error;
3442
3443	error = 0;
3444	dev_lock();
3445	if (vp->v_type != VCHR)
3446		error = ENOTBLK;
3447	else if (vp->v_rdev == NULL)
3448		error = ENXIO;
3449	else if (vp->v_rdev->si_devsw == NULL)
3450		error = ENXIO;
3451	else if (!(vp->v_rdev->si_devsw->d_flags & D_DISK))
3452		error = ENOTBLK;
3453	dev_unlock();
3454	if (errp != NULL)
3455		*errp = error;
3456	return (error == 0);
3457}
3458
3459/*
3460 * Free data allocated by namei(); see namei(9) for details.
3461 */
3462void
3463NDFREE(ndp, flags)
3464     struct nameidata *ndp;
3465     const u_int flags;
3466{
3467
3468	if (!(flags & NDF_NO_FREE_PNBUF) &&
3469	    (ndp->ni_cnd.cn_flags & HASBUF)) {
3470		uma_zfree(namei_zone, ndp->ni_cnd.cn_pnbuf);
3471		ndp->ni_cnd.cn_flags &= ~HASBUF;
3472	}
3473	if (!(flags & NDF_NO_DVP_UNLOCK) &&
3474	    (ndp->ni_cnd.cn_flags & LOCKPARENT) &&
3475	    ndp->ni_dvp != ndp->ni_vp)
3476		VOP_UNLOCK(ndp->ni_dvp, 0, ndp->ni_cnd.cn_thread);
3477	if (!(flags & NDF_NO_DVP_RELE) &&
3478	    (ndp->ni_cnd.cn_flags & (LOCKPARENT|WANTPARENT))) {
3479		vrele(ndp->ni_dvp);
3480		ndp->ni_dvp = NULL;
3481	}
3482	if (!(flags & NDF_NO_VP_UNLOCK) &&
3483	    (ndp->ni_cnd.cn_flags & LOCKLEAF) && ndp->ni_vp)
3484		VOP_UNLOCK(ndp->ni_vp, 0, ndp->ni_cnd.cn_thread);
3485	if (!(flags & NDF_NO_VP_RELE) &&
3486	    ndp->ni_vp) {
3487		vrele(ndp->ni_vp);
3488		ndp->ni_vp = NULL;
3489	}
3490	if (!(flags & NDF_NO_STARTDIR_RELE) &&
3491	    (ndp->ni_cnd.cn_flags & SAVESTART)) {
3492		vrele(ndp->ni_startdir);
3493		ndp->ni_startdir = NULL;
3494	}
3495}
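/*
 * A common pattern is NDFREE(&nd, NDF_ONLY_PNBUF), which releases just
 * the pathname buffer and leaves the vnodes and their locks for the
 * caller to drop explicitly.
 */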
3496
3497/*
3498 * Common filesystem object access control check routine.  Accepts a
3499 * vnode's type, "mode", uid and gid, requested access mode, credentials,
3500 * and optional call-by-reference privused argument allowing vaccess()
3501 * to indicate to the caller whether privilege was used to satisfy the
3502 * request (obsoleted).  Returns 0 on success, or an errno on failure.
3503 */
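/*
 * For example, a regular file with mode 0640 owned by uid 100 and gid
 * 10 yields dac_granted = VADMIN | VREAD | VWRITE | VAPPEND for uid
 * 100 and VREAD for other members of gid 10, so a VWRITE request from
 * a group member falls through to the privilege checks below.
 */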
3504int
3505vaccess(type, file_mode, file_uid, file_gid, acc_mode, cred, privused)
3506	enum vtype type;
3507	mode_t file_mode;
3508	uid_t file_uid;
3509	gid_t file_gid;
3510	mode_t acc_mode;
3511	struct ucred *cred;
3512	int *privused;
3513{
3514	mode_t dac_granted;
3515#ifdef CAPABILITIES
3516	mode_t cap_granted;
3517#endif
3518
3519	/*
3520	 * Look for a normal, non-privileged way to access the file/directory
3521	 * as requested.  If it exists, go with that.
3522	 */
3523
3524	if (privused != NULL)
3525		*privused = 0;
3526
3527	dac_granted = 0;
3528
3529	/* Check the owner. */
3530	if (cred->cr_uid == file_uid) {
3531		dac_granted |= VADMIN;
3532		if (file_mode & S_IXUSR)
3533			dac_granted |= VEXEC;
3534		if (file_mode & S_IRUSR)
3535			dac_granted |= VREAD;
3536		if (file_mode & S_IWUSR)
3537			dac_granted |= (VWRITE | VAPPEND);
3538
3539		if ((acc_mode & dac_granted) == acc_mode)
3540			return (0);
3541
3542		goto privcheck;
3543	}
3544
3545	/* Otherwise, check the groups (first match) */
3546	if (groupmember(file_gid, cred)) {
3547		if (file_mode & S_IXGRP)
3548			dac_granted |= VEXEC;
3549		if (file_mode & S_IRGRP)
3550			dac_granted |= VREAD;
3551		if (file_mode & S_IWGRP)
3552			dac_granted |= (VWRITE | VAPPEND);
3553
3554		if ((acc_mode & dac_granted) == acc_mode)
3555			return (0);
3556
3557		goto privcheck;
3558	}
3559
3560	/* Otherwise, check everyone else. */
3561	if (file_mode & S_IXOTH)
3562		dac_granted |= VEXEC;
3563	if (file_mode & S_IROTH)
3564		dac_granted |= VREAD;
3565	if (file_mode & S_IWOTH)
3566		dac_granted |= (VWRITE | VAPPEND);
3567	if ((acc_mode & dac_granted) == acc_mode)
3568		return (0);
3569
3570privcheck:
3571	if (!suser_cred(cred, SUSER_ALLOWJAIL)) {
3572		/* XXX audit: privilege used */
3573		if (privused != NULL)
3574			*privused = 1;
3575		return (0);
3576	}
3577
3578#ifdef CAPABILITIES
3579	/*
3580	 * Build a capability mask to determine if the set of capabilities
3581	 * satisfies the requirements when combined with the granted mask
3582	 * from above.
3583	 * For each capability, if the capability is required, bitwise
3584	 * or the request type onto the cap_granted mask.
3585	 */
3586	cap_granted = 0;
3587
3588	if (type == VDIR) {
3589		/*
3590		 * For directories, use CAP_DAC_READ_SEARCH to satisfy
3591		 * VEXEC requests, instead of CAP_DAC_EXECUTE.
3592		 */
3593		if ((acc_mode & VEXEC) && ((dac_granted & VEXEC) == 0) &&
3594		    !cap_check(cred, NULL, CAP_DAC_READ_SEARCH, SUSER_ALLOWJAIL))
3595			cap_granted |= VEXEC;
3596	} else {
3597		if ((acc_mode & VEXEC) && ((dac_granted & VEXEC) == 0) &&
3598		    !cap_check(cred, NULL, CAP_DAC_EXECUTE, SUSER_ALLOWJAIL))
3599			cap_granted |= VEXEC;
3600	}
3601
3602	if ((acc_mode & VREAD) && ((dac_granted & VREAD) == 0) &&
3603	    !cap_check(cred, NULL, CAP_DAC_READ_SEARCH, SUSER_ALLOWJAIL))
3604		cap_granted |= VREAD;
3605
3606	if ((acc_mode & VWRITE) && ((dac_granted & VWRITE) == 0) &&
3607	    !cap_check(cred, NULL, CAP_DAC_WRITE, SUSER_ALLOWJAIL))
3608		cap_granted |= (VWRITE | VAPPEND);
3609
3610	if ((acc_mode & VADMIN) && ((dac_granted & VADMIN) == 0) &&
3611	    !cap_check(cred, NULL, CAP_FOWNER, SUSER_ALLOWJAIL))
3612		cap_granted |= VADMIN;
3613
3614	if ((acc_mode & (cap_granted | dac_granted)) == acc_mode) {
3615		/* XXX audit: privilege used */
3616		if (privused != NULL)
3617			*privused = 1;
3618		return (0);
3619	}
3620#endif
3621
3622	return ((acc_mode & VADMIN) ? EPERM : EACCES);
3623}
3624
3625/*
3626 * Credential check based on process requesting service, and per-attribute
3627 * permissions.
3628 */
3629int
3630extattr_check_cred(struct vnode *vp, int attrnamespace,
3631    struct ucred *cred, struct thread *td, int access)
3632{
3633
3634	/*
3635	 * Kernel-invoked requests always succeed.
3636	 */
3637	if (cred == NOCRED)
3638		return (0);
3639
3640	/*
3641	 * Do not allow privileged processes in jail to directly
3642	 * manipulate system attributes.
3643	 *
3644	 * XXX What capability should apply here?
3645	 * Probably CAP_SYS_SETFFLAG.
3646	 */
3647	switch (attrnamespace) {
3648	case EXTATTR_NAMESPACE_SYSTEM:
3649		/* Potentially should be: return (EPERM); */
3650		return (suser_cred(cred, 0));
3651	case EXTATTR_NAMESPACE_USER:
3652		return (VOP_ACCESS(vp, access, cred, td));
3653	default:
3654		return (EPERM);
3655	}
3656}
3657
3658#ifdef DEBUG_VFS_LOCKS
3659/*
3660 * This only exists to suppress warnings from unlocked specfs accesses.  It is
3661 * no longer ok to have an unlocked VFS.
3662 */
3663#define	IGNORE_LOCK(vp) ((vp)->v_type == VCHR || (vp)->v_type == VBAD)
3664
3665int vfs_badlock_ddb = 1;	/* Drop into debugger on violation. */
3666SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_ddb, CTLFLAG_RW, &vfs_badlock_ddb, 0, "");
3667
3668int vfs_badlock_mutex = 1;	/* Check for interlock across VOPs. */
3669SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_mutex, CTLFLAG_RW, &vfs_badlock_mutex, 0, "");
3670
3671int vfs_badlock_print = 1;	/* Print lock violations. */
3672SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_print, CTLFLAG_RW, &vfs_badlock_print, 0, "");
3673
3674#ifdef KDB
3675int vfs_badlock_backtrace = 1;	/* Print backtrace at lock violations. */
3676SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_backtrace, CTLFLAG_RW, &vfs_badlock_backtrace, 0, "");
3677#endif
3678
3679static void
3680vfs_badlock(const char *msg, const char *str, struct vnode *vp)
3681{
3682
3683#ifdef KDB
3684	if (vfs_badlock_backtrace)
3685		kdb_backtrace();
3686#endif
3687	if (vfs_badlock_print)
3688		printf("%s: %p %s\n", str, (void *)vp, msg);
3689	if (vfs_badlock_ddb)
3690		kdb_enter("lock violation");
3691}
3692
3693void
3694assert_vi_locked(struct vnode *vp, const char *str)
3695{
3696
3697	if (vfs_badlock_mutex && !mtx_owned(VI_MTX(vp)))
3698		vfs_badlock("interlock is not locked but should be", str, vp);
3699}
3700
3701void
3702assert_vi_unlocked(struct vnode *vp, const char *str)
3703{
3704
3705	if (vfs_badlock_mutex && mtx_owned(VI_MTX(vp)))
3706		vfs_badlock("interlock is locked but should not be", str, vp);
3707}
3708
3709void
3710assert_vop_locked(struct vnode *vp, const char *str)
3711{
3712
3713	if (vp && !IGNORE_LOCK(vp) && VOP_ISLOCKED(vp, NULL) == 0)
3714		vfs_badlock("is not locked but should be", str, vp);
3715}
3716
3717void
3718assert_vop_unlocked(struct vnode *vp, const char *str)
3719{
3720
3721	if (vp && !IGNORE_LOCK(vp) &&
3722	    VOP_ISLOCKED(vp, curthread) == LK_EXCLUSIVE)
3723		vfs_badlock("is locked but should not be", str, vp);
3724}
3725
3726#if 0
3727void
3728assert_vop_elocked(struct vnode *vp, const char *str)
3729{
3730
3731	if (vp && !IGNORE_LOCK(vp) &&
3732	    VOP_ISLOCKED(vp, curthread) != LK_EXCLUSIVE)
3733		vfs_badlock("is not exclusive locked but should be", str, vp);
3734}
3735
3736void
3737assert_vop_elocked_other(struct vnode *vp, const char *str)
3738{
3739
3740	if (vp && !IGNORE_LOCK(vp) &&
3741	    VOP_ISLOCKED(vp, curthread) != LK_EXCLOTHER)
3742		vfs_badlock("is not exclusive locked by another thread",
3743		    str, vp);
3744}
3745
3746void
3747assert_vop_slocked(struct vnode *vp, const char *str)
3748{
3749
3750	if (vp && !IGNORE_LOCK(vp) &&
3751	    VOP_ISLOCKED(vp, curthread) != LK_SHARED)
3752		vfs_badlock("is not locked shared but should be", str, vp);
3753}
3754#endif /* 0 */
3755
3756void
3757vop_rename_pre(void *ap)
3758{
3759	struct vop_rename_args *a = ap;
3760
3761	if (a->a_tvp)
3762		ASSERT_VI_UNLOCKED(a->a_tvp, "VOP_RENAME");
3763	ASSERT_VI_UNLOCKED(a->a_tdvp, "VOP_RENAME");
3764	ASSERT_VI_UNLOCKED(a->a_fvp, "VOP_RENAME");
3765	ASSERT_VI_UNLOCKED(a->a_fdvp, "VOP_RENAME");
3766
3767	/* Check the source (from). */
3768	if (a->a_tdvp != a->a_fdvp)
3769		ASSERT_VOP_UNLOCKED(a->a_fdvp, "vop_rename: fdvp locked");
3770	if (a->a_tvp != a->a_fvp)
3771		ASSERT_VOP_UNLOCKED(a->a_fvp, "vop_rename: fvp locked");
3772
3773	/* Check the target. */
3774	if (a->a_tvp)
3775		ASSERT_VOP_LOCKED(a->a_tvp, "vop_rename: tvp not locked");
3776	ASSERT_VOP_LOCKED(a->a_tdvp, "vop_rename: tdvp not locked");
3777}
3778
3779void
3780vop_strategy_pre(void *ap)
3781{
3782	struct vop_strategy_args *a;
3783	struct buf *bp;
3784
3785	a = ap;
3786	bp = a->a_bp;
3787
3788	/*
3789	 * Cluster ops lock their component buffers but not the IO container.
3790	 */
3791	if ((bp->b_flags & B_CLUSTER) != 0)
3792		return;
3793
3794	if (BUF_REFCNT(bp) < 1) {
3795		if (vfs_badlock_print)
3796			printf(
3797			    "VOP_STRATEGY: bp is not locked but should be\n");
3798		if (vfs_badlock_ddb)
3799			kdb_enter("lock violation");
3800	}
3801}
3802
3803void
3804vop_lookup_pre(void *ap)
3805{
3806	struct vop_lookup_args *a;
3807	struct vnode *dvp;
3808
3809	a = ap;
3810	dvp = a->a_dvp;
3811	ASSERT_VI_UNLOCKED(dvp, "VOP_LOOKUP");
3812	ASSERT_VOP_LOCKED(dvp, "VOP_LOOKUP");
3813}
3814
3815void
3816vop_lookup_post(void *ap, int rc)
3817{
3818	struct vop_lookup_args *a;
3819	struct componentname *cnp;
3820	struct vnode *dvp;
3821	struct vnode *vp;
3822	int flags;
3823
3824	a = ap;
3825	dvp = a->a_dvp;
3826	cnp = a->a_cnp;
3827	vp = *(a->a_vpp);
3828	flags = cnp->cn_flags;
3829
3830	ASSERT_VI_UNLOCKED(dvp, "VOP_LOOKUP");
3831
3832	/*
3833	 * If this is the last path component for this lookup and LOCKPARENT
3834	 * is set, or if there is an error, the directory has to be locked.
3835	 */
3836	if ((flags & LOCKPARENT) && (flags & ISLASTCN))
3837		ASSERT_VOP_LOCKED(dvp, "VOP_LOOKUP (LOCKPARENT)");
3838	else if (rc != 0)
3839		ASSERT_VOP_LOCKED(dvp, "VOP_LOOKUP (error)");
3840	else if (dvp != vp)
3841		ASSERT_VOP_UNLOCKED(dvp, "VOP_LOOKUP (dvp)");
3842	if (flags & PDIRUNLOCK)
3843		ASSERT_VOP_UNLOCKED(dvp, "VOP_LOOKUP (PDIRUNLOCK)");
3844}
3845
3846void
3847vop_lock_pre(void *ap)
3848{
3849	struct vop_lock_args *a = ap;
3850
3851	if ((a->a_flags & LK_INTERLOCK) == 0)
3852		ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK");
3853	else
3854		ASSERT_VI_LOCKED(a->a_vp, "VOP_LOCK");
3855}
3856
3857void
3858vop_lock_post(void *ap, int rc)
3859{
3860	struct vop_lock_args *a = ap;
3861
3862	ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK");
3863	if (rc == 0)
3864		ASSERT_VOP_LOCKED(a->a_vp, "VOP_LOCK");
3865}
3866
3867void
3868vop_unlock_pre(void *ap)
3869{
3870	struct vop_unlock_args *a = ap;
3871
3872	if (a->a_flags & LK_INTERLOCK)
3873		ASSERT_VI_LOCKED(a->a_vp, "VOP_UNLOCK");
3874	ASSERT_VOP_LOCKED(a->a_vp, "VOP_UNLOCK");
3875}
3876
3877void
3878vop_unlock_post(void *ap, int rc)
3879{
3880	struct vop_unlock_args *a = ap;
3881
3882	if (a->a_flags & LK_INTERLOCK)
3883		ASSERT_VI_UNLOCKED(a->a_vp, "VOP_UNLOCK");
3884}
3885#endif /* DEBUG_VFS_LOCKS */
3886
3887static struct knlist fs_knlist;
3888
3889static void
3890vfs_event_init(void *arg)
3891{
3892	knlist_init(&fs_knlist, NULL);
3893}
3894/* XXX - correct order? */
3895SYSINIT(vfs_knlist, SI_SUB_VFS, SI_ORDER_ANY, vfs_event_init, NULL);
3896
3897void
3898vfs_event_signal(fsid_t *fsid, u_int32_t event, intptr_t data __unused)
3899{
3900
3901	KNOTE_UNLOCKED(&fs_knlist, event);
3902}
3903
3904static int	filt_fsattach(struct knote *kn);
3905static void	filt_fsdetach(struct knote *kn);
3906static int	filt_fsevent(struct knote *kn, long hint);
3907
3908struct filterops fs_filtops =
3909	{ 0, filt_fsattach, filt_fsdetach, filt_fsevent };
3910
3911static int
3912filt_fsattach(struct knote *kn)
3913{
3914
3915	kn->kn_flags |= EV_CLEAR;
3916	knlist_add(&fs_knlist, kn, 0);
3917	return (0);
3918}
3919
3920static void
3921filt_fsdetach(struct knote *kn)
3922{
3923
3924	knlist_remove(&fs_knlist, kn, 0);
3925}
3926
3927static int
3928filt_fsevent(struct knote *kn, long hint)
3929{
3930
3931	kn->kn_fflags |= hint;
3932	return (kn->kn_fflags != 0);
3933}
3934
3935static int
3936sysctl_vfs_ctl(SYSCTL_HANDLER_ARGS)
3937{
3938	struct vfsidctl vc;
3939	int error;
3940	struct mount *mp;
3941
3942	error = SYSCTL_IN(req, &vc, sizeof(vc));
3943	if (error)
3944		return (error);
3945	if (vc.vc_vers != VFS_CTL_VERS1)
3946		return (EINVAL);
3947	mp = vfs_getvfs(&vc.vc_fsid);
3948	if (mp == NULL)
3949		return (ENOENT);
3950	/* ensure that a specific sysctl goes to the right filesystem. */
3951	if (strcmp(vc.vc_fstypename, "*") != 0 &&
3952	    strcmp(vc.vc_fstypename, mp->mnt_vfc->vfc_name) != 0) {
3953		return (EINVAL);
3954	}
3955	VCTLTOREQ(&vc, req);
3956	return (VFS_SYSCTL(mp, vc.vc_op, req));
3957}
3958
3959SYSCTL_PROC(_vfs, OID_AUTO, ctl, CTLFLAG_WR,
3960        NULL, 0, sysctl_vfs_ctl, "", "Sysctl by fsid");
3961
3962/*
3963 * Function to initialize a va_filerev field sensibly.
3964 * XXX: Wouldn't a random number make a lot more sense ??
3965 */
3966u_quad_t
3967init_va_filerev(void)
3968{
3969	struct bintime bt;
3970
3971	getbinuptime(&bt);
3972	return (((u_quad_t)bt.sec << 32LL) | (bt.frac >> 32LL));
3973}
3974