/*-
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.31 (Berkeley) 5/26/95
 */

/*
 * External virtual filesystem routines
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/vfs_subr.c 195285 2009-07-02 14:19:33Z jamie $");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/condvar.h>
#include <sys/conf.h>
#include <sys/dirent.h>
#include <sys/event.h>
#include <sys/eventhandler.h>
#include <sys/extattr.h>
#include <sys/file.h>
#include <sys/fcntl.h>
#include <sys/jail.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/lockf.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/priv.h>
#include <sys/reboot.h>
#include <sys/sleepqueue.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <machine/stdarg.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>
#include <vm/uma.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#define	WI_MPSAFEQ	0
#define	WI_GIANTQ	1

static MALLOC_DEFINE(M_NETADDR, "subr_export_host", "Export host address structure");

static void	delmntque(struct vnode *vp);
static int	flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo,
		    int slpflag, int slptimeo);
static void	syncer_shutdown(void *arg, int howto);
static int	vtryrecycle(struct vnode *vp);
static void	vbusy(struct vnode *vp);
static void	vinactive(struct vnode *, struct thread *);
static void	v_incr_usecount(struct vnode *);
static void	v_decr_usecount(struct vnode *);
static void	v_decr_useonly(struct vnode *);
static void	v_upgrade_usecount(struct vnode *);
static void	vfree(struct vnode *);
static void	vnlru_free(int);
static void	vgonel(struct vnode *);
static void	vfs_knllock(void *arg);
static void	vfs_knlunlock(void *arg);
static void	vfs_knl_assert_locked(void *arg);
static void	vfs_knl_assert_unlocked(void *arg);
static void	destroy_vpollinfo(struct vpollinfo *vi);

/*
 * Number of vnodes in existence.  Increased whenever getnewvnode()
 * allocates a new vnode, decreased when vdestroy() is called on a
 * VI_DOOMed vnode.
 */
static unsigned long	numvnodes;

SYSCTL_LONG(_vfs, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0, "");

/*
 * Conversion tables for conversion from vnode types to inode formats
 * and back.
 */
enum vtype iftovt_tab[16] = {
	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
};
int vttoif_tab[10] = {
	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
	S_IFSOCK, S_IFIFO, S_IFMT, S_IFMT
};

/*
 * List of vnodes that are ready for recycling.
 */
static TAILQ_HEAD(freelst, vnode) vnode_free_list;

/*
 * Free vnode target.  Free vnodes may simply be files which have been stat'd
 * but not read.  This is somewhat common, and a small cache of such files
 * should be kept to avoid recreation costs.
 */
static u_long wantfreevnodes;
SYSCTL_LONG(_vfs, OID_AUTO, wantfreevnodes, CTLFLAG_RW, &wantfreevnodes, 0, "");
/* Number of vnodes in the free list. */
static u_long freevnodes;
SYSCTL_LONG(_vfs, OID_AUTO, freevnodes, CTLFLAG_RD, &freevnodes, 0, "");

/*
 * Various variables used for debugging the new implementation of
 * reassignbuf().
 * XXX these are probably of (very) limited utility now.
 */
static int reassignbufcalls;
SYSCTL_INT(_vfs, OID_AUTO, reassignbufcalls, CTLFLAG_RW, &reassignbufcalls, 0, "");

/*
 * Cache for the mount type id assigned to NFS.  This is used for
 * special checks in nfs/nfs_nqlease.c and vm/vnode_pager.c.
 */
int	nfs_mount_type = -1;

/* To keep more than one thread at a time from running vfs_getnewfsid */
static struct mtx mntid_mtx;

/*
 * Lock for any access to the following:
 *	vnode_free_list
 *	numvnodes
 *	freevnodes
 */
static struct mtx vnode_free_list_mtx;

/* Publicly exported FS */
struct nfs_public nfs_pub;

/* Zone for allocation of new vnodes - used exclusively by getnewvnode() */
static uma_zone_t vnode_zone;
static uma_zone_t vnodepoll_zone;

/* Set to 1 to print out reclaim of active vnodes */
int	prtactive;

/*
 * The workitem queue.
 *
 * It is useful to delay writes of file data and filesystem metadata
 * for tens of seconds so that quickly created and deleted files need
 * not waste disk bandwidth being created and removed. To realize this,
 * we append vnodes to a "workitem" queue. When running with a soft
 * updates implementation, most pending metadata dependencies should
 * not wait for more than a few seconds. Thus, filesystem metadata is
 * delayed only about half the time that file data is delayed.
 * Similarly, directory updates are more critical, so they are delayed
 * only about a third of the time that file data is delayed. Thus, there are
 * SYNCER_MAXDELAY queues that are processed round-robin at a rate of
 * one each second (driven off the filesystem syncer process). The
 * syncer_delayno variable indicates the next queue that is to be processed.
 * Items that need to be processed soon are placed in this queue:
 *
 *	syncer_workitem_pending[syncer_delayno]
 *
 * A delay of fifteen seconds is done by placing the request fifteen
 * entries later in the queue:
 *
 *	syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask]
 *
 */
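/*
 * Worked example (illustrative only): with the default SYNCER_MAXDELAY of
 * 32, hashinit() in vntblinit() yields syncer_mask == 31, so if
 * syncer_delayno is currently 20, a fifteen second delay selects slot
 * (20 + 15) & 31 == 3, i.e. the request wraps around the circular array
 * of queues.
 */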
static int syncer_delayno;
static long syncer_mask;
LIST_HEAD(synclist, bufobj);
static struct synclist *syncer_workitem_pending[2];
/*
 * The sync_mtx protects:
 *	bo->bo_synclist
 *	sync_vnode_count
 *	syncer_delayno
 *	syncer_state
 *	syncer_workitem_pending
 *	syncer_worklist_len
 *	rushjob
 */
static struct mtx sync_mtx;
static struct cv sync_wakeup;

#define SYNCER_MAXDELAY		32
static int syncer_maxdelay = SYNCER_MAXDELAY;	/* maximum delay time */
static int syncdelay = 30;		/* max time to delay syncing data */
static int filedelay = 30;		/* time to delay syncing files */
SYSCTL_INT(_kern, OID_AUTO, filedelay, CTLFLAG_RW, &filedelay, 0, "");
static int dirdelay = 29;		/* time to delay syncing directories */
SYSCTL_INT(_kern, OID_AUTO, dirdelay, CTLFLAG_RW, &dirdelay, 0, "");
static int metadelay = 28;		/* time to delay syncing metadata */
SYSCTL_INT(_kern, OID_AUTO, metadelay, CTLFLAG_RW, &metadelay, 0, "");
static int rushjob;		/* number of slots to run ASAP */
static int stat_rush_requests;	/* number of times I/O speeded up */
SYSCTL_INT(_debug, OID_AUTO, rush_requests, CTLFLAG_RW, &stat_rush_requests, 0, "");

/*
 * When shutting down the syncer, run it at four times normal speed.
 */
#define SYNCER_SHUTDOWN_SPEEDUP		4
static int sync_vnode_count;
static int syncer_worklist_len;
static enum { SYNCER_RUNNING, SYNCER_SHUTTING_DOWN, SYNCER_FINAL_DELAY }
    syncer_state;

/*
 * Number of vnodes we want to exist at any one time.  This is mostly used
 * to size hash tables in vnode-related code.  It is normally not used in
 * getnewvnode(), as wantfreevnodes is normally nonzero.
 *
 * XXX desiredvnodes is historical cruft and should not exist.
 */
int desiredvnodes;
SYSCTL_INT(_kern, KERN_MAXVNODES, maxvnodes, CTLFLAG_RW,
    &desiredvnodes, 0, "Maximum number of vnodes");
SYSCTL_INT(_kern, OID_AUTO, minvnodes, CTLFLAG_RW,
    &wantfreevnodes, 0, "Minimum number of vnodes (legacy)");
static int vnlru_nowhere;
SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RW,
    &vnlru_nowhere, 0, "Number of times the vnlru process ran without success");

/*
 * Macros to control when a vnode is freed and recycled.  All require
 * the vnode interlock.
 */
#define VCANRECYCLE(vp) (((vp)->v_iflag & VI_FREE) && !(vp)->v_holdcnt)
#define VSHOULDFREE(vp) (!((vp)->v_iflag & VI_FREE) && !(vp)->v_holdcnt)
#define VSHOULDBUSY(vp) (((vp)->v_iflag & VI_FREE) && (vp)->v_holdcnt)


/*
 * Initialize the vnode management data structures.
 */
#ifndef	MAXVNODES_MAX
#define	MAXVNODES_MAX	100000
#endif
static void
vntblinit(void *dummy __unused)
{

	/*
	 * Desiredvnodes is a function of the physical memory size and
	 * the kernel's heap size.  Specifically, desiredvnodes scales
	 * in proportion to the physical memory size until two fifths
	 * of the kernel's heap size is consumed by vnodes and vm
	 * objects.
	 */
	desiredvnodes = min(maxproc + cnt.v_page_count / 4, 2 * vm_kmem_size /
	    (5 * (sizeof(struct vm_object) + sizeof(struct vnode))));
	if (desiredvnodes > MAXVNODES_MAX) {
		if (bootverbose)
			printf("Reducing kern.maxvnodes %d -> %d\n",
			    desiredvnodes, MAXVNODES_MAX);
		desiredvnodes = MAXVNODES_MAX;
	}
	wantfreevnodes = desiredvnodes / 4;
	mtx_init(&mntid_mtx, "mntid", NULL, MTX_DEF);
	TAILQ_INIT(&vnode_free_list);
	mtx_init(&vnode_free_list_mtx, "vnode_free_list", NULL, MTX_DEF);
	vnode_zone = uma_zcreate("VNODE", sizeof (struct vnode), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, 0);
	vnodepoll_zone = uma_zcreate("VNODEPOLL", sizeof (struct vpollinfo),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	/*
	 * Initialize the filesystem syncer.
	 */
	syncer_workitem_pending[WI_MPSAFEQ] = hashinit(syncer_maxdelay, M_VNODE,
	    &syncer_mask);
	syncer_workitem_pending[WI_GIANTQ] = hashinit(syncer_maxdelay, M_VNODE,
	    &syncer_mask);
	syncer_maxdelay = syncer_mask + 1;
	mtx_init(&sync_mtx, "Syncer mtx", NULL, MTX_DEF);
	cv_init(&sync_wakeup, "syncer");
}
SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_FIRST, vntblinit, NULL);


/*
 * Mark a mount point as busy.  Used to synchronize access and to delay
 * unmounting.  Note that mountlist_mtx is not released on failure.
 */
int
vfs_busy(struct mount *mp, int flags)
{

	MPASS((flags & ~MBF_MASK) == 0);
	CTR3(KTR_VFS, "%s: mp %p with flags %d", __func__, mp, flags);

	MNT_ILOCK(mp);
	MNT_REF(mp);
	/*
	 * If the mount point is currently being unmounted, sleep until the
	 * mount point's fate is decided.  If the thread doing the unmounting
	 * fails, it will clear the MNTK_UNMOUNT flag before waking us up,
	 * indicating that this mount point has survived the unmount attempt
	 * and vfs_busy should retry.  Otherwise the unmounting thread will
	 * set the MNTK_REFEXPIRE flag in addition to MNTK_UNMOUNT, indicating
	 * that the mount point is about to be really destroyed.  vfs_busy
	 * needs to release its reference on the mount point in this case and
	 * return with ENOENT, telling the caller that the mount it tried to
	 * busy is no longer valid.
	 */
	while (mp->mnt_kern_flag & MNTK_UNMOUNT) {
		if (flags & MBF_NOWAIT || mp->mnt_kern_flag & MNTK_REFEXPIRE) {
			MNT_REL(mp);
			MNT_IUNLOCK(mp);
			CTR1(KTR_VFS, "%s: failed busying before sleeping",
			    __func__);
			return (ENOENT);
		}
		if (flags & MBF_MNTLSTLOCK)
			mtx_unlock(&mountlist_mtx);
		mp->mnt_kern_flag |= MNTK_MWAIT;
		msleep(mp, MNT_MTX(mp), PVFS, "vfs_busy", 0);
		if (flags & MBF_MNTLSTLOCK)
			mtx_lock(&mountlist_mtx);
	}
	if (flags & MBF_MNTLSTLOCK)
		mtx_unlock(&mountlist_mtx);
	mp->mnt_lockref++;
	MNT_IUNLOCK(mp);
	return (0);
}

/*
 * Free a busy filesystem.
 */
void
vfs_unbusy(struct mount *mp)
{

	CTR2(KTR_VFS, "%s: mp %p", __func__, mp);
	MNT_ILOCK(mp);
	MNT_REL(mp);
	KASSERT(mp->mnt_lockref > 0, ("negative mnt_lockref"));
	mp->mnt_lockref--;
	if (mp->mnt_lockref == 0 && (mp->mnt_kern_flag & MNTK_DRAINING) != 0) {
		MPASS(mp->mnt_kern_flag & MNTK_UNMOUNT);
		CTR1(KTR_VFS, "%s: waking up waiters", __func__);
		mp->mnt_kern_flag &= ~MNTK_DRAINING;
		wakeup(&mp->mnt_lockref);
	}
	MNT_IUNLOCK(mp);
}
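/*
 * Typical usage pattern (sketch): a caller that must keep a mount point
 * from being unmounted while it works on it brackets the work with
 * vfs_busy()/vfs_unbusy(), e.g.
 *
 *	if (vfs_busy(mp, 0) == 0) {
 *		... operate on mp ...
 *		vfs_unbusy(mp);
 *	}
 */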

/*
 * Lookup a mount point by filesystem identifier.
 */
struct mount *
vfs_getvfs(fsid_t *fsid)
{
	struct mount *mp;

	CTR2(KTR_VFS, "%s: fsid %p", __func__, fsid);
	mtx_lock(&mountlist_mtx);
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
			vfs_ref(mp);
			mtx_unlock(&mountlist_mtx);
			return (mp);
		}
	}
	mtx_unlock(&mountlist_mtx);
	CTR2(KTR_VFS, "%s: lookup failed for %p id", __func__, fsid);
	return ((struct mount *) 0);
}

/*
 * Lookup a mount point by filesystem identifier, busying it before
 * returning.
 */
struct mount *
vfs_busyfs(fsid_t *fsid)
{
	struct mount *mp;
	int error;

	CTR2(KTR_VFS, "%s: fsid %p", __func__, fsid);
	mtx_lock(&mountlist_mtx);
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
			error = vfs_busy(mp, MBF_MNTLSTLOCK);
			if (error) {
				mtx_unlock(&mountlist_mtx);
				return (NULL);
			}
			return (mp);
		}
	}
	CTR2(KTR_VFS, "%s: lookup failed for %p id", __func__, fsid);
	mtx_unlock(&mountlist_mtx);
	return ((struct mount *) 0);
}

/*
 * Check if a user can access privileged mount options.
 */
int
vfs_suser(struct mount *mp, struct thread *td)
{
	int error;

	/*
	 * If the thread is jailed, but this is not a jail-friendly file
	 * system, deny immediately.
	 */
	if (!(mp->mnt_vfc->vfc_flags & VFCF_JAIL) && jailed(td->td_ucred))
		return (EPERM);

	/*
	 * If the file system was mounted outside the jail of the calling
	 * thread, deny immediately.
	 */
	if (prison_check(td->td_ucred, mp->mnt_cred) != 0)
		return (EPERM);

	/*
	 * If the file system supports delegated administration, we don't
	 * check for the PRIV_VFS_MOUNT_OWNER privilege - it will be better
	 * verified by the file system itself.
	 * If this is not the user that did the original mount, we check for
	 * the PRIV_VFS_MOUNT_OWNER privilege.
	 */
	if (!(mp->mnt_vfc->vfc_flags & VFCF_DELEGADMIN) &&
	    mp->mnt_cred->cr_uid != td->td_ucred->cr_uid) {
		if ((error = priv_check(td, PRIV_VFS_MOUNT_OWNER)) != 0)
			return (error);
	}
	return (0);
}

/*
 * Get a new unique fsid.  Try to make its val[0] unique, since this value
 * will be used to create fake device numbers for stat().  Also try (but
 * not so hard) to make its val[0] unique mod 2^16, since some emulators only
 * support 16-bit device numbers.  We end up with unique val[0]'s for the
 * first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls.
 *
 * Keep in mind that several mounts may be running in parallel.  Starting
 * the search one past where the previous search terminated is both a
 * micro-optimization and a defense against returning the same fsid to
 * different mounts.
 */
void
vfs_getnewfsid(struct mount *mp)
{
	static u_int16_t mntid_base;
	struct mount *nmp;
	fsid_t tfsid;
	int mtype;

	CTR2(KTR_VFS, "%s: mp %p", __func__, mp);
	mtx_lock(&mntid_mtx);
	mtype = mp->mnt_vfc->vfc_typenum;
	tfsid.val[1] = mtype;
	mtype = (mtype & 0xFF) << 24;
	for (;;) {
		tfsid.val[0] = makedev(255,
		    mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF));
		mntid_base++;
		if ((nmp = vfs_getvfs(&tfsid)) == NULL)
			break;
		vfs_rel(nmp);
	}
	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
	mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
	mtx_unlock(&mntid_mtx);
}
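/*
 * Worked example (illustrative only): for a filesystem type with
 * vfc_typenum 0x12 and mntid_base 0xABCD, the minor argument passed to
 * makedev() above is
 *
 *	(0x12 << 24) | ((0xABCD & 0xFF00) << 8) | (0xABCD & 0xFF)
 *	    == 0x12AB00CD
 *
 * so the filesystem type occupies bits 24-31 and the two bytes of
 * mntid_base land in bits 16-23 and 0-7.
 */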

/*
 * Knob to control the precision of file timestamps:
 *
 *   0 = seconds only; nanoseconds zeroed.
 *   1 = seconds and nanoseconds, accurate within 1/HZ.
 *   2 = seconds and nanoseconds, truncated to microseconds.
 * >=3 = seconds and nanoseconds, maximum precision.
 */
enum { TSP_SEC, TSP_HZ, TSP_USEC, TSP_NSEC };

static int timestamp_precision = TSP_SEC;
SYSCTL_INT(_vfs, OID_AUTO, timestamp_precision, CTLFLAG_RW,
    &timestamp_precision, 0, "");
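/*
 * For example, an administrator who wants full-precision timestamps can
 * set the knob at run time with
 *
 *	sysctl vfs.timestamp_precision=3
 *
 * which selects the TSP_NSEC case in vfs_timestamp() below.
 */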

/*
 * Get a current timestamp.
 */
void
vfs_timestamp(struct timespec *tsp)
{
	struct timeval tv;

	switch (timestamp_precision) {
	case TSP_SEC:
		tsp->tv_sec = time_second;
		tsp->tv_nsec = 0;
		break;
	case TSP_HZ:
		getnanotime(tsp);
		break;
	case TSP_USEC:
		microtime(&tv);
		TIMEVAL_TO_TIMESPEC(&tv, tsp);
		break;
	case TSP_NSEC:
	default:
		nanotime(tsp);
		break;
	}
}

/*
 * Set vnode attributes to VNOVAL
 */
void
vattr_null(struct vattr *vap)
{

	vap->va_type = VNON;
	vap->va_size = VNOVAL;
	vap->va_bytes = VNOVAL;
	vap->va_mode = VNOVAL;
	vap->va_nlink = VNOVAL;
	vap->va_uid = VNOVAL;
	vap->va_gid = VNOVAL;
	vap->va_fsid = VNOVAL;
	vap->va_fileid = VNOVAL;
	vap->va_blocksize = VNOVAL;
	vap->va_rdev = VNOVAL;
	vap->va_atime.tv_sec = VNOVAL;
	vap->va_atime.tv_nsec = VNOVAL;
	vap->va_mtime.tv_sec = VNOVAL;
	vap->va_mtime.tv_nsec = VNOVAL;
	vap->va_ctime.tv_sec = VNOVAL;
	vap->va_ctime.tv_nsec = VNOVAL;
	vap->va_birthtime.tv_sec = VNOVAL;
	vap->va_birthtime.tv_nsec = VNOVAL;
	vap->va_flags = VNOVAL;
	vap->va_gen = VNOVAL;
	vap->va_vaflags = 0;
}
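/*
 * Typical use (sketch): a caller initializes a vattr with vattr_null()
 * and then sets only the attributes it intends to change, so the
 * filesystem can treat VNOVAL fields as "leave alone", roughly:
 *
 *	struct vattr va;
 *
 *	vattr_null(&va);
 *	va.va_size = newsize;
 *	error = VOP_SETATTR(vp, &va, cred);
 */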

/*
 * This routine is called when we have too many vnodes.  It attempts
 * to free <count> vnodes and will potentially free vnodes that still
 * have VM backing store (VM backing store is typically the cause
 * of a vnode blowout so we want to do this).  Therefore, this operation
 * is not considered cheap.
 *
 * A number of conditions may prevent a vnode from being reclaimed.
 * The buffer cache may have references on the vnode, a directory
 * vnode may still have references due to the namei cache representing
 * underlying files, or the vnode may be in active use.  It is not
 * desirable to reuse such vnodes.  These conditions may cause the
 * number of vnodes to reach some minimum value regardless of what
 * you set kern.maxvnodes to.  Do not set kern.maxvnodes too low.
 */
static int
vlrureclaim(struct mount *mp)
{
	struct vnode *vp;
	int done;
	int trigger;
	int usevnodes;
	int count;

	/*
	 * Calculate the trigger point, don't allow user
	 * screwups to blow us up.   This prevents us from
	 * recycling vnodes with lots of resident pages.  We
	 * aren't trying to free memory, we are trying to
	 * free vnodes.
	 */
	usevnodes = desiredvnodes;
	if (usevnodes <= 0)
		usevnodes = 1;
	trigger = cnt.v_page_count * 2 / usevnodes;
	done = 0;
	vn_start_write(NULL, &mp, V_WAIT);
	MNT_ILOCK(mp);
	count = mp->mnt_nvnodelistsize / 10 + 1;
	while (count != 0) {
		vp = TAILQ_FIRST(&mp->mnt_nvnodelist);
		while (vp != NULL && vp->v_type == VMARKER)
			vp = TAILQ_NEXT(vp, v_nmntvnodes);
		if (vp == NULL)
			break;
		TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
		TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
		--count;
		if (!VI_TRYLOCK(vp))
			goto next_iter;
		/*
		 * If it's been deconstructed already, it's still
		 * referenced, or it exceeds the trigger, skip it.
		 */
		if (vp->v_usecount || !LIST_EMPTY(&(vp)->v_cache_src) ||
		    (vp->v_iflag & VI_DOOMED) != 0 || (vp->v_object != NULL &&
		    vp->v_object->resident_page_count > trigger)) {
			VI_UNLOCK(vp);
			goto next_iter;
		}
		MNT_IUNLOCK(mp);
		vholdl(vp);
		if (VOP_LOCK(vp, LK_INTERLOCK|LK_EXCLUSIVE|LK_NOWAIT)) {
			vdrop(vp);
			goto next_iter_mntunlocked;
		}
		VI_LOCK(vp);
		/*
		 * v_usecount may have been bumped after VOP_LOCK() dropped
		 * the vnode interlock and before it was locked again.
		 *
		 * It is not necessary to recheck VI_DOOMED because it can
		 * only be set by another thread that holds both the vnode
		 * lock and vnode interlock.  If another thread has the
		 * vnode lock before we get to VOP_LOCK() and obtains the
		 * vnode interlock after VOP_LOCK() drops the vnode
		 * interlock, the other thread will be unable to drop the
		 * vnode lock before our VOP_LOCK() call fails.
		 */
		if (vp->v_usecount || !LIST_EMPTY(&(vp)->v_cache_src) ||
		    (vp->v_object != NULL &&
		    vp->v_object->resident_page_count > trigger)) {
			VOP_UNLOCK(vp, LK_INTERLOCK);
			goto next_iter_mntunlocked;
		}
		KASSERT((vp->v_iflag & VI_DOOMED) == 0,
		    ("VI_DOOMED unexpectedly detected in vlrureclaim()"));
		vgonel(vp);
		VOP_UNLOCK(vp, 0);
		vdropl(vp);
		done++;
next_iter_mntunlocked:
		if ((count % 256) != 0)
			goto relock_mnt;
		goto yield;
next_iter:
		if ((count % 256) != 0)
			continue;
		MNT_IUNLOCK(mp);
yield:
		uio_yield();
relock_mnt:
		MNT_ILOCK(mp);
	}
	MNT_IUNLOCK(mp);
	vn_finished_write(mp);
	return done;
}

/*
 * Attempt to keep the free list at wantfreevnodes length.
 */
static void
vnlru_free(int count)
{
	struct vnode *vp;
	int vfslocked;

	mtx_assert(&vnode_free_list_mtx, MA_OWNED);
	for (; count > 0; count--) {
		vp = TAILQ_FIRST(&vnode_free_list);
		/*
		 * The list can be modified while the free_list_mtx
		 * has been dropped and vp could be NULL here.
		 */
		if (!vp)
			break;
		VNASSERT(vp->v_op != NULL, vp,
		    ("vnlru_free: vnode already reclaimed."));
		TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
		/*
		 * Don't recycle if we can't get the interlock.
		 */
		if (!VI_TRYLOCK(vp)) {
			TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
			continue;
		}
		VNASSERT(VCANRECYCLE(vp), vp,
		    ("vp inconsistent on freelist"));
		freevnodes--;
		vp->v_iflag &= ~VI_FREE;
		vholdl(vp);
		mtx_unlock(&vnode_free_list_mtx);
		VI_UNLOCK(vp);
		vfslocked = VFS_LOCK_GIANT(vp->v_mount);
		vtryrecycle(vp);
		VFS_UNLOCK_GIANT(vfslocked);
		/*
		 * If the recycle succeeded this vdrop will actually free
		 * the vnode.  If not it will simply place it back on
		 * the free list.
		 */
		vdrop(vp);
		mtx_lock(&vnode_free_list_mtx);
	}
}
/*
 * Attempt to recycle vnodes in a context that is always safe to block.
 * Calling vlrureclaim() from the bowels of filesystem code has some
 * interesting deadlock problems.
 */
static struct proc *vnlruproc;
static int vnlruproc_sig;

static void
vnlru_proc(void)
{
	struct mount *mp, *nmp;
	int done, vfslocked;
	struct proc *p = vnlruproc;

	EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, p,
	    SHUTDOWN_PRI_FIRST);

	for (;;) {
		kproc_suspend_check(p);
		mtx_lock(&vnode_free_list_mtx);
		if (freevnodes > wantfreevnodes)
			vnlru_free(freevnodes - wantfreevnodes);
		if (numvnodes <= desiredvnodes * 9 / 10) {
			vnlruproc_sig = 0;
			wakeup(&vnlruproc_sig);
			msleep(vnlruproc, &vnode_free_list_mtx,
			    PVFS|PDROP, "vlruwt", hz);
			continue;
		}
		mtx_unlock(&vnode_free_list_mtx);
		done = 0;
		mtx_lock(&mountlist_mtx);
		for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
			if (vfs_busy(mp, MBF_NOWAIT | MBF_MNTLSTLOCK)) {
				nmp = TAILQ_NEXT(mp, mnt_list);
				continue;
			}
			vfslocked = VFS_LOCK_GIANT(mp);
			done += vlrureclaim(mp);
			VFS_UNLOCK_GIANT(vfslocked);
			mtx_lock(&mountlist_mtx);
			nmp = TAILQ_NEXT(mp, mnt_list);
			vfs_unbusy(mp);
		}
		mtx_unlock(&mountlist_mtx);
		if (done == 0) {
			EVENTHANDLER_INVOKE(vfs_lowvnodes, desiredvnodes / 10);
#if 0
			/* These messages are temporary debugging aids */
			if (vnlru_nowhere < 5)
				printf("vnlru process getting nowhere..\n");
			else if (vnlru_nowhere == 5)
				printf("vnlru process messages stopped.\n");
#endif
			vnlru_nowhere++;
			tsleep(vnlruproc, PPAUSE, "vlrup", hz * 3);
		} else
			uio_yield();
	}
}

static struct kproc_desc vnlru_kp = {
	"vnlru",
	vnlru_proc,
	&vnlruproc
};
SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start,
    &vnlru_kp);

/*
 * Routines having to do with the management of the vnode table.
 */

void
vdestroy(struct vnode *vp)
{
	struct bufobj *bo;

	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
	mtx_lock(&vnode_free_list_mtx);
	numvnodes--;
	mtx_unlock(&vnode_free_list_mtx);
	bo = &vp->v_bufobj;
	VNASSERT((vp->v_iflag & VI_FREE) == 0, vp,
	    ("cleaned vnode still on the free list."));
	VNASSERT(vp->v_data == NULL, vp, ("cleaned vnode isn't"));
	VNASSERT(vp->v_holdcnt == 0, vp, ("Non-zero hold count"));
	VNASSERT(vp->v_usecount == 0, vp, ("Non-zero use count"));
	VNASSERT(vp->v_writecount == 0, vp, ("Non-zero write count"));
	VNASSERT(bo->bo_numoutput == 0, vp, ("Clean vnode has pending I/O's"));
	VNASSERT(bo->bo_clean.bv_cnt == 0, vp, ("cleanbufcnt not 0"));
	VNASSERT(bo->bo_clean.bv_root == NULL, vp, ("cleanblkroot not NULL"));
	VNASSERT(bo->bo_dirty.bv_cnt == 0, vp, ("dirtybufcnt not 0"));
	VNASSERT(bo->bo_dirty.bv_root == NULL, vp, ("dirtyblkroot not NULL"));
	VNASSERT(TAILQ_EMPTY(&vp->v_cache_dst), vp, ("vp has namecache dst"));
	VNASSERT(LIST_EMPTY(&vp->v_cache_src), vp, ("vp has namecache src"));
	VNASSERT(vp->v_cache_dd == NULL, vp, ("vp has namecache for .."));
	VI_UNLOCK(vp);
#ifdef MAC
	mac_vnode_destroy(vp);
#endif
	if (vp->v_pollinfo != NULL)
		destroy_vpollinfo(vp->v_pollinfo);
#ifdef INVARIANTS
	/* XXX Elsewhere we can detect an already freed vnode via NULL v_op. */
	vp->v_op = NULL;
#endif
	lockdestroy(vp->v_vnlock);
	mtx_destroy(&vp->v_interlock);
	mtx_destroy(BO_MTX(bo));
	uma_zfree(vnode_zone, vp);
}

/*
 * Try to recycle a freed vnode.  We abort if anyone picks up a reference
 * before we actually vgone().  This function must be called with the vnode
 * held to prevent the vnode from being returned to the free list midway
 * through vgone().
 */
static int
vtryrecycle(struct vnode *vp)
{
	struct mount *vnmp;

	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
	VNASSERT(vp->v_holdcnt, vp,
	    ("vtryrecycle: Recycling vp %p without a reference.", vp));
	/*
	 * This vnode may be found and locked via some other list; if so we
	 * can't recycle it yet.
	 */
	if (VOP_LOCK(vp, LK_EXCLUSIVE | LK_NOWAIT) != 0) {
		CTR2(KTR_VFS,
		    "%s: impossible to recycle, vp %p lock is already held",
		    __func__, vp);
		return (EWOULDBLOCK);
	}
	/*
	 * Don't recycle if its filesystem is being suspended.
	 */
	if (vn_start_write(vp, &vnmp, V_NOWAIT) != 0) {
		VOP_UNLOCK(vp, 0);
		CTR2(KTR_VFS,
		    "%s: impossible to recycle, cannot start the write for %p",
		    __func__, vp);
		return (EBUSY);
	}
	/*
	 * If we got this far, we need to acquire the interlock and see if
	 * anyone picked up this vnode from another list.  If not, we will
	 * mark it with DOOMED via vgonel() so that anyone who does find it
	 * will skip over it.
	 */
	VI_LOCK(vp);
	if (vp->v_usecount) {
		VOP_UNLOCK(vp, LK_INTERLOCK);
		vn_finished_write(vnmp);
		CTR2(KTR_VFS,
		    "%s: impossible to recycle, %p is already referenced",
		    __func__, vp);
		return (EBUSY);
	}
	if ((vp->v_iflag & VI_DOOMED) == 0)
		vgonel(vp);
	VOP_UNLOCK(vp, LK_INTERLOCK);
	vn_finished_write(vnmp);
	return (0);
}

/*
 * Return the next vnode from the free list.
 */
int
getnewvnode(const char *tag, struct mount *mp, struct vop_vector *vops,
    struct vnode **vpp)
{
	struct vnode *vp = NULL;
	struct bufobj *bo;

	CTR3(KTR_VFS, "%s: mp %p with tag %s", __func__, mp, tag);
	mtx_lock(&vnode_free_list_mtx);
	/*
	 * Lend our context to reclaim vnodes if they've exceeded the max.
	 */
	if (freevnodes > wantfreevnodes)
		vnlru_free(1);
	/*
	 * Wait for available vnodes.
	 */
	if (numvnodes > desiredvnodes) {
		if (mp != NULL && (mp->mnt_kern_flag & MNTK_SUSPEND)) {
			/*
			 * The file system is being suspended; we cannot risk
			 * a deadlock here, so allocate a new vnode anyway.
			 */
			if (freevnodes > wantfreevnodes)
				vnlru_free(freevnodes - wantfreevnodes);
			goto alloc;
		}
		if (vnlruproc_sig == 0) {
			vnlruproc_sig = 1;	/* avoid unnecessary wakeups */
			wakeup(vnlruproc);
		}
		msleep(&vnlruproc_sig, &vnode_free_list_mtx, PVFS,
		    "vlruwk", hz);
#if 0	/* XXX Not all VFS_VGET/ffs_vget callers check returns. */
		if (numvnodes > desiredvnodes) {
			mtx_unlock(&vnode_free_list_mtx);
			return (ENFILE);
		}
#endif
	}
alloc:
	numvnodes++;
	mtx_unlock(&vnode_free_list_mtx);
	vp = (struct vnode *) uma_zalloc(vnode_zone, M_WAITOK|M_ZERO);
	/*
	 * Setup locks.
	 */
	vp->v_vnlock = &vp->v_lock;
	mtx_init(&vp->v_interlock, "vnode interlock", NULL, MTX_DEF);
	/*
	 * By default, don't allow shared locks unless filesystems
	 * opt-in.
	 */
	lockinit(vp->v_vnlock, PVFS, tag, VLKTIMEOUT, LK_NOSHARE);
	/*
	 * Initialize bufobj.
	 */
	bo = &vp->v_bufobj;
	bo->__bo_vnode = vp;
	mtx_init(BO_MTX(bo), "bufobj interlock", NULL, MTX_DEF);
	bo->bo_ops = &buf_ops_bio;
	bo->bo_private = vp;
	TAILQ_INIT(&bo->bo_clean.bv_hd);
	TAILQ_INIT(&bo->bo_dirty.bv_hd);
	/*
	 * Initialize namecache.
	 */
	LIST_INIT(&vp->v_cache_src);
	TAILQ_INIT(&vp->v_cache_dst);
	/*
	 * Finalize various vnode identity bits.
	 */
	vp->v_type = VNON;
	vp->v_tag = tag;
	vp->v_op = vops;
	v_incr_usecount(vp);
	vp->v_data = 0;
#ifdef MAC
	mac_vnode_init(vp);
	if (mp != NULL && (mp->mnt_flag & MNT_MULTILABEL) == 0)
		mac_vnode_associate_singlelabel(mp, vp);
	else if (mp == NULL && vops != &dead_vnodeops)
		printf("NULL mp in getnewvnode()\n");
#endif
	if (mp != NULL) {
		bo->bo_bsize = mp->mnt_stat.f_iosize;
		if ((mp->mnt_kern_flag & MNTK_NOKNOTE) != 0)
			vp->v_vflag |= VV_NOKNOTE;
	}

	*vpp = vp;
	return (0);
}

/*
 * Delete from old mount point vnode list, if on one.
 */
static void
delmntque(struct vnode *vp)
{
	struct mount *mp;

	mp = vp->v_mount;
	if (mp == NULL)
		return;
	MNT_ILOCK(mp);
	vp->v_mount = NULL;
	VNASSERT(mp->mnt_nvnodelistsize > 0, vp,
		("bad mount point vnode list size"));
	TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
	mp->mnt_nvnodelistsize--;
	MNT_REL(mp);
	MNT_IUNLOCK(mp);
}

static void
insmntque_stddtr(struct vnode *vp, void *dtr_arg)
{

	vp->v_data = NULL;
	vp->v_op = &dead_vnodeops;
	/* XXX non mp-safe fs may still call insmntque with vnode
	   unlocked */
	if (!VOP_ISLOCKED(vp))
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	vgone(vp);
	vput(vp);
}

/*
 * Insert into list of vnodes for the new mount point, if available.
 */
int
insmntque1(struct vnode *vp, struct mount *mp,
	void (*dtr)(struct vnode *, void *), void *dtr_arg)
{
	int locked;

	KASSERT(vp->v_mount == NULL,
		("insmntque: vnode already on per mount vnode list"));
	VNASSERT(mp != NULL, vp, ("Don't call insmntque(foo, NULL)"));
#ifdef DEBUG_VFS_LOCKS
	if (!VFS_NEEDSGIANT(mp))
		ASSERT_VOP_ELOCKED(vp,
		    "insmntque: mp-safe fs and non-locked vp");
#endif
	MNT_ILOCK(mp);
	if ((mp->mnt_kern_flag & MNTK_NOINSMNTQ) != 0 &&
	    ((mp->mnt_kern_flag & MNTK_UNMOUNTF) != 0 ||
	     mp->mnt_nvnodelistsize == 0)) {
		locked = VOP_ISLOCKED(vp);
		if (!locked || (locked == LK_EXCLUSIVE &&
		     (vp->v_vflag & VV_FORCEINSMQ) == 0)) {
			MNT_IUNLOCK(mp);
			if (dtr != NULL)
				dtr(vp, dtr_arg);
			return (EBUSY);
		}
	}
	vp->v_mount = mp;
	MNT_REF(mp);
	TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
	VNASSERT(mp->mnt_nvnodelistsize >= 0, vp,
		("neg mount point vnode list size"));
	mp->mnt_nvnodelistsize++;
	MNT_IUNLOCK(mp);
	return (0);
}

int
insmntque(struct vnode *vp, struct mount *mp)
{

	return (insmntque1(vp, mp, insmntque_stddtr, NULL));
}
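/*
 * Sketch of how a filesystem typically uses getnewvnode() and insmntque()
 * when instantiating a new vnode (names such as myfs_vnodeops are
 * hypothetical):
 *
 *	error = getnewvnode("myfs", mp, &myfs_vnodeops, &vp);
 *	if (error == 0) {
 *		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 *		error = insmntque(vp, mp);
 *	}
 *
 * On failure insmntque() runs the default destructor, which vgone()s and
 * vput()s the vnode, so the caller must not reuse it.
 */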

/*
 * Flush out and invalidate all buffers associated with a bufobj
 * Called with the underlying object locked.
 */
int
bufobj_invalbuf(struct bufobj *bo, int flags, int slpflag, int slptimeo)
{
	int error;

	BO_LOCK(bo);
	if (flags & V_SAVE) {
		error = bufobj_wwait(bo, slpflag, slptimeo);
		if (error) {
			BO_UNLOCK(bo);
			return (error);
		}
		if (bo->bo_dirty.bv_cnt > 0) {
			BO_UNLOCK(bo);
			if ((error = BO_SYNC(bo, MNT_WAIT)) != 0)
				return (error);
			/*
			 * XXX We could save a lock/unlock if this was only
			 * enabled under INVARIANTS
			 */
			BO_LOCK(bo);
			if (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0)
				panic("vinvalbuf: dirty bufs");
		}
	}
	/*
	 * If you alter this loop please notice that interlock is dropped and
	 * reacquired in flushbuflist.  Special care is needed to ensure that
	 * no race conditions occur from this.
	 */
	do {
		error = flushbuflist(&bo->bo_clean,
		    flags, bo, slpflag, slptimeo);
		if (error == 0)
			error = flushbuflist(&bo->bo_dirty,
			    flags, bo, slpflag, slptimeo);
		if (error != 0 && error != EAGAIN) {
			BO_UNLOCK(bo);
			return (error);
		}
	} while (error != 0);

	/*
	 * Wait for I/O to complete.  XXX needs cleaning up.  The vnode can
	 * have write I/O in-progress but if there is a VM object then the
	 * VM object can also have read-I/O in-progress.
	 */
	do {
		bufobj_wwait(bo, 0, 0);
		BO_UNLOCK(bo);
		if (bo->bo_object != NULL) {
			VM_OBJECT_LOCK(bo->bo_object);
			vm_object_pip_wait(bo->bo_object, "bovlbx");
			VM_OBJECT_UNLOCK(bo->bo_object);
		}
		BO_LOCK(bo);
	} while (bo->bo_numoutput > 0);
	BO_UNLOCK(bo);

	/*
	 * Destroy the copy in the VM cache, too.
	 */
	if (bo->bo_object != NULL && (flags & (V_ALT | V_NORMAL)) == 0) {
		VM_OBJECT_LOCK(bo->bo_object);
		vm_object_page_remove(bo->bo_object, 0, 0,
			(flags & V_SAVE) ? TRUE : FALSE);
		VM_OBJECT_UNLOCK(bo->bo_object);
	}

#ifdef INVARIANTS
	BO_LOCK(bo);
	if ((flags & (V_ALT | V_NORMAL)) == 0 &&
	    (bo->bo_dirty.bv_cnt > 0 || bo->bo_clean.bv_cnt > 0))
		panic("vinvalbuf: flush failed");
	BO_UNLOCK(bo);
#endif
	return (0);
}

/*
 * Flush out and invalidate all buffers associated with a vnode.
 * Called with the underlying object locked.
 */
int
vinvalbuf(struct vnode *vp, int flags, int slpflag, int slptimeo)
{

	CTR3(KTR_VFS, "%s: vp %p with flags %d", __func__, vp, flags);
	ASSERT_VOP_LOCKED(vp, "vinvalbuf");
	return (bufobj_invalbuf(&vp->v_bufobj, flags, slpflag, slptimeo));
}

/*
 * Flush out buffers on the specified list.
 *
 */
static int
flushbuflist( struct bufv *bufv, int flags, struct bufobj *bo, int slpflag,
    int slptimeo)
{
	struct buf *bp, *nbp;
	int retval, error;
	daddr_t lblkno;
	b_xflags_t xflags;

	ASSERT_BO_LOCKED(bo);

	retval = 0;
	TAILQ_FOREACH_SAFE(bp, &bufv->bv_hd, b_bobufs, nbp) {
		if (((flags & V_NORMAL) && (bp->b_xflags & BX_ALTDATA)) ||
		    ((flags & V_ALT) && (bp->b_xflags & BX_ALTDATA) == 0)) {
			continue;
		}
		lblkno = 0;
		xflags = 0;
		if (nbp != NULL) {
			lblkno = nbp->b_lblkno;
			xflags = nbp->b_xflags &
				(BX_BKGRDMARKER | BX_VNDIRTY | BX_VNCLEAN);
		}
		retval = EAGAIN;
		error = BUF_TIMELOCK(bp,
		    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, BO_MTX(bo),
		    "flushbuf", slpflag, slptimeo);
		if (error) {
			BO_LOCK(bo);
			return (error != ENOLCK ? error : EAGAIN);
		}
		KASSERT(bp->b_bufobj == bo,
		    ("bp %p wrong b_bufobj %p should be %p",
		    bp, bp->b_bufobj, bo));
		if (bp->b_bufobj != bo) {	/* XXX: necessary ? */
			BUF_UNLOCK(bp);
			BO_LOCK(bo);
			return (EAGAIN);
		}
		/*
		 * XXX Since there are no node locks for NFS, I
		 * believe there is a slight chance that a delayed
		 * write will occur while sleeping just above, so
		 * check for it.
		 */
		if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) &&
		    (flags & V_SAVE)) {
			bremfree(bp);
			bp->b_flags |= B_ASYNC;
			bwrite(bp);
			BO_LOCK(bo);
			return (EAGAIN);	/* XXX: why not loop ? */
		}
		bremfree(bp);
		bp->b_flags |= (B_INVAL | B_RELBUF);
		bp->b_flags &= ~B_ASYNC;
		brelse(bp);
		BO_LOCK(bo);
		if (nbp != NULL &&
		    (nbp->b_bufobj != bo ||
		     nbp->b_lblkno != lblkno ||
		     (nbp->b_xflags &
		      (BX_BKGRDMARKER | BX_VNDIRTY | BX_VNCLEAN)) != xflags))
			break;			/* nbp invalid */
	}
	return (retval);
}

/*
 * Truncate a file's buffer and pages to a specified length.  This
 * is in lieu of the old vinvalbuf mechanism, which performed unneeded
 * sync activity.
 */
int
vtruncbuf(struct vnode *vp, struct ucred *cred, struct thread *td,
    off_t length, int blksize)
{
	struct buf *bp, *nbp;
	int anyfreed;
	int trunclbn;
	struct bufobj *bo;

	CTR5(KTR_VFS, "%s: vp %p with cred %p and block %d:%ju", __func__,
	    vp, cred, blksize, (uintmax_t)length);

	/*
	 * Round up to the *next* lbn.
	 */
	trunclbn = (length + blksize - 1) / blksize;

	ASSERT_VOP_LOCKED(vp, "vtruncbuf");
restart:
	bo = &vp->v_bufobj;
	BO_LOCK(bo);
	anyfreed = 1;
	for (;anyfreed;) {
		anyfreed = 0;
		TAILQ_FOREACH_SAFE(bp, &bo->bo_clean.bv_hd, b_bobufs, nbp) {
			if (bp->b_lblkno < trunclbn)
				continue;
			if (BUF_LOCK(bp,
			    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
			    BO_MTX(bo)) == ENOLCK)
				goto restart;

			bremfree(bp);
			bp->b_flags |= (B_INVAL | B_RELBUF);
			bp->b_flags &= ~B_ASYNC;
			brelse(bp);
			anyfreed = 1;

			if (nbp != NULL &&
			    (((nbp->b_xflags & BX_VNCLEAN) == 0) ||
			    (nbp->b_vp != vp) ||
			    (nbp->b_flags & B_DELWRI))) {
				goto restart;
			}
			BO_LOCK(bo);
		}

		TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
			if (bp->b_lblkno < trunclbn)
				continue;
			if (BUF_LOCK(bp,
			    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
			    BO_MTX(bo)) == ENOLCK)
				goto restart;
			bremfree(bp);
			bp->b_flags |= (B_INVAL | B_RELBUF);
			bp->b_flags &= ~B_ASYNC;
			brelse(bp);
			anyfreed = 1;
			if (nbp != NULL &&
			    (((nbp->b_xflags & BX_VNDIRTY) == 0) ||
			    (nbp->b_vp != vp) ||
			    (nbp->b_flags & B_DELWRI) == 0)) {
				goto restart;
			}
			BO_LOCK(bo);
		}
	}

	if (length > 0) {
restartsync:
		TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
			if (bp->b_lblkno > 0)
				continue;
			/*
			 * Since we hold the vnode lock this should only
			 * fail if we're racing with the buf daemon.
			 */
			if (BUF_LOCK(bp,
			    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
			    BO_MTX(bo)) == ENOLCK) {
				goto restart;
			}
			VNASSERT((bp->b_flags & B_DELWRI), vp,
			    ("buf(%p) on dirty queue without DELWRI", bp));

			bremfree(bp);
			bawrite(bp);
			BO_LOCK(bo);
			goto restartsync;
		}
	}

	bufobj_wwait(bo, 0, 0);
	BO_UNLOCK(bo);
	vnode_pager_setsize(vp, length);

	return (0);
}
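/*
 * Usage note (sketch): a filesystem truncating a file to "length" while
 * holding the vnode lock would typically call
 *
 *	error = vtruncbuf(vp, cred, td, length, fs_bsize);
 *
 * where fs_bsize stands for that filesystem's block size (a hypothetical
 * name here); with length == 0 every buffer at a non-negative logical
 * block number is invalidated.
 */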

/*
 * buf_splay() - splay tree core for the clean/dirty list of buffers in
 * 		 a vnode.
 *
 *	NOTE: We have to deal with the special case of a background bitmap
 *	buffer, a situation where two buffers will have the same logical
 *	block offset.  We want (1) only the foreground buffer to be accessed
 *	in a lookup and (2) must differentiate between the foreground and
 *	background buffer in the splay tree algorithm because the splay
 *	tree cannot normally handle multiple entities with the same 'index'.
 *	We accomplish this by adding differentiating flags to the splay tree's
 *	numerical domain.
 */
static
struct buf *
buf_splay(daddr_t lblkno, b_xflags_t xflags, struct buf *root)
{
	struct buf dummy;
	struct buf *lefttreemax, *righttreemin, *y;

	if (root == NULL)
		return (NULL);
	lefttreemax = righttreemin = &dummy;
	for (;;) {
		if (lblkno < root->b_lblkno ||
		    (lblkno == root->b_lblkno &&
		    (xflags & BX_BKGRDMARKER) < (root->b_xflags & BX_BKGRDMARKER))) {
			if ((y = root->b_left) == NULL)
				break;
			if (lblkno < y->b_lblkno) {
				/* Rotate right. */
				root->b_left = y->b_right;
				y->b_right = root;
				root = y;
				if ((y = root->b_left) == NULL)
					break;
			}
			/* Link into the new root's right tree. */
			righttreemin->b_left = root;
			righttreemin = root;
		} else if (lblkno > root->b_lblkno ||
		    (lblkno == root->b_lblkno &&
		    (xflags & BX_BKGRDMARKER) > (root->b_xflags & BX_BKGRDMARKER))) {
			if ((y = root->b_right) == NULL)
				break;
			if (lblkno > y->b_lblkno) {
				/* Rotate left. */
				root->b_right = y->b_left;
				y->b_left = root;
				root = y;
				if ((y = root->b_right) == NULL)
					break;
			}
			/* Link into the new root's left tree. */
			lefttreemax->b_right = root;
			lefttreemax = root;
		} else {
			break;
		}
		root = y;
	}
	/* Assemble the new root. */
	lefttreemax->b_right = root->b_left;
	righttreemin->b_left = root->b_right;
	root->b_left = dummy.b_right;
	root->b_right = dummy.b_left;
	return (root);
}
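/*
 * In effect the tree is keyed on the pair (b_lblkno, BX_BKGRDMARKER), so
 * two buffers sharing a logical block number still compare as distinct
 * keys: the foreground buffer (marker clear) sorts before its background
 * bitmap shadow (marker set), and a lookup that passes xflags == 0, as
 * gbincore() below does, steers the search toward the foreground buffer.
 */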

static void
buf_vlist_remove(struct buf *bp)
{
	struct buf *root;
	struct bufv *bv;

	KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
	ASSERT_BO_LOCKED(bp->b_bufobj);
	KASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) !=
	    (BX_VNDIRTY|BX_VNCLEAN),
	    ("buf_vlist_remove: Buf %p is on two lists", bp));
	if (bp->b_xflags & BX_VNDIRTY)
		bv = &bp->b_bufobj->bo_dirty;
	else
		bv = &bp->b_bufobj->bo_clean;
	if (bp != bv->bv_root) {
		root = buf_splay(bp->b_lblkno, bp->b_xflags, bv->bv_root);
		KASSERT(root == bp, ("splay lookup failed in remove"));
	}
	if (bp->b_left == NULL) {
		root = bp->b_right;
	} else {
		root = buf_splay(bp->b_lblkno, bp->b_xflags, bp->b_left);
		root->b_right = bp->b_right;
	}
	bv->bv_root = root;
	TAILQ_REMOVE(&bv->bv_hd, bp, b_bobufs);
	bv->bv_cnt--;
	bp->b_xflags &= ~(BX_VNDIRTY | BX_VNCLEAN);
}

/*
 * Add the buffer to the sorted clean or dirty block list using a
 * splay tree algorithm.
 *
 * NOTE: xflags is passed as a constant, optimizing this inline function!
 */
static void
buf_vlist_add(struct buf *bp, struct bufobj *bo, b_xflags_t xflags)
{
	struct buf *root;
	struct bufv *bv;

	ASSERT_BO_LOCKED(bo);
	KASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0,
	    ("buf_vlist_add: Buf %p has existing xflags %d", bp, bp->b_xflags));
	bp->b_xflags |= xflags;
	if (xflags & BX_VNDIRTY)
		bv = &bo->bo_dirty;
	else
		bv = &bo->bo_clean;

	root = buf_splay(bp->b_lblkno, bp->b_xflags, bv->bv_root);
	if (root == NULL) {
		bp->b_left = NULL;
		bp->b_right = NULL;
		TAILQ_INSERT_TAIL(&bv->bv_hd, bp, b_bobufs);
	} else if (bp->b_lblkno < root->b_lblkno ||
	    (bp->b_lblkno == root->b_lblkno &&
	    (bp->b_xflags & BX_BKGRDMARKER) < (root->b_xflags & BX_BKGRDMARKER))) {
		bp->b_left = root->b_left;
		bp->b_right = root;
		root->b_left = NULL;
		TAILQ_INSERT_BEFORE(root, bp, b_bobufs);
	} else {
		bp->b_right = root->b_right;
		bp->b_left = root;
		root->b_right = NULL;
		TAILQ_INSERT_AFTER(&bv->bv_hd, root, bp, b_bobufs);
	}
	bv->bv_cnt++;
	bv->bv_root = bp;
}

/*
 * Lookup a buffer using the splay tree.  Note that we specifically avoid
 * shadow buffers used in background bitmap writes.
 *
 * This code isn't quite as efficient as it could be because we are
 * maintaining two sorted lists and do not know which list the block
 * resides in.
 *
 * During a "make buildworld" the desired buffer is found at one of
 * the roots more than 60% of the time.  Thus, checking both roots
 * before performing either splay eliminates unnecessary splays on the
 * first tree splayed.
 */
struct buf *
gbincore(struct bufobj *bo, daddr_t lblkno)
{
	struct buf *bp;

	ASSERT_BO_LOCKED(bo);
	if ((bp = bo->bo_clean.bv_root) != NULL &&
	    bp->b_lblkno == lblkno && !(bp->b_xflags & BX_BKGRDMARKER))
		return (bp);
	if ((bp = bo->bo_dirty.bv_root) != NULL &&
	    bp->b_lblkno == lblkno && !(bp->b_xflags & BX_BKGRDMARKER))
		return (bp);
	if ((bp = bo->bo_clean.bv_root) != NULL) {
		bo->bo_clean.bv_root = bp = buf_splay(lblkno, 0, bp);
		if (bp->b_lblkno == lblkno && !(bp->b_xflags & BX_BKGRDMARKER))
			return (bp);
	}
	if ((bp = bo->bo_dirty.bv_root) != NULL) {
		bo->bo_dirty.bv_root = bp = buf_splay(lblkno, 0, bp);
		if (bp->b_lblkno == lblkno && !(bp->b_xflags & BX_BKGRDMARKER))
			return (bp);
	}
	return (NULL);
}

/*
 * Associate a buffer with a vnode.
 */
void
bgetvp(struct vnode *vp, struct buf *bp)
{
	struct bufobj *bo;

	bo = &vp->v_bufobj;
	ASSERT_BO_LOCKED(bo);
	VNASSERT(bp->b_vp == NULL, bp->b_vp, ("bgetvp: not free"));

	CTR3(KTR_BUF, "bgetvp(%p) vp %p flags %X", bp, vp, bp->b_flags);
	VNASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0, vp,
	    ("bgetvp: bp already attached! %p", bp));

	vhold(vp);
	if (VFS_NEEDSGIANT(vp->v_mount) || bo->bo_flag & BO_NEEDSGIANT)
		bp->b_flags |= B_NEEDSGIANT;
	bp->b_vp = vp;
	bp->b_bufobj = bo;
	/*
	 * Insert onto list for new vnode.
	 */
	buf_vlist_add(bp, bo, BX_VNCLEAN);
}

/*
 * Disassociate a buffer from a vnode.
 */
void
brelvp(struct buf *bp)
{
	struct bufobj *bo;
	struct vnode *vp;

	CTR3(KTR_BUF, "brelvp(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
	KASSERT(bp->b_vp != NULL, ("brelvp: NULL"));

	/*
	 * Delete from old vnode list, if on one.
	 */
	vp = bp->b_vp;		/* XXX */
	bo = bp->b_bufobj;
	BO_LOCK(bo);
	if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN))
		buf_vlist_remove(bp);
	else
		panic("brelvp: Buffer %p not on queue.", bp);
	if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) {
		bo->bo_flag &= ~BO_ONWORKLST;
		mtx_lock(&sync_mtx);
		LIST_REMOVE(bo, bo_synclist);
		syncer_worklist_len--;
		mtx_unlock(&sync_mtx);
	}
	bp->b_flags &= ~B_NEEDSGIANT;
	bp->b_vp = NULL;
	bp->b_bufobj = NULL;
	BO_UNLOCK(bo);
	vdrop(vp);
}

/*
 * Add an item to the syncer work queue.
 */
static void
vn_syncer_add_to_worklist(struct bufobj *bo, int delay)
{
	int queue, slot;

	ASSERT_BO_LOCKED(bo);

	mtx_lock(&sync_mtx);
	if (bo->bo_flag & BO_ONWORKLST)
		LIST_REMOVE(bo, bo_synclist);
	else {
		bo->bo_flag |= BO_ONWORKLST;
		syncer_worklist_len++;
	}

	if (delay > syncer_maxdelay - 2)
		delay = syncer_maxdelay - 2;
	slot = (syncer_delayno + delay) & syncer_mask;

	queue = VFS_NEEDSGIANT(bo->__bo_vnode->v_mount) ? WI_GIANTQ :
	    WI_MPSAFEQ;
	LIST_INSERT_HEAD(&syncer_workitem_pending[queue][slot], bo,
	    bo_synclist);
	mtx_unlock(&sync_mtx);
}

static int
sysctl_vfs_worklist_len(SYSCTL_HANDLER_ARGS)
{
	int error, len;

	mtx_lock(&sync_mtx);
	len = syncer_worklist_len - sync_vnode_count;
	mtx_unlock(&sync_mtx);
	error = SYSCTL_OUT(req, &len, sizeof(len));
	return (error);
}

SYSCTL_PROC(_vfs, OID_AUTO, worklist_len, CTLTYPE_INT | CTLFLAG_RD, NULL, 0,
    sysctl_vfs_worklist_len, "I", "Syncer thread worklist length");
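/*
 * For example, the current backlog of non-syncer work items can be
 * inspected from userland with
 *
 *	sysctl vfs.worklist_len
 *
 * which reports syncer_worklist_len minus the syncer vnodes themselves.
 */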

static struct proc *updateproc;
static void sched_sync(void);
static struct kproc_desc up_kp = {
	"syncer",
	sched_sync,
	&updateproc
};
SYSINIT(syncer, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp);

static int
sync_vnode(struct synclist *slp, struct bufobj **bo, struct thread *td)
{
	struct vnode *vp;
	struct mount *mp;

	*bo = LIST_FIRST(slp);
	if (*bo == NULL)
		return (0);
	vp = (*bo)->__bo_vnode;	/* XXX */
	if (VOP_ISLOCKED(vp) != 0 || VI_TRYLOCK(vp) == 0)
		return (1);
	/*
	 * We use vhold in case the vnode does not
	 * successfully sync.  vhold prevents the vnode from
	 * going away when we unlock the sync_mtx so that
	 * we can acquire the vnode interlock.
	 */
	vholdl(vp);
	mtx_unlock(&sync_mtx);
	VI_UNLOCK(vp);
	if (vn_start_write(vp, &mp, V_NOWAIT) != 0) {
		vdrop(vp);
		mtx_lock(&sync_mtx);
		return (*bo == LIST_FIRST(slp));
	}
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	(void) VOP_FSYNC(vp, MNT_LAZY, td);
	VOP_UNLOCK(vp, 0);
	vn_finished_write(mp);
	BO_LOCK(*bo);
	if (((*bo)->bo_flag & BO_ONWORKLST) != 0) {
		/*
		 * Put us back on the worklist.  The worklist
		 * routine will remove us from our current
		 * position and then add us back in at a later
		 * position.
		 */
		vn_syncer_add_to_worklist(*bo, syncdelay);
	}
	BO_UNLOCK(*bo);
	vdrop(vp);
	mtx_lock(&sync_mtx);
	return (0);
}

/*
 * System filesystem synchronizer daemon.
 */
static void
sched_sync(void)
{
	struct synclist *gnext, *next;
	struct synclist *gslp, *slp;
	struct bufobj *bo;
	long starttime;
	struct thread *td = curthread;
	int last_work_seen;
	int net_worklist_len;
	int syncer_final_iter;
	int first_printf;
	int error;

	last_work_seen = 0;
	syncer_final_iter = 0;
	first_printf = 1;
	syncer_state = SYNCER_RUNNING;
	starttime = time_uptime;
	td->td_pflags |= TDP_NORUNNINGBUF;

	EVENTHANDLER_REGISTER(shutdown_pre_sync, syncer_shutdown, td->td_proc,
	    SHUTDOWN_PRI_LAST);

	mtx_lock(&sync_mtx);
	for (;;) {
		if (syncer_state == SYNCER_FINAL_DELAY &&
		    syncer_final_iter == 0) {
			mtx_unlock(&sync_mtx);
			kproc_suspend_check(td->td_proc);
			mtx_lock(&sync_mtx);
		}
		net_worklist_len = syncer_worklist_len - sync_vnode_count;
		if (syncer_state != SYNCER_RUNNING &&
		    starttime != time_uptime) {
			if (first_printf) {
				printf("\nSyncing disks, vnodes remaining...");
				first_printf = 0;
			}
			printf("%d ", net_worklist_len);
		}
		starttime = time_uptime;

		/*
		 * Push files whose dirty time has expired.  Be careful
		 * of interrupt race on slp queue.
		 *
		 * Skip over empty worklist slots when shutting down.
		 */
		do {
			slp = &syncer_workitem_pending[WI_MPSAFEQ][syncer_delayno];
			gslp = &syncer_workitem_pending[WI_GIANTQ][syncer_delayno];
			syncer_delayno += 1;
			if (syncer_delayno == syncer_maxdelay)
				syncer_delayno = 0;
			next = &syncer_workitem_pending[WI_MPSAFEQ][syncer_delayno];
			gnext = &syncer_workitem_pending[WI_GIANTQ][syncer_delayno];
			/*
			 * If the worklist has wrapped since it was
			 * emptied of all but syncer vnodes, switch to
			 * the FINAL_DELAY state and run for one more
			 * second.
			 */
1779			if (syncer_state == SYNCER_SHUTTING_DOWN &&
1780			    net_worklist_len == 0 &&
1781			    last_work_seen == syncer_delayno) {
1782				syncer_state = SYNCER_FINAL_DELAY;
1783				syncer_final_iter = SYNCER_SHUTDOWN_SPEEDUP;
1784			}
1785		} while (syncer_state != SYNCER_RUNNING && LIST_EMPTY(slp) &&
1786		    LIST_EMPTY(gslp) && syncer_worklist_len > 0);
1787
1788		/*
1789		 * Keep track of the last time there was anything
1790		 * on the worklist other than syncer vnodes.
1791		 * Return to the SHUTTING_DOWN state if any
1792		 * new work appears.
1793		 */
1794		if (net_worklist_len > 0 || syncer_state == SYNCER_RUNNING)
1795			last_work_seen = syncer_delayno;
1796		if (net_worklist_len > 0 && syncer_state == SYNCER_FINAL_DELAY)
1797			syncer_state = SYNCER_SHUTTING_DOWN;
1798		while (!LIST_EMPTY(slp)) {
1799			error = sync_vnode(slp, &bo, td);
1800			if (error == 1) {
1801				LIST_REMOVE(bo, bo_synclist);
1802				LIST_INSERT_HEAD(next, bo, bo_synclist);
1803				continue;
1804			}
1805		}
1806		if (!LIST_EMPTY(gslp)) {
1807			mtx_unlock(&sync_mtx);
1808			mtx_lock(&Giant);
1809			mtx_lock(&sync_mtx);
1810			while (!LIST_EMPTY(gslp)) {
1811				error = sync_vnode(gslp, &bo, td);
1812				if (error == 1) {
1813					LIST_REMOVE(bo, bo_synclist);
1814					LIST_INSERT_HEAD(gnext, bo,
1815					    bo_synclist);
1816					continue;
1817				}
1818			}
1819			mtx_unlock(&Giant);
1820		}
1821		if (syncer_state == SYNCER_FINAL_DELAY && syncer_final_iter > 0)
1822			syncer_final_iter--;
1823		/*
1824		 * The variable rushjob allows the kernel to speed up the
1825		 * processing of the filesystem syncer process. A rushjob
1826		 * value of N tells the filesystem syncer to process the next
1827		 * N seconds worth of work on its queue ASAP. Currently rushjob
1828		 * is used by the soft update code to speed up the filesystem
1829		 * syncer process when the incore state is getting so far
1830		 * ahead of the disk that the kernel memory pool is being
1831		 * threatened with exhaustion.
1832		 */
1833		if (rushjob > 0) {
1834			rushjob -= 1;
1835			continue;
1836		}
1837		/*
1838		 * Just sleep for a short period of time between
1839		 * iterations when shutting down to allow some I/O
1840		 * to happen.
1841		 *
1842		 * If it has taken us less than a second to process the
1843		 * current work, then wait. Otherwise start right over
1844		 * again. We can still lose time if any single round
1845		 * takes more than two seconds, but it does not really
1846		 * matter as we are just trying to generally pace the
1847		 * filesystem activity.
1848		 */
1849		if (syncer_state != SYNCER_RUNNING)
1850			cv_timedwait(&sync_wakeup, &sync_mtx,
1851			    hz / SYNCER_SHUTDOWN_SPEEDUP);
1852		else if (time_uptime == starttime)
1853			cv_timedwait(&sync_wakeup, &sync_mtx, hz);
1854	}
1855}
1856
1857/*
1858 * Request the syncer daemon to speed up its work.
1859 * We never push it to speed up more than half of its
1860 * normal turn time, otherwise it could take over the cpu.
1861 */
1862int
1863speedup_syncer(void)
1864{
1865	int ret = 0;
1866
1867	mtx_lock(&sync_mtx);
1868	if (rushjob < syncdelay / 2) {
1869		rushjob += 1;
1870		stat_rush_requests += 1;
1871		ret = 1;
1872	}
1873	mtx_unlock(&sync_mtx);
1874	cv_broadcast(&sync_wakeup);
1875	return (ret);
1876}
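
/*
 * Illustrative sketch (not part of the original file): a subsystem that
 * notices in-core dirty state building up can nudge the syncer along
 * with speedup_syncer().  The function and message below are
 * hypothetical; only the speedup_syncer() call itself is real.
 */
#if 0
static void
request_syncer_speedup_example(void)
{

	/*
	 * Ask the syncer to process one extra second of its worklist.
	 * A return of 0 means rushjob already reached syncdelay / 2 and
	 * the request was ignored.
	 */
	if (speedup_syncer() == 0)
		printf("syncer already running at maximum speedup\n");
}
#endif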
1877
1878/*
1879 * Tell the syncer to speed up its work and run through its work
1880 * list several times, then tell it to shut down.
1881 */
1882static void
1883syncer_shutdown(void *arg, int howto)
1884{
1885
1886	if (howto & RB_NOSYNC)
1887		return;
1888	mtx_lock(&sync_mtx);
1889	syncer_state = SYNCER_SHUTTING_DOWN;
1890	rushjob = 0;
1891	mtx_unlock(&sync_mtx);
1892	cv_broadcast(&sync_wakeup);
1893	kproc_shutdown(arg, howto);
1894}
1895
1896/*
1897 * Reassign a buffer from one vnode to another.
1898 * Used to assign file specific control information
1899 * (indirect blocks) to the vnode to which they belong.
1900 */
1901void
1902reassignbuf(struct buf *bp)
1903{
1904	struct vnode *vp;
1905	struct bufobj *bo;
1906	int delay;
1907#ifdef INVARIANTS
1908	struct bufv *bv;
1909#endif
1910
1911	vp = bp->b_vp;
1912	bo = bp->b_bufobj;
1913	++reassignbufcalls;
1914
1915	CTR3(KTR_BUF, "reassignbuf(%p) vp %p flags %X",
1916	    bp, bp->b_vp, bp->b_flags);
1917	/*
1918	 * B_PAGING flagged buffers cannot be reassigned because their vp
1919	 * is not fully linked in.
1920	 */
1921	if (bp->b_flags & B_PAGING)
1922		panic("cannot reassign paging buffer");
1923
1924	/*
1925	 * Delete from old vnode list, if on one.
1926	 */
1927	BO_LOCK(bo);
1928	if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN))
1929		buf_vlist_remove(bp);
1930	else
1931		panic("reassignbuf: Buffer %p not on queue.", bp);
1932	/*
1933	 * If dirty, put on list of dirty buffers; otherwise insert onto list
1934	 * of clean buffers.
1935	 */
1936	if (bp->b_flags & B_DELWRI) {
1937		if ((bo->bo_flag & BO_ONWORKLST) == 0) {
1938			switch (vp->v_type) {
1939			case VDIR:
1940				delay = dirdelay;
1941				break;
1942			case VCHR:
1943				delay = metadelay;
1944				break;
1945			default:
1946				delay = filedelay;
1947			}
1948			vn_syncer_add_to_worklist(bo, delay);
1949		}
1950		buf_vlist_add(bp, bo, BX_VNDIRTY);
1951	} else {
1952		buf_vlist_add(bp, bo, BX_VNCLEAN);
1953
1954		if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) {
1955			mtx_lock(&sync_mtx);
1956			LIST_REMOVE(bo, bo_synclist);
1957			syncer_worklist_len--;
1958			mtx_unlock(&sync_mtx);
1959			bo->bo_flag &= ~BO_ONWORKLST;
1960		}
1961	}
1962#ifdef INVARIANTS
1963	bv = &bo->bo_clean;
1964	bp = TAILQ_FIRST(&bv->bv_hd);
1965	KASSERT(bp == NULL || bp->b_bufobj == bo,
1966	    ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo));
1967	bp = TAILQ_LAST(&bv->bv_hd, buflists);
1968	KASSERT(bp == NULL || bp->b_bufobj == bo,
1969	    ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo));
1970	bv = &bo->bo_dirty;
1971	bp = TAILQ_FIRST(&bv->bv_hd);
1972	KASSERT(bp == NULL || bp->b_bufobj == bo,
1973	    ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo));
1974	bp = TAILQ_LAST(&bv->bv_hd, buflists);
1975	KASSERT(bp == NULL || bp->b_bufobj == bo,
1976	    ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo));
1977#endif
1978	BO_UNLOCK(bo);
1979}
1980
1981/*
1982 * Increment the use and hold counts on the vnode, taking care to reference
1983 * the driver's usecount if this is a chardev.  The vholdl() will remove
1984 * the vnode from the free list if it is presently free.  Requires the
1985 * vnode interlock and returns with it held.
1986 */
1987static void
1988v_incr_usecount(struct vnode *vp)
1989{
1990
1991	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
1992	vp->v_usecount++;
1993	if (vp->v_type == VCHR && vp->v_rdev != NULL) {
1994		dev_lock();
1995		vp->v_rdev->si_usecount++;
1996		dev_unlock();
1997	}
1998	vholdl(vp);
1999}
2000
2001/*
2002 * Turn a holdcnt into a use+holdcnt such that only one call to
2003 * v_decr_usecount is needed.
2004 */
2005static void
2006v_upgrade_usecount(struct vnode *vp)
2007{
2008
2009	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
2010	vp->v_usecount++;
2011	if (vp->v_type == VCHR && vp->v_rdev != NULL) {
2012		dev_lock();
2013		vp->v_rdev->si_usecount++;
2014		dev_unlock();
2015	}
2016}
2017
2018/*
2019 * Decrement the vnode use and hold count along with the driver's usecount
2020 * if this is a chardev.  The vdropl() below releases the vnode interlock
2021 * as it may free the vnode.
2022 */
2023static void
2024v_decr_usecount(struct vnode *vp)
2025{
2026
2027	ASSERT_VI_LOCKED(vp, __FUNCTION__);
2028	VNASSERT(vp->v_usecount > 0, vp,
2029	    ("v_decr_usecount: negative usecount"));
2030	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
2031	vp->v_usecount--;
2032	if (vp->v_type == VCHR && vp->v_rdev != NULL) {
2033		dev_lock();
2034		vp->v_rdev->si_usecount--;
2035		dev_unlock();
2036	}
2037	vdropl(vp);
2038}
2039
2040/*
2041 * Decrement only the use count and driver use count.  This is intended to
2042 * be paired with a follow on vdropl() to release the remaining hold count.
2043 * In this way we may vgone() a vnode with a 0 usecount without risk of
2044 * having it end up on a free list because the hold count is kept above 0.
2045 */
2046static void
2047v_decr_useonly(struct vnode *vp)
2048{
2049
2050	ASSERT_VI_LOCKED(vp, __FUNCTION__);
2051	VNASSERT(vp->v_usecount > 0, vp,
2052	    ("v_decr_useonly: negative usecount"));
2053	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
2054	vp->v_usecount--;
2055	if (vp->v_type == VCHR && vp->v_rdev != NULL) {
2056		dev_lock();
2057		vp->v_rdev->si_usecount--;
2058		dev_unlock();
2059	}
2060}
2061
2062/*
2063 * Grab a particular vnode from the free list, increment its
2064 * reference count and lock it.  VI_DOOMED is set if the vnode
2065 * is being destroyed.  Only callers who specify LK_RETRY will
2066 * see doomed vnodes.  If inactive processing was delayed in
2067 * vput, try to do it here.
2068 */
2069int
2070vget(struct vnode *vp, int flags, struct thread *td)
2071{
2072	int error;
2073
2074	error = 0;
2075	VFS_ASSERT_GIANT(vp->v_mount);
2076	VNASSERT((flags & LK_TYPE_MASK) != 0, vp,
2077	    ("vget: invalid lock operation"));
2078	CTR3(KTR_VFS, "%s: vp %p with flags %d", __func__, vp, flags);
2079
2080	if ((flags & LK_INTERLOCK) == 0)
2081		VI_LOCK(vp);
2082	vholdl(vp);
2083	if ((error = vn_lock(vp, flags | LK_INTERLOCK)) != 0) {
2084		vdrop(vp);
2085		CTR2(KTR_VFS, "%s: impossible to lock vnode %p", __func__,
2086		    vp);
2087		return (error);
2088	}
2089	if (vp->v_iflag & VI_DOOMED && (flags & LK_RETRY) == 0)
2090		panic("vget: vn_lock failed to return ENOENT\n");
2091	VI_LOCK(vp);
2092	/* Upgrade our holdcnt to a usecount. */
2093	v_upgrade_usecount(vp);
2094	/*
2095	 * We don't guarantee that any particular close will
2096	 * trigger inactive processing so just make a best effort
2097	 * here at preventing a reference to a removed file.  If
2098	 * we don't succeed no harm is done.
2099	 */
2100	if (vp->v_iflag & VI_OWEINACT) {
2101		if (VOP_ISLOCKED(vp) == LK_EXCLUSIVE &&
2102		    (flags & LK_NOWAIT) == 0)
2103			vinactive(vp, td);
2104		vp->v_iflag &= ~VI_OWEINACT;
2105	}
2106	VI_UNLOCK(vp);
2107	return (0);
2108}
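
/*
 * Illustrative sketch (not part of the original file): the usual
 * vget()/vput() pairing.  A caller that found "vp" on some list takes a
 * use reference and the vnode lock in one step and must cope with
 * failure unless it passes LK_RETRY.  do_something_locked() is a
 * hypothetical placeholder.
 */
#if 0
static int
vget_example(struct vnode *vp, struct thread *td)
{
	int error;

	error = vget(vp, LK_EXCLUSIVE, td);
	if (error != 0)
		return (error);	/* Doomed vnode or failed lock. */
	do_something_locked(vp);
	vput(vp);		/* Drops both the lock and the usecount. */
	return (0);
}
#endif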
2109
2110/*
2111 * Increase the reference count of a vnode.
2112 */
2113void
2114vref(struct vnode *vp)
2115{
2116
2117	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
2118	VI_LOCK(vp);
2119	v_incr_usecount(vp);
2120	VI_UNLOCK(vp);
2121}
2122
2123/*
2124 * Return reference count of a vnode.
2125 *
2126 * The results of this call are only guaranteed when some mechanism other
2127 * than the VI lock is used to stop other processes from gaining references
2128 * to the vnode.  This may be the case if the caller holds the only reference.
2129 * This is also useful when stale data is acceptable as race conditions may
2130 * be accounted for by some other means.
2131 */
2132int
2133vrefcnt(struct vnode *vp)
2134{
2135	int usecnt;
2136
2137	VI_LOCK(vp);
2138	usecnt = vp->v_usecount;
2139	VI_UNLOCK(vp);
2140
2141	return (usecnt);
2142}
2143
2144
2145/*
2146 * Vnode put/release.
2147 * If count drops to zero, call inactive routine and return to freelist.
2148 */
2149void
2150vrele(struct vnode *vp)
2151{
2152	struct thread *td = curthread;	/* XXX */
2153
2154	KASSERT(vp != NULL, ("vrele: null vp"));
2155	VFS_ASSERT_GIANT(vp->v_mount);
2156
2157	VI_LOCK(vp);
2158
2159	/* Skip this v_writecount check if we're going to panic below. */
2160	VNASSERT(vp->v_writecount < vp->v_usecount || vp->v_usecount < 1, vp,
2161	    ("vrele: missed vn_close"));
2162	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
2163
2164	if (vp->v_usecount > 1 || ((vp->v_iflag & VI_DOINGINACT) &&
2165	    vp->v_usecount == 1)) {
2166		v_decr_usecount(vp);
2167		return;
2168	}
2169	if (vp->v_usecount != 1) {
2170#ifdef DIAGNOSTIC
2171		vprint("vrele: negative ref count", vp);
2172#endif
2173		VI_UNLOCK(vp);
2174		panic("vrele: negative ref cnt");
2175	}
2176	CTR2(KTR_VFS, "%s: return vnode %p to the freelist", __func__, vp);
2177	/*
2178	 * We want to hold the vnode until the inactive finishes to
2179	 * prevent vgone() races.  We drop the use count here and the
2180	 * hold count below when we're done.
2181	 */
2182	v_decr_useonly(vp);
2183	/*
2184	 * We must call VOP_INACTIVE with the node locked. Mark
2185	 * as VI_DOINGINACT to avoid recursion.
2186	 */
2187	vp->v_iflag |= VI_OWEINACT;
2188	if (vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK) == 0) {
2189		VI_LOCK(vp);
2190		if (vp->v_usecount > 0)
2191			vp->v_iflag &= ~VI_OWEINACT;
2192		if (vp->v_iflag & VI_OWEINACT)
2193			vinactive(vp, td);
2194		VOP_UNLOCK(vp, 0);
2195	} else {
2196		VI_LOCK(vp);
2197		if (vp->v_usecount > 0)
2198			vp->v_iflag &= ~VI_OWEINACT;
2199	}
2200	vdropl(vp);
2201}
2202
2203/*
2204 * Release an already locked vnode.  This gives the same effect as
2205 * unlock+vrele(), but takes less time and avoids releasing and
2206 * re-acquiring the lock (as vrele() acquires the lock internally).
2207 */
2208void
2209vput(struct vnode *vp)
2210{
2211	struct thread *td = curthread;	/* XXX */
2212	int error;
2213
2214	KASSERT(vp != NULL, ("vput: null vp"));
2215	ASSERT_VOP_LOCKED(vp, "vput");
2216	VFS_ASSERT_GIANT(vp->v_mount);
2217	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
2218	VI_LOCK(vp);
2219	/* Skip this v_writecount check if we're going to panic below. */
2220	VNASSERT(vp->v_writecount < vp->v_usecount || vp->v_usecount < 1, vp,
2221	    ("vput: missed vn_close"));
2222	error = 0;
2223
2224	if (vp->v_usecount > 1 || ((vp->v_iflag & VI_DOINGINACT) &&
2225	    vp->v_usecount == 1)) {
2226		VOP_UNLOCK(vp, 0);
2227		v_decr_usecount(vp);
2228		return;
2229	}
2230
2231	if (vp->v_usecount != 1) {
2232#ifdef DIAGNOSTIC
2233		vprint("vput: negative ref count", vp);
2234#endif
2235		panic("vput: negative ref cnt");
2236	}
2237	CTR2(KTR_VFS, "%s: return to freelist the vnode %p", __func__, vp);
2238	/*
2239	 * We want to hold the vnode until the inactive finishes to
2240	 * prevent vgone() races.  We drop the use count here and the
2241	 * hold count below when we're done.
2242	 */
2243	v_decr_useonly(vp);
2244	vp->v_iflag |= VI_OWEINACT;
2245	if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) {
2246		error = VOP_LOCK(vp, LK_UPGRADE|LK_INTERLOCK|LK_NOWAIT);
2247		VI_LOCK(vp);
2248		if (error) {
2249			if (vp->v_usecount > 0)
2250				vp->v_iflag &= ~VI_OWEINACT;
2251			goto done;
2252		}
2253	}
2254	if (vp->v_usecount > 0)
2255		vp->v_iflag &= ~VI_OWEINACT;
2256	if (vp->v_iflag & VI_OWEINACT)
2257		vinactive(vp, td);
2258	VOP_UNLOCK(vp, 0);
2259done:
2260	vdropl(vp);
2261}
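
/*
 * Illustrative sketch (not part of the original file): vput() is just
 * the combination of unlocking and releasing, so the two helpers below
 * are equivalent for a locked, referenced vnode; the vput() form is
 * preferred because vrele() would re-acquire the lock internally.
 */
#if 0
static void
release_with_vrele_example(struct vnode *vp)
{

	VOP_UNLOCK(vp, 0);
	vrele(vp);
}

static void
release_with_vput_example(struct vnode *vp)
{

	vput(vp);
}
#endif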
2262
2263/*
2264 * Somebody doesn't want the vnode recycled.
2265 */
2266void
2267vhold(struct vnode *vp)
2268{
2269
2270	VI_LOCK(vp);
2271	vholdl(vp);
2272	VI_UNLOCK(vp);
2273}
2274
2275void
2276vholdl(struct vnode *vp)
2277{
2278
2279	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
2280	vp->v_holdcnt++;
2281	if (VSHOULDBUSY(vp))
2282		vbusy(vp);
2283}
2284
2285/*
2286 * Note that there is one less who cares about this vnode.  vdrop() is the
2287 * opposite of vhold().
2288 */
2289void
2290vdrop(struct vnode *vp)
2291{
2292
2293	VI_LOCK(vp);
2294	vdropl(vp);
2295}
2296
2297/*
2298 * Drop the hold count of the vnode.  If this is the last reference to
2299 * the vnode, we will free it if it has been vgone'd; otherwise it is
2300 * placed on the free list.
2301 */
2302void
2303vdropl(struct vnode *vp)
2304{
2305
2306	ASSERT_VI_LOCKED(vp, "vdropl");
2307	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
2308	if (vp->v_holdcnt <= 0)
2309		panic("vdrop: holdcnt %d", vp->v_holdcnt);
2310	vp->v_holdcnt--;
2311	if (vp->v_holdcnt == 0) {
2312		if (vp->v_iflag & VI_DOOMED) {
2313			CTR2(KTR_VFS, "%s: destroying the vnode %p", __func__,
2314			    vp);
2315			vdestroy(vp);
2316			return;
2317		} else
2318			vfree(vp);
2319	}
2320	VI_UNLOCK(vp);
2321}
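
/*
 * Illustrative sketch (not part of the original file): vhold()/vdrop()
 * keep a vnode from being freed or recycled while the caller blocks,
 * without taking a use reference or the vnode lock.  vflush() below
 * uses the same pattern; blocking_operation() is a hypothetical
 * placeholder.
 */
#if 0
static void
vhold_example(struct vnode *vp)
{

	vhold(vp);			/* Vnode may not be reused ... */
	blocking_operation(vp);		/* ... even if we sleep here. */
	vdrop(vp);			/* May return it to the free list. */
}
#endif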
2322
2323/*
2324 * Call VOP_INACTIVE on the vnode and manage the DOINGINACT and OWEINACT
2325 * flags.  DOINGINACT prevents us from recursing in calls to vinactive.
2326 * OWEINACT tracks whether a vnode missed a call to inactive due to a
2327 * failed lock upgrade.
2328 */
2329static void
2330vinactive(struct vnode *vp, struct thread *td)
2331{
2332
2333	ASSERT_VOP_ELOCKED(vp, "vinactive");
2334	ASSERT_VI_LOCKED(vp, "vinactive");
2335	VNASSERT((vp->v_iflag & VI_DOINGINACT) == 0, vp,
2336	    ("vinactive: recursed on VI_DOINGINACT"));
2337	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
2338	vp->v_iflag |= VI_DOINGINACT;
2339	vp->v_iflag &= ~VI_OWEINACT;
2340	VI_UNLOCK(vp);
2341	VOP_INACTIVE(vp, td);
2342	VI_LOCK(vp);
2343	VNASSERT(vp->v_iflag & VI_DOINGINACT, vp,
2344	    ("vinactive: lost VI_DOINGINACT"));
2345	vp->v_iflag &= ~VI_DOINGINACT;
2346}
2347
2348/*
2349 * Remove any vnodes in the vnode table belonging to mount point mp.
2350 *
2351 * If FORCECLOSE is not specified, there should not be any active ones,
2352 * return error if any are found (nb: this is a user error, not a
2353 * system error). If FORCECLOSE is specified, detach any active vnodes
2354 * that are found.
2355 *
2356 * If WRITECLOSE is set, only flush out regular file vnodes open for
2357 * writing.
2358 *
2359 * SKIPSYSTEM causes any vnodes marked VV_SYSTEM to be skipped.
2360 *
2361 * `rootrefs' specifies the base reference count for the root vnode
2362 * of this filesystem. The root vnode is considered busy if its
2363 * v_usecount exceeds this value. On a successful return, vflush()
2364 * will call vrele() on the root vnode exactly rootrefs times.
2365 * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must
2366 * be zero.
2367 */
2368#ifdef DIAGNOSTIC
2369static int busyprt = 0;		/* print out busy vnodes */
2370SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "");
2371#endif
2372
2373int
2374vflush(struct mount *mp, int rootrefs, int flags, struct thread *td)
2375{
2376	struct vnode *vp, *mvp, *rootvp = NULL;
2377	struct vattr vattr;
2378	int busy = 0, error;
2379
2380	CTR4(KTR_VFS, "%s: mp %p with rootrefs %d and flags %d", __func__, mp,
2381	    rootrefs, flags);
2382	if (rootrefs > 0) {
2383		KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0,
2384		    ("vflush: bad args"));
2385		/*
2386		 * Get the filesystem root vnode. We can vput() it
2387		 * immediately, since with rootrefs > 0, it won't go away.
2388		 */
2389		if ((error = VFS_ROOT(mp, LK_EXCLUSIVE, &rootvp)) != 0) {
2390			CTR2(KTR_VFS, "%s: vfs_root lookup failed with %d",
2391			    __func__, error);
2392			return (error);
2393		}
2394		vput(rootvp);
2395
2396	}
2397	MNT_ILOCK(mp);
2398loop:
2399	MNT_VNODE_FOREACH(vp, mp, mvp) {
2400
2401		VI_LOCK(vp);
2402		vholdl(vp);
2403		MNT_IUNLOCK(mp);
2404		error = vn_lock(vp, LK_INTERLOCK | LK_EXCLUSIVE);
2405		if (error) {
2406			vdrop(vp);
2407			MNT_ILOCK(mp);
2408			MNT_VNODE_FOREACH_ABORT_ILOCKED(mp, mvp);
2409			goto loop;
2410		}
2411		/*
2412		 * Skip over vnodes marked VV_SYSTEM.
2413		 */
2414		if ((flags & SKIPSYSTEM) && (vp->v_vflag & VV_SYSTEM)) {
2415			VOP_UNLOCK(vp, 0);
2416			vdrop(vp);
2417			MNT_ILOCK(mp);
2418			continue;
2419		}
2420		/*
2421		 * If WRITECLOSE is set, flush out unlinked but still open
2422		 * files (even if open only for reading) and regular file
2423		 * vnodes open for writing.
2424		 */
2425		if (flags & WRITECLOSE) {
2426			error = VOP_GETATTR(vp, &vattr, td->td_ucred);
2427			VI_LOCK(vp);
2428
2429			if ((vp->v_type == VNON ||
2430			    (error == 0 && vattr.va_nlink > 0)) &&
2431			    (vp->v_writecount == 0 || vp->v_type != VREG)) {
2432				VOP_UNLOCK(vp, 0);
2433				vdropl(vp);
2434				MNT_ILOCK(mp);
2435				continue;
2436			}
2437		} else
2438			VI_LOCK(vp);
2439		/*
2440		 * With v_usecount == 0, all we need to do is clear out the
2441		 * vnode data structures and we are done.
2442		 *
2443		 * If FORCECLOSE is set, forcibly close the vnode.
2444		 */
2445		if (vp->v_usecount == 0 || (flags & FORCECLOSE)) {
2446			VNASSERT(vp->v_usecount == 0 ||
2447			    (vp->v_type != VCHR && vp->v_type != VBLK), vp,
2448			    ("device VNODE %p is FORCECLOSED", vp));
2449			vgonel(vp);
2450		} else {
2451			busy++;
2452#ifdef DIAGNOSTIC
2453			if (busyprt)
2454				vprint("vflush: busy vnode", vp);
2455#endif
2456		}
2457		VOP_UNLOCK(vp, 0);
2458		vdropl(vp);
2459		MNT_ILOCK(mp);
2460	}
2461	MNT_IUNLOCK(mp);
2462	if (rootrefs > 0 && (flags & FORCECLOSE) == 0) {
2463		/*
2464		 * If just the root vnode is busy, and if its refcount
2465		 * is equal to `rootrefs', then go ahead and kill it.
2466		 */
2467		VI_LOCK(rootvp);
2468		KASSERT(busy > 0, ("vflush: not busy"));
2469		VNASSERT(rootvp->v_usecount >= rootrefs, rootvp,
2470		    ("vflush: usecount %d < rootrefs %d",
2471		     rootvp->v_usecount, rootrefs));
2472		if (busy == 1 && rootvp->v_usecount == rootrefs) {
2473			VOP_LOCK(rootvp, LK_EXCLUSIVE|LK_INTERLOCK);
2474			vgone(rootvp);
2475			VOP_UNLOCK(rootvp, 0);
2476			busy = 0;
2477		} else
2478			VI_UNLOCK(rootvp);
2479	}
2480	if (busy) {
2481		CTR2(KTR_VFS, "%s: failing as %d vnodes are busy", __func__,
2482		    busy);
2483		return (EBUSY);
2484	}
2485	for (; rootrefs > 0; rootrefs--)
2486		vrele(rootvp);
2487	return (0);
2488}
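
/*
 * Illustrative sketch (not part of the original file): an unmount path
 * typically flushes its vnodes with vflush(), passing FORCECLOSE for a
 * forced unmount so busy vnodes are reclaimed instead of returning
 * EBUSY.  The examplefs_flushfiles() wrapper is hypothetical.
 */
#if 0
static int
examplefs_flushfiles(struct mount *mp, int mntflags, struct thread *td)
{
	int flags;

	flags = 0;
	if (mntflags & MNT_FORCE)
		flags |= FORCECLOSE;
	/* No extra root vnode references are held, so rootrefs is 0. */
	return (vflush(mp, 0, flags, td));
}
#endif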
2489
2490/*
2491 * Recycle an unused vnode to the front of the free list.
2492 */
2493int
2494vrecycle(struct vnode *vp, struct thread *td)
2495{
2496	int recycled;
2497
2498	ASSERT_VOP_ELOCKED(vp, "vrecycle");
2499	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
2500	recycled = 0;
2501	VI_LOCK(vp);
2502	if (vp->v_usecount == 0) {
2503		recycled = 1;
2504		vgonel(vp);
2505	}
2506	VI_UNLOCK(vp);
2507	return (recycled);
2508}
2509
2510/*
2511 * Eliminate all activity associated with a vnode
2512 * in preparation for reuse.
2513 */
2514void
2515vgone(struct vnode *vp)
2516{
2517	VI_LOCK(vp);
2518	vgonel(vp);
2519	VI_UNLOCK(vp);
2520}
2521
2522/*
2523 * vgone, with the vp interlock held.
2524 */
2525void
2526vgonel(struct vnode *vp)
2527{
2528	struct thread *td;
2529	int oweinact;
2530	int active;
2531	struct mount *mp;
2532
2533	ASSERT_VOP_ELOCKED(vp, "vgonel");
2534	ASSERT_VI_LOCKED(vp, "vgonel");
2535	VNASSERT(vp->v_holdcnt, vp,
2536	    ("vgonel: vp %p has no reference.", vp));
2537	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
2538	td = curthread;
2539
2540	/*
2541	 * Don't vgonel if we're already doomed.
2542	 */
2543	if (vp->v_iflag & VI_DOOMED)
2544		return;
2545	vp->v_iflag |= VI_DOOMED;
2546	/*
2547	 * Check to see if the vnode is in use.  If so, we have to call
2548	 * VOP_CLOSE() and VOP_INACTIVE().
2549	 */
2550	active = vp->v_usecount;
2551	oweinact = (vp->v_iflag & VI_OWEINACT);
2552	VI_UNLOCK(vp);
2553	/*
2554	 * Clean out any buffers associated with the vnode.
2555	 * If the flush fails, just toss the buffers.
2556	 */
2557	mp = NULL;
2558	if (!TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd))
2559		(void) vn_start_secondary_write(vp, &mp, V_WAIT);
2560	if (vinvalbuf(vp, V_SAVE, 0, 0) != 0)
2561		vinvalbuf(vp, 0, 0, 0);
2562
2563	/*
2564	 * If purging an active vnode, it must be closed and
2565	 * deactivated before being reclaimed.
2566	 */
2567	if (active)
2568		VOP_CLOSE(vp, FNONBLOCK, NOCRED, td);
2569	if (oweinact || active) {
2570		VI_LOCK(vp);
2571		if ((vp->v_iflag & VI_DOINGINACT) == 0)
2572			vinactive(vp, td);
2573		VI_UNLOCK(vp);
2574	}
2575	/*
2576	 * Reclaim the vnode.
2577	 */
2578	if (VOP_RECLAIM(vp, td))
2579		panic("vgone: cannot reclaim");
2580	if (mp != NULL)
2581		vn_finished_secondary_write(mp);
2582	VNASSERT(vp->v_object == NULL, vp,
2583	    ("vop_reclaim left v_object vp=%p, tag=%s", vp, vp->v_tag));
2584	/*
2585	 * Clear the advisory locks and wake up waiting threads.
2586	 */
2587	lf_purgelocks(vp, &(vp->v_lockf));
2588	/*
2589	 * Delete from old mount point vnode list.
2590	 */
2591	delmntque(vp);
2592	cache_purge(vp);
2593	/*
2594	 * Done with purge, reset to the standard lock and invalidate
2595	 * the vnode.
2596	 */
2597	VI_LOCK(vp);
2598	vp->v_vnlock = &vp->v_lock;
2599	vp->v_op = &dead_vnodeops;
2600	vp->v_tag = "none";
2601	vp->v_type = VBAD;
2602}
2603
2604/*
2605 * Calculate the total number of references to a special device.
2606 */
2607int
2608vcount(struct vnode *vp)
2609{
2610	int count;
2611
2612	dev_lock();
2613	count = vp->v_rdev->si_usecount;
2614	dev_unlock();
2615	return (count);
2616}
2617
2618/*
2619 * Same as above, but using the struct cdev *as argument
2620 */
2621int
2622count_dev(struct cdev *dev)
2623{
2624	int count;
2625
2626	dev_lock();
2627	count = dev->si_usecount;
2628	dev_unlock();
2629	return(count);
2630}
2631
2632/*
2633 * Print out a description of a vnode.
2634 */
2635static char *typename[] =
2636{"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD",
2637 "VMARKER"};
2638
2639void
2640vn_printf(struct vnode *vp, const char *fmt, ...)
2641{
2642	va_list ap;
2643	char buf[256], buf2[16];
2644	u_long flags;
2645
2646	va_start(ap, fmt);
2647	vprintf(fmt, ap);
2648	va_end(ap);
2649	printf("%p: ", (void *)vp);
2650	printf("tag %s, type %s\n", vp->v_tag, typename[vp->v_type]);
2651	printf("    usecount %d, writecount %d, refcount %d mountedhere %p\n",
2652	    vp->v_usecount, vp->v_writecount, vp->v_holdcnt, vp->v_mountedhere);
2653	buf[0] = '\0';
2654	buf[1] = '\0';
2655	if (vp->v_vflag & VV_ROOT)
2656		strlcat(buf, "|VV_ROOT", sizeof(buf));
2657	if (vp->v_vflag & VV_ISTTY)
2658		strlcat(buf, "|VV_ISTTY", sizeof(buf));
2659	if (vp->v_vflag & VV_NOSYNC)
2660		strlcat(buf, "|VV_NOSYNC", sizeof(buf));
2661	if (vp->v_vflag & VV_CACHEDLABEL)
2662		strlcat(buf, "|VV_CACHEDLABEL", sizeof(buf));
2663	if (vp->v_vflag & VV_TEXT)
2664		strlcat(buf, "|VV_TEXT", sizeof(buf));
2665	if (vp->v_vflag & VV_COPYONWRITE)
2666		strlcat(buf, "|VV_COPYONWRITE", sizeof(buf));
2667	if (vp->v_vflag & VV_SYSTEM)
2668		strlcat(buf, "|VV_SYSTEM", sizeof(buf));
2669	if (vp->v_vflag & VV_PROCDEP)
2670		strlcat(buf, "|VV_PROCDEP", sizeof(buf));
2671	if (vp->v_vflag & VV_NOKNOTE)
2672		strlcat(buf, "|VV_NOKNOTE", sizeof(buf));
2673	if (vp->v_vflag & VV_DELETED)
2674		strlcat(buf, "|VV_DELETED", sizeof(buf));
2675	if (vp->v_vflag & VV_MD)
2676		strlcat(buf, "|VV_MD", sizeof(buf));
2677	flags = vp->v_vflag & ~(VV_ROOT | VV_ISTTY | VV_NOSYNC |
2678	    VV_CACHEDLABEL | VV_TEXT | VV_COPYONWRITE | VV_SYSTEM | VV_PROCDEP |
2679	    VV_NOKNOTE | VV_DELETED | VV_MD);
2680	if (flags != 0) {
2681		snprintf(buf2, sizeof(buf2), "|VV(0x%lx)", flags);
2682		strlcat(buf, buf2, sizeof(buf));
2683	}
2684	if (vp->v_iflag & VI_MOUNT)
2685		strlcat(buf, "|VI_MOUNT", sizeof(buf));
2686	if (vp->v_iflag & VI_AGE)
2687		strlcat(buf, "|VI_AGE", sizeof(buf));
2688	if (vp->v_iflag & VI_DOOMED)
2689		strlcat(buf, "|VI_DOOMED", sizeof(buf));
2690	if (vp->v_iflag & VI_FREE)
2691		strlcat(buf, "|VI_FREE", sizeof(buf));
2692	if (vp->v_iflag & VI_OBJDIRTY)
2693		strlcat(buf, "|VI_OBJDIRTY", sizeof(buf));
2694	if (vp->v_iflag & VI_DOINGINACT)
2695		strlcat(buf, "|VI_DOINGINACT", sizeof(buf));
2696	if (vp->v_iflag & VI_OWEINACT)
2697		strlcat(buf, "|VI_OWEINACT", sizeof(buf));
2698	flags = vp->v_iflag & ~(VI_MOUNT | VI_AGE | VI_DOOMED | VI_FREE |
2699	    VI_OBJDIRTY | VI_DOINGINACT | VI_OWEINACT);
2700	if (flags != 0) {
2701		snprintf(buf2, sizeof(buf2), "|VI(0x%lx)", flags);
2702		strlcat(buf, buf2, sizeof(buf));
2703	}
2704	printf("    flags (%s)\n", buf + 1);
2705	if (mtx_owned(VI_MTX(vp)))
2706		printf(" VI_LOCKed");
2707	if (vp->v_object != NULL)
2708		printf("    v_object %p ref %d pages %d\n",
2709		    vp->v_object, vp->v_object->ref_count,
2710		    vp->v_object->resident_page_count);
2711	printf("    ");
2712	lockmgr_printinfo(vp->v_vnlock);
2713	if (vp->v_data != NULL)
2714		VOP_PRINT(vp);
2715}
2716
2717#ifdef DDB
2718/*
2719 * List all of the locked vnodes in the system.
2720 * Called when debugging the kernel.
2721 */
2722DB_SHOW_COMMAND(lockedvnods, lockedvnodes)
2723{
2724	struct mount *mp, *nmp;
2725	struct vnode *vp;
2726
2727	/*
2728	 * Note: because this is DDB, we can't obey the locking semantics
2729	 * for these structures, which means we could catch an inconsistent
2730	 * state and dereference a nasty pointer.  Not much to be done
2731	 * about that.
2732	 */
2733	db_printf("Locked vnodes\n");
2734	for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
2735		nmp = TAILQ_NEXT(mp, mnt_list);
2736		TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
2737			if (vp->v_type != VMARKER &&
2738			    VOP_ISLOCKED(vp))
2739				vprint("", vp);
2740		}
2742	}
2743}
2744
2745/*
2746 * Show details about the given vnode.
2747 */
2748DB_SHOW_COMMAND(vnode, db_show_vnode)
2749{
2750	struct vnode *vp;
2751
2752	if (!have_addr)
2753		return;
2754	vp = (struct vnode *)addr;
2755	vn_printf(vp, "vnode ");
2756}
2757
2758/*
2759 * Show details about the given mount point.
2760 */
2761DB_SHOW_COMMAND(mount, db_show_mount)
2762{
2763	struct mount *mp;
2764	struct statfs *sp;
2765	struct vnode *vp;
2766	char buf[512];
2767	u_int flags;
2768
2769	if (!have_addr) {
2770		/* No address given, print short info about all mount points. */
2771		TAILQ_FOREACH(mp, &mountlist, mnt_list) {
2772			db_printf("%p %s on %s (%s)\n", mp,
2773			    mp->mnt_stat.f_mntfromname,
2774			    mp->mnt_stat.f_mntonname,
2775			    mp->mnt_stat.f_fstypename);
2776			if (db_pager_quit)
2777				break;
2778		}
2779		db_printf("\nMore info: show mount <addr>\n");
2780		return;
2781	}
2782
2783	mp = (struct mount *)addr;
2784	db_printf("%p %s on %s (%s)\n", mp, mp->mnt_stat.f_mntfromname,
2785	    mp->mnt_stat.f_mntonname, mp->mnt_stat.f_fstypename);
2786
2787	buf[0] = '\0';
2788	flags = mp->mnt_flag;
2789#define	MNT_FLAG(flag)	do {						\
2790	if (flags & (flag)) {						\
2791		if (buf[0] != '\0')					\
2792			strlcat(buf, ", ", sizeof(buf));		\
2793		strlcat(buf, (#flag) + 4, sizeof(buf));			\
2794		flags &= ~(flag);					\
2795	}								\
2796} while (0)
2797	MNT_FLAG(MNT_RDONLY);
2798	MNT_FLAG(MNT_SYNCHRONOUS);
2799	MNT_FLAG(MNT_NOEXEC);
2800	MNT_FLAG(MNT_NOSUID);
2801	MNT_FLAG(MNT_UNION);
2802	MNT_FLAG(MNT_ASYNC);
2803	MNT_FLAG(MNT_SUIDDIR);
2804	MNT_FLAG(MNT_SOFTDEP);
2805	MNT_FLAG(MNT_NOSYMFOLLOW);
2806	MNT_FLAG(MNT_GJOURNAL);
2807	MNT_FLAG(MNT_MULTILABEL);
2808	MNT_FLAG(MNT_ACLS);
2809	MNT_FLAG(MNT_NOATIME);
2810	MNT_FLAG(MNT_NOCLUSTERR);
2811	MNT_FLAG(MNT_NOCLUSTERW);
2812	MNT_FLAG(MNT_EXRDONLY);
2813	MNT_FLAG(MNT_EXPORTED);
2814	MNT_FLAG(MNT_DEFEXPORTED);
2815	MNT_FLAG(MNT_EXPORTANON);
2816	MNT_FLAG(MNT_EXKERB);
2817	MNT_FLAG(MNT_EXPUBLIC);
2818	MNT_FLAG(MNT_LOCAL);
2819	MNT_FLAG(MNT_QUOTA);
2820	MNT_FLAG(MNT_ROOTFS);
2821	MNT_FLAG(MNT_USER);
2822	MNT_FLAG(MNT_IGNORE);
2823	MNT_FLAG(MNT_UPDATE);
2824	MNT_FLAG(MNT_DELEXPORT);
2825	MNT_FLAG(MNT_RELOAD);
2826	MNT_FLAG(MNT_FORCE);
2827	MNT_FLAG(MNT_SNAPSHOT);
2828	MNT_FLAG(MNT_BYFSID);
2829#undef MNT_FLAG
2830	if (flags != 0) {
2831		if (buf[0] != '\0')
2832			strlcat(buf, ", ", sizeof(buf));
2833		snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
2834		    "0x%08x", flags);
2835	}
2836	db_printf("    mnt_flag = %s\n", buf);
2837
2838	buf[0] = '\0';
2839	flags = mp->mnt_kern_flag;
2840#define	MNT_KERN_FLAG(flag)	do {					\
2841	if (flags & (flag)) {						\
2842		if (buf[0] != '\0')					\
2843			strlcat(buf, ", ", sizeof(buf));		\
2844		strlcat(buf, (#flag) + 5, sizeof(buf));			\
2845		flags &= ~(flag);					\
2846	}								\
2847} while (0)
2848	MNT_KERN_FLAG(MNTK_UNMOUNTF);
2849	MNT_KERN_FLAG(MNTK_ASYNC);
2850	MNT_KERN_FLAG(MNTK_SOFTDEP);
2851	MNT_KERN_FLAG(MNTK_NOINSMNTQ);
2852	MNT_KERN_FLAG(MNTK_UNMOUNT);
2853	MNT_KERN_FLAG(MNTK_MWAIT);
2854	MNT_KERN_FLAG(MNTK_SUSPEND);
2855	MNT_KERN_FLAG(MNTK_SUSPEND2);
2856	MNT_KERN_FLAG(MNTK_SUSPENDED);
2857	MNT_KERN_FLAG(MNTK_MPSAFE);
2858	MNT_KERN_FLAG(MNTK_NOKNOTE);
2859	MNT_KERN_FLAG(MNTK_LOOKUP_SHARED);
2860#undef MNT_KERN_FLAG
2861	if (flags != 0) {
2862		if (buf[0] != '\0')
2863			strlcat(buf, ", ", sizeof(buf));
2864		snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
2865		    "0x%08x", flags);
2866	}
2867	db_printf("    mnt_kern_flag = %s\n", buf);
2868
2869	sp = &mp->mnt_stat;
2870	db_printf("    mnt_stat = { version=%u type=%u flags=0x%016jx "
2871	    "bsize=%ju iosize=%ju blocks=%ju bfree=%ju bavail=%jd files=%ju "
2872	    "ffree=%jd syncwrites=%ju asyncwrites=%ju syncreads=%ju "
2873	    "asyncreads=%ju namemax=%u owner=%u fsid=[%d, %d] }\n",
2874	    (u_int)sp->f_version, (u_int)sp->f_type, (uintmax_t)sp->f_flags,
2875	    (uintmax_t)sp->f_bsize, (uintmax_t)sp->f_iosize,
2876	    (uintmax_t)sp->f_blocks, (uintmax_t)sp->f_bfree,
2877	    (intmax_t)sp->f_bavail, (uintmax_t)sp->f_files,
2878	    (intmax_t)sp->f_ffree, (uintmax_t)sp->f_syncwrites,
2879	    (uintmax_t)sp->f_asyncwrites, (uintmax_t)sp->f_syncreads,
2880	    (uintmax_t)sp->f_asyncreads, (u_int)sp->f_namemax,
2881	    (u_int)sp->f_owner, (int)sp->f_fsid.val[0], (int)sp->f_fsid.val[1]);
2882
2883	db_printf("    mnt_cred = { uid=%u ruid=%u",
2884	    (u_int)mp->mnt_cred->cr_uid, (u_int)mp->mnt_cred->cr_ruid);
2885	if (jailed(mp->mnt_cred))
2886		db_printf(", jail=%d", mp->mnt_cred->cr_prison->pr_id);
2887	db_printf(" }\n");
2888	db_printf("    mnt_ref = %d\n", mp->mnt_ref);
2889	db_printf("    mnt_gen = %d\n", mp->mnt_gen);
2890	db_printf("    mnt_nvnodelistsize = %d\n", mp->mnt_nvnodelistsize);
2891	db_printf("    mnt_writeopcount = %d\n", mp->mnt_writeopcount);
2892	db_printf("    mnt_noasync = %u\n", mp->mnt_noasync);
2893	db_printf("    mnt_maxsymlinklen = %d\n", mp->mnt_maxsymlinklen);
2894	db_printf("    mnt_iosize_max = %d\n", mp->mnt_iosize_max);
2895	db_printf("    mnt_hashseed = %u\n", mp->mnt_hashseed);
2896	db_printf("    mnt_secondary_writes = %d\n", mp->mnt_secondary_writes);
2897	db_printf("    mnt_secondary_accwrites = %d\n",
2898	    mp->mnt_secondary_accwrites);
2899	db_printf("    mnt_gjprovider = %s\n",
2900	    mp->mnt_gjprovider != NULL ? mp->mnt_gjprovider : "NULL");
2901	db_printf("\n");
2902
2903	TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
2904		if (vp->v_type != VMARKER) {
2905			vn_printf(vp, "vnode ");
2906			if (db_pager_quit)
2907				break;
2908		}
2909	}
2910}
2911#endif	/* DDB */
2912
2913/*
2914 * Fill in a struct xvfsconf based on a struct vfsconf.
2915 */
2916static void
2917vfsconf2x(struct vfsconf *vfsp, struct xvfsconf *xvfsp)
2918{
2919
2920	strcpy(xvfsp->vfc_name, vfsp->vfc_name);
2921	xvfsp->vfc_typenum = vfsp->vfc_typenum;
2922	xvfsp->vfc_refcount = vfsp->vfc_refcount;
2923	xvfsp->vfc_flags = vfsp->vfc_flags;
2924	/*
2925	 * These are unused in userland; we keep them
2926	 * to not break binary compatibility.
2927	 */
2928	xvfsp->vfc_vfsops = NULL;
2929	xvfsp->vfc_next = NULL;
2930}
2931
2932/*
2933 * Top level filesystem related information gathering.
2934 */
2935static int
2936sysctl_vfs_conflist(SYSCTL_HANDLER_ARGS)
2937{
2938	struct vfsconf *vfsp;
2939	struct xvfsconf xvfsp;
2940	int error;
2941
2942	error = 0;
2943	TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) {
2944		bzero(&xvfsp, sizeof(xvfsp));
2945		vfsconf2x(vfsp, &xvfsp);
2946		error = SYSCTL_OUT(req, &xvfsp, sizeof xvfsp);
2947		if (error)
2948			break;
2949	}
2950	return (error);
2951}
2952
2953SYSCTL_PROC(_vfs, OID_AUTO, conflist, CTLFLAG_RD, NULL, 0, sysctl_vfs_conflist,
2954    "S,xvfsconf", "List of all configured filesystems");
2955
2956#ifndef BURN_BRIDGES
2957static int	sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS);
2958
2959static int
2960vfs_sysctl(SYSCTL_HANDLER_ARGS)
2961{
2962	int *name = (int *)arg1 - 1;	/* XXX */
2963	u_int namelen = arg2 + 1;	/* XXX */
2964	struct vfsconf *vfsp;
2965	struct xvfsconf xvfsp;
2966
2967	printf("WARNING: userland calling deprecated sysctl, "
2968	    "please rebuild world\n");
2969
2970#if 1 || defined(COMPAT_PRELITE2)
2971	/* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. */
2972	if (namelen == 1)
2973		return (sysctl_ovfs_conf(oidp, arg1, arg2, req));
2974#endif
2975
2976	switch (name[1]) {
2977	case VFS_MAXTYPENUM:
2978		if (namelen != 2)
2979			return (ENOTDIR);
2980		return (SYSCTL_OUT(req, &maxvfsconf, sizeof(int)));
2981	case VFS_CONF:
2982		if (namelen != 3)
2983			return (ENOTDIR);	/* overloaded */
2984		TAILQ_FOREACH(vfsp, &vfsconf, vfc_list)
2985			if (vfsp->vfc_typenum == name[2])
2986				break;
2987		if (vfsp == NULL)
2988			return (EOPNOTSUPP);
2989		bzero(&xvfsp, sizeof(xvfsp));
2990		vfsconf2x(vfsp, &xvfsp);
2991		return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp)));
2992	}
2993	return (EOPNOTSUPP);
2994}
2995
2996static SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD | CTLFLAG_SKIP,
2997	vfs_sysctl, "Generic filesystem");
2998
2999#if 1 || defined(COMPAT_PRELITE2)
3000
3001static int
3002sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS)
3003{
3004	int error;
3005	struct vfsconf *vfsp;
3006	struct ovfsconf ovfs;
3007
3008	TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) {
3009		bzero(&ovfs, sizeof(ovfs));
3010		ovfs.vfc_vfsops = vfsp->vfc_vfsops;	/* XXX used as flag */
3011		strcpy(ovfs.vfc_name, vfsp->vfc_name);
3012		ovfs.vfc_index = vfsp->vfc_typenum;
3013		ovfs.vfc_refcount = vfsp->vfc_refcount;
3014		ovfs.vfc_flags = vfsp->vfc_flags;
3015		error = SYSCTL_OUT(req, &ovfs, sizeof ovfs);
3016		if (error)
3017			return error;
3018	}
3019	return 0;
3020}
3021
3022#endif /* 1 || COMPAT_PRELITE2 */
3023#endif /* !BURN_BRIDGES */
3024
3025#define KINFO_VNODESLOP		10
3026#ifdef notyet
3027/*
3028 * Dump vnode list (via sysctl).
3029 */
3030/* ARGSUSED */
3031static int
3032sysctl_vnode(SYSCTL_HANDLER_ARGS)
3033{
3034	struct xvnode *xvn;
3035	struct mount *mp;
3036	struct vnode *vp;
3037	int error, len, n;
3038
3039	/*
3040	 * Stale numvnodes access is not fatal here.
3041	 */
3042	req->lock = 0;
3043	len = (numvnodes + KINFO_VNODESLOP) * sizeof *xvn;
3044	if (!req->oldptr)
3045		/* Make an estimate */
3046		return (SYSCTL_OUT(req, 0, len));
3047
3048	error = sysctl_wire_old_buffer(req, 0);
3049	if (error != 0)
3050		return (error);
3051	xvn = malloc(len, M_TEMP, M_ZERO | M_WAITOK);
3052	n = 0;
3053	mtx_lock(&mountlist_mtx);
3054	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
3055		if (vfs_busy(mp, MBF_NOWAIT | MBF_MNTLSTLOCK))
3056			continue;
3057		MNT_ILOCK(mp);
3058		TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
3059			if (n == len)
3060				break;
3061			vref(vp);
3062			xvn[n].xv_size = sizeof *xvn;
3063			xvn[n].xv_vnode = vp;
3064			xvn[n].xv_id = 0;	/* XXX compat */
3065#define XV_COPY(field) xvn[n].xv_##field = vp->v_##field
3066			XV_COPY(usecount);
3067			XV_COPY(writecount);
3068			XV_COPY(holdcnt);
3069			XV_COPY(mount);
3070			XV_COPY(numoutput);
3071			XV_COPY(type);
3072#undef XV_COPY
3073			xvn[n].xv_flag = vp->v_vflag;
3074
3075			switch (vp->v_type) {
3076			case VREG:
3077			case VDIR:
3078			case VLNK:
3079				break;
3080			case VBLK:
3081			case VCHR:
3082				if (vp->v_rdev == NULL) {
3083					vrele(vp);
3084					continue;
3085				}
3086				xvn[n].xv_dev = dev2udev(vp->v_rdev);
3087				break;
3088			case VSOCK:
3089				xvn[n].xv_socket = vp->v_socket;
3090				break;
3091			case VFIFO:
3092				xvn[n].xv_fifo = vp->v_fifoinfo;
3093				break;
3094			case VNON:
3095			case VBAD:
3096			default:
3097				/* shouldn't happen? */
3098				vrele(vp);
3099				continue;
3100			}
3101			vrele(vp);
3102			++n;
3103		}
3104		MNT_IUNLOCK(mp);
3105		mtx_lock(&mountlist_mtx);
3106		vfs_unbusy(mp);
3107		if (n == len)
3108			break;
3109	}
3110	mtx_unlock(&mountlist_mtx);
3111
3112	error = SYSCTL_OUT(req, xvn, n * sizeof *xvn);
3113	free(xvn, M_TEMP);
3114	return (error);
3115}
3116
3117SYSCTL_PROC(_kern, KERN_VNODE, vnode, CTLTYPE_OPAQUE|CTLFLAG_RD,
3118	0, 0, sysctl_vnode, "S,xvnode", "");
3119#endif
3120
3121/*
3122 * Unmount all filesystems. The list is traversed in reverse order
3123 * of mounting to avoid dependencies.
3124 */
3125void
3126vfs_unmountall(void)
3127{
3128	struct mount *mp;
3129	struct thread *td;
3130	int error;
3131
3132	KASSERT(curthread != NULL, ("vfs_unmountall: NULL curthread"));
3133	CTR1(KTR_VFS, "%s: unmounting all filesystems", __func__);
3134	td = curthread;
3135
3136	/*
3137	 * Since this only runs when rebooting, it is not interlocked.
3138	 */
3139	while (!TAILQ_EMPTY(&mountlist)) {
3140		mp = TAILQ_LAST(&mountlist, mntlist);
3141		error = dounmount(mp, MNT_FORCE, td);
3142		if (error) {
3143			TAILQ_REMOVE(&mountlist, mp, mnt_list);
3144			/*
3145			 * XXX: Due to the way in which we mount the root
3146			 * file system off of devfs, devfs will generate a
3147			 * "busy" warning when we try to unmount it before
3148			 * the root.  Don't print a warning as a result in
3149			 * order to avoid false positive errors that may
3150			 * cause needless upset.
3151			 */
3152			if (strcmp(mp->mnt_vfc->vfc_name, "devfs") != 0) {
3153				printf("unmount of %s failed (",
3154				    mp->mnt_stat.f_mntonname);
3155				if (error == EBUSY)
3156					printf("BUSY)\n");
3157				else
3158					printf("%d)\n", error);
3159			}
3160		} else {
3161			/* The unmount has removed mp from the mountlist */
3162		}
3163	}
3164}
3165
3166/*
3167 * Perform msync on all vnodes under a mount point.
3168 * The mount point must be locked.
3169 */
3170void
3171vfs_msync(struct mount *mp, int flags)
3172{
3173	struct vnode *vp, *mvp;
3174	struct vm_object *obj;
3175
3176	CTR2(KTR_VFS, "%s: mp %p", __func__, mp);
3177	MNT_ILOCK(mp);
3178	MNT_VNODE_FOREACH(vp, mp, mvp) {
3179		VI_LOCK(vp);
3180		if ((vp->v_iflag & VI_OBJDIRTY) &&
3181		    (flags == MNT_WAIT || VOP_ISLOCKED(vp) == 0)) {
3182			MNT_IUNLOCK(mp);
3183			if (!vget(vp,
3184			    LK_EXCLUSIVE | LK_RETRY | LK_INTERLOCK,
3185			    curthread)) {
3186				if (vp->v_vflag & VV_NOSYNC) {	/* unlinked */
3187					vput(vp);
3188					MNT_ILOCK(mp);
3189					continue;
3190				}
3191
3192				obj = vp->v_object;
3193				if (obj != NULL) {
3194					VM_OBJECT_LOCK(obj);
3195					vm_object_page_clean(obj, 0, 0,
3196					    flags == MNT_WAIT ?
3197					    OBJPC_SYNC : OBJPC_NOSYNC);
3198					VM_OBJECT_UNLOCK(obj);
3199				}
3200				vput(vp);
3201			}
3202			MNT_ILOCK(mp);
3203		} else
3204			VI_UNLOCK(vp);
3205	}
3206	MNT_IUNLOCK(mp);
3207}
3208
3209/*
3210 * Mark a vnode as free, putting it up for recycling.
3211 */
3212static void
3213vfree(struct vnode *vp)
3214{
3215
3216	ASSERT_VI_LOCKED(vp, "vfree");
3217	mtx_lock(&vnode_free_list_mtx);
3218	VNASSERT(vp->v_op != NULL, vp, ("vfree: vnode already reclaimed."));
3219	VNASSERT((vp->v_iflag & VI_FREE) == 0, vp, ("vnode already free"));
3220	VNASSERT(VSHOULDFREE(vp), vp, ("vfree: freeing when we shouldn't"));
3221	VNASSERT((vp->v_iflag & VI_DOOMED) == 0, vp,
3222	    ("vfree: Freeing doomed vnode"));
3223	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
3224	if (vp->v_iflag & VI_AGE) {
3225		TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
3226	} else {
3227		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
3228	}
3229	freevnodes++;
3230	vp->v_iflag &= ~VI_AGE;
3231	vp->v_iflag |= VI_FREE;
3232	mtx_unlock(&vnode_free_list_mtx);
3233}
3234
3235/*
3236 * Opposite of vfree() - mark a vnode as in use.
3237 */
3238static void
3239vbusy(struct vnode *vp)
3240{
3241	ASSERT_VI_LOCKED(vp, "vbusy");
3242	VNASSERT((vp->v_iflag & VI_FREE) != 0, vp, ("vnode not free"));
3243	VNASSERT(vp->v_op != NULL, vp, ("vbusy: vnode already reclaimed."));
3244	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
3245
3246	mtx_lock(&vnode_free_list_mtx);
3247	TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
3248	freevnodes--;
3249	vp->v_iflag &= ~(VI_FREE|VI_AGE);
3250	mtx_unlock(&vnode_free_list_mtx);
3251}
3252
3253static void
3254destroy_vpollinfo(struct vpollinfo *vi)
3255{
3256	knlist_destroy(&vi->vpi_selinfo.si_note);
3257	mtx_destroy(&vi->vpi_lock);
3258	uma_zfree(vnodepoll_zone, vi);
3259}
3260
3261/*
3262 * Initialize per-vnode helper structure to hold poll-related state.
3263 */
3264void
3265v_addpollinfo(struct vnode *vp)
3266{
3267	struct vpollinfo *vi;
3268
3269	if (vp->v_pollinfo != NULL)
3270		return;
3271	vi = uma_zalloc(vnodepoll_zone, M_WAITOK);
3272	mtx_init(&vi->vpi_lock, "vnode pollinfo", NULL, MTX_DEF);
3273	knlist_init(&vi->vpi_selinfo.si_note, vp, vfs_knllock,
3274	    vfs_knlunlock, vfs_knl_assert_locked, vfs_knl_assert_unlocked);
3275	VI_LOCK(vp);
3276	if (vp->v_pollinfo != NULL) {
3277		VI_UNLOCK(vp);
3278		destroy_vpollinfo(vi);
3279		return;
3280	}
3281	vp->v_pollinfo = vi;
3282	VI_UNLOCK(vp);
3283}
3284
3285/*
3286 * Record a process's interest in events which might happen to
3287 * a vnode.  Because poll uses the historic select-style interface
3288 * internally, this routine serves as both the ``check for any
3289 * pending events'' and the ``record my interest in future events''
3290 * functions.  (These are done together, while the lock is held,
3291 * to avoid race conditions.)
3292 */
3293int
3294vn_pollrecord(struct vnode *vp, struct thread *td, int events)
3295{
3296
3297	v_addpollinfo(vp);
3298	mtx_lock(&vp->v_pollinfo->vpi_lock);
3299	if (vp->v_pollinfo->vpi_revents & events) {
3300		/*
3301		 * This leaves events we are not interested
3302		 * in available for the other process which
3303		 * presumably had requested them
3304		 * (otherwise they would never have been
3305		 * recorded).
3306		 */
3307		events &= vp->v_pollinfo->vpi_revents;
3308		vp->v_pollinfo->vpi_revents &= ~events;
3309
3310		mtx_unlock(&vp->v_pollinfo->vpi_lock);
3311		return (events);
3312	}
3313	vp->v_pollinfo->vpi_events |= events;
3314	selrecord(td, &vp->v_pollinfo->vpi_selinfo);
3315	mtx_unlock(&vp->v_pollinfo->vpi_lock);
3316	return (0);
3317}
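
/*
 * Illustrative sketch (not part of the original file): a filesystem's
 * poll handler commonly just delegates to vn_pollrecord() for events it
 * cannot answer immediately.  The examplefs_poll() wrapper is
 * hypothetical.
 */
#if 0
static int
examplefs_poll(struct vnode *vp, int events, struct thread *td)
{

	/*
	 * Either returns the subset of "events" that have already been
	 * posted on the vnode, or records our interest and returns 0.
	 */
	return (vn_pollrecord(vp, td, events));
}
#endif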
3318
3319/*
3320 * Routine to create and manage a filesystem syncer vnode.
3321 */
3322#define sync_close ((int (*)(struct  vop_close_args *))nullop)
3323static int	sync_fsync(struct  vop_fsync_args *);
3324static int	sync_inactive(struct  vop_inactive_args *);
3325static int	sync_reclaim(struct  vop_reclaim_args *);
3326
3327static struct vop_vector sync_vnodeops = {
3328	.vop_bypass =	VOP_EOPNOTSUPP,
3329	.vop_close =	sync_close,		/* close */
3330	.vop_fsync =	sync_fsync,		/* fsync */
3331	.vop_inactive =	sync_inactive,	/* inactive */
3332	.vop_reclaim =	sync_reclaim,	/* reclaim */
3333	.vop_lock1 =	vop_stdlock,	/* lock */
3334	.vop_unlock =	vop_stdunlock,	/* unlock */
3335	.vop_islocked =	vop_stdislocked,	/* islocked */
3336};
3337
3338/*
3339 * Create a new filesystem syncer vnode for the specified mount point.
3340 */
3341int
3342vfs_allocate_syncvnode(struct mount *mp)
3343{
3344	struct vnode *vp;
3345	struct bufobj *bo;
3346	static long start, incr, next;
3347	int error;
3348
3349	/* Allocate a new vnode */
3350	if ((error = getnewvnode("syncer", mp, &sync_vnodeops, &vp)) != 0) {
3351		mp->mnt_syncer = NULL;
3352		return (error);
3353	}
3354	vp->v_type = VNON;
3355	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
3356	vp->v_vflag |= VV_FORCEINSMQ;
3357	error = insmntque(vp, mp);
3358	if (error != 0)
3359		panic("vfs_allocate_syncvnode: insmntque failed");
3360	vp->v_vflag &= ~VV_FORCEINSMQ;
3361	VOP_UNLOCK(vp, 0);
3362	/*
3363	 * Place the vnode onto the syncer worklist. We attempt to
3364	 * scatter them about on the list so that they will go off
3365	 * at evenly distributed times even if all the filesystems
3366	 * are mounted at once.
3367	 */
3368	next += incr;
3369	if (next == 0 || next > syncer_maxdelay) {
3370		start /= 2;
3371		incr /= 2;
3372		if (start == 0) {
3373			start = syncer_maxdelay / 2;
3374			incr = syncer_maxdelay;
3375		}
3376		next = start;
3377	}
3378	bo = &vp->v_bufobj;
3379	BO_LOCK(bo);
3380	vn_syncer_add_to_worklist(bo, syncdelay > 0 ? next % syncdelay : 0);
3381	/* XXX - vn_syncer_add_to_worklist() also grabs and drops sync_mtx. */
3382	mtx_lock(&sync_mtx);
3383	sync_vnode_count++;
3384	mtx_unlock(&sync_mtx);
3385	BO_UNLOCK(bo);
3386	mp->mnt_syncer = vp;
3387	return (0);
3388}
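
/*
 * Worked example (not part of the original file, assuming the
 * historical defaults syncer_maxdelay == 32 and syncdelay == 30):
 * successive mounts get next = 16, 8, 24, 4, 12, 20, 28, 2, 6, ...
 * from the scattering code above, so each new syncer vnode lands
 * roughly halfway between offsets already in use and per-mount syncs
 * stay spread across the worklist wheel.
 */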
3389
3390/*
3391 * Do a lazy sync of the filesystem.
3392 */
3393static int
3394sync_fsync(struct vop_fsync_args *ap)
3395{
3396	struct vnode *syncvp = ap->a_vp;
3397	struct mount *mp = syncvp->v_mount;
3398	int error;
3399	struct bufobj *bo;
3400
3401	/*
3402	 * We only need to do something if this is a lazy evaluation.
3403	 */
3404	if (ap->a_waitfor != MNT_LAZY)
3405		return (0);
3406
3407	/*
3408	 * Move ourselves to the back of the sync list.
3409	 */
3410	bo = &syncvp->v_bufobj;
3411	BO_LOCK(bo);
3412	vn_syncer_add_to_worklist(bo, syncdelay);
3413	BO_UNLOCK(bo);
3414
3415	/*
3416	 * Walk the list of vnodes pushing all that are dirty and
3417	 * not already on the sync list.
3418	 */
3419	mtx_lock(&mountlist_mtx);
3420	if (vfs_busy(mp, MBF_NOWAIT | MBF_MNTLSTLOCK) != 0) {
3421		mtx_unlock(&mountlist_mtx);
3422		return (0);
3423	}
3424	if (vn_start_write(NULL, &mp, V_NOWAIT) != 0) {
3425		vfs_unbusy(mp);
3426		return (0);
3427	}
3428	MNT_ILOCK(mp);
3429	mp->mnt_noasync++;
3430	mp->mnt_kern_flag &= ~MNTK_ASYNC;
3431	MNT_IUNLOCK(mp);
3432	vfs_msync(mp, MNT_NOWAIT);
3433	error = VFS_SYNC(mp, MNT_LAZY);
3434	MNT_ILOCK(mp);
3435	mp->mnt_noasync--;
3436	if ((mp->mnt_flag & MNT_ASYNC) != 0 && mp->mnt_noasync == 0)
3437		mp->mnt_kern_flag |= MNTK_ASYNC;
3438	MNT_IUNLOCK(mp);
3439	vn_finished_write(mp);
3440	vfs_unbusy(mp);
3441	return (error);
3442}
3443
3444/*
3445 * The syncer vnode is no longer referenced.
3446 */
3447static int
3448sync_inactive(struct vop_inactive_args *ap)
3449{
3450
3451	vgone(ap->a_vp);
3452	return (0);
3453}
3454
3455/*
3456 * The syncer vnode is no longer needed and is being decommissioned.
3457 *
3458 * Modifications to the worklist must be protected by sync_mtx.
3459 */
3460static int
3461sync_reclaim(struct vop_reclaim_args *ap)
3462{
3463	struct vnode *vp = ap->a_vp;
3464	struct bufobj *bo;
3465
3466	bo = &vp->v_bufobj;
3467	BO_LOCK(bo);
3468	vp->v_mount->mnt_syncer = NULL;
3469	if (bo->bo_flag & BO_ONWORKLST) {
3470		mtx_lock(&sync_mtx);
3471		LIST_REMOVE(bo, bo_synclist);
3472		syncer_worklist_len--;
3473		sync_vnode_count--;
3474		mtx_unlock(&sync_mtx);
3475		bo->bo_flag &= ~BO_ONWORKLST;
3476	}
3477	BO_UNLOCK(bo);
3478
3479	return (0);
3480}
3481
3482/*
3483 * Check if vnode represents a disk device
3484 */
3485int
3486vn_isdisk(struct vnode *vp, int *errp)
3487{
3488	int error;
3489
3490	error = 0;
3491	dev_lock();
3492	if (vp->v_type != VCHR)
3493		error = ENOTBLK;
3494	else if (vp->v_rdev == NULL)
3495		error = ENXIO;
3496	else if (vp->v_rdev->si_devsw == NULL)
3497		error = ENXIO;
3498	else if (!(vp->v_rdev->si_devsw->d_flags & D_DISK))
3499		error = ENOTBLK;
3500	dev_unlock();
3501	if (errp != NULL)
3502		*errp = error;
3503	return (error == 0);
3504}
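
/*
 * Illustrative sketch (not part of the original file): callers that
 * require a disk device, such as a mount routine inspecting its device
 * vnode, typically use vn_isdisk() as below.  The helper name is
 * hypothetical.
 */
#if 0
static int
require_disk_example(struct vnode *devvp)
{
	int error;

	if (!vn_isdisk(devvp, &error))
		return (error);	/* ENOTBLK or ENXIO from vn_isdisk(). */
	return (0);
}
#endif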
3505
3506/*
3507 * Common filesystem object access control check routine.  Accepts a
3508 * vnode's type, "mode", uid and gid, requested access mode, credentials,
3509 * and optional call-by-reference privused argument allowing vaccess()
3510 * to indicate to the caller whether privilege was used to satisfy the
3511 * request (obsoleted).  Returns 0 on success, or an errno on failure.
3512 *
3513 * The ifdef'd CAPABILITIES version is here for reference, but is not
3514 * actually used.
3515 */
3516int
3517vaccess(enum vtype type, mode_t file_mode, uid_t file_uid, gid_t file_gid,
3518    accmode_t accmode, struct ucred *cred, int *privused)
3519{
3520	accmode_t dac_granted;
3521	accmode_t priv_granted;
3522
3523	/*
3524	 * Look for a normal, non-privileged way to access the file/directory
3525	 * as requested.  If it exists, go with that.
3526	 */
3527
3528	if (privused != NULL)
3529		*privused = 0;
3530
3531	dac_granted = 0;
3532
3533	/* Check the owner. */
3534	if (cred->cr_uid == file_uid) {
3535		dac_granted |= VADMIN;
3536		if (file_mode & S_IXUSR)
3537			dac_granted |= VEXEC;
3538		if (file_mode & S_IRUSR)
3539			dac_granted |= VREAD;
3540		if (file_mode & S_IWUSR)
3541			dac_granted |= (VWRITE | VAPPEND);
3542
3543		if ((accmode & dac_granted) == accmode)
3544			return (0);
3545
3546		goto privcheck;
3547	}
3548
3549	/* Otherwise, check the groups (first match) */
3550	if (groupmember(file_gid, cred)) {
3551		if (file_mode & S_IXGRP)
3552			dac_granted |= VEXEC;
3553		if (file_mode & S_IRGRP)
3554			dac_granted |= VREAD;
3555		if (file_mode & S_IWGRP)
3556			dac_granted |= (VWRITE | VAPPEND);
3557
3558		if ((accmode & dac_granted) == accmode)
3559			return (0);
3560
3561		goto privcheck;
3562	}
3563
3564	/* Otherwise, check everyone else. */
3565	if (file_mode & S_IXOTH)
3566		dac_granted |= VEXEC;
3567	if (file_mode & S_IROTH)
3568		dac_granted |= VREAD;
3569	if (file_mode & S_IWOTH)
3570		dac_granted |= (VWRITE | VAPPEND);
3571	if ((accmode & dac_granted) == accmode)
3572		return (0);
3573
3574privcheck:
3575	/*
3576	 * Build a privilege mask to determine if the set of privileges
3577	 * satisfies the requirements when combined with the granted mask
3578	 * from above.  For each privilege, if the privilege is required,
3579	 * bitwise or the request type onto the priv_granted mask.
3580	 */
3581	priv_granted = 0;
3582
3583	if (type == VDIR) {
3584		/*
3585		 * For directories, use PRIV_VFS_LOOKUP to satisfy VEXEC
3586		 * requests, instead of PRIV_VFS_EXEC.
3587		 */
3588		if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) &&
3589		    !priv_check_cred(cred, PRIV_VFS_LOOKUP, 0))
3590			priv_granted |= VEXEC;
3591	} else {
3592		if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) &&
3593		    !priv_check_cred(cred, PRIV_VFS_EXEC, 0))
3594			priv_granted |= VEXEC;
3595	}
3596
3597	if ((accmode & VREAD) && ((dac_granted & VREAD) == 0) &&
3598	    !priv_check_cred(cred, PRIV_VFS_READ, 0))
3599		priv_granted |= VREAD;
3600
3601	if ((accmode & VWRITE) && ((dac_granted & VWRITE) == 0) &&
3602	    !priv_check_cred(cred, PRIV_VFS_WRITE, 0))
3603		priv_granted |= (VWRITE | VAPPEND);
3604
3605	if ((accmode & VADMIN) && ((dac_granted & VADMIN) == 0) &&
3606	    !priv_check_cred(cred, PRIV_VFS_ADMIN, 0))
3607		priv_granted |= VADMIN;
3608
3609	if ((accmode & (priv_granted | dac_granted)) == accmode) {
3610		/* XXX audit: privilege used */
3611		if (privused != NULL)
3612			*privused = 1;
3613		return (0);
3614	}
3615
3616	return ((accmode & VADMIN) ? EPERM : EACCES);
3617}
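
/*
 * Illustrative sketch (not part of the original file): a filesystem's
 * access method typically gathers the file's type, mode and ownership
 * and lets vaccess() decide.  For example, a request for VREAD | VWRITE
 * on a 0640 file succeeds for the owner, yields EACCES for an
 * unprivileged group member (only VREAD is granted), and otherwise
 * requires privilege.  The examplefs_access() wrapper and its arguments
 * are hypothetical.
 */
#if 0
static int
examplefs_access(struct vnode *vp, mode_t file_mode, uid_t file_uid,
    gid_t file_gid, accmode_t accmode, struct ucred *cred)
{

	return (vaccess(vp->v_type, file_mode, file_uid, file_gid, accmode,
	    cred, NULL));
}
#endif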
3618
3619/*
3620 * Credential check based on process requesting service, and per-attribute
3621 * permissions.
3622 */
3623int
3624extattr_check_cred(struct vnode *vp, int attrnamespace, struct ucred *cred,
3625    struct thread *td, accmode_t accmode)
3626{
3627
3628	/*
3629	 * Kernel-invoked always succeeds.
3630	 */
3631	if (cred == NOCRED)
3632		return (0);
3633
3634	/*
3635	 * Do not allow privileged processes in jail to directly manipulate
3636	 * system attributes.
3637	 */
3638	switch (attrnamespace) {
3639	case EXTATTR_NAMESPACE_SYSTEM:
3640		/* Potentially should be: return (EPERM); */
3641		return (priv_check_cred(cred, PRIV_VFS_EXTATTR_SYSTEM, 0));
3642	case EXTATTR_NAMESPACE_USER:
3643		return (VOP_ACCESS(vp, accmode, cred, td));
3644	default:
3645		return (EPERM);
3646	}
3647}
3648
3649#ifdef DEBUG_VFS_LOCKS
3650/*
3651 * This only exists to suppress warnings from unlocked specfs accesses.  It is
3652 * no longer ok to have an unlocked VFS.
3653 */
3654#define	IGNORE_LOCK(vp) (panicstr != NULL || (vp) == NULL ||		\
3655	(vp)->v_type == VCHR ||	(vp)->v_type == VBAD)
3656
3657int vfs_badlock_ddb = 1;	/* Drop into debugger on violation. */
3658SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_ddb, CTLFLAG_RW, &vfs_badlock_ddb, 0, "");
3659
3660int vfs_badlock_mutex = 1;	/* Check for interlock across VOPs. */
3661SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_mutex, CTLFLAG_RW, &vfs_badlock_mutex, 0, "");
3662
3663int vfs_badlock_print = 1;	/* Print lock violations. */
3664SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_print, CTLFLAG_RW, &vfs_badlock_print, 0, "");
3665
3666#ifdef KDB
3667int vfs_badlock_backtrace = 1;	/* Print backtrace at lock violations. */
3668SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_backtrace, CTLFLAG_RW, &vfs_badlock_backtrace, 0, "");
3669#endif
3670
3671static void
3672vfs_badlock(const char *msg, const char *str, struct vnode *vp)
3673{
3674
3675#ifdef KDB
3676	if (vfs_badlock_backtrace)
3677		kdb_backtrace();
3678#endif
3679	if (vfs_badlock_print)
3680		printf("%s: %p %s\n", str, (void *)vp, msg);
3681	if (vfs_badlock_ddb)
3682		kdb_enter(KDB_WHY_VFSLOCK, "lock violation");
3683}
3684
3685void
3686assert_vi_locked(struct vnode *vp, const char *str)
3687{
3688
3689	if (vfs_badlock_mutex && !mtx_owned(VI_MTX(vp)))
3690		vfs_badlock("interlock is not locked but should be", str, vp);
3691}
3692
3693void
3694assert_vi_unlocked(struct vnode *vp, const char *str)
3695{
3696
3697	if (vfs_badlock_mutex && mtx_owned(VI_MTX(vp)))
3698		vfs_badlock("interlock is locked but should not be", str, vp);
3699}
3700
3701void
3702assert_vop_locked(struct vnode *vp, const char *str)
3703{
3704
3705	if (!IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) == 0)
3706		vfs_badlock("is not locked but should be", str, vp);
3707}
3708
3709void
3710assert_vop_unlocked(struct vnode *vp, const char *str)
3711{
3712
3713	if (!IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) == LK_EXCLUSIVE)
3714		vfs_badlock("is locked but should not be", str, vp);
3715}
3716
3717void
3718assert_vop_elocked(struct vnode *vp, const char *str)
3719{
3720
3721	if (!IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) != LK_EXCLUSIVE)
3722		vfs_badlock("is not exclusive locked but should be", str, vp);
3723}
3724
3725#if 0
3726void
3727assert_vop_elocked_other(struct vnode *vp, const char *str)
3728{
3729
3730	if (!IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) != LK_EXCLOTHER)
3731		vfs_badlock("is not exclusive locked by another thread",
3732		    str, vp);
3733}
3734
3735void
3736assert_vop_slocked(struct vnode *vp, const char *str)
3737{
3738
3739	if (!IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) != LK_SHARED)
3740		vfs_badlock("is not locked shared but should be", str, vp);
3741}
3742#endif /* 0 */
3743#endif /* DEBUG_VFS_LOCKS */
3744
3745void
3746vop_rename_pre(void *ap)
3747{
3748	struct vop_rename_args *a = ap;
3749
3750#ifdef DEBUG_VFS_LOCKS
3751	if (a->a_tvp)
3752		ASSERT_VI_UNLOCKED(a->a_tvp, "VOP_RENAME");
3753	ASSERT_VI_UNLOCKED(a->a_tdvp, "VOP_RENAME");
3754	ASSERT_VI_UNLOCKED(a->a_fvp, "VOP_RENAME");
3755	ASSERT_VI_UNLOCKED(a->a_fdvp, "VOP_RENAME");
3756
3757	/* Check the source (from). */
3758	if (a->a_tdvp != a->a_fdvp && a->a_tvp != a->a_fdvp)
3759		ASSERT_VOP_UNLOCKED(a->a_fdvp, "vop_rename: fdvp locked");
3760	if (a->a_tvp != a->a_fvp)
3761		ASSERT_VOP_UNLOCKED(a->a_fvp, "vop_rename: fvp locked");
3762
3763	/* Check the target. */
3764	if (a->a_tvp)
3765		ASSERT_VOP_LOCKED(a->a_tvp, "vop_rename: tvp not locked");
3766	ASSERT_VOP_LOCKED(a->a_tdvp, "vop_rename: tdvp not locked");
3767#endif
3768	if (a->a_tdvp != a->a_fdvp)
3769		vhold(a->a_fdvp);
3770	if (a->a_tvp != a->a_fvp)
3771		vhold(a->a_fvp);
3772	vhold(a->a_tdvp);
3773	if (a->a_tvp)
3774		vhold(a->a_tvp);
3775}

void
vop_strategy_pre(void *ap)
{
#ifdef DEBUG_VFS_LOCKS
	struct vop_strategy_args *a;
	struct buf *bp;

	a = ap;
	bp = a->a_bp;

	/*
	 * Cluster ops lock their component buffers but not the IO container.
	 */
	if ((bp->b_flags & B_CLUSTER) != 0)
		return;

	if (!BUF_ISLOCKED(bp)) {
		if (vfs_badlock_print)
			printf(
			    "VOP_STRATEGY: bp is not locked but should be\n");
		if (vfs_badlock_ddb)
			kdb_enter(KDB_WHY_VFSLOCK, "lock violation");
	}
#endif
}

void
vop_lookup_pre(void *ap)
{
#ifdef DEBUG_VFS_LOCKS
	struct vop_lookup_args *a;
	struct vnode *dvp;

	a = ap;
	dvp = a->a_dvp;
	ASSERT_VI_UNLOCKED(dvp, "VOP_LOOKUP");
	ASSERT_VOP_LOCKED(dvp, "VOP_LOOKUP");
#endif
}

void
vop_lookup_post(void *ap, int rc)
{
#ifdef DEBUG_VFS_LOCKS
	struct vop_lookup_args *a;
	struct vnode *dvp;
	struct vnode *vp;

	a = ap;
	dvp = a->a_dvp;
	vp = *(a->a_vpp);

	ASSERT_VI_UNLOCKED(dvp, "VOP_LOOKUP");
	ASSERT_VOP_LOCKED(dvp, "VOP_LOOKUP");

	if (!rc)
		ASSERT_VOP_LOCKED(vp, "VOP_LOOKUP (child)");
#endif
}

void
vop_lock_pre(void *ap)
{
#ifdef DEBUG_VFS_LOCKS
	struct vop_lock1_args *a = ap;

	if ((a->a_flags & LK_INTERLOCK) == 0)
		ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK");
	else
		ASSERT_VI_LOCKED(a->a_vp, "VOP_LOCK");
#endif
}

void
vop_lock_post(void *ap, int rc)
{
#ifdef DEBUG_VFS_LOCKS
	struct vop_lock1_args *a = ap;

	ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK");
	if (rc == 0)
		ASSERT_VOP_LOCKED(a->a_vp, "VOP_LOCK");
#endif
}

void
vop_unlock_pre(void *ap)
{
#ifdef DEBUG_VFS_LOCKS
	struct vop_unlock_args *a = ap;

	if (a->a_flags & LK_INTERLOCK)
		ASSERT_VI_LOCKED(a->a_vp, "VOP_UNLOCK");
	ASSERT_VOP_LOCKED(a->a_vp, "VOP_UNLOCK");
#endif
}

void
vop_unlock_post(void *ap, int rc)
{
#ifdef DEBUG_VFS_LOCKS
	struct vop_unlock_args *a = ap;

	if (a->a_flags & LK_INTERLOCK)
		ASSERT_VI_UNLOCKED(a->a_vp, "VOP_UNLOCK");
#endif
}

void
vop_create_post(void *ap, int rc)
{
	struct vop_create_args *a = ap;

	if (!rc)
		VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE);
}

void
vop_link_post(void *ap, int rc)
{
	struct vop_link_args *a = ap;

	if (!rc) {
		VFS_KNOTE_LOCKED(a->a_vp, NOTE_LINK);
		VFS_KNOTE_LOCKED(a->a_tdvp, NOTE_WRITE);
	}
}

void
vop_mkdir_post(void *ap, int rc)
{
	struct vop_mkdir_args *a = ap;

	if (!rc)
		VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE | NOTE_LINK);
}

void
vop_mknod_post(void *ap, int rc)
{
	struct vop_mknod_args *a = ap;

	if (!rc)
		VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE);
}

void
vop_remove_post(void *ap, int rc)
{
	struct vop_remove_args *a = ap;

	if (!rc) {
		VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE);
		VFS_KNOTE_LOCKED(a->a_vp, NOTE_DELETE);
	}
}

void
vop_rename_post(void *ap, int rc)
{
	struct vop_rename_args *a = ap;

	if (!rc) {
		VFS_KNOTE_UNLOCKED(a->a_fdvp, NOTE_WRITE);
		VFS_KNOTE_UNLOCKED(a->a_tdvp, NOTE_WRITE);
		VFS_KNOTE_UNLOCKED(a->a_fvp, NOTE_RENAME);
		if (a->a_tvp)
			VFS_KNOTE_UNLOCKED(a->a_tvp, NOTE_DELETE);
	}
	if (a->a_tdvp != a->a_fdvp)
		vdrop(a->a_fdvp);
	if (a->a_tvp != a->a_fvp)
		vdrop(a->a_fvp);
	vdrop(a->a_tdvp);
	if (a->a_tvp)
		vdrop(a->a_tvp);
}

void
vop_rmdir_post(void *ap, int rc)
{
	struct vop_rmdir_args *a = ap;

	if (!rc) {
		VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE | NOTE_LINK);
		VFS_KNOTE_LOCKED(a->a_vp, NOTE_DELETE);
	}
}

void
vop_setattr_post(void *ap, int rc)
{
	struct vop_setattr_args *a = ap;

	if (!rc)
		VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB);
}

void
vop_symlink_post(void *ap, int rc)
{
	struct vop_symlink_args *a = ap;

	if (!rc)
		VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE);
}

static struct knlist fs_knlist;

static void
vfs_event_init(void *arg)
{
	knlist_init_mtx(&fs_knlist, NULL);
}
/* XXX - correct order? */
SYSINIT(vfs_knlist, SI_SUB_VFS, SI_ORDER_ANY, vfs_event_init, NULL);

void
vfs_event_signal(fsid_t *fsid, u_int32_t event, intptr_t data __unused)
{

	KNOTE_UNLOCKED(&fs_knlist, event);
}

static int	filt_fsattach(struct knote *kn);
static void	filt_fsdetach(struct knote *kn);
static int	filt_fsevent(struct knote *kn, long hint);

struct filterops fs_filtops =
	{ 0, filt_fsattach, filt_fsdetach, filt_fsevent };

static int
filt_fsattach(struct knote *kn)
{

	kn->kn_flags |= EV_CLEAR;
	knlist_add(&fs_knlist, kn, 0);
	return (0);
}

static void
filt_fsdetach(struct knote *kn)
{

	knlist_remove(&fs_knlist, kn, 0);
}

static int
filt_fsevent(struct knote *kn, long hint)
{

	kn->kn_fflags |= hint;
	return (kn->kn_fflags != 0);
}
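
/*
 * Illustrative sketch (not part of the original file): fs_filtops backs the
 * EVFILT_FS kevent filter, so the events posted via vfs_event_signal() are
 * consumed from userland roughly like this (hypothetical monitoring code,
 * error handling omitted):
 *
 *	struct kevent ev;
 *	int kq;
 *
 *	kq = kqueue();
 *	EV_SET(&ev, 0, EVFILT_FS, EV_ADD | EV_CLEAR, 0, 0, NULL);
 *	kevent(kq, &ev, 1, NULL, 0, NULL);	(register the filter)
 *	kevent(kq, NULL, 0, &ev, 1, NULL);	(block until an event arrives)
 *
 * On return, ev.fflags holds the accumulated VQ_* notification bits (see
 * filt_fsevent() above), e.g. VQ_MOUNT or VQ_UNMOUNT.
 */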

static int
sysctl_vfs_ctl(SYSCTL_HANDLER_ARGS)
{
	struct vfsidctl vc;
	int error;
	struct mount *mp;

	error = SYSCTL_IN(req, &vc, sizeof(vc));
	if (error)
		return (error);
	if (vc.vc_vers != VFS_CTL_VERS1)
		return (EINVAL);
	mp = vfs_getvfs(&vc.vc_fsid);
	if (mp == NULL)
		return (ENOENT);
	/* ensure that a specific sysctl goes to the right filesystem. */
	if (strcmp(vc.vc_fstypename, "*") != 0 &&
	    strcmp(vc.vc_fstypename, mp->mnt_vfc->vfc_name) != 0) {
		vfs_rel(mp);
		return (EINVAL);
	}
	VCTLTOREQ(&vc, req);
	error = VFS_SYSCTL(mp, vc.vc_op, req);
	vfs_rel(mp);
	return (error);
}

SYSCTL_PROC(_vfs, OID_AUTO, ctl, CTLFLAG_WR, NULL, 0, sysctl_vfs_ctl, "",
    "Sysctl by fsid");
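
/*
 * Illustrative sketch (not part of the original file): the "vfs.ctl" node
 * above is write-only, so a userland caller passes a struct vfsidctl as the
 * "new" sysctl value.  Roughly (hypothetical code; the fsid comes from
 * statfs(2), and VFS_CTL_QUERY is just one possible vc_op):
 *
 *	struct vfsidctl vc;
 *	struct statfs sb;
 *
 *	statfs("/mnt", &sb);
 *	memset(&vc, 0, sizeof(vc));
 *	vc.vc_vers = VFS_CTL_VERS1;
 *	vc.vc_fsid = sb.f_fsid;
 *	strlcpy(vc.vc_fstypename, "*", sizeof(vc.vc_fstypename));
 *	vc.vc_op = VFS_CTL_QUERY;
 *	sysctlbyname("vfs.ctl", NULL, NULL, &vc, sizeof(vc));
 *
 * The handler then relays the request to the filesystem through
 * VFS_SYSCTL() after checking the version and the fstype name; VCTLTOREQ()
 * maps the remaining fields of the structure into the sysctl request, and
 * what they mean is up to the individual filesystem.
 */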

/*
 * Function to initialize a va_filerev field sensibly.
 * XXX: Wouldn't a random number make a lot more sense ??
 */
u_quad_t
init_va_filerev(void)
{
	struct bintime bt;

	getbinuptime(&bt);
	return (((u_quad_t)bt.sec << 32LL) | (bt.frac >> 32LL));
}
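
/*
 * Illustrative sketch (not part of the original file): a filesystem would
 * typically call this helper when it first sets up the in-memory inode and
 * then bump the value on every modification, so that the va_filerev reported
 * by VOP_GETATTR() changes whenever the file changes.  Hypothetically (the
 * field name follows the UFS "i_modrev" convention and is an example only):
 *
 *	ip->i_modrev = init_va_filerev();	(at inode setup)
 *	...
 *	ip->i_modrev++;				(on each modification)
 *	...
 *	vap->va_filerev = ip->i_modrev;		(in VOP_GETATTR())
 */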

static int	filt_vfsread(struct knote *kn, long hint);
static int	filt_vfswrite(struct knote *kn, long hint);
static int	filt_vfsvnode(struct knote *kn, long hint);
static void	filt_vfsdetach(struct knote *kn);
static struct filterops vfsread_filtops =
	{ 1, NULL, filt_vfsdetach, filt_vfsread };
static struct filterops vfswrite_filtops =
	{ 1, NULL, filt_vfsdetach, filt_vfswrite };
static struct filterops vfsvnode_filtops =
	{ 1, NULL, filt_vfsdetach, filt_vfsvnode };

static void
vfs_knllock(void *arg)
{
	struct vnode *vp = arg;

	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
}

static void
vfs_knlunlock(void *arg)
{
	struct vnode *vp = arg;

	VOP_UNLOCK(vp, 0);
}

static void
vfs_knl_assert_locked(void *arg)
{
#ifdef DEBUG_VFS_LOCKS
	struct vnode *vp = arg;

	ASSERT_VOP_LOCKED(vp, "vfs_knl_assert_locked");
#endif
}

static void
vfs_knl_assert_unlocked(void *arg)
{
#ifdef DEBUG_VFS_LOCKS
	struct vnode *vp = arg;

	ASSERT_VOP_UNLOCKED(vp, "vfs_knl_assert_unlocked");
#endif
}

int
vfs_kqfilter(struct vop_kqfilter_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct knote *kn = ap->a_kn;
	struct knlist *knl;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &vfsread_filtops;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &vfswrite_filtops;
		break;
	case EVFILT_VNODE:
		kn->kn_fop = &vfsvnode_filtops;
		break;
	default:
		return (EINVAL);
	}

	kn->kn_hook = (caddr_t)vp;

	v_addpollinfo(vp);
	if (vp->v_pollinfo == NULL)
		return (ENOMEM);
	knl = &vp->v_pollinfo->vpi_selinfo.si_note;
	knlist_add(knl, kn, 0);

	return (0);
}
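
/*
 * Illustrative sketch (not part of the original file): for filesystems that
 * use this as their VOP_KQFILTER implementation, the routine above is what
 * services a userland kevent(2) registration against an open vnode-backed
 * descriptor, e.g. (hypothetical code, error handling omitted):
 *
 *	struct kevent ev;
 *	int fd, kq;
 *
 *	fd = open("/some/file", O_RDONLY);
 *	kq = kqueue();
 *	EV_SET(&ev, fd, EVFILT_VNODE, EV_ADD | EV_CLEAR,
 *	    NOTE_DELETE | NOTE_WRITE | NOTE_ATTRIB, 0, NULL);
 *	kevent(kq, &ev, 1, NULL, 0, NULL);
 *	kevent(kq, NULL, 0, &ev, 1, NULL);	(ev.fflags has the NOTE_* bits)
 *
 * The NOTE_* bits requested here end up in kn_sfflags and are matched against
 * the hints delivered by the VFS_KNOTE_*() calls in the vop_*_post() hooks
 * earlier in this file (see filt_vfsvnode() below).
 */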

/*
 * Detach knote from vnode
 */
static void
filt_vfsdetach(struct knote *kn)
{
	struct vnode *vp = (struct vnode *)kn->kn_hook;

	KASSERT(vp->v_pollinfo != NULL, ("Missing v_pollinfo"));
	knlist_remove(&vp->v_pollinfo->vpi_selinfo.si_note, kn, 0);
}

/*ARGSUSED*/
static int
filt_vfsread(struct knote *kn, long hint)
{
	struct vnode *vp = (struct vnode *)kn->kn_hook;
	struct vattr va;
	int res;

	/*
	 * filesystem is gone, so set the EOF flag and schedule
	 * the knote for deletion.
	 */
	if (hint == NOTE_REVOKE) {
		VI_LOCK(vp);
		kn->kn_flags |= (EV_EOF | EV_ONESHOT);
		VI_UNLOCK(vp);
		return (1);
	}

	if (VOP_GETATTR(vp, &va, curthread->td_ucred))
		return (0);

	VI_LOCK(vp);
	kn->kn_data = va.va_size - kn->kn_fp->f_offset;
	res = (kn->kn_data != 0);
	VI_UNLOCK(vp);
	return (res);
}

/*ARGSUSED*/
static int
filt_vfswrite(struct knote *kn, long hint)
{
	struct vnode *vp = (struct vnode *)kn->kn_hook;

	VI_LOCK(vp);

	/*
	 * filesystem is gone, so set the EOF flag and schedule
	 * the knote for deletion.
	 */
	if (hint == NOTE_REVOKE)
		kn->kn_flags |= (EV_EOF | EV_ONESHOT);

	kn->kn_data = 0;
	VI_UNLOCK(vp);
	return (1);
}

static int
filt_vfsvnode(struct knote *kn, long hint)
{
	struct vnode *vp = (struct vnode *)kn->kn_hook;
	int res;

	VI_LOCK(vp);
	if (kn->kn_sfflags & hint)
		kn->kn_fflags |= hint;
	if (hint == NOTE_REVOKE) {
		kn->kn_flags |= EV_EOF;
		VI_UNLOCK(vp);
		return (1);
	}
	res = (kn->kn_fflags != 0);
	VI_UNLOCK(vp);
	return (res);
}

int
vfs_read_dirent(struct vop_readdir_args *ap, struct dirent *dp, off_t off)
{
	int error;

	if (dp->d_reclen > ap->a_uio->uio_resid)
		return (ENAMETOOLONG);
	error = uiomove(dp, dp->d_reclen, ap->a_uio);
	if (error) {
		if (ap->a_ncookies != NULL) {
			if (ap->a_cookies != NULL)
				free(ap->a_cookies, M_TEMP);
			ap->a_cookies = NULL;
			*ap->a_ncookies = 0;
		}
		return (error);
	}
	if (ap->a_ncookies == NULL)
		return (0);

	KASSERT(ap->a_cookies,
	    ("NULL ap->a_cookies value with non-NULL ap->a_ncookies!"));

	*ap->a_cookies = realloc(*ap->a_cookies,
	    (*ap->a_ncookies + 1) * sizeof(u_long), M_TEMP, M_WAITOK | M_ZERO);
	(*ap->a_cookies)[*ap->a_ncookies] = off;
	return (0);
}
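
/*
 * Illustrative sketch (not part of the original file): a VOP_READDIR()
 * implementation that generates entries on the fly would call the helper
 * above once per entry, roughly (hypothetical filesystem code):
 *
 *	dp->d_fileno = fileno;
 *	dp->d_type = DT_DIR;
 *	dp->d_namlen = strlen(name);
 *	strcpy(dp->d_name, name);
 *	dp->d_reclen = GENERIC_DIRSIZ(dp);
 *	error = vfs_read_dirent(ap, dp, next_offset);
 *	if (error == ENAMETOOLONG)
 *		break;			(uio buffer is full, not a hard error)
 *
 * where "next_offset" is the offset at which a later VOP_READDIR() call
 * would resume; it is what gets recorded as the NFS cookie.  Note that, as
 * written, the helper stores the cookie at index *a_ncookies but does not
 * advance the count itself, so the caller is apparently expected to bump
 * *ap->a_ncookies after each successful call.
 */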

/*
 * Mark the access time of the file for update if the filesystem
 * supports VOP_MARKATIME.  This functionality is used by execve and
 * mmap, so we want to avoid the I/O implied by directly setting
 * va_atime for the sake of efficiency.
 */
void
vfs_mark_atime(struct vnode *vp, struct ucred *cred)
{

	if ((vp->v_mount->mnt_flag & (MNT_NOATIME | MNT_RDONLY)) == 0)
		(void)VOP_MARKATIME(vp);
}

/*
 * The purpose of this routine is to remove granularity from accmode_t,
 * reducing it to the standard UNIX access bits - VEXEC, VREAD, VWRITE,
 * VADMIN and VAPPEND.
 *
 * If it returns 0, the caller is supposed to continue with the usual
 * access checks using 'accmode' as modified by this routine.  If it
 * returns a nonzero value, the caller is supposed to return that value
 * as errno.
 *
 * Note that after this routine runs, accmode may be zero.
 */
int
vfs_unixify_accmode(accmode_t *accmode)
{
	/*
	 * There is no way to specify an explicit "deny" rule using
	 * file mode or POSIX.1e ACLs.
	 */
	if (*accmode & VEXPLICIT_DENY) {
		*accmode = 0;
		return (0);
	}

	/*
	 * None of these can be translated into the usual access bits.
	 * Also, the common case for NFSv4 ACLs is to not contain
	 * either of these bits.  The caller should check for VWRITE
	 * on the containing directory instead.
	 */
	if (*accmode & (VDELETE_CHILD | VDELETE))
		return (EPERM);

	if (*accmode & VADMIN_PERMS) {
		*accmode &= ~VADMIN_PERMS;
		*accmode |= VADMIN;
	}

	/*
	 * There is no way to deny VREAD_ATTRIBUTES, VREAD_ACL
	 * or VSYNCHRONIZE using file mode or POSIX.1e ACLs.
	 */
	*accmode &= ~(VSTAT_PERMS | VSYNCHRONIZE);

	return (0);
}

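/*
 * Illustrative sketch (not part of the original file): a typical caller,
 * such as an access check that receives fine-grained NFSv4-style bits,
 * would use the routine above like this (hypothetical code):
 *
 *	accmode_t accmode = VWRITE_ACL | VSYNCHRONIZE;
 *	int error;
 *
 *	error = vfs_unixify_accmode(&accmode);
 *	if (error != 0)
 *		return (error);
 *	if (accmode == 0)
 *		return (0);
 *	(continue with the usual check, e.g. vaccess(), using "accmode";
 *	 in this example VWRITE_ACL collapses into VADMIN and VSYNCHRONIZE
 *	 is simply dropped)
 */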