vfs_subr.c revision 298921
1/*-
2 * Copyright (c) 1989, 1993
3 *	The Regents of the University of California.  All rights reserved.
4 * (c) UNIX System Laboratories, Inc.
5 * All or some portions of this file are derived from material licensed
6 * to the University of California by American Telephone and Telegraph
7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8 * the permission of UNIX System Laboratories, Inc.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 *    notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 *    notice, this list of conditions and the following disclaimer in the
17 *    documentation and/or other materials provided with the distribution.
18 * 4. Neither the name of the University nor the names of its contributors
19 *    may be used to endorse or promote products derived from this software
20 *    without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 *	@(#)vfs_subr.c	8.31 (Berkeley) 5/26/95
35 */
36
37/*
38 * External virtual filesystem routines
39 */
40
41#include <sys/cdefs.h>
42__FBSDID("$FreeBSD: head/sys/kern/vfs_subr.c 298921 2016-05-02 13:13:32Z kib $");
43
44#include "opt_compat.h"
45#include "opt_ddb.h"
46#include "opt_watchdog.h"
47
48#include <sys/param.h>
49#include <sys/systm.h>
50#include <sys/bio.h>
51#include <sys/buf.h>
52#include <sys/condvar.h>
53#include <sys/conf.h>
54#include <sys/dirent.h>
55#include <sys/event.h>
56#include <sys/eventhandler.h>
57#include <sys/extattr.h>
58#include <sys/file.h>
59#include <sys/fcntl.h>
60#include <sys/jail.h>
61#include <sys/kdb.h>
62#include <sys/kernel.h>
63#include <sys/kthread.h>
64#include <sys/lockf.h>
65#include <sys/malloc.h>
66#include <sys/mount.h>
67#include <sys/namei.h>
68#include <sys/pctrie.h>
69#include <sys/priv.h>
70#include <sys/reboot.h>
71#include <sys/refcount.h>
72#include <sys/rwlock.h>
73#include <sys/sched.h>
74#include <sys/sleepqueue.h>
75#include <sys/smp.h>
76#include <sys/stat.h>
77#include <sys/sysctl.h>
78#include <sys/syslog.h>
79#include <sys/vmmeter.h>
80#include <sys/vnode.h>
81#include <sys/watchdog.h>
82
83#include <machine/stdarg.h>
84
85#include <security/mac/mac_framework.h>
86
87#include <vm/vm.h>
88#include <vm/vm_object.h>
89#include <vm/vm_extern.h>
90#include <vm/pmap.h>
91#include <vm/vm_map.h>
92#include <vm/vm_page.h>
93#include <vm/vm_kern.h>
94#include <vm/uma.h>
95
96#ifdef DDB
97#include <ddb/ddb.h>
98#endif
99
100static void	delmntque(struct vnode *vp);
101static int	flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo,
102		    int slpflag, int slptimeo);
103static void	syncer_shutdown(void *arg, int howto);
104static int	vtryrecycle(struct vnode *vp);
105static void	v_init_counters(struct vnode *);
106static void	v_incr_usecount(struct vnode *);
107static void	v_incr_usecount_locked(struct vnode *);
108static void	v_incr_devcount(struct vnode *);
109static void	v_decr_devcount(struct vnode *);
110static void	vnlru_free(int);
111static void	vgonel(struct vnode *);
112static void	vfs_knllock(void *arg);
113static void	vfs_knlunlock(void *arg);
114static void	vfs_knl_assert_locked(void *arg);
115static void	vfs_knl_assert_unlocked(void *arg);
116static void	destroy_vpollinfo(struct vpollinfo *vi);
117
118/*
119 * Number of vnodes in existence.  Increased whenever getnewvnode()
120 * allocates a new vnode, decreased in vdropl() for VI_DOOMED vnode.
121 */
122static unsigned long	numvnodes;
123
124SYSCTL_ULONG(_vfs, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0,
125    "Number of vnodes in existence");
126
127static u_long vnodes_created;
128SYSCTL_ULONG(_vfs, OID_AUTO, vnodes_created, CTLFLAG_RD, &vnodes_created,
129    0, "Number of vnodes created by getnewvnode");
130
131/*
132 * Conversion tables for conversion from vnode types to inode formats
133 * and back.
134 */
135enum vtype iftovt_tab[16] = {
136	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
137	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
138};
139int vttoif_tab[10] = {
140	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
141	S_IFSOCK, S_IFIFO, S_IFMT, S_IFMT
142};
143
144/*
145 * List of vnodes that are ready for recycling.
146 */
147static TAILQ_HEAD(freelst, vnode) vnode_free_list;
148
149/*
150 * "Free" vnode target.  Free vnodes are rarely completely free, but are
151 * just ones that are cheap to recycle.  Usually they are for files which
152 * have been stat'd but not read; these usually have inode and namecache
153 * data attached to them.  This target is the preferred minimum size of a
154 * sub-cache consisting mostly of such files. The system balances the size
155 * of this sub-cache with its complement to try to prevent either from
156 * thrashing while the other is relatively inactive.  The targets express
157 * a preference for the best balance.
158 *
159 * "Above" this target there are 2 further targets (watermarks) related
160 * to recyling of free vnodes.  In the best-operating case, the cache is
161 * exactly full, the free list has size between vlowat and vhiwat above the
162 * free target, and recycling from it and normal use maintains this state.
163 * Sometimes the free list is below vlowat or even empty, but this state
164 * is even better for immediate use provided the cache is not full.
165 * Otherwise, vnlru_proc() runs to reclaim enough vnodes (usually non-free
166 * ones) to reach one of these states.  The watermarks are currently hard-
167 * coded as 4% and 9% of the available space higher.  These and the default
168 * of 25% for wantfreevnodes are too large if the memory size is large.
169 * E.g., 9% of 75% of MAXVNODES is more than 566000 vnodes to reclaim
170 * whenever vnlru_proc() becomes active.
171 */
172static u_long wantfreevnodes;
173SYSCTL_ULONG(_vfs, OID_AUTO, wantfreevnodes, CTLFLAG_RW,
174    &wantfreevnodes, 0, "Target for minimum number of \"free\" vnodes");
175static u_long freevnodes;
176SYSCTL_ULONG(_vfs, OID_AUTO, freevnodes, CTLFLAG_RD,
177    &freevnodes, 0, "Number of \"free\" vnodes");
178
179static u_long recycles_count;
180SYSCTL_ULONG(_vfs, OID_AUTO, recycles, CTLFLAG_RD, &recycles_count, 0,
181    "Number of vnodes recycled to meet vnode cache targets");
182
183/*
184 * Various variables used for debugging the new implementation of
185 * reassignbuf().
186 * XXX these are probably of (very) limited utility now.
187 */
188static int reassignbufcalls;
189SYSCTL_INT(_vfs, OID_AUTO, reassignbufcalls, CTLFLAG_RW, &reassignbufcalls, 0,
190    "Number of calls to reassignbuf");
191
192static u_long free_owe_inact;
193SYSCTL_ULONG(_vfs, OID_AUTO, free_owe_inact, CTLFLAG_RD, &free_owe_inact, 0,
194    "Number of times free vnodes kept on active list due to VFS "
195    "owing inactivation");
196
197/* To keep more than one thread at a time from running vfs_getnewfsid */
198static struct mtx mntid_mtx;
199
200/*
201 * Lock for any access to the following:
202 *	vnode_free_list
203 *	numvnodes
204 *	freevnodes
205 */
206static struct mtx vnode_free_list_mtx;
207
208/* Publicly exported FS */
209struct nfs_public nfs_pub;
210
211static uma_zone_t buf_trie_zone;
212
213/* Zone for allocation of new vnodes - used exclusively by getnewvnode() */
214static uma_zone_t vnode_zone;
215static uma_zone_t vnodepoll_zone;
216
217/*
218 * The workitem queue.
219 *
220 * It is useful to delay writes of file data and filesystem metadata
221 * for tens of seconds so that quickly created and deleted files need
222 * not waste disk bandwidth being created and removed. To realize this,
223 * we append vnodes to a "workitem" queue. When running with a soft
224 * updates implementation, most pending metadata dependencies should
225 * not wait for more than a few seconds. Thus, mounted on block devices
226 * are delayed only about a half the time that file data is delayed.
227 * Similarly, directory updates are more critical, so are only delayed
228 * about a third the time that file data is delayed. Thus, there are
229 * SYNCER_MAXDELAY queues that are processed round-robin at a rate of
230 * one each second (driven off the filesystem syncer process). The
231 * syncer_delayno variable indicates the next queue that is to be processed.
232 * Items that need to be processed soon are placed in this queue:
233 *
234 *	syncer_workitem_pending[syncer_delayno]
235 *
236 * A delay of fifteen seconds is done by placing the request fifteen
237 * entries later in the queue:
238 *
239 *	syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask]
240 *
241 */
242static int syncer_delayno;
243static long syncer_mask;
244LIST_HEAD(synclist, bufobj);
245static struct synclist *syncer_workitem_pending;
246/*
247 * The sync_mtx protects:
248 *	bo->bo_synclist
249 *	sync_vnode_count
250 *	syncer_delayno
251 *	syncer_state
252 *	syncer_workitem_pending
253 *	syncer_worklist_len
254 *	rushjob
255 */
256static struct mtx sync_mtx;
257static struct cv sync_wakeup;
258
259#define SYNCER_MAXDELAY		32
260static int syncer_maxdelay = SYNCER_MAXDELAY;	/* maximum delay time */
261static int syncdelay = 30;		/* max time to delay syncing data */
262static int filedelay = 30;		/* time to delay syncing files */
263SYSCTL_INT(_kern, OID_AUTO, filedelay, CTLFLAG_RW, &filedelay, 0,
264    "Time to delay syncing files (in seconds)");
265static int dirdelay = 29;		/* time to delay syncing directories */
266SYSCTL_INT(_kern, OID_AUTO, dirdelay, CTLFLAG_RW, &dirdelay, 0,
267    "Time to delay syncing directories (in seconds)");
268static int metadelay = 28;		/* time to delay syncing metadata */
269SYSCTL_INT(_kern, OID_AUTO, metadelay, CTLFLAG_RW, &metadelay, 0,
270    "Time to delay syncing metadata (in seconds)");
271static int rushjob;		/* number of slots to run ASAP */
272static int stat_rush_requests;	/* number of times I/O speeded up */
273SYSCTL_INT(_debug, OID_AUTO, rush_requests, CTLFLAG_RW, &stat_rush_requests, 0,
274    "Number of times I/O speeded up (rush requests)");
275
276/*
277 * When shutting down the syncer, run it at four times normal speed.
278 */
279#define SYNCER_SHUTDOWN_SPEEDUP		4
280static int sync_vnode_count;
281static int syncer_worklist_len;
282static enum { SYNCER_RUNNING, SYNCER_SHUTTING_DOWN, SYNCER_FINAL_DELAY }
283    syncer_state;
284
285/* Target for maximum number of vnodes. */
286int desiredvnodes;
287static int gapvnodes;		/* gap between wanted and desired */
288static int vhiwat;		/* enough extras after expansion */
289static int vlowat;		/* minimal extras before expansion */
290static int vstir;		/* nonzero to stir non-free vnodes */
291static volatile int vsmalltrigger = 8;	/* pref to keep if > this many pages */
292
293static int
294sysctl_update_desiredvnodes(SYSCTL_HANDLER_ARGS)
295{
296	int error, old_desiredvnodes;
297
298	old_desiredvnodes = desiredvnodes;
299	if ((error = sysctl_handle_int(oidp, arg1, arg2, req)) != 0)
300		return (error);
301	if (old_desiredvnodes != desiredvnodes) {
302		wantfreevnodes = desiredvnodes / 4;
303		/* XXX locking seems to be incomplete. */
304		vfs_hash_changesize(desiredvnodes);
305		cache_changesize(desiredvnodes);
306	}
307	return (0);
308}
309
310SYSCTL_PROC(_kern, KERN_MAXVNODES, maxvnodes,
311    CTLTYPE_INT | CTLFLAG_MPSAFE | CTLFLAG_RW, &desiredvnodes, 0,
312    sysctl_update_desiredvnodes, "I", "Target for maximum number of vnodes");
313SYSCTL_ULONG(_kern, OID_AUTO, minvnodes, CTLFLAG_RW,
314    &wantfreevnodes, 0, "Old name for vfs.wantfreevnodes (legacy)");
315static int vnlru_nowhere;
316SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RW,
317    &vnlru_nowhere, 0, "Number of times the vnlru process ran without success");
318
319/* Shift count for (uintptr_t)vp to initialize vp->v_hash. */
320static int vnsz2log;
321
322/*
323 * Support for the bufobj clean & dirty pctrie.
324 */
325static void *
326buf_trie_alloc(struct pctrie *ptree)
327{
328
329	return uma_zalloc(buf_trie_zone, M_NOWAIT);
330}
331
332static void
333buf_trie_free(struct pctrie *ptree, void *node)
334{
335
336	uma_zfree(buf_trie_zone, node);
337}
338PCTRIE_DEFINE(BUF, buf, b_lblkno, buf_trie_alloc, buf_trie_free);
339
340/*
341 * Initialize the vnode management data structures.
342 *
343 * Reevaluate the following cap on the number of vnodes after the physical
344 * memory size exceeds 512GB.  In the limit, as the physical memory size
345 * grows, the ratio of the memory size in KB to to vnodes approaches 64:1.
346 */
347#ifndef	MAXVNODES_MAX
348#define	MAXVNODES_MAX	(512 * 1024 * 1024 / 64)	/* 8M */
349#endif
350
351/*
352 * Initialize a vnode as it first enters the zone.
353 */
354static int
355vnode_init(void *mem, int size, int flags)
356{
357	struct vnode *vp;
358	struct bufobj *bo;
359
360	vp = mem;
361	bzero(vp, size);
362	/*
363	 * Setup locks.
364	 */
365	vp->v_vnlock = &vp->v_lock;
366	mtx_init(&vp->v_interlock, "vnode interlock", NULL, MTX_DEF);
367	/*
368	 * By default, don't allow shared locks unless filesystems opt-in.
369	 */
370	lockinit(vp->v_vnlock, PVFS, "vnode", VLKTIMEOUT,
371	    LK_NOSHARE | LK_IS_VNODE);
372	/*
373	 * Initialize bufobj.
374	 */
375	bo = &vp->v_bufobj;
376	bo->__bo_vnode = vp;
377	rw_init(BO_LOCKPTR(bo), "bufobj interlock");
378	bo->bo_private = vp;
379	TAILQ_INIT(&bo->bo_clean.bv_hd);
380	TAILQ_INIT(&bo->bo_dirty.bv_hd);
381	/*
382	 * Initialize namecache.
383	 */
384	LIST_INIT(&vp->v_cache_src);
385	TAILQ_INIT(&vp->v_cache_dst);
386	/*
387	 * Initialize rangelocks.
388	 */
389	rangelock_init(&vp->v_rl);
390	return (0);
391}
392
393/*
394 * Free a vnode when it is cleared from the zone.
395 */
396static void
397vnode_fini(void *mem, int size)
398{
399	struct vnode *vp;
400	struct bufobj *bo;
401
402	vp = mem;
403	rangelock_destroy(&vp->v_rl);
404	lockdestroy(vp->v_vnlock);
405	mtx_destroy(&vp->v_interlock);
406	bo = &vp->v_bufobj;
407	rw_destroy(BO_LOCKPTR(bo));
408}
409
410/*
411 * Provide the size of NFS nclnode and NFS fh for calculation of the
412 * vnode memory consumption.  The size is specified directly to
413 * eliminate dependency on NFS-private header.
414 *
415 * Other filesystems may use bigger or smaller (like UFS and ZFS)
416 * private inode data, but the NFS-based estimation is ample enough.
417 * Still, we care about differences in the size between 64- and 32-bit
418 * platforms.
419 *
420 * Namecache structure size is heuristically
421 * sizeof(struct namecache_ts) + CACHE_PATH_CUTOFF + 1.
422 */
423#ifdef _LP64
424#define	NFS_NCLNODE_SZ	(528 + 64)
425#define	NC_SZ		148
426#else
427#define	NFS_NCLNODE_SZ	(360 + 32)
428#define	NC_SZ		92
429#endif
430
431static void
432vntblinit(void *dummy __unused)
433{
434	u_int i;
435	int physvnodes, virtvnodes;
436
437	/*
438	 * Desiredvnodes is a function of the physical memory size and the
439	 * kernel's heap size.  Generally speaking, it scales with the
440	 * physical memory size.  The ratio of desiredvnodes to the physical
441	 * memory size is 1:16 until desiredvnodes exceeds 98,304.
442	 * Thereafter, the
443	 * marginal ratio of desiredvnodes to the physical memory size is
444	 * 1:64.  However, desiredvnodes is limited by the kernel's heap
445	 * size.  The memory required by desiredvnodes vnodes and vm objects
446	 * must not exceed 1/10th of the kernel's heap size.
447	 */
448	physvnodes = maxproc + pgtok(vm_cnt.v_page_count) / 64 +
449	    3 * min(98304 * 16, pgtok(vm_cnt.v_page_count)) / 64;
450	virtvnodes = vm_kmem_size / (10 * (sizeof(struct vm_object) +
451	    sizeof(struct vnode) + NC_SZ * ncsizefactor + NFS_NCLNODE_SZ));
452	desiredvnodes = min(physvnodes, virtvnodes);
453	if (desiredvnodes > MAXVNODES_MAX) {
454		if (bootverbose)
455			printf("Reducing kern.maxvnodes %d -> %d\n",
456			    desiredvnodes, MAXVNODES_MAX);
457		desiredvnodes = MAXVNODES_MAX;
458	}
459	wantfreevnodes = desiredvnodes / 4;
460	mtx_init(&mntid_mtx, "mntid", NULL, MTX_DEF);
461	TAILQ_INIT(&vnode_free_list);
462	mtx_init(&vnode_free_list_mtx, "vnode_free_list", NULL, MTX_DEF);
463	vnode_zone = uma_zcreate("VNODE", sizeof (struct vnode), NULL, NULL,
464	    vnode_init, vnode_fini, UMA_ALIGN_PTR, 0);
465	vnodepoll_zone = uma_zcreate("VNODEPOLL", sizeof (struct vpollinfo),
466	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
467	/*
468	 * Preallocate enough nodes to support one-per buf so that
469	 * we can not fail an insert.  reassignbuf() callers can not
470	 * tolerate the insertion failure.
471	 */
472	buf_trie_zone = uma_zcreate("BUF TRIE", pctrie_node_size(),
473	    NULL, NULL, pctrie_zone_init, NULL, UMA_ALIGN_PTR,
474	    UMA_ZONE_NOFREE | UMA_ZONE_VM);
475	uma_prealloc(buf_trie_zone, nbuf);
476	/*
477	 * Initialize the filesystem syncer.
478	 */
479	syncer_workitem_pending = hashinit(syncer_maxdelay, M_VNODE,
480	    &syncer_mask);
481	syncer_maxdelay = syncer_mask + 1;
482	mtx_init(&sync_mtx, "Syncer mtx", NULL, MTX_DEF);
483	cv_init(&sync_wakeup, "syncer");
484	for (i = 1; i <= sizeof(struct vnode); i <<= 1)
485		vnsz2log++;
486	vnsz2log--;
487}
488SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_FIRST, vntblinit, NULL);
489
490
491/*
492 * Mark a mount point as busy. Used to synchronize access and to delay
493 * unmounting. Eventually, mountlist_mtx is not released on failure.
494 *
495 * vfs_busy() is a custom lock, it can block the caller.
496 * vfs_busy() only sleeps if the unmount is active on the mount point.
497 * For a mountpoint mp, vfs_busy-enforced lock is before lock of any
498 * vnode belonging to mp.
499 *
500 * Lookup uses vfs_busy() to traverse mount points.
501 * root fs			var fs
502 * / vnode lock		A	/ vnode lock (/var)		D
503 * /var vnode lock	B	/log vnode lock(/var/log)	E
504 * vfs_busy lock	C	vfs_busy lock			F
505 *
506 * Within each file system, the lock order is C->A->B and F->D->E.
507 *
508 * When traversing across mounts, the system follows that lock order:
509 *
510 *        C->A->B
511 *              |
512 *              +->F->D->E
513 *
514 * The lookup() process for namei("/var") illustrates the process:
515 *  VOP_LOOKUP() obtains B while A is held
516 *  vfs_busy() obtains a shared lock on F while A and B are held
517 *  vput() releases lock on B
518 *  vput() releases lock on A
519 *  VFS_ROOT() obtains lock on D while shared lock on F is held
520 *  vfs_unbusy() releases shared lock on F
521 *  vn_lock() obtains lock on deadfs vnode vp_crossmp instead of A.
522 *    Attempt to lock A (instead of vp_crossmp) while D is held would
523 *    violate the global order, causing deadlocks.
524 *
525 * dounmount() locks B while F is drained.
526 */
527int
528vfs_busy(struct mount *mp, int flags)
529{
530
531	MPASS((flags & ~MBF_MASK) == 0);
532	CTR3(KTR_VFS, "%s: mp %p with flags %d", __func__, mp, flags);
533
534	MNT_ILOCK(mp);
535	MNT_REF(mp);
536	/*
537	 * If mount point is currently being unmounted, sleep until the
538	 * mount point fate is decided.  If thread doing the unmounting fails,
539	 * it will clear MNTK_UNMOUNT flag before waking us up, indicating
540	 * that this mount point has survived the unmount attempt and vfs_busy
541	 * should retry.  Otherwise the unmounter thread will set MNTK_REFEXPIRE
542	 * flag in addition to MNTK_UNMOUNT, indicating that mount point is
543	 * about to be really destroyed.  vfs_busy needs to release its
544	 * reference on the mount point in this case and return with ENOENT,
545	 * telling the caller that mount mount it tried to busy is no longer
546	 * valid.
547	 */
548	while (mp->mnt_kern_flag & MNTK_UNMOUNT) {
549		if (flags & MBF_NOWAIT || mp->mnt_kern_flag & MNTK_REFEXPIRE) {
550			MNT_REL(mp);
551			MNT_IUNLOCK(mp);
552			CTR1(KTR_VFS, "%s: failed busying before sleeping",
553			    __func__);
554			return (ENOENT);
555		}
556		if (flags & MBF_MNTLSTLOCK)
557			mtx_unlock(&mountlist_mtx);
558		mp->mnt_kern_flag |= MNTK_MWAIT;
559		msleep(mp, MNT_MTX(mp), PVFS | PDROP, "vfs_busy", 0);
560		if (flags & MBF_MNTLSTLOCK)
561			mtx_lock(&mountlist_mtx);
562		MNT_ILOCK(mp);
563	}
564	if (flags & MBF_MNTLSTLOCK)
565		mtx_unlock(&mountlist_mtx);
566	mp->mnt_lockref++;
567	MNT_IUNLOCK(mp);
568	return (0);
569}
570
571/*
572 * Free a busy filesystem.
573 */
574void
575vfs_unbusy(struct mount *mp)
576{
577
578	CTR2(KTR_VFS, "%s: mp %p", __func__, mp);
579	MNT_ILOCK(mp);
580	MNT_REL(mp);
581	KASSERT(mp->mnt_lockref > 0, ("negative mnt_lockref"));
582	mp->mnt_lockref--;
583	if (mp->mnt_lockref == 0 && (mp->mnt_kern_flag & MNTK_DRAINING) != 0) {
584		MPASS(mp->mnt_kern_flag & MNTK_UNMOUNT);
585		CTR1(KTR_VFS, "%s: waking up waiters", __func__);
586		mp->mnt_kern_flag &= ~MNTK_DRAINING;
587		wakeup(&mp->mnt_lockref);
588	}
589	MNT_IUNLOCK(mp);
590}
591
592/*
593 * Lookup a mount point by filesystem identifier.
594 */
595struct mount *
596vfs_getvfs(fsid_t *fsid)
597{
598	struct mount *mp;
599
600	CTR2(KTR_VFS, "%s: fsid %p", __func__, fsid);
601	mtx_lock(&mountlist_mtx);
602	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
603		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
604		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
605			vfs_ref(mp);
606			mtx_unlock(&mountlist_mtx);
607			return (mp);
608		}
609	}
610	mtx_unlock(&mountlist_mtx);
611	CTR2(KTR_VFS, "%s: lookup failed for %p id", __func__, fsid);
612	return ((struct mount *) 0);
613}
614
615/*
616 * Lookup a mount point by filesystem identifier, busying it before
617 * returning.
618 *
619 * To avoid congestion on mountlist_mtx, implement simple direct-mapped
620 * cache for popular filesystem identifiers.  The cache is lockess, using
621 * the fact that struct mount's are never freed.  In worst case we may
622 * get pointer to unmounted or even different filesystem, so we have to
623 * check what we got, and go slow way if so.
624 */
625struct mount *
626vfs_busyfs(fsid_t *fsid)
627{
628#define	FSID_CACHE_SIZE	256
629	typedef struct mount * volatile vmp_t;
630	static vmp_t cache[FSID_CACHE_SIZE];
631	struct mount *mp;
632	int error;
633	uint32_t hash;
634
635	CTR2(KTR_VFS, "%s: fsid %p", __func__, fsid);
636	hash = fsid->val[0] ^ fsid->val[1];
637	hash = (hash >> 16 ^ hash) & (FSID_CACHE_SIZE - 1);
638	mp = cache[hash];
639	if (mp == NULL ||
640	    mp->mnt_stat.f_fsid.val[0] != fsid->val[0] ||
641	    mp->mnt_stat.f_fsid.val[1] != fsid->val[1])
642		goto slow;
643	if (vfs_busy(mp, 0) != 0) {
644		cache[hash] = NULL;
645		goto slow;
646	}
647	if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
648	    mp->mnt_stat.f_fsid.val[1] == fsid->val[1])
649		return (mp);
650	else
651	    vfs_unbusy(mp);
652
653slow:
654	mtx_lock(&mountlist_mtx);
655	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
656		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
657		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
658			error = vfs_busy(mp, MBF_MNTLSTLOCK);
659			if (error) {
660				cache[hash] = NULL;
661				mtx_unlock(&mountlist_mtx);
662				return (NULL);
663			}
664			cache[hash] = mp;
665			return (mp);
666		}
667	}
668	CTR2(KTR_VFS, "%s: lookup failed for %p id", __func__, fsid);
669	mtx_unlock(&mountlist_mtx);
670	return ((struct mount *) 0);
671}
672
673/*
674 * Check if a user can access privileged mount options.
675 */
676int
677vfs_suser(struct mount *mp, struct thread *td)
678{
679	int error;
680
681	/*
682	 * If the thread is jailed, but this is not a jail-friendly file
683	 * system, deny immediately.
684	 */
685	if (!(mp->mnt_vfc->vfc_flags & VFCF_JAIL) && jailed(td->td_ucred))
686		return (EPERM);
687
688	/*
689	 * If the file system was mounted outside the jail of the calling
690	 * thread, deny immediately.
691	 */
692	if (prison_check(td->td_ucred, mp->mnt_cred) != 0)
693		return (EPERM);
694
695	/*
696	 * If file system supports delegated administration, we don't check
697	 * for the PRIV_VFS_MOUNT_OWNER privilege - it will be better verified
698	 * by the file system itself.
699	 * If this is not the user that did original mount, we check for
700	 * the PRIV_VFS_MOUNT_OWNER privilege.
701	 */
702	if (!(mp->mnt_vfc->vfc_flags & VFCF_DELEGADMIN) &&
703	    mp->mnt_cred->cr_uid != td->td_ucred->cr_uid) {
704		if ((error = priv_check(td, PRIV_VFS_MOUNT_OWNER)) != 0)
705			return (error);
706	}
707	return (0);
708}
709
710/*
711 * Get a new unique fsid.  Try to make its val[0] unique, since this value
712 * will be used to create fake device numbers for stat().  Also try (but
713 * not so hard) make its val[0] unique mod 2^16, since some emulators only
714 * support 16-bit device numbers.  We end up with unique val[0]'s for the
715 * first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls.
716 *
717 * Keep in mind that several mounts may be running in parallel.  Starting
718 * the search one past where the previous search terminated is both a
719 * micro-optimization and a defense against returning the same fsid to
720 * different mounts.
721 */
722void
723vfs_getnewfsid(struct mount *mp)
724{
725	static uint16_t mntid_base;
726	struct mount *nmp;
727	fsid_t tfsid;
728	int mtype;
729
730	CTR2(KTR_VFS, "%s: mp %p", __func__, mp);
731	mtx_lock(&mntid_mtx);
732	mtype = mp->mnt_vfc->vfc_typenum;
733	tfsid.val[1] = mtype;
734	mtype = (mtype & 0xFF) << 24;
735	for (;;) {
736		tfsid.val[0] = makedev(255,
737		    mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF));
738		mntid_base++;
739		if ((nmp = vfs_getvfs(&tfsid)) == NULL)
740			break;
741		vfs_rel(nmp);
742	}
743	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
744	mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
745	mtx_unlock(&mntid_mtx);
746}
747
748/*
749 * Knob to control the precision of file timestamps:
750 *
751 *   0 = seconds only; nanoseconds zeroed.
752 *   1 = seconds and nanoseconds, accurate within 1/HZ.
753 *   2 = seconds and nanoseconds, truncated to microseconds.
754 * >=3 = seconds and nanoseconds, maximum precision.
755 */
756enum { TSP_SEC, TSP_HZ, TSP_USEC, TSP_NSEC };
757
758static int timestamp_precision = TSP_USEC;
759SYSCTL_INT(_vfs, OID_AUTO, timestamp_precision, CTLFLAG_RW,
760    &timestamp_precision, 0, "File timestamp precision (0: seconds, "
761    "1: sec + ns accurate to 1/HZ, 2: sec + ns truncated to ms, "
762    "3+: sec + ns (max. precision))");
763
764/*
765 * Get a current timestamp.
766 */
767void
768vfs_timestamp(struct timespec *tsp)
769{
770	struct timeval tv;
771
772	switch (timestamp_precision) {
773	case TSP_SEC:
774		tsp->tv_sec = time_second;
775		tsp->tv_nsec = 0;
776		break;
777	case TSP_HZ:
778		getnanotime(tsp);
779		break;
780	case TSP_USEC:
781		microtime(&tv);
782		TIMEVAL_TO_TIMESPEC(&tv, tsp);
783		break;
784	case TSP_NSEC:
785	default:
786		nanotime(tsp);
787		break;
788	}
789}
790
791/*
792 * Set vnode attributes to VNOVAL
793 */
794void
795vattr_null(struct vattr *vap)
796{
797
798	vap->va_type = VNON;
799	vap->va_size = VNOVAL;
800	vap->va_bytes = VNOVAL;
801	vap->va_mode = VNOVAL;
802	vap->va_nlink = VNOVAL;
803	vap->va_uid = VNOVAL;
804	vap->va_gid = VNOVAL;
805	vap->va_fsid = VNOVAL;
806	vap->va_fileid = VNOVAL;
807	vap->va_blocksize = VNOVAL;
808	vap->va_rdev = VNOVAL;
809	vap->va_atime.tv_sec = VNOVAL;
810	vap->va_atime.tv_nsec = VNOVAL;
811	vap->va_mtime.tv_sec = VNOVAL;
812	vap->va_mtime.tv_nsec = VNOVAL;
813	vap->va_ctime.tv_sec = VNOVAL;
814	vap->va_ctime.tv_nsec = VNOVAL;
815	vap->va_birthtime.tv_sec = VNOVAL;
816	vap->va_birthtime.tv_nsec = VNOVAL;
817	vap->va_flags = VNOVAL;
818	vap->va_gen = VNOVAL;
819	vap->va_vaflags = 0;
820}
821
822/*
823 * This routine is called when we have too many vnodes.  It attempts
824 * to free <count> vnodes and will potentially free vnodes that still
825 * have VM backing store (VM backing store is typically the cause
826 * of a vnode blowout so we want to do this).  Therefore, this operation
827 * is not considered cheap.
828 *
829 * A number of conditions may prevent a vnode from being reclaimed.
830 * the buffer cache may have references on the vnode, a directory
831 * vnode may still have references due to the namei cache representing
832 * underlying files, or the vnode may be in active use.   It is not
833 * desirable to reuse such vnodes.  These conditions may cause the
834 * number of vnodes to reach some minimum value regardless of what
835 * you set kern.maxvnodes to.  Do not set kern.maxvnodes too low.
836 */
837static int
838vlrureclaim(struct mount *mp, int reclaim_nc_src, int trigger)
839{
840	struct vnode *vp;
841	int count, done, target;
842
843	done = 0;
844	vn_start_write(NULL, &mp, V_WAIT);
845	MNT_ILOCK(mp);
846	count = mp->mnt_nvnodelistsize;
847	target = count * (int64_t)gapvnodes / imax(desiredvnodes, 1);
848	target = target / 10 + 1;
849	while (count != 0 && done < target) {
850		vp = TAILQ_FIRST(&mp->mnt_nvnodelist);
851		while (vp != NULL && vp->v_type == VMARKER)
852			vp = TAILQ_NEXT(vp, v_nmntvnodes);
853		if (vp == NULL)
854			break;
855		/*
856		 * XXX LRU is completely broken for non-free vnodes.  First
857		 * by calling here in mountpoint order, then by moving
858		 * unselected vnodes to the end here, and most grossly by
859		 * removing the vlruvp() function that was supposed to
860		 * maintain the order.  (This function was born broken
861		 * since syncer problems prevented it doing anything.)  The
862		 * order is closer to LRC (C = Created).
863		 *
864		 * LRU reclaiming of vnodes seems to have last worked in
865		 * FreeBSD-3 where LRU wasn't mentioned under any spelling.
866		 * Then there was no hold count, and inactive vnodes were
867		 * simply put on the free list in LRU order.  The separate
868		 * lists also break LRU.  We prefer to reclaim from the
869		 * free list for technical reasons.  This tends to thrash
870		 * the free list to keep very unrecently used held vnodes.
871		 * The problem is mitigated by keeping the free list large.
872		 */
873		TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
874		TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
875		--count;
876		if (!VI_TRYLOCK(vp))
877			goto next_iter;
878		/*
879		 * If it's been deconstructed already, it's still
880		 * referenced, or it exceeds the trigger, skip it.
881		 * Also skip free vnodes.  We are trying to make space
882		 * to expand the free list, not reduce it.
883		 */
884		if (vp->v_usecount ||
885		    (!reclaim_nc_src && !LIST_EMPTY(&vp->v_cache_src)) ||
886		    ((vp->v_iflag & VI_FREE) != 0) ||
887		    (vp->v_iflag & VI_DOOMED) != 0 || (vp->v_object != NULL &&
888		    vp->v_object->resident_page_count > trigger)) {
889			VI_UNLOCK(vp);
890			goto next_iter;
891		}
892		MNT_IUNLOCK(mp);
893		vholdl(vp);
894		if (VOP_LOCK(vp, LK_INTERLOCK|LK_EXCLUSIVE|LK_NOWAIT)) {
895			vdrop(vp);
896			goto next_iter_mntunlocked;
897		}
898		VI_LOCK(vp);
899		/*
900		 * v_usecount may have been bumped after VOP_LOCK() dropped
901		 * the vnode interlock and before it was locked again.
902		 *
903		 * It is not necessary to recheck VI_DOOMED because it can
904		 * only be set by another thread that holds both the vnode
905		 * lock and vnode interlock.  If another thread has the
906		 * vnode lock before we get to VOP_LOCK() and obtains the
907		 * vnode interlock after VOP_LOCK() drops the vnode
908		 * interlock, the other thread will be unable to drop the
909		 * vnode lock before our VOP_LOCK() call fails.
910		 */
911		if (vp->v_usecount ||
912		    (!reclaim_nc_src && !LIST_EMPTY(&vp->v_cache_src)) ||
913		    (vp->v_iflag & VI_FREE) != 0 ||
914		    (vp->v_object != NULL &&
915		    vp->v_object->resident_page_count > trigger)) {
916			VOP_UNLOCK(vp, LK_INTERLOCK);
917			vdrop(vp);
918			goto next_iter_mntunlocked;
919		}
920		KASSERT((vp->v_iflag & VI_DOOMED) == 0,
921		    ("VI_DOOMED unexpectedly detected in vlrureclaim()"));
922		atomic_add_long(&recycles_count, 1);
923		vgonel(vp);
924		VOP_UNLOCK(vp, 0);
925		vdropl(vp);
926		done++;
927next_iter_mntunlocked:
928		if (!should_yield())
929			goto relock_mnt;
930		goto yield;
931next_iter:
932		if (!should_yield())
933			continue;
934		MNT_IUNLOCK(mp);
935yield:
936		kern_yield(PRI_USER);
937relock_mnt:
938		MNT_ILOCK(mp);
939	}
940	MNT_IUNLOCK(mp);
941	vn_finished_write(mp);
942	return done;
943}
944
945/*
946 * Attempt to reduce the free list by the requested amount.
947 */
948static void
949vnlru_free(int count)
950{
951	struct vnode *vp;
952
953	mtx_assert(&vnode_free_list_mtx, MA_OWNED);
954	for (; count > 0; count--) {
955		vp = TAILQ_FIRST(&vnode_free_list);
956		/*
957		 * The list can be modified while the free_list_mtx
958		 * has been dropped and vp could be NULL here.
959		 */
960		if (!vp)
961			break;
962		VNASSERT(vp->v_op != NULL, vp,
963		    ("vnlru_free: vnode already reclaimed."));
964		KASSERT((vp->v_iflag & VI_FREE) != 0,
965		    ("Removing vnode not on freelist"));
966		KASSERT((vp->v_iflag & VI_ACTIVE) == 0,
967		    ("Mangling active vnode"));
968		TAILQ_REMOVE(&vnode_free_list, vp, v_actfreelist);
969		/*
970		 * Don't recycle if we can't get the interlock.
971		 */
972		if (!VI_TRYLOCK(vp)) {
973			TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_actfreelist);
974			continue;
975		}
976		VNASSERT((vp->v_iflag & VI_FREE) != 0 && vp->v_holdcnt == 0,
977		    vp, ("vp inconsistent on freelist"));
978
979		/*
980		 * The clear of VI_FREE prevents activation of the
981		 * vnode.  There is no sense in putting the vnode on
982		 * the mount point active list, only to remove it
983		 * later during recycling.  Inline the relevant part
984		 * of vholdl(), to avoid triggering assertions or
985		 * activating.
986		 */
987		freevnodes--;
988		vp->v_iflag &= ~VI_FREE;
989		refcount_acquire(&vp->v_holdcnt);
990
991		mtx_unlock(&vnode_free_list_mtx);
992		VI_UNLOCK(vp);
993		vtryrecycle(vp);
994		/*
995		 * If the recycled succeeded this vdrop will actually free
996		 * the vnode.  If not it will simply place it back on
997		 * the free list.
998		 */
999		vdrop(vp);
1000		mtx_lock(&vnode_free_list_mtx);
1001	}
1002}
1003
1004/* XXX some names and initialization are bad for limits and watermarks. */
1005static int
1006vspace(void)
1007{
1008	int space;
1009
1010	gapvnodes = imax(desiredvnodes - wantfreevnodes, 100);
1011	vhiwat = gapvnodes / 11; /* 9% -- just under the 10% in vlrureclaim() */
1012	vlowat = vhiwat / 2;
1013	if (numvnodes > desiredvnodes)
1014		return (0);
1015	space = desiredvnodes - numvnodes;
1016	if (freevnodes > wantfreevnodes)
1017		space += freevnodes - wantfreevnodes;
1018	return (space);
1019}
1020
1021/*
1022 * Attempt to recycle vnodes in a context that is always safe to block.
1023 * Calling vlrurecycle() from the bowels of filesystem code has some
1024 * interesting deadlock problems.
1025 */
1026static struct proc *vnlruproc;
1027static int vnlruproc_sig;
1028
1029static void
1030vnlru_proc(void)
1031{
1032	struct mount *mp, *nmp;
1033	unsigned long ofreevnodes, onumvnodes;
1034	int done, force, reclaim_nc_src, trigger, usevnodes;
1035
1036	EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, vnlruproc,
1037	    SHUTDOWN_PRI_FIRST);
1038
1039	force = 0;
1040	for (;;) {
1041		kproc_suspend_check(vnlruproc);
1042		mtx_lock(&vnode_free_list_mtx);
1043		/*
1044		 * If numvnodes is too large (due to desiredvnodes being
1045		 * adjusted using its sysctl, or emergency growth), first
1046		 * try to reduce it by discarding from the free list.
1047		 */
1048		if (numvnodes > desiredvnodes && freevnodes > 0)
1049			vnlru_free(ulmin(numvnodes - desiredvnodes,
1050			    freevnodes));
1051		/*
1052		 * Sleep if the vnode cache is in a good state.  This is
1053		 * when it is not over-full and has space for about a 4%
1054		 * or 9% expansion (by growing its size or inexcessively
1055		 * reducing its free list).  Otherwise, try to reclaim
1056		 * space for a 10% expansion.
1057		 */
1058		if (vstir && force == 0) {
1059			force = 1;
1060			vstir = 0;
1061		}
1062		if (vspace() >= vlowat && force == 0) {
1063			vnlruproc_sig = 0;
1064			wakeup(&vnlruproc_sig);
1065			msleep(vnlruproc, &vnode_free_list_mtx,
1066			    PVFS|PDROP, "vlruwt", hz);
1067			continue;
1068		}
1069		mtx_unlock(&vnode_free_list_mtx);
1070		done = 0;
1071		ofreevnodes = freevnodes;
1072		onumvnodes = numvnodes;
1073		/*
1074		 * Calculate parameters for recycling.  These are the same
1075		 * throughout the loop to give some semblance of fairness.
1076		 * The trigger point is to avoid recycling vnodes with lots
1077		 * of resident pages.  We aren't trying to free memory; we
1078		 * are trying to recycle or at least free vnodes.
1079		 */
1080		if (numvnodes <= desiredvnodes)
1081			usevnodes = numvnodes - freevnodes;
1082		else
1083			usevnodes = numvnodes;
1084		if (usevnodes <= 0)
1085			usevnodes = 1;
1086		/*
1087		 * The trigger value is is chosen to give a conservatively
1088		 * large value to ensure that it alone doesn't prevent
1089		 * making progress.  The value can easily be so large that
1090		 * it is effectively infinite in some congested and
1091		 * misconfigured cases, and this is necessary.  Normally
1092		 * it is about 8 to 100 (pages), which is quite large.
1093		 */
1094		trigger = vm_cnt.v_page_count * 2 / usevnodes;
1095		if (force < 2)
1096			trigger = vsmalltrigger;
1097		reclaim_nc_src = force >= 3;
1098		mtx_lock(&mountlist_mtx);
1099		for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
1100			if (vfs_busy(mp, MBF_NOWAIT | MBF_MNTLSTLOCK)) {
1101				nmp = TAILQ_NEXT(mp, mnt_list);
1102				continue;
1103			}
1104			done += vlrureclaim(mp, reclaim_nc_src, trigger);
1105			mtx_lock(&mountlist_mtx);
1106			nmp = TAILQ_NEXT(mp, mnt_list);
1107			vfs_unbusy(mp);
1108		}
1109		mtx_unlock(&mountlist_mtx);
1110		if (onumvnodes > desiredvnodes && numvnodes <= desiredvnodes)
1111			uma_reclaim();
1112		if (done == 0) {
1113			if (force == 0 || force == 1) {
1114				force = 2;
1115				continue;
1116			}
1117			if (force == 2) {
1118				force = 3;
1119				continue;
1120			}
1121			force = 0;
1122			vnlru_nowhere++;
1123			tsleep(vnlruproc, PPAUSE, "vlrup", hz * 3);
1124		} else
1125			kern_yield(PRI_USER);
1126		/*
1127		 * After becoming active to expand above low water, keep
1128		 * active until above high water.
1129		 */
1130		force = vspace() < vhiwat;
1131	}
1132}
1133
1134static struct kproc_desc vnlru_kp = {
1135	"vnlru",
1136	vnlru_proc,
1137	&vnlruproc
1138};
1139SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start,
1140    &vnlru_kp);
1141
1142/*
1143 * Routines having to do with the management of the vnode table.
1144 */
1145
1146/*
1147 * Try to recycle a freed vnode.  We abort if anyone picks up a reference
1148 * before we actually vgone().  This function must be called with the vnode
1149 * held to prevent the vnode from being returned to the free list midway
1150 * through vgone().
1151 */
1152static int
1153vtryrecycle(struct vnode *vp)
1154{
1155	struct mount *vnmp;
1156
1157	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
1158	VNASSERT(vp->v_holdcnt, vp,
1159	    ("vtryrecycle: Recycling vp %p without a reference.", vp));
1160	/*
1161	 * This vnode may found and locked via some other list, if so we
1162	 * can't recycle it yet.
1163	 */
1164	if (VOP_LOCK(vp, LK_EXCLUSIVE | LK_NOWAIT) != 0) {
1165		CTR2(KTR_VFS,
1166		    "%s: impossible to recycle, vp %p lock is already held",
1167		    __func__, vp);
1168		return (EWOULDBLOCK);
1169	}
1170	/*
1171	 * Don't recycle if its filesystem is being suspended.
1172	 */
1173	if (vn_start_write(vp, &vnmp, V_NOWAIT) != 0) {
1174		VOP_UNLOCK(vp, 0);
1175		CTR2(KTR_VFS,
1176		    "%s: impossible to recycle, cannot start the write for %p",
1177		    __func__, vp);
1178		return (EBUSY);
1179	}
1180	/*
1181	 * If we got this far, we need to acquire the interlock and see if
1182	 * anyone picked up this vnode from another list.  If not, we will
1183	 * mark it with DOOMED via vgonel() so that anyone who does find it
1184	 * will skip over it.
1185	 */
1186	VI_LOCK(vp);
1187	if (vp->v_usecount) {
1188		VOP_UNLOCK(vp, LK_INTERLOCK);
1189		vn_finished_write(vnmp);
1190		CTR2(KTR_VFS,
1191		    "%s: impossible to recycle, %p is already referenced",
1192		    __func__, vp);
1193		return (EBUSY);
1194	}
1195	if ((vp->v_iflag & VI_DOOMED) == 0) {
1196		atomic_add_long(&recycles_count, 1);
1197		vgonel(vp);
1198	}
1199	VOP_UNLOCK(vp, LK_INTERLOCK);
1200	vn_finished_write(vnmp);
1201	return (0);
1202}
1203
1204static void
1205vcheckspace(void)
1206{
1207
1208	if (vspace() < vlowat && vnlruproc_sig == 0) {
1209		vnlruproc_sig = 1;
1210		wakeup(vnlruproc);
1211	}
1212}
1213
1214/*
1215 * Wait if necessary for space for a new vnode.
1216 */
1217static int
1218getnewvnode_wait(int suspended)
1219{
1220
1221	mtx_assert(&vnode_free_list_mtx, MA_OWNED);
1222	if (numvnodes >= desiredvnodes) {
1223		if (suspended) {
1224			/*
1225			 * The file system is being suspended.  We cannot
1226			 * risk a deadlock here, so allow allocation of
1227			 * another vnode even if this would give too many.
1228			 */
1229			return (0);
1230		}
1231		if (vnlruproc_sig == 0) {
1232			vnlruproc_sig = 1;	/* avoid unnecessary wakeups */
1233			wakeup(vnlruproc);
1234		}
1235		msleep(&vnlruproc_sig, &vnode_free_list_mtx, PVFS,
1236		    "vlruwk", hz);
1237	}
1238	/* Post-adjust like the pre-adjust in getnewvnode(). */
1239	if (numvnodes + 1 > desiredvnodes && freevnodes > 1)
1240		vnlru_free(1);
1241	return (numvnodes >= desiredvnodes ? ENFILE : 0);
1242}
1243
1244/*
1245 * This hack is fragile, and probably not needed any more now that the
1246 * watermark handling works.
1247 */
1248void
1249getnewvnode_reserve(u_int count)
1250{
1251	struct thread *td;
1252
1253	/* Pre-adjust like the pre-adjust in getnewvnode(), with any count. */
1254	/* XXX no longer so quick, but this part is not racy. */
1255	mtx_lock(&vnode_free_list_mtx);
1256	if (numvnodes + count > desiredvnodes && freevnodes > wantfreevnodes)
1257		vnlru_free(ulmin(numvnodes + count - desiredvnodes,
1258		    freevnodes - wantfreevnodes));
1259	mtx_unlock(&vnode_free_list_mtx);
1260
1261	td = curthread;
1262	/* First try to be quick and racy. */
1263	if (atomic_fetchadd_long(&numvnodes, count) + count <= desiredvnodes) {
1264		td->td_vp_reserv += count;
1265		vcheckspace();	/* XXX no longer so quick, but more racy */
1266		return;
1267	} else
1268		atomic_subtract_long(&numvnodes, count);
1269
1270	mtx_lock(&vnode_free_list_mtx);
1271	while (count > 0) {
1272		if (getnewvnode_wait(0) == 0) {
1273			count--;
1274			td->td_vp_reserv++;
1275			atomic_add_long(&numvnodes, 1);
1276		}
1277	}
1278	vcheckspace();
1279	mtx_unlock(&vnode_free_list_mtx);
1280}
1281
1282/*
1283 * This hack is fragile, especially if desiredvnodes or wantvnodes are
1284 * misconfgured or changed significantly.  Reducing desiredvnodes below
1285 * the reserved amount should cause bizarre behaviour like reducing it
1286 * below the number of active vnodes -- the system will try to reduce
1287 * numvnodes to match, but should fail, so the subtraction below should
1288 * not overflow.
1289 */
1290void
1291getnewvnode_drop_reserve(void)
1292{
1293	struct thread *td;
1294
1295	td = curthread;
1296	atomic_subtract_long(&numvnodes, td->td_vp_reserv);
1297	td->td_vp_reserv = 0;
1298}
1299
1300/*
1301 * Return the next vnode from the free list.
1302 */
1303int
1304getnewvnode(const char *tag, struct mount *mp, struct vop_vector *vops,
1305    struct vnode **vpp)
1306{
1307	struct vnode *vp;
1308	struct thread *td;
1309	struct lock_object *lo;
1310	static int cyclecount;
1311	int error;
1312
1313	CTR3(KTR_VFS, "%s: mp %p with tag %s", __func__, mp, tag);
1314	vp = NULL;
1315	td = curthread;
1316	if (td->td_vp_reserv > 0) {
1317		td->td_vp_reserv -= 1;
1318		goto alloc;
1319	}
1320	mtx_lock(&vnode_free_list_mtx);
1321	if (numvnodes < desiredvnodes)
1322		cyclecount = 0;
1323	else if (cyclecount++ >= freevnodes) {
1324		cyclecount = 0;
1325		vstir = 1;
1326	}
1327	/*
1328	 * Grow the vnode cache if it will not be above its target max
1329	 * after growing.  Otherwise, if the free list is nonempty, try
1330	 * to reclaim 1 item from it before growing the cache (possibly
1331	 * above its target max if the reclamation failed or is delayed).
1332	 * Otherwise, wait for some space.  In all cases, schedule
1333	 * vnlru_proc() if we are getting short of space.  The watermarks
1334	 * should be chosen so that we never wait or even reclaim from
1335	 * the free list to below its target minimum.
1336	 */
1337	if (numvnodes + 1 <= desiredvnodes)
1338		;
1339	else if (freevnodes > 0)
1340		vnlru_free(1);
1341	else {
1342		error = getnewvnode_wait(mp != NULL && (mp->mnt_kern_flag &
1343		    MNTK_SUSPEND));
1344#if 0	/* XXX Not all VFS_VGET/ffs_vget callers check returns. */
1345		if (error != 0) {
1346			mtx_unlock(&vnode_free_list_mtx);
1347			return (error);
1348		}
1349#endif
1350	}
1351	vcheckspace();
1352	atomic_add_long(&numvnodes, 1);
1353	mtx_unlock(&vnode_free_list_mtx);
1354alloc:
1355	atomic_add_long(&vnodes_created, 1);
1356	vp = (struct vnode *) uma_zalloc(vnode_zone, M_WAITOK);
1357	/*
1358	 * Locks are given the generic name "vnode" when created.
1359	 * Follow the historic practice of using the filesystem
1360	 * name when they allocated, e.g., "zfs", "ufs", "nfs, etc.
1361	 *
1362	 * Locks live in a witness group keyed on their name. Thus,
1363	 * when a lock is renamed, it must also move from the witness
1364	 * group of its old name to the witness group of its new name.
1365	 *
1366	 * The change only needs to be made when the vnode moves
1367	 * from one filesystem type to another. We ensure that each
1368	 * filesystem use a single static name pointer for its tag so
1369	 * that we can compare pointers rather than doing a strcmp().
1370	 */
1371	lo = &vp->v_vnlock->lock_object;
1372	if (lo->lo_name != tag) {
1373		lo->lo_name = tag;
1374		WITNESS_DESTROY(lo);
1375		WITNESS_INIT(lo, tag);
1376	}
1377	/*
1378	 * By default, don't allow shared locks unless filesystems opt-in.
1379	 */
1380	vp->v_vnlock->lock_object.lo_flags |= LK_NOSHARE;
1381	/*
1382	 * Finalize various vnode identity bits.
1383	 */
1384	KASSERT(vp->v_object == NULL, ("stale v_object %p", vp));
1385	KASSERT(vp->v_lockf == NULL, ("stale v_lockf %p", vp));
1386	KASSERT(vp->v_pollinfo == NULL, ("stale v_pollinfo %p", vp));
1387	vp->v_type = VNON;
1388	vp->v_tag = tag;
1389	vp->v_op = vops;
1390	v_init_counters(vp);
1391	vp->v_bufobj.bo_ops = &buf_ops_bio;
1392#ifdef MAC
1393	mac_vnode_init(vp);
1394	if (mp != NULL && (mp->mnt_flag & MNT_MULTILABEL) == 0)
1395		mac_vnode_associate_singlelabel(mp, vp);
1396	else if (mp == NULL && vops != &dead_vnodeops)
1397		printf("NULL mp in getnewvnode()\n");
1398#endif
1399	if (mp != NULL) {
1400		vp->v_bufobj.bo_bsize = mp->mnt_stat.f_iosize;
1401		if ((mp->mnt_kern_flag & MNTK_NOKNOTE) != 0)
1402			vp->v_vflag |= VV_NOKNOTE;
1403	}
1404
1405	/*
1406	 * For the filesystems which do not use vfs_hash_insert(),
1407	 * still initialize v_hash to have vfs_hash_index() useful.
1408	 * E.g., nullfs uses vfs_hash_index() on the lower vnode for
1409	 * its own hashing.
1410	 */
1411	vp->v_hash = (uintptr_t)vp >> vnsz2log;
1412
1413	*vpp = vp;
1414	return (0);
1415}
1416
1417/*
1418 * Delete from old mount point vnode list, if on one.
1419 */
1420static void
1421delmntque(struct vnode *vp)
1422{
1423	struct mount *mp;
1424	int active;
1425
1426	mp = vp->v_mount;
1427	if (mp == NULL)
1428		return;
1429	MNT_ILOCK(mp);
1430	VI_LOCK(vp);
1431	KASSERT(mp->mnt_activevnodelistsize <= mp->mnt_nvnodelistsize,
1432	    ("Active vnode list size %d > Vnode list size %d",
1433	     mp->mnt_activevnodelistsize, mp->mnt_nvnodelistsize));
1434	active = vp->v_iflag & VI_ACTIVE;
1435	vp->v_iflag &= ~VI_ACTIVE;
1436	if (active) {
1437		mtx_lock(&vnode_free_list_mtx);
1438		TAILQ_REMOVE(&mp->mnt_activevnodelist, vp, v_actfreelist);
1439		mp->mnt_activevnodelistsize--;
1440		mtx_unlock(&vnode_free_list_mtx);
1441	}
1442	vp->v_mount = NULL;
1443	VI_UNLOCK(vp);
1444	VNASSERT(mp->mnt_nvnodelistsize > 0, vp,
1445		("bad mount point vnode list size"));
1446	TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
1447	mp->mnt_nvnodelistsize--;
1448	MNT_REL(mp);
1449	MNT_IUNLOCK(mp);
1450}
1451
1452static void
1453insmntque_stddtr(struct vnode *vp, void *dtr_arg)
1454{
1455
1456	vp->v_data = NULL;
1457	vp->v_op = &dead_vnodeops;
1458	vgone(vp);
1459	vput(vp);
1460}
1461
1462/*
1463 * Insert into list of vnodes for the new mount point, if available.
1464 */
1465int
1466insmntque1(struct vnode *vp, struct mount *mp,
1467	void (*dtr)(struct vnode *, void *), void *dtr_arg)
1468{
1469
1470	KASSERT(vp->v_mount == NULL,
1471		("insmntque: vnode already on per mount vnode list"));
1472	VNASSERT(mp != NULL, vp, ("Don't call insmntque(foo, NULL)"));
1473	ASSERT_VOP_ELOCKED(vp, "insmntque: non-locked vp");
1474
1475	/*
1476	 * We acquire the vnode interlock early to ensure that the
1477	 * vnode cannot be recycled by another process releasing a
1478	 * holdcnt on it before we get it on both the vnode list
1479	 * and the active vnode list. The mount mutex protects only
1480	 * manipulation of the vnode list and the vnode freelist
1481	 * mutex protects only manipulation of the active vnode list.
1482	 * Hence the need to hold the vnode interlock throughout.
1483	 */
1484	MNT_ILOCK(mp);
1485	VI_LOCK(vp);
1486	if (((mp->mnt_kern_flag & MNTK_NOINSMNTQ) != 0 &&
1487	    ((mp->mnt_kern_flag & MNTK_UNMOUNTF) != 0 ||
1488	    mp->mnt_nvnodelistsize == 0)) &&
1489	    (vp->v_vflag & VV_FORCEINSMQ) == 0) {
1490		VI_UNLOCK(vp);
1491		MNT_IUNLOCK(mp);
1492		if (dtr != NULL)
1493			dtr(vp, dtr_arg);
1494		return (EBUSY);
1495	}
1496	vp->v_mount = mp;
1497	MNT_REF(mp);
1498	TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
1499	VNASSERT(mp->mnt_nvnodelistsize >= 0, vp,
1500		("neg mount point vnode list size"));
1501	mp->mnt_nvnodelistsize++;
1502	KASSERT((vp->v_iflag & VI_ACTIVE) == 0,
1503	    ("Activating already active vnode"));
1504	vp->v_iflag |= VI_ACTIVE;
1505	mtx_lock(&vnode_free_list_mtx);
1506	TAILQ_INSERT_HEAD(&mp->mnt_activevnodelist, vp, v_actfreelist);
1507	mp->mnt_activevnodelistsize++;
1508	mtx_unlock(&vnode_free_list_mtx);
1509	VI_UNLOCK(vp);
1510	MNT_IUNLOCK(mp);
1511	return (0);
1512}
1513
1514int
1515insmntque(struct vnode *vp, struct mount *mp)
1516{
1517
1518	return (insmntque1(vp, mp, insmntque_stddtr, NULL));
1519}
1520
1521/*
1522 * Flush out and invalidate all buffers associated with a bufobj
1523 * Called with the underlying object locked.
1524 */
1525int
1526bufobj_invalbuf(struct bufobj *bo, int flags, int slpflag, int slptimeo)
1527{
1528	int error;
1529
1530	BO_LOCK(bo);
1531	if (flags & V_SAVE) {
1532		error = bufobj_wwait(bo, slpflag, slptimeo);
1533		if (error) {
1534			BO_UNLOCK(bo);
1535			return (error);
1536		}
1537		if (bo->bo_dirty.bv_cnt > 0) {
1538			BO_UNLOCK(bo);
1539			if ((error = BO_SYNC(bo, MNT_WAIT)) != 0)
1540				return (error);
1541			/*
1542			 * XXX We could save a lock/unlock if this was only
1543			 * enabled under INVARIANTS
1544			 */
1545			BO_LOCK(bo);
1546			if (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0)
1547				panic("vinvalbuf: dirty bufs");
1548		}
1549	}
1550	/*
1551	 * If you alter this loop please notice that interlock is dropped and
1552	 * reacquired in flushbuflist.  Special care is needed to ensure that
1553	 * no race conditions occur from this.
1554	 */
1555	do {
1556		error = flushbuflist(&bo->bo_clean,
1557		    flags, bo, slpflag, slptimeo);
1558		if (error == 0 && !(flags & V_CLEANONLY))
1559			error = flushbuflist(&bo->bo_dirty,
1560			    flags, bo, slpflag, slptimeo);
1561		if (error != 0 && error != EAGAIN) {
1562			BO_UNLOCK(bo);
1563			return (error);
1564		}
1565	} while (error != 0);
1566
1567	/*
1568	 * Wait for I/O to complete.  XXX needs cleaning up.  The vnode can
1569	 * have write I/O in-progress but if there is a VM object then the
1570	 * VM object can also have read-I/O in-progress.
1571	 */
1572	do {
1573		bufobj_wwait(bo, 0, 0);
1574		BO_UNLOCK(bo);
1575		if (bo->bo_object != NULL) {
1576			VM_OBJECT_WLOCK(bo->bo_object);
1577			vm_object_pip_wait(bo->bo_object, "bovlbx");
1578			VM_OBJECT_WUNLOCK(bo->bo_object);
1579		}
1580		BO_LOCK(bo);
1581	} while (bo->bo_numoutput > 0);
1582	BO_UNLOCK(bo);
1583
1584	/*
1585	 * Destroy the copy in the VM cache, too.
1586	 */
1587	if (bo->bo_object != NULL &&
1588	    (flags & (V_ALT | V_NORMAL | V_CLEANONLY)) == 0) {
1589		VM_OBJECT_WLOCK(bo->bo_object);
1590		vm_object_page_remove(bo->bo_object, 0, 0, (flags & V_SAVE) ?
1591		    OBJPR_CLEANONLY : 0);
1592		VM_OBJECT_WUNLOCK(bo->bo_object);
1593	}
1594
1595#ifdef INVARIANTS
1596	BO_LOCK(bo);
1597	if ((flags & (V_ALT | V_NORMAL | V_CLEANONLY)) == 0 &&
1598	    (bo->bo_dirty.bv_cnt > 0 || bo->bo_clean.bv_cnt > 0))
1599		panic("vinvalbuf: flush failed");
1600	BO_UNLOCK(bo);
1601#endif
1602	return (0);
1603}
1604
1605/*
1606 * Flush out and invalidate all buffers associated with a vnode.
1607 * Called with the underlying object locked.
1608 */
1609int
1610vinvalbuf(struct vnode *vp, int flags, int slpflag, int slptimeo)
1611{
1612
1613	CTR3(KTR_VFS, "%s: vp %p with flags %d", __func__, vp, flags);
1614	ASSERT_VOP_LOCKED(vp, "vinvalbuf");
1615	if (vp->v_object != NULL && vp->v_object->handle != vp)
1616		return (0);
1617	return (bufobj_invalbuf(&vp->v_bufobj, flags, slpflag, slptimeo));
1618}
1619
1620/*
1621 * Flush out buffers on the specified list.
1622 *
1623 */
1624static int
1625flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo, int slpflag,
1626    int slptimeo)
1627{
1628	struct buf *bp, *nbp;
1629	int retval, error;
1630	daddr_t lblkno;
1631	b_xflags_t xflags;
1632
1633	ASSERT_BO_WLOCKED(bo);
1634
1635	retval = 0;
1636	TAILQ_FOREACH_SAFE(bp, &bufv->bv_hd, b_bobufs, nbp) {
1637		if (((flags & V_NORMAL) && (bp->b_xflags & BX_ALTDATA)) ||
1638		    ((flags & V_ALT) && (bp->b_xflags & BX_ALTDATA) == 0)) {
1639			continue;
1640		}
1641		lblkno = 0;
1642		xflags = 0;
1643		if (nbp != NULL) {
1644			lblkno = nbp->b_lblkno;
1645			xflags = nbp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN);
1646		}
1647		retval = EAGAIN;
1648		error = BUF_TIMELOCK(bp,
1649		    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, BO_LOCKPTR(bo),
1650		    "flushbuf", slpflag, slptimeo);
1651		if (error) {
1652			BO_LOCK(bo);
1653			return (error != ENOLCK ? error : EAGAIN);
1654		}
1655		KASSERT(bp->b_bufobj == bo,
1656		    ("bp %p wrong b_bufobj %p should be %p",
1657		    bp, bp->b_bufobj, bo));
1658		/*
1659		 * XXX Since there are no node locks for NFS, I
1660		 * believe there is a slight chance that a delayed
1661		 * write will occur while sleeping just above, so
1662		 * check for it.
1663		 */
1664		if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) &&
1665		    (flags & V_SAVE)) {
1666			bremfree(bp);
1667			bp->b_flags |= B_ASYNC;
1668			bwrite(bp);
1669			BO_LOCK(bo);
1670			return (EAGAIN);	/* XXX: why not loop ? */
1671		}
1672		bremfree(bp);
1673		bp->b_flags |= (B_INVAL | B_RELBUF);
1674		bp->b_flags &= ~B_ASYNC;
1675		brelse(bp);
1676		BO_LOCK(bo);
1677		nbp = gbincore(bo, lblkno);
1678		if (nbp == NULL || (nbp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN))
1679		    != xflags)
1680			break;			/* nbp invalid */
1681	}
1682	return (retval);
1683}
1684
1685int
1686bnoreuselist(struct bufv *bufv, struct bufobj *bo, daddr_t startn, daddr_t endn)
1687{
1688	struct buf *bp;
1689	int error;
1690	daddr_t lblkno;
1691
1692	ASSERT_BO_LOCKED(bo);
1693
1694	for (lblkno = startn;;) {
1695again:
1696		bp = BUF_PCTRIE_LOOKUP_GE(&bufv->bv_root, lblkno);
1697		if (bp == NULL || bp->b_lblkno >= endn ||
1698		    bp->b_lblkno < startn)
1699			break;
1700		error = BUF_TIMELOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL |
1701		    LK_INTERLOCK, BO_LOCKPTR(bo), "brlsfl", 0, 0);
1702		if (error != 0) {
1703			BO_RLOCK(bo);
1704			if (error == ENOLCK)
1705				goto again;
1706			return (error);
1707		}
1708		KASSERT(bp->b_bufobj == bo,
1709		    ("bp %p wrong b_bufobj %p should be %p",
1710		    bp, bp->b_bufobj, bo));
1711		lblkno = bp->b_lblkno + 1;
1712		if ((bp->b_flags & B_MANAGED) == 0)
1713			bremfree(bp);
1714		bp->b_flags |= B_RELBUF;
1715		/*
1716		 * In the VMIO case, use the B_NOREUSE flag to hint that the
1717		 * pages backing each buffer in the range are unlikely to be
1718		 * reused.  Dirty buffers will have the hint applied once
1719		 * they've been written.
1720		 */
1721		if (bp->b_vp->v_object != NULL)
1722			bp->b_flags |= B_NOREUSE;
1723		brelse(bp);
1724		BO_RLOCK(bo);
1725	}
1726	return (0);
1727}
1728
1729/*
1730 * Truncate a file's buffer and pages to a specified length.  This
1731 * is in lieu of the old vinvalbuf mechanism, which performed unneeded
1732 * sync activity.
1733 */
1734int
1735vtruncbuf(struct vnode *vp, struct ucred *cred, off_t length, int blksize)
1736{
1737	struct buf *bp, *nbp;
1738	int anyfreed;
1739	int trunclbn;
1740	struct bufobj *bo;
1741
1742	CTR5(KTR_VFS, "%s: vp %p with cred %p and block %d:%ju", __func__,
1743	    vp, cred, blksize, (uintmax_t)length);
1744
1745	/*
1746	 * Round up to the *next* lbn.
1747	 */
1748	trunclbn = howmany(length, blksize);
1749
1750	ASSERT_VOP_LOCKED(vp, "vtruncbuf");
1751restart:
1752	bo = &vp->v_bufobj;
1753	BO_LOCK(bo);
1754	anyfreed = 1;
1755	for (;anyfreed;) {
1756		anyfreed = 0;
1757		TAILQ_FOREACH_SAFE(bp, &bo->bo_clean.bv_hd, b_bobufs, nbp) {
1758			if (bp->b_lblkno < trunclbn)
1759				continue;
1760			if (BUF_LOCK(bp,
1761			    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
1762			    BO_LOCKPTR(bo)) == ENOLCK)
1763				goto restart;
1764
1765			bremfree(bp);
1766			bp->b_flags |= (B_INVAL | B_RELBUF);
1767			bp->b_flags &= ~B_ASYNC;
1768			brelse(bp);
1769			anyfreed = 1;
1770
1771			BO_LOCK(bo);
1772			if (nbp != NULL &&
1773			    (((nbp->b_xflags & BX_VNCLEAN) == 0) ||
1774			    (nbp->b_vp != vp) ||
1775			    (nbp->b_flags & B_DELWRI))) {
1776				BO_UNLOCK(bo);
1777				goto restart;
1778			}
1779		}
1780
1781		TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
1782			if (bp->b_lblkno < trunclbn)
1783				continue;
1784			if (BUF_LOCK(bp,
1785			    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
1786			    BO_LOCKPTR(bo)) == ENOLCK)
1787				goto restart;
1788			bremfree(bp);
1789			bp->b_flags |= (B_INVAL | B_RELBUF);
1790			bp->b_flags &= ~B_ASYNC;
1791			brelse(bp);
1792			anyfreed = 1;
1793
1794			BO_LOCK(bo);
1795			if (nbp != NULL &&
1796			    (((nbp->b_xflags & BX_VNDIRTY) == 0) ||
1797			    (nbp->b_vp != vp) ||
1798			    (nbp->b_flags & B_DELWRI) == 0)) {
1799				BO_UNLOCK(bo);
1800				goto restart;
1801			}
1802		}
1803	}
1804
1805	if (length > 0) {
1806restartsync:
1807		TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
1808			if (bp->b_lblkno > 0)
1809				continue;
1810			/*
1811			 * Since we hold the vnode lock this should only
1812			 * fail if we're racing with the buf daemon.
1813			 */
1814			if (BUF_LOCK(bp,
1815			    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
1816			    BO_LOCKPTR(bo)) == ENOLCK) {
1817				goto restart;
1818			}
1819			VNASSERT((bp->b_flags & B_DELWRI), vp,
1820			    ("buf(%p) on dirty queue without DELWRI", bp));
1821
1822			bremfree(bp);
1823			bawrite(bp);
1824			BO_LOCK(bo);
1825			goto restartsync;
1826		}
1827	}
1828
1829	bufobj_wwait(bo, 0, 0);
1830	BO_UNLOCK(bo);
1831	vnode_pager_setsize(vp, length);
1832
1833	return (0);
1834}
1835
1836static void
1837buf_vlist_remove(struct buf *bp)
1838{
1839	struct bufv *bv;
1840
1841	KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
1842	ASSERT_BO_WLOCKED(bp->b_bufobj);
1843	KASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) !=
1844	    (BX_VNDIRTY|BX_VNCLEAN),
1845	    ("buf_vlist_remove: Buf %p is on two lists", bp));
1846	if (bp->b_xflags & BX_VNDIRTY)
1847		bv = &bp->b_bufobj->bo_dirty;
1848	else
1849		bv = &bp->b_bufobj->bo_clean;
1850	BUF_PCTRIE_REMOVE(&bv->bv_root, bp->b_lblkno);
1851	TAILQ_REMOVE(&bv->bv_hd, bp, b_bobufs);
1852	bv->bv_cnt--;
1853	bp->b_xflags &= ~(BX_VNDIRTY | BX_VNCLEAN);
1854}
1855
1856/*
1857 * Add the buffer to the sorted clean or dirty block list.
1858 *
1859 * NOTE: xflags is passed as a constant, optimizing this inline function!
1860 */
1861static void
1862buf_vlist_add(struct buf *bp, struct bufobj *bo, b_xflags_t xflags)
1863{
1864	struct bufv *bv;
1865	struct buf *n;
1866	int error;
1867
1868	ASSERT_BO_WLOCKED(bo);
1869	KASSERT((xflags & BX_VNDIRTY) == 0 || (bo->bo_flag & BO_DEAD) == 0,
1870	    ("dead bo %p", bo));
1871	KASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0,
1872	    ("buf_vlist_add: Buf %p has existing xflags %d", bp, bp->b_xflags));
1873	bp->b_xflags |= xflags;
1874	if (xflags & BX_VNDIRTY)
1875		bv = &bo->bo_dirty;
1876	else
1877		bv = &bo->bo_clean;
1878
1879	/*
1880	 * Keep the list ordered.  Optimize empty list insertion.  Assume
1881	 * we tend to grow at the tail so lookup_le should usually be cheaper
1882	 * than _ge.
1883	 */
1884	if (bv->bv_cnt == 0 ||
1885	    bp->b_lblkno > TAILQ_LAST(&bv->bv_hd, buflists)->b_lblkno)
1886		TAILQ_INSERT_TAIL(&bv->bv_hd, bp, b_bobufs);
1887	else if ((n = BUF_PCTRIE_LOOKUP_LE(&bv->bv_root, bp->b_lblkno)) == NULL)
1888		TAILQ_INSERT_HEAD(&bv->bv_hd, bp, b_bobufs);
1889	else
1890		TAILQ_INSERT_AFTER(&bv->bv_hd, n, bp, b_bobufs);
1891	error = BUF_PCTRIE_INSERT(&bv->bv_root, bp);
1892	if (error)
1893		panic("buf_vlist_add:  Preallocated nodes insufficient.");
1894	bv->bv_cnt++;
1895}
1896
1897/*
1898 * Look up a buffer using the buffer tries.
1899 */
1900struct buf *
1901gbincore(struct bufobj *bo, daddr_t lblkno)
1902{
1903	struct buf *bp;
1904
1905	ASSERT_BO_LOCKED(bo);
1906	bp = BUF_PCTRIE_LOOKUP(&bo->bo_clean.bv_root, lblkno);
1907	if (bp != NULL)
1908		return (bp);
1909	return BUF_PCTRIE_LOOKUP(&bo->bo_dirty.bv_root, lblkno);
1910}
1911
1912/*
1913 * Associate a buffer with a vnode.
1914 */
1915void
1916bgetvp(struct vnode *vp, struct buf *bp)
1917{
1918	struct bufobj *bo;
1919
1920	bo = &vp->v_bufobj;
1921	ASSERT_BO_WLOCKED(bo);
1922	VNASSERT(bp->b_vp == NULL, bp->b_vp, ("bgetvp: not free"));
1923
1924	CTR3(KTR_BUF, "bgetvp(%p) vp %p flags %X", bp, vp, bp->b_flags);
1925	VNASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0, vp,
1926	    ("bgetvp: bp already attached! %p", bp));
1927
1928	vhold(vp);
1929	bp->b_vp = vp;
1930	bp->b_bufobj = bo;
1931	/*
1932	 * Insert onto list for new vnode.
1933	 */
1934	buf_vlist_add(bp, bo, BX_VNCLEAN);
1935}
1936
1937/*
1938 * Disassociate a buffer from a vnode.
1939 */
1940void
1941brelvp(struct buf *bp)
1942{
1943	struct bufobj *bo;
1944	struct vnode *vp;
1945
1946	CTR3(KTR_BUF, "brelvp(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
1947	KASSERT(bp->b_vp != NULL, ("brelvp: NULL"));
1948
1949	/*
1950	 * Delete from old vnode list, if on one.
1951	 */
1952	vp = bp->b_vp;		/* XXX */
1953	bo = bp->b_bufobj;
1954	BO_LOCK(bo);
1955	if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN))
1956		buf_vlist_remove(bp);
1957	else
1958		panic("brelvp: Buffer %p not on queue.", bp);
1959	if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) {
1960		bo->bo_flag &= ~BO_ONWORKLST;
1961		mtx_lock(&sync_mtx);
1962		LIST_REMOVE(bo, bo_synclist);
1963		syncer_worklist_len--;
1964		mtx_unlock(&sync_mtx);
1965	}
1966	bp->b_vp = NULL;
1967	bp->b_bufobj = NULL;
1968	BO_UNLOCK(bo);
1969	vdrop(vp);
1970}
1971
1972/*
1973 * Add an item to the syncer work queue.
1974 */
1975static void
1976vn_syncer_add_to_worklist(struct bufobj *bo, int delay)
1977{
1978	int slot;
1979
1980	ASSERT_BO_WLOCKED(bo);
1981
1982	mtx_lock(&sync_mtx);
1983	if (bo->bo_flag & BO_ONWORKLST)
1984		LIST_REMOVE(bo, bo_synclist);
1985	else {
1986		bo->bo_flag |= BO_ONWORKLST;
1987		syncer_worklist_len++;
1988	}
1989
1990	if (delay > syncer_maxdelay - 2)
1991		delay = syncer_maxdelay - 2;
1992	slot = (syncer_delayno + delay) & syncer_mask;
1993
1994	LIST_INSERT_HEAD(&syncer_workitem_pending[slot], bo, bo_synclist);
1995	mtx_unlock(&sync_mtx);
1996}
1997
1998static int
1999sysctl_vfs_worklist_len(SYSCTL_HANDLER_ARGS)
2000{
2001	int error, len;
2002
2003	mtx_lock(&sync_mtx);
2004	len = syncer_worklist_len - sync_vnode_count;
2005	mtx_unlock(&sync_mtx);
2006	error = SYSCTL_OUT(req, &len, sizeof(len));
2007	return (error);
2008}
2009
2010SYSCTL_PROC(_vfs, OID_AUTO, worklist_len, CTLTYPE_INT | CTLFLAG_RD, NULL, 0,
2011    sysctl_vfs_worklist_len, "I", "Syncer thread worklist length");
2012
2013static struct proc *updateproc;
2014static void sched_sync(void);
2015static struct kproc_desc up_kp = {
2016	"syncer",
2017	sched_sync,
2018	&updateproc
2019};
2020SYSINIT(syncer, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp);
2021
2022static int
2023sync_vnode(struct synclist *slp, struct bufobj **bo, struct thread *td)
2024{
2025	struct vnode *vp;
2026	struct mount *mp;
2027
2028	*bo = LIST_FIRST(slp);
2029	if (*bo == NULL)
2030		return (0);
2031	vp = (*bo)->__bo_vnode;	/* XXX */
2032	if (VOP_ISLOCKED(vp) != 0 || VI_TRYLOCK(vp) == 0)
2033		return (1);
2034	/*
2035	 * We use vhold in case the vnode does not
2036	 * successfully sync.  vhold prevents the vnode from
2037	 * going away when we unlock the sync_mtx so that
2038	 * we can acquire the vnode interlock.
2039	 */
2040	vholdl(vp);
2041	mtx_unlock(&sync_mtx);
2042	VI_UNLOCK(vp);
2043	if (vn_start_write(vp, &mp, V_NOWAIT) != 0) {
2044		vdrop(vp);
2045		mtx_lock(&sync_mtx);
2046		return (*bo == LIST_FIRST(slp));
2047	}
2048	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
2049	(void) VOP_FSYNC(vp, MNT_LAZY, td);
2050	VOP_UNLOCK(vp, 0);
2051	vn_finished_write(mp);
2052	BO_LOCK(*bo);
2053	if (((*bo)->bo_flag & BO_ONWORKLST) != 0) {
2054		/*
2055		 * Put us back on the worklist.  The worklist
2056		 * routine will remove us from our current
2057		 * position and then add us back in at a later
2058		 * position.
2059		 */
2060		vn_syncer_add_to_worklist(*bo, syncdelay);
2061	}
2062	BO_UNLOCK(*bo);
2063	vdrop(vp);
2064	mtx_lock(&sync_mtx);
2065	return (0);
2066}
2067
2068static int first_printf = 1;
2069
2070/*
2071 * System filesystem synchronizer daemon.
2072 */
2073static void
2074sched_sync(void)
2075{
2076	struct synclist *next, *slp;
2077	struct bufobj *bo;
2078	long starttime;
2079	struct thread *td = curthread;
2080	int last_work_seen;
2081	int net_worklist_len;
2082	int syncer_final_iter;
2083	int error;
2084
2085	last_work_seen = 0;
2086	syncer_final_iter = 0;
2087	syncer_state = SYNCER_RUNNING;
2088	starttime = time_uptime;
2089	td->td_pflags |= TDP_NORUNNINGBUF;
2090
2091	EVENTHANDLER_REGISTER(shutdown_pre_sync, syncer_shutdown, td->td_proc,
2092	    SHUTDOWN_PRI_LAST);
2093
2094	mtx_lock(&sync_mtx);
2095	for (;;) {
2096		if (syncer_state == SYNCER_FINAL_DELAY &&
2097		    syncer_final_iter == 0) {
2098			mtx_unlock(&sync_mtx);
2099			kproc_suspend_check(td->td_proc);
2100			mtx_lock(&sync_mtx);
2101		}
2102		net_worklist_len = syncer_worklist_len - sync_vnode_count;
2103		if (syncer_state != SYNCER_RUNNING &&
2104		    starttime != time_uptime) {
2105			if (first_printf) {
2106				printf("\nSyncing disks, vnodes remaining...");
2107				first_printf = 0;
2108			}
2109			printf("%d ", net_worklist_len);
2110		}
2111		starttime = time_uptime;
2112
2113		/*
2114		 * Push files whose dirty time has expired.  Be careful
2115		 * of interrupt race on slp queue.
2116		 *
2117		 * Skip over empty worklist slots when shutting down.
2118		 */
2119		do {
2120			slp = &syncer_workitem_pending[syncer_delayno];
2121			syncer_delayno += 1;
2122			if (syncer_delayno == syncer_maxdelay)
2123				syncer_delayno = 0;
2124			next = &syncer_workitem_pending[syncer_delayno];
2125			/*
2126			 * If the worklist has wrapped since the
2127			 * it was emptied of all but syncer vnodes,
2128			 * switch to the FINAL_DELAY state and run
2129			 * for one more second.
2130			 */
2131			if (syncer_state == SYNCER_SHUTTING_DOWN &&
2132			    net_worklist_len == 0 &&
2133			    last_work_seen == syncer_delayno) {
2134				syncer_state = SYNCER_FINAL_DELAY;
2135				syncer_final_iter = SYNCER_SHUTDOWN_SPEEDUP;
2136			}
2137		} while (syncer_state != SYNCER_RUNNING && LIST_EMPTY(slp) &&
2138		    syncer_worklist_len > 0);
2139
2140		/*
2141		 * Keep track of the last time there was anything
2142		 * on the worklist other than syncer vnodes.
2143		 * Return to the SHUTTING_DOWN state if any
2144		 * new work appears.
2145		 */
2146		if (net_worklist_len > 0 || syncer_state == SYNCER_RUNNING)
2147			last_work_seen = syncer_delayno;
2148		if (net_worklist_len > 0 && syncer_state == SYNCER_FINAL_DELAY)
2149			syncer_state = SYNCER_SHUTTING_DOWN;
2150		while (!LIST_EMPTY(slp)) {
2151			error = sync_vnode(slp, &bo, td);
2152			if (error == 1) {
2153				LIST_REMOVE(bo, bo_synclist);
2154				LIST_INSERT_HEAD(next, bo, bo_synclist);
2155				continue;
2156			}
2157
2158			if (first_printf == 0) {
2159				/*
2160				 * Drop the sync mutex, because some watchdog
2161				 * drivers need to sleep while patting
2162				 */
2163				mtx_unlock(&sync_mtx);
2164				wdog_kern_pat(WD_LASTVAL);
2165				mtx_lock(&sync_mtx);
2166			}
2167
2168		}
2169		if (syncer_state == SYNCER_FINAL_DELAY && syncer_final_iter > 0)
2170			syncer_final_iter--;
2171		/*
2172		 * The variable rushjob allows the kernel to speed up the
2173		 * processing of the filesystem syncer process. A rushjob
2174		 * value of N tells the filesystem syncer to process the next
2175		 * N seconds worth of work on its queue ASAP. Currently rushjob
2176		 * is used by the soft update code to speed up the filesystem
2177		 * syncer process when the incore state is getting so far
2178		 * ahead of the disk that the kernel memory pool is being
2179		 * threatened with exhaustion.
2180		 */
2181		if (rushjob > 0) {
2182			rushjob -= 1;
2183			continue;
2184		}
2185		/*
2186		 * Just sleep for a short period of time between
2187		 * iterations when shutting down to allow some I/O
2188		 * to happen.
2189		 *
2190		 * If it has taken us less than a second to process the
2191		 * current work, then wait. Otherwise start right over
2192		 * again. We can still lose time if any single round
2193		 * takes more than two seconds, but it does not really
2194		 * matter as we are just trying to generally pace the
2195		 * filesystem activity.
2196		 */
2197		if (syncer_state != SYNCER_RUNNING ||
2198		    time_uptime == starttime) {
2199			thread_lock(td);
2200			sched_prio(td, PPAUSE);
2201			thread_unlock(td);
2202		}
2203		if (syncer_state != SYNCER_RUNNING)
2204			cv_timedwait(&sync_wakeup, &sync_mtx,
2205			    hz / SYNCER_SHUTDOWN_SPEEDUP);
2206		else if (time_uptime == starttime)
2207			cv_timedwait(&sync_wakeup, &sync_mtx, hz);
2208	}
2209}
2210
2211/*
2212 * Request the syncer daemon to speed up its work.
2213 * We never push it to speed up more than half of its
2214 * normal turn time, otherwise it could take over the cpu.
2215 */
2216int
2217speedup_syncer(void)
2218{
2219	int ret = 0;
2220
2221	mtx_lock(&sync_mtx);
2222	if (rushjob < syncdelay / 2) {
2223		rushjob += 1;
2224		stat_rush_requests += 1;
2225		ret = 1;
2226	}
2227	mtx_unlock(&sync_mtx);
2228	cv_broadcast(&sync_wakeup);
2229	return (ret);
2230}
2231
2232/*
2233 * Tell the syncer to speed up its work and run though its work
2234 * list several times, then tell it to shut down.
2235 */
2236static void
2237syncer_shutdown(void *arg, int howto)
2238{
2239
2240	if (howto & RB_NOSYNC)
2241		return;
2242	mtx_lock(&sync_mtx);
2243	syncer_state = SYNCER_SHUTTING_DOWN;
2244	rushjob = 0;
2245	mtx_unlock(&sync_mtx);
2246	cv_broadcast(&sync_wakeup);
2247	kproc_shutdown(arg, howto);
2248}
2249
2250void
2251syncer_suspend(void)
2252{
2253
2254	syncer_shutdown(updateproc, 0);
2255}
2256
2257void
2258syncer_resume(void)
2259{
2260
2261	mtx_lock(&sync_mtx);
2262	first_printf = 1;
2263	syncer_state = SYNCER_RUNNING;
2264	mtx_unlock(&sync_mtx);
2265	cv_broadcast(&sync_wakeup);
2266	kproc_resume(updateproc);
2267}
2268
2269/*
2270 * Reassign a buffer from one vnode to another.
2271 * Used to assign file specific control information
2272 * (indirect blocks) to the vnode to which they belong.
2273 */
2274void
2275reassignbuf(struct buf *bp)
2276{
2277	struct vnode *vp;
2278	struct bufobj *bo;
2279	int delay;
2280#ifdef INVARIANTS
2281	struct bufv *bv;
2282#endif
2283
2284	vp = bp->b_vp;
2285	bo = bp->b_bufobj;
2286	++reassignbufcalls;
2287
2288	CTR3(KTR_BUF, "reassignbuf(%p) vp %p flags %X",
2289	    bp, bp->b_vp, bp->b_flags);
2290	/*
2291	 * B_PAGING flagged buffers cannot be reassigned because their vp
2292	 * is not fully linked in.
2293	 */
2294	if (bp->b_flags & B_PAGING)
2295		panic("cannot reassign paging buffer");
2296
2297	/*
2298	 * Delete from old vnode list, if on one.
2299	 */
2300	BO_LOCK(bo);
2301	if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN))
2302		buf_vlist_remove(bp);
2303	else
2304		panic("reassignbuf: Buffer %p not on queue.", bp);
2305	/*
2306	 * If dirty, put on list of dirty buffers; otherwise insert onto list
2307	 * of clean buffers.
2308	 */
2309	if (bp->b_flags & B_DELWRI) {
2310		if ((bo->bo_flag & BO_ONWORKLST) == 0) {
2311			switch (vp->v_type) {
2312			case VDIR:
2313				delay = dirdelay;
2314				break;
2315			case VCHR:
2316				delay = metadelay;
2317				break;
2318			default:
2319				delay = filedelay;
2320			}
2321			vn_syncer_add_to_worklist(bo, delay);
2322		}
2323		buf_vlist_add(bp, bo, BX_VNDIRTY);
2324	} else {
2325		buf_vlist_add(bp, bo, BX_VNCLEAN);
2326
2327		if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) {
2328			mtx_lock(&sync_mtx);
2329			LIST_REMOVE(bo, bo_synclist);
2330			syncer_worklist_len--;
2331			mtx_unlock(&sync_mtx);
2332			bo->bo_flag &= ~BO_ONWORKLST;
2333		}
2334	}
2335#ifdef INVARIANTS
2336	bv = &bo->bo_clean;
2337	bp = TAILQ_FIRST(&bv->bv_hd);
2338	KASSERT(bp == NULL || bp->b_bufobj == bo,
2339	    ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo));
2340	bp = TAILQ_LAST(&bv->bv_hd, buflists);
2341	KASSERT(bp == NULL || bp->b_bufobj == bo,
2342	    ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo));
2343	bv = &bo->bo_dirty;
2344	bp = TAILQ_FIRST(&bv->bv_hd);
2345	KASSERT(bp == NULL || bp->b_bufobj == bo,
2346	    ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo));
2347	bp = TAILQ_LAST(&bv->bv_hd, buflists);
2348	KASSERT(bp == NULL || bp->b_bufobj == bo,
2349	    ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo));
2350#endif
2351	BO_UNLOCK(bo);
2352}
2353
2354/*
2355 * A temporary hack until refcount_* APIs are sorted out.
2356 */
2357static __inline int
2358vfs_refcount_acquire_if_not_zero(volatile u_int *count)
2359{
2360	u_int old;
2361
2362	for (;;) {
2363		old = *count;
2364		if (old == 0)
2365			return (0);
2366		if (atomic_cmpset_int(count, old, old + 1))
2367			return (1);
2368	}
2369}
2370
2371static __inline int
2372vfs_refcount_release_if_not_last(volatile u_int *count)
2373{
2374	u_int old;
2375
2376	for (;;) {
2377		old = *count;
2378		if (old == 1)
2379			return (0);
2380		if (atomic_cmpset_int(count, old, old - 1))
2381			return (1);
2382	}
2383}
2384
2385static void
2386v_init_counters(struct vnode *vp)
2387{
2388
2389	VNASSERT(vp->v_type == VNON && vp->v_data == NULL && vp->v_iflag == 0,
2390	    vp, ("%s called for an initialized vnode", __FUNCTION__));
2391	ASSERT_VI_UNLOCKED(vp, __FUNCTION__);
2392
2393	refcount_init(&vp->v_holdcnt, 1);
2394	refcount_init(&vp->v_usecount, 1);
2395}
2396
2397static void
2398v_incr_usecount_locked(struct vnode *vp)
2399{
2400
2401	ASSERT_VI_LOCKED(vp, __func__);
2402	if ((vp->v_iflag & VI_OWEINACT) != 0) {
2403		VNASSERT(vp->v_usecount == 0, vp,
2404		    ("vnode with usecount and VI_OWEINACT set"));
2405		vp->v_iflag &= ~VI_OWEINACT;
2406	}
2407	refcount_acquire(&vp->v_usecount);
2408	v_incr_devcount(vp);
2409}
2410
2411/*
2412 * Increment the use and hold counts on the vnode, taking care to reference
2413 * the driver's usecount if this is a chardev.  The _vhold() will remove
2414 * the vnode from the free list if it is presently free.
2415 */
2416static void
2417v_incr_usecount(struct vnode *vp)
2418{
2419
2420	ASSERT_VI_UNLOCKED(vp, __func__);
2421	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
2422
2423	if (vp->v_type != VCHR &&
2424	    vfs_refcount_acquire_if_not_zero(&vp->v_usecount)) {
2425		VNASSERT((vp->v_iflag & VI_OWEINACT) == 0, vp,
2426		    ("vnode with usecount and VI_OWEINACT set"));
2427	} else {
2428		VI_LOCK(vp);
2429		v_incr_usecount_locked(vp);
2430		VI_UNLOCK(vp);
2431	}
2432}
2433
2434/*
2435 * Increment si_usecount of the associated device, if any.
2436 */
2437static void
2438v_incr_devcount(struct vnode *vp)
2439{
2440
2441	ASSERT_VI_LOCKED(vp, __FUNCTION__);
2442	if (vp->v_type == VCHR && vp->v_rdev != NULL) {
2443		dev_lock();
2444		vp->v_rdev->si_usecount++;
2445		dev_unlock();
2446	}
2447}
2448
2449/*
2450 * Decrement si_usecount of the associated device, if any.
2451 */
2452static void
2453v_decr_devcount(struct vnode *vp)
2454{
2455
2456	ASSERT_VI_LOCKED(vp, __FUNCTION__);
2457	if (vp->v_type == VCHR && vp->v_rdev != NULL) {
2458		dev_lock();
2459		vp->v_rdev->si_usecount--;
2460		dev_unlock();
2461	}
2462}
2463
2464/*
2465 * Grab a particular vnode from the free list, increment its
2466 * reference count and lock it.  VI_DOOMED is set if the vnode
2467 * is being destroyed.  Only callers who specify LK_RETRY will
2468 * see doomed vnodes.  If inactive processing was delayed in
2469 * vput try to do it here.
2470 *
2471 * Notes on lockless counter manipulation:
2472 * _vhold, vputx and other routines make various decisions based
2473 * on either holdcnt or usecount being 0. As long as either contuner
2474 * is not transitioning 0->1 nor 1->0, the manipulation can be done
2475 * with atomic operations. Otherwise the interlock is taken.
2476 */
2477int
2478vget(struct vnode *vp, int flags, struct thread *td)
2479{
2480	int error, oweinact;
2481
2482	VNASSERT((flags & LK_TYPE_MASK) != 0, vp,
2483	    ("vget: invalid lock operation"));
2484
2485	if ((flags & LK_INTERLOCK) != 0)
2486		ASSERT_VI_LOCKED(vp, __func__);
2487	else
2488		ASSERT_VI_UNLOCKED(vp, __func__);
2489	if ((flags & LK_VNHELD) != 0)
2490		VNASSERT((vp->v_holdcnt > 0), vp,
2491		    ("vget: LK_VNHELD passed but vnode not held"));
2492
2493	CTR3(KTR_VFS, "%s: vp %p with flags %d", __func__, vp, flags);
2494
2495	if ((flags & LK_VNHELD) == 0)
2496		_vhold(vp, (flags & LK_INTERLOCK) != 0);
2497
2498	if ((error = vn_lock(vp, flags)) != 0) {
2499		vdrop(vp);
2500		CTR2(KTR_VFS, "%s: impossible to lock vnode %p", __func__,
2501		    vp);
2502		return (error);
2503	}
2504	if (vp->v_iflag & VI_DOOMED && (flags & LK_RETRY) == 0)
2505		panic("vget: vn_lock failed to return ENOENT\n");
2506	/*
2507	 * We don't guarantee that any particular close will
2508	 * trigger inactive processing so just make a best effort
2509	 * here at preventing a reference to a removed file.  If
2510	 * we don't succeed no harm is done.
2511	 *
2512	 * Upgrade our holdcnt to a usecount.
2513	 */
2514	if (vp->v_type != VCHR &&
2515	    vfs_refcount_acquire_if_not_zero(&vp->v_usecount)) {
2516		VNASSERT((vp->v_iflag & VI_OWEINACT) == 0, vp,
2517		    ("vnode with usecount and VI_OWEINACT set"));
2518	} else {
2519		VI_LOCK(vp);
2520		if ((vp->v_iflag & VI_OWEINACT) == 0) {
2521			oweinact = 0;
2522		} else {
2523			oweinact = 1;
2524			vp->v_iflag &= ~VI_OWEINACT;
2525		}
2526		refcount_acquire(&vp->v_usecount);
2527		v_incr_devcount(vp);
2528		if (oweinact && VOP_ISLOCKED(vp) == LK_EXCLUSIVE &&
2529		    (flags & LK_NOWAIT) == 0)
2530			vinactive(vp, td);
2531		VI_UNLOCK(vp);
2532	}
2533	return (0);
2534}
2535
2536/*
2537 * Increase the reference count of a vnode.
2538 */
2539void
2540vref(struct vnode *vp)
2541{
2542
2543	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
2544	_vhold(vp, false);
2545	v_incr_usecount(vp);
2546}
2547
2548void
2549vrefl(struct vnode *vp)
2550{
2551
2552	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
2553	_vhold(vp, true);
2554	v_incr_usecount_locked(vp);
2555}
2556
2557/*
2558 * Return reference count of a vnode.
2559 *
2560 * The results of this call are only guaranteed when some mechanism is used to
2561 * stop other processes from gaining references to the vnode.  This may be the
2562 * case if the caller holds the only reference.  This is also useful when stale
2563 * data is acceptable as race conditions may be accounted for by some other
2564 * means.
2565 */
2566int
2567vrefcnt(struct vnode *vp)
2568{
2569
2570	return (vp->v_usecount);
2571}
2572
2573#define	VPUTX_VRELE	1
2574#define	VPUTX_VPUT	2
2575#define	VPUTX_VUNREF	3
2576
2577/*
2578 * Decrement the use and hold counts for a vnode.
2579 *
2580 * See an explanation near vget() as to why atomic operation is safe.
2581 */
2582static void
2583vputx(struct vnode *vp, int func)
2584{
2585	int error;
2586
2587	KASSERT(vp != NULL, ("vputx: null vp"));
2588	if (func == VPUTX_VUNREF)
2589		ASSERT_VOP_LOCKED(vp, "vunref");
2590	else if (func == VPUTX_VPUT)
2591		ASSERT_VOP_LOCKED(vp, "vput");
2592	else
2593		KASSERT(func == VPUTX_VRELE, ("vputx: wrong func"));
2594	ASSERT_VI_UNLOCKED(vp, __func__);
2595	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
2596
2597	if (vp->v_type != VCHR &&
2598	    vfs_refcount_release_if_not_last(&vp->v_usecount)) {
2599		if (func == VPUTX_VPUT)
2600			VOP_UNLOCK(vp, 0);
2601		vdrop(vp);
2602		return;
2603	}
2604
2605	VI_LOCK(vp);
2606
2607	/*
2608	 * We want to hold the vnode until the inactive finishes to
2609	 * prevent vgone() races.  We drop the use count here and the
2610	 * hold count below when we're done.
2611	 */
2612	if (!refcount_release(&vp->v_usecount) ||
2613	    (vp->v_iflag & VI_DOINGINACT)) {
2614		if (func == VPUTX_VPUT)
2615			VOP_UNLOCK(vp, 0);
2616		v_decr_devcount(vp);
2617		vdropl(vp);
2618		return;
2619	}
2620
2621	v_decr_devcount(vp);
2622
2623	error = 0;
2624
2625	if (vp->v_usecount != 0) {
2626		vprint("vputx: usecount not zero", vp);
2627		panic("vputx: usecount not zero");
2628	}
2629
2630	CTR2(KTR_VFS, "%s: return vnode %p to the freelist", __func__, vp);
2631
2632	/*
2633	 * We must call VOP_INACTIVE with the node locked. Mark
2634	 * as VI_DOINGINACT to avoid recursion.
2635	 */
2636	vp->v_iflag |= VI_OWEINACT;
2637	switch (func) {
2638	case VPUTX_VRELE:
2639		error = vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK);
2640		VI_LOCK(vp);
2641		break;
2642	case VPUTX_VPUT:
2643		if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) {
2644			error = VOP_LOCK(vp, LK_UPGRADE | LK_INTERLOCK |
2645			    LK_NOWAIT);
2646			VI_LOCK(vp);
2647		}
2648		break;
2649	case VPUTX_VUNREF:
2650		if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) {
2651			error = VOP_LOCK(vp, LK_TRYUPGRADE | LK_INTERLOCK);
2652			VI_LOCK(vp);
2653		}
2654		break;
2655	}
2656	VNASSERT(vp->v_usecount == 0 || (vp->v_iflag & VI_OWEINACT) == 0, vp,
2657	    ("vnode with usecount and VI_OWEINACT set"));
2658	if (error == 0) {
2659		if (vp->v_iflag & VI_OWEINACT)
2660			vinactive(vp, curthread);
2661		if (func != VPUTX_VUNREF)
2662			VOP_UNLOCK(vp, 0);
2663	}
2664	vdropl(vp);
2665}
2666
2667/*
2668 * Vnode put/release.
2669 * If count drops to zero, call inactive routine and return to freelist.
2670 */
2671void
2672vrele(struct vnode *vp)
2673{
2674
2675	vputx(vp, VPUTX_VRELE);
2676}
2677
2678/*
2679 * Release an already locked vnode.  This give the same effects as
2680 * unlock+vrele(), but takes less time and avoids releasing and
2681 * re-aquiring the lock (as vrele() acquires the lock internally.)
2682 */
2683void
2684vput(struct vnode *vp)
2685{
2686
2687	vputx(vp, VPUTX_VPUT);
2688}
2689
2690/*
2691 * Release an exclusively locked vnode. Do not unlock the vnode lock.
2692 */
2693void
2694vunref(struct vnode *vp)
2695{
2696
2697	vputx(vp, VPUTX_VUNREF);
2698}
2699
2700/*
2701 * Increase the hold count and activate if this is the first reference.
2702 */
2703void
2704_vhold(struct vnode *vp, bool locked)
2705{
2706	struct mount *mp;
2707
2708	if (locked)
2709		ASSERT_VI_LOCKED(vp, __func__);
2710	else
2711		ASSERT_VI_UNLOCKED(vp, __func__);
2712	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
2713	if (!locked && vfs_refcount_acquire_if_not_zero(&vp->v_holdcnt)) {
2714		VNASSERT((vp->v_iflag & VI_FREE) == 0, vp,
2715		    ("_vhold: vnode with holdcnt is free"));
2716		return;
2717	}
2718
2719	if (!locked)
2720		VI_LOCK(vp);
2721	if ((vp->v_iflag & VI_FREE) == 0) {
2722		refcount_acquire(&vp->v_holdcnt);
2723		if (!locked)
2724			VI_UNLOCK(vp);
2725		return;
2726	}
2727	VNASSERT(vp->v_holdcnt == 0, vp,
2728	    ("%s: wrong hold count", __func__));
2729	VNASSERT(vp->v_op != NULL, vp,
2730	    ("%s: vnode already reclaimed.", __func__));
2731	/*
2732	 * Remove a vnode from the free list, mark it as in use,
2733	 * and put it on the active list.
2734	 */
2735	mtx_lock(&vnode_free_list_mtx);
2736	TAILQ_REMOVE(&vnode_free_list, vp, v_actfreelist);
2737	freevnodes--;
2738	vp->v_iflag &= ~VI_FREE;
2739	KASSERT((vp->v_iflag & VI_ACTIVE) == 0,
2740	    ("Activating already active vnode"));
2741	vp->v_iflag |= VI_ACTIVE;
2742	mp = vp->v_mount;
2743	TAILQ_INSERT_HEAD(&mp->mnt_activevnodelist, vp, v_actfreelist);
2744	mp->mnt_activevnodelistsize++;
2745	mtx_unlock(&vnode_free_list_mtx);
2746	refcount_acquire(&vp->v_holdcnt);
2747	if (!locked)
2748		VI_UNLOCK(vp);
2749}
2750
2751/*
2752 * Drop the hold count of the vnode.  If this is the last reference to
2753 * the vnode we place it on the free list unless it has been vgone'd
2754 * (marked VI_DOOMED) in which case we will free it.
2755 *
2756 * Because the vnode vm object keeps a hold reference on the vnode if
2757 * there is at least one resident non-cached page, the vnode cannot
2758 * leave the active list without the page cleanup done.
2759 */
2760void
2761_vdrop(struct vnode *vp, bool locked)
2762{
2763	struct bufobj *bo;
2764	struct mount *mp;
2765	int active;
2766
2767	if (locked)
2768		ASSERT_VI_LOCKED(vp, __func__);
2769	else
2770		ASSERT_VI_UNLOCKED(vp, __func__);
2771	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
2772	if ((int)vp->v_holdcnt <= 0)
2773		panic("vdrop: holdcnt %d", vp->v_holdcnt);
2774	if (vfs_refcount_release_if_not_last(&vp->v_holdcnt)) {
2775		if (locked)
2776			VI_UNLOCK(vp);
2777		return;
2778	}
2779
2780	if (!locked)
2781		VI_LOCK(vp);
2782	if (refcount_release(&vp->v_holdcnt) == 0) {
2783		VI_UNLOCK(vp);
2784		return;
2785	}
2786	if ((vp->v_iflag & VI_DOOMED) == 0) {
2787		/*
2788		 * Mark a vnode as free: remove it from its active list
2789		 * and put it up for recycling on the freelist.
2790		 */
2791		VNASSERT(vp->v_op != NULL, vp,
2792		    ("vdropl: vnode already reclaimed."));
2793		VNASSERT((vp->v_iflag & VI_FREE) == 0, vp,
2794		    ("vnode already free"));
2795		VNASSERT(vp->v_holdcnt == 0, vp,
2796		    ("vdropl: freeing when we shouldn't"));
2797		active = vp->v_iflag & VI_ACTIVE;
2798		if ((vp->v_iflag & VI_OWEINACT) == 0) {
2799			vp->v_iflag &= ~VI_ACTIVE;
2800			mp = vp->v_mount;
2801			mtx_lock(&vnode_free_list_mtx);
2802			if (active) {
2803				TAILQ_REMOVE(&mp->mnt_activevnodelist, vp,
2804				    v_actfreelist);
2805				mp->mnt_activevnodelistsize--;
2806			}
2807			TAILQ_INSERT_TAIL(&vnode_free_list, vp,
2808			    v_actfreelist);
2809			freevnodes++;
2810			vp->v_iflag |= VI_FREE;
2811			mtx_unlock(&vnode_free_list_mtx);
2812		} else {
2813			atomic_add_long(&free_owe_inact, 1);
2814		}
2815		VI_UNLOCK(vp);
2816		return;
2817	}
2818	/*
2819	 * The vnode has been marked for destruction, so free it.
2820	 *
2821	 * The vnode will be returned to the zone where it will
2822	 * normally remain until it is needed for another vnode. We
2823	 * need to cleanup (or verify that the cleanup has already
2824	 * been done) any residual data left from its current use
2825	 * so as not to contaminate the freshly allocated vnode.
2826	 */
2827	CTR2(KTR_VFS, "%s: destroying the vnode %p", __func__, vp);
2828	atomic_subtract_long(&numvnodes, 1);
2829	bo = &vp->v_bufobj;
2830	VNASSERT((vp->v_iflag & VI_FREE) == 0, vp,
2831	    ("cleaned vnode still on the free list."));
2832	VNASSERT(vp->v_data == NULL, vp, ("cleaned vnode isn't"));
2833	VNASSERT(vp->v_holdcnt == 0, vp, ("Non-zero hold count"));
2834	VNASSERT(vp->v_usecount == 0, vp, ("Non-zero use count"));
2835	VNASSERT(vp->v_writecount == 0, vp, ("Non-zero write count"));
2836	VNASSERT(bo->bo_numoutput == 0, vp, ("Clean vnode has pending I/O's"));
2837	VNASSERT(bo->bo_clean.bv_cnt == 0, vp, ("cleanbufcnt not 0"));
2838	VNASSERT(pctrie_is_empty(&bo->bo_clean.bv_root), vp,
2839	    ("clean blk trie not empty"));
2840	VNASSERT(bo->bo_dirty.bv_cnt == 0, vp, ("dirtybufcnt not 0"));
2841	VNASSERT(pctrie_is_empty(&bo->bo_dirty.bv_root), vp,
2842	    ("dirty blk trie not empty"));
2843	VNASSERT(TAILQ_EMPTY(&vp->v_cache_dst), vp, ("vp has namecache dst"));
2844	VNASSERT(LIST_EMPTY(&vp->v_cache_src), vp, ("vp has namecache src"));
2845	VNASSERT(vp->v_cache_dd == NULL, vp, ("vp has namecache for .."));
2846	VNASSERT(TAILQ_EMPTY(&vp->v_rl.rl_waiters), vp,
2847	    ("Dangling rangelock waiters"));
2848	VI_UNLOCK(vp);
2849#ifdef MAC
2850	mac_vnode_destroy(vp);
2851#endif
2852	if (vp->v_pollinfo != NULL) {
2853		destroy_vpollinfo(vp->v_pollinfo);
2854		vp->v_pollinfo = NULL;
2855	}
2856#ifdef INVARIANTS
2857	/* XXX Elsewhere we detect an already freed vnode via NULL v_op. */
2858	vp->v_op = NULL;
2859#endif
2860	bzero(&vp->v_un, sizeof(vp->v_un));
2861	vp->v_lasta = vp->v_clen = vp->v_cstart = vp->v_lastw = 0;
2862	vp->v_iflag = 0;
2863	vp->v_vflag = 0;
2864	bo->bo_flag = 0;
2865	uma_zfree(vnode_zone, vp);
2866}
2867
2868/*
2869 * Call VOP_INACTIVE on the vnode and manage the DOINGINACT and OWEINACT
2870 * flags.  DOINGINACT prevents us from recursing in calls to vinactive.
2871 * OWEINACT tracks whether a vnode missed a call to inactive due to a
2872 * failed lock upgrade.
2873 */
2874void
2875vinactive(struct vnode *vp, struct thread *td)
2876{
2877	struct vm_object *obj;
2878
2879	ASSERT_VOP_ELOCKED(vp, "vinactive");
2880	ASSERT_VI_LOCKED(vp, "vinactive");
2881	VNASSERT((vp->v_iflag & VI_DOINGINACT) == 0, vp,
2882	    ("vinactive: recursed on VI_DOINGINACT"));
2883	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
2884	vp->v_iflag |= VI_DOINGINACT;
2885	vp->v_iflag &= ~VI_OWEINACT;
2886	VI_UNLOCK(vp);
2887	/*
2888	 * Before moving off the active list, we must be sure that any
2889	 * modified pages are converted into the vnode's dirty
2890	 * buffers, since these will no longer be checked once the
2891	 * vnode is on the inactive list.
2892	 *
2893	 * The write-out of the dirty pages is asynchronous.  At the
2894	 * point that VOP_INACTIVE() is called, there could still be
2895	 * pending I/O and dirty pages in the object.
2896	 */
2897	obj = vp->v_object;
2898	if (obj != NULL && (obj->flags & OBJ_MIGHTBEDIRTY) != 0) {
2899		VM_OBJECT_WLOCK(obj);
2900		vm_object_page_clean(obj, 0, 0, OBJPC_NOSYNC);
2901		VM_OBJECT_WUNLOCK(obj);
2902	}
2903	VOP_INACTIVE(vp, td);
2904	VI_LOCK(vp);
2905	VNASSERT(vp->v_iflag & VI_DOINGINACT, vp,
2906	    ("vinactive: lost VI_DOINGINACT"));
2907	vp->v_iflag &= ~VI_DOINGINACT;
2908}
2909
2910/*
2911 * Remove any vnodes in the vnode table belonging to mount point mp.
2912 *
2913 * If FORCECLOSE is not specified, there should not be any active ones,
2914 * return error if any are found (nb: this is a user error, not a
2915 * system error). If FORCECLOSE is specified, detach any active vnodes
2916 * that are found.
2917 *
2918 * If WRITECLOSE is set, only flush out regular file vnodes open for
2919 * writing.
2920 *
2921 * SKIPSYSTEM causes any vnodes marked VV_SYSTEM to be skipped.
2922 *
2923 * `rootrefs' specifies the base reference count for the root vnode
2924 * of this filesystem. The root vnode is considered busy if its
2925 * v_usecount exceeds this value. On a successful return, vflush(, td)
2926 * will call vrele() on the root vnode exactly rootrefs times.
2927 * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must
2928 * be zero.
2929 */
2930#ifdef DIAGNOSTIC
2931static int busyprt = 0;		/* print out busy vnodes */
2932SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "Print out busy vnodes");
2933#endif
2934
2935int
2936vflush(struct mount *mp, int rootrefs, int flags, struct thread *td)
2937{
2938	struct vnode *vp, *mvp, *rootvp = NULL;
2939	struct vattr vattr;
2940	int busy = 0, error;
2941
2942	CTR4(KTR_VFS, "%s: mp %p with rootrefs %d and flags %d", __func__, mp,
2943	    rootrefs, flags);
2944	if (rootrefs > 0) {
2945		KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0,
2946		    ("vflush: bad args"));
2947		/*
2948		 * Get the filesystem root vnode. We can vput() it
2949		 * immediately, since with rootrefs > 0, it won't go away.
2950		 */
2951		if ((error = VFS_ROOT(mp, LK_EXCLUSIVE, &rootvp)) != 0) {
2952			CTR2(KTR_VFS, "%s: vfs_root lookup failed with %d",
2953			    __func__, error);
2954			return (error);
2955		}
2956		vput(rootvp);
2957	}
2958loop:
2959	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
2960		vholdl(vp);
2961		error = vn_lock(vp, LK_INTERLOCK | LK_EXCLUSIVE);
2962		if (error) {
2963			vdrop(vp);
2964			MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
2965			goto loop;
2966		}
2967		/*
2968		 * Skip over a vnodes marked VV_SYSTEM.
2969		 */
2970		if ((flags & SKIPSYSTEM) && (vp->v_vflag & VV_SYSTEM)) {
2971			VOP_UNLOCK(vp, 0);
2972			vdrop(vp);
2973			continue;
2974		}
2975		/*
2976		 * If WRITECLOSE is set, flush out unlinked but still open
2977		 * files (even if open only for reading) and regular file
2978		 * vnodes open for writing.
2979		 */
2980		if (flags & WRITECLOSE) {
2981			if (vp->v_object != NULL) {
2982				VM_OBJECT_WLOCK(vp->v_object);
2983				vm_object_page_clean(vp->v_object, 0, 0, 0);
2984				VM_OBJECT_WUNLOCK(vp->v_object);
2985			}
2986			error = VOP_FSYNC(vp, MNT_WAIT, td);
2987			if (error != 0) {
2988				VOP_UNLOCK(vp, 0);
2989				vdrop(vp);
2990				MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
2991				return (error);
2992			}
2993			error = VOP_GETATTR(vp, &vattr, td->td_ucred);
2994			VI_LOCK(vp);
2995
2996			if ((vp->v_type == VNON ||
2997			    (error == 0 && vattr.va_nlink > 0)) &&
2998			    (vp->v_writecount == 0 || vp->v_type != VREG)) {
2999				VOP_UNLOCK(vp, 0);
3000				vdropl(vp);
3001				continue;
3002			}
3003		} else
3004			VI_LOCK(vp);
3005		/*
3006		 * With v_usecount == 0, all we need to do is clear out the
3007		 * vnode data structures and we are done.
3008		 *
3009		 * If FORCECLOSE is set, forcibly close the vnode.
3010		 */
3011		if (vp->v_usecount == 0 || (flags & FORCECLOSE)) {
3012			vgonel(vp);
3013		} else {
3014			busy++;
3015#ifdef DIAGNOSTIC
3016			if (busyprt)
3017				vprint("vflush: busy vnode", vp);
3018#endif
3019		}
3020		VOP_UNLOCK(vp, 0);
3021		vdropl(vp);
3022	}
3023	if (rootrefs > 0 && (flags & FORCECLOSE) == 0) {
3024		/*
3025		 * If just the root vnode is busy, and if its refcount
3026		 * is equal to `rootrefs', then go ahead and kill it.
3027		 */
3028		VI_LOCK(rootvp);
3029		KASSERT(busy > 0, ("vflush: not busy"));
3030		VNASSERT(rootvp->v_usecount >= rootrefs, rootvp,
3031		    ("vflush: usecount %d < rootrefs %d",
3032		     rootvp->v_usecount, rootrefs));
3033		if (busy == 1 && rootvp->v_usecount == rootrefs) {
3034			VOP_LOCK(rootvp, LK_EXCLUSIVE|LK_INTERLOCK);
3035			vgone(rootvp);
3036			VOP_UNLOCK(rootvp, 0);
3037			busy = 0;
3038		} else
3039			VI_UNLOCK(rootvp);
3040	}
3041	if (busy) {
3042		CTR2(KTR_VFS, "%s: failing as %d vnodes are busy", __func__,
3043		    busy);
3044		return (EBUSY);
3045	}
3046	for (; rootrefs > 0; rootrefs--)
3047		vrele(rootvp);
3048	return (0);
3049}
3050
3051/*
3052 * Recycle an unused vnode to the front of the free list.
3053 */
3054int
3055vrecycle(struct vnode *vp)
3056{
3057	int recycled;
3058
3059	ASSERT_VOP_ELOCKED(vp, "vrecycle");
3060	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
3061	recycled = 0;
3062	VI_LOCK(vp);
3063	if (vp->v_usecount == 0) {
3064		recycled = 1;
3065		vgonel(vp);
3066	}
3067	VI_UNLOCK(vp);
3068	return (recycled);
3069}
3070
3071/*
3072 * Eliminate all activity associated with a vnode
3073 * in preparation for reuse.
3074 */
3075void
3076vgone(struct vnode *vp)
3077{
3078	VI_LOCK(vp);
3079	vgonel(vp);
3080	VI_UNLOCK(vp);
3081}
3082
3083static void
3084notify_lowervp_vfs_dummy(struct mount *mp __unused,
3085    struct vnode *lowervp __unused)
3086{
3087}
3088
3089/*
3090 * Notify upper mounts about reclaimed or unlinked vnode.
3091 */
3092void
3093vfs_notify_upper(struct vnode *vp, int event)
3094{
3095	static struct vfsops vgonel_vfsops = {
3096		.vfs_reclaim_lowervp = notify_lowervp_vfs_dummy,
3097		.vfs_unlink_lowervp = notify_lowervp_vfs_dummy,
3098	};
3099	struct mount *mp, *ump, *mmp;
3100
3101	mp = vp->v_mount;
3102	if (mp == NULL)
3103		return;
3104
3105	MNT_ILOCK(mp);
3106	if (TAILQ_EMPTY(&mp->mnt_uppers))
3107		goto unlock;
3108	MNT_IUNLOCK(mp);
3109	mmp = malloc(sizeof(struct mount), M_TEMP, M_WAITOK | M_ZERO);
3110	mmp->mnt_op = &vgonel_vfsops;
3111	mmp->mnt_kern_flag |= MNTK_MARKER;
3112	MNT_ILOCK(mp);
3113	mp->mnt_kern_flag |= MNTK_VGONE_UPPER;
3114	for (ump = TAILQ_FIRST(&mp->mnt_uppers); ump != NULL;) {
3115		if ((ump->mnt_kern_flag & MNTK_MARKER) != 0) {
3116			ump = TAILQ_NEXT(ump, mnt_upper_link);
3117			continue;
3118		}
3119		TAILQ_INSERT_AFTER(&mp->mnt_uppers, ump, mmp, mnt_upper_link);
3120		MNT_IUNLOCK(mp);
3121		switch (event) {
3122		case VFS_NOTIFY_UPPER_RECLAIM:
3123			VFS_RECLAIM_LOWERVP(ump, vp);
3124			break;
3125		case VFS_NOTIFY_UPPER_UNLINK:
3126			VFS_UNLINK_LOWERVP(ump, vp);
3127			break;
3128		default:
3129			KASSERT(0, ("invalid event %d", event));
3130			break;
3131		}
3132		MNT_ILOCK(mp);
3133		ump = TAILQ_NEXT(mmp, mnt_upper_link);
3134		TAILQ_REMOVE(&mp->mnt_uppers, mmp, mnt_upper_link);
3135	}
3136	free(mmp, M_TEMP);
3137	mp->mnt_kern_flag &= ~MNTK_VGONE_UPPER;
3138	if ((mp->mnt_kern_flag & MNTK_VGONE_WAITER) != 0) {
3139		mp->mnt_kern_flag &= ~MNTK_VGONE_WAITER;
3140		wakeup(&mp->mnt_uppers);
3141	}
3142unlock:
3143	MNT_IUNLOCK(mp);
3144}
3145
3146/*
3147 * vgone, with the vp interlock held.
3148 */
3149static void
3150vgonel(struct vnode *vp)
3151{
3152	struct thread *td;
3153	int oweinact;
3154	int active;
3155	struct mount *mp;
3156
3157	ASSERT_VOP_ELOCKED(vp, "vgonel");
3158	ASSERT_VI_LOCKED(vp, "vgonel");
3159	VNASSERT(vp->v_holdcnt, vp,
3160	    ("vgonel: vp %p has no reference.", vp));
3161	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
3162	td = curthread;
3163
3164	/*
3165	 * Don't vgonel if we're already doomed.
3166	 */
3167	if (vp->v_iflag & VI_DOOMED)
3168		return;
3169	vp->v_iflag |= VI_DOOMED;
3170
3171	/*
3172	 * Check to see if the vnode is in use.  If so, we have to call
3173	 * VOP_CLOSE() and VOP_INACTIVE().
3174	 */
3175	active = vp->v_usecount;
3176	oweinact = (vp->v_iflag & VI_OWEINACT);
3177	VI_UNLOCK(vp);
3178	vfs_notify_upper(vp, VFS_NOTIFY_UPPER_RECLAIM);
3179
3180	/*
3181	 * If purging an active vnode, it must be closed and
3182	 * deactivated before being reclaimed.
3183	 */
3184	if (active)
3185		VOP_CLOSE(vp, FNONBLOCK, NOCRED, td);
3186	if (oweinact || active) {
3187		VI_LOCK(vp);
3188		if ((vp->v_iflag & VI_DOINGINACT) == 0)
3189			vinactive(vp, td);
3190		VI_UNLOCK(vp);
3191	}
3192	if (vp->v_type == VSOCK)
3193		vfs_unp_reclaim(vp);
3194
3195	/*
3196	 * Clean out any buffers associated with the vnode.
3197	 * If the flush fails, just toss the buffers.
3198	 */
3199	mp = NULL;
3200	if (!TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd))
3201		(void) vn_start_secondary_write(vp, &mp, V_WAIT);
3202	if (vinvalbuf(vp, V_SAVE, 0, 0) != 0) {
3203		while (vinvalbuf(vp, 0, 0, 0) != 0)
3204			;
3205	}
3206
3207	BO_LOCK(&vp->v_bufobj);
3208	KASSERT(TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd) &&
3209	    vp->v_bufobj.bo_dirty.bv_cnt == 0 &&
3210	    TAILQ_EMPTY(&vp->v_bufobj.bo_clean.bv_hd) &&
3211	    vp->v_bufobj.bo_clean.bv_cnt == 0,
3212	    ("vp %p bufobj not invalidated", vp));
3213	vp->v_bufobj.bo_flag |= BO_DEAD;
3214	BO_UNLOCK(&vp->v_bufobj);
3215
3216	/*
3217	 * Reclaim the vnode.
3218	 */
3219	if (VOP_RECLAIM(vp, td))
3220		panic("vgone: cannot reclaim");
3221	if (mp != NULL)
3222		vn_finished_secondary_write(mp);
3223	VNASSERT(vp->v_object == NULL, vp,
3224	    ("vop_reclaim left v_object vp=%p, tag=%s", vp, vp->v_tag));
3225	/*
3226	 * Clear the advisory locks and wake up waiting threads.
3227	 */
3228	(void)VOP_ADVLOCKPURGE(vp);
3229	vp->v_lockf = NULL;
3230	/*
3231	 * Delete from old mount point vnode list.
3232	 */
3233	delmntque(vp);
3234	cache_purge(vp);
3235	/*
3236	 * Done with purge, reset to the standard lock and invalidate
3237	 * the vnode.
3238	 */
3239	VI_LOCK(vp);
3240	vp->v_vnlock = &vp->v_lock;
3241	vp->v_op = &dead_vnodeops;
3242	vp->v_tag = "none";
3243	vp->v_type = VBAD;
3244}
3245
3246/*
3247 * Calculate the total number of references to a special device.
3248 */
3249int
3250vcount(struct vnode *vp)
3251{
3252	int count;
3253
3254	dev_lock();
3255	count = vp->v_rdev->si_usecount;
3256	dev_unlock();
3257	return (count);
3258}
3259
3260/*
3261 * Same as above, but using the struct cdev *as argument
3262 */
3263int
3264count_dev(struct cdev *dev)
3265{
3266	int count;
3267
3268	dev_lock();
3269	count = dev->si_usecount;
3270	dev_unlock();
3271	return(count);
3272}
3273
3274/*
3275 * Print out a description of a vnode.
3276 */
3277static char *typename[] =
3278{"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD",
3279 "VMARKER"};
3280
3281void
3282vn_printf(struct vnode *vp, const char *fmt, ...)
3283{
3284	va_list ap;
3285	char buf[256], buf2[16];
3286	u_long flags;
3287
3288	va_start(ap, fmt);
3289	vprintf(fmt, ap);
3290	va_end(ap);
3291	printf("%p: ", (void *)vp);
3292	printf("tag %s, type %s\n", vp->v_tag, typename[vp->v_type]);
3293	printf("    usecount %d, writecount %d, refcount %d mountedhere %p\n",
3294	    vp->v_usecount, vp->v_writecount, vp->v_holdcnt, vp->v_mountedhere);
3295	buf[0] = '\0';
3296	buf[1] = '\0';
3297	if (vp->v_vflag & VV_ROOT)
3298		strlcat(buf, "|VV_ROOT", sizeof(buf));
3299	if (vp->v_vflag & VV_ISTTY)
3300		strlcat(buf, "|VV_ISTTY", sizeof(buf));
3301	if (vp->v_vflag & VV_NOSYNC)
3302		strlcat(buf, "|VV_NOSYNC", sizeof(buf));
3303	if (vp->v_vflag & VV_ETERNALDEV)
3304		strlcat(buf, "|VV_ETERNALDEV", sizeof(buf));
3305	if (vp->v_vflag & VV_CACHEDLABEL)
3306		strlcat(buf, "|VV_CACHEDLABEL", sizeof(buf));
3307	if (vp->v_vflag & VV_TEXT)
3308		strlcat(buf, "|VV_TEXT", sizeof(buf));
3309	if (vp->v_vflag & VV_COPYONWRITE)
3310		strlcat(buf, "|VV_COPYONWRITE", sizeof(buf));
3311	if (vp->v_vflag & VV_SYSTEM)
3312		strlcat(buf, "|VV_SYSTEM", sizeof(buf));
3313	if (vp->v_vflag & VV_PROCDEP)
3314		strlcat(buf, "|VV_PROCDEP", sizeof(buf));
3315	if (vp->v_vflag & VV_NOKNOTE)
3316		strlcat(buf, "|VV_NOKNOTE", sizeof(buf));
3317	if (vp->v_vflag & VV_DELETED)
3318		strlcat(buf, "|VV_DELETED", sizeof(buf));
3319	if (vp->v_vflag & VV_MD)
3320		strlcat(buf, "|VV_MD", sizeof(buf));
3321	if (vp->v_vflag & VV_FORCEINSMQ)
3322		strlcat(buf, "|VV_FORCEINSMQ", sizeof(buf));
3323	flags = vp->v_vflag & ~(VV_ROOT | VV_ISTTY | VV_NOSYNC | VV_ETERNALDEV |
3324	    VV_CACHEDLABEL | VV_TEXT | VV_COPYONWRITE | VV_SYSTEM | VV_PROCDEP |
3325	    VV_NOKNOTE | VV_DELETED | VV_MD | VV_FORCEINSMQ);
3326	if (flags != 0) {
3327		snprintf(buf2, sizeof(buf2), "|VV(0x%lx)", flags);
3328		strlcat(buf, buf2, sizeof(buf));
3329	}
3330	if (vp->v_iflag & VI_MOUNT)
3331		strlcat(buf, "|VI_MOUNT", sizeof(buf));
3332	if (vp->v_iflag & VI_DOOMED)
3333		strlcat(buf, "|VI_DOOMED", sizeof(buf));
3334	if (vp->v_iflag & VI_FREE)
3335		strlcat(buf, "|VI_FREE", sizeof(buf));
3336	if (vp->v_iflag & VI_ACTIVE)
3337		strlcat(buf, "|VI_ACTIVE", sizeof(buf));
3338	if (vp->v_iflag & VI_DOINGINACT)
3339		strlcat(buf, "|VI_DOINGINACT", sizeof(buf));
3340	if (vp->v_iflag & VI_OWEINACT)
3341		strlcat(buf, "|VI_OWEINACT", sizeof(buf));
3342	flags = vp->v_iflag & ~(VI_MOUNT | VI_DOOMED | VI_FREE |
3343	    VI_ACTIVE | VI_DOINGINACT | VI_OWEINACT);
3344	if (flags != 0) {
3345		snprintf(buf2, sizeof(buf2), "|VI(0x%lx)", flags);
3346		strlcat(buf, buf2, sizeof(buf));
3347	}
3348	printf("    flags (%s)\n", buf + 1);
3349	if (mtx_owned(VI_MTX(vp)))
3350		printf(" VI_LOCKed");
3351	if (vp->v_object != NULL)
3352		printf("    v_object %p ref %d pages %d "
3353		    "cleanbuf %d dirtybuf %d\n",
3354		    vp->v_object, vp->v_object->ref_count,
3355		    vp->v_object->resident_page_count,
3356		    vp->v_bufobj.bo_clean.bv_cnt,
3357		    vp->v_bufobj.bo_dirty.bv_cnt);
3358	printf("    ");
3359	lockmgr_printinfo(vp->v_vnlock);
3360	if (vp->v_data != NULL)
3361		VOP_PRINT(vp);
3362}
3363
3364#ifdef DDB
3365/*
3366 * List all of the locked vnodes in the system.
3367 * Called when debugging the kernel.
3368 */
3369DB_SHOW_COMMAND(lockedvnods, lockedvnodes)
3370{
3371	struct mount *mp;
3372	struct vnode *vp;
3373
3374	/*
3375	 * Note: because this is DDB, we can't obey the locking semantics
3376	 * for these structures, which means we could catch an inconsistent
3377	 * state and dereference a nasty pointer.  Not much to be done
3378	 * about that.
3379	 */
3380	db_printf("Locked vnodes\n");
3381	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
3382		TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
3383			if (vp->v_type != VMARKER && VOP_ISLOCKED(vp))
3384				vprint("", vp);
3385		}
3386	}
3387}
3388
3389/*
3390 * Show details about the given vnode.
3391 */
3392DB_SHOW_COMMAND(vnode, db_show_vnode)
3393{
3394	struct vnode *vp;
3395
3396	if (!have_addr)
3397		return;
3398	vp = (struct vnode *)addr;
3399	vn_printf(vp, "vnode ");
3400}
3401
3402/*
3403 * Show details about the given mount point.
3404 */
3405DB_SHOW_COMMAND(mount, db_show_mount)
3406{
3407	struct mount *mp;
3408	struct vfsopt *opt;
3409	struct statfs *sp;
3410	struct vnode *vp;
3411	char buf[512];
3412	uint64_t mflags;
3413	u_int flags;
3414
3415	if (!have_addr) {
3416		/* No address given, print short info about all mount points. */
3417		TAILQ_FOREACH(mp, &mountlist, mnt_list) {
3418			db_printf("%p %s on %s (%s)\n", mp,
3419			    mp->mnt_stat.f_mntfromname,
3420			    mp->mnt_stat.f_mntonname,
3421			    mp->mnt_stat.f_fstypename);
3422			if (db_pager_quit)
3423				break;
3424		}
3425		db_printf("\nMore info: show mount <addr>\n");
3426		return;
3427	}
3428
3429	mp = (struct mount *)addr;
3430	db_printf("%p %s on %s (%s)\n", mp, mp->mnt_stat.f_mntfromname,
3431	    mp->mnt_stat.f_mntonname, mp->mnt_stat.f_fstypename);
3432
3433	buf[0] = '\0';
3434	mflags = mp->mnt_flag;
3435#define	MNT_FLAG(flag)	do {						\
3436	if (mflags & (flag)) {						\
3437		if (buf[0] != '\0')					\
3438			strlcat(buf, ", ", sizeof(buf));		\
3439		strlcat(buf, (#flag) + 4, sizeof(buf));			\
3440		mflags &= ~(flag);					\
3441	}								\
3442} while (0)
3443	MNT_FLAG(MNT_RDONLY);
3444	MNT_FLAG(MNT_SYNCHRONOUS);
3445	MNT_FLAG(MNT_NOEXEC);
3446	MNT_FLAG(MNT_NOSUID);
3447	MNT_FLAG(MNT_NFS4ACLS);
3448	MNT_FLAG(MNT_UNION);
3449	MNT_FLAG(MNT_ASYNC);
3450	MNT_FLAG(MNT_SUIDDIR);
3451	MNT_FLAG(MNT_SOFTDEP);
3452	MNT_FLAG(MNT_NOSYMFOLLOW);
3453	MNT_FLAG(MNT_GJOURNAL);
3454	MNT_FLAG(MNT_MULTILABEL);
3455	MNT_FLAG(MNT_ACLS);
3456	MNT_FLAG(MNT_NOATIME);
3457	MNT_FLAG(MNT_NOCLUSTERR);
3458	MNT_FLAG(MNT_NOCLUSTERW);
3459	MNT_FLAG(MNT_SUJ);
3460	MNT_FLAG(MNT_EXRDONLY);
3461	MNT_FLAG(MNT_EXPORTED);
3462	MNT_FLAG(MNT_DEFEXPORTED);
3463	MNT_FLAG(MNT_EXPORTANON);
3464	MNT_FLAG(MNT_EXKERB);
3465	MNT_FLAG(MNT_EXPUBLIC);
3466	MNT_FLAG(MNT_LOCAL);
3467	MNT_FLAG(MNT_QUOTA);
3468	MNT_FLAG(MNT_ROOTFS);
3469	MNT_FLAG(MNT_USER);
3470	MNT_FLAG(MNT_IGNORE);
3471	MNT_FLAG(MNT_UPDATE);
3472	MNT_FLAG(MNT_DELEXPORT);
3473	MNT_FLAG(MNT_RELOAD);
3474	MNT_FLAG(MNT_FORCE);
3475	MNT_FLAG(MNT_SNAPSHOT);
3476	MNT_FLAG(MNT_BYFSID);
3477#undef MNT_FLAG
3478	if (mflags != 0) {
3479		if (buf[0] != '\0')
3480			strlcat(buf, ", ", sizeof(buf));
3481		snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
3482		    "0x%016jx", mflags);
3483	}
3484	db_printf("    mnt_flag = %s\n", buf);
3485
3486	buf[0] = '\0';
3487	flags = mp->mnt_kern_flag;
3488#define	MNT_KERN_FLAG(flag)	do {					\
3489	if (flags & (flag)) {						\
3490		if (buf[0] != '\0')					\
3491			strlcat(buf, ", ", sizeof(buf));		\
3492		strlcat(buf, (#flag) + 5, sizeof(buf));			\
3493		flags &= ~(flag);					\
3494	}								\
3495} while (0)
3496	MNT_KERN_FLAG(MNTK_UNMOUNTF);
3497	MNT_KERN_FLAG(MNTK_ASYNC);
3498	MNT_KERN_FLAG(MNTK_SOFTDEP);
3499	MNT_KERN_FLAG(MNTK_NOINSMNTQ);
3500	MNT_KERN_FLAG(MNTK_DRAINING);
3501	MNT_KERN_FLAG(MNTK_REFEXPIRE);
3502	MNT_KERN_FLAG(MNTK_EXTENDED_SHARED);
3503	MNT_KERN_FLAG(MNTK_SHARED_WRITES);
3504	MNT_KERN_FLAG(MNTK_NO_IOPF);
3505	MNT_KERN_FLAG(MNTK_VGONE_UPPER);
3506	MNT_KERN_FLAG(MNTK_VGONE_WAITER);
3507	MNT_KERN_FLAG(MNTK_LOOKUP_EXCL_DOTDOT);
3508	MNT_KERN_FLAG(MNTK_MARKER);
3509	MNT_KERN_FLAG(MNTK_USES_BCACHE);
3510	MNT_KERN_FLAG(MNTK_NOASYNC);
3511	MNT_KERN_FLAG(MNTK_UNMOUNT);
3512	MNT_KERN_FLAG(MNTK_MWAIT);
3513	MNT_KERN_FLAG(MNTK_SUSPEND);
3514	MNT_KERN_FLAG(MNTK_SUSPEND2);
3515	MNT_KERN_FLAG(MNTK_SUSPENDED);
3516	MNT_KERN_FLAG(MNTK_LOOKUP_SHARED);
3517	MNT_KERN_FLAG(MNTK_NOKNOTE);
3518#undef MNT_KERN_FLAG
3519	if (flags != 0) {
3520		if (buf[0] != '\0')
3521			strlcat(buf, ", ", sizeof(buf));
3522		snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
3523		    "0x%08x", flags);
3524	}
3525	db_printf("    mnt_kern_flag = %s\n", buf);
3526
3527	db_printf("    mnt_opt = ");
3528	opt = TAILQ_FIRST(mp->mnt_opt);
3529	if (opt != NULL) {
3530		db_printf("%s", opt->name);
3531		opt = TAILQ_NEXT(opt, link);
3532		while (opt != NULL) {
3533			db_printf(", %s", opt->name);
3534			opt = TAILQ_NEXT(opt, link);
3535		}
3536	}
3537	db_printf("\n");
3538
3539	sp = &mp->mnt_stat;
3540	db_printf("    mnt_stat = { version=%u type=%u flags=0x%016jx "
3541	    "bsize=%ju iosize=%ju blocks=%ju bfree=%ju bavail=%jd files=%ju "
3542	    "ffree=%jd syncwrites=%ju asyncwrites=%ju syncreads=%ju "
3543	    "asyncreads=%ju namemax=%u owner=%u fsid=[%d, %d] }\n",
3544	    (u_int)sp->f_version, (u_int)sp->f_type, (uintmax_t)sp->f_flags,
3545	    (uintmax_t)sp->f_bsize, (uintmax_t)sp->f_iosize,
3546	    (uintmax_t)sp->f_blocks, (uintmax_t)sp->f_bfree,
3547	    (intmax_t)sp->f_bavail, (uintmax_t)sp->f_files,
3548	    (intmax_t)sp->f_ffree, (uintmax_t)sp->f_syncwrites,
3549	    (uintmax_t)sp->f_asyncwrites, (uintmax_t)sp->f_syncreads,
3550	    (uintmax_t)sp->f_asyncreads, (u_int)sp->f_namemax,
3551	    (u_int)sp->f_owner, (int)sp->f_fsid.val[0], (int)sp->f_fsid.val[1]);
3552
3553	db_printf("    mnt_cred = { uid=%u ruid=%u",
3554	    (u_int)mp->mnt_cred->cr_uid, (u_int)mp->mnt_cred->cr_ruid);
3555	if (jailed(mp->mnt_cred))
3556		db_printf(", jail=%d", mp->mnt_cred->cr_prison->pr_id);
3557	db_printf(" }\n");
3558	db_printf("    mnt_ref = %d\n", mp->mnt_ref);
3559	db_printf("    mnt_gen = %d\n", mp->mnt_gen);
3560	db_printf("    mnt_nvnodelistsize = %d\n", mp->mnt_nvnodelistsize);
3561	db_printf("    mnt_activevnodelistsize = %d\n",
3562	    mp->mnt_activevnodelistsize);
3563	db_printf("    mnt_writeopcount = %d\n", mp->mnt_writeopcount);
3564	db_printf("    mnt_maxsymlinklen = %d\n", mp->mnt_maxsymlinklen);
3565	db_printf("    mnt_iosize_max = %d\n", mp->mnt_iosize_max);
3566	db_printf("    mnt_hashseed = %u\n", mp->mnt_hashseed);
3567	db_printf("    mnt_lockref = %d\n", mp->mnt_lockref);
3568	db_printf("    mnt_secondary_writes = %d\n", mp->mnt_secondary_writes);
3569	db_printf("    mnt_secondary_accwrites = %d\n",
3570	    mp->mnt_secondary_accwrites);
3571	db_printf("    mnt_gjprovider = %s\n",
3572	    mp->mnt_gjprovider != NULL ? mp->mnt_gjprovider : "NULL");
3573
3574	db_printf("\n\nList of active vnodes\n");
3575	TAILQ_FOREACH(vp, &mp->mnt_activevnodelist, v_actfreelist) {
3576		if (vp->v_type != VMARKER) {
3577			vn_printf(vp, "vnode ");
3578			if (db_pager_quit)
3579				break;
3580		}
3581	}
3582	db_printf("\n\nList of inactive vnodes\n");
3583	TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
3584		if (vp->v_type != VMARKER && (vp->v_iflag & VI_ACTIVE) == 0) {
3585			vn_printf(vp, "vnode ");
3586			if (db_pager_quit)
3587				break;
3588		}
3589	}
3590}
3591#endif	/* DDB */
3592
3593/*
3594 * Fill in a struct xvfsconf based on a struct vfsconf.
3595 */
3596static int
3597vfsconf2x(struct sysctl_req *req, struct vfsconf *vfsp)
3598{
3599	struct xvfsconf xvfsp;
3600
3601	bzero(&xvfsp, sizeof(xvfsp));
3602	strcpy(xvfsp.vfc_name, vfsp->vfc_name);
3603	xvfsp.vfc_typenum = vfsp->vfc_typenum;
3604	xvfsp.vfc_refcount = vfsp->vfc_refcount;
3605	xvfsp.vfc_flags = vfsp->vfc_flags;
3606	/*
3607	 * These are unused in userland, we keep them
3608	 * to not break binary compatibility.
3609	 */
3610	xvfsp.vfc_vfsops = NULL;
3611	xvfsp.vfc_next = NULL;
3612	return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp)));
3613}
3614
3615#ifdef COMPAT_FREEBSD32
3616struct xvfsconf32 {
3617	uint32_t	vfc_vfsops;
3618	char		vfc_name[MFSNAMELEN];
3619	int32_t		vfc_typenum;
3620	int32_t		vfc_refcount;
3621	int32_t		vfc_flags;
3622	uint32_t	vfc_next;
3623};
3624
3625static int
3626vfsconf2x32(struct sysctl_req *req, struct vfsconf *vfsp)
3627{
3628	struct xvfsconf32 xvfsp;
3629
3630	strcpy(xvfsp.vfc_name, vfsp->vfc_name);
3631	xvfsp.vfc_typenum = vfsp->vfc_typenum;
3632	xvfsp.vfc_refcount = vfsp->vfc_refcount;
3633	xvfsp.vfc_flags = vfsp->vfc_flags;
3634	xvfsp.vfc_vfsops = 0;
3635	xvfsp.vfc_next = 0;
3636	return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp)));
3637}
3638#endif
3639
3640/*
3641 * Top level filesystem related information gathering.
3642 */
3643static int
3644sysctl_vfs_conflist(SYSCTL_HANDLER_ARGS)
3645{
3646	struct vfsconf *vfsp;
3647	int error;
3648
3649	error = 0;
3650	vfsconf_slock();
3651	TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) {
3652#ifdef COMPAT_FREEBSD32
3653		if (req->flags & SCTL_MASK32)
3654			error = vfsconf2x32(req, vfsp);
3655		else
3656#endif
3657			error = vfsconf2x(req, vfsp);
3658		if (error)
3659			break;
3660	}
3661	vfsconf_sunlock();
3662	return (error);
3663}
3664
3665SYSCTL_PROC(_vfs, OID_AUTO, conflist, CTLTYPE_OPAQUE | CTLFLAG_RD |
3666    CTLFLAG_MPSAFE, NULL, 0, sysctl_vfs_conflist,
3667    "S,xvfsconf", "List of all configured filesystems");
3668
3669#ifndef BURN_BRIDGES
3670static int	sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS);
3671
3672static int
3673vfs_sysctl(SYSCTL_HANDLER_ARGS)
3674{
3675	int *name = (int *)arg1 - 1;	/* XXX */
3676	u_int namelen = arg2 + 1;	/* XXX */
3677	struct vfsconf *vfsp;
3678
3679	log(LOG_WARNING, "userland calling deprecated sysctl, "
3680	    "please rebuild world\n");
3681
3682#if 1 || defined(COMPAT_PRELITE2)
3683	/* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. */
3684	if (namelen == 1)
3685		return (sysctl_ovfs_conf(oidp, arg1, arg2, req));
3686#endif
3687
3688	switch (name[1]) {
3689	case VFS_MAXTYPENUM:
3690		if (namelen != 2)
3691			return (ENOTDIR);
3692		return (SYSCTL_OUT(req, &maxvfsconf, sizeof(int)));
3693	case VFS_CONF:
3694		if (namelen != 3)
3695			return (ENOTDIR);	/* overloaded */
3696		vfsconf_slock();
3697		TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) {
3698			if (vfsp->vfc_typenum == name[2])
3699				break;
3700		}
3701		vfsconf_sunlock();
3702		if (vfsp == NULL)
3703			return (EOPNOTSUPP);
3704#ifdef COMPAT_FREEBSD32
3705		if (req->flags & SCTL_MASK32)
3706			return (vfsconf2x32(req, vfsp));
3707		else
3708#endif
3709			return (vfsconf2x(req, vfsp));
3710	}
3711	return (EOPNOTSUPP);
3712}
3713
3714static SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD | CTLFLAG_SKIP |
3715    CTLFLAG_MPSAFE, vfs_sysctl,
3716    "Generic filesystem");
3717
3718#if 1 || defined(COMPAT_PRELITE2)
3719
3720static int
3721sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS)
3722{
3723	int error;
3724	struct vfsconf *vfsp;
3725	struct ovfsconf ovfs;
3726
3727	vfsconf_slock();
3728	TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) {
3729		bzero(&ovfs, sizeof(ovfs));
3730		ovfs.vfc_vfsops = vfsp->vfc_vfsops;	/* XXX used as flag */
3731		strcpy(ovfs.vfc_name, vfsp->vfc_name);
3732		ovfs.vfc_index = vfsp->vfc_typenum;
3733		ovfs.vfc_refcount = vfsp->vfc_refcount;
3734		ovfs.vfc_flags = vfsp->vfc_flags;
3735		error = SYSCTL_OUT(req, &ovfs, sizeof ovfs);
3736		if (error != 0) {
3737			vfsconf_sunlock();
3738			return (error);
3739		}
3740	}
3741	vfsconf_sunlock();
3742	return (0);
3743}
3744
3745#endif /* 1 || COMPAT_PRELITE2 */
3746#endif /* !BURN_BRIDGES */
3747
3748#define KINFO_VNODESLOP		10
3749#ifdef notyet
3750/*
3751 * Dump vnode list (via sysctl).
3752 */
3753/* ARGSUSED */
3754static int
3755sysctl_vnode(SYSCTL_HANDLER_ARGS)
3756{
3757	struct xvnode *xvn;
3758	struct mount *mp;
3759	struct vnode *vp;
3760	int error, len, n;
3761
3762	/*
3763	 * Stale numvnodes access is not fatal here.
3764	 */
3765	req->lock = 0;
3766	len = (numvnodes + KINFO_VNODESLOP) * sizeof *xvn;
3767	if (!req->oldptr)
3768		/* Make an estimate */
3769		return (SYSCTL_OUT(req, 0, len));
3770
3771	error = sysctl_wire_old_buffer(req, 0);
3772	if (error != 0)
3773		return (error);
3774	xvn = malloc(len, M_TEMP, M_ZERO | M_WAITOK);
3775	n = 0;
3776	mtx_lock(&mountlist_mtx);
3777	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
3778		if (vfs_busy(mp, MBF_NOWAIT | MBF_MNTLSTLOCK))
3779			continue;
3780		MNT_ILOCK(mp);
3781		TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
3782			if (n == len)
3783				break;
3784			vref(vp);
3785			xvn[n].xv_size = sizeof *xvn;
3786			xvn[n].xv_vnode = vp;
3787			xvn[n].xv_id = 0;	/* XXX compat */
3788#define XV_COPY(field) xvn[n].xv_##field = vp->v_##field
3789			XV_COPY(usecount);
3790			XV_COPY(writecount);
3791			XV_COPY(holdcnt);
3792			XV_COPY(mount);
3793			XV_COPY(numoutput);
3794			XV_COPY(type);
3795#undef XV_COPY
3796			xvn[n].xv_flag = vp->v_vflag;
3797
3798			switch (vp->v_type) {
3799			case VREG:
3800			case VDIR:
3801			case VLNK:
3802				break;
3803			case VBLK:
3804			case VCHR:
3805				if (vp->v_rdev == NULL) {
3806					vrele(vp);
3807					continue;
3808				}
3809				xvn[n].xv_dev = dev2udev(vp->v_rdev);
3810				break;
3811			case VSOCK:
3812				xvn[n].xv_socket = vp->v_socket;
3813				break;
3814			case VFIFO:
3815				xvn[n].xv_fifo = vp->v_fifoinfo;
3816				break;
3817			case VNON:
3818			case VBAD:
3819			default:
3820				/* shouldn't happen? */
3821				vrele(vp);
3822				continue;
3823			}
3824			vrele(vp);
3825			++n;
3826		}
3827		MNT_IUNLOCK(mp);
3828		mtx_lock(&mountlist_mtx);
3829		vfs_unbusy(mp);
3830		if (n == len)
3831			break;
3832	}
3833	mtx_unlock(&mountlist_mtx);
3834
3835	error = SYSCTL_OUT(req, xvn, n * sizeof *xvn);
3836	free(xvn, M_TEMP);
3837	return (error);
3838}
3839
3840SYSCTL_PROC(_kern, KERN_VNODE, vnode, CTLTYPE_OPAQUE | CTLFLAG_RD |
3841    CTLFLAG_MPSAFE, 0, 0, sysctl_vnode, "S,xvnode",
3842    "");
3843#endif
3844
3845static void
3846unmount_or_warn(struct mount *mp)
3847{
3848	int error;
3849
3850	error = dounmount(mp, MNT_FORCE, curthread);
3851	if (error != 0) {
3852		printf("unmount of %s failed (", mp->mnt_stat.f_mntonname);
3853		if (error == EBUSY)
3854			printf("BUSY)\n");
3855		else
3856			printf("%d)\n", error);
3857	}
3858}
3859
3860/*
3861 * Unmount all filesystems. The list is traversed in reverse order
3862 * of mounting to avoid dependencies.
3863 */
3864void
3865vfs_unmountall(void)
3866{
3867	struct mount *mp, *tmp;
3868
3869	CTR1(KTR_VFS, "%s: unmounting all filesystems", __func__);
3870
3871	/*
3872	 * Since this only runs when rebooting, it is not interlocked.
3873	 */
3874	TAILQ_FOREACH_REVERSE_SAFE(mp, &mountlist, mntlist, mnt_list, tmp) {
3875		vfs_ref(mp);
3876
3877		/*
3878		 * Forcibly unmounting "/dev" before "/" would prevent clean
3879		 * unmount of the latter.
3880		 */
3881		if (mp == rootdevmp)
3882			continue;
3883
3884		unmount_or_warn(mp);
3885	}
3886
3887	if (rootdevmp != NULL)
3888		unmount_or_warn(rootdevmp);
3889}
3890
3891/*
3892 * perform msync on all vnodes under a mount point
3893 * the mount point must be locked.
3894 */
3895void
3896vfs_msync(struct mount *mp, int flags)
3897{
3898	struct vnode *vp, *mvp;
3899	struct vm_object *obj;
3900
3901	CTR2(KTR_VFS, "%s: mp %p", __func__, mp);
3902	MNT_VNODE_FOREACH_ACTIVE(vp, mp, mvp) {
3903		obj = vp->v_object;
3904		if (obj != NULL && (obj->flags & OBJ_MIGHTBEDIRTY) != 0 &&
3905		    (flags == MNT_WAIT || VOP_ISLOCKED(vp) == 0)) {
3906			if (!vget(vp,
3907			    LK_EXCLUSIVE | LK_RETRY | LK_INTERLOCK,
3908			    curthread)) {
3909				if (vp->v_vflag & VV_NOSYNC) {	/* unlinked */
3910					vput(vp);
3911					continue;
3912				}
3913
3914				obj = vp->v_object;
3915				if (obj != NULL) {
3916					VM_OBJECT_WLOCK(obj);
3917					vm_object_page_clean(obj, 0, 0,
3918					    flags == MNT_WAIT ?
3919					    OBJPC_SYNC : OBJPC_NOSYNC);
3920					VM_OBJECT_WUNLOCK(obj);
3921				}
3922				vput(vp);
3923			}
3924		} else
3925			VI_UNLOCK(vp);
3926	}
3927}
3928
3929static void
3930destroy_vpollinfo_free(struct vpollinfo *vi)
3931{
3932
3933	knlist_destroy(&vi->vpi_selinfo.si_note);
3934	mtx_destroy(&vi->vpi_lock);
3935	uma_zfree(vnodepoll_zone, vi);
3936}
3937
3938static void
3939destroy_vpollinfo(struct vpollinfo *vi)
3940{
3941
3942	knlist_clear(&vi->vpi_selinfo.si_note, 1);
3943	seldrain(&vi->vpi_selinfo);
3944	destroy_vpollinfo_free(vi);
3945}
3946
3947/*
3948 * Initialize per-vnode helper structure to hold poll-related state.
3949 */
3950void
3951v_addpollinfo(struct vnode *vp)
3952{
3953	struct vpollinfo *vi;
3954
3955	if (vp->v_pollinfo != NULL)
3956		return;
3957	vi = uma_zalloc(vnodepoll_zone, M_WAITOK | M_ZERO);
3958	mtx_init(&vi->vpi_lock, "vnode pollinfo", NULL, MTX_DEF);
3959	knlist_init(&vi->vpi_selinfo.si_note, vp, vfs_knllock,
3960	    vfs_knlunlock, vfs_knl_assert_locked, vfs_knl_assert_unlocked);
3961	VI_LOCK(vp);
3962	if (vp->v_pollinfo != NULL) {
3963		VI_UNLOCK(vp);
3964		destroy_vpollinfo_free(vi);
3965		return;
3966	}
3967	vp->v_pollinfo = vi;
3968	VI_UNLOCK(vp);
3969}
3970
3971/*
3972 * Record a process's interest in events which might happen to
3973 * a vnode.  Because poll uses the historic select-style interface
3974 * internally, this routine serves as both the ``check for any
3975 * pending events'' and the ``record my interest in future events''
3976 * functions.  (These are done together, while the lock is held,
3977 * to avoid race conditions.)
3978 */
3979int
3980vn_pollrecord(struct vnode *vp, struct thread *td, int events)
3981{
3982
3983	v_addpollinfo(vp);
3984	mtx_lock(&vp->v_pollinfo->vpi_lock);
3985	if (vp->v_pollinfo->vpi_revents & events) {
3986		/*
3987		 * This leaves events we are not interested
3988		 * in available for the other process which
3989		 * which presumably had requested them
3990		 * (otherwise they would never have been
3991		 * recorded).
3992		 */
3993		events &= vp->v_pollinfo->vpi_revents;
3994		vp->v_pollinfo->vpi_revents &= ~events;
3995
3996		mtx_unlock(&vp->v_pollinfo->vpi_lock);
3997		return (events);
3998	}
3999	vp->v_pollinfo->vpi_events |= events;
4000	selrecord(td, &vp->v_pollinfo->vpi_selinfo);
4001	mtx_unlock(&vp->v_pollinfo->vpi_lock);
4002	return (0);
4003}
4004
4005/*
4006 * Routine to create and manage a filesystem syncer vnode.
4007 */
4008#define sync_close ((int (*)(struct  vop_close_args *))nullop)
4009static int	sync_fsync(struct  vop_fsync_args *);
4010static int	sync_inactive(struct  vop_inactive_args *);
4011static int	sync_reclaim(struct  vop_reclaim_args *);
4012
4013static struct vop_vector sync_vnodeops = {
4014	.vop_bypass =	VOP_EOPNOTSUPP,
4015	.vop_close =	sync_close,		/* close */
4016	.vop_fsync =	sync_fsync,		/* fsync */
4017	.vop_inactive =	sync_inactive,	/* inactive */
4018	.vop_reclaim =	sync_reclaim,	/* reclaim */
4019	.vop_lock1 =	vop_stdlock,	/* lock */
4020	.vop_unlock =	vop_stdunlock,	/* unlock */
4021	.vop_islocked =	vop_stdislocked,	/* islocked */
4022};
4023
4024/*
4025 * Create a new filesystem syncer vnode for the specified mount point.
4026 */
4027void
4028vfs_allocate_syncvnode(struct mount *mp)
4029{
4030	struct vnode *vp;
4031	struct bufobj *bo;
4032	static long start, incr, next;
4033	int error;
4034
4035	/* Allocate a new vnode */
4036	error = getnewvnode("syncer", mp, &sync_vnodeops, &vp);
4037	if (error != 0)
4038		panic("vfs_allocate_syncvnode: getnewvnode() failed");
4039	vp->v_type = VNON;
4040	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
4041	vp->v_vflag |= VV_FORCEINSMQ;
4042	error = insmntque(vp, mp);
4043	if (error != 0)
4044		panic("vfs_allocate_syncvnode: insmntque() failed");
4045	vp->v_vflag &= ~VV_FORCEINSMQ;
4046	VOP_UNLOCK(vp, 0);
4047	/*
4048	 * Place the vnode onto the syncer worklist. We attempt to
4049	 * scatter them about on the list so that they will go off
4050	 * at evenly distributed times even if all the filesystems
4051	 * are mounted at once.
4052	 */
4053	next += incr;
4054	if (next == 0 || next > syncer_maxdelay) {
4055		start /= 2;
4056		incr /= 2;
4057		if (start == 0) {
4058			start = syncer_maxdelay / 2;
4059			incr = syncer_maxdelay;
4060		}
4061		next = start;
4062	}
4063	bo = &vp->v_bufobj;
4064	BO_LOCK(bo);
4065	vn_syncer_add_to_worklist(bo, syncdelay > 0 ? next % syncdelay : 0);
4066	/* XXX - vn_syncer_add_to_worklist() also grabs and drops sync_mtx. */
4067	mtx_lock(&sync_mtx);
4068	sync_vnode_count++;
4069	if (mp->mnt_syncer == NULL) {
4070		mp->mnt_syncer = vp;
4071		vp = NULL;
4072	}
4073	mtx_unlock(&sync_mtx);
4074	BO_UNLOCK(bo);
4075	if (vp != NULL) {
4076		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
4077		vgone(vp);
4078		vput(vp);
4079	}
4080}
4081
4082void
4083vfs_deallocate_syncvnode(struct mount *mp)
4084{
4085	struct vnode *vp;
4086
4087	mtx_lock(&sync_mtx);
4088	vp = mp->mnt_syncer;
4089	if (vp != NULL)
4090		mp->mnt_syncer = NULL;
4091	mtx_unlock(&sync_mtx);
4092	if (vp != NULL)
4093		vrele(vp);
4094}
4095
4096/*
4097 * Do a lazy sync of the filesystem.
4098 */
4099static int
4100sync_fsync(struct vop_fsync_args *ap)
4101{
4102	struct vnode *syncvp = ap->a_vp;
4103	struct mount *mp = syncvp->v_mount;
4104	int error, save;
4105	struct bufobj *bo;
4106
4107	/*
4108	 * We only need to do something if this is a lazy evaluation.
4109	 */
4110	if (ap->a_waitfor != MNT_LAZY)
4111		return (0);
4112
4113	/*
4114	 * Move ourselves to the back of the sync list.
4115	 */
4116	bo = &syncvp->v_bufobj;
4117	BO_LOCK(bo);
4118	vn_syncer_add_to_worklist(bo, syncdelay);
4119	BO_UNLOCK(bo);
4120
4121	/*
4122	 * Walk the list of vnodes pushing all that are dirty and
4123	 * not already on the sync list.
4124	 */
4125	if (vfs_busy(mp, MBF_NOWAIT) != 0)
4126		return (0);
4127	if (vn_start_write(NULL, &mp, V_NOWAIT) != 0) {
4128		vfs_unbusy(mp);
4129		return (0);
4130	}
4131	save = curthread_pflags_set(TDP_SYNCIO);
4132	vfs_msync(mp, MNT_NOWAIT);
4133	error = VFS_SYNC(mp, MNT_LAZY);
4134	curthread_pflags_restore(save);
4135	vn_finished_write(mp);
4136	vfs_unbusy(mp);
4137	return (error);
4138}
4139
4140/*
4141 * The syncer vnode is no referenced.
4142 */
4143static int
4144sync_inactive(struct vop_inactive_args *ap)
4145{
4146
4147	vgone(ap->a_vp);
4148	return (0);
4149}
4150
4151/*
4152 * The syncer vnode is no longer needed and is being decommissioned.
4153 *
4154 * Modifications to the worklist must be protected by sync_mtx.
4155 */
4156static int
4157sync_reclaim(struct vop_reclaim_args *ap)
4158{
4159	struct vnode *vp = ap->a_vp;
4160	struct bufobj *bo;
4161
4162	bo = &vp->v_bufobj;
4163	BO_LOCK(bo);
4164	mtx_lock(&sync_mtx);
4165	if (vp->v_mount->mnt_syncer == vp)
4166		vp->v_mount->mnt_syncer = NULL;
4167	if (bo->bo_flag & BO_ONWORKLST) {
4168		LIST_REMOVE(bo, bo_synclist);
4169		syncer_worklist_len--;
4170		sync_vnode_count--;
4171		bo->bo_flag &= ~BO_ONWORKLST;
4172	}
4173	mtx_unlock(&sync_mtx);
4174	BO_UNLOCK(bo);
4175
4176	return (0);
4177}
4178
4179/*
4180 * Check if vnode represents a disk device
4181 */
4182int
4183vn_isdisk(struct vnode *vp, int *errp)
4184{
4185	int error;
4186
4187	if (vp->v_type != VCHR) {
4188		error = ENOTBLK;
4189		goto out;
4190	}
4191	error = 0;
4192	dev_lock();
4193	if (vp->v_rdev == NULL)
4194		error = ENXIO;
4195	else if (vp->v_rdev->si_devsw == NULL)
4196		error = ENXIO;
4197	else if (!(vp->v_rdev->si_devsw->d_flags & D_DISK))
4198		error = ENOTBLK;
4199	dev_unlock();
4200out:
4201	if (errp != NULL)
4202		*errp = error;
4203	return (error == 0);
4204}
4205
4206/*
4207 * Common filesystem object access control check routine.  Accepts a
4208 * vnode's type, "mode", uid and gid, requested access mode, credentials,
4209 * and optional call-by-reference privused argument allowing vaccess()
4210 * to indicate to the caller whether privilege was used to satisfy the
4211 * request (obsoleted).  Returns 0 on success, or an errno on failure.
4212 */
4213int
4214vaccess(enum vtype type, mode_t file_mode, uid_t file_uid, gid_t file_gid,
4215    accmode_t accmode, struct ucred *cred, int *privused)
4216{
4217	accmode_t dac_granted;
4218	accmode_t priv_granted;
4219
4220	KASSERT((accmode & ~(VEXEC | VWRITE | VREAD | VADMIN | VAPPEND)) == 0,
4221	    ("invalid bit in accmode"));
4222	KASSERT((accmode & VAPPEND) == 0 || (accmode & VWRITE),
4223	    ("VAPPEND without VWRITE"));
4224
4225	/*
4226	 * Look for a normal, non-privileged way to access the file/directory
4227	 * as requested.  If it exists, go with that.
4228	 */
4229
4230	if (privused != NULL)
4231		*privused = 0;
4232
4233	dac_granted = 0;
4234
4235	/* Check the owner. */
4236	if (cred->cr_uid == file_uid) {
4237		dac_granted |= VADMIN;
4238		if (file_mode & S_IXUSR)
4239			dac_granted |= VEXEC;
4240		if (file_mode & S_IRUSR)
4241			dac_granted |= VREAD;
4242		if (file_mode & S_IWUSR)
4243			dac_granted |= (VWRITE | VAPPEND);
4244
4245		if ((accmode & dac_granted) == accmode)
4246			return (0);
4247
4248		goto privcheck;
4249	}
4250
4251	/* Otherwise, check the groups (first match) */
4252	if (groupmember(file_gid, cred)) {
4253		if (file_mode & S_IXGRP)
4254			dac_granted |= VEXEC;
4255		if (file_mode & S_IRGRP)
4256			dac_granted |= VREAD;
4257		if (file_mode & S_IWGRP)
4258			dac_granted |= (VWRITE | VAPPEND);
4259
4260		if ((accmode & dac_granted) == accmode)
4261			return (0);
4262
4263		goto privcheck;
4264	}
4265
4266	/* Otherwise, check everyone else. */
4267	if (file_mode & S_IXOTH)
4268		dac_granted |= VEXEC;
4269	if (file_mode & S_IROTH)
4270		dac_granted |= VREAD;
4271	if (file_mode & S_IWOTH)
4272		dac_granted |= (VWRITE | VAPPEND);
4273	if ((accmode & dac_granted) == accmode)
4274		return (0);
4275
4276privcheck:
4277	/*
4278	 * Build a privilege mask to determine if the set of privileges
4279	 * satisfies the requirements when combined with the granted mask
4280	 * from above.  For each privilege, if the privilege is required,
4281	 * bitwise or the request type onto the priv_granted mask.
4282	 */
4283	priv_granted = 0;
4284
4285	if (type == VDIR) {
4286		/*
4287		 * For directories, use PRIV_VFS_LOOKUP to satisfy VEXEC
4288		 * requests, instead of PRIV_VFS_EXEC.
4289		 */
4290		if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) &&
4291		    !priv_check_cred(cred, PRIV_VFS_LOOKUP, 0))
4292			priv_granted |= VEXEC;
4293	} else {
4294		/*
4295		 * Ensure that at least one execute bit is on. Otherwise,
4296		 * a privileged user will always succeed, and we don't want
4297		 * this to happen unless the file really is executable.
4298		 */
4299		if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) &&
4300		    (file_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) != 0 &&
4301		    !priv_check_cred(cred, PRIV_VFS_EXEC, 0))
4302			priv_granted |= VEXEC;
4303	}
4304
4305	if ((accmode & VREAD) && ((dac_granted & VREAD) == 0) &&
4306	    !priv_check_cred(cred, PRIV_VFS_READ, 0))
4307		priv_granted |= VREAD;
4308
4309	if ((accmode & VWRITE) && ((dac_granted & VWRITE) == 0) &&
4310	    !priv_check_cred(cred, PRIV_VFS_WRITE, 0))
4311		priv_granted |= (VWRITE | VAPPEND);
4312
4313	if ((accmode & VADMIN) && ((dac_granted & VADMIN) == 0) &&
4314	    !priv_check_cred(cred, PRIV_VFS_ADMIN, 0))
4315		priv_granted |= VADMIN;
4316
4317	if ((accmode & (priv_granted | dac_granted)) == accmode) {
4318		/* XXX audit: privilege used */
4319		if (privused != NULL)
4320			*privused = 1;
4321		return (0);
4322	}
4323
4324	return ((accmode & VADMIN) ? EPERM : EACCES);
4325}
4326
4327/*
4328 * Credential check based on process requesting service, and per-attribute
4329 * permissions.
4330 */
4331int
4332extattr_check_cred(struct vnode *vp, int attrnamespace, struct ucred *cred,
4333    struct thread *td, accmode_t accmode)
4334{
4335
4336	/*
4337	 * Kernel-invoked always succeeds.
4338	 */
4339	if (cred == NOCRED)
4340		return (0);
4341
4342	/*
4343	 * Do not allow privileged processes in jail to directly manipulate
4344	 * system attributes.
4345	 */
4346	switch (attrnamespace) {
4347	case EXTATTR_NAMESPACE_SYSTEM:
4348		/* Potentially should be: return (EPERM); */
4349		return (priv_check_cred(cred, PRIV_VFS_EXTATTR_SYSTEM, 0));
4350	case EXTATTR_NAMESPACE_USER:
4351		return (VOP_ACCESS(vp, accmode, cred, td));
4352	default:
4353		return (EPERM);
4354	}
4355}
4356
4357#ifdef DEBUG_VFS_LOCKS
4358/*
4359 * This only exists to suppress warnings from unlocked specfs accesses.  It is
4360 * no longer ok to have an unlocked VFS.
4361 */
4362#define	IGNORE_LOCK(vp) (panicstr != NULL || (vp) == NULL ||		\
4363	(vp)->v_type == VCHR ||	(vp)->v_type == VBAD)
4364
4365int vfs_badlock_ddb = 1;	/* Drop into debugger on violation. */
4366SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_ddb, CTLFLAG_RW, &vfs_badlock_ddb, 0,
4367    "Drop into debugger on lock violation");
4368
4369int vfs_badlock_mutex = 1;	/* Check for interlock across VOPs. */
4370SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_mutex, CTLFLAG_RW, &vfs_badlock_mutex,
4371    0, "Check for interlock across VOPs");
4372
4373int vfs_badlock_print = 1;	/* Print lock violations. */
4374SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_print, CTLFLAG_RW, &vfs_badlock_print,
4375    0, "Print lock violations");
4376
4377#ifdef KDB
4378int vfs_badlock_backtrace = 1;	/* Print backtrace at lock violations. */
4379SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_backtrace, CTLFLAG_RW,
4380    &vfs_badlock_backtrace, 0, "Print backtrace at lock violations");
4381#endif
4382
4383static void
4384vfs_badlock(const char *msg, const char *str, struct vnode *vp)
4385{
4386
4387#ifdef KDB
4388	if (vfs_badlock_backtrace)
4389		kdb_backtrace();
4390#endif
4391	if (vfs_badlock_print)
4392		printf("%s: %p %s\n", str, (void *)vp, msg);
4393	if (vfs_badlock_ddb)
4394		kdb_enter(KDB_WHY_VFSLOCK, "lock violation");
4395}
4396
4397void
4398assert_vi_locked(struct vnode *vp, const char *str)
4399{
4400
4401	if (vfs_badlock_mutex && !mtx_owned(VI_MTX(vp)))
4402		vfs_badlock("interlock is not locked but should be", str, vp);
4403}
4404
4405void
4406assert_vi_unlocked(struct vnode *vp, const char *str)
4407{
4408
4409	if (vfs_badlock_mutex && mtx_owned(VI_MTX(vp)))
4410		vfs_badlock("interlock is locked but should not be", str, vp);
4411}
4412
4413void
4414assert_vop_locked(struct vnode *vp, const char *str)
4415{
4416	int locked;
4417
4418	if (!IGNORE_LOCK(vp)) {
4419		locked = VOP_ISLOCKED(vp);
4420		if (locked == 0 || locked == LK_EXCLOTHER)
4421			vfs_badlock("is not locked but should be", str, vp);
4422	}
4423}
4424
4425void
4426assert_vop_unlocked(struct vnode *vp, const char *str)
4427{
4428
4429	if (!IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) == LK_EXCLUSIVE)
4430		vfs_badlock("is locked but should not be", str, vp);
4431}
4432
4433void
4434assert_vop_elocked(struct vnode *vp, const char *str)
4435{
4436
4437	if (!IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) != LK_EXCLUSIVE)
4438		vfs_badlock("is not exclusive locked but should be", str, vp);
4439}
4440
4441#if 0
4442void
4443assert_vop_elocked_other(struct vnode *vp, const char *str)
4444{
4445
4446	if (!IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) != LK_EXCLOTHER)
4447		vfs_badlock("is not exclusive locked by another thread",
4448		    str, vp);
4449}
4450
4451void
4452assert_vop_slocked(struct vnode *vp, const char *str)
4453{
4454
4455	if (!IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) != LK_SHARED)
4456		vfs_badlock("is not locked shared but should be", str, vp);
4457}
4458#endif /* 0 */
4459#endif /* DEBUG_VFS_LOCKS */
4460
4461void
4462vop_rename_fail(struct vop_rename_args *ap)
4463{
4464
4465	if (ap->a_tvp != NULL)
4466		vput(ap->a_tvp);
4467	if (ap->a_tdvp == ap->a_tvp)
4468		vrele(ap->a_tdvp);
4469	else
4470		vput(ap->a_tdvp);
4471	vrele(ap->a_fdvp);
4472	vrele(ap->a_fvp);
4473}
4474
4475void
4476vop_rename_pre(void *ap)
4477{
4478	struct vop_rename_args *a = ap;
4479
4480#ifdef DEBUG_VFS_LOCKS
4481	if (a->a_tvp)
4482		ASSERT_VI_UNLOCKED(a->a_tvp, "VOP_RENAME");
4483	ASSERT_VI_UNLOCKED(a->a_tdvp, "VOP_RENAME");
4484	ASSERT_VI_UNLOCKED(a->a_fvp, "VOP_RENAME");
4485	ASSERT_VI_UNLOCKED(a->a_fdvp, "VOP_RENAME");
4486
4487	/* Check the source (from). */
4488	if (a->a_tdvp->v_vnlock != a->a_fdvp->v_vnlock &&
4489	    (a->a_tvp == NULL || a->a_tvp->v_vnlock != a->a_fdvp->v_vnlock))
4490		ASSERT_VOP_UNLOCKED(a->a_fdvp, "vop_rename: fdvp locked");
4491	if (a->a_tvp == NULL || a->a_tvp->v_vnlock != a->a_fvp->v_vnlock)
4492		ASSERT_VOP_UNLOCKED(a->a_fvp, "vop_rename: fvp locked");
4493
4494	/* Check the target. */
4495	if (a->a_tvp)
4496		ASSERT_VOP_LOCKED(a->a_tvp, "vop_rename: tvp not locked");
4497	ASSERT_VOP_LOCKED(a->a_tdvp, "vop_rename: tdvp not locked");
4498#endif
4499	if (a->a_tdvp != a->a_fdvp)
4500		vhold(a->a_fdvp);
4501	if (a->a_tvp != a->a_fvp)
4502		vhold(a->a_fvp);
4503	vhold(a->a_tdvp);
4504	if (a->a_tvp)
4505		vhold(a->a_tvp);
4506}
4507
4508void
4509vop_strategy_pre(void *ap)
4510{
4511#ifdef DEBUG_VFS_LOCKS
4512	struct vop_strategy_args *a;
4513	struct buf *bp;
4514
4515	a = ap;
4516	bp = a->a_bp;
4517
4518	/*
4519	 * Cluster ops lock their component buffers but not the IO container.
4520	 */
4521	if ((bp->b_flags & B_CLUSTER) != 0)
4522		return;
4523
4524	if (panicstr == NULL && !BUF_ISLOCKED(bp)) {
4525		if (vfs_badlock_print)
4526			printf(
4527			    "VOP_STRATEGY: bp is not locked but should be\n");
4528		if (vfs_badlock_ddb)
4529			kdb_enter(KDB_WHY_VFSLOCK, "lock violation");
4530	}
4531#endif
4532}
4533
4534void
4535vop_lock_pre(void *ap)
4536{
4537#ifdef DEBUG_VFS_LOCKS
4538	struct vop_lock1_args *a = ap;
4539
4540	if ((a->a_flags & LK_INTERLOCK) == 0)
4541		ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK");
4542	else
4543		ASSERT_VI_LOCKED(a->a_vp, "VOP_LOCK");
4544#endif
4545}
4546
4547void
4548vop_lock_post(void *ap, int rc)
4549{
4550#ifdef DEBUG_VFS_LOCKS
4551	struct vop_lock1_args *a = ap;
4552
4553	ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK");
4554	if (rc == 0 && (a->a_flags & LK_EXCLOTHER) == 0)
4555		ASSERT_VOP_LOCKED(a->a_vp, "VOP_LOCK");
4556#endif
4557}
4558
4559void
4560vop_unlock_pre(void *ap)
4561{
4562#ifdef DEBUG_VFS_LOCKS
4563	struct vop_unlock_args *a = ap;
4564
4565	if (a->a_flags & LK_INTERLOCK)
4566		ASSERT_VI_LOCKED(a->a_vp, "VOP_UNLOCK");
4567	ASSERT_VOP_LOCKED(a->a_vp, "VOP_UNLOCK");
4568#endif
4569}
4570
4571void
4572vop_unlock_post(void *ap, int rc)
4573{
4574#ifdef DEBUG_VFS_LOCKS
4575	struct vop_unlock_args *a = ap;
4576
4577	if (a->a_flags & LK_INTERLOCK)
4578		ASSERT_VI_UNLOCKED(a->a_vp, "VOP_UNLOCK");
4579#endif
4580}
4581
4582void
4583vop_create_post(void *ap, int rc)
4584{
4585	struct vop_create_args *a = ap;
4586
4587	if (!rc)
4588		VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE);
4589}
4590
4591void
4592vop_deleteextattr_post(void *ap, int rc)
4593{
4594	struct vop_deleteextattr_args *a = ap;
4595
4596	if (!rc)
4597		VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB);
4598}
4599
4600void
4601vop_link_post(void *ap, int rc)
4602{
4603	struct vop_link_args *a = ap;
4604
4605	if (!rc) {
4606		VFS_KNOTE_LOCKED(a->a_vp, NOTE_LINK);
4607		VFS_KNOTE_LOCKED(a->a_tdvp, NOTE_WRITE);
4608	}
4609}
4610
4611void
4612vop_mkdir_post(void *ap, int rc)
4613{
4614	struct vop_mkdir_args *a = ap;
4615
4616	if (!rc)
4617		VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE | NOTE_LINK);
4618}
4619
4620void
4621vop_mknod_post(void *ap, int rc)
4622{
4623	struct vop_mknod_args *a = ap;
4624
4625	if (!rc)
4626		VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE);
4627}
4628
4629void
4630vop_reclaim_post(void *ap, int rc)
4631{
4632	struct vop_reclaim_args *a = ap;
4633
4634	if (!rc)
4635		VFS_KNOTE_LOCKED(a->a_vp, NOTE_REVOKE);
4636}
4637
4638void
4639vop_remove_post(void *ap, int rc)
4640{
4641	struct vop_remove_args *a = ap;
4642
4643	if (!rc) {
4644		VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE);
4645		VFS_KNOTE_LOCKED(a->a_vp, NOTE_DELETE);
4646	}
4647}
4648
4649void
4650vop_rename_post(void *ap, int rc)
4651{
4652	struct vop_rename_args *a = ap;
4653	long hint;
4654
4655	if (!rc) {
4656		hint = NOTE_WRITE;
4657		if (a->a_fdvp == a->a_tdvp) {
4658			if (a->a_tvp != NULL && a->a_tvp->v_type == VDIR)
4659				hint |= NOTE_LINK;
4660			VFS_KNOTE_UNLOCKED(a->a_fdvp, hint);
4661			VFS_KNOTE_UNLOCKED(a->a_tdvp, hint);
4662		} else {
4663			if (a->a_fvp->v_type == VDIR)
4664				hint |= NOTE_LINK;
4665			VFS_KNOTE_UNLOCKED(a->a_fdvp, hint);
4666
4667			if (a->a_fvp->v_type == VDIR && a->a_tvp != NULL &&
4668			    a->a_tvp->v_type == VDIR)
4669				hint &= ~NOTE_LINK;
4670			VFS_KNOTE_UNLOCKED(a->a_tdvp, hint);
4671		}
4672
4673		VFS_KNOTE_UNLOCKED(a->a_fvp, NOTE_RENAME);
4674		if (a->a_tvp)
4675			VFS_KNOTE_UNLOCKED(a->a_tvp, NOTE_DELETE);
4676	}
4677	if (a->a_tdvp != a->a_fdvp)
4678		vdrop(a->a_fdvp);
4679	if (a->a_tvp != a->a_fvp)
4680		vdrop(a->a_fvp);
4681	vdrop(a->a_tdvp);
4682	if (a->a_tvp)
4683		vdrop(a->a_tvp);
4684}
4685
4686void
4687vop_rmdir_post(void *ap, int rc)
4688{
4689	struct vop_rmdir_args *a = ap;
4690
4691	if (!rc) {
4692		VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE | NOTE_LINK);
4693		VFS_KNOTE_LOCKED(a->a_vp, NOTE_DELETE);
4694	}
4695}
4696
4697void
4698vop_setattr_post(void *ap, int rc)
4699{
4700	struct vop_setattr_args *a = ap;
4701
4702	if (!rc)
4703		VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB);
4704}
4705
4706void
4707vop_setextattr_post(void *ap, int rc)
4708{
4709	struct vop_setextattr_args *a = ap;
4710
4711	if (!rc)
4712		VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB);
4713}
4714
4715void
4716vop_symlink_post(void *ap, int rc)
4717{
4718	struct vop_symlink_args *a = ap;
4719
4720	if (!rc)
4721		VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE);
4722}
4723
4724static struct knlist fs_knlist;
4725
4726static void
4727vfs_event_init(void *arg)
4728{
4729	knlist_init_mtx(&fs_knlist, NULL);
4730}
4731/* XXX - correct order? */
4732SYSINIT(vfs_knlist, SI_SUB_VFS, SI_ORDER_ANY, vfs_event_init, NULL);
4733
4734void
4735vfs_event_signal(fsid_t *fsid, uint32_t event, intptr_t data __unused)
4736{
4737
4738	KNOTE_UNLOCKED(&fs_knlist, event);
4739}
4740
4741static int	filt_fsattach(struct knote *kn);
4742static void	filt_fsdetach(struct knote *kn);
4743static int	filt_fsevent(struct knote *kn, long hint);
4744
4745struct filterops fs_filtops = {
4746	.f_isfd = 0,
4747	.f_attach = filt_fsattach,
4748	.f_detach = filt_fsdetach,
4749	.f_event = filt_fsevent
4750};
4751
4752static int
4753filt_fsattach(struct knote *kn)
4754{
4755
4756	kn->kn_flags |= EV_CLEAR;
4757	knlist_add(&fs_knlist, kn, 0);
4758	return (0);
4759}
4760
4761static void
4762filt_fsdetach(struct knote *kn)
4763{
4764
4765	knlist_remove(&fs_knlist, kn, 0);
4766}
4767
4768static int
4769filt_fsevent(struct knote *kn, long hint)
4770{
4771
4772	kn->kn_fflags |= hint;
4773	return (kn->kn_fflags != 0);
4774}
4775
4776static int
4777sysctl_vfs_ctl(SYSCTL_HANDLER_ARGS)
4778{
4779	struct vfsidctl vc;
4780	int error;
4781	struct mount *mp;
4782
4783	error = SYSCTL_IN(req, &vc, sizeof(vc));
4784	if (error)
4785		return (error);
4786	if (vc.vc_vers != VFS_CTL_VERS1)
4787		return (EINVAL);
4788	mp = vfs_getvfs(&vc.vc_fsid);
4789	if (mp == NULL)
4790		return (ENOENT);
4791	/* ensure that a specific sysctl goes to the right filesystem. */
4792	if (strcmp(vc.vc_fstypename, "*") != 0 &&
4793	    strcmp(vc.vc_fstypename, mp->mnt_vfc->vfc_name) != 0) {
4794		vfs_rel(mp);
4795		return (EINVAL);
4796	}
4797	VCTLTOREQ(&vc, req);
4798	error = VFS_SYSCTL(mp, vc.vc_op, req);
4799	vfs_rel(mp);
4800	return (error);
4801}
4802
4803SYSCTL_PROC(_vfs, OID_AUTO, ctl, CTLTYPE_OPAQUE | CTLFLAG_WR,
4804    NULL, 0, sysctl_vfs_ctl, "",
4805    "Sysctl by fsid");
4806
4807/*
4808 * Function to initialize a va_filerev field sensibly.
4809 * XXX: Wouldn't a random number make a lot more sense ??
4810 */
4811u_quad_t
4812init_va_filerev(void)
4813{
4814	struct bintime bt;
4815
4816	getbinuptime(&bt);
4817	return (((u_quad_t)bt.sec << 32LL) | (bt.frac >> 32LL));
4818}
4819
4820static int	filt_vfsread(struct knote *kn, long hint);
4821static int	filt_vfswrite(struct knote *kn, long hint);
4822static int	filt_vfsvnode(struct knote *kn, long hint);
4823static void	filt_vfsdetach(struct knote *kn);
4824static struct filterops vfsread_filtops = {
4825	.f_isfd = 1,
4826	.f_detach = filt_vfsdetach,
4827	.f_event = filt_vfsread
4828};
4829static struct filterops vfswrite_filtops = {
4830	.f_isfd = 1,
4831	.f_detach = filt_vfsdetach,
4832	.f_event = filt_vfswrite
4833};
4834static struct filterops vfsvnode_filtops = {
4835	.f_isfd = 1,
4836	.f_detach = filt_vfsdetach,
4837	.f_event = filt_vfsvnode
4838};
4839
4840static void
4841vfs_knllock(void *arg)
4842{
4843	struct vnode *vp = arg;
4844
4845	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
4846}
4847
4848static void
4849vfs_knlunlock(void *arg)
4850{
4851	struct vnode *vp = arg;
4852
4853	VOP_UNLOCK(vp, 0);
4854}
4855
4856static void
4857vfs_knl_assert_locked(void *arg)
4858{
4859#ifdef DEBUG_VFS_LOCKS
4860	struct vnode *vp = arg;
4861
4862	ASSERT_VOP_LOCKED(vp, "vfs_knl_assert_locked");
4863#endif
4864}
4865
4866static void
4867vfs_knl_assert_unlocked(void *arg)
4868{
4869#ifdef DEBUG_VFS_LOCKS
4870	struct vnode *vp = arg;
4871
4872	ASSERT_VOP_UNLOCKED(vp, "vfs_knl_assert_unlocked");
4873#endif
4874}
4875
4876int
4877vfs_kqfilter(struct vop_kqfilter_args *ap)
4878{
4879	struct vnode *vp = ap->a_vp;
4880	struct knote *kn = ap->a_kn;
4881	struct knlist *knl;
4882
4883	switch (kn->kn_filter) {
4884	case EVFILT_READ:
4885		kn->kn_fop = &vfsread_filtops;
4886		break;
4887	case EVFILT_WRITE:
4888		kn->kn_fop = &vfswrite_filtops;
4889		break;
4890	case EVFILT_VNODE:
4891		kn->kn_fop = &vfsvnode_filtops;
4892		break;
4893	default:
4894		return (EINVAL);
4895	}
4896
4897	kn->kn_hook = (caddr_t)vp;
4898
4899	v_addpollinfo(vp);
4900	if (vp->v_pollinfo == NULL)
4901		return (ENOMEM);
4902	knl = &vp->v_pollinfo->vpi_selinfo.si_note;
4903	vhold(vp);
4904	knlist_add(knl, kn, 0);
4905
4906	return (0);
4907}
4908
4909/*
4910 * Detach knote from vnode
4911 */
4912static void
4913filt_vfsdetach(struct knote *kn)
4914{
4915	struct vnode *vp = (struct vnode *)kn->kn_hook;
4916
4917	KASSERT(vp->v_pollinfo != NULL, ("Missing v_pollinfo"));
4918	knlist_remove(&vp->v_pollinfo->vpi_selinfo.si_note, kn, 0);
4919	vdrop(vp);
4920}
4921
4922/*ARGSUSED*/
4923static int
4924filt_vfsread(struct knote *kn, long hint)
4925{
4926	struct vnode *vp = (struct vnode *)kn->kn_hook;
4927	struct vattr va;
4928	int res;
4929
4930	/*
4931	 * filesystem is gone, so set the EOF flag and schedule
4932	 * the knote for deletion.
4933	 */
4934	if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) {
4935		VI_LOCK(vp);
4936		kn->kn_flags |= (EV_EOF | EV_ONESHOT);
4937		VI_UNLOCK(vp);
4938		return (1);
4939	}
4940
4941	if (VOP_GETATTR(vp, &va, curthread->td_ucred))
4942		return (0);
4943
4944	VI_LOCK(vp);
4945	kn->kn_data = va.va_size - kn->kn_fp->f_offset;
4946	res = (kn->kn_sfflags & NOTE_FILE_POLL) != 0 || kn->kn_data != 0;
4947	VI_UNLOCK(vp);
4948	return (res);
4949}
4950
4951/*ARGSUSED*/
4952static int
4953filt_vfswrite(struct knote *kn, long hint)
4954{
4955	struct vnode *vp = (struct vnode *)kn->kn_hook;
4956
4957	VI_LOCK(vp);
4958
4959	/*
4960	 * filesystem is gone, so set the EOF flag and schedule
4961	 * the knote for deletion.
4962	 */
4963	if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD))
4964		kn->kn_flags |= (EV_EOF | EV_ONESHOT);
4965
4966	kn->kn_data = 0;
4967	VI_UNLOCK(vp);
4968	return (1);
4969}
4970
4971static int
4972filt_vfsvnode(struct knote *kn, long hint)
4973{
4974	struct vnode *vp = (struct vnode *)kn->kn_hook;
4975	int res;
4976
4977	VI_LOCK(vp);
4978	if (kn->kn_sfflags & hint)
4979		kn->kn_fflags |= hint;
4980	if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) {
4981		kn->kn_flags |= EV_EOF;
4982		VI_UNLOCK(vp);
4983		return (1);
4984	}
4985	res = (kn->kn_fflags != 0);
4986	VI_UNLOCK(vp);
4987	return (res);
4988}
4989
4990int
4991vfs_read_dirent(struct vop_readdir_args *ap, struct dirent *dp, off_t off)
4992{
4993	int error;
4994
4995	if (dp->d_reclen > ap->a_uio->uio_resid)
4996		return (ENAMETOOLONG);
4997	error = uiomove(dp, dp->d_reclen, ap->a_uio);
4998	if (error) {
4999		if (ap->a_ncookies != NULL) {
5000			if (ap->a_cookies != NULL)
5001				free(ap->a_cookies, M_TEMP);
5002			ap->a_cookies = NULL;
5003			*ap->a_ncookies = 0;
5004		}
5005		return (error);
5006	}
5007	if (ap->a_ncookies == NULL)
5008		return (0);
5009
5010	KASSERT(ap->a_cookies,
5011	    ("NULL ap->a_cookies value with non-NULL ap->a_ncookies!"));
5012
5013	*ap->a_cookies = realloc(*ap->a_cookies,
5014	    (*ap->a_ncookies + 1) * sizeof(u_long), M_TEMP, M_WAITOK | M_ZERO);
5015	(*ap->a_cookies)[*ap->a_ncookies] = off;
5016	return (0);
5017}
5018
5019/*
5020 * Mark for update the access time of the file if the filesystem
5021 * supports VOP_MARKATIME.  This functionality is used by execve and
5022 * mmap, so we want to avoid the I/O implied by directly setting
5023 * va_atime for the sake of efficiency.
5024 */
5025void
5026vfs_mark_atime(struct vnode *vp, struct ucred *cred)
5027{
5028	struct mount *mp;
5029
5030	mp = vp->v_mount;
5031	ASSERT_VOP_LOCKED(vp, "vfs_mark_atime");
5032	if (mp != NULL && (mp->mnt_flag & (MNT_NOATIME | MNT_RDONLY)) == 0)
5033		(void)VOP_MARKATIME(vp);
5034}
5035
5036/*
5037 * The purpose of this routine is to remove granularity from accmode_t,
5038 * reducing it into standard unix access bits - VEXEC, VREAD, VWRITE,
5039 * VADMIN and VAPPEND.
5040 *
5041 * If it returns 0, the caller is supposed to continue with the usual
5042 * access checks using 'accmode' as modified by this routine.  If it
5043 * returns nonzero value, the caller is supposed to return that value
5044 * as errno.
5045 *
5046 * Note that after this routine runs, accmode may be zero.
5047 */
5048int
5049vfs_unixify_accmode(accmode_t *accmode)
5050{
5051	/*
5052	 * There is no way to specify explicit "deny" rule using
5053	 * file mode or POSIX.1e ACLs.
5054	 */
5055	if (*accmode & VEXPLICIT_DENY) {
5056		*accmode = 0;
5057		return (0);
5058	}
5059
5060	/*
5061	 * None of these can be translated into usual access bits.
5062	 * Also, the common case for NFSv4 ACLs is to not contain
5063	 * either of these bits. Caller should check for VWRITE
5064	 * on the containing directory instead.
5065	 */
5066	if (*accmode & (VDELETE_CHILD | VDELETE))
5067		return (EPERM);
5068
5069	if (*accmode & VADMIN_PERMS) {
5070		*accmode &= ~VADMIN_PERMS;
5071		*accmode |= VADMIN;
5072	}
5073
5074	/*
5075	 * There is no way to deny VREAD_ATTRIBUTES, VREAD_ACL
5076	 * or VSYNCHRONIZE using file mode or POSIX.1e ACL.
5077	 */
5078	*accmode &= ~(VSTAT_PERMS | VSYNCHRONIZE);
5079
5080	return (0);
5081}
5082
5083/*
5084 * These are helper functions for filesystems to traverse all
5085 * their vnodes.  See MNT_VNODE_FOREACH_ALL() in sys/mount.h.
5086 *
5087 * This interface replaces MNT_VNODE_FOREACH.
5088 */
5089
5090MALLOC_DEFINE(M_VNODE_MARKER, "vnodemarker", "vnode marker");
5091
5092struct vnode *
5093__mnt_vnode_next_all(struct vnode **mvp, struct mount *mp)
5094{
5095	struct vnode *vp;
5096
5097	if (should_yield())
5098		kern_yield(PRI_USER);
5099	MNT_ILOCK(mp);
5100	KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch"));
5101	vp = TAILQ_NEXT(*mvp, v_nmntvnodes);
5102	while (vp != NULL && (vp->v_type == VMARKER ||
5103	    (vp->v_iflag & VI_DOOMED) != 0))
5104		vp = TAILQ_NEXT(vp, v_nmntvnodes);
5105
5106	/* Check if we are done */
5107	if (vp == NULL) {
5108		__mnt_vnode_markerfree_all(mvp, mp);
5109		/* MNT_IUNLOCK(mp); -- done in above function */
5110		mtx_assert(MNT_MTX(mp), MA_NOTOWNED);
5111		return (NULL);
5112	}
5113	TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes);
5114	TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes);
5115	VI_LOCK(vp);
5116	MNT_IUNLOCK(mp);
5117	return (vp);
5118}
5119
5120struct vnode *
5121__mnt_vnode_first_all(struct vnode **mvp, struct mount *mp)
5122{
5123	struct vnode *vp;
5124
5125	*mvp = malloc(sizeof(struct vnode), M_VNODE_MARKER, M_WAITOK | M_ZERO);
5126	MNT_ILOCK(mp);
5127	MNT_REF(mp);
5128	(*mvp)->v_type = VMARKER;
5129
5130	vp = TAILQ_FIRST(&mp->mnt_nvnodelist);
5131	while (vp != NULL && (vp->v_type == VMARKER ||
5132	    (vp->v_iflag & VI_DOOMED) != 0))
5133		vp = TAILQ_NEXT(vp, v_nmntvnodes);
5134
5135	/* Check if we are done */
5136	if (vp == NULL) {
5137		MNT_REL(mp);
5138		MNT_IUNLOCK(mp);
5139		free(*mvp, M_VNODE_MARKER);
5140		*mvp = NULL;
5141		return (NULL);
5142	}
5143	(*mvp)->v_mount = mp;
5144	TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes);
5145	VI_LOCK(vp);
5146	MNT_IUNLOCK(mp);
5147	return (vp);
5148}
5149
5150
5151void
5152__mnt_vnode_markerfree_all(struct vnode **mvp, struct mount *mp)
5153{
5154
5155	if (*mvp == NULL) {
5156		MNT_IUNLOCK(mp);
5157		return;
5158	}
5159
5160	mtx_assert(MNT_MTX(mp), MA_OWNED);
5161
5162	KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch"));
5163	TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes);
5164	MNT_REL(mp);
5165	MNT_IUNLOCK(mp);
5166	free(*mvp, M_VNODE_MARKER);
5167	*mvp = NULL;
5168}
5169
5170/*
5171 * These are helper functions for filesystems to traverse their
5172 * active vnodes.  See MNT_VNODE_FOREACH_ACTIVE() in sys/mount.h
5173 */
5174static void
5175mnt_vnode_markerfree_active(struct vnode **mvp, struct mount *mp)
5176{
5177
5178	KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch"));
5179
5180	MNT_ILOCK(mp);
5181	MNT_REL(mp);
5182	MNT_IUNLOCK(mp);
5183	free(*mvp, M_VNODE_MARKER);
5184	*mvp = NULL;
5185}
5186
5187static struct vnode *
5188mnt_vnode_next_active(struct vnode **mvp, struct mount *mp)
5189{
5190	struct vnode *vp, *nvp;
5191
5192	mtx_assert(&vnode_free_list_mtx, MA_OWNED);
5193	KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch"));
5194restart:
5195	vp = TAILQ_NEXT(*mvp, v_actfreelist);
5196	TAILQ_REMOVE(&mp->mnt_activevnodelist, *mvp, v_actfreelist);
5197	while (vp != NULL) {
5198		if (vp->v_type == VMARKER) {
5199			vp = TAILQ_NEXT(vp, v_actfreelist);
5200			continue;
5201		}
5202		if (!VI_TRYLOCK(vp)) {
5203			if (mp_ncpus == 1 || should_yield()) {
5204				TAILQ_INSERT_BEFORE(vp, *mvp, v_actfreelist);
5205				mtx_unlock(&vnode_free_list_mtx);
5206				pause("vnacti", 1);
5207				mtx_lock(&vnode_free_list_mtx);
5208				goto restart;
5209			}
5210			continue;
5211		}
5212		KASSERT(vp->v_type != VMARKER, ("locked marker %p", vp));
5213		KASSERT(vp->v_mount == mp || vp->v_mount == NULL,
5214		    ("alien vnode on the active list %p %p", vp, mp));
5215		if (vp->v_mount == mp && (vp->v_iflag & VI_DOOMED) == 0)
5216			break;
5217		nvp = TAILQ_NEXT(vp, v_actfreelist);
5218		VI_UNLOCK(vp);
5219		vp = nvp;
5220	}
5221
5222	/* Check if we are done */
5223	if (vp == NULL) {
5224		mtx_unlock(&vnode_free_list_mtx);
5225		mnt_vnode_markerfree_active(mvp, mp);
5226		return (NULL);
5227	}
5228	TAILQ_INSERT_AFTER(&mp->mnt_activevnodelist, vp, *mvp, v_actfreelist);
5229	mtx_unlock(&vnode_free_list_mtx);
5230	ASSERT_VI_LOCKED(vp, "active iter");
5231	KASSERT((vp->v_iflag & VI_ACTIVE) != 0, ("Non-active vp %p", vp));
5232	return (vp);
5233}
5234
5235struct vnode *
5236__mnt_vnode_next_active(struct vnode **mvp, struct mount *mp)
5237{
5238
5239	if (should_yield())
5240		kern_yield(PRI_USER);
5241	mtx_lock(&vnode_free_list_mtx);
5242	return (mnt_vnode_next_active(mvp, mp));
5243}
5244
5245struct vnode *
5246__mnt_vnode_first_active(struct vnode **mvp, struct mount *mp)
5247{
5248	struct vnode *vp;
5249
5250	*mvp = malloc(sizeof(struct vnode), M_VNODE_MARKER, M_WAITOK | M_ZERO);
5251	MNT_ILOCK(mp);
5252	MNT_REF(mp);
5253	MNT_IUNLOCK(mp);
5254	(*mvp)->v_type = VMARKER;
5255	(*mvp)->v_mount = mp;
5256
5257	mtx_lock(&vnode_free_list_mtx);
5258	vp = TAILQ_FIRST(&mp->mnt_activevnodelist);
5259	if (vp == NULL) {
5260		mtx_unlock(&vnode_free_list_mtx);
5261		mnt_vnode_markerfree_active(mvp, mp);
5262		return (NULL);
5263	}
5264	TAILQ_INSERT_BEFORE(vp, *mvp, v_actfreelist);
5265	return (mnt_vnode_next_active(mvp, mp));
5266}
5267
5268void
5269__mnt_vnode_markerfree_active(struct vnode **mvp, struct mount *mp)
5270{
5271
5272	if (*mvp == NULL)
5273		return;
5274
5275	mtx_lock(&vnode_free_list_mtx);
5276	TAILQ_REMOVE(&mp->mnt_activevnodelist, *mvp, v_actfreelist);
5277	mtx_unlock(&vnode_free_list_mtx);
5278	mnt_vnode_markerfree_active(mvp, mp);
5279}
5280