1/*	$NetBSD$	*/
2
3/*-
4 * Copyright (c) 1997, 1998, 2004, 2005, 2007, 2008 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9 * NASA Ames Research Center, by Charles M. Hannum, and by Andrew Doran.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 *    notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 *    notice, this list of conditions and the following disclaimer in the
18 *    documentation and/or other materials provided with the distribution.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 * POSSIBILITY OF SUCH DAMAGE.
31 */
32
33/*
34 * Copyright (c) 1989, 1993
35 *	The Regents of the University of California.  All rights reserved.
36 * (c) UNIX System Laboratories, Inc.
37 * All or some portions of this file are derived from material licensed
38 * to the University of California by American Telephone and Telegraph
39 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
40 * the permission of UNIX System Laboratories, Inc.
41 *
42 * Redistribution and use in source and binary forms, with or without
43 * modification, are permitted provided that the following conditions
44 * are met:
45 * 1. Redistributions of source code must retain the above copyright
46 *    notice, this list of conditions and the following disclaimer.
47 * 2. Redistributions in binary form must reproduce the above copyright
48 *    notice, this list of conditions and the following disclaimer in the
49 *    documentation and/or other materials provided with the distribution.
50 * 3. Neither the name of the University nor the names of its contributors
51 *    may be used to endorse or promote products derived from this software
52 *    without specific prior written permission.
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64 * SUCH DAMAGE.
65 *
66 *	@(#)vfs_subr.c	8.13 (Berkeley) 4/18/94
67 */
68
69#include <sys/cdefs.h>
70__KERNEL_RCSID(0, "$NetBSD$");
71
72#include "opt_ddb.h"
73#include "opt_compat_netbsd.h"
74#include "opt_compat_43.h"
75
76#include <sys/param.h>
77#include <sys/systm.h>
78#include <sys/conf.h>
79#include <sys/dirent.h>
80#include <sys/filedesc.h>
81#include <sys/kernel.h>
82#include <sys/mount.h>
83#include <sys/vnode.h>
84#include <sys/stat.h>
85#include <sys/sysctl.h>
86#include <sys/namei.h>
87#include <sys/buf.h>
88#include <sys/errno.h>
89#include <sys/kmem.h>
90#include <sys/syscallargs.h>
91#include <sys/kauth.h>
92#include <sys/module.h>
93
94#include <miscfs/genfs/genfs.h>
95#include <miscfs/syncfs/syncfs.h>
96#include <miscfs/specfs/specdev.h>
97#include <uvm/uvm_ddb.h>
98
/*
 * Table mapping the file-type bits of a mode (mode >> 12) to a vnode type.
 */
const enum vtype iftovt_tab[16] = {
	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
};
/*
 * Table mapping a vnode type to the corresponding S_IF* mode bits.
 */
const int	vttoif_tab[9] = {
	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
	S_IFSOCK, S_IFIFO, S_IFMT,
};
107
108/*
109 * Insq/Remq for the vnode usage lists.
110 */
111#define	bufinsvn(bp, dp)	LIST_INSERT_HEAD(dp, bp, b_vnbufs)
112#define	bufremvn(bp) {							\
113	LIST_REMOVE(bp, b_vnbufs);					\
114	(bp)->b_vnbufs.le_next = NOLIST;				\
115}
116
117int doforce = 1;		/* 1 => permit forcible unmounting */
118int prtactive = 0;		/* 1 => print out reclaim of active vnodes */
119
120/*
121 * Local declarations.
122 */
123
124static int getdevvp(dev_t, vnode_t **, enum vtype);
125
126/*
127 * Initialize the vnode management data structures.
128 */
129void
130vntblinit(void)
131{
132
133	vn_initialize_syncerd();
134	vfs_vnode_sysinit();
135	vfs_mount_sysinit();
136}
137
138/*
139 * Flush out and invalidate all buffers associated with a vnode.
140 * Called with the underlying vnode locked, which should prevent new dirty
141 * buffers from being queued.
142 */
143int
144vinvalbuf(struct vnode *vp, int flags, kauth_cred_t cred, struct lwp *l,
145	  bool catch, int slptimeo)
146{
147	struct buf *bp, *nbp;
148	int error;
149	int flushflags = PGO_ALLPAGES | PGO_FREE | PGO_SYNCIO |
150	    (flags & V_SAVE ? PGO_CLEANIT | PGO_RECLAIM : 0);
151
152	/* XXXUBC this doesn't look at flags or slp* */
153	mutex_enter(vp->v_interlock);
154	error = VOP_PUTPAGES(vp, 0, 0, flushflags);
155	if (error) {
156		return error;
157	}
158
159	if (flags & V_SAVE) {
160		error = VOP_FSYNC(vp, cred, FSYNC_WAIT|FSYNC_RECLAIM, 0, 0);
161		if (error)
162		        return (error);
163		KASSERT(LIST_EMPTY(&vp->v_dirtyblkhd));
164	}
165
166	mutex_enter(&bufcache_lock);
167restart:
168	for (bp = LIST_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
169		KASSERT(bp->b_vp == vp);
170		nbp = LIST_NEXT(bp, b_vnbufs);
171		error = bbusy(bp, catch, slptimeo, NULL);
172		if (error != 0) {
173			if (error == EPASSTHROUGH)
174				goto restart;
175			mutex_exit(&bufcache_lock);
176			return (error);
177		}
178		brelsel(bp, BC_INVAL | BC_VFLUSH);
179	}
180
181	for (bp = LIST_FIRST(&vp->v_cleanblkhd); bp; bp = nbp) {
182		KASSERT(bp->b_vp == vp);
183		nbp = LIST_NEXT(bp, b_vnbufs);
184		error = bbusy(bp, catch, slptimeo, NULL);
185		if (error != 0) {
186			if (error == EPASSTHROUGH)
187				goto restart;
188			mutex_exit(&bufcache_lock);
189			return (error);
190		}
191		/*
192		 * XXX Since there are no node locks for NFS, I believe
193		 * there is a slight chance that a delayed write will
194		 * occur while sleeping just above, so check for it.
195		 */
196		if ((bp->b_oflags & BO_DELWRI) && (flags & V_SAVE)) {
197#ifdef DEBUG
198			printf("buffer still DELWRI\n");
199#endif
200			bp->b_cflags |= BC_BUSY | BC_VFLUSH;
201			mutex_exit(&bufcache_lock);
202			VOP_BWRITE(bp->b_vp, bp);
203			mutex_enter(&bufcache_lock);
204			goto restart;
205		}
206		brelsel(bp, BC_INVAL | BC_VFLUSH);
207	}
208
209#ifdef DIAGNOSTIC
210	if (!LIST_EMPTY(&vp->v_cleanblkhd) || !LIST_EMPTY(&vp->v_dirtyblkhd))
211		panic("vinvalbuf: flush failed, vp %p", vp);
212#endif
213
214	mutex_exit(&bufcache_lock);
215
216	return (0);
217}
218
219/*
220 * Destroy any in core blocks past the truncation length.
221 * Called with the underlying vnode locked, which should prevent new dirty
222 * buffers from being queued.
223 */
224int
225vtruncbuf(struct vnode *vp, daddr_t lbn, bool catch, int slptimeo)
226{
227	struct buf *bp, *nbp;
228	int error;
229	voff_t off;
230
231	off = round_page((voff_t)lbn << vp->v_mount->mnt_fs_bshift);
232	mutex_enter(vp->v_interlock);
233	error = VOP_PUTPAGES(vp, off, 0, PGO_FREE | PGO_SYNCIO);
234	if (error) {
235		return error;
236	}
237
238	mutex_enter(&bufcache_lock);
239restart:
240	for (bp = LIST_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
241		KASSERT(bp->b_vp == vp);
242		nbp = LIST_NEXT(bp, b_vnbufs);
243		if (bp->b_lblkno < lbn)
244			continue;
245		error = bbusy(bp, catch, slptimeo, NULL);
246		if (error != 0) {
247			if (error == EPASSTHROUGH)
248				goto restart;
249			mutex_exit(&bufcache_lock);
250			return (error);
251		}
252		brelsel(bp, BC_INVAL | BC_VFLUSH);
253	}
254
255	for (bp = LIST_FIRST(&vp->v_cleanblkhd); bp; bp = nbp) {
256		KASSERT(bp->b_vp == vp);
257		nbp = LIST_NEXT(bp, b_vnbufs);
258		if (bp->b_lblkno < lbn)
259			continue;
260		error = bbusy(bp, catch, slptimeo, NULL);
261		if (error != 0) {
262			if (error == EPASSTHROUGH)
263				goto restart;
264			mutex_exit(&bufcache_lock);
265			return (error);
266		}
267		brelsel(bp, BC_INVAL | BC_VFLUSH);
268	}
269	mutex_exit(&bufcache_lock);
270
271	return (0);
272}
273
274/*
275 * Flush all dirty buffers from a vnode.
276 * Called with the underlying vnode locked, which should prevent new dirty
277 * buffers from being queued.
278 */
279int
280vflushbuf(struct vnode *vp, int flags)
281{
282	struct buf *bp, *nbp;
283	int error, pflags;
284	bool dirty, sync;
285
286	sync = (flags & FSYNC_WAIT) != 0;
287	pflags = PGO_CLEANIT | PGO_ALLPAGES |
288		(sync ? PGO_SYNCIO : 0) |
289		((flags & FSYNC_LAZY) ? PGO_LAZY : 0);
290	mutex_enter(vp->v_interlock);
291	(void) VOP_PUTPAGES(vp, 0, 0, pflags);
292
293loop:
294	mutex_enter(&bufcache_lock);
295	for (bp = LIST_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
296		KASSERT(bp->b_vp == vp);
297		nbp = LIST_NEXT(bp, b_vnbufs);
298		if ((bp->b_cflags & BC_BUSY))
299			continue;
300		if ((bp->b_oflags & BO_DELWRI) == 0)
301			panic("vflushbuf: not dirty, bp %p", bp);
302		bp->b_cflags |= BC_BUSY | BC_VFLUSH;
303		mutex_exit(&bufcache_lock);
304		/*
305		 * Wait for I/O associated with indirect blocks to complete,
306		 * since there is no way to quickly wait for them below.
307		 */
308		if (bp->b_vp == vp || !sync)
309			(void) bawrite(bp);
310		else {
311			error = bwrite(bp);
312			if (error)
313				return error;
314		}
315		goto loop;
316	}
317	mutex_exit(&bufcache_lock);
318
319	if (!sync)
320		return 0;
321
322	mutex_enter(vp->v_interlock);
323	while (vp->v_numoutput != 0)
324		cv_wait(&vp->v_cv, vp->v_interlock);
325	dirty = !LIST_EMPTY(&vp->v_dirtyblkhd);
326	mutex_exit(vp->v_interlock);
327
328	if (dirty) {
329		vprint("vflushbuf: dirty", vp);
330		goto loop;
331	}
332
333	return 0;
334}
335
336/*
337 * Create a vnode for a block device.
338 * Used for root filesystem and swap areas.
339 * Also used for memory file system special devices.
340 */
341int
342bdevvp(dev_t dev, vnode_t **vpp)
343{
344
345	return (getdevvp(dev, vpp, VBLK));
346}
347
348/*
349 * Create a vnode for a character device.
350 * Used for kernfs and some console handling.
351 */
352int
353cdevvp(dev_t dev, vnode_t **vpp)
354{
355
356	return (getdevvp(dev, vpp, VCHR));
357}
358
359/*
360 * Associate a buffer with a vnode.  There must already be a hold on
361 * the vnode.
362 */
363void
364bgetvp(struct vnode *vp, struct buf *bp)
365{
366
367	KASSERT(bp->b_vp == NULL);
368	KASSERT(bp->b_objlock == &buffer_lock);
369	KASSERT(mutex_owned(vp->v_interlock));
370	KASSERT(mutex_owned(&bufcache_lock));
371	KASSERT((bp->b_cflags & BC_BUSY) != 0);
372	KASSERT(!cv_has_waiters(&bp->b_done));
373
374	vholdl(vp);
375	bp->b_vp = vp;
376	if (vp->v_type == VBLK || vp->v_type == VCHR)
377		bp->b_dev = vp->v_rdev;
378	else
379		bp->b_dev = NODEV;
380
381	/*
382	 * Insert onto list for new vnode.
383	 */
384	bufinsvn(bp, &vp->v_cleanblkhd);
385	bp->b_objlock = vp->v_interlock;
386}
387
388/*
389 * Disassociate a buffer from a vnode.
390 */
391void
392brelvp(struct buf *bp)
393{
394	struct vnode *vp = bp->b_vp;
395
396	KASSERT(vp != NULL);
397	KASSERT(bp->b_objlock == vp->v_interlock);
398	KASSERT(mutex_owned(vp->v_interlock));
399	KASSERT(mutex_owned(&bufcache_lock));
400	KASSERT((bp->b_cflags & BC_BUSY) != 0);
401	KASSERT(!cv_has_waiters(&bp->b_done));
402
403	/*
404	 * Delete from old vnode list, if on one.
405	 */
406	if (LIST_NEXT(bp, b_vnbufs) != NOLIST)
407		bufremvn(bp);
408
409	if (vp->v_uobj.uo_npages == 0 && (vp->v_iflag & VI_ONWORKLST) &&
410	    LIST_FIRST(&vp->v_dirtyblkhd) == NULL) {
411		vp->v_iflag &= ~VI_WRMAPDIRTY;
412		vn_syncer_remove_from_worklist(vp);
413	}
414
415	bp->b_objlock = &buffer_lock;
416	bp->b_vp = NULL;
417	holdrelel(vp);
418}
419
420/*
421 * Reassign a buffer from one vnode list to another.
422 * The list reassignment must be within the same vnode.
423 * Used to assign file specific control information
424 * (indirect blocks) to the list to which they belong.
425 */
426void
427reassignbuf(struct buf *bp, struct vnode *vp)
428{
429	struct buflists *listheadp;
430	int delayx;
431
432	KASSERT(mutex_owned(&bufcache_lock));
433	KASSERT(bp->b_objlock == vp->v_interlock);
434	KASSERT(mutex_owned(vp->v_interlock));
435	KASSERT((bp->b_cflags & BC_BUSY) != 0);
436
437	/*
438	 * Delete from old vnode list, if on one.
439	 */
440	if (LIST_NEXT(bp, b_vnbufs) != NOLIST)
441		bufremvn(bp);
442
443	/*
444	 * If dirty, put on list of dirty buffers;
445	 * otherwise insert onto list of clean buffers.
446	 */
447	if ((bp->b_oflags & BO_DELWRI) == 0) {
448		listheadp = &vp->v_cleanblkhd;
449		if (vp->v_uobj.uo_npages == 0 &&
450		    (vp->v_iflag & VI_ONWORKLST) &&
451		    LIST_FIRST(&vp->v_dirtyblkhd) == NULL) {
452			vp->v_iflag &= ~VI_WRMAPDIRTY;
453			vn_syncer_remove_from_worklist(vp);
454		}
455	} else {
456		listheadp = &vp->v_dirtyblkhd;
457		if ((vp->v_iflag & VI_ONWORKLST) == 0) {
458			switch (vp->v_type) {
459			case VDIR:
460				delayx = dirdelay;
461				break;
462			case VBLK:
463				if (vp->v_specmountpoint != NULL) {
464					delayx = metadelay;
465					break;
466				}
467				/* fall through */
468			default:
469				delayx = filedelay;
470				break;
471			}
472			if (!vp->v_mount ||
473			    (vp->v_mount->mnt_flag & MNT_ASYNC) == 0)
474				vn_syncer_add_to_worklist(vp, delayx);
475		}
476	}
477	bufinsvn(bp, listheadp);
478}
479
480/*
481 * Create a vnode for a device.
482 * Used by bdevvp (block device) for root file system etc.,
483 * and by cdevvp (character device) for console and kernfs.
484 */
485static int
486getdevvp(dev_t dev, vnode_t **vpp, enum vtype type)
487{
488	vnode_t *vp;
489	vnode_t *nvp;
490	int error;
491
492	if (dev == NODEV) {
493		*vpp = NULL;
494		return (0);
495	}
496	error = getnewvnode(VT_NON, NULL, spec_vnodeop_p, NULL, &nvp);
497	if (error) {
498		*vpp = NULL;
499		return (error);
500	}
501	vp = nvp;
502	vp->v_type = type;
503	vp->v_vflag |= VV_MPSAFE;
504	uvm_vnp_setsize(vp, 0);
505	spec_node_init(vp, dev);
506	*vpp = vp;
507	return (0);
508}
509
510/*
511 * Lookup a vnode by device number and return it referenced.
512 */
513int
514vfinddev(dev_t dev, enum vtype type, vnode_t **vpp)
515{
516	vnode_t *vp;
517
518	mutex_enter(&device_lock);
519	for (vp = specfs_hash[SPECHASH(dev)]; vp; vp = vp->v_specnext) {
520		if (type == vp->v_type && dev == vp->v_rdev)
521			break;
522	}
523	if (vp == NULL) {
524		mutex_exit(&device_lock);
525		return 0;
526	}
527	mutex_enter(vp->v_interlock);
528	mutex_exit(&device_lock);
529	if (vget(vp, 0) != 0)
530		return 0;
531	*vpp = vp;
532	return 1;
533}
534
535/*
536 * Revoke all the vnodes corresponding to the specified minor number
537 * range (endpoints inclusive) of the specified major.
538 */
539void
540vdevgone(int maj, int minl, int minh, enum vtype type)
541{
542	vnode_t *vp, **vpp;
543	dev_t dev;
544	int mn;
545
546	vp = NULL;	/* XXX gcc */
547
548	mutex_enter(&device_lock);
549	for (mn = minl; mn <= minh; mn++) {
550		dev = makedev(maj, mn);
551		vpp = &specfs_hash[SPECHASH(dev)];
552		for (vp = *vpp; vp != NULL;) {
553			mutex_enter(vp->v_interlock);
554			if ((vp->v_iflag & VI_CLEAN) != 0 ||
555			    type != vp->v_type || dev != vp->v_rdev) {
556				mutex_exit(vp->v_interlock);
557				vp = vp->v_specnext;
558				continue;
559			}
560			mutex_exit(&device_lock);
561			if (vget(vp, 0) == 0) {
562				VOP_REVOKE(vp, REVOKEALL);
563				vrele(vp);
564			}
565			mutex_enter(&device_lock);
566			vp = *vpp;
567		}
568	}
569	mutex_exit(&device_lock);
570}
571
572/*
573 * sysctl helper routine to return list of supported fstypes
574 */
575int
576sysctl_vfs_generic_fstypes(SYSCTLFN_ARGS)
577{
578	char bf[sizeof(((struct statvfs *)NULL)->f_fstypename)];
579	char *where = oldp;
580	struct vfsops *v;
581	size_t needed, left, slen;
582	int error, first;
583
584	if (newp != NULL)
585		return (EPERM);
586	if (namelen != 0)
587		return (EINVAL);
588
589	first = 1;
590	error = 0;
591	needed = 0;
592	left = *oldlenp;
593
594	sysctl_unlock();
595	mutex_enter(&vfs_list_lock);
596	LIST_FOREACH(v, &vfs_list, vfs_list) {
597		if (where == NULL)
598			needed += strlen(v->vfs_name) + 1;
599		else {
600			memset(bf, 0, sizeof(bf));
601			if (first) {
602				strncpy(bf, v->vfs_name, sizeof(bf));
603				first = 0;
604			} else {
605				bf[0] = ' ';
606				strncpy(bf + 1, v->vfs_name, sizeof(bf) - 1);
607			}
608			bf[sizeof(bf)-1] = '\0';
609			slen = strlen(bf);
610			if (left < slen + 1)
611				break;
612			v->vfs_refcount++;
613			mutex_exit(&vfs_list_lock);
614			/* +1 to copy out the trailing NUL byte */
615			error = copyout(bf, where, slen + 1);
616			mutex_enter(&vfs_list_lock);
617			v->vfs_refcount--;
618			if (error)
619				break;
620			where += slen;
621			needed += slen;
622			left -= slen;
623		}
624	}
625	mutex_exit(&vfs_list_lock);
626	sysctl_relock();
627	*oldlenp = needed;
628	return (error);
629}
630
/*
 * NOTE(review): kinfo_vdebug/kinfo_vgetfailed are not referenced in the
 * visible portion of this file — presumably historical kinfo knobs kept
 * for compatibility; verify before removing.
 */
int kinfo_vdebug = 1;
int kinfo_vgetfailed;

/* Slop added to the sysctl size estimate for vnodes created meanwhile. */
#define KINFO_VNODESLOP	10
635
636/*
637 * Dump vnode list (via sysctl).
638 * Copyout address of vnode followed by vnode.
639 */
640int
641sysctl_kern_vnode(SYSCTLFN_ARGS)
642{
643	char *where = oldp;
644	size_t *sizep = oldlenp;
645	struct mount *mp, *nmp;
646	vnode_t *vp, *mvp, vbuf;
647	char *bp = where;
648	char *ewhere;
649	int error;
650
651	if (namelen != 0)
652		return (EOPNOTSUPP);
653	if (newp != NULL)
654		return (EPERM);
655
656#define VPTRSZ	sizeof(vnode_t *)
657#define VNODESZ	sizeof(vnode_t)
658	if (where == NULL) {
659		*sizep = (numvnodes + KINFO_VNODESLOP) * (VPTRSZ + VNODESZ);
660		return (0);
661	}
662	ewhere = where + *sizep;
663
664	sysctl_unlock();
665	mutex_enter(&mountlist_lock);
666	for (mp = CIRCLEQ_FIRST(&mountlist); mp != (void *)&mountlist;
667	    mp = nmp) {
668		if (vfs_busy(mp, &nmp)) {
669			continue;
670		}
671		/* Allocate a marker vnode. */
672		mvp = vnalloc(mp);
673		/* Should never fail for mp != NULL */
674		KASSERT(mvp != NULL);
675		mutex_enter(&mntvnode_lock);
676		for (vp = TAILQ_FIRST(&mp->mnt_vnodelist); vp;
677		    vp = vunmark(mvp)) {
678			vmark(mvp, vp);
679			/*
680			 * Check that the vp is still associated with
681			 * this filesystem.  RACE: could have been
682			 * recycled onto the same filesystem.
683			 */
684			if (vp->v_mount != mp || vismarker(vp))
685				continue;
686			if (bp + VPTRSZ + VNODESZ > ewhere) {
687				(void)vunmark(mvp);
688				mutex_exit(&mntvnode_lock);
689				vnfree(mvp);
690				vfs_unbusy(mp, false, NULL);
691				sysctl_relock();
692				*sizep = bp - where;
693				return (ENOMEM);
694			}
695			memcpy(&vbuf, vp, VNODESZ);
696			mutex_exit(&mntvnode_lock);
697			if ((error = copyout(&vp, bp, VPTRSZ)) ||
698			    (error = copyout(&vbuf, bp + VPTRSZ, VNODESZ))) {
699			   	mutex_enter(&mntvnode_lock);
700				(void)vunmark(mvp);
701				mutex_exit(&mntvnode_lock);
702				vnfree(mvp);
703				vfs_unbusy(mp, false, NULL);
704				sysctl_relock();
705				return (error);
706			}
707			bp += VPTRSZ + VNODESZ;
708			mutex_enter(&mntvnode_lock);
709		}
710		mutex_exit(&mntvnode_lock);
711		vnfree(mvp);
712		vfs_unbusy(mp, false, &nmp);
713	}
714	mutex_exit(&mountlist_lock);
715	sysctl_relock();
716
717	*sizep = bp - where;
718	return (0);
719}
720
721/*
722 * Set vnode attributes to VNOVAL
723 */
724void
725vattr_null(struct vattr *vap)
726{
727
728	memset(vap, 0, sizeof(*vap));
729
730	vap->va_type = VNON;
731
732	/*
733	 * Assign individually so that it is safe even if size and
734	 * sign of each member are varied.
735	 */
736	vap->va_mode = VNOVAL;
737	vap->va_nlink = VNOVAL;
738	vap->va_uid = VNOVAL;
739	vap->va_gid = VNOVAL;
740	vap->va_fsid = VNOVAL;
741	vap->va_fileid = VNOVAL;
742	vap->va_size = VNOVAL;
743	vap->va_blocksize = VNOVAL;
744	vap->va_atime.tv_sec =
745	    vap->va_mtime.tv_sec =
746	    vap->va_ctime.tv_sec =
747	    vap->va_birthtime.tv_sec = VNOVAL;
748	vap->va_atime.tv_nsec =
749	    vap->va_mtime.tv_nsec =
750	    vap->va_ctime.tv_nsec =
751	    vap->va_birthtime.tv_nsec = VNOVAL;
752	vap->va_gen = VNOVAL;
753	vap->va_flags = VNOVAL;
754	vap->va_rdev = VNOVAL;
755	vap->va_bytes = VNOVAL;
756}
757
/* Number of elements in a statically-sized array. */
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof(arr[0]))
/* Bounds-checked table lookup for printing; out of range => "UNKNOWN". */
#define ARRAY_PRINT(idx, arr) \
    ((unsigned int)(idx) < ARRAY_SIZE(arr) ? (arr)[(idx)] : "UNKNOWN")

/* Name tables used by the vnode printing routines below. */
const char * const vnode_tags[] = { VNODE_TAGS };
const char * const vnode_types[] = { VNODE_TYPES };
const char vnode_flagbits[] = VNODE_FLAGBITS;
765
766/*
767 * Print out a description of a vnode.
768 */
769void
770vprint(const char *label, struct vnode *vp)
771{
772	char bf[96];
773	int flag;
774
775	flag = vp->v_iflag | vp->v_vflag | vp->v_uflag;
776	snprintb(bf, sizeof(bf), vnode_flagbits, flag);
777
778	if (label != NULL)
779		printf("%s: ", label);
780	printf("vnode @ %p, flags (%s)\n\ttag %s(%d), type %s(%d), "
781	    "usecount %d, writecount %d, holdcount %d\n"
782	    "\tfreelisthd %p, mount %p, data %p lock %p\n",
783	    vp, bf, ARRAY_PRINT(vp->v_tag, vnode_tags), vp->v_tag,
784	    ARRAY_PRINT(vp->v_type, vnode_types), vp->v_type,
785	    vp->v_usecount, vp->v_writecount, vp->v_holdcnt,
786	    vp->v_freelisthd, vp->v_mount, vp->v_data, &vp->v_lock);
787	if (vp->v_data != NULL) {
788		printf("\t");
789		VOP_PRINT(vp);
790	}
791}
792
/*
 * Deprecated. Kept for KPI compatibility.
 * Thin wrapper around genfs_can_access(); new code should call that
 * directly.
 */
int
vaccess(enum vtype type, mode_t file_mode, uid_t uid, gid_t gid,
    mode_t acc_mode, kauth_cred_t cred)
{

#ifdef DIAGNOSTIC
	printf("vaccess: deprecated interface used.\n");
#endif /* DIAGNOSTIC */

	return genfs_can_access(type, file_mode, uid, gid, acc_mode, cred);
}
805
806/*
807 * Given a file system name, look up the vfsops for that
808 * file system, or return NULL if file system isn't present
809 * in the kernel.
810 */
811struct vfsops *
812vfs_getopsbyname(const char *name)
813{
814	struct vfsops *v;
815
816	mutex_enter(&vfs_list_lock);
817	LIST_FOREACH(v, &vfs_list, vfs_list) {
818		if (strcmp(v->vfs_name, name) == 0)
819			break;
820	}
821	if (v != NULL)
822		v->vfs_refcount++;
823	mutex_exit(&vfs_list_lock);
824
825	return (v);
826}
827
828void
829copy_statvfs_info(struct statvfs *sbp, const struct mount *mp)
830{
831	const struct statvfs *mbp;
832
833	if (sbp == (mbp = &mp->mnt_stat))
834		return;
835
836	(void)memcpy(&sbp->f_fsidx, &mbp->f_fsidx, sizeof(sbp->f_fsidx));
837	sbp->f_fsid = mbp->f_fsid;
838	sbp->f_owner = mbp->f_owner;
839	sbp->f_flag = mbp->f_flag;
840	sbp->f_syncwrites = mbp->f_syncwrites;
841	sbp->f_asyncwrites = mbp->f_asyncwrites;
842	sbp->f_syncreads = mbp->f_syncreads;
843	sbp->f_asyncreads = mbp->f_asyncreads;
844	(void)memcpy(sbp->f_spare, mbp->f_spare, sizeof(mbp->f_spare));
845	(void)memcpy(sbp->f_fstypename, mbp->f_fstypename,
846	    sizeof(sbp->f_fstypename));
847	(void)memcpy(sbp->f_mntonname, mbp->f_mntonname,
848	    sizeof(sbp->f_mntonname));
849	(void)memcpy(sbp->f_mntfromname, mp->mnt_stat.f_mntfromname,
850	    sizeof(sbp->f_mntfromname));
851	sbp->f_namemax = mbp->f_namemax;
852}
853
/*
 * Fill in the f_fstypename, f_mntonname and f_mntfromname fields of a
 * mount point's statvfs cache.
 *
 * => "ukon"/"ukfrom" say whether onp/fromp are user (UIO_USERSPACE) or
 *    kernel (UIO_SYSSPACE) addresses.
 * => When the process is chrooted, the mounted-on path is prefixed
 *    with the path from the real root to the process root.
 */
int
set_statvfs_info(const char *onp, int ukon, const char *fromp, int ukfrom,
    const char *vfsname, struct mount *mp, struct lwp *l)
{
	int error;
	size_t size;
	struct statvfs *sfs = &mp->mnt_stat;
	int (*fun)(const void *, void *, size_t, size_t *);

	(void)strlcpy(mp->mnt_stat.f_fstypename, vfsname,
	    sizeof(mp->mnt_stat.f_fstypename));

	if (onp) {
		struct cwdinfo *cwdi = l->l_proc->p_cwdi;
		fun = (ukon == UIO_SYSSPACE) ? copystr : copyinstr;
		if (cwdi->cwdi_rdir != NULL) {
			size_t len;
			char *bp;
			char *path = PNBUF_GET();

			/* Build the root-dir prefix right-to-left in path. */
			bp = path + MAXPATHLEN;
			*--bp = '\0';
			rw_enter(&cwdi->cwdi_lock, RW_READER);
			error = getcwd_common(cwdi->cwdi_rdir, rootvnode, &bp,
			    path, MAXPATHLEN / 2, 0, l);
			rw_exit(&cwdi->cwdi_lock);
			if (error) {
				PNBUF_PUT(path);
				return error;
			}

			len = strlen(bp);
			if (len > sizeof(sfs->f_mntonname) - 1)
				len = sizeof(sfs->f_mntonname) - 1;
			(void)strncpy(sfs->f_mntonname, bp, len);
			PNBUF_PUT(path);

			/* Append the supplied path if room remains. */
			if (len < sizeof(sfs->f_mntonname) - 1) {
				error = (*fun)(onp, &sfs->f_mntonname[len],
				    sizeof(sfs->f_mntonname) - len - 1, &size);
				if (error)
					return error;
				size += len;
			} else {
				size = len;
			}
		} else {
			error = (*fun)(onp, &sfs->f_mntonname,
			    sizeof(sfs->f_mntonname) - 1, &size);
			if (error)
				return error;
		}
		/* Zero the tail, including the NUL terminator. */
		(void)memset(sfs->f_mntonname + size, 0,
		    sizeof(sfs->f_mntonname) - size);
	}

	if (fromp) {
		fun = (ukfrom == UIO_SYSSPACE) ? copystr : copyinstr;
		error = (*fun)(fromp, sfs->f_mntfromname,
		    sizeof(sfs->f_mntfromname) - 1, &size);
		if (error)
			return error;
		(void)memset(sfs->f_mntfromname + size, 0,
		    sizeof(sfs->f_mntfromname) - size);
	}
	return 0;
}
921
/*
 * Get the current time as a file system timestamp.  A single place to
 * change the precision/source policy; currently just nanotime().
 */
void
vfs_timestamp(struct timespec *ts)
{

	nanotime(ts);
}
928
time_t	rootfstime;			/* recorded root fs time, if known */
/* Record the root file system's timestamp. */
void
setrootfstime(time_t t)
{
	rootfstime = t;
}
935
/* Map a vnode type (enum vtype) to the matching dirent DT_* value. */
static const uint8_t vttodt_tab[9] = {
	DT_UNKNOWN,	/* VNON  */
	DT_REG,		/* VREG  */
	DT_DIR,		/* VDIR  */
	DT_BLK,		/* VBLK  */
	DT_CHR,		/* VCHR  */
	DT_LNK,		/* VLNK  */
	DT_SOCK,	/* VSOCK */
	DT_FIFO,	/* VFIFO */
	DT_UNKNOWN	/* VBAD  */
};
947
/*
 * Convert a vnode type into the corresponding dirent d_type value.
 */
uint8_t
vtype2dt(enum vtype vt)
{

	/* The table must cover every vtype up to and including VBAD. */
	CTASSERT(VBAD == __arraycount(vttodt_tab) - 1);
	return vttodt_tab[vt];
}
955
/*
 * VFS_MOUNT() wrapper: mounting always takes the kernel lock,
 * regardless of whether the file system is MP-safe.
 */
int
VFS_MOUNT(struct mount *mp, const char *a, void *b, size_t *c)
{
	int error;

	KERNEL_LOCK(1, NULL);
	error = (*(mp->mnt_op->vfs_mount))(mp, a, b, c);
	KERNEL_UNLOCK_ONE(NULL);

	return error;
}
967
968int
969VFS_START(struct mount *mp, int a)
970{
971	int error;
972
973	if ((mp->mnt_iflag & IMNT_MPSAFE) == 0) {
974		KERNEL_LOCK(1, NULL);
975	}
976	error = (*(mp->mnt_op->vfs_start))(mp, a);
977	if ((mp->mnt_iflag & IMNT_MPSAFE) == 0) {
978		KERNEL_UNLOCK_ONE(NULL);
979	}
980
981	return error;
982}
983
/*
 * VFS_UNMOUNT() wrapper: unmounting always takes the kernel lock,
 * regardless of whether the file system is MP-safe.
 */
int
VFS_UNMOUNT(struct mount *mp, int a)
{
	int error;

	KERNEL_LOCK(1, NULL);
	error = (*(mp->mnt_op->vfs_unmount))(mp, a);
	KERNEL_UNLOCK_ONE(NULL);

	return error;
}
995
996int
997VFS_ROOT(struct mount *mp, struct vnode **a)
998{
999	int error;
1000
1001	if ((mp->mnt_iflag & IMNT_MPSAFE) == 0) {
1002		KERNEL_LOCK(1, NULL);
1003	}
1004	error = (*(mp->mnt_op->vfs_root))(mp, a);
1005	if ((mp->mnt_iflag & IMNT_MPSAFE) == 0) {
1006		KERNEL_UNLOCK_ONE(NULL);
1007	}
1008
1009	return error;
1010}
1011
1012int
1013VFS_QUOTACTL(struct mount *mp, struct quotactl_args *args)
1014{
1015	int error;
1016
1017	if ((mp->mnt_iflag & IMNT_MPSAFE) == 0) {
1018		KERNEL_LOCK(1, NULL);
1019	}
1020	error = (*(mp->mnt_op->vfs_quotactl))(mp, args);
1021	if ((mp->mnt_iflag & IMNT_MPSAFE) == 0) {
1022		KERNEL_UNLOCK_ONE(NULL);
1023	}
1024
1025	return error;
1026}
1027
1028int
1029VFS_STATVFS(struct mount *mp, struct statvfs *a)
1030{
1031	int error;
1032
1033	if ((mp->mnt_iflag & IMNT_MPSAFE) == 0) {
1034		KERNEL_LOCK(1, NULL);
1035	}
1036	error = (*(mp->mnt_op->vfs_statvfs))(mp, a);
1037	if ((mp->mnt_iflag & IMNT_MPSAFE) == 0) {
1038		KERNEL_UNLOCK_ONE(NULL);
1039	}
1040
1041	return error;
1042}
1043
1044int
1045VFS_SYNC(struct mount *mp, int a, struct kauth_cred *b)
1046{
1047	int error;
1048
1049	if ((mp->mnt_iflag & IMNT_MPSAFE) == 0) {
1050		KERNEL_LOCK(1, NULL);
1051	}
1052	error = (*(mp->mnt_op->vfs_sync))(mp, a, b);
1053	if ((mp->mnt_iflag & IMNT_MPSAFE) == 0) {
1054		KERNEL_UNLOCK_ONE(NULL);
1055	}
1056
1057	return error;
1058}
1059
1060int
1061VFS_FHTOVP(struct mount *mp, struct fid *a, struct vnode **b)
1062{
1063	int error;
1064
1065	if ((mp->mnt_iflag & IMNT_MPSAFE) == 0) {
1066		KERNEL_LOCK(1, NULL);
1067	}
1068	error = (*(mp->mnt_op->vfs_fhtovp))(mp, a, b);
1069	if ((mp->mnt_iflag & IMNT_MPSAFE) == 0) {
1070		KERNEL_UNLOCK_ONE(NULL);
1071	}
1072
1073	return error;
1074}
1075
1076int
1077VFS_VPTOFH(struct vnode *vp, struct fid *a, size_t *b)
1078{
1079	int error;
1080
1081	if ((vp->v_vflag & VV_MPSAFE) == 0) {
1082		KERNEL_LOCK(1, NULL);
1083	}
1084	error = (*(vp->v_mount->mnt_op->vfs_vptofh))(vp, a, b);
1085	if ((vp->v_vflag & VV_MPSAFE) == 0) {
1086		KERNEL_UNLOCK_ONE(NULL);
1087	}
1088
1089	return error;
1090}
1091
1092int
1093VFS_SNAPSHOT(struct mount *mp, struct vnode *a, struct timespec *b)
1094{
1095	int error;
1096
1097	if ((mp->mnt_iflag & IMNT_MPSAFE) == 0) {
1098		KERNEL_LOCK(1, NULL);
1099	}
1100	error = (*(mp->mnt_op->vfs_snapshot))(mp, a, b);
1101	if ((mp->mnt_iflag & IMNT_MPSAFE) == 0) {
1102		KERNEL_UNLOCK_ONE(NULL);
1103	}
1104
1105	return error;
1106}
1107
/*
 * VFS_EXTATTRCTL() wrapper: always takes the kernel lock for now.
 */
int
VFS_EXTATTRCTL(struct mount *mp, int a, struct vnode *b, int c, const char *d)
{
	int error;

	KERNEL_LOCK(1, NULL);		/* XXXSMP check ffs */
	error = (*(mp->mnt_op->vfs_extattrctl))(mp, a, b, c, d);
	KERNEL_UNLOCK_ONE(NULL);	/* XXX */

	return error;
}
1119
1120int
1121VFS_SUSPENDCTL(struct mount *mp, int a)
1122{
1123	int error;
1124
1125	if ((mp->mnt_iflag & IMNT_MPSAFE) == 0) {
1126		KERNEL_LOCK(1, NULL);
1127	}
1128	error = (*(mp->mnt_op->vfs_suspendctl))(mp, a);
1129	if ((mp->mnt_iflag & IMNT_MPSAFE) == 0) {
1130		KERNEL_UNLOCK_ONE(NULL);
1131	}
1132
1133	return error;
1134}
1135
#if defined(DDB) || defined(DEBUGPRINT)
static const char buf_flagbits[] = BUF_FLAGBITS;

/*
 * Print a description of a buffer, for DDB/debug output.
 * "full" is accepted for symmetry with the other printers but is not
 * used here.
 */
void
vfs_buf_print(struct buf *bp, int full, void (*pr)(const char *, ...))
{
	char bf[1024];

	(*pr)("  vp %p lblkno 0x%"PRIx64" blkno 0x%"PRIx64" rawblkno 0x%"
	    PRIx64 " dev 0x%x\n",
	    bp->b_vp, bp->b_lblkno, bp->b_blkno, bp->b_rawblkno, bp->b_dev);

	snprintb(bf, sizeof(bf),
	    buf_flagbits, bp->b_flags | bp->b_oflags | bp->b_cflags);
	(*pr)("  error %d flags 0x%s\n", bp->b_error, bf);

	(*pr)("  bufsize 0x%lx bcount 0x%lx resid 0x%lx\n",
		  bp->b_bufsize, bp->b_bcount, bp->b_resid);
	(*pr)("  data %p saveaddr %p\n",
		  bp->b_data, bp->b_saveaddr);
	(*pr)("  iodone %p objlock %p\n", bp->b_iodone, bp->b_objlock);
}
1158
/*
 * Print a description of a vnode, for DDB/debug output.
 * With "full" set, also dumps the clean and dirty buffer lists.
 */
void
vfs_vnode_print(struct vnode *vp, int full, void (*pr)(const char *, ...))
{
	char bf[256];

	uvm_object_printit(&vp->v_uobj, full, pr);
	snprintb(bf, sizeof(bf),
	    vnode_flagbits, vp->v_iflag | vp->v_vflag | vp->v_uflag);
	(*pr)("\nVNODE flags %s\n", bf);
	(*pr)("mp %p numoutput %d size 0x%llx writesize 0x%llx\n",
	      vp->v_mount, vp->v_numoutput, vp->v_size, vp->v_writesize);

	(*pr)("data %p writecount %ld holdcnt %ld\n",
	      vp->v_data, vp->v_writecount, vp->v_holdcnt);

	(*pr)("tag %s(%d) type %s(%d) mount %p typedata %p\n",
	      ARRAY_PRINT(vp->v_tag, vnode_tags), vp->v_tag,
	      ARRAY_PRINT(vp->v_type, vnode_types), vp->v_type,
	      vp->v_mount, vp->v_mountedhere);

	(*pr)("v_lock %p\n", &vp->v_lock);

	if (full) {
		struct buf *bp;

		(*pr)("clean bufs:\n");
		LIST_FOREACH(bp, &vp->v_cleanblkhd, b_vnbufs) {
			(*pr)(" bp %p\n", bp);
			vfs_buf_print(bp, full, pr);
		}

		(*pr)("dirty bufs:\n");
		LIST_FOREACH(bp, &vp->v_dirtyblkhd, b_vnbufs) {
			(*pr)(" bp %p\n", bp);
			vfs_buf_print(bp, full, pr);
		}
	}
}
1197
1198void
1199vfs_mount_print(struct mount *mp, int full, void (*pr)(const char *, ...))
1200{
1201	char sbuf[256];
1202
1203	(*pr)("vnodecovered = %p syncer = %p data = %p\n",
1204			mp->mnt_vnodecovered,mp->mnt_syncer,mp->mnt_data);
1205
1206	(*pr)("fs_bshift %d dev_bshift = %d\n",
1207			mp->mnt_fs_bshift,mp->mnt_dev_bshift);
1208
1209	snprintb(sbuf, sizeof(sbuf), __MNT_FLAG_BITS, mp->mnt_flag);
1210	(*pr)("flag = %s\n", sbuf);
1211
1212	snprintb(sbuf, sizeof(sbuf), __IMNT_FLAG_BITS, mp->mnt_iflag);
1213	(*pr)("iflag = %s\n", sbuf);
1214
1215	(*pr)("refcnt = %d unmounting @ %p updating @ %p\n", mp->mnt_refcnt,
1216	    &mp->mnt_unmounting, &mp->mnt_updating);
1217
1218	(*pr)("statvfs cache:\n");
1219	(*pr)("\tbsize = %lu\n",mp->mnt_stat.f_bsize);
1220	(*pr)("\tfrsize = %lu\n",mp->mnt_stat.f_frsize);
1221	(*pr)("\tiosize = %lu\n",mp->mnt_stat.f_iosize);
1222
1223	(*pr)("\tblocks = %"PRIu64"\n",mp->mnt_stat.f_blocks);
1224	(*pr)("\tbfree = %"PRIu64"\n",mp->mnt_stat.f_bfree);
1225	(*pr)("\tbavail = %"PRIu64"\n",mp->mnt_stat.f_bavail);
1226	(*pr)("\tbresvd = %"PRIu64"\n",mp->mnt_stat.f_bresvd);
1227
1228	(*pr)("\tfiles = %"PRIu64"\n",mp->mnt_stat.f_files);
1229	(*pr)("\tffree = %"PRIu64"\n",mp->mnt_stat.f_ffree);
1230	(*pr)("\tfavail = %"PRIu64"\n",mp->mnt_stat.f_favail);
1231	(*pr)("\tfresvd = %"PRIu64"\n",mp->mnt_stat.f_fresvd);
1232
1233	(*pr)("\tf_fsidx = { 0x%"PRIx32", 0x%"PRIx32" }\n",
1234			mp->mnt_stat.f_fsidx.__fsid_val[0],
1235			mp->mnt_stat.f_fsidx.__fsid_val[1]);
1236
1237	(*pr)("\towner = %"PRIu32"\n",mp->mnt_stat.f_owner);
1238	(*pr)("\tnamemax = %lu\n",mp->mnt_stat.f_namemax);
1239
1240	snprintb(sbuf, sizeof(sbuf), __MNT_FLAG_BITS, mp->mnt_stat.f_flag);
1241
1242	(*pr)("\tflag = %s\n",sbuf);
1243	(*pr)("\tsyncwrites = %" PRIu64 "\n",mp->mnt_stat.f_syncwrites);
1244	(*pr)("\tasyncwrites = %" PRIu64 "\n",mp->mnt_stat.f_asyncwrites);
1245	(*pr)("\tsyncreads = %" PRIu64 "\n",mp->mnt_stat.f_syncreads);
1246	(*pr)("\tasyncreads = %" PRIu64 "\n",mp->mnt_stat.f_asyncreads);
1247	(*pr)("\tfstypename = %s\n",mp->mnt_stat.f_fstypename);
1248	(*pr)("\tmntonname = %s\n",mp->mnt_stat.f_mntonname);
1249	(*pr)("\tmntfromname = %s\n",mp->mnt_stat.f_mntfromname);
1250
1251	{
1252		int cnt = 0;
1253		struct vnode *vp;
1254		(*pr)("locked vnodes =");
1255		TAILQ_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
1256			if (VOP_ISLOCKED(vp)) {
1257				if ((++cnt % 6) == 0) {
1258					(*pr)(" %p,\n\t", vp);
1259				} else {
1260					(*pr)(" %p,", vp);
1261				}
1262			}
1263		}
1264		(*pr)("\n");
1265	}
1266
1267	if (full) {
1268		int cnt = 0;
1269		struct vnode *vp;
1270		(*pr)("all vnodes =");
1271		TAILQ_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
1272			if (!TAILQ_NEXT(vp, v_mntvnodes)) {
1273				(*pr)(" %p", vp);
1274			} else if ((++cnt % 6) == 0) {
1275				(*pr)(" %p,\n\t", vp);
1276			} else {
1277				(*pr)(" %p,", vp);
1278			}
1279		}
1280		(*pr)("\n", vp);
1281	}
1282}
1283
1284/*
1285 * List all of the locked vnodes in the system.
1286 */
1287void printlockedvnodes(void);
1288
1289void
1290printlockedvnodes(void)
1291{
1292	struct mount *mp, *nmp;
1293	struct vnode *vp;
1294
1295	printf("Locked vnodes\n");
1296	mutex_enter(&mountlist_lock);
1297	for (mp = CIRCLEQ_FIRST(&mountlist); mp != (void *)&mountlist;
1298	     mp = nmp) {
1299		if (vfs_busy(mp, &nmp)) {
1300			continue;
1301		}
1302		TAILQ_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
1303			if (VOP_ISLOCKED(vp))
1304				vprint(NULL, vp);
1305		}
1306		mutex_enter(&mountlist_lock);
1307		vfs_unbusy(mp, false, &nmp);
1308	}
1309	mutex_exit(&mountlist_lock);
1310}
1311
1312#endif /* DDB || DEBUGPRINT */
1313