1/*	$OpenBSD: vfs_subr.c,v 1.263 2017/12/11 14:11:22 bluhm Exp $	*/
2/*	$NetBSD: vfs_subr.c,v 1.53 1996/04/22 01:39:13 christos Exp $	*/
3
4/*
5 * Copyright (c) 1989, 1993
6 *	The Regents of the University of California.  All rights reserved.
7 * (c) UNIX System Laboratories, Inc.
8 * All or some portions of this file are derived from material licensed
9 * to the University of California by American Telephone and Telegraph
10 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
11 * the permission of UNIX System Laboratories, Inc.
12 *
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
15 * are met:
16 * 1. Redistributions of source code must retain the above copyright
17 *    notice, this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 *    notice, this list of conditions and the following disclaimer in the
20 *    documentation and/or other materials provided with the distribution.
21 * 3. Neither the name of the University nor the names of its contributors
22 *    may be used to endorse or promote products derived from this software
23 *    without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 * SUCH DAMAGE.
36 *
37 *	@(#)vfs_subr.c	8.13 (Berkeley) 4/18/94
38 */
39
40/*
41 * External virtual filesystem routines
42 */
43
44#include <sys/param.h>
45#include <sys/systm.h>
46#include <sys/proc.h>
47#include <sys/sysctl.h>
48#include <sys/mount.h>
49#include <sys/time.h>
50#include <sys/fcntl.h>
51#include <sys/kernel.h>
52#include <sys/conf.h>
53#include <sys/vnode.h>
54#include <sys/lock.h>
55#include <sys/stat.h>
56#include <sys/acct.h>
57#include <sys/namei.h>
58#include <sys/ucred.h>
59#include <sys/buf.h>
60#include <sys/errno.h>
61#include <sys/malloc.h>
62#include <sys/mbuf.h>
63#include <sys/syscallargs.h>
64#include <sys/pool.h>
65#include <sys/tree.h>
66#include <sys/specdev.h>
67
68#include <netinet/in.h>
69
70#include <uvm/uvm_extern.h>
71#include <uvm/uvm_vnode.h>
72
73#include "softraid.h"
74
75void sr_shutdown(void);
76
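/*
 * Conversion tables behind the IFTOVT() and VTTOIF() macros: iftovt_tab[]
 * maps the S_IFMT bits of a file mode to a vnode type, and vttoif_tab[]
 * maps a vnode type back to the corresponding S_IF* bits.
 */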
77enum vtype iftovt_tab[16] = {
78	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
79	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
80};
81
82int	vttoif_tab[9] = {
83	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
84	S_IFSOCK, S_IFIFO, S_IFMT,
85};
86
87int prtactive = 0;		/* 1 => print out reclaim of active vnodes */
88int suid_clear = 1;		/* 1 => clear SUID / SGID on owner change */
89
90/*
91 * Insq/Remq for the vnode usage lists.
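 *
 * bufremvn() leaves NOLIST in the buffer's b_vnbufs link so that
 * brelvp() and reassignbuf() can tell whether the buffer is still on
 * a vnode queue.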
92 */
93#define	bufinsvn(bp, dp)	LIST_INSERT_HEAD(dp, bp, b_vnbufs)
94#define	bufremvn(bp) {							\
95	LIST_REMOVE(bp, b_vnbufs);					\
96	LIST_NEXT(bp, b_vnbufs) = NOLIST;				\
97}
98
99struct freelst vnode_hold_list;	/* list of vnodes referencing buffers */
100struct freelst vnode_free_list;	/* vnode free list */
101
102struct mntlist mountlist;	/* mounted filesystem list */
103
104void	vclean(struct vnode *, int, struct proc *);
105
106void insmntque(struct vnode *, struct mount *);
107int getdevvp(dev_t, struct vnode **, enum vtype);
108
109int vfs_hang_addrlist(struct mount *, struct netexport *,
110				  struct export_args *);
111int vfs_free_netcred(struct radix_node *, void *, u_int);
112void vfs_free_addrlist(struct netexport *);
113void vputonfreelist(struct vnode *);
114
115int vflush_vnode(struct vnode *, void *);
116int maxvnodes;
117
118#ifdef DEBUG
119void printlockedvnodes(void);
120#endif
121
122struct pool vnode_pool;
123struct pool uvm_vnode_pool;
124
125static inline int rb_buf_compare(const struct buf *b1, const struct buf *b2);
126RBT_GENERATE(buf_rb_bufs, buf, b_rbbufs, rb_buf_compare);
127
128static inline int
129rb_buf_compare(const struct buf *b1, const struct buf *b2)
130{
131	if (b1->b_lblkno < b2->b_lblkno)
132		return(-1);
133	if (b1->b_lblkno > b2->b_lblkno)
134		return(1);
135	return(0);
136}
137
138/*
139 * Initialize the vnode management data structures.
140 */
141void
142vntblinit(void)
143{
144	/* buffer cache may need a vnode for each buffer */
145	maxvnodes = 2 * initialvnodes;
146	pool_init(&vnode_pool, sizeof(struct vnode), 0, IPL_NONE,
147	    PR_WAITOK, "vnodes", NULL);
148	pool_init(&uvm_vnode_pool, sizeof(struct uvm_vnode), 0, IPL_NONE,
149	    PR_WAITOK, "uvmvnodes", NULL);
150	TAILQ_INIT(&vnode_hold_list);
151	TAILQ_INIT(&vnode_free_list);
152	TAILQ_INIT(&mountlist);
153	/*
154	 * Initialize the filesystem syncer.
155	 */
156	vn_initialize_syncerd();
157
158#ifdef NFSSERVER
159	rn_init(sizeof(struct sockaddr_in));
160#endif /* NFSSERVER */
161}
162
163/*
164 * Mark a mount point as busy. Used to synchronize access and to delay
165 * unmounting.
166 *
167 * Default behaviour is to attempt getting a READ lock and in case of an
168 * ongoing unmount, to wait for it to finish and then return failure.
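 *
 * A typical read-only caller (as printlockedvnodes() below does)
 * brackets its work with:
 *
 *	if (vfs_busy(mp, VB_READ|VB_NOWAIT))
 *		continue;
 *	...
 *	vfs_unbusy(mp);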
169 */
170int
171vfs_busy(struct mount *mp, int flags)
172{
173	int rwflags = 0;
174
175	/* new mountpoints need their lock initialised */
176	if (mp->mnt_lock.rwl_name == NULL)
177		rw_init_flags(&mp->mnt_lock, "vfslock", RWL_IS_VNODE);
178
179	if (flags & VB_WRITE)
180		rwflags |= RW_WRITE;
181	else
182		rwflags |= RW_READ;
183
184	if (flags & VB_WAIT)
185		rwflags |= RW_SLEEPFAIL;
186	else
187		rwflags |= RW_NOSLEEP;
188
189	if (rw_enter(&mp->mnt_lock, rwflags))
190		return (EBUSY);
191
192	return (0);
193}
194
195/*
196 * Free a busy file system
197 */
198void
199vfs_unbusy(struct mount *mp)
200{
201	rw_exit(&mp->mnt_lock);
202}
203
204int
205vfs_isbusy(struct mount *mp)
206{
207	if (RWLOCK_OWNER(&mp->mnt_lock) > 0)
208		return (1);
209	else
210		return (0);
211}
212
213/*
214 * Lookup a filesystem type, and if found allocate and initialize
215 * a mount structure for it.
216 *
217 * Devname is usually updated by mount(8) after booting.
218 */
219int
220vfs_rootmountalloc(char *fstypename, char *devname, struct mount **mpp)
221{
222	struct vfsconf *vfsp;
223	struct mount *mp;
224
225	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
226		if (!strcmp(vfsp->vfc_name, fstypename))
227			break;
228	if (vfsp == NULL)
229		return (ENODEV);
230	mp = malloc(sizeof(*mp), M_MOUNT, M_WAITOK|M_ZERO);
231	(void)vfs_busy(mp, VB_READ|VB_NOWAIT);
232	LIST_INIT(&mp->mnt_vnodelist);
233	mp->mnt_vfc = vfsp;
234	mp->mnt_op = vfsp->vfc_vfsops;
235	mp->mnt_flag = MNT_RDONLY;
236	mp->mnt_vnodecovered = NULLVP;
237	vfsp->vfc_refcount++;
238	mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;
239	strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
240	mp->mnt_stat.f_mntonname[0] = '/';
241	copystr(devname, mp->mnt_stat.f_mntfromname, MNAMELEN, 0);
242	copystr(devname, mp->mnt_stat.f_mntfromspec, MNAMELEN, 0);
243	*mpp = mp;
244 	return (0);
245 }
246
247/*
248 * Lookup a mount point by filesystem identifier.
249 */
250struct mount *
251vfs_getvfs(fsid_t *fsid)
252{
253	struct mount *mp;
254
255	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
256		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
257		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
258			return (mp);
259		}
260	}
261
262	return (NULL);
263}
264
265
266/*
267 * Get a new unique fsid
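 *
 * val[0] is a fabricated device number: nblkdev plus the filesystem
 * type number as the major part and a small counter as the minor,
 * bumped below until no mounted filesystem already uses the pair.
 * val[1] is the filesystem type number itself.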
268 */
269void
270vfs_getnewfsid(struct mount *mp)
271{
272	static u_short xxxfs_mntid;
273
274	fsid_t tfsid;
275	int mtype;
276
277	mtype = mp->mnt_vfc->vfc_typenum;
278	mp->mnt_stat.f_fsid.val[0] = makedev(nblkdev + mtype, 0);
279	mp->mnt_stat.f_fsid.val[1] = mtype;
280	if (xxxfs_mntid == 0)
281		++xxxfs_mntid;
282	tfsid.val[0] = makedev(nblkdev + mtype, xxxfs_mntid);
283	tfsid.val[1] = mtype;
284	if (!TAILQ_EMPTY(&mountlist)) {
285		while (vfs_getvfs(&tfsid)) {
286			tfsid.val[0]++;
287			xxxfs_mntid++;
288		}
289	}
290	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
291}
292
293/*
294 * Set vnode attributes to VNOVAL
295 */
296void
297vattr_null(struct vattr *vap)
298{
299
300	vap->va_type = VNON;
301	/*
302	 * Don't get fancy: u_quad_t = u_int = VNOVAL leaves the u_quad_t
303	 * with 2^31-1 instead of 2^64-1.  Just write'm out and let
304	 * the compiler do its job.
305	 */
306	vap->va_mode = VNOVAL;
307	vap->va_nlink = VNOVAL;
308	vap->va_uid = VNOVAL;
309	vap->va_gid = VNOVAL;
310	vap->va_fsid = VNOVAL;
311	vap->va_fileid = VNOVAL;
312	vap->va_size = VNOVAL;
313	vap->va_blocksize = VNOVAL;
314	vap->va_atime.tv_sec = VNOVAL;
315	vap->va_atime.tv_nsec = VNOVAL;
316	vap->va_mtime.tv_sec = VNOVAL;
317	vap->va_mtime.tv_nsec = VNOVAL;
318	vap->va_ctime.tv_sec = VNOVAL;
319	vap->va_ctime.tv_nsec = VNOVAL;
320	vap->va_gen = VNOVAL;
321	vap->va_flags = VNOVAL;
322	vap->va_rdev = VNOVAL;
323	vap->va_bytes = VNOVAL;
324	vap->va_filerev = VNOVAL;
325	vap->va_vaflags = 0;
326}
327
328/*
329 * Routines having to do with the management of the vnode table.
330 */
331long numvnodes;
332
333/*
334 * Return the next vnode from the free list.
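 *
 * A typical filesystem caller (illustrative; VT_xxx and xxx_vops stand
 * in for the filesystem's own tag and vnode operations) does
 *
 *	error = getnewvnode(VT_xxx, mp, &xxx_vops, &vp);
 *
 * and then fills in vp->v_type and vp->v_data for the new file.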
335 */
336int
337getnewvnode(enum vtagtype tag, struct mount *mp, struct vops *vops,
338    struct vnode **vpp)
339{
340	struct proc *p = curproc;
341	struct freelst *listhd;
342	static int toggle;
343	struct vnode *vp;
344	int s;
345
346	/*
347	 * allow maxvnodes to increase if the buffer cache itself
348	 * is big enough to justify it. (we don't shrink it ever)
349	 */
350	maxvnodes = maxvnodes < bcstats.numbufs ? bcstats.numbufs
351	    : maxvnodes;
352
353	/*
354	 * We must choose whether to allocate a new vnode or recycle an
355	 * existing one. The criterion for allocating a new one is that
356	 * the total number of vnodes is less than the number desired or
357	 * there are no vnodes on either free list. Generally we only
358	 * want to recycle vnodes that have no buffers associated with
359	 * them, so we look first on the vnode_free_list. If it is empty,
360	 * we next consider vnodes with referencing buffers on the
361	 * vnode_hold_list. The toggle ensures that half the time we
362	 * will recycle a vnode from the vnode_hold_list, and half the time
363	 * we will allocate a new one unless the list has grown to twice
364	 * the desired size. We are reluctant to recycle vnodes from the
365	 * vnode_hold_list because doing so loses the identity of all
366	 * their referencing buffers.
367	 */
368	toggle ^= 1;
369	if (numvnodes / 2 > maxvnodes)
370		toggle = 0;
371
372	s = splbio();
373	if ((numvnodes < maxvnodes) ||
374	    ((TAILQ_FIRST(listhd = &vnode_free_list) == NULL) &&
375	    ((TAILQ_FIRST(listhd = &vnode_hold_list) == NULL) || toggle))) {
376		splx(s);
377		vp = pool_get(&vnode_pool, PR_WAITOK | PR_ZERO);
378		vp->v_uvm = pool_get(&uvm_vnode_pool, PR_WAITOK | PR_ZERO);
379		vp->v_uvm->u_vnode = vp;
380		RBT_INIT(buf_rb_bufs, &vp->v_bufs_tree);
381		cache_tree_init(&vp->v_nc_tree);
382		TAILQ_INIT(&vp->v_cache_dst);
383		numvnodes++;
384	} else {
385		TAILQ_FOREACH(vp, listhd, v_freelist) {
386			if (VOP_ISLOCKED(vp) == 0)
387				break;
388		}
389		/*
390		 * Unless this is a bad time of the month, at most
391		 * the first NCPUS items on the free list are
392		 * locked, so this is close enough to being empty.
393		 */
394		if (vp == NULL) {
395			splx(s);
396			tablefull("vnode");
397			*vpp = 0;
398			return (ENFILE);
399		}
400
401#ifdef DIAGNOSTIC
402		if (vp->v_usecount) {
403			vprint("free vnode", vp);
404			panic("free vnode isn't");
405		}
406#endif
407
408		TAILQ_REMOVE(listhd, vp, v_freelist);
409		vp->v_bioflag &= ~VBIOONFREELIST;
410		splx(s);
411
412		if (vp->v_type != VBAD)
413			vgonel(vp, p);
414#ifdef DIAGNOSTIC
415		if (vp->v_data) {
416			vprint("cleaned vnode", vp);
417			panic("cleaned vnode isn't");
418		}
419		s = splbio();
420		if (vp->v_numoutput)
421			panic("Clean vnode has pending I/O's");
422		splx(s);
423#endif
424		vp->v_flag = 0;
425		vp->v_socket = 0;
426	}
427	cache_purge(vp);
428	vp->v_type = VNON;
429	vp->v_tag = tag;
430	vp->v_op = vops;
431	insmntque(vp, mp);
432	*vpp = vp;
433	vp->v_usecount = 1;
434	vp->v_data = 0;
435	return (0);
436}
437
438/*
439 * Move a vnode from one mount queue to another.
440 */
441void
442insmntque(struct vnode *vp, struct mount *mp)
443{
444	/*
445	 * Delete from old mount point vnode list, if on one.
446	 */
447	if (vp->v_mount != NULL)
448		LIST_REMOVE(vp, v_mntvnodes);
449	/*
450	 * Insert into list of vnodes for the new mount point, if available.
451	 */
452	if ((vp->v_mount = mp) != NULL)
453		LIST_INSERT_HEAD(&mp->mnt_vnodelist, vp, v_mntvnodes);
454}
455
456/*
457 * Create a vnode for a block device.
458 * Used for root filesystem, argdev, and swap areas.
459 * Also used for memory file system special devices.
460 */
461int
462bdevvp(dev_t dev, struct vnode **vpp)
463{
464	return (getdevvp(dev, vpp, VBLK));
465}
466
467/*
468 * Create a vnode for a character device.
469 * Used for console handling.
470 */
471int
472cdevvp(dev_t dev, struct vnode **vpp)
473{
474	return (getdevvp(dev, vpp, VCHR));
475}
476
477/*
478 * Create a vnode for a device.
479 * Used by bdevvp (block device) for root file system etc.,
480 * and by cdevvp (character device) for console.
481 */
482int
483getdevvp(dev_t dev, struct vnode **vpp, enum vtype type)
484{
485	struct vnode *vp;
486	struct vnode *nvp;
487	int error;
488
489	if (dev == NODEV) {
490		*vpp = NULLVP;
491		return (0);
492	}
493	error = getnewvnode(VT_NON, NULL, &spec_vops, &nvp);
494	if (error) {
495		*vpp = NULLVP;
496		return (error);
497	}
498	vp = nvp;
499	vp->v_type = type;
500	if ((nvp = checkalias(vp, dev, NULL)) != 0) {
501		vput(vp);
502		vp = nvp;
503	}
504	if (vp->v_type == VCHR && cdevsw[major(vp->v_rdev)].d_type == D_TTY)
505		vp->v_flag |= VISTTY;
506	*vpp = vp;
507	return (0);
508}
509
510/*
511 * Check to see if the new vnode represents a special device
512 * for which we already have a vnode (either because of
513 * bdevvp() or because of a different vnode representing
514 * the same block device). If such an alias exists, deallocate
515 * the existing contents and return the aliased vnode. The
516 * caller is responsible for filling it with its new contents.
517 */
518struct vnode *
519checkalias(struct vnode *nvp, dev_t nvp_rdev, struct mount *mp)
520{
521	struct proc *p = curproc;
522	struct vnode *vp;
523	struct vnode **vpp;
524
525	if (nvp->v_type != VBLK && nvp->v_type != VCHR)
526		return (NULLVP);
527
528	vpp = &speclisth[SPECHASH(nvp_rdev)];
529loop:
530	for (vp = *vpp; vp; vp = vp->v_specnext) {
531		if (nvp_rdev != vp->v_rdev || nvp->v_type != vp->v_type) {
532			continue;
533		}
534		/*
535		 * Alias, but not in use, so flush it out.
536		 */
537		if (vp->v_usecount == 0) {
538			vgonel(vp, p);
539			goto loop;
540		}
541		if (vget(vp, LK_EXCLUSIVE, p)) {
542			goto loop;
543		}
544		break;
545	}
546
547	/*
548	 * Common case is actually in the if statement
549	 */
550	if (vp == NULL || !(vp->v_tag == VT_NON && vp->v_type == VBLK)) {
551		nvp->v_specinfo = malloc(sizeof(struct specinfo), M_VNODE,
552			M_WAITOK);
553		nvp->v_rdev = nvp_rdev;
554		nvp->v_hashchain = vpp;
555		nvp->v_specnext = *vpp;
556		nvp->v_specmountpoint = NULL;
557		nvp->v_speclockf = NULL;
558		nvp->v_specbitmap = NULL;
559		if (nvp->v_type == VCHR &&
560		    (cdevsw[major(nvp_rdev)].d_flags & D_CLONE) &&
561		    (minor(nvp_rdev) >> CLONE_SHIFT == 0)) {
562			if (vp != NULLVP)
563				nvp->v_specbitmap = vp->v_specbitmap;
564			else
565				nvp->v_specbitmap = malloc(CLONE_MAPSZ,
566				    M_VNODE, M_WAITOK | M_ZERO);
567		}
568		*vpp = nvp;
569		if (vp != NULLVP) {
570			nvp->v_flag |= VALIASED;
571			vp->v_flag |= VALIASED;
572			vput(vp);
573		}
574		return (NULLVP);
575	}
576
577	/*
578	 * This code is the uncommon case. It is called in case
579	 * we found an alias that was VT_NON && vtype of VBLK
580	 * This means we found a block device that was created
581	 * using bdevvp.
582	 * An example of such a vnode is the root partition device vnode
583	 * created in ffs_mountroot.
584	 *
585	 * The vnodes created by bdevvp should not be aliased (why?).
586	 */
587
588	VOP_UNLOCK(vp, p);
589	vclean(vp, 0, p);
590	vp->v_op = nvp->v_op;
591	vp->v_tag = nvp->v_tag;
592	nvp->v_type = VNON;
593	insmntque(vp, mp);
594	return (vp);
595}
596
597/*
598 * Grab a particular vnode from the free list, increment its
599 * reference count and lock it. If the vnode lock bit is set,
600 * the vnode is being eliminated in vgone. In that case, we
601 * cannot grab it, so the process is awakened when the
602 * transition is completed, and an error code is returned to
603 * indicate that the vnode is no longer usable, possibly
604 * having been changed to a new file system type.
605 */
606int
607vget(struct vnode *vp, int flags, struct proc *p)
608{
609	int error, s, onfreelist;
610
611	/*
612	 * If the vnode is in the process of being cleaned out for
613	 * another use, we wait for the cleaning to finish and then
614	 * return failure. Cleaning is determined by checking that
615	 * the VXLOCK flag is set.
616	 */
617
618	if (vp->v_flag & VXLOCK) {
619		if (flags & LK_NOWAIT) {
620			return (EBUSY);
621		}
622
623		vp->v_flag |= VXWANT;
624		tsleep(vp, PINOD, "vget", 0);
625		return (ENOENT);
626	}
627
628	onfreelist = vp->v_bioflag & VBIOONFREELIST;
629	if (vp->v_usecount == 0 && onfreelist) {
630		s = splbio();
631		if (vp->v_holdcnt > 0)
632			TAILQ_REMOVE(&vnode_hold_list, vp, v_freelist);
633		else
634			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
635		vp->v_bioflag &= ~VBIOONFREELIST;
636		splx(s);
637	}
638
639 	vp->v_usecount++;
640	if (flags & LK_TYPE_MASK) {
641		if ((error = vn_lock(vp, flags, p)) != 0) {
642			vp->v_usecount--;
643			if (vp->v_usecount == 0 && onfreelist)
644				vputonfreelist(vp);
645		}
646		return (error);
647	}
648
649	return (0);
650}
651
652
653/* Vnode reference. */
654void
655vref(struct vnode *vp)
656{
657#ifdef DIAGNOSTIC
658	if (vp->v_usecount == 0)
659		panic("vref used where vget required");
660	if (vp->v_type == VNON)
661		panic("vref on a VNON vnode");
662#endif
663	vp->v_usecount++;
664}
665
666void
667vputonfreelist(struct vnode *vp)
668{
669	int s;
670	struct freelst *lst;
671
672	s = splbio();
673#ifdef DIAGNOSTIC
674	if (vp->v_usecount != 0)
675		panic("Use count is not zero!");
676
677	if (vp->v_bioflag & VBIOONFREELIST) {
678		vprint("vnode already on free list: ", vp);
679		panic("vnode already on free list");
680	}
681#endif
682
683	vp->v_bioflag |= VBIOONFREELIST;
684
685	if (vp->v_holdcnt > 0)
686		lst = &vnode_hold_list;
687	else
688		lst = &vnode_free_list;
689
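	/* A VBAD vnode has no identity worth caching; recycle it first. */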
690	if (vp->v_type == VBAD)
691		TAILQ_INSERT_HEAD(lst, vp, v_freelist);
692	else
693		TAILQ_INSERT_TAIL(lst, vp, v_freelist);
694
695	splx(s);
696}
697
698/*
699 * vput(), just unlock and vrele()
700 */
701void
702vput(struct vnode *vp)
703{
704	struct proc *p = curproc;
705
706#ifdef DIAGNOSTIC
707	if (vp == NULL)
708		panic("vput: null vp");
709#endif
710
711#ifdef DIAGNOSTIC
712	if (vp->v_usecount == 0) {
713		vprint("vput: bad ref count", vp);
714		panic("vput: ref cnt");
715	}
716#endif
717	vp->v_usecount--;
718	if (vp->v_usecount > 0) {
719		VOP_UNLOCK(vp, p);
720		return;
721	}
722
723#ifdef DIAGNOSTIC
724	if (vp->v_writecount != 0) {
725		vprint("vput: bad writecount", vp);
726		panic("vput: v_writecount != 0");
727	}
728#endif
729
730	VOP_INACTIVE(vp, p);
731
732	if (vp->v_usecount == 0 && !(vp->v_bioflag & VBIOONFREELIST))
733		vputonfreelist(vp);
734}
735
736/*
737 * Vnode release - use for active VNODES.
738 * If count drops to zero, call inactive routine and return to freelist.
739 * Returns 0 if it did not sleep.
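 *
 * Unlike vput(), the vnode is not expected to be locked on entry:
 * vrele() takes the lock itself before calling VOP_INACTIVE() on the
 * last release.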
740 */
741int
742vrele(struct vnode *vp)
743{
744	struct proc *p = curproc;
745
746#ifdef DIAGNOSTIC
747	if (vp == NULL)
748		panic("vrele: null vp");
749#endif
750#ifdef DIAGNOSTIC
751	if (vp->v_usecount == 0) {
752		vprint("vrele: bad ref count", vp);
753		panic("vrele: ref cnt");
754	}
755#endif
756	vp->v_usecount--;
757	if (vp->v_usecount > 0) {
758		return (0);
759	}
760
761#ifdef DIAGNOSTIC
762	if (vp->v_writecount != 0) {
763		vprint("vrele: bad writecount", vp);
764		panic("vrele: v_writecount != 0");
765	}
766#endif
767
768	if (vn_lock(vp, LK_EXCLUSIVE, p)) {
769#ifdef DIAGNOSTIC
770		vprint("vrele: cannot lock", vp);
771#endif
772		return (1);
773	}
774
775	VOP_INACTIVE(vp, p);
776
777	if (vp->v_usecount == 0 && !(vp->v_bioflag & VBIOONFREELIST))
778		vputonfreelist(vp);
779	return (1);
780}
781
782/* Page or buffer structure gets a reference. */
783void
784vhold(struct vnode *vp)
785{
786	/*
787	 * If it is on the freelist and the hold count is currently
788	 * zero, move it to the hold list.
789	 */
790	if ((vp->v_bioflag & VBIOONFREELIST) &&
791	    vp->v_holdcnt == 0 && vp->v_usecount == 0) {
792		TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
793		TAILQ_INSERT_TAIL(&vnode_hold_list, vp, v_freelist);
794	}
795	vp->v_holdcnt++;
796}
797
798/* Lose interest in a vnode. */
799void
800vdrop(struct vnode *vp)
801{
802#ifdef DIAGNOSTIC
803	if (vp->v_holdcnt == 0)
804		panic("vdrop: zero holdcnt");
805#endif
806
807	vp->v_holdcnt--;
808
809	/*
810	 * If it is on the holdlist and the hold count drops to
811	 * zero, move it to the free list.
812	 */
813	if ((vp->v_bioflag & VBIOONFREELIST) &&
814	    vp->v_holdcnt == 0 && vp->v_usecount == 0) {
815		TAILQ_REMOVE(&vnode_hold_list, vp, v_freelist);
816		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
817	}
818}
819
820/*
821 * Remove any vnodes in the vnode table belonging to mount point mp.
822 *
823 * If MNT_NOFORCE is specified, there should not be any active ones,
824 * return error if any are found (nb: this is a user error, not a
825 * system error). If MNT_FORCE is specified, detach any active vnodes
826 * that are found.
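 *
 * A filesystem's unmount path typically calls, e.g.,
 *
 *	vflush(mp, NULLVP, (mntflags & MNT_FORCE) ? FORCECLOSE : 0);
 *
 * after flushing its own metadata.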
827 */
828#ifdef DEBUG
829int busyprt = 0;	/* print out busy vnodes */
830struct ctldebug debug1 = { "busyprt", &busyprt };
831#endif
832
833int
834vfs_mount_foreach_vnode(struct mount *mp,
835    int (*func)(struct vnode *, void *), void *arg) {
836	struct vnode *vp, *nvp;
837	int error = 0;
838
839loop:
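	/*
	 * func() may sleep; if the vnode was moved to another mount
	 * in the meantime, restart the scan from the head of the list.
	 */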
840	LIST_FOREACH_SAFE(vp, &mp->mnt_vnodelist, v_mntvnodes, nvp) {
841		if (vp->v_mount != mp)
842			goto loop;
843
844		error = func(vp, arg);
845
846		if (error != 0)
847			break;
848	}
849
850	return (error);
851}
852
853struct vflush_args {
854	struct vnode *skipvp;
855	int busy;
856	int flags;
857};
858
859int
860vflush_vnode(struct vnode *vp, void *arg)
861{
862	struct vflush_args *va = arg;
863	struct proc *p = curproc;
864
865	if (vp == va->skipvp) {
866		return (0);
867	}
868
869	if ((va->flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
870		return (0);
871	}
872
873	/*
874	 * If WRITECLOSE is set, only flush out regular file
875	 * vnodes open for writing.
876	 */
877	if ((va->flags & WRITECLOSE) &&
878	    (vp->v_writecount == 0 || vp->v_type != VREG)) {
879		return (0);
880	}
881
882	/*
883	 * With v_usecount == 0, all we need to do is clear
884	 * out the vnode data structures and we are done.
885	 */
886	if (vp->v_usecount == 0) {
887		vgonel(vp, p);
888		return (0);
889	}
890
891	/*
892	 * If FORCECLOSE is set, forcibly close the vnode.
893	 * For block or character devices, revert to an
894	 * anonymous device. For all other files, just kill them.
895	 */
896	if (va->flags & FORCECLOSE) {
897		if (vp->v_type != VBLK && vp->v_type != VCHR) {
898			vgonel(vp, p);
899		} else {
900			vclean(vp, 0, p);
901			vp->v_op = &spec_vops;
902			insmntque(vp, NULL);
903		}
904		return (0);
905	}
906
907	/* If IGNORECLEAN is set, skip vnodes with no dirty buffers. */
908	if ((va->flags & IGNORECLEAN) &&
909	    LIST_EMPTY(&vp->v_dirtyblkhd)) {
910		return (0);
911	}
912
913#ifdef DEBUG
914	if (busyprt)
915		vprint("vflush: busy vnode", vp);
916#endif
917	va->busy++;
918	return (0);
919}
920
921int
922vflush(struct mount *mp, struct vnode *skipvp, int flags)
923{
924	struct vflush_args va;
925	va.skipvp = skipvp;
926	va.busy = 0;
927	va.flags = flags;
928
929	vfs_mount_foreach_vnode(mp, vflush_vnode, &va);
930
931	if (va.busy)
932		return (EBUSY);
933	return (0);
934}
935
936/*
937 * Disassociate the underlying file system from a vnode.
938 */
939void
940vclean(struct vnode *vp, int flags, struct proc *p)
941{
942	int active;
943
944	/*
945	 * Check to see if the vnode is in use.
946	 * If so we have to reference it before we clean it out
947	 * so that its count cannot fall to zero and generate a
948	 * race against ourselves to recycle it.
949	 */
950	if ((active = vp->v_usecount) != 0)
951		vp->v_usecount++;
952
953	/*
954	 * Prevent the vnode from being recycled or
955	 * brought into use while we clean it out.
956	 */
957	if (vp->v_flag & VXLOCK)
958		panic("vclean: deadlock");
959	vp->v_flag |= VXLOCK;
960	/*
961	 * Even if the count is zero, the VOP_INACTIVE routine may still
962	 * have the object locked while it cleans it out. The VOP_LOCK
963	 * ensures that the VOP_INACTIVE routine is done with its work.
964	 * For active vnodes, it ensures that no other activity can
965	 * occur while the underlying object is being cleaned out.
966	 */
967	VOP_LOCK(vp, LK_DRAIN | LK_EXCLUSIVE, p);
968
969	/*
970	 * Clean out any VM data associated with the vnode.
971	 */
972	uvm_vnp_terminate(vp);
973	/*
974	 * Clean out any buffers associated with the vnode.
975	 */
976	if (flags & DOCLOSE)
977		vinvalbuf(vp, V_SAVE, NOCRED, p, 0, 0);
978	/*
979	 * If purging an active vnode, it must be closed and
980	 * deactivated before being reclaimed. Note that the
981	 * VOP_INACTIVE will unlock the vnode
982	 */
983	if (active) {
984		if (flags & DOCLOSE)
985			VOP_CLOSE(vp, FNONBLOCK, NOCRED, p);
986		VOP_INACTIVE(vp, p);
987	} else {
988		/*
989		 * Any other processes trying to obtain this lock must first
990		 * wait for VXLOCK to clear, then call the new lock operation.
991		 */
992		VOP_UNLOCK(vp, p);
993	}
994
995	/*
996	 * Reclaim the vnode.
997	 */
998	if (VOP_RECLAIM(vp, p))
999		panic("vclean: cannot reclaim");
1000	if (active) {
1001		vp->v_usecount--;
1002		if (vp->v_usecount == 0) {
1003			if (vp->v_holdcnt > 0)
1004				panic("vclean: not clean");
1005			vputonfreelist(vp);
1006		}
1007	}
1008	cache_purge(vp);
1009
1010	/*
1011	 * Done with purge, notify sleepers of the grim news.
1012	 */
1013	vp->v_op = &dead_vops;
1014	VN_KNOTE(vp, NOTE_REVOKE);
1015	vp->v_tag = VT_NON;
1016	vp->v_flag &= ~VXLOCK;
1017#ifdef VFSLCKDEBUG
1018	vp->v_flag &= ~VLOCKSWORK;
1019#endif
1020	if (vp->v_flag & VXWANT) {
1021		vp->v_flag &= ~VXWANT;
1022		wakeup(vp);
1023	}
1024}
1025
1026/*
1027 * Recycle an unused vnode to the front of the free list.
1028 */
1029int
1030vrecycle(struct vnode *vp, struct proc *p)
1031{
1032	if (vp->v_usecount == 0) {
1033		vgonel(vp, p);
1034		return (1);
1035	}
1036	return (0);
1037}
1038
1039/*
1040 * Eliminate all activity associated with a vnode
1041 * in preparation for reuse.
1042 */
1043void
1044vgone(struct vnode *vp)
1045{
1046	struct proc *p = curproc;
1047	vgonel(vp, p);
1048}
1049
1050/*
1051 * vgone, with struct proc.
1052 */
1053void
1054vgonel(struct vnode *vp, struct proc *p)
1055{
1056	struct vnode *vq;
1057	struct vnode *vx;
1058
1059	/*
1060	 * If a vgone (or vclean) is already in progress,
1061	 * wait until it is done and return.
1062	 */
1063	if (vp->v_flag & VXLOCK) {
1064		vp->v_flag |= VXWANT;
1065		tsleep(vp, PINOD, "vgone", 0);
1066		return;
1067	}
1068
1069	/*
1070	 * Clean out the filesystem specific data.
1071	 */
1072	vclean(vp, DOCLOSE, p);
1073	/*
1074	 * Delete from old mount point vnode list, if on one.
1075	 */
1076	if (vp->v_mount != NULL)
1077		insmntque(vp, NULL);
1078	/*
1079	 * If special device, remove it from special device alias list
1080	 * if it is on one.
1081	 */
1082	if ((vp->v_type == VBLK || vp->v_type == VCHR) && vp->v_specinfo != 0) {
1083		if ((vp->v_flag & VALIASED) == 0 && vp->v_type == VCHR &&
1084		    (cdevsw[major(vp->v_rdev)].d_flags & D_CLONE) &&
1085		    (minor(vp->v_rdev) >> CLONE_SHIFT == 0)) {
1086			free(vp->v_specbitmap, M_VNODE, CLONE_MAPSZ);
1087		}
1088		if (*vp->v_hashchain == vp) {
1089			*vp->v_hashchain = vp->v_specnext;
1090		} else {
1091			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
1092				if (vq->v_specnext != vp)
1093					continue;
1094				vq->v_specnext = vp->v_specnext;
1095				break;
1096			}
1097			if (vq == NULL)
1098				panic("missing bdev");
1099		}
1100		if (vp->v_flag & VALIASED) {
1101			vx = NULL;
1102			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
1103				if (vq->v_rdev != vp->v_rdev ||
1104				    vq->v_type != vp->v_type)
1105					continue;
1106				if (vx)
1107					break;
1108				vx = vq;
1109			}
1110			if (vx == NULL)
1111				panic("missing alias");
1112			if (vq == NULL)
1113				vx->v_flag &= ~VALIASED;
1114			vp->v_flag &= ~VALIASED;
1115		}
1116		free(vp->v_specinfo, M_VNODE, sizeof(struct specinfo));
1117		vp->v_specinfo = NULL;
1118	}
1119	/*
1120	 * If it is on the freelist and not already at the head,
1121	 * move it to the head of the list.
1122	 */
1123	vp->v_type = VBAD;
1124
1125	/*
1126	 * Move onto the free list, unless we were called from
1127	 * getnewvnode and we're not on any free list
1128	 */
1129	if (vp->v_usecount == 0 &&
1130	    (vp->v_bioflag & VBIOONFREELIST)) {
1131		int s;
1132
1133		s = splbio();
1134
1135		if (vp->v_holdcnt > 0)
1136			panic("vgonel: not clean");
1137
1138		if (TAILQ_FIRST(&vnode_free_list) != vp) {
1139			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
1140			TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
1141		}
1142		splx(s);
1143	}
1144}
1145
1146/*
1147 * Lookup a vnode by device number.
1148 */
1149int
1150vfinddev(dev_t dev, enum vtype type, struct vnode **vpp)
1151{
1152	struct vnode *vp;
1153	int rc = 0;
1154
1155	for (vp = speclisth[SPECHASH(dev)]; vp; vp = vp->v_specnext) {
1156		if (dev != vp->v_rdev || type != vp->v_type)
1157			continue;
1158		*vpp = vp;
1159		rc = 1;
1160		break;
1161	}
1162	return (rc);
1163}
1164
1165/*
1166 * Revoke all the vnodes corresponding to the specified minor number
1167 * range (endpoints inclusive) of the specified major.
1168 */
1169void
1170vdevgone(int maj, int minl, int minh, enum vtype type)
1171{
1172	struct vnode *vp;
1173	int mn;
1174
1175	for (mn = minl; mn <= minh; mn++)
1176		if (vfinddev(makedev(maj, mn), type, &vp))
1177			VOP_REVOKE(vp, REVOKEALL);
1178}
1179
1180/*
1181 * Calculate the total number of references to a special device.
1182 */
1183int
1184vcount(struct vnode *vp)
1185{
1186	struct vnode *vq, *vnext;
1187	int count;
1188
1189loop:
1190	if ((vp->v_flag & VALIASED) == 0)
1191		return (vp->v_usecount);
1192	for (count = 0, vq = *vp->v_hashchain; vq; vq = vnext) {
1193		vnext = vq->v_specnext;
1194		if (vq->v_rdev != vp->v_rdev || vq->v_type != vp->v_type)
1195			continue;
1196		/*
1197		 * Alias, but not in use, so flush it out.
1198		 */
1199		if (vq->v_usecount == 0 && vq != vp) {
1200			vgone(vq);
1201			goto loop;
1202		}
1203		count += vq->v_usecount;
1204	}
1205	return (count);
1206}
1207
1208#if defined(DEBUG) || defined(DIAGNOSTIC)
1209/*
1210 * Print out a description of a vnode.
1211 */
1212static char *typename[] =
1213   { "VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD" };
1214
1215void
1216vprint(char *label, struct vnode *vp)
1217{
1218	char buf[64];
1219
1220	if (label != NULL)
1221		printf("%s: ", label);
1222	printf("%p, type %s, use %u, write %u, hold %u,",
1223		vp, typename[vp->v_type], vp->v_usecount, vp->v_writecount,
1224		vp->v_holdcnt);
1225	buf[0] = '\0';
1226	if (vp->v_flag & VROOT)
1227		strlcat(buf, "|VROOT", sizeof buf);
1228	if (vp->v_flag & VTEXT)
1229		strlcat(buf, "|VTEXT", sizeof buf);
1230	if (vp->v_flag & VSYSTEM)
1231		strlcat(buf, "|VSYSTEM", sizeof buf);
1232	if (vp->v_flag & VXLOCK)
1233		strlcat(buf, "|VXLOCK", sizeof buf);
1234	if (vp->v_flag & VXWANT)
1235		strlcat(buf, "|VXWANT", sizeof buf);
1236	if (vp->v_bioflag & VBIOWAIT)
1237		strlcat(buf, "|VBIOWAIT", sizeof buf);
1238	if (vp->v_bioflag & VBIOONFREELIST)
1239		strlcat(buf, "|VBIOONFREELIST", sizeof buf);
1240	if (vp->v_bioflag & VBIOONSYNCLIST)
1241		strlcat(buf, "|VBIOONSYNCLIST", sizeof buf);
1242	if (vp->v_flag & VALIASED)
1243		strlcat(buf, "|VALIASED", sizeof buf);
1244	if (buf[0] != '\0')
1245		printf(" flags (%s)", &buf[1]);
1246	if (vp->v_data == NULL) {
1247		printf("\n");
1248	} else {
1249		printf("\n\t");
1250		VOP_PRINT(vp);
1251	}
1252}
1253#endif /* DEBUG || DIAGNOSTIC */
1254
1255#ifdef DEBUG
1256/*
1257 * List all of the locked vnodes in the system.
1258 * Called when debugging the kernel.
1259 */
1260void
1261printlockedvnodes(void)
1262{
1263	struct mount *mp;
1264	struct vnode *vp;
1265
1266	printf("Locked vnodes\n");
1267
1268	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
1269		if (vfs_busy(mp, VB_READ|VB_NOWAIT))
1270			continue;
1271		LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
1272			if (VOP_ISLOCKED(vp))
1273				vprint(NULL, vp);
1274		}
1275		vfs_unbusy(mp);
1276 	}
1277
1278}
1279#endif
1280
1281/*
1282 * Top level filesystem related information gathering.
1283 */
1284int
1285vfs_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
1286    size_t newlen, struct proc *p)
1287{
1288	struct vfsconf *vfsp, *tmpvfsp;
1289	int ret;
1290
1291	/* all sysctl names at this level are at least name and field */
1292	if (namelen < 2)
1293		return (ENOTDIR);		/* overloaded */
1294
1295	if (name[0] != VFS_GENERIC) {
1296		for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
1297			if (vfsp->vfc_typenum == name[0])
1298				break;
1299
1300		if (vfsp == NULL || vfsp->vfc_vfsops->vfs_sysctl == NULL)
1301			return (EOPNOTSUPP);
1302
1303		return ((*vfsp->vfc_vfsops->vfs_sysctl)(&name[1], namelen - 1,
1304		    oldp, oldlenp, newp, newlen, p));
1305	}
1306
1307	switch (name[1]) {
1308	case VFS_MAXTYPENUM:
1309		return (sysctl_rdint(oldp, oldlenp, newp, maxvfsconf));
1310
1311	case VFS_CONF:
1312		if (namelen < 3)
1313			return (ENOTDIR);	/* overloaded */
1314
1315		for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
1316			if (vfsp->vfc_typenum == name[2])
1317				break;
1318
1319		if (vfsp == NULL)
1320			return (EOPNOTSUPP);
1321
1322		/* Make a copy, clear out kernel pointers */
1323		tmpvfsp = malloc(sizeof(*tmpvfsp), M_TEMP, M_WAITOK|M_ZERO);
1324		memcpy(tmpvfsp, vfsp, sizeof(*tmpvfsp));
1325		tmpvfsp->vfc_vfsops = NULL;
1326		tmpvfsp->vfc_next = NULL;
1327
1328		ret = sysctl_rdstruct(oldp, oldlenp, newp, tmpvfsp,
1329		    sizeof(struct vfsconf));
1330
1331		free(tmpvfsp, M_TEMP, sizeof(*tmpvfsp));
1332		return (ret);
1333	case VFS_BCACHESTAT:	/* buffer cache statistics */
1334		ret = sysctl_rdstruct(oldp, oldlenp, newp, &bcstats,
1335		    sizeof(struct bcachestats));
1336		return(ret);
1337	}
1338	return (EOPNOTSUPP);
1339}
1340
1341/*
1342 * Check to see if a filesystem is mounted on a block device.
1343 */
1344int
1345vfs_mountedon(struct vnode *vp)
1346{
1347	struct vnode *vq;
1348	int error = 0;
1349
1350 	if (vp->v_specmountpoint != NULL)
1351		return (EBUSY);
1352	if (vp->v_flag & VALIASED) {
1353		for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
1354			if (vq->v_rdev != vp->v_rdev ||
1355			    vq->v_type != vp->v_type)
1356				continue;
1357			if (vq->v_specmountpoint != NULL) {
1358				error = EBUSY;
1359				break;
1360			}
1361 		}
1362	}
1363	return (error);
1364}
1365
1366#ifdef NFSSERVER
1367/*
1368 * Build hash lists of net addresses and hang them off the mount point.
1369 * Called by vfs_export() to set up the lists of export addresses.
1370 */
1371int
1372vfs_hang_addrlist(struct mount *mp, struct netexport *nep,
1373    struct export_args *argp)
1374{
1375	struct netcred *np;
1376	struct radix_node_head *rnh;
1377	int nplen, i;
1378	struct radix_node *rn;
1379	struct sockaddr *saddr, *smask = 0;
1380	int error;
1381
1382	if (argp->ex_addrlen == 0) {
1383		if (mp->mnt_flag & MNT_DEFEXPORTED)
1384			return (EPERM);
1385		np = &nep->ne_defexported;
1386		/* fill in the kernel's ucred from userspace's xucred */
1387		if ((error = crfromxucred(&np->netc_anon, &argp->ex_anon)))
1388			return (error);
1389		mp->mnt_flag |= MNT_DEFEXPORTED;
1390		goto finish;
1391	}
1392	if (argp->ex_addrlen > MLEN || argp->ex_masklen > MLEN ||
1393	    argp->ex_addrlen < 0 || argp->ex_masklen < 0)
1394		return (EINVAL);
1395	nplen = sizeof(struct netcred) + argp->ex_addrlen + argp->ex_masklen;
1396	np = (struct netcred *)malloc(nplen, M_NETADDR, M_WAITOK|M_ZERO);
1397	saddr = (struct sockaddr *)(np + 1);
1398	error = copyin(argp->ex_addr, saddr, argp->ex_addrlen);
1399	if (error)
1400		goto out;
1401	if (saddr->sa_len > argp->ex_addrlen)
1402		saddr->sa_len = argp->ex_addrlen;
1403	if (argp->ex_masklen) {
1404		smask = (struct sockaddr *)((caddr_t)saddr + argp->ex_addrlen);
1405		error = copyin(argp->ex_mask, smask, argp->ex_masklen);
1406		if (error)
1407			goto out;
1408		if (smask->sa_len > argp->ex_masklen)
1409			smask->sa_len = argp->ex_masklen;
1410	}
1411	/* fill in the kernel's ucred from userspace's xucred */
1412	if ((error = crfromxucred(&np->netc_anon, &argp->ex_anon)))
1413		goto out;
1414	i = saddr->sa_family;
1415	switch (i) {
1416	case AF_INET:
1417		if ((rnh = nep->ne_rtable_inet) == NULL) {
1418			if (!rn_inithead((void **)&nep->ne_rtable_inet,
1419			    offsetof(struct sockaddr_in, sin_addr))) {
1420				error = ENOBUFS;
1421				goto out;
1422			}
1423			rnh = nep->ne_rtable_inet;
1424		}
1425		break;
1426	default:
1427		error = EINVAL;
1428		goto out;
1429	}
1430	rn = rn_addroute(saddr, smask, rnh, np->netc_rnodes, 0);
1431	if (rn == 0 || np != (struct netcred *)rn) { /* already exists */
1432		error = EPERM;
1433		goto out;
1434	}
1435finish:
1436	np->netc_exflags = argp->ex_flags;
1437	return (0);
1438out:
1439	free(np, M_NETADDR, nplen);
1440	return (error);
1441}
1442
1443int
1444vfs_free_netcred(struct radix_node *rn, void *w, u_int id)
1445{
1446	struct radix_node_head *rnh = (struct radix_node_head *)w;
1447
1448	rn_delete(rn->rn_key, rn->rn_mask, rnh, NULL);
1449	free(rn, M_NETADDR, 0);
1450	return (0);
1451}
1452
1453/*
1454 * Free the net address hash lists that are hanging off the mount points.
1455 */
1456void
1457vfs_free_addrlist(struct netexport *nep)
1458{
1459	struct radix_node_head *rnh;
1460
1461	if ((rnh = nep->ne_rtable_inet) != NULL) {
1462		rn_walktree(rnh, vfs_free_netcred, rnh);
1463		free(rnh, M_RTABLE, 0);
1464		nep->ne_rtable_inet = NULL;
1465	}
1466}
1467#endif /* NFSSERVER */
1468
1469int
1470vfs_export(struct mount *mp, struct netexport *nep, struct export_args *argp)
1471{
1472#ifdef NFSSERVER
1473	int error;
1474
1475	if (argp->ex_flags & MNT_DELEXPORT) {
1476		vfs_free_addrlist(nep);
1477		mp->mnt_flag &= ~(MNT_EXPORTED | MNT_DEFEXPORTED);
1478	}
1479	if (argp->ex_flags & MNT_EXPORTED) {
1480		if ((error = vfs_hang_addrlist(mp, nep, argp)) != 0)
1481			return (error);
1482		mp->mnt_flag |= MNT_EXPORTED;
1483	}
1484	return (0);
1485#else
1486	return (ENOTSUP);
1487#endif /* NFSSERVER */
1488}
1489
1490struct netcred *
1491vfs_export_lookup(struct mount *mp, struct netexport *nep, struct mbuf *nam)
1492{
1493#ifdef NFSSERVER
1494	struct netcred *np;
1495	struct radix_node_head *rnh;
1496	struct sockaddr *saddr;
1497
1498	np = NULL;
1499	if (mp->mnt_flag & MNT_EXPORTED) {
1500		/*
1501		 * Lookup in the export list first.
1502		 */
1503		if (nam != NULL) {
1504			saddr = mtod(nam, struct sockaddr *);
1505			switch (saddr->sa_family) {
1506			case AF_INET:
1507				rnh = nep->ne_rtable_inet;
1508				break;
1509			default:
1510				rnh = NULL;
1511				break;
1512			}
1513			if (rnh != NULL)
1514				np = (struct netcred *)rn_match(saddr, rnh);
1515		}
1516		/*
1517		 * If no address match, use the default if it exists.
1518		 */
1519		if (np == NULL && mp->mnt_flag & MNT_DEFEXPORTED)
1520			np = &nep->ne_defexported;
1521	}
1522	return (np);
1523#else
1524	return (NULL);
1525#endif /* NFSSERVER */
1526}
1527
1528/*
1529 * Do the usual access checking.
1530 * file_mode, uid and gid are from the vnode in question,
1531 * while acc_mode and cred are from the VOP_ACCESS parameter list
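 *
 * Example: a 0640 file owned by uid 100, gid 10 grants VREAD to a
 * caller whose credentials include gid 10 (S_IRGRP is set in
 * file_mode), but denies VWRITE from the same caller because S_IWGRP
 * is clear.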
1532 */
1533int
1534vaccess(enum vtype type, mode_t file_mode, uid_t uid, gid_t gid,
1535    mode_t acc_mode, struct ucred *cred)
1536{
1537	mode_t mask;
1538
1539	/* User id 0 always gets read/write access. */
1540	if (cred->cr_uid == 0) {
1541		/* For VEXEC, at least one of the execute bits must be set. */
1542		if ((acc_mode & VEXEC) && type != VDIR &&
1543		    (file_mode & (S_IXUSR|S_IXGRP|S_IXOTH)) == 0)
1544			return EACCES;
1545		return 0;
1546	}
1547
1548	mask = 0;
1549
1550	/* Otherwise, check the owner. */
1551	if (cred->cr_uid == uid) {
1552		if (acc_mode & VEXEC)
1553			mask |= S_IXUSR;
1554		if (acc_mode & VREAD)
1555			mask |= S_IRUSR;
1556		if (acc_mode & VWRITE)
1557			mask |= S_IWUSR;
1558		return (file_mode & mask) == mask ? 0 : EACCES;
1559	}
1560
1561	/* Otherwise, check the groups. */
1562	if (groupmember(gid, cred)) {
1563		if (acc_mode & VEXEC)
1564			mask |= S_IXGRP;
1565		if (acc_mode & VREAD)
1566			mask |= S_IRGRP;
1567		if (acc_mode & VWRITE)
1568			mask |= S_IWGRP;
1569		return (file_mode & mask) == mask ? 0 : EACCES;
1570	}
1571
1572	/* Otherwise, check everyone else. */
1573	if (acc_mode & VEXEC)
1574		mask |= S_IXOTH;
1575	if (acc_mode & VREAD)
1576		mask |= S_IROTH;
1577	if (acc_mode & VWRITE)
1578		mask |= S_IWOTH;
1579	return (file_mode & mask) == mask ? 0 : EACCES;
1580}
1581
1582int
1583vfs_readonly(struct mount *mp, struct proc *p)
1584{
1585	int error;
1586
1587	error = vfs_busy(mp, VB_WRITE|VB_WAIT);
1588	if (error) {
1589		printf("%s: busy\n", mp->mnt_stat.f_mntonname);
1590		return (error);
1591	}
1592	uvm_vnp_sync(mp);
1593	error = VFS_SYNC(mp, MNT_WAIT, p->p_ucred, p);
1594	if (error) {
1595		printf("%s: failed to sync\n", mp->mnt_stat.f_mntonname);
1596		vfs_unbusy(mp);
1597		return (error);
1598	}
1599
1600	mp->mnt_flag |= MNT_UPDATE | MNT_RDONLY;
1601	mp->mnt_flag &= ~MNT_SOFTDEP;
1602	error = VFS_MOUNT(mp, mp->mnt_stat.f_mntonname, NULL, NULL, curproc);
1603	if (error) {
1604		printf("%s: failed to remount rdonly, error %d\n",
1605		    mp->mnt_stat.f_mntonname, error);
1606		vfs_unbusy(mp);
1607		return (error);
1608	}
1609	if (mp->mnt_syncer != NULL)
1610		vgone(mp->mnt_syncer);
1611	mp->mnt_syncer = NULL;
1612	vfs_unbusy(mp);
1613	return (error);
1614}
1615
1616/*
1617 * Read-only all file systems.
1618 * We traverse the list in reverse order under the assumption that doing so
1619 * will avoid needing to worry about dependencies.
1620 */
1621void
1622vfs_rofs(struct proc *p)
1623{
1624	struct mount *mp, *nmp;
1625
1626	TAILQ_FOREACH_REVERSE_SAFE(mp, &mountlist, mntlist, mnt_list, nmp) {
1627		/* XXX Here is a race, the next pointer is not locked. */
1628		(void) vfs_readonly(mp, p);
1629	}
1630}
1631
1632/*
1633 * Sync and unmount file systems before shutting down.
1634 */
1635void
1636vfs_shutdown(struct proc *p)
1637{
1638#ifdef ACCOUNTING
1639	acct_shutdown();
1640#endif
1641
1642	printf("syncing disks... ");
1643
1644	if (panicstr == 0) {
1645		/* Take all filesystems to read-only */
1646		sys_sync(p, NULL, NULL);
1647		vfs_rofs(p);
1648	}
1649
1650	if (vfs_syncwait(p, 1))
1651		printf("giving up\n");
1652	else
1653		printf("done\n");
1654
1655#if NSOFTRAID > 0
1656	sr_shutdown();
1657#endif
1658}
1659
1660/*
1661 * perform sync() operation and wait for buffers to flush.
1662 */
1663int
1664vfs_syncwait(struct proc *p, int verbose)
1665{
1666	struct buf *bp;
1667	int iter, nbusy, dcount, s;
1668#ifdef MULTIPROCESSOR
1669	int hold_count;
1670#endif
1671
1672	sys_sync(p, NULL, NULL);
1673
1674	/* Wait for sync to finish. */
1675	dcount = 10000;
1676	for (iter = 0; iter < 20; iter++) {
1677		nbusy = 0;
1678		LIST_FOREACH(bp, &bufhead, b_list) {
1679			if ((bp->b_flags & (B_BUSY|B_INVAL|B_READ)) == B_BUSY)
1680				nbusy++;
1681			/*
1682			 * With soft updates, some buffers that are
1683			 * written will be remarked as dirty until other
1684			 * buffers are written.
1685			 */
1686			if (bp->b_flags & B_DELWRI) {
1687				s = splbio();
1688				bremfree(bp);
1689				buf_acquire(bp);
1690				splx(s);
1691				nbusy++;
1692				bawrite(bp);
1693				if (dcount-- <= 0) {
1694					if (verbose)
1695						printf("softdep ");
1696					return 1;
1697				}
1698			}
1699		}
1700		if (nbusy == 0)
1701			break;
1702		if (verbose)
1703			printf("%d ", nbusy);
1704#ifdef MULTIPROCESSOR
1705		if (_kernel_lock_held())
1706			hold_count = __mp_release_all(&kernel_lock);
1707		else
1708			hold_count = 0;
1709#endif
1710		DELAY(40000 * iter);
1711#ifdef MULTIPROCESSOR
1712		if (hold_count)
1713			__mp_acquire_count(&kernel_lock, hold_count);
1714#endif
1715	}
1716
1717	return nbusy;
1718}
1719
1720/*
1721 * posix file system related system variables.
1722 */
1723int
1724fs_posix_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp,
1725    void *newp, size_t newlen, struct proc *p)
1726{
1727	/* all sysctl names at this level are terminal */
1728	if (namelen != 1)
1729		return (ENOTDIR);
1730
1731	switch (name[0]) {
1732	case FS_POSIX_SETUID:
1733		if (newp && securelevel > 0)
1734			return (EPERM);
1735		return(sysctl_int(oldp, oldlenp, newp, newlen, &suid_clear));
1736	default:
1737		return (EOPNOTSUPP);
1738	}
1739	/* NOTREACHED */
1740}
1741
1742/*
1743 * file system related system variables.
1744 */
1745int
1746fs_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
1747    size_t newlen, struct proc *p)
1748{
1749	sysctlfn *fn;
1750
1751	switch (name[0]) {
1752	case FS_POSIX:
1753		fn = fs_posix_sysctl;
1754		break;
1755	default:
1756		return (EOPNOTSUPP);
1757	}
1758	return (*fn)(name + 1, namelen - 1, oldp, oldlenp, newp, newlen, p);
1759}
1760
1761
1762/*
1763 * Routines dealing with vnodes and buffers
1764 */
1765
1766/*
1767 * Wait for all outstanding I/Os to complete
1768 *
1769 * Manipulates v_numoutput. Must be called at splbio()
1770 */
1771int
1772vwaitforio(struct vnode *vp, int slpflag, char *wmesg, int timeo)
1773{
1774	int error = 0;
1775
1776	splassert(IPL_BIO);
1777
1778	while (vp->v_numoutput) {
1779		vp->v_bioflag |= VBIOWAIT;
1780		error = tsleep(&vp->v_numoutput,
1781		    slpflag | (PRIBIO + 1), wmesg, timeo);
1782		if (error)
1783			break;
1784	}
1785
1786	return (error);
1787}
1788
1789/*
1790 * Update outstanding I/O count and do wakeup if requested.
1791 *
1792 * Manipulates v_numoutput. Must be called at splbio()
1793 */
1794void
1795vwakeup(struct vnode *vp)
1796{
1797	splassert(IPL_BIO);
1798
1799	if (vp != NULL) {
1800		if (vp->v_numoutput-- == 0)
1801			panic("vwakeup: neg numoutput");
1802		if ((vp->v_bioflag & VBIOWAIT) && vp->v_numoutput == 0) {
1803			vp->v_bioflag &= ~VBIOWAIT;
1804			wakeup(&vp->v_numoutput);
1805		}
1806	}
1807}
1808
1809/*
1810 * Flush out and invalidate all buffers associated with a vnode.
1811 * Called with the underlying object locked.
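 *
 * With V_SAVE, dirty buffers are written back (via VOP_FSYNC) before
 * being invalidated; with V_SAVEMETA, buffers with negative logical
 * block numbers (indirect blocks) are left on the vnode.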
1812 */
1813int
1814vinvalbuf(struct vnode *vp, int flags, struct ucred *cred, struct proc *p,
1815    int slpflag, int slptimeo)
1816{
1817	struct buf *bp;
1818	struct buf *nbp, *blist;
1819	int s, error;
1820
1821#ifdef VFSLCKDEBUG
1822	if ((vp->v_flag & VLOCKSWORK) && !VOP_ISLOCKED(vp))
1823		panic("vinvalbuf(): vp isn't locked");
1824#endif
1825
1826	if (flags & V_SAVE) {
1827		s = splbio();
1828		vwaitforio(vp, 0, "vinvalbuf", 0);
1829		if (!LIST_EMPTY(&vp->v_dirtyblkhd)) {
1830			splx(s);
1831			if ((error = VOP_FSYNC(vp, cred, MNT_WAIT, p)) != 0)
1832				return (error);
1833			s = splbio();
1834			if (vp->v_numoutput > 0 ||
1835			    !LIST_EMPTY(&vp->v_dirtyblkhd))
1836				panic("vinvalbuf: dirty bufs");
1837		}
1838		splx(s);
1839	}
1840loop:
1841	s = splbio();
1842	for (;;) {
1843		if ((blist = LIST_FIRST(&vp->v_cleanblkhd)) &&
1844		    (flags & V_SAVEMETA))
1845			while (blist && blist->b_lblkno < 0)
1846				blist = LIST_NEXT(blist, b_vnbufs);
1847		if (blist == NULL &&
1848		    (blist = LIST_FIRST(&vp->v_dirtyblkhd)) &&
1849		    (flags & V_SAVEMETA))
1850			while (blist && blist->b_lblkno < 0)
1851				blist = LIST_NEXT(blist, b_vnbufs);
1852		if (!blist)
1853			break;
1854
1855		for (bp = blist; bp; bp = nbp) {
1856			nbp = LIST_NEXT(bp, b_vnbufs);
1857			if (flags & V_SAVEMETA && bp->b_lblkno < 0)
1858				continue;
1859			if (bp->b_flags & B_BUSY) {
1860				bp->b_flags |= B_WANTED;
1861				error = tsleep(bp, slpflag | (PRIBIO + 1),
1862				    "vinvalbuf", slptimeo);
1863				if (error) {
1864					splx(s);
1865					return (error);
1866				}
1867				break;
1868			}
1869			bremfree(bp);
1870			/*
1871			 * XXX Since there are no node locks for NFS, I believe
1872			 * there is a slight chance that a delayed write will
1873			 * occur while sleeping just above, so check for it.
1874			 */
1875			if ((bp->b_flags & B_DELWRI) && (flags & V_SAVE)) {
1876				buf_acquire(bp);
1877				splx(s);
1878				(void) VOP_BWRITE(bp);
1879				goto loop;
1880			}
1881			buf_acquire_nomap(bp);
1882			bp->b_flags |= B_INVAL;
1883			brelse(bp);
1884		}
1885	}
1886	if (!(flags & V_SAVEMETA) &&
1887	    (!LIST_EMPTY(&vp->v_dirtyblkhd) || !LIST_EMPTY(&vp->v_cleanblkhd)))
1888		panic("vinvalbuf: flush failed");
1889	splx(s);
1890	return (0);
1891}
1892
1893void
1894vflushbuf(struct vnode *vp, int sync)
1895{
1896	struct buf *bp, *nbp;
1897	int s;
1898
1899loop:
1900	s = splbio();
1901	LIST_FOREACH_SAFE(bp, &vp->v_dirtyblkhd, b_vnbufs, nbp) {
1902		if ((bp->b_flags & B_BUSY))
1903			continue;
1904		if ((bp->b_flags & B_DELWRI) == 0)
1905			panic("vflushbuf: not dirty");
1906		bremfree(bp);
1907		buf_acquire(bp);
1908		splx(s);
1909		/*
1910		 * Wait for I/O associated with indirect blocks to complete,
1911		 * since there is no way to quickly wait for them below.
1912		 */
1913		if (bp->b_vp == vp || sync == 0)
1914			(void) bawrite(bp);
1915		else
1916			(void) bwrite(bp);
1917		goto loop;
1918	}
1919	if (sync == 0) {
1920		splx(s);
1921		return;
1922	}
1923	vwaitforio(vp, 0, "vflushbuf", 0);
1924	if (!LIST_EMPTY(&vp->v_dirtyblkhd)) {
1925		splx(s);
1926#ifdef DIAGNOSTIC
1927		vprint("vflushbuf: dirty", vp);
1928#endif
1929		goto loop;
1930	}
1931	splx(s);
1932}
1933
1934/*
1935 * Associate a buffer with a vnode.
1936 *
1937 * Manipulates buffer vnode queues. Must be called at splbio().
1938 */
1939void
1940bgetvp(struct vnode *vp, struct buf *bp)
1941{
1942	splassert(IPL_BIO);
1943
1944
1945	if (bp->b_vp)
1946		panic("bgetvp: not free");
1947	vhold(vp);
1948	bp->b_vp = vp;
1949	if (vp->v_type == VBLK || vp->v_type == VCHR)
1950		bp->b_dev = vp->v_rdev;
1951	else
1952		bp->b_dev = NODEV;
1953	/*
1954	 * Insert onto list for new vnode.
1955	 */
1956	bufinsvn(bp, &vp->v_cleanblkhd);
1957}
1958
1959/*
1960 * Disassociate a buffer from a vnode.
1961 *
1962 * Manipulates vnode buffer queues. Must be called at splbio().
1963 */
1964void
1965brelvp(struct buf *bp)
1966{
1967	struct vnode *vp;
1968
1969	splassert(IPL_BIO);
1970
1971	if ((vp = bp->b_vp) == (struct vnode *) 0)
1972		panic("brelvp: NULL");
1973	/*
1974	 * Delete from old vnode list, if on one.
1975	 */
1976	if (LIST_NEXT(bp, b_vnbufs) != NOLIST)
1977		bufremvn(bp);
1978	if ((vp->v_bioflag & VBIOONSYNCLIST) &&
1979	    LIST_FIRST(&vp->v_dirtyblkhd) == NULL) {
1980		vp->v_bioflag &= ~VBIOONSYNCLIST;
1981		LIST_REMOVE(vp, v_synclist);
1982	}
1983	bp->b_vp = NULL;
1984
1985	vdrop(vp);
1986}
1987
1988/*
1989 * Replaces the current vnode associated with the buffer, if any,
1990 * with a new vnode.
1991 *
1992 * If an output I/O is pending on the buffer, the old vnode
1993 * I/O count is adjusted.
1994 *
1995 * Ignores vnode buffer queues. Must be called at splbio().
1996 */
1997void
1998buf_replacevnode(struct buf *bp, struct vnode *newvp)
1999{
2000	struct vnode *oldvp = bp->b_vp;
2001
2002	splassert(IPL_BIO);
2003
2004	if (oldvp)
2005		brelvp(bp);
2006
2007	if ((bp->b_flags & (B_READ | B_DONE)) == 0) {
2008		newvp->v_numoutput++;	/* put it on swapdev */
2009		vwakeup(oldvp);
2010	}
2011
2012	bgetvp(newvp, bp);
2013	bufremvn(bp);
2014}
2015
2016/*
2017 * Used to assign buffers to the appropriate clean or dirty list on
2018 * the vnode and to add newly dirty vnodes to the appropriate
2019 * filesystem syncer list.
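 *
 * Newly dirty directories are queued to the syncer at half the normal
 * delay and mounted block devices at a third of it, so that metadata
 * reaches disk sooner.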
2020 *
2021 * Manipulates vnode buffer queues. Must be called at splbio().
2022 */
2023void
2024reassignbuf(struct buf *bp)
2025{
2026	struct buflists *listheadp;
2027	int delay;
2028	struct vnode *vp = bp->b_vp;
2029
2030	splassert(IPL_BIO);
2031
2032	/*
2033	 * Delete from old vnode list, if on one.
2034	 */
2035	if (LIST_NEXT(bp, b_vnbufs) != NOLIST)
2036		bufremvn(bp);
2037
2038	/*
2039	 * If dirty, put on list of dirty buffers;
2040	 * otherwise insert onto list of clean buffers.
2041	 */
2042	if ((bp->b_flags & B_DELWRI) == 0) {
2043		listheadp = &vp->v_cleanblkhd;
2044		if ((vp->v_bioflag & VBIOONSYNCLIST) &&
2045		    LIST_FIRST(&vp->v_dirtyblkhd) == NULL) {
2046			vp->v_bioflag &= ~VBIOONSYNCLIST;
2047			LIST_REMOVE(vp, v_synclist);
2048		}
2049	} else {
2050		listheadp = &vp->v_dirtyblkhd;
2051		if ((vp->v_bioflag & VBIOONSYNCLIST) == 0) {
2052			switch (vp->v_type) {
2053			case VDIR:
2054				delay = syncdelay / 2;
2055				break;
2056			case VBLK:
2057				if (vp->v_specmountpoint != NULL) {
2058					delay = syncdelay / 3;
2059					break;
2060				}
2061				/* FALLTHROUGH */
2062			default:
2063				delay = syncdelay;
2064			}
2065			vn_syncer_add_to_worklist(vp, delay);
2066		}
2067	}
2068	bufinsvn(bp, listheadp);
2069}
2070
2071int
2072vfs_register(struct vfsconf *vfs)
2073{
2074	struct vfsconf *vfsp;
2075	struct vfsconf **vfspp;
2076
2077#ifdef DIAGNOSTIC
2078	/* Paranoia? */
2079	if (vfs->vfc_refcount != 0)
2080		printf("vfs_register called with vfc_refcount > 0\n");
2081#endif
2082
2083	/* Check if filesystem already known */
2084	for (vfspp = &vfsconf, vfsp = vfsconf; vfsp;
2085	    vfspp = &vfsp->vfc_next, vfsp = vfsp->vfc_next)
2086		if (strcmp(vfsp->vfc_name, vfs->vfc_name) == 0)
2087			return (EEXIST);
2088
2089	if (vfs->vfc_typenum > maxvfsconf)
2090		maxvfsconf = vfs->vfc_typenum;
2091
2092	vfs->vfc_next = NULL;
2093
2094	/* Add to the end of the list */
2095	*vfspp = vfs;
2096
2097	/* Call vfs_init() */
2098	if (vfs->vfc_vfsops->vfs_init)
2099		(*(vfs->vfc_vfsops->vfs_init))(vfs);
2100
2101	return 0;
2102}
2103
2104int
2105vfs_unregister(struct vfsconf *vfs)
2106{
2107	struct vfsconf *vfsp;
2108	struct vfsconf **vfspp;
2109	int maxtypenum;
2110
2111	/* Find our vfsconf struct */
2112	for (vfspp = &vfsconf, vfsp = vfsconf; vfsp;
2113	    vfspp = &vfsp->vfc_next, vfsp = vfsp->vfc_next) {
2114		if (strcmp(vfsp->vfc_name, vfs->vfc_name) == 0)
2115			break;
2116	}
2117
2118	if (!vfsp)			/* Not found */
2119		return (ENOENT);
2120
2121	if (vfsp->vfc_refcount)		/* In use */
2122		return (EBUSY);
2123
2124	/* Remove from list and free */
2125	*vfspp = vfsp->vfc_next;
2126
2127	maxtypenum = 0;
2128
2129	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
2130		if (vfsp->vfc_typenum > maxtypenum)
2131			maxtypenum = vfsp->vfc_typenum;
2132
2133	maxvfsconf = maxtypenum;
2134	return 0;
2135}
2136
2137/*
2138 * Check if vnode represents a disk device
2139 */
2140int
2141vn_isdisk(struct vnode *vp, int *errp)
2142{
2143	if (vp->v_type != VBLK && vp->v_type != VCHR)
2144		return (0);
2145
2146	return (1);
2147}
2148
2149#ifdef DDB
2150#include <machine/db_machdep.h>
2151#include <ddb/db_interface.h>
2152
2153void
2154vfs_buf_print(void *b, int full,
2155    int (*pr)(const char *, ...) __attribute__((__format__(__kprintf__,1,2))))
2156{
2157	struct buf *bp = b;
2158
2159	(*pr)("  vp %p lblkno 0x%llx blkno 0x%llx dev 0x%x\n"
2160	      "  proc %p error %d flags %lb\n",
2161	    bp->b_vp, (int64_t)bp->b_lblkno, (int64_t)bp->b_blkno, bp->b_dev,
2162	    bp->b_proc, bp->b_error, bp->b_flags, B_BITS);
2163
2164	(*pr)("  bufsize 0x%lx bcount 0x%lx resid 0x%lx\n"
2165	      "  data %p saveaddr %p dep %p iodone %p\n",
2166	    bp->b_bufsize, bp->b_bcount, (long)bp->b_resid,
2167	    bp->b_data, bp->b_saveaddr,
2168	    LIST_FIRST(&bp->b_dep), bp->b_iodone);
2169
2170	(*pr)("  dirty {off 0x%x end 0x%x} valid {off 0x%x end 0x%x}\n",
2171	    bp->b_dirtyoff, bp->b_dirtyend, bp->b_validoff, bp->b_validend);
2172
2173#ifdef FFS_SOFTUPDATES
2174	if (full)
2175		softdep_print(bp, full, pr);
2176#endif
2177}
2178
2179const char *vtypes[] = { VTYPE_NAMES };
2180const char *vtags[] = { VTAG_NAMES };
2181
2182void
2183vfs_vnode_print(void *v, int full,
2184    int (*pr)(const char *, ...) __attribute__((__format__(__kprintf__,1,2))))
2185{
2186	struct vnode *vp = v;
2187
2188	(*pr)("tag %s(%d) type %s(%d) mount %p typedata %p\n",
2189	      (u_int)vp->v_tag >= nitems(vtags)? "<unk>":vtags[vp->v_tag],
2190	      vp->v_tag,
2191	      (u_int)vp->v_type >= nitems(vtypes)? "<unk>":vtypes[vp->v_type],
2192	      vp->v_type, vp->v_mount, vp->v_mountedhere);
2193
2194	(*pr)("data %p usecount %d writecount %d holdcnt %d numoutput %d\n",
2195	      vp->v_data, vp->v_usecount, vp->v_writecount,
2196	      vp->v_holdcnt, vp->v_numoutput);
2197
2198	/* uvm_object_printit(&vp->v_uobj, full, pr); */
2199
2200	if (full) {
2201		struct buf *bp;
2202
2203		(*pr)("clean bufs:\n");
2204		LIST_FOREACH(bp, &vp->v_cleanblkhd, b_vnbufs) {
2205			(*pr)(" bp %p\n", bp);
2206			vfs_buf_print(bp, full, pr);
2207		}
2208
2209		(*pr)("dirty bufs:\n");
2210		LIST_FOREACH(bp, &vp->v_dirtyblkhd, b_vnbufs) {
2211			(*pr)(" bp %p\n", bp);
2212			vfs_buf_print(bp, full, pr);
2213		}
2214	}
2215}
2216
2217void
2218vfs_mount_print(struct mount *mp, int full,
2219    int (*pr)(const char *, ...) __attribute__((__format__(__kprintf__,1,2))))
2220{
2221	struct vfsconf *vfc = mp->mnt_vfc;
2222	struct vnode *vp;
2223	int cnt;
2224
2225	(*pr)("flags %b\nvnodecovered %p syncer %p data %p\n",
2226	    mp->mnt_flag, MNT_BITS,
2227	    mp->mnt_vnodecovered, mp->mnt_syncer, mp->mnt_data);
2228
2229	(*pr)("vfsconf: ops %p name \"%s\" num %d ref %d flags 0x%x\n",
2230            vfc->vfc_vfsops, vfc->vfc_name, vfc->vfc_typenum,
2231	    vfc->vfc_refcount, vfc->vfc_flags);
2232
2233	(*pr)("statvfs cache: bsize %x iosize %x\nblocks %llu free %llu avail %lld\n",
2234	    mp->mnt_stat.f_bsize, mp->mnt_stat.f_iosize, mp->mnt_stat.f_blocks,
2235	    mp->mnt_stat.f_bfree, mp->mnt_stat.f_bavail);
2236
2237	(*pr)("  files %llu ffiles %llu favail %lld\n", mp->mnt_stat.f_files,
2238	    mp->mnt_stat.f_ffree, mp->mnt_stat.f_favail);
2239
2240	(*pr)("  f_fsidx {0x%x, 0x%x} owner %u ctime 0x%llx\n",
2241	    mp->mnt_stat.f_fsid.val[0], mp->mnt_stat.f_fsid.val[1],
2242	    mp->mnt_stat.f_owner, mp->mnt_stat.f_ctime);
2243
2244 	(*pr)("  syncwrites %llu asyncwrites = %llu\n",
2245	    mp->mnt_stat.f_syncwrites, mp->mnt_stat.f_asyncwrites);
2246
2247 	(*pr)("  syncreads %llu asyncreads = %llu\n",
2248	    mp->mnt_stat.f_syncreads, mp->mnt_stat.f_asyncreads);
2249
2250	(*pr)("  fstype \"%s\" mnton \"%s\" mntfrom \"%s\" mntspec \"%s\"\n",
2251	    mp->mnt_stat.f_fstypename, mp->mnt_stat.f_mntonname,
2252	    mp->mnt_stat.f_mntfromname, mp->mnt_stat.f_mntfromspec);
2253
2254	(*pr)("locked vnodes:");
2255	/* XXX would take mountlist lock, except ddb has no context */
2256	cnt = 0;
2257	LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
2258		if (VOP_ISLOCKED(vp)) {
2259			if (cnt == 0)
2260				(*pr)("\n  %p", vp);
2261			else if ((cnt % (72 / (sizeof(void *) * 2 + 4))) == 0)
2262				(*pr)(",\n  %p", vp);
2263			else
2264				(*pr)(", %p", vp);
2265			cnt++;
2266		}
2267	}
2268	(*pr)("\n");
2269
2270	if (full) {
2271		(*pr)("all vnodes:");
2272		/* XXX would take mountlist lock, except ddb has no context */
2273		cnt = 0;
2274		LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
2275			if (cnt == 0)
2276				(*pr)("\n  %p", vp);
2277			else if ((cnt % (72 / (sizeof(void *) * 2 + 4))) == 0)
2278				(*pr)(",\n  %p", vp);
2279			else
2280				(*pr)(", %p", vp);
2281			cnt++;
2282		}
2283		(*pr)("\n");
2284	}
2285}
2286#endif /* DDB */
2287
2288void
2289copy_statfs_info(struct statfs *sbp, const struct mount *mp)
2290{
2291	const struct statfs *mbp;
2292
2293	strncpy(sbp->f_fstypename, mp->mnt_vfc->vfc_name, MFSNAMELEN);
2294
2295	if (sbp == (mbp = &mp->mnt_stat))
2296		return;
2297
2298	sbp->f_fsid = mbp->f_fsid;
2299	sbp->f_owner = mbp->f_owner;
2300	sbp->f_flags = mbp->f_flags;
2301	sbp->f_syncwrites = mbp->f_syncwrites;
2302	sbp->f_asyncwrites = mbp->f_asyncwrites;
2303	sbp->f_syncreads = mbp->f_syncreads;
2304	sbp->f_asyncreads = mbp->f_asyncreads;
2305	sbp->f_namemax = mbp->f_namemax;
2306	memcpy(sbp->f_mntonname, mp->mnt_stat.f_mntonname, MNAMELEN);
2307	memcpy(sbp->f_mntfromname, mp->mnt_stat.f_mntfromname, MNAMELEN);
2308	memcpy(sbp->f_mntfromspec, mp->mnt_stat.f_mntfromspec, MNAMELEN);
2309	memcpy(&sbp->mount_info, &mp->mnt_stat.mount_info,
2310	    sizeof(union mount_info));
2311}
2312