1/*	$OpenBSD: vfs_subr.c,v 1.264 2017/12/14 20:20:38 deraadt Exp $	*/
2/*	$NetBSD: vfs_subr.c,v 1.53 1996/04/22 01:39:13 christos Exp $	*/
3
4/*
5 * Copyright (c) 1989, 1993
6 *	The Regents of the University of California.  All rights reserved.
7 * (c) UNIX System Laboratories, Inc.
8 * All or some portions of this file are derived from material licensed
9 * to the University of California by American Telephone and Telegraph
10 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
11 * the permission of UNIX System Laboratories, Inc.
12 *
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
15 * are met:
16 * 1. Redistributions of source code must retain the above copyright
17 *    notice, this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 *    notice, this list of conditions and the following disclaimer in the
20 *    documentation and/or other materials provided with the distribution.
21 * 3. Neither the name of the University nor the names of its contributors
22 *    may be used to endorse or promote products derived from this software
23 *    without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 * SUCH DAMAGE.
36 *
37 *	@(#)vfs_subr.c	8.13 (Berkeley) 4/18/94
38 */
39
40/*
41 * External virtual filesystem routines
42 */
43
44#include <sys/param.h>
45#include <sys/systm.h>
46#include <sys/proc.h>
47#include <sys/sysctl.h>
48#include <sys/mount.h>
49#include <sys/time.h>
50#include <sys/fcntl.h>
51#include <sys/kernel.h>
52#include <sys/conf.h>
53#include <sys/vnode.h>
54#include <sys/lock.h>
55#include <sys/stat.h>
56#include <sys/acct.h>
57#include <sys/namei.h>
58#include <sys/ucred.h>
59#include <sys/buf.h>
60#include <sys/errno.h>
61#include <sys/malloc.h>
62#include <sys/mbuf.h>
63#include <sys/syscallargs.h>
64#include <sys/pool.h>
65#include <sys/tree.h>
66#include <sys/specdev.h>
67
68#include <netinet/in.h>
69
70#include <uvm/uvm_extern.h>
71#include <uvm/uvm_vnode.h>
72
73#include "softraid.h"
74
75void sr_shutdown(void);
76
77enum vtype iftovt_tab[16] = {
78	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
79	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
80};
81
82int	vttoif_tab[9] = {
83	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
84	S_IFSOCK, S_IFIFO, S_IFMT,
85};
86
87int prtactive = 0;		/* 1 => print out reclaim of active vnodes */
88int suid_clear = 1;		/* 1 => clear SUID / SGID on owner change */
89
90/*
91 * Insq/Remq for the vnode usage lists.
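 *
 * bufremvn() leaves LIST_NEXT(bp, b_vnbufs) set to NOLIST so that
 * brelvp() and reassignbuf() below can cheaply test whether a buffer
 * is still on one of a vnode's buffer lists.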
92 */
93#define	bufinsvn(bp, dp)	LIST_INSERT_HEAD(dp, bp, b_vnbufs)
94#define	bufremvn(bp) {							\
95	LIST_REMOVE(bp, b_vnbufs);					\
96	LIST_NEXT(bp, b_vnbufs) = NOLIST;				\
97}
98
99struct freelst vnode_hold_list;	/* list of vnodes referencing buffers */
100struct freelst vnode_free_list;	/* vnode free list */
101
102struct mntlist mountlist;	/* mounted filesystem list */
103
104void	vclean(struct vnode *, int, struct proc *);
105
106void insmntque(struct vnode *, struct mount *);
107int getdevvp(dev_t, struct vnode **, enum vtype);
108
109int vfs_hang_addrlist(struct mount *, struct netexport *,
110				  struct export_args *);
111int vfs_free_netcred(struct radix_node *, void *, u_int);
112void vfs_free_addrlist(struct netexport *);
113void vputonfreelist(struct vnode *);
114
115int vflush_vnode(struct vnode *, void *);
116int maxvnodes;
117
118#ifdef DEBUG
119void printlockedvnodes(void);
120#endif
121
122struct pool vnode_pool;
123struct pool uvm_vnode_pool;
124
125static inline int rb_buf_compare(const struct buf *b1, const struct buf *b2);
126RBT_GENERATE(buf_rb_bufs, buf, b_rbbufs, rb_buf_compare);
127
128static inline int
129rb_buf_compare(const struct buf *b1, const struct buf *b2)
130{
131	if (b1->b_lblkno < b2->b_lblkno)
132		return(-1);
133	if (b1->b_lblkno > b2->b_lblkno)
134		return(1);
135	return(0);
136}
137
138/*
139 * Initialize the vnode management data structures.
140 */
141void
142vntblinit(void)
143{
144	/* buffer cache may need a vnode for each buffer */
145	maxvnodes = 2 * initialvnodes;
146	pool_init(&vnode_pool, sizeof(struct vnode), 0, IPL_NONE,
147	    PR_WAITOK, "vnodes", NULL);
148	pool_init(&uvm_vnode_pool, sizeof(struct uvm_vnode), 0, IPL_NONE,
149	    PR_WAITOK, "uvmvnodes", NULL);
150	TAILQ_INIT(&vnode_hold_list);
151	TAILQ_INIT(&vnode_free_list);
152	TAILQ_INIT(&mountlist);
153	/*
154	 * Initialize the filesystem syncer.
155	 */
156	vn_initialize_syncerd();
157
158#ifdef NFSSERVER
159	rn_init(sizeof(struct sockaddr_in));
160#endif /* NFSSERVER */
161}
162
163/*
164 * Mark a mount point as busy. Used to synchronize access and to delay
165 * unmounting.
166 *
167 * The default behaviour is to attempt getting a READ lock; if an unmount
168 * is in progress, wait for it to finish and then return failure.
169 */
170int
171vfs_busy(struct mount *mp, int flags)
172{
173	int rwflags = 0;
174
175	/* new mountpoints need their lock initialised */
176	if (mp->mnt_lock.rwl_name == NULL)
177		rw_init_flags(&mp->mnt_lock, "vfslock", RWL_IS_VNODE);
178
179	if (flags & VB_WRITE)
180		rwflags |= RW_WRITE;
181	else
182		rwflags |= RW_READ;
183
184	if (flags & VB_WAIT)
185		rwflags |= RW_SLEEPFAIL;
186	else
187		rwflags |= RW_NOSLEEP;
188
189	if (rw_enter(&mp->mnt_lock, rwflags))
190		return (EBUSY);
191
192	return (0);
193}
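
/*
 * Typical usage, as in printlockedvnodes() below:
 *
 *	if (vfs_busy(mp, VB_READ|VB_NOWAIT))
 *		continue;
 *	...examine the mount point...
 *	vfs_unbusy(mp);
 */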
194
195/*
196 * Free a busy file system
197 */
198void
199vfs_unbusy(struct mount *mp)
200{
201	rw_exit(&mp->mnt_lock);
202}
203
204int
205vfs_isbusy(struct mount *mp)
206{
207	if (RWLOCK_OWNER(&mp->mnt_lock) > 0)
208		return (1);
209	else
210		return (0);
211}
212
213/*
214 * Lookup a filesystem type, and if found allocate and initialize
215 * a mount structure for it.
216 *
217 * Devname is usually updated by mount(8) after booting.
218 */
219int
220vfs_rootmountalloc(char *fstypename, char *devname, struct mount **mpp)
221{
222	struct vfsconf *vfsp;
223	struct mount *mp;
224
225	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
226		if (!strcmp(vfsp->vfc_name, fstypename))
227			break;
228	if (vfsp == NULL)
229		return (ENODEV);
230	mp = malloc(sizeof(*mp), M_MOUNT, M_WAITOK|M_ZERO);
231	(void)vfs_busy(mp, VB_READ|VB_NOWAIT);
232	LIST_INIT(&mp->mnt_vnodelist);
233	mp->mnt_vfc = vfsp;
234	mp->mnt_op = vfsp->vfc_vfsops;
235	mp->mnt_flag = MNT_RDONLY;
236	mp->mnt_vnodecovered = NULLVP;
237	vfsp->vfc_refcount++;
238	mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;
239	strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
240	mp->mnt_stat.f_mntonname[0] = '/';
241	copystr(devname, mp->mnt_stat.f_mntfromname, MNAMELEN, 0);
242	copystr(devname, mp->mnt_stat.f_mntfromspec, MNAMELEN, 0);
243	*mpp = mp;
244 	return (0);
245 }
246
247/*
248 * Lookup a mount point by filesystem identifier.
249 */
250struct mount *
251vfs_getvfs(fsid_t *fsid)
252{
253	struct mount *mp;
254
255	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
256		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
257		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
258			return (mp);
259		}
260	}
261
262	return (NULL);
263}
264
265
266/*
267 * Get a new unique fsid
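 *
 * val[0] is a pseudo device number built from nblkdev + the filesystem
 * type number and the xxxfs_mntid counter; val[1] is the filesystem
 * type number itself.  The loop below bumps the counter until the
 * resulting fsid is not already used by another mount.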
268 */
269void
270vfs_getnewfsid(struct mount *mp)
271{
272	static u_short xxxfs_mntid;
273
274	fsid_t tfsid;
275	int mtype;
276
277	mtype = mp->mnt_vfc->vfc_typenum;
278	mp->mnt_stat.f_fsid.val[0] = makedev(nblkdev + mtype, 0);
279	mp->mnt_stat.f_fsid.val[1] = mtype;
280	if (xxxfs_mntid == 0)
281		++xxxfs_mntid;
282	tfsid.val[0] = makedev(nblkdev + mtype, xxxfs_mntid);
283	tfsid.val[1] = mtype;
284	if (!TAILQ_EMPTY(&mountlist)) {
285		while (vfs_getvfs(&tfsid)) {
286			tfsid.val[0]++;
287			xxxfs_mntid++;
288		}
289	}
290	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
291}
292
293/*
294 * Set vnode attributes to VNOVAL
295 */
296void
297vattr_null(struct vattr *vap)
298{
299
300	vap->va_type = VNON;
301	/*
302	 * Don't get fancy: u_quad_t = u_int = VNOVAL leaves the u_quad_t
303	 * with 2^31-1 instead of 2^64-1.  Just write'm out and let
304	 * the compiler do its job.
305	 */
306	vap->va_mode = VNOVAL;
307	vap->va_nlink = VNOVAL;
308	vap->va_uid = VNOVAL;
309	vap->va_gid = VNOVAL;
310	vap->va_fsid = VNOVAL;
311	vap->va_fileid = VNOVAL;
312	vap->va_size = VNOVAL;
313	vap->va_blocksize = VNOVAL;
314	vap->va_atime.tv_sec = VNOVAL;
315	vap->va_atime.tv_nsec = VNOVAL;
316	vap->va_mtime.tv_sec = VNOVAL;
317	vap->va_mtime.tv_nsec = VNOVAL;
318	vap->va_ctime.tv_sec = VNOVAL;
319	vap->va_ctime.tv_nsec = VNOVAL;
320	vap->va_gen = VNOVAL;
321	vap->va_flags = VNOVAL;
322	vap->va_rdev = VNOVAL;
323	vap->va_bytes = VNOVAL;
324	vap->va_filerev = VNOVAL;
325	vap->va_vaflags = 0;
326}
327
328/*
329 * Routines having to do with the management of the vnode table.
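 *
 * A vnode's v_usecount counts active references: vget() and vref()
 * take one, vput() and vrele() drop one and call VOP_INACTIVE when it
 * reaches zero.  v_holdcnt counts buffers still naming the vnode and
 * is managed by vhold()/vdrop().  An unreferenced vnode sits on
 * vnode_free_list (no held buffers) or vnode_hold_list (held buffers)
 * and carries the VBIOONFREELIST bioflag while it is on either list.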
330 */
331long numvnodes;
332
333/*
334 * Return the next vnode from the free list.
335 */
336int
337getnewvnode(enum vtagtype tag, struct mount *mp, struct vops *vops,
338    struct vnode **vpp)
339{
340	struct proc *p = curproc;
341	struct freelst *listhd;
342	static int toggle;
343	struct vnode *vp;
344	int s;
345
346	/*
347	 * allow maxvnodes to increase if the buffer cache itself
348	 * is big enough to justify it. (we don't shrink it ever)
349	 */
350	maxvnodes = maxvnodes < bcstats.numbufs ? bcstats.numbufs
351	    : maxvnodes;
352
353	/*
354	 * We must choose whether to allocate a new vnode or recycle an
355	 * existing one. The criterion for allocating a new one is that
356	 * the total number of vnodes is less than the number desired or
357	 * there are no vnodes on either free list. Generally we only
358	 * want to recycle vnodes that have no buffers associated with
359	 * them, so we look first on the vnode_free_list. If it is empty,
360	 * we next consider vnodes with referencing buffers on the
361	 * vnode_hold_list. The toggle ensures that half the time we
362	 * will use a vnode from the vnode_hold_list, and half the time
363	 * we will allocate a new one unless the list has grown to twice
364	 * the desired size. We are reluctant to recycle vnodes from the
365	 * vnode_hold_list because we will lose the identity of all their
366	 * referencing buffers.
367	 */
368	toggle ^= 1;
369	if (numvnodes / 2 > maxvnodes)
370		toggle = 0;
371
372	s = splbio();
373	if ((numvnodes < maxvnodes) ||
374	    ((TAILQ_FIRST(listhd = &vnode_free_list) == NULL) &&
375	    ((TAILQ_FIRST(listhd = &vnode_hold_list) == NULL) || toggle))) {
376		splx(s);
377		vp = pool_get(&vnode_pool, PR_WAITOK | PR_ZERO);
378		vp->v_uvm = pool_get(&uvm_vnode_pool, PR_WAITOK | PR_ZERO);
379		vp->v_uvm->u_vnode = vp;
380		RBT_INIT(buf_rb_bufs, &vp->v_bufs_tree);
381		cache_tree_init(&vp->v_nc_tree);
382		TAILQ_INIT(&vp->v_cache_dst);
383		numvnodes++;
384	} else {
385		TAILQ_FOREACH(vp, listhd, v_freelist) {
386			if (VOP_ISLOCKED(vp) == 0)
387				break;
388		}
389		/*
390		 * Unless this is a bad time of the month, at most
391		 * the first NCPUS items on the free list are
392		 * locked, so this is close enough to being empty.
393		 */
394		if (vp == NULL) {
395			splx(s);
396			tablefull("vnode");
397			*vpp = 0;
398			return (ENFILE);
399		}
400
401#ifdef DIAGNOSTIC
402		if (vp->v_usecount) {
403			vprint("free vnode", vp);
404			panic("free vnode isn't");
405		}
406#endif
407
408		TAILQ_REMOVE(listhd, vp, v_freelist);
409		vp->v_bioflag &= ~VBIOONFREELIST;
410		splx(s);
411
412		if (vp->v_type != VBAD)
413			vgonel(vp, p);
414#ifdef DIAGNOSTIC
415		if (vp->v_data) {
416			vprint("cleaned vnode", vp);
417			panic("cleaned vnode isn't");
418		}
419		s = splbio();
420		if (vp->v_numoutput)
421			panic("Clean vnode has pending I/O's");
422		splx(s);
423#endif
424		vp->v_flag = 0;
425		vp->v_socket = 0;
426	}
427	cache_purge(vp);
428	vp->v_type = VNON;
429	vp->v_tag = tag;
430	vp->v_op = vops;
431	insmntque(vp, mp);
432	*vpp = vp;
433	vp->v_usecount = 1;
434	vp->v_data = 0;
435	return (0);
436}
437
438/*
439 * Move a vnode from one mount queue to another.
440 */
441void
442insmntque(struct vnode *vp, struct mount *mp)
443{
444	/*
445	 * Delete from old mount point vnode list, if on one.
446	 */
447	if (vp->v_mount != NULL)
448		LIST_REMOVE(vp, v_mntvnodes);
449	/*
450	 * Insert into list of vnodes for the new mount point, if available.
451	 */
452	if ((vp->v_mount = mp) != NULL)
453		LIST_INSERT_HEAD(&mp->mnt_vnodelist, vp, v_mntvnodes);
454}
455
456/*
457 * Create a vnode for a block device.
458 * Used for root filesystem, argdev, and swap areas.
459 * Also used for memory file system special devices.
460 */
461int
462bdevvp(dev_t dev, struct vnode **vpp)
463{
464	return (getdevvp(dev, vpp, VBLK));
465}
466
467/*
468 * Create a vnode for a character device.
469 * Used for console handling.
470 */
471int
472cdevvp(dev_t dev, struct vnode **vpp)
473{
474	return (getdevvp(dev, vpp, VCHR));
475}
476
477/*
478 * Create a vnode for a device.
479 * Used by bdevvp (block device) for root file system etc.,
480 * and by cdevvp (character device) for console.
481 */
482int
483getdevvp(dev_t dev, struct vnode **vpp, enum vtype type)
484{
485	struct vnode *vp;
486	struct vnode *nvp;
487	int error;
488
489	if (dev == NODEV) {
490		*vpp = NULLVP;
491		return (0);
492	}
493	error = getnewvnode(VT_NON, NULL, &spec_vops, &nvp);
494	if (error) {
495		*vpp = NULLVP;
496		return (error);
497	}
498	vp = nvp;
499	vp->v_type = type;
500	if ((nvp = checkalias(vp, dev, NULL)) != 0) {
501		vput(vp);
502		vp = nvp;
503	}
504	if (vp->v_type == VCHR && cdevsw[major(vp->v_rdev)].d_type == D_TTY)
505		vp->v_flag |= VISTTY;
506	*vpp = vp;
507	return (0);
508}
509
510/*
511 * Check to see if the new vnode represents a special device
512 * for which we already have a vnode (either because of
513 * bdevvp() or because of a different vnode representing
514 * the same block device). If such an alias exists, deallocate
515 * the existing contents and return the aliased vnode. The
516 * caller is responsible for filling it with its new contents.
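 *
 * For cloning character devices (D_CLONE) the minor number allocation
 * bitmap, v_specbitmap, is shared between all aliases of the device's
 * minor 0 vnode, so clone unit allocation is tracked in one place
 * (vgonel() frees the bitmap with the last such alias).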
517 */
518struct vnode *
519checkalias(struct vnode *nvp, dev_t nvp_rdev, struct mount *mp)
520{
521	struct proc *p = curproc;
522	struct vnode *vp;
523	struct vnode **vpp;
524
525	if (nvp->v_type != VBLK && nvp->v_type != VCHR)
526		return (NULLVP);
527
528	vpp = &speclisth[SPECHASH(nvp_rdev)];
529loop:
530	for (vp = *vpp; vp; vp = vp->v_specnext) {
531		if (nvp_rdev != vp->v_rdev || nvp->v_type != vp->v_type) {
532			continue;
533		}
534		/*
535		 * Alias, but not in use, so flush it out.
536		 */
537		if (vp->v_usecount == 0) {
538			vgonel(vp, p);
539			goto loop;
540		}
541		if (vget(vp, LK_EXCLUSIVE, p)) {
542			goto loop;
543		}
544		break;
545	}
546
547	/*
548	 * The common case (no bdevvp() vnode to take over) is handled below
549	 */
550	if (vp == NULL || !(vp->v_tag == VT_NON && vp->v_type == VBLK)) {
551		nvp->v_specinfo = malloc(sizeof(struct specinfo), M_VNODE,
552			M_WAITOK);
553		nvp->v_rdev = nvp_rdev;
554		nvp->v_hashchain = vpp;
555		nvp->v_specnext = *vpp;
556		nvp->v_specmountpoint = NULL;
557		nvp->v_speclockf = NULL;
558		nvp->v_specbitmap = NULL;
559		if (nvp->v_type == VCHR &&
560		    (cdevsw[major(nvp_rdev)].d_flags & D_CLONE) &&
561		    (minor(nvp_rdev) >> CLONE_SHIFT == 0)) {
562			if (vp != NULLVP)
563				nvp->v_specbitmap = vp->v_specbitmap;
564			else
565				nvp->v_specbitmap = malloc(CLONE_MAPSZ,
566				    M_VNODE, M_WAITOK | M_ZERO);
567		}
568		*vpp = nvp;
569		if (vp != NULLVP) {
570			nvp->v_flag |= VALIASED;
571			vp->v_flag |= VALIASED;
572			vput(vp);
573		}
574		return (NULLVP);
575	}
576
577	/*
578	 * This code handles the uncommon case: we found an alias whose
579	 * v_tag is VT_NON and whose v_type is VBLK.
580	 * This means we found a block device vnode that was created
581	 * using bdevvp().
582	 * An example of such a vnode is the root partition device vnode
583	 * created in ffs_mountroot.
584	 *
585	 * The vnodes created by bdevvp should not be aliased (why?).
586	 */
587
588	VOP_UNLOCK(vp, p);
589	vclean(vp, 0, p);
590	vp->v_op = nvp->v_op;
591	vp->v_tag = nvp->v_tag;
592	nvp->v_type = VNON;
593	insmntque(vp, mp);
594	return (vp);
595}
596
597/*
598 * Grab a particular vnode from the free list, increment its
599 * reference count and lock it. If the vnode lock bit is set,
600 * the vnode is being eliminated in vgone. In that case, we
601 * cannot grab it, so the process is awakened when the
602 * transition is completed, and an error code is returned to
603 * indicate that the vnode is no longer usable, possibly
604 * having been changed to a new file system type.
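 *
 * The handshake with vclean() works as follows: vclean() keeps VXLOCK
 * set for the duration of the cleaning, so a vget() that finds VXLOCK
 * set marks the vnode VXWANT and sleeps; vclean() issues the wakeup
 * once the vnode has been reclaimed and VXLOCK cleared.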
605 */
606int
607vget(struct vnode *vp, int flags, struct proc *p)
608{
609	int error, s, onfreelist;
610
611	/*
612	 * If the vnode is in the process of being cleaned out for
613	 * another use, we wait for the cleaning to finish and then
614	 * return failure. Cleaning is determined by checking that
615	 * the VXLOCK flag is set.
616	 */
617
618	if (vp->v_flag & VXLOCK) {
619		if (flags & LK_NOWAIT) {
620			return (EBUSY);
621		}
622
623		vp->v_flag |= VXWANT;
624		tsleep(vp, PINOD, "vget", 0);
625		return (ENOENT);
626	}
627
628	onfreelist = vp->v_bioflag & VBIOONFREELIST;
629	if (vp->v_usecount == 0 && onfreelist) {
630		s = splbio();
631		if (vp->v_holdcnt > 0)
632			TAILQ_REMOVE(&vnode_hold_list, vp, v_freelist);
633		else
634			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
635		vp->v_bioflag &= ~VBIOONFREELIST;
636		splx(s);
637	}
638
639 	vp->v_usecount++;
640	if (flags & LK_TYPE_MASK) {
641		if ((error = vn_lock(vp, flags, p)) != 0) {
642			vp->v_usecount--;
643			if (vp->v_usecount == 0 && onfreelist)
644				vputonfreelist(vp);
645		}
646		return (error);
647	}
648
649	return (0);
650}
651
652
653/* Vnode reference. */
654void
655vref(struct vnode *vp)
656{
657#ifdef DIAGNOSTIC
658	if (vp->v_usecount == 0)
659		panic("vref used where vget required");
660	if (vp->v_type == VNON)
661		panic("vref on a VNON vnode");
662#endif
663	vp->v_usecount++;
664}
665
666void
667vputonfreelist(struct vnode *vp)
668{
669	int s;
670	struct freelst *lst;
671
672	s = splbio();
673#ifdef DIAGNOSTIC
674	if (vp->v_usecount != 0)
675		panic("Use count is not zero!");
676
677	if (vp->v_bioflag & VBIOONFREELIST) {
678		vprint("vnode already on free list: ", vp);
679		panic("vnode already on free list");
680	}
681#endif
682
683	vp->v_bioflag |= VBIOONFREELIST;
684
685	if (vp->v_holdcnt > 0)
686		lst = &vnode_hold_list;
687	else
688		lst = &vnode_free_list;
689
690	if (vp->v_type == VBAD)
691		TAILQ_INSERT_HEAD(lst, vp, v_freelist);
692	else
693		TAILQ_INSERT_TAIL(lst, vp, v_freelist);
694
695	splx(s);
696}
697
698/*
699 * vput(), just unlock and vrele()
700 */
701void
702vput(struct vnode *vp)
703{
704	struct proc *p = curproc;
705
706#ifdef DIAGNOSTIC
707	if (vp == NULL)
708		panic("vput: null vp");
709#endif
710
711#ifdef DIAGNOSTIC
712	if (vp->v_usecount == 0) {
713		vprint("vput: bad ref count", vp);
714		panic("vput: ref cnt");
715	}
716#endif
717	vp->v_usecount--;
718	if (vp->v_usecount > 0) {
719		VOP_UNLOCK(vp, p);
720		return;
721	}
722
723#ifdef DIAGNOSTIC
724	if (vp->v_writecount != 0) {
725		vprint("vput: bad writecount", vp);
726		panic("vput: v_writecount != 0");
727	}
728#endif
729
730	VOP_INACTIVE(vp, p);
731
732	if (vp->v_usecount == 0 && !(vp->v_bioflag & VBIOONFREELIST))
733		vputonfreelist(vp);
734}
735
736/*
737 * Vnode release - use for active VNODES.
738 * If count drops to zero, call inactive routine and return to freelist.
739 * Returns 0 if it did not sleep.
740 */
741int
742vrele(struct vnode *vp)
743{
744	struct proc *p = curproc;
745
746#ifdef DIAGNOSTIC
747	if (vp == NULL)
748		panic("vrele: null vp");
749#endif
750#ifdef DIAGNOSTIC
751	if (vp->v_usecount == 0) {
752		vprint("vrele: bad ref count", vp);
753		panic("vrele: ref cnt");
754	}
755#endif
756	vp->v_usecount--;
757	if (vp->v_usecount > 0) {
758		return (0);
759	}
760
761#ifdef DIAGNOSTIC
762	if (vp->v_writecount != 0) {
763		vprint("vrele: bad writecount", vp);
764		panic("vrele: v_writecount != 0");
765	}
766#endif
767
768	if (vn_lock(vp, LK_EXCLUSIVE, p)) {
769#ifdef DIAGNOSTIC
770		vprint("vrele: cannot lock", vp);
771#endif
772		return (1);
773	}
774
775	VOP_INACTIVE(vp, p);
776
777	if (vp->v_usecount == 0 && !(vp->v_bioflag & VBIOONFREELIST))
778		vputonfreelist(vp);
779	return (1);
780}
781
782/* Page or buffer structure gets a reference. */
783void
784vhold(struct vnode *vp)
785{
786	/*
787	 * If it is on the freelist and the hold count is currently
788	 * zero, move it to the hold list.
789	 */
790	if ((vp->v_bioflag & VBIOONFREELIST) &&
791	    vp->v_holdcnt == 0 && vp->v_usecount == 0) {
792		TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
793		TAILQ_INSERT_TAIL(&vnode_hold_list, vp, v_freelist);
794	}
795	vp->v_holdcnt++;
796}
797
798/* Lose interest in a vnode. */
799void
800vdrop(struct vnode *vp)
801{
802#ifdef DIAGNOSTIC
803	if (vp->v_holdcnt == 0)
804		panic("vdrop: zero holdcnt");
805#endif
806
807	vp->v_holdcnt--;
808
809	/*
810	 * If it is on the holdlist and the hold count drops to
811	 * zero, move it to the free list.
812	 */
813	if ((vp->v_bioflag & VBIOONFREELIST) &&
814	    vp->v_holdcnt == 0 && vp->v_usecount == 0) {
815		TAILQ_REMOVE(&vnode_hold_list, vp, v_freelist);
816		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
817	}
818}
819
820/*
821 * Remove any vnodes in the vnode table belonging to mount point mp.
822 *
823 * If MNT_NOFORCE is specified, there should not be any active vnodes;
824 * return an error if any are found (nb: this is a user error, not a
825 * system error). If MNT_FORCE is specified, detach any active vnodes
826 * that are found.
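 *
 * At this level the behaviour is selected by the vflush() flags:
 * SKIPSYSTEM skips vnodes marked VSYSTEM, WRITECLOSE only flushes
 * regular files open for writing, IGNORECLEAN skips vnodes with no
 * dirty buffers, and FORCECLOSE forcibly reclaims active vnodes,
 * reverting block and character device vnodes to anonymous devices.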
827 */
828#ifdef DEBUG
829int busyprt = 0;	/* print out busy vnodes */
830struct ctldebug debug1 = { "busyprt", &busyprt };
831#endif
832
833int
834vfs_mount_foreach_vnode(struct mount *mp,
835    int (*func)(struct vnode *, void *), void *arg) {
836	struct vnode *vp, *nvp;
837	int error = 0;
838
839loop:
840	LIST_FOREACH_SAFE(vp, &mp->mnt_vnodelist, v_mntvnodes, nvp) {
841		if (vp->v_mount != mp)
842			goto loop;
843
844		error = func(vp, arg);
845
846		if (error != 0)
847			break;
848	}
849
850	return (error);
851}
852
853struct vflush_args {
854	struct vnode *skipvp;
855	int busy;
856	int flags;
857};
858
859int
860vflush_vnode(struct vnode *vp, void *arg)
861{
862	struct vflush_args *va = arg;
863	struct proc *p = curproc;
864
865	if (vp == va->skipvp) {
866		return (0);
867	}
868
869	if ((va->flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
870		return (0);
871	}
872
873	/*
874	 * If WRITECLOSE is set, only flush out regular file
875	 * vnodes open for writing.
876	 */
877	if ((va->flags & WRITECLOSE) &&
878	    (vp->v_writecount == 0 || vp->v_type != VREG)) {
879		return (0);
880	}
881
882	/*
883	 * With v_usecount == 0, all we need to do is clear
884	 * out the vnode data structures and we are done.
885	 */
886	if (vp->v_usecount == 0) {
887		vgonel(vp, p);
888		return (0);
889	}
890
891	/*
892	 * If FORCECLOSE is set, forcibly close the vnode.
893	 * For block or character devices, revert to an
894	 * anonymous device. For all other files, just kill them.
895	 */
896	if (va->flags & FORCECLOSE) {
897		if (vp->v_type != VBLK && vp->v_type != VCHR) {
898			vgonel(vp, p);
899		} else {
900			vclean(vp, 0, p);
901			vp->v_op = &spec_vops;
902			insmntque(vp, NULL);
903		}
904		return (0);
905	}
906
907	/*
908	 * If IGNORECLEAN is set, we are allowed to ignore vnodes which
909	 * don't have changes pending to disk.
910	 * XXX Might be nice to check per-fs "inode" flags, but
911	 * generally the filesystem is sync'd already, right?
912	 */
913	if ((va->flags & IGNORECLEAN) &&
914	    LIST_EMPTY(&vp->v_dirtyblkhd))
915		return (0);
916
917#ifdef DEBUG
918	if (busyprt)
919		vprint("vflush: busy vnode", vp);
920#endif
921	va->busy++;
922	return (0);
923}
924
925int
926vflush(struct mount *mp, struct vnode *skipvp, int flags)
927{
928	struct vflush_args va;
929	va.skipvp = skipvp;
930	va.busy = 0;
931	va.flags = flags;
932
933	vfs_mount_foreach_vnode(mp, vflush_vnode, &va);
934
935	if (va.busy)
936		return (EBUSY);
937	return (0);
938}
939
940/*
941 * Disassociate the underlying file system from a vnode.
942 */
943void
944vclean(struct vnode *vp, int flags, struct proc *p)
945{
946	int active;
947
948	/*
949	 * Check to see if the vnode is in use.
950	 * If so we have to reference it before we clean it out
951	 * so that its count cannot fall to zero and generate a
952	 * race against ourselves to recycle it.
953	 */
954	if ((active = vp->v_usecount) != 0)
955		vp->v_usecount++;
956
957	/*
958	 * Prevent the vnode from being recycled or
959	 * brought into use while we clean it out.
960	 */
961	if (vp->v_flag & VXLOCK)
962		panic("vclean: deadlock");
963	vp->v_flag |= VXLOCK;
964	/*
965	 * Even if the count is zero, the VOP_INACTIVE routine may still
966	 * have the object locked while it cleans it out. The VOP_LOCK
967	 * ensures that the VOP_INACTIVE routine is done with its work.
968	 * For active vnodes, it ensures that no other activity can
969	 * occur while the underlying object is being cleaned out.
970	 */
971	VOP_LOCK(vp, LK_DRAIN | LK_EXCLUSIVE, p);
972
973	/*
974	 * Clean out any VM data associated with the vnode.
975	 */
976	uvm_vnp_terminate(vp);
977	/*
978	 * Clean out any buffers associated with the vnode.
979	 */
980	if (flags & DOCLOSE)
981		vinvalbuf(vp, V_SAVE, NOCRED, p, 0, 0);
982	/*
983	 * If purging an active vnode, it must be closed and
984	 * deactivated before being reclaimed. Note that the
985	 * VOP_INACTIVE will unlock the vnode
986	 */
987	if (active) {
988		if (flags & DOCLOSE)
989			VOP_CLOSE(vp, FNONBLOCK, NOCRED, p);
990		VOP_INACTIVE(vp, p);
991	} else {
992		/*
993		 * Any other processes trying to obtain this lock must first
994		 * wait for VXLOCK to clear, then call the new lock operation.
995		 */
996		VOP_UNLOCK(vp, p);
997	}
998
999	/*
1000	 * Reclaim the vnode.
1001	 */
1002	if (VOP_RECLAIM(vp, p))
1003		panic("vclean: cannot reclaim");
1004	if (active) {
1005		vp->v_usecount--;
1006		if (vp->v_usecount == 0) {
1007			if (vp->v_holdcnt > 0)
1008				panic("vclean: not clean");
1009			vputonfreelist(vp);
1010		}
1011	}
1012	cache_purge(vp);
1013
1014	/*
1015	 * Done with purge, notify sleepers of the grim news.
1016	 */
1017	vp->v_op = &dead_vops;
1018	VN_KNOTE(vp, NOTE_REVOKE);
1019	vp->v_tag = VT_NON;
1020	vp->v_flag &= ~VXLOCK;
1021#ifdef VFSLCKDEBUG
1022	vp->v_flag &= ~VLOCKSWORK;
1023#endif
1024	if (vp->v_flag & VXWANT) {
1025		vp->v_flag &= ~VXWANT;
1026		wakeup(vp);
1027	}
1028}
1029
1030/*
1031 * Recycle an unused vnode to the front of the free list.
1032 */
1033int
1034vrecycle(struct vnode *vp, struct proc *p)
1035{
1036	if (vp->v_usecount == 0) {
1037		vgonel(vp, p);
1038		return (1);
1039	}
1040	return (0);
1041}
1042
1043/*
1044 * Eliminate all activity associated with a vnode
1045 * in preparation for reuse.
1046 */
1047void
1048vgone(struct vnode *vp)
1049{
1050	struct proc *p = curproc;
1051	vgonel(vp, p);
1052}
1053
1054/*
1055 * vgone, with struct proc.
1056 */
1057void
1058vgonel(struct vnode *vp, struct proc *p)
1059{
1060	struct vnode *vq;
1061	struct vnode *vx;
1062
1063	/*
1064	 * If a vgone (or vclean) is already in progress,
1065	 * wait until it is done and return.
1066	 */
1067	if (vp->v_flag & VXLOCK) {
1068		vp->v_flag |= VXWANT;
1069		tsleep(vp, PINOD, "vgone", 0);
1070		return;
1071	}
1072
1073	/*
1074	 * Clean out the filesystem specific data.
1075	 */
1076	vclean(vp, DOCLOSE, p);
1077	/*
1078	 * Delete from old mount point vnode list, if on one.
1079	 */
1080	if (vp->v_mount != NULL)
1081		insmntque(vp, NULL);
1082	/*
1083	 * If special device, remove it from special device alias list
1084	 * if it is on one.
1085	 */
1086	if ((vp->v_type == VBLK || vp->v_type == VCHR) && vp->v_specinfo != 0) {
1087		if ((vp->v_flag & VALIASED) == 0 && vp->v_type == VCHR &&
1088		    (cdevsw[major(vp->v_rdev)].d_flags & D_CLONE) &&
1089		    (minor(vp->v_rdev) >> CLONE_SHIFT == 0)) {
1090			free(vp->v_specbitmap, M_VNODE, CLONE_MAPSZ);
1091		}
1092		if (*vp->v_hashchain == vp) {
1093			*vp->v_hashchain = vp->v_specnext;
1094		} else {
1095			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
1096				if (vq->v_specnext != vp)
1097					continue;
1098				vq->v_specnext = vp->v_specnext;
1099				break;
1100			}
1101			if (vq == NULL)
1102				panic("missing bdev");
1103		}
1104		if (vp->v_flag & VALIASED) {
1105			vx = NULL;
1106			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
1107				if (vq->v_rdev != vp->v_rdev ||
1108				    vq->v_type != vp->v_type)
1109					continue;
1110				if (vx)
1111					break;
1112				vx = vq;
1113			}
1114			if (vx == NULL)
1115				panic("missing alias");
1116			if (vq == NULL)
1117				vx->v_flag &= ~VALIASED;
1118			vp->v_flag &= ~VALIASED;
1119		}
1120		free(vp->v_specinfo, M_VNODE, sizeof(struct specinfo));
1121		vp->v_specinfo = NULL;
1122	}
1123	/*
1124	 * If it is on the freelist and not already at the head,
1125	 * move it to the head of the list.
1126	 */
1127	vp->v_type = VBAD;
1128
1129	/*
1130	 * Move onto the free list, unless we were called from
1131	 * getnewvnode and we're not on any free list
1132	 */
1133	if (vp->v_usecount == 0 &&
1134	    (vp->v_bioflag & VBIOONFREELIST)) {
1135		int s;
1136
1137		s = splbio();
1138
1139		if (vp->v_holdcnt > 0)
1140			panic("vgonel: not clean");
1141
1142		if (TAILQ_FIRST(&vnode_free_list) != vp) {
1143			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
1144			TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
1145		}
1146		splx(s);
1147	}
1148}
1149
1150/*
1151 * Lookup a vnode by device number.
1152 */
1153int
1154vfinddev(dev_t dev, enum vtype type, struct vnode **vpp)
1155{
1156	struct vnode *vp;
1157	int rc = 0;
1158
1159	for (vp = speclisth[SPECHASH(dev)]; vp; vp = vp->v_specnext) {
1160		if (dev != vp->v_rdev || type != vp->v_type)
1161			continue;
1162		*vpp = vp;
1163		rc = 1;
1164		break;
1165	}
1166	return (rc);
1167}
1168
1169/*
1170 * Revoke all the vnodes corresponding to the specified minor number
1171 * range (endpoints inclusive) of the specified major.
1172 */
1173void
1174vdevgone(int maj, int minl, int minh, enum vtype type)
1175{
1176	struct vnode *vp;
1177	int mn;
1178
1179	for (mn = minl; mn <= minh; mn++)
1180		if (vfinddev(makedev(maj, mn), type, &vp))
1181			VOP_REVOKE(vp, REVOKEALL);
1182}
1183
1184/*
1185 * Calculate the total number of references to a special device.
1186 */
1187int
1188vcount(struct vnode *vp)
1189{
1190	struct vnode *vq, *vnext;
1191	int count;
1192
1193loop:
1194	if ((vp->v_flag & VALIASED) == 0)
1195		return (vp->v_usecount);
1196	for (count = 0, vq = *vp->v_hashchain; vq; vq = vnext) {
1197		vnext = vq->v_specnext;
1198		if (vq->v_rdev != vp->v_rdev || vq->v_type != vp->v_type)
1199			continue;
1200		/*
1201		 * Alias, but not in use, so flush it out.
1202		 */
1203		if (vq->v_usecount == 0 && vq != vp) {
1204			vgone(vq);
1205			goto loop;
1206		}
1207		count += vq->v_usecount;
1208	}
1209	return (count);
1210}
1211
1212#if defined(DEBUG) || defined(DIAGNOSTIC)
1213/*
1214 * Print out a description of a vnode.
1215 */
1216static char *typename[] =
1217   { "VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD" };
1218
1219void
1220vprint(char *label, struct vnode *vp)
1221{
1222	char buf[64];
1223
1224	if (label != NULL)
1225		printf("%s: ", label);
1226	printf("%p, type %s, use %u, write %u, hold %u,",
1227		vp, typename[vp->v_type], vp->v_usecount, vp->v_writecount,
1228		vp->v_holdcnt);
1229	buf[0] = '\0';
1230	if (vp->v_flag & VROOT)
1231		strlcat(buf, "|VROOT", sizeof buf);
1232	if (vp->v_flag & VTEXT)
1233		strlcat(buf, "|VTEXT", sizeof buf);
1234	if (vp->v_flag & VSYSTEM)
1235		strlcat(buf, "|VSYSTEM", sizeof buf);
1236	if (vp->v_flag & VXLOCK)
1237		strlcat(buf, "|VXLOCK", sizeof buf);
1238	if (vp->v_flag & VXWANT)
1239		strlcat(buf, "|VXWANT", sizeof buf);
1240	if (vp->v_bioflag & VBIOWAIT)
1241		strlcat(buf, "|VBIOWAIT", sizeof buf);
1242	if (vp->v_bioflag & VBIOONFREELIST)
1243		strlcat(buf, "|VBIOONFREELIST", sizeof buf);
1244	if (vp->v_bioflag & VBIOONSYNCLIST)
1245		strlcat(buf, "|VBIOONSYNCLIST", sizeof buf);
1246	if (vp->v_flag & VALIASED)
1247		strlcat(buf, "|VALIASED", sizeof buf);
1248	if (buf[0] != '\0')
1249		printf(" flags (%s)", &buf[1]);
1250	if (vp->v_data == NULL) {
1251		printf("\n");
1252	} else {
1253		printf("\n\t");
1254		VOP_PRINT(vp);
1255	}
1256}
1257#endif /* DEBUG || DIAGNOSTIC */
1258
1259#ifdef DEBUG
1260/*
1261 * List all of the locked vnodes in the system.
1262 * Called when debugging the kernel.
1263 */
1264void
1265printlockedvnodes(void)
1266{
1267	struct mount *mp;
1268	struct vnode *vp;
1269
1270	printf("Locked vnodes\n");
1271
1272	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
1273		if (vfs_busy(mp, VB_READ|VB_NOWAIT))
1274			continue;
1275		LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
1276			if (VOP_ISLOCKED(vp))
1277				vprint(NULL, vp);
1278		}
1279		vfs_unbusy(mp);
1280 	}
1281
1282}
1283#endif
1284
1285/*
1286 * Top level filesystem related information gathering.
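 *
 * From userland these nodes are reached via sysctl(2) with a
 * {CTL_VFS, ...} name.  A sketch for reading the highest filesystem
 * type number (error handling elided):
 *
 *	int mib[3] = { CTL_VFS, VFS_GENERIC, VFS_MAXTYPENUM };
 *	int max;
 *	size_t len = sizeof(max);
 *	sysctl(mib, 3, &max, &len, NULL, 0);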
1287 */
1288int
1289vfs_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
1290    size_t newlen, struct proc *p)
1291{
1292	struct vfsconf *vfsp, *tmpvfsp;
1293	int ret;
1294
1295	/* all sysctl names at this level are at least name and field */
1296	if (namelen < 2)
1297		return (ENOTDIR);		/* overloaded */
1298
1299	if (name[0] != VFS_GENERIC) {
1300		for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
1301			if (vfsp->vfc_typenum == name[0])
1302				break;
1303
1304		if (vfsp == NULL || vfsp->vfc_vfsops->vfs_sysctl == NULL)
1305			return (EOPNOTSUPP);
1306
1307		return ((*vfsp->vfc_vfsops->vfs_sysctl)(&name[1], namelen - 1,
1308		    oldp, oldlenp, newp, newlen, p));
1309	}
1310
1311	switch (name[1]) {
1312	case VFS_MAXTYPENUM:
1313		return (sysctl_rdint(oldp, oldlenp, newp, maxvfsconf));
1314
1315	case VFS_CONF:
1316		if (namelen < 3)
1317			return (ENOTDIR);	/* overloaded */
1318
1319		for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
1320			if (vfsp->vfc_typenum == name[2])
1321				break;
1322
1323		if (vfsp == NULL)
1324			return (EOPNOTSUPP);
1325
1326		/* Make a copy, clear out kernel pointers */
1327		tmpvfsp = malloc(sizeof(*tmpvfsp), M_TEMP, M_WAITOK|M_ZERO);
1328		memcpy(tmpvfsp, vfsp, sizeof(*tmpvfsp));
1329		tmpvfsp->vfc_vfsops = NULL;
1330		tmpvfsp->vfc_next = NULL;
1331
1332		ret = sysctl_rdstruct(oldp, oldlenp, newp, tmpvfsp,
1333		    sizeof(struct vfsconf));
1334
1335		free(tmpvfsp, M_TEMP, sizeof(*tmpvfsp));
1336		return (ret);
1337	case VFS_BCACHESTAT:	/* buffer cache statistics */
1338		ret = sysctl_rdstruct(oldp, oldlenp, newp, &bcstats,
1339		    sizeof(struct bcachestats));
1340		return(ret);
1341	}
1342	return (EOPNOTSUPP);
1343}
1344
1345/*
1346 * Check to see if a filesystem is mounted on a block device.
1347 */
1348int
1349vfs_mountedon(struct vnode *vp)
1350{
1351	struct vnode *vq;
1352	int error = 0;
1353
1354 	if (vp->v_specmountpoint != NULL)
1355		return (EBUSY);
1356	if (vp->v_flag & VALIASED) {
1357		for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
1358			if (vq->v_rdev != vp->v_rdev ||
1359			    vq->v_type != vp->v_type)
1360				continue;
1361			if (vq->v_specmountpoint != NULL) {
1362				error = EBUSY;
1363				break;
1364			}
1365 		}
1366	}
1367	return (error);
1368}
1369
1370#ifdef NFSSERVER
1371/*
1372 * Build hash lists of net addresses and hang them off the mount point.
1373 * Called by vfs_export() to set up the lists of export addresses.
1374 */
1375int
1376vfs_hang_addrlist(struct mount *mp, struct netexport *nep,
1377    struct export_args *argp)
1378{
1379	struct netcred *np;
1380	struct radix_node_head *rnh;
1381	int nplen, i;
1382	struct radix_node *rn;
1383	struct sockaddr *saddr, *smask = 0;
1384	int error;
1385
1386	if (argp->ex_addrlen == 0) {
1387		if (mp->mnt_flag & MNT_DEFEXPORTED)
1388			return (EPERM);
1389		np = &nep->ne_defexported;
1390		/* fill in the kernel's ucred from userspace's xucred */
1391		if ((error = crfromxucred(&np->netc_anon, &argp->ex_anon)))
1392			return (error);
1393		mp->mnt_flag |= MNT_DEFEXPORTED;
1394		goto finish;
1395	}
1396	if (argp->ex_addrlen > MLEN || argp->ex_masklen > MLEN ||
1397	    argp->ex_addrlen < 0 || argp->ex_masklen < 0)
1398		return (EINVAL);
1399	nplen = sizeof(struct netcred) + argp->ex_addrlen + argp->ex_masklen;
1400	np = (struct netcred *)malloc(nplen, M_NETADDR, M_WAITOK|M_ZERO);
1401	saddr = (struct sockaddr *)(np + 1);
1402	error = copyin(argp->ex_addr, saddr, argp->ex_addrlen);
1403	if (error)
1404		goto out;
1405	if (saddr->sa_len > argp->ex_addrlen)
1406		saddr->sa_len = argp->ex_addrlen;
1407	if (argp->ex_masklen) {
1408		smask = (struct sockaddr *)((caddr_t)saddr + argp->ex_addrlen);
1409		error = copyin(argp->ex_mask, smask, argp->ex_masklen);
1410		if (error)
1411			goto out;
1412		if (smask->sa_len > argp->ex_masklen)
1413			smask->sa_len = argp->ex_masklen;
1414	}
1415	/* fill in the kernel's ucred from userspace's xucred */
1416	if ((error = crfromxucred(&np->netc_anon, &argp->ex_anon)))
1417		goto out;
1418	i = saddr->sa_family;
1419	switch (i) {
1420	case AF_INET:
1421		if ((rnh = nep->ne_rtable_inet) == NULL) {
1422			if (!rn_inithead((void **)&nep->ne_rtable_inet,
1423			    offsetof(struct sockaddr_in, sin_addr))) {
1424				error = ENOBUFS;
1425				goto out;
1426			}
1427			rnh = nep->ne_rtable_inet;
1428		}
1429		break;
1430	default:
1431		error = EINVAL;
1432		goto out;
1433	}
1434	rn = rn_addroute(saddr, smask, rnh, np->netc_rnodes, 0);
1435	if (rn == 0 || np != (struct netcred *)rn) { /* already exists */
1436		error = EPERM;
1437		goto out;
1438	}
1439finish:
1440	np->netc_exflags = argp->ex_flags;
1441	return (0);
1442out:
1443	free(np, M_NETADDR, nplen);
1444	return (error);
1445}
1446
1447int
1448vfs_free_netcred(struct radix_node *rn, void *w, u_int id)
1449{
1450	struct radix_node_head *rnh = (struct radix_node_head *)w;
1451
1452	rn_delete(rn->rn_key, rn->rn_mask, rnh, NULL);
1453	free(rn, M_NETADDR, 0);
1454	return (0);
1455}
1456
1457/*
1458 * Free the net address hash lists that are hanging off the mount points.
1459 */
1460void
1461vfs_free_addrlist(struct netexport *nep)
1462{
1463	struct radix_node_head *rnh;
1464
1465	if ((rnh = nep->ne_rtable_inet) != NULL) {
1466		rn_walktree(rnh, vfs_free_netcred, rnh);
1467		free(rnh, M_RTABLE, 0);
1468		nep->ne_rtable_inet = NULL;
1469	}
1470}
1471#endif /* NFSSERVER */
1472
1473int
1474vfs_export(struct mount *mp, struct netexport *nep, struct export_args *argp)
1475{
1476#ifdef NFSSERVER
1477	int error;
1478
1479	if (argp->ex_flags & MNT_DELEXPORT) {
1480		vfs_free_addrlist(nep);
1481		mp->mnt_flag &= ~(MNT_EXPORTED | MNT_DEFEXPORTED);
1482	}
1483	if (argp->ex_flags & MNT_EXPORTED) {
1484		if ((error = vfs_hang_addrlist(mp, nep, argp)) != 0)
1485			return (error);
1486		mp->mnt_flag |= MNT_EXPORTED;
1487	}
1488	return (0);
1489#else
1490	return (ENOTSUP);
1491#endif /* NFSSERVER */
1492}
1493
1494struct netcred *
1495vfs_export_lookup(struct mount *mp, struct netexport *nep, struct mbuf *nam)
1496{
1497#ifdef NFSSERVER
1498	struct netcred *np;
1499	struct radix_node_head *rnh;
1500	struct sockaddr *saddr;
1501
1502	np = NULL;
1503	if (mp->mnt_flag & MNT_EXPORTED) {
1504		/*
1505		 * Lookup in the export list first.
1506		 */
1507		if (nam != NULL) {
1508			saddr = mtod(nam, struct sockaddr *);
1509			switch(saddr->sa_family) {
1510			case AF_INET:
1511				rnh = nep->ne_rtable_inet;
1512				break;
1513			default:
1514				rnh = NULL;
1515				break;
1516			}
1517			if (rnh != NULL)
1518				np = (struct netcred *)rn_match(saddr, rnh);
1519		}
1520		/*
1521		 * If no address match, use the default if it exists.
1522		 */
1523		if (np == NULL && mp->mnt_flag & MNT_DEFEXPORTED)
1524			np = &nep->ne_defexported;
1525	}
1526	return (np);
1527#else
1528	return (NULL);
1529#endif /* NFSSERVER */
1530}
1531
1532/*
1533 * Do the usual access checking.
1534 * file_mode, uid and gid are from the vnode in question,
1535 * while acc_mode and cred are from the VOP_ACCESS parameter list
1536 */
1537int
1538vaccess(enum vtype type, mode_t file_mode, uid_t uid, gid_t gid,
1539    mode_t acc_mode, struct ucred *cred)
1540{
1541	mode_t mask;
1542
1543	/* User id 0 always gets read/write access. */
1544	if (cred->cr_uid == 0) {
1545		/* For VEXEC, at least one of the execute bits must be set. */
1546		if ((acc_mode & VEXEC) && type != VDIR &&
1547		    (file_mode & (S_IXUSR|S_IXGRP|S_IXOTH)) == 0)
1548			return EACCES;
1549		return 0;
1550	}
1551
1552	mask = 0;
1553
1554	/* Otherwise, check the owner. */
1555	if (cred->cr_uid == uid) {
1556		if (acc_mode & VEXEC)
1557			mask |= S_IXUSR;
1558		if (acc_mode & VREAD)
1559			mask |= S_IRUSR;
1560		if (acc_mode & VWRITE)
1561			mask |= S_IWUSR;
1562		return (file_mode & mask) == mask ? 0 : EACCES;
1563	}
1564
1565	/* Otherwise, check the groups. */
1566	if (groupmember(gid, cred)) {
1567		if (acc_mode & VEXEC)
1568			mask |= S_IXGRP;
1569		if (acc_mode & VREAD)
1570			mask |= S_IRGRP;
1571		if (acc_mode & VWRITE)
1572			mask |= S_IWGRP;
1573		return (file_mode & mask) == mask ? 0 : EACCES;
1574	}
1575
1576	/* Otherwise, check everyone else. */
1577	if (acc_mode & VEXEC)
1578		mask |= S_IXOTH;
1579	if (acc_mode & VREAD)
1580		mask |= S_IROTH;
1581	if (acc_mode & VWRITE)
1582		mask |= S_IWOTH;
1583	return (file_mode & mask) == mask ? 0 : EACCES;
1584}
1585
1586int
1587vfs_readonly(struct mount *mp, struct proc *p)
1588{
1589	int error;
1590
1591	error = vfs_busy(mp, VB_WRITE|VB_WAIT);
1592	if (error) {
1593		printf("%s: busy\n", mp->mnt_stat.f_mntonname);
1594		return (error);
1595	}
1596	uvm_vnp_sync(mp);
1597	error = VFS_SYNC(mp, MNT_WAIT, p->p_ucred, p);
1598	if (error) {
1599		printf("%s: failed to sync\n", mp->mnt_stat.f_mntonname);
1600		vfs_unbusy(mp);
1601		return (error);
1602	}
1603
1604	mp->mnt_flag |= MNT_UPDATE | MNT_RDONLY;
1605	mp->mnt_flag &= ~MNT_SOFTDEP;
1606	error = VFS_MOUNT(mp, mp->mnt_stat.f_mntonname, NULL, NULL, curproc);
1607	if (error) {
1608		printf("%s: failed to remount rdonly, error %d\n",
1609		    mp->mnt_stat.f_mntonname, error);
1610		vfs_unbusy(mp);
1611		return (error);
1612	}
1613	if (mp->mnt_syncer != NULL)
1614		vgone(mp->mnt_syncer);
1615	mp->mnt_syncer = NULL;
1616	vfs_unbusy(mp);
1617	return (error);
1618}
1619
1620/*
1621 * Remount all file systems read-only.
1622 * We traverse the list in reverse order under the assumption that doing so
1623 * will avoid needing to worry about dependencies.
1624 */
1625void
1626vfs_rofs(struct proc *p)
1627{
1628	struct mount *mp, *nmp;
1629
1630	TAILQ_FOREACH_REVERSE_SAFE(mp, &mountlist, mntlist, mnt_list, nmp) {
1631		/* XXX Here is a race, the next pointer is not locked. */
1632		(void) vfs_readonly(mp, p);
1633	}
1634}
1635
1636/*
1637 * Sync and unmount file systems before shutting down.
1638 */
1639void
1640vfs_shutdown(struct proc *p)
1641{
1642#ifdef ACCOUNTING
1643	acct_shutdown();
1644#endif
1645
1646	printf("syncing disks... ");
1647
1648	if (panicstr == 0) {
1649		/* Take all filesystems to read-only */
1650		sys_sync(p, NULL, NULL);
1651		vfs_rofs(p);
1652	}
1653
1654	if (vfs_syncwait(p, 1))
1655		printf("giving up\n");
1656	else
1657		printf("done\n");
1658
1659#if NSOFTRAID > 0
1660	sr_shutdown();
1661#endif
1662}
1663
1664/*
1665 * perform sync() operation and wait for buffers to flush.
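 *
 * Returns the number of buffers still busy after the final pass, so a
 * zero return means everything was flushed.  On MULTIPROCESSOR kernels
 * the kernel lock is released around the DELAY() between passes so
 * other CPUs can make progress.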
1666 */
1667int
1668vfs_syncwait(struct proc *p, int verbose)
1669{
1670	struct buf *bp;
1671	int iter, nbusy, dcount, s;
1672#ifdef MULTIPROCESSOR
1673	int hold_count;
1674#endif
1675
1676	sys_sync(p, NULL, NULL);
1677
1678	/* Wait for sync to finish. */
1679	dcount = 10000;
1680	for (iter = 0; iter < 20; iter++) {
1681		nbusy = 0;
1682		LIST_FOREACH(bp, &bufhead, b_list) {
1683			if ((bp->b_flags & (B_BUSY|B_INVAL|B_READ)) == B_BUSY)
1684				nbusy++;
1685			/*
1686			 * With soft updates, some buffers that are
1687			 * written will be remarked as dirty until other
1688			 * buffers are written.
1689			 */
1690			if (bp->b_flags & B_DELWRI) {
1691				s = splbio();
1692				bremfree(bp);
1693				buf_acquire(bp);
1694				splx(s);
1695				nbusy++;
1696				bawrite(bp);
1697				if (dcount-- <= 0) {
1698					if (verbose)
1699						printf("softdep ");
1700					return 1;
1701				}
1702			}
1703		}
1704		if (nbusy == 0)
1705			break;
1706		if (verbose)
1707			printf("%d ", nbusy);
1708#ifdef MULTIPROCESSOR
1709		if (_kernel_lock_held())
1710			hold_count = __mp_release_all(&kernel_lock);
1711		else
1712			hold_count = 0;
1713#endif
1714		DELAY(40000 * iter);
1715#ifdef MULTIPROCESSOR
1716		if (hold_count)
1717			__mp_acquire_count(&kernel_lock, hold_count);
1718#endif
1719	}
1720
1721	return nbusy;
1722}
1723
1724/*
1725 * posix file system related system variables.
1726 */
1727int
1728fs_posix_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp,
1729    void *newp, size_t newlen, struct proc *p)
1730{
1731	/* all sysctl names at this level are terminal */
1732	if (namelen != 1)
1733		return (ENOTDIR);
1734
1735	switch (name[0]) {
1736	case FS_POSIX_SETUID:
1737		if (newp && securelevel > 0)
1738			return (EPERM);
1739		return(sysctl_int(oldp, oldlenp, newp, newlen, &suid_clear));
1740	default:
1741		return (EOPNOTSUPP);
1742	}
1743	/* NOTREACHED */
1744}
1745
1746/*
1747 * file system related system variables.
1748 */
1749int
1750fs_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
1751    size_t newlen, struct proc *p)
1752{
1753	sysctlfn *fn;
1754
1755	switch (name[0]) {
1756	case FS_POSIX:
1757		fn = fs_posix_sysctl;
1758		break;
1759	default:
1760		return (EOPNOTSUPP);
1761	}
1762	return (*fn)(name + 1, namelen - 1, oldp, oldlenp, newp, newlen, p);
1763}
1764
1765
1766/*
1767 * Routines dealing with vnodes and buffers
1768 */
1769
1770/*
1771 * Wait for all outstanding I/Os to complete
1772 *
1773 * Manipulates v_numoutput. Must be called at splbio()
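 *
 * Sleepers set VBIOWAIT and block on &vp->v_numoutput; vwakeup()
 * clears the flag and wakes them when the count drops to zero.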
1774 */
1775int
1776vwaitforio(struct vnode *vp, int slpflag, char *wmesg, int timeo)
1777{
1778	int error = 0;
1779
1780	splassert(IPL_BIO);
1781
1782	while (vp->v_numoutput) {
1783		vp->v_bioflag |= VBIOWAIT;
1784		error = tsleep(&vp->v_numoutput,
1785		    slpflag | (PRIBIO + 1), wmesg, timeo);
1786		if (error)
1787			break;
1788	}
1789
1790	return (error);
1791}
1792
1793/*
1794 * Update outstanding I/O count and do wakeup if requested.
1795 *
1796 * Manipulates v_numoutput. Must be called at splbio()
1797 */
1798void
1799vwakeup(struct vnode *vp)
1800{
1801	splassert(IPL_BIO);
1802
1803	if (vp != NULL) {
1804		if (vp->v_numoutput-- == 0)
1805			panic("vwakeup: neg numoutput");
1806		if ((vp->v_bioflag & VBIOWAIT) && vp->v_numoutput == 0) {
1807			vp->v_bioflag &= ~VBIOWAIT;
1808			wakeup(&vp->v_numoutput);
1809		}
1810	}
1811}
1812
1813/*
1814 * Flush out and invalidate all buffers associated with a vnode.
1815 * Called with the underlying object locked.
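 *
 * V_SAVE causes dirty buffers to be written out (via VOP_FSYNC) before
 * everything is invalidated; V_SAVEMETA spares buffers with negative
 * logical block numbers, i.e. indirect block metadata.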
1816 */
1817int
1818vinvalbuf(struct vnode *vp, int flags, struct ucred *cred, struct proc *p,
1819    int slpflag, int slptimeo)
1820{
1821	struct buf *bp;
1822	struct buf *nbp, *blist;
1823	int s, error;
1824
1825#ifdef VFSLCKDEBUG
1826	if ((vp->v_flag & VLOCKSWORK) && !VOP_ISLOCKED(vp))
1827		panic("vinvalbuf(): vp isn't locked");
1828#endif
1829
1830	if (flags & V_SAVE) {
1831		s = splbio();
1832		vwaitforio(vp, 0, "vinvalbuf", 0);
1833		if (!LIST_EMPTY(&vp->v_dirtyblkhd)) {
1834			splx(s);
1835			if ((error = VOP_FSYNC(vp, cred, MNT_WAIT, p)) != 0)
1836				return (error);
1837			s = splbio();
1838			if (vp->v_numoutput > 0 ||
1839			    !LIST_EMPTY(&vp->v_dirtyblkhd))
1840				panic("vinvalbuf: dirty bufs");
1841		}
1842		splx(s);
1843	}
1844loop:
1845	s = splbio();
1846	for (;;) {
1847		if ((blist = LIST_FIRST(&vp->v_cleanblkhd)) &&
1848		    (flags & V_SAVEMETA))
1849			while (blist && blist->b_lblkno < 0)
1850				blist = LIST_NEXT(blist, b_vnbufs);
1851		if (blist == NULL &&
1852		    (blist = LIST_FIRST(&vp->v_dirtyblkhd)) &&
1853		    (flags & V_SAVEMETA))
1854			while (blist && blist->b_lblkno < 0)
1855				blist = LIST_NEXT(blist, b_vnbufs);
1856		if (!blist)
1857			break;
1858
1859		for (bp = blist; bp; bp = nbp) {
1860			nbp = LIST_NEXT(bp, b_vnbufs);
1861			if (flags & V_SAVEMETA && bp->b_lblkno < 0)
1862				continue;
1863			if (bp->b_flags & B_BUSY) {
1864				bp->b_flags |= B_WANTED;
1865				error = tsleep(bp, slpflag | (PRIBIO + 1),
1866				    "vinvalbuf", slptimeo);
1867				if (error) {
1868					splx(s);
1869					return (error);
1870				}
1871				break;
1872			}
1873			bremfree(bp);
1874			/*
1875			 * XXX Since there are no node locks for NFS, I believe
1876			 * there is a slight chance that a delayed write will
1877			 * occur while sleeping just above, so check for it.
1878			 */
1879			if ((bp->b_flags & B_DELWRI) && (flags & V_SAVE)) {
1880				buf_acquire(bp);
1881				splx(s);
1882				(void) VOP_BWRITE(bp);
1883				goto loop;
1884			}
1885			buf_acquire_nomap(bp);
1886			bp->b_flags |= B_INVAL;
1887			brelse(bp);
1888		}
1889	}
1890	if (!(flags & V_SAVEMETA) &&
1891	    (!LIST_EMPTY(&vp->v_dirtyblkhd) || !LIST_EMPTY(&vp->v_cleanblkhd)))
1892		panic("vinvalbuf: flush failed");
1893	splx(s);
1894	return (0);
1895}
1896
1897void
1898vflushbuf(struct vnode *vp, int sync)
1899{
1900	struct buf *bp, *nbp;
1901	int s;
1902
1903loop:
1904	s = splbio();
1905	LIST_FOREACH_SAFE(bp, &vp->v_dirtyblkhd, b_vnbufs, nbp) {
1906		if ((bp->b_flags & B_BUSY))
1907			continue;
1908		if ((bp->b_flags & B_DELWRI) == 0)
1909			panic("vflushbuf: not dirty");
1910		bremfree(bp);
1911		buf_acquire(bp);
1912		splx(s);
1913		/*
1914		 * Wait for I/O associated with indirect blocks to complete,
1915		 * since there is no way to quickly wait for them below.
1916		 */
1917		if (bp->b_vp == vp || sync == 0)
1918			(void) bawrite(bp);
1919		else
1920			(void) bwrite(bp);
1921		goto loop;
1922	}
1923	if (sync == 0) {
1924		splx(s);
1925		return;
1926	}
1927	vwaitforio(vp, 0, "vflushbuf", 0);
1928	if (!LIST_EMPTY(&vp->v_dirtyblkhd)) {
1929		splx(s);
1930#ifdef DIAGNOSTIC
1931		vprint("vflushbuf: dirty", vp);
1932#endif
1933		goto loop;
1934	}
1935	splx(s);
1936}
1937
1938/*
1939 * Associate a buffer with a vnode.
1940 *
1941 * Manipulates buffer vnode queues. Must be called at splbio().
1942 */
1943void
1944bgetvp(struct vnode *vp, struct buf *bp)
1945{
1946	splassert(IPL_BIO);
1947
1948
1949	if (bp->b_vp)
1950		panic("bgetvp: not free");
1951	vhold(vp);
1952	bp->b_vp = vp;
1953	if (vp->v_type == VBLK || vp->v_type == VCHR)
1954		bp->b_dev = vp->v_rdev;
1955	else
1956		bp->b_dev = NODEV;
1957	/*
1958	 * Insert onto list for new vnode.
1959	 */
1960	bufinsvn(bp, &vp->v_cleanblkhd);
1961}
1962
1963/*
1964 * Disassociate a buffer from a vnode.
1965 *
1966 * Manipulates vnode buffer queues. Must be called at splbio().
1967 */
1968void
1969brelvp(struct buf *bp)
1970{
1971	struct vnode *vp;
1972
1973	splassert(IPL_BIO);
1974
1975	if ((vp = bp->b_vp) == (struct vnode *) 0)
1976		panic("brelvp: NULL");
1977	/*
1978	 * Delete from old vnode list, if on one.
1979	 */
1980	if (LIST_NEXT(bp, b_vnbufs) != NOLIST)
1981		bufremvn(bp);
1982	if ((vp->v_bioflag & VBIOONSYNCLIST) &&
1983	    LIST_FIRST(&vp->v_dirtyblkhd) == NULL) {
1984		vp->v_bioflag &= ~VBIOONSYNCLIST;
1985		LIST_REMOVE(vp, v_synclist);
1986	}
1987	bp->b_vp = NULL;
1988
1989	vdrop(vp);
1990}
1991
1992/*
1993 * Replaces the current vnode associated with the buffer, if any,
1994 * with a new vnode.
1995 *
1996 * If an output I/O is pending on the buffer, the old vnode
1997 * I/O count is adjusted.
1998 *
1999 * Ignores vnode buffer queues. Must be called at splbio().
2000 */
2001void
2002buf_replacevnode(struct buf *bp, struct vnode *newvp)
2003{
2004	struct vnode *oldvp = bp->b_vp;
2005
2006	splassert(IPL_BIO);
2007
2008	if (oldvp)
2009		brelvp(bp);
2010
2011	if ((bp->b_flags & (B_READ | B_DONE)) == 0) {
2012		newvp->v_numoutput++;	/* put it on swapdev */
2013		vwakeup(oldvp);
2014	}
2015
2016	bgetvp(newvp, bp);
2017	bufremvn(bp);
2018}
2019
2020/*
2021 * Used to assign buffers to the appropriate clean or dirty list on
2022 * the vnode and to add newly dirty vnodes to the appropriate
2023 * filesystem syncer list.
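 * Newly dirty vnodes are put on the syncer worklist with a shorter
 * delay for directories (syncdelay / 2) and for block devices with a
 * mounted filesystem (syncdelay / 3) than for other vnodes (syncdelay).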
2024 *
2025 * Manipulates vnode buffer queues. Must be called at splbio().
2026 */
2027void
2028reassignbuf(struct buf *bp)
2029{
2030	struct buflists *listheadp;
2031	int delay;
2032	struct vnode *vp = bp->b_vp;
2033
2034	splassert(IPL_BIO);
2035
2036	/*
2037	 * Delete from old vnode list, if on one.
2038	 */
2039	if (LIST_NEXT(bp, b_vnbufs) != NOLIST)
2040		bufremvn(bp);
2041
2042	/*
2043	 * If dirty, put on list of dirty buffers;
2044	 * otherwise insert onto list of clean buffers.
2045	 */
2046	if ((bp->b_flags & B_DELWRI) == 0) {
2047		listheadp = &vp->v_cleanblkhd;
2048		if ((vp->v_bioflag & VBIOONSYNCLIST) &&
2049		    LIST_FIRST(&vp->v_dirtyblkhd) == NULL) {
2050			vp->v_bioflag &= ~VBIOONSYNCLIST;
2051			LIST_REMOVE(vp, v_synclist);
2052		}
2053	} else {
2054		listheadp = &vp->v_dirtyblkhd;
2055		if ((vp->v_bioflag & VBIOONSYNCLIST) == 0) {
2056			switch (vp->v_type) {
2057			case VDIR:
2058				delay = syncdelay / 2;
2059				break;
2060			case VBLK:
2061				if (vp->v_specmountpoint != NULL) {
2062					delay = syncdelay / 3;
2063					break;
2064				}
2065				/* FALLTHROUGH */
2066			default:
2067				delay = syncdelay;
2068			}
2069			vn_syncer_add_to_worklist(vp, delay);
2070		}
2071	}
2072	bufinsvn(bp, listheadp);
2073}
2074
2075int
2076vfs_register(struct vfsconf *vfs)
2077{
2078	struct vfsconf *vfsp;
2079	struct vfsconf **vfspp;
2080
2081#ifdef DIAGNOSTIC
2082	/* Paranoia? */
2083	if (vfs->vfc_refcount != 0)
2084		printf("vfs_register called with vfc_refcount > 0\n");
2085#endif
2086
2087	/* Check if filesystem already known */
2088	for (vfspp = &vfsconf, vfsp = vfsconf; vfsp;
2089	    vfspp = &vfsp->vfc_next, vfsp = vfsp->vfc_next)
2090		if (strcmp(vfsp->vfc_name, vfs->vfc_name) == 0)
2091			return (EEXIST);
2092
2093	if (vfs->vfc_typenum > maxvfsconf)
2094		maxvfsconf = vfs->vfc_typenum;
2095
2096	vfs->vfc_next = NULL;
2097
2098	/* Add to the end of the list */
2099	*vfspp = vfs;
2100
2101	/* Call vfs_init() */
2102	if (vfs->vfc_vfsops->vfs_init)
2103		(*(vfs->vfc_vfsops->vfs_init))(vfs);
2104
2105	return 0;
2106}
2107
2108int
2109vfs_unregister(struct vfsconf *vfs)
2110{
2111	struct vfsconf *vfsp;
2112	struct vfsconf **vfspp;
2113	int maxtypenum;
2114
2115	/* Find our vfsconf struct */
2116	for (vfspp = &vfsconf, vfsp = vfsconf; vfsp;
2117	    vfspp = &vfsp->vfc_next, vfsp = vfsp->vfc_next) {
2118		if (strcmp(vfsp->vfc_name, vfs->vfc_name) == 0)
2119			break;
2120	}
2121
2122	if (!vfsp)			/* Not found */
2123		return (ENOENT);
2124
2125	if (vfsp->vfc_refcount)		/* In use */
2126		return (EBUSY);
2127
2128	/* Remove from list and free */
2129	*vfspp = vfsp->vfc_next;
2130
2131	maxtypenum = 0;
2132
2133	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
2134		if (vfsp->vfc_typenum > maxtypenum)
2135			maxtypenum = vfsp->vfc_typenum;
2136
2137	maxvfsconf = maxtypenum;
2138	return 0;
2139}
2140
2141/*
2142 * Check if vnode represents a disk device
2143 */
2144int
2145vn_isdisk(struct vnode *vp, int *errp)
2146{
2147	if (vp->v_type != VBLK && vp->v_type != VCHR)
2148		return (0);
2149
2150	return (1);
2151}
2152
2153#ifdef DDB
2154#include <machine/db_machdep.h>
2155#include <ddb/db_interface.h>
2156
2157void
2158vfs_buf_print(void *b, int full,
2159    int (*pr)(const char *, ...) __attribute__((__format__(__kprintf__,1,2))))
2160{
2161	struct buf *bp = b;
2162
2163	(*pr)("  vp %p lblkno 0x%llx blkno 0x%llx dev 0x%x\n"
2164	      "  proc %p error %d flags %lb\n",
2165	    bp->b_vp, (int64_t)bp->b_lblkno, (int64_t)bp->b_blkno, bp->b_dev,
2166	    bp->b_proc, bp->b_error, bp->b_flags, B_BITS);
2167
2168	(*pr)("  bufsize 0x%lx bcount 0x%lx resid 0x%lx\n"
2169	      "  data %p saveaddr %p dep %p iodone %p\n",
2170	    bp->b_bufsize, bp->b_bcount, (long)bp->b_resid,
2171	    bp->b_data, bp->b_saveaddr,
2172	    LIST_FIRST(&bp->b_dep), bp->b_iodone);
2173
2174	(*pr)("  dirty {off 0x%x end 0x%x} valid {off 0x%x end 0x%x}\n",
2175	    bp->b_dirtyoff, bp->b_dirtyend, bp->b_validoff, bp->b_validend);
2176
2177#ifdef FFS_SOFTUPDATES
2178	if (full)
2179		softdep_print(bp, full, pr);
2180#endif
2181}
2182
2183const char *vtypes[] = { VTYPE_NAMES };
2184const char *vtags[] = { VTAG_NAMES };
2185
2186void
2187vfs_vnode_print(void *v, int full,
2188    int (*pr)(const char *, ...) __attribute__((__format__(__kprintf__,1,2))))
2189{
2190	struct vnode *vp = v;
2191
2192	(*pr)("tag %s(%d) type %s(%d) mount %p typedata %p\n",
2193	      (u_int)vp->v_tag >= nitems(vtags)? "<unk>":vtags[vp->v_tag],
2194	      vp->v_tag,
2195	      (u_int)vp->v_type >= nitems(vtypes)? "<unk>":vtypes[vp->v_type],
2196	      vp->v_type, vp->v_mount, vp->v_mountedhere);
2197
2198	(*pr)("data %p usecount %d writecount %d holdcnt %d numoutput %d\n",
2199	      vp->v_data, vp->v_usecount, vp->v_writecount,
2200	      vp->v_holdcnt, vp->v_numoutput);
2201
2202	/* uvm_object_printit(&vp->v_uobj, full, pr); */
2203
2204	if (full) {
2205		struct buf *bp;
2206
2207		(*pr)("clean bufs:\n");
2208		LIST_FOREACH(bp, &vp->v_cleanblkhd, b_vnbufs) {
2209			(*pr)(" bp %p\n", bp);
2210			vfs_buf_print(bp, full, pr);
2211		}
2212
2213		(*pr)("dirty bufs:\n");
2214		LIST_FOREACH(bp, &vp->v_dirtyblkhd, b_vnbufs) {
2215			(*pr)(" bp %p\n", bp);
2216			vfs_buf_print(bp, full, pr);
2217		}
2218	}
2219}
2220
2221void
2222vfs_mount_print(struct mount *mp, int full,
2223    int (*pr)(const char *, ...) __attribute__((__format__(__kprintf__,1,2))))
2224{
2225	struct vfsconf *vfc = mp->mnt_vfc;
2226	struct vnode *vp;
2227	int cnt;
2228
2229	(*pr)("flags %b\nvnodecovered %p syncer %p data %p\n",
2230	    mp->mnt_flag, MNT_BITS,
2231	    mp->mnt_vnodecovered, mp->mnt_syncer, mp->mnt_data);
2232
2233	(*pr)("vfsconf: ops %p name \"%s\" num %d ref %d flags 0x%x\n",
2234            vfc->vfc_vfsops, vfc->vfc_name, vfc->vfc_typenum,
2235	    vfc->vfc_refcount, vfc->vfc_flags);
2236
2237	(*pr)("statvfs cache: bsize %x iosize %x\nblocks %llu free %llu avail %lld\n",
2238	    mp->mnt_stat.f_bsize, mp->mnt_stat.f_iosize, mp->mnt_stat.f_blocks,
2239	    mp->mnt_stat.f_bfree, mp->mnt_stat.f_bavail);
2240
2241	(*pr)("  files %llu ffiles %llu favail %lld\n", mp->mnt_stat.f_files,
2242	    mp->mnt_stat.f_ffree, mp->mnt_stat.f_favail);
2243
2244	(*pr)("  f_fsidx {0x%x, 0x%x} owner %u ctime 0x%llx\n",
2245	    mp->mnt_stat.f_fsid.val[0], mp->mnt_stat.f_fsid.val[1],
2246	    mp->mnt_stat.f_owner, mp->mnt_stat.f_ctime);
2247
2248 	(*pr)("  syncwrites %llu asyncwrites = %llu\n",
2249	    mp->mnt_stat.f_syncwrites, mp->mnt_stat.f_asyncwrites);
2250
2251 	(*pr)("  syncreads %llu asyncreads = %llu\n",
2252	    mp->mnt_stat.f_syncreads, mp->mnt_stat.f_asyncreads);
2253
2254	(*pr)("  fstype \"%s\" mnton \"%s\" mntfrom \"%s\" mntspec \"%s\"\n",
2255	    mp->mnt_stat.f_fstypename, mp->mnt_stat.f_mntonname,
2256	    mp->mnt_stat.f_mntfromname, mp->mnt_stat.f_mntfromspec);
2257
2258	(*pr)("locked vnodes:");
2259	/* XXX would take mountlist lock, except ddb has no context */
2260	cnt = 0;
2261	LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
2262		if (VOP_ISLOCKED(vp)) {
2263			if (cnt == 0)
2264				(*pr)("\n  %p", vp);
2265			else if ((cnt % (72 / (sizeof(void *) * 2 + 4))) == 0)
2266				(*pr)(",\n  %p", vp);
2267			else
2268				(*pr)(", %p", vp);
2269			cnt++;
2270		}
2271	}
2272	(*pr)("\n");
2273
2274	if (full) {
2275		(*pr)("all vnodes:");
2276		/* XXX would take mountlist lock, except ddb has no context */
2277		cnt = 0;
2278		LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
2279			if (cnt == 0)
2280				(*pr)("\n  %p", vp);
2281			else if ((cnt % (72 / (sizeof(void *) * 2 + 4))) == 0)
2282				(*pr)(",\n  %p", vp);
2283			else
2284				(*pr)(", %p", vp);
2285			cnt++;
2286		}
2287		(*pr)("\n");
2288	}
2289}
2290#endif /* DDB */
2291
2292void
2293copy_statfs_info(struct statfs *sbp, const struct mount *mp)
2294{
2295	const struct statfs *mbp;
2296
2297	strncpy(sbp->f_fstypename, mp->mnt_vfc->vfc_name, MFSNAMELEN);
2298
2299	if (sbp == (mbp = &mp->mnt_stat))
2300		return;
2301
2302	sbp->f_fsid = mbp->f_fsid;
2303	sbp->f_owner = mbp->f_owner;
2304	sbp->f_flags = mbp->f_flags;
2305	sbp->f_syncwrites = mbp->f_syncwrites;
2306	sbp->f_asyncwrites = mbp->f_asyncwrites;
2307	sbp->f_syncreads = mbp->f_syncreads;
2308	sbp->f_asyncreads = mbp->f_asyncreads;
2309	sbp->f_namemax = mbp->f_namemax;
2310	memcpy(sbp->f_mntonname, mp->mnt_stat.f_mntonname, MNAMELEN);
2311	memcpy(sbp->f_mntfromname, mp->mnt_stat.f_mntfromname, MNAMELEN);
2312	memcpy(sbp->f_mntfromspec, mp->mnt_stat.f_mntfromspec, MNAMELEN);
2313	memcpy(&sbp->mount_info, &mp->mnt_stat.mount_info,
2314	    sizeof(union mount_info));
2315}
2316