1/*	$OpenBSD: vfs_subr.c,v 1.281 2018/09/26 14:51:44 visa Exp $	*/
2/*	$NetBSD: vfs_subr.c,v 1.53 1996/04/22 01:39:13 christos Exp $	*/
3
4/*
5 * Copyright (c) 1989, 1993
6 *	The Regents of the University of California.  All rights reserved.
7 * (c) UNIX System Laboratories, Inc.
8 * All or some portions of this file are derived from material licensed
9 * to the University of California by American Telephone and Telegraph
10 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
11 * the permission of UNIX System Laboratories, Inc.
12 *
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
15 * are met:
16 * 1. Redistributions of source code must retain the above copyright
17 *    notice, this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 *    notice, this list of conditions and the following disclaimer in the
20 *    documentation and/or other materials provided with the distribution.
21 * 3. Neither the name of the University nor the names of its contributors
22 *    may be used to endorse or promote products derived from this software
23 *    without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 * SUCH DAMAGE.
36 *
37 *	@(#)vfs_subr.c	8.13 (Berkeley) 4/18/94
38 */
39
40/*
41 * External virtual filesystem routines
42 */
43
44#include <sys/param.h>
45#include <sys/systm.h>
46#include <sys/proc.h>
47#include <sys/sysctl.h>
48#include <sys/mount.h>
49#include <sys/time.h>
50#include <sys/fcntl.h>
51#include <sys/kernel.h>
52#include <sys/conf.h>
53#include <sys/vnode.h>
54#include <sys/lock.h>
55#include <sys/stat.h>
56#include <sys/acct.h>
57#include <sys/namei.h>
58#include <sys/ucred.h>
59#include <sys/buf.h>
60#include <sys/errno.h>
61#include <sys/malloc.h>
62#include <sys/mbuf.h>
63#include <sys/syscallargs.h>
64#include <sys/pool.h>
65#include <sys/tree.h>
66#include <sys/specdev.h>
67
68#include <netinet/in.h>
69
70#include <uvm/uvm_extern.h>
71#include <uvm/uvm_vnode.h>
72
73#include "softraid.h"
74
75void sr_quiesce(void);
76
77enum vtype iftovt_tab[16] = {
78	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
79	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
80};
81
82int	vttoif_tab[9] = {
83	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
84	S_IFSOCK, S_IFIFO, S_IFMT,
85};
86
87int prtactive = 0;		/* 1 => print out reclaim of active vnodes */
88int suid_clear = 1;		/* 1 => clear SUID / SGID on owner change */
89
90/*
91 * Insq/Remq for the vnode usage lists.
92 */
93#define	bufinsvn(bp, dp)	LIST_INSERT_HEAD(dp, bp, b_vnbufs)
94#define	bufremvn(bp) {							\
95	LIST_REMOVE(bp, b_vnbufs);					\
96	LIST_NEXT(bp, b_vnbufs) = NOLIST;				\
97}
98
99struct freelst vnode_hold_list;	/* list of vnodes referencing buffers */
100struct freelst vnode_free_list;	/* vnode free list */
101
102struct mntlist mountlist;	/* mounted filesystem list */
103
104void	vclean(struct vnode *, int, struct proc *);
105
106void insmntque(struct vnode *, struct mount *);
107int getdevvp(dev_t, struct vnode **, enum vtype);
108
109int vfs_hang_addrlist(struct mount *, struct netexport *,
110				  struct export_args *);
111int vfs_free_netcred(struct radix_node *, void *, u_int);
112void vfs_free_addrlist(struct netexport *);
113void vputonfreelist(struct vnode *);
114
115int vflush_vnode(struct vnode *, void *);
116int maxvnodes;
117
118void vfs_unmountall(void);
119
120#ifdef DEBUG
121void printlockedvnodes(void);
122#endif
123
124struct pool vnode_pool;
125struct pool uvm_vnode_pool;
126
127static inline int rb_buf_compare(const struct buf *b1, const struct buf *b2);
128RBT_GENERATE(buf_rb_bufs, buf, b_rbbufs, rb_buf_compare);
129
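/*
 * Compare two buffers by logical block number; this orders the
 * per-vnode red-black tree of buffers (v_bufs_tree).
 */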
130static inline int
131rb_buf_compare(const struct buf *b1, const struct buf *b2)
132{
133	if (b1->b_lblkno < b2->b_lblkno)
134		return(-1);
135	if (b1->b_lblkno > b2->b_lblkno)
136		return(1);
137	return(0);
138}
139
140/*
141 * Initialize the vnode management data structures.
142 */
143void
144vntblinit(void)
145{
146	/* buffer cache may need a vnode for each buffer */
147	maxvnodes = 2 * initialvnodes;
148	pool_init(&vnode_pool, sizeof(struct vnode), 0, IPL_NONE,
149	    PR_WAITOK, "vnodes", NULL);
150	pool_init(&uvm_vnode_pool, sizeof(struct uvm_vnode), 0, IPL_NONE,
151	    PR_WAITOK, "uvmvnodes", NULL);
152	TAILQ_INIT(&vnode_hold_list);
153	TAILQ_INIT(&vnode_free_list);
154	TAILQ_INIT(&mountlist);
155	/*
156	 * Initialize the filesystem syncer.
157	 */
158	vn_initialize_syncerd();
159
160#ifdef NFSSERVER
161	rn_init(sizeof(struct sockaddr_in));
162#endif /* NFSSERVER */
163}
164
165/*
166 * Allocate a mount point.
167 *
168 * The returned mount point is marked as busy.
169 */
170struct mount *
171vfs_mount_alloc(struct vnode *vp, struct vfsconf *vfsp)
172{
173	struct mount *mp;
174
175	mp = malloc(sizeof(*mp), M_MOUNT, M_WAITOK|M_ZERO);
176	rw_init_flags(&mp->mnt_lock, "vfslock", RWL_IS_VNODE);
177	(void)vfs_busy(mp, VB_READ|VB_NOWAIT);
178
179	LIST_INIT(&mp->mnt_vnodelist);
180	mp->mnt_vnodecovered = vp;
181
182	vfsp->vfc_refcount++;
183	mp->mnt_vfc = vfsp;
184	mp->mnt_op = vfsp->vfc_vfsops;
185	mp->mnt_flag = vfsp->vfc_flags & MNT_VISFLAGMASK;
186	strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
187
188	return (mp);
189}
190
191/*
192 * Release a mount point.
193 */
194void
195vfs_mount_free(struct mount *mp)
196{
197	mp->mnt_vfc->vfc_refcount--;
198	free(mp, M_MOUNT, sizeof(*mp));
199}
200
201/*
202 * Mark a mount point as busy. Used to synchronize access and to delay
203 * unmounting.
204 *
205 * Default behaviour is to attempt getting a READ lock and, in case of an
206 * ongoing unmount, to wait for it to finish and then return failure.
207 */
208int
209vfs_busy(struct mount *mp, int flags)
210{
211	int rwflags = 0;
212
213	if (flags & VB_WRITE)
214		rwflags |= RW_WRITE;
215	else
216		rwflags |= RW_READ;
217
218	if (flags & VB_WAIT)
219		rwflags |= RW_SLEEPFAIL;
220	else
221		rwflags |= RW_NOSLEEP;
222
223#ifdef WITNESS
224	if (flags & VB_DUPOK)
225		rwflags |= RW_DUPOK;
226#endif
227
228	if (rw_enter(&mp->mnt_lock, rwflags))
229		return (EBUSY);
230
231	return (0);
232}
233
234/*
235 * Release a busy file system
236 */
237void
238vfs_unbusy(struct mount *mp)
239{
240	rw_exit(&mp->mnt_lock);
241}
242
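/*
 * Check if the mount point is busy, i.e. whether anyone currently
 * holds its lock.
 */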
243int
244vfs_isbusy(struct mount *mp)
245{
246	if (RWLOCK_OWNER(&mp->mnt_lock) > 0)
247		return (1);
248	else
249		return (0);
250}
251
252/*
253 * Lookup a filesystem type, and if found allocate and initialize
254 * a mount structure for it.
255 *
256 * Devname is usually updated by mount(8) after booting.
257 */
258int
259vfs_rootmountalloc(char *fstypename, char *devname, struct mount **mpp)
260{
261	struct vfsconf *vfsp;
262	struct mount *mp;
263
264	vfsp = vfs_byname(fstypename);
265	if (vfsp == NULL)
266		return (ENODEV);
267	mp = vfs_mount_alloc(NULLVP, vfsp);
268	mp->mnt_flag |= MNT_RDONLY;
269	mp->mnt_stat.f_mntonname[0] = '/';
270	copystr(devname, mp->mnt_stat.f_mntfromname, MNAMELEN, 0);
271	copystr(devname, mp->mnt_stat.f_mntfromspec, MNAMELEN, 0);
272	*mpp = mp;
273	return (0);
274}
275
276/*
277 * Lookup a mount point by filesystem identifier.
278 */
279struct mount *
280vfs_getvfs(fsid_t *fsid)
281{
282	struct mount *mp;
283
284	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
285		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
286		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
287			return (mp);
288		}
289	}
290
291	return (NULL);
292}
293
294
295/*
296 * Get a new unique fsid
297 */
298void
299vfs_getnewfsid(struct mount *mp)
300{
301	static u_short xxxfs_mntid;
302
303	fsid_t tfsid;
304	int mtype;
305
306	mtype = mp->mnt_vfc->vfc_typenum;
307	mp->mnt_stat.f_fsid.val[0] = makedev(nblkdev + mtype, 0);
308	mp->mnt_stat.f_fsid.val[1] = mtype;
309	if (xxxfs_mntid == 0)
310		++xxxfs_mntid;
311	tfsid.val[0] = makedev(nblkdev + mtype, xxxfs_mntid);
312	tfsid.val[1] = mtype;
313	if (!TAILQ_EMPTY(&mountlist)) {
314		while (vfs_getvfs(&tfsid)) {
315			tfsid.val[0]++;
316			xxxfs_mntid++;
317		}
318	}
319	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
320}
321
322/*
323 * Set vnode attributes to VNOVAL
324 */
325void
326vattr_null(struct vattr *vap)
327{
328
329	vap->va_type = VNON;
330	/*
331	 * Don't get fancy: u_quad_t = u_int = VNOVAL leaves the u_quad_t
332	 * with 2^31-1 instead of 2^64-1.  Just write'm out and let
333	 * the compiler do its job.
334	 */
335	vap->va_mode = VNOVAL;
336	vap->va_nlink = VNOVAL;
337	vap->va_uid = VNOVAL;
338	vap->va_gid = VNOVAL;
339	vap->va_fsid = VNOVAL;
340	vap->va_fileid = VNOVAL;
341	vap->va_size = VNOVAL;
342	vap->va_blocksize = VNOVAL;
343	vap->va_atime.tv_sec = VNOVAL;
344	vap->va_atime.tv_nsec = VNOVAL;
345	vap->va_mtime.tv_sec = VNOVAL;
346	vap->va_mtime.tv_nsec = VNOVAL;
347	vap->va_ctime.tv_sec = VNOVAL;
348	vap->va_ctime.tv_nsec = VNOVAL;
349	vap->va_gen = VNOVAL;
350	vap->va_flags = VNOVAL;
351	vap->va_rdev = VNOVAL;
352	vap->va_bytes = VNOVAL;
353	vap->va_filerev = VNOVAL;
354	vap->va_vaflags = 0;
355}
356
357/*
358 * Routines having to do with the management of the vnode table.
359 */
360long numvnodes;
361
362/*
363 * Allocate a new vnode, or recycle one from the free lists.
364 */
365int
366getnewvnode(enum vtagtype tag, struct mount *mp, struct vops *vops,
367    struct vnode **vpp)
368{
369	struct proc *p = curproc;
370	struct freelst *listhd;
371	static int toggle;
372	struct vnode *vp;
373	int s;
374
375	/*
376	 * allow maxvnodes to increase if the buffer cache itself
377	 * is big enough to justify it. (we don't shrink it ever)
378	 */
379	maxvnodes = maxvnodes < bcstats.numbufs ? bcstats.numbufs
380	    : maxvnodes;
381
382	/*
383	 * We must choose whether to allocate a new vnode or recycle an
384	 * existing one. The criterion for allocating a new one is that
385	 * the total number of vnodes is less than the number desired or
386	 * there are no vnodes on either free list. Generally we only
387	 * want to recycle vnodes that have no buffers associated with
388	 * them, so we look first on the vnode_free_list. If it is empty,
389	 * we next consider vnodes with referencing buffers on the
390	 * vnode_hold_list. The toggle ensures that half the time we
391	 * will use a buffer from the vnode_hold_list, and half the time
392	 * we will allocate a new one unless the list has grown to twice
393	 * the desired size. We are reticent to recycle vnodes from the
394	 * vnode_hold_list because we will lose the identity of all its
395	 * referencing buffers.
396	 */
397	toggle ^= 1;
398	if (numvnodes / 2 > maxvnodes)
399		toggle = 0;
400
401	s = splbio();
402	if ((numvnodes < maxvnodes) ||
403	    ((TAILQ_FIRST(listhd = &vnode_free_list) == NULL) &&
404	    ((TAILQ_FIRST(listhd = &vnode_hold_list) == NULL) || toggle))) {
405		splx(s);
406		vp = pool_get(&vnode_pool, PR_WAITOK | PR_ZERO);
407		vp->v_uvm = pool_get(&uvm_vnode_pool, PR_WAITOK | PR_ZERO);
408		vp->v_uvm->u_vnode = vp;
409		RBT_INIT(buf_rb_bufs, &vp->v_bufs_tree);
410		cache_tree_init(&vp->v_nc_tree);
411		TAILQ_INIT(&vp->v_cache_dst);
412		numvnodes++;
413	} else {
414		TAILQ_FOREACH(vp, listhd, v_freelist) {
415			if (VOP_ISLOCKED(vp) == 0)
416				break;
417		}
418		/*
419		 * Unless this is a bad time of the month, at most
420		 * the first NCPUS items on the free list are
421		 * locked, so this is close enough to being empty.
422		 */
423		if (vp == NULL) {
424			splx(s);
425			tablefull("vnode");
426			*vpp = 0;
427			return (ENFILE);
428		}
429
430#ifdef DIAGNOSTIC
431		if (vp->v_usecount) {
432			vprint("free vnode", vp);
433			panic("free vnode isn't");
434		}
435#endif
436
437		TAILQ_REMOVE(listhd, vp, v_freelist);
438		vp->v_bioflag &= ~VBIOONFREELIST;
439		splx(s);
440
441		if (vp->v_type != VBAD)
442			vgonel(vp, p);
443#ifdef DIAGNOSTIC
444		if (vp->v_data) {
445			vprint("cleaned vnode", vp);
446			panic("cleaned vnode isn't");
447		}
448		s = splbio();
449		if (vp->v_numoutput)
450			panic("Clean vnode has pending I/O's");
451		splx(s);
452#endif
453		vp->v_flag = 0;
454		vp->v_socket = 0;
455	}
456	cache_purge(vp);
457	vp->v_type = VNON;
458	vp->v_tag = tag;
459	vp->v_op = vops;
460	insmntque(vp, mp);
461	*vpp = vp;
462	vp->v_usecount = 1;
463	vp->v_data = 0;
464	return (0);
465}
466
467/*
468 * Move a vnode from one mount queue to another.
469 */
470void
471insmntque(struct vnode *vp, struct mount *mp)
472{
473	/*
474	 * Delete from old mount point vnode list, if on one.
475	 */
476	if (vp->v_mount != NULL)
477		LIST_REMOVE(vp, v_mntvnodes);
478	/*
479	 * Insert into list of vnodes for the new mount point, if available.
480	 */
481	if ((vp->v_mount = mp) != NULL)
482		LIST_INSERT_HEAD(&mp->mnt_vnodelist, vp, v_mntvnodes);
483}
484
485/*
486 * Create a vnode for a block device.
487 * Used for root filesystem, argdev, and swap areas.
488 * Also used for memory file system special devices.
489 */
490int
491bdevvp(dev_t dev, struct vnode **vpp)
492{
493	return (getdevvp(dev, vpp, VBLK));
494}
495
496/*
497 * Create a vnode for a character device.
498 * Used for console handling.
499 */
500int
501cdevvp(dev_t dev, struct vnode **vpp)
502{
503	return (getdevvp(dev, vpp, VCHR));
504}
505
506/*
507 * Create a vnode for a device.
508 * Used by bdevvp (block device) for root file system etc.,
509 * and by cdevvp (character device) for console.
510 */
511int
512getdevvp(dev_t dev, struct vnode **vpp, enum vtype type)
513{
514	struct vnode *vp;
515	struct vnode *nvp;
516	int error;
517
518	if (dev == NODEV) {
519		*vpp = NULLVP;
520		return (0);
521	}
522	error = getnewvnode(VT_NON, NULL, &spec_vops, &nvp);
523	if (error) {
524		*vpp = NULLVP;
525		return (error);
526	}
527	vp = nvp;
528	vp->v_type = type;
529	if ((nvp = checkalias(vp, dev, NULL)) != 0) {
530		vput(vp);
531		vp = nvp;
532	}
533	if (vp->v_type == VCHR && cdevsw[major(vp->v_rdev)].d_type == D_TTY)
534		vp->v_flag |= VISTTY;
535	*vpp = vp;
536	return (0);
537}
538
539/*
540 * Check to see if the new vnode represents a special device
541 * for which we already have a vnode (either because of
542 * bdevvp() or because of a different vnode representing
543 * the same block device). If such an alias exists, deallocate
544 * the existing contents and return the aliased vnode. The
545 * caller is responsible for filling it with its new contents.
546 */
547struct vnode *
548checkalias(struct vnode *nvp, dev_t nvp_rdev, struct mount *mp)
549{
550	struct proc *p = curproc;
551	struct vnode *vp;
552	struct vnode **vpp;
553
554	if (nvp->v_type != VBLK && nvp->v_type != VCHR)
555		return (NULLVP);
556
557	vpp = &speclisth[SPECHASH(nvp_rdev)];
558loop:
559	for (vp = *vpp; vp; vp = vp->v_specnext) {
560		if (nvp_rdev != vp->v_rdev || nvp->v_type != vp->v_type) {
561			continue;
562		}
563		/*
564		 * Alias, but not in use, so flush it out.
565		 */
566		if (vp->v_usecount == 0) {
567			vgonel(vp, p);
568			goto loop;
569		}
570		if (vget(vp, LK_EXCLUSIVE)) {
571			goto loop;
572		}
573		break;
574	}
575
576	/*
577	 * Common case is actually in the if statement
578	 */
579	if (vp == NULL || !(vp->v_tag == VT_NON && vp->v_type == VBLK)) {
580		nvp->v_specinfo = malloc(sizeof(struct specinfo), M_VNODE,
581			M_WAITOK);
582		nvp->v_rdev = nvp_rdev;
583		nvp->v_hashchain = vpp;
584		nvp->v_specnext = *vpp;
585		nvp->v_specmountpoint = NULL;
586		nvp->v_speclockf = NULL;
587		nvp->v_specbitmap = NULL;
588		if (nvp->v_type == VCHR &&
589		    (cdevsw[major(nvp_rdev)].d_flags & D_CLONE) &&
590		    (minor(nvp_rdev) >> CLONE_SHIFT == 0)) {
591			if (vp != NULLVP)
592				nvp->v_specbitmap = vp->v_specbitmap;
593			else
594				nvp->v_specbitmap = malloc(CLONE_MAPSZ,
595				    M_VNODE, M_WAITOK | M_ZERO);
596		}
597		*vpp = nvp;
598		if (vp != NULLVP) {
599			nvp->v_flag |= VALIASED;
600			vp->v_flag |= VALIASED;
601			vput(vp);
602		}
603		return (NULLVP);
604	}
605
606	/*
607	 * This code handles the uncommon case: we found an alias
608	 * whose tag is VT_NON and whose type is VBLK.
609	 * This means we found a block device vnode that was created
610	 * using bdevvp.
611	 * An example of such a vnode is the root partition device vnode
612	 * created in ffs_mountroot.
613	 *
614	 * The vnodes created by bdevvp should not be aliased (why?).
615	 */
616
617	VOP_UNLOCK(vp);
618	vclean(vp, 0, p);
619	vp->v_op = nvp->v_op;
620	vp->v_tag = nvp->v_tag;
621	nvp->v_type = VNON;
622	insmntque(vp, mp);
623	return (vp);
624}
625
626/*
627 * Grab a particular vnode from the free list, increment its
628 * reference count and lock it. If the vnode lock bit is set,
629 * the vnode is being eliminated in vgone. In that case, we
630 * cannot grab it, so the process is awakened when the
631 * transition is completed, and an error code is returned to
632 * indicate that the vnode is no longer usable, possibly
633 * having been changed to a new file system type.
634 */
635int
636vget(struct vnode *vp, int flags)
637{
638	int error, s, onfreelist;
639
640	/*
641	 * If the vnode is in the process of being cleaned out for
642	 * another use, we wait for the cleaning to finish and then
643	 * return failure. Cleaning is determined by checking that
644	 * the VXLOCK flag is set.
645	 */
646
647	if (vp->v_flag & VXLOCK) {
648		if (flags & LK_NOWAIT) {
649			return (EBUSY);
650		}
651
652		vp->v_flag |= VXWANT;
653		tsleep(vp, PINOD, "vget", 0);
654		return (ENOENT);
655	}
656
657	onfreelist = vp->v_bioflag & VBIOONFREELIST;
658	if (vp->v_usecount == 0 && onfreelist) {
659		s = splbio();
660		if (vp->v_holdcnt > 0)
661			TAILQ_REMOVE(&vnode_hold_list, vp, v_freelist);
662		else
663			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
664		vp->v_bioflag &= ~VBIOONFREELIST;
665		splx(s);
666	}
667
668	vp->v_usecount++;
669	if (flags & LK_TYPE_MASK) {
670		if ((error = vn_lock(vp, flags)) != 0) {
671			vp->v_usecount--;
672			if (vp->v_usecount == 0 && onfreelist)
673				vputonfreelist(vp);
674		}
675		return (error);
676	}
677
678	return (0);
679}
680
681
682/* Vnode reference. */
683void
684vref(struct vnode *vp)
685{
686#ifdef DIAGNOSTIC
687	if (vp->v_usecount == 0)
688		panic("vref used where vget required");
689	if (vp->v_type == VNON)
690		panic("vref on a VNON vnode");
691#endif
692	vp->v_usecount++;
693}
694
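/*
 * Put an unused vnode onto a free list.  Vnodes still referenced by
 * buffers (v_holdcnt > 0) go onto the hold list, the others onto the
 * free list; dead (VBAD) vnodes are inserted at the head so that they
 * are reused first.
 */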
695void
696vputonfreelist(struct vnode *vp)
697{
698	int s;
699	struct freelst *lst;
700
701	s = splbio();
702#ifdef DIAGNOSTIC
703	if (vp->v_usecount != 0)
704		panic("Use count is not zero!");
705
706	if (vp->v_bioflag & VBIOONFREELIST) {
707		vprint("vnode already on free list: ", vp);
708		panic("vnode already on free list");
709	}
710#endif
711
712	vp->v_bioflag |= VBIOONFREELIST;
713
714	if (vp->v_holdcnt > 0)
715		lst = &vnode_hold_list;
716	else
717		lst = &vnode_free_list;
718
719	if (vp->v_type == VBAD)
720		TAILQ_INSERT_HEAD(lst, vp, v_freelist);
721	else
722		TAILQ_INSERT_TAIL(lst, vp, v_freelist);
723
724	splx(s);
725}
726
727/*
728 * vput(), just unlock and vrele()
729 */
730void
731vput(struct vnode *vp)
732{
733	struct proc *p = curproc;
734
735#ifdef DIAGNOSTIC
736	if (vp == NULL)
737		panic("vput: null vp");
738#endif
739
740#ifdef DIAGNOSTIC
741	if (vp->v_usecount == 0) {
742		vprint("vput: bad ref count", vp);
743		panic("vput: ref cnt");
744	}
745#endif
746	vp->v_usecount--;
747	KASSERT(vp->v_usecount > 0 || vp->v_uvcount == 0);
748	if (vp->v_usecount > 0) {
749		VOP_UNLOCK(vp);
750		return;
751	}
752
753#ifdef DIAGNOSTIC
754	if (vp->v_writecount != 0) {
755		vprint("vput: bad writecount", vp);
756		panic("vput: v_writecount != 0");
757	}
758#endif
759
760	VOP_INACTIVE(vp, p);
761
762	if (vp->v_usecount == 0 && !(vp->v_bioflag & VBIOONFREELIST))
763		vputonfreelist(vp);
764}
765
766/*
767 * Vnode release - use for active VNODES.
768 * If count drops to zero, call inactive routine and return to freelist.
769 * Returns 0 if it did not sleep.
770 */
771int
772vrele(struct vnode *vp)
773{
774	struct proc *p = curproc;
775
776#ifdef DIAGNOSTIC
777	if (vp == NULL)
778		panic("vrele: null vp");
779#endif
780#ifdef DIAGNOSTIC
781	if (vp->v_usecount == 0) {
782		vprint("vrele: bad ref count", vp);
783		panic("vrele: ref cnt");
784	}
785#endif
786	vp->v_usecount--;
787	if (vp->v_usecount > 0) {
788		return (0);
789	}
790
791#ifdef DIAGNOSTIC
792	if (vp->v_writecount != 0) {
793		vprint("vrele: bad writecount", vp);
794		panic("vrele: v_writecount != 0");
795	}
796#endif
797
798	if (vn_lock(vp, LK_EXCLUSIVE)) {
799#ifdef DIAGNOSTIC
800		vprint("vrele: cannot lock", vp);
801#endif
802		return (1);
803	}
804
805	VOP_INACTIVE(vp, p);
806
807	if (vp->v_usecount == 0 && !(vp->v_bioflag & VBIOONFREELIST))
808		vputonfreelist(vp);
809	return (1);
810}
811
812/* Page or buffer structure gets a reference. */
813void
814vhold(struct vnode *vp)
815{
816	/*
817	 * If it is on the freelist and the hold count is currently
818	 * zero, move it to the hold list.
819	 */
820	if ((vp->v_bioflag & VBIOONFREELIST) &&
821	    vp->v_holdcnt == 0 && vp->v_usecount == 0) {
822		TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
823		TAILQ_INSERT_TAIL(&vnode_hold_list, vp, v_freelist);
824	}
825	vp->v_holdcnt++;
826}
827
828/* Lose interest in a vnode. */
829void
830vdrop(struct vnode *vp)
831{
832#ifdef DIAGNOSTIC
833	if (vp->v_holdcnt == 0)
834		panic("vdrop: zero holdcnt");
835#endif
836
837	vp->v_holdcnt--;
838
839	/*
840	 * If it is on the holdlist and the hold count drops to
841	 * zero, move it to the free list.
842	 */
843	if ((vp->v_bioflag & VBIOONFREELIST) &&
844	    vp->v_holdcnt == 0 && vp->v_usecount == 0) {
845		TAILQ_REMOVE(&vnode_hold_list, vp, v_freelist);
846		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
847	}
848}
849
850/*
851 * Remove any vnodes in the vnode table belonging to mount point mp.
852 *
853 * If MNT_NOFORCE is specified, there should not be any active ones,
854 * return error if any are found (nb: this is a user error, not a
855 * system error). If MNT_FORCE is specified, detach any active vnodes
856 * that are found.
857 */
858#ifdef DEBUG
859int busyprt = 0;	/* print out busy vnodes */
860struct ctldebug debug1 = { "busyprt", &busyprt };
861#endif
862
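/*
 * Apply func to every vnode attached to mp.  The scan restarts from
 * the beginning whenever a vnode has moved to another mount point
 * underneath us, and it stops early if func returns non-zero.
 */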
863int
864vfs_mount_foreach_vnode(struct mount *mp,
865    int (*func)(struct vnode *, void *), void *arg) {
866	struct vnode *vp, *nvp;
867	int error = 0;
868
869loop:
870	LIST_FOREACH_SAFE(vp, &mp->mnt_vnodelist, v_mntvnodes, nvp) {
871		if (vp->v_mount != mp)
872			goto loop;
873
874		error = func(vp, arg);
875
876		if (error != 0)
877			break;
878	}
879
880	return (error);
881}
882
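/*
 * Context for vflush(): vflush_vnode() is the per-vnode callback that
 * decides whether a vnode can be flushed and counts those that remain
 * busy.
 */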
883struct vflush_args {
884	struct vnode *skipvp;
885	int busy;
886	int flags;
887};
888
889int
890vflush_vnode(struct vnode *vp, void *arg)
891{
892	struct vflush_args *va = arg;
893	struct proc *p = curproc;
894
895	if (vp == va->skipvp) {
896		return (0);
897	}
898
899	if ((va->flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
900		return (0);
901	}
902
903	/*
904	 * If WRITECLOSE is set, only flush out regular file
905	 * vnodes open for writing.
906	 */
907	if ((va->flags & WRITECLOSE) &&
908	    (vp->v_writecount == 0 || vp->v_type != VREG)) {
909		return (0);
910	}
911
912	/*
913	 * With v_usecount == 0, all we need to do is clear
914	 * out the vnode data structures and we are done.
915	 */
916	if (vp->v_usecount == 0) {
917		vgonel(vp, p);
918		return (0);
919	}
920
921	/*
922	 * If FORCECLOSE is set, forcibly close the vnode.
923	 * For block or character devices, revert to an
924	 * anonymous device. For all other files, just kill them.
925	 */
926	if (va->flags & FORCECLOSE) {
927		if (vp->v_type != VBLK && vp->v_type != VCHR) {
928			vgonel(vp, p);
929		} else {
930			vclean(vp, 0, p);
931			vp->v_op = &spec_vops;
932			insmntque(vp, NULL);
933		}
934		return (0);
935	}
936
937	/*
938	 * If IGNORECLEAN is set, this is allowed to ignore vnodes
939	 * which don't have changes pending to disk.
940	 * XXX Might be nice to check per-fs "inode" flags, but
941	 * generally the filesystem is sync'd already, right?
942	 */
943	if ((va->flags & IGNORECLEAN) &&
944	    LIST_EMPTY(&vp->v_dirtyblkhd))
945		return (0);
946
947#ifdef DEBUG
948	if (busyprt)
949		vprint("vflush: busy vnode", vp);
950#endif
951	va->busy++;
952	return (0);
953}
954
955int
956vflush(struct mount *mp, struct vnode *skipvp, int flags)
957{
958	struct vflush_args va;
959	va.skipvp = skipvp;
960	va.busy = 0;
961	va.flags = flags;
962
963	vfs_mount_foreach_vnode(mp, vflush_vnode, &va);
964
965	if (va.busy)
966		return (EBUSY);
967	return (0);
968}
969
970/*
971 * Disassociate the underlying file system from a vnode.
972 */
973void
974vclean(struct vnode *vp, int flags, struct proc *p)
975{
976	int active;
977
978	/*
979	 * Check to see if the vnode is in use.
980	 * If so we have to reference it before we clean it out
981	 * so that its count cannot fall to zero and generate a
982	 * race against ourselves to recycle it.
983	 */
984	if ((active = vp->v_usecount) != 0)
985		vp->v_usecount++;
986
987	/*
988	 * Prevent the vnode from being recycled or
989	 * brought into use while we clean it out.
990	 */
991	if (vp->v_flag & VXLOCK)
992		panic("vclean: deadlock");
993	vp->v_flag |= VXLOCK;
994	/*
995	 * Even if the count is zero, the VOP_INACTIVE routine may still
996	 * have the object locked while it cleans it out. The VOP_LOCK
997	 * ensures that the VOP_INACTIVE routine is done with its work.
998	 * For active vnodes, it ensures that no other activity can
999	 * occur while the underlying object is being cleaned out.
1000	 */
1001	VOP_LOCK(vp, LK_DRAIN | LK_EXCLUSIVE);
1002
1003	/*
1004	 * Clean out any VM data associated with the vnode.
1005	 */
1006	uvm_vnp_terminate(vp);
1007	/*
1008	 * Clean out any buffers associated with the vnode.
1009	 */
1010	if (flags & DOCLOSE)
1011		vinvalbuf(vp, V_SAVE, NOCRED, p, 0, 0);
1012	/*
1013	 * If purging an active vnode, it must be closed and
1014	 * deactivated before being reclaimed. Note that the
1015	 * VOP_INACTIVE will unlock the vnode.
1016	 */
1017	if (active) {
1018		if (flags & DOCLOSE)
1019			VOP_CLOSE(vp, FNONBLOCK, NOCRED, p);
1020		VOP_INACTIVE(vp, p);
1021	} else {
1022		/*
1023		 * Any other processes trying to obtain this lock must first
1024		 * wait for VXLOCK to clear, then call the new lock operation.
1025		 */
1026		VOP_UNLOCK(vp);
1027	}
1028
1029	/*
1030	 * Reclaim the vnode.
1031	 */
1032	if (VOP_RECLAIM(vp, p))
1033		panic("vclean: cannot reclaim");
1034	if (active) {
1035		vp->v_usecount--;
1036		if (vp->v_usecount == 0) {
1037			if (vp->v_holdcnt > 0)
1038				panic("vclean: not clean");
1039			vputonfreelist(vp);
1040		}
1041	}
1042	cache_purge(vp);
1043
1044	/*
1045	 * Done with purge, notify sleepers of the grim news.
1046	 */
1047	vp->v_op = &dead_vops;
1048	VN_KNOTE(vp, NOTE_REVOKE);
1049	vp->v_tag = VT_NON;
1050	vp->v_flag &= ~VXLOCK;
1051#ifdef VFSLCKDEBUG
1052	vp->v_flag &= ~VLOCKSWORK;
1053#endif
1054	if (vp->v_flag & VXWANT) {
1055		vp->v_flag &= ~VXWANT;
1056		wakeup(vp);
1057	}
1058}
1059
1060/*
1061 * Recycle an unused vnode to the front of the free list.
1062 */
1063int
1064vrecycle(struct vnode *vp, struct proc *p)
1065{
1066	if (vp->v_usecount == 0) {
1067		vgonel(vp, p);
1068		return (1);
1069	}
1070	return (0);
1071}
1072
1073/*
1074 * Eliminate all activity associated with a vnode
1075 * in preparation for reuse.
1076 */
1077void
1078vgone(struct vnode *vp)
1079{
1080	struct proc *p = curproc;
1081	vgonel(vp, p);
1082}
1083
1084/*
1085 * vgone, with struct proc.
1086 */
1087void
1088vgonel(struct vnode *vp, struct proc *p)
1089{
1090	struct vnode *vq;
1091	struct vnode *vx;
1092
1093	KASSERT(vp->v_uvcount == 0);
1094
1095	/*
1096	 * If a vgone (or vclean) is already in progress,
1097	 * wait until it is done and return.
1098	 */
1099	if (vp->v_flag & VXLOCK) {
1100		vp->v_flag |= VXWANT;
1101		tsleep(vp, PINOD, "vgone", 0);
1102		return;
1103	}
1104
1105	/*
1106	 * Clean out the filesystem specific data.
1107	 */
1108	vclean(vp, DOCLOSE, p);
1109	/*
1110	 * Delete from old mount point vnode list, if on one.
1111	 */
1112	if (vp->v_mount != NULL)
1113		insmntque(vp, NULL);
1114	/*
1115	 * If special device, remove it from special device alias list
1116	 * if it is on one.
1117	 */
1118	if ((vp->v_type == VBLK || vp->v_type == VCHR) && vp->v_specinfo != 0) {
1119		if ((vp->v_flag & VALIASED) == 0 && vp->v_type == VCHR &&
1120		    (cdevsw[major(vp->v_rdev)].d_flags & D_CLONE) &&
1121		    (minor(vp->v_rdev) >> CLONE_SHIFT == 0)) {
1122			free(vp->v_specbitmap, M_VNODE, CLONE_MAPSZ);
1123		}
1124		if (*vp->v_hashchain == vp) {
1125			*vp->v_hashchain = vp->v_specnext;
1126		} else {
1127			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
1128				if (vq->v_specnext != vp)
1129					continue;
1130				vq->v_specnext = vp->v_specnext;
1131				break;
1132			}
1133			if (vq == NULL)
1134				panic("missing bdev");
1135		}
1136		if (vp->v_flag & VALIASED) {
1137			vx = NULL;
1138			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
1139				if (vq->v_rdev != vp->v_rdev ||
1140				    vq->v_type != vp->v_type)
1141					continue;
1142				if (vx)
1143					break;
1144				vx = vq;
1145			}
1146			if (vx == NULL)
1147				panic("missing alias");
1148			if (vq == NULL)
1149				vx->v_flag &= ~VALIASED;
1150			vp->v_flag &= ~VALIASED;
1151		}
1152		free(vp->v_specinfo, M_VNODE, sizeof(struct specinfo));
1153		vp->v_specinfo = NULL;
1154	}
1155	/*
1156	 * If it is on the freelist and not already at the head,
1157	 * move it to the head of the list.
1158	 */
1159	vp->v_type = VBAD;
1160
1161	/*
1162	 * Move onto the free list, unless we were called from
1163	 * getnewvnode and we're not on any free list
1164	 */
1165	if (vp->v_usecount == 0 &&
1166	    (vp->v_bioflag & VBIOONFREELIST)) {
1167		int s;
1168
1169		s = splbio();
1170
1171		if (vp->v_holdcnt > 0)
1172			panic("vgonel: not clean");
1173
1174		if (TAILQ_FIRST(&vnode_free_list) != vp) {
1175			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
1176			TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
1177		}
1178		splx(s);
1179	}
1180}
1181
1182/*
1183 * Lookup a vnode by device number.
1184 */
1185int
1186vfinddev(dev_t dev, enum vtype type, struct vnode **vpp)
1187{
1188	struct vnode *vp;
1189	int rc = 0;
1190
1191	for (vp = speclisth[SPECHASH(dev)]; vp; vp = vp->v_specnext) {
1192		if (dev != vp->v_rdev || type != vp->v_type)
1193			continue;
1194		*vpp = vp;
1195		rc = 1;
1196		break;
1197	}
1198	return (rc);
1199}
1200
1201/*
1202 * Revoke all the vnodes corresponding to the specified minor number
1203 * range (endpoints inclusive) of the specified major.
1204 */
1205void
1206vdevgone(int maj, int minl, int minh, enum vtype type)
1207{
1208	struct vnode *vp;
1209	int mn;
1210
1211	for (mn = minl; mn <= minh; mn++)
1212		if (vfinddev(makedev(maj, mn), type, &vp))
1213			VOP_REVOKE(vp, REVOKEALL);
1214}
1215
1216/*
1217 * Calculate the total number of references to a special device.
1218 */
1219int
1220vcount(struct vnode *vp)
1221{
1222	struct vnode *vq, *vnext;
1223	int count;
1224
1225loop:
1226	if ((vp->v_flag & VALIASED) == 0)
1227		return (vp->v_usecount);
1228	for (count = 0, vq = *vp->v_hashchain; vq; vq = vnext) {
1229		vnext = vq->v_specnext;
1230		if (vq->v_rdev != vp->v_rdev || vq->v_type != vp->v_type)
1231			continue;
1232		/*
1233		 * Alias, but not in use, so flush it out.
1234		 */
1235		if (vq->v_usecount == 0 && vq != vp) {
1236			vgone(vq);
1237			goto loop;
1238		}
1239		count += vq->v_usecount;
1240	}
1241	return (count);
1242}
1243
1244#if defined(DEBUG) || defined(DIAGNOSTIC)
1245/*
1246 * Print out a description of a vnode.
1247 */
1248static char *typename[] =
1249   { "VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD" };
1250
1251void
1252vprint(char *label, struct vnode *vp)
1253{
1254	char buf[64];
1255
1256	if (label != NULL)
1257		printf("%s: ", label);
1258	printf("%p, type %s, use %u, write %u, hold %u,",
1259		vp, typename[vp->v_type], vp->v_usecount, vp->v_writecount,
1260		vp->v_holdcnt);
1261	buf[0] = '\0';
1262	if (vp->v_flag & VROOT)
1263		strlcat(buf, "|VROOT", sizeof buf);
1264	if (vp->v_flag & VTEXT)
1265		strlcat(buf, "|VTEXT", sizeof buf);
1266	if (vp->v_flag & VSYSTEM)
1267		strlcat(buf, "|VSYSTEM", sizeof buf);
1268	if (vp->v_flag & VXLOCK)
1269		strlcat(buf, "|VXLOCK", sizeof buf);
1270	if (vp->v_flag & VXWANT)
1271		strlcat(buf, "|VXWANT", sizeof buf);
1272	if (vp->v_bioflag & VBIOWAIT)
1273		strlcat(buf, "|VBIOWAIT", sizeof buf);
1274	if (vp->v_bioflag & VBIOONFREELIST)
1275		strlcat(buf, "|VBIOONFREELIST", sizeof buf);
1276	if (vp->v_bioflag & VBIOONSYNCLIST)
1277		strlcat(buf, "|VBIOONSYNCLIST", sizeof buf);
1278	if (vp->v_flag & VALIASED)
1279		strlcat(buf, "|VALIASED", sizeof buf);
1280	if (buf[0] != '\0')
1281		printf(" flags (%s)", &buf[1]);
1282	if (vp->v_data == NULL) {
1283		printf("\n");
1284	} else {
1285		printf("\n\t");
1286		VOP_PRINT(vp);
1287	}
1288}
1289#endif /* DEBUG || DIAGNOSTIC */
1290
1291#ifdef DEBUG
1292/*
1293 * List all of the locked vnodes in the system.
1294 * Called when debugging the kernel.
1295 */
1296void
1297printlockedvnodes(void)
1298{
1299	struct mount *mp;
1300	struct vnode *vp;
1301
1302	printf("Locked vnodes\n");
1303
1304	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
1305		if (vfs_busy(mp, VB_READ|VB_NOWAIT))
1306			continue;
1307		LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
1308			if (VOP_ISLOCKED(vp))
1309				vprint(NULL, vp);
1310		}
1311		vfs_unbusy(mp);
1312	}
1313
1314}
1315#endif
1316
1317/*
1318 * Top level filesystem related information gathering.
1319 */
1320int
1321vfs_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
1322    size_t newlen, struct proc *p)
1323{
1324	struct vfsconf *vfsp, *tmpvfsp;
1325	int ret;
1326
1327	/* all sysctl names at this level are at least name and field */
1328	if (namelen < 2)
1329		return (ENOTDIR);		/* overloaded */
1330
1331	if (name[0] != VFS_GENERIC) {
1332		vfsp = vfs_bytypenum(name[0]);
1333		if (vfsp == NULL || vfsp->vfc_vfsops->vfs_sysctl == NULL)
1334			return (EOPNOTSUPP);
1335
1336		return ((*vfsp->vfc_vfsops->vfs_sysctl)(&name[1], namelen - 1,
1337		    oldp, oldlenp, newp, newlen, p));
1338	}
1339
1340	switch (name[1]) {
1341	case VFS_MAXTYPENUM:
1342		return (sysctl_rdint(oldp, oldlenp, newp, maxvfsconf));
1343
1344	case VFS_CONF:
1345		if (namelen < 3)
1346			return (ENOTDIR);	/* overloaded */
1347
1348		vfsp = vfs_bytypenum(name[2]);
1349		if (vfsp == NULL)
1350			return (EOPNOTSUPP);
1351
1352		/* Make a copy, clear out kernel pointers */
1353		tmpvfsp = malloc(sizeof(*tmpvfsp), M_TEMP, M_WAITOK|M_ZERO);
1354		memcpy(tmpvfsp, vfsp, sizeof(*tmpvfsp));
1355		tmpvfsp->vfc_vfsops = NULL;
1356
1357		ret = sysctl_rdstruct(oldp, oldlenp, newp, tmpvfsp,
1358		    sizeof(struct vfsconf));
1359
1360		free(tmpvfsp, M_TEMP, sizeof(*tmpvfsp));
1361		return (ret);
1362	case VFS_BCACHESTAT:	/* buffer cache statistics */
1363		ret = sysctl_rdstruct(oldp, oldlenp, newp, &bcstats,
1364		    sizeof(struct bcachestats));
1365		return(ret);
1366	}
1367	return (EOPNOTSUPP);
1368}
1369
1370/*
1371 * Check to see if a filesystem is mounted on a block device.
1372 */
1373int
1374vfs_mountedon(struct vnode *vp)
1375{
1376	struct vnode *vq;
1377	int error = 0;
1378
1379	if (vp->v_specmountpoint != NULL)
1380		return (EBUSY);
1381	if (vp->v_flag & VALIASED) {
1382		for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
1383			if (vq->v_rdev != vp->v_rdev ||
1384			    vq->v_type != vp->v_type)
1385				continue;
1386			if (vq->v_specmountpoint != NULL) {
1387				error = EBUSY;
1388				break;
1389			}
1390		}
1391	}
1392	return (error);
1393}
1394
1395#ifdef NFSSERVER
1396/*
1397 * Build hash lists of net addresses and hang them off the mount point.
1398 * Called by vfs_export() to set up the lists of export addresses.
1399 */
1400int
1401vfs_hang_addrlist(struct mount *mp, struct netexport *nep,
1402    struct export_args *argp)
1403{
1404	struct netcred *np;
1405	struct radix_node_head *rnh;
1406	int nplen, i;
1407	struct radix_node *rn;
1408	struct sockaddr *saddr, *smask = 0;
1409	int error;
1410
1411	if (argp->ex_addrlen == 0) {
1412		if (mp->mnt_flag & MNT_DEFEXPORTED)
1413			return (EPERM);
1414		np = &nep->ne_defexported;
1415		/* fill in the kernel's ucred from userspace's xucred */
1416		if ((error = crfromxucred(&np->netc_anon, &argp->ex_anon)))
1417			return (error);
1418		mp->mnt_flag |= MNT_DEFEXPORTED;
1419		goto finish;
1420	}
1421	if (argp->ex_addrlen > MLEN || argp->ex_masklen > MLEN ||
1422	    argp->ex_addrlen < 0 || argp->ex_masklen < 0)
1423		return (EINVAL);
1424	nplen = sizeof(struct netcred) + argp->ex_addrlen + argp->ex_masklen;
1425	np = (struct netcred *)malloc(nplen, M_NETADDR, M_WAITOK|M_ZERO);
1426	saddr = (struct sockaddr *)(np + 1);
1427	error = copyin(argp->ex_addr, saddr, argp->ex_addrlen);
1428	if (error)
1429		goto out;
1430	if (saddr->sa_len > argp->ex_addrlen)
1431		saddr->sa_len = argp->ex_addrlen;
1432	if (argp->ex_masklen) {
1433		smask = (struct sockaddr *)((caddr_t)saddr + argp->ex_addrlen);
1434		error = copyin(argp->ex_mask, smask, argp->ex_masklen);
1435		if (error)
1436			goto out;
1437		if (smask->sa_len > argp->ex_masklen)
1438			smask->sa_len = argp->ex_masklen;
1439	}
1440	/* fill in the kernel's ucred from userspace's xucred */
1441	if ((error = crfromxucred(&np->netc_anon, &argp->ex_anon)))
1442		goto out;
1443	i = saddr->sa_family;
1444	switch (i) {
1445	case AF_INET:
1446		if ((rnh = nep->ne_rtable_inet) == NULL) {
1447			if (!rn_inithead((void **)&nep->ne_rtable_inet,
1448			    offsetof(struct sockaddr_in, sin_addr))) {
1449				error = ENOBUFS;
1450				goto out;
1451			}
1452			rnh = nep->ne_rtable_inet;
1453		}
1454		break;
1455	default:
1456		error = EINVAL;
1457		goto out;
1458	}
1459	rn = rn_addroute(saddr, smask, rnh, np->netc_rnodes, 0);
1460	if (rn == 0 || np != (struct netcred *)rn) { /* already exists */
1461		error = EPERM;
1462		goto out;
1463	}
1464finish:
1465	np->netc_exflags = argp->ex_flags;
1466	return (0);
1467out:
1468	free(np, M_NETADDR, nplen);
1469	return (error);
1470}
1471
1472int
1473vfs_free_netcred(struct radix_node *rn, void *w, u_int id)
1474{
1475	struct radix_node_head *rnh = (struct radix_node_head *)w;
1476
1477	rn_delete(rn->rn_key, rn->rn_mask, rnh, NULL);
1478	free(rn, M_NETADDR, 0);
1479	return (0);
1480}
1481
1482/*
1483 * Free the net address hash lists that are hanging off the mount points.
1484 */
1485void
1486vfs_free_addrlist(struct netexport *nep)
1487{
1488	struct radix_node_head *rnh;
1489
1490	if ((rnh = nep->ne_rtable_inet) != NULL) {
1491		rn_walktree(rnh, vfs_free_netcred, rnh);
1492		free(rnh, M_RTABLE, 0);
1493		nep->ne_rtable_inet = NULL;
1494	}
1495}
1496#endif /* NFSSERVER */
1497
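/*
 * Update the export list of a mount point: delete the existing list
 * and/or hang a new list of export addresses, as requested in argp.
 */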
1498int
1499vfs_export(struct mount *mp, struct netexport *nep, struct export_args *argp)
1500{
1501#ifdef NFSSERVER
1502	int error;
1503
1504	if (argp->ex_flags & MNT_DELEXPORT) {
1505		vfs_free_addrlist(nep);
1506		mp->mnt_flag &= ~(MNT_EXPORTED | MNT_DEFEXPORTED);
1507	}
1508	if (argp->ex_flags & MNT_EXPORTED) {
1509		if ((error = vfs_hang_addrlist(mp, nep, argp)) != 0)
1510			return (error);
1511		mp->mnt_flag |= MNT_EXPORTED;
1512	}
1513	return (0);
1514#else
1515	return (ENOTSUP);
1516#endif /* NFSSERVER */
1517}
1518
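/*
 * Look up the export credentials that apply to a request from the
 * network address in nam, falling back to the default export if no
 * specific address matches.
 */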
1519struct netcred *
1520vfs_export_lookup(struct mount *mp, struct netexport *nep, struct mbuf *nam)
1521{
1522#ifdef NFSSERVER
1523	struct netcred *np;
1524	struct radix_node_head *rnh;
1525	struct sockaddr *saddr;
1526
1527	np = NULL;
1528	if (mp->mnt_flag & MNT_EXPORTED) {
1529		/*
1530		 * Lookup in the export list first.
1531		 */
1532		if (nam != NULL) {
1533			saddr = mtod(nam, struct sockaddr *);
1534			switch (saddr->sa_family) {
1535			case AF_INET:
1536				rnh = nep->ne_rtable_inet;
1537				break;
1538			default:
1539				rnh = NULL;
1540				break;
1541			}
1542			if (rnh != NULL)
1543				np = (struct netcred *)rn_match(saddr, rnh);
1544		}
1545		/*
1546		 * If no address match, use the default if it exists.
1547		 */
1548		if (np == NULL && mp->mnt_flag & MNT_DEFEXPORTED)
1549			np = &nep->ne_defexported;
1550	}
1551	return (np);
1552#else
1553	return (NULL);
1554#endif /* NFSSERVER */
1555}
1556
1557/*
1558 * Do the usual access checking.
1559 * file_mode, uid and gid are from the vnode in question,
1560 * while acc_mode and cred are from the VOP_ACCESS parameter list.
1561 */
1562int
1563vaccess(enum vtype type, mode_t file_mode, uid_t uid, gid_t gid,
1564    mode_t acc_mode, struct ucred *cred)
1565{
1566	mode_t mask;
1567
1568	/* User id 0 always gets read/write access. */
1569	if (cred->cr_uid == 0) {
1570		/* For VEXEC, at least one of the execute bits must be set. */
1571		if ((acc_mode & VEXEC) && type != VDIR &&
1572		    (file_mode & (S_IXUSR|S_IXGRP|S_IXOTH)) == 0)
1573			return EACCES;
1574		return 0;
1575	}
1576
1577	mask = 0;
1578
1579	/* Otherwise, check the owner. */
1580	if (cred->cr_uid == uid) {
1581		if (acc_mode & VEXEC)
1582			mask |= S_IXUSR;
1583		if (acc_mode & VREAD)
1584			mask |= S_IRUSR;
1585		if (acc_mode & VWRITE)
1586			mask |= S_IWUSR;
1587		return (file_mode & mask) == mask ? 0 : EACCES;
1588	}
1589
1590	/* Otherwise, check the groups. */
1591	if (groupmember(gid, cred)) {
1592		if (acc_mode & VEXEC)
1593			mask |= S_IXGRP;
1594		if (acc_mode & VREAD)
1595			mask |= S_IRGRP;
1596		if (acc_mode & VWRITE)
1597			mask |= S_IWGRP;
1598		return (file_mode & mask) == mask ? 0 : EACCES;
1599	}
1600
1601	/* Otherwise, check everyone else. */
1602	if (acc_mode & VEXEC)
1603		mask |= S_IXOTH;
1604	if (acc_mode & VREAD)
1605		mask |= S_IROTH;
1606	if (acc_mode & VWRITE)
1607		mask |= S_IWOTH;
1608	return (file_mode & mask) == mask ? 0 : EACCES;
1609}
1610
1611struct rwlock vfs_stall_lock = RWLOCK_INITIALIZER("vfs_stall");
1612
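/*
 * Sync and busy every mounted file system when stall is set, and
 * release them again when it is cleared.  While the system is
 * stalled, vfs_stall_barrier() blocks new file system activity.
 */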
1613int
1614vfs_stall(struct proc *p, int stall)
1615{
1616	struct mount *mp;
1617	int allerror = 0, error;
1618
1619	if (stall)
1620		rw_enter_write(&vfs_stall_lock);
1621
1622	/*
1623	 * The loop variable mp is protected by vfs_busy() so that it cannot
1624	 * be unmounted while VFS_SYNC() sleeps.  Traverse forward to keep the
1625	 * lock order consistent with dounmount().
1626	 */
1627	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
1628		if (stall) {
1629			error = vfs_busy(mp, VB_WRITE|VB_WAIT|VB_DUPOK);
1630			if (error) {
1631				printf("%s: busy\n", mp->mnt_stat.f_mntonname);
1632				allerror = error;
1633				continue;
1634			}
1635			uvm_vnp_sync(mp);
1636			error = VFS_SYNC(mp, MNT_WAIT, stall, p->p_ucred, p);
1637			if (error) {
1638				printf("%s: failed to sync\n", mp->mnt_stat.f_mntonname);
1639				vfs_unbusy(mp);
1640				allerror = error;
1641				continue;
1642			}
1643			mp->mnt_flag |= MNT_STALLED;
1644		} else {
1645			if (mp->mnt_flag & MNT_STALLED) {
1646				vfs_unbusy(mp);
1647				mp->mnt_flag &= ~MNT_STALLED;
1648			}
1649		}
1650	}
1651
1652	if (!stall)
1653		rw_exit_write(&vfs_stall_lock);
1654
1655	return (allerror);
1656}
1657
1658void
1659vfs_stall_barrier(void)
1660{
1661	rw_enter_read(&vfs_stall_lock);
1662	rw_exit_read(&vfs_stall_lock);
1663}
1664
1665/*
1666 * Unmount all file systems.
1667 * We traverse the list in reverse order under the assumption that doing so
1668 * will avoid needing to worry about dependencies.
1669 */
1670void
1671vfs_unmountall(void)
1672{
1673	struct mount *mp, *nmp;
1674	int allerror, error, again = 1;
1675
1676 retry:
1677	allerror = 0;
1678	TAILQ_FOREACH_REVERSE_SAFE(mp, &mountlist, mntlist, mnt_list, nmp) {
1679		if (vfs_busy(mp, VB_WRITE|VB_NOWAIT))
1680			continue;
1681		/* XXX Here is a race, the next pointer is not locked. */
1682		if ((error = dounmount(mp, MNT_FORCE, curproc)) != 0) {
1683			printf("unmount of %s failed with error %d\n",
1684			    mp->mnt_stat.f_mntonname, error);
1685			allerror = 1;
1686		}
1687	}
1688
1689	if (allerror) {
1690		printf("WARNING: some file systems would not unmount\n");
1691		if (again) {
1692			printf("retrying\n");
1693			again = 0;
1694			goto retry;
1695		}
1696	}
1697}
1698
1699/*
1700 * Sync and unmount file systems before shutting down.
1701 */
1702void
1703vfs_shutdown(struct proc *p)
1704{
1705#ifdef ACCOUNTING
1706	acct_shutdown();
1707#endif
1708
1709	printf("syncing disks...");
1710
1711	if (panicstr == 0) {
1712		/* Sync before unmount, in case we hang on something. */
1713		sys_sync(p, NULL, NULL);
1714		vfs_unmountall();
1715	}
1716
1717#if NSOFTRAID > 0
1718	sr_quiesce();
1719#endif
1720
1721	if (vfs_syncwait(p, 1))
1722		printf(" giving up\n");
1723	else
1724		printf(" done\n");
1725}
1726
1727/*
1728 * Perform a sync() operation and wait for buffers to flush.
1729 */
1730int
1731vfs_syncwait(struct proc *p, int verbose)
1732{
1733	struct buf *bp;
1734	int iter, nbusy, dcount, s;
1735#ifdef MULTIPROCESSOR
1736	int hold_count;
1737#endif
1738
1739	sys_sync(p, NULL, NULL);
1740
1741	/* Wait for sync to finish. */
1742	dcount = 10000;
1743	for (iter = 0; iter < 20; iter++) {
1744		nbusy = 0;
1745		LIST_FOREACH(bp, &bufhead, b_list) {
1746			if ((bp->b_flags & (B_BUSY|B_INVAL|B_READ)) == B_BUSY)
1747				nbusy++;
1748			/*
1749			 * With soft updates, some buffers that are
1750			 * written will be remarked as dirty until other
1751			 * buffers are written.
1752			 */
1753			if (bp->b_flags & B_DELWRI) {
1754				s = splbio();
1755				bremfree(bp);
1756				buf_acquire(bp);
1757				splx(s);
1758				nbusy++;
1759				bawrite(bp);
1760				if (dcount-- <= 0) {
1761					if (verbose)
1762						printf("softdep ");
1763					return 1;
1764				}
1765			}
1766		}
1767		if (nbusy == 0)
1768			break;
1769		if (verbose)
1770			printf("%d ", nbusy);
1771#ifdef MULTIPROCESSOR
1772		if (_kernel_lock_held())
1773			hold_count = __mp_release_all(&kernel_lock);
1774		else
1775			hold_count = 0;
1776#endif
1777		DELAY(40000 * iter);
1778#ifdef MULTIPROCESSOR
1779		if (hold_count)
1780			__mp_acquire_count(&kernel_lock, hold_count);
1781#endif
1782	}
1783
1784	return nbusy;
1785}
1786
1787/*
1788 * POSIX file system related system variables.
1789 */
1790int
1791fs_posix_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp,
1792    void *newp, size_t newlen, struct proc *p)
1793{
1794	/* all sysctl names at this level are terminal */
1795	if (namelen != 1)
1796		return (ENOTDIR);
1797
1798	switch (name[0]) {
1799	case FS_POSIX_SETUID:
1800		if (newp && securelevel > 0)
1801			return (EPERM);
1802		return(sysctl_int(oldp, oldlenp, newp, newlen, &suid_clear));
1803	default:
1804		return (EOPNOTSUPP);
1805	}
1806	/* NOTREACHED */
1807}
1808
1809/*
1810 * file system related system variables.
1811 */
1812int
1813fs_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
1814    size_t newlen, struct proc *p)
1815{
1816	sysctlfn *fn;
1817
1818	switch (name[0]) {
1819	case FS_POSIX:
1820		fn = fs_posix_sysctl;
1821		break;
1822	default:
1823		return (EOPNOTSUPP);
1824	}
1825	return (*fn)(name + 1, namelen - 1, oldp, oldlenp, newp, newlen, p);
1826}
1827
1828
1829/*
1830 * Routines dealing with vnodes and buffers
1831 */
1832
1833/*
1834 * Wait for all outstanding I/Os to complete
1835 *
1836 * Manipulates v_numoutput. Must be called at splbio()
1837 */
1838int
1839vwaitforio(struct vnode *vp, int slpflag, char *wmesg, int timeo)
1840{
1841	int error = 0;
1842
1843	splassert(IPL_BIO);
1844
1845	while (vp->v_numoutput) {
1846		vp->v_bioflag |= VBIOWAIT;
1847		error = tsleep(&vp->v_numoutput,
1848		    slpflag | (PRIBIO + 1), wmesg, timeo);
1849		if (error)
1850			break;
1851	}
1852
1853	return (error);
1854}
1855
1856/*
1857 * Update outstanding I/O count and do wakeup if requested.
1858 *
1859 * Manipulates v_numoutput. Must be called at splbio()
1860 */
1861void
1862vwakeup(struct vnode *vp)
1863{
1864	splassert(IPL_BIO);
1865
1866	if (vp != NULL) {
1867		if (vp->v_numoutput-- == 0)
1868			panic("vwakeup: neg numoutput");
1869		if ((vp->v_bioflag & VBIOWAIT) && vp->v_numoutput == 0) {
1870			vp->v_bioflag &= ~VBIOWAIT;
1871			wakeup(&vp->v_numoutput);
1872		}
1873	}
1874}
1875
1876/*
1877 * Flush out and invalidate all buffers associated with a vnode.
1878 * Called with the underlying object locked.
1879 */
1880int
1881vinvalbuf(struct vnode *vp, int flags, struct ucred *cred, struct proc *p,
1882    int slpflag, int slptimeo)
1883{
1884	struct buf *bp;
1885	struct buf *nbp, *blist;
1886	int s, error;
1887
1888#ifdef VFSLCKDEBUG
1889	if ((vp->v_flag & VLOCKSWORK) && !VOP_ISLOCKED(vp))
1890		panic("%s: vp isn't locked, vp %p", __func__, vp);
1891#endif
1892
1893	if (flags & V_SAVE) {
1894		s = splbio();
1895		vwaitforio(vp, 0, "vinvalbuf", 0);
1896		if (!LIST_EMPTY(&vp->v_dirtyblkhd)) {
1897			splx(s);
1898			if ((error = VOP_FSYNC(vp, cred, MNT_WAIT, p)) != 0)
1899				return (error);
1900			s = splbio();
1901			if (vp->v_numoutput > 0 ||
1902			    !LIST_EMPTY(&vp->v_dirtyblkhd))
1903				panic("%s: dirty bufs, vp %p", __func__, vp);
1904		}
1905		splx(s);
1906	}
1907loop:
1908	s = splbio();
1909	for (;;) {
1910		if ((blist = LIST_FIRST(&vp->v_cleanblkhd)) &&
1911		    (flags & V_SAVEMETA))
1912			while (blist && blist->b_lblkno < 0)
1913				blist = LIST_NEXT(blist, b_vnbufs);
1914		if (blist == NULL &&
1915		    (blist = LIST_FIRST(&vp->v_dirtyblkhd)) &&
1916		    (flags & V_SAVEMETA))
1917			while (blist && blist->b_lblkno < 0)
1918				blist = LIST_NEXT(blist, b_vnbufs);
1919		if (!blist)
1920			break;
1921
1922		for (bp = blist; bp; bp = nbp) {
1923			nbp = LIST_NEXT(bp, b_vnbufs);
1924			if (flags & V_SAVEMETA && bp->b_lblkno < 0)
1925				continue;
1926			if (bp->b_flags & B_BUSY) {
1927				bp->b_flags |= B_WANTED;
1928				error = tsleep(bp, slpflag | (PRIBIO + 1),
1929				    "vinvalbuf", slptimeo);
1930				if (error) {
1931					splx(s);
1932					return (error);
1933				}
1934				break;
1935			}
1936			bremfree(bp);
1937			/*
1938			 * XXX Since there are no node locks for NFS, I believe
1939			 * there is a slight chance that a delayed write will
1940			 * occur while sleeping just above, so check for it.
1941			 */
1942			if ((bp->b_flags & B_DELWRI) && (flags & V_SAVE)) {
1943				buf_acquire(bp);
1944				splx(s);
1945				(void) VOP_BWRITE(bp);
1946				goto loop;
1947			}
1948			buf_acquire_nomap(bp);
1949			bp->b_flags |= B_INVAL;
1950			brelse(bp);
1951		}
1952	}
1953	if (!(flags & V_SAVEMETA) &&
1954	    (!LIST_EMPTY(&vp->v_dirtyblkhd) || !LIST_EMPTY(&vp->v_cleanblkhd)))
1955		panic("%s: flush failed, vp %p", __func__, vp);
1956	splx(s);
1957	return (0);
1958}
1959
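/*
 * Write out all dirty buffers associated with a vnode.  If sync is
 * set, wait for the I/O to complete before returning.
 */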
1960void
1961vflushbuf(struct vnode *vp, int sync)
1962{
1963	struct buf *bp, *nbp;
1964	int s;
1965
1966loop:
1967	s = splbio();
1968	LIST_FOREACH_SAFE(bp, &vp->v_dirtyblkhd, b_vnbufs, nbp) {
1969		if ((bp->b_flags & B_BUSY))
1970			continue;
1971		if ((bp->b_flags & B_DELWRI) == 0)
1972			panic("vflushbuf: not dirty");
1973		bremfree(bp);
1974		buf_acquire(bp);
1975		splx(s);
1976		/*
1977		 * Wait for I/O associated with indirect blocks to complete,
1978		 * since there is no way to quickly wait for them below.
1979		 */
1980		if (bp->b_vp == vp || sync == 0)
1981			(void) bawrite(bp);
1982		else
1983			(void) bwrite(bp);
1984		goto loop;
1985	}
1986	if (sync == 0) {
1987		splx(s);
1988		return;
1989	}
1990	vwaitforio(vp, 0, "vflushbuf", 0);
1991	if (!LIST_EMPTY(&vp->v_dirtyblkhd)) {
1992		splx(s);
1993#ifdef DIAGNOSTIC
1994		vprint("vflushbuf: dirty", vp);
1995#endif
1996		goto loop;
1997	}
1998	splx(s);
1999}
2000
2001/*
2002 * Associate a buffer with a vnode.
2003 *
2004 * Manipulates buffer vnode queues. Must be called at splbio().
2005 */
2006void
2007bgetvp(struct vnode *vp, struct buf *bp)
2008{
2009	splassert(IPL_BIO);
2010
2012	if (bp->b_vp)
2013		panic("bgetvp: not free");
2014	vhold(vp);
2015	bp->b_vp = vp;
2016	if (vp->v_type == VBLK || vp->v_type == VCHR)
2017		bp->b_dev = vp->v_rdev;
2018	else
2019		bp->b_dev = NODEV;
2020	/*
2021	 * Insert onto list for new vnode.
2022	 */
2023	bufinsvn(bp, &vp->v_cleanblkhd);
2024}
2025
2026/*
2027 * Disassociate a buffer from a vnode.
2028 *
2029 * Manipulates vnode buffer queues. Must be called at splbio().
2030 */
2031void
2032brelvp(struct buf *bp)
2033{
2034	struct vnode *vp;
2035
2036	splassert(IPL_BIO);
2037
2038	if ((vp = bp->b_vp) == (struct vnode *) 0)
2039		panic("brelvp: NULL");
2040	/*
2041	 * Delete from old vnode list, if on one.
2042	 */
2043	if (LIST_NEXT(bp, b_vnbufs) != NOLIST)
2044		bufremvn(bp);
2045	if ((vp->v_bioflag & VBIOONSYNCLIST) &&
2046	    LIST_EMPTY(&vp->v_dirtyblkhd)) {
2047		vp->v_bioflag &= ~VBIOONSYNCLIST;
2048		LIST_REMOVE(vp, v_synclist);
2049	}
2050	bp->b_vp = NULL;
2051
2052	vdrop(vp);
2053}
2054
2055/*
2056 * Replaces the current vnode associated with the buffer, if any,
2057 * with a new vnode.
2058 *
2059 * If an output I/O is pending on the buffer, the old vnode
2060 * I/O count is adjusted.
2061 *
2062 * Ignores vnode buffer queues. Must be called at splbio().
2063 */
2064void
2065buf_replacevnode(struct buf *bp, struct vnode *newvp)
2066{
2067	struct vnode *oldvp = bp->b_vp;
2068
2069	splassert(IPL_BIO);
2070
2071	if (oldvp)
2072		brelvp(bp);
2073
2074	if ((bp->b_flags & (B_READ | B_DONE)) == 0) {
2075		newvp->v_numoutput++;	/* put it on swapdev */
2076		vwakeup(oldvp);
2077	}
2078
2079	bgetvp(newvp, bp);
2080	bufremvn(bp);
2081}
2082
2083/*
2084 * Used to assign buffers to the appropriate clean or dirty list on
2085 * the vnode and to add newly dirty vnodes to the appropriate
2086 * filesystem syncer list.
2087 *
2088 * Manipulates vnode buffer queues. Must be called at splbio().
2089 */
2090void
2091reassignbuf(struct buf *bp)
2092{
2093	struct buflists *listheadp;
2094	int delay;
2095	struct vnode *vp = bp->b_vp;
2096
2097	splassert(IPL_BIO);
2098
2099	/*
2100	 * Delete from old vnode list, if on one.
2101	 */
2102	if (LIST_NEXT(bp, b_vnbufs) != NOLIST)
2103		bufremvn(bp);
2104
2105	/*
2106	 * If dirty, put on list of dirty buffers;
2107	 * otherwise insert onto list of clean buffers.
2108	 */
2109	if ((bp->b_flags & B_DELWRI) == 0) {
2110		listheadp = &vp->v_cleanblkhd;
2111		if ((vp->v_bioflag & VBIOONSYNCLIST) &&
2112		    LIST_EMPTY(&vp->v_dirtyblkhd)) {
2113			vp->v_bioflag &= ~VBIOONSYNCLIST;
2114			LIST_REMOVE(vp, v_synclist);
2115		}
2116	} else {
2117		listheadp = &vp->v_dirtyblkhd;
2118		if ((vp->v_bioflag & VBIOONSYNCLIST) == 0) {
2119			switch (vp->v_type) {
2120			case VDIR:
2121				delay = syncdelay / 2;
2122				break;
2123			case VBLK:
2124				if (vp->v_specmountpoint != NULL) {
2125					delay = syncdelay / 3;
2126					break;
2127				}
2128				/* FALLTHROUGH */
2129			default:
2130				delay = syncdelay;
2131			}
2132			vn_syncer_add_to_worklist(vp, delay);
2133		}
2134	}
2135	bufinsvn(bp, listheadp);
2136}
2137
2138/*
2139 * Check if vnode represents a disk device
2140 */
2141int
2142vn_isdisk(struct vnode *vp, int *errp)
2143{
2144	if (vp->v_type != VBLK && vp->v_type != VCHR)
2145		return (0);
2146
2147	return (1);
2148}
2149
2150#ifdef DDB
2151#include <machine/db_machdep.h>
2152#include <ddb/db_interface.h>
2153
2154void
2155vfs_buf_print(void *b, int full,
2156    int (*pr)(const char *, ...) __attribute__((__format__(__kprintf__,1,2))))
2157{
2158	struct buf *bp = b;
2159
2160	(*pr)("  vp %p lblkno 0x%llx blkno 0x%llx dev 0x%x\n"
2161	      "  proc %p error %d flags %lb\n",
2162	    bp->b_vp, (int64_t)bp->b_lblkno, (int64_t)bp->b_blkno, bp->b_dev,
2163	    bp->b_proc, bp->b_error, bp->b_flags, B_BITS);
2164
2165	(*pr)("  bufsize 0x%lx bcount 0x%lx resid 0x%lx\n"
2166	      "  data %p saveaddr %p dep %p iodone %p\n",
2167	    bp->b_bufsize, bp->b_bcount, (long)bp->b_resid,
2168	    bp->b_data, bp->b_saveaddr,
2169	    LIST_FIRST(&bp->b_dep), bp->b_iodone);
2170
2171	(*pr)("  dirty {off 0x%x end 0x%x} valid {off 0x%x end 0x%x}\n",
2172	    bp->b_dirtyoff, bp->b_dirtyend, bp->b_validoff, bp->b_validend);
2173
2174#ifdef FFS_SOFTUPDATES
2175	if (full)
2176		softdep_print(bp, full, pr);
2177#endif
2178}
2179
2180const char *vtypes[] = { VTYPE_NAMES };
2181const char *vtags[] = { VTAG_NAMES };
2182
2183void
2184vfs_vnode_print(void *v, int full,
2185    int (*pr)(const char *, ...) __attribute__((__format__(__kprintf__,1,2))))
2186{
2187	struct vnode *vp = v;
2188
2189	(*pr)("tag %s(%d) type %s(%d) mount %p typedata %p\n",
2190	      (u_int)vp->v_tag >= nitems(vtags)? "<unk>":vtags[vp->v_tag],
2191	      vp->v_tag,
2192	      (u_int)vp->v_type >= nitems(vtypes)? "<unk>":vtypes[vp->v_type],
2193	      vp->v_type, vp->v_mount, vp->v_mountedhere);
2194
2195	(*pr)("data %p usecount %d writecount %d holdcnt %d numoutput %d\n",
2196	      vp->v_data, vp->v_usecount, vp->v_writecount,
2197	      vp->v_holdcnt, vp->v_numoutput);
2198
2199	/* uvm_object_printit(&vp->v_uobj, full, pr); */
2200
2201	if (full) {
2202		struct buf *bp;
2203
2204		(*pr)("clean bufs:\n");
2205		LIST_FOREACH(bp, &vp->v_cleanblkhd, b_vnbufs) {
2206			(*pr)(" bp %p\n", bp);
2207			vfs_buf_print(bp, full, pr);
2208		}
2209
2210		(*pr)("dirty bufs:\n");
2211		LIST_FOREACH(bp, &vp->v_dirtyblkhd, b_vnbufs) {
2212			(*pr)(" bp %p\n", bp);
2213			vfs_buf_print(bp, full, pr);
2214		}
2215	}
2216}
2217
2218void
2219vfs_mount_print(struct mount *mp, int full,
2220    int (*pr)(const char *, ...) __attribute__((__format__(__kprintf__,1,2))))
2221{
2222	struct vfsconf *vfc = mp->mnt_vfc;
2223	struct vnode *vp;
2224	int cnt;
2225
2226	(*pr)("flags %b\nvnodecovered %p syncer %p data %p\n",
2227	    mp->mnt_flag, MNT_BITS,
2228	    mp->mnt_vnodecovered, mp->mnt_syncer, mp->mnt_data);
2229
2230	(*pr)("vfsconf: ops %p name \"%s\" num %d ref %d flags 0x%x\n",
2231	    vfc->vfc_vfsops, vfc->vfc_name, vfc->vfc_typenum,
2232	    vfc->vfc_refcount, vfc->vfc_flags);
2233
2234	(*pr)("statvfs cache: bsize %x iosize %x\nblocks %llu free %llu avail %lld\n",
2235	    mp->mnt_stat.f_bsize, mp->mnt_stat.f_iosize, mp->mnt_stat.f_blocks,
2236	    mp->mnt_stat.f_bfree, mp->mnt_stat.f_bavail);
2237
2238	(*pr)("  files %llu ffiles %llu favail %lld\n", mp->mnt_stat.f_files,
2239	    mp->mnt_stat.f_ffree, mp->mnt_stat.f_favail);
2240
2241	(*pr)("  f_fsidx {0x%x, 0x%x} owner %u ctime 0x%llx\n",
2242	    mp->mnt_stat.f_fsid.val[0], mp->mnt_stat.f_fsid.val[1],
2243	    mp->mnt_stat.f_owner, mp->mnt_stat.f_ctime);
2244
2245	(*pr)("  syncwrites %llu asyncwrites = %llu\n",
2246	    mp->mnt_stat.f_syncwrites, mp->mnt_stat.f_asyncwrites);
2247
2248	(*pr)("  syncreads %llu asyncreads = %llu\n",
2249	    mp->mnt_stat.f_syncreads, mp->mnt_stat.f_asyncreads);
2250
2251	(*pr)("  fstype \"%s\" mnton \"%s\" mntfrom \"%s\" mntspec \"%s\"\n",
2252	    mp->mnt_stat.f_fstypename, mp->mnt_stat.f_mntonname,
2253	    mp->mnt_stat.f_mntfromname, mp->mnt_stat.f_mntfromspec);
2254
2255	(*pr)("locked vnodes:");
2256	/* XXX would take mountlist lock, except ddb has no context */
2257	cnt = 0;
2258	LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
2259		if (VOP_ISLOCKED(vp)) {
2260			if (cnt == 0)
2261				(*pr)("\n  %p", vp);
2262			else if ((cnt % (72 / (sizeof(void *) * 2 + 4))) == 0)
2263				(*pr)(",\n  %p", vp);
2264			else
2265				(*pr)(", %p", vp);
2266			cnt++;
2267		}
2268	}
2269	(*pr)("\n");
2270
2271	if (full) {
2272		(*pr)("all vnodes:");
2273		/* XXX would take mountlist lock, except ddb has no context */
2274		cnt = 0;
2275		LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
2276			if (cnt == 0)
2277				(*pr)("\n  %p", vp);
2278			else if ((cnt % (72 / (sizeof(void *) * 2 + 4))) == 0)
2279				(*pr)(",\n  %p", vp);
2280			else
2281				(*pr)(", %p", vp);
2282			cnt++;
2283		}
2284		(*pr)("\n");
2285	}
2286}
2287#endif /* DDB */
2288
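/*
 * Copy the cached statfs information of a mount point into *sbp,
 * unless sbp already points at the mount point's own statistics.
 */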
2289void
2290copy_statfs_info(struct statfs *sbp, const struct mount *mp)
2291{
2292	const struct statfs *mbp;
2293
2294	strncpy(sbp->f_fstypename, mp->mnt_vfc->vfc_name, MFSNAMELEN);
2295
2296	if (sbp == (mbp = &mp->mnt_stat))
2297		return;
2298
2299	sbp->f_fsid = mbp->f_fsid;
2300	sbp->f_owner = mbp->f_owner;
2301	sbp->f_flags = mbp->f_flags;
2302	sbp->f_syncwrites = mbp->f_syncwrites;
2303	sbp->f_asyncwrites = mbp->f_asyncwrites;
2304	sbp->f_syncreads = mbp->f_syncreads;
2305	sbp->f_asyncreads = mbp->f_asyncreads;
2306	sbp->f_namemax = mbp->f_namemax;
2307	memcpy(sbp->f_mntonname, mp->mnt_stat.f_mntonname, MNAMELEN);
2308	memcpy(sbp->f_mntfromname, mp->mnt_stat.f_mntfromname, MNAMELEN);
2309	memcpy(sbp->f_mntfromspec, mp->mnt_stat.f_mntfromspec, MNAMELEN);
2310	memcpy(&sbp->mount_info, &mp->mnt_stat.mount_info,
2311	    sizeof(union mount_info));
2312}
2313