1/*	$OpenBSD: vfs_subr.c,v 1.176 2009/06/03 04:30:57 beck Exp $	*/
2/*	$NetBSD: vfs_subr.c,v 1.53 1996/04/22 01:39:13 christos Exp $	*/
3
4/*
5 * Copyright (c) 1989, 1993
6 *	The Regents of the University of California.  All rights reserved.
7 * (c) UNIX System Laboratories, Inc.
8 * All or some portions of this file are derived from material licensed
9 * to the University of California by American Telephone and Telegraph
10 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
11 * the permission of UNIX System Laboratories, Inc.
12 *
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
15 * are met:
16 * 1. Redistributions of source code must retain the above copyright
17 *    notice, this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 *    notice, this list of conditions and the following disclaimer in the
20 *    documentation and/or other materials provided with the distribution.
21 * 3. Neither the name of the University nor the names of its contributors
22 *    may be used to endorse or promote products derived from this software
23 *    without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 * SUCH DAMAGE.
36 *
37 *	@(#)vfs_subr.c	8.13 (Berkeley) 4/18/94
38 */
39
40/*
41 * External virtual filesystem routines
42 */
43
44#include <sys/param.h>
45#include <sys/systm.h>
46#include <sys/proc.h>
47#include <sys/mount.h>
48#include <sys/time.h>
49#include <sys/fcntl.h>
50#include <sys/kernel.h>
51#include <sys/vnode.h>
52#include <sys/stat.h>
53#include <sys/namei.h>
54#include <sys/ucred.h>
55#include <sys/buf.h>
56#include <sys/errno.h>
57#include <sys/malloc.h>
58#include <sys/domain.h>
59#include <sys/mbuf.h>
60#include <sys/syscallargs.h>
61#include <sys/pool.h>
62#include <sys/tree.h>
63
64#include <uvm/uvm_extern.h>
65#include <sys/sysctl.h>
66
67#include <miscfs/specfs/specdev.h>
68
69enum vtype iftovt_tab[16] = {
70	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
71	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
72};
73
74int	vttoif_tab[9] = {
75	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
76	S_IFSOCK, S_IFIFO, S_IFMT,
77};
78
79int doforce = 1;		/* 1 => permit forcible unmounting */
80int prtactive = 0;		/* 1 => print out reclaim of active vnodes */
81int suid_clear = 1;		/* 1 => clear SUID / SGID on owner change */
82
83/*
84 * Insq/Remq for the vnode usage lists.
85 */
86#define	bufinsvn(bp, dp)	LIST_INSERT_HEAD(dp, bp, b_vnbufs)
87#define	bufremvn(bp) {							\
88	LIST_REMOVE(bp, b_vnbufs);					\
89	LIST_NEXT(bp, b_vnbufs) = NOLIST;				\
90}
91
92struct freelst vnode_hold_list;	/* list of vnodes referencing buffers */
93struct freelst vnode_free_list;	/* vnode free list */
94
95struct mntlist mountlist;	/* mounted filesystem list */
96
97void	vclean(struct vnode *, int, struct proc *);
98void	vhold(struct vnode *);
99void	vdrop(struct vnode *);
100
101void insmntque(struct vnode *, struct mount *);
102int getdevvp(dev_t, struct vnode **, enum vtype);
103
104int vfs_hang_addrlist(struct mount *, struct netexport *,
105				  struct export_args *);
106int vfs_free_netcred(struct radix_node *, void *);
107void vfs_free_addrlist(struct netexport *);
108void vputonfreelist(struct vnode *);
109
110int vflush_vnode(struct vnode *, void *);
111int maxvnodes;
112
113#ifdef DEBUG
114void printlockedvnodes(void);
115#endif
116
117struct pool vnode_pool;
118
119static int rb_buf_compare(struct buf *b1, struct buf *b2);
120RB_GENERATE(buf_rb_bufs, buf, b_rbbufs, rb_buf_compare);
121
122static int
123rb_buf_compare(struct buf *b1, struct buf *b2)
124{
125	if (b1->b_lblkno < b2->b_lblkno)
126		return(-1);
127	if (b1->b_lblkno > b2->b_lblkno)
128		return(1);
129	return(0);
130}
131
132/*
133 * Initialize the vnode management data structures.
134 */
135void
136vntblinit(void)
137{
138	/* buffer cache may need a vnode for each buffer */
139	maxvnodes = desiredvnodes;
140	pool_init(&vnode_pool, sizeof(struct vnode), 0, 0, 0, "vnodes",
141	    &pool_allocator_nointr);
142	TAILQ_INIT(&vnode_hold_list);
143	TAILQ_INIT(&vnode_free_list);
144	CIRCLEQ_INIT(&mountlist);
145	/*
146	 * Initialize the filesystem syncer.
147	 */
148	vn_initialize_syncerd();
149}
150
151/*
152 * Mark a mount point as busy. Used to synchronize access and to delay
153 * unmounting.
154 *
155 * Default behaviour is to attempt getting a READ lock and in case of an
156 * ongoing unmount, to wait for it to finish and then return failure.
157 */
158int
159vfs_busy(struct mount *mp, int flags)
160{
161	int rwflags = 0;
162
163	/* new mountpoints need their lock initialised */
164	if (mp->mnt_lock.rwl_name == NULL)
165		rw_init(&mp->mnt_lock, "vfslock");
166
167	if (flags & VB_WRITE)
168		rwflags |= RW_WRITE;
169	else
170		rwflags |= RW_READ;
171
172	if (flags & VB_WAIT)
173		rwflags |= RW_SLEEPFAIL;
174	else
175		rwflags |= RW_NOSLEEP;
176
177	if (rw_enter(&mp->mnt_lock, rwflags))
178		return (EBUSY);
179
180	return (0);
181}
182
183/*
184 * Free a busy file system
185 */
186void
187vfs_unbusy(struct mount *mp)
188{
189	rw_exit(&mp->mnt_lock);
190}
191
192int
193vfs_isbusy(struct mount *mp)
194{
195	if (RWLOCK_OWNER(&mp->mnt_lock) > 0)
196		return (1);
197	else
198		return (0);
199}
200
201/*
202 * Lookup a filesystem type, and if found allocate and initialize
203 * a mount structure for it.
204 *
205 * Devname is usually updated by mount(8) after booting.
206 */
207int
208vfs_rootmountalloc(char *fstypename, char *devname, struct mount **mpp)
209{
210	struct vfsconf *vfsp;
211	struct mount *mp;
212
213	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
214		if (!strcmp(vfsp->vfc_name, fstypename))
215			break;
216	if (vfsp == NULL)
217		return (ENODEV);
218	mp = malloc(sizeof(struct mount), M_MOUNT, M_WAITOK|M_ZERO);
219	(void)vfs_busy(mp, VB_READ|VB_NOWAIT);
220	LIST_INIT(&mp->mnt_vnodelist);
221	mp->mnt_vfc = vfsp;
222	mp->mnt_op = vfsp->vfc_vfsops;
223	mp->mnt_flag = MNT_RDONLY;
224	mp->mnt_vnodecovered = NULLVP;
225	vfsp->vfc_refcount++;
226	mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;
227	strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
228	mp->mnt_stat.f_mntonname[0] = '/';
229	(void)copystr(devname, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, 0);
230	*mpp = mp;
231 	return (0);
232 }
233
234/*
235 * Lookup a mount point by filesystem identifier.
236 */
237struct mount *
238vfs_getvfs(fsid_t *fsid)
239{
240	struct mount *mp;
241
242	CIRCLEQ_FOREACH(mp, &mountlist, mnt_list) {
243		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
244		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
245			return (mp);
246		}
247	}
248
249	return (NULL);
250}
251
252
253/*
254 * Get a new unique fsid
255 */
256void
257vfs_getnewfsid(struct mount *mp)
258{
259	static u_short xxxfs_mntid;
260
261	fsid_t tfsid;
262	int mtype;
263
264	mtype = mp->mnt_vfc->vfc_typenum;
265	mp->mnt_stat.f_fsid.val[0] = makedev(nblkdev + mtype, 0);
266	mp->mnt_stat.f_fsid.val[1] = mtype;
267	if (xxxfs_mntid == 0)
268		++xxxfs_mntid;
269	tfsid.val[0] = makedev(nblkdev + mtype, xxxfs_mntid);
270	tfsid.val[1] = mtype;
271	if (!CIRCLEQ_EMPTY(&mountlist)) {
272		while (vfs_getvfs(&tfsid)) {
273			tfsid.val[0]++;
274			xxxfs_mntid++;
275		}
276	}
277	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
278}
279
280/*
281 * Make a 'unique' number from a mount type name.
282 * Note that this is no longer used for ffs which
283 * now has an on-disk filesystem id.
284 */
285long
286makefstype(char *type)
287{
288	long rv;
289
290	for (rv = 0; *type; type++) {
291		rv <<= 2;
292		rv ^= *type;
293	}
294	return rv;
295}
296
297/*
298 * Set vnode attributes to VNOVAL
299 */
300void
301vattr_null(struct vattr *vap)
302{
303
304	vap->va_type = VNON;
305	/* XXX These next two used to be one line, but for a GCC bug. */
306	vap->va_size = VNOVAL;
307	vap->va_bytes = VNOVAL;
308	vap->va_mode = vap->va_nlink = vap->va_uid = vap->va_gid =
309		vap->va_fsid = vap->va_fileid =
310		vap->va_blocksize = vap->va_rdev =
311		vap->va_atime.tv_sec = vap->va_atime.tv_nsec =
312		vap->va_mtime.tv_sec = vap->va_mtime.tv_nsec =
313		vap->va_ctime.tv_sec = vap->va_ctime.tv_nsec =
314		vap->va_flags = vap->va_gen = VNOVAL;
315	vap->va_vaflags = 0;
316}
317
318/*
319 * Routines having to do with the management of the vnode table.
320 */
321extern int (**dead_vnodeop_p)(void *);
322long numvnodes;
323
324/*
325 * Return the next vnode from the free list.
326 */
327int
328getnewvnode(enum vtagtype tag, struct mount *mp, int (**vops)(void *),
329    struct vnode **vpp)
330{
331	struct proc *p = curproc;
332	struct freelst *listhd;
333	static int toggle;
334	struct vnode *vp;
335	int s;
336
337	/*
338	 * We must choose whether to allocate a new vnode or recycle an
339	 * existing one. The criterion for allocating a new one is that
340	 * the total number of vnodes is less than the number desired or
341	 * there are no vnodes on either free list. Generally we only
342	 * want to recycle vnodes that have no buffers associated with
343	 * them, so we look first on the vnode_free_list. If it is empty,
344	 * we next consider vnodes with referencing buffers on the
345	 * vnode_hold_list. The toggle ensures that half the time we
346	 * will use a buffer from the vnode_hold_list, and half the time
347	 * we will allocate a new one unless the list has grown to twice
348	 * the desired size. We are reticent to recycle vnodes from the
349	 * vnode_hold_list because we will lose the identity of all its
350	 * referencing buffers.
351	 */
352	toggle ^= 1;
353	if (numvnodes > 2 * maxvnodes)
354		toggle = 0;
355
356	s = splbio();
357	if ((numvnodes < maxvnodes) ||
358	    ((TAILQ_FIRST(listhd = &vnode_free_list) == NULL) &&
359	    ((TAILQ_FIRST(listhd = &vnode_hold_list) == NULL) || toggle))) {
360		splx(s);
361		vp = pool_get(&vnode_pool, PR_WAITOK | PR_ZERO);
362		RB_INIT(&vp->v_bufs_tree);
363		numvnodes++;
364	} else {
365		for (vp = TAILQ_FIRST(listhd); vp != NULLVP;
366		    vp = TAILQ_NEXT(vp, v_freelist)) {
367			if (VOP_ISLOCKED(vp) == 0)
368				break;
369		}
370		/*
371		 * Unless this is a bad time of the month, at most
372		 * the first NCPUS items on the free list are
373		 * locked, so this is close enough to being empty.
374		 */
375		if (vp == NULL) {
376			splx(s);
377			tablefull("vnode");
378			*vpp = 0;
379			return (ENFILE);
380		}
381
382#ifdef DIAGNOSTIC
383		if (vp->v_usecount) {
384			vprint("free vnode", vp);
385			panic("free vnode isn't");
386		}
387#endif
388
389		TAILQ_REMOVE(listhd, vp, v_freelist);
390		vp->v_bioflag &= ~VBIOONFREELIST;
391		splx(s);
392
393		if (vp->v_type != VBAD)
394			vgonel(vp, p);
395#ifdef DIAGNOSTIC
396		if (vp->v_data) {
397			vprint("cleaned vnode", vp);
398			panic("cleaned vnode isn't");
399		}
400		s = splbio();
401		if (vp->v_numoutput)
402			panic("Clean vnode has pending I/O's");
403		splx(s);
404#endif
405		vp->v_flag = 0;
406		vp->v_socket = 0;
407	}
408	vp->v_type = VNON;
409	cache_purge(vp);
410	vp->v_tag = tag;
411	vp->v_op = vops;
412	insmntque(vp, mp);
413	*vpp = vp;
414	vp->v_usecount = 1;
415	vp->v_data = 0;
416	simple_lock_init(&vp->v_uvm.u_obj.vmobjlock);
417	return (0);
418}
419
420/*
421 * Move a vnode from one mount queue to another.
422 */
423void
424insmntque(struct vnode *vp, struct mount *mp)
425{
426	/*
427	 * Delete from old mount point vnode list, if on one.
428	 */
429	if (vp->v_mount != NULL)
430		LIST_REMOVE(vp, v_mntvnodes);
431	/*
432	 * Insert into list of vnodes for the new mount point, if available.
433	 */
434	if ((vp->v_mount = mp) != NULL)
435		LIST_INSERT_HEAD(&mp->mnt_vnodelist, vp, v_mntvnodes);
436}
437
438/*
439 * Create a vnode for a block device.
440 * Used for root filesystem, argdev, and swap areas.
441 * Also used for memory file system special devices.
442 */
443int
444bdevvp(dev_t dev, struct vnode **vpp)
445{
446	return (getdevvp(dev, vpp, VBLK));
447}
448
449/*
450 * Create a vnode for a character device.
451 * Used for console handling.
452 */
453int
454cdevvp(dev_t dev, struct vnode **vpp)
455{
456	return (getdevvp(dev, vpp, VCHR));
457}
458
459/*
460 * Create a vnode for a device.
461 * Used by bdevvp (block device) for root file system etc.,
462 * and by cdevvp (character device) for console.
463 */
464int
465getdevvp(dev_t dev, struct vnode **vpp, enum vtype type)
466{
467	struct vnode *vp;
468	struct vnode *nvp;
469	int error;
470
471	if (dev == NODEV) {
472		*vpp = NULLVP;
473		return (0);
474	}
475	error = getnewvnode(VT_NON, NULL, spec_vnodeop_p, &nvp);
476	if (error) {
477		*vpp = NULLVP;
478		return (error);
479	}
480	vp = nvp;
481	vp->v_type = type;
482	if ((nvp = checkalias(vp, dev, NULL)) != 0) {
483		vput(vp);
484		vp = nvp;
485	}
486	*vpp = vp;
487	return (0);
488}
489
490/*
491 * Check to see if the new vnode represents a special device
492 * for which we already have a vnode (either because of
493 * bdevvp() or because of a different vnode representing
494 * the same block device). If such an alias exists, deallocate
495 * the existing contents and return the aliased vnode. The
496 * caller is responsible for filling it with its new contents.
497 */
498struct vnode *
499checkalias(struct vnode *nvp, dev_t nvp_rdev, struct mount *mp)
500{
501	struct proc *p = curproc;
502	struct vnode *vp;
503	struct vnode **vpp;
504
505	if (nvp->v_type != VBLK && nvp->v_type != VCHR)
506		return (NULLVP);
507
508	vpp = &speclisth[SPECHASH(nvp_rdev)];
509loop:
510	for (vp = *vpp; vp; vp = vp->v_specnext) {
511		if (nvp_rdev != vp->v_rdev || nvp->v_type != vp->v_type) {
512			continue;
513		}
514		/*
515		 * Alias, but not in use, so flush it out.
516		 */
517		if (vp->v_usecount == 0) {
518			vgonel(vp, p);
519			goto loop;
520		}
521		if (vget(vp, LK_EXCLUSIVE, p)) {
522			goto loop;
523		}
524		break;
525	}
526
527	/*
528	 * Common case is actually in the if statement
529	 */
530	if (vp == NULL || !(vp->v_tag == VT_NON && vp->v_type == VBLK)) {
531		nvp->v_specinfo = malloc(sizeof(struct specinfo), M_VNODE,
532			M_WAITOK);
533		nvp->v_rdev = nvp_rdev;
534		nvp->v_hashchain = vpp;
535		nvp->v_specnext = *vpp;
536		nvp->v_specmountpoint = NULL;
537		nvp->v_speclockf = NULL;
538		bzero(nvp->v_specbitmap, sizeof(nvp->v_specbitmap));
539		*vpp = nvp;
540		if (vp != NULLVP) {
541			nvp->v_flag |= VALIASED;
542			vp->v_flag |= VALIASED;
543			vput(vp);
544		}
545		return (NULLVP);
546	}
547
548	/*
549	 * This code is the uncommon case. It is called in case
550	 * we found an alias that was VT_NON && vtype of VBLK
551	 * This means we found a block device that was created
552	 * using bdevvp.
553	 * An example of such a vnode is the root partition device vnode
554	 * created in ffs_mountroot.
555	 *
556	 * The vnodes created by bdevvp should not be aliased (why?).
557	 */
558
559	VOP_UNLOCK(vp, 0, p);
560	vclean(vp, 0, p);
561	vp->v_op = nvp->v_op;
562	vp->v_tag = nvp->v_tag;
563	nvp->v_type = VNON;
564	insmntque(vp, mp);
565	return (vp);
566}
567
568/*
569 * Grab a particular vnode from the free list, increment its
570 * reference count and lock it. If the vnode lock bit is set,
571 * the vnode is being eliminated in vgone. In that case, we
572 * cannot grab it, so the process is awakened when the
573 * transition is completed, and an error code is returned to
574 * indicate that the vnode is no longer usable, possibly
575 * having been changed to a new file system type.
576 */
577int
578vget(struct vnode *vp, int flags, struct proc *p)
579{
580	int error, s, onfreelist;
581
582	/*
583	 * If the vnode is in the process of being cleaned out for
584	 * another use, we wait for the cleaning to finish and then
585	 * return failure. Cleaning is determined by checking that
586	 * the VXLOCK flag is set.
587	 */
588
589	if (vp->v_flag & VXLOCK) {
590		if (flags & LK_NOWAIT) {
591			return (EBUSY);
592		}
593
594		vp->v_flag |= VXWANT;
595		tsleep(vp, PINOD, "vget", 0);
596		return (ENOENT);
597	}
598
599	onfreelist = vp->v_bioflag & VBIOONFREELIST;
600	if (vp->v_usecount == 0 && onfreelist) {
601		s = splbio();
602		if (vp->v_holdcnt > 0)
603			TAILQ_REMOVE(&vnode_hold_list, vp, v_freelist);
604		else
605			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
606		vp->v_bioflag &= ~VBIOONFREELIST;
607		splx(s);
608	}
609
610 	vp->v_usecount++;
611	if (flags & LK_TYPE_MASK) {
612		if ((error = vn_lock(vp, flags, p)) != 0) {
613			vp->v_usecount--;
614			if (vp->v_usecount == 0 && onfreelist)
615				vputonfreelist(vp);
616		}
617		return (error);
618	}
619
620	return (0);
621}
622
623
624/* Vnode reference. */
625void
626vref(struct vnode *vp)
627{
628#ifdef DIAGNOSTIC
629	if (vp->v_usecount == 0)
630		panic("vref used where vget required");
631#endif
632	vp->v_usecount++;
633}
634
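/*
 * Place an unreferenced vnode on the appropriate free list: the hold
 * list if buffers still reference it, otherwise the regular free list.
 * Dead (VBAD) vnodes go to the head so they are recycled first.
 */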
635void
636vputonfreelist(struct vnode *vp)
637{
638	int s;
639	struct freelst *lst;
640
641	s = splbio();
642#ifdef DIAGNOSTIC
643	if (vp->v_usecount != 0)
644		panic("Use count is not zero!");
645
646	if (vp->v_bioflag & VBIOONFREELIST) {
647		vprint("vnode already on free list: ", vp);
648		panic("vnode already on free list");
649	}
650#endif
651
652	vp->v_bioflag |= VBIOONFREELIST;
653
654	if (vp->v_holdcnt > 0)
655		lst = &vnode_hold_list;
656	else
657		lst = &vnode_free_list;
658
659	if (vp->v_type == VBAD)
660		TAILQ_INSERT_HEAD(lst, vp, v_freelist);
661	else
662		TAILQ_INSERT_TAIL(lst, vp, v_freelist);
663
664	splx(s);
665}
666
667/*
668 * vput(), just unlock and vrele()
669 */
670void
671vput(struct vnode *vp)
672{
673	struct proc *p = curproc;
674
675#ifdef DIAGNOSTIC
676	if (vp == NULL)
677		panic("vput: null vp");
678#endif
679
680#ifdef DIAGNOSTIC
681	if (vp->v_usecount == 0) {
682		vprint("vput: bad ref count", vp);
683		panic("vput: ref cnt");
684	}
685#endif
686	vp->v_usecount--;
687	if (vp->v_usecount > 0) {
688		VOP_UNLOCK(vp, 0, p);
689		return;
690	}
691
692#ifdef DIAGNOSTIC
693	if (vp->v_writecount != 0) {
694		vprint("vput: bad writecount", vp);
695		panic("vput: v_writecount != 0");
696	}
697#endif
698
699	VOP_INACTIVE(vp, p);
700
701	if (vp->v_usecount == 0 && !(vp->v_bioflag & VBIOONFREELIST))
702		vputonfreelist(vp);
703}
704
705/*
706 * Vnode release - use for active VNODES.
707 * If count drops to zero, call inactive routine and return to freelist.
708 * Returns 0 if it did not sleep.
709 */
710int
711vrele(struct vnode *vp)
712{
713	struct proc *p = curproc;
714
715#ifdef DIAGNOSTIC
716	if (vp == NULL)
717		panic("vrele: null vp");
718#endif
719#ifdef DIAGNOSTIC
720	if (vp->v_usecount == 0) {
721		vprint("vrele: bad ref count", vp);
722		panic("vrele: ref cnt");
723	}
724#endif
725	vp->v_usecount--;
726	if (vp->v_usecount > 0) {
727		return (0);
728	}
729
730#ifdef DIAGNOSTIC
731	if (vp->v_writecount != 0) {
732		vprint("vrele: bad writecount", vp);
733		panic("vrele: v_writecount != 0");
734	}
735#endif
736
737	if (vn_lock(vp, LK_EXCLUSIVE, p)) {
738#ifdef DIAGNOSTIC
739		vprint("vrele: cannot lock", vp);
740#endif
741		return (1);
742	}
743
744	VOP_INACTIVE(vp, p);
745
746	if (vp->v_usecount == 0 && !(vp->v_bioflag & VBIOONFREELIST))
747		vputonfreelist(vp);
748	return (1);
749}
750
751/* Page or buffer structure gets a reference. */
752void
753vhold(struct vnode *vp)
754{
755	/*
756	 * If it is on the freelist and the hold count is currently
757	 * zero, move it to the hold list.
758	 */
759	if ((vp->v_bioflag & VBIOONFREELIST) &&
760	    vp->v_holdcnt == 0 && vp->v_usecount == 0) {
761		TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
762		TAILQ_INSERT_TAIL(&vnode_hold_list, vp, v_freelist);
763	}
764	vp->v_holdcnt++;
765}
766
767/* Lose interest in a vnode. */
768void
769vdrop(struct vnode *vp)
770{
771#ifdef DIAGNOSTIC
772	if (vp->v_holdcnt == 0)
773		panic("vdrop: zero holdcnt");
774#endif
775
776	vp->v_holdcnt--;
777
778	/*
779	 * If it is on the holdlist and the hold count drops to
780	 * zero, move it to the free list.
781	 */
782	if ((vp->v_bioflag & VBIOONFREELIST) &&
783	    vp->v_holdcnt == 0 && vp->v_usecount == 0) {
784		TAILQ_REMOVE(&vnode_hold_list, vp, v_freelist);
785		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
786	}
787}
788
789/*
790 * Remove any vnodes in the vnode table belonging to mount point mp.
791 *
792 * If MNT_NOFORCE is specified, there should not be any active ones,
793 * return error if any are found (nb: this is a user error, not a
794 * system error). If MNT_FORCE is specified, detach any active vnodes
795 * that are found.
796 */
797#ifdef DEBUG
798int busyprt = 0;	/* print out busy vnodes */
799struct ctldebug debug1 = { "busyprt", &busyprt };
800#endif
801
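/*
 * Call func(vp, arg) for every vnode attached to the mount point,
 * restarting the walk if a vnode has moved to another mount point in
 * the meantime.  Stops at the first non-zero return value from func
 * and passes it back to the caller.
 */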
802int
803vfs_mount_foreach_vnode(struct mount *mp,
804    int (*func)(struct vnode *, void *), void *arg) {
805	struct vnode *vp, *nvp;
806	int error = 0;
807
808loop:
809	for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = nvp) {
810		if (vp->v_mount != mp)
811			goto loop;
812		nvp = LIST_NEXT(vp, v_mntvnodes);
813
814		error = func(vp, arg);
815
816		if (error != 0)
817			break;
818	}
819
820	return (error);
821}
822
823struct vflush_args {
824	struct vnode *skipvp;
825	int busy;
826	int flags;
827};
828
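/*
 * Per-vnode helper for vflush(): skip the vnode named by the caller
 * and system vnodes when SKIPSYSTEM is set, clean out unreferenced
 * vnodes, forcibly close the rest when FORCECLOSE is set, and
 * otherwise count the vnode as busy.
 */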
829int
830vflush_vnode(struct vnode *vp, void *arg) {
831	struct vflush_args *va = arg;
832	struct proc *p = curproc;
833
834	if (vp == va->skipvp) {
835		return (0);
836	}
837
838	if ((va->flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
839		return (0);
840	}
841
842	/*
843	 * If WRITECLOSE is set, only flush out regular file
844	 * vnodes open for writing.
845	 */
846	if ((va->flags & WRITECLOSE) &&
847	    (vp->v_writecount == 0 || vp->v_type != VREG)) {
848		return (0);
849	}
850
851	/*
852	 * With v_usecount == 0, all we need to do is clear
853	 * out the vnode data structures and we are done.
854	 */
855	if (vp->v_usecount == 0) {
856		vgonel(vp, p);
857		return (0);
858	}
859
860	/*
861	 * If FORCECLOSE is set, forcibly close the vnode.
862	 * For block or character devices, revert to an
863	 * anonymous device. For all other files, just kill them.
864	 */
865	if (va->flags & FORCECLOSE) {
866		if (vp->v_type != VBLK && vp->v_type != VCHR) {
867			vgonel(vp, p);
868		} else {
869			vclean(vp, 0, p);
870			vp->v_op = spec_vnodeop_p;
871			insmntque(vp, (struct mount *)0);
872		}
873		return (0);
874	}
875
876#ifdef DEBUG
877	if (busyprt)
878		vprint("vflush: busy vnode", vp);
879#endif
880	va->busy++;
881	return (0);
882}
883
884int
885vflush(struct mount *mp, struct vnode *skipvp, int flags)
886{
887	struct vflush_args va;
888	va.skipvp = skipvp;
889	va.busy = 0;
890	va.flags = flags;
891
892	vfs_mount_foreach_vnode(mp, vflush_vnode, &va);
893
894	if (va.busy)
895		return (EBUSY);
896	return (0);
897}
898
899/*
900 * Disassociate the underlying file system from a vnode.
901 */
902void
903vclean(struct vnode *vp, int flags, struct proc *p)
904{
905	int active;
906
907	/*
908	 * Check to see if the vnode is in use.
909	 * If so we have to reference it before we clean it out
910	 * so that its count cannot fall to zero and generate a
911	 * race against ourselves to recycle it.
912	 */
913	if ((active = vp->v_usecount) != 0)
914		vp->v_usecount++;
915
916	/*
917	 * Prevent the vnode from being recycled or
918	 * brought into use while we clean it out.
919	 */
920	if (vp->v_flag & VXLOCK)
921		panic("vclean: deadlock");
922	vp->v_flag |= VXLOCK;
923	/*
924	 * Even if the count is zero, the VOP_INACTIVE routine may still
925	 * have the object locked while it cleans it out. The VOP_LOCK
926	 * ensures that the VOP_INACTIVE routine is done with its work.
927	 * For active vnodes, it ensures that no other activity can
928	 * occur while the underlying object is being cleaned out.
929	 */
930	VOP_LOCK(vp, LK_DRAIN, p);
931
932	/*
933	 * Clean out any VM data associated with the vnode.
934	 */
935	uvm_vnp_terminate(vp);
936	/*
937	 * Clean out any buffers associated with the vnode.
938	 */
939	if (flags & DOCLOSE)
940		vinvalbuf(vp, V_SAVE, NOCRED, p, 0, 0);
941	/*
942	 * If purging an active vnode, it must be closed and
943	 * deactivated before being reclaimed. Note that the
944	 * VOP_INACTIVE will unlock the vnode
945	 */
946	if (active) {
947		if (flags & DOCLOSE)
948			VOP_CLOSE(vp, FNONBLOCK, NOCRED, p);
949		VOP_INACTIVE(vp, p);
950	} else {
951		/*
952		 * Any other processes trying to obtain this lock must first
953		 * wait for VXLOCK to clear, then call the new lock operation.
954		 */
955		VOP_UNLOCK(vp, 0, p);
956	}
957
958	/*
959	 * Reclaim the vnode.
960	 */
961	if (VOP_RECLAIM(vp, p))
962		panic("vclean: cannot reclaim");
963	if (active) {
964		vp->v_usecount--;
965		if (vp->v_usecount == 0) {
966			if (vp->v_holdcnt > 0)
967				panic("vclean: not clean");
968			vputonfreelist(vp);
969		}
970	}
971	cache_purge(vp);
972
973	/*
974	 * Done with purge, notify sleepers of the grim news.
975	 */
976	vp->v_op = dead_vnodeop_p;
977	VN_KNOTE(vp, NOTE_REVOKE);
978	vp->v_tag = VT_NON;
979	vp->v_flag &= ~VXLOCK;
980#ifdef VFSDEBUG
981	vp->v_flag &= ~VLOCKSWORK;
982#endif
983	if (vp->v_flag & VXWANT) {
984		vp->v_flag &= ~VXWANT;
985		wakeup(vp);
986	}
987}
988
989/*
990 * Recycle an unused vnode to the front of the free list.
991 */
992int
993vrecycle(struct vnode *vp, struct proc *p)
994{
995	if (vp->v_usecount == 0) {
996		vgonel(vp, p);
997		return (1);
998	}
999	return (0);
1000}
1001
1002/*
1003 * Eliminate all activity associated with a vnode
1004 * in preparation for reuse.
1005 */
1006void
1007vgone(struct vnode *vp)
1008{
1009	struct proc *p = curproc;
1010	vgonel(vp, p);
1011}
1012
1013/*
1014 * vgone, with struct proc.
1015 */
1016void
1017vgonel(struct vnode *vp, struct proc *p)
1018{
1019	struct vnode *vq;
1020	struct vnode *vx;
1021
1022	/*
1023	 * If a vgone (or vclean) is already in progress,
1024	 * wait until it is done and return.
1025	 */
1026	if (vp->v_flag & VXLOCK) {
1027		vp->v_flag |= VXWANT;
1028		tsleep(vp, PINOD, "vgone", 0);
1029		return;
1030	}
1031
1032	/*
1033	 * Clean out the filesystem specific data.
1034	 */
1035	vclean(vp, DOCLOSE, p);
1036	/*
1037	 * Delete from old mount point vnode list, if on one.
1038	 */
1039	if (vp->v_mount != NULL)
1040		insmntque(vp, (struct mount *)0);
1041	/*
1042	 * If special device, remove it from special device alias list
1043	 * if it is on one.
1044	 */
1045	if ((vp->v_type == VBLK || vp->v_type == VCHR) && vp->v_specinfo != 0) {
1046		if (*vp->v_hashchain == vp) {
1047			*vp->v_hashchain = vp->v_specnext;
1048		} else {
1049			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
1050				if (vq->v_specnext != vp)
1051					continue;
1052				vq->v_specnext = vp->v_specnext;
1053				break;
1054			}
1055			if (vq == NULL)
1056				panic("missing bdev");
1057		}
1058		if (vp->v_flag & VALIASED) {
1059			vx = NULL;
1060			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
1061				if (vq->v_rdev != vp->v_rdev ||
1062				    vq->v_type != vp->v_type)
1063					continue;
1064				if (vx)
1065					break;
1066				vx = vq;
1067			}
1068			if (vx == NULL)
1069				panic("missing alias");
1070			if (vq == NULL)
1071				vx->v_flag &= ~VALIASED;
1072			vp->v_flag &= ~VALIASED;
1073		}
1074		free(vp->v_specinfo, M_VNODE);
1075		vp->v_specinfo = NULL;
1076	}
1077	/*
1078	 * If it is on the freelist and not already at the head,
1079	 * move it to the head of the list.
1080	 */
1081	vp->v_type = VBAD;
1082
1083	/*
1084	 * Move onto the free list, unless we were called from
1085	 * getnewvnode and we're not on any free list
1086	 */
1087	if (vp->v_usecount == 0 &&
1088	    (vp->v_bioflag & VBIOONFREELIST)) {
1089		int s;
1090
1091		s = splbio();
1092
1093		if (vp->v_holdcnt > 0)
1094			panic("vgonel: not clean");
1095
1096		if (TAILQ_FIRST(&vnode_free_list) != vp) {
1097			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
1098			TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
1099		}
1100		splx(s);
1101	}
1102}
1103
1104/*
1105 * Lookup a vnode by device number.
1106 */
1107int
1108vfinddev(dev_t dev, enum vtype type, struct vnode **vpp)
1109{
1110	struct vnode *vp;
1111	int rc = 0;
1112
1113	for (vp = speclisth[SPECHASH(dev)]; vp; vp = vp->v_specnext) {
1114		if (dev != vp->v_rdev || type != vp->v_type)
1115			continue;
1116		*vpp = vp;
1117		rc = 1;
1118		break;
1119	}
1120	return (rc);
1121}
1122
1123/*
1124 * Revoke all the vnodes corresponding to the specified minor number
1125 * range (endpoints inclusive) of the specified major.
1126 */
1127void
1128vdevgone(int maj, int minl, int minh, enum vtype type)
1129{
1130	struct vnode *vp;
1131	int mn;
1132
1133	for (mn = minl; mn <= minh; mn++)
1134		if (vfinddev(makedev(maj, mn), type, &vp))
1135			VOP_REVOKE(vp, REVOKEALL);
1136}
1137
1138/*
1139 * Calculate the total number of references to a special device.
1140 */
1141int
1142vcount(struct vnode *vp)
1143{
1144	struct vnode *vq, *vnext;
1145	int count;
1146
1147loop:
1148	if ((vp->v_flag & VALIASED) == 0)
1149		return (vp->v_usecount);
1150	for (count = 0, vq = *vp->v_hashchain; vq; vq = vnext) {
1151		vnext = vq->v_specnext;
1152		if (vq->v_rdev != vp->v_rdev || vq->v_type != vp->v_type)
1153			continue;
1154		/*
1155		 * Alias, but not in use, so flush it out.
1156		 */
1157		if (vq->v_usecount == 0 && vq != vp) {
1158			vgone(vq);
1159			goto loop;
1160		}
1161		count += vq->v_usecount;
1162	}
1163	return (count);
1164}
1165
1166#if defined(DEBUG) || defined(DIAGNOSTIC)
1167/*
1168 * Print out a description of a vnode.
1169 */
1170static char *typename[] =
1171   { "VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD" };
1172
1173void
1174vprint(char *label, struct vnode *vp)
1175{
1176	char buf[64];
1177
1178	if (label != NULL)
1179		printf("%s: ", label);
1180	printf("%p, type %s, use %u, write %u, hold %u,",
1181		vp, typename[vp->v_type], vp->v_usecount, vp->v_writecount,
1182		vp->v_holdcnt);
1183	buf[0] = '\0';
1184	if (vp->v_flag & VROOT)
1185		strlcat(buf, "|VROOT", sizeof buf);
1186	if (vp->v_flag & VTEXT)
1187		strlcat(buf, "|VTEXT", sizeof buf);
1188	if (vp->v_flag & VSYSTEM)
1189		strlcat(buf, "|VSYSTEM", sizeof buf);
1190	if (vp->v_flag & VXLOCK)
1191		strlcat(buf, "|VXLOCK", sizeof buf);
1192	if (vp->v_flag & VXWANT)
1193		strlcat(buf, "|VXWANT", sizeof buf);
1194	if (vp->v_bioflag & VBIOWAIT)
1195		strlcat(buf, "|VBIOWAIT", sizeof buf);
1196	if (vp->v_bioflag & VBIOONFREELIST)
1197		strlcat(buf, "|VBIOONFREELIST", sizeof buf);
1198	if (vp->v_bioflag & VBIOONSYNCLIST)
1199		strlcat(buf, "|VBIOONSYNCLIST", sizeof buf);
1200	if (vp->v_flag & VALIASED)
1201		strlcat(buf, "|VALIASED", sizeof buf);
1202	if (buf[0] != '\0')
1203		printf(" flags (%s)", &buf[1]);
1204	if (vp->v_data == NULL) {
1205		printf("\n");
1206	} else {
1207		printf("\n\t");
1208		VOP_PRINT(vp);
1209	}
1210}
1211#endif /* DEBUG || DIAGNOSTIC */
1212
1213#ifdef DEBUG
1214/*
1215 * List all of the locked vnodes in the system.
1216 * Called when debugging the kernel.
1217 */
1218void
1219printlockedvnodes(void)
1220{
1221	struct mount *mp, *nmp;
1222	struct vnode *vp;
1223
1224	printf("Locked vnodes\n");
1225
1226	for (mp = CIRCLEQ_FIRST(&mountlist); mp != CIRCLEQ_END(&mountlist);
1227	    mp = nmp) {
1228		if (vfs_busy(mp, VB_READ|VB_NOWAIT)) {
1229			nmp = CIRCLEQ_NEXT(mp, mnt_list);
1230			continue;
1231		}
1232		LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
1233			if (VOP_ISLOCKED(vp))
1234				vprint((char *)0, vp);
1235		}
1236		nmp = CIRCLEQ_NEXT(mp, mnt_list);
1237		vfs_unbusy(mp);
1238 	}
1239
1240}
1241#endif
1242
1243/*
1244 * Top level filesystem related information gathering.
1245 */
1246int
1247vfs_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
1248    size_t newlen, struct proc *p)
1249{
1250	struct vfsconf *vfsp, *tmpvfsp;
1251	int ret;
1252
1253	/* all sysctl names at this level are at least name and field */
1254	if (namelen < 2)
1255		return (ENOTDIR);		/* overloaded */
1256
1257	if (name[0] != VFS_GENERIC) {
1258		for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
1259			if (vfsp->vfc_typenum == name[0])
1260				break;
1261
1262		if (vfsp == NULL)
1263			return (EOPNOTSUPP);
1264
1265		return ((*vfsp->vfc_vfsops->vfs_sysctl)(&name[1], namelen - 1,
1266		    oldp, oldlenp, newp, newlen, p));
1267	}
1268
1269	switch (name[1]) {
1270	case VFS_MAXTYPENUM:
1271		return (sysctl_rdint(oldp, oldlenp, newp, maxvfsconf));
1272
1273	case VFS_CONF:
1274		if (namelen < 3)
1275			return (ENOTDIR);	/* overloaded */
1276
1277		for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
1278			if (vfsp->vfc_typenum == name[2])
1279				break;
1280
1281		if (vfsp == NULL)
1282			return (EOPNOTSUPP);
1283
1284		/* Make a copy, clear out kernel pointers */
1285		tmpvfsp = malloc(sizeof(*tmpvfsp), M_TEMP, M_WAITOK);
1286		bcopy(vfsp, tmpvfsp, sizeof(*tmpvfsp));
1287		tmpvfsp->vfc_vfsops = NULL;
1288		tmpvfsp->vfc_next = NULL;
1289
1290		ret = sysctl_rdstruct(oldp, oldlenp, newp, tmpvfsp,
1291		    sizeof(struct vfsconf));
1292
1293		free(tmpvfsp, M_TEMP);
1294		return (ret);
1295	case VFS_BCACHESTAT:	/* buffer cache statistics */
1296		ret = sysctl_rdstruct(oldp, oldlenp, newp, &bcstats,
1297		    sizeof(struct bcachestats));
1298		return(ret);
1299	}
1300	return (EOPNOTSUPP);
1301}
1302
1303int kinfo_vdebug = 1;
1304#define KINFO_VNODESLOP	10
1305/*
1306 * Dump vnode list (via sysctl).
1307 * Copyout address of vnode followed by vnode.
1308 */
1309/* ARGSUSED */
1310int
1311sysctl_vnode(char *where, size_t *sizep, struct proc *p)
1312{
1313	struct mount *mp, *nmp;
1314	struct vnode *vp, *nvp;
1315	char *bp = where, *savebp;
1316	char *ewhere;
1317	int error;
1318
1319	if (where == NULL) {
1320		*sizep = (numvnodes + KINFO_VNODESLOP) * sizeof(struct e_vnode);
1321		return (0);
1322	}
1323	ewhere = where + *sizep;
1324
1325	for (mp = CIRCLEQ_FIRST(&mountlist); mp != CIRCLEQ_END(&mountlist);
1326	    mp = nmp) {
1327		if (vfs_busy(mp, VB_READ|VB_NOWAIT)) {
1328			nmp = CIRCLEQ_NEXT(mp, mnt_list);
1329			continue;
1330		}
1331		savebp = bp;
1332again:
1333		for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL;
1334		    vp = nvp) {
1335			/*
1336			 * Check that the vp is still associated with
1337			 * this filesystem.  RACE: could have been
1338			 * recycled onto the same filesystem.
1339			 */
1340			if (vp->v_mount != mp) {
1341				if (kinfo_vdebug)
1342					printf("kinfo: vp changed\n");
1343				bp = savebp;
1344				goto again;
1345			}
1346			nvp = LIST_NEXT(vp, v_mntvnodes);
1347			if (bp + sizeof(struct e_vnode) > ewhere) {
1348				*sizep = bp - where;
1349				vfs_unbusy(mp);
1350				return (ENOMEM);
1351			}
1352			if ((error = copyout(&vp,
1353			    &((struct e_vnode *)bp)->vptr,
1354			    sizeof(struct vnode *))) ||
1355			   (error = copyout(vp,
1356			    &((struct e_vnode *)bp)->vnode,
1357			    sizeof(struct vnode)))) {
1358				vfs_unbusy(mp);
1359				return (error);
1360			}
1361			bp += sizeof(struct e_vnode);
1362		}
1363
1364		nmp = CIRCLEQ_NEXT(mp, mnt_list);
1365		vfs_unbusy(mp);
1366	}
1367
1368	*sizep = bp - where;
1369
1370	return (0);
1371}
1372
1373/*
1374 * Check to see if a filesystem is mounted on a block device.
1375 */
1376int
1377vfs_mountedon(struct vnode *vp)
1378{
1379	struct vnode *vq;
1380	int error = 0;
1381
1382 	if (vp->v_specmountpoint != NULL)
1383		return (EBUSY);
1384	if (vp->v_flag & VALIASED) {
1385		for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
1386			if (vq->v_rdev != vp->v_rdev ||
1387			    vq->v_type != vp->v_type)
1388				continue;
1389			if (vq->v_specmountpoint != NULL) {
1390				error = EBUSY;
1391				break;
1392			}
1393 		}
1394	}
1395	return (error);
1396}
1397
1398/*
1399 * Build hash lists of net addresses and hang them off the mount point.
1400 * Called by ufs_mount() to set up the lists of export addresses.
1401 */
1402int
1403vfs_hang_addrlist(struct mount *mp, struct netexport *nep,
1404    struct export_args *argp)
1405{
1406	struct netcred *np;
1407	struct radix_node_head *rnh;
1408	int i;
1409	struct radix_node *rn;
1410	struct sockaddr *saddr, *smask = 0;
1411	struct domain *dom;
1412	int error;
1413
1414	if (argp->ex_addrlen == 0) {
1415		if (mp->mnt_flag & MNT_DEFEXPORTED)
1416			return (EPERM);
1417		np = &nep->ne_defexported;
1418		np->netc_exflags = argp->ex_flags;
1419		np->netc_anon = argp->ex_anon;
1420		np->netc_anon.cr_ref = 1;
1421		mp->mnt_flag |= MNT_DEFEXPORTED;
1422		return (0);
1423	}
1424	if (argp->ex_addrlen > MLEN || argp->ex_masklen > MLEN ||
1425	    argp->ex_addrlen < 0 || argp->ex_masklen < 0)
1426		return (EINVAL);
1427	i = sizeof(struct netcred) + argp->ex_addrlen + argp->ex_masklen;
1428	np = (struct netcred *)malloc(i, M_NETADDR, M_WAITOK|M_ZERO);
1429	saddr = (struct sockaddr *)(np + 1);
1430	error = copyin(argp->ex_addr, saddr, argp->ex_addrlen);
1431	if (error)
1432		goto out;
1433	if (saddr->sa_len > argp->ex_addrlen)
1434		saddr->sa_len = argp->ex_addrlen;
1435	if (argp->ex_masklen) {
1436		smask = (struct sockaddr *)((caddr_t)saddr + argp->ex_addrlen);
1437		error = copyin(argp->ex_mask, smask, argp->ex_masklen);
1438		if (error)
1439			goto out;
1440		if (smask->sa_len > argp->ex_masklen)
1441			smask->sa_len = argp->ex_masklen;
1442	}
1443	i = saddr->sa_family;
1444	if (i < 0 || i > AF_MAX) {
1445		error = EINVAL;
1446		goto out;
1447	}
1448	if ((rnh = nep->ne_rtable[i]) == 0) {
1449		/*
1450		 * Seems silly to initialize every AF when most are not
1451		 * used, do so on demand here
1452		 */
1453		for (dom = domains; dom; dom = dom->dom_next)
1454			if (dom->dom_family == i && dom->dom_rtattach) {
1455				dom->dom_rtattach((void **)&nep->ne_rtable[i],
1456					dom->dom_rtoffset);
1457				break;
1458			}
1459		if ((rnh = nep->ne_rtable[i]) == 0) {
1460			error = ENOBUFS;
1461			goto out;
1462		}
1463	}
1464	rn = (*rnh->rnh_addaddr)((caddr_t)saddr, (caddr_t)smask, rnh,
1465		np->netc_rnodes, 0);
1466	if (rn == 0 || np != (struct netcred *)rn) { /* already exists */
1467		error = EPERM;
1468		goto out;
1469	}
1470	np->netc_exflags = argp->ex_flags;
1471	np->netc_anon = argp->ex_anon;
1472	np->netc_anon.cr_ref = 1;
1473	return (0);
1474out:
1475	free(np, M_NETADDR);
1476	return (error);
1477}
1478
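/*
 * Walk callback used by vfs_free_addrlist(): delete a network
 * credential entry from its radix tree and free it.
 */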
1479/* ARGSUSED */
1480int
1481vfs_free_netcred(struct radix_node *rn, void *w)
1482{
1483	struct radix_node_head *rnh = (struct radix_node_head *)w;
1484
1485	(*rnh->rnh_deladdr)(rn->rn_key, rn->rn_mask, rnh, NULL);
1486	free(rn, M_NETADDR);
1487	return (0);
1488}
1489
1490/*
1491 * Free the net address hash lists that are hanging off the mount points.
1492 */
1493void
1494vfs_free_addrlist(struct netexport *nep)
1495{
1496	int i;
1497	struct radix_node_head *rnh;
1498
1499	for (i = 0; i <= AF_MAX; i++)
1500		if ((rnh = nep->ne_rtable[i]) != NULL) {
1501			(*rnh->rnh_walktree)(rnh, vfs_free_netcred, rnh);
1502			free(rnh, M_RTABLE);
1503			nep->ne_rtable[i] = 0;
1504		}
1505}
1506
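/*
 * Update the export information for a mount point: tear down the
 * existing export lists when MNT_DELEXPORT is requested and build new
 * ones from the supplied arguments when MNT_EXPORTED is set.
 */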
1507int
1508vfs_export(struct mount *mp, struct netexport *nep, struct export_args *argp)
1509{
1510	int error;
1511
1512	if (argp->ex_flags & MNT_DELEXPORT) {
1513		vfs_free_addrlist(nep);
1514		mp->mnt_flag &= ~(MNT_EXPORTED | MNT_DEFEXPORTED);
1515	}
1516	if (argp->ex_flags & MNT_EXPORTED) {
1517		if ((error = vfs_hang_addrlist(mp, nep, argp)) != 0)
1518			return (error);
1519		mp->mnt_flag |= MNT_EXPORTED;
1520	}
1521	return (0);
1522}
1523
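/*
 * Find the export credentials that apply to the client address in nam
 * for an exported file system, falling back to the default export
 * entry when no specific address matches.  Returns NULL if the file
 * system is not exported to the caller.
 */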
1524struct netcred *
1525vfs_export_lookup(struct mount *mp, struct netexport *nep, struct mbuf *nam)
1526{
1527	struct netcred *np;
1528	struct radix_node_head *rnh;
1529	struct sockaddr *saddr;
1530
1531	np = NULL;
1532	if (mp->mnt_flag & MNT_EXPORTED) {
1533		/*
1534		 * Lookup in the export list first.
1535		 */
1536		if (nam != NULL) {
1537			saddr = mtod(nam, struct sockaddr *);
1538			rnh = nep->ne_rtable[saddr->sa_family];
1539			if (rnh != NULL) {
1540				np = (struct netcred *)
1541					(*rnh->rnh_matchaddr)((caddr_t)saddr,
1542					    rnh);
1543				if (np && np->netc_rnodes->rn_flags & RNF_ROOT)
1544					np = NULL;
1545			}
1546		}
1547		/*
1548		 * If no address match, use the default if it exists.
1549		 */
1550		if (np == NULL && mp->mnt_flag & MNT_DEFEXPORTED)
1551			np = &nep->ne_defexported;
1552	}
1553	return (np);
1554}
1555
1556/*
1557 * Do the usual access checking.
1558 * file_mode, uid and gid are from the vnode in question,
1559 * while acc_mode and cred are from the VOP_ACCESS parameter list
1560 */
1561int
1562vaccess(enum vtype type, mode_t file_mode, uid_t uid, gid_t gid,
1563    mode_t acc_mode, struct ucred *cred)
1564{
1565	mode_t mask;
1566
1567	/* User id 0 always gets read/write access. */
1568	if (cred->cr_uid == 0) {
1569		/* For VEXEC, at least one of the execute bits must be set. */
1570		if ((acc_mode & VEXEC) && type != VDIR &&
1571		    (file_mode & (S_IXUSR|S_IXGRP|S_IXOTH)) == 0)
1572			return EACCES;
1573		return 0;
1574	}
1575
1576	mask = 0;
1577
1578	/* Otherwise, check the owner. */
1579	if (cred->cr_uid == uid) {
1580		if (acc_mode & VEXEC)
1581			mask |= S_IXUSR;
1582		if (acc_mode & VREAD)
1583			mask |= S_IRUSR;
1584		if (acc_mode & VWRITE)
1585			mask |= S_IWUSR;
1586		return (file_mode & mask) == mask ? 0 : EACCES;
1587	}
1588
1589	/* Otherwise, check the groups. */
1590	if (cred->cr_gid == gid || groupmember(gid, cred)) {
1591		if (acc_mode & VEXEC)
1592			mask |= S_IXGRP;
1593		if (acc_mode & VREAD)
1594			mask |= S_IRGRP;
1595		if (acc_mode & VWRITE)
1596			mask |= S_IWGRP;
1597		return (file_mode & mask) == mask ? 0 : EACCES;
1598	}
1599
1600	/* Otherwise, check everyone else. */
1601	if (acc_mode & VEXEC)
1602		mask |= S_IXOTH;
1603	if (acc_mode & VREAD)
1604		mask |= S_IROTH;
1605	if (acc_mode & VWRITE)
1606		mask |= S_IWOTH;
1607	return (file_mode & mask) == mask ? 0 : EACCES;
1608}
1609
1610/*
1611 * Unmount all file systems.
1612 * We traverse the list in reverse order under the assumption that doing so
1613 * will avoid needing to worry about dependencies.
1614 */
1615void
1616vfs_unmountall(void)
1617{
1618	struct mount *mp, *nmp;
1619	int allerror, error, again = 1;
1620
1621 retry:
1622	allerror = 0;
1623	for (mp = CIRCLEQ_LAST(&mountlist); mp != CIRCLEQ_END(&mountlist);
1624	    mp = nmp) {
1625		nmp = CIRCLEQ_PREV(mp, mnt_list);
1626		if ((vfs_busy(mp, VB_WRITE|VB_NOWAIT)) != 0)
1627			continue;
1628		if ((error = dounmount(mp, MNT_FORCE, curproc, NULL)) != 0) {
1629			printf("unmount of %s failed with error %d\n",
1630			    mp->mnt_stat.f_mntonname, error);
1631			allerror = 1;
1632		}
1633	}
1634
1635	if (allerror) {
1636		printf("WARNING: some file systems would not unmount\n");
1637		if (again) {
1638			printf("retrying\n");
1639			again = 0;
1640			goto retry;
1641		}
1642	}
1643}
1644
1645/*
1646 * Sync and unmount file systems before shutting down.
1647 */
1648void
1649vfs_shutdown(void)
1650{
1651#ifdef ACCOUNTING
1652	extern void acct_shutdown(void);
1653
1654	acct_shutdown();
1655#endif
1656
1657	/* XXX Should suspend scheduling. */
1658	(void) spl0();
1659
1660	printf("syncing disks... ");
1661
1662	if (panicstr == 0) {
1663		/* Sync before unmount, in case we hang on something. */
1664		sys_sync(&proc0, (void *)0, (register_t *)0);
1665
1666		/* Unmount file systems. */
1667		vfs_unmountall();
1668	}
1669
1670	if (vfs_syncwait(1))
1671		printf("giving up\n");
1672	else
1673		printf("done\n");
1674}
1675
1676/*
1677 * perform sync() operation and wait for buffers to flush.
1678 * assumptions: called w/ scheduler disabled and physical io enabled
1679 * for now called at spl0() XXX
1680 */
1681int
1682vfs_syncwait(int verbose)
1683{
1684	struct buf *bp;
1685	int iter, nbusy, dcount, s;
1686	struct proc *p;
1687
1688	p = curproc? curproc : &proc0;
1689	sys_sync(p, (void *)0, (register_t *)0);
1690
1691	/* Wait for sync to finish. */
1692	dcount = 10000;
1693	for (iter = 0; iter < 20; iter++) {
1694		nbusy = 0;
1695		LIST_FOREACH(bp, &bufhead, b_list) {
1696			if ((bp->b_flags & (B_BUSY|B_INVAL|B_READ)) == B_BUSY)
1697				nbusy++;
1698			/*
1699			 * With soft updates, some buffers that are
1700			 * written will be remarked as dirty until other
1701			 * buffers are written.
1702			 */
1703			if (bp->b_flags & B_DELWRI) {
1704				s = splbio();
1705				bremfree(bp);
1706				buf_acquire(bp);
1707				splx(s);
1708				nbusy++;
1709				bawrite(bp);
1710				if (dcount-- <= 0) {
1711					if (verbose)
1712						printf("softdep ");
1713					return 1;
1714				}
1715			}
1716		}
1717		if (nbusy == 0)
1718			break;
1719		if (verbose)
1720			printf("%d ", nbusy);
1721		DELAY(40000 * iter);
1722	}
1723
1724	return nbusy;
1725}
1726
1727/*
1728 * posix file system related system variables.
1729 */
1730int
1731fs_posix_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp,
1732    void *newp, size_t newlen, struct proc *p)
1733{
1734	/* all sysctl names at this level are terminal */
1735	if (namelen != 1)
1736		return (ENOTDIR);
1737
1738	switch (name[0]) {
1739	case FS_POSIX_SETUID:
1740		if (newp && securelevel > 0)
1741			return (EPERM);
1742		return(sysctl_int(oldp, oldlenp, newp, newlen, &suid_clear));
1743	default:
1744		return (EOPNOTSUPP);
1745	}
1746	/* NOTREACHED */
1747}
1748
1749/*
1750 * file system related system variables.
1751 */
1752int
1753fs_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
1754    size_t newlen, struct proc *p)
1755{
1756	sysctlfn *fn;
1757
1758	switch (name[0]) {
1759	case FS_POSIX:
1760		fn = fs_posix_sysctl;
1761		break;
1762	default:
1763		return (EOPNOTSUPP);
1764	}
1765	return (*fn)(name + 1, namelen - 1, oldp, oldlenp, newp, newlen, p);
1766}
1767
1768
1769/*
1770 * Routines dealing with vnodes and buffers
1771 */
1772
1773/*
1774 * Wait for all outstanding I/Os to complete
1775 *
1776 * Manipulates v_numoutput. Must be called at splbio()
1777 */
1778int
1779vwaitforio(struct vnode *vp, int slpflag, char *wmesg, int timeo)
1780{
1781	int error = 0;
1782
1783	splassert(IPL_BIO);
1784
1785	while (vp->v_numoutput) {
1786		vp->v_bioflag |= VBIOWAIT;
1787		error = tsleep(&vp->v_numoutput,
1788		    slpflag | (PRIBIO + 1), wmesg, timeo);
1789		if (error)
1790			break;
1791	}
1792
1793	return (error);
1794}
1795
1796/*
1797 * Update outstanding I/O count and do wakeup if requested.
1798 *
1799 * Manipulates v_numoutput. Must be called at splbio()
1800 */
1801void
1802vwakeup(struct vnode *vp)
1803{
1804	splassert(IPL_BIO);
1805
1806	if (vp != NULL) {
1807		if (vp->v_numoutput-- == 0)
1808			panic("vwakeup: neg numoutput");
1809		if ((vp->v_bioflag & VBIOWAIT) && vp->v_numoutput == 0) {
1810			vp->v_bioflag &= ~VBIOWAIT;
1811			wakeup(&vp->v_numoutput);
1812		}
1813	}
1814}
1815
1816/*
1817 * Flush out and invalidate all buffers associated with a vnode.
1818 * Called with the underlying object locked.
1819 */
1820int
1821vinvalbuf(struct vnode *vp, int flags, struct ucred *cred, struct proc *p,
1822    int slpflag, int slptimeo)
1823{
1824	struct buf *bp;
1825	struct buf *nbp, *blist;
1826	int s, error;
1827
1828#ifdef VFSDEBUG
1829	if ((vp->v_flag & VLOCKSWORK) && !VOP_ISLOCKED(vp))
1830		panic("vinvalbuf(): vp isn't locked");
1831#endif
1832
1833	if (flags & V_SAVE) {
1834		s = splbio();
1835		vwaitforio(vp, 0, "vinvalbuf", 0);
1836		if (!LIST_EMPTY(&vp->v_dirtyblkhd)) {
1837			splx(s);
1838			if ((error = VOP_FSYNC(vp, cred, MNT_WAIT, p)) != 0)
1839				return (error);
1840			s = splbio();
1841			if (vp->v_numoutput > 0 ||
1842			    !LIST_EMPTY(&vp->v_dirtyblkhd))
1843				panic("vinvalbuf: dirty bufs");
1844		}
1845		splx(s);
1846	}
1847loop:
1848	s = splbio();
1849	for (;;) {
1850		if ((blist = LIST_FIRST(&vp->v_cleanblkhd)) &&
1851		    (flags & V_SAVEMETA))
1852			while (blist && blist->b_lblkno < 0)
1853				blist = LIST_NEXT(blist, b_vnbufs);
1854		if (blist == NULL &&
1855		    (blist = LIST_FIRST(&vp->v_dirtyblkhd)) &&
1856		    (flags & V_SAVEMETA))
1857			while (blist && blist->b_lblkno < 0)
1858				blist = LIST_NEXT(blist, b_vnbufs);
1859		if (!blist)
1860			break;
1861
1862		for (bp = blist; bp; bp = nbp) {
1863			nbp = LIST_NEXT(bp, b_vnbufs);
1864			if (flags & V_SAVEMETA && bp->b_lblkno < 0)
1865				continue;
1866			if (bp->b_flags & B_BUSY) {
1867				bp->b_flags |= B_WANTED;
1868				error = tsleep(bp, slpflag | (PRIBIO + 1),
1869				    "vinvalbuf", slptimeo);
1870				if (error) {
1871					splx(s);
1872					return (error);
1873				}
1874				break;
1875			}
1876			bremfree(bp);
1877			buf_acquire(bp);
1878			/*
1879			 * XXX Since there are no node locks for NFS, I believe
1880			 * there is a slight chance that a delayed write will
1881			 * occur while sleeping just above, so check for it.
1882			 */
1883			if ((bp->b_flags & B_DELWRI) && (flags & V_SAVE)) {
1884				splx(s);
1885				(void) VOP_BWRITE(bp);
1886				goto loop;
1887			}
1888			bp->b_flags |= B_INVAL;
1889			brelse(bp);
1890		}
1891	}
1892	if (!(flags & V_SAVEMETA) &&
1893	    (!LIST_EMPTY(&vp->v_dirtyblkhd) || !LIST_EMPTY(&vp->v_cleanblkhd)))
1894		panic("vinvalbuf: flush failed");
1895	splx(s);
1896	return (0);
1897}
1898
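/*
 * Write out all dirty buffers associated with a vnode.  If sync is
 * set, wait for the I/O to complete and retry until no dirty buffers
 * remain.
 */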
1899void
1900vflushbuf(struct vnode *vp, int sync)
1901{
1902	struct buf *bp, *nbp;
1903	int s;
1904
1905loop:
1906	s = splbio();
1907	for (bp = LIST_FIRST(&vp->v_dirtyblkhd);
1908	    bp != LIST_END(&vp->v_dirtyblkhd); bp = nbp) {
1909		nbp = LIST_NEXT(bp, b_vnbufs);
1910		if ((bp->b_flags & B_BUSY))
1911			continue;
1912		if ((bp->b_flags & B_DELWRI) == 0)
1913			panic("vflushbuf: not dirty");
1914		bremfree(bp);
1915		buf_acquire(bp);
1916		splx(s);
1917		/*
1918		 * Wait for I/O associated with indirect blocks to complete,
1919		 * since there is no way to quickly wait for them below.
1920		 */
1921		if (bp->b_vp == vp || sync == 0)
1922			(void) bawrite(bp);
1923		else
1924			(void) bwrite(bp);
1925		goto loop;
1926	}
1927	if (sync == 0) {
1928		splx(s);
1929		return;
1930	}
1931	vwaitforio(vp, 0, "vflushbuf", 0);
1932	if (!LIST_EMPTY(&vp->v_dirtyblkhd)) {
1933		splx(s);
1934#ifdef DIAGNOSTIC
1935		vprint("vflushbuf: dirty", vp);
1936#endif
1937		goto loop;
1938	}
1939	splx(s);
1940}
1941
1942/*
1943 * Associate a buffer with a vnode.
1944 *
1945 * Manipulates buffer vnode queues. Must be called at splbio().
1946 */
1947void
1948bgetvp(struct vnode *vp, struct buf *bp)
1949{
1950	splassert(IPL_BIO);
1951
1952
1953	if (bp->b_vp)
1954		panic("bgetvp: not free");
1955	vhold(vp);
1956	bp->b_vp = vp;
1957	if (vp->v_type == VBLK || vp->v_type == VCHR)
1958		bp->b_dev = vp->v_rdev;
1959	else
1960		bp->b_dev = NODEV;
1961	/*
1962	 * Insert onto list for new vnode.
1963	 */
1964	bufinsvn(bp, &vp->v_cleanblkhd);
1965}
1966
1967/*
1968 * Disassociate a buffer from a vnode.
1969 *
1970 * Manipulates vnode buffer queues. Must be called at splbio().
1971 */
1972void
1973brelvp(struct buf *bp)
1974{
1975	struct vnode *vp;
1976
1977	splassert(IPL_BIO);
1978
1979	if ((vp = bp->b_vp) == (struct vnode *) 0)
1980		panic("brelvp: NULL");
1981	/*
1982	 * Delete from old vnode list, if on one.
1983	 */
1984	if (LIST_NEXT(bp, b_vnbufs) != NOLIST)
1985		bufremvn(bp);
1986	if ((vp->v_bioflag & VBIOONSYNCLIST) &&
1987	    LIST_FIRST(&vp->v_dirtyblkhd) == NULL) {
1988		vp->v_bioflag &= ~VBIOONSYNCLIST;
1989		LIST_REMOVE(vp, v_synclist);
1990	}
1991	bp->b_vp = NULL;
1992
1993	vdrop(vp);
1994}
1995
1996/*
1997 * Replaces the current vnode associated with the buffer, if any,
1998 * with a new vnode.
1999 *
2000 * If an output I/O is pending on the buffer, the old vnode
2001 * I/O count is adjusted.
2002 *
2003 * Ignores vnode buffer queues. Must be called at splbio().
2004 */
2005void
2006buf_replacevnode(struct buf *bp, struct vnode *newvp)
2007{
2008	struct vnode *oldvp = bp->b_vp;
2009
2010	splassert(IPL_BIO);
2011
2012	if (oldvp)
2013		brelvp(bp);
2014
2015	if ((bp->b_flags & (B_READ | B_DONE)) == 0) {
2016		newvp->v_numoutput++;	/* put it on swapdev */
2017		vwakeup(oldvp);
2018	}
2019
2020	bgetvp(newvp, bp);
2021	bufremvn(bp);
2022}
2023
2024/*
2025 * Used to assign buffers to the appropriate clean or dirty list on
2026 * the vnode and to add newly dirty vnodes to the appropriate
2027 * filesystem syncer list.
2028 *
2029 * Manipulates vnode buffer queues. Must be called at splbio().
2030 */
2031void
2032reassignbuf(struct buf *bp)
2033{
2034	struct buflists *listheadp;
2035	int delay;
2036	struct vnode *vp = bp->b_vp;
2037
2038	splassert(IPL_BIO);
2039
2040	/*
2041	 * Delete from old vnode list, if on one.
2042	 */
2043	if (LIST_NEXT(bp, b_vnbufs) != NOLIST)
2044		bufremvn(bp);
2045
2046	/*
2047	 * If dirty, put on list of dirty buffers;
2048	 * otherwise insert onto list of clean buffers.
2049	 */
2050	if ((bp->b_flags & B_DELWRI) == 0) {
2051		listheadp = &vp->v_cleanblkhd;
2052		if ((vp->v_bioflag & VBIOONSYNCLIST) &&
2053		    LIST_FIRST(&vp->v_dirtyblkhd) == NULL) {
2054			vp->v_bioflag &= ~VBIOONSYNCLIST;
2055			LIST_REMOVE(vp, v_synclist);
2056		}
2057	} else {
2058		listheadp = &vp->v_dirtyblkhd;
2059		if ((vp->v_bioflag & VBIOONSYNCLIST) == 0) {
2060			switch (vp->v_type) {
2061			case VDIR:
2062				delay = syncdelay / 2;
2063				break;
2064			case VBLK:
2065				if (vp->v_specmountpoint != NULL) {
2066					delay = syncdelay / 3;
2067					break;
2068				}
2069				/* FALLTHROUGH */
2070			default:
2071				delay = syncdelay;
2072			}
2073			vn_syncer_add_to_worklist(vp, delay);
2074		}
2075	}
2076	bufinsvn(bp, listheadp);
2077}
2078
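/*
 * Register a new file system type: add its vfsconf to the end of the
 * global list (rejecting duplicates by name), track the highest type
 * number seen and call the file system's vfs_init routine.
 */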
2079int
2080vfs_register(struct vfsconf *vfs)
2081{
2082	struct vfsconf *vfsp;
2083	struct vfsconf **vfspp;
2084
2085#ifdef DIAGNOSTIC
2086	/* Paranoia? */
2087	if (vfs->vfc_refcount != 0)
2088		printf("vfs_register called with vfc_refcount > 0\n");
2089#endif
2090
2091	/* Check if filesystem already known */
2092	for (vfspp = &vfsconf, vfsp = vfsconf; vfsp;
2093	    vfspp = &vfsp->vfc_next, vfsp = vfsp->vfc_next)
2094		if (strcmp(vfsp->vfc_name, vfs->vfc_name) == 0)
2095			return (EEXIST);
2096
2097	if (vfs->vfc_typenum > maxvfsconf)
2098		maxvfsconf = vfs->vfc_typenum;
2099
2100	vfs->vfc_next = NULL;
2101
2102	/* Add to the end of the list */
2103	*vfspp = vfs;
2104
2105	/* Call vfs_init() */
2106	if (vfs->vfc_vfsops->vfs_init)
2107		(*(vfs->vfc_vfsops->vfs_init))(vfs);
2108
2109	return 0;
2110}
2111
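/*
 * Remove a file system type from the global vfsconf list.  Fails with
 * ENOENT if the type is unknown and EBUSY if it is still referenced by
 * mounted file systems; maxvfsconf is recomputed afterwards.
 */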
2112int
2113vfs_unregister(struct vfsconf *vfs)
2114{
2115	struct vfsconf *vfsp;
2116	struct vfsconf **vfspp;
2117	int maxtypenum;
2118
2119	/* Find our vfsconf struct */
2120	for (vfspp = &vfsconf, vfsp = vfsconf; vfsp;
2121	    vfspp = &vfsp->vfc_next, vfsp = vfsp->vfc_next) {
2122		if (strcmp(vfsp->vfc_name, vfs->vfc_name) == 0)
2123			break;
2124	}
2125
2126	if (!vfsp)			/* Not found */
2127		return (ENOENT);
2128
2129	if (vfsp->vfc_refcount)		/* In use */
2130		return (EBUSY);
2131
2132	/* Remove from list and free */
2133	*vfspp = vfsp->vfc_next;
2134
2135	maxtypenum = 0;
2136
2137	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
2138		if (vfsp->vfc_typenum > maxtypenum)
2139			maxtypenum = vfsp->vfc_typenum;
2140
2141	maxvfsconf = maxtypenum;
2142	return 0;
2143}
2144
2145/*
2146 * Check if vnode represents a disk device
2147 */
2148int
2149vn_isdisk(struct vnode *vp, int *errp)
2150{
2151	if (vp->v_type != VBLK && vp->v_type != VCHR)
2152		return (0);
2153
2154	return (1);
2155}
2156
2157#ifdef DDB
2158#include <machine/db_machdep.h>
2159#include <ddb/db_interface.h>
2160#include <ddb/db_output.h>
2161
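/*
 * ddb(4) helper: print the interesting fields of a buffer.
 */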
2162void
2163vfs_buf_print(struct buf *bp, int full, int (*pr)(const char *, ...))
2164{
2165
2166	(*pr)("  vp %p lblkno 0x%llx blkno 0x%llx dev 0x%x\n"
2167	      "  proc %p error %d flags %b\n",
2168	    bp->b_vp, (int64_t)bp->b_lblkno, (int64_t)bp->b_blkno, bp->b_dev,
2169	    bp->b_proc, bp->b_error, bp->b_flags, B_BITS);
2170
2171	(*pr)("  bufsize 0x%lx bcount 0x%lx resid 0x%lx sync 0x%x\n"
2172	      "  data %p saveaddr %p dep %p iodone %p\n",
2173	    bp->b_bufsize, bp->b_bcount, (long)bp->b_resid, bp->b_synctime,
2174	    bp->b_data, bp->b_saveaddr, LIST_FIRST(&bp->b_dep), bp->b_iodone);
2175
2176	(*pr)("  dirty {off 0x%x end 0x%x} valid {off 0x%x end 0x%x}\n",
2177	    bp->b_dirtyoff, bp->b_dirtyend, bp->b_validoff, bp->b_validend);
2178
2179#ifdef FFS_SOFTUPDATES
2180	if (full)
2181		softdep_print(bp, full, pr);
2182#endif
2183}
2184
2185const char *vtypes[] = { VTYPE_NAMES };
2186const char *vtags[] = { VTAG_NAMES };
2187
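/*
 * ddb(4) helper: print a vnode and, when full is set, the buffers on
 * its clean and dirty lists.
 */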
2188void
2189vfs_vnode_print(struct vnode *vp, int full, int (*pr)(const char *, ...))
2190{
2191
2192#define	NENTS(n)	(sizeof n / sizeof(n[0]))
2193	(*pr)("tag %s(%d) type %s(%d) mount %p typedata %p\n",
2194	      vp->v_tag >= NENTS(vtags)? "<unk>":vtags[vp->v_tag], vp->v_tag,
2195	      vp->v_type >= NENTS(vtypes)? "<unk>":vtypes[vp->v_type],
2196	      vp->v_type, vp->v_mount, vp->v_mountedhere);
2197
2198	(*pr)("data %p usecount %d writecount %ld holdcnt %ld numoutput %d\n",
2199	      vp->v_data, vp->v_usecount, vp->v_writecount,
2200	      vp->v_holdcnt, vp->v_numoutput);
2201
2202	/* uvm_object_printit(&vp->v_uobj, full, pr); */
2203
2204	if (full) {
2205		struct buf *bp;
2206
2207		(*pr)("clean bufs:\n");
2208		LIST_FOREACH(bp, &vp->v_cleanblkhd, b_vnbufs) {
2209			(*pr)(" bp %p\n", bp);
2210			vfs_buf_print(bp, full, pr);
2211		}
2212
2213		(*pr)("dirty bufs:\n");
2214		LIST_FOREACH(bp, &vp->v_dirtyblkhd, b_vnbufs) {
2215			(*pr)(" bp %p\n", bp);
2216			vfs_buf_print(bp, full, pr);
2217		}
2218	}
2219}
2220
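/*
 * ddb(4) helper: print a mount point, its vfsconf and cached statfs
 * information, followed by its locked vnodes (and, when full is set,
 * all of its vnodes).
 */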
2221void
2222vfs_mount_print(struct mount *mp, int full, int (*pr)(const char *, ...))
2223{
2224	struct vfsconf *vfc = mp->mnt_vfc;
2225	struct vnode *vp;
2226	int cnt = 0;
2227
2228	(*pr)("flags %b\nvnodecovered %p syncer %p data %p\n",
2229	    mp->mnt_flag, MNT_BITS,
2230	    mp->mnt_vnodecovered, mp->mnt_syncer, mp->mnt_data);
2231
2232	(*pr)("vfsconf: ops %p name \"%s\" num %d ref %d flags 0x%x\n",
2233            vfc->vfc_vfsops, vfc->vfc_name, vfc->vfc_typenum,
2234	    vfc->vfc_refcount, vfc->vfc_flags);
2235
2236	(*pr)("statvfs cache: bsize %x iosize %x\nblocks %llu free %llu avail %lld\n",
2237	    mp->mnt_stat.f_bsize, mp->mnt_stat.f_iosize, mp->mnt_stat.f_blocks,
2238	    mp->mnt_stat.f_bfree, mp->mnt_stat.f_bavail);
2239
2240	(*pr)("  files %llu ffiles %llu favail %lld\n", mp->mnt_stat.f_files,
2241	    mp->mnt_stat.f_ffree, mp->mnt_stat.f_favail);
2242
2243	(*pr)("  f_fsidx {0x%x, 0x%x} owner %u ctime 0x%x\n",
2244	    mp->mnt_stat.f_fsid.val[0], mp->mnt_stat.f_fsid.val[1],
2245	    mp->mnt_stat.f_owner, mp->mnt_stat.f_ctime);
2246
2247 	(*pr)("  syncwrites %llu asyncwrites = %llu\n",
2248	    mp->mnt_stat.f_syncwrites, mp->mnt_stat.f_asyncwrites);
2249
2250 	(*pr)("  syncreads %llu asyncreads = %llu\n",
2251	    mp->mnt_stat.f_syncreads, mp->mnt_stat.f_asyncreads);
2252
2253	(*pr)("  fstype \"%s\" mnton \"%s\" mntfrom \"%s\"\n",
2254	    mp->mnt_stat.f_fstypename, mp->mnt_stat.f_mntonname,
2255	    mp->mnt_stat.f_mntfromname);
2256
2257	(*pr)("locked vnodes:");
2258	/* XXX would take mountlist lock, except ddb has no context */
2259	LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes)
2260		if (VOP_ISLOCKED(vp)) {
2261			if (!LIST_NEXT(vp, v_mntvnodes))
2262				(*pr)(" %p", vp);
2263			else if (!(cnt++ % (72 / (sizeof(void *) * 2 + 4))))
2264				(*pr)("\n\t%p", vp);
2265			else
2266				(*pr)(", %p", vp);
2267		}
2268	(*pr)("\n");
2269
2270	if (full) {
2271		(*pr)("all vnodes:\n\t");
2272		/* XXX would take mountlist lock, except ddb has no context */
2273		LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes)
2274			if (!LIST_NEXT(vp, v_mntvnodes))
2275				(*pr)(" %p", vp);
2276			else if (!(cnt++ % (72 / (sizeof(void *) * 2 + 4))))
2277				(*pr)(" %p,\n\t", vp);
2278			else
2279				(*pr)(" %p,", vp);
2280		(*pr)("\n");
2281	}
2282}
2283#endif /* DDB */
2284
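/*
 * Copy the cached file system statistics from a mount point into a
 * caller-supplied statfs structure.  When the caller passes the
 * mount's own mnt_stat, only the fstype name is refreshed.
 */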
2285void
2286copy_statfs_info(struct statfs *sbp, const struct mount *mp)
2287{
2288	const struct statfs *mbp;
2289
2290	strncpy(sbp->f_fstypename, mp->mnt_vfc->vfc_name, MFSNAMELEN);
2291
2292	if (sbp == (mbp = &mp->mnt_stat))
2293		return;
2294
2295	sbp->f_fsid = mbp->f_fsid;
2296	sbp->f_owner = mbp->f_owner;
2297	sbp->f_flags = mbp->f_flags;
2298	sbp->f_syncwrites = mbp->f_syncwrites;
2299	sbp->f_asyncwrites = mbp->f_asyncwrites;
2300	sbp->f_syncreads = mbp->f_syncreads;
2301	sbp->f_asyncreads = mbp->f_asyncreads;
2302	sbp->f_namemax = mbp->f_namemax;
2303	bcopy(mp->mnt_stat.f_mntonname, sbp->f_mntonname, MNAMELEN);
2304	bcopy(mp->mnt_stat.f_mntfromname, sbp->f_mntfromname, MNAMELEN);
2305	bcopy(&mp->mnt_stat.mount_info.ufs_args, &sbp->mount_info.ufs_args,
2306	    sizeof(struct ufs_args));
2307}
2308
2309