vfs_subr.c revision 1.272
1/*	$OpenBSD: vfs_subr.c,v 1.272 2018/05/08 10:53:35 bluhm Exp $	*/
2/*	$NetBSD: vfs_subr.c,v 1.53 1996/04/22 01:39:13 christos Exp $	*/
3
4/*
5 * Copyright (c) 1989, 1993
6 *	The Regents of the University of California.  All rights reserved.
7 * (c) UNIX System Laboratories, Inc.
8 * All or some portions of this file are derived from material licensed
9 * to the University of California by American Telephone and Telegraph
10 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
11 * the permission of UNIX System Laboratories, Inc.
12 *
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
15 * are met:
16 * 1. Redistributions of source code must retain the above copyright
17 *    notice, this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 *    notice, this list of conditions and the following disclaimer in the
20 *    documentation and/or other materials provided with the distribution.
21 * 3. Neither the name of the University nor the names of its contributors
22 *    may be used to endorse or promote products derived from this software
23 *    without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 * SUCH DAMAGE.
36 *
37 *	@(#)vfs_subr.c	8.13 (Berkeley) 4/18/94
38 */
39
40/*
41 * External virtual filesystem routines
42 */
43
44#include <sys/param.h>
45#include <sys/systm.h>
46#include <sys/proc.h>
47#include <sys/sysctl.h>
48#include <sys/mount.h>
49#include <sys/time.h>
50#include <sys/fcntl.h>
51#include <sys/kernel.h>
52#include <sys/conf.h>
53#include <sys/vnode.h>
54#include <sys/lock.h>
55#include <sys/stat.h>
56#include <sys/acct.h>
57#include <sys/namei.h>
58#include <sys/ucred.h>
59#include <sys/buf.h>
60#include <sys/errno.h>
61#include <sys/malloc.h>
62#include <sys/mbuf.h>
63#include <sys/syscallargs.h>
64#include <sys/pool.h>
65#include <sys/tree.h>
66#include <sys/specdev.h>
67
68#include <netinet/in.h>
69
70#include <uvm/uvm_extern.h>
71#include <uvm/uvm_vnode.h>
72
73#include "softraid.h"
74
75void sr_quiesce(void);
76
77enum vtype iftovt_tab[16] = {
78	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
79	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
80};
81
82int	vttoif_tab[9] = {
83	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
84	S_IFSOCK, S_IFIFO, S_IFMT,
85};
86
87int prtactive = 0;		/* 1 => print out reclaim of active vnodes */
88int suid_clear = 1;		/* 1 => clear SUID / SGID on owner change */
89
90/*
91 * Insq/Remq for the vnode usage lists.
92 */
93#define	bufinsvn(bp, dp)	LIST_INSERT_HEAD(dp, bp, b_vnbufs)
94#define	bufremvn(bp) {							\
95	LIST_REMOVE(bp, b_vnbufs);					\
96	LIST_NEXT(bp, b_vnbufs) = NOLIST;				\
97}
98
99struct freelst vnode_hold_list;	/* list of vnodes referencing buffers */
100struct freelst vnode_free_list;	/* vnode free list */
101
102struct mntlist mountlist;	/* mounted filesystem list */
103
104void	vclean(struct vnode *, int, struct proc *);
105
106void insmntque(struct vnode *, struct mount *);
107int getdevvp(dev_t, struct vnode **, enum vtype);
108
109int vfs_hang_addrlist(struct mount *, struct netexport *,
110				  struct export_args *);
111int vfs_free_netcred(struct radix_node *, void *, u_int);
112void vfs_free_addrlist(struct netexport *);
113void vputonfreelist(struct vnode *);
114
115int vflush_vnode(struct vnode *, void *);
116int maxvnodes;
117
118void vfs_unmountall(void);
119
120#ifdef DEBUG
121void printlockedvnodes(void);
122#endif
123
124struct pool vnode_pool;
125struct pool uvm_vnode_pool;
126
127static inline int rb_buf_compare(const struct buf *b1, const struct buf *b2);
128RBT_GENERATE(buf_rb_bufs, buf, b_rbbufs, rb_buf_compare);
129
130static inline int
131rb_buf_compare(const struct buf *b1, const struct buf *b2)
132{
133	if (b1->b_lblkno < b2->b_lblkno)
134		return(-1);
135	if (b1->b_lblkno > b2->b_lblkno)
136		return(1);
137	return(0);
138}
139
140/*
141 * Initialize the vnode management data structures.
142 */
143void
144vntblinit(void)
145{
146	/* buffer cache may need a vnode for each buffer */
147	maxvnodes = 2 * initialvnodes;
148	pool_init(&vnode_pool, sizeof(struct vnode), 0, IPL_NONE,
149	    PR_WAITOK, "vnodes", NULL);
150	pool_init(&uvm_vnode_pool, sizeof(struct uvm_vnode), 0, IPL_NONE,
151	    PR_WAITOK, "uvmvnodes", NULL);
152	TAILQ_INIT(&vnode_hold_list);
153	TAILQ_INIT(&vnode_free_list);
154	TAILQ_INIT(&mountlist);
155	/*
156	 * Initialize the filesystem syncer.
157	 */
158	vn_initialize_syncerd();
159
160#ifdef NFSSERVER
161	rn_init(sizeof(struct sockaddr_in));
162#endif /* NFSSERVER */
163}
164
165/*
166 * Mark a mount point as busy. Used to synchronize access and to delay
167 * unmounting.
168 *
169 * The default behaviour is to attempt to take a read lock; if an unmount
170 * is in progress, wait for it to finish and then return failure.
171 */
172int
173vfs_busy(struct mount *mp, int flags)
174{
175	int rwflags = 0;
176
177	/* new mountpoints need their lock initialised */
178	if (mp->mnt_lock.rwl_name == NULL)
179		rw_init_flags(&mp->mnt_lock, "vfslock", RWL_IS_VNODE);
180
181	if (flags & VB_WRITE)
182		rwflags |= RW_WRITE;
183	else
184		rwflags |= RW_READ;
185
186	if (flags & VB_WAIT)
187		rwflags |= RW_SLEEPFAIL;
188	else
189		rwflags |= RW_NOSLEEP;
190
191	if (rw_enter(&mp->mnt_lock, rwflags))
192		return (EBUSY);
193
194	return (0);
195}
196
197/*
198 * Release a busy file system (undo vfs_busy()).
199 */
200void
201vfs_unbusy(struct mount *mp)
202{
203	rw_exit(&mp->mnt_lock);
204}
205
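/*
 * Illustrative only: the usual caller pattern pairs vfs_busy() with
 * vfs_unbusy(), as vfs_stall() and printlockedvnodes() do later in
 * this file.  A minimal sketch:
 *
 *	if (vfs_busy(mp, VB_READ|VB_NOWAIT) == 0) {
 *		... work on the busied mount point ...
 *		vfs_unbusy(mp);
 *	}
 */
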
206int
207vfs_isbusy(struct mount *mp)
208{
209	if (RWLOCK_OWNER(&mp->mnt_lock) > 0)
210		return (1);
211	else
212		return (0);
213}
214
215/*
216 * Lookup a filesystem type, and if found allocate and initialize
217 * a mount structure for it.
218 *
219 * Devname is usually updated by mount(8) after booting.
220 */
221int
222vfs_rootmountalloc(char *fstypename, char *devname, struct mount **mpp)
223{
224	struct vfsconf *vfsp;
225	struct mount *mp;
226
227	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
228		if (!strcmp(vfsp->vfc_name, fstypename))
229			break;
230	if (vfsp == NULL)
231		return (ENODEV);
232	mp = malloc(sizeof(*mp), M_MOUNT, M_WAITOK|M_ZERO);
233	(void)vfs_busy(mp, VB_READ|VB_NOWAIT);
234	LIST_INIT(&mp->mnt_vnodelist);
235	mp->mnt_vfc = vfsp;
236	mp->mnt_op = vfsp->vfc_vfsops;
237	mp->mnt_flag = MNT_RDONLY;
238	mp->mnt_vnodecovered = NULLVP;
239	vfsp->vfc_refcount++;
240	mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;
241	strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
242	mp->mnt_stat.f_mntonname[0] = '/';
243	copystr(devname, mp->mnt_stat.f_mntfromname, MNAMELEN, 0);
244	copystr(devname, mp->mnt_stat.f_mntfromspec, MNAMELEN, 0);
245	*mpp = mp;
246 	return (0);
247 }
248
249/*
250 * Lookup a mount point by filesystem identifier.
251 */
252struct mount *
253vfs_getvfs(fsid_t *fsid)
254{
255	struct mount *mp;
256
257	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
258		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
259		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
260			return (mp);
261		}
262	}
263
264	return (NULL);
265}
266
267
268/*
269 * Get a new unique fsid
270 */
271void
272vfs_getnewfsid(struct mount *mp)
273{
274	static u_short xxxfs_mntid;
275
276	fsid_t tfsid;
277	int mtype;
278
279	mtype = mp->mnt_vfc->vfc_typenum;
280	mp->mnt_stat.f_fsid.val[0] = makedev(nblkdev + mtype, 0);
281	mp->mnt_stat.f_fsid.val[1] = mtype;
282	if (xxxfs_mntid == 0)
283		++xxxfs_mntid;
284	tfsid.val[0] = makedev(nblkdev + mtype, xxxfs_mntid);
285	tfsid.val[1] = mtype;
286	if (!TAILQ_EMPTY(&mountlist)) {
287		while (vfs_getvfs(&tfsid)) {
288			tfsid.val[0]++;
289			xxxfs_mntid++;
290		}
291	}
292	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
293}
294
295/*
296 * Set vnode attributes to VNOVAL
297 */
298void
299vattr_null(struct vattr *vap)
300{
301
302	vap->va_type = VNON;
303	/*
304	 * Don't get fancy: u_quad_t = u_int = VNOVAL leaves the u_quad_t
305	 * with 2^32-1 instead of 2^64-1.  Just write them out and let
306	 * the compiler do its job.
307	 */
308	vap->va_mode = VNOVAL;
309	vap->va_nlink = VNOVAL;
310	vap->va_uid = VNOVAL;
311	vap->va_gid = VNOVAL;
312	vap->va_fsid = VNOVAL;
313	vap->va_fileid = VNOVAL;
314	vap->va_size = VNOVAL;
315	vap->va_blocksize = VNOVAL;
316	vap->va_atime.tv_sec = VNOVAL;
317	vap->va_atime.tv_nsec = VNOVAL;
318	vap->va_mtime.tv_sec = VNOVAL;
319	vap->va_mtime.tv_nsec = VNOVAL;
320	vap->va_ctime.tv_sec = VNOVAL;
321	vap->va_ctime.tv_nsec = VNOVAL;
322	vap->va_gen = VNOVAL;
323	vap->va_flags = VNOVAL;
324	vap->va_rdev = VNOVAL;
325	vap->va_bytes = VNOVAL;
326	vap->va_filerev = VNOVAL;
327	vap->va_vaflags = 0;
328}
329
330/*
331 * Routines having to do with the management of the vnode table.
332 */
333long numvnodes;
334
335/*
336 * Return the next vnode from the free list.
337 */
338int
339getnewvnode(enum vtagtype tag, struct mount *mp, struct vops *vops,
340    struct vnode **vpp)
341{
342	struct proc *p = curproc;
343	struct freelst *listhd;
344	static int toggle;
345	struct vnode *vp;
346	int s;
347
348	/*
349	 * allow maxvnodes to increase if the buffer cache itself
350	 * is big enough to justify it. (we don't shrink it ever)
351	 */
352	maxvnodes = maxvnodes < bcstats.numbufs ? bcstats.numbufs
353	    : maxvnodes;
354
355	/*
356	 * We must choose whether to allocate a new vnode or recycle an
357	 * existing one. The criterion for allocating a new one is that
358	 * the total number of vnodes is less than the number desired or
359	 * there are no vnodes on either free list. Generally we only
360	 * want to recycle vnodes that have no buffers associated with
361	 * them, so we look first on the vnode_free_list. If it is empty,
362	 * we next consider vnodes with referencing buffers on the
363	 * vnode_hold_list. The toggle ensures that half the time we
364	 * will recycle a vnode from the vnode_hold_list, and half the time
365	 * we will allocate a new one, unless the total has grown to twice
366	 * the desired size. We are reluctant to recycle vnodes from the
367	 * vnode_hold_list because we will lose the identity of all their
368	 * referencing buffers.
369	 */
370	toggle ^= 1;
371	if (numvnodes / 2 > maxvnodes)
372		toggle = 0;
373
374	s = splbio();
375	if ((numvnodes < maxvnodes) ||
376	    ((TAILQ_FIRST(listhd = &vnode_free_list) == NULL) &&
377	    ((TAILQ_FIRST(listhd = &vnode_hold_list) == NULL) || toggle))) {
378		splx(s);
379		vp = pool_get(&vnode_pool, PR_WAITOK | PR_ZERO);
380		vp->v_uvm = pool_get(&uvm_vnode_pool, PR_WAITOK | PR_ZERO);
381		vp->v_uvm->u_vnode = vp;
382		RBT_INIT(buf_rb_bufs, &vp->v_bufs_tree);
383		cache_tree_init(&vp->v_nc_tree);
384		TAILQ_INIT(&vp->v_cache_dst);
385		numvnodes++;
386	} else {
387		TAILQ_FOREACH(vp, listhd, v_freelist) {
388			if (VOP_ISLOCKED(vp) == 0)
389				break;
390		}
391		/*
392		 * Normally at most the first NCPUS items on the free
393		 * list are locked, so finding no unlocked vnode here is
394		 * close enough to the list being empty.
395		 */
396		if (vp == NULL) {
397			splx(s);
398			tablefull("vnode");
399			*vpp = 0;
400			return (ENFILE);
401		}
402
403#ifdef DIAGNOSTIC
404		if (vp->v_usecount) {
405			vprint("free vnode", vp);
406			panic("free vnode isn't");
407		}
408#endif
409
410		TAILQ_REMOVE(listhd, vp, v_freelist);
411		vp->v_bioflag &= ~VBIOONFREELIST;
412		splx(s);
413
414		if (vp->v_type != VBAD)
415			vgonel(vp, p);
416#ifdef DIAGNOSTIC
417		if (vp->v_data) {
418			vprint("cleaned vnode", vp);
419			panic("cleaned vnode isn't");
420		}
421		s = splbio();
422		if (vp->v_numoutput)
423			panic("Clean vnode has pending I/O's");
424		splx(s);
425#endif
426		vp->v_flag = 0;
427		vp->v_socket = 0;
428	}
429	cache_purge(vp);
430	vp->v_type = VNON;
431	vp->v_tag = tag;
432	vp->v_op = vops;
433	insmntque(vp, mp);
434	*vpp = vp;
435	vp->v_usecount = 1;
436	vp->v_data = 0;
437	return (0);
438}
439
440/*
441 * Move a vnode from one mount queue to another.
442 */
443void
444insmntque(struct vnode *vp, struct mount *mp)
445{
446	/*
447	 * Delete from old mount point vnode list, if on one.
448	 */
449	if (vp->v_mount != NULL)
450		LIST_REMOVE(vp, v_mntvnodes);
451	/*
452	 * Insert into list of vnodes for the new mount point, if available.
453	 */
454	if ((vp->v_mount = mp) != NULL)
455		LIST_INSERT_HEAD(&mp->mnt_vnodelist, vp, v_mntvnodes);
456}
457
458/*
459 * Create a vnode for a block device.
460 * Used for root filesystem, argdev, and swap areas.
461 * Also used for memory file system special devices.
462 */
463int
464bdevvp(dev_t dev, struct vnode **vpp)
465{
466	return (getdevvp(dev, vpp, VBLK));
467}
468
469/*
470 * Create a vnode for a character device.
471 * Used for console handling.
472 */
473int
474cdevvp(dev_t dev, struct vnode **vpp)
475{
476	return (getdevvp(dev, vpp, VCHR));
477}
478
479/*
480 * Create a vnode for a device.
481 * Used by bdevvp (block device) for root file system etc.,
482 * and by cdevvp (character device) for console.
483 */
484int
485getdevvp(dev_t dev, struct vnode **vpp, enum vtype type)
486{
487	struct vnode *vp;
488	struct vnode *nvp;
489	int error;
490
491	if (dev == NODEV) {
492		*vpp = NULLVP;
493		return (0);
494	}
495	error = getnewvnode(VT_NON, NULL, &spec_vops, &nvp);
496	if (error) {
497		*vpp = NULLVP;
498		return (error);
499	}
500	vp = nvp;
501	vp->v_type = type;
502	if ((nvp = checkalias(vp, dev, NULL)) != 0) {
503		vput(vp);
504		vp = nvp;
505	}
506	if (vp->v_type == VCHR && cdevsw[major(vp->v_rdev)].d_type == D_TTY)
507		vp->v_flag |= VISTTY;
508	*vpp = vp;
509	return (0);
510}
511
512/*
513 * Check to see if the new vnode represents a special device
514 * for which we already have a vnode (either because of
515 * bdevvp() or because of a different vnode representing
516 * the same block device). If such an alias exists, deallocate
517 * the existing contents and return the aliased vnode. The
518 * caller is responsible for filling it with its new contents.
519 */
520struct vnode *
521checkalias(struct vnode *nvp, dev_t nvp_rdev, struct mount *mp)
522{
523	struct proc *p = curproc;
524	struct vnode *vp;
525	struct vnode **vpp;
526
527	if (nvp->v_type != VBLK && nvp->v_type != VCHR)
528		return (NULLVP);
529
530	vpp = &speclisth[SPECHASH(nvp_rdev)];
531loop:
532	for (vp = *vpp; vp; vp = vp->v_specnext) {
533		if (nvp_rdev != vp->v_rdev || nvp->v_type != vp->v_type) {
534			continue;
535		}
536		/*
537		 * Alias, but not in use, so flush it out.
538		 */
539		if (vp->v_usecount == 0) {
540			vgonel(vp, p);
541			goto loop;
542		}
543		if (vget(vp, LK_EXCLUSIVE, p)) {
544			goto loop;
545		}
546		break;
547	}
548
549	/*
550	 * The common case (no reusable alias found) is handled here.
551	 */
552	if (vp == NULL || !(vp->v_tag == VT_NON && vp->v_type == VBLK)) {
553		nvp->v_specinfo = malloc(sizeof(struct specinfo), M_VNODE,
554			M_WAITOK);
555		nvp->v_rdev = nvp_rdev;
556		nvp->v_hashchain = vpp;
557		nvp->v_specnext = *vpp;
558		nvp->v_specmountpoint = NULL;
559		nvp->v_speclockf = NULL;
560		nvp->v_specbitmap = NULL;
561		if (nvp->v_type == VCHR &&
562		    (cdevsw[major(nvp_rdev)].d_flags & D_CLONE) &&
563		    (minor(nvp_rdev) >> CLONE_SHIFT == 0)) {
564			if (vp != NULLVP)
565				nvp->v_specbitmap = vp->v_specbitmap;
566			else
567				nvp->v_specbitmap = malloc(CLONE_MAPSZ,
568				    M_VNODE, M_WAITOK | M_ZERO);
569		}
570		*vpp = nvp;
571		if (vp != NULLVP) {
572			nvp->v_flag |= VALIASED;
573			vp->v_flag |= VALIASED;
574			vput(vp);
575		}
576		return (NULLVP);
577	}
578
579	/*
580	 * This is the uncommon case: we found an alias whose tag is
581	 * VT_NON and whose type is VBLK, i.e. a block device vnode
582	 * that was created using bdevvp.
583	 *
584	 * An example of such a vnode is the root partition device vnode
585	 * created in ffs_mountroot.
586	 *
587	 * The vnodes created by bdevvp should not be aliased (why?).
588	 */
589
590	VOP_UNLOCK(vp);
591	vclean(vp, 0, p);
592	vp->v_op = nvp->v_op;
593	vp->v_tag = nvp->v_tag;
594	nvp->v_type = VNON;
595	insmntque(vp, mp);
596	return (vp);
597}
598
599/*
600 * Grab a particular vnode from the free list, increment its
601 * reference count and lock it. If the vnode lock bit is set,
602 * the vnode is being eliminated in vgone. In that case, we
603 * cannot grab it, so the process is awakened when the
604 * transition is completed, and an error code is returned to
605 * indicate that the vnode is no longer usable, possibly
606 * having been changed to a new file system type.
607 */
608int
609vget(struct vnode *vp, int flags, struct proc *p)
610{
611	int error, s, onfreelist;
612
613	/*
614	 * If the vnode is in the process of being cleaned out for
615	 * another use, we wait for the cleaning to finish and then
616	 * return failure. Cleaning is determined by checking that
617	 * the VXLOCK flag is set.
618	 */
619
620	if (vp->v_flag & VXLOCK) {
621		if (flags & LK_NOWAIT) {
622			return (EBUSY);
623		}
624
625		vp->v_flag |= VXWANT;
626		tsleep(vp, PINOD, "vget", 0);
627		return (ENOENT);
628	}
629
630	onfreelist = vp->v_bioflag & VBIOONFREELIST;
631	if (vp->v_usecount == 0 && onfreelist) {
632		s = splbio();
633		if (vp->v_holdcnt > 0)
634			TAILQ_REMOVE(&vnode_hold_list, vp, v_freelist);
635		else
636			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
637		vp->v_bioflag &= ~VBIOONFREELIST;
638		splx(s);
639	}
640
641 	vp->v_usecount++;
642	if (flags & LK_TYPE_MASK) {
643		if ((error = vn_lock(vp, flags)) != 0) {
644			vp->v_usecount--;
645			if (vp->v_usecount == 0 && onfreelist)
646				vputonfreelist(vp);
647		}
648		return (error);
649	}
650
651	return (0);
652}
653
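/*
 * Illustrative only: a typical vget()/vput() pairing, modelled on the
 * checkalias() caller above.  On success the vnode is referenced and
 * locked, and vput() drops both:
 *
 *	if (vget(vp, LK_EXCLUSIVE, curproc) == 0) {
 *		... use the locked, referenced vnode ...
 *		vput(vp);
 *	}
 */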
654
655/* Vnode reference. */
656void
657vref(struct vnode *vp)
658{
659#ifdef DIAGNOSTIC
660	if (vp->v_usecount == 0)
661		panic("vref used where vget required");
662	if (vp->v_type == VNON)
663		panic("vref on a VNON vnode");
664#endif
665	vp->v_usecount++;
666}
667
668void
669vputonfreelist(struct vnode *vp)
670{
671	int s;
672	struct freelst *lst;
673
674	s = splbio();
675#ifdef DIAGNOSTIC
676	if (vp->v_usecount != 0)
677		panic("Use count is not zero!");
678
679	if (vp->v_bioflag & VBIOONFREELIST) {
680		vprint("vnode already on free list: ", vp);
681		panic("vnode already on free list");
682	}
683#endif
684
685	vp->v_bioflag |= VBIOONFREELIST;
686
687	if (vp->v_holdcnt > 0)
688		lst = &vnode_hold_list;
689	else
690		lst = &vnode_free_list;
691
692	if (vp->v_type == VBAD)
693		TAILQ_INSERT_HEAD(lst, vp, v_freelist);
694	else
695		TAILQ_INSERT_TAIL(lst, vp, v_freelist);
696
697	splx(s);
698}
699
700/*
701 * vput(), just unlock and vrele()
702 */
703void
704vput(struct vnode *vp)
705{
706	struct proc *p = curproc;
707
708#ifdef DIAGNOSTIC
709	if (vp == NULL)
710		panic("vput: null vp");
711#endif
712
713#ifdef DIAGNOSTIC
714	if (vp->v_usecount == 0) {
715		vprint("vput: bad ref count", vp);
716		panic("vput: ref cnt");
717	}
718#endif
719	vp->v_usecount--;
720	if (vp->v_usecount > 0) {
721		VOP_UNLOCK(vp);
722		return;
723	}
724
725#ifdef DIAGNOSTIC
726	if (vp->v_writecount != 0) {
727		vprint("vput: bad writecount", vp);
728		panic("vput: v_writecount != 0");
729	}
730#endif
731
732	VOP_INACTIVE(vp, p);
733
734	if (vp->v_usecount == 0 && !(vp->v_bioflag & VBIOONFREELIST))
735		vputonfreelist(vp);
736}
737
738/*
739 * Vnode release - use for active VNODES.
740 * If count drops to zero, call inactive routine and return to freelist.
741 * Returns 0 if it did not sleep.
742 */
743int
744vrele(struct vnode *vp)
745{
746	struct proc *p = curproc;
747
748#ifdef DIAGNOSTIC
749	if (vp == NULL)
750		panic("vrele: null vp");
751#endif
752#ifdef DIAGNOSTIC
753	if (vp->v_usecount == 0) {
754		vprint("vrele: bad ref count", vp);
755		panic("vrele: ref cnt");
756	}
757#endif
758	vp->v_usecount--;
759	if (vp->v_usecount > 0) {
760		return (0);
761	}
762
763#ifdef DIAGNOSTIC
764	if (vp->v_writecount != 0) {
765		vprint("vrele: bad writecount", vp);
766		panic("vrele: v_writecount != 0");
767	}
768#endif
769
770	if (vn_lock(vp, LK_EXCLUSIVE)) {
771#ifdef DIAGNOSTIC
772		vprint("vrele: cannot lock", vp);
773#endif
774		return (1);
775	}
776
777	VOP_INACTIVE(vp, p);
778
779	if (vp->v_usecount == 0 && !(vp->v_bioflag & VBIOONFREELIST))
780		vputonfreelist(vp);
781	return (1);
782}
783
784/* Page or buffer structure gets a reference. */
785void
786vhold(struct vnode *vp)
787{
788	/*
789	 * If it is on the freelist and the hold count is currently
790	 * zero, move it to the hold list.
791	 */
792	if ((vp->v_bioflag & VBIOONFREELIST) &&
793	    vp->v_holdcnt == 0 && vp->v_usecount == 0) {
794		TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
795		TAILQ_INSERT_TAIL(&vnode_hold_list, vp, v_freelist);
796	}
797	vp->v_holdcnt++;
798}
799
800/* Lose interest in a vnode. */
801void
802vdrop(struct vnode *vp)
803{
804#ifdef DIAGNOSTIC
805	if (vp->v_holdcnt == 0)
806		panic("vdrop: zero holdcnt");
807#endif
808
809	vp->v_holdcnt--;
810
811	/*
812	 * If it is on the holdlist and the hold count drops to
813	 * zero, move it to the free list.
814	 */
815	if ((vp->v_bioflag & VBIOONFREELIST) &&
816	    vp->v_holdcnt == 0 && vp->v_usecount == 0) {
817		TAILQ_REMOVE(&vnode_hold_list, vp, v_freelist);
818		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
819	}
820}
821
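/*
 * Note on vhold()/vdrop(): they maintain v_holdcnt, which counts page
 * and buffer references rather than user references (v_usecount).  In
 * this file they are paired by bgetvp() and brelvp() when a buffer is
 * attached to or detached from a vnode.
 */
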
822/*
823 * Remove any vnodes in the vnode table belonging to mount point mp.
824 *
825 * If MNT_NOFORCE is specified, there should not be any active ones,
826 * return error if any are found (nb: this is a user error, not a
827 * system error). If MNT_FORCE is specified, detach any active vnodes
828 * that are found.
829 */
830#ifdef DEBUG
831int busyprt = 0;	/* print out busy vnodes */
832struct ctldebug debug1 = { "busyprt", &busyprt };
833#endif
834
835int
836vfs_mount_foreach_vnode(struct mount *mp,
837    int (*func)(struct vnode *, void *), void *arg) {
838	struct vnode *vp, *nvp;
839	int error = 0;
840
841loop:
842	LIST_FOREACH_SAFE(vp, &mp->mnt_vnodelist, v_mntvnodes, nvp) {
843		if (vp->v_mount != mp)
844			goto loop;
845
846		error = func(vp, arg);
847
848		if (error != 0)
849			break;
850	}
851
852	return (error);
853}
854
855struct vflush_args {
856	struct vnode *skipvp;
857	int busy;
858	int flags;
859};
860
861int
862vflush_vnode(struct vnode *vp, void *arg)
863{
864	struct vflush_args *va = arg;
865	struct proc *p = curproc;
866
867	if (vp == va->skipvp) {
868		return (0);
869	}
870
871	if ((va->flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
872		return (0);
873	}
874
875	/*
876	 * If WRITECLOSE is set, only flush out regular file
877	 * vnodes open for writing.
878	 */
879	if ((va->flags & WRITECLOSE) &&
880	    (vp->v_writecount == 0 || vp->v_type != VREG)) {
881		return (0);
882	}
883
884	/*
885	 * With v_usecount == 0, all we need to do is clear
886	 * out the vnode data structures and we are done.
887	 */
888	if (vp->v_usecount == 0) {
889		vgonel(vp, p);
890		return (0);
891	}
892
893	/*
894	 * If FORCECLOSE is set, forcibly close the vnode.
895	 * For block or character devices, revert to an
896	 * anonymous device. For all other files, just kill them.
897	 */
898	if (va->flags & FORCECLOSE) {
899		if (vp->v_type != VBLK && vp->v_type != VCHR) {
900			vgonel(vp, p);
901		} else {
902			vclean(vp, 0, p);
903			vp->v_op = &spec_vops;
904			insmntque(vp, NULL);
905		}
906		return (0);
907	}
908
909	/*
910	 * If IGNORECLEAN is set, we are allowed to skip vnodes which
911	 * have no changes pending to disk.
912	 * XXX Might be nice to check per-fs "inode" flags, but
913	 * generally the filesystem is sync'd already, right?
914	 */
915	if ((va->flags & IGNORECLEAN) &&
916	    LIST_EMPTY(&vp->v_dirtyblkhd))
917		return (0);
918
919#ifdef DEBUG
920	if (busyprt)
921		vprint("vflush: busy vnode", vp);
922#endif
923	va->busy++;
924	return (0);
925}
926
927int
928vflush(struct mount *mp, struct vnode *skipvp, int flags)
929{
930	struct vflush_args va;
931	va.skipvp = skipvp;
932	va.busy = 0;
933	va.flags = flags;
934
935	vfs_mount_foreach_vnode(mp, vflush_vnode, &va);
936
937	if (va.busy)
938		return (EBUSY);
939	return (0);
940}
941
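/*
 * Illustrative only: per-filesystem unmount code typically flushes a
 * mount's vnodes with vflush().  A sketch under the assumption that
 * mntflags comes from the unmount request (the exact call site varies
 * per filesystem):
 *
 *	int flags = 0;
 *
 *	if (mntflags & MNT_FORCE)
 *		flags |= FORCECLOSE;
 *	error = vflush(mp, NULLVP, flags);
 */
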
942/*
943 * Disassociate the underlying file system from a vnode.
944 */
945void
946vclean(struct vnode *vp, int flags, struct proc *p)
947{
948	int active;
949
950	/*
951	 * Check to see if the vnode is in use.
952	 * If so we have to reference it before we clean it out
953	 * so that its count cannot fall to zero and generate a
954	 * race against ourselves to recycle it.
955	 */
956	if ((active = vp->v_usecount) != 0)
957		vp->v_usecount++;
958
959	/*
960	 * Prevent the vnode from being recycled or
961	 * brought into use while we clean it out.
962	 */
963	if (vp->v_flag & VXLOCK)
964		panic("vclean: deadlock");
965	vp->v_flag |= VXLOCK;
966	/*
967	 * Even if the count is zero, the VOP_INACTIVE routine may still
968	 * have the object locked while it cleans it out. The VOP_LOCK
969	 * ensures that the VOP_INACTIVE routine is done with its work.
970	 * For active vnodes, it ensures that no other activity can
971	 * occur while the underlying object is being cleaned out.
972	 */
973	VOP_LOCK(vp, LK_DRAIN | LK_EXCLUSIVE);
974
975	/*
976	 * Clean out any VM data associated with the vnode.
977	 */
978	uvm_vnp_terminate(vp);
979	/*
980	 * Clean out any buffers associated with the vnode.
981	 */
982	if (flags & DOCLOSE)
983		vinvalbuf(vp, V_SAVE, NOCRED, p, 0, 0);
984	/*
985	 * If purging an active vnode, it must be closed and
986	 * deactivated before being reclaimed. Note that the
987	 * VOP_INACTIVE will unlock the vnode
988	 */
989	if (active) {
990		if (flags & DOCLOSE)
991			VOP_CLOSE(vp, FNONBLOCK, NOCRED, p);
992		VOP_INACTIVE(vp, p);
993	} else {
994		/*
995		 * Any other processes trying to obtain this lock must first
996		 * wait for VXLOCK to clear, then call the new lock operation.
997		 */
998		VOP_UNLOCK(vp);
999	}
1000
1001	/*
1002	 * Reclaim the vnode.
1003	 */
1004	if (VOP_RECLAIM(vp, p))
1005		panic("vclean: cannot reclaim");
1006	if (active) {
1007		vp->v_usecount--;
1008		if (vp->v_usecount == 0) {
1009			if (vp->v_holdcnt > 0)
1010				panic("vclean: not clean");
1011			vputonfreelist(vp);
1012		}
1013	}
1014	cache_purge(vp);
1015
1016	/*
1017	 * Done with purge, notify sleepers of the grim news.
1018	 */
1019	vp->v_op = &dead_vops;
1020	VN_KNOTE(vp, NOTE_REVOKE);
1021	vp->v_tag = VT_NON;
1022	vp->v_flag &= ~VXLOCK;
1023#ifdef VFSLCKDEBUG
1024	vp->v_flag &= ~VLOCKSWORK;
1025#endif
1026	if (vp->v_flag & VXWANT) {
1027		vp->v_flag &= ~VXWANT;
1028		wakeup(vp);
1029	}
1030}
1031
1032/*
1033 * Recycle an unused vnode to the front of the free list.
1034 */
1035int
1036vrecycle(struct vnode *vp, struct proc *p)
1037{
1038	if (vp->v_usecount == 0) {
1039		vgonel(vp, p);
1040		return (1);
1041	}
1042	return (0);
1043}
1044
1045/*
1046 * Eliminate all activity associated with a vnode
1047 * in preparation for reuse.
1048 */
1049void
1050vgone(struct vnode *vp)
1051{
1052	struct proc *p = curproc;
1053	vgonel(vp, p);
1054}
1055
1056/*
1057 * vgone, with struct proc.
1058 */
1059void
1060vgonel(struct vnode *vp, struct proc *p)
1061{
1062	struct vnode *vq;
1063	struct vnode *vx;
1064
1065	/*
1066	 * If a vgone (or vclean) is already in progress,
1067	 * wait until it is done and return.
1068	 */
1069	if (vp->v_flag & VXLOCK) {
1070		vp->v_flag |= VXWANT;
1071		tsleep(vp, PINOD, "vgone", 0);
1072		return;
1073	}
1074
1075	/*
1076	 * Clean out the filesystem specific data.
1077	 */
1078	vclean(vp, DOCLOSE, p);
1079	/*
1080	 * Delete from old mount point vnode list, if on one.
1081	 */
1082	if (vp->v_mount != NULL)
1083		insmntque(vp, NULL);
1084	/*
1085	 * If special device, remove it from special device alias list
1086	 * if it is on one.
1087	 */
1088	if ((vp->v_type == VBLK || vp->v_type == VCHR) && vp->v_specinfo != 0) {
1089		if ((vp->v_flag & VALIASED) == 0 && vp->v_type == VCHR &&
1090		    (cdevsw[major(vp->v_rdev)].d_flags & D_CLONE) &&
1091		    (minor(vp->v_rdev) >> CLONE_SHIFT == 0)) {
1092			free(vp->v_specbitmap, M_VNODE, CLONE_MAPSZ);
1093		}
1094		if (*vp->v_hashchain == vp) {
1095			*vp->v_hashchain = vp->v_specnext;
1096		} else {
1097			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
1098				if (vq->v_specnext != vp)
1099					continue;
1100				vq->v_specnext = vp->v_specnext;
1101				break;
1102			}
1103			if (vq == NULL)
1104				panic("missing bdev");
1105		}
1106		if (vp->v_flag & VALIASED) {
1107			vx = NULL;
1108			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
1109				if (vq->v_rdev != vp->v_rdev ||
1110				    vq->v_type != vp->v_type)
1111					continue;
1112				if (vx)
1113					break;
1114				vx = vq;
1115			}
1116			if (vx == NULL)
1117				panic("missing alias");
1118			if (vq == NULL)
1119				vx->v_flag &= ~VALIASED;
1120			vp->v_flag &= ~VALIASED;
1121		}
1122		free(vp->v_specinfo, M_VNODE, sizeof(struct specinfo));
1123		vp->v_specinfo = NULL;
1124	}
1125	/*
1126	 * Mark the vnode as unusable; the code below will move it to
1127	 * the head of the free list if appropriate.
1128	 */
1129	vp->v_type = VBAD;
1130
1131	/*
1132	 * Move onto the free list, unless we were called from
1133	 * getnewvnode and we're not on any free list
1134	 */
1135	if (vp->v_usecount == 0 &&
1136	    (vp->v_bioflag & VBIOONFREELIST)) {
1137		int s;
1138
1139		s = splbio();
1140
1141		if (vp->v_holdcnt > 0)
1142			panic("vgonel: not clean");
1143
1144		if (TAILQ_FIRST(&vnode_free_list) != vp) {
1145			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
1146			TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
1147		}
1148		splx(s);
1149	}
1150}
1151
1152/*
1153 * Lookup a vnode by device number.
1154 */
1155int
1156vfinddev(dev_t dev, enum vtype type, struct vnode **vpp)
1157{
1158	struct vnode *vp;
1159	int rc = 0;
1160
1161	for (vp = speclisth[SPECHASH(dev)]; vp; vp = vp->v_specnext) {
1162		if (dev != vp->v_rdev || type != vp->v_type)
1163			continue;
1164		*vpp = vp;
1165		rc = 1;
1166		break;
1167	}
1168	return (rc);
1169}
1170
1171/*
1172 * Revoke all the vnodes corresponding to the specified minor number
1173 * range (endpoints inclusive) of the specified major.
1174 */
1175void
1176vdevgone(int maj, int minl, int minh, enum vtype type)
1177{
1178	struct vnode *vp;
1179	int mn;
1180
1181	for (mn = minl; mn <= minh; mn++)
1182		if (vfinddev(makedev(maj, mn), type, &vp))
1183			VOP_REVOKE(vp, REVOKEALL);
1184}
1185
1186/*
1187 * Calculate the total number of references to a special device.
1188 */
1189int
1190vcount(struct vnode *vp)
1191{
1192	struct vnode *vq, *vnext;
1193	int count;
1194
1195loop:
1196	if ((vp->v_flag & VALIASED) == 0)
1197		return (vp->v_usecount);
1198	for (count = 0, vq = *vp->v_hashchain; vq; vq = vnext) {
1199		vnext = vq->v_specnext;
1200		if (vq->v_rdev != vp->v_rdev || vq->v_type != vp->v_type)
1201			continue;
1202		/*
1203		 * Alias, but not in use, so flush it out.
1204		 */
1205		if (vq->v_usecount == 0 && vq != vp) {
1206			vgone(vq);
1207			goto loop;
1208		}
1209		count += vq->v_usecount;
1210	}
1211	return (count);
1212}
1213
1214#if defined(DEBUG) || defined(DIAGNOSTIC)
1215/*
1216 * Print out a description of a vnode.
1217 */
1218static char *typename[] =
1219   { "VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD" };
1220
1221void
1222vprint(char *label, struct vnode *vp)
1223{
1224	char buf[64];
1225
1226	if (label != NULL)
1227		printf("%s: ", label);
1228	printf("%p, type %s, use %u, write %u, hold %u,",
1229		vp, typename[vp->v_type], vp->v_usecount, vp->v_writecount,
1230		vp->v_holdcnt);
1231	buf[0] = '\0';
1232	if (vp->v_flag & VROOT)
1233		strlcat(buf, "|VROOT", sizeof buf);
1234	if (vp->v_flag & VTEXT)
1235		strlcat(buf, "|VTEXT", sizeof buf);
1236	if (vp->v_flag & VSYSTEM)
1237		strlcat(buf, "|VSYSTEM", sizeof buf);
1238	if (vp->v_flag & VXLOCK)
1239		strlcat(buf, "|VXLOCK", sizeof buf);
1240	if (vp->v_flag & VXWANT)
1241		strlcat(buf, "|VXWANT", sizeof buf);
1242	if (vp->v_bioflag & VBIOWAIT)
1243		strlcat(buf, "|VBIOWAIT", sizeof buf);
1244	if (vp->v_bioflag & VBIOONFREELIST)
1245		strlcat(buf, "|VBIOONFREELIST", sizeof buf);
1246	if (vp->v_bioflag & VBIOONSYNCLIST)
1247		strlcat(buf, "|VBIOONSYNCLIST", sizeof buf);
1248	if (vp->v_flag & VALIASED)
1249		strlcat(buf, "|VALIASED", sizeof buf);
1250	if (buf[0] != '\0')
1251		printf(" flags (%s)", &buf[1]);
1252	if (vp->v_data == NULL) {
1253		printf("\n");
1254	} else {
1255		printf("\n\t");
1256		VOP_PRINT(vp);
1257	}
1258}
1259#endif /* DEBUG || DIAGNOSTIC */
1260
1261#ifdef DEBUG
1262/*
1263 * List all of the locked vnodes in the system.
1264 * Called when debugging the kernel.
1265 */
1266void
1267printlockedvnodes(void)
1268{
1269	struct mount *mp;
1270	struct vnode *vp;
1271
1272	printf("Locked vnodes\n");
1273
1274	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
1275		if (vfs_busy(mp, VB_READ|VB_NOWAIT))
1276			continue;
1277		LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
1278			if (VOP_ISLOCKED(vp))
1279				vprint(NULL, vp);
1280		}
1281		vfs_unbusy(mp);
1282 	}
1283
1284}
1285#endif
1286
1287/*
1288 * Top level filesystem related information gathering.
1289 */
1290int
1291vfs_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
1292    size_t newlen, struct proc *p)
1293{
1294	struct vfsconf *vfsp, *tmpvfsp;
1295	int ret;
1296
1297	/* all sysctl names at this level are at least name and field */
1298	if (namelen < 2)
1299		return (ENOTDIR);		/* overloaded */
1300
1301	if (name[0] != VFS_GENERIC) {
1302		for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
1303			if (vfsp->vfc_typenum == name[0])
1304				break;
1305
1306		if (vfsp == NULL || vfsp->vfc_vfsops->vfs_sysctl == NULL)
1307			return (EOPNOTSUPP);
1308
1309		return ((*vfsp->vfc_vfsops->vfs_sysctl)(&name[1], namelen - 1,
1310		    oldp, oldlenp, newp, newlen, p));
1311	}
1312
1313	switch (name[1]) {
1314	case VFS_MAXTYPENUM:
1315		return (sysctl_rdint(oldp, oldlenp, newp, maxvfsconf));
1316
1317	case VFS_CONF:
1318		if (namelen < 3)
1319			return (ENOTDIR);	/* overloaded */
1320
1321		for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
1322			if (vfsp->vfc_typenum == name[2])
1323				break;
1324
1325		if (vfsp == NULL)
1326			return (EOPNOTSUPP);
1327
1328		/* Make a copy, clear out kernel pointers */
1329		tmpvfsp = malloc(sizeof(*tmpvfsp), M_TEMP, M_WAITOK|M_ZERO);
1330		memcpy(tmpvfsp, vfsp, sizeof(*tmpvfsp));
1331		tmpvfsp->vfc_vfsops = NULL;
1332		tmpvfsp->vfc_next = NULL;
1333
1334		ret = sysctl_rdstruct(oldp, oldlenp, newp, tmpvfsp,
1335		    sizeof(struct vfsconf));
1336
1337		free(tmpvfsp, M_TEMP, sizeof(*tmpvfsp));
1338		return (ret);
1339	case VFS_BCACHESTAT:	/* buffer cache statistics */
1340		ret = sysctl_rdstruct(oldp, oldlenp, newp, &bcstats,
1341		    sizeof(struct bcachestats));
1342		return (ret);
1343	}
1344	return (EOPNOTSUPP);
1345}
1346
1347/*
1348 * Check to see if a filesystem is mounted on a block device.
1349 */
1350int
1351vfs_mountedon(struct vnode *vp)
1352{
1353	struct vnode *vq;
1354	int error = 0;
1355
1356 	if (vp->v_specmountpoint != NULL)
1357		return (EBUSY);
1358	if (vp->v_flag & VALIASED) {
1359		for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
1360			if (vq->v_rdev != vp->v_rdev ||
1361			    vq->v_type != vp->v_type)
1362				continue;
1363			if (vq->v_specmountpoint != NULL) {
1364				error = EBUSY;
1365				break;
1366			}
1367 		}
1368	}
1369	return (error);
1370}
1371
1372#ifdef NFSSERVER
1373/*
1374 * Build hash lists of net addresses and hang them off the mount point.
1375 * Called by vfs_export() to set up the lists of export addresses.
1376 */
1377int
1378vfs_hang_addrlist(struct mount *mp, struct netexport *nep,
1379    struct export_args *argp)
1380{
1381	struct netcred *np;
1382	struct radix_node_head *rnh;
1383	int nplen, i;
1384	struct radix_node *rn;
1385	struct sockaddr *saddr, *smask = 0;
1386	int error;
1387
1388	if (argp->ex_addrlen == 0) {
1389		if (mp->mnt_flag & MNT_DEFEXPORTED)
1390			return (EPERM);
1391		np = &nep->ne_defexported;
1392		/* fill in the kernel's ucred from userspace's xucred */
1393		if ((error = crfromxucred(&np->netc_anon, &argp->ex_anon)))
1394			return (error);
1395		mp->mnt_flag |= MNT_DEFEXPORTED;
1396		goto finish;
1397	}
1398	if (argp->ex_addrlen > MLEN || argp->ex_masklen > MLEN ||
1399	    argp->ex_addrlen < 0 || argp->ex_masklen < 0)
1400		return (EINVAL);
1401	nplen = sizeof(struct netcred) + argp->ex_addrlen + argp->ex_masklen;
1402	np = (struct netcred *)malloc(nplen, M_NETADDR, M_WAITOK|M_ZERO);
1403	saddr = (struct sockaddr *)(np + 1);
1404	error = copyin(argp->ex_addr, saddr, argp->ex_addrlen);
1405	if (error)
1406		goto out;
1407	if (saddr->sa_len > argp->ex_addrlen)
1408		saddr->sa_len = argp->ex_addrlen;
1409	if (argp->ex_masklen) {
1410		smask = (struct sockaddr *)((caddr_t)saddr + argp->ex_addrlen);
1411		error = copyin(argp->ex_mask, smask, argp->ex_masklen);
1412		if (error)
1413			goto out;
1414		if (smask->sa_len > argp->ex_masklen)
1415			smask->sa_len = argp->ex_masklen;
1416	}
1417	/* fill in the kernel's ucred from userspace's xucred */
1418	if ((error = crfromxucred(&np->netc_anon, &argp->ex_anon)))
1419		goto out;
1420	i = saddr->sa_family;
1421	switch (i) {
1422	case AF_INET:
1423		if ((rnh = nep->ne_rtable_inet) == NULL) {
1424			if (!rn_inithead((void **)&nep->ne_rtable_inet,
1425			    offsetof(struct sockaddr_in, sin_addr))) {
1426				error = ENOBUFS;
1427				goto out;
1428			}
1429			rnh = nep->ne_rtable_inet;
1430		}
1431		break;
1432	default:
1433		error = EINVAL;
1434		goto out;
1435	}
1436	rn = rn_addroute(saddr, smask, rnh, np->netc_rnodes, 0);
1437	if (rn == 0 || np != (struct netcred *)rn) { /* already exists */
1438		error = EPERM;
1439		goto out;
1440	}
1441finish:
1442	np->netc_exflags = argp->ex_flags;
1443	return (0);
1444out:
1445	free(np, M_NETADDR, nplen);
1446	return (error);
1447}
1448
1449int
1450vfs_free_netcred(struct radix_node *rn, void *w, u_int id)
1451{
1452	struct radix_node_head *rnh = (struct radix_node_head *)w;
1453
1454	rn_delete(rn->rn_key, rn->rn_mask, rnh, NULL);
1455	free(rn, M_NETADDR, 0);
1456	return (0);
1457}
1458
1459/*
1460 * Free the net address hash lists that are hanging off the mount points.
1461 */
1462void
1463vfs_free_addrlist(struct netexport *nep)
1464{
1465	struct radix_node_head *rnh;
1466
1467	if ((rnh = nep->ne_rtable_inet) != NULL) {
1468		rn_walktree(rnh, vfs_free_netcred, rnh);
1469		free(rnh, M_RTABLE, 0);
1470		nep->ne_rtable_inet = NULL;
1471	}
1472}
1473#endif /* NFSSERVER */
1474
1475int
1476vfs_export(struct mount *mp, struct netexport *nep, struct export_args *argp)
1477{
1478#ifdef NFSSERVER
1479	int error;
1480
1481	if (argp->ex_flags & MNT_DELEXPORT) {
1482		vfs_free_addrlist(nep);
1483		mp->mnt_flag &= ~(MNT_EXPORTED | MNT_DEFEXPORTED);
1484	}
1485	if (argp->ex_flags & MNT_EXPORTED) {
1486		if ((error = vfs_hang_addrlist(mp, nep, argp)) != 0)
1487			return (error);
1488		mp->mnt_flag |= MNT_EXPORTED;
1489	}
1490	return (0);
1491#else
1492	return (ENOTSUP);
1493#endif /* NFSSERVER */
1494}
1495
1496struct netcred *
1497vfs_export_lookup(struct mount *mp, struct netexport *nep, struct mbuf *nam)
1498{
1499#ifdef NFSSERVER
1500	struct netcred *np;
1501	struct radix_node_head *rnh;
1502	struct sockaddr *saddr;
1503
1504	np = NULL;
1505	if (mp->mnt_flag & MNT_EXPORTED) {
1506		/*
1507		 * Lookup in the export list first.
1508		 */
1509		if (nam != NULL) {
1510			saddr = mtod(nam, struct sockaddr *);
1511			switch(saddr->sa_family) {
1512			case AF_INET:
1513				rnh = nep->ne_rtable_inet;
1514				break;
1515			default:
1516				rnh = NULL;
1517				break;
1518			}
1519			if (rnh != NULL)
1520				np = (struct netcred *)rn_match(saddr, rnh);
1521		}
1522		/*
1523		 * If no address match, use the default if it exists.
1524		 */
1525		if (np == NULL && mp->mnt_flag & MNT_DEFEXPORTED)
1526			np = &nep->ne_defexported;
1527	}
1528	return (np);
1529#else
1530	return (NULL);
1531#endif /* NFSSERVER */
1532}
1533
1534/*
1535 * Do the usual access checking.
1536 * file_mode, uid and gid are from the vnode in question,
1537 * while acc_mode and cred are from the VOP_ACCESS parameter list
1538 */
1539int
1540vaccess(enum vtype type, mode_t file_mode, uid_t uid, gid_t gid,
1541    mode_t acc_mode, struct ucred *cred)
1542{
1543	mode_t mask;
1544
1545	/* User id 0 always gets read/write access. */
1546	if (cred->cr_uid == 0) {
1547		/* For VEXEC, at least one of the execute bits must be set. */
1548		if ((acc_mode & VEXEC) && type != VDIR &&
1549		    (file_mode & (S_IXUSR|S_IXGRP|S_IXOTH)) == 0)
1550			return EACCES;
1551		return 0;
1552	}
1553
1554	mask = 0;
1555
1556	/* Otherwise, check the owner. */
1557	if (cred->cr_uid == uid) {
1558		if (acc_mode & VEXEC)
1559			mask |= S_IXUSR;
1560		if (acc_mode & VREAD)
1561			mask |= S_IRUSR;
1562		if (acc_mode & VWRITE)
1563			mask |= S_IWUSR;
1564		return (file_mode & mask) == mask ? 0 : EACCES;
1565	}
1566
1567	/* Otherwise, check the groups. */
1568	if (groupmember(gid, cred)) {
1569		if (acc_mode & VEXEC)
1570			mask |= S_IXGRP;
1571		if (acc_mode & VREAD)
1572			mask |= S_IRGRP;
1573		if (acc_mode & VWRITE)
1574			mask |= S_IWGRP;
1575		return (file_mode & mask) == mask ? 0 : EACCES;
1576	}
1577
1578	/* Otherwise, check everyone else. */
1579	if (acc_mode & VEXEC)
1580		mask |= S_IXOTH;
1581	if (acc_mode & VREAD)
1582		mask |= S_IROTH;
1583	if (acc_mode & VWRITE)
1584		mask |= S_IWOTH;
1585	return (file_mode & mask) == mask ? 0 : EACCES;
1586}
1587
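/*
 * Illustrative only: a filesystem's VOP_ACCESS implementation usually
 * ends by delegating the permission-bit check to vaccess().  A sketch
 * assuming a UFS-style inode with i_mode, i_uid and i_gid fields (the
 * field names are per-filesystem, not defined here):
 *
 *	return (vaccess(vp->v_type, ip->i_mode & ALLPERMS, ip->i_uid,
 *	    ip->i_gid, acc_mode, cred));
 */
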
1588struct rwlock vfs_stall_lock = RWLOCK_INITIALIZER("vfs_stall");
1589
1590int
1591vfs_stall(struct proc *p, int stall)
1592{
1593	struct mount *mp;
1594	int allerror = 0, error;
1595
1596	if (stall)
1597		rw_enter_write(&vfs_stall_lock);
1598
1599	/*
1600	 * The loop variable mp is protected by vfs_busy() so that it cannot
1601	 * be unmounted while VFS_SYNC() sleeps.
1602	 */
1603	TAILQ_FOREACH_REVERSE(mp, &mountlist, mntlist, mnt_list) {
1604		if (stall) {
1605			error = vfs_busy(mp, VB_WRITE|VB_WAIT);
1606			if (error) {
1607				printf("%s: busy\n", mp->mnt_stat.f_mntonname);
1608				allerror = error;
1609				continue;
1610			}
1611			uvm_vnp_sync(mp);
1612			error = VFS_SYNC(mp, MNT_WAIT, stall, p->p_ucred, p);
1613			if (error) {
1614				printf("%s: failed to sync\n", mp->mnt_stat.f_mntonname);
1615				vfs_unbusy(mp);
1616				allerror = error;
1617				continue;
1618			}
1619			mp->mnt_flag |= MNT_STALLED;
1620		} else {
1621			if (mp->mnt_flag & MNT_STALLED) {
1622				vfs_unbusy(mp);
1623				mp->mnt_flag &= ~MNT_STALLED;
1624			}
1625		}
1626	}
1627
1628	if (!stall)
1629		rw_exit_write(&vfs_stall_lock);
1630
1631	return (allerror);
1632}
1633
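/*
 * Taking and immediately releasing a read lock on vfs_stall_lock blocks
 * the caller until any writer (vfs_stall()) has released the lock, which
 * is what makes this function a barrier.
 */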
1634void
1635vfs_stall_barrier(void)
1636{
1637	rw_enter_read(&vfs_stall_lock);
1638	rw_exit_read(&vfs_stall_lock);
1639}
1640
1641/*
1642 * Unmount all file systems.
1643 * We traverse the list in reverse order under the assumption that doing so
1644 * will avoid needing to worry about dependencies.
1645 */
1646void
1647vfs_unmountall(void)
1648{
1649	struct mount *mp, *nmp;
1650	int allerror, error, again = 1;
1651
1652 retry:
1653	allerror = 0;
1654	TAILQ_FOREACH_REVERSE_SAFE(mp, &mountlist, mntlist, mnt_list, nmp) {
1655		if (vfs_busy(mp, VB_WRITE|VB_NOWAIT))
1656			continue;
1657		/* XXX Here is a race, the next pointer is not locked. */
1658		if ((error = dounmount(mp, MNT_FORCE, curproc)) != 0) {
1659			printf("unmount of %s failed with error %d\n",
1660			    mp->mnt_stat.f_mntonname, error);
1661			allerror = 1;
1662		}
1663	}
1664
1665	if (allerror) {
1666		printf("WARNING: some file systems would not unmount\n");
1667		if (again) {
1668			printf("retrying\n");
1669			again = 0;
1670			goto retry;
1671		}
1672	}
1673}
1674
1675/*
1676 * Sync and unmount file systems before shutting down.
1677 */
1678void
1679vfs_shutdown(struct proc *p)
1680{
1681#ifdef ACCOUNTING
1682	acct_shutdown();
1683#endif
1684
1685	printf("syncing disks... ");
1686
1687	if (panicstr == 0) {
1688		/* Sync before unmount, in case we hang on something. */
1689		sys_sync(p, NULL, NULL);
1690		vfs_unmountall();
1691	}
1692
1693#if NSOFTRAID > 0
1694	sr_quiesce();
1695#endif
1696
1697	if (vfs_syncwait(p, 1))
1698		printf("giving up\n");
1699	else
1700		printf("done\n");
1701}
1702
1703/*
1704 * Perform the sync() operation and wait for buffers to flush.
1705 */
1706int
1707vfs_syncwait(struct proc *p, int verbose)
1708{
1709	struct buf *bp;
1710	int iter, nbusy, dcount, s;
1711#ifdef MULTIPROCESSOR
1712	int hold_count;
1713#endif
1714
1715	sys_sync(p, NULL, NULL);
1716
1717	/* Wait for sync to finish. */
1718	dcount = 10000;
1719	for (iter = 0; iter < 20; iter++) {
1720		nbusy = 0;
1721		LIST_FOREACH(bp, &bufhead, b_list) {
1722			if ((bp->b_flags & (B_BUSY|B_INVAL|B_READ)) == B_BUSY)
1723				nbusy++;
1724			/*
1725			 * With soft updates, some buffers that are
1726			 * written will be remarked as dirty until other
1727			 * buffers are written.
1728			 */
1729			if (bp->b_flags & B_DELWRI) {
1730				s = splbio();
1731				bremfree(bp);
1732				buf_acquire(bp);
1733				splx(s);
1734				nbusy++;
1735				bawrite(bp);
1736				if (dcount-- <= 0) {
1737					if (verbose)
1738						printf("softdep ");
1739					return 1;
1740				}
1741			}
1742		}
1743		if (nbusy == 0)
1744			break;
1745		if (verbose)
1746			printf("%d ", nbusy);
1747#ifdef MULTIPROCESSOR
1748		if (_kernel_lock_held())
1749			hold_count = __mp_release_all(&kernel_lock);
1750		else
1751			hold_count = 0;
1752#endif
1753		DELAY(40000 * iter);
1754#ifdef MULTIPROCESSOR
1755		if (hold_count)
1756			__mp_acquire_count(&kernel_lock, hold_count);
1757#endif
1758	}
1759
1760	return nbusy;
1761}
1762
1763/*
1764 * posix file system related system variables.
1765 */
1766int
1767fs_posix_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp,
1768    void *newp, size_t newlen, struct proc *p)
1769{
1770	/* all sysctl names at this level are terminal */
1771	if (namelen != 1)
1772		return (ENOTDIR);
1773
1774	switch (name[0]) {
1775	case FS_POSIX_SETUID:
1776		if (newp && securelevel > 0)
1777			return (EPERM);
1778		return(sysctl_int(oldp, oldlenp, newp, newlen, &suid_clear));
1779	default:
1780		return (EOPNOTSUPP);
1781	}
1782	/* NOTREACHED */
1783}
1784
1785/*
1786 * file system related system variables.
1787 */
1788int
1789fs_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
1790    size_t newlen, struct proc *p)
1791{
1792	sysctlfn *fn;
1793
1794	switch (name[0]) {
1795	case FS_POSIX:
1796		fn = fs_posix_sysctl;
1797		break;
1798	default:
1799		return (EOPNOTSUPP);
1800	}
1801	return (*fn)(name + 1, namelen - 1, oldp, oldlenp, newp, newlen, p);
1802}
1803
1804
1805/*
1806 * Routines dealing with vnodes and buffers
1807 */
1808
1809/*
1810 * Wait for all outstanding I/Os to complete
1811 *
1812 * Manipulates v_numoutput. Must be called at splbio()
1813 */
1814int
1815vwaitforio(struct vnode *vp, int slpflag, char *wmesg, int timeo)
1816{
1817	int error = 0;
1818
1819	splassert(IPL_BIO);
1820
1821	while (vp->v_numoutput) {
1822		vp->v_bioflag |= VBIOWAIT;
1823		error = tsleep(&vp->v_numoutput,
1824		    slpflag | (PRIBIO + 1), wmesg, timeo);
1825		if (error)
1826			break;
1827	}
1828
1829	return (error);
1830}
1831
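/*
 * Illustrative only: callers bracket the wait with splbio(), as
 * vinvalbuf() and vflushbuf() do below.  A minimal sketch:
 *
 *	s = splbio();
 *	vwaitforio(vp, 0, "vwait", 0);
 *	splx(s);
 */
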
1832/*
1833 * Update outstanding I/O count and do wakeup if requested.
1834 *
1835 * Manipulates v_numoutput. Must be called at splbio()
1836 */
1837void
1838vwakeup(struct vnode *vp)
1839{
1840	splassert(IPL_BIO);
1841
1842	if (vp != NULL) {
1843		if (vp->v_numoutput-- == 0)
1844			panic("vwakeup: neg numoutput");
1845		if ((vp->v_bioflag & VBIOWAIT) && vp->v_numoutput == 0) {
1846			vp->v_bioflag &= ~VBIOWAIT;
1847			wakeup(&vp->v_numoutput);
1848		}
1849	}
1850}
1851
1852/*
1853 * Flush out and invalidate all buffers associated with a vnode.
1854 * Called with the underlying object locked.
1855 */
1856int
1857vinvalbuf(struct vnode *vp, int flags, struct ucred *cred, struct proc *p,
1858    int slpflag, int slptimeo)
1859{
1860	struct buf *bp;
1861	struct buf *nbp, *blist;
1862	int s, error;
1863
1864#ifdef VFSLCKDEBUG
1865	if ((vp->v_flag & VLOCKSWORK) && !VOP_ISLOCKED(vp))
1866		panic("%s: vp isn't locked, vp %p", __func__, vp);
1867#endif
1868
1869	if (flags & V_SAVE) {
1870		s = splbio();
1871		vwaitforio(vp, 0, "vinvalbuf", 0);
1872		if (!LIST_EMPTY(&vp->v_dirtyblkhd)) {
1873			splx(s);
1874			if ((error = VOP_FSYNC(vp, cred, MNT_WAIT, p)) != 0)
1875				return (error);
1876			s = splbio();
1877			if (vp->v_numoutput > 0 ||
1878			    !LIST_EMPTY(&vp->v_dirtyblkhd))
1879				panic("%s: dirty bufs, vp %p", __func__, vp);
1880		}
1881		splx(s);
1882	}
1883loop:
1884	s = splbio();
1885	for (;;) {
1886		if ((blist = LIST_FIRST(&vp->v_cleanblkhd)) &&
1887		    (flags & V_SAVEMETA))
1888			while (blist && blist->b_lblkno < 0)
1889				blist = LIST_NEXT(blist, b_vnbufs);
1890		if (blist == NULL &&
1891		    (blist = LIST_FIRST(&vp->v_dirtyblkhd)) &&
1892		    (flags & V_SAVEMETA))
1893			while (blist && blist->b_lblkno < 0)
1894				blist = LIST_NEXT(blist, b_vnbufs);
1895		if (!blist)
1896			break;
1897
1898		for (bp = blist; bp; bp = nbp) {
1899			nbp = LIST_NEXT(bp, b_vnbufs);
1900			if (flags & V_SAVEMETA && bp->b_lblkno < 0)
1901				continue;
1902			if (bp->b_flags & B_BUSY) {
1903				bp->b_flags |= B_WANTED;
1904				error = tsleep(bp, slpflag | (PRIBIO + 1),
1905				    "vinvalbuf", slptimeo);
1906				if (error) {
1907					splx(s);
1908					return (error);
1909				}
1910				break;
1911			}
1912			bremfree(bp);
1913			/*
1914			 * XXX Since there are no node locks for NFS, I believe
1915			 * there is a slight chance that a delayed write will
1916			 * occur while sleeping just above, so check for it.
1917			 */
1918			if ((bp->b_flags & B_DELWRI) && (flags & V_SAVE)) {
1919				buf_acquire(bp);
1920				splx(s);
1921				(void) VOP_BWRITE(bp);
1922				goto loop;
1923			}
1924			buf_acquire_nomap(bp);
1925			bp->b_flags |= B_INVAL;
1926			brelse(bp);
1927		}
1928	}
1929	if (!(flags & V_SAVEMETA) &&
1930	    (!LIST_EMPTY(&vp->v_dirtyblkhd) || !LIST_EMPTY(&vp->v_cleanblkhd)))
1931		panic("%s: flush failed, vp %p", __func__, vp);
1932	splx(s);
1933	return (0);
1934}
1935
1936void
1937vflushbuf(struct vnode *vp, int sync)
1938{
1939	struct buf *bp, *nbp;
1940	int s;
1941
1942loop:
1943	s = splbio();
1944	LIST_FOREACH_SAFE(bp, &vp->v_dirtyblkhd, b_vnbufs, nbp) {
1945		if ((bp->b_flags & B_BUSY))
1946			continue;
1947		if ((bp->b_flags & B_DELWRI) == 0)
1948			panic("vflushbuf: not dirty");
1949		bremfree(bp);
1950		buf_acquire(bp);
1951		splx(s);
1952		/*
1953		 * Wait for I/O associated with indirect blocks to complete,
1954		 * since there is no way to quickly wait for them below.
1955		 */
1956		if (bp->b_vp == vp || sync == 0)
1957			(void) bawrite(bp);
1958		else
1959			(void) bwrite(bp);
1960		goto loop;
1961	}
1962	if (sync == 0) {
1963		splx(s);
1964		return;
1965	}
1966	vwaitforio(vp, 0, "vflushbuf", 0);
1967	if (!LIST_EMPTY(&vp->v_dirtyblkhd)) {
1968		splx(s);
1969#ifdef DIAGNOSTIC
1970		vprint("vflushbuf: dirty", vp);
1971#endif
1972		goto loop;
1973	}
1974	splx(s);
1975}
1976
1977/*
1978 * Associate a buffer with a vnode.
1979 *
1980 * Manipulates buffer vnode queues. Must be called at splbio().
1981 */
1982void
1983bgetvp(struct vnode *vp, struct buf *bp)
1984{
1985	splassert(IPL_BIO);
1986
1987
1988	if (bp->b_vp)
1989		panic("bgetvp: not free");
1990	vhold(vp);
1991	bp->b_vp = vp;
1992	if (vp->v_type == VBLK || vp->v_type == VCHR)
1993		bp->b_dev = vp->v_rdev;
1994	else
1995		bp->b_dev = NODEV;
1996	/*
1997	 * Insert onto list for new vnode.
1998	 */
1999	bufinsvn(bp, &vp->v_cleanblkhd);
2000}
2001
2002/*
2003 * Disassociate a buffer from a vnode.
2004 *
2005 * Manipulates vnode buffer queues. Must be called at splbio().
2006 */
2007void
2008brelvp(struct buf *bp)
2009{
2010	struct vnode *vp;
2011
2012	splassert(IPL_BIO);
2013
2014	if ((vp = bp->b_vp) == (struct vnode *) 0)
2015		panic("brelvp: NULL");
2016	/*
2017	 * Delete from old vnode list, if on one.
2018	 */
2019	if (LIST_NEXT(bp, b_vnbufs) != NOLIST)
2020		bufremvn(bp);
2021	if ((vp->v_bioflag & VBIOONSYNCLIST) &&
2022	    LIST_FIRST(&vp->v_dirtyblkhd) == NULL) {
2023		vp->v_bioflag &= ~VBIOONSYNCLIST;
2024		LIST_REMOVE(vp, v_synclist);
2025	}
2026	bp->b_vp = NULL;
2027
2028	vdrop(vp);
2029}
2030
2031/*
2032 * Replaces the current vnode associated with the buffer, if any,
2033 * with a new vnode.
2034 *
2035 * If an output I/O is pending on the buffer, the old vnode
2036 * I/O count is adjusted.
2037 *
2038 * Ignores vnode buffer queues. Must be called at splbio().
2039 */
2040void
2041buf_replacevnode(struct buf *bp, struct vnode *newvp)
2042{
2043	struct vnode *oldvp = bp->b_vp;
2044
2045	splassert(IPL_BIO);
2046
2047	if (oldvp)
2048		brelvp(bp);
2049
2050	if ((bp->b_flags & (B_READ | B_DONE)) == 0) {
2051		newvp->v_numoutput++;	/* put it on swapdev */
2052		vwakeup(oldvp);
2053	}
2054
2055	bgetvp(newvp, bp);
2056	bufremvn(bp);
2057}
2058
2059/*
2060 * Used to assign buffers to the appropriate clean or dirty list on
2061 * the vnode and to add newly dirty vnodes to the appropriate
2062 * filesystem syncer list.
2063 *
2064 * Manipulates vnode buffer queues. Must be called at splbio().
2065 */
2066void
2067reassignbuf(struct buf *bp)
2068{
2069	struct buflists *listheadp;
2070	int delay;
2071	struct vnode *vp = bp->b_vp;
2072
2073	splassert(IPL_BIO);
2074
2075	/*
2076	 * Delete from old vnode list, if on one.
2077	 */
2078	if (LIST_NEXT(bp, b_vnbufs) != NOLIST)
2079		bufremvn(bp);
2080
2081	/*
2082	 * If dirty, put on list of dirty buffers;
2083	 * otherwise insert onto list of clean buffers.
2084	 */
2085	if ((bp->b_flags & B_DELWRI) == 0) {
2086		listheadp = &vp->v_cleanblkhd;
2087		if ((vp->v_bioflag & VBIOONSYNCLIST) &&
2088		    LIST_FIRST(&vp->v_dirtyblkhd) == NULL) {
2089			vp->v_bioflag &= ~VBIOONSYNCLIST;
2090			LIST_REMOVE(vp, v_synclist);
2091		}
2092	} else {
2093		listheadp = &vp->v_dirtyblkhd;
2094		if ((vp->v_bioflag & VBIOONSYNCLIST) == 0) {
2095			switch (vp->v_type) {
2096			case VDIR:
2097				delay = syncdelay / 2;
2098				break;
2099			case VBLK:
2100				if (vp->v_specmountpoint != NULL) {
2101					delay = syncdelay / 3;
2102					break;
2103				}
2104				/* FALLTHROUGH */
2105			default:
2106				delay = syncdelay;
2107			}
2108			vn_syncer_add_to_worklist(vp, delay);
2109		}
2110	}
2111	bufinsvn(bp, listheadp);
2112}
2113
2114int
2115vfs_register(struct vfsconf *vfs)
2116{
2117	struct vfsconf *vfsp;
2118	struct vfsconf **vfspp;
2119
2120#ifdef DIAGNOSTIC
2121	/* Paranoia? */
2122	if (vfs->vfc_refcount != 0)
2123		printf("vfs_register called with vfc_refcount > 0\n");
2124#endif
2125
2126	/* Check if filesystem already known */
2127	for (vfspp = &vfsconf, vfsp = vfsconf; vfsp;
2128	    vfspp = &vfsp->vfc_next, vfsp = vfsp->vfc_next)
2129		if (strcmp(vfsp->vfc_name, vfs->vfc_name) == 0)
2130			return (EEXIST);
2131
2132	if (vfs->vfc_typenum > maxvfsconf)
2133		maxvfsconf = vfs->vfc_typenum;
2134
2135	vfs->vfc_next = NULL;
2136
2137	/* Add to the end of the list */
2138	*vfspp = vfs;
2139
2140	/* Call vfs_init() */
2141	if (vfs->vfc_vfsops->vfs_init)
2142		(*(vfs->vfc_vfsops->vfs_init))(vfs);
2143
2144	return 0;
2145}
2146
2147int
2148vfs_unregister(struct vfsconf *vfs)
2149{
2150	struct vfsconf *vfsp;
2151	struct vfsconf **vfspp;
2152	int maxtypenum;
2153
2154	/* Find our vfsconf struct */
2155	for (vfspp = &vfsconf, vfsp = vfsconf; vfsp;
2156	    vfspp = &vfsp->vfc_next, vfsp = vfsp->vfc_next) {
2157		if (strcmp(vfsp->vfc_name, vfs->vfc_name) == 0)
2158			break;
2159	}
2160
2161	if (!vfsp)			/* Not found */
2162		return (ENOENT);
2163
2164	if (vfsp->vfc_refcount)		/* In use */
2165		return (EBUSY);
2166
2167	/* Remove from list and free */
2168	*vfspp = vfsp->vfc_next;
2169
2170	maxtypenum = 0;
2171
2172	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
2173		if (vfsp->vfc_typenum > maxtypenum)
2174			maxtypenum = vfsp->vfc_typenum;
2175
2176	maxvfsconf = maxtypenum;
2177	return 0;
2178}
2179
2180/*
2181 * Check if vnode represents a disk device
2182 */
2183int
2184vn_isdisk(struct vnode *vp, int *errp)
2185{
2186	if (vp->v_type != VBLK && vp->v_type != VCHR)
2187		return (0);
2188
2189	return (1);
2190}
2191
2192#ifdef DDB
2193#include <machine/db_machdep.h>
2194#include <ddb/db_interface.h>
2195
2196void
2197vfs_buf_print(void *b, int full,
2198    int (*pr)(const char *, ...) __attribute__((__format__(__kprintf__,1,2))))
2199{
2200	struct buf *bp = b;
2201
2202	(*pr)("  vp %p lblkno 0x%llx blkno 0x%llx dev 0x%x\n"
2203	      "  proc %p error %d flags %lb\n",
2204	    bp->b_vp, (int64_t)bp->b_lblkno, (int64_t)bp->b_blkno, bp->b_dev,
2205	    bp->b_proc, bp->b_error, bp->b_flags, B_BITS);
2206
2207	(*pr)("  bufsize 0x%lx bcount 0x%lx resid 0x%lx\n"
2208	      "  data %p saveaddr %p dep %p iodone %p\n",
2209	    bp->b_bufsize, bp->b_bcount, (long)bp->b_resid,
2210	    bp->b_data, bp->b_saveaddr,
2211	    LIST_FIRST(&bp->b_dep), bp->b_iodone);
2212
2213	(*pr)("  dirty {off 0x%x end 0x%x} valid {off 0x%x end 0x%x}\n",
2214	    bp->b_dirtyoff, bp->b_dirtyend, bp->b_validoff, bp->b_validend);
2215
2216#ifdef FFS_SOFTUPDATES
2217	if (full)
2218		softdep_print(bp, full, pr);
2219#endif
2220}
2221
2222const char *vtypes[] = { VTYPE_NAMES };
2223const char *vtags[] = { VTAG_NAMES };
2224
2225void
2226vfs_vnode_print(void *v, int full,
2227    int (*pr)(const char *, ...) __attribute__((__format__(__kprintf__,1,2))))
2228{
2229	struct vnode *vp = v;
2230
2231	(*pr)("tag %s(%d) type %s(%d) mount %p typedata %p\n",
2232	      (u_int)vp->v_tag >= nitems(vtags)? "<unk>":vtags[vp->v_tag],
2233	      vp->v_tag,
2234	      (u_int)vp->v_type >= nitems(vtypes)? "<unk>":vtypes[vp->v_type],
2235	      vp->v_type, vp->v_mount, vp->v_mountedhere);
2236
2237	(*pr)("data %p usecount %d writecount %d holdcnt %d numoutput %d\n",
2238	      vp->v_data, vp->v_usecount, vp->v_writecount,
2239	      vp->v_holdcnt, vp->v_numoutput);
2240
2241	/* uvm_object_printit(&vp->v_uobj, full, pr); */
2242
2243	if (full) {
2244		struct buf *bp;
2245
2246		(*pr)("clean bufs:\n");
2247		LIST_FOREACH(bp, &vp->v_cleanblkhd, b_vnbufs) {
2248			(*pr)(" bp %p\n", bp);
2249			vfs_buf_print(bp, full, pr);
2250		}
2251
2252		(*pr)("dirty bufs:\n");
2253		LIST_FOREACH(bp, &vp->v_dirtyblkhd, b_vnbufs) {
2254			(*pr)(" bp %p\n", bp);
2255			vfs_buf_print(bp, full, pr);
2256		}
2257	}
2258}
2259
2260void
2261vfs_mount_print(struct mount *mp, int full,
2262    int (*pr)(const char *, ...) __attribute__((__format__(__kprintf__,1,2))))
2263{
2264	struct vfsconf *vfc = mp->mnt_vfc;
2265	struct vnode *vp;
2266	int cnt;
2267
2268	(*pr)("flags %b\nvnodecovered %p syncer %p data %p\n",
2269	    mp->mnt_flag, MNT_BITS,
2270	    mp->mnt_vnodecovered, mp->mnt_syncer, mp->mnt_data);
2271
2272	(*pr)("vfsconf: ops %p name \"%s\" num %d ref %d flags 0x%x\n",
2273            vfc->vfc_vfsops, vfc->vfc_name, vfc->vfc_typenum,
2274	    vfc->vfc_refcount, vfc->vfc_flags);
2275
2276	(*pr)("statvfs cache: bsize %x iosize %x\nblocks %llu free %llu avail %lld\n",
2277	    mp->mnt_stat.f_bsize, mp->mnt_stat.f_iosize, mp->mnt_stat.f_blocks,
2278	    mp->mnt_stat.f_bfree, mp->mnt_stat.f_bavail);
2279
2280	(*pr)("  files %llu ffiles %llu favail %lld\n", mp->mnt_stat.f_files,
2281	    mp->mnt_stat.f_ffree, mp->mnt_stat.f_favail);
2282
2283	(*pr)("  f_fsidx {0x%x, 0x%x} owner %u ctime 0x%llx\n",
2284	    mp->mnt_stat.f_fsid.val[0], mp->mnt_stat.f_fsid.val[1],
2285	    mp->mnt_stat.f_owner, mp->mnt_stat.f_ctime);
2286
2287 	(*pr)("  syncwrites %llu asyncwrites = %llu\n",
2288	    mp->mnt_stat.f_syncwrites, mp->mnt_stat.f_asyncwrites);
2289
2290 	(*pr)("  syncreads %llu asyncreads = %llu\n",
2291	    mp->mnt_stat.f_syncreads, mp->mnt_stat.f_asyncreads);
2292
2293	(*pr)("  fstype \"%s\" mnton \"%s\" mntfrom \"%s\" mntspec \"%s\"\n",
2294	    mp->mnt_stat.f_fstypename, mp->mnt_stat.f_mntonname,
2295	    mp->mnt_stat.f_mntfromname, mp->mnt_stat.f_mntfromspec);
2296
2297	(*pr)("locked vnodes:");
2298	/* XXX would take mountlist lock, except ddb has no context */
2299	cnt = 0;
2300	LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
2301		if (VOP_ISLOCKED(vp)) {
2302			if (cnt == 0)
2303				(*pr)("\n  %p", vp);
2304			else if ((cnt % (72 / (sizeof(void *) * 2 + 4))) == 0)
2305				(*pr)(",\n  %p", vp);
2306			else
2307				(*pr)(", %p", vp);
2308			cnt++;
2309		}
2310	}
2311	(*pr)("\n");
2312
2313	if (full) {
2314		(*pr)("all vnodes:");
2315		/* XXX would take mountlist lock, except ddb has no context */
2316		cnt = 0;
2317		LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
2318			if (cnt == 0)
2319				(*pr)("\n  %p", vp);
2320			else if ((cnt % (72 / (sizeof(void *) * 2 + 4))) == 0)
2321				(*pr)(",\n  %p", vp);
2322			else
2323				(*pr)(", %p", vp);
2324			cnt++;
2325		}
2326		(*pr)("\n");
2327	}
2328}
2329#endif /* DDB */
2330
2331void
2332copy_statfs_info(struct statfs *sbp, const struct mount *mp)
2333{
2334	const struct statfs *mbp;
2335
2336	strncpy(sbp->f_fstypename, mp->mnt_vfc->vfc_name, MFSNAMELEN);
2337
2338	if (sbp == (mbp = &mp->mnt_stat))
2339		return;
2340
2341	sbp->f_fsid = mbp->f_fsid;
2342	sbp->f_owner = mbp->f_owner;
2343	sbp->f_flags = mbp->f_flags;
2344	sbp->f_syncwrites = mbp->f_syncwrites;
2345	sbp->f_asyncwrites = mbp->f_asyncwrites;
2346	sbp->f_syncreads = mbp->f_syncreads;
2347	sbp->f_asyncreads = mbp->f_asyncreads;
2348	sbp->f_namemax = mbp->f_namemax;
2349	memcpy(sbp->f_mntonname, mp->mnt_stat.f_mntonname, MNAMELEN);
2350	memcpy(sbp->f_mntfromname, mp->mnt_stat.f_mntfromname, MNAMELEN);
2351	memcpy(sbp->f_mntfromspec, mp->mnt_stat.f_mntfromspec, MNAMELEN);
2352	memcpy(&sbp->mount_info, &mp->mnt_stat.mount_info,
2353	    sizeof(union mount_info));
2354}
2355