vfs_export.c revision 13168
/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.13 (Berkeley) 4/18/94
 * $Id: vfs_subr.c,v 1.49 1995/12/17 21:23:19 phk Exp $
 */

/*
 * External virtual filesystem routines
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/file.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/time.h>
#include <sys/vnode.h>
#include <sys/stat.h>
#include <sys/namei.h>
#include <sys/ucred.h>
#include <sys/buf.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/domain.h>
#include <sys/mbuf.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <sys/sysctl.h>

#include <miscfs/specfs/specdev.h>

#ifdef DDB
extern void	printlockedvnodes __P((void));
#endif
extern void	vclean __P((struct vnode *vp, int flags));
extern void	vfs_unmountroot __P((struct mount *rootfs));

enum vtype iftovt_tab[16] = {
	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
};
int vttoif_tab[9] = {
	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
	S_IFSOCK, S_IFIFO, S_IFMT,
};
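
/*
 * These tables back the IFTOVT() and VTTOIF() macros in <sys/vnode.h>,
 * which convert between the S_IFMT file type bits of an inode mode and
 * the enum vtype of a vnode; e.g. IFTOVT(ip->i_mode) indexes iftovt_tab
 * with the mode's type bits shifted down, yielding VREG for a regular
 * file.
 */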

/*
 * Insq/Remq for the vnode usage lists.
 */
#define	bufinsvn(bp, dp)	LIST_INSERT_HEAD(dp, bp, b_vnbufs)
#define	bufremvn(bp) {  \
	LIST_REMOVE(bp, b_vnbufs); \
	(bp)->b_vnbufs.le_next = NOLIST; \
}

TAILQ_HEAD(freelst, vnode) vnode_free_list;	/* vnode free list */
u_long freevnodes	= 0;

struct mntlist mountlist;	/* mounted filesystem list */

int desiredvnodes;
SYSCTL_INT(_kern, KERN_MAXVNODES, maxvnodes, CTLFLAG_RD, &desiredvnodes, 0, "");

static void	vfs_free_addrlist __P((struct netexport *nep));
static int	vfs_free_netcred __P((struct radix_node *rn, void *w));
static int	vfs_hang_addrlist __P((struct mount *mp, struct netexport *nep,
				       struct export_args *argp));

/*
 * Initialize the vnode management data structures.
 */
void
vntblinit()
{
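	/*
	 * Size the pool at roughly one vnode per process plus the VM
	 * object cache; getnewvnode() prefers recycling a free vnode
	 * over allocating once this target has been reached.
	 */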
	desiredvnodes = maxproc + vm_object_cache_max;

	TAILQ_INIT(&vnode_free_list);
	CIRCLEQ_INIT(&mountlist);
}

/*
 * Lock a filesystem.
 * Used to prevent access to it while mounting and unmounting.
 */
int
vfs_lock(mp)
	register struct mount *mp;
{

	while (mp->mnt_flag & MNT_MLOCK) {
		mp->mnt_flag |= MNT_MWAIT;
		(void) tsleep((caddr_t) mp, PVFS, "vfslck", 0);
	}
	mp->mnt_flag |= MNT_MLOCK;
	return (0);
}

/*
 * Unlock a locked filesystem.
 * Panic if filesystem is not locked.
 */
void
vfs_unlock(mp)
	register struct mount *mp;
{

	if ((mp->mnt_flag & MNT_MLOCK) == 0)
		panic("vfs_unlock: not locked");
	mp->mnt_flag &= ~MNT_MLOCK;
	if (mp->mnt_flag & MNT_MWAIT) {
		mp->mnt_flag &= ~MNT_MWAIT;
		wakeup((caddr_t) mp);
	}
}

/*
 * Mark a mount point as busy.
 * Used to synchronize access and to delay unmounting.
 */
int
vfs_busy(mp)
	register struct mount *mp;
{

	while (mp->mnt_flag & MNT_MPBUSY) {
		mp->mnt_flag |= MNT_MPWANT;
		(void) tsleep((caddr_t) &mp->mnt_flag, PVFS, "vfsbsy", 0);
	}
	if (mp->mnt_flag & MNT_UNMOUNT)
		return (1);
	mp->mnt_flag |= MNT_MPBUSY;
	return (0);
}

/*
 * Free a busy filesystem.
 * Panic if filesystem is not busy.
 */
void
vfs_unbusy(mp)
	register struct mount *mp;
{

	if ((mp->mnt_flag & MNT_MPBUSY) == 0)
		panic("vfs_unbusy: not busy");
	mp->mnt_flag &= ~MNT_MPBUSY;
	if (mp->mnt_flag & MNT_MPWANT) {
		mp->mnt_flag &= ~MNT_MPWANT;
		wakeup((caddr_t) &mp->mnt_flag);
	}
}

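/*
 * Unmount the root filesystem.
 * Called from vfs_unmountall() once every other filesystem has been
 * unmounted.
 */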
void
vfs_unmountroot(struct mount *rootfs)
{
	struct mount *mp = rootfs;
	int error;

	if (vfs_busy(mp)) {
		printf("failed to unmount root\n");
		return;
	}
	mp->mnt_flag |= MNT_UNMOUNT;
	if ((error = vfs_lock(mp))) {
		printf("lock of root filesystem failed (%d)\n", error);
		return;
	}
	vnode_pager_umount(mp);	/* release cached vnodes */
	cache_purgevfs(mp);	/* remove cache entries for this file sys */

	if ((error = VFS_SYNC(mp, MNT_WAIT, initproc->p_ucred, initproc)))
		printf("sync of root filesystem failed (%d)\n", error);

	if ((error = VFS_UNMOUNT(mp, MNT_FORCE, initproc))) {
		printf("unmount of root filesystem failed (");
		if (error == EBUSY)
			printf("BUSY)\n");
		else
			printf("%d)\n", error);
	}
	mp->mnt_flag &= ~MNT_UNMOUNT;
	vfs_unbusy(mp);
}

/*
 * Unmount all filesystems.  Should only be called by halt().
 */
void
vfs_unmountall()
{
	struct mount *mp, *nmp, *rootfs = NULL;
	int error;

	/* unmount all but rootfs */
	for (mp = mountlist.cqh_last; mp != (void *)&mountlist; mp = nmp) {
		nmp = mp->mnt_list.cqe_prev;

		if (mp->mnt_flag & MNT_ROOTFS) {
			rootfs = mp;
			continue;
		}
		error = dounmount(mp, MNT_FORCE, initproc);
		if (error) {
			printf("unmount of %s failed (", mp->mnt_stat.f_mntonname);
			if (error == EBUSY)
				printf("BUSY)\n");
			else
				printf("%d)\n", error);
		}
	}

	/* and finally... */
	if (rootfs) {
		vfs_unmountroot(rootfs);
	} else {
		printf("no root filesystem\n");
	}
}

/*
 * Lookup a mount point by filesystem identifier.
 */
struct mount *
getvfs(fsid)
	fsid_t *fsid;
{
	register struct mount *mp;

	for (mp = mountlist.cqh_first; mp != (void *)&mountlist;
	    mp = mp->mnt_list.cqe_next) {
		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1])
			return (mp);
	}
	return ((struct mount *) 0);
}

/*
 * Get a new unique fsid
 */
void
getnewfsid(mp, mtype)
	struct mount *mp;
	int mtype;
{
	static u_short xxxfs_mntid;

	fsid_t tfsid;

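	/*
	 * An fsid is built from a pseudo device number whose major number
	 * lies just past the real block devices (nblkdev + fs type) and
	 * whose minor number is bumped until the id is unique; val[1]
	 * holds the filesystem type itself.
	 */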
	mp->mnt_stat.f_fsid.val[0] = makedev(nblkdev + mtype, 0);
	mp->mnt_stat.f_fsid.val[1] = mtype;
	if (xxxfs_mntid == 0)
		++xxxfs_mntid;
	tfsid.val[0] = makedev(nblkdev + mtype, xxxfs_mntid);
	tfsid.val[1] = mtype;
	if (mountlist.cqh_first != (void *)&mountlist) {
		while (getvfs(&tfsid)) {
			tfsid.val[0]++;
			xxxfs_mntid++;
		}
	}
	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
}

/*
 * Set vnode attributes to VNOVAL
 */
void
vattr_null(vap)
	register struct vattr *vap;
{

	vap->va_type = VNON;
	vap->va_size = VNOVAL;
	vap->va_bytes = VNOVAL;
	vap->va_mode = vap->va_nlink = vap->va_uid = vap->va_gid =
	    vap->va_fsid = vap->va_fileid =
	    vap->va_blocksize = vap->va_rdev =
	    vap->va_atime.ts_sec = vap->va_atime.ts_nsec =
	    vap->va_mtime.ts_sec = vap->va_mtime.ts_nsec =
	    vap->va_ctime.ts_sec = vap->va_ctime.ts_nsec =
	    vap->va_flags = vap->va_gen = VNOVAL;
	vap->va_vaflags = 0;
}

/*
 * Routines having to do with the management of the vnode table.
 */
extern vop_t **dead_vnodeop_p;

/*
 * Return the next vnode from the free list.
 */
int
getnewvnode(tag, mp, vops, vpp)
	enum vtagtype tag;
	struct mount *mp;
	vop_t **vops;
	struct vnode **vpp;
{
	register struct vnode *vp;

	vp = vnode_free_list.tqh_first;
	/*
	 * we allocate a new vnode if
	 * 	1. we don't have any free
	 *		Pretty obvious, we actually used to panic, but that
	 *		is a silly thing to do.
	 *	2. we haven't filled our pool yet
	 *		We don't want to trash the incore (VM-)vnodecache.
	 *	3. less than 1/4th of our vnodes are free.
	 *		We don't want to trash the namei cache either.
	 */
	if (freevnodes < (numvnodes >> 2) ||
	    numvnodes < desiredvnodes ||
	    vp == NULL) {
		vp = (struct vnode *) malloc((u_long) sizeof *vp,
		    M_VNODE, M_WAITOK);
		bzero((char *) vp, sizeof *vp);
		numvnodes++;
	} else {
		TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
		freevnodes--;

		if (vp->v_usecount)
			panic("free vnode isn't");

		/* see comment on why 0xdeadb is set at end of vgone (below) */
		vp->v_freelist.tqe_prev = (struct vnode **) 0xdeadb;
		vp->v_lease = NULL;
		if (vp->v_type != VBAD)
			vgone(vp);
#ifdef DIAGNOSTIC
		{
			int s;

			if (vp->v_data)
				panic("cleaned vnode isn't");
			s = splbio();
			if (vp->v_numoutput)
				panic("Clean vnode has pending I/O's");
			splx(s);
		}
#endif
		vp->v_flag = 0;
		vp->v_lastr = 0;
		vp->v_ralen = 0;
		vp->v_maxra = 0;
		vp->v_lastw = 0;
		vp->v_lasta = 0;
		vp->v_cstart = 0;
		vp->v_clen = 0;
		vp->v_socket = 0;
		vp->v_writecount = 0;	/* XXX */
	}
	vp->v_type = VNON;
	cache_purge(vp);
	vp->v_tag = tag;
	vp->v_op = vops;
	insmntque(vp, mp);
	*vpp = vp;
	vp->v_usecount = 1;
	vp->v_data = 0;
	return (0);
}

/*
 * Move a vnode from one mount queue to another.
 */
void
insmntque(vp, mp)
	register struct vnode *vp;
	register struct mount *mp;
{

	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mount != NULL)
		LIST_REMOVE(vp, v_mntvnodes);
	/*
	 * Insert into list of vnodes for the new mount point, if available.
	 */
	if ((vp->v_mount = mp) == NULL)
		return;
	LIST_INSERT_HEAD(&mp->mnt_vnodelist, vp, v_mntvnodes);
}

/*
 * Update outstanding I/O count and do wakeup if requested.
 */
void
vwakeup(bp)
	register struct buf *bp;
{
	register struct vnode *vp;

	bp->b_flags &= ~B_WRITEINPROG;
	if ((vp = bp->b_vp)) {
		vp->v_numoutput--;
		if (vp->v_numoutput < 0)
			panic("vwakeup: neg numoutput");
		if ((vp->v_numoutput == 0) && (vp->v_flag & VBWAIT)) {
			vp->v_flag &= ~VBWAIT;
			wakeup((caddr_t) &vp->v_numoutput);
		}
	}
}

/*
 * Flush out and invalidate all buffers associated with a vnode.
 * Called with the underlying object locked.
 */
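/*
 * V_SAVE asks that dirty data be written back before the buffers are
 * invalidated; V_SAVEMETA preserves buffers that hold file metadata
 * (indirect blocks, identified by a negative b_lblkno).
 */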
int
vinvalbuf(vp, flags, cred, p, slpflag, slptimeo)
	register struct vnode *vp;
	int flags;
	struct ucred *cred;
	struct proc *p;
	int slpflag, slptimeo;
{
	register struct buf *bp;
	struct buf *nbp, *blist;
	int s, error;
	vm_object_t object;

	if (flags & V_SAVE) {
		if ((error = VOP_FSYNC(vp, cred, MNT_WAIT, p)))
			return (error);
		if (vp->v_dirtyblkhd.lh_first != NULL)
			panic("vinvalbuf: dirty bufs");
	}
	for (;;) {
		if ((blist = vp->v_cleanblkhd.lh_first) && (flags & V_SAVEMETA))
			while (blist && blist->b_lblkno < 0)
				blist = blist->b_vnbufs.le_next;
		if (!blist && (blist = vp->v_dirtyblkhd.lh_first) &&
		    (flags & V_SAVEMETA))
			while (blist && blist->b_lblkno < 0)
				blist = blist->b_vnbufs.le_next;
		if (!blist)
			break;

		for (bp = blist; bp; bp = nbp) {
			nbp = bp->b_vnbufs.le_next;
			if ((flags & V_SAVEMETA) && bp->b_lblkno < 0)
				continue;
			s = splbio();
			if (bp->b_flags & B_BUSY) {
				bp->b_flags |= B_WANTED;
				error = tsleep((caddr_t) bp,
				    slpflag | (PRIBIO + 1), "vinvalbuf",
				    slptimeo);
				splx(s);
				if (error)
					return (error);
				break;
			}
			bremfree(bp);
			bp->b_flags |= B_BUSY;
			splx(s);
			/*
			 * XXX Since there are no node locks for NFS, I
			 * believe there is a slight chance that a delayed
			 * write will occur while sleeping just above, so
			 * check for it.
			 */
			if ((bp->b_flags & B_DELWRI) && (flags & V_SAVE)) {
				(void) VOP_BWRITE(bp);
				break;
			}
			bp->b_flags |= (B_INVAL|B_NOCACHE|B_RELBUF);
			brelse(bp);
		}
	}

	s = splbio();
	while (vp->v_numoutput > 0) {
		vp->v_flag |= VBWAIT;
		tsleep(&vp->v_numoutput, PVM, "vnvlbv", 0);
	}
	splx(s);

	/*
	 * Destroy the copy in the VM cache, too.
	 */
	object = vp->v_object;
	if (object != NULL) {
		vm_object_page_remove(object, 0, object->size,
		    (flags & V_SAVE) ? TRUE : FALSE);
	}
	if (!(flags & V_SAVEMETA) &&
	    (vp->v_dirtyblkhd.lh_first || vp->v_cleanblkhd.lh_first))
		panic("vinvalbuf: flush failed");
	return (0);
}

/*
 * Associate a buffer with a vnode.
 */
void
bgetvp(vp, bp)
	register struct vnode *vp;
	register struct buf *bp;
{
	int s;

	if (bp->b_vp)
		panic("bgetvp: not free");
	VHOLD(vp);
	bp->b_vp = vp;
	if (vp->v_type == VBLK || vp->v_type == VCHR)
		bp->b_dev = vp->v_rdev;
	else
		bp->b_dev = NODEV;
	/*
	 * Insert onto list for new vnode.
	 */
	s = splbio();
	bufinsvn(bp, &vp->v_cleanblkhd);
	splx(s);
}

/*
 * Disassociate a buffer from a vnode.
 */
void
brelvp(bp)
	register struct buf *bp;
{
	struct vnode *vp;
	int s;

	if (bp->b_vp == (struct vnode *) 0)
		panic("brelvp: NULL");
	/*
	 * Delete from old vnode list, if on one.
	 */
	s = splbio();
	if (bp->b_vnbufs.le_next != NOLIST)
		bufremvn(bp);
	splx(s);

	vp = bp->b_vp;
	bp->b_vp = (struct vnode *) 0;
	HOLDRELE(vp);
}

/*
 * Associate a p-buffer with a vnode.
 */
void
pbgetvp(vp, bp)
	register struct vnode *vp;
	register struct buf *bp;
{
	if (bp->b_vp)
		panic("pbgetvp: not free");
	VHOLD(vp);
	bp->b_vp = vp;
	if (vp->v_type == VBLK || vp->v_type == VCHR)
		bp->b_dev = vp->v_rdev;
	else
		bp->b_dev = NODEV;
}

/*
 * Disassociate a p-buffer from a vnode.
 */
void
pbrelvp(bp)
	register struct buf *bp;
{
	struct vnode *vp;

	if (bp->b_vp == (struct vnode *) 0)
		panic("pbrelvp: NULL");

	vp = bp->b_vp;
	bp->b_vp = (struct vnode *) 0;
	HOLDRELE(vp);
}

/*
 * Reassign a buffer from one vnode to another.
 * Used to assign file specific control information
 * (indirect blocks) to the vnode to which they belong.
 */
void
reassignbuf(bp, newvp)
	register struct buf *bp;
	register struct vnode *newvp;
{
	register struct buflists *listheadp;

	if (newvp == NULL) {
		printf("reassignbuf: NULL");
		return;
	}
	/*
	 * Delete from old vnode list, if on one.
	 */
	if (bp->b_vnbufs.le_next != NOLIST)
		bufremvn(bp);
	/*
	 * If dirty, put on list of dirty buffers; otherwise insert onto list
	 * of clean buffers.
	 */
	if (bp->b_flags & B_DELWRI) {
		struct buf *tbp;

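		/*
		 * Keep the dirty list sorted by ascending b_lblkno so
		 * that fsync and the clustering code push buffers out
		 * in block order.
		 */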
		tbp = newvp->v_dirtyblkhd.lh_first;
		if (!tbp || (tbp->b_lblkno > bp->b_lblkno)) {
			bufinsvn(bp, &newvp->v_dirtyblkhd);
		} else {
			while (tbp->b_vnbufs.le_next &&
			    (tbp->b_vnbufs.le_next->b_lblkno < bp->b_lblkno)) {
				tbp = tbp->b_vnbufs.le_next;
			}
			LIST_INSERT_AFTER(tbp, bp, b_vnbufs);
		}
	} else {
		listheadp = &newvp->v_cleanblkhd;
		bufinsvn(bp, listheadp);
	}
}

/*
 * Create a vnode for a block device.
 * Used for root filesystem, argdev, and swap areas.
 * Also used for memory file system special devices.
 */
int
bdevvp(dev, vpp)
	dev_t dev;
	struct vnode **vpp;
{
	register struct vnode *vp;
	struct vnode *nvp;
	int error;

	if (dev == NODEV)
		return (0);
	error = getnewvnode(VT_NON, (struct mount *) 0, spec_vnodeop_p, &nvp);
	if (error) {
		*vpp = 0;
		return (error);
	}
	vp = nvp;
	vp->v_type = VBLK;
	if ((nvp = checkalias(vp, dev, (struct mount *) 0))) {
		vput(vp);
		vp = nvp;
	}
	*vpp = vp;
	return (0);
}

/*
 * Check to see if the new vnode represents a special device
 * for which we already have a vnode (either because of
 * bdevvp() or because of a different vnode representing
 * the same block device). If such an alias exists, deallocate
 * the existing contents and return the aliased vnode. The
 * caller is responsible for filling it with its new contents.
 */
struct vnode *
checkalias(nvp, nvp_rdev, mp)
	register struct vnode *nvp;
	dev_t nvp_rdev;
	struct mount *mp;
{
	register struct vnode *vp;
	struct vnode **vpp;

	if (nvp->v_type != VBLK && nvp->v_type != VCHR)
		return (NULLVP);

	vpp = &speclisth[SPECHASH(nvp_rdev)];
loop:
	for (vp = *vpp; vp; vp = vp->v_specnext) {
		if (nvp_rdev != vp->v_rdev || nvp->v_type != vp->v_type)
			continue;
		/*
		 * Alias, but not in use, so flush it out.
		 */
		if (vp->v_usecount == 0) {
			vgone(vp);
			goto loop;
		}
		if (vget(vp, 1))
			goto loop;
		break;
	}
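	/*
	 * If no in-use alias was found, or the alias found is a real
	 * device vnode owned by a filesystem (v_tag != VT_NON), enter the
	 * new vnode on the hash chain and let the caller fill it in.
	 * Otherwise take over the dead vnode left by bdevvp() below.
	 */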
	if (vp == NULL || vp->v_tag != VT_NON) {
		MALLOC(nvp->v_specinfo, struct specinfo *,
		    sizeof(struct specinfo), M_VNODE, M_WAITOK);
		nvp->v_rdev = nvp_rdev;
		nvp->v_hashchain = vpp;
		nvp->v_specnext = *vpp;
		nvp->v_specflags = 0;
		*vpp = nvp;
		if (vp != NULL) {
			nvp->v_flag |= VALIASED;
			vp->v_flag |= VALIASED;
			vput(vp);
		}
		return (NULLVP);
	}
	VOP_UNLOCK(vp);
	vclean(vp, 0);
	vp->v_op = nvp->v_op;
	vp->v_tag = nvp->v_tag;
	nvp->v_type = VNON;
	insmntque(vp, mp);
	return (vp);
}

/*
 * Grab a particular vnode from the free list, increment its
 * reference count and lock it. The vnode lock bit is set while the
 * vnode is being eliminated in vgone. The process is awakened
 * when the transition is completed, and an error returned to
 * indicate that the vnode is no longer usable (possibly having
 * been changed to a new file system type).
 */
int
vget(vp, lockflag)
	register struct vnode *vp;
	int lockflag;
{

	/*
	 * If the vnode is in the process of being cleaned out for another
	 * use, we wait for the cleaning to finish and then return failure.
	 * Cleaning is determined either by checking that the VXLOCK flag is
	 * set, or that the use count is zero with the back pointer set to
	 * show that it has been removed from the free list by getnewvnode.
	 * The VXLOCK flag may not have been set yet because vclean is blocked
	 * in the VOP_LOCK call waiting for the VOP_INACTIVE to complete.
	 */
	if ((vp->v_flag & VXLOCK) ||
	    (vp->v_usecount == 0 &&
		vp->v_freelist.tqe_prev == (struct vnode **) 0xdeadb)) {
		vp->v_flag |= VXWANT;
		(void) tsleep((caddr_t) vp, PINOD, "vget", 0);
		return (1);
	}
	if (vp->v_usecount == 0) {
		TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
		freevnodes--;
	}
	vp->v_usecount++;
	if (lockflag)
		VOP_LOCK(vp);
	return (0);
}

/*
 * Vnode reference, just increment the count
 */
void
vref(vp)
	struct vnode *vp;
{

	if (vp->v_usecount <= 0)
		panic("vref used where vget required");
	vp->v_usecount++;
}

/*
 * vput(), just unlock and vrele()
 */
void
vput(vp)
	register struct vnode *vp;
{

	VOP_UNLOCK(vp);
	vrele(vp);
}

/*
 * Vnode release.
 * If count drops to zero, call inactive routine and return to freelist.
 */
void
vrele(vp)
	register struct vnode *vp;
{

#ifdef DIAGNOSTIC
	if (vp == NULL)
		panic("vrele: null vp");
#endif
	vp->v_usecount--;
	if (vp->v_usecount > 0)
		return;
	if (vp->v_usecount < 0 /* || vp->v_writecount < 0 */ ) {
#ifdef DIAGNOSTIC
		vprint("vrele: negative ref count", vp);
#endif
		panic("vrele: negative reference cnt");
	}
	if (vp->v_flag & VAGE) {
		TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
		vp->v_flag &= ~VAGE;
	} else {
		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
	}
	freevnodes++;

	VOP_INACTIVE(vp);
}

#ifdef DIAGNOSTIC
/*
 * Page or buffer structure gets a reference.
 */
void
vhold(vp)
	register struct vnode *vp;
{

	vp->v_holdcnt++;
}

/*
 * Page or buffer structure frees a reference.
 */
void
holdrele(vp)
	register struct vnode *vp;
{

	if (vp->v_holdcnt <= 0)
		panic("holdrele: holdcnt");
	vp->v_holdcnt--;
}
#endif /* DIAGNOSTIC */

/*
 * Remove any vnodes in the vnode table belonging to mount point mp.
 *
 * If MNT_NOFORCE is specified, there should not be any active ones,
 * return error if any are found (nb: this is a user error, not a
 * system error). If MNT_FORCE is specified, detach any active vnodes
 * that are found.
 */
#ifdef DIAGNOSTIC
static int busyprt = 0;		/* print out busy vnodes */
SYSCTL_INT(_debug, 1, busyprt, CTLFLAG_RW, &busyprt, 0, "");
#endif

int
vflush(mp, skipvp, flags)
	struct mount *mp;
	struct vnode *skipvp;
	int flags;
{
	register struct vnode *vp, *nvp;
	int busy = 0;

	if ((mp->mnt_flag & MNT_MPBUSY) == 0)
		panic("vflush: not busy");
loop:
	for (vp = mp->mnt_vnodelist.lh_first; vp; vp = nvp) {
		/*
		 * Make sure this vnode wasn't reclaimed in getnewvnode().
		 * Start over if it has (it won't be on the list anymore).
		 */
		if (vp->v_mount != mp)
			goto loop;
		nvp = vp->v_mntvnodes.le_next;
		/*
		 * Skip over a selected vnode.
		 */
		if (vp == skipvp)
			continue;
		/*
		 * Skip over vnodes marked VSYSTEM.
		 */
		if ((flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM))
			continue;
		/*
		 * If WRITECLOSE is set, only flush out regular file vnodes
		 * open for writing.
		 */
		if ((flags & WRITECLOSE) &&
		    (vp->v_writecount == 0 || vp->v_type != VREG))
			continue;
		/*
		 * With v_usecount == 0, all we need to do is clear out the
		 * vnode data structures and we are done.
		 */
		if (vp->v_usecount == 0) {
			vgone(vp);
			continue;
		}
		/*
		 * If FORCECLOSE is set, forcibly close the vnode. For block
		 * or character devices, revert to an anonymous device. For
		 * all other files, just kill them.
		 */
		if (flags & FORCECLOSE) {
			if (vp->v_type != VBLK && vp->v_type != VCHR) {
				vgone(vp);
			} else {
				vclean(vp, 0);
				vp->v_op = spec_vnodeop_p;
				insmntque(vp, (struct mount *) 0);
			}
			continue;
		}
#ifdef DIAGNOSTIC
		if (busyprt)
			vprint("vflush: busy vnode", vp);
#endif
		busy++;
	}
	if (busy)
		return (EBUSY);
	return (0);
}

/*
 * Disassociate the underlying file system from a vnode.
 */
void
vclean(struct vnode *vp, int flags)
{
	int active;

	/*
	 * Check to see if the vnode is in use. If so we have to reference it
	 * before we clean it out so that its count cannot fall to zero and
	 * generate a race against ourselves to recycle it.
	 */
	if ((active = vp->v_usecount))
		VREF(vp);
	/*
	 * Even if the count is zero, the VOP_INACTIVE routine may still have
	 * the object locked while it cleans it out. The VOP_LOCK ensures that
	 * the VOP_INACTIVE routine is done with its work. For active vnodes,
	 * it ensures that no other activity can occur while the underlying
	 * object is being cleaned out.
	 */
	VOP_LOCK(vp);
	/*
	 * Prevent the vnode from being recycled or brought into use while we
	 * clean it out.
	 */
	if (vp->v_flag & VXLOCK)
		panic("vclean: deadlock");
	vp->v_flag |= VXLOCK;
	/*
	 * Clean out any buffers associated with the vnode.
	 */
	if (flags & DOCLOSE)
		vinvalbuf(vp, V_SAVE, NOCRED, NULL, 0, 0);
	/*
	 * Any other processes trying to obtain this lock must first wait for
	 * VXLOCK to clear, then call the new lock operation.
	 */
	VOP_UNLOCK(vp);
	/*
	 * If purging an active vnode, it must be closed and deactivated
	 * before being reclaimed.
	 */
	if (active) {
		if (flags & DOCLOSE)
			VOP_CLOSE(vp, FNONBLOCK, NOCRED, NULL);
		VOP_INACTIVE(vp);
	}
	/*
	 * Reclaim the vnode.
	 */
	if (VOP_RECLAIM(vp))
		panic("vclean: cannot reclaim");
	if (active)
		vrele(vp);

	/*
	 * Done with purge, notify sleepers of the grim news.
	 */
	vp->v_op = dead_vnodeop_p;
	vp->v_tag = VT_NON;
	vp->v_flag &= ~VXLOCK;
	if (vp->v_flag & VXWANT) {
		vp->v_flag &= ~VXWANT;
		wakeup((caddr_t) vp);
	}
}

/*
 * Eliminate all activity associated with the requested vnode
 * and with all vnodes aliased to the requested vnode.
 */
void
vgoneall(vp)
	register struct vnode *vp;
{
	register struct vnode *vq;

	if (vp->v_flag & VALIASED) {
		/*
		 * If a vgone (or vclean) is already in progress, wait until
		 * it is done and return.
		 */
		if (vp->v_flag & VXLOCK) {
			vp->v_flag |= VXWANT;
			(void) tsleep((caddr_t) vp, PINOD, "vgall", 0);
			return;
		}
		/*
		 * Ensure that vp will not be vgone'd while we are eliminating
		 * its aliases.
		 */
		vp->v_flag |= VXLOCK;
		while (vp->v_flag & VALIASED) {
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_rdev != vp->v_rdev ||
				    vq->v_type != vp->v_type || vp == vq)
					continue;
				vgone(vq);
				break;
			}
		}
		/*
		 * Remove the lock so that vgone below will really eliminate
		 * the vnode after which time vgone will awaken any sleepers.
		 */
		vp->v_flag &= ~VXLOCK;
	}
	vgone(vp);
}

/*
 * Eliminate all activity associated with a vnode
 * in preparation for reuse.
 */
void
vgone(vp)
	register struct vnode *vp;
{
	register struct vnode *vq;
	struct vnode *vx;

	/*
	 * If a vgone (or vclean) is already in progress, wait until it is
	 * done and return.
	 */
	if (vp->v_flag & VXLOCK) {
		vp->v_flag |= VXWANT;
		(void) tsleep((caddr_t) vp, PINOD, "vgone", 0);
		return;
	}
	/*
	 * Clean out the filesystem specific data.
	 */
	vclean(vp, DOCLOSE);
	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mount != NULL) {
		LIST_REMOVE(vp, v_mntvnodes);
		vp->v_mount = NULL;
	}
	/*
	 * If special device, remove it from special device alias list.
	 */
	if (vp->v_type == VBLK || vp->v_type == VCHR) {
		if (*vp->v_hashchain == vp) {
			*vp->v_hashchain = vp->v_specnext;
		} else {
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_specnext != vp)
					continue;
				vq->v_specnext = vp->v_specnext;
				break;
			}
			if (vq == NULL)
				panic("missing bdev");
		}
		if (vp->v_flag & VALIASED) {
			vx = NULL;
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_rdev != vp->v_rdev ||
				    vq->v_type != vp->v_type)
					continue;
				if (vx)
					break;
				vx = vq;
			}
			if (vx == NULL)
				panic("missing alias");
			if (vq == NULL)
				vx->v_flag &= ~VALIASED;
			vp->v_flag &= ~VALIASED;
		}
		FREE(vp->v_specinfo, M_VNODE);
		vp->v_specinfo = NULL;
	}
	/*
	 * If it is on the freelist and not already at the head, move it to
	 * the head of the list. The test of the back pointer and the
	 * reference count of zero is because it will be removed from the free
	 * list by getnewvnode, but will not have its reference count
	 * incremented until after calling vgone. If the reference count were
	 * incremented first, vgone would (incorrectly) try to close the
	 * previous instance of the underlying object. So, the back pointer is
	 * explicitly set to `0xdeadb' in getnewvnode after removing it from
	 * the freelist to ensure that we do not try to move it here.
	 */
	if (vp->v_usecount == 0 &&
	    vp->v_freelist.tqe_prev != (struct vnode **) 0xdeadb &&
	    vnode_free_list.tqh_first != vp) {
		TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
		TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
	}
	vp->v_type = VBAD;
}

/*
 * Lookup a vnode by device number.
 */
int
vfinddev(dev, type, vpp)
	dev_t dev;
	enum vtype type;
	struct vnode **vpp;
{
	register struct vnode *vp;

	for (vp = speclisth[SPECHASH(dev)]; vp; vp = vp->v_specnext) {
		if (dev != vp->v_rdev || type != vp->v_type)
			continue;
		*vpp = vp;
		return (1);
	}
	return (0);
}

/*
 * Calculate the total number of references to a special device.
 */
int
vcount(vp)
	register struct vnode *vp;
{
	register struct vnode *vq, *vnext;
	int count;

loop:
	if ((vp->v_flag & VALIASED) == 0)
		return (vp->v_usecount);
	for (count = 0, vq = *vp->v_hashchain; vq; vq = vnext) {
		vnext = vq->v_specnext;
		if (vq->v_rdev != vp->v_rdev || vq->v_type != vp->v_type)
			continue;
		/*
		 * Alias, but not in use, so flush it out.
		 */
		if (vq->v_usecount == 0 && vq != vp) {
			vgone(vq);
			goto loop;
		}
		count += vq->v_usecount;
	}
	return (count);
}

/*
 * Print out a description of a vnode.
 */
static char *typename[] =
{"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD"};

void
vprint(label, vp)
	char *label;
	register struct vnode *vp;
{
	char buf[64];

	if (label != NULL)
		printf("%s: ", label);
	printf("type %s, usecount %d, writecount %d, refcount %ld,",
	    typename[vp->v_type], vp->v_usecount, vp->v_writecount,
	    vp->v_holdcnt);
	buf[0] = '\0';
	if (vp->v_flag & VROOT)
		strcat(buf, "|VROOT");
	if (vp->v_flag & VTEXT)
		strcat(buf, "|VTEXT");
	if (vp->v_flag & VSYSTEM)
		strcat(buf, "|VSYSTEM");
	if (vp->v_flag & VXLOCK)
		strcat(buf, "|VXLOCK");
	if (vp->v_flag & VXWANT)
		strcat(buf, "|VXWANT");
	if (vp->v_flag & VBWAIT)
		strcat(buf, "|VBWAIT");
	if (vp->v_flag & VALIASED)
		strcat(buf, "|VALIASED");
	if (buf[0] != '\0')
		printf(" flags (%s)", &buf[1]);
	if (vp->v_data == NULL) {
		printf("\n");
	} else {
		printf("\n\t");
		VOP_PRINT(vp);
	}
}

#ifdef DDB
/*
 * List all of the locked vnodes in the system.
 * Called when debugging the kernel.
 */
void
printlockedvnodes(void)
{
	register struct mount *mp;
	register struct vnode *vp;

	printf("Locked vnodes\n");
	for (mp = mountlist.cqh_first; mp != (void *)&mountlist;
	    mp = mp->mnt_list.cqe_next) {
		for (vp = mp->mnt_vnodelist.lh_first;
		    vp != NULL;
		    vp = vp->v_mntvnodes.le_next)
			if (VOP_ISLOCKED(vp))
				vprint((char *) 0, vp);
	}
}
#endif

int kinfo_vdebug = 1;
int kinfo_vgetfailed;

#define KINFO_VNODESLOP	10
/*
 * Dump vnode list (via sysctl).
 * Copyout address of vnode followed by vnode.
 */
/* ARGSUSED */
static int
sysctl_vnode SYSCTL_HANDLER_ARGS
{
	register struct mount *mp, *nmp;
	struct vnode *vp;
	int error;

#define VPTRSZ	sizeof (struct vnode *)
#define VNODESZ	sizeof (struct vnode)

	req->lock = 0;
	if (!req->oldptr) /* Make an estimate */
		return (SYSCTL_OUT(req, 0,
			(numvnodes + KINFO_VNODESLOP) * (VPTRSZ + VNODESZ)));

	for (mp = mountlist.cqh_first; mp != (void *)&mountlist; mp = nmp) {
		nmp = mp->mnt_list.cqe_next;
		if (vfs_busy(mp))
			continue;
again:
		for (vp = mp->mnt_vnodelist.lh_first;
		    vp != NULL;
		    vp = vp->v_mntvnodes.le_next) {
			/*
			 * Check that the vp is still associated with this
			 * filesystem.  RACE: could have been recycled onto
			 * the same filesystem.
			 */
			if (vp->v_mount != mp) {
				if (kinfo_vdebug)
					printf("kinfo: vp changed\n");
				goto again;
			}
			if ((error = SYSCTL_OUT(req, &vp, VPTRSZ)) ||
			    (error = SYSCTL_OUT(req, vp, VNODESZ))) {
				vfs_unbusy(mp);
				return (error);
			}
		}
		vfs_unbusy(mp);
	}

	return (0);
}

SYSCTL_PROC(_kern, KERN_VNODE, vnode, CTLTYPE_OPAQUE|CTLFLAG_RD,
	0, 0, sysctl_vnode, "S,vnode", "");

/*
 * Check to see if a filesystem is mounted on a block device.
 */
int
vfs_mountedon(vp)
	register struct vnode *vp;
{
	register struct vnode *vq;

	if (vp->v_specflags & SI_MOUNTEDON)
		return (EBUSY);
	if (vp->v_flag & VALIASED) {
		for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
			if (vq->v_rdev != vp->v_rdev ||
			    vq->v_type != vp->v_type)
				continue;
			if (vq->v_specflags & SI_MOUNTEDON)
				return (EBUSY);
		}
	}
	return (0);
}

/*
 * Build hash lists of net addresses and hang them off the mount point.
 * Called by ufs_mount() to set up the lists of export addresses.
 */
static int
vfs_hang_addrlist(struct mount *mp, struct netexport *nep,
	struct export_args *argp)
{
	register struct netcred *np;
	register struct radix_node_head *rnh;
	register int i;
	struct radix_node *rn;
	struct sockaddr *saddr, *smask = 0;
	struct domain *dom;
	int error;

	if (argp->ex_addrlen == 0) {
		if (mp->mnt_flag & MNT_DEFEXPORTED)
			return (EPERM);
		np = &nep->ne_defexported;
		np->netc_exflags = argp->ex_flags;
		np->netc_anon = argp->ex_anon;
		np->netc_anon.cr_ref = 1;
		mp->mnt_flag |= MNT_DEFEXPORTED;
		return (0);
	}
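	/*
	 * The netcred, the address, and the (optional) mask are carved
	 * from a single allocation: the sockaddr sits just past the
	 * netcred, and the mask just past the sockaddr.
	 */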
	i = sizeof(struct netcred) + argp->ex_addrlen + argp->ex_masklen;
	np = (struct netcred *) malloc(i, M_NETADDR, M_WAITOK);
	bzero((caddr_t) np, i);
	saddr = (struct sockaddr *) (np + 1);
	if ((error = copyin(argp->ex_addr, (caddr_t) saddr, argp->ex_addrlen)))
		goto out;
	if (saddr->sa_len > argp->ex_addrlen)
		saddr->sa_len = argp->ex_addrlen;
	if (argp->ex_masklen) {
		smask = (struct sockaddr *) ((caddr_t) saddr + argp->ex_addrlen);
		error = copyin(argp->ex_mask, (caddr_t) smask, argp->ex_masklen);
		if (error)
			goto out;
		if (smask->sa_len > argp->ex_masklen)
			smask->sa_len = argp->ex_masklen;
	}
	i = saddr->sa_family;
	if ((rnh = nep->ne_rtable[i]) == 0) {
		/*
		 * Seems silly to initialize every AF when most are not used,
		 * do so on demand here
		 */
		for (dom = domains; dom; dom = dom->dom_next)
			if (dom->dom_family == i && dom->dom_rtattach) {
				dom->dom_rtattach((void **) &nep->ne_rtable[i],
				    dom->dom_rtoffset);
				break;
			}
		if ((rnh = nep->ne_rtable[i]) == 0) {
			error = ENOBUFS;
			goto out;
		}
	}
	rn = (*rnh->rnh_addaddr) ((caddr_t) saddr, (caddr_t) smask, rnh,
	    np->netc_rnodes);
	if (rn == 0 || np != (struct netcred *) rn) {	/* already exists */
		error = EPERM;
		goto out;
	}
	np->netc_exflags = argp->ex_flags;
	np->netc_anon = argp->ex_anon;
	np->netc_anon.cr_ref = 1;
	return (0);
out:
	free(np, M_NETADDR);
	return (error);
}

/* ARGSUSED */
static int
vfs_free_netcred(struct radix_node *rn, void *w)
{
	register struct radix_node_head *rnh = (struct radix_node_head *) w;

	(*rnh->rnh_deladdr) (rn->rn_key, rn->rn_mask, rnh);
	free((caddr_t) rn, M_NETADDR);
	return (0);
}

/*
 * Free the net address hash lists that are hanging off the mount points.
 */
static void
vfs_free_addrlist(struct netexport *nep)
{
	register int i;
	register struct radix_node_head *rnh;

	for (i = 0; i <= AF_MAX; i++)
		if ((rnh = nep->ne_rtable[i])) {
			(*rnh->rnh_walktree) (rnh, vfs_free_netcred,
			    (caddr_t) rnh);
			free((caddr_t) rnh, M_RTABLE);
			nep->ne_rtable[i] = 0;
		}
}

int
vfs_export(mp, nep, argp)
	struct mount *mp;
	struct netexport *nep;
	struct export_args *argp;
{
	int error;

	if (argp->ex_flags & MNT_DELEXPORT) {
		vfs_free_addrlist(nep);
		mp->mnt_flag &= ~(MNT_EXPORTED | MNT_DEFEXPORTED);
	}
	if (argp->ex_flags & MNT_EXPORTED) {
		if ((error = vfs_hang_addrlist(mp, nep, argp)))
			return (error);
		mp->mnt_flag |= MNT_EXPORTED;
	}
	return (0);
}

struct netcred *
vfs_export_lookup(mp, nep, nam)
	register struct mount *mp;
	struct netexport *nep;
	struct mbuf *nam;
{
	register struct netcred *np;
	register struct radix_node_head *rnh;
	struct sockaddr *saddr;

	np = NULL;
	if (mp->mnt_flag & MNT_EXPORTED) {
		/*
		 * Lookup in the export list first.
		 */
		if (nam != NULL) {
			saddr = mtod(nam, struct sockaddr *);
			rnh = nep->ne_rtable[saddr->sa_family];
			if (rnh != NULL) {
				np = (struct netcred *)
				    (*rnh->rnh_matchaddr) ((caddr_t) saddr,
				    rnh);
				if (np && np->netc_rnodes->rn_flags & RNF_ROOT)
					np = NULL;
			}
		}
		/*
		 * If no address match, use the default if it exists.
		 */
		if (np == NULL && mp->mnt_flag & MNT_DEFEXPORTED)
			np = &nep->ne_defexported;
	}
	return (np);
}

/*
 * Perform msync on all vnodes under a mount point.
 * The mount point must be locked.
 */
void
vfs_msync(struct mount *mp, int flags)
{
	struct vnode *vp, *nvp;
loop:
	for (vp = mp->mnt_vnodelist.lh_first; vp != NULL; vp = nvp) {
		if (vp->v_mount != mp)
			goto loop;
		nvp = vp->v_mntvnodes.le_next;
		if (VOP_ISLOCKED(vp) && (flags != MNT_WAIT))
			continue;
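		/*
		 * Only VM objects flagged as possibly holding dirty
		 * pages need to have their pages cleaned.
		 */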
		if (vp->v_object &&
		   (((vm_object_t) vp->v_object)->flags & OBJ_MIGHTBEDIRTY)) {
			vm_object_page_clean(vp->v_object, 0, 0, TRUE, TRUE);
		}
	}
}