vfs_export.c: FreeBSD r71411 -> r71576 (unified view: "-" marks deleted lines, "+" marks added lines)
/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California. All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.

--- 22 unchanged lines hidden ---

 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)vfs_subr.c	8.31 (Berkeley) 5/26/95
- * $FreeBSD: head/sys/kern/vfs_export.c 71411 2001-01-23 04:15:19Z rwatson $
+ * $FreeBSD: head/sys/kern/vfs_export.c 71576 2001-01-24 12:35:55Z jasone $
 */

/*
 * External virtual filesystem routines
 */
#include "opt_ddb.h"
#include "opt_ffs.h"

--- 99 unchanged lines hidden ---

/* List of mounted filesystems. */
struct mntlist mountlist = TAILQ_HEAD_INITIALIZER(mountlist);

/* For any iteration/modification of mountlist */
struct mtx mountlist_mtx;

/* For any iteration/modification of mnt_vnodelist */
-struct simplelock mntvnode_slock;
+struct mtx mntvnode_mtx;

/*
 * Cache for the mount type id assigned to NFS. This is used for
 * special checks in nfs/nfs_nqlease.c and vm/vnode_pager.c.
 */
int nfs_mount_type = -1;

-#ifndef NULL_SIMPLELOCKS
/* To keep more than one thread at a time from running vfs_getnewfsid */
-static struct simplelock mntid_slock;
+static struct mtx mntid_mtx;

/* For any iteration/modification of vnode_free_list */
-static struct simplelock vnode_free_list_slock;
+static struct mtx vnode_free_list_mtx;

/*
 * For any iteration/modification of dev->si_hlist (linked through
 * v_specnext)
 */
-static struct simplelock spechash_slock;
-#endif
+static struct mtx spechash_mtx;
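
Note: the hunk above is the whole shape of the change. Each statically allocated simplelock becomes a struct mtx, which (unlike the old simplelocks, whose #ifndef NULL_SIMPLELOCKS guard suggests they compiled away on uniprocessor kernels) exists on every configuration and must be initialized by name before first use; every simple_lock()/simple_unlock() pair below becomes mtx_enter()/mtx_exit() with MTX_DEF. A minimal user-space analogue of the discipline, using POSIX threads rather than the kernel mutex API (the counter and its lock are hypothetical stand-ins for mnt_vnodelist and mntvnode_mtx):

    #include <pthread.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for a kernel list and its guarding mutex. */
    static int vnode_count;
    static pthread_mutex_t list_mtx = PTHREAD_MUTEX_INITIALIZER;

    /*
     * Every access to the shared state is bracketed by lock/unlock,
     * mirroring the mtx_enter()/mtx_exit() bracketing in the diff.
     */
    static void insert_node(void)
    {
        pthread_mutex_lock(&list_mtx);      /* mtx_enter(&mntvnode_mtx, MTX_DEF) */
        vnode_count++;
        pthread_mutex_unlock(&list_mtx);    /* mtx_exit(&mntvnode_mtx, MTX_DEF) */
    }

    int main(void)
    {
        insert_node();
        printf("%d node(s)\n", vnode_count);
        return 0;
    }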

/* Publicly exported FS */
struct nfs_public nfs_pub;

/* Zone for allocation of new vnodes - used exclusively by getnewvnode() */
static vm_zone_t vnode_zone;

/* Set to 1 to print out reclaim of active vnodes */

--- 62 unchanged lines hidden ---

 * Initialize the vnode management data structures.
 */
static void
vntblinit(void *dummy __unused)
{

	desiredvnodes = maxproc + cnt.v_page_count / 4;
	mtx_init(&mountlist_mtx, "mountlist", MTX_DEF);
-	simple_lock_init(&mntvnode_slock);
-	simple_lock_init(&mntid_slock);
-	simple_lock_init(&spechash_slock);
+	mtx_init(&mntvnode_mtx, "mntvnode", MTX_DEF);
+	mtx_init(&mntid_mtx, "mntid", MTX_DEF);
+	mtx_init(&spechash_mtx, "spechash", MTX_DEF);
	TAILQ_INIT(&vnode_free_list);
-	simple_lock_init(&vnode_free_list_slock);
+	mtx_init(&vnode_free_list_mtx, "vnode_free_list", MTX_DEF);
	vnode_zone = zinit("VNODE", sizeof (struct vnode), 0, 0, 5);
	/*
	 * Initialize the filesystem syncer.
	 */
	syncer_workitem_pending = hashinit(syncer_maxdelay, M_VNODE,
	    &syncer_mask);
	syncer_maxdelay = syncer_mask + 1;
}

--- 152 unchanged lines hidden ---

void
vfs_getnewfsid(mp)
	struct mount *mp;
{
	static u_int16_t mntid_base;
	fsid_t tfsid;
	int mtype;

-	simple_lock(&mntid_slock);
+	mtx_enter(&mntid_mtx, MTX_DEF);
	mtype = mp->mnt_vfc->vfc_typenum;
	tfsid.val[1] = mtype;
	mtype = (mtype & 0xFF) << 24;
	for (;;) {
		tfsid.val[0] = makeudev(255,
		    mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF));
		mntid_base++;
		if (vfs_getvfs(&tfsid) == NULL)
			break;
	}
	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
	mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
-	simple_unlock(&mntid_slock);
+	mtx_exit(&mntid_mtx, MTX_DEF);
}

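Note: vfs_getnewfsid() is a compact example of why the lock exists at all: the static mntid_base counter is read, used, and incremented inside the critical section, so two concurrent mounts cannot be handed the same fsid even though each may loop several times past IDs already in use. The same generate-and-retry shape in a runnable user-space sketch (next_id() and id_in_use() are hypothetical stand-ins; id_in_use() plays the role of vfs_getvfs()):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdint.h>

    static pthread_mutex_t id_mtx = PTHREAD_MUTEX_INITIALIZER; /* mntid_mtx's role */
    static uint16_t id_base;                                   /* mntid_base's role */

    /* Collision check standing in for vfs_getvfs(); always-free stub here. */
    static bool id_in_use(uint16_t id)
    {
        (void)id;
        return false;
    }

    /*
     * Advance the shared counter until an unused id turns up.  Holding the
     * mutex across the whole loop keeps the collision check and the
     * increment atomic with respect to other threads, as in vfs_getnewfsid().
     */
    static uint16_t next_id(void)
    {
        uint16_t id;

        pthread_mutex_lock(&id_mtx);
        do {
            id = id_base++;
        } while (id_in_use(id));
        pthread_mutex_unlock(&id_mtx);
        return id;
    }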
/*
 * Knob to control the precision of file timestamps:
 *
 * 0 = seconds only; nanoseconds zeroed.
 * 1 = seconds and nanoseconds, accurate within 1/HZ.
 * 2 = seconds and nanoseconds, truncated to microseconds.

--- 86 unchanged lines hidden ---

	/*
	 * We take the least recently used vnode from the freelist
	 * if we can get it and it has no cached pages, and no
	 * namecache entries are relative to it.
	 * Otherwise we allocate a new vnode
	 */

	s = splbio();
-	simple_lock(&vnode_free_list_slock);
+	mtx_enter(&vnode_free_list_mtx, MTX_DEF);

	if (wantfreevnodes && freevnodes < wantfreevnodes) {
		vp = NULL;
	} else if (!wantfreevnodes && freevnodes <= desiredvnodes) {
		/*
		 * XXX: this is only here to be backwards compatible
		 */
		vp = NULL;

--- 23 unchanged lines hidden ---

		mtx_exit(&vp->v_interlock, MTX_DEF);
		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
		vp = NULL;
	}
	if (vp) {
		vp->v_flag |= VDOOMED;
		vp->v_flag &= ~VFREE;
		freevnodes--;
-		simple_unlock(&vnode_free_list_slock);
+		mtx_exit(&vnode_free_list_mtx, MTX_DEF);
		cache_purge(vp);
		vp->v_lease = NULL;
		if (vp->v_type != VBAD) {
			vgonel(vp, p);
		} else {
			mtx_exit(&vp->v_interlock, MTX_DEF);
		}
		vn_finished_write(vnmp);

--- 14 unchanged lines hidden ---

#endif
		vp->v_flag = 0;
		vp->v_lastw = 0;
		vp->v_lasta = 0;
		vp->v_cstart = 0;
		vp->v_clen = 0;
		vp->v_socket = 0;
	} else {
-		simple_unlock(&vnode_free_list_slock);
+		mtx_exit(&vnode_free_list_mtx, MTX_DEF);
		vp = (struct vnode *) zalloc(vnode_zone);
		bzero((char *) vp, sizeof *vp);
		mtx_init(&vp->v_interlock, "vnode interlock", MTX_DEF);
		vp->v_dd = vp;
+		mtx_init(&vp->v_pollinfo.vpi_lock, "vnode pollinfo", MTX_DEF);
		cache_purge(vp);
		LIST_INIT(&vp->v_cache_src);
		TAILQ_INIT(&vp->v_cache_dst);
		numvnodes++;
	}

	TAILQ_INIT(&vp->v_cleanblkhd);
	TAILQ_INIT(&vp->v_dirtyblkhd);

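Note: the single added line in this hunk is easy to miss. A freshly allocated vnode is bzero()ed, which was evidently sufficient for the old simplelock in v_pollinfo (all-zeroes being its unlocked state), but a struct mtx carries state that must be set up explicitly, hence the new mtx_init() of vpi_lock next to the existing one for v_interlock. POSIX mutexes behave the same way; a zeroed pthread_mutex_t is not portably initialized (sketch; struct node is a hypothetical reduction of struct vnode):

    #include <pthread.h>
    #include <stdlib.h>
    #include <string.h>

    struct node {                       /* hypothetical stand-in for struct vnode */
        pthread_mutex_t lock;           /* ... for v_pollinfo.vpi_lock */
        int events;
    };

    static struct node *node_alloc(void)
    {
        struct node *n = malloc(sizeof(*n));

        if (n == NULL)
            return NULL;
        memset(n, 0, sizeof(*n));       /* like the bzero() of a fresh vnode */
        /*
         * Zero bytes are not a portably initialized mutex; initialize it
         * explicitly, as the diff now does with mtx_init() for vpi_lock.
         */
        pthread_mutex_init(&n->lock, NULL);
        return n;
    }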
--- 15 unchanged lines hidden ---

 * Move a vnode from one mount queue to another.
 */
static void
insmntque(vp, mp)
	register struct vnode *vp;
	register struct mount *mp;
{

-	simple_lock(&mntvnode_slock);
+	mtx_enter(&mntvnode_mtx, MTX_DEF);
	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mount != NULL)
		LIST_REMOVE(vp, v_mntvnodes);
	/*
	 * Insert into list of vnodes for the new mount point, if available.
	 */
	if ((vp->v_mount = mp) == NULL) {
-		simple_unlock(&mntvnode_slock);
+		mtx_exit(&mntvnode_mtx, MTX_DEF);
		return;
	}
	LIST_INSERT_HEAD(&mp->mnt_vnodelist, vp, v_mntvnodes);
-	simple_unlock(&mntvnode_slock);
+	mtx_exit(&mntvnode_mtx, MTX_DEF);
}
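
Note: insmntque() shows the one mechanical hazard of a conversion like this: the mutex must be released on every exit path, so the early return gets its own mtx_exit() just as it had its own simple_unlock(). The same shape in user-space form (table_put() and its table are hypothetical):

    #include <pthread.h>
    #include <stddef.h>

    static pthread_mutex_t table_mtx = PTHREAD_MUTEX_INITIALIZER;
    static int table[16];
    static size_t table_len;

    /*
     * Insert-or-bail, with the early-return path unlocking too,
     * mirroring insmntque()'s two mtx_exit() calls.
     */
    static void table_put(int key, int have_slot)
    {
        pthread_mutex_lock(&table_mtx);
        if (!have_slot) {               /* analogue of mp == NULL */
            pthread_mutex_unlock(&table_mtx);
            return;
        }
        if (table_len < sizeof(table) / sizeof(table[0]))
            table[table_len++] = key;
        pthread_mutex_unlock(&table_mtx);
    }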

/*
 * Update outstanding I/O count and do wakeup if requested.
 */
void
vwakeup(bp)
	register struct buf *bp;

--- 725 unchanged lines hidden ---

static void
addalias(nvp, dev)
	struct vnode *nvp;
	dev_t dev;
{

	KASSERT(nvp->v_type == VCHR, ("addalias on non-special vnode"));
	nvp->v_rdev = dev;
-	simple_lock(&spechash_slock);
+	mtx_enter(&spechash_mtx, MTX_DEF);
	SLIST_INSERT_HEAD(&dev->si_hlist, nvp, v_specnext);
-	simple_unlock(&spechash_slock);
+	mtx_exit(&spechash_mtx, MTX_DEF);
}

/*
 * Grab a particular vnode from the free list, increment its
 * reference count and lock it. The vnode lock bit is set if the
 * vnode is being eliminated in vgone. The process is awakened
 * when the transition is completed, and an error returned to
 * indicate that the vnode is no longer usable (possibly having

--- 207 unchanged lines hidden ---

	struct mount *mp;
	struct vnode *skipvp;
	int flags;
{
	struct proc *p = curproc;	/* XXX */
	struct vnode *vp, *nvp;
	int busy = 0;

-	simple_lock(&mntvnode_slock);
+	mtx_enter(&mntvnode_mtx, MTX_DEF);
loop:
	for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp; vp = nvp) {
		/*
		 * Make sure this vnode wasn't reclaimed in getnewvnode().
		 * Start over if it has (it won't be on the list anymore).
		 */
		if (vp->v_mount != mp)
			goto loop;

--- 22 unchanged lines hidden ---

			continue;
		}

		/*
		 * With v_usecount == 0, all we need to do is clear out the
		 * vnode data structures and we are done.
		 */
		if (vp->v_usecount == 0) {
-			simple_unlock(&mntvnode_slock);
+			mtx_exit(&mntvnode_mtx, MTX_DEF);
			vgonel(vp, p);
-			simple_lock(&mntvnode_slock);
+			mtx_enter(&mntvnode_mtx, MTX_DEF);
			continue;
		}

		/*
		 * If FORCECLOSE is set, forcibly close the vnode. For block
		 * or character devices, revert to an anonymous device. For
		 * all other files, just kill them.
		 */
		if (flags & FORCECLOSE) {
-			simple_unlock(&mntvnode_slock);
+			mtx_exit(&mntvnode_mtx, MTX_DEF);
			if (vp->v_type != VCHR) {
				vgonel(vp, p);
			} else {
				vclean(vp, 0, p);
				vp->v_op = spec_vnodeop_p;
				insmntque(vp, (struct mount *) 0);
			}
-			simple_lock(&mntvnode_slock);
+			mtx_enter(&mntvnode_mtx, MTX_DEF);
			continue;
		}
#ifdef DIAGNOSTIC
		if (busyprt)
			vprint("vflush: busy vnode", vp);
#endif
		mtx_exit(&vp->v_interlock, MTX_DEF);
		busy++;
	}
-	simple_unlock(&mntvnode_slock);
+	mtx_exit(&mntvnode_mtx, MTX_DEF);
	if (busy)
		return (EBUSY);
	return (0);
}

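Note: vflush() demonstrates the drop-and-reacquire idiom. The list mutex cannot be held across vgonel(), which may sleep, so it is released around the call and retaken afterwards, and the scan restarts from the top (goto loop) whenever the list may have changed underneath it. A user-space sketch of that shape (reclaim() is a hypothetical blocking operation):

    #include <pthread.h>

    #define NNODES 8

    static pthread_mutex_t list_mtx = PTHREAD_MUTEX_INITIALIZER;
    static int busy[NNODES];            /* nonzero = needs work */

    /* Hypothetical blocking operation; must be called with the lock dropped. */
    static void reclaim(int i)
    {
        (void)i;
    }

    static void flush_all(void)
    {
        int i;

        pthread_mutex_lock(&list_mtx);
    restart:
        for (i = 0; i < NNODES; i++) {
            if (!busy[i])
                continue;
            busy[i] = 0;
            /* Drop the list lock around the blocking call... */
            pthread_mutex_unlock(&list_mtx);
            reclaim(i);
            pthread_mutex_lock(&list_mtx);
            /* ...then rescan, since the list may have changed. */
            goto restart;
        }
        pthread_mutex_unlock(&list_mtx);
    }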
/*
 * Disassociate the underlying file system from a vnode.
 */

--- 128 unchanged lines hidden ---

	if (vp->v_flag & VXLOCK) {
		vp->v_flag |= VXWANT;
		msleep((caddr_t)vp, &vp->v_interlock, PINOD | PDROP,
		    "vop_revokeall", 0);
		return (0);
	}
	dev = vp->v_rdev;
	for (;;) {
-		simple_lock(&spechash_slock);
+		mtx_enter(&spechash_mtx, MTX_DEF);
		vq = SLIST_FIRST(&dev->si_hlist);
-		simple_unlock(&spechash_slock);
+		mtx_exit(&spechash_mtx, MTX_DEF);
		if (!vq)
			break;
		vgone(vq);
	}
	return (0);
}

/*
 * Recycle an unused vnode to the front of the free list.
 * Release the passed interlock if the vnode will be recycled.
 */
int
vrecycle(vp, inter_lkp, p)
	struct vnode *vp;
-	struct simplelock *inter_lkp;
+	struct mtx *inter_lkp;
	struct proc *p;
{

	mtx_enter(&vp->v_interlock, MTX_DEF);
	if (vp->v_usecount == 0) {
		if (inter_lkp) {
-			simple_unlock(inter_lkp);
+			mtx_exit(inter_lkp, MTX_DEF);
		}
		vgonel(vp, p);
		return (1);
	}
	mtx_exit(&vp->v_interlock, MTX_DEF);
	return (0);
}

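Note: vrecycle()'s prototype changes along with the bodies: callers now pass a struct mtx *, so any caller still handing in a simplelock fails to compile, which is what makes a sweeping one-pass conversion like this one safe to land. The conditional lock hand-off looks like this in a user-space sketch (try_recycle() is a hypothetical reduction):

    #include <pthread.h>
    #include <stddef.h>

    /*
     * If the object is idle, release the caller's lock on its behalf and
     * report success -- the shape of vrecycle(vp, inter_lkp, p), with the
     * parameter now strongly typed as a mutex.
     */
    static int try_recycle(int usecount, pthread_mutex_t *inter_lkp)
    {
        if (usecount == 0) {
            if (inter_lkp != NULL)
                pthread_mutex_unlock(inter_lkp);
            /* ... reclaim the object here ... */
            return 1;
        }
        return 0;
    }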

--- 43 unchanged lines hidden ---

	 */
	if (vp->v_mount != NULL)
		insmntque(vp, (struct mount *)0);
	/*
	 * If special device, remove it from special device alias list
	 * if it is on one.
	 */
	if (vp->v_type == VCHR && vp->v_rdev != NULL && vp->v_rdev != NODEV) {
-		simple_lock(&spechash_slock);
+		mtx_enter(&spechash_mtx, MTX_DEF);
		SLIST_REMOVE(&vp->v_rdev->si_hlist, vp, vnode, v_specnext);
		freedev(vp->v_rdev);
-		simple_unlock(&spechash_slock);
+		mtx_exit(&spechash_mtx, MTX_DEF);
		vp->v_rdev = NULL;
	}

	/*
	 * If it is on the freelist and not already at the head,
	 * move it to the head of the list. The test of the
	 * VDOOMED flag and the reference count of zero is because
	 * it will be removed from the free list by getnewvnode,
	 * but will not have its reference count incremented until
	 * after calling vgone. If the reference count were
	 * incremented first, vgone would (incorrectly) try to
	 * close the previous instance of the underlying object.
	 */
	if (vp->v_usecount == 0 && !(vp->v_flag & VDOOMED)) {
		s = splbio();
-		simple_lock(&vnode_free_list_slock);
+		mtx_enter(&vnode_free_list_mtx, MTX_DEF);
		if (vp->v_flag & VFREE)
			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
		else
			freevnodes++;
		vp->v_flag |= VFREE;
		TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
-		simple_unlock(&vnode_free_list_slock);
+		mtx_exit(&vnode_free_list_mtx, MTX_DEF);
		splx(s);
	}

	vp->v_type = VBAD;
	mtx_exit(&vp->v_interlock, MTX_DEF);
}

/*
 * Lookup a vnode by device number.
 */
int
vfinddev(dev, type, vpp)
	dev_t dev;
	enum vtype type;
	struct vnode **vpp;
{
	struct vnode *vp;

-	simple_lock(&spechash_slock);
+	mtx_enter(&spechash_mtx, MTX_DEF);
	SLIST_FOREACH(vp, &dev->si_hlist, v_specnext) {
		if (type == vp->v_type) {
			*vpp = vp;
-			simple_unlock(&spechash_slock);
+			mtx_exit(&spechash_mtx, MTX_DEF);
			return (1);
		}
	}
-	simple_unlock(&spechash_slock);
+	mtx_exit(&spechash_mtx, MTX_DEF);
	return (0);
}

/*
 * Calculate the total number of references to a special device.
 */
int
vcount(vp)
	struct vnode *vp;
{
	struct vnode *vq;
	int count;

	count = 0;
-	simple_lock(&spechash_slock);
+	mtx_enter(&spechash_mtx, MTX_DEF);
	SLIST_FOREACH(vq, &vp->v_rdev->si_hlist, v_specnext)
		count += vq->v_usecount;
-	simple_unlock(&spechash_slock);
+	mtx_exit(&spechash_mtx, MTX_DEF);
	return (count);
}

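Note: vcount() computes its total entirely inside one critical section, so the caller gets a self-consistent sum over the alias chain; the instant spechash_mtx is dropped the value is only a snapshot, which is all its callers need. The same pattern in a sketch (the array stands in for the si_hlist chain):

    #include <pthread.h>

    static pthread_mutex_t hash_mtx = PTHREAD_MUTEX_INITIALIZER;
    static int usecount[4];             /* per-alias v_usecount stand-in */

    /*
     * Sum every alias's count in one critical section so the caller sees
     * a consistent (if instantly stale) total, as vcount() does.
     */
    static int count_refs(void)
    {
        int i, count = 0;

        pthread_mutex_lock(&hash_mtx);
        for (i = 0; i < 4; i++)
            count += usecount[i];
        pthread_mutex_unlock(&hash_mtx);
        return count;
    }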
/*
 * Same as above, but using the dev_t as argument
 */
int
count_dev(dev)

--- 190 unchanged lines hidden ---

	mtx_enter(&mountlist_mtx, MTX_DEF);
	for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
		if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, p)) {
			nmp = TAILQ_NEXT(mp, mnt_list);
			continue;
		}
again:
-		simple_lock(&mntvnode_slock);
+		mtx_enter(&mntvnode_mtx, MTX_DEF);
		for (vp = LIST_FIRST(&mp->mnt_vnodelist);
		     vp != NULL;
		     vp = nvp) {
			/*
			 * Check that the vp is still associated with
			 * this filesystem. RACE: could have been
			 * recycled onto the same filesystem.
			 */
			if (vp->v_mount != mp) {
-				simple_unlock(&mntvnode_slock);
+				mtx_exit(&mntvnode_mtx, MTX_DEF);
				goto again;
			}
			nvp = LIST_NEXT(vp, v_mntvnodes);
-			simple_unlock(&mntvnode_slock);
+			mtx_exit(&mntvnode_mtx, MTX_DEF);
			if ((error = SYSCTL_OUT(req, &vp, VPTRSZ)) ||
			    (error = SYSCTL_OUT(req, vp, VNODESZ)))
				return (error);
-			simple_lock(&mntvnode_slock);
+			mtx_enter(&mntvnode_mtx, MTX_DEF);
		}
-		simple_unlock(&mntvnode_slock);
+		mtx_exit(&mntvnode_mtx, MTX_DEF);
		mtx_enter(&mountlist_mtx, MTX_DEF);
		nmp = TAILQ_NEXT(mp, mnt_list);
		vfs_unbusy(mp, p);
	}
	mtx_exit(&mountlist_mtx, MTX_DEF);

	return (0);
}
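
Note: the sysctl loop drops mntvnode_mtx around SYSCTL_OUT() because copying out to user space can fault and sleep; nvp is captured before the unlock so the walk can resume, and the vp->v_mount recheck above restarts the scan if the vnode was recycled meanwhile. A user-space sketch of the unlock-around-blocking-work shape (emit() is a hypothetical blocking sink; unlike the kernel code, this sketch does not revalidate the list after relocking):

    #include <pthread.h>
    #include <stddef.h>

    struct item {
        struct item *next;
        int data;
    };

    static pthread_mutex_t list_mtx = PTHREAD_MUTEX_INITIALIZER;
    static struct item *head;

    /* Hypothetical blocking sink; must not be called with the lock held. */
    static void emit(int data)
    {
        (void)data;
    }

    static void walk_and_emit(void)
    {
        struct item *it, *next;

        pthread_mutex_lock(&list_mtx);
        for (it = head; it != NULL; it = next) {
            next = it->next;            /* capture before unlocking */
            pthread_mutex_unlock(&list_mtx);
            emit(it->data);             /* blocking work, lock dropped */
            pthread_mutex_lock(&list_mtx);
        }
        pthread_mutex_unlock(&list_mtx);
    }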

--- 392 unchanged lines hidden ---

 */
void
vfree(vp)
	struct vnode *vp;
{
	int s;

	s = splbio();
-	simple_lock(&vnode_free_list_slock);
+	mtx_enter(&vnode_free_list_mtx, MTX_DEF);
	KASSERT((vp->v_flag & VFREE) == 0, ("vnode already free"));
	if (vp->v_flag & VAGE) {
		TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
	} else {
		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
	}
	freevnodes++;
-	simple_unlock(&vnode_free_list_slock);
+	mtx_exit(&vnode_free_list_mtx, MTX_DEF);
	vp->v_flag &= ~VAGE;
	vp->v_flag |= VFREE;
	splx(s);
}

/*
 * Opposite of vfree() - mark a vnode as in use.
 */
void
vbusy(vp)
	struct vnode *vp;
{
	int s;

	s = splbio();
-	simple_lock(&vnode_free_list_slock);
+	mtx_enter(&vnode_free_list_mtx, MTX_DEF);
	KASSERT((vp->v_flag & VFREE) != 0, ("vnode not free"));
	TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
	freevnodes--;
-	simple_unlock(&vnode_free_list_slock);
+	mtx_exit(&vnode_free_list_mtx, MTX_DEF);
	vp->v_flag &= ~(VFREE|VAGE);
	splx(s);
}

/*
 * Record a process's interest in events which might happen to
 * a vnode. Because poll uses the historic select-style interface
 * internally, this routine serves as both the ``check for any
 * pending events'' and the ``record my interest in future events''
 * functions. (These are done together, while the lock is held,
 * to avoid race conditions.)
 */
int
vn_pollrecord(vp, p, events)
	struct vnode *vp;
	struct proc *p;
	short events;
{
-	simple_lock(&vp->v_pollinfo.vpi_lock);
+	mtx_enter(&vp->v_pollinfo.vpi_lock, MTX_DEF);
	if (vp->v_pollinfo.vpi_revents & events) {
		/*
		 * This leaves events we are not interested
		 * in available for the other process which
		 * presumably had requested them
		 * (otherwise they would never have been
		 * recorded).
		 */
		events &= vp->v_pollinfo.vpi_revents;
		vp->v_pollinfo.vpi_revents &= ~events;

-		simple_unlock(&vp->v_pollinfo.vpi_lock);
+		mtx_exit(&vp->v_pollinfo.vpi_lock, MTX_DEF);
		return events;
	}
	vp->v_pollinfo.vpi_events |= events;
	selrecord(p, &vp->v_pollinfo.vpi_selinfo);
-	simple_unlock(&vp->v_pollinfo.vpi_lock);
+	mtx_exit(&vp->v_pollinfo.vpi_lock, MTX_DEF);
	return 0;
}

/*
 * Note the occurrence of an event. If the VN_POLLEVENT macro is used,
 * it is possible for us to miss an event due to race conditions, but
 * that condition is expected to be rare, so for the moment it is the
 * preferred interface.
 */
void
vn_pollevent(vp, events)
	struct vnode *vp;
	short events;
{
-	simple_lock(&vp->v_pollinfo.vpi_lock);
+	mtx_enter(&vp->v_pollinfo.vpi_lock, MTX_DEF);
	if (vp->v_pollinfo.vpi_events & events) {
		/*
		 * We clear vpi_events so that we don't
		 * call selwakeup() twice if two events are
		 * posted before the polling process(es) is
		 * awakened. This also ensures that we take at
		 * most one selwakeup() if the polling process
		 * is no longer interested. However, it does
		 * mean that only one event can be noticed at
		 * a time. (Perhaps we should only clear those
		 * event bits which we note?) XXX
		 */
		vp->v_pollinfo.vpi_events = 0;	/* &= ~events ??? */
		vp->v_pollinfo.vpi_revents |= events;
		selwakeup(&vp->v_pollinfo.vpi_selinfo);
	}
-	simple_unlock(&vp->v_pollinfo.vpi_lock);
+	mtx_exit(&vp->v_pollinfo.vpi_lock, MTX_DEF);
}

/*
 * Wake up anyone polling on vp because it is being revoked.
 * This depends on dead_poll() returning POLLHUP for correct
 * behavior.
 */
void
vn_pollgone(vp)
	struct vnode *vp;
{
-	simple_lock(&vp->v_pollinfo.vpi_lock);
+	mtx_enter(&vp->v_pollinfo.vpi_lock, MTX_DEF);
	if (vp->v_pollinfo.vpi_events) {
		vp->v_pollinfo.vpi_events = 0;
		selwakeup(&vp->v_pollinfo.vpi_selinfo);
	}
-	simple_unlock(&vp->v_pollinfo.vpi_lock);
+	mtx_exit(&vp->v_pollinfo.vpi_lock, MTX_DEF);
}
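
Note: vn_pollrecord()/vn_pollevent() are a check-or-register pair: interest and occurrence are both recorded under the same per-vnode mutex, so an event posted concurrently can never fall between the "any pending events?" test and the registration of interest. A condensed user-space rendering (sketch; the selinfo/selwakeup machinery is reduced to a comment):

    #include <pthread.h>

    static pthread_mutex_t poll_mtx = PTHREAD_MUTEX_INITIALIZER;
    static short wanted_events;     /* vpi_events */
    static short ready_events;      /* vpi_revents */

    /* Check-or-register in one critical section (vn_pollrecord's shape). */
    static short poll_record(short events)
    {
        short got;

        pthread_mutex_lock(&poll_mtx);
        got = ready_events & events;
        if (got != 0)
            ready_events &= ~got;           /* consume what we asked for */
        else
            wanted_events |= events;        /* register interest */
        pthread_mutex_unlock(&poll_mtx);
        return got;
    }

    /* Post an event under the same lock (vn_pollevent's shape). */
    static void poll_event(short events)
    {
        pthread_mutex_lock(&poll_mtx);
        if (wanted_events & events) {
            wanted_events = 0;
            ready_events |= events;
            /* selwakeup() would wake the poller here */
        }
        pthread_mutex_unlock(&poll_mtx);
    }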


/*
 * Routine to create and manage a filesystem syncer vnode.
 */
#define sync_close ((int (*) __P((struct vop_close_args *)))nullop)

--- 375 unchanged lines hidden ---