vfs_export.c (17605) → vfs_export.c (17761)
1/*
2 * Copyright (c) 1989, 1993
3 * The Regents of the University of California. All rights reserved.
4 * (c) UNIX System Laboratories, Inc.
5 * All or some portions of this file are derived from material licensed
6 * to the University of California by American Telephone and Telegraph
7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8 * the permission of UNIX System Laboratories, Inc.

--- 22 unchanged lines hidden ---

31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * SUCH DAMAGE.
37 *
38 * @(#)vfs_subr.c 8.13 (Berkeley) 4/18/94
1/*
2 * Copyright (c) 1989, 1993
3 * The Regents of the University of California. All rights reserved.
4 * (c) UNIX System Laboratories, Inc.
5 * All or some portions of this file are derived from material licensed
6 * to the University of California by American Telephone and Telegraph
7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8 * the permission of UNIX System Laboratories, Inc.

--- 22 unchanged lines hidden ---

31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * SUCH DAMAGE.
37 *
38 * @(#)vfs_subr.c 8.13 (Berkeley) 4/18/94
39 * $Id: vfs_subr.c,v 1.57 1996/07/30 18:00:25 bde Exp $
39 * $Id: vfs_subr.c,v 1.58 1996/08/15 06:45:01 dyson Exp $
40 */
41
42/*
43 * External virtual filesystem routines
44 */
45#include "opt_ddb.h"
46
47#include <sys/param.h>

--- 12 unchanged lines hidden ---

60#include <sys/malloc.h>
61#include <sys/domain.h>
62#include <sys/mbuf.h>
63
64#include <vm/vm.h>
65#include <vm/vm_param.h>
66#include <vm/vm_object.h>
67#include <vm/vm_extern.h>
40 */
41
42/*
43 * External virtual filesystem routines
44 */
45#include "opt_ddb.h"
46
47#include <sys/param.h>

--- 12 unchanged lines hidden ---

60#include <sys/malloc.h>
61#include <sys/domain.h>
62#include <sys/mbuf.h>
63
64#include <vm/vm.h>
65#include <vm/vm_param.h>
66#include <vm/vm_object.h>
67#include <vm/vm_extern.h>
68#include <vm/vm_pager.h>
69#include <vm/vnode_pager.h>
68#include <sys/sysctl.h>
69
70#include <miscfs/specfs/specdev.h>
71
72#ifdef DDB
73extern void printlockedvnodes __P((void));
74#endif
75extern void vclean __P((struct vnode *vp, int flags));

--- 396 unchanged lines hidden ---

472 vm_object_t object;
473
474 if (flags & V_SAVE) {
475 if ((error = VOP_FSYNC(vp, cred, MNT_WAIT, p)))
476 return (error);
477 if (vp->v_dirtyblkhd.lh_first != NULL)
478 panic("vinvalbuf: dirty bufs");
479 }
70#include <sys/sysctl.h>
71
72#include <miscfs/specfs/specdev.h>
73
74#ifdef DDB
75extern void printlockedvnodes __P((void));
76#endif
77extern void vclean __P((struct vnode *vp, int flags));

--- 396 unchanged lines hidden ---

474 vm_object_t object;
475
476 if (flags & V_SAVE) {
477 if ((error = VOP_FSYNC(vp, cred, MNT_WAIT, p)))
478 return (error);
479 if (vp->v_dirtyblkhd.lh_first != NULL)
480 panic("vinvalbuf: dirty bufs");
481 }
482
483 s = splbio();
480 for (;;) {
481 if ((blist = vp->v_cleanblkhd.lh_first) && (flags & V_SAVEMETA))
482 while (blist && blist->b_lblkno < 0)
483 blist = blist->b_vnbufs.le_next;
484 if (!blist && (blist = vp->v_dirtyblkhd.lh_first) &&
485 (flags & V_SAVEMETA))
486 while (blist && blist->b_lblkno < 0)
487 blist = blist->b_vnbufs.le_next;
488 if (!blist)
489 break;
490
491 for (bp = blist; bp; bp = nbp) {
492 nbp = bp->b_vnbufs.le_next;
493 if ((flags & V_SAVEMETA) && bp->b_lblkno < 0)
494 continue;
484 for (;;) {
485 if ((blist = vp->v_cleanblkhd.lh_first) && (flags & V_SAVEMETA))
486 while (blist && blist->b_lblkno < 0)
487 blist = blist->b_vnbufs.le_next;
488 if (!blist && (blist = vp->v_dirtyblkhd.lh_first) &&
489 (flags & V_SAVEMETA))
490 while (blist && blist->b_lblkno < 0)
491 blist = blist->b_vnbufs.le_next;
492 if (!blist)
493 break;
494
495 for (bp = blist; bp; bp = nbp) {
496 nbp = bp->b_vnbufs.le_next;
497 if ((flags & V_SAVEMETA) && bp->b_lblkno < 0)
498 continue;
495 s = splbio();
496 if (bp->b_flags & B_BUSY) {
497 bp->b_flags |= B_WANTED;
498 error = tsleep((caddr_t) bp,
499 slpflag | (PRIBIO + 1), "vinvalbuf",
500 slptimeo);
501 splx(s);
502 if (error)
503 return (error);
504 break;
505 }
506 bremfree(bp);
507 bp->b_flags |= B_BUSY;
499 if (bp->b_flags & B_BUSY) {
500 bp->b_flags |= B_WANTED;
501 error = tsleep((caddr_t) bp,
502 slpflag | (PRIBIO + 1), "vinvalbuf",
503 slptimeo);
504 splx(s);
505 if (error)
506 return (error);
507 break;
508 }
509 bremfree(bp);
510 bp->b_flags |= B_BUSY;
508 splx(s);
509 /*
510 * XXX Since there are no node locks for NFS, I
511 * believe there is a slight chance that a delayed
512 * write will occur while sleeping just above, so
513 * check for it.
514 */
515 if ((bp->b_flags & B_DELWRI) && (flags & V_SAVE)) {
516 (void) VOP_BWRITE(bp);
517 break;
518 }
519 bp->b_flags |= (B_INVAL|B_NOCACHE|B_RELBUF);
520 brelse(bp);
521 }
522 }
511 /*
512 * XXX Since there are no node locks for NFS, I
513 * believe there is a slight chance that a delayed
514 * write will occur while sleeping just above, so
515 * check for it.
516 */
517 if ((bp->b_flags & B_DELWRI) && (flags & V_SAVE)) {
518 (void) VOP_BWRITE(bp);
519 break;
520 }
521 bp->b_flags |= (B_INVAL|B_NOCACHE|B_RELBUF);
522 brelse(bp);
523 }
524 }
525 splx(s);
523
524 s = splbio();
525 while (vp->v_numoutput > 0) {
526 vp->v_flag |= VBWAIT;
527 tsleep(&vp->v_numoutput, PVM, "vnvlbv", 0);
528 }
529 splx(s);
530

--- 102 unchanged lines hidden ---

633 * Used to assign file specific control information
634 * (indirect blocks) to the vnode to which they belong.
635 */
636void
637reassignbuf(bp, newvp)
638 register struct buf *bp;
639 register struct vnode *newvp;
640{
526
527 s = splbio();
528 while (vp->v_numoutput > 0) {
529 vp->v_flag |= VBWAIT;
530 tsleep(&vp->v_numoutput, PVM, "vnvlbv", 0);
531 }
532 splx(s);
533

--- 102 unchanged lines hidden ---

636 * Used to assign file specific control information
637 * (indirect blocks) to the vnode to which they belong.
638 */
639void
640reassignbuf(bp, newvp)
641 register struct buf *bp;
642 register struct vnode *newvp;
643{
641 register struct buflists *listheadp;
642 int s;
643
644 if (newvp == NULL) {
645 printf("reassignbuf: NULL");
646 return;
647 }
648
649 s = splbio();

--- 15 unchanged lines hidden ---

665 } else {
666 while (tbp->b_vnbufs.le_next &&
667 (tbp->b_vnbufs.le_next->b_lblkno < bp->b_lblkno)) {
668 tbp = tbp->b_vnbufs.le_next;
669 }
670 LIST_INSERT_AFTER(tbp, bp, b_vnbufs);
671 }
672 } else {
644 int s;
645
646 if (newvp == NULL) {
647 printf("reassignbuf: NULL");
648 return;
649 }
650
651 s = splbio();

--- 15 unchanged lines hidden ---

667 } else {
668 while (tbp->b_vnbufs.le_next &&
669 (tbp->b_vnbufs.le_next->b_lblkno < bp->b_lblkno)) {
670 tbp = tbp->b_vnbufs.le_next;
671 }
672 LIST_INSERT_AFTER(tbp, bp, b_vnbufs);
673 }
674 } else {
673 listheadp = &newvp->v_cleanblkhd;
674 bufinsvn(bp, listheadp);
675 bufinsvn(bp, &newvp->v_cleanblkhd);
675 }
676 splx(s);
677}
678
679#ifndef DEVFS_ROOT
680/*
681 * Create a vnode for a block device.
682 * Used for root filesystem, argdev, and swap areas.

--- 57 unchanged lines hidden ---

740 if (vp->v_usecount == 0) {
741 vgone(vp);
742 goto loop;
743 }
744 if (vget(vp, 1))
745 goto loop;
746 break;
747 }
676 }
677 splx(s);
678}
679
680#ifndef DEVFS_ROOT
681/*
682 * Create a vnode for a block device.
683 * Used for root filesystem, argdev, and swap areas.

--- 57 unchanged lines hidden ---

741 if (vp->v_usecount == 0) {
742 vgone(vp);
743 goto loop;
744 }
745 if (vget(vp, 1))
746 goto loop;
747 break;
748 }
749
748 if (vp == NULL || vp->v_tag != VT_NON) {
749 MALLOC(nvp->v_specinfo, struct specinfo *,
750 sizeof(struct specinfo), M_VNODE, M_WAITOK);
751 nvp->v_rdev = nvp_rdev;
752 nvp->v_hashchain = vpp;
753 nvp->v_specnext = *vpp;
754 nvp->v_specflags = 0;
755 *vpp = nvp;

--- 43 unchanged lines hidden ---

799 (void) tsleep((caddr_t) vp, PINOD, "vget", 0);
800 return (1);
801 }
802 if (vp->v_usecount == 0) {
803 TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
804 freevnodes--;
805 }
806 vp->v_usecount++;
750 if (vp == NULL || vp->v_tag != VT_NON) {
751 MALLOC(nvp->v_specinfo, struct specinfo *,
752 sizeof(struct specinfo), M_VNODE, M_WAITOK);
753 nvp->v_rdev = nvp_rdev;
754 nvp->v_hashchain = vpp;
755 nvp->v_specnext = *vpp;
756 nvp->v_specflags = 0;
757 *vpp = nvp;

--- 43 unchanged lines hidden ---

801 (void) tsleep((caddr_t) vp, PINOD, "vget", 0);
802 return (1);
803 }
804 if (vp->v_usecount == 0) {
805 TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
806 freevnodes--;
807 }
808 vp->v_usecount++;
809
810 /*
811 * Create the VM object, if needed
812 */
813 if ((vp->v_type == VREG) &&
814 ((vp->v_object == NULL) ||
815 (vp->v_object->flags & OBJ_VFS_REF) == 0)) {
816 vfs_object_create(vp, curproc, curproc->p_ucred, 0);
817 }
807 if (lockflag)
808 VOP_LOCK(vp);
818 if (lockflag)
819 VOP_LOCK(vp);
820
809 return (0);
810}
811
812/*
813 * Vnode reference, just increment the count
814 */
815void
816vref(vp)
817 struct vnode *vp;
818{
821 return (0);
822}
823
824/*
825 * Vnode reference, just increment the count
826 */
827void
828vref(vp)
829 struct vnode *vp;
830{
819
820 if (vp->v_usecount <= 0)
821 panic("vref used where vget required");
831 if (vp->v_usecount <= 0)
832 panic("vref used where vget required");
833
834 if ((vp->v_type == VREG) &&
835 ((vp->v_object == NULL) ||
836 ((vp->v_object->flags & OBJ_VFS_REF) == 0)) ) {
837 /*
838 * We need to lock to VP during the time that
839 * the object is created. This is necessary to
840 * keep the system from re-entrantly doing it
841 * multiple times.
842 */
843 vfs_object_create(vp, curproc, curproc->p_ucred, 0);
844 }
845
822 vp->v_usecount++;
823}
824
825/*
826 * vput(), just unlock and vrele()
827 */
828void
829vput(vp)
830 register struct vnode *vp;
831{
846 vp->v_usecount++;
847}
848
849/*
850 * vput(), just unlock and vrele()
851 */
852void
853vput(vp)
854 register struct vnode *vp;
855{
832
833 VOP_UNLOCK(vp);
834 vrele(vp);
835}
836
837/*
838 * Vnode release.
839 * If count drops to zero, call inactive routine and return to freelist.
840 */
841void
842vrele(vp)
843 register struct vnode *vp;
844{
845
846#ifdef DIAGNOSTIC
847 if (vp == NULL)
848 panic("vrele: null vp");
849#endif
856 VOP_UNLOCK(vp);
857 vrele(vp);
858}
859
860/*
861 * Vnode release.
862 * If count drops to zero, call inactive routine and return to freelist.
863 */
864void
865vrele(vp)
866 register struct vnode *vp;
867{
868
869#ifdef DIAGNOSTIC
870 if (vp == NULL)
871 panic("vrele: null vp");
872#endif
873
850 vp->v_usecount--;
874 vp->v_usecount--;
875
876 if ((vp->v_usecount == 1) &&
877 vp->v_object &&
878 (vp->v_object->flags & OBJ_VFS_REF)) {
879 vp->v_object->flags &= ~OBJ_VFS_REF;
880 vm_object_deallocate(vp->v_object);
881 return;
882 }
883
851 if (vp->v_usecount > 0)
852 return;
884 if (vp->v_usecount > 0)
885 return;
853 if (vp->v_usecount < 0 /* || vp->v_writecount < 0 */ ) {
886
887 if (vp->v_usecount < 0) {
854#ifdef DIAGNOSTIC
855 vprint("vrele: negative ref count", vp);
856#endif
857 panic("vrele: negative reference cnt");
858 }
859 if (vp->v_flag & VAGE) {
860 TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
861 vp->v_flag &= ~VAGE;

--- 77 unchanged lines hidden ---

939 continue;
940 /*
941 * If WRITECLOSE is set, only flush out regular file vnodes
942 * open for writing.
943 */
944 if ((flags & WRITECLOSE) &&
945 (vp->v_writecount == 0 || vp->v_type != VREG))
946 continue;
888#ifdef DIAGNOSTIC
889 vprint("vrele: negative ref count", vp);
890#endif
891 panic("vrele: negative reference cnt");
892 }
893 if (vp->v_flag & VAGE) {
894 TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
895 vp->v_flag &= ~VAGE;

--- 77 unchanged lines hidden ---

973 continue;
974 /*
975 * If WRITECLOSE is set, only flush out regular file vnodes
976 * open for writing.
977 */
978 if ((flags & WRITECLOSE) &&
979 (vp->v_writecount == 0 || vp->v_type != VREG))
980 continue;
981
982 if ((vp->v_usecount == 1) && vp->v_object) {
983 pager_cache(vp->v_object, FALSE);
984 }
985
947 /*
948 * With v_usecount == 0, all we need to do is clear out the
949 * vnode data structures and we are done.
950 */
951 if (vp->v_usecount == 0) {
952 vgone(vp);
953 continue;
954 }

--- 586 unchanged lines hidden ---

1541 for (vp = mp->mnt_vnodelist.lh_first; vp != NULL; vp = nvp) {
1542
1543 if (vp->v_mount != mp)
1544 goto loop;
1545 nvp = vp->v_mntvnodes.le_next;
1546 if (VOP_ISLOCKED(vp) && (flags != MNT_WAIT))
1547 continue;
1548 if (vp->v_object &&
986 /*
987 * With v_usecount == 0, all we need to do is clear out the
988 * vnode data structures and we are done.
989 */
990 if (vp->v_usecount == 0) {
991 vgone(vp);
992 continue;
993 }

--- 586 unchanged lines hidden ---

1580 for (vp = mp->mnt_vnodelist.lh_first; vp != NULL; vp = nvp) {
1581
1582 if (vp->v_mount != mp)
1583 goto loop;
1584 nvp = vp->v_mntvnodes.le_next;
1585 if (VOP_ISLOCKED(vp) && (flags != MNT_WAIT))
1586 continue;
1587 if (vp->v_object &&
1549 (((vm_object_t) vp->v_object)->flags & OBJ_MIGHTBEDIRTY)) {
1588 (vp->v_object->flags & OBJ_MIGHTBEDIRTY)) {
1550 vm_object_page_clean(vp->v_object, 0, 0, TRUE, TRUE);
1551 }
1552 }
1553}
1589 vm_object_page_clean(vp->v_object, 0, 0, TRUE, TRUE);
1590 }
1591 }
1592}
1593
1594/*
1595 * Create the VM object needed for VMIO and mmap support. This
1596 * is done for all VREG files in the system. Some filesystems might
1597 * afford the additional metadata buffering capability of the
1598 * VMIO code by making the device node be VMIO mode also.
1599 */
1600int
1601vfs_object_create(vp, p, cred, waslocked)
1602 struct vnode *vp;
1603 struct proc *p;
1604 struct ucred *cred;
1605 int waslocked;
1606{
1607 struct vattr vat;
1608 vm_object_t object;
1609 int error = 0;
1610
1611retry:
1612 if ((object = vp->v_object) == NULL) {
1613 if (vp->v_type == VREG) {
1614 if ((error = VOP_GETATTR(vp, &vat, cred, p)) != 0)
1615 goto retn;
1616 (void) vnode_pager_alloc(vp,
1617 OFF_TO_IDX(round_page(vat.va_size)), 0, 0);
1618 } else {
1619 /*
1620 * This simply allocates the biggest object possible
1621 * for a VBLK vnode. This should be fixed, but doesn't
1622 * cause any problems (yet).
1623 */
1624 (void) vnode_pager_alloc(vp, INT_MAX, 0, 0);
1625 }
1626 vp->v_object->flags |= OBJ_VFS_REF;
1627 } else {
1628 if (object->flags & OBJ_DEAD) {
1629 if (waslocked)
1630 VOP_UNLOCK(vp);
1631 tsleep(object, PVM, "vodead", 0);
1632 if (waslocked)
1633 VOP_LOCK(vp);
1634 goto retry;
1635 }
1636 if ((object->flags & OBJ_VFS_REF) == 0) {
1637 object->flags |= OBJ_VFS_REF;
1638 vm_object_reference(object);
1639 }
1640 }
1641 if (vp->v_object)
1642 vp->v_flag |= VVMIO;
1643
1644retn:
1645 return error;
1646}
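
The substance of this revision is the coupling of each VREG vnode to a VM object for VMIO and mmap support: vget() and vref() now call the new vfs_object_create() and mark the object OBJ_VFS_REF, while vrele() clears that flag and drops the paired vm_object reference once v_usecount falls back to 1. The stand-alone C sketch below is only a toy user-space model of that reference discipline, written to make the pairing easier to follow; every identifier in it (toy_vnode, toy_object, toy_vget, toy_vrele, toy_object_create) is hypothetical, and none of it is kernel code or part of the diff. In the kernel the object side is handled by vnode_pager_alloc(), vm_object_reference(), and vm_object_deallocate(), which do considerably more than adjust a counter.

/*
 * Toy user-space model of the v_usecount / OBJ_VFS_REF pairing introduced
 * in this revision.  All names are hypothetical; this is not kernel code.
 */
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_object {
	int	refcount;	/* models the vm_object reference count */
	int	vfs_ref;	/* models the OBJ_VFS_REF flag */
};

struct toy_vnode {
	int			usecount;	/* models v_usecount */
	struct toy_object	*object;	/* models v_object */
};

/* Models vfs_object_create(): create the object once, tag it for the VFS. */
static void
toy_object_create(struct toy_vnode *vp)
{
	if (vp->object == NULL) {
		vp->object = calloc(1, sizeof(*vp->object));
		assert(vp->object != NULL);
		vp->object->refcount = 1;	/* reference held on behalf of the VFS */
		vp->object->vfs_ref = 1;
	} else if (!vp->object->vfs_ref) {
		vp->object->vfs_ref = 1;
		vp->object->refcount++;		/* models vm_object_reference() */
	}
}

/* Models the vget()/vref() side: bump usecount, ensure the paired object ref. */
static void
toy_vget(struct toy_vnode *vp)
{
	vp->usecount++;
	toy_object_create(vp);
}

/* Models vrele(): drop the paired object reference when usecount falls to 1. */
static void
toy_vrele(struct toy_vnode *vp)
{
	vp->usecount--;
	if (vp->usecount == 1 && vp->object != NULL && vp->object->vfs_ref) {
		vp->object->vfs_ref = 0;
		if (--vp->object->refcount == 0) {	/* models vm_object_deallocate() */
			free(vp->object);
			vp->object = NULL;
		}
		return;
	}
	if (vp->usecount > 0)
		return;
	printf("last reference dropped; vnode would be inactivated\n");
}

int
main(void)
{
	struct toy_vnode vn = { 0, NULL };

	toy_vget(&vn);		/* first user: usecount 1, object created */
	toy_vget(&vn);		/* second user: usecount 2 */
	toy_vrele(&vn);		/* back to 1: the VFS's object reference is dropped */
	assert(vn.object == NULL || vn.object->vfs_ref == 0);
	toy_vrele(&vn);		/* final release */
	return (0);
}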