--- head/sys/kern/vfs_cluster.c	(revision 145700)
+++ head/sys/kern/vfs_cluster.c	(revision 145734)
 /*-
  * Copyright (c) 1993
  *	The Regents of the University of California.  All rights reserved.
  * Modifications/enhancements:
  * 	Copyright (c) 1995 John S. Dyson.  All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions

--- 18 unchanged lines hidden ---

  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
  *	@(#)vfs_cluster.c	8.7 (Berkeley) 2/13/94
  */

 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: head/sys/kern/vfs_cluster.c 145700 2005-04-30 11:26:58Z jeff $");
+__FBSDID("$FreeBSD: head/sys/kern/vfs_cluster.c 145734 2005-05-01 01:01:17Z jeff $");

 #include "opt_debug_cluster.h"

 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/kernel.h>
 #include <sys/proc.h>
 #include <sys/bio.h>

--- 86 unchanged lines hidden ---

     * back-off on prospective read-aheads.
     */
    if (bp->b_flags & B_CACHE) {
        if (!seqcount) {
            return 0;
        } else if ((bp->b_flags & B_RAM) == 0) {
            return 0;
        } else {
-            int s;
             bp->b_flags &= ~B_RAM;
-            /*
-             * We do the spl here so that there is no window
-             * between the incore and the b_usecount increment
-             * below.  We opt to keep the spl out of the loop
-             * for efficiency.
-             */
-            s = splbio();
             VI_LOCK(vp);
             for (i = 1; i < maxra; i++) {
                 /*
                  * Stop if the buffer does not exist or it
                  * is invalid (about to go away?)
                  */
                 rbp = gbincore(&vp->v_bufobj, lblkno+i);
                 if (rbp == NULL || (rbp->b_flags & B_INVAL))
                     break;

                 /*
                  * Set another read-ahead mark so we know
                  * to check again.
                  */
                 if (((i % racluster) == (racluster - 1)) ||
                     (i == (maxra - 1)))
                     rbp->b_flags |= B_RAM;
             }
             VI_UNLOCK(vp);
-            splx(s);
             if (i >= maxra) {
                 return 0;
             }
             lblkno += i;
         }
         reqbp = bp = NULL;
     /*
      * If it isn't in the cache, then get a chunk from

--- 572 unchanged lines hidden ---

 int
 cluster_wbuild(vp, size, start_lbn, len)
     struct vnode *vp;
     long size;
     daddr_t start_lbn;
     int len;
 {
     struct buf *bp, *tbp;
-    int i, j, s;
+    int i, j;
     int totalwritten = 0;
     int dbsize = btodb(size);

     while (len > 0) {
-        s = splbio();
         /*
          * If the buffer is not delayed-write (i.e. dirty), or it
          * is delayed-write but either locked or inval, it cannot
          * partake in the clustered write.
          */
         VI_LOCK(vp);
         if ((tbp = gbincore(&vp->v_bufobj, start_lbn)) == NULL ||
             (tbp->b_vflags & BV_BKGRDINPROG)) {
             VI_UNLOCK(vp);
             ++start_lbn;
             --len;
-            splx(s);
             continue;
         }
         if (BUF_LOCK(tbp,
             LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK, VI_MTX(vp))) {
             ++start_lbn;
             --len;
-            splx(s);
             continue;
         }
         if ((tbp->b_flags & (B_INVAL | B_DELWRI)) != B_DELWRI) {
             BUF_UNLOCK(tbp);
             ++start_lbn;
             --len;
-            splx(s);
             continue;
         }
         bremfree(tbp);
         tbp->b_flags &= ~B_DONE;
-        splx(s);

         /*
          * Extra memory in the buffer, punt on this buffer.
          * XXX we could handle this in most cases, but we would
          * have to push the extra memory down to after our max
          * possible cluster size and then potentially pull it back
          * up if the cluster was terminated prematurely--too much
          * hassle.

--- 40 unchanged lines hidden ---

         pbgetvp(vp, bp);
         /*
          * From this location in the file, scan forward to see
          * if there are buffers with adjacent data that need to
          * be written as well.
          */
         for (i = 0; i < len; ++i, ++start_lbn) {
             if (i != 0) { /* If not the first buffer */
-                s = splbio();
                 /*
                  * If the adjacent data is not even in core it
                  * can't need to be written.
                  */
                 VI_LOCK(vp);
                 if ((tbp = gbincore(&vp->v_bufobj, start_lbn)) == NULL ||
                     (tbp->b_vflags & BV_BKGRDINPROG)) {
                     VI_UNLOCK(vp);
-                    splx(s);
                     break;
                 }

                 /*
                  * If it IS in core, but has different
                  * characteristics, or is locked (which
                  * means it could be undergoing a background
                  * I/O or be in a weird state), then don't
                  * cluster with it.
                  */
                 if (BUF_LOCK(tbp,
                     LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK,
-                    VI_MTX(vp))) {
-                    splx(s);
+                    VI_MTX(vp)))
                     break;
-                }

                 if ((tbp->b_flags & (B_VMIO | B_CLUSTEROK |
                     B_INVAL | B_DELWRI | B_NEEDCOMMIT))
                     != (B_DELWRI | B_CLUSTEROK |
                     (bp->b_flags & (B_VMIO | B_NEEDCOMMIT))) ||
                     tbp->b_wcred != bp->b_wcred) {
                     BUF_UNLOCK(tbp);
-                    splx(s);
                     break;
                 }

                 /*
                  * Check that the combined cluster
                  * would make sense with regard to pages
                  * and would not be too large
                  */
                 if ((tbp->b_bcount != size) ||
                     ((bp->b_blkno + (dbsize * i)) !=
                     tbp->b_blkno) ||
                     ((tbp->b_npages + bp->b_npages) >
                     (vp->v_mount->mnt_iosize_max / PAGE_SIZE))) {
                     BUF_UNLOCK(tbp);
-                    splx(s);
                     break;
                 }
                 /*
                  * Ok, it's passed all the tests,
                  * so remove it from the free list
                  * and mark it busy.  We will use it.
                  */
                 bremfree(tbp);
                 tbp->b_flags &= ~B_DONE;
-                splx(s);
             } /* end of code for non-first buffers only */
             /* check for latent dependencies to be handled */
             if ((LIST_FIRST(&tbp->b_dep)) != NULL) {
                 tbp->b_iocmd = BIO_WRITE;
                 buf_start(tbp);
             }
             /*
              * If the IO is via the VM then we do some

--- 27 unchanged lines hidden ---

                     bp->b_pages[bp->b_npages] = m;
                     bp->b_npages++;
                 }
             }
             VM_OBJECT_UNLOCK(tbp->b_bufobj->bo_object);
         }
         bp->b_bcount += size;
         bp->b_bufsize += size;

-        s = splbio();
         bundirty(tbp);
         tbp->b_flags &= ~B_DONE;
         tbp->b_ioflags &= ~BIO_ERROR;
         tbp->b_flags |= B_ASYNC;
         tbp->b_iocmd = BIO_WRITE;
         reassignbuf(tbp);        /* put on clean list */
         bufobj_wref(tbp->b_bufobj);
-        splx(s);
         BUF_KERNPROC(tbp);
         TAILQ_INSERT_TAIL(&bp->b_cluster.cluster_head,
             tbp, b_cluster.cluster_entry);
     }
 finishcluster:
     pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
         (vm_page_t *) bp->b_pages, bp->b_npages);
     if (bp->b_bufsize > bp->b_kvasize)

--- 46 unchanged lines hidden ---
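
Every hunk above applies the same pattern: a splbio()/splx() pair that once
masked block-I/O interrupts is deleted because the region it bracketed is
already serialized by the vnode interlock (VI_LOCK/VI_UNLOCK) or by a buffer
lock taken with BUF_LOCK(..., LK_INTERLOCK, VI_MTX(vp)), which releases the
interlock and acquires the buffer lock as a single operation, leaving no
window the spl bracket would have covered.  Below is a minimal user-space
sketch of that pattern, not FreeBSD code; the mutex, table, flag values, and
function name are invented for illustration.

/*
 * Sketch: the read-ahead marking loop with the spl bracket removed.
 * A single mutex (standing in for the vnode interlock) is the only
 * synchronization; the old "int s; s = splbio(); ... splx(s);" pair
 * around it is simply gone.
 */
#include <pthread.h>
#include <stddef.h>

#define NBUF    64
#define B_RAM   0x0001                  /* read-ahead mark */
#define B_INVAL 0x0002                  /* buffer going away */

struct buf {
    int b_flags;
};

static pthread_mutex_t scan_lock = PTHREAD_MUTEX_INITIALIZER; /* plays VI_LOCK's role */
static struct buf *buf_table[NBUF];     /* stand-in for gbincore() lookups */

/* Returns how far the scan got, mirroring the kernel loop's use of i. */
static int
mark_readahead(int lblkno, int maxra, int racluster)
{
    struct buf *rbp;
    int i;

    pthread_mutex_lock(&scan_lock);     /* was: s = splbio(); VI_LOCK(vp); */
    for (i = 1; i < maxra; i++) {
        /* Stop if the buffer does not exist or is invalid. */
        rbp = (lblkno + i < NBUF) ? buf_table[lblkno + i] : NULL;
        if (rbp == NULL || (rbp->b_flags & B_INVAL))
            break;
        /* Re-mark the end of each cluster-sized run. */
        if ((i % racluster) == (racluster - 1) || i == (maxra - 1))
            rbp->b_flags |= B_RAM;
    }
    pthread_mutex_unlock(&scan_lock);   /* was: VI_UNLOCK(vp); splx(s); */
    return (i);
}

The same reasoning covers cluster_wbuild(): each early-exit path there only
had splx(s) to clean up, so once the spl bracket is gone the VI_UNLOCK() or
BUF_UNLOCK() already on those paths suffices, and the BUF_LOCK() failure case
collapses from a braced block to a bare break.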