vfs_bio.c diff: revision 34646 (deleted lines) vs. revision 34694 (added lines); at each change the 34646 text is shown first, the 34694 text second.
1/*
2 * Copyright (c) 1994,1997 John S. Dyson
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice immediately at the beginning of the file, without modification,
10 * this list of conditions, and the following disclaimer.
11 * 2. Absolutely no warranty of function or purpose is made by the author
12 * John S. Dyson.
13 *
14 * $Id: vfs_bio.c,v 1.157 1998/03/17 08:41:28 kato Exp $
14 * $Id: vfs_bio.c,v 1.158 1998/03/17 17:36:05 dyson Exp $
15 */
16
17/*
18 * this file contains a new buffer I/O scheme implementing a coherent
19 * VM object and buffer cache scheme. Pains have been taken to make
20 * sure that the performance degradation associated with schemes such
21 * as this is not realized.
22 *

--- 155 unchanged lines hidden ---

178 bp = &buf[i];
179 bzero(bp, sizeof *bp);
180 bp->b_flags = B_INVAL; /* we're just an empty header */
181 bp->b_dev = NODEV;
182 bp->b_rcred = NOCRED;
183 bp->b_wcred = NOCRED;
184 bp->b_qindex = QUEUE_EMPTY;
185 bp->b_vnbufs.le_next = NOLIST;
186 bp->b_generation = 0;
187 LIST_INIT(&bp->b_dep);
188 TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
189 LIST_INSERT_HEAD(&invalhash, bp, b_hash);
190 }
191/*
192 * maxbufspace is currently calculated to support all filesystem blocks
193 * to be 8K. If you happen to use a 16K filesystem, the size of the buffer
194 * cache is still the same as it would be for 8K filesystems. This

--- 167 unchanged lines hidden ---

362
363/*
364 * Write, release buffer on completion. (Done by iodone
365 * if async.)
366 */
367int
368bwrite(struct buf * bp)
369{
186 LIST_INIT(&bp->b_dep);
187 TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
188 LIST_INSERT_HEAD(&invalhash, bp, b_hash);
189 }
190/*
191 * maxbufspace is currently calculated to support all filesystem blocks
192 * to be 8K. If you happen to use a 16K filesystem, the size of the buffer
193 * cache is still the same as it would be for 8K filesystems. This

--- 167 unchanged lines hidden ---

361
362/*
363 * Write, release buffer on completion. (Done by iodone
364 * if async.)
365 */
366int
367bwrite(struct buf * bp)
368{
370 int oldflags = bp->b_flags;
369 int oldflags;
371 struct vnode *vp;
372 struct mount *mp;
373
374
375 if (bp->b_flags & B_INVAL) {
376 brelse(bp);
377 return (0);
378 }
370 struct vnode *vp;
371 struct mount *mp;
372
373
374 if (bp->b_flags & B_INVAL) {
375 brelse(bp);
376 return (0);
377 }
378
379 oldflags = bp->b_flags;
380
379#if !defined(MAX_PERF)
381#if !defined(MAX_PERF)
380 if (!(bp->b_flags & B_BUSY))
382 if ((bp->b_flags & B_BUSY) == 0)
381 panic("bwrite: buffer is not busy???");
382#endif
383
384 bp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
385 bp->b_flags |= B_WRITEINPROG;
386
387 if ((oldflags & B_DELWRI) == B_DELWRI) {
388 --numdirtybuffers;

--- 20 unchanged lines hidden ---

409 if ((oldflags & B_ASYNC) == 0)
410 mp->mnt_stat.f_syncwrites++;
411 else
412 mp->mnt_stat.f_asyncwrites++;
413 }
414
415 if ((oldflags & B_ASYNC) == 0) {
416 int rtval = biowait(bp);
383 panic("bwrite: buffer is not busy???");
384#endif
385
386 bp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
387 bp->b_flags |= B_WRITEINPROG;
388
389 if ((oldflags & B_DELWRI) == B_DELWRI) {
390 --numdirtybuffers;

--- 20 unchanged lines hidden ---

411 if ((oldflags & B_ASYNC) == 0)
412 mp->mnt_stat.f_syncwrites++;
413 else
414 mp->mnt_stat.f_asyncwrites++;
415 }
416
417 if ((oldflags & B_ASYNC) == 0) {
418 int rtval = biowait(bp);
417
418 if (oldflags & B_DELWRI) {
419 reassignbuf(bp, bp->b_vp);
420 }
421 brelse(bp);
422 return (rtval);
423 }
424 return (0);
425}
426
427inline void
428vfs_bio_need_satisfy(void) {

--- 32 unchanged lines hidden ---

461 }
462 if (bp->b_flags & B_TAPE) {
463 bawrite(bp);
464 return;
465 }
466 bp->b_flags &= ~(B_READ|B_RELBUF);
467 if ((bp->b_flags & B_DELWRI) == 0) {
468 bp->b_flags |= B_DONE | B_DELWRI;
419 brelse(bp);
420 return (rtval);
421 }
422 return (0);
423}
424
425inline void
426vfs_bio_need_satisfy(void) {

--- 32 unchanged lines hidden ---

459 }
460 if (bp->b_flags & B_TAPE) {
461 bawrite(bp);
462 return;
463 }
464 bp->b_flags &= ~(B_READ|B_RELBUF);
465 if ((bp->b_flags & B_DELWRI) == 0) {
466 bp->b_flags |= B_DONE | B_DELWRI;
469 s = splbio();
470 reassignbuf(bp, bp->b_vp);
467 reassignbuf(bp, bp->b_vp);
471 splx(s);
472 ++numdirtybuffers;
473 }
474
475 /*
476 * This bmap keeps the system from needing to do the bmap later,
477 * perhaps when the system is attempting to do a sync. Since it
478 * is likely that the indirect block -- or whatever other datastructure
479 * that the filesystem needs is still in memory now, it is a good

--- 47 unchanged lines hidden ---

527bdirty(bp)
528 struct buf *bp;
529{
530 int s;
531
532 bp->b_flags &= ~(B_READ|B_RELBUF); /* XXX ??? check this */
533 if ((bp->b_flags & B_DELWRI) == 0) {
534 bp->b_flags |= B_DONE | B_DELWRI; /* why done? XXX JRE */
468 ++numdirtybuffers;
469 }
470
471 /*
472 * This bmap keeps the system from needing to do the bmap later,
473 * perhaps when the system is attempting to do a sync. Since it
474 * is likely that the indirect block -- or whatever other datastructure
475 * that the filesystem needs is still in memory now, it is a good

--- 47 unchanged lines hidden ---

523bdirty(bp)
524 struct buf *bp;
525{
526 int s;
527
528 bp->b_flags &= ~(B_READ|B_RELBUF); /* XXX ??? check this */
529 if ((bp->b_flags & B_DELWRI) == 0) {
530 bp->b_flags |= B_DONE | B_DELWRI; /* why done? XXX JRE */
535 s = splbio();
536 reassignbuf(bp, bp->b_vp);
531 reassignbuf(bp, bp->b_vp);
537 splx(s);
538 ++numdirtybuffers;
539 }
540}
541
542/*
543 * Asynchronous write.
544 * Start output on a buffer, but do not wait for it to complete.
545 * The buffer is released when the output completes.

--- 101 unchanged lines hidden ---

647
648 vp = bp->b_vp;
649
650 resid = bp->b_bufsize;
651 foff = bp->b_offset;
652
653 for (i = 0; i < bp->b_npages; i++) {
654 m = bp->b_pages[i];
532 ++numdirtybuffers;
533 }
534}
535
536/*
537 * Asynchronous write.
538 * Start output on a buffer, but do not wait for it to complete.
539 * The buffer is released when the output completes.

--- 101 unchanged lines hidden ---

641
642 vp = bp->b_vp;
643
644 resid = bp->b_bufsize;
645 foff = bp->b_offset;
646
647 for (i = 0; i < bp->b_npages; i++) {
648 m = bp->b_pages[i];
649 m->flags &= ~PG_ZERO;
655 if (m == bogus_page) {
656
657 obj = (vm_object_t) vp->v_object;
658 poff = OFF_TO_IDX(bp->b_offset);
659
660 for (j = i; j < bp->b_npages; j++) {
661 m = bp->b_pages[j];
662 if (m == bogus_page) {

--- 5 unchanged lines hidden ---

668#endif
669 bp->b_pages[j] = m;
670 }
671 }
672
673 if ((bp->b_flags & B_INVAL) == 0) {
674 pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
675 }
650 if (m == bogus_page) {
651
652 obj = (vm_object_t) vp->v_object;
653 poff = OFF_TO_IDX(bp->b_offset);
654
655 for (j = i; j < bp->b_npages; j++) {
656 m = bp->b_pages[j];
657 if (m == bogus_page) {

--- 5 unchanged lines hidden ---

663#endif
664 bp->b_pages[j] = m;
665 }
666 }
667
668 if ((bp->b_flags & B_INVAL) == 0) {
669 pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
670 }
676 break;
677 }
678 if (bp->b_flags & (B_NOCACHE|B_ERROR)) {
679 int poffset = foff & PAGE_MASK;
680 int presid = resid > (PAGE_SIZE - poffset) ?
681 (PAGE_SIZE - poffset) : resid;
682 vm_page_set_invalid(m, poffset, presid);
683 }
684 resid -= PAGE_SIZE;

--- 19 unchanged lines hidden ---

704 if (bp->b_bufsize == 0) {
705 bp->b_flags |= B_INVAL;
706 bp->b_qindex = QUEUE_EMPTY;
707 TAILQ_INSERT_HEAD(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
708 LIST_REMOVE(bp, b_hash);
709 LIST_INSERT_HEAD(&invalhash, bp, b_hash);
710 bp->b_dev = NODEV;
711 kvafreespace += bp->b_kvasize;
671 }
672 if (bp->b_flags & (B_NOCACHE|B_ERROR)) {
673 int poffset = foff & PAGE_MASK;
674 int presid = resid > (PAGE_SIZE - poffset) ?
675 (PAGE_SIZE - poffset) : resid;
676 vm_page_set_invalid(m, poffset, presid);
677 }
678 resid -= PAGE_SIZE;

--- 19 unchanged lines hidden ---

698 if (bp->b_bufsize == 0) {
699 bp->b_flags |= B_INVAL;
700 bp->b_qindex = QUEUE_EMPTY;
701 TAILQ_INSERT_HEAD(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
702 LIST_REMOVE(bp, b_hash);
703 LIST_INSERT_HEAD(&invalhash, bp, b_hash);
704 bp->b_dev = NODEV;
705 kvafreespace += bp->b_kvasize;
712 bp->b_generation++;
713
714 /* buffers with junk contents */
715 } else if (bp->b_flags & (B_ERROR | B_INVAL | B_NOCACHE | B_RELBUF)) {
716 bp->b_flags |= B_INVAL;
717 bp->b_qindex = QUEUE_AGE;
718 TAILQ_INSERT_HEAD(&bufqueues[QUEUE_AGE], bp, b_freelist);
719 LIST_REMOVE(bp, b_hash);
720 LIST_INSERT_HEAD(&invalhash, bp, b_hash);
721 bp->b_dev = NODEV;
706
707 /* buffers with junk contents */
708 } else if (bp->b_flags & (B_ERROR | B_INVAL | B_NOCACHE | B_RELBUF)) {
709 bp->b_flags |= B_INVAL;
710 bp->b_qindex = QUEUE_AGE;
711 TAILQ_INSERT_HEAD(&bufqueues[QUEUE_AGE], bp, b_freelist);
712 LIST_REMOVE(bp, b_hash);
713 LIST_INSERT_HEAD(&invalhash, bp, b_hash);
714 bp->b_dev = NODEV;
722 bp->b_generation++;
723
724 /* buffers that are locked */
725 } else if (bp->b_flags & B_LOCKED) {
726 bp->b_qindex = QUEUE_LOCKED;
727 TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist);
728
729 /* buffers with stale but valid contents */
730 } else if (bp->b_flags & B_AGE) {

--- 12 unchanged lines hidden ---

743 --numdirtybuffers;
744 bp->b_flags &= ~B_DELWRI;
745 }
746 vfs_bio_need_satisfy();
747 }
748
749 /* unlock */
750 bp->b_flags &= ~(B_ORDERED | B_WANTED | B_BUSY |
715
716 /* buffers that are locked */
717 } else if (bp->b_flags & B_LOCKED) {
718 bp->b_qindex = QUEUE_LOCKED;
719 TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist);
720
721 /* buffers with stale but valid contents */
722 } else if (bp->b_flags & B_AGE) {

--- 12 unchanged lines hidden ---

735 --numdirtybuffers;
736 bp->b_flags &= ~B_DELWRI;
737 }
738 vfs_bio_need_satisfy();
739 }
740
741 /* unlock */
742 bp->b_flags &= ~(B_ORDERED | B_WANTED | B_BUSY |
751 B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
743 B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
752 splx(s);
753}
754
755/*
756 * Release a buffer.
757 */
758void
759bqrelse(struct buf * bp)

--- 76 unchanged lines hidden ---

836 vm_page_test_dirty(m);
837 /*
838 * this keeps pressure off of the process memory
839 */
840 if (m->dirty == 0 && m->hold_count == 0)
841 vm_page_cache(m);
842 else
843 vm_page_deactivate(m);
744 splx(s);
745}
746
747/*
748 * Release a buffer.
749 */
750void
751bqrelse(struct buf * bp)

--- 76 unchanged lines hidden ---

828 vm_page_test_dirty(m);
829 /*
830 * this keeps pressure off of the process memory
831 */
832 if (m->dirty == 0 && m->hold_count == 0)
833 vm_page_cache(m);
834 else
835 vm_page_deactivate(m);
836 m->flags &= ~PG_ZERO;
844 } else if (m->hold_count == 0) {
845 m->flags |= PG_BUSY;
846 vm_page_protect(m, VM_PROT_NONE);
847 vm_page_free(m);
848 }
849 } else {
850 /*
851 * If async, then at least we clear the
852 * act_count.
853 */
854 m->act_count = 0;
837 } else if (m->hold_count == 0) {
838 m->flags |= PG_BUSY;
839 vm_page_protect(m, VM_PROT_NONE);
840 vm_page_free(m);
841 }
842 } else {
843 /*
844 * If async, then at least we clear the
845 * act_count.
846 */
847 m->act_count = 0;
848 m->flags &= ~PG_ZERO;
855 }
856 }
857 }
858 bufspace -= bp->b_bufsize;
859 vmiospace -= bp->b_bufsize;
860 pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages);
861 bp->b_npages = 0;
862 bp->b_bufsize = 0;

--- 74 unchanged lines hidden ---

937 if (ncl != 1) {
938 nwritten = cluster_wbuild(vp, size, lblkno, ncl);
939 splx(s);
940 return nwritten;
941 }
942 }
943
944 bremfree(bp);
849 }
850 }
851 }
852 bufspace -= bp->b_bufsize;
853 vmiospace -= bp->b_bufsize;
854 pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages);
855 bp->b_npages = 0;
856 bp->b_bufsize = 0;

--- 74 unchanged lines hidden ---

931 if (ncl != 1) {
932 nwritten = cluster_wbuild(vp, size, lblkno, ncl);
933 splx(s);
934 return nwritten;
935 }
936 }
937
938 bremfree(bp);
939 bp->b_flags |= B_BUSY | B_ASYNC;
940
945 splx(s);
946 /*
947 * default (old) behavior, writing out only one block
948 */
941 splx(s);
942 /*
943 * default (old) behavior, writing out only one block
944 */
949 bp->b_flags |= B_BUSY | B_ASYNC;
950 nwritten = bp->b_bufsize;
951 (void) VOP_BWRITE(bp);
952 return nwritten;
953}
954
955
956/*
957 * Find a buffer header which is available for use.

--- 158 unchanged lines hidden ---

1116 bp->b_flags &= ~B_ASYNC;
1117 vfs_vmio_release(bp);
1118 }
1119
1120 if (bp->b_vp)
1121 brelvp(bp);
1122
1123fillbuf:
945 nwritten = bp->b_bufsize;
946 (void) VOP_BWRITE(bp);
947 return nwritten;
948}
949
950
951/*
952 * Find a buffer header which is available for use.

--- 158 unchanged lines hidden ---

1111 bp->b_flags &= ~B_ASYNC;
1112 vfs_vmio_release(bp);
1113 }
1114
1115 if (bp->b_vp)
1116 brelvp(bp);
1117
1118fillbuf:
1124 bp->b_generation++;
1125
1126 /* we are not free, nor do we contain interesting data */
1127 if (bp->b_rcred != NOCRED) {
1128 crfree(bp->b_rcred);
1129 bp->b_rcred = NOCRED;
1130 }
1131 if (bp->b_wcred != NOCRED) {
1132 crfree(bp->b_wcred);

--- 7 unchanged lines hidden (view full) ---

1140 LIST_INSERT_HEAD(&invalhash, bp, b_hash);
1141 if (bp->b_bufsize) {
1142 allocbuf(bp, 0);
1143 }
1144 bp->b_flags = B_BUSY;
1145 bp->b_dev = NODEV;
1146 bp->b_vp = NULL;
1147 bp->b_blkno = bp->b_lblkno = 0;
1119
1120 /* we are not free, nor do we contain interesting data */
1121 if (bp->b_rcred != NOCRED) {
1122 crfree(bp->b_rcred);
1123 bp->b_rcred = NOCRED;
1124 }
1125 if (bp->b_wcred != NOCRED) {
1126 crfree(bp->b_wcred);

--- 7 unchanged lines hidden (view full) ---

1134 LIST_INSERT_HEAD(&invalhash, bp, b_hash);
1135 if (bp->b_bufsize) {
1136 allocbuf(bp, 0);
1137 }
1138 bp->b_flags = B_BUSY;
1139 bp->b_dev = NODEV;
1140 bp->b_vp = NULL;
1141 bp->b_blkno = bp->b_lblkno = 0;
1148 bp->b_offset = 0;
1142 bp->b_offset = NOOFFSET;
1149 bp->b_iodone = 0;
1150 bp->b_error = 0;
1151 bp->b_resid = 0;
1152 bp->b_bcount = 0;
1153 bp->b_npages = 0;
1154 bp->b_dirtyoff = bp->b_dirtyend = 0;
1155 bp->b_validoff = bp->b_validend = 0;
1156 bp->b_usecount = 5;

--- 197 unchanged lines hidden ---

1354 * is not cleared simply by protecting pages off.
1355 */
1356 if ((bp->b_flags & B_VMIO) &&
1357 ((object = bp->b_pages[0]->object)->flags & (OBJ_WRITEABLE|OBJ_CLEANING))) {
1358 /*
1359 * test the pages to see if they have been modified directly
1360 * by users through the VM system.
1361 */
1143 bp->b_iodone = 0;
1144 bp->b_error = 0;
1145 bp->b_resid = 0;
1146 bp->b_bcount = 0;
1147 bp->b_npages = 0;
1148 bp->b_dirtyoff = bp->b_dirtyend = 0;
1149 bp->b_validoff = bp->b_validend = 0;
1150 bp->b_usecount = 5;

--- 197 unchanged lines hidden ---

1348 * is not cleared simply by protecting pages off.
1349 */
1350 if ((bp->b_flags & B_VMIO) &&
1351 ((object = bp->b_pages[0]->object)->flags & (OBJ_WRITEABLE|OBJ_CLEANING))) {
1352 /*
1353 * test the pages to see if they have been modified directly
1354 * by users through the VM system.
1355 */
1362 for (i = 0; i < bp->b_npages; i++)
1356 for (i = 0; i < bp->b_npages; i++) {
1357 bp->b_pages[i]->flags &= ~PG_ZERO;
1363 vm_page_test_dirty(bp->b_pages[i]);
1358 vm_page_test_dirty(bp->b_pages[i]);
1359 }
1364
1365 /*
1366 * scan forwards for the first page modified
1367 */
1368 for (i = 0; i < bp->b_npages; i++) {
1369 if (bp->b_pages[i]->dirty) {
1370 break;
1371 }

--- 51 unchanged lines hidden ---

1423
1424 s = splbio();
1425loop:
1426 if (numfreebuffers < lofreebuffers) {
1427 waitfreebuffers(slpflag, slptimeo);
1428 }
1429
1430 if ((bp = gbincore(vp, blkno))) {
1360
1361 /*
1362 * scan forwards for the first page modified
1363 */
1364 for (i = 0; i < bp->b_npages; i++) {
1365 if (bp->b_pages[i]->dirty) {
1366 break;
1367 }

--- 51 unchanged lines hidden ---

1419
1420 s = splbio();
1421loop:
1422 if (numfreebuffers < lofreebuffers) {
1423 waitfreebuffers(slpflag, slptimeo);
1424 }
1425
1426 if ((bp = gbincore(vp, blkno))) {
1431 generation = bp->b_generation;
1432loop1:
1433 if (bp->b_flags & B_BUSY) {
1434
1435 bp->b_flags |= B_WANTED;
1436 if (bp->b_usecount < BUF_MAXUSE)
1437 ++bp->b_usecount;
1438
1439 if (!tsleep(bp,
1440 (PRIBIO + 4) | slpflag, "getblk", slptimeo)) {
1427loop1:
1428 if (bp->b_flags & B_BUSY) {
1429
1430 bp->b_flags |= B_WANTED;
1431 if (bp->b_usecount < BUF_MAXUSE)
1432 ++bp->b_usecount;
1433
1434 if (!tsleep(bp,
1435 (PRIBIO + 4) | slpflag, "getblk", slptimeo)) {
1441 if (bp->b_generation != generation)
1442 goto loop;
1443 goto loop1;
1436 goto loop;
1444 }
1445
1446 splx(s);
1447 return (struct buf *) NULL;
1448 }
1449 bp->b_flags |= B_BUSY | B_CACHE;
1450 bremfree(bp);
1451
1452 /*
1453 * check for size inconsistancies (note that they shouldn't
1454 * happen but do when filesystems don't handle the size changes
1455 * correctly.) We are conservative on metadata and don't just
1456 * extend the buffer but write (if needed) and re-constitute it.
1457 */
1458
1459 if (bp->b_bcount != size) {
1437 }
1438
1439 splx(s);
1440 return (struct buf *) NULL;
1441 }
1442 bp->b_flags |= B_BUSY | B_CACHE;
1443 bremfree(bp);
1444
1445 /*
1446 * check for size inconsistancies (note that they shouldn't
1447 * happen but do when filesystems don't handle the size changes
1448 * correctly.) We are conservative on metadata and don't just
1449 * extend the buffer but write (if needed) and re-constitute it.
1450 */
1451
1452 if (bp->b_bcount != size) {
1460 bp->b_generation++;
1461 if ((bp->b_flags & B_VMIO) && (size <= bp->b_kvasize)) {
1462 allocbuf(bp, size);
1463 } else {
1464 if (bp->b_flags & B_DELWRI) {
1465 bp->b_flags |= B_NOCACHE;
1466 VOP_BWRITE(bp);
1467 } else {
1468 if (bp->b_flags & B_VMIO) {
1469 bp->b_flags |= B_RELBUF;
1470 brelse(bp);
1471 } else {
1472 bp->b_flags |= B_NOCACHE;
1473 VOP_BWRITE(bp);
1474 }
1475 }
1476 goto loop;
1477 }
1478 }
1479
1453 if ((bp->b_flags & B_VMIO) && (size <= bp->b_kvasize)) {
1454 allocbuf(bp, size);
1455 } else {
1456 if (bp->b_flags & B_DELWRI) {
1457 bp->b_flags |= B_NOCACHE;
1458 VOP_BWRITE(bp);
1459 } else {
1460 if (bp->b_flags & B_VMIO) {
1461 bp->b_flags |= B_RELBUF;
1462 brelse(bp);
1463 } else {
1464 bp->b_flags |= B_NOCACHE;
1465 VOP_BWRITE(bp);
1466 }
1467 }
1468 goto loop;
1469 }
1470 }
1471
1472#ifdef DIAGNOSTIC
1473 if (bp->b_offset == NOOFFSET)
1474 panic("getblk: no buffer offset");
1475#endif
1476
1480 /*
1481 * Check that the constituted buffer really deserves for the
1477 /*
1478 * Check that the constituted buffer really deserves for the
1482 * B_CACHE bit to be set.
1479 * B_CACHE bit to be set. B_VMIO type buffers might not
1480 * contain fully valid pages. Normal (old-style) buffers
1481 * should be fully valid.
1483 */
1482 */
1484 checksize = bp->b_bufsize;
1485 for (i = 0; i < bp->b_npages; i++) {
1486 int resid;
1487 int poffset;
1488 poffset = bp->b_offset & PAGE_MASK;
1489 resid = (checksize > (PAGE_SIZE - poffset)) ?
1490 (PAGE_SIZE - poffset) : checksize;
1491 if (!vm_page_is_valid(bp->b_pages[i], poffset, resid)) {
1492 bp->b_flags &= ~(B_CACHE | B_DONE);
1493 break;
1483 if (bp->b_flags & B_VMIO) {
1484 checksize = bp->b_bufsize;
1485 for (i = 0; i < bp->b_npages; i++) {
1486 int resid;
1487 int poffset;
1488 poffset = bp->b_offset & PAGE_MASK;
1489 resid = (checksize > (PAGE_SIZE - poffset)) ?
1490 (PAGE_SIZE - poffset) : checksize;
1491 if (!vm_page_is_valid(bp->b_pages[i], poffset, resid)) {
1492 bp->b_flags &= ~(B_CACHE | B_DONE);
1493 break;
1494 }
1495 checksize -= resid;
1494 }
1496 }
1495 checksize -= resid;
1496 }
1497
1498 if (bp->b_usecount < BUF_MAXUSE)
1499 ++bp->b_usecount;
1500 splx(s);
1501 return (bp);
1502 } else {
1503 vm_object_t obj;

--- 19 unchanged lines hidden ---

1523 goto loop;
1524 }
1525
1526 /*
1527 * Insert the buffer into the hash, so that it can
1528 * be found by incore.
1529 */
1530 bp->b_blkno = bp->b_lblkno = blkno;
1497 }
1498
1499 if (bp->b_usecount < BUF_MAXUSE)
1500 ++bp->b_usecount;
1501 splx(s);
1502 return (bp);
1503 } else {
1504 vm_object_t obj;

--- 19 unchanged lines hidden ---

1524 goto loop;
1525 }
1526
1527 /*
1528 * Insert the buffer into the hash, so that it can
1529 * be found by incore.
1530 */
1531 bp->b_blkno = bp->b_lblkno = blkno;
1532
1531 if (vp->v_type != VBLK)
1532 bp->b_offset = (off_t) blkno * maxsize;
1533 else
1534 bp->b_offset = (off_t) blkno * DEV_BSIZE;
1535
1536 bgetvp(vp, bp);
1537 LIST_REMOVE(bp, b_hash);
1538 bh = BUFHASH(vp, blkno);

--- 200 unchanged lines hidden ---

1739
1740 if (bp->b_npages < desiredpages) {
1741 obj = vp->v_object;
1742 tinc = PAGE_SIZE;
1743 if (tinc > bsize)
1744 tinc = bsize;
1745
1746 off = bp->b_offset;
1533 if (vp->v_type != VBLK)
1534 bp->b_offset = (off_t) blkno * maxsize;
1535 else
1536 bp->b_offset = (off_t) blkno * DEV_BSIZE;
1537
1538 bgetvp(vp, bp);
1539 LIST_REMOVE(bp, b_hash);
1540 bh = BUFHASH(vp, blkno);

--- 200 unchanged lines hidden ---

1741
1742 if (bp->b_npages < desiredpages) {
1743 obj = vp->v_object;
1744 tinc = PAGE_SIZE;
1745 if (tinc > bsize)
1746 tinc = bsize;
1747
1748 off = bp->b_offset;
1749#ifdef DIAGNOSTIC
1750 if (bp->b_offset == NOOFFSET)
1751 panic("allocbuf: no buffer offset");
1752#endif
1753
1747 curbpnpages = bp->b_npages;
1748 doretry:
1749 bp->b_validoff = orig_validoff;
1750 bp->b_validend = orig_validend;
1751 bp->b_flags |= B_CACHE;
1752 for (toff = 0; toff < newbsize; toff += tinc) {
1753 int bytesinpage;
1754

--- 41 unchanged lines hidden ---

1796 (cnt.v_free_min + cnt.v_cache_min))) {
1797 pagedaemon_wakeup();
1798 }
1799 bytesinpage = tinc;
1800 if (tinc > (newbsize - toff))
1801 bytesinpage = newbsize - toff;
1802 if (bp->b_flags & B_CACHE)
1803 vfs_buf_set_valid(bp, off, toff, bytesinpage, m);
1754 curbpnpages = bp->b_npages;
1755 doretry:
1756 bp->b_validoff = orig_validoff;
1757 bp->b_validend = orig_validend;
1758 bp->b_flags |= B_CACHE;
1759 for (toff = 0; toff < newbsize; toff += tinc) {
1760 int bytesinpage;
1761

--- 41 unchanged lines hidden ---

1803 (cnt.v_free_min + cnt.v_cache_min))) {
1804 pagedaemon_wakeup();
1805 }
1806 bytesinpage = tinc;
1807 if (tinc > (newbsize - toff))
1808 bytesinpage = newbsize - toff;
1809 if (bp->b_flags & B_CACHE)
1810 vfs_buf_set_valid(bp, off, toff, bytesinpage, m);
1811 m->flags &= ~PG_ZERO;
1804 vm_page_wire(m);
1805 }
1806 bp->b_pages[pageindex] = m;
1807 curbpnpages = pageindex + 1;
1808 }
1809 if (vp->v_tag == VT_NFS &&
1810 vp->v_type != VBLK) {
1811 if (bp->b_dirtyend > 0) {

--- 73 unchanged lines hidden ---

1885#endif
1886 return;
1887 }
1888 bp->b_flags |= B_DONE;
1889
1890 if ((bp->b_flags & B_READ) == 0) {
1891 vwakeup(bp);
1892 }
1812 vm_page_wire(m);
1813 }
1814 bp->b_pages[pageindex] = m;
1815 curbpnpages = pageindex + 1;
1816 }
1817 if (vp->v_tag == VT_NFS &&
1818 vp->v_type != VBLK) {
1819 if (bp->b_dirtyend > 0) {

--- 73 unchanged lines hidden ---

1893#endif
1894 return;
1895 }
1896 bp->b_flags |= B_DONE;
1897
1898 if ((bp->b_flags & B_READ) == 0) {
1899 vwakeup(bp);
1900 }
1901
1893#ifdef BOUNCE_BUFFERS
1902#ifdef BOUNCE_BUFFERS
1894 if (bp->b_flags & B_BOUNCE)
1903 if (bp->b_flags & B_BOUNCE) {
1895 vm_bounce_free(bp);
1904 vm_bounce_free(bp);
1905 }
1896#endif
1897
1898 /* call optional completion function if requested */
1899 if (bp->b_flags & B_CALL) {
1900 bp->b_flags &= ~B_CALL;
1901 (*bp->b_iodone) (bp);
1902 splx(s);
1903 return;

--- 21 unchanged lines hidden ---

1925 }
1926
1927 if ((vp->v_flag & VOBJBUF) == 0) {
1928 panic("biodone: vnode is not setup for merged cache");
1929 }
1930#endif
1931
1932 foff = bp->b_offset;
1906#endif
1907
1908 /* call optional completion function if requested */
1909 if (bp->b_flags & B_CALL) {
1910 bp->b_flags &= ~B_CALL;
1911 (*bp->b_iodone) (bp);
1912 splx(s);
1913 return;

--- 21 unchanged lines hidden ---

1935 }
1936
1937 if ((vp->v_flag & VOBJBUF) == 0) {
1938 panic("biodone: vnode is not setup for merged cache");
1939 }
1940#endif
1941
1942 foff = bp->b_offset;
1943#ifdef DIAGNOSTIC
1944 if (bp->b_offset == NOOFFSET)
1945 panic("biodone: no buffer offset");
1946#endif
1933
1934#if !defined(MAX_PERF)
1935 if (!obj) {
1936 panic("biodone: no object");
1937 }
1938#endif
1939#if defined(VFS_BIO_DEBUG)
1940 if (obj->paging_in_progress < bp->b_npages) {

--- 30 unchanged lines hidden ---

1971 /*
1972 * In the write case, the valid and clean bits are
1973 * already changed correctly, so we only need to do this
1974 * here in the read case.
1975 */
1976 if ((bp->b_flags & B_READ) && !bogusflag && resid > 0) {
1977 vfs_page_set_valid(bp, foff, i, m);
1978 }
1947
1948#if !defined(MAX_PERF)
1949 if (!obj) {
1950 panic("biodone: no object");
1951 }
1952#endif
1953#if defined(VFS_BIO_DEBUG)
1954 if (obj->paging_in_progress < bp->b_npages) {

--- 30 unchanged lines hidden ---

1985 /*
1986 * In the write case, the valid and clean bits are
1987 * already changed correctly, so we only need to do this
1988 * here in the read case.
1989 */
1990 if ((bp->b_flags & B_READ) && !bogusflag && resid > 0) {
1991 vfs_page_set_valid(bp, foff, i, m);
1992 }
1993 m->flags &= ~PG_ZERO;
1979
1980 /*
1981 * when debugging new filesystems or buffer I/O methods, this
1982 * is the most common error that pops up. if you see this, you
1983 * have not set the page busy flag correctly!!!
1984 */
1985 if (m->busy == 0) {
1986#if !defined(MAX_PERF)

--- 115 unchanged lines hidden ---

2102 if (!m) {
2103 panic("vfs_unbusy_pages: page missing\n");
2104 }
2105#endif
2106 bp->b_pages[i] = m;
2107 pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
2108 }
2109 --obj->paging_in_progress;
1994
1995 /*
1996 * when debugging new filesystems or buffer I/O methods, this
1997 * is the most common error that pops up. if you see this, you
1998 * have not set the page busy flag correctly!!!
1999 */
2000 if (m->busy == 0) {
2001#if !defined(MAX_PERF)

--- 115 unchanged lines hidden ---

2117 if (!m) {
2118 panic("vfs_unbusy_pages: page missing\n");
2119 }
2120#endif
2121 bp->b_pages[i] = m;
2122 pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
2123 }
2124 --obj->paging_in_progress;
2125 m->flags &= ~PG_ZERO;
2110 PAGE_BWAKEUP(m);
2111 }
2112 if (obj->paging_in_progress == 0 &&
2113 (obj->flags & OBJ_PIPWNT)) {
2114 obj->flags &= ~OBJ_PIPWNT;
2115 wakeup(obj);
2116 }
2117 }

--- 88 unchanged lines hidden ---

2206 int i,s;
2207
2208 if (bp->b_flags & B_VMIO) {
2209 struct vnode *vp = bp->b_vp;
2210 vm_object_t obj = vp->v_object;
2211 vm_ooffset_t foff;
2212
2213 foff = bp->b_offset;
2126 PAGE_BWAKEUP(m);
2127 }
2128 if (obj->paging_in_progress == 0 &&
2129 (obj->flags & OBJ_PIPWNT)) {
2130 obj->flags &= ~OBJ_PIPWNT;
2131 wakeup(obj);
2132 }
2133 }

--- 88 unchanged lines hidden ---

2222 int i,s;
2223
2224 if (bp->b_flags & B_VMIO) {
2225 struct vnode *vp = bp->b_vp;
2226 vm_object_t obj = vp->v_object;
2227 vm_ooffset_t foff;
2228
2229 foff = bp->b_offset;
2230#ifdef DIAGNOSTIC
2231 if (bp->b_offset == NOOFFSET)
2232 panic("vfs_busy_pages: no buffer offset");
2233#endif
2214
2215 vfs_setdirty(bp);
2216
2217retry:
2218 for (i = 0; i < bp->b_npages; i++) {
2219 vm_page_t m = bp->b_pages[i];
2220 if (vm_page_sleep(m, "vbpage", NULL))
2221 goto retry;
2222 }
2223
2224 for (i = 0; i < bp->b_npages; i++, foff += PAGE_SIZE) {
2225 vm_page_t m = bp->b_pages[i];
2226
2234
2235 vfs_setdirty(bp);
2236
2237retry:
2238 for (i = 0; i < bp->b_npages; i++) {
2239 vm_page_t m = bp->b_pages[i];
2240 if (vm_page_sleep(m, "vbpage", NULL))
2241 goto retry;
2242 }
2243
2244 for (i = 0; i < bp->b_npages; i++, foff += PAGE_SIZE) {
2245 vm_page_t m = bp->b_pages[i];
2246
2247 m->flags &= ~PG_ZERO;
2227 if ((bp->b_flags & B_CLUSTER) == 0) {
2228 obj->paging_in_progress++;
2229 m->busy++;
2230 }
2231
2232 vm_page_protect(m, VM_PROT_NONE);
2233 if (clear_modify)
2234 vfs_page_set_valid(bp, foff, i, m);

--- 17 unchanged lines hidden ---

2252{
2253 int i;
2254
2255 if (bp->b_flags & B_VMIO) {
2256 struct vnode *vp = bp->b_vp;
2257 vm_ooffset_t foff;
2258 foff = bp->b_offset;
2259
2248 if ((bp->b_flags & B_CLUSTER) == 0) {
2249 obj->paging_in_progress++;
2250 m->busy++;
2251 }
2252
2253 vm_page_protect(m, VM_PROT_NONE);
2254 if (clear_modify)
2255 vfs_page_set_valid(bp, foff, i, m);

--- 17 unchanged lines hidden ---

2273{
2274 int i;
2275
2276 if (bp->b_flags & B_VMIO) {
2277 struct vnode *vp = bp->b_vp;
2278 vm_ooffset_t foff;
2279 foff = bp->b_offset;
2280
2281#ifdef DIAGNOSTIC
2282 if (bp->b_offset == NOOFFSET)
2283 panic("vfs_clean_pages: no buffer offset");
2284#endif
2285
2260 for (i = 0; i < bp->b_npages; i++, foff += PAGE_SIZE) {
2261 vm_page_t m = bp->b_pages[i];
2262 vfs_page_set_valid(bp, foff, i, m);
2263 }
2264 }
2265}
2266
2267void
2268vfs_bio_clrbuf(struct buf *bp) {
2269 int i;
2286 for (i = 0; i < bp->b_npages; i++, foff += PAGE_SIZE) {
2287 vm_page_t m = bp->b_pages[i];
2288 vfs_page_set_valid(bp, foff, i, m);
2289 }
2290 }
2291}
2292
2293void
2294vfs_bio_clrbuf(struct buf *bp) {
2295 int i;
2270 if( bp->b_flags & B_VMIO) {
2296 if (((bp->b_flags & (B_VMIO | B_MALLOC)) == B_VMIO) ||
2297 ((bp->b_flags & (B_VMIO | B_MALLOC)) == 0) && (bp->b_npages > 0) ) {
2271 if( (bp->b_npages == 1) && (bp->b_bufsize < PAGE_SIZE)) {
2272 int mask;
2273 mask = 0;
2274 for(i=0;i<bp->b_bufsize;i+=DEV_BSIZE)
2275 mask |= (1 << (i/DEV_BSIZE));
2298 if( (bp->b_npages == 1) && (bp->b_bufsize < PAGE_SIZE)) {
2299 int mask;
2300 mask = 0;
2301 for(i=0;i<bp->b_bufsize;i+=DEV_BSIZE)
2302 mask |= (1 << (i/DEV_BSIZE));
2276 if( bp->b_pages[0]->valid != mask) {
2303 if(((bp->b_pages[0]->flags & PG_ZERO) == 0) &&
2304 (bp->b_pages[0]->valid != mask)) {
2277 bzero(bp->b_data, bp->b_bufsize);
2278 }
2279 bp->b_pages[0]->valid = mask;
2280 bp->b_resid = 0;
2281 return;
2282 }
2283 for(i=0;i<bp->b_npages;i++) {
2284 if( bp->b_pages[i]->valid == VM_PAGE_BITS_ALL)
2285 continue;
2286 if( bp->b_pages[i]->valid == 0) {
2287 if ((bp->b_pages[i]->flags & PG_ZERO) == 0) {
2288 bzero(bp->b_data + (i << PAGE_SHIFT), PAGE_SIZE);
2289 }
2290 } else {
2291 int j;
2292 for(j=0;j<PAGE_SIZE/DEV_BSIZE;j++) {
2305 bzero(bp->b_data, bp->b_bufsize);
2306 }
2307 bp->b_pages[0]->valid = mask;
2308 bp->b_resid = 0;
2309 return;
2310 }
2311 for(i=0;i<bp->b_npages;i++) {
2312 if( bp->b_pages[i]->valid == VM_PAGE_BITS_ALL)
2313 continue;
2314 if( bp->b_pages[i]->valid == 0) {
2315 if ((bp->b_pages[i]->flags & PG_ZERO) == 0) {
2316 bzero(bp->b_data + (i << PAGE_SHIFT), PAGE_SIZE);
2317 }
2318 } else {
2319 int j;
2320 for(j=0;j<PAGE_SIZE/DEV_BSIZE;j++) {
2293 if( (bp->b_pages[i]->valid & (1<<j)) == 0)
2321 if (((bp->b_pages[i]->flags & PG_ZERO) == 0) &&
2322 (bp->b_pages[i]->valid & (1<<j)) == 0)
2294 bzero(bp->b_data + (i << PAGE_SHIFT) + j * DEV_BSIZE, DEV_BSIZE);
2295 }
2296 }
2297 bp->b_pages[i]->valid = VM_PAGE_BITS_ALL;
2323 bzero(bp->b_data + (i << PAGE_SHIFT) + j * DEV_BSIZE, DEV_BSIZE);
2324 }
2325 }
2326 bp->b_pages[i]->valid = VM_PAGE_BITS_ALL;
2327 bp->b_pages[i]->flags &= ~PG_ZERO;
2298 }
2299 bp->b_resid = 0;
2300 } else {
2301 clrbuf(bp);
2302 }
2303}
2304
2305/*

--- 107 unchanged lines hidden ---
2328 }
2329 bp->b_resid = 0;
2330 } else {
2331 clrbuf(bp);
2332 }
2333}
2334
2335/*

--- 107 unchanged lines hidden ---