vfs_bio.c: deleted (34266) vs. added (34611)
1/*
2 * Copyright (c) 1994,1997 John S. Dyson
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice immediately at the beginning of the file, without modification,
10 * this list of conditions, and the following disclaimer.
11 * 2. Absolutely no warranty of function or purpose is made by the author
12 * John S. Dyson.
13 *
1/*
2 * Copyright (c) 1994,1997 John S. Dyson
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice immediately at the beginning of the file, without modification,
10 * this list of conditions, and the following disclaimer.
11 * 2. Absolutely no warranty of function or purpose is made by the author
12 * John S. Dyson.
13 *
14 * $Id: vfs_bio.c,v 1.154 1998/03/07 21:35:24 dyson Exp $
14 * $Id: vfs_bio.c,v 1.155 1998/03/08 09:57:04 julian Exp $
15 */
16
17/*
18 * this file contains a new buffer I/O scheme implementing a coherent
19 * VM object and buffer cache scheme. Pains have been taken to make
20 * sure that the performance degradation associated with schemes such
21 * as this is not realized.
22 *

--- 616 unchanged lines hidden ---

639 ) {
640
641 int i, j, resid;
642 vm_page_t m;
643 off_t foff;
644 vm_pindex_t poff;
645 vm_object_t obj;
646 struct vnode *vp;
15 */
16
17/*
18 * this file contains a new buffer I/O scheme implementing a coherent
19 * VM object and buffer cache scheme. Pains have been taken to make
20 * sure that the performance degradation associated with schemes such
21 * as this is not realized.
22 *

--- 616 unchanged lines hidden ---

639 ) {
640
641 int i, j, resid;
642 vm_page_t m;
643 off_t foff;
644 vm_pindex_t poff;
645 vm_object_t obj;
646 struct vnode *vp;
647 int blksize;
648
649 vp = bp->b_vp;
650
647
648 vp = bp->b_vp;
649
651 if (vp->v_type == VBLK)
652 blksize = DEV_BSIZE;
653 else
654 blksize = vp->v_mount->mnt_stat.f_iosize;
655
656 resid = bp->b_bufsize;
650 resid = bp->b_bufsize;
657 foff = -1LL;
651 foff = bp->b_offset;
658
659 for (i = 0; i < bp->b_npages; i++) {
660 m = bp->b_pages[i];
661 if (m == bogus_page) {
662
663 obj = (vm_object_t) vp->v_object;
652
653 for (i = 0; i < bp->b_npages; i++) {
654 m = bp->b_pages[i];
655 if (m == bogus_page) {
656
657 obj = (vm_object_t) vp->v_object;
658 poff = OFF_TO_IDX(bp->b_offset);
664
659
665 foff = (off_t) bp->b_lblkno * blksize;
666 poff = OFF_TO_IDX(foff);
667
668 for (j = i; j < bp->b_npages; j++) {
669 m = bp->b_pages[j];
670 if (m == bogus_page) {
671 m = vm_page_lookup(obj, poff + j);
672#if !defined(MAX_PERF)
673 if (!m) {
674 panic("brelse: page missing\n");
675 }
676#endif
677 bp->b_pages[j] = m;
678 }
679 }
680
681 if ((bp->b_flags & B_INVAL) == 0) {
682 pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
683 }
684 break;
685 }
686 if (bp->b_flags & (B_NOCACHE|B_ERROR)) {
660 for (j = i; j < bp->b_npages; j++) {
661 m = bp->b_pages[j];
662 if (m == bogus_page) {
663 m = vm_page_lookup(obj, poff + j);
664#if !defined(MAX_PERF)
665 if (!m) {
666 panic("brelse: page missing\n");
667 }
668#endif
669 bp->b_pages[j] = m;
670 }
671 }
672
673 if ((bp->b_flags & B_INVAL) == 0) {
674 pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
675 }
676 break;
677 }
678 if (bp->b_flags & (B_NOCACHE|B_ERROR)) {
687 if ((blksize & PAGE_MASK) == 0) {
688 vm_page_set_invalid(m, 0, resid);
689 } else {
690 if (foff == -1LL)
691 foff = (off_t) bp->b_lblkno * blksize;
692 vm_page_set_invalid(m, (vm_offset_t) foff, resid);
693 }
679 int poffset = foff & PAGE_MASK;
680 int presid = resid > (PAGE_SIZE - poffset) ?
681 (PAGE_SIZE - poffset) : resid;
682 vm_page_set_invalid(m, poffset, presid);
694 }
695 resid -= PAGE_SIZE;
696 }
697
698 if (bp->b_flags & (B_INVAL | B_RELBUF))
699 vfs_vmio_release(bp);
700
701 } else if (bp->b_flags & B_VMIO) {

--- 307 unchanged lines hidden ---

1009 panic("getnewbuf: inconsistent LRU queue, qindex=%d",
1010 bp->b_qindex);
1011#endif
1012 }
1013 if (!bp) {
1014 /* wait for a free buffer of any kind */
1015 needsbuffer |= VFS_BIO_NEED_ANY;
1016 do
683 }
684 resid -= PAGE_SIZE;
685 }
686
687 if (bp->b_flags & (B_INVAL | B_RELBUF))
688 vfs_vmio_release(bp);
689
690 } else if (bp->b_flags & B_VMIO) {

--- 307 unchanged lines hidden ---

998 panic("getnewbuf: inconsistent LRU queue, qindex=%d",
999 bp->b_qindex);
1000#endif
1001 }
1002 if (!bp) {
1003 /* wait for a free buffer of any kind */
1004 needsbuffer |= VFS_BIO_NEED_ANY;
1005 do
1017 tsleep(&needsbuffer, (PRIBIO + 1) | slpflag, "newbuf",
1006 tsleep(&needsbuffer, (PRIBIO + 4) | slpflag, "newbuf",
1018 slptimeo);
1019 while (needsbuffer & VFS_BIO_NEED_ANY);
1020 return (0);
1021 }
1022
1023#if defined(DIAGNOSTIC)
1024 if (bp->b_flags & B_BUSY) {
1025 panic("getnewbuf: busy buffer on free list\n");

--- 45 unchanged lines hidden ---

1071 }
1072
1073 /*
1074 * Certain layered filesystems can recursively re-enter the vfs_bio
1075 * code, due to delayed writes. This helps keep the system from
1076 * deadlocking.
1077 */
1078 if (writerecursion > 0) {
1007 slptimeo);
1008 while (needsbuffer & VFS_BIO_NEED_ANY);
1009 return (0);
1010 }
1011
1012#if defined(DIAGNOSTIC)
1013 if (bp->b_flags & B_BUSY) {
1014 panic("getnewbuf: busy buffer on free list\n");

--- 45 unchanged lines hidden ---

1060 }
1061
1062 /*
1063 * Certain layered filesystems can recursively re-enter the vfs_bio
1064 * code, due to delayed writes. This helps keep the system from
1065 * deadlocking.
1066 */
1067 if (writerecursion > 0) {
1079 bp = TAILQ_FIRST(&bufqueues[QUEUE_AGE]);
1080 while (bp) {
1081 if ((bp->b_flags & B_DELWRI) == 0)
1082 break;
1083 bp = TAILQ_NEXT(bp, b_freelist);
1084 }
1085 if (bp == NULL) {
1086 bp = TAILQ_FIRST(&bufqueues[QUEUE_LRU]);
1068 if (writerecursion > 5) {
1069 bp = TAILQ_FIRST(&bufqueues[QUEUE_AGE]);
1087 while (bp) {
1088 if ((bp->b_flags & B_DELWRI) == 0)
1089 break;
1090 bp = TAILQ_NEXT(bp, b_freelist);
1091 }
1070 while (bp) {
1071 if ((bp->b_flags & B_DELWRI) == 0)
1072 break;
1073 bp = TAILQ_NEXT(bp, b_freelist);
1074 }
1075 if (bp == NULL) {
1076 bp = TAILQ_FIRST(&bufqueues[QUEUE_LRU]);
1077 while (bp) {
1078 if ((bp->b_flags & B_DELWRI) == 0)
1079 break;
1080 bp = TAILQ_NEXT(bp, b_freelist);
1081 }
1082 }
1083 if (bp == NULL)
1084 panic("getnewbuf: cannot get buffer, infinite recursion failure");
1085 } else {
1086 bremfree(bp);
1087 bp->b_flags |= B_BUSY | B_AGE | B_ASYNC;
1088 nbyteswritten += bp->b_bufsize;
1089 ++writerecursion;
1090 VOP_BWRITE(bp);
1091 --writerecursion;
1092 if (!slpflag && !slptimeo) {
1093 return (0);
1094 }
1095 goto start;
1092 }
1096 }
1093 if (bp == NULL)
1094 panic("getnewbuf: cannot get buffer, infinite recursion failure");
1095 } else {
1096 ++writerecursion;
1097 nbyteswritten += vfs_bio_awrite(bp);
1098 --writerecursion;
1099 if (!slpflag && !slptimeo) {
1100 return (0);
1101 }
1102 goto start;

--- 35 unchanged lines hidden ---

1138 LIST_INSERT_HEAD(&invalhash, bp, b_hash);
1139 if (bp->b_bufsize) {
1140 allocbuf(bp, 0);
1141 }
1142 bp->b_flags = B_BUSY;
1143 bp->b_dev = NODEV;
1144 bp->b_vp = NULL;
1145 bp->b_blkno = bp->b_lblkno = 0;
1097 } else {
1098 ++writerecursion;
1099 nbyteswritten += vfs_bio_awrite(bp);
1100 --writerecursion;
1101 if (!slpflag && !slptimeo) {
1102 return (0);
1103 }
1104 goto start;

--- 35 unchanged lines hidden ---

1140 LIST_INSERT_HEAD(&invalhash, bp, b_hash);
1141 if (bp->b_bufsize) {
1142 allocbuf(bp, 0);
1143 }
1144 bp->b_flags = B_BUSY;
1145 bp->b_dev = NODEV;
1146 bp->b_vp = NULL;
1147 bp->b_blkno = bp->b_lblkno = 0;
1148 bp->b_offset = 0;
1146 bp->b_iodone = 0;
1147 bp->b_error = 0;
1148 bp->b_resid = 0;
1149 bp->b_bcount = 0;
1150 bp->b_npages = 0;
1151 bp->b_dirtyoff = bp->b_dirtyend = 0;
1152 bp->b_validoff = bp->b_validend = 0;
1153 bp->b_usecount = 5;

--- 71 unchanged lines hidden ---

1225
1226static void
1227waitfreebuffers(int slpflag, int slptimeo) {
1228 while (numfreebuffers < hifreebuffers) {
1229 flushdirtybuffers(slpflag, slptimeo);
1230 if (numfreebuffers < hifreebuffers)
1231 break;
1232 needsbuffer |= VFS_BIO_NEED_FREE;
1149 bp->b_iodone = 0;
1150 bp->b_error = 0;
1151 bp->b_resid = 0;
1152 bp->b_bcount = 0;
1153 bp->b_npages = 0;
1154 bp->b_dirtyoff = bp->b_dirtyend = 0;
1155 bp->b_validoff = bp->b_validend = 0;
1156 bp->b_usecount = 5;

--- 71 unchanged lines hidden ---

1228
1229static void
1230waitfreebuffers(int slpflag, int slptimeo) {
1231 while (numfreebuffers < hifreebuffers) {
1232 flushdirtybuffers(slpflag, slptimeo);
1233 if (numfreebuffers < hifreebuffers)
1234 break;
1235 needsbuffer |= VFS_BIO_NEED_FREE;
1233 if (tsleep(&needsbuffer, PRIBIO|slpflag, "biofre", slptimeo))
1236 if (tsleep(&needsbuffer, (PRIBIO + 4)|slpflag, "biofre", slptimeo))
1234 break;
1235 }
1236}
1237
1238static void
1239flushdirtybuffers(int slpflag, int slptimeo) {
1240 int s;
1241 static pid_t flushing = 0;
1242
1243 s = splbio();
1244
1245 if (flushing) {
1246 if (flushing == curproc->p_pid) {
1247 splx(s);
1248 return;
1249 }
1250 while (flushing) {
1237 break;
1238 }
1239}
1240
1241static void
1242flushdirtybuffers(int slpflag, int slptimeo) {
1243 int s;
1244 static pid_t flushing = 0;
1245
1246 s = splbio();
1247
1248 if (flushing) {
1249 if (flushing == curproc->p_pid) {
1250 splx(s);
1251 return;
1252 }
1253 while (flushing) {
1251 if (tsleep(&flushing, PRIBIO|slpflag, "biofls", slptimeo)) {
1254 if (tsleep(&flushing, (PRIBIO + 4)|slpflag, "biofls", slptimeo)) {
1252 splx(s);
1253 return;
1254 }
1255 }
1256 }
1257 flushing = curproc->p_pid;
1258
1259 while (numdirtybuffers > lodirtybuffers) {

--- 135 unchanged lines hidden ---

1395struct buf *
1396getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo)
1397{
1398 struct buf *bp;
1399 int i, s;
1400 struct bufhashhdr *bh;
1401 int maxsize;
1402 int generation;
1255 splx(s);
1256 return;
1257 }
1258 }
1259 }
1260 flushing = curproc->p_pid;
1261
1262 while (numdirtybuffers > lodirtybuffers) {

--- 135 unchanged lines hidden ---

1398struct buf *
1399getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo)
1400{
1401 struct buf *bp;
1402 int i, s;
1403 struct bufhashhdr *bh;
1404 int maxsize;
1405 int generation;
1406 int checksize;
1403
1404 if (vp->v_mount) {
1405 maxsize = vp->v_mount->mnt_stat.f_iosize;
1406 /*
1407 * This happens on mount points.
1408 */
1409 if (maxsize < size)
1410 maxsize = size;

--- 8 unchanged lines hidden ---

1419
1420 s = splbio();
1421loop:
1422 if (numfreebuffers < lofreebuffers) {
1423 waitfreebuffers(slpflag, slptimeo);
1424 }
1425
1426 if ((bp = gbincore(vp, blkno))) {
1407
1408 if (vp->v_mount) {
1409 maxsize = vp->v_mount->mnt_stat.f_iosize;
1410 /*
1411 * This happens on mount points.
1412 */
1413 if (maxsize < size)
1414 maxsize = size;

--- 8 unchanged lines hidden ---

1423
1424 s = splbio();
1425loop:
1426 if (numfreebuffers < lofreebuffers) {
1427 waitfreebuffers(slpflag, slptimeo);
1428 }
1429
1430 if ((bp = gbincore(vp, blkno))) {
1427loop1:
1428 generation = bp->b_generation;
1431 generation = bp->b_generation;
1432loop1:
1429 if (bp->b_flags & B_BUSY) {
1433 if (bp->b_flags & B_BUSY) {
1434
1430 bp->b_flags |= B_WANTED;
1431 if (bp->b_usecount < BUF_MAXUSE)
1432 ++bp->b_usecount;
1435 bp->b_flags |= B_WANTED;
1436 if (bp->b_usecount < BUF_MAXUSE)
1437 ++bp->b_usecount;
1438
1433 if (!tsleep(bp,
1439 if (!tsleep(bp,
1434 (PRIBIO + 1) | slpflag, "getblk", slptimeo)) {
1440 (PRIBIO + 4) | slpflag, "getblk", slptimeo)) {
1435 if (bp->b_generation != generation)
1436 goto loop;
1437 goto loop1;
1441 if (bp->b_generation != generation)
1442 goto loop;
1443 goto loop1;
1438 } else {
1439 splx(s);
1440 return (struct buf *) NULL;
1441 }
1444 }
1445
1446 splx(s);
1447 return (struct buf *) NULL;
1442 }
1443 bp->b_flags |= B_BUSY | B_CACHE;
1444 bremfree(bp);
1445
1446 /*
1447 * check for size inconsistancies (note that they shouldn't
1448 * happen but do when filesystems don't handle the size changes
1449 * correctly.) We are conservative on metadata and don't just
1448 }
1449 bp->b_flags |= B_BUSY | B_CACHE;
1450 bremfree(bp);
1451
1452 /*
1453 * check for size inconsistancies (note that they shouldn't
1454 * happen but do when filesystems don't handle the size changes
1455 * correctly.) We are conservative on metadata and don't just
1450 * extend the buffer but write and re-constitute it.
1456 * extend the buffer but write (if needed) and re-constitute it.
1451 */
1452
1453 if (bp->b_bcount != size) {
1454 bp->b_generation++;
1455 if ((bp->b_flags & B_VMIO) && (size <= bp->b_kvasize)) {
1456 allocbuf(bp, size);
1457 } else {
1458 bp->b_flags |= B_NOCACHE;
1457 */
1458
1459 if (bp->b_bcount != size) {
1460 bp->b_generation++;
1461 if ((bp->b_flags & B_VMIO) && (size <= bp->b_kvasize)) {
1462 allocbuf(bp, size);
1463 } else {
1464 bp->b_flags |= B_NOCACHE;
1459 VOP_BWRITE(bp);
1465 if (bp->b_flags & B_DELWRI) {
1466 VOP_BWRITE(bp);
1467 } else {
1468 brelse(bp);
1469 }
1460 goto loop;
1461 }
1462 }
1463
1470 goto loop;
1471 }
1472 }
1473
1474 /*
1475 * Check that the constituted buffer really deserves for the
1476 * B_CACHE bit to be set.
1477 */
1478 checksize = bp->b_bufsize;
1479 for (i = 0; i < bp->b_npages; i++) {
1480 int resid;
1481 int poffset;
1482 poffset = bp->b_offset & PAGE_MASK;
1483 resid = (checksize > (PAGE_SIZE - poffset)) ?
1484 (PAGE_SIZE - poffset) : checksize;
1485 if (!vm_page_is_valid(bp->b_pages[i], poffset, resid)) {
1486 bp->b_flags &= ~(B_CACHE | B_DONE);
1487 break;
1488 }
1489 checksize -= resid;
1490 }
1491
1464 if (bp->b_usecount < BUF_MAXUSE)
1465 ++bp->b_usecount;
1466 splx(s);
1467 return (bp);
1468 } else {
1469 vm_object_t obj;
1470
1471 if ((bp = getnewbuf(vp, blkno,

--- 17 unchanged lines hidden ---

1489 goto loop;
1490 }
1491
1492 /*
1493 * Insert the buffer into the hash, so that it can
1494 * be found by incore.
1495 */
1496 bp->b_blkno = bp->b_lblkno = blkno;
1492 if (bp->b_usecount < BUF_MAXUSE)
1493 ++bp->b_usecount;
1494 splx(s);
1495 return (bp);
1496 } else {
1497 vm_object_t obj;
1498
1499 if ((bp = getnewbuf(vp, blkno,

--- 17 unchanged lines hidden ---

1517 goto loop;
1518 }
1519
1520 /*
1521 * Insert the buffer into the hash, so that it can
1522 * be found by incore.
1523 */
1524 bp->b_blkno = bp->b_lblkno = blkno;
1525 if (vp->v_type != VBLK)
1526 bp->b_offset = (off_t) blkno * maxsize;
1527 else
1528 bp->b_offset = (off_t) blkno * DEV_BSIZE;
1529
1497 bgetvp(vp, bp);
1498 LIST_REMOVE(bp, b_hash);
1499 bh = BUFHASH(vp, blkno);
1500 LIST_INSERT_HEAD(bh, bp, b_hash);
1501
1502 if ((obj = vp->v_object) && (vp->v_flag & VOBJBUF)) {
1503 bp->b_flags |= (B_VMIO | B_CACHE);
1504#if defined(VFS_BIO_DEBUG)

--- 200 unchanged lines hidden ---

1705 else
1706 bsize = vp->v_mount->mnt_stat.f_iosize;
1707
1708 if (bp->b_npages < desiredpages) {
1709 obj = vp->v_object;
1710 tinc = PAGE_SIZE;
1711 if (tinc > bsize)
1712 tinc = bsize;
1530 bgetvp(vp, bp);
1531 LIST_REMOVE(bp, b_hash);
1532 bh = BUFHASH(vp, blkno);
1533 LIST_INSERT_HEAD(bh, bp, b_hash);
1534
1535 if ((obj = vp->v_object) && (vp->v_flag & VOBJBUF)) {
1536 bp->b_flags |= (B_VMIO | B_CACHE);
1537#if defined(VFS_BIO_DEBUG)

--- 200 unchanged lines hidden ---

1738 else
1739 bsize = vp->v_mount->mnt_stat.f_iosize;
1740
1741 if (bp->b_npages < desiredpages) {
1742 obj = vp->v_object;
1743 tinc = PAGE_SIZE;
1744 if (tinc > bsize)
1745 tinc = bsize;
1713 off = (vm_ooffset_t) bp->b_lblkno * bsize;
1746
1747 off = bp->b_offset;
1714 curbpnpages = bp->b_npages;
1715 doretry:
1716 bp->b_validoff = orig_validoff;
1717 bp->b_validend = orig_validend;
1718 bp->b_flags |= B_CACHE;
1719 for (toff = 0; toff < newbsize; toff += tinc) {
1720 int bytesinpage;
1721

--- 87 unchanged lines hidden ---

1809 s = splbio();
1810 while ((bp->b_flags & B_DONE) == 0)
1811#if defined(NO_SCHEDULE_MODS)
1812 tsleep(bp, PRIBIO, "biowait", 0);
1813#else
1814 if (bp->b_flags & B_READ)
1815 tsleep(bp, PRIBIO, "biord", 0);
1816 else
1748 curbpnpages = bp->b_npages;
1749 doretry:
1750 bp->b_validoff = orig_validoff;
1751 bp->b_validend = orig_validend;
1752 bp->b_flags |= B_CACHE;
1753 for (toff = 0; toff < newbsize; toff += tinc) {
1754 int bytesinpage;
1755

--- 87 unchanged lines hidden ---

1843 s = splbio();
1844 while ((bp->b_flags & B_DONE) == 0)
1845#if defined(NO_SCHEDULE_MODS)
1846 tsleep(bp, PRIBIO, "biowait", 0);
1847#else
1848 if (bp->b_flags & B_READ)
1849 tsleep(bp, PRIBIO, "biord", 0);
1850 else
1817 tsleep(bp, curproc->p_usrpri, "biowr", 0);
1851 tsleep(bp, PRIBIO, "biowr", 0);
1818#endif
1819 splx(s);
1820 if (bp->b_flags & B_EINTR) {
1821 bp->b_flags &= ~B_EINTR;
1822 return (EINTR);
1823 }
1824 if (bp->b_flags & B_ERROR) {
1825 return (bp->b_error ? bp->b_error : EIO);

--- 65 unchanged lines hidden ---

1891 panic("biodone: missing VM object");
1892 }
1893
1894 if ((vp->v_flag & VOBJBUF) == 0) {
1895 panic("biodone: vnode is not setup for merged cache");
1896 }
1897#endif
1898
1852#endif
1853 splx(s);
1854 if (bp->b_flags & B_EINTR) {
1855 bp->b_flags &= ~B_EINTR;
1856 return (EINTR);
1857 }
1858 if (bp->b_flags & B_ERROR) {
1859 return (bp->b_error ? bp->b_error : EIO);

--- 65 unchanged lines hidden ---

1925 panic("biodone: missing VM object");
1926 }
1927
1928 if ((vp->v_flag & VOBJBUF) == 0) {
1929 panic("biodone: vnode is not setup for merged cache");
1930 }
1931#endif
1932
1899 if (vp->v_type == VBLK)
1900 foff = (vm_ooffset_t) DEV_BSIZE * bp->b_lblkno;
1901 else
1902 foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
1933 foff = bp->b_offset;
1934
1903#if !defined(MAX_PERF)
1904 if (!obj) {
1905 panic("biodone: no object");
1906 }
1907#endif
1908#if defined(VFS_BIO_DEBUG)
1909 if (obj->paging_in_progress < bp->b_npages) {
1910 printf("biodone: paging in progress(%d) < bp->b_npages(%d)\n",

--- 20 unchanged lines hidden ---

1931#if defined(VFS_BIO_DEBUG)
1932 if (OFF_TO_IDX(foff) != m->pindex) {
1933 printf("biodone: foff(%d)/m->pindex(%d) mismatch\n", foff, m->pindex);
1934 }
1935#endif
1936 resid = IDX_TO_OFF(m->pindex + 1) - foff;
1937 if (resid > iosize)
1938 resid = iosize;
1935#if !defined(MAX_PERF)
1936 if (!obj) {
1937 panic("biodone: no object");
1938 }
1939#endif
1940#if defined(VFS_BIO_DEBUG)
1941 if (obj->paging_in_progress < bp->b_npages) {
1942 printf("biodone: paging in progress(%d) < bp->b_npages(%d)\n",

--- 20 unchanged lines hidden ---

1963#if defined(VFS_BIO_DEBUG)
1964 if (OFF_TO_IDX(foff) != m->pindex) {
1965 printf("biodone: foff(%d)/m->pindex(%d) mismatch\n", foff, m->pindex);
1966 }
1967#endif
1968 resid = IDX_TO_OFF(m->pindex + 1) - foff;
1969 if (resid > iosize)
1970 resid = iosize;
1971
1939 /*
1940 * In the write case, the valid and clean bits are
1941 * already changed correctly, so we only need to do this
1942 * here in the read case.
1943 */
1944 if ((bp->b_flags & B_READ) && !bogusflag && resid > 0) {
1945 vfs_page_set_valid(bp, foff, i, m);
1946 }

--- 108 unchanged lines hidden ---

2055void
2056vfs_unbusy_pages(struct buf * bp)
2057{
2058 int i;
2059
2060 if (bp->b_flags & B_VMIO) {
2061 struct vnode *vp = bp->b_vp;
2062 vm_object_t obj = vp->v_object;
1972 /*
1973 * In the write case, the valid and clean bits are
1974 * already changed correctly, so we only need to do this
1975 * here in the read case.
1976 */
1977 if ((bp->b_flags & B_READ) && !bogusflag && resid > 0) {
1978 vfs_page_set_valid(bp, foff, i, m);
1979 }

--- 108 unchanged lines hidden ---

2088void
2089vfs_unbusy_pages(struct buf * bp)
2090{
2091 int i;
2092
2093 if (bp->b_flags & B_VMIO) {
2094 struct vnode *vp = bp->b_vp;
2095 vm_object_t obj = vp->v_object;
2063 vm_ooffset_t foff;
2064
2096
2065 foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
2066
2067 for (i = 0; i < bp->b_npages; i++) {
2068 vm_page_t m = bp->b_pages[i];
2069
2070 if (m == bogus_page) {
2097 for (i = 0; i < bp->b_npages; i++) {
2098 vm_page_t m = bp->b_pages[i];
2099
2100 if (m == bogus_page) {
2071 m = vm_page_lookup(obj, OFF_TO_IDX(foff) + i);
2101 m = vm_page_lookup(obj, OFF_TO_IDX(bp->b_offset) + i);
2072#if !defined(MAX_PERF)
2073 if (!m) {
2074 panic("vfs_unbusy_pages: page missing\n");
2075 }
2076#endif
2077 bp->b_pages[i] = m;
2078 pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
2079 }

--- 61 unchanged lines hidden ---

2141static void
2142vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, int pageno, vm_page_t m)
2143{
2144 struct vnode *vp = bp->b_vp;
2145 vm_ooffset_t soff, eoff;
2146
2147 soff = off;
2148 eoff = off + min(PAGE_SIZE, bp->b_bufsize);
2102#if !defined(MAX_PERF)
2103 if (!m) {
2104 panic("vfs_unbusy_pages: page missing\n");
2105 }
2106#endif
2107 bp->b_pages[i] = m;
2108 pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
2109 }

--- 61 unchanged lines hidden ---

2171static void
2172vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, int pageno, vm_page_t m)
2173{
2174 struct vnode *vp = bp->b_vp;
2175 vm_ooffset_t soff, eoff;
2176
2177 soff = off;
2178 eoff = off + min(PAGE_SIZE, bp->b_bufsize);
2149 vm_page_set_invalid(m,
2150 (vm_offset_t) (soff & PAGE_MASK),
2151 (vm_offset_t) (eoff - soff));
2152 if (vp->v_tag == VT_NFS && vp->v_type != VBLK) {
2153 vm_ooffset_t sv, ev;
2179 if (vp->v_tag == VT_NFS && vp->v_type != VBLK) {
2180 vm_ooffset_t sv, ev;
2181 vm_page_set_invalid(m,
2182 (vm_offset_t) (soff & PAGE_MASK),
2183 (vm_offset_t) (eoff - soff));
2154 off = off - pageno * PAGE_SIZE;
2155 sv = off + ((bp->b_validoff + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1));
2156 ev = off + ((bp->b_validend + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1));
2157 soff = max(sv, soff);
2158 eoff = min(ev, eoff);
2159 }
2160 if (eoff > soff)
2161 vm_page_set_validclean(m,
2184 off = off - pageno * PAGE_SIZE;
2185 sv = off + ((bp->b_validoff + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1));
2186 ev = off + ((bp->b_validend + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1));
2187 soff = max(sv, soff);
2188 eoff = min(ev, eoff);
2189 }
2190 if (eoff > soff)
2191 vm_page_set_validclean(m,
2162 (vm_offset_t) (soff & PAGE_MASK),
2163 (vm_offset_t) (eoff - soff));
2192 (vm_offset_t) (soff & PAGE_MASK),
2193 (vm_offset_t) (eoff - soff));
2164}
2165
2166/*
2167 * This routine is called before a device strategy routine.
2168 * It is used to tell the VM system that paging I/O is in
2169 * progress, and treat the pages associated with the buffer
2170 * almost as being PG_BUSY. Also the object paging_in_progress
2171 * flag is handled to make sure that the object doesn't become

--- 4 unchanged lines hidden ---

2176{
2177 int i,s;
2178
2179 if (bp->b_flags & B_VMIO) {
2180 struct vnode *vp = bp->b_vp;
2181 vm_object_t obj = vp->v_object;
2182 vm_ooffset_t foff;
2183
2194}
2195
2196/*
2197 * This routine is called before a device strategy routine.
2198 * It is used to tell the VM system that paging I/O is in
2199 * progress, and treat the pages associated with the buffer
2200 * almost as being PG_BUSY. Also the object paging_in_progress
2201 * flag is handled to make sure that the object doesn't become

--- 4 unchanged lines hidden ---

2206{
2207 int i,s;
2208
2209 if (bp->b_flags & B_VMIO) {
2210 struct vnode *vp = bp->b_vp;
2211 vm_object_t obj = vp->v_object;
2212 vm_ooffset_t foff;
2213
2184 if (vp->v_type == VBLK)
2185 foff = (vm_ooffset_t) DEV_BSIZE * bp->b_lblkno;
2186 else
2187 foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
2214 foff = bp->b_offset;
2188
2189 vfs_setdirty(bp);
2190
2191retry:
2192 for (i = 0; i < bp->b_npages; i++) {
2193 vm_page_t m = bp->b_pages[i];
2194 if (vm_page_sleep(m, "vbpage", NULL))
2195 goto retry;

--- 28 unchanged lines hidden ---

2224void
2225vfs_clean_pages(struct buf * bp)
2226{
2227 int i;
2228
2229 if (bp->b_flags & B_VMIO) {
2230 struct vnode *vp = bp->b_vp;
2231 vm_ooffset_t foff;
2215
2216 vfs_setdirty(bp);
2217
2218retry:
2219 for (i = 0; i < bp->b_npages; i++) {
2220 vm_page_t m = bp->b_pages[i];
2221 if (vm_page_sleep(m, "vbpage", NULL))
2222 goto retry;

--- 28 unchanged lines hidden ---

2251void
2252vfs_clean_pages(struct buf * bp)
2253{
2254 int i;
2255
2256 if (bp->b_flags & B_VMIO) {
2257 struct vnode *vp = bp->b_vp;
2258 vm_ooffset_t foff;
2259 foff = bp->b_offset;
2232
2260
2233 if (vp->v_type == VBLK)
2234 foff = (vm_ooffset_t) DEV_BSIZE * bp->b_lblkno;
2235 else
2236 foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
2237 for (i = 0; i < bp->b_npages; i++, foff += PAGE_SIZE) {
2238 vm_page_t m = bp->b_pages[i];
2261 for (i = 0; i < bp->b_npages; i++, foff += PAGE_SIZE) {
2262 vm_page_t m = bp->b_pages[i];
2239
2240 vfs_page_set_valid(bp, foff, i, m);
2241 }
2242 }
2243}
2244
2245void
2246vfs_bio_clrbuf(struct buf *bp) {
2247 int i;

--- 19 unchanged lines hidden ---

2267 }
2268 } else {
2269 int j;
2270 for(j=0;j<PAGE_SIZE/DEV_BSIZE;j++) {
2271 if( (bp->b_pages[i]->valid & (1<<j)) == 0)
2272 bzero(bp->b_data + (i << PAGE_SHIFT) + j * DEV_BSIZE, DEV_BSIZE);
2273 }
2274 }
2263 vfs_page_set_valid(bp, foff, i, m);
2264 }
2265 }
2266}
2267
2268void
2269vfs_bio_clrbuf(struct buf *bp) {
2270 int i;

--- 19 unchanged lines hidden ---

2290 }
2291 } else {
2292 int j;
2293 for(j=0;j<PAGE_SIZE/DEV_BSIZE;j++) {
2294 if( (bp->b_pages[i]->valid & (1<<j)) == 0)
2295 bzero(bp->b_data + (i << PAGE_SHIFT) + j * DEV_BSIZE, DEV_BSIZE);
2296 }
2297 }
2275 /* bp->b_pages[i]->valid = VM_PAGE_BITS_ALL; */
2298 bp->b_pages[i]->valid = VM_PAGE_BITS_ALL;
2276 }
2277 bp->b_resid = 0;
2278 } else {
2279 clrbuf(bp);
2280 }
2281}
2282
2283/*

--- 107 unchanged lines hidden ---
2299 }
2300 bp->b_resid = 0;
2301 } else {
2302 clrbuf(bp);
2303 }
2304}
2305
2306/*

--- 107 unchanged lines hidden ---
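
The theme running through the hunks above is that consumers such as brelse(), biodone(), vfs_busy_pages(), and vfs_clean_pages() stop recomputing a buffer's byte offset from b_lblkno and the device or filesystem block size, and instead read a b_offset field that getblk() now fills in once (DEV_BSIZE units for VBLK vnodes, mnt_stat.f_iosize otherwise); brelse() additionally clamps partial-page invalidation to the page at hand via poffset/presid. The stand-alone C sketch below illustrates those two ideas only; struct fake_buf, offset_old(), setup_offset(), invalidate_clamped(), and the plain-int f_iosize are illustrative stand-ins, not the kernel's struct buf or the vfs_bio.c code itself.

/*
 * Simplified user-space sketch (assumed names, not kernel definitions).
 */
#include <stdio.h>
#include <stdint.h>

#define DEV_BSIZE 512
#define PAGE_SIZE 4096
#define PAGE_MASK (PAGE_SIZE - 1)

struct fake_buf {
	int64_t b_lblkno;	/* logical block number */
	int64_t b_offset;	/* byte offset, cached at setup time */
	int	b_bufsize;	/* bytes covered by the buffer */
};

/* Old scheme: each consumer recomputed the offset, special-casing devices. */
static int64_t
offset_old(const struct fake_buf *bp, int is_blkdev, int f_iosize)
{
	int blksize = is_blkdev ? DEV_BSIZE : f_iosize;

	return (bp->b_lblkno * (int64_t)blksize);
}

/* New scheme: compute the offset once when the buffer is constituted. */
static void
setup_offset(struct fake_buf *bp, int is_blkdev, int f_iosize)
{
	bp->b_offset = bp->b_lblkno * (int64_t)(is_blkdev ? DEV_BSIZE : f_iosize);
}

/*
 * Clamp an invalidation request to the portion that falls inside the page
 * containing foff, mirroring the poffset/presid computation the new
 * brelse() code performs before calling vm_page_set_invalid().
 */
static void
invalidate_clamped(int64_t foff, int resid)
{
	int poffset = (int)(foff & PAGE_MASK);
	int presid = resid > (PAGE_SIZE - poffset) ?
	    (PAGE_SIZE - poffset) : resid;

	printf("invalidate %d bytes starting at page offset %d\n",
	    presid, poffset);
}

int
main(void)
{
	struct fake_buf bp = { .b_lblkno = 3, .b_offset = 0, .b_bufsize = 8192 };
	int f_iosize = 8192;	/* stand-in for mnt_stat.f_iosize */

	setup_offset(&bp, 0, f_iosize);
	printf("recomputed offset %lld, cached b_offset %lld\n",
	    (long long)offset_old(&bp, 0, f_iosize), (long long)bp.b_offset);

	invalidate_clamped(bp.b_offset + 100, bp.b_bufsize);
	return (0);
}

Caching the offset once at buffer-setup time keeps every later user of the buffer in agreement about where it maps into its backing VM object and removes the repeated VBLK special case from the consumers.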