Lines matching refs:bp in sys/kern/vfs_bio.c (the leading number on each entry is the source line number)

108 static void vm_hold_free_pages(struct buf *bp, int newbsize);
109 static void vm_hold_load_pages(struct buf *bp, vm_offset_t from,
111 static void vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, vm_page_t m);
112 static void vfs_page_set_validclean(struct buf *bp, vm_ooffset_t off,
114 static void vfs_clean_pages_dirty_buf(struct buf *bp);
115 static void vfs_setdirty_locked_object(struct buf *bp);
116 static void vfs_vmio_invalidate(struct buf *bp);
117 static void vfs_vmio_truncate(struct buf *bp, int npages);
118 static void vfs_vmio_extend(struct buf *bp, int npages, int size);
126 static void bremfreel(struct buf *bp);
489 * bp's get placed back in the queues.
534 bufspace_adjust(struct buf *bp, int bufsize)
539 KASSERT((bp->b_flags & B_MALLOC) == 0,
540 ("bufspace_adjust: malloc buf %p", bp));
541 diff = bufsize - bp->b_bufsize;
551 bp->b_bufsize = bufsize;
739 bufmallocadjust(struct buf *bp, int bufsize)
743 KASSERT((bp->b_flags & B_MALLOC) != 0,
744 ("bufmallocadjust: non-malloc buf %p", bp));
745 diff = bufsize - bp->b_bufsize;
750 bp->b_bufsize = bufsize;
777 runningbufwakeup(struct buf *bp)
781 bspace = bp->b_runningbufspace;
787 bp->b_runningbufspace = 0;
832 vfs_buf_test_cache(struct buf *bp, vm_ooffset_t foff, vm_offset_t off,
837 if (bp->b_flags & B_CACHE) {
840 bp->b_flags &= ~B_CACHE;
1011 struct buf *bp;
1032 bp = &buf[i];
1033 bzero(bp, sizeof *bp);
1034 bp->b_flags = B_INVAL;
1035 bp->b_rcred = NOCRED;
1036 bp->b_wcred = NOCRED;
1037 bp->b_qindex = QUEUE_EMPTY;
1038 bp->b_xflags = 0;
1039 bp->b_data = bp->b_kvabase = unmapped_buf;
1040 LIST_INIT(&bp->b_dep);
1041 BUF_LOCKINIT(bp);
1042 TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
1136 vfs_buf_check_mapped(struct buf *bp)
1139 KASSERT(bp->b_kvabase != unmapped_buf,
1140 ("mapped buf: b_kvabase was not updated %p", bp));
1141 KASSERT(bp->b_data != unmapped_buf,
1142 ("mapped buf: b_data was not updated %p", bp));
1143 KASSERT(bp->b_data < unmapped_buf || bp->b_data >= unmapped_buf +
1144 MAXPHYS, ("b_data + b_offset unmapped %p", bp));
1148 vfs_buf_check_unmapped(struct buf *bp)
1151 KASSERT(bp->b_data == unmapped_buf,
1152 ("unmapped buf: corrupted b_data %p", bp));
1155 #define BUF_CHECK_MAPPED(bp) vfs_buf_check_mapped(bp)
1156 #define BUF_CHECK_UNMAPPED(bp) vfs_buf_check_unmapped(bp)
1158 #define BUF_CHECK_MAPPED(bp) do {} while (0)
1159 #define BUF_CHECK_UNMAPPED(bp) do {} while (0)
1163 isbufbusy(struct buf *bp)
1165 if (((bp->b_flags & B_INVAL) == 0 && BUF_ISLOCKED(bp)) ||
1166 ((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI))
1178 struct buf *bp;
1197 for (bp = &buf[nbuf]; --bp >= buf; )
1198 if (isbufbusy(bp))
1246 for (bp = &buf[nbuf]; --bp >= buf; ) {
1247 if (isbufbusy(bp)) {
1250 if (bp->b_dev == NULL) {
1252 bp->b_vp->v_mount, mnt_list);
1260 nbusy, bp, bp->b_vp, bp->b_flags,
1261 (intmax_t)bp->b_blkno,
1262 (intmax_t)bp->b_lblkno);
1263 BUF_LOCKPRINTINFO(bp);
1265 vn_printf(bp->b_vp,
1291 bpmap_qenter(struct buf *bp)
1294 BUF_CHECK_MAPPED(bp);
1297 * bp->b_data is relative to bp->b_offset, but
1298 * bp->b_offset may be offset into the first page.
1300 bp->b_data = (caddr_t)trunc_page((vm_offset_t)bp->b_data);
1301 pmap_qenter((vm_offset_t)bp->b_data, bp->b_pages, bp->b_npages);
1302 bp->b_data = (caddr_t)((vm_offset_t)bp->b_data |
1303 (vm_offset_t)(bp->b_offset & PAGE_MASK));
1312 binsfree(struct buf *bp, int qindex)
1317 BUF_ASSERT_XLOCKED(bp);
1325 if (bqisclean(bp->b_qindex))
1326 qindex = bp->b_qindex;
1335 if (bp->b_flags & B_REMFREE) {
1336 olock = bqlock(bp->b_qindex);
1338 bremfreel(bp);
1346 if (bp->b_qindex != QUEUE_NONE)
1349 bp->b_qindex = qindex;
1350 if (bp->b_flags & B_AGE)
1351 TAILQ_INSERT_HEAD(&bufqueues[bp->b_qindex], bp, b_freelist);
1353 TAILQ_INSERT_TAIL(&bufqueues[bp->b_qindex], bp, b_freelist);
1355 bq_len[bp->b_qindex]++;
1366 buf_free(struct buf *bp)
1369 if (bp->b_flags & B_REMFREE)
1370 bremfreef(bp);
1371 if (bp->b_vflags & BV_BKGRDINPROG)
1373 if (bp->b_rcred != NOCRED) {
1374 crfree(bp->b_rcred);
1375 bp->b_rcred = NOCRED;
1377 if (bp->b_wcred != NOCRED) {
1378 crfree(bp->b_wcred);
1379 bp->b_wcred = NOCRED;
1381 if (!LIST_EMPTY(&bp->b_dep))
1382 buf_deallocate(bp);
1383 bufkva_free(bp);
1384 BUF_UNLOCK(bp);
1385 uma_zfree(buf_zone, bp);
1401 struct buf *bp;
1406 bp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTY]);
1407 if (bp == NULL)
1409 bremfreel(bp);
1410 store[i] = bp;
1439 struct buf *bp;
1441 bp = uma_zalloc(buf_zone, M_NOWAIT);
1442 if (bp == NULL) {
1454 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0)
1455 panic("getnewbuf_empty: Locked buf %p on free queue.", bp);
1457 KASSERT(bp->b_vp == NULL,
1458 ("bp: %p still has vnode %p.", bp, bp->b_vp));
1459 KASSERT((bp->b_flags & (B_DELWRI | B_NOREUSE)) == 0,
1460 ("invalid buffer %p flags %#x", bp, bp->b_flags));
1461 KASSERT((bp->b_xflags & (BX_VNCLEAN|BX_VNDIRTY)) == 0,
1462 ("bp: %p still on a buffer list. xflags %X", bp, bp->b_xflags));
1463 KASSERT(bp->b_npages == 0,
1464 ("bp: %p still has %d vm pages\n", bp, bp->b_npages));
1465 KASSERT(bp->b_kvasize == 0, ("bp: %p still has kva\n", bp));
1466 KASSERT(bp->b_bufsize == 0, ("bp: %p still has bufspace\n", bp));
1468 bp->b_flags = 0;
1469 bp->b_ioflags = 0;
1470 bp->b_xflags = 0;
1471 bp->b_vflags = 0;
1472 bp->b_vp = NULL;
1473 bp->b_blkno = bp->b_lblkno = 0;
1474 bp->b_offset = NOOFFSET;
1475 bp->b_iodone = 0;
1476 bp->b_error = 0;
1477 bp->b_resid = 0;
1478 bp->b_bcount = 0;
1479 bp->b_npages = 0;
1480 bp->b_dirtyoff = bp->b_dirtyend = 0;
1481 bp->b_bufobj = NULL;
1482 bp->b_pin_count = 0;
1483 bp->b_data = bp->b_kvabase = unmapped_buf;
1484 bp->b_fsprivate1 = NULL;
1485 bp->b_fsprivate2 = NULL;
1486 bp->b_fsprivate3 = NULL;
1487 LIST_INIT(&bp->b_dep);
1489 return (bp);
1502 struct buf *bp, *nbp;
1514 while ((bp = nbp) != NULL) {
1516 * Calculate next bp (we can only use it if we do not
1519 nbp = TAILQ_NEXT(bp, b_freelist);
1525 if (kva && bp->b_kvasize == 0)
1528 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0)
1534 if ((bp->b_vflags & BV_BKGRDINPROG) != 0) {
1535 BUF_UNLOCK(bp);
1539 KASSERT(bp->b_qindex == qindex,
1540 ("getnewbuf: inconsistent queue %d bp %p", qindex, bp));
1545 bremfreel(bp);
1552 if ((bp->b_vflags & BV_BKGRDERR) != 0) {
1553 bqrelse(bp);
1558 bp->b_flags |= B_INVAL;
1559 brelse(bp);
1624 bremfree(struct buf *bp)
1627 CTR3(KTR_BUF, "bremfree(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
1628 KASSERT((bp->b_flags & B_REMFREE) == 0,
1629 ("bremfree: buffer %p already marked for delayed removal.", bp));
1630 KASSERT(bp->b_qindex != QUEUE_NONE,
1631 ("bremfree: buffer %p not on a queue.", bp));
1632 BUF_ASSERT_XLOCKED(bp);
1634 bp->b_flags |= B_REMFREE;
1644 bremfreef(struct buf *bp)
1648 qlock = bqlock(bp->b_qindex);
1650 bremfreel(bp);
1661 bremfreel(struct buf *bp)
1665 bp, bp->b_vp, bp->b_flags);
1666 KASSERT(bp->b_qindex != QUEUE_NONE,
1667 ("bremfreel: buffer %p not on a queue.", bp));
1668 if (bp->b_qindex != QUEUE_EMPTY) {
1669 BUF_ASSERT_XLOCKED(bp);
1671 mtx_assert(bqlock(bp->b_qindex), MA_OWNED);
1673 TAILQ_REMOVE(&bufqueues[bp->b_qindex], bp, b_freelist);
1675 KASSERT(bq_len[bp->b_qindex] >= 1, ("queue %d underflow",
1676 bp->b_qindex));
1677 bq_len[bp->b_qindex]--;
1679 bp->b_qindex = QUEUE_NONE;
1680 bp->b_flags &= ~B_REMFREE;
1690 bufkva_free(struct buf *bp)
1694 if (bp->b_kvasize == 0) {
1695 KASSERT(bp->b_kvabase == unmapped_buf &&
1696 bp->b_data == unmapped_buf,
1697 ("Leaked KVA space on %p", bp));
1698 } else if (buf_mapped(bp))
1699 BUF_CHECK_MAPPED(bp);
1701 BUF_CHECK_UNMAPPED(bp);
1703 if (bp->b_kvasize == 0)
1706 vmem_free(buffer_arena, (vm_offset_t)bp->b_kvabase, bp->b_kvasize);
1707 atomic_subtract_long(&bufkvaspace, bp->b_kvasize);
1709 bp->b_data = bp->b_kvabase = unmapped_buf;
1710 bp->b_kvasize = 0;
1719 bufkva_alloc(struct buf *bp, int maxsize, int gbflags)
1727 bufkva_free(bp);
1738 bp->b_kvabase = (caddr_t)addr;
1739 bp->b_kvasize = maxsize;
1740 atomic_add_long(&bufkvaspace, bp->b_kvasize);
1742 bp->b_data = unmapped_buf;
1743 BUF_CHECK_UNMAPPED(bp);
1745 bp->b_data = bp->b_kvabase;
1746 BUF_CHECK_MAPPED(bp);
1827 struct buf *bp;
1834 *bpp = bp = getblk(vp, blkno, size, 0, 0, flags);
1835 if (bp == NULL)
1839 if ((bp->b_flags & B_CACHE) == 0) {
1844 racct_add_buf(curproc, bp, 0);
1850 bp->b_iocmd = BIO_READ;
1851 bp->b_flags &= ~B_INVAL;
1852 bp->b_ioflags &= ~BIO_ERROR;
1853 if (bp->b_rcred == NOCRED && cred != NOCRED)
1854 bp->b_rcred = crhold(cred);
1855 vfs_busy_pages(bp, 0);
1856 bp->b_iooffset = dbtob(bp->b_blkno);
1857 bstrategy(bp);
1864 rv = bufwait(bp);
1866 brelse(bp);
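
The bread()/bufwait()/brelse() lines above (1827-1866) are the synchronous read path of the buffer cache. Below is a minimal caller sketch, assuming the usual five-argument bread() wrapper and that this version of breadn_flags() releases the buffer itself on error (the brelse() at source line 1866 suggests so); the helper name is hypothetical and not part of vfs_bio.c.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/ucred.h>
#include <sys/vnode.h>

static int
example_read_block(struct vnode *vp, daddr_t lbn, int size)
{
	struct buf *bp;
	int error;

	/* Returns a locked buffer; a B_CACHE hit skips the device I/O. */
	error = bread(vp, lbn, size, NOCRED, &bp);
	if (error != 0)
		return (error);		/* assumed: bp already released on error */
	/* ... consume up to bp->b_bcount bytes at bp->b_data ... */
	brelse(bp);			/* hand the buffer back to a free queue */
	return (0);
}
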
1885 bufwrite(struct buf *bp)
1892 CTR3(KTR_BUF, "bufwrite(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
1893 if ((bp->b_bufobj->bo_flag & BO_DEAD) != 0) {
1894 bp->b_flags |= B_INVAL | B_RELBUF;
1895 bp->b_flags &= ~B_CACHE;
1896 brelse(bp);
1899 if (bp->b_flags & B_INVAL) {
1900 brelse(bp);
1904 if (bp->b_flags & B_BARRIER)
1907 oldflags = bp->b_flags;
1909 BUF_ASSERT_HELD(bp);
1911 if (bp->b_pin_count > 0)
1912 bunpin_wait(bp);
1914 KASSERT(!(bp->b_vflags & BV_BKGRDINPROG),
1915 ("FFS background buffer should not get here %p", bp));
1917 vp = bp->b_vp;
1929 bufobj_wref(bp->b_bufobj);
1930 bundirty(bp);
1932 bp->b_flags &= ~B_DONE;
1933 bp->b_ioflags &= ~BIO_ERROR;
1934 bp->b_flags |= B_CACHE;
1935 bp->b_iocmd = BIO_WRITE;
1937 vfs_busy_pages(bp, 1);
1942 bp->b_runningbufspace = bp->b_bufsize;
1943 space = atomic_fetchadd_long(&runningbufspace, bp->b_runningbufspace);
1949 racct_add_buf(curproc, bp, 1);
1956 BUF_KERNPROC(bp);
1957 bp->b_iooffset = dbtob(bp->b_blkno);
1958 bstrategy(bp);
1961 int rtval = bufwait(bp);
1962 brelse(bp);
1981 bufbdflush(struct bufobj *bo, struct buf *bp)
1986 (void) VOP_FSYNC(bp->b_vp, MNT_NOWAIT, curthread);
1998 if (bp == nbp)
2031 bdwrite(struct buf *bp)
2037 CTR3(KTR_BUF, "bdwrite(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
2038 KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
2039 KASSERT((bp->b_flags & B_BARRIER) == 0,
2040 ("Barrier request in delayed write %p", bp));
2041 BUF_ASSERT_HELD(bp);
2043 if (bp->b_flags & B_INVAL) {
2044 brelse(bp);
2055 vp = bp->b_vp;
2056 bo = bp->b_bufobj;
2059 BO_BDFLUSH(bo, bp);
2064 bdirty(bp);
2069 bp->b_flags |= B_CACHE;
2080 if (vp->v_type != VCHR && bp->b_lblkno == bp->b_blkno) {
2081 VOP_BMAP(vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL, NULL);
2095 vfs_clean_pages_dirty_buf(bp);
2096 bqrelse(bp);
2123 bdirty(struct buf *bp)
2127 bp, bp->b_vp, bp->b_flags);
2128 KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
2129 KASSERT(bp->b_flags & B_REMFREE || bp->b_qindex == QUEUE_NONE,
2130 ("bdirty: buffer %p still on queue %d", bp, bp->b_qindex));
2131 BUF_ASSERT_HELD(bp);
2132 bp->b_flags &= ~(B_RELBUF);
2133 bp->b_iocmd = BIO_WRITE;
2135 if ((bp->b_flags & B_DELWRI) == 0) {
2136 bp->b_flags |= /* XXX B_DONE | */ B_DELWRI;
2137 reassignbuf(bp);
2154 bundirty(struct buf *bp)
2157 CTR3(KTR_BUF, "bundirty(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
2158 KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
2159 KASSERT(bp->b_flags & B_REMFREE || bp->b_qindex == QUEUE_NONE,
2160 ("bundirty: buffer %p still on queue %d", bp, bp->b_qindex));
2161 BUF_ASSERT_HELD(bp);
2163 if (bp->b_flags & B_DELWRI) {
2164 bp->b_flags &= ~B_DELWRI;
2165 reassignbuf(bp);
2171 bp->b_flags &= ~B_DEFERRED;
2184 bawrite(struct buf *bp)
2187 bp->b_flags |= B_ASYNC;
2188 (void) bwrite(bp);
2201 babarrierwrite(struct buf *bp)
2204 bp->b_flags |= B_ASYNC | B_BARRIER;
2205 (void) bwrite(bp);
2218 bbarrierwrite(struct buf *bp)
2221 bp->b_flags |= B_BARRIER;
2222 return (bwrite(bp));
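
bdwrite(), bawrite(), babarrierwrite() and bbarrierwrite() above are thin variations on bwrite(). A hedged sketch of choosing between the three common flavours; the helper and its arguments are illustrative only, not an interface of this file.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>

static int
example_write_block(struct buf *bp, bool delayed, bool async)
{
	if (delayed) {
		/*
		 * Marks B_DELWRI via bdirty() and requeues the buffer;
		 * the buf daemon flushes it later.
		 */
		bdwrite(bp);
		return (0);
	}
	if (async) {
		/* Sets B_ASYNC and starts the write without waiting. */
		bawrite(bp);
		return (0);
	}
	/* Synchronous: starts the I/O and waits for it via bufwait(). */
	return (bwrite(bp));
}
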
2267 brelse(struct buf *bp)
2272 * Many functions erroneously call brelse with a NULL bp under rare
2273 * error conditions. Simply return when called with a NULL bp.
2275 if (bp == NULL)
2278 bp, bp->b_vp, bp->b_flags);
2279 KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)),
2280 ("brelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp));
2281 KASSERT((bp->b_flags & B_VMIO) != 0 || (bp->b_flags & B_NOREUSE) == 0,
2284 if (BUF_LOCKRECURSED(bp)) {
2289 BUF_UNLOCK(bp);
2293 if (bp->b_flags & B_MANAGED) {
2294 bqrelse(bp);
2298 if ((bp->b_vflags & (BV_BKGRDINPROG | BV_BKGRDERR)) == BV_BKGRDERR) {
2299 BO_LOCK(bp->b_bufobj);
2300 bp->b_vflags &= ~BV_BKGRDERR;
2301 BO_UNLOCK(bp->b_bufobj);
2302 bdirty(bp);
2304 if (bp->b_iocmd == BIO_WRITE && (bp->b_ioflags & BIO_ERROR) &&
2305 !(bp->b_flags & B_INVAL)) {
2310 bp->b_ioflags &= ~BIO_ERROR;
2311 bdirty(bp);
2312 } else if ((bp->b_flags & (B_NOCACHE | B_INVAL)) ||
2313 (bp->b_ioflags & BIO_ERROR) || (bp->b_bufsize <= 0)) {
2318 bp->b_flags |= B_INVAL;
2319 if (!LIST_EMPTY(&bp->b_dep))
2320 buf_deallocate(bp);
2321 if (bp->b_flags & B_DELWRI)
2323 bp->b_flags &= ~(B_DELWRI | B_CACHE);
2324 if ((bp->b_flags & B_VMIO) == 0) {
2325 allocbuf(bp, 0);
2326 if (bp->b_vp)
2327 brelvp(bp);
2335 * because pages associated with a B_DELWRI bp are marked clean.
2340 if (bp->b_flags & B_DELWRI)
2341 bp->b_flags &= ~B_RELBUF;
2360 if ((bp->b_flags & B_VMIO) && (bp->b_flags & B_NOCACHE ||
2361 (bp->b_ioflags & BIO_ERROR && bp->b_iocmd == BIO_READ)) &&
2362 !(bp->b_vp->v_mount != NULL &&
2363 (bp->b_vp->v_mount->mnt_vfc->vfc_flags & VFCF_NETWORK) != 0 &&
2364 !vn_isdisk(bp->b_vp, NULL) && (bp->b_flags & B_DELWRI))) {
2365 vfs_vmio_invalidate(bp);
2366 allocbuf(bp, 0);
2369 if ((bp->b_flags & (B_INVAL | B_RELBUF)) != 0 ||
2370 (bp->b_flags & (B_DELWRI | B_NOREUSE)) == B_NOREUSE) {
2371 allocbuf(bp, 0);
2372 bp->b_flags &= ~B_NOREUSE;
2373 if (bp->b_vp != NULL)
2374 brelvp(bp);
2382 if (bp->b_bufsize == 0 || (bp->b_ioflags & BIO_ERROR) != 0 ||
2383 (bp->b_flags & (B_INVAL | B_NOCACHE | B_RELBUF)) != 0)
2384 bp->b_flags |= B_INVAL;
2385 if (bp->b_flags & B_INVAL) {
2386 if (bp->b_flags & B_DELWRI)
2387 bundirty(bp);
2388 if (bp->b_vp)
2389 brelvp(bp);
2393 if (bp->b_bufsize == 0) {
2394 buf_free(bp);
2398 if (bp->b_flags & (B_INVAL | B_NOCACHE | B_RELBUF) ||
2399 (bp->b_ioflags & BIO_ERROR)) {
2400 bp->b_xflags &= ~(BX_BKGRDWRITE | BX_ALTDATA);
2401 if (bp->b_vflags & BV_BKGRDINPROG)
2404 bp->b_flags |= B_AGE;
2406 } else if (bp->b_flags & B_DELWRI)
2411 binsfree(bp, qindex);
2413 bp->b_flags &= ~(B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF | B_DIRECT);
2414 if ((bp->b_flags & B_DELWRI) == 0 && (bp->b_xflags & BX_VNDIRTY))
2417 BUF_UNLOCK(bp);
2434 bqrelse(struct buf *bp)
2438 CTR3(KTR_BUF, "bqrelse(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
2439 KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)),
2440 ("bqrelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp));
2443 if (BUF_LOCKRECURSED(bp)) {
2445 BUF_UNLOCK(bp);
2448 bp->b_flags &= ~(B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
2450 if (bp->b_flags & B_MANAGED) {
2451 if (bp->b_flags & B_REMFREE)
2452 bremfreef(bp);
2457 if ((bp->b_flags & B_DELWRI) != 0 || (bp->b_vflags & (BV_BKGRDINPROG |
2459 BO_LOCK(bp->b_bufobj);
2460 bp->b_vflags &= ~BV_BKGRDERR;
2461 BO_UNLOCK(bp->b_bufobj);
2464 if ((bp->b_flags & B_DELWRI) == 0 &&
2465 (bp->b_xflags & BX_VNDIRTY))
2467 if ((bp->b_flags & B_NOREUSE) != 0) {
2468 brelse(bp);
2473 binsfree(bp, qindex);
2477 BUF_UNLOCK(bp);
2487 vfs_vmio_iodone(struct buf *bp)
2495 obj = bp->b_bufobj->bo_object;
2496 KASSERT(obj->paging_in_progress >= bp->b_npages,
2498 obj->paging_in_progress, bp->b_npages));
2500 vp = bp->b_vp;
2506 foff = bp->b_offset;
2507 KASSERT(bp->b_offset != NOOFFSET,
2508 ("vfs_vmio_iodone: bp %p has no buffer offset", bp));
2511 iosize = bp->b_bcount - bp->b_resid;
2513 for (i = 0; i < bp->b_npages; i++) {
2523 m = bp->b_pages[i];
2529 bp->b_pages[i] = m;
2530 } else if ((bp->b_iocmd == BIO_READ) && resid > 0) {
2539 vfs_page_set_valid(bp, foff, m);
2549 vm_object_pip_wakeupn(obj, bp->b_npages);
2551 if (bogus && buf_mapped(bp)) {
2552 BUF_CHECK_MAPPED(bp);
2553 pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
2554 bp->b_pages, bp->b_npages);
2562 vfs_vmio_unwire(struct buf *bp, vm_page_t m)
2576 } else if ((bp->b_flags & B_DIRECT) != 0)
2587 if ((bp->b_flags & B_NOREUSE) != 0)
2601 vfs_vmio_invalidate(struct buf *bp)
2607 if (buf_mapped(bp)) {
2608 BUF_CHECK_MAPPED(bp);
2609 pmap_qremove(trunc_page((vm_offset_t)bp->b_data), bp->b_npages);
2611 BUF_CHECK_UNMAPPED(bp);
2624 obj = bp->b_bufobj->bo_object;
2625 resid = bp->b_bufsize;
2626 poffset = bp->b_offset & PAGE_MASK;
2628 for (i = 0; i < bp->b_npages; i++) {
2629 m = bp->b_pages[i];
2632 bp->b_pages[i] = NULL;
2645 vfs_vmio_unwire(bp, m);
2650 bp->b_npages = 0;
2657 vfs_vmio_truncate(struct buf *bp, int desiredpages)
2663 if (bp->b_npages == desiredpages)
2666 if (buf_mapped(bp)) {
2667 BUF_CHECK_MAPPED(bp);
2668 pmap_qremove((vm_offset_t)trunc_page((vm_offset_t)bp->b_data) +
2669 (desiredpages << PAGE_SHIFT), bp->b_npages - desiredpages);
2671 BUF_CHECK_UNMAPPED(bp);
2672 obj = bp->b_bufobj->bo_object;
2675 for (i = desiredpages; i < bp->b_npages; i++) {
2676 m = bp->b_pages[i];
2678 bp->b_pages[i] = NULL;
2679 vfs_vmio_unwire(bp, m);
2683 bp->b_npages = desiredpages;
2690 vfs_vmio_extend(struct buf *bp, int desiredpages, int size)
2706 obj = bp->b_bufobj->bo_object;
2708 while (bp->b_npages < desiredpages) {
2719 m = vm_page_grab(obj, OFF_TO_IDX(bp->b_offset) + bp->b_npages,
2722 VM_ALLOC_COUNT(desiredpages - bp->b_npages));
2724 bp->b_flags &= ~B_CACHE;
2725 bp->b_pages[bp->b_npages] = m;
2726 ++bp->b_npages;
2743 toff = bp->b_bcount;
2744 tinc = PAGE_SIZE - ((bp->b_offset + toff) & PAGE_MASK);
2745 while ((bp->b_flags & B_CACHE) && toff < size) {
2750 pi = ((bp->b_offset & PAGE_MASK) + toff) >> PAGE_SHIFT;
2751 m = bp->b_pages[pi];
2752 vfs_buf_test_cache(bp, bp->b_offset, toff, tinc, m);
2761 if (buf_mapped(bp))
2762 bpmap_qenter(bp);
2764 BUF_CHECK_UNMAPPED(bp);
2815 vfs_bio_awrite(struct buf *bp)
2820 daddr_t lblkno = bp->b_lblkno;
2821 struct vnode *vp = bp->b_vp;
2829 gbflags = (bp->b_data == unmapped_buf) ? GB_UNMAPPED : 0;
2837 (bp->b_flags & (B_CLUSTEROK | B_INVAL)) == B_CLUSTEROK) {
2845 bp->b_blkno + ((i * size) >> DEV_BSHIFT)) == 0)
2850 bp->b_blkno - ((j * size) >> DEV_BSHIFT)) == 0)
2859 BUF_UNLOCK(bp);
2865 bremfree(bp);
2866 bp->b_flags |= B_ASYNC;
2872 nwritten = bp->b_bufsize;
2873 (void) bwrite(bp);
2884 getnewbuf_kva(struct buf *bp, int gbflags, int maxsize)
2894 if (maxsize != bp->b_kvasize &&
2895 bufkva_alloc(bp, maxsize, gbflags))
2919 struct buf *bp;
2922 bp = NULL;
2940 if ((bp = buf_alloc()) == NULL)
2942 if (getnewbuf_kva(bp, gbflags, maxsize) == 0)
2943 return (bp);
2949 if (bp != NULL) {
2950 bp->b_flags |= B_INVAL;
2951 brelse(bp);
3083 struct buf *bp;
3092 bp = NULL;
3101 bp = TAILQ_NEXT(sentinel, b_freelist);
3102 if (bp != NULL) {
3104 TAILQ_INSERT_AFTER(&bufqueues[queue], bp, sentinel,
3117 if (bp->b_qindex == QUEUE_SENTINEL || (lvp != NULL &&
3118 bp->b_vp != lvp)) {
3122 error = BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL);
3126 if (bp->b_pin_count > 0) {
3127 BUF_UNLOCK(bp);
3134 if ((bp->b_vflags & BV_BKGRDINPROG) != 0 ||
3135 (bp->b_flags & B_DELWRI) == 0) {
3136 BUF_UNLOCK(bp);
3139 if (bp->b_flags & B_INVAL) {
3140 bremfreef(bp);
3141 brelse(bp);
3146 if (!LIST_EMPTY(&bp->b_dep) && buf_countdeps(bp, 0)) {
3148 BUF_UNLOCK(bp);
3164 vp = bp->b_vp;
3166 BUF_UNLOCK(bp);
3180 bp, bp->b_vp, bp->b_flags);
3182 vfs_bio_awrite(bp);
3184 bremfree(bp);
3185 bwrite(bp);
3204 BUF_UNLOCK(bp);
3219 struct buf *bp;
3222 bp = gbincore(bo, blkno);
3224 return (bp);
3289 vfs_clean_pages_dirty_buf(struct buf *bp)
3295 if ((bp->b_flags & B_VMIO) == 0 || bp->b_bufsize == 0)
3298 foff = bp->b_offset;
3299 KASSERT(bp->b_offset != NOOFFSET,
3302 VM_OBJECT_WLOCK(bp->b_bufobj->bo_object);
3303 vfs_drain_busy_pages(bp);
3304 vfs_setdirty_locked_object(bp);
3305 for (i = 0; i < bp->b_npages; i++) {
3308 if (eoff > bp->b_offset + bp->b_bufsize)
3309 eoff = bp->b_offset + bp->b_bufsize;
3310 m = bp->b_pages[i];
3311 vfs_page_set_validclean(bp, foff, m);
3315 VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object);
3319 vfs_setdirty_locked_object(struct buf *bp)
3324 object = bp->b_bufobj->bo_object;
3339 for (i = 0; i < bp->b_npages; i++)
3340 vm_page_test_dirty(bp->b_pages[i]);
3347 for (i = 0; i < bp->b_npages; i++) {
3348 if (bp->b_pages[i]->dirty)
3351 boffset = (i << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK);
3353 for (i = bp->b_npages - 1; i >= 0; --i) {
3354 if (bp->b_pages[i]->dirty) {
3358 eoffset = ((i + 1) << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK);
3364 if (eoffset > bp->b_bcount)
3365 eoffset = bp->b_bcount;
3373 if (bp->b_dirtyoff > boffset)
3374 bp->b_dirtyoff = boffset;
3375 if (bp->b_dirtyend < eoffset)
3376 bp->b_dirtyend = eoffset;
3387 bp_unmapped_get_kva(struct buf *bp, daddr_t blkno, int size, int gbflags)
3392 need_mapping = bp->b_data == unmapped_buf &&
3394 need_kva = bp->b_kvabase == unmapped_buf &&
3395 bp->b_data == unmapped_buf &&
3400 BUF_CHECK_UNMAPPED(bp);
3402 if (need_mapping && bp->b_kvabase != unmapped_buf) {
3415 bsize = vn_isdisk(bp->b_vp, NULL) ? DEV_BSIZE : bp->b_bufobj->bo_bsize;
3421 while (bufkva_alloc(bp, maxsize, gbflags) != 0) {
3427 panic("GB_NOWAIT_BD and GB_UNMAPPED %p", bp);
3430 bufspace_wait(bp->b_vp, gbflags, 0, 0);
3435 bp->b_data = bp->b_kvabase;
3436 BUF_CHECK_MAPPED(bp);
3437 bpmap_qenter(bp);
3481 struct buf *bp;
3499 bp = gbincore(bo, blkno);
3500 if (bp != NULL) {
3511 error = BUF_TIMELOCK(bp, lockflags,
3524 else if (BUF_LOCKRECURSED(bp))
3533 if (bp->b_flags & B_INVAL)
3534 bp->b_flags &= ~B_CACHE;
3535 else if ((bp->b_flags & (B_VMIO | B_INVAL)) == 0)
3536 bp->b_flags |= B_CACHE;
3537 if (bp->b_flags & B_MANAGED)
3538 MPASS(bp->b_qindex == QUEUE_NONE);
3540 bremfree(bp);
3545 if (bp->b_bcount != size) {
3546 if ((bp->b_flags & B_VMIO) == 0 ||
3547 (size > bp->b_kvasize)) {
3548 if (bp->b_flags & B_DELWRI) {
3554 if (bp->b_pin_count > 0) {
3556 bqrelse(bp);
3559 bunpin_wait(bp);
3562 bp->b_flags |= B_NOCACHE;
3563 bwrite(bp);
3565 if (LIST_EMPTY(&bp->b_dep)) {
3566 bp->b_flags |= B_RELBUF;
3567 brelse(bp);
3569 bp->b_flags |= B_NOCACHE;
3570 bwrite(bp);
3582 bp_unmapped_get_kva(bp, blkno, size, flags);
3590 allocbuf(bp, size);
3592 KASSERT(bp->b_offset != NOOFFSET,
3622 if ((bp->b_flags & (B_CACHE|B_DELWRI)) == B_DELWRI) {
3623 bp->b_flags |= B_NOCACHE;
3624 bwrite(bp);
3627 bp->b_flags &= ~B_DONE;
3657 bp = getnewbuf(vp, slpflag, slptimeo, maxsize, flags);
3658 if (bp == NULL) {
3696 bp->b_flags |= B_INVAL;
3697 brelse(bp);
3706 bp->b_blkno = bp->b_lblkno = blkno;
3707 bp->b_offset = offset;
3708 bgetvp(vp, bp);
3719 bp->b_flags |= B_VMIO;
3720 KASSERT(vp->v_object == bp->b_bufobj->bo_object,
3722 bp, vp->v_object, bp->b_bufobj->bo_object));
3724 bp->b_flags &= ~B_VMIO;
3725 KASSERT(bp->b_bufobj->bo_object == NULL,
3727 bp, bp->b_bufobj->bo_object));
3728 BUF_CHECK_MAPPED(bp);
3731 allocbuf(bp, size);
3733 bp->b_flags &= ~B_DONE;
3735 CTR4(KTR_BUF, "getblk(%p, %ld, %d) = %p", vp, (long)blkno, size, bp);
3736 BUF_ASSERT_HELD(bp);
3738 KASSERT(bp->b_bufobj == bo,
3739 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo));
3740 return (bp);
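
getblk() above returns the locked (and possibly already cached) buffer for (vp, blkno) without initiating any I/O, which is what a writer wants when it is about to overwrite the whole block anyway. A sketch under that assumption; the helper name is invented here.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/vnode.h>

static int
example_overwrite_block(struct vnode *vp, daddr_t lbn, int size, const void *src)
{
	struct buf *bp;

	/*
	 * With slpflag, slptimeo and flags all 0, getblk() sleeps for the
	 * buffer rather than failing.
	 */
	bp = getblk(vp, lbn, size, 0, 0, 0);
	bcopy(src, bp->b_data, size);	/* whole block rewritten, so no bread() */
	return (bwrite(bp));		/* or bdwrite(bp) for a delayed write */
}
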
3750 struct buf *bp;
3754 while ((bp = getnewbuf(NULL, 0, 0, maxsize, flags)) == NULL) {
3759 allocbuf(bp, size);
3761 bp->b_flags |= B_INVAL; /* b_dep cleared by getnewbuf() */
3762 BUF_ASSERT_HELD(bp);
3763 return (bp);
3770 vfs_nonvmio_truncate(struct buf *bp, int newbsize)
3773 if (bp->b_flags & B_MALLOC) {
3778 bufmallocadjust(bp, 0);
3779 free(bp->b_data, M_BIOBUF);
3780 bp->b_data = bp->b_kvabase;
3781 bp->b_flags &= ~B_MALLOC;
3785 vm_hold_free_pages(bp, newbsize);
3786 bufspace_adjust(bp, newbsize);
3793 vfs_nonvmio_extend(struct buf *bp, int newbsize)
3808 if (bp->b_bufsize == 0 && newbsize <= PAGE_SIZE/2 &&
3810 bp->b_data = malloc(newbsize, M_BIOBUF, M_WAITOK);
3811 bp->b_flags |= B_MALLOC;
3812 bufmallocadjust(bp, newbsize);
3823 if (bp->b_flags & B_MALLOC) {
3824 origbuf = bp->b_data;
3825 origbufsize = bp->b_bufsize;
3826 bp->b_data = bp->b_kvabase;
3827 bufmallocadjust(bp, 0);
3828 bp->b_flags &= ~B_MALLOC;
3831 vm_hold_load_pages(bp, (vm_offset_t) bp->b_data + bp->b_bufsize,
3832 (vm_offset_t) bp->b_data + newbsize);
3834 bcopy(origbuf, bp->b_data, origbufsize);
3837 bufspace_adjust(bp, newbsize);
3855 allocbuf(struct buf *bp, int size)
3859 BUF_ASSERT_HELD(bp);
3861 if (bp->b_bcount == size)
3864 if (bp->b_kvasize != 0 && bp->b_kvasize < size)
3868 if ((bp->b_flags & B_VMIO) == 0) {
3869 if ((bp->b_flags & B_MALLOC) == 0)
3875 if (newbsize < bp->b_bufsize)
3876 vfs_nonvmio_truncate(bp, newbsize);
3877 else if (newbsize > bp->b_bufsize)
3878 vfs_nonvmio_extend(bp, newbsize);
3883 num_pages((bp->b_offset & PAGE_MASK) + newbsize);
3885 if (bp->b_flags & B_MALLOC)
3891 if (size == 0 || bp->b_bufsize == 0)
3892 bp->b_flags |= B_CACHE;
3894 if (newbsize < bp->b_bufsize)
3895 vfs_vmio_truncate(bp, desiredpages);
3897 else if (size > bp->b_bcount)
3898 vfs_vmio_extend(bp, desiredpages, size);
3899 bufspace_adjust(bp, newbsize);
3901 bp->b_bcount = size; /* requested buffer size. */
3908 biodone(struct bio *bp)
3914 if ((bp->bio_flags & BIO_TRANSIENT_MAPPING) != 0) {
3915 bp->bio_flags &= ~BIO_TRANSIENT_MAPPING;
3916 bp->bio_flags |= BIO_UNMAPPED;
3917 start = trunc_page((vm_offset_t)bp->bio_data);
3918 end = round_page((vm_offset_t)bp->bio_data + bp->bio_length);
3919 bp->bio_data = unmapped_buf;
3924 done = bp->bio_done;
3926 mtxp = mtx_pool_find(mtxpool_sleep, bp);
3928 bp->bio_flags |= BIO_DONE;
3929 wakeup(bp);
3932 bp->bio_flags |= BIO_DONE;
3933 done(bp);
3941 biowait(struct bio *bp, const char *wchan)
3945 mtxp = mtx_pool_find(mtxpool_sleep, bp);
3947 while ((bp->bio_flags & BIO_DONE) == 0)
3948 msleep(bp, mtxp, PRIBIO, wchan, 0);
3950 if (bp->bio_error != 0)
3951 return (bp->bio_error);
3952 if (!(bp->bio_flags & BIO_ERROR))
3958 biofinish(struct bio *bp, struct devstat *stat, int error)
3962 bp->bio_error = error;
3963 bp->bio_flags |= BIO_ERROR;
3966 devstat_end_transaction_bio(stat, bp);
3967 biodone(bp);
3978 bufwait(struct buf *bp)
3980 if (bp->b_iocmd == BIO_READ)
3981 bwait(bp, PRIBIO, "biord");
3983 bwait(bp, PRIBIO, "biowr");
3984 if (bp->b_flags & B_EINTR) {
3985 bp->b_flags &= ~B_EINTR;
3988 if (bp->b_ioflags & BIO_ERROR) {
3989 return (bp->b_error ? bp->b_error : EIO);
4002 * biodone is also responsible for setting B_CACHE in a B_VMIO bp.
4003 * In a non-VMIO bp, B_CACHE will be set on the next getblk()
4015 bufdone(struct buf *bp)
4020 CTR3(KTR_BUF, "bufdone(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
4023 KASSERT(!(bp->b_flags & B_DONE), ("biodone: bp %p already done", bp));
4024 BUF_ASSERT_HELD(bp);
4026 runningbufwakeup(bp);
4027 if (bp->b_iocmd == BIO_WRITE)
4028 dropobj = bp->b_bufobj;
4030 if (bp->b_iodone != NULL) {
4031 biodone = bp->b_iodone;
4032 bp->b_iodone = NULL;
4033 (*biodone) (bp);
4039 bufdone_finish(bp);
4046 bufdone_finish(struct buf *bp)
4048 BUF_ASSERT_HELD(bp);
4050 if (!LIST_EMPTY(&bp->b_dep))
4051 buf_complete(bp);
4053 if (bp->b_flags & B_VMIO) {
4059 if (bp->b_iocmd == BIO_READ &&
4060 !(bp->b_flags & (B_INVAL|B_NOCACHE)) &&
4061 !(bp->b_ioflags & BIO_ERROR))
4062 bp->b_flags |= B_CACHE;
4063 vfs_vmio_iodone(bp);
4071 if (bp->b_flags & B_ASYNC) {
4072 if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_RELBUF)) ||
4073 (bp->b_ioflags & BIO_ERROR))
4074 brelse(bp);
4076 bqrelse(bp);
4078 bdone(bp);
4087 vfs_unbusy_pages(struct buf *bp)
4093 runningbufwakeup(bp);
4094 if (!(bp->b_flags & B_VMIO))
4097 obj = bp->b_bufobj->bo_object;
4099 for (i = 0; i < bp->b_npages; i++) {
4100 m = bp->b_pages[i];
4102 m = vm_page_lookup(obj, OFF_TO_IDX(bp->b_offset) + i);
4105 bp->b_pages[i] = m;
4106 if (buf_mapped(bp)) {
4107 BUF_CHECK_MAPPED(bp);
4108 pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
4109 bp->b_pages, bp->b_npages);
4111 BUF_CHECK_UNMAPPED(bp);
4115 vm_object_pip_wakeupn(obj, bp->b_npages);
4128 vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, vm_page_t m)
4139 if (eoff > bp->b_offset + bp->b_bcount)
4140 eoff = bp->b_offset + bp->b_bcount;
4157 vfs_page_set_validclean(struct buf *bp, vm_ooffset_t off, vm_page_t m)
4169 if (eoff > bp->b_offset + bp->b_bcount)
4170 eoff = bp->b_offset + bp->b_bcount;
4190 vfs_drain_busy_pages(struct buf *bp)
4195 VM_OBJECT_ASSERT_WLOCKED(bp->b_bufobj->bo_object);
4197 for (i = 0; i < bp->b_npages; i++) {
4198 m = bp->b_pages[i];
4201 vm_page_sbusy(bp->b_pages[last_busied]);
4204 VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object);
4206 VM_OBJECT_WLOCK(bp->b_bufobj->bo_object);
4211 vm_page_sunbusy(bp->b_pages[i]);
4227 vfs_busy_pages(struct buf *bp, int clear_modify)
4234 if (!(bp->b_flags & B_VMIO))
4237 obj = bp->b_bufobj->bo_object;
4238 foff = bp->b_offset;
4239 KASSERT(bp->b_offset != NOOFFSET,
4242 vfs_drain_busy_pages(bp);
4243 if (bp->b_bufsize != 0)
4244 vfs_setdirty_locked_object(bp);
4246 for (i = 0; i < bp->b_npages; i++) {
4247 m = bp->b_pages[i];
4249 if ((bp->b_flags & B_CLUSTER) == 0) {
4270 vfs_page_set_validclean(bp, foff, m);
4272 (bp->b_flags & B_CACHE) == 0) {
4273 bp->b_pages[i] = bogus_page;
4279 if (bogus && buf_mapped(bp)) {
4280 BUF_CHECK_MAPPED(bp);
4281 pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
4282 bp->b_pages, bp->b_npages);
4295 vfs_bio_set_valid(struct buf *bp, int base, int size)
4300 if (!(bp->b_flags & B_VMIO))
4308 base += (bp->b_offset & PAGE_MASK);
4311 VM_OBJECT_WLOCK(bp->b_bufobj->bo_object);
4312 for (i = base / PAGE_SIZE; size > 0 && i < bp->b_npages; ++i) {
4313 m = bp->b_pages[i];
4321 VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object);
4337 vfs_bio_clrbuf(struct buf *bp)
4341 if ((bp->b_flags & (B_VMIO | B_MALLOC)) != B_VMIO) {
4342 clrbuf(bp);
4345 bp->b_flags &= ~B_INVAL;
4346 bp->b_ioflags &= ~BIO_ERROR;
4347 VM_OBJECT_WLOCK(bp->b_bufobj->bo_object);
4348 if ((bp->b_npages == 1) && (bp->b_bufsize < PAGE_SIZE) &&
4349 (bp->b_offset & PAGE_MASK) == 0) {
4350 if (bp->b_pages[0] == bogus_page)
4352 mask = (1 << (bp->b_bufsize / DEV_BSIZE)) - 1;
4353 VM_OBJECT_ASSERT_WLOCKED(bp->b_pages[0]->object);
4354 if ((bp->b_pages[0]->valid & mask) == mask)
4356 if ((bp->b_pages[0]->valid & mask) == 0) {
4357 pmap_zero_page_area(bp->b_pages[0], 0, bp->b_bufsize);
4358 bp->b_pages[0]->valid |= mask;
4362 sa = bp->b_offset & PAGE_MASK;
4364 for (i = 0; i < bp->b_npages; i++, sa = 0) {
4365 slide = imin(slide + PAGE_SIZE, bp->b_offset + bp->b_bufsize);
4369 if (bp->b_pages[i] == bogus_page)
4373 VM_OBJECT_ASSERT_WLOCKED(bp->b_pages[i]->object);
4374 if ((bp->b_pages[i]->valid & mask) == mask)
4376 if ((bp->b_pages[i]->valid & mask) == 0)
4377 pmap_zero_page_area(bp->b_pages[i], sa, ea - sa);
4380 if ((bp->b_pages[i]->valid & (1 << j)) == 0) {
4381 pmap_zero_page_area(bp->b_pages[i],
4386 bp->b_pages[i]->valid |= mask;
4389 VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object);
4390 bp->b_resid = 0;
4394 vfs_bio_bzero_buf(struct buf *bp, int base, int size)
4399 if (buf_mapped(bp)) {
4400 BUF_CHECK_MAPPED(bp);
4401 bzero(bp->b_data + base, size);
4403 BUF_CHECK_UNMAPPED(bp);
4405 for (i = base / PAGE_SIZE; size > 0 && i < bp->b_npages; ++i) {
4406 m = bp->b_pages[i];
4423 vm_hold_load_pages(struct buf *bp, vm_offset_t from, vm_offset_t to)
4429 BUF_CHECK_MAPPED(bp);
4433 index = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT;
4449 bp->b_pages[index] = p;
4451 bp->b_npages = index;
4456 vm_hold_free_pages(struct buf *bp, int newbsize)
4462 BUF_CHECK_MAPPED(bp);
4464 from = round_page((vm_offset_t)bp->b_data + newbsize);
4465 newnpages = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT;
4466 if (bp->b_npages > newnpages)
4467 pmap_qremove(from, bp->b_npages - newnpages);
4468 for (index = newnpages; index < bp->b_npages; index++) {
4469 p = bp->b_pages[index];
4470 bp->b_pages[index] = NULL;
4473 (intmax_t)bp->b_blkno, (intmax_t)bp->b_lblkno);
4478 bp->b_npages = newnpages;
4496 vmapbuf(struct buf *bp, int mapbuf)
4501 if (bp->b_bufsize < 0)
4504 if (bp->b_iocmd == BIO_READ)
4507 (vm_offset_t)bp->b_data, bp->b_bufsize, prot, bp->b_pages,
4510 bp->b_npages = pidx;
4511 bp->b_offset = ((vm_offset_t)bp->b_data) & PAGE_MASK;
4513 pmap_qenter((vm_offset_t)bp->b_kvabase, bp->b_pages, pidx);
4514 bp->b_data = bp->b_kvabase + bp->b_offset;
4516 bp->b_data = unmapped_buf;
4527 vunmapbuf(struct buf *bp)
4531 npages = bp->b_npages;
4532 if (buf_mapped(bp))
4533 pmap_qremove(trunc_page((vm_offset_t)bp->b_data), npages);
4534 vm_page_unhold_pages(bp->b_pages, npages);
4536 bp->b_data = unmapped_buf;
4540 bdone(struct buf *bp)
4544 mtxp = mtx_pool_find(mtxpool_sleep, bp);
4546 bp->b_flags |= B_DONE;
4547 wakeup(bp);
4552 bwait(struct buf *bp, u_char pri, const char *wchan)
4556 mtxp = mtx_pool_find(mtxpool_sleep, bp);
4558 while ((bp->b_flags & B_DONE) == 0)
4559 msleep(bp, mtxp, pri, wchan, 0);
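
bdone() and bwait() above are the low-level B_DONE handshake that bufwait() (source lines 3978-3989) is built on. A speculative sketch of pairing them with a private b_iodone callback; the function names are invented, and the buffer is assumed to be fully set up (locked, with b_bufobj, pages and data) before this point.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/proc.h>

static void
example_iodone(struct buf *bp)
{
	/* Called from bufdone() when the I/O completes. */
	bdone(bp);			/* set B_DONE and wake the sleeper */
}

static int
example_issue_and_wait(struct buf *bp)
{
	bp->b_iocmd = BIO_READ;
	bp->b_iodone = example_iodone;
	bp->b_iooffset = dbtob(bp->b_blkno);
	bstrategy(bp);
	bwait(bp, PRIBIO, "exdone");	/* sleep until example_iodone() runs */
	/*
	 * The caller still owns the locked buffer and must brelse() it later;
	 * b_error carries any I/O error reported by the lower layers.
	 */
	return (bp->b_error);
}
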
4571 bufstrategy(struct bufobj *bo, struct buf *bp)
4576 vp = bp->b_vp;
4579 ("Wrong vnode in bufstrategy(bp=%p, vp=%p)", bp, vp));
4580 i = VOP_STRATEGY(vp, bp);
4581 KASSERT(i == 0, ("VOP_STRATEGY failed bp=%p vp=%p", bp, bp->b_vp));
4636 bpin(struct buf *bp)
4640 mtxp = mtx_pool_find(mtxpool_sleep, bp);
4642 bp->b_pin_count++;
4647 bunpin(struct buf *bp)
4651 mtxp = mtx_pool_find(mtxpool_sleep, bp);
4653 if (--bp->b_pin_count == 0)
4654 wakeup(bp);
4659 bunpin_wait(struct buf *bp)
4663 mtxp = mtx_pool_find(mtxpool_sleep, bp);
4665 while (bp->b_pin_count > 0)
4666 msleep(bp, mtxp, PRIBIO, "bwunpin", 0);
4674 bdata2bio(struct buf *bp, struct bio *bip)
4677 if (!buf_mapped(bp)) {
4679 bip->bio_ma = bp->b_pages;
4680 bip->bio_ma_n = bp->b_npages;
4682 bip->bio_ma_offset = (vm_offset_t)bp->b_offset & PAGE_MASK;
4685 PAGE_SIZE == bp->b_npages,
4686 ("Buffer %p too short: %d %lld %d", bp, bip->bio_ma_offset,
4689 bip->bio_data = bp->b_data;
4702 struct buf *bp = (struct buf *)addr;
4709 db_printf("buf at %p\n", bp);
4711 (u_int)bp->b_flags, PRINT_BUF_FLAGS, (u_int)bp->b_xflags,
4712 PRINT_BUF_XFLAGS, (u_int)bp->b_vflags, PRINT_BUF_VFLAGS);
4717 bp->b_error, bp->b_bufsize, bp->b_bcount, bp->b_resid,
4718 bp->b_bufobj, bp->b_data, (intmax_t)bp->b_blkno,
4719 (intmax_t)bp->b_lblkno, bp->b_dep.lh_first);
4721 bp->b_kvabase, bp->b_kvasize);
4722 if (bp->b_npages) {
4724 db_printf("b_npages = %d, pages(OBJ, IDX, PA): ", bp->b_npages);
4725 for (i = 0; i < bp->b_npages; i++) {
4727 m = bp->b_pages[i];
4730 if ((i + 1) < bp->b_npages)
4736 BUF_LOCKPRINTINFO(bp);
4741 struct buf *bp;
4745 bp = &buf[i];
4746 if (BUF_ISLOCKED(bp)) {
4747 db_show_buffer((uintptr_t)bp, 1, 0, NULL);
4756 struct buf *bp;
4764 TAILQ_FOREACH(bp, &vp->v_bufobj.bo_clean.bv_hd, b_bobufs) {
4765 db_show_buffer((uintptr_t)bp, 1, 0, NULL);
4769 TAILQ_FOREACH(bp, &vp->v_bufobj.bo_dirty.bv_hd, b_bobufs) {
4770 db_show_buffer((uintptr_t)bp, 1, 0, NULL);
4777 struct buf *bp;
4786 bp = &buf[i];
4787 if (bp->b_qindex == QUEUE_EMPTY)