Lines matching refs:bp (xnu buffer cache, bsd/vfs/vfs_bio.c)

120 int	bcleanbuf(buf_t bp, boolean_t discard);
121 static int brecover_data(buf_t bp);
125 static void bremfree_locked(buf_t bp);
126 static void buf_reassign(buf_t bp, vnode_t newvp);
127 static errno_t buf_acquire_locked(buf_t bp, int flags, int slpflag, int slptimeo);
131 static buf_t buf_brelse_shadow(buf_t bp);
132 static void buf_free_meta_store(buf_t bp);
134 static buf_t buf_create_shadow_internal(buf_t bp, boolean_t force_copy,
194 #define binsheadfree(bp, dp, whichq) do { \
195 TAILQ_INSERT_HEAD(dp, bp, b_freelist); \
199 #define binstailfree(bp, dp, whichq) do { \
200 TAILQ_INSERT_TAIL(dp, bp, b_freelist); \
204 #define binsheadfree(bp, dp, whichq) do { \
205 TAILQ_INSERT_HEAD(dp, bp, b_freelist); \
208 #define binstailfree(bp, dp, whichq) do { \
209 TAILQ_INSERT_TAIL(dp, bp, b_freelist); \
214 #define BHASHENTCHECK(bp) \
215 if ((bp)->b_hash.le_prev != (struct buf **)0xdeadbeef) \
216 panic("%p: b_hash.le_prev is not deadbeef", (bp));
218 #define BLISTNONE(bp) \
219 (bp)->b_hash.le_next = (struct buf *)0; \
220 (bp)->b_hash.le_prev = (struct buf **)0xdeadbeef;
225 #define bufinsvn(bp, dp) LIST_INSERT_HEAD(dp, bp, b_vnbufs)
226 #define bufremvn(bp) { \
227 LIST_REMOVE(bp, b_vnbufs); \
228 (bp)->b_vnbufs.le_next = NOLIST; \
247 blistenterhead(struct bufhashhdr * head, buf_t bp)
249 if ((bp->b_hash.le_next = (head)->lh_first) != NULL)
250 (head)->lh_first->b_hash.le_prev = &(bp)->b_hash.le_next;
251 (head)->lh_first = bp;
252 bp->b_hash.le_prev = &(head)->lh_first;
253 if (bp->b_hash.le_prev == (struct buf **)0xdeadbeef)
258 binshash(buf_t bp, struct bufhashhdr *dp)
264 BHASHENTCHECK(bp);
269 if (nbp == bp)
274 blistenterhead(dp, bp);
278 bremhash(buf_t bp)
280 if (bp->b_hash.le_prev == (struct buf **)0xdeadbeef)
282 if (bp->b_hash.le_next == bp)
285 if (bp->b_hash.le_next != NULL)
286 bp->b_hash.le_next->b_hash.le_prev = bp->b_hash.le_prev;
287 *bp->b_hash.le_prev = (bp)->b_hash.le_next;
294 bmovelaundry(buf_t bp)
296 bp->b_whichq = BQ_LAUNDRY;
297 bp->b_timestamp = buf_timestamp();
298 binstailfree(bp, &bufqueues[BQ_LAUNDRY], BQ_LAUNDRY);
303 buf_release_credentials(buf_t bp)
305 if (IS_VALID_CRED(bp->b_rcred)) {
306 kauth_cred_unref(&bp->b_rcred);
308 if (IS_VALID_CRED(bp->b_wcred)) {
309 kauth_cred_unref(&bp->b_wcred);
315 buf_valid(buf_t bp) {
317 if ( (bp->b_flags & (B_DONE | B_DELWRI)) )
323 buf_fromcache(buf_t bp) {
325 if ( (bp->b_flags & B_CACHE) )
331 buf_markinvalid(buf_t bp) {
333 SET(bp->b_flags, B_INVAL);
337 buf_markdelayed(buf_t bp) {
339 if (!ISSET(bp->b_flags, B_DELWRI)) {
340 SET(bp->b_flags, B_DELWRI);
343 buf_reassign(bp, bp->b_vp);
345 SET(bp->b_flags, B_DONE);
349 buf_markclean(buf_t bp) {
351 if (ISSET(bp->b_flags, B_DELWRI)) {
352 CLR(bp->b_flags, B_DELWRI);
355 buf_reassign(bp, bp->b_vp);
360 buf_markeintr(buf_t bp) {
362 SET(bp->b_flags, B_EINTR);
367 buf_markaged(buf_t bp) {
369 SET(bp->b_flags, B_AGE);
373 buf_fua(buf_t bp) {
375 if ((bp->b_flags & B_FUA) == B_FUA)
381 buf_markfua(buf_t bp) {
383 SET(bp->b_flags, B_FUA);
388 buf_setcpaddr(buf_t bp, struct cprotect *entry) {
389 bp->b_attr.ba_cpentry = entry;
393 buf_setcpoff (buf_t bp, uint64_t foffset) {
394 bp->b_attr.ba_cp_file_off = foffset;
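The run of accessors above (buf_valid through buf_setcpoff) is the supported way for a file system to test and set buffer state instead of poking b_flags directly. A minimal caller-side sketch built only from those calls; myfs_finish_block is illustrative, not part of this file:

    #include <sys/buf.h>

    /*
     * Hypothetical helper: release a buffer after an update, using the
     * accessor layer above rather than touching b_flags directly.
     */
    static void
    myfs_finish_block(buf_t bp, int keep_dirty)
    {
        if (!buf_valid(bp)) {       /* neither B_DONE nor B_DELWRI (line 317) */
            buf_markinvalid(bp);    /* sets B_INVAL; brelse will discard it */
            buf_brelse(bp);
            return;
        }
        if (keep_dirty)
            buf_markdelayed(bp);    /* B_DELWRI + reassign to the dirty list */
        else
            buf_markclean(bp);      /* clears B_DELWRI */
        buf_brelse(bp);
    }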
498 buf_attr(buf_t bp) {
499 return &bp->b_attr;
503 buf_markstatic(buf_t bp __unused) {
504 SET(bp->b_flags, B_STATICCONTENT);
508 buf_static(buf_t bp) {
509 if ( (bp->b_flags & B_STATICCONTENT) )
515 buf_error(buf_t bp) {
517 return (bp->b_error);
521 buf_seterror(buf_t bp, errno_t error) {
523 if ((bp->b_error = error))
524 SET(bp->b_flags, B_ERROR);
526 CLR(bp->b_flags, B_ERROR);
530 buf_setflags(buf_t bp, int32_t flags) {
532 SET(bp->b_flags, (flags & BUF_X_WRFLAGS));
536 buf_clearflags(buf_t bp, int32_t flags) {
538 CLR(bp->b_flags, (flags & BUF_X_WRFLAGS));
542 buf_flags(buf_t bp) {
544 return ((bp->b_flags & BUF_X_RDFLAGS));
548 buf_reset(buf_t bp, int32_t io_flags) {
550 CLR(bp->b_flags, (B_READ | B_WRITE | B_ERROR | B_DONE | B_INVAL | B_ASYNC | B_NOCACHE | B_FUA));
551 SET(bp->b_flags, (io_flags & (B_ASYNC | B_READ | B_WRITE | B_NOCACHE)));
553 bp->b_error = 0;
557 buf_count(buf_t bp) {
559 return (bp->b_bcount);
563 buf_setcount(buf_t bp, uint32_t bcount) {
565 bp->b_bcount = bcount;
569 buf_size(buf_t bp) {
571 return (bp->b_bufsize);
575 buf_setsize(buf_t bp, uint32_t bufsize) {
577 bp->b_bufsize = bufsize;
581 buf_resid(buf_t bp) {
583 return (bp->b_resid);
587 buf_setresid(buf_t bp, uint32_t resid) {
589 bp->b_resid = resid;
593 buf_dirtyoff(buf_t bp) {
595 return (bp->b_dirtyoff);
599 buf_dirtyend(buf_t bp) {
601 return (bp->b_dirtyend);
605 buf_setdirtyoff(buf_t bp, uint32_t dirtyoff) {
607 bp->b_dirtyoff = dirtyoff;
611 buf_setdirtyend(buf_t bp, uint32_t dirtyend) {
613 bp->b_dirtyend = dirtyend;
617 buf_dataptr(buf_t bp) {
619 return (bp->b_datap);
623 buf_setdataptr(buf_t bp, uintptr_t data) {
625 bp->b_datap = data;
629 buf_vnode(buf_t bp) {
631 return (bp->b_vp);
635 buf_setvnode(buf_t bp, vnode_t vp) {
637 bp->b_vp = vp;
642 buf_callback(buf_t bp)
644 if ( !(bp->b_flags & B_CALL) )
647 return ((void *)bp->b_iodone);
652 buf_setcallback(buf_t bp, void (*callback)(buf_t, void *), void *transaction)
655 bp->b_flags |= (B_CALL | B_ASYNC);
657 bp->b_flags &= ~B_CALL;
658 bp->b_transaction = transaction;
659 bp->b_iodone = callback;
665 buf_setupl(buf_t bp, upl_t upl, uint32_t offset)
668 if ( !(bp->b_lflags & BL_IOBUF) )
672 bp->b_flags |= B_CLUSTER;
674 bp->b_flags &= ~B_CLUSTER;
675 bp->b_upl = upl;
676 bp->b_uploffset = offset;
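buf_setcallback (line 652) arms asynchronous completion: it sets B_CALL | B_ASYNC, and buf_biodone later invokes the callback, which then owns the buffer and must release it. A sketch, assuming a hypothetical tag value; myfs_iodone and myfs_start_async are illustrative:

    #include <sys/buf.h>
    #include <sys/vnode_if.h>
    #include <libkern/libkern.h>

    /* Invoked from buf_biodone(); a B_CALL callback owns the buffer. */
    static void
    myfs_iodone(buf_t bp, void *transaction)
    {
        long tag = (long)transaction;

        if (buf_error(bp))
            printf("myfs: async I/O %ld failed (%d)\n", tag, buf_error(bp));
        buf_brelse(bp);
    }

    static void
    myfs_start_async(buf_t bp, long tag)
    {
        buf_setcallback(bp, myfs_iodone, (void *)tag);  /* sets B_CALL | B_ASYNC */
        VNOP_STRATEGY(bp);
    }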
682 buf_clone(buf_t bp, int io_offset, int io_size, void (*iodone)(buf_t, void *), void *arg)
689 if ((unsigned)(io_offset + io_size) > (unsigned)bp->b_bcount)
692 if (bp->b_flags & B_CLUSTER) {
693 if (io_offset && ((bp->b_uploffset + io_offset) & PAGE_MASK))
696 if (((bp->b_uploffset + io_offset + io_size) & PAGE_MASK) && ((io_offset + io_size) < bp->b_bcount))
699 io_bp = alloc_io_buf(bp->b_vp, 0);
701 io_bp->b_flags = bp->b_flags & (B_COMMIT_UPL | B_META | B_PAGEIO | B_CLUSTER | B_PHYS | B_RAW | B_ASYNC | B_READ | B_FUA);
708 if (bp->b_flags & B_CLUSTER) {
709 io_bp->b_upl = bp->b_upl;
710 io_bp->b_uploffset = bp->b_uploffset + io_offset;
712 io_bp->b_datap = (uintptr_t)(((char *)bp->b_datap) + io_offset);
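buf_clone (line 682) carves a sub-range out of an existing buffer into a fresh iobuf that shares the original's storage; for UPL-backed (B_CLUSTER) buffers the split must be page-aligned or NULL is returned (lines 693-696). A sketch that splits one transfer in two, assuming a hypothetical completion routine that releases each clone and finishes the original once both are done:

    #include <sys/buf.h>
    #include <sys/errno.h>
    #include <sys/vnode_if.h>

    static int
    myfs_split_io(buf_t bp, int split, void (*done)(buf_t, void *), void *arg)
    {
        buf_t front, back;
        int count = (int)buf_count(bp);

        if (split <= 0 || split >= count)
            return EINVAL;
        if ((front = buf_clone(bp, 0, split, done, arg)) == NULL)
            return EINVAL;      /* out of range, or misaligned for a UPL */
        if ((back = buf_clone(bp, split, count - split, done, arg)) == NULL) {
            buf_brelse(front);
            return EINVAL;
        }
        VNOP_STRATEGY(front);
        VNOP_STRATEGY(back);
        return 0;
    }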
721 buf_shadow(buf_t bp)
723 if (bp->b_lflags & BL_SHADOW)
730 buf_create_shadow_priv(buf_t bp, boolean_t force_copy, uintptr_t external_storage, void (*iodone)(buf_t, void *), void *arg)
732 return (buf_create_shadow_internal(bp, force_copy, external_storage, iodone, arg, 1));
736 buf_create_shadow(buf_t bp, boolean_t force_copy, uintptr_t external_storage, void (*iodone)(buf_t, void *), void *arg)
738 return (buf_create_shadow_internal(bp, force_copy, external_storage, iodone, arg, 0));
743 buf_create_shadow_internal(buf_t bp, boolean_t force_copy, uintptr_t external_storage, void (*iodone)(buf_t, void *), void *arg, int priv)
747 KERNEL_DEBUG(0xbbbbc000 | DBG_FUNC_START, bp, 0, 0, 0, 0);
749 if ( !(bp->b_flags & B_META) || (bp->b_lflags & BL_IOBUF)) {
751 KERNEL_DEBUG(0xbbbbc000 | DBG_FUNC_END, bp, 0, 0, 0, 0);
755 if (bp->b_shadow_ref && bp->b_data_ref == 0 && external_storage == 0)
756 panic("buf_create_shadow: %p is in the private state (%d, %d)", bp, bp->b_shadow_ref, bp->b_data_ref);
758 io_bp = alloc_io_buf(bp->b_vp, priv);
760 io_bp->b_flags = bp->b_flags & (B_META | B_ZALLOC | B_ASYNC | B_READ | B_FUA);
761 io_bp->b_blkno = bp->b_blkno;
762 io_bp->b_lblkno = bp->b_lblkno;
770 io_bp->b_bcount = bp->b_bcount;
771 io_bp->b_bufsize = bp->b_bufsize;
779 io_bp->b_datap = bp->b_datap;
781 io_bp->b_data_store = bp;
784 *(buf_t *)(&io_bp->b_orig) = bp;
789 io_bp->b_shadow = bp->b_shadow;
790 bp->b_shadow = io_bp;
791 bp->b_shadow_ref++;
797 bp->b_data_ref++;
805 io_bp->b_bcount = bp->b_bcount;
806 io_bp->b_bufsize = bp->b_bufsize;
809 allocbuf(io_bp, bp->b_bcount);
813 bcopy((caddr_t)bp->b_datap, (caddr_t)io_bp->b_datap, bp->b_bcount);
819 KERNEL_DEBUG(0xbbbbc000 | DBG_FUNC_END, bp, bp->b_shadow_ref, 0, io_bp, 0);
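buf_create_shadow (line 736) links a new iobuf onto the master's b_shadow chain and bumps b_shadow_ref (lines 789-791); with force_copy the shadow gets its own copy of the data (lines 805-813), which is the shape a journal wants for a stable snapshot of still-changing metadata. A hedged sketch; myfs_journal_snapshot is illustrative:

    #include <sys/buf.h>

    static buf_t
    myfs_journal_snapshot(buf_t bp, void (*jdone)(buf_t, void *), void *txn)
    {
        /* force_copy = TRUE: private data, later edits to bp don't leak in */
        buf_t shadow = buf_create_shadow(bp, TRUE, (uintptr_t)0, jdone, txn);

        if (shadow == NULL) {
            /* bp wasn't B_META, or was itself an iobuf (line 749) */
            return NULL;
        }
        return shadow;
    }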
827 buf_make_private(buf_t bp)
833 KERNEL_DEBUG(0xbbbbc004 | DBG_FUNC_START, bp, bp->b_shadow_ref, 0, 0, 0);
835 if (bp->b_shadow_ref == 0 || bp->b_data_ref == 0 || ISSET(bp->b_lflags, BL_SHADOW)) {
837 KERNEL_DEBUG(0xbbbbc004 | DBG_FUNC_END, bp, bp->b_shadow_ref, 0, EINVAL, 0);
842 allocbuf(&my_buf, bp->b_bcount);
844 bcopy((caddr_t)bp->b_datap, (caddr_t)my_buf.b_datap, bp->b_bcount);
848 for (t_bp = bp->b_shadow; t_bp; t_bp = t_bp->b_shadow) {
849 if ( !ISSET(bp->b_lflags, BL_EXTERNAL))
854 if (ds_bp == NULL && bp->b_data_ref)
857 if (ds_bp && (bp->b_data_ref == 0 || bp->b_shadow_ref == 0))
865 KERNEL_DEBUG(0xbbbbc004 | DBG_FUNC_END, bp, bp->b_shadow_ref, 0, EINVAL, 0);
868 for (t_bp = bp->b_shadow; t_bp; t_bp = t_bp->b_shadow) {
872 ds_bp->b_data_ref = bp->b_data_ref;
874 bp->b_data_ref = 0;
875 bp->b_datap = my_buf.b_datap;
879 KERNEL_DEBUG(0xbbbbc004 | DBG_FUNC_END, bp, bp->b_shadow_ref, 0, 0, 0);
886 buf_setfilter(buf_t bp, void (*filter)(buf_t, void *), void *transaction,
890 *old_iodone = bp->b_iodone;
892 *old_transaction = bp->b_transaction;
894 bp->b_transaction = transaction;
895 bp->b_iodone = filter;
897 bp->b_flags |= B_FILTER;
899 bp->b_flags &= ~B_FILTER;
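Unlike a B_CALL callback, a B_FILTER callout installed with buf_setfilter (line 886) runs at biodone time without taking ownership of the buffer, and the previous iodone/transaction pair is handed back so it can be chained. A rough sketch; real code would stash the previous callback in per-buffer state, not in globals:

    #include <sys/buf.h>

    static void (*prev_iodone)(buf_t, void *);  /* illustrative only: not per-buffer */
    static void *prev_txn;

    static void
    myfs_filter(buf_t bp, void *arg)
    {
        (void)arg;  /* our transaction; NULL in this sketch */

        /* observe the completion, then chain to whoever was installed before */
        if (prev_iodone)
            prev_iodone(bp, prev_txn);
    }

    static void
    myfs_hook(buf_t bp)
    {
        buf_setfilter(bp, myfs_filter, NULL, &prev_iodone, &prev_txn);
    }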
904 buf_blkno(buf_t bp) {
906 return (bp->b_blkno);
910 buf_lblkno(buf_t bp) {
912 return (bp->b_lblkno);
916 buf_setblkno(buf_t bp, daddr64_t blkno) {
918 bp->b_blkno = blkno;
922 buf_setlblkno(buf_t bp, daddr64_t lblkno) {
924 bp->b_lblkno = lblkno;
928 buf_device(buf_t bp) {
930 return (bp->b_dev);
934 buf_setdevice(buf_t bp, vnode_t vp) {
938 bp->b_dev = vp->v_rdev;
945 buf_drvdata(buf_t bp) {
947 return (bp->b_drvdata);
951 buf_setdrvdata(buf_t bp, void *drvdata) {
953 bp->b_drvdata = drvdata;
957 buf_fsprivate(buf_t bp) {
959 return (bp->b_fsprivate);
963 buf_setfsprivate(buf_t bp, void *fsprivate) {
965 bp->b_fsprivate = fsprivate;
969 buf_rcred(buf_t bp) {
971 return (bp->b_rcred);
975 buf_wcred(buf_t bp) {
977 return (bp->b_wcred);
981 buf_upl(buf_t bp) {
983 return (bp->b_upl);
987 buf_uploffset(buf_t bp) {
989 return ((uint32_t)(bp->b_uploffset));
993 buf_proc(buf_t bp) {
995 return (bp->b_proc);
1000 buf_map(buf_t bp, caddr_t *io_addr)
1006 if ( !(bp->b_flags & B_CLUSTER)) {
1007 *io_addr = (caddr_t)bp->b_datap;
1010 real_bp = (buf_t)(bp->b_real_bp);
1023 kret = ubc_upl_map(bp->b_upl, &vaddr); /* Map it in */
1030 vaddr += bp->b_uploffset;
1038 buf_unmap(buf_t bp)
1043 if ( !(bp->b_flags & B_CLUSTER))
1048 real_bp = (buf_t)(bp->b_real_bp);
1053 if ((bp->b_lflags & BL_IOBUF) &&
1054 ((bp->b_flags & (B_PAGEIO | B_READ)) != (B_PAGEIO | B_READ))) {
1065 bp->b_flags |= B_AGE;
1067 kret = ubc_upl_unmap(bp->b_upl);
1076 buf_clear(buf_t bp) {
1079 if (buf_map(bp, &baddr) == 0) {
1080 bzero(baddr, bp->b_bcount);
1081 buf_unmap(bp);
1083 bp->b_resid = 0;
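buf_map (line 1000) hides whether the data sits at b_datap or behind a UPL that must be mapped first; buf_clear (line 1076) is just map, bzero, unmap. The same pattern fits any access to a buffer that may be B_CLUSTER; myfs_copy_block_out is illustrative:

    #include <sys/buf.h>
    #include <string.h>

    static errno_t
    myfs_copy_block_out(buf_t bp, void *dst)
    {
        caddr_t va;
        errno_t err;

        if ((err = buf_map(bp, &va)))   /* maps the UPL if B_CLUSTER */
            return err;
        memcpy(dst, va, buf_count(bp));
        return buf_unmap(bp);           /* harmless for non-cluster buffers */
    }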
1091 buf_strategy_fragmented(vnode_t devvp, buf_t bp, off_t f_offset, size_t contig_bytes)
1093 vnode_t vp = buf_vnode(bp);
1103 * save our starting point... the bp was already mapped
1107 io_blkno = bp->b_blkno;
1112 bp->b_blkno = bp->b_lblkno;
1119 io_bp->b_lblkno = bp->b_lblkno;
1120 io_bp->b_datap = bp->b_datap;
1121 io_resid = bp->b_bcount;
1122 io_direction = bp->b_flags & B_READ;
1125 if (bp->b_flags & B_READ)
1148 if (!ISSET(bp->b_flags, B_READ))
1174 buf_seterror(bp, error);
1175 bp->b_resid = io_resid;
1179 buf_biodone(bp);
1193 buf_t bp = ((struct vnop_strategy_args *)ap)->a_bp;
1194 vnode_t vp = bp->b_vp;
1212 bp->b_dev = devvp->v_rdev;
1214 if (bp->b_flags & B_READ)
1219 if ( !(bp->b_flags & B_CLUSTER)) {
1221 if ( (bp->b_upl) ) {
1223 * we have a UPL associated with this bp
1228 DTRACE_IO1(start, buf_t, bp);
1229 return (cluster_bp(bp));
1231 if (bp->b_blkno == bp->b_lblkno) {
1235 if ((error = VNOP_BLKTOOFF(vp, bp->b_lblkno, &f_offset))) {
1236 DTRACE_IO1(start, buf_t, bp);
1237 buf_seterror(bp, error);
1238 buf_biodone(bp);
1243 if ((error = VNOP_BLOCKMAP(vp, f_offset, bp->b_bcount, &bp->b_blkno, &contig_bytes, NULL, bmap_flags, NULL))) {
1244 DTRACE_IO1(start, buf_t, bp);
1245 buf_seterror(bp, error);
1246 buf_biodone(bp);
1251 DTRACE_IO1(start, buf_t, bp);
1256 if ((bp->b_blkno == -1) || (contig_bytes == 0)) {
1258 bp->b_blkno = -1;
1259 buf_clear(bp);
1261 else if ((long)contig_bytes < bp->b_bcount) {
1262 return (buf_strategy_fragmented(devvp, bp, f_offset, contig_bytes));
1268 DTRACE_IO1(start, buf_t, bp);
1273 if (bp->b_blkno == -1) {
1274 buf_biodone(bp);
1281 DTRACE_IO1(start, buf_t, bp);
1286 if (bp->b_attr.ba_cpentry != 0) {
1288 if (bp->b_attr.ba_cpentry->cp_flags & CP_OFF_IV_ENABLED) {
1290 if ((error = VNOP_BLKTOOFF(bp->b_vp, bp->b_lblkno, &f_offset)))
1306 bufattr_setcpoff (&(bp->b_attr), (u_int64_t)f_offset);
1331 buf_free(buf_t bp) {
1333 free_io_buf(bp);
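The strategy code above (lines 1193-1306) does the logical-to-physical work for a filesystem: VNOP_BLKTOOFF plus VNOP_BLOCKMAP, the fragmented-extent path, and zero-fill of holes (b_blkno == -1). A typical filesystem strategy entry point just forwards to buf_strategy; myfs_devvp is a hypothetical way of finding the backing device vnode:

    #include <sys/buf.h>
    #include <sys/vnode.h>
    #include <sys/vnode_if.h>

    /* hypothetical: fetch the device vnode from per-mount state */
    extern vnode_t myfs_devvp(vnode_t vp);

    static int
    myfs_vnop_strategy(struct vnop_strategy_args *ap)
    {
        buf_t bp = ap->a_bp;
        vnode_t devvp = myfs_devvp(buf_vnode(bp));

        /* buf_strategy() maps lblkno to blkno and forwards to devvp */
        return buf_strategy(devvp, ap);
    }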
1342 * if BUF_NOTIFY_BUSY is set, call the callout function using a NULL bp for busy pages
1353 buf_t bp;
1390 bp = LIST_FIRST(&local_iterblkhd);
1391 LIST_REMOVE(bp, b_vnbufs);
1392 LIST_INSERT_HEAD(list[i].listhead, bp, b_vnbufs);
1394 if (buf_acquire_locked(bp, lock_flags, 0, 0)) {
1396 bp = NULL;
1404 retval = callout(bp, arg);
1408 if (bp)
1409 buf_brelse(bp);
1414 if (bp)
1415 buf_brelse(bp);
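The loop above is the engine behind buf_iterate: it walks a vnode's clean and/or dirty lists, acquires each buffer, and hands it to the callout; with BUF_NOTIFY_BUSY it also reports busy buffers as a NULL bp (comment at line 1342). A caller-side sketch counting dirty buffers; myfs_dirty_count is illustrative:

    #include <sys/buf.h>
    #include <sys/vnode.h>

    static int
    myfs_count_dirty(buf_t bp, void *arg)
    {
        int *count = arg;

        if (bp == NULL)         /* a busy buffer, reported by BUF_NOTIFY_BUSY */
            return BUF_RETURNED;
        (*count)++;
        return BUF_RETURNED;    /* iterator will buf_brelse() it (line 1409) */
    }

    static int
    myfs_dirty_count(vnode_t vp)
    {
        int count = 0;

        buf_iterate(vp, myfs_count_dirty, BUF_SCAN_DIRTY | BUF_NOTIFY_BUSY, &count);
        return count;
    }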
1437 buf_t bp;
1470 bp = LIST_FIRST(&local_iterblkhd);
1472 LIST_REMOVE(bp, b_vnbufs);
1473 LIST_INSERT_HEAD(&vp->v_cleanblkhd, bp, b_vnbufs);
1478 if ((flags & BUF_SKIP_META) && (bp->b_lblkno < 0 || ISSET(bp->b_flags, B_META)))
1486 if ( (error = (int)buf_acquire_locked(bp, aflags, slpflag, slptimeo)) ) {
1514 if (bp->b_flags & B_LOCKED)
1515 KERNEL_DEBUG(0xbbbbc038, bp, 0, 0, 0, 0);
1517 CLR(bp->b_flags, B_LOCKED);
1518 SET(bp->b_flags, B_INVAL);
1519 buf_brelse(bp);
1541 bp = LIST_FIRST(&local_iterblkhd);
1543 LIST_REMOVE(bp, b_vnbufs);
1544 LIST_INSERT_HEAD(&vp->v_dirtyblkhd, bp, b_vnbufs);
1549 if ((flags & BUF_SKIP_META) && (bp->b_lblkno < 0 || ISSET(bp->b_flags, B_META)))
1557 if ( (error = (int)buf_acquire_locked(bp, aflags, slpflag, slptimeo)) ) {
1585 if (bp->b_flags & B_LOCKED)
1586 KERNEL_DEBUG(0xbbbbc038, bp, 0, 0, 1, 0);
1588 CLR(bp->b_flags, B_LOCKED);
1589 SET(bp->b_flags, B_INVAL);
1591 if (ISSET(bp->b_flags, B_DELWRI) && (flags & BUF_WRITE_DATA))
1592 (void) VNOP_BWRITE(bp);
1594 buf_brelse(bp);
1621 buf_t bp;
1638 bp = LIST_FIRST(&local_iterblkhd);
1639 LIST_REMOVE(bp, b_vnbufs);
1640 LIST_INSERT_HEAD(&vp->v_dirtyblkhd, bp, b_vnbufs);
1642 if ((error = buf_acquire_locked(bp, lock_flags, 0, 0)) == EBUSY) {
1663 bp->b_flags &= ~B_LOCKED;
1669 if ((bp->b_vp == vp) || (wait == 0))
1670 (void) buf_bawrite(bp);
1672 (void) VNOP_BWRITE(bp);
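The two fragments above are the internals of buf_invalidateblks (clean and dirty passes, lines 1470-1594) and buf_flushdirtyblks (lines 1638-1672). From a filesystem they pair up naturally at reclaim or unmount time; a sketch, where BUF_WRITE_DATA asks invalidate to write delayed-write buffers (line 1591) instead of dropping them:

    #include <sys/buf.h>
    #include <sys/vnode.h>

    static int
    myfs_sync_and_invalidate(vnode_t vp)
    {
        /* push dirty buffers synchronously, then invalidate what's left */
        buf_flushdirtyblks(vp, 1 /* wait */, 0, "myfs_sync");
        return buf_invalidateblks(vp, BUF_WRITE_DATA, 0, 0);
    }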
1750 buf_t bp;
1758 bp = LIST_FIRST(iterheadp);
1759 LIST_REMOVE(bp, b_vnbufs);
1760 LIST_INSERT_HEAD(listheadp, bp, b_vnbufs);
1772 bremfree_locked(buf_t bp)
1777 whichq = bp->b_whichq;
1780 if (bp->b_shadow_ref == 0)
1781 panic("bremfree_locked: %p not on freelist", bp);
1783 * there are clones pointing to 'bp'...
1785 * when buf_brelse was last called on 'bp'
1796 if (bp->b_freelist.tqe_next == NULL) {
1799 if (dp->tqh_last != &bp->b_freelist.tqe_next)
1802 TAILQ_REMOVE(dp, bp, b_freelist);
1810 bp->b_whichq = -1;
1811 bp->b_timestamp = 0;
1812 bp->b_shadow = 0;
1820 bgetvp_locked(vnode_t vp, buf_t bp)
1823 if (bp->b_vp != vp)
1827 bp->b_dev = vp->v_rdev;
1829 bp->b_dev = NODEV;
1833 bufinsvn(bp, &vp->v_cleanblkhd);
1841 brelvp_locked(buf_t bp)
1846 if (bp->b_vnbufs.le_next != NOLIST)
1847 bufremvn(bp);
1849 bp->b_vp = (vnode_t)NULL;
1858 buf_reassign(buf_t bp, vnode_t newvp)
1871 if (bp->b_vnbufs.le_next != NOLIST)
1872 bufremvn(bp);
1877 if (ISSET(bp->b_flags, B_DELWRI))
1881 bufinsvn(bp, listheadp);
1887 bufhdrinit(buf_t bp)
1889 bzero((char *)bp, sizeof *bp);
1890 bp->b_dev = NODEV;
1891 bp->b_rcred = NOCRED;
1892 bp->b_wcred = NOCRED;
1893 bp->b_vnbufs.le_next = NOLIST;
1894 bp->b_flags = B_INVAL;
1905 buf_t bp;
1920 bp = &buf_headers[i];
1921 bufhdrinit(bp);
1923 BLISTNONE(bp);
1925 bp->b_whichq = BQ_EMPTY;
1926 bp->b_timestamp = buf_timestamp();
1927 binsheadfree(bp, dp, BQ_EMPTY);
1928 binshash(bp, &invalhash);
1936 bp = &buf_headers[i];
1937 bufhdrinit(bp);
1938 bp->b_whichq = -1;
1939 binsheadfree(bp, &iobufqueue, -1);
2061 buf_t bp;
2063 bp = buf_getblk(vp, blkno, size, 0, 0, queuetype);
2070 if (!ISSET(bp->b_flags, (B_DONE | B_DELWRI))) {
2076 SET(bp->b_flags, B_READ | async);
2077 if (IS_VALID_CRED(cred) && !IS_VALID_CRED(bp->b_rcred)) {
2079 bp->b_rcred = cred;
2082 VNOP_STRATEGY(bp);
2094 * we don't want to pass back a bp
2097 bp = NULL;
2100 buf_brelse(bp);
2101 bp = NULL;
2106 return (bp);
2117 buf_t bp;
2120 bp = *bpp = bio_doread(vp, blkno, size, cred, 0, queuetype);
2135 return (buf_biowait(bp));
2146 buf_t bp;
2149 bp = *bpp = bio_doread(vp, blkno, size, cred, 0, BLK_READ);
2152 return (buf_biowait(bp));
2162 buf_t bp;
2165 bp = *bpp = bio_doread(vp, blkno, size, cred, 0, BLK_META);
2168 return (buf_biowait(bp));
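buf_bread, buf_meta_bread, and bio_doread above all funnel through buf_getblk plus VNOP_STRATEGY plus buf_biowait. The caller contract is that the buffer comes back held even on error, so it must still be released; a sketch, with myfs_read_meta illustrative:

    #include <sys/buf.h>
    #include <sys/ucred.h>
    #include <sys/vnode.h>

    static errno_t
    myfs_read_meta(vnode_t vp, daddr64_t blkno, int size, buf_t *bpp)
    {
        errno_t err = buf_meta_bread(vp, blkno, size, NOCRED, bpp);

        if (err) {
            buf_brelse(*bpp);   /* the buf is returned even when the read failed */
            *bpp = NULL;
        }
        return err;
    }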
2194 buf_bwrite(buf_t bp)
2199 vnode_t vp = bp->b_vp;
2201 if (bp->b_datap == 0) {
2202 if (brecover_data(bp) == 0)
2206 sync = !ISSET(bp->b_flags, B_ASYNC);
2207 wasdelayed = ISSET(bp->b_flags, B_DELWRI);
2208 CLR(bp->b_flags, (B_READ | B_DONE | B_ERROR | B_DELWRI));
2221 buf_reassign(bp, vp);
2226 trace(TR_BUFWRITE, pack(vp, bp->b_bcount), bp->b_lblkno);
2232 VNOP_STRATEGY(bp);
2238 rv = buf_biowait(bp);
2246 buf_reassign(bp, vp);
2253 if (!ISSET(bp->b_flags, B_NORELSE)) {
2254 buf_brelse(bp);
2256 CLR(bp->b_flags, B_NORELSE);
2291 bdwrite_internal(buf_t bp, int return_error)
2294 vnode_t vp = bp->b_vp;
2302 if (!ISSET(bp->b_flags, B_DELWRI)) {
2303 SET(bp->b_flags, B_DELWRI);
2307 buf_reassign(bp, vp);
2324 if (!ISSET(bp->b_flags, B_LOCKED) && nbdwrite > ((nbuf_headers/4)*3)) {
2333 return (buf_bawrite(bp));
2337 SET(bp->b_flags, B_DONE);
2338 buf_brelse(bp);
2343 buf_bdwrite(buf_t bp)
2345 return (bdwrite_internal(bp, 0));
2360 bawrite_internal(buf_t bp, int throttle)
2362 vnode_t vp = bp->b_vp;
2378 SET(bp->b_flags, B_ASYNC);
2380 return (VNOP_BWRITE(bp));
2384 buf_bawrite(buf_t bp)
2386 return (bawrite_internal(bp, 1));
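buf_bwrite, bdwrite_internal, and bawrite_internal above are the three write disciplines: synchronous, delayed (just mark B_DELWRI and release, lines 2337-2338), and asynchronous write-behind. A sketch of choosing among them; myfs_write_block is illustrative:

    #include <sys/buf.h>

    static errno_t
    myfs_write_block(buf_t bp, int how)
    {
        switch (how) {
        case 0:
            return buf_bwrite(bp);      /* start I/O and buf_biowait() for it */
        case 1:
            return buf_bdwrite(bp);     /* mark B_DELWRI; written later by sync */
        default:
            return buf_bawrite(bp);     /* start the I/O now, don't wait */
        }
    }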
2392 buf_free_meta_store(buf_t bp)
2394 if (bp->b_bufsize) {
2395 if (ISSET(bp->b_flags, B_ZALLOC)) {
2398 z = getbufzone(bp->b_bufsize);
2399 zfree(z, (void *)bp->b_datap);
2401 kmem_free(kernel_map, bp->b_datap, bp->b_bufsize);
2403 bp->b_datap = (uintptr_t)NULL;
2404 bp->b_bufsize = 0;
2410 buf_brelse_shadow(buf_t bp)
2423 bp_head = (buf_t)bp->b_orig;
2429 if ((bp_data = bp->b_data_store)) {
2439 KERNEL_DEBUG(0xbbbbc008 | DBG_FUNC_START, bp, bp_head, bp_head->b_shadow_ref, 0, 0);
2443 for (bp_temp = bp_head; bp_temp && bp != bp_temp->b_shadow; bp_temp = bp_temp->b_shadow);
2446 panic("buf_brelse_shadow: bp not on list %p", bp_head);
2456 if (bp == bp_data && data_ref) {
2458 panic("buf_brelse_shadow: data_ref mismatch bp(%p)", bp);
2466 panic("buf_brelse_shadow: b_shadow != NULL && b_shadow_ref == 0 bp(%p)", bp);
2468 panic("buf_brelse_shadow: b_shadow == NULL && b_shadow_ref != 0 bp(%p)", bp);
2500 if (bp == bp_data && data_ref == 0)
2501 buf_free_meta_store(bp);
2503 bp->b_data_store = NULL;
2505 KERNEL_DEBUG(0xbbbbc008 | DBG_FUNC_END, bp, 0, 0, 0, 0);
2516 buf_brelse(buf_t bp)
2525 if (bp->b_whichq != -1 || !(bp->b_lflags & BL_BUSY))
2526 panic("buf_brelse: bad buffer = %p\n", bp);
2529 (void) OSBacktrace(&bp->b_stackbrelse[0], 6);
2531 bp->b_lastbrelse = current_thread();
2532 bp->b_tag = 0;
2534 if (bp->b_lflags & BL_IOBUF) {
2537 if (ISSET(bp->b_lflags, BL_SHADOW))
2538 shadow_master_bp = buf_brelse_shadow(bp);
2539 else if (ISSET(bp->b_lflags, BL_IOBUF_ALLOC))
2540 buf_free_meta_store(bp);
2541 free_io_buf(bp);
2544 bp = shadow_master_bp;
2551 bp->b_lblkno * PAGE_SIZE, bp, bp->b_datap,
2552 bp->b_flags, 0);
2554 trace(TR_BRELSE, pack(bp->b_vp, bp->b_bufsize), bp->b_lblkno);
2563 if (ISSET(bp->b_flags, B_META) && ISSET(bp->b_flags, B_INVAL)) {
2564 if (ISSET(bp->b_flags, B_FILTER)) { /* if necessary, call out */
2565 void (*iodone_func)(struct buf *, void *) = bp->b_iodone;
2566 void *arg = bp->b_transaction;
2568 CLR(bp->b_flags, B_FILTER); /* but note callout done */
2569 bp->b_iodone = NULL;
2570 bp->b_transaction = NULL;
2573 panic("brelse: bp @ %p has NULL b_iodone!\n", bp);
2575 (*iodone_func)(bp, arg);
2581 upl = bp->b_upl;
2583 if ( !ISSET(bp->b_flags, B_META) && UBCINFOEXISTS(bp->b_vp) && bp->b_bufsize) {
2588 if ( !ISSET(bp->b_flags, B_INVAL)) {
2589 kret = ubc_create_upl(bp->b_vp,
2590 ubc_blktooff(bp->b_vp, bp->b_lblkno),
2591 bp->b_bufsize,
2599 upl_ubc_alias_set(upl, (uintptr_t) bp, (uintptr_t) 5);
2603 if (bp->b_datap) {
2608 bp->b_datap = (uintptr_t)NULL;
2612 if (bp->b_flags & (B_ERROR | B_INVAL)) {
2613 if (bp->b_flags & (B_READ | B_INVAL))
2620 if (ISSET(bp->b_flags, B_DELWRI | B_WASDIRTY))
2625 ubc_upl_commit_range(upl, 0, bp->b_bufsize, upl_flags |
2628 bp->b_upl = NULL;
2632 panic("brelse: UPL set for non VREG; vp=%p", bp->b_vp);
2638 if (ISSET(bp->b_flags, (B_LOCKED|B_ERROR)) == (B_LOCKED|B_ERROR))
2639 CLR(bp->b_flags, B_ERROR);
2643 if (ISSET(bp->b_flags, (B_NOCACHE|B_ERROR)))
2644 SET(bp->b_flags, B_INVAL);
2646 if ((bp->b_bufsize <= 0) ||
2647 ISSET(bp->b_flags, B_INVAL) ||
2648 (ISSET(bp->b_lflags, BL_WANTDEALLOC) && !ISSET(bp->b_flags, B_DELWRI))) {
2657 if (ISSET(bp->b_flags, B_DELWRI))
2660 if (ISSET(bp->b_flags, B_META)) {
2661 if (bp->b_shadow_ref)
2664 buf_free_meta_store(bp);
2669 buf_release_credentials(bp);
2673 if (bp->b_shadow_ref) {
2674 SET(bp->b_lflags, BL_WAITSHADOW);
2684 buf_free_meta_store(bp);
2688 CLR(bp->b_flags, (B_META | B_ZALLOC | B_DELWRI | B_LOCKED | B_AGE | B_ASYNC | B_NOCACHE | B_FUA));
2690 if (bp->b_vp)
2691 brelvp_locked(bp);
2693 bremhash(bp);
2694 BLISTNONE(bp);
2695 binshash(bp, &invalhash);
2697 bp->b_whichq = BQ_EMPTY;
2698 binsheadfree(bp, &bufqueues[BQ_EMPTY], BQ_EMPTY);
2705 if (ISSET(bp->b_flags, B_LOCKED))
2707 else if (ISSET(bp->b_flags, B_META))
2709 else if (ISSET(bp->b_flags, B_AGE))
2715 bp->b_timestamp = buf_timestamp();
2728 if (bp->b_shadow_ref == 0) {
2729 CLR(bp->b_flags, (B_AGE | B_ASYNC | B_NOCACHE));
2730 bp->b_whichq = whichq;
2731 binstailfree(bp, bufq, whichq);
2739 CLR(bp->b_flags, (B_ASYNC | B_NOCACHE));
2752 if (ISSET(bp->b_lflags, BL_WANTED)) {
2762 CLR(bp->b_lflags, (BL_BUSY | BL_WANTED));
2777 wakeup(bp);
2780 bp, bp->b_datap, bp->b_flags, 0, 0);
2813 struct buf *bp;
2816 for (bp = dp->lh_first; bp != NULL; bp = bp->b_hash.le_next) {
2817 if (bp->b_lblkno == blkno && bp->b_vp == vp &&
2818 !ISSET(bp->b_flags, B_INVAL)) {
2819 return (bp);
2828 buf_t bp;
2836 if ((bp = incore_locked(vp, blkno, dp)) == NULL)
2839 if (bp->b_shadow_ref == 0)
2842 SET(bp->b_lflags, BL_WANTED_REF);
2844 (void) msleep(bp, buf_mtxp, PSPIN | (PRIBIO+1), "buf_wait_for_shadow", NULL);
2861 buf_t bp;
2880 if ((bp = incore_locked(vp, blkno, dp))) {
2884 if (ISSET(bp->b_lflags, BL_BUSY)) {
2892 SET(bp->b_lflags, BL_WANTED);
2905 err = msleep(bp, buf_mtxp, slpflag | PDROP | (PRIBIO + 1), "buf_getblk", &ts);
2929 SET(bp->b_lflags, BL_BUSY);
2930 SET(bp->b_flags, B_CACHE);
2933 bremfree_locked(bp);
2938 bp->b_owner = current_thread();
2939 bp->b_tag = 1;
2941 if ( (bp->b_upl) )
2942 panic("buffer has UPL, but not marked BUSY: %p", bp);
2944 if ( !ret_only_valid && bp->b_bufsize != size)
2945 allocbuf(bp, size);
2958 if (UBCINFOEXISTS(bp->b_vp) && bp->b_bufsize) {
2960 ubc_blktooff(vp, bp->b_lblkno),
2961 bp->b_bufsize,
2968 bp->b_upl = upl;
2972 SET(bp->b_flags, B_WASDIRTY);
2974 CLR(bp->b_flags, B_WASDIRTY);
2976 CLR(bp->b_flags, (B_DONE | B_CACHE | B_WASDIRTY | B_DELWRI));
2978 kret = ubc_upl_map(upl, (vm_offset_t*)&(bp->b_datap));
3008 if ((bp = getnewbuf(slpflag, slptimeo, &queue)) == NULL)
3019 SET(bp->b_flags, B_INVAL);
3020 binshash(bp, &invalhash);
3024 buf_brelse(bp);
3037 SET(bp->b_flags, B_META);
3039 bp->b_blkno = bp->b_lblkno = blkno;
3040 bp->b_vp = vp;
3045 binshash(bp, BUFHASH(vp, blkno));
3047 bgetvp_locked(vp, bp);
3051 allocbuf(bp, size);
3080 if ( (bp->b_upl) )
3081 panic("bp already has UPL: %p", bp);
3088 bp->b_bufsize,
3096 upl_ubc_alias_set(upl, (uintptr_t) bp, (uintptr_t) 4);
3098 bp->b_upl = upl;
3107 SET(bp->b_flags, B_CACHE | B_DONE);
3111 bp->b_validoff = 0;
3112 bp->b_dirtyoff = 0;
3116 SET(bp->b_flags, B_WASDIRTY);
3118 bp->b_validend = bp->b_bcount;
3119 bp->b_dirtyend = bp->b_bcount;
3122 bp->b_validend = bp->b_bcount;
3123 bp->b_dirtyend = 0;
3129 if (VNOP_BLOCKMAP(vp, f_offset, bp->b_bcount, &bp->b_blkno, &contig_bytes, NULL, bmap_flags, NULL))
3137 if ((long)contig_bytes < bp->b_bcount)
3138 bp->b_blkno = bp->b_lblkno;
3142 kret = ubc_upl_map(upl, (vm_offset_t *)&(bp->b_datap));
3155 bp, bp->b_datap, bp->b_flags, 3, 0);
3158 (void) OSBacktrace(&bp->b_stackgetblk[0], 6);
3160 return (bp);
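buf_getblk either finds the block in the hash (taking the BL_BUSY path at line 2929) or builds a fresh buffer, attaching and mapping a UPL for data blocks. When the caller is going to overwrite the whole block, no read is needed; a sketch, with myfs_new_block illustrative:

    #include <sys/buf.h>
    #include <sys/vnode.h>

    static buf_t
    myfs_new_block(vnode_t vp, daddr64_t blkno, int blksize)
    {
        buf_t bp = buf_getblk(vp, blkno, blksize, 0, 0, BLK_WRITE);

        if (!buf_valid(bp))
            buf_clear(bp);  /* zero it via buf_map()/buf_unmap() (line 1076) */
        return bp;          /* caller fills it in, then buf_bdwrite() */
    }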
3169 buf_t bp = NULL;
3175 bp = getnewbuf(0, 0, &queue);
3176 } while (bp == NULL);
3178 SET(bp->b_flags, (B_META|B_INVAL));
3185 binshash(bp, &invalhash);
3190 allocbuf(bp, size);
3192 return (bp);
3196 buf_redundancy_flags(buf_t bp)
3198 return bp->b_redundancy_flags;
3202 buf_set_redundancy_flags(buf_t bp, uint32_t flags)
3204 SET(bp->b_redundancy_flags, flags);
3208 buf_clear_redundancy_flags(buf_t bp, uint32_t flags)
3210 CLR(bp->b_redundancy_flags, flags);
3226 allocbuf(buf_t bp, int size)
3237 if (ISSET(bp->b_flags, B_META)) {
3241 if (bp->b_datap) {
3242 vm_offset_t elem = (vm_offset_t)bp->b_datap;
3244 if (ISSET(bp->b_flags, B_ZALLOC)) {
3245 if (bp->b_bufsize < nsize) {
3248 zprev = getbufzone(bp->b_bufsize);
3253 *(void **)(&bp->b_datap) = zalloc(z);
3255 bp->b_datap = (uintptr_t)NULL;
3256 kmem_alloc_kobject(kernel_map, (vm_offset_t *)&bp->b_datap, desired_size);
3257 CLR(bp->b_flags, B_ZALLOC);
3259 bcopy((void *)elem, (caddr_t)bp->b_datap, bp->b_bufsize);
3262 desired_size = bp->b_bufsize;
3266 if ((vm_size_t)bp->b_bufsize < desired_size) {
3268 bp->b_datap = (uintptr_t)NULL;
3269 kmem_alloc_kobject(kernel_map, (vm_offset_t *)&bp->b_datap, desired_size);
3270 bcopy((const void *)elem, (caddr_t)bp->b_datap, bp->b_bufsize);
3271 kmem_free(kernel_map, elem, bp->b_bufsize);
3273 desired_size = bp->b_bufsize;
3282 *(void **)(&bp->b_datap) = zalloc(z);
3283 SET(bp->b_flags, B_ZALLOC);
3285 kmem_alloc_kobject(kernel_map, (vm_offset_t *)&bp->b_datap, desired_size);
3288 if (bp->b_datap == 0)
3291 bp->b_bufsize = desired_size;
3292 bp->b_bcount = size;
3322 buf_t bp;
3339 if (*queue == BQ_EMPTY && (bp = bufqueues[*queue].tqh_first))
3355 bp = bufqueues[*queue].tqh_first;
3356 if (bp)
3369 bp = bufqueues[BQ_EMPTY].tqh_first;
3370 if (bp) {
3384 bp = (struct buf *)zalloc(buf_hdr_zone);
3386 if (bp) {
3387 bufhdrinit(bp);
3388 bp->b_whichq = BQ_EMPTY;
3389 bp->b_timestamp = buf_timestamp();
3390 BLISTNONE(bp);
3391 SET(bp->b_flags, B_HDRALLOC);
3396 if (bp) {
3397 binshash(bp, &invalhash);
3398 binsheadfree(bp, &bufqueues[BQ_EMPTY], BQ_EMPTY);
3419 bp = NULL;
3424 bp = lru_bp;
3427 bp = age_bp;
3435 bp = age_bp;
3443 bp = lru_bp;
3446 bp = age_bp;
3452 if (!bp) { /* Neither on AGE nor on LRU */
3453 bp = meta_bp;
3458 bp_time = t - bp->b_timestamp;
3469 bp = meta_bp;
3475 if (ISSET(bp->b_flags, B_LOCKED) || ISSET(bp->b_lflags, BL_BUSY))
3476 panic("getnewbuf: bp @ %p is LOCKED or BUSY! (flags 0x%x)\n", bp, bp->b_flags);
3479 if (bcleanbuf(bp, FALSE)) {
3486 return (bp);
3500 bcleanbuf(buf_t bp, boolean_t discard)
3503 bremfree_locked(bp);
3506 bp->b_owner = current_thread();
3507 bp->b_tag = 2;
3513 if (ISSET(bp->b_flags, B_DELWRI)) {
3515 SET(bp->b_lflags, BL_WANTDEALLOC);
3518 bmovelaundry(bp);
3533 bp->b_owner = current_thread();
3534 bp->b_tag = 8;
3539 SET(bp->b_lflags, BL_BUSY);
3542 bremhash(bp);
3547 if (bp->b_vp)
3548 brelvp_locked(bp);
3552 BLISTNONE(bp);
3554 if (ISSET(bp->b_flags, B_META))
3555 buf_free_meta_store(bp);
3557 trace(TR_BRELSE, pack(bp->b_vp, bp->b_bufsize), bp->b_lblkno);
3559 buf_release_credentials(bp);
3564 CLR(bp->b_flags, (B_META | B_ZALLOC | B_DELWRI | B_LOCKED | B_AGE | B_ASYNC | B_NOCACHE | B_FUA));
3565 bp->b_whichq = BQ_EMPTY;
3566 binshash(bp, &invalhash);
3567 binsheadfree(bp, &bufqueues[BQ_EMPTY], BQ_EMPTY);
3568 CLR(bp->b_lflags, BL_BUSY);
3572 bp->b_bufsize = 0;
3573 bp->b_datap = (uintptr_t)NULL;
3574 bp->b_upl = (void *)NULL;
3582 bp->b_owner = current_thread();
3583 bp->b_tag = 3;
3585 bp->b_lflags = BL_BUSY;
3586 bp->b_flags = (bp->b_flags & B_HDRALLOC);
3587 bp->b_dev = NODEV;
3588 bp->b_blkno = bp->b_lblkno = 0;
3589 bp->b_iodone = NULL;
3590 bp->b_error = 0;
3591 bp->b_resid = 0;
3592 bp->b_bcount = 0;
3593 bp->b_dirtyoff = bp->b_dirtyend = 0;
3594 bp->b_validoff = bp->b_validend = 0;
3595 bzero(&bp->b_attr, sizeof(struct bufattr));
3607 buf_t bp;
3616 if ((bp = incore_locked(vp, lblkno, dp)) == (struct buf *)0) {
3620 if (ISSET(bp->b_lflags, BL_BUSY)) {
3625 SET(bp->b_lflags, BL_WANTED);
3627 error = msleep((caddr_t)bp, buf_mtxp, PDROP | (PRIBIO + 1), "buf_invalblkno", NULL);
3634 bremfree_locked(bp);
3635 SET(bp->b_lflags, BL_BUSY);
3636 SET(bp->b_flags, B_INVAL);
3639 bp->b_owner = current_thread();
3640 bp->b_tag = 4;
3643 buf_brelse(bp);
3650 buf_drop(buf_t bp)
3656 if (ISSET(bp->b_lflags, BL_WANTED)) {
3664 bp->b_owner = current_thread();
3665 bp->b_tag = 9;
3670 CLR(bp->b_lflags, (BL_BUSY | BL_WANTED));
3679 wakeup(bp);
3685 buf_acquire(buf_t bp, int flags, int slpflag, int slptimeo) {
3690 error = buf_acquire_locked(bp, flags, slpflag, slptimeo);
3699 buf_acquire_locked(buf_t bp, int flags, int slpflag, int slptimeo)
3704 if (ISSET(bp->b_flags, B_LOCKED)) {
3711 if (ISSET(bp->b_lflags, BL_BUSY)) {
3719 SET(bp->b_lflags, BL_WANTED);
3724 error = msleep((caddr_t)bp, buf_mtxp, slpflag | (PRIBIO + 1), "buf_acquire", &ts);
3731 bremfree_locked(bp);
3732 SET(bp->b_lflags, BL_BUSY);
3736 bp->b_owner = current_thread();
3737 bp->b_tag = 5;
3748 buf_biowait(buf_t bp)
3750 while (!ISSET(bp->b_flags, B_DONE)) {
3754 if (!ISSET(bp->b_flags, B_DONE)) {
3755 DTRACE_IO1(wait__start, buf_t, bp);
3756 (void) msleep(bp, buf_mtxp, PDROP | (PRIBIO+1), "buf_biowait", NULL);
3757 DTRACE_IO1(wait__done, buf_t, bp);
3762 if (ISSET(bp->b_flags, B_EINTR)) {
3763 CLR(bp->b_flags, B_EINTR);
3765 } else if (ISSET(bp->b_flags, B_ERROR))
3766 return (bp->b_error ? bp->b_error : EIO);
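buf_biowait is the synchronous rendezvous: sleep until biodone sets B_DONE, then report B_EINTR or B_ERROR. Together with buf_alloc, the KPI wrapper over alloc_io_buf (line 4012 below), it supports plain synchronous device I/O. A hedged sketch, assuming devvp is a block-device vnode and data is wired kernel memory:

    #include <sys/buf.h>
    #include <sys/vnode_if.h>

    static errno_t
    myfs_read_raw(vnode_t devvp, daddr64_t blkno, void *data, uint32_t len)
    {
        buf_t bp = buf_alloc(devvp);    /* iobuf: BL_BUSY | BL_IOBUF */
        errno_t err;

        buf_setflags(bp, B_READ);
        buf_setblkno(bp, blkno);
        buf_setlblkno(bp, blkno);
        buf_setcount(bp, len);
        buf_setdataptr(bp, (uintptr_t)data);

        VNOP_STRATEGY(bp);              /* goes straight to the device */
        err = buf_biowait(bp);
        buf_free(bp);
        return err;
    }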
3792 buf_biodone(buf_t bp)
3797 bp, bp->b_datap, bp->b_flags, 0, 0);
3799 if (ISSET(bp->b_flags, B_DONE))
3802 if (ISSET(bp->b_flags, B_ERROR)) {
3803 fslog_io_error(bp);
3806 if (bp->b_vp && bp->b_vp->v_mount) {
3807 mp = bp->b_vp->v_mount;
3812 if (mp && (bp->b_flags & B_READ) == 0) {
3814 INCR_PENDING_IO(-(pending_io_t)buf_count(bp), mp->mnt_pending_write_size);
3816 INCR_PENDING_IO(-(pending_io_t)buf_count(bp), mp->mnt_pending_read_size);
3822 if (bp->b_flags & B_READ)
3824 if (bp->b_flags & B_ASYNC)
3827 if (bp->b_flags & B_META)
3829 else if (bp->b_flags & B_PAGEIO)
3832 if (bp->b_flags & B_THROTTLED_IO)
3834 else if (bp->b_flags & B_PASSIVE)
3837 if (bp->b_attr.ba_flags & BA_NOCACHE)
3841 bp, (uintptr_t)bp->b_vp,
3842 bp->b_resid, bp->b_error, 0);
3844 if ((bp->b_vp != NULLVP) &&
3845 ((bp->b_flags & (B_THROTTLED_IO | B_PASSIVE | B_IOSTREAMING | B_PAGEIO | B_READ)) == (B_PAGEIO | B_READ)) &&
3846 (bp->b_vp->v_mount->mnt_kern_flag & MNTK_ROOTDEV)) {
3857 CLR(bp->b_flags, (B_WASDIRTY | B_THROTTLED_IO | B_PASSIVE));
3858 CLR(bp->b_attr.ba_flags, (BA_META | BA_NOCACHE));
3860 CLR(bp->b_attr.ba_flags, (BA_THROTTLED_IO | BA_DELAYIDLESLEEP));
3862 CLR(bp->b_attr.ba_flags, BA_THROTTLED_IO);
3864 DTRACE_IO1(done, buf_t, bp);
3866 if (!ISSET(bp->b_flags, B_READ) && !ISSET(bp->b_flags, B_RAW))
3872 vnode_writedone(bp->b_vp);
3874 if (ISSET(bp->b_flags, (B_CALL | B_FILTER))) { /* if necessary, call out */
3875 void (*iodone_func)(struct buf *, void *) = bp->b_iodone;
3876 void *arg = bp->b_transaction;
3877 int callout = ISSET(bp->b_flags, B_CALL);
3880 panic("biodone: bp @ %p has NULL b_iodone!\n", bp);
3882 CLR(bp->b_flags, (B_CALL | B_FILTER)); /* filters and callouts are one-shot */
3883 bp->b_iodone = NULL;
3884 bp->b_transaction = NULL;
3887 SET(bp->b_flags, B_DONE); /* note that it's done */
3889 (*iodone_func)(bp, arg);
3894 * ownership of the bp and deals with releasing it if necessary
3901 * ownership of the bp and is expecting us
3906 if (ISSET(bp->b_flags, B_ASYNC)) { /* if async, release it */
3907 SET(bp->b_flags, B_DONE); /* note that it's done */
3909 buf_brelse(bp);
3927 CLR(bp->b_lflags, BL_WANTED);
3928 SET(bp->b_flags, B_DONE); /* note that it's done */
3932 wakeup(bp);
3936 (uintptr_t)bp, (uintptr_t)bp->b_datap, bp->b_flags, 0, 0);
3945 buf_t bp;
3950 for (bp = bufqueues[BQ_LOCKED].tqh_first; bp;
3951 bp = bp->b_freelist.tqe_next)
3978 struct buf *bp;
3991 for (bp = dp->tqh_first; bp; bp = bp->b_freelist.tqe_next) {
3992 counts[bp->b_bufsize/CLBYTES]++;
4012 buf_t bp;
4017 (bp = iobufqueue.tqh_first) == NULL) {
4023 TAILQ_REMOVE(&iobufqueue, bp, b_freelist);
4037 bp->b_timestamp = 0;
4038 bp->b_proc = NULL;
4040 bp->b_datap = 0;
4041 bp->b_flags = 0;
4042 bp->b_lflags = BL_BUSY | BL_IOBUF;
4043 bp->b_redundancy_flags = 0;
4044 bp->b_blkno = bp->b_lblkno = 0;
4046 bp->b_owner = current_thread();
4047 bp->b_tag = 6;
4049 bp->b_iodone = NULL;
4050 bp->b_error = 0;
4051 bp->b_resid = 0;
4052 bp->b_bcount = 0;
4053 bp->b_bufsize = 0;
4054 bp->b_upl = NULL;
4055 bp->b_vp = vp;
4056 bzero(&bp->b_attr, sizeof(struct bufattr));
4059 bp->b_dev = vp->v_rdev;
4061 bp->b_dev = NODEV;
4063 return (bp);
4068 free_io_buf(buf_t bp)
4075 bp->b_vp = NULL;
4076 bp->b_flags = B_INVAL;
4080 binsheadfree(bp, &iobufqueue, -1);
4096 panic("free_io_buf: bp(%p) - bufstats.bufs_iobufinuse < 0", bp);
4141 struct buf *bp;
4148 while ( (bp = TAILQ_FIRST(&bufqueues[BQ_LAUNDRY])) == NULL) {
4155 bremfree_locked(bp);
4160 SET(bp->b_lflags, BL_BUSY);
4164 bp->b_owner = current_thread();
4165 bp->b_tag = 10;
4172 error = bawrite_internal(bp, 0);
4175 bp->b_whichq = BQ_LAUNDRY;
4176 bp->b_timestamp = buf_timestamp();
4180 binstailfree(bp, &bufqueues[BQ_LAUNDRY], BQ_LAUNDRY);
4184 CLR(bp->b_lflags, BL_BUSY);
4187 bp->b_owner = current_thread();
4188 bp->b_tag = 11;
4212 brecover_data(buf_t bp)
4218 vnode_t vp = bp->b_vp;
4222 if ( !UBCINFOEXISTS(vp) || bp->b_bufsize == 0)
4226 if (! (buf_flags(bp) & B_READ)) {
4236 ubc_blktooff(vp, bp->b_lblkno),
4237 bp->b_bufsize,
4244 for (upl_offset = 0; upl_offset < bp->b_bufsize; upl_offset += PAGE_SIZE) {
4251 bp->b_upl = upl;
4253 kret = ubc_upl_map(upl, (vm_offset_t *)&(bp->b_datap));
4260 bp->b_bufsize = 0;
4261 SET(bp->b_flags, B_INVAL);
4262 buf_brelse(bp);
4270 buf_t bp;
4295 while (((bp = TAILQ_FIRST(&bufqueues[BQ_META]))) &&
4296 (now > bp->b_timestamp) &&
4297 (now - bp->b_timestamp > thresh_hold) &&
4301 bremfree_locked(bp);
4305 bp->b_owner = current_thread();
4306 bp->b_tag = 12;
4310 if (ISSET(bp->b_flags, B_DELWRI)) {
4311 SET(bp->b_lflags, BL_WANTDEALLOC);
4313 bmovelaundry(bp);
4323 SET(bp->b_lflags, BL_BUSY);
4329 bremhash(bp);
4330 if (bp->b_vp) {
4331 brelvp_locked(bp);
4334 TAILQ_INSERT_TAIL(&privq, bp, b_freelist);
4351 TAILQ_FOREACH(bp, &privq, b_freelist) {
4353 if ((ISSET(bp->b_flags, B_ZALLOC)) && (buf_size(bp) >= PAGE_SIZE)) {
4357 trace(TR_BRELSE, pack(bp->b_vp, bp->b_bufsize), bp->b_lblkno);
4360 buf_free_meta_store(bp);
4363 buf_release_credentials(bp);
4366 CLR(bp->b_flags, (B_META | B_ZALLOC | B_DELWRI | B_LOCKED
4368 bp->b_whichq = BQ_EMPTY;
4369 BLISTNONE(bp);
4374 TAILQ_FOREACH(bp, &privq, b_freelist) {
4375 binshash(bp, &invalhash);
4376 CLR(bp->b_lflags, BL_BUSY);
4380 if (bp->b_owner != current_thread()) {
4383 bp->b_owner = current_thread();
4384 bp->b_tag = 13;
4426 buf_t bp, next;
4438 bp = TAILQ_FIRST(&bufqueues[whichq]);
4440 for (buf_count = 0; bp; bp = next) {
4441 next = bp->b_freelist.tqe_next;
4443 if (bp->b_vp == NULL || bp->b_vp->v_mount != mp) {
4447 if (ISSET(bp->b_flags, B_DELWRI) && !ISSET(bp->b_lflags, BL_BUSY)) {
4449 bremfree_locked(bp);
4451 bp->b_owner = current_thread();
4452 bp->b_tag = 7;
4454 SET(bp->b_lflags, BL_BUSY);
4457 flush_table[buf_count] = bp;
4758 struct buf *bp = bufqueues[q].tqh_first;
4759 if (!bp)
4763 if ((t - bp->b_timestamp) > bufqlim[q].bl_stale) {
4764 if (bcleanbuf(bp, FALSE)) {
4765 /* buf_bawrite() issued, bp not ready */
4769 SET(bp->b_flags, B_INVAL);
4770 buf_brelse(bp);