Lines in fs/xfs/xfs_buf.c matching references to the identifier bp (the struct xfs_buf pointer); each entry gives the source line number followed by the matching line.

55 static int __xfs_buf_submit(struct xfs_buf *bp, bool wait);
59 struct xfs_buf *bp)
61 return __xfs_buf_submit(bp, !(bp->b_flags & XBF_ASYNC));
64 static inline bool xfs_buf_is_uncached(struct xfs_buf *bp)
66 return bp->b_rhash_key == XFS_BUF_DADDR_NULL;
71 struct xfs_buf *bp)
78 * to be both for b_addr and bp->b_page_count > 1.
80 return bp->b_addr && bp->b_page_count > 1;
85 struct xfs_buf *bp)
87 return (bp->b_page_count * PAGE_SIZE);
105 struct xfs_buf *bp)
107 if (bp->b_flags & XBF_NO_IOACCT)
110 ASSERT(bp->b_flags & XBF_ASYNC);
111 spin_lock(&bp->b_lock);
112 if (!(bp->b_state & XFS_BSTATE_IN_FLIGHT)) {
113 bp->b_state |= XFS_BSTATE_IN_FLIGHT;
114 percpu_counter_inc(&bp->b_target->bt_io_count);
116 spin_unlock(&bp->b_lock);
125 struct xfs_buf *bp)
127 lockdep_assert_held(&bp->b_lock);
129 if (bp->b_state & XFS_BSTATE_IN_FLIGHT) {
130 bp->b_state &= ~XFS_BSTATE_IN_FLIGHT;
131 percpu_counter_dec(&bp->b_target->bt_io_count);
137 struct xfs_buf *bp)
139 spin_lock(&bp->b_lock);
140 __xfs_buf_ioacct_dec(bp);
141 spin_unlock(&bp->b_lock);
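
The xfs_buf_ioacct_inc()/xfs_buf_ioacct_dec() pair above (lines 105-141) keeps the per-target bt_io_count honest: a buffer is counted at most once, with the XFS_BSTATE_IN_FLIGHT bit toggled under b_lock so repeated async submissions of the same buffer do not inflate the counter. A minimal user-space sketch of that flag-guarded counter, using hypothetical buf/target types rather than the kernel structures or percpu counters:

    #include <pthread.h>
    #include <stdatomic.h>

    #define BSTATE_IN_FLIGHT 0x1

    struct target { atomic_long io_count; };   /* stands in for bt_io_count */

    struct buf {
        pthread_mutex_t lock;                  /* stands in for b_lock      */
        unsigned int state;                    /* stands in for b_state     */
        struct target *target;
    };

    /* Count the buffer at most once, however often it is submitted. */
    static void buf_ioacct_inc(struct buf *bp)
    {
        pthread_mutex_lock(&bp->lock);
        if (!(bp->state & BSTATE_IN_FLIGHT)) {
            bp->state |= BSTATE_IN_FLIGHT;
            atomic_fetch_add(&bp->target->io_count, 1);
        }
        pthread_mutex_unlock(&bp->lock);
    }

    /* Undo the accounting exactly once when the buffer leaves the I/O path. */
    static void buf_ioacct_dec(struct buf *bp)
    {
        pthread_mutex_lock(&bp->lock);
        if (bp->state & BSTATE_IN_FLIGHT) {
            bp->state &= ~BSTATE_IN_FLIGHT;
            atomic_fetch_sub(&bp->target->io_count, 1);
        }
        pthread_mutex_unlock(&bp->lock);
    }

In the kernel the counter is a percpu_counter and the flag lives in b_state; the sketch only shows the once-per-buffer discipline the flag enforces.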
154 struct xfs_buf *bp)
156 ASSERT(xfs_buf_islocked(bp));
158 bp->b_flags |= XBF_STALE;
165 bp->b_flags &= ~_XBF_DELWRI_Q;
173 spin_lock(&bp->b_lock);
174 __xfs_buf_ioacct_dec(bp);
176 atomic_set(&bp->b_lru_ref, 0);
177 if (!(bp->b_state & XFS_BSTATE_DISPOSE) &&
178 (list_lru_del_obj(&bp->b_target->bt_lru, &bp->b_lru)))
179 atomic_dec(&bp->b_hold);
181 ASSERT(atomic_read(&bp->b_hold) >= 1);
182 spin_unlock(&bp->b_lock);
187 struct xfs_buf *bp,
190 ASSERT(bp->b_maps == NULL);
191 bp->b_map_count = map_count;
194 bp->b_maps = &bp->__b_map;
198 bp->b_maps = kzalloc(map_count * sizeof(struct xfs_buf_map),
200 if (!bp->b_maps)
210 struct xfs_buf *bp)
212 if (bp->b_maps != &bp->__b_map) {
213 kfree(bp->b_maps);
214 bp->b_maps = NULL;
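
xfs_buf_get_maps() and xfs_buf_free_maps() (lines 187-214) use a small-array optimisation: the common single-extent buffer points b_maps at the embedded __b_map, and only multi-extent buffers allocate a separate array, which is why the free side compares the pointer against the embedded slot before calling kfree(). A user-space sketch of the same idea, with hypothetical names:

    #include <stdlib.h>

    struct buf_map { long long bn; int len; };

    struct buf {
        struct buf_map *maps;        /* points at inline_map or a heap array */
        struct buf_map  inline_map;  /* covers the common single-map case    */
        int             map_count;
    };

    static int buf_get_maps(struct buf *bp, int map_count)
    {
        bp->map_count = map_count;
        if (map_count == 1) {
            bp->maps = &bp->inline_map;
            return 0;
        }
        bp->maps = calloc(map_count, sizeof(*bp->maps));
        return bp->maps ? 0 : -1;
    }

    static void buf_free_maps(struct buf *bp)
    {
        if (bp->maps != &bp->inline_map) {   /* never free the embedded slot */
            free(bp->maps);
            bp->maps = NULL;
        }
    }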
226 struct xfs_buf *bp;
231 bp = kmem_cache_zalloc(xfs_buf_cache,
240 atomic_set(&bp->b_hold, 1);
241 atomic_set(&bp->b_lru_ref, 1);
242 init_completion(&bp->b_iowait);
243 INIT_LIST_HEAD(&bp->b_lru);
244 INIT_LIST_HEAD(&bp->b_list);
245 INIT_LIST_HEAD(&bp->b_li_list);
246 sema_init(&bp->b_sema, 0); /* held, no waiters */
247 spin_lock_init(&bp->b_lock);
248 bp->b_target = target;
249 bp->b_mount = target->bt_mount;
250 bp->b_flags = flags;
257 error = xfs_buf_get_maps(bp, nmaps);
259 kmem_cache_free(xfs_buf_cache, bp);
263 bp->b_rhash_key = map[0].bm_bn;
264 bp->b_length = 0;
266 bp->b_maps[i].bm_bn = map[i].bm_bn;
267 bp->b_maps[i].bm_len = map[i].bm_len;
268 bp->b_length += map[i].bm_len;
271 atomic_set(&bp->b_pin_count, 0);
272 init_waitqueue_head(&bp->b_waiters);
274 XFS_STATS_INC(bp->b_mount, xb_create);
275 trace_xfs_buf_init(bp, _RET_IP_);
277 *bpp = bp;
283 struct xfs_buf *bp)
287 ASSERT(bp->b_flags & _XBF_PAGES);
289 if (xfs_buf_is_vmapped(bp))
290 vm_unmap_ram(bp->b_addr, bp->b_page_count);
292 for (i = 0; i < bp->b_page_count; i++) {
293 if (bp->b_pages[i])
294 __free_page(bp->b_pages[i]);
296 mm_account_reclaimed_pages(bp->b_page_count);
298 if (bp->b_pages != bp->b_page_array)
299 kfree(bp->b_pages);
300 bp->b_pages = NULL;
301 bp->b_flags &= ~_XBF_PAGES;
308 struct xfs_buf *bp = container_of(cb, struct xfs_buf, b_rcu);
310 xfs_buf_free_maps(bp);
311 kmem_cache_free(xfs_buf_cache, bp);
316 struct xfs_buf *bp)
318 trace_xfs_buf_free(bp, _RET_IP_);
320 ASSERT(list_empty(&bp->b_lru));
322 if (xfs_buftarg_is_mem(bp->b_target))
323 xmbuf_unmap_page(bp);
324 else if (bp->b_flags & _XBF_PAGES)
325 xfs_buf_free_pages(bp);
326 else if (bp->b_flags & _XBF_KMEM)
327 kfree(bp->b_addr);
329 call_rcu(&bp->b_rcu, xfs_buf_free_callback);
334 struct xfs_buf *bp,
338 size_t size = BBTOB(bp->b_length);
344 bp->b_addr = kmalloc(size, gfp_mask);
345 if (!bp->b_addr)
348 if (((unsigned long)(bp->b_addr + size - 1) & PAGE_MASK) !=
349 ((unsigned long)bp->b_addr & PAGE_MASK)) {
351 kfree(bp->b_addr);
352 bp->b_addr = NULL;
355 bp->b_offset = offset_in_page(bp->b_addr);
356 bp->b_pages = bp->b_page_array;
357 bp->b_pages[0] = kmem_to_page(bp->b_addr);
358 bp->b_page_count = 1;
359 bp->b_flags |= _XBF_KMEM;
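
xfs_buf_alloc_kmem() (lines 334-359) only keeps a kmalloc()ed buffer if it sits entirely within one page: the page mask of the first and last byte must match, otherwise the slab memory is released and the caller falls back to page-based allocation. The predicate itself, as a user-space sketch assuming a power-of-two PAGE_SIZE:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    /*
     * True if [addr, addr + size) crosses a page boundary, i.e. the first
     * and the last byte of the range do not live on the same page.
     */
    static bool crosses_page(const void *addr, size_t size)
    {
        uintptr_t first = (uintptr_t)addr;
        uintptr_t last  = first + size - 1;

        return (first & PAGE_MASK) != (last & PAGE_MASK);
    }

Keeping the whole buffer inside one page lets a single page plus a byte offset describe it, which is what lines 355-358 record.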
365 struct xfs_buf *bp,
375 bp->b_page_count = DIV_ROUND_UP(BBTOB(bp->b_length), PAGE_SIZE);
376 if (bp->b_page_count <= XB_PAGES) {
377 bp->b_pages = bp->b_page_array;
379 bp->b_pages = kzalloc(sizeof(struct page *) * bp->b_page_count,
381 if (!bp->b_pages)
384 bp->b_flags |= _XBF_PAGES;
398 filled = alloc_pages_bulk_array(gfp_mask, bp->b_page_count,
399 bp->b_pages);
400 if (filled == bp->b_page_count) {
401 XFS_STATS_INC(bp->b_mount, xb_page_found);
409 xfs_buf_free_pages(bp);
413 XFS_STATS_INC(bp->b_mount, xb_page_retries);
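
xfs_buf_alloc_pages() (lines 365-413) sizes b_page_count from the buffer length, reuses the embedded b_page_array when the count fits, and then loops on alloc_pages_bulk_array(), which may return fewer pages than requested; partial progress is kept across retries and a retry statistic is bumped each time around. A user-space sketch of that retry-until-full loop, with a toy allocator standing in for the bulk page allocator:

    #include <stdlib.h>
    #include <stddef.h>

    #define PAGE_SIZE 4096

    /*
     * Toy stand-in for a bulk allocator: fills empty slots in order but
     * deliberately stops after doing half the remaining work, the way a
     * real bulk allocation may make only partial progress.  Returns the
     * total number of populated slots.
     */
    static size_t bulk_alloc(void **slots, size_t want)
    {
        size_t filled = 0, done = 0;

        for (size_t i = 0; i < want; i++) {
            if (slots[i]) {                  /* keep pages from earlier calls */
                filled++;
                continue;
            }
            if (done >= (want + 1) / 2)      /* simulate a partial allocation */
                break;
            slots[i] = malloc(PAGE_SIZE);
            if (!slots[i])
                break;
            filled++;
            done++;
        }
        return filled;
    }

    /* Retry until every slot holds a page, as xfs_buf_alloc_pages() does. */
    static void alloc_all(void **slots, size_t want)
    {
        while (bulk_alloc(slots, want) < want) {
            /* the kernel backs off here before retrying */
        }
    }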
424 struct xfs_buf *bp,
427 ASSERT(bp->b_flags & _XBF_PAGES);
428 if (bp->b_page_count == 1) {
430 bp->b_addr = page_address(bp->b_pages[0]);
432 bp->b_addr = NULL;
454 bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count,
456 if (bp->b_addr)
462 if (!bp->b_addr)
478 const struct xfs_buf *bp = obj;
486 if (bp->b_rhash_key != map->bm_bn)
489 if (unlikely(bp->b_length != map->bm_len)) {
499 ASSERT(bp->b_flags & XBF_STALE);
558 struct xfs_buf *bp,
562 if (!xfs_buf_trylock(bp)) {
563 XFS_STATS_INC(bp->b_mount, xb_busy_locked);
567 xfs_buf_lock(bp);
568 XFS_STATS_INC(bp->b_mount, xb_get_locked_waited);
576 if (bp->b_flags & XBF_STALE) {
578 xfs_buf_unlock(bp);
581 ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
582 bp->b_flags &= _XBF_KMEM | _XBF_PAGES;
583 bp->b_ops = NULL;
595 struct xfs_buf *bp;
599 bp = rhashtable_lookup(&bch->bc_hash, map, xfs_buf_hash_params);
600 if (!bp || !atomic_inc_not_zero(&bp->b_hold)) {
606 error = xfs_buf_find_lock(bp, flags);
608 xfs_buf_rele(bp);
612 trace_xfs_buf_find(bp, flags, _RET_IP_);
613 *bpp = bp;
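
xfs_buf_lookup() (lines 595-613) finds the buffer in the per-cache rhashtable and takes its hold with atomic_inc_not_zero(): if b_hold has already dropped to zero the buffer is being torn down, so the lookup is treated as a miss rather than resurrecting a dying object. The inc-not-zero primitive can be sketched in user space with a compare-exchange loop:

    #include <stdatomic.h>
    #include <stdbool.h>

    /*
     * Take a reference only if the object is still live (count > 0).
     * A false return means the count already reached zero and the
     * object must be treated as gone.
     */
    static bool ref_inc_not_zero(atomic_int *count)
    {
        int old = atomic_load(count);

        while (old != 0) {
            if (atomic_compare_exchange_weak(count, &old, old + 1))
                return true;
            /* a failed exchange reloads 'old'; just try again */
        }
        return false;
    }

A cache lookup then becomes: find the object, call ref_inc_not_zero() on its hold count, and treat a false return exactly like a missing entry, which is what line 600 does.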
633 struct xfs_buf *bp;
656 bp = rhashtable_lookup_get_insert_fast(&bch->bc_hash,
658 if (IS_ERR(bp)) {
659 error = PTR_ERR(bp);
663 if (bp) {
665 atomic_inc(&bp->b_hold);
667 error = xfs_buf_find_lock(bp, flags);
669 xfs_buf_rele(bp);
671 *bpp = bp;
726 struct xfs_buf *bp = NULL;
743 error = xfs_buf_lookup(bch, &cmap, flags, &bp);
748 if (unlikely(!bp)) {
756 flags, &bp);
766 if (!bp->b_addr) {
767 error = _xfs_buf_map_pages(bp, flags);
771 bp->b_page_count);
772 xfs_buf_relse(bp);
782 xfs_buf_ioerror(bp, 0);
785 trace_xfs_buf_get(bp, flags, _RET_IP_);
786 *bpp = bp;
797 struct xfs_buf *bp,
801 ASSERT(bp->b_maps[0].bm_bn != XFS_BUF_DADDR_NULL);
803 bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_READ_AHEAD | XBF_DONE);
804 bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | XBF_READ_AHEAD);
806 return xfs_buf_submit(bp);
828 struct xfs_buf *bp,
831 ASSERT(bp->b_flags & XBF_DONE);
832 ASSERT(bp->b_error == 0);
834 if (!ops || bp->b_ops)
837 bp->b_ops = ops;
838 bp->b_ops->verify_read(bp);
839 if (bp->b_error)
840 bp->b_flags &= ~XBF_DONE;
841 return bp->b_error;
854 struct xfs_buf *bp;
860 error = xfs_buf_get_map(target, map, nmaps, flags, &bp);
864 trace_xfs_buf_read(bp, flags, _RET_IP_);
866 if (!(bp->b_flags & XBF_DONE)) {
869 bp->b_ops = ops;
870 error = _xfs_buf_read(bp, flags);
877 error = xfs_buf_reverify(bp, ops);
881 xfs_buf_relse(bp);
886 bp->b_flags &= ~XBF_READ;
887 ASSERT(bp->b_ops != NULL || ops == NULL);
909 xfs_buf_ioerror_alert(bp, fa);
911 bp->b_flags &= ~XBF_DONE;
912 xfs_buf_stale(bp);
913 xfs_buf_relse(bp);
921 *bpp = bp;
936 struct xfs_buf *bp;
946 XBF_TRYLOCK | XBF_ASYNC | XBF_READ_AHEAD, &bp, ops,
965 struct xfs_buf *bp;
970 error = xfs_buf_get_uncached(target, numblks, flags, &bp);
975 ASSERT(bp->b_map_count == 1);
976 bp->b_rhash_key = XFS_BUF_DADDR_NULL;
977 bp->b_maps[0].bm_bn = daddr;
978 bp->b_flags |= XBF_READ;
979 bp->b_ops = ops;
981 xfs_buf_submit(bp);
982 if (bp->b_error) {
983 error = bp->b_error;
984 xfs_buf_relse(bp);
988 *bpp = bp;
1000 struct xfs_buf *bp;
1006 error = _xfs_buf_alloc(target, &map, 1, flags & XBF_NO_IOACCT, &bp);
1010 if (xfs_buftarg_is_mem(bp->b_target))
1011 error = xmbuf_map_page(bp);
1013 error = xfs_buf_alloc_pages(bp, flags);
1017 error = _xfs_buf_map_pages(bp, 0);
1024 trace_xfs_buf_get_uncached(bp, _RET_IP_);
1025 *bpp = bp;
1029 xfs_buf_free(bp);
1040 struct xfs_buf *bp)
1042 trace_xfs_buf_hold(bp, _RET_IP_);
1043 atomic_inc(&bp->b_hold);
1048 struct xfs_buf *bp)
1050 ASSERT(list_empty(&bp->b_lru));
1051 if (atomic_dec_and_test(&bp->b_hold)) {
1052 xfs_buf_ioacct_dec(bp);
1053 xfs_buf_free(bp);
1059 struct xfs_buf *bp)
1061 struct xfs_buftarg *btp = bp->b_target;
1062 struct xfs_perag *pag = bp->b_pag;
1067 trace_xfs_buf_rele(bp, _RET_IP_);
1069 ASSERT(atomic_read(&bp->b_hold) > 0);
1076 * reference until we take bp->b_lock. Hence if we don't grab b_lock
1081 spin_lock(&bp->b_lock);
1082 release = atomic_dec_and_lock(&bp->b_hold, &bch->bc_lock);
1090 if ((atomic_read(&bp->b_hold) == 1) && !list_empty(&bp->b_lru))
1091 __xfs_buf_ioacct_dec(bp);
1096 __xfs_buf_ioacct_dec(bp);
1097 if (!(bp->b_flags & XBF_STALE) && atomic_read(&bp->b_lru_ref)) {
1103 if (list_lru_add_obj(&btp->bt_lru, &bp->b_lru)) {
1104 bp->b_state &= ~XFS_BSTATE_DISPOSE;
1105 atomic_inc(&bp->b_hold);
1115 if (!(bp->b_state & XFS_BSTATE_DISPOSE)) {
1116 list_lru_del_obj(&btp->bt_lru, &bp->b_lru);
1118 ASSERT(list_empty(&bp->b_lru));
1121 ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
1122 rhashtable_remove_fast(&bch->bc_hash, &bp->b_rhash_head,
1131 spin_unlock(&bp->b_lock);
1134 xfs_buf_free(bp);
1142 struct xfs_buf *bp)
1144 trace_xfs_buf_rele(bp, _RET_IP_);
1145 if (xfs_buf_is_uncached(bp))
1146 xfs_buf_rele_uncached(bp);
1148 xfs_buf_rele_cached(bp);
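
The release side in xfs_buf_rele_cached() (lines 1059-1134) mirrors that lookup: the hold is dropped with atomic_dec_and_lock() against the cache lock, and only the thread that takes the count to zero decides, under that lock, whether the buffer parks on the LRU (re-gaining a hold for the list at line 1105) or is removed from the hash table and freed. A user-space sketch of dec-and-lock, assuming a plain mutex for the cache lock:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>

    /*
     * Drop one reference.  If it was the last one, return true with
     * *lock held so the caller can unhash or re-list the object before
     * anyone can find it again; otherwise return false without the lock.
     */
    static bool ref_dec_and_lock(atomic_int *count, pthread_mutex_t *lock)
    {
        int old = atomic_load(count);

        /* fast path: not the last reference, avoid the lock entirely */
        while (old > 1) {
            if (atomic_compare_exchange_weak(count, &old, old - 1))
                return false;
        }

        pthread_mutex_lock(lock);
        if (atomic_fetch_sub(count, 1) == 1)
            return true;               /* last reference: lock stays held */
        pthread_mutex_unlock(lock);
        return false;
    }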
1164 struct xfs_buf *bp)
1168 locked = down_trylock(&bp->b_sema) == 0;
1170 trace_xfs_buf_trylock(bp, _RET_IP_);
1172 trace_xfs_buf_trylock_fail(bp, _RET_IP_);
1187 struct xfs_buf *bp)
1189 trace_xfs_buf_lock(bp, _RET_IP_);
1191 if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
1192 xfs_log_force(bp->b_mount, 0);
1193 down(&bp->b_sema);
1195 trace_xfs_buf_lock_done(bp, _RET_IP_);
1200 struct xfs_buf *bp)
1202 ASSERT(xfs_buf_islocked(bp));
1204 up(&bp->b_sema);
1205 trace_xfs_buf_unlock(bp, _RET_IP_);
1210 struct xfs_buf *bp)
1214 if (atomic_read(&bp->b_pin_count) == 0)
1217 add_wait_queue(&bp->b_waiters, &wait);
1220 if (atomic_read(&bp->b_pin_count) == 0)
1224 remove_wait_queue(&bp->b_waiters, &wait);
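
The lock helpers at lines 1164-1224 show two more idioms: the buffer lock is a counting semaphore (taken with down()/down_trylock(), released with up(), and created already held by sema_init(&bp->b_sema, 0) at line 246), and xfs_buf_wait_unpin() open-codes the wait-queue pattern of adding itself to b_waiters, re-checking b_pin_count and sleeping until the count hits zero. The same wait can be sketched in user space with a condition variable instead of a kernel waitqueue:

    #include <pthread.h>

    struct pin_state {
        pthread_mutex_t lock;
        pthread_cond_t  unpinned;
        int             pin_count;
    };

    /* Sleep until nothing holds the buffer pinned, as before a write. */
    static void wait_unpin(struct pin_state *p)
    {
        pthread_mutex_lock(&p->lock);
        while (p->pin_count != 0)
            pthread_cond_wait(&p->unpinned, &p->lock);
        pthread_mutex_unlock(&p->lock);
    }

    /* Drop a pin; the last unpin wakes every waiting writer. */
    static void unpin(struct pin_state *p)
    {
        pthread_mutex_lock(&p->lock);
        if (--p->pin_count == 0)
            pthread_cond_broadcast(&p->unpinned);
        pthread_mutex_unlock(&p->lock);
    }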
1230 struct xfs_buf *bp)
1235 if (bp->b_target != lasttarg ||
1238 xfs_buf_ioerror_alert(bp, __this_address);
1240 lasttarg = bp->b_target;
1249 struct xfs_buf *bp,
1252 struct xfs_mount *mp = bp->b_mount;
1255 ++bp->b_retries > cfg->max_retries)
1258 time_after(jiffies, cfg->retry_timeout + bp->b_first_retry_time))
1270 * caller handle the error in bp->b_error appropriately.
1287 struct xfs_buf *bp)
1289 struct xfs_mount *mp = bp->b_mount;
1299 xfs_buf_ioerror_alert_ratelimited(bp);
1305 if (bp->b_flags & _XBF_LOGRECOVERY) {
1313 if (!(bp->b_flags & XBF_ASYNC))
1316 trace_xfs_buf_iodone_async(bp, _RET_IP_);
1318 cfg = xfs_error_get_cfg(mp, XFS_ERR_METADATA, bp->b_error);
1319 if (bp->b_last_error != bp->b_error ||
1320 !(bp->b_flags & (XBF_STALE | XBF_WRITE_FAIL))) {
1321 bp->b_last_error = bp->b_error;
1323 !bp->b_first_retry_time)
1324 bp->b_first_retry_time = jiffies;
1332 if (xfs_buf_ioerror_permanent(bp, cfg)) {
1338 if (bp->b_flags & _XBF_INODES)
1339 xfs_buf_inode_io_fail(bp);
1340 else if (bp->b_flags & _XBF_DQUOTS)
1341 xfs_buf_dquot_io_fail(bp);
1343 ASSERT(list_empty(&bp->b_li_list));
1344 xfs_buf_ioerror(bp, 0);
1345 xfs_buf_relse(bp);
1349 xfs_buf_ioerror(bp, 0);
1350 bp->b_flags |= (XBF_DONE | XBF_WRITE_FAIL);
1351 xfs_buf_submit(bp);
1354 xfs_buf_stale(bp);
1355 bp->b_flags |= XBF_DONE;
1356 bp->b_flags &= ~XBF_WRITE;
1357 trace_xfs_buf_error_relse(bp, _RET_IP_);
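
The error handling around lines 1249-1357 implements the configurable write-retry policy: b_retries and b_first_retry_time are reset whenever a new error value is seen (lines 1319-1324), transient failures are resubmitted with XBF_WRITE_FAIL set (lines 1349-1351), and the failure only becomes permanent once the configured retry count or retry timeout is exceeded, at which point the buffer is staled (lines 1354-1357). The permanence test can be sketched like this, using seconds instead of jiffies and a sign convention for "retry forever" that is an assumption of the sketch rather than the kernel's sentinel values:

    #include <stdbool.h>
    #include <time.h>

    struct error_cfg {
        int    max_retries;       /* negative: retry forever             */
        time_t retry_timeout;     /* 0: no time limit, otherwise seconds */
    };

    struct buf_err_state {
        int    retries;           /* failures seen for the current error */
        time_t first_retry_time;  /* when this error was first seen      */
    };

    static bool error_is_permanent(const struct buf_err_state *b,
                                   const struct error_cfg *cfg, time_t now)
    {
        if (cfg->max_retries >= 0 && b->retries > cfg->max_retries)
            return true;
        if (cfg->retry_timeout &&
            now > b->first_retry_time + cfg->retry_timeout)
            return true;
        return false;
    }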
1363 struct xfs_buf *bp)
1365 trace_xfs_buf_iodone(bp, _RET_IP_);
1371 if (!bp->b_error && bp->b_io_error)
1372 xfs_buf_ioerror(bp, bp->b_io_error);
1374 if (bp->b_flags & XBF_READ) {
1375 if (!bp->b_error && bp->b_ops)
1376 bp->b_ops->verify_read(bp);
1377 if (!bp->b_error)
1378 bp->b_flags |= XBF_DONE;
1380 if (!bp->b_error) {
1381 bp->b_flags &= ~XBF_WRITE_FAIL;
1382 bp->b_flags |= XBF_DONE;
1385 if (unlikely(bp->b_error) && xfs_buf_ioend_handle_error(bp))
1389 bp->b_last_error = 0;
1390 bp->b_retries = 0;
1391 bp->b_first_retry_time = 0;
1398 if (bp->b_log_item)
1399 xfs_buf_item_done(bp);
1401 if (bp->b_flags & _XBF_INODES)
1402 xfs_buf_inode_iodone(bp);
1403 else if (bp->b_flags & _XBF_DQUOTS)
1404 xfs_buf_dquot_iodone(bp);
1408 bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD |
1411 if (bp->b_flags & XBF_ASYNC)
1412 xfs_buf_relse(bp);
1414 complete(&bp->b_iowait);
1421 struct xfs_buf *bp =
1424 xfs_buf_ioend(bp);
1429 struct xfs_buf *bp)
1431 INIT_WORK(&bp->b_ioend_work, xfs_buf_ioend_work);
1432 queue_work(bp->b_mount->m_buf_workqueue, &bp->b_ioend_work);
1437 struct xfs_buf *bp,
1442 bp->b_error = error;
1443 trace_xfs_buf_ioerror(bp, error, failaddr);
1448 struct xfs_buf *bp,
1451 xfs_buf_alert_ratelimited(bp, "XFS: metadata IO error",
1453 func, (uint64_t)xfs_buf_daddr(bp),
1454 bp->b_length, -bp->b_error);
1465 struct xfs_buf *bp)
1467 bp->b_flags &= ~XBF_DONE;
1468 xfs_buf_stale(bp);
1469 xfs_buf_ioerror(bp, -EIO);
1470 xfs_buf_ioend(bp);
1475 struct xfs_buf *bp)
1479 ASSERT(xfs_buf_islocked(bp));
1481 bp->b_flags |= XBF_WRITE;
1482 bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q |
1485 error = xfs_buf_submit(bp);
1487 xfs_force_shutdown(bp->b_mount, SHUTDOWN_META_IO_ERROR);
1495 struct xfs_buf *bp = (struct xfs_buf *)bio->bi_private;
1498 (bp->b_flags & XBF_WRITE) && (bp->b_flags & XBF_ASYNC) &&
1499 XFS_TEST_ERROR(false, bp->b_mount, XFS_ERRTAG_BUF_IOERROR))
1509 cmpxchg(&bp->b_io_error, 0, error);
1512 if (!bp->b_error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
1513 invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp));
1515 if (atomic_dec_and_test(&bp->b_io_remaining) == 1)
1516 xfs_buf_ioend_async(bp);
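
xfs_buf_bio_end_io() (lines 1493-1516) has to cope with a buffer that was split across several bios: cmpxchg(&bp->b_io_error, 0, error) records only the first error so later completions cannot overwrite it, and the shared b_io_remaining count decides which completion runs the final xfs_buf_ioend_async(). The first-error idiom in user-space form:

    #include <stdatomic.h>

    /*
     * Record only the first error seen across all chunks of one buffer;
     * later failures deliberately lose the compare-exchange race.
     */
    static void record_first_error(atomic_int *first_error, int error)
    {
        int expected = 0;

        if (error)
            atomic_compare_exchange_strong(first_error, &expected, error);
    }

xfs_buf_ioend() later folds b_io_error into b_error (lines 1371-1372) once all bios have completed, so callers only ever see the first failure.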
1522 struct xfs_buf *bp,
1529 unsigned int total_nr_pages = bp->b_page_count;
1532 sector_t sector = bp->b_maps[map].bm_bn;
1548 size = min_t(int, BBTOB(bp->b_maps[map].bm_len), *count);
1553 atomic_inc(&bp->b_io_remaining);
1556 bio = bio_alloc(bp->b_target->bt_bdev, nr_pages, op, GFP_NOIO);
1559 bio->bi_private = bp;
1567 rbytes = bio_add_page(bio, bp->b_pages[page_index], nbytes,
1579 if (xfs_buf_is_vmapped(bp)) {
1580 flush_kernel_vmap_range(bp->b_addr,
1581 xfs_buf_vmap_len(bp));
1591 atomic_dec(&bp->b_io_remaining);
1592 xfs_buf_ioerror(bp, -EIO);
1600 struct xfs_buf *bp)
1612 bp->b_error = 0;
1614 if (bp->b_flags & XBF_WRITE) {
1622 if (bp->b_ops) {
1623 bp->b_ops->verify_write(bp);
1624 if (bp->b_error) {
1625 xfs_force_shutdown(bp->b_mount,
1629 } else if (bp->b_rhash_key != XFS_BUF_DADDR_NULL) {
1630 struct xfs_mount *mp = bp->b_mount;
1639 __func__, xfs_buf_daddr(bp),
1640 bp->b_length);
1641 xfs_hex_dump(bp->b_addr,
1648 if (bp->b_flags & XBF_READ_AHEAD)
1656 if (xfs_buftarg_is_mem(bp->b_target)) {
1657 xfs_buf_ioend(bp);
1667 offset = bp->b_offset;
1668 size = BBTOB(bp->b_length);
1670 for (i = 0; i < bp->b_map_count; i++) {
1671 xfs_buf_ioapply_map(bp, i, &offset, &size, op);
1672 if (bp->b_error)
1685 struct xfs_buf *bp)
1687 ASSERT(!(bp->b_flags & XBF_ASYNC));
1689 trace_xfs_buf_iowait(bp, _RET_IP_);
1690 wait_for_completion(&bp->b_iowait);
1691 trace_xfs_buf_iowait_done(bp, _RET_IP_);
1693 return bp->b_error;
1704 struct xfs_buf *bp,
1709 trace_xfs_buf_submit(bp, _RET_IP_);
1711 ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
1728 if (bp->b_mount->m_log &&
1729 xlog_is_shutdown(bp->b_mount->m_log)) {
1730 xfs_buf_ioend_fail(bp);
1739 xfs_buf_hold(bp);
1741 if (bp->b_flags & XBF_WRITE)
1742 xfs_buf_wait_unpin(bp);
1745 bp->b_io_error = 0;
1752 atomic_set(&bp->b_io_remaining, 1);
1753 if (bp->b_flags & XBF_ASYNC)
1754 xfs_buf_ioacct_inc(bp);
1755 _xfs_buf_ioapply(bp);
1762 if (atomic_dec_and_test(&bp->b_io_remaining) == 1) {
1763 if (bp->b_error || !(bp->b_flags & XBF_ASYNC))
1764 xfs_buf_ioend(bp);
1766 xfs_buf_ioend_async(bp);
1770 error = xfs_buf_iowait(bp);
1777 xfs_buf_rele(bp);
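
__xfs_buf_submit() (lines 1704-1777) wraps the whole I/O in two guards: an extra hold taken at line 1739 keeps the buffer alive for the duration, and b_io_remaining is primed to 1 at line 1752 as a completion barrier before any bios are built, so the end_io handlers cannot finish the buffer while _xfs_buf_ioapply() is still queueing work. Dropping that barrier at line 1762 either completes the I/O inline (error or sync) or leaves it to the last bio, and sync callers then sleep in xfs_buf_iowait(). A self-contained user-space sketch of the barrier ordering:

    #include <stdatomic.h>
    #include <stdio.h>

    struct io {
        atomic_int remaining;    /* outstanding chunks + one barrier ref */
        int        nchunks;
    };

    static void io_complete(struct io *io)
    {
        printf("all chunks finished\n");   /* verify + wake waiter in XFS */
    }

    /* Per-chunk completion: whoever takes the count to zero finishes up. */
    static void chunk_end_io(struct io *io)
    {
        if (atomic_fetch_sub(&io->remaining, 1) == 1)
            io_complete(io);
    }

    static void submit(struct io *io)
    {
        /* barrier reference: completion can't run while we're queueing */
        atomic_store(&io->remaining, 1);

        for (int i = 0; i < io->nchunks; i++) {
            atomic_fetch_add(&io->remaining, 1);
            /* queue the chunk; its end-of-I/O handler calls chunk_end_io() */
        }

        /* drop the barrier; if every chunk already completed, finish here */
        chunk_end_io(io);
    }

    int main(void)
    {
        struct io io = { .nchunks = 3 };

        submit(&io);
        /* pretend the three queued chunks now complete asynchronously */
        for (int i = 0; i < io.nchunks; i++)
            chunk_end_io(&io);
        return 0;
    }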
1783 struct xfs_buf *bp,
1788 if (bp->b_addr)
1789 return bp->b_addr + offset;
1791 page = bp->b_pages[offset >> PAGE_SHIFT];
1797 struct xfs_buf *bp,
1808 page_index = (boff + bp->b_offset) >> PAGE_SHIFT;
1809 page_offset = (boff + bp->b_offset) & ~PAGE_MASK;
1810 page = bp->b_pages[page_index];
1812 BBTOB(bp->b_length) - boff);
1835 struct xfs_buf *bp,
1838 ASSERT(bp->b_flags & XBF_DONE);
1840 xfs_buf_corruption_error(bp, fa);
1841 xfs_buf_stale(bp);
1861 struct xfs_buf *bp = container_of(item, struct xfs_buf, b_lru);
1864 if (atomic_read(&bp->b_hold) > 1) {
1866 trace_xfs_buf_drain_buftarg(bp, _RET_IP_);
1869 if (!spin_trylock(&bp->b_lock))
1876 atomic_set(&bp->b_lru_ref, 0);
1877 bp->b_state |= XFS_BSTATE_DISPOSE;
1879 spin_unlock(&bp->b_lock);
1923 struct xfs_buf *bp;
1924 bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
1925 list_del_init(&bp->b_lru);
1926 if (bp->b_flags & XBF_WRITE_FAIL) {
1928 xfs_buf_alert_ratelimited(bp,
1931 (long long)xfs_buf_daddr(bp));
1933 xfs_buf_rele(bp);
1959 struct xfs_buf *bp = container_of(item, struct xfs_buf, b_lru);
1963 * we are inverting the lru lock/bp->b_lock here, so use a trylock.
1966 if (!spin_trylock(&bp->b_lock))
1973 if (atomic_add_unless(&bp->b_lru_ref, -1, 0)) {
1974 spin_unlock(&bp->b_lock);
1978 bp->b_state |= XFS_BSTATE_DISPOSE;
1980 spin_unlock(&bp->b_lock);
1997 struct xfs_buf *bp;
1998 bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
1999 list_del_init(&bp->b_lru);
2000 xfs_buf_rele(bp);
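
The shrinker path at lines 1959-2000 ages buffers rather than freeing them on first sight: xfs_buftarg_isolate() calls atomic_add_unless(&bp->b_lru_ref, -1, 0), so a buffer survives as many shrinker passes as xfs_buf_set_ref() granted it, and only a buffer whose LRU reference has already hit zero is marked XFS_BSTATE_DISPOSE and handed to the dispose list that xfs_buftarg_shrink_scan() releases. The add-unless primitive as a user-space sketch:

    #include <stdatomic.h>
    #include <stdbool.h>

    /*
     * Add 'delta' unless the counter currently holds 'forbidden'; return
     * true if the addition happened.  With delta = -1 and forbidden = 0
     * this is the LRU aging step: tick the counter down, but report
     * buffers that have already aged out so they can be disposed of.
     */
    static bool counter_add_unless(atomic_int *v, int delta, int forbidden)
    {
        int old = atomic_load(v);

        while (old != forbidden) {
            if (atomic_compare_exchange_weak(v, &old, old + delta))
                return true;
        }
        return false;
    }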
2134 struct xfs_buf *bp)
2136 list_del_init(&bp->b_list);
2137 wake_up_var(&bp->b_list);
2150 struct xfs_buf *bp;
2153 bp = list_first_entry(list, struct xfs_buf, b_list);
2155 xfs_buf_lock(bp);
2156 bp->b_flags &= ~_XBF_DELWRI_Q;
2157 xfs_buf_list_del(bp);
2158 xfs_buf_relse(bp);
2175 struct xfs_buf *bp,
2178 ASSERT(xfs_buf_islocked(bp));
2179 ASSERT(!(bp->b_flags & XBF_READ));
2186 if (bp->b_flags & _XBF_DELWRI_Q) {
2187 trace_xfs_buf_delwri_queued(bp, _RET_IP_);
2191 trace_xfs_buf_delwri_queue(bp, _RET_IP_);
2201 bp->b_flags |= _XBF_DELWRI_Q;
2202 if (list_empty(&bp->b_list)) {
2203 atomic_inc(&bp->b_hold);
2204 list_add_tail(&bp->b_list, list);
2218 struct xfs_buf *bp,
2227 while (!list_empty(&bp->b_list)) {
2228 xfs_buf_unlock(bp);
2229 wait_var_event(&bp->b_list, list_empty(&bp->b_list));
2230 xfs_buf_lock(bp);
2233 ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
2235 xfs_buf_delwri_queue(bp, buffer_list);
2250 struct xfs_buf *bp = container_of(b, struct xfs_buf, b_list);
2253 diff = ap->b_maps[0].bm_bn - bp->b_maps[0].bm_bn;
2273 struct xfs_buf *bp, *n;
2280 list_for_each_entry_safe(bp, n, buffer_list, b_list) {
2282 if (!xfs_buf_trylock(bp))
2284 if (xfs_buf_ispinned(bp)) {
2285 xfs_buf_unlock(bp);
2290 xfs_buf_lock(bp);
2299 if (!(bp->b_flags & _XBF_DELWRI_Q)) {
2300 xfs_buf_list_del(bp);
2301 xfs_buf_relse(bp);
2305 trace_xfs_buf_delwri_split(bp, _RET_IP_);
2313 bp->b_flags &= ~_XBF_DELWRI_Q;
2314 bp->b_flags |= XBF_WRITE;
2316 bp->b_flags &= ~XBF_ASYNC;
2317 list_move_tail(&bp->b_list, wait_list);
2319 bp->b_flags |= XBF_ASYNC;
2320 xfs_buf_list_del(bp);
2322 __xfs_buf_submit(bp, false);
2366 struct xfs_buf *bp;
2372 bp = list_first_entry(&wait_list, struct xfs_buf, b_list);
2374 xfs_buf_list_del(bp);
2380 error2 = xfs_buf_iowait(bp);
2381 xfs_buf_relse(bp);
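
For delayed writes, xfs_buf_cmp() (lines 2250-2253) orders buffers by their first map's disk address and is used to sort the delwri list before xfs_buf_delwri_submit_buffers() (lines 2273-2322) walks it, so writeback goes out in ascending block order while pinned or already-locked buffers are simply skipped for a later push; xfs_buf_delwri_submit() (lines 2366-2381) then waits on the buffers it kept on its wait list. The ordering step as a user-space sketch, using qsort() over an array in place of the kernel's list sort:

    #include <stdlib.h>

    struct buf { long long daddr; /* first disk block of the buffer */ };

    /* Ascending disk address, like xfs_buf_cmp() on b_maps[0].bm_bn. */
    static int buf_cmp(const void *a, const void *b)
    {
        const struct buf *ba = *(const struct buf * const *)a;
        const struct buf *bb = *(const struct buf * const *)b;

        if (ba->daddr < bb->daddr)
            return -1;
        if (ba->daddr > bb->daddr)
            return 1;
        return 0;
    }

    static void sort_for_submit(struct buf **bufs, size_t n)
    {
        qsort(bufs, n, sizeof(bufs[0]), buf_cmp);
        /* submitting bufs[0..n-1] in this order keeps writeback sequential */
    }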
2406 struct xfs_buf *bp,
2412 ASSERT(bp->b_flags & _XBF_DELWRI_Q);
2414 trace_xfs_buf_delwri_pushbuf(bp, _RET_IP_);
2420 xfs_buf_lock(bp);
2421 list_move(&bp->b_list, &submit_list);
2422 xfs_buf_unlock(bp);
2437 error = xfs_buf_iowait(bp);
2438 bp->b_flags |= _XBF_DELWRI_Q;
2439 xfs_buf_unlock(bp);
2444 void xfs_buf_set_ref(struct xfs_buf *bp, int lru_ref)
2451 if (XFS_TEST_ERROR(false, bp->b_mount, XFS_ERRTAG_BUF_LRU_REF))
2454 atomic_set(&bp->b_lru_ref, lru_ref);
2464 struct xfs_buf *bp,
2467 struct xfs_mount *mp = bp->b_mount;
2471 if (WARN_ON(!bp->b_ops || !bp->b_ops->magic[idx]))
2473 return dmagic == bp->b_ops->magic[idx];
2482 struct xfs_buf *bp,
2485 struct xfs_mount *mp = bp->b_mount;
2489 if (WARN_ON(!bp->b_ops || !bp->b_ops->magic16[idx]))
2491 return dmagic == bp->b_ops->magic16[idx];
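
Finally, xfs_verify_magic() and xfs_verify_magic16() (lines 2464-2491) let one set of buffer ops cover both on-disk formats: the expected magic is looked up in a two-slot table indexed by whether the filesystem is a CRC-enabled (v5) format, and the on-disk value is compared in disk byte order. A user-space sketch of that two-slot check, with hypothetical names and a plain uint32_t standing in for the kernel's __be32:

    #include <stdbool.h>
    #include <stdint.h>

    struct buf_ops {
        /* magic numbers kept in on-disk (big-endian) byte order:
         * slot 0 for the non-CRC format, slot 1 for the CRC format */
        uint32_t magic[2];
    };

    static bool verify_magic(const struct buf_ops *ops, uint32_t disk_magic,
                             bool has_crc)
    {
        int idx = has_crc ? 1 : 0;

        if (!ops || !ops->magic[idx])   /* format not covered by this verifier */
            return false;
        return disk_magic == ops->magic[idx];
    }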