Lines Matching defs:btp

The entries below reference the buffer-target pointer btp (a struct xfs_buftarg *) and appear to come from fs/xfs/xfs_buf.c, the buffer cache implementation of the Linux kernel's XFS filesystem. The number at the start of each entry is its line number in that source file.

532 struct xfs_buftarg *btp,
538 ASSERT(!(BBTOB(map->bm_len) < btp->bt_meta_sectorsize));
539 ASSERT(!(BBTOB(map->bm_bn) & (xfs_off_t)btp->bt_meta_sectormask));
545 eofs = XFS_FSB_TO_BB(btp->bt_mount, btp->bt_mount->m_sb.sb_dblocks);
547 xfs_alert(btp->bt_mount,
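
Lines 532 through 547 are fragments of the pre-lookup validation step; the call at line 736 below names it xfs_buf_map_verify(). A sketch of how the fragments fit together, assuming the mainline shape of that function; the alert text, the WARN_ON() and the -EFSCORRUPTED return are reconstructions rather than listed lines:

static int
xfs_buf_map_verify(
	struct xfs_buftarg	*btp,
	struct xfs_buf_map	*map)
{
	xfs_daddr_t		eofs;

	/* Reject I/O smaller than, or misaligned to, the metadata sector. */
	ASSERT(!(BBTOB(map->bm_len) < btp->bt_meta_sectorsize));
	ASSERT(!(BBTOB(map->bm_bn) & (xfs_off_t)btp->bt_meta_sectormask));

	/*
	 * Corrupted block numbers can get through to here, so check that
	 * the buffer falls within the filesystem bounds.
	 */
	eofs = XFS_FSB_TO_BB(btp->bt_mount, btp->bt_mount->m_sb.sb_dblocks);
	if (map->bm_bn < 0 || map->bm_bn >= eofs) {
		xfs_alert(btp->bt_mount,
			  "%s: daddr 0x%llx out of range, EOFS 0x%llx",
			  __func__, map->bm_bn, eofs);
		WARN_ON(1);
		return -EFSCORRUPTED;
	}
	return 0;
}
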
623 struct xfs_buftarg *btp,
636 error = _xfs_buf_alloc(btp, map, nmaps, flags, &new_bp);
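
Lines 623 and 636 look like the top of xfs_buf_find_insert(), the cache-miss slow path invoked at line 755 below: allocate a buffer first, then insert it into the cache hash, resolving a race with a concurrent inserter via the combined lookup-and-insert. A condensed sketch assuming the mainline structure; bc_lock, bc_hash, b_pag, xfs_buf_hash_params and xfs_buf_free() are not among the listed lines:

static int
xfs_buf_find_insert(
	struct xfs_buftarg	*btp,
	struct xfs_buf_cache	*bch,
	struct xfs_perag	*pag,
	struct xfs_buf_map	*cmap,
	struct xfs_buf_map	*map,
	int			nmaps,
	xfs_buf_flags_t		flags,
	struct xfs_buf		**bpp)
{
	struct xfs_buf		*new_bp;
	struct xfs_buf		*bp;
	int			error;

	/* Allocate the new buffer up front, outside the cache lock. */
	error = _xfs_buf_alloc(btp, map, nmaps, flags, &new_bp);
	if (error)
		goto out_drop_pag;

	/* Insert it, or find a buffer that raced in ahead of us. */
	spin_lock(&bch->bc_lock);
	bp = rhashtable_lookup_get_insert_fast(&bch->bc_hash,
			&new_bp->b_rhash_head, xfs_buf_hash_params);
	if (IS_ERR(bp)) {
		error = PTR_ERR(bp);
		spin_unlock(&bch->bc_lock);
		goto out_free_buf;
	}
	if (bp) {
		/* Lost the race: hold the winner, discard our copy. */
		atomic_inc(&bp->b_hold);
		spin_unlock(&bch->bc_lock);
		xfs_buf_free(new_bp);	/* buffer locking elided */
		*bpp = bp;
		return 0;
	}

	/* The new buffer keeps the perag reference until it is freed. */
	new_bp->b_pag = pag;
	spin_unlock(&bch->bc_lock);
	*bpp = new_bp;
	return 0;

out_free_buf:
	xfs_buf_free(new_bp);
out_drop_pag:
	if (pag)
		xfs_perag_put(pag);
	return error;
}
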
691 struct xfs_buftarg *btp,
694 struct xfs_mount *mp = btp->bt_mount;
696 if (xfs_buftarg_is_mem(btp))
703 struct xfs_buftarg *btp,
708 return btp->bt_cache;
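
Lines 691 through 708 are two helpers that pick the buffer cache for a lookup: an in-memory buffer target (xfs_buftarg_is_mem()) has no allocation groups and keeps one cache in bt_cache, while a disk target caches buffers per-AG. A sketch assuming the mainline form; xfs_perag_get(), xfs_daddr_to_agno() and the pag_bcache field are reconstructions:

static inline struct xfs_perag *
xfs_buftarg_get_pag(
	struct xfs_buftarg	*btp,
	const struct xfs_buf_map *map)
{
	struct xfs_mount	*mp = btp->bt_mount;

	/* In-memory buftargs have no AGs and use a single cache. */
	if (xfs_buftarg_is_mem(btp))
		return NULL;
	return xfs_perag_get(mp, xfs_daddr_to_agno(mp, map->bm_bn));
}

static inline struct xfs_buf_cache *
xfs_buftarg_buf_cache(
	struct xfs_buftarg	*btp,
	struct xfs_perag	*pag)
{
	if (pag)
		return &pag->pag_bcache;
	return btp->bt_cache;
}
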
718 struct xfs_buftarg *btp,
736 error = xfs_buf_map_verify(btp, &cmap);
740 pag = xfs_buftarg_get_pag(btp, &cmap);
741 bch = xfs_buftarg_buf_cache(btp, pag);
749 XFS_STATS_INC(btp->bt_mount, xb_miss_locked);
755 error = xfs_buf_find_insert(btp, bch, pag, &cmap, map, nmaps,
760 XFS_STATS_INC(btp->bt_mount, xb_get_locked);
769 xfs_warn_ratelimited(btp->bt_mount,
784 XFS_STATS_INC(btp->bt_mount, xb_get);
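
Lines 718 through 784 are the buffer lookup fast path, presumably xfs_buf_get_map(): verify the requested extent, pick the cache, and either count a hit (xb_get_locked) or fall into xfs_buf_find_insert() on a miss (xb_miss_locked), mapping the buffer's pages before returning it. A condensed sketch under that assumption; xfs_buf_lookup(), _xfs_buf_map_pages(), xfs_buf_relse() and xfs_perag_put() are reconstructions:

int
xfs_buf_get_map(
	struct xfs_buftarg	*btp,
	struct xfs_buf_map	*map,
	int			nmaps,
	xfs_buf_flags_t		flags,
	struct xfs_buf		**bpp)
{
	struct xfs_buf_cache	*bch;
	struct xfs_perag	*pag;
	struct xfs_buf		*bp = NULL;
	struct xfs_buf_map	cmap = { .bm_bn = map[0].bm_bn };
	int			error;
	int			i;

	/* Collapse the (possibly discontiguous) map into one extent. */
	for (i = 0; i < nmaps; i++)
		cmap.bm_len += map[i].bm_len;

	error = xfs_buf_map_verify(btp, &cmap);
	if (error)
		return error;

	pag = xfs_buftarg_get_pag(btp, &cmap);
	bch = xfs_buftarg_buf_cache(btp, pag);

	error = xfs_buf_lookup(bch, &cmap, flags, &bp);
	if (error && error != -ENOENT)
		goto out_put_perag;

	/* Cache hits outnumber misses by at least an order of magnitude. */
	if (unlikely(!bp)) {
		XFS_STATS_INC(btp->bt_mount, xb_miss_locked);
		/* xfs_buf_find_insert() consumes the perag reference */
		error = xfs_buf_find_insert(btp, bch, pag, &cmap, map, nmaps,
				flags, &bp);
		if (error)
			return error;
	} else {
		XFS_STATS_INC(btp->bt_mount, xb_get_locked);
		if (pag)
			xfs_perag_put(pag);
	}

	if (!bp->b_addr) {
		error = _xfs_buf_map_pages(bp, flags);
		if (error) {
			xfs_warn_ratelimited(btp->bt_mount,
				"%s: failed to map %u pages", __func__,
				bp->b_page_count);
			xfs_buf_relse(bp);
			return error;
		}
	}

	XFS_STATS_INC(btp->bt_mount, xb_get);
	*bpp = bp;
	return 0;

out_put_perag:
	if (pag)
		xfs_perag_put(pag);
	return error;
}
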
1061 struct xfs_buftarg *btp = bp->b_target;
1063 struct xfs_buf_cache *bch = xfs_buftarg_buf_cache(btp, pag);
1103 if (list_lru_add_obj(&btp->bt_lru, &bp->b_lru)) {
1116 list_lru_del_obj(&btp->bt_lru, &bp->b_lru);
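
Lines 1061 through 1116 come from the release path of a cached buffer: when the last hold is dropped, a buffer whose b_lru_ref count is still positive is parked on the buftarg LRU (a successful list_lru_add_obj() hands the cache reference to the LRU), otherwise it is removed so teardown can proceed. A fragment from the middle of that function, assuming the mainline logic; bp, bch and the freebuf flag belong to the surrounding function, and XBF_STALE and XFS_BSTATE_DISPOSE are not among the listed lines:

	/*
	 * Last hold dropped: park the buffer on the buftarg LRU if it
	 * still has LRU references, otherwise take it off the LRU so
	 * it can be torn down.
	 */
	if (!(bp->b_flags & XBF_STALE) && atomic_read(&bp->b_lru_ref)) {
		if (list_lru_add_obj(&btp->bt_lru, &bp->b_lru)) {
			/* the LRU now owns the cache reference */
			bp->b_state &= ~XFS_BSTATE_DISPOSE;
		} else {
			freebuf = true;
		}
	} else {
		/*
		 * Buffers usually leave the LRU via the disposal list,
		 * tracked by the DISPOSE state bit, so only do the
		 * expensive delete when actually needed.
		 */
		if (!(bp->b_state & XFS_BSTATE_DISPOSE))
			list_lru_del_obj(&btp->bt_lru, &bp->b_lru);
		else
			ASSERT(list_empty(&bp->b_lru));
		freebuf = true;		/* hash-table removal elided */
	}
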
1888 struct xfs_buftarg *btp)
1902 while (percpu_counter_sum(&btp->bt_io_count))
1904 flush_workqueue(btp->bt_mount->m_buf_workqueue);
1909 struct xfs_buftarg *btp)
1915 xfs_buftarg_wait(btp);
1918 while (list_lru_count(&btp->bt_lru)) {
1919 list_lru_walk(&btp->bt_lru, xfs_buftarg_drain_rele,
1946 ASSERT(xlog_is_shutdown(btp->bt_mount->m_log));
1947 xfs_alert(btp->bt_mount,
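
Lines 1888 through 1947 quiesce a buffer target: first wait for the in-flight I/O count to reach zero and flush the completion workqueue, then walk the LRU into a disposal list until it is empty, warning if dirty buffers could not be written back. A sketch assuming the mainline xfs_buftarg_wait()/xfs_buftarg_drain() pair; delay() (XFS's schedule_timeout_uninterruptible() wrapper), XBF_WRITE_FAIL and the disposal-list handling are reconstructions:

void
xfs_buftarg_wait(
	struct xfs_buftarg	*btp)
{
	/* Wait for in-flight buffer I/O to drain, then flush completions. */
	while (percpu_counter_sum(&btp->bt_io_count))
		delay(100);
	flush_workqueue(btp->bt_mount->m_buf_workqueue);
}

void
xfs_buftarg_drain(
	struct xfs_buftarg	*btp)
{
	LIST_HEAD(dispose);
	int			loop = 0;
	bool			write_fail = false;

	xfs_buftarg_wait(btp);

	/* Loop until nothing is left on the LRU. */
	while (list_lru_count(&btp->bt_lru)) {
		list_lru_walk(&btp->bt_lru, xfs_buftarg_drain_rele,
			      &dispose, LONG_MAX);

		while (!list_empty(&dispose)) {
			struct xfs_buf	*bp;

			bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
			list_del_init(&bp->b_lru);
			if (bp->b_flags & XBF_WRITE_FAIL)
				write_fail = true;
			xfs_buf_rele(bp);
		}
		if (loop++ != 0)
			delay(100);
	}

	/*
	 * Failed writebacks imply the log was already shut down by an
	 * earlier error; tell the admin to run xfs_repair.
	 */
	if (write_fail) {
		ASSERT(xlog_is_shutdown(btp->bt_mount->m_log));
		xfs_alert(btp->bt_mount,
	"Please run xfs_repair to determine the extent of the problem.");
	}
}
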
1989 struct xfs_buftarg *btp = shrink->private_data;
1993 freed = list_lru_shrink_walk(&btp->bt_lru, sc,
2011 struct xfs_buftarg *btp = shrink->private_data;
2012 return list_lru_shrink_count(&btp->bt_lru, sc);
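
Lines 1989 through 2012 are the two shrinker callbacks registered at lines 2082 and 2083 below: the count callback reports the LRU population, and the scan callback isolates reclaimable buffers onto a private list and releases them. A sketch assuming the mainline form; the isolation callback name xfs_buftarg_isolate is a reconstruction:

static unsigned long
xfs_buftarg_shrink_scan(
	struct shrinker		*shrink,
	struct shrink_control	*sc)
{
	struct xfs_buftarg	*btp = shrink->private_data;
	LIST_HEAD(dispose);
	unsigned long		freed;

	/* Isolate reclaimable buffers onto a private list, then drop them. */
	freed = list_lru_shrink_walk(&btp->bt_lru, sc,
				     xfs_buftarg_isolate, &dispose);

	while (!list_empty(&dispose)) {
		struct xfs_buf	*bp;

		bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
		list_del_init(&bp->b_lru);
		xfs_buf_rele(bp);
	}

	return freed;
}

static unsigned long
xfs_buftarg_shrink_count(
	struct shrinker		*shrink,
	struct shrink_control	*sc)
{
	struct xfs_buftarg	*btp = shrink->private_data;

	return list_lru_shrink_count(&btp->bt_lru, sc);
}
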
2017 struct xfs_buftarg *btp)
2019 shrinker_free(btp->bt_shrinker);
2020 ASSERT(percpu_counter_sum(&btp->bt_io_count) == 0);
2021 percpu_counter_destroy(&btp->bt_io_count);
2022 list_lru_destroy(&btp->bt_lru);
2027 struct xfs_buftarg *btp)
2029 xfs_destroy_buftarg(btp);
2030 fs_put_dax(btp->bt_daxdev, btp->bt_mount);
2032 if (btp->bt_bdev != btp->bt_mount->m_super->s_bdev)
2033 bdev_fput(btp->bt_bdev_file);
2034 kfree(btp);
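
Lines 2017 through 2034 tear the target down in two layers: xfs_destroy_buftarg() unwinds what initialization set up (shrinker, I/O counter, LRU), and its caller, presumably xfs_free_buftarg(), then drops the DAX and block device references before freeing the structure. A sketch under those assumptions:

void
xfs_destroy_buftarg(
	struct xfs_buftarg	*btp)
{
	/* Teardown mirrors initialization in reverse order. */
	shrinker_free(btp->bt_shrinker);
	ASSERT(percpu_counter_sum(&btp->bt_io_count) == 0);
	percpu_counter_destroy(&btp->bt_io_count);
	list_lru_destroy(&btp->bt_lru);
}

void
xfs_free_buftarg(
	struct xfs_buftarg	*btp)
{
	xfs_destroy_buftarg(btp);

	fs_put_dax(btp->bt_daxdev, btp->bt_mount);
	/*
	 * The data device's bdev file is owned and closed by the VFS
	 * super block; only extra (log/realtime) devices are put here.
	 */
	if (btp->bt_bdev != btp->bt_mount->m_super->s_bdev)
		bdev_fput(btp->bt_bdev_file);
	kfree(btp);
}
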
2039 struct xfs_buftarg *btp,
2043 btp->bt_meta_sectorsize = sectorsize;
2044 btp->bt_meta_sectormask = sectorsize - 1;
2046 if (set_blocksize(btp->bt_bdev, sectorsize)) {
2047 xfs_warn(btp->bt_mount,
2049 sectorsize, btp->bt_bdev);
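
Lines 2039 through 2049 record the metadata sector size and push it down to the block device; the call at line 2119 below names the function xfs_setsize_buftarg(). A sketch assuming the mainline form; the exact warning text and the -EINVAL return are reconstructions:

int
xfs_setsize_buftarg(
	struct xfs_buftarg	*btp,
	unsigned int		sectorsize)
{
	/* Metadata I/O must be sized and aligned to this sector size. */
	btp->bt_meta_sectorsize = sectorsize;
	btp->bt_meta_sectormask = sectorsize - 1;

	if (set_blocksize(btp->bt_bdev, sectorsize)) {
		xfs_warn(btp->bt_mount,
			"Cannot set_blocksize to %u on device %pg",
			sectorsize, btp->bt_bdev);
		return -EINVAL;
	}
	return 0;
}
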
2058 struct xfs_buftarg *btp,
2063 btp->bt_logical_sectorsize = logical_sectorsize;
2064 btp->bt_logical_sectormask = logical_sectorsize - 1;
2070 ratelimit_state_init(&btp->bt_ioerror_rl, 30 * HZ,
2073 if (list_lru_init(&btp->bt_lru))
2075 if (percpu_counter_init(&btp->bt_io_count, 0, GFP_KERNEL))
2078 btp->bt_shrinker =
2080 if (!btp->bt_shrinker)
2082 btp->bt_shrinker->count_objects = xfs_buftarg_shrink_count;
2083 btp->bt_shrinker->scan_objects = xfs_buftarg_shrink_scan;
2084 btp->bt_shrinker->private_data = btp;
2085 shrinker_register(btp->bt_shrinker);
2089 percpu_counter_destroy(&btp->bt_io_count);
2091 list_lru_destroy(&btp->bt_lru);
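
Lines 2058 through 2091 initialize the target, presumably xfs_init_buftarg() as called at line 2121 below: record the logical sector geometry, rate-limit I/O error messages to one burst per 30 seconds, then set up the LRU, the per-CPU in-flight I/O counter and the shrinker, unwinding in reverse order on failure. A sketch under that assumption; the shrinker_alloc() call, SHRINKER_NUMA_AWARE and the descr parameter are reconstructions:

static int
xfs_init_buftarg(
	struct xfs_buftarg	*btp,
	size_t			logical_sectorsize,
	const char		*descr)
{
	/* The device's logical sector size bounds direct I/O alignment. */
	btp->bt_logical_sectorsize = logical_sectorsize;
	btp->bt_logical_sectormask = logical_sectorsize - 1;

	/*
	 * Rate-limit buffer I/O error messages to one burst every 30
	 * seconds so repeated errors do not spam the log.
	 */
	ratelimit_state_init(&btp->bt_ioerror_rl, 30 * HZ,
			     DEFAULT_RATELIMIT_BURST);

	if (list_lru_init(&btp->bt_lru))
		return -ENOMEM;
	if (percpu_counter_init(&btp->bt_io_count, 0, GFP_KERNEL))
		goto out_destroy_lru;

	btp->bt_shrinker =
		shrinker_alloc(SHRINKER_NUMA_AWARE, "xfs-buf:%s", descr);
	if (!btp->bt_shrinker)
		goto out_destroy_io_count;
	btp->bt_shrinker->count_objects = xfs_buftarg_shrink_count;
	btp->bt_shrinker->scan_objects = xfs_buftarg_shrink_scan;
	btp->bt_shrinker->private_data = btp;
	shrinker_register(btp->bt_shrinker);
	return 0;

out_destroy_io_count:
	percpu_counter_destroy(&btp->bt_io_count);
out_destroy_lru:
	list_lru_destroy(&btp->bt_lru);
	return -ENOMEM;
}
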
2100 struct xfs_buftarg *btp;
2106 btp = kzalloc(sizeof(*btp), GFP_KERNEL | __GFP_NOFAIL);
2108 btp->bt_mount = mp;
2109 btp->bt_bdev_file = bdev_file;
2110 btp->bt_bdev = file_bdev(bdev_file);
2111 btp->bt_dev = btp->bt_bdev->bd_dev;
2112 btp->bt_daxdev = fs_dax_get_by_bdev(btp->bt_bdev, &btp->bt_dax_part_off,
2119 if (xfs_setsize_buftarg(btp, bdev_logical_block_size(btp->bt_bdev)))
2121 if (xfs_init_buftarg(btp, bdev_logical_block_size(btp->bt_bdev),
2125 return btp;
2128 kfree(btp);
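
Lines 2100 through 2128 allocate and wire up the target, presumably xfs_alloc_buftarg(): resolve the block device from the opened bdev file, look up any associated DAX device, then run the setsize and init steps above, freeing the half-built target on failure. A sketch under that assumption; the NULL dax_holder_operations and the mp->m_super->s_id description string are reconstructions:

struct xfs_buftarg *
xfs_alloc_buftarg(
	struct xfs_mount	*mp,
	struct file		*bdev_file)
{
	struct xfs_buftarg	*btp;
	/* mainline installs real DAX holder ops under CONFIG_FS_DAX */
	const struct dax_holder_operations *ops = NULL;

	btp = kzalloc(sizeof(*btp), GFP_KERNEL | __GFP_NOFAIL);

	btp->bt_mount = mp;
	btp->bt_bdev_file = bdev_file;
	btp->bt_bdev = file_bdev(bdev_file);
	btp->bt_dev = btp->bt_bdev->bd_dev;
	btp->bt_daxdev = fs_dax_get_by_bdev(btp->bt_bdev, &btp->bt_dax_part_off,
					    mp, ops);

	/*
	 * The superblock has not been read yet, so the filesystem's real
	 * sector size is unknown; start with the device's logical block
	 * size and resize later once the superblock is available.
	 */
	if (xfs_setsize_buftarg(btp, bdev_logical_block_size(btp->bt_bdev)))
		goto error_free;
	if (xfs_init_buftarg(btp, bdev_logical_block_size(btp->bt_bdev),
			mp->m_super->s_id))
		goto error_free;

	return btp;

error_free:
	kfree(btp);
	return NULL;
}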