Searched refs:bdev (Results 26 - 50 of 414) sorted by last modified time

/linux-master/block/
bdev.c
37 struct block_device bdev; member in struct:bdev_inode
48 return &BDEV_I(inode)->bdev;
58 static void bdev_write_inode(struct block_device *bdev) argument
60 struct inode *inode = bdev->bd_inode;
70 bdev, ret);
77 static void kill_bdev(struct block_device *bdev) argument
79 struct address_space *mapping = bdev->bd_inode->i_mapping;
89 void invalidate_bdev(struct block_device *bdev) argument
91 struct address_space *mapping = bdev->bd_inode->i_mapping;
102 * Drop all buffers & page cache for given bdev range
105 truncate_bdev_range(struct block_device *bdev, blk_mode_t mode, loff_t lstart, loff_t lend) argument
134 set_init_blocksize(struct block_device *bdev) argument
147 set_blocksize(struct block_device *bdev, int size) argument
191 sync_blockdev_nowait(struct block_device *bdev) argument
203 sync_blockdev(struct block_device *bdev) argument
211 sync_blockdev_range(struct block_device *bdev, loff_t lstart, loff_t lend) argument
232 bdev_freeze(struct block_device *bdev) argument
268 bdev_thaw(struct block_device *bdev) argument
321 struct block_device *bdev = I_BDEV(inode); local
399 struct block_device *bdev; local
430 bdev_set_nr_sectors(struct block_device *bdev, sector_t sectors) argument
438 bdev_add(struct block_device *bdev, dev_t dev) argument
472 bd_may_claim(struct block_device *bdev, void *holder, const struct blk_holder_ops *hops) argument
514 bd_prepare_to_claim(struct block_device *bdev, void *holder, const struct blk_holder_ops *hops) argument
566 bd_finish_claiming(struct block_device *bdev, void *holder, const struct blk_holder_ops *hops) argument
597 bd_abort_claiming(struct block_device *bdev, void *holder) argument
605 bd_end_claim(struct block_device *bdev, void *holder) argument
640 blkdev_flush_mapping(struct block_device *bdev) argument
648 blkdev_put_whole(struct block_device *bdev) argument
656 blkdev_get_whole(struct block_device *bdev, blk_mode_t mode) argument
752 struct block_device *bdev; local
774 blkdev_put_no_open(struct block_device *bdev) argument
779 bdev_writes_blocked(struct block_device *bdev) argument
784 bdev_block_writes(struct block_device *bdev) argument
789 bdev_unblock_writes(struct block_device *bdev) argument
794 bdev_may_open(struct block_device *bdev, blk_mode_t mode) argument
806 bdev_claim_write_access(struct block_device *bdev, blk_mode_t mode) argument
825 struct block_device *bdev; local
858 bdev_open(struct block_device *bdev, blk_mode_t mode, void *holder, const struct blk_holder_ops *hops, struct file *bdev_file) argument
969 struct block_device *bdev; local
1027 struct block_device *bdev = file_bdev(bdev_file); local
1041 struct block_device *bdev = file_bdev(bdev_file); local
1097 struct block_device *bdev = file_bdev(bdev_file); local
1169 bdev_mark_dead(struct block_device *bdev, bool surprise) argument
1196 struct block_device *bdev; local
1250 struct block_device *bdev; local
[all...]
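
The bdev.c hits above cover the core block-device lifecycle helpers: freeze/thaw, sync, invalidate, and the claim/open path. As a rough sketch only (not taken from the kernel source; example_quiesce_bdev is an invented name), the calling pattern for the quiesce-style helpers listed here looks approximately like this:

#include <linux/blkdev.h>

/* Hypothetical helper: freeze a bdev, flush and drop its cached pages, thaw. */
static int example_quiesce_bdev(struct block_device *bdev)
{
	int ret;

	ret = bdev_freeze(bdev);	/* block new writers */
	if (ret)
		return ret;

	ret = sync_blockdev(bdev);	/* write back the bdev page cache */
	invalidate_bdev(bdev);		/* drop clean cached pages */

	return bdev_thaw(bdev) ?: ret;
}

Error handling is trimmed; a real caller must keep bdev_freeze()/bdev_thaw() balanced.
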
ioctl.c
16 static int blkpg_do_ioctl(struct block_device *bdev, argument
19 struct gendisk *disk = bdev->bd_disk;
27 if (bdev_is_partition(bdev))
39 if (!IS_ALIGNED(p.start | p.length, bdev_logical_block_size(bdev)))
62 static int blkpg_ioctl(struct block_device *bdev, argument
71 return blkpg_do_ioctl(bdev, udata, op);
82 static int compat_blkpg_ioctl(struct block_device *bdev, argument
91 return blkpg_do_ioctl(bdev, compat_ptr(udata), op);
95 static int blk_ioctl_discard(struct block_device *bdev, blk_mode_t mode, argument
100 struct inode *inode = bdev
134 blk_ioctl_secure_erase(struct block_device *bdev, blk_mode_t mode, void __user *argp) argument
165 blk_ioctl_zeroout(struct block_device *bdev, blk_mode_t mode, unsigned long arg) argument
254 blkdev_compat_ptr_ioctl(struct block_device *bdev, blk_mode_t mode, unsigned cmd, unsigned long arg) argument
268 blkdev_pr_allowed(struct block_device *bdev, blk_mode_t mode) argument
283 blkdev_pr_register(struct block_device *bdev, blk_mode_t mode, struct pr_registration __user *arg) argument
301 blkdev_pr_reserve(struct block_device *bdev, blk_mode_t mode, struct pr_reservation __user *arg) argument
319 blkdev_pr_release(struct block_device *bdev, blk_mode_t mode, struct pr_reservation __user *arg) argument
337 blkdev_pr_preempt(struct block_device *bdev, blk_mode_t mode, struct pr_preempt __user *arg, bool abort) argument
355 blkdev_pr_clear(struct block_device *bdev, blk_mode_t mode, struct pr_clear __user *arg) argument
373 blkdev_flushbuf(struct block_device *bdev, unsigned cmd, unsigned long arg) argument
391 blkdev_roset(struct block_device *bdev, unsigned cmd, unsigned long arg) argument
410 blkdev_getgeo(struct block_device *bdev, struct hd_geometry __user *argp) argument
444 compat_hdio_getgeo(struct block_device *bdev, struct compat_hd_geometry __user *ugeo) argument
476 blkdev_bszset(struct block_device *bdev, blk_mode_t mode, int __user *argp) argument
505 blkdev_common_ioctl(struct block_device *bdev, blk_mode_t mode, unsigned int cmd, unsigned long arg, void __user *argp) argument
597 struct block_device *bdev = I_BDEV(file->f_mapping->host); local
658 struct block_device *bdev = I_BDEV(file->f_mapping->host); local
[all...]
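
blk_ioctl_discard(), blk_ioctl_secure_erase() and blk_ioctl_zeroout() above service the BLKDISCARD, BLKSECDISCARD and BLKZEROOUT ioctls. For orientation only, a minimal userspace sketch of the discard side (discard_range is an invented helper; the kernel rejects misaligned ranges):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/fs.h>
#include <unistd.h>

/* Discard (TRIM) a byte range on a block device; illustrative only. */
static int discard_range(const char *dev, uint64_t start, uint64_t len)
{
	uint64_t range[2] = { start, len };
	int fd = open(dev, O_WRONLY);

	if (fd < 0)
		return -1;
	if (ioctl(fd, BLKDISCARD, &range) < 0) {
		perror("BLKDISCARD");
		close(fd);
		return -1;
	}
	return close(fd);
}
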
blk-iocost.c
3227 disk = ctx.bdev->bd_disk;
3421 q = bdev_get_queue(ctx.bdev);
3429 ret = blk_iocost_init(ctx.bdev->bd_disk);
blk-core.c
745 struct block_device *bdev = bio->bi_bdev; local
746 struct request_queue *q = bdev_get_queue(bdev);
755 if ((bio->bi_opf & REQ_NOWAIT) && !bdev_nowait(bdev))
764 if (bdev->bd_partno && unlikely(blk_partition_remap(bio)))
799 if (!bdev_max_discard_sectors(bdev))
803 if (!bdev_max_secure_erase_sectors(bdev))
900 struct block_device *bdev; local
904 bdev = READ_ONCE(bio->bi_bdev);
905 if (!bdev)
908 q = bdev_get_queue(bdev);
1000 bdev_start_io_acct(struct block_device *bdev, enum req_op op, unsigned long start_time) argument
1024 bdev_end_io_acct(struct block_device *bdev, enum req_op op, unsigned int sectors, unsigned long start_time) argument
[all...]
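
bdev_start_io_acct() and bdev_end_io_acct() at the tail of this block are the accounting hooks used by bio-based drivers. A minimal sketch of the calling pattern, assuming a hypothetical driver's submit_bio handler (example_submit_bio is invented):

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/jiffies.h>

/* Hypothetical bio-based driver: account an I/O around its processing. */
static void example_submit_bio(struct bio *bio)
{
	unsigned long start_time;

	start_time = bdev_start_io_acct(bio->bi_bdev, bio_op(bio), jiffies);

	/* ... carry out the actual transfer here ... */

	bdev_end_io_acct(bio->bi_bdev, bio_op(bio), bio_sectors(bio),
			 start_time);
	bio_endio(bio);
}
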
blk-cgroup.h
214 struct block_device *bdev; member in struct:blkg_conf_ctx
/linux-master/fs/btrfs/
scrub.c
1765 !stripe->dev->bdev)) {
2423 if (map->stripes[i].dev->bdev == scrub_dev->bdev &&
2761 bio_init(&bio, dev->bdev, &bvec, 1, REQ_OP_READ);
inode.c
7722 iomap->bdev = fs_info->fs_devices->latest_dev->bdev;
10932 sis->bdev = device->bdev;
extent_io.c
790 bio_set_dev(&bbio->bio, fs_info->fs_devices->latest_dev->bdev);
1702 bio_set_dev(&bbio->bio, fs_info->fs_devices->latest_dev->bdev);
extent-tree.c
1253 static int btrfs_issue_discard(struct block_device *bdev, u64 start, u64 len, argument
1301 ret = blkdev_issue_discard(bdev, start >> SECTOR_SHIFT,
1319 ret = blkdev_issue_discard(bdev, start >> SECTOR_SHIFT,
1356 } else if (bdev_max_discard_sectors(stripe->dev->bdev)) {
1357 ret = btrfs_issue_discard(dev->bdev, phys, len, &discarded);
1399 if (!stripe->dev->bdev) {
6227 if (!bdev_max_discard_sectors(device->bdev))
6284 ret = btrfs_issue_discard(device->bdev, start, len,
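
btrfs_issue_discard() above is a wrapper that ultimately calls blkdev_issue_discard(). A stripped-down sketch of that underlying call, with an invented helper name and the same byte-to-sector shift:

#include <linux/blkdev.h>

/* Hypothetical: discard a byte range, skipping devices without discard support. */
static int example_discard(struct block_device *bdev, u64 start, u64 len)
{
	if (!bdev_max_discard_sectors(bdev))
		return -EOPNOTSUPP;

	return blkdev_issue_discard(bdev, start >> SECTOR_SHIFT,
				    len >> SECTOR_SHIFT, GFP_KERNEL);
}
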
/linux-master/include/linux/
blkdev.h
330 int blkdev_report_zones(struct block_device *bdev, sector_t sector,
332 int blkdev_zone_mgmt(struct block_device *bdev, enum req_op op,
622 unsigned int bdev_nr_zones(struct block_device *bdev);
657 static inline unsigned int bdev_max_open_zones(struct block_device *bdev) argument
659 return bdev->bd_disk->queue->limits.max_open_zones;
662 static inline unsigned int bdev_max_active_zones(struct block_device *bdev) argument
664 return bdev->bd_disk->queue->limits.max_active_zones;
668 static inline unsigned int bdev_nr_zones(struct block_device *bdev) argument
685 static inline unsigned int bdev_max_open_zones(struct block_device *bdev) argument
690 static inline unsigned int bdev_max_active_zones(struct block_device *bdev) argument
731 bdev_read_only(struct block_device *bdev) argument
743 get_start_sect(struct block_device *bdev) argument
748 bdev_nr_sectors(struct block_device *bdev) argument
753 bdev_nr_bytes(struct block_device *bdev) argument
807 bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk) argument
812 bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk) argument
845 bdev_get_queue(struct block_device *bdev) argument
1048 blkdev_issue_flush(struct block_device *bdev) argument
1098 bdev_is_partition(struct block_device *bdev) argument
1168 bdev_max_zone_append_sectors(struct block_device *bdev) argument
1173 bdev_max_segments(struct block_device *bdev) argument
1188 bdev_logical_block_size(struct block_device *bdev) argument
1198 bdev_physical_block_size(struct block_device *bdev) argument
1208 bdev_io_min(struct block_device *bdev) argument
1218 bdev_io_opt(struct block_device *bdev) argument
1230 bdev_zone_write_granularity(struct block_device *bdev) argument
1238 bdev_max_discard_sectors(struct block_device *bdev) argument
1243 bdev_discard_granularity(struct block_device *bdev) argument
1249 bdev_max_secure_erase_sectors(struct block_device *bdev) argument
1254 bdev_write_zeroes_sectors(struct block_device *bdev) argument
1264 bdev_nonrot(struct block_device *bdev) argument
1269 bdev_synchronous(struct block_device *bdev) argument
1275 bdev_stable_writes(struct block_device *bdev) argument
1281 bdev_write_cache(struct block_device *bdev) argument
1286 bdev_fua(struct block_device *bdev) argument
1291 bdev_nowait(struct block_device *bdev) argument
1296 bdev_is_zoned(struct block_device *bdev) argument
1301 bdev_zone_no(struct block_device *bdev, sector_t sec) argument
1312 bdev_op_is_zoned_write(struct block_device *bdev, enum req_op op) argument
1318 bdev_zone_sectors(struct block_device *bdev) argument
1327 bdev_offset_from_zone_start(struct block_device *bdev, sector_t sector) argument
1333 bdev_is_zone_start(struct block_device *bdev, sector_t sector) argument
1344 bdev_dma_alignment(struct block_device *bdev) argument
1349 bdev_iter_is_aligned(struct block_device *bdev, struct iov_iter *iter) argument
1369 block_size(struct block_device *bdev) argument
1552 invalidate_bdev(struct block_device *bdev) argument
1555 sync_blockdev(struct block_device *bdev) argument
1559 sync_blockdev_nowait(struct block_device *bdev) argument
[all...]
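
Most of the blkdev.h hits are one-line inline accessors for the device's queue limits. A hedged example of reading a few of them (example_log_limits is an invented name):

#include <linux/blkdev.h>
#include <linux/printk.h>

/* Hypothetical: log a handful of the limits exposed by the inline helpers. */
static void example_log_limits(struct block_device *bdev)
{
	pr_info("%pg: %llu bytes, logical block %u, %s, max discard %u sectors\n",
		bdev,
		(unsigned long long)bdev_nr_bytes(bdev),
		bdev_logical_block_size(bdev),
		bdev_nonrot(bdev) ? "non-rotational" : "rotational",
		bdev_max_discard_sectors(bdev));
}
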
/linux-master/fs/bcachefs/
journal_io.c
1008 bio_init(bio, ca->disk_sb.bdev, bio->bi_inline_vecs, nr_bvecs, REQ_OP_READ);
1747 bio_reset(bio, ca->disk_sb.bdev, REQ_OP_WRITE|REQ_SYNC|REQ_META);
1792 bio_reset(bio, ca->disk_sb.bdev,
btree_io.c
1302 bio_set_dev(bio, ca->disk_sb.bdev);
1622 bio_set_dev(&rb->bio, ca->disk_sb.bdev);
1704 bio_set_dev(bio, ca->disk_sb.bdev);
super_types.h
8 struct block_device *bdev; member in struct:bch_sb_handle
fs-io.c
81 bio = container_of(bio_alloc_bioset(ca->disk_sb.bdev, 0,
ec.c
736 ec_bio = container_of(bio_alloc_bioset(ca->disk_sb.bdev,
1337 ret = blkdev_issue_zeroout(ca->disk_sb.bdev,
/linux-master/drivers/gpu/drm/vmwgfx/
vmwgfx_ttm_buffer.c
273 static int vmw_ttm_bind(struct ttm_device *bdev, argument
320 static void vmw_ttm_unbind(struct ttm_device *bdev, argument
348 static void vmw_ttm_destroy(struct ttm_device *bdev, struct ttm_tt *ttm) argument
362 static int vmw_ttm_populate(struct ttm_device *bdev, argument
375 return ttm_pool_alloc(&bdev->pool, ttm, ctx);
378 static void vmw_ttm_unpopulate(struct ttm_device *bdev, argument
388 vmw_ttm_unbind(bdev, ttm);
397 ttm_pool_free(&bdev->pool, ttm);
411 vmw_be->dev_priv = vmw_priv_from_ttm(bo->bdev);
438 static int vmw_ttm_io_mem_reserve(struct ttm_device *bdev, struc argument
[all...]
vmwgfx_drv.h
502 struct ttm_device bdev; member in struct:vmw_private
668 static inline struct vmw_private *vmw_priv_from_ttm(struct ttm_device *bdev) argument
670 return container_of(bdev, struct vmw_private, bdev);
vmwgfx_drv.c
556 man = ttm_manager_type(&dev_priv->bdev, VMW_PL_MOB);
557 ttm_resource_manager_evict_all(&dev_priv->bdev, man);
592 man = ttm_manager_type(&dev_priv->bdev, VMW_PL_MOB);
593 ttm_resource_manager_evict_all(&dev_priv->bdev, man);
712 ret = ttm_range_man_init(&dev_priv->bdev, TTM_PL_VRAM, false,
714 ttm_resource_manager_set_used(ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM), false);
720 ttm_range_man_fini(&dev_priv->bdev, TTM_PL_VRAM);
1050 ret = ttm_device_init(&dev_priv->bdev, &vmw_bo_driver,
1167 ttm_device_fini(&dev_priv->bdev);
1212 ttm_device_fini(&dev_priv->bdev);
[all...]
vmwgfx_blit.c
471 ret = dst->bdev->funcs->ttm_tt_populate(dst->bdev, dst->ttm, &ctx);
477 ret = src->bdev->funcs->ttm_tt_populate(src->bdev, src->ttm, &ctx);
/linux-master/drivers/gpu/drm/amd/amdgpu/
amdgpu_vm.c
388 spin_lock(&adev->mman.bdev.lru_lock);
390 spin_unlock(&adev->mman.bdev.lru_lock);
1163 bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
amdgpu_ttm.h
55 struct ttm_device bdev; member in struct:amdgpu_mman
amdgpu_cs.c
780 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
/linux-master/fs/zonefs/
super.c
902 struct block_device *bdev = zd->sb->s_bdev; local
905 zd->zones = kvcalloc(bdev_nr_zones(bdev), sizeof(struct blk_zone),
911 ret = blkdev_report_zones(bdev, 0, BLK_ALL_ZONES,
918 if (ret != bdev_nr_zones(bdev)) {
920 ret, bdev_nr_zones(bdev));
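
The zonefs code above follows the usual blkdev_report_zones() pattern: size an array with bdev_nr_zones(), then walk the zones through a callback. A condensed sketch under the same pattern (the callback and stats struct are invented):

#include <linux/blkdev.h>
#include <linux/blkzoned.h>

struct example_zone_stats {
	unsigned int conventional;
};

/* Callback invoked once per zone by blkdev_report_zones(). */
static int example_count_conv_cb(struct blk_zone *zone, unsigned int idx,
				 void *data)
{
	struct example_zone_stats *stats = data;

	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
		stats->conventional++;
	return 0;
}

static int example_count_conventional(struct block_device *bdev)
{
	struct example_zone_stats stats = {};
	int ret;

	ret = blkdev_report_zones(bdev, 0, BLK_ALL_ZONES,
				  example_count_conv_cb, &stats);
	if (ret < 0)
		return ret;
	return stats.conventional;
}
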
/linux-master/drivers/md/
raid1.c
404 rdev->bdev,
724 opt_iosize = bdev_io_opt(mirror->rdev->bdev) >> 9;
1333 snprintf(b, sizeof(b), "%pg", rdev->bdev);
1377 mirror->rdev->bdev);
1405 read_bio = bio_alloc_clone(mirror->rdev->bdev, bio, gfp,
1625 mbio = bio_alloc_clone(rdev->bdev,
1633 mbio = bio_alloc_clone(rdev->bdev, bio, GFP_NOIO,
1761 mdname(mddev), rdev->bdev,
1784 rdev->bdev);
1863 if (bdev_nonrot(rdev->bdev)) {
[all...]
/linux-master/drivers/ata/
libata-scsi.c
269 * @bdev: block device associated with @sdev
284 int ata_std_bios_param(struct scsi_device *sdev, struct block_device *bdev, argument

Completed in 335 milliseconds