Lines matching refs:bdev (each entry below shows the source line number followed by the matching declaration)

330 int blkdev_report_zones(struct block_device *bdev, sector_t sector,
332 int blkdev_zone_mgmt(struct block_device *bdev, enum req_op op,
622 unsigned int bdev_nr_zones(struct block_device *bdev);
657 static inline unsigned int bdev_max_open_zones(struct block_device *bdev)
659 return bdev->bd_disk->queue->limits.max_open_zones;
662 static inline unsigned int bdev_max_active_zones(struct block_device *bdev)
664 return bdev->bd_disk->queue->limits.max_active_zones;
668 static inline unsigned int bdev_nr_zones(struct block_device *bdev)
685 static inline unsigned int bdev_max_open_zones(struct block_device *bdev)
690 static inline unsigned int bdev_max_active_zones(struct block_device *bdev)
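
The zone report API is callback-driven: blkdev_report_zones() invokes a report_zones_cb for each zone starting at the given sector and returns the number of zones reported or a negative errno. A minimal sketch of counting open zones, assuming the callback-based signature of recent kernels; count_open_zones() and report_open_zones() are hypothetical names:

    #include <linux/blkdev.h>
    #include <linux/blkzoned.h>

    static int count_open_zones(struct blk_zone *zone, unsigned int idx,
                                void *data)
    {
            unsigned int *open = data;

            if (zone->cond == BLK_ZONE_COND_IMP_OPEN ||
                zone->cond == BLK_ZONE_COND_EXP_OPEN)
                    (*open)++;
            return 0;       /* a nonzero return stops the iteration */
    }

    static int report_open_zones(struct block_device *bdev)
    {
            unsigned int open = 0;
            int ret;

            if (!bdev_is_zoned(bdev))
                    return -EOPNOTSUPP;

            ret = blkdev_report_zones(bdev, 0, bdev_nr_zones(bdev),
                                      count_open_zones, &open);
            if (ret < 0)
                    return ret;
            /* on success, ret is the number of zones examined */
            pr_info("%u of %u zones open (max_open %u)\n",
                    open, bdev_nr_zones(bdev), bdev_max_open_zones(bdev));
            return 0;
    }

bdev_max_open_zones() and bdev_max_active_zones() report hardware limits, with 0 meaning no limit; the non-zoned stubs further down simply return 0.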
731 static inline int bdev_read_only(struct block_device *bdev)
733 return bdev->bd_read_only || get_disk_ro(bdev->bd_disk);
738 void bdev_mark_dead(struct block_device *bdev, bool surprise);
743 static inline sector_t get_start_sect(struct block_device *bdev)
745 return bdev->bd_start_sect;
748 static inline sector_t bdev_nr_sectors(struct block_device *bdev)
750 return bdev->bd_nr_sectors;
753 static inline loff_t bdev_nr_bytes(struct block_device *bdev)
755 return (loff_t)bdev_nr_sectors(bdev) << SECTOR_SHIFT;
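
These size helpers all speak 512-byte units regardless of the logical block size: get_start_sect() is a partition's offset on the whole disk, and bdev_nr_bytes() is just bdev_nr_sectors() shifted left by SECTOR_SHIFT. A small sketch using the accessors above (print_geometry() is a made-up name):

    #include <linux/blkdev.h>

    static void print_geometry(struct block_device *bdev)
    {
            sector_t start = get_start_sect(bdev);   /* 512-byte sectors */
            sector_t sectors = bdev_nr_sectors(bdev);
            loff_t bytes = bdev_nr_bytes(bdev);      /* sectors << 9 */

            pr_info("start %llu, %llu sectors, %lld bytes\n",
                    (unsigned long long)start,
                    (unsigned long long)sectors, (long long)bytes);
    }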
804 int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk);
805 void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk);
807 static inline int bd_link_disk_holder(struct block_device *bdev,
812 static inline void bd_unlink_disk_holder(struct block_device *bdev,
845 static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
847 return bdev->bd_queue; /* this is never NULL */
930 void queue_limits_stack_bdev(struct queue_limits *t, struct block_device *bdev,
1021 int blkdev_issue_flush(struct block_device *bdev);
1048 static inline int blkdev_issue_flush(struct block_device *bdev)
1061 int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
1063 int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
1065 int blkdev_issue_secure_erase(struct block_device *bdev, sector_t sector,
1071 extern int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
1074 extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
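
These helpers submit synchronous maintenance I/O: blkdev_issue_flush() drains a volatile write cache with an empty flush bio, while the discard, secure-erase and zeroout calls operate on a sector range (the __blkdev_issue_* variants only build the bio chain and leave submission to the caller). A sketch of a discard wrapper, assuming the modern blkdev_issue_discard() signature without a flags argument; trim_range() is hypothetical, and bdev_max_discard_sectors() appears further down in this listing:

    #include <linux/blkdev.h>

    static int trim_range(struct block_device *bdev, sector_t sector,
                          sector_t nr_sects)
    {
            /* bdev_max_discard_sectors() == 0 means no discard support */
            if (!bdev_max_discard_sectors(bdev))
                    return -EOPNOTSUPP;
            return blkdev_issue_discard(bdev, sector, nr_sects, GFP_KERNEL);
    }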
1098 static inline bool bdev_is_partition(struct block_device *bdev)
1100 return bdev->bd_partno;
1168 bdev_max_zone_append_sectors(struct block_device *bdev)
1170 return queue_max_zone_append_sectors(bdev_get_queue(bdev));
1173 static inline unsigned int bdev_max_segments(struct block_device *bdev)
1175 return queue_max_segments(bdev_get_queue(bdev));
1188 static inline unsigned int bdev_logical_block_size(struct block_device *bdev)
1190 return queue_logical_block_size(bdev_get_queue(bdev));
1198 static inline unsigned int bdev_physical_block_size(struct block_device *bdev)
1200 return queue_physical_block_size(bdev_get_queue(bdev));
1208 static inline int bdev_io_min(struct block_device *bdev)
1210 return queue_io_min(bdev_get_queue(bdev));
1218 static inline int bdev_io_opt(struct block_device *bdev)
1220 return queue_io_opt(bdev_get_queue(bdev));
1230 bdev_zone_write_granularity(struct block_device *bdev)
1232 return queue_zone_write_granularity(bdev_get_queue(bdev));
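
The accessors above expose the queue limits a caller needs to size I/O: bdev_logical_block_size() is the smallest addressable unit, bdev_io_min() the smallest I/O that avoids read-modify-write, and bdev_io_opt() the preferred size (for example a RAID stripe width), or 0 when unreported. A minimal sketch; choose_io_size() is a hypothetical helper:

    #include <linux/blkdev.h>

    static unsigned int choose_io_size(struct block_device *bdev)
    {
            unsigned int lbs = bdev_logical_block_size(bdev);
            unsigned int io_min = bdev_io_min(bdev);
            unsigned int io_opt = bdev_io_opt(bdev);  /* 0 if unreported */

            if (io_opt)
                    return io_opt;
            return max(io_min, lbs);
    }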
1235 int bdev_alignment_offset(struct block_device *bdev);
1236 unsigned int bdev_discard_alignment(struct block_device *bdev);
1238 static inline unsigned int bdev_max_discard_sectors(struct block_device *bdev)
1240 return bdev_get_queue(bdev)->limits.max_discard_sectors;
1243 static inline unsigned int bdev_discard_granularity(struct block_device *bdev)
1245 return bdev_get_queue(bdev)->limits.discard_granularity;
1249 bdev_max_secure_erase_sectors(struct block_device *bdev)
1251 return bdev_get_queue(bdev)->limits.max_secure_erase_sectors;
1254 static inline unsigned int bdev_write_zeroes_sectors(struct block_device *bdev)
1256 struct request_queue *q = bdev_get_queue(bdev);
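
bdev_write_zeroes_sectors() returns 0 when the device has no write-zeroes offload. A sketch that insists on the offload, assuming the BLKDEV_ZERO_NOFALLBACK flag keeps blkdev_issue_zeroout() from falling back to plain zero-filled writes; zero_offload_only() is a made-up name:

    #include <linux/blkdev.h>

    static int zero_offload_only(struct block_device *bdev,
                                 sector_t sector, sector_t nr_sects)
    {
            if (!bdev_write_zeroes_sectors(bdev))
                    return -EOPNOTSUPP;     /* no hardware offload */
            return blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_KERNEL,
                                        BLKDEV_ZERO_NOFALLBACK);
    }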
1264 static inline bool bdev_nonrot(struct block_device *bdev)
1266 return blk_queue_nonrot(bdev_get_queue(bdev));
1269 static inline bool bdev_synchronous(struct block_device *bdev)
1272 &bdev_get_queue(bdev)->queue_flags);
1275 static inline bool bdev_stable_writes(struct block_device *bdev)
1278 &bdev_get_queue(bdev)->queue_flags);
1281 static inline bool bdev_write_cache(struct block_device *bdev)
1283 return test_bit(QUEUE_FLAG_WC, &bdev_get_queue(bdev)->queue_flags);
1286 static inline bool bdev_fua(struct block_device *bdev)
1288 return test_bit(QUEUE_FLAG_FUA, &bdev_get_queue(bdev)->queue_flags);
1291 static inline bool bdev_nowait(struct block_device *bdev)
1293 return test_bit(QUEUE_FLAG_NOWAIT, &bdev_get_queue(bdev)->queue_flags);
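
The cache flags combine into a durability decision: with no volatile write cache a completed write is already durable, with a cache plus FUA support the write itself can carry REQ_FUA, and otherwise an explicit flush is needed. A sketch (the enum and helper name are hypothetical):

    #include <linux/blkdev.h>

    enum durability { DURABLE_ALWAYS, DURABLE_FUA, DURABLE_FLUSH };

    static enum durability durability_strategy(struct block_device *bdev)
    {
            if (!bdev_write_cache(bdev))
                    return DURABLE_ALWAYS;  /* no volatile cache */
            if (bdev_fua(bdev))
                    return DURABLE_FUA;     /* REQ_FUA on the write */
            return DURABLE_FLUSH;           /* separate REQ_PREFLUSH */
    }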
1296 static inline bool bdev_is_zoned(struct block_device *bdev)
1298 return blk_queue_is_zoned(bdev_get_queue(bdev));
1301 static inline unsigned int bdev_zone_no(struct block_device *bdev, sector_t sec)
1303 return disk_zone_no(bdev->bd_disk, sec);
1312 static inline bool bdev_op_is_zoned_write(struct block_device *bdev,
1315 return bdev_is_zoned(bdev) && op_needs_zoned_write_locking(op);
1318 static inline sector_t bdev_zone_sectors(struct block_device *bdev)
1320 struct request_queue *q = bdev_get_queue(bdev);
1327 static inline sector_t bdev_offset_from_zone_start(struct block_device *bdev,
1330 return sector & (bdev_zone_sectors(bdev) - 1);
1333 static inline bool bdev_is_zone_start(struct block_device *bdev,
1336 return bdev_offset_from_zone_start(bdev, sector) == 0;
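
The mask in bdev_offset_from_zone_start() works because the block layer requires a power-of-two zone size. That makes splitting a range at zone boundaries cheap, as in this sketch (split_at_zone_boundaries() and its callback are hypothetical):

    #include <linux/blkdev.h>

    static void split_at_zone_boundaries(struct block_device *bdev,
                                         sector_t sector, sector_t nr_sects,
                                         void (*submit_chunk)(sector_t start,
                                                              sector_t len))
    {
            sector_t zone_sectors = bdev_zone_sectors(bdev);

            while (nr_sects) {
                    /* sectors left before the next zone boundary */
                    sector_t left = zone_sectors -
                            bdev_offset_from_zone_start(bdev, sector);
                    sector_t chunk = min(nr_sects, left);

                    submit_chunk(sector, chunk);
                    sector += chunk;
                    nr_sects -= chunk;
            }
    }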
1344 static inline unsigned int bdev_dma_alignment(struct block_device *bdev)
1346 return queue_dma_alignment(bdev_get_queue(bdev));
1349 static inline bool bdev_iter_is_aligned(struct block_device *bdev,
1352 return iov_iter_is_aligned(iter, bdev_dma_alignment(bdev),
1353 bdev_logical_block_size(bdev) - 1);
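
bdev_iter_is_aligned() folds two direct-I/O requirements into one pass over the iterator: buffer addresses must satisfy the DMA alignment mask and segment lengths must be multiples of the logical block size. A sketch of the kind of validation a direct-I/O path performs (check_dio_alignment() is a made-up name):

    #include <linux/blkdev.h>
    #include <linux/uio.h>

    static int check_dio_alignment(struct block_device *bdev,
                                   struct iov_iter *iter, loff_t pos)
    {
            /* the file position must sit on a logical-block boundary */
            if (pos & (bdev_logical_block_size(bdev) - 1))
                    return -EINVAL;
            /* addresses and segment lengths are checked together */
            if (!bdev_iter_is_aligned(bdev, iter))
                    return -EINVAL;
            return 0;
    }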
1369 static inline unsigned int block_size(struct block_device *bdev)
1371 return 1 << bdev->bd_inode->i_blkbits;
1410 int (*ioctl)(struct block_device *bdev, blk_mode_t mode,
1412 int (*compat_ioctl)(struct block_device *bdev, blk_mode_t mode,
1418 int (*set_read_only)(struct block_device *bdev, bool ro);
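
These are members of struct block_device_operations: ioctl() receives commands the block core does not handle itself, compat_ioctl() covers 32-bit userspace on a 64-bit kernel, and set_read_only() lets the driver veto or mirror BLKROSET. A sketch of a driver wiring two of them up; every my_* name is hypothetical:

    #include <linux/blkdev.h>
    #include <linux/module.h>

    static int my_ioctl(struct block_device *bdev, blk_mode_t mode,
                        unsigned int cmd, unsigned long arg)
    {
            return -ENOTTY;         /* no driver-private ioctls */
    }

    static int my_set_read_only(struct block_device *bdev, bool ro)
    {
            /* e.g. refuse if a hardware write-protect tab disagrees */
            return 0;
    }

    static const struct block_device_operations my_fops = {
            .owner          = THIS_MODULE,
            .ioctl          = my_ioctl,
            .set_read_only  = my_set_read_only,
    };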
1459 unsigned long bdev_start_io_acct(struct block_device *bdev, enum req_op op,
1461 void bdev_end_io_acct(struct block_device *bdev, enum req_op op,
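
Bio-based drivers use this pair to feed the per-disk statistics behind /proc/diskstats. A sketch, assuming the variant where the sector count is passed at completion time; my_account_io() is hypothetical and the I/O itself is elided:

    #include <linux/bio.h>
    #include <linux/blkdev.h>
    #include <linux/jiffies.h>

    static void my_account_io(struct block_device *bdev, struct bio *bio)
    {
            /* capture the size first; the bio advances as it completes */
            unsigned int sectors = bio_sectors(bio);
            unsigned long start = bdev_start_io_acct(bdev, bio_op(bio),
                                                     jiffies);

            /* ... perform the I/O ...; then, on completion: */
            bdev_end_io_acct(bdev, bio_op(bio), sectors, start);
    }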
1478 int bdev_read_only(struct block_device *bdev);
1479 int set_blocksize(struct block_device *bdev, int size);
1494 void (*mark_dead)(struct block_device *bdev, bool surprise);
1499 void (*sync)(struct block_device *bdev);
1504 int (*freeze)(struct block_device *bdev);
1509 int (*thaw)(struct block_device *bdev);
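
These four callbacks are members of struct blk_holder_ops, through which the block layer notifies whoever holds the device open: mark_dead() on device removal (surprise distinguishes hot-unplug from graceful teardown), sync() before the device suspends, and freeze()/thaw() around bdev_freeze()/bdev_thaw(). A sketch of a holder registering two of them; the my_* names are hypothetical, and the table is passed as the holder-ops argument when the device is opened:

    #include <linux/blkdev.h>

    static void my_mark_dead(struct block_device *bdev, bool surprise)
    {
            /* the device is gone; fail pending I/O and shut down */
    }

    static void my_sync(struct block_device *bdev)
    {
            /* write back dirty state before the device suspends */
    }

    static const struct blk_holder_ops my_holder_ops = {
            .mark_dead      = my_mark_dead,
            .sync           = my_sync,
    };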
1531 int bd_prepare_to_claim(struct block_device *bdev, void *holder,
1533 void bd_abort_claiming(struct block_device *bdev, void *holder);
1537 void blkdev_put_no_open(struct block_device *bdev);
1543 void invalidate_bdev(struct block_device *bdev);
1544 int sync_blockdev(struct block_device *bdev);
1545 int sync_blockdev_range(struct block_device *bdev, loff_t lstart, loff_t lend);
1546 int sync_blockdev_nowait(struct block_device *bdev);
1552 static inline void invalidate_bdev(struct block_device *bdev)
1555 static inline int sync_blockdev(struct block_device *bdev)
1559 static inline int sync_blockdev_nowait(struct block_device *bdev)
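
sync_blockdev() writes back the block device's dirty page cache and waits; invalidate_bdev() then drops the clean pages. Together they are the usual way to re-read on-disk metadata that may have changed beneath the cache, as in this sketch (refresh_bdev_cache() is a made-up name):

    #include <linux/blkdev.h>

    static int refresh_bdev_cache(struct block_device *bdev)
    {
            int ret = sync_blockdev(bdev);  /* write back dirty pages */

            if (ret)
                    return ret;
            invalidate_bdev(bdev);          /* drop clean cached pages */
            return 0;
    }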
1578 int bdev_freeze(struct block_device *bdev);
1579 int bdev_thaw(struct block_device *bdev);
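
bdev_freeze() quiesces the device, freezing any filesystem mounted on top and blocking new writes until a balancing bdev_thaw(). A sketch of the snapshot-style usage (snapshot_bdev() is hypothetical and the actual snapshot step is elided):

    #include <linux/blkdev.h>

    static int snapshot_bdev(struct block_device *bdev)
    {
            int ret = bdev_freeze(bdev);

            if (ret)
                    return ret;
            /* ... take the snapshot while writes are blocked ... */
            return bdev_thaw(bdev);
    }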