/linux-master/block/
blk-ioprio.h: forward-declares struct bio and declares blkcg_set_ioprio(struct bio *bio); a no-op static inline stub covers the case where the ioprio cgroup policy is compiled out.
blk-lib.c: discard and zero-out helpers. blk_alloc_discard_bio() allocates one REQ_OP_DISCARD bio at a time with bio_alloc(bdev, 0, REQ_OP_DISCARD, gfp_mask), aligns the bio size to the discard granularity so splitting stays cheap, sets bi_iter.bi_sector from the caller's cursor, and advances the sector/count cursor for the next chunk; the remaining helpers in the file build their own bios for discard, write-zeroes and zero-out submission.
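From the matched lines, the allocator carves one bounded bio off a (sector, nr_sects) range and advances the cursor. A minimal sketch of that pattern follows; demo_alloc_discard_bio() and max_sects are hypothetical stand-ins, with max_sects replacing the granularity-aligned per-bio cap the real function computes from the queue's discard settings:

    #include <linux/bio.h>
    #include <linux/blkdev.h>

    /* Sketch: allocate one discard bio covering at most max_sects sectors of
     * the remaining range, advancing the caller's cursor. max_sects stands in
     * for the granularity-aligned limit derived from the queue settings. */
    static struct bio *demo_alloc_discard_bio(struct block_device *bdev,
                                              sector_t *sector,
                                              sector_t *nr_sects,
                                              sector_t max_sects,
                                              gfp_t gfp_mask)
    {
            sector_t this_sects = min(*nr_sects, max_sects);
            struct bio *bio;

            if (!this_sects)
                    return NULL;

            bio = bio_alloc(bdev, 0, REQ_OP_DISCARD, gfp_mask);
            if (!bio)
                    return NULL;

            bio->bi_iter.bi_sector = *sector;
            bio->bi_iter.bi_size = this_sects << SECTOR_SHIFT;
            *sector += this_sects;
            *nr_sects -= this_sects;
            return bio;
    }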
blk-map.c: maps user and kernel buffers into bios for passthrough requests. bio_copy_from_iter() and bio_copy_to_iter() copy all pages between an iov_iter and a bounce bio, with bio_uncopy_user() undoing the mapping; bio_invalidate_vmalloc_pages(), bio_map_kern_endio(), bio_copy_kern_endio() and bio_copy_kern_endio_read() support the kernel-buffer paths; blk_mq_map_bio_put() releases mapping bios; blk_rq_append_bio() attaches a bio to a request and blk_rq_unmap_user() tears the user mapping down.
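The copy-in path visible above walks every segment of the bounce bio and fills it from the iov_iter. A sketch of that loop (demo_copy_from_iter() is a hypothetical name), under the assumption that a short copy_page_from_iter() means the user buffer faulted:

    #include <linux/bio.h>
    #include <linux/uio.h>

    /* Sketch of the bio_copy_from_iter() pattern: copy user data into every
     * page of an already-allocated bounce bio. */
    static int demo_copy_from_iter(struct bio *bio, struct iov_iter *iter)
    {
            struct bio_vec *bvec;
            struct bvec_iter_all iter_all;

            bio_for_each_segment_all(bvec, bio, iter_all) {
                    size_t copied = copy_page_from_iter(bvec->bv_page,
                                                        bvec->bv_offset,
                                                        bvec->bv_len, iter);
                    if (copied != bvec->bv_len)
                            return -EFAULT;  /* short copy: user fault */
            }
            return 0;
    }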
blk-merge.c: bio splitting and request merging. bio_get_first_bvec() and bio_get_last_bvec() fetch a bio's edge bvecs for the gap checks in req_gap_back_merge() and req_gap_front_merge(); bio_split_discard(), bio_split_write_zeroes() and bio_split_rw() split bios against the queue_limits (sized by get_max_io_size()), driven by __bio_split_to_limits() and bio_split_to_limits(); __blk_bios_map_sg() maps a bio chain onto a scatterlist; ll_new_hw_segment(), ll_back_merge_fn() and ll_front_merge_fn() validate segment counts; bio_failfast() and blk_update_mixed_merge() handle mixed-failfast merges; blk_rq_merge_ok() and blk_try_merge() classify merge candidates; bio_attempt_back_merge(), bio_attempt_front_merge(), bio_attempt_discard_merge(), blk_attempt_bio_merge(), blk_attempt_plug_merge(), blk_bio_list_merge() and blk_mq_sched_try_merge() perform the merge attempts.
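The split entry point matters mostly to stacking drivers, which must trim an incoming bio to their limits before remapping it. A hedged sketch of that call pattern (demo_submit_bio() is a hypothetical driver hook, not kernel code):

    #include <linux/bio.h>
    #include <linux/blkdev.h>

    /* Sketch: a stacking driver trims an oversized bio to the queue limits.
     * bio_split_to_limits() handles the remainder internally and returns the
     * front piece, or NULL when the bio was consumed (errored or deferred). */
    static void demo_submit_bio(struct bio *bio)
    {
            bio = bio_split_to_limits(bio);
            if (!bio)
                    return;

            /* ... remap bio->bi_iter.bi_sector onto the backing device ... */

            submit_bio_noacct(bio);
    }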
blk-mq-sched.c: blk_mq_sched_bio_merge() first asks the active I/O scheduler's bio_merge() hook, then maps the bio to its hardware context with blk_mq_map_queue(q, bio->bi_opf, ctx) and falls back to blk_bio_list_merge() on the software queue's per-type request list.
blk-mq-sched.h: declares blk_mq_sched_try_merge() and blk_mq_sched_bio_merge(). bio_mergeable() is simply !(bio->bi_opf & REQ_NOMERGE_FLAGS), and blk_mq_sched_allow_merge() defers to the scheduler's optional allow_merge() hook via e->type->ops.allow_merge(q, rq, bio).
blk-mq-sysfs.c: matches only through its #include <linux/bio.h>.
blk-mq.c: request setup clears rq->bio and rq->biotail; the debug dump prints bio, biotail and blk_rq_bytes(); completion walks req->bio segment by segment; blk_mq_bio_to_request() initializes a request from a bio, and blk_mq_attempt_bio_merge(), blk_mq_get_new_requests() and blk_mq_use_cached_rq() back blk_mq_submit_bio(), the blk-mq entry point for bio submission.
blk-mq.h: declares blk_mq_submit_bio(struct bio *bio).
blk-rq-qos.c: __rq_qos_cleanup(), __rq_qos_throttle(), __rq_qos_track(), __rq_qos_merge() and __rq_qos_done_bio() walk the queue's rq_qos chain and invoke the corresponding ops callback on every policy that implements it.
blk-rq-qos.h: defines struct rq_qos_ops with the per-bio hooks (throttle, track, merge, done_bio, cleanup) and the inline rq_qos_cleanup(), rq_qos_done_bio(), rq_qos_throttle(), rq_qos_track() and rq_qos_merge() wrappers that short-circuit when no policy is attached before calling the __rq_qos_*() slow paths.
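Policies plug into this by embedding struct rq_qos and filling only the hooks they need; the inline wrappers above skip the indirect calls entirely when nothing is registered. A toy sketch of such a policy (all demo_* names are hypothetical, and registration is omitted because its API varies across kernel versions):

    #include <linux/atomic.h>
    #include <linux/bio.h>
    #include "blk-rq-qos.h"      /* block-internal header, as in-tree policies use */

    /* Hypothetical policy that just counts bios passing through the queue. */
    struct demo_qos {
            struct rq_qos rqos;
            atomic_long_t inflight;
    };

    static void demo_throttle(struct rq_qos *rqos, struct bio *bio)
    {
            struct demo_qos *dq = container_of(rqos, struct demo_qos, rqos);

            atomic_long_inc(&dq->inflight);  /* a real policy may sleep here */
    }

    static void demo_done_bio(struct rq_qos *rqos, struct bio *bio)
    {
            struct demo_qos *dq = container_of(rqos, struct demo_qos, rqos);

            atomic_long_dec(&dq->inflight);
    }

    static const struct rq_qos_ops demo_qos_ops = {
            .throttle = demo_throttle,
            .done_bio = demo_done_bio,
    };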
blk-settings.c: includes <linux/bio.h>; a comment in the limits code notes that the bvec limits and lower-layer bio splitting are supposed to handle the two cases involved.
blk-sysfs.c: splits queue sysfs attributes into those common to bio-based and request-based queues and those that are not relevant for bio-based queues.
blk-throttle.c: blk-cgroup throttling. throtl_bio_data_size() returns bi_iter.bi_size but special-cases REQ_OP_DISCARD; throtl_qnode_add_bio() adds a bio to a throtl_qnode and activates it; tg_within_iops_limit(), tg_within_bps_limit() and tg_may_dispatch() decide whether a bio may be dispatched now or must wait; throtl_charge_bio() accounts the bio against the group and throtl_add_bio_tg() queues it; __blk_throtl_bio() is the entry point.
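The size helper visible above is the basis of the bps accounting. A sketch, with the discard value flagged as an assumption since that line is elided in the match:

    #include <linux/bio.h>

    /* Sketch of throtl_bio_data_size(): bytes charged against the bps limit.
     * Discards carry no data payload, so they are charged a nominal size
     * (assumed to be 512 bytes here; the constant is elided in the snippet). */
    static inline unsigned int demo_throtl_bio_data_size(struct bio *bio)
    {
            if (unlikely(bio_op(bio) == REQ_OP_DISCARD))
                    return 512;
            return bio->bi_iter.bi_size;
    }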
blk-throttle.h: defines THROTL_TG_CANCELING (the group has started cancelling bios), tracks when a group will unthrottle and be ready to dispatch more bios, and counts bios dispatched in the current slice; blk_should_throtl() checks the bio's direction via bio_data_dir() before blk_throtl_bio() calls into __blk_throtl_bio(), and a stub blk_throtl_bio() returning false covers the compiled-out case.
blk-wbt.c: writeback throttling. wbt_should_throttle() switches on bio_op(): writes are throttled unless flagged REQ_SYNC | REQ_IDLE, and discards are throttled too; bio_to_wbt_flags() translates that into wbt_flags, tracking REQ_OP_READ separately; wbt_cleanup(), wbt_wait() and wbt_track() are the rq_qos callbacks.
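The matched lines show the decision as a switch on the operation with an escape hatch for sync-idle writes. Reassembled as a sketch (demo_* is a hypothetical name):

    #include <linux/bio.h>

    /* Sketch of wbt_should_throttle(): writeback-style writes and discards
     * are throttled; O_DIRECT-style writes (REQ_SYNC | REQ_IDLE) and all
     * other operations are not. */
    static bool demo_wbt_should_throttle(struct bio *bio)
    {
            switch (bio_op(bio)) {
            case REQ_OP_WRITE:
                    if ((bio->bi_opf & (REQ_SYNC | REQ_IDLE)) ==
                        (REQ_SYNC | REQ_IDLE))
                            return false;
                    fallthrough;
            case REQ_OP_DISCARD:
                    return true;
            default:
                    return false;
            }
    }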
blk-zoned.c: zoned device support. The zone reset paths chain one REQ_OP_ZONE_RESET | REQ_SYNC bio per zone via blk_next_bio(), setting bi_iter.bi_sector to each zone start, and wait once with submit_bio_wait(); the rest of the file implements zone write plugging, where blk_zone_wplug_add_bio(), blk_zone_wplug_prepare_bio(), blk_zone_wplug_handle_write(), blk_zone_plug_bio(), blk_zone_write_plug_bio_merged(), blk_zone_write_plug_bio_endio() and the reset/finish handlers keep writes to a zone ordered through a per-zone plug whose bio list drains in order (a plug stays busy while a bio is executing or its list is non-empty).
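The reset path visible above chains one bio per zone and only waits at the end. A sketch of that loop, assuming a fixed zone_sectors for simplicity (demo_reset_zones() is a hypothetical name):

    #include <linux/bio.h>
    #include <linux/blkdev.h>

    /* Sketch: reset every zone in [sector, sector + nr_sectors). Each call to
     * blk_next_bio() submits the previous bio chained to the new one, so one
     * submit_bio_wait() at the end covers the whole chain. */
    static int demo_reset_zones(struct block_device *bdev, sector_t sector,
                                sector_t nr_sectors, sector_t zone_sectors,
                                gfp_t gfp_mask)
    {
            sector_t end = sector + nr_sectors;
            struct bio *bio = NULL;
            int ret = 0;

            while (sector < end) {
                    bio = blk_next_bio(bio, bdev, 0,
                                       REQ_OP_ZONE_RESET | REQ_SYNC, gfp_mask);
                    bio->bi_iter.bi_sector = sector;
                    sector += zone_sectors;
            }

            if (bio) {
                    ret = submit_bio_wait(bio);
                    bio_put(bio);
            }
            return ret;
    }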
blk.h: internal declarations of __bio_queue_enter(), submit_bio_noacct_nocheck() and bio_await_chain(); bio_queue_enter() looks up the queue via bdev_get_queue(bio->bi_bdev) and falls back to __bio_queue_enter(); bio_integrity_endio(), bio_integrity_free() and integrity_req_gap_front_merge() get no-op stubs when integrity support is off; bio_may_exceed_limits() decides whether a bio needs splitting; blk_queue_bounce(), the zone write plugging helpers (bio_zone_write_plugging(), bio_is_zone_append(), blk_zone_update_request_bio(), blk_zone_bio_endio()) and bio_release_page() round out the set, each with stubs for the configured-out case.
bounce.c: highmem bounce buffering. copy_to_high_bio_irq() copies bounced data back by iterating the bounce-built source bio; bounce_end_io() walks the bounce bio's segments with bio_for_each_segment_all(), frees the bounce pages, propagates bi_status to the original bio stored in bi_private and completes it; bounce_end_io_write() and bounce_end_io_read() are the op-specific wrappers.
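The completion pattern visible in bounce_end_io() is the generic "clone completes the original" idiom. A stripped-down sketch with the page cleanup elided (demo_* is a hypothetical name):

    #include <linux/bio.h>

    /* Sketch of the bounce completion: the bounce bio stashed the original
     * bio in bi_private; propagate the status, complete the original, and
     * drop the clone. Freeing the bounce pages is elided here. */
    static void demo_bounce_end_io(struct bio *bio)
    {
            struct bio *bio_orig = bio->bi_private;

            bio_orig->bi_status = bio->bi_status;
            bio_endio(bio_orig);
            bio_put(bio);
    }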
bsg-lib.c: remembers the request's bios (job->bidi_bio = job->bidi_rq->bio, and bio = rq->bio) so that blk_rq_unmap_user(bio) can release the user mapping after the request completes, and checks req->bio during teardown.
elevator.c: the header comment records the rework from buffer_heads to bios. elv_iosched_allow_bio_merge() queries the I/O scheduler's allow_merge() hook to see whether the bio the current process is issuing may be merged; elv_bio_merge_ok() requires both blk_rq_merge_ok() and that scheduler check to pass; elv_merge() locates a merge candidate for a bio.
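Both checks from the matched lines combine into one predicate: the core must find the request and bio compatible, and the scheduler may still veto. A sketch of that shape (demo_* is a hypothetical name; the headers are block-internal):

    #include <linux/blk-mq.h>
    #include "blk.h"        /* blk_rq_merge_ok() */
    #include "elevator.h"   /* struct elevator_queue and its ops */

    /* Sketch of elv_bio_merge_ok(): generic merge checks first, then the
     * scheduler's optional allow_merge() veto. */
    static bool demo_bio_merge_ok(struct request *rq, struct bio *bio)
    {
            struct elevator_queue *e = rq->q->elevator;

            if (!blk_rq_merge_ok(rq, bio))
                    return false;

            if (e->type->ops.allow_merge)
                    return e->type->ops.allow_merge(rq->q, rq, bio);

            return true;
    }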
elevator.h: the elevator ops merge hooks that take a struct bio: allow_merge(), bio_merge() and request_merge(); also declares elv_bio_merge_ok().
fops.c: block device file operations. The short direct-I/O path builds a bio on the stack with bio_init() (REQ_OP_READ, or the write op from dio_bio_write_op() for writes), sets bi_iter.bi_sector from the file position, takes bi_write_hint and bi_ioprio from the iocb, and maps the user pages with bio_iov_iter_get_pages(), capturing bi_iter.bi_size as the result; blkdev_bio_end_io() and blkdev_bio_end_io_async() complete the multi-bio and async variants.
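The short path above avoids a heap allocation entirely. A synchronous-read sketch of that shape (demo_blkdev_read() and the table size are hypothetical; write hints, ioprio and partial-I/O handling are trimmed):

    #include <linux/bio.h>
    #include <linux/blkdev.h>
    #include <linux/uio.h>

    /* Sketch of the on-stack bio pattern from the simple direct-I/O path:
     * init a stack bio over a fixed bvec table, pin the user pages, submit
     * synchronously, then unpin and uninit. */
    static ssize_t demo_blkdev_read(struct block_device *bdev,
                                    struct iov_iter *iter, loff_t pos)
    {
            struct bio_vec vecs[8];      /* assumed small fixed bvec table */
            struct bio bio;
            ssize_t ret;
            int err;

            bio_init(&bio, bdev, vecs, ARRAY_SIZE(vecs), REQ_OP_READ);
            bio.bi_iter.bi_sector = pos >> SECTOR_SHIFT;

            err = bio_iov_iter_get_pages(&bio, iter);  /* pin user pages */
            if (err) {
                    ret = err;
                    goto out;
            }
            ret = bio.bi_iter.bi_size;   /* record length before it is consumed */

            err = submit_bio_wait(&bio); /* synchronous submission */
            if (err)
                    ret = err;

            bio_release_pages(&bio, true);  /* unpin, dirtying pages after a read */
    out:
            bio_uninit(&bio);
            return ret;
    }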
ioctl.c: the discard ioctl loops with struct bio *prev = NULL, building bios via blk_alloc_discard_bio(bdev, &sector, &nr_sects, ...) and submitting each as part of a chain with prev = bio_chain_and_submit(prev, bio).
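Together with the allocator from blk-lib.c, that loop yields the whole discard submission in a few lines. A sketch of the chain-and-wait idiom (demo_issue_discard() is a hypothetical name):

    #include <linux/bio.h>
    #include <linux/blkdev.h>

    /* Sketch of the discard loop: allocate per-chunk bios, chain each onto
     * the previous one (which gets submitted immediately), then wait on the
     * last bio, which completes only once the whole chain has finished. */
    static int demo_issue_discard(struct block_device *bdev, sector_t sector,
                                  sector_t nr_sects, gfp_t gfp_mask)
    {
            struct bio *prev = NULL, *bio;
            int err = 0;

            while ((bio = blk_alloc_discard_bio(bdev, &sector, &nr_sects,
                                                gfp_mask)))
                    prev = bio_chain_and_submit(prev, bio);

            if (prev) {
                    err = submit_bio_wait(prev);
                    bio_put(prev);
            }
            return err;
    }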
kyber-iosched.c: kyber_bio_merge() maps the bio to its hardware context with blk_mq_map_queue(q, bio->bi_opf, ctx) and its scheduling domain with kyber_sched_domain(bio->bi_opf), then tries blk_bio_list_merge() on that domain's request list.