Lines Matching defs:bio

47 /* Return number of blocks for a bio, accounting for misalignment of start and end sectors. */
48 static inline unsigned int __nr_blocks(struct ebs_c *ec, struct bio *bio)
50 sector_t end_sector = __block_mod(bio->bi_iter.bi_sector, ec->u_bs) + bio_sectors(bio);
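Only the first statement of __nr_blocks() is matched above. A minimal sketch of a plausible completion, assuming __sector_to_block() divides a sector count by the emulated block size ec->u_bs and __block_mod() returns the remainder of that division:

    static inline unsigned int __nr_blocks(struct ebs_c *ec, struct bio *bio)
    {
        /* Bias the end sector by the misalignment of the start sector... */
        sector_t end_sector = __block_mod(bio->bi_iter.bi_sector, ec->u_bs) + bio_sectors(bio);

        /* ...then round up to a whole number of u_bs-sized blocks. */
        return __sector_to_block(ec, end_sector) + (__block_mod(end_sector, ec->u_bs) ? 1 : 0);
    }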
63 * Copy blocks between bufio blocks and the bio vector's (partial/overlapping) pages.
85 /* Avoid reading for writes when the bio vector's page overwrites the block completely. */
98 /* Copy data between the bio and the buffer if the read/new above succeeded. */
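The two comments above suggest the per-bvec helper first obtains a buffer, skipping the read when a write covers the whole block, then copies. A sketch under those assumptions; block, buf_off, cur_len and bv are hypothetical locals of the helper, while dm_bufio_read(), dm_bufio_new(), dm_bufio_get_block_size(), dm_bufio_mark_partial_buffer_dirty() and dm_bufio_release() are real dm-bufio calls:

    struct dm_buffer *b;
    char *ba, *pa = page_address(bv->bv_page) + bv->bv_offset;

    /* Avoid reading for writes when the bio page overwrites the block completely. */
    if (op == REQ_OP_READ || buf_off || cur_len < dm_bufio_get_block_size(ec->bufio))
        ba = dm_bufio_read(ec->bufio, block, &b);
    else
        ba = dm_bufio_new(ec->bufio, block, &b);
    if (IS_ERR(ba))
        return PTR_ERR(ba);

    /* Copy between the bio page and the buffer now that read/new succeeded. */
    if (op == REQ_OP_READ)
        memcpy(pa, ba + buf_off, cur_len);
    else {
        memcpy(ba + buf_off, pa, cur_len);
        dm_bufio_mark_partial_buffer_dirty(b, buf_off, buf_off + cur_len);
    }
    dm_bufio_release(b);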
121 /* READ/WRITE: iterate the bio's vectors, copying between (partial) pages and bufio blocks. */
122 static int __ebs_rw_bio(struct ebs_c *ec, enum req_op op, struct bio *bio)
128 bio_for_each_bvec(bv, bio, iter) {
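A sketch of how the loop body could delegate and accumulate errors; the helper name __ebs_rw_bvec is an assumption, and bio_for_each_bvec() needs a struct bio_vec and a struct bvec_iter declared by the caller:

    int r = 0, rr;
    struct bio_vec bv;
    struct bvec_iter iter;

    bio_for_each_bvec(bv, bio, iter) {
        /* Copy one (partial) bvec; remember the last error but keep going. */
        rr = __ebs_rw_bvec(ec, op, &bv, &iter);
        if (rr)
            r = rr;
    }

    return r;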
138 * Discard bio's blocks, i.e. pass discards down.
143 static int __ebs_discard_bio(struct ebs_c *ec, struct bio *bio)
145 sector_t block, blocks, sector = bio->bi_iter.bi_sector;
148 blocks = __nr_blocks(ec, bio);
160 if (blocks && __block_mod(bio_end_sector(bio), ec->u_bs))
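Lines 145-160 suggest partially covered first and last blocks are trimmed off before the discard is passed down. A sketch, assuming dm_bufio_issue_discard() (a real dm-bufio call) issues the discard in block units:

    block = __sector_to_block(ec, sector);
    blocks = __nr_blocks(ec, bio);

    /* Skip a partially covered first block. */
    if (__block_mod(sector, ec->u_bs)) {
        block++;
        blocks--;
    }

    /* Skip a partially covered last block, if any blocks remain. */
    if (blocks && __block_mod(bio_end_sector(bio), ec->u_bs))
        blocks--;

    return blocks ? dm_bufio_issue_discard(ec->bufio, block, blocks) : 0;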
167 static void __ebs_forget_bio(struct ebs_c *ec, struct bio *bio)
169 sector_t blocks, sector = bio->bi_iter.bi_sector;
171 blocks = __nr_blocks(ec, bio);
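__ebs_forget_bio() presumably drops any cached buffers covering the bio's blocks so stale data is not served later. A one-line sketch using dm_bufio_forget_buffers() (a real dm-bufio call; its use here is an assumption):

    dm_bufio_forget_buffers(ec->bufio, __sector_to_block(ec, sector), blocks);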
183 struct bio *bio;
194 bio_list_for_each(bio, &bios) {
195 block1 = __sector_to_block(ec, bio->bi_iter.bi_sector);
196 if (bio_op(bio) == REQ_OP_READ)
197 dm_bufio_prefetch(ec->bufio, block1, __nr_blocks(ec, bio));
198 else if (bio_op(bio) == REQ_OP_WRITE && !(bio->bi_opf & REQ_PREFLUSH)) {
199 block2 = __sector_to_block(ec, bio_end_sector(bio));
200 if (__block_mod(bio->bi_iter.bi_sector, ec->u_bs))
202 if (__block_mod(bio_end_sector(bio), ec->u_bs) && block2 != block1)
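The write branch above appears with its prefetch calls elided, apparently because they contain no standalone bio identifier for the search to match. A sketch of the likely shape: only misaligned edge blocks need a read-modify-write, so only those are prefetched, one block each (dm_bufio_prefetch() matches the three-argument usage already visible at line 197):

    if (__block_mod(bio->bi_iter.bi_sector, ec->u_bs))
        dm_bufio_prefetch(ec->bufio, block1, 1);    /* partial first block */
    if (__block_mod(bio_end_sector(bio), ec->u_bs) && block2 != block1)
        dm_bufio_prefetch(ec->bufio, block2, 1);    /* partial last block */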
207 bio_list_for_each(bio, &bios) {
209 if (bio_op(bio) == REQ_OP_READ)
210 r = __ebs_rw_bio(ec, REQ_OP_READ, bio);
211 else if (bio_op(bio) == REQ_OP_WRITE) {
213 r = __ebs_rw_bio(ec, REQ_OP_WRITE, bio);
214 } else if (bio_op(bio) == REQ_OP_DISCARD) {
215 __ebs_forget_bio(ec, bio);
216 r = __ebs_discard_bio(ec, bio);
220 bio->bi_status = errno_to_blk_status(r);
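A sketch of how the dispatch loop could map each bio's outcome to a block-layer status; errno_to_blk_status() is the real block-layer conversion helper, while the write flag is an assumption tied to the flush in the completion loop below:

    bio_list_for_each(bio, &bios) {
        r = -EIO;
        if (bio_op(bio) == REQ_OP_READ)
            r = __ebs_rw_bio(ec, REQ_OP_READ, bio);
        else if (bio_op(bio) == REQ_OP_WRITE) {
            write = true;
            r = __ebs_rw_bio(ec, REQ_OP_WRITE, bio);
        } else if (bio_op(bio) == REQ_OP_DISCARD) {
            __ebs_forget_bio(ec, bio);
            r = __ebs_discard_bio(ec, bio);
        }

        if (r < 0)
            bio->bi_status = errno_to_blk_status(r);
    }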
229 while ((bio = bio_list_pop(&bios))) {
231 if (unlikely(r && bio_op(bio) == REQ_OP_WRITE))
232 bio_io_error(bio);
234 bio_endio(bio);
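A sketch of the completion loop, assuming dirty buffers are flushed with dm_bufio_write_dirty_buffers() (a real dm-bufio call) before any bio is completed, so a failed flush can fail the writes it was meant to persist:

    r = write ? dm_bufio_write_dirty_buffers(ec->bufio) : 0;

    while ((bio = bio_list_pop(&bios))) {
        /* Fail writes if the flush failed; complete everything else normally. */
        if (unlikely(r && bio_op(bio) == REQ_OP_WRITE))
            bio_io_error(bio);
        else
            bio_endio(bio);
    }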
361 static int ebs_map(struct dm_target *ti, struct bio *bio)
365 bio_set_dev(bio, ec->dev->bdev);
366 bio->bi_iter.bi_sector = ec->start + dm_target_offset(ti, bio->bi_iter.bi_sector);
368 if (unlikely(bio_op(bio) == REQ_OP_FLUSH))
375 if (likely(__block_mod(bio->bi_iter.bi_sector, ec->u_bs) ||
376 __block_mod(bio_end_sector(bio), ec->u_bs) ||
379 bio_list_add(&ec->bios_in, bio);
388 __ebs_forget_bio(ec, bio);
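Putting the ebs_map() excerpts together: flushes are remapped straight to the underlying device, misaligned I/O is queued for worker processing, and everything else is remapped directly after forgetting any cached buffers it touches. A sketch under those assumptions; ec->lock, ec->bios_in, ec->wq and ec->ws are inferred worker fields, and the third operand of the || chain at line 376 is not matched by the search, so it stays elided:

    static int ebs_map(struct dm_target *ti, struct bio *bio)
    {
        struct ebs_c *ec = ti->private;

        bio_set_dev(bio, ec->dev->bdev);
        bio->bi_iter.bi_sector = ec->start + dm_target_offset(ti, bio->bi_iter.bi_sector);

        /* Pass flushes straight down. */
        if (unlikely(bio_op(bio) == REQ_OP_FLUSH))
            return DM_MAPIO_REMAPPED;

        /* Queue partial/overlapping I/O for the worker. */
        if (likely(__block_mod(bio->bi_iter.bi_sector, ec->u_bs) ||
                   __block_mod(bio_end_sector(bio), ec->u_bs) /* || <elided condition> */)) {
            spin_lock_irq(&ec->lock);
            bio_list_add(&ec->bios_in, bio);
            spin_unlock_irq(&ec->lock);

            queue_work(ec->wq, &ec->ws);

            return DM_MAPIO_SUBMITTED;
        }

        /* Aligned direct I/O: drop stale buffers, then remap. */
        __ebs_forget_bio(ec, bio);

        return DM_MAPIO_REMAPPED;
    }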