Lines Matching refs:bio

204 	 * in a bio list
229 struct bio *full_bio;
245 static void init_tracked_chunk(struct bio *bio)
247 struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
252 static bool is_bio_tracked(struct bio *bio)
254 struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
259 static void track_chunk(struct dm_snapshot *s, struct bio *bio, chunk_t chunk)
261 struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
271 static void stop_tracking_chunk(struct dm_snapshot *s, struct bio *bio)
273 struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
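
The four helpers above (lines 245-273) all pull a struct dm_snap_tracked_chunk out of the bio's per-bio data area, which device-mapper reserves when a target sets ti->per_io_data_size in its constructor. A minimal sketch of that pattern follows; the structure fields and function names are illustrative, not the snapshot target's actual layout.

    #include <linux/device-mapper.h>

    /* Illustrative per-bio payload; the real dm_snap_tracked_chunk also
     * carries an hlist_node so it can sit in a per-snapshot hash table. */
    struct example_tracked_chunk {
            sector_t chunk;
    };

    /* Constructor fragment: reserve room for the payload in every bio. */
    static int example_ctr_fragment(struct dm_target *ti)
    {
            ti->per_io_data_size = sizeof(struct example_tracked_chunk);
            return 0;
    }

    /* Map-path fragment: the payload lives alongside the bio, so no
     * allocation is needed on the I/O path. */
    static void example_track(struct bio *bio, sector_t chunk)
    {
            struct example_tracked_chunk *c =
                    dm_per_bio_data(bio, sizeof(struct example_tracked_chunk));

            c->chunk = chunk;
    }

The companion helpers at lines 252 and 271 read the same per-bio slot back, so a completion hook can tell whether the bio was ever tracked.
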
928 static struct bio *__release_queued_bios_after_merge(struct dm_snapshot *s)
986 static void flush_bios(struct bio *bio);
990 struct bio *b = NULL;
1126 static void error_bios(struct bio *bio);
1131 struct bio *b = NULL;
1571 static void flush_bios(struct bio *bio)
1573 struct bio *n;
1575 while (bio) {
1576 n = bio->bi_next;
1577 bio->bi_next = NULL;
1578 submit_bio_noacct(bio);
1579 bio = n;
1583 static int do_origin(struct dm_dev *origin, struct bio *bio, bool limit);
1588 static void retry_origin_bios(struct dm_snapshot *s, struct bio *bio)
1590 struct bio *n;
1593 while (bio) {
1594 n = bio->bi_next;
1595 bio->bi_next = NULL;
1596 r = do_origin(s->origin, bio, false);
1598 submit_bio_noacct(bio);
1599 bio = n;
1606 static void error_bios(struct bio *bio)
1608 struct bio *n;
1610 while (bio) {
1611 n = bio->bi_next;
1612 bio->bi_next = NULL;
1613 bio_io_error(bio);
1614 bio = n;
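
flush_bios(), retry_origin_bios() and error_bios() (lines 1571-1614 above) walk the same hand-rolled singly linked list: bios chained through bi_next, each detached before it is resubmitted, retried against the origin, or failed. A condensed sketch of that drain loop, with the per-bio action reduced to a flag, is below; the function name is invented for illustration.

    #include <linux/bio.h>
    #include <linux/blkdev.h>

    /* Drain a bi_next-chained list of bios, either resubmitting them
     * (flush_bios() behaviour) or failing them (error_bios() behaviour). */
    static void example_drain_bio_chain(struct bio *bio, bool error)
    {
            struct bio *n;

            while (bio) {
                    n = bio->bi_next;
                    bio->bi_next = NULL;    /* detach before handing the bio on */
                    if (error)
                            bio_io_error(bio);
                    else
                            submit_bio_noacct(bio);
                    bio = n;
            }
    }
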
1648 struct bio *origin_bios = NULL;
1649 struct bio *snapshot_bios = NULL;
1650 struct bio *full_bio = NULL;
1816 static void full_bio_end_io(struct bio *bio)
1818 void *callback_data = bio->bi_private;
1820 dm_kcopyd_do_callback(callback_data, 0, bio->bi_status ? 1 : 0);
1824 struct bio *bio)
1829 pe->full_bio = bio;
1830 pe->full_bio_end_io = bio->bi_end_io;
1836 bio->bi_end_io = full_bio_end_io;
1837 bio->bi_private = callback_data;
1839 submit_bio_noacct(bio);
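
start_full_bio() (lines 1824-1839 above) covers the case where an incoming write spans a whole chunk: rather than scheduling a kcopyd copy, the write itself fills the COW chunk, and its completion is fed back into kcopyd by saving bi_end_io/bi_private (line 1830) and pointing bi_end_io at full_bio_end_io(), which calls dm_kcopyd_do_callback(). A stripped-down sketch of that rewiring is below; the pending-exception bookkeeping around it is omitted.

    #include <linux/bio.h>
    #include <linux/blkdev.h>
    #include <linux/dm-kcopyd.h>

    static void example_full_bio_end_io(struct bio *bio)
    {
            void *kcopyd_job = bio->bi_private;

            /* Report the bio's status to kcopyd as if a chunk copy finished. */
            dm_kcopyd_do_callback(kcopyd_job, 0, bio->bi_status ? 1 : 0);
    }

    static void example_start_full_bio(struct dm_kcopyd_client *kc,
                                       dm_kcopyd_notify_fn done, void *context,
                                       struct bio *bio)
    {
            /* The caller saves bio->bi_end_io first (pe->full_bio_end_io in
             * the listing) so it can be restored once the exception commits. */
            bio->bi_end_io = example_full_bio_end_io;
            bio->bi_private = dm_kcopyd_prepare_callback(kc, done, context);
            submit_bio_noacct(bio);
    }
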
1908 struct bio *bio, chunk_t chunk)
1910 bio_set_dev(bio, s->cow->bdev);
1911 bio->bi_iter.bi_sector =
1914 (bio->bi_iter.bi_sector & s->store->chunk_mask);
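
remap_exception() (lines 1910-1914; only the matching lines are shown) redirects the bio to the COW device and rebuilds bi_sector as the start of the exception's chunk on the COW device plus the bio's offset within the chunk, which chunk_mask extracts. A simplified version of that arithmetic, with the exception-store geometry passed in directly, looks like this:

    #include <linux/bio.h>

    /* chunk_shift/chunk_mask describe a power-of-two chunk size in sectors;
     * new_chunk is where the exception lives on the COW device. */
    static void example_remap_to_cow(struct bio *bio,
                                     struct block_device *cow_bdev,
                                     sector_t new_chunk,
                                     unsigned int chunk_shift,
                                     sector_t chunk_mask)
    {
            sector_t offset = bio->bi_iter.bi_sector & chunk_mask;

            bio_set_dev(bio, cow_bdev);
            bio->bi_iter.bi_sector = (new_chunk << chunk_shift) + offset;
    }

With 8-sector (4 KiB) chunks, for example, a bio aimed at origin sector 1000003 sits at offset 3 inside its chunk; if that chunk's exception lives in COW chunk 42, the bio is reissued at COW sector 42 * 8 + 3 = 339.
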
1919 struct bio *bio = context;
1920 struct dm_snapshot *s = bio->bi_private;
1923 bio->bi_status = write_err ? BLK_STS_IOERR : 0;
1924 bio_endio(bio);
1928 struct bio *bio, chunk_t chunk)
1933 dest.sector = bio->bi_iter.bi_sector;
1937 WARN_ON_ONCE(bio->bi_private);
1938 bio->bi_private = s;
1939 dm_kcopyd_zero(s->kcopyd_client, 1, &dest, 0, zero_callback, bio);
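
zero_exception() (lines 1928-1939 above) handles a whole-chunk discard by asking kcopyd to zero the destination region and completing the bio from zero_callback(); the real code also parks the snapshot pointer in bi_private (lines 1937-1938) so the callback can drop the snapshot lock, which the sketch below leaves out.

    #include <linux/bio.h>
    #include <linux/dm-io.h>
    #include <linux/dm-kcopyd.h>

    /* Completion: finish the discard bio with whatever status the zeroing got. */
    static void example_zero_callback(int read_err, unsigned long write_err,
                                      void *context)
    {
            struct bio *bio = context;

            bio->bi_status = write_err ? BLK_STS_IOERR : BLK_STS_OK;
            bio_endio(bio);
    }

    /* Zero one chunk-sized region on the COW device on behalf of @bio,
     * which has already been remapped there. */
    static void example_zero_chunk(struct dm_kcopyd_client *kc,
                                   struct block_device *cow_bdev,
                                   struct bio *bio, sector_t chunk_sectors)
    {
            struct dm_io_region dest = {
                    .bdev   = cow_bdev,
                    .sector = bio->bi_iter.bi_sector,
                    .count  = chunk_sectors,
            };

            dm_kcopyd_zero(kc, 1, &dest, 0, example_zero_callback, bio);
    }
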
1942 static bool io_overlaps_chunk(struct dm_snapshot *s, struct bio *bio)
1944 return bio->bi_iter.bi_size ==
1948 static int snapshot_map(struct dm_target *ti, struct bio *bio)
1957 init_tracked_chunk(bio);
1959 if (bio->bi_opf & REQ_PREFLUSH) {
1960 bio_set_dev(bio, s->cow->bdev);
1964 chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);
1972 if (bio_data_dir(bio) == WRITE) {
1981 bio_data_dir(bio) == WRITE)) {
1986 if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) {
1987 if (s->discard_passdown_origin && dm_bio_get_target_bio_nr(bio)) {
1994 bio_set_dev(bio, s->origin->bdev);
1995 track_chunk(s, bio, chunk);
2004 remap_exception(s, e, bio, chunk);
2005 if (unlikely(bio_op(bio) == REQ_OP_DISCARD) &&
2006 io_overlaps_chunk(s, bio)) {
2009 zero_exception(s, e, bio, chunk);
2016 if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) {
2021 bio_endio(bio);
2031 if (bio_data_dir(bio) == WRITE) {
2041 remap_exception(s, e, bio, chunk);
2066 remap_exception(s, &pe->e, bio, chunk);
2070 if (!pe->started && io_overlaps_chunk(s, bio)) {
2076 start_full_bio(pe, bio);
2080 bio_list_add(&pe->snapshot_bios, bio);
2093 bio_set_dev(bio, s->origin->bdev);
2094 track_chunk(s, bio, chunk);
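
Taken together, the snapshot_map() matches (lines 1948-2094 above) trace the target's main decision tree: flushes and chunks that already have an exception are remapped and handed back to dm core, whole-chunk writes go through start_full_bio(), and other writes are queued on a pending exception until the chunk copy finishes. The map method signals which path it took through its return value; the reduced sketch below keeps only that convention, with the lookup and queueing left as placeholders.

    #include <linux/bio.h>
    #include <linux/device-mapper.h>

    /* Hypothetical per-target state; the real target keeps far more. */
    struct example_snap {
            struct block_device *cow_bdev;
    };

    static bool example_chunk_has_exception(struct example_snap *s,
                                            struct bio *bio)
    {
            return false;   /* placeholder for the exception-table lookup */
    }

    static void example_queue_until_copied(struct example_snap *s,
                                           struct bio *bio)
    {
            /* placeholder for bio_list_add(&pe->snapshot_bios, bio) */
    }

    static int example_map(struct dm_target *ti, struct bio *bio)
    {
            struct example_snap *s = ti->private;

            if (bio->bi_opf & REQ_PREFLUSH) {
                    /* Flushes carry no data: just aim them at the COW device. */
                    bio_set_dev(bio, s->cow_bdev);
                    return DM_MAPIO_REMAPPED;       /* dm core resubmits it */
            }

            if (example_chunk_has_exception(s, bio)) {
                    bio_set_dev(bio, s->cow_bdev);  /* stand-in for remap_exception() */
                    return DM_MAPIO_REMAPPED;
            }

            /* Park the bio until the chunk has been copied out. */
            example_queue_until_copied(s, bio);
            return DM_MAPIO_SUBMITTED;              /* the target now owns the bio */
    }
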
2116 static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
2123 init_tracked_chunk(bio);
2125 if (bio->bi_opf & REQ_PREFLUSH) {
2126 if (!dm_bio_get_target_bio_nr(bio))
2127 bio_set_dev(bio, s->origin->bdev);
2129 bio_set_dev(bio, s->cow->bdev);
2133 if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) {
2135 bio_endio(bio);
2139 chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);
2151 if (bio_data_dir(bio) == WRITE &&
2155 bio_set_dev(bio, s->origin->bdev);
2156 bio_list_add(&s->bios_queued_during_merge, bio);
2161 remap_exception(s, e, bio, chunk);
2163 if (bio_data_dir(bio) == WRITE)
2164 track_chunk(s, bio, chunk);
2169 bio_set_dev(bio, s->origin->bdev);
2171 if (bio_data_dir(bio) == WRITE) {
2173 return do_origin(s->origin, bio, false);
2182 static int snapshot_end_io(struct dm_target *ti, struct bio *bio,
2187 if (is_bio_tracked(bio))
2188 stop_tracking_chunk(s, bio);
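
snapshot_end_io() (lines 2182-2188 above) is the other end of the per-bio tracking: once the bio completes, the entry installed by track_chunk() is removed again. The hedged skeleton below only shows the shape of a dm end_io hook; the cleanup itself is a placeholder.

    #include <linux/bio.h>
    #include <linux/device-mapper.h>

    static int example_end_io(struct dm_target *ti, struct bio *bio,
                              blk_status_t *error)
    {
            /* Placeholder for is_bio_tracked()/stop_tracking_chunk():
             * undo any per-bio bookkeeping set up in the map method. */
            return DM_ENDIO_DONE;   /* let dm core complete the bio normally */
    }
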
2426 * supplied bio was ignored. The caller may submit it immediately.
2431 * and any supplied bio is added to a list to be submitted once all
2435 struct bio *bio)
2514 * If an origin bio was supplied, queue it to wait for the
2518 if (bio) {
2519 bio_list_add(&pe->origin_bios, bio);
2520 bio = NULL;
2544 * Submit the exception against which the bio is queued last,
2556 static int do_origin(struct dm_dev *origin, struct bio *bio, bool limit)
2573 r = __origin_write(&o->snapshots, bio->bi_iter.bi_sector, bio);
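
The __origin_write() comments and fragments (lines 2426-2520 above) describe the origin-side handshake: a write that hits a not-yet-copied chunk is added to the pending exception's origin_bios list and only resubmitted, via flush_bios(), once the copy has completed; otherwise the caller may submit it immediately. The queue is the stock struct bio_list; a tiny illustrative example of that API is below.

    #include <linux/bio.h>

    /* Park bios while a chunk copy is in flight, then hand back the chain. */
    static struct bio *example_park_and_release(struct bio *incoming)
    {
            struct bio_list queued;

            bio_list_init(&queued);
            bio_list_add(&queued, incoming);        /* pe->origin_bios in the listing */

            /* bio_list_get() detaches the whole bi_next-chained list, which is
             * exactly the shape flush_bios()/error_bios() expect to walk. */
            return bio_list_get(&queued);
    }
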
2667 static int origin_map(struct dm_target *ti, struct bio *bio)
2672 bio_set_dev(bio, o->dev->bdev);
2674 if (unlikely(bio->bi_opf & REQ_PREFLUSH))
2677 if (bio_data_dir(bio) != WRITE)
2681 ((unsigned int)bio->bi_iter.bi_sector & (o->split_boundary - 1));
2683 if (bio_sectors(bio) > available_sectors)
2684 dm_accept_partial_bio(bio, available_sectors);
2687 return do_origin(o->dev, bio, true);
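
Finally, origin_map() (lines 2667-2687 above) keeps each write inside a single chunk: with split_boundary a power-of-two sector count, the space left in the current chunk is split_boundary minus (bi_sector masked by split_boundary - 1), and dm_accept_partial_bio() trims the bio to that size so dm core resubmits the remainder. For example, with a 16-sector boundary a write starting at sector 70 can cover at most 16 - (70 & 15) = 10 sectors before it is split. A compact sketch:

    #include <linux/bio.h>
    #include <linux/device-mapper.h>

    /* Trim @bio so it stays inside the chunk containing its first sector. */
    static void example_limit_to_boundary(struct bio *bio,
                                          unsigned int split_boundary)
    {
            unsigned int available_sectors = split_boundary -
                    ((unsigned int)bio->bi_iter.bi_sector & (split_boundary - 1));

            if (bio_sectors(bio) > available_sectors)
                    dm_accept_partial_bio(bio, available_sectors);
            /* dm core clones and resubmits the rest of the bio afterwards. */
    }
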