Lines matching refs: pe (the pending-exception pointer, struct dm_snap_pending_exception, in drivers/md/dm-snap.c)

744 struct dm_snap_pending_exception *pe = mempool_alloc(&s->pending_pool,
748 pe->snap = s;
750 return pe;
753 static void free_pending_exception(struct dm_snap_pending_exception *pe)
755 struct dm_snapshot *s = pe->snap;
757 mempool_free(pe, &s->pending_pool);
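The pair above (alloc_pending_exception at 744, free_pending_exception at 753) brackets a pending exception's lifetime: the structure is carved out of the snapshot's pending_pool mempool and the owning snapshot is stored in pe->snap, which is exactly what the free path reads back to find the right pool again. A minimal userspace sketch of that ownership pattern, assuming simplified stand-in types and plain malloc/free in place of mempool_alloc/mempool_free (error handling omitted):

#include <stdlib.h>

struct snapshot;                        /* stand-in for struct dm_snapshot */

struct pending_exception {              /* stand-in for dm_snap_pending_exception */
	struct snapshot *snap;          /* owning snapshot, set at allocation */
};

struct snapshot {
	int pool;                       /* placeholder for s->pending_pool */
};

static struct pending_exception *alloc_pending_exception(struct snapshot *s)
{
	struct pending_exception *pe = malloc(sizeof(*pe));  /* mempool_alloc() */

	pe->snap = s;                   /* mirrors "pe->snap = s" at 748 */
	return pe;
}

static void free_pending_exception(struct pending_exception *pe)
{
	struct snapshot *s = pe->snap;  /* back-pointer names the owning pool */

	(void)s;                        /* mempool_free(pe, &s->pending_pool) */
	free(pe);
}

int main(void)
{
	struct snapshot s = { 0 };

	free_pending_exception(alloc_pending_exception(&s));
	return 0;
}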
1645 struct dm_snap_pending_exception *pe = context;
1647 struct dm_snapshot *s = pe->snap;
1654 dm_exception_table_lock_init(s, pe->e.old_chunk, &lock);
1673 *e = pe->e;
1696 if (__chunk_is_tracked(s, pe->e.old_chunk)) {
1698 __check_for_conflicting_io(s, pe->e.old_chunk);
1704 dm_remove_exception(&pe->e);
1708 snapshot_bios = bio_list_get(&pe->snapshot_bios);
1709 origin_bios = bio_list_get(&pe->origin_bios);
1710 full_bio = pe->full_bio;
1712 full_bio->bi_end_io = pe->full_bio_end_io;
1728 free_pending_exception(pe);
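Lines 1645-1728 are pending_complete(): once the copy has been committed, the exception moves from the pending table to the complete table (*e = pe->e at 1673), any I/O still in flight against the old chunk is waited out (1696-1698), the pending entry is removed, and the bios parked on the pe are detached so the pe can be freed before they are reissued. The sketch below covers only that final bio hand-off, with hypothetical single-linked stand-ins for struct bio and struct bio_list; the exception-table and full_bio handling is left out:

#include <stdio.h>
#include <stdlib.h>

struct bio { const char *name; struct bio *next; };
struct bio_list { struct bio *head; };

struct pending_exception {
	struct bio_list snapshot_bios;  /* writes remapped to the snapshot */
	struct bio_list origin_bios;    /* origin writes held back on this chunk */
};

/* mirrors bio_list_get() at 1708-1709: detach the whole list in one go */
static struct bio *bio_list_get(struct bio_list *bl)
{
	struct bio *head = bl->head;

	bl->head = NULL;
	return head;
}

static void pending_complete(struct pending_exception *pe)
{
	struct bio *snapshot_bios = bio_list_get(&pe->snapshot_bios);
	struct bio *origin_bios = bio_list_get(&pe->origin_bios);

	free(pe);                       /* free_pending_exception() at 1728 */

	for (struct bio *b = snapshot_bios; b; b = b->next)
		printf("reissue snapshot bio %s\n", b->name);
	for (struct bio *b = origin_bios; b; b = b->next)
		printf("release origin bio %s\n", b->name);
}

int main(void)
{
	struct pending_exception *pe = calloc(1, sizeof(*pe));
	struct bio a = { "A", NULL }, b = { "B", NULL };

	if (!pe)
		return 1;
	pe->snapshot_bios.head = &a;    /* queued by snapshot_map() */
	pe->origin_bios.head = &b;      /* queued by __origin_write() */
	pending_complete(pe);
	return 0;
}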
1731 static void complete_exception(struct dm_snap_pending_exception *pe)
1733 struct dm_snapshot *s = pe->snap;
1736 s->store->type->commit_exception(s->store, &pe->e, !pe->copy_error,
1737 pending_complete, pe);
1746 struct dm_snap_pending_exception *pe = context;
1747 struct dm_snapshot *s = pe->snap;
1749 pe->copy_error = read_err || write_err;
1751 if (pe->exception_sequence == s->exception_complete_sequence) {
1755 complete_exception(pe);
1759 pe = rb_entry(next, struct dm_snap_pending_exception,
1761 if (pe->exception_sequence != s->exception_complete_sequence)
1765 rb_erase(&pe->out_of_order_node, &s->out_of_order_tree);
1766 complete_exception(pe);
1778 BUG_ON(pe->exception_sequence == pe2->exception_sequence);
1779 if (pe->exception_sequence < pe2->exception_sequence)
1785 rb_link_node(&pe->out_of_order_node, parent, p);
1786 rb_insert_color(&pe->out_of_order_node, &s->out_of_order_tree);
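The copy_callback() fragments (1746-1786) show how completions are forced back into submission order: every pending exception is stamped with exception_sequence when it is inserted (1876), and it may only be committed when that stamp matches s->exception_complete_sequence; completions that arrive early are parked in the out_of_order_tree rb-tree and drained as soon as the missing one turns up. A self-contained sketch of that mechanism, using a sorted singly linked list in place of the rb-tree and leaving out the copy_error bookkeeping:

#include <stdio.h>
#include <stdlib.h>

struct pending {
	unsigned long long seq;         /* exception_sequence */
	struct pending *next;           /* parking link (rb_node in the kernel) */
};

static struct pending *parked;                  /* sorted by seq, ascending */
static unsigned long long complete_seq;         /* exception_complete_sequence */

static void complete_exception(struct pending *pe)
{
	/* dm-snap.c commits pe->e through the exception store here, which
	 * later calls back into pending_complete(); the sketch just reports */
	printf("completing exception %llu\n", pe->seq);
	free(pe);
}

static void copy_callback(struct pending *pe)
{
	if (pe->seq == complete_seq) {
		complete_seq++;
		complete_exception(pe);

		/* drain parked completions that are now in order (1759-1766) */
		while (parked && parked->seq == complete_seq) {
			struct pending *next_pe = parked;

			parked = parked->next;
			complete_seq++;
			complete_exception(next_pe);
		}
		return;
	}

	/* arrived early: park it, keeping the list sorted by sequence,
	 * like the rb-tree insertion at 1778-1786 */
	{
		struct pending **pp = &parked;

		while (*pp && (*pp)->seq < pe->seq)
			pp = &(*pp)->next;
		pe->next = *pp;
		*pp = pe;
	}
}

int main(void)
{
	unsigned long long order[] = { 2, 0, 3, 1, 4 };  /* out-of-order arrivals */

	for (int i = 0; i < 5; i++) {
		struct pending *pe = malloc(sizeof(*pe));

		pe->seq = order[i];
		pe->next = NULL;
		copy_callback(pe);
	}
	return 0;
}

The exceptions still print in order 0 through 4. The kernel presumably keeps the parked completions in an rb-tree rather than a flat list so that insertion stays cheap when many copies are in flight at once.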
1794 static void start_copy(struct dm_snap_pending_exception *pe)
1796 struct dm_snapshot *s = pe->snap;
1804 src.sector = chunk_to_sector(s->store, pe->e.old_chunk);
1808 dest.sector = chunk_to_sector(s->store, pe->e.new_chunk);
1813 dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, copy_callback, pe);
1823 static void start_full_bio(struct dm_snap_pending_exception *pe,
1826 struct dm_snapshot *s = pe->snap;
1829 pe->full_bio = bio;
1830 pe->full_bio_end_io = bio->bi_end_io;
1834 copy_callback, pe);
1861 struct dm_snap_pending_exception *pe, chunk_t chunk)
1863 pe->e.old_chunk = chunk;
1864 bio_list_init(&pe->origin_bios);
1865 bio_list_init(&pe->snapshot_bios);
1866 pe->started = 0;
1867 pe->full_bio = NULL;
1870 if (s->store->type->prepare_exception(s->store, &pe->e)) {
1872 free_pending_exception(pe);
1876 pe->exception_sequence = s->exception_start_sequence++;
1879 dm_insert_exception(&s->pending, &pe->e);
1881 return pe;
1894 struct dm_snap_pending_exception *pe, chunk_t chunk)
1900 free_pending_exception(pe);
1904 return __insert_pending_exception(s, pe, chunk);
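__insert_pending_exception (1861) fills in the pe, asks the exception store to reserve a new chunk (freeing the pe if that fails, 1870-1872), stamps it with the next exception_start_sequence and inserts the entry into the pending table; __find_pending_exception (1894) wraps it for callers that had to drop the lock in order to allocate, so it first re-checks whether a racing thread already inserted a pe for this chunk and, if so, frees the fresh allocation and returns the existing one. A toy version of that allocate-outside-the-lock pattern, with a hypothetical fixed slot array standing in for the hashed exception table and no store or locking at all:

#include <stdlib.h>

typedef unsigned long long chunk_t;

struct pending_exception {
	chunk_t old_chunk;
	unsigned long long exception_sequence;
	int started;
};

#define NR_SLOTS 64

struct snapshot {
	struct pending_exception *pending[NR_SLOTS];  /* toy pending table */
	unsigned long long exception_start_sequence;
};

static struct pending_exception *
__insert_pending_exception(struct snapshot *s, struct pending_exception *pe,
			   chunk_t chunk)
{
	pe->old_chunk = chunk;
	pe->started = 0;
	/* the real code also reserves a COW chunk via the exception store
	 * here and frees pe on failure (1870-1872) */
	pe->exception_sequence = s->exception_start_sequence++;
	s->pending[chunk % NR_SLOTS] = pe;
	return pe;
}

static struct pending_exception *
__find_pending_exception(struct snapshot *s, struct pending_exception *pe,
			 chunk_t chunk)
{
	struct pending_exception *pe2 = s->pending[chunk % NR_SLOTS];

	if (pe2) {                      /* lost the race: drop ours, reuse the winner */
		free(pe);               /* free_pending_exception() at 1900 */
		return pe2;
	}
	return __insert_pending_exception(s, pe, chunk);
}

int main(void)
{
	struct snapshot s = { .exception_start_sequence = 0 };
	struct pending_exception *a = malloc(sizeof(*a));
	struct pending_exception *b = malloc(sizeof(*b));
	int same;

	a = __find_pending_exception(&s, a, 5);   /* wins the race, gets inserted */
	b = __find_pending_exception(&s, b, 5);   /* loses: handed back a */
	same = (a == b);
	free(a);
	return same ? 0 : 1;
}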
1954 struct dm_snap_pending_exception *pe = NULL;
2032 pe = __lookup_pending_exception(s, chunk);
2033 if (!pe) {
2035 pe = alloc_pending_exception(s);
2040 free_pending_exception(pe);
2045 pe = __find_pending_exception(s, pe, chunk);
2046 if (!pe) {
2066 remap_exception(s, &pe->e, bio, chunk);
2070 if (!pe->started && io_overlaps_chunk(s, bio)) {
2071 pe->started = 1;
2076 start_full_bio(pe, bio);
2080 bio_list_add(&pe->snapshot_bios, bio);
2082 if (!pe->started) {
2084 pe->started = 1;
2089 start_copy(pe);
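The snapshot_map() write path (1954-2089) ties these pieces together: look up, or allocate and insert, a pending exception for the chunk; remap the bio onto the exception's new chunk (remap_exception at 2066); then either let a bio that covers the whole chunk carry the copy itself via start_full_bio (2070-2076) or park the bio on pe->snapshot_bios and kick off a kcopyd copy with start_copy (2080-2089). A compressed userspace sketch of that decision, where every type and helper is a hypothetical stand-in and the prints only mark which branch the real code would take:

#include <stdio.h>
#include <stdlib.h>

struct bio { unsigned long sectors; };

struct pending_exception {
	unsigned long chunk;
	int started;
};

struct snapshot {
	unsigned long chunk_sectors;
	struct pending_exception *pending[64];  /* toy pending table */
};

static struct pending_exception *find_pending(struct snapshot *s, unsigned long chunk)
{
	struct pending_exception **slot = &s->pending[chunk % 64];

	if (!*slot) {                           /* first write: allocate and insert */
		*slot = calloc(1, sizeof(**slot));
		(*slot)->chunk = chunk;
	}
	return *slot;
}

static void snapshot_write(struct snapshot *s, struct bio *bio, unsigned long chunk)
{
	struct pending_exception *pe = find_pending(s, chunk);

	/* remap_exception() at 2066 would redirect bio to the new chunk here */

	if (!pe->started && bio->sectors == s->chunk_sectors) {
		pe->started = 1;                /* start_full_bio() path, 2070-2076 */
		printf("chunk %lu: whole-chunk bio performs the copy itself\n", chunk);
		return;
	}

	printf("chunk %lu: bio parked on snapshot_bios\n", chunk);
	if (!pe->started) {
		pe->started = 1;                /* start_copy() path, 2082-2089 */
		printf("chunk %lu: kcopyd copy started\n", chunk);
	}
}

int main(void)
{
	struct snapshot s = { .chunk_sectors = 16 };
	struct bio partial = { .sectors = 4 }, full = { .sectors = 16 };

	snapshot_write(&s, &partial, 3);   /* partial write: park bio, start copy */
	snapshot_write(&s, &partial, 3);   /* later write: copy already in flight */
	snapshot_write(&s, &full, 7);      /* whole-chunk write: full-bio shortcut */
	return 0;
}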
2440 struct dm_snap_pending_exception *pe, *pe2;
2473 pe = __lookup_pending_exception(snap, chunk);
2474 if (!pe) {
2485 pe = alloc_pending_exception(snap);
2493 free_pending_exception(pe);
2497 pe = __insert_pending_exception(snap, pe, chunk);
2498 if (!pe) {
2506 free_pending_exception(pe);
2507 pe = pe2;
2519 bio_list_add(&pe->origin_bios, bio);
2522 if (!pe->started) {
2523 pe->started = 1;
2524 pe_to_start_last = pe;
2528 if (!pe->started) {
2529 pe->started = 1;
2530 pe_to_start_now = pe;
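Finally, __origin_write() (2440-2530) is the other direction: a write to the origin device can only proceed once every snapshot that still shares the chunk has its copy under way. Each snapshot gets (or already has) a pending exception, the origin bio is parked on one pe's origin_bios list (2519), and the pe_to_start_now / pe_to_start_last pair makes sure the copy for the pe that holds the bio is started last, after the copies for the other snapshots have been submitted. A small sketch of just that ordering, with hypothetical types and a fixed array standing in for the origin's snapshot list:

#include <stdio.h>

struct pending_exception {
	const char *snap_name;
	int holds_origin_bio;           /* the origin bio is parked on this pe */
	int started;
};

static void start_copy(struct pending_exception *pe)
{
	printf("start copy for %s%s\n", pe->snap_name,
	       pe->holds_origin_bio ? " (holds the origin bio)" : "");
}

int main(void)
{
	struct pending_exception pes[] = {
		{ "snap0", 0, 0 },
		{ "snap1", 1, 0 },      /* the origin bio was queued here */
		{ "snap2", 0, 0 },
	};
	struct pending_exception *pe_to_start_last = NULL;

	for (int i = 0; i < 3; i++) {
		struct pending_exception *pe = &pes[i];

		if (pe->holds_origin_bio) {
			if (!pe->started) {
				pe->started = 1;        /* mirrors 2522-2524 */
				pe_to_start_last = pe;  /* defer this copy */
			}
			continue;
		}
		if (!pe->started) {
			pe->started = 1;                /* mirrors 2528-2530 */
			start_copy(pe);                 /* pe_to_start_now path */
		}
	}

	if (pe_to_start_last)
		start_copy(pe_to_start_last);           /* started last of all */
	return 0;
}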