Lines matching refs: map

83 struct dm_table *map;
416 struct dm_table *map;
421 map = dm_get_live_table(md, srcu_idx);
422 if (!map || !dm_table_get_size(map))
426 if (map->num_targets != 1)
429 ti = dm_table_get_target(map, 0);
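The cluster at lines 416-429 is the canonical way dm.c reaches the live table: take the SRCU read side, bail out if no table is bound or it is empty, and (on this particular path) insist on a single target. A minimal sketch of that pattern, with a hypothetical wrapper function and error code; dm_put_live_table() is the matching release:

    static int example_single_target_op(struct mapped_device *md)
    {
        struct dm_table *map;
        struct dm_target *ti;
        int srcu_idx, r = -EIO;

        map = dm_get_live_table(md, &srcu_idx);
        if (!map || !dm_table_get_size(map))
            goto out;

        /* This caller only supports single-target tables. */
        if (map->num_targets != 1)
            goto out;

        ti = dm_table_get_target(map, 0);
        /* ... operate on ti under the SRCU read lock ... */
        r = 0;
    out:
        dm_put_live_table(md, srcu_idx);
        return r;
    }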
682 * function to access the md->map field, and make sure they call
690 return srcu_dereference(md->map, &md->io_barrier);
712 return rcu_dereference(md->map);
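Lines 690 and 712 are the two read-side accessors behind everything above: an SRCU flavour for callers that may sleep while holding the table, and a plain-RCU flavour for fast, non-sleeping paths. Reconstructed from the matched lines (mainline names these dm_get_live_table() and dm_get_live_table_fast()):

    struct dm_table *dm_get_live_table(struct mapped_device *md,
                                       int *srcu_idx)
    {
        *srcu_idx = srcu_read_lock(&md->io_barrier);
        return srcu_dereference(md->map, &md->io_barrier);
    }

    static struct dm_table *dm_get_live_table_fast(struct mapped_device *md)
    {
        rcu_read_lock();
        return rcu_dereference(md->map);
    }

Every other md->map access in this listing that is not one of these two goes through rcu_dereference_protected() with md->suspend_lock held, i.e. the writer side.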
723 * Open a table device so we can use it as a map destination.
1218 struct dm_table *map;
1221 map = dm_get_live_table(md, srcu_idx);
1222 if (!map)
1225 ti = dm_table_find_target(map, sector);
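Lines 1218-1225 map a sector to the target that owns it. A sketch with a hypothetical caller name; dm_table_find_target() returns NULL when the sector falls outside every target:

    static struct dm_target *example_sector_to_target(struct mapped_device *md,
                                                      sector_t sector,
                                                      int *srcu_idx)
    {
        struct dm_table *map;

        map = dm_get_live_table(md, srcu_idx);
        if (!map)
            return NULL;    /* caller must still dm_put_live_table() */

        return dm_table_find_target(map, sector);
    }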
1307 * A target may call dm_accept_partial_bio only from the map routine. It is
1362 * @clone: clone bio that DM core passed to target's .map function
1435 * calls the target map operation.
1443 if (likely(ti->type->map == linear_map))
1445 else if (ti->type->map == stripe_map)
1448 r = ti->type->map(ti, clone);
1472 DMCRIT("unimplemented target map return value: %d", r);
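Lines 1443-1472 are the hot-path dispatch into a target's map method: the built-in linear and stripe targets are called directly (avoiding an indirect call), everything else goes through ti->type->map, and the return value steers the clone bio. A condensed sketch of that switch; the accounting and zone handling in mainline are omitted:

    if (likely(ti->type->map == linear_map))
        r = linear_map(ti, clone);
    else if (ti->type->map == stripe_map)
        r = stripe_map(ti, clone);
    else
        r = ti->type->map(ti, clone);

    switch (r) {
    case DM_MAPIO_SUBMITTED:
        /* Target took ownership of the clone. */
        break;
    case DM_MAPIO_REMAPPED:
        /* Target redirected bi_bdev; send the clone down. */
        submit_bio_noacct(clone);
        break;
    case DM_MAPIO_KILL:
    case DM_MAPIO_REQUEUE:
        /*
         * Simplified: the real code frees the clone and completes
         * the io with BLK_STS_IOERR or BLK_STS_DM_REQUEUE.
         */
        break;
    default:
        DMCRIT("unimplemented target map return value: %d", r);
        BUG();
    }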
1554 struct dm_table *t = ci->map;
1726 ti = dm_table_find_target(ci->map, ci->sector);
1761 struct dm_table *map, struct bio *bio, bool is_abnormal)
1763 ci->map = map;
1781 struct dm_table *map, struct bio *bio)
1810 init_clone_info(&ci, io, map, bio, is_abnormal);
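Lines 1554-1810 thread a per-submission clone_info through the split path; init_clone_info() seeds it with the table, the original bio, and the io accounting object. The field set below is reconstructed from these matches only; mainline's struct clone_info has additional members:

    struct clone_info {
        struct dm_table *map;      /* table the bio is mapped against */
        struct bio *bio;           /* original, as-submitted bio */
        struct dm_io *io;          /* accounting/completion object */
        sector_t sector;           /* next sector to map */
        unsigned int sector_count; /* sectors left to map */
    };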
1853 struct dm_table *map;
1855 map = dm_get_live_table(md, &srcu_idx);
1857 /* If suspended, or map not yet available, queue this IO for later */
1859 unlikely(!map)) {
1869 dm_split_and_process_bio(md, map, bio);
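Lines 1853-1869 are the bio submission entry point: the SRCU read lock is held across the whole split-and-map so the table cannot be swapped out mid-flight, and bios that arrive while suspended (or before any table is bound) are parked on a deferred list. Sketch; DMF_BLOCK_IO_FOR_SUSPEND and queue_io() are the mainline flag and helper, and the REQ_NOWAIT/REQ_RAHEAD special cases are omitted:

    map = dm_get_live_table(md, &srcu_idx);

    /* If suspended, or map not yet available, queue this IO for later */
    if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) ||
        unlikely(!map)) {
        queue_io(md, bio);    /* replayed when the device resumes */
        goto out;
    }

    dm_split_and_process_bio(md, map, bio);
    out:
        dm_put_live_table(md, srcu_idx);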
2220 * Returns old map, which caller must destroy.
2279 old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
2280 rcu_assign_pointer(md->map, (void *)t);
2294 struct dm_table *map = rcu_dereference_protected(md->map, 1);
2296 if (!map)
2299 dm_table_event_callback(map, NULL, NULL);
2300 RCU_INIT_POINTER(md->map, NULL);
2303 return map;
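Lines 2279-2303 are the writer side of the RCU scheme: __bind() publishes the new table under md->suspend_lock and hands the old one back (line 2220's "caller must destroy"), while __unbind() clears the pointer before teardown. Condensed from the matches; dm_sync_table() is mainline's wrapper around synchronize_srcu()/synchronize_rcu():

    /* __bind(): swap in the new table t, return the displaced one. */
    old_map = rcu_dereference_protected(md->map,
                        lockdep_is_held(&md->suspend_lock));
    rcu_assign_pointer(md->map, (void *)t);
    if (old_map)
        dm_sync_table(md);    /* wait out all SRCU/RCU readers */
    return old_map;

    /* __unbind(): detach the table entirely. */
    map = rcu_dereference_protected(md->map, 1);
    if (!map)
        return NULL;
    dm_table_event_callback(map, NULL, NULL);
    RCU_INIT_POINTER(md->map, NULL);
    dm_sync_table(md);
    return map;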
2487 struct dm_table *map;
2504 map = dm_get_live_table(md, &srcu_idx);
2506 dm_table_presuspend_targets(map);
2509 dm_table_postsuspend_targets(map);
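Lines 2487-2509 are the teardown path: grab the live table one last time and, if the device is not already suspended, run the presuspend and postsuspend hooks back to back so targets quiesce before destruction. Sketch; the dm_suspended_md() guard is how mainline's __dm_destroy() gates this, and both table helpers tolerate a NULL table (which is why the matches carry no NULL check):

    map = dm_get_live_table(md, &srcu_idx);
    if (!dm_suspended_md(md)) {
        dm_table_presuspend_targets(map);
        dm_table_postsuspend_targets(map);
    }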
2639 struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL);
2665 map = ERR_PTR(r);
2670 map = __bind(md, table, &limits);
2675 return map;
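Lines 2639-2675 show the table-swap helper's calling convention: it returns either the displaced table (for the caller to destroy) or an ERR_PTR. A skeleton under that convention, with a hypothetical function name; the live_map/queue-limits fallback implied by line 2639 is elided:

    static struct dm_table *example_swap_table(struct mapped_device *md,
                                               struct dm_table *table)
    {
        struct dm_table *map = ERR_PTR(-EINVAL);
        struct queue_limits limits;
        int r;

        /* Validate the new table's limits before touching md->map. */
        r = dm_calculate_queue_limits(table, &limits);
        if (r) {
            map = ERR_PTR(r);
            goto out;
        }

        /* __bind() returns the old table; pass it up for disposal. */
        map = __bind(md, table, &limits);
    out:
        return map;
    }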
2711 static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
2734 dm_table_presuspend_targets(map);
2745 dm_table_presuspend_undo_targets(map);
2762 if (map)
2785 if (map)
2796 dm_table_presuspend_undo_targets(map);
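Lines 2711-2796 fix the ordering inside __dm_suspend(): presuspend first (I/O may still be in flight), then block and flush, and only on failure the matching undo. The map-touching calls at lines 2762 and 2785 are conditional because a device can be suspended before any table is bound. A heavily condensed sketch, with a hypothetical name:

    static int example_dm_suspend_core(struct mapped_device *md,
                                       struct dm_table *map)
    {
        int r;

        /* map may be NULL: suspend can precede the first table load. */
        dm_table_presuspend_targets(map);    /* tolerates NULL */

        /* ... block new I/O, flush workqueues, wait for in-flight ... */
        r = 0;    /* result of that wait, simplified */

        if (r < 0) {
            /* Roll back so targets see a matched undo. */
            dm_table_presuspend_undo_targets(map);
        }
        return r;
    }

On success the caller, not this helper, runs dm_table_postsuspend_targets() (line 2852).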
2821 struct dm_table *map = NULL;
2841 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
2842 if (!map) {
2847 r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE, DMF_SUSPENDED);
2852 dm_table_postsuspend_targets(map);
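Lines 2821-2852 wrap that core in the user-visible dm_suspend(): everything happens under md->suspend_lock, which is also what makes the rcu_dereference_protected() at line 2841 legal. Simplified, with the !map early-out at line 2842 omitted:

    mutex_lock(&md->suspend_lock);
    map = rcu_dereference_protected(md->map,
                        lockdep_is_held(&md->suspend_lock));
    r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE,
                     DMF_SUSPENDED);
    if (!r)
        dm_table_postsuspend_targets(map);
    mutex_unlock(&md->suspend_lock);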
2860 static int __dm_resume(struct mapped_device *md, struct dm_table *map)
2862 if (map) {
2863 int r = dm_table_resume_targets(map);
2887 struct dm_table *map = NULL;
2905 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
2906 if (!map || !dm_table_get_size(map))
2909 r = __dm_resume(md, map);
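Lines 2887-2909 are the mirror image: under the same lock, resume is skipped unless a non-empty table is bound, then __dm_resume() (line 2862) calls dm_table_resume_targets() and restarts I/O. Simplified:

    mutex_lock(&md->suspend_lock);
    map = rcu_dereference_protected(md->map,
                        lockdep_is_held(&md->suspend_lock));
    if (!map || !dm_table_get_size(map))
        goto out;    /* nothing bound, nothing to resume */

    r = __dm_resume(md, map);    /* resume targets, replay deferred bios */
    out:
        mutex_unlock(&md->suspend_lock);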
2928 struct dm_table *map = NULL;
2940 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
2948 (void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE,
2952 dm_table_postsuspend_targets(map);
2959 struct dm_table *map;
2969 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
2970 r = __dm_resume(md, map);
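Lines 2928-2970 are the kernel-initiated variant: the same __dm_suspend()/__dm_resume() core, but uninterruptible and tracked by a separate flag so it can nest with a user-initiated suspend. Sketch; DMF_SUSPENDED_INTERNALLY is the flag mainline uses for internal suspend (the match at line 2948 is truncated before it):

    /* Internal suspend: the (void) cast matches line 2948. */
    (void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE,
                        DMF_SUSPENDED_INTERNALLY);
    dm_table_postsuspend_targets(map);

    /* Internal resume: the same table is still bound. */
    map = rcu_dereference_protected(md->map,
                        lockdep_is_held(&md->suspend_lock));
    r = __dm_resume(md, map);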