Lines Matching defs:mapped_device
(only in /asuswrt-rt-n18u-9.0.0.4.380.2695/release/src-rt-6.x.4708/linux/linux-2.6/drivers/md/)

47 	struct mapped_device *md;
71 struct mapped_device *md;
118 struct mapped_device {
338 int dm_deleting_md(struct mapped_device *md)
345 struct mapped_device *md;
372 struct mapped_device *md = disk->private_data;
382 int dm_open_count(struct mapped_device *md)
390 int dm_lock_for_deletion(struct mapped_device *md)
408 struct mapped_device *md = bdev->bd_disk->private_data;
416 struct mapped_device *md = bdev->bd_disk->private_data;
444 static struct dm_io *alloc_io(struct mapped_device *md)
449 static void free_io(struct mapped_device *md, struct dm_io *io)
454 static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
459 static struct dm_rq_target_io *alloc_rq_tio(struct mapped_device *md,
470 static struct dm_rq_clone_bio_info *alloc_bio_info(struct mapped_device *md)
480 static int md_in_flight(struct mapped_device *md)
488 struct mapped_device *md = io->md;
502 struct mapped_device *md = io->md;
529 static void queue_io(struct mapped_device *md, struct bio *bio)
548 struct dm_table *dm_get_live_table(struct mapped_device *md)
565 int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
575 int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
598 static int __noflush_suspending(struct mapped_device *md)
612 struct mapped_device *md = io->md;
675 struct mapped_device *md = tio->io->md;
758 static void store_barrier_error(struct mapped_device *md, int error)
780 static void rq_completed(struct mapped_device *md, int rw, int run_queue)
815 struct mapped_device *md = tio->md;
860 struct mapped_device *md = tio->md;
1068 struct mapped_device *md;
1105 struct mapped_device *md;
1379 static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)
1424 struct mapped_device *md = q->queuedata;
1484 struct mapped_device *md = q->queuedata;
1520 struct mapped_device *md = q->queuedata;
1525 static int dm_request_based(struct mapped_device *md)
1532 struct mapped_device *md = q->queuedata;
1565 struct mapped_device *md = info->tio->md;
1575 struct mapped_device *md = tio->md;
1617 static struct request *clone_rq(struct request *rq, struct mapped_device *md,
1648 struct mapped_device *md = q->queuedata;
1675 struct mapped_device *md)
1726 struct mapped_device *md = q->queuedata;
1790 struct mapped_device *md = q->queuedata;
1805 struct mapped_device *md = q->queuedata;
1820 struct mapped_device *md = congested_data;
1923 static void dm_init_md_queue(struct mapped_device *md)
1948 static struct mapped_device *alloc_dev(int minor)
1951 struct mapped_device *md = kzalloc(sizeof(*md), GFP_KERNEL);
2043 static void unlock_fs(struct mapped_device *md);
2045 static void free_dev(struct mapped_device *md)
2072 static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
2102 struct mapped_device *md = (struct mapped_device *) context;
2117 static void __set_size(struct mapped_device *md, sector_t size)
2127 static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
2171 static struct dm_table *__unbind(struct mapped_device *md)
2190 int dm_create(int minor, struct mapped_device **result)
2192 struct mapped_device *md;
2208 void dm_lock_md_type(struct mapped_device *md)
2213 void dm_unlock_md_type(struct mapped_device *md)
2218 void dm_set_md_type(struct mapped_device *md, unsigned type)
2223 unsigned dm_get_md_type(struct mapped_device *md)
2231 static int dm_init_request_based_queue(struct mapped_device *md)
2259 int dm_setup_md_queue(struct mapped_device *md)
2270 static struct mapped_device *dm_find_md(dev_t dev)
2272 struct mapped_device *md;
2295 struct mapped_device *dm_get_md(dev_t dev)
2297 struct mapped_device *md = dm_find_md(dev);
2305 void *dm_get_mdptr(struct mapped_device *md)
2310 void dm_set_mdptr(struct mapped_device *md, void *ptr)
2315 void dm_get(struct mapped_device *md)
2321 const char *dm_device_name(struct mapped_device *md)
2327 static void __dm_destroy(struct mapped_device *md, bool wait)
2347 * No one should increment the reference count of the mapped_device,
2348 * after the mapped_device state becomes DMF_FREEING.
2354 DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)",
2363 void dm_destroy(struct mapped_device *md)
2368 void dm_destroy_immediate(struct mapped_device *md)
2373 void dm_put(struct mapped_device *md)
2379 static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
2410 static void dm_flush(struct mapped_device *md)
2422 static void process_barrier(struct mapped_device *md, struct bio *bio)
2452 struct mapped_device *md = container_of(work, struct mapped_device,
2485 static void dm_queue_flush(struct mapped_device *md)
2500 static int dm_rq_barrier(struct mapped_device *md)
2529 struct mapped_device *md = container_of(work, struct mapped_device,
2562 struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
2591 static int lock_fs(struct mapped_device *md)
2609 static void unlock_fs(struct mapped_device *md)
2635 int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
2745 int dm_resume(struct mapped_device *md)
2788 int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
2804 uint32_t dm_next_uevent_seq(struct mapped_device *md)
2809 uint32_t dm_get_event_nr(struct mapped_device *md)
2814 int dm_wait_event(struct mapped_device *md, int event_nr)
2820 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
2833 struct gendisk *dm_disk(struct mapped_device *md)
2838 struct kobject *dm_kobject(struct mapped_device *md)
2844 * struct mapped_device should not be exported outside of dm.c
2847 struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
2849 struct mapped_device *md;
2851 md = container_of(kobj, struct mapped_device, kobj);
2863 int dm_suspended_md(struct mapped_device *md)
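
The listing above amounts to the lifecycle surface of struct mapped_device in the device-mapper core (presumably drivers/md/dm.c in this tree). As a rough orientation aid, the sketch below strings the main helpers together in the order the ioctl layer typically uses them: create, suspend, bind a table, resume, and tear down. This is illustrative code written for this note, not code from the firmware tree; the function signatures are taken from the listing, while the header locations, the exact call ordering, and the error handling are assumptions and are abbreviated. Building the dm_table 't' itself is out of scope.

/*
 * Minimal sketch of the mapped_device lifecycle, under the assumptions
 * stated above.  dm_create(), dm_destroy() and dm_suspended_md() are
 * assumed to come from the core's internal "dm.h"; the remaining calls
 * from <linux/device-mapper.h>.
 */
#include <linux/err.h>
#include <linux/device-mapper.h>
#include "dm.h"

static int example_bind_table(struct dm_table *t)
{
	struct mapped_device *md;
	struct dm_table *old_map;
	int r;

	/* Allocate a new mapped_device; alloc_dev() does the real work. */
	r = dm_create(DM_ANY_MINOR, &md);
	if (r)
		return r;

	/* A table is swapped in while the device is suspended. */
	if (!dm_suspended_md(md)) {
		r = dm_suspend(md, DM_SUSPEND_LOCKFS_FLAG);
		if (r)
			goto out;
	}

	/* __bind() the new table; the previous one (if any) comes back. */
	old_map = dm_swap_table(md, t);
	if (IS_ERR(old_map)) {
		r = PTR_ERR(old_map);
		goto out;
	}
	if (old_map)
		dm_table_destroy(old_map);

	/* Restart I/O against the newly bound table. */
	r = dm_resume(md);
out:
	dm_put(md);		/* drop the reference taken by dm_create() */
	if (r)
		dm_destroy(md);	/* tear the half-built device back down */
	return r;
}

The suspend-before-swap step mirrors what the listing itself suggests (dm_swap_table() sits between dm_suspend() and dm_resume() in dm.c); dm_put() releases the caller's reference, while dm_destroy() performs the actual teardown once no users remain.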