/*
 * Copyright (C) 2001 Sistina Software (UK) Limited.
 * Copyright (C) 2004 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm.h"

#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <asm/atomic.h>

#define DM_MSG_PREFIX "table"

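/*
 * The table is indexed by a btree whose nodes are sized to fit in a
 * single cache line: each node holds KEYS_PER_NODE sector_t keys and
 * therefore fans out to KEYS_PER_NODE + 1 children.  MAX_DEPTH bounds
 * the height of that tree.
 */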
#define MAX_DEPTH 16
#define NODE_SIZE L1_CACHE_BYTES
#define KEYS_PER_NODE (NODE_SIZE / sizeof(sector_t))
#define CHILDREN_PER_NODE (KEYS_PER_NODE + 1)

struct dm_table {
	struct mapped_device *md;
	atomic_t holders;

	/* btree table */
	unsigned int depth;
	unsigned int counts[MAX_DEPTH];	/* in nodes */
	sector_t *index[MAX_DEPTH];

	unsigned int num_targets;
	unsigned int num_allocated;
	sector_t *highs;
	struct dm_target *targets;

	/*
	 * Indicates the rw permissions for the new logical
	 * device.  This should be a combination of FMODE_READ
	 * and FMODE_WRITE.
	 */
	int mode;

	/* a list of devices used by this table */
	struct list_head devices;

	/*
	 * These are optimistic limits taken from all the
	 * targets, some targets will need smaller limits.
	 */
	struct io_restrictions limits;

	/* events get handed up using this callback */
	void (*event_fn)(void *);
	void *event_context;
};

/*
 * Similar to ceiling(log_base(n)): e.g. int_log(1000, 64) == 2.
 */
static unsigned int int_log(unsigned int n, unsigned int base)
{
	int result = 0;

	while (n > 1) {
		n = dm_div_up(n, base);
		result++;
	}

	return result;
}

/*
 * Returns the minimum that is _not_ zero, unless both are zero.
 */
#define min_not_zero(l, r) \
	((l) == 0 ? (r) : ((r) == 0 ? (l) : min((l), (r))))

/*
 * Combine two io_restrictions, taking the stricter value in each case
 * (the larger hardsect_size, the smaller of everything else).
 */
static void combine_restrictions_low(struct io_restrictions *lhs,
				     struct io_restrictions *rhs)
{
	lhs->max_sectors =
		min_not_zero(lhs->max_sectors, rhs->max_sectors);

	lhs->max_phys_segments =
		min_not_zero(lhs->max_phys_segments, rhs->max_phys_segments);

	lhs->max_hw_segments =
		min_not_zero(lhs->max_hw_segments, rhs->max_hw_segments);

	lhs->hardsect_size = max(lhs->hardsect_size, rhs->hardsect_size);

	lhs->max_segment_size =
		min_not_zero(lhs->max_segment_size, rhs->max_segment_size);

	lhs->seg_boundary_mask =
		min_not_zero(lhs->seg_boundary_mask, rhs->seg_boundary_mask);

	lhs->no_cluster |= rhs->no_cluster;
}

/*
 * Calculate the index of the child node for the k'th key of the n'th node.
 */
static inline unsigned int get_child(unsigned int n, unsigned int k)
{
	return (n * CHILDREN_PER_NODE) + k;
}

/*
 * Return the n'th node of level l from table t.
 */
static inline sector_t *get_node(struct dm_table *t,
				 unsigned int l, unsigned int n)
{
	return t->index[l] + (n * KEYS_PER_NODE);
}

/*
 * Return the highest key that you could lookup from the n'th
 * node on level l of the btree.
 */
static sector_t high(struct dm_table *t, unsigned int l, unsigned int n)
{
	for (; l < t->depth - 1; l++)
		n = get_child(n, CHILDREN_PER_NODE - 1);

	if (n >= t->counts[l])
		return (sector_t) -1;

	return get_node(t, l, n)[KEYS_PER_NODE - 1];
}

/*
 * Fills in a level of the btree based on the highs of the level
 * below it.
 */
static int setup_btree_index(unsigned int l, struct dm_table *t)
{
	unsigned int n, k;
	sector_t *node;

	for (n = 0U; n < t->counts[l]; n++) {
		node = get_node(t, l, n);

		for (k = 0U; k < KEYS_PER_NODE; k++)
			node[k] = high(t, l + 1, get_child(n, k));
	}

	return 0;
}

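/*
 * Allocate and zero an array of nmemb elements of elem_size bytes with
 * vmalloc().  Returns NULL if the multiplication would overflow or the
 * allocation fails.
 */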
void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size)
{
	unsigned long size;
	void *addr;

	/*
	 * Check that we're not going to overflow.
	 */
	if (nmemb > (ULONG_MAX / elem_size))
		return NULL;

	size = nmemb * elem_size;
	addr = vmalloc(size);
	if (addr)
		memset(addr, 0, size);

	return addr;
}

/*
 * highs and targets are managed as dynamic arrays during a
 * table load.
 */
static int alloc_targets(struct dm_table *t, unsigned int num)
{
	sector_t *n_highs;
	struct dm_target *n_targets;
	int n = t->num_targets;

	/*
	 * Allocate both the target array and offset array at once.
	 */
	n_highs = (sector_t *) dm_vcalloc(num, sizeof(struct dm_target) +
					  sizeof(sector_t));
	if (!n_highs)
		return -ENOMEM;

	n_targets = (struct dm_target *) (n_highs + num);

	if (n) {
		memcpy(n_highs, t->highs, sizeof(*n_highs) * n);
		memcpy(n_targets, t->targets, sizeof(*n_targets) * n);
	}

	memset(n_highs + n, -1, sizeof(*n_highs) * (num - n));
	vfree(t->highs);

	t->num_allocated = num;
	t->highs = n_highs;
	t->targets = n_targets;

	return 0;
}

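/*
 * Create an empty table.  num_targets is a hint: zero means "use the
 * default", and any value is rounded up to a multiple of KEYS_PER_NODE.
 * The new table starts with a single holder reference.
 */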
int dm_table_create(struct dm_table **result, int mode,
		    unsigned num_targets, struct mapped_device *md)
{
	struct dm_table *t = kmalloc(sizeof(*t), GFP_KERNEL);

	if (!t)
		return -ENOMEM;

	memset(t, 0, sizeof(*t));
	INIT_LIST_HEAD(&t->devices);
	atomic_set(&t->holders, 1);

	if (!num_targets)
		num_targets = KEYS_PER_NODE;

	num_targets = dm_round_up(num_targets, KEYS_PER_NODE);

	if (alloc_targets(t, num_targets)) {
		kfree(t);
		t = NULL;
		return -ENOMEM;
	}

	t->mode = mode;
	t->md = md;
	*result = t;
	return 0;
}

int dm_create_error_table(struct dm_table **result, struct mapped_device *md)
{
	struct dm_table *t;
	sector_t dev_size = 1;
	int r;

	/*
	 * Find current size of device.
	 * Default to 1 sector if inactive.
	 */
	t = dm_get_table(md);
	if (t) {
		dev_size = dm_table_get_size(t);
		dm_table_put(t);
	}

	r = dm_table_create(&t, FMODE_READ, 1, md);
	if (r)
		return r;

	r = dm_table_add_target(t, "error", 0, dev_size, NULL);
	if (r)
		goto out;

	r = dm_table_complete(t);
	if (r)
		goto out;

	*result = t;

out:
	if (r)
		dm_table_put(t);

	return r;
}
EXPORT_SYMBOL_GPL(dm_create_error_table);

static void free_devices(struct list_head *devices)
{
	struct list_head *tmp, *next;

	for (tmp = devices->next; tmp != devices; tmp = next) {
		struct dm_dev *dd = list_entry(tmp, struct dm_dev, list);
		next = tmp->next;
		kfree(dd);
	}
}

static void table_destroy(struct dm_table *t)
{
	unsigned int i;

	/* free the indexes (see dm_table_complete) */
	if (t->depth >= 2)
		vfree(t->index[t->depth - 2]);

	/* free the targets */
	for (i = 0; i < t->num_targets; i++) {
		struct dm_target *tgt = t->targets + i;

		if (tgt->type->dtr)
			tgt->type->dtr(tgt);

		dm_put_target_type(tgt->type);
	}

	vfree(t->highs);

	/* free the device list */
	if (t->devices.next != &t->devices) {
		DMWARN("devices still present during destroy: "
		       "dm_table_remove_device calls missing");

		free_devices(&t->devices);
	}

	kfree(t);
}

void dm_table_get(struct dm_table *t)
{
	atomic_inc(&t->holders);
}

void dm_table_put(struct dm_table *t)
{
	if (!t)
		return;

	if (atomic_dec_and_test(&t->holders))
		table_destroy(t);
}

/*
 * Checks to see if we need to extend highs or targets.
 */
static inline int check_space(struct dm_table *t)
{
	if (t->num_targets >= t->num_allocated)
		return alloc_targets(t, t->num_allocated * 2);

	return 0;
}

/*
 * Convert a device path to a dev_t.
 */
static int lookup_device(const char *path, dev_t *dev)
{
	int r;
	struct nameidata nd;
	struct inode *inode;

	if ((r = path_lookup(path, LOOKUP_FOLLOW, &nd)))
		return r;

	inode = nd.dentry->d_inode;
	if (!inode) {
		r = -ENOENT;
		goto out;
	}

	if (!S_ISBLK(inode->i_mode)) {
		r = -ENOTBLK;
		goto out;
	}

	*dev = inode->i_rdev;

 out:
	path_release(&nd);
	return r;
}

/*
 * See if we've already got a device in the list.
 */
static struct dm_dev *find_device(struct list_head *l, dev_t dev)
{
	struct dm_dev *dd;

	list_for_each_entry (dd, l, list)
		if (dd->bdev->bd_dev == dev)
			return dd;

	return NULL;
}

/*
 * Open a device so we can use it as a map destination.
 */
static int open_dev(struct dm_dev *d, dev_t dev, struct mapped_device *md)
{
	static char *_claim_ptr = "I belong to device-mapper";
	struct block_device *bdev;

	int r;

	BUG_ON(d->bdev);

	bdev = open_by_devnum(dev, d->mode);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);
	r = bd_claim_by_disk(bdev, _claim_ptr, dm_disk(md));
	if (r)
		blkdev_put(bdev);
	else
		d->bdev = bdev;
	return r;
}

/*
 * Close a device that we've been using.
 */
static void close_dev(struct dm_dev *d, struct mapped_device *md)
{
	if (!d->bdev)
		return;

	bd_release_from_disk(d->bdev, dm_disk(md));
	blkdev_put(d->bdev);
	d->bdev = NULL;
}

/*
 * If possible, check that the area (start, len) of a destination device
 * is valid: returns 1 if it fits within the device (or if the device
 * size is unknown), 0 otherwise.
 */
static int check_device_area(struct dm_dev *dd, sector_t start, sector_t len)
{
	sector_t dev_size = dd->bdev->bd_inode->i_size >> SECTOR_SHIFT;

	if (!dev_size)
		return 1;

	return ((start < dev_size) && (len <= (dev_size - start)));
}

/*
 * This upgrades the mode on an already-open dm_dev, taking care to
 * leave things as they were if we fail to reopen the device.
 */
static int upgrade_mode(struct dm_dev *dd, int new_mode, struct mapped_device *md)
{
	int r;
	struct dm_dev dd_copy;
	dev_t dev = dd->bdev->bd_dev;

	dd_copy = *dd;

	dd->mode |= new_mode;
	dd->bdev = NULL;
	r = open_dev(dd, dev, md);
	if (!r)
		close_dev(&dd_copy, md);
	else
		*dd = dd_copy;

	return r;
}

/*
 * Add a device to the list, or just increment the usage count if
 * it's already present.
 */
static int __table_get_device(struct dm_table *t, struct dm_target *ti,
			      const char *path, sector_t start, sector_t len,
			      int mode, struct dm_dev **result)
{
	int r;
	dev_t dev;
	struct dm_dev *dd;
	unsigned int major, minor;

	BUG_ON(!t);

	if (sscanf(path, "%u:%u", &major, &minor) == 2) {
		/* Extract the major/minor numbers */
		dev = MKDEV(major, minor);
		if (MAJOR(dev) != major || MINOR(dev) != minor)
			return -EOVERFLOW;
	} else {
		/* convert the path to a device */
		if ((r = lookup_device(path, &dev)))
			return r;
	}

	dd = find_device(&t->devices, dev);
	if (!dd) {
		dd = kmalloc(sizeof(*dd), GFP_KERNEL);
		if (!dd)
			return -ENOMEM;

		dd->mode = mode;
		dd->bdev = NULL;

		if ((r = open_dev(dd, dev, t->md))) {
			kfree(dd);
			return r;
		}

		format_dev_t(dd->name, dev);

		atomic_set(&dd->count, 0);
		list_add(&dd->list, &t->devices);

	} else if (dd->mode != (mode | dd->mode)) {
		r = upgrade_mode(dd, mode, t->md);
		if (r)
			return r;
	}
	atomic_inc(&dd->count);

	if (!check_device_area(dd, start, len)) {
		DMWARN("device %s too small for target", path);
		dm_put_device(ti, dd);
		return -EINVAL;
	}

	*result = dd;

	return 0;
}

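/*
 * Merge the queue limits of an underlying block device into the
 * target's io_restrictions, keeping the stricter value of each field.
 */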
void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev)
{
	request_queue_t *q = bdev_get_queue(bdev);
	struct io_restrictions *rs = &ti->limits;

	rs->max_sectors =
		min_not_zero(rs->max_sectors, q->max_sectors);

	if (q->merge_bvec_fn)
		rs->max_sectors =
			min_not_zero(rs->max_sectors,
				     (unsigned int) (PAGE_SIZE >> 9));

	rs->max_phys_segments =
		min_not_zero(rs->max_phys_segments,
			     q->max_phys_segments);

	rs->max_hw_segments =
		min_not_zero(rs->max_hw_segments, q->max_hw_segments);

	rs->hardsect_size = max(rs->hardsect_size, q->hardsect_size);

	rs->max_segment_size =
		min_not_zero(rs->max_segment_size, q->max_segment_size);

	rs->seg_boundary_mask =
		min_not_zero(rs->seg_boundary_mask,
			     q->seg_boundary_mask);

	rs->no_cluster |= !test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
}
EXPORT_SYMBOL_GPL(dm_set_device_limits);

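/*
 * Open a device (by path or "major:minor") on behalf of a target and
 * fold its queue limits into the target's.  The device is released
 * again with dm_put_device().
 */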
int dm_get_device(struct dm_target *ti, const char *path, sector_t start,
		  sector_t len, int mode, struct dm_dev **result)
{
	int r = __table_get_device(ti->table, ti, path,
				   start, len, mode, result);

	if (!r)
		dm_set_device_limits(ti, (*result)->bdev);

	return r;
}

/*
 * Decrement a device's use count and remove it if necessary.
 */
void dm_put_device(struct dm_target *ti, struct dm_dev *dd)
{
	if (atomic_dec_and_test(&dd->count)) {
		close_dev(dd, ti->table->md);
		list_del(&dd->list);
		kfree(dd);
	}
}

/*
 * Checks to see if the target joins onto the end of the table.
 */
static int adjoin(struct dm_table *table, struct dm_target *ti)
{
	struct dm_target *prev;

	if (!table->num_targets)
		return !ti->begin;

	prev = &table->targets[table->num_targets - 1];
	return (ti->begin == (prev->begin + prev->len));
}

/*
 * Used to dynamically allocate the arg array.
 */
static char **realloc_argv(unsigned *array_size, char **old_argv)
{
	char **argv;
	unsigned new_size;

	new_size = *array_size ? *array_size * 2 : 64;
	argv = kmalloc(new_size * sizeof(*argv), GFP_KERNEL);
	if (argv) {
		memcpy(argv, old_argv, *array_size * sizeof(*argv));
		*array_size = new_size;
	}

	kfree(old_argv);
	return argv;
}

/*
 * Destructively splits up the argument list to pass to ctr.  Tokens are
 * separated by whitespace; a backslash escapes the following character,
 * so "a b\ c" is split into { "a", "b c" }.
 */
int dm_split_args(int *argc, char ***argvp, char *input)
{
	char *start, *end = input, *out, **argv = NULL;
	unsigned array_size = 0;

	*argc = 0;

	if (!input) {
		*argvp = NULL;
		return 0;
	}

	argv = realloc_argv(&array_size, argv);
	if (!argv)
		return -ENOMEM;

	while (1) {
		start = end;

		/* Skip whitespace */
		while (*start && isspace(*start))
			start++;

		if (!*start)
			break;	/* success, we hit the end */

		/* 'out' is used to remove any backslash escapes */
		end = out = start;
		while (*end) {
			/* Everything apart from '\0' can be quoted */
			if (*end == '\\' && *(end + 1)) {
				*out++ = *(end + 1);
				end += 2;
				continue;
			}

			if (isspace(*end))
				break;	/* end of token */

			*out++ = *end++;
		}

		/* have we already filled the array ? */
		if ((*argc + 1) > array_size) {
			argv = realloc_argv(&array_size, argv);
			if (!argv)
				return -ENOMEM;
		}

		/* we know this is whitespace */
		if (*end)
			end++;

		/* terminate the string and put it in the array */
		*out = '\0';
		argv[*argc] = start;
		(*argc)++;
	}

	*argvp = argv;
	return 0;
}

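/*
 * Fill in a default for any limit a target left at zero, so the
 * restrictions handed to the block layer are always fully specified.
 */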
static void check_for_valid_limits(struct io_restrictions *rs)
{
	if (!rs->max_sectors)
		rs->max_sectors = SAFE_MAX_SECTORS;
	if (!rs->max_phys_segments)
		rs->max_phys_segments = MAX_PHYS_SEGMENTS;
	if (!rs->max_hw_segments)
		rs->max_hw_segments = MAX_HW_SEGMENTS;
	if (!rs->hardsect_size)
		rs->hardsect_size = 1 << SECTOR_SHIFT;
	if (!rs->max_segment_size)
		rs->max_segment_size = MAX_SEGMENT_SIZE;
	if (!rs->seg_boundary_mask)
		rs->seg_boundary_mask = -1;
}

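/*
 * Append a target to the table: look up the target type, split the
 * parameter string, run the type's constructor and merge the target's
 * limits into the table.  Targets must be contiguous (see adjoin()).
 */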
int dm_table_add_target(struct dm_table *t, const char *type,
			sector_t start, sector_t len, char *params)
{
	int r = -EINVAL, argc;
	char **argv;
	struct dm_target *tgt;

	if ((r = check_space(t)))
		return r;

	tgt = t->targets + t->num_targets;
	memset(tgt, 0, sizeof(*tgt));

	if (!len) {
		DMERR("%s: zero-length target", dm_device_name(t->md));
		return -EINVAL;
	}

	tgt->type = dm_get_target_type(type);
	if (!tgt->type) {
		DMERR("%s: %s: unknown target type", dm_device_name(t->md),
		      type);
		return -EINVAL;
	}

	tgt->table = t;
	tgt->begin = start;
	tgt->len = len;
	tgt->error = "Unknown error";

	/*
	 * Does this target adjoin the previous one ?
	 */
	if (!adjoin(t, tgt)) {
		tgt->error = "Gap in table";
		r = -EINVAL;
		goto bad;
	}

	r = dm_split_args(&argc, &argv, params);
	if (r) {
		tgt->error = "couldn't split parameters (insufficient memory)";
		goto bad;
	}

	r = tgt->type->ctr(tgt, argc, argv);
	kfree(argv);
	if (r)
		goto bad;

	t->highs[t->num_targets++] = tgt->begin + tgt->len - 1;

	combine_restrictions_low(&t->limits, &tgt->limits);
	return 0;

 bad:
	DMERR("%s: %s: %s", dm_device_name(t->md), type, tgt->error);
	dm_put_target_type(tgt->type);
	return r;
}

static int setup_indexes(struct dm_table *t)
{
	int i;
	unsigned int total = 0;
	sector_t *indexes;

	/* allocate the space for *all* the indexes */
	for (i = t->depth - 2; i >= 0; i--) {
		t->counts[i] = dm_div_up(t->counts[i + 1], CHILDREN_PER_NODE);
		total += t->counts[i];
	}

	indexes = (sector_t *) dm_vcalloc(total, (unsigned long) NODE_SIZE);
	if (!indexes)
		return -ENOMEM;

	/* set up internal nodes, bottom-up */
	for (i = t->depth - 2, total = 0; i >= 0; i--) {
		t->index[i] = indexes;
		indexes += (KEYS_PER_NODE * t->counts[i]);
		setup_btree_index(i, t);
	}

	return 0;
}

/*
 * Builds the btree to index the map.
 */
int dm_table_complete(struct dm_table *t)
{
	int r = 0;
	unsigned int leaf_nodes;

	check_for_valid_limits(&t->limits);

	/* how many indexes will the btree have ? */
	leaf_nodes = dm_div_up(t->num_targets, KEYS_PER_NODE);
	t->depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE);

	/* leaf layer has already been set up */
	t->counts[t->depth - 1] = leaf_nodes;
	t->index[t->depth - 1] = t->highs;

	if (t->depth >= 2)
		r = setup_indexes(t);

	return r;
}

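/*
 * Table events are handed up through the callback registered via
 * dm_table_event_callback(); _event_lock keeps registration and
 * delivery from racing with each other.
 */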
static DEFINE_MUTEX(_event_lock);
void dm_table_event_callback(struct dm_table *t,
			     void (*fn)(void *), void *context)
{
	mutex_lock(&_event_lock);
	t->event_fn = fn;
	t->event_context = context;
	mutex_unlock(&_event_lock);
}

void dm_table_event(struct dm_table *t)
{
	/*
	 * You can no longer call dm_table_event() from interrupt
	 * context, use a bottom half instead.
	 */
	BUG_ON(in_interrupt());

	mutex_lock(&_event_lock);
	if (t->event_fn)
		t->event_fn(t->event_context);
	mutex_unlock(&_event_lock);
}

sector_t dm_table_get_size(struct dm_table *t)
{
	return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0;
}

struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index)
{
	if (index >= t->num_targets)
		return NULL;

	return t->targets + index;
}

/*
 * Search the btree for the correct target.
 */
struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
{
	unsigned int l, n = 0, k = 0;
	sector_t *node;

	for (l = 0; l < t->depth; l++) {
		n = get_child(n, k);
		node = get_node(t, l, n);

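		/* node[k] is the highest sector covered by the k'th child
		 * (which is a target at the leaf level) */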
		for (k = 0; k < KEYS_PER_NODE; k++)
			if (node[k] >= sector)
				break;
	}

	return &t->targets[(KEYS_PER_NODE * n) + k];
}

void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q)
{
	/*
	 * Make sure we obey the optimistic sub-device
	 * restrictions.
	 */
	blk_queue_max_sectors(q, t->limits.max_sectors);
	q->max_phys_segments = t->limits.max_phys_segments;
	q->max_hw_segments = t->limits.max_hw_segments;
	q->hardsect_size = t->limits.hardsect_size;
	q->max_segment_size = t->limits.max_segment_size;
	q->seg_boundary_mask = t->limits.seg_boundary_mask;
	if (t->limits.no_cluster)
		q->queue_flags &= ~(1 << QUEUE_FLAG_CLUSTER);
	else
		q->queue_flags |= (1 << QUEUE_FLAG_CLUSTER);
}

unsigned int dm_table_get_num_targets(struct dm_table *t)
{
	return t->num_targets;
}

struct list_head *dm_table_get_devices(struct dm_table *t)
{
	return &t->devices;
}

int dm_table_get_mode(struct dm_table *t)
{
	return t->mode;
}

static void suspend_targets(struct dm_table *t, unsigned postsuspend)
{
	int i = t->num_targets;
	struct dm_target *ti = t->targets;

	while (i--) {
		if (postsuspend) {
			if (ti->type->postsuspend)
				ti->type->postsuspend(ti);
		} else if (ti->type->presuspend)
			ti->type->presuspend(ti);

		ti++;
	}
}

void dm_table_presuspend_targets(struct dm_table *t)
{
	if (!t)
		return;

	suspend_targets(t, 0);
}

void dm_table_postsuspend_targets(struct dm_table *t)
{
	if (!t)
		return;

	suspend_targets(t, 1);
}

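/*
 * Call every target's preresume method first, failing the resume if any
 * of them returns an error, then call the resume methods.
 */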
int dm_table_resume_targets(struct dm_table *t)
{
	int i, r = 0;

	for (i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = t->targets + i;

		if (!ti->type->preresume)
			continue;

		r = ti->type->preresume(ti);
		if (r)
			return r;
	}

	for (i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = t->targets + i;

		if (ti->type->resume)
			ti->type->resume(ti);
	}

	return 0;
}

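/*
 * Returns non-zero if any underlying device's backing_dev_info reports
 * the congestion state asked about in bdi_bits.
 */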
int dm_table_any_congested(struct dm_table *t, int bdi_bits)
{
	struct list_head *d, *devices;
	int r = 0;

	devices = dm_table_get_devices(t);
	for (d = devices->next; d != devices; d = d->next) {
		struct dm_dev *dd = list_entry(d, struct dm_dev, list);
		request_queue_t *q = bdev_get_queue(dd->bdev);
		r |= bdi_congested(&q->backing_dev_info, bdi_bits);
	}

	return r;
}

void dm_table_unplug_all(struct dm_table *t)
{
	struct list_head *d, *devices = dm_table_get_devices(t);

	for (d = devices->next; d != devices; d = d->next) {
		struct dm_dev *dd = list_entry(d, struct dm_dev, list);
		request_queue_t *q = bdev_get_queue(dd->bdev);

		if (q->unplug_fn)
			q->unplug_fn(q);
	}
}

int dm_table_flush_all(struct dm_table *t)
{
	struct list_head *d, *devices = dm_table_get_devices(t);
	int ret = 0;
	unsigned i;

	for (i = 0; i < t->num_targets; i++)
		if (t->targets[i].type->flush)
			t->targets[i].type->flush(&t->targets[i]);

	for (d = devices->next; d != devices; d = d->next) {
		struct dm_dev *dd = list_entry(d, struct dm_dev, list);
		request_queue_t *q = bdev_get_queue(dd->bdev);
		int err;

		if (!q->issue_flush_fn)
			err = -EOPNOTSUPP;
		else
			err = q->issue_flush_fn(q, dd->bdev->bd_disk, NULL);

		if (!ret)
			ret = err;
	}

	return ret;
}

struct mapped_device *dm_table_get_md(struct dm_table *t)
{
	dm_get(t->md);

	return t->md;
}

EXPORT_SYMBOL(dm_vcalloc);
EXPORT_SYMBOL(dm_get_device);
EXPORT_SYMBOL(dm_put_device);
EXPORT_SYMBOL(dm_table_event);
EXPORT_SYMBOL(dm_table_get_size);
EXPORT_SYMBOL(dm_table_get_mode);
EXPORT_SYMBOL(dm_table_get_md);
EXPORT_SYMBOL(dm_table_put);
EXPORT_SYMBOL(dm_table_get);
EXPORT_SYMBOL(dm_table_unplug_all);
EXPORT_SYMBOL(dm_table_flush_all);