1/*
2 * Copyright (C) 2007 Oracle.  All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 021110-1307, USA.
17 */
18#include <linux/sched.h>
19#include <linux/bio.h>
20#include <linux/slab.h>
21#include <linux/buffer_head.h>
22#include <linux/blkdev.h>
23#include <linux/random.h>
24#include <linux/iocontext.h>
25#include <asm/div64.h>
26#include "compat.h"
27#include "ctree.h"
28#include "extent_map.h"
29#include "disk-io.h"
30#include "transaction.h"
31#include "print-tree.h"
32#include "volumes.h"
33#include "async-thread.h"
34
35struct map_lookup {
36	u64 type;
37	int io_align;
38	int io_width;
39	int stripe_len;
40	int sector_size;
41	int num_stripes;
42	int sub_stripes;
43	struct btrfs_bio_stripe stripes[];
44};
45
46static int init_first_rw_device(struct btrfs_trans_handle *trans,
47				struct btrfs_root *root,
48				struct btrfs_device *device);
49static int btrfs_relocate_sys_chunks(struct btrfs_root *root);
50
51#define map_lookup_size(n) (sizeof(struct map_lookup) + \
52			    (sizeof(struct btrfs_bio_stripe) * (n)))
53
54static DEFINE_MUTEX(uuid_mutex);
55static LIST_HEAD(fs_uuids);
56
57void btrfs_lock_volumes(void)
58{
59	mutex_lock(&uuid_mutex);
60}
61
62void btrfs_unlock_volumes(void)
63{
64	mutex_unlock(&uuid_mutex);
65}
66
67static void lock_chunks(struct btrfs_root *root)
68{
69	mutex_lock(&root->fs_info->chunk_mutex);
70}
71
72static void unlock_chunks(struct btrfs_root *root)
73{
74	mutex_unlock(&root->fs_info->chunk_mutex);
75}
76
77static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
78{
79	struct btrfs_device *device;
80	WARN_ON(fs_devices->opened);
81	while (!list_empty(&fs_devices->devices)) {
82		device = list_entry(fs_devices->devices.next,
83				    struct btrfs_device, dev_list);
84		list_del(&device->dev_list);
85		kfree(device->name);
86		kfree(device);
87	}
88	kfree(fs_devices);
89}
90
91int btrfs_cleanup_fs_uuids(void)
92{
93	struct btrfs_fs_devices *fs_devices;
94
95	while (!list_empty(&fs_uuids)) {
96		fs_devices = list_entry(fs_uuids.next,
97					struct btrfs_fs_devices, list);
98		list_del(&fs_devices->list);
99		free_fs_devices(fs_devices);
100	}
101	return 0;
102}
103
104static noinline struct btrfs_device *__find_device(struct list_head *head,
105						   u64 devid, u8 *uuid)
106{
107	struct btrfs_device *dev;
108
109	list_for_each_entry(dev, head, dev_list) {
110		if (dev->devid == devid &&
111		    (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
112			return dev;
113		}
114	}
115	return NULL;
116}
117
118static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
119{
120	struct btrfs_fs_devices *fs_devices;
121
122	list_for_each_entry(fs_devices, &fs_uuids, list) {
123		if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
124			return fs_devices;
125	}
126	return NULL;
127}
128
129static void requeue_list(struct btrfs_pending_bios *pending_bios,
130			struct bio *head, struct bio *tail)
131{
132
133	struct bio *old_head;
134
135	old_head = pending_bios->head;
136	pending_bios->head = head;
137	if (pending_bios->tail)
138		tail->bi_next = old_head;
139	else
140		pending_bios->tail = tail;
141}
142
143/*
144 * we try to collect pending bios for a device so we don't get a large
145 * number of procs sending bios down to the same device.  This greatly
146 * improves the schedulers ability to collect and merge the bios.
147 *
148 * But, it also turns into a long list of bios to process and that is sure
149 * to eventually make the worker thread block.  The solution here is to
150 * make some progress and then put this work struct back at the end of
151 * the list if the block device is congested.  This way, multiple devices
152 * can make progress from a single worker thread.
153 */
154static noinline int run_scheduled_bios(struct btrfs_device *device)
155{
156	struct bio *pending;
157	struct backing_dev_info *bdi;
158	struct btrfs_fs_info *fs_info;
159	struct btrfs_pending_bios *pending_bios;
160	struct bio *tail;
161	struct bio *cur;
162	int again = 0;
163	unsigned long num_run;
164	unsigned long num_sync_run;
165	unsigned long batch_run = 0;
166	unsigned long limit;
167	unsigned long last_waited = 0;
168	int force_reg = 0;
169
170	bdi = blk_get_backing_dev_info(device->bdev);
171	fs_info = device->dev_root->fs_info;
172	limit = btrfs_async_submit_limit(fs_info);
173	limit = limit * 2 / 3;
174
175	/* we want to make sure that every time we switch from the sync
176	 * list to the normal list, we unplug
177	 */
178	num_sync_run = 0;
179
180loop:
181	spin_lock(&device->io_lock);
182
183loop_lock:
184	num_run = 0;
185
186	/* take all the bios off the list at once and process them
187	 * later on (without the lock held).  But, remember the
188	 * tail and other pointers so the bios can be properly reinserted
189	 * into the list if we hit congestion
190	 */
191	if (!force_reg && device->pending_sync_bios.head) {
192		pending_bios = &device->pending_sync_bios;
193		force_reg = 1;
194	} else {
195		pending_bios = &device->pending_bios;
196		force_reg = 0;
197	}
198
199	pending = pending_bios->head;
200	tail = pending_bios->tail;
201	WARN_ON(pending && !tail);
202
203	/*
204	 * if pending was null this time around, no bios need processing
205	 * at all and we can stop.  Otherwise it'll loop back up again
206	 * and do an additional check so no bios are missed.
207	 *
208	 * device->running_pending is used to synchronize with the
209	 * schedule_bio code.
210	 */
211	if (device->pending_sync_bios.head == NULL &&
212	    device->pending_bios.head == NULL) {
213		again = 0;
214		device->running_pending = 0;
215	} else {
216		again = 1;
217		device->running_pending = 1;
218	}
219
220	pending_bios->head = NULL;
221	pending_bios->tail = NULL;
222
223	spin_unlock(&device->io_lock);
224
225	/*
226	 * if we're doing the regular priority list, make sure we unplug
227	 * for any high prio bios we've sent down
228	 */
229	if (pending_bios == &device->pending_bios && num_sync_run > 0) {
230		num_sync_run = 0;
231		blk_run_backing_dev(bdi, NULL);
232	}
233
234	while (pending) {
235
236		rmb();
237		/* we want to work on both lists, but do more bios on the
238		 * sync list than the regular list
239		 */
240		if ((num_run > 32 &&
241		    pending_bios != &device->pending_sync_bios &&
242		    device->pending_sync_bios.head) ||
243		   (num_run > 64 && pending_bios == &device->pending_sync_bios &&
244		    device->pending_bios.head)) {
245			spin_lock(&device->io_lock);
246			requeue_list(pending_bios, pending, tail);
247			goto loop_lock;
248		}
249
250		cur = pending;
251		pending = pending->bi_next;
252		cur->bi_next = NULL;
253		atomic_dec(&fs_info->nr_async_bios);
254
255		if (atomic_read(&fs_info->nr_async_bios) < limit &&
256		    waitqueue_active(&fs_info->async_submit_wait))
257			wake_up(&fs_info->async_submit_wait);
258
259		BUG_ON(atomic_read(&cur->bi_cnt) == 0);
260
261		if (cur->bi_rw & REQ_SYNC)
262			num_sync_run++;
263
264		submit_bio(cur->bi_rw, cur);
265		num_run++;
266		batch_run++;
267		if (need_resched()) {
268			if (num_sync_run) {
269				blk_run_backing_dev(bdi, NULL);
270				num_sync_run = 0;
271			}
272			cond_resched();
273		}
274
275		/*
276		 * we made progress, there is more work to do and the bdi
277		 * is now congested.  Back off and let other work structs
278		 * run instead
279		 */
280		if (pending && bdi_write_congested(bdi) && batch_run > 8 &&
281		    fs_info->fs_devices->open_devices > 1) {
282			struct io_context *ioc;
283
284			ioc = current->io_context;
285
286			/*
287			 * the main goal here is that we don't want to
288			 * block if we're going to be able to submit
289			 * more requests without blocking.
290			 *
291			 * This code does two great things, it pokes into
292			 * the elevator code from a filesystem _and_
293			 * it makes assumptions about how batching works.
294			 */
295			if (ioc && ioc->nr_batch_requests > 0 &&
296			    time_before(jiffies, ioc->last_waited + HZ/50UL) &&
297			    (last_waited == 0 ||
298			     ioc->last_waited == last_waited)) {
299				/*
300				 * we want to go through our batch of
301				 * requests and stop.  So, we copy out
302				 * the ioc->last_waited time and test
303				 * against it before looping
304				 */
305				last_waited = ioc->last_waited;
306				if (need_resched()) {
307					if (num_sync_run) {
308						blk_run_backing_dev(bdi, NULL);
309						num_sync_run = 0;
310					}
311					cond_resched();
312				}
313				continue;
314			}
315			spin_lock(&device->io_lock);
316			requeue_list(pending_bios, pending, tail);
317			device->running_pending = 1;
318
319			spin_unlock(&device->io_lock);
320			btrfs_requeue_work(&device->work);
321			goto done;
322		}
323	}
324
325	if (num_sync_run) {
326		num_sync_run = 0;
327		blk_run_backing_dev(bdi, NULL);
328	}
329	/*
330	 * IO has already been through a long path to get here.  Checksumming,
331	 * async helper threads, perhaps compression.  We've done a pretty
332	 * good job of collecting a batch of IO and should just unplug
333	 * the device right away.
334	 *
335	 * This will help anyone who is waiting on the IO, they might have
336	 * already unplugged, but managed to do so before the bio they
337	 * cared about found its way down here.
338	 */
339	blk_run_backing_dev(bdi, NULL);
340
341	cond_resched();
342	if (again)
343		goto loop;
344
345	spin_lock(&device->io_lock);
346	if (device->pending_bios.head || device->pending_sync_bios.head)
347		goto loop_lock;
348	spin_unlock(&device->io_lock);
349
350done:
351	return 0;
352}
353
354static void pending_bios_fn(struct btrfs_work *work)
355{
356	struct btrfs_device *device;
357
358	device = container_of(work, struct btrfs_device, work);
359	run_scheduled_bios(device);
360}
361
362static noinline int device_list_add(const char *path,
363			   struct btrfs_super_block *disk_super,
364			   u64 devid, struct btrfs_fs_devices **fs_devices_ret)
365{
366	struct btrfs_device *device;
367	struct btrfs_fs_devices *fs_devices;
368	u64 found_transid = btrfs_super_generation(disk_super);
369	char *name;
370
371	fs_devices = find_fsid(disk_super->fsid);
372	if (!fs_devices) {
373		fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
374		if (!fs_devices)
375			return -ENOMEM;
376		INIT_LIST_HEAD(&fs_devices->devices);
377		INIT_LIST_HEAD(&fs_devices->alloc_list);
378		list_add(&fs_devices->list, &fs_uuids);
379		memcpy(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
380		fs_devices->latest_devid = devid;
381		fs_devices->latest_trans = found_transid;
382		mutex_init(&fs_devices->device_list_mutex);
383		device = NULL;
384	} else {
385		device = __find_device(&fs_devices->devices, devid,
386				       disk_super->dev_item.uuid);
387	}
388	if (!device) {
389		if (fs_devices->opened)
390			return -EBUSY;
391
392		device = kzalloc(sizeof(*device), GFP_NOFS);
393		if (!device) {
394			/* we can safely leave the fs_devices entry around */
395			return -ENOMEM;
396		}
397		device->devid = devid;
398		device->work.func = pending_bios_fn;
399		memcpy(device->uuid, disk_super->dev_item.uuid,
400		       BTRFS_UUID_SIZE);
401		device->barriers = 1;
402		spin_lock_init(&device->io_lock);
403		device->name = kstrdup(path, GFP_NOFS);
404		if (!device->name) {
405			kfree(device);
406			return -ENOMEM;
407		}
408		INIT_LIST_HEAD(&device->dev_alloc_list);
409
410		mutex_lock(&fs_devices->device_list_mutex);
411		list_add(&device->dev_list, &fs_devices->devices);
412		mutex_unlock(&fs_devices->device_list_mutex);
413
414		device->fs_devices = fs_devices;
415		fs_devices->num_devices++;
416	} else if (strcmp(device->name, path)) {
417		name = kstrdup(path, GFP_NOFS);
418		if (!name)
419			return -ENOMEM;
420		kfree(device->name);
421		device->name = name;
422	}
423
424	if (found_transid > fs_devices->latest_trans) {
425		fs_devices->latest_devid = devid;
426		fs_devices->latest_trans = found_transid;
427	}
428	*fs_devices_ret = fs_devices;
429	return 0;
430}
431
432static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
433{
434	struct btrfs_fs_devices *fs_devices;
435	struct btrfs_device *device;
436	struct btrfs_device *orig_dev;
437
438	fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
439	if (!fs_devices)
440		return ERR_PTR(-ENOMEM);
441
442	INIT_LIST_HEAD(&fs_devices->devices);
443	INIT_LIST_HEAD(&fs_devices->alloc_list);
444	INIT_LIST_HEAD(&fs_devices->list);
445	mutex_init(&fs_devices->device_list_mutex);
446	fs_devices->latest_devid = orig->latest_devid;
447	fs_devices->latest_trans = orig->latest_trans;
448	memcpy(fs_devices->fsid, orig->fsid, sizeof(fs_devices->fsid));
449
450	mutex_lock(&orig->device_list_mutex);
451	list_for_each_entry(orig_dev, &orig->devices, dev_list) {
452		device = kzalloc(sizeof(*device), GFP_NOFS);
453		if (!device)
454			goto error;
455
456		device->name = kstrdup(orig_dev->name, GFP_NOFS);
457		if (!device->name) {
458			kfree(device);
459			goto error;
460		}
461
462		device->devid = orig_dev->devid;
463		device->work.func = pending_bios_fn;
464		memcpy(device->uuid, orig_dev->uuid, sizeof(device->uuid));
465		device->barriers = 1;
466		spin_lock_init(&device->io_lock);
467		INIT_LIST_HEAD(&device->dev_list);
468		INIT_LIST_HEAD(&device->dev_alloc_list);
469
470		list_add(&device->dev_list, &fs_devices->devices);
471		device->fs_devices = fs_devices;
472		fs_devices->num_devices++;
473	}
474	mutex_unlock(&orig->device_list_mutex);
475	return fs_devices;
476error:
477	mutex_unlock(&orig->device_list_mutex);
478	free_fs_devices(fs_devices);
479	return ERR_PTR(-ENOMEM);
480}
481
482int btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices)
483{
484	struct btrfs_device *device, *next;
485
486	mutex_lock(&uuid_mutex);
487again:
488	mutex_lock(&fs_devices->device_list_mutex);
489	list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
490		if (device->in_fs_metadata)
491			continue;
492
493		if (device->bdev) {
494			close_bdev_exclusive(device->bdev, device->mode);
495			device->bdev = NULL;
496			fs_devices->open_devices--;
497		}
498		if (device->writeable) {
499			list_del_init(&device->dev_alloc_list);
500			device->writeable = 0;
501			fs_devices->rw_devices--;
502		}
503		list_del_init(&device->dev_list);
504		fs_devices->num_devices--;
505		kfree(device->name);
506		kfree(device);
507	}
508	mutex_unlock(&fs_devices->device_list_mutex);
509
510	if (fs_devices->seed) {
511		fs_devices = fs_devices->seed;
512		goto again;
513	}
514
515	mutex_unlock(&uuid_mutex);
516	return 0;
517}
518
519static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
520{
521	struct btrfs_device *device;
522
523	if (--fs_devices->opened > 0)
524		return 0;
525
526	list_for_each_entry(device, &fs_devices->devices, dev_list) {
527		if (device->bdev) {
528			close_bdev_exclusive(device->bdev, device->mode);
529			fs_devices->open_devices--;
530		}
531		if (device->writeable) {
532			list_del_init(&device->dev_alloc_list);
533			fs_devices->rw_devices--;
534		}
535
536		device->bdev = NULL;
537		device->writeable = 0;
538		device->in_fs_metadata = 0;
539	}
540	WARN_ON(fs_devices->open_devices);
541	WARN_ON(fs_devices->rw_devices);
542	fs_devices->opened = 0;
543	fs_devices->seeding = 0;
544
545	return 0;
546}
547
548int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
549{
550	struct btrfs_fs_devices *seed_devices = NULL;
551	int ret;
552
553	mutex_lock(&uuid_mutex);
554	ret = __btrfs_close_devices(fs_devices);
555	if (!fs_devices->opened) {
556		seed_devices = fs_devices->seed;
557		fs_devices->seed = NULL;
558	}
559	mutex_unlock(&uuid_mutex);
560
561	while (seed_devices) {
562		fs_devices = seed_devices;
563		seed_devices = fs_devices->seed;
564		__btrfs_close_devices(fs_devices);
565		free_fs_devices(fs_devices);
566	}
567	return ret;
568}
569
570static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
571				fmode_t flags, void *holder)
572{
573	struct block_device *bdev;
574	struct list_head *head = &fs_devices->devices;
575	struct btrfs_device *device;
576	struct block_device *latest_bdev = NULL;
577	struct buffer_head *bh;
578	struct btrfs_super_block *disk_super;
579	u64 latest_devid = 0;
580	u64 latest_transid = 0;
581	u64 devid;
582	int seeding = 1;
583	int ret = 0;
584
585	list_for_each_entry(device, head, dev_list) {
586		if (device->bdev)
587			continue;
588		if (!device->name)
589			continue;
590
591		bdev = open_bdev_exclusive(device->name, flags, holder);
592		if (IS_ERR(bdev)) {
593			printk(KERN_INFO "open %s failed\n", device->name);
594			goto error;
595		}
596		set_blocksize(bdev, 4096);
597
598		bh = btrfs_read_dev_super(bdev);
599		if (!bh)
600			goto error_close;
601
602		disk_super = (struct btrfs_super_block *)bh->b_data;
603		devid = btrfs_stack_device_id(&disk_super->dev_item);
604		if (devid != device->devid)
605			goto error_brelse;
606
607		if (memcmp(device->uuid, disk_super->dev_item.uuid,
608			   BTRFS_UUID_SIZE))
609			goto error_brelse;
610
611		device->generation = btrfs_super_generation(disk_super);
612		if (!latest_transid || device->generation > latest_transid) {
613			latest_devid = devid;
614			latest_transid = device->generation;
615			latest_bdev = bdev;
616		}
617
618		if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
619			device->writeable = 0;
620		} else {
621			device->writeable = !bdev_read_only(bdev);
622			seeding = 0;
623		}
624
625		device->bdev = bdev;
626		device->in_fs_metadata = 0;
627		device->mode = flags;
628
629		if (!blk_queue_nonrot(bdev_get_queue(bdev)))
630			fs_devices->rotating = 1;
631
632		fs_devices->open_devices++;
633		if (device->writeable) {
634			fs_devices->rw_devices++;
635			list_add(&device->dev_alloc_list,
636				 &fs_devices->alloc_list);
637		}
638		continue;
639
640error_brelse:
641		brelse(bh);
642error_close:
643		close_bdev_exclusive(bdev, FMODE_READ);
644error:
645		continue;
646	}
647	if (fs_devices->open_devices == 0) {
648		ret = -EIO;
649		goto out;
650	}
651	fs_devices->seeding = seeding;
652	fs_devices->opened = 1;
653	fs_devices->latest_bdev = latest_bdev;
654	fs_devices->latest_devid = latest_devid;
655	fs_devices->latest_trans = latest_transid;
656	fs_devices->total_rw_bytes = 0;
657out:
658	return ret;
659}
660
661int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
662		       fmode_t flags, void *holder)
663{
664	int ret;
665
666	mutex_lock(&uuid_mutex);
667	if (fs_devices->opened) {
668		fs_devices->opened++;
669		ret = 0;
670	} else {
671		ret = __btrfs_open_devices(fs_devices, flags, holder);
672	}
673	mutex_unlock(&uuid_mutex);
674	return ret;
675}
676
677int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
678			  struct btrfs_fs_devices **fs_devices_ret)
679{
680	struct btrfs_super_block *disk_super;
681	struct block_device *bdev;
682	struct buffer_head *bh;
683	int ret;
684	u64 devid;
685	u64 transid;
686
687	mutex_lock(&uuid_mutex);
688
689	bdev = open_bdev_exclusive(path, flags, holder);
690
691	if (IS_ERR(bdev)) {
692		ret = PTR_ERR(bdev);
693		goto error;
694	}
695
696	ret = set_blocksize(bdev, 4096);
697	if (ret)
698		goto error_close;
699	bh = btrfs_read_dev_super(bdev);
700	if (!bh) {
701		ret = -EIO;
702		goto error_close;
703	}
704	disk_super = (struct btrfs_super_block *)bh->b_data;
705	devid = btrfs_stack_device_id(&disk_super->dev_item);
706	transid = btrfs_super_generation(disk_super);
707	if (disk_super->label[0])
708		printk(KERN_INFO "device label %s ", disk_super->label);
709	else {
710		printk(KERN_INFO "device fsid %llx-%llx ",
711		       *(unsigned long long *)disk_super->fsid,
712		       *(unsigned long long *)(disk_super->fsid + 8));
713	}
714	printk(KERN_CONT "devid %llu transid %llu %s\n",
715	       (unsigned long long)devid, (unsigned long long)transid, path);
716	ret = device_list_add(path, disk_super, devid, fs_devices_ret);
717
718	brelse(bh);
719error_close:
720	close_bdev_exclusive(bdev, flags);
721error:
722	mutex_unlock(&uuid_mutex);
723	return ret;
724}
725
726/*
727 * this uses a pretty simple search, the expectation is that it is
728 * called very infrequently and that a given device has a small number
729 * of extents
730 */
731int find_free_dev_extent(struct btrfs_trans_handle *trans,
732			 struct btrfs_device *device, u64 num_bytes,
733			 u64 *start, u64 *max_avail)
734{
735	struct btrfs_key key;
736	struct btrfs_root *root = device->dev_root;
737	struct btrfs_dev_extent *dev_extent = NULL;
738	struct btrfs_path *path;
739	u64 hole_size = 0;
740	u64 last_byte = 0;
741	u64 search_start = 0;
742	u64 search_end = device->total_bytes;
743	int ret;
744	int slot = 0;
745	int start_found;
746	struct extent_buffer *l;
747
748	path = btrfs_alloc_path();
749	if (!path)
750		return -ENOMEM;
751	path->reada = 2;
752	start_found = 0;
753
754
755	/* we don't want to overwrite the superblock on the drive,
756	 * so we make sure to start at an offset of at least 1MB
757	 */
758	search_start = max((u64)1024 * 1024, search_start);
759
760	if (root->fs_info->alloc_start + num_bytes <= device->total_bytes)
761		search_start = max(root->fs_info->alloc_start, search_start);
762
763	key.objectid = device->devid;
764	key.offset = search_start;
765	key.type = BTRFS_DEV_EXTENT_KEY;
766	ret = btrfs_search_slot(trans, root, &key, path, 0, 0);
767	if (ret < 0)
768		goto error;
769	if (ret > 0) {
770		ret = btrfs_previous_item(root, path, key.objectid, key.type);
771		if (ret < 0)
772			goto error;
773		if (ret > 0)
774			start_found = 1;
775	}
776	l = path->nodes[0];
777	btrfs_item_key_to_cpu(l, &key, path->slots[0]);
778	while (1) {
779		l = path->nodes[0];
780		slot = path->slots[0];
781		if (slot >= btrfs_header_nritems(l)) {
782			ret = btrfs_next_leaf(root, path);
783			if (ret == 0)
784				continue;
785			if (ret < 0)
786				goto error;
787no_more_items:
788			if (!start_found) {
789				if (search_start >= search_end) {
790					ret = -ENOSPC;
791					goto error;
792				}
793				*start = search_start;
794				start_found = 1;
795				goto check_pending;
796			}
797			*start = last_byte > search_start ?
798				last_byte : search_start;
799			if (search_end <= *start) {
800				ret = -ENOSPC;
801				goto error;
802			}
803			goto check_pending;
804		}
805		btrfs_item_key_to_cpu(l, &key, slot);
806
807		if (key.objectid < device->devid)
808			goto next;
809
810		if (key.objectid > device->devid)
811			goto no_more_items;
812
813		if (key.offset >= search_start && key.offset > last_byte &&
814		    start_found) {
815			if (last_byte < search_start)
816				last_byte = search_start;
817			hole_size = key.offset - last_byte;
818
819			if (hole_size > *max_avail)
820				*max_avail = hole_size;
821
822			if (key.offset > last_byte &&
823			    hole_size >= num_bytes) {
824				*start = last_byte;
825				goto check_pending;
826			}
827		}
828		if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
829			goto next;
830
831		start_found = 1;
832		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
833		last_byte = key.offset + btrfs_dev_extent_length(l, dev_extent);
834next:
835		path->slots[0]++;
836		cond_resched();
837	}
838check_pending:
839	/* we have to make sure we didn't find an extent that has already
840	 * been allocated by the map tree or the original allocation
841	 */
842	BUG_ON(*start < search_start);
843
844	if (*start + num_bytes > search_end) {
845		ret = -ENOSPC;
846		goto error;
847	}
848	/* check for pending inserts here */
849	ret = 0;
850
851error:
852	btrfs_free_path(path);
853	return ret;
854}
855
856static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
857			  struct btrfs_device *device,
858			  u64 start)
859{
860	int ret;
861	struct btrfs_path *path;
862	struct btrfs_root *root = device->dev_root;
863	struct btrfs_key key;
864	struct btrfs_key found_key;
865	struct extent_buffer *leaf = NULL;
866	struct btrfs_dev_extent *extent = NULL;
867
868	path = btrfs_alloc_path();
869	if (!path)
870		return -ENOMEM;
871
872	key.objectid = device->devid;
873	key.offset = start;
874	key.type = BTRFS_DEV_EXTENT_KEY;
875
876	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
877	if (ret > 0) {
878		ret = btrfs_previous_item(root, path, key.objectid,
879					  BTRFS_DEV_EXTENT_KEY);
880		BUG_ON(ret);
881		leaf = path->nodes[0];
882		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
883		extent = btrfs_item_ptr(leaf, path->slots[0],
884					struct btrfs_dev_extent);
885		BUG_ON(found_key.offset > start || found_key.offset +
886		       btrfs_dev_extent_length(leaf, extent) < start);
887		ret = 0;
888	} else if (ret == 0) {
889		leaf = path->nodes[0];
890		extent = btrfs_item_ptr(leaf, path->slots[0],
891					struct btrfs_dev_extent);
892	}
893	BUG_ON(ret);
894
895	if (device->bytes_used > 0)
896		device->bytes_used -= btrfs_dev_extent_length(leaf, extent);
897	ret = btrfs_del_item(trans, root, path);
898	BUG_ON(ret);
899
900	btrfs_free_path(path);
901	return ret;
902}
903
904int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
905			   struct btrfs_device *device,
906			   u64 chunk_tree, u64 chunk_objectid,
907			   u64 chunk_offset, u64 start, u64 num_bytes)
908{
909	int ret;
910	struct btrfs_path *path;
911	struct btrfs_root *root = device->dev_root;
912	struct btrfs_dev_extent *extent;
913	struct extent_buffer *leaf;
914	struct btrfs_key key;
915
916	WARN_ON(!device->in_fs_metadata);
917	path = btrfs_alloc_path();
918	if (!path)
919		return -ENOMEM;
920
921	key.objectid = device->devid;
922	key.offset = start;
923	key.type = BTRFS_DEV_EXTENT_KEY;
924	ret = btrfs_insert_empty_item(trans, root, path, &key,
925				      sizeof(*extent));
926	BUG_ON(ret);
927
928	leaf = path->nodes[0];
929	extent = btrfs_item_ptr(leaf, path->slots[0],
930				struct btrfs_dev_extent);
931	btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
932	btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
933	btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);
934
935	write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
936		    (unsigned long)btrfs_dev_extent_chunk_tree_uuid(extent),
937		    BTRFS_UUID_SIZE);
938
939	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
940	btrfs_mark_buffer_dirty(leaf);
941	btrfs_free_path(path);
942	return ret;
943}
944
945static noinline int find_next_chunk(struct btrfs_root *root,
946				    u64 objectid, u64 *offset)
947{
948	struct btrfs_path *path;
949	int ret;
950	struct btrfs_key key;
951	struct btrfs_chunk *chunk;
952	struct btrfs_key found_key;
953
954	path = btrfs_alloc_path();
955	BUG_ON(!path);
956
957	key.objectid = objectid;
958	key.offset = (u64)-1;
959	key.type = BTRFS_CHUNK_ITEM_KEY;
960
961	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
962	if (ret < 0)
963		goto error;
964
965	BUG_ON(ret == 0);
966
967	ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY);
968	if (ret) {
969		*offset = 0;
970	} else {
971		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
972				      path->slots[0]);
973		if (found_key.objectid != objectid)
974			*offset = 0;
975		else {
976			chunk = btrfs_item_ptr(path->nodes[0], path->slots[0],
977					       struct btrfs_chunk);
978			*offset = found_key.offset +
979				btrfs_chunk_length(path->nodes[0], chunk);
980		}
981	}
982	ret = 0;
983error:
984	btrfs_free_path(path);
985	return ret;
986}
987
988static noinline int find_next_devid(struct btrfs_root *root, u64 *objectid)
989{
990	int ret;
991	struct btrfs_key key;
992	struct btrfs_key found_key;
993	struct btrfs_path *path;
994
995	root = root->fs_info->chunk_root;
996
997	path = btrfs_alloc_path();
998	if (!path)
999		return -ENOMEM;
1000
1001	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1002	key.type = BTRFS_DEV_ITEM_KEY;
1003	key.offset = (u64)-1;
1004
1005	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1006	if (ret < 0)
1007		goto error;
1008
1009	BUG_ON(ret == 0);
1010
1011	ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID,
1012				  BTRFS_DEV_ITEM_KEY);
1013	if (ret) {
1014		*objectid = 1;
1015	} else {
1016		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
1017				      path->slots[0]);
1018		*objectid = found_key.offset + 1;
1019	}
1020	ret = 0;
1021error:
1022	btrfs_free_path(path);
1023	return ret;
1024}
1025
1026/*
1027 * the device information is stored in the chunk root
1028 * the btrfs_device struct should be fully filled in
1029 */
1030int btrfs_add_device(struct btrfs_trans_handle *trans,
1031		     struct btrfs_root *root,
1032		     struct btrfs_device *device)
1033{
1034	int ret;
1035	struct btrfs_path *path;
1036	struct btrfs_dev_item *dev_item;
1037	struct extent_buffer *leaf;
1038	struct btrfs_key key;
1039	unsigned long ptr;
1040
1041	root = root->fs_info->chunk_root;
1042
1043	path = btrfs_alloc_path();
1044	if (!path)
1045		return -ENOMEM;
1046
1047	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1048	key.type = BTRFS_DEV_ITEM_KEY;
1049	key.offset = device->devid;
1050
1051	ret = btrfs_insert_empty_item(trans, root, path, &key,
1052				      sizeof(*dev_item));
1053	if (ret)
1054		goto out;
1055
1056	leaf = path->nodes[0];
1057	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
1058
1059	btrfs_set_device_id(leaf, dev_item, device->devid);
1060	btrfs_set_device_generation(leaf, dev_item, 0);
1061	btrfs_set_device_type(leaf, dev_item, device->type);
1062	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
1063	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
1064	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
1065	btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
1066	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
1067	btrfs_set_device_group(leaf, dev_item, 0);
1068	btrfs_set_device_seek_speed(leaf, dev_item, 0);
1069	btrfs_set_device_bandwidth(leaf, dev_item, 0);
1070	btrfs_set_device_start_offset(leaf, dev_item, 0);
1071
1072	ptr = (unsigned long)btrfs_device_uuid(dev_item);
1073	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
1074	ptr = (unsigned long)btrfs_device_fsid(dev_item);
1075	write_extent_buffer(leaf, root->fs_info->fsid, ptr, BTRFS_UUID_SIZE);
1076	btrfs_mark_buffer_dirty(leaf);
1077
1078	ret = 0;
1079out:
1080	btrfs_free_path(path);
1081	return ret;
1082}
1083
1084static int btrfs_rm_dev_item(struct btrfs_root *root,
1085			     struct btrfs_device *device)
1086{
1087	int ret;
1088	struct btrfs_path *path;
1089	struct btrfs_key key;
1090	struct btrfs_trans_handle *trans;
1091
1092	root = root->fs_info->chunk_root;
1093
1094	path = btrfs_alloc_path();
1095	if (!path)
1096		return -ENOMEM;
1097
1098	trans = btrfs_start_transaction(root, 0);
1099	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1100	key.type = BTRFS_DEV_ITEM_KEY;
1101	key.offset = device->devid;
1102	lock_chunks(root);
1103
1104	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1105	if (ret < 0)
1106		goto out;
1107
1108	if (ret > 0) {
1109		ret = -ENOENT;
1110		goto out;
1111	}
1112
1113	ret = btrfs_del_item(trans, root, path);
1114	if (ret)
1115		goto out;
1116out:
1117	btrfs_free_path(path);
1118	unlock_chunks(root);
1119	btrfs_commit_transaction(trans, root);
1120	return ret;
1121}
1122
1123int btrfs_rm_device(struct btrfs_root *root, char *device_path)
1124{
1125	struct btrfs_device *device;
1126	struct btrfs_device *next_device;
1127	struct block_device *bdev;
1128	struct buffer_head *bh = NULL;
1129	struct btrfs_super_block *disk_super;
1130	u64 all_avail;
1131	u64 devid;
1132	u64 num_devices;
1133	u8 *dev_uuid;
1134	int ret = 0;
1135
1136	mutex_lock(&uuid_mutex);
1137	mutex_lock(&root->fs_info->volume_mutex);
1138
1139	all_avail = root->fs_info->avail_data_alloc_bits |
1140		root->fs_info->avail_system_alloc_bits |
1141		root->fs_info->avail_metadata_alloc_bits;
1142
1143	if ((all_avail & BTRFS_BLOCK_GROUP_RAID10) &&
1144	    root->fs_info->fs_devices->num_devices <= 4) {
1145		printk(KERN_ERR "btrfs: unable to go below four devices "
1146		       "on raid10\n");
1147		ret = -EINVAL;
1148		goto out;
1149	}
1150
1151	if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) &&
1152	    root->fs_info->fs_devices->num_devices <= 2) {
1153		printk(KERN_ERR "btrfs: unable to go below two "
1154		       "devices on raid1\n");
1155		ret = -EINVAL;
1156		goto out;
1157	}
1158
1159	if (strcmp(device_path, "missing") == 0) {
1160		struct list_head *devices;
1161		struct btrfs_device *tmp;
1162
1163		device = NULL;
1164		devices = &root->fs_info->fs_devices->devices;
1165		mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
1166		list_for_each_entry(tmp, devices, dev_list) {
1167			if (tmp->in_fs_metadata && !tmp->bdev) {
1168				device = tmp;
1169				break;
1170			}
1171		}
1172		mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
1173		bdev = NULL;
1174		bh = NULL;
1175		disk_super = NULL;
1176		if (!device) {
1177			printk(KERN_ERR "btrfs: no missing devices found to "
1178			       "remove\n");
1179			goto out;
1180		}
1181	} else {
1182		bdev = open_bdev_exclusive(device_path, FMODE_READ,
1183				      root->fs_info->bdev_holder);
1184		if (IS_ERR(bdev)) {
1185			ret = PTR_ERR(bdev);
1186			goto out;
1187		}
1188
1189		set_blocksize(bdev, 4096);
1190		bh = btrfs_read_dev_super(bdev);
1191		if (!bh) {
1192			ret = -EIO;
1193			goto error_close;
1194		}
1195		disk_super = (struct btrfs_super_block *)bh->b_data;
1196		devid = btrfs_stack_device_id(&disk_super->dev_item);
1197		dev_uuid = disk_super->dev_item.uuid;
1198		device = btrfs_find_device(root, devid, dev_uuid,
1199					   disk_super->fsid);
1200		if (!device) {
1201			ret = -ENOENT;
1202			goto error_brelse;
1203		}
1204	}
1205
1206	if (device->writeable && root->fs_info->fs_devices->rw_devices == 1) {
1207		printk(KERN_ERR "btrfs: unable to remove the only writeable "
1208		       "device\n");
1209		ret = -EINVAL;
1210		goto error_brelse;
1211	}
1212
1213	if (device->writeable) {
1214		list_del_init(&device->dev_alloc_list);
1215		root->fs_info->fs_devices->rw_devices--;
1216	}
1217
1218	ret = btrfs_shrink_device(device, 0);
1219	if (ret)
1220		goto error_brelse;
1221
1222	ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);
1223	if (ret)
1224		goto error_brelse;
1225
1226	device->in_fs_metadata = 0;
1227
1228	/*
1229	 * the device list mutex makes sure that we don't change
1230	 * the device list while someone else is writing out all
1231	 * the device supers.
1232	 */
1233	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
1234	list_del_init(&device->dev_list);
1235	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
1236
1237	device->fs_devices->num_devices--;
1238
1239	next_device = list_entry(root->fs_info->fs_devices->devices.next,
1240				 struct btrfs_device, dev_list);
1241	if (device->bdev == root->fs_info->sb->s_bdev)
1242		root->fs_info->sb->s_bdev = next_device->bdev;
1243	if (device->bdev == root->fs_info->fs_devices->latest_bdev)
1244		root->fs_info->fs_devices->latest_bdev = next_device->bdev;
1245
1246	if (device->bdev) {
1247		close_bdev_exclusive(device->bdev, device->mode);
1248		device->bdev = NULL;
1249		device->fs_devices->open_devices--;
1250	}
1251
1252	num_devices = btrfs_super_num_devices(&root->fs_info->super_copy) - 1;
1253	btrfs_set_super_num_devices(&root->fs_info->super_copy, num_devices);
1254
1255	if (device->fs_devices->open_devices == 0) {
1256		struct btrfs_fs_devices *fs_devices;
1257		fs_devices = root->fs_info->fs_devices;
1258		while (fs_devices) {
1259			if (fs_devices->seed == device->fs_devices)
1260				break;
1261			fs_devices = fs_devices->seed;
1262		}
1263		fs_devices->seed = device->fs_devices->seed;
1264		device->fs_devices->seed = NULL;
1265		__btrfs_close_devices(device->fs_devices);
1266		free_fs_devices(device->fs_devices);
1267	}
1268
1269	/*
1270	 * at this point, the device is zero sized.  We want to
1271	 * remove it from the devices list and zero out the old super
1272	 */
1273	if (device->writeable) {
1274		/* make sure this device isn't detected as part of
1275		 * the FS anymore
1276		 */
1277		memset(&disk_super->magic, 0, sizeof(disk_super->magic));
1278		set_buffer_dirty(bh);
1279		sync_dirty_buffer(bh);
1280	}
1281
1282	kfree(device->name);
1283	kfree(device);
1284	ret = 0;
1285
1286error_brelse:
1287	brelse(bh);
1288error_close:
1289	if (bdev)
1290		close_bdev_exclusive(bdev, FMODE_READ);
1291out:
1292	mutex_unlock(&root->fs_info->volume_mutex);
1293	mutex_unlock(&uuid_mutex);
1294	return ret;
1295}
1296
1297/*
1298 * does all the dirty work required for changing file system's UUID.
1299 */
1300static int btrfs_prepare_sprout(struct btrfs_trans_handle *trans,
1301				struct btrfs_root *root)
1302{
1303	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
1304	struct btrfs_fs_devices *old_devices;
1305	struct btrfs_fs_devices *seed_devices;
1306	struct btrfs_super_block *disk_super = &root->fs_info->super_copy;
1307	struct btrfs_device *device;
1308	u64 super_flags;
1309
1310	BUG_ON(!mutex_is_locked(&uuid_mutex));
1311	if (!fs_devices->seeding)
1312		return -EINVAL;
1313
1314	seed_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
1315	if (!seed_devices)
1316		return -ENOMEM;
1317
1318	old_devices = clone_fs_devices(fs_devices);
1319	if (IS_ERR(old_devices)) {
1320		kfree(seed_devices);
1321		return PTR_ERR(old_devices);
1322	}
1323
1324	list_add(&old_devices->list, &fs_uuids);
1325
1326	memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
1327	seed_devices->opened = 1;
1328	INIT_LIST_HEAD(&seed_devices->devices);
1329	INIT_LIST_HEAD(&seed_devices->alloc_list);
1330	mutex_init(&seed_devices->device_list_mutex);
1331	list_splice_init(&fs_devices->devices, &seed_devices->devices);
1332	list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list);
1333	list_for_each_entry(device, &seed_devices->devices, dev_list) {
1334		device->fs_devices = seed_devices;
1335	}
1336
1337	fs_devices->seeding = 0;
1338	fs_devices->num_devices = 0;
1339	fs_devices->open_devices = 0;
1340	fs_devices->seed = seed_devices;
1341
1342	generate_random_uuid(fs_devices->fsid);
1343	memcpy(root->fs_info->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
1344	memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
1345	super_flags = btrfs_super_flags(disk_super) &
1346		      ~BTRFS_SUPER_FLAG_SEEDING;
1347	btrfs_set_super_flags(disk_super, super_flags);
1348
1349	return 0;
1350}
1351
1352/*
1353 * strore the expected generation for seed devices in device items.
1354 */
1355static int btrfs_finish_sprout(struct btrfs_trans_handle *trans,
1356			       struct btrfs_root *root)
1357{
1358	struct btrfs_path *path;
1359	struct extent_buffer *leaf;
1360	struct btrfs_dev_item *dev_item;
1361	struct btrfs_device *device;
1362	struct btrfs_key key;
1363	u8 fs_uuid[BTRFS_UUID_SIZE];
1364	u8 dev_uuid[BTRFS_UUID_SIZE];
1365	u64 devid;
1366	int ret;
1367
1368	path = btrfs_alloc_path();
1369	if (!path)
1370		return -ENOMEM;
1371
1372	root = root->fs_info->chunk_root;
1373	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1374	key.offset = 0;
1375	key.type = BTRFS_DEV_ITEM_KEY;
1376
1377	while (1) {
1378		ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
1379		if (ret < 0)
1380			goto error;
1381
1382		leaf = path->nodes[0];
1383next_slot:
1384		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
1385			ret = btrfs_next_leaf(root, path);
1386			if (ret > 0)
1387				break;
1388			if (ret < 0)
1389				goto error;
1390			leaf = path->nodes[0];
1391			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1392			btrfs_release_path(root, path);
1393			continue;
1394		}
1395
1396		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1397		if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
1398		    key.type != BTRFS_DEV_ITEM_KEY)
1399			break;
1400
1401		dev_item = btrfs_item_ptr(leaf, path->slots[0],
1402					  struct btrfs_dev_item);
1403		devid = btrfs_device_id(leaf, dev_item);
1404		read_extent_buffer(leaf, dev_uuid,
1405				   (unsigned long)btrfs_device_uuid(dev_item),
1406				   BTRFS_UUID_SIZE);
1407		read_extent_buffer(leaf, fs_uuid,
1408				   (unsigned long)btrfs_device_fsid(dev_item),
1409				   BTRFS_UUID_SIZE);
1410		device = btrfs_find_device(root, devid, dev_uuid, fs_uuid);
1411		BUG_ON(!device);
1412
1413		if (device->fs_devices->seeding) {
1414			btrfs_set_device_generation(leaf, dev_item,
1415						    device->generation);
1416			btrfs_mark_buffer_dirty(leaf);
1417		}
1418
1419		path->slots[0]++;
1420		goto next_slot;
1421	}
1422	ret = 0;
1423error:
1424	btrfs_free_path(path);
1425	return ret;
1426}
1427
1428int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
1429{
1430	struct btrfs_trans_handle *trans;
1431	struct btrfs_device *device;
1432	struct block_device *bdev;
1433	struct list_head *devices;
1434	struct super_block *sb = root->fs_info->sb;
1435	u64 total_bytes;
1436	int seeding_dev = 0;
1437	int ret = 0;
1438
1439	if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding)
1440		return -EINVAL;
1441
1442	bdev = open_bdev_exclusive(device_path, 0, root->fs_info->bdev_holder);
1443	if (IS_ERR(bdev))
1444		return PTR_ERR(bdev);
1445
1446	if (root->fs_info->fs_devices->seeding) {
1447		seeding_dev = 1;
1448		down_write(&sb->s_umount);
1449		mutex_lock(&uuid_mutex);
1450	}
1451
1452	filemap_write_and_wait(bdev->bd_inode->i_mapping);
1453	mutex_lock(&root->fs_info->volume_mutex);
1454
1455	devices = &root->fs_info->fs_devices->devices;
1456	/*
1457	 * we have the volume lock, so we don't need the extra
1458	 * device list mutex while reading the list here.
1459	 */
1460	list_for_each_entry(device, devices, dev_list) {
1461		if (device->bdev == bdev) {
1462			ret = -EEXIST;
1463			goto error;
1464		}
1465	}
1466
1467	device = kzalloc(sizeof(*device), GFP_NOFS);
1468	if (!device) {
1469		/* we can safely leave the fs_devices entry around */
1470		ret = -ENOMEM;
1471		goto error;
1472	}
1473
1474	device->name = kstrdup(device_path, GFP_NOFS);
1475	if (!device->name) {
1476		kfree(device);
1477		ret = -ENOMEM;
1478		goto error;
1479	}
1480
1481	ret = find_next_devid(root, &device->devid);
1482	if (ret) {
1483		kfree(device);
1484		goto error;
1485	}
1486
1487	trans = btrfs_start_transaction(root, 0);
1488	lock_chunks(root);
1489
1490	device->barriers = 1;
1491	device->writeable = 1;
1492	device->work.func = pending_bios_fn;
1493	generate_random_uuid(device->uuid);
1494	spin_lock_init(&device->io_lock);
1495	device->generation = trans->transid;
1496	device->io_width = root->sectorsize;
1497	device->io_align = root->sectorsize;
1498	device->sector_size = root->sectorsize;
1499	device->total_bytes = i_size_read(bdev->bd_inode);
1500	device->disk_total_bytes = device->total_bytes;
1501	device->dev_root = root->fs_info->dev_root;
1502	device->bdev = bdev;
1503	device->in_fs_metadata = 1;
1504	device->mode = 0;
1505	set_blocksize(device->bdev, 4096);
1506
1507	if (seeding_dev) {
1508		sb->s_flags &= ~MS_RDONLY;
1509		ret = btrfs_prepare_sprout(trans, root);
1510		BUG_ON(ret);
1511	}
1512
1513	device->fs_devices = root->fs_info->fs_devices;
1514
1515	/*
1516	 * we don't want write_supers to jump in here with our device
1517	 * half setup
1518	 */
1519	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
1520	list_add(&device->dev_list, &root->fs_info->fs_devices->devices);
1521	list_add(&device->dev_alloc_list,
1522		 &root->fs_info->fs_devices->alloc_list);
1523	root->fs_info->fs_devices->num_devices++;
1524	root->fs_info->fs_devices->open_devices++;
1525	root->fs_info->fs_devices->rw_devices++;
1526	root->fs_info->fs_devices->total_rw_bytes += device->total_bytes;
1527
1528	if (!blk_queue_nonrot(bdev_get_queue(bdev)))
1529		root->fs_info->fs_devices->rotating = 1;
1530
1531	total_bytes = btrfs_super_total_bytes(&root->fs_info->super_copy);
1532	btrfs_set_super_total_bytes(&root->fs_info->super_copy,
1533				    total_bytes + device->total_bytes);
1534
1535	total_bytes = btrfs_super_num_devices(&root->fs_info->super_copy);
1536	btrfs_set_super_num_devices(&root->fs_info->super_copy,
1537				    total_bytes + 1);
1538	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
1539
1540	if (seeding_dev) {
1541		ret = init_first_rw_device(trans, root, device);
1542		BUG_ON(ret);
1543		ret = btrfs_finish_sprout(trans, root);
1544		BUG_ON(ret);
1545	} else {
1546		ret = btrfs_add_device(trans, root, device);
1547	}
1548
1549	/*
1550	 * we've got more storage, clear any full flags on the space
1551	 * infos
1552	 */
1553	btrfs_clear_space_info_full(root->fs_info);
1554
1555	unlock_chunks(root);
1556	btrfs_commit_transaction(trans, root);
1557
1558	if (seeding_dev) {
1559		mutex_unlock(&uuid_mutex);
1560		up_write(&sb->s_umount);
1561
1562		ret = btrfs_relocate_sys_chunks(root);
1563		BUG_ON(ret);
1564	}
1565out:
1566	mutex_unlock(&root->fs_info->volume_mutex);
1567	return ret;
1568error:
1569	close_bdev_exclusive(bdev, 0);
1570	if (seeding_dev) {
1571		mutex_unlock(&uuid_mutex);
1572		up_write(&sb->s_umount);
1573	}
1574	goto out;
1575}
1576
1577static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
1578					struct btrfs_device *device)
1579{
1580	int ret;
1581	struct btrfs_path *path;
1582	struct btrfs_root *root;
1583	struct btrfs_dev_item *dev_item;
1584	struct extent_buffer *leaf;
1585	struct btrfs_key key;
1586
1587	root = device->dev_root->fs_info->chunk_root;
1588
1589	path = btrfs_alloc_path();
1590	if (!path)
1591		return -ENOMEM;
1592
1593	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1594	key.type = BTRFS_DEV_ITEM_KEY;
1595	key.offset = device->devid;
1596
1597	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
1598	if (ret < 0)
1599		goto out;
1600
1601	if (ret > 0) {
1602		ret = -ENOENT;
1603		goto out;
1604	}
1605
1606	leaf = path->nodes[0];
1607	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
1608
1609	btrfs_set_device_id(leaf, dev_item, device->devid);
1610	btrfs_set_device_type(leaf, dev_item, device->type);
1611	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
1612	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
1613	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
1614	btrfs_set_device_total_bytes(leaf, dev_item, device->disk_total_bytes);
1615	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
1616	btrfs_mark_buffer_dirty(leaf);
1617
1618out:
1619	btrfs_free_path(path);
1620	return ret;
1621}
1622
1623static int __btrfs_grow_device(struct btrfs_trans_handle *trans,
1624		      struct btrfs_device *device, u64 new_size)
1625{
1626	struct btrfs_super_block *super_copy =
1627		&device->dev_root->fs_info->super_copy;
1628	u64 old_total = btrfs_super_total_bytes(super_copy);
1629	u64 diff = new_size - device->total_bytes;
1630
1631	if (!device->writeable)
1632		return -EACCES;
1633	if (new_size <= device->total_bytes)
1634		return -EINVAL;
1635
1636	btrfs_set_super_total_bytes(super_copy, old_total + diff);
1637	device->fs_devices->total_rw_bytes += diff;
1638
1639	device->total_bytes = new_size;
1640	device->disk_total_bytes = new_size;
1641	btrfs_clear_space_info_full(device->dev_root->fs_info);
1642
1643	return btrfs_update_device(trans, device);
1644}
1645
1646int btrfs_grow_device(struct btrfs_trans_handle *trans,
1647		      struct btrfs_device *device, u64 new_size)
1648{
1649	int ret;
1650	lock_chunks(device->dev_root);
1651	ret = __btrfs_grow_device(trans, device, new_size);
1652	unlock_chunks(device->dev_root);
1653	return ret;
1654}
1655
1656static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
1657			    struct btrfs_root *root,
1658			    u64 chunk_tree, u64 chunk_objectid,
1659			    u64 chunk_offset)
1660{
1661	int ret;
1662	struct btrfs_path *path;
1663	struct btrfs_key key;
1664
1665	root = root->fs_info->chunk_root;
1666	path = btrfs_alloc_path();
1667	if (!path)
1668		return -ENOMEM;
1669
1670	key.objectid = chunk_objectid;
1671	key.offset = chunk_offset;
1672	key.type = BTRFS_CHUNK_ITEM_KEY;
1673
1674	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1675	BUG_ON(ret);
1676
1677	ret = btrfs_del_item(trans, root, path);
1678	BUG_ON(ret);
1679
1680	btrfs_free_path(path);
1681	return 0;
1682}
1683
1684static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid, u64
1685			chunk_offset)
1686{
1687	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
1688	struct btrfs_disk_key *disk_key;
1689	struct btrfs_chunk *chunk;
1690	u8 *ptr;
1691	int ret = 0;
1692	u32 num_stripes;
1693	u32 array_size;
1694	u32 len = 0;
1695	u32 cur;
1696	struct btrfs_key key;
1697
1698	array_size = btrfs_super_sys_array_size(super_copy);
1699
1700	ptr = super_copy->sys_chunk_array;
1701	cur = 0;
1702
1703	while (cur < array_size) {
1704		disk_key = (struct btrfs_disk_key *)ptr;
1705		btrfs_disk_key_to_cpu(&key, disk_key);
1706
1707		len = sizeof(*disk_key);
1708
1709		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
1710			chunk = (struct btrfs_chunk *)(ptr + len);
1711			num_stripes = btrfs_stack_chunk_num_stripes(chunk);
1712			len += btrfs_chunk_item_size(num_stripes);
1713		} else {
1714			ret = -EIO;
1715			break;
1716		}
1717		if (key.objectid == chunk_objectid &&
1718		    key.offset == chunk_offset) {
1719			memmove(ptr, ptr + len, array_size - (cur + len));
1720			array_size -= len;
1721			btrfs_set_super_sys_array_size(super_copy, array_size);
1722		} else {
1723			ptr += len;
1724			cur += len;
1725		}
1726	}
1727	return ret;
1728}
1729
1730static int btrfs_relocate_chunk(struct btrfs_root *root,
1731			 u64 chunk_tree, u64 chunk_objectid,
1732			 u64 chunk_offset)
1733{
1734	struct extent_map_tree *em_tree;
1735	struct btrfs_root *extent_root;
1736	struct btrfs_trans_handle *trans;
1737	struct extent_map *em;
1738	struct map_lookup *map;
1739	int ret;
1740	int i;
1741
1742	root = root->fs_info->chunk_root;
1743	extent_root = root->fs_info->extent_root;
1744	em_tree = &root->fs_info->mapping_tree.map_tree;
1745
1746	ret = btrfs_can_relocate(extent_root, chunk_offset);
1747	if (ret)
1748		return -ENOSPC;
1749
1750	/* step one, relocate all the extents inside this chunk */
1751	ret = btrfs_relocate_block_group(extent_root, chunk_offset);
1752	if (ret)
1753		return ret;
1754
1755	trans = btrfs_start_transaction(root, 0);
1756	BUG_ON(!trans);
1757
1758	lock_chunks(root);
1759
1760	/*
1761	 * step two, delete the device extents and the
1762	 * chunk tree entries
1763	 */
1764	read_lock(&em_tree->lock);
1765	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
1766	read_unlock(&em_tree->lock);
1767
1768	BUG_ON(em->start > chunk_offset ||
1769	       em->start + em->len < chunk_offset);
1770	map = (struct map_lookup *)em->bdev;
1771
1772	for (i = 0; i < map->num_stripes; i++) {
1773		ret = btrfs_free_dev_extent(trans, map->stripes[i].dev,
1774					    map->stripes[i].physical);
1775		BUG_ON(ret);
1776
1777		if (map->stripes[i].dev) {
1778			ret = btrfs_update_device(trans, map->stripes[i].dev);
1779			BUG_ON(ret);
1780		}
1781	}
1782	ret = btrfs_free_chunk(trans, root, chunk_tree, chunk_objectid,
1783			       chunk_offset);
1784
1785	BUG_ON(ret);
1786
1787	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
1788		ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset);
1789		BUG_ON(ret);
1790	}
1791
1792	ret = btrfs_remove_block_group(trans, extent_root, chunk_offset);
1793	BUG_ON(ret);
1794
1795	write_lock(&em_tree->lock);
1796	remove_extent_mapping(em_tree, em);
1797	write_unlock(&em_tree->lock);
1798
1799	kfree(map);
1800	em->bdev = NULL;
1801
1802	/* once for the tree */
1803	free_extent_map(em);
1804	/* once for us */
1805	free_extent_map(em);
1806
1807	unlock_chunks(root);
1808	btrfs_end_transaction(trans, root);
1809	return 0;
1810}
1811
1812static int btrfs_relocate_sys_chunks(struct btrfs_root *root)
1813{
1814	struct btrfs_root *chunk_root = root->fs_info->chunk_root;
1815	struct btrfs_path *path;
1816	struct extent_buffer *leaf;
1817	struct btrfs_chunk *chunk;
1818	struct btrfs_key key;
1819	struct btrfs_key found_key;
1820	u64 chunk_tree = chunk_root->root_key.objectid;
1821	u64 chunk_type;
1822	bool retried = false;
1823	int failed = 0;
1824	int ret;
1825
1826	path = btrfs_alloc_path();
1827	if (!path)
1828		return -ENOMEM;
1829
1830again:
1831	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
1832	key.offset = (u64)-1;
1833	key.type = BTRFS_CHUNK_ITEM_KEY;
1834
1835	while (1) {
1836		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
1837		if (ret < 0)
1838			goto error;
1839		BUG_ON(ret == 0);
1840
1841		ret = btrfs_previous_item(chunk_root, path, key.objectid,
1842					  key.type);
1843		if (ret < 0)
1844			goto error;
1845		if (ret > 0)
1846			break;
1847
1848		leaf = path->nodes[0];
1849		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1850
1851		chunk = btrfs_item_ptr(leaf, path->slots[0],
1852				       struct btrfs_chunk);
1853		chunk_type = btrfs_chunk_type(leaf, chunk);
1854		btrfs_release_path(chunk_root, path);
1855
1856		if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
1857			ret = btrfs_relocate_chunk(chunk_root, chunk_tree,
1858						   found_key.objectid,
1859						   found_key.offset);
1860			if (ret == -ENOSPC)
1861				failed++;
1862			else if (ret)
1863				BUG();
1864		}
1865
1866		if (found_key.offset == 0)
1867			break;
1868		key.offset = found_key.offset - 1;
1869	}
1870	ret = 0;
1871	if (failed && !retried) {
1872		failed = 0;
1873		retried = true;
1874		goto again;
1875	} else if (failed && retried) {
1876		WARN_ON(1);
1877		ret = -ENOSPC;
1878	}
1879error:
1880	btrfs_free_path(path);
1881	return ret;
1882}
1883
1884static u64 div_factor(u64 num, int factor)
1885{
1886	if (factor == 10)
1887		return num;
1888	num *= factor;
1889	do_div(num, 10);
1890	return num;
1891}
1892
1893int btrfs_balance(struct btrfs_root *dev_root)
1894{
1895	int ret;
1896	struct list_head *devices = &dev_root->fs_info->fs_devices->devices;
1897	struct btrfs_device *device;
1898	u64 old_size;
1899	u64 size_to_free;
1900	struct btrfs_path *path;
1901	struct btrfs_key key;
1902	struct btrfs_chunk *chunk;
1903	struct btrfs_root *chunk_root = dev_root->fs_info->chunk_root;
1904	struct btrfs_trans_handle *trans;
1905	struct btrfs_key found_key;
1906
1907	if (dev_root->fs_info->sb->s_flags & MS_RDONLY)
1908		return -EROFS;
1909
1910	mutex_lock(&dev_root->fs_info->volume_mutex);
1911	dev_root = dev_root->fs_info->dev_root;
1912
1913	/* step one make some room on all the devices */
1914	list_for_each_entry(device, devices, dev_list) {
1915		old_size = device->total_bytes;
1916		size_to_free = div_factor(old_size, 1);
1917		size_to_free = min(size_to_free, (u64)1 * 1024 * 1024);
1918		if (!device->writeable ||
1919		    device->total_bytes - device->bytes_used > size_to_free)
1920			continue;
1921
1922		ret = btrfs_shrink_device(device, old_size - size_to_free);
1923		if (ret == -ENOSPC)
1924			break;
1925		BUG_ON(ret);
1926
1927		trans = btrfs_start_transaction(dev_root, 0);
1928		BUG_ON(!trans);
1929
1930		ret = btrfs_grow_device(trans, device, old_size);
1931		BUG_ON(ret);
1932
1933		btrfs_end_transaction(trans, dev_root);
1934	}
1935
1936	/* step two, relocate all the chunks */
1937	path = btrfs_alloc_path();
1938	BUG_ON(!path);
1939
1940	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
1941	key.offset = (u64)-1;
1942	key.type = BTRFS_CHUNK_ITEM_KEY;
1943
1944	while (1) {
1945		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
1946		if (ret < 0)
1947			goto error;
1948
1949		/*
1950		 * this shouldn't happen, it means the last relocate
1951		 * failed
1952		 */
1953		if (ret == 0)
1954			break;
1955
1956		ret = btrfs_previous_item(chunk_root, path, 0,
1957					  BTRFS_CHUNK_ITEM_KEY);
1958		if (ret)
1959			break;
1960
1961		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
1962				      path->slots[0]);
1963		if (found_key.objectid != key.objectid)
1964			break;
1965
1966		chunk = btrfs_item_ptr(path->nodes[0],
1967				       path->slots[0],
1968				       struct btrfs_chunk);
1969		/* chunk zero is special */
1970		if (found_key.offset == 0)
1971			break;
1972
1973		btrfs_release_path(chunk_root, path);
1974		ret = btrfs_relocate_chunk(chunk_root,
1975					   chunk_root->root_key.objectid,
1976					   found_key.objectid,
1977					   found_key.offset);
1978		BUG_ON(ret && ret != -ENOSPC);
1979		key.offset = found_key.offset - 1;
1980	}
1981	ret = 0;
1982error:
1983	btrfs_free_path(path);
1984	mutex_unlock(&dev_root->fs_info->volume_mutex);
1985	return ret;
1986}
1987
1988/*
1989 * shrinking a device means finding all of the device extents past
1990 * the new size, and then following the back refs to the chunks.
1991 * The chunk relocation code actually frees the device extent
1992 */
1993int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
1994{
1995	struct btrfs_trans_handle *trans;
1996	struct btrfs_root *root = device->dev_root;
1997	struct btrfs_dev_extent *dev_extent = NULL;
1998	struct btrfs_path *path;
1999	u64 length;
2000	u64 chunk_tree;
2001	u64 chunk_objectid;
2002	u64 chunk_offset;
2003	int ret;
2004	int slot;
2005	int failed = 0;
2006	bool retried = false;
2007	struct extent_buffer *l;
2008	struct btrfs_key key;
2009	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
2010	u64 old_total = btrfs_super_total_bytes(super_copy);
2011	u64 old_size = device->total_bytes;
2012	u64 diff = device->total_bytes - new_size;
2013
2014	if (new_size >= device->total_bytes)
2015		return -EINVAL;
2016
2017	path = btrfs_alloc_path();
2018	if (!path)
2019		return -ENOMEM;
2020
2021	path->reada = 2;
2022
2023	lock_chunks(root);
2024
2025	device->total_bytes = new_size;
2026	if (device->writeable)
2027		device->fs_devices->total_rw_bytes -= diff;
2028	unlock_chunks(root);
2029
2030again:
2031	key.objectid = device->devid;
2032	key.offset = (u64)-1;
2033	key.type = BTRFS_DEV_EXTENT_KEY;
2034
2035	while (1) {
2036		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2037		if (ret < 0)
2038			goto done;
2039
2040		ret = btrfs_previous_item(root, path, 0, key.type);
2041		if (ret < 0)
2042			goto done;
2043		if (ret) {
2044			ret = 0;
2045			btrfs_release_path(root, path);
2046			break;
2047		}
2048
2049		l = path->nodes[0];
2050		slot = path->slots[0];
2051		btrfs_item_key_to_cpu(l, &key, path->slots[0]);
2052
2053		if (key.objectid != device->devid) {
2054			btrfs_release_path(root, path);
2055			break;
2056		}
2057
2058		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
2059		length = btrfs_dev_extent_length(l, dev_extent);
2060
2061		if (key.offset + length <= new_size) {
2062			btrfs_release_path(root, path);
2063			break;
2064		}
2065
2066		chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
2067		chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
2068		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
2069		btrfs_release_path(root, path);
2070
2071		ret = btrfs_relocate_chunk(root, chunk_tree, chunk_objectid,
2072					   chunk_offset);
2073		if (ret && ret != -ENOSPC)
2074			goto done;
2075		if (ret == -ENOSPC)
2076			failed++;
2077		key.offset -= 1;
2078	}
2079
2080	if (failed && !retried) {
2081		failed = 0;
2082		retried = true;
2083		goto again;
2084	} else if (failed && retried) {
2085		ret = -ENOSPC;
2086		lock_chunks(root);
2087
2088		device->total_bytes = old_size;
2089		if (device->writeable)
2090			device->fs_devices->total_rw_bytes += diff;
2091		unlock_chunks(root);
2092		goto done;
2093	}
2094
2095	/* Shrinking succeeded, else we would be at "done". */
2096	trans = btrfs_start_transaction(root, 0);
2097	lock_chunks(root);
2098
2099	device->disk_total_bytes = new_size;
2100	/* Now btrfs_update_device() will change the on-disk size. */
2101	ret = btrfs_update_device(trans, device);
2102	if (ret) {
2103		unlock_chunks(root);
2104		btrfs_end_transaction(trans, root);
2105		goto done;
2106	}
2107	WARN_ON(diff > old_total);
2108	btrfs_set_super_total_bytes(super_copy, old_total - diff);
2109	unlock_chunks(root);
2110	btrfs_end_transaction(trans, root);
2111done:
2112	btrfs_free_path(path);
2113	return ret;
2114}
2115
2116static int btrfs_add_system_chunk(struct btrfs_trans_handle *trans,
2117			   struct btrfs_root *root,
2118			   struct btrfs_key *key,
2119			   struct btrfs_chunk *chunk, int item_size)
2120{
2121	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
2122	struct btrfs_disk_key disk_key;
2123	u32 array_size;
2124	u8 *ptr;
2125
2126	array_size = btrfs_super_sys_array_size(super_copy);
2127	if (array_size + item_size > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
2128		return -EFBIG;
2129
2130	ptr = super_copy->sys_chunk_array + array_size;
2131	btrfs_cpu_key_to_disk(&disk_key, key);
2132	memcpy(ptr, &disk_key, sizeof(disk_key));
2133	ptr += sizeof(disk_key);
2134	memcpy(ptr, chunk, item_size);
2135	item_size += sizeof(disk_key);
2136	btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
2137	return 0;
2138}
2139
2140static noinline u64 chunk_bytes_by_type(u64 type, u64 calc_size,
2141					int num_stripes, int sub_stripes)
2142{
2143	if (type & (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_DUP))
2144		return calc_size;
2145	else if (type & BTRFS_BLOCK_GROUP_RAID10)
2146		return calc_size * (num_stripes / sub_stripes);
2147	else
2148		return calc_size * num_stripes;
2149}
2150
2151static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
2152			       struct btrfs_root *extent_root,
2153			       struct map_lookup **map_ret,
2154			       u64 *num_bytes, u64 *stripe_size,
2155			       u64 start, u64 type)
2156{
2157	struct btrfs_fs_info *info = extent_root->fs_info;
2158	struct btrfs_device *device = NULL;
2159	struct btrfs_fs_devices *fs_devices = info->fs_devices;
2160	struct list_head *cur;
2161	struct map_lookup *map = NULL;
2162	struct extent_map_tree *em_tree;
2163	struct extent_map *em;
2164	struct list_head private_devs;
2165	int min_stripe_size = 1 * 1024 * 1024;
2166	u64 calc_size = 1024 * 1024 * 1024;
2167	u64 max_chunk_size = calc_size;
2168	u64 min_free;
2169	u64 avail;
2170	u64 max_avail = 0;
2171	u64 dev_offset;
2172	int num_stripes = 1;
2173	int min_stripes = 1;
2174	int sub_stripes = 0;
2175	int looped = 0;
2176	int ret;
2177	int index;
2178	int stripe_len = 64 * 1024;
2179
2180	if ((type & BTRFS_BLOCK_GROUP_RAID1) &&
2181	    (type & BTRFS_BLOCK_GROUP_DUP)) {
2182		WARN_ON(1);
2183		type &= ~BTRFS_BLOCK_GROUP_DUP;
2184	}
2185	if (list_empty(&fs_devices->alloc_list))
2186		return -ENOSPC;
2187
2188	if (type & (BTRFS_BLOCK_GROUP_RAID0)) {
2189		num_stripes = fs_devices->rw_devices;
2190		min_stripes = 2;
2191	}
2192	if (type & (BTRFS_BLOCK_GROUP_DUP)) {
2193		num_stripes = 2;
2194		min_stripes = 2;
2195	}
2196	if (type & (BTRFS_BLOCK_GROUP_RAID1)) {
2197		if (fs_devices->rw_devices < 2)
2198			return -ENOSPC;
2199		num_stripes = 2;
2200		min_stripes = 2;
2201	}
2202	if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
2203		num_stripes = fs_devices->rw_devices;
2204		if (num_stripes < 4)
2205			return -ENOSPC;
2206		num_stripes &= ~(u32)1;
2207		sub_stripes = 2;
2208		min_stripes = 4;
2209	}
2210
2211	if (type & BTRFS_BLOCK_GROUP_DATA) {
2212		max_chunk_size = 10 * calc_size;
2213		min_stripe_size = 64 * 1024 * 1024;
2214	} else if (type & BTRFS_BLOCK_GROUP_METADATA) {
2215		max_chunk_size = 256 * 1024 * 1024;
2216		min_stripe_size = 32 * 1024 * 1024;
2217	} else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
2218		calc_size = 8 * 1024 * 1024;
2219		max_chunk_size = calc_size * 2;
2220		min_stripe_size = 1 * 1024 * 1024;
2221	}
2222
2223	/* we don't want a chunk larger than 10% of writeable space */
2224	max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
2225			     max_chunk_size);
2226
2227again:
2228	max_avail = 0;
2229	if (!map || map->num_stripes != num_stripes) {
2230		kfree(map);
2231		map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
2232		if (!map)
2233			return -ENOMEM;
2234		map->num_stripes = num_stripes;
2235	}
2236
2237	if (calc_size * num_stripes > max_chunk_size) {
2238		calc_size = max_chunk_size;
2239		do_div(calc_size, num_stripes);
2240		do_div(calc_size, stripe_len);
2241		calc_size *= stripe_len;
2242	}
2243
2244	/* we don't want tiny stripes */
2245	if (!looped)
2246		calc_size = max_t(u64, min_stripe_size, calc_size);
2247
2248	/*
2249	 * we're about to do_div by the stripe_len so lets make sure
2250	 * we end up with something bigger than a stripe
2251	 */
2252	calc_size = max_t(u64, calc_size, stripe_len * 4);
2253
2254	do_div(calc_size, stripe_len);
2255	calc_size *= stripe_len;
2256
2257	cur = fs_devices->alloc_list.next;
2258	index = 0;
2259
2260	if (type & BTRFS_BLOCK_GROUP_DUP)
2261		min_free = calc_size * 2;
2262	else
2263		min_free = calc_size;
2264
2265	/*
2266	 * we add 1MB because we never use the first 1MB of the device, unless
2267	 * we've looped, then we are likely allocating the maximum amount of
2268	 * space left already
2269	 */
2270	if (!looped)
2271		min_free += 1024 * 1024;
2272
2273	INIT_LIST_HEAD(&private_devs);
2274	while (index < num_stripes) {
2275		device = list_entry(cur, struct btrfs_device, dev_alloc_list);
2276		BUG_ON(!device->writeable);
2277		if (device->total_bytes > device->bytes_used)
2278			avail = device->total_bytes - device->bytes_used;
2279		else
2280			avail = 0;
2281		cur = cur->next;
2282
2283		if (device->in_fs_metadata && avail >= min_free) {
2284			ret = find_free_dev_extent(trans, device,
2285						   min_free, &dev_offset,
2286						   &max_avail);
2287			if (ret == 0) {
2288				list_move_tail(&device->dev_alloc_list,
2289					       &private_devs);
2290				map->stripes[index].dev = device;
2291				map->stripes[index].physical = dev_offset;
2292				index++;
2293				if (type & BTRFS_BLOCK_GROUP_DUP) {
2294					map->stripes[index].dev = device;
2295					map->stripes[index].physical =
2296						dev_offset + calc_size;
2297					index++;
2298				}
2299			}
2300		} else if (device->in_fs_metadata && avail > max_avail)
2301			max_avail = avail;
2302		if (cur == &fs_devices->alloc_list)
2303			break;
2304	}
2305	list_splice(&private_devs, &fs_devices->alloc_list);
2306	if (index < num_stripes) {
2307		if (index >= min_stripes) {
2308			num_stripes = index;
2309			if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
2310				num_stripes /= sub_stripes;
2311				num_stripes *= sub_stripes;
2312			}
2313			looped = 1;
2314			goto again;
2315		}
2316		if (!looped && max_avail > 0) {
2317			looped = 1;
2318			calc_size = max_avail;
2319			goto again;
2320		}
2321		kfree(map);
2322		return -ENOSPC;
2323	}
2324	map->sector_size = extent_root->sectorsize;
2325	map->stripe_len = stripe_len;
2326	map->io_align = stripe_len;
2327	map->io_width = stripe_len;
2328	map->type = type;
2329	map->num_stripes = num_stripes;
2330	map->sub_stripes = sub_stripes;
2331
2332	*map_ret = map;
2333	*stripe_size = calc_size;
2334	*num_bytes = chunk_bytes_by_type(type, calc_size,
2335					 num_stripes, sub_stripes);
2336
2337	em = alloc_extent_map(GFP_NOFS);
2338	if (!em) {
2339		kfree(map);
2340		return -ENOMEM;
2341	}
2342	em->bdev = (struct block_device *)map;
2343	em->start = start;
2344	em->len = *num_bytes;
2345	em->block_start = 0;
2346	em->block_len = em->len;
2347
2348	em_tree = &extent_root->fs_info->mapping_tree.map_tree;
2349	write_lock(&em_tree->lock);
2350	ret = add_extent_mapping(em_tree, em);
2351	write_unlock(&em_tree->lock);
2352	BUG_ON(ret);
2353	free_extent_map(em);
2354
2355	ret = btrfs_make_block_group(trans, extent_root, 0, type,
2356				     BTRFS_FIRST_CHUNK_TREE_OBJECTID,
2357				     start, *num_bytes);
2358	BUG_ON(ret);
2359
2360	index = 0;
2361	while (index < map->num_stripes) {
2362		device = map->stripes[index].dev;
2363		dev_offset = map->stripes[index].physical;
2364
2365		ret = btrfs_alloc_dev_extent(trans, device,
2366				info->chunk_root->root_key.objectid,
2367				BTRFS_FIRST_CHUNK_TREE_OBJECTID,
2368				start, dev_offset, calc_size);
2369		BUG_ON(ret);
2370		index++;
2371	}
2372
2373	return 0;
2374}
2375
2376static int __finish_chunk_alloc(struct btrfs_trans_handle *trans,
2377				struct btrfs_root *extent_root,
2378				struct map_lookup *map, u64 chunk_offset,
2379				u64 chunk_size, u64 stripe_size)
2380{
2381	u64 dev_offset;
2382	struct btrfs_key key;
2383	struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
2384	struct btrfs_device *device;
2385	struct btrfs_chunk *chunk;
2386	struct btrfs_stripe *stripe;
2387	size_t item_size = btrfs_chunk_item_size(map->num_stripes);
2388	int index = 0;
2389	int ret;
2390
2391	chunk = kzalloc(item_size, GFP_NOFS);
2392	if (!chunk)
2393		return -ENOMEM;
2394
2395	index = 0;
2396	while (index < map->num_stripes) {
2397		device = map->stripes[index].dev;
2398		device->bytes_used += stripe_size;
2399		ret = btrfs_update_device(trans, device);
2400		BUG_ON(ret);
2401		index++;
2402	}
2403
2404	index = 0;
2405	stripe = &chunk->stripe;
2406	while (index < map->num_stripes) {
2407		device = map->stripes[index].dev;
2408		dev_offset = map->stripes[index].physical;
2409
2410		btrfs_set_stack_stripe_devid(stripe, device->devid);
2411		btrfs_set_stack_stripe_offset(stripe, dev_offset);
2412		memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
2413		stripe++;
2414		index++;
2415	}
2416
2417	btrfs_set_stack_chunk_length(chunk, chunk_size);
2418	btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
2419	btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
2420	btrfs_set_stack_chunk_type(chunk, map->type);
2421	btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
2422	btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
2423	btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
2424	btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
2425	btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);
2426
2427	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2428	key.type = BTRFS_CHUNK_ITEM_KEY;
2429	key.offset = chunk_offset;
2430
2431	ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
2432	BUG_ON(ret);
2433
2434	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
2435		ret = btrfs_add_system_chunk(trans, chunk_root, &key, chunk,
2436					     item_size);
2437		BUG_ON(ret);
2438	}
2439	kfree(chunk);
2440	return 0;
2441}
2442
2443/*
2444 * Chunk allocation falls into two parts. The first part does works
2445 * that make the new allocated chunk useable, but not do any operation
2446 * that modifies the chunk tree. The second part does the works that
2447 * require modifying the chunk tree. This division is important for the
2448 * bootstrap process of adding storage to a seed btrfs.
2449 */
2450int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
2451		      struct btrfs_root *extent_root, u64 type)
2452{
2453	u64 chunk_offset;
2454	u64 chunk_size;
2455	u64 stripe_size;
2456	struct map_lookup *map;
2457	struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
2458	int ret;
2459
2460	ret = find_next_chunk(chunk_root, BTRFS_FIRST_CHUNK_TREE_OBJECTID,
2461			      &chunk_offset);
2462	if (ret)
2463		return ret;
2464
2465	ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
2466				  &stripe_size, chunk_offset, type);
2467	if (ret)
2468		return ret;
2469
2470	ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
2471				   chunk_size, stripe_size);
2472	BUG_ON(ret);
2473	return 0;
2474}
2475
2476static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
2477					 struct btrfs_root *root,
2478					 struct btrfs_device *device)
2479{
2480	u64 chunk_offset;
2481	u64 sys_chunk_offset;
2482	u64 chunk_size;
2483	u64 sys_chunk_size;
2484	u64 stripe_size;
2485	u64 sys_stripe_size;
2486	u64 alloc_profile;
2487	struct map_lookup *map;
2488	struct map_lookup *sys_map;
2489	struct btrfs_fs_info *fs_info = root->fs_info;
2490	struct btrfs_root *extent_root = fs_info->extent_root;
2491	int ret;
2492
2493	ret = find_next_chunk(fs_info->chunk_root,
2494			      BTRFS_FIRST_CHUNK_TREE_OBJECTID, &chunk_offset);
2495	BUG_ON(ret);
2496
2497	alloc_profile = BTRFS_BLOCK_GROUP_METADATA |
2498			(fs_info->metadata_alloc_profile &
2499			 fs_info->avail_metadata_alloc_bits);
2500	alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);
2501
2502	ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
2503				  &stripe_size, chunk_offset, alloc_profile);
2504	BUG_ON(ret);
2505
2506	sys_chunk_offset = chunk_offset + chunk_size;
2507
2508	alloc_profile = BTRFS_BLOCK_GROUP_SYSTEM |
2509			(fs_info->system_alloc_profile &
2510			 fs_info->avail_system_alloc_bits);
2511	alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);
2512
2513	ret = __btrfs_alloc_chunk(trans, extent_root, &sys_map,
2514				  &sys_chunk_size, &sys_stripe_size,
2515				  sys_chunk_offset, alloc_profile);
2516	BUG_ON(ret);
2517
2518	ret = btrfs_add_device(trans, fs_info->chunk_root, device);
2519	BUG_ON(ret);
2520
2521	/*
2522	 * Modifying chunk tree needs allocating new blocks from both
2523	 * system block group and metadata block group. So we only can
2524	 * do operations require modifying the chunk tree after both
2525	 * block groups were created.
2526	 */
2527	ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
2528				   chunk_size, stripe_size);
2529	BUG_ON(ret);
2530
2531	ret = __finish_chunk_alloc(trans, extent_root, sys_map,
2532				   sys_chunk_offset, sys_chunk_size,
2533				   sys_stripe_size);
2534	BUG_ON(ret);
2535	return 0;
2536}
2537
2538int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
2539{
2540	struct extent_map *em;
2541	struct map_lookup *map;
2542	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
2543	int readonly = 0;
2544	int i;
2545
2546	read_lock(&map_tree->map_tree.lock);
2547	em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
2548	read_unlock(&map_tree->map_tree.lock);
2549	if (!em)
2550		return 1;
2551
2552	if (btrfs_test_opt(root, DEGRADED)) {
2553		free_extent_map(em);
2554		return 0;
2555	}
2556
2557	map = (struct map_lookup *)em->bdev;
2558	for (i = 0; i < map->num_stripes; i++) {
2559		if (!map->stripes[i].dev->writeable) {
2560			readonly = 1;
2561			break;
2562		}
2563	}
2564	free_extent_map(em);
2565	return readonly;
2566}
2567
2568void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
2569{
2570	extent_map_tree_init(&tree->map_tree, GFP_NOFS);
2571}
2572
2573void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
2574{
2575	struct extent_map *em;
2576
2577	while (1) {
2578		write_lock(&tree->map_tree.lock);
2579		em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
2580		if (em)
2581			remove_extent_mapping(&tree->map_tree, em);
2582		write_unlock(&tree->map_tree.lock);
2583		if (!em)
2584			break;
2585		kfree(em->bdev);
2586		/* once for us */
2587		free_extent_map(em);
2588		/* once for the tree */
2589		free_extent_map(em);
2590	}
2591}
2592
2593int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len)
2594{
2595	struct extent_map *em;
2596	struct map_lookup *map;
2597	struct extent_map_tree *em_tree = &map_tree->map_tree;
2598	int ret;
2599
2600	read_lock(&em_tree->lock);
2601	em = lookup_extent_mapping(em_tree, logical, len);
2602	read_unlock(&em_tree->lock);
2603	BUG_ON(!em);
2604
2605	BUG_ON(em->start > logical || em->start + em->len < logical);
2606	map = (struct map_lookup *)em->bdev;
2607	if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
2608		ret = map->num_stripes;
2609	else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
2610		ret = map->sub_stripes;
2611	else
2612		ret = 1;
2613	free_extent_map(em);
2614	return ret;
2615}
2616
2617static int find_live_mirror(struct map_lookup *map, int first, int num,
2618			    int optimal)
2619{
2620	int i;
2621	if (map->stripes[optimal].dev->bdev)
2622		return optimal;
2623	for (i = first; i < first + num; i++) {
2624		if (map->stripes[i].dev->bdev)
2625			return i;
2626	}
2627	/* we couldn't find one that doesn't fail.  Just return something
2628	 * and the io error handling code will clean up eventually
2629	 */
2630	return optimal;
2631}
2632
2633static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
2634			     u64 logical, u64 *length,
2635			     struct btrfs_multi_bio **multi_ret,
2636			     int mirror_num, struct page *unplug_page)
2637{
2638	struct extent_map *em;
2639	struct map_lookup *map;
2640	struct extent_map_tree *em_tree = &map_tree->map_tree;
2641	u64 offset;
2642	u64 stripe_offset;
2643	u64 stripe_nr;
2644	int stripes_allocated = 8;
2645	int stripes_required = 1;
2646	int stripe_index;
2647	int i;
2648	int num_stripes;
2649	int max_errors = 0;
2650	struct btrfs_multi_bio *multi = NULL;
2651
2652	if (multi_ret && !(rw & REQ_WRITE))
2653		stripes_allocated = 1;
2654again:
2655	if (multi_ret) {
2656		multi = kzalloc(btrfs_multi_bio_size(stripes_allocated),
2657				GFP_NOFS);
2658		if (!multi)
2659			return -ENOMEM;
2660
2661		atomic_set(&multi->error, 0);
2662	}
2663
2664	read_lock(&em_tree->lock);
2665	em = lookup_extent_mapping(em_tree, logical, *length);
2666	read_unlock(&em_tree->lock);
2667
2668	if (!em && unplug_page) {
2669		kfree(multi);
2670		return 0;
2671	}
2672
2673	if (!em) {
2674		printk(KERN_CRIT "unable to find logical %llu len %llu\n",
2675		       (unsigned long long)logical,
2676		       (unsigned long long)*length);
2677		BUG();
2678	}
2679
2680	BUG_ON(em->start > logical || em->start + em->len < logical);
2681	map = (struct map_lookup *)em->bdev;
2682	offset = logical - em->start;
2683
2684	if (mirror_num > map->num_stripes)
2685		mirror_num = 0;
2686
2687	/* if our multi bio struct is too small, back off and try again */
2688	if (rw & REQ_WRITE) {
2689		if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
2690				 BTRFS_BLOCK_GROUP_DUP)) {
2691			stripes_required = map->num_stripes;
2692			max_errors = 1;
2693		} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
2694			stripes_required = map->sub_stripes;
2695			max_errors = 1;
2696		}
2697	}
2698	if (multi_ret && (rw & REQ_WRITE) &&
2699	    stripes_allocated < stripes_required) {
2700		stripes_allocated = map->num_stripes;
2701		free_extent_map(em);
2702		kfree(multi);
2703		goto again;
2704	}
2705	stripe_nr = offset;
2706	/*
2707	 * stripe_nr counts the total number of stripes we have to stride
2708	 * to get to this block
2709	 */
2710	do_div(stripe_nr, map->stripe_len);
2711
2712	stripe_offset = stripe_nr * map->stripe_len;
2713	BUG_ON(offset < stripe_offset);
2714
2715	/* stripe_offset is the offset of this block in its stripe*/
2716	stripe_offset = offset - stripe_offset;
2717
2718	if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 |
2719			 BTRFS_BLOCK_GROUP_RAID10 |
2720			 BTRFS_BLOCK_GROUP_DUP)) {
2721		/* we limit the length of each bio to what fits in a stripe */
2722		*length = min_t(u64, em->len - offset,
2723			      map->stripe_len - stripe_offset);
2724	} else {
2725		*length = em->len - offset;
2726	}
2727
2728	if (!multi_ret && !unplug_page)
2729		goto out;
2730
2731	num_stripes = 1;
2732	stripe_index = 0;
2733	if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
2734		if (unplug_page || (rw & REQ_WRITE))
2735			num_stripes = map->num_stripes;
2736		else if (mirror_num)
2737			stripe_index = mirror_num - 1;
2738		else {
2739			stripe_index = find_live_mirror(map, 0,
2740					    map->num_stripes,
2741					    current->pid % map->num_stripes);
2742		}
2743
2744	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
2745		if (rw & REQ_WRITE)
2746			num_stripes = map->num_stripes;
2747		else if (mirror_num)
2748			stripe_index = mirror_num - 1;
2749
2750	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
2751		int factor = map->num_stripes / map->sub_stripes;
2752
2753		stripe_index = do_div(stripe_nr, factor);
2754		stripe_index *= map->sub_stripes;
2755
2756		if (unplug_page || (rw & REQ_WRITE))
2757			num_stripes = map->sub_stripes;
2758		else if (mirror_num)
2759			stripe_index += mirror_num - 1;
2760		else {
2761			stripe_index = find_live_mirror(map, stripe_index,
2762					      map->sub_stripes, stripe_index +
2763					      current->pid % map->sub_stripes);
2764		}
2765	} else {
2766		/*
2767		 * after this do_div call, stripe_nr is the number of stripes
2768		 * on this device we have to walk to find the data, and
2769		 * stripe_index is the number of our device in the stripe array
2770		 */
2771		stripe_index = do_div(stripe_nr, map->num_stripes);
2772	}
2773	BUG_ON(stripe_index >= map->num_stripes);
2774
2775	for (i = 0; i < num_stripes; i++) {
2776		if (unplug_page) {
2777			struct btrfs_device *device;
2778			struct backing_dev_info *bdi;
2779
2780			device = map->stripes[stripe_index].dev;
2781			if (device->bdev) {
2782				bdi = blk_get_backing_dev_info(device->bdev);
2783				if (bdi->unplug_io_fn)
2784					bdi->unplug_io_fn(bdi, unplug_page);
2785			}
2786		} else {
2787			multi->stripes[i].physical =
2788				map->stripes[stripe_index].physical +
2789				stripe_offset + stripe_nr * map->stripe_len;
2790			multi->stripes[i].dev = map->stripes[stripe_index].dev;
2791		}
2792		stripe_index++;
2793	}
2794	if (multi_ret) {
2795		*multi_ret = multi;
2796		multi->num_stripes = num_stripes;
2797		multi->max_errors = max_errors;
2798	}
2799out:
2800	free_extent_map(em);
2801	return 0;
2802}
2803
2804int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
2805		      u64 logical, u64 *length,
2806		      struct btrfs_multi_bio **multi_ret, int mirror_num)
2807{
2808	return __btrfs_map_block(map_tree, rw, logical, length, multi_ret,
2809				 mirror_num, NULL);
2810}
2811
2812int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
2813		     u64 chunk_start, u64 physical, u64 devid,
2814		     u64 **logical, int *naddrs, int *stripe_len)
2815{
2816	struct extent_map_tree *em_tree = &map_tree->map_tree;
2817	struct extent_map *em;
2818	struct map_lookup *map;
2819	u64 *buf;
2820	u64 bytenr;
2821	u64 length;
2822	u64 stripe_nr;
2823	int i, j, nr = 0;
2824
2825	read_lock(&em_tree->lock);
2826	em = lookup_extent_mapping(em_tree, chunk_start, 1);
2827	read_unlock(&em_tree->lock);
2828
2829	BUG_ON(!em || em->start != chunk_start);
2830	map = (struct map_lookup *)em->bdev;
2831
2832	length = em->len;
2833	if (map->type & BTRFS_BLOCK_GROUP_RAID10)
2834		do_div(length, map->num_stripes / map->sub_stripes);
2835	else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
2836		do_div(length, map->num_stripes);
2837
2838	buf = kzalloc(sizeof(u64) * map->num_stripes, GFP_NOFS);
2839	BUG_ON(!buf);
2840
2841	for (i = 0; i < map->num_stripes; i++) {
2842		if (devid && map->stripes[i].dev->devid != devid)
2843			continue;
2844		if (map->stripes[i].physical > physical ||
2845		    map->stripes[i].physical + length <= physical)
2846			continue;
2847
2848		stripe_nr = physical - map->stripes[i].physical;
2849		do_div(stripe_nr, map->stripe_len);
2850
2851		if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
2852			stripe_nr = stripe_nr * map->num_stripes + i;
2853			do_div(stripe_nr, map->sub_stripes);
2854		} else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
2855			stripe_nr = stripe_nr * map->num_stripes + i;
2856		}
2857		bytenr = chunk_start + stripe_nr * map->stripe_len;
2858		WARN_ON(nr >= map->num_stripes);
2859		for (j = 0; j < nr; j++) {
2860			if (buf[j] == bytenr)
2861				break;
2862		}
2863		if (j == nr) {
2864			WARN_ON(nr >= map->num_stripes);
2865			buf[nr++] = bytenr;
2866		}
2867	}
2868
2869	*logical = buf;
2870	*naddrs = nr;
2871	*stripe_len = map->stripe_len;
2872
2873	free_extent_map(em);
2874	return 0;
2875}
2876
2877int btrfs_unplug_page(struct btrfs_mapping_tree *map_tree,
2878		      u64 logical, struct page *page)
2879{
2880	u64 length = PAGE_CACHE_SIZE;
2881	return __btrfs_map_block(map_tree, READ, logical, &length,
2882				 NULL, 0, page);
2883}
2884
2885static void end_bio_multi_stripe(struct bio *bio, int err)
2886{
2887	struct btrfs_multi_bio *multi = bio->bi_private;
2888	int is_orig_bio = 0;
2889
2890	if (err)
2891		atomic_inc(&multi->error);
2892
2893	if (bio == multi->orig_bio)
2894		is_orig_bio = 1;
2895
2896	if (atomic_dec_and_test(&multi->stripes_pending)) {
2897		if (!is_orig_bio) {
2898			bio_put(bio);
2899			bio = multi->orig_bio;
2900		}
2901		bio->bi_private = multi->private;
2902		bio->bi_end_io = multi->end_io;
2903		/* only send an error to the higher layers if it is
2904		 * beyond the tolerance of the multi-bio
2905		 */
2906		if (atomic_read(&multi->error) > multi->max_errors) {
2907			err = -EIO;
2908		} else if (err) {
2909			/*
2910			 * this bio is actually up to date, we didn't
2911			 * go over the max number of errors
2912			 */
2913			set_bit(BIO_UPTODATE, &bio->bi_flags);
2914			err = 0;
2915		}
2916		kfree(multi);
2917
2918		bio_endio(bio, err);
2919	} else if (!is_orig_bio) {
2920		bio_put(bio);
2921	}
2922}
2923
2924struct async_sched {
2925	struct bio *bio;
2926	int rw;
2927	struct btrfs_fs_info *info;
2928	struct btrfs_work work;
2929};
2930
2931/*
2932 * see run_scheduled_bios for a description of why bios are collected for
2933 * async submit.
2934 *
2935 * This will add one bio to the pending list for a device and make sure
2936 * the work struct is scheduled.
2937 */
2938static noinline int schedule_bio(struct btrfs_root *root,
2939				 struct btrfs_device *device,
2940				 int rw, struct bio *bio)
2941{
2942	int should_queue = 1;
2943	struct btrfs_pending_bios *pending_bios;
2944
2945	/* don't bother with additional async steps for reads, right now */
2946	if (!(rw & REQ_WRITE)) {
2947		bio_get(bio);
2948		submit_bio(rw, bio);
2949		bio_put(bio);
2950		return 0;
2951	}
2952
2953	/*
2954	 * nr_async_bios allows us to reliably return congestion to the
2955	 * higher layers.  Otherwise, the async bio makes it appear we have
2956	 * made progress against dirty pages when we've really just put it
2957	 * on a queue for later
2958	 */
2959	atomic_inc(&root->fs_info->nr_async_bios);
2960	WARN_ON(bio->bi_next);
2961	bio->bi_next = NULL;
2962	bio->bi_rw |= rw;
2963
2964	spin_lock(&device->io_lock);
2965	if (bio->bi_rw & REQ_SYNC)
2966		pending_bios = &device->pending_sync_bios;
2967	else
2968		pending_bios = &device->pending_bios;
2969
2970	if (pending_bios->tail)
2971		pending_bios->tail->bi_next = bio;
2972
2973	pending_bios->tail = bio;
2974	if (!pending_bios->head)
2975		pending_bios->head = bio;
2976	if (device->running_pending)
2977		should_queue = 0;
2978
2979	spin_unlock(&device->io_lock);
2980
2981	if (should_queue)
2982		btrfs_queue_worker(&root->fs_info->submit_workers,
2983				   &device->work);
2984	return 0;
2985}
2986
2987int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
2988		  int mirror_num, int async_submit)
2989{
2990	struct btrfs_mapping_tree *map_tree;
2991	struct btrfs_device *dev;
2992	struct bio *first_bio = bio;
2993	u64 logical = (u64)bio->bi_sector << 9;
2994	u64 length = 0;
2995	u64 map_length;
2996	struct btrfs_multi_bio *multi = NULL;
2997	int ret;
2998	int dev_nr = 0;
2999	int total_devs = 1;
3000
3001	length = bio->bi_size;
3002	map_tree = &root->fs_info->mapping_tree;
3003	map_length = length;
3004
3005	ret = btrfs_map_block(map_tree, rw, logical, &map_length, &multi,
3006			      mirror_num);
3007	BUG_ON(ret);
3008
3009	total_devs = multi->num_stripes;
3010	if (map_length < length) {
3011		printk(KERN_CRIT "mapping failed logical %llu bio len %llu "
3012		       "len %llu\n", (unsigned long long)logical,
3013		       (unsigned long long)length,
3014		       (unsigned long long)map_length);
3015		BUG();
3016	}
3017	multi->end_io = first_bio->bi_end_io;
3018	multi->private = first_bio->bi_private;
3019	multi->orig_bio = first_bio;
3020	atomic_set(&multi->stripes_pending, multi->num_stripes);
3021
3022	while (dev_nr < total_devs) {
3023		if (total_devs > 1) {
3024			if (dev_nr < total_devs - 1) {
3025				bio = bio_clone(first_bio, GFP_NOFS);
3026				BUG_ON(!bio);
3027			} else {
3028				bio = first_bio;
3029			}
3030			bio->bi_private = multi;
3031			bio->bi_end_io = end_bio_multi_stripe;
3032		}
3033		bio->bi_sector = multi->stripes[dev_nr].physical >> 9;
3034		dev = multi->stripes[dev_nr].dev;
3035		BUG_ON(rw == WRITE && !dev->writeable);
3036		if (dev && dev->bdev) {
3037			bio->bi_bdev = dev->bdev;
3038			if (async_submit)
3039				schedule_bio(root, dev, rw, bio);
3040			else
3041				submit_bio(rw, bio);
3042		} else {
3043			bio->bi_bdev = root->fs_info->fs_devices->latest_bdev;
3044			bio->bi_sector = logical >> 9;
3045			bio_endio(bio, -EIO);
3046		}
3047		dev_nr++;
3048	}
3049	if (total_devs == 1)
3050		kfree(multi);
3051	return 0;
3052}
3053
3054struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid,
3055				       u8 *uuid, u8 *fsid)
3056{
3057	struct btrfs_device *device;
3058	struct btrfs_fs_devices *cur_devices;
3059
3060	cur_devices = root->fs_info->fs_devices;
3061	while (cur_devices) {
3062		if (!fsid ||
3063		    !memcmp(cur_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
3064			device = __find_device(&cur_devices->devices,
3065					       devid, uuid);
3066			if (device)
3067				return device;
3068		}
3069		cur_devices = cur_devices->seed;
3070	}
3071	return NULL;
3072}
3073
3074static struct btrfs_device *add_missing_dev(struct btrfs_root *root,
3075					    u64 devid, u8 *dev_uuid)
3076{
3077	struct btrfs_device *device;
3078	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
3079
3080	device = kzalloc(sizeof(*device), GFP_NOFS);
3081	if (!device)
3082		return NULL;
3083	list_add(&device->dev_list,
3084		 &fs_devices->devices);
3085	device->barriers = 1;
3086	device->dev_root = root->fs_info->dev_root;
3087	device->devid = devid;
3088	device->work.func = pending_bios_fn;
3089	device->fs_devices = fs_devices;
3090	fs_devices->num_devices++;
3091	spin_lock_init(&device->io_lock);
3092	INIT_LIST_HEAD(&device->dev_alloc_list);
3093	memcpy(device->uuid, dev_uuid, BTRFS_UUID_SIZE);
3094	return device;
3095}
3096
3097static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
3098			  struct extent_buffer *leaf,
3099			  struct btrfs_chunk *chunk)
3100{
3101	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
3102	struct map_lookup *map;
3103	struct extent_map *em;
3104	u64 logical;
3105	u64 length;
3106	u64 devid;
3107	u8 uuid[BTRFS_UUID_SIZE];
3108	int num_stripes;
3109	int ret;
3110	int i;
3111
3112	logical = key->offset;
3113	length = btrfs_chunk_length(leaf, chunk);
3114
3115	read_lock(&map_tree->map_tree.lock);
3116	em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
3117	read_unlock(&map_tree->map_tree.lock);
3118
3119	/* already mapped? */
3120	if (em && em->start <= logical && em->start + em->len > logical) {
3121		free_extent_map(em);
3122		return 0;
3123	} else if (em) {
3124		free_extent_map(em);
3125	}
3126
3127	em = alloc_extent_map(GFP_NOFS);
3128	if (!em)
3129		return -ENOMEM;
3130	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3131	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
3132	if (!map) {
3133		free_extent_map(em);
3134		return -ENOMEM;
3135	}
3136
3137	em->bdev = (struct block_device *)map;
3138	em->start = logical;
3139	em->len = length;
3140	em->block_start = 0;
3141	em->block_len = em->len;
3142
3143	map->num_stripes = num_stripes;
3144	map->io_width = btrfs_chunk_io_width(leaf, chunk);
3145	map->io_align = btrfs_chunk_io_align(leaf, chunk);
3146	map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
3147	map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
3148	map->type = btrfs_chunk_type(leaf, chunk);
3149	map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
3150	for (i = 0; i < num_stripes; i++) {
3151		map->stripes[i].physical =
3152			btrfs_stripe_offset_nr(leaf, chunk, i);
3153		devid = btrfs_stripe_devid_nr(leaf, chunk, i);
3154		read_extent_buffer(leaf, uuid, (unsigned long)
3155				   btrfs_stripe_dev_uuid_nr(chunk, i),
3156				   BTRFS_UUID_SIZE);
3157		map->stripes[i].dev = btrfs_find_device(root, devid, uuid,
3158							NULL);
3159		if (!map->stripes[i].dev && !btrfs_test_opt(root, DEGRADED)) {
3160			kfree(map);
3161			free_extent_map(em);
3162			return -EIO;
3163		}
3164		if (!map->stripes[i].dev) {
3165			map->stripes[i].dev =
3166				add_missing_dev(root, devid, uuid);
3167			if (!map->stripes[i].dev) {
3168				kfree(map);
3169				free_extent_map(em);
3170				return -EIO;
3171			}
3172		}
3173		map->stripes[i].dev->in_fs_metadata = 1;
3174	}
3175
3176	write_lock(&map_tree->map_tree.lock);
3177	ret = add_extent_mapping(&map_tree->map_tree, em);
3178	write_unlock(&map_tree->map_tree.lock);
3179	BUG_ON(ret);
3180	free_extent_map(em);
3181
3182	return 0;
3183}
3184
3185static int fill_device_from_item(struct extent_buffer *leaf,
3186				 struct btrfs_dev_item *dev_item,
3187				 struct btrfs_device *device)
3188{
3189	unsigned long ptr;
3190
3191	device->devid = btrfs_device_id(leaf, dev_item);
3192	device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
3193	device->total_bytes = device->disk_total_bytes;
3194	device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
3195	device->type = btrfs_device_type(leaf, dev_item);
3196	device->io_align = btrfs_device_io_align(leaf, dev_item);
3197	device->io_width = btrfs_device_io_width(leaf, dev_item);
3198	device->sector_size = btrfs_device_sector_size(leaf, dev_item);
3199
3200	ptr = (unsigned long)btrfs_device_uuid(dev_item);
3201	read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
3202
3203	return 0;
3204}
3205
3206static int open_seed_devices(struct btrfs_root *root, u8 *fsid)
3207{
3208	struct btrfs_fs_devices *fs_devices;
3209	int ret;
3210
3211	mutex_lock(&uuid_mutex);
3212
3213	fs_devices = root->fs_info->fs_devices->seed;
3214	while (fs_devices) {
3215		if (!memcmp(fs_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
3216			ret = 0;
3217			goto out;
3218		}
3219		fs_devices = fs_devices->seed;
3220	}
3221
3222	fs_devices = find_fsid(fsid);
3223	if (!fs_devices) {
3224		ret = -ENOENT;
3225		goto out;
3226	}
3227
3228	fs_devices = clone_fs_devices(fs_devices);
3229	if (IS_ERR(fs_devices)) {
3230		ret = PTR_ERR(fs_devices);
3231		goto out;
3232	}
3233
3234	ret = __btrfs_open_devices(fs_devices, FMODE_READ,
3235				   root->fs_info->bdev_holder);
3236	if (ret)
3237		goto out;
3238
3239	if (!fs_devices->seeding) {
3240		__btrfs_close_devices(fs_devices);
3241		free_fs_devices(fs_devices);
3242		ret = -EINVAL;
3243		goto out;
3244	}
3245
3246	fs_devices->seed = root->fs_info->fs_devices->seed;
3247	root->fs_info->fs_devices->seed = fs_devices;
3248out:
3249	mutex_unlock(&uuid_mutex);
3250	return ret;
3251}
3252
3253static int read_one_dev(struct btrfs_root *root,
3254			struct extent_buffer *leaf,
3255			struct btrfs_dev_item *dev_item)
3256{
3257	struct btrfs_device *device;
3258	u64 devid;
3259	int ret;
3260	u8 fs_uuid[BTRFS_UUID_SIZE];
3261	u8 dev_uuid[BTRFS_UUID_SIZE];
3262
3263	devid = btrfs_device_id(leaf, dev_item);
3264	read_extent_buffer(leaf, dev_uuid,
3265			   (unsigned long)btrfs_device_uuid(dev_item),
3266			   BTRFS_UUID_SIZE);
3267	read_extent_buffer(leaf, fs_uuid,
3268			   (unsigned long)btrfs_device_fsid(dev_item),
3269			   BTRFS_UUID_SIZE);
3270
3271	if (memcmp(fs_uuid, root->fs_info->fsid, BTRFS_UUID_SIZE)) {
3272		ret = open_seed_devices(root, fs_uuid);
3273		if (ret && !btrfs_test_opt(root, DEGRADED))
3274			return ret;
3275	}
3276
3277	device = btrfs_find_device(root, devid, dev_uuid, fs_uuid);
3278	if (!device || !device->bdev) {
3279		if (!btrfs_test_opt(root, DEGRADED))
3280			return -EIO;
3281
3282		if (!device) {
3283			printk(KERN_WARNING "warning devid %llu missing\n",
3284			       (unsigned long long)devid);
3285			device = add_missing_dev(root, devid, dev_uuid);
3286			if (!device)
3287				return -ENOMEM;
3288		}
3289	}
3290
3291	if (device->fs_devices != root->fs_info->fs_devices) {
3292		BUG_ON(device->writeable);
3293		if (device->generation !=
3294		    btrfs_device_generation(leaf, dev_item))
3295			return -EINVAL;
3296	}
3297
3298	fill_device_from_item(leaf, dev_item, device);
3299	device->dev_root = root->fs_info->dev_root;
3300	device->in_fs_metadata = 1;
3301	if (device->writeable)
3302		device->fs_devices->total_rw_bytes += device->total_bytes;
3303	ret = 0;
3304	return ret;
3305}
3306
3307int btrfs_read_super_device(struct btrfs_root *root, struct extent_buffer *buf)
3308{
3309	struct btrfs_dev_item *dev_item;
3310
3311	dev_item = (struct btrfs_dev_item *)offsetof(struct btrfs_super_block,
3312						     dev_item);
3313	return read_one_dev(root, buf, dev_item);
3314}
3315
3316int btrfs_read_sys_array(struct btrfs_root *root)
3317{
3318	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
3319	struct extent_buffer *sb;
3320	struct btrfs_disk_key *disk_key;
3321	struct btrfs_chunk *chunk;
3322	u8 *ptr;
3323	unsigned long sb_ptr;
3324	int ret = 0;
3325	u32 num_stripes;
3326	u32 array_size;
3327	u32 len = 0;
3328	u32 cur;
3329	struct btrfs_key key;
3330
3331	sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET,
3332					  BTRFS_SUPER_INFO_SIZE);
3333	if (!sb)
3334		return -ENOMEM;
3335	btrfs_set_buffer_uptodate(sb);
3336	btrfs_set_buffer_lockdep_class(sb, 0);
3337
3338	write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
3339	array_size = btrfs_super_sys_array_size(super_copy);
3340
3341	ptr = super_copy->sys_chunk_array;
3342	sb_ptr = offsetof(struct btrfs_super_block, sys_chunk_array);
3343	cur = 0;
3344
3345	while (cur < array_size) {
3346		disk_key = (struct btrfs_disk_key *)ptr;
3347		btrfs_disk_key_to_cpu(&key, disk_key);
3348
3349		len = sizeof(*disk_key); ptr += len;
3350		sb_ptr += len;
3351		cur += len;
3352
3353		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
3354			chunk = (struct btrfs_chunk *)sb_ptr;
3355			ret = read_one_chunk(root, &key, sb, chunk);
3356			if (ret)
3357				break;
3358			num_stripes = btrfs_chunk_num_stripes(sb, chunk);
3359			len = btrfs_chunk_item_size(num_stripes);
3360		} else {
3361			ret = -EIO;
3362			break;
3363		}
3364		ptr += len;
3365		sb_ptr += len;
3366		cur += len;
3367	}
3368	free_extent_buffer(sb);
3369	return ret;
3370}
3371
3372int btrfs_read_chunk_tree(struct btrfs_root *root)
3373{
3374	struct btrfs_path *path;
3375	struct extent_buffer *leaf;
3376	struct btrfs_key key;
3377	struct btrfs_key found_key;
3378	int ret;
3379	int slot;
3380
3381	root = root->fs_info->chunk_root;
3382
3383	path = btrfs_alloc_path();
3384	if (!path)
3385		return -ENOMEM;
3386
3387	/* first we search for all of the device items, and then we
3388	 * read in all of the chunk items.  This way we can create chunk
3389	 * mappings that reference all of the devices that are afound
3390	 */
3391	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
3392	key.offset = 0;
3393	key.type = 0;
3394again:
3395	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3396	if (ret < 0)
3397		goto error;
3398	while (1) {
3399		leaf = path->nodes[0];
3400		slot = path->slots[0];
3401		if (slot >= btrfs_header_nritems(leaf)) {
3402			ret = btrfs_next_leaf(root, path);
3403			if (ret == 0)
3404				continue;
3405			if (ret < 0)
3406				goto error;
3407			break;
3408		}
3409		btrfs_item_key_to_cpu(leaf, &found_key, slot);
3410		if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
3411			if (found_key.objectid != BTRFS_DEV_ITEMS_OBJECTID)
3412				break;
3413			if (found_key.type == BTRFS_DEV_ITEM_KEY) {
3414				struct btrfs_dev_item *dev_item;
3415				dev_item = btrfs_item_ptr(leaf, slot,
3416						  struct btrfs_dev_item);
3417				ret = read_one_dev(root, leaf, dev_item);
3418				if (ret)
3419					goto error;
3420			}
3421		} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
3422			struct btrfs_chunk *chunk;
3423			chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
3424			ret = read_one_chunk(root, &found_key, leaf, chunk);
3425			if (ret)
3426				goto error;
3427		}
3428		path->slots[0]++;
3429	}
3430	if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
3431		key.objectid = 0;
3432		btrfs_release_path(root, path);
3433		goto again;
3434	}
3435	ret = 0;
3436error:
3437	btrfs_free_path(path);
3438	return ret;
3439}
3440