extent_io.c revision a129ffb8
1// SPDX-License-Identifier: GPL-2.0
2
3#include <linux/bitops.h>
4#include <linux/slab.h>
5#include <linux/bio.h>
6#include <linux/mm.h>
7#include <linux/pagemap.h>
8#include <linux/page-flags.h>
9#include <linux/spinlock.h>
10#include <linux/blkdev.h>
11#include <linux/swap.h>
12#include <linux/writeback.h>
13#include <linux/pagevec.h>
14#include <linux/prefetch.h>
15#include <linux/cleancache.h>
16#include "misc.h"
17#include "extent_io.h"
18#include "extent-io-tree.h"
19#include "extent_map.h"
20#include "ctree.h"
21#include "btrfs_inode.h"
22#include "volumes.h"
23#include "check-integrity.h"
24#include "locking.h"
25#include "rcu-string.h"
26#include "backref.h"
27#include "disk-io.h"
28#include "subpage.h"
29#include "zoned.h"
30#include "block-group.h"
31
32static struct kmem_cache *extent_state_cache;
33static struct kmem_cache *extent_buffer_cache;
34static struct bio_set btrfs_bioset;
35
36static inline bool extent_state_in_tree(const struct extent_state *state)
37{
38	return !RB_EMPTY_NODE(&state->rb_node);
39}
40
41#ifdef CONFIG_BTRFS_DEBUG
42static LIST_HEAD(states);
43static DEFINE_SPINLOCK(leak_lock);
44
45static inline void btrfs_leak_debug_add(spinlock_t *lock,
46					struct list_head *new,
47					struct list_head *head)
48{
49	unsigned long flags;
50
51	spin_lock_irqsave(lock, flags);
52	list_add(new, head);
53	spin_unlock_irqrestore(lock, flags);
54}
55
56static inline void btrfs_leak_debug_del(spinlock_t *lock,
57					struct list_head *entry)
58{
59	unsigned long flags;
60
61	spin_lock_irqsave(lock, flags);
62	list_del(entry);
63	spin_unlock_irqrestore(lock, flags);
64}
65
66void btrfs_extent_buffer_leak_debug_check(struct btrfs_fs_info *fs_info)
67{
68	struct extent_buffer *eb;
69	unsigned long flags;
70
71	/*
72	 * If we didn't get into open_ctree our allocated_ebs will not be
73	 * initialized, so just skip this.
74	 */
75	if (!fs_info->allocated_ebs.next)
76		return;
77
78	spin_lock_irqsave(&fs_info->eb_leak_lock, flags);
79	while (!list_empty(&fs_info->allocated_ebs)) {
80		eb = list_first_entry(&fs_info->allocated_ebs,
81				      struct extent_buffer, leak_list);
82		pr_err(
83	"BTRFS: buffer leak start %llu len %lu refs %d bflags %lu owner %llu\n",
84		       eb->start, eb->len, atomic_read(&eb->refs), eb->bflags,
85		       btrfs_header_owner(eb));
86		list_del(&eb->leak_list);
87		kmem_cache_free(extent_buffer_cache, eb);
88	}
89	spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags);
90}
91
92static inline void btrfs_extent_state_leak_debug_check(void)
93{
94	struct extent_state *state;
95
96	while (!list_empty(&states)) {
97		state = list_entry(states.next, struct extent_state, leak_list);
98		pr_err("BTRFS: state leak: start %llu end %llu state %u in tree %d refs %d\n",
99		       state->start, state->end, state->state,
100		       extent_state_in_tree(state),
101		       refcount_read(&state->refs));
102		list_del(&state->leak_list);
103		kmem_cache_free(extent_state_cache, state);
104	}
105}
106
107#define btrfs_debug_check_extent_io_range(tree, start, end)		\
108	__btrfs_debug_check_extent_io_range(__func__, (tree), (start), (end))
109static inline void __btrfs_debug_check_extent_io_range(const char *caller,
110		struct extent_io_tree *tree, u64 start, u64 end)
111{
112	struct inode *inode = tree->private_data;
113	u64 isize;
114
115	if (!inode || !is_data_inode(inode))
116		return;
117
118	isize = i_size_read(inode);
119	if (end >= PAGE_SIZE && (end % 2) == 0 && end != isize - 1) {
120		btrfs_debug_rl(BTRFS_I(inode)->root->fs_info,
121		    "%s: ino %llu isize %llu odd range [%llu,%llu]",
122			caller, btrfs_ino(BTRFS_I(inode)), isize, start, end);
123	}
124}
125#else
126#define btrfs_leak_debug_add(lock, new, head)	do {} while (0)
127#define btrfs_leak_debug_del(lock, entry)	do {} while (0)
128#define btrfs_extent_state_leak_debug_check()	do {} while (0)
129#define btrfs_debug_check_extent_io_range(c, s, e)	do {} while (0)
130#endif
131
132struct tree_entry {
133	u64 start;
134	u64 end;
135	struct rb_node rb_node;
136};
137
138struct extent_page_data {
139	struct btrfs_bio_ctrl bio_ctrl;
140	/* tells writepage not to lock the state bits for this range;
141	 * it still does the unlocking
142	 */
143	unsigned int extent_locked:1;
144
145	/* tells the submit_bio code to use REQ_SYNC */
146	unsigned int sync_io:1;
147};
148
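/*
 * Record a change of @bits on @state in @changeset: bump the changed byte
 * count and add the [start, end] range to the ulist.  Nothing is recorded
 * when @changeset is NULL or the bits are already in the desired state.
 */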
149static int add_extent_changeset(struct extent_state *state, u32 bits,
150				 struct extent_changeset *changeset,
151				 int set)
152{
153	int ret;
154
155	if (!changeset)
156		return 0;
157	if (set && (state->state & bits) == bits)
158		return 0;
159	if (!set && (state->state & bits) == 0)
160		return 0;
161	changeset->bytes_changed += state->end - state->start + 1;
162	ret = ulist_add(&changeset->range_changed, state->start, state->end,
163			GFP_ATOMIC);
164	return ret;
165}
166
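/*
 * Submit a fully built bio to the data or metadata submission hook,
 * depending on the inode that owns the tree stored in bi_private.
 * Returns 0 on success or a negative errno.
 */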
167int __must_check submit_one_bio(struct bio *bio, int mirror_num,
168				unsigned long bio_flags)
169{
170	blk_status_t ret = 0;
171	struct extent_io_tree *tree = bio->bi_private;
172
173	bio->bi_private = NULL;
174
175	if (is_data_inode(tree->private_data))
176		ret = btrfs_submit_data_bio(tree->private_data, bio, mirror_num,
177					    bio_flags);
178	else
179		ret = btrfs_submit_metadata_bio(tree->private_data, bio,
180						mirror_num, bio_flags);
181
182	return blk_status_to_errno(ret);
183}
184
185/* Cleanup unsubmitted bios */
186static void end_write_bio(struct extent_page_data *epd, int ret)
187{
188	struct bio *bio = epd->bio_ctrl.bio;
189
190	if (bio) {
191		bio->bi_status = errno_to_blk_status(ret);
192		bio_endio(bio);
193		epd->bio_ctrl.bio = NULL;
194	}
195}
196
197/*
198 * Submit bio from extent page data via submit_one_bio
199 *
200 * Return 0 if everything is OK.
201 * Return <0 for error.
202 */
203static int __must_check flush_write_bio(struct extent_page_data *epd)
204{
205	int ret = 0;
206	struct bio *bio = epd->bio_ctrl.bio;
207
208	if (bio) {
209		ret = submit_one_bio(bio, 0, 0);
210		/*
211		 * Cleanup of epd->bio_ctrl.bio is handled by its endio function.
212		 * The endio is triggered either by successful bio execution or
213		 * by the error handler of the submit bio hook.
214		 * So at this point, no matter what happened, we don't need
215		 * to clean up epd->bio_ctrl.bio.
216		 */
217		epd->bio_ctrl.bio = NULL;
218	}
219	return ret;
220}
221
222int __init extent_state_cache_init(void)
223{
224	extent_state_cache = kmem_cache_create("btrfs_extent_state",
225			sizeof(struct extent_state), 0,
226			SLAB_MEM_SPREAD, NULL);
227	if (!extent_state_cache)
228		return -ENOMEM;
229	return 0;
230}
231
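/*
 * Create the extent buffer slab cache and the bioset used for btrfs bios,
 * unwinding whatever was already set up on failure.
 */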
232int __init extent_io_init(void)
233{
234	extent_buffer_cache = kmem_cache_create("btrfs_extent_buffer",
235			sizeof(struct extent_buffer), 0,
236			SLAB_MEM_SPREAD, NULL);
237	if (!extent_buffer_cache)
238		return -ENOMEM;
239
240	if (bioset_init(&btrfs_bioset, BIO_POOL_SIZE,
241			offsetof(struct btrfs_io_bio, bio),
242			BIOSET_NEED_BVECS))
243		goto free_buffer_cache;
244
245	if (bioset_integrity_create(&btrfs_bioset, BIO_POOL_SIZE))
246		goto free_bioset;
247
248	return 0;
249
250free_bioset:
251	bioset_exit(&btrfs_bioset);
252
253free_buffer_cache:
254	kmem_cache_destroy(extent_buffer_cache);
255	extent_buffer_cache = NULL;
256	return -ENOMEM;
257}
258
259void __cold extent_state_cache_exit(void)
260{
261	btrfs_extent_state_leak_debug_check();
262	kmem_cache_destroy(extent_state_cache);
263}
264
265void __cold extent_io_exit(void)
266{
267	/*
268	 * Make sure all delayed rcu free are flushed before we
269	 * destroy caches.
270	 */
271	rcu_barrier();
272	kmem_cache_destroy(extent_buffer_cache);
273	bioset_exit(&btrfs_bioset);
274}
275
276/*
277 * For the file_extent_tree, we want to hold the inode lock when we lookup and
278 * update the disk_i_size, but lockdep will complain because with our io_tree we
279 * hold the tree lock and take the inode lock when setting delalloc.  These two things
280 * are unrelated, so make a class for the file_extent_tree so we don't get the
281 * two locking patterns mixed up.
282 */
283static struct lock_class_key file_extent_tree_class;
284
285void extent_io_tree_init(struct btrfs_fs_info *fs_info,
286			 struct extent_io_tree *tree, unsigned int owner,
287			 void *private_data)
288{
289	tree->fs_info = fs_info;
290	tree->state = RB_ROOT;
291	tree->dirty_bytes = 0;
292	spin_lock_init(&tree->lock);
293	tree->private_data = private_data;
294	tree->owner = owner;
295	if (owner == IO_TREE_INODE_FILE_EXTENT)
296		lockdep_set_class(&tree->lock, &file_extent_tree_class);
297}
298
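/*
 * Empty the tree, erasing and freeing every remaining extent state.  No
 * task is expected to be waiting on any of the states at this point.
 */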
299void extent_io_tree_release(struct extent_io_tree *tree)
300{
301	spin_lock(&tree->lock);
302	/*
303	 * Do a single barrier for the waitqueue_active check here; the state
304	 * of the waitqueue should not change once extent_io_tree_release is
305	 * called.
306	 */
307	smp_mb();
308	while (!RB_EMPTY_ROOT(&tree->state)) {
309		struct rb_node *node;
310		struct extent_state *state;
311
312		node = rb_first(&tree->state);
313		state = rb_entry(node, struct extent_state, rb_node);
314		rb_erase(&state->rb_node, &tree->state);
315		RB_CLEAR_NODE(&state->rb_node);
316		/*
317		 * btree io trees aren't supposed to have tasks waiting for
318		 * changes in the flags of extent states ever.
319		 */
320		ASSERT(!waitqueue_active(&state->wq));
321		free_extent_state(state);
322
323		cond_resched_lock(&tree->lock);
324	}
325	spin_unlock(&tree->lock);
326}
327
328static struct extent_state *alloc_extent_state(gfp_t mask)
329{
330	struct extent_state *state;
331
332	/*
333	 * The given mask might not be appropriate for the slab allocator,
334	 * so drop the unsupported bits.
335	 */
336	mask &= ~(__GFP_DMA32|__GFP_HIGHMEM);
337	state = kmem_cache_alloc(extent_state_cache, mask);
338	if (!state)
339		return state;
340	state->state = 0;
341	state->failrec = NULL;
342	RB_CLEAR_NODE(&state->rb_node);
343	btrfs_leak_debug_add(&leak_lock, &state->leak_list, &states);
344	refcount_set(&state->refs, 1);
345	init_waitqueue_head(&state->wq);
346	trace_alloc_extent_state(state, mask, _RET_IP_);
347	return state;
348}
349
350void free_extent_state(struct extent_state *state)
351{
352	if (!state)
353		return;
354	if (refcount_dec_and_test(&state->refs)) {
355		WARN_ON(extent_state_in_tree(state));
356		btrfs_leak_debug_del(&leak_lock, &state->leak_list);
357		trace_free_extent_state(state, _RET_IP_);
358		kmem_cache_free(extent_state_cache, state);
359	}
360}
361
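/*
 * Link @node into @root, keyed by the [start, end] range of each entry.
 * If @p_in and @parent_in are supplied (from a previous search) the rbtree
 * walk is skipped.  Returns the existing node whose range already covers
 * @offset, or NULL if the insertion succeeded.
 */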
362static struct rb_node *tree_insert(struct rb_root *root,
363				   struct rb_node *search_start,
364				   u64 offset,
365				   struct rb_node *node,
366				   struct rb_node ***p_in,
367				   struct rb_node **parent_in)
368{
369	struct rb_node **p;
370	struct rb_node *parent = NULL;
371	struct tree_entry *entry;
372
373	if (p_in && parent_in) {
374		p = *p_in;
375		parent = *parent_in;
376		goto do_insert;
377	}
378
379	p = search_start ? &search_start : &root->rb_node;
380	while (*p) {
381		parent = *p;
382		entry = rb_entry(parent, struct tree_entry, rb_node);
383
384		if (offset < entry->start)
385			p = &(*p)->rb_left;
386		else if (offset > entry->end)
387			p = &(*p)->rb_right;
388		else
389			return parent;
390	}
391
392do_insert:
393	rb_link_node(node, parent, p);
394	rb_insert_color(node, root);
395	return NULL;
396}
397
398/**
399 * Search @tree for an entry that contains @offset. Such entry would have
400 * entry->start <= offset && entry->end >= offset.
401 *
402 * @tree:       the tree to search
403 * @offset:     offset that should fall within an entry in @tree
404 * @next_ret:   pointer to the first entry whose range ends after @offset
405 * @prev_ret:   pointer to the first entry whose range begins before @offset
406 * @p_ret:      pointer where new node should be anchored (used when inserting an
407 *	        entry in the tree)
408 * @parent_ret: points to entry which would have been the parent of the entry,
409 *               containing @offset
410 *
411 * This function returns a pointer to the entry that contains @offset byte
412 * address. If no such entry exists, then NULL is returned and the other
413 * pointer arguments to the function are filled, otherwise the found entry is
414 * returned and other pointers are left untouched.
415 */
416static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
417				      struct rb_node **next_ret,
418				      struct rb_node **prev_ret,
419				      struct rb_node ***p_ret,
420				      struct rb_node **parent_ret)
421{
422	struct rb_root *root = &tree->state;
423	struct rb_node **n = &root->rb_node;
424	struct rb_node *prev = NULL;
425	struct rb_node *orig_prev = NULL;
426	struct tree_entry *entry;
427	struct tree_entry *prev_entry = NULL;
428
429	while (*n) {
430		prev = *n;
431		entry = rb_entry(prev, struct tree_entry, rb_node);
432		prev_entry = entry;
433
434		if (offset < entry->start)
435			n = &(*n)->rb_left;
436		else if (offset > entry->end)
437			n = &(*n)->rb_right;
438		else
439			return *n;
440	}
441
442	if (p_ret)
443		*p_ret = n;
444	if (parent_ret)
445		*parent_ret = prev;
446
447	if (next_ret) {
448		orig_prev = prev;
449		while (prev && offset > prev_entry->end) {
450			prev = rb_next(prev);
451			prev_entry = rb_entry(prev, struct tree_entry, rb_node);
452		}
453		*next_ret = prev;
454		prev = orig_prev;
455	}
456
457	if (prev_ret) {
458		prev_entry = rb_entry(prev, struct tree_entry, rb_node);
459		while (prev && offset < prev_entry->start) {
460			prev = rb_prev(prev);
461			prev_entry = rb_entry(prev, struct tree_entry, rb_node);
462		}
463		*prev_ret = prev;
464	}
465	return NULL;
466}
467
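/*
 * Same as __etree_search(), but when no entry contains @offset the first
 * entry that ends after @offset is returned instead of NULL.
 */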
468static inline struct rb_node *
469tree_search_for_insert(struct extent_io_tree *tree,
470		       u64 offset,
471		       struct rb_node ***p_ret,
472		       struct rb_node **parent_ret)
473{
474	struct rb_node *next = NULL;
475	struct rb_node *ret;
476
477	ret = __etree_search(tree, offset, &next, NULL, p_ret, parent_ret);
478	if (!ret)
479		return next;
480	return ret;
481}
482
483static inline struct rb_node *tree_search(struct extent_io_tree *tree,
484					  u64 offset)
485{
486	return tree_search_for_insert(tree, offset, NULL, NULL);
487}
488
489/*
490 * utility function to look for merge candidates inside a given range.
491 * Any extents with matching state are merged together into a single
492 * extent in the tree.  Extents with EXTENT_LOCKED or EXTENT_BOUNDARY set
493 * are not merged because the end_io handlers need to be able to do
494 * operations on them without sleeping (or doing allocations/splits).
495 *
496 * This should be called with the tree lock held.
497 */
498static void merge_state(struct extent_io_tree *tree,
499		        struct extent_state *state)
500{
501	struct extent_state *other;
502	struct rb_node *other_node;
503
504	if (state->state & (EXTENT_LOCKED | EXTENT_BOUNDARY))
505		return;
506
507	other_node = rb_prev(&state->rb_node);
508	if (other_node) {
509		other = rb_entry(other_node, struct extent_state, rb_node);
510		if (other->end == state->start - 1 &&
511		    other->state == state->state) {
512			if (tree->private_data &&
513			    is_data_inode(tree->private_data))
514				btrfs_merge_delalloc_extent(tree->private_data,
515							    state, other);
516			state->start = other->start;
517			rb_erase(&other->rb_node, &tree->state);
518			RB_CLEAR_NODE(&other->rb_node);
519			free_extent_state(other);
520		}
521	}
522	other_node = rb_next(&state->rb_node);
523	if (other_node) {
524		other = rb_entry(other_node, struct extent_state, rb_node);
525		if (other->start == state->end + 1 &&
526		    other->state == state->state) {
527			if (tree->private_data &&
528			    is_data_inode(tree->private_data))
529				btrfs_merge_delalloc_extent(tree->private_data,
530							    state, other);
531			state->end = other->end;
532			rb_erase(&other->rb_node, &tree->state);
533			RB_CLEAR_NODE(&other->rb_node);
534			free_extent_state(other);
535		}
536	}
537}
538
539static void set_state_bits(struct extent_io_tree *tree,
540			   struct extent_state *state, u32 *bits,
541			   struct extent_changeset *changeset);
542
543/*
544 * insert an extent_state struct into the tree.  'bits' are set on the
545 * struct before it is inserted.
546 *
547 * This may return -EEXIST if the extent is already there, in which case the
548 * state struct is freed.
549 *
550 * The tree lock is not taken internally.  This is a utility function and
551 * probably isn't what you want to call (see set/clear_extent_bit).
552 */
553static int insert_state(struct extent_io_tree *tree,
554			struct extent_state *state, u64 start, u64 end,
555			struct rb_node ***p,
556			struct rb_node **parent,
557			u32 *bits, struct extent_changeset *changeset)
558{
559	struct rb_node *node;
560
561	if (end < start) {
562		btrfs_err(tree->fs_info,
563			"insert state: end < start %llu %llu", end, start);
564		WARN_ON(1);
565	}
566	state->start = start;
567	state->end = end;
568
569	set_state_bits(tree, state, bits, changeset);
570
571	node = tree_insert(&tree->state, NULL, end, &state->rb_node, p, parent);
572	if (node) {
573		struct extent_state *found;
574		found = rb_entry(node, struct extent_state, rb_node);
575		btrfs_err(tree->fs_info,
576		       "found node %llu %llu on insert of %llu %llu",
577		       found->start, found->end, start, end);
578		return -EEXIST;
579	}
580	merge_state(tree, state);
581	return 0;
582}
583
584/*
585 * split a given extent state struct in two, inserting the preallocated
586 * struct 'prealloc' as the newly created second half.  'split' indicates an
587 * offset inside 'orig' where it should be split.
588 *
589 * Before calling, the tree has 'orig' at [orig->start, orig->end].  After
590 * calling, there are two extent state structs in the tree:
591 *
592 * prealloc: [orig->start, split - 1]
593 * orig: [ split, orig->end ]
594 *
595 * The tree locks are not taken by this function. They need to be held
596 * by the caller.
597 */
598static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
599		       struct extent_state *prealloc, u64 split)
600{
601	struct rb_node *node;
602
603	if (tree->private_data && is_data_inode(tree->private_data))
604		btrfs_split_delalloc_extent(tree->private_data, orig, split);
605
606	prealloc->start = orig->start;
607	prealloc->end = split - 1;
608	prealloc->state = orig->state;
609	orig->start = split;
610
611	node = tree_insert(&tree->state, &orig->rb_node, prealloc->end,
612			   &prealloc->rb_node, NULL, NULL);
613	if (node) {
614		free_extent_state(prealloc);
615		return -EEXIST;
616	}
617	return 0;
618}
619
620static struct extent_state *next_state(struct extent_state *state)
621{
622	struct rb_node *next = rb_next(&state->rb_node);
623	if (next)
624		return rb_entry(next, struct extent_state, rb_node);
625	else
626		return NULL;
627}
628
629/*
630 * utility function to clear some bits in an extent state struct.
631 * it will optionally wake up anyone waiting on this state (wake == 1).
632 *
633 * If no bits are set on the state struct after clearing things, the
634 * struct is freed and removed from the tree
635 */
636static struct extent_state *clear_state_bit(struct extent_io_tree *tree,
637					    struct extent_state *state,
638					    u32 *bits, int wake,
639					    struct extent_changeset *changeset)
640{
641	struct extent_state *next;
642	u32 bits_to_clear = *bits & ~EXTENT_CTLBITS;
643	int ret;
644
645	if ((bits_to_clear & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
646		u64 range = state->end - state->start + 1;
647		WARN_ON(range > tree->dirty_bytes);
648		tree->dirty_bytes -= range;
649	}
650
651	if (tree->private_data && is_data_inode(tree->private_data))
652		btrfs_clear_delalloc_extent(tree->private_data, state, bits);
653
654	ret = add_extent_changeset(state, bits_to_clear, changeset, 0);
655	BUG_ON(ret < 0);
656	state->state &= ~bits_to_clear;
657	if (wake)
658		wake_up(&state->wq);
659	if (state->state == 0) {
660		next = next_state(state);
661		if (extent_state_in_tree(state)) {
662			rb_erase(&state->rb_node, &tree->state);
663			RB_CLEAR_NODE(&state->rb_node);
664			free_extent_state(state);
665		} else {
666			WARN_ON(1);
667		}
668	} else {
669		merge_state(tree, state);
670		next = next_state(state);
671	}
672	return next;
673}
674
675static struct extent_state *
676alloc_extent_state_atomic(struct extent_state *prealloc)
677{
678	if (!prealloc)
679		prealloc = alloc_extent_state(GFP_ATOMIC);
680
681	return prealloc;
682}
683
684static void extent_io_tree_panic(struct extent_io_tree *tree, int err)
685{
686	btrfs_panic(tree->fs_info, err,
687	"locking error: extent tree was modified by another thread while locked");
688}
689
690/*
691 * clear some bits on a range in the tree.  This may require splitting
692 * or inserting elements in the tree, so the gfp mask is used to
693 * indicate which allocations or sleeping are allowed.
694 *
695 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
696 * the given range from the tree regardless of state (ie for truncate).
697 *
698 * the range [start, end] is inclusive.
699 *
700 * This takes the tree lock, and always returns 0; internal split errors
 * trigger extent_io_tree_panic().
701 */
702int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
703		       u32 bits, int wake, int delete,
704		       struct extent_state **cached_state,
705		       gfp_t mask, struct extent_changeset *changeset)
706{
707	struct extent_state *state;
708	struct extent_state *cached;
709	struct extent_state *prealloc = NULL;
710	struct rb_node *node;
711	u64 last_end;
712	int err;
713	int clear = 0;
714
715	btrfs_debug_check_extent_io_range(tree, start, end);
716	trace_btrfs_clear_extent_bit(tree, start, end - start + 1, bits);
717
718	if (bits & EXTENT_DELALLOC)
719		bits |= EXTENT_NORESERVE;
720
721	if (delete)
722		bits |= ~EXTENT_CTLBITS;
723
724	if (bits & (EXTENT_LOCKED | EXTENT_BOUNDARY))
725		clear = 1;
726again:
727	if (!prealloc && gfpflags_allow_blocking(mask)) {
728		/*
729		 * Don't worry about allocation failure here because we might end
730		 * up not needing the pre-allocated extent state at all, which
731		 * is the case if the tree only has extent states that cover our
732		 * input range and don't cover any other range.
733		 * If we end up needing a new extent state we allocate it later.
734		 */
735		prealloc = alloc_extent_state(mask);
736	}
737
738	spin_lock(&tree->lock);
739	if (cached_state) {
740		cached = *cached_state;
741
742		if (clear) {
743			*cached_state = NULL;
744			cached_state = NULL;
745		}
746
747		if (cached && extent_state_in_tree(cached) &&
748		    cached->start <= start && cached->end > start) {
749			if (clear)
750				refcount_dec(&cached->refs);
751			state = cached;
752			goto hit_next;
753		}
754		if (clear)
755			free_extent_state(cached);
756	}
757	/*
758	 * this search will find the extents that end after
759	 * our range starts
760	 */
761	node = tree_search(tree, start);
762	if (!node)
763		goto out;
764	state = rb_entry(node, struct extent_state, rb_node);
765hit_next:
766	if (state->start > end)
767		goto out;
768	WARN_ON(state->end < start);
769	last_end = state->end;
770
771	/* the state doesn't have the wanted bits, go ahead */
772	if (!(state->state & bits)) {
773		state = next_state(state);
774		goto next;
775	}
776
777	/*
778	 *     | ---- desired range ---- |
779	 *  | state | or
780	 *  | ------------- state -------------- |
781	 *
782	 * We need to split the extent we found, and may flip
783	 * bits on second half.
784	 *
785	 * If the extent we found extends past our range, we
786	 * just split and search again.  It'll get split again
787	 * the next time though.
788	 *
789	 * If the extent we found is inside our range, we clear
790	 * the desired bit on it.
791	 */
792
793	if (state->start < start) {
794		prealloc = alloc_extent_state_atomic(prealloc);
795		BUG_ON(!prealloc);
796		err = split_state(tree, state, prealloc, start);
797		if (err)
798			extent_io_tree_panic(tree, err);
799
800		prealloc = NULL;
801		if (err)
802			goto out;
803		if (state->end <= end) {
804			state = clear_state_bit(tree, state, &bits, wake,
805						changeset);
806			goto next;
807		}
808		goto search_again;
809	}
810	/*
811	 * | ---- desired range ---- |
812	 *                        | state |
813	 * We need to split the extent, and clear the bit
814	 * on the first half
815	 */
816	if (state->start <= end && state->end > end) {
817		prealloc = alloc_extent_state_atomic(prealloc);
818		BUG_ON(!prealloc);
819		err = split_state(tree, state, prealloc, end + 1);
820		if (err)
821			extent_io_tree_panic(tree, err);
822
823		if (wake)
824			wake_up(&state->wq);
825
826		clear_state_bit(tree, prealloc, &bits, wake, changeset);
827
828		prealloc = NULL;
829		goto out;
830	}
831
832	state = clear_state_bit(tree, state, &bits, wake, changeset);
833next:
834	if (last_end == (u64)-1)
835		goto out;
836	start = last_end + 1;
837	if (start <= end && state && !need_resched())
838		goto hit_next;
839
840search_again:
841	if (start > end)
842		goto out;
843	spin_unlock(&tree->lock);
844	if (gfpflags_allow_blocking(mask))
845		cond_resched();
846	goto again;
847
848out:
849	spin_unlock(&tree->lock);
850	if (prealloc)
851		free_extent_state(prealloc);
852
853	return 0;
854
855}
856
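/*
 * Sleep until @state->wq is woken.  The tree lock is dropped while sleeping
 * and re-taken before returning; the caller must hold an extra reference on
 * @state so it cannot be freed underneath us.
 */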
857static void wait_on_state(struct extent_io_tree *tree,
858			  struct extent_state *state)
859		__releases(tree->lock)
860		__acquires(tree->lock)
861{
862	DEFINE_WAIT(wait);
863	prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
864	spin_unlock(&tree->lock);
865	schedule();
866	spin_lock(&tree->lock);
867	finish_wait(&state->wq, &wait);
868}
869
870/*
871 * waits for one or more bits to clear on a range in the state tree.
872 * The range [start, end] is inclusive.
873 * The tree lock is taken by this function
874 */
875static void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
876			    u32 bits)
877{
878	struct extent_state *state;
879	struct rb_node *node;
880
881	btrfs_debug_check_extent_io_range(tree, start, end);
882
883	spin_lock(&tree->lock);
884again:
885	while (1) {
886		/*
887		 * this search will find all the extents that end after
888		 * our range starts
889		 */
890		node = tree_search(tree, start);
891process_node:
892		if (!node)
893			break;
894
895		state = rb_entry(node, struct extent_state, rb_node);
896
897		if (state->start > end)
898			goto out;
899
900		if (state->state & bits) {
901			start = state->start;
902			refcount_inc(&state->refs);
903			wait_on_state(tree, state);
904			free_extent_state(state);
905			goto again;
906		}
907		start = state->end + 1;
908
909		if (start > end)
910			break;
911
912		if (!cond_resched_lock(&tree->lock)) {
913			node = rb_next(node);
914			goto process_node;
915		}
916	}
917out:
918	spin_unlock(&tree->lock);
919}
920
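/*
 * Set @bits (minus the EXTENT_CTLBITS control bits) on @state, updating the
 * per-tree dirty byte accounting and recording the change in @changeset.
 */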
921static void set_state_bits(struct extent_io_tree *tree,
922			   struct extent_state *state,
923			   u32 *bits, struct extent_changeset *changeset)
924{
925	u32 bits_to_set = *bits & ~EXTENT_CTLBITS;
926	int ret;
927
928	if (tree->private_data && is_data_inode(tree->private_data))
929		btrfs_set_delalloc_extent(tree->private_data, state, bits);
930
931	if ((bits_to_set & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
932		u64 range = state->end - state->start + 1;
933		tree->dirty_bytes += range;
934	}
935	ret = add_extent_changeset(state, bits_to_set, changeset, 1);
936	BUG_ON(ret < 0);
937	state->state |= bits_to_set;
938}
939
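/*
 * Stash @state in *@cached_ptr and take a reference on it, but only if
 * nothing is cached yet and the state carries any of @flags (a zero @flags
 * caches unconditionally).
 */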
940static void cache_state_if_flags(struct extent_state *state,
941				 struct extent_state **cached_ptr,
942				 unsigned flags)
943{
944	if (cached_ptr && !(*cached_ptr)) {
945		if (!flags || (state->state & flags)) {
946			*cached_ptr = state;
947			refcount_inc(&state->refs);
948		}
949	}
950}
951
952static void cache_state(struct extent_state *state,
953			struct extent_state **cached_ptr)
954{
955	return cache_state_if_flags(state, cached_ptr,
956				    EXTENT_LOCKED | EXTENT_BOUNDARY);
957}
958
959/*
960 * set some bits on a range in the tree.  This may require allocations or
961 * sleeping, so the gfp mask is used to indicate what is allowed.
962 *
963 * If any of the exclusive bits are set, this will fail with -EEXIST if some
964 * part of the range already has the desired bits set.  The start of the
965 * existing range is returned in failed_start in this case.
966 *
967 * [start, end] is inclusive.  This takes the tree lock.
968 */
969int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, u32 bits,
970		   u32 exclusive_bits, u64 *failed_start,
971		   struct extent_state **cached_state, gfp_t mask,
972		   struct extent_changeset *changeset)
973{
974	struct extent_state *state;
975	struct extent_state *prealloc = NULL;
976	struct rb_node *node;
977	struct rb_node **p;
978	struct rb_node *parent;
979	int err = 0;
980	u64 last_start;
981	u64 last_end;
982
983	btrfs_debug_check_extent_io_range(tree, start, end);
984	trace_btrfs_set_extent_bit(tree, start, end - start + 1, bits);
985
986	if (exclusive_bits)
987		ASSERT(failed_start);
988	else
989		ASSERT(failed_start == NULL);
990again:
991	if (!prealloc && gfpflags_allow_blocking(mask)) {
992		/*
993		 * Don't worry about allocation failure here because we might end
994		 * up not needing the pre-allocated extent state at all, which
995		 * is the case if the tree only has extent states that cover our
996		 * input range and don't cover any other range.
997		 * If we end up needing a new extent state we allocate it later.
998		 */
999		prealloc = alloc_extent_state(mask);
1000	}
1001
1002	spin_lock(&tree->lock);
1003	if (cached_state && *cached_state) {
1004		state = *cached_state;
1005		if (state->start <= start && state->end > start &&
1006		    extent_state_in_tree(state)) {
1007			node = &state->rb_node;
1008			goto hit_next;
1009		}
1010	}
1011	/*
1012	 * this search will find all the extents that end after
1013	 * our range starts.
1014	 */
1015	node = tree_search_for_insert(tree, start, &p, &parent);
1016	if (!node) {
1017		prealloc = alloc_extent_state_atomic(prealloc);
1018		BUG_ON(!prealloc);
1019		err = insert_state(tree, prealloc, start, end,
1020				   &p, &parent, &bits, changeset);
1021		if (err)
1022			extent_io_tree_panic(tree, err);
1023
1024		cache_state(prealloc, cached_state);
1025		prealloc = NULL;
1026		goto out;
1027	}
1028	state = rb_entry(node, struct extent_state, rb_node);
1029hit_next:
1030	last_start = state->start;
1031	last_end = state->end;
1032
1033	/*
1034	 * | ---- desired range ---- |
1035	 * | state |
1036	 *
1037	 * Just lock what we found and keep going
1038	 */
1039	if (state->start == start && state->end <= end) {
1040		if (state->state & exclusive_bits) {
1041			*failed_start = state->start;
1042			err = -EEXIST;
1043			goto out;
1044		}
1045
1046		set_state_bits(tree, state, &bits, changeset);
1047		cache_state(state, cached_state);
1048		merge_state(tree, state);
1049		if (last_end == (u64)-1)
1050			goto out;
1051		start = last_end + 1;
1052		state = next_state(state);
1053		if (start < end && state && state->start == start &&
1054		    !need_resched())
1055			goto hit_next;
1056		goto search_again;
1057	}
1058
1059	/*
1060	 *     | ---- desired range ---- |
1061	 * | state |
1062	 *   or
1063	 * | ------------- state -------------- |
1064	 *
1065	 * We need to split the extent we found, and may flip bits on
1066	 * second half.
1067	 *
1068	 * If the extent we found extends past our
1069	 * range, we just split and search again.  It'll get split
1070	 * again the next time though.
1071	 *
1072	 * If the extent we found is inside our range, we set the
1073	 * desired bit on it.
1074	 */
1075	if (state->start < start) {
1076		if (state->state & exclusive_bits) {
1077			*failed_start = start;
1078			err = -EEXIST;
1079			goto out;
1080		}
1081
1082		/*
1083		 * If this extent already has all the bits we want set, then
1084		 * skip it, not necessary to split it or do anything with it.
1085		 */
1086		if ((state->state & bits) == bits) {
1087			start = state->end + 1;
1088			cache_state(state, cached_state);
1089			goto search_again;
1090		}
1091
1092		prealloc = alloc_extent_state_atomic(prealloc);
1093		BUG_ON(!prealloc);
1094		err = split_state(tree, state, prealloc, start);
1095		if (err)
1096			extent_io_tree_panic(tree, err);
1097
1098		prealloc = NULL;
1099		if (err)
1100			goto out;
1101		if (state->end <= end) {
1102			set_state_bits(tree, state, &bits, changeset);
1103			cache_state(state, cached_state);
1104			merge_state(tree, state);
1105			if (last_end == (u64)-1)
1106				goto out;
1107			start = last_end + 1;
1108			state = next_state(state);
1109			if (start < end && state && state->start == start &&
1110			    !need_resched())
1111				goto hit_next;
1112		}
1113		goto search_again;
1114	}
1115	/*
1116	 * | ---- desired range ---- |
1117	 *     | state | or               | state |
1118	 *
1119	 * There's a hole, we need to insert something in it and
1120	 * ignore the extent we found.
1121	 */
1122	if (state->start > start) {
1123		u64 this_end;
1124		if (end < last_start)
1125			this_end = end;
1126		else
1127			this_end = last_start - 1;
1128
1129		prealloc = alloc_extent_state_atomic(prealloc);
1130		BUG_ON(!prealloc);
1131
1132		/*
1133		 * Avoid freeing 'prealloc' if it can be merged with
1134		 * the later extent.
1135		 */
1136		err = insert_state(tree, prealloc, start, this_end,
1137				   NULL, NULL, &bits, changeset);
1138		if (err)
1139			extent_io_tree_panic(tree, err);
1140
1141		cache_state(prealloc, cached_state);
1142		prealloc = NULL;
1143		start = this_end + 1;
1144		goto search_again;
1145	}
1146	/*
1147	 * | ---- desired range ---- |
1148	 *                        | state |
1149	 * We need to split the extent, and set the bit
1150	 * on the first half
1151	 */
1152	if (state->start <= end && state->end > end) {
1153		if (state->state & exclusive_bits) {
1154			*failed_start = start;
1155			err = -EEXIST;
1156			goto out;
1157		}
1158
1159		prealloc = alloc_extent_state_atomic(prealloc);
1160		BUG_ON(!prealloc);
1161		err = split_state(tree, state, prealloc, end + 1);
1162		if (err)
1163			extent_io_tree_panic(tree, err);
1164
1165		set_state_bits(tree, prealloc, &bits, changeset);
1166		cache_state(prealloc, cached_state);
1167		merge_state(tree, prealloc);
1168		prealloc = NULL;
1169		goto out;
1170	}
1171
1172search_again:
1173	if (start > end)
1174		goto out;
1175	spin_unlock(&tree->lock);
1176	if (gfpflags_allow_blocking(mask))
1177		cond_resched();
1178	goto again;
1179
1180out:
1181	spin_unlock(&tree->lock);
1182	if (prealloc)
1183		free_extent_state(prealloc);
1184
1185	return err;
1186
1187}
1188
1189/**
1190 * convert_extent_bit - convert all bits in a given range from one bit to
1191 * 			another
1192 * @tree:	the io tree to search
1193 * @start:	the start offset in bytes
1194 * @end:	the end offset in bytes (inclusive)
1195 * @bits:	the bits to set in this range
1196 * @clear_bits:	the bits to clear in this range
1197 * @cached_state:	state that we're going to cache
1198 *
1199 * This will go through and set bits for the given range.  If any states exist
1200 * already in this range they are set with the given bit and cleared of the
1201 * clear_bits.  This is only meant to be used by things that are mergeable, ie
1202 * converting from say DELALLOC to DIRTY.  This is not meant to be used with
1203 * boundary bits like LOCK.
1204 *
1205 * All allocations are done with GFP_NOFS.
1206 */
1207int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
1208		       u32 bits, u32 clear_bits,
1209		       struct extent_state **cached_state)
1210{
1211	struct extent_state *state;
1212	struct extent_state *prealloc = NULL;
1213	struct rb_node *node;
1214	struct rb_node **p;
1215	struct rb_node *parent;
1216	int err = 0;
1217	u64 last_start;
1218	u64 last_end;
1219	bool first_iteration = true;
1220
1221	btrfs_debug_check_extent_io_range(tree, start, end);
1222	trace_btrfs_convert_extent_bit(tree, start, end - start + 1, bits,
1223				       clear_bits);
1224
1225again:
1226	if (!prealloc) {
1227		/*
1228		 * Best effort, don't worry if extent state allocation fails
1229		 * here for the first iteration. We might have a cached state
1230		 * that matches exactly the target range, in which case no
1231		 * extent state allocations are needed. We'll only know this
1232		 * after locking the tree.
1233		 */
1234		prealloc = alloc_extent_state(GFP_NOFS);
1235		if (!prealloc && !first_iteration)
1236			return -ENOMEM;
1237	}
1238
1239	spin_lock(&tree->lock);
1240	if (cached_state && *cached_state) {
1241		state = *cached_state;
1242		if (state->start <= start && state->end > start &&
1243		    extent_state_in_tree(state)) {
1244			node = &state->rb_node;
1245			goto hit_next;
1246		}
1247	}
1248
1249	/*
1250	 * this search will find all the extents that end after
1251	 * our range starts.
1252	 */
1253	node = tree_search_for_insert(tree, start, &p, &parent);
1254	if (!node) {
1255		prealloc = alloc_extent_state_atomic(prealloc);
1256		if (!prealloc) {
1257			err = -ENOMEM;
1258			goto out;
1259		}
1260		err = insert_state(tree, prealloc, start, end,
1261				   &p, &parent, &bits, NULL);
1262		if (err)
1263			extent_io_tree_panic(tree, err);
1264		cache_state(prealloc, cached_state);
1265		prealloc = NULL;
1266		goto out;
1267	}
1268	state = rb_entry(node, struct extent_state, rb_node);
1269hit_next:
1270	last_start = state->start;
1271	last_end = state->end;
1272
1273	/*
1274	 * | ---- desired range ---- |
1275	 * | state |
1276	 *
1277	 * Just lock what we found and keep going
1278	 */
1279	if (state->start == start && state->end <= end) {
1280		set_state_bits(tree, state, &bits, NULL);
1281		cache_state(state, cached_state);
1282		state = clear_state_bit(tree, state, &clear_bits, 0, NULL);
1283		if (last_end == (u64)-1)
1284			goto out;
1285		start = last_end + 1;
1286		if (start < end && state && state->start == start &&
1287		    !need_resched())
1288			goto hit_next;
1289		goto search_again;
1290	}
1291
1292	/*
1293	 *     | ---- desired range ---- |
1294	 * | state |
1295	 *   or
1296	 * | ------------- state -------------- |
1297	 *
1298	 * We need to split the extent we found, and may flip bits on
1299	 * second half.
1300	 *
1301	 * If the extent we found extends past our
1302	 * range, we just split and search again.  It'll get split
1303	 * again the next time though.
1304	 *
1305	 * If the extent we found is inside our range, we set the
1306	 * desired bit on it.
1307	 */
1308	if (state->start < start) {
1309		prealloc = alloc_extent_state_atomic(prealloc);
1310		if (!prealloc) {
1311			err = -ENOMEM;
1312			goto out;
1313		}
1314		err = split_state(tree, state, prealloc, start);
1315		if (err)
1316			extent_io_tree_panic(tree, err);
1317		prealloc = NULL;
1318		if (err)
1319			goto out;
1320		if (state->end <= end) {
1321			set_state_bits(tree, state, &bits, NULL);
1322			cache_state(state, cached_state);
1323			state = clear_state_bit(tree, state, &clear_bits, 0,
1324						NULL);
1325			if (last_end == (u64)-1)
1326				goto out;
1327			start = last_end + 1;
1328			if (start < end && state && state->start == start &&
1329			    !need_resched())
1330				goto hit_next;
1331		}
1332		goto search_again;
1333	}
1334	/*
1335	 * | ---- desired range ---- |
1336	 *     | state | or               | state |
1337	 *
1338	 * There's a hole, we need to insert something in it and
1339	 * ignore the extent we found.
1340	 */
1341	if (state->start > start) {
1342		u64 this_end;
1343		if (end < last_start)
1344			this_end = end;
1345		else
1346			this_end = last_start - 1;
1347
1348		prealloc = alloc_extent_state_atomic(prealloc);
1349		if (!prealloc) {
1350			err = -ENOMEM;
1351			goto out;
1352		}
1353
1354		/*
1355		 * Avoid freeing 'prealloc' if it can be merged with
1356		 * the later extent.
1357		 */
1358		err = insert_state(tree, prealloc, start, this_end,
1359				   NULL, NULL, &bits, NULL);
1360		if (err)
1361			extent_io_tree_panic(tree, err);
1362		cache_state(prealloc, cached_state);
1363		prealloc = NULL;
1364		start = this_end + 1;
1365		goto search_again;
1366	}
1367	/*
1368	 * | ---- desired range ---- |
1369	 *                        | state |
1370	 * We need to split the extent, and set the bit
1371	 * on the first half
1372	 */
1373	if (state->start <= end && state->end > end) {
1374		prealloc = alloc_extent_state_atomic(prealloc);
1375		if (!prealloc) {
1376			err = -ENOMEM;
1377			goto out;
1378		}
1379
1380		err = split_state(tree, state, prealloc, end + 1);
1381		if (err)
1382			extent_io_tree_panic(tree, err);
1383
1384		set_state_bits(tree, prealloc, &bits, NULL);
1385		cache_state(prealloc, cached_state);
1386		clear_state_bit(tree, prealloc, &clear_bits, 0, NULL);
1387		prealloc = NULL;
1388		goto out;
1389	}
1390
1391search_again:
1392	if (start > end)
1393		goto out;
1394	spin_unlock(&tree->lock);
1395	cond_resched();
1396	first_iteration = false;
1397	goto again;
1398
1399out:
1400	spin_unlock(&tree->lock);
1401	if (prealloc)
1402		free_extent_state(prealloc);
1403
1404	return err;
1405}
1406
1407/* wrappers around set/clear extent bit */
1408int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1409			   u32 bits, struct extent_changeset *changeset)
1410{
1411	/*
1412	 * We don't support EXTENT_LOCKED yet, as the current changeset will
1413	 * record any bits changed, so for the EXTENT_LOCKED case it would
1414	 * either fail with -EEXIST or the changeset would record the whole
1415	 * range.
1416	 */
1417	BUG_ON(bits & EXTENT_LOCKED);
1418
1419	return set_extent_bit(tree, start, end, bits, 0, NULL, NULL, GFP_NOFS,
1420			      changeset);
1421}
1422
1423int set_extent_bits_nowait(struct extent_io_tree *tree, u64 start, u64 end,
1424			   u32 bits)
1425{
1426	return set_extent_bit(tree, start, end, bits, 0, NULL, NULL,
1427			      GFP_NOWAIT, NULL);
1428}
1429
1430int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
1431		     u32 bits, int wake, int delete,
1432		     struct extent_state **cached)
1433{
1434	return __clear_extent_bit(tree, start, end, bits, wake, delete,
1435				  cached, GFP_NOFS, NULL);
1436}
1437
1438int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1439		u32 bits, struct extent_changeset *changeset)
1440{
1441	/*
1442	 * Don't support EXTENT_LOCKED case, same reason as
1443	 * set_record_extent_bits().
1444	 */
1445	BUG_ON(bits & EXTENT_LOCKED);
1446
1447	return __clear_extent_bit(tree, start, end, bits, 0, 0, NULL, GFP_NOFS,
1448				  changeset);
1449}
1450
1451/*
1452 * Either insert or lock the state struct between start and end.  If the
1453 * range is already locked, wait for it to be unlocked and retry.
1454 */
1455int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1456		     struct extent_state **cached_state)
1457{
1458	int err;
1459	u64 failed_start;
1460
1461	while (1) {
1462		err = set_extent_bit(tree, start, end, EXTENT_LOCKED,
1463				     EXTENT_LOCKED, &failed_start,
1464				     cached_state, GFP_NOFS, NULL);
1465		if (err == -EEXIST) {
1466			wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
1467			start = failed_start;
1468		} else
1469			break;
1470		WARN_ON(start > end);
1471	}
1472	return err;
1473}
1474
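/*
 * Non-blocking counterpart of lock_extent_bits(): returns 1 if the whole
 * range was locked, otherwise any partially taken lock is rolled back and
 * 0 is returned.
 */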
1475int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
1476{
1477	int err;
1478	u64 failed_start;
1479
1480	err = set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED,
1481			     &failed_start, NULL, GFP_NOFS, NULL);
1482	if (err == -EEXIST) {
1483		if (failed_start > start)
1484			clear_extent_bit(tree, start, failed_start - 1,
1485					 EXTENT_LOCKED, 1, 0, NULL);
1486		return 0;
1487	}
1488	return 1;
1489}
1490
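/*
 * Clear the dirty flag on every page backing the byte range [start, end].
 * All pages are expected to be present in the page cache.
 */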
1491void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end)
1492{
1493	unsigned long index = start >> PAGE_SHIFT;
1494	unsigned long end_index = end >> PAGE_SHIFT;
1495	struct page *page;
1496
1497	while (index <= end_index) {
1498		page = find_get_page(inode->i_mapping, index);
1499		BUG_ON(!page); /* Pages should be in the extent_io_tree */
1500		clear_page_dirty_for_io(page);
1501		put_page(page);
1502		index++;
1503	}
1504}
1505
1506void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end)
1507{
1508	unsigned long index = start >> PAGE_SHIFT;
1509	unsigned long end_index = end >> PAGE_SHIFT;
1510	struct page *page;
1511
1512	while (index <= end_index) {
1513		page = find_get_page(inode->i_mapping, index);
1514		BUG_ON(!page); /* Pages should be in the extent_io_tree */
1515		__set_page_dirty_nobuffers(page);
1516		account_page_redirty(page);
1517		put_page(page);
1518		index++;
1519	}
1520}
1521
1522/* find the first state struct with 'bits' set after 'start', and
1523 * return it.  tree->lock must be held.  NULL will be returned if
1524 * nothing was found after 'start'.
1525 */
1526static struct extent_state *
1527find_first_extent_bit_state(struct extent_io_tree *tree, u64 start, u32 bits)
1528{
1529	struct rb_node *node;
1530	struct extent_state *state;
1531
1532	/*
1533	 * this search will find all the extents that end after
1534	 * our range starts.
1535	 */
1536	node = tree_search(tree, start);
1537	if (!node)
1538		goto out;
1539
1540	while (1) {
1541		state = rb_entry(node, struct extent_state, rb_node);
1542		if (state->end >= start && (state->state & bits))
1543			return state;
1544
1545		node = rb_next(node);
1546		if (!node)
1547			break;
1548	}
1549out:
1550	return NULL;
1551}
1552
1553/*
1554 * Find the first offset in the io tree with one or more @bits set.
1555 *
1556 * Note: If there are multiple bits set in @bits, any of them will match.
1557 *
1558 * Return 0 if we find something, and update @start_ret and @end_ret.
1559 * Return 1 if we found nothing.
1560 */
1561int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
1562			  u64 *start_ret, u64 *end_ret, u32 bits,
1563			  struct extent_state **cached_state)
1564{
1565	struct extent_state *state;
1566	int ret = 1;
1567
1568	spin_lock(&tree->lock);
1569	if (cached_state && *cached_state) {
1570		state = *cached_state;
1571		if (state->end == start - 1 && extent_state_in_tree(state)) {
1572			while ((state = next_state(state)) != NULL) {
1573				if (state->state & bits)
1574					goto got_it;
1575			}
1576			free_extent_state(*cached_state);
1577			*cached_state = NULL;
1578			goto out;
1579		}
1580		free_extent_state(*cached_state);
1581		*cached_state = NULL;
1582	}
1583
1584	state = find_first_extent_bit_state(tree, start, bits);
1585got_it:
1586	if (state) {
1587		cache_state_if_flags(state, cached_state, 0);
1588		*start_ret = state->start;
1589		*end_ret = state->end;
1590		ret = 0;
1591	}
1592out:
1593	spin_unlock(&tree->lock);
1594	return ret;
1595}
1596
1597/**
1598 * Find a contiguous area of bits
1599 *
1600 * @tree:      io tree to check
1601 * @start:     offset to start the search from
1602 * @start_ret: the first offset we found with the bits set
1603 * @end_ret:   the final contiguous range of the bits that were set
1604 * @bits:      bits to look for
1605 *
1606 * set_extent_bit and clear_extent_bit can temporarily split contiguous ranges
1607 * to set bits appropriately, and then merge them again.  During this time it
1608 * will drop the tree->lock, so use this helper if you want to find the actual
1609 * contiguous area for given bits.  We will search to the first bit we find, and
1610 * then walk down the tree until we find a non-contiguous area.  The area
1611 * returned will be the full contiguous area with the bits set.
1612 */
1613int find_contiguous_extent_bit(struct extent_io_tree *tree, u64 start,
1614			       u64 *start_ret, u64 *end_ret, u32 bits)
1615{
1616	struct extent_state *state;
1617	int ret = 1;
1618
1619	spin_lock(&tree->lock);
1620	state = find_first_extent_bit_state(tree, start, bits);
1621	if (state) {
1622		*start_ret = state->start;
1623		*end_ret = state->end;
1624		while ((state = next_state(state)) != NULL) {
1625			if (state->start > (*end_ret + 1))
1626				break;
1627			*end_ret = state->end;
1628		}
1629		ret = 0;
1630	}
1631	spin_unlock(&tree->lock);
1632	return ret;
1633}
1634
1635/**
1636 * Find the first range that has @bits not set. This range could start before
1637 * @start.
1638 *
1639 * @tree:      the tree to search
1640 * @start:     offset at/after which the found extent should start
1641 * @start_ret: records the beginning of the range
1642 * @end_ret:   records the end of the range (inclusive)
1643 * @bits:      the set of bits which must be unset
1644 *
1645 * Since an unallocated range is also considered one which doesn't have the bits
1646 * set, it's possible that @end_ret contains -1; this happens when the range
1647 * spans (last_range_end, end of device]. In this case it's up to the caller to
1648 * trim @end_ret to the appropriate size.
1649 */
1650void find_first_clear_extent_bit(struct extent_io_tree *tree, u64 start,
1651				 u64 *start_ret, u64 *end_ret, u32 bits)
1652{
1653	struct extent_state *state;
1654	struct rb_node *node, *prev = NULL, *next;
1655
1656	spin_lock(&tree->lock);
1657
1658	/* Find first extent with bits cleared */
1659	while (1) {
1660		node = __etree_search(tree, start, &next, &prev, NULL, NULL);
1661		if (!node && !next && !prev) {
1662			/*
1663			 * Tree is completely empty, send full range and let
1664			 * caller deal with it
1665			 */
1666			*start_ret = 0;
1667			*end_ret = -1;
1668			goto out;
1669		} else if (!node && !next) {
1670			/*
1671			 * We are past the last allocated chunk, set start at
1672			 * the end of the last extent.
1673			 */
1674			state = rb_entry(prev, struct extent_state, rb_node);
1675			*start_ret = state->end + 1;
1676			*end_ret = -1;
1677			goto out;
1678		} else if (!node) {
1679			node = next;
1680		}
1681		/*
1682		 * At this point 'node' either contains 'start' or start is
1683		 * before 'node'
1684		 */
1685		state = rb_entry(node, struct extent_state, rb_node);
1686
1687		if (in_range(start, state->start, state->end - state->start + 1)) {
1688			if (state->state & bits) {
1689				/*
1690				 * |--range with bits set--|
1691				 *    |
1692				 *    start
1693				 */
1694				start = state->end + 1;
1695			} else {
1696				/*
1697				 * 'start' falls within a range that doesn't
1698				 * have the bits set, so take its start as
1699				 * the beginning of the desired range
1700				 *
1701				 * |--range with bits cleared----|
1702				 *      |
1703				 *      start
1704				 */
1705				*start_ret = state->start;
1706				break;
1707			}
1708		} else {
1709			/*
1710			 * |---prev range---|---hole/unset---|---node range---|
1711			 *                          |
1712			 *                        start
1713			 *
1714			 *                        or
1715			 *
1716			 * |---hole/unset--||--first node--|
1717			 * 0   |
1718			 *    start
1719			 */
1720			if (prev) {
1721				state = rb_entry(prev, struct extent_state,
1722						 rb_node);
1723				*start_ret = state->end + 1;
1724			} else {
1725				*start_ret = 0;
1726			}
1727			break;
1728		}
1729	}
1730
1731	/*
1732	 * Find the longest stretch from start until an entry which has the
1733	 * bits set
1734	 */
1735	while (1) {
1736		state = rb_entry(node, struct extent_state, rb_node);
1737		if (state->end >= start && !(state->state & bits)) {
1738			*end_ret = state->end;
1739		} else {
1740			*end_ret = state->start - 1;
1741			break;
1742		}
1743
1744		node = rb_next(node);
1745		if (!node)
1746			break;
1747	}
1748out:
1749	spin_unlock(&tree->lock);
1750}
1751
1752/*
1753 * find a contiguous range of bytes in the file marked as delalloc, not
1754 * more than 'max_bytes'.  'start' and 'end' are used to return the range.
1755 *
1756 * true is returned if we find something, false if nothing was in the tree
1757 */
1758bool btrfs_find_delalloc_range(struct extent_io_tree *tree, u64 *start,
1759			       u64 *end, u64 max_bytes,
1760			       struct extent_state **cached_state)
1761{
1762	struct rb_node *node;
1763	struct extent_state *state;
1764	u64 cur_start = *start;
1765	bool found = false;
1766	u64 total_bytes = 0;
1767
1768	spin_lock(&tree->lock);
1769
1770	/*
1771	 * this search will find all the extents that end after
1772	 * our range starts.
1773	 */
1774	node = tree_search(tree, cur_start);
1775	if (!node) {
1776		*end = (u64)-1;
1777		goto out;
1778	}
1779
1780	while (1) {
1781		state = rb_entry(node, struct extent_state, rb_node);
1782		if (found && (state->start != cur_start ||
1783			      (state->state & EXTENT_BOUNDARY))) {
1784			goto out;
1785		}
1786		if (!(state->state & EXTENT_DELALLOC)) {
1787			if (!found)
1788				*end = state->end;
1789			goto out;
1790		}
1791		if (!found) {
1792			*start = state->start;
1793			*cached_state = state;
1794			refcount_inc(&state->refs);
1795		}
1796		found = true;
1797		*end = state->end;
1798		cur_start = state->end + 1;
1799		node = rb_next(node);
1800		total_bytes += state->end - state->start + 1;
1801		if (total_bytes >= max_bytes)
1802			break;
1803		if (!node)
1804			break;
1805	}
1806out:
1807	spin_unlock(&tree->lock);
1808	return found;
1809}
1810
1811/*
1812 * Process one page for __process_pages_contig().
1813 *
1814 * Return >0 if we hit @page == @locked_page.
1815 * Return 0 if we updated the page status.
1816 * Return -EAGAIN if we need to try again (PAGE_LOCK case where the page is
1817 * no longer dirty or no longer belongs to the mapping).
1818 */
1819static int process_one_page(struct btrfs_fs_info *fs_info,
1820			    struct address_space *mapping,
1821			    struct page *page, struct page *locked_page,
1822			    unsigned long page_ops, u64 start, u64 end)
1823{
1824	u32 len;
1825
1826	ASSERT(end + 1 - start != 0 && end + 1 - start < U32_MAX);
1827	len = end + 1 - start;
1828
1829	if (page_ops & PAGE_SET_ORDERED)
1830		btrfs_page_clamp_set_ordered(fs_info, page, start, len);
1831	if (page_ops & PAGE_SET_ERROR)
1832		btrfs_page_clamp_set_error(fs_info, page, start, len);
1833	if (page_ops & PAGE_START_WRITEBACK) {
1834		btrfs_page_clamp_clear_dirty(fs_info, page, start, len);
1835		btrfs_page_clamp_set_writeback(fs_info, page, start, len);
1836	}
1837	if (page_ops & PAGE_END_WRITEBACK)
1838		btrfs_page_clamp_clear_writeback(fs_info, page, start, len);
1839
1840	if (page == locked_page)
1841		return 1;
1842
1843	if (page_ops & PAGE_LOCK) {
1844		int ret;
1845
1846		ret = btrfs_page_start_writer_lock(fs_info, page, start, len);
1847		if (ret)
1848			return ret;
1849		if (!PageDirty(page) || page->mapping != mapping) {
1850			btrfs_page_end_writer_lock(fs_info, page, start, len);
1851			return -EAGAIN;
1852		}
1853	}
1854	if (page_ops & PAGE_UNLOCK)
1855		btrfs_page_end_writer_lock(fs_info, page, start, len);
1856	return 0;
1857}
1858
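/*
 * Apply @page_ops to every page covering the byte range [start, end].
 * @locked_page only gets the writeback/ordered/error updates, it is never
 * locked or unlocked here.  For PAGE_LOCK, @processed_end is advanced to
 * the last byte whose page was locked so the caller can unwind on -EAGAIN.
 */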
1859static int __process_pages_contig(struct address_space *mapping,
1860				  struct page *locked_page,
1861				  u64 start, u64 end, unsigned long page_ops,
1862				  u64 *processed_end)
1863{
1864	struct btrfs_fs_info *fs_info = btrfs_sb(mapping->host->i_sb);
1865	pgoff_t start_index = start >> PAGE_SHIFT;
1866	pgoff_t end_index = end >> PAGE_SHIFT;
1867	pgoff_t index = start_index;
1868	unsigned long nr_pages = end_index - start_index + 1;
1869	unsigned long pages_processed = 0;
1870	struct page *pages[16];
1871	int err = 0;
1872	int i;
1873
1874	if (page_ops & PAGE_LOCK) {
1875		ASSERT(page_ops == PAGE_LOCK);
1876		ASSERT(processed_end && *processed_end == start);
1877	}
1878
1879	if ((page_ops & PAGE_SET_ERROR) && nr_pages > 0)
1880		mapping_set_error(mapping, -EIO);
1881
1882	while (nr_pages > 0) {
1883		int found_pages;
1884
1885		found_pages = find_get_pages_contig(mapping, index,
1886				     min_t(unsigned long,
1887				     nr_pages, ARRAY_SIZE(pages)), pages);
1888		if (found_pages == 0) {
1889			/*
1890			 * Only if we're going to lock these pages can we find
1891			 * nothing at @index.
1892			 */
1893			ASSERT(page_ops & PAGE_LOCK);
1894			err = -EAGAIN;
1895			goto out;
1896		}
1897
1898		for (i = 0; i < found_pages; i++) {
1899			int process_ret;
1900
1901			process_ret = process_one_page(fs_info, mapping,
1902					pages[i], locked_page, page_ops,
1903					start, end);
1904			if (process_ret < 0) {
1905				for (; i < found_pages; i++)
1906					put_page(pages[i]);
1907				err = -EAGAIN;
1908				goto out;
1909			}
1910			put_page(pages[i]);
1911			pages_processed++;
1912		}
1913		nr_pages -= found_pages;
1914		index += found_pages;
1915		cond_resched();
1916	}
1917out:
1918	if (err && processed_end) {
1919		/*
1920		 * Update @processed_end. I know this is awful since it has
1921		 * two different return value patterns (inclusive vs exclusive).
1922		 *
1923		 * But the exclusive pattern is necessary if @start is 0, otherwise
1924		 * we would underflow and the check against processed_end wouldn't
1925		 * work as expected.
1926		 */
1927		if (pages_processed)
1928			*processed_end = min(end,
1929			((u64)(start_index + pages_processed) << PAGE_SHIFT) - 1);
1930		else
1931			*processed_end = start;
1932	}
1933	return err;
1934}
1935
1936static noinline void __unlock_for_delalloc(struct inode *inode,
1937					   struct page *locked_page,
1938					   u64 start, u64 end)
1939{
1940	unsigned long index = start >> PAGE_SHIFT;
1941	unsigned long end_index = end >> PAGE_SHIFT;
1942
1943	ASSERT(locked_page);
1944	if (index == locked_page->index && end_index == index)
1945		return;
1946
1947	__process_pages_contig(inode->i_mapping, locked_page, start, end,
1948			       PAGE_UNLOCK, NULL);
1949}
1950
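/*
 * Lock all pages covering the delalloc range [@delalloc_start, @delalloc_end],
 * except @locked_page which the caller already holds.
 *
 * Returns 0 on success or -EAGAIN if some page could not be locked; in that
 * case the pages that were locked are unlocked again before returning.
 */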
1951static noinline int lock_delalloc_pages(struct inode *inode,
1952					struct page *locked_page,
1953					u64 delalloc_start,
1954					u64 delalloc_end)
1955{
1956	unsigned long index = delalloc_start >> PAGE_SHIFT;
1957	unsigned long end_index = delalloc_end >> PAGE_SHIFT;
1958	u64 processed_end = delalloc_start;
1959	int ret;
1960
1961	ASSERT(locked_page);
1962	if (index == locked_page->index && index == end_index)
1963		return 0;
1964
1965	ret = __process_pages_contig(inode->i_mapping, locked_page, delalloc_start,
1966				     delalloc_end, PAGE_LOCK, &processed_end);
1967	if (ret == -EAGAIN && processed_end > delalloc_start)
1968		__unlock_for_delalloc(inode, locked_page, delalloc_start,
1969				      processed_end);
1970	return ret;
1971}
1972
1973/*
1974 * Find and lock a contiguous range of bytes in the file marked as delalloc, no
1975 * more than @max_bytes.  @start and @end are used to return the range.
1976 *
1977 * Return: true if we find something
1978 *         false if nothing was in the tree
1979 */
1980EXPORT_FOR_TESTS
1981noinline_for_stack bool find_lock_delalloc_range(struct inode *inode,
1982				    struct page *locked_page, u64 *start,
1983				    u64 *end)
1984{
1985	struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
1986	u64 max_bytes = BTRFS_MAX_EXTENT_SIZE;
1987	u64 delalloc_start;
1988	u64 delalloc_end;
1989	bool found;
1990	struct extent_state *cached_state = NULL;
1991	int ret;
1992	int loops = 0;
1993
1994again:
1995	/* step one, find a bunch of delalloc bytes starting at start */
1996	delalloc_start = *start;
1997	delalloc_end = 0;
1998	found = btrfs_find_delalloc_range(tree, &delalloc_start, &delalloc_end,
1999					  max_bytes, &cached_state);
2000	if (!found || delalloc_end <= *start) {
2001		*start = delalloc_start;
2002		*end = delalloc_end;
2003		free_extent_state(cached_state);
2004		return false;
2005	}
2006
2007	/*
2008	 * start comes from the offset of locked_page.  We have to lock
2009	 * pages in order, so we can't process delalloc bytes before
2010	 * locked_page
2011	 */
2012	if (delalloc_start < *start)
2013		delalloc_start = *start;
2014
2015	/*
2016	 * make sure to limit the number of pages we try to lock down
2017	 */
2018	if (delalloc_end + 1 - delalloc_start > max_bytes)
2019		delalloc_end = delalloc_start + max_bytes - 1;
2020
2021	/* step two, lock all the pages after the page that has start */
2022	ret = lock_delalloc_pages(inode, locked_page,
2023				  delalloc_start, delalloc_end);
2024	ASSERT(!ret || ret == -EAGAIN);
2025	if (ret == -EAGAIN) {
2026		/* some of the pages are gone, let's avoid looping by
2027		 * shortening the size of the delalloc range we're searching
2028		 */
2029		free_extent_state(cached_state);
2030		cached_state = NULL;
2031		if (!loops) {
2032			max_bytes = PAGE_SIZE;
2033			loops = 1;
2034			goto again;
2035		} else {
2036			found = false;
2037			goto out_failed;
2038		}
2039	}
2040
2041	/* step three, lock the state bits for the whole range */
2042	lock_extent_bits(tree, delalloc_start, delalloc_end, &cached_state);
2043
2044	/* then test to make sure it is all still delalloc */
2045	ret = test_range_bit(tree, delalloc_start, delalloc_end,
2046			     EXTENT_DELALLOC, 1, cached_state);
2047	if (!ret) {
2048		unlock_extent_cached(tree, delalloc_start, delalloc_end,
2049				     &cached_state);
2050		__unlock_for_delalloc(inode, locked_page,
2051			      delalloc_start, delalloc_end);
2052		cond_resched();
2053		goto again;
2054	}
2055	free_extent_state(cached_state);
2056	*start = delalloc_start;
2057	*end = delalloc_end;
2058out_failed:
2059	return found;
2060}
2061
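/*
 * Clear @clear_bits from the io tree of @inode for the range [@start, @end]
 * and apply @page_ops to every page in that range, see __process_pages_contig().
 */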
2062void extent_clear_unlock_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
2063				  struct page *locked_page,
2064				  u32 clear_bits, unsigned long page_ops)
2065{
2066	clear_extent_bit(&inode->io_tree, start, end, clear_bits, 1, 0, NULL);
2067
2068	__process_pages_contig(inode->vfs_inode.i_mapping, locked_page,
2069			       start, end, page_ops, NULL);
2070}
2071
2072/*
2073 * Count the number of bytes in the tree that have the given bit(s) set.
2074 * This can be fairly slow, except for EXTENT_DIRTY which is cached.
2075 * The total number of bytes found is returned.
2076 */
2077u64 count_range_bits(struct extent_io_tree *tree,
2078		     u64 *start, u64 search_end, u64 max_bytes,
2079		     u32 bits, int contig)
2080{
2081	struct rb_node *node;
2082	struct extent_state *state;
2083	u64 cur_start = *start;
2084	u64 total_bytes = 0;
2085	u64 last = 0;
2086	int found = 0;
2087
2088	if (WARN_ON(search_end <= cur_start))
2089		return 0;
2090
2091	spin_lock(&tree->lock);
2092	if (cur_start == 0 && bits == EXTENT_DIRTY) {
2093		total_bytes = tree->dirty_bytes;
2094		goto out;
2095	}
2096	/*
2097	 * this search will find all the extents that end after
2098	 * our range starts.
2099	 */
2100	node = tree_search(tree, cur_start);
2101	if (!node)
2102		goto out;
2103
2104	while (1) {
2105		state = rb_entry(node, struct extent_state, rb_node);
2106		if (state->start > search_end)
2107			break;
2108		if (contig && found && state->start > last + 1)
2109			break;
2110		if (state->end >= cur_start && (state->state & bits) == bits) {
2111			total_bytes += min(search_end, state->end) + 1 -
2112				       max(cur_start, state->start);
2113			if (total_bytes >= max_bytes)
2114				break;
2115			if (!found) {
2116				*start = max(cur_start, state->start);
2117				found = 1;
2118			}
2119			last = state->end;
2120		} else if (contig && found) {
2121			break;
2122		}
2123		node = rb_next(node);
2124		if (!node)
2125			break;
2126	}
2127out:
2128	spin_unlock(&tree->lock);
2129	return total_bytes;
2130}
2131
2132/*
2133 * Set the failure record for a given byte offset in the tree.  If there isn't
2134 * an extent_state starting at that offset, -ENOENT is returned and nothing changes.
2135 */
2136int set_state_failrec(struct extent_io_tree *tree, u64 start,
2137		      struct io_failure_record *failrec)
2138{
2139	struct rb_node *node;
2140	struct extent_state *state;
2141	int ret = 0;
2142
2143	spin_lock(&tree->lock);
2144	/*
2145	 * this search will find all the extents that end after
2146	 * our range starts.
2147	 */
2148	node = tree_search(tree, start);
2149	if (!node) {
2150		ret = -ENOENT;
2151		goto out;
2152	}
2153	state = rb_entry(node, struct extent_state, rb_node);
2154	if (state->start != start) {
2155		ret = -ENOENT;
2156		goto out;
2157	}
2158	state->failrec = failrec;
2159out:
2160	spin_unlock(&tree->lock);
2161	return ret;
2162}
2163
2164struct io_failure_record *get_state_failrec(struct extent_io_tree *tree, u64 start)
2165{
2166	struct rb_node *node;
2167	struct extent_state *state;
2168	struct io_failure_record *failrec;
2169
2170	spin_lock(&tree->lock);
2171	/*
2172	 * this search will find all the extents that end after
2173	 * our range starts.
2174	 */
2175	node = tree_search(tree, start);
2176	if (!node) {
2177		failrec = ERR_PTR(-ENOENT);
2178		goto out;
2179	}
2180	state = rb_entry(node, struct extent_state, rb_node);
2181	if (state->start != start) {
2182		failrec = ERR_PTR(-ENOENT);
2183		goto out;
2184	}
2185
2186	failrec = state->failrec;
2187out:
2188	spin_unlock(&tree->lock);
2189	return failrec;
2190}
2191
2192/*
2193 * Search a range in the state tree for given bits.
2194 * If 'filled' == 1, this returns 1 only if every extent in the range
2195 * has the bits set.  Otherwise, 1 is returned if any bit in the
2196 * range is found set.
2197 */
2198int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
2199		   u32 bits, int filled, struct extent_state *cached)
2200{
2201	struct extent_state *state = NULL;
2202	struct rb_node *node;
2203	int bitset = 0;
2204
2205	spin_lock(&tree->lock);
2206	if (cached && extent_state_in_tree(cached) && cached->start <= start &&
2207	    cached->end > start)
2208		node = &cached->rb_node;
2209	else
2210		node = tree_search(tree, start);
2211	while (node && start <= end) {
2212		state = rb_entry(node, struct extent_state, rb_node);
2213
2214		if (filled && state->start > start) {
2215			bitset = 0;
2216			break;
2217		}
2218
2219		if (state->start > end)
2220			break;
2221
2222		if (state->state & bits) {
2223			bitset = 1;
2224			if (!filled)
2225				break;
2226		} else if (filled) {
2227			bitset = 0;
2228			break;
2229		}
2230
2231		if (state->end == (u64)-1)
2232			break;
2233
2234		start = state->end + 1;
2235		if (start > end)
2236			break;
2237		node = rb_next(node);
2238		if (!node) {
2239			if (filled)
2240				bitset = 0;
2241			break;
2242		}
2243	}
2244	spin_unlock(&tree->lock);
2245	return bitset;
2246}
2247
2248/*
2249 * helper function to set a given page up to date if all the
2250 * extents in the tree for that page are up to date
2251 */
2252static void check_page_uptodate(struct extent_io_tree *tree, struct page *page)
2253{
2254	u64 start = page_offset(page);
2255	u64 end = start + PAGE_SIZE - 1;
2256	if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL))
2257		SetPageUptodate(page);
2258}
2259
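/*
 * Drop the failure record @rec: clear its EXTENT_LOCKED | EXTENT_DIRTY bits in
 * the failure tree and its EXTENT_DAMAGED bit in the io tree, then free the
 * record.  Returns the first error from clearing the bits, if any.
 */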
2260int free_io_failure(struct extent_io_tree *failure_tree,
2261		    struct extent_io_tree *io_tree,
2262		    struct io_failure_record *rec)
2263{
2264	int ret;
2265	int err = 0;
2266
2267	set_state_failrec(failure_tree, rec->start, NULL);
2268	ret = clear_extent_bits(failure_tree, rec->start,
2269				rec->start + rec->len - 1,
2270				EXTENT_LOCKED | EXTENT_DIRTY);
2271	if (ret)
2272		err = ret;
2273
2274	ret = clear_extent_bits(io_tree, rec->start,
2275				rec->start + rec->len - 1,
2276				EXTENT_DAMAGED);
2277	if (ret && !err)
2278		err = ret;
2279
2280	kfree(rec);
2281	return err;
2282}
2283
2284/*
2285 * this bypasses the standard btrfs submit functions deliberately, as
2286 * the standard behavior is to write all copies in a raid setup. here we only
2287 * want to write the one bad copy. so we do the mapping for ourselves and issue
2288 * submit_bio directly.
2289 * to avoid any synchronization issues, wait for the data after writing, which
2290 * actually prevents the read that triggered the error from finishing.
2291 * currently, there can be no more than two copies of every data bit. thus,
2292 * exactly one rewrite is required.
2293 */
2294int repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 start,
2295		      u64 length, u64 logical, struct page *page,
2296		      unsigned int pg_offset, int mirror_num)
2297{
2298	struct bio *bio;
2299	struct btrfs_device *dev;
2300	u64 map_length = 0;
2301	u64 sector;
2302	struct btrfs_bio *bbio = NULL;
2303	int ret;
2304
2305	ASSERT(!(fs_info->sb->s_flags & SB_RDONLY));
2306	BUG_ON(!mirror_num);
2307
2308	if (btrfs_is_zoned(fs_info))
2309		return btrfs_repair_one_zone(fs_info, logical);
2310
2311	bio = btrfs_io_bio_alloc(1);
2312	bio->bi_iter.bi_size = 0;
2313	map_length = length;
2314
2315	/*
2316	 * Avoid races with device replace and make sure our bbio has devices
2317	 * associated to its stripes that don't go away while we are doing the
2318	 * read repair operation.
2319	 */
2320	btrfs_bio_counter_inc_blocked(fs_info);
2321	if (btrfs_is_parity_mirror(fs_info, logical, length)) {
2322		/*
2323		 * Note that we don't use BTRFS_MAP_WRITE because it's supposed
2324		 * to update all raid stripes, but here we just want to correct
2325		 * bad stripe, thus BTRFS_MAP_READ is abused to only get the bad
2326		 * stripe's dev and sector.
2327		 */
2328		ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, logical,
2329				      &map_length, &bbio, 0);
2330		if (ret) {
2331			btrfs_bio_counter_dec(fs_info);
2332			bio_put(bio);
2333			return -EIO;
2334		}
2335		ASSERT(bbio->mirror_num == 1);
2336	} else {
2337		ret = btrfs_map_block(fs_info, BTRFS_MAP_WRITE, logical,
2338				      &map_length, &bbio, mirror_num);
2339		if (ret) {
2340			btrfs_bio_counter_dec(fs_info);
2341			bio_put(bio);
2342			return -EIO;
2343		}
2344		BUG_ON(mirror_num != bbio->mirror_num);
2345	}
2346
2347	sector = bbio->stripes[bbio->mirror_num - 1].physical >> 9;
2348	bio->bi_iter.bi_sector = sector;
2349	dev = bbio->stripes[bbio->mirror_num - 1].dev;
2350	btrfs_put_bbio(bbio);
2351	if (!dev || !dev->bdev ||
2352	    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) {
2353		btrfs_bio_counter_dec(fs_info);
2354		bio_put(bio);
2355		return -EIO;
2356	}
2357	bio_set_dev(bio, dev->bdev);
2358	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC;
2359	bio_add_page(bio, page, length, pg_offset);
2360
2361	if (btrfsic_submit_bio_wait(bio)) {
2362		/* try to remap that extent elsewhere? */
2363		btrfs_bio_counter_dec(fs_info);
2364		bio_put(bio);
2365		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
2366		return -EIO;
2367	}
2368
2369	btrfs_info_rl_in_rcu(fs_info,
2370		"read error corrected: ino %llu off %llu (dev %s sector %llu)",
2371				  ino, start,
2372				  rcu_str_deref(dev->name), sector);
2373	btrfs_bio_counter_dec(fs_info);
2374	bio_put(bio);
2375	return 0;
2376}
2377
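/*
 * Write the in-memory contents of @eb back to mirror @mirror_num, one page at
 * a time, to repair a bad on-disk copy.  Returns -EROFS on a read-only
 * filesystem, otherwise the result of the page writes.
 */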
2378int btrfs_repair_eb_io_failure(const struct extent_buffer *eb, int mirror_num)
2379{
2380	struct btrfs_fs_info *fs_info = eb->fs_info;
2381	u64 start = eb->start;
2382	int i, num_pages = num_extent_pages(eb);
2383	int ret = 0;
2384
2385	if (sb_rdonly(fs_info->sb))
2386		return -EROFS;
2387
2388	for (i = 0; i < num_pages; i++) {
2389		struct page *p = eb->pages[i];
2390
2391		ret = repair_io_failure(fs_info, 0, start, PAGE_SIZE, start, p,
2392					start - page_offset(p), mirror_num);
2393		if (ret)
2394			break;
2395		start += PAGE_SIZE;
2396	}
2397
2398	return ret;
2399}
2400
2401/*
2402 * each time an IO finishes, we do a fast check in the IO failure tree
2403 * to see if we need to process or clean up an io_failure_record
2404 */
2405int clean_io_failure(struct btrfs_fs_info *fs_info,
2406		     struct extent_io_tree *failure_tree,
2407		     struct extent_io_tree *io_tree, u64 start,
2408		     struct page *page, u64 ino, unsigned int pg_offset)
2409{
2410	u64 private;
2411	struct io_failure_record *failrec;
2412	struct extent_state *state;
2413	int num_copies;
2414	int ret;
2415
2416	private = 0;
2417	ret = count_range_bits(failure_tree, &private, (u64)-1, 1,
2418			       EXTENT_DIRTY, 0);
2419	if (!ret)
2420		return 0;
2421
2422	failrec = get_state_failrec(failure_tree, start);
2423	if (IS_ERR(failrec))
2424		return 0;
2425
2426	BUG_ON(!failrec->this_mirror);
2427
2428	if (sb_rdonly(fs_info->sb))
2429		goto out;
2430
2431	spin_lock(&io_tree->lock);
2432	state = find_first_extent_bit_state(io_tree,
2433					    failrec->start,
2434					    EXTENT_LOCKED);
2435	spin_unlock(&io_tree->lock);
2436
2437	if (state && state->start <= failrec->start &&
2438	    state->end >= failrec->start + failrec->len - 1) {
2439		num_copies = btrfs_num_copies(fs_info, failrec->logical,
2440					      failrec->len);
2441		if (num_copies > 1)  {
2442			repair_io_failure(fs_info, ino, start, failrec->len,
2443					  failrec->logical, page, pg_offset,
2444					  failrec->failed_mirror);
2445		}
2446	}
2447
2448out:
2449	free_io_failure(failure_tree, io_tree, failrec);
2450
2451	return 0;
2452}
2453
2454/*
2455 * Can be called when
2456 * - hold extent lock
2457 * - under ordered extent
2458 * - the inode is freeing
2459 */
2460void btrfs_free_io_failure_record(struct btrfs_inode *inode, u64 start, u64 end)
2461{
2462	struct extent_io_tree *failure_tree = &inode->io_failure_tree;
2463	struct io_failure_record *failrec;
2464	struct extent_state *state, *next;
2465
2466	if (RB_EMPTY_ROOT(&failure_tree->state))
2467		return;
2468
2469	spin_lock(&failure_tree->lock);
2470	state = find_first_extent_bit_state(failure_tree, start, EXTENT_DIRTY);
2471	while (state) {
2472		if (state->start > end)
2473			break;
2474
2475		ASSERT(state->end <= end);
2476
2477		next = next_state(state);
2478
2479		failrec = state->failrec;
2480		free_extent_state(state);
2481		kfree(failrec);
2482
2483		state = next;
2484	}
2485	spin_unlock(&failure_tree->lock);
2486}
2487
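/*
 * Look up the io_failure_record for @start, creating a new one if none exists.
 *
 * A new record covers exactly one sector.  Its logical address is resolved
 * through the extent map, EXTENT_LOCKED | EXTENT_DIRTY are set in the failure
 * tree and EXTENT_DAMAGED in the io tree so clean_io_failure() can find the
 * record later.  Returns the record or an ERR_PTR on failure.
 */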
2488static struct io_failure_record *btrfs_get_io_failure_record(struct inode *inode,
2489							     u64 start)
2490{
2491	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2492	struct io_failure_record *failrec;
2493	struct extent_map *em;
2494	struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
2495	struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
2496	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
2497	const u32 sectorsize = fs_info->sectorsize;
2498	int ret;
2499	u64 logical;
2500
2501	failrec = get_state_failrec(failure_tree, start);
2502	if (!IS_ERR(failrec)) {
2503		btrfs_debug(fs_info,
2504	"Get IO Failure Record: (found) logical=%llu, start=%llu, len=%llu",
2505			failrec->logical, failrec->start, failrec->len);
2506		/*
2507		 * When data can exist on disk in more than two copies, extend
2508		 * failrec here (e.g. with a list of failed mirrors) to make
2509		 * clean_io_failure() clean all those errors at once.
2510		 */
2511
2512		return failrec;
2513	}
2514
2515	failrec = kzalloc(sizeof(*failrec), GFP_NOFS);
2516	if (!failrec)
2517		return ERR_PTR(-ENOMEM);
2518
2519	failrec->start = start;
2520	failrec->len = sectorsize;
2521	failrec->this_mirror = 0;
2522	failrec->bio_flags = 0;
2523
2524	read_lock(&em_tree->lock);
2525	em = lookup_extent_mapping(em_tree, start, failrec->len);
2526	if (!em) {
2527		read_unlock(&em_tree->lock);
2528		kfree(failrec);
2529		return ERR_PTR(-EIO);
2530	}
2531
2532	if (em->start > start || em->start + em->len <= start) {
2533		free_extent_map(em);
2534		em = NULL;
2535	}
2536	read_unlock(&em_tree->lock);
2537	if (!em) {
2538		kfree(failrec);
2539		return ERR_PTR(-EIO);
2540	}
2541
2542	logical = start - em->start;
2543	logical = em->block_start + logical;
2544	if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
2545		logical = em->block_start;
2546		failrec->bio_flags = EXTENT_BIO_COMPRESSED;
2547		extent_set_compress_type(&failrec->bio_flags, em->compress_type);
2548	}
2549
2550	btrfs_debug(fs_info,
2551		    "Get IO Failure Record: (new) logical=%llu, start=%llu, len=%llu",
2552		    logical, start, failrec->len);
2553
2554	failrec->logical = logical;
2555	free_extent_map(em);
2556
2557	/* Set the bits in the private failure tree */
2558	ret = set_extent_bits(failure_tree, start, start + sectorsize - 1,
2559			      EXTENT_LOCKED | EXTENT_DIRTY);
2560	if (ret >= 0) {
2561		ret = set_state_failrec(failure_tree, start, failrec);
2562		/* Set the bits in the inode's tree */
2563		ret = set_extent_bits(tree, start, start + sectorsize - 1,
2564				      EXTENT_DAMAGED);
2565	} else if (ret < 0) {
2566		kfree(failrec);
2567		return ERR_PTR(ret);
2568	}
2569
2570	return failrec;
2571}
2572
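/*
 * Check whether a failed read can be repaired from another copy and, if so,
 * advance @failrec->this_mirror to the next mirror to try (skipping the one
 * that just failed).
 *
 * Returns false when there is only a single copy or all mirrors were tried.
 */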
2573static bool btrfs_check_repairable(struct inode *inode,
2574				   struct io_failure_record *failrec,
2575				   int failed_mirror)
2576{
2577	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2578	int num_copies;
2579
2580	num_copies = btrfs_num_copies(fs_info, failrec->logical, failrec->len);
2581	if (num_copies == 1) {
2582		/*
2583		 * we only have a single copy of the data, so don't bother with
2584		 * all the retry and error correction code that follows. no
2585		 * matter what the error is, it is very likely to persist.
2586		 */
2587		btrfs_debug(fs_info,
2588			"Check Repairable: cannot repair, num_copies=%d, next_mirror %d, failed_mirror %d",
2589			num_copies, failrec->this_mirror, failed_mirror);
2590		return false;
2591	}
2592
2593	/* The failure record should only contain one sector */
2594	ASSERT(failrec->len == fs_info->sectorsize);
2595
2596	/*
2597	 * There are two premises:
2598	 * a) deliver good data to the caller
2599	 * b) correct the bad sectors on disk
2600	 *
2601	 * Since we're only doing repair for one sector, we only need to get
2602	 * a good copy of the failed sector and if we succeed, we have set up
2603	 * everything for repair_io_failure to do the rest for us.
2604	 */
2605	failrec->failed_mirror = failed_mirror;
2606	failrec->this_mirror++;
2607	if (failrec->this_mirror == failed_mirror)
2608		failrec->this_mirror++;
2609
2610	if (failrec->this_mirror > num_copies) {
2611		btrfs_debug(fs_info,
2612			"Check Repairable: (fail) num_copies=%d, next_mirror %d, failed_mirror %d",
2613			num_copies, failrec->this_mirror, failed_mirror);
2614		return false;
2615	}
2616
2617	return true;
2618}
2619
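/*
 * Submit a repair read for a single sector that failed either the read itself
 * or checksum verification.
 *
 * The repair bio reuses the end_io handler and private data of @failed_bio,
 * carries the original csum for this sector (if any) so the data is verified
 * again, and is submitted to the next untried mirror via @submit_bio_hook.
 */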
2620int btrfs_repair_one_sector(struct inode *inode,
2621			    struct bio *failed_bio, u32 bio_offset,
2622			    struct page *page, unsigned int pgoff,
2623			    u64 start, int failed_mirror,
2624			    submit_bio_hook_t *submit_bio_hook)
2625{
2626	struct io_failure_record *failrec;
2627	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2628	struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
2629	struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
2630	struct btrfs_io_bio *failed_io_bio = btrfs_io_bio(failed_bio);
2631	const int icsum = bio_offset >> fs_info->sectorsize_bits;
2632	struct bio *repair_bio;
2633	struct btrfs_io_bio *repair_io_bio;
2634	blk_status_t status;
2635
2636	btrfs_debug(fs_info,
2637		   "repair read error: read error at %llu", start);
2638
2639	BUG_ON(bio_op(failed_bio) == REQ_OP_WRITE);
2640
2641	failrec = btrfs_get_io_failure_record(inode, start);
2642	if (IS_ERR(failrec))
2643		return PTR_ERR(failrec);
2644
2645
2646	if (!btrfs_check_repairable(inode, failrec, failed_mirror)) {
2647		free_io_failure(failure_tree, tree, failrec);
2648		return -EIO;
2649	}
2650
2651	repair_bio = btrfs_io_bio_alloc(1);
2652	repair_io_bio = btrfs_io_bio(repair_bio);
2653	repair_bio->bi_opf = REQ_OP_READ;
2654	repair_bio->bi_end_io = failed_bio->bi_end_io;
2655	repair_bio->bi_iter.bi_sector = failrec->logical >> 9;
2656	repair_bio->bi_private = failed_bio->bi_private;
2657
2658	if (failed_io_bio->csum) {
2659		const u32 csum_size = fs_info->csum_size;
2660
2661		repair_io_bio->csum = repair_io_bio->csum_inline;
2662		memcpy(repair_io_bio->csum,
2663		       failed_io_bio->csum + csum_size * icsum, csum_size);
2664	}
2665
2666	bio_add_page(repair_bio, page, failrec->len, pgoff);
2667	repair_io_bio->logical = failrec->start;
2668	repair_io_bio->iter = repair_bio->bi_iter;
2669
2670	btrfs_debug(btrfs_sb(inode->i_sb),
2671		    "repair read error: submitting new read to mirror %d",
2672		    failrec->this_mirror);
2673
2674	status = submit_bio_hook(inode, repair_bio, failrec->this_mirror,
2675				 failrec->bio_flags);
2676	if (status) {
2677		free_io_failure(failure_tree, tree, failrec);
2678		bio_put(repair_bio);
2679	}
2680	return blk_status_to_errno(status);
2681}
2682
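/*
 * Finish reading the range [@start, @start + @len) of @page: update the
 * uptodate/error status and unlock the page, or for subpage filesystems end
 * the reader count for just this range.
 */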
2683static void end_page_read(struct page *page, bool uptodate, u64 start, u32 len)
2684{
2685	struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb);
2686
2687	ASSERT(page_offset(page) <= start &&
2688	       start + len <= page_offset(page) + PAGE_SIZE);
2689
2690	if (uptodate) {
2691		btrfs_page_set_uptodate(fs_info, page, start, len);
2692	} else {
2693		btrfs_page_clear_uptodate(fs_info, page, start, len);
2694		btrfs_page_set_error(fs_info, page, start, len);
2695	}
2696
2697	if (fs_info->sectorsize == PAGE_SIZE)
2698		unlock_page(page);
2699	else
2700		btrfs_subpage_end_reader(fs_info, page, start, len);
2701}
2702
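/*
 * Handle a bvec whose read failed or whose checksum did not match.
 *
 * @error_bitmap has one bit per sector of [@start, @end].  Sectors with the
 * bit clear are simply marked uptodate and unlocked; sectors with the bit set
 * get a repair read submitted through btrfs_repair_one_sector().  The first
 * repair submission error, if any, is returned as a blk_status_t.
 */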
2703static blk_status_t submit_read_repair(struct inode *inode,
2704				      struct bio *failed_bio, u32 bio_offset,
2705				      struct page *page, unsigned int pgoff,
2706				      u64 start, u64 end, int failed_mirror,
2707				      unsigned int error_bitmap,
2708				      submit_bio_hook_t *submit_bio_hook)
2709{
2710	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2711	const u32 sectorsize = fs_info->sectorsize;
2712	const int nr_bits = (end + 1 - start) >> fs_info->sectorsize_bits;
2713	int error = 0;
2714	int i;
2715
2716	BUG_ON(bio_op(failed_bio) == REQ_OP_WRITE);
2717
2718	/* We're here because we had some read errors or csum mismatch */
2719	ASSERT(error_bitmap);
2720
2721	/*
2722	 * We only get called on buffered IO, thus page must be mapped and bio
2723	 * must not be cloned.
2724	 */
2725	ASSERT(page->mapping && !bio_flagged(failed_bio, BIO_CLONED));
2726
2727	/* Iterate through all the sectors in the range */
2728	for (i = 0; i < nr_bits; i++) {
2729		const unsigned int offset = i * sectorsize;
2730		struct extent_state *cached = NULL;
2731		bool uptodate = false;
2732		int ret;
2733
2734		if (!(error_bitmap & (1U << i))) {
2735			/*
2736			 * This sector has no error, just end the page read
2737			 * and unlock the range.
2738			 */
2739			uptodate = true;
2740			goto next;
2741		}
2742
2743		ret = btrfs_repair_one_sector(inode, failed_bio,
2744				bio_offset + offset,
2745				page, pgoff + offset, start + offset,
2746				failed_mirror, submit_bio_hook);
2747		if (!ret) {
2748			/*
2749			 * We have submitted the read repair, the page release
2750			 * will be handled by the endio function of the
2751			 * submitted repair bio.
2752			 * Thus we don't need to do anything here.
2753			 */
2754			continue;
2755		}
2756		/*
2757		 * Repair failed, just record the error but still continue,
2758		 * otherwise the remaining sectors will not be properly unlocked.
2759		 */
2760		if (!error)
2761			error = ret;
2762next:
2763		end_page_read(page, uptodate, start + offset, sectorsize);
2764		if (uptodate)
2765			set_extent_uptodate(&BTRFS_I(inode)->io_tree,
2766					start + offset,
2767					start + offset + sectorsize - 1,
2768					&cached, GFP_ATOMIC);
2769		unlock_extent_cached_atomic(&BTRFS_I(inode)->io_tree,
2770				start + offset,
2771				start + offset + sectorsize - 1,
2772				&cached);
2773	}
2774	return errno_to_blk_status(error);
2775}
2776
2777/* lots and lots of room for performance fixes in the end_bio funcs */
2778
2779void end_extent_writepage(struct page *page, int err, u64 start, u64 end)
2780{
2781	struct btrfs_inode *inode;
2782	int uptodate = (err == 0);
2783	int ret = 0;
2784
2785	ASSERT(page && page->mapping);
2786	inode = BTRFS_I(page->mapping->host);
2787	btrfs_writepage_endio_finish_ordered(inode, page, start, end, uptodate);
2788
2789	if (!uptodate) {
2790		ClearPageUptodate(page);
2791		SetPageError(page);
2792		ret = err < 0 ? err : -EIO;
2793		mapping_set_error(page->mapping, ret);
2794	}
2795}
2796
2797/*
2798 * after a writepage IO is done, we need to:
2799 * clear the uptodate bits on error
2800 * clear the writeback bits in the extent tree for this IO
2801 * end_page_writeback if the page has no more pending IO
2802 *
2803 * Scheduling is not allowed, so the extent state tree is expected
2804 * to have one and only one object corresponding to this IO.
2805 */
2806static void end_bio_extent_writepage(struct bio *bio)
2807{
2808	int error = blk_status_to_errno(bio->bi_status);
2809	struct bio_vec *bvec;
2810	u64 start;
2811	u64 end;
2812	struct bvec_iter_all iter_all;
2813	bool first_bvec = true;
2814
2815	ASSERT(!bio_flagged(bio, BIO_CLONED));
2816	bio_for_each_segment_all(bvec, bio, iter_all) {
2817		struct page *page = bvec->bv_page;
2818		struct inode *inode = page->mapping->host;
2819		struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2820		const u32 sectorsize = fs_info->sectorsize;
2821
2822		/* Our read/write should always be sector aligned. */
2823		if (!IS_ALIGNED(bvec->bv_offset, sectorsize))
2824			btrfs_err(fs_info,
2825		"partial page write in btrfs with offset %u and length %u",
2826				  bvec->bv_offset, bvec->bv_len);
2827		else if (!IS_ALIGNED(bvec->bv_len, sectorsize))
2828			btrfs_info(fs_info,
2829		"incomplete page write with offset %u and length %u",
2830				   bvec->bv_offset, bvec->bv_len);
2831
2832		start = page_offset(page) + bvec->bv_offset;
2833		end = start + bvec->bv_len - 1;
2834
2835		if (first_bvec) {
2836			btrfs_record_physical_zoned(inode, start, bio);
2837			first_bvec = false;
2838		}
2839
2840		end_extent_writepage(page, error, start, end);
2841
2842		btrfs_page_clear_writeback(fs_info, page, start, bvec->bv_len);
2843	}
2844
2845	bio_put(bio);
2846}
2847
2848/*
2849 * Record previously processed extent range
2850 *
2851 * Used by endio_readpage_release_extent() to handle a full extent range at
2852 * once, reducing the number of extent io tree operations.
2853 */
2854struct processed_extent {
2855	struct btrfs_inode *inode;
2856	/* Start of the range in @inode */
2857	u64 start;
2858	/* End of the range in @inode */
2859	u64 end;
2860	bool uptodate;
2861};
2862
2863/*
2864 * Try to release processed extent range
2865 *
2866 * May not release the extent range right now if the current range is
2867 * contiguous to processed extent.
2868 *
2869 * Will release the processed extent when @inode or @uptodate changes, or when
2870 * the range is no longer contiguous to the processed range.
2871 *
2872 * Passing @inode == NULL will force processed extent to be released.
2873 */
2874static void endio_readpage_release_extent(struct processed_extent *processed,
2875			      struct btrfs_inode *inode, u64 start, u64 end,
2876			      bool uptodate)
2877{
2878	struct extent_state *cached = NULL;
2879	struct extent_io_tree *tree;
2880
2881	/* The first extent, initialize @processed */
2882	if (!processed->inode)
2883		goto update;
2884
2885	/*
2886	 * Contiguous to the processed extent, just extend the recorded end.
2887	 *
2888	 * Several things to notice:
2889	 *
2890	 * - bios can be merged as long as the on-disk bytenr is contiguous
2891	 *   This means we can have pages belonging to other inodes, thus we need
2892	 *   to check if the inode still matches.
2893	 * - a bvec can contain a range beyond the current page for multi-page bvecs
2894	 *   Thus we need to do the processed->end + 1 >= start check
2895	 */
2896	if (processed->inode == inode && processed->uptodate == uptodate &&
2897	    processed->end + 1 >= start && end >= processed->end) {
2898		processed->end = end;
2899		return;
2900	}
2901
2902	tree = &processed->inode->io_tree;
2903	/*
2904	 * Now we don't have range contiguous to the processed range, release
2905	 * the processed range now.
2906	 */
2907	if (processed->uptodate && tree->track_uptodate)
2908		set_extent_uptodate(tree, processed->start, processed->end,
2909				    &cached, GFP_ATOMIC);
2910	unlock_extent_cached_atomic(tree, processed->start, processed->end,
2911				    &cached);
2912
2913update:
2914	/* Update processed to current range */
2915	processed->inode = inode;
2916	processed->start = start;
2917	processed->end = end;
2918	processed->uptodate = uptodate;
2919}
2920
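/*
 * Prepare @page for reading.  For subpage filesystems this starts a reader
 * covering the whole page; with sectorsize == PAGE_SIZE the page lock alone
 * is enough.
 */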
2921static void begin_page_read(struct btrfs_fs_info *fs_info, struct page *page)
2922{
2923	ASSERT(PageLocked(page));
2924	if (fs_info->sectorsize == PAGE_SIZE)
2925		return;
2926
2927	ASSERT(PagePrivate(page));
2928	btrfs_subpage_start_reader(fs_info, page, page_offset(page), PAGE_SIZE);
2929}
2930
2931/*
2932 * Find the extent buffer for a given bytenr.
2933 *
2934 * This is for end_bio_extent_readpage(), thus we can't do any unsafe locking
2935 * in endio context.
2936 */
2937static struct extent_buffer *find_extent_buffer_readpage(
2938		struct btrfs_fs_info *fs_info, struct page *page, u64 bytenr)
2939{
2940	struct extent_buffer *eb;
2941
2942	/*
2943	 * For regular sectorsize, we can use page->private to grab the extent
2944	 * buffer.
2945	 */
2946	if (fs_info->sectorsize == PAGE_SIZE) {
2947		ASSERT(PagePrivate(page) && page->private);
2948		return (struct extent_buffer *)page->private;
2949	}
2950
2951	/* For subpage case, we need to lookup buffer radix tree */
2952	rcu_read_lock();
2953	eb = radix_tree_lookup(&fs_info->buffer_radix,
2954			       bytenr >> fs_info->sectorsize_bits);
2955	rcu_read_unlock();
2956	ASSERT(eb);
2957	return eb;
2958}
2959
2960/*
2961 * after a readpage IO is done, we need to:
2962 * clear the uptodate bits on error
2963 * set the uptodate bits if things worked
2964 * set the page up to date if all extents in the tree are uptodate
2965 * clear the lock bit in the extent tree
2966 * unlock the page if there are no other extents locked for it
2967 *
2968 * Scheduling is not allowed, so the extent state tree is expected
2969 * to have one and only one object corresponding to this IO.
2970 */
2971static void end_bio_extent_readpage(struct bio *bio)
2972{
2973	struct bio_vec *bvec;
2974	struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
2975	struct extent_io_tree *tree, *failure_tree;
2976	struct processed_extent processed = { 0 };
2977	/*
2978	 * The offset from the beginning of the bio; since one bio can never be
2979	 * larger than UINT_MAX, a u32 is enough here.
2980	 */
2981	u32 bio_offset = 0;
2982	int mirror;
2983	int ret;
2984	struct bvec_iter_all iter_all;
2985
2986	ASSERT(!bio_flagged(bio, BIO_CLONED));
2987	bio_for_each_segment_all(bvec, bio, iter_all) {
2988		bool uptodate = !bio->bi_status;
2989		struct page *page = bvec->bv_page;
2990		struct inode *inode = page->mapping->host;
2991		struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2992		const u32 sectorsize = fs_info->sectorsize;
2993		unsigned int error_bitmap = (unsigned int)-1;
2994		u64 start;
2995		u64 end;
2996		u32 len;
2997
2998		btrfs_debug(fs_info,
2999			"end_bio_extent_readpage: bi_sector=%llu, err=%d, mirror=%u",
3000			bio->bi_iter.bi_sector, bio->bi_status,
3001			io_bio->mirror_num);
3002		tree = &BTRFS_I(inode)->io_tree;
3003		failure_tree = &BTRFS_I(inode)->io_failure_tree;
3004
3005		/*
3006		 * We always issue full-sector reads, but if some block in a
3007		 * page fails to read, blk_update_request() will advance
3008		 * bv_offset and adjust bv_len to compensate.  Print a warning
3009		 * for unaligned offsets, and an error if they don't add up to
3010		 * a full sector.
3011		 */
3012		if (!IS_ALIGNED(bvec->bv_offset, sectorsize))
3013			btrfs_err(fs_info,
3014		"partial page read in btrfs with offset %u and length %u",
3015				  bvec->bv_offset, bvec->bv_len);
3016		else if (!IS_ALIGNED(bvec->bv_offset + bvec->bv_len,
3017				     sectorsize))
3018			btrfs_info(fs_info,
3019		"incomplete page read with offset %u and length %u",
3020				   bvec->bv_offset, bvec->bv_len);
3021
3022		start = page_offset(page) + bvec->bv_offset;
3023		end = start + bvec->bv_len - 1;
3024		len = bvec->bv_len;
3025
3026		mirror = io_bio->mirror_num;
3027		if (likely(uptodate)) {
3028			if (is_data_inode(inode)) {
3029				error_bitmap = btrfs_verify_data_csum(io_bio,
3030						bio_offset, page, start, end);
3031				ret = error_bitmap;
3032			} else {
3033				ret = btrfs_validate_metadata_buffer(io_bio,
3034					page, start, end, mirror);
3035			}
3036			if (ret)
3037				uptodate = false;
3038			else
3039				clean_io_failure(BTRFS_I(inode)->root->fs_info,
3040						 failure_tree, tree, start,
3041						 page,
3042						 btrfs_ino(BTRFS_I(inode)), 0);
3043		}
3044
3045		if (likely(uptodate))
3046			goto readpage_ok;
3047
3048		if (is_data_inode(inode)) {
3049			/*
3050			 * submit_read_repair() will handle all the good
3051			 * and bad sectors, we just continue to the next bvec.
3052			 */
3053			submit_read_repair(inode, bio, bio_offset, page,
3054					   start - page_offset(page), start,
3055					   end, mirror, error_bitmap,
3056					   btrfs_submit_data_bio);
3057
3058			ASSERT(bio_offset + len > bio_offset);
3059			bio_offset += len;
3060			continue;
3061		} else {
3062			struct extent_buffer *eb;
3063
3064			eb = find_extent_buffer_readpage(fs_info, page, start);
3065			set_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
3066			eb->read_mirror = mirror;
3067			atomic_dec(&eb->io_pages);
3068			if (test_and_clear_bit(EXTENT_BUFFER_READAHEAD,
3069					       &eb->bflags))
3070				btree_readahead_hook(eb, -EIO);
3071		}
3072readpage_ok:
3073		if (likely(uptodate)) {
3074			loff_t i_size = i_size_read(inode);
3075			pgoff_t end_index = i_size >> PAGE_SHIFT;
3076
3077			/*
3078			 * Zero out the remaining part if this range straddles
3079			 * i_size.
3080			 *
3081			 * Here we should only zero the range inside the bvec,
3082			 * not touch anything else.
3083			 *
3084			 * NOTE: i_size is exclusive while end is inclusive.
3085			 */
3086			if (page->index == end_index && i_size <= end) {
3087				u32 zero_start = max(offset_in_page(i_size),
3088						     offset_in_page(start));
3089
3090				zero_user_segment(page, zero_start,
3091						  offset_in_page(end) + 1);
3092			}
3093		}
3094		ASSERT(bio_offset + len > bio_offset);
3095		bio_offset += len;
3096
3097		/* Update page status and unlock */
3098		end_page_read(page, uptodate, start, len);
3099		endio_readpage_release_extent(&processed, BTRFS_I(inode),
3100					      start, end, uptodate);
3101	}
3102	/* Release the last extent */
3103	endio_readpage_release_extent(&processed, NULL, 0, 0, false);
3104	btrfs_io_bio_free_csum(io_bio);
3105	bio_put(bio);
3106}
3107
3108/*
3109 * Initialize the members up to but not including 'bio'. Use after allocating a
3110 * new bio with bio_alloc_bioset, which does not zero the bytes outside of
3111 * 'bio' because use of __GFP_ZERO is not supported.
3112 */
3113static inline void btrfs_io_bio_init(struct btrfs_io_bio *btrfs_bio)
3114{
3115	memset(btrfs_bio, 0, offsetof(struct btrfs_io_bio, bio));
3116}
3117
3118/*
3119 * The following helpers allocate a bio. As it's backed by a bioset, it'll
3120 * never fail.  We're returning a bio right now but you can call btrfs_io_bio()
3121 * for the appropriate container_of magic.
3122 */
3123struct bio *btrfs_bio_alloc(u64 first_byte)
3124{
3125	struct bio *bio;
3126
3127	bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_VECS, &btrfs_bioset);
3128	bio->bi_iter.bi_sector = first_byte >> 9;
3129	btrfs_io_bio_init(btrfs_io_bio(bio));
3130	return bio;
3131}
3132
3133struct bio *btrfs_bio_clone(struct bio *bio)
3134{
3135	struct btrfs_io_bio *btrfs_bio;
3136	struct bio *new;
3137
3138	/* Bio allocation backed by a bioset does not fail */
3139	new = bio_clone_fast(bio, GFP_NOFS, &btrfs_bioset);
3140	btrfs_bio = btrfs_io_bio(new);
3141	btrfs_io_bio_init(btrfs_bio);
3142	btrfs_bio->iter = bio->bi_iter;
3143	return new;
3144}
3145
3146struct bio *btrfs_io_bio_alloc(unsigned int nr_iovecs)
3147{
3148	struct bio *bio;
3149
3150	/* Bio allocation backed by a bioset does not fail */
3151	bio = bio_alloc_bioset(GFP_NOFS, nr_iovecs, &btrfs_bioset);
3152	btrfs_io_bio_init(btrfs_io_bio(bio));
3153	return bio;
3154}
3155
3156struct bio *btrfs_bio_clone_partial(struct bio *orig, int offset, int size)
3157{
3158	struct bio *bio;
3159	struct btrfs_io_bio *btrfs_bio;
3160
3161	/* this will never fail when it's backed by a bioset */
3162	bio = bio_clone_fast(orig, GFP_NOFS, &btrfs_bioset);
3163	ASSERT(bio);
3164
3165	btrfs_bio = btrfs_io_bio(bio);
3166	btrfs_io_bio_init(btrfs_bio);
3167
3168	bio_trim(bio, offset >> 9, size >> 9);
3169	btrfs_bio->iter = bio->bi_iter;
3170	return bio;
3171}
3172
3173/**
3174 * Attempt to add a page to bio
3175 *
3176 * @bio_ctrl:	  control structure holding the current bio and its limits
3177 * @page:	  page to add to the bio
3178 * @disk_bytenr:  disk bytenr of the range, used to check whether we are
3179 *		  adding a page contiguous to the previous one in the bio
3180 * @size:	  portion of the page that we want to write
3181 * @pg_offset:	  starting offset in the page
3182 * @bio_flags:	  flags of the current bio to see if we can merge them
3183 *
3184 * Attempt to add a page to bio considering stripe alignment etc.  The page
3185 * is not added if the bio flags differ, the range is not contiguous, or
3186 * adding it would cross a stripe or ordered extent boundary.
3187 *
3188 * Return true if the page was successfully added, false otherwise.
3189 */
3190static bool btrfs_bio_add_page(struct btrfs_bio_ctrl *bio_ctrl,
3191			       struct page *page,
3192			       u64 disk_bytenr, unsigned int size,
3193			       unsigned int pg_offset,
3194			       unsigned long bio_flags)
3195{
3196	struct bio *bio = bio_ctrl->bio;
3197	u32 bio_size = bio->bi_iter.bi_size;
3198	const sector_t sector = disk_bytenr >> SECTOR_SHIFT;
3199	bool contig;
3200	int ret;
3201
3202	ASSERT(bio);
3203	/* The limit should be calculated when bio_ctrl->bio is allocated */
3204	ASSERT(bio_ctrl->len_to_oe_boundary && bio_ctrl->len_to_stripe_boundary);
3205	if (bio_ctrl->bio_flags != bio_flags)
3206		return false;
3207
3208	if (bio_ctrl->bio_flags & EXTENT_BIO_COMPRESSED)
3209		contig = bio->bi_iter.bi_sector == sector;
3210	else
3211		contig = bio_end_sector(bio) == sector;
3212	if (!contig)
3213		return false;
3214
3215	if (bio_size + size > bio_ctrl->len_to_oe_boundary ||
3216	    bio_size + size > bio_ctrl->len_to_stripe_boundary)
3217		return false;
3218
3219	if (bio_op(bio) == REQ_OP_ZONE_APPEND)
3220		ret = bio_add_zone_append_page(bio, page, size, pg_offset);
3221	else
3222		ret = bio_add_page(bio, page, size, pg_offset);
3223
3224	return ret == size;
3225}
3226
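/*
 * Calculate the maximum size the current bio may reach before it would cross
 * a stripe boundary or, for zone append writes, an ordered extent boundary,
 * and store the limits in @bio_ctrl.
 *
 * Compressed bios are not limited here; they are split later in
 * btrfs_submit_compressed_read/write().
 */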
3227static int calc_bio_boundaries(struct btrfs_bio_ctrl *bio_ctrl,
3228			       struct btrfs_inode *inode)
3229{
3230	struct btrfs_fs_info *fs_info = inode->root->fs_info;
3231	struct btrfs_io_geometry geom;
3232	struct btrfs_ordered_extent *ordered;
3233	struct extent_map *em;
3234	u64 logical = (bio_ctrl->bio->bi_iter.bi_sector << SECTOR_SHIFT);
3235	int ret;
3236
3237	/*
3238	 * Pages for a compressed extent are never submitted to disk directly,
3239	 * thus they have no real boundary, just set both limits to U32_MAX.
3240	 *
3241	 * The split happens for the real compressed bio, which is done in
3242	 * btrfs_submit_compressed_read/write().
3243	 */
3244	if (bio_ctrl->bio_flags & EXTENT_BIO_COMPRESSED) {
3245		bio_ctrl->len_to_oe_boundary = U32_MAX;
3246		bio_ctrl->len_to_stripe_boundary = U32_MAX;
3247		return 0;
3248	}
3249	em = btrfs_get_chunk_map(fs_info, logical, fs_info->sectorsize);
3250	if (IS_ERR(em))
3251		return PTR_ERR(em);
3252	ret = btrfs_get_io_geometry(fs_info, em, btrfs_op(bio_ctrl->bio),
3253				    logical, &geom);
3254	free_extent_map(em);
3255	if (ret < 0)
3256		return ret;
3258	if (geom.len > U32_MAX)
3259		bio_ctrl->len_to_stripe_boundary = U32_MAX;
3260	else
3261		bio_ctrl->len_to_stripe_boundary = (u32)geom.len;
3262
3263	if (!btrfs_is_zoned(fs_info) ||
3264	    bio_op(bio_ctrl->bio) != REQ_OP_ZONE_APPEND) {
3265		bio_ctrl->len_to_oe_boundary = U32_MAX;
3266		return 0;
3267	}
3268
3269	/* Ordered extent not yet created, so we're good */
3270	ordered = btrfs_lookup_ordered_extent(inode, logical);
3271	if (!ordered) {
3272		bio_ctrl->len_to_oe_boundary = U32_MAX;
3273		return 0;
3274	}
3275
3276	bio_ctrl->len_to_oe_boundary = min_t(u32, U32_MAX,
3277		ordered->disk_bytenr + ordered->disk_num_bytes - logical);
3278	btrfs_put_ordered_extent(ordered);
3279	return 0;
3280}
3281
3282/*
3283 * @opf:	bio REQ_OP_* and REQ_* flags as one value
3284 * @wbc:	optional writeback control for io accounting
3285 * @bio_ctrl:	holds the current bio (if any) and its merging limits
3286 * @page:	page to add to the bio
3287 * @disk_bytenr: logical bytenr where the write will be
3288 * @size:	portion of page that we want to write to
3289 * @pg_offset:	starting offset in the page
3290 * @end_io_func: end_io callback for the new bio
3291 * @mirror_num:	desired mirror to read/write
3292 * @bio_flags:	flags of the current bio to see if we can merge them
3293 * @force_bio_submit: submit the current bio first even if the new range
3294 *		could have been merged into it
3295 */
3296static int submit_extent_page(unsigned int opf,
3297			      struct writeback_control *wbc,
3298			      struct btrfs_bio_ctrl *bio_ctrl,
3299			      struct page *page, u64 disk_bytenr,
3300			      size_t size, unsigned long pg_offset,
3301			      bio_end_io_t end_io_func,
3302			      int mirror_num,
3303			      unsigned long bio_flags,
3304			      bool force_bio_submit)
3305{
3306	int ret = 0;
3307	struct bio *bio;
3308	size_t io_size = min_t(size_t, size, PAGE_SIZE);
3309	struct btrfs_inode *inode = BTRFS_I(page->mapping->host);
3310	struct extent_io_tree *tree = &inode->io_tree;
3311	struct btrfs_fs_info *fs_info = inode->root->fs_info;
3312
3313	ASSERT(bio_ctrl);
3314
3315	ASSERT(pg_offset < PAGE_SIZE && size <= PAGE_SIZE &&
3316	       pg_offset + size <= PAGE_SIZE);
3317	if (bio_ctrl->bio) {
3318		bio = bio_ctrl->bio;
3319		if (force_bio_submit ||
3320		    !btrfs_bio_add_page(bio_ctrl, page, disk_bytenr, io_size,
3321					pg_offset, bio_flags)) {
3322			ret = submit_one_bio(bio, mirror_num, bio_ctrl->bio_flags);
3323			bio_ctrl->bio = NULL;
3324			if (ret < 0)
3325				return ret;
3326		} else {
3327			if (wbc)
3328				wbc_account_cgroup_owner(wbc, page, io_size);
3329			return 0;
3330		}
3331	}
3332
3333	bio = btrfs_bio_alloc(disk_bytenr);
3334	bio_add_page(bio, page, io_size, pg_offset);
3335	bio->bi_end_io = end_io_func;
3336	bio->bi_private = tree;
3337	bio->bi_write_hint = page->mapping->host->i_write_hint;
3338	bio->bi_opf = opf;
3339	if (wbc) {
3340		struct block_device *bdev;
3341
3342		bdev = fs_info->fs_devices->latest_bdev;
3343		bio_set_dev(bio, bdev);
3344		wbc_init_bio(wbc, bio);
3345		wbc_account_cgroup_owner(wbc, page, io_size);
3346	}
3347	if (btrfs_is_zoned(fs_info) && bio_op(bio) == REQ_OP_ZONE_APPEND) {
3348		struct btrfs_device *device;
3349
3350		device = btrfs_zoned_get_device(fs_info, disk_bytenr, io_size);
3351		if (IS_ERR(device))
3352			return PTR_ERR(device);
3353
3354		btrfs_io_bio(bio)->device = device;
3355	}
3356
3357	bio_ctrl->bio = bio;
3358	bio_ctrl->bio_flags = bio_flags;
3359	ret = calc_bio_boundaries(bio_ctrl, inode);
3360
3361	return ret;
3362}
3363
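/*
 * Attach @eb to @page through page->private.
 *
 * With sectorsize == PAGE_SIZE the eb pointer is stored directly.  For subpage
 * a btrfs_subpage structure is attached instead, using @prealloc if provided
 * or allocating a new one otherwise.
 */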
3364static int attach_extent_buffer_page(struct extent_buffer *eb,
3365				     struct page *page,
3366				     struct btrfs_subpage *prealloc)
3367{
3368	struct btrfs_fs_info *fs_info = eb->fs_info;
3369	int ret = 0;
3370
3371	/*
3372	 * If the page is mapped to btree inode, we should hold the private
3373	 * lock to prevent race.
3374	 * For cloned or dummy extent buffers, their pages are not mapped and
3375	 * will not race with any other ebs.
3376	 */
3377	if (page->mapping)
3378		lockdep_assert_held(&page->mapping->private_lock);
3379
3380	if (fs_info->sectorsize == PAGE_SIZE) {
3381		if (!PagePrivate(page))
3382			attach_page_private(page, eb);
3383		else
3384			WARN_ON(page->private != (unsigned long)eb);
3385		return 0;
3386	}
3387
3388	/* Already mapped, just free prealloc */
3389	if (PagePrivate(page)) {
3390		btrfs_free_subpage(prealloc);
3391		return 0;
3392	}
3393
3394	if (prealloc)
3395		/* Has preallocated memory for subpage */
3396		attach_page_private(page, prealloc);
3397	else
3398		/* Do new allocation to attach subpage */
3399		ret = btrfs_attach_subpage(fs_info, page,
3400					   BTRFS_SUBPAGE_METADATA);
3401	return ret;
3402}
3403
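/*
 * Mark @page as having extent io specific private data: a btrfs_subpage
 * structure for subpage filesystems, or the EXTENT_PAGE_PRIVATE marker
 * otherwise.  A no-op if the page already has private data attached.
 */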
3404int set_page_extent_mapped(struct page *page)
3405{
3406	struct btrfs_fs_info *fs_info;
3407
3408	ASSERT(page->mapping);
3409
3410	if (PagePrivate(page))
3411		return 0;
3412
3413	fs_info = btrfs_sb(page->mapping->host->i_sb);
3414
3415	if (fs_info->sectorsize < PAGE_SIZE)
3416		return btrfs_attach_subpage(fs_info, page, BTRFS_SUBPAGE_DATA);
3417
3418	attach_page_private(page, (void *)EXTENT_PAGE_PRIVATE);
3419	return 0;
3420}
3421
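/*
 * Undo set_page_extent_mapped(): detach the subpage structure or the
 * EXTENT_PAGE_PRIVATE marker from @page, if present.
 */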
3422void clear_page_extent_mapped(struct page *page)
3423{
3424	struct btrfs_fs_info *fs_info;
3425
3426	ASSERT(page->mapping);
3427
3428	if (!PagePrivate(page))
3429		return;
3430
3431	fs_info = btrfs_sb(page->mapping->host->i_sb);
3432	if (fs_info->sectorsize < PAGE_SIZE)
3433		return btrfs_detach_subpage(fs_info, page);
3434
3435	detach_page_private(page);
3436}
3437
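/*
 * Return the extent map covering @start, preferring the cached map in
 * @em_cached when it is still in the tree and covers the offset.  A newly
 * looked up map is stored back into @em_cached with an extra reference.
 */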
3438static struct extent_map *
3439__get_extent_map(struct inode *inode, struct page *page, size_t pg_offset,
3440		 u64 start, u64 len, struct extent_map **em_cached)
3441{
3442	struct extent_map *em;
3443
3444	if (em_cached && *em_cached) {
3445		em = *em_cached;
3446		if (extent_map_in_tree(em) && start >= em->start &&
3447		    start < extent_map_end(em)) {
3448			refcount_inc(&em->refs);
3449			return em;
3450		}
3451
3452		free_extent_map(em);
3453		*em_cached = NULL;
3454	}
3455
3456	em = btrfs_get_extent(BTRFS_I(inode), page, pg_offset, start, len);
3457	if (em_cached && !IS_ERR_OR_NULL(em)) {
3458		BUG_ON(*em_cached);
3459		refcount_inc(&em->refs);
3460		*em_cached = em;
3461	}
3462	return em;
3463}
3464/*
3465 * Basic readpage implementation.  Locked extent state structs are inserted
3466 * into the tree and are removed when the IO is done (by the end_io
3467 * handlers).
3468 * XXX JDM: This needs looking at to ensure proper page locking.
3469 * Return 0 on success, otherwise return an error.
3470 */
3471int btrfs_do_readpage(struct page *page, struct extent_map **em_cached,
3472		      struct btrfs_bio_ctrl *bio_ctrl,
3473		      unsigned int read_flags, u64 *prev_em_start)
3474{
3475	struct inode *inode = page->mapping->host;
3476	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
3477	u64 start = page_offset(page);
3478	const u64 end = start + PAGE_SIZE - 1;
3479	u64 cur = start;
3480	u64 extent_offset;
3481	u64 last_byte = i_size_read(inode);
3482	u64 block_start;
3483	u64 cur_end;
3484	struct extent_map *em;
3485	int ret = 0;
3486	int nr = 0;
3487	size_t pg_offset = 0;
3488	size_t iosize;
3489	size_t blocksize = inode->i_sb->s_blocksize;
3490	unsigned long this_bio_flag = 0;
3491	struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
3492
3493	ret = set_page_extent_mapped(page);
3494	if (ret < 0) {
3495		unlock_extent(tree, start, end);
3496		btrfs_page_set_error(fs_info, page, start, PAGE_SIZE);
3497		unlock_page(page);
3498		goto out;
3499	}
3500
3501	if (!PageUptodate(page)) {
3502		if (cleancache_get_page(page) == 0) {
3503			BUG_ON(blocksize != PAGE_SIZE);
3504			unlock_extent(tree, start, end);
3505			unlock_page(page);
3506			goto out;
3507		}
3508	}
3509
3510	if (page->index == last_byte >> PAGE_SHIFT) {
3511		size_t zero_offset = offset_in_page(last_byte);
3512
3513		if (zero_offset) {
3514			iosize = PAGE_SIZE - zero_offset;
3515			memzero_page(page, zero_offset, iosize);
3516			flush_dcache_page(page);
3517		}
3518	}
3519	begin_page_read(fs_info, page);
3520	while (cur <= end) {
3521		bool force_bio_submit = false;
3522		u64 disk_bytenr;
3523
3524		if (cur >= last_byte) {
3525			struct extent_state *cached = NULL;
3526
3527			iosize = PAGE_SIZE - pg_offset;
3528			memzero_page(page, pg_offset, iosize);
3529			flush_dcache_page(page);
3530			set_extent_uptodate(tree, cur, cur + iosize - 1,
3531					    &cached, GFP_NOFS);
3532			unlock_extent_cached(tree, cur,
3533					     cur + iosize - 1, &cached);
3534			end_page_read(page, true, cur, iosize);
3535			break;
3536		}
3537		em = __get_extent_map(inode, page, pg_offset, cur,
3538				      end - cur + 1, em_cached);
3539		if (IS_ERR_OR_NULL(em)) {
3540			unlock_extent(tree, cur, end);
3541			end_page_read(page, false, cur, end + 1 - cur);
3542			break;
3543		}
3544		extent_offset = cur - em->start;
3545		BUG_ON(extent_map_end(em) <= cur);
3546		BUG_ON(end < cur);
3547
3548		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
3549			this_bio_flag |= EXTENT_BIO_COMPRESSED;
3550			extent_set_compress_type(&this_bio_flag,
3551						 em->compress_type);
3552		}
3553
3554		iosize = min(extent_map_end(em) - cur, end - cur + 1);
3555		cur_end = min(extent_map_end(em) - 1, end);
3556		iosize = ALIGN(iosize, blocksize);
3557		if (this_bio_flag & EXTENT_BIO_COMPRESSED)
3558			disk_bytenr = em->block_start;
3559		else
3560			disk_bytenr = em->block_start + extent_offset;
3561		block_start = em->block_start;
3562		if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
3563			block_start = EXTENT_MAP_HOLE;
3564
3565		/*
3566		 * If we have a file range that points to a compressed extent
3567		 * and it's followed by a consecutive file range that points
3568		 * to the same compressed extent (possibly with a different
3569		 * offset and/or length, so it either points to the whole extent
3570		 * or only part of it), we must make sure we do not submit a
3571		 * single bio to populate the pages for the 2 ranges because
3572		 * this makes the compressed extent read zero out the pages
3573		 * belonging to the 2nd range. Imagine the following scenario:
3574		 *
3575		 *  File layout
3576		 *  [0 - 8K]                     [8K - 24K]
3577		 *    |                               |
3578		 *    |                               |
3579		 * points to extent X,         points to extent X,
3580		 * offset 4K, length of 8K     offset 0, length 16K
3581		 *
3582		 * [extent X, compressed length = 4K uncompressed length = 16K]
3583		 *
3584		 * If the bio to read the compressed extent covers both ranges,
3585		 * it will decompress extent X into the pages belonging to the
3586		 * first range and then it will stop, zeroing out the remaining
3587		 * pages that belong to the other range that points to extent X.
3588		 * So here we make sure we submit 2 bios, one for the first
3589		 * range and another one for the third range. Both will target
3590		 * the same physical extent from disk, but we can't currently
3591		 * make the compressed bio endio callback populate the pages
3592		 * for both ranges because each compressed bio is tightly
3593		 * coupled with a single extent map, and each range can have
3594		 * an extent map with a different offset value relative to the
3595		 * uncompressed data of our extent and different lengths. This
3596		 * is a corner case so we prioritize correctness over
3597		 * non-optimal behavior (submitting 2 bios for the same extent).
3598		 */
3599		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) &&
3600		    prev_em_start && *prev_em_start != (u64)-1 &&
3601		    *prev_em_start != em->start)
3602			force_bio_submit = true;
3603
3604		if (prev_em_start)
3605			*prev_em_start = em->start;
3606
3607		free_extent_map(em);
3608		em = NULL;
3609
3610		/* we've found a hole, just zero and go on */
3611		if (block_start == EXTENT_MAP_HOLE) {
3612			struct extent_state *cached = NULL;
3613
3614			memzero_page(page, pg_offset, iosize);
3615			flush_dcache_page(page);
3616
3617			set_extent_uptodate(tree, cur, cur + iosize - 1,
3618					    &cached, GFP_NOFS);
3619			unlock_extent_cached(tree, cur,
3620					     cur + iosize - 1, &cached);
3621			end_page_read(page, true, cur, iosize);
3622			cur = cur + iosize;
3623			pg_offset += iosize;
3624			continue;
3625		}
3626		/* the get_extent function already copied into the page */
3627		if (test_range_bit(tree, cur, cur_end,
3628				   EXTENT_UPTODATE, 1, NULL)) {
3629			check_page_uptodate(tree, page);
3630			unlock_extent(tree, cur, cur + iosize - 1);
3631			end_page_read(page, true, cur, iosize);
3632			cur = cur + iosize;
3633			pg_offset += iosize;
3634			continue;
3635		}
3636		/* we have an inline extent but it didn't get marked up
3637		 * to date.  Error out
3638		 */
3639		if (block_start == EXTENT_MAP_INLINE) {
3640			unlock_extent(tree, cur, cur + iosize - 1);
3641			end_page_read(page, false, cur, iosize);
3642			cur = cur + iosize;
3643			pg_offset += iosize;
3644			continue;
3645		}
3646
3647		ret = submit_extent_page(REQ_OP_READ | read_flags, NULL,
3648					 bio_ctrl, page, disk_bytenr, iosize,
3649					 pg_offset,
3650					 end_bio_extent_readpage, 0,
3651					 this_bio_flag,
3652					 force_bio_submit);
3653		if (!ret) {
3654			nr++;
3655		} else {
3656			unlock_extent(tree, cur, cur + iosize - 1);
3657			end_page_read(page, false, cur, iosize);
3658			goto out;
3659		}
3660		cur = cur + iosize;
3661		pg_offset += iosize;
3662	}
3663out:
3664	return ret;
3665}
3666
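/*
 * Read a batch of contiguous pages covering [@start, @end], after locking the
 * extent range and waiting for any ordered extents in it to complete.
 */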
3667static inline void contiguous_readpages(struct page *pages[], int nr_pages,
3668					u64 start, u64 end,
3669					struct extent_map **em_cached,
3670					struct btrfs_bio_ctrl *bio_ctrl,
3671					u64 *prev_em_start)
3672{
3673	struct btrfs_inode *inode = BTRFS_I(pages[0]->mapping->host);
3674	int index;
3675
3676	btrfs_lock_and_flush_ordered_range(inode, start, end, NULL);
3677
3678	for (index = 0; index < nr_pages; index++) {
3679		btrfs_do_readpage(pages[index], em_cached, bio_ctrl,
3680				  REQ_RAHEAD, prev_em_start);
3681		put_page(pages[index]);
3682	}
3683}
3684
3685static void update_nr_written(struct writeback_control *wbc,
3686			      unsigned long nr_written)
3687{
3688	wbc->nr_to_write -= nr_written;
3689}
3690
3691/*
3692 * Helper for __extent_writepage, doing all of the delayed allocation setup.
3693 *
3694 * This returns 1 if the btrfs_run_delalloc_range function did all the work required
3695 * to write the page (copy into inline extent).  In this case the IO has
3696 * been started and the page is already unlocked.
3697 *
3698 * This returns 0 if all went well (page still locked)
3699 * This returns < 0 if there were errors (page still locked)
3700 */
3701static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
3702		struct page *page, struct writeback_control *wbc,
3703		u64 delalloc_start, unsigned long *nr_written)
3704{
3705	u64 page_end = delalloc_start + PAGE_SIZE - 1;
3706	bool found;
3707	u64 delalloc_to_write = 0;
3708	u64 delalloc_end = 0;
3709	int ret;
3710	int page_started = 0;
3711
3712
3713	while (delalloc_end < page_end) {
3714		found = find_lock_delalloc_range(&inode->vfs_inode, page,
3715					       &delalloc_start,
3716					       &delalloc_end);
3717		if (!found) {
3718			delalloc_start = delalloc_end + 1;
3719			continue;
3720		}
3721		ret = btrfs_run_delalloc_range(inode, page, delalloc_start,
3722				delalloc_end, &page_started, nr_written, wbc);
3723		if (ret) {
3724			SetPageError(page);
3725			/*
3726			 * btrfs_run_delalloc_range should return < 0 on error,
3727			 * but just in case: a return value > 0 means the IO was
3728			 * started, so we don't want to return > 0 unless things
3729			 * really went well.
3730			 */
3731			return ret < 0 ? ret : -EIO;
3732		}
3733		/*
3734		 * delalloc_end is already one less than the total length, so
3735		 * we don't subtract one from PAGE_SIZE
3736		 */
3737		delalloc_to_write += (delalloc_end - delalloc_start +
3738				      PAGE_SIZE) >> PAGE_SHIFT;
3739		delalloc_start = delalloc_end + 1;
3740	}
3741	if (wbc->nr_to_write < delalloc_to_write) {
3742		int thresh = 8192;
3743
3744		if (delalloc_to_write < thresh * 2)
3745			thresh = delalloc_to_write;
3746		wbc->nr_to_write = min_t(u64, delalloc_to_write,
3747					 thresh);
3748	}
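
	/*
	 * An illustrative example with hypothetical numbers: whenever
	 * wbc->nr_to_write is smaller than the delalloc we just found, it is
	 * replaced by delalloc_to_write itself (when that is under 16384
	 * pages) or by 8192 pages otherwise.  E.g. delalloc_to_write = 100000
	 * with nr_to_write = 1024 gives nr_to_write = 8192, while
	 * delalloc_to_write = 3000 gives nr_to_write = 3000.
	 */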
3749
3750	/* Did the delalloc code already unlock the page and start
3751	 * the IO?
3752	 */
3753	if (page_started) {
3754		/*
3755		 * we've unlocked the page, so we can't update
3756		 * the mapping's writeback index, just update
3757		 * nr_to_write.
3758		 */
3759		wbc->nr_to_write -= *nr_written;
3760		return 1;
3761	}
3762
3763	return 0;
3764}
3765
3766/*
3767 * Find the first byte we need to write.
3768 *
3769 * For subpage, one page can contain several sectors, and
3770 * __extent_writepage_io() will just grab all extent maps in the page
3771 * range and try to submit all non-inline/non-compressed extents.
3772 *
3773 * This is a big problem for subpage, as we shouldn't re-submit already
3774 * written data at all.
3775 * This function looks up the subpage dirty bit to find which range we
3776 * really need to submit.
3777 *
3778 * Return the next dirty range in [@start, @end).
3779 * If no dirty range is found, @start will be page_offset(page) + PAGE_SIZE.
3780 */
3781static void find_next_dirty_byte(struct btrfs_fs_info *fs_info,
3782				 struct page *page, u64 *start, u64 *end)
3783{
3784	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
3785	u64 orig_start = *start;
3786	/* Declare as unsigned long so we can use bitmap ops */
3787	unsigned long dirty_bitmap;
3788	unsigned long flags;
3789	int nbits = (orig_start - page_offset(page)) >> fs_info->sectorsize_bits;
3790	int range_start_bit = nbits;
3791	int range_end_bit;
3792
3793	/*
3794	 * For regular sector size == page size case, since one page only
3795	 * contains one sector, we return the page offset directly.
3796	 */
3797	if (fs_info->sectorsize == PAGE_SIZE) {
3798		*start = page_offset(page);
3799		*end = page_offset(page) + PAGE_SIZE;
3800		return;
3801	}
3802
3803	/* We should have the page locked, but just in case */
3804	spin_lock_irqsave(&subpage->lock, flags);
3805	dirty_bitmap = subpage->dirty_bitmap;
3806	spin_unlock_irqrestore(&subpage->lock, flags);
3807
3808	bitmap_next_set_region(&dirty_bitmap, &range_start_bit, &range_end_bit,
3809			       BTRFS_SUBPAGE_BITMAP_SIZE);
3810	*start = page_offset(page) + range_start_bit * fs_info->sectorsize;
3811	*end = page_offset(page) + range_end_bit * fs_info->sectorsize;
3812}
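
/*
 * An illustrative example with hypothetical values: with a 4K sectorsize on
 * a 64K page, each bit of subpage->dirty_bitmap covers one 4K sector.  If
 * only bits 3 and 4 are set and *start comes in as page_offset(page), the
 * lookup returns *start = page_offset(page) + 3 * 4096 and
 * *end = page_offset(page) + 5 * 4096 (exclusive), so the caller skips the
 * clean sectors 0-2 entirely.
 */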
3813
3814/*
3815 * helper for __extent_writepage.  This calls the writepage start hooks,
3816 * and does the loop to map the page into extents and bios.
3817 *
3818 * We return 1 if the IO is started and the page is unlocked,
3819 * 0 if all went well (page still locked)
3820 * < 0 if there were errors (page still locked)
3821 */
3822static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode,
3823				 struct page *page,
3824				 struct writeback_control *wbc,
3825				 struct extent_page_data *epd,
3826				 loff_t i_size,
3827				 unsigned long nr_written,
3828				 int *nr_ret)
3829{
3830	struct btrfs_fs_info *fs_info = inode->root->fs_info;
3831	u64 cur = page_offset(page);
3832	u64 end = cur + PAGE_SIZE - 1;
3833	u64 extent_offset;
3834	u64 block_start;
3835	struct extent_map *em;
3836	int ret = 0;
3837	int nr = 0;
3838	u32 opf = REQ_OP_WRITE;
3839	const unsigned int write_flags = wbc_to_write_flags(wbc);
3840	bool compressed;
3841
3842	ret = btrfs_writepage_cow_fixup(page);
3843	if (ret) {
3844		/* Fixup worker will requeue */
3845		redirty_page_for_writepage(wbc, page);
3846		update_nr_written(wbc, nr_written);
3847		unlock_page(page);
3848		return 1;
3849	}
3850
3851	/*
3852	 * we don't want to touch the inode after unlocking the page,
3853	 * so we update the mapping writeback index now
3854	 */
3855	update_nr_written(wbc, nr_written + 1);
3856
3857	while (cur <= end) {
3858		u64 disk_bytenr;
3859		u64 em_end;
3860		u64 dirty_range_start = cur;
3861		u64 dirty_range_end;
3862		u32 iosize;
3863
3864		if (cur >= i_size) {
3865			btrfs_writepage_endio_finish_ordered(inode, page, cur,
3866							     end, 1);
3867			break;
3868		}
3869
3870		find_next_dirty_byte(fs_info, page, &dirty_range_start,
3871				     &dirty_range_end);
3872		if (cur < dirty_range_start) {
3873			cur = dirty_range_start;
3874			continue;
3875		}
3876
3877		em = btrfs_get_extent(inode, NULL, 0, cur, end - cur + 1);
3878		if (IS_ERR_OR_NULL(em)) {
3879			btrfs_page_set_error(fs_info, page, cur, end - cur + 1);
3880			ret = PTR_ERR_OR_ZERO(em);
3881			break;
3882		}
3883
3884		extent_offset = cur - em->start;
3885		em_end = extent_map_end(em);
3886		ASSERT(cur <= em_end);
3887		ASSERT(cur < end);
3888		ASSERT(IS_ALIGNED(em->start, fs_info->sectorsize));
3889		ASSERT(IS_ALIGNED(em->len, fs_info->sectorsize));
3890		block_start = em->block_start;
3891		compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
3892		disk_bytenr = em->block_start + extent_offset;
3893
3894		/*
3895		 * Note that em_end from extent_map_end() and dirty_range_end from
3896		 * find_next_dirty_byte() are both exclusive
3897		 */
3898		iosize = min(min(em_end, end + 1), dirty_range_end) - cur;
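		/*
		 * An example with hypothetical values: if cur sits 8K into
		 * the file, the extent map ends at em_end = 1M, the page ends
		 * at end + 1 = 64K and the subpage dirty range ends at
		 * dirty_range_end = 16K, then iosize = min(1M, 64K, 16K) - 8K
		 * = 8K: only the part that is inside the page, still dirty
		 * and covered by this extent map gets submitted.
		 */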
3899
3900		if (btrfs_use_zone_append(inode, em->block_start))
3901			opf = REQ_OP_ZONE_APPEND;
3902
3903		free_extent_map(em);
3904		em = NULL;
3905
3906		/*
3907		 * compressed and inline extents are written through other
3908		 * paths in the FS
3909		 */
3910		if (compressed || block_start == EXTENT_MAP_HOLE ||
3911		    block_start == EXTENT_MAP_INLINE) {
3912			if (compressed)
3913				nr++;
3914			else
3915				btrfs_writepage_endio_finish_ordered(inode,
3916						page, cur, cur + iosize - 1, 1);
3917			cur += iosize;
3918			continue;
3919		}
3920
3921		btrfs_set_range_writeback(inode, cur, cur + iosize - 1);
3922		if (!PageWriteback(page)) {
3923			btrfs_err(inode->root->fs_info,
3924				   "page %lu not writeback, cur %llu end %llu",
3925			       page->index, cur, end);
3926		}
3927
3928		/*
3929		 * Although the PageDirty bit is cleared before entering this
3930		 * function, the subpage dirty bit is not cleared.
3931		 * So clear the subpage dirty bit here so that next time we won't
3932		 * submit the page for a range already written to disk.
3933		 */
3934		btrfs_page_clear_dirty(fs_info, page, cur, iosize);
3935
3936		ret = submit_extent_page(opf | write_flags, wbc,
3937					 &epd->bio_ctrl, page,
3938					 disk_bytenr, iosize,
3939					 cur - page_offset(page),
3940					 end_bio_extent_writepage,
3941					 0, 0, false);
3942		if (ret) {
3943			btrfs_page_set_error(fs_info, page, cur, iosize);
3944			if (PageWriteback(page))
3945				btrfs_page_clear_writeback(fs_info, page, cur,
3946							   iosize);
3947		}
3948
3949		cur += iosize;
3950		nr++;
3951	}
3952	*nr_ret = nr;
3953	return ret;
3954}
3955
3956/*
3957 * the writepage semantics are similar to regular writepage.  extent
3958 * records are inserted to lock ranges in the tree, and as dirty areas
3959 * are found, they are marked writeback.  Then the lock bits are removed
3960 * and the end_io handler clears the writeback ranges
3961 *
3962 * Return 0 if everything goes well.
3963 * Return <0 for error.
3964 */
3965static int __extent_writepage(struct page *page, struct writeback_control *wbc,
3966			      struct extent_page_data *epd)
3967{
3968	struct inode *inode = page->mapping->host;
3969	u64 start = page_offset(page);
3970	u64 page_end = start + PAGE_SIZE - 1;
3971	int ret;
3972	int nr = 0;
3973	size_t pg_offset;
3974	loff_t i_size = i_size_read(inode);
3975	unsigned long end_index = i_size >> PAGE_SHIFT;
3976	unsigned long nr_written = 0;
3977
3978	trace___extent_writepage(page, inode, wbc);
3979
3980	WARN_ON(!PageLocked(page));
3981
3982	ClearPageError(page);
3983
3984	pg_offset = offset_in_page(i_size);
3985	if (page->index > end_index ||
3986	   (page->index == end_index && !pg_offset)) {
3987		page->mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE);
3988		unlock_page(page);
3989		return 0;
3990	}
3991
3992	if (page->index == end_index) {
3993		memzero_page(page, pg_offset, PAGE_SIZE - pg_offset);
3994		flush_dcache_page(page);
3995	}
3996
3997	ret = set_page_extent_mapped(page);
3998	if (ret < 0) {
3999		SetPageError(page);
4000		goto done;
4001	}
4002
4003	if (!epd->extent_locked) {
4004		ret = writepage_delalloc(BTRFS_I(inode), page, wbc, start,
4005					 &nr_written);
4006		if (ret == 1)
4007			return 0;
4008		if (ret)
4009			goto done;
4010	}
4011
4012	ret = __extent_writepage_io(BTRFS_I(inode), page, wbc, epd, i_size,
4013				    nr_written, &nr);
4014	if (ret == 1)
4015		return 0;
4016
4017done:
4018	if (nr == 0) {
4019		/* make sure the mapping tag for page dirty gets cleared */
4020		set_page_writeback(page);
4021		end_page_writeback(page);
4022	}
4023	if (PageError(page)) {
4024		ret = ret < 0 ? ret : -EIO;
4025		end_extent_writepage(page, ret, start, page_end);
4026	}
4027	unlock_page(page);
4028	ASSERT(ret <= 0);
4029	return ret;
4030}
4031
4032void wait_on_extent_buffer_writeback(struct extent_buffer *eb)
4033{
4034	wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_WRITEBACK,
4035		       TASK_UNINTERRUPTIBLE);
4036}
4037
4038static void end_extent_buffer_writeback(struct extent_buffer *eb)
4039{
4040	clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
4041	smp_mb__after_atomic();
4042	wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK);
4043}
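
/*
 * The clear_bit() + smp_mb__after_atomic() + wake_up_bit() sequence above is
 * the counterpart to the wait_on_bit_io() call in
 * wait_on_extent_buffer_writeback(): the barrier keeps the bit clear ordered
 * before the wakeup check, so a task sleeping on EXTENT_BUFFER_WRITEBACK
 * cannot miss the wakeup.
 */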
4044
4045/*
4046 * Lock extent buffer status and pages for writeback.
4047 *
4048 * May try to flush write bio if we can't get the lock.
4049 *
4050 * Return  0 if the extent buffer doesn't need to be submitted.
4051 *           (E.g. the extent buffer is not dirty)
4052 * Return >0 if the extent buffer is submitted to bio.
4053 * Return <0 if something went wrong, no page is locked.
4054 */
4055static noinline_for_stack int lock_extent_buffer_for_io(struct extent_buffer *eb,
4056			  struct extent_page_data *epd)
4057{
4058	struct btrfs_fs_info *fs_info = eb->fs_info;
4059	int i, num_pages, failed_page_nr;
4060	int flush = 0;
4061	int ret = 0;
4062
4063	if (!btrfs_try_tree_write_lock(eb)) {
4064		ret = flush_write_bio(epd);
4065		if (ret < 0)
4066			return ret;
4067		flush = 1;
4068		btrfs_tree_lock(eb);
4069	}
4070
4071	if (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) {
4072		btrfs_tree_unlock(eb);
4073		if (!epd->sync_io)
4074			return 0;
4075		if (!flush) {
4076			ret = flush_write_bio(epd);
4077			if (ret < 0)
4078				return ret;
4079			flush = 1;
4080		}
4081		while (1) {
4082			wait_on_extent_buffer_writeback(eb);
4083			btrfs_tree_lock(eb);
4084			if (!test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags))
4085				break;
4086			btrfs_tree_unlock(eb);
4087		}
4088	}
4089
4090	/*
4091	 * We need to do this to prevent races with anyone checking if the eb is
4092	 * under IO since we can end up having no IO bits set for a short period
4093	 * of time.
4094	 */
4095	spin_lock(&eb->refs_lock);
4096	if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
4097		set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
4098		spin_unlock(&eb->refs_lock);
4099		btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
4100		percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
4101					 -eb->len,
4102					 fs_info->dirty_metadata_batch);
4103		ret = 1;
4104	} else {
4105		spin_unlock(&eb->refs_lock);
4106	}
4107
4108	btrfs_tree_unlock(eb);
4109
4110	/*
4111	 * Either we don't need to submit any tree block, or we're submitting
4112	 * subpage eb.
4113	 * Subpage metadata doesn't use page locking at all, so we can skip
4114	 * the page locking.
4115	 */
4116	if (!ret || fs_info->sectorsize < PAGE_SIZE)
4117		return ret;
4118
4119	num_pages = num_extent_pages(eb);
4120	for (i = 0; i < num_pages; i++) {
4121		struct page *p = eb->pages[i];
4122
4123		if (!trylock_page(p)) {
4124			if (!flush) {
4125				int err;
4126
4127				err = flush_write_bio(epd);
4128				if (err < 0) {
4129					ret = err;
4130					failed_page_nr = i;
4131					goto err_unlock;
4132				}
4133				flush = 1;
4134			}
4135			lock_page(p);
4136		}
4137	}
4138
4139	return ret;
4140err_unlock:
4141	/* Unlock already locked pages */
4142	for (i = 0; i < failed_page_nr; i++)
4143		unlock_page(eb->pages[i]);
4144	/*
4145	 * Clear EXTENT_BUFFER_WRITEBACK and wake up anyone waiting on it.
4146	 * Also set back EXTENT_BUFFER_DIRTY so future write attempts on this eb
4147	 * can be made, undoing everything done before.
4148	 */
4149	btrfs_tree_lock(eb);
4150	spin_lock(&eb->refs_lock);
4151	set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
4152	end_extent_buffer_writeback(eb);
4153	spin_unlock(&eb->refs_lock);
4154	percpu_counter_add_batch(&fs_info->dirty_metadata_bytes, eb->len,
4155				 fs_info->dirty_metadata_batch);
4156	btrfs_clear_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
4157	btrfs_tree_unlock(eb);
4158	return ret;
4159}
4160
4161static void set_btree_ioerr(struct page *page, struct extent_buffer *eb)
4162{
4163	struct btrfs_fs_info *fs_info = eb->fs_info;
4164
4165	btrfs_page_set_error(fs_info, page, eb->start, eb->len);
4166	if (test_and_set_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags))
4167		return;
4168
4169	/*
4170	 * If we error out, we should add back the dirty_metadata_bytes
4171	 * to make it consistent.
4172	 */
4173	percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
4174				 eb->len, fs_info->dirty_metadata_batch);
4175
4176	/*
4177	 * If writeback for a btree extent that doesn't belong to a log tree
4178	 * failed, increment the counter transaction->eb_write_errors.
4179	 * We do this because while the transaction is running and before it's
4180	 * committing (when we call filemap_fdata[write|wait]_range against
4181	 * the btree inode), we might have
4182	 * btree_inode->i_mapping->a_ops->writepages() called by the VM - if it
4183	 * returns an error or an error happens during writeback, when we're
4184	 * committing the transaction we wouldn't know about it, since the pages
4185	 * can be no longer dirty nor marked anymore for writeback (if a
4186	 * subsequent modification to the extent buffer didn't happen before the
4187	 * transaction commit), which makes filemap_fdata[write|wait]_range not
4188	 * able to find the pages tagged with SetPageError at transaction
4189	 * commit time. So if this happens we must abort the transaction,
4190	 * otherwise we commit a super block with btree roots that point to
4191	 * btree nodes/leafs whose content on disk is invalid - either garbage
4192	 * or the content of some node/leaf from a past generation that got
4193	 * cowed or deleted and is no longer valid.
4194	 *
4195	 * Note: setting AS_EIO/AS_ENOSPC in the btree inode's i_mapping would
4196	 * not be enough - we need to distinguish between log tree extents vs
4197	 * non-log tree extents, and the next filemap_fdatawait_range() call
4198	 * will catch and clear such errors in the mapping - and that call might
4199	 * be from a log sync and not from a transaction commit. Also, checking
4200	 * for the eb flag EXTENT_BUFFER_WRITE_ERR at transaction commit time is
4201	 * not done and would not be reliable - the eb might have been released
4202	 * from memory and reading it back again means that flag would not be
4203	 * set (since it's a runtime flag, not persisted on disk).
4204	 *
4205	 * Using the flags below in the btree inode also makes us achieve the
4206	 * goal of AS_EIO/AS_ENOSPC for the case where writepages() returned
4207	 * success and started writeback for all dirty pages, but before
4208	 * filemap_fdatawait_range() is called the writeback for all dirty
4209	 * pages had already finished with errors - because we were not using
4210	 * AS_EIO/AS_ENOSPC, filemap_fdatawait_range() would return success,
4211	 * as it could not know that writeback errors happened (the pages were
4212	 * no longer tagged for writeback).
4213	 */
4214	switch (eb->log_index) {
4215	case -1:
4216		set_bit(BTRFS_FS_BTREE_ERR, &fs_info->flags);
4217		break;
4218	case 0:
4219		set_bit(BTRFS_FS_LOG1_ERR, &fs_info->flags);
4220		break;
4221	case 1:
4222		set_bit(BTRFS_FS_LOG2_ERR, &fs_info->flags);
4223		break;
4224	default:
4225		BUG(); /* unexpected, logic error */
4226	}
4227}
4228
4229/*
4230 * The endio specific version which won't touch any unsafe spinlock in endio
4231 * context.
4232 */
4233static struct extent_buffer *find_extent_buffer_nolock(
4234		struct btrfs_fs_info *fs_info, u64 start)
4235{
4236	struct extent_buffer *eb;
4237
4238	rcu_read_lock();
4239	eb = radix_tree_lookup(&fs_info->buffer_radix,
4240			       start >> fs_info->sectorsize_bits);
4241	if (eb && atomic_inc_not_zero(&eb->refs)) {
4242		rcu_read_unlock();
4243		return eb;
4244	}
4245	rcu_read_unlock();
4246	return NULL;
4247}
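
/*
 * This lookup relies on the RCU read lock keeping the eb memory valid
 * (extent buffers are assumed to be freed via RCU) while
 * atomic_inc_not_zero() tries to take a reference.  If the eb has already
 * dropped to zero refs the increment fails and NULL is returned, so endio
 * callers never pick up a dying buffer and never need eb->refs_lock.
 */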
4248
4249/*
4250 * The endio function for subpage extent buffer write.
4251 *
4252 * Unlike end_bio_extent_buffer_writepage(), we only call end_page_writeback()
4253 * after all extent buffers in the page has finished their writeback.
4254 */
4255static void end_bio_subpage_eb_writepage(struct bio *bio)
4256{
4257	struct btrfs_fs_info *fs_info;
4258	struct bio_vec *bvec;
4259	struct bvec_iter_all iter_all;
4260
4261	fs_info = btrfs_sb(bio_first_page_all(bio)->mapping->host->i_sb);
4262	ASSERT(fs_info->sectorsize < PAGE_SIZE);
4263
4264	ASSERT(!bio_flagged(bio, BIO_CLONED));
4265	bio_for_each_segment_all(bvec, bio, iter_all) {
4266		struct page *page = bvec->bv_page;
4267		u64 bvec_start = page_offset(page) + bvec->bv_offset;
4268		u64 bvec_end = bvec_start + bvec->bv_len - 1;
4269		u64 cur_bytenr = bvec_start;
4270
4271		ASSERT(IS_ALIGNED(bvec->bv_len, fs_info->nodesize));
4272
4273		/* Iterate through all extent buffers in the range */
4274		while (cur_bytenr <= bvec_end) {
4275			struct extent_buffer *eb;
4276			int done;
4277
4278			/*
4279			 * Here we can't use find_extent_buffer(), as it may
4280			 * try to lock eb->refs_lock, which is not safe in endio
4281			 * context.
4282			 */
4283			eb = find_extent_buffer_nolock(fs_info, cur_bytenr);
4284			ASSERT(eb);
4285
4286			cur_bytenr = eb->start + eb->len;
4287
4288			ASSERT(test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags));
4289			done = atomic_dec_and_test(&eb->io_pages);
4290			ASSERT(done);
4291
4292			if (bio->bi_status ||
4293			    test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)) {
4294				ClearPageUptodate(page);
4295				set_btree_ioerr(page, eb);
4296			}
4297
4298			btrfs_subpage_clear_writeback(fs_info, page, eb->start,
4299						      eb->len);
4300			end_extent_buffer_writeback(eb);
4301			/*
4302			 * free_extent_buffer() will grab spinlock which is not
4303			 * safe in endio context. Thus here we manually dec
4304			 * the ref.
4305			 */
4306			atomic_dec(&eb->refs);
4307		}
4308	}
4309	bio_put(bio);
4310}
4311
4312static void end_bio_extent_buffer_writepage(struct bio *bio)
4313{
4314	struct bio_vec *bvec;
4315	struct extent_buffer *eb;
4316	int done;
4317	struct bvec_iter_all iter_all;
4318
4319	ASSERT(!bio_flagged(bio, BIO_CLONED));
4320	bio_for_each_segment_all(bvec, bio, iter_all) {
4321		struct page *page = bvec->bv_page;
4322
4323		eb = (struct extent_buffer *)page->private;
4324		BUG_ON(!eb);
4325		done = atomic_dec_and_test(&eb->io_pages);
4326
4327		if (bio->bi_status ||
4328		    test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)) {
4329			ClearPageUptodate(page);
4330			set_btree_ioerr(page, eb);
4331		}
4332
4333		end_page_writeback(page);
4334
4335		if (!done)
4336			continue;
4337
4338		end_extent_buffer_writeback(eb);
4339	}
4340
4341	bio_put(bio);
4342}
4343
4344static void prepare_eb_write(struct extent_buffer *eb)
4345{
4346	u32 nritems;
4347	unsigned long start;
4348	unsigned long end;
4349
4350	clear_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);
4351	atomic_set(&eb->io_pages, num_extent_pages(eb));
4352
4353	/* Set btree blocks beyond nritems with 0 to avoid stale content */
4354	nritems = btrfs_header_nritems(eb);
4355	if (btrfs_header_level(eb) > 0) {
4356		end = btrfs_node_key_ptr_offset(nritems);
4357		memzero_extent_buffer(eb, end, eb->len - end);
4358	} else {
4359		/*
4360		 * Leaf:
4361		 * header 0 1 2 .. N ... data_N .. data_2 data_1 data_0
4362		 */
4363		start = btrfs_item_nr_offset(nritems);
4364		end = BTRFS_LEAF_DATA_OFFSET + leaf_data_end(eb);
4365		memzero_extent_buffer(eb, start, end - start);
4366	}
4367}
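
/*
 * A sketch of the node case in prepare_eb_write() above, mirroring the leaf
 * diagram (hypothetical layout widths):
 *
 * Node:
 * header  key_ptr 0  key_ptr 1 .. key_ptr N-1 | <- zeroed up to eb->len ->
 */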
4368
4369/*
4370 * Unlike the work in write_one_eb(), we rely completely on extent locking.
4371 * Page locking is used only minimally, to keep the VMM code happy.
4372 */
4373static int write_one_subpage_eb(struct extent_buffer *eb,
4374				struct writeback_control *wbc,
4375				struct extent_page_data *epd)
4376{
4377	struct btrfs_fs_info *fs_info = eb->fs_info;
4378	struct page *page = eb->pages[0];
4379	unsigned int write_flags = wbc_to_write_flags(wbc) | REQ_META;
4380	bool no_dirty_ebs = false;
4381	int ret;
4382
4383	prepare_eb_write(eb);
4384
4385	/* clear_page_dirty_for_io() in subpage helper needs page locked */
4386	lock_page(page);
4387	btrfs_subpage_set_writeback(fs_info, page, eb->start, eb->len);
4388
4389	/* Check if this is the last dirty bit to update nr_written */
4390	no_dirty_ebs = btrfs_subpage_clear_and_test_dirty(fs_info, page,
4391							  eb->start, eb->len);
4392	if (no_dirty_ebs)
4393		clear_page_dirty_for_io(page);
4394
4395	ret = submit_extent_page(REQ_OP_WRITE | write_flags, wbc,
4396			&epd->bio_ctrl, page, eb->start, eb->len,
4397			eb->start - page_offset(page),
4398			end_bio_subpage_eb_writepage, 0, 0, false);
4399	if (ret) {
4400		btrfs_subpage_clear_writeback(fs_info, page, eb->start, eb->len);
4401		set_btree_ioerr(page, eb);
4402		unlock_page(page);
4403
4404		if (atomic_dec_and_test(&eb->io_pages))
4405			end_extent_buffer_writeback(eb);
4406		return -EIO;
4407	}
4408	unlock_page(page);
4409	/*
4410	 * Submission finished without problem; if no range of the page is
4411	 * dirty anymore, we have submitted a page.  Update nr_written in wbc.
4412	 */
4413	if (no_dirty_ebs)
4414		update_nr_written(wbc, 1);
4415	return ret;
4416}
4417
4418static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
4419			struct writeback_control *wbc,
4420			struct extent_page_data *epd)
4421{
4422	u64 disk_bytenr = eb->start;
4423	int i, num_pages;
4424	unsigned int write_flags = wbc_to_write_flags(wbc) | REQ_META;
4425	int ret = 0;
4426
4427	prepare_eb_write(eb);
4428
4429	num_pages = num_extent_pages(eb);
4430	for (i = 0; i < num_pages; i++) {
4431		struct page *p = eb->pages[i];
4432
4433		clear_page_dirty_for_io(p);
4434		set_page_writeback(p);
4435		ret = submit_extent_page(REQ_OP_WRITE | write_flags, wbc,
4436					 &epd->bio_ctrl, p, disk_bytenr,
4437					 PAGE_SIZE, 0,
4438					 end_bio_extent_buffer_writepage,
4439					 0, 0, false);
4440		if (ret) {
4441			set_btree_ioerr(p, eb);
4442			if (PageWriteback(p))
4443				end_page_writeback(p);
4444			if (atomic_sub_and_test(num_pages - i, &eb->io_pages))
4445				end_extent_buffer_writeback(eb);
4446			ret = -EIO;
4447			break;
4448		}
4449		disk_bytenr += PAGE_SIZE;
4450		update_nr_written(wbc, 1);
4451		unlock_page(p);
4452	}
4453
4454	if (unlikely(ret)) {
4455		for (; i < num_pages; i++) {
4456			struct page *p = eb->pages[i];
4457			clear_page_dirty_for_io(p);
4458			unlock_page(p);
4459		}
4460	}
4461
4462	return ret;
4463}
4464
4465/*
4466 * Submit one subpage btree page.
4467 *
4468 * The main differences to submit_eb_page() are:
4469 * - Page locking
4470 *   For subpage, we don't rely on page locking at all.
4471 *
4472 * - Flush write bio
4473 *   We only flush bio if we may be unable to fit current extent buffers into
4474 *   current bio.
4475 *
4476 * Return >=0 for the number of submitted extent buffers.
4477 * Return <0 for fatal error.
4478 */
4479static int submit_eb_subpage(struct page *page,
4480			     struct writeback_control *wbc,
4481			     struct extent_page_data *epd)
4482{
4483	struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb);
4484	int submitted = 0;
4485	u64 page_start = page_offset(page);
4486	int bit_start = 0;
4487	const int nbits = BTRFS_SUBPAGE_BITMAP_SIZE;
4488	int sectors_per_node = fs_info->nodesize >> fs_info->sectorsize_bits;
4489	int ret;
4490
4491	/* Lock and write each dirty extent buffer in the range */
4492	while (bit_start < nbits) {
4493		struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
4494		struct extent_buffer *eb;
4495		unsigned long flags;
4496		u64 start;
4497
4498		/*
4499		 * Take private lock to ensure the subpage won't be detached
4500		 * in the meantime.
4501		 */
4502		spin_lock(&page->mapping->private_lock);
4503		if (!PagePrivate(page)) {
4504			spin_unlock(&page->mapping->private_lock);
4505			break;
4506		}
4507		spin_lock_irqsave(&subpage->lock, flags);
4508		if (!((1 << bit_start) & subpage->dirty_bitmap)) {
4509			spin_unlock_irqrestore(&subpage->lock, flags);
4510			spin_unlock(&page->mapping->private_lock);
4511			bit_start++;
4512			continue;
4513		}
4514
4515		start = page_start + bit_start * fs_info->sectorsize;
4516		bit_start += sectors_per_node;
4517
4518		/*
4519		 * Here we just want to grab the eb without touching extra
4520		 * spin locks, so call find_extent_buffer_nolock().
4521		 */
4522		eb = find_extent_buffer_nolock(fs_info, start);
4523		spin_unlock_irqrestore(&subpage->lock, flags);
4524		spin_unlock(&page->mapping->private_lock);
4525
4526		/*
4527		 * The eb has already reached 0 refs, thus the lookup above
4528		 * doesn't return it. We don't need to write such an eb
4529		 * back anyway.
4530		 */
4531		if (!eb)
4532			continue;
4533
4534		ret = lock_extent_buffer_for_io(eb, epd);
4535		if (ret == 0) {
4536			free_extent_buffer(eb);
4537			continue;
4538		}
4539		if (ret < 0) {
4540			free_extent_buffer(eb);
4541			goto cleanup;
4542		}
4543		ret = write_one_subpage_eb(eb, wbc, epd);
4544		free_extent_buffer(eb);
4545		if (ret < 0)
4546			goto cleanup;
4547		submitted++;
4548	}
4549	return submitted;
4550
4551cleanup:
4552	/* We hit error, end bio for the submitted extent buffers */
4553	end_write_bio(epd, ret);
4554	return ret;
4555}
4556
4557/*
4558 * Submit all page(s) of one extent buffer.
4559 *
4560 * @page:	the page of one extent buffer
4561 * @eb_context:	to determine if we need to submit this page, if current page
4562 *		belongs to this eb, we don't need to submit
4563 *
4564 * The caller should pass each page in their bytenr order, and here we use
4565 * @eb_context to determine if we have submitted pages of one extent buffer.
4566 *
4567 * If we have, we just skip until we hit a new page that doesn't belong to
4568 * current @eb_context.
4569 *
4570 * If not, we submit all the page(s) of the extent buffer.
4571 *
4572 * Return >0 if we have submitted the extent buffer successfully.
4573 * Return 0 if we don't need to submit the page, as it's already submitted by
4574 * previous call.
4575 * Return <0 for fatal error.
4576 */
4577static int submit_eb_page(struct page *page, struct writeback_control *wbc,
4578			  struct extent_page_data *epd,
4579			  struct extent_buffer **eb_context)
4580{
4581	struct address_space *mapping = page->mapping;
4582	struct btrfs_block_group *cache = NULL;
4583	struct extent_buffer *eb;
4584	int ret;
4585
4586	if (!PagePrivate(page))
4587		return 0;
4588
4589	if (btrfs_sb(page->mapping->host->i_sb)->sectorsize < PAGE_SIZE)
4590		return submit_eb_subpage(page, wbc, epd);
4591
4592	spin_lock(&mapping->private_lock);
4593	if (!PagePrivate(page)) {
4594		spin_unlock(&mapping->private_lock);
4595		return 0;
4596	}
4597
4598	eb = (struct extent_buffer *)page->private;
4599
4600	/*
4601	 * Shouldn't happen and normally this would be a BUG_ON but no point
4602	 * crashing the machine for something we can survive anyway.
4603	 */
4604	if (WARN_ON(!eb)) {
4605		spin_unlock(&mapping->private_lock);
4606		return 0;
4607	}
4608
4609	if (eb == *eb_context) {
4610		spin_unlock(&mapping->private_lock);
4611		return 0;
4612	}
4613	ret = atomic_inc_not_zero(&eb->refs);
4614	spin_unlock(&mapping->private_lock);
4615	if (!ret)
4616		return 0;
4617
4618	if (!btrfs_check_meta_write_pointer(eb->fs_info, eb, &cache)) {
4619		/*
4620		 * If for_sync, this hole will be filled with a
4621		 * transaction commit.
4622		 */
4623		if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync)
4624			ret = -EAGAIN;
4625		else
4626			ret = 0;
4627		free_extent_buffer(eb);
4628		return ret;
4629	}
4630
4631	*eb_context = eb;
4632
4633	ret = lock_extent_buffer_for_io(eb, epd);
4634	if (ret <= 0) {
4635		btrfs_revert_meta_write_pointer(cache, eb);
4636		if (cache)
4637			btrfs_put_block_group(cache);
4638		free_extent_buffer(eb);
4639		return ret;
4640	}
4641	if (cache)
4642		btrfs_put_block_group(cache);
4643	ret = write_one_eb(eb, wbc, epd);
4644	free_extent_buffer(eb);
4645	if (ret < 0)
4646		return ret;
4647	return 1;
4648}
4649
4650int btree_write_cache_pages(struct address_space *mapping,
4651				   struct writeback_control *wbc)
4652{
4653	struct extent_buffer *eb_context = NULL;
4654	struct extent_page_data epd = {
4655		.bio_ctrl = { 0 },
4656		.extent_locked = 0,
4657		.sync_io = wbc->sync_mode == WB_SYNC_ALL,
4658	};
4659	struct btrfs_fs_info *fs_info = BTRFS_I(mapping->host)->root->fs_info;
4660	int ret = 0;
4661	int done = 0;
4662	int nr_to_write_done = 0;
4663	struct pagevec pvec;
4664	int nr_pages;
4665	pgoff_t index;
4666	pgoff_t end;		/* Inclusive */
4667	int scanned = 0;
4668	xa_mark_t tag;
4669
4670	pagevec_init(&pvec);
4671	if (wbc->range_cyclic) {
4672		index = mapping->writeback_index; /* Start from prev offset */
4673		end = -1;
4674		/*
4675		 * Starting from the beginning does not need to cycle over the
4676		 * range, so mark it as scanned.
4677		 */
4678		scanned = (index == 0);
4679	} else {
4680		index = wbc->range_start >> PAGE_SHIFT;
4681		end = wbc->range_end >> PAGE_SHIFT;
4682		scanned = 1;
4683	}
4684	if (wbc->sync_mode == WB_SYNC_ALL)
4685		tag = PAGECACHE_TAG_TOWRITE;
4686	else
4687		tag = PAGECACHE_TAG_DIRTY;
4688	btrfs_zoned_meta_io_lock(fs_info);
4689retry:
4690	if (wbc->sync_mode == WB_SYNC_ALL)
4691		tag_pages_for_writeback(mapping, index, end);
4692	while (!done && !nr_to_write_done && (index <= end) &&
4693	       (nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
4694			tag))) {
4695		unsigned i;
4696
4697		for (i = 0; i < nr_pages; i++) {
4698			struct page *page = pvec.pages[i];
4699
4700			ret = submit_eb_page(page, wbc, &epd, &eb_context);
4701			if (ret == 0)
4702				continue;
4703			if (ret < 0) {
4704				done = 1;
4705				break;
4706			}
4707
4708			/*
4709			 * the filesystem may choose to bump up nr_to_write.
4710			 * We have to make sure to honor the new nr_to_write
4711			 * at any time
4712			 */
4713			nr_to_write_done = wbc->nr_to_write <= 0;
4714		}
4715		pagevec_release(&pvec);
4716		cond_resched();
4717	}
4718	if (!scanned && !done) {
4719		/*
4720		 * We hit the last page and there is more work to be done: wrap
4721		 * back to the start of the file
4722		 */
4723		scanned = 1;
4724		index = 0;
4725		goto retry;
4726	}
4727	if (ret < 0) {
4728		end_write_bio(&epd, ret);
4729		goto out;
4730	}
4731	/*
4732	 * If something went wrong, don't allow any metadata write bio to be
4733	 * submitted.
4734	 *
4735	 * This would prevent use-after-free if we had dirty pages not
4736	 * cleaned up, which can still happen with fuzzed images.
4737	 *
4738	 * - Bad extent tree
4739	 *   Allowing existing tree block to be allocated for other trees.
4740	 *
4741	 * - Log tree operations
4742	 *   Existing tree blocks get allocated to the log tree, which bumps
4743	 *   their generation, then get cleaned in tree re-balance.
4744	 *   Such a tree block will not be written back, since it's clean,
4745	 *   thus no WRITTEN flag is set.
4746	 *   And after the log is written back, this tree block is not tracked
4747	 *   by any dirty extent_io_tree.
4748	 *
4749	 * - Offending tree block gets re-dirtied from its original owner
4750	 *   Since it has a bumped generation and no WRITTEN flag, it can be
4751	 *   reused without COWing. This tree block will not be tracked
4752	 *   by btrfs_transaction::dirty_pages.
4753	 *
4754	 *   Now such dirty tree block will not be cleaned by any dirty
4755	 *   extent io tree. Thus we don't want to submit such wild eb
4756	 *   if the fs already has error.
4757	 */
4758	if (!test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
4759		ret = flush_write_bio(&epd);
4760	} else {
4761		ret = -EROFS;
4762		end_write_bio(&epd, ret);
4763	}
4764out:
4765	btrfs_zoned_meta_io_unlock(fs_info);
4766	return ret;
4767}
4768
4769/**
4770 * Walk the list of dirty pages of the given address space and write all of them.
4771 *
4772 * @mapping: address space structure to write
4773 * @wbc:     subtract the number of written pages from *@wbc->nr_to_write
4774 * @epd:     holds context for the write, namely the bio
4775 *
4776 * If a page is already under I/O, write_cache_pages() skips it, even
4777 * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
4778 * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
4779 * and msync() need to guarantee that all the data which was dirty at the time
4780 * the call was made get new I/O started against them.  If wbc->sync_mode is
4781 * WB_SYNC_ALL then we were called for data integrity and we must wait for
4782 * existing IO to complete.
4783 */
4784static int extent_write_cache_pages(struct address_space *mapping,
4785			     struct writeback_control *wbc,
4786			     struct extent_page_data *epd)
4787{
4788	struct inode *inode = mapping->host;
4789	int ret = 0;
4790	int done = 0;
4791	int nr_to_write_done = 0;
4792	struct pagevec pvec;
4793	int nr_pages;
4794	pgoff_t index;
4795	pgoff_t end;		/* Inclusive */
4796	pgoff_t done_index;
4797	int range_whole = 0;
4798	int scanned = 0;
4799	xa_mark_t tag;
4800
4801	/*
4802	 * We have to hold onto the inode so that ordered extents can do their
4803	 * work when the IO finishes.  The alternative to this is failing to add
4804	 * an ordered extent if the igrab() fails there and that is a huge pain
4805	 * to deal with, so instead just hold onto the inode throughout the
4806	 * writepages operation.  If it fails here we are freeing up the inode
4807	 * anyway and we'd rather not waste our time writing out stuff that is
4808	 * going to be truncated anyway.
4809	 */
4810	if (!igrab(inode))
4811		return 0;
4812
4813	pagevec_init(&pvec);
4814	if (wbc->range_cyclic) {
4815		index = mapping->writeback_index; /* Start from prev offset */
4816		end = -1;
4817		/*
4818		 * Starting from the beginning does not need to cycle over the
4819		 * range, so mark it as scanned.
4820		 */
4821		scanned = (index == 0);
4822	} else {
4823		index = wbc->range_start >> PAGE_SHIFT;
4824		end = wbc->range_end >> PAGE_SHIFT;
4825		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
4826			range_whole = 1;
4827		scanned = 1;
4828	}
4829
4830	/*
4831	 * We do the tagged writepage as long as the snapshot flush bit is set
4832	 * and we are the first one to do the filemap_flush() on this inode.
4833	 *
4834	 * The nr_to_write == LONG_MAX is needed to make sure other flushers do
4835	 * not race in and drop the bit.
4836	 */
4837	if (range_whole && wbc->nr_to_write == LONG_MAX &&
4838	    test_and_clear_bit(BTRFS_INODE_SNAPSHOT_FLUSH,
4839			       &BTRFS_I(inode)->runtime_flags))
4840		wbc->tagged_writepages = 1;
4841
4842	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
4843		tag = PAGECACHE_TAG_TOWRITE;
4844	else
4845		tag = PAGECACHE_TAG_DIRTY;
4846retry:
4847	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
4848		tag_pages_for_writeback(mapping, index, end);
4849	done_index = index;
4850	while (!done && !nr_to_write_done && (index <= end) &&
4851			(nr_pages = pagevec_lookup_range_tag(&pvec, mapping,
4852						&index, end, tag))) {
4853		unsigned i;
4854
4855		for (i = 0; i < nr_pages; i++) {
4856			struct page *page = pvec.pages[i];
4857
4858			done_index = page->index + 1;
4859			/*
4860			 * At this point we hold neither the i_pages lock nor
4861			 * the page lock: the page may be truncated or
4862			 * invalidated (changing page->mapping to NULL),
4863			 * or even swizzled back from swapper_space to
4864			 * tmpfs file mapping
4865			 */
4866			if (!trylock_page(page)) {
4867				ret = flush_write_bio(epd);
4868				BUG_ON(ret < 0);
4869				lock_page(page);
4870			}
4871
4872			if (unlikely(page->mapping != mapping)) {
4873				unlock_page(page);
4874				continue;
4875			}
4876
4877			if (wbc->sync_mode != WB_SYNC_NONE) {
4878				if (PageWriteback(page)) {
4879					ret = flush_write_bio(epd);
4880					BUG_ON(ret < 0);
4881				}
4882				wait_on_page_writeback(page);
4883			}
4884
4885			if (PageWriteback(page) ||
4886			    !clear_page_dirty_for_io(page)) {
4887				unlock_page(page);
4888				continue;
4889			}
4890
4891			ret = __extent_writepage(page, wbc, epd);
4892			if (ret < 0) {
4893				done = 1;
4894				break;
4895			}
4896
4897			/*
4898			 * the filesystem may choose to bump up nr_to_write.
4899			 * We have to make sure to honor the new nr_to_write
4900			 * at any time
4901			 */
4902			nr_to_write_done = wbc->nr_to_write <= 0;
4903		}
4904		pagevec_release(&pvec);
4905		cond_resched();
4906	}
4907	if (!scanned && !done) {
4908		/*
4909		 * We hit the last page and there is more work to be done: wrap
4910		 * back to the start of the file
4911		 */
4912		scanned = 1;
4913		index = 0;
4914
4915		/*
4916		 * If we're looping we could run into a page that is locked by a
4917		 * writer and that writer could be waiting on writeback for a
4918		 * page in our current bio, and thus deadlock, so flush the
4919		 * write bio here.
4920		 */
4921		ret = flush_write_bio(epd);
4922		if (!ret)
4923			goto retry;
4924	}
4925
4926	if (wbc->range_cyclic || (wbc->nr_to_write > 0 && range_whole))
4927		mapping->writeback_index = done_index;
4928
4929	btrfs_add_delayed_iput(inode);
4930	return ret;
4931}
4932
4933int extent_write_full_page(struct page *page, struct writeback_control *wbc)
4934{
4935	int ret;
4936	struct extent_page_data epd = {
4937		.bio_ctrl = { 0 },
4938		.extent_locked = 0,
4939		.sync_io = wbc->sync_mode == WB_SYNC_ALL,
4940	};
4941
4942	ret = __extent_writepage(page, wbc, &epd);
4943	ASSERT(ret <= 0);
4944	if (ret < 0) {
4945		end_write_bio(&epd, ret);
4946		return ret;
4947	}
4948
4949	ret = flush_write_bio(&epd);
4950	ASSERT(ret <= 0);
4951	return ret;
4952}
4953
4954int extent_write_locked_range(struct inode *inode, u64 start, u64 end,
4955			      int mode)
4956{
4957	int ret = 0;
4958	struct address_space *mapping = inode->i_mapping;
4959	struct page *page;
4960	unsigned long nr_pages = (end - start + PAGE_SIZE) >>
4961		PAGE_SHIFT;
4962
4963	struct extent_page_data epd = {
4964		.bio_ctrl = { 0 },
4965		.extent_locked = 1,
4966		.sync_io = mode == WB_SYNC_ALL,
4967	};
4968	struct writeback_control wbc_writepages = {
4969		.sync_mode	= mode,
4970		.nr_to_write	= nr_pages * 2,
4971		.range_start	= start,
4972		.range_end	= end + 1,
4973		/* We're called from an async helper function */
4974		.punt_to_cgroup	= 1,
4975		.no_cgroup_owner = 1,
4976	};
4977
4978	wbc_attach_fdatawrite_inode(&wbc_writepages, inode);
4979	while (start <= end) {
4980		page = find_get_page(mapping, start >> PAGE_SHIFT);
4981		if (clear_page_dirty_for_io(page))
4982			ret = __extent_writepage(page, &wbc_writepages, &epd);
4983		else {
4984			btrfs_writepage_endio_finish_ordered(BTRFS_I(inode),
4985					page, start, start + PAGE_SIZE - 1, 1);
4986			unlock_page(page);
4987		}
4988		put_page(page);
4989		start += PAGE_SIZE;
4990	}
4991
4992	ASSERT(ret <= 0);
4993	if (ret == 0)
4994		ret = flush_write_bio(&epd);
4995	else
4996		end_write_bio(&epd, ret);
4997
4998	wbc_detach_inode(&wbc_writepages);
4999	return ret;
5000}
5001
5002int extent_writepages(struct address_space *mapping,
5003		      struct writeback_control *wbc)
5004{
5005	int ret = 0;
5006	struct extent_page_data epd = {
5007		.bio_ctrl = { 0 },
5008		.extent_locked = 0,
5009		.sync_io = wbc->sync_mode == WB_SYNC_ALL,
5010	};
5011
5012	ret = extent_write_cache_pages(mapping, wbc, &epd);
5013	ASSERT(ret <= 0);
5014	if (ret < 0) {
5015		end_write_bio(&epd, ret);
5016		return ret;
5017	}
5018	ret = flush_write_bio(&epd);
5019	return ret;
5020}
5021
5022void extent_readahead(struct readahead_control *rac)
5023{
5024	struct btrfs_bio_ctrl bio_ctrl = { 0 };
5025	struct page *pagepool[16];
5026	struct extent_map *em_cached = NULL;
5027	u64 prev_em_start = (u64)-1;
5028	int nr;
5029
5030	while ((nr = readahead_page_batch(rac, pagepool))) {
5031		u64 contig_start = readahead_pos(rac);
5032		u64 contig_end = contig_start + readahead_batch_length(rac) - 1;
5033
5034		contiguous_readpages(pagepool, nr, contig_start, contig_end,
5035				&em_cached, &bio_ctrl, &prev_em_start);
5036	}
5037
5038	if (em_cached)
5039		free_extent_map(em_cached);
5040
5041	if (bio_ctrl.bio) {
5042		if (submit_one_bio(bio_ctrl.bio, 0, bio_ctrl.bio_flags))
5043			return;
5044	}
5045}
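
/*
 * An example with hypothetical numbers: pagepool holds at most 16 pages per
 * batch, so with 4K pages a 128K readahead starting at file offset 1M is
 * handled as two contiguous_readpages() calls, the first covering
 * contig_start = 1M .. contig_end = 1M + 64K - 1 and the second the
 * following 64K.
 */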
5046
5047/*
5048 * basic invalidatepage code, this waits on any locked or writeback
5049 * ranges corresponding to the page, and then deletes any extent state
5050 * records from the tree
5051 */
5052int extent_invalidatepage(struct extent_io_tree *tree,
5053			  struct page *page, unsigned long offset)
5054{
5055	struct extent_state *cached_state = NULL;
5056	u64 start = page_offset(page);
5057	u64 end = start + PAGE_SIZE - 1;
5058	size_t blocksize = page->mapping->host->i_sb->s_blocksize;
5059
5060	/* This function is only called for the btree inode */
5061	ASSERT(tree->owner == IO_TREE_BTREE_INODE_IO);
5062
5063	start += ALIGN(offset, blocksize);
5064	if (start > end)
5065		return 0;
5066
5067	lock_extent_bits(tree, start, end, &cached_state);
5068	wait_on_page_writeback(page);
5069
5070	/*
5071	 * Currently for btree io tree, only EXTENT_LOCKED is utilized,
5072	 * so here we only need to unlock the extent range to free any
5073	 * existing extent state.
5074	 */
5075	unlock_extent_cached(tree, start, end, &cached_state);
5076	return 0;
5077}
5078
5079/*
5080 * a helper for releasepage, this tests for areas of the page that
5081 * are locked or under IO and drops the related state bits if it is safe
5082 * to drop the page.
5083 */
5084static int try_release_extent_state(struct extent_io_tree *tree,
5085				    struct page *page, gfp_t mask)
5086{
5087	u64 start = page_offset(page);
5088	u64 end = start + PAGE_SIZE - 1;
5089	int ret = 1;
5090
5091	if (test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL)) {
5092		ret = 0;
5093	} else {
5094		/*
5095		 * At this point we can safely clear everything except the
5096		 * locked bit, the nodatasum bit and the delalloc new bit.
5097		 * The delalloc new bit will be cleared by ordered extent
5098		 * completion.
5099		 */
5100		ret = __clear_extent_bit(tree, start, end,
5101			 ~(EXTENT_LOCKED | EXTENT_NODATASUM | EXTENT_DELALLOC_NEW),
5102			 0, 0, NULL, mask, NULL);
5103
5104		/* If clear_extent_bit failed due to -ENOMEM,
5105		 * we can't allow the release to continue.
5106		 */
5107		if (ret < 0)
5108			ret = 0;
5109		else
5110			ret = 1;
5111	}
5112	return ret;
5113}
5114
5115/*
5116 * a helper for releasepage.  As long as there are no locked extents
5117 * in the range corresponding to the page, both state records and extent
5118 * map records are removed
5119 */
5120int try_release_extent_mapping(struct page *page, gfp_t mask)
5121{
5122	struct extent_map *em;
5123	u64 start = page_offset(page);
5124	u64 end = start + PAGE_SIZE - 1;
5125	struct btrfs_inode *btrfs_inode = BTRFS_I(page->mapping->host);
5126	struct extent_io_tree *tree = &btrfs_inode->io_tree;
5127	struct extent_map_tree *map = &btrfs_inode->extent_tree;
5128
5129	if (gfpflags_allow_blocking(mask) &&
5130	    page->mapping->host->i_size > SZ_16M) {
5131		u64 len;
5132		while (start <= end) {
5133			struct btrfs_fs_info *fs_info;
5134			u64 cur_gen;
5135
5136			len = end - start + 1;
5137			write_lock(&map->lock);
5138			em = lookup_extent_mapping(map, start, len);
5139			if (!em) {
5140				write_unlock(&map->lock);
5141				break;
5142			}
5143			if (test_bit(EXTENT_FLAG_PINNED, &em->flags) ||
5144			    em->start != start) {
5145				write_unlock(&map->lock);
5146				free_extent_map(em);
5147				break;
5148			}
5149			if (test_range_bit(tree, em->start,
5150					   extent_map_end(em) - 1,
5151					   EXTENT_LOCKED, 0, NULL))
5152				goto next;
5153			/*
5154			 * If it's not in the list of modified extents, used
5155			 * by a fast fsync, we can remove it. If it's being
5156			 * logged we can safely remove it since fsync took an
5157			 * extra reference on the em.
5158			 */
5159			if (list_empty(&em->list) ||
5160			    test_bit(EXTENT_FLAG_LOGGING, &em->flags))
5161				goto remove_em;
5162			/*
5163			 * If it's in the list of modified extents, remove it
5164			 * only if its generation is older than the current one,
5165			 * in which case we don't need it for a fast fsync.
5166			 * Otherwise don't remove it, we could be racing with an
5167			 * ongoing fast fsync that could miss the new extent.
5168			 */
5169			fs_info = btrfs_inode->root->fs_info;
5170			spin_lock(&fs_info->trans_lock);
5171			cur_gen = fs_info->generation;
5172			spin_unlock(&fs_info->trans_lock);
5173			if (em->generation >= cur_gen)
5174				goto next;
5175remove_em:
5176			/*
5177			 * We only remove extent maps that are not in the list of
5178			 * modified extents or that are in the list but with a
5179			 * generation lower than the current generation, so there
5180			 * is no need to set the full fsync flag on the inode (it
5181			 * hurts the fsync performance for workloads with a data
5182			 * size that exceeds or is close to the system's memory).
5183			 */
5184			remove_extent_mapping(map, em);
5185			/* once for the rb tree */
5186			free_extent_map(em);
5187next:
5188			start = extent_map_end(em);
5189			write_unlock(&map->lock);
5190
5191			/* once for us */
5192			free_extent_map(em);
5193
5194			cond_resched(); /* Allow large-extent preemption. */
5195		}
5196	}
5197	return try_release_extent_state(tree, page, mask);
5198}
5199
5200/*
5201 * helper function for fiemap, which doesn't want to see any holes.
5202 * This maps until we find something past 'last'
5203 */
5204static struct extent_map *get_extent_skip_holes(struct btrfs_inode *inode,
5205						u64 offset, u64 last)
5206{
5207	u64 sectorsize = btrfs_inode_sectorsize(inode);
5208	struct extent_map *em;
5209	u64 len;
5210
5211	if (offset >= last)
5212		return NULL;
5213
5214	while (1) {
5215		len = last - offset;
5216		if (len == 0)
5217			break;
5218		len = ALIGN(len, sectorsize);
5219		em = btrfs_get_extent_fiemap(inode, offset, len);
5220		if (IS_ERR_OR_NULL(em))
5221			return em;
5222
5223		/* if this isn't a hole return it */
5224		if (em->block_start != EXTENT_MAP_HOLE)
5225			return em;
5226
5227		/* this is a hole, advance to the next extent */
5228		offset = extent_map_end(em);
5229		free_extent_map(em);
5230		if (offset >= last)
5231			break;
5232	}
5233	return NULL;
5234}
5235
5236/*
5237 * Cache for the previous fiemap extent.
5238 *
5239 * Will be used for merging fiemap extents.
5240 */
5241struct fiemap_cache {
5242	u64 offset;
5243	u64 phys;
5244	u64 len;
5245	u32 flags;
5246	bool cached;
5247};
5248
5249/*
5250 * Helper to submit fiemap extent.
5251 *
5252 * Will try to merge current fiemap extent specified by @offset, @phys,
5253 * @len and @flags with cached one.
5254 * Only when we fail to merge will the cached one be submitted as a
5255 * fiemap extent.
5256 *
5257 * Return value is the same as fiemap_fill_next_extent().
5258 */
5259static int emit_fiemap_extent(struct fiemap_extent_info *fieinfo,
5260				struct fiemap_cache *cache,
5261				u64 offset, u64 phys, u64 len, u32 flags)
5262{
5263	int ret = 0;
5264
5265	if (!cache->cached)
5266		goto assign;
5267
5268	/*
5269	 * Sanity check, extent_fiemap() should have ensured that new
5270	 * fiemap extent won't overlap with cached one.
5271	 * Not recoverable.
5272	 *
5273	 * NOTE: Physical address can overlap, due to compression
5274	 */
5275	if (cache->offset + cache->len > offset) {
5276		WARN_ON(1);
5277		return -EINVAL;
5278	}
5279
5280	/*
5281	 * Only merges fiemap extents if
5282	 * 1) Their logical addresses are contiguous
5283	 *
5284	 * 2) Their physical addresses are contiguous
5285	 *    So truly compressed (physical size smaller than logical size)
5286	 *    extents won't get merged with each other
5287	 *
5288	 * 3) Share same flags except FIEMAP_EXTENT_LAST
5289	 *    So regular extent won't get merged with prealloc extent
5290	 */
5291	if (cache->offset + cache->len  == offset &&
5292	    cache->phys + cache->len == phys  &&
5293	    (cache->flags & ~FIEMAP_EXTENT_LAST) ==
5294			(flags & ~FIEMAP_EXTENT_LAST)) {
5295		cache->len += len;
5296		cache->flags |= flags;
5297		goto try_submit_last;
5298	}
5299
5300	/* Not mergeable, need to submit cached one */
5301	ret = fiemap_fill_next_extent(fieinfo, cache->offset, cache->phys,
5302				      cache->len, cache->flags);
5303	cache->cached = false;
5304	if (ret)
5305		return ret;
5306assign:
5307	cache->cached = true;
5308	cache->offset = offset;
5309	cache->phys = phys;
5310	cache->len = len;
5311	cache->flags = flags;
5312try_submit_last:
5313	if (cache->flags & FIEMAP_EXTENT_LAST) {
5314		ret = fiemap_fill_next_extent(fieinfo, cache->offset,
5315				cache->phys, cache->len, cache->flags);
5316		cache->cached = false;
5317	}
5318	return ret;
5319}
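
/*
 * A worked example of the merge rules above, with hypothetical values: a
 * cached extent (offset 0, phys 1M, len 4K, flags 0) followed by a new
 * extent (offset 4K, phys 1M + 4K, len 4K, flags 0) merges into a single
 * (offset 0, phys 1M, len 8K) entry, since both the logical and the physical
 * ranges are contiguous and the flags match.  Had the new extent started at
 * phys 2M instead, the cached entry would be emitted through
 * fiemap_fill_next_extent() and the new one would take its place in the
 * cache.
 */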
5320
5321/*
5322 * Emit last fiemap cache
5323 *
5324 * The last fiemap cache may still be cached in the following case:
5325 * 0		      4k		    8k
5326 * |<- Fiemap range ->|
5327 * |<------------  First extent ----------->|
5328 *
5329 * In this case, the first extent range will be cached but not emitted.
5330 * So we must emit it before ending extent_fiemap().
5331 */
5332static int emit_last_fiemap_cache(struct fiemap_extent_info *fieinfo,
5333				  struct fiemap_cache *cache)
5334{
5335	int ret;
5336
5337	if (!cache->cached)
5338		return 0;
5339
5340	ret = fiemap_fill_next_extent(fieinfo, cache->offset, cache->phys,
5341				      cache->len, cache->flags);
5342	cache->cached = false;
5343	if (ret > 0)
5344		ret = 0;
5345	return ret;
5346}
5347
5348int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
5349		  u64 start, u64 len)
5350{
5351	int ret = 0;
5352	u64 off;
5353	u64 max = start + len;
5354	u32 flags = 0;
5355	u32 found_type;
5356	u64 last;
5357	u64 last_for_get_extent = 0;
5358	u64 disko = 0;
5359	u64 isize = i_size_read(&inode->vfs_inode);
5360	struct btrfs_key found_key;
5361	struct extent_map *em = NULL;
5362	struct extent_state *cached_state = NULL;
5363	struct btrfs_path *path;
5364	struct btrfs_root *root = inode->root;
5365	struct fiemap_cache cache = { 0 };
5366	struct ulist *roots;
5367	struct ulist *tmp_ulist;
5368	int end = 0;
5369	u64 em_start = 0;
5370	u64 em_len = 0;
5371	u64 em_end = 0;
5372
5373	if (len == 0)
5374		return -EINVAL;
5375
5376	path = btrfs_alloc_path();
5377	if (!path)
5378		return -ENOMEM;
5379
5380	roots = ulist_alloc(GFP_KERNEL);
5381	tmp_ulist = ulist_alloc(GFP_KERNEL);
5382	if (!roots || !tmp_ulist) {
5383		ret = -ENOMEM;
5384		goto out_free_ulist;
5385	}
5386
5387	/*
5388	 * We can't initialize that to 'start' as this could miss extents due
5389	 * to extent item merging
5390	 */
5391	off = 0;
5392	start = round_down(start, btrfs_inode_sectorsize(inode));
5393	len = round_up(max, btrfs_inode_sectorsize(inode)) - start;
5394
5395	/*
5396	 * lookup the last file extent.  We're not using i_size here
5397	 * because there might be preallocation past i_size
5398	 */
5399	ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(inode), -1,
5400				       0);
5401	if (ret < 0) {
5402		goto out_free_ulist;
5403	} else {
5404		WARN_ON(!ret);
5405		if (ret == 1)
5406			ret = 0;
5407	}
5408
5409	path->slots[0]--;
5410	btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
5411	found_type = found_key.type;
5412
5413	/* No extents, but there might be delalloc bits */
5414	if (found_key.objectid != btrfs_ino(inode) ||
5415	    found_type != BTRFS_EXTENT_DATA_KEY) {
5416		/* have to trust i_size as the end */
5417		last = (u64)-1;
5418		last_for_get_extent = isize;
5419	} else {
5420		/*
5421		 * remember the start of the last extent.  There are a
5422		 * bunch of different factors that go into the length of the
5423		 * extent, so it's much less complex to remember where it started
5424		 */
5425		last = found_key.offset;
5426		last_for_get_extent = last + 1;
5427	}
5428	btrfs_release_path(path);
5429
5430	/*
5431	 * we might have some extents allocated but more delalloc past those
5432	 * extents.  so, we trust isize unless the start of the last extent is
5433	 * beyond isize
5434	 */
5435	if (last < isize) {
5436		last = (u64)-1;
5437		last_for_get_extent = isize;
5438	}
5439
5440	lock_extent_bits(&inode->io_tree, start, start + len - 1,
5441			 &cached_state);
5442
5443	em = get_extent_skip_holes(inode, start, last_for_get_extent);
5444	if (!em)
5445		goto out;
5446	if (IS_ERR(em)) {
5447		ret = PTR_ERR(em);
5448		goto out;
5449	}
5450
5451	while (!end) {
5452		u64 offset_in_extent = 0;
5453
5454		/* break if the extent we found is outside the range */
5455		if (em->start >= max || extent_map_end(em) < off)
5456			break;
5457
5458		/*
5459		 * get_extent may return an extent that starts before our
5460		 * requested range.  We have to make sure the ranges
5461		 * we return to fiemap always move forward and don't
5462		 * overlap, so adjust the offsets here
5463		 */
5464		em_start = max(em->start, off);
5465
5466		/*
5467		 * record the offset from the start of the extent
5468		 * for adjusting the disk offset below.  Only do this if the
5469		 * extent isn't compressed since our in ram offset may be past
5470		 * what we have actually allocated on disk.
5471		 */
5472		if (!test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
5473			offset_in_extent = em_start - em->start;
5474		em_end = extent_map_end(em);
5475		em_len = em_end - em_start;
5476		flags = 0;
5477		if (em->block_start < EXTENT_MAP_LAST_BYTE)
5478			disko = em->block_start + offset_in_extent;
5479		else
5480			disko = 0;
5481
5482		/*
5483		 * bump off for our next call to get_extent
5484		 */
5485		off = extent_map_end(em);
5486		if (off >= max)
5487			end = 1;
5488
5489		if (em->block_start == EXTENT_MAP_LAST_BYTE) {
5490			end = 1;
5491			flags |= FIEMAP_EXTENT_LAST;
5492		} else if (em->block_start == EXTENT_MAP_INLINE) {
5493			flags |= (FIEMAP_EXTENT_DATA_INLINE |
5494				  FIEMAP_EXTENT_NOT_ALIGNED);
5495		} else if (em->block_start == EXTENT_MAP_DELALLOC) {
5496			flags |= (FIEMAP_EXTENT_DELALLOC |
5497				  FIEMAP_EXTENT_UNKNOWN);
5498		} else if (fieinfo->fi_extents_max) {
5499			u64 bytenr = em->block_start -
5500				(em->start - em->orig_start);
5501
5502			/*
5503			 * As btrfs supports shared space, this information
5504			 * can be exported to userspace tools via
5505			 * flag FIEMAP_EXTENT_SHARED.  If fi_extents_max == 0
5506			 * then we're just getting a count and we can skip the
5507			 * lookup stuff.
5508			 */
5509			ret = btrfs_check_shared(root, btrfs_ino(inode),
5510						 bytenr, roots, tmp_ulist);
5511			if (ret < 0)
5512				goto out_free;
5513			if (ret)
5514				flags |= FIEMAP_EXTENT_SHARED;
5515			ret = 0;
5516		}
5517		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
5518			flags |= FIEMAP_EXTENT_ENCODED;
5519		if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
5520			flags |= FIEMAP_EXTENT_UNWRITTEN;
5521
5522		free_extent_map(em);
5523		em = NULL;
5524		if ((em_start >= last) || em_len == (u64)-1 ||
5525		   (last == (u64)-1 && isize <= em_end)) {
5526			flags |= FIEMAP_EXTENT_LAST;
5527			end = 1;
5528		}
5529
5530		/* now scan forward to see if this is really the last extent. */
5531		em = get_extent_skip_holes(inode, off, last_for_get_extent);
5532		if (IS_ERR(em)) {
5533			ret = PTR_ERR(em);
5534			goto out;
5535		}
5536		if (!em) {
5537			flags |= FIEMAP_EXTENT_LAST;
5538			end = 1;
5539		}
5540		ret = emit_fiemap_extent(fieinfo, &cache, em_start, disko,
5541					   em_len, flags);
5542		if (ret) {
5543			if (ret == 1)
5544				ret = 0;
5545			goto out_free;
5546		}
5547	}
5548out_free:
5549	if (!ret)
5550		ret = emit_last_fiemap_cache(fieinfo, &cache);
5551	free_extent_map(em);
5552out:
5553	unlock_extent_cached(&inode->io_tree, start, start + len - 1,
5554			     &cached_state);
5555
5556out_free_ulist:
5557	btrfs_free_path(path);
5558	ulist_free(roots);
5559	ulist_free(tmp_ulist);
5560	return ret;
5561}
5562
5563static void __free_extent_buffer(struct extent_buffer *eb)
5564{
5565	kmem_cache_free(extent_buffer_cache, eb);
5566}
5567
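/*
 * Return true if the extent buffer still has IO in flight on its pages, or is
 * dirty or under writeback.
 */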
5568int extent_buffer_under_io(const struct extent_buffer *eb)
5569{
5570	return (atomic_read(&eb->io_pages) ||
5571		test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) ||
5572		test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
5573}
5574
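/*
 * Return true if any extent buffer or pending read in this page's range still
 * relies on page::private.  Caller must hold the mapping's private_lock.
 */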
5575static bool page_range_has_eb(struct btrfs_fs_info *fs_info, struct page *page)
5576{
5577	struct btrfs_subpage *subpage;
5578
5579	lockdep_assert_held(&page->mapping->private_lock);
5580
5581	if (PagePrivate(page)) {
5582		subpage = (struct btrfs_subpage *)page->private;
5583		if (atomic_read(&subpage->eb_refs))
5584			return true;
5585		/*
5586		 * Even if there are no eb refs here, we may still have an
5587		 * end_page_read() call relying on page::private.
5588		 */
5589		if (atomic_read(&subpage->readers))
5590			return true;
5591	}
5592	return false;
5593}
5594
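/*
 * Detach @eb from @page, clearing the page private data once no other extent
 * buffer or in-flight read still needs it.  Handles both the regular
 * (sectorsize == PAGE_SIZE) case and the subpage case.
 */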
5595static void detach_extent_buffer_page(struct extent_buffer *eb, struct page *page)
5596{
5597	struct btrfs_fs_info *fs_info = eb->fs_info;
5598	const bool mapped = !test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
5599
5600	/*
5601	 * For mapped eb, we're going to change the page private, which should
5602	 * be done under the private_lock.
5603	 */
5604	if (mapped)
5605		spin_lock(&page->mapping->private_lock);
5606
5607	if (!PagePrivate(page)) {
5608		if (mapped)
5609			spin_unlock(&page->mapping->private_lock);
5610		return;
5611	}
5612
5613	if (fs_info->sectorsize == PAGE_SIZE) {
5614		/*
5615		 * We do this since we'll remove the pages after we've
5616		 * removed the eb from the radix tree, so we could race
5617		 * and have this page now attached to a new eb.  So
5618		 * only clear page_private if it's still connected to
5619		 * this eb.
5620		 */
5621		if (PagePrivate(page) &&
5622		    page->private == (unsigned long)eb) {
5623			BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
5624			BUG_ON(PageDirty(page));
5625			BUG_ON(PageWriteback(page));
5626			/*
5627			 * We need to make sure we haven't been attached
5628			 * to a new eb.
5629			 */
5630			detach_page_private(page);
5631		}
5632		if (mapped)
5633			spin_unlock(&page->mapping->private_lock);
5634		return;
5635	}
5636
5637	/*
5638	 * For subpage, we can have a dummy eb with page private.  In this case,
5639	 * we can directly detach the private, as such a page is only attached to
5640	 * one dummy eb, with no sharing.
5641	 */
5642	if (!mapped) {
5643		btrfs_detach_subpage(fs_info, page);
5644		return;
5645	}
5646
5647	btrfs_page_dec_eb_refs(fs_info, page);
5648
5649	/*
5650	 * We can only detach the page private if there are no other ebs in the
5651	 * page range and no unfinished IO.
5652	 */
5653	if (!page_range_has_eb(fs_info, page))
5654		btrfs_detach_subpage(fs_info, page);
5655
5656	spin_unlock(&page->mapping->private_lock);
5657}
5658
5659/* Release all pages attached to the extent buffer */
5660static void btrfs_release_extent_buffer_pages(struct extent_buffer *eb)
5661{
5662	int i;
5663	int num_pages;
5664
5665	ASSERT(!extent_buffer_under_io(eb));
5666
5667	num_pages = num_extent_pages(eb);
5668	for (i = 0; i < num_pages; i++) {
5669		struct page *page = eb->pages[i];
5670
5671		if (!page)
5672			continue;
5673
5674		detach_extent_buffer_page(eb, page);
5675
5676		/* One for when we allocated the page */
5677		put_page(page);
5678	}
5679}
5680
5681/*
5682 * Helper for releasing the extent buffer.
5683 */
5684static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
5685{
5686	btrfs_release_extent_buffer_pages(eb);
5687	btrfs_leak_debug_del(&eb->fs_info->eb_leak_lock, &eb->leak_list);
5688	__free_extent_buffer(eb);
5689}
5690
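/*
 * Allocate and initialize a bare extent buffer for the given range.  No pages
 * are attached here; the __GFP_NOFAIL allocation means this cannot return
 * NULL.
 */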
5691static struct extent_buffer *
5692__alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start,
5693		      unsigned long len)
5694{
5695	struct extent_buffer *eb = NULL;
5696
5697	eb = kmem_cache_zalloc(extent_buffer_cache, GFP_NOFS|__GFP_NOFAIL);
5698	eb->start = start;
5699	eb->len = len;
5700	eb->fs_info = fs_info;
5701	eb->bflags = 0;
5702	init_rwsem(&eb->lock);
5703
5704	btrfs_leak_debug_add(&fs_info->eb_leak_lock, &eb->leak_list,
5705			     &fs_info->allocated_ebs);
5706	INIT_LIST_HEAD(&eb->release_list);
5707
5708	spin_lock_init(&eb->refs_lock);
5709	atomic_set(&eb->refs, 1);
5710	atomic_set(&eb->io_pages, 0);
5711
5712	ASSERT(len <= BTRFS_MAX_METADATA_BLOCKSIZE);
5713
5714	return eb;
5715}
5716
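/*
 * Allocate an UNMAPPED copy of @src backed by freshly allocated pages, copy
 * the contents over and mark the new buffer uptodate.  Returns NULL if any
 * allocation fails.
 */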
5717struct extent_buffer *btrfs_clone_extent_buffer(const struct extent_buffer *src)
5718{
5719	int i;
5720	struct page *p;
5721	struct extent_buffer *new;
5722	int num_pages = num_extent_pages(src);
5723
5724	new = __alloc_extent_buffer(src->fs_info, src->start, src->len);
5725	if (new == NULL)
5726		return NULL;
5727
5728	/*
5729	 * Set UNMAPPED before calling btrfs_release_extent_buffer(), as
5730	 * btrfs_release_extent_buffer() has different behavior for
5731	 * UNMAPPED subpage extent buffers.
5732	 */
5733	set_bit(EXTENT_BUFFER_UNMAPPED, &new->bflags);
5734
5735	for (i = 0; i < num_pages; i++) {
5736		int ret;
5737
5738		p = alloc_page(GFP_NOFS);
5739		if (!p) {
5740			btrfs_release_extent_buffer(new);
5741			return NULL;
5742		}
5743		ret = attach_extent_buffer_page(new, p, NULL);
5744		if (ret < 0) {
5745			put_page(p);
5746			btrfs_release_extent_buffer(new);
5747			return NULL;
5748		}
5749		WARN_ON(PageDirty(p));
5750		new->pages[i] = p;
5751		copy_page(page_address(p), page_address(src->pages[i]));
5752	}
5753	set_extent_buffer_uptodate(new);
5754
5755	return new;
5756}
5757
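/*
 * Allocate an UNMAPPED extent buffer backed by freshly allocated pages and
 * mark it uptodate.  Returns NULL if any allocation fails.
 */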
5758struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
5759						  u64 start, unsigned long len)
5760{
5761	struct extent_buffer *eb;
5762	int num_pages;
5763	int i;
5764
5765	eb = __alloc_extent_buffer(fs_info, start, len);
5766	if (!eb)
5767		return NULL;
5768
5769	num_pages = num_extent_pages(eb);
5770	for (i = 0; i < num_pages; i++) {
5771		int ret;
5772
5773		eb->pages[i] = alloc_page(GFP_NOFS);
5774		if (!eb->pages[i])
5775			goto err;
5776		ret = attach_extent_buffer_page(eb, eb->pages[i], NULL);
5777		if (ret < 0)
5778			goto err;
5779	}
5780	set_extent_buffer_uptodate(eb);
5781	btrfs_set_header_nritems(eb, 0);
5782	set_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
5783
5784	return eb;
5785err:
5786	for (; i > 0; i--) {
5787		detach_extent_buffer_page(eb, eb->pages[i - 1]);
5788		__free_page(eb->pages[i - 1]);
5789	}
5790	__free_extent_buffer(eb);
5791	return NULL;
5792}
5793
5794struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
5795						u64 start)
5796{
5797	return __alloc_dummy_extent_buffer(fs_info, start, fs_info->nodesize);
5798}
5799
5800static void check_buffer_tree_ref(struct extent_buffer *eb)
5801{
5802	int refs;
5803	/*
5804	 * The TREE_REF bit is first set when the extent_buffer is added
5805	 * to the radix tree. It is also reset, if unset, when a new reference
5806	 * is created by find_extent_buffer.
5807	 *
5808	 * It is only cleared in two cases: freeing the last non-tree
5809	 * reference to the extent_buffer when its STALE bit is set or
5810	 * calling releasepage when the tree reference is the only reference.
5811	 *
5812	 * In both cases, care is taken to ensure that the extent_buffer's
5813	 * pages are not under io. However, releasepage can be concurrently
5814	 * called with creating new references, which is prone to race
5815	 * conditions between the calls to check_buffer_tree_ref in those
5816	 * codepaths and clearing TREE_REF in try_release_extent_buffer.
5817	 *
5818	 * The actual lifetime of the extent_buffer in the radix tree is
5819	 * adequately protected by the refcount, but the TREE_REF bit and
5820	 * its corresponding reference are not. To protect against this
5821	 * class of races, we call check_buffer_tree_ref from the codepaths
5822	 * which trigger io after they set eb->io_pages. Note that once io is
5823	 * initiated, TREE_REF can no longer be cleared, so that is the
5824	 * moment at which any such race is best fixed.
5825	 */
5826	refs = atomic_read(&eb->refs);
5827	if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
5828		return;
5829
5830	spin_lock(&eb->refs_lock);
5831	if (!test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
5832		atomic_inc(&eb->refs);
5833	spin_unlock(&eb->refs_lock);
5834}
5835
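/*
 * Mark all of the eb's pages as accessed in the page cache, except for the
 * page the caller has just touched, and make sure the buffer keeps its tree
 * reference.
 */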
5836static void mark_extent_buffer_accessed(struct extent_buffer *eb,
5837		struct page *accessed)
5838{
5839	int num_pages, i;
5840
5841	check_buffer_tree_ref(eb);
5842
5843	num_pages = num_extent_pages(eb);
5844	for (i = 0; i < num_pages; i++) {
5845		struct page *p = eb->pages[i];
5846
5847		if (p != accessed)
5848			mark_page_accessed(p);
5849	}
5850}
5851
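/*
 * Look up the extent buffer at @start in the radix tree, grab a reference on
 * it and mark it accessed.  Returns NULL if no buffer is present.
 */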
5852struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
5853					 u64 start)
5854{
5855	struct extent_buffer *eb;
5856
5857	eb = find_extent_buffer_nolock(fs_info, start);
5858	if (!eb)
5859		return NULL;
5860	/*
5861	 * Lock our eb's refs_lock to avoid races with free_extent_buffer().
5862	 * When we get our eb it might be flagged with EXTENT_BUFFER_STALE and
5863	 * another task running free_extent_buffer() might have seen that flag
5864	 * set, eb->refs == 2, that the buffer isn't under IO (dirty and
5865	 * writeback flags not set) and it's still in the tree (flag
5866	 * EXTENT_BUFFER_TREE_REF set), therefore being in the process of
5867	 * decrementing the extent buffer's reference count twice.  So here we
5868	 * could race and increment the eb's reference count, clear its stale
5869	 * flag, mark it as dirty and drop our reference before the other task
5870	 * finishes executing free_extent_buffer, which would later result in
5871	 * an attempt to free an extent buffer that is dirty.
5872	 */
5873	if (test_bit(EXTENT_BUFFER_STALE, &eb->bflags)) {
5874		spin_lock(&eb->refs_lock);
5875		spin_unlock(&eb->refs_lock);
5876	}
5877	mark_extent_buffer_accessed(eb, NULL);
5878	return eb;
5879}
5880
5881#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
5882struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
5883					u64 start)
5884{
5885	struct extent_buffer *eb, *exists = NULL;
5886	int ret;
5887
5888	eb = find_extent_buffer(fs_info, start);
5889	if (eb)
5890		return eb;
5891	eb = alloc_dummy_extent_buffer(fs_info, start);
5892	if (!eb)
5893		return ERR_PTR(-ENOMEM);
5894	eb->fs_info = fs_info;
5895again:
5896	ret = radix_tree_preload(GFP_NOFS);
5897	if (ret) {
5898		exists = ERR_PTR(ret);
5899		goto free_eb;
5900	}
5901	spin_lock(&fs_info->buffer_lock);
5902	ret = radix_tree_insert(&fs_info->buffer_radix,
5903				start >> fs_info->sectorsize_bits, eb);
5904	spin_unlock(&fs_info->buffer_lock);
5905	radix_tree_preload_end();
5906	if (ret == -EEXIST) {
5907		exists = find_extent_buffer(fs_info, start);
5908		if (exists)
5909			goto free_eb;
5910		else
5911			goto again;
5912	}
5913	check_buffer_tree_ref(eb);
5914	set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
5915
5916	return eb;
5917free_eb:
5918	btrfs_release_extent_buffer(eb);
5919	return exists;
5920}
5921#endif
5922
5923static struct extent_buffer *grab_extent_buffer(
5924		struct btrfs_fs_info *fs_info, struct page *page)
5925{
5926	struct extent_buffer *exists;
5927
5928	/*
5929	 * For the subpage case, we completely rely on the radix tree to ensure
5930	 * we don't try to insert two ebs for the same bytenr.  So here we always
5930	 * don't try to insert two ebs for the same bytenr.  So here we always
5931	 * return NULL and just continue.
5932	 */
5933	if (fs_info->sectorsize < PAGE_SIZE)
5934		return NULL;
5935
5936	/* Page not yet attached to an extent buffer */
5937	if (!PagePrivate(page))
5938		return NULL;
5939
5940	/*
5941	 * We could have already allocated an eb for this page and attached one,
5942	 * so let's see if we can get a ref on the existing eb.  If we can, we
5943	 * know it's good and we can just return that one; otherwise we know we
5944	 * can just overwrite page->private.
5945	 */
5946	exists = (struct extent_buffer *)page->private;
5947	if (atomic_inc_not_zero(&exists->refs))
5948		return exists;
5949
5950	WARN_ON(PageDirty(page));
5951	detach_page_private(page);
5952	return NULL;
5953}
5954
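/*
 * Return the extent buffer for the tree block at @start, allocating a new one
 * and inserting it into the buffer radix tree if it isn't cached yet.  The
 * backing pages come from the btree inode's page cache.  Returns an ERR_PTR
 * on failure.
 */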
5955struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
5956					  u64 start, u64 owner_root, int level)
5957{
5958	unsigned long len = fs_info->nodesize;
5959	int num_pages;
5960	int i;
5961	unsigned long index = start >> PAGE_SHIFT;
5962	struct extent_buffer *eb;
5963	struct extent_buffer *exists = NULL;
5964	struct page *p;
5965	struct address_space *mapping = fs_info->btree_inode->i_mapping;
5966	int uptodate = 1;
5967	int ret;
5968
5969	if (!IS_ALIGNED(start, fs_info->sectorsize)) {
5970		btrfs_err(fs_info, "bad tree block start %llu", start);
5971		return ERR_PTR(-EINVAL);
5972	}
5973
5974#if BITS_PER_LONG == 32
5975	if (start >= MAX_LFS_FILESIZE) {
5976		btrfs_err_rl(fs_info,
5977		"extent buffer %llu is beyond 32bit page cache limit", start);
5978		btrfs_err_32bit_limit(fs_info);
5979		return ERR_PTR(-EOVERFLOW);
5980	}
5981	if (start >= BTRFS_32BIT_EARLY_WARN_THRESHOLD)
5982		btrfs_warn_32bit_limit(fs_info);
5983#endif
5984
5985	if (fs_info->sectorsize < PAGE_SIZE &&
5986	    offset_in_page(start) + len > PAGE_SIZE) {
5987		btrfs_err(fs_info,
5988		"tree block crosses page boundary, start %llu nodesize %lu",
5989			  start, len);
5990		return ERR_PTR(-EINVAL);
5991	}
5992
5993	eb = find_extent_buffer(fs_info, start);
5994	if (eb)
5995		return eb;
5996
5997	eb = __alloc_extent_buffer(fs_info, start, len);
5998	if (!eb)
5999		return ERR_PTR(-ENOMEM);
6000	btrfs_set_buffer_lockdep_class(owner_root, eb, level);
6001
6002	num_pages = num_extent_pages(eb);
6003	for (i = 0; i < num_pages; i++, index++) {
6004		struct btrfs_subpage *prealloc = NULL;
6005
6006		p = find_or_create_page(mapping, index, GFP_NOFS|__GFP_NOFAIL);
6007		if (!p) {
6008			exists = ERR_PTR(-ENOMEM);
6009			goto free_eb;
6010		}
6011
6012		/*
6013		 * Preallocate page->private for subpage case, so that we won't
6014		 * allocate memory with private_lock held.  The memory will be
6015		 * freed by attach_extent_buffer_page() or freed manually if
6016		 * we exit earlier.
6017		 *
6018		 * Although we have ensured one subpage eb can only have one
6019		 * page, this may change in the future for 16K page size
6020		 * support, so we still preallocate the memory in the loop.
6021		 */
6022		ret = btrfs_alloc_subpage(fs_info, &prealloc,
6023					  BTRFS_SUBPAGE_METADATA);
6024		if (ret < 0) {
6025			unlock_page(p);
6026			put_page(p);
6027			exists = ERR_PTR(ret);
6028			goto free_eb;
6029		}
6030
6031		spin_lock(&mapping->private_lock);
6032		exists = grab_extent_buffer(fs_info, p);
6033		if (exists) {
6034			spin_unlock(&mapping->private_lock);
6035			unlock_page(p);
6036			put_page(p);
6037			mark_extent_buffer_accessed(exists, p);
6038			btrfs_free_subpage(prealloc);
6039			goto free_eb;
6040		}
6041		/* Should not fail, as we have preallocated the memory */
6042		ret = attach_extent_buffer_page(eb, p, prealloc);
6043		ASSERT(!ret);
6044		/*
6045		 * To signal that we have an extra eb under allocation, so that
6046		 * detach_extent_buffer_page() won't release the page private
6047		 * when the eb hasn't yet been inserted into the radix tree.
6048		 *
6049		 * The ref will be decreased when the eb releases the page, in
6050		 * detach_extent_buffer_page().
6051		 * Thus it needs no special handling in the error path.
6052		 */
6053		btrfs_page_inc_eb_refs(fs_info, p);
6054		spin_unlock(&mapping->private_lock);
6055
6056		WARN_ON(btrfs_page_test_dirty(fs_info, p, eb->start, eb->len));
6057		eb->pages[i] = p;
6058		if (!PageUptodate(p))
6059			uptodate = 0;
6060
6061		/*
6062		 * We can't unlock the pages just yet since the extent buffer
6063		 * hasn't been properly inserted into the radix tree; this
6064		 * opens a race with btree_releasepage, which can free a page
6065		 * while we are still filling in all pages for the buffer, and
6066		 * we could crash.
6067		 */
6068	}
6069	if (uptodate)
6070		set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
6071again:
6072	ret = radix_tree_preload(GFP_NOFS);
6073	if (ret) {
6074		exists = ERR_PTR(ret);
6075		goto free_eb;
6076	}
6077
6078	spin_lock(&fs_info->buffer_lock);
6079	ret = radix_tree_insert(&fs_info->buffer_radix,
6080				start >> fs_info->sectorsize_bits, eb);
6081	spin_unlock(&fs_info->buffer_lock);
6082	radix_tree_preload_end();
6083	if (ret == -EEXIST) {
6084		exists = find_extent_buffer(fs_info, start);
6085		if (exists)
6086			goto free_eb;
6087		else
6088			goto again;
6089	}
6090	/* add one reference for the tree */
6091	check_buffer_tree_ref(eb);
6092	set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
6093
6094	/*
6095	 * Now it's safe to unlock the pages because any calls to
6096	 * btree_releasepage will correctly detect that a page belongs to a
6097	 * live buffer and won't free them prematurely.
6098	 */
6099	for (i = 0; i < num_pages; i++)
6100		unlock_page(eb->pages[i]);
6101	return eb;
6102
6103free_eb:
6104	WARN_ON(!atomic_dec_and_test(&eb->refs));
6105	for (i = 0; i < num_pages; i++) {
6106		if (eb->pages[i])
6107			unlock_page(eb->pages[i]);
6108	}
6109
6110	btrfs_release_extent_buffer(eb);
6111	return exists;
6112}
6113
6114static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
6115{
6116	struct extent_buffer *eb =
6117			container_of(head, struct extent_buffer, rcu_head);
6118
6119	__free_extent_buffer(eb);
6120}
6121
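/*
 * Drop one reference with eb->refs_lock held; the lock is released before
 * returning.  When the last reference goes away the buffer is removed from
 * the radix tree, its pages are released and the structure is freed.
 * Returns 1 if the buffer was freed, 0 otherwise.
 */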
6122static int release_extent_buffer(struct extent_buffer *eb)
6123	__releases(&eb->refs_lock)
6124{
6125	lockdep_assert_held(&eb->refs_lock);
6126
6127	WARN_ON(atomic_read(&eb->refs) == 0);
6128	if (atomic_dec_and_test(&eb->refs)) {
6129		if (test_and_clear_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags)) {
6130			struct btrfs_fs_info *fs_info = eb->fs_info;
6131
6132			spin_unlock(&eb->refs_lock);
6133
6134			spin_lock(&fs_info->buffer_lock);
6135			radix_tree_delete(&fs_info->buffer_radix,
6136					  eb->start >> fs_info->sectorsize_bits);
6137			spin_unlock(&fs_info->buffer_lock);
6138		} else {
6139			spin_unlock(&eb->refs_lock);
6140		}
6141
6142		btrfs_leak_debug_del(&eb->fs_info->eb_leak_lock, &eb->leak_list);
6143		/* Should be safe to release our pages at this point */
6144		btrfs_release_extent_buffer_pages(eb);
6145#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
6146		if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags))) {
6147			__free_extent_buffer(eb);
6148			return 1;
6149		}
6150#endif
6151		call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu);
6152		return 1;
6153	}
6154	spin_unlock(&eb->refs_lock);
6155
6156	return 0;
6157}
6158
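/*
 * Drop the caller's reference to @eb.  While the reference count is high
 * enough that this cannot be the last reference that matters, the decrement
 * is done locklessly with cmpxchg; once the count is low enough that the
 * buffer might need to be released, fall through to the slow path under
 * eb->refs_lock.
 */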
6159void free_extent_buffer(struct extent_buffer *eb)
6160{
6161	int refs;
6162	int old;
6163	if (!eb)
6164		return;
6165
6166	while (1) {
6167		refs = atomic_read(&eb->refs);
6168		if ((!test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) && refs <= 3)
6169		    || (test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) &&
6170			refs == 1))
6171			break;
6172		old = atomic_cmpxchg(&eb->refs, refs, refs - 1);
6173		if (old == refs)
6174			return;
6175	}
6176
6177	spin_lock(&eb->refs_lock);
6178	if (atomic_read(&eb->refs) == 2 &&
6179	    test_bit(EXTENT_BUFFER_STALE, &eb->bflags) &&
6180	    !