1// SPDX-License-Identifier: GPL-2.0
2
3#include <linux/bitops.h>
4#include <linux/slab.h>
5#include <linux/bio.h>
6#include <linux/mm.h>
7#include <linux/pagemap.h>
8#include <linux/page-flags.h>
9#include <linux/spinlock.h>
10#include <linux/blkdev.h>
11#include <linux/swap.h>
12#include <linux/writeback.h>
13#include <linux/pagevec.h>
14#include <linux/prefetch.h>
15#include <linux/cleancache.h>
16#include <linux/fsverity.h>
17#include "misc.h"
18#include "extent_io.h"
19#include "extent-io-tree.h"
20#include "extent_map.h"
21#include "ctree.h"
22#include "btrfs_inode.h"
23#include "volumes.h"
24#include "check-integrity.h"
25#include "locking.h"
26#include "rcu-string.h"
27#include "backref.h"
28#include "disk-io.h"
29#include "subpage.h"
30#include "zoned.h"
31#include "block-group.h"
32
33static struct kmem_cache *extent_state_cache;
34static struct kmem_cache *extent_buffer_cache;
35static struct bio_set btrfs_bioset;
36
37static inline bool extent_state_in_tree(const struct extent_state *state)
38{
39	return !RB_EMPTY_NODE(&state->rb_node);
40}
41
42#ifdef CONFIG_BTRFS_DEBUG
43static LIST_HEAD(states);
44static DEFINE_SPINLOCK(leak_lock);
45
46static inline void btrfs_leak_debug_add(spinlock_t *lock,
47					struct list_head *new,
48					struct list_head *head)
49{
50	unsigned long flags;
51
52	spin_lock_irqsave(lock, flags);
53	list_add(new, head);
54	spin_unlock_irqrestore(lock, flags);
55}
56
57static inline void btrfs_leak_debug_del(spinlock_t *lock,
58					struct list_head *entry)
59{
60	unsigned long flags;
61
62	spin_lock_irqsave(lock, flags);
63	list_del(entry);
64	spin_unlock_irqrestore(lock, flags);
65}
66
67void btrfs_extent_buffer_leak_debug_check(struct btrfs_fs_info *fs_info)
68{
69	struct extent_buffer *eb;
70	unsigned long flags;
71
72	/*
73	 * If we didn't get into open_ctree our allocated_ebs will not be
74	 * initialized, so just skip this.
75	 */
76	if (!fs_info->allocated_ebs.next)
77		return;
78
79	spin_lock_irqsave(&fs_info->eb_leak_lock, flags);
80	while (!list_empty(&fs_info->allocated_ebs)) {
81		eb = list_first_entry(&fs_info->allocated_ebs,
82				      struct extent_buffer, leak_list);
83		pr_err(
84	"BTRFS: buffer leak start %llu len %lu refs %d bflags %lu owner %llu\n",
85		       eb->start, eb->len, atomic_read(&eb->refs), eb->bflags,
86		       btrfs_header_owner(eb));
87		list_del(&eb->leak_list);
88		kmem_cache_free(extent_buffer_cache, eb);
89	}
90	spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags);
91}
92
93static inline void btrfs_extent_state_leak_debug_check(void)
94{
95	struct extent_state *state;
96
97	while (!list_empty(&states)) {
98		state = list_entry(states.next, struct extent_state, leak_list);
99		pr_err("BTRFS: state leak: start %llu end %llu state %u in tree %d refs %d\n",
100		       state->start, state->end, state->state,
101		       extent_state_in_tree(state),
102		       refcount_read(&state->refs));
103		list_del(&state->leak_list);
104		kmem_cache_free(extent_state_cache, state);
105	}
106}
107
108#define btrfs_debug_check_extent_io_range(tree, start, end)		\
109	__btrfs_debug_check_extent_io_range(__func__, (tree), (start), (end))
110static inline void __btrfs_debug_check_extent_io_range(const char *caller,
111		struct extent_io_tree *tree, u64 start, u64 end)
112{
113	struct inode *inode = tree->private_data;
114	u64 isize;
115
116	if (!inode || !is_data_inode(inode))
117		return;
118
119	isize = i_size_read(inode);
120	if (end >= PAGE_SIZE && (end % 2) == 0 && end != isize - 1) {
121		btrfs_debug_rl(BTRFS_I(inode)->root->fs_info,
122		    "%s: ino %llu isize %llu odd range [%llu,%llu]",
123			caller, btrfs_ino(BTRFS_I(inode)), isize, start, end);
124	}
125}
126#else
127#define btrfs_leak_debug_add(lock, new, head)	do {} while (0)
128#define btrfs_leak_debug_del(lock, entry)	do {} while (0)
129#define btrfs_extent_state_leak_debug_check()	do {} while (0)
130#define btrfs_debug_check_extent_io_range(c, s, e)	do {} while (0)
131#endif
132
133struct tree_entry {
134	u64 start;
135	u64 end;
136	struct rb_node rb_node;
137};
138
139struct extent_page_data {
140	struct btrfs_bio_ctrl bio_ctrl;
141	/* tells writepage not to lock the state bits for this range
142	 * it still does the unlocking
143	 */
144	unsigned int extent_locked:1;
145
146	/* tells the submit_bio code to use REQ_SYNC */
147	unsigned int sync_io:1;
148};
149
150static int add_extent_changeset(struct extent_state *state, u32 bits,
151				 struct extent_changeset *changeset,
152				 int set)
153{
154	int ret;
155
156	if (!changeset)
157		return 0;
158	if (set && (state->state & bits) == bits)
159		return 0;
160	if (!set && (state->state & bits) == 0)
161		return 0;
162	changeset->bytes_changed += state->end - state->start + 1;
163	ret = ulist_add(&changeset->range_changed, state->start, state->end,
164			GFP_ATOMIC);
165	return ret;
166}
167
168int __must_check submit_one_bio(struct bio *bio, int mirror_num,
169				unsigned long bio_flags)
170{
171	blk_status_t ret = 0;
172	struct extent_io_tree *tree = bio->bi_private;
173
174	bio->bi_private = NULL;
175
176	/* Caller should ensure the bio has at least some range added */
177	ASSERT(bio->bi_iter.bi_size);
178	if (is_data_inode(tree->private_data))
179		ret = btrfs_submit_data_bio(tree->private_data, bio, mirror_num,
180					    bio_flags);
181	else
182		ret = btrfs_submit_metadata_bio(tree->private_data, bio,
183						mirror_num, bio_flags);
184
185	return blk_status_to_errno(ret);
186}
187
188/* Cleanup unsubmitted bios */
189static void end_write_bio(struct extent_page_data *epd, int ret)
190{
191	struct bio *bio = epd->bio_ctrl.bio;
192
193	if (bio) {
194		bio->bi_status = errno_to_blk_status(ret);
195		bio_endio(bio);
196		epd->bio_ctrl.bio = NULL;
197	}
198}
199
200/*
201 * Submit bio from extent page data via submit_one_bio
202 *
203 * Return 0 if everything is OK.
204 * Return <0 for error.
205 */
206static int __must_check flush_write_bio(struct extent_page_data *epd)
207{
208	int ret = 0;
209	struct bio *bio = epd->bio_ctrl.bio;
210
211	if (bio) {
212		ret = submit_one_bio(bio, 0, 0);
213		/*
214		 * Clean up of epd->bio is handled by its endio function.
215		 * And endio is either triggered by successful bio execution
216		 * or the error handler of submit bio hook.
217		 * So at this point, no matter what happened, we don't need
218		 * to clean up epd->bio.
219		 */
220		epd->bio_ctrl.bio = NULL;
221	}
222	return ret;
223}
224
225int __init extent_state_cache_init(void)
226{
227	extent_state_cache = kmem_cache_create("btrfs_extent_state",
228			sizeof(struct extent_state), 0,
229			SLAB_MEM_SPREAD, NULL);
230	if (!extent_state_cache)
231		return -ENOMEM;
232	return 0;
233}
234
235int __init extent_io_init(void)
236{
237	extent_buffer_cache = kmem_cache_create("btrfs_extent_buffer",
238			sizeof(struct extent_buffer), 0,
239			SLAB_MEM_SPREAD, NULL);
240	if (!extent_buffer_cache)
241		return -ENOMEM;
242
243	if (bioset_init(&btrfs_bioset, BIO_POOL_SIZE,
244			offsetof(struct btrfs_bio, bio),
245			BIOSET_NEED_BVECS))
246		goto free_buffer_cache;
247
248	if (bioset_integrity_create(&btrfs_bioset, BIO_POOL_SIZE))
249		goto free_bioset;
250
251	return 0;
252
253free_bioset:
254	bioset_exit(&btrfs_bioset);
255
256free_buffer_cache:
257	kmem_cache_destroy(extent_buffer_cache);
258	extent_buffer_cache = NULL;
259	return -ENOMEM;
260}
261
262void __cold extent_state_cache_exit(void)
263{
264	btrfs_extent_state_leak_debug_check();
265	kmem_cache_destroy(extent_state_cache);
266}
267
268void __cold extent_io_exit(void)
269{
270	/*
271	 * Make sure all delayed rcu free are flushed before we
272	 * destroy caches.
273	 */
274	rcu_barrier();
275	kmem_cache_destroy(extent_buffer_cache);
276	bioset_exit(&btrfs_bioset);
277}
278
279/*
280 * For the file_extent_tree, we want to hold the inode lock when we lookup and
281 * update the disk_i_size, but lockdep will complain because our io_tree we hold
282 * the tree lock and get the inode lock when setting delalloc.  These two things
283 * are unrelated, so make a class for the file_extent_tree so we don't get the
284 * two locking patterns mixed up.
285 */
286static struct lock_class_key file_extent_tree_class;
287
288void extent_io_tree_init(struct btrfs_fs_info *fs_info,
289			 struct extent_io_tree *tree, unsigned int owner,
290			 void *private_data)
291{
292	tree->fs_info = fs_info;
293	tree->state = RB_ROOT;
294	tree->dirty_bytes = 0;
295	spin_lock_init(&tree->lock);
296	tree->private_data = private_data;
297	tree->owner = owner;
298	if (owner == IO_TREE_INODE_FILE_EXTENT)
299		lockdep_set_class(&tree->lock, &file_extent_tree_class);
300}
301
302void extent_io_tree_release(struct extent_io_tree *tree)
303{
304	spin_lock(&tree->lock);
305	/*
306	 * Do a single barrier for the waitqueue_active check here, the state
307	 * of the waitqueue should not change once extent_io_tree_release is
308	 * called.
309	 */
310	smp_mb();
311	while (!RB_EMPTY_ROOT(&tree->state)) {
312		struct rb_node *node;
313		struct extent_state *state;
314
315		node = rb_first(&tree->state);
316		state = rb_entry(node, struct extent_state, rb_node);
317		rb_erase(&state->rb_node, &tree->state);
318		RB_CLEAR_NODE(&state->rb_node);
319		/*
320		 * btree io trees aren't supposed to have tasks waiting for
321		 * changes in the flags of extent states ever.
322		 */
323		ASSERT(!waitqueue_active(&state->wq));
324		free_extent_state(state);
325
326		cond_resched_lock(&tree->lock);
327	}
328	spin_unlock(&tree->lock);
329}
330
331static struct extent_state *alloc_extent_state(gfp_t mask)
332{
333	struct extent_state *state;
334
335	/*
336	 * The given mask might be not appropriate for the slab allocator,
337	 * drop the unsupported bits
338	 */
339	mask &= ~(__GFP_DMA32|__GFP_HIGHMEM);
340	state = kmem_cache_alloc(extent_state_cache, mask);
341	if (!state)
342		return state;
343	state->state = 0;
344	state->failrec = NULL;
345	RB_CLEAR_NODE(&state->rb_node);
346	btrfs_leak_debug_add(&leak_lock, &state->leak_list, &states);
347	refcount_set(&state->refs, 1);
348	init_waitqueue_head(&state->wq);
349	trace_alloc_extent_state(state, mask, _RET_IP_);
350	return state;
351}
352
353void free_extent_state(struct extent_state *state)
354{
355	if (!state)
356		return;
357	if (refcount_dec_and_test(&state->refs)) {
358		WARN_ON(extent_state_in_tree(state));
359		btrfs_leak_debug_del(&leak_lock, &state->leak_list);
360		trace_free_extent_state(state, _RET_IP_);
361		kmem_cache_free(extent_state_cache, state);
362	}
363}
364
365static struct rb_node *tree_insert(struct rb_root *root,
366				   struct rb_node *search_start,
367				   u64 offset,
368				   struct rb_node *node,
369				   struct rb_node ***p_in,
370				   struct rb_node **parent_in)
371{
372	struct rb_node **p;
373	struct rb_node *parent = NULL;
374	struct tree_entry *entry;
375
376	if (p_in && parent_in) {
377		p = *p_in;
378		parent = *parent_in;
379		goto do_insert;
380	}
381
382	p = search_start ? &search_start : &root->rb_node;
383	while (*p) {
384		parent = *p;
385		entry = rb_entry(parent, struct tree_entry, rb_node);
386
387		if (offset < entry->start)
388			p = &(*p)->rb_left;
389		else if (offset > entry->end)
390			p = &(*p)->rb_right;
391		else
392			return parent;
393	}
394
395do_insert:
396	rb_link_node(node, parent, p);
397	rb_insert_color(node, root);
398	return NULL;
399}
400
401/**
402 * Search @tree for an entry that contains @offset. Such entry would have
403 * entry->start <= offset && entry->end >= offset.
404 *
405 * @tree:       the tree to search
406 * @offset:     offset that should fall within an entry in @tree
407 * @next_ret:   pointer to the first entry whose range ends after @offset
408 * @prev_ret:   pointer to the first entry whose range begins before @offset
409 * @p_ret:      pointer where new node should be anchored (used when inserting an
410 *	        entry in the tree)
411 * @parent_ret: points to entry which would have been the parent of the entry,
412 *               containing @offset
413 *
414 * This function returns a pointer to the entry that contains @offset byte
415 * address. If no such entry exists, then NULL is returned and the other
416 * pointer arguments to the function are filled, otherwise the found entry is
417 * returned and other pointers are left untouched.
418 */
419static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
420				      struct rb_node **next_ret,
421				      struct rb_node **prev_ret,
422				      struct rb_node ***p_ret,
423				      struct rb_node **parent_ret)
424{
425	struct rb_root *root = &tree->state;
426	struct rb_node **n = &root->rb_node;
427	struct rb_node *prev = NULL;
428	struct rb_node *orig_prev = NULL;
429	struct tree_entry *entry;
430	struct tree_entry *prev_entry = NULL;
431
432	while (*n) {
433		prev = *n;
434		entry = rb_entry(prev, struct tree_entry, rb_node);
435		prev_entry = entry;
436
437		if (offset < entry->start)
438			n = &(*n)->rb_left;
439		else if (offset > entry->end)
440			n = &(*n)->rb_right;
441		else
442			return *n;
443	}
444
445	if (p_ret)
446		*p_ret = n;
447	if (parent_ret)
448		*parent_ret = prev;
449
450	if (next_ret) {
451		orig_prev = prev;
452		while (prev && offset > prev_entry->end) {
453			prev = rb_next(prev);
454			prev_entry = rb_entry(prev, struct tree_entry, rb_node);
455		}
456		*next_ret = prev;
457		prev = orig_prev;
458	}
459
460	if (prev_ret) {
461		prev_entry = rb_entry(prev, struct tree_entry, rb_node);
462		while (prev && offset < prev_entry->start) {
463			prev = rb_prev(prev);
464			prev_entry = rb_entry(prev, struct tree_entry, rb_node);
465		}
466		*prev_ret = prev;
467	}
468	return NULL;
469}
470
471static inline struct rb_node *
472tree_search_for_insert(struct extent_io_tree *tree,
473		       u64 offset,
474		       struct rb_node ***p_ret,
475		       struct rb_node **parent_ret)
476{
477	struct rb_node *next= NULL;
478	struct rb_node *ret;
479
480	ret = __etree_search(tree, offset, &next, NULL, p_ret, parent_ret);
481	if (!ret)
482		return next;
483	return ret;
484}
485
486static inline struct rb_node *tree_search(struct extent_io_tree *tree,
487					  u64 offset)
488{
489	return tree_search_for_insert(tree, offset, NULL, NULL);
490}
491
492/*
493 * utility function to look for merge candidates inside a given range.
494 * Any extents with matching state are merged together into a single
495 * extent in the tree.  Extents with EXTENT_IO in their state field
496 * are not merged because the end_io handlers need to be able to do
497 * operations on them without sleeping (or doing allocations/splits).
498 *
499 * This should be called with the tree lock held.
500 */
501static void merge_state(struct extent_io_tree *tree,
502		        struct extent_state *state)
503{
504	struct extent_state *other;
505	struct rb_node *other_node;
506
507	if (state->state & (EXTENT_LOCKED | EXTENT_BOUNDARY))
508		return;
509
510	other_node = rb_prev(&state->rb_node);
511	if (other_node) {
512		other = rb_entry(other_node, struct extent_state, rb_node);
513		if (other->end == state->start - 1 &&
514		    other->state == state->state) {
515			if (tree->private_data &&
516			    is_data_inode(tree->private_data))
517				btrfs_merge_delalloc_extent(tree->private_data,
518							    state, other);
519			state->start = other->start;
520			rb_erase(&other->rb_node, &tree->state);
521			RB_CLEAR_NODE(&other->rb_node);
522			free_extent_state(other);
523		}
524	}
525	other_node = rb_next(&state->rb_node);
526	if (other_node) {
527		other = rb_entry(other_node, struct extent_state, rb_node);
528		if (other->start == state->end + 1 &&
529		    other->state == state->state) {
530			if (tree->private_data &&
531			    is_data_inode(tree->private_data))
532				btrfs_merge_delalloc_extent(tree->private_data,
533							    state, other);
534			state->end = other->end;
535			rb_erase(&other->rb_node, &tree->state);
536			RB_CLEAR_NODE(&other->rb_node);
537			free_extent_state(other);
538		}
539	}
540}
541
542static void set_state_bits(struct extent_io_tree *tree,
543			   struct extent_state *state, u32 *bits,
544			   struct extent_changeset *changeset);
545
546/*
547 * insert an extent_state struct into the tree.  'bits' are set on the
548 * struct before it is inserted.
549 *
550 * This may return -EEXIST if the extent is already there, in which case the
551 * state struct is freed.
552 *
553 * The tree lock is not taken internally.  This is a utility function and
554 * probably isn't what you want to call (see set/clear_extent_bit).
555 */
556static int insert_state(struct extent_io_tree *tree,
557			struct extent_state *state, u64 start, u64 end,
558			struct rb_node ***p,
559			struct rb_node **parent,
560			u32 *bits, struct extent_changeset *changeset)
561{
562	struct rb_node *node;
563
564	if (end < start) {
565		btrfs_err(tree->fs_info,
566			"insert state: end < start %llu %llu", end, start);
567		WARN_ON(1);
568	}
569	state->start = start;
570	state->end = end;
571
572	set_state_bits(tree, state, bits, changeset);
573
574	node = tree_insert(&tree->state, NULL, end, &state->rb_node, p, parent);
575	if (node) {
576		struct extent_state *found;
577		found = rb_entry(node, struct extent_state, rb_node);
578		btrfs_err(tree->fs_info,
579		       "found node %llu %llu on insert of %llu %llu",
580		       found->start, found->end, start, end);
581		return -EEXIST;
582	}
583	merge_state(tree, state);
584	return 0;
585}
586
587/*
588 * split a given extent state struct in two, inserting the preallocated
589 * struct 'prealloc' as the newly created second half.  'split' indicates an
590 * offset inside 'orig' where it should be split.
591 *
592 * Before calling,
593 * the tree has 'orig' at [orig->start, orig->end].  After calling, there
594 * are two extent state structs in the tree:
595 * prealloc: [orig->start, split - 1]
596 * orig: [ split, orig->end ]
597 *
598 * The tree locks are not taken by this function. They need to be held
599 * by the caller.
600 */
601static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
602		       struct extent_state *prealloc, u64 split)
603{
604	struct rb_node *node;
605
606	if (tree->private_data && is_data_inode(tree->private_data))
607		btrfs_split_delalloc_extent(tree->private_data, orig, split);
608
609	prealloc->start = orig->start;
610	prealloc->end = split - 1;
611	prealloc->state = orig->state;
612	orig->start = split;
613
614	node = tree_insert(&tree->state, &orig->rb_node, prealloc->end,
615			   &prealloc->rb_node, NULL, NULL);
616	if (node) {
617		free_extent_state(prealloc);
618		return -EEXIST;
619	}
620	return 0;
621}
622
623static struct extent_state *next_state(struct extent_state *state)
624{
625	struct rb_node *next = rb_next(&state->rb_node);
626	if (next)
627		return rb_entry(next, struct extent_state, rb_node);
628	else
629		return NULL;
630}
631
632/*
633 * utility function to clear some bits in an extent state struct.
634 * it will optionally wake up anyone waiting on this state (wake == 1).
635 *
636 * If no bits are set on the state struct after clearing things, the
637 * struct is freed and removed from the tree
638 */
639static struct extent_state *clear_state_bit(struct extent_io_tree *tree,
640					    struct extent_state *state,
641					    u32 *bits, int wake,
642					    struct extent_changeset *changeset)
643{
644	struct extent_state *next;
645	u32 bits_to_clear = *bits & ~EXTENT_CTLBITS;
646	int ret;
647
648	if ((bits_to_clear & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
649		u64 range = state->end - state->start + 1;
650		WARN_ON(range > tree->dirty_bytes);
651		tree->dirty_bytes -= range;
652	}
653
654	if (tree->private_data && is_data_inode(tree->private_data))
655		btrfs_clear_delalloc_extent(tree->private_data, state, bits);
656
657	ret = add_extent_changeset(state, bits_to_clear, changeset, 0);
658	BUG_ON(ret < 0);
659	state->state &= ~bits_to_clear;
660	if (wake)
661		wake_up(&state->wq);
662	if (state->state == 0) {
663		next = next_state(state);
664		if (extent_state_in_tree(state)) {
665			rb_erase(&state->rb_node, &tree->state);
666			RB_CLEAR_NODE(&state->rb_node);
667			free_extent_state(state);
668		} else {
669			WARN_ON(1);
670		}
671	} else {
672		merge_state(tree, state);
673		next = next_state(state);
674	}
675	return next;
676}
677
678static struct extent_state *
679alloc_extent_state_atomic(struct extent_state *prealloc)
680{
681	if (!prealloc)
682		prealloc = alloc_extent_state(GFP_ATOMIC);
683
684	return prealloc;
685}
686
687static void extent_io_tree_panic(struct extent_io_tree *tree, int err)
688{
689	btrfs_panic(tree->fs_info, err,
690	"locking error: extent tree was modified by another thread while locked");
691}
692
693/*
694 * clear some bits on a range in the tree.  This may require splitting
695 * or inserting elements in the tree, so the gfp mask is used to
696 * indicate which allocations or sleeping are allowed.
697 *
698 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
699 * the given range from the tree regardless of state (ie for truncate).
700 *
701 * the range [start, end] is inclusive.
702 *
703 * This takes the tree lock, and returns 0 on success and < 0 on error.
704 */
705int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
706		       u32 bits, int wake, int delete,
707		       struct extent_state **cached_state,
708		       gfp_t mask, struct extent_changeset *changeset)
709{
710	struct extent_state *state;
711	struct extent_state *cached;
712	struct extent_state *prealloc = NULL;
713	struct rb_node *node;
714	u64 last_end;
715	int err;
716	int clear = 0;
717
718	btrfs_debug_check_extent_io_range(tree, start, end);
719	trace_btrfs_clear_extent_bit(tree, start, end - start + 1, bits);
720
721	if (bits & EXTENT_DELALLOC)
722		bits |= EXTENT_NORESERVE;
723
724	if (delete)
725		bits |= ~EXTENT_CTLBITS;
726
727	if (bits & (EXTENT_LOCKED | EXTENT_BOUNDARY))
728		clear = 1;
729again:
730	if (!prealloc && gfpflags_allow_blocking(mask)) {
731		/*
732		 * Don't care for allocation failure here because we might end
733		 * up not needing the pre-allocated extent state at all, which
734		 * is the case if we only have in the tree extent states that
735		 * cover our input range and don't cover too any other range.
736		 * If we end up needing a new extent state we allocate it later.
737		 */
738		prealloc = alloc_extent_state(mask);
739	}
740
741	spin_lock(&tree->lock);
742	if (cached_state) {
743		cached = *cached_state;
744
745		if (clear) {
746			*cached_state = NULL;
747			cached_state = NULL;
748		}
749
750		if (cached && extent_state_in_tree(cached) &&
751		    cached->start <= start && cached->end > start) {
752			if (clear)
753				refcount_dec(&cached->refs);
754			state = cached;
755			goto hit_next;
756		}
757		if (clear)
758			free_extent_state(cached);
759	}
760	/*
761	 * this search will find the extents that end after
762	 * our range starts
763	 */
764	node = tree_search(tree, start);
765	if (!node)
766		goto out;
767	state = rb_entry(node, struct extent_state, rb_node);
768hit_next:
769	if (state->start > end)
770		goto out;
771	WARN_ON(state->end < start);
772	last_end = state->end;
773
774	/* the state doesn't have the wanted bits, go ahead */
775	if (!(state->state & bits)) {
776		state = next_state(state);
777		goto next;
778	}
779
780	/*
781	 *     | ---- desired range ---- |
782	 *  | state | or
783	 *  | ------------- state -------------- |
784	 *
785	 * We need to split the extent we found, and may flip
786	 * bits on second half.
787	 *
788	 * If the extent we found extends past our range, we
789	 * just split and search again.  It'll get split again
790	 * the next time though.
791	 *
792	 * If the extent we found is inside our range, we clear
793	 * the desired bit on it.
794	 */
795
796	if (state->start < start) {
797		prealloc = alloc_extent_state_atomic(prealloc);
798		BUG_ON(!prealloc);
799		err = split_state(tree, state, prealloc, start);
800		if (err)
801			extent_io_tree_panic(tree, err);
802
803		prealloc = NULL;
804		if (err)
805			goto out;
806		if (state->end <= end) {
807			state = clear_state_bit(tree, state, &bits, wake,
808						changeset);
809			goto next;
810		}
811		goto search_again;
812	}
813	/*
814	 * | ---- desired range ---- |
815	 *                        | state |
816	 * We need to split the extent, and clear the bit
817	 * on the first half
818	 */
819	if (state->start <= end && state->end > end) {
820		prealloc = alloc_extent_state_atomic(prealloc);
821		BUG_ON(!prealloc);
822		err = split_state(tree, state, prealloc, end + 1);
823		if (err)
824			extent_io_tree_panic(tree, err);
825
826		if (wake)
827			wake_up(&state->wq);
828
829		clear_state_bit(tree, prealloc, &bits, wake, changeset);
830
831		prealloc = NULL;
832		goto out;
833	}
834
835	state = clear_state_bit(tree, state, &bits, wake, changeset);
836next:
837	if (last_end == (u64)-1)
838		goto out;
839	start = last_end + 1;
840	if (start <= end && state && !need_resched())
841		goto hit_next;
842
843search_again:
844	if (start > end)
845		goto out;
846	spin_unlock(&tree->lock);
847	if (gfpflags_allow_blocking(mask))
848		cond_resched();
849	goto again;
850
851out:
852	spin_unlock(&tree->lock);
853	if (prealloc)
854		free_extent_state(prealloc);
855
856	return 0;
857
858}
859
860static void wait_on_state(struct extent_io_tree *tree,
861			  struct extent_state *state)
862		__releases(tree->lock)
863		__acquires(tree->lock)
864{
865	DEFINE_WAIT(wait);
866	prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
867	spin_unlock(&tree->lock);
868	schedule();
869	spin_lock(&tree->lock);
870	finish_wait(&state->wq, &wait);
871}
872
873/*
874 * waits for one or more bits to clear on a range in the state tree.
875 * The range [start, end] is inclusive.
876 * The tree lock is taken by this function
877 */
878static void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
879			    u32 bits)
880{
881	struct extent_state *state;
882	struct rb_node *node;
883
884	btrfs_debug_check_extent_io_range(tree, start, end);
885
886	spin_lock(&tree->lock);
887again:
888	while (1) {
889		/*
890		 * this search will find all the extents that end after
891		 * our range starts
892		 */
893		node = tree_search(tree, start);
894process_node:
895		if (!node)
896			break;
897
898		state = rb_entry(node, struct extent_state, rb_node);
899
900		if (state->start > end)
901			goto out;
902
903		if (state->state & bits) {
904			start = state->start;
905			refcount_inc(&state->refs);
906			wait_on_state(tree, state);
907			free_extent_state(state);
908			goto again;
909		}
910		start = state->end + 1;
911
912		if (start > end)
913			break;
914
915		if (!cond_resched_lock(&tree->lock)) {
916			node = rb_next(node);
917			goto process_node;
918		}
919	}
920out:
921	spin_unlock(&tree->lock);
922}
923
924static void set_state_bits(struct extent_io_tree *tree,
925			   struct extent_state *state,
926			   u32 *bits, struct extent_changeset *changeset)
927{
928	u32 bits_to_set = *bits & ~EXTENT_CTLBITS;
929	int ret;
930
931	if (tree->private_data && is_data_inode(tree->private_data))
932		btrfs_set_delalloc_extent(tree->private_data, state, bits);
933
934	if ((bits_to_set & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
935		u64 range = state->end - state->start + 1;
936		tree->dirty_bytes += range;
937	}
938	ret = add_extent_changeset(state, bits_to_set, changeset, 1);
939	BUG_ON(ret < 0);
940	state->state |= bits_to_set;
941}
942
943static void cache_state_if_flags(struct extent_state *state,
944				 struct extent_state **cached_ptr,
945				 unsigned flags)
946{
947	if (cached_ptr && !(*cached_ptr)) {
948		if (!flags || (state->state & flags)) {
949			*cached_ptr = state;
950			refcount_inc(&state->refs);
951		}
952	}
953}
954
955static void cache_state(struct extent_state *state,
956			struct extent_state **cached_ptr)
957{
958	return cache_state_if_flags(state, cached_ptr,
959				    EXTENT_LOCKED | EXTENT_BOUNDARY);
960}
961
962/*
963 * set some bits on a range in the tree.  This may require allocations or
964 * sleeping, so the gfp mask is used to indicate what is allowed.
965 *
966 * If any of the exclusive bits are set, this will fail with -EEXIST if some
967 * part of the range already has the desired bits set.  The start of the
968 * existing range is returned in failed_start in this case.
969 *
970 * [start, end] is inclusive This takes the tree lock.
971 */
972int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, u32 bits,
973		   u32 exclusive_bits, u64 *failed_start,
974		   struct extent_state **cached_state, gfp_t mask,
975		   struct extent_changeset *changeset)
976{
977	struct extent_state *state;
978	struct extent_state *prealloc = NULL;
979	struct rb_node *node;
980	struct rb_node **p;
981	struct rb_node *parent;
982	int err = 0;
983	u64 last_start;
984	u64 last_end;
985
986	btrfs_debug_check_extent_io_range(tree, start, end);
987	trace_btrfs_set_extent_bit(tree, start, end - start + 1, bits);
988
989	if (exclusive_bits)
990		ASSERT(failed_start);
991	else
992		ASSERT(failed_start == NULL);
993again:
994	if (!prealloc && gfpflags_allow_blocking(mask)) {
995		/*
996		 * Don't care for allocation failure here because we might end
997		 * up not needing the pre-allocated extent state at all, which
998		 * is the case if we only have in the tree extent states that
999		 * cover our input range and don't cover too any other range.
1000		 * If we end up needing a new extent state we allocate it later.
1001		 */
1002		prealloc = alloc_extent_state(mask);
1003	}
1004
1005	spin_lock(&tree->lock);
1006	if (cached_state && *cached_state) {
1007		state = *cached_state;
1008		if (state->start <= start && state->end > start &&
1009		    extent_state_in_tree(state)) {
1010			node = &state->rb_node;
1011			goto hit_next;
1012		}
1013	}
1014	/*
1015	 * this search will find all the extents that end after
1016	 * our range starts.
1017	 */
1018	node = tree_search_for_insert(tree, start, &p, &parent);
1019	if (!node) {
1020		prealloc = alloc_extent_state_atomic(prealloc);
1021		BUG_ON(!prealloc);
1022		err = insert_state(tree, prealloc, start, end,
1023				   &p, &parent, &bits, changeset);
1024		if (err)
1025			extent_io_tree_panic(tree, err);
1026
1027		cache_state(prealloc, cached_state);
1028		prealloc = NULL;
1029		goto out;
1030	}
1031	state = rb_entry(node, struct extent_state, rb_node);
1032hit_next:
1033	last_start = state->start;
1034	last_end = state->end;
1035
1036	/*
1037	 * | ---- desired range ---- |
1038	 * | state |
1039	 *
1040	 * Just lock what we found and keep going
1041	 */
1042	if (state->start == start && state->end <= end) {
1043		if (state->state & exclusive_bits) {
1044			*failed_start = state->start;
1045			err = -EEXIST;
1046			goto out;
1047		}
1048
1049		set_state_bits(tree, state, &bits, changeset);
1050		cache_state(state, cached_state);
1051		merge_state(tree, state);
1052		if (last_end == (u64)-1)
1053			goto out;
1054		start = last_end + 1;
1055		state = next_state(state);
1056		if (start < end && state && state->start == start &&
1057		    !need_resched())
1058			goto hit_next;
1059		goto search_again;
1060	}
1061
1062	/*
1063	 *     | ---- desired range ---- |
1064	 * | state |
1065	 *   or
1066	 * | ------------- state -------------- |
1067	 *
1068	 * We need to split the extent we found, and may flip bits on
1069	 * second half.
1070	 *
1071	 * If the extent we found extends past our
1072	 * range, we just split and search again.  It'll get split
1073	 * again the next time though.
1074	 *
1075	 * If the extent we found is inside our range, we set the
1076	 * desired bit on it.
1077	 */
1078	if (state->start < start) {
1079		if (state->state & exclusive_bits) {
1080			*failed_start = start;
1081			err = -EEXIST;
1082			goto out;
1083		}
1084
1085		/*
1086		 * If this extent already has all the bits we want set, then
1087		 * skip it, not necessary to split it or do anything with it.
1088		 */
1089		if ((state->state & bits) == bits) {
1090			start = state->end + 1;
1091			cache_state(state, cached_state);
1092			goto search_again;
1093		}
1094
1095		prealloc = alloc_extent_state_atomic(prealloc);
1096		BUG_ON(!prealloc);
1097		err = split_state(tree, state, prealloc, start);
1098		if (err)
1099			extent_io_tree_panic(tree, err);
1100
1101		prealloc = NULL;
1102		if (err)
1103			goto out;
1104		if (state->end <= end) {
1105			set_state_bits(tree, state, &bits, changeset);
1106			cache_state(state, cached_state);
1107			merge_state(tree, state);
1108			if (last_end == (u64)-1)
1109				goto out;
1110			start = last_end + 1;
1111			state = next_state(state);
1112			if (start < end && state && state->start == start &&
1113			    !need_resched())
1114				goto hit_next;
1115		}
1116		goto search_again;
1117	}
1118	/*
1119	 * | ---- desired range ---- |
1120	 *     | state | or               | state |
1121	 *
1122	 * There's a hole, we need to insert something in it and
1123	 * ignore the extent we found.
1124	 */
1125	if (state->start > start) {
1126		u64 this_end;
1127		if (end < last_start)
1128			this_end = end;
1129		else
1130			this_end = last_start - 1;
1131
1132		prealloc = alloc_extent_state_atomic(prealloc);
1133		BUG_ON(!prealloc);
1134
1135		/*
1136		 * Avoid to free 'prealloc' if it can be merged with
1137		 * the later extent.
1138		 */
1139		err = insert_state(tree, prealloc, start, this_end,
1140				   NULL, NULL, &bits, changeset);
1141		if (err)
1142			extent_io_tree_panic(tree, err);
1143
1144		cache_state(prealloc, cached_state);
1145		prealloc = NULL;
1146		start = this_end + 1;
1147		goto search_again;
1148	}
1149	/*
1150	 * | ---- desired range ---- |
1151	 *                        | state |
1152	 * We need to split the extent, and set the bit
1153	 * on the first half
1154	 */
1155	if (state->start <= end && state->end > end) {
1156		if (state->state & exclusive_bits) {
1157			*failed_start = start;
1158			err = -EEXIST;
1159			goto out;
1160		}
1161
1162		prealloc = alloc_extent_state_atomic(prealloc);
1163		BUG_ON(!prealloc);
1164		err = split_state(tree, state, prealloc, end + 1);
1165		if (err)
1166			extent_io_tree_panic(tree, err);
1167
1168		set_state_bits(tree, prealloc, &bits, changeset);
1169		cache_state(prealloc, cached_state);
1170		merge_state(tree, prealloc);
1171		prealloc = NULL;
1172		goto out;
1173	}
1174
1175search_again:
1176	if (start > end)
1177		goto out;
1178	spin_unlock(&tree->lock);
1179	if (gfpflags_allow_blocking(mask))
1180		cond_resched();
1181	goto again;
1182
1183out:
1184	spin_unlock(&tree->lock);
1185	if (prealloc)
1186		free_extent_state(prealloc);
1187
1188	return err;
1189
1190}
1191
1192/**
1193 * convert_extent_bit - convert all bits in a given range from one bit to
1194 * 			another
1195 * @tree:	the io tree to search
1196 * @start:	the start offset in bytes
1197 * @end:	the end offset in bytes (inclusive)
1198 * @bits:	the bits to set in this range
1199 * @clear_bits:	the bits to clear in this range
1200 * @cached_state:	state that we're going to cache
1201 *
1202 * This will go through and set bits for the given range.  If any states exist
1203 * already in this range they are set with the given bit and cleared of the
1204 * clear_bits.  This is only meant to be used by things that are mergeable, ie
1205 * converting from say DELALLOC to DIRTY.  This is not meant to be used with
1206 * boundary bits like LOCK.
1207 *
1208 * All allocations are done with GFP_NOFS.
1209 */
1210int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
1211		       u32 bits, u32 clear_bits,
1212		       struct extent_state **cached_state)
1213{
1214	struct extent_state *state;
1215	struct extent_state *prealloc = NULL;
1216	struct rb_node *node;
1217	struct rb_node **p;
1218	struct rb_node *parent;
1219	int err = 0;
1220	u64 last_start;
1221	u64 last_end;
1222	bool first_iteration = true;
1223
1224	btrfs_debug_check_extent_io_range(tree, start, end);
1225	trace_btrfs_convert_extent_bit(tree, start, end - start + 1, bits,
1226				       clear_bits);
1227
1228again:
1229	if (!prealloc) {
1230		/*
1231		 * Best effort, don't worry if extent state allocation fails
1232		 * here for the first iteration. We might have a cached state
1233		 * that matches exactly the target range, in which case no
1234		 * extent state allocations are needed. We'll only know this
1235		 * after locking the tree.
1236		 */
1237		prealloc = alloc_extent_state(GFP_NOFS);
1238		if (!prealloc && !first_iteration)
1239			return -ENOMEM;
1240	}
1241
1242	spin_lock(&tree->lock);
1243	if (cached_state && *cached_state) {
1244		state = *cached_state;
1245		if (state->start <= start && state->end > start &&
1246		    extent_state_in_tree(state)) {
1247			node = &state->rb_node;
1248			goto hit_next;
1249		}
1250	}
1251
1252	/*
1253	 * this search will find all the extents that end after
1254	 * our range starts.
1255	 */
1256	node = tree_search_for_insert(tree, start, &p, &parent);
1257	if (!node) {
1258		prealloc = alloc_extent_state_atomic(prealloc);
1259		if (!prealloc) {
1260			err = -ENOMEM;
1261			goto out;
1262		}
1263		err = insert_state(tree, prealloc, start, end,
1264				   &p, &parent, &bits, NULL);
1265		if (err)
1266			extent_io_tree_panic(tree, err);
1267		cache_state(prealloc, cached_state);
1268		prealloc = NULL;
1269		goto out;
1270	}
1271	state = rb_entry(node, struct extent_state, rb_node);
1272hit_next:
1273	last_start = state->start;
1274	last_end = state->end;
1275
1276	/*
1277	 * | ---- desired range ---- |
1278	 * | state |
1279	 *
1280	 * Just lock what we found and keep going
1281	 */
1282	if (state->start == start && state->end <= end) {
1283		set_state_bits(tree, state, &bits, NULL);
1284		cache_state(state, cached_state);
1285		state = clear_state_bit(tree, state, &clear_bits, 0, NULL);
1286		if (last_end == (u64)-1)
1287			goto out;
1288		start = last_end + 1;
1289		if (start < end && state && state->start == start &&
1290		    !need_resched())
1291			goto hit_next;
1292		goto search_again;
1293	}
1294
1295	/*
1296	 *     | ---- desired range ---- |
1297	 * | state |
1298	 *   or
1299	 * | ------------- state -------------- |
1300	 *
1301	 * We need to split the extent we found, and may flip bits on
1302	 * second half.
1303	 *
1304	 * If the extent we found extends past our
1305	 * range, we just split and search again.  It'll get split
1306	 * again the next time though.
1307	 *
1308	 * If the extent we found is inside our range, we set the
1309	 * desired bit on it.
1310	 */
1311	if (state->start < start) {
1312		prealloc = alloc_extent_state_atomic(prealloc);
1313		if (!prealloc) {
1314			err = -ENOMEM;
1315			goto out;
1316		}
1317		err = split_state(tree, state, prealloc, start);
1318		if (err)
1319			extent_io_tree_panic(tree, err);
1320		prealloc = NULL;
1321		if (err)
1322			goto out;
1323		if (state->end <= end) {
1324			set_state_bits(tree, state, &bits, NULL);
1325			cache_state(state, cached_state);
1326			state = clear_state_bit(tree, state, &clear_bits, 0,
1327						NULL);
1328			if (last_end == (u64)-1)
1329				goto out;
1330			start = last_end + 1;
1331			if (start < end && state && state->start == start &&
1332			    !need_resched())
1333				goto hit_next;
1334		}
1335		goto search_again;
1336	}
1337	/*
1338	 * | ---- desired range ---- |
1339	 *     | state | or               | state |
1340	 *
1341	 * There's a hole, we need to insert something in it and
1342	 * ignore the extent we found.
1343	 */
1344	if (state->start > start) {
1345		u64 this_end;
1346		if (end < last_start)
1347			this_end = end;
1348		else
1349			this_end = last_start - 1;
1350
1351		prealloc = alloc_extent_state_atomic(prealloc);
1352		if (!prealloc) {
1353			err = -ENOMEM;
1354			goto out;
1355		}
1356
1357		/*
1358		 * Avoid to free 'prealloc' if it can be merged with
1359		 * the later extent.
1360		 */
1361		err = insert_state(tree, prealloc, start, this_end,
1362				   NULL, NULL, &bits, NULL);
1363		if (err)
1364			extent_io_tree_panic(tree, err);
1365		cache_state(prealloc, cached_state);
1366		prealloc = NULL;
1367		start = this_end + 1;
1368		goto search_again;
1369	}
1370	/*
1371	 * | ---- desired range ---- |
1372	 *                        | state |
1373	 * We need to split the extent, and set the bit
1374	 * on the first half
1375	 */
1376	if (state->start <= end && state->end > end) {
1377		prealloc = alloc_extent_state_atomic(prealloc);
1378		if (!prealloc) {
1379			err = -ENOMEM;
1380			goto out;
1381		}
1382
1383		err = split_state(tree, state, prealloc, end + 1);
1384		if (err)
1385			extent_io_tree_panic(tree, err);
1386
1387		set_state_bits(tree, prealloc, &bits, NULL);
1388		cache_state(prealloc, cached_state);
1389		clear_state_bit(tree, prealloc, &clear_bits, 0, NULL);
1390		prealloc = NULL;
1391		goto out;
1392	}
1393
1394search_again:
1395	if (start > end)
1396		goto out;
1397	spin_unlock(&tree->lock);
1398	cond_resched();
1399	first_iteration = false;
1400	goto again;
1401
1402out:
1403	spin_unlock(&tree->lock);
1404	if (prealloc)
1405		free_extent_state(prealloc);
1406
1407	return err;
1408}
1409
1410/* wrappers around set/clear extent bit */
1411int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1412			   u32 bits, struct extent_changeset *changeset)
1413{
1414	/*
1415	 * We don't support EXTENT_LOCKED yet, as current changeset will
1416	 * record any bits changed, so for EXTENT_LOCKED case, it will
1417	 * either fail with -EEXIST or changeset will record the whole
1418	 * range.
1419	 */
1420	BUG_ON(bits & EXTENT_LOCKED);
1421
1422	return set_extent_bit(tree, start, end, bits, 0, NULL, NULL, GFP_NOFS,
1423			      changeset);
1424}
1425
1426int set_extent_bits_nowait(struct extent_io_tree *tree, u64 start, u64 end,
1427			   u32 bits)
1428{
1429	return set_extent_bit(tree, start, end, bits, 0, NULL, NULL,
1430			      GFP_NOWAIT, NULL);
1431}
1432
1433int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
1434		     u32 bits, int wake, int delete,
1435		     struct extent_state **cached)
1436{
1437	return __clear_extent_bit(tree, start, end, bits, wake, delete,
1438				  cached, GFP_NOFS, NULL);
1439}
1440
1441int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1442		u32 bits, struct extent_changeset *changeset)
1443{
1444	/*
1445	 * Don't support EXTENT_LOCKED case, same reason as
1446	 * set_record_extent_bits().
1447	 */
1448	BUG_ON(bits & EXTENT_LOCKED);
1449
1450	return __clear_extent_bit(tree, start, end, bits, 0, 0, NULL, GFP_NOFS,
1451				  changeset);
1452}
1453
1454/*
1455 * either insert or lock state struct between start and end use mask to tell
1456 * us if waiting is desired.
1457 */
1458int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1459		     struct extent_state **cached_state)
1460{
1461	int err;
1462	u64 failed_start;
1463
1464	while (1) {
1465		err = set_extent_bit(tree, start, end, EXTENT_LOCKED,
1466				     EXTENT_LOCKED, &failed_start,
1467				     cached_state, GFP_NOFS, NULL);
1468		if (err == -EEXIST) {
1469			wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
1470			start = failed_start;
1471		} else
1472			break;
1473		WARN_ON(start > end);
1474	}
1475	return err;
1476}
1477
1478int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
1479{
1480	int err;
1481	u64 failed_start;
1482
1483	err = set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED,
1484			     &failed_start, NULL, GFP_NOFS, NULL);
1485	if (err == -EEXIST) {
1486		if (failed_start > start)
1487			clear_extent_bit(tree, start, failed_start - 1,
1488					 EXTENT_LOCKED, 1, 0, NULL);
1489		return 0;
1490	}
1491	return 1;
1492}
1493
1494void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end)
1495{
1496	unsigned long index = start >> PAGE_SHIFT;
1497	unsigned long end_index = end >> PAGE_SHIFT;
1498	struct page *page;
1499
1500	while (index <= end_index) {
1501		page = find_get_page(inode->i_mapping, index);
1502		BUG_ON(!page); /* Pages should be in the extent_io_tree */
1503		clear_page_dirty_for_io(page);
1504		put_page(page);
1505		index++;
1506	}
1507}
1508
1509void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end)
1510{
1511	unsigned long index = start >> PAGE_SHIFT;
1512	unsigned long end_index = end >> PAGE_SHIFT;
1513	struct page *page;
1514
1515	while (index <= end_index) {
1516		page = find_get_page(inode->i_mapping, index);
1517		BUG_ON(!page); /* Pages should be in the extent_io_tree */
1518		__set_page_dirty_nobuffers(page);
1519		account_page_redirty(page);
1520		put_page(page);
1521		index++;
1522	}
1523}
1524
1525/* find the first state struct with 'bits' set after 'start', and
1526 * return it.  tree->lock must be held.  NULL will returned if
1527 * nothing was found after 'start'
1528 */
1529static struct extent_state *
1530find_first_extent_bit_state(struct extent_io_tree *tree, u64 start, u32 bits)
1531{
1532	struct rb_node *node;
1533	struct extent_state *state;
1534
1535	/*
1536	 * this search will find all the extents that end after
1537	 * our range starts.
1538	 */
1539	node = tree_search(tree, start);
1540	if (!node)
1541		goto out;
1542
1543	while (1) {
1544		state = rb_entry(node, struct extent_state, rb_node);
1545		if (state->end >= start && (state->state & bits))
1546			return state;
1547
1548		node = rb_next(node);
1549		if (!node)
1550			break;
1551	}
1552out:
1553	return NULL;
1554}
1555
1556/*
1557 * Find the first offset in the io tree with one or more @bits set.
1558 *
1559 * Note: If there are multiple bits set in @bits, any of them will match.
1560 *
1561 * Return 0 if we find something, and update @start_ret and @end_ret.
1562 * Return 1 if we found nothing.
1563 */
1564int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
1565			  u64 *start_ret, u64 *end_ret, u32 bits,
1566			  struct extent_state **cached_state)
1567{
1568	struct extent_state *state;
1569	int ret = 1;
1570
1571	spin_lock(&tree->lock);
1572	if (cached_state && *cached_state) {
1573		state = *cached_state;
1574		if (state->end == start - 1 && extent_state_in_tree(state)) {
1575			while ((state = next_state(state)) != NULL) {
1576				if (state->state & bits)
1577					goto got_it;
1578			}
1579			free_extent_state(*cached_state);
1580			*cached_state = NULL;
1581			goto out;
1582		}
1583		free_extent_state(*cached_state);
1584		*cached_state = NULL;
1585	}
1586
1587	state = find_first_extent_bit_state(tree, start, bits);
1588got_it:
1589	if (state) {
1590		cache_state_if_flags(state, cached_state, 0);
1591		*start_ret = state->start;
1592		*end_ret = state->end;
1593		ret = 0;
1594	}
1595out:
1596	spin_unlock(&tree->lock);
1597	return ret;
1598}
1599
1600/**
1601 * Find a contiguous area of bits
1602 *
1603 * @tree:      io tree to check
1604 * @start:     offset to start the search from
1605 * @start_ret: the first offset we found with the bits set
1606 * @end_ret:   the final contiguous range of the bits that were set
1607 * @bits:      bits to look for
1608 *
1609 * set_extent_bit and clear_extent_bit can temporarily split contiguous ranges
1610 * to set bits appropriately, and then merge them again.  During this time it
1611 * will drop the tree->lock, so use this helper if you want to find the actual
1612 * contiguous area for given bits.  We will search to the first bit we find, and
1613 * then walk down the tree until we find a non-contiguous area.  The area
1614 * returned will be the full contiguous area with the bits set.
1615 */
1616int find_contiguous_extent_bit(struct extent_io_tree *tree, u64 start,
1617			       u64 *start_ret, u64 *end_ret, u32 bits)
1618{
1619	struct extent_state *state;
1620	int ret = 1;
1621
1622	spin_lock(&tree->lock);
1623	state = find_first_extent_bit_state(tree, start, bits);
1624	if (state) {
1625		*start_ret = state->start;
1626		*end_ret = state->end;
1627		while ((state = next_state(state)) != NULL) {
1628			if (state->start > (*end_ret + 1))
1629				break;
1630			*end_ret = state->end;
1631		}
1632		ret = 0;
1633	}
1634	spin_unlock(&tree->lock);
1635	return ret;
1636}
1637
1638/**
1639 * Find the first range that has @bits not set. This range could start before
1640 * @start.
1641 *
1642 * @tree:      the tree to search
1643 * @start:     offset at/after which the found extent should start
1644 * @start_ret: records the beginning of the range
1645 * @end_ret:   records the end of the range (inclusive)
1646 * @bits:      the set of bits which must be unset
1647 *
1648 * Since unallocated range is also considered one which doesn't have the bits
1649 * set it's possible that @end_ret contains -1, this happens in case the range
1650 * spans (last_range_end, end of device]. In this case it's up to the caller to
1651 * trim @end_ret to the appropriate size.
1652 */
1653void find_first_clear_extent_bit(struct extent_io_tree *tree, u64 start,
1654				 u64 *start_ret, u64 *end_ret, u32 bits)
1655{
1656	struct extent_state *state;
1657	struct rb_node *node, *prev = NULL, *next;
1658
1659	spin_lock(&tree->lock);
1660
1661	/* Find first extent with bits cleared */
1662	while (1) {
1663		node = __etree_search(tree, start, &next, &prev, NULL, NULL);
1664		if (!node && !next && !prev) {
1665			/*
1666			 * Tree is completely empty, send full range and let
1667			 * caller deal with it
1668			 */
1669			*start_ret = 0;
1670			*end_ret = -1;
1671			goto out;
1672		} else if (!node && !next) {
1673			/*
1674			 * We are past the last allocated chunk, set start at
1675			 * the end of the last extent.
1676			 */
1677			state = rb_entry(prev, struct extent_state, rb_node);
1678			*start_ret = state->end + 1;
1679			*end_ret = -1;
1680			goto out;
1681		} else if (!node) {
1682			node = next;
1683		}
1684		/*
1685		 * At this point 'node' either contains 'start' or start is
1686		 * before 'node'
1687		 */
1688		state = rb_entry(node, struct extent_state, rb_node);
1689
1690		if (in_range(start, state->start, state->end - state->start + 1)) {
1691			if (state->state & bits) {
1692				/*
1693				 * |--range with bits sets--|
1694				 *    |
1695				 *    start
1696				 */
1697				start = state->end + 1;
1698			} else {
1699				/*
1700				 * 'start' falls within a range that doesn't
1701				 * have the bits set, so take its start as
1702				 * the beginning of the desired range
1703				 *
1704				 * |--range with bits cleared----|
1705				 *      |
1706				 *      start
1707				 */
1708				*start_ret = state->start;
1709				break;
1710			}
1711		} else {
1712			/*
1713			 * |---prev range---|---hole/unset---|---node range---|
1714			 *                          |
1715			 *                        start
1716			 *
1717			 *                        or
1718			 *
1719			 * |---hole/unset--||--first node--|
1720			 * 0   |
1721			 *    start
1722			 */
1723			if (prev) {
1724				state = rb_entry(prev, struct extent_state,
1725						 rb_node);
1726				*start_ret = state->end + 1;
1727			} else {
1728				*start_ret = 0;
1729			}
1730			break;
1731		}
1732	}
1733
1734	/*
1735	 * Find the longest stretch from start until an entry which has the
1736	 * bits set
1737	 */
1738	while (1) {
1739		state = rb_entry(node, struct extent_state, rb_node);
1740		if (state->end >= start && !(state->state & bits)) {
1741			*end_ret = state->end;
1742		} else {
1743			*end_ret = state->start - 1;
1744			break;
1745		}
1746
1747		node = rb_next(node);
1748		if (!node)
1749			break;
1750	}
1751out:
1752	spin_unlock(&tree->lock);
1753}
1754
1755/*
1756 * find a contiguous range of bytes in the file marked as delalloc, not
1757 * more than 'max_bytes'.  start and end are used to return the range,
1758 *
1759 * true is returned if we find something, false if nothing was in the tree
1760 */
1761bool btrfs_find_delalloc_range(struct extent_io_tree *tree, u64 *start,
1762			       u64 *end, u64 max_bytes,
1763			       struct extent_state **cached_state)
1764{
1765	struct rb_node *node;
1766	struct extent_state *state;
1767	u64 cur_start = *start;
1768	bool found = false;
1769	u64 total_bytes = 0;
1770
1771	spin_lock(&tree->lock);
1772
1773	/*
1774	 * this search will find all the extents that end after
1775	 * our range starts.
1776	 */
1777	node = tree_search(tree, cur_start);
1778	if (!node) {
1779		*end = (u64)-1;
1780		goto out;
1781	}
1782
1783	while (1) {
1784		state = rb_entry(node, struct extent_state, rb_node);
1785		if (found && (state->start != cur_start ||
1786			      (state->state & EXTENT_BOUNDARY))) {
1787			goto out;
1788		}
1789		if (!(state->state & EXTENT_DELALLOC)) {
1790			if (!found)
1791				*end = state->end;
1792			goto out;
1793		}
1794		if (!found) {
1795			*start = state->start;
1796			*cached_state = state;
1797			refcount_inc(&state->refs);
1798		}
1799		found = true;
1800		*end = state->end;
1801		cur_start = state->end + 1;
1802		node = rb_next(node);
1803		total_bytes += state->end - state->start + 1;
1804		if (total_bytes >= max_bytes)
1805			break;
1806		if (!node)
1807			break;
1808	}
1809out:
1810	spin_unlock(&tree->lock);
1811	return found;
1812}
1813
1814/*
1815 * Process one page for __process_pages_contig().
1816 *
1817 * Return >0 if we hit @page == @locked_page.
1818 * Return 0 if we updated the page status.
1819 * Return -EGAIN if the we need to try again.
1820 * (For PAGE_LOCK case but got dirty page or page not belong to mapping)
1821 */
1822static int process_one_page(struct btrfs_fs_info *fs_info,
1823			    struct address_space *mapping,
1824			    struct page *page, struct page *locked_page,
1825			    unsigned long page_ops, u64 start, u64 end)
1826{
1827	u32 len;
1828
1829	ASSERT(end + 1 - start != 0 && end + 1 - start < U32_MAX);
1830	len = end + 1 - start;
1831
1832	if (page_ops & PAGE_SET_ORDERED)
1833		btrfs_page_clamp_set_ordered(fs_info, page, start, len);
1834	if (page_ops & PAGE_SET_ERROR)
1835		btrfs_page_clamp_set_error(fs_info, page, start, len);
1836	if (page_ops & PAGE_START_WRITEBACK) {
1837		btrfs_page_clamp_clear_dirty(fs_info, page, start, len);
1838		btrfs_page_clamp_set_writeback(fs_info, page, start, len);
1839	}
1840	if (page_ops & PAGE_END_WRITEBACK)
1841		btrfs_page_clamp_clear_writeback(fs_info, page, start, len);
1842
1843	if (page == locked_page)
1844		return 1;
1845
1846	if (page_ops & PAGE_LOCK) {
1847		int ret;
1848
1849		ret = btrfs_page_start_writer_lock(fs_info, page, start, len);
1850		if (ret)
1851			return ret;
1852		if (!PageDirty(page) || page->mapping != mapping) {
1853			btrfs_page_end_writer_lock(fs_info, page, start, len);
1854			return -EAGAIN;
1855		}
1856	}
1857	if (page_ops & PAGE_UNLOCK)
1858		btrfs_page_end_writer_lock(fs_info, page, start, len);
1859	return 0;
1860}
1861
1862static int __process_pages_contig(struct address_space *mapping,
1863				  struct page *locked_page,
1864				  u64 start, u64 end, unsigned long page_ops,
1865				  u64 *processed_end)
1866{
1867	struct btrfs_fs_info *fs_info = btrfs_sb(mapping->host->i_sb);
1868	pgoff_t start_index = start >> PAGE_SHIFT;
1869	pgoff_t end_index = end >> PAGE_SHIFT;
1870	pgoff_t index = start_index;
1871	unsigned long nr_pages = end_index - start_index + 1;
1872	unsigned long pages_processed = 0;
1873	struct page *pages[16];
1874	int err = 0;
1875	int i;
1876
1877	if (page_ops & PAGE_LOCK) {
1878		ASSERT(page_ops == PAGE_LOCK);
1879		ASSERT(processed_end && *processed_end == start);
1880	}
1881
1882	if ((page_ops & PAGE_SET_ERROR) && nr_pages > 0)
1883		mapping_set_error(mapping, -EIO);
1884
1885	while (nr_pages > 0) {
1886		int found_pages;
1887
1888		found_pages = find_get_pages_contig(mapping, index,
1889				     min_t(unsigned long,
1890				     nr_pages, ARRAY_SIZE(pages)), pages);
1891		if (found_pages == 0) {
1892			/*
1893			 * Only if we're going to lock these pages, we can find
1894			 * nothing at @index.
1895			 */
1896			ASSERT(page_ops & PAGE_LOCK);
1897			err = -EAGAIN;
1898			goto out;
1899		}
1900
1901		for (i = 0; i < found_pages; i++) {
1902			int process_ret;
1903
1904			process_ret = process_one_page(fs_info, mapping,
1905					pages[i], locked_page, page_ops,
1906					start, end);
1907			if (process_ret < 0) {
1908				for (; i < found_pages; i++)
1909					put_page(pages[i]);
1910				err = -EAGAIN;
1911				goto out;
1912			}
1913			put_page(pages[i]);
1914			pages_processed++;
1915		}
1916		nr_pages -= found_pages;
1917		index += found_pages;
1918		cond_resched();
1919	}
1920out:
1921	if (err && processed_end) {
1922		/*
1923		 * Update @processed_end. I know this is awful since it has
1924		 * two different return value patterns (inclusive vs exclusive).
1925		 *
1926		 * But the exclusive pattern is necessary if @start is 0, or we
1927		 * underflow and check against processed_end won't work as
1928		 * expected.
1929		 */
1930		if (pages_processed)
1931			*processed_end = min(end,
1932			((u64)(start_index + pages_processed) << PAGE_SHIFT) - 1);
1933		else
1934			*processed_end = start;
1935	}
1936	return err;
1937}
1938
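/*
 * Unlock all pages in the range [@start, @end], except @locked_page which is
 * left for the caller to unlock.
 */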
1939static noinline void __unlock_for_delalloc(struct inode *inode,
1940					   struct page *locked_page,
1941					   u64 start, u64 end)
1942{
1943	unsigned long index = start >> PAGE_SHIFT;
1944	unsigned long end_index = end >> PAGE_SHIFT;
1945
1946	ASSERT(locked_page);
1947	if (index == locked_page->index && end_index == index)
1948		return;
1949
1950	__process_pages_contig(inode->i_mapping, locked_page, start, end,
1951			       PAGE_UNLOCK, NULL);
1952}
1953
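/*
 * Lock all pages in the range [@delalloc_start, @delalloc_end], except
 * @locked_page which the caller already holds.
 *
 * Return 0 on success.  Return -EAGAIN if a page is missing or is no longer
 * dirty, in which case every page locked so far is unlocked again.
 */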
1954static noinline int lock_delalloc_pages(struct inode *inode,
1955					struct page *locked_page,
1956					u64 delalloc_start,
1957					u64 delalloc_end)
1958{
1959	unsigned long index = delalloc_start >> PAGE_SHIFT;
1960	unsigned long end_index = delalloc_end >> PAGE_SHIFT;
1961	u64 processed_end = delalloc_start;
1962	int ret;
1963
1964	ASSERT(locked_page);
1965	if (index == locked_page->index && index == end_index)
1966		return 0;
1967
1968	ret = __process_pages_contig(inode->i_mapping, locked_page, delalloc_start,
1969				     delalloc_end, PAGE_LOCK, &processed_end);
1970	if (ret == -EAGAIN && processed_end > delalloc_start)
1971		__unlock_for_delalloc(inode, locked_page, delalloc_start,
1972				      processed_end);
1973	return ret;
1974}
1975
1976/*
1977 * Find and lock a contiguous range of bytes in the file marked as delalloc, no
1978 * more than @max_bytes.
1979 *
1980 * @start:	The original start bytenr to search.
1981 *		Will store the extent range start bytenr.
1982 * @end:	The original end bytenr of the search range
1983 *		Will store the extent range end bytenr.
1984 *
1985 * Return true if we find a delalloc range which starts inside the original
1986 * range, and @start/@end will store the delalloc range start/end.
1987 *
1988 * Return false if we can't find any delalloc range which starts inside the
1989 * original range, and @start/@end will be the non-delalloc range start/end.
1990 */
1991EXPORT_FOR_TESTS
1992noinline_for_stack bool find_lock_delalloc_range(struct inode *inode,
1993				    struct page *locked_page, u64 *start,
1994				    u64 *end)
1995{
1996	struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
1997	const u64 orig_start = *start;
1998	const u64 orig_end = *end;
1999	u64 max_bytes = BTRFS_MAX_EXTENT_SIZE;
2000	u64 delalloc_start;
2001	u64 delalloc_end;
2002	bool found;
2003	struct extent_state *cached_state = NULL;
2004	int ret;
2005	int loops = 0;
2006
2007	/* Caller should pass a valid @end to indicate the search range end */
2008	ASSERT(orig_end > orig_start);
2009
2010	/* The range should at least cover part of the page */
2011	ASSERT(!(orig_start >= page_offset(locked_page) + PAGE_SIZE ||
2012		 orig_end <= page_offset(locked_page)));
2013again:
2014	/* step one, find a bunch of delalloc bytes starting at start */
2015	delalloc_start = *start;
2016	delalloc_end = 0;
2017	found = btrfs_find_delalloc_range(tree, &delalloc_start, &delalloc_end,
2018					  max_bytes, &cached_state);
2019	if (!found || delalloc_end <= *start || delalloc_start > orig_end) {
2020		*start = delalloc_start;
2021
2022		/* @delalloc_end can be -1, never go beyond @orig_end */
2023		*end = min(delalloc_end, orig_end);
2024		free_extent_state(cached_state);
2025		return false;
2026	}
2027
2028	/*
2029	 * start comes from the offset of locked_page.  We have to lock
2030	 * pages in order, so we can't process delalloc bytes before
2031	 * locked_page
2032	 */
2033	if (delalloc_start < *start)
2034		delalloc_start = *start;
2035
2036	/*
2037	 * make sure to limit the number of pages we try to lock down
2038	 */
2039	if (delalloc_end + 1 - delalloc_start > max_bytes)
2040		delalloc_end = delalloc_start + max_bytes - 1;
2041
2042	/* step two, lock all the pages after the page that has start */
2043	ret = lock_delalloc_pages(inode, locked_page,
2044				  delalloc_start, delalloc_end);
2045	ASSERT(!ret || ret == -EAGAIN);
2046	if (ret == -EAGAIN) {
		/*
		 * Some of the pages are gone, let's avoid looping by
		 * shortening the size of the delalloc range we're searching.
		 */
2050		free_extent_state(cached_state);
2051		cached_state = NULL;
2052		if (!loops) {
2053			max_bytes = PAGE_SIZE;
2054			loops = 1;
2055			goto again;
2056		} else {
2057			found = false;
2058			goto out_failed;
2059		}
2060	}
2061
2062	/* step three, lock the state bits for the whole range */
2063	lock_extent_bits(tree, delalloc_start, delalloc_end, &cached_state);
2064
2065	/* then test to make sure it is all still delalloc */
2066	ret = test_range_bit(tree, delalloc_start, delalloc_end,
2067			     EXTENT_DELALLOC, 1, cached_state);
2068	if (!ret) {
2069		unlock_extent_cached(tree, delalloc_start, delalloc_end,
2070				     &cached_state);
2071		__unlock_for_delalloc(inode, locked_page,
2072			      delalloc_start, delalloc_end);
2073		cond_resched();
2074		goto again;
2075	}
2076	free_extent_state(cached_state);
2077	*start = delalloc_start;
2078	*end = delalloc_end;
2079out_failed:
2080	return found;
2081}
2082
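/*
 * Clear @clear_bits from the io tree of @inode for the given range and apply
 * @page_ops to every page in the range, except @locked_page.
 */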
2083void extent_clear_unlock_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
2084				  struct page *locked_page,
2085				  u32 clear_bits, unsigned long page_ops)
2086{
2087	clear_extent_bit(&inode->io_tree, start, end, clear_bits, 1, 0, NULL);
2088
2089	__process_pages_contig(inode->vfs_inode.i_mapping, locked_page,
2090			       start, end, page_ops, NULL);
2091}
2092
2093/*
 * Count the number of bytes in the tree that have the given bit(s) set.  This
 * can be fairly slow, except for EXTENT_DIRTY which is cached.  The total
 * number of bytes found is returned.
2097 */
2098u64 count_range_bits(struct extent_io_tree *tree,
2099		     u64 *start, u64 search_end, u64 max_bytes,
2100		     u32 bits, int contig)
2101{
2102	struct rb_node *node;
2103	struct extent_state *state;
2104	u64 cur_start = *start;
2105	u64 total_bytes = 0;
2106	u64 last = 0;
2107	int found = 0;
2108
2109	if (WARN_ON(search_end <= cur_start))
2110		return 0;
2111
2112	spin_lock(&tree->lock);
2113	if (cur_start == 0 && bits == EXTENT_DIRTY) {
2114		total_bytes = tree->dirty_bytes;
2115		goto out;
2116	}
2117	/*
2118	 * this search will find all the extents that end after
2119	 * our range starts.
2120	 */
2121	node = tree_search(tree, cur_start);
2122	if (!node)
2123		goto out;
2124
2125	while (1) {
2126		state = rb_entry(node, struct extent_state, rb_node);
2127		if (state->start > search_end)
2128			break;
2129		if (contig && found && state->start > last + 1)
2130			break;
2131		if (state->end >= cur_start && (state->state & bits) == bits) {
2132			total_bytes += min(search_end, state->end) + 1 -
2133				       max(cur_start, state->start);
2134			if (total_bytes >= max_bytes)
2135				break;
2136			if (!found) {
2137				*start = max(cur_start, state->start);
2138				found = 1;
2139			}
2140			last = state->end;
2141		} else if (contig && found) {
2142			break;
2143		}
2144		node = rb_next(node);
2145		if (!node)
2146			break;
2147	}
2148out:
2149	spin_unlock(&tree->lock);
2150	return total_bytes;
2151}
2152
2153/*
 * Set the failure record for a given byte offset in the tree.  If there isn't
 * an extent_state starting at that offset, -ENOENT is returned.
2156 */
2157int set_state_failrec(struct extent_io_tree *tree, u64 start,
2158		      struct io_failure_record *failrec)
2159{
2160	struct rb_node *node;
2161	struct extent_state *state;
2162	int ret = 0;
2163
2164	spin_lock(&tree->lock);
2165	/*
2166	 * this search will find all the extents that end after
2167	 * our range starts.
2168	 */
2169	node = tree_search(tree, start);
2170	if (!node) {
2171		ret = -ENOENT;
2172		goto out;
2173	}
2174	state = rb_entry(node, struct extent_state, rb_node);
2175	if (state->start != start) {
2176		ret = -ENOENT;
2177		goto out;
2178	}
2179	state->failrec = failrec;
2180out:
2181	spin_unlock(&tree->lock);
2182	return ret;
2183}
2184
2185struct io_failure_record *get_state_failrec(struct extent_io_tree *tree, u64 start)
2186{
2187	struct rb_node *node;
2188	struct extent_state *state;
2189	struct io_failure_record *failrec;
2190
2191	spin_lock(&tree->lock);
2192	/*
2193	 * this search will find all the extents that end after
2194	 * our range starts.
2195	 */
2196	node = tree_search(tree, start);
2197	if (!node) {
2198		failrec = ERR_PTR(-ENOENT);
2199		goto out;
2200	}
2201	state = rb_entry(node, struct extent_state, rb_node);
2202	if (state->start != start) {
2203		failrec = ERR_PTR(-ENOENT);
2204		goto out;
2205	}
2206
2207	failrec = state->failrec;
2208out:
2209	spin_unlock(&tree->lock);
2210	return failrec;
2211}
2212
2213/*
 * Search a range in the state tree for a given mask.
 * If 'filled' == 1, this returns 1 only if every extent in the range
2216 * has the bits set.  Otherwise, 1 is returned if any bit in the
2217 * range is found set.
2218 */
2219int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
2220		   u32 bits, int filled, struct extent_state *cached)
2221{
2222	struct extent_state *state = NULL;
2223	struct rb_node *node;
2224	int bitset = 0;
2225
2226	spin_lock(&tree->lock);
2227	if (cached && extent_state_in_tree(cached) && cached->start <= start &&
2228	    cached->end > start)
2229		node = &cached->rb_node;
2230	else
2231		node = tree_search(tree, start);
2232	while (node && start <= end) {
2233		state = rb_entry(node, struct extent_state, rb_node);
2234
2235		if (filled && state->start > start) {
2236			bitset = 0;
2237			break;
2238		}
2239
2240		if (state->start > end)
2241			break;
2242
2243		if (state->state & bits) {
2244			bitset = 1;
2245			if (!filled)
2246				break;
2247		} else if (filled) {
2248			bitset = 0;
2249			break;
2250		}
2251
2252		if (state->end == (u64)-1)
2253			break;
2254
2255		start = state->end + 1;
2256		if (start > end)
2257			break;
2258		node = rb_next(node);
2259		if (!node) {
2260			if (filled)
2261				bitset = 0;
2262			break;
2263		}
2264	}
2265	spin_unlock(&tree->lock);
2266	return bitset;
2267}
2268
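/*
 * Release an io failure record: detach it from @failure_tree, clear the
 * EXTENT_LOCKED | EXTENT_DIRTY bits there and the EXTENT_DAMAGED bit in
 * @io_tree, then free the record itself.
 */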
2269int free_io_failure(struct extent_io_tree *failure_tree,
2270		    struct extent_io_tree *io_tree,
2271		    struct io_failure_record *rec)
2272{
2273	int ret;
2274	int err = 0;
2275
2276	set_state_failrec(failure_tree, rec->start, NULL);
2277	ret = clear_extent_bits(failure_tree, rec->start,
2278				rec->start + rec->len - 1,
2279				EXTENT_LOCKED | EXTENT_DIRTY);
2280	if (ret)
2281		err = ret;
2282
2283	ret = clear_extent_bits(io_tree, rec->start,
2284				rec->start + rec->len - 1,
2285				EXTENT_DAMAGED);
2286	if (ret && !err)
2287		err = ret;
2288
2289	kfree(rec);
2290	return err;
2291}
2292
2293/*
 * This bypasses the standard btrfs submit functions deliberately, as the
 * standard behavior is to write all copies in a raid setup.  Here we only
 * want to write the one bad copy, so we do the mapping ourselves and issue
 * submit_bio directly.
 * To avoid any synchronization issues, wait for the data after writing, which
 * actually prevents the read that triggered the error from finishing.
 * Currently, there can be no more than two copies of every data bit, thus
 * exactly one rewrite is required.
2302 */
2303static int repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 start,
2304			     u64 length, u64 logical, struct page *page,
2305			     unsigned int pg_offset, int mirror_num)
2306{
2307	struct bio *bio;
2308	struct btrfs_device *dev;
2309	u64 map_length = 0;
2310	u64 sector;
2311	struct btrfs_io_context *bioc = NULL;
2312	int ret;
2313
2314	ASSERT(!(fs_info->sb->s_flags & SB_RDONLY));
2315	BUG_ON(!mirror_num);
2316
2317	if (btrfs_repair_one_zone(fs_info, logical))
2318		return 0;
2319
2320	bio = btrfs_bio_alloc(1);
2321	bio->bi_iter.bi_size = 0;
2322	map_length = length;
2323
2324	/*
2325	 * Avoid races with device replace and make sure our bioc has devices
2326	 * associated to its stripes that don't go away while we are doing the
2327	 * read repair operation.
2328	 */
2329	btrfs_bio_counter_inc_blocked(fs_info);
2330	if (btrfs_is_parity_mirror(fs_info, logical, length)) {
2331		/*
2332		 * Note that we don't use BTRFS_MAP_WRITE because it's supposed
2333		 * to update all raid stripes, but here we just want to correct
		 * the bad stripe, thus BTRFS_MAP_READ is abused to only get the bad
2335		 * stripe's dev and sector.
2336		 */
2337		ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, logical,
2338				      &map_length, &bioc, 0);
2339		if (ret) {
2340			btrfs_bio_counter_dec(fs_info);
2341			bio_put(bio);
2342			return -EIO;
2343		}
2344		ASSERT(bioc->mirror_num == 1);
2345	} else {
2346		ret = btrfs_map_block(fs_info, BTRFS_MAP_WRITE, logical,
2347				      &map_length, &bioc, mirror_num);
2348		if (ret) {
2349			btrfs_bio_counter_dec(fs_info);
2350			bio_put(bio);
2351			return -EIO;
2352		}
2353		BUG_ON(mirror_num != bioc->mirror_num);
2354	}
2355
2356	sector = bioc->stripes[bioc->mirror_num - 1].physical >> 9;
2357	bio->bi_iter.bi_sector = sector;
2358	dev = bioc->stripes[bioc->mirror_num - 1].dev;
2359	btrfs_put_bioc(bioc);
2360	if (!dev || !dev->bdev ||
2361	    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) {
2362		btrfs_bio_counter_dec(fs_info);
2363		bio_put(bio);
2364		return -EIO;
2365	}
2366	bio_set_dev(bio, dev->bdev);
2367	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC;
2368	bio_add_page(bio, page, length, pg_offset);
2369
2370	if (btrfsic_submit_bio_wait(bio)) {
2371		/* try to remap that extent elsewhere? */
2372		btrfs_bio_counter_dec(fs_info);
2373		bio_put(bio);
2374		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
2375		return -EIO;
2376	}
2377
2378	btrfs_info_rl_in_rcu(fs_info,
2379		"read error corrected: ino %llu off %llu (dev %s sector %llu)",
2380				  ino, start,
2381				  rcu_str_deref(dev->name), sector);
2382	btrfs_bio_counter_dec(fs_info);
2383	bio_put(bio);
2384	return 0;
2385}
2386
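/*
 * Write the good in-memory copy of @eb back to the location of @mirror_num,
 * one page at a time, to repair a corrupted on-disk copy.
 */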
2387int btrfs_repair_eb_io_failure(const struct extent_buffer *eb, int mirror_num)
2388{
2389	struct btrfs_fs_info *fs_info = eb->fs_info;
2390	u64 start = eb->start;
2391	int i, num_pages = num_extent_pages(eb);
2392	int ret = 0;
2393
2394	if (sb_rdonly(fs_info->sb))
2395		return -EROFS;
2396
2397	for (i = 0; i < num_pages; i++) {
2398		struct page *p = eb->pages[i];
2399
2400		ret = repair_io_failure(fs_info, 0, start, PAGE_SIZE, start, p,
2401					start - page_offset(p), mirror_num);
2402		if (ret)
2403			break;
2404		start += PAGE_SIZE;
2405	}
2406
2407	return ret;
2408}
2409
2410/*
2411 * each time an IO finishes, we do a fast check in the IO failure tree
2412 * to see if we need to process or clean up an io_failure_record
2413 */
2414int clean_io_failure(struct btrfs_fs_info *fs_info,
2415		     struct extent_io_tree *failure_tree,
2416		     struct extent_io_tree *io_tree, u64 start,
2417		     struct page *page, u64 ino, unsigned int pg_offset)
2418{
2419	u64 private;
2420	struct io_failure_record *failrec;
2421	struct extent_state *state;
2422	int num_copies;
2423	int ret;
2424
2425	private = 0;
2426	ret = count_range_bits(failure_tree, &private, (u64)-1, 1,
2427			       EXTENT_DIRTY, 0);
2428	if (!ret)
2429		return 0;
2430
2431	failrec = get_state_failrec(failure_tree, start);
2432	if (IS_ERR(failrec))
2433		return 0;
2434
2435	BUG_ON(!failrec->this_mirror);
2436
2437	if (sb_rdonly(fs_info->sb))
2438		goto out;
2439
2440	spin_lock(&io_tree->lock);
2441	state = find_first_extent_bit_state(io_tree,
2442					    failrec->start,
2443					    EXTENT_LOCKED);
2444	spin_unlock(&io_tree->lock);
2445
2446	if (state && state->start <= failrec->start &&
2447	    state->end >= failrec->start + failrec->len - 1) {
2448		num_copies = btrfs_num_copies(fs_info, failrec->logical,
2449					      failrec->len);
2450		if (num_copies > 1)  {
2451			repair_io_failure(fs_info, ino, start, failrec->len,
2452					  failrec->logical, page, pg_offset,
2453					  failrec->failed_mirror);
2454		}
2455	}
2456
2457out:
2458	free_io_failure(failure_tree, io_tree, failrec);
2459
2460	return 0;
2461}
2462
2463/*
 * Can be called when
 * - holding the extent lock
 * - under an ordered extent
 * - the inode is being freed
2468 */
2469void btrfs_free_io_failure_record(struct btrfs_inode *inode, u64 start, u64 end)
2470{
2471	struct extent_io_tree *failure_tree = &inode->io_failure_tree;
2472	struct io_failure_record *failrec;
2473	struct extent_state *state, *next;
2474
2475	if (RB_EMPTY_ROOT(&failure_tree->state))
2476		return;
2477
2478	spin_lock(&failure_tree->lock);
2479	state = find_first_extent_bit_state(failure_tree, start, EXTENT_DIRTY);
2480	while (state) {
2481		if (state->start > end)
2482			break;
2483
2484		ASSERT(state->end <= end);
2485
2486		next = next_state(state);
2487
2488		failrec = state->failrec;
2489		free_extent_state(state);
2490		kfree(failrec);
2491
2492		state = next;
2493	}
2494	spin_unlock(&failure_tree->lock);
2495}
2496
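/*
 * Return the io_failure_record for the sector at @start, creating a new
 * single-sector record (and setting up the failure and io tree bits for it)
 * if none exists yet.
 */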
2497static struct io_failure_record *btrfs_get_io_failure_record(struct inode *inode,
2498							     u64 start)
2499{
2500	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2501	struct io_failure_record *failrec;
2502	struct extent_map *em;
2503	struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
2504	struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
2505	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
2506	const u32 sectorsize = fs_info->sectorsize;
2507	int ret;
2508	u64 logical;
2509
2510	failrec = get_state_failrec(failure_tree, start);
2511	if (!IS_ERR(failrec)) {
2512		btrfs_debug(fs_info,
2513	"Get IO Failure Record: (found) logical=%llu, start=%llu, len=%llu",
2514			failrec->logical, failrec->start, failrec->len);
2515		/*
		 * When data can be on disk in more than two copies, add to the
		 * failrec here (e.g. with a list for failed_mirror) to make
		 * clean_io_failure() clean all those errors at once.
2519		 */
2520
2521		return failrec;
2522	}
2523
2524	failrec = kzalloc(sizeof(*failrec), GFP_NOFS);
2525	if (!failrec)
2526		return ERR_PTR(-ENOMEM);
2527
2528	failrec->start = start;
2529	failrec->len = sectorsize;
2530	failrec->this_mirror = 0;
2531	failrec->bio_flags = 0;
2532
2533	read_lock(&em_tree->lock);
2534	em = lookup_extent_mapping(em_tree, start, failrec->len);
2535	if (!em) {
2536		read_unlock(&em_tree->lock);
2537		kfree(failrec);
2538		return ERR_PTR(-EIO);
2539	}
2540
2541	if (em->start > start || em->start + em->len <= start) {
2542		free_extent_map(em);
2543		em = NULL;
2544	}
2545	read_unlock(&em_tree->lock);
2546	if (!em) {
2547		kfree(failrec);
2548		return ERR_PTR(-EIO);
2549	}
2550
2551	logical = start - em->start;
2552	logical = em->block_start + logical;
2553	if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
2554		logical = em->block_start;
2555		failrec->bio_flags = EXTENT_BIO_COMPRESSED;
2556		extent_set_compress_type(&failrec->bio_flags, em->compress_type);
2557	}
2558
2559	btrfs_debug(fs_info,
2560		    "Get IO Failure Record: (new) logical=%llu, start=%llu, len=%llu",
2561		    logical, start, failrec->len);
2562
2563	failrec->logical = logical;
2564	free_extent_map(em);
2565
2566	/* Set the bits in the private failure tree */
2567	ret = set_extent_bits(failure_tree, start, start + sectorsize - 1,
2568			      EXTENT_LOCKED | EXTENT_DIRTY);
2569	if (ret >= 0) {
2570		ret = set_state_failrec(failure_tree, start, failrec);
2571		/* Set the bits in the inode's tree */
2572		ret = set_extent_bits(tree, start, start + sectorsize - 1,
2573				      EXTENT_DAMAGED);
2574	} else if (ret < 0) {
2575		kfree(failrec);
2576		return ERR_PTR(ret);
2577	}
2578
2579	return failrec;
2580}
2581
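/*
 * Check whether the failed sector has another copy to read from, and if so
 * pick the next mirror to try by advancing failrec->this_mirror (skipping the
 * mirror that just failed).
 */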
2582static bool btrfs_check_repairable(struct inode *inode,
2583				   struct io_failure_record *failrec,
2584				   int failed_mirror)
2585{
2586	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2587	int num_copies;
2588
2589	num_copies = btrfs_num_copies(fs_info, failrec->logical, failrec->len);
2590	if (num_copies == 1) {
2591		/*
2592		 * we only have a single copy of the data, so don't bother with
2593		 * all the retry and error correction code that follows. no
2594		 * matter what the error is, it is very likely to persist.
2595		 */
2596		btrfs_debug(fs_info,
2597			"Check Repairable: cannot repair, num_copies=%d, next_mirror %d, failed_mirror %d",
2598			num_copies, failrec->this_mirror, failed_mirror);
2599		return false;
2600	}
2601
2602	/* The failure record should only contain one sector */
2603	ASSERT(failrec->len == fs_info->sectorsize);
2604
2605	/*
2606	 * There are two premises:
2607	 * a) deliver good data to the caller
2608	 * b) correct the bad sectors on disk
2609	 *
2610	 * Since we're only doing repair for one sector, we only need to get
	 * a good copy of the failed sector and if we succeed, we have set up
2612	 * everything for repair_io_failure to do the rest for us.
2613	 */
2614	failrec->failed_mirror = failed_mirror;
2615	failrec->this_mirror++;
2616	if (failrec->this_mirror == failed_mirror)
2617		failrec->this_mirror++;
2618
2619	if (failrec->this_mirror > num_copies) {
2620		btrfs_debug(fs_info,
2621			"Check Repairable: (fail) num_copies=%d, next_mirror %d, failed_mirror %d",
2622			num_copies, failrec->this_mirror, failed_mirror);
2623		return false;
2624	}
2625
2626	return true;
2627}
2628
2629int btrfs_repair_one_sector(struct inode *inode,
2630			    struct bio *failed_bio, u32 bio_offset,
2631			    struct page *page, unsigned int pgoff,
2632			    u64 start, int failed_mirror,
2633			    submit_bio_hook_t *submit_bio_hook)
2634{
2635	struct io_failure_record *failrec;
2636	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2637	struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
2638	struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
2639	struct btrfs_bio *failed_bbio = btrfs_bio(failed_bio);
2640	const int icsum = bio_offset >> fs_info->sectorsize_bits;
2641	struct bio *repair_bio;
2642	struct btrfs_bio *repair_bbio;
2643	blk_status_t status;
2644
2645	btrfs_debug(fs_info,
2646		   "repair read error: read error at %llu", start);
2647
2648	BUG_ON(bio_op(failed_bio) == REQ_OP_WRITE);
2649
2650	failrec = btrfs_get_io_failure_record(inode, start);
2651	if (IS_ERR(failrec))
		return PTR_ERR(failrec);

2655	if (!btrfs_check_repairable(inode, failrec, failed_mirror)) {
2656		free_io_failure(failure_tree, tree, failrec);
2657		return -EIO;
2658	}
2659
2660	repair_bio = btrfs_bio_alloc(1);
2661	repair_bbio = btrfs_bio(repair_bio);
2662	repair_bio->bi_opf = REQ_OP_READ;
2663	repair_bio->bi_end_io = failed_bio->bi_end_io;
2664	repair_bio->bi_iter.bi_sector = failrec->logical >> 9;
2665	repair_bio->bi_private = failed_bio->bi_private;
2666
2667	if (failed_bbio->csum) {
2668		const u32 csum_size = fs_info->csum_size;
2669
2670		repair_bbio->csum = repair_bbio->csum_inline;
2671		memcpy(repair_bbio->csum,
2672		       failed_bbio->csum + csum_size * icsum, csum_size);
2673	}
2674
2675	bio_add_page(repair_bio, page, failrec->len, pgoff);
2676	repair_bbio->iter = repair_bio->bi_iter;
2677
2678	btrfs_debug(btrfs_sb(inode->i_sb),
2679		    "repair read error: submitting new read to mirror %d",
2680		    failrec->this_mirror);
2681
2682	status = submit_bio_hook(inode, repair_bio, failrec->this_mirror,
2683				 failrec->bio_flags);
2684	if (status) {
2685		free_io_failure(failure_tree, tree, failrec);
2686		bio_put(repair_bio);
2687	}
2688	return blk_status_to_errno(status);
2689}
2690
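/*
 * Finish reading the range [@start, @start + @len) of @page: update the
 * uptodate/error bits (running the fsverity check for uptodate data if
 * needed), then unlock the page or end the subpage reader.
 */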
2691static void end_page_read(struct page *page, bool uptodate, u64 start, u32 len)
2692{
2693	struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb);
2694
2695	ASSERT(page_offset(page) <= start &&
2696	       start + len <= page_offset(page) + PAGE_SIZE);
2697
2698	if (uptodate) {
2699		if (fsverity_active(page->mapping->host) &&
2700		    !PageError(page) &&
2701		    !PageUptodate(page) &&
2702		    start < i_size_read(page->mapping->host) &&
2703		    !fsverity_verify_page(page)) {
2704			btrfs_page_set_error(fs_info, page, start, len);
2705		} else {
2706			btrfs_page_set_uptodate(fs_info, page, start, len);
2707		}
2708	} else {
2709		btrfs_page_clear_uptodate(fs_info, page, start, len);
2710		btrfs_page_set_error(fs_info, page, start, len);
2711	}
2712
2713	if (fs_info->sectorsize == PAGE_SIZE)
2714		unlock_page(page);
2715	else
2716		btrfs_subpage_end_reader(fs_info, page, start, len);
2717}
2718
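/*
 * Iterate over every sector of a failed read and submit a repair read for
 * each sector set in @error_bitmap.  Sectors without errors, and sectors
 * whose repair submission failed, are finished and unlocked here directly.
 */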
2719static blk_status_t submit_read_repair(struct inode *inode,
2720				      struct bio *failed_bio, u32 bio_offset,
2721				      struct page *page, unsigned int pgoff,
2722				      u64 start, u64 end, int failed_mirror,
2723				      unsigned int error_bitmap,
2724				      submit_bio_hook_t *submit_bio_hook)
2725{
2726	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2727	const u32 sectorsize = fs_info->sectorsize;
2728	const int nr_bits = (end + 1 - start) >> fs_info->sectorsize_bits;
2729	int error = 0;
2730	int i;
2731
2732	BUG_ON(bio_op(failed_bio) == REQ_OP_WRITE);
2733
2734	/* We're here because we had some read errors or csum mismatch */
2735	ASSERT(error_bitmap);
2736
2737	/*
2738	 * We only get called on buffered IO, thus page must be mapped and bio
2739	 * must not be cloned.
2740	 */
2741	ASSERT(page->mapping && !bio_flagged(failed_bio, BIO_CLONED));
2742
2743	/* Iterate through all the sectors in the range */
2744	for (i = 0; i < nr_bits; i++) {
2745		const unsigned int offset = i * sectorsize;
2746		struct extent_state *cached = NULL;
2747		bool uptodate = false;
2748		int ret;
2749
2750		if (!(error_bitmap & (1U << i))) {
2751			/*
2752			 * This sector has no error, just end the page read
2753			 * and unlock the range.
2754			 */
2755			uptodate = true;
2756			goto next;
2757		}
2758
2759		ret = btrfs_repair_one_sector(inode, failed_bio,
2760				bio_offset + offset,
2761				page, pgoff + offset, start + offset,
2762				failed_mirror, submit_bio_hook);
2763		if (!ret) {
2764			/*
2765			 * We have submitted the read repair, the page release
2766			 * will be handled by the endio function of the
2767			 * submitted repair bio.
			 * Thus we don't need to do anything here.
2769			 */
2770			continue;
2771		}
2772		/*
		 * Repair failed, just record the error but still continue,
		 * otherwise the remaining sectors will not be properly unlocked.
2775		 */
2776		if (!error)
2777			error = ret;
2778next:
2779		end_page_read(page, uptodate, start + offset, sectorsize);
2780		if (uptodate)
2781			set_extent_uptodate(&BTRFS_I(inode)->io_tree,
2782					start + offset,
2783					start + offset + sectorsize - 1,
2784					&cached, GFP_ATOMIC);
2785		unlock_extent_cached_atomic(&BTRFS_I(inode)->io_tree,
2786				start + offset,
2787				start + offset + sectorsize - 1,
2788				&cached);
2789	}
2790	return errno_to_blk_status(error);
2791}
2792
2793/* lots and lots of room for performance fixes in the end_bio funcs */
2794
2795void end_extent_writepage(struct page *page, int err, u64 start, u64 end)
2796{
2797	struct btrfs_inode *inode;
2798	const bool uptodate = (err == 0);
2799	int ret = 0;
2800
2801	ASSERT(page && page->mapping);
2802	inode = BTRFS_I(page->mapping->host);
2803	btrfs_writepage_endio_finish_ordered(inode, page, start, end, uptodate);
2804
2805	if (!uptodate) {
2806		const struct btrfs_fs_info *fs_info = inode->root->fs_info;
2807		u32 len;
2808
2809		ASSERT(end + 1 - start <= U32_MAX);
2810		len = end + 1 - start;
2811
2812		btrfs_page_clear_uptodate(fs_info, page, start, len);
2813		btrfs_page_set_error(fs_info, page, start, len);
2814		ret = err < 0 ? err : -EIO;
2815		mapping_set_error(page->mapping, ret);
2816	}
2817}
2818
2819/*
2820 * after a writepage IO is done, we need to:
2821 * clear the uptodate bits on error
2822 * clear the writeback bits in the extent tree for this IO
2823 * end_page_writeback if the page has no more pending IO
2824 *
2825 * Scheduling is not allowed, so the extent state tree is expected
2826 * to have one and only one object corresponding to this IO.
2827 */
2828static void end_bio_extent_writepage(struct bio *bio)
2829{
2830	int error = blk_status_to_errno(bio->bi_status);
2831	struct bio_vec *bvec;
2832	u64 start;
2833	u64 end;
2834	struct bvec_iter_all iter_all;
2835	bool first_bvec = true;
2836
2837	ASSERT(!bio_flagged(bio, BIO_CLONED));
2838	bio_for_each_segment_all(bvec, bio, iter_all) {
2839		struct page *page = bvec->bv_page;
2840		struct inode *inode = page->mapping->host;
2841		struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2842		const u32 sectorsize = fs_info->sectorsize;
2843
2844		/* Our read/write should always be sector aligned. */
2845		if (!IS_ALIGNED(bvec->bv_offset, sectorsize))
2846			btrfs_err(fs_info,
2847		"partial page write in btrfs with offset %u and length %u",
2848				  bvec->bv_offset, bvec->bv_len);
2849		else if (!IS_ALIGNED(bvec->bv_len, sectorsize))
2850			btrfs_info(fs_info,
2851		"incomplete page write with offset %u and length %u",
2852				   bvec->bv_offset, bvec->bv_len);
2853
2854		start = page_offset(page) + bvec->bv_offset;
2855		end = start + bvec->bv_len - 1;
2856
2857		if (first_bvec) {
2858			btrfs_record_physical_zoned(inode, start, bio);
2859			first_bvec = false;
2860		}
2861
2862		end_extent_writepage(page, error, start, end);
2863
2864		btrfs_page_clear_writeback(fs_info, page, start, bvec->bv_len);
2865	}
2866
2867	bio_put(bio);
2868}
2869
2870/*
2871 * Record previously processed extent range
2872 *
 * This lets endio_readpage_release_extent() handle a full extent range at
 * once, reducing the number of extent io tree operations.
2875 */
2876struct processed_extent {
2877	struct btrfs_inode *inode;
2878	/* Start of the range in @inode */
2879	u64 start;
2880	/* End of the range in @inode */
2881	u64 end;
2882	bool uptodate;
2883};
2884
2885/*
2886 * Try to release processed extent range
2887 *
 * May not release the extent range right now if the current range is
 * contiguous to the processed extent.
 *
 * Will release the processed extent when @inode or @uptodate differs, or when
 * the range is no longer contiguous to the processed range.
2893 *
2894 * Passing @inode == NULL will force processed extent to be released.
2895 */
2896static void endio_readpage_release_extent(struct processed_extent *processed,
2897			      struct btrfs_inode *inode, u64 start, u64 end,
2898			      bool uptodate)
2899{
2900	struct extent_state *cached = NULL;
2901	struct extent_io_tree *tree;
2902
2903	/* The first extent, initialize @processed */
2904	if (!processed->inode)
2905		goto update;
2906
2907	/*
	 * Contiguous to the processed extent, just update the end.
	 *
	 * Several things to notice:
	 *
	 * - bios can be merged as long as the on-disk bytenr is contiguous.
	 *   This means we can have pages belonging to other inodes, thus we
	 *   need to check if the inode still matches.
	 * - a bvec can contain a range beyond the current page for multi-page
	 *   bvecs, thus we need the processed->end + 1 >= start check.
2917	 */
2918	if (processed->inode == inode && processed->uptodate == uptodate &&
2919	    processed->end + 1 >= start && end >= processed->end) {
2920		processed->end = end;
2921		return;
2922	}
2923
2924	tree = &processed->inode->io_tree;
2925	/*
2926	 * Now we don't have range contiguous to the processed range, release
2927	 * the processed range now.
2928	 */
2929	if (processed->uptodate && tree->track_uptodate)
2930		set_extent_uptodate(tree, processed->start, processed->end,
2931				    &cached, GFP_ATOMIC);
2932	unlock_extent_cached_atomic(tree, processed->start, processed->end,
2933				    &cached);
2934
2935update:
2936	/* Update processed to current range */
2937	processed->inode = inode;
2938	processed->start = start;
2939	processed->end = end;
2940	processed->uptodate = uptodate;
2941}
2942
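/*
 * Mark the start of a page read.  A no-op for regular sectorsize; for subpage
 * filesystems this starts the subpage reader for the whole page.
 */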
2943static void begin_page_read(struct btrfs_fs_info *fs_info, struct page *page)
2944{
2945	ASSERT(PageLocked(page));
2946	if (fs_info->sectorsize == PAGE_SIZE)
2947		return;
2948
2949	ASSERT(PagePrivate(page));
2950	btrfs_subpage_start_reader(fs_info, page, page_offset(page), PAGE_SIZE);
2951}
2952
2953/*
 * Find the extent buffer for a given bytenr.
2955 *
2956 * This is for end_bio_extent_readpage(), thus we can't do any unsafe locking
2957 * in endio context.
2958 */
2959static struct extent_buffer *find_extent_buffer_readpage(
2960		struct btrfs_fs_info *fs_info, struct page *page, u64 bytenr)
2961{
2962	struct extent_buffer *eb;
2963
2964	/*
	 * For regular sectorsize, we can use page->private to grab the extent
	 * buffer directly.
2967	 */
2968	if (fs_info->sectorsize == PAGE_SIZE) {
2969		ASSERT(PagePrivate(page) && page->private);
2970		return (struct extent_buffer *)page->private;
2971	}
2972
2973	/* For subpage case, we need to lookup buffer radix tree */
2974	rcu_read_lock();
2975	eb = radix_tree_lookup(&fs_info->buffer_radix,
2976			       bytenr >> fs_info->sectorsize_bits);
2977	rcu_read_unlock();
2978	ASSERT(eb);
2979	return eb;
2980}
2981
2982/*
2983 * after a readpage IO is done, we need to:
2984 * clear the uptodate bits on error
2985 * set the uptodate bits if things worked
2986 * set the page up to date if all extents in the tree are uptodate
2987 * clear the lock bit in the extent tree
2988 * unlock the page if there are no other extents locked for it
2989 *
2990 * Scheduling is not allowed, so the extent state tree is expected
2991 * to have one and only one object corresponding to this IO.
2992 */
2993static void end_bio_extent_readpage(struct bio *bio)
2994{
2995	struct bio_vec *bvec;
2996	struct btrfs_bio *bbio = btrfs_bio(bio);
2997	struct extent_io_tree *tree, *failure_tree;
2998	struct processed_extent processed = { 0 };
2999	/*
	 * The offset to the beginning of the bio.  Since one bio can never be
	 * larger than UINT_MAX, u32 is enough here.
3002	 */
3003	u32 bio_offset = 0;
3004	int mirror;
3005	int ret;
3006	struct bvec_iter_all iter_all;
3007
3008	ASSERT(!bio_flagged(bio, BIO_CLONED));
3009	bio_for_each_segment_all(bvec, bio, iter_all) {
3010		bool uptodate = !bio->bi_status;
3011		struct page *page = bvec->bv_page;
3012		struct inode *inode = page->mapping->host;
3013		struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
3014		const u32 sectorsize = fs_info->sectorsize;
3015		unsigned int error_bitmap = (unsigned int)-1;
3016		u64 start;
3017		u64 end;
3018		u32 len;
3019
3020		btrfs_debug(fs_info,
3021			"end_bio_extent_readpage: bi_sector=%llu, err=%d, mirror=%u",
3022			bio->bi_iter.bi_sector, bio->bi_status,
3023			bbio->mirror_num);
3024		tree = &BTRFS_I(inode)->io_tree;
3025		failure_tree = &BTRFS_I(inode)->io_failure_tree;
3026
3027		/*
3028		 * We always issue full-sector reads, but if some block in a
3029		 * page fails to read, blk_update_request() will advance
3030		 * bv_offset and adjust bv_len to compensate.  Print a warning
3031		 * for unaligned offsets, and an error if they don't add up to
3032		 * a full sector.
3033		 */
3034		if (!IS_ALIGNED(bvec->bv_offset, sectorsize))
3035			btrfs_err(fs_info,
3036		"partial page read in btrfs with offset %u and length %u",
3037				  bvec->bv_offset, bvec->bv_len);
3038		else if (!IS_ALIGNED(bvec->bv_offset + bvec->bv_len,
3039				     sectorsize))
3040			btrfs_info(fs_info,
3041		"incomplete page read with offset %u and length %u",
3042				   bvec->bv_offset, bvec->bv_len);
3043
3044		start = page_offset(page) + bvec->bv_offset;
3045		end = start + bvec->bv_len - 1;
3046		len = bvec->bv_len;
3047
3048		mirror = bbio->mirror_num;
3049		if (likely(uptodate)) {
3050			if (is_data_inode(inode)) {
3051				error_bitmap = btrfs_verify_data_csum(bbio,
3052						bio_offset, page, start, end);
3053				ret = error_bitmap;
3054			} else {
3055				ret = btrfs_validate_metadata_buffer(bbio,
3056					page, start, end, mirror);
3057			}
3058			if (ret)
3059				uptodate = false;
3060			else
3061				clean_io_failure(BTRFS_I(inode)->root->fs_info,
3062						 failure_tree, tree, start,
3063						 page,
3064						 btrfs_ino(BTRFS_I(inode)), 0);
3065		}
3066
3067		if (likely(uptodate))
3068			goto readpage_ok;
3069
3070		if (is_data_inode(inode)) {
3071			/*
			 * submit_read_repair() will handle all the good and
			 * bad sectors, we just continue to the next bvec.
3074			 */
3075			submit_read_repair(inode, bio, bio_offset, page,
3076					   start - page_offset(page), start,
3077					   end, mirror, error_bitmap,
3078					   btrfs_submit_data_bio);
3079
3080			ASSERT(bio_offset + len > bio_offset);
3081			bio_offset += len;
3082			continue;
3083		} else {
3084			struct extent_buffer *eb;
3085
3086			eb = find_extent_buffer_readpage(fs_info, page, start);
3087			set_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
3088			eb->read_mirror = mirror;
3089			atomic_dec(&eb->io_pages);
3090		}
3091readpage_ok:
3092		if (likely(uptodate)) {
3093			loff_t i_size = i_size_read(inode);
3094			pgoff_t end_index = i_size >> PAGE_SHIFT;
3095
3096			/*
3097			 * Zero out the remaining part if this range straddles
3098			 * i_size.
3099			 *
3100			 * Here we should only zero the range inside the bvec,
3101			 * not touch anything else.
3102			 *
3103			 * NOTE: i_size is exclusive while end is inclusive.
3104			 */
3105			if (page->index == end_index && i_size <= end) {
3106				u32 zero_start = max(offset_in_page(i_size),
3107						     offset_in_page(start));
3108
3109				zero_user_segment(page, zero_start,
3110						  offset_in_page(end) + 1);
3111			}
3112		}
3113		ASSERT(bio_offset + len > bio_offset);
3114		bio_offset += len;
3115
3116		/* Update page status and unlock */
3117		end_page_read(page, uptodate, start, len);
3118		endio_readpage_release_extent(&processed, BTRFS_I(inode),
3119					      start, end, PageUptodate(page));
3120	}
3121	/* Release the last extent */
3122	endio_readpage_release_extent(&processed, NULL, 0, 0, false);
3123	btrfs_bio_free_csum(bbio);
3124	bio_put(bio);
3125}
3126
3127/*
 * Initialize the members up to but not including 'bio'.  Use after allocating
 * a new bio with bio_alloc_bioset, as it does not initialize the bytes outside
 * of 'bio' (use of __GFP_ZERO is not supported).
3131 */
3132static inline void btrfs_bio_init(struct btrfs_bio *bbio)
3133{
3134	memset(bbio, 0, offsetof(struct btrfs_bio, bio));
3135}
3136
3137/*
 * Allocate a btrfs_bio, with @nr_iovecs as the maximum number of iovecs.
3139 *
3140 * The bio allocation is backed by bioset and does not fail.
3141 */
3142struct bio *btrfs_bio_alloc(unsigned int nr_iovecs)
3143{
3144	struct bio *bio;
3145
3146	ASSERT(0 < nr_iovecs && nr_iovecs <= BIO_MAX_VECS);
3147	bio = bio_alloc_bioset(GFP_NOFS, nr_iovecs, &btrfs_bioset);
3148	btrfs_bio_init(btrfs_bio(bio));
3149	return bio;
3150}
3151
3152struct bio *btrfs_bio_clone(struct bio *bio)
3153{
3154	struct btrfs_bio *bbio;
3155	struct bio *new;
3156
3157	/* Bio allocation backed by a bioset does not fail */
3158	new = bio_clone_fast(bio, GFP_NOFS, &btrfs_bioset);
3159	bbio = btrfs_bio(new);
3160	btrfs_bio_init(bbio);
3161	bbio->iter = bio->bi_iter;
3162	return new;
3163}
3164
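/*
 * Clone a part of @orig: the resulting bio covers @size bytes starting at
 * byte @offset of the original, with the trimmed iterator saved in the
 * btrfs_bio.
 */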
3165struct bio *btrfs_bio_clone_partial(struct bio *orig, u64 offset, u64 size)
3166{
3167	struct bio *bio;
3168	struct btrfs_bio *bbio;
3169
3170	ASSERT(offset <= UINT_MAX && size <= UINT_MAX);
3171
3172	/* this will never fail when it's backed by a bioset */
3173	bio = bio_clone_fast(orig, GFP_NOFS, &btrfs_bioset);
3174	ASSERT(bio);
3175
3176	bbio = btrfs_bio(bio);
3177	btrfs_bio_init(bbio);
3178
3179	bio_trim(bio, offset >> 9, size >> 9);
3180	bbio->iter = bio->bi_iter;
3181	return bio;
3182}
3183
3184/**
3185 * Attempt to add a page to bio
3186 *
3187 * @bio_ctrl:	record both the bio, and its bio_flags
3188 * @page:	page to add to the bio
 * @disk_bytenr:  logical bytenr of the page range being added, used to check
 *                whether it is contiguous with the bio we are building
3191 * @size:	portion of page that we want to write
3192 * @pg_offset:	starting offset in the page
3193 * @bio_flags:	flags of the current bio to see if we can merge them
3194 *
3195 * Attempt to add a page to bio considering stripe alignment etc.
3196 *
3197 * Return >= 0 for the number of bytes added to the bio.
3198 * Can return 0 if the current bio is already at stripe/zone boundary.
3199 * Return <0 for error.
3200 */
3201static int btrfs_bio_add_page(struct btrfs_bio_ctrl *bio_ctrl,
3202			      struct page *page,
3203			      u64 disk_bytenr, unsigned int size,
3204			      unsigned int pg_offset,
3205			      unsigned long bio_flags)
3206{
3207	struct bio *bio = bio_ctrl->bio;
3208	u32 bio_size = bio->bi_iter.bi_size;
3209	u32 real_size;
3210	const sector_t sector = disk_bytenr >> SECTOR_SHIFT;
3211	bool contig;
3212	int ret;
3213
3214	ASSERT(bio);
3215	/* The limit should be calculated when bio_ctrl->bio is allocated */
3216	ASSERT(bio_ctrl->len_to_oe_boundary && bio_ctrl->len_to_stripe_boundary);
3217	if (bio_ctrl->bio_flags != bio_flags)
3218		return 0;
3219
3220	if (bio_ctrl->bio_flags & EXTENT_BIO_COMPRESSED)
3221		contig = bio->bi_iter.bi_sector == sector;
3222	else
3223		contig = bio_end_sector(bio) == sector;
3224	if (!contig)
3225		return 0;
3226
3227	real_size = min(bio_ctrl->len_to_oe_boundary,
3228			bio_ctrl->len_to_stripe_boundary) - bio_size;
3229	real_size = min(real_size, size);
3230
3231	/*
	 * If real_size is 0, never call bio_add_*_page(), as even with a zero
	 * size the bio will still execute its endio function on the page!
3234	 */
3235	if (real_size == 0)
3236		return 0;
3237
3238	if (bio_op(bio) == REQ_OP_ZONE_APPEND)
3239		ret = bio_add_zone_append_page(bio, page, real_size, pg_offset);
3240	else
3241		ret = bio_add_page(bio, page, real_size, pg_offset);
3242
3243	return ret;
3244}
3245
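/*
 * Calculate how far the current bio may still grow: set
 * bio_ctrl->len_to_stripe_boundary from the io geometry and, for zone append
 * writes, bio_ctrl->len_to_oe_boundary from the ordered extent.
 */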
3246static int calc_bio_boundaries(struct btrfs_bio_ctrl *bio_ctrl,
3247			       struct btrfs_inode *inode, u64 file_offset)
3248{
3249	struct btrfs_fs_info *fs_info = inode->root->fs_info;
3250	struct btrfs_io_geometry geom;
3251	struct btrfs_ordered_extent *ordered;
3252	struct extent_map *em;
3253	u64 logical = (bio_ctrl->bio->bi_iter.bi_sector << SECTOR_SHIFT);
3254	int ret;
3255
3256	/*
	 * Pages for a compressed extent are never submitted to disk directly,
	 * thus they have no real boundary, just set both limits to U32_MAX.
	 *
	 * The split happens on the real compressed bio, which happens in
	 * btrfs_submit_compressed_read/write().
3262	 */
3263	if (bio_ctrl->bio_flags & EXTENT_BIO_COMPRESSED) {
3264		bio_ctrl->len_to_oe_boundary = U32_MAX;
3265		bio_ctrl->len_to_stripe_boundary = U32_MAX;
3266		return 0;
3267	}
3268	em = btrfs_get_chunk_map(fs_info, logical, fs_info->sectorsize);
3269	if (IS_ERR(em))
3270		return PTR_ERR(em);
3271	ret = btrfs_get_io_geometry(fs_info, em, btrfs_op(bio_ctrl->bio),
3272				    logical, &geom);
3273	free_extent_map(em);
	if (ret < 0)
		return ret;
3277	if (geom.len > U32_MAX)
3278		bio_ctrl->len_to_stripe_boundary = U32_MAX;
3279	else
3280		bio_ctrl->len_to_stripe_boundary = (u32)geom.len;
3281
3282	if (bio_op(bio_ctrl->bio) != REQ_OP_ZONE_APPEND) {
3283		bio_ctrl->len_to_oe_boundary = U32_MAX;
3284		return 0;
3285	}
3286
3287	/* Ordered extent not yet created, so we're good */
3288	ordered = btrfs_lookup_ordered_extent(inode, file_offset);
3289	if (!ordered) {
3290		bio_ctrl->len_to_oe_boundary = U32_MAX;
3291		return 0;
3292	}
3293
3294	bio_ctrl->len_to_oe_boundary = min_t(u32, U32_MAX,
3295		ordered->disk_bytenr + ordered->disk_num_bytes - logical);
3296	btrfs_put_ordered_extent(ordered);
3297	return 0;
3298}
3299
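/*
 * Allocate a new bio for @bio_ctrl, set up its starting sector, end_io
 * callback and op flags, and calculate its stripe/ordered extent boundaries.
 * On failure the bio is ended with the error and bio_ctrl->bio stays NULL.
 */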
3300static int alloc_new_bio(struct btrfs_inode *inode,
3301			 struct btrfs_bio_ctrl *bio_ctrl,
3302			 struct writeback_control *wbc,
3303			 unsigned int opf,
3304			 bio_end_io_t end_io_func,
3305			 u64 disk_bytenr, u32 offset, u64 file_offset,
3306			 unsigned long bio_flags)
3307{
3308	struct btrfs_fs_info *fs_info = inode->root->fs_info;
3309	struct bio *bio;
3310	int ret;
3311
3312	bio = btrfs_bio_alloc(BIO_MAX_VECS);
3313	/*
	 * For a compressed page range, its disk_bytenr is always the
	 * @disk_bytenr passed in, no matter whether we have added any range to
	 * a previous bio.
3316	 */
3317	if (bio_flags & EXTENT_BIO_COMPRESSED)
3318		bio->bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
3319	else
3320		bio->bi_iter.bi_sector = (disk_bytenr + offset) >> SECTOR_SHIFT;
3321	bio_ctrl->bio = bio;
3322	bio_ctrl->bio_flags = bio_flags;
3323	bio->bi_end_io = end_io_func;
3324	bio->bi_private = &inode->io_tree;
3325	bio->bi_write_hint = inode->vfs_inode.i_write_hint;
3326	bio->bi_opf = opf;
3327	ret = calc_bio_boundaries(bio_ctrl, inode, file_offset);
3328	if (ret < 0)
3329		goto error;
3330	if (wbc) {
3331		struct block_device *bdev;
3332
3333		bdev = fs_info->fs_devices->latest_dev->bdev;
3334		bio_set_dev(bio, bdev);
3335		wbc_init_bio(wbc, bio);
3336	}
3337	if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
3338		struct btrfs_device *device;
3339
3340		device = btrfs_zoned_get_device(fs_info, disk_bytenr,
3341						fs_info->sectorsize);
3342		if (IS_ERR(device)) {
3343			ret = PTR_ERR(device);
3344			goto error;
3345		}
3346
3347		btrfs_bio(bio)->device = device;
3348	}
3349	return 0;
3350error:
3351	bio_ctrl->bio = NULL;
3352	bio->bi_status = errno_to_blk_status(ret);
3353	bio_endio(bio);
3354	return ret;
3355}
3356
3357/*
 * @opf:	bio REQ_OP_* and REQ_* flags as one value
 * @wbc:	optional writeback control for io accounting
 * @bio_ctrl:	holds the current bio, its bio_flags and the boundary limits
 * @page:	page to add to the bio
 * @disk_bytenr: logical bytenr where the write will be
 * @size:	portion of page that we want to write to
 * @pg_offset:	starting offset in the page
 * @end_io_func: end_io callback for the new bio
 * @mirror_num:	desired mirror to read/write
 * @bio_flags:	flags of the current bio to see if we can merge them
 * @force_bio_submit: submit the bio under construction before adding this
 *              page to a new one
3370 */
3371static int submit_extent_page(unsigned int opf,
3372			      struct writeback_control *wbc,
3373			      struct btrfs_bio_ctrl *bio_ctrl,
3374			      struct page *page, u64 disk_bytenr,
3375			      size_t size, unsigned long pg_offset,
3376			      bio_end_io_t end_io_func,
3377			      int mirror_num,
3378			      unsigned long bio_flags,
3379			      bool force_bio_submit)
3380{
3381	int ret = 0;
3382	struct btrfs_inode *inode = BTRFS_I(page->mapping->host);
3383	unsigned int cur = pg_offset;
3384
3385	ASSERT(bio_ctrl);
3386
3387	ASSERT(pg_offset < PAGE_SIZE && size <= PAGE_SIZE &&
3388	       pg_offset + size <= PAGE_SIZE);
3389	if (force_bio_submit && bio_ctrl->bio) {
3390		ret = submit_one_bio(bio_ctrl->bio, mirror_num, bio_ctrl->bio_flags);
3391		bio_ctrl->bio = NULL;
3392		if (ret < 0)
3393			return ret;
3394	}
3395
3396	while (cur < pg_offset + size) {
3397		u32 offset = cur - pg_offset;
3398		int added;
3399
3400		/* Allocate new bio if needed */
3401		if (!bio_ctrl->bio) {
3402			ret = alloc_new_bio(inode, bio_ctrl, wbc, opf,
3403					    end_io_func, disk_bytenr, offset,
3404					    page_offset(page) + cur,
3405					    bio_flags);
3406			if (ret < 0)
3407				return ret;
3408		}
3409		/*
		 * We must go through btrfs_bio_add_page() to ensure each
		 * added range won't cross the stripe or ordered extent boundary.
3412		 */
3413		if (bio_flags & EXTENT_BIO_COMPRESSED)
3414			added = btrfs_bio_add_page(bio_ctrl, page, disk_bytenr,
3415					size - offset, pg_offset + offset,
3416					bio_flags);
3417		else
3418			added = btrfs_bio_add_page(bio_ctrl, page,
3419					disk_bytenr + offset, size - offset,
3420					pg_offset + offset, bio_flags);
3421
3422		/* Metadata page range should never be split */
3423		if (!is_data_inode(&inode->vfs_inode))
3424			ASSERT(added == 0 || added == size - offset);
3425
		/* We added at least part of the page, update the accounting */
3427		if (wbc && added)
3428			wbc_account_cgroup_owner(wbc, page, added);
3429
		/* We have reached the boundary, submit right now */
3431		if (added < size - offset) {
3432			/* The bio should contain some page(s) */
3433			ASSERT(bio_ctrl->bio->bi_iter.bi_size);
3434			ret = submit_one_bio(bio_ctrl->bio, mirror_num,
3435					bio_ctrl->bio_flags);
3436			bio_ctrl->bio = NULL;
3437			if (ret < 0)
3438				return ret;
3439		}
3440		cur += added;
3441	}
3442	return 0;
3443}
3444
3445static int attach_extent_buffer_page(struct extent_buffer *eb,
3446				     struct page *page,
3447				     struct btrfs_subpage *prealloc)
3448{
3449	struct btrfs_fs_info *fs_info = eb->fs_info;
3450	int ret = 0;
3451
3452	/*
	 * If the page is mapped to the btree inode, we should hold the private
	 * lock to prevent races.
3455	 * For cloned or dummy extent buffers, their pages are not mapped and
3456	 * will not race with any other ebs.
3457	 */
3458	if (page->mapping)
3459		lockdep_assert_held(&page->mapping->private_lock);
3460
3461	if (fs_info->sectorsize == PAGE_SIZE) {
3462		if (!PagePrivate(page))
3463			attach_page_private(page, eb);
3464		else
3465			WARN_ON(page->private != (unsigned long)eb);
3466		return 0;
3467	}
3468
3469	/* Already mapped, just free prealloc */
3470	if (PagePrivate(page)) {
3471		btrfs_free_subpage(prealloc);
3472		return 0;
3473	}
3474
3475	if (prealloc)
3476		/* Has preallocated memory for subpage */
3477		attach_page_private(page, prealloc);
3478	else
3479		/* Do new allocation to attach subpage */
3480		ret = btrfs_attach_subpage(fs_info, page,
3481					   BTRFS_SUBPAGE_METADATA);
3482	return ret;
3483}
3484
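/*
 * Make sure @page has a private structure attached: the btrfs_subpage
 * structure for subpage filesystems, or the dummy EXTENT_PAGE_PRIVATE value
 * otherwise.
 */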
3485int set_page_extent_mapped(struct page *page)
3486{
3487	struct btrfs_fs_info *fs_info;
3488
3489	ASSERT(page->mapping);
3490
3491	if (PagePrivate(page))
3492		return 0;
3493
3494	fs_info = btrfs_sb(page->mapping->host->i_sb);
3495
3496	if (fs_info->sectorsize < PAGE_SIZE)
3497		return btrfs_attach_subpage(fs_info, page, BTRFS_SUBPAGE_DATA);
3498
3499	attach_page_private(page, (void *)EXTENT_PAGE_PRIVATE);
3500	return 0;
3501}
3502
3503void clear_page_extent_mapped(struct page *page)
3504{
3505	struct btrfs_fs_info *fs_info;
3506
3507	ASSERT(page->mapping);
3508
3509	if (!PagePrivate(page))
3510		return;
3511
3512	fs_info = btrfs_sb(page->mapping->host->i_sb);
3513	if (fs_info->sectorsize < PAGE_SIZE)
3514		return btrfs_detach_subpage(fs_info, page);
3515
3516	detach_page_private(page);
3517}
3518
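/*
 * Return the cached extent map if it still covers @start, otherwise look up
 * a fresh map with btrfs_get_extent() and cache it in @em_cached if provided.
 */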
3519static struct extent_map *
3520__get_extent_map(struct inode *inode, struct page *page, size_t pg_offset,
3521		 u64 start, u64 len, struct extent_map **em_cached)
3522{
3523	struct extent_map *em;
3524
3525	if (em_cached && *em_cached) {
3526		em = *em_cached;
3527		if (extent_map_in_tree(em) && start >= em->start &&
3528		    start < extent_map_end(em)) {
3529			refcount_inc(&em->refs);
3530			return em;
3531		}
3532
3533		free_extent_map(em);
3534		*em_cached = NULL;
3535	}
3536
3537	em = btrfs_get_extent(BTRFS_I(inode), page, pg_offset, start, len);
3538	if (em_cached && !IS_ERR_OR_NULL(em)) {
3539		BUG_ON(*em_cached);
3540		refcount_inc(&em->refs);
3541		*em_cached = em;
3542	}
3543	return em;
3544}

/*
 * Basic readpage implementation.  Locked extent state structs are inserted
 * into the tree and are removed when the IO is done (by the end_io handlers).
 *
 * XXX JDM: This needs looking at to ensure proper page locking
 * Return 0 on success, otherwise return an error.
3551 */
3552int btrfs_do_readpage(struct page *page, struct extent_map **em_cached,
3553		      struct btrfs_bio_ctrl *bio_ctrl,
3554		      unsigned int read_flags, u64 *prev_em_start)
3555{
3556	struct inode *inode = page->mapping->host;
3557	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
3558	u64 start = page_offset(page);
3559	const u64 end = start + PAGE_SIZE - 1;
3560	u64 cur = start;
3561	u64 extent_offset;
3562	u64 last_byte = i_size_read(inode);
3563	u64 block_start;
3564	u64 cur_end;
3565	struct extent_map *em;
3566	int ret = 0;
3567	int nr = 0;
3568	size_t pg_offset = 0;
3569	size_t iosize;
3570	size_t blocksize = inode->i_sb->s_blocksize;
3571	struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
3572
3573	ret = set_page_extent_mapped(page);
3574	if (ret < 0) {
3575		unlock_extent(tree, start, end);
3576		btrfs_page_set_error(fs_info, page, start, PAGE_SIZE);
3577		unlock_page(page);
3578		goto out;
3579	}
3580
3581	if (!PageUptodate(page)) {
3582		if (cleancache_get_page(page) == 0) {
3583			BUG_ON(blocksize != PAGE_SIZE);
3584			unlock_extent(tree, start, end);
3585			unlock_page(page);
3586			goto out;
3587		}
3588	}
3589
3590	if (page->index == last_byte >> PAGE_SHIFT) {
3591		size_t zero_offset = offset_in_page(last_byte);
3592
3593		if (zero_offset) {
3594			iosize = PAGE_SIZE - zero_offset;
3595			memzero_page(page, zero_offset, iosize);
3596			flush_dcache_page(page);
3597		}
3598	}
3599	begin_page_read(fs_info, page);
3600	while (cur <= end) {
3601		unsigned long this_bio_flag = 0;
3602		bool force_bio_submit = false;
3603		u64 disk_bytenr;
3604
3605		ASSERT(IS_ALIGNED(cur, fs_info->sectorsize));
3606		if (cur >= last_byte) {
3607			struct extent_state *cached = NULL;
3608
3609			iosize = PAGE_SIZE - pg_offset;
3610			memzero_page(page, pg_offset, iosize);
3611			flush_dcache_page(page);
3612			set_extent_uptodate(tree, cur, cur + iosize - 1,
3613					    &cached, GFP_NOFS);
3614			unlock_extent_cached(tree, cur,
3615					     cur + iosize - 1, &cached);
3616			end_page_read(page, true, cur, iosize);
3617			break;
3618		}
3619		em = __get_extent_map(inode, page, pg_offset, cur,
3620				      end - cur + 1, em_cached);
3621		if (IS_ERR_OR_NULL(em)) {
3622			unlock_extent(tree, cur, end);
3623			end_page_read(page, false, cur, end + 1 - cur);
3624			break;
3625		}
3626		extent_offset = cur - em->start;
3627		BUG_ON(extent_map_end(em) <= cur);
3628		BUG_ON(end < cur);
3629
3630		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
3631			this_bio_flag |= EXTENT_BIO_COMPRESSED;
3632			extent_set_compress_type(&this_bio_flag,
3633						 em->compress_type);
3634		}
3635
3636		iosize = min(extent_map_end(em) - cur, end - cur + 1);
3637		cur_end = min(extent_map_end(em) - 1, end);
3638		iosize = ALIGN(iosize, blocksize);
3639		if (this_bio_flag & EXTENT_BIO_COMPRESSED)
3640			disk_bytenr = em->block_start;
3641		else
3642			disk_bytenr = em->block_start + extent_offset;
3643		block_start = em->block_start;
3644		if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
3645			block_start = EXTENT_MAP_HOLE;
3646
3647		/*
3648		 * If we have a file range that points to a compressed extent
3649		 * and it's followed by a consecutive file range that points
3650		 * to the same compressed extent (possibly with a different
3651		 * offset and/or length, so it either points to the whole extent
3652		 * or only part of it), we must make sure we do not submit a
3653		 * single bio to populate the pages for the 2 ranges because
3654		 * this makes the compressed extent read zero out the pages
3655		 * belonging to the 2nd range. Imagine the following scenario:
3656		 *
3657		 *  File layout
3658		 *  [0 - 8K]                     [8K - 24K]
3659		 *    |                               |
3660		 *    |                               |
3661		 * points to extent X,         points to extent X,
3662		 * offset 4K, length of 8K     offset 0, length 16K
3663		 *
3664		 * [extent X, compressed length = 4K uncompressed length = 16K]
3665		 *
3666		 * If the bio to read the compressed extent covers both ranges,
3667		 * it will decompress extent X into the pages belonging to the
3668		 * first range and then it will stop, zeroing out the remaining
3669		 * pages that belong to the other range that points to extent X.
3670		 * So here we make sure we submit 2 bios, one for the first
		 * range and another one for the second range. Both will target
3672		 * the same physical extent from disk, but we can't currently
3673		 * make the compressed bio endio callback populate the pages
3674		 * for both ranges because each compressed bio is tightly
3675		 * coupled with a single extent map, and each range can have
3676		 * an extent map with a different offset value relative to the
3677		 * uncompressed data of our extent and different lengths. This
3678		 * is a corner case so we prioritize correctness over
3679		 * non-optimal behavior (submitting 2 bios for the same extent).
3680		 */
3681		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) &&
3682		    prev_em_start && *prev_em_start != (u64)-1 &&
3683		    *prev_em_start != em->start)
3684			force_bio_submit = true;
3685
3686		if (prev_em_start)
3687			*prev_em_start = em->start;
3688
3689		free_extent_map(em);
3690		em = NULL;
3691
3692		/* we've found a hole, just zero and go on */
3693		if (block_start == EXTENT_MAP_HOLE) {
3694			struct extent_state *cached = NULL;
3695
3696			memzero_page(page, pg_offset, iosize);
3697			flush_dcache_page(page);
3698
3699			set_extent_uptodate(tree, cur, cur + iosize - 1,
3700					    &cached, GFP_NOFS);
3701			unlock_extent_cached(tree, cur,
3702					     cur + iosize - 1, &cached);
3703			end_page_read(page, true, cur, iosize);
3704			cur = cur + iosize;
3705			pg_offset += iosize;
3706			continue;
3707		}
3708		/* the get_extent function already copied into the page */
3709		if (test_range_bit(tree, cur, cur_end,
3710				   EXTENT_UPTODATE, 1, NULL)) {
3711			unlock_extent(tree, cur, cur + iosize - 1);
3712			end_page_read(page, true, cur, iosize);
3713			cur = cur + iosize;
3714			pg_offset += iosize;
3715			continue;
3716		}
3717		/* We have an inline extent but it didn't get marked
3718		 * uptodate.  Error out.
3719		 */
3720		if (block_start == EXTENT_MAP_INLINE) {
3721			unlock_extent(tree, cur, cur + iosize - 1);
3722			end_page_read(page, false, cur, iosize);
3723			cur = cur + iosize;
3724			pg_offset += iosize;
3725			continue;
3726		}
3727
3728		ret = submit_extent_page(REQ_OP_READ | read_flags, NULL,
3729					 bio_ctrl, page, disk_bytenr, iosize,
3730					 pg_offset,
3731					 end_bio_extent_readpage, 0,
3732					 this_bio_flag,
3733					 force_bio_submit);
3734		if (!ret) {
3735			nr++;
3736		} else {
3737			unlock_extent(tree, cur, cur + iosize - 1);
3738			end_page_read(page, false, cur, iosize);
3739			goto out;
3740		}
3741		cur = cur + iosize;
3742		pg_offset += iosize;
3743	}
3744out:
3745	return ret;
3746}
3747
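/*
 * Read a batch of pages that are contiguous in the file (readahead).
 *
 * Wait for and flush any ordered extents in the range first, then read each
 * page with btrfs_do_readpage(), sharing the cached extent map and bio
 * control across the whole batch.
 */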
3748static inline void contiguous_readpages(struct page *pages[], int nr_pages,
3749					u64 start, u64 end,
3750					struct extent_map **em_cached,
3751					struct btrfs_bio_ctrl *bio_ctrl,
3752					u64 *prev_em_start)
3753{
3754	struct btrfs_inode *inode = BTRFS_I(pages[0]->mapping->host);
3755	int index;
3756
3757	btrfs_lock_and_flush_ordered_range(inode, start, end, NULL);
3758
3759	for (index = 0; index < nr_pages; index++) {
3760		btrfs_do_readpage(pages[index], em_cached, bio_ctrl,
3761				  REQ_RAHEAD, prev_em_start);
3762		put_page(pages[index]);
3763	}
3764}
3765
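/*
 * Account @nr_written pages as written back by decreasing the remaining
 * budget in @wbc->nr_to_write.
 */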
3766static void update_nr_written(struct writeback_control *wbc,
3767			      unsigned long nr_written)
3768{
3769	wbc->nr_to_write -= nr_written;
3770}
3771
3772/*
3773 * helper for __extent_writepage, doing all of the delayed allocation setup.
3774 *
3775 * This returns 1 if btrfs_run_delalloc_range() did all the work required
3776 * to write the page (copy into an inline extent).  In this case the IO has
3777 * been started and the page is already unlocked.
3778 *
3779 * This returns 0 if all went well (page still locked)
3780 * This returns < 0 if there were errors (page still locked)
3781 */
3782static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
3783		struct page *page, struct writeback_control *wbc)
3784{
3785	const u64 page_end = page_offset(page) + PAGE_SIZE - 1;
3786	u64 delalloc_start = page_offset(page);
3787	u64 delalloc_to_write = 0;
3788	/* How many pages are started by btrfs_run_delalloc_range() */
3789	unsigned long nr_written = 0;
3790	int ret;
3791	int page_started = 0;
3792
3793	while (delalloc_start < page_end) {
3794		u64 delalloc_end = page_end;
3795		bool found;
3796
3797		found = find_lock_delalloc_range(&inode->vfs_inode, page,
3798					       &delalloc_start,
3799					       &delalloc_end);
3800		if (!found) {
3801			delalloc_start = delalloc_end + 1;
3802			continue;
3803		}
3804		ret = btrfs_run_delalloc_range(inode, page, delalloc_start,
3805				delalloc_end, &page_started, &nr_written, wbc);
3806		if (ret) {
3807			btrfs_page_set_error(inode->root->fs_info, page,
3808					     page_offset(page), PAGE_SIZE);
3809			return ret;
3810		}
3811		/*
3812		 * delalloc_end is already one less than the total length, so
3813		 * we don't subtract one from PAGE_SIZE
3814		 */
3815		delalloc_to_write += (delalloc_end - delalloc_start +
3816				      PAGE_SIZE) >> PAGE_SHIFT;
3817		delalloc_start = delalloc_end + 1;
3818	}
3819	if (wbc->nr_to_write < delalloc_to_write) {
3820		int thresh = 8192;
3821
3822		if (delalloc_to_write < thresh * 2)
3823			thresh = delalloc_to_write;
3824		wbc->nr_to_write = min_t(u64, delalloc_to_write,
3825					 thresh);
3826	}
3827
3828	/* Did btrfs_run_delalloc_range() already unlock and start the IO? */
3829	if (page_started) {
3830		/*
3831		 * We've unlocked the page, so we can't update the mapping's
3832		 * writeback index, just update nr_to_write.
3833		 */
3834		wbc->nr_to_write -= nr_written;
3835		return 1;
3836	}
3837
3838	return 0;
3839}
3840
3841/*
3842 * Find the first byte we need to write.
3843 *
3844 * For subpage, one page can contain several sectors, and
3845 * __extent_writepage_io() will just grab all extent maps in the page
3846 * range and try to submit all non-inline/non-compressed extents.
3847 *
3848 * This is a big problem for subpage, as we should not re-submit already
3849 * written data at all.
3850 * This function will look up the subpage dirty bitmap to find which range
3851 * we really need to submit.
3852 *
3853 * Return the next dirty range in [@start, @end).
3854 * If no dirty range is found, @start will be page_offset(page) + PAGE_SIZE.
3855 */
3856static void find_next_dirty_byte(struct btrfs_fs_info *fs_info,
3857				 struct page *page, u64 *start, u64 *end)
3858{
3859	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
3860	struct btrfs_subpage_info *spi = fs_info->subpage_info;
3861	u64 orig_start = *start;
3862	/* Declare as unsigned long so we can use bitmap ops */
3863	unsigned long flags;
3864	int range_start_bit;
3865	int range_end_bit;
3866
3867	/*
3868	 * For the regular case where sector size == page size, one page only
3869	 * contains one sector, so we return the whole page range directly.
3870	 */
3871	if (fs_info->sectorsize == PAGE_SIZE) {
3872		*start = page_offset(page);
3873		*end = page_offset(page) + PAGE_SIZE;
3874		return;
3875	}
3876
3877	range_start_bit = spi->dirty_offset +
3878			  (offset_in_page(orig_start) >> fs_info->sectorsize_bits);
3879
3880	/* We should have the page locked, but just in case */
3881	spin_lock_irqsave(&subpage->lock, flags);
3882	bitmap_next_set_region(subpage->bitmaps, &range_start_bit, &range_end_bit,
3883			       spi->dirty_offset + spi->bitmap_nr_bits);
3884	spin_unlock_irqrestore(&subpage->lock, flags);
3885
3886	range_start_bit -= spi->dirty_offset;
3887	range_end_bit -= spi->dirty_offset;
3888
3889	*start = page_offset(page) + range_start_bit * fs_info->sectorsize;
3890	*end = page_offset(page) + range_end_bit * fs_info->sectorsize;
3891}
3892
3893/*
3894 * helper for __extent_writepage.  This calls the writepage start hooks,
3895 * and does the loop to map the page into extents and bios.
3896 *
3897 * We return 1 if the IO is started and the page is unlocked,
3898 * 0 if all went well (page still locked)
3899 * < 0 if there were errors (page still locked)
3900 */
3901static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode,
3902				 struct page *page,
3903				 struct writeback_control *wbc,
3904				 struct extent_page_data *epd,
3905				 loff_t i_size,
3906				 int *nr_ret)
3907{
3908	struct btrfs_fs_info *fs_info = inode->root->fs_info;
3909	u64 cur = page_offset(page);
3910	u64 end = cur + PAGE_SIZE - 1;
3911	u64 extent_offset;
3912	u64 block_start;
3913	struct extent_map *em;
3914	int ret = 0;
3915	int nr = 0;
3916	u32 opf = REQ_OP_WRITE;
3917	const unsigned int write_flags = wbc_to_write_flags(wbc);
3918	bool compressed;
3919
3920	ret = btrfs_writepage_cow_fixup(page);
3921	if (ret) {
3922		/* Fixup worker will requeue */
3923		redirty_page_for_writepage(wbc, page);
3924		unlock_page(page);
3925		return 1;
3926	}
3927
3928	/*
3929	 * we don't want to touch the inode after unlocking the page,
3930	 * so we update the mapping writeback index now
3931	 */
3932	update_nr_written(wbc, 1);
3933
3934	while (cur <= end) {
3935		u64 disk_bytenr;
3936		u64 em_end;
3937		u64 dirty_range_start = cur;
3938		u64 dirty_range_end;
3939		u32 iosize;
3940
3941		if (cur >= i_size) {
3942			btrfs_writepage_endio_finish_ordered(inode, page, cur,
3943							     end, true);
3944			/*
3945			 * This range is beyond i_size, thus we don't need to
3946			 * bother writing back.
3947			 * But we still need to clear the dirty subpage bit, or
3948			 * the next time the page gets dirtied, we will try to
3949			 * writeback the sectors with subpage dirty bits,
3950			 * causing writeback without ordered extent.
3951			 */
3952			btrfs_page_clear_dirty(fs_info, page, cur, end + 1 - cur);
3953			break;
3954		}
3955
3956		find_next_dirty_byte(fs_info, page, &dirty_range_start,
3957				     &dirty_range_end);
3958		if (cur < dirty_range_start) {
3959			cur = dirty_range_start;
3960			continue;
3961		}
3962
3963		em = btrfs_get_extent(inode, NULL, 0, cur, end - cur + 1);
3964		if (IS_ERR_OR_NULL(em)) {
3965			btrfs_page_set_error(fs_info, page, cur, end - cur + 1);
3966			ret = PTR_ERR_OR_ZERO(em);
3967			break;
3968		}
3969
3970		extent_offset = cur - em->start;
3971		em_end = extent_map_end(em);
3972		ASSERT(cur <= em_end);
3973		ASSERT(cur < end);
3974		ASSERT(IS_ALIGNED(em->start, fs_info->sectorsize));
3975		ASSERT(IS_ALIGNED(em->len, fs_info->sectorsize));
3976		block_start = em->block_start;
3977		compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
3978		disk_bytenr = em->block_start + extent_offset;
3979
3980		/*
3981		 * Note that em_end from extent_map_end() and dirty_range_end from
3982		 * find_next_dirty_byte() are all exclusive
3983		 */
3984		iosize = min(min(em_end, end + 1), dirty_range_end) - cur;
3985
3986		if (btrfs_use_zone_append(inode, em->block_start))
3987			opf = REQ_OP_ZONE_APPEND;
3988
3989		free_extent_map(em);
3990		em = NULL;
3991
3992		/*
3993		 * compressed and inline extents are written through other
3994		 * paths in the FS
3995		 */
3996		if (compressed || block_start == EXTENT_MAP_HOLE ||
3997		    block_start == EXTENT_MAP_INLINE) {
3998			if (compressed)
3999				nr++;
4000			else
4001				btrfs_writepage_endio_finish_ordered(inode,
4002						page, cur, cur + iosize - 1, true);
4003			btrfs_page_clear_dirty(fs_info, page, cur, iosize);
4004			cur += iosize;
4005			continue;
4006		}
4007
4008		btrfs_set_range_writeback(inode, cur, cur + iosize - 1);
4009		if (!PageWriteback(page)) {
4010			btrfs_err(inode->root->fs_info,
4011				   "page %lu not writeback, cur %llu end %llu",
4012			       page->index, cur, end);
4013		}
4014
4015		/*
4016		 * Although the PageDirty bit is cleared before entering this
4017		 * function, the subpage dirty bit is not cleared.
4018		 * So clear the subpage dirty bit here so that next time we won't
4019		 * submit the page for a range already written to disk.
4020		 */
4021		btrfs_page_clear_dirty(fs_info, page, cur, iosize);
4022
4023		ret = submit_extent_page(opf | write_flags, wbc,
4024					 &epd->bio_ctrl, page,
4025					 disk_bytenr, iosize,
4026					 cur - page_offset(page),
4027					 end_bio_extent_writepage,
4028					 0, 0, false);
4029		if (ret) {
4030			btrfs_page_set_error(fs_info, page, cur, iosize);
4031			if (PageWriteback(page))
4032				btrfs_page_clear_writeback(fs_info, page, cur,
4033							   iosize);
4034		}
4035
4036		cur += iosize;
4037		nr++;
4038	}
4039	/*
4040	 * If we finish without problem, we should not only clear page dirty,
4041	 * but also empty subpage dirty bits
4042	 */
4043	if (!ret)
4044		btrfs_page_assert_not_dirty(fs_info, page);
4045	*nr_ret = nr;
4046	return ret;
4047}
4048
4049/*
4050 * the writepage semantics are similar to regular writepage.  extent
4051 * records are inserted to lock ranges in the tree, and as dirty areas
4052 * are found, they are marked writeback.  Then the lock bits are removed
4053 * and the end_io handler clears the writeback ranges
4054 *
4055 * Return 0 if everything goes well.
4056 * Return <0 for error.
4057 */
4058static int __extent_writepage(struct page *page, struct writeback_control *wbc,
4059			      struct extent_page_data *epd)
4060{
4061	struct inode *inode = page->mapping->host;
4062	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
4063	const u64 page_start = page_offset(page);
4064	const u64 page_end = page_start + PAGE_SIZE - 1;
4065	int ret;
4066	int nr = 0;
4067	size_t pg_offset;
4068	loff_t i_size = i_size_read(inode);
4069	unsigned long end_index = i_size >> PAGE_SHIFT;
4070
4071	trace___extent_writepage(page, inode, wbc);
4072
4073	WARN_ON(!PageLocked(page));
4074
4075	btrfs_page_clear_error(btrfs_sb(inode->i_sb), page,
4076			       page_offset(page), PAGE_SIZE);
4077
4078	pg_offset = offset_in_page(i_size);
4079	if (page->index > end_index ||
4080	   (page->index == end_index && !pg_offset)) {
4081		page->mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE);
4082		unlock_page(page);
4083		return 0;
4084	}
4085
4086	if (page->index == end_index) {
4087		memzero_page(page, pg_offset, PAGE_SIZE - pg_offset);
4088		flush_dcache_page(page);
4089	}
4090
4091	ret = set_page_extent_mapped(page);
4092	if (ret < 0) {
4093		SetPageError(page);
4094		goto done;
4095	}
4096
4097	if (!epd->extent_locked) {
4098		ret = writepage_delalloc(BTRFS_I(inode), page, wbc);
4099		if (ret == 1)
4100			return 0;
4101		if (ret)
4102			goto done;
4103	}
4104
4105	ret = __extent_writepage_io(BTRFS_I(inode), page, wbc, epd, i_size,
4106				    &nr);
4107	if (ret == 1)
4108		return 0;
4109
4110done:
4111	if (nr == 0) {
4112		/* make sure the mapping tag for page dirty gets cleared */
4113		set_page_writeback(page);
4114		end_page_writeback(page);
4115	}
4116	/*
4117	 * Here we used to have a check for PageError() and then set @ret and
4118	 * call end_extent_writepage().
4119	 *
4120	 * But in fact setting @ret here will cause different error paths
4121	 * between subpage and regular sectorsize.
4122	 *
4123	 * For regular page size, we never submit current page, but only add
4124	 * current page to current bio.
4125	 * The bio submission can only happen in next page.
4126	 * Thus if we hit the PageError() branch, @ret is already set to
4127	 * non-zero value and will not get updated for regular sectorsize.
4128	 *
4129	 * But for subpage case, it's possible we submit part of current page,
4130	 * thus can get PageError() set by submitted bio of the same page,
4131	 * while our @ret is still 0.
4132	 *
4133	 * So here we unify the behavior and don't set @ret.
4134	 * The error can still be properly passed to the higher layer, as the
4135	 * page will have its error bit set; here we just don't handle the IO failure.
4136	 *
4137	 * NOTE: This is just a hotfix for subpage.
4138	 * The root fix will be properly ending ordered extent when we hit
4139	 * an error during writeback.
4140	 *
4141	 * But that needs a bigger refactoring, as we not only need to grab the
4142	 * submitted OE, but also need to know exactly at which bytenr we hit
4143	 * the error.
4144	 * Currently the full page based __extent_writepage_io() is not
4145	 * capable of that.
4146	 */
4147	if (PageError(page))
4148		end_extent_writepage(page, ret, page_start, page_end);
4149	if (epd->extent_locked) {
4150		/*
4151		 * If epd->extent_locked, it's from extent_write_locked_range(),
4152		 * the page can either be locked by lock_page() or
4153		 * process_one_page().
4154		 * Let btrfs_page_unlock_writer() handle both cases.
4155		 */
4156		ASSERT(wbc);
4157		btrfs_page_unlock_writer(fs_info, page, wbc->range_start,
4158					 wbc->range_end + 1 - wbc->range_start);
4159	} else {
4160		unlock_page(page);
4161	}
4162	ASSERT(ret <= 0);
4163	return ret;
4164}
4165
4166void wait_on_extent_buffer_writeback(struct extent_buffer *eb)
4167{
4168	wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_WRITEBACK,
4169		       TASK_UNINTERRUPTIBLE);
4170}
4171
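/*
 * Clear EXTENT_BUFFER_WRITEBACK on @eb and wake up anyone waiting on it.
 * If the buffer was flagged with EXTENT_BUFFER_ZONE_FINISH, also run the
 * zoned endio work for its range first.
 */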
4172static void end_extent_buffer_writeback(struct extent_buffer *eb)
4173{
4174	if (test_bit(EXTENT_BUFFER_ZONE_FINISH, &eb->bflags))
4175		btrfs_zone_finish_endio(eb->fs_info, eb->start, eb->len);
4176
4177	clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
4178	smp_mb__after_atomic();
4179	wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK);
4180}
4181
4182/*
4183 * Lock extent buffer status and pages for writeback.
4184 *
4185 * May try to flush write bio if we can't get the lock.
4186 *
4187 * Return  0 if the extent buffer doesn't need to be submitted.
4188 *           (E.g. the extent buffer is not dirty)
4189 * Return >0 if the extent buffer is submitted to bio.
4190 * Return <0 if something went wrong, no page is locked.
4191 */
4192static noinline_for_stack int lock_extent_buffer_for_io(struct extent_buffer *eb,
4193			  struct extent_page_data *epd)
4194{
4195	struct btrfs_fs_info *fs_info = eb->fs_info;
4196	int i, num_pages, failed_page_nr;
4197	int flush = 0;
4198	int ret = 0;
4199
4200	if (!btrfs_try_tree_write_lock(eb)) {
4201		ret = flush_write_bio(epd);
4202		if (ret < 0)
4203			return ret;
4204		flush = 1;
4205		btrfs_tree_lock(eb);
4206	}
4207
4208	if (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) {
4209		btrfs_tree_unlock(eb);
4210		if (!epd->sync_io)
4211			return 0;
4212		if (!flush) {
4213			ret = flush_write_bio(epd);
4214			if (ret < 0)
4215				return ret;
4216			flush = 1;
4217		}
4218		while (1) {
4219			wait_on_extent_buffer_writeback(eb);
4220			btrfs_tree_lock(eb);
4221			if (!test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags))
4222				break;
4223			btrfs_tree_unlock(eb);
4224		}
4225	}
4226
4227	/*
4228	 * We need to do this to prevent races with anyone checking whether the
4229	 * eb is under IO, since we can end up having no IO bits set for a short
4230	 * period of time.
4231	 */
4232	spin_lock(&eb->refs_lock);
4233	if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
4234		set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
4235		spin_unlock(&eb->refs_lock);
4236		btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
4237		percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
4238					 -eb->len,
4239					 fs_info->dirty_metadata_batch);
4240		ret = 1;
4241	} else {
4242		spin_unlock(&eb->refs_lock);
4243	}
4244
4245	btrfs_tree_unlock(eb);
4246
4247	/*
4248	 * Either we don't need to submit any tree block, or we're submitting
4249	 * subpage eb.
4250	 * Subpage metadata doesn't use page locking at all, so we can skip
4251	 * the page locking.
4252	 */
4253	if (!ret || fs_info->sectorsize < PAGE_SIZE)
4254		return ret;
4255
4256	num_pages = num_extent_pages(eb);
4257	for (i = 0; i < num_pages; i++) {
4258		struct page *p = eb->pages[i];
4259
4260		if (!trylock_page(p)) {
4261			if (!flush) {
4262				int err;
4263
4264				err = flush_write_bio(epd);
4265				if (err < 0) {
4266					ret = err;
4267					failed_page_nr = i;
4268					goto err_unlock;
4269				}
4270				flush = 1;
4271			}
4272			lock_page(p);
4273		}
4274	}
4275
4276	return ret;
4277err_unlock:
4278	/* Unlock already locked pages */
4279	for (i = 0; i < failed_page_nr; i++)
4280		unlock_page(eb->pages[i]);
4281	/*
4282	 * Clear EXTENT_BUFFER_WRITEBACK and wake up anyone waiting on it.
4283	 * Also set back EXTENT_BUFFER_DIRTY so future attempts to write this
4284	 * eb can be made, undoing everything done before.
4285	 */
4286	btrfs_tree_lock(eb);
4287	spin_lock(&eb->refs_lock);
4288	set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
4289	end_extent_buffer_writeback(eb);
4290	spin_unlock(&eb->refs_lock);
4291	percpu_counter_add_batch(&fs_info->dirty_metadata_bytes, eb->len,
4292				 fs_info->dirty_metadata_batch);
4293	btrfs_clear_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
4294	btrfs_tree_unlock(eb);
4295	return ret;
4296}
4297
4298static void set_btree_ioerr(struct page *page, struct extent_buffer *eb)
4299{
4300	struct btrfs_fs_info *fs_info = eb->fs_info;
4301
4302	btrfs_page_set_error(fs_info, page, eb->start, eb->len);
4303	if (test_and_set_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags))
4304		return;
4305
4306	/*
4307	 * A read may stumble upon this buffer later, make sure that it gets an
4308	 * error and knows there was an error.
4309	 */
4310	clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
4311
4312	/*
4313	 * We need to set the mapping with the io error as well because a write
4314	 * error will flip the file system readonly, and then syncfs() will
4315	 * return a 0 because we are readonly if we don't modify the err seq for
4316	 * the superblock.
4317	 */
4318	mapping_set_error(page->mapping, -EIO);
4319
4320	/*
4321	 * If we error out, we should add back the dirty_metadata_bytes
4322	 * to make it consistent.
4323	 */
4324	percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
4325				 eb->len, fs_info->dirty_metadata_batch);
4326
4327	/*
4328	 * If writeback for a btree extent that doesn't belong to a log tree
4329	 * failed, increment the counter transaction->eb_write_errors.
4330	 * We do this because while the transaction is running and before it's
4331	 * committing (when we call filemap_fdata[write|wait]_range against
4332	 * the btree inode), we might have
4333	 * btree_inode->i_mapping->a_ops->writepages() called by the VM - if it
4334	 * returns an error or an error happens during writeback, when we're
4335	 * committing the transaction we wouldn't know about it, since the pages
4336	 * can be no longer dirty nor marked anymore for writeback (if a
4337	 * subsequent modification to the extent buffer didn't happen before the
4338	 * transaction commit), which makes filemap_fdata[write|wait]_range not
4339	 * able to find the pages tagged with SetPageError at transaction
4340	 * commit time. So if this happens we must abort the transaction,
4341	 * otherwise we commit a super block with btree roots that point to
4342	 * btree nodes/leaves whose content on disk is invalid - either garbage
4343	 * or the content of some node/leaf from a past generation that got
4344	 * COWed or deleted and is no longer valid.
4345	 *
4346	 * Note: setting AS_EIO/AS_ENOSPC in the btree inode's i_mapping would
4347	 * not be enough - we need to distinguish between log tree extents vs
4348	 * non-log tree extents, and the next filemap_fdatawait_range() call
4349	 * will catch and clear such errors in the mapping - and that call might
4350	 * be from a log sync and not from a transaction commit. Also, checking
4351	 * for the eb flag EXTENT_BUFFER_WRITE_ERR at transaction commit time is
4352	 * not done and would not be reliable - the eb might have been released
4353	 * from memory and reading it back again means that flag would not be
4354	 * set (since it's a runtime flag, not persisted on disk).
4355	 *
4356	 * Using the flags below in the btree inode also makes us achieve the
4357	 * goal of AS_EIO/AS_ENOSPC for the case where writepages() returns
4358	 * success, writeback was started for all dirty pages, and before
4359	 * filemap_fdatawait_range() is called the writeback for all dirty
4360	 * pages had already finished with errors: because we were not using
4361	 * AS_EIO/AS_ENOSPC, filemap_fdatawait_range() would return success,
4362	 * as it could not know that writeback errors happened (the pages were
4363	 * no longer tagged for writeback).
4364	 */
4365	switch (eb->log_index) {
4366	case -1:
4367		set_bit(BTRFS_FS_BTREE_ERR, &fs_info->flags);
4368		break;
4369	case 0:
4370		set_bit(BTRFS_FS_LOG1_ERR, &fs_info->flags);
4371		break;
4372	case 1:
4373		set_bit(BTRFS_FS_LOG2_ERR, &fs_info->flags);
4374		break;
4375	default:
4376		BUG(); /* unexpected, logic error */
4377	}
4378}
4379
4380/*
4381 * The endio specific version which won't touch any unsafe spinlock in endio
4382 * context.
4383 */
4384static struct extent_buffer *find_extent_buffer_nolock(
4385		struct btrfs_fs_info *fs_info, u64 start)
4386{
4387	struct extent_buffer *eb;
4388
4389	rcu_read_lock();
4390	eb = radix_tree_lookup(&fs_info->buffer_radix,
4391			       start >> fs_info->sectorsize_bits);
4392	if (eb && atomic_inc_not_zero(&eb->refs)) {
4393		rcu_read_unlock();
4394		return eb;
4395	}
4396	rcu_read_unlock();
4397	return NULL;
4398}
4399
4400/*
4401 * The endio function for subpage extent buffer write.
4402 *
4403 * Unlike end_bio_extent_buffer_writepage(), we only call end_page_writeback()
4404 * after all extent buffers in the page have finished their writeback.
4405 */
4406static void end_bio_subpage_eb_writepage(struct bio *bio)
4407{
4408	struct btrfs_fs_info *fs_info;
4409	struct bio_vec *bvec;
4410	struct bvec_iter_all iter_all;
4411
4412	fs_info = btrfs_sb(bio_first_page_all(bio)->mapping->host->i_sb);
4413	ASSERT(fs_info->sectorsize < PAGE_SIZE);
4414
4415	ASSERT(!bio_flagged(bio, BIO_CLONED));
4416	bio_for_each_segment_all(bvec, bio, iter_all) {
4417		struct page *page = bvec->bv_page;
4418		u64 bvec_start = page_offset(page) + bvec->bv_offset;
4419		u64 bvec_end = bvec_start + bvec->bv_len - 1;
4420		u64 cur_bytenr = bvec_start;
4421
4422		ASSERT(IS_ALIGNED(bvec->bv_len, fs_info->nodesize));
4423
4424		/* Iterate through all extent buffers in the range */
4425		while (cur_bytenr <= bvec_end) {
4426			struct extent_buffer *eb;
4427			int done;
4428
4429			/*
4430			 * Here we can't use find_extent_buffer(), as it may
4431			 * try to lock eb->refs_lock, which is not safe in endio
4432			 * context.
4433			 */
4434			eb = find_extent_buffer_nolock(fs_info, cur_bytenr);
4435			ASSERT(eb);
4436
4437			cur_bytenr = eb->start + eb->len;
4438
4439			ASSERT(test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags));
4440			done = atomic_dec_and_test(&eb->io_pages);
4441			ASSERT(done);
4442
4443			if (bio->bi_status ||
4444			    test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)) {
4445				ClearPageUptodate(page);
4446				set_btree_ioerr(page, eb);
4447			}
4448
4449			btrfs_subpage_clear_writeback(fs_info, page, eb->start,
4450						      eb->len);
4451			end_extent_buffer_writeback(eb);
4452			/*
4453			 * free_extent_buffer() will grab spinlock which is not
4454			 * safe in endio context. Thus here we manually dec
4455			 * the ref.
4456			 */
4457			atomic_dec(&eb->refs);
4458		}
4459	}
4460	bio_put(bio);
4461}
4462
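/*
 * The endio function for regular (sectorsize == PAGE_SIZE) extent buffer
 * write bios.
 *
 * Each page belongs to exactly one extent buffer, so end the page writeback
 * directly and finish the eb writeback once all its pages are done.
 */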
4463static void end_bio_extent_buffer_writepage(struct bio *bio)
4464{
4465	struct bio_vec *bvec;
4466	struct extent_buffer *eb;
4467	int done;
4468	struct bvec_iter_all iter_all;
4469
4470	ASSERT(!bio_flagged(bio, BIO_CLONED));
4471	bio_for_each_segment_all(bvec, bio, iter_all) {
4472		struct page *page = bvec->bv_page;
4473
4474		eb = (struct extent_buffer *)page->private;
4475		BUG_ON(!eb);
4476		done = atomic_dec_and_test(&eb->io_pages);
4477
4478		if (bio->bi_status ||
4479		    test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)) {
4480			ClearPageUptodate(page);
4481			set_btree_ioerr(page, eb);
4482		}
4483
4484		end_page_writeback(page);
4485
4486		if (!done)
4487			continue;
4488
4489		end_extent_buffer_writeback(eb);
4490	}
4491
4492	bio_put(bio);
4493}
4494
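/*
 * Prepare an extent buffer for write bio submission: clear the write error
 * bit, set the expected number of in-flight pages, and zero out the unused
 * part of the buffer so no stale kernel memory leaks to disk.
 */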
4495static void prepare_eb_write(struct extent_buffer *eb)
4496{
4497	u32 nritems;
4498	unsigned long start;
4499	unsigned long end;
4500
4501	clear_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);
4502	atomic_set(&eb->io_pages, num_extent_pages(eb));
4503
4504	/* Set btree blocks beyond nritems to 0 to avoid stale content */
4505	nritems = btrfs_header_nritems(eb);
4506	if (btrfs_header_level(eb) > 0) {
4507		end = btrfs_node_key_ptr_offset(nritems);
4508		memzero_extent_buffer(eb, end, eb->len - end);
4509	} else {
4510		/*
4511		 * Leaf:
4512		 * header 0 1 2 .. N ... data_N .. data_2 data_1 data_0
4513		 */
4514		start = btrfs_item_nr_offset(nritems);
4515		end = BTRFS_LEAF_DATA_OFFSET + leaf_data_end(eb);
4516		memzero_extent_buffer(eb, start, end - start);
4517	}
4518}
4519
4520/*
4521 * Unlike the work in write_one_eb(), we rely completely on extent locking.
4522 * Page locking is only used minimally, to keep the VMM code happy.
4523 */
4524static int write_one_subpage_eb(struct extent_buffer *eb,
4525				struct writeback_control *wbc,
4526				struct extent_page_data *epd)
4527{
4528	struct btrfs_fs_info *fs_info = eb->fs_info;
4529	struct page *page = eb->pages[0];
4530	unsigned int write_flags = wbc_to_write_flags(wbc) | REQ_META;
4531	bool no_dirty_ebs = false;
4532	int ret;
4533
4534	prepare_eb_write(eb);
4535
4536	/* clear_page_dirty_for_io() in subpage helper needs page locked */
4537	lock_page(page);
4538	btrfs_subpage_set_writeback(fs_info, page, eb->start, eb->len);
4539
4540	/* Check if this is the last dirty bit to update nr_written */
4541	no_dirty_ebs = btrfs_subpage_clear_and_test_dirty(fs_info, page,
4542							  eb->start, eb->len);
4543	if (no_dirty_ebs)
4544		clear_page_dirty_for_io(page);
4545
4546	ret = submit_extent_page(REQ_OP_WRITE | write_flags, wbc,
4547			&epd->bio_ctrl, page, eb->start, eb->len,
4548			eb->start - page_offset(page),
4549			end_bio_subpage_eb_writepage, 0, 0, false);
4550	if (ret) {
4551		btrfs_subpage_clear_writeback(fs_info, page, eb->start, eb->len);
4552		set_btree_ioerr(page, eb);
4553		unlock_page(page);
4554
4555		if (atomic_dec_and_test(&eb->io_pages))
4556			end_extent_buffer_writeback(eb);
4557		return -EIO;
4558	}
4559	unlock_page(page);
4560	/*
4561	 * Submission finished without problem.  If no range of the page is
4562	 * dirty anymore, we have submitted a whole page, so update nr_written in wbc.
4563	 */
4564	if (no_dirty_ebs)
4565		update_nr_written(wbc, 1);
4566	return ret;
4567}
4568
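/*
 * Submit all pages of a regular (sectorsize == PAGE_SIZE) extent buffer for
 * write.  On submission failure the remaining pages are cleaned up and the
 * eb writeback is ended.
 */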
4569static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
4570			struct writeback_control *wbc,
4571			struct extent_page_data *epd)
4572{
4573	u64 disk_bytenr = eb->start;
4574	int i, num_pages;
4575	unsigned int write_flags = wbc_to_write_flags(wbc) | REQ_META;
4576	int ret = 0;
4577
4578	prepare_eb_write(eb);
4579
4580	num_pages = num_extent_pages(eb);
4581	for (i = 0; i < num_pages; i++) {
4582		struct page *p = eb->pages[i];
4583
4584		clear_page_dirty_for_io(p);
4585		set_page_writeback(p);
4586		ret = submit_extent_page(REQ_OP_WRITE | write_flags, wbc,
4587					 &epd->bio_ctrl, p, disk_bytenr,
4588					 PAGE_SIZE, 0,
4589					 end_bio_extent_buffer_writepage,
4590					 0, 0, false);
4591		if (ret) {
4592			set_btree_ioerr(p, eb);
4593			if (PageWriteback(p))
4594				end_page_writeback(p);
4595			if (atomic_sub_and_test(num_pages - i, &eb->io_pages))
4596				end_extent_buffer_writeback(eb);
4597			ret = -EIO;
4598			break;
4599		}
4600		disk_bytenr += PAGE_SIZE;
4601		update_nr_written(wbc, 1);
4602		unlock_page(p);
4603	}
4604
4605	if (unlikely(ret)) {
4606		for (; i < num_pages; i++) {
4607			struct page *p = eb->pages[i];
4608			clear_page_dirty_for_io(p);
4609			unlock_page(p);
4610		}
4611	}
4612
4613	return ret;
4614}
4615
4616/*
4617 * Submit one subpage btree page.
4618 *
4619 * The main difference to submit_eb_page() is:
4620 * - Page locking
4621 *   For subpage, we don't rely on page locking at all.
4622 *
4623 * - Flush write bio
4624 *   We only flush the write bio if we may be unable to fit the current
4625 *   extent buffer into the current bio.
4626 *
4627 * Return >=0 for the number of submitted extent buffers.
4628 * Return <0 for fatal error.
4629 */
4630static int submit_eb_subpage(struct page *page,
4631			     struct writeback_control *wbc,
4632			     struct extent_page_data *epd)
4633{
4634	struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb);
4635	int submitted = 0;
4636	u64 page_start = page_offset(page);
4637	int bit_start = 0;
4638	int sectors_per_node = fs_info->nodesize >> fs_info->sectorsize_bits;
4639	int ret;
4640
4641	/* Lock and write each dirty extent buffer in the range */
4642	while (bit_start < fs_info->subpage_info->bitmap_nr_bits) {
4643		struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
4644		struct extent_buffer *eb;
4645		unsigned long flags;
4646		u64 start;
4647
4648		/*
4649		 * Take private lock to ensure the subpage won't be detached
4650		 * in the meantime.
4651		 */
4652		spin_lock(&page->mapping->private_lock);
4653		if (!PagePrivate(page)) {
4654			spin_unlock(&page->mapping->private_lock);
4655			break;
4656		}
4657		spin_lock_irqsave(&subpage->lock, flags);
4658		if (!test_bit(bit_start + fs_info->subpage_info->dirty_offset,
4659			      subpage->bitmaps)) {
4660			spin_unlock_irqrestore(&subpage->lock, flags);
4661			spin_unlock(&page->mapping->private_lock);
4662			bit_start++;
4663			continue;
4664		}
4665
4666		start = page_start + bit_start * fs_info->sectorsize;
4667		bit_start += sectors_per_node;
4668
4669		/*
4670		 * Here we just want to grab the eb without touching extra
4671		 * spin locks, so call find_extent_buffer_nolock().
4672		 */
4673		eb = find_extent_buffer_nolock(fs_info, start);
4674		spin_unlock_irqrestore(&subpage->lock, flags);
4675		spin_unlock(&page->mapping->private_lock);
4676
4677		/*
4678		 * The eb has already reached 0 refs, thus find_extent_buffer()
4679		 * doesn't return it.  We don't need to write back such an eb
4680		 * anyway.
4681		 */
4682		if (!eb)
4683			continue;
4684
4685		ret = lock_extent_buffer_for_io(eb, epd);
4686		if (ret == 0) {
4687			free_extent_buffer(eb);
4688			continue;
4689		}
4690		if (ret < 0) {
4691			free_extent_buffer(eb);
4692			goto cleanup;
4693		}
4694		ret = write_one_subpage_eb(eb, wbc, epd);
4695		free_extent_buffer(eb);
4696		if (ret < 0)
4697			goto cleanup;
4698		submitted++;
4699	}
4700	return submitted;
4701
4702cleanup:
4703	/* We hit error, end bio for the submitted extent buffers */
4704	end_write_bio(epd, ret);
4705	return ret;
4706}
4707
4708/*
4709 * Submit all page(s) of one extent buffer.
4710 *
4711 * @page:	the page of one extent buffer
4712 * @eb_context:	to determine if we need to submit this page; if the current
4713 *		page belongs to this eb, we don't need to submit it
4714 *
4715 * The caller should pass each page in bytenr order, and here we use
4716 * @eb_context to determine if we have submitted pages of one extent buffer.
4717 *
4718 * If we have, we just skip until we hit a new page that doesn't belong to
4719 * current @eb_context.
4720 *
4721 * If not, we submit all the page(s) of the extent buffer.
4722 *
4723 * Return >0 if we have submitted the extent buffer successfully.
4724 * Return 0 if we don't need to submit the page, as it's already submitted by
4725 * previous call.
4726 * Return <0 for fatal error.
4727 */
4728static int submit_eb_page(struct page *page, struct writeback_control *wbc,
4729			  struct extent_page_data *epd,
4730			  struct extent_buffer **eb_context)
4731{
4732	struct address_space *mapping = page->mapping;
4733	struct btrfs_block_group *cache = NULL;
4734	struct extent_buffer *eb;
4735	int ret;
4736
4737	if (!PagePrivate(page))
4738		return 0;
4739
4740	if (btrfs_sb(page->mapping->host->i_sb)->sectorsize < PAGE_SIZE)
4741		return submit_eb_subpage(page, wbc, epd);
4742
4743	spin_lock(&mapping->private_lock);
4744	if (!PagePrivate(page)) {
4745		spin_unlock(&mapping->private_lock);
4746		return 0;
4747	}
4748
4749	eb = (struct extent_buffer *)page->private;
4750
4751	/*
4752	 * Shouldn't happen and normally this would be a BUG_ON but no point
4753	 * crashing the machine for something we can survive anyway.
4754	 */
4755	if (WARN_ON(!eb)) {
4756		spin_unlock(&mapping->private_lock);
4757		return 0;
4758	}
4759
4760	if (eb == *eb_context) {
4761		spin_unlock(&mapping->private_lock);
4762		return 0;
4763	}
4764	ret = atomic_inc_not_zero(&eb->refs);
4765	spin_unlock(&mapping->private_lock);
4766	if (!ret)
4767		return 0;
4768
4769	if (!btrfs_check_meta_write_pointer(eb->fs_info, eb, &cache)) {
4770		/*
4771		 * If for_sync is set, this hole will be filled by a
4772		 * transaction commit.
4773		 */
4774		if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync)
4775			ret = -EAGAIN;
4776		else
4777			ret = 0;
4778		free_extent_buffer(eb);
4779		return ret;
4780	}
4781
4782	*eb_context = eb;
4783
4784	ret = lock_extent_buffer_for_io(eb, epd);
4785	if (ret <= 0) {
4786		btrfs_revert_meta_write_pointer(cache, eb);
4787		if (cache)
4788			btrfs_put_block_group(cache);
4789		free_extent_buffer(eb);
4790		return ret;
4791	}
4792	if (cache) {
4793		/* Implies write in zoned mode; mark the last eb in the block group */
4794		if (cache->seq_zone && eb->start + eb->len == cache->zone_capacity)
4795			set_bit(EXTENT_BUFFER_ZONE_FINISH, &eb->bflags);
4796		/* Drop the block group ref only after we are done using @cache */
4797		btrfs_put_block_group(cache);
4798	}
4799	ret = write_one_eb(eb, wbc, epd);
4800	free_extent_buffer(eb);
4801	if (ret < 0)
4802		return ret;
4803	return 1;
4804}
4805
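/*
 * Write back dirty extent buffers of the btree inode.
 *
 * Walks the dirty pages of @mapping in index order and submits each extent
 * buffer at most once via submit_eb_page().
 */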
4806int btree_write_cache_pages(struct address_space *mapping,
4807				   struct writeback_control *wbc)
4808{
4809	struct extent_buffer *eb_context = NULL;
4810	struct extent_page_data epd = {
4811		.bio_ctrl = { 0 },
4812		.extent_locked = 0,
4813		.sync_io = wbc->sync_mode == WB_SYNC_ALL,
4814	};
4815	struct btrfs_fs_info *fs_info = BTRFS_I(mapping->host)->root->fs_info;
4816	int ret = 0;
4817	int done = 0;
4818	int nr_to_write_done = 0;
4819	struct pagevec pvec;
4820	int nr_pages;
4821	pgoff_t index;
4822	pgoff_t end;		/* Inclusive */
4823	int scanned = 0;
4824	xa_mark_t tag;
4825
4826	pagevec_init(&pvec);
4827	if (wbc->range_cyclic) {
4828		index = mapping->writeback_index; /* Start from prev offset */
4829		end = -1;
4830		/*
4831		 * Starting from the beginning means we don't need to cycle over
4832		 * the range again, so mark it as scanned.
4833		 */
4834		scanned = (index == 0);
4835	} else {
4836		index = wbc->range_start >> PAGE_SHIFT;
4837		end = wbc->range_end >> PAGE_SHIFT;
4838		scanned = 1;
4839	}
4840	if (wbc->sync_mode == WB_SYNC_ALL)
4841		tag = PAGECACHE_TAG_TOWRITE;
4842	else
4843		tag = PAGECACHE_TAG_DIRTY;
4844	btrfs_zoned_meta_io_lock(fs_info);
4845retry:
4846	if (wbc->sync_mode == WB_SYNC_ALL)
4847		tag_pages_for_writeback(mapping, index, end);
4848	while (!done && !nr_to_write_done && (index <= end) &&
4849	       (nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
4850			tag))) {
4851		unsigned i;
4852
4853		for (i = 0; i < nr_pages; i++) {
4854			struct page *page = pvec.pages[i];
4855
4856			ret = submit_eb_page(page, wbc, &epd, &eb_context);
4857			if (ret == 0)
4858				continue;
4859			if (ret < 0) {
4860				done = 1;
4861				break;
4862			}
4863
4864			/*
4865			 * the filesystem may choose to bump up nr_to_write.
4866			 * We have to make sure to honor the new nr_to_write
4867			 * at any time
4868			 */
4869			nr_to_write_done = wbc->nr_to_write <= 0;
4870		}
4871		pagevec_release(&pvec);
4872		cond_resched();
4873	}
4874	if (!scanned && !done) {
4875		/*
4876		 * We hit the last page and there is more work to be done: wrap
4877		 * back to the start of the file
4878		 */
4879		scanned = 1;
4880		index = 0;
4881		goto retry;
4882	}
4883	if (ret < 0) {
4884		end_write_bio(&epd, ret);
4885		goto out;
4886	}
4887	/*
4888	 * If something went wrong, don't allow any metadata write bio to be
4889	 * submitted.
4890	 *
4891	 * This would prevent use-after-free if we had dirty pages not
4892	 * cleaned up, which can still happen with fuzzed images.
4893	 *
4894	 * - Bad extent tree
4895	 *   Allowing existing tree block to be allocated for other trees.
4896	 *
4897	 * - Log tree operations
4898	 *   Existing tree blocks get allocated to the log tree, have their
4899	 *   generation bumped, then get cleaned in tree re-balance.
4900	 *   Such tree block will not be written back, since it's clean,
4901	 *   thus no WRITTEN flag set.
4902	 *   And after log writes back, this tree block is not traced by
4903	 *   any dirty extent_io_tree.
4904	 *
4905	 * - Offending tree block gets re-dirtied from its original owner
4906	 *   Since it has bumped generation, no WRITTEN flag, it can be
4907	 *   reused without COWing. This tree block will not be traced
4908	 *   by btrfs_transaction::dirty_pages.
4909	 *
4910	 *   Now such dirty tree block will not be cleaned by any dirty
4911	 *   extent io tree. Thus we don't want to submit such wild eb
4912	 *   if the fs already has error.
4913	 */
4914	if (!BTRFS_FS_ERROR(fs_info)) {
4915		ret = flush_write_bio(&epd);
4916	} else {
4917		ret = -EROFS;
4918		end_write_bio(&epd, ret);
4919	}
4920out:
4921	btrfs_zoned_meta_io_unlock(fs_info);
4922	return ret;
4923}
4924
4925/**
4926 * extent_write_cache_pages - walk the list of dirty pages of the given address space and write all of them
4927 *
4928 * @mapping: address space structure to write
4929 * @wbc:     subtract the number of written pages from *@wbc->nr_to_write
4930 * @epd:     holds context for the write, namely the bio
4931 *
4932 * If a page is already under I/O, write_cache_pages() skips it, even
4933 * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
4934 * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
4935 * and msync() need to guarantee that all the data which was dirty at the time
4936 * the call was made get new I/O started against them.  If wbc->sync_mode is
4937 * WB_SYNC_ALL then we were called for data integrity and we must wait for
4938 * existing IO to complete.
4939 */
4940static int extent_write_cache_pages(struct address_space *mapping,
4941			     struct writeback_control *wbc,
4942			     struct extent_page_data *epd)
4943{
4944	struct inode *inode = mapping->host;
4945	int ret = 0;
4946	int done = 0;
4947	int nr_to_write_done = 0;
4948	struct pagevec pvec;
4949	int nr_pages;
4950	pgoff_t index;
4951	pgoff_t end;		/* Inclusive */
4952	pgoff_t done_index;
4953	int range_whole = 0;
4954	int scanned = 0;
4955	xa_mark_t tag;
4956
4957	/*
4958	 * We have to hold onto the inode so that ordered extents can do their
4959	 * work when the IO finishes.  The alternative to this is failing to add
4960	 * an ordered extent if the igrab() fails there and that is a huge pain
4961	 * to deal with, so instead just hold onto the inode throughout the
4962	 * writepages operation.  If it fails here we are freeing up the inode
4963	 * anyway and we'd rather not waste our time writing out stuff that is
4964	 * going to be truncated anyway.
4965	 */
4966	if (!igrab(inode))
4967		return 0;
4968
4969	pagevec_init(&pvec);
4970	if (wbc->range_cyclic) {
4971		index = mapping->writeback_index; /* Start from prev offset */
4972		end = -1;
4973		/*
4974		 * Starting from the beginning means we don't need to cycle over
4975		 * the range again, so mark it as scanned.
4976		 */
4977		scanned = (index == 0);
4978	} else {
4979		index = wbc->range_start >> PAGE_SHIFT;
4980		end = wbc->range_end >> PAGE_SHIFT;
4981		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
4982			range_whole = 1;
4983		scanned = 1;
4984	}
4985
4986	/*
4987	 * We do the tagged writepage as long as the snapshot flush bit is set
4988	 * and we are the first one to do the filemap_flush() on this inode.
4989	 *
4990	 * The nr_to_write == LONG_MAX is needed to make sure other flushers do
4991	 * not race in and drop the bit.
4992	 */
4993	if (range_whole && wbc->nr_to_write == LONG_MAX &&
4994	    test_and_clear_bit(BTRFS_INODE_SNAPSHOT_FLUSH,
4995			       &BTRFS_I(inode)->runtime_flags))
4996		wbc->tagged_writepages = 1;
4997
4998	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
4999		tag = PAGECACHE_TAG_TOWRITE;
5000	else
5001		tag = PAGECACHE_TAG_DIRTY;
5002retry:
5003	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
5004		tag_pages_for_writeback(mapping, index, end);
5005	done_index = index;
5006	while (!done && !nr_to_write_done && (index <= end) &&
5007			(nr_pages = pagevec_lookup_range_tag(&pvec, mapping,
5008						&index, end, tag))) {
5009		unsigned i;
5010
5011		for (i = 0; i < nr_pages; i++) {
5012			struct page *page = pvec.pages[i];
5013
5014			done_index = page->index + 1;
5015			/*
5016			 * At this point we hold neither the i_pages lock nor
5017			 * the page lock: the page may be truncated or
5018			 * invalidated (changing page->mapping to NULL),
5019			 * or even swizzled back from swapper_space to
5020			 * tmpfs file mapping
5021			 */
5022			if (!trylock_page(page)) {
5023				ret = flush_write_bio(epd);
5024				BUG_ON(ret < 0);
5025				lock_page(page);
5026			}
5027
5028			if (unlikely(page->mapping != mapping)) {
5029				unlock_page(page);
5030				continue;
5031			}
5032
5033			if (wbc->sync_mode != WB_SYNC_NONE) {
5034				if (PageWriteback(page)) {
5035					ret = flush_write_bio(epd);
5036					BUG_ON(ret < 0);
5037				}
5038				wait_on_page_writeback(page);
5039			}
5040
5041			if (PageWriteback(page) ||
5042			    !clear_page_dirty_for_io(page)) {
5043				unlock_page(page);
5044				continue;
5045			}
5046
5047			ret = __extent_writepage(page, wbc, epd);
5048			if (ret < 0) {
5049				done = 1;
5050				break;
5051			}
5052
5053			/*
5054			 * the filesystem may choose to bump up nr_to_write.
5055			 * We have to make sure to honor the new nr_to_write
5056			 * at any time
5057			 */
5058			nr_to_write_done = wbc->nr_to_write <= 0;
5059		}
5060		pagevec_release(&pvec);
5061		cond_resched();
5062	}
5063	if (!scanned && !done) {
5064		/*
5065		 * We hit the last page and there is more work to be done: wrap
5066		 * back to the start of the file
5067		 */
5068		scanned = 1;
5069		index = 0;
5070
5071		/*
5072		 * If we're looping we could run into a page that is locked by a
5073		 * writer and that writer could be waiting on writeback for a
5074		 * page in our current bio, and thus deadlock, so flush the
5075		 * write bio here.
5076		 */
5077		ret = flush_write_bio(epd);
5078		if (!ret)
5079			goto retry;
5080	}
5081
5082	if (wbc->range_cyclic || (wbc->nr_to_write > 0 && range_whole))
5083		mapping->writeback_index = done_index;
5084
5085	btrfs_add_delayed_iput(inode);
5086	return ret;
5087}
5088
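/* Write a single page and flush the resulting write bio. */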
5089int extent_write_full_page(struct page *page, struct writeback_control *wbc)
5090{
5091	int ret;
5092	struct extent_page_data epd = {
5093		.bio_ctrl = { 0 },
5094		.extent_locked = 0,
5095		.sync_io = wbc->sync_mode == WB_SYNC_ALL,
5096	};
5097
5098	ret = __extent_writepage(page, wbc, &epd);
5099	ASSERT(ret <= 0);
5100	if (ret < 0) {
5101		end_write_bio(&epd, ret);
5102		return ret;
5103	}
5104
5105	ret = flush_write_bio(&epd);
5106	ASSERT(ret <= 0);
5107	return ret;
5108}
5109
5110/*
5111 * Submit the pages in the range to bio for call sites whose delalloc range
5112 * has already been run (aka, ordered extent inserted) and all pages are
5113 * still locked.
5114 */
5115int extent_write_locked_range(struct inode *inode, u64 start, u64 end)
5116{
5117	bool found_error = false;
5118	int first_error = 0;
5119	int ret = 0;
5120	struct address_space *mapping = inode->i_mapping;
5121	struct page *page;
5122	u64 cur = start;
5123	unsigned long nr_pages;
5124	const u32 sectorsize = btrfs_sb(inode->i_sb)->sectorsize;
5125	struct extent_page_data epd = {
5126		.bio_ctrl = { 0 },
5127		.extent_locked = 1,
5128		.sync_io = 1,
5129	};
5130	struct writeback_control wbc_writepages = {
5131		.sync_mode	= WB_SYNC_ALL,
5132		.range_start	= start,
5133		.range_end	= end + 1,
5134		/* We're called from an async helper function */
5135		.punt_to_cgroup	= 1,
5136		.no_cgroup_owner = 1,
5137	};
5138
5139	ASSERT(IS_ALIGNED(start, sectorsize) && IS_ALIGNED(end + 1, sectorsize));
5140	nr_pages = (round_up(end, PAGE_SIZE) - round_down(start, PAGE_SIZE)) >>
5141		   PAGE_SHIFT;
5142	wbc_writepages.nr_to_write = nr_pages * 2;
5143
5144	wbc_attach_fdatawrite_inode(&wbc_writepages, inode);
5145	while (cur <= end) {
5146		u64 cur_end = min(round_down(cur, PAGE_SIZE) + PAGE_SIZE - 1, end);
5147
5148		page = find_get_page(mapping, cur >> PAGE_SHIFT);
5149		/*
5150		 * All pages in the range are locked since
5151		 * btrfs_run_delalloc_range(), thus there is no way to clear
5152		 * the page dirty flag.
5153		 */
5154		ASSERT(PageLocked(page));
5155		ASSERT(PageDirty(page));
5156		clear_page_dirty_for_io(page);
5157		ret = __extent_writepage(page, &wbc_writepages, &epd);
5158		ASSERT(ret <= 0);
5159		if (ret < 0) {
5160			found_error = true;
5161			first_error = ret;
5162		}
5163		put_page(page);
5164		cur = cur_end + 1;
5165	}
5166
5167	if (!found_error)
5168		ret = flush_write_bio(&epd);
5169	else
5170		end_write_bio(&epd, ret);
5171
5172	wbc_detach_inode(&wbc_writepages);
5173	if (found_error)
5174		return first_error;
5175	return ret;
5176}
5177
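/*
 * Writepages entry point for data inodes: write the dirty pages of @mapping
 * and flush the last built bio.
 */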
5178int extent_writepages(struct address_space *mapping,
5179		      struct writeback_control *wbc)
5180{
5181	struct inode *inode = mapping->host;
5182	int ret = 0;
5183	struct extent_page_data epd = {
5184		.bio_ctrl = { 0 },
5185		.extent_locked = 0,
5186		.sync_io = wbc->sync_mode == WB_SYNC_ALL,
5187	};
5188
5189	/*
5190	 * Allow only a single thread to do the reloc work in zoned mode to
5191	 * protect the write pointer updates.
5192	 */
5193	btrfs_zoned_data_reloc_lock(BTRFS_I(inode));
5194	ret = extent_write_cache_pages(mapping, wbc, &epd);
5195	btrfs_zoned_data_reloc_unlock(BTRFS_I(inode));
5196	ASSERT(ret <= 0);
5197	if (ret < 0) {
5198		end_write_bio(&epd, ret);
5199		return ret;
5200	}
5201	ret = flush_write_bio(&epd);
5202	return ret;
5203}
5204
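/*
 * Readahead entry point: read the pages described by @rac in batches,
 * reusing a cached extent map and a single bio control across batches.
 */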
5205void extent_readahead(struct readahead_control *rac)
5206{
5207	struct btrfs_bio_ctrl bio_ctrl = { 0 };
5208	struct page *pagepool[16];
5209	struct extent_map *em_cached = NULL;
5210	u64 prev_em_start = (u64)-1;
5211	int nr;
5212
5213	while ((nr = readahead_page_batch(rac, pagepool))) {
5214		u64 contig_start = readahead_pos(rac);
5215		u64 contig_end = contig_start + readahead_batch_length(rac) - 1;
5216
5217		contiguous_readpages(pagepool, nr, contig_start, contig_end,
5218				&em_cached, &bio_ctrl, &prev_em_start);
5219	}
5220
5221	if (em_cached)
5222		free_extent_map(em_cached);
5223
5224	if (bio_ctrl.bio) {
5225		if (submit_one_bio(bio_ctrl.bio, 0, bio_ctrl.bio_flags))
5226			return;
5227	}
5228}
5229
5230/*
5231 * basic invalidatepage code, this waits on any locked or writeback
5232 * ranges corresponding to the page, and then deletes any extent state
5233 * records from the tree
5234 */
5235int extent_invalidatepage(struct extent_io_tree *tree,
5236			  struct page *page, unsigned long offset)
5237{
5238	struct extent_state *cached_state = NULL;
5239	u64 start = page_offset(page);
5240	u64 end = start + PAGE_SIZE - 1;
5241	size_t blocksize = page->mapping->host->i_sb->s_blocksize;
5242
5243	/* This function is only called for the btree inode */
5244	ASSERT(tree->owner == IO_TREE_BTREE_INODE_IO);
5245
5246	start += ALIGN(offset, blocksize);
5247	if (start > end)
5248		return 0;
5249
5250	lock_extent_bits(tree, start, end, &cached_state);
5251	wait_on_page_writeback(page);
5252
5253	/*
5254	 * Currently for btree io tree, only EXTENT_LOCKED is utilized,
5255	 * so here we only need to unlock the extent range to free any
5256	 * existing extent state.
5257	 */
5258	unlock_extent_cached(tree, start, end, &cached_state);
5259	return 0;
5260}
5261
5262/*
5263 * a helper for releasepage, this tests for areas of the page that
5264 * are locked or under IO and drops the related state bits if it is safe
5265 * to drop the page.
5266 */
5267static int try_release_extent_state(struct extent_io_tree *tree,
5268				    struct page *page, gfp_t mask)
5269{
5270	u64 start = page_offset(page);
5271	u64 end = start + PAGE_SIZE - 1;
5272	int ret = 1;
5273
5274	if (test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL)) {
5275		ret = 0;
5276	} else {
5277		/*
5278		 * At this point we can safely clear everything except the
5279		 * locked bit, the nodatasum bit and the delalloc new bit.
5280		 * The delalloc new bit will be cleared by ordered extent
5281		 * completion.
5282		 */
5283		ret = __clear_extent_bit(tree, start, end,
5284			 ~(EXTENT_LOCKED | EXTENT_NODATASUM | EXTENT_DELALLOC_NEW),
5285			 0, 0, NULL, mask, NULL);
5286
5287		/* If clear_extent_bit() failed for ENOMEM reasons,
5288		 * we can't allow the release to continue.
5289		 */
5290		if (ret < 0)
5291			ret = 0;
5292		else
5293			ret = 1;
5294	}
5295	return ret;
5296}
5297
5298/*
5299 * a helper for releasepage.  As long as there are no locked extents
5300 * in the range corresponding to the page, both state records and extent
5301 * map records are removed
5302 */
5303int try_release_extent_mapping(struct page *page, gfp_t mask)
5304{
5305	struct extent_map *em;
5306	u64 start = page_offset(page);
5307	u64 end = start + PAGE_SIZE - 1;
5308	struct btrfs_inode *btrfs_inode = BTRFS_I(page->mapping->host);
5309	struct extent_io_tree *tree = &btrfs_inode->io_tree;
5310	struct extent_map_tree *map = &btrfs_inode->extent_tree;
5311
5312	if (gfpflags_allow_blocking(mask) &&
5313	    page->mapping->host->i_size > SZ_16M) {
5314		u64 len;
5315		while (start <= end) {
5316			struct btrfs_fs_info *fs_info;
5317			u64 cur_gen;
5318
5319			len = end - start + 1;
5320			write_lock(&map->lock);
5321			em = lookup_extent_mapping(map, start, len);
5322			if (!em) {
5323				write_unlock(&map->lock);
5324				break;
5325			}
5326			if (test_bit(EXTENT_FLAG_PINNED, &em->flags) ||
5327			    em->start != start) {
5328				write_unlock(&map->lock);
5329				free_extent_map(em);
5330				break;
5331			}
5332			if (test_range_bit(tree, em->start,
5333					   extent_map_end(em) - 1,
5334					   EXTENT_LOCKED, 0, NULL))
5335				goto next;
5336			/*
5337			 * If it's not in the list of modified extents, used
5338			 * by a fast fsync, we can remove it. If it's being
5339			 * logged we can safely remove it since fsync took an
5340			 * extra reference on the em.
5341			 */
5342			if (list_empty(&em->list) ||
5343			    test_bit(EXTENT_FLAG_LOGGING, &em->flags))
5344				goto remove_em;
5345			/*
5346			 * If it's in the list of modified extents, remove it
5347			 * only if its generation is older than the current one,
5348			 * in which case we don't need it for a fast fsync.
5349			 * Otherwise don't remove it, we could be racing with an
5350			 * ongoing fast fsync that could miss the new extent.
5351			 */
5352			fs_info = btrfs_inode->root->fs_info;
5353			spin_lock(&fs_info->trans_lock);
5354			cur_gen = fs_info->generation;
5355			spin_unlock(&fs_info->trans_lock);
5356			if (em->generation >= cur_gen)
5357				goto next;
5358remove_em:
5359			/*
5360			 * We only remove extent maps that are not in the list of
5361			 * modified extents or that are in the list but with a
5362			 * generation lower than the current generation, so there
5363			 * is no need to set the full fsync flag on the inode (it
5364			 * hurts the fsync performance for workloads with a data
5365			 * size that exceeds or is close to the system's memory).
5366			 */
5367			remove_extent_mapping(map, em);
5368			/* once for the rb tree */
5369			free_extent_map(em);
5370next:
5371			start = extent_map_end(em);
5372			write_unlock(&map->lock);
5373
5374			/* once for us */
5375			free_extent_map(em);
5376
5377			cond_resched(); /* Allow large-extent preemption. */
5378		}
5379	}
5380	return try_release_extent_state(tree, page, mask);
5381}
5382
5383/*
5384 * helper function for fiemap, which doesn't want to see any holes.
5385 * This maps until we find something past 'last'
5386 */
5387static struct extent_map *get_extent_skip_holes(struct btrfs_inode *inode,
5388						u64 offset, u64 last)
5389{
5390	u64 sectorsize = btrfs_inode_sectorsize(inode);
5391	struct extent_map *em;
5392	u64 len;
5393
5394	if (offset >= last)
5395		return NULL;
5396
5397	while (1) {
5398		len = last - offset;
5399		if (len == 0)
5400			break;
5401		len = ALIGN(len, sectorsize);
5402		em = btrfs_get_extent_fiemap(inode, offset, len);
5403		if (IS_ERR_OR_NULL(em))
5404			return em;
5405
5406		/* if this isn't a hole return it */
5407		if (em->block_start != EXTENT_MAP_HOLE)
5408			return em;
5409
5410		/* this is a hole, advance to the next extent */
5411		offset = extent_map_end(em);
5412		free_extent_map(em);
5413		if (offset >= last)
5414			break;
5415	}
5416	return NULL;
5417}
5418
5419/*
5420 * To cache the previous fiemap extent.
5421 *
5422 * Will be used for merging fiemap extents.
5423 */
5424struct fiemap_cache {
5425	u64 offset;
5426	u64 phys;
5427	u64 len;
5428	u32 flags;
5429	bool cached;
5430};
5431
5432/*
5433 * Helper to submit fiemap extent.
5434 *
5435 * Will try to merge the current fiemap extent specified by @offset, @phys,
5436 * @len and @flags with the cached one.
5437 * Only when we fail to merge will the cached one be submitted as a
5438 * fiemap extent.
5439 *
5440 * Return value is the same as fiemap_fill_next_extent().
5441 */
5442static int emit_fiemap_extent(struct fiemap_extent_info *fieinfo,
5443				struct fiemap_cache *cache,
5444				u64 offset, u64 phys, u64 len, u32 flags)
5445{
5446	int ret = 0;
5447
5448	if (!cache->cached)
5449		goto assign;
5450
5451	/*
5452	 * Sanity check, extent_fiemap() should have ensured that new
5453	 * fiemap extent won't overlap with cached one.
5454	 * Not recoverable.
5455	 *
5456	 * NOTE: Physical address can overlap, due to compression
5457	 */
5458	if (cache->offset + cache->len > offset) {
5459		WARN_ON(1);
5460		return -EINVAL;
5461	}
5462
5463	/*
5464	 * Only merges fiemap extents if
5465	 * 1) Their logical addresses are continuous
5466	 *
5467	 * 2) Their physical addresses are continuous
5468	 *    So truly compressed (physical size smaller than logical size)
5469	 *    extents won't get merged with each other
5470	 *
5471	 * 3) Share same flags except FIEMAP_EXTENT_LAST
5472	 *    So regular extent won't get merged with prealloc extent
5473	 */
5474	if (cache->offset + cache->len  == offset &&
5475	    cache->phys + cache->len == phys  &&
5476	    (cache->flags & ~FIEMAP_EXTENT_LAST) ==
5477			(flags & ~FIEMAP_EXTENT_LAST)) {
5478		cache->len += len;
5479		cache->flags |= flags;
5480		goto try_submit_last;
5481	}
5482
5483	/* Not mergeable, need to submit cached one */
5484	ret = fiemap_fill_next_extent(fieinfo, cache->offset, cache->phys,
5485				      cache->len, cache->flags);
5486	cache->cached = false;
5487	if (ret)
5488		return ret;
5489assign:
5490	cache->cached = true;
5491	cache->offset = offset;
5492	cache->phys = phys;
5493	cache->len = len;
5494	cache->flags = flags;
5495try_submit_last:
5496	if (cache->flags & FIEMAP_EXTENT_LAST) {
5497		ret = fiemap_fill_next_extent(fieinfo, cache->offset,
5498				cache->phys, cache->len, cache->flags);
5499		cache->cached = false;
5500	}
5501	return ret;
5502}

/*
 * Emit last fiemap cache
 *
 * The last fiemap cache may still be cached in the following case:
 * 0		      4k		    8k
 * |<- Fiemap range ->|
 * |<------------  First extent ----------->|
 *
 * In this case, the first extent range will be cached but not emitted.
 * So we must emit it before ending extent_fiemap().
 */
static int emit_last_fiemap_cache(struct fiemap_extent_info *fieinfo,
				  struct fiemap_cache *cache)
{
	int ret;

	if (!cache->cached)
		return 0;

	ret = fiemap_fill_next_extent(fieinfo, cache->offset, cache->phys,
				      cache->len, cache->flags);
	cache->cached = false;
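	/*
	 * Note (added for clarity): fiemap_fill_next_extent() returns 1 when
	 * the extent just filled was the last one that fits in the user's
	 * buffer; that is not an error, so it is mapped to 0 below.
	 */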
	if (ret > 0)
		ret = 0;
	return ret;
}
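
/*
 * Usage sketch (added for clarity, not in the original source): a caller is
 * expected to feed extents through emit_fiemap_extent() in increasing logical
 * order and then flush whatever is still cached, roughly:
 *
 *	struct fiemap_cache cache = { 0 };
 *	int ret;
 *
 *	ret = emit_fiemap_extent(fieinfo, &cache, offset, phys, len, flags);
 *	...
 *	if (!ret)
 *		ret = emit_last_fiemap_cache(fieinfo, &cache);
 *
 * This is exactly the pattern used by extent_fiemap() below.
 */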

int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
		  u64 start, u64 len)
{
	int ret = 0;
	u64 off;
	u64 max = start + len;
	u32 flags = 0;
	u32 found_type;
	u64 last;
	u64 last_for_get_extent = 0;
	u64 disko = 0;
	u64 isize = i_size_read(&inode->vfs_inode);
	struct btrfs_key found_key;
	struct extent_map *em = NULL;
	struct extent_state *cached_state = NULL;
	struct btrfs_path *path;
	struct btrfs_root *root = inode->root;
	struct fiemap_cache cache = { 0 };
	struct ulist *roots;
	struct ulist *tmp_ulist;
	int end = 0;
	u64 em_start = 0;
	u64 em_len = 0;
	u64 em_end = 0;

	if (len == 0)
		return -EINVAL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	roots = ulist_alloc(GFP_KERNEL);
	tmp_ulist = ulist_alloc(GFP_KERNEL);
	if (!roots || !tmp_ulist) {
		ret = -ENOMEM;
		goto out_free_ulist;
	}

	/*
	 * We can't initialize 'off' to 'start', as that could miss extents
	 * due to extent item merging.
	 */
	off = 0;
	start = round_down(start, btrfs_inode_sectorsize(inode));
	len = round_up(max, btrfs_inode_sectorsize(inode)) - start;

	/*
	 * Look up the last file extent.  We're not using i_size here because
	 * there might be preallocation past i_size.
	 */
	ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(inode), -1,
				       0);
	if (ret < 0) {
		goto out_free_ulist;
	} else {
		WARN_ON(!ret);
		if (ret == 1)
			ret = 0;
	}

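	/*
	 * Note (added for clarity): a search for file offset (u64)-1 can
	 * never find an exact match, so after the search the previous slot
	 * holds the last extent item of this inode, if it has any extent
	 * items at all (the key is checked right below).
	 */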
	path->slots[0]--;
	btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
	found_type = found_key.type;

	/* No extents, but there might be delalloc bits */
	if (found_key.objectid != btrfs_ino(inode) ||
	    found_type != BTRFS_EXTENT_DATA_KEY) {
		/* Have to trust i_size as the end */
		last = (u64)-1;
		last_for_get_extent = isize;
	} else {
		/*
		 * Remember the start of the last extent.  There are a
		 * bunch of different factors that go into the length of the
		 * extent, so it's much less complex to remember where it started.
		 */
		last = found_key.offset;
		last_for_get_extent = last + 1;
	}
	btrfs_release_path(path);

	/*
	 * We might have some extents allocated but more delalloc past those
	 * extents.  So, we trust isize unless the start of the last extent is
	 * beyond isize.
	 */
	if (last < isize) {
		last = (u64)-1;
		last_for_get_extent = isize;
	}

	lock_extent_bits(&inode->io_tree, start, start + len - 1,
			 &cached_state);

	em = get_extent_skip_holes(inode, start, last_for_get_extent);
	if (!em)
		goto out;
	if (IS_ERR(em)) {
		ret = PTR_ERR(em);
		goto out;
	}

	while (!end) {
		u64 offset_in_extent = 0;

		/* Break if the extent we found is outside the range */
		if (em->start >= max || extent_map_end(em) < off)
			break;

		/*
		 * get_extent may return an extent that starts before our
		 * requested range.  We have to make sure the ranges
		 * we return to fiemap always move forward and don't
		 * overlap, so adjust the offsets here.
		 */
		em_start = max(em->start, off);

		/*
		 * Record the offset from the start of the extent
		 * for adjusting the disk offset below.  Only do this if the
		 * extent isn't compressed, since our in-ram offset may be past
		 * what we have actually allocated on disk.
		 */
		if (!test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
			offset_in_extent = em_start - em->start;
		em_end = extent_map_end(em);
		em_len = em_end - em_start;
		flags = 0;
		if (em->block_start < EXTENT_MAP_LAST_BYTE)
			disko = em->block_start + offset_in_extent;
		else
			disko = 0;

		/*
		 * Bump 'off' for our next call to get_extent().
		 */
		off = extent_map_end(em);
		if (off >= max)
			end = 1;

		if (em->block_start == EXTENT_MAP_LAST_BYTE) {
			end = 1;
			flags |= FIEMAP_EXTENT_LAST;
		} else if (em->block_start == EXTENT_MAP_INLINE) {
			flags |= (FIEMAP_EXTENT_DATA_INLINE |
				  FIEMAP_EXTENT_NOT_ALIGNED);
		} else if (em->block_start == EXTENT_MAP_DELALLOC) {
			flags |= (FIEMAP_EXTENT_DELALLOC |
				  FIEMAP_EXTENT_UNKNOWN);
		} else if (fieinfo->fi_extents_max) {
			u64 bytenr = em->block_start -
				(em->start - em->orig_start);

			/*
			 * As btrfs supports shared space, this information
			 * can be exported to userspace tools via
			 * flag FIEMAP_EXTENT_SHARED.  If fi_extents_max == 0
			 * then we're just getting a count and we can skip the
			 * lookup stuff.
			 */
			ret = btrfs_check_shared(root, btrfs_ino(inode),
						 bytenr, roots, tmp_ulist);
			if (ret < 0)
				goto out_free;
			if (ret)
				flags |= FIEMAP_EXTENT_SHARED;
			ret = 0;
		}
		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
			flags |= FIEMAP_EXTENT_ENCODED;
		if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
			flags |= FIEMAP_EXTENT_UNWRITTEN;

		free_extent_map(em);
		em = NULL;
		if ((em_start >= last) || em_len == (u64)-1 ||
		   (last == (u64)-1 && isize <= em_end)) {
			flags |= FIEMAP_EXTENT_LAST;
			end = 1;
		}

		/* Now scan forward to see if this is really the last extent. */
		em = get_extent_skip_holes(inode, off, last_for_get_extent);
		if (IS_ERR(em)) {
			ret = PTR_ERR(em);
			goto out;
		}
		if (!em) {
			flags |= FIEMAP_EXTENT_LAST;
			end = 1;
		}
		ret = emit_fiemap_extent(fieinfo, &cache, em_start, disko,
					   em_len, flags);
		if (ret) {
			if (ret == 1)
				ret = 0;
			goto out_free;
		}
	}
out_free:
	if (!ret)
		ret = emit_last_fiemap_cache(fieinfo, &cache);
	free_extent_map(em);
out:
	unlock_extent_cached(&inode->io_tree, start, start + len - 1,
			     &cached_state);

out_free_ulist:
	btrfs_free_path(path);
	ulist_free(roots);
	ulist_free(tmp_ulist);
	return ret;
}
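
/*
 * Note (added for clarity, an assumption about the surrounding code): the
 * expected caller is btrfs' fiemap ioctl handler, which validates the request
 * with fiemap_prep() and then invokes this helper roughly as
 *
 *	extent_fiemap(BTRFS_I(inode), fieinfo, start, len);
 *
 * so 'start' and 'len' are assumed to arrive here already sanity-checked.
 */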

static void __free_extent_buffer(struct extent_buffer *eb)
{
	kmem_cache_free(extent_buffer_cache, eb);
}

int extent_buffer_under_io(const struct extent_buffer *eb)
{
	return (atomic_read(&eb->io_pages) ||
		test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) ||
		test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
}

static bool page_range_has_eb(struct btrfs_fs_info *fs_info, struct page *page)
{
	struct btrfs_subpage *subpage;

	lockdep_assert_held(&page->mapping->private_lock);

	if (PagePrivate(page)) {
		subpage = (struct btrfs_subpage *)page->private;
		if (atomic_read(&subpage->eb_refs))
			return true;
		/*
		 * Even if there are no eb refs here, we may still have
		 * an end_page_read() call relying on page::private.
		 */
		if (atomic_read(&subpage->readers))
			return true;
	}
	return false;
}

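/*
 * Note (summary added for clarity): detach an extent buffer from one of its
 * pages.  For the regular sectorsize == PAGE_SIZE case the page private
 * pointer is cleared only if it still points at @eb.  For subpage metadata,
 * an UNMAPPED (dummy) eb owns its page exclusively and the subpage structure
 * is detached directly, while a mapped eb only drops its subpage eb reference
 * and detaches the subpage structure once no other eb or reader uses the
 * page range.
 */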
static void detach_extent_buffer_page(struct extent_buffer *eb, struct page *page)
{
	struct btrfs_fs_info *fs_info = eb->fs_info;
	const bool mapped = !test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);

	/*
	 * For a mapped eb, we're going to change the page private, which
	 * should be done under the private_lock.
	 */
	if (mapped)
		spin_lock(&page->mapping->private_lock);

	if (!PagePrivate(page)) {
		if (mapped)
			spin_unlock(&page->mapping->private_lock);
		return;
	}

	if (fs_info->sectorsize == PAGE_SIZE) {
		/*
		 * We do this since we'll remove the pages after we've
		 * removed the eb from the radix tree, so we could race
		 * and have this page now attached to the new eb.  So
		 * only clear page_private if it's still connected to
		 * this eb.
		 */
		if (PagePrivate(page) &&
		    page->private == (unsigned long)eb) {
			BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
			BUG_ON(PageDirty(page));
			BUG_ON(PageWriteback(page));
			/*
			 * We need to make sure we haven't been attached
			 * to a new eb.
			 */
			detach_page_private(page);
		}
		if (mapped)
			spin_unlock(&page->mapping->private_lock);
		return;
	}

	/*
	 * For subpage, we can have a dummy eb with page private attached.  In
	 * this case, we can directly detach the private as such a page is only
	 * attached to one dummy eb, with no sharing.
	 */
	if (!mapped) {
		btrfs_detach_subpage(fs_info, page);
		return;
	}

	btrfs_page_dec_eb_refs(fs_info, page);

	/*
	 * We can only detach the page private if there are no other ebs in the
	 * page range and no unfinished IO.
	 */
	if (!page_range_has_eb(fs_info, page))
		btrfs_detach_subpage(fs_info, page);

	spin_unlock(&page->mapping->private_lock);
}

/* Release all pages attached to the extent buffer */
static void btrfs_release_extent_buffer_pages(struct extent_buffer *eb)
{
	int i;
	int num_pages;

	ASSERT(!extent_buffer_under_io(eb));

	num_pages = num_extent_pages(eb);
	for (i = 0; i < num_pages; i++) {
		struct page *page = eb->pages[i];

		if (!page)
			continue;

		detach_extent_buffer_page(eb, page);

		/* One for when we allocated the page */
		put_page(page);
	}
}

/*
 * Helper for releasing the extent buffer.
 */
static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
{
	btrfs_release_extent_buffer_pages(eb);
	btrfs_leak_debug_del(&eb->fs_info->eb_leak_lock, &eb->leak_list);
	__free_extent_buffer(eb);
}

static struct extent_buffer *
__alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start,
		      unsigned long len)
{
	struct extent_buffer *eb = NULL;

	eb = kmem_cache_zalloc(extent_buffer_cache, GFP_NOFS|__GFP_NOFAIL);
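	/*
	 * Note (added for clarity): the allocation uses __GFP_NOFAIL, so it
	 * cannot return NULL and no error check is needed here.
	 */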
	eb->start = start;
	eb->len = len;
	eb->fs_info = fs_info;
	eb->bflags = 0;
	init_rwsem(&eb->lock);

	btrfs_leak_debug_add(&fs_info->eb_leak_lock, &eb->leak_list,
			     &fs_info->allocated_ebs);
	INIT_LIST_HEAD(&eb->release_list);

	spin_lock_init(&eb->refs_lock);
	atomic_set(&eb->refs, 1);
	atomic_set(&eb->io_pages, 0);

	ASSERT(len <= BTRFS_MAX_METADATA_BLOCKSIZE);

	return eb;
}

struct extent_buffer *btrfs_clone_extent_buffer(const struct extent_buffer *src)
{
	int i;
	struct page *p;
	struct extent_buffer *new;
	int num_pages = num_extent_pages(src);

	new = __alloc_extent_buffer(src->fs_info, src->start, src->len);
	if (new == NULL)
		return NULL;

	/*
	 * Set UNMAPPED before calling btrfs_release_extent_buffer(), as
	 * btrfs_release_extent_buffer() has different behavior for
	 * UNMAPPED subpage extent buffers.
	 */
	set_bit(EXTENT_BUFFER_UNMAPPED, &new->bflags);

	for (i = 0; i < num_pages; i++) {
		int ret;

		p = alloc_page(GFP_NOFS);
		if (!p) {
			btrfs_release_extent_buffer(new);
			return NULL;
		}
		ret = attach_extent_buffer_page(new, p, NULL);
		if (ret < 0) {
			put_page(p);
			btrfs_release_extent_buffer(new);
			return NULL;
		}
		WARN_ON(PageDirty(p));
		new->pages[i] = p;
		copy_page(page_address(p), page_address(src->pages[i]));
	}
	set_extent_buffer_uptodate(new);

	return new;
}

struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
						  u64 start, unsigned long len)
{
	struct extent_buffer *eb;
	int num_pages;
	int i;

	eb = __alloc_extent_buffer(fs_info, start, len);
	if (!eb)
		return NULL;

	num_pages = num_extent_pages(eb);
	for (i = 0; i < num_pages; i++) {
		int ret;

		eb->pages[i] = alloc_page(GFP_NOFS);
		if (!eb->pages[i])
			goto err;
		ret = attach_extent_buffer_page(eb, eb->pages[i], NULL);
		if (ret < 0)
			goto err;
	}
	set_extent_buffer_uptodate(eb);
	btrfs_set_header_nritems(eb, 0);
	set_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);

	return eb;
err:
	for (; i > 0; i--) {
		detach_extent_buffer_page(eb, eb->pages[i - 1]);
		__free_page(eb->pages[i - 1]);
	}
	__free_extent_buffer(eb);
	return NULL;
}

struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
						u64 start)
{
	return __alloc_dummy_extent_buffer(fs_info, start, fs_info->nodesize);
}
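
/*
 * Note (added for clarity): the dummy extent buffers returned by the helpers
 * above are marked EXTENT_BUFFER_UNMAPPED, start out uptodate with freshly
 * allocated pages private to the buffer, have their header item count set to
 * zero, and are never inserted into the fs_info buffer tree.
 */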

static void check_buffer_tree_ref(struct extent_buffer *eb)
{
	int refs;
	/*
	 * The TREE_REF bit is first set when the extent_buffer is added
	 * to the radix tree. It is also set again, if it was cleared, when a
	 * new reference is created by find_extent_buffer.
	 *
	 * It is only cleared in two cases: freeing the last non-tree
	 * reference to the extent_buffer when its STALE bit is set or
	 * calling releasepage when the tree reference is the only reference.
	 *
	 * In both cases, care is taken to ensure that the extent_buffer's
	 * pages are not under io. However, releasepage can be concurrently
	 * called with creating new references, which is prone to race
	 * conditions between the calls to check_buffer_tree_ref in those
	 * codepaths and clearing TREE_REF in try_release_extent_buffer.
	 *
	 * The actual lifetime of the extent_buffer in the radix tree is
	 * adequately protected by the refcount, but the TREE_REF bit and
	 * its corresponding reference are not. To protect against this
	 * class of races, we call check_buffer_tree_ref from the codepaths
	 * which trigger io after they set eb->io_pages. Note that once io is
	 * initiated, TREE_REF can no longer be cleared, so that is the
	 * moment at which any such race is best fixed.
	 */
	refs = atomic_read(&eb->refs);
	if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
		return;

	spin_lock(&eb->refs_lock);
	if (!test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
		atomic_inc(&eb->refs);
	spin_unlock(&eb->refs_lock);
}

static void mark_extent_buffer_accessed(struct extent_buffer *eb,
		struct page *accessed)
{
	int num_pages, i;

	check_buffer_tree_ref(eb);

	num_pages = num_extent_pages(eb);
	for (i = 0; i < num_pages; i++) {
		struct page *p = eb->pages[i];

		if (p != accessed)
			mark_page_accessed(p);
	}
}

struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
					 u64 start)
{
	struct extent_buffer *eb;

	eb = find_extent_buffer_nolock(fs_info, start);
	if (!eb)
		return NULL;
	/*
	 * Lock our eb's refs_lock to avoid races with free_extent_buffer().
	 * When we get our eb it might be flagged with EXTENT_BUFFER_STALE and
	 * another task running free_extent_buffer() might have seen that flag
	 * set, eb->refs == 2, that the buffer isn't under IO (dirty and
	 * writeback flags not set) and it's still in the tree (flag
	 * EXTENT_BUFFER_TREE_REF set), therefore being in the process of
	 * decrementing the extent buffer's reference count twice.  So here we
	 * could race and increment the eb's reference count, clear its stale
	 * flag, mark it as dirty and drop our reference before the other task
	 * finishes executing free_extent_buffer, which would later result in
	 * an attempt to free an extent buffer that is dirty.
	 */
	if (test_bit(EXTENT_BUFFER_STALE, &eb->bflags)) {
		spin_lock(&eb->refs_lock);
		spin_unlock(&eb->refs_lock);
	}
	mark_extent_buffer_accessed(eb, NULL);
	return eb;
}

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
					u64 start)
{
	struct extent_buffer *eb, *exists = NULL;