// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/sched/mm.h>
#include "misc.h"
#include "ctree.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "extent_io.h"
#include "disk-io.h"
#include "compression.h"
#include "delalloc-space.h"
#include "qgroup.h"
#include "subpage.h"

static struct kmem_cache *btrfs_ordered_extent_cache;

static u64 entry_end(struct btrfs_ordered_extent *entry)
{
	if (entry->file_offset + entry->num_bytes < entry->file_offset)
		return (u64)-1;
	return entry->file_offset + entry->num_bytes;
}
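
/*
 * Illustrative sketch, kept compiled out with #if 0: entry_end() saturates
 * instead of wrapping.  With file_offset == (u64)-4 and num_bytes == 16 the
 * raw sum would wrap around to 11, so the helper returns (u64)-1 and the
 * tree ordering stays sane.  The values are made up for illustration.
 */
#if 0
static void entry_end_overflow_sketch(void)
{
	struct btrfs_ordered_extent entry = {
		.file_offset = (u64)-4,
		.num_bytes = 16,
	};

	/* 0xfffffffffffffffc + 16 wraps, so entry_end() clamps to U64_MAX. */
	ASSERT(entry_end(&entry) == (u64)-1);
}
#endif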

/*
 * Returns NULL if the insertion worked, or the existing node that was
 * found in the tree.
 */
static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_ordered_extent *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);

		if (file_offset < entry->file_offset)
			p = &(*p)->rb_left;
		else if (file_offset >= entry_end(entry))
			p = &(*p)->rb_right;
		else
			return parent;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

/*
 * look for a given offset in the tree, and if it can't be found return the
 * first lesser offset
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
				     struct rb_node **prev_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *test;
	struct btrfs_ordered_extent *entry;
	struct btrfs_ordered_extent *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
		prev = n;
		prev_entry = entry;

		if (file_offset < entry->file_offset)
			n = n->rb_left;
		else if (file_offset >= entry_end(entry))
			n = n->rb_right;
		else
			return n;
	}
	if (!prev_ret)
		return NULL;

	while (prev && file_offset >= entry_end(prev_entry)) {
		test = rb_next(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		if (file_offset < entry_end(prev_entry))
			break;

		prev = test;
	}
	if (prev)
		prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
				      rb_node);
	while (prev && file_offset < entry_end(prev_entry)) {
		test = rb_prev(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		prev = test;
	}
	*prev_ret = prev;
	return NULL;
}

static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset,
			  u64 len)
{
	if (file_offset + len <= entry->file_offset ||
	    entry->file_offset + entry->num_bytes <= file_offset)
		return 0;
	return 1;
}

/*
 * Look for the first ordered struct that has this offset, otherwise
 * the first one less than this offset
 */
static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
					  u64 file_offset)
{
	struct rb_root *root = &tree->tree;
	struct rb_node *prev = NULL;
	struct rb_node *ret;
	struct btrfs_ordered_extent *entry;

	if (tree->last) {
		entry = rb_entry(tree->last, struct btrfs_ordered_extent,
				 rb_node);
		if (in_range(file_offset, entry->file_offset, entry->num_bytes))
			return tree->last;
	}
	ret = __tree_search(root, file_offset, &prev);
	if (!ret)
		ret = prev;
	if (ret)
		tree->last = ret;
	return ret;
}

/*
 * Allocate and add a new ordered_extent into the per-inode tree.
 *
 * The tree is given a single reference on the ordered extent that was
 * inserted.
 */
static int __btrfs_add_ordered_extent(struct btrfs_inode *inode, u64 file_offset,
				      u64 disk_bytenr, u64 num_bytes,
				      u64 disk_num_bytes, int type, int dio,
				      int compress_type)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry;
	int ret;

	if (type == BTRFS_ORDERED_NOCOW || type == BTRFS_ORDERED_PREALLOC) {
		/* For nocow write, we can release the qgroup rsv right now */
		ret = btrfs_qgroup_free_data(inode, NULL, file_offset, num_bytes);
		if (ret < 0)
			return ret;
		ret = 0;
	} else {
		/*
		 * The ordered extent has reserved qgroup space, release now
		 * and pass the reserved number for qgroup_record to free.
		 */
		ret = btrfs_qgroup_release_data(inode, file_offset, num_bytes);
		if (ret < 0)
			return ret;
	}
	entry = kmem_cache_zalloc(btrfs_ordered_extent_cache, GFP_NOFS);
	if (!entry)
		return -ENOMEM;

	entry->file_offset = file_offset;
	entry->disk_bytenr = disk_bytenr;
	entry->num_bytes = num_bytes;
	entry->disk_num_bytes = disk_num_bytes;
	entry->bytes_left = num_bytes;
	entry->inode = igrab(&inode->vfs_inode);
	entry->compress_type = compress_type;
	entry->truncated_len = (u64)-1;
	entry->qgroup_rsv = ret;
	entry->physical = (u64)-1;

	ASSERT(type == BTRFS_ORDERED_REGULAR ||
	       type == BTRFS_ORDERED_NOCOW ||
	       type == BTRFS_ORDERED_PREALLOC ||
	       type == BTRFS_ORDERED_COMPRESSED);
	set_bit(type, &entry->flags);

	percpu_counter_add_batch(&fs_info->ordered_bytes, num_bytes,
				 fs_info->delalloc_batch);

	if (dio)
		set_bit(BTRFS_ORDERED_DIRECT, &entry->flags);

	/* one ref for the tree */
	refcount_set(&entry->refs, 1);
	init_waitqueue_head(&entry->wait);
	INIT_LIST_HEAD(&entry->list);
	INIT_LIST_HEAD(&entry->log_list);
	INIT_LIST_HEAD(&entry->root_extent_list);
	INIT_LIST_HEAD(&entry->work_list);
	init_completion(&entry->completion);

	trace_btrfs_ordered_extent_add(inode, entry);

	spin_lock_irq(&tree->lock);
	node = tree_insert(&tree->tree, file_offset,
			   &entry->rb_node);
	if (node)
		btrfs_panic(fs_info, -EEXIST,
				"inconsistency in ordered tree at offset %llu",
				file_offset);
	spin_unlock_irq(&tree->lock);

	spin_lock(&root->ordered_extent_lock);
	list_add_tail(&entry->root_extent_list,
		      &root->ordered_extents);
	root->nr_ordered_extents++;
	if (root->nr_ordered_extents == 1) {
		spin_lock(&fs_info->ordered_root_lock);
		BUG_ON(!list_empty(&root->ordered_root));
		list_add_tail(&root->ordered_root, &fs_info->ordered_roots);
		spin_unlock(&fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);

	/*
	 * We don't need the count_max_extents here, we can assume that all of
	 * that work has been done at higher layers, so this is truly the
	 * smallest the extent is going to get.
	 */
	spin_lock(&inode->lock);
	btrfs_mod_outstanding_extents(inode, 1);
	spin_unlock(&inode->lock);

	return 0;
}

int btrfs_add_ordered_extent(struct btrfs_inode *inode, u64 file_offset,
			     u64 disk_bytenr, u64 num_bytes, u64 disk_num_bytes,
			     int type)
{
	ASSERT(type == BTRFS_ORDERED_REGULAR ||
	       type == BTRFS_ORDERED_NOCOW ||
	       type == BTRFS_ORDERED_PREALLOC);
	return __btrfs_add_ordered_extent(inode, file_offset, disk_bytenr,
					  num_bytes, disk_num_bytes, type, 0,
					  BTRFS_COMPRESS_NONE);
}

int btrfs_add_ordered_extent_dio(struct btrfs_inode *inode, u64 file_offset,
				 u64 disk_bytenr, u64 num_bytes,
				 u64 disk_num_bytes, int type)
{
	ASSERT(type == BTRFS_ORDERED_REGULAR ||
	       type == BTRFS_ORDERED_NOCOW ||
	       type == BTRFS_ORDERED_PREALLOC);
	return __btrfs_add_ordered_extent(inode, file_offset, disk_bytenr,
					  num_bytes, disk_num_bytes, type, 1,
					  BTRFS_COMPRESS_NONE);
}

int btrfs_add_ordered_extent_compress(struct btrfs_inode *inode, u64 file_offset,
				      u64 disk_bytenr, u64 num_bytes,
				      u64 disk_num_bytes, int compress_type)
{
	ASSERT(compress_type != BTRFS_COMPRESS_NONE);
	return __btrfs_add_ordered_extent(inode, file_offset, disk_bytenr,
					  num_bytes, disk_num_bytes,
					  BTRFS_ORDERED_COMPRESSED, 0,
					  compress_type);
}
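
/*
 * Usage sketch, kept compiled out with #if 0: how a COW write path might
 * record a regular ordered extent once the on-disk location is allocated.
 * The offsets and sizes below are placeholder values, not taken from any
 * real caller.
 */
#if 0
static int add_ordered_extent_sketch(struct btrfs_inode *inode)
{
	const u64 file_offset = 0;	/* logical offset in the file */
	const u64 disk_bytenr = SZ_1M;	/* allocated on-disk start */
	const u64 num_bytes = SZ_64K;	/* extent length */

	/* disk_num_bytes == num_bytes for an uncompressed extent. */
	return btrfs_add_ordered_extent(inode, file_offset, disk_bytenr,
					num_bytes, num_bytes,
					BTRFS_ORDERED_REGULAR);
}
#endif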

/*
 * Add a struct btrfs_ordered_sum into the list of checksums to be inserted
 * when an ordered extent is finished.  If the list covers more than one
 * ordered extent, it is split across multiple ordered extents.
 */
void btrfs_add_ordered_sum(struct btrfs_ordered_extent *entry,
			   struct btrfs_ordered_sum *sum)
{
	struct btrfs_ordered_inode_tree *tree;

	tree = &BTRFS_I(entry->inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	list_add_tail(&sum->list, &entry->list);
	spin_unlock_irq(&tree->lock);
}

/*
 * Mark all the IO of ordered extents inside the specified range as finished.
 *
 * @page:	 The involved page for the operation.
 *		 For uncompressed buffered IO, the page status also needs to be
 *		 updated to indicate whether the pending ordered IO is finished.
 *		 Can be NULL for direct IO and compressed write.
 *		 For these cases, the callers ensure they won't execute the
 *		 endio function twice.
 * @finish_func: The function to be executed when all the IO of an ordered
 *		 extent is finished.
 *
 * This function is called for endio, thus the range must have ordered
 * extent(s) covering it.
 */
void btrfs_mark_ordered_io_finished(struct btrfs_inode *inode,
				struct page *page, u64 file_offset,
				u64 num_bytes, btrfs_func_t finish_func,
				bool uptodate)
{
	struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_workqueue *wq;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	unsigned long flags;
	u64 cur = file_offset;

	if (btrfs_is_free_space_inode(inode))
		wq = fs_info->endio_freespace_worker;
	else
		wq = fs_info->endio_write_workers;

	if (page)
		ASSERT(page->mapping && page_offset(page) <= file_offset &&
		       file_offset + num_bytes <= page_offset(page) + PAGE_SIZE);

	spin_lock_irqsave(&tree->lock, flags);
	while (cur < file_offset + num_bytes) {
		u64 entry_end;
		u64 end;
		u32 len;

		node = tree_search(tree, cur);
		/* No ordered extents at all */
		if (!node)
			break;

		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		entry_end = entry->file_offset + entry->num_bytes;
		/*
		 * |<-- OE --->|  |
		 *		  cur
		 * Go to next OE.
		 */
		if (cur >= entry_end) {
			node = rb_next(node);
			/* No more ordered extents, exit */
			if (!node)
				break;
			entry = rb_entry(node, struct btrfs_ordered_extent,
					 rb_node);

			/* Go to next ordered extent and continue */
			cur = entry->file_offset;
			continue;
		}
		/*
		 * |	|<--- OE --->|
		 * cur
		 * Go to the start of OE.
		 */
		if (cur < entry->file_offset) {
			cur = entry->file_offset;
			continue;
		}

		/*
		 * Now we are definitely inside one ordered extent.
		 *
		 * |<--- OE --->|
		 *	|
		 *	cur
		 */
		end = min(entry->file_offset + entry->num_bytes,
			  file_offset + num_bytes) - 1;
		ASSERT(end + 1 - cur < U32_MAX);
		len = end + 1 - cur;

		if (page) {
			/*
			 * Ordered (Private2) bit indicates whether we still
			 * have pending io unfinished for the ordered extent.
			 *
			 * If there's no such bit, we need to skip to next range.
			 */
			if (!btrfs_page_test_ordered(fs_info, page, cur, len)) {
				cur += len;
				continue;
			}
			btrfs_page_clear_ordered(fs_info, page, cur, len);
		}

		/* Now we're fine to update the accounting */
		if (unlikely(len > entry->bytes_left)) {
			WARN_ON(1);
			btrfs_crit(fs_info,
"bad ordered extent accounting, root=%llu ino=%llu OE offset=%llu OE len=%llu to_dec=%u left=%llu",
				   inode->root->root_key.objectid,
				   btrfs_ino(inode),
				   entry->file_offset,
				   entry->num_bytes,
				   len, entry->bytes_left);
			entry->bytes_left = 0;
		} else {
			entry->bytes_left -= len;
		}

		if (!uptodate)
			set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

		/*
		 * All the IO of the ordered extent is finished, we need to queue
		 * the finish_func to be executed.
		 */
		if (entry->bytes_left == 0) {
			set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
			cond_wake_up(&entry->wait);
			refcount_inc(&entry->refs);
			spin_unlock_irqrestore(&tree->lock, flags);
			btrfs_init_work(&entry->work, finish_func, NULL, NULL);
			btrfs_queue_work(wq, &entry->work);
			spin_lock_irqsave(&tree->lock, flags);
		}
		cur += len;
	}
	spin_unlock_irqrestore(&tree->lock, flags);
}
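
/*
 * Caller sketch, kept compiled out with #if 0: a write endio path hands the
 * whole finished range to btrfs_mark_ordered_io_finished() and lets it walk
 * every ordered extent the range touches.  finish_ordered_fn is only an
 * illustrative stand-in for whatever btrfs_func_t the real caller passes.
 */
#if 0
/* finish_ordered_fn is a hypothetical name, see the comment above. */
static void write_endio_sketch(struct btrfs_inode *inode, struct page *page,
			       u64 start, u32 len, bool uptodate)
{
	btrfs_mark_ordered_io_finished(inode, page, start, len,
				       finish_ordered_fn, uptodate);
}
#endif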

/*
 * Finish IO for one ordered extent across a given range.  The range can only
 * contain one ordered extent.
 *
 * @cached:	 The cached ordered extent. If not NULL, we can skip the tree
 *		 search and use the ordered extent directly.
 *		 It will also be used to store the finished ordered extent.
 * @file_offset: File offset for the finished IO
 * @io_size:	 Length of the finished IO range
 *
 * Return true if the ordered extent is finished in the range, and update
 * @cached.
 * Return false otherwise.
 *
 * NOTE: The range can NOT cross multiple ordered extents.
 * Thus the caller should ensure the range doesn't cross any ordered extent.
 */
bool btrfs_dec_test_ordered_pending(struct btrfs_inode *inode,
				    struct btrfs_ordered_extent **cached,
				    u64 file_offset, u64 io_size)
{
	struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	unsigned long flags;
	bool finished = false;

	spin_lock_irqsave(&tree->lock, flags);
	if (cached && *cached) {
		entry = *cached;
		goto have_entry;
	}

	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
have_entry:
	if (!in_range(file_offset, entry->file_offset, entry->num_bytes))
		goto out;

	if (io_size > entry->bytes_left)
		btrfs_crit(inode->root->fs_info,
			   "bad ordered accounting left %llu size %llu",
			   entry->bytes_left, io_size);

	entry->bytes_left -= io_size;

	if (entry->bytes_left == 0) {
		/*
		 * Ensure only one caller can set the flag and finished_ret
		 * accordingly
		 */
		finished = !test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
		/* test_and_set_bit implies a barrier */
		cond_wake_up_nomb(&entry->wait);
	}
out:
	if (finished && cached && entry) {
		*cached = entry;
		refcount_inc(&entry->refs);
	}
	spin_unlock_irqrestore(&tree->lock, flags);
	return finished;
}
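
/*
 * Usage sketch, kept compiled out with #if 0: when the extent finishes, an
 * extra reference is returned through @cached and the caller must drop it
 * with btrfs_put_ordered_extent().  Passing the same @cached back in skips
 * the tree search on repeated calls against one extent.
 */
#if 0
static void dec_test_sketch(struct btrfs_inode *inode, u64 start, u64 len)
{
	struct btrfs_ordered_extent *cached = NULL;

	if (btrfs_dec_test_ordered_pending(inode, &cached, start, len)) {
		/* The whole extent is done; run completion work here. */
		btrfs_put_ordered_extent(cached);
	}
}
#endif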

/*
 * used to drop a reference on an ordered extent.  This will free
 * the extent if the last reference is dropped
 */
void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
{
	struct list_head *cur;
	struct btrfs_ordered_sum *sum;

	trace_btrfs_ordered_extent_put(BTRFS_I(entry->inode), entry);

	if (refcount_dec_and_test(&entry->refs)) {
		ASSERT(list_empty(&entry->root_extent_list));
		ASSERT(list_empty(&entry->log_list));
		ASSERT(RB_EMPTY_NODE(&entry->rb_node));
		if (entry->inode)
			btrfs_add_delayed_iput(entry->inode);
		while (!list_empty(&entry->list)) {
			cur = entry->list.next;
			sum = list_entry(cur, struct btrfs_ordered_sum, list);
			list_del(&sum->list);
			kvfree(sum);
		}
		kmem_cache_free(btrfs_ordered_extent_cache, entry);
	}
}
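
/*
 * Reference-lifetime sketch, kept compiled out with #if 0: every lookup
 * helper below returns with an extra reference held, which must pair with
 * exactly one btrfs_put_ordered_extent().  The reference taken at insert
 * time is dropped by the completion path once the extent has been removed
 * from the tree.
 */
#if 0
static void lookup_put_sketch(struct btrfs_inode *inode, u64 offset)
{
	struct btrfs_ordered_extent *ordered;

	ordered = btrfs_lookup_ordered_extent(inode, offset);
	if (!ordered)
		return;
	/* ... inspect ordered->file_offset, ordered->num_bytes ... */
	btrfs_put_ordered_extent(ordered);
}
#endif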

/*
 * remove an ordered extent from the tree.  No references are dropped
 * and waiters are woken up.
 */
void btrfs_remove_ordered_extent(struct btrfs_inode *btrfs_inode,
				 struct btrfs_ordered_extent *entry)
{
	struct btrfs_ordered_inode_tree *tree;
	struct btrfs_root *root = btrfs_inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct rb_node *node;
	bool pending;

	/* This is paired with btrfs_add_ordered_extent. */
	spin_lock(&btrfs_inode->lock);
	btrfs_mod_outstanding_extents(btrfs_inode, -1);
	spin_unlock(&btrfs_inode->lock);
	if (root != fs_info->tree_root)
		btrfs_delalloc_release_metadata(btrfs_inode, entry->num_bytes,
						false);

	percpu_counter_add_batch(&fs_info->ordered_bytes, -entry->num_bytes,
				 fs_info->delalloc_batch);

	tree = &btrfs_inode->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = &entry->rb_node;
	rb_erase(node, &tree->tree);
	RB_CLEAR_NODE(node);
	if (tree->last == node)
		tree->last = NULL;
	set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
	pending = test_and_clear_bit(BTRFS_ORDERED_PENDING, &entry->flags);
	spin_unlock_irq(&tree->lock);

	/*
	 * The current running transaction is waiting on us, we need to let it
	 * know that we're complete and wake it up.
	 */
	if (pending) {
		struct btrfs_transaction *trans;

		/*
		 * The checks for trans are just a formality, it should be set,
		 * but if it isn't we don't want to deref/assert under the spin
		 * lock, so be nice and check if trans is set, but ASSERT() so
		 * if it isn't set a developer will notice.
		 */
		spin_lock(&fs_info->trans_lock);
		trans = fs_info->running_transaction;
		if (trans)
			refcount_inc(&trans->use_count);
		spin_unlock(&fs_info->trans_lock);

		ASSERT(trans);
		if (trans) {
			if (atomic_dec_and_test(&trans->pending_ordered))
				wake_up(&trans->pending_wait);
			btrfs_put_transaction(trans);
		}
	}

	spin_lock(&root->ordered_extent_lock);
	list_del_init(&entry->root_extent_list);
	root->nr_ordered_extents--;

	trace_btrfs_ordered_extent_remove(btrfs_inode, entry);

	if (!root->nr_ordered_extents) {
		spin_lock(&fs_info->ordered_root_lock);
		BUG_ON(list_empty(&root->ordered_root));
		list_del_init(&root->ordered_root);
		spin_unlock(&fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);
	wake_up(&entry->wait);
}

static void btrfs_run_ordered_extent_work(struct btrfs_work *work)
{
	struct btrfs_ordered_extent *ordered;

	ordered = container_of(work, struct btrfs_ordered_extent, flush_work);
	btrfs_start_ordered_extent(ordered, 1);
	complete(&ordered->completion);
}

/*
 * wait for all the ordered extents in a root.  This is done when balancing
 * space between drives.
 */
u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
			       const u64 range_start, const u64 range_len)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	LIST_HEAD(splice);
	LIST_HEAD(skipped);
	LIST_HEAD(works);
	struct btrfs_ordered_extent *ordered, *next;
	u64 count = 0;
	const u64 range_end = range_start + range_len;

	mutex_lock(&root->ordered_extent_mutex);
	spin_lock(&root->ordered_extent_lock);
	list_splice_init(&root->ordered_extents, &splice);
	while (!list_empty(&splice) && nr) {
		ordered = list_first_entry(&splice, struct btrfs_ordered_extent,
					   root_extent_list);

		if (range_end <= ordered->disk_bytenr ||
		    ordered->disk_bytenr + ordered->disk_num_bytes <= range_start) {
			list_move_tail(&ordered->root_extent_list, &skipped);
			cond_resched_lock(&root->ordered_extent_lock);
			continue;
		}

		list_move_tail(&ordered->root_extent_list,
			       &root->ordered_extents);
		refcount_inc(&ordered->refs);
		spin_unlock(&root->ordered_extent_lock);

		btrfs_init_work(&ordered->flush_work,
				btrfs_run_ordered_extent_work, NULL, NULL);
		list_add_tail(&ordered->work_list, &works);
		btrfs_queue_work(fs_info->flush_workers, &ordered->flush_work);

		cond_resched();
		spin_lock(&root->ordered_extent_lock);
		if (nr != U64_MAX)
			nr--;
		count++;
	}
	list_splice_tail(&skipped, &root->ordered_extents);
	list_splice_tail(&splice, &root->ordered_extents);
	spin_unlock(&root->ordered_extent_lock);

	list_for_each_entry_safe(ordered, next, &works, work_list) {
		list_del_init(&ordered->work_list);
		wait_for_completion(&ordered->completion);
		btrfs_put_ordered_extent(ordered);
		cond_resched();
	}
	mutex_unlock(&root->ordered_extent_mutex);

	return count;
}
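
/*
 * Usage sketch, kept compiled out with #if 0: passing nr == U64_MAX
 * together with a range spanning the whole address space drains every
 * ordered extent of the root, which is how a caller flushes a root
 * completely.
 */
#if 0
static void wait_all_sketch(struct btrfs_root *root)
{
	btrfs_wait_ordered_extents(root, U64_MAX, 0, (u64)-1);
}
#endif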

void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
			     const u64 range_start, const u64 range_len)
{
	struct btrfs_root *root;
	struct list_head splice;
	u64 done;

	INIT_LIST_HEAD(&splice);

	mutex_lock(&fs_info->ordered_operations_mutex);
	spin_lock(&fs_info->ordered_root_lock);
	list_splice_init(&fs_info->ordered_roots, &splice);
	while (!list_empty(&splice) && nr) {
		root = list_first_entry(&splice, struct btrfs_root,
					ordered_root);
		root = btrfs_grab_root(root);
		BUG_ON(!root);
		list_move_tail(&root->ordered_root,
			       &fs_info->ordered_roots);
		spin_unlock(&fs_info->ordered_root_lock);

		done = btrfs_wait_ordered_extents(root, nr,
						  range_start, range_len);
		btrfs_put_root(root);

		spin_lock(&fs_info->ordered_root_lock);
		if (nr != U64_MAX)
			nr -= done;
	}
	list_splice_tail(&splice, &fs_info->ordered_roots);
	spin_unlock(&fs_info->ordered_root_lock);
	mutex_unlock(&fs_info->ordered_operations_mutex);
}

/*
 * Used to start IO or wait for a given ordered extent to finish.
 *
 * If wait is one, this effectively waits on page writeback for all the pages
 * in the extent, and it waits on the io completion code to insert
 * metadata into the btree corresponding to the extent
 */
void btrfs_start_ordered_extent(struct btrfs_ordered_extent *entry, int wait)
{
	u64 start = entry->file_offset;
	u64 end = start + entry->num_bytes - 1;
	struct btrfs_inode *inode = BTRFS_I(entry->inode);

	trace_btrfs_ordered_extent_start(inode, entry);

	/*
	 * pages in the range can be dirty, clean or writeback.  We
	 * start IO on any dirty ones so the wait doesn't stall waiting
	 * for the flusher thread to find them
	 */
	if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
		filemap_fdatawrite_range(inode->vfs_inode.i_mapping, start, end);
	if (wait) {
		wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE,
						 &entry->flags));
	}
}

/*
 * Used to wait on ordered extents across a large range of bytes.
 */
int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
{
	int ret = 0;
	int ret_wb = 0;
	u64 end;
	u64 orig_end;
	struct btrfs_ordered_extent *ordered;

	if (start + len < start) {
		orig_end = INT_LIMIT(loff_t);
	} else {
		orig_end = start + len - 1;
		if (orig_end > INT_LIMIT(loff_t))
			orig_end = INT_LIMIT(loff_t);
	}

	/* start IO across the range first to instantiate any delalloc
	 * extents
	 */
	ret = btrfs_fdatawrite_range(inode, start, orig_end);
	if (ret)
		return ret;

	/*
	 * If we have a writeback error don't return immediately. Wait first
	 * for any ordered extents that haven't completed yet. This is to make
	 * sure no one can dirty the same page ranges and call writepages()
	 * before the ordered extents complete - to avoid failures (-EEXIST)
	 * when adding the new ordered extents to the ordered tree.
	 */
	ret_wb = filemap_fdatawait_range(inode->i_mapping, start, orig_end);

	end = orig_end;
	while (1) {
		ordered = btrfs_lookup_first_ordered_extent(BTRFS_I(inode), end);
		if (!ordered)
			break;
		if (ordered->file_offset > orig_end) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		if (ordered->file_offset + ordered->num_bytes <= start) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		btrfs_start_ordered_extent(ordered, 1);
		end = ordered->file_offset;
		/*
		 * If the ordered extent had an error save the error but don't
		 * exit without waiting first for all other ordered extents in
		 * the range to complete.
		 */
		if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
			ret = -EIO;
		btrfs_put_ordered_extent(ordered);
		if (end == 0 || end == start)
			break;
		end--;
	}
	return ret_wb ? ret_wb : ret;
}
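
/*
 * Usage sketch, kept compiled out with #if 0: an fsync-style caller can
 * flush and wait on the whole file by passing a length that covers
 * everything; the helper clamps the end offset to the loff_t limit
 * internally.
 */
#if 0
static int wait_whole_file_sketch(struct inode *inode)
{
	return btrfs_wait_ordered_range(inode, 0, (u64)-1);
}
#endif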

/*
 * find an ordered extent corresponding to file_offset.  return NULL if
 * nothing is found, otherwise take a reference on the extent and return it
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct btrfs_inode *inode,
							 u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	unsigned long flags;

	tree = &inode->ordered_tree;
	spin_lock_irqsave(&tree->lock, flags);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!in_range(file_offset, entry->file_offset, entry->num_bytes))
		entry = NULL;
	if (entry)
		refcount_inc(&entry->refs);
out:
	spin_unlock_irqrestore(&tree->lock, flags);
	return entry;
}

/*
 * Since the DIO code tries to lock a wide area we need to look for any ordered
 * extents that exist in the range, rather than just the start of the range.
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_range(
		struct btrfs_inode *inode, u64 file_offset, u64 len)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &inode->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node) {
		node = tree_search(tree, file_offset + len);
		if (!node)
			goto out;
	}

	while (1) {
		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		if (range_overlaps(entry, file_offset, len))
			break;

		if (entry->file_offset >= file_offset + len) {
			entry = NULL;
			break;
		}
		entry = NULL;
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	if (entry)
		refcount_inc(&entry->refs);
	spin_unlock_irq(&tree->lock);
	return entry;
}
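
/*
 * Usage sketch, kept compiled out with #if 0: a direct IO style caller
 * checks a whole byte range for any overlapping ordered extent before
 * treating the range as quiescent, dropping the returned reference right
 * away.
 */
#if 0
static bool range_has_ordered_sketch(struct btrfs_inode *inode, u64 start,
				     u64 len)
{
	struct btrfs_ordered_extent *ordered;

	ordered = btrfs_lookup_ordered_range(inode, start, len);
	if (!ordered)
		return false;
	btrfs_put_ordered_extent(ordered);
	return true;
}
#endif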

/*
 * Adds all ordered extents to the given list. The list ends up sorted by the
 * file_offset of the ordered extents.
 */
void btrfs_get_ordered_extents_for_logging(struct btrfs_inode *inode,
					   struct list_head *list)
{
	struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
	struct rb_node *n;

	ASSERT(inode_is_locked(&inode->vfs_inode));

	spin_lock_irq(&tree->lock);
	for (n = rb_first(&tree->tree); n; n = rb_next(n)) {
		struct btrfs_ordered_extent *ordered;

		ordered = rb_entry(n, struct btrfs_ordered_extent, rb_node);

		if (test_bit(BTRFS_ORDERED_LOGGED, &ordered->flags))
			continue;

		ASSERT(list_empty(&ordered->log_list));
		list_add_tail(&ordered->log_list, list);
		refcount_inc(&ordered->refs);
	}
	spin_unlock_irq(&tree->lock);
}

/*
 * lookup and return any extent before 'file_offset'.  NULL is returned
 * if none is found
 */
struct btrfs_ordered_extent *
btrfs_lookup_first_ordered_extent(struct btrfs_inode *inode, u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &inode->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	refcount_inc(&entry->refs);
out:
	spin_unlock_irq(&tree->lock);
	return entry;
}

/*
 * Lookup the first ordered extent that overlaps the range
 * [@file_offset, @file_offset + @len).
 *
 * The difference between this and btrfs_lookup_first_ordered_extent() is
 * that this one won't return any ordered extent that does not overlap the
 * range.  Unlike btrfs_lookup_ordered_extent(), this function ensures the
 * first overlapping ordered extent gets returned.
 */
struct btrfs_ordered_extent *btrfs_lookup_first_ordered_range(
			struct btrfs_inode *inode, u64 file_offset, u64 len)
{
	struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
	struct rb_node *node;
	struct rb_node *cur;
	struct rb_node *prev;
	struct rb_node *next;
	struct btrfs_ordered_extent *entry = NULL;

	spin_lock_irq(&tree->lock);
	node = tree->tree.rb_node;
	/*
	 * Here we don't want to use tree_search() which will use tree->last
	 * and screw up the search order.
	 * And __tree_search() can't return the adjacent ordered extents
	 * either, thus here we do our own search.
	 */
	while (node) {
		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);

		if (file_offset < entry->file_offset) {
			node = node->rb_left;
		} else if (file_offset >= entry_end(entry)) {
			node = node->rb_right;
		} else {
			/*
			 * Direct hit, got an ordered extent that starts at
			 * @file_offset
			 */
			goto out;
		}
	}
	if (!entry) {
		/* Empty tree */
		goto out;
	}

	cur = &entry->rb_node;
	/* We got an entry around @file_offset, check adjacent entries */
	if (entry->file_offset < file_offset) {
		prev = cur;
		next = rb_next(cur);
	} else {
		prev = rb_prev(cur);
		next = cur;
	}
	if (prev) {
		entry = rb_entry(prev, struct btrfs_ordered_extent, rb_node);
		if (range_overlaps(entry, file_offset, len))
			goto out;
	}
	if (next) {
		entry = rb_entry(next, struct btrfs_ordered_extent, rb_node);
		if (range_overlaps(entry, file_offset, len))
			goto out;
	}
	/* No ordered extent in the range */
	entry = NULL;
out:
	if (entry)
		refcount_inc(&entry->refs);
	spin_unlock_irq(&tree->lock);
	return entry;
}

/*
 * btrfs_lock_and_flush_ordered_range - Lock the passed range and ensure all
 * pending ordered extents in it are run to completion.
 *
 * @inode:        Inode whose ordered tree is to be searched
 * @start:        Beginning of range to flush
 * @end:          Last byte of range to lock
 * @cached_state: If passed, will return the extent state responsible for the
 * locked range. It's the caller's responsibility to free the cached state.
 *
 * This function always returns with the given range locked, ensuring after it's
 * called no ordered extent can be pending.
 */
void btrfs_lock_and_flush_ordered_range(struct btrfs_inode *inode, u64 start,
					u64 end,
					struct extent_state **cached_state)
{
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cache = NULL;
	struct extent_state **cachedp = &cache;

	if (cached_state)
		cachedp = cached_state;

	while (1) {
		lock_extent_bits(&inode->io_tree, start, end, cachedp);
		ordered = btrfs_lookup_ordered_range(inode, start,
						     end - start + 1);
		if (!ordered) {
			/*
			 * If no external cached_state has been passed then
			 * decrement the extra ref taken for cachedp since we
			 * aren't exposing it outside of this function
			 */
			if (!cached_state)
				refcount_dec(&cache->refs);
			break;
		}
		unlock_extent_cached(&inode->io_tree, start, end, cachedp);
		btrfs_start_ordered_extent(ordered, 1);
		btrfs_put_ordered_extent(ordered);
	}
}
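
/*
 * Usage sketch, kept compiled out with #if 0: a read path takes the extent
 * lock with the guarantee that no ordered extent still covers the range,
 * then releases the lock (and the cached state) when done.
 */
#if 0
static void lock_flush_sketch(struct btrfs_inode *inode, u64 start, u64 end)
{
	struct extent_state *cached = NULL;

	btrfs_lock_and_flush_ordered_range(inode, start, end, &cached);
	/* ... operate on the locked, ordered-extent-free range ... */
	unlock_extent_cached(&inode->io_tree, start, end, &cached);
}
#endif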

static int clone_ordered_extent(struct btrfs_ordered_extent *ordered, u64 pos,
				u64 len)
{
	struct inode *inode = ordered->inode;
	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
	u64 file_offset = ordered->file_offset + pos;
	u64 disk_bytenr = ordered->disk_bytenr + pos;
	u64 num_bytes = len;
	u64 disk_num_bytes = len;
	int type;
	unsigned long flags_masked = ordered->flags & ~(1 << BTRFS_ORDERED_DIRECT);
	int compress_type = ordered->compress_type;
	unsigned long weight;
	int ret;

	weight = hweight_long(flags_masked);
	WARN_ON_ONCE(weight > 1);
	if (!weight)
		type = 0;
	else
		type = __ffs(flags_masked);

	/*
	 * The splitting extent is already counted and will be added again
	 * in btrfs_add_ordered_extent_*(). Subtract num_bytes to avoid
	 * double counting.
	 */
	percpu_counter_add_batch(&fs_info->ordered_bytes, -num_bytes,
				 fs_info->delalloc_batch);
	if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered->flags)) {
		WARN_ON_ONCE(1);
		ret = btrfs_add_ordered_extent_compress(BTRFS_I(inode),
				file_offset, disk_bytenr, num_bytes,
				disk_num_bytes, compress_type);
	} else if (test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags)) {
		ret = btrfs_add_ordered_extent_dio(BTRFS_I(inode), file_offset,
				disk_bytenr, num_bytes, disk_num_bytes, type);
	} else {
		ret = btrfs_add_ordered_extent(BTRFS_I(inode), file_offset,
				disk_bytenr, num_bytes, disk_num_bytes, type);
	}

	return ret;
}

int btrfs_split_ordered_extent(struct btrfs_ordered_extent *ordered, u64 pre,
				u64 post)
{
	struct inode *inode = ordered->inode;
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
	struct rb_node *node;
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	int ret = 0;

	spin_lock_irq(&tree->lock);
	/* Remove from tree once */
	node = &ordered->rb_node;
	rb_erase(node, &tree->tree);
	RB_CLEAR_NODE(node);
	if (tree->last == node)
		tree->last = NULL;

	ordered->file_offset += pre;
	ordered->disk_bytenr += pre;
	ordered->num_bytes -= (pre + post);
	ordered->disk_num_bytes -= (pre + post);
	ordered->bytes_left -= (pre + post);

	/* Re-insert the node */
	node = tree_insert(&tree->tree, ordered->file_offset, &ordered->rb_node);
	if (node)
		btrfs_panic(fs_info, -EEXIST,
			"zoned: inconsistency in ordered tree at offset %llu",
			    ordered->file_offset);

	spin_unlock_irq(&tree->lock);

	/*
	 * ordered->file_offset was already advanced by @pre, so the pre-range
	 * clone needs a wrapping negative pos to point back at the original
	 * start; pos == 0 would collide with the re-inserted node and trip
	 * the -EEXIST panic in tree_insert().
	 */
	if (pre)
		ret = clone_ordered_extent(ordered, pre * -1, pre);
	if (ret == 0 && post)
		ret = clone_ordered_extent(ordered, pre + ordered->disk_num_bytes,
					   post);

	return ret;
}
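
/*
 * Worked example, kept compiled out with #if 0: splitting an ordered
 * extent that covers [0, 128K) with pre == 16K and post == 0 shrinks the
 * original entry to [16K, 128K) and clones a new ordered extent for
 * [0, 16K); a nonzero @post splits a tail range off analogously.  The
 * sizes are illustrative only.
 */
#if 0
static int split_sketch(struct btrfs_ordered_extent *ordered)
{
	/* ordered spans [0, 128K) before the call. */
	return btrfs_split_ordered_extent(ordered, SZ_16K, 0);
}
#endif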

int __init ordered_data_init(void)
{
	btrfs_ordered_extent_cache = kmem_cache_create("btrfs_ordered_extent",
				     sizeof(struct btrfs_ordered_extent), 0,
				     SLAB_MEM_SPREAD,
				     NULL);
	if (!btrfs_ordered_extent_cache)
		return -ENOMEM;

	return 0;
}

void __cold ordered_data_exit(void)
{
	kmem_cache_destroy(btrfs_ordered_extent_cache);
}