// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/sched/mm.h>
#include "messages.h"
#include "misc.h"
#include "ctree.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "extent_io.h"
#include "disk-io.h"
#include "compression.h"
#include "delalloc-space.h"
#include "qgroup.h"
#include "subpage.h"
#include "file.h"

static struct kmem_cache *btrfs_ordered_extent_cache;

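/*
 * Return the exclusive end offset of an ordered extent, clamped to (u64)-1
 * if file_offset + num_bytes would overflow.
 */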
static u64 entry_end(struct btrfs_ordered_extent *entry)
{
	if (entry->file_offset + entry->num_bytes < entry->file_offset)
		return (u64)-1;
	return entry->file_offset + entry->num_bytes;
}

/*
 * Return NULL if the insertion worked, or the existing node that was found
 * in the tree.
 */
static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_ordered_extent *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);

		if (file_offset < entry->file_offset)
			p = &(*p)->rb_left;
		else if (file_offset >= entry_end(entry))
			p = &(*p)->rb_right;
		else
			return parent;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

/*
 * Look for a given offset in the tree; if it can't be found, return the
 * entry at the first lesser offset.
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
				     struct rb_node **prev_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *test;
	struct btrfs_ordered_extent *entry;
	struct btrfs_ordered_extent *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
		prev = n;
		prev_entry = entry;

		if (file_offset < entry->file_offset)
			n = n->rb_left;
		else if (file_offset >= entry_end(entry))
			n = n->rb_right;
		else
			return n;
	}
	if (!prev_ret)
		return NULL;

	/* Walk forward while @prev still ends at or before @file_offset. */
	while (prev && file_offset >= entry_end(prev_entry)) {
		test = rb_next(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		if (file_offset < entry_end(prev_entry))
			break;

		prev = test;
	}
	if (prev)
		prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
				      rb_node);
	/* Then walk back over any entries that end after @file_offset. */
	while (prev && file_offset < entry_end(prev_entry)) {
		test = rb_prev(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		prev = test;
	}
	*prev_ret = prev;
	return NULL;
}

static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset,
			  u64 len)
{
	if (file_offset + len <= entry->file_offset ||
	    entry->file_offset + entry->num_bytes <= file_offset)
		return 0;
	return 1;
}

/*
 * Look for the first ordered struct that covers this offset, otherwise the
 * first one less than this offset.
 */
static inline struct rb_node *ordered_tree_search(struct btrfs_inode *inode,
						  u64 file_offset)
{
	struct rb_node *prev = NULL;
	struct rb_node *ret;
	struct btrfs_ordered_extent *entry;

	if (inode->ordered_tree_last) {
		entry = rb_entry(inode->ordered_tree_last, struct btrfs_ordered_extent,
				 rb_node);
		if (in_range(file_offset, entry->file_offset, entry->num_bytes))
			return inode->ordered_tree_last;
	}
	ret = __tree_search(&inode->ordered_tree, file_offset, &prev);
	if (!ret)
		ret = prev;
	if (ret)
		inode->ordered_tree_last = ret;
	return ret;
}

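/*
 * Allocate and initialize an ordered extent for the given file range.
 *
 * This settles the qgroup reservation for the range (freed immediately for
 * NOCOW/prealloc writes, otherwise released and carried in qgroup_rsv),
 * pins the inode via igrab() and accounts one outstanding extent.  The
 * returned extent holds a single reference owned by the caller and is not
 * yet linked into the inode's ordered tree.
 */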
static struct btrfs_ordered_extent *alloc_ordered_extent(
			struct btrfs_inode *inode, u64 file_offset, u64 num_bytes,
			u64 ram_bytes, u64 disk_bytenr, u64 disk_num_bytes,
			u64 offset, unsigned long flags, int compress_type)
{
	struct btrfs_ordered_extent *entry;
	int ret;
	u64 qgroup_rsv = 0;

	if (flags &
	    ((1 << BTRFS_ORDERED_NOCOW) | (1 << BTRFS_ORDERED_PREALLOC))) {
		/* For nocow write, we can release the qgroup rsv right now */
		ret = btrfs_qgroup_free_data(inode, NULL, file_offset, num_bytes, &qgroup_rsv);
		if (ret < 0)
			return ERR_PTR(ret);
	} else {
		/*
		 * The ordered extent has reserved qgroup space, release now
		 * and pass the reserved number for qgroup_record to free.
		 */
		ret = btrfs_qgroup_release_data(inode, file_offset, num_bytes, &qgroup_rsv);
		if (ret < 0)
			return ERR_PTR(ret);
	}
	entry = kmem_cache_zalloc(btrfs_ordered_extent_cache, GFP_NOFS);
	if (!entry)
		return ERR_PTR(-ENOMEM);

	entry->file_offset = file_offset;
	entry->num_bytes = num_bytes;
	entry->ram_bytes = ram_bytes;
	entry->disk_bytenr = disk_bytenr;
	entry->disk_num_bytes = disk_num_bytes;
	entry->offset = offset;
	entry->bytes_left = num_bytes;
	entry->inode = igrab(&inode->vfs_inode);
	entry->compress_type = compress_type;
	entry->truncated_len = (u64)-1;
	entry->qgroup_rsv = qgroup_rsv;
	entry->flags = flags;
	refcount_set(&entry->refs, 1);
	init_waitqueue_head(&entry->wait);
	INIT_LIST_HEAD(&entry->list);
	INIT_LIST_HEAD(&entry->log_list);
	INIT_LIST_HEAD(&entry->root_extent_list);
	INIT_LIST_HEAD(&entry->work_list);
	INIT_LIST_HEAD(&entry->bioc_list);
	init_completion(&entry->completion);

	/*
	 * We don't need the count_max_extents here, we can assume that all of
	 * that work has been done at higher layers, so this is truly the
	 * smallest the extent is going to get.
	 */
	spin_lock(&inode->lock);
	btrfs_mod_outstanding_extents(inode, 1);
	spin_unlock(&inode->lock);

	return entry;
}

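/*
 * Link an allocated ordered extent into the inode's ordered tree and the
 * root's list of ordered extents, taking the tree's own reference on it.
 */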
static void insert_ordered_extent(struct btrfs_ordered_extent *entry)
{
	struct btrfs_inode *inode = BTRFS_I(entry->inode);
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct rb_node *node;

	trace_btrfs_ordered_extent_add(inode, entry);

	percpu_counter_add_batch(&fs_info->ordered_bytes, entry->num_bytes,
				 fs_info->delalloc_batch);

	/* One ref for the tree. */
	refcount_inc(&entry->refs);

	spin_lock_irq(&inode->ordered_tree_lock);
	node = tree_insert(&inode->ordered_tree, entry->file_offset,
			   &entry->rb_node);
	if (node)
		btrfs_panic(fs_info, -EEXIST,
				"inconsistency in ordered tree at offset %llu",
				entry->file_offset);
	spin_unlock_irq(&inode->ordered_tree_lock);

	spin_lock(&root->ordered_extent_lock);
	list_add_tail(&entry->root_extent_list,
		      &root->ordered_extents);
	root->nr_ordered_extents++;
	if (root->nr_ordered_extents == 1) {
		spin_lock(&fs_info->ordered_root_lock);
		BUG_ON(!list_empty(&root->ordered_root));
		list_add_tail(&root->ordered_root, &fs_info->ordered_roots);
		spin_unlock(&fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);
}

/*
 * Add an ordered extent to the per-inode tree.
 *
 * @inode:           Inode that this extent is for.
 * @file_offset:     Logical offset in file where the extent starts.
 * @num_bytes:       Logical length of extent in file.
 * @ram_bytes:       Full length of unencoded data.
 * @disk_bytenr:     Offset of extent on disk.
 * @disk_num_bytes:  Size of extent on disk.
 * @offset:          Offset into unencoded data where file data starts.
 * @flags:           Flags specifying type of extent (1 << BTRFS_ORDERED_*).
 * @compress_type:   Compression algorithm used for data.
 *
 * Most of these parameters correspond to &struct btrfs_file_extent_item. The
 * tree is given a single reference on the ordered extent that was inserted, and
 * the returned pointer is given a second reference.
 *
 * Return: the new ordered extent or error pointer.
 */
struct btrfs_ordered_extent *btrfs_alloc_ordered_extent(
			struct btrfs_inode *inode, u64 file_offset,
			u64 num_bytes, u64 ram_bytes, u64 disk_bytenr,
			u64 disk_num_bytes, u64 offset, unsigned long flags,
			int compress_type)
{
	struct btrfs_ordered_extent *entry;

	ASSERT((flags & ~BTRFS_ORDERED_TYPE_FLAGS) == 0);

	entry = alloc_ordered_extent(inode, file_offset, num_bytes, ram_bytes,
				     disk_bytenr, disk_num_bytes, offset, flags,
				     compress_type);
	if (!IS_ERR(entry))
		insert_ordered_extent(entry);
	return entry;
}
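
/*
 * A minimal usage sketch (hypothetical, condensed from the write paths):
 * create the ordered extent once delalloc has been turned into a concrete
 * extent, submit the bios for the range, then drop the caller's reference.
 * The tree's reference is dropped later by the completion path.
 *
 *	ordered = btrfs_alloc_ordered_extent(inode, start, len, len,
 *					     disk_bytenr, len, 0,
 *					     1 << BTRFS_ORDERED_REGULAR,
 *					     BTRFS_COMPRESS_NONE);
 *	if (IS_ERR(ordered))
 *		return PTR_ERR(ordered);
 *	... submit bios for [start, start + len) ...
 *	btrfs_put_ordered_extent(ordered);
 */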

/*
 * Add a struct btrfs_ordered_sum into the list of checksums to be inserted
 * when an ordered extent is finished.  If the list covers more than one
 * ordered extent, it is split across multiple ordered extents.
 */
void btrfs_add_ordered_sum(struct btrfs_ordered_extent *entry,
			   struct btrfs_ordered_sum *sum)
{
	struct btrfs_inode *inode = BTRFS_I(entry->inode);

	spin_lock_irq(&inode->ordered_tree_lock);
	list_add_tail(&sum->list, &entry->list);
	spin_unlock_irq(&inode->ordered_tree_lock);
}

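/*
 * Work function run once all IO for an ordered extent has completed; it
 * calls btrfs_finish_ordered_io() from process context to do the completion
 * work (e.g. inserting the file extent item and checksums into the btrees).
 */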
static void finish_ordered_fn(struct btrfs_work *work)
{
	struct btrfs_ordered_extent *ordered_extent;

	ordered_extent = container_of(work, struct btrfs_ordered_extent, work);
	btrfs_finish_ordered_io(ordered_extent);
}

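/*
 * Account @len finished bytes against @ordered, with the ordered tree lock
 * held by the caller.
 *
 * Clears the per-folio Ordered bit for the sub-range when @page is given.
 * Returns true once bytes_left hits zero, in which case an extra reference
 * has been taken and the caller must queue the completion work.
 */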
static bool can_finish_ordered_extent(struct btrfs_ordered_extent *ordered,
				      struct page *page, u64 file_offset,
				      u64 len, bool uptodate)
{
	struct btrfs_inode *inode = BTRFS_I(ordered->inode);
	struct btrfs_fs_info *fs_info = inode->root->fs_info;

	lockdep_assert_held(&inode->ordered_tree_lock);

	if (page) {
		ASSERT(page->mapping);
		ASSERT(page_offset(page) <= file_offset);
		ASSERT(file_offset + len <= page_offset(page) + PAGE_SIZE);

		/*
		 * The Ordered (Private2) bit indicates whether we still have
		 * pending IO unfinished for the ordered extent.
		 *
		 * If there's no such bit, we need to skip to the next range.
		 */
		if (!btrfs_folio_test_ordered(fs_info, page_folio(page),
					      file_offset, len))
			return false;
		btrfs_folio_clear_ordered(fs_info, page_folio(page), file_offset, len);
	}

	/* Now we're fine to update the accounting. */
	if (WARN_ON_ONCE(len > ordered->bytes_left)) {
		btrfs_crit(fs_info,
"bad ordered extent accounting, root=%llu ino=%llu OE offset=%llu OE len=%llu to_dec=%llu left=%llu",
			   inode->root->root_key.objectid, btrfs_ino(inode),
			   ordered->file_offset, ordered->num_bytes,
			   len, ordered->bytes_left);
		ordered->bytes_left = 0;
	} else {
		ordered->bytes_left -= len;
	}

	if (!uptodate)
		set_bit(BTRFS_ORDERED_IOERR, &ordered->flags);

	if (ordered->bytes_left)
		return false;

	/*
	 * All the IO of the ordered extent is finished, we need to queue
	 * the completion work to be executed.
	 */
	set_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags);
	cond_wake_up(&ordered->wait);
	refcount_inc(&ordered->refs);
	trace_btrfs_ordered_extent_mark_finished(inode, ordered);
	return true;
}

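/*
 * Queue the completion work for an ordered extent.  Free space inodes use
 * a dedicated workqueue, presumably to avoid deadlocking against regular
 * end-io work during a transaction commit.
 */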
static void btrfs_queue_ordered_fn(struct btrfs_ordered_extent *ordered)
{
	struct btrfs_inode *inode = BTRFS_I(ordered->inode);
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_workqueue *wq = btrfs_is_free_space_inode(inode) ?
		fs_info->endio_freespace_worker : fs_info->endio_write_workers;

	btrfs_init_work(&ordered->work, finish_ordered_fn, NULL);
	btrfs_queue_work(wq, &ordered->work);
}

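/*
 * Mark @len bytes starting at @file_offset finished in @ordered, as part of
 * bio end-io handling.  Return true if this completed the whole ordered
 * extent and its completion work was queued.
 */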
bool btrfs_finish_ordered_extent(struct btrfs_ordered_extent *ordered,
				 struct page *page, u64 file_offset, u64 len,
				 bool uptodate)
{
	struct btrfs_inode *inode = BTRFS_I(ordered->inode);
	unsigned long flags;
	bool ret;

	trace_btrfs_finish_ordered_extent(inode, file_offset, len, uptodate);

	spin_lock_irqsave(&inode->ordered_tree_lock, flags);
	ret = can_finish_ordered_extent(ordered, page, file_offset, len, uptodate);
	spin_unlock_irqrestore(&inode->ordered_tree_lock, flags);

	if (ret)
		btrfs_queue_ordered_fn(ordered);
	return ret;
}

/*
 * Mark all ordered extent IO inside the specified range finished.
 *
 * @page:	 The involved page for the operation.
 *		 For uncompressed buffered IO, the page status also needs to be
 *		 updated to indicate whether the pending ordered IO is finished.
 *		 Can be NULL for direct IO and compressed write.
 *		 For these cases, the callers ensure the endio function won't be
 *		 executed twice.
 *
 * This function is called for endio, thus the range must have ordered
 * extent(s) covering it.
 */
void btrfs_mark_ordered_io_finished(struct btrfs_inode *inode,
				    struct page *page, u64 file_offset,
				    u64 num_bytes, bool uptodate)
{
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	unsigned long flags;
	u64 cur = file_offset;

	trace_btrfs_writepage_end_io_hook(inode, file_offset,
					  file_offset + num_bytes - 1,
					  uptodate);

	spin_lock_irqsave(&inode->ordered_tree_lock, flags);
	while (cur < file_offset + num_bytes) {
		u64 entry_end;
		u64 end;
		u32 len;

		node = ordered_tree_search(inode, cur);
		/* No ordered extents at all */
		if (!node)
			break;

		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		entry_end = entry->file_offset + entry->num_bytes;
		/*
		 * |<-- OE --->|  |
		 *		  cur
		 * Go to next OE.
		 */
		if (cur >= entry_end) {
			node = rb_next(node);
			/* No more ordered extents, exit */
			if (!node)
				break;
			entry = rb_entry(node, struct btrfs_ordered_extent,
					 rb_node);

			/* Go to next ordered extent and continue */
			cur = entry->file_offset;
			continue;
		}
		/*
		 * |	|<--- OE --->|
		 * cur
		 * Go to the start of OE.
		 */
		if (cur < entry->file_offset) {
			cur = entry->file_offset;
			continue;
		}

		/*
		 * Now we are definitely inside one ordered extent.
		 *
		 * |<--- OE --->|
		 *	|
		 *	cur
		 */
		end = min(entry->file_offset + entry->num_bytes,
			  file_offset + num_bytes) - 1;
		ASSERT(end + 1 - cur < U32_MAX);
		len = end + 1 - cur;

		if (can_finish_ordered_extent(entry, page, cur, len, uptodate)) {
			spin_unlock_irqrestore(&inode->ordered_tree_lock, flags);
			btrfs_queue_ordered_fn(entry);
			spin_lock_irqsave(&inode->ordered_tree_lock, flags);
		}
		cur += len;
	}
	spin_unlock_irqrestore(&inode->ordered_tree_lock, flags);
}

/*
 * Finish IO for one ordered extent across a given range.  The range can only
 * contain one ordered extent.
 *
 * @cached:	 The cached ordered extent. If not NULL, we can skip the tree
 *		 search and use the ordered extent directly.
 *		 Will also be used to store the finished ordered extent.
 * @file_offset: File offset for the finished IO
 * @io_size:	 Length of the IO range to finish
 *
 * Return true if the ordered extent is finished in the range, and update
 * @cached.
 * Return false otherwise.
 *
 * NOTE: The range can NOT cross multiple ordered extents.
 * Thus the caller should ensure the range doesn't cross ordered extents.
 */
bool btrfs_dec_test_ordered_pending(struct btrfs_inode *inode,
				    struct btrfs_ordered_extent **cached,
				    u64 file_offset, u64 io_size)
{
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	unsigned long flags;
	bool finished = false;

	spin_lock_irqsave(&inode->ordered_tree_lock, flags);
	if (cached && *cached) {
		entry = *cached;
		goto have_entry;
	}

	node = ordered_tree_search(inode, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
have_entry:
	if (!in_range(file_offset, entry->file_offset, entry->num_bytes))
		goto out;

	if (io_size > entry->bytes_left)
		btrfs_crit(inode->root->fs_info,
			   "bad ordered accounting left %llu size %llu",
			   entry->bytes_left, io_size);

	entry->bytes_left -= io_size;

	if (entry->bytes_left == 0) {
		/*
		 * Ensure only one caller can set the flag and report the
		 * extent as finished accordingly.
		 */
		finished = !test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
		/* test_and_set_bit implies a barrier */
		cond_wake_up_nomb(&entry->wait);
	}
out:
	if (finished && cached && entry) {
		*cached = entry;
		refcount_inc(&entry->refs);
		trace_btrfs_ordered_extent_dec_test_pending(inode, entry);
	}
	spin_unlock_irqrestore(&inode->ordered_tree_lock, flags);
	return finished;
}

/*
 * Drop a reference on an ordered extent.  This will free the extent if the
 * last reference is dropped.
 */
void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
{
	struct list_head *cur;
	struct btrfs_ordered_sum *sum;

	trace_btrfs_ordered_extent_put(BTRFS_I(entry->inode), entry);

	if (refcount_dec_and_test(&entry->refs)) {
		ASSERT(list_empty(&entry->root_extent_list));
		ASSERT(list_empty(&entry->log_list));
		ASSERT(RB_EMPTY_NODE(&entry->rb_node));
		if (entry->inode)
			btrfs_add_delayed_iput(BTRFS_I(entry->inode));
		while (!list_empty(&entry->list)) {
			cur = entry->list.next;
			sum = list_entry(cur, struct btrfs_ordered_sum, list);
			list_del(&sum->list);
			kvfree(sum);
		}
		kmem_cache_free(btrfs_ordered_extent_cache, entry);
	}
}

/*
 * Remove an ordered extent from the tree.  No references are dropped, but
 * waiters are woken up.
 */
void btrfs_remove_ordered_extent(struct btrfs_inode *btrfs_inode,
				 struct btrfs_ordered_extent *entry)
{
	struct btrfs_root *root = btrfs_inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct rb_node *node;
	bool pending;
	bool freespace_inode;

	/*
	 * If this is a free space inode the thread has not acquired the ordered
	 * extents lockdep map.
	 */
	freespace_inode = btrfs_is_free_space_inode(btrfs_inode);

	btrfs_lockdep_acquire(fs_info, btrfs_trans_pending_ordered);
	/* This is paired with btrfs_alloc_ordered_extent. */
	spin_lock(&btrfs_inode->lock);
	btrfs_mod_outstanding_extents(btrfs_inode, -1);
	spin_unlock(&btrfs_inode->lock);
	if (root != fs_info->tree_root) {
		u64 release;

		if (test_bit(BTRFS_ORDERED_ENCODED, &entry->flags))
			release = entry->disk_num_bytes;
		else
			release = entry->num_bytes;
		btrfs_delalloc_release_metadata(btrfs_inode, release,
						test_bit(BTRFS_ORDERED_IOERR,
							 &entry->flags));
	}

	percpu_counter_add_batch(&fs_info->ordered_bytes, -entry->num_bytes,
				 fs_info->delalloc_batch);

	spin_lock_irq(&btrfs_inode->ordered_tree_lock);
	node = &entry->rb_node;
	rb_erase(node, &btrfs_inode->ordered_tree);
	RB_CLEAR_NODE(node);
	if (btrfs_inode->ordered_tree_last == node)
		btrfs_inode->ordered_tree_last = NULL;
	set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
	pending = test_and_clear_bit(BTRFS_ORDERED_PENDING, &entry->flags);
	spin_unlock_irq(&btrfs_inode->ordered_tree_lock);

	/*
	 * The current running transaction is waiting on us, we need to let it
	 * know that we're complete and wake it up.
	 */
	if (pending) {
		struct btrfs_transaction *trans;

		/*
		 * The checks for trans are just a formality, it should be set,
		 * but if it isn't we don't want to deref/assert under the spin
		 * lock, so be nice and check if trans is set, but ASSERT() so
		 * if it isn't set a developer will notice.
		 */
		spin_lock(&fs_info->trans_lock);
		trans = fs_info->running_transaction;
		if (trans)
			refcount_inc(&trans->use_count);
		spin_unlock(&fs_info->trans_lock);

		ASSERT(trans || BTRFS_FS_ERROR(fs_info));
		if (trans) {
			if (atomic_dec_and_test(&trans->pending_ordered))
				wake_up(&trans->pending_wait);
			btrfs_put_transaction(trans);
		}
	}

	btrfs_lockdep_release(fs_info, btrfs_trans_pending_ordered);

	spin_lock(&root->ordered_extent_lock);
	list_del_init(&entry->root_extent_list);
	root->nr_ordered_extents--;

	trace_btrfs_ordered_extent_remove(btrfs_inode, entry);

	if (!root->nr_ordered_extents) {
		spin_lock(&fs_info->ordered_root_lock);
		BUG_ON(list_empty(&root->ordered_root));
		list_del_init(&root->ordered_root);
		spin_unlock(&fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);
	wake_up(&entry->wait);
	if (!freespace_inode)
		btrfs_lockdep_release(fs_info, btrfs_ordered_extent);
}

static void btrfs_run_ordered_extent_work(struct btrfs_work *work)
{
	struct btrfs_ordered_extent *ordered;

	ordered = container_of(work, struct btrfs_ordered_extent, flush_work);
	btrfs_start_ordered_extent(ordered);
	complete(&ordered->completion);
}

/*
 * Wait for all the ordered extents in a root whose disk bytenr range
 * overlaps [range_start, range_start + range_len).  This is done when
 * balancing space between drives.
 */
u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
			       const u64 range_start, const u64 range_len)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	LIST_HEAD(splice);
	LIST_HEAD(skipped);
	LIST_HEAD(works);
	struct btrfs_ordered_extent *ordered, *next;
	u64 count = 0;
	const u64 range_end = range_start + range_len;

	mutex_lock(&root->ordered_extent_mutex);
	spin_lock(&root->ordered_extent_lock);
	list_splice_init(&root->ordered_extents, &splice);
	while (!list_empty(&splice) && nr) {
		ordered = list_first_entry(&splice, struct btrfs_ordered_extent,
					   root_extent_list);

		if (range_end <= ordered->disk_bytenr ||
		    ordered->disk_bytenr + ordered->disk_num_bytes <= range_start) {
			list_move_tail(&ordered->root_extent_list, &skipped);
			cond_resched_lock(&root->ordered_extent_lock);
			continue;
		}

		list_move_tail(&ordered->root_extent_list,
			       &root->ordered_extents);
		refcount_inc(&ordered->refs);
		spin_unlock(&root->ordered_extent_lock);

		btrfs_init_work(&ordered->flush_work,
				btrfs_run_ordered_extent_work, NULL);
		list_add_tail(&ordered->work_list, &works);
		btrfs_queue_work(fs_info->flush_workers, &ordered->flush_work);

		cond_resched();
		spin_lock(&root->ordered_extent_lock);
		if (nr != U64_MAX)
			nr--;
		count++;
	}
	list_splice_tail(&skipped, &root->ordered_extents);
	list_splice_tail(&splice, &root->ordered_extents);
	spin_unlock(&root->ordered_extent_lock);

	list_for_each_entry_safe(ordered, next, &works, work_list) {
		list_del_init(&ordered->work_list);
		wait_for_completion(&ordered->completion);
		btrfs_put_ordered_extent(ordered);
		cond_resched();
	}
	mutex_unlock(&root->ordered_extent_mutex);

	return count;
}

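/*
 * Wait for up to @nr ordered extents in the given disk bytenr range across
 * all roots that currently have ordered extents.
 */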
void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
			      const u64 range_start, const u64 range_len)
{
	struct btrfs_root *root;
	LIST_HEAD(splice);
	u64 done;

	mutex_lock(&fs_info->ordered_operations_mutex);
	spin_lock(&fs_info->ordered_root_lock);
	list_splice_init(&fs_info->ordered_roots, &splice);
	while (!list_empty(&splice) && nr) {
		root = list_first_entry(&splice, struct btrfs_root,
					ordered_root);
		root = btrfs_grab_root(root);
		BUG_ON(!root);
		list_move_tail(&root->ordered_root,
			       &fs_info->ordered_roots);
		spin_unlock(&fs_info->ordered_root_lock);

		done = btrfs_wait_ordered_extents(root, nr,
						  range_start, range_len);
		btrfs_put_root(root);

		spin_lock(&fs_info->ordered_root_lock);
		if (nr != U64_MAX)
			nr -= done;
	}
	list_splice_tail(&splice, &fs_info->ordered_roots);
	spin_unlock(&fs_info->ordered_root_lock);
	mutex_unlock(&fs_info->ordered_operations_mutex);
}

/*
 * Start IO and wait for a given ordered extent to finish.
 *
 * Wait on page writeback for all the pages in the extent and the IO completion
 * code to insert metadata into the btree corresponding to the extent.
 */
void btrfs_start_ordered_extent(struct btrfs_ordered_extent *entry)
{
	u64 start = entry->file_offset;
	u64 end = start + entry->num_bytes - 1;
	struct btrfs_inode *inode = BTRFS_I(entry->inode);
	bool freespace_inode;

	trace_btrfs_ordered_extent_start(inode, entry);

	/*
	 * If this is a free space inode do not take the ordered extents lockdep
	 * map.
	 */
	freespace_inode = btrfs_is_free_space_inode(inode);

	/*
	 * Pages in the range can be dirty, clean or writeback.  We start IO
	 * on any dirty ones so the wait doesn't stall waiting for the
	 * flusher thread to find them.
	 */
	if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
		filemap_fdatawrite_range(inode->vfs_inode.i_mapping, start, end);

	if (!freespace_inode)
		btrfs_might_wait_for_event(inode->root->fs_info, btrfs_ordered_extent);
	wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE, &entry->flags));
}
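
/*
 * A minimal caller sketch (hypothetical): flush and wait for whatever
 * ordered extent covers a given offset before depending on its metadata
 * being on disk.
 *
 *	ordered = btrfs_lookup_ordered_extent(inode, offset);
 *	if (ordered) {
 *		btrfs_start_ordered_extent(ordered);
 *		btrfs_put_ordered_extent(ordered);
 *	}
 */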

/*
 * Used to wait on ordered extents across a large range of bytes (e.g. from
 * the fsync and truncate paths).
 */
int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
{
	int ret = 0;
	int ret_wb = 0;
	u64 end;
	u64 orig_end;
	struct btrfs_ordered_extent *ordered;

	if (start + len < start) {
		orig_end = OFFSET_MAX;
	} else {
		orig_end = start + len - 1;
		if (orig_end > OFFSET_MAX)
			orig_end = OFFSET_MAX;
	}

	/*
	 * Start IO across the range first to instantiate any delalloc
	 * extents.
	 */
	ret = btrfs_fdatawrite_range(inode, start, orig_end);
	if (ret)
		return ret;

	/*
	 * If we have a writeback error don't return immediately. Wait first
	 * for any ordered extents that haven't completed yet. This is to make
	 * sure no one can dirty the same page ranges and call writepages()
	 * before the ordered extents complete - to avoid failures (-EEXIST)
	 * when adding the new ordered extents to the ordered tree.
	 */
	ret_wb = filemap_fdatawait_range(inode->i_mapping, start, orig_end);

	end = orig_end;
	while (1) {
		ordered = btrfs_lookup_first_ordered_extent(BTRFS_I(inode), end);
		if (!ordered)
			break;
		if (ordered->file_offset > orig_end) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		if (ordered->file_offset + ordered->num_bytes <= start) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		btrfs_start_ordered_extent(ordered);
		end = ordered->file_offset;
		/*
		 * If the ordered extent had an error save the error but don't
		 * exit without waiting first for all other ordered extents in
		 * the range to complete.
		 */
		if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
			ret = -EIO;
		btrfs_put_ordered_extent(ordered);
		if (end == 0 || end == start)
			break;
		end--;
	}
	return ret_wb ? ret_wb : ret;
}

/*
 * Find an ordered extent corresponding to @file_offset.  Return NULL if
 * nothing is found, otherwise take a reference on the extent and return it.
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct btrfs_inode *inode,
							 u64 file_offset)
{
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	unsigned long flags;

	spin_lock_irqsave(&inode->ordered_tree_lock, flags);
	node = ordered_tree_search(inode, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!in_range(file_offset, entry->file_offset, entry->num_bytes))
		entry = NULL;
	if (entry) {
		refcount_inc(&entry->refs);
		trace_btrfs_ordered_extent_lookup(inode, entry);
	}
out:
	spin_unlock_irqrestore(&inode->ordered_tree_lock, flags);
	return entry;
}

/*
 * Since the DIO code tries to lock a wide area we need to look for any ordered
 * extents that exist in the range, rather than just the start of the range.
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_range(
		struct btrfs_inode *inode, u64 file_offset, u64 len)
{
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	spin_lock_irq(&inode->ordered_tree_lock);
	node = ordered_tree_search(inode, file_offset);
	if (!node) {
		node = ordered_tree_search(inode, file_offset + len);
		if (!node)
			goto out;
	}

	while (1) {
		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		if (range_overlaps(entry, file_offset, len))
			break;

		if (entry->file_offset >= file_offset + len) {
			entry = NULL;
			break;
		}
		entry = NULL;
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	if (entry) {
		refcount_inc(&entry->refs);
		trace_btrfs_ordered_extent_lookup_range(inode, entry);
	}
	spin_unlock_irq(&inode->ordered_tree_lock);
	return entry;
}

/*
 * Adds all ordered extents to the given list. The list ends up sorted by the
 * file_offset of the ordered extents.
 */
void btrfs_get_ordered_extents_for_logging(struct btrfs_inode *inode,
					   struct list_head *list)
{
	struct rb_node *n;

	ASSERT(inode_is_locked(&inode->vfs_inode));

	spin_lock_irq(&inode->ordered_tree_lock);
	for (n = rb_first(&inode->ordered_tree); n; n = rb_next(n)) {
		struct btrfs_ordered_extent *ordered;

		ordered = rb_entry(n, struct btrfs_ordered_extent, rb_node);

		if (test_bit(BTRFS_ORDERED_LOGGED, &ordered->flags))
			continue;

		ASSERT(list_empty(&ordered->log_list));
		list_add_tail(&ordered->log_list, list);
		refcount_inc(&ordered->refs);
		trace_btrfs_ordered_extent_lookup_for_logging(inode, ordered);
	}
	spin_unlock_irq(&inode->ordered_tree_lock);
}

/*
 * Lookup and return any extent before @file_offset.  NULL is returned if
 * none is found.
 */
struct btrfs_ordered_extent *
btrfs_lookup_first_ordered_extent(struct btrfs_inode *inode, u64 file_offset)
{
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	spin_lock_irq(&inode->ordered_tree_lock);
	node = ordered_tree_search(inode, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	refcount_inc(&entry->refs);
	trace_btrfs_ordered_extent_lookup_first(inode, entry);
out:
	spin_unlock_irq(&inode->ordered_tree_lock);
	return entry;
}

/*
 * Lookup the first ordered extent that overlaps the range
 * [@file_offset, @file_offset + @len).
 *
 * The difference between this and btrfs_lookup_first_ordered_extent() is
 * that this one won't return any ordered extent that does not overlap the range.
 * And the difference against btrfs_lookup_ordered_extent() is, this function
 * ensures the first ordered extent gets returned.
 */
struct btrfs_ordered_extent *btrfs_lookup_first_ordered_range(
			struct btrfs_inode *inode, u64 file_offset, u64 len)
{
	struct rb_node *node;
	struct rb_node *cur;
	struct rb_node *prev;
	struct rb_node *next;
	struct btrfs_ordered_extent *entry = NULL;

	spin_lock_irq(&inode->ordered_tree_lock);
	node = inode->ordered_tree.rb_node;
	/*
	 * Here we don't want to use ordered_tree_search() as it will use the
	 * cached ordered_tree_last and screw up the search order.
	 * And __tree_search() can't return the adjacent ordered extents
	 * either, thus here we do our own search.
	 */
	while (node) {
		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);

		if (file_offset < entry->file_offset) {
			node = node->rb_left;
		} else if (file_offset >= entry_end(entry)) {
			node = node->rb_right;
		} else {
			/*
			 * Direct hit, got an ordered extent that starts at
			 * @file_offset
			 */
			goto out;
		}
	}
	if (!entry) {
		/* Empty tree */
		goto out;
	}

	cur = &entry->rb_node;
	/* We got an entry around @file_offset, check adjacent entries */
	if (entry->file_offset < file_offset) {
		prev = cur;
		next = rb_next(cur);
	} else {
		prev = rb_prev(cur);
		next = cur;
	}
	if (prev) {
		entry = rb_entry(prev, struct btrfs_ordered_extent, rb_node);
		if (range_overlaps(entry, file_offset, len))
			goto out;
	}
	if (next) {
		entry = rb_entry(next, struct btrfs_ordered_extent, rb_node);
		if (range_overlaps(entry, file_offset, len))
			goto out;
	}
	/* No ordered extent in the range */
	entry = NULL;
out:
	if (entry) {
		refcount_inc(&entry->refs);
		trace_btrfs_ordered_extent_lookup_first_range(inode, entry);
	}

	spin_unlock_irq(&inode->ordered_tree_lock);
	return entry;
}

/*
 * Lock the passed range and ensure all pending ordered extents in it are run
 * to completion.
 *
 * @inode:        Inode whose ordered tree is to be searched
 * @start:        Beginning of range to flush
 * @end:          Last byte of range to lock
 * @cached_state: If passed, will return the extent state responsible for the
 *                locked range. It's the caller's responsibility to free the
 *                cached state.
 *
 * Always return with the given range locked, ensuring that after it's called
 * no ordered extent can be pending.
 */
void btrfs_lock_and_flush_ordered_range(struct btrfs_inode *inode, u64 start,
					u64 end,
					struct extent_state **cached_state)
{
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cache = NULL;
	struct extent_state **cachedp = &cache;

	if (cached_state)
		cachedp = cached_state;

	while (1) {
		lock_extent(&inode->io_tree, start, end, cachedp);
		ordered = btrfs_lookup_ordered_range(inode, start,
						     end - start + 1);
		if (!ordered) {
			/*
			 * If no external cached_state has been passed then
			 * decrement the extra ref taken for cachedp since we
			 * aren't exposing it outside of this function.
			 */
			if (!cached_state)
				refcount_dec(&cache->refs);
			break;
		}
		unlock_extent(&inode->io_tree, start, end, cachedp);
		btrfs_start_ordered_extent(ordered);
		btrfs_put_ordered_extent(ordered);
	}
}

/*
 * Lock the passed range and ensure all pending ordered extents in it are run
 * to completion in nowait mode.
 *
 * Return true if the range was locked and no ordered extents were pending in
 * it, otherwise false.
 */
bool btrfs_try_lock_ordered_range(struct btrfs_inode *inode, u64 start, u64 end,
				  struct extent_state **cached_state)
{
	struct btrfs_ordered_extent *ordered;

	if (!try_lock_extent(&inode->io_tree, start, end, cached_state))
		return false;

	ordered = btrfs_lookup_ordered_range(inode, start, end - start + 1);
	if (!ordered)
		return true;

	btrfs_put_ordered_extent(ordered);
	unlock_extent(&inode->io_tree, start, end, cached_state);

	return false;
}

/*
 * Split out a new ordered extent for the first @len bytes of @ordered.  This
 * is used e.g. when a bio submitted on a zoned device has to be split so it
 * doesn't cross a zone boundary.
 */
struct btrfs_ordered_extent *btrfs_split_ordered_extent(
			struct btrfs_ordered_extent *ordered, u64 len)
{
	struct btrfs_inode *inode = BTRFS_I(ordered->inode);
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 file_offset = ordered->file_offset;
	u64 disk_bytenr = ordered->disk_bytenr;
	unsigned long flags = ordered->flags;
	struct btrfs_ordered_sum *sum, *tmpsum;
	struct btrfs_ordered_extent *new;
	struct rb_node *node;
	u64 offset = 0;

	trace_btrfs_ordered_extent_split(inode, ordered);

	ASSERT(!(flags & (1U << BTRFS_ORDERED_COMPRESSED)));

	/*
	 * The entire bio must be covered by the ordered extent, but we can't
	 * reduce the original extent to a zero length either.
	 */
	if (WARN_ON_ONCE(len >= ordered->num_bytes))
		return ERR_PTR(-EINVAL);
	/* We cannot split partially completed ordered extents. */
	if (ordered->bytes_left) {
		ASSERT(!(flags & ~BTRFS_ORDERED_TYPE_FLAGS));
		if (WARN_ON_ONCE(ordered->bytes_left != ordered->disk_num_bytes))
			return ERR_PTR(-EINVAL);
	}
	/* We cannot split a compressed ordered extent. */
	if (WARN_ON_ONCE(ordered->disk_num_bytes != ordered->num_bytes))
		return ERR_PTR(-EINVAL);

	new = alloc_ordered_extent(inode, file_offset, len, len, disk_bytenr,
				   len, 0, flags, ordered->compress_type);
	if (IS_ERR(new))
		return new;

	/* One ref for the tree. */
	refcount_inc(&new->refs);

	spin_lock_irq(&root->ordered_extent_lock);
	spin_lock(&inode->ordered_tree_lock);
	/* Remove from tree once */
	node = &ordered->rb_node;
	rb_erase(node, &inode->ordered_tree);
	RB_CLEAR_NODE(node);
	if (inode->ordered_tree_last == node)
		inode->ordered_tree_last = NULL;

	ordered->file_offset += len;
	ordered->disk_bytenr += len;
	ordered->num_bytes -= len;
	ordered->disk_num_bytes -= len;

	if (test_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags)) {
		ASSERT(ordered->bytes_left == 0);
		new->bytes_left = 0;
	} else {
		ordered->bytes_left -= len;
	}

	if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags)) {
		if (ordered->truncated_len > len) {
			ordered->truncated_len -= len;
		} else {
			new->truncated_len = ordered->truncated_len;
			ordered->truncated_len = 0;
		}
	}

	list_for_each_entry_safe(sum, tmpsum, &ordered->list, list) {
		if (offset == len)
			break;
		list_move_tail(&sum->list, &new->list);
		offset += sum->len;
	}

	/* Re-insert the node */
	node = tree_insert(&inode->ordered_tree, ordered->file_offset,
			   &ordered->rb_node);
	if (node)
		btrfs_panic(fs_info, -EEXIST,
			"zoned: inconsistency in ordered tree at offset %llu",
			ordered->file_offset);

	node = tree_insert(&inode->ordered_tree, new->file_offset, &new->rb_node);
	if (node)
		btrfs_panic(fs_info, -EEXIST,
			"zoned: inconsistency in ordered tree at offset %llu",
			new->file_offset);
	spin_unlock(&inode->ordered_tree_lock);

	list_add_tail(&new->root_extent_list, &root->ordered_extents);
	root->nr_ordered_extents++;
	spin_unlock_irq(&root->ordered_extent_lock);
	return new;
}

int __init ordered_data_init(void)
{
	btrfs_ordered_extent_cache = KMEM_CACHE(btrfs_ordered_extent, 0);
	if (!btrfs_ordered_extent_cache)
		return -ENOMEM;

	return 0;
}

void __cold ordered_data_exit(void)
{
	kmem_cache_destroy(btrfs_ordered_extent_cache);
}