1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2007 Oracle.  All rights reserved.
4 */
5
6#include <crypto/hash.h>
7#include <linux/kernel.h>
8#include <linux/bio.h>
9#include <linux/blk-cgroup.h>
10#include <linux/file.h>
11#include <linux/fs.h>
12#include <linux/pagemap.h>
13#include <linux/highmem.h>
14#include <linux/time.h>
15#include <linux/init.h>
16#include <linux/string.h>
17#include <linux/backing-dev.h>
18#include <linux/writeback.h>
19#include <linux/compat.h>
20#include <linux/xattr.h>
21#include <linux/posix_acl.h>
22#include <linux/falloc.h>
23#include <linux/slab.h>
24#include <linux/ratelimit.h>
25#include <linux/btrfs.h>
26#include <linux/blkdev.h>
27#include <linux/posix_acl_xattr.h>
28#include <linux/uio.h>
29#include <linux/magic.h>
30#include <linux/iversion.h>
31#include <linux/swap.h>
32#include <linux/migrate.h>
33#include <linux/sched/mm.h>
34#include <linux/iomap.h>
35#include <asm/unaligned.h>
36#include <linux/fsverity.h>
37#include "misc.h"
38#include "ctree.h"
39#include "disk-io.h"
40#include "transaction.h"
41#include "btrfs_inode.h"
42#include "ordered-data.h"
43#include "xattr.h"
44#include "tree-log.h"
45#include "bio.h"
46#include "compression.h"
47#include "locking.h"
48#include "props.h"
49#include "qgroup.h"
50#include "delalloc-space.h"
51#include "block-group.h"
52#include "space-info.h"
53#include "zoned.h"
54#include "subpage.h"
55#include "inode-item.h"
56#include "fs.h"
57#include "accessors.h"
58#include "extent-tree.h"
59#include "root-tree.h"
60#include "defrag.h"
61#include "dir-item.h"
62#include "file-item.h"
63#include "uuid-tree.h"
64#include "ioctl.h"
65#include "file.h"
66#include "acl.h"
67#include "relocation.h"
68#include "verity.h"
69#include "super.h"
70#include "orphan.h"
71#include "backref.h"
72#include "raid-stripe-tree.h"
73
74struct btrfs_iget_args {
75	u64 ino;
76	struct btrfs_root *root;
77};
78
79struct btrfs_dio_data {
80	ssize_t submitted;
81	struct extent_changeset *data_reserved;
82	struct btrfs_ordered_extent *ordered;
83	bool data_space_reserved;
84	bool nocow_done;
85};
86
87struct btrfs_dio_private {
88	/* Range of I/O */
89	u64 file_offset;
90	u32 bytes;
91
92	/* This must be last */
93	struct btrfs_bio bbio;
94};
95
96static struct bio_set btrfs_dio_bioset;
97
98struct btrfs_rename_ctx {
99	/* Output field. Stores the index number of the old directory entry. */
100	u64 index;
101};
102
103/*
104 * Used by data_reloc_print_warning_inode() to pass needed info for filename
105 * resolution and output of error message.
106 */
107struct data_reloc_warn {
108	struct btrfs_path path;
109	struct btrfs_fs_info *fs_info;
110	u64 extent_item_size;
111	u64 logical;
112	int mirror_num;
113};
114
115/*
116 * For the file_extent_tree, we want to hold the inode lock when we lookup and
117 * update the disk_i_size, but lockdep will complain because our io_tree we hold
118 * the tree lock and get the inode lock when setting delalloc. These two things
119 * are unrelated, so make a class for the file_extent_tree so we don't get the
120 * two locking patterns mixed up.
121 */
122static struct lock_class_key file_extent_tree_class;
123
124static const struct inode_operations btrfs_dir_inode_operations;
125static const struct inode_operations btrfs_symlink_inode_operations;
126static const struct inode_operations btrfs_special_inode_operations;
127static const struct inode_operations btrfs_file_inode_operations;
128static const struct address_space_operations btrfs_aops;
129static const struct file_operations btrfs_dir_file_operations;
130
131static struct kmem_cache *btrfs_inode_cachep;
132
133static int btrfs_setsize(struct inode *inode, struct iattr *attr);
134static int btrfs_truncate(struct btrfs_inode *inode, bool skip_writeback);
135
136static noinline int run_delalloc_cow(struct btrfs_inode *inode,
137				     struct page *locked_page, u64 start,
138				     u64 end, struct writeback_control *wbc,
139				     bool pages_dirty);
140static struct extent_map *create_io_em(struct btrfs_inode *inode, u64 start,
141				       u64 len, u64 orig_start, u64 block_start,
142				       u64 block_len, u64 orig_block_len,
143				       u64 ram_bytes, int compress_type,
144				       int type);
145
146static int data_reloc_print_warning_inode(u64 inum, u64 offset, u64 num_bytes,
147					  u64 root, void *warn_ctx)
148{
149	struct data_reloc_warn *warn = warn_ctx;
150	struct btrfs_fs_info *fs_info = warn->fs_info;
151	struct extent_buffer *eb;
152	struct btrfs_inode_item *inode_item;
153	struct inode_fs_paths *ipath = NULL;
154	struct btrfs_root *local_root;
155	struct btrfs_key key;
156	unsigned int nofs_flag;
157	u32 nlink;
158	int ret;
159
160	local_root = btrfs_get_fs_root(fs_info, root, true);
161	if (IS_ERR(local_root)) {
162		ret = PTR_ERR(local_root);
163		goto err;
164	}
165
166	/* This makes the path point to (inum INODE_ITEM ioff). */
167	key.objectid = inum;
168	key.type = BTRFS_INODE_ITEM_KEY;
169	key.offset = 0;
170
171	ret = btrfs_search_slot(NULL, local_root, &key, &warn->path, 0, 0);
172	if (ret) {
173		btrfs_put_root(local_root);
174		btrfs_release_path(&warn->path);
175		goto err;
176	}
177
178	eb = warn->path.nodes[0];
179	inode_item = btrfs_item_ptr(eb, warn->path.slots[0], struct btrfs_inode_item);
180	nlink = btrfs_inode_nlink(eb, inode_item);
181	btrfs_release_path(&warn->path);
182
183	nofs_flag = memalloc_nofs_save();
184	ipath = init_ipath(4096, local_root, &warn->path);
185	memalloc_nofs_restore(nofs_flag);
186	if (IS_ERR(ipath)) {
187		btrfs_put_root(local_root);
188		ret = PTR_ERR(ipath);
189		ipath = NULL;
190		/*
191		 * -ENOMEM, not a critical error, just output an generic error
192		 * without filename.
193		 */
194		btrfs_warn(fs_info,
195"checksum error at logical %llu mirror %u root %llu, inode %llu offset %llu",
196			   warn->logical, warn->mirror_num, root, inum, offset);
197		return ret;
198	}
199	ret = paths_from_inode(inum, ipath);
200	if (ret < 0)
201		goto err;
202
203	/*
204	 * We deliberately ignore the bit ipath might have been too small to
205	 * hold all of the paths here
206	 */
207	for (int i = 0; i < ipath->fspath->elem_cnt; i++) {
208		btrfs_warn(fs_info,
209"checksum error at logical %llu mirror %u root %llu inode %llu offset %llu length %u links %u (path: %s)",
210			   warn->logical, warn->mirror_num, root, inum, offset,
211			   fs_info->sectorsize, nlink,
212			   (char *)(unsigned long)ipath->fspath->val[i]);
213	}
214
215	btrfs_put_root(local_root);
216	free_ipath(ipath);
217	return 0;
218
219err:
220	btrfs_warn(fs_info,
221"checksum error at logical %llu mirror %u root %llu inode %llu offset %llu, path resolving failed with ret=%d",
222		   warn->logical, warn->mirror_num, root, inum, offset, ret);
223
224	free_ipath(ipath);
225	return ret;
226}
227
228/*
229 * Do extra user-friendly error output (e.g. lookup all the affected files).
230 *
231 * Return true if we succeeded doing the backref lookup.
232 * Return false if such lookup failed, and has to fallback to the old error message.
233 */
234static void print_data_reloc_error(const struct btrfs_inode *inode, u64 file_off,
235				   const u8 *csum, const u8 *csum_expected,
236				   int mirror_num)
237{
238	struct btrfs_fs_info *fs_info = inode->root->fs_info;
239	struct btrfs_path path = { 0 };
240	struct btrfs_key found_key = { 0 };
241	struct extent_buffer *eb;
242	struct btrfs_extent_item *ei;
243	const u32 csum_size = fs_info->csum_size;
244	u64 logical;
245	u64 flags;
246	u32 item_size;
247	int ret;
248
249	mutex_lock(&fs_info->reloc_mutex);
250	logical = btrfs_get_reloc_bg_bytenr(fs_info);
251	mutex_unlock(&fs_info->reloc_mutex);
252
253	if (logical == U64_MAX) {
254		btrfs_warn_rl(fs_info, "has data reloc tree but no running relocation");
255		btrfs_warn_rl(fs_info,
256"csum failed root %lld ino %llu off %llu csum " CSUM_FMT " expected csum " CSUM_FMT " mirror %d",
257			inode->root->root_key.objectid, btrfs_ino(inode), file_off,
258			CSUM_FMT_VALUE(csum_size, csum),
259			CSUM_FMT_VALUE(csum_size, csum_expected),
260			mirror_num);
261		return;
262	}
263
264	logical += file_off;
265	btrfs_warn_rl(fs_info,
266"csum failed root %lld ino %llu off %llu logical %llu csum " CSUM_FMT " expected csum " CSUM_FMT " mirror %d",
267			inode->root->root_key.objectid,
268			btrfs_ino(inode), file_off, logical,
269			CSUM_FMT_VALUE(csum_size, csum),
270			CSUM_FMT_VALUE(csum_size, csum_expected),
271			mirror_num);
272
273	ret = extent_from_logical(fs_info, logical, &path, &found_key, &flags);
274	if (ret < 0) {
275		btrfs_err_rl(fs_info, "failed to lookup extent item for logical %llu: %d",
276			     logical, ret);
277		return;
278	}
279	eb = path.nodes[0];
280	ei = btrfs_item_ptr(eb, path.slots[0], struct btrfs_extent_item);
281	item_size = btrfs_item_size(eb, path.slots[0]);
282	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
283		unsigned long ptr = 0;
284		u64 ref_root;
285		u8 ref_level;
286
287		while (true) {
288			ret = tree_backref_for_extent(&ptr, eb, &found_key, ei,
289						      item_size, &ref_root,
290						      &ref_level);
291			if (ret < 0) {
292				btrfs_warn_rl(fs_info,
293				"failed to resolve tree backref for logical %llu: %d",
294					      logical, ret);
295				break;
296			}
297			if (ret > 0)
298				break;
299
300			btrfs_warn_rl(fs_info,
301"csum error at logical %llu mirror %u: metadata %s (level %d) in tree %llu",
302				logical, mirror_num,
303				(ref_level ? "node" : "leaf"),
304				ref_level, ref_root);
305		}
306		btrfs_release_path(&path);
307	} else {
308		struct btrfs_backref_walk_ctx ctx = { 0 };
309		struct data_reloc_warn reloc_warn = { 0 };
310
311		btrfs_release_path(&path);
312
313		ctx.bytenr = found_key.objectid;
314		ctx.extent_item_pos = logical - found_key.objectid;
315		ctx.fs_info = fs_info;
316
317		reloc_warn.logical = logical;
318		reloc_warn.extent_item_size = found_key.offset;
319		reloc_warn.mirror_num = mirror_num;
320		reloc_warn.fs_info = fs_info;
321
322		iterate_extent_inodes(&ctx, true,
323				      data_reloc_print_warning_inode, &reloc_warn);
324	}
325}
326
327static void __cold btrfs_print_data_csum_error(struct btrfs_inode *inode,
328		u64 logical_start, u8 *csum, u8 *csum_expected, int mirror_num)
329{
330	struct btrfs_root *root = inode->root;
331	const u32 csum_size = root->fs_info->csum_size;
332
333	/* For data reloc tree, it's better to do a backref lookup instead. */
334	if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
335		return print_data_reloc_error(inode, logical_start, csum,
336					      csum_expected, mirror_num);
337
338	/* Output without objectid, which is more meaningful */
339	if (root->root_key.objectid >= BTRFS_LAST_FREE_OBJECTID) {
340		btrfs_warn_rl(root->fs_info,
341"csum failed root %lld ino %lld off %llu csum " CSUM_FMT " expected csum " CSUM_FMT " mirror %d",
342			root->root_key.objectid, btrfs_ino(inode),
343			logical_start,
344			CSUM_FMT_VALUE(csum_size, csum),
345			CSUM_FMT_VALUE(csum_size, csum_expected),
346			mirror_num);
347	} else {
348		btrfs_warn_rl(root->fs_info,
349"csum failed root %llu ino %llu off %llu csum " CSUM_FMT " expected csum " CSUM_FMT " mirror %d",
350			root->root_key.objectid, btrfs_ino(inode),
351			logical_start,
352			CSUM_FMT_VALUE(csum_size, csum),
353			CSUM_FMT_VALUE(csum_size, csum_expected),
354			mirror_num);
355	}
356}
357
358/*
359 * Lock inode i_rwsem based on arguments passed.
360 *
361 * ilock_flags can have the following bit set:
362 *
363 * BTRFS_ILOCK_SHARED - acquire a shared lock on the inode
364 * BTRFS_ILOCK_TRY - try to acquire the lock, if fails on first attempt
365 *		     return -EAGAIN
366 * BTRFS_ILOCK_MMAP - acquire a write lock on the i_mmap_lock
367 */
368int btrfs_inode_lock(struct btrfs_inode *inode, unsigned int ilock_flags)
369{
370	if (ilock_flags & BTRFS_ILOCK_SHARED) {
371		if (ilock_flags & BTRFS_ILOCK_TRY) {
372			if (!inode_trylock_shared(&inode->vfs_inode))
373				return -EAGAIN;
374			else
375				return 0;
376		}
377		inode_lock_shared(&inode->vfs_inode);
378	} else {
379		if (ilock_flags & BTRFS_ILOCK_TRY) {
380			if (!inode_trylock(&inode->vfs_inode))
381				return -EAGAIN;
382			else
383				return 0;
384		}
385		inode_lock(&inode->vfs_inode);
386	}
387	if (ilock_flags & BTRFS_ILOCK_MMAP)
388		down_write(&inode->i_mmap_lock);
389	return 0;
390}
391
392/*
393 * Unock inode i_rwsem.
394 *
395 * ilock_flags should contain the same bits set as passed to btrfs_inode_lock()
396 * to decide whether the lock acquired is shared or exclusive.
397 */
398void btrfs_inode_unlock(struct btrfs_inode *inode, unsigned int ilock_flags)
399{
400	if (ilock_flags & BTRFS_ILOCK_MMAP)
401		up_write(&inode->i_mmap_lock);
402	if (ilock_flags & BTRFS_ILOCK_SHARED)
403		inode_unlock_shared(&inode->vfs_inode);
404	else
405		inode_unlock(&inode->vfs_inode);
406}
407
408/*
409 * Cleanup all submitted ordered extents in specified range to handle errors
410 * from the btrfs_run_delalloc_range() callback.
411 *
412 * NOTE: caller must ensure that when an error happens, it can not call
413 * extent_clear_unlock_delalloc() to clear both the bits EXTENT_DO_ACCOUNTING
414 * and EXTENT_DELALLOC simultaneously, because that causes the reserved metadata
415 * to be released, which we want to happen only when finishing the ordered
416 * extent (btrfs_finish_ordered_io()).
417 */
418static inline void btrfs_cleanup_ordered_extents(struct btrfs_inode *inode,
419						 struct page *locked_page,
420						 u64 offset, u64 bytes)
421{
422	unsigned long index = offset >> PAGE_SHIFT;
423	unsigned long end_index = (offset + bytes - 1) >> PAGE_SHIFT;
424	u64 page_start = 0, page_end = 0;
425	struct page *page;
426
427	if (locked_page) {
428		page_start = page_offset(locked_page);
429		page_end = page_start + PAGE_SIZE - 1;
430	}
431
432	while (index <= end_index) {
433		/*
434		 * For locked page, we will call btrfs_mark_ordered_io_finished
435		 * through btrfs_mark_ordered_io_finished() on it
436		 * in run_delalloc_range() for the error handling, which will
437		 * clear page Ordered and run the ordered extent accounting.
438		 *
439		 * Here we can't just clear the Ordered bit, or
440		 * btrfs_mark_ordered_io_finished() would skip the accounting
441		 * for the page range, and the ordered extent will never finish.
442		 */
443		if (locked_page && index == (page_start >> PAGE_SHIFT)) {
444			index++;
445			continue;
446		}
447		page = find_get_page(inode->vfs_inode.i_mapping, index);
448		index++;
449		if (!page)
450			continue;
451
452		/*
453		 * Here we just clear all Ordered bits for every page in the
454		 * range, then btrfs_mark_ordered_io_finished() will handle
455		 * the ordered extent accounting for the range.
456		 */
457		btrfs_folio_clamp_clear_ordered(inode->root->fs_info,
458						page_folio(page), offset, bytes);
459		put_page(page);
460	}
461
462	if (locked_page) {
463		/* The locked page covers the full range, nothing needs to be done */
464		if (bytes + offset <= page_start + PAGE_SIZE)
465			return;
466		/*
467		 * In case this page belongs to the delalloc range being
468		 * instantiated then skip it, since the first page of a range is
469		 * going to be properly cleaned up by the caller of
470		 * run_delalloc_range
471		 */
472		if (page_start >= offset && page_end <= (offset + bytes - 1)) {
473			bytes = offset + bytes - page_offset(locked_page) - PAGE_SIZE;
474			offset = page_offset(locked_page) + PAGE_SIZE;
475		}
476	}
477
478	return btrfs_mark_ordered_io_finished(inode, NULL, offset, bytes, false);
479}
480
481static int btrfs_dirty_inode(struct btrfs_inode *inode);
482
483static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
484				     struct btrfs_new_inode_args *args)
485{
486	int err;
487
488	if (args->default_acl) {
489		err = __btrfs_set_acl(trans, args->inode, args->default_acl,
490				      ACL_TYPE_DEFAULT);
491		if (err)
492			return err;
493	}
494	if (args->acl) {
495		err = __btrfs_set_acl(trans, args->inode, args->acl, ACL_TYPE_ACCESS);
496		if (err)
497			return err;
498	}
499	if (!args->default_acl && !args->acl)
500		cache_no_acl(args->inode);
501	return btrfs_xattr_security_init(trans, args->inode, args->dir,
502					 &args->dentry->d_name);
503}
504
505/*
506 * this does all the hard work for inserting an inline extent into
507 * the btree.  The caller should have done a btrfs_drop_extents so that
508 * no overlapping inline items exist in the btree
509 */
510static int insert_inline_extent(struct btrfs_trans_handle *trans,
511				struct btrfs_path *path,
512				struct btrfs_inode *inode, bool extent_inserted,
513				size_t size, size_t compressed_size,
514				int compress_type,
515				struct page **compressed_pages,
516				bool update_i_size)
517{
518	struct btrfs_root *root = inode->root;
519	struct extent_buffer *leaf;
520	struct page *page = NULL;
521	char *kaddr;
522	unsigned long ptr;
523	struct btrfs_file_extent_item *ei;
524	int ret;
525	size_t cur_size = size;
526	u64 i_size;
527
528	ASSERT((compressed_size > 0 && compressed_pages) ||
529	       (compressed_size == 0 && !compressed_pages));
530
531	if (compressed_size && compressed_pages)
532		cur_size = compressed_size;
533
534	if (!extent_inserted) {
535		struct btrfs_key key;
536		size_t datasize;
537
538		key.objectid = btrfs_ino(inode);
539		key.offset = 0;
540		key.type = BTRFS_EXTENT_DATA_KEY;
541
542		datasize = btrfs_file_extent_calc_inline_size(cur_size);
543		ret = btrfs_insert_empty_item(trans, root, path, &key,
544					      datasize);
545		if (ret)
546			goto fail;
547	}
548	leaf = path->nodes[0];
549	ei = btrfs_item_ptr(leaf, path->slots[0],
550			    struct btrfs_file_extent_item);
551	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
552	btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
553	btrfs_set_file_extent_encryption(leaf, ei, 0);
554	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
555	btrfs_set_file_extent_ram_bytes(leaf, ei, size);
556	ptr = btrfs_file_extent_inline_start(ei);
557
558	if (compress_type != BTRFS_COMPRESS_NONE) {
559		struct page *cpage;
560		int i = 0;
561		while (compressed_size > 0) {
562			cpage = compressed_pages[i];
563			cur_size = min_t(unsigned long, compressed_size,
564				       PAGE_SIZE);
565
566			kaddr = kmap_local_page(cpage);
567			write_extent_buffer(leaf, kaddr, ptr, cur_size);
568			kunmap_local(kaddr);
569
570			i++;
571			ptr += cur_size;
572			compressed_size -= cur_size;
573		}
574		btrfs_set_file_extent_compression(leaf, ei,
575						  compress_type);
576	} else {
577		page = find_get_page(inode->vfs_inode.i_mapping, 0);
578		btrfs_set_file_extent_compression(leaf, ei, 0);
579		kaddr = kmap_local_page(page);
580		write_extent_buffer(leaf, kaddr, ptr, size);
581		kunmap_local(kaddr);
582		put_page(page);
583	}
584	btrfs_mark_buffer_dirty(trans, leaf);
585	btrfs_release_path(path);
586
587	/*
588	 * We align size to sectorsize for inline extents just for simplicity
589	 * sake.
590	 */
591	ret = btrfs_inode_set_file_extent_range(inode, 0,
592					ALIGN(size, root->fs_info->sectorsize));
593	if (ret)
594		goto fail;
595
596	/*
597	 * We're an inline extent, so nobody can extend the file past i_size
598	 * without locking a page we already have locked.
599	 *
600	 * We must do any i_size and inode updates before we unlock the pages.
601	 * Otherwise we could end up racing with unlink.
602	 */
603	i_size = i_size_read(&inode->vfs_inode);
604	if (update_i_size && size > i_size) {
605		i_size_write(&inode->vfs_inode, size);
606		i_size = size;
607	}
608	inode->disk_i_size = i_size;
609
610fail:
611	return ret;
612}
613
614
615/*
616 * conditionally insert an inline extent into the file.  This
617 * does the checks required to make sure the data is small enough
618 * to fit as an inline extent.
619 */
620static noinline int cow_file_range_inline(struct btrfs_inode *inode, u64 size,
621					  size_t compressed_size,
622					  int compress_type,
623					  struct page **compressed_pages,
624					  bool update_i_size)
625{
626	struct btrfs_drop_extents_args drop_args = { 0 };
627	struct btrfs_root *root = inode->root;
628	struct btrfs_fs_info *fs_info = root->fs_info;
629	struct btrfs_trans_handle *trans;
630	u64 data_len = (compressed_size ?: size);
631	int ret;
632	struct btrfs_path *path;
633
634	/*
635	 * We can create an inline extent if it ends at or beyond the current
636	 * i_size, is no larger than a sector (decompressed), and the (possibly
637	 * compressed) data fits in a leaf and the configured maximum inline
638	 * size.
639	 */
640	if (size < i_size_read(&inode->vfs_inode) ||
641	    size > fs_info->sectorsize ||
642	    data_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info) ||
643	    data_len > fs_info->max_inline)
644		return 1;
645
646	path = btrfs_alloc_path();
647	if (!path)
648		return -ENOMEM;
649
650	trans = btrfs_join_transaction(root);
651	if (IS_ERR(trans)) {
652		btrfs_free_path(path);
653		return PTR_ERR(trans);
654	}
655	trans->block_rsv = &inode->block_rsv;
656
657	drop_args.path = path;
658	drop_args.start = 0;
659	drop_args.end = fs_info->sectorsize;
660	drop_args.drop_cache = true;
661	drop_args.replace_extent = true;
662	drop_args.extent_item_size = btrfs_file_extent_calc_inline_size(data_len);
663	ret = btrfs_drop_extents(trans, root, inode, &drop_args);
664	if (ret) {
665		btrfs_abort_transaction(trans, ret);
666		goto out;
667	}
668
669	ret = insert_inline_extent(trans, path, inode, drop_args.extent_inserted,
670				   size, compressed_size, compress_type,
671				   compressed_pages, update_i_size);
672	if (ret && ret != -ENOSPC) {
673		btrfs_abort_transaction(trans, ret);
674		goto out;
675	} else if (ret == -ENOSPC) {
676		ret = 1;
677		goto out;
678	}
679
680	btrfs_update_inode_bytes(inode, size, drop_args.bytes_found);
681	ret = btrfs_update_inode(trans, inode);
682	if (ret && ret != -ENOSPC) {
683		btrfs_abort_transaction(trans, ret);
684		goto out;
685	} else if (ret == -ENOSPC) {
686		ret = 1;
687		goto out;
688	}
689
690	btrfs_set_inode_full_sync(inode);
691out:
692	/*
693	 * Don't forget to free the reserved space, as for inlined extent
694	 * it won't count as data extent, free them directly here.
695	 * And at reserve time, it's always aligned to page size, so
696	 * just free one page here.
697	 */
698	btrfs_qgroup_free_data(inode, NULL, 0, PAGE_SIZE, NULL);
699	btrfs_free_path(path);
700	btrfs_end_transaction(trans);
701	return ret;
702}
703
704struct async_extent {
705	u64 start;
706	u64 ram_size;
707	u64 compressed_size;
708	struct page **pages;
709	unsigned long nr_pages;
710	int compress_type;
711	struct list_head list;
712};
713
714struct async_chunk {
715	struct btrfs_inode *inode;
716	struct page *locked_page;
717	u64 start;
718	u64 end;
719	blk_opf_t write_flags;
720	struct list_head extents;
721	struct cgroup_subsys_state *blkcg_css;
722	struct btrfs_work work;
723	struct async_cow *async_cow;
724};
725
726struct async_cow {
727	atomic_t num_chunks;
728	struct async_chunk chunks[];
729};
730
731static noinline int add_async_extent(struct async_chunk *cow,
732				     u64 start, u64 ram_size,
733				     u64 compressed_size,
734				     struct page **pages,
735				     unsigned long nr_pages,
736				     int compress_type)
737{
738	struct async_extent *async_extent;
739
740	async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
741	if (!async_extent)
742		return -ENOMEM;
743	async_extent->start = start;
744	async_extent->ram_size = ram_size;
745	async_extent->compressed_size = compressed_size;
746	async_extent->pages = pages;
747	async_extent->nr_pages = nr_pages;
748	async_extent->compress_type = compress_type;
749	list_add_tail(&async_extent->list, &cow->extents);
750	return 0;
751}
752
753/*
754 * Check if the inode needs to be submitted to compression, based on mount
755 * options, defragmentation, properties or heuristics.
756 */
757static inline int inode_need_compress(struct btrfs_inode *inode, u64 start,
758				      u64 end)
759{
760	struct btrfs_fs_info *fs_info = inode->root->fs_info;
761
762	if (!btrfs_inode_can_compress(inode)) {
763		WARN(IS_ENABLED(CONFIG_BTRFS_DEBUG),
764			KERN_ERR "BTRFS: unexpected compression for ino %llu\n",
765			btrfs_ino(inode));
766		return 0;
767	}
768	/*
769	 * Special check for subpage.
770	 *
771	 * We lock the full page then run each delalloc range in the page, thus
772	 * for the following case, we will hit some subpage specific corner case:
773	 *
774	 * 0		32K		64K
775	 * |	|///////|	|///////|
776	 *		\- A		\- B
777	 *
778	 * In above case, both range A and range B will try to unlock the full
779	 * page [0, 64K), causing the one finished later will have page
780	 * unlocked already, triggering various page lock requirement BUG_ON()s.
781	 *
782	 * So here we add an artificial limit that subpage compression can only
783	 * if the range is fully page aligned.
784	 *
785	 * In theory we only need to ensure the first page is fully covered, but
786	 * the tailing partial page will be locked until the full compression
787	 * finishes, delaying the write of other range.
788	 *
789	 * TODO: Make btrfs_run_delalloc_range() to lock all delalloc range
790	 * first to prevent any submitted async extent to unlock the full page.
791	 * By this, we can ensure for subpage case that only the last async_cow
792	 * will unlock the full page.
793	 */
794	if (fs_info->sectorsize < PAGE_SIZE) {
795		if (!PAGE_ALIGNED(start) ||
796		    !PAGE_ALIGNED(end + 1))
797			return 0;
798	}
799
800	/* force compress */
801	if (btrfs_test_opt(fs_info, FORCE_COMPRESS))
802		return 1;
803	/* defrag ioctl */
804	if (inode->defrag_compress)
805		return 1;
806	/* bad compression ratios */
807	if (inode->flags & BTRFS_INODE_NOCOMPRESS)
808		return 0;
809	if (btrfs_test_opt(fs_info, COMPRESS) ||
810	    inode->flags & BTRFS_INODE_COMPRESS ||
811	    inode->prop_compress)
812		return btrfs_compress_heuristic(&inode->vfs_inode, start, end);
813	return 0;
814}
815
816static inline void inode_should_defrag(struct btrfs_inode *inode,
817		u64 start, u64 end, u64 num_bytes, u32 small_write)
818{
819	/* If this is a small write inside eof, kick off a defrag */
820	if (num_bytes < small_write &&
821	    (start > 0 || end + 1 < inode->disk_i_size))
822		btrfs_add_inode_defrag(NULL, inode, small_write);
823}
824
825/*
826 * Work queue call back to started compression on a file and pages.
827 *
828 * This is done inside an ordered work queue, and the compression is spread
829 * across many cpus.  The actual IO submission is step two, and the ordered work
830 * queue takes care of making sure that happens in the same order things were
831 * put onto the queue by writepages and friends.
832 *
833 * If this code finds it can't get good compression, it puts an entry onto the
834 * work queue to write the uncompressed bytes.  This makes sure that both
835 * compressed inodes and uncompressed inodes are written in the same order that
836 * the flusher thread sent them down.
837 */
838static void compress_file_range(struct btrfs_work *work)
839{
840	struct async_chunk *async_chunk =
841		container_of(work, struct async_chunk, work);
842	struct btrfs_inode *inode = async_chunk->inode;
843	struct btrfs_fs_info *fs_info = inode->root->fs_info;
844	struct address_space *mapping = inode->vfs_inode.i_mapping;
845	u64 blocksize = fs_info->sectorsize;
846	u64 start = async_chunk->start;
847	u64 end = async_chunk->end;
848	u64 actual_end;
849	u64 i_size;
850	int ret = 0;
851	struct page **pages;
852	unsigned long nr_pages;
853	unsigned long total_compressed = 0;
854	unsigned long total_in = 0;
855	unsigned int poff;
856	int i;
857	int compress_type = fs_info->compress_type;
858
859	inode_should_defrag(inode, start, end, end - start + 1, SZ_16K);
860
861	/*
862	 * We need to call clear_page_dirty_for_io on each page in the range.
863	 * Otherwise applications with the file mmap'd can wander in and change
864	 * the page contents while we are compressing them.
865	 */
866	extent_range_clear_dirty_for_io(&inode->vfs_inode, start, end);
867
868	/*
869	 * We need to save i_size before now because it could change in between
870	 * us evaluating the size and assigning it.  This is because we lock and
871	 * unlock the page in truncate and fallocate, and then modify the i_size
872	 * later on.
873	 *
874	 * The barriers are to emulate READ_ONCE, remove that once i_size_read
875	 * does that for us.
876	 */
877	barrier();
878	i_size = i_size_read(&inode->vfs_inode);
879	barrier();
880	actual_end = min_t(u64, i_size, end + 1);
881again:
882	pages = NULL;
883	nr_pages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1;
884	nr_pages = min_t(unsigned long, nr_pages, BTRFS_MAX_COMPRESSED_PAGES);
885
886	/*
887	 * we don't want to send crud past the end of i_size through
888	 * compression, that's just a waste of CPU time.  So, if the
889	 * end of the file is before the start of our current
890	 * requested range of bytes, we bail out to the uncompressed
891	 * cleanup code that can deal with all of this.
892	 *
893	 * It isn't really the fastest way to fix things, but this is a
894	 * very uncommon corner.
895	 */
896	if (actual_end <= start)
897		goto cleanup_and_bail_uncompressed;
898
899	total_compressed = actual_end - start;
900
901	/*
902	 * Skip compression for a small file range(<=blocksize) that
903	 * isn't an inline extent, since it doesn't save disk space at all.
904	 */
905	if (total_compressed <= blocksize &&
906	   (start > 0 || end + 1 < inode->disk_i_size))
907		goto cleanup_and_bail_uncompressed;
908
909	/*
910	 * For subpage case, we require full page alignment for the sector
911	 * aligned range.
912	 * Thus we must also check against @actual_end, not just @end.
913	 */
914	if (blocksize < PAGE_SIZE) {
915		if (!PAGE_ALIGNED(start) ||
916		    !PAGE_ALIGNED(round_up(actual_end, blocksize)))
917			goto cleanup_and_bail_uncompressed;
918	}
919
920	total_compressed = min_t(unsigned long, total_compressed,
921			BTRFS_MAX_UNCOMPRESSED);
922	total_in = 0;
923	ret = 0;
924
925	/*
926	 * We do compression for mount -o compress and when the inode has not
927	 * been flagged as NOCOMPRESS.  This flag can change at any time if we
928	 * discover bad compression ratios.
929	 */
930	if (!inode_need_compress(inode, start, end))
931		goto cleanup_and_bail_uncompressed;
932
933	pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
934	if (!pages) {
935		/*
936		 * Memory allocation failure is not a fatal error, we can fall
937		 * back to uncompressed code.
938		 */
939		goto cleanup_and_bail_uncompressed;
940	}
941
942	if (inode->defrag_compress)
943		compress_type = inode->defrag_compress;
944	else if (inode->prop_compress)
945		compress_type = inode->prop_compress;
946
947	/* Compression level is applied here. */
948	ret = btrfs_compress_pages(compress_type | (fs_info->compress_level << 4),
949				   mapping, start, pages, &nr_pages, &total_in,
950				   &total_compressed);
951	if (ret)
952		goto mark_incompressible;
953
954	/*
955	 * Zero the tail end of the last page, as we might be sending it down
956	 * to disk.
957	 */
958	poff = offset_in_page(total_compressed);
959	if (poff)
960		memzero_page(pages[nr_pages - 1], poff, PAGE_SIZE - poff);
961
962	/*
963	 * Try to create an inline extent.
964	 *
965	 * If we didn't compress the entire range, try to create an uncompressed
966	 * inline extent, else a compressed one.
967	 *
968	 * Check cow_file_range() for why we don't even try to create inline
969	 * extent for the subpage case.
970	 */
971	if (start == 0 && fs_info->sectorsize == PAGE_SIZE) {
972		if (total_in < actual_end) {
973			ret = cow_file_range_inline(inode, actual_end, 0,
974						    BTRFS_COMPRESS_NONE, NULL,
975						    false);
976		} else {
977			ret = cow_file_range_inline(inode, actual_end,
978						    total_compressed,
979						    compress_type, pages,
980						    false);
981		}
982		if (ret <= 0) {
983			unsigned long clear_flags = EXTENT_DELALLOC |
984				EXTENT_DELALLOC_NEW | EXTENT_DEFRAG |
985				EXTENT_DO_ACCOUNTING;
986
987			if (ret < 0)
988				mapping_set_error(mapping, -EIO);
989
990			/*
991			 * inline extent creation worked or returned error,
992			 * we don't need to create any more async work items.
993			 * Unlock and free up our temp pages.
994			 *
995			 * We use DO_ACCOUNTING here because we need the
996			 * delalloc_release_metadata to be done _after_ we drop
997			 * our outstanding extent for clearing delalloc for this
998			 * range.
999			 */
1000			extent_clear_unlock_delalloc(inode, start, end,
1001						     NULL,
1002						     clear_flags,
1003						     PAGE_UNLOCK |
1004						     PAGE_START_WRITEBACK |
1005						     PAGE_END_WRITEBACK);
1006			goto free_pages;
1007		}
1008	}
1009
1010	/*
1011	 * We aren't doing an inline extent. Round the compressed size up to a
1012	 * block size boundary so the allocator does sane things.
1013	 */
1014	total_compressed = ALIGN(total_compressed, blocksize);
1015
1016	/*
1017	 * One last check to make sure the compression is really a win, compare
1018	 * the page count read with the blocks on disk, compression must free at
1019	 * least one sector.
1020	 */
1021	total_in = round_up(total_in, fs_info->sectorsize);
1022	if (total_compressed + blocksize > total_in)
1023		goto mark_incompressible;
1024
1025	/*
1026	 * The async work queues will take care of doing actual allocation on
1027	 * disk for these compressed pages, and will submit the bios.
1028	 */
1029	ret = add_async_extent(async_chunk, start, total_in, total_compressed, pages,
1030			       nr_pages, compress_type);
1031	BUG_ON(ret);
1032	if (start + total_in < end) {
1033		start += total_in;
1034		cond_resched();
1035		goto again;
1036	}
1037	return;
1038
1039mark_incompressible:
1040	if (!btrfs_test_opt(fs_info, FORCE_COMPRESS) && !inode->prop_compress)
1041		inode->flags |= BTRFS_INODE_NOCOMPRESS;
1042cleanup_and_bail_uncompressed:
1043	ret = add_async_extent(async_chunk, start, end - start + 1, 0, NULL, 0,
1044			       BTRFS_COMPRESS_NONE);
1045	BUG_ON(ret);
1046free_pages:
1047	if (pages) {
1048		for (i = 0; i < nr_pages; i++) {
1049			WARN_ON(pages[i]->mapping);
1050			btrfs_free_compr_page(pages[i]);
1051		}
1052		kfree(pages);
1053	}
1054}
1055
1056static void free_async_extent_pages(struct async_extent *async_extent)
1057{
1058	int i;
1059
1060	if (!async_extent->pages)
1061		return;
1062
1063	for (i = 0; i < async_extent->nr_pages; i++) {
1064		WARN_ON(async_extent->pages[i]->mapping);
1065		btrfs_free_compr_page(async_extent->pages[i]);
1066	}
1067	kfree(async_extent->pages);
1068	async_extent->nr_pages = 0;
1069	async_extent->pages = NULL;
1070}
1071
1072static void submit_uncompressed_range(struct btrfs_inode *inode,
1073				      struct async_extent *async_extent,
1074				      struct page *locked_page)
1075{
1076	u64 start = async_extent->start;
1077	u64 end = async_extent->start + async_extent->ram_size - 1;
1078	int ret;
1079	struct writeback_control wbc = {
1080		.sync_mode		= WB_SYNC_ALL,
1081		.range_start		= start,
1082		.range_end		= end,
1083		.no_cgroup_owner	= 1,
1084	};
1085
1086	wbc_attach_fdatawrite_inode(&wbc, &inode->vfs_inode);
1087	ret = run_delalloc_cow(inode, locked_page, start, end, &wbc, false);
1088	wbc_detach_inode(&wbc);
1089	if (ret < 0) {
1090		btrfs_cleanup_ordered_extents(inode, locked_page, start, end - start + 1);
1091		if (locked_page) {
1092			const u64 page_start = page_offset(locked_page);
1093
1094			set_page_writeback(locked_page);
1095			end_page_writeback(locked_page);
1096			btrfs_mark_ordered_io_finished(inode, locked_page,
1097						       page_start, PAGE_SIZE,
1098						       !ret);
1099			mapping_set_error(locked_page->mapping, ret);
1100			unlock_page(locked_page);
1101		}
1102	}
1103}
1104
1105static void submit_one_async_extent(struct async_chunk *async_chunk,
1106				    struct async_extent *async_extent,
1107				    u64 *alloc_hint)
1108{
1109	struct btrfs_inode *inode = async_chunk->inode;
1110	struct extent_io_tree *io_tree = &inode->io_tree;
1111	struct btrfs_root *root = inode->root;
1112	struct btrfs_fs_info *fs_info = root->fs_info;
1113	struct btrfs_ordered_extent *ordered;
1114	struct btrfs_key ins;
1115	struct page *locked_page = NULL;
1116	struct extent_map *em;
1117	int ret = 0;
1118	u64 start = async_extent->start;
1119	u64 end = async_extent->start + async_extent->ram_size - 1;
1120
1121	if (async_chunk->blkcg_css)
1122		kthread_associate_blkcg(async_chunk->blkcg_css);
1123
1124	/*
1125	 * If async_chunk->locked_page is in the async_extent range, we need to
1126	 * handle it.
1127	 */
1128	if (async_chunk->locked_page) {
1129		u64 locked_page_start = page_offset(async_chunk->locked_page);
1130		u64 locked_page_end = locked_page_start + PAGE_SIZE - 1;
1131
1132		if (!(start >= locked_page_end || end <= locked_page_start))
1133			locked_page = async_chunk->locked_page;
1134	}
1135	lock_extent(io_tree, start, end, NULL);
1136
1137	if (async_extent->compress_type == BTRFS_COMPRESS_NONE) {
1138		submit_uncompressed_range(inode, async_extent, locked_page);
1139		goto done;
1140	}
1141
1142	ret = btrfs_reserve_extent(root, async_extent->ram_size,
1143				   async_extent->compressed_size,
1144				   async_extent->compressed_size,
1145				   0, *alloc_hint, &ins, 1, 1);
1146	if (ret) {
1147		/*
1148		 * Here we used to try again by going back to non-compressed
1149		 * path for ENOSPC.  But we can't reserve space even for
1150		 * compressed size, how could it work for uncompressed size
1151		 * which requires larger size?  So here we directly go error
1152		 * path.
1153		 */
1154		goto out_free;
1155	}
1156
1157	/* Here we're doing allocation and writeback of the compressed pages */
1158	em = create_io_em(inode, start,
1159			  async_extent->ram_size,	/* len */
1160			  start,			/* orig_start */
1161			  ins.objectid,			/* block_start */
1162			  ins.offset,			/* block_len */
1163			  ins.offset,			/* orig_block_len */
1164			  async_extent->ram_size,	/* ram_bytes */
1165			  async_extent->compress_type,
1166			  BTRFS_ORDERED_COMPRESSED);
1167	if (IS_ERR(em)) {
1168		ret = PTR_ERR(em);
1169		goto out_free_reserve;
1170	}
1171	free_extent_map(em);
1172
1173	ordered = btrfs_alloc_ordered_extent(inode, start,	/* file_offset */
1174				       async_extent->ram_size,	/* num_bytes */
1175				       async_extent->ram_size,	/* ram_bytes */
1176				       ins.objectid,		/* disk_bytenr */
1177				       ins.offset,		/* disk_num_bytes */
1178				       0,			/* offset */
1179				       1 << BTRFS_ORDERED_COMPRESSED,
1180				       async_extent->compress_type);
1181	if (IS_ERR(ordered)) {
1182		btrfs_drop_extent_map_range(inode, start, end, false);
1183		ret = PTR_ERR(ordered);
1184		goto out_free_reserve;
1185	}
1186	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
1187
1188	/* Clear dirty, set writeback and unlock the pages. */
1189	extent_clear_unlock_delalloc(inode, start, end,
1190			NULL, EXTENT_LOCKED | EXTENT_DELALLOC,
1191			PAGE_UNLOCK | PAGE_START_WRITEBACK);
1192	btrfs_submit_compressed_write(ordered,
1193			    async_extent->pages,	/* compressed_pages */
1194			    async_extent->nr_pages,
1195			    async_chunk->write_flags, true);
1196	*alloc_hint = ins.objectid + ins.offset;
1197done:
1198	if (async_chunk->blkcg_css)
1199		kthread_associate_blkcg(NULL);
1200	kfree(async_extent);
1201	return;
1202
1203out_free_reserve:
1204	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
1205	btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
1206out_free:
1207	mapping_set_error(inode->vfs_inode.i_mapping, -EIO);
1208	extent_clear_unlock_delalloc(inode, start, end,
1209				     NULL, EXTENT_LOCKED | EXTENT_DELALLOC |
1210				     EXTENT_DELALLOC_NEW |
1211				     EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING,
1212				     PAGE_UNLOCK | PAGE_START_WRITEBACK |
1213				     PAGE_END_WRITEBACK);
1214	free_async_extent_pages(async_extent);
1215	if (async_chunk->blkcg_css)
1216		kthread_associate_blkcg(NULL);
1217	btrfs_debug(fs_info,
1218"async extent submission failed root=%lld inode=%llu start=%llu len=%llu ret=%d",
1219		    root->root_key.objectid, btrfs_ino(inode), start,
1220		    async_extent->ram_size, ret);
1221	kfree(async_extent);
1222}
1223
1224static u64 get_extent_allocation_hint(struct btrfs_inode *inode, u64 start,
1225				      u64 num_bytes)
1226{
1227	struct extent_map_tree *em_tree = &inode->extent_tree;
1228	struct extent_map *em;
1229	u64 alloc_hint = 0;
1230
1231	read_lock(&em_tree->lock);
1232	em = search_extent_mapping(em_tree, start, num_bytes);
1233	if (em) {
1234		/*
1235		 * if block start isn't an actual block number then find the
1236		 * first block in this inode and use that as a hint.  If that
1237		 * block is also bogus then just don't worry about it.
1238		 */
1239		if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
1240			free_extent_map(em);
1241			em = search_extent_mapping(em_tree, 0, 0);
1242			if (em && em->block_start < EXTENT_MAP_LAST_BYTE)
1243				alloc_hint = em->block_start;
1244			if (em)
1245				free_extent_map(em);
1246		} else {
1247			alloc_hint = em->block_start;
1248			free_extent_map(em);
1249		}
1250	}
1251	read_unlock(&em_tree->lock);
1252
1253	return alloc_hint;
1254}
1255
1256/*
1257 * when extent_io.c finds a delayed allocation range in the file,
1258 * the call backs end up in this code.  The basic idea is to
1259 * allocate extents on disk for the range, and create ordered data structs
1260 * in ram to track those extents.
1261 *
1262 * locked_page is the page that writepage had locked already.  We use
1263 * it to make sure we don't do extra locks or unlocks.
1264 *
1265 * When this function fails, it unlocks all pages except @locked_page.
1266 *
1267 * When this function successfully creates an inline extent, it returns 1 and
1268 * unlocks all pages including locked_page and starts I/O on them.
1269 * (In reality inline extents are limited to a single page, so locked_page is
1270 * the only page handled anyway).
1271 *
1272 * When this function succeed and creates a normal extent, the page locking
1273 * status depends on the passed in flags:
1274 *
1275 * - If @keep_locked is set, all pages are kept locked.
1276 * - Else all pages except for @locked_page are unlocked.
1277 *
1278 * When a failure happens in the second or later iteration of the
1279 * while-loop, the ordered extents created in previous iterations are kept
1280 * intact. So, the caller must clean them up by calling
1281 * btrfs_cleanup_ordered_extents(). See btrfs_run_delalloc_range() for
1282 * example.
1283 */
1284static noinline int cow_file_range(struct btrfs_inode *inode,
1285				   struct page *locked_page, u64 start, u64 end,
1286				   u64 *done_offset,
1287				   bool keep_locked, bool no_inline)
1288{
1289	struct btrfs_root *root = inode->root;
1290	struct btrfs_fs_info *fs_info = root->fs_info;
1291	u64 alloc_hint = 0;
1292	u64 orig_start = start;
1293	u64 num_bytes;
1294	unsigned long ram_size;
1295	u64 cur_alloc_size = 0;
1296	u64 min_alloc_size;
1297	u64 blocksize = fs_info->sectorsize;
1298	struct btrfs_key ins;
1299	struct extent_map *em;
1300	unsigned clear_bits;
1301	unsigned long page_ops;
1302	bool extent_reserved = false;
1303	int ret = 0;
1304
1305	if (btrfs_is_free_space_inode(inode)) {
1306		ret = -EINVAL;
1307		goto out_unlock;
1308	}
1309
1310	num_bytes = ALIGN(end - start + 1, blocksize);
1311	num_bytes = max(blocksize,  num_bytes);
1312	ASSERT(num_bytes <= btrfs_super_total_bytes(fs_info->super_copy));
1313
1314	inode_should_defrag(inode, start, end, num_bytes, SZ_64K);
1315
1316	/*
1317	 * Due to the page size limit, for subpage we can only trigger the
1318	 * writeback for the dirty sectors of page, that means data writeback
1319	 * is doing more writeback than what we want.
1320	 *
1321	 * This is especially unexpected for some call sites like fallocate,
1322	 * where we only increase i_size after everything is done.
1323	 * This means we can trigger inline extent even if we didn't want to.
1324	 * So here we skip inline extent creation completely.
1325	 */
1326	if (start == 0 && fs_info->sectorsize == PAGE_SIZE && !no_inline) {
1327		u64 actual_end = min_t(u64, i_size_read(&inode->vfs_inode),
1328				       end + 1);
1329
1330		/* lets try to make an inline extent */
1331		ret = cow_file_range_inline(inode, actual_end, 0,
1332					    BTRFS_COMPRESS_NONE, NULL, false);
1333		if (ret == 0) {
1334			/*
1335			 * We use DO_ACCOUNTING here because we need the
1336			 * delalloc_release_metadata to be run _after_ we drop
1337			 * our outstanding extent for clearing delalloc for this
1338			 * range.
1339			 */
1340			extent_clear_unlock_delalloc(inode, start, end,
1341				     locked_page,
1342				     EXTENT_LOCKED | EXTENT_DELALLOC |
1343				     EXTENT_DELALLOC_NEW | EXTENT_DEFRAG |
1344				     EXTENT_DO_ACCOUNTING, PAGE_UNLOCK |
1345				     PAGE_START_WRITEBACK | PAGE_END_WRITEBACK);
1346			/*
1347			 * locked_page is locked by the caller of
1348			 * writepage_delalloc(), not locked by
1349			 * __process_pages_contig().
1350			 *
1351			 * We can't let __process_pages_contig() to unlock it,
1352			 * as it doesn't have any subpage::writers recorded.
1353			 *
1354			 * Here we manually unlock the page, since the caller
1355			 * can't determine if it's an inline extent or a
1356			 * compressed extent.
1357			 */
1358			unlock_page(locked_page);
1359			ret = 1;
1360			goto done;
1361		} else if (ret < 0) {
1362			goto out_unlock;
1363		}
1364	}
1365
1366	alloc_hint = get_extent_allocation_hint(inode, start, num_bytes);
1367
1368	/*
1369	 * Relocation relies on the relocated extents to have exactly the same
1370	 * size as the original extents. Normally writeback for relocation data
1371	 * extents follows a NOCOW path because relocation preallocates the
1372	 * extents. However, due to an operation such as scrub turning a block
1373	 * group to RO mode, it may fallback to COW mode, so we must make sure
1374	 * an extent allocated during COW has exactly the requested size and can
1375	 * not be split into smaller extents, otherwise relocation breaks and
1376	 * fails during the stage where it updates the bytenr of file extent
1377	 * items.
1378	 */
1379	if (btrfs_is_data_reloc_root(root))
1380		min_alloc_size = num_bytes;
1381	else
1382		min_alloc_size = fs_info->sectorsize;
1383
1384	while (num_bytes > 0) {
1385		struct btrfs_ordered_extent *ordered;
1386
1387		cur_alloc_size = num_bytes;
1388		ret = btrfs_reserve_extent(root, cur_alloc_size, cur_alloc_size,
1389					   min_alloc_size, 0, alloc_hint,
1390					   &ins, 1, 1);
1391		if (ret == -EAGAIN) {
1392			/*
1393			 * btrfs_reserve_extent only returns -EAGAIN for zoned
1394			 * file systems, which is an indication that there are
1395			 * no active zones to allocate from at the moment.
1396			 *
1397			 * If this is the first loop iteration, wait for at
1398			 * least one zone to finish before retrying the
1399			 * allocation.  Otherwise ask the caller to write out
1400			 * the already allocated blocks before coming back to
1401			 * us, or return -ENOSPC if it can't handle retries.
1402			 */
1403			ASSERT(btrfs_is_zoned(fs_info));
1404			if (start == orig_start) {
1405				wait_on_bit_io(&inode->root->fs_info->flags,
1406					       BTRFS_FS_NEED_ZONE_FINISH,
1407					       TASK_UNINTERRUPTIBLE);
1408				continue;
1409			}
1410			if (done_offset) {
1411				*done_offset = start - 1;
1412				return 0;
1413			}
1414			ret = -ENOSPC;
1415		}
1416		if (ret < 0)
1417			goto out_unlock;
1418		cur_alloc_size = ins.offset;
1419		extent_reserved = true;
1420
1421		ram_size = ins.offset;
1422		em = create_io_em(inode, start, ins.offset, /* len */
1423				  start, /* orig_start */
1424				  ins.objectid, /* block_start */
1425				  ins.offset, /* block_len */
1426				  ins.offset, /* orig_block_len */
1427				  ram_size, /* ram_bytes */
1428				  BTRFS_COMPRESS_NONE, /* compress_type */
1429				  BTRFS_ORDERED_REGULAR /* type */);
1430		if (IS_ERR(em)) {
1431			ret = PTR_ERR(em);
1432			goto out_reserve;
1433		}
1434		free_extent_map(em);
1435
1436		ordered = btrfs_alloc_ordered_extent(inode, start, ram_size,
1437					ram_size, ins.objectid, cur_alloc_size,
1438					0, 1 << BTRFS_ORDERED_REGULAR,
1439					BTRFS_COMPRESS_NONE);
1440		if (IS_ERR(ordered)) {
1441			ret = PTR_ERR(ordered);
1442			goto out_drop_extent_cache;
1443		}
1444
1445		if (btrfs_is_data_reloc_root(root)) {
1446			ret = btrfs_reloc_clone_csums(ordered);
1447
1448			/*
1449			 * Only drop cache here, and process as normal.
1450			 *
1451			 * We must not allow extent_clear_unlock_delalloc()
1452			 * at out_unlock label to free meta of this ordered
1453			 * extent, as its meta should be freed by
1454			 * btrfs_finish_ordered_io().
1455			 *
1456			 * So we must continue until @start is increased to
1457			 * skip current ordered extent.
1458			 */
1459			if (ret)
1460				btrfs_drop_extent_map_range(inode, start,
1461							    start + ram_size - 1,
1462							    false);
1463		}
1464		btrfs_put_ordered_extent(ordered);
1465
1466		btrfs_dec_block_group_reservations(fs_info, ins.objectid);
1467
1468		/*
1469		 * We're not doing compressed IO, don't unlock the first page
1470		 * (which the caller expects to stay locked), don't clear any
1471		 * dirty bits and don't set any writeback bits
1472		 *
1473		 * Do set the Ordered (Private2) bit so we know this page was
1474		 * properly setup for writepage.
1475		 */
1476		page_ops = (keep_locked ? 0 : PAGE_UNLOCK);
1477		page_ops |= PAGE_SET_ORDERED;
1478
1479		extent_clear_unlock_delalloc(inode, start, start + ram_size - 1,
1480					     locked_page,
1481					     EXTENT_LOCKED | EXTENT_DELALLOC,
1482					     page_ops);
1483		if (num_bytes < cur_alloc_size)
1484			num_bytes = 0;
1485		else
1486			num_bytes -= cur_alloc_size;
1487		alloc_hint = ins.objectid + ins.offset;
1488		start += cur_alloc_size;
1489		extent_reserved = false;
1490
1491		/*
1492		 * btrfs_reloc_clone_csums() error, since start is increased
1493		 * extent_clear_unlock_delalloc() at out_unlock label won't
1494		 * free metadata of current ordered extent, we're OK to exit.
1495		 */
1496		if (ret)
1497			goto out_unlock;
1498	}
1499done:
1500	if (done_offset)
1501		*done_offset = end;
1502	return ret;
1503
1504out_drop_extent_cache:
1505	btrfs_drop_extent_map_range(inode, start, start + ram_size - 1, false);
1506out_reserve:
1507	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
1508	btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
1509out_unlock:
1510	/*
1511	 * Now, we have three regions to clean up:
1512	 *
1513	 * |-------(1)----|---(2)---|-------------(3)----------|
1514	 * `- orig_start  `- start  `- start + cur_alloc_size  `- end
1515	 *
1516	 * We process each region below.
1517	 */
1518
1519	clear_bits = EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
1520		EXTENT_DEFRAG | EXTENT_CLEAR_META_RESV;
1521	page_ops = PAGE_UNLOCK | PAGE_START_WRITEBACK | PAGE_END_WRITEBACK;
1522
1523	/*
1524	 * For the range (1). We have already instantiated the ordered extents
1525	 * for this region. They are cleaned up by
1526	 * btrfs_cleanup_ordered_extents() in e.g,
1527	 * btrfs_run_delalloc_range(). EXTENT_LOCKED | EXTENT_DELALLOC are
1528	 * already cleared in the above loop. And, EXTENT_DELALLOC_NEW |
1529	 * EXTENT_DEFRAG | EXTENT_CLEAR_META_RESV are handled by the cleanup
1530	 * function.
1531	 *
1532	 * However, in case of @keep_locked, we still need to unlock the pages
1533	 * (except @locked_page) to ensure all the pages are unlocked.
1534	 */
1535	if (keep_locked && orig_start < start) {
1536		if (!locked_page)
1537			mapping_set_error(inode->vfs_inode.i_mapping, ret);
1538		extent_clear_unlock_delalloc(inode, orig_start, start - 1,
1539					     locked_page, 0, page_ops);
1540	}
1541
1542	/*
1543	 * For the range (2). If we reserved an extent for our delalloc range
1544	 * (or a subrange) and failed to create the respective ordered extent,
1545	 * then it means that when we reserved the extent we decremented the
1546	 * extent's size from the data space_info's bytes_may_use counter and
1547	 * incremented the space_info's bytes_reserved counter by the same
1548	 * amount. We must make sure extent_clear_unlock_delalloc() does not try
1549	 * to decrement again the data space_info's bytes_may_use counter,
1550	 * therefore we do not pass it the flag EXTENT_CLEAR_DATA_RESV.
1551	 */
1552	if (extent_reserved) {
1553		extent_clear_unlock_delalloc(inode, start,
1554					     start + cur_alloc_size - 1,
1555					     locked_page,
1556					     clear_bits,
1557					     page_ops);
1558		start += cur_alloc_size;
1559	}
1560
1561	/*
1562	 * For the range (3). We never touched the region. In addition to the
1563	 * clear_bits above, we add EXTENT_CLEAR_DATA_RESV to release the data
1564	 * space_info's bytes_may_use counter, reserved in
1565	 * btrfs_check_data_free_space().
1566	 */
1567	if (start < end) {
1568		clear_bits |= EXTENT_CLEAR_DATA_RESV;
1569		extent_clear_unlock_delalloc(inode, start, end, locked_page,
1570					     clear_bits, page_ops);
1571	}
1572	return ret;
1573}
1574
1575/*
1576 * Phase two of compressed writeback.  This is the ordered portion of the code,
1577 * which only gets called in the order the work was queued.  We walk all the
1578 * async extents created by compress_file_range and send them down to the disk.
1579 *
1580 * If called with @do_free == true then it'll try to finish the work and free
1581 * the work struct eventually.
1582 */
1583static noinline void submit_compressed_extents(struct btrfs_work *work, bool do_free)
1584{
1585	struct async_chunk *async_chunk = container_of(work, struct async_chunk,
1586						     work);
1587	struct btrfs_fs_info *fs_info = btrfs_work_owner(work);
1588	struct async_extent *async_extent;
1589	unsigned long nr_pages;
1590	u64 alloc_hint = 0;
1591
1592	if (do_free) {
1593		struct async_chunk *async_chunk;
1594		struct async_cow *async_cow;
1595
1596		async_chunk = container_of(work, struct async_chunk, work);
1597		btrfs_add_delayed_iput(async_chunk->inode);
1598		if (async_chunk->blkcg_css)
1599			css_put(async_chunk->blkcg_css);
1600
1601		async_cow = async_chunk->async_cow;
1602		if (atomic_dec_and_test(&async_cow->num_chunks))
1603			kvfree(async_cow);
1604		return;
1605	}
1606
1607	nr_pages = (async_chunk->end - async_chunk->start + PAGE_SIZE) >>
1608		PAGE_SHIFT;
1609
1610	while (!list_empty(&async_chunk->extents)) {
1611		async_extent = list_entry(async_chunk->extents.next,
1612					  struct async_extent, list);
1613		list_del(&async_extent->list);
1614		submit_one_async_extent(async_chunk, async_extent, &alloc_hint);
1615	}
1616
1617	/* atomic_sub_return implies a barrier */
1618	if (atomic_sub_return(nr_pages, &fs_info->async_delalloc_pages) <
1619	    5 * SZ_1M)
1620		cond_wake_up_nomb(&fs_info->async_submit_wait);
1621}
1622
1623static bool run_delalloc_compressed(struct btrfs_inode *inode,
1624				    struct page *locked_page, u64 start,
1625				    u64 end, struct writeback_control *wbc)
1626{
1627	struct btrfs_fs_info *fs_info = inode->root->fs_info;
1628	struct cgroup_subsys_state *blkcg_css = wbc_blkcg_css(wbc);
1629	struct async_cow *ctx;
1630	struct async_chunk *async_chunk;
1631	unsigned long nr_pages;
1632	u64 num_chunks = DIV_ROUND_UP(end - start, SZ_512K);
1633	int i;
1634	unsigned nofs_flag;
1635	const blk_opf_t write_flags = wbc_to_write_flags(wbc);
1636
1637	nofs_flag = memalloc_nofs_save();
1638	ctx = kvmalloc(struct_size(ctx, chunks, num_chunks), GFP_KERNEL);
1639	memalloc_nofs_restore(nofs_flag);
1640	if (!ctx)
1641		return false;
1642
1643	unlock_extent(&inode->io_tree, start, end, NULL);
1644	set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, &inode->runtime_flags);
1645
1646	async_chunk = ctx->chunks;
1647	atomic_set(&ctx->num_chunks, num_chunks);
1648
1649	for (i = 0; i < num_chunks; i++) {
1650		u64 cur_end = min(end, start + SZ_512K - 1);
1651
1652		/*
1653		 * igrab is called higher up in the call chain, take only the
1654		 * lightweight reference for the callback lifetime
1655		 */
1656		ihold(&inode->vfs_inode);
1657		async_chunk[i].async_cow = ctx;
1658		async_chunk[i].inode = inode;
1659		async_chunk[i].start = start;
1660		async_chunk[i].end = cur_end;
1661		async_chunk[i].write_flags = write_flags;
1662		INIT_LIST_HEAD(&async_chunk[i].extents);
1663
1664		/*
1665		 * The locked_page comes all the way from writepage and its
1666		 * the original page we were actually given.  As we spread
1667		 * this large delalloc region across multiple async_chunk
1668		 * structs, only the first struct needs a pointer to locked_page
1669		 *
1670		 * This way we don't need racey decisions about who is supposed
1671		 * to unlock it.
1672		 */
1673		if (locked_page) {
1674			/*
1675			 * Depending on the compressibility, the pages might or
1676			 * might not go through async.  We want all of them to
1677			 * be accounted against wbc once.  Let's do it here
1678			 * before the paths diverge.  wbc accounting is used
1679			 * only for foreign writeback detection and doesn't
1680			 * need full accuracy.  Just account the whole thing
1681			 * against the first page.
1682			 */
1683			wbc_account_cgroup_owner(wbc, locked_page,
1684						 cur_end - start);
1685			async_chunk[i].locked_page = locked_page;
1686			locked_page = NULL;
1687		} else {
1688			async_chunk[i].locked_page = NULL;
1689		}
1690
1691		if (blkcg_css != blkcg_root_css) {
1692			css_get(blkcg_css);
1693			async_chunk[i].blkcg_css = blkcg_css;
1694			async_chunk[i].write_flags |= REQ_BTRFS_CGROUP_PUNT;
1695		} else {
1696			async_chunk[i].blkcg_css = NULL;
1697		}
1698
1699		btrfs_init_work(&async_chunk[i].work, compress_file_range,
1700				submit_compressed_extents);
1701
1702		nr_pages = DIV_ROUND_UP(cur_end - start, PAGE_SIZE);
1703		atomic_add(nr_pages, &fs_info->async_delalloc_pages);
1704
1705		btrfs_queue_work(fs_info->delalloc_workers, &async_chunk[i].work);
1706
1707		start = cur_end + 1;
1708	}
1709	return true;
1710}
1711
1712/*
1713 * Run the delalloc range from start to end, and write back any dirty pages
1714 * covered by the range.
1715 */
1716static noinline int run_delalloc_cow(struct btrfs_inode *inode,
1717				     struct page *locked_page, u64 start,
1718				     u64 end, struct writeback_control *wbc,
1719				     bool pages_dirty)
1720{
1721	u64 done_offset = end;
1722	int ret;
1723
1724	while (start <= end) {
1725		ret = cow_file_range(inode, locked_page, start, end, &done_offset,
1726				     true, false);
1727		if (ret)
1728			return ret;
1729		extent_write_locked_range(&inode->vfs_inode, locked_page, start,
1730					  done_offset, wbc, pages_dirty);
1731		start = done_offset + 1;
1732	}
1733
1734	return 1;
1735}
1736
1737static noinline int csum_exist_in_range(struct btrfs_fs_info *fs_info,
1738					u64 bytenr, u64 num_bytes, bool nowait)
1739{
1740	struct btrfs_root *csum_root = btrfs_csum_root(fs_info, bytenr);
1741	struct btrfs_ordered_sum *sums;
1742	int ret;
1743	LIST_HEAD(list);
1744
1745	ret = btrfs_lookup_csums_list(csum_root, bytenr, bytenr + num_bytes - 1,
1746				      &list, 0, nowait);
1747	if (ret == 0 && list_empty(&list))
1748		return 0;
1749
1750	while (!list_empty(&list)) {
1751		sums = list_entry(list.next, struct btrfs_ordered_sum, list);
1752		list_del(&sums->list);
1753		kfree(sums);
1754	}
1755	if (ret < 0)
1756		return ret;
1757	return 1;
1758}
1759
1760static int fallback_to_cow(struct btrfs_inode *inode, struct page *locked_page,
1761			   const u64 start, const u64 end)
1762{
1763	const bool is_space_ino = btrfs_is_free_space_inode(inode);
1764	const bool is_reloc_ino = btrfs_is_data_reloc_root(inode->root);
1765	const u64 range_bytes = end + 1 - start;
1766	struct extent_io_tree *io_tree = &inode->io_tree;
1767	u64 range_start = start;
1768	u64 count;
1769	int ret;
1770
1771	/*
1772	 * If EXTENT_NORESERVE is set it means that when the buffered write was
1773	 * made we had not enough available data space and therefore we did not
1774	 * reserve data space for it, since we though we could do NOCOW for the
1775	 * respective file range (either there is prealloc extent or the inode
1776	 * has the NOCOW bit set).
1777	 *
1778	 * However when we need to fallback to COW mode (because for example the
1779	 * block group for the corresponding extent was turned to RO mode by a
1780	 * scrub or relocation) we need to do the following:
1781	 *
1782	 * 1) We increment the bytes_may_use counter of the data space info.
1783	 *    If COW succeeds, it allocates a new data extent and after doing
1784	 *    that it decrements the space info's bytes_may_use counter and
1785	 *    increments its bytes_reserved counter by the same amount (we do
1786	 *    this at btrfs_add_reserved_bytes()). So we need to increment the
1787	 *    bytes_may_use counter to compensate (when space is reserved at
1788	 *    buffered write time, the bytes_may_use counter is incremented);
1789	 *
1790	 * 2) We clear the EXTENT_NORESERVE bit from the range. We do this so
1791	 *    that if the COW path fails for any reason, it decrements (through
1792	 *    extent_clear_unlock_delalloc()) the bytes_may_use counter of the
1793	 *    data space info, which we incremented in the step above.
1794	 *
1795	 * If we need to fallback to cow and the inode corresponds to a free
1796	 * space cache inode or an inode of the data relocation tree, we must
1797	 * also increment bytes_may_use of the data space_info for the same
1798	 * reason. Space caches and relocated data extents always get a prealloc
1799	 * extent for them, however scrub or balance may have set the block
1800	 * group that contains that extent to RO mode and therefore force COW
1801	 * when starting writeback.
1802	 */
1803	count = count_range_bits(io_tree, &range_start, end, range_bytes,
1804				 EXTENT_NORESERVE, 0, NULL);
1805	if (count > 0 || is_space_ino || is_reloc_ino) {
1806		u64 bytes = count;
1807		struct btrfs_fs_info *fs_info = inode->root->fs_info;
1808		struct btrfs_space_info *sinfo = fs_info->data_sinfo;
1809
1810		if (is_space_ino || is_reloc_ino)
1811			bytes = range_bytes;
1812
1813		spin_lock(&sinfo->lock);
1814		btrfs_space_info_update_bytes_may_use(fs_info, sinfo, bytes);
1815		spin_unlock(&sinfo->lock);
1816
1817		if (count > 0)
1818			clear_extent_bit(io_tree, start, end, EXTENT_NORESERVE,
1819					 NULL);
1820	}
1821
1822	/*
1823	 * Don't try to create inline extents, as a mix of inline extent that
1824	 * is written out and unlocked directly and a normal NOCOW extent
1825	 * doesn't work.
1826	 */
1827	ret = cow_file_range(inode, locked_page, start, end, NULL, false, true);
1828	ASSERT(ret != 1);
1829	return ret;
1830}
1831
1832struct can_nocow_file_extent_args {
1833	/* Input fields. */
1834
1835	/* Start file offset of the range we want to NOCOW. */
1836	u64 start;
1837	/* End file offset (inclusive) of the range we want to NOCOW. */
1838	u64 end;
1839	bool writeback_path;
1840	bool strict;
1841	/*
1842	 * Free the path passed to can_nocow_file_extent() once it's not needed
1843	 * anymore.
1844	 */
1845	bool free_path;
1846
1847	/* Output fields. Only set when can_nocow_file_extent() returns 1. */
1848
1849	u64 disk_bytenr;
1850	u64 disk_num_bytes;
1851	u64 extent_offset;
1852	/* Number of bytes that can be written to in NOCOW mode. */
1853	u64 num_bytes;
1854};
1855
1856/*
1857 * Check if we can NOCOW the file extent that the path points to.
1858 * This function may return with the path released, so the caller should check
1859 * if path->nodes[0] is NULL or not if it needs to use the path afterwards.
1860 *
1861 * Returns: < 0 on error
1862 *            0 if we can not NOCOW
1863 *            1 if we can NOCOW
1864 */
1865static int can_nocow_file_extent(struct btrfs_path *path,
1866				 struct btrfs_key *key,
1867				 struct btrfs_inode *inode,
1868				 struct can_nocow_file_extent_args *args)
1869{
1870	const bool is_freespace_inode = btrfs_is_free_space_inode(inode);
1871	struct extent_buffer *leaf = path->nodes[0];
1872	struct btrfs_root *root = inode->root;
1873	struct btrfs_file_extent_item *fi;
1874	u64 extent_end;
1875	u8 extent_type;
1876	int can_nocow = 0;
1877	int ret = 0;
1878	bool nowait = path->nowait;
1879
1880	fi = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);
1881	extent_type = btrfs_file_extent_type(leaf, fi);
1882
1883	if (extent_type == BTRFS_FILE_EXTENT_INLINE)
1884		goto out;
1885
1886	/* Can't access these fields unless we know it's not an inline extent. */
1887	args->disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
1888	args->disk_num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
1889	args->extent_offset = btrfs_file_extent_offset(leaf, fi);
1890
1891	if (!(inode->flags & BTRFS_INODE_NODATACOW) &&
1892	    extent_type == BTRFS_FILE_EXTENT_REG)
1893		goto out;
1894
1895	/*
1896	 * If the extent was created before the generation where the last snapshot
1897	 * for its subvolume was created, then this implies the extent is shared,
1898	 * hence we must COW.
1899	 */
1900	if (!args->strict &&
1901	    btrfs_file_extent_generation(leaf, fi) <=
1902	    btrfs_root_last_snapshot(&root->root_item))
1903		goto out;
1904
1905	/* An explicit hole, must COW. */
1906	if (args->disk_bytenr == 0)
1907		goto out;
1908
1909	/* Compressed/encrypted/encoded extents must be COWed. */
1910	if (btrfs_file_extent_compression(leaf, fi) ||
1911	    btrfs_file_extent_encryption(leaf, fi) ||
1912	    btrfs_file_extent_other_encoding(leaf, fi))
1913		goto out;
1914
1915	extent_end = btrfs_file_extent_end(path);
1916
1917	/*
1918	 * The following checks can be expensive, as they need to take other
1919	 * locks and do btree or rbtree searches, so release the path to avoid
1920	 * blocking other tasks for too long.
1921	 */
1922	btrfs_release_path(path);
1923
1924	ret = btrfs_cross_ref_exist(root, btrfs_ino(inode),
1925				    key->offset - args->extent_offset,
1926				    args->disk_bytenr, args->strict, path);
1927	WARN_ON_ONCE(ret > 0 && is_freespace_inode);
1928	if (ret != 0)
1929		goto out;
1930
1931	if (args->free_path) {
1932		/*
1933		 * We don't need the path anymore, plus through the
1934		 * csum_exist_in_range() call below we will end up allocating
1935		 * another path. So free the path to avoid unnecessary extra
1936		 * memory usage.
1937		 */
1938		btrfs_free_path(path);
1939		path = NULL;
1940	}
1941
1942	/* If there are pending snapshots for this root, we must COW. */
1943	if (args->writeback_path && !is_freespace_inode &&
1944	    atomic_read(&root->snapshot_force_cow))
1945		goto out;
1946
1947	args->disk_bytenr += args->extent_offset;
1948	args->disk_bytenr += args->start - key->offset;
1949	args->num_bytes = min(args->end + 1, extent_end) - args->start;
1950
1951	/*
1952	 * Force COW if csums exist in the range. This ensures that csums for a
1953	 * given extent are either valid or do not exist.
1954	 */
1955	ret = csum_exist_in_range(root->fs_info, args->disk_bytenr, args->num_bytes,
1956				  nowait);
1957	WARN_ON_ONCE(ret > 0 && is_freespace_inode);
1958	if (ret != 0)
1959		goto out;
1960
1961	can_nocow = 1;
1962 out:
1963	if (args->free_path && path)
1964		btrfs_free_path(path);
1965
1966	return ret < 0 ? ret : can_nocow;
1967}
1968
1969/*
1970 * when nowcow writeback call back.  This checks for snapshots or COW copies
1971 * of the extents that exist in the file, and COWs the file as required.
1972 *
1973 * If no cow copies or snapshots exist, we write directly to the existing
1974 * blocks on disk
1975 */
1976static noinline int run_delalloc_nocow(struct btrfs_inode *inode,
1977				       struct page *locked_page,
1978				       const u64 start, const u64 end)
1979{
1980	struct btrfs_fs_info *fs_info = inode->root->fs_info;
1981	struct btrfs_root *root = inode->root;
1982	struct btrfs_path *path;
1983	u64 cow_start = (u64)-1;
1984	u64 cur_offset = start;
1985	int ret;
1986	bool check_prev = true;
1987	u64 ino = btrfs_ino(inode);
1988	struct can_nocow_file_extent_args nocow_args = { 0 };
1989
1990	/*
1991	 * Normally on a zoned device we're only doing COW writes, but in case
1992	 * of relocation on a zoned filesystem serializes I/O so that we're only
1993	 * writing sequentially and can end up here as well.
1994	 */
1995	ASSERT(!btrfs_is_zoned(fs_info) || btrfs_is_data_reloc_root(root));
1996
1997	path = btrfs_alloc_path();
1998	if (!path) {
1999		ret = -ENOMEM;
2000		goto error;
2001	}
2002
2003	nocow_args.end = end;
2004	nocow_args.writeback_path = true;
2005
2006	while (1) {
2007		struct btrfs_block_group *nocow_bg = NULL;
2008		struct btrfs_ordered_extent *ordered;
2009		struct btrfs_key found_key;
2010		struct btrfs_file_extent_item *fi;
2011		struct extent_buffer *leaf;
2012		u64 extent_end;
2013		u64 ram_bytes;
2014		u64 nocow_end;
2015		int extent_type;
2016		bool is_prealloc;
2017
2018		ret = btrfs_lookup_file_extent(NULL, root, path, ino,
2019					       cur_offset, 0);
2020		if (ret < 0)
2021			goto error;
2022
2023		/*
2024		 * If there is no extent for our range when doing the initial
2025		 * search, then go back to the previous slot as it will be the
2026		 * one containing the search offset
2027		 */
2028		if (ret > 0 && path->slots[0] > 0 && check_prev) {
2029			leaf = path->nodes[0];
2030			btrfs_item_key_to_cpu(leaf, &found_key,
2031					      path->slots[0] - 1);
2032			if (found_key.objectid == ino &&
2033			    found_key.type == BTRFS_EXTENT_DATA_KEY)
2034				path->slots[0]--;
2035		}
2036		check_prev = false;
2037next_slot:
2038		/* Go to next leaf if we have exhausted the current one */
2039		leaf = path->nodes[0];
2040		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
2041			ret = btrfs_next_leaf(root, path);
2042			if (ret < 0)
2043				goto error;
2044			if (ret > 0)
2045				break;
2046			leaf = path->nodes[0];
2047		}
2048
2049		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2050
2051		/* Didn't find anything for our INO */
2052		if (found_key.objectid > ino)
2053			break;
2054		/*
2055		 * Keep searching until we find an EXTENT_ITEM or there are no
2056		 * more extents for this inode
2057		 */
2058		if (WARN_ON_ONCE(found_key.objectid < ino) ||
2059		    found_key.type < BTRFS_EXTENT_DATA_KEY) {
2060			path->slots[0]++;
2061			goto next_slot;
2062		}
2063
2064		/* Found key is not EXTENT_DATA_KEY or starts after req range */
2065		if (found_key.type > BTRFS_EXTENT_DATA_KEY ||
2066		    found_key.offset > end)
2067			break;
2068
2069		/*
2070		 * If the found extent starts after requested offset, then
2071		 * adjust extent_end to be right before this extent begins
2072		 */
2073		if (found_key.offset > cur_offset) {
2074			extent_end = found_key.offset;
2075			extent_type = 0;
2076			goto must_cow;
2077		}
2078
2079		/*
2080		 * Found extent which begins before our range and potentially
2081		 * intersect it
2082		 */
2083		fi = btrfs_item_ptr(leaf, path->slots[0],
2084				    struct btrfs_file_extent_item);
2085		extent_type = btrfs_file_extent_type(leaf, fi);
2086		/* If this is triggered then we have a memory corruption. */
2087		ASSERT(extent_type < BTRFS_NR_FILE_EXTENT_TYPES);
2088		if (WARN_ON(extent_type >= BTRFS_NR_FILE_EXTENT_TYPES)) {
2089			ret = -EUCLEAN;
2090			goto error;
2091		}
2092		ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
2093		extent_end = btrfs_file_extent_end(path);
2094
2095		/*
2096		 * If the extent we got ends before our current offset, skip to
2097		 * the next extent.
2098		 */
2099		if (extent_end <= cur_offset) {
2100			path->slots[0]++;
2101			goto next_slot;
2102		}
2103
2104		nocow_args.start = cur_offset;
2105		ret = can_nocow_file_extent(path, &found_key, inode, &nocow_args);
2106		if (ret < 0)
2107			goto error;
2108		if (ret == 0)
2109			goto must_cow;
2110
2111		ret = 0;
2112		nocow_bg = btrfs_inc_nocow_writers(fs_info, nocow_args.disk_bytenr);
2113		if (!nocow_bg) {
2114must_cow:
2115			/*
2116			 * If we can't perform NOCOW writeback for the range,
2117			 * then record the beginning of the range that needs to
2118			 * be COWed.  It will be written out before the next
2119			 * NOCOW range if we find one, or when exiting this
2120			 * loop.
2121			 */
2122			if (cow_start == (u64)-1)
2123				cow_start = cur_offset;
2124			cur_offset = extent_end;
2125			if (cur_offset > end)
2126				break;
2127			if (!path->nodes[0])
2128				continue;
2129			path->slots[0]++;
2130			goto next_slot;
2131		}
2132
2133		/*
2134		 * COW range from cow_start to found_key.offset - 1. As the key
2135		 * will contain the beginning of the first extent that can be
2136		 * NOCOW, following one which needs to be COW'ed
2137		 */
2138		if (cow_start != (u64)-1) {
2139			ret = fallback_to_cow(inode, locked_page,
2140					      cow_start, found_key.offset - 1);
2141			cow_start = (u64)-1;
2142			if (ret) {
2143				btrfs_dec_nocow_writers(nocow_bg);
2144				goto error;
2145			}
2146		}
2147
2148		nocow_end = cur_offset + nocow_args.num_bytes - 1;
2149		is_prealloc = extent_type == BTRFS_FILE_EXTENT_PREALLOC;
2150		if (is_prealloc) {
2151			u64 orig_start = found_key.offset - nocow_args.extent_offset;
2152			struct extent_map *em;
2153
2154			em = create_io_em(inode, cur_offset, nocow_args.num_bytes,
2155					  orig_start,
2156					  nocow_args.disk_bytenr, /* block_start */
2157					  nocow_args.num_bytes, /* block_len */
2158					  nocow_args.disk_num_bytes, /* orig_block_len */
2159					  ram_bytes, BTRFS_COMPRESS_NONE,
2160					  BTRFS_ORDERED_PREALLOC);
2161			if (IS_ERR(em)) {
2162				btrfs_dec_nocow_writers(nocow_bg);
2163				ret = PTR_ERR(em);
2164				goto error;
2165			}
2166			free_extent_map(em);
2167		}
2168
2169		ordered = btrfs_alloc_ordered_extent(inode, cur_offset,
2170				nocow_args.num_bytes, nocow_args.num_bytes,
2171				nocow_args.disk_bytenr, nocow_args.num_bytes, 0,
2172				is_prealloc
2173				? (1 << BTRFS_ORDERED_PREALLOC)
2174				: (1 << BTRFS_ORDERED_NOCOW),
2175				BTRFS_COMPRESS_NONE);
2176		btrfs_dec_nocow_writers(nocow_bg);
2177		if (IS_ERR(ordered)) {
2178			if (is_prealloc) {
2179				btrfs_drop_extent_map_range(inode, cur_offset,
2180							    nocow_end, false);
2181			}
2182			ret = PTR_ERR(ordered);
2183			goto error;
2184		}
2185
2186		if (btrfs_is_data_reloc_root(root))
2187			/*
2188			 * Error handled later, as we must prevent
2189			 * extent_clear_unlock_delalloc() in error handler
2190			 * from freeing metadata of created ordered extent.
2191			 */
2192			ret = btrfs_reloc_clone_csums(ordered);
2193		btrfs_put_ordered_extent(ordered);
2194
2195		extent_clear_unlock_delalloc(inode, cur_offset, nocow_end,
2196					     locked_page, EXTENT_LOCKED |
2197					     EXTENT_DELALLOC |
2198					     EXTENT_CLEAR_DATA_RESV,
2199					     PAGE_UNLOCK | PAGE_SET_ORDERED);
2200
2201		cur_offset = extent_end;
2202
2203		/*
2204		 * btrfs_reloc_clone_csums() error, now we're OK to call error
2205		 * handler, as metadata for created ordered extent will only
2206		 * be freed by btrfs_finish_ordered_io().
2207		 */
2208		if (ret)
2209			goto error;
2210		if (cur_offset > end)
2211			break;
2212	}
2213	btrfs_release_path(path);
2214
2215	if (cur_offset <= end && cow_start == (u64)-1)
2216		cow_start = cur_offset;
2217
2218	if (cow_start != (u64)-1) {
2219		cur_offset = end;
2220		ret = fallback_to_cow(inode, locked_page, cow_start, end);
2221		cow_start = (u64)-1;
2222		if (ret)
2223			goto error;
2224	}
2225
2226	btrfs_free_path(path);
2227	return 0;
2228
2229error:
2230	/*
2231	 * If an error happened while a COW region is outstanding, cur_offset
2232	 * needs to be reset to cow_start to ensure the COW region is unlocked
2233	 * as well.
2234	 */
2235	if (cow_start != (u64)-1)
2236		cur_offset = cow_start;
2237	if (cur_offset < end)
2238		extent_clear_unlock_delalloc(inode, cur_offset, end,
2239					     locked_page, EXTENT_LOCKED |
2240					     EXTENT_DELALLOC | EXTENT_DEFRAG |
2241					     EXTENT_DO_ACCOUNTING, PAGE_UNLOCK |
2242					     PAGE_START_WRITEBACK |
2243					     PAGE_END_WRITEBACK);
2244	btrfs_free_path(path);
2245	return ret;
2246}
2247
2248static bool should_nocow(struct btrfs_inode *inode, u64 start, u64 end)
2249{
2250	if (inode->flags & (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)) {
2251		if (inode->defrag_bytes &&
2252		    test_range_bit_exists(&inode->io_tree, start, end, EXTENT_DEFRAG))
2253			return false;
2254		return true;
2255	}
2256	return false;
2257}
2258
2259/*
2260 * Function to process delayed allocation (create CoW) for ranges which are
2261 * being touched for the first time.
2262 */
2263int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct page *locked_page,
2264			     u64 start, u64 end, struct writeback_control *wbc)
2265{
2266	const bool zoned = btrfs_is_zoned(inode->root->fs_info);
2267	int ret;
2268
2269	/*
2270	 * The range must cover part of the @locked_page, or a return of 1
2271	 * can confuse the caller.
2272	 */
2273	ASSERT(!(end <= page_offset(locked_page) ||
2274		 start >= page_offset(locked_page) + PAGE_SIZE));
2275
2276	if (should_nocow(inode, start, end)) {
2277		ret = run_delalloc_nocow(inode, locked_page, start, end);
2278		goto out;
2279	}
2280
2281	if (btrfs_inode_can_compress(inode) &&
2282	    inode_need_compress(inode, start, end) &&
2283	    run_delalloc_compressed(inode, locked_page, start, end, wbc))
2284		return 1;
2285
2286	if (zoned)
2287		ret = run_delalloc_cow(inode, locked_page, start, end, wbc,
2288				       true);
2289	else
2290		ret = cow_file_range(inode, locked_page, start, end, NULL,
2291				     false, false);
2292
2293out:
2294	if (ret < 0)
2295		btrfs_cleanup_ordered_extents(inode, locked_page, start,
2296					      end - start + 1);
2297	return ret;
2298}
2299
2300void btrfs_split_delalloc_extent(struct btrfs_inode *inode,
2301				 struct extent_state *orig, u64 split)
2302{
2303	struct btrfs_fs_info *fs_info = inode->root->fs_info;
2304	u64 size;
2305
2306	lockdep_assert_held(&inode->io_tree.lock);
2307
2308	/* not delalloc, ignore it */
2309	if (!(orig->state & EXTENT_DELALLOC))
2310		return;
2311
2312	size = orig->end - orig->start + 1;
2313	if (size > fs_info->max_extent_size) {
2314		u32 num_extents;
2315		u64 new_size;
2316
2317		/*
2318		 * See the explanation in btrfs_merge_delalloc_extent, the same
2319		 * applies here, just in reverse.
2320		 */
2321		new_size = orig->end - split + 1;
2322		num_extents = count_max_extents(fs_info, new_size);
2323		new_size = split - orig->start;
2324		num_extents += count_max_extents(fs_info, new_size);
2325		if (count_max_extents(fs_info, size) >= num_extents)
2326			return;
2327	}
2328
2329	spin_lock(&inode->lock);
2330	btrfs_mod_outstanding_extents(inode, 1);
2331	spin_unlock(&inode->lock);
2332}
2333
2334/*
2335 * Handle merged delayed allocation extents so we can keep track of new extents
2336 * that are just merged onto old extents, such as when we are doing sequential
2337 * writes, so we can properly account for the metadata space we'll need.
2338 */
2339void btrfs_merge_delalloc_extent(struct btrfs_inode *inode, struct extent_state *new,
2340				 struct extent_state *other)
2341{
2342	struct btrfs_fs_info *fs_info = inode->root->fs_info;
2343	u64 new_size, old_size;
2344	u32 num_extents;
2345
2346	lockdep_assert_held(&inode->io_tree.lock);
2347
2348	/* not delalloc, ignore it */
2349	if (!(other->state & EXTENT_DELALLOC))
2350		return;
2351
2352	if (new->start > other->start)
2353		new_size = new->end - other->start + 1;
2354	else
2355		new_size = other->end - new->start + 1;
2356
2357	/* we're not bigger than the max, unreserve the space and go */
2358	if (new_size <= fs_info->max_extent_size) {
2359		spin_lock(&inode->lock);
2360		btrfs_mod_outstanding_extents(inode, -1);
2361		spin_unlock(&inode->lock);
2362		return;
2363	}
2364
2365	/*
2366	 * We have to add up either side to figure out how many extents were
2367	 * accounted for before we merged into one big extent.  If the number of
2368	 * extents we accounted for is <= the amount we need for the new range
2369	 * then we can return, otherwise drop.  Think of it like this
2370	 *
2371	 * [ 4k][MAX_SIZE]
2372	 *
2373	 * So we've grown the extent by a MAX_SIZE extent, this would mean we
2374	 * need 2 outstanding extents, on one side we have 1 and the other side
2375	 * we have 1 so they are == and we can return.  But in this case
2376	 *
2377	 * [MAX_SIZE+4k][MAX_SIZE+4k]
2378	 *
2379	 * Each range on their own accounts for 2 extents, but merged together
2380	 * they are only 3 extents worth of accounting, so we need to drop in
2381	 * this case.
2382	 */
2383	old_size = other->end - other->start + 1;
2384	num_extents = count_max_extents(fs_info, old_size);
2385	old_size = new->end - new->start + 1;
2386	num_extents += count_max_extents(fs_info, old_size);
2387	if (count_max_extents(fs_info, new_size) >= num_extents)
2388		return;
2389
2390	spin_lock(&inode->lock);
2391	btrfs_mod_outstanding_extents(inode, -1);
2392	spin_unlock(&inode->lock);
2393}
2394
2395static void btrfs_add_delalloc_inode(struct btrfs_inode *inode)
2396{
2397	struct btrfs_root *root = inode->root;
2398	struct btrfs_fs_info *fs_info = root->fs_info;
2399
2400	spin_lock(&root->delalloc_lock);
2401	ASSERT(list_empty(&inode->delalloc_inodes));
2402	list_add_tail(&inode->delalloc_inodes, &root->delalloc_inodes);
2403	root->nr_delalloc_inodes++;
2404	if (root->nr_delalloc_inodes == 1) {
2405		spin_lock(&fs_info->delalloc_root_lock);
2406		ASSERT(list_empty(&root->delalloc_root));
2407		list_add_tail(&root->delalloc_root, &fs_info->delalloc_roots);
2408		spin_unlock(&fs_info->delalloc_root_lock);
2409	}
2410	spin_unlock(&root->delalloc_lock);
2411}
2412
2413void btrfs_del_delalloc_inode(struct btrfs_inode *inode)
2414{
2415	struct btrfs_root *root = inode->root;
2416	struct btrfs_fs_info *fs_info = root->fs_info;
2417
2418	lockdep_assert_held(&root->delalloc_lock);
2419
2420	/*
2421	 * We may be called after the inode was already deleted from the list,
2422	 * namely in the transaction abort path btrfs_destroy_delalloc_inodes(),
2423	 * and then later through btrfs_clear_delalloc_extent() while the inode
2424	 * still has ->delalloc_bytes > 0.
2425	 */
2426	if (!list_empty(&inode->delalloc_inodes)) {
2427		list_del_init(&inode->delalloc_inodes);
2428		root->nr_delalloc_inodes--;
2429		if (!root->nr_delalloc_inodes) {
2430			ASSERT(list_empty(&root->delalloc_inodes));
2431			spin_lock(&fs_info->delalloc_root_lock);
2432			ASSERT(!list_empty(&root->delalloc_root));
2433			list_del_init(&root->delalloc_root);
2434			spin_unlock(&fs_info->delalloc_root_lock);
2435		}
2436	}
2437}
2438
2439/*
2440 * Properly track delayed allocation bytes in the inode and to maintain the
2441 * list of inodes that have pending delalloc work to be done.
2442 */
2443void btrfs_set_delalloc_extent(struct btrfs_inode *inode, struct extent_state *state,
2444			       u32 bits)
2445{
2446	struct btrfs_fs_info *fs_info = inode->root->fs_info;
2447
2448	lockdep_assert_held(&inode->io_tree.lock);
2449
2450	if ((bits & EXTENT_DEFRAG) && !(bits & EXTENT_DELALLOC))
2451		WARN_ON(1);
2452	/*
2453	 * set_bit and clear bit hooks normally require _irqsave/restore
2454	 * but in this case, we are only testing for the DELALLOC
2455	 * bit, which is only set or cleared with irqs on
2456	 */
2457	if (!(state->state & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
2458		u64 len = state->end + 1 - state->start;
2459		u64 prev_delalloc_bytes;
2460		u32 num_extents = count_max_extents(fs_info, len);
2461
2462		spin_lock(&inode->lock);
2463		btrfs_mod_outstanding_extents(inode, num_extents);
2464		spin_unlock(&inode->lock);
2465
2466		/* For sanity tests */
2467		if (btrfs_is_testing(fs_info))
2468			return;
2469
2470		percpu_counter_add_batch(&fs_info->delalloc_bytes, len,
2471					 fs_info->delalloc_batch);
2472		spin_lock(&inode->lock);
2473		prev_delalloc_bytes = inode->delalloc_bytes;
2474		inode->delalloc_bytes += len;
2475		if (bits & EXTENT_DEFRAG)
2476			inode->defrag_bytes += len;
2477		spin_unlock(&inode->lock);
2478
2479		/*
2480		 * We don't need to be under the protection of the inode's lock,
2481		 * because we are called while holding the inode's io_tree lock
2482		 * and are therefore protected against concurrent calls of this
2483		 * function and btrfs_clear_delalloc_extent().
2484		 */
2485		if (!btrfs_is_free_space_inode(inode) && prev_delalloc_bytes == 0)
2486			btrfs_add_delalloc_inode(inode);
2487	}
2488
2489	if (!(state->state & EXTENT_DELALLOC_NEW) &&
2490	    (bits & EXTENT_DELALLOC_NEW)) {
2491		spin_lock(&inode->lock);
2492		inode->new_delalloc_bytes += state->end + 1 - state->start;
2493		spin_unlock(&inode->lock);
2494	}
2495}
2496
2497/*
2498 * Once a range is no longer delalloc this function ensures that proper
2499 * accounting happens.
2500 */
2501void btrfs_clear_delalloc_extent(struct btrfs_inode *inode,
2502				 struct extent_state *state, u32 bits)
2503{
2504	struct btrfs_fs_info *fs_info = inode->root->fs_info;
2505	u64 len = state->end + 1 - state->start;
2506	u32 num_extents = count_max_extents(fs_info, len);
2507
2508	lockdep_assert_held(&inode->io_tree.lock);
2509
2510	if ((state->state & EXTENT_DEFRAG) && (bits & EXTENT_DEFRAG)) {
2511		spin_lock(&inode->lock);
2512		inode->defrag_bytes -= len;
2513		spin_unlock(&inode->lock);
2514	}
2515
2516	/*
2517	 * set_bit and clear bit hooks normally require _irqsave/restore
2518	 * but in this case, we are only testing for the DELALLOC
2519	 * bit, which is only set or cleared with irqs on
2520	 */
2521	if ((state->state & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
2522		struct btrfs_root *root = inode->root;
2523		u64 new_delalloc_bytes;
2524
2525		spin_lock(&inode->lock);
2526		btrfs_mod_outstanding_extents(inode, -num_extents);
2527		spin_unlock(&inode->lock);
2528
2529		/*
2530		 * We don't reserve metadata space for space cache inodes so we
2531		 * don't need to call delalloc_release_metadata if there is an
2532		 * error.
2533		 */
2534		if (bits & EXTENT_CLEAR_META_RESV &&
2535		    root != fs_info->tree_root)
2536			btrfs_delalloc_release_metadata(inode, len, true);
2537
2538		/* For sanity tests. */
2539		if (btrfs_is_testing(fs_info))
2540			return;
2541
2542		if (!btrfs_is_data_reloc_root(root) &&
2543		    !btrfs_is_free_space_inode(inode) &&
2544		    !(state->state & EXTENT_NORESERVE) &&
2545		    (bits & EXTENT_CLEAR_DATA_RESV))
2546			btrfs_free_reserved_data_space_noquota(fs_info, len);
2547
2548		percpu_counter_add_batch(&fs_info->delalloc_bytes, -len,
2549					 fs_info->delalloc_batch);
2550		spin_lock(&inode->lock);
2551		inode->delalloc_bytes -= len;
2552		new_delalloc_bytes = inode->delalloc_bytes;
2553		spin_unlock(&inode->lock);
2554
2555		/*
2556		 * We don't need to be under the protection of the inode's lock,
2557		 * because we are called while holding the inode's io_tree lock
2558		 * and are therefore protected against concurrent calls of this
2559		 * function and btrfs_set_delalloc_extent().
2560		 */
2561		if (!btrfs_is_free_space_inode(inode) && new_delalloc_bytes == 0) {
2562			spin_lock(&root->delalloc_lock);
2563			btrfs_del_delalloc_inode(inode);
2564			spin_unlock(&root->delalloc_lock);
2565		}
2566	}
2567
2568	if ((state->state & EXTENT_DELALLOC_NEW) &&
2569	    (bits & EXTENT_DELALLOC_NEW)) {
2570		spin_lock(&inode->lock);
2571		ASSERT(inode->new_delalloc_bytes >= len);
2572		inode->new_delalloc_bytes -= len;
2573		if (bits & EXTENT_ADD_INODE_BYTES)
2574			inode_add_bytes(&inode->vfs_inode, len);
2575		spin_unlock(&inode->lock);
2576	}
2577}
2578
2579static int btrfs_extract_ordered_extent(struct btrfs_bio *bbio,
2580					struct btrfs_ordered_extent *ordered)
2581{
2582	u64 start = (u64)bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT;
2583	u64 len = bbio->bio.bi_iter.bi_size;
2584	struct btrfs_ordered_extent *new;
2585	int ret;
2586
2587	/* Must always be called for the beginning of an ordered extent. */
2588	if (WARN_ON_ONCE(start != ordered->disk_bytenr))
2589		return -EINVAL;
2590
2591	/* No need to split if the ordered extent covers the entire bio. */
2592	if (ordered->disk_num_bytes == len) {
2593		refcount_inc(&ordered->refs);
2594		bbio->ordered = ordered;
2595		return 0;
2596	}
2597
2598	/*
2599	 * Don't split the extent_map for NOCOW extents, as we're writing into
2600	 * a pre-existing one.
2601	 */
2602	if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags)) {
2603		ret = split_extent_map(bbio->inode, bbio->file_offset,
2604				       ordered->num_bytes, len,
2605				       ordered->disk_bytenr);
2606		if (ret)
2607			return ret;
2608	}
2609
2610	new = btrfs_split_ordered_extent(ordered, len);
2611	if (IS_ERR(new))
2612		return PTR_ERR(new);
2613	bbio->ordered = new;
2614	return 0;
2615}
2616
2617/*
2618 * given a list of ordered sums record them in the inode.  This happens
2619 * at IO completion time based on sums calculated at bio submission time.
2620 */
2621static int add_pending_csums(struct btrfs_trans_handle *trans,
2622			     struct list_head *list)
2623{
2624	struct btrfs_ordered_sum *sum;
2625	struct btrfs_root *csum_root = NULL;
2626	int ret;
2627
2628	list_for_each_entry(sum, list, list) {
2629		trans->adding_csums = true;
2630		if (!csum_root)
2631			csum_root = btrfs_csum_root(trans->fs_info,
2632						    sum->logical);
2633		ret = btrfs_csum_file_blocks(trans, csum_root, sum);
2634		trans->adding_csums = false;
2635		if (ret)
2636			return ret;
2637	}
2638	return 0;
2639}
2640
2641static int btrfs_find_new_delalloc_bytes(struct btrfs_inode *inode,
2642					 const u64 start,
2643					 const u64 len,
2644					 struct extent_state **cached_state)
2645{
2646	u64 search_start = start;
2647	const u64 end = start + len - 1;
2648
2649	while (search_start < end) {
2650		const u64 search_len = end - search_start + 1;
2651		struct extent_map *em;
2652		u64 em_len;
2653		int ret = 0;
2654
2655		em = btrfs_get_extent(inode, NULL, search_start, search_len);
2656		if (IS_ERR(em))
2657			return PTR_ERR(em);
2658
2659		if (em->block_start != EXTENT_MAP_HOLE)
2660			goto next;
2661
2662		em_len = em->len;
2663		if (em->start < search_start)
2664			em_len -= search_start - em->start;
2665		if (em_len > search_len)
2666			em_len = search_len;
2667
2668		ret = set_extent_bit(&inode->io_tree, search_start,
2669				     search_start + em_len - 1,
2670				     EXTENT_DELALLOC_NEW, cached_state);
2671next:
2672		search_start = extent_map_end(em);
2673		free_extent_map(em);
2674		if (ret)
2675			return ret;
2676	}
2677	return 0;
2678}
2679
2680int btrfs_set_extent_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
2681			      unsigned int extra_bits,
2682			      struct extent_state **cached_state)
2683{
2684	WARN_ON(PAGE_ALIGNED(end));
2685
2686	if (start >= i_size_read(&inode->vfs_inode) &&
2687	    !(inode->flags & BTRFS_INODE_PREALLOC)) {
2688		/*
2689		 * There can't be any extents following eof in this case so just
2690		 * set the delalloc new bit for the range directly.
2691		 */
2692		extra_bits |= EXTENT_DELALLOC_NEW;
2693	} else {
2694		int ret;
2695
2696		ret = btrfs_find_new_delalloc_bytes(inode, start,
2697						    end + 1 - start,
2698						    cached_state);
2699		if (ret)
2700			return ret;
2701	}
2702
2703	return set_extent_bit(&inode->io_tree, start, end,
2704			      EXTENT_DELALLOC | extra_bits, cached_state);
2705}
2706
2707/* see btrfs_writepage_start_hook for details on why this is required */
2708struct btrfs_writepage_fixup {
2709	struct page *page;
2710	struct btrfs_inode *inode;
2711	struct btrfs_work work;
2712};
2713
2714static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
2715{
2716	struct btrfs_writepage_fixup *fixup =
2717		container_of(work, struct btrfs_writepage_fixup, work);
2718	struct btrfs_ordered_extent *ordered;
2719	struct extent_state *cached_state = NULL;
2720	struct extent_changeset *data_reserved = NULL;
2721	struct page *page = fixup->page;
2722	struct btrfs_inode *inode = fixup->inode;
2723	struct btrfs_fs_info *fs_info = inode->root->fs_info;
2724	u64 page_start = page_offset(page);
2725	u64 page_end = page_offset(page) + PAGE_SIZE - 1;
2726	int ret = 0;
2727	bool free_delalloc_space = true;
2728
2729	/*
2730	 * This is similar to page_mkwrite, we need to reserve the space before
2731	 * we take the page lock.
2732	 */
2733	ret = btrfs_delalloc_reserve_space(inode, &data_reserved, page_start,
2734					   PAGE_SIZE);
2735again:
2736	lock_page(page);
2737
2738	/*
2739	 * Before we queued this fixup, we took a reference on the page.
2740	 * page->mapping may go NULL, but it shouldn't be moved to a different
2741	 * address space.
2742	 */
2743	if (!page->mapping || !PageDirty(page) || !PageChecked(page)) {
2744		/*
2745		 * Unfortunately this is a little tricky, either
2746		 *
2747		 * 1) We got here and our page had already been dealt with and
2748		 *    we reserved our space, thus ret == 0, so we need to just
2749		 *    drop our space reservation and bail.  This can happen the
2750		 *    first time we come into the fixup worker, or could happen
2751		 *    while waiting for the ordered extent.
2752		 * 2) Our page was already dealt with, but we happened to get an
2753		 *    ENOSPC above from the btrfs_delalloc_reserve_space.  In
2754		 *    this case we obviously don't have anything to release, but
2755		 *    because the page was already dealt with we don't want to
2756		 *    mark the page with an error, so make sure we're resetting
2757		 *    ret to 0.  This is why we have this check _before_ the ret
2758		 *    check, because we do not want to have a surprise ENOSPC
2759		 *    when the page was already properly dealt with.
2760		 */
2761		if (!ret) {
2762			btrfs_delalloc_release_extents(inode, PAGE_SIZE);
2763			btrfs_delalloc_release_space(inode, data_reserved,
2764						     page_start, PAGE_SIZE,
2765						     true);
2766		}
2767		ret = 0;
2768		goto out_page;
2769	}
2770
2771	/*
2772	 * We can't mess with the page state unless it is locked, so now that
2773	 * it is locked bail if we failed to make our space reservation.
2774	 */
2775	if (ret)
2776		goto out_page;
2777
2778	lock_extent(&inode->io_tree, page_start, page_end, &cached_state);
2779
2780	/* already ordered? We're done */
2781	if (PageOrdered(page))
2782		goto out_reserved;
2783
2784	ordered = btrfs_lookup_ordered_range(inode, page_start, PAGE_SIZE);
2785	if (ordered) {
2786		unlock_extent(&inode->io_tree, page_start, page_end,
2787			      &cached_state);
2788		unlock_page(page);
2789		btrfs_start_ordered_extent(ordered);
2790		btrfs_put_ordered_extent(ordered);
2791		goto again;
2792	}
2793
2794	ret = btrfs_set_extent_delalloc(inode, page_start, page_end, 0,
2795					&cached_state);
2796	if (ret)
2797		goto out_reserved;
2798
2799	/*
2800	 * Everything went as planned, we're now the owner of a dirty page with
2801	 * delayed allocation bits set and space reserved for our COW
2802	 * destination.
2803	 *
2804	 * The page was dirty when we started, nothing should have cleaned it.
2805	 */
2806	BUG_ON(!PageDirty(page));
2807	free_delalloc_space = false;
2808out_reserved:
2809	btrfs_delalloc_release_extents(inode, PAGE_SIZE);
2810	if (free_delalloc_space)
2811		btrfs_delalloc_release_space(inode, data_reserved, page_start,
2812					     PAGE_SIZE, true);
2813	unlock_extent(&inode->io_tree, page_start, page_end, &cached_state);
2814out_page:
2815	if (ret) {
2816		/*
2817		 * We hit ENOSPC or other errors.  Update the mapping and page
2818		 * to reflect the errors and clean the page.
2819		 */
2820		mapping_set_error(page->mapping, ret);
2821		btrfs_mark_ordered_io_finished(inode, page, page_start,
2822					       PAGE_SIZE, !ret);
2823		clear_page_dirty_for_io(page);
2824	}
2825	btrfs_folio_clear_checked(fs_info, page_folio(page), page_start, PAGE_SIZE);
2826	unlock_page(page);
2827	put_page(page);
2828	kfree(fixup);
2829	extent_changeset_free(data_reserved);
2830	/*
2831	 * As a precaution, do a delayed iput in case it would be the last iput
2832	 * that could need flushing space. Recursing back to fixup worker would
2833	 * deadlock.
2834	 */
2835	btrfs_add_delayed_iput(inode);
2836}
2837
2838/*
2839 * There are a few paths in the higher layers of the kernel that directly
2840 * set the page dirty bit without asking the filesystem if it is a
2841 * good idea.  This causes problems because we want to make sure COW
2842 * properly happens and the data=ordered rules are followed.
2843 *
2844 * In our case any range that doesn't have the ORDERED bit set
2845 * hasn't been properly setup for IO.  We kick off an async process
2846 * to fix it up.  The async helper will wait for ordered extents, set
2847 * the delalloc bit and make it safe to write the page.
2848 */
2849int btrfs_writepage_cow_fixup(struct page *page)
2850{
2851	struct inode *inode = page->mapping->host;
2852	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
2853	struct btrfs_writepage_fixup *fixup;
2854
2855	/* This page has ordered extent covering it already */
2856	if (PageOrdered(page))
2857		return 0;
2858
2859	/*
2860	 * PageChecked is set below when we create a fixup worker for this page,
2861	 * don't try to create another one if we're already PageChecked()
2862	 *
2863	 * The extent_io writepage code will redirty the page if we send back
2864	 * EAGAIN.
2865	 */
2866	if (PageChecked(page))
2867		return -EAGAIN;
2868
2869	fixup = kzalloc(sizeof(*fixup), GFP_NOFS);
2870	if (!fixup)
2871		return -EAGAIN;
2872
2873	/*
2874	 * We are already holding a reference to this inode from
2875	 * write_cache_pages.  We need to hold it because the space reservation
2876	 * takes place outside of the page lock, and we can't trust
2877	 * page->mapping outside of the page lock.
2878	 */
2879	ihold(inode);
2880	btrfs_folio_set_checked(fs_info, page_folio(page), page_offset(page), PAGE_SIZE);
2881	get_page(page);
2882	btrfs_init_work(&fixup->work, btrfs_writepage_fixup_worker, NULL);
2883	fixup->page = page;
2884	fixup->inode = BTRFS_I(inode);
2885	btrfs_queue_work(fs_info->fixup_workers, &fixup->work);
2886
2887	return -EAGAIN;
2888}
2889
2890static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
2891				       struct btrfs_inode *inode, u64 file_pos,
2892				       struct btrfs_file_extent_item *stack_fi,
2893				       const bool update_inode_bytes,
2894				       u64 qgroup_reserved)
2895{
2896	struct btrfs_root *root = inode->root;
2897	const u64 sectorsize = root->fs_info->sectorsize;
2898	struct btrfs_path *path;
2899	struct extent_buffer *leaf;
2900	struct btrfs_key ins;
2901	u64 disk_num_bytes = btrfs_stack_file_extent_disk_num_bytes(stack_fi);
2902	u64 disk_bytenr = btrfs_stack_file_extent_disk_bytenr(stack_fi);
2903	u64 offset = btrfs_stack_file_extent_offset(stack_fi);
2904	u64 num_bytes = btrfs_stack_file_extent_num_bytes(stack_fi);
2905	u64 ram_bytes = btrfs_stack_file_extent_ram_bytes(stack_fi);
2906	struct btrfs_drop_extents_args drop_args = { 0 };
2907	int ret;
2908
2909	path = btrfs_alloc_path();
2910	if (!path)
2911		return -ENOMEM;
2912
2913	/*
2914	 * we may be replacing one extent in the tree with another.
2915	 * The new extent is pinned in the extent map, and we don't want
2916	 * to drop it from the cache until it is completely in the btree.
2917	 *
2918	 * So, tell btrfs_drop_extents to leave this extent in the cache.
2919	 * the caller is expected to unpin it and allow it to be merged
2920	 * with the others.
2921	 */
2922	drop_args.path = path;
2923	drop_args.start = file_pos;
2924	drop_args.end = file_pos + num_bytes;
2925	drop_args.replace_extent = true;
2926	drop_args.extent_item_size = sizeof(*stack_fi);
2927	ret = btrfs_drop_extents(trans, root, inode, &drop_args);
2928	if (ret)
2929		goto out;
2930
2931	if (!drop_args.extent_inserted) {
2932		ins.objectid = btrfs_ino(inode);
2933		ins.offset = file_pos;
2934		ins.type = BTRFS_EXTENT_DATA_KEY;
2935
2936		ret = btrfs_insert_empty_item(trans, root, path, &ins,
2937					      sizeof(*stack_fi));
2938		if (ret)
2939			goto out;
2940	}
2941	leaf = path->nodes[0];
2942	btrfs_set_stack_file_extent_generation(stack_fi, trans->transid);
2943	write_extent_buffer(leaf, stack_fi,
2944			btrfs_item_ptr_offset(leaf, path->slots[0]),
2945			sizeof(struct btrfs_file_extent_item));
2946
2947	btrfs_mark_buffer_dirty(trans, leaf);
2948	btrfs_release_path(path);
2949
2950	/*
2951	 * If we dropped an inline extent here, we know the range where it is
2952	 * was not marked with the EXTENT_DELALLOC_NEW bit, so we update the
2953	 * number of bytes only for that range containing the inline extent.
2954	 * The remaining of the range will be processed when clearning the
2955	 * EXTENT_DELALLOC_BIT bit through the ordered extent completion.
2956	 */
2957	if (file_pos == 0 && !IS_ALIGNED(drop_args.bytes_found, sectorsize)) {
2958		u64 inline_size = round_down(drop_args.bytes_found, sectorsize);
2959
2960		inline_size = drop_args.bytes_found - inline_size;
2961		btrfs_update_inode_bytes(inode, sectorsize, inline_size);
2962		drop_args.bytes_found -= inline_size;
2963		num_bytes -= sectorsize;
2964	}
2965
2966	if (update_inode_bytes)
2967		btrfs_update_inode_bytes(inode, num_bytes, drop_args.bytes_found);
2968
2969	ins.objectid = disk_bytenr;
2970	ins.offset = disk_num_bytes;
2971	ins.type = BTRFS_EXTENT_ITEM_KEY;
2972
2973	ret = btrfs_inode_set_file_extent_range(inode, file_pos, ram_bytes);
2974	if (ret)
2975		goto out;
2976
2977	ret = btrfs_alloc_reserved_file_extent(trans, root, btrfs_ino(inode),
2978					       file_pos - offset,
2979					       qgroup_reserved, &ins);
2980out:
2981	btrfs_free_path(path);
2982
2983	return ret;
2984}
2985
2986static void btrfs_release_delalloc_bytes(struct btrfs_fs_info *fs_info,
2987					 u64 start, u64 len)
2988{
2989	struct btrfs_block_group *cache;
2990
2991	cache = btrfs_lookup_block_group(fs_info, start);
2992	ASSERT(cache);
2993
2994	spin_lock(&cache->lock);
2995	cache->delalloc_bytes -= len;
2996	spin_unlock(&cache->lock);
2997
2998	btrfs_put_block_group(cache);
2999}
3000
3001static int insert_ordered_extent_file_extent(struct btrfs_trans_handle *trans,
3002					     struct btrfs_ordered_extent *oe)
3003{
3004	struct btrfs_file_extent_item stack_fi;
3005	bool update_inode_bytes;
3006	u64 num_bytes = oe->num_bytes;
3007	u64 ram_bytes = oe->ram_bytes;
3008
3009	memset(&stack_fi, 0, sizeof(stack_fi));
3010	btrfs_set_stack_file_extent_type(&stack_fi, BTRFS_FILE_EXTENT_REG);
3011	btrfs_set_stack_file_extent_disk_bytenr(&stack_fi, oe->disk_bytenr);
3012	btrfs_set_stack_file_extent_disk_num_bytes(&stack_fi,
3013						   oe->disk_num_bytes);
3014	btrfs_set_stack_file_extent_offset(&stack_fi, oe->offset);
3015	if (test_bit(BTRFS_ORDERED_TRUNCATED, &oe->flags)) {
3016		num_bytes = oe->truncated_len;
3017		ram_bytes = num_bytes;
3018	}
3019	btrfs_set_stack_file_extent_num_bytes(&stack_fi, num_bytes);
3020	btrfs_set_stack_file_extent_ram_bytes(&stack_fi, ram_bytes);
3021	btrfs_set_stack_file_extent_compression(&stack_fi, oe->compress_type);
3022	/* Encryption and other encoding is reserved and all 0 */
3023
3024	/*
3025	 * For delalloc, when completing an ordered extent we update the inode's
3026	 * bytes when clearing the range in the inode's io tree, so pass false
3027	 * as the argument 'update_inode_bytes' to insert_reserved_file_extent(),
3028	 * except if the ordered extent was truncated.
3029	 */
3030	update_inode_bytes = test_bit(BTRFS_ORDERED_DIRECT, &oe->flags) ||
3031			     test_bit(BTRFS_ORDERED_ENCODED, &oe->flags) ||
3032			     test_bit(BTRFS_ORDERED_TRUNCATED, &oe->flags);
3033
3034	return insert_reserved_file_extent(trans, BTRFS_I(oe->inode),
3035					   oe->file_offset, &stack_fi,
3036					   update_inode_bytes, oe->qgroup_rsv);
3037}
3038
3039/*
3040 * As ordered data IO finishes, this gets called so we can finish
3041 * an ordered extent if the range of bytes in the file it covers are
3042 * fully written.
3043 */
3044int btrfs_finish_one_ordered(struct btrfs_ordered_extent *ordered_extent)
3045{
3046	struct btrfs_inode *inode = BTRFS_I(ordered_extent->inode);
3047	struct btrfs_root *root = inode->root;
3048	struct btrfs_fs_info *fs_info = root->fs_info;
3049	struct btrfs_trans_handle *trans = NULL;
3050	struct extent_io_tree *io_tree = &inode->io_tree;
3051	struct extent_state *cached_state = NULL;
3052	u64 start, end;
3053	int compress_type = 0;
3054	int ret = 0;
3055	u64 logical_len = ordered_extent->num_bytes;
3056	bool freespace_inode;
3057	bool truncated = false;
3058	bool clear_reserved_extent = true;
3059	unsigned int clear_bits = EXTENT_DEFRAG;
3060
3061	start = ordered_extent->file_offset;
3062	end = start + ordered_extent->num_bytes - 1;
3063
3064	if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
3065	    !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags) &&
3066	    !test_bit(BTRFS_ORDERED_DIRECT, &ordered_extent->flags) &&
3067	    !test_bit(BTRFS_ORDERED_ENCODED, &ordered_extent->flags))
3068		clear_bits |= EXTENT_DELALLOC_NEW;
3069
3070	freespace_inode = btrfs_is_free_space_inode(inode);
3071	if (!freespace_inode)
3072		btrfs_lockdep_acquire(fs_info, btrfs_ordered_extent);
3073
3074	if (test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags)) {
3075		ret = -EIO;
3076		goto out;
3077	}
3078
3079	if (btrfs_is_zoned(fs_info))
3080		btrfs_zone_finish_endio(fs_info, ordered_extent->disk_bytenr,
3081					ordered_extent->disk_num_bytes);
3082
3083	if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags)) {
3084		truncated = true;
3085		logical_len = ordered_extent->truncated_len;
3086		/* Truncated the entire extent, don't bother adding */
3087		if (!logical_len)
3088			goto out;
3089	}
3090
3091	if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
3092		BUG_ON(!list_empty(&ordered_extent->list)); /* Logic error */
3093
3094		btrfs_inode_safe_disk_i_size_write(inode, 0);
3095		if (freespace_inode)
3096			trans = btrfs_join_transaction_spacecache(root);
3097		else
3098			trans = btrfs_join_transaction(root);
3099		if (IS_ERR(trans)) {
3100			ret = PTR_ERR(trans);
3101			trans = NULL;
3102			goto out;
3103		}
3104		trans->block_rsv = &inode->block_rsv;
3105		ret = btrfs_update_inode_fallback(trans, inode);
3106		if (ret) /* -ENOMEM or corruption */
3107			btrfs_abort_transaction(trans, ret);
3108		goto out;
3109	}
3110
3111	clear_bits |= EXTENT_LOCKED;
3112	lock_extent(io_tree, start, end, &cached_state);
3113
3114	if (freespace_inode)
3115		trans = btrfs_join_transaction_spacecache(root);
3116	else
3117		trans = btrfs_join_transaction(root);
3118	if (IS_ERR(trans)) {
3119		ret = PTR_ERR(trans);
3120		trans = NULL;
3121		goto out;
3122	}
3123
3124	trans->block_rsv = &inode->block_rsv;
3125
3126	ret = btrfs_insert_raid_extent(trans, ordered_extent);
3127	if (ret)
3128		goto out;
3129
3130	if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
3131		compress_type = ordered_extent->compress_type;
3132	if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
3133		BUG_ON(compress_type);
3134		ret = btrfs_mark_extent_written(trans, inode,
3135						ordered_extent->file_offset,
3136						ordered_extent->file_offset +
3137						logical_len);
3138		btrfs_zoned_release_data_reloc_bg(fs_info, ordered_extent->disk_bytenr,
3139						  ordered_extent->disk_num_bytes);
3140	} else {
3141		BUG_ON(root == fs_info->tree_root);
3142		ret = insert_ordered_extent_file_extent(trans, ordered_extent);
3143		if (!ret) {
3144			clear_reserved_extent = false;
3145			btrfs_release_delalloc_bytes(fs_info,
3146						ordered_extent->disk_bytenr,
3147						ordered_extent->disk_num_bytes);
3148		}
3149	}
3150	if (ret < 0) {
3151		btrfs_abort_transaction(trans, ret);
3152		goto out;
3153	}
3154
3155	ret = unpin_extent_cache(inode, ordered_extent->file_offset,
3156				 ordered_extent->num_bytes, trans->transid);
3157	if (ret < 0) {
3158		btrfs_abort_transaction(trans, ret);
3159		goto out;
3160	}
3161
3162	ret = add_pending_csums(trans, &ordered_extent->list);
3163	if (ret) {
3164		btrfs_abort_transaction(trans, ret);
3165		goto out;
3166	}
3167
3168	/*
3169	 * If this is a new delalloc range, clear its new delalloc flag to
3170	 * update the inode's number of bytes. This needs to be done first
3171	 * before updating the inode item.
3172	 */
3173	if ((clear_bits & EXTENT_DELALLOC_NEW) &&
3174	    !test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags))
3175		clear_extent_bit(&inode->io_tree, start, end,
3176				 EXTENT_DELALLOC_NEW | EXTENT_ADD_INODE_BYTES,
3177				 &cached_state);
3178
3179	btrfs_inode_safe_disk_i_size_write(inode, 0);
3180	ret = btrfs_update_inode_fallback(trans, inode);
3181	if (ret) { /* -ENOMEM or corruption */
3182		btrfs_abort_transaction(trans, ret);
3183		goto out;
3184	}
3185	ret = 0;
3186out:
3187	clear_extent_bit(&inode->io_tree, start, end, clear_bits,
3188			 &cached_state);
3189
3190	if (trans)
3191		btrfs_end_transaction(trans);
3192
3193	if (ret || truncated) {
3194		u64 unwritten_start = start;
3195
3196		/*
3197		 * If we failed to finish this ordered extent for any reason we
3198		 * need to make sure BTRFS_ORDERED_IOERR is set on the ordered
3199		 * extent, and mark the inode with the error if it wasn't
3200		 * already set.  Any error during writeback would have already
3201		 * set the mapping error, so we need to set it if we're the ones
3202		 * marking this ordered extent as failed.
3203		 */
3204		if (ret && !test_and_set_bit(BTRFS_ORDERED_IOERR,
3205					     &ordered_extent->flags))
3206			mapping_set_error(ordered_extent->inode->i_mapping, -EIO);
3207
3208		if (truncated)
3209			unwritten_start += logical_len;
3210		clear_extent_uptodate(io_tree, unwritten_start, end, NULL);
3211
3212		/*
3213		 * Drop extent maps for the part of the extent we didn't write.
3214		 *
3215		 * We have an exception here for the free_space_inode, this is
3216		 * because when we do btrfs_get_extent() on the free space inode
3217		 * we will search the commit root.  If this is a new block group
3218		 * we won't find anything, and we will trip over the assert in
3219		 * writepage where we do ASSERT(em->block_start !=
3220		 * EXTENT_MAP_HOLE).
3221		 *
3222		 * Theoretically we could also skip this for any NOCOW extent as
3223		 * we don't mess with the extent map tree in the NOCOW case, but
3224		 * for now simply skip this if we are the free space inode.
3225		 */
3226		if (!btrfs_is_free_space_inode(inode))
3227			btrfs_drop_extent_map_range(inode, unwritten_start,
3228						    end, false);
3229
3230		/*
3231		 * If the ordered extent had an IOERR or something else went
3232		 * wrong we need to return the space for this ordered extent
3233		 * back to the allocator.  We only free the extent in the
3234		 * truncated case if we didn't write out the extent at all.
3235		 *
3236		 * If we made it past insert_reserved_file_extent before we
3237		 * errored out then we don't need to do this as the accounting
3238		 * has already been done.
3239		 */
3240		if ((ret || !logical_len) &&
3241		    clear_reserved_extent &&
3242		    !test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
3243		    !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
3244			/*
3245			 * Discard the range before returning it back to the
3246			 * free space pool
3247			 */
3248			if (ret && btrfs_test_opt(fs_info, DISCARD_SYNC))
3249				btrfs_discard_extent(fs_info,
3250						ordered_extent->disk_bytenr,
3251						ordered_extent->disk_num_bytes,
3252						NULL);
3253			btrfs_free_reserved_extent(fs_info,
3254					ordered_extent->disk_bytenr,
3255					ordered_extent->disk_num_bytes, 1);
3256			/*
3257			 * Actually free the qgroup rsv which was released when
3258			 * the ordered extent was created.
3259			 */
3260			btrfs_qgroup_free_refroot(fs_info, inode->root->root_key.objectid,
3261						  ordered_extent->qgroup_rsv,
3262						  BTRFS_QGROUP_RSV_DATA);
3263		}
3264	}
3265
3266	/*
3267	 * This needs to be done to make sure anybody waiting knows we are done
3268	 * updating everything for this ordered extent.
3269	 */
3270	btrfs_remove_ordered_extent(inode, ordered_extent);
3271
3272	/* once for us */
3273	btrfs_put_ordered_extent(ordered_extent);
3274	/* once for the tree */
3275	btrfs_put_ordered_extent(ordered_extent);
3276
3277	return ret;
3278}
3279
3280int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered)
3281{
3282	if (btrfs_is_zoned(inode_to_fs_info(ordered->inode)) &&
3283	    !test_bit(BTRFS_ORDERED_IOERR, &ordered->flags) &&
3284	    list_empty(&ordered->bioc_list))
3285		btrfs_finish_ordered_zoned(ordered);
3286	return btrfs_finish_one_ordered(ordered);
3287}
3288
3289/*
3290 * Verify the checksum for a single sector without any extra action that depend
3291 * on the type of I/O.
3292 */
3293int btrfs_check_sector_csum(struct btrfs_fs_info *fs_info, struct page *page,
3294			    u32 pgoff, u8 *csum, const u8 * const csum_expected)
3295{
3296	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
3297	char *kaddr;
3298
3299	ASSERT(pgoff + fs_info->sectorsize <= PAGE_SIZE);
3300
3301	shash->tfm = fs_info->csum_shash;
3302
3303	kaddr = kmap_local_page(page) + pgoff;
3304	crypto_shash_digest(shash, kaddr, fs_info->sectorsize, csum);
3305	kunmap_local(kaddr);
3306
3307	if (memcmp(csum, csum_expected, fs_info->csum_size))
3308		return -EIO;
3309	return 0;
3310}
3311
3312/*
3313 * Verify the checksum of a single data sector.
3314 *
3315 * @bbio:	btrfs_io_bio which contains the csum
3316 * @dev:	device the sector is on
3317 * @bio_offset:	offset to the beginning of the bio (in bytes)
3318 * @bv:		bio_vec to check
3319 *
3320 * Check if the checksum on a data block is valid.  When a checksum mismatch is
3321 * detected, report the error and fill the corrupted range with zero.
3322 *
3323 * Return %true if the sector is ok or had no checksum to start with, else %false.
3324 */
3325bool btrfs_data_csum_ok(struct btrfs_bio *bbio, struct btrfs_device *dev,
3326			u32 bio_offset, struct bio_vec *bv)
3327{
3328	struct btrfs_inode *inode = bbio->inode;
3329	struct btrfs_fs_info *fs_info = inode->root->fs_info;
3330	u64 file_offset = bbio->file_offset + bio_offset;
3331	u64 end = file_offset + bv->bv_len - 1;
3332	u8 *csum_expected;
3333	u8 csum[BTRFS_CSUM_SIZE];
3334
3335	ASSERT(bv->bv_len == fs_info->sectorsize);
3336
3337	if (!bbio->csum)
3338		return true;
3339
3340	if (btrfs_is_data_reloc_root(inode->root) &&
3341	    test_range_bit(&inode->io_tree, file_offset, end, EXTENT_NODATASUM,
3342			   NULL)) {
3343		/* Skip the range without csum for data reloc inode */
3344		clear_extent_bits(&inode->io_tree, file_offset, end,
3345				  EXTENT_NODATASUM);
3346		return true;
3347	}
3348
3349	csum_expected = bbio->csum + (bio_offset >> fs_info->sectorsize_bits) *
3350				fs_info->csum_size;
3351	if (btrfs_check_sector_csum(fs_info, bv->bv_page, bv->bv_offset, csum,
3352				    csum_expected))
3353		goto zeroit;
3354	return true;
3355
3356zeroit:
3357	btrfs_print_data_csum_error(inode, file_offset, csum, csum_expected,
3358				    bbio->mirror_num);
3359	if (dev)
3360		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS);
3361	memzero_bvec(bv);
3362	return false;
3363}
3364
3365/*
3366 * Perform a delayed iput on @inode.
3367 *
3368 * @inode: The inode we want to perform iput on
3369 *
3370 * This function uses the generic vfs_inode::i_count to track whether we should
3371 * just decrement it (in case it's > 1) or if this is the last iput then link
3372 * the inode to the delayed iput machinery. Delayed iputs are processed at
3373 * transaction commit time/superblock commit/cleaner kthread.
3374 */
3375void btrfs_add_delayed_iput(struct btrfs_inode *inode)
3376{
3377	struct btrfs_fs_info *fs_info = inode->root->fs_info;
3378	unsigned long flags;
3379
3380	if (atomic_add_unless(&inode->vfs_inode.i_count, -1, 1))
3381		return;
3382
3383	atomic_inc(&fs_info->nr_delayed_iputs);
3384	/*
3385	 * Need to be irq safe here because we can be called from either an irq
3386	 * context (see bio.c and btrfs_put_ordered_extent()) or a non-irq
3387	 * context.
3388	 */
3389	spin_lock_irqsave(&fs_info->delayed_iput_lock, flags);
3390	ASSERT(list_empty(&inode->delayed_iput));
3391	list_add_tail(&inode->delayed_iput, &fs_info->delayed_iputs);
3392	spin_unlock_irqrestore(&fs_info->delayed_iput_lock, flags);
3393	if (!test_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags))
3394		wake_up_process(fs_info->cleaner_kthread);
3395}
3396
3397static void run_delayed_iput_locked(struct btrfs_fs_info *fs_info,
3398				    struct btrfs_inode *inode)
3399{
3400	list_del_init(&inode->delayed_iput);
3401	spin_unlock_irq(&fs_info->delayed_iput_lock);
3402	iput(&inode->vfs_inode);
3403	if (atomic_dec_and_test(&fs_info->nr_delayed_iputs))
3404		wake_up(&fs_info->delayed_iputs_wait);
3405	spin_lock_irq(&fs_info->delayed_iput_lock);
3406}
3407
3408static void btrfs_run_delayed_iput(struct btrfs_fs_info *fs_info,
3409				   struct btrfs_inode *inode)
3410{
3411	if (!list_empty(&inode->delayed_iput)) {
3412		spin_lock_irq(&fs_info->delayed_iput_lock);
3413		if (!list_empty(&inode->delayed_iput))
3414			run_delayed_iput_locked(fs_info, inode);
3415		spin_unlock_irq(&fs_info->delayed_iput_lock);
3416	}
3417}
3418
3419void btrfs_run_delayed_iputs(struct btrfs_fs_info *fs_info)
3420{
3421	/*
3422	 * btrfs_put_ordered_extent() can run in irq context (see bio.c), which
3423	 * calls btrfs_add_delayed_iput() and that needs to lock
3424	 * fs_info->delayed_iput_lock. So we need to disable irqs here to
3425	 * prevent a deadlock.
3426	 */
3427	spin_lock_irq(&fs_info->delayed_iput_lock);
3428	while (!list_empty(&fs_info->delayed_iputs)) {
3429		struct btrfs_inode *inode;
3430
3431		inode = list_first_entry(&fs_info->delayed_iputs,
3432				struct btrfs_inode, delayed_iput);
3433		run_delayed_iput_locked(fs_info, inode);
3434		if (need_resched()) {
3435			spin_unlock_irq(&fs_info->delayed_iput_lock);
3436			cond_resched();
3437			spin_lock_irq(&fs_info->delayed_iput_lock);
3438		}
3439	}
3440	spin_unlock_irq(&fs_info->delayed_iput_lock);
3441}
3442
3443/*
3444 * Wait for flushing all delayed iputs
3445 *
3446 * @fs_info:  the filesystem
3447 *
3448 * This will wait on any delayed iputs that are currently running with KILLABLE
3449 * set.  Once they are all done running we will return, unless we are killed in
3450 * which case we return EINTR. This helps in user operations like fallocate etc
3451 * that might get blocked on the iputs.
3452 *
3453 * Return EINTR if we were killed, 0 if nothing's pending
3454 */
3455int btrfs_wait_on_delayed_iputs(struct btrfs_fs_info *fs_info)
3456{
3457	int ret = wait_event_killable(fs_info->delayed_iputs_wait,
3458			atomic_read(&fs_info->nr_delayed_iputs) == 0);
3459	if (ret)
3460		return -EINTR;
3461	return 0;
3462}
3463
3464/*
3465 * This creates an orphan entry for the given inode in case something goes wrong
3466 * in the middle of an unlink.
3467 */
3468int btrfs_orphan_add(struct btrfs_trans_handle *trans,
3469		     struct btrfs_inode *inode)
3470{
3471	int ret;
3472
3473	ret = btrfs_insert_orphan_item(trans, inode->root, btrfs_ino(inode));
3474	if (ret && ret != -EEXIST) {
3475		btrfs_abort_transaction(trans, ret);
3476		return ret;
3477	}
3478
3479	return 0;
3480}
3481
3482/*
3483 * We have done the delete so we can go ahead and remove the orphan item for
3484 * this particular inode.
3485 */
3486static int btrfs_orphan_del(struct btrfs_trans_handle *trans,
3487			    struct btrfs_inode *inode)
3488{
3489	return btrfs_del_orphan_item(trans, inode->root, btrfs_ino(inode));
3490}
3491
3492/*
3493 * this cleans up any orphans that may be left on the list from the last use
3494 * of this root.
3495 */
3496int btrfs_orphan_cleanup(struct btrfs_root *root)
3497{
3498	struct btrfs_fs_info *fs_info = root->fs_info;
3499	struct btrfs_path *path;
3500	struct extent_buffer *leaf;
3501	struct btrfs_key key, found_key;
3502	struct btrfs_trans_handle *trans;
3503	struct inode *inode;
3504	u64 last_objectid = 0;
3505	int ret = 0, nr_unlink = 0;
3506
3507	if (test_and_set_bit(BTRFS_ROOT_ORPHAN_CLEANUP, &root->state))
3508		return 0;
3509
3510	path = btrfs_alloc_path();
3511	if (!path) {
3512		ret = -ENOMEM;
3513		goto out;
3514	}
3515	path->reada = READA_BACK;
3516
3517	key.objectid = BTRFS_ORPHAN_OBJECTID;
3518	key.type = BTRFS_ORPHAN_ITEM_KEY;
3519	key.offset = (u64)-1;
3520
3521	while (1) {
3522		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3523		if (ret < 0)
3524			goto out;
3525
3526		/*
3527		 * if ret == 0 means we found what we were searching for, which
3528		 * is weird, but possible, so only screw with path if we didn't
3529		 * find the key and see if we have stuff that matches
3530		 */
3531		if (ret > 0) {
3532			ret = 0;
3533			if (path->slots[0] == 0)
3534				break;
3535			path->slots[0]--;
3536		}
3537
3538		/* pull out the item */
3539		leaf = path->nodes[0];
3540		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3541
3542		/* make sure the item matches what we want */
3543		if (found_key.objectid != BTRFS_ORPHAN_OBJECTID)
3544			break;
3545		if (found_key.type != BTRFS_ORPHAN_ITEM_KEY)
3546			break;
3547
3548		/* release the path since we're done with it */
3549		btrfs_release_path(path);
3550
3551		/*
3552		 * this is where we are basically btrfs_lookup, without the
3553		 * crossing root thing.  we store the inode number in the
3554		 * offset of the orphan item.
3555		 */
3556
3557		if (found_key.offset == last_objectid) {
3558			/*
3559			 * We found the same inode as before. This means we were
3560			 * not able to remove its items via eviction triggered
3561			 * by an iput(). A transaction abort may have happened,
3562			 * due to -ENOSPC for example, so try to grab the error
3563			 * that lead to a transaction abort, if any.
3564			 */
3565			btrfs_err(fs_info,
3566				  "Error removing orphan entry, stopping orphan cleanup");
3567			ret = BTRFS_FS_ERROR(fs_info) ?: -EINVAL;
3568			goto out;
3569		}
3570
3571		last_objectid = found_key.offset;
3572
3573		found_key.objectid = found_key.offset;
3574		found_key.type = BTRFS_INODE_ITEM_KEY;
3575		found_key.offset = 0;
3576		inode = btrfs_iget(fs_info->sb, last_objectid, root);
3577		if (IS_ERR(inode)) {
3578			ret = PTR_ERR(inode);
3579			inode = NULL;
3580			if (ret != -ENOENT)
3581				goto out;
3582		}
3583
3584		if (!inode && root == fs_info->tree_root) {
3585			struct btrfs_root *dead_root;
3586			int is_dead_root = 0;
3587
3588			/*
3589			 * This is an orphan in the tree root. Currently these
3590			 * could come from 2 sources:
3591			 *  a) a root (snapshot/subvolume) deletion in progress
3592			 *  b) a free space cache inode
3593			 * We need to distinguish those two, as the orphan item
3594			 * for a root must not get deleted before the deletion
3595			 * of the snapshot/subvolume's tree completes.
3596			 *
3597			 * btrfs_find_orphan_roots() ran before us, which has
3598			 * found all deleted roots and loaded them into
3599			 * fs_info->fs_roots_radix. So here we can find if an
3600			 * orphan item corresponds to a deleted root by looking
3601			 * up the root from that radix tree.
3602			 */
3603
3604			spin_lock(&fs_info->fs_roots_radix_lock);
3605			dead_root = radix_tree_lookup(&fs_info->fs_roots_radix,
3606							 (unsigned long)found_key.objectid);
3607			if (dead_root && btrfs_root_refs(&dead_root->root_item) == 0)
3608				is_dead_root = 1;
3609			spin_unlock(&fs_info->fs_roots_radix_lock);
3610
3611			if (is_dead_root) {
3612				/* prevent this orphan from being found again */
3613				key.offset = found_key.objectid - 1;
3614				continue;
3615			}
3616
3617		}
3618
3619		/*
3620		 * If we have an inode with links, there are a couple of
3621		 * possibilities:
3622		 *
3623		 * 1. We were halfway through creating fsverity metadata for the
3624		 * file. In that case, the orphan item represents incomplete
3625		 * fsverity metadata which must be cleaned up with
3626		 * btrfs_drop_verity_items and deleting the orphan item.
3627
3628		 * 2. Old kernels (before v3.12) used to create an
3629		 * orphan item for truncate indicating that there were possibly
3630		 * extent items past i_size that needed to be deleted. In v3.12,
3631		 * truncate was changed to update i_size in sync with the extent
3632		 * items, but the (useless) orphan item was still created. Since
3633		 * v4.18, we don't create the orphan item for truncate at all.
3634		 *
3635		 * So, this item could mean that we need to do a truncate, but
3636		 * only if this filesystem was last used on a pre-v3.12 kernel
3637		 * and was not cleanly unmounted. The odds of that are quite
3638		 * slim, and it's a pain to do the truncate now, so just delete
3639		 * the orphan item.
3640		 *
3641		 * It's also possible that this orphan item was supposed to be
3642		 * deleted but wasn't. The inode number may have been reused,
3643		 * but either way, we can delete the orphan item.
3644		 */
3645		if (!inode || inode->i_nlink) {
3646			if (inode) {
3647				ret = btrfs_drop_verity_items(BTRFS_I(inode));
3648				iput(inode);
3649				inode = NULL;
3650				if (ret)
3651					goto out;
3652			}
3653			trans = btrfs_start_transaction(root, 1);
3654			if (IS_ERR(trans)) {
3655				ret = PTR_ERR(trans);
3656				goto out;
3657			}
3658			btrfs_debug(fs_info, "auto deleting %Lu",
3659				    found_key.objectid);
3660			ret = btrfs_del_orphan_item(trans, root,
3661						    found_key.objectid);
3662			btrfs_end_transaction(trans);
3663			if (ret)
3664				goto out;
3665			continue;
3666		}
3667
3668		nr_unlink++;
3669
3670		/* this will do delete_inode and everything for us */
3671		iput(inode);
3672	}
3673	/* release the path since we're done with it */
3674	btrfs_release_path(path);
3675
3676	if (test_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state)) {
3677		trans = btrfs_join_transaction(root);
3678		if (!IS_ERR(trans))
3679			btrfs_end_transaction(trans);
3680	}
3681
3682	if (nr_unlink)
3683		btrfs_debug(fs_info, "unlinked %d orphans", nr_unlink);
3684
3685out:
3686	if (ret)
3687		btrfs_err(fs_info, "could not do orphan cleanup %d", ret);
3688	btrfs_free_path(path);
3689	return ret;
3690}
3691
3692/*
3693 * very simple check to peek ahead in the leaf looking for xattrs.  If we
3694 * don't find any xattrs, we know there can't be any acls.
3695 *
3696 * slot is the slot the inode is in, objectid is the objectid of the inode
3697 */
3698static noinline int acls_after_inode_item(struct extent_buffer *leaf,
3699					  int slot, u64 objectid,
3700					  int *first_xattr_slot)
3701{
3702	u32 nritems = btrfs_header_nritems(leaf);
3703	struct btrfs_key found_key;
3704	static u64 xattr_access = 0;
3705	static u64 xattr_default = 0;
3706	int scanned = 0;
3707
3708	if (!xattr_access) {
3709		xattr_access = btrfs_name_hash(XATTR_NAME_POSIX_ACL_ACCESS,
3710					strlen(XATTR_NAME_POSIX_ACL_ACCESS));
3711		xattr_default = btrfs_name_hash(XATTR_NAME_POSIX_ACL_DEFAULT,
3712					strlen(XATTR_NAME_POSIX_ACL_DEFAULT));
3713	}
3714
3715	slot++;
3716	*first_xattr_slot = -1;
3717	while (slot < nritems) {
3718		btrfs_item_key_to_cpu(leaf, &found_key, slot);
3719
3720		/* we found a different objectid, there must not be acls */
3721		if (found_key.objectid != objectid)
3722			return 0;
3723
3724		/* we found an xattr, assume we've got an acl */
3725		if (found_key.type == BTRFS_XATTR_ITEM_KEY) {
3726			if (*first_xattr_slot == -1)
3727				*first_xattr_slot = slot;
3728			if (found_key.offset == xattr_access ||
3729			    found_key.offset == xattr_default)
3730				return 1;
3731		}
3732
3733		/*
3734		 * we found a key greater than an xattr key, there can't
3735		 * be any acls later on
3736		 */
3737		if (found_key.type > BTRFS_XATTR_ITEM_KEY)
3738			return 0;
3739
3740		slot++;
3741		scanned++;
3742
3743		/*
3744		 * it goes inode, inode backrefs, xattrs, extents,
3745		 * so if there are a ton of hard links to an inode there can
3746		 * be a lot of backrefs.  Don't waste time searching too hard,
3747		 * this is just an optimization
3748		 */
3749		if (scanned >= 8)
3750			break;
3751	}
3752	/* we hit the end of the leaf before we found an xattr or
3753	 * something larger than an xattr.  We have to assume the inode
3754	 * has acls
3755	 */
3756	if (*first_xattr_slot == -1)
3757		*first_xattr_slot = slot;
3758	return 1;
3759}
3760
3761/*
3762 * read an inode from the btree into the in-memory inode
3763 */
3764static int btrfs_read_locked_inode(struct inode *inode,
3765				   struct btrfs_path *in_path)
3766{
3767	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
3768	struct btrfs_path *path = in_path;
3769	struct extent_buffer *leaf;
3770	struct btrfs_inode_item *inode_item;
3771	struct btrfs_root *root = BTRFS_I(inode)->root;
3772	struct btrfs_key location;
3773	unsigned long ptr;
3774	int maybe_acls;
3775	u32 rdev;
3776	int ret;
3777	bool filled = false;
3778	int first_xattr_slot;
3779
3780	ret = btrfs_fill_inode(inode, &rdev);
3781	if (!ret)
3782		filled = true;
3783
3784	if (!path) {
3785		path = btrfs_alloc_path();
3786		if (!path)
3787			return -ENOMEM;
3788	}
3789
3790	memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
3791
3792	ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
3793	if (ret) {
3794		if (path != in_path)
3795			btrfs_free_path(path);
3796		return ret;
3797	}
3798
3799	leaf = path->nodes[0];
3800
3801	if (filled)
3802		goto cache_index;
3803
3804	inode_item = btrfs_item_ptr(leaf, path->slots[0],
3805				    struct btrfs_inode_item);
3806	inode->i_mode = btrfs_inode_mode(leaf, inode_item);
3807	set_nlink(inode, btrfs_inode_nlink(leaf, inode_item));
3808	i_uid_write(inode, btrfs_inode_uid(leaf, inode_item));
3809	i_gid_write(inode, btrfs_inode_gid(leaf, inode_item));
3810	btrfs_i_size_write(BTRFS_I(inode), btrfs_inode_size(leaf, inode_item));
3811	btrfs_inode_set_file_extent_range(BTRFS_I(inode), 0,
3812			round_up(i_size_read(inode), fs_info->sectorsize));
3813
3814	inode_set_atime(inode, btrfs_timespec_sec(leaf, &inode_item->atime),
3815			btrfs_timespec_nsec(leaf, &inode_item->atime));
3816
3817	inode_set_mtime(inode, btrfs_timespec_sec(leaf, &inode_item->mtime),
3818			btrfs_timespec_nsec(leaf, &inode_item->mtime));
3819
3820	inode_set_ctime(inode, btrfs_timespec_sec(leaf, &inode_item->ctime),
3821			btrfs_timespec_nsec(leaf, &inode_item->ctime));
3822
3823	BTRFS_I(inode)->i_otime_sec = btrfs_timespec_sec(leaf, &inode_item->otime);
3824	BTRFS_I(inode)->i_otime_nsec = btrfs_timespec_nsec(leaf, &inode_item->otime);
3825
3826	inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item));
3827	BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
3828	BTRFS_I(inode)->last_trans = btrfs_inode_transid(leaf, inode_item);
3829
3830	inode_set_iversion_queried(inode,
3831				   btrfs_inode_sequence(leaf, inode_item));
3832	inode->i_generation = BTRFS_I(inode)->generation;
3833	inode->i_rdev = 0;
3834	rdev = btrfs_inode_rdev(leaf, inode_item);
3835
3836	BTRFS_I(inode)->index_cnt = (u64)-1;
3837	btrfs_inode_split_flags(btrfs_inode_flags(leaf, inode_item),
3838				&BTRFS_I(inode)->flags, &BTRFS_I(inode)->ro_flags);
3839
3840cache_index:
3841	/*
3842	 * If we were modified in the current generation and evicted from memory
3843	 * and then re-read we need to do a full sync since we don't have any
3844	 * idea about which extents were modified before we were evicted from
3845	 * cache.
3846	 *
3847	 * This is required for both inode re-read from disk and delayed inode
3848	 * in the delayed_nodes xarray.
3849	 */
3850	if (BTRFS_I(inode)->last_trans == btrfs_get_fs_generation(fs_info))
3851		set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
3852			&BTRFS_I(inode)->runtime_flags);
3853
3854	/*
3855	 * We don't persist the id of the transaction where an unlink operation
3856	 * against the inode was last made. So here we assume the inode might
3857	 * have been evicted, and therefore the exact value of last_unlink_trans
3858	 * lost, and set it to last_trans to avoid metadata inconsistencies
3859	 * between the inode and its parent if the inode is fsync'ed and the log
3860	 * replayed. For example, in the scenario:
3861	 *
3862	 * touch mydir/foo
3863	 * ln mydir/foo mydir/bar
3864	 * sync
3865	 * unlink mydir/bar
3866	 * echo 2 > /proc/sys/vm/drop_caches   # evicts inode
3867	 * xfs_io -c fsync mydir/foo
3868	 * <power failure>
3869	 * mount fs, triggers fsync log replay
3870	 *
3871	 * We must make sure that when we fsync our inode foo we also log its
3872	 * parent inode, otherwise after log replay the parent still has the
3873	 * dentry with the "bar" name but our inode foo has a link count of 1
3874	 * and doesn't have an inode ref with the name "bar" anymore.
3875	 *
3876	 * Setting last_unlink_trans to last_trans is a pessimistic approach,
3877	 * but it guarantees correctness at the expense of occasional full
3878	 * transaction commits on fsync if our inode is a directory, or if our
3879	 * inode is not a directory, logging its parent unnecessarily.
3880	 */
3881	BTRFS_I(inode)->last_unlink_trans = BTRFS_I(inode)->last_trans;
3882
3883	/*
3884	 * Same logic as for last_unlink_trans. We don't persist the generation
3885	 * of the last transaction where this inode was used for a reflink
3886	 * operation, so after eviction and reloading the inode we must be
3887	 * pessimistic and assume the last transaction that modified the inode.
3888	 */
3889	BTRFS_I(inode)->last_reflink_trans = BTRFS_I(inode)->last_trans;
3890
3891	path->slots[0]++;
3892	if (inode->i_nlink != 1 ||
3893	    path->slots[0] >= btrfs_header_nritems(leaf))
3894		goto cache_acl;
3895
3896	btrfs_item_key_to_cpu(leaf, &location, path->slots[0]);
3897	if (location.objectid != btrfs_ino(BTRFS_I(inode)))
3898		goto cache_acl;
3899
3900	ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
3901	if (location.type == BTRFS_INODE_REF_KEY) {
3902		struct btrfs_inode_ref *ref;
3903
3904		ref = (struct btrfs_inode_ref *)ptr;
3905		BTRFS_I(inode)->dir_index = btrfs_inode_ref_index(leaf, ref);
3906	} else if (location.type == BTRFS_INODE_EXTREF_KEY) {
3907		struct btrfs_inode_extref *extref;
3908
3909		extref = (struct btrfs_inode_extref *)ptr;
3910		BTRFS_I(inode)->dir_index = btrfs_inode_extref_index(leaf,
3911								     extref);
3912	}
3913cache_acl:
3914	/*
3915	 * try to precache a NULL acl entry for files that don't have
3916	 * any xattrs or acls
3917	 */
3918	maybe_acls = acls_after_inode_item(leaf, path->slots[0],
3919			btrfs_ino(BTRFS_I(inode)), &first_xattr_slot);
3920	if (first_xattr_slot != -1) {
3921		path->slots[0] = first_xattr_slot;
3922		ret = btrfs_load_inode_props(inode, path);
3923		if (ret)
3924			btrfs_err(fs_info,
3925				  "error loading props for ino %llu (root %llu): %d",
3926				  btrfs_ino(BTRFS_I(inode)),
3927				  root->root_key.objectid, ret);
3928	}
3929	if (path != in_path)
3930		btrfs_free_path(path);
3931
3932	if (!maybe_acls)
3933		cache_no_acl(inode);
3934
3935	switch (inode->i_mode & S_IFMT) {
3936	case S_IFREG:
3937		inode->i_mapping->a_ops = &btrfs_aops;
3938		inode->i_fop = &btrfs_file_operations;
3939		inode->i_op = &btrfs_file_inode_operations;
3940		break;
3941	case S_IFDIR:
3942		inode->i_fop = &btrfs_dir_file_operations;
3943		inode->i_op = &btrfs_dir_inode_operations;
3944		break;
3945	case S_IFLNK:
3946		inode->i_op = &btrfs_symlink_inode_operations;
3947		inode_nohighmem(inode);
3948		inode->i_mapping->a_ops = &btrfs_aops;
3949		break;
3950	default:
3951		inode->i_op = &btrfs_special_inode_operations;
3952		init_special_inode(inode, inode->i_mode, rdev);
3953		break;
3954	}
3955
3956	btrfs_sync_inode_flags_to_i_flags(inode);
3957	return 0;
3958}
3959
3960/*
3961 * given a leaf and an inode, copy the inode fields into the leaf
3962 */
3963static void fill_inode_item(struct btrfs_trans_handle *trans,
3964			    struct extent_buffer *leaf,
3965			    struct btrfs_inode_item *item,
3966			    struct inode *inode)
3967{
3968	struct btrfs_map_token token;
3969	u64 flags;
3970
3971	btrfs_init_map_token(&token, leaf);
3972
3973	btrfs_set_token_inode_uid(&token, item, i_uid_read(inode));
3974	btrfs_set_token_inode_gid(&token, item, i_gid_read(inode));
3975	btrfs_set_token_inode_size(&token, item, BTRFS_I(inode)->disk_i_size);
3976	btrfs_set_token_inode_mode(&token, item, inode->i_mode);
3977	btrfs_set_token_inode_nlink(&token, item, inode->i_nlink);
3978
3979	btrfs_set_token_timespec_sec(&token, &item->atime,
3980				     inode_get_atime_sec(inode));
3981	btrfs_set_token_timespec_nsec(&token, &item->atime,
3982				      inode_get_atime_nsec(inode));
3983
3984	btrfs_set_token_timespec_sec(&token, &item->mtime,
3985				     inode_get_mtime_sec(inode));
3986	btrfs_set_token_timespec_nsec(&token, &item->mtime,
3987				      inode_get_mtime_nsec(inode));
3988
3989	btrfs_set_token_timespec_sec(&token, &item->ctime,
3990				     inode_get_ctime_sec(inode));
3991	btrfs_set_token_timespec_nsec(&token, &item->ctime,
3992				      inode_get_ctime_nsec(inode));
3993
3994	btrfs_set_token_timespec_sec(&token, &item->otime, BTRFS_I(inode)->i_otime_sec);
3995	btrfs_set_token_timespec_nsec(&token, &item->otime, BTRFS_I(inode)->i_otime_nsec);
3996
3997	btrfs_set_token_inode_nbytes(&token, item, inode_get_bytes(inode));
3998	btrfs_set_token_inode_generation(&token, item,
3999					 BTRFS_I(inode)->generation);
4000	btrfs_set_token_inode_sequence(&token, item, inode_peek_iversion(inode));
4001	btrfs_set_token_inode_transid(&token, item, trans->transid);
4002	btrfs_set_token_inode_rdev(&token, item, inode->i_rdev);
4003	flags = btrfs_inode_combine_flags(BTRFS_I(inode)->flags,
4004					  BTRFS_I(inode)->ro_flags);
4005	btrfs_set_token_inode_flags(&token, item, flags);
4006	btrfs_set_token_inode_block_group(&token, item, 0);
4007}
4008
4009/*
4010 * copy everything in the in-memory inode into the btree.
4011 */
4012static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans,
4013					    struct btrfs_inode *inode)
4014{
4015	struct btrfs_inode_item *inode_item;
4016	struct btrfs_path *path;
4017	struct extent_buffer *leaf;
4018	int ret;
4019
4020	path = btrfs_alloc_path();
4021	if (!path)
4022		return -ENOMEM;
4023
4024	ret = btrfs_lookup_inode(trans, inode->root, path, &inode->location, 1);
4025	if (ret) {
4026		if (ret > 0)
4027			ret = -ENOENT;
4028		goto failed;
4029	}
4030
4031	leaf = path->nodes[0];
4032	inode_item = btrfs_item_ptr(leaf, path->slots[0],
4033				    struct btrfs_inode_item);
4034
4035	fill_inode_item(trans, leaf, inode_item, &inode->vfs_inode);
4036	btrfs_mark_buffer_dirty(trans, leaf);
4037	btrfs_set_inode_last_trans(trans, inode);
4038	ret = 0;
4039failed:
4040	btrfs_free_path(path);
4041	return ret;
4042}
4043
4044/*
4045 * copy everything in the in-memory inode into the btree.
4046 */
4047int btrfs_update_inode(struct btrfs_trans_handle *trans,
4048		       struct btrfs_inode *inode)
4049{
4050	struct btrfs_root *root = inode->root;
4051	struct btrfs_fs_info *fs_info = root->fs_info;
4052	int ret;
4053
4054	/*
4055	 * If the inode is a free space inode, we can deadlock during commit
4056	 * if we put it into the delayed code.
4057	 *
4058	 * The data relocation inode should also be directly updated
4059	 * without delay
4060	 */
4061	if (!btrfs_is_free_space_inode(inode)
4062	    && !btrfs_is_data_reloc_root(root)
4063	    && !test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) {
4064		btrfs_update_root_times(trans, root);
4065
4066		ret = btrfs_delayed_update_inode(trans, inode);
4067		if (!ret)
4068			btrfs_set_inode_last_trans(trans, inode);
4069		return ret;
4070	}
4071
4072	return btrfs_update_inode_item(trans, inode);
4073}
4074
4075int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
4076				struct btrfs_inode *inode)
4077{
4078	int ret;
4079
4080	ret = btrfs_update_inode(trans, inode);
4081	if (ret == -ENOSPC)
4082		return btrfs_update_inode_item(trans, inode);
4083	return ret;
4084}
4085
4086/*
4087 * unlink helper that gets used here in inode.c and in the tree logging
4088 * recovery code.  It remove a link in a directory with a given name, and
4089 * also drops the back refs in the inode to the directory
4090 */
4091static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
4092				struct btrfs_inode *dir,
4093				struct btrfs_inode *inode,
4094				const struct fscrypt_str *name,
4095				struct btrfs_rename_ctx *rename_ctx)
4096{
4097	struct btrfs_root *root = dir->root;
4098	struct btrfs_fs_info *fs_info = root->fs_info;
4099	struct btrfs_path *path;
4100	int ret = 0;
4101	struct btrfs_dir_item *di;
4102	u64 index;
4103	u64 ino = btrfs_ino(inode);
4104	u64 dir_ino = btrfs_ino(dir);
4105
4106	path = btrfs_alloc_path();
4107	if (!path) {
4108		ret = -ENOMEM;
4109		goto out;
4110	}
4111
4112	di = btrfs_lookup_dir_item(trans, root, path, dir_ino, name, -1);
4113	if (IS_ERR_OR_NULL(di)) {
4114		ret = di ? PTR_ERR(di) : -ENOENT;
4115		goto err;
4116	}
4117	ret = btrfs_delete_one_dir_name(trans, root, path, di);
4118	if (ret)
4119		goto err;
4120	btrfs_release_path(path);
4121
4122	/*
4123	 * If we don't have dir index, we have to get it by looking up
4124	 * the inode ref, since we get the inode ref, remove it directly,
4125	 * it is unnecessary to do delayed deletion.
4126	 *
4127	 * But if we have dir index, needn't search inode ref to get it.
4128	 * Since the inode ref is close to the inode item, it is better
4129	 * that we delay to delete it, and just do this deletion when
4130	 * we update the inode item.
4131	 */
4132	if (inode->dir_index) {
4133		ret = btrfs_delayed_delete_inode_ref(inode);
4134		if (!ret) {
4135			index = inode->dir_index;
4136			goto skip_backref;
4137		}
4138	}
4139
4140	ret = btrfs_del_inode_ref(trans, root, name, ino, dir_ino, &index);
4141	if (ret) {
4142		btrfs_info(fs_info,
4143			"failed to delete reference to %.*s, inode %llu parent %llu",
4144			name->len, name->name, ino, dir_ino);
4145		btrfs_abort_transaction(trans, ret);
4146		goto err;
4147	}
4148skip_backref:
4149	if (rename_ctx)
4150		rename_ctx->index = index;
4151
4152	ret = btrfs_delete_delayed_dir_index(trans, dir, index);
4153	if (ret) {
4154		btrfs_abort_transaction(trans, ret);
4155		goto err;
4156	}
4157
4158	/*
4159	 * If we are in a rename context, we don't need to update anything in the
4160	 * log. That will be done later during the rename by btrfs_log_new_name().
4161	 * Besides that, doing it here would only cause extra unnecessary btree
4162	 * operations on the log tree, increasing latency for applications.
4163	 */
4164	if (!rename_ctx) {
4165		btrfs_del_inode_ref_in_log(trans, root, name, inode, dir_ino);
4166		btrfs_del_dir_entries_in_log(trans, root, name, dir, index);
4167	}
4168
4169	/*
4170	 * If we have a pending delayed iput we could end up with the final iput
4171	 * being run in btrfs-cleaner context.  If we have enough of these built
4172	 * up we can end up burning a lot of time in btrfs-cleaner without any
4173	 * way to throttle the unlinks.  Since we're currently holding a ref on
4174	 * the inode we can run the delayed iput here without any issues as the
4175	 * final iput won't be done until after we drop the ref we're currently
4176	 * holding.
4177	 */
4178	btrfs_run_delayed_iput(fs_info, inode);
4179err:
4180	btrfs_free_path(path);
4181	if (ret)
4182		goto out;
4183
4184	btrfs_i_size_write(dir, dir->vfs_inode.i_size - name->len * 2);
4185	inode_inc_iversion(&inode->vfs_inode);
4186	inode_inc_iversion(&dir->vfs_inode);
4187 	inode_set_mtime_to_ts(&dir->vfs_inode, inode_set_ctime_current(&dir->vfs_inode));
4188	ret = btrfs_update_inode(trans, dir);
4189out:
4190	return ret;
4191}
4192
4193int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
4194		       struct btrfs_inode *dir, struct btrfs_inode *inode,
4195		       const struct fscrypt_str *name)
4196{
4197	int ret;
4198
4199	ret = __btrfs_unlink_inode(trans, dir, inode, name, NULL);
4200	if (!ret) {
4201		drop_nlink(&inode->vfs_inode);
4202		ret = btrfs_update_inode(trans, inode);
4203	}
4204	return ret;
4205}
4206
4207/*
4208 * helper to start transaction for unlink and rmdir.
4209 *
4210 * unlink and rmdir are special in btrfs, they do not always free space, so
4211 * if we cannot make our reservations the normal way try and see if there is
4212 * plenty of slack room in the global reserve to migrate, otherwise we cannot
4213 * allow the unlink to occur.
4214 */
4215static struct btrfs_trans_handle *__unlink_start_trans(struct btrfs_inode *dir)
4216{
4217	struct btrfs_root *root = dir->root;
4218
4219	return btrfs_start_transaction_fallback_global_rsv(root,
4220						   BTRFS_UNLINK_METADATA_UNITS);
4221}
4222
4223static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
4224{
4225	struct btrfs_trans_handle *trans;
4226	struct inode *inode = d_inode(dentry);
4227	int ret;
4228	struct fscrypt_name fname;
4229
4230	ret = fscrypt_setup_filename(dir, &dentry->d_name, 1, &fname);
4231	if (ret)
4232		return ret;
4233
4234	/* This needs to handle no-key deletions later on */
4235
4236	trans = __unlink_start_trans(BTRFS_I(dir));
4237	if (IS_ERR(trans)) {
4238		ret = PTR_ERR(trans);
4239		goto fscrypt_free;
4240	}
4241
4242	btrfs_record_unlink_dir(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)),
4243				false);
4244
4245	ret = btrfs_unlink_inode(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)),
4246				 &fname.disk_name);
4247	if (ret)
4248		goto end_trans;
4249
4250	if (inode->i_nlink == 0) {
4251		ret = btrfs_orphan_add(trans, BTRFS_I(inode));
4252		if (ret)
4253			goto end_trans;
4254	}
4255
4256end_trans:
4257	btrfs_end_transaction(trans);
4258	btrfs_btree_balance_dirty(BTRFS_I(dir)->root->fs_info);
4259fscrypt_free:
4260	fscrypt_free_filename(&fname);
4261	return ret;
4262}
4263
4264static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
4265			       struct btrfs_inode *dir, struct dentry *dentry)
4266{
4267	struct btrfs_root *root = dir->root;
4268	struct btrfs_inode *inode = BTRFS_I(d_inode(dentry));
4269	struct btrfs_path *path;
4270	struct extent_buffer *leaf;
4271	struct btrfs_dir_item *di;
4272	struct btrfs_key key;
4273	u64 index;
4274	int ret;
4275	u64 objectid;
4276	u64 dir_ino = btrfs_ino(dir);
4277	struct fscrypt_name fname;
4278
4279	ret = fscrypt_setup_filename(&dir->vfs_inode, &dentry->d_name, 1, &fname);
4280	if (ret)
4281		return ret;
4282
4283	/* This needs to handle no-key deletions later on */
4284
4285	if (btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID) {
4286		objectid = inode->root->root_key.objectid;
4287	} else if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) {
4288		objectid = inode->location.objectid;
4289	} else {
4290		WARN_ON(1);
4291		fscrypt_free_filename(&fname);
4292		return -EINVAL;
4293	}
4294
4295	path = btrfs_alloc_path();
4296	if (!path) {
4297		ret = -ENOMEM;
4298		goto out;
4299	}
4300
4301	di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
4302				   &fname.disk_name, -1);
4303	if (IS_ERR_OR_NULL(di)) {
4304		ret = di ? PTR_ERR(di) : -ENOENT;
4305		goto out;
4306	}
4307
4308	leaf = path->nodes[0];
4309	btrfs_dir_item_key_to_cpu(leaf, di, &key);
4310	WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
4311	ret = btrfs_delete_one_dir_name(trans, root, path, di);
4312	if (ret) {
4313		btrfs_abort_transaction(trans, ret);
4314		goto out;
4315	}
4316	btrfs_release_path(path);
4317
4318	/*
4319	 * This is a placeholder inode for a subvolume we didn't have a
4320	 * reference to at the time of the snapshot creation.  In the meantime
4321	 * we could have renamed the real subvol link into our snapshot, so
4322	 * depending on btrfs_del_root_ref to return -ENOENT here is incorrect.
4323	 * Instead simply lookup the dir_index_item for this entry so we can
4324	 * remove it.  Otherwise we know we have a ref to the root and we can
4325	 * call btrfs_del_root_ref, and it _shouldn't_ fail.
4326	 */
4327	if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) {
4328		di = btrfs_search_dir_index_item(root, path, dir_ino, &fname.disk_name);
4329		if (IS_ERR_OR_NULL(di)) {
4330			if (!di)
4331				ret = -ENOENT;
4332			else
4333				ret = PTR_ERR(di);
4334			btrfs_abort_transaction(trans, ret);
4335			goto out;
4336		}
4337
4338		leaf = path->nodes[0];
4339		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4340		index = key.offset;
4341		btrfs_release_path(path);
4342	} else {
4343		ret = btrfs_del_root_ref(trans, objectid,
4344					 root->root_key.objectid, dir_ino,
4345					 &index, &fname.disk_name);
4346		if (ret) {
4347			btrfs_abort_transaction(trans, ret);
4348			goto out;
4349		}
4350	}
4351
4352	ret = btrfs_delete_delayed_dir_index(trans, dir, index);
4353	if (ret) {
4354		btrfs_abort_transaction(trans, ret);
4355		goto out;
4356	}
4357
4358	btrfs_i_size_write(dir, dir->vfs_inode.i_size - fname.disk_name.len * 2);
4359	inode_inc_iversion(&dir->vfs_inode);
4360	inode_set_mtime_to_ts(&dir->vfs_inode, inode_set_ctime_current(&dir->vfs_inode));
4361	ret = btrfs_update_inode_fallback(trans, dir);
4362	if (ret)
4363		btrfs_abort_transaction(trans, ret);
4364out:
4365	btrfs_free_path(path);
4366	fscrypt_free_filename(&fname);
4367	return ret;
4368}
4369
4370/*
4371 * Helper to check if the subvolume references other subvolumes or if it's
4372 * default.
4373 */
4374static noinline int may_destroy_subvol(struct btrfs_root *root)
4375{
4376	struct btrfs_fs_info *fs_info = root->fs_info;
4377	struct btrfs_path *path;
4378	struct btrfs_dir_item *di;
4379	struct btrfs_key key;
4380	struct fscrypt_str name = FSTR_INIT("default", 7);
4381	u64 dir_id;
4382	int ret;
4383
4384	path = btrfs_alloc_path();
4385	if (!path)
4386		return -ENOMEM;
4387
4388	/* Make sure this root isn't set as the default subvol */
4389	dir_id = btrfs_super_root_dir(fs_info->super_copy);
4390	di = btrfs_lookup_dir_item(NULL, fs_info->tree_root, path,
4391				   dir_id, &name, 0);
4392	if (di && !IS_ERR(di)) {
4393		btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key);
4394		if (key.objectid == root->root_key.objectid) {
4395			ret = -EPERM;
4396			btrfs_err(fs_info,
4397				  "deleting default subvolume %llu is not allowed",
4398				  key.objectid);
4399			goto out;
4400		}
4401		btrfs_release_path(path);
4402	}
4403
4404	key.objectid = root->root_key.objectid;
4405	key.type = BTRFS_ROOT_REF_KEY;
4406	key.offset = (u64)-1;
4407
4408	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
4409	if (ret < 0)
4410		goto out;
4411	if (ret == 0) {
4412		/*
4413		 * Key with offset -1 found, there would have to exist a root
4414		 * with such id, but this is out of valid range.
4415		 */
4416		ret = -EUCLEAN;
4417		goto out;
4418	}
4419
4420	ret = 0;
4421	if (path->slots[0] > 0) {
4422		path->slots[0]--;
4423		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
4424		if (key.objectid == root->root_key.objectid &&
4425		    key.type == BTRFS_ROOT_REF_KEY)
4426			ret = -ENOTEMPTY;
4427	}
4428out:
4429	btrfs_free_path(path);
4430	return ret;
4431}
4432
4433/* Delete all dentries for inodes belonging to the root */
4434static void btrfs_prune_dentries(struct btrfs_root *root)
4435{
4436	struct btrfs_fs_info *fs_info = root->fs_info;
4437	struct rb_node *node;
4438	struct rb_node *prev;
4439	struct btrfs_inode *entry;
4440	struct inode *inode;
4441	u64 objectid = 0;
4442
4443	if (!BTRFS_FS_ERROR(fs_info))
4444		WARN_ON(btrfs_root_refs(&root->root_item) != 0);
4445
4446	spin_lock(&root->inode_lock);
4447again:
4448	node = root->inode_tree.rb_node;
4449	prev = NULL;
4450	while (node) {
4451		prev = node;
4452		entry = rb_entry(node, struct btrfs_inode, rb_node);
4453
4454		if (objectid < btrfs_ino(entry))
4455			node = node->rb_left;
4456		else if (objectid > btrfs_ino(entry))
4457			node = node->rb_right;
4458		else
4459			break;
4460	}
4461	if (!node) {
4462		while (prev) {
4463			entry = rb_entry(prev, struct btrfs_inode, rb_node);
4464			if (objectid <= btrfs_ino(entry)) {
4465				node = prev;
4466				break;
4467			}
4468			prev = rb_next(prev);
4469		}
4470	}
4471	while (node) {
4472		entry = rb_entry(node, struct btrfs_inode, rb_node);
4473		objectid = btrfs_ino(entry) + 1;
4474		inode = igrab(&entry->vfs_inode);
4475		if (inode) {
4476			spin_unlock(&root->inode_lock);
4477			if (atomic_read(&inode->i_count) > 1)
4478				d_prune_aliases(inode);
4479			/*
4480			 * btrfs_drop_inode will have it removed from the inode
4481			 * cache when its usage count hits zero.
4482			 */
4483			iput(inode);
4484			cond_resched();
4485			spin_lock(&root->inode_lock);
4486			goto again;
4487		}
4488
4489		if (cond_resched_lock(&root->inode_lock))
4490			goto again;
4491
4492		node = rb_next(node);
4493	}
4494	spin_unlock(&root->inode_lock);
4495}
4496
4497int btrfs_delete_subvolume(struct btrfs_inode *dir, struct dentry *dentry)
4498{
4499	struct btrfs_root *root = dir->root;
4500	struct btrfs_fs_info *fs_info = root->fs_info;
4501	struct inode *inode = d_inode(dentry);
4502	struct btrfs_root *dest = BTRFS_I(inode)->root;
4503	struct btrfs_trans_handle *trans;
4504	struct btrfs_block_rsv block_rsv;
4505	u64 root_flags;
4506	u64 qgroup_reserved = 0;
4507	int ret;
4508
4509	down_write(&fs_info->subvol_sem);
4510
4511	/*
4512	 * Don't allow to delete a subvolume with send in progress. This is
4513	 * inside the inode lock so the error handling that has to drop the bit
4514	 * again is not run concurrently.
4515	 */
4516	spin_lock(&dest->root_item_lock);
4517	if (dest->send_in_progress) {
4518		spin_unlock(&dest->root_item_lock);
4519		btrfs_warn(fs_info,
4520			   "attempt to delete subvolume %llu during send",
4521			   dest->root_key.objectid);
4522		ret = -EPERM;
4523		goto out_up_write;
4524	}
4525	if (atomic_read(&dest->nr_swapfiles)) {
4526		spin_unlock(&dest->root_item_lock);
4527		btrfs_warn(fs_info,
4528			   "attempt to delete subvolume %llu with active swapfile",
4529			   root->root_key.objectid);
4530		ret = -EPERM;
4531		goto out_up_write;
4532	}
4533	root_flags = btrfs_root_flags(&dest->root_item);
4534	btrfs_set_root_flags(&dest->root_item,
4535			     root_flags | BTRFS_ROOT_SUBVOL_DEAD);
4536	spin_unlock(&dest->root_item_lock);
4537
4538	ret = may_destroy_subvol(dest);
4539	if (ret)
4540		goto out_undead;
4541
4542	btrfs_init_block_rsv(&block_rsv, BTRFS_BLOCK_RSV_TEMP);
4543	/*
4544	 * One for dir inode,
4545	 * two for dir entries,
4546	 * two for root ref/backref.
4547	 */
4548	ret = btrfs_subvolume_reserve_metadata(root, &block_rsv, 5, true);
4549	if (ret)
4550		goto out_undead;
4551	qgroup_reserved = block_rsv.qgroup_rsv_reserved;
4552
4553	trans = btrfs_start_transaction(root, 0);
4554	if (IS_ERR(trans)) {
4555		ret = PTR_ERR(trans);
4556		goto out_release;
4557	}
4558	ret = btrfs_record_root_in_trans(trans, root);
4559	if (ret) {
4560		btrfs_abort_transaction(trans, ret);
4561		goto out_end_trans;
4562	}
4563	btrfs_qgroup_convert_reserved_meta(root, qgroup_reserved);
4564	qgroup_reserved = 0;
4565	trans->block_rsv = &block_rsv;
4566	trans->bytes_reserved = block_rsv.size;
4567
4568	btrfs_record_snapshot_destroy(trans, dir);
4569
4570	ret = btrfs_unlink_subvol(trans, dir, dentry);
4571	if (ret) {
4572		btrfs_abort_transaction(trans, ret);
4573		goto out_end_trans;
4574	}
4575
4576	ret = btrfs_record_root_in_trans(trans, dest);
4577	if (ret) {
4578		btrfs_abort_transaction(trans, ret);
4579		goto out_end_trans;
4580	}
4581
4582	memset(&dest->root_item.drop_progress, 0,
4583		sizeof(dest->root_item.drop_progress));
4584	btrfs_set_root_drop_level(&dest->root_item, 0);
4585	btrfs_set_root_refs(&dest->root_item, 0);
4586
4587	if (!test_and_set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &dest->state)) {
4588		ret = btrfs_insert_orphan_item(trans,
4589					fs_info->tree_root,
4590					dest->root_key.objectid);
4591		if (ret) {
4592			btrfs_abort_transaction(trans, ret);
4593			goto out_end_trans;
4594		}
4595	}
4596
4597	ret = btrfs_uuid_tree_remove(trans, dest->root_item.uuid,
4598				  BTRFS_UUID_KEY_SUBVOL,
4599				  dest->root_key.objectid);
4600	if (ret && ret != -ENOENT) {
4601		btrfs_abort_transaction(trans, ret);
4602		goto out_end_trans;
4603	}
4604	if (!btrfs_is_empty_uuid(dest->root_item.received_uuid)) {
4605		ret = btrfs_uuid_tree_remove(trans,
4606					  dest->root_item.received_uuid,
4607					  BTRFS_UUID_KEY_RECEIVED_SUBVOL,
4608					  dest->root_key.objectid);
4609		if (ret && ret != -ENOENT) {
4610			btrfs_abort_transaction(trans, ret);
4611			goto out_end_trans;
4612		}
4613	}
4614
4615	free_anon_bdev(dest->anon_dev);
4616	dest->anon_dev = 0;
4617out_end_trans:
4618	trans->block_rsv = NULL;
4619	trans->bytes_reserved = 0;
4620	ret = btrfs_end_transaction(trans);
4621	inode->i_flags |= S_DEAD;
4622out_release:
4623	btrfs_block_rsv_release(fs_info, &block_rsv, (u64)-1, NULL);
4624	if (qgroup_reserved)
4625		btrfs_qgroup_free_meta_prealloc(root, qgroup_reserved);
4626out_undead:
4627	if (ret) {
4628		spin_lock(&dest->root_item_lock);
4629		root_flags = btrfs_root_flags(&dest->root_item);
4630		btrfs_set_root_flags(&dest->root_item,
4631				root_flags & ~BTRFS_ROOT_SUBVOL_DEAD);
4632		spin_unlock(&dest->root_item_lock);
4633	}
4634out_up_write:
4635	up_write(&fs_info->subvol_sem);
4636	if (!ret) {
4637		d_invalidate(dentry);
4638		btrfs_prune_dentries(dest);
4639		ASSERT(dest->send_in_progress == 0);
4640	}
4641
4642	return ret;
4643}
4644
4645static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
4646{
4647	struct inode *inode = d_inode(dentry);
4648	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
4649	int err = 0;
4650	struct btrfs_trans_handle *trans;
4651	u64 last_unlink_trans;
4652	struct fscrypt_name fname;
4653
4654	if (inode->i_size > BTRFS_EMPTY_DIR_SIZE)
4655		return -ENOTEMPTY;
4656	if (btrfs_ino(BTRFS_I(inode)) == BTRFS_FIRST_FREE_OBJECTID) {
4657		if (unlikely(btrfs_fs_incompat(fs_info, EXTENT_TREE_V2))) {
4658			btrfs_err(fs_info,
4659			"extent tree v2 doesn't support snapshot deletion yet");
4660			return -EOPNOTSUPP;
4661		}
4662		return btrfs_delete_subvolume(BTRFS_I(dir), dentry);
4663	}
4664
4665	err = fscrypt_setup_filename(dir, &dentry->d_name, 1, &fname);
4666	if (err)
4667		return err;
4668
4669	/* This needs to handle no-key deletions later on */
4670
4671	trans = __unlink_start_trans(BTRFS_I(dir));
4672	if (IS_ERR(trans)) {
4673		err = PTR_ERR(trans);
4674		goto out_notrans;
4675	}
4676
4677	if (unlikely(btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
4678		err = btrfs_unlink_subvol(trans, BTRFS_I(dir), dentry);
4679		goto out;
4680	}
4681
4682	err = btrfs_orphan_add(trans, BTRFS_I(inode));
4683	if (err)
4684		goto out;
4685
4686	last_unlink_trans = BTRFS_I(inode)->last_unlink_trans;
4687
4688	/* now the directory is empty */
4689	err = btrfs_unlink_inode(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)),
4690				 &fname.disk_name);
4691	if (!err) {
4692		btrfs_i_size_write(BTRFS_I(inode), 0);
4693		/*
4694		 * Propagate the last_unlink_trans value of the deleted dir to
4695		 * its parent directory. This is to prevent an unrecoverable
4696		 * log tree in the case we do something like this:
4697		 * 1) create dir foo
4698		 * 2) create snapshot under dir foo
4699		 * 3) delete the snapshot
4700		 * 4) rmdir foo
4701		 * 5) mkdir foo
4702		 * 6) fsync foo or some file inside foo
4703		 */
4704		if (last_unlink_trans >= trans->transid)
4705			BTRFS_I(dir)->last_unlink_trans = last_unlink_trans;
4706	}
4707out:
4708	btrfs_end_transaction(trans);
4709out_notrans:
4710	btrfs_btree_balance_dirty(fs_info);
4711	fscrypt_free_filename(&fname);
4712
4713	return err;
4714}
4715
4716/*
4717 * Read, zero a chunk and write a block.
4718 *
4719 * @inode - inode that we're zeroing
4720 * @from - the offset to start zeroing
4721 * @len - the length to zero, 0 to zero the entire range respective to the
4722 *	offset
4723 * @front - zero up to the offset instead of from the offset on
4724 *
4725 * This will find the block for the "from" offset and cow the block and zero the
4726 * part we want to zero.  This is used with truncate and hole punching.
4727 */
4728int btrfs_truncate_block(struct btrfs_inode *inode, loff_t from, loff_t len,
4729			 int front)
4730{
4731	struct btrfs_fs_info *fs_info = inode->root->fs_info;
4732	struct address_space *mapping = inode->vfs_inode.i_mapping;
4733	struct extent_io_tree *io_tree = &inode->io_tree;
4734	struct btrfs_ordered_extent *ordered;
4735	struct extent_state *cached_state = NULL;
4736	struct extent_changeset *data_reserved = NULL;
4737	bool only_release_metadata = false;
4738	u32 blocksize = fs_info->sectorsize;
4739	pgoff_t index = from >> PAGE_SHIFT;
4740	unsigned offset = from & (blocksize - 1);
4741	struct folio *folio;
4742	gfp_t mask = btrfs_alloc_write_mask(mapping);
4743	size_t write_bytes = blocksize;
4744	int ret = 0;
4745	u64 block_start;
4746	u64 block_end;
4747
4748	if (IS_ALIGNED(offset, blocksize) &&
4749	    (!len || IS_ALIGNED(len, blocksize)))
4750		goto out;
4751
4752	block_start = round_down(from, blocksize);
4753	block_end = block_start + blocksize - 1;
4754
4755	ret = btrfs_check_data_free_space(inode, &data_reserved, block_start,
4756					  blocksize, false);
4757	if (ret < 0) {
4758		if (btrfs_check_nocow_lock(inode, block_start, &write_bytes, false) > 0) {
4759			/* For nocow case, no need to reserve data space */
4760			only_release_metadata = true;
4761		} else {
4762			goto out;
4763		}
4764	}
4765	ret = btrfs_delalloc_reserve_metadata(inode, blocksize, blocksize, false);
4766	if (ret < 0) {
4767		if (!only_release_metadata)
4768			btrfs_free_reserved_data_space(inode, data_reserved,
4769						       block_start, blocksize);
4770		goto out;
4771	}
4772again:
4773	folio = __filemap_get_folio(mapping, index,
4774				    FGP_LOCK | FGP_ACCESSED | FGP_CREAT, mask);
4775	if (IS_ERR(folio)) {
4776		btrfs_delalloc_release_space(inode, data_reserved, block_start,
4777					     blocksize, true);
4778		btrfs_delalloc_release_extents(inode, blocksize);
4779		ret = -ENOMEM;
4780		goto out;
4781	}
4782
4783	if (!folio_test_uptodate(folio)) {
4784		ret = btrfs_read_folio(NULL, folio);
4785		folio_lock(folio);
4786		if (folio->mapping != mapping) {
4787			folio_unlock(folio);
4788			folio_put(folio);
4789			goto again;
4790		}
4791		if (!folio_test_uptodate(folio)) {
4792			ret = -EIO;
4793			goto out_unlock;
4794		}
4795	}
4796
4797	/*
4798	 * We unlock the page after the io is completed and then re-lock it
4799	 * above.  release_folio() could have come in between that and cleared
4800	 * folio private, but left the page in the mapping.  Set the page mapped
4801	 * here to make sure it's properly set for the subpage stuff.
4802	 */
4803	ret = set_folio_extent_mapped(folio);
4804	if (ret < 0)
4805		goto out_unlock;
4806
4807	folio_wait_writeback(folio);
4808
4809	lock_extent(io_tree, block_start, block_end, &cached_state);
4810
4811	ordered = btrfs_lookup_ordered_extent(inode, block_start);
4812	if (ordered) {
4813		unlock_extent(io_tree, block_start, block_end, &cached_state);
4814		folio_unlock(folio);
4815		folio_put(folio);
4816		btrfs_start_ordered_extent(ordered);
4817		btrfs_put_ordered_extent(ordered);
4818		goto again;
4819	}
4820
4821	clear_extent_bit(&inode->io_tree, block_start, block_end,
4822			 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
4823			 &cached_state);
4824
4825	ret = btrfs_set_extent_delalloc(inode, block_start, block_end, 0,
4826					&cached_state);
4827	if (ret) {
4828		unlock_extent(io_tree, block_start, block_end, &cached_state);
4829		goto out_unlock;
4830	}
4831
4832	if (offset != blocksize) {
4833		if (!len)
4834			len = blocksize - offset;
4835		if (front)
4836			folio_zero_range(folio, block_start - folio_pos(folio),
4837					 offset);
4838		else
4839			folio_zero_range(folio,
4840					 (block_start - folio_pos(folio)) + offset,
4841					 len);
4842	}
4843	btrfs_folio_clear_checked(fs_info, folio, block_start,
4844				  block_end + 1 - block_start);
4845	btrfs_folio_set_dirty(fs_info, folio, block_start,
4846			      block_end + 1 - block_start);
4847	unlock_extent(io_tree, block_start, block_end, &cached_state);
4848
4849	if (only_release_metadata)
4850		set_extent_bit(&inode->io_tree, block_start, block_end,
4851			       EXTENT_NORESERVE, NULL);
4852
4853out_unlock:
4854	if (ret) {
4855		if (only_release_metadata)
4856			btrfs_delalloc_release_metadata(inode, blocksize, true);
4857		else
4858			btrfs_delalloc_release_space(inode, data_reserved,
4859					block_start, blocksize, true);
4860	}
4861	btrfs_delalloc_release_extents(inode, blocksize);
4862	folio_unlock(folio);
4863	folio_put(folio);
4864out:
4865	if (only_release_metadata)
4866		btrfs_check_nocow_unlock(inode);
4867	extent_changeset_free(data_reserved);
4868	return ret;
4869}
4870
4871static int maybe_insert_hole(struct btrfs_inode *inode, u64 offset, u64 len)
4872{
4873	struct btrfs_root *root = inode->root;
4874	struct btrfs_fs_info *fs_info = root->fs_info;
4875	struct btrfs_trans_handle *trans;
4876	struct btrfs_drop_extents_args drop_args = { 0 };
4877	int ret;
4878
4879	/*
4880	 * If NO_HOLES is enabled, we don't need to do anything.
4881	 * Later, up in the call chain, either btrfs_set_inode_last_sub_trans()
4882	 * or btrfs_update_inode() will be called, which guarantee that the next
4883	 * fsync will know this inode was changed and needs to be logged.
4884	 */
4885	if (btrfs_fs_incompat(fs_info, NO_HOLES))
4886		return 0;
4887
4888	/*
4889	 * 1 - for the one we're dropping
4890	 * 1 - for the one we're adding
4891	 * 1 - for updating the inode.
4892	 */
4893	trans = btrfs_start_transaction(root, 3);
4894	if (IS_ERR(trans))
4895		return PTR_ERR(trans);
4896
4897	drop_args.start = offset;
4898	drop_args.end = offset + len;
4899	drop_args.drop_cache = true;
4900
4901	ret = btrfs_drop_extents(trans, root, inode, &drop_args);
4902	if (ret) {
4903		btrfs_abort_transaction(trans, ret);
4904		btrfs_end_transaction(trans);
4905		return ret;
4906	}
4907
4908	ret = btrfs_insert_hole_extent(trans, root, btrfs_ino(inode), offset, len);
4909	if (ret) {
4910		btrfs_abort_transaction(trans, ret);
4911	} else {
4912		btrfs_update_inode_bytes(inode, 0, drop_args.bytes_found);
4913		btrfs_update_inode(trans, inode);
4914	}
4915	btrfs_end_transaction(trans);
4916	return ret;
4917}
4918
4919/*
4920 * This function puts in dummy file extents for the area we're creating a hole
4921 * for.  So if we are truncating this file to a larger size we need to insert
4922 * these file extents so that btrfs_get_extent will return a EXTENT_MAP_HOLE for
4923 * the range between oldsize and size
4924 */
4925int btrfs_cont_expand(struct btrfs_inode *inode, loff_t oldsize, loff_t size)
4926{
4927	struct btrfs_root *root = inode->root;
4928	struct btrfs_fs_info *fs_info = root->fs_info;
4929	struct extent_io_tree *io_tree = &inode->io_tree;
4930	struct extent_map *em = NULL;
4931	struct extent_state *cached_state = NULL;
4932	u64 hole_start = ALIGN(oldsize, fs_info->sectorsize);
4933	u64 block_end = ALIGN(size, fs_info->sectorsize);
4934	u64 last_byte;
4935	u64 cur_offset;
4936	u64 hole_size;
4937	int err = 0;
4938
4939	/*
4940	 * If our size started in the middle of a block we need to zero out the
4941	 * rest of the block before we expand the i_size, otherwise we could
4942	 * expose stale data.
4943	 */
4944	err = btrfs_truncate_block(inode, oldsize, 0, 0);
4945	if (err)
4946		return err;
4947
4948	if (size <= hole_start)
4949		return 0;
4950
4951	btrfs_lock_and_flush_ordered_range(inode, hole_start, block_end - 1,
4952					   &cached_state);
4953	cur_offset = hole_start;
4954	while (1) {
4955		em = btrfs_get_extent(inode, NULL, cur_offset, block_end - cur_offset);
4956		if (IS_ERR(em)) {
4957			err = PTR_ERR(em);
4958			em = NULL;
4959			break;
4960		}
4961		last_byte = min(extent_map_end(em), block_end);
4962		last_byte = ALIGN(last_byte, fs_info->sectorsize);
4963		hole_size = last_byte - cur_offset;
4964
4965		if (!(em->flags & EXTENT_FLAG_PREALLOC)) {
4966			struct extent_map *hole_em;
4967
4968			err = maybe_insert_hole(inode, cur_offset, hole_size);
4969			if (err)
4970				break;
4971
4972			err = btrfs_inode_set_file_extent_range(inode,
4973							cur_offset, hole_size);
4974			if (err)
4975				break;
4976
4977			hole_em = alloc_extent_map();
4978			if (!hole_em) {
4979				btrfs_drop_extent_map_range(inode, cur_offset,
4980						    cur_offset + hole_size - 1,
4981						    false);
4982				btrfs_set_inode_full_sync(inode);
4983				goto next;
4984			}
4985			hole_em->start = cur_offset;
4986			hole_em->len = hole_size;
4987			hole_em->orig_start = cur_offset;
4988
4989			hole_em->block_start = EXTENT_MAP_HOLE;
4990			hole_em->block_len = 0;
4991			hole_em->orig_block_len = 0;
4992			hole_em->ram_bytes = hole_size;
4993			hole_em->generation = btrfs_get_fs_generation(fs_info);
4994
4995			err = btrfs_replace_extent_map_range(inode, hole_em, true);
4996			free_extent_map(hole_em);
4997		} else {
4998			err = btrfs_inode_set_file_extent_range(inode,
4999							cur_offset, hole_size);
5000			if (err)
5001				break;
5002		}
5003next:
5004		free_extent_map(em);
5005		em = NULL;
5006		cur_offset = last_byte;
5007		if (cur_offset >= block_end)
5008			break;
5009	}
5010	free_extent_map(em);
5011	unlock_extent(io_tree, hole_start, block_end - 1, &cached_state);
5012	return err;
5013}
5014
5015static int btrfs_setsize(struct inode *inode, struct iattr *attr)
5016{
5017	struct btrfs_root *root = BTRFS_I(inode)->root;
5018	struct btrfs_trans_handle *trans;
5019	loff_t oldsize = i_size_read(inode);
5020	loff_t newsize = attr->ia_size;
5021	int mask = attr->ia_valid;
5022	int ret;
5023
5024	/*
5025	 * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a
5026	 * special case where we need to update the times despite not having
5027	 * these flags set.  For all other operations the VFS set these flags
5028	 * explicitly if it wants a timestamp update.
5029	 */
5030	if (newsize != oldsize) {
5031		inode_inc_iversion(inode);
5032		if (!(mask & (ATTR_CTIME | ATTR_MTIME))) {
5033			inode_set_mtime_to_ts(inode,
5034					      inode_set_ctime_current(inode));
5035		}
5036	}
5037
5038	if (newsize > oldsize) {
5039		/*
5040		 * Don't do an expanding truncate while snapshotting is ongoing.
5041		 * This is to ensure the snapshot captures a fully consistent
5042		 * state of this file - if the snapshot captures this expanding
5043		 * truncation, it must capture all writes that happened before
5044		 * this truncation.
5045		 */
5046		btrfs_drew_write_lock(&root->snapshot_lock);
5047		ret = btrfs_cont_expand(BTRFS_I(inode), oldsize, newsize);
5048		if (ret) {
5049			btrfs_drew_write_unlock(&root->snapshot_lock);
5050			return ret;
5051		}
5052
5053		trans = btrfs_start_transaction(root, 1);
5054		if (IS_ERR(trans)) {
5055			btrfs_drew_write_unlock(&root->snapshot_lock);
5056			return PTR_ERR(trans);
5057		}
5058
5059		i_size_write(inode, newsize);
5060		btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0);
5061		pagecache_isize_extended(inode, oldsize, newsize);
5062		ret = btrfs_update_inode(trans, BTRFS_I(inode));
5063		btrfs_drew_write_unlock(&root->snapshot_lock);
5064		btrfs_end_transaction(trans);
5065	} else {
5066		struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
5067
5068		if (btrfs_is_zoned(fs_info)) {
5069			ret = btrfs_wait_ordered_range(inode,
5070					ALIGN(newsize, fs_info->sectorsize),
5071					(u64)-1);
5072			if (ret)
5073				return ret;
5074		}
5075
5076		/*
5077		 * We're truncating a file that used to have good data down to
5078		 * zero. Make sure any new writes to the file get on disk
5079		 * on close.
5080		 */
5081		if (newsize == 0)
5082			set_bit(BTRFS_INODE_FLUSH_ON_CLOSE,
5083				&BTRFS_I(inode)->runtime_flags);
5084
5085		truncate_setsize(inode, newsize);
5086
5087		inode_dio_wait(inode);
5088
5089		ret = btrfs_truncate(BTRFS_I(inode), newsize == oldsize);
5090		if (ret && inode->i_nlink) {
5091			int err;
5092
5093			/*
5094			 * Truncate failed, so fix up the in-memory size. We
5095			 * adjusted disk_i_size down as we removed extents, so
5096			 * wait for disk_i_size to be stable and then update the
5097			 * in-memory size to match.
5098			 */
5099			err = btrfs_wait_ordered_range(inode, 0, (u64)-1);
5100			if (err)
5101				return err;
5102			i_size_write(inode, BTRFS_I(inode)->disk_i_size);
5103		}
5104	}
5105
5106	return ret;
5107}
5108
5109static int btrfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
5110			 struct iattr *attr)
5111{
5112	struct inode *inode = d_inode(dentry);
5113	struct btrfs_root *root = BTRFS_I(inode)->root;
5114	int err;
5115
5116	if (btrfs_root_readonly(root))
5117		return -EROFS;
5118
5119	err = setattr_prepare(idmap, dentry, attr);
5120	if (err)
5121		return err;
5122
5123	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
5124		err = btrfs_setsize(inode, attr);
5125		if (err)
5126			return err;
5127	}
5128
5129	if (attr->ia_valid) {
5130		setattr_copy(idmap, inode, attr);
5131		inode_inc_iversion(inode);
5132		err = btrfs_dirty_inode(BTRFS_I(inode));
5133
5134		if (!err && attr->ia_valid & ATTR_MODE)
5135			err = posix_acl_chmod(idmap, dentry, inode->i_mode);
5136	}
5137
5138	return err;
5139}
5140
5141/*
5142 * While truncating the inode pages during eviction, we get the VFS
5143 * calling btrfs_invalidate_folio() against each folio of the inode. This
5144 * is slow because the calls to btrfs_invalidate_folio() result in a
5145 * huge amount of calls to lock_extent() and clear_extent_bit(),
5146 * which keep merging and splitting extent_state structures over and over,
5147 * wasting lots of time.
5148 *
5149 * Therefore if the inode is being evicted, let btrfs_invalidate_folio()
5150 * skip all those expensive operations on a per folio basis and do only
5151 * the ordered io finishing, while we release here the extent_map and
5152 * extent_state structures, without the excessive merging and splitting.
5153 */
5154static void evict_inode_truncate_pages(struct inode *inode)
5155{
5156	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
5157	struct rb_node *node;
5158
5159	ASSERT(inode->i_state & I_FREEING);
5160	truncate_inode_pages_final(&inode->i_data);
5161
5162	btrfs_drop_extent_map_range(BTRFS_I(inode), 0, (u64)-1, false);
5163
5164	/*
5165	 * Keep looping until we have no more ranges in the io tree.
5166	 * We can have ongoing bios started by readahead that have
5167	 * their endio callback (extent_io.c:end_bio_extent_readpage)
5168	 * still in progress (unlocked the pages in the bio but did not yet
5169	 * unlocked the ranges in the io tree). Therefore this means some
5170	 * ranges can still be locked and eviction started because before
5171	 * submitting those bios, which are executed by a separate task (work
5172	 * queue kthread), inode references (inode->i_count) were not taken
5173	 * (which would be dropped in the end io callback of each bio).
5174	 * Therefore here we effectively end up waiting for those bios and
5175	 * anyone else holding locked ranges without having bumped the inode's
5176	 * reference count - if we don't do it, when they access the inode's
5177	 * io_tree to unlock a range it may be too late, leading to an
5178	 * use-after-free issue.
5179	 */
5180	spin_lock(&io_tree->lock);
5181	while (!RB_EMPTY_ROOT(&io_tree->state)) {
5182		struct extent_state *state;
5183		struct extent_state *cached_state = NULL;
5184		u64 start;
5185		u64 end;
5186		unsigned state_flags;
5187
5188		node = rb_first(&io_tree->state);
5189		state = rb_entry(node, struct extent_state, rb_node);
5190		start = state->start;
5191		end = state->end;
5192		state_flags = state->state;
5193		spin_unlock(&io_tree->lock);
5194
5195		lock_extent(io_tree, start, end, &cached_state);
5196
5197		/*
5198		 * If still has DELALLOC flag, the extent didn't reach disk,
5199		 * and its reserved space won't be freed by delayed_ref.
5200		 * So we need to free its reserved space here.
5201		 * (Refer to comment in btrfs_invalidate_folio, case 2)
5202		 *
5203		 * Note, end is the bytenr of last byte, so we need + 1 here.
5204		 */
5205		if (state_flags & EXTENT_DELALLOC)
5206			btrfs_qgroup_free_data(BTRFS_I(inode), NULL, start,
5207					       end - start + 1, NULL);
5208
5209		clear_extent_bit(io_tree, start, end,
5210				 EXTENT_CLEAR_ALL_BITS | EXTENT_DO_ACCOUNTING,
5211				 &cached_state);
5212
5213		cond_resched();
5214		spin_lock(&io_tree->lock);
5215	}
5216	spin_unlock(&io_tree->lock);
5217}
5218
5219static struct btrfs_trans_handle *evict_refill_and_join(struct btrfs_root *root,
5220							struct btrfs_block_rsv *rsv)
5221{
5222	struct btrfs_fs_info *fs_info = root->fs_info;
5223	struct btrfs_trans_handle *trans;
5224	u64 delayed_refs_extra = btrfs_calc_delayed_ref_bytes(fs_info, 1);
5225	int ret;
5226
5227	/*
5228	 * Eviction should be taking place at some place safe because of our
5229	 * delayed iputs.  However the normal flushing code will run delayed
5230	 * iputs, so we cannot use FLUSH_ALL otherwise we'll deadlock.
5231	 *
5232	 * We reserve the delayed_refs_extra here again because we can't use
5233	 * btrfs_start_transaction(root, 0) for the same deadlocky reason as
5234	 * above.  We reserve our extra bit here because we generate a ton of
5235	 * delayed refs activity by truncating.
5236	 *
5237	 * BTRFS_RESERVE_FLUSH_EVICT will steal from the global_rsv if it can,
5238	 * if we fail to make this reservation we can re-try without the
5239	 * delayed_refs_extra so we can make some forward progress.
5240	 */
5241	ret = btrfs_block_rsv_refill(fs_info, rsv, rsv->size + delayed_refs_extra,
5242				     BTRFS_RESERVE_FLUSH_EVICT);
5243	if (ret) {
5244		ret = btrfs_block_rsv_refill(fs_info, rsv, rsv->size,
5245					     BTRFS_RESERVE_FLUSH_EVICT);
5246		if (ret) {
5247			btrfs_warn(fs_info,
5248				   "could not allocate space for delete; will truncate on mount");
5249			return ERR_PTR(-ENOSPC);
5250		}
5251		delayed_refs_extra = 0;
5252	}
5253
5254	trans = btrfs_join_transaction(root);
5255	if (IS_ERR(trans))
5256		return trans;
5257
5258	if (delayed_refs_extra) {
5259		trans->block_rsv = &fs_info->trans_block_rsv;
5260		trans->bytes_reserved = delayed_refs_extra;
5261		btrfs_block_rsv_migrate(rsv, trans->block_rsv,
5262					delayed_refs_extra, true);
5263	}
5264	return trans;
5265}
5266
5267void btrfs_evict_inode(struct inode *inode)
5268{
5269	struct btrfs_fs_info *fs_info;
5270	struct btrfs_trans_handle *trans;
5271	struct btrfs_root *root = BTRFS_I(inode)->root;
5272	struct btrfs_block_rsv *rsv = NULL;
5273	int ret;
5274
5275	trace_btrfs_inode_evict(inode);
5276
5277	if (!root) {
5278		fsverity_cleanup_inode(inode);
5279		clear_inode(inode);
5280		return;
5281	}
5282
5283	fs_info = inode_to_fs_info(inode);
5284	evict_inode_truncate_pages(inode);
5285
5286	if (inode->i_nlink &&
5287	    ((btrfs_root_refs(&root->root_item) != 0 &&
5288	      root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID) ||
5289	     btrfs_is_free_space_inode(BTRFS_I(inode))))
5290		goto out;
5291
5292	if (is_bad_inode(inode))
5293		goto out;
5294
5295	if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
5296		goto out;
5297
5298	if (inode->i_nlink > 0) {
5299		BUG_ON(btrfs_root_refs(&root->root_item) != 0 &&
5300		       root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID);
5301		goto out;
5302	}
5303
5304	/*
5305	 * This makes sure the inode item in tree is uptodate and the space for
5306	 * the inode update is released.
5307	 */
5308	ret = btrfs_commit_inode_delayed_inode(BTRFS_I(inode));
5309	if (ret)
5310		goto out;
5311
5312	/*
5313	 * This drops any pending insert or delete operations we have for this
5314	 * inode.  We could have a delayed dir index deletion queued up, but
5315	 * we're removing the inode completely so that'll be taken care of in
5316	 * the truncate.
5317	 */
5318	btrfs_kill_delayed_inode_items(BTRFS_I(inode));
5319
5320	rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
5321	if (!rsv)
5322		goto out;
5323	rsv->size = btrfs_calc_metadata_size(fs_info, 1);
5324	rsv->failfast = true;
5325
5326	btrfs_i_size_write(BTRFS_I(inode), 0);
5327
5328	while (1) {
5329		struct btrfs_truncate_control control = {
5330			.inode = BTRFS_I(inode),
5331			.ino = btrfs_ino(BTRFS_I(inode)),
5332			.new_size = 0,
5333			.min_type = 0,
5334		};
5335
5336		trans = evict_refill_and_join(root, rsv);
5337		if (IS_ERR(trans))
5338			goto out;
5339
5340		trans->block_rsv = rsv;
5341
5342		ret = btrfs_truncate_inode_items(trans, root, &control);
5343		trans->block_rsv = &fs_info->trans_block_rsv;
5344		btrfs_end_transaction(trans);
5345		/*
5346		 * We have not added new delayed items for our inode after we
5347		 * have flushed its delayed items, so no need to throttle on
5348		 * delayed items. However we have modified extent buffers.
5349		 */
5350		btrfs_btree_balance_dirty_nodelay(fs_info);
5351		if (ret && ret != -ENOSPC && ret != -EAGAIN)
5352			goto out;
5353		else if (!ret)
5354			break;
5355	}
5356
5357	/*
5358	 * Errors here aren't a big deal, it just means we leave orphan items in
5359	 * the tree. They will be cleaned up on the next mount. If the inode
5360	 * number gets reused, cleanup deletes the orphan item without doing
5361	 * anything, and unlink reuses the existing orphan item.
5362	 *
5363	 * If it turns out that we are dropping too many of these, we might want
5364	 * to add a mechanism for retrying these after a commit.
5365	 */
5366	trans = evict_refill_and_join(root, rsv);
5367	if (!IS_ERR(trans)) {
5368		trans->block_rsv = rsv;
5369		btrfs_orphan_del(trans, BTRFS_I(inode));
5370		trans->block_rsv = &fs_info->trans_block_rsv;
5371		btrfs_end_transaction(trans);
5372	}
5373
5374out:
5375	btrfs_free_block_rsv(fs_info, rsv);
5376	/*
5377	 * If we didn't successfully delete, the orphan item will still be in
5378	 * the tree and we'll retry on the next mount. Again, we might also want
5379	 * to retry these periodically in the future.
5380	 */
5381	btrfs_remove_delayed_node(BTRFS_I(inode));
5382	fsverity_cleanup_inode(inode);
5383	clear_inode(inode);
5384}
5385
5386/*
5387 * Return the key found in the dir entry in the location pointer, fill @type
5388 * with BTRFS_FT_*, and return 0.
5389 *
5390 * If no dir entries were found, returns -ENOENT.
5391 * If found a corrupted location in dir entry, returns -EUCLEAN.
5392 */
5393static int btrfs_inode_by_name(struct btrfs_inode *dir, struct dentry *dentry,
5394			       struct btrfs_key *location, u8 *type)
5395{
5396	struct btrfs_dir_item *di;
5397	struct btrfs_path *path;
5398	struct btrfs_root *root = dir->root;
5399	int ret = 0;
5400	struct fscrypt_name fname;
5401
5402	path = btrfs_alloc_path();
5403	if (!path)
5404		return -ENOMEM;
5405
5406	ret = fscrypt_setup_filename(&dir->vfs_inode, &dentry->d_name, 1, &fname);
5407	if (ret < 0)
5408		goto out;
5409	/*
5410	 * fscrypt_setup_filename() should never return a positive value, but
5411	 * gcc on sparc/parisc thinks it can, so assert that doesn't happen.
5412	 */
5413	ASSERT(ret == 0);
5414
5415	/* This needs to handle no-key deletions later on */
5416
5417	di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(dir),
5418				   &fname.disk_name, 0);
5419	if (IS_ERR_OR_NULL(di)) {
5420		ret = di ? PTR_ERR(di) : -ENOENT;
5421		goto out;
5422	}
5423
5424	btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
5425	if (location->type != BTRFS_INODE_ITEM_KEY &&
5426	    location->type != BTRFS_ROOT_ITEM_KEY) {
5427		ret = -EUCLEAN;
5428		btrfs_warn(root->fs_info,
5429"%s gets something invalid in DIR_ITEM (name %s, directory ino %llu, location(%llu %u %llu))",
5430			   __func__, fname.disk_name.name, btrfs_ino(dir),
5431			   location->objectid, location->type, location->offset);
5432	}
5433	if (!ret)
5434		*type = btrfs_dir_ftype(path->nodes[0], di);
5435out:
5436	fscrypt_free_filename(&fname);
5437	btrfs_free_path(path);
5438	return ret;
5439}
5440
5441/*
5442 * when we hit a tree root in a directory, the btrfs part of the inode
5443 * needs to be changed to reflect the root directory of the tree root.  This
5444 * is kind of like crossing a mount point.
5445 */
5446static int fixup_tree_root_location(struct btrfs_fs_info *fs_info,
5447				    struct btrfs_inode *dir,
5448				    struct dentry *dentry,
5449				    struct btrfs_key *location,
5450				    struct btrfs_root **sub_root)
5451{
5452	struct btrfs_path *path;
5453	struct btrfs_root *new_root;
5454	struct btrfs_root_ref *ref;
5455	struct extent_buffer *leaf;
5456	struct btrfs_key key;
5457	int ret;
5458	int err = 0;
5459	struct fscrypt_name fname;
5460
5461	ret = fscrypt_setup_filename(&dir->vfs_inode, &dentry->d_name, 0, &fname);
5462	if (ret)
5463		return ret;
5464
5465	path = btrfs_alloc_path();
5466	if (!path) {
5467		err = -ENOMEM;
5468		goto out;
5469	}
5470
5471	err = -ENOENT;
5472	key.objectid = dir->root->root_key.objectid;
5473	key.type = BTRFS_ROOT_REF_KEY;
5474	key.offset = location->objectid;
5475
5476	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
5477	if (ret) {
5478		if (ret < 0)
5479			err = ret;
5480		goto out;
5481	}
5482
5483	leaf = path->nodes[0];
5484	ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
5485	if (btrfs_root_ref_dirid(leaf, ref) != btrfs_ino(dir) ||
5486	    btrfs_root_ref_name_len(leaf, ref) != fname.disk_name.len)
5487		goto out;
5488
5489	ret = memcmp_extent_buffer(leaf, fname.disk_name.name,
5490				   (unsigned long)(ref + 1), fname.disk_name.len);
5491	if (ret)
5492		goto out;
5493
5494	btrfs_release_path(path);
5495
5496	new_root = btrfs_get_fs_root(fs_info, location->objectid, true);
5497	if (IS_ERR(new_root)) {
5498		err = PTR_ERR(new_root);
5499		goto out;
5500	}
5501
5502	*sub_root = new_root;
5503	location->objectid = btrfs_root_dirid(&new_root->root_item);
5504	location->type = BTRFS_INODE_ITEM_KEY;
5505	location->offset = 0;
5506	err = 0;
5507out:
5508	btrfs_free_path(path);
5509	fscrypt_free_filename(&fname);
5510	return err;
5511}
5512
5513static void inode_tree_add(struct btrfs_inode *inode)
5514{
5515	struct btrfs_root *root = inode->root;
5516	struct btrfs_inode *entry;
5517	struct rb_node **p;
5518	struct rb_node *parent;
5519	struct rb_node *new = &inode->rb_node;
5520	u64 ino = btrfs_ino(inode);
5521
5522	if (inode_unhashed(&inode->vfs_inode))
5523		return;
5524	parent = NULL;
5525	spin_lock(&root->inode_lock);
5526	p = &root->inode_tree.rb_node;
5527	while (*p) {
5528		parent = *p;
5529		entry = rb_entry(parent, struct btrfs_inode, rb_node);
5530
5531		if (ino < btrfs_ino(entry))
5532			p = &parent->rb_left;
5533		else if (ino > btrfs_ino(entry))
5534			p = &parent->rb_right;
5535		else {
5536			WARN_ON(!(entry->vfs_inode.i_state &
5537				  (I_WILL_FREE | I_FREEING)));
5538			rb_replace_node(parent, new, &root->inode_tree);
5539			RB_CLEAR_NODE(parent);
5540			spin_unlock(&root->inode_lock);
5541			return;
5542		}
5543	}
5544	rb_link_node(new, parent, p);
5545	rb_insert_color(new, &root->inode_tree);
5546	spin_unlock(&root->inode_lock);
5547}
5548
5549static void inode_tree_del(struct btrfs_inode *inode)
5550{
5551	struct btrfs_root *root = inode->root;
5552	int empty = 0;
5553
5554	spin_lock(&root->inode_lock);
5555	if (!RB_EMPTY_NODE(&inode->rb_node)) {
5556		rb_erase(&inode->rb_node, &root->inode_tree);
5557		RB_CLEAR_NODE(&inode->rb_node);
5558		empty = RB_EMPTY_ROOT(&root->inode_tree);
5559	}
5560	spin_unlock(&root->inode_lock);
5561
5562	if (empty && btrfs_root_refs(&root->root_item) == 0) {
5563		spin_lock(&root->inode_lock);
5564		empty = RB_EMPTY_ROOT(&root->inode_tree);
5565		spin_unlock(&root->inode_lock);
5566		if (empty)
5567			btrfs_add_dead_root(root);
5568	}
5569}
5570
5571
5572static int btrfs_init_locked_inode(struct inode *inode, void *p)
5573{
5574	struct btrfs_iget_args *args = p;
5575
5576	inode->i_ino = args->ino;
5577	BTRFS_I(inode)->location.objectid = args->ino;
5578	BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY;
5579	BTRFS_I(inode)->location.offset = 0;
5580	BTRFS_I(inode)->root = btrfs_grab_root(args->root);
5581
5582	if (args->root && args->root == args->root->fs_info->tree_root &&
5583	    args->ino != BTRFS_BTREE_INODE_OBJECTID)
5584		set_bit(BTRFS_INODE_FREE_SPACE_INODE,
5585			&BTRFS_I(inode)->runtime_flags);
5586	return 0;
5587}
5588
5589static int btrfs_find_actor(struct inode *inode, void *opaque)
5590{
5591	struct btrfs_iget_args *args = opaque;
5592
5593	return args->ino == BTRFS_I(inode)->location.objectid &&
5594		args->root == BTRFS_I(inode)->root;
5595}
5596
5597static struct inode *btrfs_iget_locked(struct super_block *s, u64 ino,
5598				       struct btrfs_root *root)
5599{
5600	struct inode *inode;
5601	struct btrfs_iget_args args;
5602	unsigned long hashval = btrfs_inode_hash(ino, root);
5603
5604	args.ino = ino;
5605	args.root = root;
5606
5607	inode = iget5_locked(s, hashval, btrfs_find_actor,
5608			     btrfs_init_locked_inode,
5609			     (void *)&args);
5610	return inode;
5611}
5612
5613/*
5614 * Get an inode object given its inode number and corresponding root.
5615 * Path can be preallocated to prevent recursing back to iget through
5616 * allocator. NULL is also valid but may require an additional allocation
5617 * later.
5618 */
5619struct inode *btrfs_iget_path(struct super_block *s, u64 ino,
5620			      struct btrfs_root *root, struct btrfs_path *path)
5621{
5622	struct inode *inode;
5623
5624	inode = btrfs_iget_locked(s, ino, root);
5625	if (!inode)
5626		return ERR_PTR(-ENOMEM);
5627
5628	if (inode->i_state & I_NEW) {
5629		int ret;
5630
5631		ret = btrfs_read_locked_inode(inode, path);
5632		if (!ret) {
5633			inode_tree_add(BTRFS_I(inode));
5634			unlock_new_inode(inode);
5635		} else {
5636			iget_failed(inode);
5637			/*
5638			 * ret > 0 can come from btrfs_search_slot called by
5639			 * btrfs_read_locked_inode, this means the inode item
5640			 * was not found.
5641			 */
5642			if (ret > 0)
5643				ret = -ENOENT;
5644			inode = ERR_PTR(ret);
5645		}
5646	}
5647
5648	return inode;
5649}
5650
5651struct inode *btrfs_iget(struct super_block *s, u64 ino, struct btrfs_root *root)
5652{
5653	return btrfs_iget_path(s, ino, root, NULL);
5654}
5655
5656static struct inode *new_simple_dir(struct inode *dir,
5657				    struct btrfs_key *key,
5658				    struct btrfs_root *root)
5659{
5660	struct timespec64 ts;
5661	struct inode *inode = new_inode(dir->i_sb);
5662
5663	if (!inode)
5664		return ERR_PTR(-ENOMEM);
5665
5666	BTRFS_I(inode)->root = btrfs_grab_root(root);
5667	memcpy(&BTRFS_I(inode)->location, key, sizeof(*key));
5668	set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags);
5669
5670	inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID;
5671	/*
5672	 * We only need lookup, the rest is read-only and there's no inode
5673	 * associated with the dentry
5674	 */
5675	inode->i_op = &simple_dir_inode_operations;
5676	inode->i_opflags &= ~IOP_XATTR;
5677	inode->i_fop = &simple_dir_operations;
5678	inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO;
5679
5680	ts = inode_set_ctime_current(inode);
5681	inode_set_mtime_to_ts(inode, ts);
5682	inode_set_atime_to_ts(inode, inode_get_atime(dir));
5683	BTRFS_I(inode)->i_otime_sec = ts.tv_sec;
5684	BTRFS_I(inode)->i_otime_nsec = ts.tv_nsec;
5685
5686	inode->i_uid = dir->i_uid;
5687	inode->i_gid = dir->i_gid;
5688
5689	return inode;
5690}
5691
5692static_assert(BTRFS_FT_UNKNOWN == FT_UNKNOWN);
5693static_assert(BTRFS_FT_REG_FILE == FT_REG_FILE);
5694static_assert(BTRFS_FT_DIR == FT_DIR);
5695static_assert(BTRFS_FT_CHRDEV == FT_CHRDEV);
5696static_assert(BTRFS_FT_BLKDEV == FT_BLKDEV);
5697static_assert(BTRFS_FT_FIFO == FT_FIFO);
5698static_assert(BTRFS_FT_SOCK == FT_SOCK);
5699static_assert(BTRFS_FT_SYMLINK == FT_SYMLINK);
5700
5701static inline u8 btrfs_inode_type(struct inode *inode)
5702{
5703	return fs_umode_to_ftype(inode->i_mode);
5704}
5705
5706struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
5707{
5708	struct btrfs_fs_info *fs_info = inode_to_fs_info(dir);
5709	struct inode *inode;
5710	struct btrfs_root *root = BTRFS_I(dir)->root;
5711	struct btrfs_root *sub_root = root;
5712	struct btrfs_key location;
5713	u8 di_type = 0;
5714	int ret = 0;
5715
5716	if (dentry->d_name.len > BTRFS_NAME_LEN)
5717		return ERR_PTR(-ENAMETOOLONG);
5718
5719	ret = btrfs_inode_by_name(BTRFS_I(dir), dentry, &location, &di_type);
5720	if (ret < 0)
5721		return ERR_PTR(ret);
5722
5723	if (location.type == BTRFS_INODE_ITEM_KEY) {
5724		inode = btrfs_iget(dir->i_sb, location.objectid, root);
5725		if (IS_ERR(inode))
5726			return inode;
5727
5728		/* Do extra check against inode mode with di_type */
5729		if (btrfs_inode_type(inode) != di_type) {
5730			btrfs_crit(fs_info,
5731"inode mode mismatch with dir: inode mode=0%o btrfs type=%u dir type=%u",
5732				  inode->i_mode, btrfs_inode_type(inode),
5733				  di_type);
5734			iput(inode);
5735			return ERR_PTR(-EUCLEAN);
5736		}
5737		return inode;
5738	}
5739
5740	ret = fixup_tree_root_location(fs_info, BTRFS_I(dir), dentry,
5741				       &location, &sub_root);
5742	if (ret < 0) {
5743		if (ret != -ENOENT)
5744			inode = ERR_PTR(ret);
5745		else
5746			inode = new_simple_dir(dir, &location, root);
5747	} else {
5748		inode = btrfs_iget(dir->i_sb, location.objectid, sub_root);
5749		btrfs_put_root(sub_root);
5750
5751		if (IS_ERR(inode))
5752			return inode;
5753
5754		down_read(&fs_info->cleanup_work_sem);
5755		if (!sb_rdonly(inode->i_sb))
5756			ret = btrfs_orphan_cleanup(sub_root);
5757		up_read(&fs_info->cleanup_work_sem);
5758		if (ret) {
5759			iput(inode);
5760			inode = ERR_PTR(ret);
5761		}
5762	}
5763
5764	return inode;
5765}
5766
5767static int btrfs_dentry_delete(const struct dentry *dentry)
5768{
5769	struct btrfs_root *root;
5770	struct inode *inode = d_inode(dentry);
5771
5772	if (!inode && !IS_ROOT(dentry))
5773		inode = d_inode(dentry->d_parent);
5774
5775	if (inode) {
5776		root = BTRFS_I(inode)->root;
5777		if (btrfs_root_refs(&root->root_item) == 0)
5778			return 1;
5779
5780		if (btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
5781			return 1;
5782	}
5783	return 0;
5784}
5785
5786static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
5787				   unsigned int flags)
5788{
5789	struct inode *inode = btrfs_lookup_dentry(dir, dentry);
5790
5791	if (inode == ERR_PTR(-ENOENT))
5792		inode = NULL;
5793	return d_splice_alias(inode, dentry);
5794}
5795
5796/*
5797 * Find the highest existing sequence number in a directory and then set the
5798 * in-memory index_cnt variable to the first free sequence number.
5799 */
5800static int btrfs_set_inode_index_count(struct btrfs_inode *inode)
5801{
5802	struct btrfs_root *root = inode->root;
5803	struct btrfs_key key, found_key;
5804	struct btrfs_path *path;
5805	struct extent_buffer *leaf;
5806	int ret;
5807
5808	key.objectid = btrfs_ino(inode);
5809	key.type = BTRFS_DIR_INDEX_KEY;
5810	key.offset = (u64)-1;
5811
5812	path = btrfs_alloc_path();
5813	if (!path)
5814		return -ENOMEM;
5815
5816	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5817	if (ret < 0)
5818		goto out;
5819	/* FIXME: we should be able to handle this */
5820	if (ret == 0)
5821		goto out;
5822	ret = 0;
5823
5824	if (path->slots[0] == 0) {
5825		inode->index_cnt = BTRFS_DIR_START_INDEX;
5826		goto out;
5827	}
5828
5829	path->slots[0]--;
5830
5831	leaf = path->nodes[0];
5832	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5833
5834	if (found_key.objectid != btrfs_ino(inode) ||
5835	    found_key.type != BTRFS_DIR_INDEX_KEY) {
5836		inode->index_cnt = BTRFS_DIR_START_INDEX;
5837		goto out;
5838	}
5839
5840	inode->index_cnt = found_key.offset + 1;
5841out:
5842	btrfs_free_path(path);
5843	return ret;
5844}
5845
5846static int btrfs_get_dir_last_index(struct btrfs_inode *dir, u64 *index)
5847{
5848	int ret = 0;
5849
5850	btrfs_inode_lock(dir, 0);
5851	if (dir->index_cnt == (u64)-1) {
5852		ret = btrfs_inode_delayed_dir_index_count(dir);
5853		if (ret) {
5854			ret = btrfs_set_inode_index_count(dir);
5855			if (ret)
5856				goto out;
5857		}
5858	}
5859
5860	/* index_cnt is the index number of next new entry, so decrement it. */
5861	*index = dir->index_cnt - 1;
5862out:
5863	btrfs_inode_unlock(dir, 0);
5864
5865	return ret;
5866}
5867
5868/*
5869 * All this infrastructure exists because dir_emit can fault, and we are holding
5870 * the tree lock when doing readdir.  For now just allocate a buffer and copy
5871 * our information into that, and then dir_emit from the buffer.  This is
5872 * similar to what NFS does, only we don't keep the buffer around in pagecache
5873 * because I'm afraid I'll mess that up.  Long term we need to make filldir do
5874 * copy_to_user_inatomic so we don't have to worry about page faulting under the
5875 * tree lock.
5876 */
5877static int btrfs_opendir(struct inode *inode, struct file *file)
5878{
5879	struct btrfs_file_private *private;
5880	u64 last_index;
5881	int ret;
5882
5883	ret = btrfs_get_dir_last_index(BTRFS_I(inode), &last_index);
5884	if (ret)
5885		return ret;
5886
5887	private = kzalloc(sizeof(struct btrfs_file_private), GFP_KERNEL);
5888	if (!private)
5889		return -ENOMEM;
5890	private->last_index = last_index;
5891	private->filldir_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
5892	if (!private->filldir_buf) {
5893		kfree(private);
5894		return -ENOMEM;
5895	}
5896	file->private_data = private;
5897	return 0;
5898}
5899
5900static loff_t btrfs_dir_llseek(struct file *file, loff_t offset, int whence)
5901{
5902	struct btrfs_file_private *private = file->private_data;
5903	int ret;
5904
5905	ret = btrfs_get_dir_last_index(BTRFS_I(file_inode(file)),
5906				       &private->last_index);
5907	if (ret)
5908		return ret;
5909
5910	return generic_file_llseek(file, offset, whence);
5911}
5912
5913struct dir_entry {
5914	u64 ino;
5915	u64 offset;
5916	unsigned type;
5917	int name_len;
5918};
5919
5920static int btrfs_filldir(void *addr, int entries, struct dir_context *ctx)
5921{
5922	while (entries--) {
5923		struct dir_entry *entry = addr;
5924		char *name = (char *)(entry + 1);
5925
5926		ctx->pos = get_unaligned(&entry->offset);
5927		if (!dir_emit(ctx, name, get_unaligned(&entry->name_len),
5928					 get_unaligned(&entry->ino),
5929					 get_unaligned(&entry->type)))
5930			return 1;
5931		addr += sizeof(struct dir_entry) +
5932			get_unaligned(&entry->name_len);
5933		ctx->pos++;
5934	}
5935	return 0;
5936}
5937
5938static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
5939{
5940	struct inode *inode = file_inode(file);
5941	struct btrfs_root *root = BTRFS_I(inode)->root;
5942	struct btrfs_file_private *private = file->private_data;
5943	struct btrfs_dir_item *di;
5944	struct btrfs_key key;
5945	struct btrfs_key found_key;
5946	struct btrfs_path *path;
5947	void *addr;
5948	LIST_HEAD(ins_list);
5949	LIST_HEAD(del_list);
5950	int ret;
5951	char *name_ptr;
5952	int name_len;
5953	int entries = 0;
5954	int total_len = 0;
5955	bool put = false;
5956	struct btrfs_key location;
5957
5958	if (!dir_emit_dots(file, ctx))
5959		return 0;
5960
5961	path = btrfs_alloc_path();
5962	if (!path)
5963		return -ENOMEM;
5964
5965	addr = private->filldir_buf;
5966	path->reada = READA_FORWARD;
5967
5968	put = btrfs_readdir_get_delayed_items(inode, private->last_index,
5969					      &ins_list, &del_list);
5970
5971again:
5972	key.type = BTRFS_DIR_INDEX_KEY;
5973	key.offset = ctx->pos;
5974	key.objectid = btrfs_ino(BTRFS_I(inode));
5975
5976	btrfs_for_each_slot(root, &key, &found_key, path, ret) {
5977		struct dir_entry *entry;
5978		struct extent_buffer *leaf = path->nodes[0];
5979		u8 ftype;
5980
5981		if (found_key.objectid != key.objectid)
5982			break;
5983		if (found_key.type != BTRFS_DIR_INDEX_KEY)
5984			break;
5985		if (found_key.offset < ctx->pos)
5986			continue;
5987		if (found_key.offset > private->last_index)
5988			break;
5989		if (btrfs_should_delete_dir_index(&del_list, found_key.offset))
5990			continue;
5991		di = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dir_item);
5992		name_len = btrfs_dir_name_len(leaf, di);
5993		if ((total_len + sizeof(struct dir_entry) + name_len) >=
5994		    PAGE_SIZE) {
5995			btrfs_release_path(path);
5996			ret = btrfs_filldir(private->filldir_buf, entries, ctx);
5997			if (ret)
5998				goto nopos;
5999			addr = private->filldir_buf;
6000			entries = 0;
6001			total_len = 0;
6002			goto again;
6003		}
6004
6005		ftype = btrfs_dir_flags_to_ftype(btrfs_dir_flags(leaf, di));
6006		entry = addr;
6007		name_ptr = (char *)(entry + 1);
6008		read_extent_buffer(leaf, name_ptr,
6009				   (unsigned long)(di + 1), name_len);
6010		put_unaligned(name_len, &entry->name_len);
6011		put_unaligned(fs_ftype_to_dtype(ftype), &entry->type);
6012		btrfs_dir_item_key_to_cpu(leaf, di, &location);
6013		put_unaligned(location.objectid, &entry->ino);
6014		put_unaligned(found_key.offset, &entry->offset);
6015		entries++;
6016		addr += sizeof(struct dir_entry) + name_len;
6017		total_len += sizeof(struct dir_entry) + name_len;
6018	}
6019	/* Catch error encountered during iteration */
6020	if (ret < 0)
6021		goto err;
6022
6023	btrfs_release_path(path);
6024
6025	ret = btrfs_filldir(private->filldir_buf, entries, ctx);
6026	if (ret)
6027		goto nopos;
6028
6029	ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list);
6030	if (ret)
6031		goto nopos;
6032
6033	/*
6034	 * Stop new entries from being returned after we return the last
6035	 * entry.
6036	 *
6037	 * New directory entries are assigned a strictly increasing
6038	 * offset.  This means that new entries created during readdir
6039	 * are *guaranteed* to be seen in the future by that readdir.
6040	 * This has broken buggy programs which operate on names as
6041	 * they're returned by readdir.  Until we re-use freed offsets
6042	 * we have this hack to stop new entries from being returned
6043	 * under the assumption that they'll never reach this huge
6044	 * offset.
6045	 *
6046	 * This is being careful not to overflow 32bit loff_t unless the
6047	 * last entry requires it because doing so has broken 32bit apps
6048	 * in the past.
6049	 */
6050	if (ctx->pos >= INT_MAX)
6051		ctx->pos = LLONG_MAX;
6052	else
6053		ctx->pos = INT_MAX;
6054nopos:
6055	ret = 0;
6056err:
6057	if (put)
6058		btrfs_readdir_put_delayed_items(inode, &ins_list, &del_list);
6059	btrfs_free_path(path);
6060	return ret;
6061}
6062
6063/*
6064 * This is somewhat expensive, updating the tree every time the
6065 * inode changes.  But, it is most likely to find the inode in cache.
6066 * FIXME, needs more benchmarking...there are no reasons other than performance
6067 * to keep or drop this code.
6068 */
6069static int btrfs_dirty_inode(struct btrfs_inode *inode)
6070{
6071	struct btrfs_root *root = inode->root;
6072	struct btrfs_fs_info *fs_info = root->fs_info;
6073	struct btrfs_trans_handle *trans;
6074	int ret;
6075
6076	if (test_bit(BTRFS_INODE_DUMMY, &inode->runtime_flags))
6077		return 0;
6078
6079	trans = btrfs_join_transaction(root);
6080	if (IS_ERR(trans))
6081		return PTR_ERR(trans);
6082
6083	ret = btrfs_update_inode(trans, inode);
6084	if (ret == -ENOSPC || ret == -EDQUOT) {
6085		/* whoops, lets try again with the full transaction */
6086		btrfs_end_transaction(trans);
6087		trans = btrfs_start_transaction(root, 1);
6088		if (IS_ERR(trans))
6089			return PTR_ERR(trans);
6090
6091		ret = btrfs_update_inode(trans, inode);
6092	}
6093	btrfs_end_transaction(trans);
6094	if (inode->delayed_node)
6095		btrfs_balance_delayed_items(fs_info);
6096
6097	return ret;
6098}
6099
6100/*
6101 * This is a copy of file_update_time.  We need this so we can return error on
6102 * ENOSPC for updating the inode in the case of file write and mmap writes.
6103 */
6104static int btrfs_update_time(struct inode *inode, int flags)
6105{
6106	struct btrfs_root *root = BTRFS_I(inode)->root;
6107	bool dirty;
6108
6109	if (btrfs_root_readonly(root))
6110		return -EROFS;
6111
6112	dirty = inode_update_timestamps(inode, flags);
6113	return dirty ? btrfs_dirty_inode(BTRFS_I(inode)) : 0;
6114}
6115
6116/*
6117 * helper to find a free sequence number in a given directory.  This current
6118 * code is very simple, later versions will do smarter things in the btree
6119 */
6120int btrfs_set_inode_index(struct btrfs_inode *dir, u64 *index)
6121{
6122	int ret = 0;
6123
6124	if (dir->index_cnt == (u64)-1) {
6125		ret = btrfs_inode_delayed_dir_index_count(dir);
6126		if (ret) {
6127			ret = btrfs_set_inode_index_count(dir);
6128			if (ret)
6129				return ret;
6130		}
6131	}
6132
6133	*index = dir->index_cnt;
6134	dir->index_cnt++;
6135
6136	return ret;
6137}
6138
6139static int btrfs_insert_inode_locked(struct inode *inode)
6140{
6141	struct btrfs_iget_args args;
6142
6143	args.ino = BTRFS_I(inode)->location.objectid;
6144	args.root = BTRFS_I(inode)->root;
6145
6146	return insert_inode_locked4(inode,
6147		   btrfs_inode_hash(inode->i_ino, BTRFS_I(inode)->root),
6148		   btrfs_find_actor, &args);
6149}
6150
6151int btrfs_new_inode_prepare(struct btrfs_new_inode_args *args,
6152			    unsigned int *trans_num_items)
6153{
6154	struct inode *dir = args->dir;
6155	struct inode *inode = args->inode;
6156	int ret;
6157
6158	if (!args->orphan) {
6159		ret = fscrypt_setup_filename(dir, &args->dentry->d_name, 0,
6160					     &args->fname);
6161		if (ret)
6162			return ret;
6163	}
6164
6165	ret = posix_acl_create(dir, &inode->i_mode, &args->default_acl, &args->acl);
6166	if (ret) {
6167		fscrypt_free_filename(&args->fname);
6168		return ret;
6169	}
6170
6171	/* 1 to add inode item */
6172	*trans_num_items = 1;
6173	/* 1 to add compression property */
6174	if (BTRFS_I(dir)->prop_compress)
6175		(*trans_num_items)++;
6176	/* 1 to add default ACL xattr */
6177	if (args->default_acl)
6178		(*trans_num_items)++;
6179	/* 1 to add access ACL xattr */
6180	if (args->acl)
6181		(*trans_num_items)++;
6182#ifdef CONFIG_SECURITY
6183	/* 1 to add LSM xattr */
6184	if (dir->i_security)
6185		(*trans_num_items)++;
6186#endif
6187	if (args->orphan) {
6188		/* 1 to add orphan item */
6189		(*trans_num_items)++;
6190	} else {
6191		/*
6192		 * 1 to add dir item
6193		 * 1 to add dir index
6194		 * 1 to update parent inode item
6195		 *
6196		 * No need for 1 unit for the inode ref item because it is
6197		 * inserted in a batch together with the inode item at
6198		 * btrfs_create_new_inode().
6199		 */
6200		*trans_num_items += 3;
6201	}
6202	return 0;
6203}
6204
6205void btrfs_new_inode_args_destroy(struct btrfs_new_inode_args *args)
6206{
6207	posix_acl_release(args->acl);
6208	posix_acl_release(args->default_acl);
6209	fscrypt_free_filename(&args->fname);
6210}
6211
6212/*
6213 * Inherit flags from the parent inode.
6214 *
6215 * Currently only the compression flags and the cow flags are inherited.
6216 */
6217static void btrfs_inherit_iflags(struct btrfs_inode *inode, struct btrfs_inode *dir)
6218{
6219	unsigned int flags;
6220
6221	flags = dir->flags;
6222
6223	if (flags & BTRFS_INODE_NOCOMPRESS) {
6224		inode->flags &= ~BTRFS_INODE_COMPRESS;
6225		inode->flags |= BTRFS_INODE_NOCOMPRESS;
6226	} else if (flags & BTRFS_INODE_COMPRESS) {
6227		inode->flags &= ~BTRFS_INODE_NOCOMPRESS;
6228		inode->flags |= BTRFS_INODE_COMPRESS;
6229	}
6230
6231	if (flags & BTRFS_INODE_NODATACOW) {
6232		inode->flags |= BTRFS_INODE_NODATACOW;
6233		if (S_ISREG(inode->vfs_inode.i_mode))
6234			inode->flags |= BTRFS_INODE_NODATASUM;
6235	}
6236
6237	btrfs_sync_inode_flags_to_i_flags(&inode->vfs_inode);
6238}
6239
6240int btrfs_create_new_inode(struct btrfs_trans_handle *trans,
6241			   struct btrfs_new_inode_args *args)
6242{
6243	struct timespec64 ts;
6244	struct inode *dir = args->dir;
6245	struct inode *inode = args->inode;
6246	const struct fscrypt_str *name = args->orphan ? NULL : &args->fname.disk_name;
6247	struct btrfs_fs_info *fs_info = inode_to_fs_info(dir);
6248	struct btrfs_root *root;
6249	struct btrfs_inode_item *inode_item;
6250	struct btrfs_key *location;
6251	struct btrfs_path *path;
6252	u64 objectid;
6253	struct btrfs_inode_ref *ref;
6254	struct btrfs_key key[2];
6255	u32 sizes[2];
6256	struct btrfs_item_batch batch;
6257	unsigned long ptr;
6258	int ret;
6259
6260	path = btrfs_alloc_path();
6261	if (!path)
6262		return -ENOMEM;
6263
6264	if (!args->subvol)
6265		BTRFS_I(inode)->root = btrfs_grab_root(BTRFS_I(dir)->root);
6266	root = BTRFS_I(inode)->root;
6267
6268	ret = btrfs_get_free_objectid(root, &objectid);
6269	if (ret)
6270		goto out;
6271	inode->i_ino = objectid;
6272
6273	if (args->orphan) {
6274		/*
6275		 * O_TMPFILE, set link count to 0, so that after this point, we
6276		 * fill in an inode item with the correct link count.
6277		 */
6278		set_nlink(inode, 0);
6279	} else {
6280		trace_btrfs_inode_request(dir);
6281
6282		ret = btrfs_set_inode_index(BTRFS_I(dir), &BTRFS_I(inode)->dir_index);
6283		if (ret)
6284			goto out;
6285	}
6286	/* index_cnt is ignored for everything but a dir. */
6287	BTRFS_I(inode)->index_cnt = BTRFS_DIR_START_INDEX;
6288	BTRFS_I(inode)->generation = trans->transid;
6289	inode->i_generation = BTRFS_I(inode)->generation;
6290
6291	/*
6292	 * We don't have any capability xattrs set here yet, shortcut any
6293	 * queries for the xattrs here.  If we add them later via the inode
6294	 * security init path or any other path this flag will be cleared.
6295	 */
6296	set_bit(BTRFS_INODE_NO_CAP_XATTR, &BTRFS_I(inode)->runtime_flags);
6297
6298	/*
6299	 * Subvolumes don't inherit flags from their parent directory.
6300	 * Originally this was probably by accident, but we probably can't
6301	 * change it now without compatibility issues.
6302	 */
6303	if (!args->subvol)
6304		btrfs_inherit_iflags(BTRFS_I(inode), BTRFS_I(dir));
6305
6306	if (S_ISREG(inode->i_mode)) {
6307		if (btrfs_test_opt(fs_info, NODATASUM))
6308			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
6309		if (btrfs_test_opt(fs_info, NODATACOW))
6310			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW |
6311				BTRFS_INODE_NODATASUM;
6312	}
6313
6314	location = &BTRFS_I(inode)->location;
6315	location->objectid = objectid;
6316	location->offset = 0;
6317	location->type = BTRFS_INODE_ITEM_KEY;
6318
6319	ret = btrfs_insert_inode_locked(inode);
6320	if (ret < 0) {
6321		if (!args->orphan)
6322			BTRFS_I(dir)->index_cnt--;
6323		goto out;
6324	}
6325
6326	/*
6327	 * We could have gotten an inode number from somebody who was fsynced
6328	 * and then removed in this same transaction, so let's just set full
6329	 * sync since it will be a full sync anyway and this will blow away the
6330	 * old info in the log.
6331	 */
6332	btrfs_set_inode_full_sync(BTRFS_I(inode));
6333
6334	key[0].objectid = objectid;
6335	key[0].type = BTRFS_INODE_ITEM_KEY;
6336	key[0].offset = 0;
6337
6338	sizes[0] = sizeof(struct btrfs_inode_item);
6339
6340	if (!args->orphan) {
6341		/*
6342		 * Start new inodes with an inode_ref. This is slightly more
6343		 * efficient for small numbers of hard links since they will
6344		 * be packed into one item. Extended refs will kick in if we
6345		 * add more hard links than can fit in the ref item.
6346		 */
6347		key[1].objectid = objectid;
6348		key[1].type = BTRFS_INODE_REF_KEY;
6349		if (args->subvol) {
6350			key[1].offset = objectid;
6351			sizes[1] = 2 + sizeof(*ref);
6352		} else {
6353			key[1].offset = btrfs_ino(BTRFS_I(dir));
6354			sizes[1] = name->len + sizeof(*ref);
6355		}
6356	}
6357
6358	batch.keys = &key[0];
6359	batch.data_sizes = &sizes[0];
6360	batch.total_data_size = sizes[0] + (args->orphan ? 0 : sizes[1]);
6361	batch.nr = args->orphan ? 1 : 2;
6362	ret = btrfs_insert_empty_items(trans, root, path, &batch);
6363	if (ret != 0) {
6364		btrfs_abort_transaction(trans, ret);
6365		goto discard;
6366	}
6367
6368	ts = simple_inode_init_ts(inode);
6369	BTRFS_I(inode)->i_otime_sec = ts.tv_sec;
6370	BTRFS_I(inode)->i_otime_nsec = ts.tv_nsec;
6371
6372	/*
6373	 * We're going to fill the inode item now, so at this point the inode
6374	 * must be fully initialized.
6375	 */
6376
6377	inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
6378				  struct btrfs_inode_item);
6379	memzero_extent_buffer(path->nodes[0], (unsigned long)inode_item,
6380			     sizeof(*inode_item));
6381	fill_inode_item(trans, path->nodes[0], inode_item, inode);
6382
6383	if (!args->orphan) {
6384		ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
6385				     struct btrfs_inode_ref);
6386		ptr = (unsigned long)(ref + 1);
6387		if (args->subvol) {
6388			btrfs_set_inode_ref_name_len(path->nodes[0], ref, 2);
6389			btrfs_set_inode_ref_index(path->nodes[0], ref, 0);
6390			write_extent_buffer(path->nodes[0], "..", ptr, 2);
6391		} else {
6392			btrfs_set_inode_ref_name_len(path->nodes[0], ref,
6393						     name->len);
6394			btrfs_set_inode_ref_index(path->nodes[0], ref,
6395						  BTRFS_I(inode)->dir_index);
6396			write_extent_buffer(path->nodes[0], name->name, ptr,
6397					    name->len);
6398		}
6399	}
6400
6401	btrfs_mark_buffer_dirty(trans, path->nodes[0]);
6402	/*
6403	 * We don't need the path anymore, plus inheriting properties, adding
6404	 * ACLs, security xattrs, orphan item or adding the link, will result in
6405	 * allocating yet another path. So just free our path.
6406	 */
6407	btrfs_free_path(path);
6408	path = NULL;
6409
6410	if (args->subvol) {
6411		struct inode *parent;
6412
6413		/*
6414		 * Subvolumes inherit properties from their parent subvolume,
6415		 * not the directory they were created in.
6416		 */
6417		parent = btrfs_iget(fs_info->sb, BTRFS_FIRST_FREE_OBJECTID,
6418				    BTRFS_I(dir)->root);
6419		if (IS_ERR(parent)) {
6420			ret = PTR_ERR(parent);
6421		} else {
6422			ret = btrfs_inode_inherit_props(trans, inode, parent);
6423			iput(parent);
6424		}
6425	} else {
6426		ret = btrfs_inode_inherit_props(trans, inode, dir);
6427	}
6428	if (ret) {
6429		btrfs_err(fs_info,
6430			  "error inheriting props for ino %llu (root %llu): %d",
6431			  btrfs_ino(BTRFS_I(inode)), root->root_key.objectid,
6432			  ret);
6433	}
6434
6435	/*
6436	 * Subvolumes don't inherit ACLs or get passed to the LSM. This is
6437	 * probably a bug.
6438	 */
6439	if (!args->subvol) {
6440		ret = btrfs_init_inode_security(trans, args);
6441		if (ret) {
6442			btrfs_abort_transaction(trans, ret);
6443			goto discard;
6444		}
6445	}
6446
6447	inode_tree_add(BTRFS_I(inode));
6448
6449	trace_btrfs_inode_new(inode);
6450	btrfs_set_inode_last_trans(trans, BTRFS_I(inode));
6451
6452	btrfs_update_root_times(trans, root);
6453
6454	if (args->orphan) {
6455		ret = btrfs_orphan_add(trans, BTRFS_I(inode));
6456	} else {
6457		ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), name,
6458				     0, BTRFS_I(inode)->dir_index);
6459	}
6460	if (ret) {
6461		btrfs_abort_transaction(trans, ret);
6462		goto discard;
6463	}
6464
6465	return 0;
6466
6467discard:
6468	/*
6469	 * discard_new_inode() calls iput(), but the caller owns the reference
6470	 * to the inode.
6471	 */
6472	ihold(inode);
6473	discard_new_inode(inode);
6474out:
6475	btrfs_free_path(path);
6476	return ret;
6477}
6478
6479/*
6480 * utility function to add 'inode' into 'parent_inode' with
6481 * a give name and a given sequence number.
6482 * if 'add_backref' is true, also insert a backref from the
6483 * inode to the parent directory.
6484 */
6485int btrfs_add_link(struct btrfs_trans_handle *trans,
6486		   struct btrfs_inode *parent_inode, struct btrfs_inode *inode,
6487		   const struct fscrypt_str *name, int add_backref, u64 index)
6488{
6489	int ret = 0;
6490	struct btrfs_key key;
6491	struct btrfs_root *root = parent_inode->root;
6492	u64 ino = btrfs_ino(inode);
6493	u64 parent_ino = btrfs_ino(parent_inode);
6494
6495	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
6496		memcpy(&key, &inode->root->root_key, sizeof(key));
6497	} else {
6498		key.objectid = ino;
6499		key.type = BTRFS_INODE_ITEM_KEY;
6500		key.offset = 0;
6501	}
6502
6503	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
6504		ret = btrfs_add_root_ref(trans, key.objectid,
6505					 root->root_key.objectid, parent_ino,
6506					 index, name);
6507	} else if (add_backref) {
6508		ret = btrfs_insert_inode_ref(trans, root, name,
6509					     ino, parent_ino, index);
6510	}
6511
6512	/* Nothing to clean up yet */
6513	if (ret)
6514		return ret;
6515
6516	ret = btrfs_insert_dir_item(trans, name, parent_inode, &key,
6517				    btrfs_inode_type(&inode->vfs_inode), index);
6518	if (ret == -EEXIST || ret == -EOVERFLOW)
6519		goto fail_dir_item;
6520	else if (ret) {
6521		btrfs_abort_transaction(trans, ret);
6522		return ret;
6523	}
6524
6525	btrfs_i_size_write(parent_inode, parent_inode->vfs_inode.i_size +
6526			   name->len * 2);
6527	inode_inc_iversion(&parent_inode->vfs_inode);
6528	/*
6529	 * If we are replaying a log tree, we do not want to update the mtime
6530	 * and ctime of the parent directory with the current time, since the
6531	 * log replay procedure is responsible for setting them to their correct
6532	 * values (the ones it had when the fsync was done).
6533	 */
6534	if (!test_bit(BTRFS_FS_LOG_RECOVERING, &root->fs_info->flags))
6535		inode_set_mtime_to_ts(&parent_inode->vfs_inode,
6536				      inode_set_ctime_current(&parent_inode->vfs_inode));
6537
6538	ret = btrfs_update_inode(trans, parent_inode);
6539	if (ret)
6540		btrfs_abort_transaction(trans, ret);
6541	return ret;
6542
6543fail_dir_item:
6544	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
6545		u64 local_index;
6546		int err;
6547		err = btrfs_del_root_ref(trans, key.objectid,
6548					 root->root_key.objectid, parent_ino,
6549					 &local_index, name);
6550		if (err)
6551			btrfs_abort_transaction(trans, err);
6552	} else if (add_backref) {
6553		u64 local_index;
6554		int err;
6555
6556		err = btrfs_del_inode_ref(trans, root, name, ino, parent_ino,
6557					  &local_index);
6558		if (err)
6559			btrfs_abort_transaction(trans, err);
6560	}
6561
6562	/* Return the original error code */
6563	return ret;
6564}
6565
6566static int btrfs_create_common(struct inode *dir, struct dentry *dentry,
6567			       struct inode *inode)
6568{
6569	struct btrfs_fs_info *fs_info = inode_to_fs_info(dir);
6570	struct btrfs_root *root = BTRFS_I(dir)->root;
6571	struct btrfs_new_inode_args new_inode_args = {
6572		.dir = dir,
6573		.dentry = dentry,
6574		.inode = inode,
6575	};
6576	unsigned int trans_num_items;
6577	struct btrfs_trans_handle *trans;
6578	int err;
6579
6580	err = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items);
6581	if (err)
6582		goto out_inode;
6583
6584	trans = btrfs_start_transaction(root, trans_num_items);
6585	if (IS_ERR(trans)) {
6586		err = PTR_ERR(trans);
6587		goto out_new_inode_args;
6588	}
6589
6590	err = btrfs_create_new_inode(trans, &new_inode_args);
6591	if (!err)
6592		d_instantiate_new(dentry, inode);
6593
6594	btrfs_end_transaction(trans);
6595	btrfs_btree_balance_dirty(fs_info);
6596out_new_inode_args:
6597	btrfs_new_inode_args_destroy(&new_inode_args);
6598out_inode:
6599	if (err)
6600		iput(inode);
6601	return err;
6602}
6603
6604static int btrfs_mknod(struct mnt_idmap *idmap, struct inode *dir,
6605		       struct dentry *dentry, umode_t mode, dev_t rdev)
6606{
6607	struct inode *inode;
6608
6609	inode = new_inode(dir->i_sb);
6610	if (!inode)
6611		return -ENOMEM;
6612	inode_init_owner(idmap, inode, dir, mode);
6613	inode->i_op = &btrfs_special_inode_operations;
6614	init_special_inode(inode, inode->i_mode, rdev);
6615	return btrfs_create_common(dir, dentry, inode);
6616}
6617
6618static int btrfs_create(struct mnt_idmap *idmap, struct inode *dir,
6619			struct dentry *dentry, umode_t mode, bool excl)
6620{
6621	struct inode *inode;
6622
6623	inode = new_inode(dir->i_sb);
6624	if (!inode)
6625		return -ENOMEM;
6626	inode_init_owner(idmap, inode, dir, mode);
6627	inode->i_fop = &btrfs_file_operations;
6628	inode->i_op = &btrfs_file_inode_operations;
6629	inode->i_mapping->a_ops = &btrfs_aops;
6630	return btrfs_create_common(dir, dentry, inode);
6631}
6632
6633static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
6634		      struct dentry *dentry)
6635{
6636	struct btrfs_trans_handle *trans = NULL;
6637	struct btrfs_root *root = BTRFS_I(dir)->root;
6638	struct inode *inode = d_inode(old_dentry);
6639	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
6640	struct fscrypt_name fname;
6641	u64 index;
6642	int err;
6643	int drop_inode = 0;
6644
6645	/* do not allow sys_link's with other subvols of the same device */
6646	if (root->root_key.objectid != BTRFS_I(inode)->root->root_key.objectid)
6647		return -EXDEV;
6648
6649	if (inode->i_nlink >= BTRFS_LINK_MAX)
6650		return -EMLINK;
6651
6652	err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &fname);
6653	if (err)
6654		goto fail;
6655
6656	err = btrfs_set_inode_index(BTRFS_I(dir), &index);
6657	if (err)
6658		goto fail;
6659
6660	/*
6661	 * 2 items for inode and inode ref
6662	 * 2 items for dir items
6663	 * 1 item for parent inode
6664	 * 1 item for orphan item deletion if O_TMPFILE
6665	 */
6666	trans = btrfs_start_transaction(root, inode->i_nlink ? 5 : 6);
6667	if (IS_ERR(trans)) {
6668		err = PTR_ERR(trans);
6669		trans = NULL;
6670		goto fail;
6671	}
6672
6673	/* There are several dir indexes for this inode, clear the cache. */
6674	BTRFS_I(inode)->dir_index = 0ULL;
6675	inc_nlink(inode);
6676	inode_inc_iversion(inode);
6677	inode_set_ctime_current(inode);
6678	ihold(inode);
6679	set_bit(BTRFS_INODE_COPY_EVERYTHING, &BTRFS_I(inode)->runtime_flags);
6680
6681	err = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode),
6682			     &fname.disk_name, 1, index);
6683
6684	if (err) {
6685		drop_inode = 1;
6686	} else {
6687		struct dentry *parent = dentry->d_parent;
6688
6689		err = btrfs_update_inode(trans, BTRFS_I(inode));
6690		if (err)
6691			goto fail;
6692		if (inode->i_nlink == 1) {
6693			/*
6694			 * If new hard link count is 1, it's a file created
6695			 * with open(2) O_TMPFILE flag.
6696			 */
6697			err = btrfs_orphan_del(trans, BTRFS_I(inode));
6698			if (err)
6699				goto fail;
6700		}
6701		d_instantiate(dentry, inode);
6702		btrfs_log_new_name(trans, old_dentry, NULL, 0, parent);
6703	}
6704
6705fail:
6706	fscrypt_free_filename(&fname);
6707	if (trans)
6708		btrfs_end_transaction(trans);
6709	if (drop_inode) {
6710		inode_dec_link_count(inode);
6711		iput(inode);
6712	}
6713	btrfs_btree_balance_dirty(fs_info);
6714	return err;
6715}
6716
6717static int btrfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
6718		       struct dentry *dentry, umode_t mode)
6719{
6720	struct inode *inode;
6721
6722	inode = new_inode(dir->i_sb);
6723	if (!inode)
6724		return -ENOMEM;
6725	inode_init_owner(idmap, inode, dir, S_IFDIR | mode);
6726	inode->i_op = &btrfs_dir_inode_operations;
6727	inode->i_fop = &btrfs_dir_file_operations;
6728	return btrfs_create_common(dir, dentry, inode);
6729}
6730
6731static noinline int uncompress_inline(struct btrfs_path *path,
6732				      struct page *page,
6733				      struct btrfs_file_extent_item *item)
6734{
6735	int ret;
6736	struct extent_buffer *leaf = path->nodes[0];
6737	char *tmp;
6738	size_t max_size;
6739	unsigned long inline_size;
6740	unsigned long ptr;
6741	int compress_type;
6742
6743	compress_type = btrfs_file_extent_compression(leaf, item);
6744	max_size = btrfs_file_extent_ram_bytes(leaf, item);
6745	inline_size = btrfs_file_extent_inline_item_len(leaf, path->slots[0]);
6746	tmp = kmalloc(inline_size, GFP_NOFS);
6747	if (!tmp)
6748		return -ENOMEM;
6749	ptr = btrfs_file_extent_inline_start(item);
6750
6751	read_extent_buffer(leaf, tmp, ptr, inline_size);
6752
6753	max_size = min_t(unsigned long, PAGE_SIZE, max_size);
6754	ret = btrfs_decompress(compress_type, tmp, page, 0, inline_size, max_size);
6755
6756	/*
6757	 * decompression code contains a memset to fill in any space between the end
6758	 * of the uncompressed data and the end of max_size in case the decompressed
6759	 * data ends up shorter than ram_bytes.  That doesn't cover the hole between
6760	 * the end of an inline extent and the beginning of the next block, so we
6761	 * cover that region here.
6762	 */
6763
6764	if (max_size < PAGE_SIZE)
6765		memzero_page(page, max_size, PAGE_SIZE - max_size);
6766	kfree(tmp);
6767	return ret;
6768}
6769
6770static int read_inline_extent(struct btrfs_inode *inode, struct btrfs_path *path,
6771			      struct page *page)
6772{
6773	struct btrfs_file_extent_item *fi;
6774	void *kaddr;
6775	size_t copy_size;
6776
6777	if (!page || PageUptodate(page))
6778		return 0;
6779
6780	ASSERT(page_offset(page) == 0);
6781
6782	fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
6783			    struct btrfs_file_extent_item);
6784	if (btrfs_file_extent_compression(path->nodes[0], fi) != BTRFS_COMPRESS_NONE)
6785		return uncompress_inline(path, page, fi);
6786
6787	copy_size = min_t(u64, PAGE_SIZE,
6788			  btrfs_file_extent_ram_bytes(path->nodes[0], fi));
6789	kaddr = kmap_local_page(page);
6790	read_extent_buffer(path->nodes[0], kaddr,
6791			   btrfs_file_extent_inline_start(fi), copy_size);
6792	kunmap_local(kaddr);
6793	if (copy_size < PAGE_SIZE)
6794		memzero_page(page, copy_size, PAGE_SIZE - copy_size);
6795	return 0;
6796}
6797
6798/*
6799 * Lookup the first extent overlapping a range in a file.
6800 *
6801 * @inode:	file to search in
6802 * @page:	page to read extent data into if the extent is inline
6803 * @start:	file offset
6804 * @len:	length of range starting at @start
6805 *
6806 * Return the first &struct extent_map which overlaps the given range, reading
6807 * it from the B-tree and caching it if necessary. Note that there may be more
6808 * extents which overlap the given range after the returned extent_map.
6809 *
6810 * If @page is not NULL and the extent is inline, this also reads the extent
6811 * data directly into the page and marks the extent up to date in the io_tree.
6812 *
6813 * Return: ERR_PTR on error, non-NULL extent_map on success.
6814 */
6815struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
6816				    struct page *page, u64 start, u64 len)
6817{
6818	struct btrfs_fs_info *fs_info = inode->root->fs_info;
6819	int ret = 0;
6820	u64 extent_start = 0;
6821	u64 extent_end = 0;
6822	u64 objectid = btrfs_ino(inode);
6823	int extent_type = -1;
6824	struct btrfs_path *path = NULL;
6825	struct btrfs_root *root = inode->root;
6826	struct btrfs_file_extent_item *item;
6827	struct extent_buffer *leaf;
6828	struct btrfs_key found_key;
6829	struct extent_map *em = NULL;
6830	struct extent_map_tree *em_tree = &inode->extent_tree;
6831
6832	read_lock(&em_tree->lock);
6833	em = lookup_extent_mapping(em_tree, start, len);
6834	read_unlock(&em_tree->lock);
6835
6836	if (em) {
6837		if (em->start > start || em->start + em->len <= start)
6838			free_extent_map(em);
6839		else if (em->block_start == EXTENT_MAP_INLINE && page)
6840			free_extent_map(em);
6841		else
6842			goto out;
6843	}
6844	em = alloc_extent_map();
6845	if (!em) {
6846		ret = -ENOMEM;
6847		goto out;
6848	}
6849	em->start = EXTENT_MAP_HOLE;
6850	em->orig_start = EXTENT_MAP_HOLE;
6851	em->len = (u64)-1;
6852	em->block_len = (u64)-1;
6853
6854	path = btrfs_alloc_path();
6855	if (!path) {
6856		ret = -ENOMEM;
6857		goto out;
6858	}
6859
6860	/* Chances are we'll be called again, so go ahead and do readahead */
6861	path->reada = READA_FORWARD;
6862
6863	/*
6864	 * The same explanation in load_free_space_cache applies here as well,
6865	 * we only read when we're loading the free space cache, and at that
6866	 * point the commit_root has everything we need.
6867	 */
6868	if (btrfs_is_free_space_inode(inode)) {
6869		path->search_commit_root = 1;
6870		path->skip_locking = 1;
6871	}
6872
6873	ret = btrfs_lookup_file_extent(NULL, root, path, objectid, start, 0);
6874	if (ret < 0) {
6875		goto out;
6876	} else if (ret > 0) {
6877		if (path->slots[0] == 0)
6878			goto not_found;
6879		path->slots[0]--;
6880		ret = 0;
6881	}
6882
6883	leaf = path->nodes[0];
6884	item = btrfs_item_ptr(leaf, path->slots[0],
6885			      struct btrfs_file_extent_item);
6886	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
6887	if (found_key.objectid != objectid ||
6888	    found_key.type != BTRFS_EXTENT_DATA_KEY) {
6889		/*
6890		 * If we backup past the first extent we want to move forward
6891		 * and see if there is an extent in front of us, otherwise we'll
6892		 * say there is a hole for our whole search range which can
6893		 * cause problems.
6894		 */
6895		extent_end = start;
6896		goto next;
6897	}
6898
6899	extent_type = btrfs_file_extent_type(leaf, item);
6900	extent_start = found_key.offset;
6901	extent_end = btrfs_file_extent_end(path);
6902	if (extent_type == BTRFS_FILE_EXTENT_REG ||
6903	    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
6904		/* Only regular file could have regular/prealloc extent */
6905		if (!S_ISREG(inode->vfs_inode.i_mode)) {
6906			ret = -EUCLEAN;
6907			btrfs_crit(fs_info,
6908		"regular/prealloc extent found for non-regular inode %llu",
6909				   btrfs_ino(inode));
6910			goto out;
6911		}
6912		trace_btrfs_get_extent_show_fi_regular(inode, leaf, item,
6913						       extent_start);
6914	} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
6915		trace_btrfs_get_extent_show_fi_inline(inode, leaf, item,
6916						      path->slots[0],
6917						      extent_start);
6918	}
6919next:
6920	if (start >= extent_end) {
6921		path->slots[0]++;
6922		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
6923			ret = btrfs_next_leaf(root, path);
6924			if (ret < 0)
6925				goto out;
6926			else if (ret > 0)
6927				goto not_found;
6928
6929			leaf = path->nodes[0];
6930		}
6931		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
6932		if (found_key.objectid != objectid ||
6933		    found_key.type != BTRFS_EXTENT_DATA_KEY)
6934			goto not_found;
6935		if (start + len <= found_key.offset)
6936			goto not_found;
6937		if (start > found_key.offset)
6938			goto next;
6939
6940		/* New extent overlaps with existing one */
6941		em->start = start;
6942		em->orig_start = start;
6943		em->len = found_key.offset - start;
6944		em->block_start = EXTENT_MAP_HOLE;
6945		goto insert;
6946	}
6947
6948	btrfs_extent_item_to_extent_map(inode, path, item, em);
6949
6950	if (extent_type == BTRFS_FILE_EXTENT_REG ||
6951	    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
6952		goto insert;
6953	} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
6954		/*
6955		 * Inline extent can only exist at file offset 0. This is
6956		 * ensured by tree-checker and inline extent creation path.
6957		 * Thus all members representing file offsets should be zero.
6958		 */
6959		ASSERT(extent_start == 0);
6960		ASSERT(em->start == 0);
6961
6962		/*
6963		 * btrfs_extent_item_to_extent_map() should have properly
6964		 * initialized em members already.
6965		 *
6966		 * Other members are not utilized for inline extents.
6967		 */
6968		ASSERT(em->block_start == EXTENT_MAP_INLINE);
6969		ASSERT(em->len == fs_info->sectorsize);
6970
6971		ret = read_inline_extent(inode, path, page);
6972		if (ret < 0)
6973			goto out;
6974		goto insert;
6975	}
6976not_found:
6977	em->start = start;
6978	em->orig_start = start;
6979	em->len = len;
6980	em->block_start = EXTENT_MAP_HOLE;
6981insert:
6982	ret = 0;
6983	btrfs_release_path(path);
6984	if (em->start > start || extent_map_end(em) <= start) {
6985		btrfs_err(fs_info,
6986			  "bad extent! em: [%llu %llu] passed [%llu %llu]",
6987			  em->start, em->len, start, len);
6988		ret = -EIO;
6989		goto out;
6990	}
6991
6992	write_lock(&em_tree->lock);
6993	ret = btrfs_add_extent_mapping(fs_info, em_tree, &em, start, len);
6994	write_unlock(&em_tree->lock);
6995out:
6996	btrfs_free_path(path);
6997
6998	trace_btrfs_get_extent(root, inode, em);
6999
7000	if (ret) {
7001		free_extent_map(em);
7002		return ERR_PTR(ret);
7003	}
7004	return em;
7005}
7006
7007static struct extent_map *btrfs_create_dio_extent(struct btrfs_inode *inode,
7008						  struct btrfs_dio_data *dio_data,
7009						  const u64 start,
7010						  const u64 len,
7011						  const u64 orig_start,
7012						  const u64 block_start,
7013						  const u64 block_len,
7014						  const u64 orig_block_len,
7015						  const u64 ram_bytes,
7016						  const int type)
7017{
7018	struct extent_map *em = NULL;
7019	struct btrfs_ordered_extent *ordered;
7020
7021	if (type != BTRFS_ORDERED_NOCOW) {
7022		em = create_io_em(inode, start, len, orig_start, block_start,
7023				  block_len, orig_block_len, ram_bytes,
7024				  BTRFS_COMPRESS_NONE, /* compress_type */
7025				  type);
7026		if (IS_ERR(em))
7027			goto out;
7028	}
7029	ordered = btrfs_alloc_ordered_extent(inode, start, len, len,
7030					     block_start, block_len, 0,
7031					     (1 << type) |
7032					     (1 << BTRFS_ORDERED_DIRECT),
7033					     BTRFS_COMPRESS_NONE);
7034	if (IS_ERR(ordered)) {
7035		if (em) {
7036			free_extent_map(em);
7037			btrfs_drop_extent_map_range(inode, start,
7038						    start + len - 1, false);
7039		}
7040		em = ERR_CAST(ordered);
7041	} else {
7042		ASSERT(!dio_data->ordered);
7043		dio_data->ordered = ordered;
7044	}
7045 out:
7046
7047	return em;
7048}
7049
7050static struct extent_map *btrfs_new_extent_direct(struct btrfs_inode *inode,
7051						  struct btrfs_dio_data *dio_data,
7052						  u64 start, u64 len)
7053{
7054	struct btrfs_root *root = inode->root;
7055	struct btrfs_fs_info *fs_info = root->fs_info;
7056	struct extent_map *em;
7057	struct btrfs_key ins;
7058	u64 alloc_hint;
7059	int ret;
7060
7061	alloc_hint = get_extent_allocation_hint(inode, start, len);
7062again:
7063	ret = btrfs_reserve_extent(root, len, len, fs_info->sectorsize,
7064				   0, alloc_hint, &ins, 1, 1);
7065	if (ret == -EAGAIN) {
7066		ASSERT(btrfs_is_zoned(fs_info));
7067		wait_on_bit_io(&inode->root->fs_info->flags, BTRFS_FS_NEED_ZONE_FINISH,
7068			       TASK_UNINTERRUPTIBLE);
7069		goto again;
7070	}
7071	if (ret)
7072		return ERR_PTR(ret);
7073
7074	em = btrfs_create_dio_extent(inode, dio_data, start, ins.offset, start,
7075				     ins.objectid, ins.offset, ins.offset,
7076				     ins.offset, BTRFS_ORDERED_REGULAR);
7077	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
7078	if (IS_ERR(em))
7079		btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset,
7080					   1);
7081
7082	return em;
7083}
7084
7085static bool btrfs_extent_readonly(struct btrfs_fs_info *fs_info, u64 bytenr)
7086{
7087	struct btrfs_block_group *block_group;
7088	bool readonly = false;
7089
7090	block_group = btrfs_lookup_block_group(fs_info, bytenr);
7091	if (!block_group || block_group->ro)
7092		readonly = true;
7093	if (block_group)
7094		btrfs_put_block_group(block_group);
7095	return readonly;
7096}
7097
7098/*
7099 * Check if we can do nocow write into the range [@offset, @offset + @len)
7100 *
7101 * @offset:	File offset
7102 * @len:	The length to write, will be updated to the nocow writeable
7103 *		range
7104 * @orig_start:	(optional) Return the original file offset of the file extent
7105 * @orig_len:	(optional) Return the original on-disk length of the file extent
7106 * @ram_bytes:	(optional) Return the ram_bytes of the file extent
7107 * @strict:	if true, omit optimizations that might force us into unnecessary
7108 *		cow. e.g., don't trust generation number.
7109 *
7110 * Return:
7111 * >0	and update @len if we can do nocow write
7112 *  0	if we can't do nocow write
7113 * <0	if error happened
7114 *
7115 * NOTE: This only checks the file extents, caller is responsible to wait for
7116 *	 any ordered extents.
7117 */
7118noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
7119			      u64 *orig_start, u64 *orig_block_len,
7120			      u64 *ram_bytes, bool nowait, bool strict)
7121{
7122	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
7123	struct can_nocow_file_extent_args nocow_args = { 0 };
7124	struct btrfs_path *path;
7125	int ret;
7126	struct extent_buffer *leaf;
7127	struct btrfs_root *root = BTRFS_I(inode)->root;
7128	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
7129	struct btrfs_file_extent_item *fi;
7130	struct btrfs_key key;
7131	int found_type;
7132
7133	path = btrfs_alloc_path();
7134	if (!path)
7135		return -ENOMEM;
7136	path->nowait = nowait;
7137
7138	ret = btrfs_lookup_file_extent(NULL, root, path,
7139			btrfs_ino(BTRFS_I(inode)), offset, 0);
7140	if (ret < 0)
7141		goto out;
7142
7143	if (ret == 1) {
7144		if (path->slots[0] == 0) {
7145			/* can't find the item, must cow */
7146			ret = 0;
7147			goto out;
7148		}
7149		path->slots[0]--;
7150	}
7151	ret = 0;
7152	leaf = path->nodes[0];
7153	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
7154	if (key.objectid != btrfs_ino(BTRFS_I(inode)) ||
7155	    key.type != BTRFS_EXTENT_DATA_KEY) {
7156		/* not our file or wrong item type, must cow */
7157		goto out;
7158	}
7159
7160	if (key.offset > offset) {
7161		/* Wrong offset, must cow */
7162		goto out;
7163	}
7164
7165	if (btrfs_file_extent_end(path) <= offset)
7166		goto out;
7167
7168	fi = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);
7169	found_type = btrfs_file_extent_type(leaf, fi);
7170	if (ram_bytes)
7171		*ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
7172
7173	nocow_args.start = offset;
7174	nocow_args.end = offset + *len - 1;
7175	nocow_args.strict = strict;
7176	nocow_args.free_path = true;
7177
7178	ret = can_nocow_file_extent(path, &key, BTRFS_I(inode), &nocow_args);
7179	/* can_nocow_file_extent() has freed the path. */
7180	path = NULL;
7181
7182	if (ret != 1) {
7183		/* Treat errors as not being able to NOCOW. */
7184		ret = 0;
7185		goto out;
7186	}
7187
7188	ret = 0;
7189	if (btrfs_extent_readonly(fs_info, nocow_args.disk_bytenr))
7190		goto out;
7191
7192	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
7193	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
7194		u64 range_end;
7195
7196		range_end = round_up(offset + nocow_args.num_bytes,
7197				     root->fs_info->sectorsize) - 1;
7198		ret = test_range_bit_exists(io_tree, offset, range_end, EXTENT_DELALLOC);
7199		if (ret) {
7200			ret = -EAGAIN;
7201			goto out;
7202		}
7203	}
7204
7205	if (orig_start)
7206		*orig_start = key.offset - nocow_args.extent_offset;
7207	if (orig_block_len)
7208		*orig_block_len = nocow_args.disk_num_bytes;
7209
7210	*len = nocow_args.num_bytes;
7211	ret = 1;
7212out:
7213	btrfs_free_path(path);
7214	return ret;
7215}
7216
7217static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
7218			      struct extent_state **cached_state,
7219			      unsigned int iomap_flags)
7220{
7221	const bool writing = (iomap_flags & IOMAP_WRITE);
7222	const bool nowait = (iomap_flags & IOMAP_NOWAIT);
7223	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
7224	struct btrfs_ordered_extent *ordered;
7225	int ret = 0;
7226
7227	while (1) {
7228		if (nowait) {
7229			if (!try_lock_extent(io_tree, lockstart, lockend,
7230					     cached_state))
7231				return -EAGAIN;
7232		} else {
7233			lock_extent(io_tree, lockstart, lockend, cached_state);
7234		}
7235		/*
7236		 * We're concerned with the entire range that we're going to be
7237		 * doing DIO to, so we need to make sure there's no ordered
7238		 * extents in this range.
7239		 */
7240		ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), lockstart,
7241						     lockend - lockstart + 1);
7242
7243		/*
7244		 * We need to make sure there are no buffered pages in this
7245		 * range either, we could have raced between the invalidate in
7246		 * generic_file_direct_write and locking the extent.  The
7247		 * invalidate needs to happen so that reads after a write do not
7248		 * get stale data.
7249		 */
7250		if (!ordered &&
7251		    (!writing || !filemap_range_has_page(inode->i_mapping,
7252							 lockstart, lockend)))
7253			break;
7254
7255		unlock_extent(io_tree, lockstart, lockend, cached_state);
7256
7257		if (ordered) {
7258			if (nowait) {
7259				btrfs_put_ordered_extent(ordered);
7260				ret = -EAGAIN;
7261				break;
7262			}
7263			/*
7264			 * If we are doing a DIO read and the ordered extent we
7265			 * found is for a buffered write, we can not wait for it
7266			 * to complete and retry, because if we do so we can
7267			 * deadlock with concurrent buffered writes on page
7268			 * locks. This happens only if our DIO read covers more
7269			 * than one extent map, if at this point has already
7270			 * created an ordered extent for a previous extent map
7271			 * and locked its range in the inode's io tree, and a
7272			 * concurrent write against that previous extent map's
7273			 * range and this range started (we unlock the ranges
7274			 * in the io tree only when the bios complete and
7275			 * buffered writes always lock pages before attempting
7276			 * to lock range in the io tree).
7277			 */
7278			if (writing ||
7279			    test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags))
7280				btrfs_start_ordered_extent(ordered);
7281			else
7282				ret = nowait ? -EAGAIN : -ENOTBLK;
7283			btrfs_put_ordered_extent(ordered);
7284		} else {
7285			/*
7286			 * We could trigger writeback for this range (and wait
7287			 * for it to complete) and then invalidate the pages for
7288			 * this range (through invalidate_inode_pages2_range()),
7289			 * but that can lead us to a deadlock with a concurrent
7290			 * call to readahead (a buffered read or a defrag call
7291			 * triggered a readahead) on a page lock due to an
7292			 * ordered dio extent we created before but did not have
7293			 * yet a corresponding bio submitted (whence it can not
7294			 * complete), which makes readahead wait for that
7295			 * ordered extent to complete while holding a lock on
7296			 * that page.
7297			 */
7298			ret = nowait ? -EAGAIN : -ENOTBLK;
7299		}
7300
7301		if (ret)
7302			break;
7303
7304		cond_resched();
7305	}
7306
7307	return ret;
7308}
7309
7310/* The callers of this must take lock_extent() */
7311static struct extent_map *create_io_em(struct btrfs_inode *inode, u64 start,
7312				       u64 len, u64 orig_start, u64 block_start,
7313				       u64 block_len, u64 orig_block_len,
7314				       u64 ram_bytes, int compress_type,
7315				       int type)
7316{
7317	struct extent_map *em;
7318	int ret;
7319
7320	ASSERT(type == BTRFS_ORDERED_PREALLOC ||
7321	       type == BTRFS_ORDERED_COMPRESSED ||
7322	       type == BTRFS_ORDERED_NOCOW ||
7323	       type == BTRFS_ORDERED_REGULAR);
7324
7325	em = alloc_extent_map();
7326	if (!em)
7327		return ERR_PTR(-ENOMEM);
7328
7329	em->start = start;
7330	em->orig_start = orig_start;
7331	em->len = len;
7332	em->block_len = block_len;
7333	em->block_start = block_start;
7334	em->orig_block_len = orig_block_len;
7335	em->ram_bytes = ram_bytes;
7336	em->generation = -1;
7337	em->flags |= EXTENT_FLAG_PINNED;
7338	if (type == BTRFS_ORDERED_PREALLOC)
7339		em->flags |= EXTENT_FLAG_FILLING;
7340	else if (type == BTRFS_ORDERED_COMPRESSED)
7341		extent_map_set_compression(em, compress_type);
7342
7343	ret = btrfs_replace_extent_map_range(inode, em, true);
7344	if (ret) {
7345		free_extent_map(em);
7346		return ERR_PTR(ret);
7347	}
7348
7349	/* em got 2 refs now, callers needs to do free_extent_map once. */
7350	return em;
7351}
7352
7353
7354static int btrfs_get_blocks_direct_write(struct extent_map **map,
7355					 struct inode *inode,
7356					 struct btrfs_dio_data *dio_data,
7357					 u64 start, u64 *lenp,
7358					 unsigned int iomap_flags)
7359{
7360	const bool nowait = (iomap_flags & IOMAP_NOWAIT);
7361	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
7362	struct extent_map *em = *map;
7363	int type;
7364	u64 block_start, orig_start, orig_block_len, ram_bytes;
7365	struct btrfs_block_group *bg;
7366	bool can_nocow = false;
7367	bool space_reserved = false;
7368	u64 len = *lenp;
7369	u64 prev_len;
7370	int ret = 0;
7371
7372	/*
7373	 * We don't allocate a new extent in the following cases
7374	 *
7375	 * 1) The inode is marked as NODATACOW. In this case we'll just use the
7376	 * existing extent.
7377	 * 2) The extent is marked as PREALLOC. We're good to go here and can
7378	 * just use the extent.
7379	 *
7380	 */
7381	if ((em->flags & EXTENT_FLAG_PREALLOC) ||
7382	    ((BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
7383	     em->block_start != EXTENT_MAP_HOLE)) {
7384		if (em->flags & EXTENT_FLAG_PREALLOC)
7385			type = BTRFS_ORDERED_PREALLOC;
7386		else
7387			type = BTRFS_ORDERED_NOCOW;
7388		len = min(len, em->len - (start - em->start));
7389		block_start = em->block_start + (start - em->start);
7390
7391		if (can_nocow_extent(inode, start, &len, &orig_start,
7392				     &orig_block_len, &ram_bytes, false, false) == 1) {
7393			bg = btrfs_inc_nocow_writers(fs_info, block_start);
7394			if (bg)
7395				can_nocow = true;
7396		}
7397	}
7398
7399	prev_len = len;
7400	if (can_nocow) {
7401		struct extent_map *em2;
7402
7403		/* We can NOCOW, so only need to reserve metadata space. */
7404		ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode), len, len,
7405						      nowait);
7406		if (ret < 0) {
7407			/* Our caller expects us to free the input extent map. */
7408			free_extent_map(em);
7409			*map = NULL;
7410			btrfs_dec_nocow_writers(bg);
7411			if (nowait && (ret == -ENOSPC || ret == -EDQUOT))
7412				ret = -EAGAIN;
7413			goto out;
7414		}
7415		space_reserved = true;
7416
7417		em2 = btrfs_create_dio_extent(BTRFS_I(inode), dio_data, start, len,
7418					      orig_start, block_start,
7419					      len, orig_block_len,
7420					      ram_bytes, type);
7421		btrfs_dec_nocow_writers(bg);
7422		if (type == BTRFS_ORDERED_PREALLOC) {
7423			free_extent_map(em);
7424			*map = em2;
7425			em = em2;
7426		}
7427
7428		if (IS_ERR(em2)) {
7429			ret = PTR_ERR(em2);
7430			goto out;
7431		}
7432
7433		dio_data->nocow_done = true;
7434	} else {
7435		/* Our caller expects us to free the input extent map. */
7436		free_extent_map(em);
7437		*map = NULL;
7438
7439		if (nowait) {
7440			ret = -EAGAIN;
7441			goto out;
7442		}
7443
7444		/*
7445		 * If we could not allocate data space before locking the file
7446		 * range and we can't do a NOCOW write, then we have to fail.
7447		 */
7448		if (!dio_data->data_space_reserved) {
7449			ret = -ENOSPC;
7450			goto out;
7451		}
7452
7453		/*
7454		 * We have to COW and we have already reserved data space before,
7455		 * so now we reserve only metadata.
7456		 */
7457		ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode), len, len,
7458						      false);
7459		if (ret < 0)
7460			goto out;
7461		space_reserved = true;
7462
7463		em = btrfs_new_extent_direct(BTRFS_I(inode), dio_data, start, len);
7464		if (IS_ERR(em)) {
7465			ret = PTR_ERR(em);
7466			goto out;
7467		}
7468		*map = em;
7469		len = min(len, em->len - (start - em->start));
7470		if (len < prev_len)
7471			btrfs_delalloc_release_metadata(BTRFS_I(inode),
7472							prev_len - len, true);
7473	}
7474
7475	/*
7476	 * We have created our ordered extent, so we can now release our reservation
7477	 * for an outstanding extent.
7478	 */
7479	btrfs_delalloc_release_extents(BTRFS_I(inode), prev_len);
7480
7481	/*
7482	 * Need to update the i_size under the extent lock so buffered
7483	 * readers will get the updated i_size when we unlock.
7484	 */
7485	if (start + len > i_size_read(inode))
7486		i_size_write(inode, start + len);
7487out:
7488	if (ret && space_reserved) {
7489		btrfs_delalloc_release_extents(BTRFS_I(inode), len);
7490		btrfs_delalloc_release_metadata(BTRFS_I(inode), len, true);
7491	}
7492	*lenp = len;
7493	return ret;
7494}
7495
7496static int btrfs_dio_iomap_begin(struct inode *inode, loff_t start,
7497		loff_t length, unsigned int flags, struct iomap *iomap,
7498		struct iomap *srcmap)
7499{
7500	struct iomap_iter *iter = container_of(iomap, struct iomap_iter, iomap);
7501	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
7502	struct extent_map *em;
7503	struct extent_state *cached_state = NULL;
7504	struct btrfs_dio_data *dio_data = iter->private;
7505	u64 lockstart, lockend;
7506	const bool write = !!(flags & IOMAP_WRITE);
7507	int ret = 0;
7508	u64 len = length;
7509	const u64 data_alloc_len = length;
7510	bool unlock_extents = false;
7511
7512	/*
7513	 * We could potentially fault if we have a buffer > PAGE_SIZE, and if
7514	 * we're NOWAIT we may submit a bio for a partial range and return
7515	 * EIOCBQUEUED, which would result in an errant short read.
7516	 *
7517	 * The best way to handle this would be to allow for partial completions
7518	 * of iocb's, so we could submit the partial bio, return and fault in
7519	 * the rest of the pages, and then submit the io for the rest of the
7520	 * range.  However we don't have that currently, so simply return
7521	 * -EAGAIN at this point so that the normal path is used.
7522	 */
7523	if (!write && (flags & IOMAP_NOWAIT) && length > PAGE_SIZE)
7524		return -EAGAIN;
7525
7526	/*
7527	 * Cap the size of reads to that usually seen in buffered I/O as we need
7528	 * to allocate a contiguous array for the checksums.
7529	 */
7530	if (!write)
7531		len = min_t(u64, len, fs_info->sectorsize * BTRFS_MAX_BIO_SECTORS);
7532
7533	lockstart = start;
7534	lockend = start + len - 1;
7535
7536	/*
7537	 * iomap_dio_rw() only does filemap_write_and_wait_range(), which isn't
7538	 * enough if we've written compressed pages to this area, so we need to
7539	 * flush the dirty pages again to make absolutely sure that any
7540	 * outstanding dirty pages are on disk - the first flush only starts
7541	 * compression on the data, while keeping the pages locked, so by the
7542	 * time the second flush returns we know bios for the compressed pages
7543	 * were submitted and finished, and the pages no longer under writeback.
7544	 *
7545	 * If we have a NOWAIT request and we have any pages in the range that
7546	 * are locked, likely due to compression still in progress, we don't want
7547	 * to block on page locks. We also don't want to block on pages marked as
7548	 * dirty or under writeback (same as for the non-compression case).
7549	 * iomap_dio_rw() did the same check, but after that and before we got
7550	 * here, mmap'ed writes may have happened or buffered reads started
7551	 * (readpage() and readahead(), which lock pages), as we haven't locked
7552	 * the file range yet.
7553	 */
7554	if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
7555		     &BTRFS_I(inode)->runtime_flags)) {
7556		if (flags & IOMAP_NOWAIT) {
7557			if (filemap_range_needs_writeback(inode->i_mapping,
7558							  lockstart, lockend))
7559				return -EAGAIN;
7560		} else {
7561			ret = filemap_fdatawrite_range(inode->i_mapping, start,
7562						       start + length - 1);
7563			if (ret)
7564				return ret;
7565		}
7566	}
7567
7568	memset(dio_data, 0, sizeof(*dio_data));
7569
7570	/*
7571	 * We always try to allocate data space and must do it before locking
7572	 * the file range, to avoid deadlocks with concurrent writes to the same
7573	 * range if the range has several extents and the writes don't expand the
7574	 * current i_size (the inode lock is taken in shared mode). If we fail to
7575	 * allocate data space here we continue and later, after locking the
7576	 * file range, we fail with ENOSPC only if we figure out we can not do a
7577	 * NOCOW write.
7578	 */
7579	if (write && !(flags & IOMAP_NOWAIT)) {
7580		ret = btrfs_check_data_free_space(BTRFS_I(inode),
7581						  &dio_data->data_reserved,
7582						  start, data_alloc_len, false);
7583		if (!ret)
7584			dio_data->data_space_reserved = true;
7585		else if (ret && !(BTRFS_I(inode)->flags &
7586				  (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)))
7587			goto err;
7588	}
7589
7590	/*
7591	 * If this errors out it's because we couldn't invalidate pagecache for
7592	 * this range and we need to fallback to buffered IO, or we are doing a
7593	 * NOWAIT read/write and we need to block.
7594	 */
7595	ret = lock_extent_direct(inode, lockstart, lockend, &cached_state, flags);
7596	if (ret < 0)
7597		goto err;
7598
7599	em = btrfs_get_extent(BTRFS_I(inode), NULL, start, len);
7600	if (IS_ERR(em)) {
7601		ret = PTR_ERR(em);
7602		goto unlock_err;
7603	}
7604
7605	/*
7606	 * Ok for INLINE and COMPRESSED extents we need to fallback on buffered
7607	 * io.  INLINE is special, and we could probably kludge it in here, but
7608	 * it's still buffered so for safety lets just fall back to the generic
7609	 * buffered path.
7610	 *
7611	 * For COMPRESSED we _have_ to read the entire extent in so we can
7612	 * decompress it, so there will be buffering required no matter what we
7613	 * do, so go ahead and fallback to buffered.
7614	 *
7615	 * We return -ENOTBLK because that's what makes DIO go ahead and go back
7616	 * to buffered IO.  Don't blame me, this is the price we pay for using
7617	 * the generic code.
7618	 */
7619	if (extent_map_is_compressed(em) ||
7620	    em->block_start == EXTENT_MAP_INLINE) {
7621		free_extent_map(em);
7622		/*
7623		 * If we are in a NOWAIT context, return -EAGAIN in order to
7624		 * fallback to buffered IO. This is not only because we can
7625		 * block with buffered IO (no support for NOWAIT semantics at
7626		 * the moment) but also to avoid returning short reads to user
7627		 * space - this happens if we were able to read some data from
7628		 * previous non-compressed extents and then when we fallback to
7629		 * buffered IO, at btrfs_file_read_iter() by calling
7630		 * filemap_read(), we fail to fault in pages for the read buffer,
7631		 * in which case filemap_read() returns a short read (the number
7632		 * of bytes previously read is > 0, so it does not return -EFAULT).
7633		 */
7634		ret = (flags & IOMAP_NOWAIT) ? -EAGAIN : -ENOTBLK;
7635		goto unlock_err;
7636	}
7637
7638	len = min(len, em->len - (start - em->start));
7639
7640	/*
7641	 * If we have a NOWAIT request and the range contains multiple extents
7642	 * (or a mix of extents and holes), then we return -EAGAIN to make the
7643	 * caller fallback to a context where it can do a blocking (without
7644	 * NOWAIT) request. This way we avoid doing partial IO and returning
7645	 * success to the caller, which is not optimal for writes and for reads
7646	 * it can result in unexpected behaviour for an application.
7647	 *
7648	 * When doing a read, because we use IOMAP_DIO_PARTIAL when calling
7649	 * iomap_dio_rw(), we can end up returning less data then what the caller
7650	 * asked for, resulting in an unexpected, and incorrect, short read.
7651	 * That is, the caller asked to read N bytes and we return less than that,
7652	 * which is wrong unless we are crossing EOF. This happens if we get a
7653	 * page fault error when trying to fault in pages for the buffer that is
7654	 * associated to the struct iov_iter passed to iomap_dio_rw(), and we
7655	 * have previously submitted bios for other extents in the range, in
7656	 * which case iomap_dio_rw() may return us EIOCBQUEUED if not all of
7657	 * those bios have completed by the time we get the page fault error,
7658	 * which we return back to our caller - we should only return EIOCBQUEUED
7659	 * after we have submitted bios for all the extents in the range.
7660	 */
7661	if ((flags & IOMAP_NOWAIT) && len < length) {
7662		free_extent_map(em);
7663		ret = -EAGAIN;
7664		goto unlock_err;
7665	}
7666
7667	if (write) {
7668		ret = btrfs_get_blocks_direct_write(&em, inode, dio_data,
7669						    start, &len, flags);
7670		if (ret < 0)
7671			goto unlock_err;
7672		unlock_extents = true;
7673		/* Recalc len in case the new em is smaller than requested */
7674		len = min(len, em->len - (start - em->start));
7675		if (dio_data->data_space_reserved) {
7676			u64 release_offset;
7677			u64 release_len = 0;
7678
7679			if (dio_data->nocow_done) {
7680				release_offset = start;
7681				release_len = data_alloc_len;
7682			} else if (len < data_alloc_len) {
7683				release_offset = start + len;
7684				release_len = data_alloc_len - len;
7685			}
7686
7687			if (release_len > 0)
7688				btrfs_free_reserved_data_space(BTRFS_I(inode),
7689							       dio_data->data_reserved,
7690							       release_offset,
7691							       release_len);
7692		}
7693	} else {
7694		/*
7695		 * We need to unlock only the end area that we aren't using.
7696		 * The rest is going to be unlocked by the endio routine.
7697		 */
7698		lockstart = start + len;
7699		if (lockstart < lockend)
7700			unlock_extents = true;
7701	}
7702
7703	if (unlock_extents)
7704		unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
7705			      &cached_state);
7706	else
7707		free_extent_state(cached_state);
7708
7709	/*
7710	 * Translate extent map information to iomap.
7711	 * We trim the extents (and move the addr) even though iomap code does
7712	 * that, since we have locked only the parts we are performing I/O in.
7713	 */
7714	if ((em->block_start == EXTENT_MAP_HOLE) ||
7715	    ((em->flags & EXTENT_FLAG_PREALLOC) && !write)) {
7716		iomap->addr = IOMAP_NULL_ADDR;
7717		iomap->type = IOMAP_HOLE;
7718	} else {
7719		iomap->addr = em->block_start + (start - em->start);
7720		iomap->type = IOMAP_MAPPED;
7721	}
7722	iomap->offset = start;
7723	iomap->bdev = fs_info->fs_devices->latest_dev->bdev;
7724	iomap->length = len;
7725	free_extent_map(em);
7726
7727	return 0;
7728
7729unlock_err:
7730	unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
7731		      &cached_state);
7732err:
7733	if (dio_data->data_space_reserved) {
7734		btrfs_free_reserved_data_space(BTRFS_I(inode),
7735					       dio_data->data_reserved,
7736					       start, data_alloc_len);
7737		extent_changeset_free(dio_data->data_reserved);
7738	}
7739
7740	return ret;
7741}
7742
7743static int btrfs_dio_iomap_end(struct inode *inode, loff_t pos, loff_t length,
7744		ssize_t written, unsigned int flags, struct iomap *iomap)
7745{
7746	struct iomap_iter *iter = container_of(iomap, struct iomap_iter, iomap);
7747	struct btrfs_dio_data *dio_data = iter->private;
7748	size_t submitted = dio_data->submitted;
7749	const bool write = !!(flags & IOMAP_WRITE);
7750	int ret = 0;
7751
7752	if (!write && (iomap->type == IOMAP_HOLE)) {
7753		/* If reading from a hole, unlock and return */
7754		unlock_extent(&BTRFS_I(inode)->io_tree, pos, pos + length - 1,
7755			      NULL);
7756		return 0;
7757	}
7758
7759	if (submitted < length) {
7760		pos += submitted;
7761		length -= submitted;
7762		if (write)
7763			btrfs_finish_ordered_extent(dio_data->ordered, NULL,
7764						    pos, length, false);
7765		else
7766			unlock_extent(&BTRFS_I(inode)->io_tree, pos,
7767				      pos + length - 1, NULL);
7768		ret = -ENOTBLK;
7769	}
7770	if (write) {
7771		btrfs_put_ordered_extent(dio_data->ordered);
7772		dio_data->ordered = NULL;
7773	}
7774
7775	if (write)
7776		extent_changeset_free(dio_data->data_reserved);
7777	return ret;
7778}
7779
7780static void btrfs_dio_end_io(struct btrfs_bio *bbio)
7781{
7782	struct btrfs_dio_private *dip =
7783		container_of(bbio, struct btrfs_dio_private, bbio);
7784	struct btrfs_inode *inode = bbio->inode;
7785	struct bio *bio = &bbio->bio;
7786
7787	if (bio->bi_status) {
7788		btrfs_warn(inode->root->fs_info,
7789		"direct IO failed ino %llu op 0x%0x offset %#llx len %u err no %d",
7790			   btrfs_ino(inode), bio->bi_opf,
7791			   dip->file_offset, dip->bytes, bio->bi_status);
7792	}
7793
7794	if (btrfs_op(bio) == BTRFS_MAP_WRITE) {
7795		btrfs_finish_ordered_extent(bbio->ordered, NULL,
7796					    dip->file_offset, dip->bytes,
7797					    !bio->bi_status);
7798	} else {
7799		unlock_extent(&inode->io_tree, dip->file_offset,
7800			      dip->file_offset + dip->bytes - 1, NULL);
7801	}
7802
7803	bbio->bio.bi_private = bbio->private;
7804	iomap_dio_bio_end_io(bio);
7805}
7806
7807static void btrfs_dio_submit_io(const struct iomap_iter *iter, struct bio *bio,
7808				loff_t file_offset)
7809{
7810	struct btrfs_bio *bbio = btrfs_bio(bio);
7811	struct btrfs_dio_private *dip =
7812		container_of(bbio, struct btrfs_dio_private, bbio);
7813	struct btrfs_dio_data *dio_data = iter->private;
7814
7815	btrfs_bio_init(bbio, BTRFS_I(iter->inode)->root->fs_info,
7816		       btrfs_dio_end_io, bio->bi_private);
7817	bbio->inode = BTRFS_I(iter->inode);
7818	bbio->file_offset = file_offset;
7819
7820	dip->file_offset = file_offset;
7821	dip->bytes = bio->bi_iter.bi_size;
7822
7823	dio_data->submitted += bio->bi_iter.bi_size;
7824
7825	/*
7826	 * Check if we are doing a partial write.  If we are, we need to split
7827	 * the ordered extent to match the submitted bio.  Hang on to the
7828	 * remaining unfinishable ordered_extent in dio_data so that it can be
7829	 * cancelled in iomap_end to avoid a deadlock wherein faulting the
7830	 * remaining pages is blocked on the outstanding ordered extent.
7831	 */
7832	if (iter->flags & IOMAP_WRITE) {
7833		int ret;
7834
7835		ret = btrfs_extract_ordered_extent(bbio, dio_data->ordered);
7836		if (ret) {
7837			btrfs_finish_ordered_extent(dio_data->ordered, NULL,
7838						    file_offset, dip->bytes,
7839						    !ret);
7840			bio->bi_status = errno_to_blk_status(ret);
7841			iomap_dio_bio_end_io(bio);
7842			return;
7843		}
7844	}
7845
7846	btrfs_submit_bio(bbio, 0);
7847}
7848
7849static const struct iomap_ops btrfs_dio_iomap_ops = {
7850	.iomap_begin            = btrfs_dio_iomap_begin,
7851	.iomap_end              = btrfs_dio_iomap_end,
7852};
7853
7854static const struct iomap_dio_ops btrfs_dio_ops = {
7855	.submit_io		= btrfs_dio_submit_io,
7856	.bio_set		= &btrfs_dio_bioset,
7857};
7858
7859ssize_t btrfs_dio_read(struct kiocb *iocb, struct iov_iter *iter, size_t done_before)
7860{
7861	struct btrfs_dio_data data = { 0 };
7862
7863	return iomap_dio_rw(iocb, iter, &btrfs_dio_iomap_ops, &btrfs_dio_ops,
7864			    IOMAP_DIO_PARTIAL, &data, done_before);
7865}
7866
7867struct iomap_dio *btrfs_dio_write(struct kiocb *iocb, struct iov_iter *iter,
7868				  size_t done_before)
7869{
7870	struct btrfs_dio_data data = { 0 };
7871
7872	return __iomap_dio_rw(iocb, iter, &btrfs_dio_iomap_ops, &btrfs_dio_ops,
7873			    IOMAP_DIO_PARTIAL, &data, done_before);
7874}
7875
7876static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
7877			u64 start, u64 len)
7878{
7879	struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
7880	int	ret;
7881
7882	ret = fiemap_prep(inode, fieinfo, start, &len, 0);
7883	if (ret)
7884		return ret;
7885
7886	/*
7887	 * fiemap_prep() called filemap_write_and_wait() for the whole possible
7888	 * file range (0 to LLONG_MAX), but that is not enough if we have
7889	 * compression enabled. The first filemap_fdatawrite_range() only kicks
7890	 * in the compression of data (in an async thread) and will return
7891	 * before the compression is done and writeback is started. A second
7892	 * filemap_fdatawrite_range() is needed to wait for the compression to
7893	 * complete and writeback to start. We also need to wait for ordered
7894	 * extents to complete, because our fiemap implementation uses mainly
7895	 * file extent items to list the extents, searching for extent maps
7896	 * only for file ranges with holes or prealloc extents to figure out
7897	 * if we have delalloc in those ranges.
7898	 */
7899	if (fieinfo->fi_flags & FIEMAP_FLAG_SYNC) {
7900		ret = btrfs_wait_ordered_range(inode, 0, LLONG_MAX);
7901		if (ret)
7902			return ret;
7903	}
7904
7905	btrfs_inode_lock(btrfs_inode, BTRFS_ILOCK_SHARED);
7906
7907	/*
7908	 * We did an initial flush to avoid holding the inode's lock while
7909	 * triggering writeback and waiting for the completion of IO and ordered
7910	 * extents. Now after we locked the inode we do it again, because it's
7911	 * possible a new write may have happened in between those two steps.
7912	 */
7913	if (fieinfo->fi_flags & FIEMAP_FLAG_SYNC) {
7914		ret = btrfs_wait_ordered_range(inode, 0, LLONG_MAX);
7915		if (ret) {
7916			btrfs_inode_unlock(btrfs_inode, BTRFS_ILOCK_SHARED);
7917			return ret;
7918		}
7919	}
7920
7921	ret = extent_fiemap(btrfs_inode, fieinfo, start, len);
7922	btrfs_inode_unlock(btrfs_inode, BTRFS_ILOCK_SHARED);
7923
7924	return ret;
7925}
7926
7927static int btrfs_writepages(struct address_space *mapping,
7928			    struct writeback_control *wbc)
7929{
7930	return extent_writepages(mapping, wbc);
7931}
7932
7933static void btrfs_readahead(struct readahead_control *rac)
7934{
7935	extent_readahead(rac);
7936}
7937
7938/*
7939 * For release_folio() and invalidate_folio() we have a race window where
7940 * folio_end_writeback() is called but the subpage spinlock is not yet released.
7941 * If we continue to release/invalidate the page, we could cause use-after-free
7942 * for subpage spinlock.  So this function is to spin and wait for subpage
7943 * spinlock.
7944 */
7945static void wait_subpage_spinlock(struct page *page)
7946{
7947	struct btrfs_fs_info *fs_info = page_to_fs_info(page);
7948	struct folio *folio = page_folio(page);
7949	struct btrfs_subpage *subpage;
7950
7951	if (!btrfs_is_subpage(fs_info, page->mapping))
7952		return;
7953
7954	ASSERT(folio_test_private(folio) && folio_get_private(folio));
7955	subpage = folio_get_private(folio);
7956
7957	/*
7958	 * This may look insane as we just acquire the spinlock and release it,
7959	 * without doing anything.  But we just want to make sure no one is
7960	 * still holding the subpage spinlock.
7961	 * And since the page is not dirty nor writeback, and we have page
7962	 * locked, the only possible way to hold a spinlock is from the endio
7963	 * function to clear page writeback.
7964	 *
7965	 * Here we just acquire the spinlock so that all existing callers
7966	 * should exit and we're safe to release/invalidate the page.
7967	 */
7968	spin_lock_irq(&subpage->lock);
7969	spin_unlock_irq(&subpage->lock);
7970}
7971
7972static bool __btrfs_release_folio(struct folio *folio, gfp_t gfp_flags)
7973{
7974	int ret = try_release_extent_mapping(&folio->page, gfp_flags);
7975
7976	if (ret == 1) {
7977		wait_subpage_spinlock(&folio->page);
7978		clear_page_extent_mapped(&folio->page);
7979	}
7980	return ret;
7981}
7982
7983static bool btrfs_release_folio(struct folio *folio, gfp_t gfp_flags)
7984{
7985	if (folio_test_writeback(folio) || folio_test_dirty(folio))
7986		return false;
7987	return __btrfs_release_folio(folio, gfp_flags);
7988}
7989
7990#ifdef CONFIG_MIGRATION
7991static int btrfs_migrate_folio(struct address_space *mapping,
7992			     struct folio *dst, struct folio *src,
7993			     enum migrate_mode mode)
7994{
7995	int ret = filemap_migrate_folio(mapping, dst, src, mode);
7996
7997	if (ret != MIGRATEPAGE_SUCCESS)
7998		return ret;
7999
8000	if (folio_test_ordered(src)) {
8001		folio_clear_ordered(src);
8002		folio_set_ordered(dst);
8003	}
8004
8005	return MIGRATEPAGE_SUCCESS;
8006}
8007#else
8008#define btrfs_migrate_folio NULL
8009#endif
8010
8011static void btrfs_invalidate_folio(struct folio *folio, size_t offset,
8012				 size_t length)
8013{
8014	struct btrfs_inode *inode = folio_to_inode(folio);
8015	struct btrfs_fs_info *fs_info = inode->root->fs_info;
8016	struct extent_io_tree *tree = &inode->io_tree;
8017	struct extent_state *cached_state = NULL;
8018	u64 page_start = folio_pos(folio);
8019	u64 page_end = page_start + folio_size(folio) - 1;
8020	u64 cur;
8021	int inode_evicting = inode->vfs_inode.i_state & I_FREEING;
8022
8023	/*
8024	 * We have folio locked so no new ordered extent can be created on this
8025	 * page, nor bio can be submitted for this folio.
8026	 *
8027	 * But already submitted bio can still be finished on this folio.
8028	 * Furthermore, endio function won't skip folio which has Ordered
8029	 * (Private2) already cleared, so it's possible for endio and
8030	 * invalidate_folio to do the same ordered extent accounting twice
8031	 * on one folio.
8032	 *
8033	 * So here we wait for any submitted bios to finish, so that we won't
8034	 * do double ordered extent accounting on the same folio.
8035	 */
8036	folio_wait_writeback(folio);
8037	wait_subpage_spinlock(&folio->page);
8038
8039	/*
8040	 * For subpage case, we have call sites like
8041	 * btrfs_punch_hole_lock_range() which passes range not aligned to
8042	 * sectorsize.
8043	 * If the range doesn't cover the full folio, we don't need to and
8044	 * shouldn't clear page extent mapped, as folio->private can still
8045	 * record subpage dirty bits for other part of the range.
8046	 *
8047	 * For cases that invalidate the full folio even the range doesn't
8048	 * cover the full folio, like invalidating the last folio, we're
8049	 * still safe to wait for ordered extent to finish.
8050	 */
8051	if (!(offset == 0 && length == folio_size(folio))) {
8052		btrfs_release_folio(folio, GFP_NOFS);
8053		return;
8054	}
8055
8056	if (!inode_evicting)
8057		lock_extent(tree, page_start, page_end, &cached_state);
8058
8059	cur = page_start;
8060	while (cur < page_end) {
8061		struct btrfs_ordered_extent *ordered;
8062		u64 range_end;
8063		u32 range_len;
8064		u32 extra_flags = 0;
8065
8066		ordered = btrfs_lookup_first_ordered_range(inode, cur,
8067							   page_end + 1 - cur);
8068		if (!ordered) {
8069			range_end = page_end;
8070			/*
8071			 * No ordered extent covering this range, we are safe
8072			 * to delete all extent states in the range.
8073			 */
8074			extra_flags = EXTENT_CLEAR_ALL_BITS;
8075			goto next;
8076		}
8077		if (ordered->file_offset > cur) {
8078			/*
8079			 * There is a range between [cur, oe->file_offset) not
8080			 * covered by any ordered extent.
8081			 * We are safe to delete all extent states, and handle
8082			 * the ordered extent in the next iteration.
8083			 */
8084			range_end = ordered->file_offset - 1;
8085			extra_flags = EXTENT_CLEAR_ALL_BITS;
8086			goto next;
8087		}
8088
8089		range_end = min(ordered->file_offset + ordered->num_bytes - 1,
8090				page_end);
8091		ASSERT(range_end + 1 - cur < U32_MAX);
8092		range_len = range_end + 1 - cur;
8093		if (!btrfs_folio_test_ordered(fs_info, folio, cur, range_len)) {
8094			/*
8095			 * If Ordered (Private2) is cleared, it means endio has
8096			 * already been executed for the range.
8097			 * We can't delete the extent states as
8098			 * btrfs_finish_ordered_io() may still use some of them.
8099			 */
8100			goto next;
8101		}
8102		btrfs_folio_clear_ordered(fs_info, folio, cur, range_len);
8103
8104		/*
8105		 * IO on this page will never be started, so we need to account
8106		 * for any ordered extents now. Don't clear EXTENT_DELALLOC_NEW
8107		 * here, must leave that up for the ordered extent completion.
8108		 *
8109		 * This will also unlock the range for incoming
8110		 * btrfs_finish_ordered_io().
8111		 */
8112		if (!inode_evicting)
8113			clear_extent_bit(tree, cur, range_end,
8114					 EXTENT_DELALLOC |
8115					 EXTENT_LOCKED | EXTENT_DO_ACCOUNTING |
8116					 EXTENT_DEFRAG, &cached_state);
8117
8118		spin_lock_irq(&inode->ordered_tree_lock);
8119		set_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags);
8120		ordered->truncated_len = min(ordered->truncated_len,
8121					     cur - ordered->file_offset);
8122		spin_unlock_irq(&inode->ordered_tree_lock);
8123
8124		/*
8125		 * If the ordered extent has finished, we're safe to delete all
8126		 * the extent states of the range, otherwise
8127		 * btrfs_finish_ordered_io() will get executed by endio for
8128		 * other pages, so we can't delete extent states.
8129		 */
8130		if (btrfs_dec_test_ordered_pending(inode, &ordered,
8131						   cur, range_end + 1 - cur)) {
8132			btrfs_finish_ordered_io(ordered);
8133			/*
8134			 * The ordered extent has finished, now we're again
8135			 * safe to delete all extent states of the range.
8136			 */
8137			extra_flags = EXTENT_CLEAR_ALL_BITS;
8138		}
8139next:
8140		if (ordered)
8141			btrfs_put_ordered_extent(ordered);
8142		/*
8143		 * Qgroup reserved space handler
8144		 * Sector(s) here will be either:
8145		 *
8146		 * 1) Already written to disk or bio already finished
8147		 *    Then its QGROUP_RESERVED bit in io_tree is already cleared.
8148		 *    Qgroup will be handled by its qgroup_record then.
8149		 *    btrfs_qgroup_free_data() call will do nothing here.
8150		 *
8151		 * 2) Not written to disk yet
8152		 *    Then btrfs_qgroup_free_data() call will clear the
8153		 *    QGROUP_RESERVED bit of its io_tree, and free the qgroup
8154		 *    reserved data space.
8155		 *    Since the IO will never happen for this page.
8156		 */
8157		btrfs_qgroup_free_data(inode, NULL, cur, range_end + 1 - cur, NULL);
8158		if (!inode_evicting) {
8159			clear_extent_bit(tree, cur, range_end, EXTENT_LOCKED |
8160				 EXTENT_DELALLOC | EXTENT_UPTODATE |
8161				 EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG |
8162				 extra_flags, &cached_state);
8163		}
8164		cur = range_end + 1;
8165	}
8166	/*
8167	 * We have iterated through all ordered extents of the page, the page
8168	 * should not have Ordered (Private2) anymore, or the above iteration
8169	 * did something wrong.
8170	 */
8171	ASSERT(!folio_test_ordered(folio));
8172	btrfs_folio_clear_checked(fs_info, folio, folio_pos(folio), folio_size(folio));
8173	if (!inode_evicting)
8174		__btrfs_release_folio(folio, GFP_NOFS);
8175	clear_page_extent_mapped(&folio->page);
8176}
8177
8178/*
8179 * btrfs_page_mkwrite() is not allowed to change the file size as it gets
8180 * called from a page fault handler when a page is first dirtied. Hence we must
8181 * be careful to check for EOF conditions here. We set the page up correctly
8182 * for a written page which means we get ENOSPC checking when writing into
8183 * holes and correct delalloc and unwritten extent mapping on filesystems that
8184 * support these features.
8185 *
8186 * We are not allowed to take the i_mutex here so we have to play games to
8187 * protect against truncate races as the page could now be beyond EOF.  Because
8188 * truncate_setsize() writes the inode size before removing pages, once we have
8189 * the page lock we can determine safely if the page is beyond EOF. If it is not
8190 * beyond EOF, then the page is guaranteed safe against truncation until we
8191 * unlock the page.
8192 */
8193vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf)
8194{
8195	struct page *page = vmf->page;
8196	struct folio *folio = page_folio(page);
8197	struct inode *inode = file_inode(vmf->vma->vm_file);
8198	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
8199	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
8200	struct btrfs_ordered_extent *ordered;
8201	struct extent_state *cached_state = NULL;
8202	struct extent_changeset *data_reserved = NULL;
8203	unsigned long zero_start;
8204	loff_t size;
8205	vm_fault_t ret;
8206	int ret2;
8207	int reserved = 0;
8208	u64 reserved_space;
8209	u64 page_start;
8210	u64 page_end;
8211	u64 end;
8212
8213	ASSERT(folio_order(folio) == 0);
8214
8215	reserved_space = PAGE_SIZE;
8216
8217	sb_start_pagefault(inode->i_sb);
8218	page_start = page_offset(page);
8219	page_end = page_start + PAGE_SIZE - 1;
8220	end = page_end;
8221
8222	/*
8223	 * Reserving delalloc space after obtaining the page lock can lead to
8224	 * deadlock. For example, if a dirty page is locked by this function
8225	 * and the call to btrfs_delalloc_reserve_space() ends up triggering
8226	 * dirty page write out, then the btrfs_writepages() function could
8227	 * end up waiting indefinitely to get a lock on the page currently
8228	 * being processed by btrfs_page_mkwrite() function.
8229	 */
8230	ret2 = btrfs_delalloc_reserve_space(BTRFS_I(inode), &data_reserved,
8231					    page_start, reserved_space);
8232	if (!ret2) {
8233		ret2 = file_update_time(vmf->vma->vm_file);
8234		reserved = 1;
8235	}
8236	if (ret2) {
8237		ret = vmf_error(ret2);
8238		if (reserved)
8239			goto out;
8240		goto out_noreserve;
8241	}
8242
8243	ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
8244again:
8245	down_read(&BTRFS_I(inode)->i_mmap_lock);
8246	lock_page(page);
8247	size = i_size_read(inode);
8248
8249	if ((page->mapping != inode->i_mapping) ||
8250	    (page_start >= size)) {
8251		/* page got truncated out from underneath us */
8252		goto out_unlock;
8253	}
8254	wait_on_page_writeback(page);
8255
8256	lock_extent(io_tree, page_start, page_end, &cached_state);
8257	ret2 = set_page_extent_mapped(page);
8258	if (ret2 < 0) {
8259		ret = vmf_error(ret2);
8260		unlock_extent(io_tree, page_start, page_end, &cached_state);
8261		goto out_unlock;
8262	}
8263
8264	/*
8265	 * we can't set the delalloc bits if there are pending ordered
8266	 * extents.  Drop our locks and wait for them to finish
8267	 */
8268	ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), page_start,
8269			PAGE_SIZE);
8270	if (ordered) {
8271		unlock_extent(io_tree, page_start, page_end, &cached_state);
8272		unlock_page(page);
8273		up_read(&BTRFS_I(inode)->i_mmap_lock);
8274		btrfs_start_ordered_extent(ordered);
8275		btrfs_put_ordered_extent(ordered);
8276		goto again;
8277	}
8278
8279	if (page->index == ((size - 1) >> PAGE_SHIFT)) {
8280		reserved_space = round_up(size - page_start,
8281					  fs_info->sectorsize);
8282		if (reserved_space < PAGE_SIZE) {
8283			end = page_start + reserved_space - 1;
8284			btrfs_delalloc_release_space(BTRFS_I(inode),
8285					data_reserved, page_start,
8286					PAGE_SIZE - reserved_space, true);
8287		}
8288	}
8289
8290	/*
8291	 * page_mkwrite gets called when the page is firstly dirtied after it's
8292	 * faulted in, but write(2) could also dirty a page and set delalloc
8293	 * bits, thus in this case for space account reason, we still need to
8294	 * clear any delalloc bits within this page range since we have to
8295	 * reserve data&meta space before lock_page() (see above comments).
8296	 */
8297	clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, end,
8298			  EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING |
8299			  EXTENT_DEFRAG, &cached_state);
8300
8301	ret2 = btrfs_set_extent_delalloc(BTRFS_I(inode), page_start, end, 0,
8302					&cached_state);
8303	if (ret2) {
8304		unlock_extent(io_tree, page_start, page_end, &cached_state);
8305		ret = VM_FAULT_SIGBUS;
8306		goto out_unlock;
8307	}
8308
8309	/* page is wholly or partially inside EOF */
8310	if (page_start + PAGE_SIZE > size)
8311		zero_start = offset_in_page(size);
8312	else
8313		zero_start = PAGE_SIZE;
8314
8315	if (zero_start != PAGE_SIZE)
8316		memzero_page(page, zero_start, PAGE_SIZE - zero_start);
8317
8318	btrfs_folio_clear_checked(fs_info, folio, page_start, PAGE_SIZE);
8319	btrfs_folio_set_dirty(fs_info, folio, page_start, end + 1 - page_start);
8320	btrfs_folio_set_uptodate(fs_info, folio, page_start, end + 1 - page_start);
8321
8322	btrfs_set_inode_last_sub_trans(BTRFS_I(inode));
8323
8324	unlock_extent(io_tree, page_start, page_end, &cached_state);
8325	up_read(&BTRFS_I(inode)->i_mmap_lock);
8326
8327	btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
8328	sb_end_pagefault(inode->i_sb);
8329	extent_changeset_free(data_reserved);
8330	return VM_FAULT_LOCKED;
8331
8332out_unlock:
8333	unlock_page(page);
8334	up_read(&BTRFS_I(inode)->i_mmap_lock);
8335out:
8336	btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
8337	btrfs_delalloc_release_space(BTRFS_I(inode), data_reserved, page_start,
8338				     reserved_space, (ret != 0));
8339out_noreserve:
8340	sb_end_pagefault(inode->i_sb);
8341	extent_changeset_free(data_reserved);
8342	return ret;
8343}
8344
8345static int btrfs_truncate(struct btrfs_inode *inode, bool skip_writeback)
8346{
8347	struct btrfs_truncate_control control = {
8348		.inode = inode,
8349		.ino = btrfs_ino(inode),
8350		.min_type = BTRFS_EXTENT_DATA_KEY,
8351		.clear_extent_range = true,
8352	};
8353	struct btrfs_root *root = inode->root;
8354	struct btrfs_fs_info *fs_info = root->fs_info;
8355	struct btrfs_block_rsv *rsv;
8356	int ret;
8357	struct btrfs_trans_handle *trans;
8358	u64 mask = fs_info->sectorsize - 1;
8359	const u64 min_size = btrfs_calc_metadata_size(fs_info, 1);
8360
8361	if (!skip_writeback) {
8362		ret = btrfs_wait_ordered_range(&inode->vfs_inode,
8363					       inode->vfs_inode.i_size & (~mask),
8364					       (u64)-1);
8365		if (ret)
8366			return ret;
8367	}
8368
8369	/*
8370	 * Yes ladies and gentlemen, this is indeed ugly.  We have a couple of
8371	 * things going on here:
8372	 *
8373	 * 1) We need to reserve space to update our inode.
8374	 *
8375	 * 2) We need to have something to cache all the space that is going to
8376	 * be free'd up by the truncate operation, but also have some slack
8377	 * space reserved in case it uses space during the truncate (thank you
8378	 * very much snapshotting).
8379	 *
8380	 * And we need these to be separate.  The fact is we can use a lot of
8381	 * space doing the truncate, and we have no earthly idea how much space
8382	 * we will use, so we need the truncate reservation to be separate so it
8383	 * doesn't end up using space reserved for updating the inode.  We also
8384	 * need to be able to stop the transaction and start a new one, which
8385	 * means we need to be able to update the inode several times, and we
8386	 * have no idea of knowing how many times that will be, so we can't just
8387	 * reserve 1 item for the entirety of the operation, so that has to be
8388	 * done separately as well.
8389	 *
8390	 * So that leaves us with
8391	 *
8392	 * 1) rsv - for the truncate reservation, which we will steal from the
8393	 * transaction reservation.
8394	 * 2) fs_info->trans_block_rsv - this will have 1 items worth left for
8395	 * updating the inode.
8396	 */
8397	rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
8398	if (!rsv)
8399		return -ENOMEM;
8400	rsv->size = min_size;
8401	rsv->failfast = true;
8402
8403	/*
8404	 * 1 for the truncate slack space
8405	 * 1 for updating the inode.
8406	 */
8407	trans = btrfs_start_transaction(root, 2);
8408	if (IS_ERR(trans)) {
8409		ret = PTR_ERR(trans);
8410		goto out;
8411	}
8412
8413	/* Migrate the slack space for the truncate to our reserve */
8414	ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, rsv,
8415				      min_size, false);
8416	/*
8417	 * We have reserved 2 metadata units when we started the transaction and
8418	 * min_size matches 1 unit, so this should never fail, but if it does,
8419	 * it's not critical we just fail truncation.
8420	 */
8421	if (WARN_ON(ret)) {
8422		btrfs_end_transaction(trans);
8423		goto out;
8424	}
8425
8426	trans->block_rsv = rsv;
8427
8428	while (1) {
8429		struct extent_state *cached_state = NULL;
8430		const u64 new_size = inode->vfs_inode.i_size;
8431		const u64 lock_start = ALIGN_DOWN(new_size, fs_info->sectorsize);
8432
8433		control.new_size = new_size;
8434		lock_extent(&inode->io_tree, lock_start, (u64)-1, &cached_state);
8435		/*
8436		 * We want to drop from the next block forward in case this new
8437		 * size is not block aligned since we will be keeping the last
8438		 * block of the extent just the way it is.
8439		 */
8440		btrfs_drop_extent_map_range(inode,
8441					    ALIGN(new_size, fs_info->sectorsize),
8442					    (u64)-1, false);
8443
8444		ret = btrfs_truncate_inode_items(trans, root, &control);
8445
8446		inode_sub_bytes(&inode->vfs_inode, control.sub_bytes);
8447		btrfs_inode_safe_disk_i_size_write(inode, control.last_size);
8448
8449		unlock_extent(&inode->io_tree, lock_start, (u64)-1, &cached_state);
8450
8451		trans->block_rsv = &fs_info->trans_block_rsv;
8452		if (ret != -ENOSPC && ret != -EAGAIN)
8453			break;
8454
8455		ret = btrfs_update_inode(trans, inode);
8456		if (ret)
8457			break;
8458
8459		btrfs_end_transaction(trans);
8460		btrfs_btree_balance_dirty(fs_info);
8461
8462		trans = btrfs_start_transaction(root, 2);
8463		if (IS_ERR(trans)) {
8464			ret = PTR_ERR(trans);
8465			trans = NULL;
8466			break;
8467		}
8468
8469		btrfs_block_rsv_release(fs_info, rsv, -1, NULL);
8470		ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv,
8471					      rsv, min_size, false);
8472		/*
8473		 * We have reserved 2 metadata units when we started the
8474		 * transaction and min_size matches 1 unit, so this should never
8475		 * fail, but if it does, it's not critical we just fail truncation.
8476		 */
8477		if (WARN_ON(ret))
8478			break;
8479
8480		trans->block_rsv = rsv;
8481	}
8482
8483	/*
8484	 * We can't call btrfs_truncate_block inside a trans handle as we could
8485	 * deadlock with freeze, if we got BTRFS_NEED_TRUNCATE_BLOCK then we
8486	 * know we've truncated everything except the last little bit, and can
8487	 * do btrfs_truncate_block and then update the disk_i_size.
8488	 */
8489	if (ret == BTRFS_NEED_TRUNCATE_BLOCK) {
8490		btrfs_end_transaction(trans);
8491		btrfs_btree_balance_dirty(fs_info);
8492
8493		ret = btrfs_truncate_block(inode, inode->vfs_inode.i_size, 0, 0);
8494		if (ret)
8495			goto out;
8496		trans = btrfs_start_transaction(root, 1);
8497		if (IS_ERR(trans)) {
8498			ret = PTR_ERR(trans);
8499			goto out;
8500		}
8501		btrfs_inode_safe_disk_i_size_write(inode, 0);
8502	}
8503
8504	if (trans) {
8505		int ret2;
8506
8507		trans->block_rsv = &fs_info->trans_block_rsv;
8508		ret2 = btrfs_update_inode(trans, inode);
8509		if (ret2 && !ret)
8510			ret = ret2;
8511
8512		ret2 = btrfs_end_transaction(trans);
8513		if (ret2 && !ret)
8514			ret = ret2;
8515		btrfs_btree_balance_dirty(fs_info);
8516	}
8517out:
8518	btrfs_free_block_rsv(fs_info, rsv);
8519	/*
8520	 * So if we truncate and then write and fsync we normally would just
8521	 * write the extents that changed, which is a problem if we need to
8522	 * first truncate that entire inode.  So set this flag so we write out
8523	 * all of the extents in the inode to the sync log so we're completely
8524	 * safe.
8525	 *
8526	 * If no extents were dropped or trimmed we don't need to force the next
8527	 * fsync to truncate all the inode's items from the log and re-log them
8528	 * all. This means the truncate operation did not change the file size,
8529	 * or changed it to a smaller size but there was only an implicit hole
8530	 * between the old i_size and the new i_size, and there were no prealloc
8531	 * extents beyond i_size to drop.
8532	 */
8533	if (control.extents_found > 0)
8534		btrfs_set_inode_full_sync(inode);
8535
8536	return ret;
8537}
8538
8539struct inode *btrfs_new_subvol_inode(struct mnt_idmap *idmap,
8540				     struct inode *dir)
8541{
8542	struct inode *inode;
8543
8544	inode = new_inode(dir->i_sb);
8545	if (inode) {
8546		/*
8547		 * Subvolumes don't inherit the sgid bit or the parent's gid if
8548		 * the parent's sgid bit is set. This is probably a bug.
8549		 */
8550		inode_init_owner(idmap, inode, NULL,
8551				 S_IFDIR | (~current_umask() & S_IRWXUGO));
8552		inode->i_op = &btrfs_dir_inode_operations;
8553		inode->i_fop = &btrfs_dir_file_operations;
8554	}
8555	return inode;
8556}
8557
8558struct inode *btrfs_alloc_inode(struct super_block *sb)
8559{
8560	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
8561	struct btrfs_inode *ei;
8562	struct inode *inode;
8563	struct extent_io_tree *file_extent_tree = NULL;
8564
8565	/* Self tests may pass a NULL fs_info. */
8566	if (fs_info && !btrfs_fs_incompat(fs_info, NO_HOLES)) {
8567		file_extent_tree = kmalloc(sizeof(struct extent_io_tree), GFP_KERNEL);
8568		if (!file_extent_tree)
8569			return NULL;
8570	}
8571
8572	ei = alloc_inode_sb(sb, btrfs_inode_cachep, GFP_KERNEL);
8573	if (!ei) {
8574		kfree(file_extent_tree);
8575		return NULL;
8576	}
8577
8578	ei->root = NULL;
8579	ei->generation = 0;
8580	ei->last_trans = 0;
8581	ei->last_sub_trans = 0;
8582	ei->logged_trans = 0;
8583	ei->delalloc_bytes = 0;
8584	ei->new_delalloc_bytes = 0;
8585	ei->defrag_bytes = 0;
8586	ei->disk_i_size = 0;
8587	ei->flags = 0;
8588	ei->ro_flags = 0;
8589	ei->csum_bytes = 0;
8590	ei->index_cnt = (u64)-1;
8591	ei->dir_index = 0;
8592	ei->last_unlink_trans = 0;
8593	ei->last_reflink_trans = 0;
8594	ei->last_log_commit = 0;
8595
8596	spin_lock_init(&ei->lock);
8597	ei->outstanding_extents = 0;
8598	if (sb->s_magic != BTRFS_TEST_MAGIC)
8599		btrfs_init_metadata_block_rsv(fs_info, &ei->block_rsv,
8600					      BTRFS_BLOCK_RSV_DELALLOC);
8601	ei->runtime_flags = 0;
8602	ei->prop_compress = BTRFS_COMPRESS_NONE;
8603	ei->defrag_compress = BTRFS_COMPRESS_NONE;
8604
8605	ei->delayed_node = NULL;
8606
8607	ei->i_otime_sec = 0;
8608	ei->i_otime_nsec = 0;
8609
8610	inode = &ei->vfs_inode;
8611	extent_map_tree_init(&ei->extent_tree);
8612
8613	/* This io tree sets the valid inode. */
8614	extent_io_tree_init(fs_info, &ei->io_tree, IO_TREE_INODE_IO);
8615	ei->io_tree.inode = ei;
8616
8617	ei->file_extent_tree = file_extent_tree;
8618	if (file_extent_tree) {
8619		extent_io_tree_init(fs_info, ei->file_extent_tree,
8620				    IO_TREE_INODE_FILE_EXTENT);
8621		/* Lockdep class is set only for the file extent tree. */
8622		lockdep_set_class(&ei->file_extent_tree->lock, &file_extent_tree_class);
8623	}
8624	mutex_init(&ei->log_mutex);
8625	spin_lock_init(&ei->ordered_tree_lock);
8626	ei->ordered_tree = RB_ROOT;
8627	ei->ordered_tree_last = NULL;
8628	INIT_LIST_HEAD(&ei->delalloc_inodes);
8629	INIT_LIST_HEAD(&ei->delayed_iput);
8630	RB_CLEAR_NODE(&ei->rb_node);
8631	init_rwsem(&ei->i_mmap_lock);
8632
8633	return inode;
8634}
8635
8636#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
8637void btrfs_test_destroy_inode(struct inode *inode)
8638{
8639	btrfs_drop_extent_map_range(BTRFS_I(inode), 0, (u64)-1, false);
8640	kfree(BTRFS_I(inode)->file_extent_tree);
8641	kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
8642}
8643#endif
8644
8645void btrfs_free_inode(struct inode *inode)
8646{
8647	kfree(BTRFS_I(inode)->file_extent_tree);
8648	kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
8649}
8650
8651void btrfs_destroy_inode(struct inode *vfs_inode)
8652{
8653	struct btrfs_ordered_extent *ordered;
8654	struct btrfs_inode *inode = BTRFS_I(vfs_inode);
8655	struct btrfs_root *root = inode->root;
8656	bool freespace_inode;
8657
8658	WARN_ON(!hlist_empty(&vfs_inode->i_dentry));
8659	WARN_ON(vfs_inode->i_data.nrpages);
8660	WARN_ON(inode->block_rsv.reserved);
8661	WARN_ON(inode->block_rsv.size);
8662	WARN_ON(inode->outstanding_extents);
8663	if (!S_ISDIR(vfs_inode->i_mode)) {
8664		WARN_ON(inode->delalloc_bytes);
8665		WARN_ON(inode->new_delalloc_bytes);
8666	}
8667	WARN_ON(inode->csum_bytes);
8668	WARN_ON(inode->defrag_bytes);
8669
8670	/*
8671	 * This can happen where we create an inode, but somebody else also
8672	 * created the same inode and we need to destroy the one we already
8673	 * created.
8674	 */
8675	if (!root)
8676		return;
8677
8678	/*
8679	 * If this is a free space inode do not take the ordered extents lockdep
8680	 * map.
8681	 */
8682	freespace_inode = btrfs_is_free_space_inode(inode);
8683
8684	while (1) {
8685		ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
8686		if (!ordered)
8687			break;
8688		else {
8689			btrfs_err(root->fs_info,
8690				  "found ordered extent %llu %llu on inode cleanup",
8691				  ordered->file_offset, ordered->num_bytes);
8692
8693			if (!freespace_inode)
8694				btrfs_lockdep_acquire(root->fs_info, btrfs_ordered_extent);
8695
8696			btrfs_remove_ordered_extent(inode, ordered);
8697			btrfs_put_ordered_extent(ordered);
8698			btrfs_put_ordered_extent(ordered);
8699		}
8700	}
8701	btrfs_qgroup_check_reserved_leak(inode);
8702	inode_tree_del(inode);
8703	btrfs_drop_extent_map_range(inode, 0, (u64)-1, false);
8704	btrfs_inode_clear_file_extent_range(inode, 0, (u64)-1);
8705	btrfs_put_root(inode->root);
8706}
8707
8708int btrfs_drop_inode(struct inode *inode)
8709{
8710	struct btrfs_root *root = BTRFS_I(inode)->root;
8711
8712	if (root == NULL)
8713		return 1;
8714
8715	/* the snap/subvol tree is on deleting */
8716	if (btrfs_root_refs(&root->root_item) == 0)
8717		return 1;
8718	else
8719		return generic_drop_inode(inode);
8720}
8721
8722static void init_once(void *foo)
8723{
8724	struct btrfs_inode *ei = foo;
8725
8726	inode_init_once(&ei->vfs_inode);
8727}
8728
8729void __cold btrfs_destroy_cachep(void)
8730{
8731	/*
8732	 * Make sure all delayed rcu free inodes are flushed before we
8733	 * destroy cache.
8734	 */
8735	rcu_barrier();
8736	bioset_exit(&btrfs_dio_bioset);
8737	kmem_cache_destroy(btrfs_inode_cachep);
8738}
8739
8740int __init btrfs_init_cachep(void)
8741{
8742	btrfs_inode_cachep = kmem_cache_create("btrfs_inode",
8743			sizeof(struct btrfs_inode), 0,
8744			SLAB_RECLAIM_ACCOUNT | SLAB_ACCOUNT,
8745			init_once);
8746	if (!btrfs_inode_cachep)
8747		goto fail;
8748
8749	if (bioset_init(&btrfs_dio_bioset, BIO_POOL_SIZE,
8750			offsetof(struct btrfs_dio_private, bbio.bio),
8751			BIOSET_NEED_BVECS))
8752		goto fail;
8753
8754	return 0;
8755fail:
8756	btrfs_destroy_cachep();
8757	return -ENOMEM;
8758}
8759
8760static int btrfs_getattr(struct mnt_idmap *idmap,
8761			 const struct path *path, struct kstat *stat,
8762			 u32 request_mask, unsigned int flags)
8763{
8764	u64 delalloc_bytes;
8765	u64 inode_bytes;
8766	struct inode *inode = d_inode(path->dentry);
8767	u32 blocksize = btrfs_sb(inode->i_sb)->sectorsize;
8768	u32 bi_flags = BTRFS_I(inode)->flags;
8769	u32 bi_ro_flags = BTRFS_I(inode)->ro_flags;
8770
8771	stat->result_mask |= STATX_BTIME;
8772	stat->btime.tv_sec = BTRFS_I(inode)->i_otime_sec;
8773	stat->btime.tv_nsec = BTRFS_I(inode)->i_otime_nsec;
8774	if (bi_flags & BTRFS_INODE_APPEND)
8775		stat->attributes |= STATX_ATTR_APPEND;
8776	if (bi_flags & BTRFS_INODE_COMPRESS)
8777		stat->attributes |= STATX_ATTR_COMPRESSED;
8778	if (bi_flags & BTRFS_INODE_IMMUTABLE)
8779		stat->attributes |= STATX_ATTR_IMMUTABLE;
8780	if (bi_flags & BTRFS_INODE_NODUMP)
8781		stat->attributes |= STATX_ATTR_NODUMP;
8782	if (bi_ro_flags & BTRFS_INODE_RO_VERITY)
8783		stat->attributes |= STATX_ATTR_VERITY;
8784
8785	stat->attributes_mask |= (STATX_ATTR_APPEND |
8786				  STATX_ATTR_COMPRESSED |
8787				  STATX_ATTR_IMMUTABLE |
8788				  STATX_ATTR_NODUMP);
8789
8790	generic_fillattr(idmap, request_mask, inode, stat);
8791	stat->dev = BTRFS_I(inode)->root->anon_dev;
8792
8793	spin_lock(&BTRFS_I(inode)->lock);
8794	delalloc_bytes = BTRFS_I(inode)->new_delalloc_bytes;
8795	inode_bytes = inode_get_bytes(inode);
8796	spin_unlock(&BTRFS_I(inode)->lock);
8797	stat->blocks = (ALIGN(inode_bytes, blocksize) +
8798			ALIGN(delalloc_bytes, blocksize)) >> SECTOR_SHIFT;
8799	return 0;
8800}
8801
8802static int btrfs_rename_exchange(struct inode *old_dir,
8803			      struct dentry *old_dentry,
8804			      struct inode *new_dir,
8805			      struct dentry *new_dentry)
8806{
8807	struct btrfs_fs_info *fs_info = inode_to_fs_info(old_dir);
8808	struct btrfs_trans_handle *trans;
8809	unsigned int trans_num_items;
8810	struct btrfs_root *root = BTRFS_I(old_dir)->root;
8811	struct btrfs_root *dest = BTRFS_I(new_dir)->root;
8812	struct inode *new_inode = new_dentry->d_inode;
8813	struct inode *old_inode = old_dentry->d_inode;
8814	struct btrfs_rename_ctx old_rename_ctx;
8815	struct btrfs_rename_ctx new_rename_ctx;
8816	u64 old_ino = btrfs_ino(BTRFS_I(old_inode));
8817	u64 new_ino = btrfs_ino(BTRFS_I(new_inode));
8818	u64 old_idx = 0;
8819	u64 new_idx = 0;
8820	int ret;
8821	int ret2;
8822	bool need_abort = false;
8823	struct fscrypt_name old_fname, new_fname;
8824	struct fscrypt_str *old_name, *new_name;
8825
8826	/*
8827	 * For non-subvolumes allow exchange only within one subvolume, in the
8828	 * same inode namespace. Two subvolumes (represented as directory) can
8829	 * be exchanged as they're a logical link and have a fixed inode number.
8830	 */
8831	if (root != dest &&
8832	    (old_ino != BTRFS_FIRST_FREE_OBJECTID ||
8833	     new_ino != BTRFS_FIRST_FREE_OBJECTID))
8834		return -EXDEV;
8835
8836	ret = fscrypt_setup_filename(old_dir, &old_dentry->d_name, 0, &old_fname);
8837	if (ret)
8838		return ret;
8839
8840	ret = fscrypt_setup_filename(new_dir, &new_dentry->d_name, 0, &new_fname);
8841	if (ret) {
8842		fscrypt_free_filename(&old_fname);
8843		return ret;
8844	}
8845
8846	old_name = &old_fname.disk_name;
8847	new_name = &new_fname.disk_name;
8848
8849	/* close the race window with snapshot create/destroy ioctl */
8850	if (old_ino == BTRFS_FIRST_FREE_OBJECTID ||
8851	    new_ino == BTRFS_FIRST_FREE_OBJECTID)
8852		down_read(&fs_info->subvol_sem);
8853
8854	/*
8855	 * For each inode:
8856	 * 1 to remove old dir item
8857	 * 1 to remove old dir index
8858	 * 1 to add new dir item
8859	 * 1 to add new dir index
8860	 * 1 to update parent inode
8861	 *
8862	 * If the parents are the same, we only need to account for one
8863	 */
8864	trans_num_items = (old_dir == new_dir ? 9 : 10);
8865	if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
8866		/*
8867		 * 1 to remove old root ref
8868		 * 1 to remove old root backref
8869		 * 1 to add new root ref
8870		 * 1 to add new root backref
8871		 */
8872		trans_num_items += 4;
8873	} else {
8874		/*
8875		 * 1 to update inode item
8876		 * 1 to remove old inode ref
8877		 * 1 to add new inode ref
8878		 */
8879		trans_num_items += 3;
8880	}
8881	if (new_ino == BTRFS_FIRST_FREE_OBJECTID)
8882		trans_num_items += 4;
8883	else
8884		trans_num_items += 3;
8885	trans = btrfs_start_transaction(root, trans_num_items);
8886	if (IS_ERR(trans)) {
8887		ret = PTR_ERR(trans);
8888		goto out_notrans;
8889	}
8890
8891	if (dest != root) {
8892		ret = btrfs_record_root_in_trans(trans, dest);
8893		if (ret)
8894			goto out_fail;
8895	}
8896
8897	/*
8898	 * We need to find a free sequence number both in the source and
8899	 * in the destination directory for the exchange.
8900	 */
8901	ret = btrfs_set_inode_index(BTRFS_I(new_dir), &old_idx);
8902	if (ret)
8903		goto out_fail;
8904	ret = btrfs_set_inode_index(BTRFS_I(old_dir), &new_idx);
8905	if (ret)
8906		goto out_fail;
8907
8908	BTRFS_I(old_inode)->dir_index = 0ULL;
8909	BTRFS_I(new_inode)->dir_index = 0ULL;
8910
8911	/* Reference for the source. */
8912	if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
8913		/* force full log commit if subvolume involved. */
8914		btrfs_set_log_full_commit(trans);
8915	} else {
8916		ret = btrfs_insert_inode_ref(trans, dest, new_name, old_ino,
8917					     btrfs_ino(BTRFS_I(new_dir)),
8918					     old_idx);
8919		if (ret)
8920			goto out_fail;
8921		need_abort = true;
8922	}
8923
8924	/* And now for the dest. */
8925	if (new_ino == BTRFS_FIRST_FREE_OBJECTID) {
8926		/* force full log commit if subvolume involved. */
8927		btrfs_set_log_full_commit(trans);
8928	} else {
8929		ret = btrfs_insert_inode_ref(trans, root, old_name, new_ino,
8930					     btrfs_ino(BTRFS_I(old_dir)),
8931					     new_idx);
8932		if (ret) {
8933			if (need_abort)
8934				btrfs_abort_transaction(trans, ret);
8935			goto out_fail;
8936		}
8937	}
8938
8939	/* Update inode version and ctime/mtime. */
8940	inode_inc_iversion(old_dir);
8941	inode_inc_iversion(new_dir);
8942	inode_inc_iversion(old_inode);
8943	inode_inc_iversion(new_inode);
8944	simple_rename_timestamp(old_dir, old_dentry, new_dir, new_dentry);
8945
8946	if (old_dentry->d_parent != new_dentry->d_parent) {
8947		btrfs_record_unlink_dir(trans, BTRFS_I(old_dir),
8948					BTRFS_I(old_inode), true);
8949		btrfs_record_unlink_dir(trans, BTRFS_I(new_dir),
8950					BTRFS_I(new_inode), true);
8951	}
8952
8953	/* src is a subvolume */
8954	if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
8955		ret = btrfs_unlink_subvol(trans, BTRFS_I(old_dir), old_dentry);
8956	} else { /* src is an inode */
8957		ret = __btrfs_unlink_inode(trans, BTRFS_I(old_dir),
8958					   BTRFS_I(old_dentry->d_inode),
8959					   old_name, &old_rename_ctx);
8960		if (!ret)
8961			ret = btrfs_update_inode(trans, BTRFS_I(old_inode));
8962	}
8963	if (ret) {
8964		btrfs_abort_transaction(trans, ret);
8965		goto out_fail;
8966	}
8967
8968	/* dest is a subvolume */
8969	if (new_ino == BTRFS_FIRST_FREE_OBJECTID) {
8970		ret = btrfs_unlink_subvol(trans, BTRFS_I(new_dir), new_dentry);
8971	} else { /* dest is an inode */
8972		ret = __btrfs_unlink_inode(trans, BTRFS_I(new_dir),
8973					   BTRFS_I(new_dentry->d_inode),
8974					   new_name, &new_rename_ctx);
8975		if (!ret)
8976			ret = btrfs_update_inode(trans, BTRFS_I(new_inode));
8977	}
8978	if (ret) {
8979		btrfs_abort_transaction(trans, ret);
8980		goto out_fail;
8981	}
8982
8983	ret = btrfs_add_link(trans, BTRFS_I(new_dir), BTRFS_I(old_inode),
8984			     new_name, 0, old_idx);
8985	if (ret) {
8986		btrfs_abort_transaction(trans, ret);
8987		goto out_fail;
8988	}
8989
8990	ret = btrfs_add_link(trans, BTRFS_I(old_dir), BTRFS_I(new_inode),
8991			     old_name, 0, new_idx);
8992	if (ret) {
8993		btrfs_abort_transaction(trans, ret);
8994		goto out_fail;
8995	}
8996
8997	if (old_inode->i_nlink == 1)
8998		BTRFS_I(old_inode)->dir_index = old_idx;
8999	if (new_inode->i_nlink == 1)
9000		BTRFS_I(new_inode)->dir_index = new_idx;
9001
9002	/*
9003	 * Now pin the logs of the roots. We do it to ensure that no other task
9004	 * can sync the logs while we are in progress with the rename, because
9005	 * that could result in an inconsistency in case any of the inodes that
9006	 * are part of this rename operation were logged before.
9007	 */
9008	if (old_ino != BTRFS_FIRST_FREE_OBJECTID)
9009		btrfs_pin_log_trans(root);
9010	if (new_ino != BTRFS_FIRST_FREE_OBJECTID)
9011		btrfs_pin_log_trans(dest);
9012
9013	/* Do the log updates for all inodes. */
9014	if (old_ino != BTRFS_FIRST_FREE_OBJECTID)
9015		btrfs_log_new_name(trans, old_dentry, BTRFS_I(old_dir),
9016				   old_rename_ctx.index, new_dentry->d_parent);
9017	if (new_ino != BTRFS_FIRST_FREE_OBJECTID)
9018		btrfs_log_new_name(trans, new_dentry, BTRFS_I(new_dir),
9019				   new_rename_ctx.index, old_dentry->d_parent);
9020
9021	/* Now unpin the logs. */
9022	if (old_ino != BTRFS_FIRST_FREE_OBJECTID)
9023		btrfs_end_log_trans(root);
9024	if (new_ino != BTRFS_FIRST_FREE_OBJECTID)
9025		btrfs_end_log_trans(dest);
9026out_fail:
9027	ret2 = btrfs_end_transaction(trans);
9028	ret = ret ? ret : ret2;
9029out_notrans:
9030	if (new_ino == BTRFS_FIRST_FREE_OBJECTID ||
9031	    old_ino == BTRFS_FIRST_FREE_OBJECTID)
9032		up_read(&fs_info->subvol_sem);
9033
9034	fscrypt_free_filename(&new_fname);
9035	fscrypt_free_filename(&old_fname);
9036	return ret;
9037}
9038
9039static struct inode *new_whiteout_inode(struct mnt_idmap *idmap,
9040					struct inode *dir)
9041{
9042	struct inode *inode;
9043
9044	inode = new_inode(dir->i_sb);
9045	if (inode) {
9046		inode_init_owner(idmap, inode, dir,
9047				 S_IFCHR | WHITEOUT_MODE);
9048		inode->i_op = &btrfs_special_inode_operations;
9049		init_special_inode(inode, inode->i_mode, WHITEOUT_DEV);
9050	}
9051	return inode;
9052}
9053
9054static int btrfs_rename(struct mnt_idmap *idmap,
9055			struct inode *old_dir, struct dentry *old_dentry,
9056			struct inode *new_dir, struct dentry *new_dentry,
9057			unsigned int flags)
9058{
9059	struct btrfs_fs_info *fs_info = inode_to_fs_info(old_dir);
9060	struct btrfs_new_inode_args whiteout_args = {
9061		.dir = old_dir,
9062		.dentry = old_dentry,
9063	};
9064	struct btrfs_trans_handle *trans;
9065	unsigned int trans_num_items;
9066	struct btrfs_root *root = BTRFS_I(old_dir)->root;
9067	struct btrfs_root *dest = BTRFS_I(new_dir)->root;
9068	struct inode *new_inode = d_inode(new_dentry);
9069	struct inode *old_inode = d_inode(old_dentry);
9070	struct btrfs_rename_ctx rename_ctx;
9071	u64 index = 0;
9072	int ret;
9073	int ret2;
9074	u64 old_ino = btrfs_ino(BTRFS_I(old_inode));
9075	struct fscrypt_name old_fname, new_fname;
9076
9077	if (btrfs_ino(BTRFS_I(new_dir)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
9078		return -EPERM;
9079
9080	/* we only allow rename subvolume link between subvolumes */
9081	if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
9082		return -EXDEV;
9083
9084	if (old_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID ||
9085	    (new_inode && btrfs_ino(BTRFS_I(new_inode)) == BTRFS_FIRST_FREE_OBJECTID))
9086		return -ENOTEMPTY;
9087
9088	if (S_ISDIR(old_inode->i_mode) && new_inode &&
9089	    new_inode->i_size > BTRFS_EMPTY_DIR_SIZE)
9090		return -ENOTEMPTY;
9091
9092	ret = fscrypt_setup_filename(old_dir, &old_dentry->d_name, 0, &old_fname);
9093	if (ret)
9094		return ret;
9095
9096	ret = fscrypt_setup_filename(new_dir, &new_dentry->d_name, 0, &new_fname);
9097	if (ret) {
9098		fscrypt_free_filename(&old_fname);
9099		return ret;
9100	}
9101
9102	/* check for collisions, even if the  name isn't there */
9103	ret = btrfs_check_dir_item_collision(dest, new_dir->i_ino, &new_fname.disk_name);
9104	if (ret) {
9105		if (ret == -EEXIST) {
9106			/* we shouldn't get
9107			 * eexist without a new_inode */
9108			if (WARN_ON(!new_inode)) {
9109				goto out_fscrypt_names;
9110			}
9111		} else {
9112			/* maybe -EOVERFLOW */
9113			goto out_fscrypt_names;
9114		}
9115	}
9116	ret = 0;
9117
9118	/*
9119	 * we're using rename to replace one file with another.  Start IO on it
9120	 * now so  we don't add too much work to the end of the transaction
9121	 */
9122	if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size)
9123		filemap_flush(old_inode->i_mapping);
9124
9125	if (flags & RENAME_WHITEOUT) {
9126		whiteout_args.inode = new_whiteout_inode(idmap, old_dir);
9127		if (!whiteout_args.inode) {
9128			ret = -ENOMEM;
9129			goto out_fscrypt_names;
9130		}
9131		ret = btrfs_new_inode_prepare(&whiteout_args, &trans_num_items);
9132		if (ret)
9133			goto out_whiteout_inode;
9134	} else {
9135		/* 1 to update the old parent inode. */
9136		trans_num_items = 1;
9137	}
9138
9139	if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
9140		/* Close the race window with snapshot create/destroy ioctl */
9141		down_read(&fs_info->subvol_sem);
9142		/*
9143		 * 1 to remove old root ref
9144		 * 1 to remove old root backref
9145		 * 1 to add new root ref
9146		 * 1 to add new root backref
9147		 */
9148		trans_num_items += 4;
9149	} else {
9150		/*
9151		 * 1 to update inode
9152		 * 1 to remove old inode ref
9153		 * 1 to add new inode ref
9154		 */
9155		trans_num_items += 3;
9156	}
9157	/*
9158	 * 1 to remove old dir item
9159	 * 1 to remove old dir index
9160	 * 1 to add new dir item
9161	 * 1 to add new dir index
9162	 */
9163	trans_num_items += 4;
9164	/* 1 to update new parent inode if it's not the same as the old parent */
9165	if (new_dir != old_dir)
9166		trans_num_items++;
9167	if (new_inode) {
9168		/*
9169		 * 1 to update inode
9170		 * 1 to remove inode ref
9171		 * 1 to remove dir item
9172		 * 1 to remove dir index
9173		 * 1 to possibly add orphan item
9174		 */
9175		trans_num_items += 5;
9176	}
9177	trans = btrfs_start_transaction(root, trans_num_items);
9178	if (IS_ERR(trans)) {
9179		ret = PTR_ERR(trans);
9180		goto out_notrans;
9181	}
9182
9183	if (dest != root) {
9184		ret = btrfs_record_root_in_trans(trans, dest);
9185		if (ret)
9186			goto out_fail;
9187	}
9188
9189	ret = btrfs_set_inode_index(BTRFS_I(new_dir), &index);
9190	if (ret)
9191		goto out_fail;
9192
9193	BTRFS_I(old_inode)->dir_index = 0ULL;
9194	if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
9195		/* force full log commit if subvolume involved. */
9196		btrfs_set_log_full_commit(trans);
9197	} else {
9198		ret = btrfs_insert_inode_ref(trans, dest, &new_fname.disk_name,
9199					     old_ino, btrfs_ino(BTRFS_I(new_dir)),
9200					     index);
9201		if (ret)
9202			goto out_fail;
9203	}
9204
9205	inode_inc_iversion(old_dir);
9206	inode_inc_iversion(new_dir);
9207	inode_inc_iversion(old_inode);
9208	simple_rename_timestamp(old_dir, old_dentry, new_dir, new_dentry);
9209
9210	if (old_dentry->d_parent != new_dentry->d_parent)
9211		btrfs_record_unlink_dir(trans, BTRFS_I(old_dir),
9212					BTRFS_I(old_inode), true);
9213
9214	if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
9215		ret = btrfs_unlink_subvol(trans, BTRFS_I(old_dir), old_dentry);
9216	} else {
9217		ret = __btrfs_unlink_inode(trans, BTRFS_I(old_dir),
9218					   BTRFS_I(d_inode(old_dentry)),
9219					   &old_fname.disk_name, &rename_ctx);
9220		if (!ret)
9221			ret = btrfs_update_inode(trans, BTRFS_I(old_inode));
9222	}
9223	if (ret) {
9224		btrfs_abort_transaction(trans, ret);
9225		goto out_fail;
9226	}
9227
9228	if (new_inode) {
9229		inode_inc_iversion(new_inode);
9230		if (unlikely(btrfs_ino(BTRFS_I(new_inode)) ==
9231			     BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
9232			ret = btrfs_unlink_subvol(trans, BTRFS_I(new_dir), new_dentry);
9233			BUG_ON(new_inode->i_nlink == 0);
9234		} else {
9235			ret = btrfs_unlink_inode(trans, BTRFS_I(new_dir),
9236						 BTRFS_I(d_inode(new_dentry)),
9237						 &new_fname.disk_name);
9238		}
9239		if (!ret && new_inode->i_nlink == 0)
9240			ret = btrfs_orphan_add(trans,
9241					BTRFS_I(d_inode(new_dentry)));
9242		if (ret) {
9243			btrfs_abort_transaction(trans, ret);
9244			goto out_fail;
9245		}
9246	}
9247
9248	ret = btrfs_add_link(trans, BTRFS_I(new_dir), BTRFS_I(old_inode),
9249			     &new_fname.disk_name, 0, index);
9250	if (ret) {
9251		btrfs_abort_transaction(trans, ret);
9252		goto out_fail;
9253	}
9254
9255	if (old_inode->i_nlink == 1)
9256		BTRFS_I(old_inode)->dir_index = index;
9257
9258	if (old_ino != BTRFS_FIRST_FREE_OBJECTID)
9259		btrfs_log_new_name(trans, old_dentry, BTRFS_I(old_dir),
9260				   rename_ctx.index, new_dentry->d_parent);
9261
9262	if (flags & RENAME_WHITEOUT) {
9263		ret = btrfs_create_new_inode(trans, &whiteout_args);
9264		if (ret) {
9265			btrfs_abort_transaction(trans, ret);
9266			goto out_fail;
9267		} else {
9268			unlock_new_inode(whiteout_args.inode);
9269			iput(whiteout_args.inode);
9270			whiteout_args.inode = NULL;
9271		}
9272	}
9273out_fail:
9274	ret2 = btrfs_end_transaction(trans);
9275	ret = ret ? ret : ret2;
9276out_notrans:
9277	if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
9278		up_read(&fs_info->subvol_sem);
9279	if (flags & RENAME_WHITEOUT)
9280		btrfs_new_inode_args_destroy(&whiteout_args);
9281out_whiteout_inode:
9282	if (flags & RENAME_WHITEOUT)
9283		iput(whiteout_args.inode);
9284out_fscrypt_names:
9285	fscrypt_free_filename(&old_fname);
9286	fscrypt_free_filename(&new_fname);
9287	return ret;
9288}
9289
9290static int btrfs_rename2(struct mnt_idmap *idmap, struct inode *old_dir,
9291			 struct dentry *old_dentry, struct inode *new_dir,
9292			 struct dentry *new_dentry, unsigned int flags)
9293{
9294	int ret;
9295
9296	if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
9297		return -EINVAL;
9298
9299	if (flags & RENAME_EXCHANGE)
9300		ret = btrfs_rename_exchange(old_dir, old_dentry, new_dir,
9301					    new_dentry);
9302	else
9303		ret = btrfs_rename(idmap, old_dir, old_dentry, new_dir,
9304				   new_dentry, flags);
9305
9306	btrfs_btree_balance_dirty(BTRFS_I(new_dir)->root->fs_info);
9307
9308	return ret;
9309}
9310
9311struct btrfs_delalloc_work {
9312	struct inode *inode;
9313	struct completion completion;
9314	struct list_head list;
9315	struct btrfs_work work;
9316};
9317
9318static void btrfs_run_delalloc_work(struct btrfs_work *work)
9319{
9320	struct btrfs_delalloc_work *delalloc_work;
9321	struct inode *inode;
9322
9323	delalloc_work = container_of(work, struct btrfs_delalloc_work,
9324				     work);
9325	inode = delalloc_work->inode;
9326	filemap_flush(inode->i_mapping);
9327	if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
9328				&BTRFS_I(inode)->runtime_flags))
9329		filemap_flush(inode->i_mapping);
9330
9331	iput(inode);
9332	complete(&delalloc_work->completion);
9333}
9334
9335static struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode)
9336{
9337	struct btrfs_delalloc_work *work;
9338
9339	work = kmalloc(sizeof(*work), GFP_NOFS);
9340	if (!work)
9341		return NULL;
9342
9343	init_completion(&work->completion);
9344	INIT_LIST_HEAD(&work->list);
9345	work->inode = inode;
9346	btrfs_init_work(&work->work, btrfs_run_delalloc_work, NULL);
9347
9348	return work;
9349}
9350
9351/*
9352 * some fairly slow code that needs optimization. This walks the list
9353 * of all the inodes with pending delalloc and forces them to disk.
9354 */
9355static int start_delalloc_inodes(struct btrfs_root *root,
9356				 struct writeback_control *wbc, bool snapshot,
9357				 bool in_reclaim_context)
9358{
9359	struct btrfs_inode *binode;
9360	struct inode *inode;
9361	struct btrfs_delalloc_work *work, *next;
9362	LIST_HEAD(works);
9363	LIST_HEAD(splice);
9364	int ret = 0;
9365	bool full_flush = wbc->nr_to_write == LONG_MAX;
9366
9367	mutex_lock(&root->delalloc_mutex);
9368	spin_lock(&root->delalloc_lock);
9369	list_splice_init(&root->delalloc_inodes, &splice);
9370	while (!list_empty(&splice)) {
9371		binode = list_entry(splice.next, struct btrfs_inode,
9372				    delalloc_inodes);
9373
9374		list_move_tail(&binode->delalloc_inodes,
9375			       &root->delalloc_inodes);
9376
9377		if (in_reclaim_context &&
9378		    test_bit(BTRFS_INODE_NO_DELALLOC_FLUSH, &binode->runtime_flags))
9379			continue;
9380
9381		inode = igrab(&binode->vfs_inode);
9382		if (!inode) {
9383			cond_resched_lock(&root->delalloc_lock);
9384			continue;
9385		}
9386		spin_unlock(&root->delalloc_lock);
9387
9388		if (snapshot)
9389			set_bit(BTRFS_INODE_SNAPSHOT_FLUSH,
9390				&binode->runtime_flags);
9391		if (full_flush) {
9392			work = btrfs_alloc_delalloc_work(inode);
9393			if (!work) {
9394				iput(inode);
9395				ret = -ENOMEM;
9396				goto out;
9397			}
9398			list_add_tail(&work->list, &works);
9399			btrfs_queue_work(root->fs_info->flush_workers,
9400					 &work->work);
9401		} else {
9402			ret = filemap_fdatawrite_wbc(inode->i_mapping, wbc);
9403			btrfs_add_delayed_iput(BTRFS_I(inode));
9404			if (ret || wbc->nr_to_write <= 0)
9405				goto out;
9406		}
9407		cond_resched();
9408		spin_lock(&root->delalloc_lock);
9409	}
9410	spin_unlock(&root->delalloc_lock);
9411
9412out:
9413	list_for_each_entry_safe(work, next, &works, list) {
9414		list_del_init(&work->list);
9415		wait_for_completion(&work->completion);
9416		kfree(work);
9417	}
9418
9419	if (!list_empty(&splice)) {
9420		spin_lock(&root->delalloc_lock);
9421		list_splice_tail(&splice, &root->delalloc_inodes);
9422		spin_unlock(&root->delalloc_lock);
9423	}
9424	mutex_unlock(&root->delalloc_mutex);
9425	return ret;
9426}
9427
9428int btrfs_start_delalloc_snapshot(struct btrfs_root *root, bool in_reclaim_context)
9429{
9430	struct writeback_control wbc = {
9431		.nr_to_write = LONG_MAX,
9432		.sync_mode = WB_SYNC_NONE,
9433		.range_start = 0,
9434		.range_end = LLONG_MAX,
9435	};
9436	struct btrfs_fs_info *fs_info = root->fs_info;
9437
9438	if (BTRFS_FS_ERROR(fs_info))
9439		return -EROFS;
9440
9441	return start_delalloc_inodes(root, &wbc, true, in_reclaim_context);
9442}
9443
9444int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, long nr,
9445			       bool in_reclaim_context)
9446{
9447	struct writeback_control wbc = {
9448		.nr_to_write = nr,
9449		.sync_mode = WB_SYNC_NONE,
9450		.range_start = 0,
9451		.range_end = LLONG_MAX,
9452	};
9453	struct btrfs_root *root;
9454	LIST_HEAD(splice);
9455	int ret;
9456
9457	if (BTRFS_FS_ERROR(fs_info))
9458		return -EROFS;
9459
9460	mutex_lock(&fs_info->delalloc_root_mutex);
9461	spin_lock(&fs_info->delalloc_root_lock);
9462	list_splice_init(&fs_info->delalloc_roots, &splice);
9463	while (!list_empty(&splice)) {
9464		/*
9465		 * Reset nr_to_write here so we know that we're doing a full
9466		 * flush.
9467		 */
9468		if (nr == LONG_MAX)
9469			wbc.nr_to_write = LONG_MAX;
9470
9471		root = list_first_entry(&splice, struct btrfs_root,
9472					delalloc_root);
9473		root = btrfs_grab_root(root);
9474		BUG_ON(!root);
9475		list_move_tail(&root->delalloc_root,
9476			       &fs_info->delalloc_roots);
9477		spin_unlock(&fs_info->delalloc_root_lock);
9478
9479		ret = start_delalloc_inodes(root, &wbc, false, in_reclaim_context);
9480		btrfs_put_root(root);
9481		if (ret < 0 || wbc.nr_to_write <= 0)
9482			goto out;
9483		spin_lock(&fs_info->delalloc_root_lock);
9484	}
9485	spin_unlock(&fs_info->delalloc_root_lock);
9486
9487	ret = 0;
9488out:
9489	if (!list_empty(&splice)) {
9490		spin_lock(&fs_info->delalloc_root_lock);
9491		list_splice_tail(&splice, &fs_info->delalloc_roots);
9492		spin_unlock(&fs_info->delalloc_root_lock);
9493	}
9494	mutex_unlock(&fs_info->delalloc_root_mutex);
9495	return ret;
9496}
9497
9498static int btrfs_symlink(struct mnt_idmap *idmap, struct inode *dir,
9499			 struct dentry *dentry, const char *symname)
9500{
9501	struct btrfs_fs_info *fs_info = inode_to_fs_info(dir);
9502	struct btrfs_trans_handle *trans;
9503	struct btrfs_root *root = BTRFS_I(dir)->root;
9504	struct btrfs_path *path;
9505	struct btrfs_key key;
9506	struct inode *inode;
9507	struct btrfs_new_inode_args new_inode_args = {
9508		.dir = dir,
9509		.dentry = dentry,
9510	};
9511	unsigned int trans_num_items;
9512	int err;
9513	int name_len;
9514	int datasize;
9515	unsigned long ptr;
9516	struct btrfs_file_extent_item *ei;
9517	struct extent_buffer *leaf;
9518
9519	name_len = strlen(symname);
9520	if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info))
9521		return -ENAMETOOLONG;
9522
9523	inode = new_inode(dir->i_sb);
9524	if (!inode)
9525		return -ENOMEM;
9526	inode_init_owner(idmap, inode, dir, S_IFLNK | S_IRWXUGO);
9527	inode->i_op = &btrfs_symlink_inode_operations;
9528	inode_nohighmem(inode);
9529	inode->i_mapping->a_ops = &btrfs_aops;
9530	btrfs_i_size_write(BTRFS_I(inode), name_len);
9531	inode_set_bytes(inode, name_len);
9532
9533	new_inode_args.inode = inode;
9534	err = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items);
9535	if (err)
9536		goto out_inode;
9537	/* 1 additional item for the inline extent */
9538	trans_num_items++;
9539
9540	trans = btrfs_start_transaction(root, trans_num_items);
9541	if (IS_ERR(trans)) {
9542		err = PTR_ERR(trans);
9543		goto out_new_inode_args;
9544	}
9545
9546	err = btrfs_create_new_inode(trans, &new_inode_args);
9547	if (err)
9548		goto out;
9549
9550	path = btrfs_alloc_path();
9551	if (!path) {
9552		err = -ENOMEM;
9553		btrfs_abort_transaction(trans, err);
9554		discard_new_inode(inode);
9555		inode = NULL;
9556		goto out;
9557	}
9558	key.objectid = btrfs_ino(BTRFS_I(inode));
9559	key.offset = 0;
9560	key.type = BTRFS_EXTENT_DATA_KEY;
9561	datasize = btrfs_file_extent_calc_inline_size(name_len);
9562	err = btrfs_insert_empty_item(trans, root, path, &key,
9563				      datasize);
9564	if (err) {
9565		btrfs_abort_transaction(trans, err);
9566		btrfs_free_path(path);
9567		discard_new_inode(inode);
9568		inode = NULL;
9569		goto out;
9570	}
9571	leaf = path->nodes[0];
9572	ei = btrfs_item_ptr(leaf, path->slots[0],
9573			    struct btrfs_file_extent_item);
9574	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
9575	btrfs_set_file_extent_type(leaf, ei,
9576				   BTRFS_FILE_EXTENT_INLINE);
9577	btrfs_set_file_extent_encryption(leaf, ei, 0);
9578	btrfs_set_file_extent_compression(leaf, ei, 0);
9579	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
9580	btrfs_set_file_extent_ram_bytes(leaf, ei, name_len);
9581
9582	ptr = btrfs_file_extent_inline_start(ei);
9583	write_extent_buffer(leaf, symname, ptr, name_len);
9584	btrfs_mark_buffer_dirty(trans, leaf);
9585	btrfs_free_path(path);
9586
9587	d_instantiate_new(dentry, inode);
9588	err = 0;
9589out:
9590	btrfs_end_transaction(trans);
9591	btrfs_btree_balance_dirty(fs_info);
9592out_new_inode_args:
9593	btrfs_new_inode_args_destroy(&new_inode_args);
9594out_inode:
9595	if (err)
9596		iput(inode);
9597	return err;
9598}
9599
9600static struct btrfs_trans_handle *insert_prealloc_file_extent(
9601				       struct btrfs_trans_handle *trans_in,
9602				       struct btrfs_inode *inode,
9603				       struct btrfs_key *ins,
9604				       u64 file_offset)
9605{
9606	struct btrfs_file_extent_item stack_fi;
9607	struct btrfs_replace_extent_info extent_info;
9608	struct btrfs_trans_handle *trans = trans_in;
9609	struct btrfs_path *path;
9610	u64 start = ins->objectid;
9611	u64 len = ins->offset;
9612	u64 qgroup_released = 0;
9613	int ret;
9614
9615	memset(&stack_fi, 0, sizeof(stack_fi));
9616
9617	btrfs_set_stack_file_extent_type(&stack_fi, BTRFS_FILE_EXTENT_PREALLOC);
9618	btrfs_set_stack_file_extent_disk_bytenr(&stack_fi, start);
9619	btrfs_set_stack_file_extent_disk_num_bytes(&stack_fi, len);
9620	btrfs_set_stack_file_extent_num_bytes(&stack_fi, len);
9621	btrfs_set_stack_file_extent_ram_bytes(&stack_fi, len);
9622	btrfs_set_stack_file_extent_compression(&stack_fi, BTRFS_COMPRESS_NONE);
9623	/* Encryption and other encoding is reserved and all 0 */
9624
9625	ret = btrfs_qgroup_release_data(inode, file_offset, len, &qgroup_released);
9626	if (ret < 0)
9627		return ERR_PTR(ret);
9628
9629	if (trans) {
9630		ret = insert_reserved_file_extent(trans, inode,
9631						  file_offset, &stack_fi,
9632						  true, qgroup_released);
9633		if (ret)
9634			goto free_qgroup;
9635		return trans;
9636	}
9637
9638	extent_info.disk_offset = start;
9639	extent_info.disk_len = len;
9640	extent_info.data_offset = 0;
9641	extent_info.data_len = len;
9642	extent_info.file_offset = file_offset;
9643	extent_info.extent_buf = (char *)&stack_fi;
9644	extent_info.is_new_extent = true;
9645	extent_info.update_times = true;
9646	extent_info.qgroup_reserved = qgroup_released;
9647	extent_info.insertions = 0;
9648
9649	path = btrfs_alloc_path();
9650	if (!path) {
9651		ret = -ENOMEM;
9652		goto free_qgroup;
9653	}
9654
9655	ret = btrfs_replace_file_extents(inode, path, file_offset,
9656				     file_offset + len - 1, &extent_info,
9657				     &trans);
9658	btrfs_free_path(path);
9659	if (ret)
9660		goto free_qgroup;
9661	return trans;
9662
9663free_qgroup:
9664	/*
9665	 * We have released qgroup data range at the beginning of the function,
9666	 * and normally qgroup_released bytes will be freed when committing
9667	 * transaction.
9668	 * But if we error out early, we have to free what we have released
9669	 * or we leak qgroup data reservation.
9670	 */
9671	btrfs_qgroup_free_refroot(inode->root->fs_info,
9672			inode->root->root_key.objectid, qgroup_released,
9673			BTRFS_QGROUP_RSV_DATA);
9674	return ERR_PTR(ret);
9675}
9676
9677static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
9678				       u64 start, u64 num_bytes, u64 min_size,
9679				       loff_t actual_len, u64 *alloc_hint,
9680				       struct btrfs_trans_handle *trans)
9681{
9682	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
9683	struct extent_map *em;
9684	struct btrfs_root *root = BTRFS_I(inode)->root;
9685	struct btrfs_key ins;
9686	u64 cur_offset = start;
9687	u64 clear_offset = start;
9688	u64 i_size;
9689	u64 cur_bytes;
9690	u64 last_alloc = (u64)-1;
9691	int ret = 0;
9692	bool own_trans = true;
9693	u64 end = start + num_bytes - 1;
9694
9695	if (trans)
9696		own_trans = false;
9697	while (num_bytes > 0) {
9698		cur_bytes = min_t(u64, num_bytes, SZ_256M);
9699		cur_bytes = max(cur_bytes, min_size);
9700		/*
9701		 * If we are severely fragmented we could end up with really
9702		 * small allocations, so if the allocator is returning small
9703		 * chunks lets make its job easier by only searching for those
9704		 * sized chunks.
9705		 */
9706		cur_bytes = min(cur_bytes, last_alloc);
9707		ret = btrfs_reserve_extent(root, cur_bytes, cur_bytes,
9708				min_size, 0, *alloc_hint, &ins, 1, 0);
9709		if (ret)
9710			break;
9711
9712		/*
9713		 * We've reserved this space, and thus converted it from
9714		 * ->bytes_may_use to ->bytes_reserved.  Any error that happens
9715		 * from here on out we will only need to clear our reservation
9716		 * for the remaining unreserved area, so advance our
9717		 * clear_offset by our extent size.
9718		 */
9719		clear_offset += ins.offset;
9720
9721		last_alloc = ins.offset;
9722		trans = insert_prealloc_file_extent(trans, BTRFS_I(inode),
9723						    &ins, cur_offset);
9724		/*
9725		 * Now that we inserted the prealloc extent we can finally
9726		 * decrement the number of reservations in the block group.
9727		 * If we did it before, we could race with relocation and have
9728		 * relocation miss the reserved extent, making it fail later.
9729		 */
9730		btrfs_dec_block_group_reservations(fs_info, ins.objectid);
9731		if (IS_ERR(trans)) {
9732			ret = PTR_ERR(trans);
9733			btrfs_free_reserved_extent(fs_info, ins.objectid,
9734						   ins.offset, 0);
9735			break;
9736		}
9737
9738		em = alloc_extent_map();
9739		if (!em) {
9740			btrfs_drop_extent_map_range(BTRFS_I(inode), cur_offset,
9741					    cur_offset + ins.offset - 1, false);
9742			btrfs_set_inode_full_sync(BTRFS_I(inode));
9743			goto next;
9744		}
9745
9746		em->start = cur_offset;
9747		em->orig_start = cur_offset;
9748		em->len = ins.offset;
9749		em->block_start = ins.objectid;
9750		em->block_len = ins.offset;
9751		em->orig_block_len = ins.offset;
9752		em->ram_bytes = ins.offset;
9753		em->flags |= EXTENT_FLAG_PREALLOC;
9754		em->generation = trans->transid;
9755
9756		ret = btrfs_replace_extent_map_range(BTRFS_I(inode), em, true);
9757		free_extent_map(em);
9758next:
9759		num_bytes -= ins.offset;
9760		cur_offset += ins.offset;
9761		*alloc_hint = ins.objectid + ins.offset;
9762
9763		inode_inc_iversion(inode);
9764		inode_set_ctime_current(inode);
9765		BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC;
9766		if (!(mode & FALLOC_FL_KEEP_SIZE) &&
9767		    (actual_len > inode->i_size) &&
9768		    (cur_offset > inode->i_size)) {
9769			if (cur_offset > actual_len)
9770				i_size = actual_len;
9771			else
9772				i_size = cur_offset;
9773			i_size_write(inode, i_size);
9774			btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0);
9775		}
9776
9777		ret = btrfs_update_inode(trans, BTRFS_I(inode));
9778
9779		if (ret) {
9780			btrfs_abort_transaction(trans, ret);
9781			if (own_trans)
9782				btrfs_end_transaction(trans);
9783			break;
9784		}
9785
9786		if (own_trans) {
9787			btrfs_end_transaction(trans);
9788			trans = NULL;
9789		}
9790	}
9791	if (clear_offset < end)
9792		btrfs_free_reserved_data_space(BTRFS_I(inode), NULL, clear_offset,
9793			end - clear_offset + 1);
9794	return ret;
9795}
9796
9797int btrfs_prealloc_file_range(struct inode *inode, int mode,
9798			      u64 start, u64 num_bytes, u64 min_size,
9799			      loff_t actual_len, u64 *alloc_hint)
9800{
9801	return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
9802					   min_size, actual_len, alloc_hint,
9803					   NULL);
9804}
9805
9806int btrfs_prealloc_file_range_trans(struct inode *inode,
9807				    struct btrfs_trans_handle *trans, int mode,
9808				    u64 start, u64 num_bytes, u64 min_size,
9809				    loff_t actual_len, u64 *alloc_hint)
9810{
9811	return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
9812					   min_size, actual_len, alloc_hint, trans);
9813}
9814
9815static int btrfs_permission(struct mnt_idmap *idmap,
9816			    struct inode *inode, int mask)
9817{
9818	struct btrfs_root *root = BTRFS_I(inode)->root;
9819	umode_t mode = inode->i_mode;
9820
9821	if (mask & MAY_WRITE &&
9822	    (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) {
9823		if (btrfs_root_readonly(root))
9824			return -EROFS;
9825		if (BTRFS_I(inode)->flags & BTRFS_INODE_READONLY)
9826			return -EACCES;
9827	}
9828	return generic_permission(idmap, inode, mask);
9829}
9830
9831static int btrfs_tmpfile(struct mnt_idmap *idmap, struct inode *dir,
9832			 struct file *file, umode_t mode)
9833{
9834	struct btrfs_fs_info *fs_info = inode_to_fs_info(dir);
9835	struct btrfs_trans_handle *trans;
9836	struct btrfs_root *root = BTRFS_I(dir)->root;
9837	struct inode *inode;
9838	struct btrfs_new_inode_args new_inode_args = {
9839		.dir = dir,
9840		.dentry = file->f_path.dentry,
9841		.orphan = true,
9842	};
9843	unsigned int trans_num_items;
9844	int ret;
9845
9846	inode = new_inode(dir->i_sb);
9847	if (!inode)
9848		return -ENOMEM;
9849	inode_init_owner(idmap, inode, dir, mode);
9850	inode->i_fop = &btrfs_file_operations;
9851	inode->i_op = &btrfs_file_inode_operations;
9852	inode->i_mapping->a_ops = &btrfs_aops;
9853
9854	new_inode_args.inode = inode;
9855	ret = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items);
9856	if (ret)
9857		goto out_inode;
9858
9859	trans = btrfs_start_transaction(root, trans_num_items);
9860	if (IS_ERR(trans)) {
9861		ret = PTR_ERR(trans);
9862		goto out_new_inode_args;
9863	}
9864
9865	ret = btrfs_create_new_inode(trans, &new_inode_args);
9866
9867	/*
9868	 * We set number of links to 0 in btrfs_create_new_inode(), and here we
9869	 * set it to 1 because d_tmpfile() will issue a warning if the count is
9870	 * 0, through:
9871	 *
9872	 *    d_tmpfile() -> inode_dec_link_count() -> drop_nlink()
9873	 */
9874	set_nlink(inode, 1);
9875
9876	if (!ret) {
9877		d_tmpfile(file, inode);
9878		unlock_new_inode(inode);
9879		mark_inode_dirty(inode);
9880	}
9881
9882	btrfs_end_transaction(trans);
9883	btrfs_btree_balance_dirty(fs_info);
9884out_new_inode_args:
9885	btrfs_new_inode_args_destroy(&new_inode_args);
9886out_inode:
9887	if (ret)
9888		iput(inode);
9889	return finish_open_simple(file, ret);
9890}
9891
9892void btrfs_set_range_writeback(struct btrfs_inode *inode, u64 start, u64 end)
9893{
9894	struct btrfs_fs_info *fs_info = inode->root->fs_info;
9895	unsigned long index = start >> PAGE_SHIFT;
9896	unsigned long end_index = end >> PAGE_SHIFT;
9897	struct page *page;
9898	u32 len;
9899
9900	ASSERT(end + 1 - start <= U32_MAX);
9901	len = end + 1 - start;
9902	while (index <= end_index) {
9903		page = find_get_page(inode->vfs_inode.i_mapping, index);
9904		ASSERT(page); /* Pages should be in the extent_io_tree */
9905
9906		/* This is for data, which doesn't yet support larger folio. */
9907		ASSERT(folio_order(page_folio(page)) == 0);
9908		btrfs_folio_set_writeback(fs_info, page_folio(page), start, len);
9909		put_page(page);
9910		index++;
9911	}
9912}
9913
9914int btrfs_encoded_io_compression_from_extent(struct btrfs_fs_info *fs_info,
9915					     int compress_type)
9916{
9917	switch (compress_type) {
9918	case BTRFS_COMPRESS_NONE:
9919		return BTRFS_ENCODED_IO_COMPRESSION_NONE;
9920	case BTRFS_COMPRESS_ZLIB:
9921		return BTRFS_ENCODED_IO_COMPRESSION_ZLIB;
9922	case BTRFS_COMPRESS_LZO:
9923		/*
9924		 * The LZO format depends on the sector size. 64K is the maximum
9925		 * sector size that we support.
9926		 */
9927		if (fs_info->sectorsize < SZ_4K || fs_info->sectorsize > SZ_64K)
9928			return -EINVAL;
9929		return BTRFS_ENCODED_IO_COMPRESSION_LZO_4K +
9930		       (fs_info->sectorsize_bits - 12);
9931	case BTRFS_COMPRESS_ZSTD:
9932		return BTRFS_ENCODED_IO_COMPRESSION_ZSTD;
9933	default:
9934		return -EUCLEAN;
9935	}
9936}
9937
9938static ssize_t btrfs_encoded_read_inline(
9939				struct kiocb *iocb,
9940				struct iov_iter *iter, u64 start,
9941				u64 lockend,
9942				struct extent_state **cached_state,
9943				u64 extent_start, size_t count,
9944				struct btrfs_ioctl_encoded_io_args *encoded,
9945				bool *unlocked)
9946{
9947	struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp));
9948	struct btrfs_root *root = inode->root;
9949	struct btrfs_fs_info *fs_info = root->fs_info;
9950	struct extent_io_tree *io_tree = &inode->io_tree;
9951	struct btrfs_path *path;
9952	struct extent_buffer *leaf;
9953	struct btrfs_file_extent_item *item;
9954	u64 ram_bytes;
9955	unsigned long ptr;
9956	void *tmp;
9957	ssize_t ret;
9958
9959	path = btrfs_alloc_path();
9960	if (!path) {
9961		ret = -ENOMEM;
9962		goto out;
9963	}
9964	ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(inode),
9965				       extent_start, 0);
9966	if (ret) {
9967		if (ret > 0) {
9968			/* The extent item disappeared? */
9969			ret = -EIO;
9970		}
9971		goto out;
9972	}
9973	leaf = path->nodes[0];
9974	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);
9975
9976	ram_bytes = btrfs_file_extent_ram_bytes(leaf, item);
9977	ptr = btrfs_file_extent_inline_start(item);
9978
9979	encoded->len = min_t(u64, extent_start + ram_bytes,
9980			     inode->vfs_inode.i_size) - iocb->ki_pos;
9981	ret = btrfs_encoded_io_compression_from_extent(fs_info,
9982				 btrfs_file_extent_compression(leaf, item));
9983	if (ret < 0)
9984		goto out;
9985	encoded->compression = ret;
9986	if (encoded->compression) {
9987		size_t inline_size;
9988
9989		inline_size = btrfs_file_extent_inline_item_len(leaf,
9990								path->slots[0]);
9991		if (inline_size > count) {
9992			ret = -ENOBUFS;
9993			goto out;
9994		}
9995		count = inline_size;
9996		encoded->unencoded_len = ram_bytes;
9997		encoded->unencoded_offset = iocb->ki_pos - extent_start;
9998	} else {
9999		count = min_t(u64, count, encoded->len);
10000		encoded->len = count;
10001		encoded->unencoded_len = count;
10002		ptr += iocb->ki_pos - extent_start;
10003	}
10004
10005	tmp = kmalloc(count, GFP_NOFS);
10006	if (!tmp) {
10007		ret = -ENOMEM;
10008		goto out;
10009	}
10010	read_extent_buffer(leaf, tmp, ptr, count);
10011	btrfs_release_path(path);
10012	unlock_extent(io_tree, start, lockend, cached_state);
10013	btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
10014	*unlocked = true;
10015
10016	ret = copy_to_iter(tmp, count, iter);
10017	if (ret != count)
10018		ret = -EFAULT;
10019	kfree(tmp);
10020out:
10021	btrfs_free_path(path);
10022	return ret;
10023}
10024
10025struct btrfs_encoded_read_private {
10026	wait_queue_head_t wait;
10027	atomic_t pending;
10028	blk_status_t status;
10029};
10030
10031static void btrfs_encoded_read_endio(struct btrfs_bio *bbio)
10032{
10033	struct btrfs_encoded_read_private *priv = bbio->private;
10034
10035	if (bbio->bio.bi_status) {
10036		/*
10037		 * The memory barrier implied by the atomic_dec_return() here
10038		 * pairs with the memory barrier implied by the
10039		 * atomic_dec_return() or io_wait_event() in
10040		 * btrfs_encoded_read_regular_fill_pages() to ensure that this
10041		 * write is observed before the load of status in
10042		 * btrfs_encoded_read_regular_fill_pages().
10043		 */
10044		WRITE_ONCE(priv->status, bbio->bio.bi_status);
10045	}
10046	if (!atomic_dec_return(&priv->pending))
10047		wake_up(&priv->wait);
10048	bio_put(&bbio->bio);
10049}
10050
10051int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode,
10052					  u64 file_offset, u64 disk_bytenr,
10053					  u64 disk_io_size, struct page **pages)
10054{
10055	struct btrfs_fs_info *fs_info = inode->root->fs_info;
10056	struct btrfs_encoded_read_private priv = {
10057		.pending = ATOMIC_INIT(1),
10058	};
10059	unsigned long i = 0;
10060	struct btrfs_bio *bbio;
10061
10062	init_waitqueue_head(&priv.wait);
10063
10064	bbio = btrfs_bio_alloc(BIO_MAX_VECS, REQ_OP_READ, fs_info,
10065			       btrfs_encoded_read_endio, &priv);
10066	bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
10067	bbio->inode = inode;
10068
10069	do {
10070		size_t bytes = min_t(u64, disk_io_size, PAGE_SIZE);
10071
10072		if (bio_add_page(&bbio->bio, pages[i], bytes, 0) < bytes) {
10073			atomic_inc(&priv.pending);
10074			btrfs_submit_bio(bbio, 0);
10075
10076			bbio = btrfs_bio_alloc(BIO_MAX_VECS, REQ_OP_READ, fs_info,
10077					       btrfs_encoded_read_endio, &priv);
10078			bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
10079			bbio->inode = inode;
10080			continue;
10081		}
10082
10083		i++;
10084		disk_bytenr += bytes;
10085		disk_io_size -= bytes;
10086	} while (disk_io_size);
10087
10088	atomic_inc(&priv.pending);
10089	btrfs_submit_bio(bbio, 0);
10090
10091	if (atomic_dec_return(&priv.pending))
10092		io_wait_event(priv.wait, !atomic_read(&priv.pending));
10093	/* See btrfs_encoded_read_endio() for ordering. */
10094	return blk_status_to_errno(READ_ONCE(priv.status));
10095}
10096
10097static ssize_t btrfs_encoded_read_regular(struct kiocb *iocb,
10098					  struct iov_iter *iter,
10099					  u64 start, u64 lockend,
10100					  struct extent_state **cached_state,
10101					  u64 disk_bytenr, u64 disk_io_size,
10102					  size_t count, bool compressed,
10103					  bool *unlocked)
10104{
10105	struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp));
10106	struct extent_io_tree *io_tree = &inode->io_tree;
10107	struct page **pages;
10108	unsigned long nr_pages, i;
10109	u64 cur;
10110	size_t page_offset;
10111	ssize_t ret;
10112
10113	nr_pages = DIV_ROUND_UP(disk_io_size, PAGE_SIZE);
10114	pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
10115	if (!pages)
10116		return -ENOMEM;
10117	ret = btrfs_alloc_page_array(nr_pages, pages, 0);
10118	if (ret) {
10119		ret = -ENOMEM;
10120		goto out;
10121		}
10122
10123	ret = btrfs_encoded_read_regular_fill_pages(inode, start, disk_bytenr,
10124						    disk_io_size, pages);
10125	if (ret)
10126		goto out;
10127
10128	unlock_extent(io_tree, start, lockend, cached_state);
10129	btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
10130	*unlocked = true;
10131
10132	if (compressed) {
10133		i = 0;
10134		page_offset = 0;
10135	} else {
10136		i = (iocb->ki_pos - start) >> PAGE_SHIFT;
10137		page_offset = (iocb->ki_pos - start) & (PAGE_SIZE - 1);
10138	}
10139	cur = 0;
10140	while (cur < count) {
10141		size_t bytes = min_t(size_t, count - cur,
10142				     PAGE_SIZE - page_offset);
10143
10144		if (copy_page_to_iter(pages[i], page_offset, bytes,
10145				      iter) != bytes) {
10146			ret = -EFAULT;
10147			goto out;
10148		}
10149		i++;
10150		cur += bytes;
10151		page_offset = 0;
10152	}
10153	ret = count;
10154out:
10155	for (i = 0; i < nr_pages; i++) {
10156		if (pages[i])
10157			__free_page(pages[i]);
10158	}
10159	kfree(pages);
10160	return ret;
10161}
10162
10163ssize_t btrfs_encoded_read(struct kiocb *iocb, struct iov_iter *iter,
10164			   struct btrfs_ioctl_encoded_io_args *encoded)
10165{
10166	struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp));
10167	struct btrfs_fs_info *fs_info = inode->root->fs_info;
10168	struct extent_io_tree *io_tree = &inode->io_tree;
10169	ssize_t ret;
10170	size_t count = iov_iter_count(iter);
10171	u64 start, lockend, disk_bytenr, disk_io_size;
10172	struct extent_state *cached_state = NULL;
10173	struct extent_map *em;
10174	bool unlocked = false;
10175
10176	file_accessed(iocb->ki_filp);
10177
10178	btrfs_inode_lock(inode, BTRFS_ILOCK_SHARED);
10179
10180	if (iocb->ki_pos >= inode->vfs_inode.i_size) {
10181		btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
10182		return 0;
10183	}
10184	start = ALIGN_DOWN(iocb->ki_pos, fs_info->sectorsize);
10185	/*
10186	 * We don't know how long the extent containing iocb->ki_pos is, but if
10187	 * it's compressed we know that it won't be longer than this.
10188	 */
10189	lockend = start + BTRFS_MAX_UNCOMPRESSED - 1;
10190
10191	for (;;) {
10192		struct btrfs_ordered_extent *ordered;
10193
10194		ret = btrfs_wait_ordered_range(&inode->vfs_inode, start,
10195					       lockend - start + 1);
10196		if (ret)
10197			goto out_unlock_inode;
10198		lock_extent(io_tree, start, lockend, &cached_state);
10199		ordered = btrfs_lookup_ordered_range(inode, start,
10200						     lockend - start + 1);
10201		if (!ordered)
10202			break;
10203		btrfs_put_ordered_extent(ordered);
10204		unlock_extent(io_tree, start, lockend, &cached_state);
10205		cond_resched();
10206	}
10207
10208	em = btrfs_get_extent(inode, NULL, start, lockend - start + 1);
10209	if (IS_ERR(em)) {
10210		ret = PTR_ERR(em);
10211		goto out_unlock_extent;
10212	}
10213
10214	if (em->block_start == EXTENT_MAP_INLINE) {
10215		u64 extent_start = em->start;
10216
10217		/*
10218		 * For inline extents we get everything we need out of the
10219		 * extent item.
10220		 */
10221		free_extent_map(em);
10222		em = NULL;
10223		ret = btrfs_encoded_read_inline(iocb, iter, start, lockend,
10224						&cached_state, extent_start,
10225						count, encoded, &unlocked);
10226		goto out;
10227	}
10228
10229	/*
10230	 * We only want to return up to EOF even if the extent extends beyond
10231	 * that.
10232	 */
10233	encoded->len = min_t(u64, extent_map_end(em),
10234			     inode->vfs_inode.i_size) - iocb->ki_pos;
10235	if (em->block_start == EXTENT_MAP_HOLE ||
10236	    (em->flags & EXTENT_FLAG_PREALLOC)) {
10237		disk_bytenr = EXTENT_MAP_HOLE;
10238		count = min_t(u64, count, encoded->len);
10239		encoded->len = count;
10240		encoded->unencoded_len = count;
10241	} else if (extent_map_is_compressed(em)) {
10242		disk_bytenr = em->block_start;
10243		/*
10244		 * Bail if the buffer isn't large enough to return the whole
10245		 * compressed extent.
10246		 */
10247		if (em->block_len > count) {
10248			ret = -ENOBUFS;
10249			goto out_em;
10250		}
10251		disk_io_size = em->block_len;
10252		count = em->block_len;
10253		encoded->unencoded_len = em->ram_bytes;
10254		encoded->unencoded_offset = iocb->ki_pos - em->orig_start;
10255		ret = btrfs_encoded_io_compression_from_extent(fs_info,
10256							       extent_map_compression(em));
10257		if (ret < 0)
10258			goto out_em;
10259		encoded->compression = ret;
10260	} else {
10261		disk_bytenr = em->block_start + (start - em->start);
10262		if (encoded->len > count)
10263			encoded->len = count;
10264		/*
10265		 * Don't read beyond what we locked. This also limits the page
10266		 * allocations that we'll do.
10267		 */
10268		disk_io_size = min(lockend + 1, iocb->ki_pos + encoded->len) - start;
10269		count = start + disk_io_size - iocb->ki_pos;
10270		encoded->len = count;
10271		encoded->unencoded_len = count;
10272		disk_io_size = ALIGN(disk_io_size, fs_info->sectorsize);
10273	}
10274	free_extent_map(em);
10275	em = NULL;
10276
10277	if (disk_bytenr == EXTENT_MAP_HOLE) {
10278		unlock_extent(io_tree, start, lockend, &cached_state);
10279		btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
10280		unlocked = true;
10281		ret = iov_iter_zero(count, iter);
10282		if (ret != count)
10283			ret = -EFAULT;
10284	} else {
10285		ret = btrfs_encoded_read_regular(iocb, iter, start, lockend,
10286						 &cached_state, disk_bytenr,
10287						 disk_io_size, count,
10288						 encoded->compression,
10289						 &unlocked);
10290	}
10291
10292out:
10293	if (ret >= 0)
10294		iocb->ki_pos += encoded->len;
10295out_em:
10296	free_extent_map(em);
10297out_unlock_extent:
10298	if (!unlocked)
10299		unlock_extent(io_tree, start, lockend, &cached_state);
10300out_unlock_inode:
10301	if (!unlocked)
10302		btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
10303	return ret;
10304}
10305
10306ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from,
10307			       const struct btrfs_ioctl_encoded_io_args *encoded)
10308{
10309	struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp));
10310	struct btrfs_root *root = inode->root;
10311	struct btrfs_fs_info *fs_info = root->fs_info;
10312	struct extent_io_tree *io_tree = &inode->io_tree;
10313	struct extent_changeset *data_reserved = NULL;
10314	struct extent_state *cached_state = NULL;
10315	struct btrfs_ordered_extent *ordered;
10316	int compression;
10317	size_t orig_count;
10318	u64 start, end;
10319	u64 num_bytes, ram_bytes, disk_num_bytes;
10320	unsigned long nr_pages, i;
10321	struct page **pages;
10322	struct btrfs_key ins;
10323	bool extent_reserved = false;
10324	struct extent_map *em;
10325	ssize_t ret;
10326
10327	switch (encoded->compression) {
10328	case BTRFS_ENCODED_IO_COMPRESSION_ZLIB:
10329		compression = BTRFS_COMPRESS_ZLIB;
10330		break;
10331	case BTRFS_ENCODED_IO_COMPRESSION_ZSTD:
10332		compression = BTRFS_COMPRESS_ZSTD;
10333		break;
10334	case BTRFS_ENCODED_IO_COMPRESSION_LZO_4K:
10335	case BTRFS_ENCODED_IO_COMPRESSION_LZO_8K:
10336	case BTRFS_ENCODED_IO_COMPRESSION_LZO_16K:
10337	case BTRFS_ENCODED_IO_COMPRESSION_LZO_32K:
10338	case BTRFS_ENCODED_IO_COMPRESSION_LZO_64K:
10339		/* The sector size must match for LZO. */
10340		if (encoded->compression -
10341		    BTRFS_ENCODED_IO_COMPRESSION_LZO_4K + 12 !=
10342		    fs_info->sectorsize_bits)
10343			return -EINVAL;
10344		compression = BTRFS_COMPRESS_LZO;
10345		break;
10346	default:
10347		return -EINVAL;
10348	}
10349	if (encoded->encryption != BTRFS_ENCODED_IO_ENCRYPTION_NONE)
10350		return -EINVAL;
10351
10352	/*
10353	 * Compressed extents should always have checksums, so error out if we
10354	 * have a NOCOW file or inode was created while mounted with NODATASUM.
10355	 */
10356	if (inode->flags & BTRFS_INODE_NODATASUM)
10357		return -EINVAL;
10358
10359	orig_count = iov_iter_count(from);
10360
10361	/* The extent size must be sane. */
10362	if (encoded->unencoded_len > BTRFS_MAX_UNCOMPRESSED ||
10363	    orig_count > BTRFS_MAX_COMPRESSED || orig_count == 0)
10364		return -EINVAL;
10365
10366	/*
10367	 * The compressed data must be smaller than the decompressed data.
10368	 *
10369	 * It's of course possible for data to compress to larger or the same
10370	 * size, but the buffered I/O path falls back to no compression for such
10371	 * data, and we don't want to break any assumptions by creating these
10372	 * extents.
10373	 *
10374	 * Note that this is less strict than the current check we have that the
10375	 * compressed data must be at least one sector smaller than the
10376	 * decompressed data. We only want to enforce the weaker requirement
10377	 * from old kernels that it is at least one byte smaller.
10378	 */
10379	if (orig_count >= encoded->unencoded_len)
10380		return -EINVAL;
10381
10382	/* The extent must start on a sector boundary. */
10383	start = iocb->ki_pos;
10384	if (!IS_ALIGNED(start, fs_info->sectorsize))
10385		return -EINVAL;
10386
10387	/*
10388	 * The extent must end on a sector boundary. However, we allow a write
10389	 * which ends at or extends i_size to have an unaligned length; we round
10390	 * up the extent size and set i_size to the unaligned end.
10391	 */
10392	if (start + encoded->len < inode->vfs_inode.i_size &&
10393	    !IS_ALIGNED(start + encoded->len, fs_info->sectorsize))
10394		return -EINVAL;
10395
10396	/* Finally, the offset in the unencoded data must be sector-aligned. */
10397	if (!IS_ALIGNED(encoded->unencoded_offset, fs_info->sectorsize))
10398		return -EINVAL;
10399
10400	num_bytes = ALIGN(encoded->len, fs_info->sectorsize);
10401	ram_bytes = ALIGN(encoded->unencoded_len, fs_info->sectorsize);
10402	end = start + num_bytes - 1;
10403
10404	/*
10405	 * If the extent cannot be inline, the compressed data on disk must be
10406	 * sector-aligned. For convenience, we extend it with zeroes if it
10407	 * isn't.
10408	 */
10409	disk_num_bytes = ALIGN(orig_count, fs_info->sectorsize);
10410	nr_pages = DIV_ROUND_UP(disk_num_bytes, PAGE_SIZE);
10411	pages = kvcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL_ACCOUNT);
10412	if (!pages)
10413		return -ENOMEM;
10414	for (i = 0; i < nr_pages; i++) {
10415		size_t bytes = min_t(size_t, PAGE_SIZE, iov_iter_count(from));
10416		char *kaddr;
10417
10418		pages[i] = alloc_page(GFP_KERNEL_ACCOUNT);
10419		if (!pages[i]) {
10420			ret = -ENOMEM;
10421			goto out_pages;
10422		}
10423		kaddr = kmap_local_page(pages[i]);
10424		if (copy_from_iter(kaddr, bytes, from) != bytes) {
10425			kunmap_local(kaddr);
10426			ret = -EFAULT;
10427			goto out_pages;
10428		}
10429		if (bytes < PAGE_SIZE)
10430			memset(kaddr + bytes, 0, PAGE_SIZE - bytes);
10431		kunmap_local(kaddr);
10432	}
10433
10434	for (;;) {
10435		struct btrfs_ordered_extent *ordered;
10436
10437		ret = btrfs_wait_ordered_range(&inode->vfs_inode, start, num_bytes);
10438		if (ret)
10439			goto out_pages;
10440		ret = invalidate_inode_pages2_range(inode->vfs_inode.i_mapping,
10441						    start >> PAGE_SHIFT,
10442						    end >> PAGE_SHIFT);
10443		if (ret)
10444			goto out_pages;
10445		lock_extent(io_tree, start, end, &cached_state);
10446		ordered = btrfs_lookup_ordered_range(inode, start, num_bytes);
10447		if (!ordered &&
10448		    !filemap_range_has_page(inode->vfs_inode.i_mapping, start, end))
10449			break;
10450		if (ordered)
10451			btrfs_put_ordered_extent(ordered);
10452		unlock_extent(io_tree, start, end, &cached_state);
10453		cond_resched();
10454	}
10455
10456	/*
10457	 * We don't use the higher-level delalloc space functions because our
10458	 * num_bytes and disk_num_bytes are different.
10459	 */
10460	ret = btrfs_alloc_data_chunk_ondemand(inode, disk_num_bytes);
10461	if (ret)
10462		goto out_unlock;
10463	ret = btrfs_qgroup_reserve_data(inode, &data_reserved, start, num_bytes);
10464	if (ret)
10465		goto out_free_data_space;
10466	ret = btrfs_delalloc_reserve_metadata(inode, num_bytes, disk_num_bytes,
10467					      false);
10468	if (ret)
10469		goto out_qgroup_free_data;
10470
10471	/* Try an inline extent first. */
10472	if (start == 0 && encoded->unencoded_len == encoded->len &&
10473	    encoded->unencoded_offset == 0) {
10474		ret = cow_file_range_inline(inode, encoded->len, orig_count,
10475					    compression, pages, true);
10476		if (ret <= 0) {
10477			if (ret == 0)
10478				ret = orig_count;
10479			goto out_delalloc_release;
10480		}
10481	}
10482
10483	ret = btrfs_reserve_extent(root, disk_num_bytes, disk_num_bytes,
10484				   disk_num_bytes, 0, 0, &ins, 1, 1);
10485	if (ret)
10486		goto out_delalloc_release;
10487	extent_reserved = true;
10488
10489	em = create_io_em(inode, start, num_bytes,
10490			  start - encoded->unencoded_offset, ins.objectid,
10491			  ins.offset, ins.offset, ram_bytes, compression,
10492			  BTRFS_ORDERED_COMPRESSED);
10493	if (IS_ERR(em)) {
10494		ret = PTR_ERR(em);
10495		goto out_free_reserved;
10496	}
10497	free_extent_map(em);
10498
10499	ordered = btrfs_alloc_ordered_extent(inode, start, num_bytes, ram_bytes,
10500				       ins.objectid, ins.offset,
10501				       encoded->unencoded_offset,
10502				       (1 << BTRFS_ORDERED_ENCODED) |
10503				       (1 << BTRFS_ORDERED_COMPRESSED),
10504				       compression);
10505	if (IS_ERR(ordered)) {
10506		btrfs_drop_extent_map_range(inode, start, end, false);
10507		ret = PTR_ERR(ordered);
10508		goto out_free_reserved;
10509	}
10510	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
10511
10512	if (start + encoded->len > inode->vfs_inode.i_size)
10513		i_size_write(&inode->vfs_inode, start + encoded->len);
10514
10515	unlock_extent(io_tree, start, end, &cached_state);
10516
10517	btrfs_delalloc_release_extents(inode, num_bytes);
10518
10519	btrfs_submit_compressed_write(ordered, pages, nr_pages, 0, false);
10520	ret = orig_count;
10521	goto out;
10522
10523out_free_reserved:
10524	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
10525	btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
10526out_delalloc_release:
10527	btrfs_delalloc_release_extents(inode, num_bytes);
10528	btrfs_delalloc_release_metadata(inode, disk_num_bytes, ret < 0);
10529out_qgroup_free_data:
10530	if (ret < 0)
10531		btrfs_qgroup_free_data(inode, data_reserved, start, num_bytes, NULL);
10532out_free_data_space:
10533	/*
10534	 * If btrfs_reserve_extent() succeeded, then we already decremented
10535	 * bytes_may_use.
10536	 */
10537	if (!extent_reserved)
10538		btrfs_free_reserved_data_space_noquota(fs_info, disk_num_bytes);
10539out_unlock:
10540	unlock_extent(io_tree, start, end, &cached_state);
10541out_pages:
10542	for (i = 0; i < nr_pages; i++) {
10543		if (pages[i])
10544			__free_page(pages[i]);
10545	}
10546	kvfree(pages);
10547out:
10548	if (ret >= 0)
10549		iocb->ki_pos += encoded->len;
10550	return ret;
10551}
10552
10553#ifdef CONFIG_SWAP
10554/*
10555 * Add an entry indicating a block group or device which is pinned by a
10556 * swapfile. Returns 0 on success, 1 if there is already an entry for it, or a
10557 * negative errno on failure.
10558 */
10559static int btrfs_add_swapfile_pin(struct inode *inode, void *ptr,
10560				  bool is_block_group)
10561{
10562	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
10563	struct btrfs_swapfile_pin *sp, *entry;
10564	struct rb_node **p;
10565	struct rb_node *parent = NULL;
10566
10567	sp = kmalloc(sizeof(*sp), GFP_NOFS);
10568	if (!sp)
10569		return -ENOMEM;
10570	sp->ptr = ptr;
10571	sp->inode = inode;
10572	sp->is_block_group = is_block_group;
10573	sp->bg_extent_count = 1;
10574
10575	spin_lock(&fs_info->swapfile_pins_lock);
10576	p = &fs_info->swapfile_pins.rb_node;
10577	while (*p) {
10578		parent = *p;
10579		entry = rb_entry(parent, struct btrfs_swapfile_pin, node);
10580		if (sp->ptr < entry->ptr ||
10581		    (sp->ptr == entry->ptr && sp->inode < entry->inode)) {
10582			p = &(*p)->rb_left;
10583		} else if (sp->ptr > entry->ptr ||
10584			   (sp->ptr == entry->ptr && sp->inode > entry->inode)) {
10585			p = &(*p)->rb_right;
10586		} else {
10587			if (is_block_group)
10588				entry->bg_extent_count++;
10589			spin_unlock(&fs_info->swapfile_pins_lock);
10590			kfree(sp);
10591			return 1;
10592		}
10593	}
10594	rb_link_node(&sp->node, parent, p);
10595	rb_insert_color(&sp->node, &fs_info->swapfile_pins);
10596	spin_unlock(&fs_info->swapfile_pins_lock);
10597	return 0;
10598}
10599
10600/* Free all of the entries pinned by this swapfile. */
10601static void btrfs_free_swapfile_pins(struct inode *inode)
10602{
10603	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
10604	struct btrfs_swapfile_pin *sp;
10605	struct rb_node *node, *next;
10606
10607	spin_lock(&fs_info->swapfile_pins_lock);
10608	node = rb_first(&fs_info->swapfile_pins);
10609	while (node) {
10610		next = rb_next(node);
10611		sp = rb_entry(node, struct btrfs_swapfile_pin, node);
10612		if (sp->inode == inode) {
10613			rb_erase(&sp->node, &fs_info->swapfile_pins);
10614			if (sp->is_block_group) {
10615				btrfs_dec_block_group_swap_extents(sp->ptr,
10616							   sp->bg_extent_count);
10617				btrfs_put_block_group(sp->ptr);
10618			}
10619			kfree(sp);
10620		}
10621		node = next;
10622	}
10623	spin_unlock(&fs_info->swapfile_pins_lock);
10624}
10625
10626struct btrfs_swap_info {
10627	u64 start;
10628	u64 block_start;
10629	u64 block_len;
10630	u64 lowest_ppage;
10631	u64 highest_ppage;
10632	unsigned long nr_pages;
10633	int nr_extents;
10634};
10635
10636static int btrfs_add_swap_extent(struct swap_info_struct *sis,
10637				 struct btrfs_swap_info *bsi)
10638{
10639	unsigned long nr_pages;
10640	unsigned long max_pages;
10641	u64 first_ppage, first_ppage_reported, next_ppage;
10642	int ret;
10643
10644	/*
10645	 * Our swapfile may have had its size extended after the swap header was
10646	 * written. In that case activating the swapfile should not go beyond
10647	 * the max size set in the swap header.
10648	 */
10649	if (bsi->nr_pages >= sis->max)
10650		return 0;
10651
10652	max_pages = sis->max - bsi->nr_pages;
10653	first_ppage = PAGE_ALIGN(bsi->block_start) >> PAGE_SHIFT;
10654	next_ppage = PAGE_ALIGN_DOWN(bsi->block_start + bsi->block_len) >> PAGE_SHIFT;
10655
10656	if (first_ppage >= next_ppage)
10657		return 0;
10658	nr_pages = next_ppage - first_ppage;
10659	nr_pages = min(nr_pages, max_pages);
10660
10661	first_ppage_reported = first_ppage;
10662	if (bsi->start == 0)
10663		first_ppage_reported++;
10664	if (bsi->lowest_ppage > first_ppage_reported)
10665		bsi->lowest_ppage = first_ppage_reported;
10666	if (bsi->highest_ppage < (next_ppage - 1))
10667		bsi->highest_ppage = next_ppage - 1;
10668
10669	ret = add_swap_extent(sis, bsi->nr_pages, nr_pages, first_ppage);
10670	if (ret < 0)
10671		return ret;
10672	bsi->nr_extents += ret;
10673	bsi->nr_pages += nr_pages;
10674	return 0;
10675}
10676
10677static void btrfs_swap_deactivate(struct file *file)
10678{
10679	struct inode *inode = file_inode(file);
10680
10681	btrfs_free_swapfile_pins(inode);
10682	atomic_dec(&BTRFS_I(inode)->root->nr_swapfiles);
10683}
10684
10685static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
10686			       sector_t *span)
10687{
10688	struct inode *inode = file_inode(file);
10689	struct btrfs_root *root = BTRFS_I(inode)->root;
10690	struct btrfs_fs_info *fs_info = root->fs_info;
10691	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
10692	struct extent_state *cached_state = NULL;
10693	struct extent_map *em = NULL;
10694	struct btrfs_chunk_map *map = NULL;
10695	struct btrfs_device *device = NULL;
10696	struct btrfs_swap_info bsi = {
10697		.lowest_ppage = (sector_t)-1ULL,
10698	};
10699	int ret = 0;
10700	u64 isize;
10701	u64 start;
10702
10703	/*
10704	 * If the swap file was just created, make sure delalloc is done. If the
10705	 * file changes again after this, the user is doing something stupid and
10706	 * we don't really care.
10707	 */
10708	ret = btrfs_wait_ordered_range(inode, 0, (u64)-1);
10709	if (ret)
10710		return ret;
10711
10712	/*
10713	 * The inode is locked, so these flags won't change after we check them.
10714	 */
10715	if (BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS) {
10716		btrfs_warn(fs_info, "swapfile must not be compressed");
10717		return -EINVAL;
10718	}
10719	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW)) {
10720		btrfs_warn(fs_info, "swapfile must not be copy-on-write");
10721		return -EINVAL;
10722	}
10723	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
10724		btrfs_warn(fs_info, "swapfile must not be checksummed");
10725		return -EINVAL;
10726	}
10727
10728	/*
10729	 * Balance or device remove/replace/resize can move stuff around from
10730	 * under us. The exclop protection makes sure they aren't running/won't
10731	 * run concurrently while we are mapping the swap extents, and
10732	 * fs_info->swapfile_pins prevents them from running while the swap
10733	 * file is active and moving the extents. Note that this also prevents
10734	 * a concurrent device add which isn't actually necessary, but it's not
10735	 * really worth the trouble to allow it.
10736	 */
10737	if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_SWAP_ACTIVATE)) {
10738		btrfs_warn(fs_info,
10739	   "cannot activate swapfile while exclusive operation is running");
10740		return -EBUSY;
10741	}
10742
10743	/*
10744	 * Prevent snapshot creation while we are activating the swap file.
10745	 * We do not want to race with snapshot creation. If snapshot creation
10746	 * already started before we bumped nr_swapfiles from 0 to 1 and
10747	 * completes before the first write into the swap file after it is
10748	 * activated, than that write would fallback to COW.
10749	 */
10750	if (!btrfs_drew_try_write_lock(&root->snapshot_lock)) {
10751		btrfs_exclop_finish(fs_info);
10752		btrfs_warn(fs_info,
10753	   "cannot activate swapfile because snapshot creation is in progress");
10754		return -EINVAL;
10755	}
10756	/*
10757	 * Snapshots can create extents which require COW even if NODATACOW is
10758	 * set. We use this counter to prevent snapshots. We must increment it
10759	 * before walking the extents because we don't want a concurrent
10760	 * snapshot to run after we've already checked the extents.
10761	 *
10762	 * It is possible that subvolume is marked for deletion but still not
10763	 * removed yet. To prevent this race, we check the root status before
10764	 * activating the swapfile.
10765	 */
10766	spin_lock(&root->root_item_lock);
10767	if (btrfs_root_dead(root)) {
10768		spin_unlock(&root->root_item_lock);
10769
10770		btrfs_exclop_finish(fs_info);
10771		btrfs_warn(fs_info,
10772		"cannot activate swapfile because subvolume %llu is being deleted",
10773			root->root_key.objectid);
10774		return -EPERM;
10775	}
10776	atomic_inc(&root->nr_swapfiles);
10777	spin_unlock(&root->root_item_lock);
10778
10779	isize = ALIGN_DOWN(inode->i_size, fs_info->sectorsize);
10780
10781	lock_extent(io_tree, 0, isize - 1, &cached_state);
10782	start = 0;
10783	while (start < isize) {
10784		u64 logical_block_start, physical_block_start;
10785		struct btrfs_block_group *bg;
10786		u64 len = isize - start;
10787
10788		em = btrfs_get_extent(BTRFS_I(inode), NULL, start, len);
10789		if (IS_ERR(em)) {
10790			ret = PTR_ERR(em);
10791			goto out;
10792		}
10793
10794		if (em->block_start == EXTENT_MAP_HOLE) {
10795			btrfs_warn(fs_info, "swapfile must not have holes");
10796			ret = -EINVAL;
10797			goto out;
10798		}
10799		if (em->block_start == EXTENT_MAP_INLINE) {
10800			/*
10801			 * It's unlikely we'll ever actually find ourselves
10802			 * here, as a file small enough to fit inline won't be
10803			 * big enough to store more than the swap header, but in
10804			 * case something changes in the future, let's catch it
10805			 * here rather than later.
10806			 */
10807			btrfs_warn(fs_info, "swapfile must not be inline");
10808			ret = -EINVAL;
10809			goto out;
10810		}
10811		if (extent_map_is_compressed(em)) {
10812			btrfs_warn(fs_info, "swapfile must not be compressed");
10813			ret = -EINVAL;
10814			goto out;
10815		}
10816
10817		logical_block_start = em->block_start + (start - em->start);
10818		len = min(len, em->len - (start - em->start));
10819		free_extent_map(em);
10820		em = NULL;
10821
10822		ret = can_nocow_extent(inode, start, &len, NULL, NULL, NULL, false, true);
10823		if (ret < 0) {
10824			goto out;
10825		} else if (ret) {
10826			ret = 0;
10827		} else {
10828			btrfs_warn(fs_info,
10829				   "swapfile must not be copy-on-write");
10830			ret = -EINVAL;
10831			goto out;
10832		}
10833
10834		map = btrfs_get_chunk_map(fs_info, logical_block_start, len);
10835		if (IS_ERR(map)) {
10836			ret = PTR_ERR(map);
10837			goto out;
10838		}
10839
10840		if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
10841			btrfs_warn(fs_info,
10842				   "swapfile must have single data profile");
10843			ret = -EINVAL;
10844			goto out;
10845		}
10846
10847		if (device == NULL) {
10848			device = map->stripes[0].dev;
10849			ret = btrfs_add_swapfile_pin(inode, device, false);
10850			if (ret == 1)
10851				ret = 0;
10852			else if (ret)
10853				goto out;
10854		} else if (device != map->stripes[0].dev) {
10855			btrfs_warn(fs_info, "swapfile must be on one device");
10856			ret = -EINVAL;
10857			goto out;
10858		}
10859
10860		physical_block_start = (map->stripes[0].physical +
10861					(logical_block_start - map->start));
10862		len = min(len, map->chunk_len - (logical_block_start - map->start));
10863		btrfs_free_chunk_map(map);
10864		map = NULL;
10865
10866		bg = btrfs_lookup_block_group(fs_info, logical_block_start);
10867		if (!bg) {
10868			btrfs_warn(fs_info,
10869			   "could not find block group containing swapfile");
10870			ret = -EINVAL;
10871			goto out;
10872		}
10873
10874		if (!btrfs_inc_block_group_swap_extents(bg)) {
10875			btrfs_warn(fs_info,
10876			   "block group for swapfile at %llu is read-only%s",
10877			   bg->start,
10878			   atomic_read(&fs_info->scrubs_running) ?
10879				       " (scrub running)" : "");
10880			btrfs_put_block_group(bg);
10881			ret = -EINVAL;
10882			goto out;
10883		}
10884
10885		ret = btrfs_add_swapfile_pin(inode, bg, true);
10886		if (ret) {
10887			btrfs_put_block_group(bg);
10888			if (ret == 1)
10889				ret = 0;
10890			else
10891				goto out;
10892		}
10893
10894		if (bsi.block_len &&
10895		    bsi.block_start + bsi.block_len == physical_block_start) {
10896			bsi.block_len += len;
10897		} else {
10898			if (bsi.block_len) {
10899				ret = btrfs_add_swap_extent(sis, &bsi);
10900				if (ret)
10901					goto out;
10902			}
10903			bsi.start = start;
10904			bsi.block_start = physical_block_start;
10905			bsi.block_len = len;
10906		}
10907
10908		start += len;
10909	}
10910
10911	if (bsi.block_len)
10912		ret = btrfs_add_swap_extent(sis, &bsi);
10913
10914out:
10915	if (!IS_ERR_OR_NULL(em))
10916		free_extent_map(em);
10917	if (!IS_ERR_OR_NULL(map))
10918		btrfs_free_chunk_map(map);
10919
10920	unlock_extent(io_tree, 0, isize - 1, &cached_state);
10921
10922	if (ret)
10923		btrfs_swap_deactivate(file);
10924
10925	btrfs_drew_write_unlock(&root->snapshot_lock);
10926
10927	btrfs_exclop_finish(fs_info);
10928
10929	if (ret)
10930		return ret;
10931
10932	if (device)
10933		sis->bdev = device->bdev;
10934	*span = bsi.highest_ppage - bsi.lowest_ppage + 1;
10935	sis->max = bsi.nr_pages;
10936	sis->pages = bsi.nr_pages - 1;
10937	sis->highest_bit = bsi.nr_pages - 1;
10938	return bsi.nr_extents;
10939}
10940#else
10941static void btrfs_swap_deactivate(struct file *file)
10942{
10943}
10944
10945static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
10946			       sector_t *span)
10947{
10948	return -EOPNOTSUPP;
10949}
10950#endif
10951
10952/*
10953 * Update the number of bytes used in the VFS' inode. When we replace extents in
10954 * a range (clone, dedupe, fallocate's zero range), we must update the number of
10955 * bytes used by the inode in an atomic manner, so that concurrent stat(2) calls
10956 * always get a correct value.
10957 */
10958void btrfs_update_inode_bytes(struct btrfs_inode *inode,
10959			      const u64 add_bytes,
10960			      const u64 del_bytes)
10961{
10962	if (add_bytes == del_bytes)
10963		return;
10964
10965	spin_lock(&inode->lock);
10966	if (del_bytes > 0)
10967		inode_sub_bytes(&inode->vfs_inode, del_bytes);
10968	if (add_bytes > 0)
10969		inode_add_bytes(&inode->vfs_inode, add_bytes);
10970	spin_unlock(&inode->lock);
10971}
10972
10973/*
10974 * Verify that there are no ordered extents for a given file range.
10975 *
10976 * @inode:   The target inode.
10977 * @start:   Start offset of the file range, should be sector size aligned.
10978 * @end:     End offset (inclusive) of the file range, its value +1 should be
10979 *           sector size aligned.
10980 *
10981 * This should typically be used for cases where we locked an inode's VFS lock in
10982 * exclusive mode, we have also locked the inode's i_mmap_lock in exclusive mode,
10983 * we have flushed all delalloc in the range, we have waited for all ordered
10984 * extents in the range to complete and finally we have locked the file range in
10985 * the inode's io_tree.
10986 */
10987void btrfs_assert_inode_range_clean(struct btrfs_inode *inode, u64 start, u64 end)
10988{
10989	struct btrfs_root *root = inode->root;
10990	struct btrfs_ordered_extent *ordered;
10991
10992	if (!IS_ENABLED(CONFIG_BTRFS_ASSERT))
10993		return;
10994
10995	ordered = btrfs_lookup_first_ordered_range(inode, start, end + 1 - start);
10996	if (ordered) {
10997		btrfs_err(root->fs_info,
10998"found unexpected ordered extent in file range [%llu, %llu] for inode %llu root %llu (ordered range [%llu, %llu])",
10999			  start, end, btrfs_ino(inode), root->root_key.objectid,
11000			  ordered->file_offset,
11001			  ordered->file_offset + ordered->num_bytes - 1);
11002		btrfs_put_ordered_extent(ordered);
11003	}
11004
11005	ASSERT(ordered == NULL);
11006}
11007
11008static const struct inode_operations btrfs_dir_inode_operations = {
11009	.getattr	= btrfs_getattr,
11010	.lookup		= btrfs_lookup,
11011	.create		= btrfs_create,
11012	.unlink		= btrfs_unlink,
11013	.link		= btrfs_link,
11014	.mkdir		= btrfs_mkdir,
11015	.rmdir		= btrfs_rmdir,
11016	.rename		= btrfs_rename2,
11017	.symlink	= btrfs_symlink,
11018	.setattr	= btrfs_setattr,
11019	.mknod		= btrfs_mknod,
11020	.listxattr	= btrfs_listxattr,
11021	.permission	= btrfs_permission,
11022	.get_inode_acl	= btrfs_get_acl,
11023	.set_acl	= btrfs_set_acl,
11024	.update_time	= btrfs_update_time,
11025	.tmpfile        = btrfs_tmpfile,
11026	.fileattr_get	= btrfs_fileattr_get,
11027	.fileattr_set	= btrfs_fileattr_set,
11028};
11029
11030static const struct file_operations btrfs_dir_file_operations = {
11031	.llseek		= btrfs_dir_llseek,
11032	.read		= generic_read_dir,
11033	.iterate_shared	= btrfs_real_readdir,
11034	.open		= btrfs_opendir,
11035	.unlocked_ioctl	= btrfs_ioctl,
11036#ifdef CONFIG_COMPAT
11037	.compat_ioctl	= btrfs_compat_ioctl,
11038#endif
11039	.release        = btrfs_release_file,
11040	.fsync		= btrfs_sync_file,
11041};
11042
11043/*
11044 * btrfs doesn't support the bmap operation because swapfiles
11045 * use bmap to make a mapping of extents in the file.  They assume
11046 * these extents won't change over the life of the file and they
11047 * use the bmap result to do IO directly to the drive.
11048 *
11049 * the btrfs bmap call would return logical addresses that aren't
11050 * suitable for IO and they also will change frequently as COW
11051 * operations happen.  So, swapfile + btrfs == corruption.
11052 *
11053 * For now we're avoiding this by dropping bmap.
11054 */
11055static const struct address_space_operations btrfs_aops = {
11056	.read_folio	= btrfs_read_folio,
11057	.writepages	= btrfs_writepages,
11058	.readahead	= btrfs_readahead,
11059	.invalidate_folio = btrfs_invalidate_folio,
11060	.release_folio	= btrfs_release_folio,
11061	.migrate_folio	= btrfs_migrate_folio,
11062	.dirty_folio	= filemap_dirty_folio,
11063	.error_remove_folio = generic_error_remove_folio,
11064	.swap_activate	= btrfs_swap_activate,
11065	.swap_deactivate = btrfs_swap_deactivate,
11066};
11067
11068static const struct inode_operations btrfs_file_inode_operations = {
11069	.getattr	= btrfs_getattr,
11070	.setattr	= btrfs_setattr,
11071	.listxattr      = btrfs_listxattr,
11072	.permission	= btrfs_permission,
11073	.fiemap		= btrfs_fiemap,
11074	.get_inode_acl	= btrfs_get_acl,
11075	.set_acl	= btrfs_set_acl,
11076	.update_time	= btrfs_update_time,
11077	.fileattr_get	= btrfs_fileattr_get,
11078	.fileattr_set	= btrfs_fileattr_set,
11079};
11080static const struct inode_operations btrfs_special_inode_operations = {
11081	.getattr	= btrfs_getattr,
11082	.setattr	= btrfs_setattr,
11083	.permission	= btrfs_permission,
11084	.listxattr	= btrfs_listxattr,
11085	.get_inode_acl	= btrfs_get_acl,
11086	.set_acl	= btrfs_set_acl,
11087	.update_time	= btrfs_update_time,
11088};
11089static const struct inode_operations btrfs_symlink_inode_operations = {
11090	.get_link	= page_get_link,
11091	.getattr	= btrfs_getattr,
11092	.setattr	= btrfs_setattr,
11093	.permission	= btrfs_permission,
11094	.listxattr	= btrfs_listxattr,
11095	.update_time	= btrfs_update_time,
11096};
11097
11098const struct dentry_operations btrfs_dentry_operations = {
11099	.d_delete	= btrfs_dentry_delete,
11100};
11101