// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <crypto/hash.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/blk-cgroup.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>
#include <linux/compat.h>
#include <linux/xattr.h>
#include <linux/posix_acl.h>
#include <linux/falloc.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/btrfs.h>
#include <linux/blkdev.h>
#include <linux/posix_acl_xattr.h>
#include <linux/uio.h>
#include <linux/magic.h>
#include <linux/iversion.h>
#include <linux/swap.h>
#include <linux/migrate.h>
#include <linux/sched/mm.h>
#include <linux/iomap.h>
#include <asm/unaligned.h>
#include <linux/fsverity.h>
#include "misc.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "ordered-data.h"
#include "xattr.h"
#include "tree-log.h"
#include "bio.h"
#include "compression.h"
#include "locking.h"
#include "props.h"
#include "qgroup.h"
#include "delalloc-space.h"
#include "block-group.h"
#include "space-info.h"
#include "zoned.h"
#include "subpage.h"
#include "inode-item.h"
#include "fs.h"
#include "accessors.h"
#include "extent-tree.h"
#include "root-tree.h"
#include "defrag.h"
#include "dir-item.h"
#include "file-item.h"
#include "uuid-tree.h"
#include "ioctl.h"
#include "file.h"
#include "acl.h"
#include "relocation.h"
#include "verity.h"
#include "super.h"
#include "orphan.h"
#include "backref.h"
#include "raid-stripe-tree.h"

struct btrfs_iget_args {
	u64 ino;
	struct btrfs_root *root;
};

struct btrfs_dio_data {
	ssize_t submitted;
	struct extent_changeset *data_reserved;
	struct btrfs_ordered_extent *ordered;
	bool data_space_reserved;
	bool nocow_done;
};

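/*
 * Private data for a direct I/O bio.  It is recovered from the embedded
 * bbio with the usual container_of() pattern (a sketch, not a specific
 * call site):
 *
 *	struct btrfs_dio_private *dip =
 *		container_of(bbio, struct btrfs_dio_private, bbio);
 */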
struct btrfs_dio_private {
	/* Range of I/O */
	u64 file_offset;
	u32 bytes;

	/* This must be last */
	struct btrfs_bio bbio;
};

static struct bio_set btrfs_dio_bioset;

struct btrfs_rename_ctx {
	/* Output field. Stores the index number of the old directory entry. */
	u64 index;
};

/*
 * Used by data_reloc_print_warning_inode() to pass needed info for filename
 * resolution and output of error message.
 */
struct data_reloc_warn {
	struct btrfs_path path;
	struct btrfs_fs_info *fs_info;
	u64 extent_item_size;
	u64 logical;
	int mirror_num;
};
/*
 * For the file_extent_tree, we want to hold the inode lock when we look up
 * and update the disk_i_size, but lockdep will complain because with our
 * io_tree we hold the tree lock while taking the inode lock when setting
 * delalloc. These two things are unrelated, so make a class for the
 * file_extent_tree so we don't get the two locking patterns mixed up.
 */
static struct lock_class_key file_extent_tree_class;

static const struct inode_operations btrfs_dir_inode_operations;
static const struct inode_operations btrfs_symlink_inode_operations;
static const struct inode_operations btrfs_special_inode_operations;
static const struct inode_operations btrfs_file_inode_operations;
static const struct address_space_operations btrfs_aops;
static const struct file_operations btrfs_dir_file_operations;

static struct kmem_cache *btrfs_inode_cachep;

static int btrfs_setsize(struct inode *inode, struct iattr *attr);
static int btrfs_truncate(struct btrfs_inode *inode, bool skip_writeback);

static noinline int run_delalloc_cow(struct btrfs_inode *inode,
				     struct page *locked_page, u64 start,
				     u64 end, struct writeback_control *wbc,
				     bool pages_dirty);
static struct extent_map *create_io_em(struct btrfs_inode *inode, u64 start,
				       u64 len, u64 orig_start, u64 block_start,
				       u64 block_len, u64 orig_block_len,
				       u64 ram_bytes, int compress_type,
				       int type);

static int data_reloc_print_warning_inode(u64 inum, u64 offset, u64 num_bytes,
					  u64 root, void *warn_ctx)
{
	struct data_reloc_warn *warn = warn_ctx;
	struct btrfs_fs_info *fs_info = warn->fs_info;
	struct extent_buffer *eb;
	struct btrfs_inode_item *inode_item;
	struct inode_fs_paths *ipath = NULL;
	struct btrfs_root *local_root;
	struct btrfs_key key;
	unsigned int nofs_flag;
	u32 nlink;
	int ret;

	local_root = btrfs_get_fs_root(fs_info, root, true);
	if (IS_ERR(local_root)) {
		ret = PTR_ERR(local_root);
		goto err;
	}

	/* This makes the path point to (inum INODE_ITEM ioff). */
	key.objectid = inum;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, local_root, &key, &warn->path, 0, 0);
	if (ret) {
		btrfs_put_root(local_root);
		btrfs_release_path(&warn->path);
		goto err;
	}

	eb = warn->path.nodes[0];
	inode_item = btrfs_item_ptr(eb, warn->path.slots[0], struct btrfs_inode_item);
	nlink = btrfs_inode_nlink(eb, inode_item);
	btrfs_release_path(&warn->path);

	nofs_flag = memalloc_nofs_save();
	ipath = init_ipath(4096, local_root, &warn->path);
	memalloc_nofs_restore(nofs_flag);
	if (IS_ERR(ipath)) {
		btrfs_put_root(local_root);
		ret = PTR_ERR(ipath);
		ipath = NULL;
		/*
		 * -ENOMEM, not a critical error, just output a generic error
		 * without the filename.
		 */
		btrfs_warn(fs_info,
"checksum error at logical %llu mirror %u root %llu, inode %llu offset %llu",
			   warn->logical, warn->mirror_num, root, inum, offset);
		return ret;
	}
	ret = paths_from_inode(inum, ipath);
	if (ret < 0)
		goto err;

	/*
	 * We deliberately ignore the fact that ipath might have been too
	 * small to hold all of the paths here.
	 */
	for (int i = 0; i < ipath->fspath->elem_cnt; i++) {
		btrfs_warn(fs_info,
"checksum error at logical %llu mirror %u root %llu inode %llu offset %llu length %u links %u (path: %s)",
			   warn->logical, warn->mirror_num, root, inum, offset,
			   fs_info->sectorsize, nlink,
			   (char *)(unsigned long)ipath->fspath->val[i]);
	}

	btrfs_put_root(local_root);
	free_ipath(ipath);
	return 0;

err:
	btrfs_warn(fs_info,
"checksum error at logical %llu mirror %u root %llu inode %llu offset %llu, path resolving failed with ret=%d",
		   warn->logical, warn->mirror_num, root, inum, offset, ret);

	free_ipath(ipath);
	return ret;
}

/*
 * Do extra user-friendly error output (e.g. lookup all the affected files).
 *
 * The backref lookup may fail, in which case we fall back to the old style
 * error message without filename resolution.
 */
static void print_data_reloc_error(const struct btrfs_inode *inode, u64 file_off,
				   const u8 *csum, const u8 *csum_expected,
				   int mirror_num)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_path path = { 0 };
	struct btrfs_key found_key = { 0 };
	struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	const u32 csum_size = fs_info->csum_size;
	u64 logical;
	u64 flags;
	u32 item_size;
	int ret;

	mutex_lock(&fs_info->reloc_mutex);
	logical = btrfs_get_reloc_bg_bytenr(fs_info);
	mutex_unlock(&fs_info->reloc_mutex);

	if (logical == U64_MAX) {
		btrfs_warn_rl(fs_info, "has data reloc tree but no running relocation");
		btrfs_warn_rl(fs_info,
"csum failed root %lld ino %llu off %llu csum " CSUM_FMT " expected csum " CSUM_FMT " mirror %d",
			inode->root->root_key.objectid, btrfs_ino(inode), file_off,
			CSUM_FMT_VALUE(csum_size, csum),
			CSUM_FMT_VALUE(csum_size, csum_expected),
			mirror_num);
		return;
	}

	logical += file_off;
	btrfs_warn_rl(fs_info,
"csum failed root %lld ino %llu off %llu logical %llu csum " CSUM_FMT " expected csum " CSUM_FMT " mirror %d",
			inode->root->root_key.objectid,
			btrfs_ino(inode), file_off, logical,
			CSUM_FMT_VALUE(csum_size, csum),
			CSUM_FMT_VALUE(csum_size, csum_expected),
			mirror_num);

	ret = extent_from_logical(fs_info, logical, &path, &found_key, &flags);
	if (ret < 0) {
		btrfs_err_rl(fs_info, "failed to lookup extent item for logical %llu: %d",
			     logical, ret);
		return;
	}
	eb = path.nodes[0];
	ei = btrfs_item_ptr(eb, path.slots[0], struct btrfs_extent_item);
	item_size = btrfs_item_size(eb, path.slots[0]);
	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		unsigned long ptr = 0;
		u64 ref_root;
		u8 ref_level;

		while (true) {
			ret = tree_backref_for_extent(&ptr, eb, &found_key, ei,
						      item_size, &ref_root,
						      &ref_level);
			if (ret < 0) {
				btrfs_warn_rl(fs_info,
				"failed to resolve tree backref for logical %llu: %d",
					      logical, ret);
				break;
			}
			if (ret > 0)
				break;

			btrfs_warn_rl(fs_info,
"csum error at logical %llu mirror %u: metadata %s (level %d) in tree %llu",
				logical, mirror_num,
				(ref_level ? "node" : "leaf"),
				ref_level, ref_root);
		}
		btrfs_release_path(&path);
	} else {
		struct btrfs_backref_walk_ctx ctx = { 0 };
		struct data_reloc_warn reloc_warn = { 0 };

		btrfs_release_path(&path);

		ctx.bytenr = found_key.objectid;
		ctx.extent_item_pos = logical - found_key.objectid;
		ctx.fs_info = fs_info;

		reloc_warn.logical = logical;
		reloc_warn.extent_item_size = found_key.offset;
		reloc_warn.mirror_num = mirror_num;
		reloc_warn.fs_info = fs_info;

		iterate_extent_inodes(&ctx, true,
				      data_reloc_print_warning_inode, &reloc_warn);
	}
}

static void __cold btrfs_print_data_csum_error(struct btrfs_inode *inode,
		u64 logical_start, u8 *csum, u8 *csum_expected, int mirror_num)
{
	struct btrfs_root *root = inode->root;
	const u32 csum_size = root->fs_info->csum_size;

	/* For data reloc tree, it's better to do a backref lookup instead. */
	if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
		return print_data_reloc_error(inode, logical_start, csum,
					      csum_expected, mirror_num);

	/* Output without objectid, which is more meaningful */
	if (root->root_key.objectid >= BTRFS_LAST_FREE_OBJECTID) {
		btrfs_warn_rl(root->fs_info,
"csum failed root %lld ino %lld off %llu csum " CSUM_FMT " expected csum " CSUM_FMT " mirror %d",
			root->root_key.objectid, btrfs_ino(inode),
			logical_start,
			CSUM_FMT_VALUE(csum_size, csum),
			CSUM_FMT_VALUE(csum_size, csum_expected),
			mirror_num);
	} else {
		btrfs_warn_rl(root->fs_info,
"csum failed root %llu ino %llu off %llu csum " CSUM_FMT " expected csum " CSUM_FMT " mirror %d",
			root->root_key.objectid, btrfs_ino(inode),
			logical_start,
			CSUM_FMT_VALUE(csum_size, csum),
			CSUM_FMT_VALUE(csum_size, csum_expected),
			mirror_num);
	}
}

/*
 * Lock inode i_rwsem based on arguments passed.
 *
 * ilock_flags can have the following bits set:
 *
 * BTRFS_ILOCK_SHARED - acquire a shared lock on the inode
 * BTRFS_ILOCK_TRY - try to acquire the lock, if it fails on the first attempt
 *		     return -EAGAIN
 * BTRFS_ILOCK_MMAP - acquire a write lock on the i_mmap_lock
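 *
 * A typical caller (a sketch, not a specific call site) pairs this with
 * btrfs_inode_unlock() using the same flags:
 *
 *	if (btrfs_inode_lock(inode, BTRFS_ILOCK_SHARED | BTRFS_ILOCK_TRY))
 *		return -EAGAIN;
 *	...
 *	btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);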
 */
int btrfs_inode_lock(struct btrfs_inode *inode, unsigned int ilock_flags)
{
	if (ilock_flags & BTRFS_ILOCK_SHARED) {
		if (ilock_flags & BTRFS_ILOCK_TRY) {
			if (!inode_trylock_shared(&inode->vfs_inode))
				return -EAGAIN;
			else
				return 0;
		}
		inode_lock_shared(&inode->vfs_inode);
	} else {
		if (ilock_flags & BTRFS_ILOCK_TRY) {
			if (!inode_trylock(&inode->vfs_inode))
				return -EAGAIN;
			else
				return 0;
		}
		inode_lock(&inode->vfs_inode);
	}
	if (ilock_flags & BTRFS_ILOCK_MMAP)
		down_write(&inode->i_mmap_lock);
	return 0;
}

/*
 * Unlock inode i_rwsem.
 *
 * ilock_flags should contain the same bits set as passed to btrfs_inode_lock()
 * to decide whether the lock acquired is shared or exclusive.
 */
void btrfs_inode_unlock(struct btrfs_inode *inode, unsigned int ilock_flags)
{
	if (ilock_flags & BTRFS_ILOCK_MMAP)
		up_write(&inode->i_mmap_lock);
	if (ilock_flags & BTRFS_ILOCK_SHARED)
		inode_unlock_shared(&inode->vfs_inode);
	else
		inode_unlock(&inode->vfs_inode);
}

/*
 * Cleanup all submitted ordered extents in specified range to handle errors
 * from the btrfs_run_delalloc_range() callback.
 *
 * NOTE: caller must ensure that when an error happens, it can not call
 * extent_clear_unlock_delalloc() to clear both the bits EXTENT_DO_ACCOUNTING
 * and EXTENT_DELALLOC simultaneously, because that causes the reserved metadata
 * to be released, which we want to happen only when finishing the ordered
 * extent (btrfs_finish_ordered_io()).
 */
static inline void btrfs_cleanup_ordered_extents(struct btrfs_inode *inode,
						 struct page *locked_page,
						 u64 offset, u64 bytes)
{
	unsigned long index = offset >> PAGE_SHIFT;
	unsigned long end_index = (offset + bytes - 1) >> PAGE_SHIFT;
	u64 page_start = 0, page_end = 0;
	struct page *page;

	if (locked_page) {
		page_start = page_offset(locked_page);
		page_end = page_start + PAGE_SIZE - 1;
	}

	while (index <= end_index) {
		/*
		 * For the locked page, we will call
		 * btrfs_mark_ordered_io_finished() on it in
		 * run_delalloc_range() for the error handling, which will
		 * clear page Ordered and run the ordered extent accounting.
		 *
		 * Here we can't just clear the Ordered bit, or
		 * btrfs_mark_ordered_io_finished() would skip the accounting
		 * for the page range, and the ordered extent will never finish.
		 */
		if (locked_page && index == (page_start >> PAGE_SHIFT)) {
			index++;
			continue;
		}
		page = find_get_page(inode->vfs_inode.i_mapping, index);
		index++;
		if (!page)
			continue;

		/*
		 * Here we just clear all Ordered bits for every page in the
		 * range, then btrfs_mark_ordered_io_finished() will handle
		 * the ordered extent accounting for the range.
		 */
		btrfs_folio_clamp_clear_ordered(inode->root->fs_info,
						page_folio(page), offset, bytes);
		put_page(page);
	}

	if (locked_page) {
		/* The locked page covers the full range, nothing needs to be done */
		if (bytes + offset <= page_start + PAGE_SIZE)
			return;
		/*
		 * In case this page belongs to the delalloc range being
		 * instantiated then skip it, since the first page of a range is
		 * going to be properly cleaned up by the caller of
		 * run_delalloc_range
		 */
		if (page_start >= offset && page_end <= (offset + bytes - 1)) {
			bytes = offset + bytes - page_offset(locked_page) - PAGE_SIZE;
			offset = page_offset(locked_page) + PAGE_SIZE;
		}
	}

	return btrfs_mark_ordered_io_finished(inode, NULL, offset, bytes, false);
}

static int btrfs_dirty_inode(struct btrfs_inode *inode);

static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
				     struct btrfs_new_inode_args *args)
{
	int err;

	if (args->default_acl) {
		err = __btrfs_set_acl(trans, args->inode, args->default_acl,
				      ACL_TYPE_DEFAULT);
		if (err)
			return err;
	}
	if (args->acl) {
		err = __btrfs_set_acl(trans, args->inode, args->acl, ACL_TYPE_ACCESS);
		if (err)
			return err;
	}
	if (!args->default_acl && !args->acl)
		cache_no_acl(args->inode);
	return btrfs_xattr_security_init(trans, args->inode, args->dir,
					 &args->dentry->d_name);
}

/*
 * This does all the hard work for inserting an inline extent into the
 * btree.  The caller should have done a btrfs_drop_extents() so that no
 * overlapping inline items exist in the btree.
 */
static int insert_inline_extent(struct btrfs_trans_handle *trans,
				struct btrfs_path *path,
				struct btrfs_inode *inode, bool extent_inserted,
				size_t size, size_t compressed_size,
				int compress_type,
				struct page **compressed_pages,
				bool update_i_size)
{
	struct btrfs_root *root = inode->root;
	struct extent_buffer *leaf;
	struct page *page = NULL;
	char *kaddr;
	unsigned long ptr;
	struct btrfs_file_extent_item *ei;
	int ret;
	size_t cur_size = size;
	u64 i_size;

	ASSERT((compressed_size > 0 && compressed_pages) ||
	       (compressed_size == 0 && !compressed_pages));

	if (compressed_size && compressed_pages)
		cur_size = compressed_size;

	if (!extent_inserted) {
		struct btrfs_key key;
		size_t datasize;

		key.objectid = btrfs_ino(inode);
		key.offset = 0;
		key.type = BTRFS_EXTENT_DATA_KEY;

		datasize = btrfs_file_extent_calc_inline_size(cur_size);
		ret = btrfs_insert_empty_item(trans, root, path, &key,
					      datasize);
		if (ret)
			goto fail;
	}
	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
	btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
	btrfs_set_file_extent_encryption(leaf, ei, 0);
	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
	btrfs_set_file_extent_ram_bytes(leaf, ei, size);
	ptr = btrfs_file_extent_inline_start(ei);

	if (compress_type != BTRFS_COMPRESS_NONE) {
		struct page *cpage;
		int i = 0;
		while (compressed_size > 0) {
			cpage = compressed_pages[i];
			cur_size = min_t(unsigned long, compressed_size,
				       PAGE_SIZE);

			kaddr = kmap_local_page(cpage);
			write_extent_buffer(leaf, kaddr, ptr, cur_size);
			kunmap_local(kaddr);

			i++;
			ptr += cur_size;
			compressed_size -= cur_size;
		}
		btrfs_set_file_extent_compression(leaf, ei,
						  compress_type);
	} else {
		page = find_get_page(inode->vfs_inode.i_mapping, 0);
		btrfs_set_file_extent_compression(leaf, ei, 0);
		kaddr = kmap_local_page(page);
		write_extent_buffer(leaf, kaddr, ptr, size);
		kunmap_local(kaddr);
		put_page(page);
	}
	btrfs_mark_buffer_dirty(trans, leaf);
	btrfs_release_path(path);

	/*
	 * We align size to sectorsize for inline extents just for
	 * simplicity's sake.
	 */
	ret = btrfs_inode_set_file_extent_range(inode, 0,
					ALIGN(size, root->fs_info->sectorsize));
	if (ret)
		goto fail;

	/*
	 * We're an inline extent, so nobody can extend the file past i_size
	 * without locking a page we already have locked.
	 *
	 * We must do any i_size and inode updates before we unlock the pages.
	 * Otherwise we could end up racing with unlink.
	 */
	i_size = i_size_read(&inode->vfs_inode);
	if (update_i_size && size > i_size) {
		i_size_write(&inode->vfs_inode, size);
		i_size = size;
	}
	inode->disk_i_size = i_size;

fail:
	return ret;
}

/*
 * conditionally insert an inline extent into the file.  This
 * does the checks required to make sure the data is small enough
 * to fit as an inline extent.
 */
static noinline int cow_file_range_inline(struct btrfs_inode *inode, u64 size,
					  size_t compressed_size,
					  int compress_type,
					  struct page **compressed_pages,
					  bool update_i_size)
{
	struct btrfs_drop_extents_args drop_args = { 0 };
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_trans_handle *trans;
	u64 data_len = (compressed_size ?: size);
	int ret;
	struct btrfs_path *path;

	/*
	 * We can create an inline extent if it ends at or beyond the current
	 * i_size, is no larger than a sector (decompressed), and the (possibly
	 * compressed) data fits in a leaf and the configured maximum inline
	 * size.
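	 *
	 * For example (assuming a 4K sectorsize and the default 2K
	 * max_inline): a 1K write at offset 0 into an empty file qualifies,
	 * while a 5K one does not, as it exceeds the sectorsize limit.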
	 */
	if (size < i_size_read(&inode->vfs_inode) ||
	    size > fs_info->sectorsize ||
	    data_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info) ||
	    data_len > fs_info->max_inline)
		return 1;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}
	trans->block_rsv = &inode->block_rsv;

	drop_args.path = path;
	drop_args.start = 0;
	drop_args.end = fs_info->sectorsize;
	drop_args.drop_cache = true;
	drop_args.replace_extent = true;
	drop_args.extent_item_size = btrfs_file_extent_calc_inline_size(data_len);
	ret = btrfs_drop_extents(trans, root, inode, &drop_args);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	ret = insert_inline_extent(trans, path, inode, drop_args.extent_inserted,
				   size, compressed_size, compress_type,
				   compressed_pages, update_i_size);
	if (ret && ret != -ENOSPC) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	} else if (ret == -ENOSPC) {
		ret = 1;
		goto out;
	}

	btrfs_update_inode_bytes(inode, size, drop_args.bytes_found);
	ret = btrfs_update_inode(trans, inode);
	if (ret && ret != -ENOSPC) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	} else if (ret == -ENOSPC) {
		ret = 1;
		goto out;
	}

	btrfs_set_inode_full_sync(inode);
out:
	/*
	 * Don't forget to free the reserved space. An inlined extent doesn't
	 * count as a data extent, so free the reservation directly here.
	 * At reserve time it's always aligned to page size, so just free one
	 * page here.
	 */
	btrfs_qgroup_free_data(inode, NULL, 0, PAGE_SIZE, NULL);
	btrfs_free_path(path);
	btrfs_end_transaction(trans);
	return ret;
}

struct async_extent {
	u64 start;
	u64 ram_size;
	u64 compressed_size;
	struct page **pages;
	unsigned long nr_pages;
	int compress_type;
	struct list_head list;
};

struct async_chunk {
	struct btrfs_inode *inode;
	struct page *locked_page;
	u64 start;
	u64 end;
	blk_opf_t write_flags;
	struct list_head extents;
	struct cgroup_subsys_state *blkcg_css;
	struct btrfs_work work;
	struct async_cow *async_cow;
};

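/*
 * One async_cow tracks a whole delalloc range that was split into 512K
 * async_chunk pieces; it is freed once every chunk has finished, as counted
 * by num_chunks (see the do_free path in submit_compressed_extents()).
 */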
struct async_cow {
	atomic_t num_chunks;
	struct async_chunk chunks[];
};

static noinline int add_async_extent(struct async_chunk *cow,
				     u64 start, u64 ram_size,
				     u64 compressed_size,
				     struct page **pages,
				     unsigned long nr_pages,
				     int compress_type)
{
	struct async_extent *async_extent;

	async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
	if (!async_extent)
		return -ENOMEM;
	async_extent->start = start;
	async_extent->ram_size = ram_size;
	async_extent->compressed_size = compressed_size;
	async_extent->pages = pages;
	async_extent->nr_pages = nr_pages;
	async_extent->compress_type = compress_type;
	list_add_tail(&async_extent->list, &cow->extents);
	return 0;
}

/*
 * Check if the inode needs to be submitted to compression, based on mount
 * options, defragmentation, properties or heuristics.
 */
static inline int inode_need_compress(struct btrfs_inode *inode, u64 start,
				      u64 end)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;

	if (!btrfs_inode_can_compress(inode)) {
		WARN(IS_ENABLED(CONFIG_BTRFS_DEBUG),
			KERN_ERR "BTRFS: unexpected compression for ino %llu\n",
			btrfs_ino(inode));
		return 0;
	}
	/*
	 * Special check for subpage.
	 *
	 * We lock the full page then run each delalloc range in the page, thus
	 * for the following case, we will hit some subpage specific corner case:
	 *
	 * 0		32K		64K
	 * |	|///////|	|///////|
	 *		\- A		\- B
	 *
	 * In the above case, both range A and range B will try to unlock the
	 * full page [0, 64K), causing the one finished later to find the page
	 * unlocked already, triggering various page lock requirement BUG_ON()s.
	 *
	 * So here we add an artificial limit that subpage compression can only
	 * be enabled if the range is fully page aligned.
	 *
	 * In theory we only need to ensure the first page is fully covered, but
	 * the trailing partial page would be locked until the full compression
	 * finishes, delaying the writes of the other ranges.
	 *
	 * TODO: Make btrfs_run_delalloc_range() lock all delalloc ranges first
	 * to prevent any submitted async extent from unlocking the full page.
	 * By this, we can ensure for the subpage case that only the last
	 * async_cow will unlock the full page.
	 */
	if (fs_info->sectorsize < PAGE_SIZE) {
		if (!PAGE_ALIGNED(start) ||
		    !PAGE_ALIGNED(end + 1))
			return 0;
	}

	/* force compress */
	if (btrfs_test_opt(fs_info, FORCE_COMPRESS))
		return 1;
	/* defrag ioctl */
	if (inode->defrag_compress)
		return 1;
	/* bad compression ratios */
	if (inode->flags & BTRFS_INODE_NOCOMPRESS)
		return 0;
	if (btrfs_test_opt(fs_info, COMPRESS) ||
	    inode->flags & BTRFS_INODE_COMPRESS ||
	    inode->prop_compress)
		return btrfs_compress_heuristic(&inode->vfs_inode, start, end);
	return 0;
}

static inline void inode_should_defrag(struct btrfs_inode *inode,
		u64 start, u64 end, u64 num_bytes, u32 small_write)
{
	/* If this is a small write inside eof, kick off a defrag */
	if (num_bytes < small_write &&
	    (start > 0 || end + 1 < inode->disk_i_size))
		btrfs_add_inode_defrag(NULL, inode, small_write);
}

/*
 * Work queue callback that starts compression on a file's pages.
 *
 * This is done inside an ordered work queue, and the compression is spread
 * across many cpus.  The actual IO submission is step two, and the ordered work
 * queue takes care of making sure that happens in the same order things were
 * put onto the queue by writepages and friends.
 *
 * If this code finds it can't get good compression, it puts an entry onto the
 * work queue to write the uncompressed bytes.  This makes sure that both
 * compressed inodes and uncompressed inodes are written in the same order that
 * the flusher thread sent them down.
 */
static void compress_file_range(struct btrfs_work *work)
{
	struct async_chunk *async_chunk =
		container_of(work, struct async_chunk, work);
	struct btrfs_inode *inode = async_chunk->inode;
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct address_space *mapping = inode->vfs_inode.i_mapping;
	u64 blocksize = fs_info->sectorsize;
	u64 start = async_chunk->start;
	u64 end = async_chunk->end;
	u64 actual_end;
	u64 i_size;
	int ret = 0;
	struct page **pages;
	unsigned long nr_pages;
	unsigned long total_compressed = 0;
	unsigned long total_in = 0;
	unsigned int poff;
	int i;
	int compress_type = fs_info->compress_type;

	inode_should_defrag(inode, start, end, end - start + 1, SZ_16K);

	/*
	 * We need to call clear_page_dirty_for_io on each page in the range.
	 * Otherwise applications with the file mmap'd can wander in and change
	 * the page contents while we are compressing them.
	 */
	extent_range_clear_dirty_for_io(&inode->vfs_inode, start, end);

	/*
	 * We need to save i_size before now because it could change in between
	 * us evaluating the size and assigning it.  This is because we lock and
	 * unlock the page in truncate and fallocate, and then modify the i_size
	 * later on.
	 *
	 * The barriers are to emulate READ_ONCE, remove that once i_size_read
	 * does that for us.
	 */
	barrier();
	i_size = i_size_read(&inode->vfs_inode);
	barrier();
	actual_end = min_t(u64, i_size, end + 1);
again:
	pages = NULL;
	nr_pages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1;
	nr_pages = min_t(unsigned long, nr_pages, BTRFS_MAX_COMPRESSED_PAGES);

	/*
	 * we don't want to send crud past the end of i_size through
	 * compression, that's just a waste of CPU time.  So, if the
	 * end of the file is before the start of our current
	 * requested range of bytes, we bail out to the uncompressed
	 * cleanup code that can deal with all of this.
	 *
	 * It isn't really the fastest way to fix things, but this is a
	 * very uncommon corner.
	 */
	if (actual_end <= start)
		goto cleanup_and_bail_uncompressed;

	total_compressed = actual_end - start;

	/*
	 * Skip compression for a small file range (<= blocksize) that
	 * isn't an inline extent, since it doesn't save disk space at all.
	 */
	if (total_compressed <= blocksize &&
	   (start > 0 || end + 1 < inode->disk_i_size))
		goto cleanup_and_bail_uncompressed;

	/*
	 * For subpage case, we require full page alignment for the sector
	 * aligned range.
	 * Thus we must also check against @actual_end, not just @end.
	 */
	if (blocksize < PAGE_SIZE) {
		if (!PAGE_ALIGNED(start) ||
		    !PAGE_ALIGNED(round_up(actual_end, blocksize)))
			goto cleanup_and_bail_uncompressed;
	}

	total_compressed = min_t(unsigned long, total_compressed,
			BTRFS_MAX_UNCOMPRESSED);
	total_in = 0;
	ret = 0;

	/*
	 * We do compression for mount -o compress and when the inode has not
	 * been flagged as NOCOMPRESS.  This flag can change at any time if we
	 * discover bad compression ratios.
	 */
	if (!inode_need_compress(inode, start, end))
		goto cleanup_and_bail_uncompressed;

	pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
	if (!pages) {
		/*
		 * Memory allocation failure is not a fatal error, we can fall
		 * back to uncompressed code.
		 */
		goto cleanup_and_bail_uncompressed;
	}

	if (inode->defrag_compress)
		compress_type = inode->defrag_compress;
	else if (inode->prop_compress)
		compress_type = inode->prop_compress;

	/* Compression level is applied here. */
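	/*
	 * Note: the level shares one int with the compression type, packed
	 * into bits 4-7 by the "<< 4" below and unpacked again inside
	 * btrfs_compress_pages().
	 */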
	ret = btrfs_compress_pages(compress_type | (fs_info->compress_level << 4),
				   mapping, start, pages, &nr_pages, &total_in,
				   &total_compressed);
	if (ret)
		goto mark_incompressible;

	/*
	 * Zero the tail end of the last page, as we might be sending it down
	 * to disk.
	 */
	poff = offset_in_page(total_compressed);
	if (poff)
		memzero_page(pages[nr_pages - 1], poff, PAGE_SIZE - poff);

	/*
	 * Try to create an inline extent.
	 *
	 * If we didn't compress the entire range, try to create an uncompressed
	 * inline extent, else a compressed one.
	 *
	 * Check cow_file_range() for why we don't even try to create inline
	 * extent for the subpage case.
	 */
	if (start == 0 && fs_info->sectorsize == PAGE_SIZE) {
		if (total_in < actual_end) {
			ret = cow_file_range_inline(inode, actual_end, 0,
						    BTRFS_COMPRESS_NONE, NULL,
						    false);
		} else {
			ret = cow_file_range_inline(inode, actual_end,
						    total_compressed,
						    compress_type, pages,
						    false);
		}
		if (ret <= 0) {
			unsigned long clear_flags = EXTENT_DELALLOC |
				EXTENT_DELALLOC_NEW | EXTENT_DEFRAG |
				EXTENT_DO_ACCOUNTING;

			if (ret < 0)
				mapping_set_error(mapping, -EIO);

			/*
			 * inline extent creation worked or returned error,
			 * we don't need to create any more async work items.
			 * Unlock and free up our temp pages.
			 *
			 * We use DO_ACCOUNTING here because we need the
			 * delalloc_release_metadata to be done _after_ we drop
			 * our outstanding extent for clearing delalloc for this
			 * range.
			 */
			extent_clear_unlock_delalloc(inode, start, end,
						     NULL,
						     clear_flags,
						     PAGE_UNLOCK |
						     PAGE_START_WRITEBACK |
						     PAGE_END_WRITEBACK);
			goto free_pages;
		}
	}

	/*
	 * We aren't doing an inline extent. Round the compressed size up to a
	 * block size boundary so the allocator does sane things.
	 */
	total_compressed = ALIGN(total_compressed, blocksize);

	/*
	 * One last check to make sure the compression is really a win: compare
	 * the amount of data read with the blocks on disk; compression must
	 * free at least one sector.
	 */
	total_in = round_up(total_in, fs_info->sectorsize);
	if (total_compressed + blocksize > total_in)
		goto mark_incompressible;

	/*
	 * The async work queues will take care of doing actual allocation on
	 * disk for these compressed pages, and will submit the bios.
	 */
	ret = add_async_extent(async_chunk, start, total_in, total_compressed, pages,
			       nr_pages, compress_type);
	BUG_ON(ret);
	if (start + total_in < end) {
		start += total_in;
		cond_resched();
		goto again;
	}
	return;

mark_incompressible:
	if (!btrfs_test_opt(fs_info, FORCE_COMPRESS) && !inode->prop_compress)
		inode->flags |= BTRFS_INODE_NOCOMPRESS;
cleanup_and_bail_uncompressed:
	ret = add_async_extent(async_chunk, start, end - start + 1, 0, NULL, 0,
			       BTRFS_COMPRESS_NONE);
	BUG_ON(ret);
free_pages:
	if (pages) {
		for (i = 0; i < nr_pages; i++) {
			WARN_ON(pages[i]->mapping);
			btrfs_free_compr_page(pages[i]);
		}
		kfree(pages);
	}
}

static void free_async_extent_pages(struct async_extent *async_extent)
{
	int i;

	if (!async_extent->pages)
		return;

	for (i = 0; i < async_extent->nr_pages; i++) {
		WARN_ON(async_extent->pages[i]->mapping);
		btrfs_free_compr_page(async_extent->pages[i]);
	}
	kfree(async_extent->pages);
	async_extent->nr_pages = 0;
	async_extent->pages = NULL;
}

static void submit_uncompressed_range(struct btrfs_inode *inode,
				      struct async_extent *async_extent,
				      struct page *locked_page)
{
	u64 start = async_extent->start;
	u64 end = async_extent->start + async_extent->ram_size - 1;
	int ret;
	struct writeback_control wbc = {
		.sync_mode		= WB_SYNC_ALL,
		.range_start		= start,
		.range_end		= end,
		.no_cgroup_owner	= 1,
	};

	wbc_attach_fdatawrite_inode(&wbc, &inode->vfs_inode);
	ret = run_delalloc_cow(inode, locked_page, start, end, &wbc, false);
	wbc_detach_inode(&wbc);
	if (ret < 0) {
		btrfs_cleanup_ordered_extents(inode, locked_page, start, end - start + 1);
		if (locked_page) {
			const u64 page_start = page_offset(locked_page);

			set_page_writeback(locked_page);
			end_page_writeback(locked_page);
			btrfs_mark_ordered_io_finished(inode, locked_page,
						       page_start, PAGE_SIZE,
						       !ret);
			mapping_set_error(locked_page->mapping, ret);
			unlock_page(locked_page);
		}
	}
}

static void submit_one_async_extent(struct async_chunk *async_chunk,
				    struct async_extent *async_extent,
				    u64 *alloc_hint)
{
	struct btrfs_inode *inode = async_chunk->inode;
	struct extent_io_tree *io_tree = &inode->io_tree;
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_ordered_extent *ordered;
	struct btrfs_key ins;
	struct page *locked_page = NULL;
	struct extent_map *em;
	int ret = 0;
	u64 start = async_extent->start;
	u64 end = async_extent->start + async_extent->ram_size - 1;

	if (async_chunk->blkcg_css)
		kthread_associate_blkcg(async_chunk->blkcg_css);

	/*
	 * If async_chunk->locked_page is in the async_extent range, we need to
	 * handle it.
	 */
	if (async_chunk->locked_page) {
		u64 locked_page_start = page_offset(async_chunk->locked_page);
		u64 locked_page_end = locked_page_start + PAGE_SIZE - 1;

		if (!(start >= locked_page_end || end <= locked_page_start))
			locked_page = async_chunk->locked_page;
	}
	lock_extent(io_tree, start, end, NULL);

	if (async_extent->compress_type == BTRFS_COMPRESS_NONE) {
		submit_uncompressed_range(inode, async_extent, locked_page);
		goto done;
	}

	ret = btrfs_reserve_extent(root, async_extent->ram_size,
				   async_extent->compressed_size,
				   async_extent->compressed_size,
				   0, *alloc_hint, &ins, 1, 1);
	if (ret) {
		/*
		 * We can't reserve contiguous space for the compressed size.
		 * Unlikely, but it's possible that we could have enough
		 * non-contiguous space for the uncompressed size instead.  So
		 * fall back to uncompressed.
		 */
		submit_uncompressed_range(inode, async_extent, locked_page);
		goto done;
	}

	/* Here we're doing allocation and writeback of the compressed pages */
	em = create_io_em(inode, start,
			  async_extent->ram_size,	/* len */
			  start,			/* orig_start */
			  ins.objectid,			/* block_start */
			  ins.offset,			/* block_len */
			  ins.offset,			/* orig_block_len */
			  async_extent->ram_size,	/* ram_bytes */
			  async_extent->compress_type,
			  BTRFS_ORDERED_COMPRESSED);
	if (IS_ERR(em)) {
		ret = PTR_ERR(em);
		goto out_free_reserve;
	}
	free_extent_map(em);

	ordered = btrfs_alloc_ordered_extent(inode, start,	/* file_offset */
				       async_extent->ram_size,	/* num_bytes */
				       async_extent->ram_size,	/* ram_bytes */
				       ins.objectid,		/* disk_bytenr */
				       ins.offset,		/* disk_num_bytes */
				       0,			/* offset */
				       1 << BTRFS_ORDERED_COMPRESSED,
				       async_extent->compress_type);
	if (IS_ERR(ordered)) {
		btrfs_drop_extent_map_range(inode, start, end, false);
		ret = PTR_ERR(ordered);
		goto out_free_reserve;
	}
	btrfs_dec_block_group_reservations(fs_info, ins.objectid);

	/* Clear dirty, set writeback and unlock the pages. */
	extent_clear_unlock_delalloc(inode, start, end,
			NULL, EXTENT_LOCKED | EXTENT_DELALLOC,
			PAGE_UNLOCK | PAGE_START_WRITEBACK);
	btrfs_submit_compressed_write(ordered,
			    async_extent->pages,	/* compressed_pages */
			    async_extent->nr_pages,
			    async_chunk->write_flags, true);
	*alloc_hint = ins.objectid + ins.offset;
done:
	if (async_chunk->blkcg_css)
		kthread_associate_blkcg(NULL);
	kfree(async_extent);
	return;

out_free_reserve:
	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
	btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
	mapping_set_error(inode->vfs_inode.i_mapping, -EIO);
	extent_clear_unlock_delalloc(inode, start, end,
				     NULL, EXTENT_LOCKED | EXTENT_DELALLOC |
				     EXTENT_DELALLOC_NEW |
				     EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING,
				     PAGE_UNLOCK | PAGE_START_WRITEBACK |
				     PAGE_END_WRITEBACK);
	free_async_extent_pages(async_extent);
	if (async_chunk->blkcg_css)
		kthread_associate_blkcg(NULL);
	btrfs_debug(fs_info,
"async extent submission failed root=%lld inode=%llu start=%llu len=%llu ret=%d",
		    root->root_key.objectid, btrfs_ino(inode), start,
		    async_extent->ram_size, ret);
	kfree(async_extent);
}

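/*
 * Pick an allocation hint for a new extent: the disk block number of a
 * nearby mapped extent if one exists, otherwise 0 (no preference).
 */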
static u64 get_extent_allocation_hint(struct btrfs_inode *inode, u64 start,
				      u64 num_bytes)
{
	struct extent_map_tree *em_tree = &inode->extent_tree;
	struct extent_map *em;
	u64 alloc_hint = 0;

	read_lock(&em_tree->lock);
	em = search_extent_mapping(em_tree, start, num_bytes);
	if (em) {
		/*
		 * if block start isn't an actual block number then find the
		 * first block in this inode and use that as a hint.  If that
		 * block is also bogus then just don't worry about it.
		 */
		if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
			free_extent_map(em);
			em = search_extent_mapping(em_tree, 0, 0);
			if (em && em->block_start < EXTENT_MAP_LAST_BYTE)
				alloc_hint = em->block_start;
			if (em)
				free_extent_map(em);
		} else {
			alloc_hint = em->block_start;
			free_extent_map(em);
		}
	}
	read_unlock(&em_tree->lock);

	return alloc_hint;
}

/*
 * When extent_io.c finds a delayed allocation range in the file,
 * the callbacks end up in this code.  The basic idea is to
 * allocate extents on disk for the range, and create ordered data structs
 * in RAM to track those extents.
 *
 * locked_page is the page that writepage had locked already.  We use
 * it to make sure we don't do extra locks or unlocks.
 *
 * When this function fails, it unlocks all pages except @locked_page.
 *
 * When this function successfully creates an inline extent, it returns 1 and
 * unlocks all pages including locked_page and starts I/O on them.
 * (In reality inline extents are limited to a single page, so locked_page is
 * the only page handled anyway).
 *
 * When this function succeeds and creates a normal extent, the page locking
 * status depends on the passed in flags:
 *
 * - If @keep_locked is set, all pages are kept locked.
 * - Else all pages except for @locked_page are unlocked.
 *
 * When a failure happens in the second or later iteration of the
 * while-loop, the ordered extents created in previous iterations are kept
 * intact. So, the caller must clean them up by calling
 * btrfs_cleanup_ordered_extents(). See btrfs_run_delalloc_range() for an
 * example.
 */
static noinline int cow_file_range(struct btrfs_inode *inode,
				   struct page *locked_page, u64 start, u64 end,
				   u64 *done_offset,
				   bool keep_locked, bool no_inline)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 alloc_hint = 0;
	u64 orig_start = start;
	u64 num_bytes;
	unsigned long ram_size;
	u64 cur_alloc_size = 0;
	u64 min_alloc_size;
	u64 blocksize = fs_info->sectorsize;
	struct btrfs_key ins;
	struct extent_map *em;
	unsigned clear_bits;
	unsigned long page_ops;
	bool extent_reserved = false;
	int ret = 0;

	if (btrfs_is_free_space_inode(inode)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	num_bytes = ALIGN(end - start + 1, blocksize);
	num_bytes = max(blocksize, num_bytes);
	ASSERT(num_bytes <= btrfs_super_total_bytes(fs_info->super_copy));

	inode_should_defrag(inode, start, end, num_bytes, SZ_64K);

	/*
	 * Due to the page size limit, for subpage we can only trigger the
	 * writeback for the dirty sectors of a page, which means the data
	 * writeback is doing more writeback than what we want.
	 *
	 * This is especially unexpected for some call sites like fallocate,
	 * where we only increase i_size after everything is done.
	 * This means we can trigger an inline extent even if we didn't want
	 * to.  So here we skip inline extent creation completely.
	 */
	if (start == 0 && fs_info->sectorsize == PAGE_SIZE && !no_inline) {
		u64 actual_end = min_t(u64, i_size_read(&inode->vfs_inode),
				       end + 1);

		/* Let's try to make an inline extent. */
		ret = cow_file_range_inline(inode, actual_end, 0,
					    BTRFS_COMPRESS_NONE, NULL, false);
		if (ret == 0) {
			/*
			 * We use DO_ACCOUNTING here because we need the
			 * delalloc_release_metadata to be run _after_ we drop
			 * our outstanding extent for clearing delalloc for this
			 * range.
			 */
			extent_clear_unlock_delalloc(inode, start, end,
				     locked_page,
				     EXTENT_LOCKED | EXTENT_DELALLOC |
				     EXTENT_DELALLOC_NEW | EXTENT_DEFRAG |
				     EXTENT_DO_ACCOUNTING, PAGE_UNLOCK |
				     PAGE_START_WRITEBACK | PAGE_END_WRITEBACK);
			/*
			 * locked_page is locked by the caller of
			 * writepage_delalloc(), not locked by
			 * __process_pages_contig().
			 *
			 * We can't let __process_pages_contig() unlock it,
			 * as it doesn't have any subpage::writers recorded.
			 *
			 * Here we manually unlock the page, since the caller
			 * can't determine if it's an inline extent or a
			 * compressed extent.
			 */
			unlock_page(locked_page);
			ret = 1;
			goto done;
		} else if (ret < 0) {
			goto out_unlock;
		}
	}

	alloc_hint = get_extent_allocation_hint(inode, start, num_bytes);

	/*
	 * Relocation relies on the relocated extents to have exactly the same
	 * size as the original extents. Normally writeback for relocation data
	 * extents follows a NOCOW path because relocation preallocates the
	 * extents. However, due to an operation such as scrub turning a block
	 * group to RO mode, it may fall back to COW mode, so we must make sure
	 * an extent allocated during COW has exactly the requested size and can
	 * not be split into smaller extents, otherwise relocation breaks and
	 * fails during the stage where it updates the bytenr of file extent
	 * items.
	 */
	if (btrfs_is_data_reloc_root(root))
		min_alloc_size = num_bytes;
	else
		min_alloc_size = fs_info->sectorsize;

	while (num_bytes > 0) {
		struct btrfs_ordered_extent *ordered;

		cur_alloc_size = num_bytes;
		ret = btrfs_reserve_extent(root, cur_alloc_size, cur_alloc_size,
					   min_alloc_size, 0, alloc_hint,
					   &ins, 1, 1);
		if (ret == -EAGAIN) {
			/*
			 * btrfs_reserve_extent only returns -EAGAIN for zoned
			 * file systems, which is an indication that there are
			 * no active zones to allocate from at the moment.
			 *
			 * If this is the first loop iteration, wait for at
			 * least one zone to finish before retrying the
			 * allocation.  Otherwise ask the caller to write out
			 * the already allocated blocks before coming back to
			 * us, or return -ENOSPC if it can't handle retries.
			 */
			ASSERT(btrfs_is_zoned(fs_info));
			if (start == orig_start) {
				wait_on_bit_io(&inode->root->fs_info->flags,
					       BTRFS_FS_NEED_ZONE_FINISH,
					       TASK_UNINTERRUPTIBLE);
				continue;
			}
			if (done_offset) {
				*done_offset = start - 1;
				return 0;
			}
			ret = -ENOSPC;
		}
		if (ret < 0)
			goto out_unlock;
		cur_alloc_size = ins.offset;
		extent_reserved = true;

		ram_size = ins.offset;
		em = create_io_em(inode, start, ins.offset, /* len */
				  start, /* orig_start */
				  ins.objectid, /* block_start */
				  ins.offset, /* block_len */
				  ins.offset, /* orig_block_len */
				  ram_size, /* ram_bytes */
				  BTRFS_COMPRESS_NONE, /* compress_type */
				  BTRFS_ORDERED_REGULAR /* type */);
		if (IS_ERR(em)) {
			ret = PTR_ERR(em);
			goto out_reserve;
		}
		free_extent_map(em);

		ordered = btrfs_alloc_ordered_extent(inode, start, ram_size,
					ram_size, ins.objectid, cur_alloc_size,
					0, 1 << BTRFS_ORDERED_REGULAR,
					BTRFS_COMPRESS_NONE);
		if (IS_ERR(ordered)) {
			ret = PTR_ERR(ordered);
			goto out_drop_extent_cache;
		}

		if (btrfs_is_data_reloc_root(root)) {
			ret = btrfs_reloc_clone_csums(ordered);

			/*
			 * Only drop the extent map cache here, and continue
			 * processing as normal.
			 *
			 * We must not allow extent_clear_unlock_delalloc()
			 * at the out_unlock label to free the metadata of this
			 * ordered extent, as that should be freed by
			 * btrfs_finish_ordered_io().
			 *
			 * So we must continue until @start is increased past
			 * the current ordered extent.
			 */
			if (ret)
				btrfs_drop_extent_map_range(inode, start,
							    start + ram_size - 1,
							    false);
		}
		btrfs_put_ordered_extent(ordered);

		btrfs_dec_block_group_reservations(fs_info, ins.objectid);

		/*
		 * We're not doing compressed IO, don't unlock the first page
		 * (which the caller expects to stay locked), don't clear any
		 * dirty bits and don't set any writeback bits
		 *
		 * Do set the Ordered (Private2) bit so we know this page was
		 * properly setup for writepage.
		 */
		page_ops = (keep_locked ? 0 : PAGE_UNLOCK);
		page_ops |= PAGE_SET_ORDERED;

		extent_clear_unlock_delalloc(inode, start, start + ram_size - 1,
					     locked_page,
					     EXTENT_LOCKED | EXTENT_DELALLOC,
					     page_ops);
		if (num_bytes < cur_alloc_size)
			num_bytes = 0;
		else
			num_bytes -= cur_alloc_size;
		alloc_hint = ins.objectid + ins.offset;
		start += cur_alloc_size;
		extent_reserved = false;

		/*
		 * On a btrfs_reloc_clone_csums() error: since @start has been
		 * increased, extent_clear_unlock_delalloc() at the out_unlock
		 * label won't free the metadata of the current ordered extent,
		 * so we're OK to exit here.
		 */
		if (ret)
			goto out_unlock;
	}
done:
	if (done_offset)
		*done_offset = end;
	return ret;

out_drop_extent_cache:
	btrfs_drop_extent_map_range(inode, start, start + ram_size - 1, false);
out_reserve:
	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
	btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
out_unlock:
	/*
	 * Now, we have three regions to clean up:
	 *
	 * |-------(1)----|---(2)---|-------------(3)----------|
	 * `- orig_start  `- start  `- start + cur_alloc_size  `- end
	 *
	 * We process each region below.
	 */

	clear_bits = EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
		EXTENT_DEFRAG | EXTENT_CLEAR_META_RESV;
	page_ops = PAGE_UNLOCK | PAGE_START_WRITEBACK | PAGE_END_WRITEBACK;

	/*
	 * For the range (1). We have already instantiated the ordered extents
	 * for this region. They are cleaned up by
	 * btrfs_cleanup_ordered_extents() in e.g.
	 * btrfs_run_delalloc_range(). EXTENT_LOCKED | EXTENT_DELALLOC are
	 * already cleared in the above loop. And, EXTENT_DELALLOC_NEW |
	 * EXTENT_DEFRAG | EXTENT_CLEAR_META_RESV are handled by the cleanup
	 * function.
	 *
	 * However, in case of @keep_locked, we still need to unlock the pages
	 * (except @locked_page) to ensure all the pages are unlocked.
	 */
	if (keep_locked && orig_start < start) {
		if (!locked_page)
			mapping_set_error(inode->vfs_inode.i_mapping, ret);
		extent_clear_unlock_delalloc(inode, orig_start, start - 1,
					     locked_page, 0, page_ops);
	}

	/*
	 * For the range (2). If we reserved an extent for our delalloc range
	 * (or a subrange) and failed to create the respective ordered extent,
	 * then it means that when we reserved the extent we decremented the
	 * extent's size from the data space_info's bytes_may_use counter and
	 * incremented the space_info's bytes_reserved counter by the same
	 * amount. We must make sure extent_clear_unlock_delalloc() does not try
	 * to decrement again the data space_info's bytes_may_use counter,
	 * therefore we do not pass it the flag EXTENT_CLEAR_DATA_RESV.
	 */
	if (extent_reserved) {
		extent_clear_unlock_delalloc(inode, start,
					     start + cur_alloc_size - 1,
					     locked_page,
					     clear_bits,
					     page_ops);
		start += cur_alloc_size;
	}

	/*
	 * For the range (3). We never touched the region. In addition to the
	 * clear_bits above, we add EXTENT_CLEAR_DATA_RESV to release the data
	 * space_info's bytes_may_use counter, reserved in
	 * btrfs_check_data_free_space().
	 */
	if (start < end) {
		clear_bits |= EXTENT_CLEAR_DATA_RESV;
		extent_clear_unlock_delalloc(inode, start, end, locked_page,
					     clear_bits, page_ops);
	}
	return ret;
}

/*
 * Phase two of compressed writeback.  This is the ordered portion of the code,
 * which only gets called in the order the work was queued.  We walk all the
 * async extents created by compress_file_range and send them down to the disk.
 *
 * If called with @do_free == true then it'll try to finish the work and free
 * the work struct eventually.
 */
static noinline void submit_compressed_extents(struct btrfs_work *work, bool do_free)
{
	struct async_chunk *async_chunk = container_of(work, struct async_chunk,
						     work);
	struct btrfs_fs_info *fs_info = btrfs_work_owner(work);
	struct async_extent *async_extent;
	unsigned long nr_pages;
	u64 alloc_hint = 0;

	if (do_free) {
		struct async_chunk *async_chunk;
		struct async_cow *async_cow;

		async_chunk = container_of(work, struct async_chunk, work);
		btrfs_add_delayed_iput(async_chunk->inode);
		if (async_chunk->blkcg_css)
			css_put(async_chunk->blkcg_css);

		async_cow = async_chunk->async_cow;
		if (atomic_dec_and_test(&async_cow->num_chunks))
			kvfree(async_cow);
		return;
	}

	nr_pages = (async_chunk->end - async_chunk->start + PAGE_SIZE) >>
		PAGE_SHIFT;

	while (!list_empty(&async_chunk->extents)) {
		async_extent = list_entry(async_chunk->extents.next,
					  struct async_extent, list);
		list_del(&async_extent->list);
		submit_one_async_extent(async_chunk, async_extent, &alloc_hint);
	}

	/* atomic_sub_return implies a barrier */
	if (atomic_sub_return(nr_pages, &fs_info->async_delalloc_pages) <
	    5 * SZ_1M)
		cond_wake_up_nomb(&fs_info->async_submit_wait);
}

static bool run_delalloc_compressed(struct btrfs_inode *inode,
				    struct page *locked_page, u64 start,
				    u64 end, struct writeback_control *wbc)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct cgroup_subsys_state *blkcg_css = wbc_blkcg_css(wbc);
	struct async_cow *ctx;
	struct async_chunk *async_chunk;
	unsigned long nr_pages;
	u64 num_chunks = DIV_ROUND_UP(end - start, SZ_512K);
	int i;
	unsigned nofs_flag;
	const blk_opf_t write_flags = wbc_to_write_flags(wbc);

	nofs_flag = memalloc_nofs_save();
	ctx = kvmalloc(struct_size(ctx, chunks, num_chunks), GFP_KERNEL);
	memalloc_nofs_restore(nofs_flag);
	if (!ctx)
		return false;

	unlock_extent(&inode->io_tree, start, end, NULL);
	set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, &inode->runtime_flags);

	async_chunk = ctx->chunks;
	atomic_set(&ctx->num_chunks, num_chunks);

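	/*
	 * Split the range into at most 512K chunks. For example, a 1M range
	 * starting at offset 0 becomes two chunks covering [0, 512K - 1] and
	 * [512K, 1M - 1].
	 */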
	for (i = 0; i < num_chunks; i++) {
		u64 cur_end = min(end, start + SZ_512K - 1);

		/*
		 * igrab is called higher up in the call chain, take only the
		 * lightweight reference for the callback lifetime
		 */
		ihold(&inode->vfs_inode);
		async_chunk[i].async_cow = ctx;
		async_chunk[i].inode = inode;
		async_chunk[i].start = start;
		async_chunk[i].end = cur_end;
		async_chunk[i].write_flags = write_flags;
		INIT_LIST_HEAD(&async_chunk[i].extents);

		/*
		 * The locked_page comes all the way from writepage and it's
		 * the original page we were actually given.  As we spread
		 * this large delalloc region across multiple async_chunk
		 * structs, only the first struct needs a pointer to
		 * locked_page.
		 *
		 * This way we don't need racy decisions about who is supposed
		 * to unlock it.
		 */
1672		if (locked_page) {
1673			/*
1674			 * Depending on the compressibility, the pages might or
1675			 * might not go through async.  We want all of them to
1676			 * be accounted against wbc once.  Let's do it here
1677			 * before the paths diverge.  wbc accounting is used
1678			 * only for foreign writeback detection and doesn't
1679			 * need full accuracy.  Just account the whole thing
1680			 * against the first page.
1681			 */
1682			wbc_account_cgroup_owner(wbc, locked_page,
1683						 cur_end - start);
1684			async_chunk[i].locked_page = locked_page;
1685			locked_page = NULL;
1686		} else {
1687			async_chunk[i].locked_page = NULL;
1688		}
1689
1690		if (blkcg_css != blkcg_root_css) {
1691			css_get(blkcg_css);
1692			async_chunk[i].blkcg_css = blkcg_css;
1693			async_chunk[i].write_flags |= REQ_BTRFS_CGROUP_PUNT;
1694		} else {
1695			async_chunk[i].blkcg_css = NULL;
1696		}
1697
1698		btrfs_init_work(&async_chunk[i].work, compress_file_range,
1699				submit_compressed_extents);
1700
1701		nr_pages = DIV_ROUND_UP(cur_end - start, PAGE_SIZE);
1702		atomic_add(nr_pages, &fs_info->async_delalloc_pages);
1703
1704		btrfs_queue_work(fs_info->delalloc_workers, &async_chunk[i].work);
1705
1706		start = cur_end + 1;
1707	}
1708	return true;
1709}
1710
1711/*
1712 * Run the delalloc range from start to end, and write back any dirty pages
1713 * covered by the range.
1714 */
1715static noinline int run_delalloc_cow(struct btrfs_inode *inode,
1716				     struct page *locked_page, u64 start,
1717				     u64 end, struct writeback_control *wbc,
1718				     bool pages_dirty)
1719{
1720	u64 done_offset = end;
1721	int ret;
1722
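	/*
	 * cow_file_range() may make only partial progress (e.g. when running
	 * out of space in a zone on a zoned filesystem); @done_offset tells
	 * us how far it got, so write back that part and loop until the whole
	 * range is done.
	 */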
1723	while (start <= end) {
1724		ret = cow_file_range(inode, locked_page, start, end, &done_offset,
1725				     true, false);
1726		if (ret)
1727			return ret;
1728		extent_write_locked_range(&inode->vfs_inode, locked_page, start,
1729					  done_offset, wbc, pages_dirty);
1730		start = done_offset + 1;
1731	}
1732
1733	return 1;
1734}
1735
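/*
 * Check whether any checksums exist for the given byte range.
 *
 * Returns: < 0 on error
 *            0 if no csums exist in the range
 *            1 if there is at least one csum (the range must then be COWed)
 */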
1736static noinline int csum_exist_in_range(struct btrfs_fs_info *fs_info,
1737					u64 bytenr, u64 num_bytes, bool nowait)
1738{
1739	struct btrfs_root *csum_root = btrfs_csum_root(fs_info, bytenr);
1740	struct btrfs_ordered_sum *sums;
1741	int ret;
1742	LIST_HEAD(list);
1743
1744	ret = btrfs_lookup_csums_list(csum_root, bytenr, bytenr + num_bytes - 1,
1745				      &list, 0, nowait);
1746	if (ret == 0 && list_empty(&list))
1747		return 0;
1748
1749	while (!list_empty(&list)) {
1750		sums = list_entry(list.next, struct btrfs_ordered_sum, list);
1751		list_del(&sums->list);
1752		kfree(sums);
1753	}
1754	if (ret < 0)
1755		return ret;
1756	return 1;
1757}
1758
1759static int fallback_to_cow(struct btrfs_inode *inode, struct page *locked_page,
1760			   const u64 start, const u64 end)
1761{
1762	const bool is_space_ino = btrfs_is_free_space_inode(inode);
1763	const bool is_reloc_ino = btrfs_is_data_reloc_root(inode->root);
1764	const u64 range_bytes = end + 1 - start;
1765	struct extent_io_tree *io_tree = &inode->io_tree;
1766	u64 range_start = start;
1767	u64 count;
1768	int ret;
1769
1770	/*
	 * If EXTENT_NORESERVE is set it means that when the buffered write was
	 * made we did not have enough available data space and therefore did
	 * not reserve data space for it, since we thought we could do NOCOW
	 * for the respective file range (either there is a prealloc extent or
	 * the inode has the NOCOW bit set).
	 *
	 * However when we need to fall back to COW mode (because for example
	 * the block group for the corresponding extent was turned to RO mode
	 * by a scrub or relocation) we need to do the following:
1780	 *
1781	 * 1) We increment the bytes_may_use counter of the data space info.
1782	 *    If COW succeeds, it allocates a new data extent and after doing
1783	 *    that it decrements the space info's bytes_may_use counter and
1784	 *    increments its bytes_reserved counter by the same amount (we do
1785	 *    this at btrfs_add_reserved_bytes()). So we need to increment the
1786	 *    bytes_may_use counter to compensate (when space is reserved at
1787	 *    buffered write time, the bytes_may_use counter is incremented);
1788	 *
1789	 * 2) We clear the EXTENT_NORESERVE bit from the range. We do this so
1790	 *    that if the COW path fails for any reason, it decrements (through
1791	 *    extent_clear_unlock_delalloc()) the bytes_may_use counter of the
1792	 *    data space info, which we incremented in the step above.
1793	 *
	 * If we need to fall back to COW and the inode corresponds to a free
1795	 * space cache inode or an inode of the data relocation tree, we must
1796	 * also increment bytes_may_use of the data space_info for the same
1797	 * reason. Space caches and relocated data extents always get a prealloc
1798	 * extent for them, however scrub or balance may have set the block
1799	 * group that contains that extent to RO mode and therefore force COW
1800	 * when starting writeback.
1801	 */
1802	count = count_range_bits(io_tree, &range_start, end, range_bytes,
1803				 EXTENT_NORESERVE, 0, NULL);
1804	if (count > 0 || is_space_ino || is_reloc_ino) {
1805		u64 bytes = count;
1806		struct btrfs_fs_info *fs_info = inode->root->fs_info;
1807		struct btrfs_space_info *sinfo = fs_info->data_sinfo;
1808
1809		if (is_space_ino || is_reloc_ino)
1810			bytes = range_bytes;
1811
1812		spin_lock(&sinfo->lock);
1813		btrfs_space_info_update_bytes_may_use(fs_info, sinfo, bytes);
1814		spin_unlock(&sinfo->lock);
1815
1816		if (count > 0)
1817			clear_extent_bit(io_tree, start, end, EXTENT_NORESERVE,
1818					 NULL);
1819	}
1820
1821	/*
	 * Don't try to create inline extents, as mixing an inline extent that
	 * is written out and unlocked directly with a normal NOCOW extent
	 * doesn't work.
1825	 */
1826	ret = cow_file_range(inode, locked_page, start, end, NULL, false, true);
1827	ASSERT(ret != 1);
1828	return ret;
1829}
1830
1831struct can_nocow_file_extent_args {
1832	/* Input fields. */
1833
1834	/* Start file offset of the range we want to NOCOW. */
1835	u64 start;
1836	/* End file offset (inclusive) of the range we want to NOCOW. */
1837	u64 end;
1838	bool writeback_path;
1839	bool strict;
1840	/*
1841	 * Free the path passed to can_nocow_file_extent() once it's not needed
1842	 * anymore.
1843	 */
1844	bool free_path;
1845
1846	/* Output fields. Only set when can_nocow_file_extent() returns 1. */
1847
1848	u64 disk_bytenr;
1849	u64 disk_num_bytes;
1850	u64 extent_offset;
1851	/* Number of bytes that can be written to in NOCOW mode. */
1852	u64 num_bytes;
1853};
1854
1855/*
1856 * Check if we can NOCOW the file extent that the path points to.
1857 * This function may return with the path released, so the caller should check
1858 * if path->nodes[0] is NULL or not if it needs to use the path afterwards.
1859 *
1860 * Returns: < 0 on error
 *            0 if we cannot NOCOW
1862 *            1 if we can NOCOW
1863 */
1864static int can_nocow_file_extent(struct btrfs_path *path,
1865				 struct btrfs_key *key,
1866				 struct btrfs_inode *inode,
1867				 struct can_nocow_file_extent_args *args)
1868{
1869	const bool is_freespace_inode = btrfs_is_free_space_inode(inode);
1870	struct extent_buffer *leaf = path->nodes[0];
1871	struct btrfs_root *root = inode->root;
1872	struct btrfs_file_extent_item *fi;
1873	u64 extent_end;
1874	u8 extent_type;
1875	int can_nocow = 0;
1876	int ret = 0;
1877	bool nowait = path->nowait;
1878
1879	fi = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);
1880	extent_type = btrfs_file_extent_type(leaf, fi);
1881
1882	if (extent_type == BTRFS_FILE_EXTENT_INLINE)
1883		goto out;
1884
1885	/* Can't access these fields unless we know it's not an inline extent. */
1886	args->disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
1887	args->disk_num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
1888	args->extent_offset = btrfs_file_extent_offset(leaf, fi);
1889
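	/*
	 * Regular extents can only be NOCOWed if the inode has the NODATACOW
	 * flag set; prealloc extents are NOCOW candidates regardless, as
	 * writing to preallocated space does not need a COW.
	 */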
1890	if (!(inode->flags & BTRFS_INODE_NODATACOW) &&
1891	    extent_type == BTRFS_FILE_EXTENT_REG)
1892		goto out;
1893
1894	/*
1895	 * If the extent was created before the generation where the last snapshot
1896	 * for its subvolume was created, then this implies the extent is shared,
1897	 * hence we must COW.
1898	 */
1899	if (!args->strict &&
1900	    btrfs_file_extent_generation(leaf, fi) <=
1901	    btrfs_root_last_snapshot(&root->root_item))
1902		goto out;
1903
1904	/* An explicit hole, must COW. */
1905	if (args->disk_bytenr == 0)
1906		goto out;
1907
1908	/* Compressed/encrypted/encoded extents must be COWed. */
1909	if (btrfs_file_extent_compression(leaf, fi) ||
1910	    btrfs_file_extent_encryption(leaf, fi) ||
1911	    btrfs_file_extent_other_encoding(leaf, fi))
1912		goto out;
1913
1914	extent_end = btrfs_file_extent_end(path);
1915
1916	/*
1917	 * The following checks can be expensive, as they need to take other
1918	 * locks and do btree or rbtree searches, so release the path to avoid
1919	 * blocking other tasks for too long.
1920	 */
1921	btrfs_release_path(path);
1922
1923	ret = btrfs_cross_ref_exist(root, btrfs_ino(inode),
1924				    key->offset - args->extent_offset,
1925				    args->disk_bytenr, args->strict, path);
1926	WARN_ON_ONCE(ret > 0 && is_freespace_inode);
1927	if (ret != 0)
1928		goto out;
1929
1930	if (args->free_path) {
1931		/*
1932		 * We don't need the path anymore, plus through the
1933		 * csum_exist_in_range() call below we will end up allocating
1934		 * another path. So free the path to avoid unnecessary extra
1935		 * memory usage.
1936		 */
1937		btrfs_free_path(path);
1938		path = NULL;
1939	}
1940
1941	/* If there are pending snapshots for this root, we must COW. */
1942	if (args->writeback_path && !is_freespace_inode &&
1943	    atomic_read(&root->snapshot_force_cow))
1944		goto out;
1945
1946	args->disk_bytenr += args->extent_offset;
1947	args->disk_bytenr += args->start - key->offset;
1948	args->num_bytes = min(args->end + 1, extent_end) - args->start;
1949
1950	/*
1951	 * Force COW if csums exist in the range. This ensures that csums for a
1952	 * given extent are either valid or do not exist.
1953	 */
1954	ret = csum_exist_in_range(root->fs_info, args->disk_bytenr, args->num_bytes,
1955				  nowait);
1956	WARN_ON_ONCE(ret > 0 && is_freespace_inode);
1957	if (ret != 0)
1958		goto out;
1959
1960	can_nocow = 1;
1961 out:
1962	if (args->free_path && path)
1963		btrfs_free_path(path);
1964
1965	return ret < 0 ? ret : can_nocow;
1966}
1967
1968/*
 * Called for NOCOW writeback.  This checks for snapshots or COW copies of
 * the extents that exist in the file, and COWs the file as required.
 *
 * If no COW copies or snapshots exist, we write directly to the existing
 * blocks on disk.
1974 */
1975static noinline int run_delalloc_nocow(struct btrfs_inode *inode,
1976				       struct page *locked_page,
1977				       const u64 start, const u64 end)
1978{
1979	struct btrfs_fs_info *fs_info = inode->root->fs_info;
1980	struct btrfs_root *root = inode->root;
1981	struct btrfs_path *path;
1982	u64 cow_start = (u64)-1;
1983	u64 cur_offset = start;
1984	int ret;
1985	bool check_prev = true;
1986	u64 ino = btrfs_ino(inode);
1987	struct can_nocow_file_extent_args nocow_args = { 0 };
1988
1989	/*
	 * Normally on a zoned device we're only doing COW writes, but
	 * relocation on a zoned filesystem serializes I/O, so we're only
	 * writing sequentially and can end up here as well.
1993	 */
1994	ASSERT(!btrfs_is_zoned(fs_info) || btrfs_is_data_reloc_root(root));
1995
1996	path = btrfs_alloc_path();
1997	if (!path) {
1998		ret = -ENOMEM;
1999		goto error;
2000	}
2001
2002	nocow_args.end = end;
2003	nocow_args.writeback_path = true;
2004
2005	while (1) {
2006		struct btrfs_block_group *nocow_bg = NULL;
2007		struct btrfs_ordered_extent *ordered;
2008		struct btrfs_key found_key;
2009		struct btrfs_file_extent_item *fi;
2010		struct extent_buffer *leaf;
2011		u64 extent_end;
2012		u64 ram_bytes;
2013		u64 nocow_end;
2014		int extent_type;
2015		bool is_prealloc;
2016
2017		ret = btrfs_lookup_file_extent(NULL, root, path, ino,
2018					       cur_offset, 0);
2019		if (ret < 0)
2020			goto error;
2021
2022		/*
2023		 * If there is no extent for our range when doing the initial
2024		 * search, then go back to the previous slot as it will be the
2025		 * one containing the search offset
2026		 */
2027		if (ret > 0 && path->slots[0] > 0 && check_prev) {
2028			leaf = path->nodes[0];
2029			btrfs_item_key_to_cpu(leaf, &found_key,
2030					      path->slots[0] - 1);
2031			if (found_key.objectid == ino &&
2032			    found_key.type == BTRFS_EXTENT_DATA_KEY)
2033				path->slots[0]--;
2034		}
2035		check_prev = false;
2036next_slot:
2037		/* Go to next leaf if we have exhausted the current one */
2038		leaf = path->nodes[0];
2039		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
2040			ret = btrfs_next_leaf(root, path);
2041			if (ret < 0)
2042				goto error;
2043			if (ret > 0)
2044				break;
2045			leaf = path->nodes[0];
2046		}
2047
2048		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2049
2050		/* Didn't find anything for our INO */
2051		if (found_key.objectid > ino)
2052			break;
2053		/*
		 * Keep searching until we find an EXTENT_DATA item or there
		 * are no more extents for this inode.
2056		 */
2057		if (WARN_ON_ONCE(found_key.objectid < ino) ||
2058		    found_key.type < BTRFS_EXTENT_DATA_KEY) {
2059			path->slots[0]++;
2060			goto next_slot;
2061		}
2062
		/* Found key is not EXTENT_DATA_KEY or starts after the requested range */
2064		if (found_key.type > BTRFS_EXTENT_DATA_KEY ||
2065		    found_key.offset > end)
2066			break;
2067
2068		/*
2069		 * If the found extent starts after requested offset, then
2070		 * adjust extent_end to be right before this extent begins
2071		 */
2072		if (found_key.offset > cur_offset) {
2073			extent_end = found_key.offset;
2074			extent_type = 0;
2075			goto must_cow;
2076		}
2077
2078		/*
2079		 * Found extent which begins before our range and potentially
2080		 * intersect it
2081		 */
2082		fi = btrfs_item_ptr(leaf, path->slots[0],
2083				    struct btrfs_file_extent_item);
2084		extent_type = btrfs_file_extent_type(leaf, fi);
2085		/* If this is triggered then we have a memory corruption. */
2086		ASSERT(extent_type < BTRFS_NR_FILE_EXTENT_TYPES);
2087		if (WARN_ON(extent_type >= BTRFS_NR_FILE_EXTENT_TYPES)) {
2088			ret = -EUCLEAN;
2089			goto error;
2090		}
2091		ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
2092		extent_end = btrfs_file_extent_end(path);
2093
2094		/*
2095		 * If the extent we got ends before our current offset, skip to
2096		 * the next extent.
2097		 */
2098		if (extent_end <= cur_offset) {
2099			path->slots[0]++;
2100			goto next_slot;
2101		}
2102
2103		nocow_args.start = cur_offset;
2104		ret = can_nocow_file_extent(path, &found_key, inode, &nocow_args);
2105		if (ret < 0)
2106			goto error;
2107		if (ret == 0)
2108			goto must_cow;
2109
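		/* can_nocow_file_extent() returned 1, NOCOW is possible. */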
2110		ret = 0;
2111		nocow_bg = btrfs_inc_nocow_writers(fs_info, nocow_args.disk_bytenr);
2112		if (!nocow_bg) {
2113must_cow:
2114			/*
2115			 * If we can't perform NOCOW writeback for the range,
2116			 * then record the beginning of the range that needs to
2117			 * be COWed.  It will be written out before the next
2118			 * NOCOW range if we find one, or when exiting this
2119			 * loop.
2120			 */
2121			if (cow_start == (u64)-1)
2122				cow_start = cur_offset;
2123			cur_offset = extent_end;
2124			if (cur_offset > end)
2125				break;
2126			if (!path->nodes[0])
2127				continue;
2128			path->slots[0]++;
2129			goto next_slot;
2130		}
2131
2132		/*
		 * COW the range from cow_start to found_key.offset - 1, as the
		 * key contains the beginning of the first extent that can be
		 * NOCOWed, which follows a range that needs to be COWed.
2136		 */
2137		if (cow_start != (u64)-1) {
2138			ret = fallback_to_cow(inode, locked_page,
2139					      cow_start, found_key.offset - 1);
2140			cow_start = (u64)-1;
2141			if (ret) {
2142				btrfs_dec_nocow_writers(nocow_bg);
2143				goto error;
2144			}
2145		}
2146
2147		nocow_end = cur_offset + nocow_args.num_bytes - 1;
2148		is_prealloc = extent_type == BTRFS_FILE_EXTENT_PREALLOC;
2149		if (is_prealloc) {
2150			u64 orig_start = found_key.offset - nocow_args.extent_offset;
2151			struct extent_map *em;
2152
2153			em = create_io_em(inode, cur_offset, nocow_args.num_bytes,
2154					  orig_start,
2155					  nocow_args.disk_bytenr, /* block_start */
2156					  nocow_args.num_bytes, /* block_len */
2157					  nocow_args.disk_num_bytes, /* orig_block_len */
2158					  ram_bytes, BTRFS_COMPRESS_NONE,
2159					  BTRFS_ORDERED_PREALLOC);
2160			if (IS_ERR(em)) {
2161				btrfs_dec_nocow_writers(nocow_bg);
2162				ret = PTR_ERR(em);
2163				goto error;
2164			}
2165			free_extent_map(em);
2166		}
2167
2168		ordered = btrfs_alloc_ordered_extent(inode, cur_offset,
2169				nocow_args.num_bytes, nocow_args.num_bytes,
2170				nocow_args.disk_bytenr, nocow_args.num_bytes, 0,
2171				is_prealloc
2172				? (1 << BTRFS_ORDERED_PREALLOC)
2173				: (1 << BTRFS_ORDERED_NOCOW),
2174				BTRFS_COMPRESS_NONE);
2175		btrfs_dec_nocow_writers(nocow_bg);
2176		if (IS_ERR(ordered)) {
2177			if (is_prealloc) {
2178				btrfs_drop_extent_map_range(inode, cur_offset,
2179							    nocow_end, false);
2180			}
2181			ret = PTR_ERR(ordered);
2182			goto error;
2183		}
2184
2185		if (btrfs_is_data_reloc_root(root))
2186			/*
2187			 * Error handled later, as we must prevent
2188			 * extent_clear_unlock_delalloc() in error handler
2189			 * from freeing metadata of created ordered extent.
2190			 */
2191			ret = btrfs_reloc_clone_csums(ordered);
2192		btrfs_put_ordered_extent(ordered);
2193
2194		extent_clear_unlock_delalloc(inode, cur_offset, nocow_end,
2195					     locked_page, EXTENT_LOCKED |
2196					     EXTENT_DELALLOC |
2197					     EXTENT_CLEAR_DATA_RESV,
2198					     PAGE_UNLOCK | PAGE_SET_ORDERED);
2199
2200		cur_offset = extent_end;
2201
2202		/*
2203		 * btrfs_reloc_clone_csums() error, now we're OK to call error
2204		 * handler, as metadata for created ordered extent will only
2205		 * be freed by btrfs_finish_ordered_io().
2206		 */
2207		if (ret)
2208			goto error;
2209		if (cur_offset > end)
2210			break;
2211	}
2212	btrfs_release_path(path);
2213
2214	if (cur_offset <= end && cow_start == (u64)-1)
2215		cow_start = cur_offset;
2216
2217	if (cow_start != (u64)-1) {
2218		cur_offset = end;
2219		ret = fallback_to_cow(inode, locked_page, cow_start, end);
2220		cow_start = (u64)-1;
2221		if (ret)
2222			goto error;
2223	}
2224
2225	btrfs_free_path(path);
2226	return 0;
2227
2228error:
2229	/*
2230	 * If an error happened while a COW region is outstanding, cur_offset
2231	 * needs to be reset to cow_start to ensure the COW region is unlocked
2232	 * as well.
2233	 */
2234	if (cow_start != (u64)-1)
2235		cur_offset = cow_start;
2236	if (cur_offset < end)
2237		extent_clear_unlock_delalloc(inode, cur_offset, end,
2238					     locked_page, EXTENT_LOCKED |
2239					     EXTENT_DELALLOC | EXTENT_DEFRAG |
2240					     EXTENT_DO_ACCOUNTING, PAGE_UNLOCK |
2241					     PAGE_START_WRITEBACK |
2242					     PAGE_END_WRITEBACK);
2243	btrfs_free_path(path);
2244	return ret;
2245}
2246
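/*
 * NOCOW writeback is only attempted if the inode has the NODATACOW or
 * PREALLOC flag set, and no part of the range was marked for defrag
 * (defragging requires rewriting, i.e. COWing, the data).
 */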
2247static bool should_nocow(struct btrfs_inode *inode, u64 start, u64 end)
2248{
2249	if (inode->flags & (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)) {
2250		if (inode->defrag_bytes &&
2251		    test_range_bit_exists(&inode->io_tree, start, end, EXTENT_DEFRAG))
2252			return false;
2253		return true;
2254	}
2255	return false;
2256}
2257
2258/*
2259 * Function to process delayed allocation (create CoW) for ranges which are
2260 * being touched for the first time.
2261 */
2262int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct page *locked_page,
2263			     u64 start, u64 end, struct writeback_control *wbc)
2264{
2265	const bool zoned = btrfs_is_zoned(inode->root->fs_info);
2266	int ret;
2267
2268	/*
2269	 * The range must cover part of the @locked_page, or a return of 1
2270	 * can confuse the caller.
2271	 */
2272	ASSERT(!(end <= page_offset(locked_page) ||
2273		 start >= page_offset(locked_page) + PAGE_SIZE));
2274
2275	if (should_nocow(inode, start, end)) {
2276		ret = run_delalloc_nocow(inode, locked_page, start, end);
2277		goto out;
2278	}
2279
2280	if (btrfs_inode_can_compress(inode) &&
2281	    inode_need_compress(inode, start, end) &&
2282	    run_delalloc_compressed(inode, locked_page, start, end, wbc))
2283		return 1;
2284
2285	if (zoned)
2286		ret = run_delalloc_cow(inode, locked_page, start, end, wbc,
2287				       true);
2288	else
2289		ret = cow_file_range(inode, locked_page, start, end, NULL,
2290				     false, false);
2291
2292out:
2293	if (ret < 0)
2294		btrfs_cleanup_ordered_extents(inode, locked_page, start,
2295					      end - start + 1);
2296	return ret;
2297}
2298
2299void btrfs_split_delalloc_extent(struct btrfs_inode *inode,
2300				 struct extent_state *orig, u64 split)
2301{
2302	struct btrfs_fs_info *fs_info = inode->root->fs_info;
2303	u64 size;
2304
2305	lockdep_assert_held(&inode->io_tree.lock);
2306
2307	/* not delalloc, ignore it */
2308	if (!(orig->state & EXTENT_DELALLOC))
2309		return;
2310
2311	size = orig->end - orig->start + 1;
2312	if (size > fs_info->max_extent_size) {
2313		u32 num_extents;
2314		u64 new_size;
2315
2316		/*
2317		 * See the explanation in btrfs_merge_delalloc_extent, the same
2318		 * applies here, just in reverse.
2319		 */
2320		new_size = orig->end - split + 1;
2321		num_extents = count_max_extents(fs_info, new_size);
2322		new_size = split - orig->start;
2323		num_extents += count_max_extents(fs_info, new_size);
2324		if (count_max_extents(fs_info, size) >= num_extents)
2325			return;
2326	}
2327
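	/* Splitting one extent into two adds exactly one outstanding extent. */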
2328	spin_lock(&inode->lock);
2329	btrfs_mod_outstanding_extents(inode, 1);
2330	spin_unlock(&inode->lock);
2331}
2332
2333/*
2334 * Handle merged delayed allocation extents so we can keep track of new extents
2335 * that are just merged onto old extents, such as when we are doing sequential
2336 * writes, so we can properly account for the metadata space we'll need.
2337 */
2338void btrfs_merge_delalloc_extent(struct btrfs_inode *inode, struct extent_state *new,
2339				 struct extent_state *other)
2340{
2341	struct btrfs_fs_info *fs_info = inode->root->fs_info;
2342	u64 new_size, old_size;
2343	u32 num_extents;
2344
2345	lockdep_assert_held(&inode->io_tree.lock);
2346
2347	/* not delalloc, ignore it */
2348	if (!(other->state & EXTENT_DELALLOC))
2349		return;
2350
2351	if (new->start > other->start)
2352		new_size = new->end - other->start + 1;
2353	else
2354		new_size = other->end - new->start + 1;
2355
2356	/* we're not bigger than the max, unreserve the space and go */
2357	if (new_size <= fs_info->max_extent_size) {
2358		spin_lock(&inode->lock);
2359		btrfs_mod_outstanding_extents(inode, -1);
2360		spin_unlock(&inode->lock);
2361		return;
2362	}
2363
2364	/*
2365	 * We have to add up either side to figure out how many extents were
2366	 * accounted for before we merged into one big extent.  If the number of
2367	 * extents we accounted for is <= the amount we need for the new range
2368	 * then we can return, otherwise drop.  Think of it like this
2369	 *
2370	 * [ 4k][MAX_SIZE]
2371	 *
2372	 * So we've grown the extent by a MAX_SIZE extent, this would mean we
2373	 * need 2 outstanding extents, on one side we have 1 and the other side
2374	 * we have 1 so they are == and we can return.  But in this case
2375	 *
2376	 * [MAX_SIZE+4k][MAX_SIZE+4k]
2377	 *
2378	 * Each range on their own accounts for 2 extents, but merged together
2379	 * they are only 3 extents worth of accounting, so we need to drop in
2380	 * this case.
2381	 */
2382	old_size = other->end - other->start + 1;
2383	num_extents = count_max_extents(fs_info, old_size);
2384	old_size = new->end - new->start + 1;
2385	num_extents += count_max_extents(fs_info, old_size);
2386	if (count_max_extents(fs_info, new_size) >= num_extents)
2387		return;
2388
2389	spin_lock(&inode->lock);
2390	btrfs_mod_outstanding_extents(inode, -1);
2391	spin_unlock(&inode->lock);
2392}
2393
2394static void btrfs_add_delalloc_inode(struct btrfs_inode *inode)
2395{
2396	struct btrfs_root *root = inode->root;
2397	struct btrfs_fs_info *fs_info = root->fs_info;
2398
2399	spin_lock(&root->delalloc_lock);
2400	ASSERT(list_empty(&inode->delalloc_inodes));
2401	list_add_tail(&inode->delalloc_inodes, &root->delalloc_inodes);
2402	root->nr_delalloc_inodes++;
2403	if (root->nr_delalloc_inodes == 1) {
2404		spin_lock(&fs_info->delalloc_root_lock);
2405		ASSERT(list_empty(&root->delalloc_root));
2406		list_add_tail(&root->delalloc_root, &fs_info->delalloc_roots);
2407		spin_unlock(&fs_info->delalloc_root_lock);
2408	}
2409	spin_unlock(&root->delalloc_lock);
2410}
2411
2412void btrfs_del_delalloc_inode(struct btrfs_inode *inode)
2413{
2414	struct btrfs_root *root = inode->root;
2415	struct btrfs_fs_info *fs_info = root->fs_info;
2416
2417	lockdep_assert_held(&root->delalloc_lock);
2418
2419	/*
2420	 * We may be called after the inode was already deleted from the list,
2421	 * namely in the transaction abort path btrfs_destroy_delalloc_inodes(),
2422	 * and then later through btrfs_clear_delalloc_extent() while the inode
2423	 * still has ->delalloc_bytes > 0.
2424	 */
2425	if (!list_empty(&inode->delalloc_inodes)) {
2426		list_del_init(&inode->delalloc_inodes);
2427		root->nr_delalloc_inodes--;
2428		if (!root->nr_delalloc_inodes) {
2429			ASSERT(list_empty(&root->delalloc_inodes));
2430			spin_lock(&fs_info->delalloc_root_lock);
2431			ASSERT(!list_empty(&root->delalloc_root));
2432			list_del_init(&root->delalloc_root);
2433			spin_unlock(&fs_info->delalloc_root_lock);
2434		}
2435	}
2436}
2437
2438/*
 * Properly track delayed allocation bytes in the inode and maintain the
 * list of inodes that have pending delalloc work to be done.
2441 */
2442void btrfs_set_delalloc_extent(struct btrfs_inode *inode, struct extent_state *state,
2443			       u32 bits)
2444{
2445	struct btrfs_fs_info *fs_info = inode->root->fs_info;
2446
2447	lockdep_assert_held(&inode->io_tree.lock);
2448
2449	if ((bits & EXTENT_DEFRAG) && !(bits & EXTENT_DELALLOC))
2450		WARN_ON(1);
2451	/*
	 * The set_bit and clear_bit hooks normally require _irqsave/restore,
	 * but in this case we are only testing for the DELALLOC bit, which is
	 * only set or cleared with irqs on.
2455	 */
2456	if (!(state->state & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
2457		u64 len = state->end + 1 - state->start;
2458		u64 prev_delalloc_bytes;
2459		u32 num_extents = count_max_extents(fs_info, len);
2460
2461		spin_lock(&inode->lock);
2462		btrfs_mod_outstanding_extents(inode, num_extents);
2463		spin_unlock(&inode->lock);
2464
2465		/* For sanity tests */
2466		if (btrfs_is_testing(fs_info))
2467			return;
2468
2469		percpu_counter_add_batch(&fs_info->delalloc_bytes, len,
2470					 fs_info->delalloc_batch);
2471		spin_lock(&inode->lock);
2472		prev_delalloc_bytes = inode->delalloc_bytes;
2473		inode->delalloc_bytes += len;
2474		if (bits & EXTENT_DEFRAG)
2475			inode->defrag_bytes += len;
2476		spin_unlock(&inode->lock);
2477
2478		/*
2479		 * We don't need to be under the protection of the inode's lock,
2480		 * because we are called while holding the inode's io_tree lock
2481		 * and are therefore protected against concurrent calls of this
2482		 * function and btrfs_clear_delalloc_extent().
2483		 */
2484		if (!btrfs_is_free_space_inode(inode) && prev_delalloc_bytes == 0)
2485			btrfs_add_delalloc_inode(inode);
2486	}
2487
2488	if (!(state->state & EXTENT_DELALLOC_NEW) &&
2489	    (bits & EXTENT_DELALLOC_NEW)) {
2490		spin_lock(&inode->lock);
2491		inode->new_delalloc_bytes += state->end + 1 - state->start;
2492		spin_unlock(&inode->lock);
2493	}
2494}
2495
2496/*
2497 * Once a range is no longer delalloc this function ensures that proper
2498 * accounting happens.
2499 */
2500void btrfs_clear_delalloc_extent(struct btrfs_inode *inode,
2501				 struct extent_state *state, u32 bits)
2502{
2503	struct btrfs_fs_info *fs_info = inode->root->fs_info;
2504	u64 len = state->end + 1 - state->start;
2505	u32 num_extents = count_max_extents(fs_info, len);
2506
2507	lockdep_assert_held(&inode->io_tree.lock);
2508
2509	if ((state->state & EXTENT_DEFRAG) && (bits & EXTENT_DEFRAG)) {
2510		spin_lock(&inode->lock);
2511		inode->defrag_bytes -= len;
2512		spin_unlock(&inode->lock);
2513	}
2514
2515	/*
	 * The set_bit and clear_bit hooks normally require _irqsave/restore,
	 * but in this case we are only testing for the DELALLOC bit, which is
	 * only set or cleared with irqs on.
2519	 */
2520	if ((state->state & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
2521		struct btrfs_root *root = inode->root;
2522		u64 new_delalloc_bytes;
2523
2524		spin_lock(&inode->lock);
2525		btrfs_mod_outstanding_extents(inode, -num_extents);
2526		spin_unlock(&inode->lock);
2527
2528		/*
2529		 * We don't reserve metadata space for space cache inodes so we
2530		 * don't need to call delalloc_release_metadata if there is an
2531		 * error.
2532		 */
2533		if (bits & EXTENT_CLEAR_META_RESV &&
2534		    root != fs_info->tree_root)
2535			btrfs_delalloc_release_metadata(inode, len, true);
2536
2537		/* For sanity tests. */
2538		if (btrfs_is_testing(fs_info))
2539			return;
2540
2541		if (!btrfs_is_data_reloc_root(root) &&
2542		    !btrfs_is_free_space_inode(inode) &&
2543		    !(state->state & EXTENT_NORESERVE) &&
2544		    (bits & EXTENT_CLEAR_DATA_RESV))
2545			btrfs_free_reserved_data_space_noquota(fs_info, len);
2546
2547		percpu_counter_add_batch(&fs_info->delalloc_bytes, -len,
2548					 fs_info->delalloc_batch);
2549		spin_lock(&inode->lock);
2550		inode->delalloc_bytes -= len;
2551		new_delalloc_bytes = inode->delalloc_bytes;
2552		spin_unlock(&inode->lock);
2553
2554		/*
2555		 * We don't need to be under the protection of the inode's lock,
2556		 * because we are called while holding the inode's io_tree lock
2557		 * and are therefore protected against concurrent calls of this
2558		 * function and btrfs_set_delalloc_extent().
2559		 */
2560		if (!btrfs_is_free_space_inode(inode) && new_delalloc_bytes == 0) {
2561			spin_lock(&root->delalloc_lock);
2562			btrfs_del_delalloc_inode(inode);
2563			spin_unlock(&root->delalloc_lock);
2564		}
2565	}
2566
2567	if ((state->state & EXTENT_DELALLOC_NEW) &&
2568	    (bits & EXTENT_DELALLOC_NEW)) {
2569		spin_lock(&inode->lock);
2570		ASSERT(inode->new_delalloc_bytes >= len);
2571		inode->new_delalloc_bytes -= len;
2572		if (bits & EXTENT_ADD_INODE_BYTES)
2573			inode_add_bytes(&inode->vfs_inode, len);
2574		spin_unlock(&inode->lock);
2575	}
2576}
2577
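/*
 * Make sure the ordered extent (and, for COW writes, its extent map) exactly
 * matches the bio: if the bio covers only a prefix of the ordered extent,
 * split off a new ordered extent for it.  The bbio holds a reference on the
 * resulting ordered extent in either case.
 */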
2578static int btrfs_extract_ordered_extent(struct btrfs_bio *bbio,
2579					struct btrfs_ordered_extent *ordered)
2580{
2581	u64 start = (u64)bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT;
2582	u64 len = bbio->bio.bi_iter.bi_size;
2583	struct btrfs_ordered_extent *new;
2584	int ret;
2585
2586	/* Must always be called for the beginning of an ordered extent. */
2587	if (WARN_ON_ONCE(start != ordered->disk_bytenr))
2588		return -EINVAL;
2589
2590	/* No need to split if the ordered extent covers the entire bio. */
2591	if (ordered->disk_num_bytes == len) {
2592		refcount_inc(&ordered->refs);
2593		bbio->ordered = ordered;
2594		return 0;
2595	}
2596
2597	/*
2598	 * Don't split the extent_map for NOCOW extents, as we're writing into
2599	 * a pre-existing one.
2600	 */
2601	if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags)) {
2602		ret = split_extent_map(bbio->inode, bbio->file_offset,
2603				       ordered->num_bytes, len,
2604				       ordered->disk_bytenr);
2605		if (ret)
2606			return ret;
2607	}
2608
2609	new = btrfs_split_ordered_extent(ordered, len);
2610	if (IS_ERR(new))
2611		return PTR_ERR(new);
2612	bbio->ordered = new;
2613	return 0;
2614}
2615
2616/*
 * Given a list of ordered sums, record them in the inode.  This happens
 * at I/O completion time based on sums calculated at bio submission time.
2619 */
2620static int add_pending_csums(struct btrfs_trans_handle *trans,
2621			     struct list_head *list)
2622{
2623	struct btrfs_ordered_sum *sum;
2624	struct btrfs_root *csum_root = NULL;
2625	int ret;
2626
2627	list_for_each_entry(sum, list, list) {
2628		trans->adding_csums = true;
2629		if (!csum_root)
2630			csum_root = btrfs_csum_root(trans->fs_info,
2631						    sum->logical);
2632		ret = btrfs_csum_file_blocks(trans, csum_root, sum);
2633		trans->adding_csums = false;
2634		if (ret)
2635			return ret;
2636	}
2637	return 0;
2638}
2639
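/*
 * Find the parts of the given range that are holes (no extent allocated)
 * and mark them with EXTENT_DELALLOC_NEW, so that completing the resulting
 * ordered extents increases the inode's number of bytes only for previously
 * unwritten ranges.
 */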
2640static int btrfs_find_new_delalloc_bytes(struct btrfs_inode *inode,
2641					 const u64 start,
2642					 const u64 len,
2643					 struct extent_state **cached_state)
2644{
2645	u64 search_start = start;
2646	const u64 end = start + len - 1;
2647
2648	while (search_start < end) {
2649		const u64 search_len = end - search_start + 1;
2650		struct extent_map *em;
2651		u64 em_len;
2652		int ret = 0;
2653
2654		em = btrfs_get_extent(inode, NULL, search_start, search_len);
2655		if (IS_ERR(em))
2656			return PTR_ERR(em);
2657
2658		if (em->block_start != EXTENT_MAP_HOLE)
2659			goto next;
2660
2661		em_len = em->len;
2662		if (em->start < search_start)
2663			em_len -= search_start - em->start;
2664		if (em_len > search_len)
2665			em_len = search_len;
2666
2667		ret = set_extent_bit(&inode->io_tree, search_start,
2668				     search_start + em_len - 1,
2669				     EXTENT_DELALLOC_NEW, cached_state);
2670next:
2671		search_start = extent_map_end(em);
2672		free_extent_map(em);
2673		if (ret)
2674			return ret;
2675	}
2676	return 0;
2677}
2678
2679int btrfs_set_extent_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
2680			      unsigned int extra_bits,
2681			      struct extent_state **cached_state)
2682{
2683	WARN_ON(PAGE_ALIGNED(end));
2684
2685	if (start >= i_size_read(&inode->vfs_inode) &&
2686	    !(inode->flags & BTRFS_INODE_PREALLOC)) {
2687		/*
2688		 * There can't be any extents following eof in this case so just
2689		 * set the delalloc new bit for the range directly.
2690		 */
2691		extra_bits |= EXTENT_DELALLOC_NEW;
2692	} else {
2693		int ret;
2694
2695		ret = btrfs_find_new_delalloc_bytes(inode, start,
2696						    end + 1 - start,
2697						    cached_state);
2698		if (ret)
2699			return ret;
2700	}
2701
2702	return set_extent_bit(&inode->io_tree, start, end,
2703			      EXTENT_DELALLOC | extra_bits, cached_state);
2704}
2705
/* See btrfs_writepage_cow_fixup() for details on why this is required. */
2707struct btrfs_writepage_fixup {
2708	struct page *page;
2709	struct btrfs_inode *inode;
2710	struct btrfs_work work;
2711};
2712
2713static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
2714{
2715	struct btrfs_writepage_fixup *fixup =
2716		container_of(work, struct btrfs_writepage_fixup, work);
2717	struct btrfs_ordered_extent *ordered;
2718	struct extent_state *cached_state = NULL;
2719	struct extent_changeset *data_reserved = NULL;
2720	struct page *page = fixup->page;
2721	struct btrfs_inode *inode = fixup->inode;
2722	struct btrfs_fs_info *fs_info = inode->root->fs_info;
2723	u64 page_start = page_offset(page);
2724	u64 page_end = page_offset(page) + PAGE_SIZE - 1;
2725	int ret = 0;
2726	bool free_delalloc_space = true;
2727
2728	/*
2729	 * This is similar to page_mkwrite, we need to reserve the space before
2730	 * we take the page lock.
2731	 */
2732	ret = btrfs_delalloc_reserve_space(inode, &data_reserved, page_start,
2733					   PAGE_SIZE);
2734again:
2735	lock_page(page);
2736
2737	/*
2738	 * Before we queued this fixup, we took a reference on the page.
2739	 * page->mapping may go NULL, but it shouldn't be moved to a different
2740	 * address space.
2741	 */
2742	if (!page->mapping || !PageDirty(page) || !PageChecked(page)) {
2743		/*
2744		 * Unfortunately this is a little tricky, either
2745		 *
2746		 * 1) We got here and our page had already been dealt with and
2747		 *    we reserved our space, thus ret == 0, so we need to just
2748		 *    drop our space reservation and bail.  This can happen the
2749		 *    first time we come into the fixup worker, or could happen
2750		 *    while waiting for the ordered extent.
2751		 * 2) Our page was already dealt with, but we happened to get an
2752		 *    ENOSPC above from the btrfs_delalloc_reserve_space.  In
2753		 *    this case we obviously don't have anything to release, but
2754		 *    because the page was already dealt with we don't want to
2755		 *    mark the page with an error, so make sure we're resetting
2756		 *    ret to 0.  This is why we have this check _before_ the ret
2757		 *    check, because we do not want to have a surprise ENOSPC
2758		 *    when the page was already properly dealt with.
2759		 */
2760		if (!ret) {
2761			btrfs_delalloc_release_extents(inode, PAGE_SIZE);
2762			btrfs_delalloc_release_space(inode, data_reserved,
2763						     page_start, PAGE_SIZE,
2764						     true);
2765		}
2766		ret = 0;
2767		goto out_page;
2768	}
2769
2770	/*
2771	 * We can't mess with the page state unless it is locked, so now that
2772	 * it is locked bail if we failed to make our space reservation.
2773	 */
2774	if (ret)
2775		goto out_page;
2776
2777	lock_extent(&inode->io_tree, page_start, page_end, &cached_state);
2778
2779	/* already ordered? We're done */
2780	if (PageOrdered(page))
2781		goto out_reserved;
2782
2783	ordered = btrfs_lookup_ordered_range(inode, page_start, PAGE_SIZE);
2784	if (ordered) {
2785		unlock_extent(&inode->io_tree, page_start, page_end,
2786			      &cached_state);
2787		unlock_page(page);
2788		btrfs_start_ordered_extent(ordered);
2789		btrfs_put_ordered_extent(ordered);
2790		goto again;
2791	}
2792
2793	ret = btrfs_set_extent_delalloc(inode, page_start, page_end, 0,
2794					&cached_state);
2795	if (ret)
2796		goto out_reserved;
2797
2798	/*
2799	 * Everything went as planned, we're now the owner of a dirty page with
2800	 * delayed allocation bits set and space reserved for our COW
2801	 * destination.
2802	 *
2803	 * The page was dirty when we started, nothing should have cleaned it.
2804	 */
2805	BUG_ON(!PageDirty(page));
2806	free_delalloc_space = false;
2807out_reserved:
2808	btrfs_delalloc_release_extents(inode, PAGE_SIZE);
2809	if (free_delalloc_space)
2810		btrfs_delalloc_release_space(inode, data_reserved, page_start,
2811					     PAGE_SIZE, true);
2812	unlock_extent(&inode->io_tree, page_start, page_end, &cached_state);
2813out_page:
2814	if (ret) {
2815		/*
2816		 * We hit ENOSPC or other errors.  Update the mapping and page
2817		 * to reflect the errors and clean the page.
2818		 */
2819		mapping_set_error(page->mapping, ret);
2820		btrfs_mark_ordered_io_finished(inode, page, page_start,
2821					       PAGE_SIZE, !ret);
2822		clear_page_dirty_for_io(page);
2823	}
2824	btrfs_folio_clear_checked(fs_info, page_folio(page), page_start, PAGE_SIZE);
2825	unlock_page(page);
2826	put_page(page);
2827	kfree(fixup);
2828	extent_changeset_free(data_reserved);
2829	/*
2830	 * As a precaution, do a delayed iput in case it would be the last iput
2831	 * that could need flushing space. Recursing back to fixup worker would
2832	 * deadlock.
2833	 */
2834	btrfs_add_delayed_iput(inode);
2835}
2836
2837/*
2838 * There are a few paths in the higher layers of the kernel that directly
2839 * set the page dirty bit without asking the filesystem if it is a
2840 * good idea.  This causes problems because we want to make sure COW
2841 * properly happens and the data=ordered rules are followed.
2842 *
2843 * In our case any range that doesn't have the ORDERED bit set
2844 * hasn't been properly setup for IO.  We kick off an async process
2845 * to fix it up.  The async helper will wait for ordered extents, set
2846 * the delalloc bit and make it safe to write the page.
2847 */
2848int btrfs_writepage_cow_fixup(struct page *page)
2849{
2850	struct inode *inode = page->mapping->host;
2851	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
2852	struct btrfs_writepage_fixup *fixup;
2853
2854	/* This page has ordered extent covering it already */
2855	if (PageOrdered(page))
2856		return 0;
2857
2858	/*
	 * PageChecked is set below when we create a fixup worker for this
	 * page.  Don't try to create another one if we're already
	 * PageChecked().
2861	 *
2862	 * The extent_io writepage code will redirty the page if we send back
2863	 * EAGAIN.
2864	 */
2865	if (PageChecked(page))
2866		return -EAGAIN;
2867
2868	fixup = kzalloc(sizeof(*fixup), GFP_NOFS);
2869	if (!fixup)
2870		return -EAGAIN;
2871
2872	/*
2873	 * We are already holding a reference to this inode from
2874	 * write_cache_pages.  We need to hold it because the space reservation
2875	 * takes place outside of the page lock, and we can't trust
2876	 * page->mapping outside of the page lock.
2877	 */
2878	ihold(inode);
2879	btrfs_folio_set_checked(fs_info, page_folio(page), page_offset(page), PAGE_SIZE);
2880	get_page(page);
2881	btrfs_init_work(&fixup->work, btrfs_writepage_fixup_worker, NULL);
2882	fixup->page = page;
2883	fixup->inode = BTRFS_I(inode);
2884	btrfs_queue_work(fs_info->fixup_workers, &fixup->work);
2885
2886	return -EAGAIN;
2887}
2888
2889static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
2890				       struct btrfs_inode *inode, u64 file_pos,
2891				       struct btrfs_file_extent_item *stack_fi,
2892				       const bool update_inode_bytes,
2893				       u64 qgroup_reserved)
2894{
2895	struct btrfs_root *root = inode->root;
2896	const u64 sectorsize = root->fs_info->sectorsize;
2897	struct btrfs_path *path;
2898	struct extent_buffer *leaf;
2899	struct btrfs_key ins;
2900	u64 disk_num_bytes = btrfs_stack_file_extent_disk_num_bytes(stack_fi);
2901	u64 disk_bytenr = btrfs_stack_file_extent_disk_bytenr(stack_fi);
2902	u64 offset = btrfs_stack_file_extent_offset(stack_fi);
2903	u64 num_bytes = btrfs_stack_file_extent_num_bytes(stack_fi);
2904	u64 ram_bytes = btrfs_stack_file_extent_ram_bytes(stack_fi);
2905	struct btrfs_drop_extents_args drop_args = { 0 };
2906	int ret;
2907
2908	path = btrfs_alloc_path();
2909	if (!path)
2910		return -ENOMEM;
2911
2912	/*
	 * We may be replacing one extent in the tree with another.
	 * The new extent is pinned in the extent map, and we don't want
	 * to drop it from the cache until it is completely in the btree.
	 *
	 * So, tell btrfs_drop_extents to leave this extent in the cache.
	 * The caller is expected to unpin it and allow it to be merged
	 * with the others.
2920	 */
2921	drop_args.path = path;
2922	drop_args.start = file_pos;
2923	drop_args.end = file_pos + num_bytes;
2924	drop_args.replace_extent = true;
2925	drop_args.extent_item_size = sizeof(*stack_fi);
2926	ret = btrfs_drop_extents(trans, root, inode, &drop_args);
2927	if (ret)
2928		goto out;
2929
2930	if (!drop_args.extent_inserted) {
2931		ins.objectid = btrfs_ino(inode);
2932		ins.offset = file_pos;
2933		ins.type = BTRFS_EXTENT_DATA_KEY;
2934
2935		ret = btrfs_insert_empty_item(trans, root, path, &ins,
2936					      sizeof(*stack_fi));
2937		if (ret)
2938			goto out;
2939	}
2940	leaf = path->nodes[0];
2941	btrfs_set_stack_file_extent_generation(stack_fi, trans->transid);
2942	write_extent_buffer(leaf, stack_fi,
2943			btrfs_item_ptr_offset(leaf, path->slots[0]),
2944			sizeof(struct btrfs_file_extent_item));
2945
2946	btrfs_mark_buffer_dirty(trans, leaf);
2947	btrfs_release_path(path);
2948
2949	/*
2950	 * If we dropped an inline extent here, we know the range where it is
2951	 * was not marked with the EXTENT_DELALLOC_NEW bit, so we update the
2952	 * number of bytes only for that range containing the inline extent.
	 * The remainder of the range will be processed when clearing the
	 * EXTENT_DELALLOC bit through the ordered extent completion.
2955	 */
2956	if (file_pos == 0 && !IS_ALIGNED(drop_args.bytes_found, sectorsize)) {
2957		u64 inline_size = round_down(drop_args.bytes_found, sectorsize);
2958
2959		inline_size = drop_args.bytes_found - inline_size;
2960		btrfs_update_inode_bytes(inode, sectorsize, inline_size);
2961		drop_args.bytes_found -= inline_size;
2962		num_bytes -= sectorsize;
2963	}
2964
2965	if (update_inode_bytes)
2966		btrfs_update_inode_bytes(inode, num_bytes, drop_args.bytes_found);
2967
2968	ins.objectid = disk_bytenr;
2969	ins.offset = disk_num_bytes;
2970	ins.type = BTRFS_EXTENT_ITEM_KEY;
2971
2972	ret = btrfs_inode_set_file_extent_range(inode, file_pos, ram_bytes);
2973	if (ret)
2974		goto out;
2975
2976	ret = btrfs_alloc_reserved_file_extent(trans, root, btrfs_ino(inode),
2977					       file_pos - offset,
2978					       qgroup_reserved, &ins);
2979out:
2980	btrfs_free_path(path);
2981
2982	return ret;
2983}
2984
2985static void btrfs_release_delalloc_bytes(struct btrfs_fs_info *fs_info,
2986					 u64 start, u64 len)
2987{
2988	struct btrfs_block_group *cache;
2989
2990	cache = btrfs_lookup_block_group(fs_info, start);
2991	ASSERT(cache);
2992
2993	spin_lock(&cache->lock);
2994	cache->delalloc_bytes -= len;
2995	spin_unlock(&cache->lock);
2996
2997	btrfs_put_block_group(cache);
2998}
2999
3000static int insert_ordered_extent_file_extent(struct btrfs_trans_handle *trans,
3001					     struct btrfs_ordered_extent *oe)
3002{
3003	struct btrfs_file_extent_item stack_fi;
3004	bool update_inode_bytes;
3005	u64 num_bytes = oe->num_bytes;
3006	u64 ram_bytes = oe->ram_bytes;
3007
3008	memset(&stack_fi, 0, sizeof(stack_fi));
3009	btrfs_set_stack_file_extent_type(&stack_fi, BTRFS_FILE_EXTENT_REG);
3010	btrfs_set_stack_file_extent_disk_bytenr(&stack_fi, oe->disk_bytenr);
3011	btrfs_set_stack_file_extent_disk_num_bytes(&stack_fi,
3012						   oe->disk_num_bytes);
3013	btrfs_set_stack_file_extent_offset(&stack_fi, oe->offset);
3014	if (test_bit(BTRFS_ORDERED_TRUNCATED, &oe->flags)) {
3015		num_bytes = oe->truncated_len;
3016		ram_bytes = num_bytes;
3017	}
3018	btrfs_set_stack_file_extent_num_bytes(&stack_fi, num_bytes);
3019	btrfs_set_stack_file_extent_ram_bytes(&stack_fi, ram_bytes);
3020	btrfs_set_stack_file_extent_compression(&stack_fi, oe->compress_type);
3021	/* Encryption and other encoding is reserved and all 0 */
3022
3023	/*
3024	 * For delalloc, when completing an ordered extent we update the inode's
3025	 * bytes when clearing the range in the inode's io tree, so pass false
3026	 * as the argument 'update_inode_bytes' to insert_reserved_file_extent(),
3027	 * except if the ordered extent was truncated.
3028	 */
3029	update_inode_bytes = test_bit(BTRFS_ORDERED_DIRECT, &oe->flags) ||
3030			     test_bit(BTRFS_ORDERED_ENCODED, &oe->flags) ||
3031			     test_bit(BTRFS_ORDERED_TRUNCATED, &oe->flags);
3032
3033	return insert_reserved_file_extent(trans, BTRFS_I(oe->inode),
3034					   oe->file_offset, &stack_fi,
3035					   update_inode_bytes, oe->qgroup_rsv);
3036}
3037
3038/*
 * As ordered data I/O finishes, this gets called so we can finish
 * an ordered extent once the range of bytes in the file it covers is
 * fully written.
3042 */
3043int btrfs_finish_one_ordered(struct btrfs_ordered_extent *ordered_extent)
3044{
3045	struct btrfs_inode *inode = BTRFS_I(ordered_extent->inode);
3046	struct btrfs_root *root = inode->root;
3047	struct btrfs_fs_info *fs_info = root->fs_info;
3048	struct btrfs_trans_handle *trans = NULL;
3049	struct extent_io_tree *io_tree = &inode->io_tree;
3050	struct extent_state *cached_state = NULL;
3051	u64 start, end;
3052	int compress_type = 0;
3053	int ret = 0;
3054	u64 logical_len = ordered_extent->num_bytes;
3055	bool freespace_inode;
3056	bool truncated = false;
3057	bool clear_reserved_extent = true;
3058	unsigned int clear_bits = EXTENT_DEFRAG;
3059
3060	start = ordered_extent->file_offset;
3061	end = start + ordered_extent->num_bytes - 1;
3062
3063	if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
3064	    !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags) &&
3065	    !test_bit(BTRFS_ORDERED_DIRECT, &ordered_extent->flags) &&
3066	    !test_bit(BTRFS_ORDERED_ENCODED, &ordered_extent->flags))
3067		clear_bits |= EXTENT_DELALLOC_NEW;
3068
3069	freespace_inode = btrfs_is_free_space_inode(inode);
3070	if (!freespace_inode)
3071		btrfs_lockdep_acquire(fs_info, btrfs_ordered_extent);
3072
3073	if (test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags)) {
3074		ret = -EIO;
3075		goto out;
3076	}
3077
3078	if (btrfs_is_zoned(fs_info))
3079		btrfs_zone_finish_endio(fs_info, ordered_extent->disk_bytenr,
3080					ordered_extent->disk_num_bytes);
3081
3082	if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags)) {
3083		truncated = true;
3084		logical_len = ordered_extent->truncated_len;
3085		/* Truncated the entire extent, don't bother adding */
3086		if (!logical_len)
3087			goto out;
3088	}
3089
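	/*
	 * NOCOW writes go into a pre-existing extent, so there is no new file
	 * extent item to insert and no delalloc accounting to settle; just
	 * update the inode item and we're done.
	 */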
3090	if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
3091		BUG_ON(!list_empty(&ordered_extent->list)); /* Logic error */
3092
3093		btrfs_inode_safe_disk_i_size_write(inode, 0);
3094		if (freespace_inode)
3095			trans = btrfs_join_transaction_spacecache(root);
3096		else
3097			trans = btrfs_join_transaction(root);
3098		if (IS_ERR(trans)) {
3099			ret = PTR_ERR(trans);
3100			trans = NULL;
3101			goto out;
3102		}
3103		trans->block_rsv = &inode->block_rsv;
3104		ret = btrfs_update_inode_fallback(trans, inode);
3105		if (ret) /* -ENOMEM or corruption */
3106			btrfs_abort_transaction(trans, ret);
3107		goto out;
3108	}
3109
3110	clear_bits |= EXTENT_LOCKED;
3111	lock_extent(io_tree, start, end, &cached_state);
3112
3113	if (freespace_inode)
3114		trans = btrfs_join_transaction_spacecache(root);
3115	else
3116		trans = btrfs_join_transaction(root);
3117	if (IS_ERR(trans)) {
3118		ret = PTR_ERR(trans);
3119		trans = NULL;
3120		goto out;
3121	}
3122
3123	trans->block_rsv = &inode->block_rsv;
3124
3125	ret = btrfs_insert_raid_extent(trans, ordered_extent);
3126	if (ret)
3127		goto out;
3128
3129	if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
3130		compress_type = ordered_extent->compress_type;
3131	if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
3132		BUG_ON(compress_type);
3133		ret = btrfs_mark_extent_written(trans, inode,
3134						ordered_extent->file_offset,
3135						ordered_extent->file_offset +
3136						logical_len);
3137		btrfs_zoned_release_data_reloc_bg(fs_info, ordered_extent->disk_bytenr,
3138						  ordered_extent->disk_num_bytes);
3139	} else {
3140		BUG_ON(root == fs_info->tree_root);
3141		ret = insert_ordered_extent_file_extent(trans, ordered_extent);
3142		if (!ret) {
3143			clear_reserved_extent = false;
3144			btrfs_release_delalloc_bytes(fs_info,
3145						ordered_extent->disk_bytenr,
3146						ordered_extent->disk_num_bytes);
3147		}
3148	}
3149	if (ret < 0) {
3150		btrfs_abort_transaction(trans, ret);
3151		goto out;
3152	}
3153
3154	ret = unpin_extent_cache(inode, ordered_extent->file_offset,
3155				 ordered_extent->num_bytes, trans->transid);
3156	if (ret < 0) {
3157		btrfs_abort_transaction(trans, ret);
3158		goto out;
3159	}
3160
3161	ret = add_pending_csums(trans, &ordered_extent->list);
3162	if (ret) {
3163		btrfs_abort_transaction(trans, ret);
3164		goto out;
3165	}
3166
3167	/*
3168	 * If this is a new delalloc range, clear its new delalloc flag to
3169	 * update the inode's number of bytes. This needs to be done first
3170	 * before updating the inode item.
3171	 */
3172	if ((clear_bits & EXTENT_DELALLOC_NEW) &&
3173	    !test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags))
3174		clear_extent_bit(&inode->io_tree, start, end,
3175				 EXTENT_DELALLOC_NEW | EXTENT_ADD_INODE_BYTES,
3176				 &cached_state);
3177
3178	btrfs_inode_safe_disk_i_size_write(inode, 0);
3179	ret = btrfs_update_inode_fallback(trans, inode);
3180	if (ret) { /* -ENOMEM or corruption */
3181		btrfs_abort_transaction(trans, ret);
3182		goto out;
3183	}
3184	ret = 0;
3185out:
3186	clear_extent_bit(&inode->io_tree, start, end, clear_bits,
3187			 &cached_state);
3188
3189	if (trans)
3190		btrfs_end_transaction(trans);
3191
3192	if (ret || truncated) {
3193		u64 unwritten_start = start;
3194
3195		/*
3196		 * If we failed to finish this ordered extent for any reason we
3197		 * need to make sure BTRFS_ORDERED_IOERR is set on the ordered
3198		 * extent, and mark the inode with the error if it wasn't
3199		 * already set.  Any error during writeback would have already
3200		 * set the mapping error, so we need to set it if we're the ones
3201		 * marking this ordered extent as failed.
3202		 */
3203		if (ret && !test_and_set_bit(BTRFS_ORDERED_IOERR,
3204					     &ordered_extent->flags))
3205			mapping_set_error(ordered_extent->inode->i_mapping, -EIO);
3206
3207		if (truncated)
3208			unwritten_start += logical_len;
3209		clear_extent_uptodate(io_tree, unwritten_start, end, NULL);
3210
3211		/*
3212		 * Drop extent maps for the part of the extent we didn't write.
3213		 *
3214		 * We have an exception here for the free_space_inode, this is
3215		 * because when we do btrfs_get_extent() on the free space inode
3216		 * we will search the commit root.  If this is a new block group
3217		 * we won't find anything, and we will trip over the assert in
3218		 * writepage where we do ASSERT(em->block_start !=
3219		 * EXTENT_MAP_HOLE).
3220		 *
3221		 * Theoretically we could also skip this for any NOCOW extent as
3222		 * we don't mess with the extent map tree in the NOCOW case, but
3223		 * for now simply skip this if we are the free space inode.
3224		 */
3225		if (!btrfs_is_free_space_inode(inode))
3226			btrfs_drop_extent_map_range(inode, unwritten_start,
3227						    end, false);
3228
3229		/*
3230		 * If the ordered extent had an IOERR or something else went
3231		 * wrong we need to return the space for this ordered extent
3232		 * back to the allocator.  We only free the extent in the
3233		 * truncated case if we didn't write out the extent at all.
3234		 *
3235		 * If we made it past insert_reserved_file_extent before we
3236		 * errored out then we don't need to do this as the accounting
3237		 * has already been done.
3238		 */
3239		if ((ret || !logical_len) &&
3240		    clear_reserved_extent &&
3241		    !test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
3242		    !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
3243			/*
3244			 * Discard the range before returning it back to the
3245			 * free space pool
3246			 */
3247			if (ret && btrfs_test_opt(fs_info, DISCARD_SYNC))
3248				btrfs_discard_extent(fs_info,
3249						ordered_extent->disk_bytenr,
3250						ordered_extent->disk_num_bytes,
3251						NULL);
3252			btrfs_free_reserved_extent(fs_info,
3253					ordered_extent->disk_bytenr,
3254					ordered_extent->disk_num_bytes, 1);
3255			/*
3256			 * Actually free the qgroup rsv which was released when
3257			 * the ordered extent was created.
3258			 */
3259			btrfs_qgroup_free_refroot(fs_info, inode->root->root_key.objectid,
3260						  ordered_extent->qgroup_rsv,
3261						  BTRFS_QGROUP_RSV_DATA);
3262		}
3263	}
3264
3265	/*
3266	 * This needs to be done to make sure anybody waiting knows we are done
3267	 * updating everything for this ordered extent.
3268	 */
3269	btrfs_remove_ordered_extent(inode, ordered_extent);
3270
3271	/* once for us */
3272	btrfs_put_ordered_extent(ordered_extent);
3273	/* once for the tree */
3274	btrfs_put_ordered_extent(ordered_extent);
3275
3276	return ret;
3277}
3278
3279int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered)
3280{
3281	if (btrfs_is_zoned(inode_to_fs_info(ordered->inode)) &&
3282	    !test_bit(BTRFS_ORDERED_IOERR, &ordered->flags) &&
3283	    list_empty(&ordered->bioc_list))
3284		btrfs_finish_ordered_zoned(ordered);
3285	return btrfs_finish_one_ordered(ordered);
3286}
3287
3288/*
 * Verify the checksum for a single sector without any extra actions that
 * depend on the type of I/O.
3291 */
3292int btrfs_check_sector_csum(struct btrfs_fs_info *fs_info, struct page *page,
3293			    u32 pgoff, u8 *csum, const u8 * const csum_expected)
3294{
3295	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
3296	char *kaddr;
3297
3298	ASSERT(pgoff + fs_info->sectorsize <= PAGE_SIZE);
3299
3300	shash->tfm = fs_info->csum_shash;
3301
3302	kaddr = kmap_local_page(page) + pgoff;
3303	crypto_shash_digest(shash, kaddr, fs_info->sectorsize, csum);
3304	kunmap_local(kaddr);
3305
3306	if (memcmp(csum, csum_expected, fs_info->csum_size))
3307		return -EIO;
3308	return 0;
3309}
3310
3311/*
3312 * Verify the checksum of a single data sector.
3313 *
 * @bbio:	btrfs_bio which contains the csum
3315 * @dev:	device the sector is on
3316 * @bio_offset:	offset to the beginning of the bio (in bytes)
3317 * @bv:		bio_vec to check
3318 *
3319 * Check if the checksum on a data block is valid.  When a checksum mismatch is
3320 * detected, report the error and fill the corrupted range with zero.
3321 *
3322 * Return %true if the sector is ok or had no checksum to start with, else %false.
3323 */
3324bool btrfs_data_csum_ok(struct btrfs_bio *bbio, struct btrfs_device *dev,
3325			u32 bio_offset, struct bio_vec *bv)
3326{
3327	struct btrfs_inode *inode = bbio->inode;
3328	struct btrfs_fs_info *fs_info = inode->root->fs_info;
3329	u64 file_offset = bbio->file_offset + bio_offset;
3330	u64 end = file_offset + bv->bv_len - 1;
3331	u8 *csum_expected;
3332	u8 csum[BTRFS_CSUM_SIZE];
3333
3334	ASSERT(bv->bv_len == fs_info->sectorsize);
3335
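	/* No csum for this bio (e.g. a NODATASUM inode), nothing to verify. */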
3336	if (!bbio->csum)
3337		return true;
3338
3339	if (btrfs_is_data_reloc_root(inode->root) &&
3340	    test_range_bit(&inode->io_tree, file_offset, end, EXTENT_NODATASUM,
3341			   NULL)) {
3342		/* Skip the range without csum for data reloc inode */
3343		clear_extent_bits(&inode->io_tree, file_offset, end,
3344				  EXTENT_NODATASUM);
3345		return true;
3346	}
3347
3348	csum_expected = bbio->csum + (bio_offset >> fs_info->sectorsize_bits) *
3349				fs_info->csum_size;
3350	if (btrfs_check_sector_csum(fs_info, bv->bv_page, bv->bv_offset, csum,
3351				    csum_expected))
3352		goto zeroit;
3353	return true;
3354
3355zeroit:
3356	btrfs_print_data_csum_error(inode, file_offset, csum, csum_expected,
3357				    bbio->mirror_num);
3358	if (dev)
3359		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS);
3360	memzero_bvec(bv);
3361	return false;
3362}
3363
3364/*
3365 * Perform a delayed iput on @inode.
3366 *
3367 * @inode: The inode we want to perform iput on
3368 *
 * This function uses the generic vfs_inode::i_count to track whether we should
 * just decrement it (in case it's > 1), or, if this is the last iput, link the
 * inode to the delayed iput machinery.  Delayed iputs are processed at
 * transaction commit time, superblock commit and by the cleaner kthread.
3373 */
3374void btrfs_add_delayed_iput(struct btrfs_inode *inode)
3375{
3376	struct btrfs_fs_info *fs_info = inode->root->fs_info;
3377	unsigned long flags;
3378
3379	if (atomic_add_unless(&inode->vfs_inode.i_count, -1, 1))
3380		return;
3381
3382	atomic_inc(&fs_info->nr_delayed_iputs);
3383	/*
3384	 * Need to be irq safe here because we can be called from either an irq
3385	 * context (see bio.c and btrfs_put_ordered_extent()) or a non-irq
3386	 * context.
3387	 */
3388	spin_lock_irqsave(&fs_info->delayed_iput_lock, flags);
3389	ASSERT(list_empty(&inode->delayed_iput));
3390	list_add_tail(&inode->delayed_iput, &fs_info->delayed_iputs);
3391	spin_unlock_irqrestore(&fs_info->delayed_iput_lock, flags);
3392	if (!test_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags))
3393		wake_up_process(fs_info->cleaner_kthread);
3394}
3395
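/*
 * Remove @inode from the delayed iput list and run the final iput on it.
 *
 * Must be called with fs_info->delayed_iput_lock held; the lock is dropped
 * around the iput() call and re-acquired before returning.
 */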
3396static void run_delayed_iput_locked(struct btrfs_fs_info *fs_info,
3397				    struct btrfs_inode *inode)
3398{
3399	list_del_init(&inode->delayed_iput);
3400	spin_unlock_irq(&fs_info->delayed_iput_lock);
3401	iput(&inode->vfs_inode);
3402	if (atomic_dec_and_test(&fs_info->nr_delayed_iputs))
3403		wake_up(&fs_info->delayed_iputs_wait);
3404	spin_lock_irq(&fs_info->delayed_iput_lock);
3405}
3406
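/* Run the delayed iput for @inode now if it is still queued. */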
3407static void btrfs_run_delayed_iput(struct btrfs_fs_info *fs_info,
3408				   struct btrfs_inode *inode)
3409{
3410	if (!list_empty(&inode->delayed_iput)) {
3411		spin_lock_irq(&fs_info->delayed_iput_lock);
3412		if (!list_empty(&inode->delayed_iput))
3413			run_delayed_iput_locked(fs_info, inode);
3414		spin_unlock_irq(&fs_info->delayed_iput_lock);
3415	}
3416}
3417
3418void btrfs_run_delayed_iputs(struct btrfs_fs_info *fs_info)
3419{
3420	/*
3421	 * btrfs_put_ordered_extent() can run in irq context (see bio.c), which
3422	 * calls btrfs_add_delayed_iput() and that needs to lock
3423	 * fs_info->delayed_iput_lock. So we need to disable irqs here to
3424	 * prevent a deadlock.
3425	 */
3426	spin_lock_irq(&fs_info->delayed_iput_lock);
3427	while (!list_empty(&fs_info->delayed_iputs)) {
3428		struct btrfs_inode *inode;
3429
3430		inode = list_first_entry(&fs_info->delayed_iputs,
3431				struct btrfs_inode, delayed_iput);
3432		run_delayed_iput_locked(fs_info, inode);
3433		if (need_resched()) {
3434			spin_unlock_irq(&fs_info->delayed_iput_lock);
3435			cond_resched();
3436			spin_lock_irq(&fs_info->delayed_iput_lock);
3437		}
3438	}
3439	spin_unlock_irq(&fs_info->delayed_iput_lock);
3440}
3441
3442/*
3443 * Wait for flushing all delayed iputs
3444 *
3445 * @fs_info:  the filesystem
3446 *
 * This waits, in killable mode, until all delayed iputs that are currently
 * running have finished.  If we are killed in the meantime, -EINTR is
 * returned.  This helps user operations like fallocate etc. that might get
 * blocked on the iputs.
 *
 * Return -EINTR if we were killed, 0 once no delayed iputs are pending.
3453 */
3454int btrfs_wait_on_delayed_iputs(struct btrfs_fs_info *fs_info)
3455{
3456	int ret = wait_event_killable(fs_info->delayed_iputs_wait,
3457			atomic_read(&fs_info->nr_delayed_iputs) == 0);
3458	if (ret)
3459		return -EINTR;
3460	return 0;
3461}
3462
3463/*
3464 * This creates an orphan entry for the given inode in case something goes wrong
3465 * in the middle of an unlink.
3466 */
3467int btrfs_orphan_add(struct btrfs_trans_handle *trans,
3468		     struct btrfs_inode *inode)
3469{
3470	int ret;
3471
3472	ret = btrfs_insert_orphan_item(trans, inode->root, btrfs_ino(inode));
3473	if (ret && ret != -EEXIST) {
3474		btrfs_abort_transaction(trans, ret);
3475		return ret;
3476	}
3477
3478	return 0;
3479}
3480
3481/*
3482 * We have done the delete so we can go ahead and remove the orphan item for
3483 * this particular inode.
3484 */
3485static int btrfs_orphan_del(struct btrfs_trans_handle *trans,
3486			    struct btrfs_inode *inode)
3487{
3488	return btrfs_del_orphan_item(trans, inode->root, btrfs_ino(inode));
3489}
3490
3491/*
3492 * this cleans up any orphans that may be left on the list from the last use
3493 * of this root.
3494 */
3495int btrfs_orphan_cleanup(struct btrfs_root *root)
3496{
3497	struct btrfs_fs_info *fs_info = root->fs_info;
3498	struct btrfs_path *path;
3499	struct extent_buffer *leaf;
3500	struct btrfs_key key, found_key;
3501	struct btrfs_trans_handle *trans;
3502	struct inode *inode;
3503	u64 last_objectid = 0;
3504	int ret = 0, nr_unlink = 0;
3505
3506	if (test_and_set_bit(BTRFS_ROOT_ORPHAN_CLEANUP, &root->state))
3507		return 0;
3508
3509	path = btrfs_alloc_path();
3510	if (!path) {
3511		ret = -ENOMEM;
3512		goto out;
3513	}
3514	path->reada = READA_BACK;
3515
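	/*
	 * Orphan items all live at (BTRFS_ORPHAN_OBJECTID, ORPHAN_ITEM_KEY,
	 * inode number), so search from the highest possible offset and walk
	 * backwards through them.
	 */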
3516	key.objectid = BTRFS_ORPHAN_OBJECTID;
3517	key.type = BTRFS_ORPHAN_ITEM_KEY;
3518	key.offset = (u64)-1;
3519
3520	while (1) {
3521		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3522		if (ret < 0)
3523			goto out;
3524
3525		/*
		 * ret == 0 means we found what we were searching for, which
		 * is weird, but possible, so only screw with the path if we
		 * didn't find the key and see if we have stuff that matches.
3529		 */
3530		if (ret > 0) {
3531			ret = 0;
3532			if (path->slots[0] == 0)
3533				break;
3534			path->slots[0]--;
3535		}
3536
3537		/* pull out the item */
3538		leaf = path->nodes[0];
3539		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3540
3541		/* make sure the item matches what we want */
3542		if (found_key.objectid != BTRFS_ORPHAN_OBJECTID)
3543			break;
3544		if (found_key.type != BTRFS_ORPHAN_ITEM_KEY)
3545			break;
3546
3547		/* release the path since we're done with it */
3548		btrfs_release_path(path);
3549
3550		/*
		 * This is basically btrfs_lookup(), without the crossing-root
		 * part.  We store the inode number in the offset field of the
		 * orphan item.
3554		 */
3555
3556		if (found_key.offset == last_objectid) {
3557			/*
3558			 * We found the same inode as before. This means we were
3559			 * not able to remove its items via eviction triggered
3560			 * by an iput(). A transaction abort may have happened,
3561			 * due to -ENOSPC for example, so try to grab the error
			 * that led to a transaction abort, if any.
3563			 */
3564			btrfs_err(fs_info,
3565				  "Error removing orphan entry, stopping orphan cleanup");
3566			ret = BTRFS_FS_ERROR(fs_info) ?: -EINVAL;
3567			goto out;
3568		}
3569
3570		last_objectid = found_key.offset;
3571
3572		found_key.objectid = found_key.offset;
3573		found_key.type = BTRFS_INODE_ITEM_KEY;
3574		found_key.offset = 0;
3575		inode = btrfs_iget(fs_info->sb, last_objectid, root);
3576		if (IS_ERR(inode)) {
3577			ret = PTR_ERR(inode);
3578			inode = NULL;
3579			if (ret != -ENOENT)
3580				goto out;
3581		}
3582
3583		if (!inode && root == fs_info->tree_root) {
3584			struct btrfs_root *dead_root;
3585			int is_dead_root = 0;
3586
3587			/*
3588			 * This is an orphan in the tree root. Currently these
3589			 * could come from 2 sources:
3590			 *  a) a root (snapshot/subvolume) deletion in progress
3591			 *  b) a free space cache inode
3592			 * We need to distinguish those two, as the orphan item
3593			 * for a root must not get deleted before the deletion
3594			 * of the snapshot/subvolume's tree completes.
3595			 *
3596			 * btrfs_find_orphan_roots() ran before us, which has
3597			 * found all deleted roots and loaded them into
3598			 * fs_info->fs_roots_radix. So here we can find if an
3599			 * orphan item corresponds to a deleted root by looking
3600			 * up the root from that radix tree.
3601			 */
3602
3603			spin_lock(&fs_info->fs_roots_radix_lock);
3604			dead_root = radix_tree_lookup(&fs_info->fs_roots_radix,
3605							 (unsigned long)found_key.objectid);
3606			if (dead_root && btrfs_root_refs(&dead_root->root_item) == 0)
3607				is_dead_root = 1;
3608			spin_unlock(&fs_info->fs_roots_radix_lock);
3609
3610			if (is_dead_root) {
3611				/* prevent this orphan from being found again */
3612				key.offset = found_key.objectid - 1;
3613				continue;
3614			}
3615
3616		}
3617
3618		/*
3619		 * If we have an inode with links, there are a couple of
3620		 * possibilities:
3621		 *
3622		 * 1. We were halfway through creating fsverity metadata for the
3623		 * file. In that case, the orphan item represents incomplete
3624		 * fsverity metadata which must be cleaned up with
3625		 * btrfs_drop_verity_items and deleting the orphan item.
		 *
3627		 * 2. Old kernels (before v3.12) used to create an
3628		 * orphan item for truncate indicating that there were possibly
3629		 * extent items past i_size that needed to be deleted. In v3.12,
3630		 * truncate was changed to update i_size in sync with the extent
3631		 * items, but the (useless) orphan item was still created. Since
3632		 * v4.18, we don't create the orphan item for truncate at all.
3633		 *
3634		 * So, this item could mean that we need to do a truncate, but
3635		 * only if this filesystem was last used on a pre-v3.12 kernel
3636		 * and was not cleanly unmounted. The odds of that are quite
3637		 * slim, and it's a pain to do the truncate now, so just delete
3638		 * the orphan item.
3639		 *
3640		 * It's also possible that this orphan item was supposed to be
3641		 * deleted but wasn't. The inode number may have been reused,
3642		 * but either way, we can delete the orphan item.
3643		 */
3644		if (!inode || inode->i_nlink) {
3645			if (inode) {
3646				ret = btrfs_drop_verity_items(BTRFS_I(inode));
3647				iput(inode);
3648				inode = NULL;
3649				if (ret)
3650					goto out;
3651			}
3652			trans = btrfs_start_transaction(root, 1);
3653			if (IS_ERR(trans)) {
3654				ret = PTR_ERR(trans);
3655				goto out;
3656			}
			btrfs_debug(fs_info, "auto deleting %llu",
3658				    found_key.objectid);
3659			ret = btrfs_del_orphan_item(trans, root,
3660						    found_key.objectid);
3661			btrfs_end_transaction(trans);
3662			if (ret)
3663				goto out;
3664			continue;
3665		}
3666
3667		nr_unlink++;
3668
3669		/* this will do delete_inode and everything for us */
3670		iput(inode);
3671	}
3672	/* release the path since we're done with it */
3673	btrfs_release_path(path);
3674
3675	if (test_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state)) {
3676		trans = btrfs_join_transaction(root);
3677		if (!IS_ERR(trans))
3678			btrfs_end_transaction(trans);
3679	}
3680
3681	if (nr_unlink)
3682		btrfs_debug(fs_info, "unlinked %d orphans", nr_unlink);
3683
3684out:
3685	if (ret)
3686		btrfs_err(fs_info, "could not do orphan cleanup %d", ret);
3687	btrfs_free_path(path);
3688	return ret;
3689}
3690
3691/*
3692 * very simple check to peek ahead in the leaf looking for xattrs.  If we
3693 * don't find any xattrs, we know there can't be any acls.
3694 *
3695 * slot is the slot the inode is in, objectid is the objectid of the inode
3696 */
3697static noinline int acls_after_inode_item(struct extent_buffer *leaf,
3698					  int slot, u64 objectid,
3699					  int *first_xattr_slot)
3700{
3701	u32 nritems = btrfs_header_nritems(leaf);
3702	struct btrfs_key found_key;
3703	static u64 xattr_access = 0;
3704	static u64 xattr_default = 0;
3705	int scanned = 0;
3706
3707	if (!xattr_access) {
3708		xattr_access = btrfs_name_hash(XATTR_NAME_POSIX_ACL_ACCESS,
3709					strlen(XATTR_NAME_POSIX_ACL_ACCESS));
3710		xattr_default = btrfs_name_hash(XATTR_NAME_POSIX_ACL_DEFAULT,
3711					strlen(XATTR_NAME_POSIX_ACL_DEFAULT));
3712	}
3713
3714	slot++;
3715	*first_xattr_slot = -1;
3716	while (slot < nritems) {
3717		btrfs_item_key_to_cpu(leaf, &found_key, slot);
3718
3719		/* we found a different objectid, there must not be acls */
3720		if (found_key.objectid != objectid)
3721			return 0;
3722
3723		/* we found an xattr, assume we've got an acl */
3724		if (found_key.type == BTRFS_XATTR_ITEM_KEY) {
3725			if (*first_xattr_slot == -1)
3726				*first_xattr_slot = slot;
3727			if (found_key.offset == xattr_access ||
3728			    found_key.offset == xattr_default)
3729				return 1;
3730		}
3731
3732		/*
3733		 * we found a key greater than an xattr key, there can't
3734		 * be any acls later on
3735		 */
3736		if (found_key.type > BTRFS_XATTR_ITEM_KEY)
3737			return 0;
3738
3739		slot++;
3740		scanned++;
3741
3742		/*
3743		 * it goes inode, inode backrefs, xattrs, extents,
3744		 * so if there are a ton of hard links to an inode there can
3745		 * be a lot of backrefs.  Don't waste time searching too hard,
3746		 * this is just an optimization
3747		 */
3748		if (scanned >= 8)
3749			break;
3750	}
	/*
	 * We hit the end of the leaf before we found an xattr or something
	 * larger than an xattr.  We have to assume the inode has ACLs.
	 */
3755	if (*first_xattr_slot == -1)
3756		*first_xattr_slot = slot;
3757	return 1;
3758}
3759
3760/*
3761 * read an inode from the btree into the in-memory inode
3762 */
3763static int btrfs_read_locked_inode(struct inode *inode,
3764				   struct btrfs_path *in_path)
3765{
3766	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
3767	struct btrfs_path *path = in_path;
3768	struct extent_buffer *leaf;
3769	struct btrfs_inode_item *inode_item;
3770	struct btrfs_root *root = BTRFS_I(inode)->root;
3771	struct btrfs_key location;
3772	unsigned long ptr;
3773	int maybe_acls;
3774	u32 rdev;
3775	int ret;
3776	bool filled = false;
3777	int first_xattr_slot;
3778
3779	ret = btrfs_fill_inode(inode, &rdev);
3780	if (!ret)
3781		filled = true;
3782
3783	if (!path) {
3784		path = btrfs_alloc_path();
3785		if (!path)
3786			return -ENOMEM;
3787	}
3788
3789	memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
3790
3791	ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
3792	if (ret) {
3793		if (path != in_path)
3794			btrfs_free_path(path);
3795		return ret;
3796	}
3797
3798	leaf = path->nodes[0];
3799
3800	if (filled)
3801		goto cache_index;
3802
3803	inode_item = btrfs_item_ptr(leaf, path->slots[0],
3804				    struct btrfs_inode_item);
3805	inode->i_mode = btrfs_inode_mode(leaf, inode_item);
3806	set_nlink(inode, btrfs_inode_nlink(leaf, inode_item));
3807	i_uid_write(inode, btrfs_inode_uid(leaf, inode_item));
3808	i_gid_write(inode, btrfs_inode_gid(leaf, inode_item));
3809	btrfs_i_size_write(BTRFS_I(inode), btrfs_inode_size(leaf, inode_item));
3810	btrfs_inode_set_file_extent_range(BTRFS_I(inode), 0,
3811			round_up(i_size_read(inode), fs_info->sectorsize));
3812
3813	inode_set_atime(inode, btrfs_timespec_sec(leaf, &inode_item->atime),
3814			btrfs_timespec_nsec(leaf, &inode_item->atime));
3815
3816	inode_set_mtime(inode, btrfs_timespec_sec(leaf, &inode_item->mtime),
3817			btrfs_timespec_nsec(leaf, &inode_item->mtime));
3818
3819	inode_set_ctime(inode, btrfs_timespec_sec(leaf, &inode_item->ctime),
3820			btrfs_timespec_nsec(leaf, &inode_item->ctime));
3821
3822	BTRFS_I(inode)->i_otime_sec = btrfs_timespec_sec(leaf, &inode_item->otime);
3823	BTRFS_I(inode)->i_otime_nsec = btrfs_timespec_nsec(leaf, &inode_item->otime);
3824
3825	inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item));
3826	BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
3827	BTRFS_I(inode)->last_trans = btrfs_inode_transid(leaf, inode_item);
3828
3829	inode_set_iversion_queried(inode,
3830				   btrfs_inode_sequence(leaf, inode_item));
3831	inode->i_generation = BTRFS_I(inode)->generation;
3832	inode->i_rdev = 0;
3833	rdev = btrfs_inode_rdev(leaf, inode_item);
3834
3835	BTRFS_I(inode)->index_cnt = (u64)-1;
3836	btrfs_inode_split_flags(btrfs_inode_flags(leaf, inode_item),
3837				&BTRFS_I(inode)->flags, &BTRFS_I(inode)->ro_flags);
3838
3839cache_index:
3840	/*
3841	 * If we were modified in the current generation and evicted from memory
3842	 * and then re-read we need to do a full sync since we don't have any
3843	 * idea about which extents were modified before we were evicted from
3844	 * cache.
3845	 *
3846	 * This is required for both inode re-read from disk and delayed inode
3847	 * in the delayed_nodes xarray.
3848	 */
3849	if (BTRFS_I(inode)->last_trans == btrfs_get_fs_generation(fs_info))
3850		set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
3851			&BTRFS_I(inode)->runtime_flags);
3852
3853	/*
3854	 * We don't persist the id of the transaction where an unlink operation
3855	 * against the inode was last made. So here we assume the inode might
3856	 * have been evicted, and therefore the exact value of last_unlink_trans
3857	 * lost, and set it to last_trans to avoid metadata inconsistencies
3858	 * between the inode and its parent if the inode is fsync'ed and the log
3859	 * replayed. For example, in the scenario:
3860	 *
3861	 * touch mydir/foo
3862	 * ln mydir/foo mydir/bar
3863	 * sync
3864	 * unlink mydir/bar
3865	 * echo 2 > /proc/sys/vm/drop_caches   # evicts inode
3866	 * xfs_io -c fsync mydir/foo
3867	 * <power failure>
3868	 * mount fs, triggers fsync log replay
3869	 *
3870	 * We must make sure that when we fsync our inode foo we also log its
3871	 * parent inode, otherwise after log replay the parent still has the
3872	 * dentry with the "bar" name but our inode foo has a link count of 1
3873	 * and doesn't have an inode ref with the name "bar" anymore.
3874	 *
3875	 * Setting last_unlink_trans to last_trans is a pessimistic approach,
3876	 * but it guarantees correctness at the expense of occasional full
3877	 * transaction commits on fsync if our inode is a directory, or if our
3878	 * inode is not a directory, logging its parent unnecessarily.
3879	 */
3880	BTRFS_I(inode)->last_unlink_trans = BTRFS_I(inode)->last_trans;
3881
3882	/*
3883	 * Same logic as for last_unlink_trans. We don't persist the generation
3884	 * of the last transaction where this inode was used for a reflink
	 * operation, so after eviction and reloading the inode we must be
	 * pessimistic and assume it is the last transaction that modified the
	 * inode.
3887	 */
3888	BTRFS_I(inode)->last_reflink_trans = BTRFS_I(inode)->last_trans;
3889
3890	path->slots[0]++;
3891	if (inode->i_nlink != 1 ||
3892	    path->slots[0] >= btrfs_header_nritems(leaf))
3893		goto cache_acl;
3894
3895	btrfs_item_key_to_cpu(leaf, &location, path->slots[0]);
3896	if (location.objectid != btrfs_ino(BTRFS_I(inode)))
3897		goto cache_acl;
3898
3899	ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
3900	if (location.type == BTRFS_INODE_REF_KEY) {
3901		struct btrfs_inode_ref *ref;
3902
3903		ref = (struct btrfs_inode_ref *)ptr;
3904		BTRFS_I(inode)->dir_index = btrfs_inode_ref_index(leaf, ref);
3905	} else if (location.type == BTRFS_INODE_EXTREF_KEY) {
3906		struct btrfs_inode_extref *extref;
3907
3908		extref = (struct btrfs_inode_extref *)ptr;
3909		BTRFS_I(inode)->dir_index = btrfs_inode_extref_index(leaf,
3910								     extref);
3911	}
3912cache_acl:
3913	/*
3914	 * try to precache a NULL acl entry for files that don't have
3915	 * any xattrs or acls
3916	 */
3917	maybe_acls = acls_after_inode_item(leaf, path->slots[0],
3918			btrfs_ino(BTRFS_I(inode)), &first_xattr_slot);
3919	if (first_xattr_slot != -1) {
3920		path->slots[0] = first_xattr_slot;
3921		ret = btrfs_load_inode_props(inode, path);
3922		if (ret)
3923			btrfs_err(fs_info,
3924				  "error loading props for ino %llu (root %llu): %d",
3925				  btrfs_ino(BTRFS_I(inode)),
3926				  root->root_key.objectid, ret);
3927	}
3928	if (path != in_path)
3929		btrfs_free_path(path);
3930
3931	if (!maybe_acls)
3932		cache_no_acl(inode);
3933
3934	switch (inode->i_mode & S_IFMT) {
3935	case S_IFREG:
3936		inode->i_mapping->a_ops = &btrfs_aops;
3937		inode->i_fop = &btrfs_file_operations;
3938		inode->i_op = &btrfs_file_inode_operations;
3939		break;
3940	case S_IFDIR:
3941		inode->i_fop = &btrfs_dir_file_operations;
3942		inode->i_op = &btrfs_dir_inode_operations;
3943		break;
3944	case S_IFLNK:
3945		inode->i_op = &btrfs_symlink_inode_operations;
3946		inode_nohighmem(inode);
3947		inode->i_mapping->a_ops = &btrfs_aops;
3948		break;
3949	default:
3950		inode->i_op = &btrfs_special_inode_operations;
3951		init_special_inode(inode, inode->i_mode, rdev);
3952		break;
3953	}
3954
3955	btrfs_sync_inode_flags_to_i_flags(inode);
3956	return 0;
3957}
3958
3959/*
3960 * given a leaf and an inode, copy the inode fields into the leaf
3961 */
3962static void fill_inode_item(struct btrfs_trans_handle *trans,
3963			    struct extent_buffer *leaf,
3964			    struct btrfs_inode_item *item,
3965			    struct inode *inode)
3966{
3967	struct btrfs_map_token token;
3968	u64 flags;
3969
3970	btrfs_init_map_token(&token, leaf);
3971
3972	btrfs_set_token_inode_uid(&token, item, i_uid_read(inode));
3973	btrfs_set_token_inode_gid(&token, item, i_gid_read(inode));
3974	btrfs_set_token_inode_size(&token, item, BTRFS_I(inode)->disk_i_size);
3975	btrfs_set_token_inode_mode(&token, item, inode->i_mode);
3976	btrfs_set_token_inode_nlink(&token, item, inode->i_nlink);
3977
3978	btrfs_set_token_timespec_sec(&token, &item->atime,
3979				     inode_get_atime_sec(inode));
3980	btrfs_set_token_timespec_nsec(&token, &item->atime,
3981				      inode_get_atime_nsec(inode));
3982
3983	btrfs_set_token_timespec_sec(&token, &item->mtime,
3984				     inode_get_mtime_sec(inode));
3985	btrfs_set_token_timespec_nsec(&token, &item->mtime,
3986				      inode_get_mtime_nsec(inode));
3987
3988	btrfs_set_token_timespec_sec(&token, &item->ctime,
3989				     inode_get_ctime_sec(inode));
3990	btrfs_set_token_timespec_nsec(&token, &item->ctime,
3991				      inode_get_ctime_nsec(inode));
3992
3993	btrfs_set_token_timespec_sec(&token, &item->otime, BTRFS_I(inode)->i_otime_sec);
3994	btrfs_set_token_timespec_nsec(&token, &item->otime, BTRFS_I(inode)->i_otime_nsec);
3995
3996	btrfs_set_token_inode_nbytes(&token, item, inode_get_bytes(inode));
3997	btrfs_set_token_inode_generation(&token, item,
3998					 BTRFS_I(inode)->generation);
3999	btrfs_set_token_inode_sequence(&token, item, inode_peek_iversion(inode));
4000	btrfs_set_token_inode_transid(&token, item, trans->transid);
4001	btrfs_set_token_inode_rdev(&token, item, inode->i_rdev);
4002	flags = btrfs_inode_combine_flags(BTRFS_I(inode)->flags,
4003					  BTRFS_I(inode)->ro_flags);
4004	btrfs_set_token_inode_flags(&token, item, flags);
4005	btrfs_set_token_inode_block_group(&token, item, 0);
4006}
4007
4008/*
4009 * copy everything in the in-memory inode into the btree.
4010 */
4011static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans,
4012					    struct btrfs_inode *inode)
4013{
4014	struct btrfs_inode_item *inode_item;
4015	struct btrfs_path *path;
4016	struct extent_buffer *leaf;
4017	int ret;
4018
4019	path = btrfs_alloc_path();
4020	if (!path)
4021		return -ENOMEM;
4022
4023	ret = btrfs_lookup_inode(trans, inode->root, path, &inode->location, 1);
4024	if (ret) {
4025		if (ret > 0)
4026			ret = -ENOENT;
4027		goto failed;
4028	}
4029
4030	leaf = path->nodes[0];
4031	inode_item = btrfs_item_ptr(leaf, path->slots[0],
4032				    struct btrfs_inode_item);
4033
4034	fill_inode_item(trans, leaf, inode_item, &inode->vfs_inode);
4035	btrfs_mark_buffer_dirty(trans, leaf);
4036	btrfs_set_inode_last_trans(trans, inode);
4037	ret = 0;
4038failed:
4039	btrfs_free_path(path);
4040	return ret;
4041}
4042
4043/*
4044 * copy everything in the in-memory inode into the btree.
4045 */
4046int btrfs_update_inode(struct btrfs_trans_handle *trans,
4047		       struct btrfs_inode *inode)
4048{
4049	struct btrfs_root *root = inode->root;
4050	struct btrfs_fs_info *fs_info = root->fs_info;
4051	int ret;
4052
4053	/*
4054	 * If the inode is a free space inode, we can deadlock during commit
4055	 * if we put it into the delayed code.
4056	 *
	 * The data relocation inode should also be directly updated
	 * without delay.
4059	 */
4060	if (!btrfs_is_free_space_inode(inode)
4061	    && !btrfs_is_data_reloc_root(root)
4062	    && !test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) {
4063		btrfs_update_root_times(trans, root);
4064
4065		ret = btrfs_delayed_update_inode(trans, inode);
4066		if (!ret)
4067			btrfs_set_inode_last_trans(trans, inode);
4068		return ret;
4069	}
4070
4071	return btrfs_update_inode_item(trans, inode);
4072}
4073
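/*
 * Like btrfs_update_inode(), but if the delayed update fails with -ENOSPC,
 * fall back to updating the inode item in the btree directly.
 */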
4074int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
4075				struct btrfs_inode *inode)
4076{
4077	int ret;
4078
4079	ret = btrfs_update_inode(trans, inode);
4080	if (ret == -ENOSPC)
4081		return btrfs_update_inode_item(trans, inode);
4082	return ret;
4083}
4084
4085/*
4086 * unlink helper that gets used here in inode.c and in the tree logging
 * recovery code.  It removes a link in a directory with a given name, and
 * also drops the back refs in the inode to the directory.
4089 */
4090static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
4091				struct btrfs_inode *dir,
4092				struct btrfs_inode *inode,
4093				const struct fscrypt_str *name,
4094				struct btrfs_rename_ctx *rename_ctx)
4095{
4096	struct btrfs_root *root = dir->root;
4097	struct btrfs_fs_info *fs_info = root->fs_info;
4098	struct btrfs_path *path;
4099	int ret = 0;
4100	struct btrfs_dir_item *di;
4101	u64 index;
4102	u64 ino = btrfs_ino(inode);
4103	u64 dir_ino = btrfs_ino(dir);
4104
4105	path = btrfs_alloc_path();
4106	if (!path) {
4107		ret = -ENOMEM;
4108		goto out;
4109	}
4110
4111	di = btrfs_lookup_dir_item(trans, root, path, dir_ino, name, -1);
4112	if (IS_ERR_OR_NULL(di)) {
4113		ret = di ? PTR_ERR(di) : -ENOENT;
4114		goto err;
4115	}
4116	ret = btrfs_delete_one_dir_name(trans, root, path, di);
4117	if (ret)
4118		goto err;
4119	btrfs_release_path(path);
4120
4121	/*
	 * If we don't have the dir index cached, we have to get it by
	 * looking up the inode ref, and since that lookup already found
	 * the inode ref, we remove it directly; a delayed deletion is
	 * unnecessary then.
	 *
	 * But if we do have the dir index cached, there is no need to
	 * search for the inode ref.  Since the inode ref is close to the
	 * inode item, it is better to delay its deletion and do it when
	 * we update the inode item.
4130	 */
4131	if (inode->dir_index) {
4132		ret = btrfs_delayed_delete_inode_ref(inode);
4133		if (!ret) {
4134			index = inode->dir_index;
4135			goto skip_backref;
4136		}
4137	}
4138
4139	ret = btrfs_del_inode_ref(trans, root, name, ino, dir_ino, &index);
4140	if (ret) {
4141		btrfs_info(fs_info,
4142			"failed to delete reference to %.*s, inode %llu parent %llu",
4143			name->len, name->name, ino, dir_ino);
4144		btrfs_abort_transaction(trans, ret);
4145		goto err;
4146	}
4147skip_backref:
4148	if (rename_ctx)
4149		rename_ctx->index = index;
4150
4151	ret = btrfs_delete_delayed_dir_index(trans, dir, index);
4152	if (ret) {
4153		btrfs_abort_transaction(trans, ret);
4154		goto err;
4155	}
4156
4157	/*
4158	 * If we are in a rename context, we don't need to update anything in the
4159	 * log. That will be done later during the rename by btrfs_log_new_name().
4160	 * Besides that, doing it here would only cause extra unnecessary btree
4161	 * operations on the log tree, increasing latency for applications.
4162	 */
4163	if (!rename_ctx) {
4164		btrfs_del_inode_ref_in_log(trans, root, name, inode, dir_ino);
4165		btrfs_del_dir_entries_in_log(trans, root, name, dir, index);
4166	}
4167
4168	/*
4169	 * If we have a pending delayed iput we could end up with the final iput
4170	 * being run in btrfs-cleaner context.  If we have enough of these built
4171	 * up we can end up burning a lot of time in btrfs-cleaner without any
4172	 * way to throttle the unlinks.  Since we're currently holding a ref on
4173	 * the inode we can run the delayed iput here without any issues as the
4174	 * final iput won't be done until after we drop the ref we're currently
4175	 * holding.
4176	 */
4177	btrfs_run_delayed_iput(fs_info, inode);
4178err:
4179	btrfs_free_path(path);
4180	if (ret)
4181		goto out;
4182
4183	btrfs_i_size_write(dir, dir->vfs_inode.i_size - name->len * 2);
4184	inode_inc_iversion(&inode->vfs_inode);
4185	inode_inc_iversion(&dir->vfs_inode);
	inode_set_mtime_to_ts(&dir->vfs_inode, inode_set_ctime_current(&dir->vfs_inode));
4187	ret = btrfs_update_inode(trans, dir);
4188out:
4189	return ret;
4190}
4191
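/*
 * Unlink an inode: remove the directory entry and back references, then drop
 * the inode's link count and write out the updated inode item.
 */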
4192int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
4193		       struct btrfs_inode *dir, struct btrfs_inode *inode,
4194		       const struct fscrypt_str *name)
4195{
4196	int ret;
4197
4198	ret = __btrfs_unlink_inode(trans, dir, inode, name, NULL);
4199	if (!ret) {
4200		drop_nlink(&inode->vfs_inode);
4201		ret = btrfs_update_inode(trans, inode);
4202	}
4203	return ret;
4204}
4205
4206/*
4207 * helper to start transaction for unlink and rmdir.
4208 *
 * unlink and rmdir are special in btrfs: they do not always free space, so
 * if we cannot make our reservations the normal way, try to see if there is
 * plenty of slack room in the global reserve to migrate from; otherwise we
 * cannot allow the unlink to occur.
4213 */
4214static struct btrfs_trans_handle *__unlink_start_trans(struct btrfs_inode *dir)
4215{
4216	struct btrfs_root *root = dir->root;
4217
4218	return btrfs_start_transaction_fallback_global_rsv(root,
4219						   BTRFS_UNLINK_METADATA_UNITS);
4220}
4221
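/*
 * The VFS unlink callback: remove the name from the directory and, if that
 * was the last link, add an orphan item so eviction can delete the inode.
 */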
4222static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
4223{
4224	struct btrfs_trans_handle *trans;
4225	struct inode *inode = d_inode(dentry);
4226	int ret;
4227	struct fscrypt_name fname;
4228
4229	ret = fscrypt_setup_filename(dir, &dentry->d_name, 1, &fname);
4230	if (ret)
4231		return ret;
4232
4233	/* This needs to handle no-key deletions later on */
4234
4235	trans = __unlink_start_trans(BTRFS_I(dir));
4236	if (IS_ERR(trans)) {
4237		ret = PTR_ERR(trans);
4238		goto fscrypt_free;
4239	}
4240
4241	btrfs_record_unlink_dir(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)),
4242				false);
4243
4244	ret = btrfs_unlink_inode(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)),
4245				 &fname.disk_name);
4246	if (ret)
4247		goto end_trans;
4248
4249	if (inode->i_nlink == 0) {
4250		ret = btrfs_orphan_add(trans, BTRFS_I(inode));
4251		if (ret)
4252			goto end_trans;
4253	}
4254
4255end_trans:
4256	btrfs_end_transaction(trans);
4257	btrfs_btree_balance_dirty(BTRFS_I(dir)->root->fs_info);
4258fscrypt_free:
4259	fscrypt_free_filename(&fname);
4260	return ret;
4261}
4262
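/*
 * Remove the directory entry pointing at a subvolume: delete the dir item,
 * the root ref/backref (or the dir index item for a placeholder inode of a
 * not yet referenced subvolume) and the delayed dir index, then update the
 * parent directory's size and timestamps.
 */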
4263static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
4264			       struct btrfs_inode *dir, struct dentry *dentry)
4265{
4266	struct btrfs_root *root = dir->root;
4267	struct btrfs_inode *inode = BTRFS_I(d_inode(dentry));
4268	struct btrfs_path *path;
4269	struct extent_buffer *leaf;
4270	struct btrfs_dir_item *di;
4271	struct btrfs_key key;
4272	u64 index;
4273	int ret;
4274	u64 objectid;
4275	u64 dir_ino = btrfs_ino(dir);
4276	struct fscrypt_name fname;
4277
4278	ret = fscrypt_setup_filename(&dir->vfs_inode, &dentry->d_name, 1, &fname);
4279	if (ret)
4280		return ret;
4281
4282	/* This needs to handle no-key deletions later on */
4283
4284	if (btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID) {
4285		objectid = inode->root->root_key.objectid;
4286	} else if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) {
4287		objectid = inode->location.objectid;
4288	} else {
4289		WARN_ON(1);
4290		fscrypt_free_filename(&fname);
4291		return -EINVAL;
4292	}
4293
4294	path = btrfs_alloc_path();
4295	if (!path) {
4296		ret = -ENOMEM;
4297		goto out;
4298	}
4299
4300	di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
4301				   &fname.disk_name, -1);
4302	if (IS_ERR_OR_NULL(di)) {
4303		ret = di ? PTR_ERR(di) : -ENOENT;
4304		goto out;
4305	}
4306
4307	leaf = path->nodes[0];
4308	btrfs_dir_item_key_to_cpu(leaf, di, &key);
4309	WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
4310	ret = btrfs_delete_one_dir_name(trans, root, path, di);
4311	if (ret) {
4312		btrfs_abort_transaction(trans, ret);
4313		goto out;
4314	}
4315	btrfs_release_path(path);
4316
4317	/*
4318	 * This is a placeholder inode for a subvolume we didn't have a
4319	 * reference to at the time of the snapshot creation.  In the meantime
4320	 * we could have renamed the real subvol link into our snapshot, so
4321	 * depending on btrfs_del_root_ref to return -ENOENT here is incorrect.
4322	 * Instead simply lookup the dir_index_item for this entry so we can
4323	 * remove it.  Otherwise we know we have a ref to the root and we can
4324	 * call btrfs_del_root_ref, and it _shouldn't_ fail.
4325	 */
4326	if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) {
4327		di = btrfs_search_dir_index_item(root, path, dir_ino, &fname.disk_name);
4328		if (IS_ERR_OR_NULL(di)) {
4329			if (!di)
4330				ret = -ENOENT;
4331			else
4332				ret = PTR_ERR(di);
4333			btrfs_abort_transaction(trans, ret);
4334			goto out;
4335		}
4336
4337		leaf = path->nodes[0];
4338		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4339		index = key.offset;
4340		btrfs_release_path(path);
4341	} else {
4342		ret = btrfs_del_root_ref(trans, objectid,
4343					 root->root_key.objectid, dir_ino,
4344					 &index, &fname.disk_name);
4345		if (ret) {
4346			btrfs_abort_transaction(trans, ret);
4347			goto out;
4348		}
4349	}
4350
4351	ret = btrfs_delete_delayed_dir_index(trans, dir, index);
4352	if (ret) {
4353		btrfs_abort_transaction(trans, ret);
4354		goto out;
4355	}
4356
4357	btrfs_i_size_write(dir, dir->vfs_inode.i_size - fname.disk_name.len * 2);
4358	inode_inc_iversion(&dir->vfs_inode);
4359	inode_set_mtime_to_ts(&dir->vfs_inode, inode_set_ctime_current(&dir->vfs_inode));
4360	ret = btrfs_update_inode_fallback(trans, dir);
4361	if (ret)
4362		btrfs_abort_transaction(trans, ret);
4363out:
4364	btrfs_free_path(path);
4365	fscrypt_free_filename(&fname);
4366	return ret;
4367}
4368
4369/*
 * Helper to check if the subvolume references other subvolumes or if it's the
 * default subvolume.
4372 */
4373static noinline int may_destroy_subvol(struct btrfs_root *root)
4374{
4375	struct btrfs_fs_info *fs_info = root->fs_info;
4376	struct btrfs_path *path;
4377	struct btrfs_dir_item *di;
4378	struct btrfs_key key;
4379	struct fscrypt_str name = FSTR_INIT("default", 7);
4380	u64 dir_id;
4381	int ret;
4382
4383	path = btrfs_alloc_path();
4384	if (!path)
4385		return -ENOMEM;
4386
4387	/* Make sure this root isn't set as the default subvol */
4388	dir_id = btrfs_super_root_dir(fs_info->super_copy);
4389	di = btrfs_lookup_dir_item(NULL, fs_info->tree_root, path,
4390				   dir_id, &name, 0);
4391	if (di && !IS_ERR(di)) {
4392		btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key);
4393		if (key.objectid == root->root_key.objectid) {
4394			ret = -EPERM;
4395			btrfs_err(fs_info,
4396				  "deleting default subvolume %llu is not allowed",
4397				  key.objectid);
4398			goto out;
4399		}
4400		btrfs_release_path(path);
4401	}
4402
4403	key.objectid = root->root_key.objectid;
4404	key.type = BTRFS_ROOT_REF_KEY;
4405	key.offset = (u64)-1;
4406
4407	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
4408	if (ret < 0)
4409		goto out;
4410	if (ret == 0) {
4411		/*
		 * A key with offset -1 was found, meaning a root with such an
		 * id would have to exist, but that is out of the valid range.
4414		 */
4415		ret = -EUCLEAN;
4416		goto out;
4417	}
4418
4419	ret = 0;
4420	if (path->slots[0] > 0) {
4421		path->slots[0]--;
4422		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
4423		if (key.objectid == root->root_key.objectid &&
4424		    key.type == BTRFS_ROOT_REF_KEY)
4425			ret = -ENOTEMPTY;
4426	}
4427out:
4428	btrfs_free_path(path);
4429	return ret;
4430}
4431
4432/* Delete all dentries for inodes belonging to the root */
4433static void btrfs_prune_dentries(struct btrfs_root *root)
4434{
4435	struct btrfs_fs_info *fs_info = root->fs_info;
4436	struct rb_node *node;
4437	struct rb_node *prev;
4438	struct btrfs_inode *entry;
4439	struct inode *inode;
4440	u64 objectid = 0;
4441
4442	if (!BTRFS_FS_ERROR(fs_info))
4443		WARN_ON(btrfs_root_refs(&root->root_item) != 0);
4444
4445	spin_lock(&root->inode_lock);
4446again:
4447	node = root->inode_tree.rb_node;
4448	prev = NULL;
4449	while (node) {
4450		prev = node;
4451		entry = rb_entry(node, struct btrfs_inode, rb_node);
4452
4453		if (objectid < btrfs_ino(entry))
4454			node = node->rb_left;
4455		else if (objectid > btrfs_ino(entry))
4456			node = node->rb_right;
4457		else
4458			break;
4459	}
4460	if (!node) {
4461		while (prev) {
4462			entry = rb_entry(prev, struct btrfs_inode, rb_node);
4463			if (objectid <= btrfs_ino(entry)) {
4464				node = prev;
4465				break;
4466			}
4467			prev = rb_next(prev);
4468		}
4469	}
4470	while (node) {
4471		entry = rb_entry(node, struct btrfs_inode, rb_node);
4472		objectid = btrfs_ino(entry) + 1;
4473		inode = igrab(&entry->vfs_inode);
4474		if (inode) {
4475			spin_unlock(&root->inode_lock);
4476			if (atomic_read(&inode->i_count) > 1)
4477				d_prune_aliases(inode);
4478			/*
4479			 * btrfs_drop_inode will have it removed from the inode
4480			 * cache when its usage count hits zero.
4481			 */
4482			iput(inode);
4483			cond_resched();
4484			spin_lock(&root->inode_lock);
4485			goto again;
4486		}
4487
4488		if (cond_resched_lock(&root->inode_lock))
4489			goto again;
4490
4491		node = rb_next(node);
4492	}
4493	spin_unlock(&root->inode_lock);
4494}
4495
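/*
 * Delete a subvolume: mark the root dead so no new users appear, unlink the
 * directory entry for it, set the root's refs to zero and insert an orphan
 * item in the tree root so the cleaner can drop the subvolume's tree later.
 * UUID tree entries for the subvolume are removed as well.
 */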
4496int btrfs_delete_subvolume(struct btrfs_inode *dir, struct dentry *dentry)
4497{
4498	struct btrfs_root *root = dir->root;
4499	struct btrfs_fs_info *fs_info = root->fs_info;
4500	struct inode *inode = d_inode(dentry);
4501	struct btrfs_root *dest = BTRFS_I(inode)->root;
4502	struct btrfs_trans_handle *trans;
4503	struct btrfs_block_rsv block_rsv;
4504	u64 root_flags;
4505	u64 qgroup_reserved = 0;
4506	int ret;
4507
4508	down_write(&fs_info->subvol_sem);
4509
4510	/*
	 * Don't allow deleting a subvolume while a send is in progress. This is
4512	 * inside the inode lock so the error handling that has to drop the bit
4513	 * again is not run concurrently.
4514	 */
4515	spin_lock(&dest->root_item_lock);
4516	if (dest->send_in_progress) {
4517		spin_unlock(&dest->root_item_lock);
4518		btrfs_warn(fs_info,
4519			   "attempt to delete subvolume %llu during send",
4520			   dest->root_key.objectid);
4521		ret = -EPERM;
4522		goto out_up_write;
4523	}
4524	if (atomic_read(&dest->nr_swapfiles)) {
4525		spin_unlock(&dest->root_item_lock);
4526		btrfs_warn(fs_info,
4527			   "attempt to delete subvolume %llu with active swapfile",
			   dest->root_key.objectid);
4529		ret = -EPERM;
4530		goto out_up_write;
4531	}
4532	root_flags = btrfs_root_flags(&dest->root_item);
4533	btrfs_set_root_flags(&dest->root_item,
4534			     root_flags | BTRFS_ROOT_SUBVOL_DEAD);
4535	spin_unlock(&dest->root_item_lock);
4536
4537	ret = may_destroy_subvol(dest);
4538	if (ret)
4539		goto out_undead;
4540
4541	btrfs_init_block_rsv(&block_rsv, BTRFS_BLOCK_RSV_TEMP);
4542	/*
4543	 * One for dir inode,
4544	 * two for dir entries,
4545	 * two for root ref/backref.
4546	 */
4547	ret = btrfs_subvolume_reserve_metadata(root, &block_rsv, 5, true);
4548	if (ret)
4549		goto out_undead;
4550	qgroup_reserved = block_rsv.qgroup_rsv_reserved;
4551
4552	trans = btrfs_start_transaction(root, 0);
4553	if (IS_ERR(trans)) {
4554		ret = PTR_ERR(trans);
4555		goto out_release;
4556	}
4557	ret = btrfs_record_root_in_trans(trans, root);
4558	if (ret) {
4559		btrfs_abort_transaction(trans, ret);
4560		goto out_end_trans;
4561	}
4562	btrfs_qgroup_convert_reserved_meta(root, qgroup_reserved);
4563	qgroup_reserved = 0;
4564	trans->block_rsv = &block_rsv;
4565	trans->bytes_reserved = block_rsv.size;
4566
4567	btrfs_record_snapshot_destroy(trans, dir);
4568
4569	ret = btrfs_unlink_subvol(trans, dir, dentry);
4570	if (ret) {
4571		btrfs_abort_transaction(trans, ret);
4572		goto out_end_trans;
4573	}
4574
4575	ret = btrfs_record_root_in_trans(trans, dest);
4576	if (ret) {
4577		btrfs_abort_transaction(trans, ret);
4578		goto out_end_trans;
4579	}
4580
4581	memset(&dest->root_item.drop_progress, 0,
4582		sizeof(dest->root_item.drop_progress));
4583	btrfs_set_root_drop_level(&dest->root_item, 0);
4584	btrfs_set_root_refs(&dest->root_item, 0);
4585
4586	if (!test_and_set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &dest->state)) {
4587		ret = btrfs_insert_orphan_item(trans,
4588					fs_info->tree_root,
4589					dest->root_key.objectid);
4590		if (ret) {
4591			btrfs_abort_transaction(trans, ret);
4592			goto out_end_trans;
4593		}
4594	}
4595
4596	ret = btrfs_uuid_tree_remove(trans, dest->root_item.uuid,
4597				  BTRFS_UUID_KEY_SUBVOL,
4598				  dest->root_key.objectid);
4599	if (ret && ret != -ENOENT) {
4600		btrfs_abort_transaction(trans, ret);
4601		goto out_end_trans;
4602	}
4603	if (!btrfs_is_empty_uuid(dest->root_item.received_uuid)) {
4604		ret = btrfs_uuid_tree_remove(trans,
4605					  dest->root_item.received_uuid,
4606					  BTRFS_UUID_KEY_RECEIVED_SUBVOL,
4607					  dest->root_key.objectid);
4608		if (ret && ret != -ENOENT) {
4609			btrfs_abort_transaction(trans, ret);
4610			goto out_end_trans;
4611		}
4612	}
4613
4614	free_anon_bdev(dest->anon_dev);
4615	dest->anon_dev = 0;
4616out_end_trans:
4617	trans->block_rsv = NULL;
4618	trans->bytes_reserved = 0;
4619	ret = btrfs_end_transaction(trans);
4620	inode->i_flags |= S_DEAD;
4621out_release:
4622	btrfs_block_rsv_release(fs_info, &block_rsv, (u64)-1, NULL);
4623	if (qgroup_reserved)
4624		btrfs_qgroup_free_meta_prealloc(root, qgroup_reserved);
4625out_undead:
4626	if (ret) {
4627		spin_lock(&dest->root_item_lock);
4628		root_flags = btrfs_root_flags(&dest->root_item);
4629		btrfs_set_root_flags(&dest->root_item,
4630				root_flags & ~BTRFS_ROOT_SUBVOL_DEAD);
4631		spin_unlock(&dest->root_item_lock);
4632	}
4633out_up_write:
4634	up_write(&fs_info->subvol_sem);
4635	if (!ret) {
4636		d_invalidate(dentry);
4637		btrfs_prune_dentries(dest);
4638		ASSERT(dest->send_in_progress == 0);
4639	}
4640
4641	return ret;
4642}
4643
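/*
 * The VFS rmdir callback.  Subvolume and snapshot directories are dispatched
 * to btrfs_delete_subvolume(); regular directories must be empty and get
 * orphaned and unlinked here.
 */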
4644static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
4645{
4646	struct inode *inode = d_inode(dentry);
4647	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
4648	int err = 0;
4649	struct btrfs_trans_handle *trans;
4650	u64 last_unlink_trans;
4651	struct fscrypt_name fname;
4652
4653	if (inode->i_size > BTRFS_EMPTY_DIR_SIZE)
4654		return -ENOTEMPTY;
4655	if (btrfs_ino(BTRFS_I(inode)) == BTRFS_FIRST_FREE_OBJECTID) {
4656		if (unlikely(btrfs_fs_incompat(fs_info, EXTENT_TREE_V2))) {
4657			btrfs_err(fs_info,
4658			"extent tree v2 doesn't support snapshot deletion yet");
4659			return -EOPNOTSUPP;
4660		}
4661		return btrfs_delete_subvolume(BTRFS_I(dir), dentry);
4662	}
4663
4664	err = fscrypt_setup_filename(dir, &dentry->d_name, 1, &fname);
4665	if (err)
4666		return err;
4667
4668	/* This needs to handle no-key deletions later on */
4669
4670	trans = __unlink_start_trans(BTRFS_I(dir));
4671	if (IS_ERR(trans)) {
4672		err = PTR_ERR(trans);
4673		goto out_notrans;
4674	}
4675
4676	if (unlikely(btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
4677		err = btrfs_unlink_subvol(trans, BTRFS_I(dir), dentry);
4678		goto out;
4679	}
4680
4681	err = btrfs_orphan_add(trans, BTRFS_I(inode));
4682	if (err)
4683		goto out;
4684
4685	last_unlink_trans = BTRFS_I(inode)->last_unlink_trans;
4686
4687	/* now the directory is empty */
4688	err = btrfs_unlink_inode(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)),
4689				 &fname.disk_name);
4690	if (!err) {
4691		btrfs_i_size_write(BTRFS_I(inode), 0);
4692		/*
4693		 * Propagate the last_unlink_trans value of the deleted dir to
4694		 * its parent directory. This is to prevent an unrecoverable
4695		 * log tree in the case we do something like this:
4696		 * 1) create dir foo
4697		 * 2) create snapshot under dir foo
4698		 * 3) delete the snapshot
4699		 * 4) rmdir foo
4700		 * 5) mkdir foo
4701		 * 6) fsync foo or some file inside foo
4702		 */
4703		if (last_unlink_trans >= trans->transid)
4704			BTRFS_I(dir)->last_unlink_trans = last_unlink_trans;
4705	}
4706out:
4707	btrfs_end_transaction(trans);
4708out_notrans:
4709	btrfs_btree_balance_dirty(fs_info);
4710	fscrypt_free_filename(&fname);
4711
4712	return err;
4713}
4714
4715/*
4716 * Read, zero a chunk and write a block.
4717 *
4718 * @inode - inode that we're zeroing
4719 * @from - the offset to start zeroing
 * @len - the length to zero, 0 to zero the entire range relative to the
 *	offset
 * @front - zero up to the offset instead of from the offset on
 *
 * This will find the block for the "from" offset, COW the block and zero the
 * part we want to zero.  This is used with truncate and hole punching.
4726 */
4727int btrfs_truncate_block(struct btrfs_inode *inode, loff_t from, loff_t len,
4728			 int front)
4729{
4730	struct btrfs_fs_info *fs_info = inode->root->fs_info;
4731	struct address_space *mapping = inode->vfs_inode.i_mapping;
4732	struct extent_io_tree *io_tree = &inode->io_tree;
4733	struct btrfs_ordered_extent *ordered;
4734	struct extent_state *cached_state = NULL;
4735	struct extent_changeset *data_reserved = NULL;
4736	bool only_release_metadata = false;
4737	u32 blocksize = fs_info->sectorsize;
4738	pgoff_t index = from >> PAGE_SHIFT;
4739	unsigned offset = from & (blocksize - 1);
4740	struct folio *folio;
4741	gfp_t mask = btrfs_alloc_write_mask(mapping);
4742	size_t write_bytes = blocksize;
4743	int ret = 0;
4744	u64 block_start;
4745	u64 block_end;
4746
4747	if (IS_ALIGNED(offset, blocksize) &&
4748	    (!len || IS_ALIGNED(len, blocksize)))
4749		goto out;
4750
4751	block_start = round_down(from, blocksize);
4752	block_end = block_start + blocksize - 1;
4753
4754	ret = btrfs_check_data_free_space(inode, &data_reserved, block_start,
4755					  blocksize, false);
4756	if (ret < 0) {
4757		if (btrfs_check_nocow_lock(inode, block_start, &write_bytes, false) > 0) {
4758			/* For nocow case, no need to reserve data space */
4759			only_release_metadata = true;
4760		} else {
4761			goto out;
4762		}
4763	}
4764	ret = btrfs_delalloc_reserve_metadata(inode, blocksize, blocksize, false);
4765	if (ret < 0) {
4766		if (!only_release_metadata)
4767			btrfs_free_reserved_data_space(inode, data_reserved,
4768						       block_start, blocksize);
4769		goto out;
4770	}
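	/*
	 * Grab and lock the folio.  If we race with a release or with an
	 * ordered extent below, we drop the folio and retry from here.
	 */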
4771again:
4772	folio = __filemap_get_folio(mapping, index,
4773				    FGP_LOCK | FGP_ACCESSED | FGP_CREAT, mask);
4774	if (IS_ERR(folio)) {
4775		btrfs_delalloc_release_space(inode, data_reserved, block_start,
4776					     blocksize, true);
4777		btrfs_delalloc_release_extents(inode, blocksize);
4778		ret = -ENOMEM;
4779		goto out;
4780	}
4781
4782	if (!folio_test_uptodate(folio)) {
4783		ret = btrfs_read_folio(NULL, folio);
4784		folio_lock(folio);
4785		if (folio->mapping != mapping) {
4786			folio_unlock(folio);
4787			folio_put(folio);
4788			goto again;
4789		}
4790		if (!folio_test_uptodate(folio)) {
4791			ret = -EIO;
4792			goto out_unlock;
4793		}
4794	}
4795
4796	/*
	 * We unlock the folio after the I/O is completed and then re-lock it
	 * above.  release_folio() could have come in between that and cleared
	 * folio private, but left the folio in the mapping.  Set it mapped
	 * here to make sure it's properly set for the subpage stuff.
4801	 */
4802	ret = set_folio_extent_mapped(folio);
4803	if (ret < 0)
4804		goto out_unlock;
4805
4806	folio_wait_writeback(folio);
4807
4808	lock_extent(io_tree, block_start, block_end, &cached_state);
4809
4810	ordered = btrfs_lookup_ordered_extent(inode, block_start);
4811	if (ordered) {
4812		unlock_extent(io_tree, block_start, block_end, &cached_state);
4813		folio_unlock(folio);
4814		folio_put(folio);
4815		btrfs_start_ordered_extent(ordered);
4816		btrfs_put_ordered_extent(ordered);
4817		goto again;
4818	}
4819
4820	clear_extent_bit(&inode->io_tree, block_start, block_end,
4821			 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
4822			 &cached_state);
4823
4824	ret = btrfs_set_extent_delalloc(inode, block_start, block_end, 0,
4825					&cached_state);
4826	if (ret) {
4827		unlock_extent(io_tree, block_start, block_end, &cached_state);
4828		goto out_unlock;
4829	}
4830
4831	if (offset != blocksize) {
4832		if (!len)
4833			len = blocksize - offset;
4834		if (front)
4835			folio_zero_range(folio, block_start - folio_pos(folio),
4836					 offset);
4837		else
4838			folio_zero_range(folio,
4839					 (block_start - folio_pos(folio)) + offset,
4840					 len);
4841	}
4842	btrfs_folio_clear_checked(fs_info, folio, block_start,
4843				  block_end + 1 - block_start);
4844	btrfs_folio_set_dirty(fs_info, folio, block_start,
4845			      block_end + 1 - block_start);
4846	unlock_extent(io_tree, block_start, block_end, &cached_state);
4847
4848	if (only_release_metadata)
4849		set_extent_bit(&inode->io_tree, block_start, block_end,
4850			       EXTENT_NORESERVE, NULL);
4851
4852out_unlock:
4853	if (ret) {
4854		if (only_release_metadata)
4855			btrfs_delalloc_release_metadata(inode, blocksize, true);
4856		else
4857			btrfs_delalloc_release_space(inode, data_reserved,
4858					block_start, blocksize, true);
4859	}
4860	btrfs_delalloc_release_extents(inode, blocksize);
4861	folio_unlock(folio);
4862	folio_put(folio);
4863out:
4864	if (only_release_metadata)
4865		btrfs_check_nocow_unlock(inode);
4866	extent_changeset_free(data_reserved);
4867	return ret;
4868}
4869
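/*
 * Drop any file extents in [offset, offset + len) and insert an explicit
 * hole extent in their place.  A no-op on NO_HOLES filesystems, where the
 * absence of a file extent item already implies a hole.
 */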
4870static int maybe_insert_hole(struct btrfs_inode *inode, u64 offset, u64 len)
4871{
4872	struct btrfs_root *root = inode->root;
4873	struct btrfs_fs_info *fs_info = root->fs_info;
4874	struct btrfs_trans_handle *trans;
4875	struct btrfs_drop_extents_args drop_args = { 0 };
4876	int ret;
4877
4878	/*
4879	 * If NO_HOLES is enabled, we don't need to do anything.
4880	 * Later, up in the call chain, either btrfs_set_inode_last_sub_trans()
	 * or btrfs_update_inode() will be called, which guarantees that the
	 * next fsync will know this inode was changed and needs to be logged.
4883	 */
4884	if (btrfs_fs_incompat(fs_info, NO_HOLES))
4885		return 0;
4886
4887	/*
4888	 * 1 - for the one we're dropping
4889	 * 1 - for the one we're adding
4890	 * 1 - for updating the inode.
4891	 */
4892	trans = btrfs_start_transaction(root, 3);
4893	if (IS_ERR(trans))
4894		return PTR_ERR(trans);
4895
4896	drop_args.start = offset;
4897	drop_args.end = offset + len;
4898	drop_args.drop_cache = true;
4899
4900	ret = btrfs_drop_extents(trans, root, inode, &drop_args);
4901	if (ret) {
4902		btrfs_abort_transaction(trans, ret);
4903		btrfs_end_transaction(trans);
4904		return ret;
4905	}
4906
4907	ret = btrfs_insert_hole_extent(trans, root, btrfs_ino(inode), offset, len);
4908	if (ret) {
4909		btrfs_abort_transaction(trans, ret);
4910	} else {
4911		btrfs_update_inode_bytes(inode, 0, drop_args.bytes_found);
4912		btrfs_update_inode(trans, inode);
4913	}
4914	btrfs_end_transaction(trans);
4915	return ret;
4916}
4917
4918/*
4919 * This function puts in dummy file extents for the area we're creating a hole
4920 * for.  So if we are truncating this file to a larger size we need to insert
 * these file extents so that btrfs_get_extent will return an EXTENT_MAP_HOLE
 * for the range between oldsize and size.
4923 */
4924int btrfs_cont_expand(struct btrfs_inode *inode, loff_t oldsize, loff_t size)
4925{
4926	struct btrfs_root *root = inode->root;
4927	struct btrfs_fs_info *fs_info = root->fs_info;
4928	struct extent_io_tree *io_tree = &inode->io_tree;
4929	struct extent_map *em = NULL;
4930	struct extent_state *cached_state = NULL;
4931	u64 hole_start = ALIGN(oldsize, fs_info->sectorsize);
4932	u64 block_end = ALIGN(size, fs_info->sectorsize);
4933	u64 last_byte;
4934	u64 cur_offset;
4935	u64 hole_size;
4936	int err = 0;
4937
4938	/*
4939	 * If our size started in the middle of a block we need to zero out the
4940	 * rest of the block before we expand the i_size, otherwise we could
4941	 * expose stale data.
4942	 */
4943	err = btrfs_truncate_block(inode, oldsize, 0, 0);
4944	if (err)
4945		return err;
4946
4947	if (size <= hole_start)
4948		return 0;
4949
4950	btrfs_lock_and_flush_ordered_range(inode, hole_start, block_end - 1,
4951					   &cached_state);
4952	cur_offset = hole_start;
4953	while (1) {
4954		em = btrfs_get_extent(inode, NULL, cur_offset, block_end - cur_offset);
4955		if (IS_ERR(em)) {
4956			err = PTR_ERR(em);
4957			em = NULL;
4958			break;
4959		}
4960		last_byte = min(extent_map_end(em), block_end);
4961		last_byte = ALIGN(last_byte, fs_info->sectorsize);
4962		hole_size = last_byte - cur_offset;
4963
4964		if (!(em->flags & EXTENT_FLAG_PREALLOC)) {
4965			struct extent_map *hole_em;
4966
4967			err = maybe_insert_hole(inode, cur_offset, hole_size);
4968			if (err)
4969				break;
4970
4971			err = btrfs_inode_set_file_extent_range(inode,
4972							cur_offset, hole_size);
4973			if (err)
4974				break;
4975
4976			hole_em = alloc_extent_map();
4977			if (!hole_em) {
4978				btrfs_drop_extent_map_range(inode, cur_offset,
4979						    cur_offset + hole_size - 1,
4980						    false);
4981				btrfs_set_inode_full_sync(inode);
4982				goto next;
4983			}
4984			hole_em->start = cur_offset;
4985			hole_em->len = hole_size;
4986			hole_em->orig_start = cur_offset;
4987
4988			hole_em->block_start = EXTENT_MAP_HOLE;
4989			hole_em->block_len = 0;
4990			hole_em->orig_block_len = 0;
4991			hole_em->ram_bytes = hole_size;
4992			hole_em->generation = btrfs_get_fs_generation(fs_info);
4993
4994			err = btrfs_replace_extent_map_range(inode, hole_em, true);
4995			free_extent_map(hole_em);
4996		} else {
4997			err = btrfs_inode_set_file_extent_range(inode,
4998							cur_offset, hole_size);
4999			if (err)
5000				break;
5001		}
5002next:
5003		free_extent_map(em);
5004		em = NULL;
5005		cur_offset = last_byte;
5006		if (cur_offset >= block_end)
5007			break;
5008	}
5009	free_extent_map(em);
5010	unlock_extent(io_tree, hole_start, block_end - 1, &cached_state);
5011	return err;
5012}
5013
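/*
 * Handle the size change part of a setattr.  Expanding truncates fill the new
 * range with hole extents under the snapshot lock; shrinking truncates update
 * the size and call btrfs_truncate() after waiting for direct I/O in flight.
 */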
5014static int btrfs_setsize(struct inode *inode, struct iattr *attr)
5015{
5016	struct btrfs_root *root = BTRFS_I(inode)->root;
5017	struct btrfs_trans_handle *trans;
5018	loff_t oldsize = i_size_read(inode);
5019	loff_t newsize = attr->ia_size;
5020	int mask = attr->ia_valid;
5021	int ret;
5022
5023	/*
5024	 * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a
5025	 * special case where we need to update the times despite not having
5026	 * these flags set.  For all other operations the VFS set these flags
5027	 * explicitly if it wants a timestamp update.
5028	 */
5029	if (newsize != oldsize) {
5030		inode_inc_iversion(inode);
5031		if (!(mask & (ATTR_CTIME | ATTR_MTIME))) {
5032			inode_set_mtime_to_ts(inode,
5033					      inode_set_ctime_current(inode));
5034		}
5035	}
5036
5037	if (newsize > oldsize) {
5038		/*
5039		 * Don't do an expanding truncate while snapshotting is ongoing.
5040		 * This is to ensure the snapshot captures a fully consistent
5041		 * state of this file - if the snapshot captures this expanding
5042		 * truncation, it must capture all writes that happened before
5043		 * this truncation.
5044		 */
5045		btrfs_drew_write_lock(&root->snapshot_lock);
5046		ret = btrfs_cont_expand(BTRFS_I(inode), oldsize, newsize);
5047		if (ret) {
5048			btrfs_drew_write_unlock(&root->snapshot_lock);
5049			return ret;
5050		}
5051
5052		trans = btrfs_start_transaction(root, 1);
5053		if (IS_ERR(trans)) {
5054			btrfs_drew_write_unlock(&root->snapshot_lock);
5055			return PTR_ERR(trans);
5056		}
5057
5058		i_size_write(inode, newsize);
5059		btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0);
5060		pagecache_isize_extended(inode, oldsize, newsize);
5061		ret = btrfs_update_inode(trans, BTRFS_I(inode));
5062		btrfs_drew_write_unlock(&root->snapshot_lock);
5063		btrfs_end_transaction(trans);
5064	} else {
5065		struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
5066
5067		if (btrfs_is_zoned(fs_info)) {
5068			ret = btrfs_wait_ordered_range(inode,
5069					ALIGN(newsize, fs_info->sectorsize),
5070					(u64)-1);
5071			if (ret)
5072				return ret;
5073		}
5074
5075		/*
5076		 * We're truncating a file that used to have good data down to
5077		 * zero. Make sure any new writes to the file get on disk
5078		 * on close.
5079		 */
5080		if (newsize == 0)
5081			set_bit(BTRFS_INODE_FLUSH_ON_CLOSE,
5082				&BTRFS_I(inode)->runtime_flags);
5083
5084		truncate_setsize(inode, newsize);
5085
5086		inode_dio_wait(inode);
5087
5088		ret = btrfs_truncate(BTRFS_I(inode), newsize == oldsize);
5089		if (ret && inode->i_nlink) {
5090			int err;
5091
5092			/*
5093			 * Truncate failed, so fix up the in-memory size. We
5094			 * adjusted disk_i_size down as we removed extents, so
5095			 * wait for disk_i_size to be stable and then update the
5096			 * in-memory size to match.
5097			 */
5098			err = btrfs_wait_ordered_range(inode, 0, (u64)-1);
5099			if (err)
5100				return err;
5101			i_size_write(inode, BTRFS_I(inode)->disk_i_size);
5102		}
5103	}
5104
5105	return ret;
5106}
5107
5108static int btrfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
5109			 struct iattr *attr)
5110{
5111	struct inode *inode = d_inode(dentry);
5112	struct btrfs_root *root = BTRFS_I(inode)->root;
5113	int err;
5114
5115	if (btrfs_root_readonly(root))
5116		return -EROFS;
5117
5118	err = setattr_prepare(idmap, dentry, attr);
5119	if (err)
5120		return err;
5121
5122	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
5123		err = btrfs_setsize(inode, attr);
5124		if (err)
5125			return err;
5126	}
5127
5128	if (attr->ia_valid) {
5129		setattr_copy(idmap, inode, attr);
5130		inode_inc_iversion(inode);
5131		err = btrfs_dirty_inode(BTRFS_I(inode));
5132
5133		if (!err && attr->ia_valid & ATTR_MODE)
5134			err = posix_acl_chmod(idmap, dentry, inode->i_mode);
5135	}
5136
5137	return err;
5138}
5139
/*
 * While truncating the inode pages during eviction, we get the VFS
 * calling btrfs_invalidate_folio() against each folio of the inode. This
 * is slow because the calls to btrfs_invalidate_folio() result in a
 * huge amount of calls to lock_extent() and clear_extent_bit(),
 * which keep merging and splitting extent_state structures over and over,
 * wasting lots of time.
 *
 * Therefore, if the inode is being evicted, let btrfs_invalidate_folio()
 * skip all those expensive operations on a per-folio basis and do only
 * the ordered io finishing, while we release here the extent_map and
 * extent_state structures, without the excessive merging and splitting.
 */
5153static void evict_inode_truncate_pages(struct inode *inode)
5154{
5155	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
5156	struct rb_node *node;
5157
5158	ASSERT(inode->i_state & I_FREEING);
5159	truncate_inode_pages_final(&inode->i_data);
5160
5161	btrfs_drop_extent_map_range(BTRFS_I(inode), 0, (u64)-1, false);
5162
	/*
	 * Keep looping until we have no more ranges in the io tree.
	 * We can have ongoing bios started by readahead that have
	 * their endio callback (extent_io.c:end_bio_extent_readpage)
	 * still in progress (unlocked the pages in the bio but have not yet
	 * unlocked the ranges in the io tree). Therefore some ranges can
	 * still be locked when eviction starts, because before submitting
	 * those bios, which are executed by a separate task (a work queue
	 * kthread), no inode references (inode->i_count) were taken (they
	 * would otherwise be dropped in the end io callback of each bio).
	 * Therefore here we effectively end up waiting for those bios and
	 * anyone else holding locked ranges without having bumped the inode's
	 * reference count - if we don't do it, when they access the inode's
	 * io_tree to unlock a range it may be too late, leading to a
	 * use-after-free issue.
	 */
5179	spin_lock(&io_tree->lock);
5180	while (!RB_EMPTY_ROOT(&io_tree->state)) {
5181		struct extent_state *state;
5182		struct extent_state *cached_state = NULL;
5183		u64 start;
5184		u64 end;
5185		unsigned state_flags;
5186
5187		node = rb_first(&io_tree->state);
5188		state = rb_entry(node, struct extent_state, rb_node);
5189		start = state->start;
5190		end = state->end;
5191		state_flags = state->state;
5192		spin_unlock(&io_tree->lock);
5193
5194		lock_extent(io_tree, start, end, &cached_state);
5195
		/*
		 * If the range still has the DELALLOC flag, the extent didn't
		 * reach disk, and its reserved space won't be freed by a
		 * delayed ref.  So we need to free its reserved space here.
		 * (Refer to the comment in btrfs_invalidate_folio, case 2.)
		 *
		 * Note: end is the offset of the last byte, so we need + 1 here.
		 */
5204		if (state_flags & EXTENT_DELALLOC)
5205			btrfs_qgroup_free_data(BTRFS_I(inode), NULL, start,
5206					       end - start + 1, NULL);
5207
5208		clear_extent_bit(io_tree, start, end,
5209				 EXTENT_CLEAR_ALL_BITS | EXTENT_DO_ACCOUNTING,
5210				 &cached_state);
5211
5212		cond_resched();
5213		spin_lock(&io_tree->lock);
5214	}
5215	spin_unlock(&io_tree->lock);
5216}
5217
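/*
 * Reserve metadata space for the transaction used to delete an inode on
 * eviction, first trying with extra room for delayed refs and retrying with
 * the bare minimum if that fails, then join the transaction.
 */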
5218static struct btrfs_trans_handle *evict_refill_and_join(struct btrfs_root *root,
5219							struct btrfs_block_rsv *rsv)
5220{
5221	struct btrfs_fs_info *fs_info = root->fs_info;
5222	struct btrfs_trans_handle *trans;
5223	u64 delayed_refs_extra = btrfs_calc_delayed_ref_bytes(fs_info, 1);
5224	int ret;
5225
	/*
	 * Eviction should be taking place somewhere safe with respect to our
	 * delayed iputs.  However the normal flushing code will run delayed
	 * iputs, so we cannot use FLUSH_ALL, otherwise we'd deadlock.
	 *
	 * We reserve the delayed_refs_extra here again because we can't use
	 * btrfs_start_transaction(root, 0) for the same deadlocky reason as
	 * above.  We reserve our extra bit here because we generate a ton of
	 * delayed refs activity by truncating.
	 *
	 * BTRFS_RESERVE_FLUSH_EVICT will steal from the global_rsv if it can.
	 * If we fail to make this reservation we can re-try without the
	 * delayed_refs_extra so we can make some forward progress.
	 */
5240	ret = btrfs_block_rsv_refill(fs_info, rsv, rsv->size + delayed_refs_extra,
5241				     BTRFS_RESERVE_FLUSH_EVICT);
5242	if (ret) {
5243		ret = btrfs_block_rsv_refill(fs_info, rsv, rsv->size,
5244					     BTRFS_RESERVE_FLUSH_EVICT);
5245		if (ret) {
5246			btrfs_warn(fs_info,
5247				   "could not allocate space for delete; will truncate on mount");
5248			return ERR_PTR(-ENOSPC);
5249		}
5250		delayed_refs_extra = 0;
5251	}
5252
5253	trans = btrfs_join_transaction(root);
5254	if (IS_ERR(trans))
5255		return trans;
5256
5257	if (delayed_refs_extra) {
5258		trans->block_rsv = &fs_info->trans_block_rsv;
5259		trans->bytes_reserved = delayed_refs_extra;
5260		btrfs_block_rsv_migrate(rsv, trans->block_rsv,
5261					delayed_refs_extra, true);
5262	}
5263	return trans;
5264}
5265
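/*
 * Evict an inode from memory.  If its link count dropped to zero, truncate
 * all of its items away and then delete the orphan item; otherwise only the
 * in-memory structures are released.
 */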
5266void btrfs_evict_inode(struct inode *inode)
5267{
5268	struct btrfs_fs_info *fs_info;
5269	struct btrfs_trans_handle *trans;
5270	struct btrfs_root *root = BTRFS_I(inode)->root;
5271	struct btrfs_block_rsv *rsv = NULL;
5272	int ret;
5273
5274	trace_btrfs_inode_evict(inode);
5275
5276	if (!root) {
5277		fsverity_cleanup_inode(inode);
5278		clear_inode(inode);
5279		return;
5280	}
5281
5282	fs_info = inode_to_fs_info(inode);
5283	evict_inode_truncate_pages(inode);
5284
5285	if (inode->i_nlink &&
5286	    ((btrfs_root_refs(&root->root_item) != 0 &&
5287	      root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID) ||
5288	     btrfs_is_free_space_inode(BTRFS_I(inode))))
5289		goto out;
5290
5291	if (is_bad_inode(inode))
5292		goto out;
5293
5294	if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
5295		goto out;
5296
5297	if (inode->i_nlink > 0) {
5298		BUG_ON(btrfs_root_refs(&root->root_item) != 0 &&
5299		       root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID);
5300		goto out;
5301	}
5302
	/*
	 * This makes sure the inode item in the tree is up to date and the
	 * space for the inode update is released.
	 */
5307	ret = btrfs_commit_inode_delayed_inode(BTRFS_I(inode));
5308	if (ret)
5309		goto out;
5310
5311	/*
5312	 * This drops any pending insert or delete operations we have for this
5313	 * inode.  We could have a delayed dir index deletion queued up, but
5314	 * we're removing the inode completely so that'll be taken care of in
5315	 * the truncate.
5316	 */
5317	btrfs_kill_delayed_inode_items(BTRFS_I(inode));
5318
5319	rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
5320	if (!rsv)
5321		goto out;
5322	rsv->size = btrfs_calc_metadata_size(fs_info, 1);
5323	rsv->failfast = true;
5324
5325	btrfs_i_size_write(BTRFS_I(inode), 0);
5326
5327	while (1) {
5328		struct btrfs_truncate_control control = {
5329			.inode = BTRFS_I(inode),
5330			.ino = btrfs_ino(BTRFS_I(inode)),
5331			.new_size = 0,
5332			.min_type = 0,
5333		};
5334
5335		trans = evict_refill_and_join(root, rsv);
5336		if (IS_ERR(trans))
5337			goto out;
5338
5339		trans->block_rsv = rsv;
5340
5341		ret = btrfs_truncate_inode_items(trans, root, &control);
5342		trans->block_rsv = &fs_info->trans_block_rsv;
5343		btrfs_end_transaction(trans);
5344		/*
5345		 * We have not added new delayed items for our inode after we
5346		 * have flushed its delayed items, so no need to throttle on
5347		 * delayed items. However we have modified extent buffers.
5348		 */
5349		btrfs_btree_balance_dirty_nodelay(fs_info);
5350		if (ret && ret != -ENOSPC && ret != -EAGAIN)
5351			goto out;
5352		else if (!ret)
5353			break;
5354	}
5355
5356	/*
5357	 * Errors here aren't a big deal, it just means we leave orphan items in
5358	 * the tree. They will be cleaned up on the next mount. If the inode
5359	 * number gets reused, cleanup deletes the orphan item without doing
5360	 * anything, and unlink reuses the existing orphan item.
5361	 *
5362	 * If it turns out that we are dropping too many of these, we might want
5363	 * to add a mechanism for retrying these after a commit.
5364	 */
5365	trans = evict_refill_and_join(root, rsv);
5366	if (!IS_ERR(trans)) {
5367		trans->block_rsv = rsv;
5368		btrfs_orphan_del(trans, BTRFS_I(inode));
5369		trans->block_rsv = &fs_info->trans_block_rsv;
5370		btrfs_end_transaction(trans);
5371	}
5372
5373out:
5374	btrfs_free_block_rsv(fs_info, rsv);
5375	/*
5376	 * If we didn't successfully delete, the orphan item will still be in
5377	 * the tree and we'll retry on the next mount. Again, we might also want
5378	 * to retry these periodically in the future.
5379	 */
5380	btrfs_remove_delayed_node(BTRFS_I(inode));
5381	fsverity_cleanup_inode(inode);
5382	clear_inode(inode);
5383}
5384
/*
 * Return the key found in the dir entry in the location pointer, fill @type
 * with BTRFS_FT_*, and return 0.
 *
 * If no dir entries were found, returns -ENOENT.
 * If a corrupted location is found in the dir entry, returns -EUCLEAN.
 */
5392static int btrfs_inode_by_name(struct btrfs_inode *dir, struct dentry *dentry,
5393			       struct btrfs_key *location, u8 *type)
5394{
5395	struct btrfs_dir_item *di;
5396	struct btrfs_path *path;
5397	struct btrfs_root *root = dir->root;
5398	int ret = 0;
5399	struct fscrypt_name fname;
5400
5401	path = btrfs_alloc_path();
5402	if (!path)
5403		return -ENOMEM;
5404
5405	ret = fscrypt_setup_filename(&dir->vfs_inode, &dentry->d_name, 1, &fname);
5406	if (ret < 0)
5407		goto out;
5408	/*
5409	 * fscrypt_setup_filename() should never return a positive value, but
5410	 * gcc on sparc/parisc thinks it can, so assert that doesn't happen.
5411	 */
5412	ASSERT(ret == 0);
5413
5414	/* This needs to handle no-key deletions later on */
5415
5416	di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(dir),
5417				   &fname.disk_name, 0);
5418	if (IS_ERR_OR_NULL(di)) {
5419		ret = di ? PTR_ERR(di) : -ENOENT;
5420		goto out;
5421	}
5422
5423	btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
5424	if (location->type != BTRFS_INODE_ITEM_KEY &&
5425	    location->type != BTRFS_ROOT_ITEM_KEY) {
5426		ret = -EUCLEAN;
5427		btrfs_warn(root->fs_info,
5428"%s gets something invalid in DIR_ITEM (name %s, directory ino %llu, location(%llu %u %llu))",
5429			   __func__, fname.disk_name.name, btrfs_ino(dir),
5430			   location->objectid, location->type, location->offset);
5431	}
5432	if (!ret)
5433		*type = btrfs_dir_ftype(path->nodes[0], di);
5434out:
5435	fscrypt_free_filename(&fname);
5436	btrfs_free_path(path);
5437	return ret;
5438}
5439
/*
 * When we hit a tree root in a directory, the btrfs part of the inode
 * needs to be changed to reflect the root directory of the tree root.  This
 * is kind of like crossing a mount point.
 */
5445static int fixup_tree_root_location(struct btrfs_fs_info *fs_info,
5446				    struct btrfs_inode *dir,
5447				    struct dentry *dentry,
5448				    struct btrfs_key *location,
5449				    struct btrfs_root **sub_root)
5450{
5451	struct btrfs_path *path;
5452	struct btrfs_root *new_root;
5453	struct btrfs_root_ref *ref;
5454	struct extent_buffer *leaf;
5455	struct btrfs_key key;
5456	int ret;
5457	int err = 0;
5458	struct fscrypt_name fname;
5459
5460	ret = fscrypt_setup_filename(&dir->vfs_inode, &dentry->d_name, 0, &fname);
5461	if (ret)
5462		return ret;
5463
5464	path = btrfs_alloc_path();
5465	if (!path) {
5466		err = -ENOMEM;
5467		goto out;
5468	}
5469
5470	err = -ENOENT;
5471	key.objectid = dir->root->root_key.objectid;
5472	key.type = BTRFS_ROOT_REF_KEY;
5473	key.offset = location->objectid;
5474
5475	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
5476	if (ret) {
5477		if (ret < 0)
5478			err = ret;
5479		goto out;
5480	}
5481
5482	leaf = path->nodes[0];
5483	ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
5484	if (btrfs_root_ref_dirid(leaf, ref) != btrfs_ino(dir) ||
5485	    btrfs_root_ref_name_len(leaf, ref) != fname.disk_name.len)
5486		goto out;
5487
5488	ret = memcmp_extent_buffer(leaf, fname.disk_name.name,
5489				   (unsigned long)(ref + 1), fname.disk_name.len);
5490	if (ret)
5491		goto out;
5492
5493	btrfs_release_path(path);
5494
5495	new_root = btrfs_get_fs_root(fs_info, location->objectid, true);
5496	if (IS_ERR(new_root)) {
5497		err = PTR_ERR(new_root);
5498		goto out;
5499	}
5500
5501	*sub_root = new_root;
5502	location->objectid = btrfs_root_dirid(&new_root->root_item);
5503	location->type = BTRFS_INODE_ITEM_KEY;
5504	location->offset = 0;
5505	err = 0;
5506out:
5507	btrfs_free_path(path);
5508	fscrypt_free_filename(&fname);
5509	return err;
5510}
5511
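/*
 * Add the inode to the root's rb-tree of in-memory inodes, keyed by inode
 * number.  If an entry with the same inode number already exists it must be
 * in the process of being freed, so replace it in place.
 */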
5512static void inode_tree_add(struct btrfs_inode *inode)
5513{
5514	struct btrfs_root *root = inode->root;
5515	struct btrfs_inode *entry;
5516	struct rb_node **p;
5517	struct rb_node *parent;
5518	struct rb_node *new = &inode->rb_node;
5519	u64 ino = btrfs_ino(inode);
5520
5521	if (inode_unhashed(&inode->vfs_inode))
5522		return;
5523	parent = NULL;
5524	spin_lock(&root->inode_lock);
5525	p = &root->inode_tree.rb_node;
5526	while (*p) {
5527		parent = *p;
5528		entry = rb_entry(parent, struct btrfs_inode, rb_node);
5529
5530		if (ino < btrfs_ino(entry))
5531			p = &parent->rb_left;
5532		else if (ino > btrfs_ino(entry))
5533			p = &parent->rb_right;
5534		else {
5535			WARN_ON(!(entry->vfs_inode.i_state &
5536				  (I_WILL_FREE | I_FREEING)));
5537			rb_replace_node(parent, new, &root->inode_tree);
5538			RB_CLEAR_NODE(parent);
5539			spin_unlock(&root->inode_lock);
5540			return;
5541		}
5542	}
5543	rb_link_node(new, parent, p);
5544	rb_insert_color(new, &root->inode_tree);
5545	spin_unlock(&root->inode_lock);
5546}
5547
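/*
 * Remove the inode from the root's rb-tree and, if that empties the tree of a
 * root with no more references, queue the root for cleanup.
 */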
5548static void inode_tree_del(struct btrfs_inode *inode)
5549{
5550	struct btrfs_root *root = inode->root;
5551	int empty = 0;
5552
5553	spin_lock(&root->inode_lock);
5554	if (!RB_EMPTY_NODE(&inode->rb_node)) {
5555		rb_erase(&inode->rb_node, &root->inode_tree);
5556		RB_CLEAR_NODE(&inode->rb_node);
5557		empty = RB_EMPTY_ROOT(&root->inode_tree);
5558	}
5559	spin_unlock(&root->inode_lock);
5560
5561	if (empty && btrfs_root_refs(&root->root_item) == 0) {
5562		spin_lock(&root->inode_lock);
5563		empty = RB_EMPTY_ROOT(&root->inode_tree);
5564		spin_unlock(&root->inode_lock);
5565		if (empty)
5566			btrfs_add_dead_root(root);
5567	}
5568}
5569
5570
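/*
 * Callback for iget5_locked(): initialize a freshly allocated inode with its
 * inode number and root, and flag free space inodes.
 */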
5571static int btrfs_init_locked_inode(struct inode *inode, void *p)
5572{
5573	struct btrfs_iget_args *args = p;
5574
5575	inode->i_ino = args->ino;
5576	BTRFS_I(inode)->location.objectid = args->ino;
5577	BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY;
5578	BTRFS_I(inode)->location.offset = 0;
5579	BTRFS_I(inode)->root = btrfs_grab_root(args->root);
5580
5581	if (args->root && args->root == args->root->fs_info->tree_root &&
5582	    args->ino != BTRFS_BTREE_INODE_OBJECTID)
5583		set_bit(BTRFS_INODE_FREE_SPACE_INODE,
5584			&BTRFS_I(inode)->runtime_flags);
5585	return 0;
5586}
5587
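/* Callback for iget5_locked(): match an inode by inode number and root. */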
5588static int btrfs_find_actor(struct inode *inode, void *opaque)
5589{
5590	struct btrfs_iget_args *args = opaque;
5591
5592	return args->ino == BTRFS_I(inode)->location.objectid &&
5593		args->root == BTRFS_I(inode)->root;
5594}
5595
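/*
 * Find the in-memory inode for the given inode number and root, or allocate a
 * new one in the locked I_NEW state if it is not cached yet.
 */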
5596static struct inode *btrfs_iget_locked(struct super_block *s, u64 ino,
5597				       struct btrfs_root *root)
5598{
5599	struct inode *inode;
5600	struct btrfs_iget_args args;
5601	unsigned long hashval = btrfs_inode_hash(ino, root);
5602
5603	args.ino = ino;
5604	args.root = root;
5605
5606	inode = iget5_locked(s, hashval, btrfs_find_actor,
5607			     btrfs_init_locked_inode,
5608			     (void *)&args);
5609	return inode;
5610}
5611
/*
 * Get an inode object given its inode number and corresponding root.  A path
 * can be preallocated to prevent recursing back into iget through the
 * allocator.  NULL is also valid, but it may require an additional allocation
 * later.
 */
5618struct inode *btrfs_iget_path(struct super_block *s, u64 ino,
5619			      struct btrfs_root *root, struct btrfs_path *path)
5620{
5621	struct inode *inode;
5622
5623	inode = btrfs_iget_locked(s, ino, root);
5624	if (!inode)
5625		return ERR_PTR(-ENOMEM);
5626
5627	if (inode->i_state & I_NEW) {
5628		int ret;
5629
5630		ret = btrfs_read_locked_inode(inode, path);
5631		if (!ret) {
5632			inode_tree_add(BTRFS_I(inode));
5633			unlock_new_inode(inode);
5634		} else {
5635			iget_failed(inode);
			/*
			 * ret > 0 can come from btrfs_search_slot called by
			 * btrfs_read_locked_inode; this means the inode item
			 * was not found.
			 */
5641			if (ret > 0)
5642				ret = -ENOENT;
5643			inode = ERR_PTR(ret);
5644		}
5645	}
5646
5647	return inode;
5648}
5649
5650struct inode *btrfs_iget(struct super_block *s, u64 ino, struct btrfs_root *root)
5651{
5652	return btrfs_iget_path(s, ino, root, NULL);
5653}
5654
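/*
 * Build a dummy in-memory directory inode for a subvolume whose real inode
 * cannot be looked up (e.g. a deleted subvolume).  It supports lookup only
 * and has no on-disk state of its own.
 */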
5655static struct inode *new_simple_dir(struct inode *dir,
5656				    struct btrfs_key *key,
5657				    struct btrfs_root *root)
5658{
5659	struct timespec64 ts;
5660	struct inode *inode = new_inode(dir->i_sb);
5661
5662	if (!inode)
5663		return ERR_PTR(-ENOMEM);
5664
5665	BTRFS_I(inode)->root = btrfs_grab_root(root);
5666	memcpy(&BTRFS_I(inode)->location, key, sizeof(*key));
5667	set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags);
5668
5669	inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID;
	/*
	 * We only need lookup; the rest is read-only and there's no inode
	 * associated with the dentry.
	 */
5674	inode->i_op = &simple_dir_inode_operations;
5675	inode->i_opflags &= ~IOP_XATTR;
5676	inode->i_fop = &simple_dir_operations;
5677	inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO;
5678
5679	ts = inode_set_ctime_current(inode);
5680	inode_set_mtime_to_ts(inode, ts);
5681	inode_set_atime_to_ts(inode, inode_get_atime(dir));
5682	BTRFS_I(inode)->i_otime_sec = ts.tv_sec;
5683	BTRFS_I(inode)->i_otime_nsec = ts.tv_nsec;
5684
5685	inode->i_uid = dir->i_uid;
5686	inode->i_gid = dir->i_gid;
5687
5688	return inode;
5689}
5690
5691static_assert(BTRFS_FT_UNKNOWN == FT_UNKNOWN);
5692static_assert(BTRFS_FT_REG_FILE == FT_REG_FILE);
5693static_assert(BTRFS_FT_DIR == FT_DIR);
5694static_assert(BTRFS_FT_CHRDEV == FT_CHRDEV);
5695static_assert(BTRFS_FT_BLKDEV == FT_BLKDEV);
5696static_assert(BTRFS_FT_FIFO == FT_FIFO);
5697static_assert(BTRFS_FT_SOCK == FT_SOCK);
5698static_assert(BTRFS_FT_SYMLINK == FT_SYMLINK);
5699
5700static inline u8 btrfs_inode_type(struct inode *inode)
5701{
5702	return fs_umode_to_ftype(inode->i_mode);
5703}
5704
5705struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
5706{
5707	struct btrfs_fs_info *fs_info = inode_to_fs_info(dir);
5708	struct inode *inode;
5709	struct btrfs_root *root = BTRFS_I(dir)->root;
5710	struct btrfs_root *sub_root = root;
5711	struct btrfs_key location;
5712	u8 di_type = 0;
5713	int ret = 0;
5714
5715	if (dentry->d_name.len > BTRFS_NAME_LEN)
5716		return ERR_PTR(-ENAMETOOLONG);
5717
5718	ret = btrfs_inode_by_name(BTRFS_I(dir), dentry, &location, &di_type);
5719	if (ret < 0)
5720		return ERR_PTR(ret);
5721
5722	if (location.type == BTRFS_INODE_ITEM_KEY) {
5723		inode = btrfs_iget(dir->i_sb, location.objectid, root);
5724		if (IS_ERR(inode))
5725			return inode;
5726
5727		/* Do extra check against inode mode with di_type */
5728		if (btrfs_inode_type(inode) != di_type) {
5729			btrfs_crit(fs_info,
5730"inode mode mismatch with dir: inode mode=0%o btrfs type=%u dir type=%u",
5731				  inode->i_mode, btrfs_inode_type(inode),
5732				  di_type);
5733			iput(inode);
5734			return ERR_PTR(-EUCLEAN);
5735		}
5736		return inode;
5737	}
5738
5739	ret = fixup_tree_root_location(fs_info, BTRFS_I(dir), dentry,
5740				       &location, &sub_root);
5741	if (ret < 0) {
5742		if (ret != -ENOENT)
5743			inode = ERR_PTR(ret);
5744		else
5745			inode = new_simple_dir(dir, &location, root);
5746	} else {
5747		inode = btrfs_iget(dir->i_sb, location.objectid, sub_root);
5748		btrfs_put_root(sub_root);
5749
5750		if (IS_ERR(inode))
5751			return inode;
5752
5753		down_read(&fs_info->cleanup_work_sem);
5754		if (!sb_rdonly(inode->i_sb))
5755			ret = btrfs_orphan_cleanup(sub_root);
5756		up_read(&fs_info->cleanup_work_sem);
5757		if (ret) {
5758			iput(inode);
5759			inode = ERR_PTR(ret);
5760		}
5761	}
5762
5763	return inode;
5764}
5765
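/*
 * Have the dcache drop the dentry right away if it refers to a root with no
 * references left or to the dummy empty-subvolume directory.
 */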
5766static int btrfs_dentry_delete(const struct dentry *dentry)
5767{
5768	struct btrfs_root *root;
5769	struct inode *inode = d_inode(dentry);
5770
5771	if (!inode && !IS_ROOT(dentry))
5772		inode = d_inode(dentry->d_parent);
5773
5774	if (inode) {
5775		root = BTRFS_I(inode)->root;
5776		if (btrfs_root_refs(&root->root_item) == 0)
5777			return 1;
5778
5779		if (btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
5780			return 1;
5781	}
5782	return 0;
5783}
5784
5785static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
5786				   unsigned int flags)
5787{
5788	struct inode *inode = btrfs_lookup_dentry(dir, dentry);
5789
5790	if (inode == ERR_PTR(-ENOENT))
5791		inode = NULL;
5792	return d_splice_alias(inode, dentry);
5793}
5794
5795/*
5796 * Find the highest existing sequence number in a directory and then set the
5797 * in-memory index_cnt variable to the first free sequence number.
5798 */
5799static int btrfs_set_inode_index_count(struct btrfs_inode *inode)
5800{
5801	struct btrfs_root *root = inode->root;
5802	struct btrfs_key key, found_key;
5803	struct btrfs_path *path;
5804	struct extent_buffer *leaf;
5805	int ret;
5806
5807	key.objectid = btrfs_ino(inode);
5808	key.type = BTRFS_DIR_INDEX_KEY;
5809	key.offset = (u64)-1;
5810
5811	path = btrfs_alloc_path();
5812	if (!path)
5813		return -ENOMEM;
5814
5815	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5816	if (ret < 0)
5817		goto out;
5818	/* FIXME: we should be able to handle this */
5819	if (ret == 0)
5820		goto out;
5821	ret = 0;
5822
5823	if (path->slots[0] == 0) {
5824		inode->index_cnt = BTRFS_DIR_START_INDEX;
5825		goto out;
5826	}
5827
5828	path->slots[0]--;
5829
5830	leaf = path->nodes[0];
5831	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5832
5833	if (found_key.objectid != btrfs_ino(inode) ||
5834	    found_key.type != BTRFS_DIR_INDEX_KEY) {
5835		inode->index_cnt = BTRFS_DIR_START_INDEX;
5836		goto out;
5837	}
5838
5839	inode->index_cnt = found_key.offset + 1;
5840out:
5841	btrfs_free_path(path);
5842	return ret;
5843}
5844
5845static int btrfs_get_dir_last_index(struct btrfs_inode *dir, u64 *index)
5846{
5847	int ret = 0;
5848
5849	btrfs_inode_lock(dir, 0);
5850	if (dir->index_cnt == (u64)-1) {
5851		ret = btrfs_inode_delayed_dir_index_count(dir);
5852		if (ret) {
5853			ret = btrfs_set_inode_index_count(dir);
5854			if (ret)
5855				goto out;
5856		}
5857	}
5858
	/* index_cnt is the index number of the next new entry, so decrement it. */
5860	*index = dir->index_cnt - 1;
5861out:
5862	btrfs_inode_unlock(dir, 0);
5863
5864	return ret;
5865}
5866
5867/*
5868 * All this infrastructure exists because dir_emit can fault, and we are holding
5869 * the tree lock when doing readdir.  For now just allocate a buffer and copy
5870 * our information into that, and then dir_emit from the buffer.  This is
5871 * similar to what NFS does, only we don't keep the buffer around in pagecache
5872 * because I'm afraid I'll mess that up.  Long term we need to make filldir do
5873 * copy_to_user_inatomic so we don't have to worry about page faulting under the
5874 * tree lock.
5875 */
5876static int btrfs_opendir(struct inode *inode, struct file *file)
5877{
5878	struct btrfs_file_private *private;
5879	u64 last_index;
5880	int ret;
5881
5882	ret = btrfs_get_dir_last_index(BTRFS_I(inode), &last_index);
5883	if (ret)
5884		return ret;
5885
5886	private = kzalloc(sizeof(struct btrfs_file_private), GFP_KERNEL);
5887	if (!private)
5888		return -ENOMEM;
5889	private->last_index = last_index;
5890	private->filldir_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
5891	if (!private->filldir_buf) {
5892		kfree(private);
5893		return -ENOMEM;
5894	}
5895	file->private_data = private;
5896	return 0;
5897}
5898
5899static loff_t btrfs_dir_llseek(struct file *file, loff_t offset, int whence)
5900{
5901	struct btrfs_file_private *private = file->private_data;
5902	int ret;
5903
5904	ret = btrfs_get_dir_last_index(BTRFS_I(file_inode(file)),
5905				       &private->last_index);
5906	if (ret)
5907		return ret;
5908
5909	return generic_file_llseek(file, offset, whence);
5910}
5911
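/*
 * Fixed-size header for each entry in the readdir buffer; the name bytes
 * follow immediately after the struct, so the buffer is a sequence of
 * { struct dir_entry, name } pairs.
 */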
5912struct dir_entry {
5913	u64 ino;
5914	u64 offset;
5915	unsigned type;
5916	int name_len;
5917};
5918
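/*
 * Emit the buffered entries to the VFS.  Returns 1 as soon as dir_emit()
 * signals that the caller's buffer is full, 0 once all entries were emitted.
 */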
5919static int btrfs_filldir(void *addr, int entries, struct dir_context *ctx)
5920{
5921	while (entries--) {
5922		struct dir_entry *entry = addr;
5923		char *name = (char *)(entry + 1);
5924
5925		ctx->pos = get_unaligned(&entry->offset);
5926		if (!dir_emit(ctx, name, get_unaligned(&entry->name_len),
5927					 get_unaligned(&entry->ino),
5928					 get_unaligned(&entry->type)))
5929			return 1;
5930		addr += sizeof(struct dir_entry) +
5931			get_unaligned(&entry->name_len);
5932		ctx->pos++;
5933	}
5934	return 0;
5935}
5936
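/*
 * Read a directory: walk the DIR_INDEX items, merged with any delayed items,
 * copy the entries into the preallocated buffer and flush it to the VFS with
 * btrfs_filldir() whenever it fills up.
 */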
5937static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
5938{
5939	struct inode *inode = file_inode(file);
5940	struct btrfs_root *root = BTRFS_I(inode)->root;
5941	struct btrfs_file_private *private = file->private_data;
5942	struct btrfs_dir_item *di;
5943	struct btrfs_key key;
5944	struct btrfs_key found_key;
5945	struct btrfs_path *path;
5946	void *addr;
5947	LIST_HEAD(ins_list);
5948	LIST_HEAD(del_list);
5949	int ret;
5950	char *name_ptr;
5951	int name_len;
5952	int entries = 0;
5953	int total_len = 0;
5954	bool put = false;
5955	struct btrfs_key location;
5956
5957	if (!dir_emit_dots(file, ctx))
5958		return 0;
5959
5960	path = btrfs_alloc_path();
5961	if (!path)
5962		return -ENOMEM;
5963
5964	addr = private->filldir_buf;
5965	path->reada = READA_FORWARD;
5966
5967	put = btrfs_readdir_get_delayed_items(inode, private->last_index,
5968					      &ins_list, &del_list);
5969
5970again:
5971	key.type = BTRFS_DIR_INDEX_KEY;
5972	key.offset = ctx->pos;
5973	key.objectid = btrfs_ino(BTRFS_I(inode));
5974
5975	btrfs_for_each_slot(root, &key, &found_key, path, ret) {
5976		struct dir_entry *entry;
5977		struct extent_buffer *leaf = path->nodes[0];
5978		u8 ftype;
5979
5980		if (found_key.objectid != key.objectid)
5981			break;
5982		if (found_key.type != BTRFS_DIR_INDEX_KEY)
5983			break;
5984		if (found_key.offset < ctx->pos)
5985			continue;
5986		if (found_key.offset > private->last_index)
5987			break;
5988		if (btrfs_should_delete_dir_index(&del_list, found_key.offset))
5989			continue;
5990		di = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dir_item);
5991		name_len = btrfs_dir_name_len(leaf, di);
5992		if ((total_len + sizeof(struct dir_entry) + name_len) >=
5993		    PAGE_SIZE) {
5994			btrfs_release_path(path);
5995			ret = btrfs_filldir(private->filldir_buf, entries, ctx);
5996			if (ret)
5997				goto nopos;
5998			addr = private->filldir_buf;
5999			entries = 0;
6000			total_len = 0;
6001			goto again;
6002		}
6003
6004		ftype = btrfs_dir_flags_to_ftype(btrfs_dir_flags(leaf, di));
6005		entry = addr;
6006		name_ptr = (char *)(entry + 1);
6007		read_extent_buffer(leaf, name_ptr,
6008				   (unsigned long)(di + 1), name_len);
6009		put_unaligned(name_len, &entry->name_len);
6010		put_unaligned(fs_ftype_to_dtype(ftype), &entry->type);
6011		btrfs_dir_item_key_to_cpu(leaf, di, &location);
6012		put_unaligned(location.objectid, &entry->ino);
6013		put_unaligned(found_key.offset, &entry->offset);
6014		entries++;
6015		addr += sizeof(struct dir_entry) + name_len;
6016		total_len += sizeof(struct dir_entry) + name_len;
6017	}
6018	/* Catch error encountered during iteration */
6019	if (ret < 0)
6020		goto err;
6021
6022	btrfs_release_path(path);
6023
6024	ret = btrfs_filldir(private->filldir_buf, entries, ctx);
6025	if (ret)
6026		goto nopos;
6027
6028	ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list);
6029	if (ret)
6030		goto nopos;
6031
6032	/*
6033	 * Stop new entries from being returned after we return the last
6034	 * entry.
6035	 *
6036	 * New directory entries are assigned a strictly increasing
6037	 * offset.  This means that new entries created during readdir
6038	 * are *guaranteed* to be seen in the future by that readdir.
6039	 * This has broken buggy programs which operate on names as
6040	 * they're returned by readdir.  Until we re-use freed offsets
6041	 * we have this hack to stop new entries from being returned
6042	 * under the assumption that they'll never reach this huge
6043	 * offset.
6044	 *
6045	 * This is being careful not to overflow 32bit loff_t unless the
6046	 * last entry requires it because doing so has broken 32bit apps
6047	 * in the past.
6048	 */
6049	if (ctx->pos >= INT_MAX)
6050		ctx->pos = LLONG_MAX;
6051	else
6052		ctx->pos = INT_MAX;
6053nopos:
6054	ret = 0;
6055err:
6056	if (put)
6057		btrfs_readdir_put_delayed_items(inode, &ins_list, &del_list);
6058	btrfs_free_path(path);
6059	return ret;
6060}
6061
/*
 * This is somewhat expensive, updating the tree every time the
 * inode changes.  But, it is most likely to find the inode in cache.
 * FIXME: needs more benchmarking; there are no reasons other than performance
 * to keep or drop this code.
 */
6068static int btrfs_dirty_inode(struct btrfs_inode *inode)
6069{
6070	struct btrfs_root *root = inode->root;
6071	struct btrfs_fs_info *fs_info = root->fs_info;
6072	struct btrfs_trans_handle *trans;
6073	int ret;
6074
6075	if (test_bit(BTRFS_INODE_DUMMY, &inode->runtime_flags))
6076		return 0;
6077
6078	trans = btrfs_join_transaction(root);
6079	if (IS_ERR(trans))
6080		return PTR_ERR(trans);
6081
6082	ret = btrfs_update_inode(trans, inode);
6083	if (ret == -ENOSPC || ret == -EDQUOT) {
6084		/* whoops, lets try again with the full transaction */
6085		btrfs_end_transaction(trans);
6086		trans = btrfs_start_transaction(root, 1);
6087		if (IS_ERR(trans))
6088			return PTR_ERR(trans);
6089
6090		ret = btrfs_update_inode(trans, inode);
6091	}
6092	btrfs_end_transaction(trans);
6093	if (inode->delayed_node)
6094		btrfs_balance_delayed_items(fs_info);
6095
6096	return ret;
6097}
6098
/*
 * This is a copy of file_update_time.  We need this so we can return an error
 * on ENOSPC when updating the inode in the case of file and mmap writes.
 */
6103static int btrfs_update_time(struct inode *inode, int flags)
6104{
6105	struct btrfs_root *root = BTRFS_I(inode)->root;
6106	bool dirty;
6107
6108	if (btrfs_root_readonly(root))
6109		return -EROFS;
6110
6111	dirty = inode_update_timestamps(inode, flags);
6112	return dirty ? btrfs_dirty_inode(BTRFS_I(inode)) : 0;
6113}
6114
/*
 * Helper to find a free sequence number in a given directory.  The current
 * code is very simple; later versions will do smarter things in the btree.
 */
6119int btrfs_set_inode_index(struct btrfs_inode *dir, u64 *index)
6120{
6121	int ret = 0;
6122
6123	if (dir->index_cnt == (u64)-1) {
6124		ret = btrfs_inode_delayed_dir_index_count(dir);
6125		if (ret) {
6126			ret = btrfs_set_inode_index_count(dir);
6127			if (ret)
6128				return ret;
6129		}
6130	}
6131
6132	*index = dir->index_cnt;
6133	dir->index_cnt++;
6134
6135	return ret;
6136}
6137
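/*
 * Insert a new inode into the inode hash table in the locked I_NEW state,
 * using the same hash and match rules as btrfs_iget_locked().
 */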
6138static int btrfs_insert_inode_locked(struct inode *inode)
6139{
6140	struct btrfs_iget_args args;
6141
6142	args.ino = BTRFS_I(inode)->location.objectid;
6143	args.root = BTRFS_I(inode)->root;
6144
6145	return insert_inode_locked4(inode,
6146		   btrfs_inode_hash(inode->i_ino, BTRFS_I(inode)->root),
6147		   btrfs_find_actor, &args);
6148}
6149
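/*
 * Set up the filename and ACLs for a new inode and compute how many items the
 * creation will add, so the caller can size the transaction reservation.
 */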
6150int btrfs_new_inode_prepare(struct btrfs_new_inode_args *args,
6151			    unsigned int *trans_num_items)
6152{
6153	struct inode *dir = args->dir;
6154	struct inode *inode = args->inode;
6155	int ret;
6156
6157	if (!args->orphan) {
6158		ret = fscrypt_setup_filename(dir, &args->dentry->d_name, 0,
6159					     &args->fname);
6160		if (ret)
6161			return ret;
6162	}
6163
6164	ret = posix_acl_create(dir, &inode->i_mode, &args->default_acl, &args->acl);
6165	if (ret) {
6166		fscrypt_free_filename(&args->fname);
6167		return ret;
6168	}
6169
6170	/* 1 to add inode item */
6171	*trans_num_items = 1;
6172	/* 1 to add compression property */
6173	if (BTRFS_I(dir)->prop_compress)
6174		(*trans_num_items)++;
6175	/* 1 to add default ACL xattr */
6176	if (args->default_acl)
6177		(*trans_num_items)++;
6178	/* 1 to add access ACL xattr */
6179	if (args->acl)
6180		(*trans_num_items)++;
6181#ifdef CONFIG_SECURITY
6182	/* 1 to add LSM xattr */
6183	if (dir->i_security)
6184		(*trans_num_items)++;
6185#endif
6186	if (args->orphan) {
6187		/* 1 to add orphan item */
6188		(*trans_num_items)++;
6189	} else {
6190		/*
6191		 * 1 to add dir item
6192		 * 1 to add dir index
6193		 * 1 to update parent inode item
6194		 *
6195		 * No need for 1 unit for the inode ref item because it is
6196		 * inserted in a batch together with the inode item at
6197		 * btrfs_create_new_inode().
6198		 */
6199		*trans_num_items += 3;
6200	}
6201	return 0;
6202}
6203
6204void btrfs_new_inode_args_destroy(struct btrfs_new_inode_args *args)
6205{
6206	posix_acl_release(args->acl);
6207	posix_acl_release(args->default_acl);
6208	fscrypt_free_filename(&args->fname);
6209}
6210
6211/*
6212 * Inherit flags from the parent inode.
6213 *
6214 * Currently only the compression flags and the cow flags are inherited.
6215 */
6216static void btrfs_inherit_iflags(struct btrfs_inode *inode, struct btrfs_inode *dir)
6217{
6218	unsigned int flags;
6219
6220	flags = dir->flags;
6221
6222	if (flags & BTRFS_INODE_NOCOMPRESS) {
6223		inode->flags &= ~BTRFS_INODE_COMPRESS;
6224		inode->flags |= BTRFS_INODE_NOCOMPRESS;
6225	} else if (flags & BTRFS_INODE_COMPRESS) {
6226		inode->flags &= ~BTRFS_INODE_NOCOMPRESS;
6227		inode->flags |= BTRFS_INODE_COMPRESS;
6228	}
6229
6230	if (flags & BTRFS_INODE_NODATACOW) {
6231		inode->flags |= BTRFS_INODE_NODATACOW;
6232		if (S_ISREG(inode->vfs_inode.i_mode))
6233			inode->flags |= BTRFS_INODE_NODATASUM;
6234	}
6235
6236	btrfs_sync_inode_flags_to_i_flags(&inode->vfs_inode);
6237}
6238
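/*
 * Create a new inode: allocate an object id, insert the inode item and inode
 * ref in one batch, inherit properties and security attributes where
 * applicable, and finally add the directory link (or an orphan item for
 * O_TMPFILE).
 */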
6239int btrfs_create_new_inode(struct btrfs_trans_handle *trans,
6240			   struct btrfs_new_inode_args *args)
6241{
6242	struct timespec64 ts;
6243	struct inode *dir = args->dir;
6244	struct inode *inode = args->inode;
6245	const struct fscrypt_str *name = args->orphan ? NULL : &args->fname.disk_name;
6246	struct btrfs_fs_info *fs_info = inode_to_fs_info(dir);
6247	struct btrfs_root *root;
6248	struct btrfs_inode_item *inode_item;
6249	struct btrfs_key *location;
6250	struct btrfs_path *path;
6251	u64 objectid;
6252	struct btrfs_inode_ref *ref;
6253	struct btrfs_key key[2];
6254	u32 sizes[2];
6255	struct btrfs_item_batch batch;
6256	unsigned long ptr;
6257	int ret;
6258
6259	path = btrfs_alloc_path();
6260	if (!path)
6261		return -ENOMEM;
6262
6263	if (!args->subvol)
6264		BTRFS_I(inode)->root = btrfs_grab_root(BTRFS_I(dir)->root);
6265	root = BTRFS_I(inode)->root;
6266
6267	ret = btrfs_get_free_objectid(root, &objectid);
6268	if (ret)
6269		goto out;
6270	inode->i_ino = objectid;
6271
6272	if (args->orphan) {
		/*
		 * For O_TMPFILE, set the link count to 0, so that from this
		 * point on we fill in an inode item with the correct link
		 * count.
		 */
6277		set_nlink(inode, 0);
6278	} else {
6279		trace_btrfs_inode_request(dir);
6280
6281		ret = btrfs_set_inode_index(BTRFS_I(dir), &BTRFS_I(inode)->dir_index);
6282		if (ret)
6283			goto out;
6284	}
6285	/* index_cnt is ignored for everything but a dir. */
6286	BTRFS_I(inode)->index_cnt = BTRFS_DIR_START_INDEX;
6287	BTRFS_I(inode)->generation = trans->transid;
6288	inode->i_generation = BTRFS_I(inode)->generation;
6289
6290	/*
6291	 * We don't have any capability xattrs set here yet, shortcut any
6292	 * queries for the xattrs here.  If we add them later via the inode
6293	 * security init path or any other path this flag will be cleared.
6294	 */
6295	set_bit(BTRFS_INODE_NO_CAP_XATTR, &BTRFS_I(inode)->runtime_flags);
6296
6297	/*
6298	 * Subvolumes don't inherit flags from their parent directory.
6299	 * Originally this was probably by accident, but we probably can't
6300	 * change it now without compatibility issues.
6301	 */
6302	if (!args->subvol)
6303		btrfs_inherit_iflags(BTRFS_I(inode), BTRFS_I(dir));
6304
6305	if (S_ISREG(inode->i_mode)) {
6306		if (btrfs_test_opt(fs_info, NODATASUM))
6307			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
6308		if (btrfs_test_opt(fs_info, NODATACOW))
6309			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW |
6310				BTRFS_INODE_NODATASUM;
6311	}
6312
6313	location = &BTRFS_I(inode)->location;
6314	location->objectid = objectid;
6315	location->offset = 0;
6316	location->type = BTRFS_INODE_ITEM_KEY;
6317
6318	ret = btrfs_insert_inode_locked(inode);
6319	if (ret < 0) {
6320		if (!args->orphan)
6321			BTRFS_I(dir)->index_cnt--;
6322		goto out;
6323	}
6324
6325	/*
6326	 * We could have gotten an inode number from somebody who was fsynced
6327	 * and then removed in this same transaction, so let's just set full
6328	 * sync since it will be a full sync anyway and this will blow away the
6329	 * old info in the log.
6330	 */
6331	btrfs_set_inode_full_sync(BTRFS_I(inode));
6332
6333	key[0].objectid = objectid;
6334	key[0].type = BTRFS_INODE_ITEM_KEY;
6335	key[0].offset = 0;
6336
6337	sizes[0] = sizeof(struct btrfs_inode_item);
6338
6339	if (!args->orphan) {
6340		/*
6341		 * Start new inodes with an inode_ref. This is slightly more
6342		 * efficient for small numbers of hard links since they will
6343		 * be packed into one item. Extended refs will kick in if we
6344		 * add more hard links than can fit in the ref item.
6345		 */
6346		key[1].objectid = objectid;
6347		key[1].type = BTRFS_INODE_REF_KEY;
6348		if (args->subvol) {
6349			key[1].offset = objectid;
6350			sizes[1] = 2 + sizeof(*ref);
6351		} else {
6352			key[1].offset = btrfs_ino(BTRFS_I(dir));
6353			sizes[1] = name->len + sizeof(*ref);
6354		}
6355	}
6356
6357	batch.keys = &key[0];
6358	batch.data_sizes = &sizes[0];
6359	batch.total_data_size = sizes[0] + (args->orphan ? 0 : sizes[1]);
6360	batch.nr = args->orphan ? 1 : 2;
6361	ret = btrfs_insert_empty_items(trans, root, path, &batch);
6362	if (ret != 0) {
6363		btrfs_abort_transaction(trans, ret);
6364		goto discard;
6365	}
6366
6367	ts = simple_inode_init_ts(inode);
6368	BTRFS_I(inode)->i_otime_sec = ts.tv_sec;
6369	BTRFS_I(inode)->i_otime_nsec = ts.tv_nsec;
6370
6371	/*
6372	 * We're going to fill the inode item now, so at this point the inode
6373	 * must be fully initialized.
6374	 */
6375
6376	inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
6377				  struct btrfs_inode_item);
6378	memzero_extent_buffer(path->nodes[0], (unsigned long)inode_item,
6379			     sizeof(*inode_item));
6380	fill_inode_item(trans, path->nodes[0], inode_item, inode);
6381
6382	if (!args->orphan) {
6383		ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
6384				     struct btrfs_inode_ref);
6385		ptr = (unsigned long)(ref + 1);
6386		if (args->subvol) {
6387			btrfs_set_inode_ref_name_len(path->nodes[0], ref, 2);
6388			btrfs_set_inode_ref_index(path->nodes[0], ref, 0);
6389			write_extent_buffer(path->nodes[0], "..", ptr, 2);
6390		} else {
6391			btrfs_set_inode_ref_name_len(path->nodes[0], ref,
6392						     name->len);
6393			btrfs_set_inode_ref_index(path->nodes[0], ref,
6394						  BTRFS_I(inode)->dir_index);
6395			write_extent_buffer(path->nodes[0], name->name, ptr,
6396					    name->len);
6397		}
6398	}
6399
6400	btrfs_mark_buffer_dirty(trans, path->nodes[0]);
	/*
	 * We don't need the path anymore, and inheriting properties, adding
	 * ACLs, security xattrs, the orphan item or adding the link will each
	 * result in allocating yet another path.  So just free our path.
	 */
6406	btrfs_free_path(path);
6407	path = NULL;
6408
6409	if (args->subvol) {
6410		struct inode *parent;
6411
6412		/*
6413		 * Subvolumes inherit properties from their parent subvolume,
6414		 * not the directory they were created in.
6415		 */
6416		parent = btrfs_iget(fs_info->sb, BTRFS_FIRST_FREE_OBJECTID,
6417				    BTRFS_I(dir)->root);
6418		if (IS_ERR(parent)) {
6419			ret = PTR_ERR(parent);
6420		} else {
6421			ret = btrfs_inode_inherit_props(trans, inode, parent);
6422			iput(parent);
6423		}
6424	} else {
6425		ret = btrfs_inode_inherit_props(trans, inode, dir);
6426	}
6427	if (ret) {
6428		btrfs_err(fs_info,
6429			  "error inheriting props for ino %llu (root %llu): %d",
6430			  btrfs_ino(BTRFS_I(inode)), root->root_key.objectid,
6431			  ret);
6432	}
6433
6434	/*
6435	 * Subvolumes don't inherit ACLs or get passed to the LSM. This is
6436	 * probably a bug.
6437	 */
6438	if (!args->subvol) {
6439		ret = btrfs_init_inode_security(trans, args);
6440		if (ret) {
6441			btrfs_abort_transaction(trans, ret);
6442			goto discard;
6443		}
6444	}
6445
6446	inode_tree_add(BTRFS_I(inode));
6447
6448	trace_btrfs_inode_new(inode);
6449	btrfs_set_inode_last_trans(trans, BTRFS_I(inode));
6450
6451	btrfs_update_root_times(trans, root);
6452
6453	if (args->orphan) {
6454		ret = btrfs_orphan_add(trans, BTRFS_I(inode));
6455	} else {
6456		ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), name,
6457				     0, BTRFS_I(inode)->dir_index);
6458	}
6459	if (ret) {
6460		btrfs_abort_transaction(trans, ret);
6461		goto discard;
6462	}
6463
6464	return 0;
6465
6466discard:
6467	/*
6468	 * discard_new_inode() calls iput(), but the caller owns the reference
6469	 * to the inode.
6470	 */
6471	ihold(inode);
6472	discard_new_inode(inode);
6473out:
6474	btrfs_free_path(path);
6475	return ret;
6476}
6477
/*
 * Utility function to add 'inode' into 'parent_inode' with
 * a given name and a given sequence number.
 * If 'add_backref' is true, also insert a backref from the
 * inode to the parent directory.
 */
6484int btrfs_add_link(struct btrfs_trans_handle *trans,
6485		   struct btrfs_inode *parent_inode, struct btrfs_inode *inode,
6486		   const struct fscrypt_str *name, int add_backref, u64 index)
6487{
6488	int ret = 0;
6489	struct btrfs_key key;
6490	struct btrfs_root *root = parent_inode->root;
6491	u64 ino = btrfs_ino(inode);
6492	u64 parent_ino = btrfs_ino(parent_inode);
6493
6494	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
6495		memcpy(&key, &inode->root->root_key, sizeof(key));
6496	} else {
6497		key.objectid = ino;
6498		key.type = BTRFS_INODE_ITEM_KEY;
6499		key.offset = 0;
6500	}
6501
6502	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
6503		ret = btrfs_add_root_ref(trans, key.objectid,
6504					 root->root_key.objectid, parent_ino,
6505					 index, name);
6506	} else if (add_backref) {
6507		ret = btrfs_insert_inode_ref(trans, root, name,
6508					     ino, parent_ino, index);
6509	}
6510
6511	/* Nothing to clean up yet */
6512	if (ret)
6513		return ret;
6514
6515	ret = btrfs_insert_dir_item(trans, name, parent_inode, &key,
6516				    btrfs_inode_type(&inode->vfs_inode), index);
6517	if (ret == -EEXIST || ret == -EOVERFLOW)
6518		goto fail_dir_item;
6519	else if (ret) {
6520		btrfs_abort_transaction(trans, ret);
6521		return ret;
6522	}
6523
6524	btrfs_i_size_write(parent_inode, parent_inode->vfs_inode.i_size +
6525			   name->len * 2);
6526	inode_inc_iversion(&parent_inode->vfs_inode);
6527	/*
6528	 * If we are replaying a log tree, we do not want to update the mtime
6529	 * and ctime of the parent directory with the current time, since the
6530	 * log replay procedure is responsible for setting them to their correct
6531	 * values (the ones it had when the fsync was done).
6532	 */
6533	if (!test_bit(BTRFS_FS_LOG_RECOVERING, &root->fs_info->flags))
6534		inode_set_mtime_to_ts(&parent_inode->vfs_inode,
6535				      inode_set_ctime_current(&parent_inode->vfs_inode));
6536
6537	ret = btrfs_update_inode(trans, parent_inode);
6538	if (ret)
6539		btrfs_abort_transaction(trans, ret);
6540	return ret;
6541
6542fail_dir_item:
6543	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
6544		u64 local_index;
6545		int err;
6546		err = btrfs_del_root_ref(trans, key.objectid,
6547					 root->root_key.objectid, parent_ino,
6548					 &local_index, name);
6549		if (err)
6550			btrfs_abort_transaction(trans, err);
6551	} else if (add_backref) {
6552		u64 local_index;
6553		int err;
6554
6555		err = btrfs_del_inode_ref(trans, root, name, ino, parent_ino,
6556					  &local_index);
6557		if (err)
6558			btrfs_abort_transaction(trans, err);
6559	}
6560
6561	/* Return the original error code */
6562	return ret;
6563}
6564
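/*
 * Common tail for the create-style callbacks (create, mknod, mkdir, ...):
 * start a transaction sized by btrfs_new_inode_prepare(), create the new
 * inode in it and instantiate the dentry on success.
 */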
6565static int btrfs_create_common(struct inode *dir, struct dentry *dentry,
6566			       struct inode *inode)
6567{
6568	struct btrfs_fs_info *fs_info = inode_to_fs_info(dir);
6569	struct btrfs_root *root = BTRFS_I(dir)->root;
6570	struct btrfs_new_inode_args new_inode_args = {
6571		.dir = dir,
6572		.dentry = dentry,
6573		.inode = inode,
6574	};
6575	unsigned int trans_num_items;
6576	struct btrfs_trans_handle *trans;
6577	int err;
6578
6579	err = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items);
6580	if (err)
6581		goto out_inode;
6582
6583	trans = btrfs_start_transaction(root, trans_num_items);
6584	if (IS_ERR(trans)) {
6585		err = PTR_ERR(trans);
6586		goto out_new_inode_args;
6587	}
6588
6589	err = btrfs_create_new_inode(trans, &new_inode_args);
6590	if (!err)
6591		d_instantiate_new(dentry, inode);
6592
6593	btrfs_end_transaction(trans);
6594	btrfs_btree_balance_dirty(fs_info);
6595out_new_inode_args:
6596	btrfs_new_inode_args_destroy(&new_inode_args);
6597out_inode:
6598	if (err)
6599		iput(inode);
6600	return err;
6601}
6602
6603static int btrfs_mknod(struct mnt_idmap *idmap, struct inode *dir,
6604		       struct dentry *dentry, umode_t mode, dev_t rdev)
6605{
6606	struct inode *inode;
6607
6608	inode = new_inode(dir->i_sb);
6609	if (!inode)
6610		return -ENOMEM;
6611	inode_init_owner(idmap, inode, dir, mode);
6612	inode->i_op = &btrfs_special_inode_operations;
6613	init_special_inode(inode, inode->i_mode, rdev);
6614	return btrfs_create_common(dir, dentry, inode);
6615}
6616
6617static int btrfs_create(struct mnt_idmap *idmap, struct inode *dir,
6618			struct dentry *dentry, umode_t mode, bool excl)
6619{
6620	struct inode *inode;
6621
6622	inode = new_inode(dir->i_sb);
6623	if (!inode)
6624		return -ENOMEM;
6625	inode_init_owner(idmap, inode, dir, mode);
6626	inode->i_fop = &btrfs_file_operations;
6627	inode->i_op = &btrfs_file_inode_operations;
6628	inode->i_mapping->a_ops = &btrfs_aops;
6629	return btrfs_create_common(dir, dentry, inode);
6630}
6631
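/*
 * Create a hard link: bump the link count, insert the new name and backref,
 * and delete the orphan item if the target was an O_TMPFILE inode getting its
 * first link.
 */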
6632static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
6633		      struct dentry *dentry)
6634{
6635	struct btrfs_trans_handle *trans = NULL;
6636	struct btrfs_root *root = BTRFS_I(dir)->root;
6637	struct inode *inode = d_inode(old_dentry);
6638	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
6639	struct fscrypt_name fname;
6640	u64 index;
6641	int err;
6642	int drop_inode = 0;
6643
	/* Do not allow sys_link() across subvolumes of the same device. */
6645	if (root->root_key.objectid != BTRFS_I(inode)->root->root_key.objectid)
6646		return -EXDEV;
6647
6648	if (inode->i_nlink >= BTRFS_LINK_MAX)
6649		return -EMLINK;
6650
6651	err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &fname);
6652	if (err)
6653		goto fail;
6654
6655	err = btrfs_set_inode_index(BTRFS_I(dir), &index);
6656	if (err)
6657		goto fail;
6658
6659	/*
6660	 * 2 items for inode and inode ref
6661	 * 2 items for dir items
6662	 * 1 item for parent inode
6663	 * 1 item for orphan item deletion if O_TMPFILE
6664	 */
6665	trans = btrfs_start_transaction(root, inode->i_nlink ? 5 : 6);
6666	if (IS_ERR(trans)) {
6667		err = PTR_ERR(trans);
6668		trans = NULL;
6669		goto fail;
6670	}
6671
6672	/* There are several dir indexes for this inode, clear the cache. */
6673	BTRFS_I(inode)->dir_index = 0ULL;
6674	inc_nlink(inode);
6675	inode_inc_iversion(inode);
6676	inode_set_ctime_current(inode);
6677	ihold(inode);
6678	set_bit(BTRFS_INODE_COPY_EVERYTHING, &BTRFS_I(inode)->runtime_flags);
6679
6680	err = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode),
6681			     &fname.disk_name, 1, index);
6682
6683	if (err) {
6684		drop_inode = 1;
6685	} else {
6686		struct dentry *parent = dentry->d_parent;
6687
6688		err = btrfs_update_inode(trans, BTRFS_I(inode));
6689		if (err)
6690			goto fail;
6691		if (inode->i_nlink == 1) {
			/*
			 * If the new hard link count is 1, it's a file created
			 * with the open(2) O_TMPFILE flag.
			 */
6696			err = btrfs_orphan_del(trans, BTRFS_I(inode));
6697			if (err)
6698				goto fail;
6699		}
6700		d_instantiate(dentry, inode);
6701		btrfs_log_new_name(trans, old_dentry, NULL, 0, parent);
6702	}
6703
6704fail:
6705	fscrypt_free_filename(&fname);
6706	if (trans)
6707		btrfs_end_transaction(trans);
6708	if (drop_inode) {
6709		inode_dec_link_count(inode);
6710		iput(inode);
6711	}
6712	btrfs_btree_balance_dirty(fs_info);
6713	return err;
6714}
6715
6716static int btrfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
6717		       struct dentry *dentry, umode_t mode)
6718{
6719	struct inode *inode;
6720
6721	inode = new_inode(dir->i_sb);
6722	if (!inode)
6723		return -ENOMEM;
6724	inode_init_owner(idmap, inode, dir, S_IFDIR | mode);
6725	inode->i_op = &btrfs_dir_inode_operations;
6726	inode->i_fop = &btrfs_dir_file_operations;
6727	return btrfs_create_common(dir, dentry, inode);
6728}
6729
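/*
 * Decompress the data of an inline extent into the given page, zeroing any
 * tail of the page that the decompressed data does not cover.
 */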
6730static noinline int uncompress_inline(struct btrfs_path *path,
6731				      struct page *page,
6732				      struct btrfs_file_extent_item *item)
6733{
6734	int ret;
6735	struct extent_buffer *leaf = path->nodes[0];
6736	char *tmp;
6737	size_t max_size;
6738	unsigned long inline_size;
6739	unsigned long ptr;
6740	int compress_type;
6741
6742	compress_type = btrfs_file_extent_compression(leaf, item);
6743	max_size = btrfs_file_extent_ram_bytes(leaf, item);
6744	inline_size = btrfs_file_extent_inline_item_len(leaf, path->slots[0]);
6745	tmp = kmalloc(inline_size, GFP_NOFS);
6746	if (!tmp)
6747		return -ENOMEM;
6748	ptr = btrfs_file_extent_inline_start(item);
6749
6750	read_extent_buffer(leaf, tmp, ptr, inline_size);
6751
6752	max_size = min_t(unsigned long, PAGE_SIZE, max_size);
6753	ret = btrfs_decompress(compress_type, tmp, page, 0, inline_size, max_size);
6754
	/*
	 * The decompression code contains a memset to fill in any space
	 * between the end of the uncompressed data and the end of max_size, in
	 * case the decompressed data ends up shorter than ram_bytes.  That
	 * doesn't cover the hole between the end of an inline extent and the
	 * beginning of the next block, so we cover that region here.
	 */
6762
6763	if (max_size < PAGE_SIZE)
6764		memzero_page(page, max_size, PAGE_SIZE - max_size);
6765	kfree(tmp);
6766	return ret;
6767}
6768
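/*
 * Copy (or decompress) the data of an inline extent into the given page and
 * zero the rest of the page.  Inline extents always start at file offset 0.
 */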
6769static int read_inline_extent(struct btrfs_inode *inode, struct btrfs_path *path,
6770			      struct page *page)
6771{
6772	struct btrfs_file_extent_item *fi;
6773	void *kaddr;
6774	size_t copy_size;
6775
6776	if (!page || PageUptodate(page))
6777		return 0;
6778
6779	ASSERT(page_offset(page) == 0);
6780
6781	fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
6782			    struct btrfs_file_extent_item);
6783	if (btrfs_file_extent_compression(path->nodes[0], fi) != BTRFS_COMPRESS_NONE)
6784		return uncompress_inline(path, page, fi);
6785
6786	copy_size = min_t(u64, PAGE_SIZE,
6787			  btrfs_file_extent_ram_bytes(path->nodes[0], fi));
6788	kaddr = kmap_local_page(page);
6789	read_extent_buffer(path->nodes[0], kaddr,
6790			   btrfs_file_extent_inline_start(fi), copy_size);
6791	kunmap_local(kaddr);
6792	if (copy_size < PAGE_SIZE)
6793		memzero_page(page, copy_size, PAGE_SIZE - copy_size);
6794	return 0;
6795}
6796
6797/*
6798 * Lookup the first extent overlapping a range in a file.
6799 *
6800 * @inode:	file to search in
6801 * @page:	page to read extent data into if the extent is inline
6802 * @start:	file offset
6803 * @len:	length of range starting at @start
6804 *
6805 * Return the first &struct extent_map which overlaps the given range, reading
6806 * it from the B-tree and caching it if necessary. Note that there may be more
6807 * extents which overlap the given range after the returned extent_map.
6808 *
6809 * If @page is not NULL and the extent is inline, this also reads the extent
6810 * data directly into the page and marks the extent up to date in the io_tree.
6811 *
6812 * Return: ERR_PTR on error, non-NULL extent_map on success.
6813 */
6814struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
6815				    struct page *page, u64 start, u64 len)
6816{
6817	struct btrfs_fs_info *fs_info = inode->root->fs_info;
6818	int ret = 0;
6819	u64 extent_start = 0;
6820	u64 extent_end = 0;
6821	u64 objectid = btrfs_ino(inode);
6822	int extent_type = -1;
6823	struct btrfs_path *path = NULL;
6824	struct btrfs_root *root = inode->root;
6825	struct btrfs_file_extent_item *item;
6826	struct extent_buffer *leaf;
6827	struct btrfs_key found_key;
6828	struct extent_map *em = NULL;
6829	struct extent_map_tree *em_tree = &inode->extent_tree;
6830
6831	read_lock(&em_tree->lock);
6832	em = lookup_extent_mapping(em_tree, start, len);
6833	read_unlock(&em_tree->lock);
6834
6835	if (em) {
6836		if (em->start > start || em->start + em->len <= start)
6837			free_extent_map(em);
6838		else if (em->block_start == EXTENT_MAP_INLINE && page)
6839			free_extent_map(em);
6840		else
6841			goto out;
6842	}
6843	em = alloc_extent_map();
6844	if (!em) {
6845		ret = -ENOMEM;
6846		goto out;
6847	}
6848	em->start = EXTENT_MAP_HOLE;
6849	em->orig_start = EXTENT_MAP_HOLE;
6850	em->len = (u64)-1;
6851	em->block_len = (u64)-1;
6852
6853	path = btrfs_alloc_path();
6854	if (!path) {
6855		ret = -ENOMEM;
6856		goto out;
6857	}
6858
6859	/* Chances are we'll be called again, so go ahead and do readahead */
6860	path->reada = READA_FORWARD;
6861
	/*
	 * The same explanation as in load_free_space_cache applies here as
	 * well: we only read when we're loading the free space cache, and at
	 * that point the commit_root has everything we need.
	 */
6867	if (btrfs_is_free_space_inode(inode)) {
6868		path->search_commit_root = 1;
6869		path->skip_locking = 1;
6870	}
6871
6872	ret = btrfs_lookup_file_extent(NULL, root, path, objectid, start, 0);
6873	if (ret < 0) {
6874		goto out;
6875	} else if (ret > 0) {
6876		if (path->slots[0] == 0)
6877			goto not_found;
6878		path->slots[0]--;
6879		ret = 0;
6880	}
6881
6882	leaf = path->nodes[0];
6883	item = btrfs_item_ptr(leaf, path->slots[0],
6884			      struct btrfs_file_extent_item);
6885	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
6886	if (found_key.objectid != objectid ||
6887	    found_key.type != BTRFS_EXTENT_DATA_KEY) {
6888		/*
6889		 * If we backup past the first extent we want to move forward
6890		 * and see if there is an extent in front of us, otherwise we'll
6891		 * say there is a hole for our whole search range which can
6892		 * cause problems.
6893		 */
6894		extent_end = start;
6895		goto next;
6896	}
6897
6898	extent_type = btrfs_file_extent_type(leaf, item);
6899	extent_start = found_key.offset;
6900	extent_end = btrfs_file_extent_end(path);
6901	if (extent_type == BTRFS_FILE_EXTENT_REG ||
6902	    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
		/* Only a regular file can have regular/prealloc extents */
6904		if (!S_ISREG(inode->vfs_inode.i_mode)) {
6905			ret = -EUCLEAN;
6906			btrfs_crit(fs_info,
6907		"regular/prealloc extent found for non-regular inode %llu",
6908				   btrfs_ino(inode));
6909			goto out;
6910		}
6911		trace_btrfs_get_extent_show_fi_regular(inode, leaf, item,
6912						       extent_start);
6913	} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
6914		trace_btrfs_get_extent_show_fi_inline(inode, leaf, item,
6915						      path->slots[0],
6916						      extent_start);
6917	}
6918next:
6919	if (start >= extent_end) {
6920		path->slots[0]++;
6921		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
6922			ret = btrfs_next_leaf(root, path);
6923			if (ret < 0)
6924				goto out;
6925			else if (ret > 0)
6926				goto not_found;
6927
6928			leaf = path->nodes[0];
6929		}
6930		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
6931		if (found_key.objectid != objectid ||
6932		    found_key.type != BTRFS_EXTENT_DATA_KEY)
6933			goto not_found;
6934		if (start + len <= found_key.offset)
6935			goto not_found;
6936		if (start > found_key.offset)
6937			goto next;
6938
6939		/* New extent overlaps with existing one */
6940		em->start = start;
6941		em->orig_start = start;
6942		em->len = found_key.offset - start;
6943		em->block_start = EXTENT_MAP_HOLE;
6944		goto insert;
6945	}
6946
6947	btrfs_extent_item_to_extent_map(inode, path, item, em);
6948
6949	if (extent_type == BTRFS_FILE_EXTENT_REG ||
6950	    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
6951		goto insert;
6952	} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
6953		/*
6954		 * Inline extent can only exist at file offset 0. This is
6955		 * ensured by tree-checker and inline extent creation path.
6956		 * Thus all members representing file offsets should be zero.
6957		 */
6958		ASSERT(extent_start == 0);
6959		ASSERT(em->start == 0);
6960
6961		/*
6962		 * btrfs_extent_item_to_extent_map() should have properly
6963		 * initialized em members already.
6964		 *
6965		 * Other members are not utilized for inline extents.
6966		 */
6967		ASSERT(em->block_start == EXTENT_MAP_INLINE);
6968		ASSERT(em->len == fs_info->sectorsize);
6969
6970		ret = read_inline_extent(inode, path, page);
6971		if (ret < 0)
6972			goto out;
6973		goto insert;
6974	}
6975not_found:
6976	em->start = start;
6977	em->orig_start = start;
6978	em->len = len;
6979	em->block_start = EXTENT_MAP_HOLE;
6980insert:
6981	ret = 0;
6982	btrfs_release_path(path);
6983	if (em->start > start || extent_map_end(em) <= start) {
6984		btrfs_err(fs_info,
6985			  "bad extent! em: [%llu %llu] passed [%llu %llu]",
6986			  em->start, em->len, start, len);
6987		ret = -EIO;
6988		goto out;
6989	}
6990
6991	write_lock(&em_tree->lock);
6992	ret = btrfs_add_extent_mapping(fs_info, em_tree, &em, start, len);
6993	write_unlock(&em_tree->lock);
6994out:
6995	btrfs_free_path(path);
6996
6997	trace_btrfs_get_extent(root, inode, em);
6998
6999	if (ret) {
7000		free_extent_map(em);
7001		return ERR_PTR(ret);
7002	}
7003	return em;
7004}
7005
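/*
 * Create the ordered extent (and, except for NOCOW writes, the extent map)
 * backing a direct IO write into [@start, @start + @len).  The new ordered
 * extent is stashed in @dio_data->ordered so it can be completed or cancelled
 * later.
 *
 * Returns the new extent map, NULL for NOCOW writes, or an ERR_PTR() on
 * failure.
 */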
7006static struct extent_map *btrfs_create_dio_extent(struct btrfs_inode *inode,
7007						  struct btrfs_dio_data *dio_data,
7008						  const u64 start,
7009						  const u64 len,
7010						  const u64 orig_start,
7011						  const u64 block_start,
7012						  const u64 block_len,
7013						  const u64 orig_block_len,
7014						  const u64 ram_bytes,
7015						  const int type)
7016{
7017	struct extent_map *em = NULL;
7018	struct btrfs_ordered_extent *ordered;
7019
7020	if (type != BTRFS_ORDERED_NOCOW) {
7021		em = create_io_em(inode, start, len, orig_start, block_start,
7022				  block_len, orig_block_len, ram_bytes,
7023				  BTRFS_COMPRESS_NONE, /* compress_type */
7024				  type);
7025		if (IS_ERR(em))
7026			goto out;
7027	}
7028	ordered = btrfs_alloc_ordered_extent(inode, start, len, len,
7029					     block_start, block_len, 0,
7030					     (1 << type) |
7031					     (1 << BTRFS_ORDERED_DIRECT),
7032					     BTRFS_COMPRESS_NONE);
7033	if (IS_ERR(ordered)) {
7034		if (em) {
7035			free_extent_map(em);
7036			btrfs_drop_extent_map_range(inode, start,
7037						    start + len - 1, false);
7038		}
7039		em = ERR_CAST(ordered);
7040	} else {
7041		ASSERT(!dio_data->ordered);
7042		dio_data->ordered = ordered;
7043	}
7044 out:
7045
7046	return em;
7047}
7048
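/*
 * Reserve a new data extent for a COW direct IO write starting at @start and
 * create the extent map and ordered extent for it.  On zoned filesystems the
 * reservation can temporarily fail with -EAGAIN until a zone finish happens,
 * in which case we wait and retry.
 */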
7049static struct extent_map *btrfs_new_extent_direct(struct btrfs_inode *inode,
7050						  struct btrfs_dio_data *dio_data,
7051						  u64 start, u64 len)
7052{
7053	struct btrfs_root *root = inode->root;
7054	struct btrfs_fs_info *fs_info = root->fs_info;
7055	struct extent_map *em;
7056	struct btrfs_key ins;
7057	u64 alloc_hint;
7058	int ret;
7059
7060	alloc_hint = get_extent_allocation_hint(inode, start, len);
7061again:
7062	ret = btrfs_reserve_extent(root, len, len, fs_info->sectorsize,
7063				   0, alloc_hint, &ins, 1, 1);
7064	if (ret == -EAGAIN) {
7065		ASSERT(btrfs_is_zoned(fs_info));
7066		wait_on_bit_io(&inode->root->fs_info->flags, BTRFS_FS_NEED_ZONE_FINISH,
7067			       TASK_UNINTERRUPTIBLE);
7068		goto again;
7069	}
7070	if (ret)
7071		return ERR_PTR(ret);
7072
7073	em = btrfs_create_dio_extent(inode, dio_data, start, ins.offset, start,
7074				     ins.objectid, ins.offset, ins.offset,
7075				     ins.offset, BTRFS_ORDERED_REGULAR);
7076	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
7077	if (IS_ERR(em))
7078		btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset,
7079					   1);
7080
7081	return em;
7082}
7083
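/*
 * Return true if we can't NOCOW into the block group containing @bytenr,
 * either because it is read-only or because it no longer exists.
 */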
7084static bool btrfs_extent_readonly(struct btrfs_fs_info *fs_info, u64 bytenr)
7085{
7086	struct btrfs_block_group *block_group;
7087	bool readonly = false;
7088
7089	block_group = btrfs_lookup_block_group(fs_info, bytenr);
7090	if (!block_group || block_group->ro)
7091		readonly = true;
7092	if (block_group)
7093		btrfs_put_block_group(block_group);
7094	return readonly;
7095}
7096
7097/*
7098 * Check if we can do nocow write into the range [@offset, @offset + @len)
7099 *
7100 * @offset:	File offset
7101 * @len:	The length to write, will be updated to the nocow writeable
7102 *		range
7103 * @orig_start:	(optional) Return the original file offset of the file extent
7104 * @orig_len:	(optional) Return the original on-disk length of the file extent
7105 * @ram_bytes:	(optional) Return the ram_bytes of the file extent
7106 * @strict:	if true, omit optimizations that might force us into unnecessary
7107 *		cow. e.g., don't trust generation number.
7108 *
7109 * Return:
7110 * >0	and update @len if we can do nocow write
7111 *  0	if we can't do nocow write
7112 * <0	if error happened
7113 *
7114 * NOTE: This only checks the file extents, caller is responsible to wait for
7115 *	 any ordered extents.
7116 */
7117noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
7118			      u64 *orig_start, u64 *orig_block_len,
7119			      u64 *ram_bytes, bool nowait, bool strict)
7120{
7121	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
7122	struct can_nocow_file_extent_args nocow_args = { 0 };
7123	struct btrfs_path *path;
7124	int ret;
7125	struct extent_buffer *leaf;
7126	struct btrfs_root *root = BTRFS_I(inode)->root;
7127	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
7128	struct btrfs_file_extent_item *fi;
7129	struct btrfs_key key;
7130	int found_type;
7131
7132	path = btrfs_alloc_path();
7133	if (!path)
7134		return -ENOMEM;
7135	path->nowait = nowait;
7136
7137	ret = btrfs_lookup_file_extent(NULL, root, path,
7138			btrfs_ino(BTRFS_I(inode)), offset, 0);
7139	if (ret < 0)
7140		goto out;
7141
7142	if (ret == 1) {
7143		if (path->slots[0] == 0) {
7144			/* can't find the item, must cow */
7145			ret = 0;
7146			goto out;
7147		}
7148		path->slots[0]--;
7149	}
7150	ret = 0;
7151	leaf = path->nodes[0];
7152	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
7153	if (key.objectid != btrfs_ino(BTRFS_I(inode)) ||
7154	    key.type != BTRFS_EXTENT_DATA_KEY) {
7155		/* not our file or wrong item type, must cow */
7156		goto out;
7157	}
7158
7159	if (key.offset > offset) {
7160		/* Wrong offset, must cow */
7161		goto out;
7162	}
7163
7164	if (btrfs_file_extent_end(path) <= offset)
7165		goto out;
7166
7167	fi = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);
7168	found_type = btrfs_file_extent_type(leaf, fi);
7169	if (ram_bytes)
7170		*ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
7171
7172	nocow_args.start = offset;
7173	nocow_args.end = offset + *len - 1;
7174	nocow_args.strict = strict;
7175	nocow_args.free_path = true;
7176
7177	ret = can_nocow_file_extent(path, &key, BTRFS_I(inode), &nocow_args);
7178	/* can_nocow_file_extent() has freed the path. */
7179	path = NULL;
7180
7181	if (ret != 1) {
7182		/* Treat errors as not being able to NOCOW. */
7183		ret = 0;
7184		goto out;
7185	}
7186
7187	ret = 0;
7188	if (btrfs_extent_readonly(fs_info, nocow_args.disk_bytenr))
7189		goto out;
7190
7191	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
7192	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
7193		u64 range_end;
7194
7195		range_end = round_up(offset + nocow_args.num_bytes,
7196				     root->fs_info->sectorsize) - 1;
7197		ret = test_range_bit_exists(io_tree, offset, range_end, EXTENT_DELALLOC);
7198		if (ret) {
7199			ret = -EAGAIN;
7200			goto out;
7201		}
7202	}
7203
7204	if (orig_start)
7205		*orig_start = key.offset - nocow_args.extent_offset;
7206	if (orig_block_len)
7207		*orig_block_len = nocow_args.disk_num_bytes;
7208
7209	*len = nocow_args.num_bytes;
7210	ret = 1;
7211out:
7212	btrfs_free_path(path);
7213	return ret;
7214}
7215
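/*
 * Lock the io_tree range [@lockstart, @lockend] for direct IO, making sure
 * there are no ordered extents and, for writes, no buffered pages left in
 * the range.  Returns -EAGAIN for NOWAIT requests that would block and
 * -ENOTBLK when we have to fall back to buffered IO.
 */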
7216static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
7217			      struct extent_state **cached_state,
7218			      unsigned int iomap_flags)
7219{
7220	const bool writing = (iomap_flags & IOMAP_WRITE);
7221	const bool nowait = (iomap_flags & IOMAP_NOWAIT);
7222	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
7223	struct btrfs_ordered_extent *ordered;
7224	int ret = 0;
7225
7226	while (1) {
7227		if (nowait) {
7228			if (!try_lock_extent(io_tree, lockstart, lockend,
7229					     cached_state))
7230				return -EAGAIN;
7231		} else {
7232			lock_extent(io_tree, lockstart, lockend, cached_state);
7233		}
7234		/*
7235		 * We're concerned with the entire range that we're going to be
7236		 * doing DIO to, so we need to make sure there's no ordered
7237		 * extents in this range.
7238		 */
7239		ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), lockstart,
7240						     lockend - lockstart + 1);
7241
7242		/*
7243		 * We need to make sure there are no buffered pages in this
7244		 * range either, we could have raced between the invalidate in
7245		 * generic_file_direct_write and locking the extent.  The
7246		 * invalidate needs to happen so that reads after a write do not
7247		 * get stale data.
7248		 */
7249		if (!ordered &&
7250		    (!writing || !filemap_range_has_page(inode->i_mapping,
7251							 lockstart, lockend)))
7252			break;
7253
7254		unlock_extent(io_tree, lockstart, lockend, cached_state);
7255
7256		if (ordered) {
7257			if (nowait) {
7258				btrfs_put_ordered_extent(ordered);
7259				ret = -EAGAIN;
7260				break;
7261			}
7262			/*
7263			 * If we are doing a DIO read and the ordered extent we
7264			 * found is for a buffered write, we can not wait for it
7265			 * to complete and retry, because if we do so we can
7266			 * deadlock with concurrent buffered writes on page
7267			 * locks. This happens only if our DIO read covers more
7268			 * than one extent map, if at this point has already
7269			 * created an ordered extent for a previous extent map
7270			 * and locked its range in the inode's io tree, and a
7271			 * concurrent write against that previous extent map's
7272			 * range and this range started (we unlock the ranges
7273			 * in the io tree only when the bios complete and
7274			 * buffered writes always lock pages before attempting
7275			 * to lock range in the io tree).
7276			 */
7277			if (writing ||
7278			    test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags))
7279				btrfs_start_ordered_extent(ordered);
7280			else
7281				ret = nowait ? -EAGAIN : -ENOTBLK;
7282			btrfs_put_ordered_extent(ordered);
7283		} else {
7284			/*
7285			 * We could trigger writeback for this range (and wait
7286			 * for it to complete) and then invalidate the pages for
7287			 * this range (through invalidate_inode_pages2_range()),
7288			 * but that can lead us to a deadlock with a concurrent
7289			 * call to readahead (a buffered read or a defrag call
7290			 * triggered a readahead) on a page lock due to an
7291			 * ordered dio extent we created before but did not have
7292			 * yet a corresponding bio submitted (whence it can not
7293			 * complete), which makes readahead wait for that
7294			 * ordered extent to complete while holding a lock on
7295			 * that page.
7296			 */
7297			ret = nowait ? -EAGAIN : -ENOTBLK;
7298		}
7299
7300		if (ret)
7301			break;
7302
7303		cond_resched();
7304	}
7305
7306	return ret;
7307}
7308
7309/* The callers of this must take lock_extent() */
7310static struct extent_map *create_io_em(struct btrfs_inode *inode, u64 start,
7311				       u64 len, u64 orig_start, u64 block_start,
7312				       u64 block_len, u64 orig_block_len,
7313				       u64 ram_bytes, int compress_type,
7314				       int type)
7315{
7316	struct extent_map *em;
7317	int ret;
7318
7319	ASSERT(type == BTRFS_ORDERED_PREALLOC ||
7320	       type == BTRFS_ORDERED_COMPRESSED ||
7321	       type == BTRFS_ORDERED_NOCOW ||
7322	       type == BTRFS_ORDERED_REGULAR);
7323
7324	em = alloc_extent_map();
7325	if (!em)
7326		return ERR_PTR(-ENOMEM);
7327
7328	em->start = start;
7329	em->orig_start = orig_start;
7330	em->len = len;
7331	em->block_len = block_len;
7332	em->block_start = block_start;
7333	em->orig_block_len = orig_block_len;
7334	em->ram_bytes = ram_bytes;
7335	em->generation = -1;
7336	em->flags |= EXTENT_FLAG_PINNED;
7337	if (type == BTRFS_ORDERED_PREALLOC)
7338		em->flags |= EXTENT_FLAG_FILLING;
7339	else if (type == BTRFS_ORDERED_COMPRESSED)
7340		extent_map_set_compression(em, compress_type);
7341
7342	ret = btrfs_replace_extent_map_range(inode, em, true);
7343	if (ret) {
7344		free_extent_map(em);
7345		return ERR_PTR(ret);
7346	}
7347
	/* The em now has 2 refs, so the caller needs to do free_extent_map once. */
7349	return em;
7350}
7351
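/*
 * Prepare the extent map and ordered extent for a direct IO write into
 * [@start, @start + @*lenp): NOCOW into the existing extent when possible,
 * otherwise reserve and allocate a new extent (COW).  On success, @*lenp is
 * trimmed to the length actually covered and i_size is updated if the write
 * extends beyond it.
 */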
7353static int btrfs_get_blocks_direct_write(struct extent_map **map,
7354					 struct inode *inode,
7355					 struct btrfs_dio_data *dio_data,
7356					 u64 start, u64 *lenp,
7357					 unsigned int iomap_flags)
7358{
7359	const bool nowait = (iomap_flags & IOMAP_NOWAIT);
7360	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
7361	struct extent_map *em = *map;
7362	int type;
7363	u64 block_start, orig_start, orig_block_len, ram_bytes;
7364	struct btrfs_block_group *bg;
7365	bool can_nocow = false;
7366	bool space_reserved = false;
7367	u64 len = *lenp;
7368	u64 prev_len;
7369	int ret = 0;
7370
7371	/*
7372	 * We don't allocate a new extent in the following cases
7373	 *
7374	 * 1) The inode is marked as NODATACOW. In this case we'll just use the
7375	 * existing extent.
7376	 * 2) The extent is marked as PREALLOC. We're good to go here and can
7377	 * just use the extent.
7378	 *
7379	 */
7380	if ((em->flags & EXTENT_FLAG_PREALLOC) ||
7381	    ((BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
7382	     em->block_start != EXTENT_MAP_HOLE)) {
7383		if (em->flags & EXTENT_FLAG_PREALLOC)
7384			type = BTRFS_ORDERED_PREALLOC;
7385		else
7386			type = BTRFS_ORDERED_NOCOW;
7387		len = min(len, em->len - (start - em->start));
7388		block_start = em->block_start + (start - em->start);
7389
7390		if (can_nocow_extent(inode, start, &len, &orig_start,
7391				     &orig_block_len, &ram_bytes, false, false) == 1) {
7392			bg = btrfs_inc_nocow_writers(fs_info, block_start);
7393			if (bg)
7394				can_nocow = true;
7395		}
7396	}
7397
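	/*
	 * Remember the length before any further trimming, so that we can
	 * release the metadata reservation for the part we don't end up using.
	 */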
7398	prev_len = len;
7399	if (can_nocow) {
7400		struct extent_map *em2;
7401
7402		/* We can NOCOW, so only need to reserve metadata space. */
7403		ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode), len, len,
7404						      nowait);
7405		if (ret < 0) {
7406			/* Our caller expects us to free the input extent map. */
7407			free_extent_map(em);
7408			*map = NULL;
7409			btrfs_dec_nocow_writers(bg);
7410			if (nowait && (ret == -ENOSPC || ret == -EDQUOT))
7411				ret = -EAGAIN;
7412			goto out;
7413		}
7414		space_reserved = true;
7415
7416		em2 = btrfs_create_dio_extent(BTRFS_I(inode), dio_data, start, len,
7417					      orig_start, block_start,
7418					      len, orig_block_len,
7419					      ram_bytes, type);
7420		btrfs_dec_nocow_writers(bg);
7421		if (type == BTRFS_ORDERED_PREALLOC) {
7422			free_extent_map(em);
7423			*map = em2;
7424			em = em2;
7425		}
7426
7427		if (IS_ERR(em2)) {
7428			ret = PTR_ERR(em2);
7429			goto out;
7430		}
7431
7432		dio_data->nocow_done = true;
7433	} else {
7434		/* Our caller expects us to free the input extent map. */
7435		free_extent_map(em);
7436		*map = NULL;
7437
7438		if (nowait) {
7439			ret = -EAGAIN;
7440			goto out;
7441		}
7442
7443		/*
7444		 * If we could not allocate data space before locking the file
7445		 * range and we can't do a NOCOW write, then we have to fail.
7446		 */
7447		if (!dio_data->data_space_reserved) {
7448			ret = -ENOSPC;
7449			goto out;
7450		}
7451
7452		/*
7453		 * We have to COW and we have already reserved data space before,
7454		 * so now we reserve only metadata.
7455		 */
7456		ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode), len, len,
7457						      false);
7458		if (ret < 0)
7459			goto out;
7460		space_reserved = true;
7461
7462		em = btrfs_new_extent_direct(BTRFS_I(inode), dio_data, start, len);
7463		if (IS_ERR(em)) {
7464			ret = PTR_ERR(em);
7465			goto out;
7466		}
7467		*map = em;
7468		len = min(len, em->len - (start - em->start));
7469		if (len < prev_len)
7470			btrfs_delalloc_release_metadata(BTRFS_I(inode),
7471							prev_len - len, true);
7472	}
7473
7474	/*
7475	 * We have created our ordered extent, so we can now release our reservation
7476	 * for an outstanding extent.
7477	 */
7478	btrfs_delalloc_release_extents(BTRFS_I(inode), prev_len);
7479
7480	/*
7481	 * Need to update the i_size under the extent lock so buffered
7482	 * readers will get the updated i_size when we unlock.
7483	 */
7484	if (start + len > i_size_read(inode))
7485		i_size_write(inode, start + len);
7486out:
7487	if (ret && space_reserved) {
7488		btrfs_delalloc_release_extents(BTRFS_I(inode), len);
7489		btrfs_delalloc_release_metadata(BTRFS_I(inode), len, true);
7490	}
7491	*lenp = len;
7492	return ret;
7493}
7494
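/*
 * The ->iomap_begin() callback for direct IO: map the file range starting at
 * @start to an iomap, locking the extent range and, for writes, creating the
 * ordered extent (allocating a new extent if we can not NOCOW).
 */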
7495static int btrfs_dio_iomap_begin(struct inode *inode, loff_t start,
7496		loff_t length, unsigned int flags, struct iomap *iomap,
7497		struct iomap *srcmap)
7498{
7499	struct iomap_iter *iter = container_of(iomap, struct iomap_iter, iomap);
7500	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
7501	struct extent_map *em;
7502	struct extent_state *cached_state = NULL;
7503	struct btrfs_dio_data *dio_data = iter->private;
7504	u64 lockstart, lockend;
7505	const bool write = !!(flags & IOMAP_WRITE);
7506	int ret = 0;
7507	u64 len = length;
7508	const u64 data_alloc_len = length;
7509	bool unlock_extents = false;
7510
7511	/*
7512	 * We could potentially fault if we have a buffer > PAGE_SIZE, and if
7513	 * we're NOWAIT we may submit a bio for a partial range and return
7514	 * EIOCBQUEUED, which would result in an errant short read.
7515	 *
7516	 * The best way to handle this would be to allow for partial completions
7517	 * of iocb's, so we could submit the partial bio, return and fault in
7518	 * the rest of the pages, and then submit the io for the rest of the
7519	 * range.  However we don't have that currently, so simply return
7520	 * -EAGAIN at this point so that the normal path is used.
7521	 */
7522	if (!write && (flags & IOMAP_NOWAIT) && length > PAGE_SIZE)
7523		return -EAGAIN;
7524
7525	/*
7526	 * Cap the size of reads to that usually seen in buffered I/O as we need
7527	 * to allocate a contiguous array for the checksums.
7528	 */
7529	if (!write)
7530		len = min_t(u64, len, fs_info->sectorsize * BTRFS_MAX_BIO_SECTORS);
7531
7532	lockstart = start;
7533	lockend = start + len - 1;
7534
7535	/*
7536	 * iomap_dio_rw() only does filemap_write_and_wait_range(), which isn't
7537	 * enough if we've written compressed pages to this area, so we need to
7538	 * flush the dirty pages again to make absolutely sure that any
7539	 * outstanding dirty pages are on disk - the first flush only starts
7540	 * compression on the data, while keeping the pages locked, so by the
7541	 * time the second flush returns we know bios for the compressed pages
	 * were submitted and finished, and the pages are no longer under writeback.
7543	 *
7544	 * If we have a NOWAIT request and we have any pages in the range that
7545	 * are locked, likely due to compression still in progress, we don't want
7546	 * to block on page locks. We also don't want to block on pages marked as
7547	 * dirty or under writeback (same as for the non-compression case).
7548	 * iomap_dio_rw() did the same check, but after that and before we got
7549	 * here, mmap'ed writes may have happened or buffered reads started
7550	 * (readpage() and readahead(), which lock pages), as we haven't locked
7551	 * the file range yet.
7552	 */
7553	if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
7554		     &BTRFS_I(inode)->runtime_flags)) {
7555		if (flags & IOMAP_NOWAIT) {
7556			if (filemap_range_needs_writeback(inode->i_mapping,
7557							  lockstart, lockend))
7558				return -EAGAIN;
7559		} else {
7560			ret = filemap_fdatawrite_range(inode->i_mapping, start,
7561						       start + length - 1);
7562			if (ret)
7563				return ret;
7564		}
7565	}
7566
7567	memset(dio_data, 0, sizeof(*dio_data));
7568
7569	/*
7570	 * We always try to allocate data space and must do it before locking
7571	 * the file range, to avoid deadlocks with concurrent writes to the same
7572	 * range if the range has several extents and the writes don't expand the
7573	 * current i_size (the inode lock is taken in shared mode). If we fail to
7574	 * allocate data space here we continue and later, after locking the
	 * file range, we fail with ENOSPC only if we figure out we cannot do a
7576	 * NOCOW write.
7577	 */
7578	if (write && !(flags & IOMAP_NOWAIT)) {
7579		ret = btrfs_check_data_free_space(BTRFS_I(inode),
7580						  &dio_data->data_reserved,
7581						  start, data_alloc_len, false);
7582		if (!ret)
7583			dio_data->data_space_reserved = true;
7584		else if (ret && !(BTRFS_I(inode)->flags &
7585				  (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)))
7586			goto err;
7587	}
7588
7589	/*
7590	 * If this errors out it's because we couldn't invalidate pagecache for
7591	 * this range and we need to fallback to buffered IO, or we are doing a
7592	 * NOWAIT read/write and we need to block.
7593	 */
7594	ret = lock_extent_direct(inode, lockstart, lockend, &cached_state, flags);
7595	if (ret < 0)
7596		goto err;
7597
7598	em = btrfs_get_extent(BTRFS_I(inode), NULL, start, len);
7599	if (IS_ERR(em)) {
7600		ret = PTR_ERR(em);
7601		goto unlock_err;
7602	}
7603
7604	/*
7605	 * Ok for INLINE and COMPRESSED extents we need to fallback on buffered
7606	 * io.  INLINE is special, and we could probably kludge it in here, but
7607	 * it's still buffered so for safety lets just fall back to the generic
7608	 * buffered path.
7609	 *
7610	 * For COMPRESSED we _have_ to read the entire extent in so we can
7611	 * decompress it, so there will be buffering required no matter what we
7612	 * do, so go ahead and fallback to buffered.
7613	 *
7614	 * We return -ENOTBLK because that's what makes DIO go ahead and go back
7615	 * to buffered IO.  Don't blame me, this is the price we pay for using
7616	 * the generic code.
7617	 */
7618	if (extent_map_is_compressed(em) ||
7619	    em->block_start == EXTENT_MAP_INLINE) {
7620		free_extent_map(em);
7621		/*
7622		 * If we are in a NOWAIT context, return -EAGAIN in order to
7623		 * fallback to buffered IO. This is not only because we can
7624		 * block with buffered IO (no support for NOWAIT semantics at
7625		 * the moment) but also to avoid returning short reads to user
7626		 * space - this happens if we were able to read some data from
7627		 * previous non-compressed extents and then when we fallback to
7628		 * buffered IO, at btrfs_file_read_iter() by calling
7629		 * filemap_read(), we fail to fault in pages for the read buffer,
7630		 * in which case filemap_read() returns a short read (the number
7631		 * of bytes previously read is > 0, so it does not return -EFAULT).
7632		 */
7633		ret = (flags & IOMAP_NOWAIT) ? -EAGAIN : -ENOTBLK;
7634		goto unlock_err;
7635	}
7636
7637	len = min(len, em->len - (start - em->start));
7638
7639	/*
7640	 * If we have a NOWAIT request and the range contains multiple extents
7641	 * (or a mix of extents and holes), then we return -EAGAIN to make the
7642	 * caller fallback to a context where it can do a blocking (without
7643	 * NOWAIT) request. This way we avoid doing partial IO and returning
7644	 * success to the caller, which is not optimal for writes and for reads
7645	 * it can result in unexpected behaviour for an application.
7646	 *
7647	 * When doing a read, because we use IOMAP_DIO_PARTIAL when calling
	 * iomap_dio_rw(), we can end up returning less data than what the caller
7649	 * asked for, resulting in an unexpected, and incorrect, short read.
7650	 * That is, the caller asked to read N bytes and we return less than that,
7651	 * which is wrong unless we are crossing EOF. This happens if we get a
7652	 * page fault error when trying to fault in pages for the buffer that is
7653	 * associated to the struct iov_iter passed to iomap_dio_rw(), and we
7654	 * have previously submitted bios for other extents in the range, in
7655	 * which case iomap_dio_rw() may return us EIOCBQUEUED if not all of
7656	 * those bios have completed by the time we get the page fault error,
7657	 * which we return back to our caller - we should only return EIOCBQUEUED
7658	 * after we have submitted bios for all the extents in the range.
7659	 */
7660	if ((flags & IOMAP_NOWAIT) && len < length) {
7661		free_extent_map(em);
7662		ret = -EAGAIN;
7663		goto unlock_err;
7664	}
7665
7666	if (write) {
7667		ret = btrfs_get_blocks_direct_write(&em, inode, dio_data,
7668						    start, &len, flags);
7669		if (ret < 0)
7670			goto unlock_err;
7671		unlock_extents = true;
7672		/* Recalc len in case the new em is smaller than requested */
7673		len = min(len, em->len - (start - em->start));
7674		if (dio_data->data_space_reserved) {
7675			u64 release_offset;
7676			u64 release_len = 0;
7677
7678			if (dio_data->nocow_done) {
7679				release_offset = start;
7680				release_len = data_alloc_len;
7681			} else if (len < data_alloc_len) {
7682				release_offset = start + len;
7683				release_len = data_alloc_len - len;
7684			}
7685
7686			if (release_len > 0)
7687				btrfs_free_reserved_data_space(BTRFS_I(inode),
7688							       dio_data->data_reserved,
7689							       release_offset,
7690							       release_len);
7691		}
7692	} else {
7693		/*
7694		 * We need to unlock only the end area that we aren't using.
7695		 * The rest is going to be unlocked by the endio routine.
7696		 */
7697		lockstart = start + len;
7698		if (lockstart < lockend)
7699			unlock_extents = true;
7700	}
7701
7702	if (unlock_extents)
7703		unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
7704			      &cached_state);
7705	else
7706		free_extent_state(cached_state);
7707
7708	/*
7709	 * Translate extent map information to iomap.
7710	 * We trim the extents (and move the addr) even though iomap code does
7711	 * that, since we have locked only the parts we are performing I/O in.
7712	 */
7713	if ((em->block_start == EXTENT_MAP_HOLE) ||
7714	    ((em->flags & EXTENT_FLAG_PREALLOC) && !write)) {
7715		iomap->addr = IOMAP_NULL_ADDR;
7716		iomap->type = IOMAP_HOLE;
7717	} else {
7718		iomap->addr = em->block_start + (start - em->start);
7719		iomap->type = IOMAP_MAPPED;
7720	}
7721	iomap->offset = start;
7722	iomap->bdev = fs_info->fs_devices->latest_dev->bdev;
7723	iomap->length = len;
7724	free_extent_map(em);
7725
7726	return 0;
7727
7728unlock_err:
7729	unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
7730		      &cached_state);
7731err:
7732	if (dio_data->data_space_reserved) {
7733		btrfs_free_reserved_data_space(BTRFS_I(inode),
7734					       dio_data->data_reserved,
7735					       start, data_alloc_len);
7736		extent_changeset_free(dio_data->data_reserved);
7737	}
7738
7739	return ret;
7740}
7741
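/*
 * The ->iomap_end() callback for direct IO: unlock whatever part of the range
 * we did not submit bios for and, for a short write, cancel the part of the
 * ordered extent that will never be submitted.
 */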
7742static int btrfs_dio_iomap_end(struct inode *inode, loff_t pos, loff_t length,
7743		ssize_t written, unsigned int flags, struct iomap *iomap)
7744{
7745	struct iomap_iter *iter = container_of(iomap, struct iomap_iter, iomap);
7746	struct btrfs_dio_data *dio_data = iter->private;
7747	size_t submitted = dio_data->submitted;
7748	const bool write = !!(flags & IOMAP_WRITE);
7749	int ret = 0;
7750
7751	if (!write && (iomap->type == IOMAP_HOLE)) {
7752		/* If reading from a hole, unlock and return */
7753		unlock_extent(&BTRFS_I(inode)->io_tree, pos, pos + length - 1,
7754			      NULL);
7755		return 0;
7756	}
7757
7758	if (submitted < length) {
7759		pos += submitted;
7760		length -= submitted;
7761		if (write)
7762			btrfs_finish_ordered_extent(dio_data->ordered, NULL,
7763						    pos, length, false);
7764		else
7765			unlock_extent(&BTRFS_I(inode)->io_tree, pos,
7766				      pos + length - 1, NULL);
7767		ret = -ENOTBLK;
7768	}
7769	if (write) {
7770		btrfs_put_ordered_extent(dio_data->ordered);
7771		dio_data->ordered = NULL;
7772	}
7773
7774	if (write)
7775		extent_changeset_free(dio_data->data_reserved);
7776	return ret;
7777}
7778
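/*
 * Bio completion for direct IO: finish the ordered extent for writes or
 * unlock the extent range for reads, then hand the bio back to iomap for the
 * final completion.
 */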
7779static void btrfs_dio_end_io(struct btrfs_bio *bbio)
7780{
7781	struct btrfs_dio_private *dip =
7782		container_of(bbio, struct btrfs_dio_private, bbio);
7783	struct btrfs_inode *inode = bbio->inode;
7784	struct bio *bio = &bbio->bio;
7785
7786	if (bio->bi_status) {
7787		btrfs_warn(inode->root->fs_info,
7788		"direct IO failed ino %llu op 0x%0x offset %#llx len %u err no %d",
7789			   btrfs_ino(inode), bio->bi_opf,
7790			   dip->file_offset, dip->bytes, bio->bi_status);
7791	}
7792
7793	if (btrfs_op(bio) == BTRFS_MAP_WRITE) {
7794		btrfs_finish_ordered_extent(bbio->ordered, NULL,
7795					    dip->file_offset, dip->bytes,
7796					    !bio->bi_status);
7797	} else {
7798		unlock_extent(&inode->io_tree, dip->file_offset,
7799			      dip->file_offset + dip->bytes - 1, NULL);
7800	}
7801
7802	bbio->bio.bi_private = bbio->private;
7803	iomap_dio_bio_end_io(bio);
7804}
7805
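/*
 * The ->submit_io() callback for direct IO: initialize the btrfs_bio, account
 * the submitted bytes, split the ordered extent to match the bio for partial
 * writes, and submit it.
 */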
7806static void btrfs_dio_submit_io(const struct iomap_iter *iter, struct bio *bio,
7807				loff_t file_offset)
7808{
7809	struct btrfs_bio *bbio = btrfs_bio(bio);
7810	struct btrfs_dio_private *dip =
7811		container_of(bbio, struct btrfs_dio_private, bbio);
7812	struct btrfs_dio_data *dio_data = iter->private;
7813
7814	btrfs_bio_init(bbio, BTRFS_I(iter->inode)->root->fs_info,
7815		       btrfs_dio_end_io, bio->bi_private);
7816	bbio->inode = BTRFS_I(iter->inode);
7817	bbio->file_offset = file_offset;
7818
7819	dip->file_offset = file_offset;
7820	dip->bytes = bio->bi_iter.bi_size;
7821
7822	dio_data->submitted += bio->bi_iter.bi_size;
7823
7824	/*
7825	 * Check if we are doing a partial write.  If we are, we need to split
7826	 * the ordered extent to match the submitted bio.  Hang on to the
7827	 * remaining unfinishable ordered_extent in dio_data so that it can be
7828	 * cancelled in iomap_end to avoid a deadlock wherein faulting the
7829	 * remaining pages is blocked on the outstanding ordered extent.
7830	 */
7831	if (iter->flags & IOMAP_WRITE) {
7832		int ret;
7833
7834		ret = btrfs_extract_ordered_extent(bbio, dio_data->ordered);
7835		if (ret) {
7836			btrfs_finish_ordered_extent(dio_data->ordered, NULL,
7837						    file_offset, dip->bytes,
7838						    !ret);
7839			bio->bi_status = errno_to_blk_status(ret);
7840			iomap_dio_bio_end_io(bio);
7841			return;
7842		}
7843	}
7844
7845	btrfs_submit_bio(bbio, 0);
7846}
7847
7848static const struct iomap_ops btrfs_dio_iomap_ops = {
7849	.iomap_begin            = btrfs_dio_iomap_begin,
7850	.iomap_end              = btrfs_dio_iomap_end,
7851};
7852
7853static const struct iomap_dio_ops btrfs_dio_ops = {
7854	.submit_io		= btrfs_dio_submit_io,
7855	.bio_set		= &btrfs_dio_bioset,
7856};
7857
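/*
 * Do a direct IO read with IOMAP_DIO_PARTIAL, so the caller can fault in the
 * remaining pages and retry; @done_before is the number of bytes completed by
 * previous attempts.
 */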
7858ssize_t btrfs_dio_read(struct kiocb *iocb, struct iov_iter *iter, size_t done_before)
7859{
7860	struct btrfs_dio_data data = { 0 };
7861
7862	return iomap_dio_rw(iocb, iter, &btrfs_dio_iomap_ops, &btrfs_dio_ops,
7863			    IOMAP_DIO_PARTIAL, &data, done_before);
7864}
7865
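/*
 * Start a direct IO write, also with IOMAP_DIO_PARTIAL.  Returns the struct
 * iomap_dio so the caller can complete the dio after faulting in any missing
 * pages.
 */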
7866struct iomap_dio *btrfs_dio_write(struct kiocb *iocb, struct iov_iter *iter,
7867				  size_t done_before)
7868{
7869	struct btrfs_dio_data data = { 0 };
7870
7871	return __iomap_dio_rw(iocb, iter, &btrfs_dio_iomap_ops, &btrfs_dio_ops,
7872			    IOMAP_DIO_PARTIAL, &data, done_before);
7873}
7874
7875static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
7876			u64 start, u64 len)
7877{
7878	struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
7879	int	ret;
7880
7881	ret = fiemap_prep(inode, fieinfo, start, &len, 0);
7882	if (ret)
7883		return ret;
7884
7885	/*
7886	 * fiemap_prep() called filemap_write_and_wait() for the whole possible
7887	 * file range (0 to LLONG_MAX), but that is not enough if we have
7888	 * compression enabled. The first filemap_fdatawrite_range() only kicks
7889	 * in the compression of data (in an async thread) and will return
7890	 * before the compression is done and writeback is started. A second
7891	 * filemap_fdatawrite_range() is needed to wait for the compression to
7892	 * complete and writeback to start. We also need to wait for ordered
7893	 * extents to complete, because our fiemap implementation uses mainly
7894	 * file extent items to list the extents, searching for extent maps
7895	 * only for file ranges with holes or prealloc extents to figure out
7896	 * if we have delalloc in those ranges.
7897	 */
7898	if (fieinfo->fi_flags & FIEMAP_FLAG_SYNC) {
7899		ret = btrfs_wait_ordered_range(inode, 0, LLONG_MAX);
7900		if (ret)
7901			return ret;
7902	}
7903
7904	btrfs_inode_lock(btrfs_inode, BTRFS_ILOCK_SHARED);
7905
7906	/*
7907	 * We did an initial flush to avoid holding the inode's lock while
7908	 * triggering writeback and waiting for the completion of IO and ordered
7909	 * extents. Now after we locked the inode we do it again, because it's
7910	 * possible a new write may have happened in between those two steps.
7911	 */
7912	if (fieinfo->fi_flags & FIEMAP_FLAG_SYNC) {
7913		ret = btrfs_wait_ordered_range(inode, 0, LLONG_MAX);
7914		if (ret) {
7915			btrfs_inode_unlock(btrfs_inode, BTRFS_ILOCK_SHARED);
7916			return ret;
7917		}
7918	}
7919
7920	ret = extent_fiemap(btrfs_inode, fieinfo, start, len);
7921	btrfs_inode_unlock(btrfs_inode, BTRFS_ILOCK_SHARED);
7922
7923	return ret;
7924}
7925
7926static int btrfs_writepages(struct address_space *mapping,
7927			    struct writeback_control *wbc)
7928{
7929	return extent_writepages(mapping, wbc);
7930}
7931
7932static void btrfs_readahead(struct readahead_control *rac)
7933{
7934	extent_readahead(rac);
7935}
7936
7937/*
7938 * For release_folio() and invalidate_folio() we have a race window where
7939 * folio_end_writeback() is called but the subpage spinlock is not yet released.
7940 * If we continue to release/invalidate the page, we could cause use-after-free
7941 * for subpage spinlock.  So this function is to spin and wait for subpage
7942 * spinlock.
7943 */
7944static void wait_subpage_spinlock(struct page *page)
7945{
7946	struct btrfs_fs_info *fs_info = page_to_fs_info(page);
7947	struct folio *folio = page_folio(page);
7948	struct btrfs_subpage *subpage;
7949
7950	if (!btrfs_is_subpage(fs_info, page->mapping))
7951		return;
7952
7953	ASSERT(folio_test_private(folio) && folio_get_private(folio));
7954	subpage = folio_get_private(folio);
7955
7956	/*
7957	 * This may look insane as we just acquire the spinlock and release it,
7958	 * without doing anything.  But we just want to make sure no one is
7959	 * still holding the subpage spinlock.
7960	 * And since the page is not dirty nor writeback, and we have page
7961	 * locked, the only possible way to hold a spinlock is from the endio
7962	 * function to clear page writeback.
7963	 *
7964	 * Here we just acquire the spinlock so that all existing callers
7965	 * should exit and we're safe to release/invalidate the page.
7966	 */
7967	spin_lock_irq(&subpage->lock);
7968	spin_unlock_irq(&subpage->lock);
7969}
7970
7971static bool __btrfs_release_folio(struct folio *folio, gfp_t gfp_flags)
7972{
7973	int ret = try_release_extent_mapping(&folio->page, gfp_flags);
7974
7975	if (ret == 1) {
7976		wait_subpage_spinlock(&folio->page);
7977		clear_page_extent_mapped(&folio->page);
7978	}
7979	return ret;
7980}
7981
7982static bool btrfs_release_folio(struct folio *folio, gfp_t gfp_flags)
7983{
7984	if (folio_test_writeback(folio) || folio_test_dirty(folio))
7985		return false;
7986	return __btrfs_release_folio(folio, gfp_flags);
7987}
7988
7989#ifdef CONFIG_MIGRATION
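/*
 * Migrate a btrfs folio, carrying over the Ordered (Private2) bit which
 * filemap_migrate_folio() does not preserve.
 */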
7990static int btrfs_migrate_folio(struct address_space *mapping,
7991			     struct folio *dst, struct folio *src,
7992			     enum migrate_mode mode)
7993{
7994	int ret = filemap_migrate_folio(mapping, dst, src, mode);
7995
7996	if (ret != MIGRATEPAGE_SUCCESS)
7997		return ret;
7998
7999	if (folio_test_ordered(src)) {
8000		folio_clear_ordered(src);
8001		folio_set_ordered(dst);
8002	}
8003
8004	return MIGRATEPAGE_SUCCESS;
8005}
8006#else
8007#define btrfs_migrate_folio NULL
8008#endif
8009
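/*
 * The ->invalidate_folio() callback: clean up the ordered extents, extent
 * states and qgroup reservations covering the invalidated range, taking care
 * not to do double accounting against btrfs_finish_ordered_io().
 */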
8010static void btrfs_invalidate_folio(struct folio *folio, size_t offset,
8011				 size_t length)
8012{
8013	struct btrfs_inode *inode = folio_to_inode(folio);
8014	struct btrfs_fs_info *fs_info = inode->root->fs_info;
8015	struct extent_io_tree *tree = &inode->io_tree;
8016	struct extent_state *cached_state = NULL;
8017	u64 page_start = folio_pos(folio);
8018	u64 page_end = page_start + folio_size(folio) - 1;
8019	u64 cur;
8020	int inode_evicting = inode->vfs_inode.i_state & I_FREEING;
8021
8022	/*
8023	 * We have folio locked so no new ordered extent can be created on this
8024	 * page, nor bio can be submitted for this folio.
8025	 *
8026	 * But already submitted bio can still be finished on this folio.
8027	 * Furthermore, endio function won't skip folio which has Ordered
8028	 * (Private2) already cleared, so it's possible for endio and
8029	 * invalidate_folio to do the same ordered extent accounting twice
8030	 * on one folio.
8031	 *
8032	 * So here we wait for any submitted bios to finish, so that we won't
8033	 * do double ordered extent accounting on the same folio.
8034	 */
8035	folio_wait_writeback(folio);
8036	wait_subpage_spinlock(&folio->page);
8037
8038	/*
8039	 * For subpage case, we have call sites like
8040	 * btrfs_punch_hole_lock_range() which passes range not aligned to
8041	 * sectorsize.
8042	 * If the range doesn't cover the full folio, we don't need to and
8043	 * shouldn't clear page extent mapped, as folio->private can still
8044	 * record subpage dirty bits for other part of the range.
8045	 *
8046	 * For cases that invalidate the full folio even the range doesn't
8047	 * cover the full folio, like invalidating the last folio, we're
8048	 * still safe to wait for ordered extent to finish.
8049	 */
8050	if (!(offset == 0 && length == folio_size(folio))) {
8051		btrfs_release_folio(folio, GFP_NOFS);
8052		return;
8053	}
8054
8055	if (!inode_evicting)
8056		lock_extent(tree, page_start, page_end, &cached_state);
8057
8058	cur = page_start;
8059	while (cur < page_end) {
8060		struct btrfs_ordered_extent *ordered;
8061		u64 range_end;
8062		u32 range_len;
8063		u32 extra_flags = 0;
8064
8065		ordered = btrfs_lookup_first_ordered_range(inode, cur,
8066							   page_end + 1 - cur);
8067		if (!ordered) {
8068			range_end = page_end;
8069			/*
8070			 * No ordered extent covering this range, we are safe
8071			 * to delete all extent states in the range.
8072			 */
8073			extra_flags = EXTENT_CLEAR_ALL_BITS;
8074			goto next;
8075		}
8076		if (ordered->file_offset > cur) {
8077			/*
8078			 * There is a range between [cur, oe->file_offset) not
8079			 * covered by any ordered extent.
8080			 * We are safe to delete all extent states, and handle
8081			 * the ordered extent in the next iteration.
8082			 */
8083			range_end = ordered->file_offset - 1;
8084			extra_flags = EXTENT_CLEAR_ALL_BITS;
8085			goto next;
8086		}
8087
8088		range_end = min(ordered->file_offset + ordered->num_bytes - 1,
8089				page_end);
8090		ASSERT(range_end + 1 - cur < U32_MAX);
8091		range_len = range_end + 1 - cur;
8092		if (!btrfs_folio_test_ordered(fs_info, folio, cur, range_len)) {
8093			/*
8094			 * If Ordered (Private2) is cleared, it means endio has
8095			 * already been executed for the range.
8096			 * We can't delete the extent states as
8097			 * btrfs_finish_ordered_io() may still use some of them.
8098			 */
8099			goto next;
8100		}
8101		btrfs_folio_clear_ordered(fs_info, folio, cur, range_len);
8102
8103		/*
8104		 * IO on this page will never be started, so we need to account
8105		 * for any ordered extents now. Don't clear EXTENT_DELALLOC_NEW
8106		 * here, must leave that up for the ordered extent completion.
8107		 *
8108		 * This will also unlock the range for incoming
8109		 * btrfs_finish_ordered_io().
8110		 */
8111		if (!inode_evicting)
8112			clear_extent_bit(tree, cur, range_end,
8113					 EXTENT_DELALLOC |
8114					 EXTENT_LOCKED | EXTENT_DO_ACCOUNTING |
8115					 EXTENT_DEFRAG, &cached_state);
8116
8117		spin_lock_irq(&inode->ordered_tree_lock);
8118		set_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags);
8119		ordered->truncated_len = min(ordered->truncated_len,
8120					     cur - ordered->file_offset);
8121		spin_unlock_irq(&inode->ordered_tree_lock);
8122
8123		/*
8124		 * If the ordered extent has finished, we're safe to delete all
8125		 * the extent states of the range, otherwise
8126		 * btrfs_finish_ordered_io() will get executed by endio for
8127		 * other pages, so we can't delete extent states.
8128		 */
8129		if (btrfs_dec_test_ordered_pending(inode, &ordered,
8130						   cur, range_end + 1 - cur)) {
8131			btrfs_finish_ordered_io(ordered);
8132			/*
8133			 * The ordered extent has finished, now we're again
8134			 * safe to delete all extent states of the range.
8135			 */
8136			extra_flags = EXTENT_CLEAR_ALL_BITS;
8137		}
8138next:
8139		if (ordered)
8140			btrfs_put_ordered_extent(ordered);
8141		/*
8142		 * Qgroup reserved space handler
8143		 * Sector(s) here will be either:
8144		 *
8145		 * 1) Already written to disk or bio already finished
8146		 *    Then its QGROUP_RESERVED bit in io_tree is already cleared.
8147		 *    Qgroup will be handled by its qgroup_record then.
8148		 *    btrfs_qgroup_free_data() call will do nothing here.
8149		 *
8150		 * 2) Not written to disk yet
8151		 *    Then btrfs_qgroup_free_data() call will clear the
8152		 *    QGROUP_RESERVED bit of its io_tree, and free the qgroup
8153		 *    reserved data space.
8154		 *    Since the IO will never happen for this page.
8155		 */
8156		btrfs_qgroup_free_data(inode, NULL, cur, range_end + 1 - cur, NULL);
8157		if (!inode_evicting) {
8158			clear_extent_bit(tree, cur, range_end, EXTENT_LOCKED |
8159				 EXTENT_DELALLOC | EXTENT_UPTODATE |
8160				 EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG |
8161				 extra_flags, &cached_state);
8162		}
8163		cur = range_end + 1;
8164	}
8165	/*
8166	 * We have iterated through all ordered extents of the page, the page
8167	 * should not have Ordered (Private2) anymore, or the above iteration
8168	 * did something wrong.
8169	 */
8170	ASSERT(!folio_test_ordered(folio));
8171	btrfs_folio_clear_checked(fs_info, folio, folio_pos(folio), folio_size(folio));
8172	if (!inode_evicting)
8173		__btrfs_release_folio(folio, GFP_NOFS);
8174	clear_page_extent_mapped(&folio->page);
8175}
8176
8177/*
8178 * btrfs_page_mkwrite() is not allowed to change the file size as it gets
8179 * called from a page fault handler when a page is first dirtied. Hence we must
8180 * be careful to check for EOF conditions here. We set the page up correctly
8181 * for a written page which means we get ENOSPC checking when writing into
8182 * holes and correct delalloc and unwritten extent mapping on filesystems that
8183 * support these features.
8184 *
8185 * We are not allowed to take the i_mutex here so we have to play games to
8186 * protect against truncate races as the page could now be beyond EOF.  Because
8187 * truncate_setsize() writes the inode size before removing pages, once we have
8188 * the page lock we can determine safely if the page is beyond EOF. If it is not
8189 * beyond EOF, then the page is guaranteed safe against truncation until we
8190 * unlock the page.
8191 */
8192vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf)
8193{
8194	struct page *page = vmf->page;
8195	struct folio *folio = page_folio(page);
8196	struct inode *inode = file_inode(vmf->vma->vm_file);
8197	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
8198	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
8199	struct btrfs_ordered_extent *ordered;
8200	struct extent_state *cached_state = NULL;
8201	struct extent_changeset *data_reserved = NULL;
8202	unsigned long zero_start;
8203	loff_t size;
8204	vm_fault_t ret;
8205	int ret2;
8206	int reserved = 0;
8207	u64 reserved_space;
8208	u64 page_start;
8209	u64 page_end;
8210	u64 end;
8211
8212	ASSERT(folio_order(folio) == 0);
8213
8214	reserved_space = PAGE_SIZE;
8215
8216	sb_start_pagefault(inode->i_sb);
8217	page_start = page_offset(page);
8218	page_end = page_start + PAGE_SIZE - 1;
8219	end = page_end;
8220
8221	/*
8222	 * Reserving delalloc space after obtaining the page lock can lead to
8223	 * deadlock. For example, if a dirty page is locked by this function
8224	 * and the call to btrfs_delalloc_reserve_space() ends up triggering
8225	 * dirty page write out, then the btrfs_writepages() function could
8226	 * end up waiting indefinitely to get a lock on the page currently
8227	 * being processed by btrfs_page_mkwrite() function.
8228	 */
8229	ret2 = btrfs_delalloc_reserve_space(BTRFS_I(inode), &data_reserved,
8230					    page_start, reserved_space);
8231	if (!ret2) {
8232		ret2 = file_update_time(vmf->vma->vm_file);
8233		reserved = 1;
8234	}
8235	if (ret2) {
8236		ret = vmf_error(ret2);
8237		if (reserved)
8238			goto out;
8239		goto out_noreserve;
8240	}
8241
8242	ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
8243again:
8244	down_read(&BTRFS_I(inode)->i_mmap_lock);
8245	lock_page(page);
8246	size = i_size_read(inode);
8247
8248	if ((page->mapping != inode->i_mapping) ||
8249	    (page_start >= size)) {
8250		/* page got truncated out from underneath us */
8251		goto out_unlock;
8252	}
8253	wait_on_page_writeback(page);
8254
8255	lock_extent(io_tree, page_start, page_end, &cached_state);
8256	ret2 = set_page_extent_mapped(page);
8257	if (ret2 < 0) {
8258		ret = vmf_error(ret2);
8259		unlock_extent(io_tree, page_start, page_end, &cached_state);
8260		goto out_unlock;
8261	}
8262
8263	/*
8264	 * we can't set the delalloc bits if there are pending ordered
8265	 * extents.  Drop our locks and wait for them to finish
8266	 */
8267	ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), page_start,
8268			PAGE_SIZE);
8269	if (ordered) {
8270		unlock_extent(io_tree, page_start, page_end, &cached_state);
8271		unlock_page(page);
8272		up_read(&BTRFS_I(inode)->i_mmap_lock);
8273		btrfs_start_ordered_extent(ordered);
8274		btrfs_put_ordered_extent(ordered);
8275		goto again;
8276	}
8277
8278	if (page->index == ((size - 1) >> PAGE_SHIFT)) {
8279		reserved_space = round_up(size - page_start,
8280					  fs_info->sectorsize);
8281		if (reserved_space < PAGE_SIZE) {
8282			end = page_start + reserved_space - 1;
8283			btrfs_delalloc_release_space(BTRFS_I(inode),
8284					data_reserved, page_start,
8285					PAGE_SIZE - reserved_space, true);
8286		}
8287	}
8288
8289	/*
8290	 * page_mkwrite gets called when the page is firstly dirtied after it's
8291	 * faulted in, but write(2) could also dirty a page and set delalloc
8292	 * bits, thus in this case for space account reason, we still need to
8293	 * clear any delalloc bits within this page range since we have to
8294	 * reserve data&meta space before lock_page() (see above comments).
8295	 */
8296	clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, end,
8297			  EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING |
8298			  EXTENT_DEFRAG, &cached_state);
8299
8300	ret2 = btrfs_set_extent_delalloc(BTRFS_I(inode), page_start, end, 0,
8301					&cached_state);
8302	if (ret2) {
8303		unlock_extent(io_tree, page_start, page_end, &cached_state);
8304		ret = VM_FAULT_SIGBUS;
8305		goto out_unlock;
8306	}
8307
8308	/* page is wholly or partially inside EOF */
8309	if (page_start + PAGE_SIZE > size)
8310		zero_start = offset_in_page(size);
8311	else
8312		zero_start = PAGE_SIZE;
8313
8314	if (zero_start != PAGE_SIZE)
8315		memzero_page(page, zero_start, PAGE_SIZE - zero_start);
8316
8317	btrfs_folio_clear_checked(fs_info, folio, page_start, PAGE_SIZE);
8318	btrfs_folio_set_dirty(fs_info, folio, page_start, end + 1 - page_start);
8319	btrfs_folio_set_uptodate(fs_info, folio, page_start, end + 1 - page_start);
8320
8321	btrfs_set_inode_last_sub_trans(BTRFS_I(inode));
8322
8323	unlock_extent(io_tree, page_start, page_end, &cached_state);
8324	up_read(&BTRFS_I(inode)->i_mmap_lock);
8325
8326	btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
8327	sb_end_pagefault(inode->i_sb);
8328	extent_changeset_free(data_reserved);
8329	return VM_FAULT_LOCKED;
8330
8331out_unlock:
8332	unlock_page(page);
8333	up_read(&BTRFS_I(inode)->i_mmap_lock);
8334out:
8335	btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
8336	btrfs_delalloc_release_space(BTRFS_I(inode), data_reserved, page_start,
8337				     reserved_space, (ret != 0));
8338out_noreserve:
8339	sb_end_pagefault(inode->i_sb);
8340	extent_changeset_free(data_reserved);
8341	return ret;
8342}
8343
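/*
 * Truncate the inode's items on disk down to the current i_size, restarting
 * the transaction as many times as needed and keeping a separate block
 * reservation for the truncation itself (see the comment below).
 */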
8344static int btrfs_truncate(struct btrfs_inode *inode, bool skip_writeback)
8345{
8346	struct btrfs_truncate_control control = {
8347		.inode = inode,
8348		.ino = btrfs_ino(inode),
8349		.min_type = BTRFS_EXTENT_DATA_KEY,
8350		.clear_extent_range = true,
8351	};
8352	struct btrfs_root *root = inode->root;
8353	struct btrfs_fs_info *fs_info = root->fs_info;
8354	struct btrfs_block_rsv *rsv;
8355	int ret;
8356	struct btrfs_trans_handle *trans;
8357	u64 mask = fs_info->sectorsize - 1;
8358	const u64 min_size = btrfs_calc_metadata_size(fs_info, 1);
8359
8360	if (!skip_writeback) {
8361		ret = btrfs_wait_ordered_range(&inode->vfs_inode,
8362					       inode->vfs_inode.i_size & (~mask),
8363					       (u64)-1);
8364		if (ret)
8365			return ret;
8366	}
8367
8368	/*
8369	 * Yes ladies and gentlemen, this is indeed ugly.  We have a couple of
8370	 * things going on here:
8371	 *
8372	 * 1) We need to reserve space to update our inode.
8373	 *
8374	 * 2) We need to have something to cache all the space that is going to
8375	 * be free'd up by the truncate operation, but also have some slack
8376	 * space reserved in case it uses space during the truncate (thank you
8377	 * very much snapshotting).
8378	 *
8379	 * And we need these to be separate.  The fact is we can use a lot of
8380	 * space doing the truncate, and we have no earthly idea how much space
8381	 * we will use, so we need the truncate reservation to be separate so it
8382	 * doesn't end up using space reserved for updating the inode.  We also
8383	 * need to be able to stop the transaction and start a new one, which
8384	 * means we need to be able to update the inode several times, and we
8385	 * have no idea of knowing how many times that will be, so we can't just
8386	 * reserve 1 item for the entirety of the operation, so that has to be
8387	 * done separately as well.
8388	 *
8389	 * So that leaves us with
8390	 *
8391	 * 1) rsv - for the truncate reservation, which we will steal from the
8392	 * transaction reservation.
8393	 * 2) fs_info->trans_block_rsv - this will have 1 items worth left for
8394	 * updating the inode.
8395	 */
8396	rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
8397	if (!rsv)
8398		return -ENOMEM;
8399	rsv->size = min_size;
8400	rsv->failfast = true;
8401
8402	/*
8403	 * 1 for the truncate slack space
8404	 * 1 for updating the inode.
8405	 */
8406	trans = btrfs_start_transaction(root, 2);
8407	if (IS_ERR(trans)) {
8408		ret = PTR_ERR(trans);
8409		goto out;
8410	}
8411
8412	/* Migrate the slack space for the truncate to our reserve */
8413	ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, rsv,
8414				      min_size, false);
8415	/*
8416	 * We have reserved 2 metadata units when we started the transaction and
8417	 * min_size matches 1 unit, so this should never fail, but if it does,
8418	 * it's not critical we just fail truncation.
8419	 */
8420	if (WARN_ON(ret)) {
8421		btrfs_end_transaction(trans);
8422		goto out;
8423	}
8424
8425	trans->block_rsv = rsv;
8426
8427	while (1) {
8428		struct extent_state *cached_state = NULL;
8429		const u64 new_size = inode->vfs_inode.i_size;
8430		const u64 lock_start = ALIGN_DOWN(new_size, fs_info->sectorsize);
8431
8432		control.new_size = new_size;
8433		lock_extent(&inode->io_tree, lock_start, (u64)-1, &cached_state);
8434		/*
8435		 * We want to drop from the next block forward in case this new
8436		 * size is not block aligned since we will be keeping the last
8437		 * block of the extent just the way it is.
8438		 */
8439		btrfs_drop_extent_map_range(inode,
8440					    ALIGN(new_size, fs_info->sectorsize),
8441					    (u64)-1, false);
8442
8443		ret = btrfs_truncate_inode_items(trans, root, &control);
8444
8445		inode_sub_bytes(&inode->vfs_inode, control.sub_bytes);
8446		btrfs_inode_safe_disk_i_size_write(inode, control.last_size);
8447
8448		unlock_extent(&inode->io_tree, lock_start, (u64)-1, &cached_state);
8449
8450		trans->block_rsv = &fs_info->trans_block_rsv;
8451		if (ret != -ENOSPC && ret != -EAGAIN)
8452			break;
8453
8454		ret = btrfs_update_inode(trans, inode);
8455		if (ret)
8456			break;
8457
8458		btrfs_end_transaction(trans);
8459		btrfs_btree_balance_dirty(fs_info);
8460
8461		trans = btrfs_start_transaction(root, 2);
8462		if (IS_ERR(trans)) {
8463			ret = PTR_ERR(trans);
8464			trans = NULL;
8465			break;
8466		}
8467
8468		btrfs_block_rsv_release(fs_info, rsv, -1, NULL);
8469		ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv,
8470					      rsv, min_size, false);
8471		/*
8472		 * We have reserved 2 metadata units when we started the
8473		 * transaction and min_size matches 1 unit, so this should never
8474		 * fail, but if it does, it's not critical we just fail truncation.
8475		 */
8476		if (WARN_ON(ret))
8477			break;
8478
8479		trans->block_rsv = rsv;
8480	}
8481
8482	/*
8483	 * We can't call btrfs_truncate_block inside a trans handle as we could
8484	 * deadlock with freeze, if we got BTRFS_NEED_TRUNCATE_BLOCK then we
8485	 * know we've truncated everything except the last little bit, and can
8486	 * do btrfs_truncate_block and then update the disk_i_size.
8487	 */
8488	if (ret == BTRFS_NEED_TRUNCATE_BLOCK) {
8489		btrfs_end_transaction(trans);
8490		btrfs_btree_balance_dirty(fs_info);
8491
8492		ret = btrfs_truncate_block(inode, inode->vfs_inode.i_size, 0, 0);
8493		if (ret)
8494			goto out;
8495		trans = btrfs_start_transaction(root, 1);
8496		if (IS_ERR(trans)) {
8497			ret = PTR_ERR(trans);
8498			goto out;
8499		}
8500		btrfs_inode_safe_disk_i_size_write(inode, 0);
8501	}
8502
8503	if (trans) {
8504		int ret2;
8505
8506		trans->block_rsv = &fs_info->trans_block_rsv;
8507		ret2 = btrfs_update_inode(trans, inode);
8508		if (ret2 && !ret)
8509			ret = ret2;
8510
8511		ret2 = btrfs_end_transaction(trans);
8512		if (ret2 && !ret)
8513			ret = ret2;
8514		btrfs_btree_balance_dirty(fs_info);
8515	}
8516out:
8517	btrfs_free_block_rsv(fs_info, rsv);
8518	/*
	 * If we truncate and then write and fsync, we normally would just
	 * write out the extents that changed, which is a problem if we need to
	 * first truncate that entire inode. So set this flag so we write out
	 * all of the extents in the inode to the sync log and are completely
	 * safe.
8524	 *
8525	 * If no extents were dropped or trimmed we don't need to force the next
8526	 * fsync to truncate all the inode's items from the log and re-log them
8527	 * all. This means the truncate operation did not change the file size,
8528	 * or changed it to a smaller size but there was only an implicit hole
8529	 * between the old i_size and the new i_size, and there were no prealloc
8530	 * extents beyond i_size to drop.
8531	 */
8532	if (control.extents_found > 0)
8533		btrfs_set_inode_full_sync(inode);
8534
8535	return ret;
8536}
8537
8538struct inode *btrfs_new_subvol_inode(struct mnt_idmap *idmap,
8539				     struct inode *dir)
8540{
8541	struct inode *inode;
8542
8543	inode = new_inode(dir->i_sb);
8544	if (inode) {
8545		/*
8546		 * Subvolumes don't inherit the sgid bit or the parent's gid if
8547		 * the parent's sgid bit is set. This is probably a bug.
8548		 */
8549		inode_init_owner(idmap, inode, NULL,
8550				 S_IFDIR | (~current_umask() & S_IRWXUGO));
8551		inode->i_op = &btrfs_dir_inode_operations;
8552		inode->i_fop = &btrfs_dir_file_operations;
8553	}
8554	return inode;
8555}
8556
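/*
 * Allocate and initialize a new in-memory btrfs inode. The file extent tree
 * is only allocated when the NO_HOLES feature is disabled, since it is only
 * needed then to track which ranges are covered by file extent items so that
 * disk_i_size can be updated safely.
 */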
8557struct inode *btrfs_alloc_inode(struct super_block *sb)
8558{
8559	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
8560	struct btrfs_inode *ei;
8561	struct inode *inode;
8562	struct extent_io_tree *file_extent_tree = NULL;
8563
8564	/* Self tests may pass a NULL fs_info. */
8565	if (fs_info && !btrfs_fs_incompat(fs_info, NO_HOLES)) {
8566		file_extent_tree = kmalloc(sizeof(struct extent_io_tree), GFP_KERNEL);
8567		if (!file_extent_tree)
8568			return NULL;
8569	}
8570
8571	ei = alloc_inode_sb(sb, btrfs_inode_cachep, GFP_KERNEL);
8572	if (!ei) {
8573		kfree(file_extent_tree);
8574		return NULL;
8575	}
8576
8577	ei->root = NULL;
8578	ei->generation = 0;
8579	ei->last_trans = 0;
8580	ei->last_sub_trans = 0;
8581	ei->logged_trans = 0;
8582	ei->delalloc_bytes = 0;
8583	ei->new_delalloc_bytes = 0;
8584	ei->defrag_bytes = 0;
8585	ei->disk_i_size = 0;
8586	ei->flags = 0;
8587	ei->ro_flags = 0;
8588	ei->csum_bytes = 0;
8589	ei->index_cnt = (u64)-1;
8590	ei->dir_index = 0;
8591	ei->last_unlink_trans = 0;
8592	ei->last_reflink_trans = 0;
8593	ei->last_log_commit = 0;
8594
8595	spin_lock_init(&ei->lock);
8596	ei->outstanding_extents = 0;
8597	if (sb->s_magic != BTRFS_TEST_MAGIC)
8598		btrfs_init_metadata_block_rsv(fs_info, &ei->block_rsv,
8599					      BTRFS_BLOCK_RSV_DELALLOC);
8600	ei->runtime_flags = 0;
8601	ei->prop_compress = BTRFS_COMPRESS_NONE;
8602	ei->defrag_compress = BTRFS_COMPRESS_NONE;
8603
8604	ei->delayed_node = NULL;
8605
8606	ei->i_otime_sec = 0;
8607	ei->i_otime_nsec = 0;
8608
8609	inode = &ei->vfs_inode;
8610	extent_map_tree_init(&ei->extent_tree);
8611
8612	/* This io tree sets the valid inode. */
8613	extent_io_tree_init(fs_info, &ei->io_tree, IO_TREE_INODE_IO);
8614	ei->io_tree.inode = ei;
8615
8616	ei->file_extent_tree = file_extent_tree;
8617	if (file_extent_tree) {
8618		extent_io_tree_init(fs_info, ei->file_extent_tree,
8619				    IO_TREE_INODE_FILE_EXTENT);
8620		/* Lockdep class is set only for the file extent tree. */
8621		lockdep_set_class(&ei->file_extent_tree->lock, &file_extent_tree_class);
8622	}
8623	mutex_init(&ei->log_mutex);
8624	spin_lock_init(&ei->ordered_tree_lock);
8625	ei->ordered_tree = RB_ROOT;
8626	ei->ordered_tree_last = NULL;
8627	INIT_LIST_HEAD(&ei->delalloc_inodes);
8628	INIT_LIST_HEAD(&ei->delayed_iput);
8629	RB_CLEAR_NODE(&ei->rb_node);
8630	init_rwsem(&ei->i_mmap_lock);
8631
8632	return inode;
8633}
8634
8635#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
8636void btrfs_test_destroy_inode(struct inode *inode)
8637{
8638	btrfs_drop_extent_map_range(BTRFS_I(inode), 0, (u64)-1, false);
8639	kfree(BTRFS_I(inode)->file_extent_tree);
8640	kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
8641}
8642#endif
8643
8644void btrfs_free_inode(struct inode *inode)
8645{
8646	kfree(BTRFS_I(inode)->file_extent_tree);
8647	kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
8648}
8649
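/*
 * Final per-inode teardown: warn about any state that should already be gone
 * by now, clean up ordered extents that were left behind (which indicates a
 * bug or a previous error), and drop our reference on the root.
 */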
8650void btrfs_destroy_inode(struct inode *vfs_inode)
8651{
8652	struct btrfs_ordered_extent *ordered;
8653	struct btrfs_inode *inode = BTRFS_I(vfs_inode);
8654	struct btrfs_root *root = inode->root;
8655	bool freespace_inode;
8656
8657	WARN_ON(!hlist_empty(&vfs_inode->i_dentry));
8658	WARN_ON(vfs_inode->i_data.nrpages);
8659	WARN_ON(inode->block_rsv.reserved);
8660	WARN_ON(inode->block_rsv.size);
8661	WARN_ON(inode->outstanding_extents);
8662	if (!S_ISDIR(vfs_inode->i_mode)) {
8663		WARN_ON(inode->delalloc_bytes);
8664		WARN_ON(inode->new_delalloc_bytes);
8665	}
8666	WARN_ON(inode->csum_bytes);
8667	WARN_ON(inode->defrag_bytes);
8668
8669	/*
	 * This can happen when we create an inode, but somebody else also
8671	 * created the same inode and we need to destroy the one we already
8672	 * created.
8673	 */
8674	if (!root)
8675		return;
8676
8677	/*
8678	 * If this is a free space inode do not take the ordered extents lockdep
8679	 * map.
8680	 */
8681	freespace_inode = btrfs_is_free_space_inode(inode);
8682
	while (1) {
		ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
		if (!ordered)
			break;

		btrfs_err(root->fs_info,
			  "found ordered extent %llu %llu on inode cleanup",
			  ordered->file_offset, ordered->num_bytes);

		if (!freespace_inode)
			btrfs_lockdep_acquire(root->fs_info, btrfs_ordered_extent);

		btrfs_remove_ordered_extent(inode, ordered);
		/*
		 * Put once for our lookup reference and once for the base
		 * reference that would normally be dropped when the ordered
		 * extent completes.
		 */
		btrfs_put_ordered_extent(ordered);
		btrfs_put_ordered_extent(ordered);
	}
8700	btrfs_qgroup_check_reserved_leak(inode);
8701	inode_tree_del(inode);
8702	btrfs_drop_extent_map_range(inode, 0, (u64)-1, false);
8703	btrfs_inode_clear_file_extent_range(inode, 0, (u64)-1);
8704	btrfs_put_root(inode->root);
8705}
8706
8707int btrfs_drop_inode(struct inode *inode)
8708{
8709	struct btrfs_root *root = BTRFS_I(inode)->root;
8710
8711	if (root == NULL)
8712		return 1;
8713
	/* The snapshot/subvolume tree is being deleted. */
8715	if (btrfs_root_refs(&root->root_item) == 0)
8716		return 1;
8717	else
8718		return generic_drop_inode(inode);
8719}
8720
8721static void init_once(void *foo)
8722{
8723	struct btrfs_inode *ei = foo;
8724
8725	inode_init_once(&ei->vfs_inode);
8726}
8727
8728void __cold btrfs_destroy_cachep(void)
8729{
8730	/*
8731	 * Make sure all delayed rcu free inodes are flushed before we
8732	 * destroy cache.
8733	 */
8734	rcu_barrier();
8735	bioset_exit(&btrfs_dio_bioset);
8736	kmem_cache_destroy(btrfs_inode_cachep);
8737}
8738
8739int __init btrfs_init_cachep(void)
8740{
8741	btrfs_inode_cachep = kmem_cache_create("btrfs_inode",
8742			sizeof(struct btrfs_inode), 0,
8743			SLAB_RECLAIM_ACCOUNT | SLAB_ACCOUNT,
8744			init_once);
8745	if (!btrfs_inode_cachep)
8746		goto fail;
8747
8748	if (bioset_init(&btrfs_dio_bioset, BIO_POOL_SIZE,
8749			offsetof(struct btrfs_dio_private, bbio.bio),
8750			BIOSET_NEED_BVECS))
8751		goto fail;
8752
8753	return 0;
8754fail:
8755	btrfs_destroy_cachep();
8756	return -ENOMEM;
8757}
8758
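/*
 * Fill in stat data, including the btrfs specific bits: birth time, the
 * attribute flags derived from the inode flags, the per-subvolume anonymous
 * device number, and a block count that also covers new delalloc ranges not
 * yet allocated on disk.
 */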
8759static int btrfs_getattr(struct mnt_idmap *idmap,
8760			 const struct path *path, struct kstat *stat,
8761			 u32 request_mask, unsigned int flags)
8762{
8763	u64 delalloc_bytes;
8764	u64 inode_bytes;
8765	struct inode *inode = d_inode(path->dentry);
8766	u32 blocksize = btrfs_sb(inode->i_sb)->sectorsize;
8767	u32 bi_flags = BTRFS_I(inode)->flags;
8768	u32 bi_ro_flags = BTRFS_I(inode)->ro_flags;
8769
8770	stat->result_mask |= STATX_BTIME;
8771	stat->btime.tv_sec = BTRFS_I(inode)->i_otime_sec;
8772	stat->btime.tv_nsec = BTRFS_I(inode)->i_otime_nsec;
8773	if (bi_flags & BTRFS_INODE_APPEND)
8774		stat->attributes |= STATX_ATTR_APPEND;
8775	if (bi_flags & BTRFS_INODE_COMPRESS)
8776		stat->attributes |= STATX_ATTR_COMPRESSED;
8777	if (bi_flags & BTRFS_INODE_IMMUTABLE)
8778		stat->attributes |= STATX_ATTR_IMMUTABLE;
8779	if (bi_flags & BTRFS_INODE_NODUMP)
8780		stat->attributes |= STATX_ATTR_NODUMP;
8781	if (bi_ro_flags & BTRFS_INODE_RO_VERITY)
8782		stat->attributes |= STATX_ATTR_VERITY;
8783
8784	stat->attributes_mask |= (STATX_ATTR_APPEND |
8785				  STATX_ATTR_COMPRESSED |
8786				  STATX_ATTR_IMMUTABLE |
8787				  STATX_ATTR_NODUMP);
8788
8789	generic_fillattr(idmap, request_mask, inode, stat);
8790	stat->dev = BTRFS_I(inode)->root->anon_dev;
8791
8792	spin_lock(&BTRFS_I(inode)->lock);
8793	delalloc_bytes = BTRFS_I(inode)->new_delalloc_bytes;
8794	inode_bytes = inode_get_bytes(inode);
8795	spin_unlock(&BTRFS_I(inode)->lock);
8796	stat->blocks = (ALIGN(inode_bytes, blocksize) +
8797			ALIGN(delalloc_bytes, blocksize)) >> SECTOR_SHIFT;
8798	return 0;
8799}
8800
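/*
 * Implement RENAME_EXCHANGE: atomically swap the directory entries of the two
 * given dentries.
 */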
8801static int btrfs_rename_exchange(struct inode *old_dir,
8802			      struct dentry *old_dentry,
8803			      struct inode *new_dir,
8804			      struct dentry *new_dentry)
8805{
8806	struct btrfs_fs_info *fs_info = inode_to_fs_info(old_dir);
8807	struct btrfs_trans_handle *trans;
8808	unsigned int trans_num_items;
8809	struct btrfs_root *root = BTRFS_I(old_dir)->root;
8810	struct btrfs_root *dest = BTRFS_I(new_dir)->root;
8811	struct inode *new_inode = new_dentry->d_inode;
8812	struct inode *old_inode = old_dentry->d_inode;
8813	struct btrfs_rename_ctx old_rename_ctx;
8814	struct btrfs_rename_ctx new_rename_ctx;
8815	u64 old_ino = btrfs_ino(BTRFS_I(old_inode));
8816	u64 new_ino = btrfs_ino(BTRFS_I(new_inode));
8817	u64 old_idx = 0;
8818	u64 new_idx = 0;
8819	int ret;
8820	int ret2;
8821	bool need_abort = false;
8822	struct fscrypt_name old_fname, new_fname;
8823	struct fscrypt_str *old_name, *new_name;
8824
8825	/*
8826	 * For non-subvolumes allow exchange only within one subvolume, in the
	 * same inode namespace. Two subvolumes (each represented as a
	 * directory) can be exchanged as they're logical links and have fixed
	 * inode numbers.
8829	 */
8830	if (root != dest &&
8831	    (old_ino != BTRFS_FIRST_FREE_OBJECTID ||
8832	     new_ino != BTRFS_FIRST_FREE_OBJECTID))
8833		return -EXDEV;
8834
8835	ret = fscrypt_setup_filename(old_dir, &old_dentry->d_name, 0, &old_fname);
8836	if (ret)
8837		return ret;
8838
8839	ret = fscrypt_setup_filename(new_dir, &new_dentry->d_name, 0, &new_fname);
8840	if (ret) {
8841		fscrypt_free_filename(&old_fname);
8842		return ret;
8843	}
8844
8845	old_name = &old_fname.disk_name;
8846	new_name = &new_fname.disk_name;
8847
8848	/* close the race window with snapshot create/destroy ioctl */
8849	if (old_ino == BTRFS_FIRST_FREE_OBJECTID ||
8850	    new_ino == BTRFS_FIRST_FREE_OBJECTID)
8851		down_read(&fs_info->subvol_sem);
8852
8853	/*
8854	 * For each inode:
8855	 * 1 to remove old dir item
8856	 * 1 to remove old dir index
8857	 * 1 to add new dir item
8858	 * 1 to add new dir index
8859	 * 1 to update parent inode
8860	 *
	 * If the parents are the same, we only need to account for one parent
	 * inode update.
8862	 */
8863	trans_num_items = (old_dir == new_dir ? 9 : 10);
8864	if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
8865		/*
8866		 * 1 to remove old root ref
8867		 * 1 to remove old root backref
8868		 * 1 to add new root ref
8869		 * 1 to add new root backref
8870		 */
8871		trans_num_items += 4;
8872	} else {
8873		/*
8874		 * 1 to update inode item
8875		 * 1 to remove old inode ref
8876		 * 1 to add new inode ref
8877		 */
8878		trans_num_items += 3;
8879	}
8880	if (new_ino == BTRFS_FIRST_FREE_OBJECTID)
8881		trans_num_items += 4;
8882	else
8883		trans_num_items += 3;
8884	trans = btrfs_start_transaction(root, trans_num_items);
8885	if (IS_ERR(trans)) {
8886		ret = PTR_ERR(trans);
8887		goto out_notrans;
8888	}
8889
8890	if (dest != root) {
8891		ret = btrfs_record_root_in_trans(trans, dest);
8892		if (ret)
8893			goto out_fail;
8894	}
8895
8896	/*
8897	 * We need to find a free sequence number both in the source and
8898	 * in the destination directory for the exchange.
8899	 */
8900	ret = btrfs_set_inode_index(BTRFS_I(new_dir), &old_idx);
8901	if (ret)
8902		goto out_fail;
8903	ret = btrfs_set_inode_index(BTRFS_I(old_dir), &new_idx);
8904	if (ret)
8905		goto out_fail;
8906
8907	BTRFS_I(old_inode)->dir_index = 0ULL;
8908	BTRFS_I(new_inode)->dir_index = 0ULL;
8909
8910	/* Reference for the source. */
8911	if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
8912		/* force full log commit if subvolume involved. */
8913		btrfs_set_log_full_commit(trans);
8914	} else {
8915		ret = btrfs_insert_inode_ref(trans, dest, new_name, old_ino,
8916					     btrfs_ino(BTRFS_I(new_dir)),
8917					     old_idx);
8918		if (ret)
8919			goto out_fail;
8920		need_abort = true;
8921	}
8922
8923	/* And now for the dest. */
8924	if (new_ino == BTRFS_FIRST_FREE_OBJECTID) {
8925		/* force full log commit if subvolume involved. */
8926		btrfs_set_log_full_commit(trans);
8927	} else {
8928		ret = btrfs_insert_inode_ref(trans, root, old_name, new_ino,
8929					     btrfs_ino(BTRFS_I(old_dir)),
8930					     new_idx);
8931		if (ret) {
8932			if (need_abort)
8933				btrfs_abort_transaction(trans, ret);
8934			goto out_fail;
8935		}
8936	}
8937
8938	/* Update inode version and ctime/mtime. */
8939	inode_inc_iversion(old_dir);
8940	inode_inc_iversion(new_dir);
8941	inode_inc_iversion(old_inode);
8942	inode_inc_iversion(new_inode);
8943	simple_rename_timestamp(old_dir, old_dentry, new_dir, new_dentry);
8944
8945	if (old_dentry->d_parent != new_dentry->d_parent) {
8946		btrfs_record_unlink_dir(trans, BTRFS_I(old_dir),
8947					BTRFS_I(old_inode), true);
8948		btrfs_record_unlink_dir(trans, BTRFS_I(new_dir),
8949					BTRFS_I(new_inode), true);
8950	}
8951
8952	/* src is a subvolume */
8953	if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
8954		ret = btrfs_unlink_subvol(trans, BTRFS_I(old_dir), old_dentry);
8955	} else { /* src is an inode */
8956		ret = __btrfs_unlink_inode(trans, BTRFS_I(old_dir),
8957					   BTRFS_I(old_dentry->d_inode),
8958					   old_name, &old_rename_ctx);
8959		if (!ret)
8960			ret = btrfs_update_inode(trans, BTRFS_I(old_inode));
8961	}
8962	if (ret) {
8963		btrfs_abort_transaction(trans, ret);
8964		goto out_fail;
8965	}
8966
8967	/* dest is a subvolume */
8968	if (new_ino == BTRFS_FIRST_FREE_OBJECTID) {
8969		ret = btrfs_unlink_subvol(trans, BTRFS_I(new_dir), new_dentry);
8970	} else { /* dest is an inode */
8971		ret = __btrfs_unlink_inode(trans, BTRFS_I(new_dir),
8972					   BTRFS_I(new_dentry->d_inode),
8973					   new_name, &new_rename_ctx);
8974		if (!ret)
8975			ret = btrfs_update_inode(trans, BTRFS_I(new_inode));
8976	}
8977	if (ret) {
8978		btrfs_abort_transaction(trans, ret);
8979		goto out_fail;
8980	}
8981
8982	ret = btrfs_add_link(trans, BTRFS_I(new_dir), BTRFS_I(old_inode),
8983			     new_name, 0, old_idx);
8984	if (ret) {
8985		btrfs_abort_transaction(trans, ret);
8986		goto out_fail;
8987	}
8988
8989	ret = btrfs_add_link(trans, BTRFS_I(old_dir), BTRFS_I(new_inode),
8990			     old_name, 0, new_idx);
8991	if (ret) {
8992		btrfs_abort_transaction(trans, ret);
8993		goto out_fail;
8994	}
8995
8996	if (old_inode->i_nlink == 1)
8997		BTRFS_I(old_inode)->dir_index = old_idx;
8998	if (new_inode->i_nlink == 1)
8999		BTRFS_I(new_inode)->dir_index = new_idx;
9000
9001	/*
9002	 * Now pin the logs of the roots. We do it to ensure that no other task
9003	 * can sync the logs while we are in progress with the rename, because
9004	 * that could result in an inconsistency in case any of the inodes that
9005	 * are part of this rename operation were logged before.
9006	 */
9007	if (old_ino != BTRFS_FIRST_FREE_OBJECTID)
9008		btrfs_pin_log_trans(root);
9009	if (new_ino != BTRFS_FIRST_FREE_OBJECTID)
9010		btrfs_pin_log_trans(dest);
9011
9012	/* Do the log updates for all inodes. */
9013	if (old_ino != BTRFS_FIRST_FREE_OBJECTID)
9014		btrfs_log_new_name(trans, old_dentry, BTRFS_I(old_dir),
9015				   old_rename_ctx.index, new_dentry->d_parent);
9016	if (new_ino != BTRFS_FIRST_FREE_OBJECTID)
9017		btrfs_log_new_name(trans, new_dentry, BTRFS_I(new_dir),
9018				   new_rename_ctx.index, old_dentry->d_parent);
9019
9020	/* Now unpin the logs. */
9021	if (old_ino != BTRFS_FIRST_FREE_OBJECTID)
9022		btrfs_end_log_trans(root);
9023	if (new_ino != BTRFS_FIRST_FREE_OBJECTID)
9024		btrfs_end_log_trans(dest);
9025out_fail:
9026	ret2 = btrfs_end_transaction(trans);
9027	ret = ret ? ret : ret2;
9028out_notrans:
9029	if (new_ino == BTRFS_FIRST_FREE_OBJECTID ||
9030	    old_ino == BTRFS_FIRST_FREE_OBJECTID)
9031		up_read(&fs_info->subvol_sem);
9032
9033	fscrypt_free_filename(&new_fname);
9034	fscrypt_free_filename(&old_fname);
9035	return ret;
9036}
9037
9038static struct inode *new_whiteout_inode(struct mnt_idmap *idmap,
9039					struct inode *dir)
9040{
9041	struct inode *inode;
9042
9043	inode = new_inode(dir->i_sb);
9044	if (inode) {
9045		inode_init_owner(idmap, inode, dir,
9046				 S_IFCHR | WHITEOUT_MODE);
9047		inode->i_op = &btrfs_special_inode_operations;
9048		init_special_inode(inode, inode->i_mode, WHITEOUT_DEV);
9049	}
9050	return inode;
9051}
9052
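/*
 * Plain rename of old_dentry to new_dentry, optionally creating a whiteout
 * (a character device inode) at the old location when RENAME_WHITEOUT is
 * given.
 */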
9053static int btrfs_rename(struct mnt_idmap *idmap,
9054			struct inode *old_dir, struct dentry *old_dentry,
9055			struct inode *new_dir, struct dentry *new_dentry,
9056			unsigned int flags)
9057{
9058	struct btrfs_fs_info *fs_info = inode_to_fs_info(old_dir);
9059	struct btrfs_new_inode_args whiteout_args = {
9060		.dir = old_dir,
9061		.dentry = old_dentry,
9062	};
9063	struct btrfs_trans_handle *trans;
9064	unsigned int trans_num_items;
9065	struct btrfs_root *root = BTRFS_I(old_dir)->root;
9066	struct btrfs_root *dest = BTRFS_I(new_dir)->root;
9067	struct inode *new_inode = d_inode(new_dentry);
9068	struct inode *old_inode = d_inode(old_dentry);
9069	struct btrfs_rename_ctx rename_ctx;
9070	u64 index = 0;
9071	int ret;
9072	int ret2;
9073	u64 old_ino = btrfs_ino(BTRFS_I(old_inode));
9074	struct fscrypt_name old_fname, new_fname;
9075
9076	if (btrfs_ino(BTRFS_I(new_dir)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
9077		return -EPERM;
9078
	/* We only allow renaming subvolume links between subvolumes. */
9080	if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
9081		return -EXDEV;
9082
9083	if (old_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID ||
9084	    (new_inode && btrfs_ino(BTRFS_I(new_inode)) == BTRFS_FIRST_FREE_OBJECTID))
9085		return -ENOTEMPTY;
9086
9087	if (S_ISDIR(old_inode->i_mode) && new_inode &&
9088	    new_inode->i_size > BTRFS_EMPTY_DIR_SIZE)
9089		return -ENOTEMPTY;
9090
9091	ret = fscrypt_setup_filename(old_dir, &old_dentry->d_name, 0, &old_fname);
9092	if (ret)
9093		return ret;
9094
9095	ret = fscrypt_setup_filename(new_dir, &new_dentry->d_name, 0, &new_fname);
9096	if (ret) {
9097		fscrypt_free_filename(&old_fname);
9098		return ret;
9099	}
9100
	/* Check for collisions, even if the name isn't there. */
9102	ret = btrfs_check_dir_item_collision(dest, new_dir->i_ino, &new_fname.disk_name);
9103	if (ret) {
		if (ret == -EEXIST) {
			/* We shouldn't get -EEXIST without a new_inode. */
			if (WARN_ON(!new_inode))
				goto out_fscrypt_names;
		} else {
			/* Maybe -EOVERFLOW. */
			goto out_fscrypt_names;
9113		}
9114	}
9115	ret = 0;
9116
9117	/*
	 * We're using rename to replace one file with another. Start IO on it
	 * now so we don't add too much work to the end of the transaction.
9120	 */
9121	if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size)
9122		filemap_flush(old_inode->i_mapping);
9123
9124	if (flags & RENAME_WHITEOUT) {
9125		whiteout_args.inode = new_whiteout_inode(idmap, old_dir);
9126		if (!whiteout_args.inode) {
9127			ret = -ENOMEM;
9128			goto out_fscrypt_names;
9129		}
9130		ret = btrfs_new_inode_prepare(&whiteout_args, &trans_num_items);
9131		if (ret)
9132			goto out_whiteout_inode;
9133	} else {
9134		/* 1 to update the old parent inode. */
9135		trans_num_items = 1;
9136	}
9137
9138	if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
9139		/* Close the race window with snapshot create/destroy ioctl */
9140		down_read(&fs_info->subvol_sem);
9141		/*
9142		 * 1 to remove old root ref
9143		 * 1 to remove old root backref
9144		 * 1 to add new root ref
9145		 * 1 to add new root backref
9146		 */
9147		trans_num_items += 4;
9148	} else {
9149		/*
9150		 * 1 to update inode
9151		 * 1 to remove old inode ref
9152		 * 1 to add new inode ref
9153		 */
9154		trans_num_items += 3;
9155	}
9156	/*
9157	 * 1 to remove old dir item
9158	 * 1 to remove old dir index
9159	 * 1 to add new dir item
9160	 * 1 to add new dir index
9161	 */
9162	trans_num_items += 4;
9163	/* 1 to update new parent inode if it's not the same as the old parent */
9164	if (new_dir != old_dir)
9165		trans_num_items++;
9166	if (new_inode) {
9167		/*
9168		 * 1 to update inode
9169		 * 1 to remove inode ref
9170		 * 1 to remove dir item
9171		 * 1 to remove dir index
9172		 * 1 to possibly add orphan item
9173		 */
9174		trans_num_items += 5;
9175	}
9176	trans = btrfs_start_transaction(root, trans_num_items);
9177	if (IS_ERR(trans)) {
9178		ret = PTR_ERR(trans);
9179		goto out_notrans;
9180	}
9181
9182	if (dest != root) {
9183		ret = btrfs_record_root_in_trans(trans, dest);
9184		if (ret)
9185			goto out_fail;
9186	}
9187
9188	ret = btrfs_set_inode_index(BTRFS_I(new_dir), &index);
9189	if (ret)
9190		goto out_fail;
9191
9192	BTRFS_I(old_inode)->dir_index = 0ULL;
9193	if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
9194		/* force full log commit if subvolume involved. */
9195		btrfs_set_log_full_commit(trans);
9196	} else {
9197		ret = btrfs_insert_inode_ref(trans, dest, &new_fname.disk_name,
9198					     old_ino, btrfs_ino(BTRFS_I(new_dir)),
9199					     index);
9200		if (ret)
9201			goto out_fail;
9202	}
9203
9204	inode_inc_iversion(old_dir);
9205	inode_inc_iversion(new_dir);
9206	inode_inc_iversion(old_inode);
9207	simple_rename_timestamp(old_dir, old_dentry, new_dir, new_dentry);
9208
9209	if (old_dentry->d_parent != new_dentry->d_parent)
9210		btrfs_record_unlink_dir(trans, BTRFS_I(old_dir),
9211					BTRFS_I(old_inode), true);
9212
9213	if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
9214		ret = btrfs_unlink_subvol(trans, BTRFS_I(old_dir), old_dentry);
9215	} else {
9216		ret = __btrfs_unlink_inode(trans, BTRFS_I(old_dir),
9217					   BTRFS_I(d_inode(old_dentry)),
9218					   &old_fname.disk_name, &rename_ctx);
9219		if (!ret)
9220			ret = btrfs_update_inode(trans, BTRFS_I(old_inode));
9221	}
9222	if (ret) {
9223		btrfs_abort_transaction(trans, ret);
9224		goto out_fail;
9225	}
9226
9227	if (new_inode) {
9228		inode_inc_iversion(new_inode);
9229		if (unlikely(btrfs_ino(BTRFS_I(new_inode)) ==
9230			     BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
9231			ret = btrfs_unlink_subvol(trans, BTRFS_I(new_dir), new_dentry);
9232			BUG_ON(new_inode->i_nlink == 0);
9233		} else {
9234			ret = btrfs_unlink_inode(trans, BTRFS_I(new_dir),
9235						 BTRFS_I(d_inode(new_dentry)),
9236						 &new_fname.disk_name);
9237		}
9238		if (!ret && new_inode->i_nlink == 0)
9239			ret = btrfs_orphan_add(trans,
9240					BTRFS_I(d_inode(new_dentry)));
9241		if (ret) {
9242			btrfs_abort_transaction(trans, ret);
9243			goto out_fail;
9244		}
9245	}
9246
9247	ret = btrfs_add_link(trans, BTRFS_I(new_dir), BTRFS_I(old_inode),
9248			     &new_fname.disk_name, 0, index);
9249	if (ret) {
9250		btrfs_abort_transaction(trans, ret);
9251		goto out_fail;
9252	}
9253
9254	if (old_inode->i_nlink == 1)
9255		BTRFS_I(old_inode)->dir_index = index;
9256
9257	if (old_ino != BTRFS_FIRST_FREE_OBJECTID)
9258		btrfs_log_new_name(trans, old_dentry, BTRFS_I(old_dir),
9259				   rename_ctx.index, new_dentry->d_parent);
9260
9261	if (flags & RENAME_WHITEOUT) {
9262		ret = btrfs_create_new_inode(trans, &whiteout_args);
9263		if (ret) {
9264			btrfs_abort_transaction(trans, ret);
9265			goto out_fail;
9266		} else {
9267			unlock_new_inode(whiteout_args.inode);
9268			iput(whiteout_args.inode);
9269			whiteout_args.inode = NULL;
9270		}
9271	}
9272out_fail:
9273	ret2 = btrfs_end_transaction(trans);
9274	ret = ret ? ret : ret2;
9275out_notrans:
9276	if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
9277		up_read(&fs_info->subvol_sem);
9278	if (flags & RENAME_WHITEOUT)
9279		btrfs_new_inode_args_destroy(&whiteout_args);
9280out_whiteout_inode:
9281	if (flags & RENAME_WHITEOUT)
9282		iput(whiteout_args.inode);
9283out_fscrypt_names:
9284	fscrypt_free_filename(&old_fname);
9285	fscrypt_free_filename(&new_fname);
9286	return ret;
9287}
9288
9289static int btrfs_rename2(struct mnt_idmap *idmap, struct inode *old_dir,
9290			 struct dentry *old_dentry, struct inode *new_dir,
9291			 struct dentry *new_dentry, unsigned int flags)
9292{
9293	int ret;
9294
9295	if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
9296		return -EINVAL;
9297
9298	if (flags & RENAME_EXCHANGE)
9299		ret = btrfs_rename_exchange(old_dir, old_dentry, new_dir,
9300					    new_dentry);
9301	else
9302		ret = btrfs_rename(idmap, old_dir, old_dentry, new_dir,
9303				   new_dentry, flags);
9304
9305	btrfs_btree_balance_dirty(BTRFS_I(new_dir)->root->fs_info);
9306
9307	return ret;
9308}
9309
9310struct btrfs_delalloc_work {
9311	struct inode *inode;
9312	struct completion completion;
9313	struct list_head list;
9314	struct btrfs_work work;
9315};
9316
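/*
 * Worker callback to flush delalloc for one inode. Flush a second time if
 * the first pass queued async (compressed) extents, as the first flush may
 * return before all of those pages are under writeback.
 */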
9317static void btrfs_run_delalloc_work(struct btrfs_work *work)
9318{
9319	struct btrfs_delalloc_work *delalloc_work;
9320	struct inode *inode;
9321
9322	delalloc_work = container_of(work, struct btrfs_delalloc_work,
9323				     work);
9324	inode = delalloc_work->inode;
9325	filemap_flush(inode->i_mapping);
9326	if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
9327				&BTRFS_I(inode)->runtime_flags))
9328		filemap_flush(inode->i_mapping);
9329
9330	iput(inode);
9331	complete(&delalloc_work->completion);
9332}
9333
9334static struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode)
9335{
9336	struct btrfs_delalloc_work *work;
9337
9338	work = kmalloc(sizeof(*work), GFP_NOFS);
9339	if (!work)
9340		return NULL;
9341
9342	init_completion(&work->completion);
9343	INIT_LIST_HEAD(&work->list);
9344	work->inode = inode;
9345	btrfs_init_work(&work->work, btrfs_run_delalloc_work, NULL);
9346
9347	return work;
9348}
9349
9350/*
 * Some fairly slow code that needs optimization. This walks the list
9352 * of all the inodes with pending delalloc and forces them to disk.
9353 */
9354static int start_delalloc_inodes(struct btrfs_root *root,
9355				 struct writeback_control *wbc, bool snapshot,
9356				 bool in_reclaim_context)
9357{
9358	struct btrfs_inode *binode;
9359	struct inode *inode;
9360	struct btrfs_delalloc_work *work, *next;
9361	LIST_HEAD(works);
9362	LIST_HEAD(splice);
9363	int ret = 0;
9364	bool full_flush = wbc->nr_to_write == LONG_MAX;
9365
9366	mutex_lock(&root->delalloc_mutex);
9367	spin_lock(&root->delalloc_lock);
9368	list_splice_init(&root->delalloc_inodes, &splice);
9369	while (!list_empty(&splice)) {
9370		binode = list_entry(splice.next, struct btrfs_inode,
9371				    delalloc_inodes);
9372
9373		list_move_tail(&binode->delalloc_inodes,
9374			       &root->delalloc_inodes);
9375
9376		if (in_reclaim_context &&
9377		    test_bit(BTRFS_INODE_NO_DELALLOC_FLUSH, &binode->runtime_flags))
9378			continue;
9379
9380		inode = igrab(&binode->vfs_inode);
9381		if (!inode) {
9382			cond_resched_lock(&root->delalloc_lock);
9383			continue;
9384		}
9385		spin_unlock(&root->delalloc_lock);
9386
9387		if (snapshot)
9388			set_bit(BTRFS_INODE_SNAPSHOT_FLUSH,
9389				&binode->runtime_flags);
9390		if (full_flush) {
9391			work = btrfs_alloc_delalloc_work(inode);
9392			if (!work) {
9393				iput(inode);
9394				ret = -ENOMEM;
9395				goto out;
9396			}
9397			list_add_tail(&work->list, &works);
9398			btrfs_queue_work(root->fs_info->flush_workers,
9399					 &work->work);
9400		} else {
9401			ret = filemap_fdatawrite_wbc(inode->i_mapping, wbc);
9402			btrfs_add_delayed_iput(BTRFS_I(inode));
9403			if (ret || wbc->nr_to_write <= 0)
9404				goto out;
9405		}
9406		cond_resched();
9407		spin_lock(&root->delalloc_lock);
9408	}
9409	spin_unlock(&root->delalloc_lock);
9410
9411out:
9412	list_for_each_entry_safe(work, next, &works, list) {
9413		list_del_init(&work->list);
9414		wait_for_completion(&work->completion);
9415		kfree(work);
9416	}
9417
9418	if (!list_empty(&splice)) {
9419		spin_lock(&root->delalloc_lock);
9420		list_splice_tail(&splice, &root->delalloc_inodes);
9421		spin_unlock(&root->delalloc_lock);
9422	}
9423	mutex_unlock(&root->delalloc_mutex);
9424	return ret;
9425}
9426
9427int btrfs_start_delalloc_snapshot(struct btrfs_root *root, bool in_reclaim_context)
9428{
9429	struct writeback_control wbc = {
9430		.nr_to_write = LONG_MAX,
9431		.sync_mode = WB_SYNC_NONE,
9432		.range_start = 0,
9433		.range_end = LLONG_MAX,
9434	};
9435	struct btrfs_fs_info *fs_info = root->fs_info;
9436
9437	if (BTRFS_FS_ERROR(fs_info))
9438		return -EROFS;
9439
9440	return start_delalloc_inodes(root, &wbc, true, in_reclaim_context);
9441}
9442
9443int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, long nr,
9444			       bool in_reclaim_context)
9445{
9446	struct writeback_control wbc = {
9447		.nr_to_write = nr,
9448		.sync_mode = WB_SYNC_NONE,
9449		.range_start = 0,
9450		.range_end = LLONG_MAX,
9451	};
9452	struct btrfs_root *root;
9453	LIST_HEAD(splice);
9454	int ret;
9455
9456	if (BTRFS_FS_ERROR(fs_info))
9457		return -EROFS;
9458
9459	mutex_lock(&fs_info->delalloc_root_mutex);
9460	spin_lock(&fs_info->delalloc_root_lock);
9461	list_splice_init(&fs_info->delalloc_roots, &splice);
9462	while (!list_empty(&splice)) {
9463		/*
9464		 * Reset nr_to_write here so we know that we're doing a full
9465		 * flush.
9466		 */
9467		if (nr == LONG_MAX)
9468			wbc.nr_to_write = LONG_MAX;
9469
9470		root = list_first_entry(&splice, struct btrfs_root,
9471					delalloc_root);
9472		root = btrfs_grab_root(root);
9473		BUG_ON(!root);
9474		list_move_tail(&root->delalloc_root,
9475			       &fs_info->delalloc_roots);
9476		spin_unlock(&fs_info->delalloc_root_lock);
9477
9478		ret = start_delalloc_inodes(root, &wbc, false, in_reclaim_context);
9479		btrfs_put_root(root);
9480		if (ret < 0 || wbc.nr_to_write <= 0)
9481			goto out;
9482		spin_lock(&fs_info->delalloc_root_lock);
9483	}
9484	spin_unlock(&fs_info->delalloc_root_lock);
9485
9486	ret = 0;
9487out:
9488	if (!list_empty(&splice)) {
9489		spin_lock(&fs_info->delalloc_root_lock);
9490		list_splice_tail(&splice, &fs_info->delalloc_roots);
9491		spin_unlock(&fs_info->delalloc_root_lock);
9492	}
9493	mutex_unlock(&fs_info->delalloc_root_mutex);
9494	return ret;
9495}
9496
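/*
 * Create a symlink. The target path is stored as an inline file extent item,
 * which is why its length is limited by BTRFS_MAX_INLINE_DATA_SIZE().
 */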
9497static int btrfs_symlink(struct mnt_idmap *idmap, struct inode *dir,
9498			 struct dentry *dentry, const char *symname)
9499{
9500	struct btrfs_fs_info *fs_info = inode_to_fs_info(dir);
9501	struct btrfs_trans_handle *trans;
9502	struct btrfs_root *root = BTRFS_I(dir)->root;
9503	struct btrfs_path *path;
9504	struct btrfs_key key;
9505	struct inode *inode;
9506	struct btrfs_new_inode_args new_inode_args = {
9507		.dir = dir,
9508		.dentry = dentry,
9509	};
9510	unsigned int trans_num_items;
9511	int err;
9512	int name_len;
9513	int datasize;
9514	unsigned long ptr;
9515	struct btrfs_file_extent_item *ei;
9516	struct extent_buffer *leaf;
9517
9518	name_len = strlen(symname);
9519	if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info))
9520		return -ENAMETOOLONG;
9521
9522	inode = new_inode(dir->i_sb);
9523	if (!inode)
9524		return -ENOMEM;
9525	inode_init_owner(idmap, inode, dir, S_IFLNK | S_IRWXUGO);
9526	inode->i_op = &btrfs_symlink_inode_operations;
9527	inode_nohighmem(inode);
9528	inode->i_mapping->a_ops = &btrfs_aops;
9529	btrfs_i_size_write(BTRFS_I(inode), name_len);
9530	inode_set_bytes(inode, name_len);
9531
9532	new_inode_args.inode = inode;
9533	err = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items);
9534	if (err)
9535		goto out_inode;
9536	/* 1 additional item for the inline extent */
9537	trans_num_items++;
9538
9539	trans = btrfs_start_transaction(root, trans_num_items);
9540	if (IS_ERR(trans)) {
9541		err = PTR_ERR(trans);
9542		goto out_new_inode_args;
9543	}
9544
9545	err = btrfs_create_new_inode(trans, &new_inode_args);
9546	if (err)
9547		goto out;
9548
9549	path = btrfs_alloc_path();
9550	if (!path) {
9551		err = -ENOMEM;
9552		btrfs_abort_transaction(trans, err);
9553		discard_new_inode(inode);
9554		inode = NULL;
9555		goto out;
9556	}
9557	key.objectid = btrfs_ino(BTRFS_I(inode));
9558	key.offset = 0;
9559	key.type = BTRFS_EXTENT_DATA_KEY;
9560	datasize = btrfs_file_extent_calc_inline_size(name_len);
9561	err = btrfs_insert_empty_item(trans, root, path, &key,
9562				      datasize);
9563	if (err) {
9564		btrfs_abort_transaction(trans, err);
9565		btrfs_free_path(path);
9566		discard_new_inode(inode);
9567		inode = NULL;
9568		goto out;
9569	}
9570	leaf = path->nodes[0];
9571	ei = btrfs_item_ptr(leaf, path->slots[0],
9572			    struct btrfs_file_extent_item);
9573	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
9574	btrfs_set_file_extent_type(leaf, ei,
9575				   BTRFS_FILE_EXTENT_INLINE);
9576	btrfs_set_file_extent_encryption(leaf, ei, 0);
9577	btrfs_set_file_extent_compression(leaf, ei, 0);
9578	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
9579	btrfs_set_file_extent_ram_bytes(leaf, ei, name_len);
9580
9581	ptr = btrfs_file_extent_inline_start(ei);
9582	write_extent_buffer(leaf, symname, ptr, name_len);
9583	btrfs_mark_buffer_dirty(trans, leaf);
9584	btrfs_free_path(path);
9585
9586	d_instantiate_new(dentry, inode);
9587	err = 0;
9588out:
9589	btrfs_end_transaction(trans);
9590	btrfs_btree_balance_dirty(fs_info);
9591out_new_inode_args:
9592	btrfs_new_inode_args_destroy(&new_inode_args);
9593out_inode:
9594	if (err)
9595		iput(inode);
9596	return err;
9597}
9598
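/*
 * Insert a prealloc file extent item for the given reserved extent. If no
 * transaction handle is passed in, btrfs_replace_file_extents() starts one
 * (and may drop existing extents in the range); the handle in use is returned
 * to the caller either way. The qgroup data reservation is released up front
 * and freed again if we error out early.
 */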
9599static struct btrfs_trans_handle *insert_prealloc_file_extent(
9600				       struct btrfs_trans_handle *trans_in,
9601				       struct btrfs_inode *inode,
9602				       struct btrfs_key *ins,
9603				       u64 file_offset)
9604{
9605	struct btrfs_file_extent_item stack_fi;
9606	struct btrfs_replace_extent_info extent_info;
9607	struct btrfs_trans_handle *trans = trans_in;
9608	struct btrfs_path *path;
9609	u64 start = ins->objectid;
9610	u64 len = ins->offset;
9611	u64 qgroup_released = 0;
9612	int ret;
9613
9614	memset(&stack_fi, 0, sizeof(stack_fi));
9615
9616	btrfs_set_stack_file_extent_type(&stack_fi, BTRFS_FILE_EXTENT_PREALLOC);
9617	btrfs_set_stack_file_extent_disk_bytenr(&stack_fi, start);
9618	btrfs_set_stack_file_extent_disk_num_bytes(&stack_fi, len);
9619	btrfs_set_stack_file_extent_num_bytes(&stack_fi, len);
9620	btrfs_set_stack_file_extent_ram_bytes(&stack_fi, len);
9621	btrfs_set_stack_file_extent_compression(&stack_fi, BTRFS_COMPRESS_NONE);
	/* Encryption and other encoding are reserved and must be 0. */
9623
9624	ret = btrfs_qgroup_release_data(inode, file_offset, len, &qgroup_released);
9625	if (ret < 0)
9626		return ERR_PTR(ret);
9627
9628	if (trans) {
9629		ret = insert_reserved_file_extent(trans, inode,
9630						  file_offset, &stack_fi,
9631						  true, qgroup_released);
9632		if (ret)
9633			goto free_qgroup;
9634		return trans;
9635	}
9636
9637	extent_info.disk_offset = start;
9638	extent_info.disk_len = len;
9639	extent_info.data_offset = 0;
9640	extent_info.data_len = len;
9641	extent_info.file_offset = file_offset;
9642	extent_info.extent_buf = (char *)&stack_fi;
9643	extent_info.is_new_extent = true;
9644	extent_info.update_times = true;
9645	extent_info.qgroup_reserved = qgroup_released;
9646	extent_info.insertions = 0;
9647
9648	path = btrfs_alloc_path();
9649	if (!path) {
9650		ret = -ENOMEM;
9651		goto free_qgroup;
9652	}
9653
9654	ret = btrfs_replace_file_extents(inode, path, file_offset,
9655				     file_offset + len - 1, &extent_info,
9656				     &trans);
9657	btrfs_free_path(path);
9658	if (ret)
9659		goto free_qgroup;
9660	return trans;
9661
9662free_qgroup:
9663	/*
	 * We released the qgroup data range at the beginning of the function,
	 * and normally the qgroup_released bytes will be freed when committing
	 * the transaction.
	 * But if we error out early, we have to free what we have released,
	 * or we leak the qgroup data reservation.
9669	 */
9670	btrfs_qgroup_free_refroot(inode->root->fs_info,
9671			inode->root->root_key.objectid, qgroup_released,
9672			BTRFS_QGROUP_RSV_DATA);
9673	return ERR_PTR(ret);
9674}
9675
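/*
 * Allocate extents for a file range in chunks of at most 256M, insert the
 * matching prealloc file extent items and keep i_size and the inode item up
 * to date as we go. Runs either inside a caller provided transaction or with
 * a transaction started per iteration.
 */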
9676static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
9677				       u64 start, u64 num_bytes, u64 min_size,
9678				       loff_t actual_len, u64 *alloc_hint,
9679				       struct btrfs_trans_handle *trans)
9680{
9681	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
9682	struct extent_map *em;
9683	struct btrfs_root *root = BTRFS_I(inode)->root;
9684	struct btrfs_key ins;
9685	u64 cur_offset = start;
9686	u64 clear_offset = start;
9687	u64 i_size;
9688	u64 cur_bytes;
9689	u64 last_alloc = (u64)-1;
9690	int ret = 0;
9691	bool own_trans = true;
9692	u64 end = start + num_bytes - 1;
9693
9694	if (trans)
9695		own_trans = false;
9696	while (num_bytes > 0) {
9697		cur_bytes = min_t(u64, num_bytes, SZ_256M);
9698		cur_bytes = max(cur_bytes, min_size);
9699		/*
9700		 * If we are severely fragmented we could end up with really
9701		 * small allocations, so if the allocator is returning small
		 * chunks, let's make its job easier by only searching for those
9703		 * sized chunks.
9704		 */
9705		cur_bytes = min(cur_bytes, last_alloc);
9706		ret = btrfs_reserve_extent(root, cur_bytes, cur_bytes,
9707				min_size, 0, *alloc_hint, &ins, 1, 0);
9708		if (ret)
9709			break;
9710
9711		/*
9712		 * We've reserved this space, and thus converted it from
9713		 * ->bytes_may_use to ->bytes_reserved.  Any error that happens
9714		 * from here on out we will only need to clear our reservation
9715		 * for the remaining unreserved area, so advance our
9716		 * clear_offset by our extent size.
9717		 */
9718		clear_offset += ins.offset;
9719
9720		last_alloc = ins.offset;
9721		trans = insert_prealloc_file_extent(trans, BTRFS_I(inode),
9722						    &ins, cur_offset);
9723		/*
9724		 * Now that we inserted the prealloc extent we can finally
9725		 * decrement the number of reservations in the block group.
9726		 * If we did it before, we could race with relocation and have
9727		 * relocation miss the reserved extent, making it fail later.
9728		 */
9729		btrfs_dec_block_group_reservations(fs_info, ins.objectid);
9730		if (IS_ERR(trans)) {
9731			ret = PTR_ERR(trans);
9732			btrfs_free_reserved_extent(fs_info, ins.objectid,
9733						   ins.offset, 0);
9734			break;
9735		}
9736
9737		em = alloc_extent_map();
9738		if (!em) {
9739			btrfs_drop_extent_map_range(BTRFS_I(inode), cur_offset,
9740					    cur_offset + ins.offset - 1, false);
9741			btrfs_set_inode_full_sync(BTRFS_I(inode));
9742			goto next;
9743		}
9744
9745		em->start = cur_offset;
9746		em->orig_start = cur_offset;
9747		em->len = ins.offset;
9748		em->block_start = ins.objectid;
9749		em->block_len = ins.offset;
9750		em->orig_block_len = ins.offset;
9751		em->ram_bytes = ins.offset;
9752		em->flags |= EXTENT_FLAG_PREALLOC;
9753		em->generation = trans->transid;
9754
9755		ret = btrfs_replace_extent_map_range(BTRFS_I(inode), em, true);
9756		free_extent_map(em);
9757next:
9758		num_bytes -= ins.offset;
9759		cur_offset += ins.offset;
9760		*alloc_hint = ins.objectid + ins.offset;
9761
9762		inode_inc_iversion(inode);
9763		inode_set_ctime_current(inode);
9764		BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC;
9765		if (!(mode & FALLOC_FL_KEEP_SIZE) &&
9766		    (actual_len > inode->i_size) &&
9767		    (cur_offset > inode->i_size)) {
9768			if (cur_offset > actual_len)
9769				i_size = actual_len;
9770			else
9771				i_size = cur_offset;
9772			i_size_write(inode, i_size);
9773			btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0);
9774		}
9775
9776		ret = btrfs_update_inode(trans, BTRFS_I(inode));
9777
9778		if (ret) {
9779			btrfs_abort_transaction(trans, ret);
9780			if (own_trans)
9781				btrfs_end_transaction(trans);
9782			break;
9783		}
9784
9785		if (own_trans) {
9786			btrfs_end_transaction(trans);
9787			trans = NULL;
9788		}
9789	}
9790	if (clear_offset < end)
9791		btrfs_free_reserved_data_space(BTRFS_I(inode), NULL, clear_offset,
9792			end - clear_offset + 1);
9793	return ret;
9794}
9795
9796int btrfs_prealloc_file_range(struct inode *inode, int mode,
9797			      u64 start, u64 num_bytes, u64 min_size,
9798			      loff_t actual_len, u64 *alloc_hint)
9799{
9800	return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
9801					   min_size, actual_len, alloc_hint,
9802					   NULL);
9803}
9804
9805int btrfs_prealloc_file_range_trans(struct inode *inode,
9806				    struct btrfs_trans_handle *trans, int mode,
9807				    u64 start, u64 num_bytes, u64 min_size,
9808				    loff_t actual_len, u64 *alloc_hint)
9809{
9810	return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
9811					   min_size, actual_len, alloc_hint, trans);
9812}
9813
9814static int btrfs_permission(struct mnt_idmap *idmap,
9815			    struct inode *inode, int mask)
9816{
9817	struct btrfs_root *root = BTRFS_I(inode)->root;
9818	umode_t mode = inode->i_mode;
9819
9820	if (mask & MAY_WRITE &&
9821	    (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) {
9822		if (btrfs_root_readonly(root))
9823			return -EROFS;
9824		if (BTRFS_I(inode)->flags & BTRFS_INODE_READONLY)
9825			return -EACCES;
9826	}
9827	return generic_permission(idmap, inode, mask);
9828}
9829
9830static int btrfs_tmpfile(struct mnt_idmap *idmap, struct inode *dir,
9831			 struct file *file, umode_t mode)
9832{
9833	struct btrfs_fs_info *fs_info = inode_to_fs_info(dir);
9834	struct btrfs_trans_handle *trans;
9835	struct btrfs_root *root = BTRFS_I(dir)->root;
9836	struct inode *inode;
9837	struct btrfs_new_inode_args new_inode_args = {
9838		.dir = dir,
9839		.dentry = file->f_path.dentry,
9840		.orphan = true,
9841	};
9842	unsigned int trans_num_items;
9843	int ret;
9844
9845	inode = new_inode(dir->i_sb);
9846	if (!inode)
9847		return -ENOMEM;
9848	inode_init_owner(idmap, inode, dir, mode);
9849	inode->i_fop = &btrfs_file_operations;
9850	inode->i_op = &btrfs_file_inode_operations;
9851	inode->i_mapping->a_ops = &btrfs_aops;
9852
9853	new_inode_args.inode = inode;
9854	ret = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items);
9855	if (ret)
9856		goto out_inode;
9857
9858	trans = btrfs_start_transaction(root, trans_num_items);
9859	if (IS_ERR(trans)) {
9860		ret = PTR_ERR(trans);
9861		goto out_new_inode_args;
9862	}
9863
9864	ret = btrfs_create_new_inode(trans, &new_inode_args);
9865
9866	/*
9867	 * We set number of links to 0 in btrfs_create_new_inode(), and here we
9868	 * set it to 1 because d_tmpfile() will issue a warning if the count is
9869	 * 0, through:
9870	 *
9871	 *    d_tmpfile() -> inode_dec_link_count() -> drop_nlink()
9872	 */
9873	set_nlink(inode, 1);
9874
9875	if (!ret) {
9876		d_tmpfile(file, inode);
9877		unlock_new_inode(inode);
9878		mark_inode_dirty(inode);
9879	}
9880
9881	btrfs_end_transaction(trans);
9882	btrfs_btree_balance_dirty(fs_info);
9883out_new_inode_args:
9884	btrfs_new_inode_args_destroy(&new_inode_args);
9885out_inode:
9886	if (ret)
9887		iput(inode);
9888	return finish_open_simple(file, ret);
9889}
9890
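/*
 * Mark all pages backing the given range as under writeback, using the
 * subpage aware helper so this also works when the sector size is smaller
 * than the page size.
 */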
9891void btrfs_set_range_writeback(struct btrfs_inode *inode, u64 start, u64 end)
9892{
9893	struct btrfs_fs_info *fs_info = inode->root->fs_info;
9894	unsigned long index = start >> PAGE_SHIFT;
9895	unsigned long end_index = end >> PAGE_SHIFT;
9896	struct page *page;
9897	u32 len;
9898
9899	ASSERT(end + 1 - start <= U32_MAX);
9900	len = end + 1 - start;
9901	while (index <= end_index) {
9902		page = find_get_page(inode->vfs_inode.i_mapping, index);
9903		ASSERT(page); /* Pages should be in the extent_io_tree */
9904
		/* This is for data, which doesn't yet support larger folios. */
9906		ASSERT(folio_order(page_folio(page)) == 0);
9907		btrfs_folio_set_writeback(fs_info, page_folio(page), start, len);
9908		put_page(page);
9909		index++;
9910	}
9911}
9912
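/*
 * Map the on-disk compression type of an extent to the encoded I/O
 * compression type of the ioctl interface. For LZO the result encodes the
 * sector size, since the LZO on-disk format depends on it.
 */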
9913int btrfs_encoded_io_compression_from_extent(struct btrfs_fs_info *fs_info,
9914					     int compress_type)
9915{
9916	switch (compress_type) {
9917	case BTRFS_COMPRESS_NONE:
9918		return BTRFS_ENCODED_IO_COMPRESSION_NONE;
9919	case BTRFS_COMPRESS_ZLIB:
9920		return BTRFS_ENCODED_IO_COMPRESSION_ZLIB;
9921	case BTRFS_COMPRESS_LZO:
9922		/*
9923		 * The LZO format depends on the sector size. 64K is the maximum
9924		 * sector size that we support.
9925		 */
9926		if (fs_info->sectorsize < SZ_4K || fs_info->sectorsize > SZ_64K)
9927			return -EINVAL;
9928		return BTRFS_ENCODED_IO_COMPRESSION_LZO_4K +
9929		       (fs_info->sectorsize_bits - 12);
9930	case BTRFS_COMPRESS_ZSTD:
9931		return BTRFS_ENCODED_IO_COMPRESSION_ZSTD;
9932	default:
9933		return -EUCLEAN;
9934	}
9935}
9936
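/*
 * Encoded read of an inline extent: copy the (possibly compressed) inline
 * data straight out of the leaf into the iterator.
 */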
9937static ssize_t btrfs_encoded_read_inline(
9938				struct kiocb *iocb,
9939				struct iov_iter *iter, u64 start,
9940				u64 lockend,
9941				struct extent_state **cached_state,
9942				u64 extent_start, size_t count,
9943				struct btrfs_ioctl_encoded_io_args *encoded,
9944				bool *unlocked)
9945{
9946	struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp));
9947	struct btrfs_root *root = inode->root;
9948	struct btrfs_fs_info *fs_info = root->fs_info;
9949	struct extent_io_tree *io_tree = &inode->io_tree;
9950	struct btrfs_path *path;
9951	struct extent_buffer *leaf;
9952	struct btrfs_file_extent_item *item;
9953	u64 ram_bytes;
9954	unsigned long ptr;
9955	void *tmp;
9956	ssize_t ret;
9957
9958	path = btrfs_alloc_path();
9959	if (!path) {
9960		ret = -ENOMEM;
9961		goto out;
9962	}
9963	ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(inode),
9964				       extent_start, 0);
9965	if (ret) {
9966		if (ret > 0) {
9967			/* The extent item disappeared? */
9968			ret = -EIO;
9969		}
9970		goto out;
9971	}
9972	leaf = path->nodes[0];
9973	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);
9974
9975	ram_bytes = btrfs_file_extent_ram_bytes(leaf, item);
9976	ptr = btrfs_file_extent_inline_start(item);
9977
9978	encoded->len = min_t(u64, extent_start + ram_bytes,
9979			     inode->vfs_inode.i_size) - iocb->ki_pos;
9980	ret = btrfs_encoded_io_compression_from_extent(fs_info,
9981				 btrfs_file_extent_compression(leaf, item));
9982	if (ret < 0)
9983		goto out;
9984	encoded->compression = ret;
9985	if (encoded->compression) {
9986		size_t inline_size;
9987
9988		inline_size = btrfs_file_extent_inline_item_len(leaf,
9989								path->slots[0]);
9990		if (inline_size > count) {
9991			ret = -ENOBUFS;
9992			goto out;
9993		}
9994		count = inline_size;
9995		encoded->unencoded_len = ram_bytes;
9996		encoded->unencoded_offset = iocb->ki_pos - extent_start;
9997	} else {
9998		count = min_t(u64, count, encoded->len);
9999		encoded->len = count;
10000		encoded->unencoded_len = count;
10001		ptr += iocb->ki_pos - extent_start;
10002	}
10003
10004	tmp = kmalloc(count, GFP_NOFS);
10005	if (!tmp) {
10006		ret = -ENOMEM;
10007		goto out;
10008	}
10009	read_extent_buffer(leaf, tmp, ptr, count);
10010	btrfs_release_path(path);
10011	unlock_extent(io_tree, start, lockend, cached_state);
10012	btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
10013	*unlocked = true;
10014
10015	ret = copy_to_iter(tmp, count, iter);
10016	if (ret != count)
10017		ret = -EFAULT;
10018	kfree(tmp);
10019out:
10020	btrfs_free_path(path);
10021	return ret;
10022}
10023
10024struct btrfs_encoded_read_private {
10025	wait_queue_head_t wait;
10026	atomic_t pending;
10027	blk_status_t status;
10028};
10029
10030static void btrfs_encoded_read_endio(struct btrfs_bio *bbio)
10031{
10032	struct btrfs_encoded_read_private *priv = bbio->private;
10033
10034	if (bbio->bio.bi_status) {
10035		/*
10036		 * The memory barrier implied by the atomic_dec_return() here
10037		 * pairs with the memory barrier implied by the
10038		 * atomic_dec_return() or io_wait_event() in
10039		 * btrfs_encoded_read_regular_fill_pages() to ensure that this
10040		 * write is observed before the load of status in
10041		 * btrfs_encoded_read_regular_fill_pages().
10042		 */
10043		WRITE_ONCE(priv->status, bbio->bio.bi_status);
10044	}
10045	if (!atomic_dec_return(&priv->pending))
10046		wake_up(&priv->wait);
10047	bio_put(&bbio->bio);
10048}
10049
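/*
 * Read disk_io_size bytes starting at disk_bytenr into the given pages,
 * splitting into as many bios as needed and waiting for all of them to
 * complete. Returns 0 on success or a negative errno if any bio failed.
 */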
10050int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode,
10051					  u64 file_offset, u64 disk_bytenr,
10052					  u64 disk_io_size, struct page **pages)
10053{
10054	struct btrfs_fs_info *fs_info = inode->root->fs_info;
10055	struct btrfs_encoded_read_private priv = {
10056		.pending = ATOMIC_INIT(1),
10057	};
10058	unsigned long i = 0;
10059	struct btrfs_bio *bbio;
10060
10061	init_waitqueue_head(&priv.wait);
10062
10063	bbio = btrfs_bio_alloc(BIO_MAX_VECS, REQ_OP_READ, fs_info,
10064			       btrfs_encoded_read_endio, &priv);
10065	bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
10066	bbio->inode = inode;
10067
10068	do {
10069		size_t bytes = min_t(u64, disk_io_size, PAGE_SIZE);
10070
10071		if (bio_add_page(&bbio->bio, pages[i], bytes, 0) < bytes) {
10072			atomic_inc(&priv.pending);
10073			btrfs_submit_bio(bbio, 0);
10074
10075			bbio = btrfs_bio_alloc(BIO_MAX_VECS, REQ_OP_READ, fs_info,
10076					       btrfs_encoded_read_endio, &priv);
10077			bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
10078			bbio->inode = inode;
10079			continue;
10080		}
10081
10082		i++;
10083		disk_bytenr += bytes;
10084		disk_io_size -= bytes;
10085	} while (disk_io_size);
10086
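	/*
	 * Submit the last bio. priv.pending was initialized with a bias of 1;
	 * dropping that bias below tells us whether any of the submitted bios
	 * are still in flight, in which case we have to wait for them.
	 */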
10087	atomic_inc(&priv.pending);
10088	btrfs_submit_bio(bbio, 0);
10089
10090	if (atomic_dec_return(&priv.pending))
10091		io_wait_event(priv.wait, !atomic_read(&priv.pending));
10092	/* See btrfs_encoded_read_endio() for ordering. */
10093	return blk_status_to_errno(READ_ONCE(priv.status));
10094}
10095
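/*
 * Encoded read of a regular extent: read the raw (possibly compressed) data
 * into temporary pages and then copy the requested part to the iterator.
 */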
10096static ssize_t btrfs_encoded_read_regular(struct kiocb *iocb,
10097					  struct iov_iter *iter,
10098					  u64 start, u64 lockend,
10099					  struct extent_state **cached_state,
10100					  u64 disk_bytenr, u64 disk_io_size,
10101					  size_t count, bool compressed,
10102					  bool *unlocked)
10103{
10104	struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp));
10105	struct extent_io_tree *io_tree = &inode->io_tree;
10106	struct page **pages;
10107	unsigned long nr_pages, i;
10108	u64 cur;
10109	size_t page_offset;
10110	ssize_t ret;
10111
10112	nr_pages = DIV_ROUND_UP(disk_io_size, PAGE_SIZE);
10113	pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
10114	if (!pages)
10115		return -ENOMEM;
10116	ret = btrfs_alloc_page_array(nr_pages, pages, 0);
	if (ret) {
		ret = -ENOMEM;
		goto out;
	}
10121
10122	ret = btrfs_encoded_read_regular_fill_pages(inode, start, disk_bytenr,
10123						    disk_io_size, pages);
10124	if (ret)
10125		goto out;
10126
10127	unlock_extent(io_tree, start, lockend, cached_state);
10128	btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
10129	*unlocked = true;
10130
10131	if (compressed) {
10132		i = 0;
10133		page_offset = 0;
10134	} else {
10135		i = (iocb->ki_pos - start) >> PAGE_SHIFT;
10136		page_offset = (iocb->ki_pos - start) & (PAGE_SIZE - 1);
10137	}
10138	cur = 0;
10139	while (cur < count) {
10140		size_t bytes = min_t(size_t, count - cur,
10141				     PAGE_SIZE - page_offset);
10142
10143		if (copy_page_to_iter(pages[i], page_offset, bytes,
10144				      iter) != bytes) {
10145			ret = -EFAULT;
10146			goto out;
10147		}
10148		i++;
10149		cur += bytes;
10150		page_offset = 0;
10151	}
10152	ret = count;
10153out:
10154	for (i = 0; i < nr_pages; i++) {
10155		if (pages[i])
10156			__free_page(pages[i]);
10157	}
10158	kfree(pages);
10159	return ret;
10160}
10161
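/*
 * Read the encoded (e.g. still compressed) representation of a file extent
 * for the BTRFS_IOC_ENCODED_READ ioctl and fill in the metadata in *encoded.
 * Inline, hole/prealloc and regular extents each take their own path below.
 */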
10162ssize_t btrfs_encoded_read(struct kiocb *iocb, struct iov_iter *iter,
10163			   struct btrfs_ioctl_encoded_io_args *encoded)
10164{
10165	struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp));
10166	struct btrfs_fs_info *fs_info = inode->root->fs_info;
10167	struct extent_io_tree *io_tree = &inode->io_tree;
10168	ssize_t ret;
10169	size_t count = iov_iter_count(iter);
10170	u64 start, lockend, disk_bytenr, disk_io_size;
10171	struct extent_state *cached_state = NULL;
10172	struct extent_map *em;
10173	bool unlocked = false;
10174
10175	file_accessed(iocb->ki_filp);
10176
10177	btrfs_inode_lock(inode, BTRFS_ILOCK_SHARED);
10178
10179	if (iocb->ki_pos >= inode->vfs_inode.i_size) {
10180		btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
10181		return 0;
10182	}
10183	start = ALIGN_DOWN(iocb->ki_pos, fs_info->sectorsize);
10184	/*
10185	 * We don't know how long the extent containing iocb->ki_pos is, but if
10186	 * it's compressed we know that it won't be longer than this.
10187	 */
10188	lockend = start + BTRFS_MAX_UNCOMPRESSED - 1;
10189
10190	for (;;) {
10191		struct btrfs_ordered_extent *ordered;
10192
10193		ret = btrfs_wait_ordered_range(&inode->vfs_inode, start,
10194					       lockend - start + 1);
10195		if (ret)
10196			goto out_unlock_inode;
10197		lock_extent(io_tree, start, lockend, &cached_state);
10198		ordered = btrfs_lookup_ordered_range(inode, start,
10199						     lockend - start + 1);
10200		if (!ordered)
10201			break;
10202		btrfs_put_ordered_extent(ordered);
10203		unlock_extent(io_tree, start, lockend, &cached_state);
10204		cond_resched();
10205	}
10206
10207	em = btrfs_get_extent(inode, NULL, start, lockend - start + 1);
10208	if (IS_ERR(em)) {
10209		ret = PTR_ERR(em);
10210		goto out_unlock_extent;
10211	}
10212
10213	if (em->block_start == EXTENT_MAP_INLINE) {
10214		u64 extent_start = em->start;
10215
10216		/*
10217		 * For inline extents we get everything we need out of the
10218		 * extent item.
10219		 */
10220		free_extent_map(em);
10221		em = NULL;
10222		ret = btrfs_encoded_read_inline(iocb, iter, start, lockend,
10223						&cached_state, extent_start,
10224						count, encoded, &unlocked);
10225		goto out;
10226	}
10227
10228	/*
10229	 * We only want to return up to EOF even if the extent extends beyond
10230	 * that.
10231	 */
10232	encoded->len = min_t(u64, extent_map_end(em),
10233			     inode->vfs_inode.i_size) - iocb->ki_pos;
10234	if (em->block_start == EXTENT_MAP_HOLE ||
10235	    (em->flags & EXTENT_FLAG_PREALLOC)) {
10236		disk_bytenr = EXTENT_MAP_HOLE;
10237		count = min_t(u64, count, encoded->len);
10238		encoded->len = count;
10239		encoded->unencoded_len = count;
10240	} else if (extent_map_is_compressed(em)) {
10241		disk_bytenr = em->block_start;
10242		/*
10243		 * Bail if the buffer isn't large enough to return the whole
10244		 * compressed extent.
10245		 */
10246		if (em->block_len > count) {
10247			ret = -ENOBUFS;
10248			goto out_em;
10249		}
10250		disk_io_size = em->block_len;
10251		count = em->block_len;
10252		encoded->unencoded_len = em->ram_bytes;
10253		encoded->unencoded_offset = iocb->ki_pos - em->orig_start;
10254		ret = btrfs_encoded_io_compression_from_extent(fs_info,
10255							       extent_map_compression(em));
10256		if (ret < 0)
10257			goto out_em;
10258		encoded->compression = ret;
10259	} else {
10260		disk_bytenr = em->block_start + (start - em->start);
10261		if (encoded->len > count)
10262			encoded->len = count;
10263		/*
10264		 * Don't read beyond what we locked. This also limits the page
10265		 * allocations that we'll do.
10266		 */
10267		disk_io_size = min(lockend + 1, iocb->ki_pos + encoded->len) - start;
10268		count = start + disk_io_size - iocb->ki_pos;
10269		encoded->len = count;
10270		encoded->unencoded_len = count;
10271		disk_io_size = ALIGN(disk_io_size, fs_info->sectorsize);
10272	}
10273	free_extent_map(em);
10274	em = NULL;
10275
10276	if (disk_bytenr == EXTENT_MAP_HOLE) {
10277		unlock_extent(io_tree, start, lockend, &cached_state);
10278		btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
10279		unlocked = true;
10280		ret = iov_iter_zero(count, iter);
10281		if (ret != count)
10282			ret = -EFAULT;
10283	} else {
10284		ret = btrfs_encoded_read_regular(iocb, iter, start, lockend,
10285						 &cached_state, disk_bytenr,
10286						 disk_io_size, count,
10287						 encoded->compression,
10288						 &unlocked);
10289	}
10290
10291out:
10292	if (ret >= 0)
10293		iocb->ki_pos += encoded->len;
10294out_em:
10295	free_extent_map(em);
10296out_unlock_extent:
10297	if (!unlocked)
10298		unlock_extent(io_tree, start, lockend, &cached_state);
10299out_unlock_inode:
10300	if (!unlocked)
10301		btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
10302	return ret;
10303}
10304
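/*
 * Write pre-encoded (e.g. pre-compressed) data for the encoded write ioctl.
 * The checks below enforce the on-disk format constraints: a supported
 * compression type, sane (un)encoded lengths, and sector alignment of the
 * extent boundaries.
 */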
10305ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from,
10306			       const struct btrfs_ioctl_encoded_io_args *encoded)
10307{
10308	struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp));
10309	struct btrfs_root *root = inode->root;
10310	struct btrfs_fs_info *fs_info = root->fs_info;
10311	struct extent_io_tree *io_tree = &inode->io_tree;
10312	struct extent_changeset *data_reserved = NULL;
10313	struct extent_state *cached_state = NULL;
10314	struct btrfs_ordered_extent *ordered;
10315	int compression;
10316	size_t orig_count;
10317	u64 start, end;
10318	u64 num_bytes, ram_bytes, disk_num_bytes;
10319	unsigned long nr_pages, i;
10320	struct page **pages;
10321	struct btrfs_key ins;
10322	bool extent_reserved = false;
10323	struct extent_map *em;
10324	ssize_t ret;
10325
10326	switch (encoded->compression) {
10327	case BTRFS_ENCODED_IO_COMPRESSION_ZLIB:
10328		compression = BTRFS_COMPRESS_ZLIB;
10329		break;
10330	case BTRFS_ENCODED_IO_COMPRESSION_ZSTD:
10331		compression = BTRFS_COMPRESS_ZSTD;
10332		break;
10333	case BTRFS_ENCODED_IO_COMPRESSION_LZO_4K:
10334	case BTRFS_ENCODED_IO_COMPRESSION_LZO_8K:
10335	case BTRFS_ENCODED_IO_COMPRESSION_LZO_16K:
10336	case BTRFS_ENCODED_IO_COMPRESSION_LZO_32K:
10337	case BTRFS_ENCODED_IO_COMPRESSION_LZO_64K:
10338		/* The sector size must match for LZO. */
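		/*
		 * E.g. LZO_4K is only valid when sectorsize_bits == 12
		 * (4K == 2^12) and LZO_64K only when it is 16 (64K == 2^16).
		 */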
10339		if (encoded->compression -
10340		    BTRFS_ENCODED_IO_COMPRESSION_LZO_4K + 12 !=
10341		    fs_info->sectorsize_bits)
10342			return -EINVAL;
10343		compression = BTRFS_COMPRESS_LZO;
10344		break;
10345	default:
10346		return -EINVAL;
10347	}
10348	if (encoded->encryption != BTRFS_ENCODED_IO_ENCRYPTION_NONE)
10349		return -EINVAL;
10350
10351	/*
	 * Compressed extents should always have checksums, so error out if
	 * this is a NOCOW file or the inode was created while mounted with
	 * NODATASUM.
10354	 */
10355	if (inode->flags & BTRFS_INODE_NODATASUM)
10356		return -EINVAL;
10357
10358	orig_count = iov_iter_count(from);
10359
10360	/* The extent size must be sane. */
10361	if (encoded->unencoded_len > BTRFS_MAX_UNCOMPRESSED ||
10362	    orig_count > BTRFS_MAX_COMPRESSED || orig_count == 0)
10363		return -EINVAL;
10364
10365	/*
10366	 * The compressed data must be smaller than the decompressed data.
10367	 *
	 * It's of course possible for data to compress to a larger or the
	 * same size, but the buffered I/O path falls back to no compression
	 * for such data, and we don't want to break any assumptions by
	 * creating these extents.
	 *
	 * Note that this is less strict than the current check, which
	 * requires the compressed data to be at least one sector smaller
	 * than the decompressed data. We only want to enforce the weaker
	 * requirement from old kernels that it is at least one byte smaller.
10377	 */
10378	if (orig_count >= encoded->unencoded_len)
10379		return -EINVAL;
10380
10381	/* The extent must start on a sector boundary. */
10382	start = iocb->ki_pos;
10383	if (!IS_ALIGNED(start, fs_info->sectorsize))
10384		return -EINVAL;
10385
10386	/*
10387	 * The extent must end on a sector boundary. However, we allow a write
10388	 * which ends at or extends i_size to have an unaligned length; we round
10389	 * up the extent size and set i_size to the unaligned end.
10390	 */
10391	if (start + encoded->len < inode->vfs_inode.i_size &&
10392	    !IS_ALIGNED(start + encoded->len, fs_info->sectorsize))
10393		return -EINVAL;
10394
10395	/* Finally, the offset in the unencoded data must be sector-aligned. */
10396	if (!IS_ALIGNED(encoded->unencoded_offset, fs_info->sectorsize))
10397		return -EINVAL;
10398
10399	num_bytes = ALIGN(encoded->len, fs_info->sectorsize);
10400	ram_bytes = ALIGN(encoded->unencoded_len, fs_info->sectorsize);
10401	end = start + num_bytes - 1;
10402
10403	/*
	 * If the extent cannot be inlined, the compressed data on disk must be
10405	 * sector-aligned. For convenience, we extend it with zeroes if it
10406	 * isn't.
10407	 */
10408	disk_num_bytes = ALIGN(orig_count, fs_info->sectorsize);
10409	nr_pages = DIV_ROUND_UP(disk_num_bytes, PAGE_SIZE);
10410	pages = kvcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL_ACCOUNT);
10411	if (!pages)
10412		return -ENOMEM;
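	/*
	 * Copy the compressed data from the iterator into the pages,
	 * zero-filling the tail of the last partial page.
	 */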
10413	for (i = 0; i < nr_pages; i++) {
10414		size_t bytes = min_t(size_t, PAGE_SIZE, iov_iter_count(from));
10415		char *kaddr;
10416
10417		pages[i] = alloc_page(GFP_KERNEL_ACCOUNT);
10418		if (!pages[i]) {
10419			ret = -ENOMEM;
10420			goto out_pages;
10421		}
10422		kaddr = kmap_local_page(pages[i]);
10423		if (copy_from_iter(kaddr, bytes, from) != bytes) {
10424			kunmap_local(kaddr);
10425			ret = -EFAULT;
10426			goto out_pages;
10427		}
10428		if (bytes < PAGE_SIZE)
10429			memset(kaddr + bytes, 0, PAGE_SIZE - bytes);
10430		kunmap_local(kaddr);
10431	}
10432
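	/*
	 * Flush any existing pages and ordered extents in the range and take
	 * the extent lock; if new ordered extents or pages appear in the
	 * meantime, drop the lock and retry.
	 */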
10433	for (;;) {
10434		struct btrfs_ordered_extent *ordered;
10435
10436		ret = btrfs_wait_ordered_range(&inode->vfs_inode, start, num_bytes);
10437		if (ret)
10438			goto out_pages;
10439		ret = invalidate_inode_pages2_range(inode->vfs_inode.i_mapping,
10440						    start >> PAGE_SHIFT,
10441						    end >> PAGE_SHIFT);
10442		if (ret)
10443			goto out_pages;
10444		lock_extent(io_tree, start, end, &cached_state);
10445		ordered = btrfs_lookup_ordered_range(inode, start, num_bytes);
10446		if (!ordered &&
10447		    !filemap_range_has_page(inode->vfs_inode.i_mapping, start, end))
10448			break;
10449		if (ordered)
10450			btrfs_put_ordered_extent(ordered);
10451		unlock_extent(io_tree, start, end, &cached_state);
10452		cond_resched();
10453	}
10454
10455	/*
10456	 * We don't use the higher-level delalloc space functions because our
10457	 * num_bytes and disk_num_bytes are different.
10458	 */
10459	ret = btrfs_alloc_data_chunk_ondemand(inode, disk_num_bytes);
10460	if (ret)
10461		goto out_unlock;
10462	ret = btrfs_qgroup_reserve_data(inode, &data_reserved, start, num_bytes);
10463	if (ret)
10464		goto out_free_data_space;
10465	ret = btrfs_delalloc_reserve_metadata(inode, num_bytes, disk_num_bytes,
10466					      false);
10467	if (ret)
10468		goto out_qgroup_free_data;
10469
10470	/* Try an inline extent first. */
10471	if (start == 0 && encoded->unencoded_len == encoded->len &&
10472	    encoded->unencoded_offset == 0) {
10473		ret = cow_file_range_inline(inode, encoded->len, orig_count,
10474					    compression, pages, true);
10475		if (ret <= 0) {
10476			if (ret == 0)
10477				ret = orig_count;
10478			goto out_delalloc_release;
10479		}
10480	}
10481
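	/*
	 * Either the write didn't qualify for an inline extent or
	 * cow_file_range_inline() returned > 0 because the data didn't fit;
	 * allocate a regular extent for the compressed data instead.
	 */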
10482	ret = btrfs_reserve_extent(root, disk_num_bytes, disk_num_bytes,
10483				   disk_num_bytes, 0, 0, &ins, 1, 1);
10484	if (ret)
10485		goto out_delalloc_release;
10486	extent_reserved = true;
10487
10488	em = create_io_em(inode, start, num_bytes,
10489			  start - encoded->unencoded_offset, ins.objectid,
10490			  ins.offset, ins.offset, ram_bytes, compression,
10491			  BTRFS_ORDERED_COMPRESSED);
10492	if (IS_ERR(em)) {
10493		ret = PTR_ERR(em);
10494		goto out_free_reserved;
10495	}
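	/*
	 * The extent map now lives in the inode's extent tree; we only drop
	 * our temporary reference here.
	 */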
10496	free_extent_map(em);
10497
10498	ordered = btrfs_alloc_ordered_extent(inode, start, num_bytes, ram_bytes,
10499				       ins.objectid, ins.offset,
10500				       encoded->unencoded_offset,
10501				       (1 << BTRFS_ORDERED_ENCODED) |
10502				       (1 << BTRFS_ORDERED_COMPRESSED),
10503				       compression);
10504	if (IS_ERR(ordered)) {
10505		btrfs_drop_extent_map_range(inode, start, end, false);
10506		ret = PTR_ERR(ordered);
10507		goto out_free_reserved;
10508	}
10509	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
10510
10511	if (start + encoded->len > inode->vfs_inode.i_size)
10512		i_size_write(&inode->vfs_inode, start + encoded->len);
10513
10514	unlock_extent(io_tree, start, end, &cached_state);
10515
10516	btrfs_delalloc_release_extents(inode, num_bytes);
10517
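	/*
	 * btrfs_submit_compressed_write() takes ownership of the pages; they
	 * are freed when the compressed bio completes, which is why the
	 * success path skips out_pages.
	 */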
10518	btrfs_submit_compressed_write(ordered, pages, nr_pages, 0, false);
10519	ret = orig_count;
10520	goto out;
10521
10522out_free_reserved:
10523	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
10524	btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
10525out_delalloc_release:
10526	btrfs_delalloc_release_extents(inode, num_bytes);
10527	btrfs_delalloc_release_metadata(inode, disk_num_bytes, ret < 0);
10528out_qgroup_free_data:
10529	if (ret < 0)
10530		btrfs_qgroup_free_data(inode, data_reserved, start, num_bytes, NULL);
10531out_free_data_space:
10532	/*
10533	 * If btrfs_reserve_extent() succeeded, then we already decremented
10534	 * bytes_may_use.
10535	 */
10536	if (!extent_reserved)
10537		btrfs_free_reserved_data_space_noquota(fs_info, disk_num_bytes);
10538out_unlock:
10539	unlock_extent(io_tree, start, end, &cached_state);
10540out_pages:
10541	for (i = 0; i < nr_pages; i++) {
10542		if (pages[i])
10543			__free_page(pages[i]);
10544	}
10545	kvfree(pages);
10546out:
10547	if (ret >= 0)
10548		iocb->ki_pos += encoded->len;
10549	return ret;
10550}
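
/*
 * Illustrative userspace sketch (not part of this file): an encoded write
 * that satisfies the validation above, a zstd-compressed extent written at
 * file offset 0. Buffer and length names are made up; note compressed_len
 * must be strictly smaller than uncompressed_len.
 *
 *	struct iovec iov = {
 *		.iov_base = compressed_buf,
 *		.iov_len = compressed_len,
 *	};
 *	struct btrfs_ioctl_encoded_io_args args = {
 *		.iov = &iov,
 *		.iovcnt = 1,
 *		.offset = 0,
 *		.len = uncompressed_len,
 *		.unencoded_len = uncompressed_len,
 *		.unencoded_offset = 0,
 *		.compression = BTRFS_ENCODED_IO_COMPRESSION_ZSTD,
 *	};
 *	ioctl(fd, BTRFS_IOC_ENCODED_WRITE, &args);
 */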
10551
10552#ifdef CONFIG_SWAP
10553/*
10554 * Add an entry indicating a block group or device which is pinned by a
10555 * swapfile. Returns 0 on success, 1 if there is already an entry for it, or a
10556 * negative errno on failure.
10557 */
10558static int btrfs_add_swapfile_pin(struct inode *inode, void *ptr,
10559				  bool is_block_group)
10560{
10561	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
10562	struct btrfs_swapfile_pin *sp, *entry;
10563	struct rb_node **p;
10564	struct rb_node *parent = NULL;
10565
10566	sp = kmalloc(sizeof(*sp), GFP_NOFS);
10567	if (!sp)
10568		return -ENOMEM;
10569	sp->ptr = ptr;
10570	sp->inode = inode;
10571	sp->is_block_group = is_block_group;
10572	sp->bg_extent_count = 1;
10573
10574	spin_lock(&fs_info->swapfile_pins_lock);
10575	p = &fs_info->swapfile_pins.rb_node;
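	/*
	 * Entries are sorted by (ptr, inode). If an identical entry already
	 * exists, bump its extent count (for block groups) and return 1
	 * rather than inserting a duplicate.
	 */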
10576	while (*p) {
10577		parent = *p;
10578		entry = rb_entry(parent, struct btrfs_swapfile_pin, node);
10579		if (sp->ptr < entry->ptr ||
10580		    (sp->ptr == entry->ptr && sp->inode < entry->inode)) {
10581			p = &(*p)->rb_left;
10582		} else if (sp->ptr > entry->ptr ||
10583			   (sp->ptr == entry->ptr && sp->inode > entry->inode)) {
10584			p = &(*p)->rb_right;
10585		} else {
10586			if (is_block_group)
10587				entry->bg_extent_count++;
10588			spin_unlock(&fs_info->swapfile_pins_lock);
10589			kfree(sp);
10590			return 1;
10591		}
10592	}
10593	rb_link_node(&sp->node, parent, p);
10594	rb_insert_color(&sp->node, &fs_info->swapfile_pins);
10595	spin_unlock(&fs_info->swapfile_pins_lock);
10596	return 0;
10597}
10598
10599/* Free all of the entries pinned by this swapfile. */
10600static void btrfs_free_swapfile_pins(struct inode *inode)
10601{
10602	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
10603	struct btrfs_swapfile_pin *sp;
10604	struct rb_node *node, *next;
10605
10606	spin_lock(&fs_info->swapfile_pins_lock);
10607	node = rb_first(&fs_info->swapfile_pins);
10608	while (node) {
10609		next = rb_next(node);
10610		sp = rb_entry(node, struct btrfs_swapfile_pin, node);
10611		if (sp->inode == inode) {
10612			rb_erase(&sp->node, &fs_info->swapfile_pins);
10613			if (sp->is_block_group) {
10614				btrfs_dec_block_group_swap_extents(sp->ptr,
10615							   sp->bg_extent_count);
10616				btrfs_put_block_group(sp->ptr);
10617			}
10618			kfree(sp);
10619		}
10620		node = next;
10621	}
10622	spin_unlock(&fs_info->swapfile_pins_lock);
10623}
10624
10625struct btrfs_swap_info {
10626	u64 start;
10627	u64 block_start;
10628	u64 block_len;
10629	u64 lowest_ppage;
10630	u64 highest_ppage;
10631	unsigned long nr_pages;
10632	int nr_extents;
10633};
10634
10635static int btrfs_add_swap_extent(struct swap_info_struct *sis,
10636				 struct btrfs_swap_info *bsi)
10637{
10638	unsigned long nr_pages;
10639	unsigned long max_pages;
10640	u64 first_ppage, first_ppage_reported, next_ppage;
10641	int ret;
10642
10643	/*
10644	 * Our swapfile may have had its size extended after the swap header was
10645	 * written. In that case activating the swapfile should not go beyond
10646	 * the max size set in the swap header.
10647	 */
10648	if (bsi->nr_pages >= sis->max)
10649		return 0;
10650
10651	max_pages = sis->max - bsi->nr_pages;
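	/*
	 * Round the physical range inward to whole pages: swap I/O is done
	 * in page-sized units, so partial pages at either end are unusable.
	 */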
10652	first_ppage = PAGE_ALIGN(bsi->block_start) >> PAGE_SHIFT;
10653	next_ppage = PAGE_ALIGN_DOWN(bsi->block_start + bsi->block_len) >> PAGE_SHIFT;
10654
10655	if (first_ppage >= next_ppage)
10656		return 0;
10657	nr_pages = next_ppage - first_ppage;
10658	nr_pages = min(nr_pages, max_pages);
10659
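	/*
	 * If the extent begins at file offset 0, its first physical page
	 * holds the swap header, which is never used for swapping; skip it
	 * when reporting the lowest page.
	 */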
10660	first_ppage_reported = first_ppage;
10661	if (bsi->start == 0)
10662		first_ppage_reported++;
10663	if (bsi->lowest_ppage > first_ppage_reported)
10664		bsi->lowest_ppage = first_ppage_reported;
10665	if (bsi->highest_ppage < (next_ppage - 1))
10666		bsi->highest_ppage = next_ppage - 1;
10667
10668	ret = add_swap_extent(sis, bsi->nr_pages, nr_pages, first_ppage);
10669	if (ret < 0)
10670		return ret;
10671	bsi->nr_extents += ret;
10672	bsi->nr_pages += nr_pages;
10673	return 0;
10674}
10675
10676static void btrfs_swap_deactivate(struct file *file)
10677{
10678	struct inode *inode = file_inode(file);
10679
10680	btrfs_free_swapfile_pins(inode);
10681	atomic_dec(&BTRFS_I(inode)->root->nr_swapfiles);
10682}
10683
10684static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
10685			       sector_t *span)
10686{
10687	struct inode *inode = file_inode(file);
10688	struct btrfs_root *root = BTRFS_I(inode)->root;
10689	struct btrfs_fs_info *fs_info = root->fs_info;
10690	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
10691	struct extent_state *cached_state = NULL;
10692	struct extent_map *em = NULL;
10693	struct btrfs_chunk_map *map = NULL;
10694	struct btrfs_device *device = NULL;
10695	struct btrfs_swap_info bsi = {
10696		.lowest_ppage = (sector_t)-1ULL,
10697	};
10698	int ret = 0;
10699	u64 isize;
10700	u64 start;
10701
10702	/*
10703	 * If the swap file was just created, make sure delalloc is done. If the
10704	 * file changes again after this, the user is doing something stupid and
10705	 * we don't really care.
10706	 */
10707	ret = btrfs_wait_ordered_range(inode, 0, (u64)-1);
10708	if (ret)
10709		return ret;
10710
10711	/*
10712	 * The inode is locked, so these flags won't change after we check them.
10713	 */
10714	if (BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS) {
10715		btrfs_warn(fs_info, "swapfile must not be compressed");
10716		return -EINVAL;
10717	}
10718	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW)) {
10719		btrfs_warn(fs_info, "swapfile must not be copy-on-write");
10720		return -EINVAL;
10721	}
10722	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
10723		btrfs_warn(fs_info, "swapfile must not be checksummed");
10724		return -EINVAL;
10725	}
10726
10727	/*
10728	 * Balance or device remove/replace/resize can move stuff around from
10729	 * under us. The exclop protection makes sure they aren't running/won't
10730	 * run concurrently while we are mapping the swap extents, and
10731	 * fs_info->swapfile_pins prevents them from running while the swap
10732	 * file is active and moving the extents. Note that this also prevents
10733	 * a concurrent device add which isn't actually necessary, but it's not
10734	 * really worth the trouble to allow it.
10735	 */
10736	if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_SWAP_ACTIVATE)) {
10737		btrfs_warn(fs_info,
10738	   "cannot activate swapfile while exclusive operation is running");
10739		return -EBUSY;
10740	}
10741
10742	/*
	 * Prevent snapshot creation while we are activating the swap file.
	 * If snapshot creation already started before we bumped nr_swapfiles
	 * from 0 to 1 and completes before the first write into the swap file
	 * after it is activated, then that write would fall back to COW.
10748	 */
10749	if (!btrfs_drew_try_write_lock(&root->snapshot_lock)) {
10750		btrfs_exclop_finish(fs_info);
10751		btrfs_warn(fs_info,
10752	   "cannot activate swapfile because snapshot creation is in progress");
10753		return -EINVAL;
10754	}
10755	/*
10756	 * Snapshots can create extents which require COW even if NODATACOW is
10757	 * set. We use this counter to prevent snapshots. We must increment it
10758	 * before walking the extents because we don't want a concurrent
10759	 * snapshot to run after we've already checked the extents.
10760	 *
	 * It is possible that the subvolume is marked for deletion but not
	 * yet removed. To prevent this race, we check the root status before
10763	 * activating the swapfile.
10764	 */
10765	spin_lock(&root->root_item_lock);
10766	if (btrfs_root_dead(root)) {
10767		spin_unlock(&root->root_item_lock);
10768
10769		btrfs_exclop_finish(fs_info);
10770		btrfs_warn(fs_info,
10771		"cannot activate swapfile because subvolume %llu is being deleted",
10772			root->root_key.objectid);
10773		return -EPERM;
10774	}
10775	atomic_inc(&root->nr_swapfiles);
10776	spin_unlock(&root->root_item_lock);
10777
10778	isize = ALIGN_DOWN(inode->i_size, fs_info->sectorsize);
10779
10780	lock_extent(io_tree, 0, isize - 1, &cached_state);
10781	start = 0;
10782	while (start < isize) {
10783		u64 logical_block_start, physical_block_start;
10784		struct btrfs_block_group *bg;
10785		u64 len = isize - start;
10786
10787		em = btrfs_get_extent(BTRFS_I(inode), NULL, start, len);
10788		if (IS_ERR(em)) {
10789			ret = PTR_ERR(em);
10790			goto out;
10791		}
10792
10793		if (em->block_start == EXTENT_MAP_HOLE) {
10794			btrfs_warn(fs_info, "swapfile must not have holes");
10795			ret = -EINVAL;
10796			goto out;
10797		}
10798		if (em->block_start == EXTENT_MAP_INLINE) {
10799			/*
10800			 * It's unlikely we'll ever actually find ourselves
10801			 * here, as a file small enough to fit inline won't be
10802			 * big enough to store more than the swap header, but in
10803			 * case something changes in the future, let's catch it
10804			 * here rather than later.
10805			 */
10806			btrfs_warn(fs_info, "swapfile must not be inline");
10807			ret = -EINVAL;
10808			goto out;
10809		}
10810		if (extent_map_is_compressed(em)) {
10811			btrfs_warn(fs_info, "swapfile must not be compressed");
10812			ret = -EINVAL;
10813			goto out;
10814		}
10815
10816		logical_block_start = em->block_start + (start - em->start);
10817		len = min(len, em->len - (start - em->start));
10818		free_extent_map(em);
10819		em = NULL;
10820
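		/*
		 * can_nocow_extent() returns > 0 if writes to this range will
		 * not COW, 0 if they would, and < 0 on error.
		 */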
10821		ret = can_nocow_extent(inode, start, &len, NULL, NULL, NULL, false, true);
10822		if (ret < 0) {
10823			goto out;
10824		} else if (ret) {
10825			ret = 0;
10826		} else {
10827			btrfs_warn(fs_info,
10828				   "swapfile must not be copy-on-write");
10829			ret = -EINVAL;
10830			goto out;
10831		}
10832
10833		map = btrfs_get_chunk_map(fs_info, logical_block_start, len);
10834		if (IS_ERR(map)) {
10835			ret = PTR_ERR(map);
10836			goto out;
10837		}
10838
10839		if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
10840			btrfs_warn(fs_info,
10841				   "swapfile must have single data profile");
10842			ret = -EINVAL;
10843			goto out;
10844		}
10845
10846		if (device == NULL) {
10847			device = map->stripes[0].dev;
10848			ret = btrfs_add_swapfile_pin(inode, device, false);
10849			if (ret == 1)
10850				ret = 0;
10851			else if (ret)
10852				goto out;
10853		} else if (device != map->stripes[0].dev) {
10854			btrfs_warn(fs_info, "swapfile must be on one device");
10855			ret = -EINVAL;
10856			goto out;
10857		}
10858
10859		physical_block_start = (map->stripes[0].physical +
10860					(logical_block_start - map->start));
10861		len = min(len, map->chunk_len - (logical_block_start - map->start));
10862		btrfs_free_chunk_map(map);
10863		map = NULL;
10864
10865		bg = btrfs_lookup_block_group(fs_info, logical_block_start);
10866		if (!bg) {
10867			btrfs_warn(fs_info,
10868			   "could not find block group containing swapfile");
10869			ret = -EINVAL;
10870			goto out;
10871		}
10872
10873		if (!btrfs_inc_block_group_swap_extents(bg)) {
10874			btrfs_warn(fs_info,
10875			   "block group for swapfile at %llu is read-only%s",
10876			   bg->start,
10877			   atomic_read(&fs_info->scrubs_running) ?
10878				       " (scrub running)" : "");
10879			btrfs_put_block_group(bg);
10880			ret = -EINVAL;
10881			goto out;
10882		}
10883
10884		ret = btrfs_add_swapfile_pin(inode, bg, true);
10885		if (ret) {
10886			btrfs_put_block_group(bg);
10887			if (ret == 1)
10888				ret = 0;
10889			else
10890				goto out;
10891		}
10892
10893		if (bsi.block_len &&
10894		    bsi.block_start + bsi.block_len == physical_block_start) {
10895			bsi.block_len += len;
10896		} else {
10897			if (bsi.block_len) {
10898				ret = btrfs_add_swap_extent(sis, &bsi);
10899				if (ret)
10900					goto out;
10901			}
10902			bsi.start = start;
10903			bsi.block_start = physical_block_start;
10904			bsi.block_len = len;
10905		}
10906
10907		start += len;
10908	}
10909
10910	if (bsi.block_len)
10911		ret = btrfs_add_swap_extent(sis, &bsi);
10912
10913out:
10914	if (!IS_ERR_OR_NULL(em))
10915		free_extent_map(em);
10916	if (!IS_ERR_OR_NULL(map))
10917		btrfs_free_chunk_map(map);
10918
10919	unlock_extent(io_tree, 0, isize - 1, &cached_state);
10920
10921	if (ret)
10922		btrfs_swap_deactivate(file);
10923
10924	btrfs_drew_write_unlock(&root->snapshot_lock);
10925
10926	btrfs_exclop_finish(fs_info);
10927
10928	if (ret)
10929		return ret;
10930
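	/*
	 * Page 0 of the swapfile holds the swap header and is not usable for
	 * swapping, hence the nr_pages - 1 accounting below.
	 */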
10931	if (device)
10932		sis->bdev = device->bdev;
10933	*span = bsi.highest_ppage - bsi.lowest_ppage + 1;
10934	sis->max = bsi.nr_pages;
10935	sis->pages = bsi.nr_pages - 1;
10936	sis->highest_bit = bsi.nr_pages - 1;
10937	return bsi.nr_extents;
10938}
10939#else
10940static void btrfs_swap_deactivate(struct file *file)
10941{
10942}
10943
10944static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
10945			       sector_t *span)
10946{
10947	return -EOPNOTSUPP;
10948}
10949#endif
10950
10951/*
 * Update the number of bytes used in the VFS inode. When we replace extents in
10953 * a range (clone, dedupe, fallocate's zero range), we must update the number of
10954 * bytes used by the inode in an atomic manner, so that concurrent stat(2) calls
10955 * always get a correct value.
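 *
 * For example (illustrative only), a clone that replaces a 4K extent with an
 * 8K extent of new data would call btrfs_update_inode_bytes(inode, SZ_8K,
 * SZ_4K).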
10956 */
10957void btrfs_update_inode_bytes(struct btrfs_inode *inode,
10958			      const u64 add_bytes,
10959			      const u64 del_bytes)
10960{
10961	if (add_bytes == del_bytes)
10962		return;
10963
10964	spin_lock(&inode->lock);
10965	if (del_bytes > 0)
10966		inode_sub_bytes(&inode->vfs_inode, del_bytes);
10967	if (add_bytes > 0)
10968		inode_add_bytes(&inode->vfs_inode, add_bytes);
10969	spin_unlock(&inode->lock);
10970}
10971
10972/*
10973 * Verify that there are no ordered extents for a given file range.
10974 *
10975 * @inode:   The target inode.
10976 * @start:   Start offset of the file range, should be sector size aligned.
10977 * @end:     End offset (inclusive) of the file range, its value +1 should be
10978 *           sector size aligned.
10979 *
 * This should typically be used for cases where we have locked the inode's VFS
 * lock in exclusive mode, locked the inode's i_mmap_lock in exclusive mode,
 * flushed all delalloc in the range, waited for all ordered extents in the
 * range to complete and finally locked the file range in the inode's io_tree.
10985 */
10986void btrfs_assert_inode_range_clean(struct btrfs_inode *inode, u64 start, u64 end)
10987{
10988	struct btrfs_root *root = inode->root;
10989	struct btrfs_ordered_extent *ordered;
10990
10991	if (!IS_ENABLED(CONFIG_BTRFS_ASSERT))
10992		return;
10993
10994	ordered = btrfs_lookup_first_ordered_range(inode, start, end + 1 - start);
10995	if (ordered) {
10996		btrfs_err(root->fs_info,
10997"found unexpected ordered extent in file range [%llu, %llu] for inode %llu root %llu (ordered range [%llu, %llu])",
10998			  start, end, btrfs_ino(inode), root->root_key.objectid,
10999			  ordered->file_offset,
11000			  ordered->file_offset + ordered->num_bytes - 1);
11001		btrfs_put_ordered_extent(ordered);
11002	}
11003
11004	ASSERT(ordered == NULL);
11005}
11006
11007static const struct inode_operations btrfs_dir_inode_operations = {
11008	.getattr	= btrfs_getattr,
11009	.lookup		= btrfs_lookup,
11010	.create		= btrfs_create,
11011	.unlink		= btrfs_unlink,
11012	.link		= btrfs_link,
11013	.mkdir		= btrfs_mkdir,
11014	.rmdir		= btrfs_rmdir,
11015	.rename		= btrfs_rename2,
11016	.symlink	= btrfs_symlink,
11017	.setattr	= btrfs_setattr,
11018	.mknod		= btrfs_mknod,
11019	.listxattr	= btrfs_listxattr,
11020	.permission	= btrfs_permission,
11021	.get_inode_acl	= btrfs_get_acl,
11022	.set_acl	= btrfs_set_acl,
11023	.update_time	= btrfs_update_time,
11024	.tmpfile        = btrfs_tmpfile,
11025	.fileattr_get	= btrfs_fileattr_get,
11026	.fileattr_set	= btrfs_fileattr_set,
11027};
11028
11029static const struct file_operations btrfs_dir_file_operations = {
11030	.llseek		= btrfs_dir_llseek,
11031	.read		= generic_read_dir,
11032	.iterate_shared	= btrfs_real_readdir,
11033	.open		= btrfs_opendir,
11034	.unlocked_ioctl	= btrfs_ioctl,
11035#ifdef CONFIG_COMPAT
11036	.compat_ioctl	= btrfs_compat_ioctl,
11037#endif
11038	.release        = btrfs_release_file,
11039	.fsync		= btrfs_sync_file,
11040};
11041
11042/*
11043 * btrfs doesn't support the bmap operation because swapfiles
11044 * use bmap to make a mapping of extents in the file.  They assume
11045 * these extents won't change over the life of the file and they
11046 * use the bmap result to do IO directly to the drive.
11047 *
 * The btrfs bmap call would return logical addresses that aren't
 * suitable for IO, and they will also change frequently as COW
11050 * operations happen.  So, swapfile + btrfs == corruption.
11051 *
11052 * For now we're avoiding this by dropping bmap.
11053 */
11054static const struct address_space_operations btrfs_aops = {
11055	.read_folio	= btrfs_read_folio,
11056	.writepages	= btrfs_writepages,
11057	.readahead	= btrfs_readahead,
11058	.invalidate_folio = btrfs_invalidate_folio,
11059	.release_folio	= btrfs_release_folio,
11060	.migrate_folio	= btrfs_migrate_folio,
11061	.dirty_folio	= filemap_dirty_folio,
11062	.error_remove_folio = generic_error_remove_folio,
11063	.swap_activate	= btrfs_swap_activate,
11064	.swap_deactivate = btrfs_swap_deactivate,
11065};
11066
11067static const struct inode_operations btrfs_file_inode_operations = {
11068	.getattr	= btrfs_getattr,
11069	.setattr	= btrfs_setattr,
11070	.listxattr      = btrfs_listxattr,
11071	.permission	= btrfs_permission,
11072	.fiemap		= btrfs_fiemap,
11073	.get_inode_acl	= btrfs_get_acl,
11074	.set_acl	= btrfs_set_acl,
11075	.update_time	= btrfs_update_time,
11076	.fileattr_get	= btrfs_fileattr_get,
11077	.fileattr_set	= btrfs_fileattr_set,
11078};
11079static const struct inode_operations btrfs_special_inode_operations = {
11080	.getattr	= btrfs_getattr,
11081	.setattr	= btrfs_setattr,
11082	.permission	= btrfs_permission,
11083	.listxattr	= btrfs_listxattr,
11084	.get_inode_acl	= btrfs_get_acl,
11085	.set_acl	= btrfs_set_acl,
11086	.update_time	= btrfs_update_time,
11087};
11088static const struct inode_operations btrfs_symlink_inode_operations = {
11089	.get_link	= page_get_link,
11090	.getattr	= btrfs_getattr,
11091	.setattr	= btrfs_setattr,
11092	.permission	= btrfs_permission,
11093	.listxattr	= btrfs_listxattr,
11094	.update_time	= btrfs_update_time,
11095};
11096
11097const struct dentry_operations btrfs_dentry_operations = {
11098	.d_delete	= btrfs_dentry_delete,
11099};
11100