// SPDX-License-Identifier: GPL-2.0+
/*
 * NILFS inode operations.
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * Written by Ryusuke Konishi.
 *
 */

#include <linux/buffer_head.h>
#include <linux/gfp.h>
#include <linux/mpage.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/uio.h>
#include <linux/fiemap.h>
#include "nilfs.h"
#include "btnode.h"
#include "segment.h"
#include "page.h"
#include "mdt.h"
#include "cpfile.h"
#include "ifile.h"

/**
 * struct nilfs_iget_args - arguments used during comparison between inodes
 * @ino: inode number
 * @cno: checkpoint number
 * @root: pointer to the NILFS root object (mounted checkpoint)
 * @for_gc: inode for GC flag
 * @for_btnc: inode for B-tree node cache flag
 * @for_shadow: inode for shadowed page cache flag
 */
struct nilfs_iget_args {
	u64 ino;
	__u64 cno;
	struct nilfs_root *root;
	bool for_gc;
	bool for_btnc;
	bool for_shadow;
};

static int nilfs_iget_test(struct inode *inode, void *opaque);

void nilfs_inode_add_blocks(struct inode *inode, int n)
{
	struct nilfs_root *root = NILFS_I(inode)->i_root;

	inode_add_bytes(inode, i_blocksize(inode) * n);
	if (root)
		atomic64_add(n, &root->blocks_count);
}

void nilfs_inode_sub_blocks(struct inode *inode, int n)
{
	struct nilfs_root *root = NILFS_I(inode)->i_root;

	inode_sub_bytes(inode, i_blocksize(inode) * n);
	if (root)
		atomic64_sub(n, &root->blocks_count);
}

/**
 * nilfs_get_block() - get a file block on the filesystem (callback function)
 * @inode: inode struct of the target file
 * @blkoff: file block number
 * @create: flag indicating whether the block should be allocated when it
 *      has not been allocated yet
 * @bh_result: buffer head to be mapped on
 *
 * This function does not issue an actual read request for the specified
 * data block; that is done by the VFS.
 */
int nilfs_get_block(struct inode *inode, sector_t blkoff,
		    struct buffer_head *bh_result, int create)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	__u64 blknum = 0;
	int err = 0, ret;
	unsigned int maxblocks = bh_result->b_size >> inode->i_blkbits;

	down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	ret = nilfs_bmap_lookup_contig(ii->i_bmap, blkoff, &blknum, maxblocks);
	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	if (ret >= 0) {	/* found */
		map_bh(bh_result, inode->i_sb, blknum);
		if (ret > 0)
			bh_result->b_size = (ret << inode->i_blkbits);
		goto out;
	}
	/* data block was not found */
	if (ret == -ENOENT && create) {
		struct nilfs_transaction_info ti;

		bh_result->b_blocknr = 0;
		err = nilfs_transaction_begin(inode->i_sb, &ti, 1);
		if (unlikely(err))
			goto out;
		err = nilfs_bmap_insert(ii->i_bmap, blkoff,
					(unsigned long)bh_result);
		if (unlikely(err != 0)) {
			if (err == -EEXIST) {
				/*
				 * get_block() may be called concurrently by
				 * multiple callers for the same inode.
				 * However, the page holding this block must
				 * be locked in that case.
				 */
				nilfs_warn(inode->i_sb,
					   "%s (ino=%lu): a race condition while inserting a data block at offset=%llu",
					   __func__, inode->i_ino,
					   (unsigned long long)blkoff);
				err = -EAGAIN;
			}
			nilfs_transaction_abort(inode->i_sb);
			goto out;
		}
		nilfs_mark_inode_dirty_sync(inode);
		nilfs_transaction_commit(inode->i_sb); /* never fails */
		/* Error handling should be detailed */
		set_buffer_new(bh_result);
		set_buffer_delay(bh_result);
		map_bh(bh_result, inode->i_sb, 0);
		/* Disk block number must be changed to proper value */

	} else if (ret == -ENOENT) {
		/*
		 * A missing block is not an error (e.g. a hole); return
		 * without setting the mapped state flag.
		 */
		;
	} else {
		err = ret;
	}

 out:
	return err;
}
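
/*
 * Illustrative sketch (editorial, not part of this file's call graph):
 * how a caller of the get_block contract above might look.  The wanted
 * length is passed in through bh.b_size, and on success a mapped buffer
 * head reports how many contiguous blocks were found.  The variables
 * (inode, blkoff, nblocks) are hypothetical.
 *
 *	struct buffer_head bh = { .b_size = PAGE_SIZE };
 *	int err = nilfs_get_block(inode, blkoff, &bh, 0);
 *
 *	if (!err && buffer_mapped(&bh))
 *		nblocks = bh.b_size >> inode->i_blkbits;
 */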

/**
 * nilfs_read_folio() - implement the read_folio() method of the nilfs_aops
 * address_space_operations
 * @file: file struct of the file to be read
 * @folio: the folio to be read
 */
static int nilfs_read_folio(struct file *file, struct folio *folio)
{
	return mpage_read_folio(folio, nilfs_get_block);
}

static void nilfs_readahead(struct readahead_control *rac)
{
	mpage_readahead(rac, nilfs_get_block);
}

static int nilfs_writepages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	int err = 0;

	if (sb_rdonly(inode->i_sb)) {
		nilfs_clear_dirty_pages(mapping, false);
		return -EROFS;
	}

	if (wbc->sync_mode == WB_SYNC_ALL)
		err = nilfs_construct_dsync_segment(inode->i_sb, inode,
						    wbc->range_start,
						    wbc->range_end);
	return err;
}

static int nilfs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct folio *folio = page_folio(page);
	struct inode *inode = folio->mapping->host;
	int err;

	if (sb_rdonly(inode->i_sb)) {
		/*
		 * This means the filesystem was remounted read-only due
		 * to an error or metadata corruption, but dirty pages
		 * still try to get flushed in the background.  So simply
		 * discard this dirty page here.
		 */
		nilfs_clear_folio_dirty(folio, false);
		folio_unlock(folio);
		return -EROFS;
	}

	folio_redirty_for_writepage(wbc, folio);
	folio_unlock(folio);

	if (wbc->sync_mode == WB_SYNC_ALL) {
		err = nilfs_construct_segment(inode->i_sb);
		if (unlikely(err))
			return err;
	} else if (wbc->for_reclaim)
		nilfs_flush_segment(inode->i_sb, inode->i_ino);

	return 0;
}

static bool nilfs_dirty_folio(struct address_space *mapping,
		struct folio *folio)
{
	struct inode *inode = mapping->host;
	struct buffer_head *head;
	unsigned int nr_dirty = 0;
	bool ret = filemap_dirty_folio(mapping, folio);

	/*
	 * The folio may not be locked, e.g. if called from
	 * try_to_unmap_one().
	 */
	spin_lock(&mapping->i_private_lock);
	head = folio_buffers(folio);
	if (head) {
		struct buffer_head *bh = head;

		do {
			/* Do not mark hole blocks dirty */
			if (buffer_dirty(bh) || !buffer_mapped(bh))
				continue;

			set_buffer_dirty(bh);
			nr_dirty++;
		} while (bh = bh->b_this_page, bh != head);
	} else if (ret) {
		nr_dirty = 1 << (folio_shift(folio) - inode->i_blkbits);
	}
	spin_unlock(&mapping->i_private_lock);

	if (nr_dirty)
		nilfs_set_file_dirty(inode, nr_dirty);
	return ret;
}

void nilfs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	if (to > inode->i_size) {
		truncate_pagecache(inode, inode->i_size);
		nilfs_truncate(inode);
	}
}

static int nilfs_write_begin(struct file *file, struct address_space *mapping,
			     loff_t pos, unsigned len,
			     struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	int err = nilfs_transaction_begin(inode->i_sb, NULL, 1);

	if (unlikely(err))
		return err;

	err = block_write_begin(mapping, pos, len, pagep, nilfs_get_block);
	if (unlikely(err)) {
		nilfs_write_failed(mapping, pos + len);
		nilfs_transaction_abort(inode->i_sb);
	}
	return err;
}

static int nilfs_write_end(struct file *file, struct address_space *mapping,
			   loff_t pos, unsigned len, unsigned copied,
			   struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	unsigned int start = pos & (PAGE_SIZE - 1);
	unsigned int nr_dirty;
	int err;

	nr_dirty = nilfs_page_count_clean_buffers(page, start,
						  start + copied);
	copied = generic_write_end(file, mapping, pos, len, copied, page,
				   fsdata);
	nilfs_set_file_dirty(inode, nr_dirty);
	err = nilfs_transaction_commit(inode->i_sb);
	return err ? : copied;
}

static ssize_t
nilfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct inode *inode = file_inode(iocb->ki_filp);

	if (iov_iter_rw(iter) == WRITE)
		return 0;

	/* Needs synchronization with the cleaner */
	return blockdev_direct_IO(iocb, inode, iter, nilfs_get_block);
}

const struct address_space_operations nilfs_aops = {
	.writepage		= nilfs_writepage,
	.read_folio		= nilfs_read_folio,
	.writepages		= nilfs_writepages,
	.dirty_folio		= nilfs_dirty_folio,
	.readahead		= nilfs_readahead,
	.write_begin		= nilfs_write_begin,
	.write_end		= nilfs_write_end,
	.invalidate_folio	= block_invalidate_folio,
	.direct_IO		= nilfs_direct_IO,
	.is_partially_uptodate  = block_is_partially_uptodate,
};
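
/*
 * Editorial note: this table takes effect once an inode's page cache is
 * pointed at it, as __nilfs_read_inode() below does for regular files,
 * directories, and symlinks:
 *
 *	inode->i_mapping->a_ops = &nilfs_aops;
 */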

static int nilfs_insert_inode_locked(struct inode *inode,
				     struct nilfs_root *root,
				     unsigned long ino)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = root, .cno = 0, .for_gc = false,
		.for_btnc = false, .for_shadow = false
	};

	return insert_inode_locked4(inode, ino, nilfs_iget_test, &args);
}

struct inode *nilfs_new_inode(struct inode *dir, umode_t mode)
{
	struct super_block *sb = dir->i_sb;
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct inode *inode;
	struct nilfs_inode_info *ii;
	struct nilfs_root *root;
	struct buffer_head *bh;
	int err = -ENOMEM;
	ino_t ino;

	inode = new_inode(sb);
	if (unlikely(!inode))
		goto failed;

	mapping_set_gfp_mask(inode->i_mapping,
			   mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS));

	root = NILFS_I(dir)->i_root;
	ii = NILFS_I(inode);
	ii->i_state = BIT(NILFS_I_NEW);
	ii->i_root = root;

	err = nilfs_ifile_create_inode(root->ifile, &ino, &bh);
	if (unlikely(err))
		goto failed_ifile_create_inode;
	/* reference count of i_bh inherits from nilfs_mdt_read_block() */

	if (unlikely(ino < NILFS_USER_INO)) {
		nilfs_warn(sb,
			   "inode bitmap is inconsistent for reserved inodes");
		do {
			brelse(bh);
			err = nilfs_ifile_create_inode(root->ifile, &ino, &bh);
			if (unlikely(err))
				goto failed_ifile_create_inode;
		} while (ino < NILFS_USER_INO);

		nilfs_info(sb, "repaired inode bitmap for reserved inodes");
	}
	ii->i_bh = bh;

	atomic64_inc(&root->inodes_count);
	inode_init_owner(&nop_mnt_idmap, inode, dir, mode);
	inode->i_ino = ino;
	simple_inode_init_ts(inode);

	if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) {
		err = nilfs_bmap_read(ii->i_bmap, NULL);
		if (err < 0)
			goto failed_after_creation;

		set_bit(NILFS_I_BMAP, &ii->i_state);
		/* No lock is needed; iget() ensures it. */
	}

	ii->i_flags = nilfs_mask_flags(
		mode, NILFS_I(dir)->i_flags & NILFS_FL_INHERITED);

	/* ii->i_file_acl = 0; */
	/* ii->i_dir_acl = 0; */
	ii->i_dir_start_lookup = 0;
	nilfs_set_inode_flags(inode);
	spin_lock(&nilfs->ns_next_gen_lock);
	inode->i_generation = nilfs->ns_next_generation++;
	spin_unlock(&nilfs->ns_next_gen_lock);
	if (nilfs_insert_inode_locked(inode, root, ino) < 0) {
		err = -EIO;
		goto failed_after_creation;
	}

	err = nilfs_init_acl(inode, dir);
	if (unlikely(err))
		/*
		 * This should never occur.  When nilfs_init_acl() is
		 * supported, proper cancellation of the above jobs
		 * should be considered.
		 */
		goto failed_after_creation;

	return inode;

 failed_after_creation:
	clear_nlink(inode);
	if (inode->i_state & I_NEW)
		unlock_new_inode(inode);
	iput(inode);  /*
		       * raw_inode will be deleted through
		       * nilfs_evict_inode().
		       */
	goto failed;

 failed_ifile_create_inode:
	make_bad_inode(inode);
	iput(inode);
 failed:
	return ERR_PTR(err);
}

void nilfs_set_inode_flags(struct inode *inode)
{
	unsigned int flags = NILFS_I(inode)->i_flags;
	unsigned int new_fl = 0;

	if (flags & FS_SYNC_FL)
		new_fl |= S_SYNC;
	if (flags & FS_APPEND_FL)
		new_fl |= S_APPEND;
	if (flags & FS_IMMUTABLE_FL)
		new_fl |= S_IMMUTABLE;
	if (flags & FS_NOATIME_FL)
		new_fl |= S_NOATIME;
	if (flags & FS_DIRSYNC_FL)
		new_fl |= S_DIRSYNC;
	inode_set_flags(inode, new_fl, S_SYNC | S_APPEND | S_IMMUTABLE |
			S_NOATIME | S_DIRSYNC);
}

int nilfs_read_inode_common(struct inode *inode,
			    struct nilfs_inode *raw_inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int err;

	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
	i_uid_write(inode, le32_to_cpu(raw_inode->i_uid));
	i_gid_write(inode, le32_to_cpu(raw_inode->i_gid));
	set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
	inode->i_size = le64_to_cpu(raw_inode->i_size);
	inode_set_atime(inode, le64_to_cpu(raw_inode->i_mtime),
			le32_to_cpu(raw_inode->i_mtime_nsec));
	inode_set_ctime(inode, le64_to_cpu(raw_inode->i_ctime),
			le32_to_cpu(raw_inode->i_ctime_nsec));
	inode_set_mtime(inode, le64_to_cpu(raw_inode->i_mtime),
			le32_to_cpu(raw_inode->i_mtime_nsec));
	if (nilfs_is_metadata_file_inode(inode) && !S_ISREG(inode->i_mode))
		return -EIO; /* this inode is for metadata and corrupted */
	if (inode->i_nlink == 0)
		return -ESTALE; /* this inode is deleted */

	inode->i_blocks = le64_to_cpu(raw_inode->i_blocks);
	ii->i_flags = le32_to_cpu(raw_inode->i_flags);
#if 0
	ii->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
	ii->i_dir_acl = S_ISREG(inode->i_mode) ?
		0 : le32_to_cpu(raw_inode->i_dir_acl);
#endif
	ii->i_dir_start_lookup = 0;
	inode->i_generation = le32_to_cpu(raw_inode->i_generation);

	if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	    S_ISLNK(inode->i_mode)) {
		err = nilfs_bmap_read(ii->i_bmap, raw_inode);
		if (err < 0)
			return err;
		set_bit(NILFS_I_BMAP, &ii->i_state);
		/* No lock is needed; iget() ensures it. */
	}
	return 0;
}

static int __nilfs_read_inode(struct super_block *sb,
			      struct nilfs_root *root, unsigned long ino,
			      struct inode *inode)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct buffer_head *bh;
	struct nilfs_inode *raw_inode;
	int err;

	down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	err = nilfs_ifile_get_inode_block(root->ifile, ino, &bh);
	if (unlikely(err))
		goto bad_inode;

	raw_inode = nilfs_ifile_map_inode(root->ifile, ino, bh);

	err = nilfs_read_inode_common(inode, raw_inode);
	if (err)
		goto failed_unmap;

	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &nilfs_file_inode_operations;
		inode->i_fop = &nilfs_file_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &nilfs_dir_inode_operations;
		inode->i_fop = &nilfs_dir_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
	} else if (S_ISLNK(inode->i_mode)) {
		inode->i_op = &nilfs_symlink_inode_operations;
		inode_nohighmem(inode);
		inode->i_mapping->a_ops = &nilfs_aops;
	} else {
		inode->i_op = &nilfs_special_inode_operations;
		init_special_inode(
			inode, inode->i_mode,
			huge_decode_dev(le64_to_cpu(raw_inode->i_device_code)));
	}
	nilfs_ifile_unmap_inode(raw_inode);
	brelse(bh);
	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	nilfs_set_inode_flags(inode);
	mapping_set_gfp_mask(inode->i_mapping,
			   mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS));
	return 0;

 failed_unmap:
	nilfs_ifile_unmap_inode(raw_inode);
	brelse(bh);

 bad_inode:
	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	return err;
}

static int nilfs_iget_test(struct inode *inode, void *opaque)
{
	struct nilfs_iget_args *args = opaque;
	struct nilfs_inode_info *ii;

	if (args->ino != inode->i_ino || args->root != NILFS_I(inode)->i_root)
		return 0;

	ii = NILFS_I(inode);
	if (test_bit(NILFS_I_BTNC, &ii->i_state)) {
		if (!args->for_btnc)
			return 0;
	} else if (args->for_btnc) {
		return 0;
	}
	if (test_bit(NILFS_I_SHADOW, &ii->i_state)) {
		if (!args->for_shadow)
			return 0;
	} else if (args->for_shadow) {
		return 0;
	}

	if (!test_bit(NILFS_I_GCINODE, &ii->i_state))
		return !args->for_gc;

	return args->for_gc && args->cno == ii->i_cno;
}

static int nilfs_iget_set(struct inode *inode, void *opaque)
{
	struct nilfs_iget_args *args = opaque;

	inode->i_ino = args->ino;
	NILFS_I(inode)->i_cno = args->cno;
	NILFS_I(inode)->i_root = args->root;
	if (args->root && args->ino == NILFS_ROOT_INO)
		nilfs_get_root(args->root);

	if (args->for_gc)
		NILFS_I(inode)->i_state = BIT(NILFS_I_GCINODE);
	if (args->for_btnc)
		NILFS_I(inode)->i_state |= BIT(NILFS_I_BTNC);
	if (args->for_shadow)
		NILFS_I(inode)->i_state |= BIT(NILFS_I_SHADOW);
	return 0;
}

struct inode *nilfs_ilookup(struct super_block *sb, struct nilfs_root *root,
			    unsigned long ino)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = root, .cno = 0, .for_gc = false,
		.for_btnc = false, .for_shadow = false
	};

	return ilookup5(sb, ino, nilfs_iget_test, &args);
}

struct inode *nilfs_iget_locked(struct super_block *sb, struct nilfs_root *root,
				unsigned long ino)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = root, .cno = 0, .for_gc = false,
		.for_btnc = false, .for_shadow = false
	};

	return iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
}

struct inode *nilfs_iget(struct super_block *sb, struct nilfs_root *root,
			 unsigned long ino)
{
	struct inode *inode;
	int err;

	inode = nilfs_iget_locked(sb, root, ino);
	if (unlikely(!inode))
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	err = __nilfs_read_inode(sb, root, ino, inode);
	if (unlikely(err)) {
		iget_failed(inode);
		return ERR_PTR(err);
	}
	unlock_new_inode(inode);
	return inode;
}
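
/*
 * Usage sketch (hypothetical caller, mirroring what a directory lookup
 * would do): resolve a name to an inode number, then pin the in-memory
 * inode with nilfs_iget(), scoped to the mounted checkpoint's root.
 *
 *	struct inode *inode;
 *
 *	inode = nilfs_iget(dir->i_sb, NILFS_I(dir)->i_root, ino);
 *	if (IS_ERR(inode))
 *		return PTR_ERR(inode);
 */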

struct inode *nilfs_iget_for_gc(struct super_block *sb, unsigned long ino,
				__u64 cno)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = NULL, .cno = cno, .for_gc = true,
		.for_btnc = false, .for_shadow = false
	};
	struct inode *inode;
	int err;

	inode = iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
	if (unlikely(!inode))
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	err = nilfs_init_gcinode(inode);
	if (unlikely(err)) {
		iget_failed(inode);
		return ERR_PTR(err);
	}
	unlock_new_inode(inode);
	return inode;
}

/**
 * nilfs_attach_btree_node_cache - attach a B-tree node cache to the inode
 * @inode: inode object
 *
 * nilfs_attach_btree_node_cache() attaches a B-tree node cache to @inode,
 * or does nothing if the inode already has it.  This function allocates
 * an additional inode to maintain page cache of B-tree nodes one-on-one.
 *
 * Return Value: On success, 0 is returned. On errors, one of the following
 * negative error codes is returned.
 *
 * %-ENOMEM - Insufficient memory available.
 */
int nilfs_attach_btree_node_cache(struct inode *inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct inode *btnc_inode;
	struct nilfs_iget_args args;

	if (ii->i_assoc_inode)
		return 0;

	args.ino = inode->i_ino;
	args.root = ii->i_root;
	args.cno = ii->i_cno;
	args.for_gc = test_bit(NILFS_I_GCINODE, &ii->i_state) != 0;
	args.for_btnc = true;
	args.for_shadow = test_bit(NILFS_I_SHADOW, &ii->i_state) != 0;

	btnc_inode = iget5_locked(inode->i_sb, inode->i_ino, nilfs_iget_test,
				  nilfs_iget_set, &args);
	if (unlikely(!btnc_inode))
		return -ENOMEM;
	if (btnc_inode->i_state & I_NEW) {
		nilfs_init_btnc_inode(btnc_inode);
		unlock_new_inode(btnc_inode);
	}
	NILFS_I(btnc_inode)->i_assoc_inode = inode;
	NILFS_I(btnc_inode)->i_bmap = ii->i_bmap;
	ii->i_assoc_inode = btnc_inode;

	return 0;
}
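
/*
 * Sketch of the expected pairing (illustrative; locking and error paths
 * elided): a user of the B-tree node cache attaches it once, works
 * through the associated inode's page cache, and later drops the holder
 * reference via the detach helper below.
 *
 *	err = nilfs_attach_btree_node_cache(inode);
 *	if (unlikely(err))
 *		return err;
 *	(B-tree node pages now live in
 *	 NILFS_I(inode)->i_assoc_inode->i_mapping)
 *	nilfs_detach_btree_node_cache(inode);
 */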

/**
 * nilfs_detach_btree_node_cache - detach the B-tree node cache from the inode
 * @inode: inode object
 *
 * nilfs_detach_btree_node_cache() detaches the B-tree node cache and its
 * holder inode bound to @inode, or does nothing if @inode doesn't have it.
 */
void nilfs_detach_btree_node_cache(struct inode *inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct inode *btnc_inode = ii->i_assoc_inode;

	if (btnc_inode) {
		NILFS_I(btnc_inode)->i_assoc_inode = NULL;
		ii->i_assoc_inode = NULL;
		iput(btnc_inode);
	}
}

/**
 * nilfs_iget_for_shadow - obtain inode for shadow mapping
 * @inode: inode object that uses shadow mapping
 *
 * nilfs_iget_for_shadow() allocates a pair of inodes that holds page
 * caches for shadow mapping.  The page cache for data pages is set up
 * in one inode and the one for b-tree node pages is set up in the
 * other inode, which is attached to the former inode.
 *
 * Return Value: On success, a pointer to the inode for data pages is
 * returned. On errors, one of the following negative error codes is
 * returned as an error pointer.
 *
 * %-ENOMEM - Insufficient memory available.
 */
struct inode *nilfs_iget_for_shadow(struct inode *inode)
{
	struct nilfs_iget_args args = {
		.ino = inode->i_ino, .root = NULL, .cno = 0, .for_gc = false,
		.for_btnc = false, .for_shadow = true
	};
	struct inode *s_inode;
	int err;

	s_inode = iget5_locked(inode->i_sb, inode->i_ino, nilfs_iget_test,
			       nilfs_iget_set, &args);
	if (unlikely(!s_inode))
		return ERR_PTR(-ENOMEM);
	if (!(s_inode->i_state & I_NEW))
		return inode;

	NILFS_I(s_inode)->i_flags = 0;
	memset(NILFS_I(s_inode)->i_bmap, 0, sizeof(struct nilfs_bmap));
	mapping_set_gfp_mask(s_inode->i_mapping, GFP_NOFS);

	err = nilfs_attach_btree_node_cache(s_inode);
	if (unlikely(err)) {
		iget_failed(s_inode);
		return ERR_PTR(err);
	}
	unlock_new_inode(s_inode);
	return s_inode;
}

/**
 * nilfs_write_inode_common - export common inode information to on-disk inode
 * @inode:     inode object
 * @raw_inode: on-disk inode
 *
 * This function writes standard information from the on-memory inode @inode
 * to @raw_inode in the ifile, cpfile or a super root block.  Since inode
 * bmap data is not exported, nilfs_bmap_write() must be called separately
 * during log writing.
 */
void nilfs_write_inode_common(struct inode *inode,
			      struct nilfs_inode *raw_inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);

	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
	raw_inode->i_uid = cpu_to_le32(i_uid_read(inode));
	raw_inode->i_gid = cpu_to_le32(i_gid_read(inode));
	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
	raw_inode->i_size = cpu_to_le64(inode->i_size);
	raw_inode->i_ctime = cpu_to_le64(inode_get_ctime_sec(inode));
	raw_inode->i_mtime = cpu_to_le64(inode_get_mtime_sec(inode));
	raw_inode->i_ctime_nsec = cpu_to_le32(inode_get_ctime_nsec(inode));
	raw_inode->i_mtime_nsec = cpu_to_le32(inode_get_mtime_nsec(inode));
	raw_inode->i_blocks = cpu_to_le64(inode->i_blocks);

	raw_inode->i_flags = cpu_to_le32(ii->i_flags);
	raw_inode->i_generation = cpu_to_le32(inode->i_generation);

	/*
	 * When extending inode, nilfs->ns_inode_size should be checked
	 * for substitutions of appended fields.
	 */
}

void nilfs_update_inode(struct inode *inode, struct buffer_head *ibh, int flags)
{
	ino_t ino = inode->i_ino;
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct inode *ifile = ii->i_root->ifile;
	struct nilfs_inode *raw_inode;

	raw_inode = nilfs_ifile_map_inode(ifile, ino, ibh);

	if (test_and_clear_bit(NILFS_I_NEW, &ii->i_state))
		memset(raw_inode, 0, NILFS_MDT(ifile)->mi_entry_size);
	if (flags & I_DIRTY_DATASYNC)
		set_bit(NILFS_I_INODE_SYNC, &ii->i_state);

	nilfs_write_inode_common(inode, raw_inode);

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
		raw_inode->i_device_code =
			cpu_to_le64(huge_encode_dev(inode->i_rdev));

	nilfs_ifile_unmap_inode(raw_inode);
}

#define NILFS_MAX_TRUNCATE_BLOCKS	16384  /* 64MB for 4KB block */

static void nilfs_truncate_bmap(struct nilfs_inode_info *ii,
				unsigned long from)
{
	__u64 b;
	int ret;

	if (!test_bit(NILFS_I_BMAP, &ii->i_state))
		return;
repeat:
	ret = nilfs_bmap_last_key(ii->i_bmap, &b);
	if (ret == -ENOENT)
		return;
	else if (ret < 0)
		goto failed;

	if (b < from)
		return;

	b -= min_t(__u64, NILFS_MAX_TRUNCATE_BLOCKS, b - from);
	ret = nilfs_bmap_truncate(ii->i_bmap, b);
	nilfs_relax_pressure_in_lock(ii->vfs_inode.i_sb);
	if (!ret || (ret == -ENOMEM &&
		     nilfs_bmap_truncate(ii->i_bmap, b) == 0))
		goto repeat;

failed:
	nilfs_warn(ii->vfs_inode.i_sb, "error %d truncating bmap (ino=%lu)",
		   ret, ii->vfs_inode.i_ino);
}
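
/*
 * Worked example of the chunking above (editorial): with 4KB blocks and
 * NILFS_MAX_TRUNCATE_BLOCKS = 16384 (64MB), truncating a bmap whose last
 * key is 100000 down to key 0 proceeds in passes of at most 16384 blocks:
 * the first pass truncates from key 100000 - 16384 = 83616 upward, the
 * next from roughly 83615 - 16384 = 67231, and so on, with
 * nilfs_relax_pressure_in_lock() called between passes instead of the
 * segment lock being held across one huge deletion.
 */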

void nilfs_truncate(struct inode *inode)
{
	unsigned long blkoff;
	unsigned int blocksize;
	struct nilfs_transaction_info ti;
	struct super_block *sb = inode->i_sb;
	struct nilfs_inode_info *ii = NILFS_I(inode);

	if (!test_bit(NILFS_I_BMAP, &ii->i_state))
		return;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return;

	blocksize = sb->s_blocksize;
	blkoff = (inode->i_size + blocksize - 1) >> sb->s_blocksize_bits;
	nilfs_transaction_begin(sb, &ti, 0); /* never fails */

	block_truncate_page(inode->i_mapping, inode->i_size, nilfs_get_block);

	nilfs_truncate_bmap(ii, blkoff);

	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
	if (IS_SYNC(inode))
		nilfs_set_transaction_flag(NILFS_TI_SYNC);

	nilfs_mark_inode_dirty(inode);
	nilfs_set_file_dirty(inode, 0);
	nilfs_transaction_commit(sb);
	/*
	 * May construct a logical segment and may fail in sync mode.
	 * But truncate has no return value.
	 */
}

static void nilfs_clear_inode(struct inode *inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);

	/*
	 * Free the resources allocated in nilfs_read_inode() here.
	 */
	BUG_ON(!list_empty(&ii->i_dirty));
	brelse(ii->i_bh);
	ii->i_bh = NULL;

	if (nilfs_is_metadata_file_inode(inode))
		nilfs_mdt_clear(inode);

	if (test_bit(NILFS_I_BMAP, &ii->i_state))
		nilfs_bmap_clear(ii->i_bmap);

	if (!test_bit(NILFS_I_BTNC, &ii->i_state))
		nilfs_detach_btree_node_cache(inode);

	if (ii->i_root && inode->i_ino == NILFS_ROOT_INO)
		nilfs_put_root(ii->i_root);
}

void nilfs_evict_inode(struct inode *inode)
{
	struct nilfs_transaction_info ti;
	struct super_block *sb = inode->i_sb;
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct the_nilfs *nilfs;
	int ret;

	if (inode->i_nlink || !ii->i_root || unlikely(is_bad_inode(inode))) {
		truncate_inode_pages_final(&inode->i_data);
		clear_inode(inode);
		nilfs_clear_inode(inode);
		return;
	}
	nilfs_transaction_begin(sb, &ti, 0); /* never fails */

	truncate_inode_pages_final(&inode->i_data);

	nilfs = sb->s_fs_info;
	if (unlikely(sb_rdonly(sb) || !nilfs->ns_writer)) {
		/*
		 * If this inode is about to be disposed after the file system
		 * has been degraded to read-only due to file system corruption
		 * or after the writer has been detached, do not make any
		 * changes that cause writes, just clear it.
		 * Do this check after read-locking ns_segctor_sem by
		 * nilfs_transaction_begin() in order to avoid a race with
		 * the writer detach operation.
		 */
		clear_inode(inode);
		nilfs_clear_inode(inode);
		nilfs_transaction_abort(sb);
		return;
	}

	/* TODO: some of the following operations may fail.  */
	nilfs_truncate_bmap(ii, 0);
	nilfs_mark_inode_dirty(inode);
	clear_inode(inode);

	ret = nilfs_ifile_delete_inode(ii->i_root->ifile, inode->i_ino);
	if (!ret)
		atomic64_dec(&ii->i_root->inodes_count);

	nilfs_clear_inode(inode);

	if (IS_SYNC(inode))
		nilfs_set_transaction_flag(NILFS_TI_SYNC);
	nilfs_transaction_commit(sb);
	/*
	 * May construct a logical segment and may fail in sync mode.
	 * But delete_inode has no return value.
	 */
}

int nilfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
		  struct iattr *iattr)
{
	struct nilfs_transaction_info ti;
	struct inode *inode = d_inode(dentry);
	struct super_block *sb = inode->i_sb;
	int err;

	err = setattr_prepare(&nop_mnt_idmap, dentry, iattr);
	if (err)
		return err;

	err = nilfs_transaction_begin(sb, &ti, 0);
	if (unlikely(err))
		return err;

	if ((iattr->ia_valid & ATTR_SIZE) &&
	    iattr->ia_size != i_size_read(inode)) {
		inode_dio_wait(inode);
		truncate_setsize(inode, iattr->ia_size);
		nilfs_truncate(inode);
	}

	setattr_copy(&nop_mnt_idmap, inode, iattr);
	mark_inode_dirty(inode);

	if (iattr->ia_valid & ATTR_MODE) {
		err = nilfs_acl_chmod(inode);
		if (unlikely(err))
			goto out_err;
	}

	return nilfs_transaction_commit(sb);

out_err:
	nilfs_transaction_abort(sb);
	return err;
}

int nilfs_permission(struct mnt_idmap *idmap, struct inode *inode,
		     int mask)
{
	struct nilfs_root *root = NILFS_I(inode)->i_root;

	if ((mask & MAY_WRITE) && root &&
	    root->cno != NILFS_CPTREE_CURRENT_CNO)
		return -EROFS; /* snapshot is not writable */

	return generic_permission(&nop_mnt_idmap, inode, mask);
}

int nilfs_load_inode_block(struct inode *inode, struct buffer_head **pbh)
{
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int err;

	spin_lock(&nilfs->ns_inode_lock);
	if (ii->i_bh == NULL || unlikely(!buffer_uptodate(ii->i_bh))) {
		spin_unlock(&nilfs->ns_inode_lock);
		err = nilfs_ifile_get_inode_block(ii->i_root->ifile,
						  inode->i_ino, pbh);
		if (unlikely(err))
			return err;
		spin_lock(&nilfs->ns_inode_lock);
		if (ii->i_bh == NULL)
			ii->i_bh = *pbh;
		else if (unlikely(!buffer_uptodate(ii->i_bh))) {
			__brelse(ii->i_bh);
			ii->i_bh = *pbh;
		} else {
			brelse(*pbh);
			*pbh = ii->i_bh;
		}
	} else
		*pbh = ii->i_bh;

	get_bh(*pbh);
	spin_unlock(&nilfs->ns_inode_lock);
	return 0;
}

int nilfs_inode_dirty(struct inode *inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	int ret = 0;

	if (!list_empty(&ii->i_dirty)) {
		spin_lock(&nilfs->ns_inode_lock);
		ret = test_bit(NILFS_I_DIRTY, &ii->i_state) ||
			test_bit(NILFS_I_BUSY, &ii->i_state);
		spin_unlock(&nilfs->ns_inode_lock);
	}
	return ret;
}

int nilfs_set_file_dirty(struct inode *inode, unsigned int nr_dirty)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;

	atomic_add(nr_dirty, &nilfs->ns_ndirtyblks);

	if (test_and_set_bit(NILFS_I_DIRTY, &ii->i_state))
		return 0;

	spin_lock(&nilfs->ns_inode_lock);
	if (!test_bit(NILFS_I_QUEUED, &ii->i_state) &&
	    !test_bit(NILFS_I_BUSY, &ii->i_state)) {
		/*
		 * Because this routine may race with nilfs_dispose_list(),
		 * we have to check NILFS_I_QUEUED here, too.
		 */
		if (list_empty(&ii->i_dirty) && igrab(inode) == NULL) {
			/*
			 * This will happen when somebody is freeing
			 * this inode.
			 */
			nilfs_warn(inode->i_sb,
				   "cannot set file dirty (ino=%lu): the file is being freed",
				   inode->i_ino);
			spin_unlock(&nilfs->ns_inode_lock);
			return -EINVAL; /*
					 * NILFS_I_DIRTY may remain for
					 * freeing inode.
					 */
		}
		list_move_tail(&ii->i_dirty, &nilfs->ns_dirty_files);
		set_bit(NILFS_I_QUEUED, &ii->i_state);
	}
	spin_unlock(&nilfs->ns_inode_lock);
	return 0;
}

int __nilfs_mark_inode_dirty(struct inode *inode, int flags)
{
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	struct buffer_head *ibh;
	int err;

	/*
	 * Do not dirty inodes after the log writer has been detached
	 * and its nilfs_root struct has been freed.
	 */
	if (unlikely(nilfs_purging(nilfs)))
		return 0;

	err = nilfs_load_inode_block(inode, &ibh);
	if (unlikely(err)) {
		nilfs_warn(inode->i_sb,
			   "cannot mark inode dirty (ino=%lu): error %d loading inode block",
			   inode->i_ino, err);
		return err;
	}
	nilfs_update_inode(inode, ibh, flags);
	mark_buffer_dirty(ibh);
	nilfs_mdt_mark_dirty(NILFS_I(inode)->i_root->ifile);
	brelse(ibh);
	return 0;
}

/**
 * nilfs_dirty_inode - reflect changes on given inode to an inode block.
 * @inode: inode of the file to be registered.
 * @flags: flags to determine the dirty state of the inode
 *
 * nilfs_dirty_inode() loads an inode block containing the specified
 * @inode and copies data from a nilfs_inode to a corresponding inode
 * entry in the inode block. This operation is excluded from the segment
 * construction. This function can be called both as a single operation
 * and as a part of indivisible file operations.
 */
void nilfs_dirty_inode(struct inode *inode, int flags)
{
	struct nilfs_transaction_info ti;
	struct nilfs_mdt_info *mdi = NILFS_MDT(inode);

	if (is_bad_inode(inode)) {
		nilfs_warn(inode->i_sb,
			   "tried to mark bad_inode dirty. ignored.");
		dump_stack();
		return;
	}
	if (mdi) {
		nilfs_mdt_mark_dirty(inode);
		return;
	}
	nilfs_transaction_begin(inode->i_sb, &ti, 0);
	__nilfs_mark_inode_dirty(inode, flags);
	nilfs_transaction_commit(inode->i_sb); /* never fails */
}
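
/*
 * Editorial sketch (assuming the nesting behavior of
 * nilfs_transaction_begin()/nilfs_transaction_commit() implemented in
 * segment.c): when this routine runs beneath a mark_inode_dirty() issued
 * inside an ongoing file operation, the begin/commit pair above nests
 * within the outer transaction; standalone callers get a transaction of
 * their own.
 */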

int nilfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		 __u64 start, __u64 len)
{
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	__u64 logical = 0, phys = 0, size = 0;
	__u32 flags = 0;
	loff_t isize;
	sector_t blkoff, end_blkoff;
	sector_t delalloc_blkoff;
	unsigned long delalloc_blklen;
	unsigned int blkbits = inode->i_blkbits;
	int ret, n;

	ret = fiemap_prep(inode, fieinfo, start, &len, 0);
	if (ret)
		return ret;

	inode_lock(inode);

	isize = i_size_read(inode);

	blkoff = start >> blkbits;
	end_blkoff = (start + len - 1) >> blkbits;

	delalloc_blklen = nilfs_find_uncommitted_extent(inode, blkoff,
							&delalloc_blkoff);

	do {
		__u64 blkphy;
		unsigned int maxblocks;

		if (delalloc_blklen && blkoff == delalloc_blkoff) {
			if (size) {
				/* End of the current extent */
				ret = fiemap_fill_next_extent(
					fieinfo, logical, phys, size, flags);
				if (ret)
					break;
			}
			if (blkoff > end_blkoff)
				break;

			flags = FIEMAP_EXTENT_MERGED | FIEMAP_EXTENT_DELALLOC;
			logical = blkoff << blkbits;
			phys = 0;
			size = delalloc_blklen << blkbits;

			blkoff = delalloc_blkoff + delalloc_blklen;
			delalloc_blklen = nilfs_find_uncommitted_extent(
				inode, blkoff, &delalloc_blkoff);
			continue;
		}

		/*
		 * Limit the number of blocks that we look up so as
		 * not to get into the next delayed allocation extent.
		 */
		maxblocks = INT_MAX;
		if (delalloc_blklen)
			maxblocks = min_t(sector_t, delalloc_blkoff - blkoff,
					  maxblocks);
		blkphy = 0;

		down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
		n = nilfs_bmap_lookup_contig(
			NILFS_I(inode)->i_bmap, blkoff, &blkphy, maxblocks);
		up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);

		if (n < 0) {
			int past_eof;

			if (unlikely(n != -ENOENT))
				break; /* error */

			/* HOLE */
			blkoff++;
			past_eof = ((blkoff << blkbits) >= isize);

			if (size) {
				/* End of the current extent */

				if (past_eof)
					flags |= FIEMAP_EXTENT_LAST;

				ret = fiemap_fill_next_extent(
					fieinfo, logical, phys, size, flags);
				if (ret)
					break;
				size = 0;
			}
			if (blkoff > end_blkoff || past_eof)
				break;
		} else {
			if (size) {
				if (phys && blkphy << blkbits == phys + size) {
					/* The current extent goes on */
					size += n << blkbits;
				} else {
					/* Terminate the current extent */
					ret = fiemap_fill_next_extent(
						fieinfo, logical, phys, size,
						flags);
					if (ret || blkoff > end_blkoff)
						break;

					/* Start another extent */
					flags = FIEMAP_EXTENT_MERGED;
					logical = blkoff << blkbits;
					phys = blkphy << blkbits;
					size = n << blkbits;
				}
			} else {
				/* Start a new extent */
				flags = FIEMAP_EXTENT_MERGED;
				logical = blkoff << blkbits;
				phys = blkphy << blkbits;
				size = n << blkbits;
			}
			blkoff += n;
		}
		cond_resched();
	} while (true);

	/* If ret is 1 then we just hit the end of the extent array */
	if (ret == 1)
		ret = 0;

	inode_unlock(inode);
	return ret;
}