• Home
  • History
  • Annotate
  • Line#
  • Navigate
  • Raw
  • Download
  • only in /asuswrt-rt-n18u-9.0.0.4.380.2695/release/src-rt-6.x.4708/linux/linux-2.6/fs/nilfs2/
1/*
2 * inode.c - NILFS inode operations.
3 *
4 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
19 *
20 * Written by Ryusuke Konishi <ryusuke@osrg.net>
21 *
22 */
23
24#include <linux/buffer_head.h>
25#include <linux/gfp.h>
26#include <linux/mpage.h>
27#include <linux/writeback.h>
28#include <linux/uio.h>
29#include "nilfs.h"
30#include "btnode.h"
31#include "segment.h"
32#include "page.h"
33#include "mdt.h"
34#include "cpfile.h"
35#include "ifile.h"
36
37
/**
 * nilfs_get_block() - get a file block on the filesystem (callback function)
 * @inode - inode struct of the target file
 * @blkoff - file block number
 * @bh_result - buffer head to be mapped on
 * @create - indicate whether allocating the block or not when it has not
 *      been allocated yet.
 *
 * Maps @bh_result onto the disk block(s) backing file block @blkoff.
 * If the block is unallocated and @create is nonzero, a new block is
 * inserted into the bmap inside a transaction.
 *
 * This function does not issue actual read request of the specified data
 * block. It is done by VFS.
 */
int nilfs_get_block(struct inode *inode, sector_t blkoff,
		    struct buffer_head *bh_result, int create)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	__u64 blknum = 0;
	int err = 0, ret;
	struct inode *dat = nilfs_dat_inode(NILFS_I_NILFS(inode));
	/* the caller passes the wanted mapping size in bh_result->b_size */
	unsigned maxblocks = bh_result->b_size >> inode->i_blkbits;

	/* NOTE(review): the DAT mi_sem read lock presumably keeps the
	   lookup consistent with DAT updates; confirm against bmap code. */
	down_read(&NILFS_MDT(dat)->mi_sem);
	ret = nilfs_bmap_lookup_contig(ii->i_bmap, blkoff, &blknum, maxblocks);
	up_read(&NILFS_MDT(dat)->mi_sem);
	if (ret >= 0) {	/* found; ret is the number of contiguous blocks */
		map_bh(bh_result, inode->i_sb, blknum);
		if (ret > 0)
			bh_result->b_size = (ret << inode->i_blkbits);
		goto out;
	}
	/* data block was not found */
	if (ret == -ENOENT && create) {
		struct nilfs_transaction_info ti;

		bh_result->b_blocknr = 0;
		err = nilfs_transaction_begin(inode->i_sb, &ti, 1);
		if (unlikely(err))
			goto out;
		err = nilfs_bmap_insert(ii->i_bmap, (unsigned long)blkoff,
					(unsigned long)bh_result);
		if (unlikely(err != 0)) {
			if (err == -EEXIST) {
				/*
				 * The get_block() function could be called
				 * from multiple callers for an inode.
				 * However, the page having this block must
				 * be locked in this case.
				 */
				printk(KERN_WARNING
				       "nilfs_get_block: a race condition "
				       "while inserting a data block. "
				       "(inode number=%lu, file block "
				       "offset=%llu)\n",
				       inode->i_ino,
				       (unsigned long long)blkoff);
				err = 0;	/* benign race; block exists now */
			} else if (err == -EINVAL) {
				nilfs_error(inode->i_sb, __func__,
					    "broken bmap (inode=%lu)\n",
					    inode->i_ino);
				err = -EIO;
			}
			nilfs_transaction_abort(inode->i_sb);
			goto out;
		}
		nilfs_mark_inode_dirty(inode);
		nilfs_transaction_commit(inode->i_sb); /* never fails */
		/* Error handling should be detailed */
		set_buffer_new(bh_result);
		map_bh(bh_result, inode->i_sb, 0); /* dbn must be changed
						      to proper value */
	} else if (ret == -ENOENT) {
		/* not found is not error (e.g. hole); must return without
		   the mapped state flag. */
		;
	} else {
		err = ret;	/* lookup failed with a real error */
	}

 out:
	return err;
}
119
120/**
121 * nilfs_readpage() - implement readpage() method of nilfs_aops {}
122 * address_space_operations.
123 * @file - file struct of the file to be read
124 * @page - the page to be read
125 */
126static int nilfs_readpage(struct file *file, struct page *page)
127{
128	return mpage_readpage(page, nilfs_get_block);
129}
130
131/**
132 * nilfs_readpages() - implement readpages() method of nilfs_aops {}
133 * address_space_operations.
134 * @file - file struct of the file to be read
135 * @mapping - address_space struct used for reading multiple pages
136 * @pages - the pages to be read
137 * @nr_pages - number of pages to be read
138 */
139static int nilfs_readpages(struct file *file, struct address_space *mapping,
140			   struct list_head *pages, unsigned nr_pages)
141{
142	return mpage_readpages(mapping, pages, nr_pages, nilfs_get_block);
143}
144
145static int nilfs_writepages(struct address_space *mapping,
146			    struct writeback_control *wbc)
147{
148	struct inode *inode = mapping->host;
149	int err = 0;
150
151	if (wbc->sync_mode == WB_SYNC_ALL)
152		err = nilfs_construct_dsync_segment(inode->i_sb, inode,
153						    wbc->range_start,
154						    wbc->range_end);
155	return err;
156}
157
158static int nilfs_writepage(struct page *page, struct writeback_control *wbc)
159{
160	struct inode *inode = page->mapping->host;
161	int err;
162
163	redirty_page_for_writepage(wbc, page);
164	unlock_page(page);
165
166	if (wbc->sync_mode == WB_SYNC_ALL) {
167		err = nilfs_construct_segment(inode->i_sb);
168		if (unlikely(err))
169			return err;
170	} else if (wbc->for_reclaim)
171		nilfs_flush_segment(inode->i_sb, inode->i_ino);
172
173	return 0;
174}
175
176static int nilfs_set_page_dirty(struct page *page)
177{
178	int ret = __set_page_dirty_buffers(page);
179
180	if (ret) {
181		struct inode *inode = page->mapping->host;
182		struct nilfs_sb_info *sbi = NILFS_SB(inode->i_sb);
183		unsigned nr_dirty = 1 << (PAGE_SHIFT - inode->i_blkbits);
184
185		nilfs_set_file_dirty(sbi, inode, nr_dirty);
186	}
187	return ret;
188}
189
190static int nilfs_write_begin(struct file *file, struct address_space *mapping,
191			     loff_t pos, unsigned len, unsigned flags,
192			     struct page **pagep, void **fsdata)
193
194{
195	struct inode *inode = mapping->host;
196	int err = nilfs_transaction_begin(inode->i_sb, NULL, 1);
197
198	if (unlikely(err))
199		return err;
200
201	err = block_write_begin(mapping, pos, len, flags, pagep,
202				nilfs_get_block);
203	if (unlikely(err)) {
204		loff_t isize = mapping->host->i_size;
205		if (pos + len > isize)
206			vmtruncate(mapping->host, isize);
207
208		nilfs_transaction_abort(inode->i_sb);
209	}
210	return err;
211}
212
213static int nilfs_write_end(struct file *file, struct address_space *mapping,
214			   loff_t pos, unsigned len, unsigned copied,
215			   struct page *page, void *fsdata)
216{
217	struct inode *inode = mapping->host;
218	unsigned start = pos & (PAGE_CACHE_SIZE - 1);
219	unsigned nr_dirty;
220	int err;
221
222	nr_dirty = nilfs_page_count_clean_buffers(page, start,
223						  start + copied);
224	copied = generic_write_end(file, mapping, pos, len, copied, page,
225				   fsdata);
226	nilfs_set_file_dirty(NILFS_SB(inode->i_sb), inode, nr_dirty);
227	err = nilfs_transaction_commit(inode->i_sb);
228	return err ? : copied;
229}
230
231static ssize_t
232nilfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
233		loff_t offset, unsigned long nr_segs)
234{
235	struct file *file = iocb->ki_filp;
236	struct inode *inode = file->f_mapping->host;
237	ssize_t size;
238
239	if (rw == WRITE)
240		return 0;
241
242	/* Needs synchronization with the cleaner */
243	size = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
244				  offset, nr_segs, nilfs_get_block, NULL);
245
246	/*
247	 * In case of error extending write may have instantiated a few
248	 * blocks outside i_size. Trim these off again.
249	 */
250	if (unlikely((rw & WRITE) && size < 0)) {
251		loff_t isize = i_size_read(inode);
252		loff_t end = offset + iov_length(iov, nr_segs);
253
254		if (end > isize)
255			vmtruncate(inode, isize);
256	}
257
258	return size;
259}
260
/*
 * Address space operations shared by regular files, directories and
 * symlinks (installed in __nilfs_read_inode()).  Reads go through the
 * generic mpage code; writes are redirected to the segment constructor
 * via nilfs_writepage()/nilfs_writepages().
 */
const struct address_space_operations nilfs_aops = {
	.writepage		= nilfs_writepage,
	.readpage		= nilfs_readpage,
	.sync_page		= block_sync_page,
	.writepages		= nilfs_writepages,
	.set_page_dirty		= nilfs_set_page_dirty,
	.readpages		= nilfs_readpages,
	.write_begin		= nilfs_write_begin,
	.write_end		= nilfs_write_end,
	/* .releasepage		= nilfs_releasepage, */
	.invalidatepage		= block_invalidatepage,
	.direct_IO		= nilfs_direct_IO,
	.is_partially_uptodate  = block_is_partially_uptodate,
};
275
/**
 * nilfs_new_inode - allocate a new inode
 * @dir: directory in which the inode is created
 * @mode: file type and permission bits of the new inode
 *
 * Allocates an in-core inode and a corresponding ifile entry, then
 * initializes ownership, timestamps, flags, generation number and (for
 * regular files, directories and symlinks) an empty bmap.
 *
 * Return: the new inode on success, or an ERR_PTR() value on failure.
 */
struct inode *nilfs_new_inode(struct inode *dir, int mode)
{
	struct super_block *sb = dir->i_sb;
	struct nilfs_sb_info *sbi = NILFS_SB(sb);
	struct inode *inode;
	struct nilfs_inode_info *ii;
	int err = -ENOMEM;
	ino_t ino;

	inode = new_inode(sb);
	if (unlikely(!inode))
		goto failed;

	/* page allocations for this mapping must not re-enter the FS */
	mapping_set_gfp_mask(inode->i_mapping,
			     mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);

	ii = NILFS_I(inode);
	ii->i_state = 1 << NILFS_I_NEW;

	err = nilfs_ifile_create_inode(sbi->s_ifile, &ino, &ii->i_bh);
	if (unlikely(err))
		goto failed_ifile_create_inode;
	/* reference count of i_bh inherits from nilfs_mdt_read_block() */

	atomic_inc(&sbi->s_inodes_count);
	inode_init_owner(inode, dir, mode);
	inode->i_ino = ino;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;

	if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) {
		err = nilfs_bmap_read(ii->i_bmap, NULL);
		if (err < 0)
			goto failed_bmap;

		set_bit(NILFS_I_BMAP, &ii->i_state);
		/* No lock is needed; iget() ensures it. */
	}

	/* inherit flags from the parent, dropping those that make no
	   sense for the new file type */
	ii->i_flags = NILFS_I(dir)->i_flags;
	if (S_ISLNK(mode))
		ii->i_flags &= ~(NILFS_IMMUTABLE_FL | NILFS_APPEND_FL);
	if (!S_ISDIR(mode))
		ii->i_flags &= ~NILFS_DIRSYNC_FL;

	/* ii->i_file_acl = 0; */
	/* ii->i_dir_acl = 0; */
	ii->i_dir_start_lookup = 0;
	ii->i_cno = 0;
	nilfs_set_inode_flags(inode);
	spin_lock(&sbi->s_next_gen_lock);
	inode->i_generation = sbi->s_next_generation++;
	spin_unlock(&sbi->s_next_gen_lock);
	insert_inode_hash(inode);

	err = nilfs_init_acl(inode, dir);
	if (unlikely(err))
		goto failed_acl; /* never occur. When supporting
				    nilfs_init_acl(), proper cancellation of
				    above jobs should be considered */

	return inode;

 failed_acl:
 failed_bmap:
	inode->i_nlink = 0;
	iput(inode);  /* raw_inode will be deleted through
			 generic_delete_inode() */
	goto failed;

 failed_ifile_create_inode:
	make_bad_inode(inode);
	iput(inode);  /* if i_nlink == 1, generic_forget_inode() will be
			 called */
 failed:
	return ERR_PTR(err);
}
352
353void nilfs_free_inode(struct inode *inode)
354{
355	struct super_block *sb = inode->i_sb;
356	struct nilfs_sb_info *sbi = NILFS_SB(sb);
357
358	(void) nilfs_ifile_delete_inode(sbi->s_ifile, inode->i_ino);
359	atomic_dec(&sbi->s_inodes_count);
360}
361
/*
 * nilfs_set_inode_flags - propagate NILFS on-disk inode flags to the
 * VFS i_flags of @inode, and keep __GFP_FS cleared on the mapping.
 */
void nilfs_set_inode_flags(struct inode *inode)
{
	unsigned int flags = NILFS_I(inode)->i_flags;

	inode->i_flags &= ~(S_SYNC | S_APPEND | S_IMMUTABLE | S_NOATIME |
			    S_DIRSYNC);
	if (flags & NILFS_SYNC_FL)
		inode->i_flags |= S_SYNC;
	if (flags & NILFS_APPEND_FL)
		inode->i_flags |= S_APPEND;
	if (flags & NILFS_IMMUTABLE_FL)
		inode->i_flags |= S_IMMUTABLE;
#ifndef NILFS_ATIME_DISABLE
	if (flags & NILFS_NOATIME_FL)
#endif
		/* when NILFS_ATIME_DISABLE is defined the condition above
		   is compiled out, so S_NOATIME is set unconditionally */
		inode->i_flags |= S_NOATIME;
	if (flags & NILFS_DIRSYNC_FL)
		inode->i_flags |= S_DIRSYNC;
	/* page allocations for this mapping must not re-enter the FS */
	mapping_set_gfp_mask(inode->i_mapping,
			     mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);
}
383
/**
 * nilfs_read_inode_common - copy an on-disk inode into an in-core inode
 * @inode: in-core inode to be filled
 * @raw_inode: on-disk (little-endian) inode to read from
 *
 * Return: 0 on success, -EINVAL if the entry describes a deleted inode
 * (zero link count and mode), or a negative error from
 * nilfs_bmap_read().
 */
int nilfs_read_inode_common(struct inode *inode,
			    struct nilfs_inode *raw_inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int err;

	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
	inode->i_uid = (uid_t)le32_to_cpu(raw_inode->i_uid);
	inode->i_gid = (gid_t)le32_to_cpu(raw_inode->i_gid);
	inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
	inode->i_size = le64_to_cpu(raw_inode->i_size);
	/* i_atime is loaded from the on-disk mtime fields; the disk
	   format apparently keeps no separate atime — confirm against
	   struct nilfs_inode */
	inode->i_atime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
	inode->i_ctime.tv_sec = le64_to_cpu(raw_inode->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(raw_inode->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
	if (inode->i_nlink == 0 && inode->i_mode == 0)
		return -EINVAL; /* this inode is deleted */

	inode->i_blocks = le64_to_cpu(raw_inode->i_blocks);
	ii->i_flags = le32_to_cpu(raw_inode->i_flags);
	ii->i_dir_start_lookup = 0;
	ii->i_cno = 0;
	inode->i_generation = le32_to_cpu(raw_inode->i_generation);

	if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	    S_ISLNK(inode->i_mode)) {
		err = nilfs_bmap_read(ii->i_bmap, raw_inode);
		if (err < 0)
			return err;
		set_bit(NILFS_I_BMAP, &ii->i_state);
		/* No lock is needed; iget() ensures it. */
	}
	return 0;
}
420
/*
 * __nilfs_read_inode - read the on-disk inode @ino into @inode
 *
 * Loads the ifile block holding @ino, fills @inode via
 * nilfs_read_inode_common(), and installs per-file-type inode, file
 * and address space operations.  The whole ifile access happens under
 * the DAT mi_sem read lock.
 *
 * Returns 0 on success or a negative error code.
 */
static int __nilfs_read_inode(struct super_block *sb, unsigned long ino,
			      struct inode *inode)
{
	struct nilfs_sb_info *sbi = NILFS_SB(sb);
	struct inode *dat = nilfs_dat_inode(sbi->s_nilfs);
	struct buffer_head *bh;
	struct nilfs_inode *raw_inode;
	int err;

	down_read(&NILFS_MDT(dat)->mi_sem);
	err = nilfs_ifile_get_inode_block(sbi->s_ifile, ino, &bh);
	if (unlikely(err))
		goto bad_inode;

	raw_inode = nilfs_ifile_map_inode(sbi->s_ifile, ino, bh);

	err = nilfs_read_inode_common(inode, raw_inode);
	if (err)
		goto failed_unmap;

	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &nilfs_file_inode_operations;
		inode->i_fop = &nilfs_file_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &nilfs_dir_inode_operations;
		inode->i_fop = &nilfs_dir_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
	} else if (S_ISLNK(inode->i_mode)) {
		inode->i_op = &nilfs_symlink_inode_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
	} else {
		/* device, fifo or socket inode */
		inode->i_op = &nilfs_special_inode_operations;
		init_special_inode(
			inode, inode->i_mode,
			huge_decode_dev(le64_to_cpu(raw_inode->i_device_code)));
	}
	nilfs_ifile_unmap_inode(sbi->s_ifile, ino, bh);
	brelse(bh);
	up_read(&NILFS_MDT(dat)->mi_sem);
	nilfs_set_inode_flags(inode);
	return 0;

 failed_unmap:
	nilfs_ifile_unmap_inode(sbi->s_ifile, ino, bh);
	brelse(bh);

 bad_inode:
	up_read(&NILFS_MDT(dat)->mi_sem);
	return err;
}
472
473struct inode *nilfs_iget(struct super_block *sb, unsigned long ino)
474{
475	struct inode *inode;
476	int err;
477
478	inode = iget_locked(sb, ino);
479	if (unlikely(!inode))
480		return ERR_PTR(-ENOMEM);
481	if (!(inode->i_state & I_NEW))
482		return inode;
483
484	err = __nilfs_read_inode(sb, ino, inode);
485	if (unlikely(err)) {
486		iget_failed(inode);
487		return ERR_PTR(err);
488	}
489	unlock_new_inode(inode);
490	return inode;
491}
492
493void nilfs_write_inode_common(struct inode *inode,
494			      struct nilfs_inode *raw_inode, int has_bmap)
495{
496	struct nilfs_inode_info *ii = NILFS_I(inode);
497
498	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
499	raw_inode->i_uid = cpu_to_le32(inode->i_uid);
500	raw_inode->i_gid = cpu_to_le32(inode->i_gid);
501	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
502	raw_inode->i_size = cpu_to_le64(inode->i_size);
503	raw_inode->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
504	raw_inode->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
505	raw_inode->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
506	raw_inode->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
507	raw_inode->i_blocks = cpu_to_le64(inode->i_blocks);
508
509	raw_inode->i_flags = cpu_to_le32(ii->i_flags);
510	raw_inode->i_generation = cpu_to_le32(inode->i_generation);
511
512	if (has_bmap)
513		nilfs_bmap_write(ii->i_bmap, raw_inode);
514	else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
515		raw_inode->i_device_code =
516			cpu_to_le64(huge_encode_dev(inode->i_rdev));
517	/* When extending inode, nilfs->ns_inode_size should be checked
518	   for substitutions of appended fields */
519}
520
521void nilfs_update_inode(struct inode *inode, struct buffer_head *ibh)
522{
523	ino_t ino = inode->i_ino;
524	struct nilfs_inode_info *ii = NILFS_I(inode);
525	struct super_block *sb = inode->i_sb;
526	struct nilfs_sb_info *sbi = NILFS_SB(sb);
527	struct nilfs_inode *raw_inode;
528
529	raw_inode = nilfs_ifile_map_inode(sbi->s_ifile, ino, ibh);
530
531	if (test_and_clear_bit(NILFS_I_NEW, &ii->i_state))
532		memset(raw_inode, 0, NILFS_MDT(sbi->s_ifile)->mi_entry_size);
533	set_bit(NILFS_I_INODE_DIRTY, &ii->i_state);
534
535	nilfs_write_inode_common(inode, raw_inode, 0);
536	nilfs_ifile_unmap_inode(sbi->s_ifile, ino, ibh);
537}
538
#define NILFS_MAX_TRUNCATE_BLOCKS	16384  /* 64MB for 4KB block */

/*
 * nilfs_truncate_bmap - delete all bmap entries at or beyond @from
 *
 * Truncates in passes of at most NILFS_MAX_TRUNCATE_BLOCKS blocks,
 * releasing segment-constructor pressure between passes, and repeats
 * until the bmap's last key drops below @from (or becomes empty).
 */
static void nilfs_truncate_bmap(struct nilfs_inode_info *ii,
				unsigned long from)
{
	unsigned long b;
	int ret;

	if (!test_bit(NILFS_I_BMAP, &ii->i_state))
		return;
 repeat:
	ret = nilfs_bmap_last_key(ii->i_bmap, &b);
	if (ret == -ENOENT)
		return;		/* bmap is already empty */
	else if (ret < 0)
		goto failed;

	if (b < from)
		return;		/* nothing left at or beyond @from */

	/* chop off at most NILFS_MAX_TRUNCATE_BLOCKS blocks per pass */
	b -= min_t(unsigned long, NILFS_MAX_TRUNCATE_BLOCKS, b - from);
	ret = nilfs_bmap_truncate(ii->i_bmap, b);
	nilfs_relax_pressure_in_lock(ii->vfs_inode.i_sb);
	/* on -ENOMEM, retry once after the pressure release above */
	if (!ret || (ret == -ENOMEM &&
		     nilfs_bmap_truncate(ii->i_bmap, b) == 0))
		goto repeat;

 failed:
	if (ret == -EINVAL)
		nilfs_error(ii->vfs_inode.i_sb, __func__,
			    "bmap is broken (ino=%lu)", ii->vfs_inode.i_ino);
	else
		nilfs_warning(ii->vfs_inode.i_sb, __func__,
			      "failed to truncate bmap (ino=%lu, err=%d)",
			      ii->vfs_inode.i_ino, ret);
}
575
576void nilfs_truncate(struct inode *inode)
577{
578	unsigned long blkoff;
579	unsigned int blocksize;
580	struct nilfs_transaction_info ti;
581	struct super_block *sb = inode->i_sb;
582	struct nilfs_inode_info *ii = NILFS_I(inode);
583
584	if (!test_bit(NILFS_I_BMAP, &ii->i_state))
585		return;
586	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
587		return;
588
589	blocksize = sb->s_blocksize;
590	blkoff = (inode->i_size + blocksize - 1) >> sb->s_blocksize_bits;
591	nilfs_transaction_begin(sb, &ti, 0); /* never fails */
592
593	block_truncate_page(inode->i_mapping, inode->i_size, nilfs_get_block);
594
595	nilfs_truncate_bmap(ii, blkoff);
596
597	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
598	if (IS_SYNC(inode))
599		nilfs_set_transaction_flag(NILFS_TI_SYNC);
600
601	nilfs_mark_inode_dirty(inode);
602	nilfs_set_file_dirty(NILFS_SB(sb), inode, 0);
603	nilfs_transaction_commit(sb);
604	/* May construct a logical segment and may fail in sync mode.
605	   But truncate has no return value. */
606}
607
/*
 * nilfs_clear_inode - release in-core resources attached to @inode
 *
 * Called from nilfs_evict_inode(); drops the cached ifile buffer head,
 * clears the bmap (if any) and empties the btnode cache.
 */
static void nilfs_clear_inode(struct inode *inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);

	/*
	 * Free resources allocated in nilfs_read_inode(), here.
	 */
	BUG_ON(!list_empty(&ii->i_dirty));	/* must be off the dirty lists */
	brelse(ii->i_bh);
	ii->i_bh = NULL;

	if (test_bit(NILFS_I_BMAP, &ii->i_state))
		nilfs_bmap_clear(ii->i_bmap);

	nilfs_btnode_cache_clear(&ii->i_btnode_cache);
}
624
/*
 * nilfs_evict_inode - ->evict_inode method
 *
 * For inodes that are still linked (or marked bad), only the page
 * cache and in-core resources are released.  For unlinked inodes, the
 * file body and the on-disk inode entry are deleted inside a
 * transaction.
 */
void nilfs_evict_inode(struct inode *inode)
{
	struct nilfs_transaction_info ti;
	struct super_block *sb = inode->i_sb;
	struct nilfs_inode_info *ii = NILFS_I(inode);

	if (inode->i_nlink || unlikely(is_bad_inode(inode))) {
		if (inode->i_data.nrpages)
			truncate_inode_pages(&inode->i_data, 0);
		end_writeback(inode);
		nilfs_clear_inode(inode);
		return;
	}
	nilfs_transaction_begin(sb, &ti, 0); /* never fails */

	if (inode->i_data.nrpages)
		truncate_inode_pages(&inode->i_data, 0);

	/* delete every file block, then log the inode once more so the
	   deletion is reflected on disk */
	nilfs_truncate_bmap(ii, 0);
	nilfs_mark_inode_dirty(inode);
	end_writeback(inode);
	nilfs_clear_inode(inode);
	nilfs_free_inode(inode);
	/* nilfs_free_inode() marks inode buffer dirty */
	if (IS_SYNC(inode))
		nilfs_set_transaction_flag(NILFS_TI_SYNC);
	nilfs_transaction_commit(sb);
	/* May construct a logical segment and may fail in sync mode.
	   But delete_inode has no return value. */
}
655
656int nilfs_setattr(struct dentry *dentry, struct iattr *iattr)
657{
658	struct nilfs_transaction_info ti;
659	struct inode *inode = dentry->d_inode;
660	struct super_block *sb = inode->i_sb;
661	int err;
662
663	err = inode_change_ok(inode, iattr);
664	if (err)
665		return err;
666
667	err = nilfs_transaction_begin(sb, &ti, 0);
668	if (unlikely(err))
669		return err;
670
671	if ((iattr->ia_valid & ATTR_SIZE) &&
672	    iattr->ia_size != i_size_read(inode)) {
673		err = vmtruncate(inode, iattr->ia_size);
674		if (unlikely(err))
675			goto out_err;
676	}
677
678	setattr_copy(inode, iattr);
679	mark_inode_dirty(inode);
680
681	if (iattr->ia_valid & ATTR_MODE) {
682		err = nilfs_acl_chmod(inode);
683		if (unlikely(err))
684			goto out_err;
685	}
686
687	return nilfs_transaction_commit(sb);
688
689out_err:
690	nilfs_transaction_abort(sb);
691	return err;
692}
693
/*
 * nilfs_load_inode_block - get (and cache) the ifile block of @inode
 *
 * Stores the buffer head of the ifile block containing the inode in
 * *@pbh with an extra reference (the caller must brelse it).  The
 * result is cached in ii->i_bh; a double-checked pattern under
 * s_inode_lock resolves races with a concurrent loader.
 *
 * Returns 0 on success, or a negative error code from
 * nilfs_ifile_get_inode_block().
 */
int nilfs_load_inode_block(struct nilfs_sb_info *sbi, struct inode *inode,
			   struct buffer_head **pbh)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int err;

	spin_lock(&sbi->s_inode_lock);
	if (ii->i_bh == NULL) {
		/* drop the spinlock for the (possibly blocking) read */
		spin_unlock(&sbi->s_inode_lock);
		err = nilfs_ifile_get_inode_block(sbi->s_ifile, inode->i_ino,
						  pbh);
		if (unlikely(err))
			return err;
		spin_lock(&sbi->s_inode_lock);
		if (ii->i_bh == NULL)
			ii->i_bh = *pbh;
		else {
			/* somebody else cached it meanwhile; use theirs */
			brelse(*pbh);
			*pbh = ii->i_bh;
		}
	} else
		*pbh = ii->i_bh;

	get_bh(*pbh);
	spin_unlock(&sbi->s_inode_lock);
	return 0;
}
721
722int nilfs_inode_dirty(struct inode *inode)
723{
724	struct nilfs_inode_info *ii = NILFS_I(inode);
725	struct nilfs_sb_info *sbi = NILFS_SB(inode->i_sb);
726	int ret = 0;
727
728	if (!list_empty(&ii->i_dirty)) {
729		spin_lock(&sbi->s_inode_lock);
730		ret = test_bit(NILFS_I_DIRTY, &ii->i_state) ||
731			test_bit(NILFS_I_BUSY, &ii->i_state);
732		spin_unlock(&sbi->s_inode_lock);
733	}
734	return ret;
735}
736
/*
 * nilfs_set_file_dirty - register @inode and @nr_dirty blocks as dirty
 *
 * Adds @nr_dirty to the global dirty-block counter and, unless the
 * inode is already dirty/queued/busy, puts it on the dirty-files list
 * while pinning it with igrab().
 *
 * Returns 0 on success, or -EINVAL when the inode is being freed and
 * cannot be pinned.
 */
int nilfs_set_file_dirty(struct nilfs_sb_info *sbi, struct inode *inode,
			 unsigned nr_dirty)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);

	atomic_add(nr_dirty, &sbi->s_nilfs->ns_ndirtyblks);

	/* already dirty: the block accounting above is all that's needed */
	if (test_and_set_bit(NILFS_I_DIRTY, &ii->i_state))
		return 0;

	spin_lock(&sbi->s_inode_lock);
	if (!test_bit(NILFS_I_QUEUED, &ii->i_state) &&
	    !test_bit(NILFS_I_BUSY, &ii->i_state)) {
		/* Because this routine may race with nilfs_dispose_list(),
		   we have to check NILFS_I_QUEUED here, too. */
		if (list_empty(&ii->i_dirty) && igrab(inode) == NULL) {
			/* This will happen when somebody is freeing
			   this inode. */
			nilfs_warning(sbi->s_super, __func__,
				      "cannot get inode (ino=%lu)\n",
				      inode->i_ino);
			spin_unlock(&sbi->s_inode_lock);
			return -EINVAL; /* NILFS_I_DIRTY may remain for
					   freeing inode */
		}
		list_del(&ii->i_dirty);
		list_add_tail(&ii->i_dirty, &sbi->s_dirty_files);
		set_bit(NILFS_I_QUEUED, &ii->i_state);
	}
	spin_unlock(&sbi->s_inode_lock);
	return 0;
}
769
770int nilfs_mark_inode_dirty(struct inode *inode)
771{
772	struct nilfs_sb_info *sbi = NILFS_SB(inode->i_sb);
773	struct buffer_head *ibh;
774	int err;
775
776	err = nilfs_load_inode_block(sbi, inode, &ibh);
777	if (unlikely(err)) {
778		nilfs_warning(inode->i_sb, __func__,
779			      "failed to reget inode block.\n");
780		return err;
781	}
782	nilfs_update_inode(inode, ibh);
783	nilfs_mdt_mark_buffer_dirty(ibh);
784	nilfs_mdt_mark_dirty(sbi->s_ifile);
785	brelse(ibh);
786	return 0;
787}
788
789/**
790 * nilfs_dirty_inode - reflect changes on given inode to an inode block.
791 * @inode: inode of the file to be registered.
792 *
793 * nilfs_dirty_inode() loads a inode block containing the specified
794 * @inode and copies data from a nilfs_inode to a corresponding inode
795 * entry in the inode block. This operation is excluded from the segment
796 * construction. This function can be called both as a single operation
797 * and as a part of indivisible file operations.
798 */
799void nilfs_dirty_inode(struct inode *inode)
800{
801	struct nilfs_transaction_info ti;
802
803	if (is_bad_inode(inode)) {
804		nilfs_warning(inode->i_sb, __func__,
805			      "tried to mark bad_inode dirty. ignored.\n");
806		dump_stack();
807		return;
808	}
809	nilfs_transaction_begin(inode->i_sb, &ti, 0);
810	nilfs_mark_inode_dirty(inode);
811	nilfs_transaction_commit(inode->i_sb); /* never fails */
812}
813