• Home
  • History
  • Annotate
  • Line#
  • Navigate
  • Raw
  • Download
  • only in /asuswrt-rt-n18u-9.0.0.4.380.2695/release/src-rt-6.x.4708/linux/linux-2.6.36/fs/reiserfs/
1/*
2 * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
3 */
4
5#include <linux/time.h>
6#include <linux/reiserfs_fs.h>
7#include <linux/reiserfs_acl.h>
8#include <linux/reiserfs_xattr.h>
9#include <asm/uaccess.h>
10#include <linux/pagemap.h>
11#include <linux/swap.h>
12#include <linux/writeback.h>
13#include <linux/blkdev.h>
14#include <linux/buffer_head.h>
15#include <linux/quotaops.h>
16
17/*
18** We pack the tails of files on file close, not at the time they are written.
19** This implies an unnecessary copy of the tail and an unnecessary indirect item
20** insertion/balancing, for files that are written in one write.
21** It avoids unnecessary tail packings (balances) for files that are written in
22** multiple writes and are small enough to have tails.
23**
24** file_release is called by the VFS layer when the file is closed.  If
** this is the last open file descriptor, and the file is
** small enough to have a tail, and the tail is currently in an
27** unformatted node, the tail is converted back into a direct item.
28**
29** We use reiserfs_truncate_file to pack the tail, since it already has
30** all the conditions coded.
31*/
/*
 * Called by the VFS on every fput() of a regular reiserfs file.  On the
 * last close it frees pending block preallocation and, if the inode is
 * flagged for pack-on-close, converts an unformatted-node tail back into
 * a direct item via reiserfs_truncate_file(inode, 0).
 *
 * Returns 0 on the not-last-close fast paths, otherwise the first error
 * from journal_begin/journal_end/reiserfs_truncate_file.
 */
static int reiserfs_file_release(struct inode *inode, struct file *filp)
{

	struct reiserfs_transaction_handle th;
	int err;
	int jbegin_failure = 0;

	/* only regular files are wired to these file_operations */
	BUG_ON(!S_ISREG(inode->i_mode));

	/* Fast path: drop our opener reference unless it is the last one.
	 * atomic_add_unless() refuses the decrement (returns 0) only when
	 * openers == 1, i.e. we may be the final closer. */
        if (atomic_add_unless(&REISERFS_I(inode)->openers, -1, 1))
		return 0;

	/* Possibly the last closer: serialize against reiserfs_file_open(),
	 * which waits on tailpack before re-raising openers from zero. */
	mutex_lock(&(REISERFS_I(inode)->tailpack));

	/* Re-check under the mutex: another opener may have raced in
	 * between the atomic test above and taking tailpack. */
        if (!atomic_dec_and_test(&REISERFS_I(inode)->openers)) {
		mutex_unlock(&(REISERFS_I(inode)->tailpack));
		return 0;
	}

	/* fast out for when nothing needs to be done */
	if ((!(REISERFS_I(inode)->i_flags & i_pack_on_close_mask) ||
	     !tail_has_to_be_packed(inode)) &&
	    REISERFS_I(inode)->i_prealloc_count <= 0) {
		mutex_unlock(&(REISERFS_I(inode)->tailpack));
		return 0;
	}

	reiserfs_write_lock(inode->i_sb);
	/* freeing preallocation only involves relogging blocks that
	 * are already in the current transaction.  preallocation gets
	 * freed at the end of each transaction, so it is impossible for
	 * us to log any additional blocks (including quota blocks)
	 */
	err = journal_begin(&th, inode->i_sb, 1);
	if (err) {
		/* uh oh, we can't allow the inode to go away while there
		 * is still preallocation blocks pending.  Try to join the
		 * aborted transaction
		 */
		jbegin_failure = err;
		err = journal_join_abort(&th, inode->i_sb, 1);

		if (err) {
			/* hmpf, our choices here aren't good.  We can pin the inode
			 * which will disallow unmount from ever happening, we can
			 * do nothing, which will corrupt random memory on unmount,
			 * or we can forcibly remove the file from the preallocation
			 * list, which will leak blocks on disk.  Lets pin the inode
			 * and let the admin know what is going on.
			 */
			igrab(inode);
			reiserfs_warning(inode->i_sb, "clm-9001",
					 "pinning inode %lu because the "
					 "preallocation can't be freed",
					 inode->i_ino);
			goto out;
		}
	}
	reiserfs_update_inode_transaction(inode);

#ifdef REISERFS_PREALLOCATE
	reiserfs_discard_prealloc(&th, inode);
#endif
	err = journal_end(&th, inode->i_sb, 1);

	/* copy back the error code from journal_begin */
	if (!err)
		err = jbegin_failure;

	if (!err &&
	    (REISERFS_I(inode)->i_flags & i_pack_on_close_mask) &&
	    tail_has_to_be_packed(inode)) {

		/* if regular file is released by last holder and it has been
		   appended (we append by unformatted node only) or its direct
		   item(s) had to be converted, then it may have to be
		   indirect2direct converted */
		err = reiserfs_truncate_file(inode, 0);
	}
      out:
	reiserfs_write_unlock(inode->i_sb);
	mutex_unlock(&(REISERFS_I(inode)->tailpack));
	return err;
}
116
117static int reiserfs_file_open(struct inode *inode, struct file *file)
118{
119	int err = dquot_file_open(inode, file);
120        if (!atomic_inc_not_zero(&REISERFS_I(inode)->openers)) {
121		/* somebody might be tailpacking on final close; wait for it */
122		mutex_lock(&(REISERFS_I(inode)->tailpack));
123		atomic_inc(&REISERFS_I(inode)->openers);
124		mutex_unlock(&(REISERFS_I(inode)->tailpack));
125	}
126	return err;
127}
128
129static void reiserfs_vfs_truncate_file(struct inode *inode)
130{
131	mutex_lock(&(REISERFS_I(inode)->tailpack));
132	reiserfs_truncate_file(inode, 1);
133	mutex_unlock(&(REISERFS_I(inode)->tailpack));
134}
135
136/* Sync a reiserfs file. */
137
138
139static int reiserfs_sync_file(struct file *filp, int datasync)
140{
141	struct inode *inode = filp->f_mapping->host;
142	int err;
143	int barrier_done;
144
145	BUG_ON(!S_ISREG(inode->i_mode));
146	err = sync_mapping_buffers(inode->i_mapping);
147	reiserfs_write_lock(inode->i_sb);
148	barrier_done = reiserfs_commit_for_inode(inode);
149	reiserfs_write_unlock(inode->i_sb);
150	if (barrier_done != 1 && reiserfs_barrier_flush(inode->i_sb))
151		blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL,
152			BLKDEV_IFL_WAIT);
153	if (barrier_done < 0)
154		return barrier_done;
155	return (err < 0) ? -EIO : 0;
156}
157
/* taken fs/buffer.c:__block_commit_write */
/*
 * Commit the byte range [from, to) of an already-written page: mark the
 * covered buffer_heads uptodate and dirty (journaling them when the
 * inode is in data-logging mode), and mark the page uptodate when every
 * buffer on it ended up uptodate.  Returns 0 or a journal error.
 */
int reiserfs_commit_page(struct inode *inode, struct page *page,
			 unsigned from, unsigned to)
{
	unsigned block_start, block_end;
	int partial = 0;	/* any buffer outside [from,to) not uptodate? */
	unsigned blocksize;
	struct buffer_head *bh, *head;
	unsigned long i_size_index = inode->i_size >> PAGE_CACHE_SHIFT;
	int new;
	int logit = reiserfs_file_data_log(inode);
	struct super_block *s = inode->i_sb;
	int bh_per_page = PAGE_CACHE_SIZE / s->s_blocksize;
	struct reiserfs_transaction_handle th;
	int ret = 0;

	th.t_trans_id = 0;
	blocksize = 1 << inode->i_blkbits;

	/* Data-logging mode: every buffer on the page may be journaled,
	 * so reserve bh_per_page + 1 blocks for the transaction. */
	if (logit) {
		reiserfs_write_lock(s);
		ret = journal_begin(&th, s, bh_per_page + 1);
		if (ret)
			goto drop_write_lock;
		reiserfs_update_inode_transaction(inode);
	}
	/* Walk the circular buffer list once; block_start == 0 lets the
	 * loop body run for the head buffer on the first pass. */
	for (bh = head = page_buffers(page), block_start = 0;
	     bh != head || !block_start;
	     block_start = block_end, bh = bh->b_this_page) {

		new = buffer_new(bh);
		clear_buffer_new(bh);
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			/* buffer entirely outside the written range */
			if (!buffer_uptodate(bh))
				partial = 1;
		} else {
			set_buffer_uptodate(bh);
			if (logit) {
				reiserfs_prepare_for_journal(s, bh, 1);
				journal_mark_dirty(&th, s, bh);
			} else if (!buffer_dirty(bh)) {
				mark_buffer_dirty(bh);
				/* do data=ordered on any page past the end
				 * of file and any buffer marked BH_New.
				 */
				if (reiserfs_data_ordered(inode->i_sb) &&
				    (new || page->index >= i_size_index)) {
					reiserfs_add_ordered_list(inode, bh);
				}
			}
		}
	}
	if (logit) {
		ret = journal_end(&th, s, bh_per_page + 1);
		/* journal_begin failure jumps here with ret already set */
	      drop_write_lock:
		reiserfs_write_unlock(s);
	}
	/*
	 * If this is a partial write which happened to make all buffers
	 * uptodate then we can optimize away a bogus readpage() for
	 * the next read(). Here we 'discover' whether the page went
	 * uptodate as a result of this (potentially partial) write.
	 */
	if (!partial)
		SetPageUptodate(page);
	return ret;
}
226
227/* Write @count bytes at position @ppos in a file indicated by @file
228   from the buffer @buf.
229
230   generic_file_write() is only appropriate for filesystems that are not seeking to optimize performance and want
231   something simple that works.  It is not for serious use by general purpose filesystems, excepting the one that it was
232   written for (ext2/3).  This is for several reasons:
233
234   * It has no understanding of any filesystem specific optimizations.
235
236   * It enters the filesystem repeatedly for each page that is written.
237
238   * It depends on reiserfs_get_block() function which if implemented by reiserfs performs costly search_by_key
239   * operation for each page it is supplied with. By contrast reiserfs_file_write() feeds as much as possible at a time
240   * to reiserfs which allows for fewer tree traversals.
241
242   * Each indirect pointer insertion takes a lot of cpu, because it involves memory moves inside of blocks.
243
244   * Asking the block allocation code for blocks one at a time is slightly less efficient.
245
246   All of these reasons for not using only generic file write were understood back when reiserfs was first miscoded to
247   use it, but we were in a hurry to make code freeze, and so it couldn't be revised then.  This new code should make
248   things right finally.
249
250   Future Features: providing search_by_key with hints.
251
252*/
253static ssize_t reiserfs_file_write(struct file *file,	/* the file we are going to write into */
254				   const char __user * buf,	/*  pointer to user supplied data
255								   (in userspace) */
256				   size_t count,	/* amount of bytes to write */
257				   loff_t * ppos	/* pointer to position in file that we start writing at. Should be updated to
258							 * new current position before returning. */
259				   )
260{
261	struct inode *inode = file->f_path.dentry->d_inode;	// Inode of the file that we are writing to.
262	/* To simplify coding at this time, we store
263	   locked pages in array for now */
264	struct reiserfs_transaction_handle th;
265	th.t_trans_id = 0;
266
267	/* If a filesystem is converted from 3.5 to 3.6, we'll have v3.5 items
268	* lying around (most of the disk, in fact). Despite the filesystem
269	* now being a v3.6 format, the old items still can't support large
270	* file sizes. Catch this case here, as the rest of the VFS layer is
271	* oblivious to the different limitations between old and new items.
272	* reiserfs_setattr catches this for truncates. This chunk is lifted
273	* from generic_write_checks. */
274	if (get_inode_item_key_version (inode) == KEY_FORMAT_3_5 &&
275	    *ppos + count > MAX_NON_LFS) {
276		if (*ppos >= MAX_NON_LFS) {
277			return -EFBIG;
278		}
279		if (count > MAX_NON_LFS - (unsigned long)*ppos)
280			count = MAX_NON_LFS - (unsigned long)*ppos;
281	}
282
283	return do_sync_write(file, buf, count, ppos);
284}
285
/*
 * File operations for regular reiserfs files.  Read/write use the
 * generic sync-over-aio paths; open/release pair up to track the
 * openers count used for pack-on-close tail conversion.
 */
const struct file_operations reiserfs_file_operations = {
	.read = do_sync_read,
	.write = reiserfs_file_write,	/* clamps v3.5 items, then do_sync_write */
	.unlocked_ioctl = reiserfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = reiserfs_compat_ioctl,
#endif
	.mmap = generic_file_mmap,
	.open = reiserfs_file_open,	/* bumps openers, syncs with tailpack */
	.release = reiserfs_file_release,	/* last close may repack the tail */
	.fsync = reiserfs_sync_file,
	.aio_read = generic_file_aio_read,
	.aio_write = generic_file_aio_write,
	.splice_read = generic_file_splice_read,
	.splice_write = generic_file_splice_write,
	.llseek = generic_file_llseek,
};
303
/*
 * Inode operations for regular reiserfs files: truncate goes through
 * the tailpack-serialized wrapper; xattr and permission handling are
 * the shared reiserfs implementations.
 */
const struct inode_operations reiserfs_file_inode_operations = {
	.truncate = reiserfs_vfs_truncate_file,	/* holds tailpack across truncate */
	.setattr = reiserfs_setattr,
	.setxattr = reiserfs_setxattr,
	.getxattr = reiserfs_getxattr,
	.listxattr = reiserfs_listxattr,
	.removexattr = reiserfs_removexattr,
	.permission = reiserfs_permission,
};
313