1/*
2 *  linux/fs/ext2/inode.c
3 *
4 * Copyright (C) 1992, 1993, 1994, 1995
5 * Remy Card (card@masi.ibp.fr)
6 * Laboratoire MASI - Institut Blaise Pascal
7 * Universite Pierre et Marie Curie (Paris VI)
8 *
9 *  from
10 *
11 *  linux/fs/minix/inode.c
12 *
13 *  Copyright (C) 1991, 1992  Linus Torvalds
14 *
15 *  Goal-directed block allocation by Stephen Tweedie
16 * 	(sct@dcs.ed.ac.uk), 1993, 1998
17 *  Big-endian to little-endian byte-swapping/bitmaps by
18 *        David S. Miller (davem@caip.rutgers.edu), 1995
19 *  64-bit file support on 64-bit platforms by Jakub Jelinek
20 * 	(jj@sunsite.ms.mff.cuni.cz)
21 *
22 *  Assorted race fixes, rewrite of ext2_get_block() by Al Viro, 2000
23 */
24
25#include <linux/time.h>
26#include <linux/highuid.h>
27#include <linux/pagemap.h>
28#include <linux/quotaops.h>
29#include <linux/module.h>
30#include <linux/writeback.h>
31#include <linux/buffer_head.h>
32#include <linux/mpage.h>
33#include <linux/fiemap.h>
34#include <linux/namei.h>
35#include "ext2.h"
36#include "acl.h"
37#include "xip.h"
38
39MODULE_AUTHOR("Remy Card and others");
40MODULE_DESCRIPTION("Second Extended Filesystem");
41MODULE_LICENSE("GPL");
42
43static int __ext2_write_inode(struct inode *inode, int do_sync);
44
45/*
46 * Test whether an inode is a fast symlink.
47 */
48static inline int ext2_inode_is_fast_symlink(struct inode *inode)
49{
50	int ea_blocks = EXT2_I(inode)->i_file_acl ?
51		(inode->i_sb->s_blocksize >> 9) : 0;
52
53	return (S_ISLNK(inode->i_mode) &&
54		inode->i_blocks - ea_blocks == 0);
55}
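
/*
 * Example: a symlink whose target fits in the 60 bytes of i_data is stored
 * entirely inside the inode and owns no data blocks, so i_blocks counts
 * only the external xattr block (s_blocksize >> 9 512-byte sectors) when
 * i_file_acl is set.  Subtracting ea_blocks above therefore leaves exactly
 * zero for fast symlinks.
 */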
56
57static void ext2_truncate_blocks(struct inode *inode, loff_t offset);
58
59static void ext2_write_failed(struct address_space *mapping, loff_t to)
60{
61	struct inode *inode = mapping->host;
62
63	if (to > inode->i_size) {
64		truncate_pagecache(inode, to, inode->i_size);
65		ext2_truncate_blocks(inode, inode->i_size);
66	}
67}
68
69/*
70 * Called at the last iput() if i_nlink is zero.
71 */
72void ext2_evict_inode(struct inode * inode)
73{
74	struct ext2_block_alloc_info *rsv;
75	int want_delete = 0;
76
77	if (!inode->i_nlink && !is_bad_inode(inode)) {
78		want_delete = 1;
79		dquot_initialize(inode);
80	} else {
81		dquot_drop(inode);
82	}
83
84	truncate_inode_pages(&inode->i_data, 0);
85
86	if (want_delete) {
87		/* set dtime */
88		EXT2_I(inode)->i_dtime	= get_seconds();
89		mark_inode_dirty(inode);
90		__ext2_write_inode(inode, inode_needs_sync(inode));
91		/* truncate to 0 */
92		inode->i_size = 0;
93		if (inode->i_blocks)
94			ext2_truncate_blocks(inode, 0);
95	}
96
97	invalidate_inode_buffers(inode);
98	end_writeback(inode);
99
100	ext2_discard_reservation(inode);
101	rsv = EXT2_I(inode)->i_block_alloc_info;
102	EXT2_I(inode)->i_block_alloc_info = NULL;
	kfree(rsv);
105
106	if (want_delete)
107		ext2_free_inode(inode);
108}
109
110typedef struct {
111	__le32	*p;
112	__le32	key;
113	struct buffer_head *bh;
114} Indirect;
115
116static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
117{
118	p->key = *(p->p = v);
119	p->bh = bh;
120}
121
122static inline int verify_chain(Indirect *from, Indirect *to)
123{
124	while (from <= to && from->key == *from->p)
125		from++;
126	return (from > to);
127}
128
/**
 *	ext2_block_to_path - parse the block number into array of offsets
 *	@inode: inode in question (we are only interested in its superblock)
 *	@i_block: block number to be parsed
 *	@offsets: array to store the offsets in
 *	@boundary: set this non-zero if the referred-to block is likely to be
 *		followed (on disk) by an indirect block.
 *
 *	To store the locations of a file's data blocks ext2 uses a data
 *	structure common for UNIX filesystems - a tree of pointers anchored
 *	in the inode, with data blocks at the leaves and indirect blocks in
 *	the intermediate nodes. This function translates the block number
 *	into a path in that tree - the return value is the path length and
 *	@offsets[n] is the offset of the pointer to the (n+1)th node in the
 *	nth one. If @i_block is out of range (negative or too large), a
 *	warning is printed and zero is returned.
 *
 *	Note: function doesn't find node addresses, so no IO is needed. All
 *	we need to know is the capacity of indirect blocks (taken from the
 *	inode->i_sb).
 */
148
149/*
150 * Portability note: the last comparison (check that we fit into triple
151 * indirect block) is spelled differently, because otherwise on an
152 * architecture with 32-bit longs and 8Kb pages we might get into trouble
153 * if our filesystem had 8Kb blocks. We might use long long, but that would
154 * kill us on x86. Oh, well, at least the sign propagation does not matter -
155 * i_block would have to be negative in the very beginning, so we would not
156 * get there at all.
157 */
158
159static int ext2_block_to_path(struct inode *inode,
160			long i_block, int offsets[4], int *boundary)
161{
162	int ptrs = EXT2_ADDR_PER_BLOCK(inode->i_sb);
163	int ptrs_bits = EXT2_ADDR_PER_BLOCK_BITS(inode->i_sb);
164	const long direct_blocks = EXT2_NDIR_BLOCKS,
165		indirect_blocks = ptrs,
166		double_blocks = (1 << (ptrs_bits * 2));
167	int n = 0;
168	int final = 0;
169
170	if (i_block < 0) {
171		ext2_msg(inode->i_sb, KERN_WARNING,
172			"warning: %s: block < 0", __func__);
173	} else if (i_block < direct_blocks) {
174		offsets[n++] = i_block;
175		final = direct_blocks;
176	} else if ( (i_block -= direct_blocks) < indirect_blocks) {
177		offsets[n++] = EXT2_IND_BLOCK;
178		offsets[n++] = i_block;
179		final = ptrs;
180	} else if ((i_block -= indirect_blocks) < double_blocks) {
181		offsets[n++] = EXT2_DIND_BLOCK;
182		offsets[n++] = i_block >> ptrs_bits;
183		offsets[n++] = i_block & (ptrs - 1);
184		final = ptrs;
185	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
186		offsets[n++] = EXT2_TIND_BLOCK;
187		offsets[n++] = i_block >> (ptrs_bits * 2);
188		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
189		offsets[n++] = i_block & (ptrs - 1);
190		final = ptrs;
191	} else {
192		ext2_msg(inode->i_sb, KERN_WARNING,
193			"warning: %s: block is too big", __func__);
194	}
195	if (boundary)
196		*boundary = final - 1 - (i_block & (ptrs - 1));
197
198	return n;
199}
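
/*
 * A worked example, assuming a 1KiB block size (so ptrs = 256 and
 * EXT2_NDIR_BLOCKS = 12):
 *
 *	i_block = 5:	offsets[] = { 5 },			depth 1
 *	i_block = 20:	offsets[] = { EXT2_IND_BLOCK, 8 },	depth 2
 *	i_block = 300:	offsets[] = { EXT2_DIND_BLOCK, 0, 32 },	depth 3
 *
 * i.e. block 300 is reached through the double-indirect pointer in the
 * inode, then slot 0 of that block, then slot 32 of the indirect block it
 * points to.
 */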
200
201/**
202 *	ext2_get_branch - read the chain of indirect blocks leading to data
203 *	@inode: inode in question
204 *	@depth: depth of the chain (1 - direct pointer, etc.)
205 *	@offsets: offsets of pointers in inode/indirect blocks
206 *	@chain: place to store the result
207 *	@err: here we store the error value
208 *
 *	Function fills the array of triples <key, p, bh> and returns %NULL
 *	if everything went OK or the pointer to the last filled triple
 *	(incomplete one) otherwise. Upon return chain[i].key contains
 *	the number of (i+1)-th block in the chain (as it is stored in memory,
 *	i.e. little-endian 32-bit), chain[i].p contains the address of that
 *	number (it points into struct inode for i==0 and into the bh->b_data
 *	for i>0) and chain[i].bh points to the buffer_head of i-th indirect
 *	block for i>0 and NULL for i==0. In other words, it holds the block
 *	numbers of the chain, the addresses they were taken from (and where
 *	we can verify that the chain did not change) and the buffer_heads
 *	hosting these numbers.
 *
 *	Function stops when it stumbles upon a zero pointer (absent block)
 *		(pointer to last triple returned, *@err == 0)
 *	or when it gets an IO error reading an indirect block
 *		(ditto, *@err == -EIO)
 *	or when it notices that the chain had been changed while it was reading
 *		(ditto, *@err == -EAGAIN)
 *	or when it reads all @depth-1 indirect blocks successfully and finds
 *	the whole chain, all the way to the data (returns %NULL, *err == 0).
229 */
230static Indirect *ext2_get_branch(struct inode *inode,
231				 int depth,
232				 int *offsets,
233				 Indirect chain[4],
234				 int *err)
235{
236	struct super_block *sb = inode->i_sb;
237	Indirect *p = chain;
238	struct buffer_head *bh;
239
240	*err = 0;
241	/* i_data is not going away, no lock needed */
242	add_chain (chain, NULL, EXT2_I(inode)->i_data + *offsets);
243	if (!p->key)
244		goto no_block;
245	while (--depth) {
246		bh = sb_bread(sb, le32_to_cpu(p->key));
247		if (!bh)
248			goto failure;
249		read_lock(&EXT2_I(inode)->i_meta_lock);
250		if (!verify_chain(chain, p))
251			goto changed;
252		add_chain(++p, bh, (__le32*)bh->b_data + *++offsets);
253		read_unlock(&EXT2_I(inode)->i_meta_lock);
254		if (!p->key)
255			goto no_block;
256	}
257	return NULL;
258
259changed:
260	read_unlock(&EXT2_I(inode)->i_meta_lock);
261	brelse(bh);
262	*err = -EAGAIN;
263	goto no_block;
264failure:
265	*err = -EIO;
266no_block:
267	return p;
268}
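
/*
 * For the i_block = 20 example above (depth 2), a successful walk leaves
 *
 *	chain[0].p   = &EXT2_I(inode)->i_data[EXT2_IND_BLOCK]
 *	chain[0].key = *chain[0].p	(block number of the indirect block)
 *	chain[0].bh  = NULL		(the pointer lives in the inode)
 *	chain[1].p   = (__le32 *)bh->b_data + 8
 *	chain[1].key = *chain[1].p	(block number of the data block)
 *	chain[1].bh  = bh		(buffer_head of the indirect block)
 *
 * and the function returns NULL with *err == 0.
 */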
269
270/**
271 *	ext2_find_near - find a place for allocation with sufficient locality
272 *	@inode: owner
273 *	@ind: descriptor of indirect block.
274 *
275 *	This function returns the preferred place for block allocation.
276 *	It is used when heuristic for sequential allocation fails.
277 *	Rules are:
278 *	  + if there is a block to the left of our position - allocate near it.
279 *	  + if pointer will live in indirect block - allocate near that block.
280 *	  + if pointer will live in inode - allocate in the same cylinder group.
281 *
 * In the latter case we colour the starting block by the caller's PID to
 * prevent it from clashing with concurrent allocations for a different inode
 * in the same block group.   The PID is used here so that functionally related
 * files will be close by on disk.
286 *
287 *	Caller must make sure that @ind is valid and will stay that way.
288 */
289
290static ext2_fsblk_t ext2_find_near(struct inode *inode, Indirect *ind)
291{
292	struct ext2_inode_info *ei = EXT2_I(inode);
293	__le32 *start = ind->bh ? (__le32 *) ind->bh->b_data : ei->i_data;
294	__le32 *p;
295	ext2_fsblk_t bg_start;
296	ext2_fsblk_t colour;
297
298	/* Try to find previous block */
299	for (p = ind->p - 1; p >= start; p--)
300		if (*p)
301			return le32_to_cpu(*p);
302
303	/* No such thing, so let's try location of indirect block */
304	if (ind->bh)
305		return ind->bh->b_blocknr;
306
	/*
	 * Is it going to be referred to from the inode itself? OK, just put
	 * it into the same cylinder group then.
	 */
311	bg_start = ext2_group_first_block_no(inode->i_sb, ei->i_block_group);
312	colour = (current->pid % 16) *
313			(EXT2_BLOCKS_PER_GROUP(inode->i_sb) / 16);
314	return bg_start + colour;
315}
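
/*
 * For instance, with 32768 blocks per group (the default for 4KiB blocks),
 * a caller whose PID is 1234 gets colour (1234 % 16) * (32768 / 16) =
 * 2 * 2048 = 4096, so its allocations start 4096 blocks into the inode's
 * block group while a concurrent process starts at a different offset.
 */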
316
317/**
318 *	ext2_find_goal - find a preferred place for allocation.
319 *	@inode: owner
320 *	@block:  block we want
321 *	@partial: pointer to the last triple within a chain
322 *
323 *	Returns preferred place for a block (the goal).
324 */
325
326static inline ext2_fsblk_t ext2_find_goal(struct inode *inode, long block,
327					  Indirect *partial)
328{
329	struct ext2_block_alloc_info *block_i;
330
331	block_i = EXT2_I(inode)->i_block_alloc_info;
332
333	/*
334	 * try the heuristic for sequential allocation,
335	 * failing that at least try to get decent locality.
336	 */
337	if (block_i && (block == block_i->last_alloc_logical_block + 1)
338		&& (block_i->last_alloc_physical_block != 0)) {
339		return block_i->last_alloc_physical_block + 1;
340	}
341
342	return ext2_find_near(inode, partial);
343}
344
/**
 *	ext2_blks_to_allocate: Look up the block map and count the number
 *	of direct blocks that need to be allocated for the given branch.
 *
 *	@branch: chain of indirect blocks
 *	@k: number of blocks needed for indirect blocks
 *	@blks: number of data blocks to be mapped.
 *	@blocks_to_boundary:  the offset in the indirect block
 *
 *	return the total number of blocks to be allocated, including the
 *	direct and indirect blocks.
 */
357static int
358ext2_blks_to_allocate(Indirect * branch, int k, unsigned long blks,
359		int blocks_to_boundary)
360{
361	unsigned long count = 0;
362
	/*
	 * Simple case: the [t,d]indirect block(s) have not been allocated yet,
	 * so it's clear the blocks on that path have not been allocated either.
	 */
	if (k > 0) {
		/* right now we don't handle cross-boundary allocation */
369		if (blks < blocks_to_boundary + 1)
370			count += blks;
371		else
372			count += blocks_to_boundary + 1;
373		return count;
374	}
375
376	count++;
377	while (count < blks && count <= blocks_to_boundary
378		&& le32_to_cpu(*(branch[0].p + count)) == 0) {
379		count++;
380	}
381	return count;
382}
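
/*
 * For example, with blks = 4 and blocks_to_boundary = 10: if an indirect
 * block still has to be allocated (k > 0) the answer is simply 4, since a
 * missing indirect block implies none of its data blocks exist yet.  If
 * the branch is complete (k == 0), the loop above walks the existing
 * indirect block from branch[0].p and counts how many of the following
 * slots are still zero, stopping at the first one already in use.
 */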
383
/**
 *	ext2_alloc_blocks: allocate the blocks needed for a branch
 *	@indirect_blks: the number of blocks that need to be allocated for
 *			the indirect blocks
 *
 *	@new_blocks: on return it will store the new block numbers for
 *	the indirect blocks (if needed) and the first direct block,
 *	@blks:	the number of direct blocks requested
 *
 *	return the number of direct blocks actually allocated
 */
394static int ext2_alloc_blocks(struct inode *inode,
395			ext2_fsblk_t goal, int indirect_blks, int blks,
396			ext2_fsblk_t new_blocks[4], int *err)
397{
398	int target, i;
399	unsigned long count = 0;
400	int index = 0;
401	ext2_fsblk_t current_block = 0;
402	int ret = 0;
403
	/*
	 * Here we try to allocate the requested multiple blocks at once,
	 * on a best-effort basis.
	 * To build a branch, we should allocate blocks for
	 * the indirect blocks (if not allocated yet), and at least
	 * the first direct block of this branch.  That's the
	 * minimum number of blocks we need to allocate (required).
	 */
412	target = blks + indirect_blks;
413
414	while (1) {
415		count = target;
416		/* allocating blocks for indirect blocks and direct blocks */
417		current_block = ext2_new_blocks(inode,goal,&count,err);
418		if (*err)
419			goto failed_out;
420
421		target -= count;
422		/* allocate blocks for indirect blocks */
423		while (index < indirect_blks && count) {
424			new_blocks[index++] = current_block++;
425			count--;
426		}
427
428		if (count > 0)
429			break;
430	}
431
432	/* save the new block number for the first direct block */
433	new_blocks[index] = current_block;
434
435	/* total number of blocks allocated for direct blocks */
436	ret = count;
437	*err = 0;
438	return ret;
439failed_out:
	for (i = 0; i < index; i++)
441		ext2_free_blocks(inode, new_blocks[i], 1);
442	if (index)
443		mark_inode_dirty(inode);
444	return ret;
445}
446
/**
 *	ext2_alloc_branch - allocate and set up a chain of blocks.
 *	@inode: owner
 *	@indirect_blks: number of indirect blocks to allocate
 *	@blks: number of direct blocks requested; on return, the number
 *		actually allocated
 *	@goal: preferred place for allocation
 *	@offsets: offsets (in the blocks) to store the pointers to next.
 *	@branch: place to store the chain in.
 *
 *	This function allocates the blocks, zeroes out all but the last one,
 *	links them into a chain and (if we are synchronous) writes them to disk.
 *	In other words, it prepares a branch that can be spliced onto the
 *	inode. It stores the information about that chain in branch[], in
 *	the same format as ext2_get_branch() would do. We are calling it after
 *	we had read the existing part of the chain and partial points to the
 *	last triple of that (the one with zero ->key). Upon exit we have the
 *	same picture as after a successful ext2_get_block(), except that in one
 *	place the chain is disconnected - *branch->p is still zero (we did not
 *	set the last link), but branch->key contains the number that should
 *	be placed into *branch->p to fill that gap.
 *
 *	If allocation fails we free all blocks we've allocated (and forget
 *	their buffer_heads) and return the error value from the failed
 *	ext2_alloc_blocks() (normally -ENOSPC). Otherwise we set the chain
 *	as described above and return 0.
 */
471
472static int ext2_alloc_branch(struct inode *inode,
473			int indirect_blks, int *blks, ext2_fsblk_t goal,
474			int *offsets, Indirect *branch)
475{
476	int blocksize = inode->i_sb->s_blocksize;
477	int i, n = 0;
478	int err = 0;
479	struct buffer_head *bh;
480	int num;
481	ext2_fsblk_t new_blocks[4];
482	ext2_fsblk_t current_block;
483
484	num = ext2_alloc_blocks(inode, goal, indirect_blks,
485				*blks, new_blocks, &err);
486	if (err)
487		return err;
488
489	branch[0].key = cpu_to_le32(new_blocks[0]);
490	/*
491	 * metadata blocks and data blocks are allocated.
492	 */
493	for (n = 1; n <= indirect_blks;  n++) {
494		/*
495		 * Get buffer_head for parent block, zero it out
496		 * and set the pointer to new one, then send
497		 * parent to disk.
498		 */
499		bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
500		branch[n].bh = bh;
501		lock_buffer(bh);
502		memset(bh->b_data, 0, blocksize);
503		branch[n].p = (__le32 *) bh->b_data + offsets[n];
504		branch[n].key = cpu_to_le32(new_blocks[n]);
505		*branch[n].p = branch[n].key;
506		if ( n == indirect_blks) {
507			current_block = new_blocks[n];
			/*
			 * End of chain: update the last new metablock of
			 * the chain to point to the newly allocated
			 * data block numbers.
			 */
513			for (i=1; i < num; i++)
514				*(branch[n].p + i) = cpu_to_le32(++current_block);
515		}
516		set_buffer_uptodate(bh);
517		unlock_buffer(bh);
518		mark_buffer_dirty_inode(bh, inode);
		/* We used to sync bh here if IS_SYNC(inode).
		 * We now rely upon generic_write_sync()
		 * and b_inode_buffers instead - except for directories,
		 * which are synced below.
		 */
523		if (S_ISDIR(inode->i_mode) && IS_DIRSYNC(inode))
524			sync_dirty_buffer(bh);
525	}
526	*blks = num;
527	return err;
528}
529
530/**
531 * ext2_splice_branch - splice the allocated branch onto inode.
532 * @inode: owner
533 * @block: (logical) number of block we are adding
534 * @where: location of missing link
535 * @num:   number of indirect blocks we are adding
536 * @blks:  number of direct blocks we are adding
537 *
538 * This function fills the missing link and does all housekeeping needed in
539 * inode (->i_blocks, etc.). In case of success we end up with the full
540 * chain to new block and return 0.
541 */
542static void ext2_splice_branch(struct inode *inode,
543			long block, Indirect *where, int num, int blks)
544{
545	int i;
546	struct ext2_block_alloc_info *block_i;
547	ext2_fsblk_t current_block;
548
549	block_i = EXT2_I(inode)->i_block_alloc_info;
550
551	/* That's it */
552
553	*where->p = where->key;
554
	/*
	 * Update the host buffer_head or inode to point to the just-allocated
	 * direct blocks.
	 */
559	if (num == 0 && blks > 1) {
560		current_block = le32_to_cpu(where->key) + 1;
561		for (i = 1; i < blks; i++)
562			*(where->p + i ) = cpu_to_le32(current_block++);
563	}
564
	/*
	 * Update the most recently allocated logical & physical block
	 * in i_block_alloc_info, to assist in finding the proper goal block
	 * for the next allocation.
	 */
570	if (block_i) {
571		block_i->last_alloc_logical_block = block + blks - 1;
572		block_i->last_alloc_physical_block =
573				le32_to_cpu(where[num].key) + blks - 1;
574	}
575
576	/* We are done with atomic stuff, now do the rest of housekeeping */
577
578	/* had we spliced it onto indirect block? */
579	if (where->bh)
580		mark_buffer_dirty_inode(where->bh, inode);
581
582	inode->i_ctime = CURRENT_TIME_SEC;
583	mark_inode_dirty(inode);
584}
585
/*
 * Allocation strategy is simple: if we have to allocate something, we will
 * have to go the whole way to the leaf. So let's do it before attaching
 * anything to the tree, set linkage between the newborn blocks, write them
 * if sync is required, recheck the path, free and repeat if the check fails,
 * otherwise set the last missing link (that will protect us from any
 * truncate-generated removals - all blocks on the path are immune now) and
 * possibly force the write on the parent block.
 * That has a nice additional property: no special recovery from failed
 * allocations is needed - we simply release the blocks and do not touch
 * anything reachable from the inode.
 *
 * return > 0, # of blocks mapped or allocated.
 * return = 0, if plain lookup failed.
 * return < 0, error case.
 */
604static int ext2_get_blocks(struct inode *inode,
605			   sector_t iblock, unsigned long maxblocks,
606			   struct buffer_head *bh_result,
607			   int create)
608{
609	int err = -EIO;
610	int offsets[4];
611	Indirect chain[4];
612	Indirect *partial;
613	ext2_fsblk_t goal;
614	int indirect_blks;
615	int blocks_to_boundary = 0;
616	int depth;
617	struct ext2_inode_info *ei = EXT2_I(inode);
618	int count = 0;
619	ext2_fsblk_t first_block = 0;
620
621	depth = ext2_block_to_path(inode,iblock,offsets,&blocks_to_boundary);
622
623	if (depth == 0)
624		return (err);
625
626	partial = ext2_get_branch(inode, depth, offsets, chain, &err);
627	/* Simplest case - block found, no allocation needed */
628	if (!partial) {
629		first_block = le32_to_cpu(chain[depth - 1].key);
630		clear_buffer_new(bh_result); /* What's this do? */
631		count++;
632		/*map more blocks*/
633		while (count < maxblocks && count <= blocks_to_boundary) {
634			ext2_fsblk_t blk;
635
636			if (!verify_chain(chain, chain + depth - 1)) {
637				/*
638				 * Indirect block might be removed by
639				 * truncate while we were reading it.
640				 * Handling of that case: forget what we've
641				 * got now, go to reread.
642				 */
643				err = -EAGAIN;
644				count = 0;
645				break;
646			}
647			blk = le32_to_cpu(*(chain[depth-1].p + count));
648			if (blk == first_block + count)
649				count++;
650			else
651				break;
652		}
653		if (err != -EAGAIN)
654			goto got_it;
655	}
656
657	/* Next simple case - plain lookup or failed read of indirect block */
658	if (!create || err == -EIO)
659		goto cleanup;
660
661	mutex_lock(&ei->truncate_mutex);
	/*
	 * If the indirect block is missing while we are reading
	 * the chain (ext2_get_branch() returns -EAGAIN err), or
	 * if the chain has been changed after we grabbed the mutex
	 * (either because another process truncated this branch, or
	 * another get_block allocated this branch) re-grab the chain to see if
	 * the requested block has been allocated or not.
	 *
	 * Since we already block the truncate/other get_block
	 * at this point, we will have the current copy of the chain when we
	 * splice the branch into the tree.
	 */
674	if (err == -EAGAIN || !verify_chain(chain, partial)) {
675		while (partial > chain) {
676			brelse(partial->bh);
677			partial--;
678		}
679		partial = ext2_get_branch(inode, depth, offsets, chain, &err);
680		if (!partial) {
681			count++;
682			mutex_unlock(&ei->truncate_mutex);
683			if (err)
684				goto cleanup;
685			clear_buffer_new(bh_result);
686			goto got_it;
687		}
688	}
689
	/*
	 * Okay, we need to do block allocation.  Lazily initialize the block
	 * allocation info here if necessary.
	 */
694	if (S_ISREG(inode->i_mode) && (!ei->i_block_alloc_info))
695		ext2_init_block_alloc_info(inode);
696
697	goal = ext2_find_goal(inode, iblock, partial);
698
	/* the number of blocks we need to allocate for [d,t]indirect blocks */
	indirect_blks = (chain + depth) - partial - 1;
	/*
	 * Next look up the indirect map to count the total number of
	 * direct blocks to allocate for this branch.
	 */
705	count = ext2_blks_to_allocate(partial, indirect_blks,
706					maxblocks, blocks_to_boundary);
707	err = ext2_alloc_branch(inode, indirect_blks, &count, goal,
708				offsets + (partial - chain), partial);
709
710	if (err) {
711		mutex_unlock(&ei->truncate_mutex);
712		goto cleanup;
713	}
714
715	if (ext2_use_xip(inode->i_sb)) {
716		/*
717		 * we need to clear the block
718		 */
719		err = ext2_clear_xip_target (inode,
720			le32_to_cpu(chain[depth-1].key));
721		if (err) {
722			mutex_unlock(&ei->truncate_mutex);
723			goto cleanup;
724		}
725	}
726
727	ext2_splice_branch(inode, iblock, partial, indirect_blks, count);
728	mutex_unlock(&ei->truncate_mutex);
729	set_buffer_new(bh_result);
730got_it:
731	map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key));
732	if (count > blocks_to_boundary)
733		set_buffer_boundary(bh_result);
734	err = count;
735	/* Clean up and exit */
736	partial = chain + depth - 1;	/* the whole chain */
737cleanup:
738	while (partial > chain) {
739		brelse(partial->bh);
740		partial--;
741	}
742	return err;
743}
744
745int ext2_get_block(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create)
746{
747	unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
748	int ret = ext2_get_blocks(inode, iblock, max_blocks,
749			      bh_result, create);
750	if (ret > 0) {
751		bh_result->b_size = (ret << inode->i_blkbits);
752		ret = 0;
753	}
754	return ret;
755
756}
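
/*
 * ext2_get_block() follows the get_block_t convention used by the generic
 * buffer/page cache code: the caller passes in a buffer_head whose b_size
 * says how much it would like mapped, and on success the buffer_head
 * describes the on-disk location.  A minimal sketch of a plain lookup
 * (create == 0, error handling omitted):
 *
 *	struct buffer_head tmp = { .b_size = inode->i_sb->s_blocksize };
 *
 *	if (!ext2_get_block(inode, iblock, &tmp, 0) && buffer_mapped(&tmp))
 *		phys = tmp.b_blocknr;
 *
 * Helpers such as generic_block_bmap() and the mpage read/write paths
 * drive ext2_get_block() in essentially this way.
 */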
757
758int ext2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
759		u64 start, u64 len)
760{
761	return generic_block_fiemap(inode, fieinfo, start, len,
762				    ext2_get_block);
763}
764
765static int ext2_writepage(struct page *page, struct writeback_control *wbc)
766{
767	return block_write_full_page(page, ext2_get_block, wbc);
768}
769
770static int ext2_readpage(struct file *file, struct page *page)
771{
772	return mpage_readpage(page, ext2_get_block);
773}
774
775static int
776ext2_readpages(struct file *file, struct address_space *mapping,
777		struct list_head *pages, unsigned nr_pages)
778{
779	return mpage_readpages(mapping, pages, nr_pages, ext2_get_block);
780}
781
782static int
783ext2_write_begin(struct file *file, struct address_space *mapping,
784		loff_t pos, unsigned len, unsigned flags,
785		struct page **pagep, void **fsdata)
786{
787	int ret;
788
789	ret = block_write_begin(mapping, pos, len, flags, pagep,
790				ext2_get_block);
791	if (ret < 0)
792		ext2_write_failed(mapping, pos + len);
793	return ret;
794}
795
796static int ext2_write_end(struct file *file, struct address_space *mapping,
797			loff_t pos, unsigned len, unsigned copied,
798			struct page *page, void *fsdata)
799{
800	int ret;
801
802	ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
803	if (ret < len)
804		ext2_write_failed(mapping, pos + len);
805	return ret;
806}
807
808static int
809ext2_nobh_write_begin(struct file *file, struct address_space *mapping,
810		loff_t pos, unsigned len, unsigned flags,
811		struct page **pagep, void **fsdata)
812{
813	int ret;
814
815	ret = nobh_write_begin(mapping, pos, len, flags, pagep, fsdata,
816			       ext2_get_block);
817	if (ret < 0)
818		ext2_write_failed(mapping, pos + len);
819	return ret;
820}
821
822static int ext2_nobh_writepage(struct page *page,
823			struct writeback_control *wbc)
824{
825	return nobh_writepage(page, ext2_get_block, wbc);
826}
827
828static sector_t ext2_bmap(struct address_space *mapping, sector_t block)
829{
830	return generic_block_bmap(mapping,block,ext2_get_block);
831}
832
833static ssize_t
834ext2_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
835			loff_t offset, unsigned long nr_segs)
836{
837	struct file *file = iocb->ki_filp;
838	struct address_space *mapping = file->f_mapping;
839	struct inode *inode = mapping->host;
840	ssize_t ret;
841
842	ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev,
843				iov, offset, nr_segs, ext2_get_block, NULL);
844	if (ret < 0 && (rw & WRITE))
845		ext2_write_failed(mapping, offset + iov_length(iov, nr_segs));
846	return ret;
847}
848
849static int
850ext2_writepages(struct address_space *mapping, struct writeback_control *wbc)
851{
852	return mpage_writepages(mapping, wbc, ext2_get_block);
853}
854
855const struct address_space_operations ext2_aops = {
856	.readpage		= ext2_readpage,
857	.readpages		= ext2_readpages,
858	.writepage		= ext2_writepage,
859	.sync_page		= block_sync_page,
860	.write_begin		= ext2_write_begin,
861	.write_end		= ext2_write_end,
862	.bmap			= ext2_bmap,
863	.direct_IO		= ext2_direct_IO,
864	.writepages		= ext2_writepages,
865	.migratepage		= buffer_migrate_page,
866	.is_partially_uptodate	= block_is_partially_uptodate,
867	.error_remove_page	= generic_error_remove_page,
868};
869
870const struct address_space_operations ext2_aops_xip = {
871	.bmap			= ext2_bmap,
872	.get_xip_mem		= ext2_get_xip_mem,
873};
874
875const struct address_space_operations ext2_nobh_aops = {
876	.readpage		= ext2_readpage,
877	.readpages		= ext2_readpages,
878	.writepage		= ext2_nobh_writepage,
879	.sync_page		= block_sync_page,
880	.write_begin		= ext2_nobh_write_begin,
881	.write_end		= nobh_write_end,
882	.bmap			= ext2_bmap,
883	.direct_IO		= ext2_direct_IO,
884	.writepages		= ext2_writepages,
885	.migratepage		= buffer_migrate_page,
886	.error_remove_page	= generic_error_remove_page,
887};
888
889/*
890 * Probably it should be a library function... search for first non-zero word
891 * or memcmp with zero_page, whatever is better for particular architecture.
892 * Linus?
893 */
894static inline int all_zeroes(__le32 *p, __le32 *q)
895{
896	while (p < q)
897		if (*p++)
898			return 0;
899	return 1;
900}
901
902/**
903 *	ext2_find_shared - find the indirect blocks for partial truncation.
904 *	@inode:	  inode in question
905 *	@depth:	  depth of the affected branch
906 *	@offsets: offsets of pointers in that branch (see ext2_block_to_path)
907 *	@chain:	  place to store the pointers to partial indirect blocks
 *	@top:	  place to store the (detached) top of the branch
909 *
910 *	This is a helper function used by ext2_truncate().
911 *
 *	When we do truncate() we may have to clean the ends of several indirect
 *	blocks but leave the blocks themselves alive. A block is partially
 *	truncated if some data below the new i_size is referred to from it (and
 *	it is on the path to the first completely truncated data block, indeed).
 *	We have to free the top of that path along with everything to the right
 *	of the path. Since no allocation past the truncation point is possible
 *	until ext2_truncate() finishes, we may safely do the latter, but the top
 *	of the branch may require special attention - pageout below the
 *	truncation point might try to populate it.
921 *
922 *	We atomically detach the top of branch from the tree, store the block
923 *	number of its root in *@top, pointers to buffer_heads of partially
924 *	truncated blocks - in @chain[].bh and pointers to their last elements
925 *	that should not be removed - in @chain[].p. Return value is the pointer
926 *	to last filled element of @chain.
927 *
 *	The work of actually freeing the subtrees is left to the caller:
929 *		a) free the subtree starting from *@top
930 *		b) free the subtrees whose roots are stored in
931 *			(@chain[i].p+1 .. end of @chain[i].bh->b_data)
932 *		c) free the subtrees growing from the inode past the @chain[0].p
933 *			(no partially truncated stuff there).
934 */
935
936static Indirect *ext2_find_shared(struct inode *inode,
937				int depth,
938				int offsets[4],
939				Indirect chain[4],
940				__le32 *top)
941{
942	Indirect *partial, *p;
943	int k, err;
944
945	*top = 0;
946	for (k = depth; k > 1 && !offsets[k-1]; k--)
947		;
948	partial = ext2_get_branch(inode, k, offsets, chain, &err);
949	if (!partial)
950		partial = chain + k-1;
	/*
	 * If the branch has acquired a continuation since we looked at it -
	 * fine, it should all survive and the (new) top doesn't belong to us.
	 */
955	write_lock(&EXT2_I(inode)->i_meta_lock);
956	if (!partial->key && *partial->p) {
957		write_unlock(&EXT2_I(inode)->i_meta_lock);
958		goto no_top;
959	}
960	for (p=partial; p>chain && all_zeroes((__le32*)p->bh->b_data,p->p); p--)
961		;
962	/*
963	 * OK, we've found the last block that must survive. The rest of our
964	 * branch should be detached before unlocking. However, if that rest
965	 * of branch is all ours and does not grow immediately from the inode
966	 * it's easier to cheat and just decrement partial->p.
967	 */
968	if (p == chain + k - 1 && p > chain) {
969		p->p--;
970	} else {
971		*top = *p->p;
972		*p->p = 0;
973	}
974	write_unlock(&EXT2_I(inode)->i_meta_lock);
975
	while (partial > p) {
		brelse(partial->bh);
		partial--;
	}
981no_top:
982	return partial;
983}
984
985/**
986 *	ext2_free_data - free a list of data blocks
987 *	@inode:	inode we are dealing with
988 *	@p:	array of block numbers
989 *	@q:	points immediately past the end of array
990 *
 *	We are freeing all blocks referred to from that array (numbers are
992 *	stored as little-endian 32-bit) and updating @inode->i_blocks
993 *	appropriately.
994 */
995static inline void ext2_free_data(struct inode *inode, __le32 *p, __le32 *q)
996{
997	unsigned long block_to_free = 0, count = 0;
998	unsigned long nr;
999
1000	for ( ; p < q ; p++) {
1001		nr = le32_to_cpu(*p);
1002		if (nr) {
1003			*p = 0;
1004			/* accumulate blocks to free if they're contiguous */
1005			if (count == 0)
1006				goto free_this;
1007			else if (block_to_free == nr - count)
1008				count++;
1009			else {
1010				ext2_free_blocks (inode, block_to_free, count);
1011				mark_inode_dirty(inode);
1012			free_this:
1013				block_to_free = nr;
1014				count = 1;
1015			}
1016		}
1017	}
1018	if (count > 0) {
1019		ext2_free_blocks (inode, block_to_free, count);
1020		mark_inode_dirty(inode);
1021	}
1022}
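
/*
 * The loop above batches contiguous runs: freeing the array
 * { 100, 101, 102, 200 } results in just two calls,
 * ext2_free_blocks(inode, 100, 3) and ext2_free_blocks(inode, 200, 1),
 * with the inode marked dirty after each batch.
 */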
1023
1024/**
1025 *	ext2_free_branches - free an array of branches
1026 *	@inode:	inode we are dealing with
1027 *	@p:	array of block numbers
1028 *	@q:	pointer immediately past the end of array
1029 *	@depth:	depth of the branches to free
1030 *
 *	We are freeing all blocks referred to from these branches (numbers are
1032 *	stored as little-endian 32-bit) and updating @inode->i_blocks
1033 *	appropriately.
1034 */
1035static void ext2_free_branches(struct inode *inode, __le32 *p, __le32 *q, int depth)
1036{
1037	struct buffer_head * bh;
1038	unsigned long nr;
1039
1040	if (depth--) {
1041		int addr_per_block = EXT2_ADDR_PER_BLOCK(inode->i_sb);
1042		for ( ; p < q ; p++) {
1043			nr = le32_to_cpu(*p);
1044			if (!nr)
1045				continue;
1046			*p = 0;
1047			bh = sb_bread(inode->i_sb, nr);
1048			/*
1049			 * A read failure? Report error and clear slot
1050			 * (should be rare).
1051			 */
1052			if (!bh) {
1053				ext2_error(inode->i_sb, "ext2_free_branches",
1054					"Read failure, inode=%ld, block=%ld",
1055					inode->i_ino, nr);
1056				continue;
1057			}
1058			ext2_free_branches(inode,
1059					   (__le32*)bh->b_data,
1060					   (__le32*)bh->b_data + addr_per_block,
1061					   depth);
1062			bforget(bh);
1063			ext2_free_blocks(inode, nr, 1);
1064			mark_inode_dirty(inode);
1065		}
1066	} else
1067		ext2_free_data(inode, p, q);
1068}
1069
1070static void __ext2_truncate_blocks(struct inode *inode, loff_t offset)
1071{
1072	__le32 *i_data = EXT2_I(inode)->i_data;
1073	struct ext2_inode_info *ei = EXT2_I(inode);
1074	int addr_per_block = EXT2_ADDR_PER_BLOCK(inode->i_sb);
1075	int offsets[4];
1076	Indirect chain[4];
1077	Indirect *partial;
1078	__le32 nr = 0;
1079	int n;
1080	long iblock;
1081	unsigned blocksize;
1082	blocksize = inode->i_sb->s_blocksize;
1083	iblock = (offset + blocksize-1) >> EXT2_BLOCK_SIZE_BITS(inode->i_sb);
1084
1085	n = ext2_block_to_path(inode, iblock, offsets, NULL);
1086	if (n == 0)
1087		return;
1088
1089	/*
1090	 * From here we block out all ext2_get_block() callers who want to
1091	 * modify the block allocation tree.
1092	 */
1093	mutex_lock(&ei->truncate_mutex);
1094
1095	if (n == 1) {
1096		ext2_free_data(inode, i_data+offsets[0],
1097					i_data + EXT2_NDIR_BLOCKS);
1098		goto do_indirects;
1099	}
1100
1101	partial = ext2_find_shared(inode, n, offsets, chain, &nr);
1102	/* Kill the top of shared branch (already detached) */
1103	if (nr) {
1104		if (partial == chain)
1105			mark_inode_dirty(inode);
1106		else
1107			mark_buffer_dirty_inode(partial->bh, inode);
1108		ext2_free_branches(inode, &nr, &nr+1, (chain+n-1) - partial);
1109	}
1110	/* Clear the ends of indirect blocks on the shared branch */
1111	while (partial > chain) {
1112		ext2_free_branches(inode,
1113				   partial->p + 1,
1114				   (__le32*)partial->bh->b_data+addr_per_block,
1115				   (chain+n-1) - partial);
1116		mark_buffer_dirty_inode(partial->bh, inode);
1117		brelse (partial->bh);
1118		partial--;
1119	}
1120do_indirects:
1121	/* Kill the remaining (whole) subtrees */
1122	switch (offsets[0]) {
1123		default:
1124			nr = i_data[EXT2_IND_BLOCK];
1125			if (nr) {
1126				i_data[EXT2_IND_BLOCK] = 0;
1127				mark_inode_dirty(inode);
1128				ext2_free_branches(inode, &nr, &nr+1, 1);
1129			}
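			/* fall through */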
1130		case EXT2_IND_BLOCK:
1131			nr = i_data[EXT2_DIND_BLOCK];
1132			if (nr) {
1133				i_data[EXT2_DIND_BLOCK] = 0;
1134				mark_inode_dirty(inode);
1135				ext2_free_branches(inode, &nr, &nr+1, 2);
1136			}
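			/* fall through */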
1137		case EXT2_DIND_BLOCK:
1138			nr = i_data[EXT2_TIND_BLOCK];
1139			if (nr) {
1140				i_data[EXT2_TIND_BLOCK] = 0;
1141				mark_inode_dirty(inode);
1142				ext2_free_branches(inode, &nr, &nr+1, 3);
1143			}
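			/* fall through */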
1144		case EXT2_TIND_BLOCK:
1145			;
1146	}
1147
1148	ext2_discard_reservation(inode);
1149
1150	mutex_unlock(&ei->truncate_mutex);
1151}
1152
1153static void ext2_truncate_blocks(struct inode *inode, loff_t offset)
1154{
1155	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
1156	    S_ISLNK(inode->i_mode)))
1157		return;
1158	if (ext2_inode_is_fast_symlink(inode))
1159		return;
1160	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
1161		return;
1162	__ext2_truncate_blocks(inode, offset);
1163}
1164
1165static int ext2_setsize(struct inode *inode, loff_t newsize)
1166{
1167	int error;
1168
1169	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
1170	    S_ISLNK(inode->i_mode)))
1171		return -EINVAL;
1172	if (ext2_inode_is_fast_symlink(inode))
1173		return -EINVAL;
1174	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
1175		return -EPERM;
1176
1177	if (mapping_is_xip(inode->i_mapping))
1178		error = xip_truncate_page(inode->i_mapping, newsize);
1179	else if (test_opt(inode->i_sb, NOBH))
1180		error = nobh_truncate_page(inode->i_mapping,
1181				newsize, ext2_get_block);
1182	else
1183		error = block_truncate_page(inode->i_mapping,
1184				newsize, ext2_get_block);
1185	if (error)
1186		return error;
1187
1188	truncate_setsize(inode, newsize);
1189	__ext2_truncate_blocks(inode, newsize);
1190
1191	inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
1192	if (inode_needs_sync(inode)) {
1193		sync_mapping_buffers(inode->i_mapping);
1194		ext2_sync_inode (inode);
1195	} else {
1196		mark_inode_dirty(inode);
1197	}
1198
1199	return 0;
1200}
1201
1202static struct ext2_inode *ext2_get_inode(struct super_block *sb, ino_t ino,
1203					struct buffer_head **p)
1204{
1205	struct buffer_head * bh;
1206	unsigned long block_group;
1207	unsigned long block;
1208	unsigned long offset;
1209	struct ext2_group_desc * gdp;
1210
1211	*p = NULL;
1212	if ((ino != EXT2_ROOT_INO && ino < EXT2_FIRST_INO(sb)) ||
1213	    ino > le32_to_cpu(EXT2_SB(sb)->s_es->s_inodes_count))
1214		goto Einval;
1215
1216	block_group = (ino - 1) / EXT2_INODES_PER_GROUP(sb);
1217	gdp = ext2_get_group_desc(sb, block_group, NULL);
1218	if (!gdp)
1219		goto Egdp;
1220	/*
1221	 * Figure out the offset within the block group inode table
1222	 */
1223	offset = ((ino - 1) % EXT2_INODES_PER_GROUP(sb)) * EXT2_INODE_SIZE(sb);
1224	block = le32_to_cpu(gdp->bg_inode_table) +
1225		(offset >> EXT2_BLOCK_SIZE_BITS(sb));
1226	if (!(bh = sb_bread(sb, block)))
1227		goto Eio;
1228
1229	*p = bh;
1230	offset &= (EXT2_BLOCK_SIZE(sb) - 1);
1231	return (struct ext2_inode *) (bh->b_data + offset);
1232
1233Einval:
1234	ext2_error(sb, "ext2_get_inode", "bad inode number: %lu",
1235		   (unsigned long) ino);
1236	return ERR_PTR(-EINVAL);
1237Eio:
1238	ext2_error(sb, "ext2_get_inode",
1239		   "unable to read inode block - inode=%lu, block=%lu",
1240		   (unsigned long) ino, block);
1241Egdp:
1242	return ERR_PTR(-EIO);
1243}
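
/*
 * Worked example, assuming 1KiB blocks, 128-byte on-disk inodes and 8192
 * inodes per group: inode 15 lives in group (15 - 1) / 8192 = 0, at byte
 * offset (15 - 1) % 8192 * 128 = 1792 into that group's inode table, i.e.
 * in block bg_inode_table + 1 at offset 1792 & 1023 = 768 within the block.
 */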
1244
1245void ext2_set_inode_flags(struct inode *inode)
1246{
1247	unsigned int flags = EXT2_I(inode)->i_flags;
1248
1249	inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
1250	if (flags & EXT2_SYNC_FL)
1251		inode->i_flags |= S_SYNC;
1252	if (flags & EXT2_APPEND_FL)
1253		inode->i_flags |= S_APPEND;
1254	if (flags & EXT2_IMMUTABLE_FL)
1255		inode->i_flags |= S_IMMUTABLE;
1256	if (flags & EXT2_NOATIME_FL)
1257		inode->i_flags |= S_NOATIME;
1258	if (flags & EXT2_DIRSYNC_FL)
1259		inode->i_flags |= S_DIRSYNC;
1260}
1261
1262/* Propagate flags from i_flags to EXT2_I(inode)->i_flags */
1263void ext2_get_inode_flags(struct ext2_inode_info *ei)
1264{
1265	unsigned int flags = ei->vfs_inode.i_flags;
1266
1267	ei->i_flags &= ~(EXT2_SYNC_FL|EXT2_APPEND_FL|
1268			EXT2_IMMUTABLE_FL|EXT2_NOATIME_FL|EXT2_DIRSYNC_FL);
1269	if (flags & S_SYNC)
1270		ei->i_flags |= EXT2_SYNC_FL;
1271	if (flags & S_APPEND)
1272		ei->i_flags |= EXT2_APPEND_FL;
1273	if (flags & S_IMMUTABLE)
1274		ei->i_flags |= EXT2_IMMUTABLE_FL;
1275	if (flags & S_NOATIME)
1276		ei->i_flags |= EXT2_NOATIME_FL;
1277	if (flags & S_DIRSYNC)
1278		ei->i_flags |= EXT2_DIRSYNC_FL;
1279}
1280
1281struct inode *ext2_iget (struct super_block *sb, unsigned long ino)
1282{
1283	struct ext2_inode_info *ei;
1284	struct buffer_head * bh;
1285	struct ext2_inode *raw_inode;
1286	struct inode *inode;
1287	long ret = -EIO;
1288	int n;
1289
1290	inode = iget_locked(sb, ino);
1291	if (!inode)
1292		return ERR_PTR(-ENOMEM);
1293	if (!(inode->i_state & I_NEW))
1294		return inode;
1295
1296	ei = EXT2_I(inode);
1297	ei->i_block_alloc_info = NULL;
1298
1299	raw_inode = ext2_get_inode(inode->i_sb, ino, &bh);
1300	if (IS_ERR(raw_inode)) {
1301		ret = PTR_ERR(raw_inode);
1302 		goto bad_inode;
1303	}
1304
1305	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
1306	inode->i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
1307	inode->i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
1308	if (!(test_opt (inode->i_sb, NO_UID32))) {
1309		inode->i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
1310		inode->i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
1311	}
1312	inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
1313	inode->i_size = le32_to_cpu(raw_inode->i_size);
1314	inode->i_atime.tv_sec = (signed)le32_to_cpu(raw_inode->i_atime);
1315	inode->i_ctime.tv_sec = (signed)le32_to_cpu(raw_inode->i_ctime);
1316	inode->i_mtime.tv_sec = (signed)le32_to_cpu(raw_inode->i_mtime);
1317	inode->i_atime.tv_nsec = inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec = 0;
1318	ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
	/* We now have enough fields to check if the inode was active or not.
	 * This is needed because nfsd might try to access dead inodes.
	 * The test is the same one that e2fsck uses.
	 * NeilBrown 1999oct15
	 */
1324	if (inode->i_nlink == 0 && (inode->i_mode == 0 || ei->i_dtime)) {
1325		/* this inode is deleted */
1326		brelse (bh);
1327		ret = -ESTALE;
1328		goto bad_inode;
1329	}
1330	inode->i_blocks = le32_to_cpu(raw_inode->i_blocks);
1331	ei->i_flags = le32_to_cpu(raw_inode->i_flags);
1332	ei->i_faddr = le32_to_cpu(raw_inode->i_faddr);
1333	ei->i_frag_no = raw_inode->i_frag;
1334	ei->i_frag_size = raw_inode->i_fsize;
1335	ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
1336	ei->i_dir_acl = 0;
1337	if (S_ISREG(inode->i_mode))
1338		inode->i_size |= ((__u64)le32_to_cpu(raw_inode->i_size_high)) << 32;
1339	else
1340		ei->i_dir_acl = le32_to_cpu(raw_inode->i_dir_acl);
1341	ei->i_dtime = 0;
1342	inode->i_generation = le32_to_cpu(raw_inode->i_generation);
1343	ei->i_state = 0;
1344	ei->i_block_group = (ino - 1) / EXT2_INODES_PER_GROUP(inode->i_sb);
1345	ei->i_dir_start_lookup = 0;
1346
1347	/*
1348	 * NOTE! The in-memory inode i_data array is in little-endian order
1349	 * even on big-endian machines: we do NOT byteswap the block numbers!
1350	 */
1351	for (n = 0; n < EXT2_N_BLOCKS; n++)
1352		ei->i_data[n] = raw_inode->i_block[n];
1353
1354	if (S_ISREG(inode->i_mode)) {
1355		inode->i_op = &ext2_file_inode_operations;
1356		if (ext2_use_xip(inode->i_sb)) {
1357			inode->i_mapping->a_ops = &ext2_aops_xip;
1358			inode->i_fop = &ext2_xip_file_operations;
1359		} else if (test_opt(inode->i_sb, NOBH)) {
1360			inode->i_mapping->a_ops = &ext2_nobh_aops;
1361			inode->i_fop = &ext2_file_operations;
1362		} else {
1363			inode->i_mapping->a_ops = &ext2_aops;
1364			inode->i_fop = &ext2_file_operations;
1365		}
1366	} else if (S_ISDIR(inode->i_mode)) {
1367		inode->i_op = &ext2_dir_inode_operations;
1368		inode->i_fop = &ext2_dir_operations;
1369		if (test_opt(inode->i_sb, NOBH))
1370			inode->i_mapping->a_ops = &ext2_nobh_aops;
1371		else
1372			inode->i_mapping->a_ops = &ext2_aops;
1373	} else if (S_ISLNK(inode->i_mode)) {
1374		if (ext2_inode_is_fast_symlink(inode)) {
1375			inode->i_op = &ext2_fast_symlink_inode_operations;
1376			nd_terminate_link(ei->i_data, inode->i_size,
1377				sizeof(ei->i_data) - 1);
1378		} else {
1379			inode->i_op = &ext2_symlink_inode_operations;
1380			if (test_opt(inode->i_sb, NOBH))
1381				inode->i_mapping->a_ops = &ext2_nobh_aops;
1382			else
1383				inode->i_mapping->a_ops = &ext2_aops;
1384		}
1385	} else {
1386		inode->i_op = &ext2_special_inode_operations;
1387		if (raw_inode->i_block[0])
1388			init_special_inode(inode, inode->i_mode,
1389			   old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
1390		else
1391			init_special_inode(inode, inode->i_mode,
1392			   new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
1393	}
1394	brelse (bh);
1395	ext2_set_inode_flags(inode);
1396	unlock_new_inode(inode);
1397	return inode;
1398
1399bad_inode:
1400	iget_failed(inode);
1401	return ERR_PTR(ret);
1402}
1403
1404static int __ext2_write_inode(struct inode *inode, int do_sync)
1405{
1406	struct ext2_inode_info *ei = EXT2_I(inode);
1407	struct super_block *sb = inode->i_sb;
1408	ino_t ino = inode->i_ino;
1409	uid_t uid = inode->i_uid;
1410	gid_t gid = inode->i_gid;
1411	struct buffer_head * bh;
1412	struct ext2_inode * raw_inode = ext2_get_inode(sb, ino, &bh);
1413	int n;
1414	int err = 0;
1415
1416	if (IS_ERR(raw_inode))
1417 		return -EIO;
1418
	/* For fields not tracked in the in-memory inode,
	 * initialise them to zero for new inodes. */
1421	if (ei->i_state & EXT2_STATE_NEW)
1422		memset(raw_inode, 0, EXT2_SB(sb)->s_inode_size);
1423
1424	ext2_get_inode_flags(ei);
1425	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
1426	if (!(test_opt(sb, NO_UID32))) {
1427		raw_inode->i_uid_low = cpu_to_le16(low_16_bits(uid));
1428		raw_inode->i_gid_low = cpu_to_le16(low_16_bits(gid));
1429/*
1430 * Fix up interoperability with old kernels. Otherwise, old inodes get
1431 * re-used with the upper 16 bits of the uid/gid intact
1432 */
1433		if (!ei->i_dtime) {
1434			raw_inode->i_uid_high = cpu_to_le16(high_16_bits(uid));
1435			raw_inode->i_gid_high = cpu_to_le16(high_16_bits(gid));
1436		} else {
1437			raw_inode->i_uid_high = 0;
1438			raw_inode->i_gid_high = 0;
1439		}
1440	} else {
1441		raw_inode->i_uid_low = cpu_to_le16(fs_high2lowuid(uid));
1442		raw_inode->i_gid_low = cpu_to_le16(fs_high2lowgid(gid));
1443		raw_inode->i_uid_high = 0;
1444		raw_inode->i_gid_high = 0;
1445	}
1446	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
1447	raw_inode->i_size = cpu_to_le32(inode->i_size);
1448	raw_inode->i_atime = cpu_to_le32(inode->i_atime.tv_sec);
1449	raw_inode->i_ctime = cpu_to_le32(inode->i_ctime.tv_sec);
1450	raw_inode->i_mtime = cpu_to_le32(inode->i_mtime.tv_sec);
1451
1452	raw_inode->i_blocks = cpu_to_le32(inode->i_blocks);
1453	raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
1454	raw_inode->i_flags = cpu_to_le32(ei->i_flags);
1455	raw_inode->i_faddr = cpu_to_le32(ei->i_faddr);
1456	raw_inode->i_frag = ei->i_frag_no;
1457	raw_inode->i_fsize = ei->i_frag_size;
1458	raw_inode->i_file_acl = cpu_to_le32(ei->i_file_acl);
1459	if (!S_ISREG(inode->i_mode))
1460		raw_inode->i_dir_acl = cpu_to_le32(ei->i_dir_acl);
1461	else {
1462		raw_inode->i_size_high = cpu_to_le32(inode->i_size >> 32);
1463		if (inode->i_size > 0x7fffffffULL) {
1464			if (!EXT2_HAS_RO_COMPAT_FEATURE(sb,
1465					EXT2_FEATURE_RO_COMPAT_LARGE_FILE) ||
1466			    EXT2_SB(sb)->s_es->s_rev_level ==
1467					cpu_to_le32(EXT2_GOOD_OLD_REV)) {
1468			       /* If this is the first large file
1469				* created, add a flag to the superblock.
1470				*/
1471				spin_lock(&EXT2_SB(sb)->s_lock);
1472				ext2_update_dynamic_rev(sb);
1473				EXT2_SET_RO_COMPAT_FEATURE(sb,
1474					EXT2_FEATURE_RO_COMPAT_LARGE_FILE);
1475				spin_unlock(&EXT2_SB(sb)->s_lock);
1476				ext2_write_super(sb);
1477			}
1478		}
1479	}
1480
1481	raw_inode->i_generation = cpu_to_le32(inode->i_generation);
1482	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
1483		if (old_valid_dev(inode->i_rdev)) {
1484			raw_inode->i_block[0] =
1485				cpu_to_le32(old_encode_dev(inode->i_rdev));
1486			raw_inode->i_block[1] = 0;
1487		} else {
1488			raw_inode->i_block[0] = 0;
1489			raw_inode->i_block[1] =
1490				cpu_to_le32(new_encode_dev(inode->i_rdev));
1491			raw_inode->i_block[2] = 0;
1492		}
1493	} else for (n = 0; n < EXT2_N_BLOCKS; n++)
1494		raw_inode->i_block[n] = ei->i_data[n];
1495	mark_buffer_dirty(bh);
1496	if (do_sync) {
1497		sync_dirty_buffer(bh);
1498		if (buffer_req(bh) && !buffer_uptodate(bh)) {
1499			printk ("IO error syncing ext2 inode [%s:%08lx]\n",
1500				sb->s_id, (unsigned long) ino);
1501			err = -EIO;
1502		}
1503	}
1504	ei->i_state &= ~EXT2_STATE_NEW;
1505	brelse (bh);
1506	return err;
1507}
1508
1509int ext2_write_inode(struct inode *inode, struct writeback_control *wbc)
1510{
1511	return __ext2_write_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
1512}
1513
1514int ext2_sync_inode(struct inode *inode)
1515{
1516	struct writeback_control wbc = {
1517		.sync_mode = WB_SYNC_ALL,
1518		.nr_to_write = 0,	/* sys_fsync did this */
1519	};
1520	return sync_inode(inode, &wbc);
1521}
1522
1523int ext2_setattr(struct dentry *dentry, struct iattr *iattr)
1524{
1525	struct inode *inode = dentry->d_inode;
1526	int error;
1527
1528	error = inode_change_ok(inode, iattr);
1529	if (error)
1530		return error;
1531
1532	if (is_quota_modification(inode, iattr))
1533		dquot_initialize(inode);
1534	if ((iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid) ||
1535	    (iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid)) {
1536		error = dquot_transfer(inode, iattr);
1537		if (error)
1538			return error;
1539	}
1540	if (iattr->ia_valid & ATTR_SIZE && iattr->ia_size != inode->i_size) {
1541		error = ext2_setsize(inode, iattr->ia_size);
1542		if (error)
1543			return error;
1544	}
1545	setattr_copy(inode, iattr);
1546	if (iattr->ia_valid & ATTR_MODE)
1547		error = ext2_acl_chmod(inode);
1548	mark_inode_dirty(inode);
1549
1550	return error;
1551}
1552