• Home
  • History
  • Annotate
  • Line#
  • Navigate
  • Raw
  • Download
  • only in /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/fs/hfsplus_journal/
/*
 *  linux/fs/hfsplus/extents.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Handling of Extents both in catalog and extents overflow trees
 */

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/pagemap.h>

#include "hfsplus_fs.h"
#include "hfsplus_raw.h"
18/* Compare two extents keys, returns 0 on same, pos/neg for difference */
19int hfsplus_ext_cmp_key(const hfsplus_btree_key *k1,
20			const hfsplus_btree_key *k2)
21{
22	__be32 k1id, k2id;
23	__be32 k1s, k2s;
24
25	k1id = k1->ext.cnid;
26	k2id = k2->ext.cnid;
27	if (k1id != k2id)
28		return be32_to_cpu(k1id) < be32_to_cpu(k2id) ? -1 : 1;
29
30	if (k1->ext.fork_type != k2->ext.fork_type)
31		return k1->ext.fork_type < k2->ext.fork_type ? -1 : 1;
32
33	k1s = k1->ext.start_block;
34	k2s = k2->ext.start_block;
35	if (k1s == k2s)
36		return 0;
37	return be32_to_cpu(k1s) < be32_to_cpu(k2s) ? -1 : 1;
38}
39
40static void hfsplus_ext_build_key(hfsplus_btree_key *key, u32 cnid,
41				  u32 block, u8 type)
42{
43	key->key_len = cpu_to_be16(HFSPLUS_EXT_KEYLEN - 2);
44	key->ext.cnid = cpu_to_be32(cnid);
45	key->ext.start_block = cpu_to_be32(block);
46	key->ext.fork_type = type;
47	key->ext.pad = 0;
48}
49
50static u32 hfsplus_ext_find_block(struct hfsplus_extent *ext, u32 off)
51{
52	int i;
53	u32 count;
54
55	for (i = 0; i < 8; ext++, i++) {
56		count = be32_to_cpu(ext->block_count);
57		if (off < count)
58			return be32_to_cpu(ext->start_block) + off;
59		off -= count;
60	}
61	/* panic? */
62	return 0;
63}
64
65static int hfsplus_ext_block_count(struct hfsplus_extent *ext)
66{
67	int i;
68	u32 count = 0;
69
70	for (i = 0; i < 8; ext++, i++)
71		count += be32_to_cpu(ext->block_count);
72	return count;
73}
74
75static u32 hfsplus_ext_lastblock(struct hfsplus_extent *ext)
76{
77	int i;
78
79	ext += 7;
80	for (i = 0; i < 7; ext--, i++)
81		if (ext->block_count)
82			break;
83	return be32_to_cpu(ext->start_block) + be32_to_cpu(ext->block_count);
84}
85
/*
 * Flush the inode's cached extent record to the extents overflow
 * B-tree: insert it when marked NEW, otherwise overwrite the existing
 * record in place.
 *
 * Caller must hold HFSPLUS_I(inode).extents_lock; @fd must already be
 * initialised against the extents tree.  Returns 0 or a btree error.
 */
static int __hfsplus_ext_write_extent(hfsplus_handle_t *hfsplus_handle, struct inode *inode, struct hfs_find_data *fd)
{
	int res;

	WARN_ON(!mutex_is_locked(&HFSPLUS_I(inode).extents_lock));
	hfsplus_ext_build_key(fd->search_key, inode->i_ino, HFSPLUS_I(inode).cached_start,
			      HFSPLUS_IS_RSRC(inode) ?  HFSPLUS_TYPE_RSRC : HFSPLUS_TYPE_DATA);
	res = hfs_brec_find(hfsplus_handle, fd);
	if (HFSPLUS_I(inode).flags & HFSPLUS_FLG_EXT_NEW) {
		/* A brand-new record must not already exist in the tree. */
		if (res != -ENOENT)
			return res;
		/* NOTE(review): hfs_brec_insert()'s return value is ignored. */
		hfs_brec_insert(hfsplus_handle, fd, HFSPLUS_I(inode).cached_extents, sizeof(hfsplus_extent_rec));
		HFSPLUS_I(inode).flags &= ~(HFSPLUS_FLG_EXT_DIRTY | HFSPLUS_FLG_EXT_NEW);
	} else {
		/* Updating an existing record: the lookup must have hit. */
		if (res)
			return res;
		hfs_bnode_write(hfsplus_handle, fd->bnode, HFSPLUS_I(inode).cached_extents, fd->entryoffset, fd->entrylength);
		HFSPLUS_I(inode).flags &= ~HFSPLUS_FLG_EXT_DIRTY;
	}

	/*
	 * We can't just use hfsplus_mark_inode_dirty here, because we
	 * also get called from hfsplus_write_inode, which should not
	 * redirty the inode.  Instead the callers have to be careful
	 * to explicily mark the inode dirty, too.
	 */
	/*
	 * NOTE(review): this re-sets HFSPLUS_FLG_EXT_DIRTY immediately
	 * after the branches above cleared it, leaving the record flagged
	 * dirty even after a successful write.  Presumably the bit here
	 * doubles as the "caller must write the inode" marker described
	 * in the comment above — confirm against hfsplus_fs.h before
	 * changing this.
	 */
    HFSPLUS_I(inode).flags |= HFSPLUS_FLG_EXT_DIRTY;

	return 0;
}
116
117static int hfsplus_ext_write_extent_locked(hfsplus_handle_t *hfsplus_handle,struct inode *inode)
118{
119	int res = 0;
120
121	if (HFSPLUS_I(inode).flags & HFSPLUS_FLG_EXT_DIRTY) {
122		struct hfs_find_data fd;
123
124		res = hfs_find_init(HFSPLUS_SB(inode->i_sb).ext_tree, &fd);
125		if (res)
126			return res;
127		res = __hfsplus_ext_write_extent(hfsplus_handle,inode, &fd);
128		hfs_find_exit(hfsplus_handle, &fd);
129	}
130	return res;
131}
132
133int hfsplus_ext_write_extent(hfsplus_handle_t *hfsplus_handle,struct inode *inode)
134{
135	int res;
136
137	mutex_lock(&HFSPLUS_I(inode).extents_lock);
138	res = hfsplus_ext_write_extent_locked(hfsplus_handle,inode);
139	mutex_unlock(&HFSPLUS_I(inode).extents_lock);
140    return res;
141}
/*
 * Look up the extent record for (@cnid, @type) covering fork block
 * @block and copy it into @extent.  Returns 0 on success, -ENOENT when
 * no matching record exists, -EIO on a malformed record, or any other
 * btree error.
 */
static inline int __hfsplus_ext_read_extent(hfsplus_handle_t *hfsplus_handle, struct hfs_find_data *fd,
					    struct hfsplus_extent *extent,
					    u32 cnid, u32 block, u8 type)
{
	int res;

	hfsplus_ext_build_key(fd->search_key, cnid, block, type);
	/* Invalidate fd->key so a failed lookup cannot look like a match. */
	fd->key->ext.cnid = 0;
	res = hfs_brec_find(hfsplus_handle, fd);
	/* -ENOENT still positions fd at the nearest record; keep going. */
	if (res && res != -ENOENT)
		return res;
	/* A neighbouring record for another file or fork is not a hit. */
	if (fd->key->ext.cnid != fd->search_key->ext.cnid ||
	    fd->key->ext.fork_type != fd->search_key->ext.fork_type)
		return -ENOENT;
	if (fd->entrylength != sizeof(hfsplus_extent_rec))
		return -EIO;
	hfs_bnode_read(fd->bnode, extent, fd->entryoffset, sizeof(hfsplus_extent_rec));
	return 0;
}
161
/*
 * Replace the inode's cached extent record with the one covering
 * @block, flushing the currently cached record first if it is dirty.
 * Caller must hold extents_lock.  On lookup failure the cache window
 * is emptied and marked clean; the error is passed through.
 */
static inline int __hfsplus_ext_cache_extent(hfsplus_handle_t *hfsplus_handle, struct hfs_find_data *fd, struct inode *inode, u32 block)
{
	int res;

	WARN_ON(!mutex_is_locked(&HFSPLUS_I(inode).extents_lock));
	/* Never drop a dirty record: write it back before replacing it. */
	if (HFSPLUS_I(inode).flags & HFSPLUS_FLG_EXT_DIRTY) {
		res = __hfsplus_ext_write_extent(hfsplus_handle, inode, fd);
		if (res)
			return res;
	}

	res = __hfsplus_ext_read_extent(hfsplus_handle, fd, HFSPLUS_I(inode).cached_extents, inode->i_ino,
					block, HFSPLUS_IS_RSRC(inode) ? HFSPLUS_TYPE_RSRC : HFSPLUS_TYPE_DATA);
	if (!res) {
		/* Cache window = [cached_start, cached_start + cached_blocks). */
		HFSPLUS_I(inode).cached_start = be32_to_cpu(fd->key->ext.start_block);
		HFSPLUS_I(inode).cached_blocks = hfsplus_ext_block_count(HFSPLUS_I(inode).cached_extents);
	} else {
		/* Lookup failed: invalidate the cache entirely. */
		HFSPLUS_I(inode).cached_start = HFSPLUS_I(inode).cached_blocks = 0;
		HFSPLUS_I(inode).flags &= ~(HFSPLUS_FLG_EXT_DIRTY | HFSPLUS_FLG_EXT_NEW);
	}
	return res;
}
184
185static int hfsplus_ext_read_extent(hfsplus_handle_t *hfsplus_handle, struct inode *inode, u32 block)
186{
187	struct hfs_find_data fd;
188	int res;
189
190	if (block >= HFSPLUS_I(inode).cached_start &&
191	    block < HFSPLUS_I(inode).cached_start + HFSPLUS_I(inode).cached_blocks)
192		return 0;
193
194	res = hfs_find_init(HFSPLUS_SB(inode->i_sb).ext_tree, &fd);
195	if (!res) {
196	res = __hfsplus_ext_cache_extent(hfsplus_handle, &fd, inode, block);
197	hfs_find_exit(hfsplus_handle, &fd);
198	}
199	return res;
200}
201
/*
 * Get a block at iblock for inode, possibly allocating if create.
 *
 * block_device get_block callback: maps file-system block @iblock to a
 * device block in @bh_result.  When @create is set and @iblock is the
 * block immediately past EOF, the file is extended first.  Picks the
 * running journal transaction's handle when one exists for metadata
 * writes, otherwise a throwaway non-journaled handle.
 */
int hfsplus_get_block(struct inode *inode, sector_t iblock,
		      struct buffer_head *bh_result, int create)
{
	struct super_block *sb;
	int res = -EIO;
	u32 ablock, dblock, mask;
	int was_dirty = 0;
	int shift;
	hfsplus_handle_t *hfsplus_handle, tmp_hfsplus_handle;

	/* Fallback handle for paths outside any journal transaction. */
	tmp_hfsplus_handle.journaled = !HFSPLUS_JOURNAL_PRESENT;
	tmp_hfsplus_handle.handle = NULL;

	sb = inode->i_sb;

	/* Journal device */
	if (HFSPLUS_SB(sb).jnl.journaled == HFSPLUS_JOURNAL_PRESENT) {
		/* Write Metadata */
		if (((inode->i_mapping->a_ops == &hfsplus_journalled_btree_aops) ||
			(inode->i_mapping->a_ops == &hfsplus_journalled_aops)) && create) {
			/* Reuse the transaction already running on this task, if any. */
			hfsplus_handle = hfsplus_jbd_current_handle();
			if (hfsplus_handle == NULL) {
				/* NOTE(review): printk lacks a KERN_* level prefix. */
				printk("hfsplus_handle is NULL\n");
				hfsplus_handle = &tmp_hfsplus_handle;
			}
		}
		else {
			hfsplus_handle = &tmp_hfsplus_handle;
		}
	}
	/* Non-journal device */
	else {
		hfsplus_handle = &tmp_hfsplus_handle;
	}

	/* Convert inode block to disk allocation block */
	/* NOTE(review): 'shift' is computed here but never used below. */
	shift = HFSPLUS_SB(sb).alloc_blksz_shift - sb->s_blocksize_bits;
	ablock = iblock >> HFSPLUS_SB(sb).fs_shift;

	/* Past EOF: only a create of the block right at EOF may extend. */
	if (iblock >= HFSPLUS_I(inode).fs_blocks) {
		if (iblock > HFSPLUS_I(inode).fs_blocks || !create) {
			return -EIO;
		}
		if (ablock >= HFSPLUS_I(inode).alloc_blocks) {
			res = hfsplus_file_extend(hfsplus_handle, inode);
			if (res)
				return res;
		}
	} else
		create = 0;

	/* Blocks described by the extents stored directly in the inode. */
	if (ablock < HFSPLUS_I(inode).first_blocks) {
		dblock = hfsplus_ext_find_block(HFSPLUS_I(inode).first_extents, ablock);
		goto done;
	}

	/* The extents tree must not recurse into overflow lookups on itself. */
	if (inode->i_ino == HFSPLUS_EXT_CNID)
		return -EIO;

	mutex_lock(&HFSPLUS_I(inode).extents_lock);
	was_dirty = (HFSPLUS_I(inode).flags & HFSPLUS_FLG_EXT_DIRTY);
	res = hfsplus_ext_read_extent(hfsplus_handle, inode, ablock);
	if (res) {
		mutex_unlock(&HFSPLUS_I(inode).extents_lock);
		return -EIO;
	}
	dblock = hfsplus_ext_find_block(HFSPLUS_I(inode).cached_extents,
					ablock - HFSPLUS_I(inode).cached_start);
	mutex_unlock(&HFSPLUS_I(inode).extents_lock);

done:
	dprint(DBG_EXTENT, "get_block(%lu): %llu - %u\n", inode->i_ino, (long long)iblock, dblock);
	mask = (1 << HFSPLUS_SB(sb).fs_shift) - 1;
	/* Allocation block -> device block, plus offset within the big block. */
	map_bh(bh_result, sb, (dblock << HFSPLUS_SB(sb).fs_shift) + HFSPLUS_SB(sb).blockoffset + (iblock & mask));
	if (create) {
		set_buffer_new(bh_result);
		HFSPLUS_I(inode).phys_size += sb->s_blocksize;
		HFSPLUS_I(inode).fs_blocks++;
		inode_add_bytes(inode, sb->s_blocksize);
		if (hfsplus_journalled_mark_inode_dirty(__FUNCTION__, hfsplus_handle, inode)) {
			printk("HFS+-fs: Error in %s()\n", __FUNCTION__);
			/* NOTE(review): returns -1 rather than a -Exxx errno code. */
			return -1;
		}
	}
	if (create || was_dirty)
		mark_inode_dirty(inode);
	return 0;
}
291
292static void hfsplus_dump_extent(struct hfsplus_extent *extent)
293{
294	int i;
295
296	dprint(DBG_EXTENT, "   ");
297	for (i = 0; i < 8; i++)
298		dprint(DBG_EXTENT, " %u:%u", be32_to_cpu(extent[i].start_block),
299				 be32_to_cpu(extent[i].block_count));
300	dprint(DBG_EXTENT, "\n");
301}
302
/*
 * Append @block_count blocks starting at allocation block @alloc_block
 * to an extent record.  @offset is the fork-relative offset at which
 * the new range begins and must coincide with the record's current end
 * (i.e. fall exactly on a descriptor boundary).  If the new range is
 * contiguous with the last descriptor it is merged, otherwise the next
 * free descriptor is started.
 *
 * Returns 0 on success, -ENOSPC when all eight descriptors are in use,
 * -EIO when @offset does not land on the record's end.
 */
static int hfsplus_add_extent(struct hfsplus_extent *extent, u32 offset,
			      u32 alloc_block, u32 block_count)
{
	u32 count, start;
	int i;

	hfsplus_dump_extent(extent);
	for (i = 0; i < 8; extent++, i++) {
		count = be32_to_cpu(extent->block_count);
		if (offset == count) {
			/* Found the descriptor ending exactly at @offset. */
			start = be32_to_cpu(extent->start_block);
			if (alloc_block != start + count) {
				/* Not contiguous: open the next descriptor. */
				if (++i >= 8)
					return -ENOSPC;
				extent++;
				extent->start_block = cpu_to_be32(alloc_block);
			} else
				/* Contiguous: grow this descriptor instead. */
				block_count += count;
			extent->block_count = cpu_to_be32(block_count);
			return 0;
		} else if (offset < count)
			/* @offset falls inside a descriptor — not a valid append point. */
			break;
		offset -= count;
	}
	/* panic? */
	return -EIO;
}
330
/*
 * Free @block_nr allocation blocks from the tail of an extent record.
 * @offset is the fork-relative offset of the record's end and must
 * coincide with a descriptor boundary.  Descriptors are released
 * back-to-front; a partially-freed descriptor keeps its head and gets
 * a reduced block_count.
 *
 * Returns 0 on success, -EIO when @offset does not match a descriptor
 * boundary, or the last hfsplus_block_free() error.
 */
static int hfsplus_free_extents(hfsplus_handle_t *hfsplus_handle, struct super_block *sb,
				struct hfsplus_extent *extent,
				u32 offset, u32 block_nr)
{
	u32 count, start;
	int i;
	int err = 0;

	hfsplus_dump_extent(extent);
	/* Locate the descriptor whose end equals @offset. */
	for (i = 0; i < 8; extent++, i++) {
		count = be32_to_cpu(extent->block_count);
		if (offset == count)
			goto found;
		else if (offset < count)
			break;
		offset -= count;
	}
	/* panic? */
	return -EIO;
found:
	/* Walk backwards from there, freeing until @block_nr is consumed. */
	for (;;) {
		start = be32_to_cpu(extent->start_block);
		if (count <= block_nr) {
			/* The whole descriptor goes away. */
			err = hfsplus_block_free(hfsplus_handle, sb, start, count);
			if (err) {
				pr_err("can't free extent\n");
			}
			extent->block_count = 0;
			extent->start_block = 0;
			block_nr -= count;
		} else {
			/* Trim only the tail of this descriptor. */
			count -= block_nr;
			err = hfsplus_block_free(hfsplus_handle, sb, start + count, block_nr);
			if (err) {
				pr_err("can't free extent\n");
			}
			extent->block_count = cpu_to_be32(count);
			block_nr = 0;
		}
		if (!block_nr || !i)
			return err;
		i--;
		extent--;
		count = be32_to_cpu(extent->block_count);
	}
}
377
/*
 * Release every allocation block owned by one fork of file @cnid and
 * delete the fork's records from the extents overflow tree.  @fork is
 * the on-disk (big-endian) fork descriptor from the catalog; @type
 * selects the data or resource fork.  Returns 0 or a btree/lookup
 * error.
 */
int hfsplus_free_fork(hfsplus_handle_t *hfsplus_handle, struct super_block *sb, u32 cnid, struct hfsplus_fork_raw *fork, int type)
{
	struct hfs_find_data fd;
	hfsplus_extent_rec ext_entry;
	u32 total_blocks, blocks, start;
	int res, i;

	total_blocks = be32_to_cpu(fork->total_blocks);
	if (!total_blocks)
		return 0;

	/* Sum of the eight extents stored inline in the fork descriptor. */
	blocks = 0;
	for (i = 0; i < 8; i++)
		blocks += be32_to_cpu(fork->extents[i].block_count);

	/* Free everything the inline record describes. */
	res = hfsplus_free_extents(hfsplus_handle, sb, fork->extents, blocks, blocks);
	if (res)
		return res;
	if (total_blocks == blocks)
		return 0;

	/* The rest lives in overflow records; remove them back-to-front. */
	res = hfs_find_init(HFSPLUS_SB(sb).ext_tree, &fd);
	if (res)
		return res;
	do {
		res = __hfsplus_ext_read_extent(hfsplus_handle, &fd, ext_entry, cnid,
						total_blocks, type);
		if (res)
			break;
		start = be32_to_cpu(fd.key->ext.start_block);
		/* NOTE(review): the result of this free is ignored. */
		hfsplus_free_extents(hfsplus_handle, sb, ext_entry,
				     total_blocks - start,
				     total_blocks);
		hfs_brec_remove(hfsplus_handle, &fd);
		total_blocks = start;
	} while (total_blocks > blocks);
	hfs_find_exit(hfsplus_handle, &fd);

	return res;
}
418
/*
 * Allocate more allocation blocks for @inode (up to 0x100 at a time)
 * and append them to its extent chain: the in-inode record, the cached
 * overflow record, or — when the current record is full — a fresh
 * overflow record.  Returns 0 or a negative error (-ENOSPC when the
 * volume is full or the allocation bitmap is undersized).
 */
int hfsplus_file_extend(hfsplus_handle_t *hfsplus_handle, struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	u32 start, len, goal;
	int res;

	/* Sanity: the allocation bitmap must cover all in-use blocks. */
	if (HFSPLUS_SB(sb).alloc_file->i_size * 8 < HFSPLUS_SB(sb).total_blocks - HFSPLUS_SB(sb).free_blocks + 8) {
		// extend alloc file
		printk(KERN_ERR "hfs: extend alloc file! (%Lu,%u,%u)\n", HFSPLUS_SB(sb).alloc_file->i_size * 8,
			HFSPLUS_SB(sb).total_blocks, HFSPLUS_SB(sb).free_blocks);
		return -ENOSPC;
	}

	mutex_lock(&HFSPLUS_I(inode).extents_lock);
	/* Aim the allocator just past the fork's current last block. */
	if (HFSPLUS_I(inode).alloc_blocks == HFSPLUS_I(inode).first_blocks)
		goal = hfsplus_ext_lastblock(HFSPLUS_I(inode).first_extents);
	else {
		res = hfsplus_ext_read_extent(hfsplus_handle, inode, HFSPLUS_I(inode).alloc_blocks);
		if (res)
			goto out;
		goal = hfsplus_ext_lastblock(HFSPLUS_I(inode).cached_extents);
	}

//	len = HFSPLUS_I(inode).clump_blocks;
	len = 0x100;
	start = hfsplus_block_allocate(hfsplus_handle, sb, HFSPLUS_SB(sb).total_blocks, goal, &len);
	if (start >= HFSPLUS_SB(sb).total_blocks) {
		/* Nothing free past the goal: retry from the volume start. */
		start = hfsplus_block_allocate(hfsplus_handle, sb, goal, 0, &len);
		if (start >= goal) {
			res = -ENOSPC;
			goto out;
		}
	}

	if (HFSPLUS_I(inode).alloc_blocks <= HFSPLUS_I(inode).first_blocks) {
		if (!HFSPLUS_I(inode).first_blocks) {
			/* no extents yet */
			HFSPLUS_I(inode).first_extents[0].start_block = cpu_to_be32(start);
			HFSPLUS_I(inode).first_extents[0].block_count = cpu_to_be32(len);
			res = 0;
		} else {
			/* try to append to extents in inode */
			res = hfsplus_add_extent(HFSPLUS_I(inode).first_extents,
						 HFSPLUS_I(inode).alloc_blocks,
						 start, len);
			if (res == -ENOSPC)
				goto insert_extent;
		}
		if (!res) {
			hfsplus_dump_extent(HFSPLUS_I(inode).first_extents);
			HFSPLUS_I(inode).first_blocks += len;
		}
	} else {
		/* Append to the cached overflow record. */
		res = hfsplus_add_extent(HFSPLUS_I(inode).cached_extents,
					 HFSPLUS_I(inode).alloc_blocks -
					 HFSPLUS_I(inode).cached_start,
					 start, len);
		if (!res) {
			hfsplus_dump_extent(HFSPLUS_I(inode).cached_extents);
			HFSPLUS_I(inode).flags |= HFSPLUS_FLG_EXT_DIRTY;
			HFSPLUS_I(inode).cached_blocks += len;
		} else if (res == -ENOSPC)
			goto insert_extent;
	}
out:
	mutex_unlock(&HFSPLUS_I(inode).extents_lock);
	if (!res) {
		HFSPLUS_I(inode).alloc_blocks += len;
		res = hfsplus_journalled_mark_inode_dirty(__FUNCTION__, hfsplus_handle, inode);
	}
	return res;

insert_extent:
	/* Record was full: flush it and start a new one for [start, start+len). */
	res = hfsplus_ext_write_extent_locked(hfsplus_handle,inode);
	if (res)
		goto out;

	memset(HFSPLUS_I(inode).cached_extents, 0, sizeof(hfsplus_extent_rec));
	HFSPLUS_I(inode).cached_extents[0].start_block = cpu_to_be32(start);
	HFSPLUS_I(inode).cached_extents[0].block_count = cpu_to_be32(len);
	hfsplus_dump_extent(HFSPLUS_I(inode).cached_extents);
	HFSPLUS_I(inode).flags |= HFSPLUS_FLG_EXT_DIRTY | HFSPLUS_FLG_EXT_NEW;
	HFSPLUS_I(inode).cached_start = HFSPLUS_I(inode).alloc_blocks;
	HFSPLUS_I(inode).cached_blocks = len;

	res = 0;
	goto out;
}
507
508void hfsplus_file_truncate(struct inode *inode)
509{
510	struct super_block *sb = inode->i_sb;
511	struct hfs_find_data fd;
512	u32 alloc_cnt, blk_cnt, start;
513	int res;
514	hfsplus_handle_t hfsplus_handle;
515
516	dprint(DBG_INODE, "truncate: %lu, %Lu -> %Lu\n", inode->i_ino,
517	       (long long)HFSPLUS_I(inode).phys_size, inode->i_size);
518	if (inode->i_size > HFSPLUS_I(inode).phys_size) {
519		struct address_space *mapping = inode->i_mapping;
520		struct page *page;
521		void *fsdata;
522		u32 size = inode->i_size;
523		int res;
524
525		res = pagecache_write_begin(NULL, mapping, size, 0,
526						AOP_FLAG_UNINTERRUPTIBLE,
527						&page, &fsdata);
528		if (res)
529			return;
530		res = pagecache_write_end(NULL, mapping, size, 0, 0, page, fsdata);
531		if (res < 0)
532		if (hfsplus_journal_start(__FUNCTION__, sb, &hfsplus_handle))
533			return;
534
535		hfsplus_journalled_mark_inode_dirty(__FUNCTION__, &hfsplus_handle, inode);
536		hfsplus_journal_stop(&hfsplus_handle);
537		return;
538	} else if (inode->i_size == HFSPLUS_I(inode).phys_size)
539		return;
540
541	if (hfsplus_journal_start(__FUNCTION__, sb, &hfsplus_handle))
542		return;
543
544	blk_cnt = (inode->i_size + HFSPLUS_SB(sb).alloc_blksz - 1) >> HFSPLUS_SB(sb).alloc_blksz_shift;
545	alloc_cnt = HFSPLUS_I(inode).alloc_blocks;
546	if (blk_cnt == alloc_cnt)
547		goto out;
548
549	mutex_lock(&HFSPLUS_I(inode).extents_lock);
550	res = hfs_find_init(HFSPLUS_SB(sb).ext_tree, &fd);
551	if (res) {
552		/* XXX: We lack error handling of hfsplus_file_truncate() */
553		return;
554	}
555	while (1) {
556		if (alloc_cnt == HFSPLUS_I(inode).first_blocks) {
557			hfsplus_free_extents(&hfsplus_handle, sb, HFSPLUS_I(inode).first_extents,
558					     alloc_cnt, alloc_cnt - blk_cnt);
559			hfsplus_dump_extent(HFSPLUS_I(inode).first_extents);
560			HFSPLUS_I(inode).first_blocks = blk_cnt;
561			break;
562		}
563		res = __hfsplus_ext_cache_extent(&hfsplus_handle, &fd, inode, alloc_cnt);
564		if (res)
565			break;
566		start = HFSPLUS_I(inode).cached_start;
567		hfsplus_free_extents(&hfsplus_handle, sb, HFSPLUS_I(inode).cached_extents,
568				     alloc_cnt - start, alloc_cnt - blk_cnt);
569		hfsplus_dump_extent(HFSPLUS_I(inode).cached_extents);
570		if (blk_cnt > start) {
571			HFSPLUS_I(inode).flags |= HFSPLUS_FLG_EXT_DIRTY;
572			break;
573		}
574		alloc_cnt = start;
575		HFSPLUS_I(inode).cached_start = HFSPLUS_I(inode).cached_blocks = 0;
576		HFSPLUS_I(inode).flags &= ~(HFSPLUS_FLG_EXT_DIRTY | HFSPLUS_FLG_EXT_NEW);
577		hfs_brec_remove(&hfsplus_handle, &fd);
578	}
579	hfs_find_exit(&hfsplus_handle, &fd);
580	mutex_unlock(&HFSPLUS_I(inode).extents_lock);
581
582	HFSPLUS_I(inode).alloc_blocks = blk_cnt;
583out:
584	HFSPLUS_I(inode).phys_size = inode->i_size;
585	HFSPLUS_I(inode).fs_blocks = (inode->i_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
586	inode_set_bytes(inode, HFSPLUS_I(inode).fs_blocks << sb->s_blocksize_bits);
587	hfsplus_journalled_mark_inode_dirty(__FUNCTION__, &hfsplus_handle, inode);
588    /* Foxconn added start pling 05/31/2010 */
589    /* Set the i_blocks field properly */
590    inode->i_blocks = inode->i_size/512;
591    if (inode->i_size % 512)
592        inode->i_blocks++;
593    /* Foxconn added end pling 05/31/2010 */
594	hfsplus_journal_stop(&hfsplus_handle);
595}
596