1/*
2 *  linux/fs/hfsplus/inode.c
3 *
4 * Copyright (C) 2001
5 * Brad Boyer (flar@allandria.com)
6 * (C) 2003 Ardis Technologies <roman@ardistech.com>
7 *
8 * Inode handling routines
9 */
10
11#include <linux/mm.h>
12#include <linux/fs.h>
13#include <linux/pagemap.h>
14#include <linux/mpage.h>
15#include <linux/sched.h>
16
17#include "hfsplus_fs.h"
18#include "hfsplus_raw.h"
19
/*
 * ->readpage: read one page through the generic buffer-head path,
 * mapping file blocks with hfsplus_get_block().
 */
static int hfsplus_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, hfsplus_get_block);
}
24
/*
 * ->writepage (non-journalled): write one page through the generic
 * buffer-head path, mapping file blocks with hfsplus_get_block().
 */
static int hfsplus_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, hfsplus_get_block, wbc);
}
29
/*
 * ->prepare_write (non-journalled): set up buffers for a write covering
 * [from, to) of @page.  cont_prepare_write() additionally zero-fills the
 * gap between the current on-disk EOF (tracked in the inode's phys_size)
 * and the start of the write.
 */
static int hfsplus_prepare_write(struct file *file, struct page *page, unsigned from, unsigned to)
{
	return cont_prepare_write(page, from, to, hfsplus_get_block,
		&HFSPLUS_I(page->mapping->host).phys_size);
}
35
/*
 * ->bmap: translate a file-relative block number to a device block
 * number via the generic helper and hfsplus_get_block().
 */
static sector_t hfsplus_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, hfsplus_get_block);
}
40
/*
 * ->releasepage for the B-tree inodes: a page may only be released when
 * no cached hfs_bnode still references it.
 *
 * Only the extents, catalog and attributes CNIDs can own pages here
 * (BUG() on anything else).  When the node size is at least a page, one
 * bnode covers this page; otherwise the page spans several consecutive
 * bnodes.  Every unreferenced bnode hashed for the page is unhashed and
 * freed; if any bnode is still in use (refcnt != 0) the page must stay
 * and 0 is returned.
 *
 * Returns the result of try_to_free_buffers() (non-zero if the page's
 * buffers were freed), or 0 if the page is still needed.
 */
static int hfsplus_releasepage(struct page *page, gfp_t mask)
{
	struct inode *inode = page->mapping->host;
	struct super_block *sb = inode->i_sb;
	struct hfs_btree *tree;
	struct hfs_bnode *node;
	u32 nidx;
	int i, res = 1;

	switch (inode->i_ino) {
	case HFSPLUS_EXT_CNID:
		tree = HFSPLUS_SB(sb).ext_tree;
		break;
	case HFSPLUS_CAT_CNID:
		tree = HFSPLUS_SB(sb).cat_tree;
		break;
	case HFSPLUS_ATTR_CNID:
		tree = HFSPLUS_SB(sb).attr_tree;
		break;
	default:
		BUG();
		return 0;
	}
	if (tree->node_size >= PAGE_CACHE_SIZE) {
		/* One bnode spans one or more pages: map page index to node index. */
		nidx = page->index >> (tree->node_size_shift - PAGE_CACHE_SHIFT);
		spin_lock(&tree->hash_lock);
		node = hfs_bnode_findhash(tree, nidx);
		if (!node)
			;
		else if (atomic_read(&node->refcnt))
			res = 0;
		if (res && node) {
			hfs_bnode_unhash(node);
			hfs_bnode_free(node);
		}
		spin_unlock(&tree->hash_lock);
	} else {
		/* Several bnodes per page: check each one covered by this page. */
		nidx = page->index << (PAGE_CACHE_SHIFT - tree->node_size_shift);
		i = 1 << (PAGE_CACHE_SHIFT - tree->node_size_shift);
		spin_lock(&tree->hash_lock);
		do {
			node = hfs_bnode_findhash(tree, nidx++);
			if (!node)
				continue;
			if (atomic_read(&node->refcnt)) {
				res = 0;
				break;
			}
			hfs_bnode_unhash(node);
			hfs_bnode_free(node);
		} while (--i && nidx < tree->node_count);
		spin_unlock(&tree->hash_lock);
	}
	return res ? try_to_free_buffers(page) : 0;
}
96
/*
 * Per-buffer callback for hfsplus_walk_page_buffers(): mark the buffer
 * uptodate and hand it to the journal as dirty metadata.  Buffers that
 * are unmapped or already freed by jbd are skipped.
 *
 * Returns 0 on success/skip or the journal error.
 */
int hfsplus_commit_write_fn(hfsplus_handle_t *hfsplus_handle, struct buffer_head *bh)
{
	if (!buffer_mapped(bh) || buffer_hfsplus_jbd_freed(bh))
		return 0;
	set_buffer_uptodate(bh);

	return hfsplus_journal_dirty_metadata(__FUNCTION__, bh, hfsplus_handle);
}
105
/*
 * ->set_page_dirty for journalled mappings: tag the page Checked so that
 * hfsplus_journalled_writepage() knows it must be fully journalled, then
 * dirty it without touching its buffers.
 */
static int hfsplus_jbd_set_page_dirty(struct page *page)
{
	SetPageChecked(page);
	return __set_page_dirty_nobuffers(page);
}
111
/*
 * Apply @fn to every buffer on @head's circular per-page list that
 * overlaps the byte range [@from, @to).  Buffers wholly outside the
 * range are skipped; if such a skipped buffer is not uptodate and
 * @partial is non-NULL, *partial is set so the caller knows the page
 * cannot be marked uptodate as a whole.
 *
 * The walk stops after the first error from @fn (the loop condition
 * checks ret == 0) and that first error is returned; 0 on success.
 * Modeled on ext3's walk_page_buffers().
 */
int hfsplus_walk_page_buffers(hfsplus_handle_t *hfsplus_handle,
				struct buffer_head *head,
				unsigned from,
				unsigned to,
				int *partial,
				int (*fn)(hfsplus_handle_t *hfsplus_handle,
						struct buffer_head *bh))
{
	struct buffer_head *bh;
	unsigned block_start, block_end;
	unsigned blocksize = head->b_size;
	int err, ret = 0;
	struct buffer_head *next;

	for (	bh = head, block_start = 0;
		ret == 0 && (bh != head || !block_start);
	    	block_start = block_end, bh = next)
	{
		next = bh->b_this_page;
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (partial && !buffer_uptodate(bh))
				*partial = 1;
			continue;
		}
		err = (*fn)(hfsplus_handle, bh);
		if (!ret)
			ret = err;
	}
	return ret;
}
143
/*
 * Per-buffer callback for hfsplus_walk_page_buffers(): request journal
 * write access for the buffer.  Unmapped or jbd-freed buffers are
 * skipped.  Returns 0 on success/skip or the journal error.
 */
int hfsplus_do_journal_get_write_access(hfsplus_handle_t *hfsplus_handle, struct buffer_head *bh)
{
	if (!buffer_mapped(bh) || buffer_hfsplus_jbd_freed(bh))
		return 0;
	return hfsplus_journal_get_write_access(__FUNCTION__, hfsplus_handle, bh);
}
150
/*
 * Journalled ->writepage, modeled on ext3_journalled_writepage().
 *
 * If we are already running inside a journal handle, or the journal has
 * aborted, the page is redirtied and left for a later writeback pass.
 * Otherwise a fresh handle is started.  Pages without buffers or tagged
 * Checked (by hfsplus_jbd_set_page_dirty()) are fully journalled:
 * buffers are attached, write access obtained, and the buffers are
 * committed through the journal.  Any other page may hold
 * checkpoint-mode buffers and is handed to block_write_full_page(),
 * which does the right thing (and unlocks the page itself).
 */
static int hfsplus_journalled_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct super_block *sb;
	hfsplus_handle_t hfsplus_handle;
	hfsplus_jbd_t *journal;
	int ret = 0;
	int err;

	sb = inode->i_sb;

	if (hfsplus_jbd_current_handle()) {
		dprint(DBG_JCOMMIT, "HFS+-fs: It is already in a handle\n");
		goto no_write;
	}

	journal = HFSPLUS_SB(sb).jnl.s_journal;
	if (is_hfsplus_jbd_aborted(journal)) {
		printk("HFS+-fs: Detected aborted journal\n");
		goto no_write;
	}

	if ((ret = hfsplus_journal_start(__FUNCTION__, sb, &hfsplus_handle)))
		goto no_write;

	/* Taken from ext3 */
	if (!page_has_buffers(page) || PageChecked(page)) {
		/*
		 * It's mmapped pagecache.  Add buffers and journal it.  There
		 * doesn't seem much point in redirtying the page here.
		 */
		ClearPageChecked(page);
		ret = block_prepare_write(page, 0, PAGE_CACHE_SIZE, hfsplus_get_block);
		if (ret != 0) {
			hfsplus_journal_stop(&hfsplus_handle);
			goto out_unlock;
		}
		ret = hfsplus_walk_page_buffers(&hfsplus_handle, page_buffers(page), 0,
			PAGE_CACHE_SIZE, NULL, hfsplus_do_journal_get_write_access);

		err = hfsplus_walk_page_buffers(&hfsplus_handle, page_buffers(page), 0,
				PAGE_CACHE_SIZE, NULL, hfsplus_commit_write_fn);
		if (ret == 0)
			ret = err;
		unlock_page(page);
	} else {
		/*
		 * It may be a page full of checkpoint-mode buffers.  We don't
		 * really know unless we go poke around in the buffer_heads.
		 * But block_write_full_page will do the right thing.
		 */
		ret = block_write_full_page(page, hfsplus_get_block, wbc);
	}
	err = hfsplus_journal_stop(&hfsplus_handle);
	if (!ret)
		ret = err;

out:
	return ret;

no_write:
	redirty_page_for_writepage(wbc, page);
out_unlock:
	unlock_page(page);
	goto out;
}
217
/*
 * Journalled ->prepare_write: start a journal handle, then set up the
 * page's buffers for a write in [from, to).
 *
 * On success the handle is deliberately left open; the matching
 * hfsplus_journalled_commit_write() picks it up via
 * hfsplus_jbd_current_handle() and stops it.  On failure the handle is
 * stopped here before the error is returned.
 */
static int hfsplus_journalled_prepare_write(struct file *file, struct page *page, unsigned from, unsigned to)
{
	struct inode *inode = page->mapping->host;
	hfsplus_handle_t hfsplus_handle;
	int ret;

	if ((ret = hfsplus_journal_start(__FUNCTION__, inode->i_sb, &hfsplus_handle)))
		goto out;

	ret = block_prepare_write(page, from, to, hfsplus_get_block);
	if (ret)
		hfsplus_journal_stop(&hfsplus_handle);

out:
	return ret;
}
234
/*
 * Journalled ->commit_write: runs inside the handle opened by
 * hfsplus_journalled_prepare_write() (recovered with
 * hfsplus_jbd_current_handle()) and stops it before returning.
 *
 * Duplicates generic_commit_write(): journal-dirty the written buffers,
 * mark the page uptodate when every buffer is, and extend i_size (and
 * journal the inode) when the write ends past the current size.
 * Returns the first error encountered, 0 otherwise.
 */
static int hfsplus_journalled_commit_write(struct file *file, struct page *page, unsigned from, unsigned to)
{
	hfsplus_handle_t *hfsplus_handle;
	struct inode *inode = page->mapping->host;
	int ret = 0, ret2;
	int partial = 0;
	loff_t pos;

	hfsplus_handle = hfsplus_jbd_current_handle();
	/*
	 * Here we duplicate the generic_commit_write() functionality
	 */
	pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;

	ret = hfsplus_walk_page_buffers(hfsplus_handle, page_buffers(page), from, to, &partial, hfsplus_commit_write_fn);
	if (!partial)
		SetPageUptodate(page);
	if (pos > inode->i_size) {
		i_size_write(inode, pos);
		ret2 = hfsplus_journalled_mark_inode_dirty(__FUNCTION__, hfsplus_handle, inode);
		if (!ret)
			ret = ret2;
	}

	ret2 = hfsplus_journal_stop(hfsplus_handle);
	if (!ret)
		ret = ret2;
	return ret;
}
264
265static int hfsplus_get_blocks(struct inode *inode, sector_t iblock, unsigned long max_blocks,
266			      struct buffer_head *bh_result, int create)
267{
268	int ret;
269
270	ret = hfsplus_get_block(inode, iblock, bh_result, create);
271	if (!ret)
272		bh_result->b_size = (1 << inode->i_blkbits);
273	return ret;
274}
275
/*
 * ->direct_IO: hand the request to the generic block-device direct-IO
 * path, mapping blocks with hfsplus_get_block().
 */
static ssize_t hfsplus_direct_IO(int rw, struct kiocb *iocb,
		const struct iovec *iov, loff_t offset, unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_path.dentry->d_inode->i_mapping->host;

	return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
				  offset, nr_segs, hfsplus_get_block, NULL);
}
285
286static int hfsplus_writepages(struct address_space *mapping,
287			      struct writeback_control *wbc)
288{
289	struct inode * const inode = mapping->host;
290	int ret;
291	u32 block_num = be32_to_cpu(HFSPLUS_I(inode).first_extents[0].start_block);
292
293	if (block_num == 1)
294		return mpage_writepages(mapping, wbc, NULL);
295	else
296		return mpage_writepages(mapping, wbc, hfsplus_get_block);
297
298	return ret;
299}
300
301#if 0
/*
 * Disabled (#if 0) journalled ->writepages variant kept for reference:
 * same page selection as hfsplus_writepages() but wrapped in a journal
 * handle.  Not wired into any address_space_operations table.
 */
static int hfsplus_journalled_writepages(struct address_space *mapping,
			      struct writeback_control *wbc)
{
	struct inode * const inode = mapping->host;
	u32 block_num = be32_to_cpu(HFSPLUS_I(inode).first_extents[0].start_block);
	int ret;
	hfsplus_handle_t hfsplus_handle;

	if (hfsplus_journal_start(__FUNCTION__, inode->i_sb, &hfsplus_handle))
		return 1; /* TODO: Need to check return value */

	if (block_num == 1)
		ret = mpage_writepages(mapping, wbc, NULL);
	else
		ret = mpage_writepages(mapping, wbc, hfsplus_get_block);

	hfsplus_journal_stop(&hfsplus_handle);
	return ret;
}
321#endif
322
/*
 * Journalled address-space ops for the special B-tree files (catalog,
 * extents, attributes): journalled write paths plus releasepage-based
 * bnode cache teardown; no direct IO or bulk writepages.
 */
struct address_space_operations hfsplus_journalled_btree_aops = {
	.readpage	= hfsplus_readpage,
	.writepage	= hfsplus_journalled_writepage,
	.sync_page	= block_sync_page,
	.prepare_write	= hfsplus_journalled_prepare_write,
	.commit_write	= hfsplus_journalled_commit_write,
	.set_page_dirty = hfsplus_jbd_set_page_dirty,
	.bmap		= hfsplus_bmap,
	.releasepage	= hfsplus_releasepage,
};
333
/*
 * Journalled address-space ops for regular file data: journalled write
 * paths plus direct IO and bulk writepages (no releasepage — regular
 * files have no cached bnodes).
 */
struct address_space_operations hfsplus_journalled_aops = {
	.readpage	= hfsplus_readpage,
	.writepage	= hfsplus_journalled_writepage,
	.sync_page	= block_sync_page,
	.prepare_write	= hfsplus_journalled_prepare_write,
	.commit_write	= hfsplus_journalled_commit_write,
	.set_page_dirty = hfsplus_jbd_set_page_dirty,
	.bmap		= hfsplus_bmap,
	.direct_IO	= hfsplus_direct_IO,
	.writepages	= hfsplus_writepages,
};
345
/*
 * Non-journalled address-space ops for the B-tree files: generic
 * buffer-head write paths plus releasepage-based bnode cache teardown.
 */
const struct address_space_operations hfsplus_btree_aops = {
	.readpage	= hfsplus_readpage,
	.writepage	= hfsplus_writepage,
	.sync_page	= block_sync_page,
	.prepare_write	= hfsplus_prepare_write,
	.commit_write	= generic_commit_write,
	.bmap		= hfsplus_bmap,
	.releasepage	= hfsplus_releasepage,
};
355
/*
 * Non-journalled address-space ops for regular file data: generic
 * buffer-head write paths plus direct IO and bulk writepages.
 */
const struct address_space_operations hfsplus_aops = {
	.readpage	= hfsplus_readpage,
	.writepage	= hfsplus_writepage,
	.sync_page	= block_sync_page,
	.prepare_write	= hfsplus_prepare_write,
	.commit_write	= generic_commit_write,
	.bmap		= hfsplus_bmap,
	.direct_IO	= hfsplus_direct_IO,
	.writepages	= hfsplus_writepages,
};
366
/*
 * ->lookup on a regular file: the only name a file "contains" is the
 * magic "rsrc" entry, which exposes its resource fork.
 *
 * The resource-fork inode shares the owner's CNID, carries
 * HFSPLUS_FLG_RSRC and its fork data read from the same catalog record.
 * Owner and fork are cross-linked through rsrc_inode (with an igrab()
 * on the owner) and the fork inode is hashed onto the private
 * rsrc_inodes list, since its i_ino duplicates the owner's.
 *
 * NOTE(review): if hfsplus_journal_start() fails, this returns NULL
 * without d_add()ing the dentry, unlike every other path — confirm
 * that is intended.
 */
static struct dentry *hfsplus_file_lookup(struct inode *dir, struct dentry *dentry,
					  struct nameidata *nd)
{
	struct hfs_find_data fd;
	struct super_block *sb = dir->i_sb;
	struct inode *inode = NULL;
	hfsplus_handle_t hfsplus_handle;
	int err;

	/* Only the name "rsrc" on a data-fork inode resolves to anything. */
	if (HFSPLUS_IS_RSRC(dir) || strcmp(dentry->d_name.name, "rsrc"))
		goto out;

	/* Reuse an already-instantiated resource-fork inode. */
	inode = HFSPLUS_I(dir).rsrc_inode;
	if (inode)
		goto out;

	if (hfsplus_journal_start(__FUNCTION__, sb, &hfsplus_handle))
		return NULL;

	inode = new_inode(sb);
	if (!inode) {
		hfsplus_journal_stop(&hfsplus_handle);
		return ERR_PTR(-ENOMEM);
	}

	inode->i_ino = dir->i_ino;
	INIT_LIST_HEAD(&HFSPLUS_I(inode).open_dir_list);
	init_MUTEX(&HFSPLUS_I(inode).extents_lock);
	HFSPLUS_I(inode).flags = HFSPLUS_FLG_RSRC;

	/* Read the fork data from the owner's catalog record. */
	hfs_find_init(HFSPLUS_SB(sb).cat_tree, &fd);
	err = hfsplus_find_cat(&hfsplus_handle, sb, dir->i_ino, &fd);
	if (!err)
		err = hfsplus_cat_read_inode(inode, &fd);
	hfs_find_exit(&hfsplus_handle, &fd);
	if (err) {
		iput(inode);
		hfsplus_journal_stop(&hfsplus_handle);
		return ERR_PTR(err);
	}
	HFSPLUS_I(inode).rsrc_inode = dir;
	HFSPLUS_I(dir).rsrc_inode = inode;
	igrab(dir);
	hlist_add_head(&inode->i_hash, &HFSPLUS_SB(sb).rsrc_inodes);
	hfsplus_journalled_mark_inode_dirty(__FUNCTION__, &hfsplus_handle, inode);
	hfsplus_journal_stop(&hfsplus_handle);
out:
	d_add(dentry, inode);
	return NULL;
}
417
418static void hfsplus_get_perms(struct inode *inode, struct hfsplus_perm *perms, int dir)
419{
420	struct super_block *sb = inode->i_sb;
421	u16 mode;
422
423	mode = be16_to_cpu(perms->mode);
424
425	inode->i_uid = be32_to_cpu(perms->owner);
426	if (!inode->i_uid && !mode)
427		inode->i_uid = HFSPLUS_SB(sb).uid;
428
429	inode->i_gid = be32_to_cpu(perms->group);
430	if (!inode->i_gid && !mode)
431		inode->i_gid = HFSPLUS_SB(sb).gid;
432
433	if (dir) {
434		mode = mode ? (mode & S_IALLUGO) :
435			(S_IRWXUGO & ~(HFSPLUS_SB(sb).umask));
436		mode |= S_IFDIR;
437	} else if (!mode)
438		mode = S_IFREG | ((S_IRUGO|S_IWUGO) &
439			~(HFSPLUS_SB(sb).umask));
440	inode->i_mode = mode;
441
442	HFSPLUS_I(inode).rootflags = perms->rootflags;
443	HFSPLUS_I(inode).userflags = perms->userflags;
444	if (perms->rootflags & HFSPLUS_FLG_IMMUTABLE)
445		inode->i_flags |= S_IMMUTABLE;
446	else
447		inode->i_flags &= ~S_IMMUTABLE;
448	if (perms->rootflags & HFSPLUS_FLG_APPEND)
449		inode->i_flags |= S_APPEND;
450	else
451		inode->i_flags &= ~S_APPEND;
452}
453
454static void hfsplus_set_perms(struct inode *inode, struct hfsplus_perm *perms)
455{
456	if (inode->i_flags & S_IMMUTABLE)
457		perms->rootflags |= HFSPLUS_FLG_IMMUTABLE;
458	else
459		perms->rootflags &= ~HFSPLUS_FLG_IMMUTABLE;
460	if (inode->i_flags & S_APPEND)
461		perms->rootflags |= HFSPLUS_FLG_APPEND;
462	else
463		perms->rootflags &= ~HFSPLUS_FLG_APPEND;
464	perms->userflags = HFSPLUS_I(inode).userflags;
465	perms->mode = cpu_to_be16(inode->i_mode);
466	perms->owner = cpu_to_be32(inode->i_uid);
467	perms->group = cpu_to_be32(inode->i_gid);
468	perms->dev = cpu_to_be32(HFSPLUS_I(inode).dev);
469}
470
471static int hfsplus_permission(struct inode *inode, int mask, struct nameidata *nd)
472{
473	/* MAY_EXEC is also used for lookup, if no x bit is set allow lookup,
474	 * open_exec has the same test, so it's still not executable, if a x bit
475	 * is set fall back to standard permission check.
476	 */
477	if (S_ISREG(inode->i_mode) && mask & MAY_EXEC && !(inode->i_mode & 0111))
478		return 0;
479	return generic_permission(inode, mask, NULL);
480}
481
482
483static int hfsplus_file_open(struct inode *inode, struct file *file)
484{
485	if (HFSPLUS_IS_RSRC(inode))
486		inode = HFSPLUS_I(inode).rsrc_inode;
487	if (atomic_read(&file->f_count) != 1)
488		return 0;
489	atomic_inc(&HFSPLUS_I(inode).opencnt);
490	return 0;
491}
492
/*
 * ->release: drop the open count taken in hfsplus_file_open() (again on
 * the main inode for resource forks); nothing happens until the last
 * reference to the struct file is gone.  When the last opener goes
 * away, the file is truncated (presumably trimming preallocated blocks
 * past i_size — confirm in hfsplus_file_truncate()); if it was marked
 * S_DEAD (unlinked while open) it is also removed from the hidden
 * directory and deleted on disk under a journal handle.
 */
static int hfsplus_file_release(struct inode *inode, struct file *file)
{
	struct super_block *sb = inode->i_sb;
	hfsplus_handle_t hfsplus_handle;

	if (HFSPLUS_IS_RSRC(inode))
		inode = HFSPLUS_I(inode).rsrc_inode;
	if (atomic_read(&file->f_count) != 0)
		return 0;
	if (atomic_dec_and_test(&HFSPLUS_I(inode).opencnt)) {
		int ret;
		mutex_lock(&inode->i_mutex);
		hfsplus_file_truncate(inode);
		if (inode->i_flags & S_DEAD) {
			if ((ret = hfsplus_journal_start(__FUNCTION__, sb, &hfsplus_handle))) {
				mutex_unlock(&inode->i_mutex);
				return ret;
			}
			hfsplus_delete_cat(&hfsplus_handle, inode->i_ino, HFSPLUS_SB(sb).hidden_dir, NULL);
			hfsplus_delete_inode(&hfsplus_handle, inode);
			hfsplus_journal_stop(&hfsplus_handle);
		}
		mutex_unlock(&inode->i_mutex);
	}
	return 0;
}
519
520extern const struct inode_operations hfsplus_dir_inode_operations;
521extern struct file_operations hfsplus_dir_operations;
522
/*
 * Inode operations for regular files; ->lookup resolves only the
 * synthetic "rsrc" child (resource fork).
 */
static const struct inode_operations hfsplus_file_inode_operations = {
	.lookup		= hfsplus_file_lookup,
	.truncate	= hfsplus_file_truncate,
	.permission	= hfsplus_permission,
	.setxattr	= hfsplus_setxattr,
	.getxattr	= hfsplus_getxattr,
	.listxattr	= hfsplus_listxattr,
};
531
/*
 * File operations for regular files: generic VFS read/write/mmap paths
 * plus HFS+ open/release accounting and the HFS+ ioctl.
 */
static const struct file_operations hfsplus_file_operations = {
	.llseek 	= generic_file_llseek,
	.read		= do_sync_read,
	.aio_read	= generic_file_aio_read,
	.write		= do_sync_write,
	.aio_write	= generic_file_aio_write,
	.mmap		= generic_file_mmap,
	.sendfile	= generic_file_sendfile,
	.fsync		= file_fsync,
	.open		= hfsplus_file_open,
	.release	= hfsplus_file_release,
	.ioctl          = hfsplus_ioctl,
};
545
/*
 * Allocate and initialize a new in-core inode under an open journal
 * handle: assign the next CNID, current credentials and timestamps,
 * reset all fork/extent state, wire up the type-specific ops, hash the
 * inode and journal it dirty.
 *
 * Returns the inode, or NULL on allocation failure or if journalling
 * the new inode fails (the on-disk accounting is rolled back via
 * hfsplus_delete_inode()).
 * NOTE(review): on that failure path the in-core inode is not iput()
 * here — confirm the caller cannot leak it.
 */
struct inode *hfsplus_new_inode(hfsplus_handle_t *hfsplus_handle, struct super_block *sb, int mode)
{
	struct inode *inode = new_inode(sb);
	if (!inode)
		return NULL;

	//{
	//void hfsplus_inode_check(struct super_block *sb);
	//atomic_inc(&HFSPLUS_SB(sb).inode_cnt);
	//hfsplus_inode_check(sb);
	//}
	inode->i_ino = HFSPLUS_SB(sb).next_cnid++;
	inode->i_mode = mode;
	inode->i_uid = current->fsuid;
	inode->i_gid = current->fsgid;
	inode->i_nlink = 1;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
	INIT_LIST_HEAD(&HFSPLUS_I(inode).open_dir_list);
	init_MUTEX(&HFSPLUS_I(inode).extents_lock);
	atomic_set(&HFSPLUS_I(inode).opencnt, 0);
	HFSPLUS_I(inode).flags = 0;
	/* Fresh inode: no extents mapped or cached yet. */
	memset(HFSPLUS_I(inode).first_extents, 0, sizeof(hfsplus_extent_rec));
	memset(HFSPLUS_I(inode).cached_extents, 0, sizeof(hfsplus_extent_rec));
	HFSPLUS_I(inode).alloc_blocks = 0;
	HFSPLUS_I(inode).first_blocks = 0;
	HFSPLUS_I(inode).cached_start = 0;
	HFSPLUS_I(inode).cached_blocks = 0;
	HFSPLUS_I(inode).phys_size = 0;
	HFSPLUS_I(inode).fs_blocks = 0;
	HFSPLUS_I(inode).rsrc_inode = NULL;
	if (S_ISDIR(inode->i_mode)) {
		/* "." and ".." give every directory an initial size of 2. */
		inode->i_size = 2;
		HFSPLUS_SB(sb).folder_count++;
		inode->i_op = &hfsplus_dir_inode_operations;
		inode->i_fop = &hfsplus_dir_operations;
	} else if (S_ISREG(inode->i_mode)) {
		HFSPLUS_SB(sb).file_count++;
		inode->i_op = &hfsplus_file_inode_operations;
		inode->i_fop = &hfsplus_file_operations;
		inode->i_mapping->a_ops = &hfsplus_aops;
		HFSPLUS_I(inode).clump_blocks = HFSPLUS_SB(sb).data_clump_blocks;
	} else if (S_ISLNK(inode->i_mode)) {
		HFSPLUS_SB(sb).file_count++;
		inode->i_op = &page_symlink_inode_operations;
		inode->i_mapping->a_ops = &hfsplus_aops;
		HFSPLUS_I(inode).clump_blocks = 1;
	} else
		HFSPLUS_SB(sb).file_count++;
	insert_inode_hash(inode);
	if (hfsplus_journalled_mark_inode_dirty(__FUNCTION__, hfsplus_handle, inode)) {
		hfsplus_delete_inode(hfsplus_handle, inode);
		return NULL;
	}
	sb->s_dirt = 1;

	return inode;
}
603
604void hfsplus_delete_inode(hfsplus_handle_t *hfsplus_handle, struct inode *inode)
605{
606	struct super_block *sb = inode->i_sb;
607
608	if (S_ISDIR(inode->i_mode)) {
609		HFSPLUS_SB(sb).folder_count--;
610		sb->s_dirt = 1;
611		return;
612	}
613	HFSPLUS_SB(sb).file_count--;
614	if (S_ISREG(inode->i_mode)) {
615		if (!inode->i_nlink) {
616			inode->i_size = 0;
617			hfsplus_file_truncate(inode);
618		}
619	} else if (S_ISLNK(inode->i_mode)) {
620		inode->i_size = 0;
621		hfsplus_file_truncate(inode);
622	}
623	sb->s_dirt = 1;
624}
625
/*
 * Load a raw on-disk fork descriptor into the in-core inode: copy the
 * first extent record, count its blocks, reset the cached-extent state,
 * and derive i_size, phys_size, fs_blocks, i_blocks and the clump size
 * from the fork fields.  An on-disk clump size of 0 falls back to the
 * mount defaults (resource vs data clump).
 */
void hfsplus_inode_read_fork(struct inode *inode, struct hfsplus_fork_raw *fork)
{
	struct super_block *sb = inode->i_sb;
	u32 count;
	int i;

	memcpy(&HFSPLUS_I(inode).first_extents, &fork->extents,
	       sizeof(hfsplus_extent_rec));
	/* An extent record holds 8 (start, count) pairs; sum the counts. */
	for (count = 0, i = 0; i < 8; i++)
		count += be32_to_cpu(fork->extents[i].block_count);
	HFSPLUS_I(inode).first_blocks = count;
	memset(HFSPLUS_I(inode).cached_extents, 0, sizeof(hfsplus_extent_rec));
	HFSPLUS_I(inode).cached_start = 0;
	HFSPLUS_I(inode).cached_blocks = 0;

	HFSPLUS_I(inode).alloc_blocks = be32_to_cpu(fork->total_blocks);
	inode->i_size = HFSPLUS_I(inode).phys_size = be64_to_cpu(fork->total_size);
	HFSPLUS_I(inode).fs_blocks = (inode->i_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
	inode_set_bytes(inode, HFSPLUS_I(inode).fs_blocks << sb->s_blocksize_bits);
    /* Foxconn added start pling 05/31/2010 */
    /* Set the i_blocks field properly */
    inode->i_blocks = inode->i_size/512;
    if (inode->i_size % 512)
        inode->i_blocks++;
    /* Foxconn added end pling 05/31/2010 */
	HFSPLUS_I(inode).clump_blocks = be32_to_cpu(fork->clump_size) >> HFSPLUS_SB(sb).alloc_blksz_shift;
	if (!HFSPLUS_I(inode).clump_blocks)
		HFSPLUS_I(inode).clump_blocks = HFSPLUS_IS_RSRC(inode) ? HFSPLUS_SB(sb).rsrc_clump_blocks :
				HFSPLUS_SB(sb).data_clump_blocks;
}
656
657void hfsplus_inode_write_fork(struct inode *inode, struct hfsplus_fork_raw *fork)
658{
659	memcpy(&fork->extents, &HFSPLUS_I(inode).first_extents,
660	       sizeof(hfsplus_extent_rec));
661	fork->total_size = cpu_to_be64(inode->i_size);
662	fork->total_blocks = cpu_to_be32(HFSPLUS_I(inode).alloc_blocks);
663}
664
/*
 * Initialize an in-core inode from the catalog record located by @fd.
 *
 * Folder records yield a directory inode (i_size = valence + 2 for
 * "." and ".."); file records load the data or resource fork depending
 * on the inode's fork flag, then pick ops per file type.  For regular
 * files a non-zero permissions.dev doubles as the hard-link count
 * (mirrored back in hfsplus_cat_write_inode()); for device nodes it is
 * the device number.  Any other record type is an on-disk
 * inconsistency and returns -EIO.
 */
int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
{
	hfsplus_cat_entry entry;
	int res = 0;
	u16 type;

	type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);

	HFSPLUS_I(inode).dev = 0;
	if (type == HFSPLUS_FOLDER) {
		struct hfsplus_cat_folder *folder = &entry.folder;

		/* Undersized record: read proceeds anyway (see "panic?"). */
		if (fd->entrylength < sizeof(struct hfsplus_cat_folder))
			/* panic? */;
		hfs_bnode_read(fd->bnode, &entry, fd->entryoffset,
					sizeof(struct hfsplus_cat_folder));
		hfsplus_get_perms(inode, &folder->permissions, 1);
		inode->i_nlink = 1;
		inode->i_size = 2 + be32_to_cpu(folder->valence);
		inode->i_atime = hfsp_mt2ut(folder->access_date);
		inode->i_mtime = hfsp_mt2ut(folder->content_mod_date);
		inode->i_ctime = hfsp_mt2ut(folder->attribute_mod_date);
		HFSPLUS_I(inode).create_date = folder->create_date;
		HFSPLUS_I(inode).fs_blocks = 0;
		inode->i_op = &hfsplus_dir_inode_operations;
		inode->i_fop = &hfsplus_dir_operations;
	} else if (type == HFSPLUS_FILE) {
		struct hfsplus_cat_file *file = &entry.file;

		if (fd->entrylength < sizeof(struct hfsplus_cat_file))
			/* panic? */;
		hfs_bnode_read(fd->bnode, &entry, fd->entryoffset,
					sizeof(struct hfsplus_cat_file));

		/* Pick the fork this inode represents (data vs resource). */
		hfsplus_inode_read_fork(inode, HFSPLUS_IS_DATA(inode) ?
					&file->data_fork : &file->rsrc_fork);
		hfsplus_get_perms(inode, &file->permissions, 0);
		inode->i_nlink = 1;
		if (S_ISREG(inode->i_mode)) {
			if (file->permissions.dev)
				inode->i_nlink = be32_to_cpu(file->permissions.dev);
			inode->i_op = &hfsplus_file_inode_operations;
			inode->i_fop = &hfsplus_file_operations;
			inode->i_mapping->a_ops = &hfsplus_aops;
		} else if (S_ISLNK(inode->i_mode)) {
			inode->i_op = &page_symlink_inode_operations;
			inode->i_mapping->a_ops = &hfsplus_aops;
		} else {
			init_special_inode(inode, inode->i_mode,
					   be32_to_cpu(file->permissions.dev));
		}
		inode->i_atime = hfsp_mt2ut(file->access_date);
		inode->i_mtime = hfsp_mt2ut(file->content_mod_date);
		inode->i_ctime = hfsp_mt2ut(file->attribute_mod_date);
		HFSPLUS_I(inode).create_date = file->create_date;
	} else {
		printk(KERN_ERR "hfs: bad catalog entry used to create inode\n");
		res = -EIO;
	}
	return res;
}
726
/*
 * Write the in-core inode state back into its catalog record.
 *
 * Resource-fork inodes are redirected to their owning main inode for
 * the catalog lookup; an unlinked main inode is simply skipped.  The
 * find-data setup/teardown uses the journalled variants only when a
 * journal is present.  Folder records get permissions, dates and
 * valence; a resource-fork inode updates only its fork; a main file
 * inode updates its data fork, permissions (with dev doubling as the
 * link count for regular files, matching hfsplus_cat_read_inode()),
 * the HFS+ locked flag mirrored from IMMUTABLE, and the dates.
 *
 * NOTE(review): a failed hfsplus_find_cat() jumps straight to "out"
 * and the function still returns 0, so callers never see that
 * failure — confirm this is intentional.
 */
int hfsplus_cat_write_inode(hfsplus_handle_t *hfsplus_handle, struct inode *inode)
{
	struct inode *main_inode = inode;
	struct hfs_find_data fd;
	hfsplus_cat_entry entry;

	if (HFSPLUS_IS_RSRC(inode))
		main_inode = HFSPLUS_I(inode).rsrc_inode;

	if (!main_inode->i_nlink)
		return 0;

	if (hfsplus_handle->journaled != HFSPLUS_JOURNAL_PRESENT) {
		if (hfs_find_init(HFSPLUS_SB(main_inode->i_sb).cat_tree, &fd))
			/* panic? */
			return -EIO;
	} else {
		if (hfsplus_journalled_find_init(HFSPLUS_SB(main_inode->i_sb).cat_tree, &fd))
			/* panic? */
			return -EIO;
	}

	if (hfsplus_find_cat(hfsplus_handle, main_inode->i_sb, main_inode->i_ino, &fd))
		/* panic? */
		goto out;

	if (S_ISDIR(main_inode->i_mode)) {
		struct hfsplus_cat_folder *folder = &entry.folder;

		if (fd.entrylength < sizeof(struct hfsplus_cat_folder))
			/* panic? */;
		hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
					sizeof(struct hfsplus_cat_folder));
		/* simple node checks? */
		hfsplus_set_perms(inode, &folder->permissions);
		folder->access_date = hfsp_ut2mt(inode->i_atime);
		folder->content_mod_date = hfsp_ut2mt(inode->i_mtime);
		folder->attribute_mod_date = hfsp_ut2mt(inode->i_ctime);
		/* i_size counts "." and ".."; on-disk valence does not. */
		folder->valence = cpu_to_be32(inode->i_size - 2);
		hfs_bnode_write(hfsplus_handle, fd.bnode, &entry, fd.entryoffset,
					 sizeof(struct hfsplus_cat_folder));
	} else if (HFSPLUS_IS_RSRC(inode)) {
		struct hfsplus_cat_file *file = &entry.file;
		hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
			       sizeof(struct hfsplus_cat_file));
		hfsplus_inode_write_fork(inode, &file->rsrc_fork);
		hfs_bnode_write(hfsplus_handle, fd.bnode, &entry, fd.entryoffset,
				sizeof(struct hfsplus_cat_file));
	} else {
		struct hfsplus_cat_file *file = &entry.file;

		if (fd.entrylength < sizeof(struct hfsplus_cat_file))
			/* panic? */;
		hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
					sizeof(struct hfsplus_cat_file));
		hfsplus_inode_write_fork(inode, &file->data_fork);
		if (S_ISREG(inode->i_mode))
			HFSPLUS_I(inode).dev = inode->i_nlink;
		if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
			HFSPLUS_I(inode).dev = kdev_t_to_nr(inode->i_rdev);
		hfsplus_set_perms(inode, &file->permissions);
		if ((file->permissions.rootflags | file->permissions.userflags) & HFSPLUS_FLG_IMMUTABLE)
			file->flags |= cpu_to_be16(HFSPLUS_FILE_LOCKED);
		else
			file->flags &= cpu_to_be16(~HFSPLUS_FILE_LOCKED);
		file->access_date = hfsp_ut2mt(inode->i_atime);
		file->content_mod_date = hfsp_ut2mt(inode->i_mtime);
		file->attribute_mod_date = hfsp_ut2mt(inode->i_ctime);
		hfs_bnode_write(hfsplus_handle, fd.bnode, &entry, fd.entryoffset,
					 sizeof(struct hfsplus_cat_file));
	}
out:
	if (hfsplus_handle->journaled != HFSPLUS_JOURNAL_PRESENT) {
		hfs_find_exit(hfsplus_handle, &fd);
	} else {
		hfsplus_journalled_find_exit(hfsplus_handle, &fd);
	}
	return 0;
}
806