/*
 *  linux/fs/hfsplus/inode.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Inode handling routines
 */

#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/mpage.h>
#include <linux/sched.h>

#include "hfsplus_fs.h"
#include "hfsplus_raw.h"

21static int hfsplus_readpage(struct file *file, struct page *page)
22{
23	return block_read_full_page(page, hfsplus_get_block);
24}
25
26static int hfsplus_writepage(struct page *page, struct writeback_control *wbc)
27{
28	return block_write_full_page(page, hfsplus_get_block, wbc);
29}
30
31static int hfsplus_write_begin(struct file *file, struct address_space *mapping,
32			loff_t pos, unsigned len, unsigned flags,
33			struct page **pagep, void **fsdata)
34{
35	int ret;
36
37	*pagep = NULL;
38	ret = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
39				hfsplus_get_block,
40				&HFSPLUS_I(mapping->host).phys_size);
41	if (unlikely(ret)) {
42		loff_t isize = mapping->host->i_size;
43		if (pos + len > isize)
44			vmtruncate(mapping->host, isize);
45	}
46
47	return ret;
48}
49
50static sector_t hfsplus_bmap(struct address_space *mapping, sector_t block)
51{
52	return generic_block_bmap(mapping, block, hfsplus_get_block);
53}
54
/*
 * ->releasepage for the three metadata B-tree inodes (extents, catalog,
 * attributes).  A page can only be released if no cached bnode built on
 * it is still referenced; unreferenced bnodes covering the page are
 * evicted here.  Returns non-zero if the page's buffers were freed.
 */
static int hfsplus_releasepage(struct page *page, gfp_t mask)
{
	struct inode *inode = page->mapping->host;
	struct super_block *sb = inode->i_sb;
	struct hfs_btree *tree;
	struct hfs_bnode *node;
	u32 nidx;
	int i, res = 1;

	/* Only the metadata B-tree inodes use this address space. */
	switch (inode->i_ino) {
	case HFSPLUS_EXT_CNID:
		tree = HFSPLUS_SB(sb).ext_tree;
		break;
	case HFSPLUS_CAT_CNID:
		tree = HFSPLUS_SB(sb).cat_tree;
		break;
	case HFSPLUS_ATTR_CNID:
		tree = HFSPLUS_SB(sb).attr_tree;
		break;
	default:
		BUG();
		return 0;
	}
	if (!tree)
		return 0;
	if (tree->node_size >= PAGE_CACHE_SIZE) {
		/* Node spans one or more pages: at most one node to check. */
		nidx = page->index >> (tree->node_size_shift - PAGE_CACHE_SHIFT);
		spin_lock(&tree->hash_lock);
		node = hfs_bnode_findhash(tree, nidx);
		if (!node)
			;
		else if (atomic_read(&node->refcnt))
			res = 0;	/* node still in use: cannot release */
		if (res && node) {
			hfs_bnode_unhash(node);
			hfs_bnode_free(node);
		}
		spin_unlock(&tree->hash_lock);
	} else {
		/* Several nodes per page: evict each cached, idle one. */
		nidx = page->index << (PAGE_CACHE_SHIFT - tree->node_size_shift);
		i = 1 << (PAGE_CACHE_SHIFT - tree->node_size_shift);
		spin_lock(&tree->hash_lock);
		do {
			node = hfs_bnode_findhash(tree, nidx++);
			if (!node)
				continue;
			if (atomic_read(&node->refcnt)) {
				res = 0;
				break;
			}
			hfs_bnode_unhash(node);
			hfs_bnode_free(node);
		} while (--i && nidx < tree->node_count);
		spin_unlock(&tree->hash_lock);
	}
	return res ? try_to_free_buffers(page) : 0;
}

113int hfsplus_commit_write_fn(hfsplus_handle_t *hfsplus_handle, struct buffer_head *bh)
114{
115	if (!buffer_mapped(bh) || buffer_hfsplus_jbd_freed(bh))
116		return 0;
117	set_buffer_uptodate(bh);
118
119	return hfsplus_journal_dirty_metadata(__FUNCTION__, bh, hfsplus_handle);
120}
121
/*
 * ->set_page_dirty for journalled aops: tag the page Checked so that
 * writepage later journals it as a full-page write.
 */
static int hfsplus_jbd_set_page_dirty(struct page *page)
{
	int rc;

	SetPageChecked(page);
	rc = __set_page_dirty_nobuffers(page);
	return rc;
}

128int hfsplus_walk_page_buffers(hfsplus_handle_t *hfsplus_handle,
129				struct buffer_head *head,
130				unsigned from,
131				unsigned to,
132				int *partial,
133				int (*fn)(hfsplus_handle_t *hfsplus_handle,
134						struct buffer_head *bh))
135{
136	struct buffer_head *bh;
137	unsigned block_start, block_end;
138	unsigned blocksize = head->b_size;
139	int err, ret = 0;
140	struct buffer_head *next;
141
142	for (	bh = head, block_start = 0;
143		ret == 0 && (bh != head || !block_start);
144	    	block_start = block_end, bh = next)
145	{
146		next = bh->b_this_page;
147		block_end = block_start + blocksize;
148		if (block_end <= from || block_start >= to) {
149			if (partial && !buffer_uptodate(bh))
150				*partial = 1;
151			continue;
152		}
153		err = (*fn)(hfsplus_handle, bh);
154		if (!ret)
155			ret = err;
156	}
157	return ret;
158}
159
160int hfsplus_do_journal_get_write_access(hfsplus_handle_t *hfsplus_handle, struct buffer_head *bh)
161{
162	if (!buffer_mapped(bh) || buffer_hfsplus_jbd_freed(bh))
163		return 0;
164	return hfsplus_journal_get_write_access(__FUNCTION__, hfsplus_handle, bh);
165}
166
/*
 * ->writepage for journalled mounts, modelled on ext3's journalled
 * writepage.  Pages tagged PageChecked (or without buffers, i.e.
 * mmapped pagecache) are rewritten buffer-by-buffer through a journal
 * handle; everything else goes through block_write_full_page().
 */
static int hfsplus_journalled_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct super_block *sb;
	hfsplus_handle_t hfsplus_handle;
	hfsplus_jbd_t *journal;
	int ret = 0;
	int err;

	sb = inode->i_sb;

	/* Never start a handle inside a running one; just redirty. */
	if (hfsplus_jbd_current_handle()) {
		dprint(DBG_JCOMMIT, "HFS+-fs: It is already in a handle\n");
		goto no_write;
	}

	journal = HFSPLUS_SB(sb).jnl.s_journal;
	if (is_hfsplus_jbd_aborted(journal)) {
		printk("HFS+-fs: Detected aborted journal\n");
		goto no_write;
	}

	if ((ret = hfsplus_journal_start(__FUNCTION__, sb, &hfsplus_handle)))
		goto no_write;

	/* Taken from ext3 */
	if (!page_has_buffers(page) || PageChecked(page)) {
		/*
		 * It's mmapped pagecache.  Add buffers and journal it.  There
		 * doesn't seem much point in redirtying the page here.
		 */
		ClearPageChecked(page);
		ret = block_prepare_write(page, 0, PAGE_CACHE_SIZE, hfsplus_get_block);
		if (ret != 0) {
			hfsplus_journal_stop(&hfsplus_handle);
			goto out_unlock;
		}
		/* Get journal write access on all buffers, then commit them. */
		ret = hfsplus_walk_page_buffers(&hfsplus_handle, page_buffers(page), 0,
			PAGE_CACHE_SIZE, NULL, hfsplus_do_journal_get_write_access);

		err = hfsplus_walk_page_buffers(&hfsplus_handle, page_buffers(page), 0,
				PAGE_CACHE_SIZE, NULL, hfsplus_commit_write_fn);
		if (ret == 0)
			ret = err;
		unlock_page(page);
	} else {
		/*
		 * It may be a page full of checkpoint-mode buffers.  We don't
		 * really know unless we go poke around in the buffer_heads.
		 * But block_write_full_page will do the right thing.
		 */
		ret = block_write_full_page(page, hfsplus_get_block, wbc);
	}
	err = hfsplus_journal_stop(&hfsplus_handle);
	if (!ret)
		ret = err;

out:
	return ret;

no_write:
	redirty_page_for_writepage(wbc, page);
out_unlock:
	unlock_page(page);
	goto out;
}

234static ssize_t hfsplus_direct_IO(int rw, struct kiocb *iocb,
235		const struct iovec *iov, loff_t offset, unsigned long nr_segs)
236{
237	struct file *file = iocb->ki_filp;
238	struct inode *inode = file->f_path.dentry->d_inode->i_mapping->host;
239	ssize_t ret;
240
241	ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
242				  offset, nr_segs, hfsplus_get_block, NULL);
243
244	/*
245	 * In case of error extending write may have instantiated a few
246	 * blocks outside i_size. Trim these off again.
247	 */
248	if (unlikely((rw & WRITE) && ret < 0)) {
249		loff_t isize = i_size_read(inode);
250		loff_t end = offset + iov_length(iov, nr_segs);
251
252		if (end > isize)
253			vmtruncate(inode, isize);
254	}
255
256	return ret;
257}
258
259static int hfsplus_writepages(struct address_space *mapping,
260			      struct writeback_control *wbc)
261{
262	return mpage_writepages(mapping, wbc, hfsplus_get_block);
263}
264
/* Address space ops for the metadata B-tree inodes on journalled mounts. */
struct address_space_operations hfsplus_journalled_btree_aops = {
	.readpage	= hfsplus_readpage,
	.writepage	= hfsplus_journalled_writepage,
	.sync_page	= block_sync_page,
	.set_page_dirty = hfsplus_jbd_set_page_dirty,
	.bmap		= hfsplus_bmap,
	.releasepage	= hfsplus_releasepage,
};

/* Address space ops for regular file data on journalled mounts. */
struct address_space_operations hfsplus_journalled_aops = {
	.readpage	= hfsplus_readpage,
	.writepage	= hfsplus_journalled_writepage,
	.sync_page	= block_sync_page,
	.set_page_dirty = hfsplus_jbd_set_page_dirty,
	.bmap		= hfsplus_bmap,
	.direct_IO	= hfsplus_direct_IO,
	.writepages	= hfsplus_writepages,
};
/* Address space ops for the metadata B-tree inodes (non-journalled path). */
const struct address_space_operations hfsplus_btree_aops = {
	.readpage	= hfsplus_readpage,
	.writepage	= hfsplus_writepage,
	.sync_page	= block_sync_page,
	.write_begin	= hfsplus_write_begin,
	.write_end	= generic_write_end,
	.bmap		= hfsplus_bmap,
	.releasepage	= hfsplus_releasepage,
};

/* Address space ops for regular file data (non-journalled path). */
const struct address_space_operations hfsplus_aops = {
	.readpage	= hfsplus_readpage,
	.writepage	= hfsplus_writepage,
	.sync_page	= block_sync_page,
	.write_begin	= hfsplus_write_begin,
	.write_end	= generic_write_end,
	.bmap		= hfsplus_bmap,
	.direct_IO	= hfsplus_direct_IO,
	.writepages	= hfsplus_writepages,
};

/* Case-insensitive name hashing/comparison per HFS+ semantics. */
const struct dentry_operations hfsplus_dentry_operations = {
	.d_hash       = hfsplus_hash_dentry,
	.d_compare    = hfsplus_compare_dentry,
};

/*
 * Lookup of the special "rsrc" pseudo-entry exposing a file's resource
 * fork.  Any other name falls straight through to a negative dentry.
 * The resource-fork inode is created lazily and cached in
 * HFSPLUS_I(dir).rsrc_inode.
 */
static struct dentry *hfsplus_file_lookup(struct inode *dir, struct dentry *dentry,
					  struct nameidata *nd)
{
	struct hfs_find_data fd;
	struct super_block *sb = dir->i_sb;
	struct inode *inode = NULL;
	hfsplus_handle_t hfsplus_handle;
	int err;

	/* Only data-fork inodes have an "rsrc" child. */
	if (HFSPLUS_IS_RSRC(dir) || strcmp(dentry->d_name.name, "rsrc"))
		goto out;

	/* Already instantiated earlier? */
	inode = HFSPLUS_I(dir).rsrc_inode;
	if (inode)
		goto out;

	/* NOTE(review): a journal_start failure produces a negative dentry
	 * instead of an error — confirm this is intended. */
	if (hfsplus_journal_start(__FUNCTION__, sb, &hfsplus_handle))
		return NULL;

	inode = new_inode(sb);
	if (!inode) {
		hfsplus_journal_stop(&hfsplus_handle);
		return ERR_PTR(-ENOMEM);
	}

	/* The rsrc inode shares the CNID of its data-fork sibling. */
	inode->i_ino = dir->i_ino;
	INIT_LIST_HEAD(&HFSPLUS_I(inode).open_dir_list);
	mutex_init(&HFSPLUS_I(inode).extents_lock);

	HFSPLUS_I(inode).flags = HFSPLUS_FLG_RSRC;

	/* Read the catalog record and fill the inode from its rsrc fork. */
	err = hfs_find_init(HFSPLUS_SB(sb).cat_tree, &fd);
	if (!err) {
		err = hfsplus_find_cat(&hfsplus_handle, sb, dir->i_ino, &fd);
		if (!err)
			err = hfsplus_cat_read_inode(inode, &fd);
		hfs_find_exit(&hfsplus_handle, &fd);
	}
	if (err) {
		iput(inode);
		hfsplus_journal_stop(&hfsplus_handle);
		return ERR_PTR(err);
	}
	/* Cross-link the two fork inodes and pin the data-fork inode. */
	HFSPLUS_I(inode).rsrc_inode = dir;
	HFSPLUS_I(dir).rsrc_inode = inode;
	igrab(dir);
//	hlist_add_fake(&inode->i_hash);
	hlist_add_head(&inode->i_hash, &HFSPLUS_SB(sb).rsrc_inodes);
	hfsplus_journalled_mark_inode_dirty(__FUNCTION__, &hfsplus_handle, inode);
	hfsplus_journal_stop(&hfsplus_handle);
out:
	d_add(dentry, inode);
	return NULL;
}

364static void hfsplus_get_perms(struct inode *inode, struct hfsplus_perm *perms, int dir)
365{
366	struct super_block *sb = inode->i_sb;
367	u16 mode;
368
369	mode = be16_to_cpu(perms->mode);
370
371	inode->i_uid = be32_to_cpu(perms->owner);
372	if (!inode->i_uid && !mode)
373		inode->i_uid = HFSPLUS_SB(sb).uid;
374
375	inode->i_gid = be32_to_cpu(perms->group);
376	if (!inode->i_gid && !mode)
377		inode->i_gid = HFSPLUS_SB(sb).gid;
378
379	if (dir) {
380		mode = mode ? (mode & S_IALLUGO) :
381			(S_IRWXUGO & ~(HFSPLUS_SB(sb).umask));
382		mode |= S_IFDIR;
383	} else if (!mode)
384		mode = S_IFREG | ((S_IRUGO|S_IWUGO) &
385			~(HFSPLUS_SB(sb).umask));
386	inode->i_mode = mode;
387
388	HFSPLUS_I(inode).userflags = perms->userflags;
389	if (perms->rootflags & HFSPLUS_FLG_IMMUTABLE)
390		inode->i_flags |= S_IMMUTABLE;
391	else
392		inode->i_flags &= ~S_IMMUTABLE;
393	if (perms->rootflags & HFSPLUS_FLG_APPEND)
394		inode->i_flags |= S_APPEND;
395	else
396		inode->i_flags &= ~S_APPEND;
397}
398
399static void hfsplus_set_perms(struct inode *inode, struct hfsplus_perm *perms)
400{
401	if (inode->i_flags & S_IMMUTABLE)
402		perms->rootflags |= HFSPLUS_FLG_IMMUTABLE;
403	else
404		perms->rootflags &= ~HFSPLUS_FLG_IMMUTABLE;
405	if (inode->i_flags & S_APPEND)
406		perms->rootflags |= HFSPLUS_FLG_APPEND;
407	else
408		perms->rootflags &= ~HFSPLUS_FLG_APPEND;
409	perms->userflags = HFSPLUS_I(inode).userflags;
410	perms->mode = cpu_to_be16(inode->i_mode);
411	perms->owner = cpu_to_be32(inode->i_uid);
412	perms->group = cpu_to_be32(inode->i_gid);
413	perms->dev = cpu_to_be32(HFSPLUS_I(inode).linkid);
414}
415
416static int hfsplus_file_open(struct inode *inode, struct file *file)
417{
418	if (HFSPLUS_IS_RSRC(inode))
419		inode = HFSPLUS_I(inode).rsrc_inode;
420	if (!(file->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS)
421		return -EOVERFLOW;
422	atomic_inc(&HFSPLUS_I(inode).opencnt);
423	return 0;
424}
425
/*
 * ->release: on last close of the (main) inode, drop preallocation via
 * truncate; if the file was marked dead (unlinked while open), delete
 * its catalog record and data under a journal handle.
 */
static int hfsplus_file_release(struct inode *inode, struct file *file)
{
	struct super_block *sb = inode->i_sb;
	hfsplus_handle_t hfsplus_handle;
	int ret;

	/* Open counts live on the data-fork inode. */
	if (HFSPLUS_IS_RSRC(inode))
		inode = HFSPLUS_I(inode).rsrc_inode;
	if (atomic_dec_and_test(&HFSPLUS_I(inode).opencnt)) {
		mutex_lock(&inode->i_mutex);
		hfsplus_file_truncate(inode);
		if (inode->i_flags & S_DEAD) {
			if ((ret = hfsplus_journal_start(__FUNCTION__, sb, &hfsplus_handle))) {
				mutex_unlock(&inode->i_mutex);
				return ret;
			}
			hfsplus_delete_cat(&hfsplus_handle, inode->i_ino, HFSPLUS_SB(sb).hidden_dir, NULL);
			hfsplus_delete_inode(&hfsplus_handle, inode);
			hfsplus_journal_stop(&hfsplus_handle);
		}
		mutex_unlock(&inode->i_mutex);
	}
	return 0;
}

451static int hfsplus_setattr(struct dentry *dentry, struct iattr *attr)
452{
453	struct inode *inode = dentry->d_inode;
454	int error;
455
456	error = inode_change_ok(inode, attr);
457	if (error)
458		return error;
459
460	if ((attr->ia_valid & ATTR_SIZE) &&
461	    attr->ia_size != i_size_read(inode)) {
462		error = vmtruncate(inode, attr->ia_size);
463		if (error)
464			return error;
465	}
466
467	setattr_copy(inode, attr);
468	mark_inode_dirty(inode);
469	return 0;
470}
471
472int hfsplus_file_fsync(struct file *filp, int datasync)
473{
474	struct inode *inode = filp->f_mapping->host;
475	struct super_block * sb;
476	int ret, err;
477
478	/*
479	 * Sync inode metadata into the catalog and extent trees.
480	 */
481	ret = write_inode_now(inode, 0);
482
483	/* sync the superblock to buffers */
484	sb = inode->i_sb;
485	if (sb->s_dirt) {
486		if (!(sb->s_flags & MS_RDONLY))
487			hfsplus_sync_fs(sb, 1);
488		else
489			sb->s_dirt = 0;
490	}
491
492	/* .. finally sync the buffers to disk */
493	err = sync_blockdev(sb->s_bdev);
494	if (!ret)
495		ret = err;
496	return ret;
497}
498
/* Inode ops for regular files (lookup handles the "rsrc" pseudo-entry). */
static const struct inode_operations hfsplus_file_inode_operations = {
	.lookup		= hfsplus_file_lookup,
	.truncate	= hfsplus_file_truncate,
	.setattr	= hfsplus_setattr,
	.setxattr	= hfsplus_setxattr,
	.getxattr	= hfsplus_getxattr,
	.listxattr	= hfsplus_listxattr,
};

/* File ops for regular files: generic I/O plus HFS+ open/release/ioctl. */
static const struct file_operations hfsplus_file_operations = {
	.llseek 	= generic_file_llseek,
	.read		= do_sync_read,
	.aio_read	= generic_file_aio_read,
	.write		= do_sync_write,
	.aio_write	= generic_file_aio_write,
	.mmap		= generic_file_mmap,
	.splice_read	= generic_file_splice_read,
	.fsync		= hfsplus_file_fsync,
	.open		= hfsplus_file_open,
	.release	= hfsplus_file_release,
	.unlocked_ioctl = hfsplus_ioctl,
};

/*
 * Allocate and initialize a fresh in-core inode, assigning it the next
 * CNID and the ops/aops matching @mode.  The initial dirty state is
 * journalled.  Returns NULL on failure.
 */
struct inode *hfsplus_new_inode(hfsplus_handle_t *hfsplus_handle, struct super_block *sb, int mode)
{
	struct inode *inode = new_inode(sb);
	if (!inode)
		return NULL;

	/* CNIDs are handed out sequentially from the superblock counter. */
	inode->i_ino = HFSPLUS_SB(sb).next_cnid++;
	inode->i_mode = mode;
	inode->i_uid = current_fsuid();
	inode->i_gid = current_fsgid();
	inode->i_nlink = 1;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
	INIT_LIST_HEAD(&HFSPLUS_I(inode).open_dir_list);
	mutex_init(&HFSPLUS_I(inode).extents_lock);
	atomic_set(&HFSPLUS_I(inode).opencnt, 0);
	HFSPLUS_I(inode).flags = 0;
	memset(HFSPLUS_I(inode).first_extents, 0, sizeof(hfsplus_extent_rec));
	memset(HFSPLUS_I(inode).cached_extents, 0, sizeof(hfsplus_extent_rec));
	HFSPLUS_I(inode).alloc_blocks = 0;
	HFSPLUS_I(inode).first_blocks = 0;
	HFSPLUS_I(inode).cached_start = 0;
	HFSPLUS_I(inode).cached_blocks = 0;
	HFSPLUS_I(inode).phys_size = 0;
	HFSPLUS_I(inode).fs_blocks = 0;
	HFSPLUS_I(inode).rsrc_inode = NULL;
	if (S_ISDIR(inode->i_mode)) {
		inode->i_size = 2;	/* "." and ".." */
		HFSPLUS_SB(sb).folder_count++;
		inode->i_op = &hfsplus_dir_inode_operations;
		inode->i_fop = &hfsplus_dir_operations;
	} else if (S_ISREG(inode->i_mode)) {
		HFSPLUS_SB(sb).file_count++;
		inode->i_op = &hfsplus_file_inode_operations;
		inode->i_fop = &hfsplus_file_operations;
		inode->i_mapping->a_ops = &hfsplus_aops;
		HFSPLUS_I(inode).clump_blocks = HFSPLUS_SB(sb).data_clump_blocks;
	} else if (S_ISLNK(inode->i_mode)) {
		HFSPLUS_SB(sb).file_count++;
		inode->i_op = &page_symlink_inode_operations;
		inode->i_mapping->a_ops = &hfsplus_aops;
		HFSPLUS_I(inode).clump_blocks = 1;
	} else
		HFSPLUS_SB(sb).file_count++;
	insert_inode_hash(inode);
	if (hfsplus_journalled_mark_inode_dirty(__FUNCTION__, hfsplus_handle, inode)) {
		/* NOTE(review): the hashed inode is abandoned here without
		 * iput() — looks like a reference leak; verify. */
		hfsplus_delete_inode(hfsplus_handle, inode);
		return NULL;
	}
	sb->s_dirt = 1;

	return inode;
}

575void hfsplus_delete_inode(hfsplus_handle_t *hfsplus_handle, struct inode *inode)
576{
577	struct super_block *sb = inode->i_sb;
578
579	if (S_ISDIR(inode->i_mode)) {
580		HFSPLUS_SB(sb).folder_count--;
581		sb->s_dirt = 1;
582		return;
583	}
584	HFSPLUS_SB(sb).file_count--;
585	if (S_ISREG(inode->i_mode)) {
586		if (!inode->i_nlink) {
587			inode->i_size = 0;
588			hfsplus_file_truncate(inode);
589		}
590	} else if (S_ISLNK(inode->i_mode)) {
591		inode->i_size = 0;
592		hfsplus_file_truncate(inode);
593	}
594	sb->s_dirt = 1;
595}
596
/*
 * Initialize the inode's in-core extent and size state from an on-disk
 * fork descriptor (data or resource fork).
 */
void hfsplus_inode_read_fork(struct inode *inode, struct hfsplus_fork_raw *fork)
{
	struct super_block *sb = inode->i_sb;
	u32 count;
	int i;

	memcpy(&HFSPLUS_I(inode).first_extents, &fork->extents,
	       sizeof(hfsplus_extent_rec));
	/* Sum the first extent record to see how many blocks it covers. */
	for (count = 0, i = 0; i < 8; i++)
		count += be32_to_cpu(fork->extents[i].block_count);
	HFSPLUS_I(inode).first_blocks = count;
	memset(HFSPLUS_I(inode).cached_extents, 0, sizeof(hfsplus_extent_rec));
	HFSPLUS_I(inode).cached_start = 0;
	HFSPLUS_I(inode).cached_blocks = 0;

	HFSPLUS_I(inode).alloc_blocks = be32_to_cpu(fork->total_blocks);
	HFSPLUS_I(inode).phys_size = inode->i_size = be64_to_cpu(fork->total_size);
	HFSPLUS_I(inode).fs_blocks = (inode->i_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
	inode_set_bytes(inode, HFSPLUS_I(inode).fs_blocks << sb->s_blocksize_bits);
    /* Foxconn added start pling 05/31/2010 */
    /* Set the i_blocks field properly (512-byte sectors, rounded up) */
    inode->i_blocks = inode->i_size/512;
    if (inode->i_size % 512)
        inode->i_blocks++;
    /* Foxconn added end pling 05/31/2010 */
	HFSPLUS_I(inode).clump_blocks = be32_to_cpu(fork->clump_size) >> HFSPLUS_SB(sb).alloc_blksz_shift;
	if (!HFSPLUS_I(inode).clump_blocks)
		HFSPLUS_I(inode).clump_blocks = HFSPLUS_IS_RSRC(inode) ?
				HFSPLUS_SB(sb).rsrc_clump_blocks :
				HFSPLUS_SB(sb).data_clump_blocks;
}

629void hfsplus_inode_write_fork(struct inode *inode, struct hfsplus_fork_raw *fork)
630{
631	memcpy(&fork->extents, &HFSPLUS_I(inode).first_extents,
632	       sizeof(hfsplus_extent_rec));
633	fork->total_size = cpu_to_be64(inode->i_size);
634	fork->total_blocks = cpu_to_be32(HFSPLUS_I(inode).alloc_blocks);
635}
636
/*
 * Fill @inode from the catalog record located by @fd.  Handles folder
 * and file records; any other record type is treated as corruption.
 * Returns 0 on success or -EIO on a bad record type.
 */
int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
{
	hfsplus_cat_entry entry;
	int res = 0;
	u16 type;

	type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);

	HFSPLUS_I(inode).linkid = 0;
	if (type == HFSPLUS_FOLDER) {
		struct hfsplus_cat_folder *folder = &entry.folder;

		if (fd->entrylength < sizeof(struct hfsplus_cat_folder))
			/* panic? */;
		hfs_bnode_read(fd->bnode, &entry, fd->entryoffset,
					sizeof(struct hfsplus_cat_folder));
		hfsplus_get_perms(inode, &folder->permissions, 1);
		inode->i_nlink = 1;
		/* Directory size: valence plus the "." and ".." entries. */
		inode->i_size = 2 + be32_to_cpu(folder->valence);
		inode->i_atime = hfsp_mt2ut(folder->access_date);
		inode->i_mtime = hfsp_mt2ut(folder->content_mod_date);
		inode->i_ctime = hfsp_mt2ut(folder->attribute_mod_date);
		HFSPLUS_I(inode).create_date = folder->create_date;
		HFSPLUS_I(inode).fs_blocks = 0;
		inode->i_op = &hfsplus_dir_inode_operations;
		inode->i_fop = &hfsplus_dir_operations;
	} else if (type == HFSPLUS_FILE) {
		struct hfsplus_cat_file *file = &entry.file;

		if (fd->entrylength < sizeof(struct hfsplus_cat_file))
			/* panic? */;
		hfs_bnode_read(fd->bnode, &entry, fd->entryoffset,
					sizeof(struct hfsplus_cat_file));

		/* Pick the fork this inode represents (data vs. resource). */
		hfsplus_inode_read_fork(inode, HFSPLUS_IS_RSRC(inode) ?
					&file->rsrc_fork : &file->data_fork);
		hfsplus_get_perms(inode, &file->permissions, 0);
		inode->i_nlink = 1;
		if (S_ISREG(inode->i_mode)) {
			/* A non-zero permissions.dev carries the link count. */
			if (file->permissions.dev)
				inode->i_nlink = be32_to_cpu(file->permissions.dev);
			inode->i_op = &hfsplus_file_inode_operations;
			inode->i_fop = &hfsplus_file_operations;
			inode->i_mapping->a_ops = &hfsplus_aops;
		} else if (S_ISLNK(inode->i_mode)) {
			inode->i_op = &page_symlink_inode_operations;
			inode->i_mapping->a_ops = &hfsplus_aops;
		} else {
			init_special_inode(inode, inode->i_mode,
					   be32_to_cpu(file->permissions.dev));
		}
		inode->i_atime = hfsp_mt2ut(file->access_date);
		inode->i_mtime = hfsp_mt2ut(file->content_mod_date);
		inode->i_ctime = hfsp_mt2ut(file->attribute_mod_date);
		HFSPLUS_I(inode).create_date = file->create_date;
	} else {
		printk(KERN_ERR "hfs: bad catalog entry used to create inode\n");
		res = -EIO;
	}
	return res;
}

/*
 * Write the in-core state of @inode back to its catalog record.  For a
 * resource-fork inode only the fork data is updated; the main inode
 * also refreshes permissions, dates and (for folders) valence.
 */
int hfsplus_cat_write_inode(hfsplus_handle_t *hfsplus_handle, struct inode *inode)
{
	struct inode *main_inode = inode;
	struct hfs_find_data fd;
	hfsplus_cat_entry entry;

	/* Catalog lookups go by the main (data-fork) inode's CNID. */
	if (HFSPLUS_IS_RSRC(inode))
		main_inode = HFSPLUS_I(inode).rsrc_inode;

	if (!main_inode->i_nlink)
		return 0;

	/* Journalled and plain mounts need different find-data setup. */
	if (hfsplus_handle->journaled != HFSPLUS_JOURNAL_PRESENT) {
		if (hfs_find_init(HFSPLUS_SB(main_inode->i_sb).cat_tree, &fd))
			/* panic? */
			return -EIO;
	} else {
		if (hfsplus_journalled_find_init(HFSPLUS_SB(main_inode->i_sb).cat_tree, &fd))
			/* panic? */
			return -EIO;
	}

	/* NOTE(review): a failed lookup still returns 0 below — verify
	 * callers do not rely on an error being reported here. */
	if (hfsplus_find_cat(hfsplus_handle, main_inode->i_sb, main_inode->i_ino, &fd))
		/* panic? */
		goto out;

	if (S_ISDIR(main_inode->i_mode)) {
		struct hfsplus_cat_folder *folder = &entry.folder;

		if (fd.entrylength < sizeof(struct hfsplus_cat_folder))
			/* panic? */;
		hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
					sizeof(struct hfsplus_cat_folder));
		/* simple node checks? */
		hfsplus_cat_set_perms(inode, &folder->permissions);
		folder->access_date = hfsp_ut2mt(inode->i_atime);
		folder->content_mod_date = hfsp_ut2mt(inode->i_mtime);
		folder->attribute_mod_date = hfsp_ut2mt(inode->i_ctime);
		folder->valence = cpu_to_be32(inode->i_size - 2);
		hfs_bnode_write(hfsplus_handle, fd.bnode, &entry, fd.entryoffset,
					 sizeof(struct hfsplus_cat_folder));
	} else if (HFSPLUS_IS_RSRC(inode)) {
		/* Resource fork: only its fork descriptor is refreshed. */
		struct hfsplus_cat_file *file = &entry.file;
		hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
			       sizeof(struct hfsplus_cat_file));
		hfsplus_inode_write_fork(inode, &file->rsrc_fork);
		hfs_bnode_write(hfsplus_handle, fd.bnode, &entry, fd.entryoffset,
				sizeof(struct hfsplus_cat_file));
	} else {
		struct hfsplus_cat_file *file = &entry.file;

		if (fd.entrylength < sizeof(struct hfsplus_cat_file))
			/* panic? */;
		hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
					sizeof(struct hfsplus_cat_file));
		hfsplus_inode_write_fork(inode, &file->data_fork);
		hfsplus_cat_set_perms(inode, &file->permissions);
		/* Mirror the immutable permission bit into the locked flag. */
		if (HFSPLUS_FLG_IMMUTABLE &
				(file->permissions.rootflags |
					file->permissions.userflags))
			file->flags |= cpu_to_be16(HFSPLUS_FILE_LOCKED);
		else
			file->flags &= cpu_to_be16(~HFSPLUS_FILE_LOCKED);
		file->access_date = hfsp_ut2mt(inode->i_atime);
		file->content_mod_date = hfsp_ut2mt(inode->i_mtime);
		file->attribute_mod_date = hfsp_ut2mt(inode->i_ctime);
		hfs_bnode_write(hfsplus_handle, fd.bnode, &entry, fd.entryoffset,
					 sizeof(struct hfsplus_cat_file));
	}

out:
	if (hfsplus_handle->journaled != HFSPLUS_JOURNAL_PRESENT) {
		hfs_find_exit(hfsplus_handle, &fd);
	} else {
		hfsplus_journalled_find_exit(hfsplus_handle, &fd);
	}
	return 0;
}
