/*
 *  linux/fs/hfsplus/inode.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Inode handling routines
 */

#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/mpage.h>
#include <linux/sched.h>

#include "hfsplus_fs.h"
#include "hfsplus_raw.h"

static int hfsplus_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, hfsplus_get_block);
}

static int hfsplus_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, hfsplus_get_block, wbc);
}

static int hfsplus_prepare_write(struct file *file, struct page *page, unsigned from, unsigned to)
{
	return cont_prepare_write(page, from, to, hfsplus_get_block,
		&HFSPLUS_I(page->mapping->host).phys_size);
}

static sector_t hfsplus_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, hfsplus_get_block);
}

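/*
 * Before the page cache may release a B-tree page, drop any btree nodes
 * cached for it: a node that is still referenced keeps the page pinned,
 * otherwise the nodes are unhashed and freed and the buffers can go.
 */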
static int hfsplus_releasepage(struct page *page, gfp_t mask)
{
	struct inode *inode = page->mapping->host;
	struct super_block *sb = inode->i_sb;
	struct hfs_btree *tree;
	struct hfs_bnode *node;
	u32 nidx;
	int i, res = 1;

	switch (inode->i_ino) {
	case HFSPLUS_EXT_CNID:
		tree = HFSPLUS_SB(sb).ext_tree;
		break;
	case HFSPLUS_CAT_CNID:
		tree = HFSPLUS_SB(sb).cat_tree;
		break;
	case HFSPLUS_ATTR_CNID:
		tree = HFSPLUS_SB(sb).attr_tree;
		break;
	default:
		BUG();
		return 0;
	}
	if (tree->node_size >= PAGE_CACHE_SIZE) {
		nidx = page->index >> (tree->node_size_shift - PAGE_CACHE_SHIFT);
		spin_lock(&tree->hash_lock);
		node = hfs_bnode_findhash(tree, nidx);
		if (node && atomic_read(&node->refcnt))
			res = 0;
		if (res && node) {
			hfs_bnode_unhash(node);
			hfs_bnode_free(node);
		}
		spin_unlock(&tree->hash_lock);
	} else {
		nidx = page->index << (PAGE_CACHE_SHIFT - tree->node_size_shift);
		i = 1 << (PAGE_CACHE_SHIFT - tree->node_size_shift);
		spin_lock(&tree->hash_lock);
		do {
			node = hfs_bnode_findhash(tree, nidx++);
			if (!node)
				continue;
			if (atomic_read(&node->refcnt)) {
				res = 0;
				break;
			}
			hfs_bnode_unhash(node);
			hfs_bnode_free(node);
		} while (--i && nidx < tree->node_count);
		spin_unlock(&tree->hash_lock);
	}
	return res ? try_to_free_buffers(page) : 0;
}

static ssize_t hfsplus_direct_IO(int rw, struct kiocb *iocb,
		const struct iovec *iov, loff_t offset, unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_path.dentry->d_inode->i_mapping->host;

	return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
				  offset, nr_segs, hfsplus_get_block, NULL);
}

static int hfsplus_writepages(struct address_space *mapping,
			      struct writeback_control *wbc)
{
	return mpage_writepages(mapping, wbc, hfsplus_get_block);
}

const struct address_space_operations hfsplus_btree_aops = {
	.readpage	= hfsplus_readpage,
	.writepage	= hfsplus_writepage,
	.sync_page	= block_sync_page,
	.prepare_write	= hfsplus_prepare_write,
	.commit_write	= generic_commit_write,
	.bmap		= hfsplus_bmap,
	.releasepage	= hfsplus_releasepage,
};

const struct address_space_operations hfsplus_aops = {
	.readpage	= hfsplus_readpage,
	.writepage	= hfsplus_writepage,
	.sync_page	= block_sync_page,
	.prepare_write	= hfsplus_prepare_write,
	.commit_write	= generic_commit_write,
	.bmap		= hfsplus_bmap,
	.direct_IO	= hfsplus_direct_IO,
	.writepages	= hfsplus_writepages,
};

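/*
 * Lookup of the pseudo-entry "rsrc" below a regular file: on first use a
 * shadow inode for the resource fork is created, filled from the catalog
 * record and cross-linked with the data-fork inode via rsrc_inode.
 */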
static struct dentry *hfsplus_file_lookup(struct inode *dir, struct dentry *dentry,
					  struct nameidata *nd)
{
	struct hfs_find_data fd;
	struct super_block *sb = dir->i_sb;
	struct inode *inode = NULL;
	int err;

	if (HFSPLUS_IS_RSRC(dir) || strcmp(dentry->d_name.name, "rsrc"))
		goto out;

	inode = HFSPLUS_I(dir).rsrc_inode;
	if (inode)
		goto out;

	inode = new_inode(sb);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	inode->i_ino = dir->i_ino;
	INIT_LIST_HEAD(&HFSPLUS_I(inode).open_dir_list);
	init_MUTEX(&HFSPLUS_I(inode).extents_lock);
	HFSPLUS_I(inode).flags = HFSPLUS_FLG_RSRC;

	hfs_find_init(HFSPLUS_SB(sb).cat_tree, &fd);
	err = hfsplus_find_cat(sb, dir->i_ino, &fd);
	if (!err)
		err = hfsplus_cat_read_inode(inode, &fd);
	hfs_find_exit(&fd);
	if (err) {
		iput(inode);
		return ERR_PTR(err);
	}
	HFSPLUS_I(inode).rsrc_inode = dir;
	HFSPLUS_I(dir).rsrc_inode = inode;
	igrab(dir);
	hlist_add_head(&inode->i_hash, &HFSPLUS_SB(sb).rsrc_inodes);
	mark_inode_dirty(inode);
out:
	d_add(dentry, inode);
	return NULL;
}

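/*
 * Fill the ownership and mode fields of an in-core inode from the on-disk
 * permission structure, falling back to the uid/gid/umask mount options
 * when the catalog entry carries no ownership information.
 */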
static void hfsplus_get_perms(struct inode *inode, struct hfsplus_perm *perms, int dir)
{
	struct super_block *sb = inode->i_sb;
	u16 mode;

	mode = be16_to_cpu(perms->mode);

	inode->i_uid = be32_to_cpu(perms->owner);
	if (!inode->i_uid && !mode)
		inode->i_uid = HFSPLUS_SB(sb).uid;

	inode->i_gid = be32_to_cpu(perms->group);
	if (!inode->i_gid && !mode)
		inode->i_gid = HFSPLUS_SB(sb).gid;

	if (dir) {
		mode = mode ? (mode & S_IALLUGO) :
			(S_IRWXUGO & ~(HFSPLUS_SB(sb).umask));
		mode |= S_IFDIR;
	} else if (!mode)
		mode = S_IFREG | ((S_IRUGO|S_IWUGO) &
			~(HFSPLUS_SB(sb).umask));
	inode->i_mode = mode;

	HFSPLUS_I(inode).rootflags = perms->rootflags;
	HFSPLUS_I(inode).userflags = perms->userflags;
	if (perms->rootflags & HFSPLUS_FLG_IMMUTABLE)
		inode->i_flags |= S_IMMUTABLE;
	else
		inode->i_flags &= ~S_IMMUTABLE;
	if (perms->rootflags & HFSPLUS_FLG_APPEND)
		inode->i_flags |= S_APPEND;
	else
		inode->i_flags &= ~S_APPEND;
}

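/*
 * Write the in-core ownership, mode and flags back into the on-disk
 * permission structure (the inverse of hfsplus_get_perms).
 */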
static void hfsplus_set_perms(struct inode *inode, struct hfsplus_perm *perms)
{
	if (inode->i_flags & S_IMMUTABLE)
		perms->rootflags |= HFSPLUS_FLG_IMMUTABLE;
	else
		perms->rootflags &= ~HFSPLUS_FLG_IMMUTABLE;
	if (inode->i_flags & S_APPEND)
		perms->rootflags |= HFSPLUS_FLG_APPEND;
	else
		perms->rootflags &= ~HFSPLUS_FLG_APPEND;
	perms->userflags = HFSPLUS_I(inode).userflags;
	perms->mode = cpu_to_be16(inode->i_mode);
	perms->owner = cpu_to_be32(inode->i_uid);
	perms->group = cpu_to_be32(inode->i_gid);
	perms->dev = cpu_to_be32(HFSPLUS_I(inode).dev);
}

static int hfsplus_permission(struct inode *inode, int mask, struct nameidata *nd)
{
	/*
	 * MAY_EXEC is also used for lookup, so if no x bit is set allow the
	 * lookup anyway; open_exec() performs the same test, so the file
	 * still cannot be executed.  If an x bit is set, fall back to the
	 * standard permission check.
	 */
	if (S_ISREG(inode->i_mode) && mask & MAY_EXEC && !(inode->i_mode & 0111))
		return 0;
	return generic_permission(inode, mask, NULL);
}

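/*
 * Count opens against the data-fork inode; resource-fork opens are
 * accounted to their owning data-fork inode as well.
 */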
static int hfsplus_file_open(struct inode *inode, struct file *file)
{
	if (HFSPLUS_IS_RSRC(inode))
		inode = HFSPLUS_I(inode).rsrc_inode;
	if (atomic_read(&file->f_count) != 1)
		return 0;
	atomic_inc(&HFSPLUS_I(inode).opencnt);
	return 0;
}

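/*
 * On the last release of an inode, trim surplus allocated space and, for
 * files that were unlinked while still open (S_DEAD), remove the catalog
 * entry and free the on-disk data.
 */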
static int hfsplus_file_release(struct inode *inode, struct file *file)
{
	struct super_block *sb = inode->i_sb;

	if (HFSPLUS_IS_RSRC(inode))
		inode = HFSPLUS_I(inode).rsrc_inode;
	if (atomic_read(&file->f_count) != 0)
		return 0;
	if (atomic_dec_and_test(&HFSPLUS_I(inode).opencnt)) {
		mutex_lock(&inode->i_mutex);
		hfsplus_file_truncate(inode);
		if (inode->i_flags & S_DEAD) {
			hfsplus_delete_cat(inode->i_ino, HFSPLUS_SB(sb).hidden_dir, NULL);
			hfsplus_delete_inode(inode);
		}
		mutex_unlock(&inode->i_mutex);
	}
	return 0;
}

extern const struct inode_operations hfsplus_dir_inode_operations;
extern const struct file_operations hfsplus_dir_operations;

static const struct inode_operations hfsplus_file_inode_operations = {
	.lookup		= hfsplus_file_lookup,
	.truncate	= hfsplus_file_truncate,
	.permission	= hfsplus_permission,
	.setxattr	= hfsplus_setxattr,
	.getxattr	= hfsplus_getxattr,
	.listxattr	= hfsplus_listxattr,
};

static const struct file_operations hfsplus_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.aio_read	= generic_file_aio_read,
	.write		= do_sync_write,
	.aio_write	= generic_file_aio_write,
	.mmap		= generic_file_mmap,
	.sendfile	= generic_file_sendfile,
	.fsync		= file_fsync,
	.open		= hfsplus_file_open,
	.release	= hfsplus_file_release,
	.ioctl		= hfsplus_ioctl,
};

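/*
 * Allocate and initialize an in-core inode for a newly created catalog
 * object, assigning the next CNID and the default operations for its
 * file type.
 */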
struct inode *hfsplus_new_inode(struct super_block *sb, int mode)
{
	struct inode *inode = new_inode(sb);
	if (!inode)
		return NULL;

	inode->i_ino = HFSPLUS_SB(sb).next_cnid++;
	inode->i_mode = mode;
	inode->i_uid = current->fsuid;
	inode->i_gid = current->fsgid;
	inode->i_nlink = 1;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
	INIT_LIST_HEAD(&HFSPLUS_I(inode).open_dir_list);
	init_MUTEX(&HFSPLUS_I(inode).extents_lock);
	atomic_set(&HFSPLUS_I(inode).opencnt, 0);
	HFSPLUS_I(inode).flags = 0;
	memset(HFSPLUS_I(inode).first_extents, 0, sizeof(hfsplus_extent_rec));
	memset(HFSPLUS_I(inode).cached_extents, 0, sizeof(hfsplus_extent_rec));
	HFSPLUS_I(inode).alloc_blocks = 0;
	HFSPLUS_I(inode).first_blocks = 0;
	HFSPLUS_I(inode).cached_start = 0;
	HFSPLUS_I(inode).cached_blocks = 0;
	HFSPLUS_I(inode).phys_size = 0;
	HFSPLUS_I(inode).fs_blocks = 0;
	HFSPLUS_I(inode).rsrc_inode = NULL;
	if (S_ISDIR(inode->i_mode)) {
		inode->i_size = 2;
		HFSPLUS_SB(sb).folder_count++;
		inode->i_op = &hfsplus_dir_inode_operations;
		inode->i_fop = &hfsplus_dir_operations;
	} else if (S_ISREG(inode->i_mode)) {
		HFSPLUS_SB(sb).file_count++;
		inode->i_op = &hfsplus_file_inode_operations;
		inode->i_fop = &hfsplus_file_operations;
		inode->i_mapping->a_ops = &hfsplus_aops;
		HFSPLUS_I(inode).clump_blocks = HFSPLUS_SB(sb).data_clump_blocks;
	} else if (S_ISLNK(inode->i_mode)) {
		HFSPLUS_SB(sb).file_count++;
		inode->i_op = &page_symlink_inode_operations;
		inode->i_mapping->a_ops = &hfsplus_aops;
		HFSPLUS_I(inode).clump_blocks = 1;
	} else
		HFSPLUS_SB(sb).file_count++;
	insert_inode_hash(inode);
	mark_inode_dirty(inode);
	sb->s_dirt = 1;

	return inode;
}

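/*
 * Drop the on-disk resources of an object being removed from the catalog:
 * directories only adjust the folder count, while regular files with no
 * remaining links and symlinks have their extents freed.
 */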
void hfsplus_delete_inode(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;

	if (S_ISDIR(inode->i_mode)) {
		HFSPLUS_SB(sb).folder_count--;
		sb->s_dirt = 1;
		return;
	}
	HFSPLUS_SB(sb).file_count--;
	if (S_ISREG(inode->i_mode)) {
		if (!inode->i_nlink) {
			inode->i_size = 0;
			hfsplus_file_truncate(inode);
		}
	} else if (S_ISLNK(inode->i_mode)) {
		inode->i_size = 0;
		hfsplus_file_truncate(inode);
	}
	sb->s_dirt = 1;
}

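/*
 * Load the size and the initial extent records of a fork descriptor into
 * the in-core inode and recompute the derived block counts.
 */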
void hfsplus_inode_read_fork(struct inode *inode, struct hfsplus_fork_raw *fork)
{
	struct super_block *sb = inode->i_sb;
	u32 count;
	int i;

	memcpy(&HFSPLUS_I(inode).first_extents, &fork->extents,
	       sizeof(hfsplus_extent_rec));
	for (count = 0, i = 0; i < 8; i++)
		count += be32_to_cpu(fork->extents[i].block_count);
	HFSPLUS_I(inode).first_blocks = count;
	memset(HFSPLUS_I(inode).cached_extents, 0, sizeof(hfsplus_extent_rec));
	HFSPLUS_I(inode).cached_start = 0;
	HFSPLUS_I(inode).cached_blocks = 0;

	HFSPLUS_I(inode).alloc_blocks = be32_to_cpu(fork->total_blocks);
	inode->i_size = HFSPLUS_I(inode).phys_size = be64_to_cpu(fork->total_size);
	HFSPLUS_I(inode).fs_blocks = (inode->i_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
	inode_set_bytes(inode, HFSPLUS_I(inode).fs_blocks << sb->s_blocksize_bits);
	/* Foxconn added start pling 05/31/2010 */
	/* Set the i_blocks field properly */
	inode->i_blocks = inode->i_size / 512;
	if (inode->i_size % 512)
		inode->i_blocks++;
	/* Foxconn added end pling 05/31/2010 */
	HFSPLUS_I(inode).clump_blocks = be32_to_cpu(fork->clump_size) >> HFSPLUS_SB(sb).alloc_blksz_shift;
	if (!HFSPLUS_I(inode).clump_blocks)
		HFSPLUS_I(inode).clump_blocks = HFSPLUS_IS_RSRC(inode) ? HFSPLUS_SB(sb).rsrc_clump_blocks :
				HFSPLUS_SB(sb).data_clump_blocks;
}

void hfsplus_inode_write_fork(struct inode *inode, struct hfsplus_fork_raw *fork)
{
	memcpy(&fork->extents, &HFSPLUS_I(inode).first_extents,
	       sizeof(hfsplus_extent_rec));
	fork->total_size = cpu_to_be64(inode->i_size);
	fork->total_blocks = cpu_to_be32(HFSPLUS_I(inode).alloc_blocks);
}

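/*
 * Initialize an in-core inode from the catalog record located by @fd,
 * handling both folder and file entries.
 */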
int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
{
	hfsplus_cat_entry entry;
	int res = 0;
	u16 type;

	type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);

	HFSPLUS_I(inode).dev = 0;
	if (type == HFSPLUS_FOLDER) {
		struct hfsplus_cat_folder *folder = &entry.folder;

		if (fd->entrylength < sizeof(struct hfsplus_cat_folder))
			/* panic? */;
		hfs_bnode_read(fd->bnode, &entry, fd->entryoffset,
					sizeof(struct hfsplus_cat_folder));
		hfsplus_get_perms(inode, &folder->permissions, 1);
		inode->i_nlink = 1;
		inode->i_size = 2 + be32_to_cpu(folder->valence);
		inode->i_atime = hfsp_mt2ut(folder->access_date);
		inode->i_mtime = hfsp_mt2ut(folder->content_mod_date);
		inode->i_ctime = hfsp_mt2ut(folder->attribute_mod_date);
		HFSPLUS_I(inode).create_date = folder->create_date;
		HFSPLUS_I(inode).fs_blocks = 0;
		inode->i_op = &hfsplus_dir_inode_operations;
		inode->i_fop = &hfsplus_dir_operations;
	} else if (type == HFSPLUS_FILE) {
		struct hfsplus_cat_file *file = &entry.file;

		if (fd->entrylength < sizeof(struct hfsplus_cat_file))
			/* panic? */;
		hfs_bnode_read(fd->bnode, &entry, fd->entryoffset,
					sizeof(struct hfsplus_cat_file));

		hfsplus_inode_read_fork(inode, HFSPLUS_IS_DATA(inode) ?
					&file->data_fork : &file->rsrc_fork);
		hfsplus_get_perms(inode, &file->permissions, 0);
		inode->i_nlink = 1;
		if (S_ISREG(inode->i_mode)) {
			if (file->permissions.dev)
				inode->i_nlink = be32_to_cpu(file->permissions.dev);
			inode->i_op = &hfsplus_file_inode_operations;
			inode->i_fop = &hfsplus_file_operations;
			inode->i_mapping->a_ops = &hfsplus_aops;
		} else if (S_ISLNK(inode->i_mode)) {
			inode->i_op = &page_symlink_inode_operations;
			inode->i_mapping->a_ops = &hfsplus_aops;
		} else {
			init_special_inode(inode, inode->i_mode,
					   be32_to_cpu(file->permissions.dev));
		}
		inode->i_atime = hfsp_mt2ut(file->access_date);
		inode->i_mtime = hfsp_mt2ut(file->content_mod_date);
		inode->i_ctime = hfsp_mt2ut(file->attribute_mod_date);
		HFSPLUS_I(inode).create_date = file->create_date;
	} else {
		printk(KERN_ERR "hfs: bad catalog entry used to create inode\n");
		res = -EIO;
	}
	return res;
}

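/*
 * Write the in-core inode state back into its catalog record.  A resource
 * fork inode only updates its fork data; the main (data-fork) inode also
 * updates permissions, dates and flags.
 */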
int hfsplus_cat_write_inode(struct inode *inode)
{
	struct inode *main_inode = inode;
	struct hfs_find_data fd;
	hfsplus_cat_entry entry;

	if (HFSPLUS_IS_RSRC(inode))
		main_inode = HFSPLUS_I(inode).rsrc_inode;

	if (!main_inode->i_nlink)
		return 0;

	if (hfs_find_init(HFSPLUS_SB(main_inode->i_sb).cat_tree, &fd))
		/* panic? */
		return -EIO;

	if (hfsplus_find_cat(main_inode->i_sb, main_inode->i_ino, &fd))
		/* panic? */
		goto out;

	if (S_ISDIR(main_inode->i_mode)) {
		struct hfsplus_cat_folder *folder = &entry.folder;

		if (fd.entrylength < sizeof(struct hfsplus_cat_folder))
			/* panic? */;
		hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
					sizeof(struct hfsplus_cat_folder));
		/* simple node checks? */
		hfsplus_set_perms(inode, &folder->permissions);
		folder->access_date = hfsp_ut2mt(inode->i_atime);
		folder->content_mod_date = hfsp_ut2mt(inode->i_mtime);
		folder->attribute_mod_date = hfsp_ut2mt(inode->i_ctime);
		folder->valence = cpu_to_be32(inode->i_size - 2);
		hfs_bnode_write(fd.bnode, &entry, fd.entryoffset,
					 sizeof(struct hfsplus_cat_folder));
	} else if (HFSPLUS_IS_RSRC(inode)) {
		struct hfsplus_cat_file *file = &entry.file;
		hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
			       sizeof(struct hfsplus_cat_file));
		hfsplus_inode_write_fork(inode, &file->rsrc_fork);
		hfs_bnode_write(fd.bnode, &entry, fd.entryoffset,
				sizeof(struct hfsplus_cat_file));
	} else {
		struct hfsplus_cat_file *file = &entry.file;

		if (fd.entrylength < sizeof(struct hfsplus_cat_file))
			/* panic? */;
		hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
					sizeof(struct hfsplus_cat_file));
		hfsplus_inode_write_fork(inode, &file->data_fork);
		if (S_ISREG(inode->i_mode))
			HFSPLUS_I(inode).dev = inode->i_nlink;
		if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
			HFSPLUS_I(inode).dev = kdev_t_to_nr(inode->i_rdev);
		hfsplus_set_perms(inode, &file->permissions);
		if ((file->permissions.rootflags | file->permissions.userflags) & HFSPLUS_FLG_IMMUTABLE)
			file->flags |= cpu_to_be16(HFSPLUS_FILE_LOCKED);
		else
			file->flags &= cpu_to_be16(~HFSPLUS_FILE_LOCKED);
		file->access_date = hfsp_ut2mt(inode->i_atime);
		file->content_mod_date = hfsp_ut2mt(inode->i_mtime);
		file->attribute_mod_date = hfsp_ut2mt(inode->i_ctime);
		hfs_bnode_write(fd.bnode, &entry, fd.entryoffset,
					 sizeof(struct hfsplus_cat_file));
	}
out:
	hfs_find_exit(&fd);
	return 0;
}