/*
 *  linux/fs/hfsplus/btree.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Handle opening/closing btree
 */

#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/log2.h>

#include "hfsplus_fs.h"
#include "hfsplus_raw.h"


/* Get a reference to a B*Tree and do some initial checks */
struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id)
{
	struct hfs_btree *tree;
	struct hfs_btree_header_rec *head;
	struct address_space *mapping;
	struct page *page;
	unsigned int size;

	tree = kzalloc(sizeof(*tree), GFP_KERNEL);
	if (!tree)
		return NULL;

	init_MUTEX(&tree->tree_lock);
	spin_lock_init(&tree->hash_lock);
	tree->sb = sb;
	tree->cnid = id;
	tree->inode = iget(sb, id);
	if (!tree->inode)
		goto free_tree;

	mapping = tree->inode->i_mapping;
	page = read_mapping_page(mapping, 0, NULL);
	if (IS_ERR(page))
		goto free_tree;

	/* Load the header */
	head = (struct hfs_btree_header_rec *)(kmap(page) + sizeof(struct hfs_bnode_desc));
	tree->root = be32_to_cpu(head->root);
	tree->leaf_count = be32_to_cpu(head->leaf_count);
	tree->leaf_head = be32_to_cpu(head->leaf_head);
	tree->leaf_tail = be32_to_cpu(head->leaf_tail);
	tree->node_count = be32_to_cpu(head->node_count);
	tree->free_nodes = be32_to_cpu(head->free_nodes);
	tree->attributes = be32_to_cpu(head->attributes);
	tree->node_size = be16_to_cpu(head->node_size);
	tree->max_key_len = be16_to_cpu(head->max_key_len);
	tree->depth = be16_to_cpu(head->depth);

	/* Set the correct compare function */
	if (id == HFSPLUS_EXT_CNID) {
		tree->keycmp = hfsplus_ext_cmp_key;
	} else if (id == HFSPLUS_CAT_CNID) {
		if ((HFSPLUS_SB(sb).flags & HFSPLUS_SB_HFSX) &&
		    (head->key_type == HFSPLUS_KEY_BINARY))
			tree->keycmp = hfsplus_cat_bin_cmp_key;
		else
			tree->keycmp = hfsplus_cat_case_cmp_key;
	} else {
		printk(KERN_ERR "hfs: unknown B*Tree requested\n");
		goto fail_page;
	}

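	/* The node size must be a power of two; node_size_shift is its log2 */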
	size = tree->node_size;
	if (!is_power_of_2(size))
		goto fail_page;
	if (!tree->node_count)
		goto fail_page;
	tree->node_size_shift = ffs(size) - 1;

	tree->pages_per_bnode = (tree->node_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;

	kunmap(page);
	page_cache_release(page);
	return tree;

 fail_page:
	kunmap(page);
	tree->inode->i_mapping->a_ops = &hfsplus_aops;
	page_cache_release(page);
 free_tree:
	iput(tree->inode);
	kfree(tree);
	return NULL;
}

/* Release resources used by a btree */
void hfs_btree_close(struct hfs_btree *tree)
{
	struct hfs_bnode *node;
	int i;

	if (!tree)
		return;

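	/*
	 * Drop every bnode still cached in the node hash.  A non-zero
	 * refcount at this point means somebody leaked a reference.
	 */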
	for (i = 0; i < NODE_HASH_SIZE; i++) {
		while ((node = tree->node_hash[i])) {
			tree->node_hash[i] = node->next_hash;
			if (atomic_read(&node->refcnt))
				printk(KERN_CRIT "hfs: node %d:%d still has %d user(s)!\n",
					node->tree->cnid, node->this, atomic_read(&node->refcnt));
			hfs_bnode_free(node);
			tree->node_hash_cnt--;
		}
	}
	iput(tree->inode);
	kfree(tree);
}

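/* Write the cached header fields back into the header node (node 0) */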
void hfs_btree_write(struct hfs_btree *tree)
{
	struct hfs_btree_header_rec *head;
	struct hfs_bnode *node;
	struct page *page;

	node = hfs_bnode_find(tree, 0);
	if (IS_ERR(node))
		/* panic? */
		return;
	/* Load the header */
	page = node->page[0];
	head = (struct hfs_btree_header_rec *)(kmap(page) + sizeof(struct hfs_bnode_desc));

	head->root = cpu_to_be32(tree->root);
	head->leaf_count = cpu_to_be32(tree->leaf_count);
	head->leaf_head = cpu_to_be32(tree->leaf_head);
	head->leaf_tail = cpu_to_be32(tree->leaf_tail);
	head->node_count = cpu_to_be32(tree->node_count);
	head->free_nodes = cpu_to_be32(tree->free_nodes);
	head->attributes = cpu_to_be32(tree->attributes);
	head->depth = cpu_to_be16(tree->depth);

	kunmap(page);
	set_page_dirty(page);
	hfs_bnode_put(node);
}

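/*
 * Create a new map node at index @idx and chain it after @prev.  The node
 * carries a single map record starting right after the node descriptor
 * (offset 14); its first bit is set because that bit describes the map
 * node itself.
 */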
static struct hfs_bnode *hfs_bmap_new_bmap(struct hfs_bnode *prev, u32 idx)
{
	struct hfs_btree *tree = prev->tree;
	struct hfs_bnode *node;
	struct hfs_bnode_desc desc;
	__be32 cnid;

	node = hfs_bnode_create(tree, idx);
	if (IS_ERR(node))
		return node;

	tree->free_nodes--;
	prev->next = idx;
	cnid = cpu_to_be32(idx);
	hfs_bnode_write(prev, &cnid, offsetof(struct hfs_bnode_desc, next), 4);

	node->type = HFS_NODE_MAP;
	node->num_recs = 1;
	hfs_bnode_clear(node, 0, tree->node_size);
	desc.next = 0;
	desc.prev = 0;
	desc.type = HFS_NODE_MAP;
	desc.height = 0;
	desc.num_recs = cpu_to_be16(1);
	desc.reserved = 0;
	hfs_bnode_write(node, &desc, 0, sizeof(desc));
	hfs_bnode_write_u16(node, 14, 0x8000);
	hfs_bnode_write_u16(node, tree->node_size - 2, 14);
	hfs_bnode_write_u16(node, tree->node_size - 4, tree->node_size - 6);

	return node;
}

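/*
 * Allocate a node from the allocation bitmap.  The bitmap starts in the
 * map record of the header node and continues in dedicated map nodes
 * linked through the ->next field of each node descriptor.  If no free
 * node is left, the btree file is grown first.
 */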
struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
{
	struct hfs_bnode *node, *next_node;
	struct page **pagep;
	u32 nidx, idx;
	u16 off, len;
	u8 *data, byte, m;
	int i;

	while (!tree->free_nodes) {
		struct inode *inode = tree->inode;
		u32 count;
		int res;

		res = hfsplus_file_extend(inode);
		if (res)
			return ERR_PTR(res);
		HFSPLUS_I(inode).phys_size = inode->i_size =
				(loff_t)HFSPLUS_I(inode).alloc_blocks <<
				HFSPLUS_SB(tree->sb).alloc_blksz_shift;
		HFSPLUS_I(inode).fs_blocks = HFSPLUS_I(inode).alloc_blocks <<
					     HFSPLUS_SB(tree->sb).fs_shift;
		inode_set_bytes(inode, inode->i_size);
		/* Foxconn added start pling 05/31/2010 */
		/* Set the i_blocks field properly */
		inode->i_blocks = inode->i_size / 512;
		if (inode->i_size % 512)
			inode->i_blocks++;
		/* Foxconn added end pling 05/31/2010 */
		count = inode->i_size >> tree->node_size_shift;
		tree->free_nodes = count - tree->node_count;
		tree->node_count = count;
	}

	nidx = 0;
	node = hfs_bnode_find(tree, nidx);
	if (IS_ERR(node))
		return node;
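	/* Record 2 of the header node is the first chunk of the allocation bitmap */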
	len = hfs_brec_lenoff(node, 2, &off);

	off += node->page_offset;
	pagep = node->page + (off >> PAGE_CACHE_SHIFT);
	data = kmap(*pagep);
	off &= ~PAGE_CACHE_MASK;
	idx = 0;

	for (;;) {
		while (len) {
			byte = data[off];
			if (byte != 0xff) {
				for (m = 0x80, i = 0; i < 8; m >>= 1, i++) {
					if (!(byte & m)) {
						idx += i;
						data[off] |= m;
						set_page_dirty(*pagep);
						kunmap(*pagep);
						tree->free_nodes--;
						mark_inode_dirty(tree->inode);
						hfs_bnode_put(node);
						return hfs_bnode_create(tree, idx);
					}
				}
			}
			if (++off >= PAGE_CACHE_SIZE) {
				kunmap(*pagep);
				data = kmap(*++pagep);
				off = 0;
			}
			idx += 8;
			len--;
		}
		kunmap(*pagep);
		nidx = node->next;
		if (!nidx) {
			printk(KERN_DEBUG "hfs: create new bmap node...\n");
			next_node = hfs_bmap_new_bmap(node, idx);
		} else
			next_node = hfs_bnode_find(tree, nidx);
		hfs_bnode_put(node);
		if (IS_ERR(next_node))
			return next_node;
		node = next_node;

		len = hfs_brec_lenoff(node, 0, &off);
		off += node->page_offset;
		pagep = node->page + (off >> PAGE_CACHE_SHIFT);
		data = kmap(*pagep);
		off &= ~PAGE_CACHE_MASK;
	}
}

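/*
 * Mark @node free again by clearing its bit in the allocation bitmap,
 * walking the map node chain until the record covering it is found.
 */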
void hfs_bmap_free(struct hfs_bnode *node)
{
	struct hfs_btree *tree;
	struct page *page;
	u16 off, len;
	u32 nidx;
	u8 *data, byte, m;

	dprint(DBG_BNODE_MOD, "btree_free_node: %u\n", node->this);
	BUG_ON(!node->this);
	tree = node->tree;
	nidx = node->this;
	node = hfs_bnode_find(tree, 0);
	if (IS_ERR(node))
		return;
	len = hfs_brec_lenoff(node, 2, &off);
	while (nidx >= len * 8) {
		u32 i;

		nidx -= len * 8;
		i = node->next;
		if (!i) {
			/* panic */;
			printk(KERN_CRIT "hfs: unable to free bnode %u. bmap not found!\n", node->this);
			hfs_bnode_put(node);
			return;
		}
		hfs_bnode_put(node);
		node = hfs_bnode_find(tree, i);
		if (IS_ERR(node))
			return;
		if (node->type != HFS_NODE_MAP) {
			/* panic */;
			printk(KERN_CRIT "hfs: invalid bmap found! (%u,%d)\n", node->this, node->type);
			hfs_bnode_put(node);
			return;
		}
		len = hfs_brec_lenoff(node, 0, &off);
	}
	off += node->page_offset + nidx / 8;
	page = node->page[off >> PAGE_CACHE_SHIFT];
	data = kmap(page);
	off &= ~PAGE_CACHE_MASK;
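	/* Within a bitmap byte, bit 0 is the most significant bit */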
	m = 1 << (~nidx & 7);
	byte = data[off];
	if (!(byte & m)) {
		printk(KERN_CRIT "hfs: trying to free free bnode %u(%d)\n", node->this, node->type);
		kunmap(page);
		hfs_bnode_put(node);
		return;
	}
	data[off] = byte & ~m;
	set_page_dirty(page);
	kunmap(page);
	hfs_bnode_put(node);
	tree->free_nodes++;
	mark_inode_dirty(tree->inode);
}