/*
 *  linux/fs/hfsplus/btree.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Handle opening/closing btree
 */

#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/log2.h>

#include "hfsplus_fs.h"
#include "hfsplus_raw.h"


/* Get a reference to a B*Tree and do some initial checks */
struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id)
{
	struct hfs_btree *tree;
	struct hfs_btree_header_rec *head;
	struct address_space *mapping;
	struct inode *inode;
	struct page *page;
	unsigned int size;

	tree = kzalloc(sizeof(*tree), GFP_KERNEL);
	if (!tree)
		return NULL;

	init_MUTEX(&tree->tree_lock);
	spin_lock_init(&tree->hash_lock);
	tree->sb = sb;
	tree->cnid = id;
	inode = hfsplus_iget(sb, id);
	if (IS_ERR(inode))
		goto free_tree;
	tree->inode = inode;

	mapping = tree->inode->i_mapping;
	page = read_mapping_page(mapping, 0, NULL);
	if (IS_ERR(page))
		goto free_tree;

	/* Load the header */
	head = (struct hfs_btree_header_rec *)(kmap(page) + sizeof(struct hfs_bnode_desc));
	tree->root = be32_to_cpu(head->root);
	tree->leaf_count = be32_to_cpu(head->leaf_count);
	tree->leaf_head = be32_to_cpu(head->leaf_head);
	tree->leaf_tail = be32_to_cpu(head->leaf_tail);
	tree->node_count = be32_to_cpu(head->node_count);
	tree->free_nodes = be32_to_cpu(head->free_nodes);
	tree->attributes = be32_to_cpu(head->attributes);
	tree->node_size = be16_to_cpu(head->node_size);
	tree->max_key_len = be16_to_cpu(head->max_key_len);
	tree->depth = be16_to_cpu(head->depth);

	/* Set the correct compare function */
	if (id == HFSPLUS_EXT_CNID) {
		tree->keycmp = hfsplus_ext_cmp_key;
	} else if (id == HFSPLUS_CAT_CNID) {
		if ((HFSPLUS_SB(sb).flags & HFSPLUS_SB_HFSX) &&
		    (head->key_type == HFSPLUS_KEY_BINARY))
			tree->keycmp = hfsplus_cat_bin_cmp_key;
		else {
			tree->keycmp = hfsplus_cat_case_cmp_key;
			HFSPLUS_SB(sb).flags |= HFSPLUS_SB_CASEFOLD;
		}
	} else {
		printk(KERN_ERR "hfs: unknown B*Tree requested\n");
		goto fail_page;
	}

	size = tree->node_size;
	if (!is_power_of_2(size))
		goto fail_page;
	if (!tree->node_count)
		goto fail_page;
	tree->node_size_shift = ffs(size) - 1;

	tree->pages_per_bnode = (tree->node_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;

	kunmap(page);
	page_cache_release(page);
	return tree;

 fail_page:
	/* balance the kmap() taken when reading the header above */
	kunmap(page);
	tree->inode->i_mapping->a_ops = &hfsplus_aops;
	page_cache_release(page);
 free_tree:
	iput(tree->inode);
	kfree(tree);
	return NULL;
}

/* Release resources used by a btree */
void hfs_btree_close(struct hfs_btree *tree)
{
	struct hfs_bnode *node;
	int i;

	if (!tree)
		return;

	for (i = 0; i < NODE_HASH_SIZE; i++) {
		while ((node = tree->node_hash[i])) {
			tree->node_hash[i] = node->next_hash;
			if (atomic_read(&node->refcnt))
				printk(KERN_CRIT "hfs: node %d:%d still has %d user(s)!\n",
					node->tree->cnid, node->this, atomic_read(&node->refcnt));
			hfs_bnode_free(node);
			tree->node_hash_cnt--;
		}
	}
	iput(tree->inode);
	kfree(tree);
}

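/* Write the in-memory header fields back into the header node (node 0) */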
void hfs_btree_write(struct hfs_btree *tree)
{
	struct hfs_btree_header_rec *head;
	struct hfs_bnode *node;
	struct page *page;

	node = hfs_bnode_find(tree, 0);
	if (IS_ERR(node))
		/* panic? */
		return;
	/* Load the header */
	page = node->page[0];
	head = (struct hfs_btree_header_rec *)(kmap(page) + sizeof(struct hfs_bnode_desc));

	head->root = cpu_to_be32(tree->root);
	head->leaf_count = cpu_to_be32(tree->leaf_count);
	head->leaf_head = cpu_to_be32(tree->leaf_head);
	head->leaf_tail = cpu_to_be32(tree->leaf_tail);
	head->node_count = cpu_to_be32(tree->node_count);
	head->free_nodes = cpu_to_be32(tree->free_nodes);
	head->attributes = cpu_to_be32(tree->attributes);
	head->depth = cpu_to_be16(tree->depth);

	kunmap(page);
	set_page_dirty(page);
	hfs_bnode_put(node);
}

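/*
 * Create a new map (bitmap) node at index idx and chain it after @prev.
 * Called from hfs_bmap_alloc() when the existing map records have no
 * free bits left.
 */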
static struct hfs_bnode *hfs_bmap_new_bmap(struct hfs_bnode *prev, u32 idx)
{
	struct hfs_btree *tree = prev->tree;
	struct hfs_bnode *node;
	struct hfs_bnode_desc desc;
	__be32 cnid;

	node = hfs_bnode_create(tree, idx);
	if (IS_ERR(node))
		return node;

	tree->free_nodes--;
	prev->next = idx;
	cnid = cpu_to_be32(idx);
	hfs_bnode_write(prev, &cnid, offsetof(struct hfs_bnode_desc, next), 4);

	node->type = HFS_NODE_MAP;
	node->num_recs = 1;
	hfs_bnode_clear(node, 0, tree->node_size);
	desc.next = 0;
	desc.prev = 0;
	desc.type = HFS_NODE_MAP;
	desc.height = 0;
	desc.num_recs = cpu_to_be16(1);
	desc.reserved = 0;
	hfs_bnode_write(node, &desc, 0, sizeof(desc));
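	/*
	 * The single map record starts right after the 14-byte node
	 * descriptor.  Its start offset (14) and the free-space offset
	 * (node_size - 6) go into the offset table at the end of the
	 * node, and the first bitmap bit (0x8000, written big-endian)
	 * is set so the new map node itself, the first node it covers,
	 * is recorded as allocated.
	 */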
	hfs_bnode_write_u16(node, 14, 0x8000);
	hfs_bnode_write_u16(node, tree->node_size - 2, 14);
	hfs_bnode_write_u16(node, tree->node_size - 4, tree->node_size - 6);

	return node;
}

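/*
 * Allocate a free node in the btree, growing the underlying tree file
 * first if the header shows no free nodes left.
 */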
struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
{
	struct hfs_bnode *node, *next_node;
	struct page **pagep;
	u32 nidx, idx;
	unsigned off;
	u16 off16;
	u16 len;
	u8 *data, byte, m;
	int i;

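	/*
	 * No free nodes left according to the header: extend the btree
	 * file and account for the newly added nodes.
	 */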
	while (!tree->free_nodes) {
		struct inode *inode = tree->inode;
		u32 count;
		int res;

		res = hfsplus_file_extend(inode);
		if (res)
			return ERR_PTR(res);
		HFSPLUS_I(inode).phys_size = inode->i_size =
				(loff_t)HFSPLUS_I(inode).alloc_blocks <<
				HFSPLUS_SB(tree->sb).alloc_blksz_shift;
		HFSPLUS_I(inode).fs_blocks = HFSPLUS_I(inode).alloc_blocks <<
					     HFSPLUS_SB(tree->sb).fs_shift;
		inode_set_bytes(inode, inode->i_size);
		count = inode->i_size >> tree->node_size_shift;
		tree->free_nodes = count - tree->node_count;
		tree->node_count = count;
	}

	nidx = 0;
	node = hfs_bnode_find(tree, nidx);
	if (IS_ERR(node))
		return node;
	len = hfs_brec_lenoff(node, 2, &off16);
	off = off16;

	off += node->page_offset;
	pagep = node->page + (off >> PAGE_CACHE_SHIFT);
	data = kmap(*pagep);
	off &= ~PAGE_CACHE_MASK;
	idx = 0;

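	/*
	 * Scan the map record bits (MSB first within each byte) for a
	 * clear bit; each bit represents one btree node.  When one is
	 * found, set it and create the corresponding node.  If this map
	 * record is exhausted, continue with the next map node in the
	 * chain, creating a new one at the end if necessary.
	 */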
	for (;;) {
		while (len) {
			byte = data[off];
			if (byte != 0xff) {
				for (m = 0x80, i = 0; i < 8; m >>= 1, i++) {
					if (!(byte & m)) {
						idx += i;
						data[off] |= m;
						set_page_dirty(*pagep);
						kunmap(*pagep);
						tree->free_nodes--;
						mark_inode_dirty(tree->inode);
						hfs_bnode_put(node);
						return hfs_bnode_create(tree, idx);
					}
				}
			}
			if (++off >= PAGE_CACHE_SIZE) {
				kunmap(*pagep);
				data = kmap(*++pagep);
				off = 0;
			}
			idx += 8;
			len--;
		}
		kunmap(*pagep);
		nidx = node->next;
		if (!nidx) {
			printk(KERN_DEBUG "hfs: create new bmap node...\n");
			next_node = hfs_bmap_new_bmap(node, idx);
		} else
			next_node = hfs_bnode_find(tree, nidx);
		hfs_bnode_put(node);
		if (IS_ERR(next_node))
			return next_node;
		node = next_node;

		len = hfs_brec_lenoff(node, 0, &off16);
		off = off16;
		off += node->page_offset;
		pagep = node->page + (off >> PAGE_CACHE_SHIFT);
		data = kmap(*pagep);
		off &= ~PAGE_CACHE_MASK;
	}
}

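/* Clear the map bit for @node, returning it to the btree's free pool */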
void hfs_bmap_free(struct hfs_bnode *node)
{
	struct hfs_btree *tree;
	struct page *page;
	u16 off, len;
	u32 nidx;
	u8 *data, byte, m;

	dprint(DBG_BNODE_MOD, "btree_free_node: %u\n", node->this);
	BUG_ON(!node->this);
	tree = node->tree;
	nidx = node->this;
	node = hfs_bnode_find(tree, 0);
	if (IS_ERR(node))
		return;
	len = hfs_brec_lenoff(node, 2, &off);
	while (nidx >= len * 8) {
		u32 i;

		nidx -= len * 8;
		i = node->next;
		if (!i) {
			/* panic */;
			printk(KERN_CRIT "hfs: unable to free bnode %u. bmap not found!\n", node->this);
			hfs_bnode_put(node);
			return;
		}
		hfs_bnode_put(node);
		node = hfs_bnode_find(tree, i);
		if (IS_ERR(node))
			return;
		if (node->type != HFS_NODE_MAP) {
			/* panic */;
			printk(KERN_CRIT "hfs: invalid bmap found! (%u,%d)\n", node->this, node->type);
			hfs_bnode_put(node);
			return;
		}
		len = hfs_brec_lenoff(node, 0, &off);
	}
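	/*
	 * nidx is now the bit index within this map record: locate the
	 * byte that holds it and clear the bit, counting bits from the
	 * most significant end of each byte.
	 */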
	off += node->page_offset + nidx / 8;
	page = node->page[off >> PAGE_CACHE_SHIFT];
	data = kmap(page);
	off &= ~PAGE_CACHE_MASK;
	m = 1 << (~nidx & 7);
	byte = data[off];
	if (!(byte & m)) {
		printk(KERN_CRIT "hfs: trying to free free bnode %u(%d)\n", node->this, node->type);
		kunmap(page);
		hfs_bnode_put(node);
		return;
	}
	data[off] = byte & ~m;
	set_page_dirty(page);
	kunmap(page);
	hfs_bnode_put(node);
	tree->free_nodes++;
	mark_inode_dirty(tree->inode);
}