// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/hfsplus/bnode.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Handle basic btree node operations
 */

#include <linux/string.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/fs.h>
#include <linux/swap.h>

#include "hfsplus_fs.h"
#include "hfsplus_raw.h"

/* Copy a specified range of bytes from the raw data of a node */
void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len)
{
	struct page **pagep;
	int l;

	off += node->page_offset;
	pagep = node->page + (off >> PAGE_SHIFT);
	off &= ~PAGE_MASK;

	l = min_t(int, len, PAGE_SIZE - off);
	memcpy_from_page(buf, *pagep, off, l);

	while ((len -= l) != 0) {
		buf += l;
		l = min_t(int, len, PAGE_SIZE);
		memcpy_from_page(buf, *++pagep, 0, l);
	}
}

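/* Read a big-endian 16-bit value from a node, returning it in CPU order */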
u16 hfs_bnode_read_u16(struct hfs_bnode *node, int off)
{
	__be16 data;
	/* TODO: optimize later... */
	hfs_bnode_read(node, &data, off, 2);
	return be16_to_cpu(data);
}

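/* Read a single byte from a node */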
u8 hfs_bnode_read_u8(struct hfs_bnode *node, int off)
{
	u8 data;
	/* TODO: optimize later... */
	hfs_bnode_read(node, &data, off, 1);
	return data;
}

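/*
 * Copy a record's key out of a node.  Leaf keys and variable-width
 * index keys carry their own length in the first two bytes; otherwise
 * the tree's fixed maximum key length applies.
 */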
void hfs_bnode_read_key(struct hfs_bnode *node, void *key, int off)
{
	struct hfs_btree *tree;
	int key_len;

	tree = node->tree;
	if (node->type == HFS_NODE_LEAF ||
	    tree->attributes & HFS_TREE_VARIDXKEYS ||
	    node->tree->cnid == HFSPLUS_ATTR_CNID)
		key_len = hfs_bnode_read_u16(node, off) + 2;
	else
		key_len = tree->max_key_len + 2;

	hfs_bnode_read(node, key, off, key_len);
}

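/* Copy a range of bytes into the raw data of a node, dirtying each page */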
void hfs_bnode_write(struct hfs_bnode *node, void *buf, int off, int len)
{
	struct page **pagep;
	int l;

	off += node->page_offset;
	pagep = node->page + (off >> PAGE_SHIFT);
	off &= ~PAGE_MASK;

	l = min_t(int, len, PAGE_SIZE - off);
	memcpy_to_page(*pagep, off, buf, l);
	set_page_dirty(*pagep);

	while ((len -= l) != 0) {
		buf += l;
		l = min_t(int, len, PAGE_SIZE);
		memcpy_to_page(*++pagep, 0, buf, l);
		set_page_dirty(*pagep);
	}
}

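/* Store a 16-bit value into a node in big-endian order */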
void hfs_bnode_write_u16(struct hfs_bnode *node, int off, u16 data)
{
	__be16 v = cpu_to_be16(data);
	/* TODO: optimize later... */
	hfs_bnode_write(node, &v, off, 2);
}

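/* Zero a range of bytes in the raw data of a node */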
void hfs_bnode_clear(struct hfs_bnode *node, int off, int len)
{
	struct page **pagep;
	int l;

	off += node->page_offset;
	pagep = node->page + (off >> PAGE_SHIFT);
	off &= ~PAGE_MASK;

	l = min_t(int, len, PAGE_SIZE - off);
	memzero_page(*pagep, off, l);
	set_page_dirty(*pagep);

	while ((len -= l) != 0) {
		l = min_t(int, len, PAGE_SIZE);
		memzero_page(*++pagep, 0, l);
		set_page_dirty(*pagep);
	}
}

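/* Copy a range of bytes from one node to another, page by page */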
void hfs_bnode_copy(struct hfs_bnode *dst_node, int dst,
		    struct hfs_bnode *src_node, int src, int len)
{
	struct page **src_page, **dst_page;
	int l;

	hfs_dbg(BNODE_MOD, "copybytes: %u,%u,%u\n", dst, src, len);
	if (!len)
		return;
	src += src_node->page_offset;
	dst += dst_node->page_offset;
	src_page = src_node->page + (src >> PAGE_SHIFT);
	src &= ~PAGE_MASK;
	dst_page = dst_node->page + (dst >> PAGE_SHIFT);
	dst &= ~PAGE_MASK;

	if (src == dst) {
		l = min_t(int, len, PAGE_SIZE - src);
		memcpy_page(*dst_page, src, *src_page, src, l);
		set_page_dirty(*dst_page);

		while ((len -= l) != 0) {
			l = min_t(int, len, PAGE_SIZE);
			memcpy_page(*++dst_page, 0, *++src_page, 0, l);
			set_page_dirty(*dst_page);
		}
	} else {
		void *src_ptr, *dst_ptr;

		do {
			dst_ptr = kmap_local_page(*dst_page) + dst;
			src_ptr = kmap_local_page(*src_page) + src;
			if (PAGE_SIZE - src < PAGE_SIZE - dst) {
				l = PAGE_SIZE - src;
				src = 0;
				dst += l;
			} else {
				l = PAGE_SIZE - dst;
				src += l;
				dst = 0;
			}
			l = min(len, l);
			memcpy(dst_ptr, src_ptr, l);
			kunmap_local(src_ptr);
			set_page_dirty(*dst_page);
			kunmap_local(dst_ptr);
			if (!dst)
				dst_page++;
			else
				src_page++;
		} while ((len -= l));
	}
}

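/*
 * Move bytes within a node.  When the destination lies above the source
 * the copy runs backwards so that overlapping ranges stay intact.
 */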
void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
{
	struct page **src_page, **dst_page;
	void *src_ptr, *dst_ptr;
	int l;

	hfs_dbg(BNODE_MOD, "movebytes: %u,%u,%u\n", dst, src, len);
	if (!len)
		return;
	src += node->page_offset;
	dst += node->page_offset;
	if (dst > src) {
		src += len - 1;
		src_page = node->page + (src >> PAGE_SHIFT);
		src = (src & ~PAGE_MASK) + 1;
		dst += len - 1;
		dst_page = node->page + (dst >> PAGE_SHIFT);
		dst = (dst & ~PAGE_MASK) + 1;

		if (src == dst) {
			while (src < len) {
				dst_ptr = kmap_local_page(*dst_page);
				src_ptr = kmap_local_page(*src_page);
				memmove(dst_ptr, src_ptr, src);
				kunmap_local(src_ptr);
				set_page_dirty(*dst_page);
				kunmap_local(dst_ptr);
				len -= src;
				src = PAGE_SIZE;
				src_page--;
				dst_page--;
			}
			src -= len;
			dst_ptr = kmap_local_page(*dst_page);
			src_ptr = kmap_local_page(*src_page);
			memmove(dst_ptr + src, src_ptr + src, len);
			kunmap_local(src_ptr);
			set_page_dirty(*dst_page);
			kunmap_local(dst_ptr);
		} else {
			do {
				dst_ptr = kmap_local_page(*dst_page) + dst;
				src_ptr = kmap_local_page(*src_page) + src;
				if (src < dst) {
					l = src;
					src = PAGE_SIZE;
					dst -= l;
				} else {
					l = dst;
					src -= l;
					dst = PAGE_SIZE;
				}
				l = min(len, l);
				memmove(dst_ptr - l, src_ptr - l, l);
				kunmap_local(src_ptr);
				set_page_dirty(*dst_page);
				kunmap_local(dst_ptr);
				if (dst == PAGE_SIZE)
					dst_page--;
				else
					src_page--;
			} while ((len -= l));
		}
	} else {
		src_page = node->page + (src >> PAGE_SHIFT);
		src &= ~PAGE_MASK;
		dst_page = node->page + (dst >> PAGE_SHIFT);
		dst &= ~PAGE_MASK;

		if (src == dst) {
			l = min_t(int, len, PAGE_SIZE - src);

			dst_ptr = kmap_local_page(*dst_page) + src;
			src_ptr = kmap_local_page(*src_page) + src;
			memmove(dst_ptr, src_ptr, l);
			kunmap_local(src_ptr);
			set_page_dirty(*dst_page);
			kunmap_local(dst_ptr);

			while ((len -= l) != 0) {
				l = min_t(int, len, PAGE_SIZE);
				dst_ptr = kmap_local_page(*++dst_page);
				src_ptr = kmap_local_page(*++src_page);
				memmove(dst_ptr, src_ptr, l);
				kunmap_local(src_ptr);
				set_page_dirty(*dst_page);
				kunmap_local(dst_ptr);
			}
		} else {
			do {
				dst_ptr = kmap_local_page(*dst_page) + dst;
				src_ptr = kmap_local_page(*src_page) + src;
				if (PAGE_SIZE - src <
						PAGE_SIZE - dst) {
					l = PAGE_SIZE - src;
					src = 0;
					dst += l;
				} else {
					l = PAGE_SIZE - dst;
					src += l;
					dst = 0;
				}
				l = min(len, l);
				memmove(dst_ptr, src_ptr, l);
				kunmap_local(src_ptr);
				set_page_dirty(*dst_page);
				kunmap_local(dst_ptr);
				if (!dst)
					dst_page++;
				else
					src_page++;
			} while ((len -= l));
		}
	}
}

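/* Dump the node descriptor and record offset table for debugging */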
void hfs_bnode_dump(struct hfs_bnode *node)
{
	struct hfs_bnode_desc desc;
	__be32 cnid;
	int i, off, key_off;

	hfs_dbg(BNODE_MOD, "bnode: %d\n", node->this);
	hfs_bnode_read(node, &desc, 0, sizeof(desc));
	hfs_dbg(BNODE_MOD, "%d, %d, %d, %d, %d\n",
		be32_to_cpu(desc.next), be32_to_cpu(desc.prev),
		desc.type, desc.height, be16_to_cpu(desc.num_recs));

	off = node->tree->node_size - 2;
	for (i = be16_to_cpu(desc.num_recs); i >= 0; off -= 2, i--) {
		key_off = hfs_bnode_read_u16(node, off);
		hfs_dbg(BNODE_MOD, " %d", key_off);
		if (i && node->type == HFS_NODE_INDEX) {
			int tmp;

			if (node->tree->attributes & HFS_TREE_VARIDXKEYS ||
					node->tree->cnid == HFSPLUS_ATTR_CNID)
				tmp = hfs_bnode_read_u16(node, key_off) + 2;
			else
				tmp = node->tree->max_key_len + 2;
			hfs_dbg_cont(BNODE_MOD, " (%d", tmp);
			hfs_bnode_read(node, &cnid, key_off + tmp, 4);
			hfs_dbg_cont(BNODE_MOD, ",%d)", be32_to_cpu(cnid));
		} else if (i && node->type == HFS_NODE_LEAF) {
			int tmp;

			tmp = hfs_bnode_read_u16(node, key_off);
			hfs_dbg_cont(BNODE_MOD, " (%d)", tmp);
		}
	}
	hfs_dbg_cont(BNODE_MOD, "\n");
}

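/* Unlink a node from its siblings' chain and mark it deleted */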
void hfs_bnode_unlink(struct hfs_bnode *node)
{
	struct hfs_btree *tree;
	struct hfs_bnode *tmp;
	__be32 cnid;

	tree = node->tree;
	if (node->prev) {
		tmp = hfs_bnode_find(tree, node->prev);
		if (IS_ERR(tmp))
			return;
		tmp->next = node->next;
		cnid = cpu_to_be32(tmp->next);
		hfs_bnode_write(tmp, &cnid,
			offsetof(struct hfs_bnode_desc, next), 4);
		hfs_bnode_put(tmp);
	} else if (node->type == HFS_NODE_LEAF)
		tree->leaf_head = node->next;

	if (node->next) {
		tmp = hfs_bnode_find(tree, node->next);
		if (IS_ERR(tmp))
			return;
		tmp->prev = node->prev;
		cnid = cpu_to_be32(tmp->prev);
		hfs_bnode_write(tmp, &cnid,
			offsetof(struct hfs_bnode_desc, prev), 4);
		hfs_bnode_put(tmp);
	} else if (node->type == HFS_NODE_LEAF)
		tree->leaf_tail = node->prev;

	/* move down? */
	if (!node->prev && !node->next)
		hfs_dbg(BNODE_MOD, "hfs_btree_del_level\n");
	if (!node->parent) {
		tree->root = 0;
		tree->depth = 0;
	}
	set_bit(HFS_BNODE_DELETED, &node->flags);
}

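/* Fold a node number into an index into the node hash table */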
static inline int hfs_bnode_hash(u32 num)
{
	num = (num >> 16) + num;
	num += num >> 8;
	return num & (NODE_HASH_SIZE - 1);
}

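/* Look up a node by number in the hash table; caller holds tree->hash_lock */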
struct hfs_bnode *hfs_bnode_findhash(struct hfs_btree *tree, u32 cnid)
{
	struct hfs_bnode *node;

	if (cnid >= tree->node_count) {
		pr_err("request for non-existent node %u in B*Tree\n",
		       cnid);
		return NULL;
	}

	for (node = tree->node_hash[hfs_bnode_hash(cnid)];
			node; node = node->next_hash)
		if (node->this == cnid)
			return node;
	return NULL;
}

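/*
 * Allocate a node, insert it into the tree's node hash and read its
 * backing pages from the tree inode's mapping.  If another thread
 * created the same node first, free ours and return the existing one
 * once it has finished loading.
 */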
static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
{
	struct hfs_bnode *node, *node2;
	struct address_space *mapping;
	struct page *page;
	int size, block, i, hash;
	loff_t off;

	if (cnid >= tree->node_count) {
		pr_err("request for non-existent node %u in B*Tree\n",
		       cnid);
		return NULL;
	}

	size = sizeof(struct hfs_bnode) + tree->pages_per_bnode *
		sizeof(struct page *);
	node = kzalloc(size, GFP_KERNEL);
	if (!node)
		return NULL;
	node->tree = tree;
	node->this = cnid;
	set_bit(HFS_BNODE_NEW, &node->flags);
	atomic_set(&node->refcnt, 1);
	hfs_dbg(BNODE_REFS, "new_node(%d:%d): 1\n",
		node->tree->cnid, node->this);
	init_waitqueue_head(&node->lock_wq);
	spin_lock(&tree->hash_lock);
	node2 = hfs_bnode_findhash(tree, cnid);
	if (!node2) {
		hash = hfs_bnode_hash(cnid);
		node->next_hash = tree->node_hash[hash];
		tree->node_hash[hash] = node;
		tree->node_hash_cnt++;
	} else {
		spin_unlock(&tree->hash_lock);
		kfree(node);
		wait_event(node2->lock_wq,
			!test_bit(HFS_BNODE_NEW, &node2->flags));
		return node2;
	}
	spin_unlock(&tree->hash_lock);

	mapping = tree->inode->i_mapping;
	off = (loff_t)cnid << tree->node_size_shift;
	block = off >> PAGE_SHIFT;
	node->page_offset = off & ~PAGE_MASK;
	for (i = 0; i < tree->pages_per_bnode; block++, i++) {
		page = read_mapping_page(mapping, block, NULL);
		if (IS_ERR(page))
			goto fail;
		node->page[i] = page;
	}

	return node;
fail:
	set_bit(HFS_BNODE_ERROR, &node->flags);
	return node;
}

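/* Remove a node from the hash table; caller holds tree->hash_lock */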
void hfs_bnode_unhash(struct hfs_bnode *node)
{
	struct hfs_bnode **p;

	hfs_dbg(BNODE_REFS, "remove_node(%d:%d): %d\n",
		node->tree->cnid, node->this, atomic_read(&node->refcnt));
	for (p = &node->tree->node_hash[hfs_bnode_hash(node->this)];
	     *p && *p != node; p = &(*p)->next_hash)
		;
	BUG_ON(!*p);
	*p = node->next_hash;
	node->tree->node_hash_cnt--;
}

/* Load a particular node out of a tree */
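/*
 * A typical caller pairs each successful lookup with hfs_bnode_put()
 * to drop the reference taken here, e.g. (sketch):
 *
 *	node = hfs_bnode_find(tree, num);
 *	if (IS_ERR(node))
 *		return PTR_ERR(node);
 *	...
 *	hfs_bnode_put(node);
 */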
struct hfs_bnode *hfs_bnode_find(struct hfs_btree *tree, u32 num)
{
	struct hfs_bnode *node;
	struct hfs_bnode_desc *desc;
	int i, rec_off, off, next_off;
	int entry_size, key_size;

	spin_lock(&tree->hash_lock);
	node = hfs_bnode_findhash(tree, num);
	if (node) {
		hfs_bnode_get(node);
		spin_unlock(&tree->hash_lock);
		wait_event(node->lock_wq,
			!test_bit(HFS_BNODE_NEW, &node->flags));
		if (test_bit(HFS_BNODE_ERROR, &node->flags))
			goto node_error;
		return node;
	}
	spin_unlock(&tree->hash_lock);
	node = __hfs_bnode_create(tree, num);
	if (!node)
		return ERR_PTR(-ENOMEM);
	if (test_bit(HFS_BNODE_ERROR, &node->flags))
		goto node_error;
	if (!test_bit(HFS_BNODE_NEW, &node->flags))
		return node;

	desc = (struct hfs_bnode_desc *)(kmap_local_page(node->page[0]) +
							 node->page_offset);
	node->prev = be32_to_cpu(desc->prev);
	node->next = be32_to_cpu(desc->next);
	node->num_recs = be16_to_cpu(desc->num_recs);
	node->type = desc->type;
	node->height = desc->height;
	kunmap_local(desc);

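	/* Reject nodes whose height is inconsistent with their type */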
	switch (node->type) {
	case HFS_NODE_HEADER:
	case HFS_NODE_MAP:
		if (node->height != 0)
			goto node_error;
		break;
	case HFS_NODE_LEAF:
		if (node->height != 1)
			goto node_error;
		break;
	case HFS_NODE_INDEX:
		if (node->height <= 1 || node->height > tree->depth)
			goto node_error;
		break;
	default:
		goto node_error;
	}

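	/*
	 * Walk the record offset table at the end of the node: offsets
	 * must be even, strictly increasing and bounded by the node
	 * size, and each key must fit inside its record.
	 */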
	rec_off = tree->node_size - 2;
	off = hfs_bnode_read_u16(node, rec_off);
	if (off != sizeof(struct hfs_bnode_desc))
		goto node_error;
	for (i = 1; i <= node->num_recs; off = next_off, i++) {
		rec_off -= 2;
		next_off = hfs_bnode_read_u16(node, rec_off);
		if (next_off <= off ||
		    next_off > tree->node_size ||
		    next_off & 1)
			goto node_error;
		entry_size = next_off - off;
		if (node->type != HFS_NODE_INDEX &&
		    node->type != HFS_NODE_LEAF)
			continue;
		key_size = hfs_bnode_read_u16(node, off) + 2;
		if (key_size >= entry_size || key_size & 1)
			goto node_error;
	}
	clear_bit(HFS_BNODE_NEW, &node->flags);
	wake_up(&node->lock_wq);
	return node;

node_error:
	set_bit(HFS_BNODE_ERROR, &node->flags);
	clear_bit(HFS_BNODE_NEW, &node->flags);
	wake_up(&node->lock_wq);
	hfs_bnode_put(node);
	return ERR_PTR(-EIO);
}

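/* Drop the page references held by a node and free its memory */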
void hfs_bnode_free(struct hfs_bnode *node)
{
	int i;

	for (i = 0; i < node->tree->pages_per_bnode; i++)
		if (node->page[i])
			put_page(node->page[i]);
	kfree(node);
}

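/* Create a brand-new node and zero its contents; the node number must
 * not already be hashed */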
struct hfs_bnode *hfs_bnode_create(struct hfs_btree *tree, u32 num)
{
	struct hfs_bnode *node;
	struct page **pagep;
	int i;

	spin_lock(&tree->hash_lock);
	node = hfs_bnode_findhash(tree, num);
	spin_unlock(&tree->hash_lock);
	if (node) {
		pr_crit("new node %u already hashed?\n", num);
		WARN_ON(1);
		return node;
	}
	node = __hfs_bnode_create(tree, num);
	if (!node)
		return ERR_PTR(-ENOMEM);
	if (test_bit(HFS_BNODE_ERROR, &node->flags)) {
		hfs_bnode_put(node);
		return ERR_PTR(-EIO);
	}

	pagep = node->page;
	memzero_page(*pagep, node->page_offset,
		     min_t(int, PAGE_SIZE, tree->node_size));
	set_page_dirty(*pagep);
	for (i = 1; i < tree->pages_per_bnode; i++) {
		memzero_page(*++pagep, 0, PAGE_SIZE);
		set_page_dirty(*pagep);
	}
	clear_bit(HFS_BNODE_NEW, &node->flags);
	wake_up(&node->lock_wq);

	return node;
}

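/* Take an extra reference on a node */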
void hfs_bnode_get(struct hfs_bnode *node)
{
	if (node) {
		atomic_inc(&node->refcnt);
		hfs_dbg(BNODE_REFS, "get_node(%d:%d): %d\n",
			node->tree->cnid, node->this,
			atomic_read(&node->refcnt));
	}
}

/* Dispose of resources used by a node */
void hfs_bnode_put(struct hfs_bnode *node)
{
	if (node) {
		struct hfs_btree *tree = node->tree;
		int i;

		hfs_dbg(BNODE_REFS, "put_node(%d:%d): %d\n",
			node->tree->cnid, node->this,
			atomic_read(&node->refcnt));
		BUG_ON(!atomic_read(&node->refcnt));
		if (!atomic_dec_and_lock(&node->refcnt, &tree->hash_lock))
			return;
		for (i = 0; i < tree->pages_per_bnode; i++) {
			if (!node->page[i])
				continue;
			mark_page_accessed(node->page[i]);
		}

		if (test_bit(HFS_BNODE_DELETED, &node->flags)) {
			hfs_bnode_unhash(node);
			spin_unlock(&tree->hash_lock);
			if (hfs_bnode_need_zeroout(tree))
				hfs_bnode_clear(node, 0, tree->node_size);
			hfs_bmap_free(node);
			hfs_bnode_free(node);
			return;
		}
		spin_unlock(&tree->hash_lock);
	}
}

/*
 * Unused nodes have to be zeroed if this is the catalog tree and
 * a corresponding flag in the volume header is set.
 */
bool hfs_bnode_need_zeroout(struct hfs_btree *tree)
{
	struct super_block *sb = tree->inode->i_sb;
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
	const u32 volume_attr = be32_to_cpu(sbi->s_vhdr->attributes);

	return tree->cnid == HFSPLUS_CAT_CNID &&
		volume_attr & HFSPLUS_VOL_UNUSED_NODE_FIX;
}