• Home
  • History
  • Annotate
  • Line#
  • Navigate
  • Raw
  • Download
  • only in /asuswrt-rt-n18u-9.0.0.4.380.2695/release/src-rt-6.x.4708/linux/linux-2.6.36/fs/btrfs/
1/*
2 * Copyright (C) 2008 Oracle.  All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
17 */
18
19#include <linux/sched.h>
20#include <linux/slab.h>
21#include <linux/sort.h>
22#include "ctree.h"
23#include "ref-cache.h"
24#include "transaction.h"
25
26/*
27 * leaf refs are used to cache the information about which extents
28 * a given leaf has references on.  This allows us to process that leaf
29 * in btrfs_drop_snapshot without needing to read it back from disk.
30 */
31
32/*
33 * kmalloc a leaf reference struct and update the counters for the
34 * total ref cache size
35 */
36struct btrfs_leaf_ref *btrfs_alloc_leaf_ref(struct btrfs_root *root,
37					    int nr_extents)
38{
39	struct btrfs_leaf_ref *ref;
40	size_t size = btrfs_leaf_ref_size(nr_extents);
41
42	ref = kmalloc(size, GFP_NOFS);
43	if (ref) {
44		spin_lock(&root->fs_info->ref_cache_lock);
45		root->fs_info->total_ref_cache_size += size;
46		spin_unlock(&root->fs_info->ref_cache_lock);
47
48		memset(ref, 0, sizeof(*ref));
49		atomic_set(&ref->usage, 1);
50		INIT_LIST_HEAD(&ref->list);
51	}
52	return ref;
53}
54
55/*
56 * free a leaf reference struct and update the counters for the
57 * total ref cache size
58 */
59void btrfs_free_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref)
60{
61	if (!ref)
62		return;
63	WARN_ON(atomic_read(&ref->usage) == 0);
64	if (atomic_dec_and_test(&ref->usage)) {
65		size_t size = btrfs_leaf_ref_size(ref->nritems);
66
67		BUG_ON(ref->in_tree);
68		kfree(ref);
69
70		spin_lock(&root->fs_info->ref_cache_lock);
71		root->fs_info->total_ref_cache_size -= size;
72		spin_unlock(&root->fs_info->ref_cache_lock);
73	}
74}
75
76static struct rb_node *tree_insert(struct rb_root *root, u64 bytenr,
77				   struct rb_node *node)
78{
79	struct rb_node **p = &root->rb_node;
80	struct rb_node *parent = NULL;
81	struct btrfs_leaf_ref *entry;
82
83	while (*p) {
84		parent = *p;
85		entry = rb_entry(parent, struct btrfs_leaf_ref, rb_node);
86
87		if (bytenr < entry->bytenr)
88			p = &(*p)->rb_left;
89		else if (bytenr > entry->bytenr)
90			p = &(*p)->rb_right;
91		else
92			return parent;
93	}
94
95	entry = rb_entry(node, struct btrfs_leaf_ref, rb_node);
96	rb_link_node(node, parent, p);
97	rb_insert_color(node, root);
98	return NULL;
99}
100
101static struct rb_node *tree_search(struct rb_root *root, u64 bytenr)
102{
103	struct rb_node *n = root->rb_node;
104	struct btrfs_leaf_ref *entry;
105
106	while (n) {
107		entry = rb_entry(n, struct btrfs_leaf_ref, rb_node);
108		WARN_ON(!entry->in_tree);
109
110		if (bytenr < entry->bytenr)
111			n = n->rb_left;
112		else if (bytenr > entry->bytenr)
113			n = n->rb_right;
114		else
115			return n;
116	}
117	return NULL;
118}
119
/*
 * Drop all leaf refs in a tree whose root_gen is <= max_root_gen.
 *
 * @shared selects the fs-wide shared_ref_tree instead of the per-root
 * tree; with a NULL per-root tree and !shared there is nothing to do.
 *
 * The walk stops at the first ref newer than max_root_gen — refs are
 * appended with list_add_tail, so the list is assumed to be in
 * generation order (NOTE(review): confirm against callers).
 */
int btrfs_remove_leaf_refs(struct btrfs_root *root, u64 max_root_gen,
			   int shared)
{
	struct btrfs_leaf_ref *ref = NULL;
	struct btrfs_leaf_ref_tree *tree = root->ref_tree;

	if (shared)
		tree = &root->fs_info->shared_ref_tree;
	if (!tree)
		return 0;

	spin_lock(&tree->lock);
	while (!list_empty(&tree->list)) {
		ref = list_entry(tree->list.next, struct btrfs_leaf_ref, list);
		BUG_ON(ref->tree != tree);
		if (ref->root_gen > max_root_gen)
			break;
		/*
		 * xchg atomically claims the ref.  If in_tree was already
		 * cleared (btrfs_remove_leaf_ref uses the same xchg to
		 * claim it), yield the lock briefly and re-check the list
		 * head — the other path will unlink it.
		 */
		if (!xchg(&ref->in_tree, 0)) {
			cond_resched_lock(&tree->lock);
			continue;
		}

		rb_erase(&ref->rb_node, &tree->root);
		list_del_init(&ref->list);

		/* drop the spinlock around the free and reschedule to keep
		 * lock hold times short on long lists */
		spin_unlock(&tree->lock);
		btrfs_free_leaf_ref(root, ref);
		cond_resched();
		spin_lock(&tree->lock);
	}
	spin_unlock(&tree->lock);
	return 0;
}
153
154/*
155 * find the leaf ref for a given extent.  This returns the ref struct with
156 * a usage reference incremented
157 */
158struct btrfs_leaf_ref *btrfs_lookup_leaf_ref(struct btrfs_root *root,
159					     u64 bytenr)
160{
161	struct rb_node *rb;
162	struct btrfs_leaf_ref *ref = NULL;
163	struct btrfs_leaf_ref_tree *tree = root->ref_tree;
164again:
165	if (tree) {
166		spin_lock(&tree->lock);
167		rb = tree_search(&tree->root, bytenr);
168		if (rb)
169			ref = rb_entry(rb, struct btrfs_leaf_ref, rb_node);
170		if (ref)
171			atomic_inc(&ref->usage);
172		spin_unlock(&tree->lock);
173		if (ref)
174			return ref;
175	}
176	if (tree != &root->fs_info->shared_ref_tree) {
177		tree = &root->fs_info->shared_ref_tree;
178		goto again;
179	}
180	return NULL;
181}
182
183/*
184 * add a fully filled in leaf ref struct
185 * remove all the refs older than a given root generation
186 */
187int btrfs_add_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref,
188		       int shared)
189{
190	int ret = 0;
191	struct rb_node *rb;
192	struct btrfs_leaf_ref_tree *tree = root->ref_tree;
193
194	if (shared)
195		tree = &root->fs_info->shared_ref_tree;
196
197	spin_lock(&tree->lock);
198	rb = tree_insert(&tree->root, ref->bytenr, &ref->rb_node);
199	if (rb) {
200		ret = -EEXIST;
201	} else {
202		atomic_inc(&ref->usage);
203		ref->tree = tree;
204		ref->in_tree = 1;
205		list_add_tail(&ref->list, &tree->list);
206	}
207	spin_unlock(&tree->lock);
208	return ret;
209}
210
211/*
212 * remove a single leaf ref from the tree.  This drops the ref held by the tree
213 * only
214 */
215int btrfs_remove_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref)
216{
217	struct btrfs_leaf_ref_tree *tree;
218
219	if (!xchg(&ref->in_tree, 0))
220		return 0;
221
222	tree = ref->tree;
223	spin_lock(&tree->lock);
224
225	rb_erase(&ref->rb_node, &tree->root);
226	list_del_init(&ref->list);
227
228	spin_unlock(&tree->lock);
229
230	btrfs_free_leaf_ref(root, ref);
231	return 0;
232}
233