Search limited to /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/fs/btrfs/

Lines matching refs:tree in fs/btrfs/extent_io.c

40 	struct extent_io_tree *tree;
80 "state %lu in tree %p refs %d\n",
83 state->state, state->tree, atomic_read(&state->refs));
103 void extent_io_tree_init(struct extent_io_tree *tree,
106 tree->state = RB_ROOT;
107 tree->buffer = RB_ROOT;
108 tree->ops = NULL;
109 tree->dirty_bytes = 0;
110 spin_lock_init(&tree->lock);
111 spin_lock_init(&tree->buffer_lock);
112 tree->mapping = mapping;
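
extent_io_tree_init() above fills in every field of a fresh tree: both rb-roots, the ops pointer, the dirty-byte counter, the two spinlocks, and the backing mapping. A minimal call-site sketch, assuming the 2.6.36 three-argument form (the continuation of line 103 is truncated here) and the per-inode io_tree that later lines reference through BTRFS_I(...)->io_tree; the helper name is illustrative:

    static void demo_init_io_tree(struct inode *inode)
    {
            /* one extent_io_tree per address_space; the gfp mask is part
             * of the signature in this era but unused by the init itself */
            extent_io_tree_init(&BTRFS_I(inode)->io_tree,
                                inode->i_mapping, GFP_NOFS);
    }
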
127 state->tree = NULL;
146 WARN_ON(state->tree);
181 static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
185 struct rb_root *root = &tree->state;
226 static inline struct rb_node *tree_search(struct extent_io_tree *tree,
232 ret = __etree_search(tree, offset, &prev, NULL);
238 static struct extent_buffer *buffer_tree_insert(struct extent_io_tree *tree,
241 struct rb_root *root = &tree->buffer;
263 static struct extent_buffer *buffer_search(struct extent_io_tree *tree,
266 struct rb_root *root = &tree->buffer;
282 static void merge_cb(struct extent_io_tree *tree, struct extent_state *new,
285 if (tree->ops && tree->ops->merge_extent_hook)
286 tree->ops->merge_extent_hook(tree->mapping->host, new,
293 * extent in the tree. Extents with EXTENT_IO in their state field
297 * This should be called with the tree lock held.
299 static int merge_state(struct extent_io_tree *tree,
313 merge_cb(tree, state, other);
315 other->tree = NULL;
316 rb_erase(&other->rb_node, &tree->state);
325 merge_cb(tree, state, other);
327 state->tree = NULL;
328 rb_erase(&state->rb_node, &tree->state);
337 static int set_state_cb(struct extent_io_tree *tree,
340 if (tree->ops && tree->ops->set_bit_hook) {
341 return tree->ops->set_bit_hook(tree->mapping->host,
348 static void clear_state_cb(struct extent_io_tree *tree,
351 if (tree->ops && tree->ops->clear_bit_hook)
352 tree->ops->clear_bit_hook(tree->mapping->host, state, bits);
356 * insert an extent_state struct into the tree. 'bits' are set on the
362 * The tree lock is not taken internally. This is a utility function and
365 static int insert_state(struct extent_io_tree *tree,
381 ret = set_state_cb(tree, state, bits);
386 tree->dirty_bytes += end - start + 1;
388 node = tree_insert(&tree->state, end, &state->rb_node);
399 state->tree = tree;
400 merge_state(tree, state);
404 static int split_cb(struct extent_io_tree *tree, struct extent_state *orig,
407 if (tree->ops && tree->ops->split_extent_hook)
408 return tree->ops->split_extent_hook(tree->mapping->host,
419 * the tree has 'orig' at [orig->start, orig->end]. After calling, there
420 * are two extent state structs in the tree:
424 * The tree locks are not taken by this function. They need to be held
427 static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
432 split_cb(tree, orig, split);
439 node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
444 prealloc->tree = tree;
451 * forcibly remove the state from the tree (delete == 1).
454 * struct is freed and removed from the tree
456 static int clear_state_bit(struct extent_io_tree *tree,
465 WARN_ON(range > tree->dirty_bytes);
466 tree->dirty_bytes -= range;
468 clear_state_cb(tree, state, bits);
473 if (state->tree) {
474 rb_erase(&state->rb_node, &tree->state);
475 state->tree = NULL;
481 merge_state(tree, state);
487 * clear some bits on a range in the tree. This may require splitting
488 * or inserting elements in the tree, so the gfp mask is used to
492 * the given range from the tree regardless of state (ie for truncate).
496 * This takes the tree lock, and returns < 0 on error, > 0 if any of the
499 int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
527 spin_lock(&tree->lock);
536 if (cached && cached->tree && cached->start == start) {
549 node = tree_search(tree, start);
578 err = split_state(tree, state, prealloc, start);
584 set |= clear_state_bit(tree, state, &bits, wake);
600 err = split_state(tree, state, prealloc, end + 1);
605 set |= clear_state_bit(tree, prealloc, &bits, wake);
616 set |= clear_state_bit(tree, state, &bits, wake);
629 spin_unlock(&tree->lock);
638 spin_unlock(&tree->lock);
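
clear_extent_bit() above is the workhorse behind every clear_* wrapper further down: it may split states at the range boundaries, clears the requested bits, and frees states that end up empty. A hedged sketch of the truncate-style call its comment describes, using the (bits, wake, delete, cached_state, mask) order visible in the wrapper calls below; the bit mask chosen here is illustrative:

    static int demo_drop_range(struct extent_io_tree *tree, u64 start, u64 end)
    {
            /* wake == 1 wakes sleepers on the range, delete == 1 removes
             * the extent_state structs whatever other bits they carry */
            return clear_extent_bit(tree, start, end,
                                    EXTENT_DIRTY | EXTENT_DELALLOC,
                                    1, 1, NULL, GFP_NOFS);
    }
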
644 static int wait_on_state(struct extent_io_tree *tree,
646 __releases(tree->lock)
647 __acquires(tree->lock)
651 spin_unlock(&tree->lock);
653 spin_lock(&tree->lock);
659 * waits for one or more bits to clear on a range in the state tree.
661 * The tree lock is taken by this function
663 int wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits)
668 spin_lock(&tree->lock);
675 node = tree_search(tree, start);
687 wait_on_state(tree, state);
697 spin_unlock(&tree->lock);
699 spin_lock(&tree->lock);
703 spin_unlock(&tree->lock);
707 static int set_state_bits(struct extent_io_tree *tree,
714 ret = set_state_cb(tree, state, bits);
719 tree->dirty_bytes += range;
738 * set some bits on a range in the tree. This may require allocations or
745 * [start, end] is inclusive This takes the tree lock.
748 int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
767 spin_lock(&tree->lock);
770 if (state->start == start && state->tree) {
779 node = tree_search(tree, start);
781 err = insert_state(tree, prealloc, start, end, &bits);
805 err = set_state_bits(tree, state, &bits);
810 merge_state(tree, state);
849 err = split_state(tree, state, prealloc, start);
855 err = set_state_bits(tree, state, &bits);
859 merge_state(tree, state);
879 err = insert_state(tree, prealloc, start, this_end,
903 err = split_state(tree, state, prealloc, end + 1);
906 err = set_state_bits(tree, prealloc, &bits);
912 merge_state(tree, prealloc);
920 spin_unlock(&tree->lock);
929 spin_unlock(&tree->lock);
936 int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
939 return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL,
943 int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
946 return set_extent_bit(tree, start, end, bits, 0, NULL,
950 int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
953 return clear_extent_bit(tree, start, end, bits, 0, 0, NULL, mask);
956 int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
959 return set_extent_bit(tree, start, end,
964 int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
967 return clear_extent_bit(tree, start, end,
972 int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
975 return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL,
979 static int clear_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
982 return clear_extent_bit(tree, start, end, EXTENT_NEW, 0, 0,
986 int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
989 return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, NULL,
993 static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
997 return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0,
1001 int wait_on_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end)
1003 return wait_extent_bit(tree, start, end, EXTENT_WRITEBACK);
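
The one-line wrappers above (set_extent_dirty(), set_extent_delalloc(), set_extent_uptodate(), and their clear_* counterparts) only fix the bit arguments of set_extent_bit()/clear_extent_bit(). A sketch of a caller chaining two of them, matching the four-argument wrapper shape shown at lines 936-939 and 986-989; the function name is illustrative:

    static int demo_mark_written(struct extent_io_tree *tree, u64 start, u64 end)
    {
            int ret;

            ret = set_extent_dirty(tree, start, end, GFP_NOFS);
            if (ret)
                    return ret;
            return set_extent_uptodate(tree, start, end, GFP_NOFS);
    }
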
1010 int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1016 err = set_extent_bit(tree, start, end, EXTENT_LOCKED | bits,
1020 wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
1030 int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
1032 return lock_extent_bits(tree, start, end, 0, NULL, mask);
1035 int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
1041 err = set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED,
1045 clear_extent_bit(tree, start, failed_start - 1,
1052 int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end,
1055 return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
1059 int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end,
1062 return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL,
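
lock_extent()/unlock_extent() above turn EXTENT_LOCKED into a blocking byte-range lock: set_extent_bit() reports a conflict through failed_start, lock_extent_bits() waits on it and retries (lines 1016-1020), and unlock is just a clear with wake == 1. The canonical pattern, sketched against the signatures in this listing:

    static int demo_locked_update(struct extent_io_tree *tree, u64 start, u64 end)
    {
            int ret;

            /* sleeps until no other EXTENT_LOCKED range overlaps */
            lock_extent(tree, start, end, GFP_NOFS);
            ret = set_extent_dirty(tree, start, end, GFP_NOFS);
            /* clears EXTENT_LOCKED and wakes any waiters */
            unlock_extent(tree, start, end, GFP_NOFS);
            return ret;
    }
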
1067 * helper function to set pages and extents in the tree dirty
1069 int set_range_dirty(struct extent_io_tree *tree, u64 start, u64 end)
1076 page = find_get_page(tree->mapping, index);
1086 * helper function to set both pages and extents in the tree writeback
1088 static int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
1095 page = find_get_page(tree->mapping, index);
1105 * find the first offset in the io tree with 'bits' set. zero is
1111 int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
1118 spin_lock(&tree->lock);
1123 node = tree_search(tree, start);
1140 spin_unlock(&tree->lock);
1145 * return it. tree->lock must be held. NULL will returned if
1148 struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree,
1158 node = tree_search(tree, start);
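
find_first_extent_bit() above scans forward from 'start' for the first extent carrying the requested bits (returning zero on a hit, per its comment), with find_first_extent_bit_state() as the lock-held variant that hands back the extent_state itself. An iteration sketch, assuming the (tree, start, &start_ret, &end_ret, bits) prototype that this listing truncates:

    static void demo_walk_dirty(struct extent_io_tree *tree)
    {
            u64 start = 0, found_start, found_end;

            /* returns 0 while some matching extent begins at or after start */
            while (!find_first_extent_bit(tree, start, &found_start,
                                          &found_end, EXTENT_DIRTY)) {
                    /* process [found_start, found_end] here */
                    start = found_end + 1;
            }
    }
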
1179 * 1 is returned if we find something, 0 if nothing was in the tree
1181 static noinline u64 find_delalloc_range(struct extent_io_tree *tree,
1191 spin_lock(&tree->lock);
1197 node = tree_search(tree, cur_start);
1231 spin_unlock(&tree->lock);
1331 * 1 is returned if we find something, 0 if nothing was in the tree
1334 struct extent_io_tree *tree,
1350 found = find_delalloc_range(tree, &delalloc_start, &delalloc_end,
1395 lock_extent_bits(tree, delalloc_start, delalloc_end,
1399 ret = test_range_bit(tree, delalloc_start, delalloc_end,
1402 unlock_extent_cached(tree, delalloc_start, delalloc_end,
1417 struct extent_io_tree *tree,
1437 clear_extent_bit(tree, start, end, clear_bits, 1, 0, NULL, GFP_NOFS);
1474 * count the number of bytes in the tree that have a given bit(s)
1478 u64 count_range_bits(struct extent_io_tree *tree,
1493 spin_lock(&tree->lock);
1495 total_bytes = tree->dirty_bytes;
1502 node = tree_search(tree, cur_start);
1525 spin_unlock(&tree->lock);
1530 * set the private field for a given byte offset in the tree. If there isn't
1533 int set_state_private(struct extent_io_tree *tree, u64 start, u64 private)
1539 spin_lock(&tree->lock);
1544 node = tree_search(tree, start);
1556 spin_unlock(&tree->lock);
1560 int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private)
1566 spin_lock(&tree->lock);
1571 node = tree_search(tree, start);
1583 spin_unlock(&tree->lock);
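
set_state_private()/get_state_private() above hang a single u64 off the extent_state covering a byte offset; btrfs uses this to remember checksums between bio submission and end_io. A hedged sketch against the signatures at lines 1533 and 1560 (the csum packing is illustrative):

    static int demo_stash_csum(struct extent_io_tree *tree, u64 offset, u32 csum)
    {
            return set_state_private(tree, offset, (u64)csum);
    }

    static int demo_verify_csum(struct extent_io_tree *tree, u64 offset, u32 expect)
    {
            u64 private;

            /* fails if no extent_state starts exactly at that offset */
            if (get_state_private(tree, offset, &private))
                    return -ENOENT;
            return (u32)private == expect ? 0 : -EIO;
    }
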
1588 * searches a range in the state tree for a given mask.
1589 * If 'filled' == 1, this returns 1 only if every extent in the tree
1593 int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
1600 spin_lock(&tree->lock);
1601 if (cached && cached->tree && cached->start == start)
1604 node = tree_search(tree, start);
1638 spin_unlock(&tree->lock);
1644 * extents in the tree for that page are up to date
1646 static int check_page_uptodate(struct extent_io_tree *tree,
1651 if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL))
1657 * helper function to unlock a page if all the extents in the tree
1660 static int check_page_locked(struct extent_io_tree *tree,
1665 if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL))
1672 * in the tree for that page are done with writeback
1674 static int check_page_writeback(struct extent_io_tree *tree,
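
test_range_bit()'s 'filled' flag, which the check_page_* helpers above use with both polarities, controls how much of the range must match. A sketch of the two modes, per the comment at line 1589 and the call shapes at lines 1651 and 1665:

    static int demo_filled_modes(struct extent_io_tree *tree, u64 start, u64 end)
    {
            /* filled == 1: every byte of [start, end] must carry the bit */
            int all_uptodate = test_range_bit(tree, start, end,
                                              EXTENT_UPTODATE, 1, NULL);
            /* filled == 0: any overlapping extent with the bit is enough */
            int any_locked = test_range_bit(tree, start, end,
                                            EXTENT_LOCKED, 0, NULL);

            return all_uptodate && !any_locked;
    }
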
1686 * clear the writeback bits in the extent tree for this IO
1689 * Scheduling is not allowed, so the extent state tree is expected
1696 struct extent_io_tree *tree;
1704 tree = &BTRFS_I(page->mapping->host)->io_tree;
1717 if (tree->ops && tree->ops->writepage_end_io_hook) {
1718 ret = tree->ops->writepage_end_io_hook(page, start,
1724 if (!uptodate && tree->ops &&
1725 tree->ops->writepage_io_failed_hook) {
1726 ret = tree->ops->writepage_io_failed_hook(bio, page,
1735 clear_extent_uptodate(tree, start, end, NULL, GFP_NOFS);
1743 check_page_writeback(tree, page);
1753 * set the page up to date if all extents in the tree are uptodate
1754 * clear the lock bit in the extent tree
1757 * Scheduling is not allowed, so the extent state tree is expected
1765 struct extent_io_tree *tree;
1776 tree = &BTRFS_I(page->mapping->host)->io_tree;
1790 if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
1791 ret = tree->ops->readpage_end_io_hook(page, start, end,
1796 if (!uptodate && tree->ops &&
1797 tree->ops->readpage_io_failed_hook) {
1798 ret = tree->ops->readpage_io_failed_hook(bio, page,
1810 set_extent_uptodate(tree, start, end,
1813 unlock_extent(tree, start, end, GFP_ATOMIC);
1825 check_page_uptodate(tree, page);
1830 check_page_locked(tree, page);
1839 * the structs in the extent tree when done, and set the uptodate bits
1846 struct extent_io_tree *tree;
1852 tree = &BTRFS_I(page->mapping->host)->io_tree;
1862 set_extent_uptodate(tree, start, end, GFP_ATOMIC);
1868 unlock_extent(tree, start, end, GFP_ATOMIC);
1902 struct extent_io_tree *tree = bio->bi_private;
1913 if (tree->ops && tree->ops->submit_bio_hook)
1914 tree->ops->submit_bio_hook(page->mapping->host, rw, bio,
1924 static int submit_extent_page(int rw, struct extent_io_tree *tree,
1952 (tree->ops && tree->ops->merge_bio_hook &&
1953 tree->ops->merge_bio_hook(page, offset, page_size, bio,
1972 bio->bi_private = tree;
1998 * into the tree that are removed when the IO is done (by the end_io
2001 static int __extent_read_full_page(struct extent_io_tree *tree,
2032 lock_extent(tree, start, end, GFP_NOFS);
2036 unlock_extent(tree, start, end, GFP_NOFS);
2061 set_extent_uptodate(tree, cur, cur + iosize - 1,
2063 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
2070 unlock_extent(tree, cur, end, GFP_NOFS);
2105 set_extent_uptodate(tree, cur, cur + iosize - 1,
2107 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
2113 if (test_range_bit(tree, cur, cur_end,
2115 check_page_uptodate(tree, page);
2116 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
2126 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
2133 if (tree->ops && tree->ops->readpage_io_hook) {
2134 ret = tree->ops->readpage_io_hook(page, cur,
2140 ret = submit_extent_page(READ, tree, page,
2162 int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
2169 ret = __extent_read_full_page(tree, page, get_extent, &bio, 0,
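
extent_read_full_page() above is the public whole-page read entry that __extent_read_full_page() implements: lock the range, map it chunk by chunk through the get_extent callback, and submit or short-circuit each piece. In btrfs it slots straight into ->readpage; a sketch matching the 2.6.36 call shape, with btrfs_get_extent as the callback and an illustrative wrapper name:

    static int demo_readpage(struct file *file, struct page *page)
    {
            struct extent_io_tree *tree =
                    &BTRFS_I(page->mapping->host)->io_tree;

            return extent_read_full_page(tree, page, btrfs_get_extent);
    }
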
2188 * records are inserted to lock ranges in the tree, and as dirty areas
2197 struct extent_io_tree *tree = epd->tree;
2264 nr_delalloc = find_lock_delalloc_range(inode, tree,
2273 tree->ops->fill_delalloc(inode, page, delalloc_start,
2309 if (tree->ops && tree->ops->writepage_start_hook) {
2310 ret = tree->ops->writepage_start_hook(page, start,
2329 if (tree->ops && tree->ops->writepage_end_io_hook)
2330 tree->ops->writepage_end_io_hook(page, start,
2340 if (tree->ops && tree->ops->writepage_end_io_hook)
2341 tree->ops->writepage_end_io_hook(page, cur,
2375 if (!compressed && tree->ops &&
2376 tree->ops->writepage_end_io_hook)
2377 tree->ops->writepage_end_io_hook(page, cur,
2394 if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
2401 if (tree->ops && tree->ops->writepage_io_hook) {
2402 ret = tree->ops->writepage_io_hook(page, cur,
2412 set_range_writeback(tree, cur, cur + iosize - 1);
2420 ret = submit_extent_page(write_flags, tree, page,
2462 static int extent_write_cache_pages(struct extent_io_tree *tree,
2507 if (tree->ops && tree->ops->write_cache_pages_lock_hook)
2508 tree->ops->write_cache_pages_lock_hook(page);
2583 int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
2591 .tree = tree,
2606 extent_write_cache_pages(tree, mapping, &wbc_writepages,
2612 int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
2624 .tree = tree,
2642 if (tree->ops && tree->ops->writepage_end_io_hook)
2643 tree->ops->writepage_end_io_hook(page, start,
2656 int extent_writepages(struct extent_io_tree *tree,
2664 .tree = tree,
2670 ret = extent_write_cache_pages(tree, mapping, wbc,
2677 int extent_readpages(struct extent_io_tree *tree,
2693 __extent_read_full_page(tree, page, get_extent,
2707 * records from the tree
2709 int extent_invalidatepage(struct extent_io_tree *tree,
2721 lock_extent_bits(tree, start, end, 0, &cached_state, GFP_NOFS);
2723 clear_extent_bit(tree, start, end,
2734 int extent_commit_write(struct extent_io_tree *tree,
2750 int extent_prepare_write(struct extent_io_tree *tree,
2776 lock_extent(tree, page_start, page_end, GFP_NOFS);
2786 isnew = clear_extent_new(tree, block_start, cur_end, GFP_NOFS);
2805 !test_range_bit(tree, block_start, cur_end,
2818 set_extent_bit(tree, block_start,
2821 ret = submit_extent_page(READ, tree, page,
2829 set_extent_uptodate(tree, block_start, cur_end,
2831 unlock_extent(tree, block_start, cur_end, GFP_NOFS);
2838 wait_extent_bit(tree, orig_block_start,
2841 check_page_uptodate(tree, page);
2852 struct extent_io_tree *tree, struct page *page,
2859 if (test_range_bit(tree, start, end,
2869 clear_extent_bit(tree, start, end,
2882 struct extent_io_tree *tree, struct page *page,
2906 if (!test_range_bit(tree, em->start,
2911 /* once for the rb tree */
2921 return try_release_extent_state(map, tree, page, mask);
3054 * be in the radix tree.
3069 static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
3106 struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
3117 struct address_space *mapping = tree->mapping;
3120 spin_lock(&tree->buffer_lock);
3121 eb = buffer_search(tree, start);
3124 spin_unlock(&tree->buffer_lock);
3128 spin_unlock(&tree->buffer_lock);
3130 eb = __alloc_extent_buffer(tree, start, len, mask);
3167 spin_lock(&tree->buffer_lock);
3168 exists = buffer_tree_insert(tree, start, &eb->rb_node);
3172 spin_unlock(&tree->buffer_lock);
3175 /* add one reference for the tree */
3177 spin_unlock(&tree->buffer_lock);
3190 struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
3196 spin_lock(&tree->buffer_lock);
3197 eb = buffer_search(tree, start);
3200 spin_unlock(&tree->buffer_lock);
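
alloc_extent_buffer() and find_extent_buffer() above maintain the second rb-tree (tree->buffer) under tree->buffer_lock, with buffer_search()/buffer_tree_insert() doing the rb work. Their trailing parameters are truncated in this listing, so the (len, page0, mask) tails below are assumptions taken from the 2.6.36 sources; a lookup-or-create sketch:

    static struct extent_buffer *demo_get_eb(struct extent_io_tree *tree,
                                             u64 start, unsigned long len)
    {
            struct extent_buffer *eb;

            eb = find_extent_buffer(tree, start, len, GFP_NOFS);
            if (eb)
                    return eb;      /* cache hit, reference already taken */
            return alloc_extent_buffer(tree, start, len, NULL, GFP_NOFS);
    }
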
3219 int clear_extent_buffer_dirty(struct extent_io_tree *tree,
3252 int wait_on_extent_buffer_writeback(struct extent_io_tree *tree,
3255 return wait_on_extent_writeback(tree, eb->start,
3259 int set_extent_buffer_dirty(struct extent_io_tree *tree,
3273 int clear_extent_buffer_uptodate(struct extent_io_tree *tree,
3284 clear_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
3294 int set_extent_buffer_uptodate(struct extent_io_tree *tree,
3303 set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
3310 check_page_uptodate(tree, page);
3318 int extent_range_uptodate(struct extent_io_tree *tree,
3327 ret = test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL);
3332 page = find_get_page(tree->mapping, index);
3344 int extent_buffer_uptodate(struct extent_io_tree *tree,
3357 ret = test_range_bit(tree, eb->start, eb->start + eb->len - 1,
3373 int read_extent_buffer_pages(struct extent_io_tree *tree,
3393 if (test_range_bit(tree, eb->start, eb->start + eb->len - 1,
3433 err = __extent_read_full_page(tree, page,
3835 int try_release_extent_buffer(struct extent_io_tree *tree, struct page *page)
3843 spin_lock(&tree->buffer_lock);
3844 eb = buffer_search(tree, start);
3860 rb_erase(&eb->rb_node, &tree->buffer);
3863 spin_unlock(&tree->buffer_lock);