ctree.c revision 67d5e289
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007,2008 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/mm.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "locking.h"
#include "volumes.h"
#include "qgroup.h"
#include "tree-mod-log.h"

static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_path *path, int level);
static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		      const struct btrfs_key *ins_key, struct btrfs_path *path,
		      int data_size, int extend);
static int push_node_left(struct btrfs_trans_handle *trans,
			  struct extent_buffer *dst,
			  struct extent_buffer *src, int empty);
static int balance_node_right(struct btrfs_trans_handle *trans,
			      struct extent_buffer *dst_buf,
			      struct extent_buffer *src_buf);
static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
		    int level, int slot);

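/*
 * Per-checksum-type metadata: the digest size stored on disk and, where it
 * differs from the checksum name, the crypto API driver name to request.
 */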
static const struct btrfs_csums {
	u16		size;
	const char	name[10];
	const char	driver[12];
} btrfs_csums[] = {
	[BTRFS_CSUM_TYPE_CRC32] = { .size = 4, .name = "crc32c" },
	[BTRFS_CSUM_TYPE_XXHASH] = { .size = 8, .name = "xxhash64" },
	[BTRFS_CSUM_TYPE_SHA256] = { .size = 32, .name = "sha256" },
	[BTRFS_CSUM_TYPE_BLAKE2] = { .size = 32, .name = "blake2b",
				     .driver = "blake2b-256" },
};

int btrfs_super_csum_size(const struct btrfs_super_block *s)
{
	u16 t = btrfs_super_csum_type(s);
	/*
	 * csum type is validated at mount time
	 */
	return btrfs_csums[t].size;
}

const char *btrfs_super_csum_name(u16 csum_type)
{
	/* csum type is validated at mount time */
	return btrfs_csums[csum_type].name;
}

/*
 * Return driver name if defined, otherwise the name that's also a valid driver
 * name
 */
const char *btrfs_super_csum_driver(u16 csum_type)
{
	/* csum type is validated at mount time */
	return btrfs_csums[csum_type].driver[0] ?
		btrfs_csums[csum_type].driver :
		btrfs_csums[csum_type].name;
}

size_t __attribute_const__ btrfs_get_num_csums(void)
{
	return ARRAY_SIZE(btrfs_csums);
}

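/* Allocate a zeroed path from the path slab cache; release with btrfs_free_path(). */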
struct btrfs_path *btrfs_alloc_path(void)
{
	return kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
}

/* this also releases the path */
void btrfs_free_path(struct btrfs_path *p)
{
	if (!p)
		return;
	btrfs_release_path(p);
	kmem_cache_free(btrfs_path_cachep, p);
}

/*
 * path release drops references on the extent buffers in the path
 * and it drops any locks held by this path
 *
 * It is safe to call this on paths that hold no locks or extent buffers.
 */
noinline void btrfs_release_path(struct btrfs_path *p)
{
	int i;

	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		p->slots[i] = 0;
		if (!p->nodes[i])
			continue;
		if (p->locks[i]) {
			btrfs_tree_unlock_rw(p->nodes[i], p->locks[i]);
			p->locks[i] = 0;
		}
		free_extent_buffer(p->nodes[i]);
		p->nodes[i] = NULL;
	}
}

/*
 * safely gets a reference on the root node of a tree.  A lock
 * is not taken, so a concurrent writer may put a different node
 * at the root of the tree.  See btrfs_lock_root_node for the
 * looping required.
 *
 * The extent buffer returned by this has a reference taken, so
 * it won't disappear.  It may stop being the root of the tree
 * at any time because there are no locks held.
 */
struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		rcu_read_lock();
		eb = rcu_dereference(root->node);

		/*
		 * RCU really hurts here; we could free up the root node
		 * because it was COWed but we may not get the new root node
		 * yet, so do the inc_not_zero dance and if it doesn't work
		 * then synchronize_rcu and try again.
		 */
		if (atomic_inc_not_zero(&eb->refs)) {
			rcu_read_unlock();
			break;
		}
		rcu_read_unlock();
		synchronize_rcu();
	}
	return eb;
}

/*
 * Cowonly roots (not-shareable trees, everything not subvolume or reloc
 * roots) just get put onto a simple dirty list.  The transaction walks this
 * list to make sure they get properly updated on disk.
 */
static void add_root_to_dirty_list(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (test_bit(BTRFS_ROOT_DIRTY, &root->state) ||
	    !test_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state))
		return;

	spin_lock(&fs_info->trans_lock);
	if (!test_and_set_bit(BTRFS_ROOT_DIRTY, &root->state)) {
		/* Want the extent tree to be the last on the list */
		if (root->root_key.objectid == BTRFS_EXTENT_TREE_OBJECTID)
			list_move_tail(&root->dirty_list,
				       &fs_info->dirty_cowonly_roots);
		else
			list_move(&root->dirty_list,
				  &fs_info->dirty_cowonly_roots);
	}
	spin_unlock(&fs_info->trans_lock);
}

/*
 * used by snapshot creation to make a copy of a root for a tree with
 * a given objectid.  The buffer with the new root node is returned in
 * cow_ret, and this function returns zero on success or a negative error code.
 */
int btrfs_copy_root(struct btrfs_trans_handle *trans,
		      struct btrfs_root *root,
		      struct extent_buffer *buf,
		      struct extent_buffer **cow_ret, u64 new_root_objectid)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *cow;
	int ret = 0;
	int level;
	struct btrfs_disk_key disk_key;

	WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
		trans->transid != fs_info->running_transaction->transid);
	WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
		trans->transid != root->last_trans);

	level = btrfs_header_level(buf);
	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);

	cow = btrfs_alloc_tree_block(trans, root, 0, new_root_objectid,
				     &disk_key, level, buf->start, 0,
				     BTRFS_NESTING_NEW_ROOT);
	if (IS_ERR(cow))
		return PTR_ERR(cow);

	copy_extent_buffer_full(cow, buf);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
				     BTRFS_HEADER_FLAG_RELOC);
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
	else
		btrfs_set_header_owner(cow, new_root_objectid);

	write_extent_buffer_fsid(cow, fs_info->fs_devices->metadata_uuid);

	WARN_ON(btrfs_header_generation(buf) > trans->transid);
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		ret = btrfs_inc_ref(trans, root, cow, 1);
	else
		ret = btrfs_inc_ref(trans, root, cow, 0);
	if (ret) {
		btrfs_tree_unlock(cow);
		free_extent_buffer(cow);
		btrfs_abort_transaction(trans, ret);
		return ret;
	}

	btrfs_mark_buffer_dirty(cow);
	*cow_ret = cow;
	return 0;
}

/*
 * check if the tree block can be shared by multiple trees
 */
int btrfs_block_can_be_shared(struct btrfs_root *root,
			      struct extent_buffer *buf)
{
	/*
	 * Tree blocks not in shareable trees and tree roots are never shared.
	 * If a block was allocated after the last snapshot and the block was
	 * not allocated by tree relocation, we know the block is not shared.
	 */
	if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
	    buf != root->node && buf != root->commit_root &&
	    (btrfs_header_generation(buf) <=
	     btrfs_root_last_snapshot(&root->root_item) ||
	     btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
		return 1;

	return 0;
}

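/*
 * Update the extent references for a block that is about to be COWed:
 * bump refs on the new copy @cow (and, for shared blocks, convert the old
 * block to full backrefs), or drop the old block's references entirely.
 * *last_ref is set to 1 when the caller should free the old block @buf.
 */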
static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct extent_buffer *buf,
				       struct extent_buffer *cow,
				       int *last_ref)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 refs;
	u64 owner;
	u64 flags;
	u64 new_flags = 0;
	int ret;

	/*
	 * Backrefs update rules:
	 *
	 * Always use full backrefs for extent pointers in tree blocks
	 * allocated by tree relocation.
	 *
	 * If a shared tree block is no longer referenced by its owner
	 * tree (btrfs_header_owner(buf) == root->root_key.objectid),
	 * use full backrefs for extent pointers in the tree block.
	 *
	 * If a tree block is being relocated
	 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
	 * use full backrefs for extent pointers in the tree block.
	 * The reason for this is that some operations (such as drop tree)
	 * are only allowed for blocks that use full backrefs.
	 */

	if (btrfs_block_can_be_shared(root, buf)) {
		ret = btrfs_lookup_extent_info(trans, fs_info, buf->start,
					       btrfs_header_level(buf), 1,
					       &refs, &flags);
		if (ret)
			return ret;
		if (refs == 0) {
			ret = -EROFS;
			btrfs_handle_fs_error(fs_info, ret, NULL);
			return ret;
		}
	} else {
		refs = 1;
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
			flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
		else
			flags = 0;
	}

	owner = btrfs_header_owner(buf);
	BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID &&
	       !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));

	if (refs > 1) {
		if ((owner == root->root_key.objectid ||
		     root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
		    !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
			ret = btrfs_inc_ref(trans, root, buf, 1);
			if (ret)
				return ret;

			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID) {
				ret = btrfs_dec_ref(trans, root, buf, 0);
				if (ret)
					return ret;
				ret = btrfs_inc_ref(trans, root, cow, 1);
				if (ret)
					return ret;
			}
			new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
		} else {

			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID)
				ret = btrfs_inc_ref(trans, root, cow, 1);
			else
				ret = btrfs_inc_ref(trans, root, cow, 0);
			if (ret)
				return ret;
		}
		if (new_flags != 0) {
			int level = btrfs_header_level(buf);

			ret = btrfs_set_disk_extent_flags(trans, buf,
							  new_flags, level, 0);
			if (ret)
				return ret;
		}
	} else {
		if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID)
				ret = btrfs_inc_ref(trans, root, cow, 1);
			else
				ret = btrfs_inc_ref(trans, root, cow, 0);
			if (ret)
				return ret;
			ret = btrfs_dec_ref(trans, root, buf, 1);
			if (ret)
				return ret;
		}
		btrfs_clean_tree_block(buf);
		*last_ref = 1;
	}
	return 0;
}

/*
 * does the dirty work in cow of a single block.  The parent block (if
 * supplied) is updated to point to the new cow copy.  The new buffer is marked
 * dirty and returned locked.  If you modify the block it needs to be marked
 * dirty again.
 *
 * search_start -- an allocation hint for the new block
 *
 * empty_size -- a hint that you plan on doing more cow.  This is the size in
 * bytes the allocator should try to find free next to the block it returns.
 * This is just a hint and may be ignored by the allocator.
 */
static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct extent_buffer *buf,
			     struct extent_buffer *parent, int parent_slot,
			     struct extent_buffer **cow_ret,
			     u64 search_start, u64 empty_size,
			     enum btrfs_lock_nesting nest)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_disk_key disk_key;
	struct extent_buffer *cow;
	int level, ret;
	int last_ref = 0;
	int unlock_orig = 0;
	u64 parent_start = 0;

	if (*cow_ret == buf)
		unlock_orig = 1;

	btrfs_assert_tree_locked(buf);

	WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
		trans->transid != fs_info->running_transaction->transid);
	WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
		trans->transid != root->last_trans);

	level = btrfs_header_level(buf);

	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);

	if ((root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) && parent)
		parent_start = parent->start;

	cow = btrfs_alloc_tree_block(trans, root, parent_start,
				     root->root_key.objectid, &disk_key, level,
				     search_start, empty_size, nest);
	if (IS_ERR(cow))
		return PTR_ERR(cow);

	/* cow is set to blocking by btrfs_init_new_buffer */

	copy_extent_buffer_full(cow, buf);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
				     BTRFS_HEADER_FLAG_RELOC);
	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
	else
		btrfs_set_header_owner(cow, root->root_key.objectid);

	write_extent_buffer_fsid(cow, fs_info->fs_devices->metadata_uuid);

	ret = update_ref_for_cow(trans, root, buf, cow, &last_ref);
	if (ret) {
		btrfs_tree_unlock(cow);
		free_extent_buffer(cow);
		btrfs_abort_transaction(trans, ret);
		return ret;
	}

	if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) {
		ret = btrfs_reloc_cow_block(trans, root, buf, cow);
		if (ret) {
			btrfs_tree_unlock(cow);
			free_extent_buffer(cow);
			btrfs_abort_transaction(trans, ret);
			return ret;
		}
	}

	if (buf == root->node) {
		WARN_ON(parent && parent != buf);
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
			parent_start = buf->start;

		atomic_inc(&cow->refs);
		ret = btrfs_tree_mod_log_insert_root(root->node, cow, true);
		BUG_ON(ret < 0);
		rcu_assign_pointer(root->node, cow);

		btrfs_free_tree_block(trans, root, buf, parent_start,
				      last_ref);
		free_extent_buffer(buf);
		add_root_to_dirty_list(root);
	} else {
		WARN_ON(trans->transid != btrfs_header_generation(parent));
		btrfs_tree_mod_log_insert_key(parent, parent_slot,
					      BTRFS_MOD_LOG_KEY_REPLACE, GFP_NOFS);
		btrfs_set_node_blockptr(parent, parent_slot,
					cow->start);
		btrfs_set_node_ptr_generation(parent, parent_slot,
					      trans->transid);
		btrfs_mark_buffer_dirty(parent);
		if (last_ref) {
			ret = btrfs_tree_mod_log_free_eb(buf);
			if (ret) {
				btrfs_tree_unlock(cow);
				free_extent_buffer(cow);
				btrfs_abort_transaction(trans, ret);
				return ret;
			}
		}
		btrfs_free_tree_block(trans, root, buf, parent_start,
				      last_ref);
	}
	if (unlock_orig)
		btrfs_tree_unlock(buf);
	free_extent_buffer_stale(buf);
	btrfs_mark_buffer_dirty(cow);
	*cow_ret = cow;
	return 0;
}

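/*
 * Decide whether @buf must be COWed before it can be modified in this
 * transaction.  Returns 0 when the block is already writable as is.
 */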
static inline int should_cow_block(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct extent_buffer *buf)
{
	if (btrfs_is_testing(root->fs_info))
		return 0;

	/* Ensure we can see the FORCE_COW bit */
	smp_mb__before_atomic();

	/*
	 * We do not need to cow a block if
	 * 1) this block is not created or changed in this transaction;
	 * 2) this block does not belong to the TREE_RELOC tree;
	 * 3) the root is not forced COW.
	 *
	 * What is forced COW:
	 *    when we create a snapshot during committing the transaction,
	 *    after we've finished copying the src root, we must COW the shared
	 *    block to ensure metadata consistency.
	 */
	if (btrfs_header_generation(buf) == trans->transid &&
	    !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
	    !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
	      btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) &&
	    !test_bit(BTRFS_ROOT_FORCE_COW, &root->state))
		return 0;
	return 1;
}

/*
 * cows a single block, see __btrfs_cow_block for the real work.
 * This version of it has extra checks so that a block isn't COWed more than
 * once per transaction, as long as it hasn't been written yet
 */
noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
		    struct btrfs_root *root, struct extent_buffer *buf,
		    struct extent_buffer *parent, int parent_slot,
		    struct extent_buffer **cow_ret,
		    enum btrfs_lock_nesting nest)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 search_start;
	int ret;

	if (test_bit(BTRFS_ROOT_DELETING, &root->state))
		btrfs_err(fs_info,
			"COW'ing blocks on a fs root that's being dropped");

	if (trans->transaction != fs_info->running_transaction)
		WARN(1, KERN_CRIT "trans %llu running %llu\n",
		       trans->transid,
		       fs_info->running_transaction->transid);

	if (trans->transid != fs_info->generation)
		WARN(1, KERN_CRIT "trans %llu running %llu\n",
		       trans->transid, fs_info->generation);

	if (!should_cow_block(trans, root, buf)) {
		*cow_ret = buf;
		return 0;
	}

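	/*
	 * Hint the allocator towards the 1GiB region that already holds this
	 * block, so COWed metadata stays physically close to the original.
	 */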
	search_start = buf->start & ~((u64)SZ_1G - 1);

	/*
	 * Before CoWing this block for later modification, check if it's
	 * the subtree root and do the delayed subtree trace if needed.
	 *
	 * Also we don't care about the error, as it's handled internally.
	 */
	btrfs_qgroup_trace_subtree_after_cow(trans, root, buf);
	ret = __btrfs_cow_block(trans, root, buf, parent,
				 parent_slot, cow_ret, search_start, 0, nest);

	trace_btrfs_cow_block(root, buf, *cow_ret);

	return ret;
}
ALLOW_ERROR_INJECTION(btrfs_cow_block, ERRNO);

/*
 * helper function for defrag to decide if two blocks pointed to by a
 * node are actually close by
 */
static int close_blocks(u64 blocknr, u64 other, u32 blocksize)
{
	if (blocknr < other && other - (blocknr + blocksize) < 32768)
		return 1;
	if (blocknr > other && blocknr - (other + blocksize) < 32768)
		return 1;
	return 0;
}

#ifdef __LITTLE_ENDIAN

/*
 * Compare two keys: on little-endian the disk order is the same as the CPU
 * order and we can avoid the conversion.
 */
static int comp_keys(const struct btrfs_disk_key *disk_key,
		     const struct btrfs_key *k2)
{
	const struct btrfs_key *k1 = (const struct btrfs_key *)disk_key;

	return btrfs_comp_cpu_keys(k1, k2);
}

#else

/*
 * compare two keys in a memcmp fashion
 */
static int comp_keys(const struct btrfs_disk_key *disk,
		     const struct btrfs_key *k2)
{
	struct btrfs_key k1;

	btrfs_disk_key_to_cpu(&k1, disk);

	return btrfs_comp_cpu_keys(&k1, k2);
}
#endif

/*
 * Same as comp_keys, only with two btrfs_key's.
 */
int __pure btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_key *k2)
{
	if (k1->objectid > k2->objectid)
		return 1;
	if (k1->objectid < k2->objectid)
		return -1;
	if (k1->type > k2->type)
		return 1;
	if (k1->type < k2->type)
		return -1;
	if (k1->offset > k2->offset)
		return 1;
	if (k1->offset < k2->offset)
		return -1;
	return 0;
}
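
/*
 * Keys therefore sort by (objectid, type, offset), in that order.  For
 * example, (256, BTRFS_INODE_ITEM_KEY, 0) sorts before
 * (256, BTRFS_DIR_ITEM_KEY, <hash>) because BTRFS_INODE_ITEM_KEY (1) is
 * smaller than BTRFS_DIR_ITEM_KEY (84), regardless of the offsets.
 */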

/*
 * this is used by the defrag code to go through all the
 * leaves pointed to by a node and reallocate them so that
 * disk order is close to key order
 */
int btrfs_realloc_node(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct extent_buffer *parent,
		       int start_slot, u64 *last_ret,
		       struct btrfs_key *progress)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *cur;
	u64 blocknr;
	u64 search_start = *last_ret;
	u64 last_block = 0;
	u64 other;
	u32 parent_nritems;
	int end_slot;
	int i;
	int err = 0;
	u32 blocksize;
	int progress_passed = 0;
	struct btrfs_disk_key disk_key;

	WARN_ON(trans->transaction != fs_info->running_transaction);
	WARN_ON(trans->transid != fs_info->generation);

	parent_nritems = btrfs_header_nritems(parent);
	blocksize = fs_info->nodesize;
	end_slot = parent_nritems - 1;

	if (parent_nritems <= 1)
		return 0;

	for (i = start_slot; i <= end_slot; i++) {
		int close = 1;

		btrfs_node_key(parent, &disk_key, i);
		if (!progress_passed && comp_keys(&disk_key, progress) < 0)
			continue;

		progress_passed = 1;
		blocknr = btrfs_node_blockptr(parent, i);
		if (last_block == 0)
			last_block = blocknr;

		if (i > 0) {
			other = btrfs_node_blockptr(parent, i - 1);
			close = close_blocks(blocknr, other, blocksize);
		}
		if (!close && i < end_slot) {
			other = btrfs_node_blockptr(parent, i + 1);
			close = close_blocks(blocknr, other, blocksize);
		}
		if (close) {
			last_block = blocknr;
			continue;
		}

		cur = btrfs_read_node_slot(parent, i);
		if (IS_ERR(cur))
			return PTR_ERR(cur);
		if (search_start == 0)
			search_start = last_block;

		btrfs_tree_lock(cur);
		err = __btrfs_cow_block(trans, root, cur, parent, i,
					&cur, search_start,
					min(16 * blocksize,
					    (end_slot - i) * blocksize),
					BTRFS_NESTING_COW);
		if (err) {
			btrfs_tree_unlock(cur);
			free_extent_buffer(cur);
			break;
		}
		search_start = cur->start;
		last_block = cur->start;
		*last_ret = search_start;
		btrfs_tree_unlock(cur);
		free_extent_buffer(cur);
	}
	return err;
}

/*
 * Search for key in the extent_buffer.  The items start at offset p,
 * and they are item_size apart.
 *
 * The slot in the array is returned via slot, and it points to
 * the place where you would insert key if it is not found in
 * the array.
 *
 * Slot may point to the total number of items if the key is bigger than
 * all of the keys.
 */
static noinline int generic_bin_search(struct extent_buffer *eb,
				       unsigned long p, int item_size,
				       const struct btrfs_key *key, int *slot)
{
	int low = 0;
	int high = btrfs_header_nritems(eb);
	int ret;
	const int key_size = sizeof(struct btrfs_disk_key);

	if (low > high) {
		btrfs_err(eb->fs_info,
		 "%s: low (%d) > high (%d) eb %llu owner %llu level %d",
			  __func__, low, high, eb->start,
			  btrfs_header_owner(eb), btrfs_header_level(eb));
		return -EINVAL;
	}

	while (low < high) {
		unsigned long oip;
		unsigned long offset;
		struct btrfs_disk_key *tmp;
		struct btrfs_disk_key unaligned;
		int mid;

		mid = (low + high) / 2;
		offset = p + mid * item_size;
		oip = offset_in_page(offset);

		if (oip + key_size <= PAGE_SIZE) {
			const unsigned long idx = get_eb_page_index(offset);
			char *kaddr = page_address(eb->pages[idx]);

			oip = get_eb_offset_in_page(eb, offset);
			tmp = (struct btrfs_disk_key *)(kaddr + oip);
		} else {
			read_extent_buffer(eb, &unaligned, offset, key_size);
			tmp = &unaligned;
		}

		ret = comp_keys(tmp, key);

		if (ret < 0)
			low = mid + 1;
		else if (ret > 0)
			high = mid;
		else {
			*slot = mid;
			return 0;
		}
	}
	*slot = low;
	return 1;
}

/*
 * simple bin_search frontend that does the right thing for
 * leaves vs nodes
 */
int btrfs_bin_search(struct extent_buffer *eb, const struct btrfs_key *key,
		     int *slot)
{
	if (btrfs_header_level(eb) == 0)
		return generic_bin_search(eb,
					  offsetof(struct btrfs_leaf, items),
					  sizeof(struct btrfs_item), key, slot);
	else
		return generic_bin_search(eb,
					  offsetof(struct btrfs_node, ptrs),
					  sizeof(struct btrfs_key_ptr), key, slot);
}

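/*
 * Track bytes used by a root's tree: adjust the root item's used-bytes
 * accounting under the accounting lock whenever a tree block is allocated
 * or freed for it.
 */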
static void root_add_used(struct btrfs_root *root, u32 size)
{
	spin_lock(&root->accounting_lock);
	btrfs_set_root_used(&root->root_item,
			    btrfs_root_used(&root->root_item) + size);
	spin_unlock(&root->accounting_lock);
}

static void root_sub_used(struct btrfs_root *root, u32 size)
{
	spin_lock(&root->accounting_lock);
	btrfs_set_root_used(&root->root_item,
			    btrfs_root_used(&root->root_item) - size);
	spin_unlock(&root->accounting_lock);
}

/* given a node and slot number, this reads the block it points to.  The
 * extent buffer is returned with a reference taken (but unlocked).
 */
struct extent_buffer *btrfs_read_node_slot(struct extent_buffer *parent,
					   int slot)
{
	int level = btrfs_header_level(parent);
	struct extent_buffer *eb;
	struct btrfs_key first_key;

	if (slot < 0 || slot >= btrfs_header_nritems(parent))
		return ERR_PTR(-ENOENT);

	BUG_ON(level == 0);

	btrfs_node_key_to_cpu(parent, &first_key, slot);
	eb = read_tree_block(parent->fs_info, btrfs_node_blockptr(parent, slot),
			     btrfs_header_owner(parent),
			     btrfs_node_ptr_generation(parent, slot),
			     level - 1, &first_key);
	if (!IS_ERR(eb) && !extent_buffer_uptodate(eb)) {
		free_extent_buffer(eb);
		eb = ERR_PTR(-EIO);
	}

	return eb;
}

/*
 * node level balancing, used to make sure nodes are in proper order for
 * item deletion.  We balance from the top down, so we have to make sure
 * that a deletion won't leave a node completely empty later on.
 */
static noinline int balance_level(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 struct btrfs_path *path, int level)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *right = NULL;
	struct extent_buffer *mid;
	struct extent_buffer *left = NULL;
	struct extent_buffer *parent = NULL;
	int ret = 0;
	int wret;
	int pslot;
	int orig_slot = path->slots[level];
	u64 orig_ptr;

	ASSERT(level > 0);

	mid = path->nodes[level];

	WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK);
	WARN_ON(btrfs_header_generation(mid) != trans->transid);

	orig_ptr = btrfs_node_blockptr(mid, orig_slot);

	if (level < BTRFS_MAX_LEVEL - 1) {
		parent = path->nodes[level + 1];
		pslot = path->slots[level + 1];
	}

	/*
	 * deal with the case where there is only one pointer in the root
	 * by promoting the node below to a root
	 */
	if (!parent) {
		struct extent_buffer *child;

		if (btrfs_header_nritems(mid) != 1)
			return 0;

		/* promote the child to a root */
		child = btrfs_read_node_slot(mid, 0);
		if (IS_ERR(child)) {
			ret = PTR_ERR(child);
			btrfs_handle_fs_error(fs_info, ret, NULL);
			goto enospc;
		}

		btrfs_tree_lock(child);
		ret = btrfs_cow_block(trans, root, child, mid, 0, &child,
				      BTRFS_NESTING_COW);
		if (ret) {
			btrfs_tree_unlock(child);
			free_extent_buffer(child);
			goto enospc;
		}

		ret = btrfs_tree_mod_log_insert_root(root->node, child, true);
		BUG_ON(ret < 0);
		rcu_assign_pointer(root->node, child);

		add_root_to_dirty_list(root);
		btrfs_tree_unlock(child);

		path->locks[level] = 0;
		path->nodes[level] = NULL;
		btrfs_clean_tree_block(mid);
		btrfs_tree_unlock(mid);
		/* once for the path */
		free_extent_buffer(mid);

		root_sub_used(root, mid->len);
		btrfs_free_tree_block(trans, root, mid, 0, 1);
		/* once for the root ptr */
		free_extent_buffer_stale(mid);
		return 0;
	}
	if (btrfs_header_nritems(mid) >
	    BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 4)
		return 0;

	left = btrfs_read_node_slot(parent, pslot - 1);
	if (IS_ERR(left))
		left = NULL;

	if (left) {
		__btrfs_tree_lock(left, BTRFS_NESTING_LEFT);
		wret = btrfs_cow_block(trans, root, left,
				       parent, pslot - 1, &left,
				       BTRFS_NESTING_LEFT_COW);
		if (wret) {
			ret = wret;
			goto enospc;
		}
	}

	right = btrfs_read_node_slot(parent, pslot + 1);
	if (IS_ERR(right))
		right = NULL;

	if (right) {
		__btrfs_tree_lock(right, BTRFS_NESTING_RIGHT);
		wret = btrfs_cow_block(trans, root, right,
				       parent, pslot + 1, &right,
				       BTRFS_NESTING_RIGHT_COW);
		if (wret) {
			ret = wret;
			goto enospc;
		}
	}

	/* first, try to make some room in the middle buffer */
	if (left) {
		orig_slot += btrfs_header_nritems(left);
		wret = push_node_left(trans, left, mid, 1);
		if (wret < 0)
			ret = wret;
	}

	/*
	 * then try to empty the right most buffer into the middle
	 */
	if (right) {
		wret = push_node_left(trans, mid, right, 1);
		if (wret < 0 && wret != -ENOSPC)
			ret = wret;
		if (btrfs_header_nritems(right) == 0) {
			btrfs_clean_tree_block(right);
			btrfs_tree_unlock(right);
			del_ptr(root, path, level + 1, pslot + 1);
			root_sub_used(root, right->len);
			btrfs_free_tree_block(trans, root, right, 0, 1);
			free_extent_buffer_stale(right);
			right = NULL;
		} else {
			struct btrfs_disk_key right_key;
			btrfs_node_key(right, &right_key, 0);
			ret = btrfs_tree_mod_log_insert_key(parent, pslot + 1,
					BTRFS_MOD_LOG_KEY_REPLACE, GFP_NOFS);
			BUG_ON(ret < 0);
			btrfs_set_node_key(parent, &right_key, pslot + 1);
			btrfs_mark_buffer_dirty(parent);
		}
	}
	if (btrfs_header_nritems(mid) == 1) {
		/*
		 * we're not allowed to leave a node with one item in the
		 * tree during a delete.  A deletion from lower in the tree
		 * could try to delete the only pointer in this node.
		 * So, pull some keys from the left.
		 * There has to be a left pointer at this point because
		 * otherwise we would have pulled some pointers from the
		 * right
		 */
		if (!left) {
			ret = -EROFS;
			btrfs_handle_fs_error(fs_info, ret, NULL);
			goto enospc;
		}
		wret = balance_node_right(trans, mid, left);
		if (wret < 0) {
			ret = wret;
			goto enospc;
		}
		if (wret == 1) {
			wret = push_node_left(trans, left, mid, 1);
			if (wret < 0)
				ret = wret;
		}
		BUG_ON(wret == 1);
	}
	if (btrfs_header_nritems(mid) == 0) {
		btrfs_clean_tree_block(mid);
		btrfs_tree_unlock(mid);
		del_ptr(root, path, level + 1, pslot);
		root_sub_used(root, mid->len);
		btrfs_free_tree_block(trans, root, mid, 0, 1);
		free_extent_buffer_stale(mid);
		mid = NULL;
	} else {
		/* update the parent key to reflect our changes */
		struct btrfs_disk_key mid_key;
		btrfs_node_key(mid, &mid_key, 0);
		ret = btrfs_tree_mod_log_insert_key(parent, pslot,
				BTRFS_MOD_LOG_KEY_REPLACE, GFP_NOFS);
		BUG_ON(ret < 0);
		btrfs_set_node_key(parent, &mid_key, pslot);
		btrfs_mark_buffer_dirty(parent);
	}

	/* update the path */
	if (left) {
		if (btrfs_header_nritems(left) > orig_slot) {
			atomic_inc(&left->refs);
			/* left was locked after cow */
			path->nodes[level] = left;
			path->slots[level + 1] -= 1;
			path->slots[level] = orig_slot;
			if (mid) {
				btrfs_tree_unlock(mid);
				free_extent_buffer(mid);
			}
		} else {
			orig_slot -= btrfs_header_nritems(left);
			path->slots[level] = orig_slot;
		}
	}
	/* double check we haven't messed things up */
	if (orig_ptr !=
	    btrfs_node_blockptr(path->nodes[level], path->slots[level]))
		BUG();
enospc:
	if (right) {
		btrfs_tree_unlock(right);
		free_extent_buffer(right);
	}
	if (left) {
		if (path->nodes[level] != left)
			btrfs_tree_unlock(left);
		free_extent_buffer(left);
	}
	return ret;
}

/* Node balancing for insertion.  Here we only split or push nodes around
 * when they are completely full.  This is also done top down, so we
 * have to be pessimistic.
 */
static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path, int level)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *right = NULL;
	struct extent_buffer *mid;
	struct extent_buffer *left = NULL;
	struct extent_buffer *parent = NULL;
	int ret = 0;
	int wret;
	int pslot;
	int orig_slot = path->slots[level];

	if (level == 0)
		return 1;

	mid = path->nodes[level];
	WARN_ON(btrfs_header_generation(mid) != trans->transid);

	if (level < BTRFS_MAX_LEVEL - 1) {
		parent = path->nodes[level + 1];
		pslot = path->slots[level + 1];
	}

	if (!parent)
		return 1;

	left = btrfs_read_node_slot(parent, pslot - 1);
	if (IS_ERR(left))
		left = NULL;

	/* first, try to make some room in the middle buffer */
	if (left) {
		u32 left_nr;

		__btrfs_tree_lock(left, BTRFS_NESTING_LEFT);

		left_nr = btrfs_header_nritems(left);
		if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) {
			wret = 1;
		} else {
			ret = btrfs_cow_block(trans, root, left, parent,
					      pslot - 1, &left,
					      BTRFS_NESTING_LEFT_COW);
			if (ret)
				wret = 1;
			else {
				wret = push_node_left(trans, left, mid, 0);
			}
		}
		if (wret < 0)
			ret = wret;
		if (wret == 0) {
			struct btrfs_disk_key disk_key;
			orig_slot += left_nr;
			btrfs_node_key(mid, &disk_key, 0);
			ret = btrfs_tree_mod_log_insert_key(parent, pslot,
					BTRFS_MOD_LOG_KEY_REPLACE, GFP_NOFS);
			BUG_ON(ret < 0);
			btrfs_set_node_key(parent, &disk_key, pslot);
			btrfs_mark_buffer_dirty(parent);
			if (btrfs_header_nritems(left) > orig_slot) {
				path->nodes[level] = left;
				path->slots[level + 1] -= 1;
				path->slots[level] = orig_slot;
				btrfs_tree_unlock(mid);
				free_extent_buffer(mid);
			} else {
				orig_slot -=
					btrfs_header_nritems(left);
				path->slots[level] = orig_slot;
				btrfs_tree_unlock(left);
				free_extent_buffer(left);
			}
			return 0;
		}
		btrfs_tree_unlock(left);
		free_extent_buffer(left);
	}
	right = btrfs_read_node_slot(parent, pslot + 1);
	if (IS_ERR(right))
		right = NULL;

	/*
	 * then try to empty the right most buffer into the middle
	 */
	if (right) {
		u32 right_nr;

		__btrfs_tree_lock(right, BTRFS_NESTING_RIGHT);

		right_nr = btrfs_header_nritems(right);
		if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) {
			wret = 1;
		} else {
			ret = btrfs_cow_block(trans, root, right,
					      parent, pslot + 1,
					      &right, BTRFS_NESTING_RIGHT_COW);
			if (ret)
				wret = 1;
			else {
				wret = balance_node_right(trans, right, mid);
			}
		}
		if (wret < 0)
			ret = wret;
		if (wret == 0) {
			struct btrfs_disk_key disk_key;

			btrfs_node_key(right, &disk_key, 0);
			ret = btrfs_tree_mod_log_insert_key(parent, pslot + 1,
					BTRFS_MOD_LOG_KEY_REPLACE, GFP_NOFS);
			BUG_ON(ret < 0);
			btrfs_set_node_key(parent, &disk_key, pslot + 1);
			btrfs_mark_buffer_dirty(parent);

			if (btrfs_header_nritems(mid) <= orig_slot) {
				path->nodes[level] = right;
				path->slots[level + 1] += 1;
				path->slots[level] = orig_slot -
					btrfs_header_nritems(mid);
				btrfs_tree_unlock(mid);
				free_extent_buffer(mid);
			} else {
				btrfs_tree_unlock(right);
				free_extent_buffer(right);
			}
			return 0;
		}
		btrfs_tree_unlock(right);
		free_extent_buffer(right);
	}
	return 1;
}

/*
 * readahead one full node of leaves, finding things that are close
 * to the block in 'slot', and triggering ra on them.
 */
static void reada_for_search(struct btrfs_fs_info *fs_info,
			     struct btrfs_path *path,
			     int level, int slot, u64 objectid)
{
	struct extent_buffer *node;
	struct btrfs_disk_key disk_key;
	u32 nritems;
	u64 search;
	u64 target;
	u64 nread = 0;
	u64 nread_max;
	struct extent_buffer *eb;
	u32 nr;
	u32 blocksize;
	u32 nscan = 0;

	if (level != 1 && path->reada != READA_FORWARD_ALWAYS)
		return;

	if (!path->nodes[level])
		return;

	node = path->nodes[level];

	/*
	 * Since the time between visiting leaves is much shorter than the time
	 * between visiting nodes, limit read ahead of nodes to 1, to avoid too
	 * much IO at once (possibly random).
	 */
	if (path->reada == READA_FORWARD_ALWAYS) {
		if (level > 1)
			nread_max = node->fs_info->nodesize;
		else
			nread_max = SZ_128K;
	} else {
		nread_max = SZ_64K;
	}

	search = btrfs_node_blockptr(node, slot);
	blocksize = fs_info->nodesize;
	eb = find_extent_buffer(fs_info, search);
	if (eb) {
		free_extent_buffer(eb);
		return;
	}

	target = search;

	nritems = btrfs_header_nritems(node);
	nr = slot;

	while (1) {
		if (path->reada == READA_BACK) {
			if (nr == 0)
				break;
			nr--;
		} else if (path->reada == READA_FORWARD ||
			   path->reada == READA_FORWARD_ALWAYS) {
			nr++;
			if (nr >= nritems)
				break;
		}
		if (path->reada == READA_BACK && objectid) {
			btrfs_node_key(node, &disk_key, nr);
			if (btrfs_disk_key_objectid(&disk_key) != objectid)
				break;
		}
		search = btrfs_node_blockptr(node, nr);
		if (path->reada == READA_FORWARD_ALWAYS ||
		    (search <= target && target - search <= 65536) ||
		    (search > target && search - target <= 65536)) {
			btrfs_readahead_node_child(node, nr);
			nread += blocksize;
		}
		nscan++;
		if (nread > nread_max || nscan > 32)
			break;
	}
}

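/*
 * Readahead the two siblings of path->nodes[level] so that a following
 * balance (push or split) doesn't stall reading them from disk.
 */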
static noinline void reada_for_balance(struct btrfs_path *path, int level)
{
	struct extent_buffer *parent;
	int slot;
	int nritems;

	parent = path->nodes[level + 1];
	if (!parent)
		return;

	nritems = btrfs_header_nritems(parent);
	slot = path->slots[level + 1];

	if (slot > 0)
		btrfs_readahead_node_child(parent, slot - 1);
	if (slot + 1 < nritems)
		btrfs_readahead_node_child(parent, slot + 1);
}


/*
 * when we walk down the tree, it is usually safe to unlock the higher layers
 * in the tree.  The exceptions are when our path goes through slot 0, because
 * operations on the tree might require changing key pointers higher up in the
 * tree.
 *
 * callers might also have set path->keep_locks, which tells this code to keep
 * the lock if the path points to the last slot in the block.  This is part of
 * walking through the tree, and selecting the next slot in the higher block.
 *
 * lowest_unlock sets the lowest level in the tree we're allowed to unlock, so
 * if lowest_unlock is 1, level 0 won't be unlocked.
 */
static noinline void unlock_up(struct btrfs_path *path, int level,
			       int lowest_unlock, int min_write_lock_level,
			       int *write_lock_level)
{
	int i;
	int skip_level = level;
	int no_skips = 0;
	struct extent_buffer *t;

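	/*
	 * skip_level is the lowest level we must keep locked: a slot 0 entry
	 * (or, with keep_locks, a last-slot entry) may still require a key
	 * update in its parent, so everything up to and including that
	 * parent stays locked.
	 */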
	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
		if (!path->nodes[i])
			break;
		if (!path->locks[i])
			break;
		if (!no_skips && path->slots[i] == 0) {
			skip_level = i + 1;
			continue;
		}
		if (!no_skips && path->keep_locks) {
			u32 nritems;
			t = path->nodes[i];
			nritems = btrfs_header_nritems(t);
			if (nritems < 1 || path->slots[i] >= nritems - 1) {
				skip_level = i + 1;
				continue;
			}
		}
		if (skip_level < i && i >= lowest_unlock)
			no_skips = 1;

		t = path->nodes[i];
		if (i >= lowest_unlock && i > skip_level) {
			btrfs_tree_unlock_rw(t, path->locks[i]);
			path->locks[i] = 0;
			if (write_lock_level &&
			    i > min_write_lock_level &&
			    i <= *write_lock_level) {
				*write_lock_level = i - 1;
			}
		}
	}
}

/*
 * helper function for btrfs_search_slot.  The goal is to find a block
 * in cache without setting the path to blocking.  If we find the block
 * we return zero and the path is unchanged.
 *
 * If we can't find the block, we set the path blocking and do some
 * reada.  -EAGAIN is returned and the search must be repeated.
 */
static int
read_block_for_search(struct btrfs_root *root, struct btrfs_path *p,
		      struct extent_buffer **eb_ret, int level, int slot,
		      const struct btrfs_key *key)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 blocknr;
	u64 gen;
	struct extent_buffer *tmp;
	struct btrfs_key first_key;
	int ret;
	int parent_level;

	blocknr = btrfs_node_blockptr(*eb_ret, slot);
	gen = btrfs_node_ptr_generation(*eb_ret, slot);
	parent_level = btrfs_header_level(*eb_ret);
	btrfs_node_key_to_cpu(*eb_ret, &first_key, slot);

	tmp = find_extent_buffer(fs_info, blocknr);
	if (tmp) {
		if (p->reada == READA_FORWARD_ALWAYS)
			reada_for_search(fs_info, p, level, slot, key->objectid);

		/* first we do an atomic uptodate check */
		if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
			/*
			 * Do extra check for first_key, eb can be stale due to
			 * being cached, read from scrub, or have multiple
			 * parents (shared tree blocks).
			 */
			if (btrfs_verify_level_key(tmp,
					parent_level - 1, &first_key, gen)) {
				free_extent_buffer(tmp);
				return -EUCLEAN;
			}
			*eb_ret = tmp;
			return 0;
		}

		/* now we're allowed to do a blocking uptodate check */
		ret = btrfs_read_buffer(tmp, gen, parent_level - 1, &first_key);
		if (!ret) {
			*eb_ret = tmp;
			return 0;
		}
		free_extent_buffer(tmp);
		btrfs_release_path(p);
		return -EIO;
	}

	/*
	 * reduce lock contention at high levels
	 * of the btree by dropping locks before
	 * we read.  Don't release the lock on the current
	 * level because we need to walk this node to figure
	 * out which blocks to read.
	 */
	btrfs_unlock_up_safe(p, level + 1);

	if (p->reada != READA_NONE)
		reada_for_search(fs_info, p, level, slot, key->objectid);

	ret = -EAGAIN;
	tmp = read_tree_block(fs_info, blocknr, root->root_key.objectid,
			      gen, parent_level - 1, &first_key);
	if (!IS_ERR(tmp)) {
		/*
		 * If the read above didn't mark this buffer up to date,
		 * it will never end up being up to date.  Set ret to EIO now
		 * and give up so that our caller doesn't loop forever
		 * on our EAGAINs.
		 */
		if (!extent_buffer_uptodate(tmp))
			ret = -EIO;
		free_extent_buffer(tmp);
	} else {
		ret = PTR_ERR(tmp);
	}

	btrfs_release_path(p);
	return ret;
}

/*
 * helper function for btrfs_search_slot.  This does all of the checks
 * for node-level blocks and does any balancing required based on
 * the ins_len.
 *
 * If no extra work was required, zero is returned.  If we had to
 * drop the path, -EAGAIN is returned and btrfs_search_slot must
 * start over
 */
static int
setup_nodes_for_search(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct btrfs_path *p,
		       struct extent_buffer *b, int level, int ins_len,
		       int *write_lock_level)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret = 0;

	if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >=
	    BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 3) {

		if (*write_lock_level < level + 1) {
			*write_lock_level = level + 1;
			btrfs_release_path(p);
			return -EAGAIN;
		}

		reada_for_balance(p, level);
		ret = split_node(trans, root, p, level);

		b = p->nodes[level];
	} else if (ins_len < 0 && btrfs_header_nritems(b) <
		   BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 2) {

		if (*write_lock_level < level + 1) {
			*write_lock_level = level + 1;
			btrfs_release_path(p);
			return -EAGAIN;
		}

		reada_for_balance(p, level);
		ret = balance_level(trans, root, p, level);
		if (ret)
			return ret;

		b = p->nodes[level];
		if (!b) {
			btrfs_release_path(p);
			return -EAGAIN;
		}
		BUG_ON(btrfs_header_nritems(b) == 1);
	}
	return ret;
}

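/*
 * Look up the item (@iobjectid, @key_type, @ioff) in @fs_root, walking to
 * the next leaf if needed.  Returns 0 and fills @found_key when an item
 * with a matching objectid and type exists, 1 when it doesn't, and a
 * negative error number on failure.
 */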
int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *path,
		u64 iobjectid, u64 ioff, u8 key_type,
		struct btrfs_key *found_key)
{
	int ret;
	struct btrfs_key key;
	struct extent_buffer *eb;

	ASSERT(path);
	ASSERT(found_key);

	key.type = key_type;
	key.objectid = iobjectid;
	key.offset = ioff;

	ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0);
	if (ret < 0)
		return ret;

	eb = path->nodes[0];
	if (ret && path->slots[0] >= btrfs_header_nritems(eb)) {
		ret = btrfs_next_leaf(fs_root, path);
		if (ret)
			return ret;
		eb = path->nodes[0];
	}

	btrfs_item_key_to_cpu(eb, found_key, path->slots[0]);
	if (found_key->type != key.type ||
			found_key->objectid != key.objectid)
		return 1;

	return 0;
}

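/*
 * Get the root node to begin a search at, honouring search_commit_root and
 * skip_locking, and take the lightest lock that still satisfies
 * @write_lock_level.  Returns the (referenced) root extent buffer with
 * p->nodes[] and p->locks[] initialized for its level.
 */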
static struct extent_buffer *btrfs_search_slot_get_root(struct btrfs_root *root,
							struct btrfs_path *p,
							int write_lock_level)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *b;
	int root_lock;
	int level = 0;

	/* We try very hard to do read locks on the root */
	root_lock = BTRFS_READ_LOCK;

	if (p->search_commit_root) {
		/*
		 * The commit roots are read only so we always do read locks,
		 * and we always must hold the commit_root_sem when doing
		 * searches on them, the only exception is send where we don't
		 * want to block transaction commits for a long time, so
		 * we need to clone the commit root in order to avoid races
		 * with transaction commits that create a snapshot of one of
		 * the roots used by a send operation.
		 */
		if (p->need_commit_sem) {
			down_read(&fs_info->commit_root_sem);
			b = btrfs_clone_extent_buffer(root->commit_root);
			up_read(&fs_info->commit_root_sem);
			if (!b)
				return ERR_PTR(-ENOMEM);

		} else {
			b = root->commit_root;
			atomic_inc(&b->refs);
		}
		level = btrfs_header_level(b);
		/*
		 * Ensure that all callers have set skip_locking when
		 * p->search_commit_root = 1.
		 */
		ASSERT(p->skip_locking == 1);

		goto out;
	}

	if (p->skip_locking) {
		b = btrfs_root_node(root);
		level = btrfs_header_level(b);
		goto out;
	}

	/*
	 * If the level is set to maximum, we can skip trying to get the read
	 * lock.
	 */
	if (write_lock_level < BTRFS_MAX_LEVEL) {
		/*
		 * We don't know the level of the root node until we actually
		 * have it read locked
		 */
		b = btrfs_read_lock_root_node(root);
		level = btrfs_header_level(b);
		if (level > write_lock_level)
			goto out;

		/* Whoops, must trade for write lock */
		btrfs_tree_read_unlock(b);
		free_extent_buffer(b);
	}

	b = btrfs_lock_root_node(root);
	root_lock = BTRFS_WRITE_LOCK;

	/* The level might have changed, check again */
	level = btrfs_header_level(b);

out:
	p->nodes[level] = b;
	if (!p->skip_locking)
		p->locks[level] = root_lock;
	/*
	 * Callers are responsible for dropping b's references.
	 */
	return b;
}


/*
 * btrfs_search_slot - look for a key in a tree and perform necessary
 * modifications to preserve tree invariants.
 *
 * @trans:	Handle of transaction, used when modifying the tree
 * @p:		Holds all btree nodes along the search path
 * @root:	The root node of the tree
 * @key:	The key we are looking for
 * @ins_len:	Indicates purpose of search:
 *              >0  for inserts it's the size of the item inserted (*)
 *              <0  for deletions
 *               0  for plain searches, not modifying the tree
 *
 *              (*) If the size of the item inserted doesn't include
 *              sizeof(struct btrfs_item), then p->search_for_extension must
 *              be set.
 * @cow:	boolean, whether CoW operations should be performed.  Must
 *		always be 1 when modifying the tree.
 *
 * If @ins_len > 0, nodes and leaves will be split as we walk down the tree.
 * If @ins_len < 0, nodes will be merged as we walk down the tree (if possible)
 *
 * If @key is found, 0 is returned and you can find the item in the leaf level
 * of the path (level 0)
 *
 * If @key isn't found, 1 is returned and the leaf level of the path (level 0)
 * points to the slot where it should be inserted
 *
 * If an error is encountered while searching the tree a negative error number
 * is returned
 */
int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		      const struct btrfs_key *key, struct btrfs_path *p,
		      int ins_len, int cow)
{
	struct extent_buffer *b;
	int slot;
	int ret;
	int err;
	int level;
	int lowest_unlock = 1;
	/* everything at write_lock_level or lower must be write locked */
	int write_lock_level = 0;
	u8 lowest_level = 0;
	int min_write_lock_level;
	int prev_cmp;

	lowest_level = p->lowest_level;
	WARN_ON(lowest_level && ins_len > 0);
	WARN_ON(p->nodes[0] != NULL);
	BUG_ON(!cow && ins_len);

	if (ins_len < 0) {
		lowest_unlock = 2;

		/* when we are removing items, we might have to go up to level
		 * two as we update tree pointers.  Make sure we keep write
		 * locks for those levels as well.
		 */
		write_lock_level = 2;
	} else if (ins_len > 0) {
		/*
		 * for inserting items, make sure we have a write lock on
		 * level 1 so we can update keys
		 */
		write_lock_level = 1;
	}

	if (!cow)
		write_lock_level = -1;

	if (cow && (p->keep_locks || p->lowest_level))
		write_lock_level = BTRFS_MAX_LEVEL;

	min_write_lock_level = write_lock_level;

again:
	prev_cmp = -1;
	b = btrfs_search_slot_get_root(root, p, write_lock_level);
	if (IS_ERR(b)) {
		ret = PTR_ERR(b);
		goto done;
	}

	while (b) {
		int dec = 0;

		level = btrfs_header_level(b);

		if (cow) {
			bool last_level = (level == (BTRFS_MAX_LEVEL - 1));

			/*
			 * if we don't really need to cow this block
			 * then we don't want to set the path blocking,
			 * so we test it here
			 */
			if (!should_cow_block(trans, root, b))
				goto cow_done;

			/*
			 * must have write locks on this node and the
			 * parent
			 */
			if (level > write_lock_level ||
			    (level + 1 > write_lock_level &&
			    level + 1 < BTRFS_MAX_LEVEL &&
			    p->nodes[level + 1])) {
				write_lock_level = level + 1;
				btrfs_release_path(p);
				goto again;
			}

			if (last_level)
				err = btrfs_cow_block(trans, root, b, NULL, 0,
						      &b,
						      BTRFS_NESTING_COW);
			else
				err = btrfs_cow_block(trans, root, b,
						      p->nodes[level + 1],
						      p->slots[level + 1], &b,
						      BTRFS_NESTING_COW);
			if (err) {
				ret = err;
				goto done;
			}
		}
cow_done:
		p->nodes[level] = b;
		/*
		 * Leave path with blocking locks to avoid massive
		 * lock context switches, this is made on purpose.
		 */

		/*
		 * we have a lock on b and as long as we aren't changing
		 * the tree, there is no way for the items in b to change.
		 * It is safe to drop the lock on our parent before we
		 * go through the expensive btree search on b.
		 *
		 * If we're inserting or deleting (ins_len != 0), then we might
		 * be changing slot zero, which may require changing the parent.
		 * So, we can't drop the lock until after we know which slot
		 * we're operating on.
		 */
1792		if (!ins_len && !p->keep_locks) {
1793			int u = level + 1;
1794
1795			if (u < BTRFS_MAX_LEVEL && p->locks[u]) {
1796				btrfs_tree_unlock_rw(p->nodes[u], p->locks[u]);
1797				p->locks[u] = 0;
1798			}
1799		}
1800
1801		/*
1802		 * If btrfs_bin_search returns an exact match (prev_cmp == 0)
1803		 * we can safely assume the target key will always be in slot 0
1804		 * on lower levels due to the invariants BTRFS' btree provides,
1805		 * namely that a btrfs_key_ptr entry always points to the
1806		 * lowest key in the child node, thus we can skip searching
1807		 * lower levels
1808		 */
1809		if (prev_cmp == 0) {
1810			slot = 0;
1811			ret = 0;
1812		} else {
1813			ret = btrfs_bin_search(b, key, &slot);
1814			prev_cmp = ret;
1815			if (ret < 0)
1816				goto done;
1817		}
1818
1819		if (level == 0) {
1820			p->slots[level] = slot;
1821			/*
1822			 * Item key already exists. In this case, if we are
1823			 * allowed to insert the item (for example, in dir_item
1824			 * case, item key collision is allowed), it will be
1825			 * merged with the original item. Only the item size
1826			 * grows, no new btrfs item will be added. If
1827			 * search_for_extension is not set, ins_len already
1828			 * accounts the size btrfs_item, deduct it here so leaf
1829			 * space check will be correct.
1830			 */
1831			if (ret == 0 && ins_len > 0 && !p->search_for_extension) {
1832				ASSERT(ins_len >= sizeof(struct btrfs_item));
1833				ins_len -= sizeof(struct btrfs_item);
1834			}
1835			if (ins_len > 0 &&
1836			    btrfs_leaf_free_space(b) < ins_len) {
1837				if (write_lock_level < 1) {
1838					write_lock_level = 1;
1839					btrfs_release_path(p);
1840					goto again;
1841				}
1842
1843				err = split_leaf(trans, root, key,
1844						 p, ins_len, ret == 0);
1845
1846				BUG_ON(err > 0);
1847				if (err) {
1848					ret = err;
1849					goto done;
1850				}
1851			}
1852			if (!p->search_for_split)
1853				unlock_up(p, level, lowest_unlock,
1854					  min_write_lock_level, NULL);
1855			goto done;
1856		}
1857		if (ret && slot > 0) {
1858			dec = 1;
1859			slot--;
1860		}
1861		p->slots[level] = slot;
1862		err = setup_nodes_for_search(trans, root, p, b, level, ins_len,
1863					     &write_lock_level);
1864		if (err == -EAGAIN)
1865			goto again;
1866		if (err) {
1867			ret = err;
1868			goto done;
1869		}
1870		b = p->nodes[level];
1871		slot = p->slots[level];
1872
1873		/*
1874		 * Slot 0 is special, if we change the key we have to update
1875		 * the parent pointer which means we must have a write lock on
1876		 * the parent
1877		 */
1878		if (slot == 0 && ins_len && write_lock_level < level + 1) {
1879			write_lock_level = level + 1;
1880			btrfs_release_path(p);
1881			goto again;
1882		}
1883
1884		unlock_up(p, level, lowest_unlock, min_write_lock_level,
1885			  &write_lock_level);
1886
1887		if (level == lowest_level) {
1888			if (dec)
1889				p->slots[level]++;
1890			goto done;
1891		}
1892
1893		err = read_block_for_search(root, p, &b, level, slot, key);
1894		if (err == -EAGAIN)
1895			goto again;
1896		if (err) {
1897			ret = err;
1898			goto done;
1899		}
1900
1901		if (!p->skip_locking) {
1902			level = btrfs_header_level(b);
1903			if (level <= write_lock_level) {
1904				btrfs_tree_lock(b);
1905				p->locks[level] = BTRFS_WRITE_LOCK;
1906			} else {
1907				btrfs_tree_read_lock(b);
1908				p->locks[level] = BTRFS_READ_LOCK;
1909			}
1910			p->nodes[level] = b;
1911		}
1912	}
1913	ret = 1;
1914done:
1915	if (ret < 0 && !p->skip_release_on_error)
1916		btrfs_release_path(p);
1917	return ret;
1918}
1919ALLOW_ERROR_INJECTION(btrfs_search_slot, ERRNO);
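
/*
 * Illustrative usage sketch (hypothetical helper, not part of this file):
 * the common read-only lookup pattern built on btrfs_search_slot().  With
 * ins_len == 0 and cow == 0 no transaction handle is needed, so NULL is
 * passed for @trans.
 */
#if 0
static int example_read_lookup(struct btrfs_root *root,
			       const struct btrfs_key *key)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	if (ret == 0) {
		/* exact match at path->nodes[0], path->slots[0] */
	} else if (ret > 0) {
		/* not found, slot points where the key would be inserted */
	}
	btrfs_free_path(path);
	return ret;
}
#endif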
1920
1921/*
1922 * Like btrfs_search_slot, this looks for a key in the given tree. It uses the
1923 * current state of the tree together with the operations recorded in the tree
1924 * modification log to search for the key in a previous version of this tree, as
1925 * denoted by the time_seq parameter.
1926 *
1927 * Naturally, there is no support for insert, delete or cow operations.
1928 *
1929 * The resulting path and return value will be set up as if we called
1930 * btrfs_search_slot at that point in time with ins_len and cow both set to 0.
1931 */
1932int btrfs_search_old_slot(struct btrfs_root *root, const struct btrfs_key *key,
1933			  struct btrfs_path *p, u64 time_seq)
1934{
1935	struct btrfs_fs_info *fs_info = root->fs_info;
1936	struct extent_buffer *b;
1937	int slot;
1938	int ret;
1939	int err;
1940	int level;
1941	int lowest_unlock = 1;
1942	u8 lowest_level = 0;
1943
1944	lowest_level = p->lowest_level;
1945	WARN_ON(p->nodes[0] != NULL);
1946
1947	if (p->search_commit_root) {
1948		BUG_ON(time_seq);
1949		return btrfs_search_slot(NULL, root, key, p, 0, 0);
1950	}
1951
1952again:
1953	b = btrfs_get_old_root(root, time_seq);
1954	if (!b) {
1955		ret = -EIO;
1956		goto done;
1957	}
1958	level = btrfs_header_level(b);
1959	p->locks[level] = BTRFS_READ_LOCK;
1960
1961	while (b) {
1962		int dec = 0;
1963
1964		level = btrfs_header_level(b);
1965		p->nodes[level] = b;
1966
1967		/*
1968		 * we have a lock on b and as long as we aren't changing
1969		 * the tree, there is no way for the items in b to change.
1970		 * It is safe to drop the lock on our parent before we
1971		 * go through the expensive btree search on b.
1972		 */
1973		btrfs_unlock_up_safe(p, level + 1);
1974
1975		ret = btrfs_bin_search(b, key, &slot);
1976		if (ret < 0)
1977			goto done;
1978
1979		if (level == 0) {
1980			p->slots[level] = slot;
1981			unlock_up(p, level, lowest_unlock, 0, NULL);
1982			goto done;
1983		}
1984
1985		if (ret && slot > 0) {
1986			dec = 1;
1987			slot--;
1988		}
1989		p->slots[level] = slot;
1990		unlock_up(p, level, lowest_unlock, 0, NULL);
1991
1992		if (level == lowest_level) {
1993			if (dec)
1994				p->slots[level]++;
1995			goto done;
1996		}
1997
1998		err = read_block_for_search(root, p, &b, level, slot, key);
1999		if (err == -EAGAIN)
2000			goto again;
2001		if (err) {
2002			ret = err;
2003			goto done;
2004		}
2005
2006		level = btrfs_header_level(b);
2007		btrfs_tree_read_lock(b);
2008		b = btrfs_tree_mod_log_rewind(fs_info, p, b, time_seq);
2009		if (!b) {
2010			ret = -ENOMEM;
2011			goto done;
2012		}
2013		p->locks[level] = BTRFS_READ_LOCK;
2014		p->nodes[level] = b;
2015	}
2016	ret = 1;
2017done:
2018	if (ret < 0)
2019		btrfs_release_path(p);
2020
2021	return ret;
2022}
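
/*
 * Illustrative usage sketch (hypothetical helper, not part of this file):
 * searching a past version of the tree.  @time_seq is assumed to have been
 * obtained from the tree mod log (see tree-mod-log.h) before the
 * modifications of interest; the resulting path is read-only.
 */
#if 0
static int example_old_lookup(struct btrfs_root *root,
			      const struct btrfs_key *key, u64 time_seq)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* same return convention as btrfs_search_slot(): 0, 1 or < 0 */
	ret = btrfs_search_old_slot(root, key, path, time_seq);
	btrfs_free_path(path);
	return ret;
}
#endif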
2023
2024/*
2025 * helper to use instead of btrfs_search_slot() if no exact match is needed but
2026 * instead the next or previous item should be returned.
2027 * When find_higher is true, the next higher item is returned, the next lower
2028 * otherwise.
2029 * When return_any and find_higher are both true, and no higher item is found,
2030 * return the next lower instead.
2031 * When return_any is true and find_higher is false, and no lower item is found,
2032 * return the next higher instead.
2033 * It returns 0 if any item is found, 1 if none is found (tree empty), and
2034 * < 0 on error
2035 */
2036int btrfs_search_slot_for_read(struct btrfs_root *root,
2037			       const struct btrfs_key *key,
2038			       struct btrfs_path *p, int find_higher,
2039			       int return_any)
2040{
2041	int ret;
2042	struct extent_buffer *leaf;
2043
2044again:
2045	ret = btrfs_search_slot(NULL, root, key, p, 0, 0);
2046	if (ret <= 0)
2047		return ret;
2048	/*
2049	 * a return value of 1 means the path is at the position where the
2050	 * item should be inserted. Normally this is the next bigger item,
2051	 * but if the previous item is the last one in its leaf, the path points
2052	 * to the first free slot in the previous leaf, i.e. at an invalid
2053	 * item.
2054	 */
2055	leaf = p->nodes[0];
2056
2057	if (find_higher) {
2058		if (p->slots[0] >= btrfs_header_nritems(leaf)) {
2059			ret = btrfs_next_leaf(root, p);
2060			if (ret <= 0)
2061				return ret;
2062			if (!return_any)
2063				return 1;
2064			/*
2065			 * no higher item found, return the next
2066			 * lower instead
2067			 */
2068			return_any = 0;
2069			find_higher = 0;
2070			btrfs_release_path(p);
2071			goto again;
2072		}
2073	} else {
2074		if (p->slots[0] == 0) {
2075			ret = btrfs_prev_leaf(root, p);
2076			if (ret < 0)
2077				return ret;
2078			if (!ret) {
2079				leaf = p->nodes[0];
2080				if (p->slots[0] == btrfs_header_nritems(leaf))
2081					p->slots[0]--;
2082				return 0;
2083			}
2084			if (!return_any)
2085				return 1;
2086			/*
2087			 * no lower item found, return the next
2088			 * higher instead
2089			 */
2090			return_any = 0;
2091			find_higher = 1;
2092			btrfs_release_path(p);
2093			goto again;
2094		} else {
2095			--p->slots[0];
2096		}
2097	}
2098	return 0;
2099}
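
/*
 * Illustrative usage sketch (hypothetical helper, not part of this file):
 * find the nearest item at or after @key, falling back to the nearest
 * lower one when nothing higher exists.
 */
#if 0
static int example_nearest_lookup(struct btrfs_root *root,
				  const struct btrfs_key *key)
{
	struct btrfs_path *path;
	struct btrfs_key found;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot_for_read(root, key, path, 1, 1);
	if (ret == 0)
		btrfs_item_key_to_cpu(path->nodes[0], &found, path->slots[0]);
	btrfs_free_path(path);
	return ret;
}
#endif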
2100
2101/*
2102 * adjust the pointers going up the tree, starting at level
2103 * making sure the right key of each node points to 'key'.
2104 * This is used after shifting pointers to the left, so it stops
2105 * fixing up pointers when a given leaf/node is not in slot 0 of the
2106 * higher levels
2107 *
2108 */
2109static void fixup_low_keys(struct btrfs_path *path,
2110			   struct btrfs_disk_key *key, int level)
2111{
2112	int i;
2113	struct extent_buffer *t;
2114	int ret;
2115
2116	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2117		int tslot = path->slots[i];
2118
2119		if (!path->nodes[i])
2120			break;
2121		t = path->nodes[i];
2122		ret = btrfs_tree_mod_log_insert_key(t, tslot,
2123				BTRFS_MOD_LOG_KEY_REPLACE, GFP_ATOMIC);
2124		BUG_ON(ret < 0);
2125		btrfs_set_node_key(t, key, tslot);
2126		btrfs_mark_buffer_dirty(path->nodes[i]);
2127		if (tslot != 0)
2128			break;
2129	}
2130}
2131
2132/*
2133 * update item key.
2134 *
2135 * This function isn't completely safe. It's the caller's responsibility
2136 * to ensure that the new key doesn't break the key ordering.
2137 */
2138void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info,
2139			     struct btrfs_path *path,
2140			     const struct btrfs_key *new_key)
2141{
2142	struct btrfs_disk_key disk_key;
2143	struct extent_buffer *eb;
2144	int slot;
2145
2146	eb = path->nodes[0];
2147	slot = path->slots[0];
2148	if (slot > 0) {
2149		btrfs_item_key(eb, &disk_key, slot - 1);
2150		if (unlikely(comp_keys(&disk_key, new_key) >= 0)) {
2151			btrfs_crit(fs_info,
2152		"slot %u key (%llu %u %llu) new key (%llu %u %llu)",
2153				   slot, btrfs_disk_key_objectid(&disk_key),
2154				   btrfs_disk_key_type(&disk_key),
2155				   btrfs_disk_key_offset(&disk_key),
2156				   new_key->objectid, new_key->type,
2157				   new_key->offset);
2158			btrfs_print_leaf(eb);
2159			BUG();
2160		}
2161	}
2162	if (slot < btrfs_header_nritems(eb) - 1) {
2163		btrfs_item_key(eb, &disk_key, slot + 1);
2164		if (unlikely(comp_keys(&disk_key, new_key) <= 0)) {
2165			btrfs_crit(fs_info,
2166		"slot %u key (%llu %u %llu) new key (%llu %u %llu)",
2167				   slot, btrfs_disk_key_objectid(&disk_key),
2168				   btrfs_disk_key_type(&disk_key),
2169				   btrfs_disk_key_offset(&disk_key),
2170				   new_key->objectid, new_key->type,
2171				   new_key->offset);
2172			btrfs_print_leaf(eb);
2173			BUG();
2174		}
2175	}
2176
2177	btrfs_cpu_key_to_disk(&disk_key, new_key);
2178	btrfs_set_item_key(eb, &disk_key, slot);
2179	btrfs_mark_buffer_dirty(eb);
2180	if (slot == 0)
2181		fixup_low_keys(path, &disk_key, 1);
2182}
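
/*
 * Illustrative usage sketch (hypothetical helper, not part of this file):
 * the typical caller pattern for btrfs_set_item_key_safe(), e.g. moving a
 * file extent item's key offset forward.  The path is assumed to hold a
 * write lock on the leaf, and @new_offset must keep the key ordered.
 */
#if 0
static void example_bump_key_offset(struct btrfs_fs_info *fs_info,
				    struct btrfs_path *path, u64 new_offset)
{
	struct btrfs_key new_key;

	btrfs_item_key_to_cpu(path->nodes[0], &new_key, path->slots[0]);
	new_key.offset = new_offset;
	btrfs_set_item_key_safe(fs_info, path, &new_key);
}
#endif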
2183
2184/*
2185 * Check key order of two sibling extent buffers.
2186 *
2187 * Return true if something is wrong.
2188 * Return false if everything is fine.
2189 *
2190 * Tree-checker only works inside one tree block, thus the following
2191 * corruption can not be detected by tree-checker:
2192 *
2193 * Leaf @left			| Leaf @right
2194 * --------------------------------------------------------------
2195 * | 1 | 2 | 3 | 4 | 5 | f6 |   | 7 | 8 |
2196 *
2197 * Key f6 in leaf @left itself is valid, but not valid when the next
2198 * key in leaf @right is 7.
2199 * This can only be checked at tree block merge time.
2200 * And since tree checker has ensured all key order in each tree block
2201 * is correct, we only need to bother the last key of @left and the first
2202 * key of @right.
2203 */
2204static bool check_sibling_keys(struct extent_buffer *left,
2205			       struct extent_buffer *right)
2206{
2207	struct btrfs_key left_last;
2208	struct btrfs_key right_first;
2209	int level = btrfs_header_level(left);
2210	int nr_left = btrfs_header_nritems(left);
2211	int nr_right = btrfs_header_nritems(right);
2212
2213	/* No key to check in one of the tree blocks */
2214	if (!nr_left || !nr_right)
2215		return false;
2216
2217	if (level) {
2218		btrfs_node_key_to_cpu(left, &left_last, nr_left - 1);
2219		btrfs_node_key_to_cpu(right, &right_first, 0);
2220	} else {
2221		btrfs_item_key_to_cpu(left, &left_last, nr_left - 1);
2222		btrfs_item_key_to_cpu(right, &right_first, 0);
2223	}
2224
2225	if (btrfs_comp_cpu_keys(&left_last, &right_first) >= 0) {
2226		btrfs_crit(left->fs_info,
2227"bad key order, sibling blocks, left last (%llu %u %llu) right first (%llu %u %llu)",
2228			   left_last.objectid, left_last.type,
2229			   left_last.offset, right_first.objectid,
2230			   right_first.type, right_first.offset);
2231		return true;
2232	}
2233	return false;
2234}
2235
2236/*
2237 * try to push data from one node into the next node left in the
2238 * tree.
2239 *
2240 * returns 0 if some ptrs were pushed left, < 0 if there was some horrible
2241 * error, and > 0 if there was no room in the left hand block.
2242 */
2243static int push_node_left(struct btrfs_trans_handle *trans,
2244			  struct extent_buffer *dst,
2245			  struct extent_buffer *src, int empty)
2246{
2247	struct btrfs_fs_info *fs_info = trans->fs_info;
2248	int push_items = 0;
2249	int src_nritems;
2250	int dst_nritems;
2251	int ret = 0;
2252
2253	src_nritems = btrfs_header_nritems(src);
2254	dst_nritems = btrfs_header_nritems(dst);
2255	push_items = BTRFS_NODEPTRS_PER_BLOCK(fs_info) - dst_nritems;
2256	WARN_ON(btrfs_header_generation(src) != trans->transid);
2257	WARN_ON(btrfs_header_generation(dst) != trans->transid);
2258
2259	if (!empty && src_nritems <= 8)
2260		return 1;
2261
2262	if (push_items <= 0)
2263		return 1;
2264
2265	if (empty) {
2266		push_items = min(src_nritems, push_items);
2267		if (push_items < src_nritems) {
2268			/* leave at least 8 pointers in the node if
2269			 * we aren't going to empty it
2270			 */
2271			if (src_nritems - push_items < 8) {
2272				if (push_items <= 8)
2273					return 1;
2274				push_items -= 8;
2275			}
2276		}
2277	} else
2278		push_items = min(src_nritems - 8, push_items);
2279
2280	/* dst is the left eb, src is the middle eb */
2281	if (check_sibling_keys(dst, src)) {
2282		ret = -EUCLEAN;
2283		btrfs_abort_transaction(trans, ret);
2284		return ret;
2285	}
2286	ret = btrfs_tree_mod_log_eb_copy(dst, src, dst_nritems, 0, push_items);
2287	if (ret) {
2288		btrfs_abort_transaction(trans, ret);
2289		return ret;
2290	}
2291	copy_extent_buffer(dst, src,
2292			   btrfs_node_key_ptr_offset(dst_nritems),
2293			   btrfs_node_key_ptr_offset(0),
2294			   push_items * sizeof(struct btrfs_key_ptr));
2295
2296	if (push_items < src_nritems) {
2297		/*
2298		 * Don't call btrfs_tree_mod_log_insert_move() here, key removal
2299		 * was already fully logged by btrfs_tree_mod_log_eb_copy() above.
2300		 */
2301		memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0),
2302				      btrfs_node_key_ptr_offset(push_items),
2303				      (src_nritems - push_items) *
2304				      sizeof(struct btrfs_key_ptr));
2305	}
2306	btrfs_set_header_nritems(src, src_nritems - push_items);
2307	btrfs_set_header_nritems(dst, dst_nritems + push_items);
2308	btrfs_mark_buffer_dirty(src);
2309	btrfs_mark_buffer_dirty(dst);
2310
2311	return ret;
2312}
2313
2314/*
2315 * try to push data from one node into the next node right in the
2316 * tree.
2317 *
2318 * returns 0 if some ptrs were pushed, < 0 if there was some horrible
2319 * error, and > 0 if there was no room in the right hand block.
2320 *
2321 * this will only push up to 1/2 the contents of the left node over
2322 */
2323static int balance_node_right(struct btrfs_trans_handle *trans,
2324			      struct extent_buffer *dst,
2325			      struct extent_buffer *src)
2326{
2327	struct btrfs_fs_info *fs_info = trans->fs_info;
2328	int push_items = 0;
2329	int max_push;
2330	int src_nritems;
2331	int dst_nritems;
2332	int ret = 0;
2333
2334	WARN_ON(btrfs_header_generation(src) != trans->transid);
2335	WARN_ON(btrfs_header_generation(dst) != trans->transid);
2336
2337	src_nritems = btrfs_header_nritems(src);
2338	dst_nritems = btrfs_header_nritems(dst);
2339	push_items = BTRFS_NODEPTRS_PER_BLOCK(fs_info) - dst_nritems;
2340	if (push_items <= 0)
2341		return 1;
2342
2343	if (src_nritems < 4)
2344		return 1;
2345
2346	max_push = src_nritems / 2 + 1;
2347	/* don't try to empty the node */
2348	if (max_push >= src_nritems)
2349		return 1;
2350
2351	if (max_push < push_items)
2352		push_items = max_push;
2353
2354	/* dst is the right eb, src is the middle eb */
2355	if (check_sibling_keys(src, dst)) {
2356		ret = -EUCLEAN;
2357		btrfs_abort_transaction(trans, ret);
2358		return ret;
2359	}
2360	ret = btrfs_tree_mod_log_insert_move(dst, push_items, 0, dst_nritems);
2361	BUG_ON(ret < 0);
2362	memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items),
2363				      btrfs_node_key_ptr_offset(0),
2364				      (dst_nritems) *
2365				      sizeof(struct btrfs_key_ptr));
2366
2367	ret = btrfs_tree_mod_log_eb_copy(dst, src, 0, src_nritems - push_items,
2368					 push_items);
2369	if (ret) {
2370		btrfs_abort_transaction(trans, ret);
2371		return ret;
2372	}
2373	copy_extent_buffer(dst, src,
2374			   btrfs_node_key_ptr_offset(0),
2375			   btrfs_node_key_ptr_offset(src_nritems - push_items),
2376			   push_items * sizeof(struct btrfs_key_ptr));
2377
2378	btrfs_set_header_nritems(src, src_nritems - push_items);
2379	btrfs_set_header_nritems(dst, dst_nritems + push_items);
2380
2381	btrfs_mark_buffer_dirty(src);
2382	btrfs_mark_buffer_dirty(dst);
2383
2384	return ret;
2385}
2386
2387/*
2388 * helper function to insert a new root level in the tree.
2389 * A new node is allocated, and a single item is inserted to
2390 * point to the existing root
2391 *
2392 * returns zero on success or < 0 on failure.
2393 */
2394static noinline int insert_new_root(struct btrfs_trans_handle *trans,
2395			   struct btrfs_root *root,
2396			   struct btrfs_path *path, int level)
2397{
2398	struct btrfs_fs_info *fs_info = root->fs_info;
2399	u64 lower_gen;
2400	struct extent_buffer *lower;
2401	struct extent_buffer *c;
2402	struct extent_buffer *old;
2403	struct btrfs_disk_key lower_key;
2404	int ret;
2405
2406	BUG_ON(path->nodes[level]);
2407	BUG_ON(path->nodes[level-1] != root->node);
2408
2409	lower = path->nodes[level-1];
2410	if (level == 1)
2411		btrfs_item_key(lower, &lower_key, 0);
2412	else
2413		btrfs_node_key(lower, &lower_key, 0);
2414
2415	c = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
2416				   &lower_key, level, root->node->start, 0,
2417				   BTRFS_NESTING_NEW_ROOT);
2418	if (IS_ERR(c))
2419		return PTR_ERR(c);
2420
2421	root_add_used(root, fs_info->nodesize);
2422
2423	btrfs_set_header_nritems(c, 1);
2424	btrfs_set_node_key(c, &lower_key, 0);
2425	btrfs_set_node_blockptr(c, 0, lower->start);
2426	lower_gen = btrfs_header_generation(lower);
2427	WARN_ON(lower_gen != trans->transid);
2428
2429	btrfs_set_node_ptr_generation(c, 0, lower_gen);
2430
2431	btrfs_mark_buffer_dirty(c);
2432
2433	old = root->node;
2434	ret = btrfs_tree_mod_log_insert_root(root->node, c, false);
2435	BUG_ON(ret < 0);
2436	rcu_assign_pointer(root->node, c);
2437
2438	/* the super has an extra ref to root->node */
2439	free_extent_buffer(old);
2440
2441	add_root_to_dirty_list(root);
2442	atomic_inc(&c->refs);
2443	path->nodes[level] = c;
2444	path->locks[level] = BTRFS_WRITE_LOCK;
2445	path->slots[level] = 0;
2446	return 0;
2447}
2448
2449/*
2450 * worker function to insert a single pointer in a node.
2451 * the node should have enough room for the pointer already
2452 *
2453 * slot and level indicate where you want the key to go, and
2454 * blocknr is the block the key points to.
2455 */
2456static void insert_ptr(struct btrfs_trans_handle *trans,
2457		       struct btrfs_path *path,
2458		       struct btrfs_disk_key *key, u64 bytenr,
2459		       int slot, int level)
2460{
2461	struct extent_buffer *lower;
2462	int nritems;
2463	int ret;
2464
2465	BUG_ON(!path->nodes[level]);
2466	btrfs_assert_tree_locked(path->nodes[level]);
2467	lower = path->nodes[level];
2468	nritems = btrfs_header_nritems(lower);
2469	BUG_ON(slot > nritems);
2470	BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(trans->fs_info));
2471	if (slot != nritems) {
2472		if (level) {
2473			ret = btrfs_tree_mod_log_insert_move(lower, slot + 1,
2474					slot, nritems - slot);
2475			BUG_ON(ret < 0);
2476		}
2477		memmove_extent_buffer(lower,
2478			      btrfs_node_key_ptr_offset(slot + 1),
2479			      btrfs_node_key_ptr_offset(slot),
2480			      (nritems - slot) * sizeof(struct btrfs_key_ptr));
2481	}
2482	if (level) {
2483		ret = btrfs_tree_mod_log_insert_key(lower, slot,
2484					    BTRFS_MOD_LOG_KEY_ADD, GFP_NOFS);
2485		BUG_ON(ret < 0);
2486	}
2487	btrfs_set_node_key(lower, key, slot);
2488	btrfs_set_node_blockptr(lower, slot, bytenr);
2489	WARN_ON(trans->transid == 0);
2490	btrfs_set_node_ptr_generation(lower, slot, trans->transid);
2491	btrfs_set_header_nritems(lower, nritems + 1);
2492	btrfs_mark_buffer_dirty(lower);
2493}
2494
2495/*
2496 * split the node at the specified level in path in two.
2497 * The path is corrected to point to the appropriate node after the split
2498 *
2499 * Before splitting this tries to make some room in the node by pushing
2500 * left and right, if either one works, it returns right away.
2501 *
2502 * returns 0 on success and < 0 on failure
2503 */
2504static noinline int split_node(struct btrfs_trans_handle *trans,
2505			       struct btrfs_root *root,
2506			       struct btrfs_path *path, int level)
2507{
2508	struct btrfs_fs_info *fs_info = root->fs_info;
2509	struct extent_buffer *c;
2510	struct extent_buffer *split;
2511	struct btrfs_disk_key disk_key;
2512	int mid;
2513	int ret;
2514	u32 c_nritems;
2515
2516	c = path->nodes[level];
2517	WARN_ON(btrfs_header_generation(c) != trans->transid);
2518	if (c == root->node) {
2519		/*
2520		 * trying to split the root, let's make a new one
2521		 *
2522		 * tree mod log: we don't log removal of the old root in
2523		 * insert_new_root, because that root buffer will be kept as a
2524		 * normal node. We are going to log removal of half of the
2525		 * elements below with btrfs_tree_mod_log_eb_copy(). We're
2526		 * holding a tree lock on the buffer, which is why we cannot
2527		 * race with other tree_mod_log users.
2528		 */
2529		ret = insert_new_root(trans, root, path, level + 1);
2530		if (ret)
2531			return ret;
2532	} else {
2533		ret = push_nodes_for_insert(trans, root, path, level);
2534		c = path->nodes[level];
2535		if (!ret && btrfs_header_nritems(c) <
2536		    BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 3)
2537			return 0;
2538		if (ret < 0)
2539			return ret;
2540	}
2541
2542	c_nritems = btrfs_header_nritems(c);
2543	mid = (c_nritems + 1) / 2;
2544	btrfs_node_key(c, &disk_key, mid);
2545
2546	split = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
2547				       &disk_key, level, c->start, 0,
2548				       BTRFS_NESTING_SPLIT);
2549	if (IS_ERR(split))
2550		return PTR_ERR(split);
2551
2552	root_add_used(root, fs_info->nodesize);
2553	ASSERT(btrfs_header_level(c) == level);
2554
2555	ret = btrfs_tree_mod_log_eb_copy(split, c, 0, mid, c_nritems - mid);
2556	if (ret) {
2557		btrfs_abort_transaction(trans, ret);
2558		return ret;
2559	}
2560	copy_extent_buffer(split, c,
2561			   btrfs_node_key_ptr_offset(0),
2562			   btrfs_node_key_ptr_offset(mid),
2563			   (c_nritems - mid) * sizeof(struct btrfs_key_ptr));
2564	btrfs_set_header_nritems(split, c_nritems - mid);
2565	btrfs_set_header_nritems(c, mid);
2566
2567	btrfs_mark_buffer_dirty(c);
2568	btrfs_mark_buffer_dirty(split);
2569
2570	insert_ptr(trans, path, &disk_key, split->start,
2571		   path->slots[level + 1] + 1, level + 1);
2572
2573	if (path->slots[level] >= mid) {
2574		path->slots[level] -= mid;
2575		btrfs_tree_unlock(c);
2576		free_extent_buffer(c);
2577		path->nodes[level] = split;
2578		path->slots[level + 1] += 1;
2579	} else {
2580		btrfs_tree_unlock(split);
2581		free_extent_buffer(split);
2582	}
2583	return 0;
2584}
2585
2586/*
2587 * how many bytes are required to store the items in a leaf.  start
2588 * and nr indicate which items in the leaf to check.  This totals up the
2589 * space used both by the item structs and the item data
2590 */
2591static int leaf_space_used(struct extent_buffer *l, int start, int nr)
2592{
2593	struct btrfs_item *start_item;
2594	struct btrfs_item *end_item;
2595	int data_len;
2596	int nritems = btrfs_header_nritems(l);
2597	int end = min(nritems, start + nr) - 1;
2598
2599	if (!nr)
2600		return 0;
2601	start_item = btrfs_item_nr(start);
2602	end_item = btrfs_item_nr(end);
2603	data_len = btrfs_item_offset(l, start_item) +
2604		   btrfs_item_size(l, start_item);
2605	data_len = data_len - btrfs_item_offset(l, end_item);
2606	data_len += sizeof(struct btrfs_item) * nr;
2607	WARN_ON(data_len < 0);
2608	return data_len;
2609}
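
/*
 * Worked example (illustrative numbers only): in a 4096 byte leaf data area
 * holding three items whose data sit at offsets [3900, 4096), [3800, 3900)
 * and [3700, 3800), leaf_space_used(l, 0, 3) is
 * 3900 + 196 - 3700 = 396 data bytes plus 3 * sizeof(struct btrfs_item)
 * for the item headers.
 */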
2610
2611/*
2612 * The space between the end of the leaf items and
2613 * the start of the leaf data.  IOW, how much room
2614 * the leaf has left for both items and data
2615 */
2616noinline int btrfs_leaf_free_space(struct extent_buffer *leaf)
2617{
2618	struct btrfs_fs_info *fs_info = leaf->fs_info;
2619	int nritems = btrfs_header_nritems(leaf);
2620	int ret;
2621
2622	ret = BTRFS_LEAF_DATA_SIZE(fs_info) - leaf_space_used(leaf, 0, nritems);
2623	if (ret < 0) {
2624		btrfs_crit(fs_info,
2625			   "leaf free space ret %d, leaf data size %lu, used %d nritems %d",
2626			   ret,
2627			   (unsigned long) BTRFS_LEAF_DATA_SIZE(fs_info),
2628			   leaf_space_used(leaf, 0, nritems), nritems);
2629	}
2630	return ret;
2631}
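
/*
 * Illustrative usage sketch (hypothetical helper, not part of this file):
 * the space check done before inserting one new item; both the item data
 * and its struct btrfs_item header must fit in the free area.
 */
#if 0
static bool example_item_fits(struct extent_buffer *leaf, u32 data_size)
{
	return btrfs_leaf_free_space(leaf) >=
	       (int)(data_size + sizeof(struct btrfs_item));
}
#endif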
2632
2633/*
2634 * min slot controls the lowest index we're willing to push to the
2635 * right.  We'll push up to and including min_slot, but no lower
2636 */
2637static noinline int __push_leaf_right(struct btrfs_path *path,
2638				      int data_size, int empty,
2639				      struct extent_buffer *right,
2640				      int free_space, u32 left_nritems,
2641				      u32 min_slot)
2642{
2643	struct btrfs_fs_info *fs_info = right->fs_info;
2644	struct extent_buffer *left = path->nodes[0];
2645	struct extent_buffer *upper = path->nodes[1];
2646	struct btrfs_map_token token;
2647	struct btrfs_disk_key disk_key;
2648	int slot;
2649	u32 i;
2650	int push_space = 0;
2651	int push_items = 0;
2652	struct btrfs_item *item;
2653	u32 nr;
2654	u32 right_nritems;
2655	u32 data_end;
2656	u32 this_item_size;
2657
2658	if (empty)
2659		nr = 0;
2660	else
2661		nr = max_t(u32, 1, min_slot);
2662
2663	if (path->slots[0] >= left_nritems)
2664		push_space += data_size;
2665
2666	slot = path->slots[1];
2667	i = left_nritems - 1;
2668	while (i >= nr) {
2669		item = btrfs_item_nr(i);
2670
2671		if (!empty && push_items > 0) {
2672			if (path->slots[0] > i)
2673				break;
2674			if (path->slots[0] == i) {
2675				int space = btrfs_leaf_free_space(left);
2676
2677				if (space + push_space * 2 > free_space)
2678					break;
2679			}
2680		}
2681
2682		if (path->slots[0] == i)
2683			push_space += data_size;
2684
2685		this_item_size = btrfs_item_size(left, item);
2686		if (this_item_size + sizeof(*item) + push_space > free_space)
2687			break;
2688
2689		push_items++;
2690		push_space += this_item_size + sizeof(*item);
2691		if (i == 0)
2692			break;
2693		i--;
2694	}
2695
2696	if (push_items == 0)
2697		goto out_unlock;
2698
2699	WARN_ON(!empty && push_items == left_nritems);
2700
2701	/* push left to right */
2702	right_nritems = btrfs_header_nritems(right);
2703
2704	push_space = btrfs_item_end_nr(left, left_nritems - push_items);
2705	push_space -= leaf_data_end(left);
2706
2707	/* make room in the right data area */
2708	data_end = leaf_data_end(right);
2709	memmove_extent_buffer(right,
2710			      BTRFS_LEAF_DATA_OFFSET + data_end - push_space,
2711			      BTRFS_LEAF_DATA_OFFSET + data_end,
2712			      BTRFS_LEAF_DATA_SIZE(fs_info) - data_end);
2713
2714	/* copy from the left data area */
2715	copy_extent_buffer(right, left, BTRFS_LEAF_DATA_OFFSET +
2716		     BTRFS_LEAF_DATA_SIZE(fs_info) - push_space,
2717		     BTRFS_LEAF_DATA_OFFSET + leaf_data_end(left),
2718		     push_space);
2719
2720	memmove_extent_buffer(right, btrfs_item_nr_offset(push_items),
2721			      btrfs_item_nr_offset(0),
2722			      right_nritems * sizeof(struct btrfs_item));
2723
2724	/* copy the items from left to right */
2725	copy_extent_buffer(right, left, btrfs_item_nr_offset(0),
2726		   btrfs_item_nr_offset(left_nritems - push_items),
2727		   push_items * sizeof(struct btrfs_item));
2728
2729	/* update the item pointers */
2730	btrfs_init_map_token(&token, right);
2731	right_nritems += push_items;
2732	btrfs_set_header_nritems(right, right_nritems);
2733	push_space = BTRFS_LEAF_DATA_SIZE(fs_info);
2734	for (i = 0; i < right_nritems; i++) {
2735		item = btrfs_item_nr(i);
2736		push_space -= btrfs_token_item_size(&token, item);
2737		btrfs_set_token_item_offset(&token, item, push_space);
2738	}
2739
2740	left_nritems -= push_items;
2741	btrfs_set_header_nritems(left, left_nritems);
2742
2743	if (left_nritems)
2744		btrfs_mark_buffer_dirty(left);
2745	else
2746		btrfs_clean_tree_block(left);
2747
2748	btrfs_mark_buffer_dirty(right);
2749
2750	btrfs_item_key(right, &disk_key, 0);
2751	btrfs_set_node_key(upper, &disk_key, slot + 1);
2752	btrfs_mark_buffer_dirty(upper);
2753
2754	/* then fixup the leaf pointer in the path */
2755	if (path->slots[0] >= left_nritems) {
2756		path->slots[0] -= left_nritems;
2757		if (btrfs_header_nritems(path->nodes[0]) == 0)
2758			btrfs_clean_tree_block(path->nodes[0]);
2759		btrfs_tree_unlock(path->nodes[0]);
2760		free_extent_buffer(path->nodes[0]);
2761		path->nodes[0] = right;
2762		path->slots[1] += 1;
2763	} else {
2764		btrfs_tree_unlock(right);
2765		free_extent_buffer(right);
2766	}
2767	return 0;
2768
2769out_unlock:
2770	btrfs_tree_unlock(right);
2771	free_extent_buffer(right);
2772	return 1;
2773}
2774
2775/*
2776 * push some data in the path leaf to the right, trying to free up at
2777 * least data_size bytes.  returns zero if the push worked, nonzero otherwise
2778 *
2779 * returns 1 if the push failed because the other node didn't have enough
2780 * room, 0 if everything worked out and < 0 if there were major errors.
2781 *
2782 * this will push starting from min_slot to the end of the leaf.  It won't
2783 * push any slot lower than min_slot
2784 */
2785static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
2786			   *root, struct btrfs_path *path,
2787			   int min_data_size, int data_size,
2788			   int empty, u32 min_slot)
2789{
2790	struct extent_buffer *left = path->nodes[0];
2791	struct extent_buffer *right;
2792	struct extent_buffer *upper;
2793	int slot;
2794	int free_space;
2795	u32 left_nritems;
2796	int ret;
2797
2798	if (!path->nodes[1])
2799		return 1;
2800
2801	slot = path->slots[1];
2802	upper = path->nodes[1];
2803	if (slot >= btrfs_header_nritems(upper) - 1)
2804		return 1;
2805
2806	btrfs_assert_tree_locked(path->nodes[1]);
2807
2808	right = btrfs_read_node_slot(upper, slot + 1);
2809	/*
2810	 * If slot + 1 is not valid or we failed to read the right node,
2811	 * it's no big deal, just return.
2812	 */
2813	if (IS_ERR(right))
2814		return 1;
2815
2816	__btrfs_tree_lock(right, BTRFS_NESTING_RIGHT);
2817
2818	free_space = btrfs_leaf_free_space(right);
2819	if (free_space < data_size)
2820		goto out_unlock;
2821
2822	/* cow and double check */
2823	ret = btrfs_cow_block(trans, root, right, upper,
2824			      slot + 1, &right, BTRFS_NESTING_RIGHT_COW);
2825	if (ret)
2826		goto out_unlock;
2827
2828	free_space = btrfs_leaf_free_space(right);
2829	if (free_space < data_size)
2830		goto out_unlock;
2831
2832	left_nritems = btrfs_header_nritems(left);
2833	if (left_nritems == 0)
2834		goto out_unlock;
2835
2836	if (check_sibling_keys(left, right)) {
2837		ret = -EUCLEAN;
2838		btrfs_tree_unlock(right);
2839		free_extent_buffer(right);
2840		return ret;
2841	}
2842	if (path->slots[0] == left_nritems && !empty) {
2843		/* Key greater than all keys in the leaf, right neighbor has
2844		 * enough room for it and we're not emptying our leaf to delete
2845		 * it, therefore use right neighbor to insert the new item and
2846		 * no need to touch/dirty our left leaf. */
2847		btrfs_tree_unlock(left);
2848		free_extent_buffer(left);
2849		path->nodes[0] = right;
2850		path->slots[0] = 0;
2851		path->slots[1]++;
2852		return 0;
2853	}
2854
2855	return __push_leaf_right(path, min_data_size, empty,
2856				right, free_space, left_nritems, min_slot);
2857out_unlock:
2858	btrfs_tree_unlock(right);
2859	free_extent_buffer(right);
2860	return 1;
2861}
2862
2863/*
2864 * push some data in the path leaf to the left, trying to free up at
2865 * least data_size bytes.  returns zero if the push worked, nonzero otherwise
2866 *
2867 * max_slot can put a limit on how far into the leaf we'll push items.  The
2868 * item at 'max_slot' won't be touched.  Use (u32)-1 to make us do all the
2869 * items
2870 */
2871static noinline int __push_leaf_left(struct btrfs_path *path, int data_size,
2872				     int empty, struct extent_buffer *left,
2873				     int free_space, u32 right_nritems,
2874				     u32 max_slot)
2875{
2876	struct btrfs_fs_info *fs_info = left->fs_info;
2877	struct btrfs_disk_key disk_key;
2878	struct extent_buffer *right = path->nodes[0];
2879	int i;
2880	int push_space = 0;
2881	int push_items = 0;
2882	struct btrfs_item *item;
2883	u32 old_left_nritems;
2884	u32 nr;
2885	int ret = 0;
2886	u32 this_item_size;
2887	u32 old_left_item_size;
2888	struct btrfs_map_token token;
2889
2890	if (empty)
2891		nr = min(right_nritems, max_slot);
2892	else
2893		nr = min(right_nritems - 1, max_slot);
2894
2895	for (i = 0; i < nr; i++) {
2896		item = btrfs_item_nr(i);
2897
2898		if (!empty && push_items > 0) {
2899			if (path->slots[0] < i)
2900				break;
2901			if (path->slots[0] == i) {
2902				int space = btrfs_leaf_free_space(right);
2903
2904				if (space + push_space * 2 > free_space)
2905					break;
2906			}
2907		}
2908
2909		if (path->slots[0] == i)
2910			push_space += data_size;
2911
2912		this_item_size = btrfs_item_size(right, item);
2913		if (this_item_size + sizeof(*item) + push_space > free_space)
2914			break;
2915
2916		push_items++;
2917		push_space += this_item_size + sizeof(*item);
2918	}
2919
2920	if (push_items == 0) {
2921		ret = 1;
2922		goto out;
2923	}
2924	WARN_ON(!empty && push_items == btrfs_header_nritems(right));
2925
2926	/* push data from right to left */
2927	copy_extent_buffer(left, right,
2928			   btrfs_item_nr_offset(btrfs_header_nritems(left)),
2929			   btrfs_item_nr_offset(0),
2930			   push_items * sizeof(struct btrfs_item));
2931
2932	push_space = BTRFS_LEAF_DATA_SIZE(fs_info) -
2933		     btrfs_item_offset_nr(right, push_items - 1);
2934
2935	copy_extent_buffer(left, right, BTRFS_LEAF_DATA_OFFSET +
2936		     leaf_data_end(left) - push_space,
2937		     BTRFS_LEAF_DATA_OFFSET +
2938		     btrfs_item_offset_nr(right, push_items - 1),
2939		     push_space);
2940	old_left_nritems = btrfs_header_nritems(left);
2941	BUG_ON(old_left_nritems <= 0);
2942
2943	btrfs_init_map_token(&token, left);
2944	old_left_item_size = btrfs_item_offset_nr(left, old_left_nritems - 1);
2945	for (i = old_left_nritems; i < old_left_nritems + push_items; i++) {
2946		u32 ioff;
2947
2948		item = btrfs_item_nr(i);
2949
2950		ioff = btrfs_token_item_offset(&token, item);
2951		btrfs_set_token_item_offset(&token, item,
2952		      ioff - (BTRFS_LEAF_DATA_SIZE(fs_info) - old_left_item_size));
2953	}
2954	btrfs_set_header_nritems(left, old_left_nritems + push_items);
2955
2956	/* fixup right node */
2957	if (push_items > right_nritems)
2958		WARN(1, KERN_CRIT "push items %d nr %u\n", push_items,
2959		       right_nritems);
2960
2961	if (push_items < right_nritems) {
2962		push_space = btrfs_item_offset_nr(right, push_items - 1) -
2963						  leaf_data_end(right);
2964		memmove_extent_buffer(right, BTRFS_LEAF_DATA_OFFSET +
2965				      BTRFS_LEAF_DATA_SIZE(fs_info) - push_space,
2966				      BTRFS_LEAF_DATA_OFFSET +
2967				      leaf_data_end(right), push_space);
2968
2969		memmove_extent_buffer(right, btrfs_item_nr_offset(0),
2970			      btrfs_item_nr_offset(push_items),
2971			     (btrfs_header_nritems(right) - push_items) *
2972			     sizeof(struct btrfs_item));
2973	}
2974
2975	btrfs_init_map_token(&token, right);
2976	right_nritems -= push_items;
2977	btrfs_set_header_nritems(right, right_nritems);
2978	push_space = BTRFS_LEAF_DATA_SIZE(fs_info);
2979	for (i = 0; i < right_nritems; i++) {
2980		item = btrfs_item_nr(i);
2981
2982		push_space = push_space - btrfs_token_item_size(&token, item);
2983		btrfs_set_token_item_offset(&token, item, push_space);
2984	}
2985
2986	btrfs_mark_buffer_dirty(left);
2987	if (right_nritems)
2988		btrfs_mark_buffer_dirty(right);
2989	else
2990		btrfs_clean_tree_block(right);
2991
2992	btrfs_item_key(right, &disk_key, 0);
2993	fixup_low_keys(path, &disk_key, 1);
2994
2995	/* then fixup the leaf pointer in the path */
2996	if (path->slots[0] < push_items) {
2997		path->slots[0] += old_left_nritems;
2998		btrfs_tree_unlock(path->nodes[0]);
2999		free_extent_buffer(path->nodes[0]);
3000		path->nodes[0] = left;
3001		path->slots[1] -= 1;
3002	} else {
3003		btrfs_tree_unlock(left);
3004		free_extent_buffer(left);
3005		path->slots[0] -= push_items;
3006	}
3007	BUG_ON(path->slots[0] < 0);
3008	return ret;
3009out:
3010	btrfs_tree_unlock(left);
3011	free_extent_buffer(left);
3012	return ret;
3013}
3014
3015/*
3016 * push some data in the path leaf to the left, trying to free up at
3017 * least data_size bytes.  returns zero if the push worked, nonzero otherwise
3018 *
3019 * max_slot can put a limit on how far into the leaf we'll push items.  The
3020 * item at 'max_slot' won't be touched.  Use (u32)-1 to make us push all the
3021 * items
3022 */
3023static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
3024			  *root, struct btrfs_path *path, int min_data_size,
3025			  int data_size, int empty, u32 max_slot)
3026{
3027	struct extent_buffer *right = path->nodes[0];
3028	struct extent_buffer *left;
3029	int slot;
3030	int free_space;
3031	u32 right_nritems;
3032	int ret = 0;
3033
3034	slot = path->slots[1];
3035	if (slot == 0)
3036		return 1;
3037	if (!path->nodes[1])
3038		return 1;
3039
3040	right_nritems = btrfs_header_nritems(right);
3041	if (right_nritems == 0)
3042		return 1;
3043
3044	btrfs_assert_tree_locked(path->nodes[1]);
3045
3046	left = btrfs_read_node_slot(path->nodes[1], slot - 1);
3047	/*
3048	 * If slot - 1 is not valid or we failed to read the left node,
3049	 * it's no big deal, just return.
3050	 */
3051	if (IS_ERR(left))
3052		return 1;
3053
3054	__btrfs_tree_lock(left, BTRFS_NESTING_LEFT);
3055
3056	free_space = btrfs_leaf_free_space(left);
3057	if (free_space < data_size) {
3058		ret = 1;
3059		goto out;
3060	}
3061
3062	/* cow and double check */
3063	ret = btrfs_cow_block(trans, root, left,
3064			      path->nodes[1], slot - 1, &left,
3065			      BTRFS_NESTING_LEFT_COW);
3066	if (ret) {
3067		/* we hit -ENOSPC, but it isn't fatal here */
3068		if (ret == -ENOSPC)
3069			ret = 1;
3070		goto out;
3071	}
3072
3073	free_space = btrfs_leaf_free_space(left);
3074	if (free_space < data_size) {
3075		ret = 1;
3076		goto out;
3077	}
3078
3079	if (check_sibling_keys(left, right)) {
3080		ret = -EUCLEAN;
3081		goto out;
3082	}
3083	return __push_leaf_left(path, min_data_size,
3084			       empty, left, free_space, right_nritems,
3085			       max_slot);
3086out:
3087	btrfs_tree_unlock(left);
3088	free_extent_buffer(left);
3089	return ret;
3090}
3091
3092/*
3093 * split the path's leaf in two, making sure there is at least data_size
3094 * available for the resulting leaf level of the path.
3095 */
3096static noinline void copy_for_split(struct btrfs_trans_handle *trans,
3097				    struct btrfs_path *path,
3098				    struct extent_buffer *l,
3099				    struct extent_buffer *right,
3100				    int slot, int mid, int nritems)
3101{
3102	struct btrfs_fs_info *fs_info = trans->fs_info;
3103	int data_copy_size;
3104	int rt_data_off;
3105	int i;
3106	struct btrfs_disk_key disk_key;
3107	struct btrfs_map_token token;
3108
3109	nritems = nritems - mid;
3110	btrfs_set_header_nritems(right, nritems);
3111	data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(l);
3112
3113	copy_extent_buffer(right, l, btrfs_item_nr_offset(0),
3114			   btrfs_item_nr_offset(mid),
3115			   nritems * sizeof(struct btrfs_item));
3116
3117	copy_extent_buffer(right, l,
3118		     BTRFS_LEAF_DATA_OFFSET + BTRFS_LEAF_DATA_SIZE(fs_info) -
3119		     data_copy_size, BTRFS_LEAF_DATA_OFFSET +
3120		     leaf_data_end(l), data_copy_size);
3121
3122	rt_data_off = BTRFS_LEAF_DATA_SIZE(fs_info) - btrfs_item_end_nr(l, mid);
3123
3124	btrfs_init_map_token(&token, right);
3125	for (i = 0; i < nritems; i++) {
3126		struct btrfs_item *item = btrfs_item_nr(i);
3127		u32 ioff;
3128
3129		ioff = btrfs_token_item_offset(&token, item);
3130		btrfs_set_token_item_offset(&token, item, ioff + rt_data_off);
3131	}
3132
3133	btrfs_set_header_nritems(l, mid);
3134	btrfs_item_key(right, &disk_key, 0);
3135	insert_ptr(trans, path, &disk_key, right->start, path->slots[1] + 1, 1);
3136
3137	btrfs_mark_buffer_dirty(right);
3138	btrfs_mark_buffer_dirty(l);
3139	BUG_ON(path->slots[0] != slot);
3140
3141	if (mid <= slot) {
3142		btrfs_tree_unlock(path->nodes[0]);
3143		free_extent_buffer(path->nodes[0]);
3144		path->nodes[0] = right;
3145		path->slots[0] -= mid;
3146		path->slots[1] += 1;
3147	} else {
3148		btrfs_tree_unlock(right);
3149		free_extent_buffer(right);
3150	}
3151
3152	BUG_ON(path->slots[0] < 0);
3153}
3154
3155/*
3156 * double splits happen when we need to insert a big item in the middle
3157 * of a leaf.  A double split can leave us with 3 mostly empty leaves:
3158 * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ]
3159 *          A                 B                 C
3160 *
3161 * We avoid this by trying to push the items on either side of our target
3162 * into the adjacent leaves.  If all goes well we can avoid the double split
3163 * completely.
3164 */
3165static noinline int push_for_double_split(struct btrfs_trans_handle *trans,
3166					  struct btrfs_root *root,
3167					  struct btrfs_path *path,
3168					  int data_size)
3169{
3170	int ret;
3171	int progress = 0;
3172	int slot;
3173	u32 nritems;
3174	int space_needed = data_size;
3175
3176	slot = path->slots[0];
3177	if (slot < btrfs_header_nritems(path->nodes[0]))
3178		space_needed -= btrfs_leaf_free_space(path->nodes[0]);
3179
3180	/*
3181	 * try to push all the items after our slot into the
3182	 * right leaf
3183	 */
3184	ret = push_leaf_right(trans, root, path, 1, space_needed, 0, slot);
3185	if (ret < 0)
3186		return ret;
3187
3188	if (ret == 0)
3189		progress++;
3190
3191	nritems = btrfs_header_nritems(path->nodes[0]);
3192	/*
3193	 * our goal is to get our slot at the start or end of a leaf.  If
3194	 * we've done so we're done
3195	 */
3196	if (path->slots[0] == 0 || path->slots[0] == nritems)
3197		return 0;
3198
3199	if (btrfs_leaf_free_space(path->nodes[0]) >= data_size)
3200		return 0;
3201
3202	/* try to push all the items before our slot into the previous leaf */
3203	slot = path->slots[0];
3204	space_needed = data_size;
3205	if (slot > 0)
3206		space_needed -= btrfs_leaf_free_space(path->nodes[0]);
3207	ret = push_leaf_left(trans, root, path, 1, space_needed, 0, slot);
3208	if (ret < 0)
3209		return ret;
3210
3211	if (ret == 0)
3212		progress++;
3213
3214	if (progress)
3215		return 0;
3216	return 1;
3217}
3218
3219/*
3220 * split the path's leaf in two, making sure there is at least data_size
3221 * available for the resulting leaf level of the path.
3222 *
3223 * returns 0 if all went well and < 0 on failure.
3224 */
3225static noinline int split_leaf(struct btrfs_trans_handle *trans,
3226			       struct btrfs_root *root,
3227			       const struct btrfs_key *ins_key,
3228			       struct btrfs_path *path, int data_size,
3229			       int extend)
3230{
3231	struct btrfs_disk_key disk_key;
3232	struct extent_buffer *l;
3233	u32 nritems;
3234	int mid;
3235	int slot;
3236	struct extent_buffer *right;
3237	struct btrfs_fs_info *fs_info = root->fs_info;
3238	int ret = 0;
3239	int wret;
3240	int split;
3241	int num_doubles = 0;
3242	int tried_avoid_double = 0;
3243
3244	l = path->nodes[0];
3245	slot = path->slots[0];
3246	if (extend && data_size + btrfs_item_size_nr(l, slot) +
3247	    sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(fs_info))
3248		return -EOVERFLOW;
3249
3250	/* first try to make some room by pushing left and right */
3251	if (data_size && path->nodes[1]) {
3252		int space_needed = data_size;
3253
3254		if (slot < btrfs_header_nritems(l))
3255			space_needed -= btrfs_leaf_free_space(l);
3256
3257		wret = push_leaf_right(trans, root, path, space_needed,
3258				       space_needed, 0, 0);
3259		if (wret < 0)
3260			return wret;
3261		if (wret) {
3262			space_needed = data_size;
3263			if (slot > 0)
3264				space_needed -= btrfs_leaf_free_space(l);
3265			wret = push_leaf_left(trans, root, path, space_needed,
3266					      space_needed, 0, (u32)-1);
3267			if (wret < 0)
3268				return wret;
3269		}
3270		l = path->nodes[0];
3271
3272		/* did the pushes work? */
3273		if (btrfs_leaf_free_space(l) >= data_size)
3274			return 0;
3275	}
3276
3277	if (!path->nodes[1]) {
3278		ret = insert_new_root(trans, root, path, 1);
3279		if (ret)
3280			return ret;
3281	}
3282again:
3283	split = 1;
3284	l = path->nodes[0];
3285	slot = path->slots[0];
3286	nritems = btrfs_header_nritems(l);
3287	mid = (nritems + 1) / 2;
3288
3289	if (mid <= slot) {
3290		if (nritems == 1 ||
3291		    leaf_space_used(l, mid, nritems - mid) + data_size >
3292			BTRFS_LEAF_DATA_SIZE(fs_info)) {
3293			if (slot >= nritems) {
3294				split = 0;
3295			} else {
3296				mid = slot;
3297				if (mid != nritems &&
3298				    leaf_space_used(l, mid, nritems - mid) +
3299				    data_size > BTRFS_LEAF_DATA_SIZE(fs_info)) {
3300					if (data_size && !tried_avoid_double)
3301						goto push_for_double;
3302					split = 2;
3303				}
3304			}
3305		}
3306	} else {
3307		if (leaf_space_used(l, 0, mid) + data_size >
3308			BTRFS_LEAF_DATA_SIZE(fs_info)) {
3309			if (!extend && data_size && slot == 0) {
3310				split = 0;
3311			} else if ((extend || !data_size) && slot == 0) {
3312				mid = 1;
3313			} else {
3314				mid = slot;
3315				if (mid != nritems &&
3316				    leaf_space_used(l, mid, nritems - mid) +
3317				    data_size > BTRFS_LEAF_DATA_SIZE(fs_info)) {
3318					if (data_size && !tried_avoid_double)
3319						goto push_for_double;
3320					split = 2;
3321				}
3322			}
3323		}
3324	}
3325
3326	if (split == 0)
3327		btrfs_cpu_key_to_disk(&disk_key, ins_key);
3328	else
3329		btrfs_item_key(l, &disk_key, mid);
3330
3331	/*
3332	 * We have to use BTRFS_NESTING_NEW_ROOT here if we've done a double
3333	 * split, because we're only allowed to have MAX_LOCKDEP_SUBCLASSES
3334	 * subclasses, which is 8 at the time of this patch, and we've maxed it
3335	 * out.  In the future we could add a
3336	 * BTRFS_NESTING_SPLIT_THE_SPLITTENING if we need to, but for now just
3337	 * use BTRFS_NESTING_NEW_ROOT.
3338	 */
3339	right = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
3340				       &disk_key, 0, l->start, 0,
3341				       num_doubles ? BTRFS_NESTING_NEW_ROOT :
3342				       BTRFS_NESTING_SPLIT);
3343	if (IS_ERR(right))
3344		return PTR_ERR(right);
3345
3346	root_add_used(root, fs_info->nodesize);
3347
3348	if (split == 0) {
3349		if (mid <= slot) {
3350			btrfs_set_header_nritems(right, 0);
3351			insert_ptr(trans, path, &disk_key,
3352				   right->start, path->slots[1] + 1, 1);
3353			btrfs_tree_unlock(path->nodes[0]);
3354			free_extent_buffer(path->nodes[0]);
3355			path->nodes[0] = right;
3356			path->slots[0] = 0;
3357			path->slots[1] += 1;
3358		} else {
3359			btrfs_set_header_nritems(right, 0);
3360			insert_ptr(trans, path, &disk_key,
3361				   right->start, path->slots[1], 1);
3362			btrfs_tree_unlock(path->nodes[0]);
3363			free_extent_buffer(path->nodes[0]);
3364			path->nodes[0] = right;
3365			path->slots[0] = 0;
3366			if (path->slots[1] == 0)
3367				fixup_low_keys(path, &disk_key, 1);
3368		}
3369		/*
3370		 * We create a new leaf 'right' with room for the required
3371		 * ins_len, and we'll do btrfs_mark_buffer_dirty() on this leaf
3372		 * after copying the new item's content into it.
3373		 */
3374		return ret;
3375	}
3376
3377	copy_for_split(trans, path, l, right, slot, mid, nritems);
3378
3379	if (split == 2) {
3380		BUG_ON(num_doubles != 0);
3381		num_doubles++;
3382		goto again;
3383	}
3384
3385	return 0;
3386
3387push_for_double:
3388	push_for_double_split(trans, root, path, data_size);
3389	tried_avoid_double = 1;
3390	if (btrfs_leaf_free_space(path->nodes[0]) >= data_size)
3391		return 0;
3392	goto again;
3393}
3394
3395static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
3396					 struct btrfs_root *root,
3397					 struct btrfs_path *path, int ins_len)
3398{
3399	struct btrfs_key key;
3400	struct extent_buffer *leaf;
3401	struct btrfs_file_extent_item *fi;
3402	u64 extent_len = 0;
3403	u32 item_size;
3404	int ret;
3405
3406	leaf = path->nodes[0];
3407	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3408
3409	BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY &&
3410	       key.type != BTRFS_EXTENT_CSUM_KEY);
3411
3412	if (btrfs_leaf_free_space(leaf) >= ins_len)
3413		return 0;
3414
3415	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
3416	if (key.type == BTRFS_EXTENT_DATA_KEY) {
3417		fi = btrfs_item_ptr(leaf, path->slots[0],
3418				    struct btrfs_file_extent_item);
3419		extent_len = btrfs_file_extent_num_bytes(leaf, fi);
3420	}
3421	btrfs_release_path(path);
3422
3423	path->keep_locks = 1;
3424	path->search_for_split = 1;
3425	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
3426	path->search_for_split = 0;
3427	if (ret > 0)
3428		ret = -EAGAIN;
3429	if (ret < 0)
3430		goto err;
3431
3432	ret = -EAGAIN;
3433	leaf = path->nodes[0];
3434	/* if our item isn't there, return now */
3435	if (item_size != btrfs_item_size_nr(leaf, path->slots[0]))
3436		goto err;
3437
3438	/* the leaf has changed, it now has room, return now */
3439	if (btrfs_leaf_free_space(path->nodes[0]) >= ins_len)
3440		goto err;
3441
3442	if (key.type == BTRFS_EXTENT_DATA_KEY) {
3443		fi = btrfs_item_ptr(leaf, path->slots[0],
3444				    struct btrfs_file_extent_item);
3445		if (extent_len != btrfs_file_extent_num_bytes(leaf, fi))
3446			goto err;
3447	}
3448
3449	ret = split_leaf(trans, root, &key, path, ins_len, 1);
3450	if (ret)
3451		goto err;
3452
3453	path->keep_locks = 0;
3454	btrfs_unlock_up_safe(path, 1);
3455	return 0;
3456err:
3457	path->keep_locks = 0;
3458	return ret;
3459}
3460
3461static noinline int split_item(struct btrfs_path *path,
3462			       const struct btrfs_key *new_key,
3463			       unsigned long split_offset)
3464{
3465	struct extent_buffer *leaf;
3466	struct btrfs_item *item;
3467	struct btrfs_item *new_item;
3468	int slot;
3469	char *buf;
3470	u32 nritems;
3471	u32 item_size;
3472	u32 orig_offset;
3473	struct btrfs_disk_key disk_key;
3474
3475	leaf = path->nodes[0];
3476	BUG_ON(btrfs_leaf_free_space(leaf) < sizeof(struct btrfs_item));
3477
3478	item = btrfs_item_nr(path->slots[0]);
3479	orig_offset = btrfs_item_offset(leaf, item);
3480	item_size = btrfs_item_size(leaf, item);
3481
3482	buf = kmalloc(item_size, GFP_NOFS);
3483	if (!buf)
3484		return -ENOMEM;
3485
3486	read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf,
3487			    path->slots[0]), item_size);
3488
3489	slot = path->slots[0] + 1;
3490	nritems = btrfs_header_nritems(leaf);
3491	if (slot != nritems) {
3492		/* shift the items */
3493		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + 1),
3494				btrfs_item_nr_offset(slot),
3495				(nritems - slot) * sizeof(struct btrfs_item));
3496	}
3497
3498	btrfs_cpu_key_to_disk(&disk_key, new_key);
3499	btrfs_set_item_key(leaf, &disk_key, slot);
3500
3501	new_item = btrfs_item_nr(slot);
3502
3503	btrfs_set_item_offset(leaf, new_item, orig_offset);
3504	btrfs_set_item_size(leaf, new_item, item_size - split_offset);
3505
3506	btrfs_set_item_offset(leaf, item,
3507			      orig_offset + item_size - split_offset);
3508	btrfs_set_item_size(leaf, item, split_offset);
3509
3510	btrfs_set_header_nritems(leaf, nritems + 1);
3511
3512	/* write the data for the start of the original item */
3513	write_extent_buffer(leaf, buf,
3514			    btrfs_item_ptr_offset(leaf, path->slots[0]),
3515			    split_offset);
3516
3517	/* write the data for the new item */
3518	write_extent_buffer(leaf, buf + split_offset,
3519			    btrfs_item_ptr_offset(leaf, slot),
3520			    item_size - split_offset);
3521	btrfs_mark_buffer_dirty(leaf);
3522
3523	BUG_ON(btrfs_leaf_free_space(leaf) < 0);
3524	kfree(buf);
3525	return 0;
3526}
3527
3528/*
3529 * This function splits a single item into two items,
3530 * giving 'new_key' to the new item and splitting the
3531 * old one at split_offset (from the start of the item).
3532 *
3533 * The path may be released by this operation.  After
3534 * the split, the path is pointing to the old item.  The
3535 * new item is going to be in the same node as the old one.
3536 *
3537 * Note, the item being split must be small enough to live alone on
3538 * a tree block with room for one extra struct btrfs_item
3539 *
3540 * This allows us to split the item in place, keeping a lock on the
3541 * leaf the entire time.
3542 */
3543int btrfs_split_item(struct btrfs_trans_handle *trans,
3544		     struct btrfs_root *root,
3545		     struct btrfs_path *path,
3546		     const struct btrfs_key *new_key,
3547		     unsigned long split_offset)
3548{
3549	int ret;
3550	ret = setup_leaf_for_split(trans, root, path,
3551				   sizeof(struct btrfs_item));
3552	if (ret)
3553		return ret;
3554
3555	ret = split_item(path, new_key, split_offset);
3556	return ret;
3557}
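
/*
 * Illustrative usage sketch (hypothetical helper, not part of this file):
 * split the item the path points at so that the bytes from @split_offset
 * onwards become a second item carrying the same key with @new_offset.
 */
#if 0
static int example_split(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root, struct btrfs_path *path,
			 u64 new_offset, unsigned long split_offset)
{
	struct btrfs_key new_key;

	btrfs_item_key_to_cpu(path->nodes[0], &new_key, path->slots[0]);
	new_key.offset = new_offset;
	/* afterwards the path still points at the (shortened) old item */
	return btrfs_split_item(trans, root, path, &new_key, split_offset);
}
#endif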
3558
3559/*
3560 * This function duplicates an item, giving 'new_key' to the new item.
3561 * It guarantees both items live in the same tree leaf and the new item
3562 * is contiguous with the original item.
3563 *
3564 * This allows us to split a file extent in place, keeping a lock on the
3565 * leaf the entire time.
3566 */
3567int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
3568			 struct btrfs_root *root,
3569			 struct btrfs_path *path,
3570			 const struct btrfs_key *new_key)
3571{
3572	struct extent_buffer *leaf;
3573	int ret;
3574	u32 item_size;
3575
3576	leaf = path->nodes[0];
3577	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
3578	ret = setup_leaf_for_split(trans, root, path,
3579				   item_size + sizeof(struct btrfs_item));
3580	if (ret)
3581		return ret;
3582
3583	path->slots[0]++;
3584	setup_items_for_insert(root, path, new_key, &item_size, 1);
3585	leaf = path->nodes[0];
3586	memcpy_extent_buffer(leaf,
3587			     btrfs_item_ptr_offset(leaf, path->slots[0]),
3588			     btrfs_item_ptr_offset(leaf, path->slots[0] - 1),
3589			     item_size);
3590	return 0;
3591}
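
/*
 * Illustrative usage sketch (hypothetical helper, not part of this file):
 * clone the current item under a new key offset, the typical pattern when
 * splitting a file extent.  On success the path points at the new copy.
 */
#if 0
static int example_duplicate(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root, struct btrfs_path *path,
			     u64 new_offset)
{
	struct btrfs_key new_key;

	btrfs_item_key_to_cpu(path->nodes[0], &new_key, path->slots[0]);
	new_key.offset = new_offset;
	return btrfs_duplicate_item(trans, root, path, &new_key);
}
#endif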
3592
3593/*
3594 * make the item pointed to by the path smaller.  new_size indicates
3595 * how small to make it, and from_end tells us if we just chop bytes
3596 * off the end of the item or if we shift the item to chop bytes off
3597 * the front.
3598 */
3599void btrfs_truncate_item(struct btrfs_path *path, u32 new_size, int from_end)
3600{
3601	int slot;
3602	struct extent_buffer *leaf;
3603	struct btrfs_item *item;
3604	u32 nritems;
3605	unsigned int data_end;
3606	unsigned int old_data_start;
3607	unsigned int old_size;
3608	unsigned int size_diff;
3609	int i;
3610	struct btrfs_map_token token;
3611
3612	leaf = path->nodes[0];
3613	slot = path->slots[0];
3614
3615	old_size = btrfs_item_size_nr(leaf, slot);
3616	if (old_size == new_size)
3617		return;
3618
3619	nritems = btrfs_header_nritems(leaf);
3620	data_end = leaf_data_end(leaf);
3621
3622	old_data_start = btrfs_item_offset_nr(leaf, slot);
3623
3624	size_diff = old_size - new_size;
3625
3626	BUG_ON(slot < 0);
3627	BUG_ON(slot >= nritems);
3628
3629	/*
3630	 * item0..itemN ... dataN.offset..dataN.size .. data0.size
3631	 */
3632	/* first correct the data pointers */
3633	btrfs_init_map_token(&token, leaf);
3634	for (i = slot; i < nritems; i++) {
3635		u32 ioff;
3636		item = btrfs_item_nr(i);
3637
3638		ioff = btrfs_token_item_offset(&token, item);
3639		btrfs_set_token_item_offset(&token, item, ioff + size_diff);
3640	}
3641
3642	/* shift the data */
3643	if (from_end) {
3644		memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
3645			      data_end + size_diff, BTRFS_LEAF_DATA_OFFSET +
3646			      data_end, old_data_start + new_size - data_end);
3647	} else {
3648		struct btrfs_disk_key disk_key;
3649		u64 offset;
3650
3651		btrfs_item_key(leaf, &disk_key, slot);
3652
3653		if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) {
3654			unsigned long ptr;
3655			struct btrfs_file_extent_item *fi;
3656
3657			fi = btrfs_item_ptr(leaf, slot,
3658					    struct btrfs_file_extent_item);
3659			fi = (struct btrfs_file_extent_item *)(
3660			     (unsigned long)fi - size_diff);
3661
3662			if (btrfs_file_extent_type(leaf, fi) ==
3663			    BTRFS_FILE_EXTENT_INLINE) {
3664				ptr = btrfs_item_ptr_offset(leaf, slot);
3665				memmove_extent_buffer(leaf, ptr,
3666				      (unsigned long)fi,
3667				      BTRFS_FILE_EXTENT_INLINE_DATA_START);
3668			}
3669		}
3670
3671		memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
3672			      data_end + size_diff, BTRFS_LEAF_DATA_OFFSET +
3673			      data_end, old_data_start - data_end);
3674
3675		offset = btrfs_disk_key_offset(&disk_key);
3676		btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
3677		btrfs_set_item_key(leaf, &disk_key, slot);
3678		if (slot == 0)
3679			fixup_low_keys(path, &disk_key, 1);
3680	}
3681
3682	item = btrfs_item_nr(slot);
3683	btrfs_set_item_size(leaf, item, new_size);
3684	btrfs_mark_buffer_dirty(leaf);
3685
3686	if (btrfs_leaf_free_space(leaf) < 0) {
3687		btrfs_print_leaf(leaf);
3688		BUG();
3689	}
3690}
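
/*
 * Illustrative usage sketch (hypothetical helper, not part of this file):
 * shrink the current item by chopping bytes off its end, e.g. after
 * dropping trailing entries from a checksum item.
 */
#if 0
static void example_shrink_item(struct btrfs_path *path, u32 bytes_to_drop)
{
	u32 old_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);

	/* from_end == 1: keep the front of the item, drop the tail */
	btrfs_truncate_item(path, old_size - bytes_to_drop, 1);
}
#endif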
3691
3692/*
3693 * make the item pointed to by the path bigger, data_size is the added size.
3694 */
3695void btrfs_extend_item(struct btrfs_path *path, u32 data_size)
3696{
3697	int slot;
3698	struct extent_buffer *leaf;
3699	struct btrfs_item *item;
3700	u32 nritems;
3701	unsigned int data_end;
3702	unsigned int old_data;
3703	unsigned int old_size;
3704	int i;
3705	struct btrfs_map_token token;
3706
3707	leaf = path->nodes[0];
3708
3709	nritems = btrfs_header_nritems(leaf);
3710	data_end = leaf_data_end(leaf);
3711
3712	if (btrfs_leaf_free_space(leaf) < data_size) {
3713		btrfs_print_leaf(leaf);
3714		BUG();
3715	}
3716	slot = path->slots[0];
3717	old_data = btrfs_item_end_nr(leaf, slot);
3718
3719	BUG_ON(slot < 0);
3720	if (slot >= nritems) {
3721		btrfs_print_leaf(leaf);
3722		btrfs_crit(leaf->fs_info, "slot %d too large, nritems %d",
3723			   slot, nritems);
3724		BUG();
3725	}
3726
3727	/*
3728	 * item0..itemN ... dataN.offset..dataN.size .. data0.size
3729	 */
3730	/* first correct the data pointers */
3731	btrfs_init_map_token(&token, leaf);
3732	for (i = slot; i < nritems; i++) {
3733		u32 ioff;
3734		item = btrfs_item_nr(i);
3735
3736		ioff = btrfs_token_item_offset(&token, item);
3737		btrfs_set_token_item_offset(&token, item, ioff - data_size);
3738	}
3739
3740	/* shift the data */
3741	memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
3742		      data_end - data_size, BTRFS_LEAF_DATA_OFFSET +
3743		      data_end, old_data - data_end);
3744
3745	data_end = old_data;
3746	old_size = btrfs_item_size_nr(leaf, slot);
3747	item = btrfs_item_nr(slot);
3748	btrfs_set_item_size(leaf, item, old_size + data_size);
3749	btrfs_mark_buffer_dirty(leaf);
3750
3751	if (btrfs_leaf_free_space(leaf) < 0) {
3752		btrfs_print_leaf(leaf);
3753		BUG();
3754	}
3755}
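
/*
 * Illustrative sketch (not in the original file): growing an existing item.
 * Callers reserve the extra bytes up front by passing them as ins_len to
 * btrfs_search_slot(), so the leaf is guaranteed to have room by the time
 * btrfs_extend_item() runs. The key and grow size here are caller-supplied
 * assumptions of this sketch.
 */
static int __maybe_unused example_grow_item(struct btrfs_trans_handle *trans,
					    struct btrfs_root *root,
					    const struct btrfs_key *key,
					    u32 grow)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	/* ins_len == grow reserves leaf space; cow == 1 makes it writable */
	ret = btrfs_search_slot(trans, root, key, path, grow, 1);
	if (ret > 0)
		ret = -ENOENT;
	else if (ret == 0)
		btrfs_extend_item(path, grow);
	btrfs_free_path(path);
	return ret;
}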
3756
3757/**
3758 * setup_items_for_insert - Helper called before inserting one or more items
3759 * into a leaf. Main purpose is to save stack depth by doing the bulk of the work
3760 * in a function that doesn't call btrfs_search_slot
3761 *
3762 * @root:	root we are inserting items to
3763 * @path:	points to the leaf/slot where we are going to insert new items
3764 * @cpu_key:	array of keys for items to be inserted
3765 * @data_size:	size of the body of each item we are going to insert
3766 * @nr:		size of @cpu_key/@data_size arrays
3767 */
3768void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
3769			    const struct btrfs_key *cpu_key, u32 *data_size,
3770			    int nr)
3771{
3772	struct btrfs_fs_info *fs_info = root->fs_info;
3773	struct btrfs_item *item;
3774	int i;
3775	u32 nritems;
3776	unsigned int data_end;
3777	struct btrfs_disk_key disk_key;
3778	struct extent_buffer *leaf;
3779	int slot;
3780	struct btrfs_map_token token;
3781	u32 total_size;
3782	u32 total_data = 0;
3783
3784	for (i = 0; i < nr; i++)
3785		total_data += data_size[i];
3786	total_size = total_data + (nr * sizeof(struct btrfs_item));
3787
3788	if (path->slots[0] == 0) {
3789		btrfs_cpu_key_to_disk(&disk_key, cpu_key);
3790		fixup_low_keys(path, &disk_key, 1);
3791	}
3792	btrfs_unlock_up_safe(path, 1);
3793
3794	leaf = path->nodes[0];
3795	slot = path->slots[0];
3796
3797	nritems = btrfs_header_nritems(leaf);
3798	data_end = leaf_data_end(leaf);
3799
3800	if (btrfs_leaf_free_space(leaf) < total_size) {
3801		btrfs_print_leaf(leaf);
3802		btrfs_crit(fs_info, "not enough freespace need %u have %d",
3803			   total_size, btrfs_leaf_free_space(leaf));
3804		BUG();
3805	}
3806
3807	btrfs_init_map_token(&token, leaf);
3808	if (slot != nritems) {
3809		unsigned int old_data = btrfs_item_end_nr(leaf, slot);
3810
3811		if (old_data < data_end) {
3812			btrfs_print_leaf(leaf);
3813			btrfs_crit(fs_info,
3814		"item at slot %d with data offset %u beyond data end of leaf %u",
3815				   slot, old_data, data_end);
3816			BUG();
3817		}
3818		/*
3819		 * item0..itemN ... dataN.offset..dataN.size .. data0.size
3820		 */
3821		/* first correct the data pointers */
3822		for (i = slot; i < nritems; i++) {
3823			u32 ioff;
3824
3825			item = btrfs_item_nr(i);
3826			ioff = btrfs_token_item_offset(&token, item);
3827			btrfs_set_token_item_offset(&token, item,
3828						    ioff - total_data);
3829		}
3830		/* shift the items */
3831		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
3832			      btrfs_item_nr_offset(slot),
3833			      (nritems - slot) * sizeof(struct btrfs_item));
3834
3835		/* shift the data */
3836		memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
3837			      data_end - total_data, BTRFS_LEAF_DATA_OFFSET +
3838			      data_end, old_data - data_end);
3839		data_end = old_data;
3840	}
3841
3842	/* setup the item for the new data */
3843	for (i = 0; i < nr; i++) {
3844		btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
3845		btrfs_set_item_key(leaf, &disk_key, slot + i);
3846		item = btrfs_item_nr(slot + i);
3847		data_end -= data_size[i];
3848		btrfs_set_token_item_offset(&token, item, data_end);
3849		btrfs_set_token_item_size(&token, item, data_size[i]);
3850	}
3851
3852	btrfs_set_header_nritems(leaf, nritems + nr);
3853	btrfs_mark_buffer_dirty(leaf);
3854
3855	if (btrfs_leaf_free_space(leaf) < 0) {
3856		btrfs_print_leaf(leaf);
3857		BUG();
3858	}
3859}
3860
3861/*
3862 * Given a key and some data, insert items into the tree.
3863 * This does all the path init required, making room in the tree if needed.
3864 */
3865int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
3866			    struct btrfs_root *root,
3867			    struct btrfs_path *path,
3868			    const struct btrfs_key *cpu_key, u32 *data_size,
3869			    int nr)
3870{
3871	int ret = 0;
3872	int slot;
3873	int i;
3874	u32 total_size = 0;
3875	u32 total_data = 0;
3876
3877	for (i = 0; i < nr; i++)
3878		total_data += data_size[i];
3879
3880	total_size = total_data + (nr * sizeof(struct btrfs_item));
3881	ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
3882	if (ret == 0)
3883		return -EEXIST;
3884	if (ret < 0)
3885		return ret;
3886
3887	slot = path->slots[0];
3888	BUG_ON(slot < 0);
3889
3890	setup_items_for_insert(root, path, cpu_key, data_size, nr);
3891	return 0;
3892}
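
/*
 * Illustrative sketch (not in the original file): reserving several adjacent
 * empty items in one pass and filling the bodies afterwards, the same
 * two-step shape btrfs_insert_item() below uses for a single item. The two
 * keys are assumed to be in sorted order and not already present.
 */
static int __maybe_unused example_insert_pair(struct btrfs_trans_handle *trans,
					      struct btrfs_root *root,
					      const struct btrfs_key *keys,
					      void **bodies, u32 *sizes)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	int i;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	ret = btrfs_insert_empty_items(trans, root, path, keys, sizes, 2);
	if (ret == 0) {
		leaf = path->nodes[0];
		for (i = 0; i < 2; i++)
			write_extent_buffer(leaf, bodies[i],
					    btrfs_item_ptr_offset(leaf,
							path->slots[0] + i),
					    sizes[i]);
		btrfs_mark_buffer_dirty(leaf);
	}
	btrfs_free_path(path);
	return ret;
}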
3893
3894/*
3895 * Given a key and some data, insert an item into the tree.
3896 * This does all the path init required, making room in the tree if needed.
3897 */
3898int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3899		      const struct btrfs_key *cpu_key, void *data,
3900		      u32 data_size)
3901{
3902	int ret = 0;
3903	struct btrfs_path *path;
3904	struct extent_buffer *leaf;
3905	unsigned long ptr;
3906
3907	path = btrfs_alloc_path();
3908	if (!path)
3909		return -ENOMEM;
3910	ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
3911	if (!ret) {
3912		leaf = path->nodes[0];
3913		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
3914		write_extent_buffer(leaf, data, ptr, data_size);
3915		btrfs_mark_buffer_dirty(leaf);
3916	}
3917	btrfs_free_path(path);
3918	return ret;
3919}
3920
3921/*
3922 * delete the pointer from a given node.
3923 *
3924 * the tree should have been previously balanced so the deletion does not
3925 * empty a node.
3926 */
3927static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
3928		    int level, int slot)
3929{
3930	struct extent_buffer *parent = path->nodes[level];
3931	u32 nritems;
3932	int ret;
3933
3934	nritems = btrfs_header_nritems(parent);
3935	if (slot != nritems - 1) {
3936		if (level) {
3937			ret = btrfs_tree_mod_log_insert_move(parent, slot,
3938					slot + 1, nritems - slot - 1);
3939			BUG_ON(ret < 0);
3940		}
3941		memmove_extent_buffer(parent,
3942			      btrfs_node_key_ptr_offset(slot),
3943			      btrfs_node_key_ptr_offset(slot + 1),
3944			      sizeof(struct btrfs_key_ptr) *
3945			      (nritems - slot - 1));
3946	} else if (level) {
3947		ret = btrfs_tree_mod_log_insert_key(parent, slot,
3948				BTRFS_MOD_LOG_KEY_REMOVE, GFP_NOFS);
3949		BUG_ON(ret < 0);
3950	}
3951
3952	nritems--;
3953	btrfs_set_header_nritems(parent, nritems);
3954	if (nritems == 0 && parent == root->node) {
3955		BUG_ON(btrfs_header_level(root->node) != 1);
3956		/* just turn the root into a leaf and break */
3957		/* just turn the root into a leaf */
3958	} else if (slot == 0) {
3959		struct btrfs_disk_key disk_key;
3960
3961		btrfs_node_key(parent, &disk_key, 0);
3962		fixup_low_keys(path, &disk_key, level + 1);
3963	}
3964	btrfs_mark_buffer_dirty(parent);
3965}
3966
3967/*
3968 * a helper function to delete the leaf pointed to by path->slots[1] and
3969 * path->nodes[1].
3970 *
3971 * This deletes the pointer in path->nodes[1] and frees the leaf
3972 * block extent.
3973 *
3974 * The path must have already been setup for deleting the leaf, including
3975 * all the proper balancing.  path->nodes[1] must be locked.
3976 */
3977static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans,
3978				    struct btrfs_root *root,
3979				    struct btrfs_path *path,
3980				    struct extent_buffer *leaf)
3981{
3982	WARN_ON(btrfs_header_generation(leaf) != trans->transid);
3983	del_ptr(root, path, 1, path->slots[1]);
3984
3985	/*
3986	 * btrfs_free_extent is expensive, we want to make sure we
3987	 * aren't holding any locks when we call it
3988	 */
3989	btrfs_unlock_up_safe(path, 0);
3990
3991	root_sub_used(root, leaf->len);
3992
3993	atomic_inc(&leaf->refs);
3994	btrfs_free_tree_block(trans, root, leaf, 0, 1);
3995	free_extent_buffer_stale(leaf);
3996}

3997/*
3998 * delete the items at the leaf level in path.  If that empties
3999 * the leaf, remove it from the tree
4000 */
4001int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
4002		    struct btrfs_path *path, int slot, int nr)
4003{
4004	struct btrfs_fs_info *fs_info = root->fs_info;
4005	struct extent_buffer *leaf;
4006	struct btrfs_item *item;
4007	u32 last_off;
4008	u32 dsize = 0;
4009	int ret = 0;
4010	int wret;
4011	int i;
4012	u32 nritems;
4013
4014	leaf = path->nodes[0];
4015	last_off = btrfs_item_offset_nr(leaf, slot + nr - 1);
4016
4017	for (i = 0; i < nr; i++)
4018		dsize += btrfs_item_size_nr(leaf, slot + i);
4019
4020	nritems = btrfs_header_nritems(leaf);
4021
4022	if (slot + nr != nritems) {
4023		int data_end = leaf_data_end(leaf);
4024		struct btrfs_map_token token;
4025
4026		memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
4027			      data_end + dsize,
4028			      BTRFS_LEAF_DATA_OFFSET + data_end,
4029			      last_off - data_end);
4030
4031		btrfs_init_map_token(&token, leaf);
4032		for (i = slot + nr; i < nritems; i++) {
4033			u32 ioff;
4034
4035			item = btrfs_item_nr(i);
4036			ioff = btrfs_token_item_offset(&token, item);
4037			btrfs_set_token_item_offset(&token, item, ioff + dsize);
4038		}
4039
4040		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot),
4041			      btrfs_item_nr_offset(slot + nr),
4042			      sizeof(struct btrfs_item) *
4043			      (nritems - slot - nr));
4044	}
4045	btrfs_set_header_nritems(leaf, nritems - nr);
4046	nritems -= nr;
4047
4048	/* delete the leaf if we've emptied it */
4049	if (nritems == 0) {
4050		if (leaf == root->node) {
4051			btrfs_set_header_level(leaf, 0);
4052		} else {
4053			btrfs_clean_tree_block(leaf);
4054			btrfs_del_leaf(trans, root, path, leaf);
4055		}
4056	} else {
4057		int used = leaf_space_used(leaf, 0, nritems);
4058		if (slot == 0) {
4059			struct btrfs_disk_key disk_key;
4060
4061			btrfs_item_key(leaf, &disk_key, 0);
4062			fixup_low_keys(path, &disk_key, 1);
4063		}
4064
4065		/* delete the leaf if it is mostly empty */
4066		if (used < BTRFS_LEAF_DATA_SIZE(fs_info) / 3) {
4067			/* push_leaf_left fixes the path.
4068			 * make sure the path still points to our leaf
4069			 * for possible call to del_ptr below
4070			 */
4071			slot = path->slots[1];
4072			atomic_inc(&leaf->refs);
4073
4074			wret = push_leaf_left(trans, root, path, 1, 1,
4075					      1, (u32)-1);
4076			if (wret < 0 && wret != -ENOSPC)
4077				ret = wret;
4078
4079			if (path->nodes[0] == leaf &&
4080			    btrfs_header_nritems(leaf)) {
4081				wret = push_leaf_right(trans, root, path, 1,
4082						       1, 1, 0);
4083				if (wret < 0 && wret != -ENOSPC)
4084					ret = wret;
4085			}
4086
4087			if (btrfs_header_nritems(leaf) == 0) {
4088				path->slots[1] = slot;
4089				btrfs_del_leaf(trans, root, path, leaf);
4090				free_extent_buffer(leaf);
4091				ret = 0;
4092			} else {
4093				/* if we're still in the path, make sure
4094				 * we're dirty.  Otherwise, one of the
4095				 * push_leaf functions must have already
4096				 * dirtied this buffer
4097				 */
4098				if (path->nodes[0] == leaf)
4099					btrfs_mark_buffer_dirty(leaf);
4100				free_extent_buffer(leaf);
4101			}
4102		} else {
4103			btrfs_mark_buffer_dirty(leaf);
4104		}
4105	}
4106	return ret;
4107}
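
/*
 * Illustrative sketch (not in the original file): looking an item up and
 * deleting it. Passing -1 as ins_len to btrfs_search_slot() lets the search
 * do the balancing needed before a deletion; cow == 1 because the leaf will
 * be modified.
 */
static int __maybe_unused example_delete_item(struct btrfs_trans_handle *trans,
					      struct btrfs_root *root,
					      const struct btrfs_key *key)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	ret = btrfs_search_slot(trans, root, key, path, -1, 1);
	if (ret == 0)
		ret = btrfs_del_items(trans, root, path, path->slots[0], 1);
	else if (ret > 0)
		ret = -ENOENT;
	btrfs_free_path(path);
	return ret;
}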
4108
4109/*
4110 * search the tree again to find a leaf with lesser keys
4111 * returns 0 if it found something or 1 if there are no lesser leaves.
4112 * returns < 0 on io errors.
4113 *
4114 * This may release the path, and so you may lose any locks held at the
4115 * time you call it.
4116 */
4117int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
4118{
4119	struct btrfs_key key;
4120	struct btrfs_disk_key found_key;
4121	int ret;
4122
4123	btrfs_item_key_to_cpu(path->nodes[0], &key, 0);
4124
4125	if (key.offset > 0) {
4126		key.offset--;
4127	} else if (key.type > 0) {
4128		key.type--;
4129		key.offset = (u64)-1;
4130	} else if (key.objectid > 0) {
4131		key.objectid--;
4132		key.type = (u8)-1;
4133		key.offset = (u64)-1;
4134	} else {
4135		return 1;
4136	}
4137
4138	btrfs_release_path(path);
4139	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4140	if (ret < 0)
4141		return ret;
4142	btrfs_item_key(path->nodes[0], &found_key, 0);
4143	ret = comp_keys(&found_key, &key);
4144	/*
4145	 * We might have had an item with the previous key in the tree right
4146	 * before we released our path. And after we released our path, that
4147	 * item might have been pushed to the first slot (0) of the leaf we
4148	 * were holding due to a tree balance. Alternatively, an item with the
4149	 * previous key can exist as the only element of a leaf (big fat item).
4150	 * Therefore account for these 2 cases, so that our callers (like
4151	 * btrfs_previous_item) don't miss an existing item with a key matching
4152	 * the previous key we computed above.
4153	 */
4154	if (ret <= 0)
4155		return 0;
4156	return 1;
4157}
4158
4159/*
4160 * A helper function to walk down the tree starting at min_key, and looking
4161 * for nodes or leaves that have a minimum transaction id.
4162 * This is used by the btree defrag code and tree logging.
4163 *
4164 * This does not cow, but it does stuff the starting key it finds back
4165 * into min_key, so you can call btrfs_search_slot with cow=1 on the
4166 * key and get a writable path.
4167 *
4168 * This honors path->lowest_level to prevent descent past a given level
4169 * of the tree.
4170 *
4171 * min_trans indicates the oldest transaction that you are interested
4172 * in walking through.  Any nodes or leaves older than min_trans are
4173 * skipped over (without reading them).
4174 *
4175 * returns zero if something useful was found, < 0 on error and 1 if there
4176 * was nothing in the tree that matched the search criteria.
4177 */
4178int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
4179			 struct btrfs_path *path,
4180			 u64 min_trans)
4181{
4182	struct extent_buffer *cur;
4183	struct btrfs_key found_key;
4184	int slot;
4185	int sret;
4186	u32 nritems;
4187	int level;
4188	int ret = 1;
4189	int keep_locks = path->keep_locks;
4190
4191	path->keep_locks = 1;
4192again:
4193	cur = btrfs_read_lock_root_node(root);
4194	level = btrfs_header_level(cur);
4195	WARN_ON(path->nodes[level]);
4196	path->nodes[level] = cur;
4197	path->locks[level] = BTRFS_READ_LOCK;
4198
4199	if (btrfs_header_generation(cur) < min_trans) {
4200		ret = 1;
4201		goto out;
4202	}
4203	while (1) {
4204		nritems = btrfs_header_nritems(cur);
4205		level = btrfs_header_level(cur);
4206		sret = btrfs_bin_search(cur, min_key, &slot);
4207		if (sret < 0) {
4208			ret = sret;
4209			goto out;
4210		}
4211
4212		/* at the lowest level, we're done, setup the path and exit */
4213		if (level == path->lowest_level) {
4214			if (slot >= nritems)
4215				goto find_next_key;
4216			ret = 0;
4217			path->slots[level] = slot;
4218			btrfs_item_key_to_cpu(cur, &found_key, slot);
4219			goto out;
4220		}
4221		if (sret && slot > 0)
4222			slot--;
4223		/*
4224	 * check this node pointer against the min_trans parameter.
4225		 * If it is too old, skip to the next one.
4226		 */
4227		while (slot < nritems) {
4228			u64 gen;
4229
4230			gen = btrfs_node_ptr_generation(cur, slot);
4231			if (gen < min_trans) {
4232				slot++;
4233				continue;
4234			}
4235			break;
4236		}
4237find_next_key:
4238		/*
4239		 * we didn't find a candidate key in this node, walk forward
4240		 * and find another one
4241		 */
4242		if (slot >= nritems) {
4243			path->slots[level] = slot;
4244			sret = btrfs_find_next_key(root, path, min_key, level,
4245						  min_trans);
4246			if (sret == 0) {
4247				btrfs_release_path(path);
4248				goto again;
4249			} else {
4250				goto out;
4251			}
4252		}
4253		/* save our key for returning back */
4254		btrfs_node_key_to_cpu(cur, &found_key, slot);
4255		path->slots[level] = slot;
4256		if (level == path->lowest_level) {
4257			ret = 0;
4258			goto out;
4259		}
4260		cur = btrfs_read_node_slot(cur, slot);
4261		if (IS_ERR(cur)) {
4262			ret = PTR_ERR(cur);
4263			goto out;
4264		}
4265
4266		btrfs_tree_read_lock(cur);
4267
4268		path->locks[level - 1] = BTRFS_READ_LOCK;
4269		path->nodes[level - 1] = cur;
4270		unlock_up(path, level, 1, 0, NULL);
4271	}
4272out:
4273	path->keep_locks = keep_locks;
4274	if (ret == 0) {
4275		btrfs_unlock_up_safe(path, path->lowest_level + 1);
4276		memcpy(min_key, &found_key, sizeof(found_key));
4277	}
4278	return ret;
4279}
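
/*
 * Illustrative sketch (not in the original file): scanning only the parts of
 * a tree modified since transaction @min_trans, the way the defrag and
 * tree-log callers mentioned above drive this helper. The key advancement
 * below is deliberately simplified.
 */
static int __maybe_unused example_walk_newer_than(struct btrfs_root *root,
						  u64 min_trans)
{
	struct btrfs_path *path;
	struct btrfs_key min_key = { 0 }; /* start from the smallest key */
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	while (1) {
		ret = btrfs_search_forward(root, &min_key, path, min_trans);
		if (ret) /* 1 == nothing newer, < 0 == error */
			break;
		/* ... process the item at path->nodes[0], path->slots[0] ... */
		btrfs_release_path(path);
		/* advance past the key we just saw (simplified) */
		if (min_key.offset == (u64)-1)
			break;
		min_key.offset++;
	}
	btrfs_free_path(path);
	return ret < 0 ? ret : 0;
}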
4280
4281/*
4282 * this is similar to btrfs_next_leaf, but does not try to preserve
4283 * and fixup the path.  It looks for and returns the next key in the
4284 * tree based on the current path and the min_trans parameters.
4285 *
4286 * 0 is returned if another key is found, < 0 if there are any errors
4287 * and 1 is returned if there are no higher keys in the tree
4288 *
4289 * path->keep_locks should be set to 1 on the search made before
4290 * calling this function.
4291 */
4292int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
4293			struct btrfs_key *key, int level, u64 min_trans)
4294{
4295	int slot;
4296	struct extent_buffer *c;
4297
4298	WARN_ON(!path->keep_locks && !path->skip_locking);
4299	while (level < BTRFS_MAX_LEVEL) {
4300		if (!path->nodes[level])
4301			return 1;
4302
4303		slot = path->slots[level] + 1;
4304		c = path->nodes[level];
4305next:
4306		if (slot >= btrfs_header_nritems(c)) {
4307			int ret;
4308			int orig_lowest;
4309			struct btrfs_key cur_key;
4310			if (level + 1 >= BTRFS_MAX_LEVEL ||
4311			    !path->nodes[level + 1])
4312				return 1;
4313
4314			if (path->locks[level + 1] || path->skip_locking) {
4315				level++;
4316				continue;
4317			}
4318
4319			slot = btrfs_header_nritems(c) - 1;
4320			if (level == 0)
4321				btrfs_item_key_to_cpu(c, &cur_key, slot);
4322			else
4323				btrfs_node_key_to_cpu(c, &cur_key, slot);
4324
4325			orig_lowest = path->lowest_level;
4326			btrfs_release_path(path);
4327			path->lowest_level = level;
4328			ret = btrfs_search_slot(NULL, root, &cur_key, path,
4329						0, 0);
4330			path->lowest_level = orig_lowest;
4331			if (ret < 0)
4332				return ret;
4333
4334			c = path->nodes[level];
4335			slot = path->slots[level];
4336			if (ret == 0)
4337				slot++;
4338			goto next;
4339		}
4340
4341		if (level == 0)
4342			btrfs_item_key_to_cpu(c, key, slot);
4343		else {
4344			u64 gen = btrfs_node_ptr_generation(c, slot);
4345
4346			if (gen < min_trans) {
4347				slot++;
4348				goto next;
4349			}
4350			btrfs_node_key_to_cpu(c, key, slot);
4351		}
4352		return 0;
4353	}
4354	return 1;
4355}
4356
4357/*
4358 * search the tree again to find a leaf with greater keys
4359 * returns 0 if it found something or 1 if there are no greater leaves.
4360 * returns < 0 on io errors.
4361 */
4362int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
4363{
4364	return btrfs_next_old_leaf(root, path, 0);
4365}
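
/*
 * Illustrative sketch (not in the original file): the canonical read-only
 * forward iteration over every item of a tree, hopping leaves with
 * btrfs_next_leaf() once the current leaf is exhausted.
 */
static int __maybe_unused example_iterate_tree(struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct btrfs_key key = { 0 };
	struct btrfs_key found;
	struct extent_buffer *leaf;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	while (1) {
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret) /* 1 == no more leaves, < 0 == error */
				break;
			continue;
		}
		btrfs_item_key_to_cpu(leaf, &found, path->slots[0]);
		/* ... process the item with key 'found' ... */
		path->slots[0]++;
	}
out:
	btrfs_free_path(path);
	return ret < 0 ? ret : 0;
}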
4366
4367int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
4368			u64 time_seq)
4369{
4370	int slot;
4371	int level;
4372	struct extent_buffer *c;
4373	struct extent_buffer *next;
4374	struct btrfs_key key;
4375	u32 nritems;
4376	int ret;
4377	int i;
4378
4379	nritems = btrfs_header_nritems(path->nodes[0]);
4380	if (nritems == 0)
4381		return 1;
4382
4383	btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
4384again:
4385	level = 1;
4386	next = NULL;
4387	btrfs_release_path(path);
4388
4389	path->keep_locks = 1;
4390
4391	if (time_seq)
4392		ret = btrfs_search_old_slot(root, &key, path, time_seq);
4393	else
4394		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4395	path->keep_locks = 0;
4396
4397	if (ret < 0)
4398		return ret;
4399
4400	nritems = btrfs_header_nritems(path->nodes[0]);
4401	/*
4402	 * by releasing the path above we dropped all our locks.  A balance
4403	 * could have added more items next to the key that used to be
4404	 * at the very end of the block.  So, check again here and
4405	 * advance the path if there are now more items available.
4406	 */
4407	if (nritems > 0 && path->slots[0] < nritems - 1) {
4408		if (ret == 0)
4409			path->slots[0]++;
4410		ret = 0;
4411		goto done;
4412	}
4413	/*
4414	 * So the above check misses one case:
4415	 * - after releasing the path above, someone has removed the item that
4416	 *   used to be at the very end of the block, and balance between leaves
4417	 *   gets another one with a bigger key.offset to replace it.
4418	 *
4419	 * This one should be returned as well, or we can get leaf corruption
4420	 * later (especially in __btrfs_drop_extents()).
4421	 *
4422	 * A bit more explanation about this check:
4423	 * with ret > 0, the key isn't found, the path points to the slot
4424	 * where it would be inserted, so the item at path->slots[0] must be
4425	 * the bigger one.
4426	 */
4427	if (nritems > 0 && ret > 0 && path->slots[0] == nritems - 1) {
4428		ret = 0;
4429		goto done;
4430	}
4431
4432	while (level < BTRFS_MAX_LEVEL) {
4433		if (!path->nodes[level]) {
4434			ret = 1;
4435			goto done;
4436		}
4437
4438		slot = path->slots[level] + 1;
4439		c = path->nodes[level];
4440		if (slot >= btrfs_header_nritems(c)) {
4441			level++;
4442			if (level == BTRFS_MAX_LEVEL) {
4443				ret = 1;
4444				goto done;
4445			}
4446			continue;
4447		}
4448
4450		/*
4451		 * Our current level is where we're going to start from, and to
4452		 * make sure lockdep doesn't complain we need to drop our locks
4453		 * and nodes from 0 to our current level.
4454		 */
4455		for (i = 0; i < level; i++) {
4456			if (path->locks[i]) {
4457				btrfs_tree_read_unlock(path->nodes[i]);
4458				path->locks[i] = 0;
4459			}
4460			free_extent_buffer(path->nodes[i]);
4461			path->nodes[i] = NULL;
4462		}
4463
4464		next = c;
4465		ret = read_block_for_search(root, path, &next, level,
4466					    slot, &key);
4467		if (ret == -EAGAIN)
4468			goto again;
4469
4470		if (ret < 0) {
4471			btrfs_release_path(path);
4472			goto done;
4473		}
4474
4475		if (!path->skip_locking) {
4476			ret = btrfs_try_tree_read_lock(next);
4477			if (!ret && time_seq) {
4478				/*
4479				 * If we don't get the lock, we may be racing
4480				 * with push_leaf_left, holding that lock while
4481				 * itself waiting for the leaf we've currently
4482				 * locked. To solve this situation, we give up
4483				 * on our lock and cycle.
4484				 */
4485				free_extent_buffer(next);
4486				btrfs_release_path(path);
4487				cond_resched();
4488				goto again;
4489			}
4490			if (!ret)
4491				btrfs_tree_read_lock(next);
4492		}
4493		break;
4494	}
4495	path->slots[level] = slot;
4496	while (1) {
4497		level--;
4498		path->nodes[level] = next;
4499		path->slots[level] = 0;
4500		if (!path->skip_locking)
4501			path->locks[level] = BTRFS_READ_LOCK;
4502		if (!level)
4503			break;
4504
4505		ret = read_block_for_search(root, path, &next, level,
4506					    0, &key);
4507		if (ret == -EAGAIN)
4508			goto again;
4509
4510		if (ret < 0) {
4511			btrfs_release_path(path);
4512			goto done;
4513		}
4514
4515		if (!path->skip_locking)
4516			btrfs_tree_read_lock(next);
4517	}
4518	ret = 0;
4519done:
4520	unlock_up(path, 0, 1, 0, NULL);
4521
4522	return ret;
4523}
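
/*
 * Illustrative sketch (not in the original file): iterating an old view of a
 * tree via the tree mod log. The caller is assumed to have obtained
 * @time_seq from the tree mod log (see tree-mod-log.c) and to hold it for
 * the duration of the walk.
 */
static int __maybe_unused example_walk_old_view(struct btrfs_root *root,
						u64 time_seq)
{
	struct btrfs_path *path;
	struct btrfs_key key = { 0 };
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	ret = btrfs_search_old_slot(root, &key, path, time_seq);
	if (ret < 0)
		goto out;
	while (1) {
		struct extent_buffer *leaf = path->nodes[0];

		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_old_leaf(root, path, time_seq);
			if (ret) /* 1 == no more leaves, < 0 == error */
				break;
			continue;
		}
		/* ... inspect the item at path->slots[0] ... */
		path->slots[0]++;
	}
out:
	btrfs_free_path(path);
	return ret < 0 ? ret : 0;
}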
4524
4525/*
4526 * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps
4527 * searching until it gets past min_objectid or finds an item of 'type'
4528 *
4529 * returns 0 if something is found, 1 if nothing was found and < 0 on error
4530 */
4531int btrfs_previous_item(struct btrfs_root *root,
4532			struct btrfs_path *path, u64 min_objectid,
4533			int type)
4534{
4535	struct btrfs_key found_key;
4536	struct extent_buffer *leaf;
4537	u32 nritems;
4538	int ret;
4539
4540	while (1) {
4541		if (path->slots[0] == 0) {
4542			ret = btrfs_prev_leaf(root, path);
4543			if (ret != 0)
4544				return ret;
4545		} else {
4546			path->slots[0]--;
4547		}
4548		leaf = path->nodes[0];
4549		nritems = btrfs_header_nritems(leaf);
4550		if (nritems == 0)
4551			return 1;
4552		if (path->slots[0] == nritems)
4553			path->slots[0]--;
4554
4555		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4556		if (found_key.objectid < min_objectid)
4557			break;
4558		if (found_key.type == type)
4559			return 0;
4560		if (found_key.objectid == min_objectid &&
4561		    found_key.type < type)
4562			break;
4563	}
4564	return 1;
4565}
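
/*
 * Illustrative sketch (not in the original file): positioning at the last
 * DIR_INDEX item of a directory by searching past all possible offsets and
 * stepping backwards, loosely modeled on how the directory index counter is
 * recomputed elsewhere in btrfs. @ino names the directory inode and is an
 * assumption of this sketch.
 */
static int __maybe_unused example_last_dir_index(struct btrfs_root *root,
						 struct btrfs_path *path,
						 u64 ino)
{
	struct btrfs_key key;
	int ret;

	key.objectid = ino;
	key.type = BTRFS_DIR_INDEX_KEY;
	key.offset = (u64)-1;
	/* offset (u64)-1 can't exist, so this lands just past the last one */
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ret;
	return btrfs_previous_item(root, path, ino, BTRFS_DIR_INDEX_KEY);
}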
4566
4567/*
4568 * search in the extent tree to find a previous Metadata/Data extent item with
4569 * min objectid.
4570 *
4571 * returns 0 if something is found, 1 if nothing was found and < 0 on error
4572 */
4573int btrfs_previous_extent_item(struct btrfs_root *root,
4574			struct btrfs_path *path, u64 min_objectid)
4575{
4576	struct btrfs_key found_key;
4577	struct extent_buffer *leaf;
4578	u32 nritems;
4579	int ret;
4580
4581	while (1) {
4582		if (path->slots[0] == 0) {
4583			ret = btrfs_prev_leaf(root, path);
4584			if (ret != 0)
4585				return ret;
4586		} else {
4587			path->slots[0]--;
4588		}
4589		leaf = path->nodes[0];
4590		nritems = btrfs_header_nritems(leaf);
4591		if (nritems == 0)
4592			return 1;
4593		if (path->slots[0] == nritems)
4594			path->slots[0]--;
4595
4596		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4597		if (found_key.objectid < min_objectid)
4598			break;
4599		if (found_key.type == BTRFS_EXTENT_ITEM_KEY ||
4600		    found_key.type == BTRFS_METADATA_ITEM_KEY)
4601			return 0;
4602		if (found_key.objectid == min_objectid &&
4603		    found_key.type < BTRFS_EXTENT_ITEM_KEY)
4604			break;
4605	}
4606	return 1;
4607}
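
/*
 * Illustrative sketch (not in the original file): finding the last extent
 * item at or before @bytenr by over-shooting the search and stepping
 * backwards. @extent_root is assumed to be the extent tree root
 * (fs_info->extent_root at this revision).
 */
static int __maybe_unused example_find_extent_before(
					struct btrfs_root *extent_root,
					struct btrfs_path *path, u64 bytenr)
{
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	key.type = BTRFS_METADATA_ITEM_KEY; /* sorts after EXTENT_ITEM_KEY */
	key.offset = (u64)-1;
	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		return ret;
	/* min_objectid == 0: allow walking back past @bytenr itself */
	return btrfs_previous_extent_item(extent_root, path, 0);
}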
4608