/*
 * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
 */

/* Now we have all buffers that must be used in balancing of the tree */
/* Further calculations can not cause schedule(), and thus the buffer */
/* tree will be stable until the balancing will be finished */
/* balance the tree according to the analysis made before, */
/* and using buffers obtained after all above. */

/**
 ** balance_leaf_when_delete
 ** balance_leaf
 ** do_balance
 **
 **/

#include <asm/uaccess.h>
#include <linux/time.h>
#include <linux/reiserfs_fs.h>
#include <linux/buffer_head.h>
#include <linux/kernel.h>

#ifdef CONFIG_REISERFS_CHECK

/* Debug-only global: while do_balance() runs, cur_tb holds the active
   tree_balance.  The checking code uses it to detect that more than one
   balance is in flight, i.e. that schedule() interrupted do_balance(). */
struct tree_balance *cur_tb = NULL;	/* detects whether more than one
					   copy of tb exists as a means
					   of checking whether schedule
					   is interrupting do_balance */
#endif

/* Mark buffer bh dirty in the transaction attached to tb.
   NOTE(review): the 'flag' parameter is ignored by the body; presumably
   it is kept so the macro aliases below share one signature — confirm
   against callers before removing it. */
inline void do_balance_mark_leaf_dirty(struct tree_balance *tb,
				       struct buffer_head *bh, int flag)
{
	journal_mark_dirty(tb->transaction_handle,
			   tb->transaction_handle->t_super, bh);
}

/* Internal-node and superblock buffers are marked dirty through the
   journal exactly like leaf buffers, so these are plain aliases. */
#define do_balance_mark_internal_dirty do_balance_mark_leaf_dirty
#define do_balance_mark_sb_dirty do_balance_mark_leaf_dirty

/* summary:
 if deleting something ( tb->insert_size[0] < 0 )
	return(balance_leaf_when_delete()); (flag d handled here)
 else
	if lnum is larger than 0 we put items into the left node
	if rnum is larger than 0 we put items into the right node
	if snum1 is larger than 0 we put items into the new node s1
	if snum2 is larger than 0 we put items into the new node s2
Note that all *num* count new items being created.

It would be easier to read balance_leaf() if each of these summary
lines was a separate procedure rather than being inlined.  I think
that there are many passages here and in balance_leaf_when_delete() in
which two calls to one procedure can replace two passages, and it
might save cache space and improve software maintenance costs to do so.

Vladimir made the perceptive comment that we should offload most of
the decision making in this function into fix_nodes/check_balance, and
then create some sort of structure in tb that says what actions should
be performed by do_balance.

-Hans */

/* Balance leaf node in case of delete or cut: insert_size[0] < 0
 *
 * lnum, rnum can have values >= -1
 *	-1 means that the neighbor must be joined with S
 *	 0 means that nothing should be done with the neighbor
 *	>0 means to shift entirely or partly the specified number of items to the neighbor
 *
 * Returns 0 on all paths except one: when the contents of L[0], S[0]
 * and R[0] are all merged into R[0] it returns -1 (see that branch
 * below).
 */
static int balance_leaf_when_delete(struct tree_balance *tb, int flag)
{
	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
	int item_pos = PATH_LAST_POSITION(tb->tb_path);
	int pos_in_item = tb->tb_path->pos_in_item;
	struct buffer_info bi;
	int n;
	struct item_head *ih;

	RFALSE(tb->FR[0] && B_LEVEL(tb->FR[0]) != DISK_LEAF_NODE_LEVEL + 1,
	       "vs- 12000: level: wrong FR %z", tb->FR[0]);
	RFALSE(tb->blknum[0] > 1,
	       "PAP-12005: tb->blknum == %d, can not be > 1", tb->blknum[0]);
	RFALSE(!tb->blknum[0] && !PATH_H_PPARENT(tb->tb_path, 0),
	       "PAP-12010: tree can not be empty");

	ih = B_N_PITEM_HEAD(tbS0, item_pos);

	/* Delete or truncate the item */

	switch (flag) {
	case M_DELETE:		/* delete item in S[0] */

		RFALSE(ih_item_len(ih) + IH_SIZE != -tb->insert_size[0],
		       "vs-12013: mode Delete, insert size %d, ih to be deleted %h",
		       -tb->insert_size[0], ih);

		bi.tb = tb;
		bi.bi_bh = tbS0;
		bi.bi_parent = PATH_H_PPARENT(tb->tb_path, 0);
		bi.bi_position = PATH_H_POSITION(tb->tb_path, 1);
		leaf_delete_items(&bi, 0, item_pos, 1, -1);

		/* Deleting the 0-th item changes the leftmost key of S[0];
		   propagate the new delimiting key into the common parent
		   CFL[0] (or further up if S[0] went empty and is the 0-th
		   child of its parent). */
		if (!item_pos && tb->CFL[0]) {
			if (B_NR_ITEMS(tbS0)) {
				replace_key(tb, tb->CFL[0], tb->lkey[0], tbS0,
					    0);
			} else {
				if (!PATH_H_POSITION(tb->tb_path, 1))
					replace_key(tb, tb->CFL[0], tb->lkey[0],
						    PATH_H_PPARENT(tb->tb_path,
								   0), 0);
			}
		}

		RFALSE(!item_pos && !tb->CFL[0],
		       "PAP-12020: tb->CFL[0]==%p, tb->L[0]==%p", tb->CFL[0],
		       tb->L[0]);

		break;

	case M_CUT:{		/* cut item in S[0] */
			bi.tb = tb;
			bi.bi_bh = tbS0;
			bi.bi_parent = PATH_H_PPARENT(tb->tb_path, 0);
			bi.bi_position = PATH_H_POSITION(tb->tb_path, 1);
			if (is_direntry_le_ih(ih)) {

				/* UFS unlink semantics are such that you can only delete one directory entry at a time. */
				/* when we cut a directory tb->insert_size[0] means number of entries to be cut (always 1) */
				tb->insert_size[0] = -1;
				leaf_cut_from_buffer(&bi, item_pos, pos_in_item,
						     -tb->insert_size[0]);

				RFALSE(!item_pos && !pos_in_item && !tb->CFL[0],
				       "PAP-12030: can not change delimiting key. CFL[0]=%p",
				       tb->CFL[0]);

				/* Cutting the very first entry of the first
				   item changes the leftmost key of S[0];
				   update the delimiting key in CFL[0]. */
				if (!item_pos && !pos_in_item && tb->CFL[0]) {
					replace_key(tb, tb->CFL[0], tb->lkey[0],
						    tbS0, 0);
				}
			} else {
				/* non-directory item: insert_size[0] is the
				   (negative) byte count to remove */
				leaf_cut_from_buffer(&bi, item_pos, pos_in_item,
						     -tb->insert_size[0]);

				RFALSE(!ih_item_len(ih),
				       "PAP-12035: cut must leave non-zero dynamic length of item");
			}
			break;
		}

	default:
		print_cur_tb("12040");
		reiserfs_panic(tb->tb_sb,
			       "PAP-12040: balance_leaf_when_delete: unexpectable mode: %s(%d)",
			       (flag ==
				M_PASTE) ? "PASTE" : ((flag ==
						       M_INSERT) ? "INSERT" :
						      "UNKNOWN"), flag);
	}

	/* the rule is that no shifting occurs unless by shifting a node can be freed */
	n = B_NR_ITEMS(tbS0);
	if (tb->lnum[0]) {	/* L[0] takes part in balancing */
		if (tb->lnum[0] == -1) {	/* L[0] must be joined with S[0] */
			if (tb->rnum[0] == -1) {	/* R[0] must be also joined with S[0] */
				if (tb->FR[0] == PATH_H_PPARENT(tb->tb_path, 0)) {
					/* all contents of all the 3 buffers will be in L[0] */
					if (PATH_H_POSITION(tb->tb_path, 1) == 0
					    && 1 < B_NR_ITEMS(tb->FR[0]))
						replace_key(tb, tb->CFL[0],
							    tb->lkey[0],
							    tb->FR[0], 1);

					leaf_move_items(LEAF_FROM_S_TO_L, tb, n,
							-1, NULL);
					leaf_move_items(LEAF_FROM_R_TO_L, tb,
							B_NR_ITEMS(tb->R[0]),
							-1, NULL);

					/* S[0] and R[0] are now empty and
					   can be released */
					reiserfs_invalidate_buffer(tb, tbS0);
					reiserfs_invalidate_buffer(tb,
								   tb->R[0]);

					return 0;
				}
				/* all contents of all the 3 buffers will be in R[0] */
				leaf_move_items(LEAF_FROM_S_TO_R, tb, n, -1,
						NULL);
				leaf_move_items(LEAF_FROM_L_TO_R, tb,
						B_NR_ITEMS(tb->L[0]), -1, NULL);

				/* right_delimiting_key is correct in R[0] */
				replace_key(tb, tb->CFR[0], tb->rkey[0],
					    tb->R[0], 0);

				reiserfs_invalidate_buffer(tb, tbS0);
				reiserfs_invalidate_buffer(tb, tb->L[0]);

				/* the only -1 return: everything went right */
				return -1;
			}

			RFALSE(tb->rnum[0] != 0,
			       "PAP-12045: rnum must be 0 (%d)", tb->rnum[0]);
			/* all contents of L[0] and S[0] will be in L[0] */
			leaf_shift_left(tb, n, -1);

			reiserfs_invalidate_buffer(tb, tbS0);

			return 0;
		}
		/* a part of contents of S[0] will be in L[0] and the rest part of S[0] will be in R[0] */

		RFALSE((tb->lnum[0] + tb->rnum[0] < n) ||
		       (tb->lnum[0] + tb->rnum[0] > n + 1),
		       "PAP-12050: rnum(%d) and lnum(%d) and item number(%d) in S[0] are not consistent",
		       tb->rnum[0], tb->lnum[0], n);
		RFALSE((tb->lnum[0] + tb->rnum[0] == n) &&
		       (tb->lbytes != -1 || tb->rbytes != -1),
		       "PAP-12055: bad rbytes (%d)/lbytes (%d) parameters when items are not split",
		       tb->rbytes, tb->lbytes);
		RFALSE((tb->lnum[0] + tb->rnum[0] == n + 1) &&
		       (tb->lbytes < 1 || tb->rbytes != -1),
		       "PAP-12060: bad rbytes (%d)/lbytes (%d) parameters when items are split",
		       tb->rbytes, tb->lbytes);

		leaf_shift_left(tb, tb->lnum[0], tb->lbytes);
		leaf_shift_right(tb, tb->rnum[0], tb->rbytes);

		reiserfs_invalidate_buffer(tb, tbS0);

		return 0;
	}

	if (tb->rnum[0] == -1) {
		/* all contents of R[0] and S[0] will be in R[0] */
		leaf_shift_right(tb, n, -1);
		reiserfs_invalidate_buffer(tb, tbS0);
		return 0;
	}

	RFALSE(tb->rnum[0],
	       "PAP-12065: bad rnum parameter must be 0 (%d)", tb->rnum[0]);
	return 0;
}

static int balance_leaf(struct tree_balance *tb, struct item_head *ih,	/* item header of inserted item (this is on little endian) */
			const char *body,	/* body of inserted item or bytes to paste */
			int flag,	/* i - insert, d - delete, c - cut, p - paste
					   (see comment to do_balance) */
			struct item_head *insert_key,	/* in our processing of one level we sometimes determine what
							   must be inserted into the next higher level.
This insertion 255 consists of a key or two keys and their corresponding 256 pointers */ 257 struct buffer_head **insert_ptr /* inserted node-ptrs for the next level */ 258 ) 259{ 260 struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path); 261 int item_pos = PATH_LAST_POSITION(tb->tb_path); /* index into the array of item headers in S[0] 262 of the affected item */ 263 struct buffer_info bi; 264 struct buffer_head *S_new[2]; /* new nodes allocated to hold what could not fit into S */ 265 int snum[2]; /* number of items that will be placed 266 into S_new (includes partially shifted 267 items) */ 268 int sbytes[2]; /* if an item is partially shifted into S_new then 269 if it is a directory item 270 it is the number of entries from the item that are shifted into S_new 271 else 272 it is the number of bytes from the item that are shifted into S_new 273 */ 274 int n, i; 275 int ret_val; 276 int pos_in_item; 277 int zeros_num; 278 279 PROC_INFO_INC(tb->tb_sb, balance_at[0]); 280 281 /* Make balance in case insert_size[0] < 0 */ 282 if (tb->insert_size[0] < 0) 283 return balance_leaf_when_delete(tb, flag); 284 285 zeros_num = 0; 286 if (flag == M_INSERT && body == 0) 287 zeros_num = ih_item_len(ih); 288 289 pos_in_item = tb->tb_path->pos_in_item; 290 /* for indirect item pos_in_item is measured in unformatted node 291 pointers. 
Recalculate to bytes */ 292 if (flag != M_INSERT 293 && is_indirect_le_ih(B_N_PITEM_HEAD(tbS0, item_pos))) 294 pos_in_item *= UNFM_P_SIZE; 295 296 if (tb->lnum[0] > 0) { 297 /* Shift lnum[0] items from S[0] to the left neighbor L[0] */ 298 if (item_pos < tb->lnum[0]) { 299 /* new item or it part falls to L[0], shift it too */ 300 n = B_NR_ITEMS(tb->L[0]); 301 302 switch (flag) { 303 case M_INSERT: /* insert item into L[0] */ 304 305 if (item_pos == tb->lnum[0] - 1 306 && tb->lbytes != -1) { 307 /* part of new item falls into L[0] */ 308 int new_item_len; 309 int version; 310 311 ret_val = 312 leaf_shift_left(tb, tb->lnum[0] - 1, 313 -1); 314 315 /* Calculate item length to insert to S[0] */ 316 new_item_len = 317 ih_item_len(ih) - tb->lbytes; 318 /* Calculate and check item length to insert to L[0] */ 319 put_ih_item_len(ih, 320 ih_item_len(ih) - 321 new_item_len); 322 323 RFALSE(ih_item_len(ih) <= 0, 324 "PAP-12080: there is nothing to insert into L[0]: ih_item_len=%d", 325 ih_item_len(ih)); 326 327 /* Insert new item into L[0] */ 328 bi.tb = tb; 329 bi.bi_bh = tb->L[0]; 330 bi.bi_parent = tb->FL[0]; 331 bi.bi_position = 332 get_left_neighbor_position(tb, 0); 333 leaf_insert_into_buf(&bi, 334 n + item_pos - 335 ret_val, ih, body, 336 zeros_num > 337 ih_item_len(ih) ? 338 ih_item_len(ih) : 339 zeros_num); 340 341 version = ih_version(ih); 342 343 /* Calculate key component, item length and body to insert into S[0] */ 344 set_le_ih_k_offset(ih, 345 le_ih_k_offset(ih) + 346 (tb-> 347 lbytes << 348 (is_indirect_le_ih 349 (ih) ? 
tb->tb_sb-> 350 s_blocksize_bits - 351 UNFM_P_SHIFT : 352 0))); 353 354 put_ih_item_len(ih, new_item_len); 355 if (tb->lbytes > zeros_num) { 356 body += 357 (tb->lbytes - zeros_num); 358 zeros_num = 0; 359 } else 360 zeros_num -= tb->lbytes; 361 362 RFALSE(ih_item_len(ih) <= 0, 363 "PAP-12085: there is nothing to insert into S[0]: ih_item_len=%d", 364 ih_item_len(ih)); 365 } else { 366 /* new item in whole falls into L[0] */ 367 /* Shift lnum[0]-1 items to L[0] */ 368 ret_val = 369 leaf_shift_left(tb, tb->lnum[0] - 1, 370 tb->lbytes); 371 /* Insert new item into L[0] */ 372 bi.tb = tb; 373 bi.bi_bh = tb->L[0]; 374 bi.bi_parent = tb->FL[0]; 375 bi.bi_position = 376 get_left_neighbor_position(tb, 0); 377 leaf_insert_into_buf(&bi, 378 n + item_pos - 379 ret_val, ih, body, 380 zeros_num); 381 tb->insert_size[0] = 0; 382 zeros_num = 0; 383 } 384 break; 385 386 case M_PASTE: /* append item in L[0] */ 387 388 if (item_pos == tb->lnum[0] - 1 389 && tb->lbytes != -1) { 390 /* we must shift the part of the appended item */ 391 if (is_direntry_le_ih 392 (B_N_PITEM_HEAD(tbS0, item_pos))) { 393 394 RFALSE(zeros_num, 395 "PAP-12090: invalid parameter in case of a directory"); 396 /* directory item */ 397 if (tb->lbytes > pos_in_item) { 398 /* new directory entry falls into L[0] */ 399 struct item_head 400 *pasted; 401 int l_pos_in_item = 402 pos_in_item; 403 404 /* Shift lnum[0] - 1 items in whole. 
Shift lbytes - 1 entries from given directory item */ 405 ret_val = 406 leaf_shift_left(tb, 407 tb-> 408 lnum 409 [0], 410 tb-> 411 lbytes 412 - 413 1); 414 if (ret_val 415 && !item_pos) { 416 pasted = 417 B_N_PITEM_HEAD 418 (tb->L[0], 419 B_NR_ITEMS 420 (tb-> 421 L[0]) - 422 1); 423 l_pos_in_item += 424 I_ENTRY_COUNT 425 (pasted) - 426 (tb-> 427 lbytes - 428 1); 429 } 430 431 /* Append given directory entry to directory item */ 432 bi.tb = tb; 433 bi.bi_bh = tb->L[0]; 434 bi.bi_parent = 435 tb->FL[0]; 436 bi.bi_position = 437 get_left_neighbor_position 438 (tb, 0); 439 leaf_paste_in_buffer 440 (&bi, 441 n + item_pos - 442 ret_val, 443 l_pos_in_item, 444 tb->insert_size[0], 445 body, zeros_num); 446 447 /* previous string prepared space for pasting new entry, following string pastes this entry */ 448 449 /* when we have merge directory item, pos_in_item has been changed too */ 450 451 /* paste new directory entry. 1 is entry number */ 452 leaf_paste_entries(bi. 453 bi_bh, 454 n + 455 item_pos 456 - 457 ret_val, 458 l_pos_in_item, 459 1, 460 (struct 461 reiserfs_de_head 462 *) 463 body, 464 body 465 + 466 DEH_SIZE, 467 tb-> 468 insert_size 469 [0] 470 ); 471 tb->insert_size[0] = 0; 472 } else { 473 /* new directory item doesn't fall into L[0] */ 474 /* Shift lnum[0]-1 items in whole. Shift lbytes directory entries from directory item number lnum[0] */ 475 leaf_shift_left(tb, 476 tb-> 477 lnum[0], 478 tb-> 479 lbytes); 480 } 481 /* Calculate new position to append in item body */ 482 pos_in_item -= tb->lbytes; 483 } else { 484 /* regular object */ 485 RFALSE(tb->lbytes <= 0, 486 "PAP-12095: there is nothing to shift to L[0]. 
lbytes=%d", 487 tb->lbytes); 488 RFALSE(pos_in_item != 489 ih_item_len 490 (B_N_PITEM_HEAD 491 (tbS0, item_pos)), 492 "PAP-12100: incorrect position to paste: item_len=%d, pos_in_item=%d", 493 ih_item_len 494 (B_N_PITEM_HEAD 495 (tbS0, item_pos)), 496 pos_in_item); 497 498 if (tb->lbytes >= pos_in_item) { 499 /* appended item will be in L[0] in whole */ 500 int l_n; 501 502 /* this bytes number must be appended to the last item of L[h] */ 503 l_n = 504 tb->lbytes - 505 pos_in_item; 506 507 /* Calculate new insert_size[0] */ 508 tb->insert_size[0] -= 509 l_n; 510 511 RFALSE(tb-> 512 insert_size[0] <= 513 0, 514 "PAP-12105: there is nothing to paste into L[0]. insert_size=%d", 515 tb-> 516 insert_size[0]); 517 ret_val = 518 leaf_shift_left(tb, 519 tb-> 520 lnum 521 [0], 522 ih_item_len 523 (B_N_PITEM_HEAD 524 (tbS0, 525 item_pos))); 526 /* Append to body of item in L[0] */ 527 bi.tb = tb; 528 bi.bi_bh = tb->L[0]; 529 bi.bi_parent = 530 tb->FL[0]; 531 bi.bi_position = 532 get_left_neighbor_position 533 (tb, 0); 534 leaf_paste_in_buffer 535 (&bi, 536 n + item_pos - 537 ret_val, 538 ih_item_len 539 (B_N_PITEM_HEAD 540 (tb->L[0], 541 n + item_pos - 542 ret_val)), l_n, 543 body, 544 zeros_num > 545 l_n ? 
l_n : 546 zeros_num); 547 /* 0-th item in S0 can be only of DIRECT type when l_n != 0 */ 548 { 549 int version; 550 int temp_l = 551 l_n; 552 553 RFALSE 554 (ih_item_len 555 (B_N_PITEM_HEAD 556 (tbS0, 557 0)), 558 "PAP-12106: item length must be 0"); 559 RFALSE 560 (comp_short_le_keys 561 (B_N_PKEY 562 (tbS0, 0), 563 B_N_PKEY 564 (tb->L[0], 565 n + 566 item_pos 567 - 568 ret_val)), 569 "PAP-12107: items must be of the same file"); 570 if (is_indirect_le_ih(B_N_PITEM_HEAD(tb->L[0], n + item_pos - ret_val))) { 571 temp_l = 572 l_n 573 << 574 (tb-> 575 tb_sb-> 576 s_blocksize_bits 577 - 578 UNFM_P_SHIFT); 579 } 580 /* update key of first item in S0 */ 581 version = 582 ih_version 583 (B_N_PITEM_HEAD 584 (tbS0, 0)); 585 set_le_key_k_offset 586 (version, 587 B_N_PKEY 588 (tbS0, 0), 589 le_key_k_offset 590 (version, 591 B_N_PKEY 592 (tbS0, 593 0)) + 594 temp_l); 595 /* update left delimiting key */ 596 set_le_key_k_offset 597 (version, 598 B_N_PDELIM_KEY 599 (tb-> 600 CFL[0], 601 tb-> 602 lkey[0]), 603 le_key_k_offset 604 (version, 605 B_N_PDELIM_KEY 606 (tb-> 607 CFL[0], 608 tb-> 609 lkey[0])) 610 + temp_l); 611 } 612 613 /* Calculate new body, position in item and insert_size[0] */ 614 if (l_n > zeros_num) { 615 body += 616 (l_n - 617 zeros_num); 618 zeros_num = 0; 619 } else 620 zeros_num -= 621 l_n; 622 pos_in_item = 0; 623 624 RFALSE 625 (comp_short_le_keys 626 (B_N_PKEY(tbS0, 0), 627 B_N_PKEY(tb->L[0], 628 B_NR_ITEMS 629 (tb-> 630 L[0]) - 631 1)) 632 || 633 !op_is_left_mergeable 634 (B_N_PKEY(tbS0, 0), 635 tbS0->b_size) 636 || 637 !op_is_left_mergeable 638 (B_N_PDELIM_KEY 639 (tb->CFL[0], 640 tb->lkey[0]), 641 tbS0->b_size), 642 "PAP-12120: item must be merge-able with left neighboring item"); 643 } else { /* only part of the appended item will be in L[0] */ 644 645 /* Calculate position in item for append in S[0] */ 646 pos_in_item -= 647 tb->lbytes; 648 649 RFALSE(pos_in_item <= 0, 650 "PAP-12125: no place for paste. 
pos_in_item=%d", 651 pos_in_item); 652 653 /* Shift lnum[0] - 1 items in whole. Shift lbytes - 1 byte from item number lnum[0] */ 654 leaf_shift_left(tb, 655 tb-> 656 lnum[0], 657 tb-> 658 lbytes); 659 } 660 } 661 } else { /* appended item will be in L[0] in whole */ 662 663 struct item_head *pasted; 664 665 if (!item_pos && op_is_left_mergeable(B_N_PKEY(tbS0, 0), tbS0->b_size)) { /* if we paste into first item of S[0] and it is left mergable */ 666 /* then increment pos_in_item by the size of the last item in L[0] */ 667 pasted = 668 B_N_PITEM_HEAD(tb->L[0], 669 n - 1); 670 if (is_direntry_le_ih(pasted)) 671 pos_in_item += 672 ih_entry_count 673 (pasted); 674 else 675 pos_in_item += 676 ih_item_len(pasted); 677 } 678 679 /* Shift lnum[0] - 1 items in whole. Shift lbytes - 1 byte from item number lnum[0] */ 680 ret_val = 681 leaf_shift_left(tb, tb->lnum[0], 682 tb->lbytes); 683 /* Append to body of item in L[0] */ 684 bi.tb = tb; 685 bi.bi_bh = tb->L[0]; 686 bi.bi_parent = tb->FL[0]; 687 bi.bi_position = 688 get_left_neighbor_position(tb, 0); 689 leaf_paste_in_buffer(&bi, 690 n + item_pos - 691 ret_val, 692 pos_in_item, 693 tb->insert_size[0], 694 body, zeros_num); 695 696 /* if appended item is directory, paste entry */ 697 pasted = 698 B_N_PITEM_HEAD(tb->L[0], 699 n + item_pos - 700 ret_val); 701 if (is_direntry_le_ih(pasted)) 702 leaf_paste_entries(bi.bi_bh, 703 n + 704 item_pos - 705 ret_val, 706 pos_in_item, 707 1, 708 (struct 709 reiserfs_de_head 710 *)body, 711 body + 712 DEH_SIZE, 713 tb-> 714 insert_size 715 [0] 716 ); 717 /* if appended item is indirect item, put unformatted node into un list */ 718 if (is_indirect_le_ih(pasted)) 719 set_ih_free_space(pasted, 0); 720 tb->insert_size[0] = 0; 721 zeros_num = 0; 722 } 723 break; 724 default: /* cases d and t */ 725 reiserfs_panic(tb->tb_sb, 726 "PAP-12130: balance_leaf: lnum > 0: unexpectable mode: %s(%d)", 727 (flag == 728 M_DELETE) ? "DELETE" : ((flag == 729 M_CUT) 730 ? 
"CUT" 731 : 732 "UNKNOWN"), 733 flag); 734 } 735 } else { 736 /* new item doesn't fall into L[0] */ 737 leaf_shift_left(tb, tb->lnum[0], tb->lbytes); 738 } 739 } 740 741 /* tb->lnum[0] > 0 */ 742 /* Calculate new item position */ 743 item_pos -= (tb->lnum[0] - ((tb->lbytes != -1) ? 1 : 0)); 744 745 if (tb->rnum[0] > 0) { 746 /* shift rnum[0] items from S[0] to the right neighbor R[0] */ 747 n = B_NR_ITEMS(tbS0); 748 switch (flag) { 749 750 case M_INSERT: /* insert item */ 751 if (n - tb->rnum[0] < item_pos) { /* new item or its part falls to R[0] */ 752 if (item_pos == n - tb->rnum[0] + 1 && tb->rbytes != -1) { /* part of new item falls into R[0] */ 753 loff_t old_key_comp, old_len, 754 r_zeros_number; 755 const char *r_body; 756 int version; 757 loff_t offset; 758 759 leaf_shift_right(tb, tb->rnum[0] - 1, 760 -1); 761 762 version = ih_version(ih); 763 /* Remember key component and item length */ 764 old_key_comp = le_ih_k_offset(ih); 765 old_len = ih_item_len(ih); 766 767 /* Calculate key component and item length to insert into R[0] */ 768 offset = 769 le_ih_k_offset(ih) + 770 ((old_len - 771 tb-> 772 rbytes) << (is_indirect_le_ih(ih) 773 ? 
tb->tb_sb-> 774 s_blocksize_bits - 775 UNFM_P_SHIFT : 0)); 776 set_le_ih_k_offset(ih, offset); 777 put_ih_item_len(ih, tb->rbytes); 778 /* Insert part of the item into R[0] */ 779 bi.tb = tb; 780 bi.bi_bh = tb->R[0]; 781 bi.bi_parent = tb->FR[0]; 782 bi.bi_position = 783 get_right_neighbor_position(tb, 0); 784 if ((old_len - tb->rbytes) > zeros_num) { 785 r_zeros_number = 0; 786 r_body = 787 body + (old_len - 788 tb->rbytes) - 789 zeros_num; 790 } else { 791 r_body = body; 792 r_zeros_number = 793 zeros_num - (old_len - 794 tb->rbytes); 795 zeros_num -= r_zeros_number; 796 } 797 798 leaf_insert_into_buf(&bi, 0, ih, r_body, 799 r_zeros_number); 800 801 /* Replace right delimiting key by first key in R[0] */ 802 replace_key(tb, tb->CFR[0], tb->rkey[0], 803 tb->R[0], 0); 804 805 /* Calculate key component and item length to insert into S[0] */ 806 set_le_ih_k_offset(ih, old_key_comp); 807 put_ih_item_len(ih, 808 old_len - tb->rbytes); 809 810 tb->insert_size[0] -= tb->rbytes; 811 812 } else { /* whole new item falls into R[0] */ 813 814 /* Shift rnum[0]-1 items to R[0] */ 815 ret_val = 816 leaf_shift_right(tb, 817 tb->rnum[0] - 1, 818 tb->rbytes); 819 /* Insert new item into R[0] */ 820 bi.tb = tb; 821 bi.bi_bh = tb->R[0]; 822 bi.bi_parent = tb->FR[0]; 823 bi.bi_position = 824 get_right_neighbor_position(tb, 0); 825 leaf_insert_into_buf(&bi, 826 item_pos - n + 827 tb->rnum[0] - 1, 828 ih, body, 829 zeros_num); 830 831 if (item_pos - n + tb->rnum[0] - 1 == 0) { 832 replace_key(tb, tb->CFR[0], 833 tb->rkey[0], 834 tb->R[0], 0); 835 836 } 837 zeros_num = tb->insert_size[0] = 0; 838 } 839 } else { /* new item or part of it doesn't fall into R[0] */ 840 841 leaf_shift_right(tb, tb->rnum[0], tb->rbytes); 842 } 843 break; 844 845 case M_PASTE: /* append item */ 846 847 if (n - tb->rnum[0] <= item_pos) { /* pasted item or part of it falls to R[0] */ 848 if (item_pos == n - tb->rnum[0] && tb->rbytes != -1) { /* we must shift the part of the appended item */ 849 if 
(is_direntry_le_ih(B_N_PITEM_HEAD(tbS0, item_pos))) { /* we append to directory item */ 850 int entry_count; 851 852 RFALSE(zeros_num, 853 "PAP-12145: invalid parameter in case of a directory"); 854 entry_count = 855 I_ENTRY_COUNT(B_N_PITEM_HEAD 856 (tbS0, 857 item_pos)); 858 if (entry_count - tb->rbytes < 859 pos_in_item) 860 /* new directory entry falls into R[0] */ 861 { 862 int paste_entry_position; 863 864 RFALSE(tb->rbytes - 1 >= 865 entry_count 866 || !tb-> 867 insert_size[0], 868 "PAP-12150: no enough of entries to shift to R[0]: rbytes=%d, entry_count=%d", 869 tb->rbytes, 870 entry_count); 871 /* Shift rnum[0]-1 items in whole. Shift rbytes-1 directory entries from directory item number rnum[0] */ 872 leaf_shift_right(tb, 873 tb-> 874 rnum 875 [0], 876 tb-> 877 rbytes 878 - 1); 879 /* Paste given directory entry to directory item */ 880 paste_entry_position = 881 pos_in_item - 882 entry_count + 883 tb->rbytes - 1; 884 bi.tb = tb; 885 bi.bi_bh = tb->R[0]; 886 bi.bi_parent = 887 tb->FR[0]; 888 bi.bi_position = 889 get_right_neighbor_position 890 (tb, 0); 891 leaf_paste_in_buffer 892 (&bi, 0, 893 paste_entry_position, 894 tb->insert_size[0], 895 body, zeros_num); 896 /* paste entry */ 897 leaf_paste_entries(bi. 
898 bi_bh, 899 0, 900 paste_entry_position, 901 1, 902 (struct 903 reiserfs_de_head 904 *) 905 body, 906 body 907 + 908 DEH_SIZE, 909 tb-> 910 insert_size 911 [0] 912 ); 913 914 if (paste_entry_position 915 == 0) { 916 /* change delimiting keys */ 917 replace_key(tb, 918 tb-> 919 CFR 920 [0], 921 tb-> 922 rkey 923 [0], 924 tb-> 925 R 926 [0], 927 0); 928 } 929 930 tb->insert_size[0] = 0; 931 pos_in_item++; 932 } else { /* new directory entry doesn't fall into R[0] */ 933 934 leaf_shift_right(tb, 935 tb-> 936 rnum 937 [0], 938 tb-> 939 rbytes); 940 } 941 } else { /* regular object */ 942 943 int n_shift, n_rem, 944 r_zeros_number; 945 const char *r_body; 946 947 /* Calculate number of bytes which must be shifted from appended item */ 948 if ((n_shift = 949 tb->rbytes - 950 tb->insert_size[0]) < 0) 951 n_shift = 0; 952 953 RFALSE(pos_in_item != 954 ih_item_len 955 (B_N_PITEM_HEAD 956 (tbS0, item_pos)), 957 "PAP-12155: invalid position to paste. ih_item_len=%d, pos_in_item=%d", 958 pos_in_item, 959 ih_item_len 960 (B_N_PITEM_HEAD 961 (tbS0, item_pos))); 962 963 leaf_shift_right(tb, 964 tb->rnum[0], 965 n_shift); 966 /* Calculate number of bytes which must remain in body after appending to R[0] */ 967 if ((n_rem = 968 tb->insert_size[0] - 969 tb->rbytes) < 0) 970 n_rem = 0; 971 972 { 973 int version; 974 unsigned long temp_rem = 975 n_rem; 976 977 version = 978 ih_version 979 (B_N_PITEM_HEAD 980 (tb->R[0], 0)); 981 if (is_indirect_le_key 982 (version, 983 B_N_PKEY(tb->R[0], 984 0))) { 985 temp_rem = 986 n_rem << 987 (tb->tb_sb-> 988 s_blocksize_bits 989 - 990 UNFM_P_SHIFT); 991 } 992 set_le_key_k_offset 993 (version, 994 B_N_PKEY(tb->R[0], 995 0), 996 le_key_k_offset 997 (version, 998 B_N_PKEY(tb->R[0], 999 0)) + 1000 temp_rem); 1001 set_le_key_k_offset 1002 (version, 1003 B_N_PDELIM_KEY(tb-> 1004 CFR 1005 [0], 1006 tb-> 1007 rkey 1008 [0]), 1009 le_key_k_offset 1010 (version, 1011 B_N_PDELIM_KEY 1012 (tb->CFR[0], 1013 tb->rkey[0])) + 1014 temp_rem); 1015 } 1016/* 
k_offset (B_N_PKEY(tb->R[0],0)) += n_rem; 1017 k_offset (B_N_PDELIM_KEY(tb->CFR[0],tb->rkey[0])) += n_rem;*/ 1018 do_balance_mark_internal_dirty 1019 (tb, tb->CFR[0], 0); 1020 1021 /* Append part of body into R[0] */ 1022 bi.tb = tb; 1023 bi.bi_bh = tb->R[0]; 1024 bi.bi_parent = tb->FR[0]; 1025 bi.bi_position = 1026 get_right_neighbor_position 1027 (tb, 0); 1028 if (n_rem > zeros_num) { 1029 r_zeros_number = 0; 1030 r_body = 1031 body + n_rem - 1032 zeros_num; 1033 } else { 1034 r_body = body; 1035 r_zeros_number = 1036 zeros_num - n_rem; 1037 zeros_num -= 1038 r_zeros_number; 1039 } 1040 1041 leaf_paste_in_buffer(&bi, 0, 1042 n_shift, 1043 tb-> 1044 insert_size 1045 [0] - 1046 n_rem, 1047 r_body, 1048 r_zeros_number); 1049 1050 if (is_indirect_le_ih 1051 (B_N_PITEM_HEAD 1052 (tb->R[0], 0))) { 1053 set_ih_free_space 1054 (B_N_PITEM_HEAD 1055 (tb->R[0], 0), 0); 1056 } 1057 tb->insert_size[0] = n_rem; 1058 if (!n_rem) 1059 pos_in_item++; 1060 } 1061 } else { /* pasted item in whole falls into R[0] */ 1062 1063 struct item_head *pasted; 1064 1065 ret_val = 1066 leaf_shift_right(tb, tb->rnum[0], 1067 tb->rbytes); 1068 /* append item in R[0] */ 1069 if (pos_in_item >= 0) { 1070 bi.tb = tb; 1071 bi.bi_bh = tb->R[0]; 1072 bi.bi_parent = tb->FR[0]; 1073 bi.bi_position = 1074 get_right_neighbor_position 1075 (tb, 0); 1076 leaf_paste_in_buffer(&bi, 1077 item_pos - 1078 n + 1079 tb-> 1080 rnum[0], 1081 pos_in_item, 1082 tb-> 1083 insert_size 1084 [0], body, 1085 zeros_num); 1086 } 1087 1088 /* paste new entry, if item is directory item */ 1089 pasted = 1090 B_N_PITEM_HEAD(tb->R[0], 1091 item_pos - n + 1092 tb->rnum[0]); 1093 if (is_direntry_le_ih(pasted) 1094 && pos_in_item >= 0) { 1095 leaf_paste_entries(bi.bi_bh, 1096 item_pos - 1097 n + 1098 tb->rnum[0], 1099 pos_in_item, 1100 1, 1101 (struct 1102 reiserfs_de_head 1103 *)body, 1104 body + 1105 DEH_SIZE, 1106 tb-> 1107 insert_size 1108 [0] 1109 ); 1110 if (!pos_in_item) { 1111 1112 RFALSE(item_pos - n + 1113 tb->rnum[0], 
1114 "PAP-12165: directory item must be first item of node when pasting is in 0th position"); 1115 1116 /* update delimiting keys */ 1117 replace_key(tb, 1118 tb->CFR[0], 1119 tb->rkey[0], 1120 tb->R[0], 1121 0); 1122 } 1123 } 1124 1125 if (is_indirect_le_ih(pasted)) 1126 set_ih_free_space(pasted, 0); 1127 zeros_num = tb->insert_size[0] = 0; 1128 } 1129 } else { /* new item doesn't fall into R[0] */ 1130 1131 leaf_shift_right(tb, tb->rnum[0], tb->rbytes); 1132 } 1133 break; 1134 default: /* cases d and t */ 1135 reiserfs_panic(tb->tb_sb, 1136 "PAP-12175: balance_leaf: rnum > 0: unexpectable mode: %s(%d)", 1137 (flag == 1138 M_DELETE) ? "DELETE" : ((flag == 1139 M_CUT) ? "CUT" 1140 : "UNKNOWN"), 1141 flag); 1142 } 1143 1144 } 1145 1146 /* tb->rnum[0] > 0 */ 1147 RFALSE(tb->blknum[0] > 3, 1148 "PAP-12180: blknum can not be %d. It must be <= 3", 1149 tb->blknum[0]); 1150 RFALSE(tb->blknum[0] < 0, 1151 "PAP-12185: blknum can not be %d. It must be >= 0", 1152 tb->blknum[0]); 1153 1154 /* if while adding to a node we discover that it is possible to split 1155 it in two, and merge the left part into the left neighbor and the 1156 right part into the right neighbor, eliminating the node */ 1157 if (tb->blknum[0] == 0) { /* node S[0] is empty now */ 1158 1159 RFALSE(!tb->lnum[0] || !tb->rnum[0], 1160 "PAP-12190: lnum and rnum must not be zero"); 1161 /* if insertion was done before 0-th position in R[0], right 1162 delimiting key of the tb->L[0]'s and left delimiting key are 1163 not set correctly */ 1164 if (tb->CFL[0]) { 1165 if (!tb->CFR[0]) 1166 reiserfs_panic(tb->tb_sb, 1167 "vs-12195: balance_leaf: CFR not initialized"); 1168 copy_key(B_N_PDELIM_KEY(tb->CFL[0], tb->lkey[0]), 1169 B_N_PDELIM_KEY(tb->CFR[0], tb->rkey[0])); 1170 do_balance_mark_internal_dirty(tb, tb->CFL[0], 0); 1171 } 1172 1173 reiserfs_invalidate_buffer(tb, tbS0); 1174 return 0; 1175 } 1176 1177 /* Fill new nodes that appear in place of S[0] */ 1178 1179 /* I am told that this copying is because we 
need an array to enable 1180 the looping code. -Hans */ 1181 snum[0] = tb->s1num, snum[1] = tb->s2num; 1182 sbytes[0] = tb->s1bytes; 1183 sbytes[1] = tb->s2bytes; 1184 for (i = tb->blknum[0] - 2; i >= 0; i--) { 1185 1186 RFALSE(!snum[i], "PAP-12200: snum[%d] == %d. Must be > 0", i, 1187 snum[i]); 1188 1189 /* here we shift from S to S_new nodes */ 1190 1191 S_new[i] = get_FEB(tb); 1192 1193 /* initialized block type and tree level */ 1194 set_blkh_level(B_BLK_HEAD(S_new[i]), DISK_LEAF_NODE_LEVEL); 1195 1196 n = B_NR_ITEMS(tbS0); 1197 1198 switch (flag) { 1199 case M_INSERT: /* insert item */ 1200 1201 if (n - snum[i] < item_pos) { /* new item or it's part falls to first new node S_new[i] */ 1202 if (item_pos == n - snum[i] + 1 && sbytes[i] != -1) { /* part of new item falls into S_new[i] */ 1203 int old_key_comp, old_len, 1204 r_zeros_number; 1205 const char *r_body; 1206 int version; 1207 1208 /* Move snum[i]-1 items from S[0] to S_new[i] */ 1209 leaf_move_items(LEAF_FROM_S_TO_SNEW, tb, 1210 snum[i] - 1, -1, 1211 S_new[i]); 1212 /* Remember key component and item length */ 1213 version = ih_version(ih); 1214 old_key_comp = le_ih_k_offset(ih); 1215 old_len = ih_item_len(ih); 1216 1217 /* Calculate key component and item length to insert into S_new[i] */ 1218 set_le_ih_k_offset(ih, 1219 le_ih_k_offset(ih) + 1220 ((old_len - 1221 sbytes[i]) << 1222 (is_indirect_le_ih 1223 (ih) ? 
tb->tb_sb-> 1224 s_blocksize_bits - 1225 UNFM_P_SHIFT : 1226 0))); 1227 1228 put_ih_item_len(ih, sbytes[i]); 1229 1230 /* Insert part of the item into S_new[i] before 0-th item */ 1231 bi.tb = tb; 1232 bi.bi_bh = S_new[i]; 1233 bi.bi_parent = NULL; 1234 bi.bi_position = 0; 1235 1236 if ((old_len - sbytes[i]) > zeros_num) { 1237 r_zeros_number = 0; 1238 r_body = 1239 body + (old_len - 1240 sbytes[i]) - 1241 zeros_num; 1242 } else { 1243 r_body = body; 1244 r_zeros_number = 1245 zeros_num - (old_len - 1246 sbytes[i]); 1247 zeros_num -= r_zeros_number; 1248 } 1249 1250 leaf_insert_into_buf(&bi, 0, ih, r_body, 1251 r_zeros_number); 1252 1253 /* Calculate key component and item length to insert into S[i] */ 1254 set_le_ih_k_offset(ih, old_key_comp); 1255 put_ih_item_len(ih, 1256 old_len - sbytes[i]); 1257 tb->insert_size[0] -= sbytes[i]; 1258 } else { /* whole new item falls into S_new[i] */ 1259 1260 /* Shift snum[0] - 1 items to S_new[i] (sbytes[i] of split item) */ 1261 leaf_move_items(LEAF_FROM_S_TO_SNEW, tb, 1262 snum[i] - 1, sbytes[i], 1263 S_new[i]); 1264 1265 /* Insert new item into S_new[i] */ 1266 bi.tb = tb; 1267 bi.bi_bh = S_new[i]; 1268 bi.bi_parent = NULL; 1269 bi.bi_position = 0; 1270 leaf_insert_into_buf(&bi, 1271 item_pos - n + 1272 snum[i] - 1, ih, 1273 body, zeros_num); 1274 1275 zeros_num = tb->insert_size[0] = 0; 1276 } 1277 } 1278 1279 else { /* new item or it part don't falls into S_new[i] */ 1280 1281 leaf_move_items(LEAF_FROM_S_TO_SNEW, tb, 1282 snum[i], sbytes[i], S_new[i]); 1283 } 1284 break; 1285 1286 case M_PASTE: /* append item */ 1287 1288 if (n - snum[i] <= item_pos) { /* pasted item or part if it falls to S_new[i] */ 1289 if (item_pos == n - snum[i] && sbytes[i] != -1) { /* we must shift part of the appended item */ 1290 struct item_head *aux_ih; 1291 1292 RFALSE(ih, "PAP-12210: ih must be 0"); 1293 1294 if (is_direntry_le_ih 1295 (aux_ih = 1296 B_N_PITEM_HEAD(tbS0, item_pos))) { 1297 /* we append to directory item */ 1298 1299 int 
entry_count; 1300 1301 entry_count = 1302 ih_entry_count(aux_ih); 1303 1304 if (entry_count - sbytes[i] < 1305 pos_in_item 1306 && pos_in_item <= 1307 entry_count) { 1308 /* new directory entry falls into S_new[i] */ 1309 1310 RFALSE(!tb-> 1311 insert_size[0], 1312 "PAP-12215: insert_size is already 0"); 1313 RFALSE(sbytes[i] - 1 >= 1314 entry_count, 1315 "PAP-12220: there are no so much entries (%d), only %d", 1316 sbytes[i] - 1, 1317 entry_count); 1318 1319 /* Shift snum[i]-1 items in whole. Shift sbytes[i] directory entries from directory item number snum[i] */ 1320 leaf_move_items 1321 (LEAF_FROM_S_TO_SNEW, 1322 tb, snum[i], 1323 sbytes[i] - 1, 1324 S_new[i]); 1325 /* Paste given directory entry to directory item */ 1326 bi.tb = tb; 1327 bi.bi_bh = S_new[i]; 1328 bi.bi_parent = NULL; 1329 bi.bi_position = 0; 1330 leaf_paste_in_buffer 1331 (&bi, 0, 1332 pos_in_item - 1333 entry_count + 1334 sbytes[i] - 1, 1335 tb->insert_size[0], 1336 body, zeros_num); 1337 /* paste new directory entry */ 1338 leaf_paste_entries(bi. 
1339 bi_bh, 1340 0, 1341 pos_in_item 1342 - 1343 entry_count 1344 + 1345 sbytes 1346 [i] - 1347 1, 1, 1348 (struct 1349 reiserfs_de_head 1350 *) 1351 body, 1352 body 1353 + 1354 DEH_SIZE, 1355 tb-> 1356 insert_size 1357 [0] 1358 ); 1359 tb->insert_size[0] = 0; 1360 pos_in_item++; 1361 } else { /* new directory entry doesn't fall into S_new[i] */ 1362 leaf_move_items 1363 (LEAF_FROM_S_TO_SNEW, 1364 tb, snum[i], 1365 sbytes[i], 1366 S_new[i]); 1367 } 1368 } else { /* regular object */ 1369 1370 int n_shift, n_rem, 1371 r_zeros_number; 1372 const char *r_body; 1373 1374 RFALSE(pos_in_item != 1375 ih_item_len 1376 (B_N_PITEM_HEAD 1377 (tbS0, item_pos)) 1378 || tb->insert_size[0] <= 1379 0, 1380 "PAP-12225: item too short or insert_size <= 0"); 1381 1382 /* Calculate number of bytes which must be shifted from appended item */ 1383 n_shift = 1384 sbytes[i] - 1385 tb->insert_size[0]; 1386 if (n_shift < 0) 1387 n_shift = 0; 1388 leaf_move_items 1389 (LEAF_FROM_S_TO_SNEW, tb, 1390 snum[i], n_shift, 1391 S_new[i]); 1392 1393 /* Calculate number of bytes which must remain in body after append to S_new[i] */ 1394 n_rem = 1395 tb->insert_size[0] - 1396 sbytes[i]; 1397 if (n_rem < 0) 1398 n_rem = 0; 1399 /* Append part of body into S_new[0] */ 1400 bi.tb = tb; 1401 bi.bi_bh = S_new[i]; 1402 bi.bi_parent = NULL; 1403 bi.bi_position = 0; 1404 1405 if (n_rem > zeros_num) { 1406 r_zeros_number = 0; 1407 r_body = 1408 body + n_rem - 1409 zeros_num; 1410 } else { 1411 r_body = body; 1412 r_zeros_number = 1413 zeros_num - n_rem; 1414 zeros_num -= 1415 r_zeros_number; 1416 } 1417 1418 leaf_paste_in_buffer(&bi, 0, 1419 n_shift, 1420 tb-> 1421 insert_size 1422 [0] - 1423 n_rem, 1424 r_body, 1425 r_zeros_number); 1426 { 1427 struct item_head *tmp; 1428 1429 tmp = 1430 B_N_PITEM_HEAD(S_new 1431 [i], 1432 0); 1433 if (is_indirect_le_ih 1434 (tmp)) { 1435 set_ih_free_space 1436 (tmp, 0); 1437 set_le_ih_k_offset 1438 (tmp, 1439 le_ih_k_offset 1440 (tmp) + 1441 (n_rem << 1442 (tb-> 1443 tb_sb-> 
1444 s_blocksize_bits 1445 - 1446 UNFM_P_SHIFT))); 1447 } else { 1448 set_le_ih_k_offset 1449 (tmp, 1450 le_ih_k_offset 1451 (tmp) + 1452 n_rem); 1453 } 1454 } 1455 1456 tb->insert_size[0] = n_rem; 1457 if (!n_rem) 1458 pos_in_item++; 1459 } 1460 } else 1461 /* item falls wholly into S_new[i] */ 1462 { 1463 int ret_val; 1464 struct item_head *pasted; 1465 1466#ifdef CONFIG_REISERFS_CHECK 1467 struct item_head *ih = 1468 B_N_PITEM_HEAD(tbS0, item_pos); 1469 1470 if (!is_direntry_le_ih(ih) 1471 && (pos_in_item != ih_item_len(ih) 1472 || tb->insert_size[0] <= 0)) 1473 reiserfs_panic(tb->tb_sb, 1474 "PAP-12235: balance_leaf: pos_in_item must be equal to ih_item_len"); 1475#endif /* CONFIG_REISERFS_CHECK */ 1476 1477 ret_val = 1478 leaf_move_items(LEAF_FROM_S_TO_SNEW, 1479 tb, snum[i], 1480 sbytes[i], 1481 S_new[i]); 1482 1483 RFALSE(ret_val, 1484 "PAP-12240: unexpected value returned by leaf_move_items (%d)", 1485 ret_val); 1486 1487 /* paste into item */ 1488 bi.tb = tb; 1489 bi.bi_bh = S_new[i]; 1490 bi.bi_parent = NULL; 1491 bi.bi_position = 0; 1492 leaf_paste_in_buffer(&bi, 1493 item_pos - n + 1494 snum[i], 1495 pos_in_item, 1496 tb->insert_size[0], 1497 body, zeros_num); 1498 1499 pasted = 1500 B_N_PITEM_HEAD(S_new[i], 1501 item_pos - n + 1502 snum[i]); 1503 if (is_direntry_le_ih(pasted)) { 1504 leaf_paste_entries(bi.bi_bh, 1505 item_pos - 1506 n + snum[i], 1507 pos_in_item, 1508 1, 1509 (struct 1510 reiserfs_de_head 1511 *)body, 1512 body + 1513 DEH_SIZE, 1514 tb-> 1515 insert_size 1516 [0] 1517 ); 1518 } 1519 1520 /* if we paste to indirect item update ih_free_space */ 1521 if (is_indirect_le_ih(pasted)) 1522 set_ih_free_space(pasted, 0); 1523 zeros_num = tb->insert_size[0] = 0; 1524 } 1525 } 1526 1527 else { /* pasted item doesn't fall into S_new[i] */ 1528 1529 leaf_move_items(LEAF_FROM_S_TO_SNEW, tb, 1530 snum[i], sbytes[i], S_new[i]); 1531 } 1532 break; 1533 default: /* cases d and t */ 1534 reiserfs_panic(tb->tb_sb, 1535 "PAP-12245: balance_leaf: blknum > 
2: unexpectable mode: %s(%d)", 1536 (flag == 1537 M_DELETE) ? "DELETE" : ((flag == 1538 M_CUT) ? "CUT" 1539 : "UNKNOWN"), 1540 flag); 1541 } 1542 1543 memcpy(insert_key + i, B_N_PKEY(S_new[i], 0), KEY_SIZE); 1544 insert_ptr[i] = S_new[i]; 1545 1546 RFALSE(!buffer_journaled(S_new[i]) 1547 || buffer_journal_dirty(S_new[i]) 1548 || buffer_dirty(S_new[i]), "PAP-12247: S_new[%d] : (%b)", 1549 i, S_new[i]); 1550 } 1551 1552 /* if the affected item was not wholly shifted then we perform all necessary operations on that part or whole of the 1553 affected item which remains in S */ 1554 if (0 <= item_pos && item_pos < tb->s0num) { /* if we must insert or append into buffer S[0] */ 1555 1556 switch (flag) { 1557 case M_INSERT: /* insert item into S[0] */ 1558 bi.tb = tb; 1559 bi.bi_bh = tbS0; 1560 bi.bi_parent = PATH_H_PPARENT(tb->tb_path, 0); 1561 bi.bi_position = PATH_H_POSITION(tb->tb_path, 1); 1562 leaf_insert_into_buf(&bi, item_pos, ih, body, 1563 zeros_num); 1564 1565 /* If we insert the first key change the delimiting key */ 1566 if (item_pos == 0) { 1567 if (tb->CFL[0]) /* can be 0 in reiserfsck */ 1568 replace_key(tb, tb->CFL[0], tb->lkey[0], 1569 tbS0, 0); 1570 1571 } 1572 break; 1573 1574 case M_PASTE:{ /* append item in S[0] */ 1575 struct item_head *pasted; 1576 1577 pasted = B_N_PITEM_HEAD(tbS0, item_pos); 1578 /* when directory, may be new entry already pasted */ 1579 if (is_direntry_le_ih(pasted)) { 1580 if (pos_in_item >= 0 && 1581 pos_in_item <= 1582 ih_entry_count(pasted)) { 1583 1584 RFALSE(!tb->insert_size[0], 1585 "PAP-12260: insert_size is 0 already"); 1586 1587 /* prepare space */ 1588 bi.tb = tb; 1589 bi.bi_bh = tbS0; 1590 bi.bi_parent = 1591 PATH_H_PPARENT(tb->tb_path, 1592 0); 1593 bi.bi_position = 1594 PATH_H_POSITION(tb->tb_path, 1595 1); 1596 leaf_paste_in_buffer(&bi, 1597 item_pos, 1598 pos_in_item, 1599 tb-> 1600 insert_size 1601 [0], body, 1602 zeros_num); 1603 1604 /* paste entry */ 1605 leaf_paste_entries(bi.bi_bh, 1606 item_pos, 1607 
					   pos_in_item, 1,
					   (struct reiserfs_de_head *)body,
					   body + DEH_SIZE,
					   tb->insert_size[0]);
				/* pasted the very first entry of the very
				   first item: the left delimiting key in the
				   common father must be updated */
				if (!item_pos && !pos_in_item) {
					RFALSE(!tb->CFL[0]
					       || !tb->L[0],
					       "PAP-12270: CFL[0]/L[0] must be specified");
					if (tb->CFL[0]) {
						replace_key(tb, tb->CFL[0],
							    tb->lkey[0],
							    tbS0, 0);
					}
				}
				tb->insert_size[0] = 0;
			}
		} else {	/* regular object */
			/* a plain append is only legal at the very end of
			   the item */
			if (pos_in_item == ih_item_len(pasted)) {

				RFALSE(tb->insert_size[0] <= 0,
				       "PAP-12275: insert size must not be %d",
				       tb->insert_size[0]);
				bi.tb = tb;
				bi.bi_bh = tbS0;
				bi.bi_parent = PATH_H_PPARENT(tb->tb_path, 0);
				bi.bi_position = PATH_H_POSITION(tb->tb_path, 1);
				leaf_paste_in_buffer(&bi, item_pos, pos_in_item,
						     tb->insert_size[0],
						     body, zeros_num);

				/* if we paste to indirect item update
				   ih_free_space */
				if (is_indirect_le_ih(pasted)) {
					set_ih_free_space(pasted, 0);
				}
				tb->insert_size[0] = 0;
			}
#ifdef CONFIG_REISERFS_CHECK
			else {
				if (tb->insert_size[0]) {
					print_cur_tb("12285");
					reiserfs_panic(tb->tb_sb,
						       "PAP-12285: balance_leaf: insert_size must be 0 (%d)",
						       tb->insert_size[0]);
				}
			}
#endif				/* CONFIG_REISERFS_CHECK */
		}
		}		/* case M_PASTE: */
	}
	}
#ifdef CONFIG_REISERFS_CHECK
	/* every byte scheduled for pasting must have found a home by now */
	if (flag == M_PASTE && tb->insert_size[0]) {
		print_cur_tb("12290");
		reiserfs_panic(tb->tb_sb,
			       "PAP-12290: balance_leaf: insert_size is still not 0 (%d)",
			       tb->insert_size[0]);
	}
#endif				/* CONFIG_REISERFS_CHECK */

	return 0;
}				/* Leaf level of the tree is balanced (end of balance_leaf) */

/* Make empty node: reset the node header of bi->bi_bh so the block reads as
   an empty formatted node, and zero the size the parent records for it. */
void make_empty_node(struct buffer_info *bi)
{
	struct block_head *blkh;

	RFALSE(bi->bi_bh == NULL, "PAP-12295: pointer to the buffer is NULL");

	blkh = B_BLK_HEAD(bi->bi_bh);
	set_blkh_nr_item(blkh, 0);
	set_blkh_free_space(blkh, MAX_CHILD_SIZE(bi->bi_bh));

	/* dc_size is an on-disk little-endian field; storing 0 directly is
	   endian safe */
	if (bi->bi_parent)
		B_N_CHILD(bi->bi_parent, bi->bi_position)->dc_size = 0;
}

/* Get first empty buffer: take the first unused pre-allocated buffer from
   tb->FEB[], format it as an empty leaf-level node and record it in
   tb->used[].  Panics if fix_nodes left no free buffer. */
struct buffer_head *get_FEB(struct tree_balance *tb)
{
	int i;
	struct buffer_head *first_b;
	struct buffer_info bi;

	for (i = 0; i < MAX_FEB_SIZE; i++)
		if (tb->FEB[i] != 0)
			break;

	if (i == MAX_FEB_SIZE)
		reiserfs_panic(tb->tb_sb,
			       "vs-12300: get_FEB: FEB list is empty");

	/* format the buffer as an empty, parentless node */
	bi.tb = tb;
	bi.bi_bh = first_b = tb->FEB[i];
	bi.bi_parent = NULL;
	bi.bi_position = 0;
	make_empty_node(&bi);
	set_buffer_uptodate(first_b);
	tb->FEB[i] = NULL;
	tb->used[i] = first_b;

	return (first_b);
}

/* Park a buffer whose block should be freed on tb->thrown[] instead of
   freeing it immediately: reiserfs_free_block has to be able to schedule,
   which is not allowed while do_balance runs.  free_thrown() releases the
   parked buffers afterwards. */
static void store_thrown(struct tree_balance *tb, struct buffer_head *bh)
{
	int i;

	if (buffer_dirty(bh))
		reiserfs_warning(tb->tb_sb,
				 "store_thrown deals with dirty buffer");
	for (i = 0; i < ARRAY_SIZE(tb->thrown); i++)
		if (!tb->thrown[i]) {
			tb->thrown[i] = bh;
			get_bh(bh);	/* free_thrown puts this */
			return;
		}
	reiserfs_warning(tb->tb_sb, "store_thrown: too many thrown buffers");
}

/* Release the buffers parked by store_thrown() and free their disk blocks.
   Runs after balancing has completed, when scheduling is allowed again. */
static void free_thrown(struct tree_balance *tb)
{
	int i;
	b_blocknr_t blocknr;
	for (i = 0; i < ARRAY_SIZE(tb->thrown); i++) {
		if (tb->thrown[i]) {
			blocknr = tb->thrown[i]->b_blocknr;
			if (buffer_dirty(tb->thrown[i]))
				/* NOTE(review): "%d" may be narrower than
				   b_blocknr_t - verify format specifier */
				reiserfs_warning(tb->tb_sb,
						 "free_thrown deals with dirty buffer %d",
						 blocknr);
			brelse(tb->thrown[i]);	/* incremented in store_thrown */
			reiserfs_free_block(tb->transaction_handle, NULL,
					    blocknr, 0);
		}
	}
}

/* Mark bh's node as free-level and empty, then defer the freeing of its
   disk block via store_thrown(). */
void reiserfs_invalidate_buffer(struct tree_balance *tb, struct
				buffer_head *bh)
{
	struct block_head *blkh;
	blkh = B_BLK_HEAD(bh);
	set_blkh_level(blkh, FREE_LEVEL);
	set_blkh_nr_item(blkh, 0);

	clear_buffer_dirty(bh);
	store_thrown(tb, bh);
}

/* Replace n_dest'th key in buffer dest by n_src'th key of buffer src.
   If src is a leaf the key is taken from its item head, otherwise from the
   internal node's delimiting key array; dest is then marked dirty. */
void replace_key(struct tree_balance *tb, struct buffer_head *dest, int n_dest,
		 struct buffer_head *src, int n_src)
{

	RFALSE(dest == NULL || src == NULL,
	       "vs-12305: source or destination buffer is 0 (src=%p, dest=%p)",
	       src, dest);
	RFALSE(!B_IS_KEYS_LEVEL(dest),
	       "vs-12310: invalid level (%z) for destination buffer. dest must be leaf",
	       dest);
	RFALSE(n_dest < 0 || n_src < 0,
	       "vs-12315: src(%d) or dest(%d) key number < 0", n_src, n_dest);
	RFALSE(n_dest >= B_NR_ITEMS(dest) || n_src >= B_NR_ITEMS(src),
	       "vs-12320: src(%d(%d)) or dest(%d(%d)) key number is too big",
	       n_src, B_NR_ITEMS(src), n_dest, B_NR_ITEMS(dest));

	if (B_IS_ITEMS_LEVEL(src))
		/* source buffer contains leaf node */
		memcpy(B_N_PDELIM_KEY(dest, n_dest), B_N_PITEM_HEAD(src, n_src),
		       KEY_SIZE);
	else
		memcpy(B_N_PDELIM_KEY(dest, n_dest), B_N_PDELIM_KEY(src, n_src),
		       KEY_SIZE);

	do_balance_mark_internal_dirty(tb, dest, 0);
}

/* Position of S[h]'s left neighbor among the children of their common
   father FL[h]. */
int get_left_neighbor_position(struct tree_balance *tb, int h)
{
	int Sh_position = PATH_H_POSITION(tb->tb_path, h + 1);

	RFALSE(PATH_H_PPARENT(tb->tb_path, h) == 0 || tb->FL[h] == 0,
	       "vs-12325: FL[%d](%p) or F[%d](%p) does not exist",
	       h, tb->FL[h], h, PATH_H_PPARENT(tb->tb_path, h));

	/* if S[h] is leftmost in its own parent, the neighbor is the last
	   child of FL[h]; otherwise it is the preceding sibling */
	if (Sh_position == 0)
		return B_NR_ITEMS(tb->FL[h]);
	else
		return Sh_position - 1;
}

/* Position of S[h]'s right neighbor among the children of their common
   father FR[h]. */
int get_right_neighbor_position(struct tree_balance *tb, int h)
{
	int Sh_position = PATH_H_POSITION(tb->tb_path, h + 1);

	RFALSE(PATH_H_PPARENT(tb->tb_path, h) == 0 || tb->FR[h] == 0,
	       "vs-12330: F[%d](%p) or FR[%d](%p) does not exist",
	       h, PATH_H_PPARENT(tb->tb_path, h), h, tb->FR[h]);

	/* if S[h] is rightmost in its own parent, the neighbor is child 0
	   of FR[h]; otherwise it is the following sibling */
	if (Sh_position == B_NR_ITEMS(PATH_H_PPARENT(tb->tb_path, h)))
		return 0;
	else
		return Sh_position + 1;
}

#ifdef CONFIG_REISERFS_CHECK

int is_reusable(struct super_block *s, b_blocknr_t block, int bit_value);

/* Debug-only: verify that every child pointer of internal node bh passes
   is_reusable(); print the tree_balance with context 'mes' and panic on a
   bad pointer. */
static void check_internal_node(struct super_block *s, struct buffer_head *bh,
				char *mes)
{
	struct disk_child *dc;
	int i;

	RFALSE(!bh, "PAP-12336: bh == 0");

	if (!bh || !B_IS_IN_TREE(bh))
		return;

	RFALSE(!buffer_dirty(bh) &&
	       !(buffer_journaled(bh) || buffer_journal_dirty(bh)),
	       "PAP-12337: buffer (%b) must be dirty", bh);
	dc = B_N_CHILD(bh, 0);

	/* an internal node with n keys has n+1 children, hence "<=" */
	for (i = 0; i <= B_NR_ITEMS(bh); i++, dc++) {
		if (!is_reusable(s, dc_block_number(dc), 1)) {
			print_cur_tb(mes);
			reiserfs_panic(s,
				       "PAP-12338: check_internal_node: invalid child pointer %y in %b",
				       dc, bh);
		}
	}
}

/* Debug-only: warn and return 1 if bh is locked (without being journal
   prepared) or is no longer part of the tree; return 0 if it looks usable. */
static int locked_or_not_in_tree(struct buffer_head *bh, char *which)
{
	if ((!buffer_journal_prepared(bh) && buffer_locked(bh)) ||
	    !B_IS_IN_TREE(bh)) {
		reiserfs_warning(NULL,
				 "vs-12339: locked_or_not_in_tree: %s (%b)",
				 which, bh);
		return 1;
	}
	return 0;
}

/* Debug-only: sanity-check the tree_balance before balancing starts.
   Returns nonzero if any buffer we are going to modify is unusable. */
static int check_before_balancing(struct tree_balance *tb)
{
	int retval = 0;

	if (cur_tb) {
		reiserfs_panic(tb->tb_sb, "vs-12335: check_before_balancing: "
			       "suspect that schedule occurred based on cur_tb not being null at this point in code. "
			       "do_balance cannot properly handle schedule occurring while it runs.");
	}

	/* double check that buffers that we will modify are unlocked.
	   (fix_nodes should already have prepped all of these for us).
	 */
	if (tb->lnum[0]) {
		retval |= locked_or_not_in_tree(tb->L[0], "L[0]");
		retval |= locked_or_not_in_tree(tb->FL[0], "FL[0]");
		retval |= locked_or_not_in_tree(tb->CFL[0], "CFL[0]");
		check_leaf(tb->L[0]);
	}
	if (tb->rnum[0]) {
		retval |= locked_or_not_in_tree(tb->R[0], "R[0]");
		retval |= locked_or_not_in_tree(tb->FR[0], "FR[0]");
		retval |= locked_or_not_in_tree(tb->CFR[0], "CFR[0]");
		check_leaf(tb->R[0]);
	}
	retval |= locked_or_not_in_tree(PATH_PLAST_BUFFER(tb->tb_path), "S[0]");
	check_leaf(PATH_PLAST_BUFFER(tb->tb_path));

	return retval;
}

/* Debug-only: after the leaf level is balanced, verify that the free space
   recorded in L[0], R[0] and S[0] agrees with the dc_size their parents
   hold for them; panic on any mismatch. */
static void check_after_balance_leaf(struct tree_balance *tb)
{
	if (tb->lnum[0]) {
		if (B_FREE_SPACE(tb->L[0]) !=
		    MAX_CHILD_SIZE(tb->L[0]) -
		    dc_size(B_N_CHILD
			    (tb->FL[0], get_left_neighbor_position(tb, 0)))) {
			print_cur_tb("12221");
			reiserfs_panic(tb->tb_sb,
				       "PAP-12355: check_after_balance_leaf: shift to left was incorrect");
		}
	}
	if (tb->rnum[0]) {
		if (B_FREE_SPACE(tb->R[0]) !=
		    MAX_CHILD_SIZE(tb->R[0]) -
		    dc_size(B_N_CHILD
			    (tb->FR[0], get_right_neighbor_position(tb, 0)))) {
			print_cur_tb("12222");
			reiserfs_panic(tb->tb_sb,
				       "PAP-12360: check_after_balance_leaf: shift to right was incorrect");
		}
	}
	if (PATH_H_PBUFFER(tb->tb_path, 1) &&
	    (B_FREE_SPACE(PATH_H_PBUFFER(tb->tb_path, 0)) !=
	     (MAX_CHILD_SIZE(PATH_H_PBUFFER(tb->tb_path, 0)) -
	      dc_size(B_N_CHILD(PATH_H_PBUFFER(tb->tb_path, 1),
				PATH_H_POSITION(tb->tb_path, 1)))))) {
		int left = B_FREE_SPACE(PATH_H_PBUFFER(tb->tb_path, 0));
		int right = (MAX_CHILD_SIZE(PATH_H_PBUFFER(tb->tb_path, 0)) -
			     dc_size(B_N_CHILD(PATH_H_PBUFFER(tb->tb_path, 1),
					       PATH_H_POSITION(tb->tb_path,
							       1))));
		print_cur_tb("12223");
		reiserfs_warning(tb->tb_sb,
				 "B_FREE_SPACE (PATH_H_PBUFFER(tb->tb_path,0)) = %d; "
				 "MAX_CHILD_SIZE (%d) - dc_size( %y, %d ) [%d] = %d",
				 left,
				 MAX_CHILD_SIZE(PATH_H_PBUFFER(tb->tb_path, 0)),
				 PATH_H_PBUFFER(tb->tb_path, 1),
				 PATH_H_POSITION(tb->tb_path, 1),
				 dc_size(B_N_CHILD
					 (PATH_H_PBUFFER(tb->tb_path, 1),
					  PATH_H_POSITION(tb->tb_path, 1))),
				 right);
		reiserfs_panic(tb->tb_sb,
			       "PAP-12365: check_after_balance_leaf: S is incorrect");
	}
}

/* Debug-only: structural check of all leaf-level buffers involved in the
   balance. */
static void check_leaf_level(struct tree_balance *tb)
{
	check_leaf(tb->L[0]);
	check_leaf(tb->R[0]);
	check_leaf(PATH_PLAST_BUFFER(tb->tb_path));
}

/* Debug-only: check every internal level that this balance touched. */
static void check_internal_levels(struct tree_balance *tb)
{
	int h;

	/* check all internal nodes; insert_size[h] != 0 marks a level that
	   took part in the balance */
	for (h = 1; tb->insert_size[h]; h++) {
		check_internal_node(tb->tb_sb, PATH_H_PBUFFER(tb->tb_path, h),
				    "BAD BUFFER ON PATH");
		if (tb->lnum[h])
			check_internal_node(tb->tb_sb, tb->L[h], "BAD L");
		if (tb->rnum[h])
			check_internal_node(tb->tb_sb, tb->R[h], "BAD R");
	}

}

#endif

/* Now we have all of the buffers that must be used in balancing of
   the tree.  We rely on the assumption that schedule() will not occur
   while do_balance works. ( Only interrupt handlers are acceptable.)
   We balance the tree according to the analysis made before this,
   using buffers already obtained.  For SMP support it will someday be
   necessary to add ordered locking of tb. */

/* Some interesting rules of balancing:

   we delete a maximum of two nodes per level per balancing: we never
   delete R, when we delete two of three nodes L, S, R then we move
   them into R.

   we only delete L if we are deleting two nodes, if we delete only
   one node we delete S

   if we shift leaves then we shift as much as we can: this is a
   deliberate policy of extremism in node packing which results in
   higher average utilization after repeated random balance operations
   at the cost of more memory copies and more balancing as a result of
   small insertions to full nodes.

   if we shift internal nodes we try to evenly balance the node
   utilization, with consequent less balancing at the cost of lower
   utilization.

   one could argue that the policy for directories in leaves should be
   that of internal nodes, but we will wait until another day to
   evaluate this....  It would be nice to someday measure and prove
   these assumptions as to what is optimal....

*/

/* Pre-balancing hook: assert that every buffer we are about to modify is
   usable and, in debug builds, publish tb through cur_tb so an unexpected
   schedule can be detected. */
static inline void do_balance_starts(struct tree_balance *tb)
{
	/* use print_cur_tb() to see initial state of struct
	   tree_balance */

	/* store_print_tb (tb); */

	/* do not delete, just comment it out */
/*    print_tb(flag, PATH_LAST_POSITION(tb->tb_path), tb->tb_path->pos_in_item, tb,
	     "check");*/
	RFALSE(check_before_balancing(tb), "PAP-12340: locked buffers in TB");
#ifdef CONFIG_REISERFS_CHECK
	cur_tb = tb;
#endif
}

/* Post-balancing hook: run the debug checks, bump the per-sb balance
   counter, release all nodes held for balancing and free deferred blocks. */
static inline void do_balance_completed(struct tree_balance *tb)
{

#ifdef CONFIG_REISERFS_CHECK
	check_leaf_level(tb);
	check_internal_levels(tb);
	cur_tb = NULL;
#endif

	/* reiserfs_free_block is no longer schedule safe.  So, we need to
	   put the buffers we want freed on the thrown list during do_balance,
	   and then free them now */

	REISERFS_SB(tb->tb_sb)->s_do_balance++;

	/* release all nodes hold to perform the balancing */
	unfix_nodes(tb);

	free_thrown(tb);
}

/* Balance the tree as analysed beforehand by fix_nodes().
   tb   - tree_balance structure, fully set up before this call
   ih   - item header of the inserted item
   body - body of the inserted item, or the bytes to paste
   flag - mode: i - insert (add a new item to the tree),
	  d - delete (delete a whole item),
	  c - cut (delete part of an item, including removing an entry
	  from a directory),
	  p - paste (append to the end of an existing file, or insert a
	  directory entry) */
void do_balance(struct tree_balance *tb,	/* tree_balance structure */
		struct item_head *ih,	/* item header of inserted item */
		const char *body,	/* body of inserted item or bytes to paste */
		int flag)
{
	int child_pos,		/* position of a child node in its parent */
	 h;			/* level of the tree being processed */
	struct item_head insert_key[2];	/* in our processing of one level
					   we sometimes determine what
					   must be inserted into the next
					   higher level.  This insertion
					   consists of a key or two keys
					   and their corresponding
					   pointers */
	struct buffer_head *insert_ptr[2];	/* inserted node-ptrs for the
						   next level */

	tb->tb_mode = flag;
	tb->need_balance_dirty = 0;

	if (FILESYSTEM_CHANGED_TB(tb)) {
		reiserfs_panic(tb->tb_sb,
			       "clm-6000: do_balance, fs generation has changed\n");
	}
	/* if we have no real work to do  */
	if (!tb->insert_size[0]) {
		reiserfs_warning(tb->tb_sb,
				 "PAP-12350: do_balance: insert_size == 0, mode == %c",
				 flag);
		unfix_nodes(tb);
		return;
	}

	atomic_inc(&(fs_generation(tb->tb_sb)));
	do_balance_starts(tb);

	/* balance leaf returns 0 except if combining L R and S into
	   one node.  see balance_internal() for explanation of this
	   line of code. */
	child_pos = PATH_H_B_ITEM_ORDER(tb->tb_path, 0) +
	    balance_leaf(tb, ih, body, flag, insert_key, insert_ptr);

#ifdef CONFIG_REISERFS_CHECK
	check_after_balance_leaf(tb);
#endif

	/* Balance internal level of the tree: insert_size[h] != 0 means the
	   level below queued work for level h. */
	for (h = 1; h < MAX_HEIGHT && tb->insert_size[h]; h++)
		child_pos =
		    balance_internal(tb, h, child_pos, insert_key, insert_ptr);

	do_balance_completed(tb);

}