/*
 *  linux/fs/ext3/balloc.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  Enhanced block allocation by Stephen Tweedie (sct@redhat.com), 1993
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <linux/time.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/jbd.h>
#include <linux/ext3_fs.h>
#include <linux/ext3_jbd.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>

/*
 * balloc.c contains the blocks allocation and deallocation routines
 */

/*
 * The free blocks are managed by bitmaps.  A file system contains several
 * block groups.  Each group contains 1 bitmap block for blocks, 1 bitmap
 * block for inodes, N blocks for the inode table and data blocks.
 *
 * The file system contains group descriptors which are located after the
 * super block.  Each descriptor contains the number of the bitmap block and
 * the free blocks count of the group.  The descriptors are loaded in memory
 * when a file system is mounted (see ext3_fill_super).
 */

#define in_range(b, first, len)	((b) >= (first) && (b) <= (first) + (len) - 1)

/**
 * ext3_get_group_desc() -- load group descriptor from disk
 * @sb:			super block
 * @block_group:	given block group
 * @bh:			pointer to the buffer head to store the block
 *			group descriptor
 */
struct ext3_group_desc * ext3_get_group_desc(struct super_block * sb,
					     unsigned int block_group,
					     struct buffer_head ** bh)
{
	unsigned long group_desc;
	unsigned long offset;
	struct ext3_group_desc * desc;
	struct ext3_sb_info *sbi = EXT3_SB(sb);

	if (block_group >= sbi->s_groups_count) {
		ext3_error (sb, "ext3_get_group_desc",
			    "block_group >= groups_count - "
			    "block_group = %d, groups_count = %lu",
			    block_group, sbi->s_groups_count);

		return NULL;
	}
	smp_rmb();

	group_desc = block_group >> EXT3_DESC_PER_BLOCK_BITS(sb);
	offset = block_group & (EXT3_DESC_PER_BLOCK(sb) - 1);
	if (!sbi->s_group_desc[group_desc]) {
		ext3_error (sb, "ext3_get_group_desc",
			    "Group descriptor not loaded - "
			    "block_group = %d, group_desc = %lu, desc = %lu",
			    block_group, group_desc, offset);
		return NULL;
	}

	desc = (struct ext3_group_desc *) sbi->s_group_desc[group_desc]->b_data;
	if (bh)
		*bh = sbi->s_group_desc[group_desc];
	return desc + offset;
}

static int ext3_valid_block_bitmap(struct super_block *sb,
					struct ext3_group_desc *desc,
					unsigned int block_group,
					struct buffer_head *bh)
{
	ext3_grpblk_t offset;
	ext3_grpblk_t next_zero_bit;
	ext3_fsblk_t bitmap_blk;
	ext3_fsblk_t group_first_block;

	group_first_block = ext3_group_first_block_no(sb, block_group);

	/* check whether the block bitmap block number is set */
	bitmap_blk = le32_to_cpu(desc->bg_block_bitmap);
	offset = bitmap_blk - group_first_block;
	if (!ext3_test_bit(offset, bh->b_data))
		/* bad block bitmap */
		goto err_out;

	/* check whether the inode bitmap block number is set */
	bitmap_blk = le32_to_cpu(desc->bg_inode_bitmap);
	offset = bitmap_blk - group_first_block;
	if (!ext3_test_bit(offset, bh->b_data))
		/* bad inode bitmap */
		goto err_out;

	/* check whether the inode table block numbers are set */
	bitmap_blk = le32_to_cpu(desc->bg_inode_table);
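	/*
	 * The inode table occupies s_itb_per_group consecutive blocks,
	 * so every bit in [offset, offset + s_itb_per_group) must be
	 * set: for the bitmap to be valid, the first zero bit found
	 * below must land at or beyond the end of that range.
	 */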
	offset = bitmap_blk - group_first_block;
	next_zero_bit = ext3_find_next_zero_bit(bh->b_data,
				offset + EXT3_SB(sb)->s_itb_per_group,
				offset);
	if (next_zero_bit >= offset + EXT3_SB(sb)->s_itb_per_group)
		/* good bitmap for inode tables */
		return 1;

err_out:
	ext3_error(sb, __func__,
			"Invalid block bitmap - "
			"block_group = %d, block = %lu",
			block_group, bitmap_blk);
	return 0;
}

/**
 * read_block_bitmap()
 * @sb:			super block
 * @block_group:	given block group
 *
 * Read the bitmap for a given block_group, and validate that the bits
 * for the block bitmap, inode bitmap and inode table are set in the
 * bitmap.
 *
 * Return buffer_head on success or NULL in case of failure.
 */
static struct buffer_head *
read_block_bitmap(struct super_block *sb, unsigned int block_group)
{
	struct ext3_group_desc * desc;
	struct buffer_head * bh = NULL;
	ext3_fsblk_t bitmap_blk;

	desc = ext3_get_group_desc(sb, block_group, NULL);
	if (!desc)
		return NULL;
	bitmap_blk = le32_to_cpu(desc->bg_block_bitmap);
	bh = sb_getblk(sb, bitmap_blk);
	if (unlikely(!bh)) {
		ext3_error(sb, __func__,
			    "Cannot read block bitmap - "
			    "block_group = %d, block_bitmap = %u",
			    block_group, le32_to_cpu(desc->bg_block_bitmap));
		return NULL;
	}
	if (likely(bh_uptodate_or_lock(bh)))
		return bh;

	if (bh_submit_read(bh) < 0) {
		brelse(bh);
		ext3_error(sb, __func__,
			    "Cannot read block bitmap - "
			    "block_group = %d, block_bitmap = %u",
			    block_group, le32_to_cpu(desc->bg_block_bitmap));
		return NULL;
	}
	ext3_valid_block_bitmap(sb, desc, block_group, bh);
	/*
	 * The file system was not mounted to panic on error, so
	 * continue with the corrupt bitmap.
	 */
	return bh;
}

/*
 * The reservation window structure operations
 * --------------------------------------------
 * Operations include:
 * dump, find, add, remove, is_empty, find_next_reservable_window, etc.
 *
 * We use a red-black tree to represent per-filesystem reservation
 * windows.
 */

/**
 * __rsv_window_dump() -- Dump the filesystem block allocation reservation map
 * @root:		root of the per-filesystem reservation rb tree
 * @verbose:		verbose mode
 * @fn:			function which wishes to dump the reservation map
 *
 * If verbose is turned on, it will print the whole block reservation
 * windows (start, end).  Otherwise, it will only print out the "bad" windows,
 * those windows that overlap with their immediate neighbors.
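 *
 * Any inconsistency found here means the reservation rb tree is corrupt:
 * the walk restarts in verbose mode to dump the whole map, then ends in
 * BUG_ON(bad).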
 */
static void __rsv_window_dump(struct rb_root *root, int verbose,
			      const char *fn)
{
	struct rb_node *n;
	struct ext3_reserve_window_node *rsv, *prev;
	int bad;

restart:
	n = rb_first(root);
	bad = 0;
	prev = NULL;

	printk("Block Allocation Reservation Windows Map (%s):\n", fn);
	while (n) {
		rsv = rb_entry(n, struct ext3_reserve_window_node, rsv_node);
		if (verbose)
			printk("reservation window 0x%p "
			       "start: %lu, end: %lu\n",
			       rsv, rsv->rsv_start, rsv->rsv_end);
		if (rsv->rsv_start && rsv->rsv_start >= rsv->rsv_end) {
			printk("Bad reservation %p (start >= end)\n",
			       rsv);
			bad = 1;
		}
		if (prev && prev->rsv_end >= rsv->rsv_start) {
			printk("Bad reservation %p (prev->end >= start)\n",
			       rsv);
			bad = 1;
		}
		if (bad) {
			if (!verbose) {
				printk("Restarting reservation walk in verbose mode\n");
				verbose = 1;
				goto restart;
			}
		}
		n = rb_next(n);
		prev = rsv;
	}
	printk("Window map complete.\n");
	BUG_ON(bad);
}
#define rsv_window_dump(root, verbose) \
	__rsv_window_dump((root), (verbose), __func__)

/**
 * goal_in_my_reservation()
 * @rsv:		inode's reservation window
 * @grp_goal:		given goal block relative to the allocation block group
 * @group:		the current allocation block group
 * @sb:			filesystem super block
 *
 * Test if the given goal block (group relative) is within the file's
 * own block reservation window range.
 *
 * If the reservation window is outside the goal allocation group, return 0;
 * grp_goal (the given goal block) could be -1, which means there is no
 * specific goal block.  In this case, always return 1.
 * If the goal block is within the reservation window, return 1;
 * otherwise, return 0.
 */
static int
goal_in_my_reservation(struct ext3_reserve_window *rsv, ext3_grpblk_t grp_goal,
			unsigned int group, struct super_block * sb)
{
	ext3_fsblk_t group_first_block, group_last_block;

	group_first_block = ext3_group_first_block_no(sb, group);
	group_last_block = group_first_block + (EXT3_BLOCKS_PER_GROUP(sb) - 1);

	if ((rsv->_rsv_start > group_last_block) ||
	    (rsv->_rsv_end < group_first_block))
		return 0;
	if ((grp_goal >= 0) && ((grp_goal + group_first_block < rsv->_rsv_start)
		|| (grp_goal + group_first_block > rsv->_rsv_end)))
		return 0;
	return 1;
}

/**
 * search_reserve_window()
 * @root:		root of the reservation tree
 * @goal:		target allocation block
 *
 * Find the reserved window which includes the goal, or the previous one
 * if the goal is not in any window.
 * Returns NULL if there are no windows or if all windows start after the goal.
 */
static struct ext3_reserve_window_node *
search_reserve_window(struct rb_root *root, ext3_fsblk_t goal)
{
	struct rb_node *n = root->rb_node;
	struct ext3_reserve_window_node *rsv;

	if (!n)
		return NULL;

	do {
		rsv = rb_entry(n, struct ext3_reserve_window_node, rsv_node);

		if (goal < rsv->rsv_start)
			n = n->rb_left;
		else if (goal > rsv->rsv_end)
			n = n->rb_right;
		else
			return rsv;
	} while (n);
	/*
	 * We've fallen off the end of the tree: the goal wasn't inside
	 * any particular node.  OK, the previous node must be to one
	 * side of the interval containing the goal.  If it's the RHS,
	 * we need to back up one.
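	 *
	 * For example (illustrative numbers): with windows [10,20] and
	 * [40,50] in the tree, a search for goal 30 walks right past
	 * [10,20], then left from [40,50], and falls off the tree at
	 * [40,50].  Since 40 > 30, we step back with rb_prev() and
	 * return [10,20].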
	 */
	if (rsv->rsv_start > goal) {
		n = rb_prev(&rsv->rsv_node);
		rsv = rb_entry(n, struct ext3_reserve_window_node, rsv_node);
	}
	return rsv;
}

/**
 * ext3_rsv_window_add() -- Insert a window to the block reservation rb tree.
 * @sb:			super block
 * @rsv:		reservation window to add
 *
 * Must be called with rsv_lock held.
 */
void ext3_rsv_window_add(struct super_block *sb,
		    struct ext3_reserve_window_node *rsv)
{
	struct rb_root *root = &EXT3_SB(sb)->s_rsv_window_root;
	struct rb_node *node = &rsv->rsv_node;
	ext3_fsblk_t start = rsv->rsv_start;

	struct rb_node ** p = &root->rb_node;
	struct rb_node * parent = NULL;
	struct ext3_reserve_window_node *this;

	while (*p)
	{
		parent = *p;
		this = rb_entry(parent, struct ext3_reserve_window_node, rsv_node);

		if (start < this->rsv_start)
			p = &(*p)->rb_left;
		else if (start > this->rsv_end)
			p = &(*p)->rb_right;
		else {
			rsv_window_dump(root, 1);
			BUG();
		}
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
}

/**
 * rsv_window_remove() -- unlink a window from the reservation rb tree
 * @sb:			super block
 * @rsv:		reservation window to remove
 *
 * Mark the block reservation window as not allocated, and unlink it
 * from the filesystem reservation window rb tree.  Must be called with
 * rsv_lock held.
 */
static void rsv_window_remove(struct super_block *sb,
			      struct ext3_reserve_window_node *rsv)
{
	rsv->rsv_start = EXT3_RESERVE_WINDOW_NOT_ALLOCATED;
	rsv->rsv_end = EXT3_RESERVE_WINDOW_NOT_ALLOCATED;
	rsv->rsv_alloc_hit = 0;
	rb_erase(&rsv->rsv_node, &EXT3_SB(sb)->s_rsv_window_root);
}

/*
 * rsv_is_empty() -- Check if the reservation window is unallocated.
 * @rsv:		given reservation window to check
 *
 * returns 1 if the end block is EXT3_RESERVE_WINDOW_NOT_ALLOCATED.
 */
static inline int rsv_is_empty(struct ext3_reserve_window *rsv)
{
	/* a valid reservation end block could not be 0 */
	return rsv->_rsv_end == EXT3_RESERVE_WINDOW_NOT_ALLOCATED;
}

/**
 * ext3_init_block_alloc_info()
 * @inode:		file inode structure
 *
 * Allocate and initialize the reservation window structure, and
 * finally link the window to the ext3 inode structure.
 *
 * The reservation window structure is only dynamically allocated
 * and linked to the ext3 inode the first time the open file
 * needs a new block.  So, before every ext3_new_block(s) call, for
 * regular files, we should check whether the reservation window
 * structure exists or not.  In the latter case, this function is called.
 * Failure to do so will result in block reservation being turned off
 * for that open file.
 *
 * This function is called from ext3_get_blocks_handle(), and also called
 * when setting the reservation window size through ioctl before the file
 * is open for write (needs block allocation).
 *
 * The caller must hold truncate_mutex when calling this function.
 */
void ext3_init_block_alloc_info(struct inode *inode)
{
	struct ext3_inode_info *ei = EXT3_I(inode);
	struct ext3_block_alloc_info *block_i = ei->i_block_alloc_info;
	struct super_block *sb = inode->i_sb;

	block_i = kmalloc(sizeof(*block_i), GFP_NOFS);
	if (block_i) {
		struct ext3_reserve_window_node *rsv = &block_i->rsv_window_node;

		rsv->rsv_start = EXT3_RESERVE_WINDOW_NOT_ALLOCATED;
		rsv->rsv_end = EXT3_RESERVE_WINDOW_NOT_ALLOCATED;

		/*
		 * if the filesystem is mounted with NORESERVATION, the goal
		 * reservation window size is set to zero to indicate
		 * block reservation is off
		 */
		if (!test_opt(sb, RESERVATION))
			rsv->rsv_goal_size = 0;
		else
			rsv->rsv_goal_size = EXT3_DEFAULT_RESERVE_BLOCKS;
		rsv->rsv_alloc_hit = 0;
		block_i->last_alloc_logical_block = 0;
		block_i->last_alloc_physical_block = 0;
	}
	ei->i_block_alloc_info = block_i;
}

/**
 * ext3_discard_reservation()
 * @inode:		inode
 *
 * Discard (free) the block reservation window on last file close,
 * on truncate, or at last iput().
 *
 * It is being called in three cases:
 *	ext3_release_file(): last writer closes the file
 *	ext3_clear_inode(): last iput(), when nobody links to this file.
 *	ext3_truncate(): when the block indirect map is about to change.
 */
void ext3_discard_reservation(struct inode *inode)
{
	struct ext3_inode_info *ei = EXT3_I(inode);
	struct ext3_block_alloc_info *block_i = ei->i_block_alloc_info;
	struct ext3_reserve_window_node *rsv;
	spinlock_t *rsv_lock = &EXT3_SB(inode->i_sb)->s_rsv_window_lock;

	if (!block_i)
		return;

	rsv = &block_i->rsv_window_node;
	if (!rsv_is_empty(&rsv->rsv_window)) {
		spin_lock(rsv_lock);
		if (!rsv_is_empty(&rsv->rsv_window))
			rsv_window_remove(inode->i_sb, rsv);
		spin_unlock(rsv_lock);
	}
}

/**
 * ext3_free_blocks_sb() -- Free given blocks and update quota
 * @handle:			handle to this transaction
 * @sb:				super block
 * @block:			start physical block to free
 * @count:			number of blocks to free
 * @pdquot_freed_blocks:	pointer to the counter of freed quota blocks
 */
void ext3_free_blocks_sb(handle_t *handle, struct super_block *sb,
			 ext3_fsblk_t block, unsigned long count,
			 unsigned long *pdquot_freed_blocks)
{
	struct buffer_head *bitmap_bh = NULL;
	struct buffer_head *gd_bh;
	unsigned long block_group;
	ext3_grpblk_t bit;
	unsigned long i;
	unsigned long overflow;
	struct ext3_group_desc * desc;
	struct ext3_super_block * es;
	struct ext3_sb_info *sbi;
	int err = 0, ret;
	ext3_grpblk_t group_freed;

	*pdquot_freed_blocks = 0;
	sbi = EXT3_SB(sb);
	es = sbi->s_es;
	if (block < le32_to_cpu(es->s_first_data_block) ||
	    block + count < block ||
	    block + count > le32_to_cpu(es->s_blocks_count)) {
		ext3_error (sb, "ext3_free_blocks",
			    "Freeing blocks not in datazone - "
			    "block = "E3FSBLK", count = %lu", block, count);
		goto error_return;
	}

	ext3_debug ("freeing block(s) %lu-%lu\n", block, block + count - 1);

do_more:
	overflow = 0;
	block_group = (block - le32_to_cpu(es->s_first_data_block)) /
		      EXT3_BLOCKS_PER_GROUP(sb);
	bit = (block - le32_to_cpu(es->s_first_data_block)) %
		      EXT3_BLOCKS_PER_GROUP(sb);
	/*
	 * Check to see if we are freeing blocks across a group
	 * boundary.
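	 *
	 * Worked example (illustrative numbers only): with 8192 blocks
	 * per group and s_first_data_block == 1, freeing block 10000
	 * gives block_group = 9999 / 8192 = 1 and bit = 9999 % 8192 =
	 * 1807.  If bit + count exceeds 8192, the tail spills into the
	 * next group; the overflow path below frees the first part now
	 * and loops back to do_more for the rest.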
	 */
	if (bit + count > EXT3_BLOCKS_PER_GROUP(sb)) {
		overflow = bit + count - EXT3_BLOCKS_PER_GROUP(sb);
		count -= overflow;
	}
	brelse(bitmap_bh);
	bitmap_bh = read_block_bitmap(sb, block_group);
	if (!bitmap_bh)
		goto error_return;
	desc = ext3_get_group_desc (sb, block_group, &gd_bh);
	if (!desc)
		goto error_return;

	if (in_range (le32_to_cpu(desc->bg_block_bitmap), block, count) ||
	    in_range (le32_to_cpu(desc->bg_inode_bitmap), block, count) ||
	    in_range (block, le32_to_cpu(desc->bg_inode_table),
		      sbi->s_itb_per_group) ||
	    in_range (block + count - 1, le32_to_cpu(desc->bg_inode_table),
		      sbi->s_itb_per_group)) {
		ext3_error (sb, "ext3_free_blocks",
			    "Freeing blocks in system zones - "
			    "Block = "E3FSBLK", count = %lu",
			    block, count);
		goto error_return;
	}

	/*
	 * We are about to start releasing blocks in the bitmap,
	 * so we need undo access.
	 */
	/* @@@ check errors */
	BUFFER_TRACE(bitmap_bh, "getting undo access");
	err = ext3_journal_get_undo_access(handle, bitmap_bh);
	if (err)
		goto error_return;

	/*
	 * We are about to modify some metadata.  Call the journal APIs
	 * to unshare ->b_data if a currently-committing transaction is
	 * using it
	 */
	BUFFER_TRACE(gd_bh, "get_write_access");
	err = ext3_journal_get_write_access(handle, gd_bh);
	if (err)
		goto error_return;

	jbd_lock_bh_state(bitmap_bh);

	for (i = 0, group_freed = 0; i < count; i++) {
		/*
		 * An HJ special.  This is expensive...
		 */
#ifdef CONFIG_JBD_DEBUG
		jbd_unlock_bh_state(bitmap_bh);
		{
			struct buffer_head *debug_bh;
			debug_bh = sb_find_get_block(sb, block + i);
			if (debug_bh) {
				BUFFER_TRACE(debug_bh, "Deleted!");
				if (!bh2jh(bitmap_bh)->b_committed_data)
					BUFFER_TRACE(debug_bh,
						"No committed data in bitmap");
				BUFFER_TRACE2(debug_bh, bitmap_bh, "bitmap");
				__brelse(debug_bh);
			}
		}
		jbd_lock_bh_state(bitmap_bh);
#endif
		if (need_resched()) {
			jbd_unlock_bh_state(bitmap_bh);
			cond_resched();
			jbd_lock_bh_state(bitmap_bh);
		}
		/* @@@ This prevents newly-allocated data from being
		 * freed and then reallocated within the same
		 * transaction.
		 *
		 * Ideally we would want to allow that to happen, but to
		 * do so requires making journal_forget() capable of
		 * revoking the queued write of a data block, which
		 * implies blocking on the journal lock.  *forget()
		 * cannot block due to truncate races.
		 *
		 * Eventually we can fix this by making journal_forget()
		 * return a status indicating whether or not it was able
		 * to revoke the buffer.  On successful revoke, it is
		 * safe not to set the allocation bit in the committed
		 * bitmap, because we know that there is no outstanding
		 * activity on the buffer any more and so it is safe to
		 * reallocate it.
		 */
		BUFFER_TRACE(bitmap_bh, "set in b_committed_data");
		J_ASSERT_BH(bitmap_bh,
				bh2jh(bitmap_bh)->b_committed_data != NULL);
		ext3_set_bit_atomic(sb_bgl_lock(sbi, block_group), bit + i,
				bh2jh(bitmap_bh)->b_committed_data);

		/*
		 * We clear the bit in the bitmap after setting the committed
		 * data bit, because this is the reverse order to that which
		 * the allocator uses.
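		 *
		 * (claim_block() first sets the bit in b_data and only
		 * then tests b_committed_data; writing in the opposite
		 * order here means a racing allocator can never observe
		 * both copies clear for a block that is still live in
		 * the committing transaction.)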
		 */
		BUFFER_TRACE(bitmap_bh, "clear bit");
		if (!ext3_clear_bit_atomic(sb_bgl_lock(sbi, block_group),
						bit + i, bitmap_bh->b_data)) {
			jbd_unlock_bh_state(bitmap_bh);
			ext3_error(sb, __func__,
				"bit already cleared for block "E3FSBLK,
				block + i);
			jbd_lock_bh_state(bitmap_bh);
			BUFFER_TRACE(bitmap_bh, "bit already cleared");
		} else {
			group_freed++;
		}
	}
	jbd_unlock_bh_state(bitmap_bh);

	spin_lock(sb_bgl_lock(sbi, block_group));
	le16_add_cpu(&desc->bg_free_blocks_count, group_freed);
	spin_unlock(sb_bgl_lock(sbi, block_group));
	percpu_counter_add(&sbi->s_freeblocks_counter, count);

	/* We dirtied the bitmap block */
	BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
	err = ext3_journal_dirty_metadata(handle, bitmap_bh);

	/* And the group descriptor block */
	BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
	ret = ext3_journal_dirty_metadata(handle, gd_bh);
	if (!err) err = ret;
	*pdquot_freed_blocks += group_freed;

	if (overflow && !err) {
		block += count;
		count = overflow;
		goto do_more;
	}

error_return:
	brelse(bitmap_bh);
	ext3_std_error(sb, err);
	return;
}

/**
 * ext3_free_blocks() -- Free given blocks and update quota
 * @handle:		handle for this transaction
 * @inode:		inode
 * @block:		start physical block to free
 * @count:		number of blocks to free
 */
void ext3_free_blocks(handle_t *handle, struct inode *inode,
			ext3_fsblk_t block, unsigned long count)
{
	struct super_block * sb;
	unsigned long dquot_freed_blocks;

	sb = inode->i_sb;
	if (!sb) {
		printk ("ext3_free_blocks: nonexistent device");
		return;
	}
	ext3_free_blocks_sb(handle, sb, block, count, &dquot_freed_blocks);
	if (dquot_freed_blocks)
		dquot_free_block(inode, dquot_freed_blocks);
	return;
}

/**
 * ext3_test_allocatable()
 * @nr:			given allocation block (group relative)
 * @bh:			bufferhead contains the bitmap of the given block group
 *
 * For ext3 allocations, we must not reuse any blocks which are
 * allocated in the bitmap buffer's "last committed data" copy.  This
 * prevents deletes from freeing up the page for reuse until we have
 * committed the delete transaction.
 *
 * If we didn't do this, then deleting something and reallocating it as
 * data would allow the old block to be overwritten before the
 * transaction committed (because we force data to disk before commit).
 * This would lead to corruption if we crashed between overwriting the
 * data and committing the delete.
 *
 * @@@ We may want to make this allocation behaviour conditional on
 * data-writes at some point, and disable it for metadata allocations or
 * sync-data inodes.
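 *
 * In short, a block is allocatable only when it is clear in both the
 * on-disk bitmap and (when one exists) the last-committed copy:
 *
 *	b_data bit	b_committed_data bit	allocatable
 *	    1		      (any)		     no
 *	    0		     no copy		     yes
 *	    0			0		     yes
 *	    0			1		     no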
 */
static int ext3_test_allocatable(ext3_grpblk_t nr, struct buffer_head *bh)
{
	int ret;
	struct journal_head *jh = bh2jh(bh);

	if (ext3_test_bit(nr, bh->b_data))
		return 0;

	jbd_lock_bh_state(bh);
	if (!jh->b_committed_data)
		ret = 1;
	else
		ret = !ext3_test_bit(nr, jh->b_committed_data);
	jbd_unlock_bh_state(bh);
	return ret;
}

/**
 * bitmap_search_next_usable_block()
 * @start:		the starting block (group relative) of the search
 * @bh:			bufferhead contains the block group bitmap
 * @maxblocks:		the ending block (group relative) of the reservation
 *
 * The bitmap search --- search forward alternately through the actual
 * bitmap on disk and the last-committed copy in the journal, until we
 * find a bit free in both bitmaps.
 */
static ext3_grpblk_t
bitmap_search_next_usable_block(ext3_grpblk_t start, struct buffer_head *bh,
					ext3_grpblk_t maxblocks)
{
	ext3_grpblk_t next;
	struct journal_head *jh = bh2jh(bh);

	while (start < maxblocks) {
		next = ext3_find_next_zero_bit(bh->b_data, maxblocks, start);
		if (next >= maxblocks)
			return -1;
		if (ext3_test_allocatable(next, bh))
			return next;
		jbd_lock_bh_state(bh);
		if (jh->b_committed_data)
			start = ext3_find_next_zero_bit(jh->b_committed_data,
							maxblocks, next);
		jbd_unlock_bh_state(bh);
	}
	return -1;
}

/**
 * find_next_usable_block()
 * @start:		the starting block (group relative) to find the next
 *			allocatable block in the bitmap.
 * @bh:			bufferhead contains the block group bitmap
 * @maxblocks:		the ending block (group relative) for the search
 *
 * Find an allocatable block in a bitmap.  We honor both the bitmap and
 * its last-committed copy (if that exists), and perform the "most
 * appropriate allocation" algorithm of looking for a free block near
 * the initial goal; then for a free byte somewhere in the bitmap; then
 * for any free bit in the bitmap.
 */
static ext3_grpblk_t
find_next_usable_block(ext3_grpblk_t start, struct buffer_head *bh,
			ext3_grpblk_t maxblocks)
{
	ext3_grpblk_t here, next;
	char *p, *r;

	if (start > 0) {
		/*
		 * The goal was occupied; search forward for a free
		 * block within the next XX blocks.
		 *
		 * end_goal is more or less random, but it has to be
		 * less than EXT3_BLOCKS_PER_GROUP.  Aligning up to the
		 * next 64-bit boundary is simple..
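		 *
		 * For example, a goal of 100 gives
		 * end_goal = (100 + 63) & ~63 = 128, so at most the 28
		 * bits up to the next 64-bit boundary are scanned here.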
		 */
		ext3_grpblk_t end_goal = (start + 63) & ~63;
		if (end_goal > maxblocks)
			end_goal = maxblocks;
		here = ext3_find_next_zero_bit(bh->b_data, end_goal, start);
		if (here < end_goal && ext3_test_allocatable(here, bh))
			return here;
		ext3_debug("Bit not found near goal\n");
	}

	here = start;
	if (here < 0)
		here = 0;

	p = ((char *)bh->b_data) + (here >> 3);
	r = memscan(p, 0, ((maxblocks + 7) >> 3) - (here >> 3));
	next = (r - ((char *)bh->b_data)) << 3;

	if (next < maxblocks && next >= start && ext3_test_allocatable(next, bh))
		return next;

	/*
	 * The bitmap search --- search forward alternately through the actual
	 * bitmap and the last-committed copy until we find a bit free in
	 * both
	 */
	here = bitmap_search_next_usable_block(here, bh, maxblocks);
	return here;
}

/**
 * claim_block()
 * @lock:		the group's sb_bgl_lock protecting the bitmap
 * @block:		the free block (group relative) to allocate
 * @bh:			the bufferhead contains the block group bitmap
 *
 * We think we can allocate this block in this bitmap.  Try to set the bit.
 * If that succeeds then check that nobody has allocated and then freed the
 * block since we saw that it was not marked in b_committed_data.  If it
 * _was_ allocated and freed then clear the bit in the bitmap again and
 * return zero (failure).
 */
static inline int
claim_block(spinlock_t *lock, ext3_grpblk_t block, struct buffer_head *bh)
{
	struct journal_head *jh = bh2jh(bh);
	int ret;

	if (ext3_set_bit_atomic(lock, block, bh->b_data))
		return 0;
	jbd_lock_bh_state(bh);
	if (jh->b_committed_data && ext3_test_bit(block,jh->b_committed_data)) {
		ext3_clear_bit_atomic(lock, block, bh->b_data);
		ret = 0;
	} else {
		ret = 1;
	}
	jbd_unlock_bh_state(bh);
	return ret;
}

/**
 * ext3_try_to_allocate()
 * @sb:			superblock
 * @handle:		handle to this transaction
 * @group:		given allocation block group
 * @bitmap_bh:		bufferhead holds the block bitmap
 * @grp_goal:		given target block within the group
 * @count:		target number of blocks to allocate
 * @my_rsv:		reservation window
 *
 * Attempt to allocate blocks within a given range.  Set the range of the
 * allocation first, then find the first free bit(s) from the bitmap
 * (within the range), and finally allocate the blocks by claiming the
 * found free bit(s) as allocated.
 *
 * To set the range of this allocation:
 *	if there is a reservation window, only try to allocate block(s)
 *	from the file's own reservation window;
 *	otherwise, the allocation range starts from the given goal block
 *	and ends at the block group's last block.
 *
 * If we failed to allocate the desired block then we may end up crossing
 * to a new bitmap.  In that case we must release write access to the old
 * one via ext3_journal_release_buffer(), else we'll run out of credits.
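 *
 * For example (illustrative numbers): in a group whose first block is g,
 * a window [g+10, g+40] yields the group-relative range [10, 41); a
 * grp_goal of 20 narrows the start to 20, while a goal outside the
 * window is dropped (grp_goal becomes -1).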
 */
static ext3_grpblk_t
ext3_try_to_allocate(struct super_block *sb, handle_t *handle, int group,
			struct buffer_head *bitmap_bh, ext3_grpblk_t grp_goal,
			unsigned long *count, struct ext3_reserve_window *my_rsv)
{
	ext3_fsblk_t group_first_block;
	ext3_grpblk_t start, end;
	unsigned long num = 0;

	/* we do allocation within the reservation window if we have a window */
	if (my_rsv) {
		group_first_block = ext3_group_first_block_no(sb, group);
		if (my_rsv->_rsv_start >= group_first_block)
			start = my_rsv->_rsv_start - group_first_block;
		else
			/* reservation window crosses group boundary */
			start = 0;
		end = my_rsv->_rsv_end - group_first_block + 1;
		if (end > EXT3_BLOCKS_PER_GROUP(sb))
			/* reservation window crosses group boundary */
			end = EXT3_BLOCKS_PER_GROUP(sb);
		if ((start <= grp_goal) && (grp_goal < end))
			start = grp_goal;
		else
			grp_goal = -1;
	} else {
		if (grp_goal > 0)
			start = grp_goal;
		else
			start = 0;
		end = EXT3_BLOCKS_PER_GROUP(sb);
	}

	BUG_ON(start > EXT3_BLOCKS_PER_GROUP(sb));

repeat:
	if (grp_goal < 0 || !ext3_test_allocatable(grp_goal, bitmap_bh)) {
		grp_goal = find_next_usable_block(start, bitmap_bh, end);
		if (grp_goal < 0)
			goto fail_access;
		if (!my_rsv) {
			int i;

			for (i = 0; i < 7 && grp_goal > start &&
					ext3_test_allocatable(grp_goal - 1,
								bitmap_bh);
					i++, grp_goal--)
				;
		}
	}
	start = grp_goal;

	if (!claim_block(sb_bgl_lock(EXT3_SB(sb), group),
		grp_goal, bitmap_bh)) {
		/*
		 * The block was allocated by another thread, or it was
		 * allocated and then freed by another thread
		 */
		start++;
		grp_goal++;
		if (start >= end)
			goto fail_access;
		goto repeat;
	}
	num++;
	grp_goal++;
	while (num < *count && grp_goal < end
		&& ext3_test_allocatable(grp_goal, bitmap_bh)
		&& claim_block(sb_bgl_lock(EXT3_SB(sb), group),
				grp_goal, bitmap_bh)) {
		num++;
		grp_goal++;
	}
	*count = num;
	return grp_goal - num;
fail_access:
	*count = num;
	return -1;
}

/**
 * find_next_reservable_window():
 *	find a reservable space within the given range.
 *	It does not allocate the reservation window for now:
 *	alloc_new_reservation() will do the work later.
 *
 * @search_head: the head of the searching list;
 *	This is not necessarily the list head of the whole filesystem.
 *
 *	We have both head and start_block to assist the search
 *	for the reservable space.  The list starts from head,
 *	but we will shift to the place where start_block is,
 *	then start from there, when looking for a reservable space.
 *
 * @size: the target new reservation window size
 *
 * @start_block: the first block we consider to start
 *	the real search from
 *
 * @last_block:
 *	the maximum block number that our goal reservable space
 *	could start from.  This is normally the last block in this
 *	group.  The search ends when the start of the next possible
 *	reservable space is past this boundary.
 *	This handles the cross-boundary reservation window
 *	request.
 *
 * Basically we search the given range (start_block, last_block),
 * rather than the whole reservation double linked list,
 * to find a free region that is of the requested size and has not
 * been reserved.
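 *
 * For example (illustrative numbers): searching for size 8 from block
 * 100 with existing windows [90,110] and [130,150], cur is first bumped
 * past the head window to 111; since 111 + 8 <= 130, the gap between
 * the two windows is taken and my_rsv becomes [111,118].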
 *
 */
static int find_next_reservable_window(
				struct ext3_reserve_window_node *search_head,
				struct ext3_reserve_window_node *my_rsv,
				struct super_block * sb,
				ext3_fsblk_t start_block,
				ext3_fsblk_t last_block)
{
	struct rb_node *next;
	struct ext3_reserve_window_node *rsv, *prev;
	ext3_fsblk_t cur;
	int size = my_rsv->rsv_goal_size;

	/* TODO: make the start of the reservation window byte-aligned */
	/* cur = *start_block & ~7;*/
	cur = start_block;
	rsv = search_head;
	if (!rsv)
		return -1;

	while (1) {
		if (cur <= rsv->rsv_end)
			cur = rsv->rsv_end + 1;

		/* TODO?
		 * in the case we could not find a reservable space of
		 * the expected size, during the re-search we could
		 * remember the largest reservable space we did find
		 * and return that one.
		 *
		 * For now it will fail if we could not find a reservable
		 * space of the expected size (or more)...
		 */
		if (cur > last_block)
			return -1;		/* fail */

		prev = rsv;
		next = rb_next(&rsv->rsv_node);
		rsv = rb_entry(next,struct ext3_reserve_window_node,rsv_node);

		/*
		 * Reached the last reservation, we can just append to the
		 * previous one.
		 */
		if (!next)
			break;

		if (cur + size <= rsv->rsv_start) {
			/*
			 * Found a reservable space big enough.  We could
			 * have a reservation across the group boundary here
			 */
			break;
		}
	}
	/*
	 * we come here either:
	 * when we reach the end of the whole list, and there is empty
	 * reservable space after the last entry in the list: append it
	 * to the end of the list.
	 *
	 * or we found one reservable space in the middle of the list,
	 * return the reservation window that we could append to.
	 * succeed.
	 */

	if ((prev != my_rsv) && (!rsv_is_empty(&my_rsv->rsv_window)))
		rsv_window_remove(sb, my_rsv);

	/*
	 * Let's book the whole available window for now.  We will check the
	 * disk bitmap later and then, if there are free blocks, we adjust
	 * the window size if it's larger than requested.
	 * Otherwise, we will remove this node from the tree the next time
	 * find_next_reservable_window() is called.
	 */
	my_rsv->rsv_start = cur;
	my_rsv->rsv_end = cur + size - 1;
	my_rsv->rsv_alloc_hit = 0;

	if (prev != my_rsv)
		ext3_rsv_window_add(sb, my_rsv);

	return 0;
}

/**
 * alloc_new_reservation()--allocate a new reservation window
 *
 * To make a new reservation, we search part of the filesystem
 * reservation list (the part inside the group).  We try to
 * allocate a new reservation window near the allocation goal,
 * or at the beginning of the group if there is no goal.
 *
 * We first find a reservable space after the goal, then from
 * there, we check the bitmap for the first free block after
 * it.  If there is no free block until the end of the group, then the
 * whole group is full and we failed.  Otherwise, check if the free
 * block is inside the expected reservable space; if so, we
 * succeed.
 * If the first free block is outside the reservable space, then
 * starting from the first free block, we search for the next available
 * space, and go on.
 *
 * On success, a new reservation will be found and inserted into the
 * list.  It contains at least one free block, and it does not overlap
 * with other reservation windows.
 *
 * On failure, we failed to find a reservation window in this group.
 *
 * @my_rsv: the reservation
 *
 * @grp_goal: The goal (group-relative).  It is where the search for a
 *	free reservable space should start from.
 *	If we have a grp_goal (grp_goal >= 0), then start from there;
 *	with no grp_goal (grp_goal = -1), we start from the first block
 *	of the group.
 *
 * @sb: the super block
 * @group: the group we are trying to allocate in
 * @bitmap_bh: the block group block bitmap
 *
 */
static int alloc_new_reservation(struct ext3_reserve_window_node *my_rsv,
		ext3_grpblk_t grp_goal, struct super_block *sb,
		unsigned int group, struct buffer_head *bitmap_bh)
{
	struct ext3_reserve_window_node *search_head;
	ext3_fsblk_t group_first_block, group_end_block, start_block;
	ext3_grpblk_t first_free_block;
	struct rb_root *fs_rsv_root = &EXT3_SB(sb)->s_rsv_window_root;
	unsigned long size;
	int ret;
	spinlock_t *rsv_lock = &EXT3_SB(sb)->s_rsv_window_lock;

	group_first_block = ext3_group_first_block_no(sb, group);
	group_end_block = group_first_block + (EXT3_BLOCKS_PER_GROUP(sb) - 1);

	if (grp_goal < 0)
		start_block = group_first_block;
	else
		start_block = grp_goal + group_first_block;

	size = my_rsv->rsv_goal_size;

	if (!rsv_is_empty(&my_rsv->rsv_window)) {
		/*
		 * if the old reservation crosses the group boundary
		 * and if the goal is inside the old reservation window,
		 * we will come here when we just failed to allocate from
		 * the first part of the window.  We still have another part
		 * that belongs to the next group.  In this case, there is no
		 * point in discarding our window and trying to allocate a
		 * new one in this group (which will fail): we should keep
		 * the reservation window and simply move on.
		 *
		 * Maybe we could shift the start block of the reservation
		 * window to the first block of the next group.
		 */

		if ((my_rsv->rsv_start <= group_end_block) &&
				(my_rsv->rsv_end > group_end_block) &&
				(start_block >= my_rsv->rsv_start))
			return -1;

		if ((my_rsv->rsv_alloc_hit >
		     (my_rsv->rsv_end - my_rsv->rsv_start + 1) / 2)) {
			/*
			 * if the previous allocation hit ratio is
			 * greater than 1/2, then we double the size of
			 * the reservation window the next time,
			 * otherwise we keep the same size window
			 */
			size = size * 2;
			if (size > EXT3_MAX_RESERVE_BLOCKS)
				size = EXT3_MAX_RESERVE_BLOCKS;
			my_rsv->rsv_goal_size = size;
		}
	}

	spin_lock(rsv_lock);
	/*
	 * shift the search start to the window near the goal block
	 */
	search_head = search_reserve_window(fs_rsv_root, start_block);

	/*
	 * find_next_reservable_window() simply finds a reservable window
	 * inside the given range (start_block, group_end_block).
	 *
	 * To make sure the reservation window has a free bit inside it, we
	 * need to check the bitmap after we found a reservable window.
	 */
retry:
	ret = find_next_reservable_window(search_head, my_rsv, sb,
						start_block, group_end_block);

	if (ret == -1) {
		if (!rsv_is_empty(&my_rsv->rsv_window))
			rsv_window_remove(sb, my_rsv);
		spin_unlock(rsv_lock);
		return -1;
	}

	/*
	 * On success, find_next_reservable_window() returns the
	 * reservation window where there is a reservable space after it.
	 * Before we reserve this reservable space, we need
	 * to make sure there is at least one free block inside this region.
	 *
	 * Search for the first free bit in the block bitmap and the copy
	 * of the last-committed bitmap alternately, until we find an
	 * allocatable block.  The search starts from the start block of
	 * the reservable space we just found.
	 */
	spin_unlock(rsv_lock);
	first_free_block = bitmap_search_next_usable_block(
			my_rsv->rsv_start - group_first_block,
			bitmap_bh, group_end_block - group_first_block + 1);

	if (first_free_block < 0) {
		/*
		 * no free block left on the bitmap, no point
		 * in reserving the space.  return failure.
		 */
		spin_lock(rsv_lock);
		if (!rsv_is_empty(&my_rsv->rsv_window))
			rsv_window_remove(sb, my_rsv);
		spin_unlock(rsv_lock);
		return -1;		/* failed */
	}

	start_block = first_free_block + group_first_block;
	/*
	 * check if the first free block is within the
	 * free space we just reserved
	 */
	if (start_block >= my_rsv->rsv_start && start_block <= my_rsv->rsv_end)
		return 0;		/* success */
	/*
	 * if the first free bit we found is out of the reservable space,
	 * continue the search for the next reservable space,
	 * starting from where the free block is;
	 * we also shift the list head to where we stopped last time
	 */
	search_head = my_rsv;
	spin_lock(rsv_lock);
	goto retry;
}

/**
 * try_to_extend_reservation()
 * @my_rsv:		given reservation window
 * @sb:			super block
 * @size:		the delta to extend
 *
 * Attempt to expand the reservation window so that it covers the
 * required number of free blocks.
 *
 * Since ext3_try_to_allocate() will always allocate blocks within
 * the reservation window range, if the window size is too small,
 * a multiple-block allocation has to stop at the end of the reservation
 * window.  To make this more efficient, given the total number of
 * blocks needed and the current size of the window, we try to
 * expand the reservation window size if necessary on a best-effort
 * basis before ext3_new_blocks() tries to allocate blocks.
 */
static void try_to_extend_reservation(struct ext3_reserve_window_node *my_rsv,
			struct super_block *sb, int size)
{
	struct ext3_reserve_window_node *next_rsv;
	struct rb_node *next;
	spinlock_t *rsv_lock = &EXT3_SB(sb)->s_rsv_window_lock;

	if (!spin_trylock(rsv_lock))
		return;

	next = rb_next(&my_rsv->rsv_node);

	if (!next)
		my_rsv->rsv_end += size;
	else {
		next_rsv = rb_entry(next, struct ext3_reserve_window_node, rsv_node);

		if ((next_rsv->rsv_start - my_rsv->rsv_end - 1) >= size)
			my_rsv->rsv_end += size;
		else
			my_rsv->rsv_end = next_rsv->rsv_start - 1;
	}
	spin_unlock(rsv_lock);
}

/**
 * ext3_try_to_allocate_with_rsv()
 * @sb:			superblock
 * @handle:		handle to this transaction
 * @group:		given allocation block group
 * @bitmap_bh:		bufferhead holds the block bitmap
 * @grp_goal:		given target block within the group
 * @count:		target number of blocks to allocate
 * @my_rsv:		reservation window
 * @errp:		pointer to store the error code
 *
 * This is the main function used to allocate a new block and its reservation
 * window.
 *
 * Each time a new block allocation is needed, first try to allocate from
 * the inode's own reservation.  If it does not yet have a reservation
 * window, then instead of first looking for a free bit in the bitmap
 * and then checking whether it falls inside somebody else's reservation
 * window, we try to allocate a reservation window for the inode,
 * starting from the goal, and then do the block allocation within the
 * reservation window.
 *
 * This avoids repeatedly searching the reservation list when somebody
 * is looking for a free block (without a reservation) and there are
 * lots of free blocks, but they are all being reserved.
 *
 * We use a red-black tree for the per-filesystem reservation list.
 *
 */
static ext3_grpblk_t
ext3_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle,
			unsigned int group, struct buffer_head *bitmap_bh,
			ext3_grpblk_t grp_goal,
			struct ext3_reserve_window_node * my_rsv,
			unsigned long *count, int *errp)
{
	ext3_fsblk_t group_first_block, group_last_block;
	ext3_grpblk_t ret = 0;
	int fatal;
	unsigned long num = *count;

	*errp = 0;

	/*
	 * Make sure we use undo access for the bitmap, because it is critical
	 * that we do the frozen_data COW on bitmap buffers in all cases even
	 * if the buffer is in BJ_Forget state in the committing transaction.
	 */
	BUFFER_TRACE(bitmap_bh, "get undo access for new block");
	fatal = ext3_journal_get_undo_access(handle, bitmap_bh);
	if (fatal) {
		*errp = fatal;
		return -1;
	}

	/*
	 * we don't deal with reservation when
	 * the filesystem is mounted without reservation,
	 * or the file is not a regular file,
	 * or the last attempt to allocate a block with reservation
	 * turned on failed
	 */
	if (my_rsv == NULL) {
		ret = ext3_try_to_allocate(sb, handle, group, bitmap_bh,
						grp_goal, count, NULL);
		goto out;
	}
	/*
	 * grp_goal is a group-relative block number (if there is a goal):
	 * 0 <= grp_goal < EXT3_BLOCKS_PER_GROUP(sb)
	 * group_first_block is a filesystem-wide block number: the number
	 * of the first block in this group.
	 */
	group_first_block = ext3_group_first_block_no(sb, group);
	group_last_block = group_first_block + (EXT3_BLOCKS_PER_GROUP(sb) - 1);

	/*
	 * Basically we will allocate a new block from the inode's reservation
	 * window.
	 *
	 * We need to allocate a new reservation window, if:
	 * a) the inode does not have a reservation window; or
	 * b) the last attempt to allocate a block from the existing
	 *    reservation failed; or
	 * c) we come here with a goal that lies outside the existing
	 *    reservation window.
	 *
	 * We do not need to allocate a new reservation window if we come here
	 * at the beginning with a goal and the goal is inside the window, or
	 * we don't have a goal but already have a reservation window;
	 * in those cases we can allocate from the reservation window directly.
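	 *
	 * The loop below therefore ends either when blocks have been
	 * allocated (ret >= 0) or when a new reservation window cannot
	 * be made in this group (alloc_new_reservation() returns < 0).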
	 */
	while (1) {
		if (rsv_is_empty(&my_rsv->rsv_window) || (ret < 0) ||
			!goal_in_my_reservation(&my_rsv->rsv_window,
						grp_goal, group, sb)) {
			if (my_rsv->rsv_goal_size < *count)
				my_rsv->rsv_goal_size = *count;
			ret = alloc_new_reservation(my_rsv, grp_goal, sb,
							group, bitmap_bh);
			if (ret < 0)
				break;			/* failed */

			if (!goal_in_my_reservation(&my_rsv->rsv_window,
							grp_goal, group, sb))
				grp_goal = -1;
		} else if (grp_goal >= 0) {
			int curr = my_rsv->rsv_end -
					(grp_goal + group_first_block) + 1;

			if (curr < *count)
				try_to_extend_reservation(my_rsv, sb,
							*count - curr);
		}

		if ((my_rsv->rsv_start > group_last_block) ||
				(my_rsv->rsv_end < group_first_block)) {
			rsv_window_dump(&EXT3_SB(sb)->s_rsv_window_root, 1);
			BUG();
		}
		ret = ext3_try_to_allocate(sb, handle, group, bitmap_bh,
					   grp_goal, &num, &my_rsv->rsv_window);
		if (ret >= 0) {
			my_rsv->rsv_alloc_hit += num;
			*count = num;
			break;				/* succeed */
		}
		num = *count;
	}
out:
	if (ret >= 0) {
		BUFFER_TRACE(bitmap_bh, "journal_dirty_metadata for "
					"bitmap block");
		fatal = ext3_journal_dirty_metadata(handle, bitmap_bh);
		if (fatal) {
			*errp = fatal;
			return -1;
		}
		return ret;
	}

	BUFFER_TRACE(bitmap_bh, "journal_release_buffer");
	ext3_journal_release_buffer(handle, bitmap_bh);
	return ret;
}

/**
 * ext3_has_free_blocks()
 * @sbi:		in-core super block structure.
 *
 * Check if the filesystem has at least 1 free block available for
 * allocation.
 */
static int ext3_has_free_blocks(struct ext3_sb_info *sbi)
{
	ext3_fsblk_t free_blocks, root_blocks;

	free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
	root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
	if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
		sbi->s_resuid != current_fsuid() &&
		(sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
		return 0;
	}
	return 1;
}

/**
 * ext3_should_retry_alloc()
 * @sb:			super block
 * @retries:		number of attempts already made
 *
 * ext3_should_retry_alloc() is called when ENOSPC is returned, and if
 * it is profitable to retry the operation, this function will wait
 * for the current or committing transaction to complete, and then
 * return TRUE.
 *
 * If the total number of retries exceeds three, return FALSE.
 */
int ext3_should_retry_alloc(struct super_block *sb, int *retries)
{
	if (!ext3_has_free_blocks(EXT3_SB(sb)) || (*retries)++ > 3)
		return 0;

	jbd_debug(1, "%s: retrying operation after ENOSPC\n", sb->s_id);

	return journal_force_commit_nested(EXT3_SB(sb)->s_journal);
}

/**
 * ext3_new_blocks() -- core block(s) allocation function
 * @handle:		handle to this transaction
 * @inode:		file inode
 * @goal:		given target block (filesystem wide)
 * @count:		target number of blocks to allocate
 * @errp:		pointer to store the error code
 *
 * ext3_new_blocks() uses a goal block to assist allocation.  It first
 * tries to allocate block(s) from the block group that contains the
 * goal block.  If that fails, it will try to allocate block(s) from
 * other block groups without any specific goal block.
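 *
 * A minimal caller sketch (illustrative only, not a quote of a real
 * call site):
 *
 *	int err;
 *	unsigned long n = 4;	/- ask for up to 4 blocks -/
 *	ext3_fsblk_t blk = ext3_new_blocks(handle, inode, goal, &n, &err);
 *	if (!blk)
 *		return err;	/- on success, n holds how many were granted -/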
 *
 */
ext3_fsblk_t ext3_new_blocks(handle_t *handle, struct inode *inode,
			ext3_fsblk_t goal, unsigned long *count, int *errp)
{
	struct buffer_head *bitmap_bh = NULL;
	struct buffer_head *gdp_bh;
	int group_no;
	int goal_group;
	ext3_grpblk_t grp_target_blk;	/* blockgroup relative goal block */
	ext3_grpblk_t grp_alloc_blk;	/* blockgroup-relative allocated block*/
	ext3_fsblk_t ret_block;		/* filesystem-wide allocated block */
	int bgi;			/* blockgroup iteration index */
	int fatal = 0, err;
	int performed_allocation = 0;
	ext3_grpblk_t free_blocks;	/* number of free blocks in a group */
	struct super_block *sb;
	struct ext3_group_desc *gdp;
	struct ext3_super_block *es;
	struct ext3_sb_info *sbi;
	struct ext3_reserve_window_node *my_rsv = NULL;
	struct ext3_block_alloc_info *block_i;
	unsigned short windowsz = 0;
#ifdef EXT3FS_DEBUG
	static int goal_hits, goal_attempts;
#endif
	unsigned long ngroups;
	unsigned long num = *count;

	*errp = -ENOSPC;
	sb = inode->i_sb;
	if (!sb) {
		printk("ext3_new_block: nonexistent device");
		return 0;
	}

	/*
	 * Check quota for allocation of this block.
	 */
	err = dquot_alloc_block(inode, num);
	if (err) {
		*errp = err;
		return 0;
	}

	sbi = EXT3_SB(sb);
	es = EXT3_SB(sb)->s_es;
	ext3_debug("goal=%lu.\n", goal);
	/*
	 * Allocate a block from reservation only when
	 * the filesystem is mounted with reservation (the default,
	 * -o reservation), and
	 * it's a regular file, and
	 * the desired window size is greater than 0 (one could use the
	 * ioctl command EXT3_IOC_SETRSVSZ to set the window size to 0
	 * to turn off reservation on that particular file)
	 */
	block_i = EXT3_I(inode)->i_block_alloc_info;
	if (block_i && ((windowsz = block_i->rsv_window_node.rsv_goal_size) > 0))
		my_rsv = &block_i->rsv_window_node;

	if (!ext3_has_free_blocks(sbi)) {
		*errp = -ENOSPC;
		goto out;
	}

	/*
	 * First, test whether the goal block is free.
	 */
	if (goal < le32_to_cpu(es->s_first_data_block) ||
	    goal >= le32_to_cpu(es->s_blocks_count))
		goal = le32_to_cpu(es->s_first_data_block);
	group_no = (goal - le32_to_cpu(es->s_first_data_block)) /
			EXT3_BLOCKS_PER_GROUP(sb);
	goal_group = group_no;
retry_alloc:
	gdp = ext3_get_group_desc(sb, group_no, &gdp_bh);
	if (!gdp)
		goto io_error;

	free_blocks = le16_to_cpu(gdp->bg_free_blocks_count);
	/*
	 * if there are not enough free blocks to make a new reservation,
	 * turn off reservation for this allocation
	 */
	if (my_rsv && (free_blocks < windowsz)
		&& (free_blocks > 0)
		&& (rsv_is_empty(&my_rsv->rsv_window)))
		my_rsv = NULL;

	if (free_blocks > 0) {
		grp_target_blk = ((goal - le32_to_cpu(es->s_first_data_block)) %
				EXT3_BLOCKS_PER_GROUP(sb));
		bitmap_bh = read_block_bitmap(sb, group_no);
		if (!bitmap_bh)
			goto io_error;
		grp_alloc_blk = ext3_try_to_allocate_with_rsv(sb, handle,
					group_no, bitmap_bh, grp_target_blk,
					my_rsv, &num, &fatal);
		if (fatal)
			goto out;
		if (grp_alloc_blk >= 0)
			goto allocated;
	}

	ngroups = EXT3_SB(sb)->s_groups_count;
	smp_rmb();

	/*
	 * Now search the rest of the groups.  We assume that
	 * group_no and gdp correctly point to the last group visited.
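	 *
	 * The scan below wraps around modulo ngroups, so every group is
	 * visited at most once even when goal_group is near the end of
	 * the disk.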
	 */
	for (bgi = 0; bgi < ngroups; bgi++) {
		group_no++;
		if (group_no >= ngroups)
			group_no = 0;
		gdp = ext3_get_group_desc(sb, group_no, &gdp_bh);
		if (!gdp)
			goto io_error;
		free_blocks = le16_to_cpu(gdp->bg_free_blocks_count);
		/*
		 * skip this group (and avoid loading the bitmap) if there
		 * are no free blocks
		 */
		if (!free_blocks)
			continue;
		/*
		 * skip this group if the number of
		 * free blocks is less than half of the reservation
		 * window size.
		 */
		if (my_rsv && (free_blocks <= (windowsz/2)))
			continue;

		brelse(bitmap_bh);
		bitmap_bh = read_block_bitmap(sb, group_no);
		if (!bitmap_bh)
			goto io_error;
		/*
		 * try to allocate block(s) from this group, without a goal (-1).
		 */
		grp_alloc_blk = ext3_try_to_allocate_with_rsv(sb, handle,
					group_no, bitmap_bh, -1, my_rsv,
					&num, &fatal);
		if (fatal)
			goto out;
		if (grp_alloc_blk >= 0)
			goto allocated;
	}
	/*
	 * We may end up with a bogus earlier ENOSPC error because the
	 * filesystem is "full" of reservations while there may in fact
	 * be free blocks available on disk.  In this case, we just forget
	 * about the reservations and do the block allocation as if there
	 * were no reservations.
	 */
	if (my_rsv) {
		my_rsv = NULL;
		windowsz = 0;
		group_no = goal_group;
		goto retry_alloc;
	}
	/* No space left on the device */
	*errp = -ENOSPC;
	goto out;

allocated:

	ext3_debug("using block group %d(%d)\n",
			group_no, gdp->bg_free_blocks_count);

	BUFFER_TRACE(gdp_bh, "get_write_access");
	fatal = ext3_journal_get_write_access(handle, gdp_bh);
	if (fatal)
		goto out;

	ret_block = grp_alloc_blk + ext3_group_first_block_no(sb, group_no);

	if (in_range(le32_to_cpu(gdp->bg_block_bitmap), ret_block, num) ||
	    in_range(le32_to_cpu(gdp->bg_inode_bitmap), ret_block, num) ||
	    in_range(ret_block, le32_to_cpu(gdp->bg_inode_table),
		      EXT3_SB(sb)->s_itb_per_group) ||
	    in_range(ret_block + num - 1, le32_to_cpu(gdp->bg_inode_table),
		      EXT3_SB(sb)->s_itb_per_group)) {
		ext3_error(sb, "ext3_new_block",
			    "Allocating block in system zone - "
			    "blocks from "E3FSBLK", length %lu",
			    ret_block, num);
		/*
		 * claim_block() marked the blocks we allocated as in use.  So
		 * we may want to selectively mark some of the blocks as free.
		 */
		goto retry_alloc;
	}

	performed_allocation = 1;

#ifdef CONFIG_JBD_DEBUG
	{
		struct buffer_head *debug_bh;

		/* Record bitmap buffer state in the newly allocated block */
		debug_bh = sb_find_get_block(sb, ret_block);
		if (debug_bh) {
			BUFFER_TRACE(debug_bh, "state when allocated");
			BUFFER_TRACE2(debug_bh, bitmap_bh, "bitmap state");
			brelse(debug_bh);
		}
	}
	jbd_lock_bh_state(bitmap_bh);
	spin_lock(sb_bgl_lock(sbi, group_no));
	if (buffer_jbd(bitmap_bh) && bh2jh(bitmap_bh)->b_committed_data) {
		int i;

		for (i = 0; i < num; i++) {
			if (ext3_test_bit(grp_alloc_blk+i,
					bh2jh(bitmap_bh)->b_committed_data)) {
				printk("%s: block was unexpectedly set in "
					"b_committed_data\n", __func__);
			}
		}
	}
	ext3_debug("found bit %d\n", grp_alloc_blk);
	spin_unlock(sb_bgl_lock(sbi, group_no));
	jbd_unlock_bh_state(bitmap_bh);
#endif

	if (ret_block + num - 1 >= le32_to_cpu(es->s_blocks_count)) {
		ext3_error(sb, "ext3_new_block",
			    "block("E3FSBLK") >= blocks count(%d) - "
			    "block_group = %d, es == %p ", ret_block,
			le32_to_cpu(es->s_blocks_count), group_no, es);
		goto out;
	}

	/*
	 * It is up to the caller to add the new buffer to a journal
	 * list of some description.  We don't know in advance whether
	 * the caller wants to use it as metadata or data.
	 */
	ext3_debug("allocating block %lu. Goal hits %d of %d.\n",
			ret_block, goal_hits, goal_attempts);

	spin_lock(sb_bgl_lock(sbi, group_no));
	le16_add_cpu(&gdp->bg_free_blocks_count, -num);
	spin_unlock(sb_bgl_lock(sbi, group_no));
	percpu_counter_sub(&sbi->s_freeblocks_counter, num);

	BUFFER_TRACE(gdp_bh, "journal_dirty_metadata for group descriptor");
	err = ext3_journal_dirty_metadata(handle, gdp_bh);
	if (!fatal)
		fatal = err;

	if (fatal)
		goto out;

	*errp = 0;
	brelse(bitmap_bh);
	dquot_free_block(inode, *count - num);
	*count = num;
	return ret_block;

io_error:
	*errp = -EIO;
out:
	if (fatal) {
		*errp = fatal;
		ext3_std_error(sb, fatal);
	}
	/*
	 * Undo the block allocation
	 */
	if (!performed_allocation)
		dquot_free_block(inode, *count);
	brelse(bitmap_bh);
	return 0;
}

ext3_fsblk_t ext3_new_block(handle_t *handle, struct inode *inode,
			ext3_fsblk_t goal, int *errp)
{
	unsigned long count = 1;

	return ext3_new_blocks(handle, inode, goal, &count, errp);
}

/**
 * ext3_count_free_blocks() -- count filesystem free blocks
 * @sb:			superblock
 *
 * Adds up the number of free blocks from each block group.
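 *
 * With EXT3FS_DEBUG defined, the count is recomputed from the on-disk
 * bitmaps and cross-checked against the group descriptors and the
 * superblock's stored total; otherwise only the descriptor counts are
 * summed.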
 */
ext3_fsblk_t ext3_count_free_blocks(struct super_block *sb)
{
	ext3_fsblk_t desc_count;
	struct ext3_group_desc *gdp;
	int i;
	unsigned long ngroups = EXT3_SB(sb)->s_groups_count;
#ifdef EXT3FS_DEBUG
	struct ext3_super_block *es;
	ext3_fsblk_t bitmap_count;
	unsigned long x;
	struct buffer_head *bitmap_bh = NULL;

	es = EXT3_SB(sb)->s_es;
	desc_count = 0;
	bitmap_count = 0;
	gdp = NULL;

	smp_rmb();
	for (i = 0; i < ngroups; i++) {
		gdp = ext3_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		desc_count += le16_to_cpu(gdp->bg_free_blocks_count);
		brelse(bitmap_bh);
		bitmap_bh = read_block_bitmap(sb, i);
		if (bitmap_bh == NULL)
			continue;

		x = ext3_count_free(bitmap_bh, sb->s_blocksize);
		printk("group %d: stored = %d, counted = %lu\n",
			i, le16_to_cpu(gdp->bg_free_blocks_count), x);
		bitmap_count += x;
	}
	brelse(bitmap_bh);
	printk("ext3_count_free_blocks: stored = "E3FSBLK
		", computed = "E3FSBLK", "E3FSBLK"\n",
		le32_to_cpu(es->s_free_blocks_count),
		desc_count, bitmap_count);
	return bitmap_count;
#else
	desc_count = 0;
	smp_rmb();
	for (i = 0; i < ngroups; i++) {
		gdp = ext3_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		desc_count += le16_to_cpu(gdp->bg_free_blocks_count);
	}

	return desc_count;
#endif
}

static inline int test_root(int a, int b)
{
	int num = b;

	while (a > num)
		num *= b;
	return num == a;
}

static int ext3_group_sparse(int group)
{
	if (group <= 1)
		return 1;
	if (!(group & 1))
		return 0;
	return (test_root(group, 7) || test_root(group, 5) ||
		test_root(group, 3));
}

/**
 * ext3_bg_has_super - number of blocks used by the superblock in group
 * @sb:			superblock for filesystem
 * @group:		group number to check
 *
 * Return the number of blocks used by the superblock (primary or backup)
 * in this group.  Currently this will be only 0 or 1.
 */
int ext3_bg_has_super(struct super_block *sb, int group)
{
	if (EXT3_HAS_RO_COMPAT_FEATURE(sb,
				EXT3_FEATURE_RO_COMPAT_SPARSE_SUPER) &&
	    !ext3_group_sparse(group))
		return 0;
	return 1;
}

static unsigned long ext3_bg_num_gdb_meta(struct super_block *sb, int group)
{
	unsigned long metagroup = group / EXT3_DESC_PER_BLOCK(sb);
	unsigned long first = metagroup * EXT3_DESC_PER_BLOCK(sb);
	unsigned long last = first + EXT3_DESC_PER_BLOCK(sb) - 1;

	if (group == first || group == first + 1 || group == last)
		return 1;
	return 0;
}

static unsigned long ext3_bg_num_gdb_nometa(struct super_block *sb, int group)
{
	return ext3_bg_has_super(sb, group) ? EXT3_SB(sb)->s_gdb_count : 0;
}

/**
 * ext3_bg_num_gdb - number of blocks used by the group table in group
 * @sb:			superblock for filesystem
 * @group:		group number to check
 *
 * Return the number of blocks used by the group descriptor table
 * (primary or backup) in this group.  In the future there may be a
 * different number of descriptor blocks in each group.
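 *
 * For example, with SPARSE_SUPER and no META_BG, groups 0 and 1 and the
 * powers of 3, 5 and 7 (3, 5, 7, 9, 25, 27, 49, ...) each carry
 * s_gdb_count descriptor blocks, and every other group carries none.
 * With META_BG, only the first, second and last group of each metagroup
 * carry a single descriptor block.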
 */
unsigned long ext3_bg_num_gdb(struct super_block *sb, int group)
{
	unsigned long first_meta_bg =
			le32_to_cpu(EXT3_SB(sb)->s_es->s_first_meta_bg);
	unsigned long metagroup = group / EXT3_DESC_PER_BLOCK(sb);

	if (!EXT3_HAS_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_META_BG) ||
			metagroup < first_meta_bg)
		return ext3_bg_num_gdb_nometa(sb, group);

	return ext3_bg_num_gdb_meta(sb, group);

}