/*
 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
 * Written by Alex Tomas <alex@clusterfs.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */


/*
 * mballoc.c contains the multiblock allocation routines
 */

#include "mballoc.h"
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <trace/events/ext4.h>

/*
 * MUSTDO:
 *   - test ext4_ext_search_left() and ext4_ext_search_right()
 *   - search for metadata in a few groups
 *
 * TODO v4:
 *   - normalization should take into account whether file is still open
 *   - discard preallocations if no free space left (policy?)
 *   - don't normalize tails
 *   - quota
 *   - reservation for superuser
 *
 * TODO v3:
 *   - bitmap read-ahead (proposed by Oleg Drokin aka green)
 *   - track min/max extents in each group for better group selection
 *   - mb_mark_used() may allocate chunk right after splitting buddy
 *   - tree of groups sorted by number of free blocks
 *   - error handling
 */

/*
 * An allocation request asks for multiple blocks near the specified
 * goal block.
 *
 * During the initialization phase of the allocator we decide to use
 * group preallocation or inode preallocation depending on the size of
 * the file. The size of the file could be the resulting file size we
 * would have after allocation, or the current file size, whichever
 * is larger. If the size is less than sbi->s_mb_stream_request we
 * select group preallocation. The default value of
 * s_mb_stream_request is 16 blocks. This can also be tuned via
 * /sys/fs/ext4/<partition>/mb_stream_req. The value is represented in
 * terms of number of blocks.
 *
 * The main motivation for having small files use group preallocation is to
 * ensure that we have small files placed closer together on the disk.
 *
 * In the first stage the allocator looks at the inode prealloc list,
 * ext4_inode_info->i_prealloc_list, which contains a list of prealloc
 * spaces for this particular inode. The inode prealloc space is
 * represented as:
 *
 * pa_lstart -> the logical start block for this prealloc space
 * pa_pstart -> the physical start block for this prealloc space
 * pa_len    -> length for this prealloc space
 * pa_free   -> free space available in this prealloc space
 *
 * The inode preallocation space is used looking at the _logical_ start
 * block. A particular prealloc space is consumed only if the logical file
 * block falls within its range. This makes sure that we have contiguous
 * physical blocks representing the file's blocks.
 *
 * The important thing to note about inode prealloc space is that
 * we don't modify the values associated with it except
 * pa_free.
 *
 * If we are not able to find blocks in the inode prealloc space and if we
 * have the group allocation flag set then we look at the locality group
 * prealloc space. These are per-CPU prealloc lists represented as:
 *
 * ext4_sb_info.s_locality_groups[smp_processor_id()]
 *
 * The reason for having a per-CPU locality group is to reduce the contention
 * between CPUs. It is possible to get scheduled at this point.
 *
 * The locality group prealloc space is used looking at whether we have
 * enough free space (pa_free) within the prealloc space.
 *
 * If we can't allocate blocks via inode and/or locality group
 * prealloc then we look at the buddy cache. The buddy cache is represented
 * by ext4_sb_info.s_buddy_cache (struct inode) whose file offset gets
 * mapped to the buddy and bitmap information regarding different
 * groups. The buddy information is attached to the buddy cache inode so that
 * we can access it through the page cache. The information regarding
 * each group is loaded via ext4_mb_load_buddy. The information consists of
 * the block bitmap and the buddy information, stored in the inode as:
 *
 *  {                        page                        }
 *  [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
 *
 *
 * one block each for bitmap and buddy information. So for each group we
 * take up 2 blocks. A page can contain blocks_per_page (PAGE_CACHE_SIZE /
 * blocksize) blocks.  So it can have information regarding
 * groups_per_page groups, which is blocks_per_page/2.
 *
 * The buddy cache inode is not stored on disk. The inode is thrown
 * away when the filesystem is unmounted.
 *
 * We look for count number of blocks in the buddy cache. If we were able
 * to locate that many free blocks we return with additional information
 * regarding the rest of the contiguous physical blocks available.
 *
 * Before allocating blocks via the buddy cache we normalize the request
 * blocks. This ensures we ask for more blocks than we need. The extra
 * blocks that we get after allocation are added to the respective prealloc
 * list. In case of inode preallocation we follow a list of heuristics
 * based on file size. This can be found in ext4_mb_normalize_request. If
 * we are doing a group prealloc we try to normalize the request to
 * sbi->s_mb_group_prealloc. The default value of s_mb_group_prealloc is
 * 512 blocks. This can be tuned via
 * /sys/fs/ext4/<partition>/mb_group_prealloc. The value is represented in
 * terms of number of blocks. If we have mounted the file system with the
 * -o stripe=<value> option the group prealloc request is normalized to the
 * stripe value (sbi->s_stripe).
 *
 * The regular allocator (using the buddy cache) supports a few tunables.
 *
 * /sys/fs/ext4/<partition>/mb_min_to_scan
 * /sys/fs/ext4/<partition>/mb_max_to_scan
 * /sys/fs/ext4/<partition>/mb_order2_req
 *
 * The regular allocator uses the buddy scan only if the request len is a
 * power of 2 blocks and the order of allocation is >= sbi->s_mb_order2_reqs.
 * The value of s_mb_order2_reqs can be tuned via
 * /sys/fs/ext4/<partition>/mb_order2_req. If the request len is equal to
 * the stripe size (sbi->s_stripe), we try to search for contiguous blocks
 * in stripe size. This should result in better allocation on RAID setups.
 * If not, we search in the particular group using the bitmap for best
 * extents. The tunables min_to_scan and max_to_scan control the behaviour
 * here. min_to_scan indicates how long mballoc __must__ look for a best
 * extent and max_to_scan indicates how long mballoc __can__ look for a
 * best extent among the found extents. Searching for blocks starts with
 * the group specified as the goal value in the allocation context via
 * ac_g_ex. Each group is first checked based on the criteria whether it
 * can be used for allocation. ext4_mb_good_group explains how the groups
 * are checked.
 *
 * Both types of prealloc space are populated as above. So for the first
 * request we will hit the buddy cache, which will result in this prealloc
 * space getting filled. The prealloc space is then later used for
 * subsequent requests.
 */
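/*
 * Illustrative sketch of the inode PA lookup (all numbers invented, not
 * from any real fs): given an inode PA with pa_lstart = 96, pa_pstart =
 * 20000 and pa_len = 32, a write to logical block 100 falls inside
 * [96, 128) and would therefore be served from this PA; the physical block
 * is derived from the same offset:
 *
 *	if (logical >= pa_lstart && logical < pa_lstart + pa_len)
 *		physical = pa_pstart + (logical - pa_lstart);
 *
 * A write to logical block 200 misses every inode PA of this inode and
 * falls through to the locality group PA or the buddy cache as described
 * above.
 */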
/*
 * mballoc operates on the following data:
 *  - on-disk bitmap
 *  - in-core buddy (actually includes buddy and bitmap)
 *  - preallocation descriptors (PAs)
 *
 * there are two types of preallocations:
 *  - inode
 *    assigned to a specific inode and can be used for this inode only.
 *    it describes part of the inode's space preallocated to specific
 *    physical blocks. any block from that preallocation can be used
 *    independently. the descriptor just tracks the number of blocks left
 *    unused. so, before taking some block from the descriptor, one must
 *    make sure the corresponding logical block isn't allocated yet. this
 *    also means that freeing any block within the descriptor's range
 *    must discard all preallocated blocks.
 *  - locality group
 *    assigned to a specific locality group which does not translate to
 *    a permanent set of inodes: an inode can join and leave the group.
 *    space from this type of preallocation can be used for any inode.
 *    thus it's consumed from the beginning to the end.
 *
 * relation between them can be expressed as:
 *    in-core buddy = on-disk bitmap + preallocation descriptors
 *
 * this means the blocks mballoc considers used are:
 *  - allocated blocks (persistent)
 *  - preallocated blocks (non-persistent)
 *
 * consistency in mballoc world means that at any time a block is either
 * free or used in ALL structures. notice: "any time" should not be read
 * literally -- time is discrete and delimited by locks.
 *
 * to keep it simple, we don't use block numbers, instead we count numbers
 * of blocks: how many blocks are marked used/free in the on-disk bitmap,
 * buddy and PA.
 *
 * all operations can be expressed as:
 *  - init buddy:			buddy = on-disk + PAs
 *  - new PA:				buddy += N; PA = N
 *  - use inode PA:			on-disk += N; PA -= N
 *  - discard inode PA:			buddy -= on-disk - PA; PA = 0
 *  - use locality group PA:		on-disk += N; PA -= N
 *  - discard locality group PA:	buddy -= PA; PA = 0
 *  note: 'buddy -= on-disk - PA' is used to show that the on-disk bitmap
 *        is used in the real operation because we can't know the actual
 *        used bits from the PA, only from the on-disk bitmap
 *
 * if we follow this strict logic, then all operations above should be atomic.
 * given some of them can block, we'd have to use something like semaphores,
 * killing performance on high-end SMP hardware. let's try to relax it using
 * the following knowledge:
 *  1) if buddy is referenced, it's already initialized
 *  2) while block is used in buddy and the buddy is referenced,
 *     nobody can re-allocate that block
 *  3) we work on bitmaps and '+' actually means 'set bits'. if on-disk has
 *     bit set and PA claims same block, it's OK. IOW, one can set a bit in
 *     the on-disk bitmap if the buddy has the same bit set and/or a PA
 *     covers the corresponding block
 *
 * so, now we're building a concurrency table:
 *  - init buddy vs.
 *    - new PA
 *      blocks for PA are allocated in the buddy, buddy must be referenced
 *      until PA is linked to allocation group to avoid concurrent buddy init
 *    - use inode PA
 *      we need to make sure that either on-disk bitmap or PA has uptodate data
 *      given (3) we care that PA-=N operation doesn't interfere with init
 *    - discard inode PA
 *      the simplest way would be to have buddy initialized by the discard
 *    - use locality group PA
 *      again PA-=N must be serialized with init
 *    - discard locality group PA
 *      the simplest way would be to have buddy initialized by the discard
 *  - new PA vs.
 *    - use inode PA
 *      i_data_sem serializes them
 *    - discard inode PA
 *      discard process must wait until PA isn't used by another process
 *    - use locality group PA
 *      some mutex should serialize them
 *    - discard locality group PA
 *      discard process must wait until PA isn't used by another process
 *  - use inode PA vs.
 *    - use inode PA
 *      i_data_sem or another mutex should serialize them
 *    - discard inode PA
 *      discard process must wait until PA isn't used by another process
 *    - use locality group PA
 *      nothing wrong here -- they're different PAs covering different blocks
 *    - discard locality group PA
 *      discard process must wait until PA isn't used by another process
 *
 * now we're ready to draw a few consequences:
 *  - PA is referenced and while it is, no discard is possible
 *  - PA is referenced until its blocks are marked in the on-disk bitmap
 *  - PA changes only after the on-disk bitmap
 *  - discard must not compete with init. either init is done before
 *    any discard or they're serialized somehow
 *  - buddy init as sum of on-disk bitmap and PAs is done atomically
 *
 * a special case is when we've used a PA to emptiness. no need to modify
 * the buddy in this case, but we should care about concurrent init
 *
 */
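/*
 * A small worked example of the accounting above (all numbers invented for
 * illustration): suppose a group has 100 blocks marked in the on-disk
 * bitmap and no PAs; after "init buddy" the in-core buddy shows 100 used.
 * A new 16-block PA gives buddy = 116, PA = 16.  Using 10 blocks from that
 * PA gives on-disk = 110, PA = 6; the buddy stays at 116 because those
 * blocks were already counted there.  Discarding the PA then clears its 6
 * still-unused bits from the buddy (found by comparing the PA's range
 * against the on-disk bitmap), leaving buddy = 110 in agreement with the
 * on-disk bitmap.
 */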
/*
 * Logic in few words:
 *
 *  - allocation:
 *    load group
 *    find blocks
 *    mark bits in on-disk bitmap
 *    release group
 *
 *  - use preallocation:
 *    find proper PA (per-inode or group)
 *    load group
 *    mark bits in on-disk bitmap
 *    release group
 *    release PA
 *
 *  - free:
 *    load group
 *    mark bits in on-disk bitmap
 *    release group
 *
 *  - discard preallocations in group:
 *    mark PAs deleted
 *    move them onto local list
 *    load on-disk bitmap
 *    load group
 *    remove PA from object (inode or locality group)
 *    mark free blocks in-core
 *
 *  - discard inode's preallocations:
 */

/*
 * Locking rules
 *
 * Locks:
 *  - bitlock on a group	(group)
 *  - object (inode/locality)	(object)
 *  - per-pa lock		(pa)
 *
 * Paths:
 *  - new pa
 *    object
 *    group
 *
 *  - find and use pa:
 *    pa
 *
 *  - release consumed pa:
 *    pa
 *    group
 *    object
 *
 *  - generate in-core bitmap:
 *    group
 *        pa
 *
 *  - discard all for given object (inode, locality group):
 *    object
 *        pa
 *    group
 *
 *  - discard all for given group:
 *    group
 *        pa
 *    group
 *        object
 *
 */
static struct kmem_cache *ext4_pspace_cachep;
static struct kmem_cache *ext4_ac_cachep;
static struct kmem_cache *ext4_free_ext_cachep;
static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
					ext4_group_t group);
static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
						ext4_group_t group);
static void release_blocks_on_commit(journal_t *journal, transaction_t *txn);

static inline void *mb_correct_addr_and_bit(int *bit, void *addr)
{
#if BITS_PER_LONG == 64
	*bit += ((unsigned long) addr & 7UL) << 3;
	addr = (void *) ((unsigned long) addr & ~7UL);
#elif BITS_PER_LONG == 32
	*bit += ((unsigned long) addr & 3UL) << 3;
	addr = (void *) ((unsigned long) addr & ~3UL);
#else
#error "how many bits are you?!"
#endif
	return addr;
}
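/*
 * Worked example for the helper above (addresses invented): on a 64-bit
 * machine, mb_correct_addr_and_bit() called with addr = 0x1003 and *bit = 5
 * rounds addr down to the 8-byte boundary 0x1000 and compensates by adding
 * the 3 skipped bytes, i.e. 3 << 3 = 24 bits, so *bit becomes 29.  The
 * (addr, bit) pair still names the same bit, but addr is now aligned as
 * ext4_test_bit() and friends require on architectures like powerpc.
 */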
static inline int mb_test_bit(int bit, void *addr)
{
	/*
	 * ext4_test_bit on architectures like powerpc
	 * needs an unsigned long aligned address
	 */
	addr = mb_correct_addr_and_bit(&bit, addr);
	return ext4_test_bit(bit, addr);
}

static inline void mb_set_bit(int bit, void *addr)
{
	addr = mb_correct_addr_and_bit(&bit, addr);
	ext4_set_bit(bit, addr);
}

static inline void mb_clear_bit(int bit, void *addr)
{
	addr = mb_correct_addr_and_bit(&bit, addr);
	ext4_clear_bit(bit, addr);
}

static inline int mb_find_next_zero_bit(void *addr, int max, int start)
{
	int fix = 0, ret, tmpmax;
	addr = mb_correct_addr_and_bit(&fix, addr);
	tmpmax = max + fix;
	start += fix;

	ret = ext4_find_next_zero_bit(addr, tmpmax, start) - fix;
	if (ret > max)
		return max;
	return ret;
}

static inline int mb_find_next_bit(void *addr, int max, int start)
{
	int fix = 0, ret, tmpmax;
	addr = mb_correct_addr_and_bit(&fix, addr);
	tmpmax = max + fix;
	start += fix;

	ret = ext4_find_next_bit(addr, tmpmax, start) - fix;
	if (ret > max)
		return max;
	return ret;
}

static void *mb_find_buddy(struct ext4_buddy *e4b, int order, int *max)
{
	char *bb;

	BUG_ON(EXT4_MB_BITMAP(e4b) == EXT4_MB_BUDDY(e4b));
	BUG_ON(max == NULL);

	if (order > e4b->bd_blkbits + 1) {
		*max = 0;
		return NULL;
	}

	/* at order 0 we see each particular block */
	*max = 1 << (e4b->bd_blkbits + 3);
	if (order == 0)
		return EXT4_MB_BITMAP(e4b);

	bb = EXT4_MB_BUDDY(e4b) + EXT4_SB(e4b->bd_sb)->s_mb_offsets[order];
	*max = EXT4_SB(e4b->bd_sb)->s_mb_maxs[order];

	return bb;
}
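/*
 * Layout sketch for mb_find_buddy(), assuming a 4KB block size
 * (bd_blkbits = 12): order 0 is the block bitmap itself with
 * *max = 1 << 15 = 32768 bits, one per block in the group.  Each higher
 * order halves the number of bits, so order 1 describes 16384 pairs,
 * order 2 describes 8192 four-block chunks, and so on up to order
 * bd_blkbits + 1 = 13.  s_mb_offsets[order] and s_mb_maxs[order] are
 * byte offsets into the buddy block and the matching bit counts for each
 * of those per-order maps, precomputed once at mount time.
 */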
#ifdef DOUBLE_CHECK
static void mb_free_blocks_double(struct inode *inode, struct ext4_buddy *e4b,
				   int first, int count)
{
	int i;
	struct super_block *sb = e4b->bd_sb;

	if (unlikely(e4b->bd_info->bb_bitmap == NULL))
		return;
	assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
	for (i = 0; i < count; i++) {
		if (!mb_test_bit(first + i, e4b->bd_info->bb_bitmap)) {
			ext4_fsblk_t blocknr;

			blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
			blocknr += first + i;
			ext4_grp_locked_error(sb, e4b->bd_group,
					      inode ? inode->i_ino : 0,
					      blocknr,
					      "freeing block already freed "
					      "(bit %u)",
					      first + i);
		}
		mb_clear_bit(first + i, e4b->bd_info->bb_bitmap);
	}
}

static void mb_mark_used_double(struct ext4_buddy *e4b, int first, int count)
{
	int i;

	if (unlikely(e4b->bd_info->bb_bitmap == NULL))
		return;
	assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
	for (i = 0; i < count; i++) {
		BUG_ON(mb_test_bit(first + i, e4b->bd_info->bb_bitmap));
		mb_set_bit(first + i, e4b->bd_info->bb_bitmap);
	}
}

static void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
{
	if (memcmp(e4b->bd_info->bb_bitmap, bitmap, e4b->bd_sb->s_blocksize)) {
		unsigned char *b1, *b2;
		int i;
		b1 = (unsigned char *) e4b->bd_info->bb_bitmap;
		b2 = (unsigned char *) bitmap;
		for (i = 0; i < e4b->bd_sb->s_blocksize; i++) {
			if (b1[i] != b2[i]) {
				printk(KERN_ERR "corruption in group %u "
				       "at byte %u(%u): %x in copy != %x "
				       "on disk/prealloc\n",
				       e4b->bd_group, i, i * 8, b1[i], b2[i]);
				BUG();
			}
		}
	}
}

#else
static inline void mb_free_blocks_double(struct inode *inode,
				struct ext4_buddy *e4b, int first, int count)
{
	return;
}
static inline void mb_mark_used_double(struct ext4_buddy *e4b,
						int first, int count)
{
	return;
}
static inline void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
{
	return;
}
#endif

#ifdef AGGRESSIVE_CHECK

#define MB_CHECK_ASSERT(assert)						\
do {									\
	if (!(assert)) {						\
		printk(KERN_EMERG					\
			"Assertion failure in %s() at %s:%d: \"%s\"\n",	\
			function, file, line, # assert);		\
		BUG();							\
	}								\
} while (0)

static int __mb_check_buddy(struct ext4_buddy *e4b, char *file,
				const char *function, int line)
{
	struct super_block *sb = e4b->bd_sb;
	int order = e4b->bd_blkbits + 1;
	int max;
	int max2;
	int i;
	int j;
	int k;
	int count;
	struct ext4_group_info *grp;
	int fragments = 0;
	int fstart;
	struct list_head *cur;
	void *buddy;
	void *buddy2;

	{
		static int mb_check_counter;
		if (mb_check_counter++ % 100 != 0)
			return 0;
	}

	while (order > 1) {
		buddy = mb_find_buddy(e4b, order, &max);
		MB_CHECK_ASSERT(buddy);
		buddy2 = mb_find_buddy(e4b, order - 1, &max2);
		MB_CHECK_ASSERT(buddy2);
		MB_CHECK_ASSERT(buddy != buddy2);
		MB_CHECK_ASSERT(max * 2 == max2);

		count = 0;
		for (i = 0; i < max; i++) {

			if (mb_test_bit(i, buddy)) {
				/* at most one of the two halves
				 * may be free at the lower order */
				if (!mb_test_bit(i << 1, buddy2)) {
					MB_CHECK_ASSERT(
						mb_test_bit((i<<1)+1, buddy2));
				} else if (!mb_test_bit((i << 1) + 1, buddy2)) {
					MB_CHECK_ASSERT(
						mb_test_bit(i << 1, buddy2));
				}
				continue;
			}

			/* both halves must be marked used in buddy2 */
			MB_CHECK_ASSERT(mb_test_bit(i << 1, buddy2));
			MB_CHECK_ASSERT(mb_test_bit((i << 1) + 1, buddy2));

			for (j = 0; j < (1 << order); j++) {
				k = (i * (1 << order)) + j;
				MB_CHECK_ASSERT(
					!mb_test_bit(k, EXT4_MB_BITMAP(e4b)));
			}
			count++;
		}
		MB_CHECK_ASSERT(e4b->bd_info->bb_counters[order] == count);
		order--;
	}

	fstart = -1;
	buddy = mb_find_buddy(e4b, 0, &max);
	for (i = 0; i < max; i++) {
		if (!mb_test_bit(i, buddy)) {
			MB_CHECK_ASSERT(i >= e4b->bd_info->bb_first_free);
			if (fstart == -1) {
				fragments++;
				fstart = i;
			}
			continue;
		}
		fstart = -1;
		/* check used bits only */
		for (j = 0; j < e4b->bd_blkbits + 1; j++) {
			buddy2 = mb_find_buddy(e4b, j, &max2);
			k = i >> j;
			MB_CHECK_ASSERT(k < max2);
			MB_CHECK_ASSERT(mb_test_bit(k, buddy2));
		}
	}
	MB_CHECK_ASSERT(!EXT4_MB_GRP_NEED_INIT(e4b->bd_info));
	MB_CHECK_ASSERT(e4b->bd_info->bb_fragments == fragments);

	grp = ext4_get_group_info(sb, e4b->bd_group);
	buddy = mb_find_buddy(e4b, 0, &max);
	list_for_each(cur, &grp->bb_prealloc_list) {
		ext4_group_t groupnr;
		struct ext4_prealloc_space *pa;
		pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
		ext4_get_group_no_and_offset(sb, pa->pa_pstart, &groupnr, &k);
		MB_CHECK_ASSERT(groupnr == e4b->bd_group);
		for (i = 0; i < pa->pa_len; i++)
			MB_CHECK_ASSERT(mb_test_bit(k + i, buddy));
	}
	return 0;
}
#undef MB_CHECK_ASSERT
#define mb_check_buddy(e4b) __mb_check_buddy(e4b,	\
					__FILE__, __func__, __LINE__)
#else
#define mb_check_buddy(e4b)
#endif

static void ext4_mb_mark_free_simple(struct super_block *sb,
				void *buddy, ext4_grpblk_t first, ext4_grpblk_t len,
					struct ext4_group_info *grp)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_grpblk_t min;
	ext4_grpblk_t max;
	ext4_grpblk_t chunk;
	unsigned short border;

	BUG_ON(len > EXT4_BLOCKS_PER_GROUP(sb));

	border = 2 << sb->s_blocksize_bits;

	while (len > 0) {
		/* find how many blocks can be covered from this position */
		max = ffs(first | border) - 1;

		/* find how many blocks of power 2 we need to mark */
		min = fls(len) - 1;

		if (max < min)
			min = max;
		chunk = 1 << min;

		/* mark multiblock chunks only */
		grp->bb_counters[min]++;
		if (min > 0)
			mb_clear_bit(first >> min,
				     buddy + sbi->s_mb_offsets[min]);

		len -= chunk;
		first += chunk;
	}
}
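/*
 * Worked example for ext4_mb_mark_free_simple() (values invented): take a
 * free extent starting at first = 6 with len = 6, i.e. blocks 6-11 free.
 * First pass: max = ffs(6 | border) - 1 = 1 and min = fls(6) - 1 = 2, so
 * min is capped to 1 and an order-1 chunk covering blocks 6-7 is recorded
 * (bb_counters[1]++, bit 6 >> 1 = 3 cleared in the order-1 map).  Second
 * pass: first = 8, len = 4 gives max = 3, min = 2, so blocks 8-11 become a
 * single order-2 chunk.  The alignment term ffs(first | border) is what
 * keeps every chunk naturally aligned to its own size.
 */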
/*
 * Cache the order of the largest free extent we have available in this
 * block group.
 */
static void
mb_set_largest_free_order(struct super_block *sb, struct ext4_group_info *grp)
{
	int i;
	int bits;

	grp->bb_largest_free_order = -1; /* uninit */

	bits = sb->s_blocksize_bits + 1;
	for (i = bits; i >= 0; i--) {
		if (grp->bb_counters[i] > 0) {
			grp->bb_largest_free_order = i;
			break;
		}
	}
}

static noinline_for_stack
void ext4_mb_generate_buddy(struct super_block *sb,
				void *buddy, void *bitmap, ext4_group_t group)
{
	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
	ext4_grpblk_t max = EXT4_BLOCKS_PER_GROUP(sb);
	ext4_grpblk_t i = 0;
	ext4_grpblk_t first;
	ext4_grpblk_t len;
	unsigned free = 0;
	unsigned fragments = 0;
	unsigned long long period = get_cycles();

	/* initialize buddy from bitmap which is aggregation
	 * of on-disk bitmap and preallocations */
	i = mb_find_next_zero_bit(bitmap, max, 0);
	grp->bb_first_free = i;
	while (i < max) {
		fragments++;
		first = i;
		i = mb_find_next_bit(bitmap, max, i);
		len = i - first;
		free += len;
		if (len > 1)
			ext4_mb_mark_free_simple(sb, buddy, first, len, grp);
		else
			grp->bb_counters[0]++;
		if (i < max)
			i = mb_find_next_zero_bit(bitmap, max, i);
	}
	grp->bb_fragments = fragments;

	if (free != grp->bb_free) {
		ext4_grp_locked_error(sb, group, 0, 0,
				      "%u blocks in bitmap, %u in gd",
				      free, grp->bb_free);
		/*
		 * If we intend to continue, we consider the group
		 * descriptor corrupt and update bb_free using the
		 * bitmap value
		 */
		grp->bb_free = free;
	}
	mb_set_largest_free_order(sb, grp);

	clear_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state));

	period = get_cycles() - period;
	spin_lock(&EXT4_SB(sb)->s_bal_lock);
	EXT4_SB(sb)->s_mb_buddies_generated++;
	EXT4_SB(sb)->s_mb_generation_time += period;
	spin_unlock(&EXT4_SB(sb)->s_bal_lock);
}

/* The buddy information is attached to the buddy cache inode
 * for convenience. The information regarding each group
 * is loaded via ext4_mb_load_buddy. The information consists of
 * the block bitmap and the buddy information, stored in the
 * inode as:
 *
 *  {                        page                        }
 *  [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
 *
 *
 * one block each for bitmap and buddy information.
 * So for each group we take up 2 blocks. A page can
 * contain blocks_per_page (PAGE_CACHE_SIZE / blocksize) blocks.
 * So it can have information regarding groups_per_page groups,
 * which is blocks_per_page/2.
 *
 * Locking note: This routine takes the block group lock of all groups
 * for this page; do not hold this lock when calling this routine!
 */
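/*
 * Layout sketch under two common geometries (both hypothetical here): with
 * 4KB pages and a 4KB block size, blocks_per_page = 1, so group N's bitmap
 * lives alone in page 2N and its buddy in page 2N + 1.  With 4KB pages and
 * a 1KB block size, blocks_per_page = 4 and groups_per_page = 2, so page 0
 * holds [group 0 bitmap][group 0 buddy][group 1 bitmap][group 1 buddy] and
 * page 1 starts with group 2.
 */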
static int ext4_mb_init_cache(struct page *page, char *incore)
{
	ext4_group_t ngroups;
	int blocksize;
	int blocks_per_page;
	int groups_per_page;
	int err = 0;
	int i;
	ext4_group_t first_group;
	int first_block;
	struct super_block *sb;
	struct buffer_head *bhs;
	struct buffer_head **bh;
	struct inode *inode;
	char *data;
	char *bitmap;

	mb_debug(1, "init page %lu\n", page->index);

	inode = page->mapping->host;
	sb = inode->i_sb;
	ngroups = ext4_get_groups_count(sb);
	blocksize = 1 << inode->i_blkbits;
	blocks_per_page = PAGE_CACHE_SIZE / blocksize;

	groups_per_page = blocks_per_page >> 1;
	if (groups_per_page == 0)
		groups_per_page = 1;

	/* allocate buffer_heads to read bitmaps */
	if (groups_per_page > 1) {
		err = -ENOMEM;
		i = sizeof(struct buffer_head *) * groups_per_page;
		bh = kzalloc(i, GFP_NOFS);
		if (bh == NULL)
			goto out;
	} else
		bh = &bhs;

	first_group = page->index * blocks_per_page / 2;

	/* read all groups the page covers into the cache */
	for (i = 0; i < groups_per_page; i++) {
		struct ext4_group_desc *desc;

		if (first_group + i >= ngroups)
			break;

		err = -EIO;
		desc = ext4_get_group_desc(sb, first_group + i, NULL);
		if (desc == NULL)
			goto out;

		err = -ENOMEM;
		bh[i] = sb_getblk(sb, ext4_block_bitmap(sb, desc));
		if (bh[i] == NULL)
			goto out;

		if (bitmap_uptodate(bh[i]))
			continue;

		lock_buffer(bh[i]);
		if (bitmap_uptodate(bh[i])) {
			unlock_buffer(bh[i]);
			continue;
		}
		ext4_lock_group(sb, first_group + i);
		if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
			ext4_init_block_bitmap(sb, bh[i],
						first_group + i, desc);
			set_bitmap_uptodate(bh[i]);
			set_buffer_uptodate(bh[i]);
			ext4_unlock_group(sb, first_group + i);
			unlock_buffer(bh[i]);
			continue;
		}
		ext4_unlock_group(sb, first_group + i);
		if (buffer_uptodate(bh[i])) {
			/*
			 * if not uninit, and the bh is uptodate,
			 * the bitmap is also uptodate
			 */
			set_bitmap_uptodate(bh[i]);
			unlock_buffer(bh[i]);
			continue;
		}
		get_bh(bh[i]);
		/*
		 * submit the buffer_head for read. We can
		 * safely mark the bitmap as uptodate now.
		 * We do it here so the bitmap uptodate bit
		 * gets set with the buffer lock held.
		 */
		set_bitmap_uptodate(bh[i]);
		bh[i]->b_end_io = end_buffer_read_sync;
		submit_bh(READ, bh[i]);
		mb_debug(1, "read bitmap for group %u\n", first_group + i);
	}

	/* wait for I/O completion */
	for (i = 0; i < groups_per_page && bh[i]; i++)
		wait_on_buffer(bh[i]);

	err = -EIO;
	for (i = 0; i < groups_per_page && bh[i]; i++)
		if (!buffer_uptodate(bh[i]))
			goto out;

	err = 0;
	first_block = page->index * blocks_per_page;
	/* init the page */
	memset(page_address(page), 0xff, PAGE_CACHE_SIZE);
	for (i = 0; i < blocks_per_page; i++) {
		int group;
		struct ext4_group_info *grinfo;

		group = (first_block + i) >> 1;
		if (group >= ngroups)
			break;

		/*
		 * data carries the information regarding this
		 * particular group in the format specified
		 * above
		 */
		data = page_address(page) + (i * blocksize);
		bitmap = bh[group - first_group]->b_data;

		/*
		 * We place the buddy block and bitmap block
		 * close together
		 */
		if ((first_block + i) & 1) {
			/* this is block of buddy */
			BUG_ON(incore == NULL);
			mb_debug(1, "put buddy for group %u in page %lu/%x\n",
				group, page->index, i * blocksize);
			trace_ext4_mb_buddy_bitmap_load(sb, group);
			grinfo = ext4_get_group_info(sb, group);
			grinfo->bb_fragments = 0;
			memset(grinfo->bb_counters, 0,
			       sizeof(*grinfo->bb_counters) *
				(sb->s_blocksize_bits+2));
			/*
			 * incore got set to the group block bitmap below
			 */
			ext4_lock_group(sb, group);
			ext4_mb_generate_buddy(sb, data, incore, group);
			ext4_unlock_group(sb, group);
			incore = NULL;
		} else {
			/* this is block of bitmap */
			BUG_ON(incore != NULL);
			mb_debug(1, "put bitmap for group %u in page %lu/%x\n",
				group, page->index, i * blocksize);
			trace_ext4_mb_bitmap_load(sb, group);

			/* see comments in ext4_mb_put_pa() */
			ext4_lock_group(sb, group);
			memcpy(data, bitmap, blocksize);

			/* mark all preallocated blks used in in-core bitmap */
			ext4_mb_generate_from_pa(sb, data, group);
			ext4_mb_generate_from_freelist(sb, data, group);
			ext4_unlock_group(sb, group);

			/* set incore so that the buddy information can be
			 * generated using this
			 */
			incore = data;
		}
	}
	SetPageUptodate(page);

out:
	if (bh) {
		for (i = 0; i < groups_per_page && bh[i]; i++)
			brelse(bh[i]);
		if (bh != &bhs)
			kfree(bh);
	}
	return err;
}
/*
 * Locking note: This routine calls ext4_mb_init_cache(), which takes the
 * block group lock of all groups for this page; do not hold the BG lock when
 * calling this routine!
 */
static noinline_for_stack
int ext4_mb_init_group(struct super_block *sb, ext4_group_t group)
{

	int ret = 0;
	void *bitmap;
	int blocks_per_page;
	int block, pnum, poff;
	int num_grp_locked = 0;
	struct ext4_group_info *this_grp;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct inode *inode = sbi->s_buddy_cache;
	struct page *page = NULL, *bitmap_page = NULL;

	mb_debug(1, "init group %u\n", group);
	blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
	this_grp = ext4_get_group_info(sb, group);
	/*
	 * This ensures that we don't reinit the buddy cache
	 * pages which map to the group from which we are already
	 * allocating. If we are looking at the buddy cache we would
	 * have taken a reference using ext4_mb_load_buddy and that
	 * would have taken the alloc_sem lock.
	 */
	num_grp_locked = ext4_mb_get_buddy_cache_lock(sb, group);
	if (!EXT4_MB_GRP_NEED_INIT(this_grp)) {
		/*
		 * somebody initialized the group;
		 * return without doing anything
		 */
		ret = 0;
		goto err;
	}
	/*
	 * the buddy cache inode stores the block bitmap
	 * and buddy information in consecutive blocks.
	 * So for each group we need two blocks.
	 */
	block = group * 2;
	pnum = block / blocks_per_page;
	poff = block % blocks_per_page;
	page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
	if (page) {
		BUG_ON(page->mapping != inode->i_mapping);
		ret = ext4_mb_init_cache(page, NULL);
		if (ret) {
			unlock_page(page);
			goto err;
		}
		unlock_page(page);
	}
	if (page == NULL || !PageUptodate(page)) {
		ret = -EIO;
		goto err;
	}
	mark_page_accessed(page);
	bitmap_page = page;
	bitmap = page_address(page) + (poff * sb->s_blocksize);

	/* init buddy cache */
	block++;
	pnum = block / blocks_per_page;
	poff = block % blocks_per_page;
	page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
	if (page == bitmap_page) {
		/*
		 * If both the bitmap and buddy are in
		 * the same page we don't need to force
		 * init the buddy
		 */
		unlock_page(page);
	} else if (page) {
		BUG_ON(page->mapping != inode->i_mapping);
		ret = ext4_mb_init_cache(page, bitmap);
		if (ret) {
			unlock_page(page);
			goto err;
		}
		unlock_page(page);
	}
	if (page == NULL || !PageUptodate(page)) {
		ret = -EIO;
		goto err;
	}
	mark_page_accessed(page);
err:
	ext4_mb_put_buddy_cache_lock(sb, group, num_grp_locked);
	if (bitmap_page)
		page_cache_release(bitmap_page);
	if (page)
		page_cache_release(page);
	return ret;
}
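/*
 * A quick sanity check of the index arithmetic above (illustrative
 * numbers): with blocks_per_page = 4, group 5's bitmap is logical block 10,
 * which maps to pnum = 10 / 4 = 2 and poff = 10 % 4 = 2; the buddy is
 * block 11 at pnum = 2, poff = 3, so here both land in the same page and
 * the buddy is initialized by the same ext4_mb_init_cache() call.  With
 * blocks_per_page = 1 they land in pages 10 and 11 respectively, which is
 * why the second lookup must pass the bitmap into ext4_mb_init_cache().
 */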
/*
 * Locking note: This routine calls ext4_mb_init_cache(), which takes the
 * block group lock of all groups for this page; do not hold the BG lock when
 * calling this routine!
 */
static noinline_for_stack int
ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
					struct ext4_buddy *e4b)
{
	int blocks_per_page;
	int block;
	int pnum;
	int poff;
	struct page *page;
	int ret;
	struct ext4_group_info *grp;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct inode *inode = sbi->s_buddy_cache;

	mb_debug(1, "load group %u\n", group);

	blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
	grp = ext4_get_group_info(sb, group);

	e4b->bd_blkbits = sb->s_blocksize_bits;
	e4b->bd_info = ext4_get_group_info(sb, group);
	e4b->bd_sb = sb;
	e4b->bd_group = group;
	e4b->bd_buddy_page = NULL;
	e4b->bd_bitmap_page = NULL;
	e4b->alloc_semp = &grp->alloc_sem;

	/* Take the read lock on the group alloc
	 * sem. This would make sure a parallel
	 * ext4_mb_init_group happening on other
	 * groups mapped by the page is blocked
	 * till we are done with the allocation
	 */
repeat_load_buddy:
	down_read(e4b->alloc_semp);

	if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
		/* we need to check for group need init flag
		 * with alloc_semp held so that we can be sure
		 * that new blocks didn't get added to the group
		 * while we are loading the buddy cache
		 */
		up_read(e4b->alloc_semp);
		/*
		 * we need full data about the group
		 * to make a good selection
		 */
		ret = ext4_mb_init_group(sb, group);
		if (ret)
			return ret;
		goto repeat_load_buddy;
	}

	/*
	 * the buddy cache inode stores the block bitmap
	 * and buddy information in consecutive blocks.
	 * So for each group we need two blocks.
	 */
	block = group * 2;
	pnum = block / blocks_per_page;
	poff = block % blocks_per_page;

	/* we could use find_or_create_page(), but it locks the page,
	 * which we'd like to avoid on the fast path ... */
	page = find_get_page(inode->i_mapping, pnum);
	if (page == NULL || !PageUptodate(page)) {
		if (page)
			/*
			 * drop the page reference and try
			 * to get the page with lock. If we
			 * are not uptodate that implies
			 * somebody just created the page but
			 * has yet to initialize it. So
			 * wait for it to initialize.
			 */
			page_cache_release(page);
		page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
		if (page) {
			BUG_ON(page->mapping != inode->i_mapping);
			if (!PageUptodate(page)) {
				ret = ext4_mb_init_cache(page, NULL);
				if (ret) {
					unlock_page(page);
					goto err;
				}
				mb_cmp_bitmaps(e4b, page_address(page) +
					       (poff * sb->s_blocksize));
			}
			unlock_page(page);
		}
	}
	if (page == NULL || !PageUptodate(page)) {
		ret = -EIO;
		goto err;
	}
	e4b->bd_bitmap_page = page;
	e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
	mark_page_accessed(page);

	block++;
	pnum = block / blocks_per_page;
	poff = block % blocks_per_page;

	page = find_get_page(inode->i_mapping, pnum);
	if (page == NULL || !PageUptodate(page)) {
		if (page)
			page_cache_release(page);
		page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
		if (page) {
			BUG_ON(page->mapping != inode->i_mapping);
			if (!PageUptodate(page)) {
				ret = ext4_mb_init_cache(page, e4b->bd_bitmap);
				if (ret) {
					unlock_page(page);
					goto err;
				}
			}
			unlock_page(page);
		}
	}
	if (page == NULL || !PageUptodate(page)) {
		ret = -EIO;
		goto err;
	}
	e4b->bd_buddy_page = page;
	e4b->bd_buddy = page_address(page) + (poff * sb->s_blocksize);
	mark_page_accessed(page);

	BUG_ON(e4b->bd_bitmap_page == NULL);
	BUG_ON(e4b->bd_buddy_page == NULL);

	return 0;

err:
	if (e4b->bd_bitmap_page)
		page_cache_release(e4b->bd_bitmap_page);
	if (e4b->bd_buddy_page)
		page_cache_release(e4b->bd_buddy_page);
	e4b->bd_buddy = NULL;
	e4b->bd_bitmap = NULL;

	/* Done with the buddy cache */
	up_read(e4b->alloc_semp);
	return ret;
}
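/*
 * Typical caller pattern (a sketch; error handling trimmed).  The same
 * load / lock / scan / unlock / unload sequence appears in, e.g.,
 * ext4_mb_try_best_found() below:
 *
 *	struct ext4_buddy e4b;
 *
 *	err = ext4_mb_load_buddy(sb, group, &e4b);
 *	if (err)
 *		return err;
 *	ext4_lock_group(sb, group);
 *	... scan the bitmap/buddy, possibly mb_mark_used() ...
 *	ext4_unlock_group(sb, group);
 *	ext4_mb_unload_buddy(&e4b);
 */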
static void ext4_mb_unload_buddy(struct ext4_buddy *e4b)
{
	if (e4b->bd_bitmap_page)
		page_cache_release(e4b->bd_bitmap_page);
	if (e4b->bd_buddy_page)
		page_cache_release(e4b->bd_buddy_page);
	/* Done with the buddy cache */
	if (e4b->alloc_semp)
		up_read(e4b->alloc_semp);
}


static int mb_find_order_for_block(struct ext4_buddy *e4b, int block)
{
	int order = 1;
	void *bb;

	BUG_ON(EXT4_MB_BITMAP(e4b) == EXT4_MB_BUDDY(e4b));
	BUG_ON(block >= (1 << (e4b->bd_blkbits + 3)));

	bb = EXT4_MB_BUDDY(e4b);
	while (order <= e4b->bd_blkbits + 1) {
		block = block >> 1;
		if (!mb_test_bit(block, bb)) {
			/* this block is part of buddy of order 'order' */
			return order;
		}
		bb += 1 << (e4b->bd_blkbits - order);
		order++;
	}
	return 0;
}
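/*
 * Example walk through mb_find_order_for_block() (invented numbers): if
 * blocks 8-11 form a free order-2 chunk, then looking up block 9 first
 * tests bit 9 >> 1 = 4 in the order-1 map (still set, because the chunk is
 * not recorded at order 1), then bit 9 >> 2 = 2 in the order-2 map, which
 * is clear, so the function returns 2.  A return of 0 means the search
 * fell through: the block is not recorded in any higher-order free chunk
 * (it is either in use or a lone order-0 free block).
 */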
static void mb_clear_bits(void *bm, int cur, int len)
{
	__u32 *addr;

	len = cur + len;
	while (cur < len) {
		if ((cur & 31) == 0 && (len - cur) >= 32) {
			/* fast path: clear whole word at once */
			addr = bm + (cur >> 3);
			*addr = 0;
			cur += 32;
			continue;
		}
		mb_clear_bit(cur, bm);
		cur++;
	}
}

static void mb_set_bits(void *bm, int cur, int len)
{
	__u32 *addr;

	len = cur + len;
	while (cur < len) {
		if ((cur & 31) == 0 && (len - cur) >= 32) {
			/* fast path: set whole word at once */
			addr = bm + (cur >> 3);
			*addr = 0xffffffff;
			cur += 32;
			continue;
		}
		mb_set_bit(cur, bm);
		cur++;
	}
}
static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
			  int first, int count)
{
	int block = 0;
	int max = 0;
	int order;
	void *buddy;
	void *buddy2;
	struct super_block *sb = e4b->bd_sb;

	BUG_ON(first + count > (sb->s_blocksize << 3));
	assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
	mb_check_buddy(e4b);
	mb_free_blocks_double(inode, e4b, first, count);

	e4b->bd_info->bb_free += count;
	if (first < e4b->bd_info->bb_first_free)
		e4b->bd_info->bb_first_free = first;

	/* let's maintain fragments counter */
	if (first != 0)
		block = !mb_test_bit(first - 1, EXT4_MB_BITMAP(e4b));
	if (first + count < EXT4_SB(sb)->s_mb_maxs[0])
		max = !mb_test_bit(first + count, EXT4_MB_BITMAP(e4b));
	if (block && max)
		e4b->bd_info->bb_fragments--;
	else if (!block && !max)
		e4b->bd_info->bb_fragments++;

	/* let's maintain buddy itself */
	while (count-- > 0) {
		block = first++;
		order = 0;

		if (!mb_test_bit(block, EXT4_MB_BITMAP(e4b))) {
			ext4_fsblk_t blocknr;

			blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
			blocknr += block;
			ext4_grp_locked_error(sb, e4b->bd_group,
					      inode ? inode->i_ino : 0,
					      blocknr,
					      "freeing already freed block "
					      "(bit %u)", block);
		}
		mb_clear_bit(block, EXT4_MB_BITMAP(e4b));
		e4b->bd_info->bb_counters[order]++;

		/* start of the buddy */
		buddy = mb_find_buddy(e4b, order, &max);

		do {
			block &= ~1UL;
			if (mb_test_bit(block, buddy) ||
					mb_test_bit(block + 1, buddy))
				break;

			/* both the buddies are free, try to coalesce them */
			buddy2 = mb_find_buddy(e4b, order + 1, &max);

			if (!buddy2)
				break;

			if (order > 0) {
				/* for special purposes, we don't set
				 * free bits in bitmap */
				mb_set_bit(block, buddy);
				mb_set_bit(block + 1, buddy);
			}
			e4b->bd_info->bb_counters[order]--;
			e4b->bd_info->bb_counters[order]--;

			block = block >> 1;
			order++;
			e4b->bd_info->bb_counters[order]++;

			mb_clear_bit(block, buddy2);
			buddy = buddy2;
		} while (1);
	}
	mb_set_largest_free_order(sb, e4b->bd_info);
	mb_check_buddy(e4b);
}

static int mb_find_extent(struct ext4_buddy *e4b, int order, int block,
				int needed, struct ext4_free_extent *ex)
{
	int next = block;
	int max;
	int ord;
	void *buddy;

	assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
	BUG_ON(ex == NULL);

	buddy = mb_find_buddy(e4b, order, &max);
	BUG_ON(buddy == NULL);
	BUG_ON(block >= max);
	if (mb_test_bit(block, buddy)) {
		ex->fe_len = 0;
		ex->fe_start = 0;
		ex->fe_group = 0;
		return 0;
	}

	if (likely(order == 0)) {
		/* find actual order */
		order = mb_find_order_for_block(e4b, block);
		block = block >> order;
	}

	ex->fe_len = 1 << order;
	ex->fe_start = block << order;
	ex->fe_group = e4b->bd_group;

	/* calc difference from given start */
	next = next - ex->fe_start;
	ex->fe_len -= next;
	ex->fe_start += next;

	while (needed > ex->fe_len &&
	       (buddy = mb_find_buddy(e4b, order, &max))) {

		if (block + 1 >= max)
			break;

		next = (block + 1) * (1 << order);
		if (mb_test_bit(next, EXT4_MB_BITMAP(e4b)))
			break;

		ord = mb_find_order_for_block(e4b, next);

		order = ord;
		block = next >> order;
		ex->fe_len += 1 << order;
	}

	BUG_ON(ex->fe_start + ex->fe_len > (1 << (e4b->bd_blkbits + 3)));
	return ex->fe_len;
}
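/*
 * Example of mb_find_extent() growing an extent (all numbers invented):
 * starting from free block 9 inside the free order-2 chunk 8-11, the first
 * step yields fe_start = 8, fe_len = 4, then trims to the requested start,
 * giving fe_start = 9, fe_len = 3.  While more blocks are needed, the loop
 * probes the block just past the current chunk (block 12); if it is free
 * and belongs, say, to a free order-1 chunk 12-13, the extent grows to
 * fe_len = 5, and so on until 'needed' is satisfied or a used block stops
 * the walk.
 */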
static int mb_mark_used(struct ext4_buddy *e4b, struct ext4_free_extent *ex)
{
	int ord;
	int mlen = 0;
	int max = 0;
	int cur;
	int start = ex->fe_start;
	int len = ex->fe_len;
	unsigned ret = 0;
	int len0 = len;
	void *buddy;

	BUG_ON(start + len > (e4b->bd_sb->s_blocksize << 3));
	BUG_ON(e4b->bd_group != ex->fe_group);
	assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
	mb_check_buddy(e4b);
	mb_mark_used_double(e4b, start, len);

	e4b->bd_info->bb_free -= len;
	if (e4b->bd_info->bb_first_free == start)
		e4b->bd_info->bb_first_free += len;

	/* let's maintain fragments counter */
	if (start != 0)
		mlen = !mb_test_bit(start - 1, EXT4_MB_BITMAP(e4b));
	if (start + len < EXT4_SB(e4b->bd_sb)->s_mb_maxs[0])
		max = !mb_test_bit(start + len, EXT4_MB_BITMAP(e4b));
	if (mlen && max)
		e4b->bd_info->bb_fragments++;
	else if (!mlen && !max)
		e4b->bd_info->bb_fragments--;

	/* let's maintain buddy itself */
	while (len) {
		ord = mb_find_order_for_block(e4b, start);

		if (((start >> ord) << ord) == start && len >= (1 << ord)) {
			/* the whole chunk may be allocated at once! */
			mlen = 1 << ord;
			buddy = mb_find_buddy(e4b, ord, &max);
			BUG_ON((start >> ord) >= max);
			mb_set_bit(start >> ord, buddy);
			e4b->bd_info->bb_counters[ord]--;
			start += mlen;
			len -= mlen;
			BUG_ON(len < 0);
			continue;
		}

		/* store for history */
		if (ret == 0)
			ret = len | (ord << 16);

		/* we have to split large buddy */
		BUG_ON(ord <= 0);
		buddy = mb_find_buddy(e4b, ord, &max);
		mb_set_bit(start >> ord, buddy);
		e4b->bd_info->bb_counters[ord]--;

		ord--;
		cur = (start >> ord) & ~1U;
		buddy = mb_find_buddy(e4b, ord, &max);
		mb_clear_bit(cur, buddy);
		mb_clear_bit(cur + 1, buddy);
		e4b->bd_info->bb_counters[ord]++;
		e4b->bd_info->bb_counters[ord]++;
	}
	mb_set_largest_free_order(e4b->bd_sb, e4b->bd_info);

	mb_set_bits(EXT4_MB_BITMAP(e4b), ex->fe_start, len0);
	mb_check_buddy(e4b);

	return ret;
}
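/*
 * Split example for mb_mark_used() (numbers invented): marking the 3-block
 * extent 9-11 inside the free order-2 chunk 8-11 cannot take the chunk
 * whole, because start = 9 is not order-2 aligned.  The chunk is split:
 * the order-2 bit is set and the two order-1 halves 8-9 and 10-11 are made
 * free; splitting continues on 8-9 down to order 0, after which block 9 is
 * taken, block 8 stays free at order 0, and the loop then consumes 10-11
 * as a whole order-1 chunk.
 */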
The reference is dropped 1504 * in ext4_mb_release_context 1505 */ 1506 ac->ac_bitmap_page = e4b->bd_bitmap_page; 1507 get_page(ac->ac_bitmap_page); 1508 ac->ac_buddy_page = e4b->bd_buddy_page; 1509 get_page(ac->ac_buddy_page); 1510 /* on allocation we use ac to track the held semaphore */ 1511 ac->alloc_semp = e4b->alloc_semp; 1512 e4b->alloc_semp = NULL; 1513 /* store last allocated for subsequent stream allocation */ 1514 if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) { 1515 spin_lock(&sbi->s_md_lock); 1516 sbi->s_mb_last_group = ac->ac_f_ex.fe_group; 1517 sbi->s_mb_last_start = ac->ac_f_ex.fe_start; 1518 spin_unlock(&sbi->s_md_lock); 1519 } 1520} 1521 1522/* 1523 * regular allocator, for general purposes allocation 1524 */ 1525 1526static void ext4_mb_check_limits(struct ext4_allocation_context *ac, 1527 struct ext4_buddy *e4b, 1528 int finish_group) 1529{ 1530 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 1531 struct ext4_free_extent *bex = &ac->ac_b_ex; 1532 struct ext4_free_extent *gex = &ac->ac_g_ex; 1533 struct ext4_free_extent ex; 1534 int max; 1535 1536 if (ac->ac_status == AC_STATUS_FOUND) 1537 return; 1538 /* 1539 * We don't want to scan for a whole year 1540 */ 1541 if (ac->ac_found > sbi->s_mb_max_to_scan && 1542 !(ac->ac_flags & EXT4_MB_HINT_FIRST)) { 1543 ac->ac_status = AC_STATUS_BREAK; 1544 return; 1545 } 1546 1547 /* 1548 * Haven't found good chunk so far, let's continue 1549 */ 1550 if (bex->fe_len < gex->fe_len) 1551 return; 1552 1553 if ((finish_group || ac->ac_found > sbi->s_mb_min_to_scan) 1554 && bex->fe_group == e4b->bd_group) { 1555 /* recheck chunk's availability - we don't know 1556 * when it was found (within this lock-unlock 1557 * period or not) */ 1558 max = mb_find_extent(e4b, 0, bex->fe_start, gex->fe_len, &ex); 1559 if (max >= gex->fe_len) { 1560 ext4_mb_use_best_found(ac, e4b); 1561 return; 1562 } 1563 } 1564} 1565 1566static void ext4_mb_measure_extent(struct ext4_allocation_context *ac, 1567 struct ext4_free_extent *ex, 1568 struct ext4_buddy *e4b) 1569{ 1570 struct ext4_free_extent *bex = &ac->ac_b_ex; 1571 struct ext4_free_extent *gex = &ac->ac_g_ex; 1572 1573 BUG_ON(ex->fe_len <= 0); 1574 BUG_ON(ex->fe_len > EXT4_BLOCKS_PER_GROUP(ac->ac_sb)); 1575 BUG_ON(ex->fe_start >= EXT4_BLOCKS_PER_GROUP(ac->ac_sb)); 1576 BUG_ON(ac->ac_status != AC_STATUS_CONTINUE); 1577 1578 ac->ac_found++; 1579 1580 /* 1581 * The special case - take what you catch first 1582 */ 1583 if (unlikely(ac->ac_flags & EXT4_MB_HINT_FIRST)) { 1584 *bex = *ex; 1585 ext4_mb_use_best_found(ac, e4b); 1586 return; 1587 } 1588 1589 /* 1590 * Let's check whether the chuck is good enough 1591 */ 1592 if (ex->fe_len == gex->fe_len) { 1593 *bex = *ex; 1594 ext4_mb_use_best_found(ac, e4b); 1595 return; 1596 } 1597 1598 /* 1599 * If this is first found extent, just store it in the context 1600 */ 1601 if (bex->fe_len == 0) { 1602 *bex = *ex; 1603 return; 1604 } 1605 1606 /* 1607 * If new found extent is better, store it in the context 1608 */ 1609 if (bex->fe_len < gex->fe_len) { 1610 /* if the request isn't satisfied, any found extent 1611 * larger than previous best one is better */ 1612 if (ex->fe_len > bex->fe_len) 1613 *bex = *ex; 1614 } else if (ex->fe_len > gex->fe_len) { 1615 /* if the request is satisfied, then we try to find 1616 * an extent that still satisfy the request, but is 1617 * smaller than previous one */ 1618 if (ex->fe_len < bex->fe_len) 1619 *bex = *ex; 1620 } 1621 1622 ext4_mb_check_limits(ac, e4b, 0); 1623} 1624 1625static noinline_for_stack 1626int 
static noinline_for_stack
int ext4_mb_try_best_found(struct ext4_allocation_context *ac,
					struct ext4_buddy *e4b)
{
	struct ext4_free_extent ex = ac->ac_b_ex;
	ext4_group_t group = ex.fe_group;
	int max;
	int err;

	BUG_ON(ex.fe_len <= 0);
	err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
	if (err)
		return err;

	ext4_lock_group(ac->ac_sb, group);
	max = mb_find_extent(e4b, 0, ex.fe_start, ex.fe_len, &ex);

	if (max > 0) {
		ac->ac_b_ex = ex;
		ext4_mb_use_best_found(ac, e4b);
	}

	ext4_unlock_group(ac->ac_sb, group);
	ext4_mb_unload_buddy(e4b);

	return 0;
}

static noinline_for_stack
int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
				struct ext4_buddy *e4b)
{
	ext4_group_t group = ac->ac_g_ex.fe_group;
	int max;
	int err;
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	struct ext4_free_extent ex;

	if (!(ac->ac_flags & EXT4_MB_HINT_TRY_GOAL))
		return 0;

	err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
	if (err)
		return err;

	ext4_lock_group(ac->ac_sb, group);
	max = mb_find_extent(e4b, 0, ac->ac_g_ex.fe_start,
			     ac->ac_g_ex.fe_len, &ex);

	if (max >= ac->ac_g_ex.fe_len && ac->ac_g_ex.fe_len == sbi->s_stripe) {
		ext4_fsblk_t start;

		start = ext4_group_first_block_no(ac->ac_sb, e4b->bd_group) +
			ex.fe_start;
		/* use do_div to get remainder (would be 64-bit modulo) */
		if (do_div(start, sbi->s_stripe) == 0) {
			ac->ac_found++;
			ac->ac_b_ex = ex;
			ext4_mb_use_best_found(ac, e4b);
		}
	} else if (max >= ac->ac_g_ex.fe_len) {
		BUG_ON(ex.fe_len <= 0);
		BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
		BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
		ac->ac_found++;
		ac->ac_b_ex = ex;
		ext4_mb_use_best_found(ac, e4b);
	} else if (max > 0 && (ac->ac_flags & EXT4_MB_HINT_MERGE)) {
		/* Sometimes, caller may want to merge even small
		 * number of blocks to an existing extent */
		BUG_ON(ex.fe_len <= 0);
		BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
		BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
		ac->ac_found++;
		ac->ac_b_ex = ex;
		ext4_mb_use_best_found(ac, e4b);
	}
	ext4_unlock_group(ac->ac_sb, group);
	ext4_mb_unload_buddy(e4b);

	return 0;
}
/*
 * The routine scans buddy structures (not bitmap!) from given order
 * to max order and tries to find a big enough chunk to satisfy the request
 */
static noinline_for_stack
void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
					struct ext4_buddy *e4b)
{
	struct super_block *sb = ac->ac_sb;
	struct ext4_group_info *grp = e4b->bd_info;
	void *buddy;
	int i;
	int k;
	int max;

	BUG_ON(ac->ac_2order <= 0);
	for (i = ac->ac_2order; i <= sb->s_blocksize_bits + 1; i++) {
		if (grp->bb_counters[i] == 0)
			continue;

		buddy = mb_find_buddy(e4b, i, &max);
		BUG_ON(buddy == NULL);

		k = mb_find_next_zero_bit(buddy, max, 0);
		BUG_ON(k >= max);

		ac->ac_found++;

		ac->ac_b_ex.fe_len = 1 << i;
		ac->ac_b_ex.fe_start = k << i;
		ac->ac_b_ex.fe_group = e4b->bd_group;

		ext4_mb_use_best_found(ac, e4b);

		BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);

		if (EXT4_SB(sb)->s_mb_stats)
			atomic_inc(&EXT4_SB(sb)->s_bal_2orders);

		break;
	}
}

/*
 * The routine scans the group and measures all found extents.
 * In order to optimize scanning, caller must pass number of
 * free blocks in the group, so the routine can know upper limit.
 */
static noinline_for_stack
void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac,
					struct ext4_buddy *e4b)
{
	struct super_block *sb = ac->ac_sb;
	void *bitmap = EXT4_MB_BITMAP(e4b);
	struct ext4_free_extent ex;
	int i;
	int free;

	free = e4b->bd_info->bb_free;
	BUG_ON(free <= 0);

	i = e4b->bd_info->bb_first_free;

	while (free && ac->ac_status == AC_STATUS_CONTINUE) {
		i = mb_find_next_zero_bit(bitmap,
						EXT4_BLOCKS_PER_GROUP(sb), i);
		if (i >= EXT4_BLOCKS_PER_GROUP(sb)) {
			/*
			 * If we have a corrupt bitmap, we won't find any
			 * free blocks even though group info says we
			 * have free blocks
			 */
			ext4_grp_locked_error(sb, e4b->bd_group, 0, 0,
					"%d free blocks as per "
					"group info. But bitmap says 0",
					free);
			break;
		}

		mb_find_extent(e4b, 0, i, ac->ac_g_ex.fe_len, &ex);
		BUG_ON(ex.fe_len <= 0);
		if (free < ex.fe_len) {
			ext4_grp_locked_error(sb, e4b->bd_group, 0, 0,
					"%d free blocks as per "
					"group info. But got %d blocks",
					free, ex.fe_len);
			/*
			 * The number of free blocks differs. This mostly
			 * indicates that the bitmap is corrupt. So exit
			 * without claiming the space.
			 */
			break;
		}

		ext4_mb_measure_extent(ac, &ex, e4b);

		i += ex.fe_len;
		free -= ex.fe_len;
	}

	ext4_mb_check_limits(ac, e4b, 1);
}

/*
 * This is a special case for storage like raid5;
 * we try to find stripe-aligned chunks for stripe-size-multiple requests
 */
static noinline_for_stack
void ext4_mb_scan_aligned(struct ext4_allocation_context *ac,
				 struct ext4_buddy *e4b)
{
	struct super_block *sb = ac->ac_sb;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	void *bitmap = EXT4_MB_BITMAP(e4b);
	struct ext4_free_extent ex;
	ext4_fsblk_t first_group_block;
	ext4_fsblk_t a;
	ext4_grpblk_t i;
	int max;

	BUG_ON(sbi->s_stripe == 0);

	/* find first stripe-aligned block in group */
	first_group_block = ext4_group_first_block_no(sb, e4b->bd_group);

	a = first_group_block + sbi->s_stripe - 1;
	do_div(a, sbi->s_stripe);
	i = (a * sbi->s_stripe) - first_group_block;

	while (i < EXT4_BLOCKS_PER_GROUP(sb)) {
		if (!mb_test_bit(i, bitmap)) {
			max = mb_find_extent(e4b, 0, i, sbi->s_stripe, &ex);
			if (max >= sbi->s_stripe) {
				ac->ac_found++;
				ac->ac_b_ex = ex;
				ext4_mb_use_best_found(ac, e4b);
				break;
			}
		}
		i += sbi->s_stripe;
	}
}

/* This is now called BEFORE we load the buddy bitmap. */
static int ext4_mb_good_group(struct ext4_allocation_context *ac,
				ext4_group_t group, int cr)
{
	unsigned free, fragments;
	int flex_size = ext4_flex_bg_size(EXT4_SB(ac->ac_sb));
	struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);

	BUG_ON(cr < 0 || cr >= 4);

	/* We only do this if the grp has never been initialized */
	if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
		int ret = ext4_mb_init_group(ac->ac_sb, group);
		if (ret)
			return 0;
	}

	free = grp->bb_free;
	fragments = grp->bb_fragments;
	if (free == 0)
		return 0;
	if (fragments == 0)
		return 0;

	switch (cr) {
	case 0:
		BUG_ON(ac->ac_2order == 0);

		if (grp->bb_largest_free_order < ac->ac_2order)
			return 0;

		/* Avoid using the first bg of a flexgroup for data files */
		if ((ac->ac_flags & EXT4_MB_HINT_DATA) &&
		    (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) &&
		    ((group % flex_size) == 0))
			return 0;

		return 1;
	case 1:
		if ((free / fragments) >= ac->ac_g_ex.fe_len)
			return 1;
		break;
	case 2:
		if (free >= ac->ac_g_ex.fe_len)
			return 1;
		break;
	case 3:
		return 1;
	default:
		BUG();
	}

	return 0;
}
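/*
 * In other words, the scan criteria relax as cr grows (summarized from the
 * switch above): cr 0 accepts a group only if it has a free buddy chunk of
 * at least order ac_2order; cr 1 requires the average fragment size
 * (free / fragments) to cover the goal length; cr 2 merely requires enough
 * total free blocks; cr 3 takes any group with free space at all.
 */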
	 */
	block = group * 2;
	pnum = block / blocks_per_page;
	first_group = pnum * blocks_per_page / 2;

	groups_per_page = blocks_per_page >> 1;
	if (groups_per_page == 0)
		groups_per_page = 1;
	/* read all groups the page covers into the cache */
	for (i = 0; i < groups_per_page; i++) {

		if ((first_group + i) >= ngroups)
			break;
		grp = ext4_get_group_info(sb, first_group + i);
		/* take every group's write allocation
		 * semaphore. This makes sure there is
		 * no block allocation going on in any
		 * of those groups
		 */
		down_write_nested(&grp->alloc_sem, i);
	}
	return i;
}

void ext4_mb_put_buddy_cache_lock(struct super_block *sb,
					ext4_group_t group, int locked_group)
{
	int i;
	int block, pnum;
	int blocks_per_page;
	ext4_group_t first_group;
	struct ext4_group_info *grp;

	blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
	/*
	 * the buddy cache inode stores the block bitmap
	 * and buddy information in consecutive blocks.
	 * So for each group we need two blocks.
	 */
	block = group * 2;
	pnum = block / blocks_per_page;
	first_group = pnum * blocks_per_page / 2;
	/* release locks on all the groups */
	for (i = 0; i < locked_group; i++) {

		grp = ext4_get_group_info(sb, first_group + i);
		/* release the write allocation semaphore
		 * taken in ext4_mb_get_buddy_cache_lock()
		 * so block allocation can resume on
		 * these groups
		 */
		up_write(&grp->alloc_sem);
	}

}

static noinline_for_stack int
ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
{
	ext4_group_t ngroups, group, i;
	int cr;
	int err = 0;
	struct ext4_sb_info *sbi;
	struct super_block *sb;
	struct ext4_buddy e4b;

	sb = ac->ac_sb;
	sbi = EXT4_SB(sb);
	ngroups = ext4_get_groups_count(sb);
	/* non-extent files are limited to low blocks/groups */
	if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)))
		ngroups = sbi->s_blockfile_groups;

	BUG_ON(ac->ac_status == AC_STATUS_FOUND);

	/* first, try the goal */
	err = ext4_mb_find_by_goal(ac, &e4b);
	if (err || ac->ac_status == AC_STATUS_FOUND)
		goto out;

	if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
		goto out;

	/*
	 * ac->ac_2order is set only if the fe_len is a power of 2;
	 * if ac_2order is set we also set criteria to 0 so that we
	 * try exact allocation using buddy.
	 */
	i = fls(ac->ac_g_ex.fe_len);
	ac->ac_2order = 0;
	/*
	 * We search using buddy data only if the order of the request
	 * is greater than or equal to sbi->s_mb_order2_reqs.
	 * You can tune it via /sys/fs/ext4/<partition>/mb_order2_req
	 */
	if (i >= sbi->s_mb_order2_reqs) {
		/*
		 * This should tell if fe_len is exactly a power of 2
		 */
		if ((ac->ac_g_ex.fe_len & (~(1 << (i - 1)))) == 0)
			ac->ac_2order = i - 1;
	}

	/* if stream allocation is enabled, use global goal */
	if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) {
		/* TBD: may be a hot spot */
		spin_lock(&sbi->s_md_lock);
		ac->ac_g_ex.fe_group = sbi->s_mb_last_group;
		ac->ac_g_ex.fe_start = sbi->s_mb_last_start;
		spin_unlock(&sbi->s_md_lock);
	}

	/* Let's just scan groups to find more or less suitable blocks */
	cr = ac->ac_2order ?
0 : 1; 2042 /* 2043 * cr == 0 try to get exact allocation, 2044 * cr == 3 try to get anything 2045 */ 2046repeat: 2047 for (; cr < 4 && ac->ac_status == AC_STATUS_CONTINUE; cr++) { 2048 ac->ac_criteria = cr; 2049 /* 2050 * searching for the right group start 2051 * from the goal value specified 2052 */ 2053 group = ac->ac_g_ex.fe_group; 2054 2055 for (i = 0; i < ngroups; group++, i++) { 2056 if (group == ngroups) 2057 group = 0; 2058 2059 /* This now checks without needing the buddy page */ 2060 if (!ext4_mb_good_group(ac, group, cr)) 2061 continue; 2062 2063 err = ext4_mb_load_buddy(sb, group, &e4b); 2064 if (err) 2065 goto out; 2066 2067 ext4_lock_group(sb, group); 2068 2069 /* 2070 * We need to check again after locking the 2071 * block group 2072 */ 2073 if (!ext4_mb_good_group(ac, group, cr)) { 2074 ext4_unlock_group(sb, group); 2075 ext4_mb_unload_buddy(&e4b); 2076 continue; 2077 } 2078 2079 ac->ac_groups_scanned++; 2080 if (cr == 0) 2081 ext4_mb_simple_scan_group(ac, &e4b); 2082 else if (cr == 1 && sbi->s_stripe && 2083 !(ac->ac_g_ex.fe_len % sbi->s_stripe)) 2084 ext4_mb_scan_aligned(ac, &e4b); 2085 else 2086 ext4_mb_complex_scan_group(ac, &e4b); 2087 2088 ext4_unlock_group(sb, group); 2089 ext4_mb_unload_buddy(&e4b); 2090 2091 if (ac->ac_status != AC_STATUS_CONTINUE) 2092 break; 2093 } 2094 } 2095 2096 if (ac->ac_b_ex.fe_len > 0 && ac->ac_status != AC_STATUS_FOUND && 2097 !(ac->ac_flags & EXT4_MB_HINT_FIRST)) { 2098 /* 2099 * We've been searching too long. Let's try to allocate 2100 * the best chunk we've found so far 2101 */ 2102 2103 ext4_mb_try_best_found(ac, &e4b); 2104 if (ac->ac_status != AC_STATUS_FOUND) { 2105 /* 2106 * Someone more lucky has already allocated it. 2107 * The only thing we can do is just take first 2108 * found block(s) 2109 printk(KERN_DEBUG "EXT4-fs: someone won our chunk\n"); 2110 */ 2111 ac->ac_b_ex.fe_group = 0; 2112 ac->ac_b_ex.fe_start = 0; 2113 ac->ac_b_ex.fe_len = 0; 2114 ac->ac_status = AC_STATUS_CONTINUE; 2115 ac->ac_flags |= EXT4_MB_HINT_FIRST; 2116 cr = 3; 2117 atomic_inc(&sbi->s_mb_lost_chunks); 2118 goto repeat; 2119 } 2120 } 2121out: 2122 return err; 2123} 2124 2125static void *ext4_mb_seq_groups_start(struct seq_file *seq, loff_t *pos) 2126{ 2127 struct super_block *sb = seq->private; 2128 ext4_group_t group; 2129 2130 if (*pos < 0 || *pos >= ext4_get_groups_count(sb)) 2131 return NULL; 2132 group = *pos + 1; 2133 return (void *) ((unsigned long) group); 2134} 2135 2136static void *ext4_mb_seq_groups_next(struct seq_file *seq, void *v, loff_t *pos) 2137{ 2138 struct super_block *sb = seq->private; 2139 ext4_group_t group; 2140 2141 ++*pos; 2142 if (*pos < 0 || *pos >= ext4_get_groups_count(sb)) 2143 return NULL; 2144 group = *pos + 1; 2145 return (void *) ((unsigned long) group); 2146} 2147 2148static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v) 2149{ 2150 struct super_block *sb = seq->private; 2151 ext4_group_t group = (ext4_group_t) ((unsigned long) v); 2152 int i; 2153 int err; 2154 struct ext4_buddy e4b; 2155 struct sg { 2156 struct ext4_group_info info; 2157 ext4_grpblk_t counters[16]; 2158 } sg; 2159 2160 group--; 2161 if (group == 0) 2162 seq_printf(seq, "#%-5s: %-5s %-5s %-5s " 2163 "[ %-5s %-5s %-5s %-5s %-5s %-5s %-5s " 2164 "%-5s %-5s %-5s %-5s %-5s %-5s %-5s ]\n", 2165 "group", "free", "frags", "first", 2166 "2^0", "2^1", "2^2", "2^3", "2^4", "2^5", "2^6", 2167 "2^7", "2^8", "2^9", "2^10", "2^11", "2^12", "2^13"); 2168 2169 i = (sb->s_blocksize_bits + 2) * sizeof(sg.info.bb_counters[0]) + 2170 sizeof(struct 
ext4_group_info); 2171 err = ext4_mb_load_buddy(sb, group, &e4b); 2172 if (err) { 2173 seq_printf(seq, "#%-5u: I/O error\n", group); 2174 return 0; 2175 } 2176 ext4_lock_group(sb, group); 2177 memcpy(&sg, ext4_get_group_info(sb, group), i); 2178 ext4_unlock_group(sb, group); 2179 ext4_mb_unload_buddy(&e4b); 2180 2181 seq_printf(seq, "#%-5u: %-5u %-5u %-5u [", group, sg.info.bb_free, 2182 sg.info.bb_fragments, sg.info.bb_first_free); 2183 for (i = 0; i <= 13; i++) 2184 seq_printf(seq, " %-5u", i <= sb->s_blocksize_bits + 1 ? 2185 sg.info.bb_counters[i] : 0); 2186 seq_printf(seq, " ]\n"); 2187 2188 return 0; 2189} 2190 2191static void ext4_mb_seq_groups_stop(struct seq_file *seq, void *v) 2192{ 2193} 2194 2195static const struct seq_operations ext4_mb_seq_groups_ops = { 2196 .start = ext4_mb_seq_groups_start, 2197 .next = ext4_mb_seq_groups_next, 2198 .stop = ext4_mb_seq_groups_stop, 2199 .show = ext4_mb_seq_groups_show, 2200}; 2201 2202static int ext4_mb_seq_groups_open(struct inode *inode, struct file *file) 2203{ 2204 struct super_block *sb = PDE(inode)->data; 2205 int rc; 2206 2207 rc = seq_open(file, &ext4_mb_seq_groups_ops); 2208 if (rc == 0) { 2209 struct seq_file *m = file->private_data; 2210 m->private = sb; 2211 } 2212 return rc; 2213 2214} 2215 2216static const struct file_operations ext4_mb_seq_groups_fops = { 2217 .owner = THIS_MODULE, 2218 .open = ext4_mb_seq_groups_open, 2219 .read = seq_read, 2220 .llseek = seq_lseek, 2221 .release = seq_release, 2222}; 2223 2224 2225/* Create and initialize ext4_group_info data for the given group. */ 2226int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group, 2227 struct ext4_group_desc *desc) 2228{ 2229 int i, len; 2230 int metalen = 0; 2231 struct ext4_sb_info *sbi = EXT4_SB(sb); 2232 struct ext4_group_info **meta_group_info; 2233 2234 /* 2235 * First check if this group is the first of a reserved block. 2236 * If it's true, we have to allocate a new table of pointers 2237 * to ext4_group_info structures 2238 */ 2239 if (group % EXT4_DESC_PER_BLOCK(sb) == 0) { 2240 metalen = sizeof(*meta_group_info) << 2241 EXT4_DESC_PER_BLOCK_BITS(sb); 2242 meta_group_info = kmalloc(metalen, GFP_KERNEL); 2243 if (meta_group_info == NULL) { 2244 printk(KERN_ERR "EXT4-fs: can't allocate mem for a " 2245 "buddy group\n"); 2246 goto exit_meta_group_info; 2247 } 2248 sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)] = 2249 meta_group_info; 2250 } 2251 2252 /* 2253 * calculate needed size. 
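 * (bb_counters[] needs one slot per buddy order, i.e. orders
 * 0 through s_blocksize_bits + 1, which is where the "+ 2" in
 * the offsetof() below comes from.)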
if change bb_counters size, 2254 * don't forget about ext4_mb_generate_buddy() 2255 */ 2256 len = offsetof(typeof(**meta_group_info), 2257 bb_counters[sb->s_blocksize_bits + 2]); 2258 2259 meta_group_info = 2260 sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)]; 2261 i = group & (EXT4_DESC_PER_BLOCK(sb) - 1); 2262 2263 meta_group_info[i] = kzalloc(len, GFP_KERNEL); 2264 if (meta_group_info[i] == NULL) { 2265 printk(KERN_ERR "EXT4-fs: can't allocate buddy mem\n"); 2266 goto exit_group_info; 2267 } 2268 set_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, 2269 &(meta_group_info[i]->bb_state)); 2270 2271 /* 2272 * initialize bb_free to be able to skip 2273 * empty groups without initialization 2274 */ 2275 if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) { 2276 meta_group_info[i]->bb_free = 2277 ext4_free_blocks_after_init(sb, group, desc); 2278 } else { 2279 meta_group_info[i]->bb_free = 2280 ext4_free_blks_count(sb, desc); 2281 } 2282 2283 INIT_LIST_HEAD(&meta_group_info[i]->bb_prealloc_list); 2284 init_rwsem(&meta_group_info[i]->alloc_sem); 2285 meta_group_info[i]->bb_free_root = RB_ROOT; 2286 meta_group_info[i]->bb_largest_free_order = -1; /* uninit */ 2287 2288#ifdef DOUBLE_CHECK 2289 { 2290 struct buffer_head *bh; 2291 meta_group_info[i]->bb_bitmap = 2292 kmalloc(sb->s_blocksize, GFP_KERNEL); 2293 BUG_ON(meta_group_info[i]->bb_bitmap == NULL); 2294 bh = ext4_read_block_bitmap(sb, group); 2295 BUG_ON(bh == NULL); 2296 memcpy(meta_group_info[i]->bb_bitmap, bh->b_data, 2297 sb->s_blocksize); 2298 put_bh(bh); 2299 } 2300#endif 2301 2302 return 0; 2303 2304exit_group_info: 2305 /* If a meta_group_info table has been allocated, release it now */ 2306 if (group % EXT4_DESC_PER_BLOCK(sb) == 0) 2307 kfree(sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)]); 2308exit_meta_group_info: 2309 return -ENOMEM; 2310} /* ext4_mb_add_groupinfo */ 2311 2312static int ext4_mb_init_backend(struct super_block *sb) 2313{ 2314 ext4_group_t ngroups = ext4_get_groups_count(sb); 2315 ext4_group_t i; 2316 struct ext4_sb_info *sbi = EXT4_SB(sb); 2317 struct ext4_super_block *es = sbi->s_es; 2318 int num_meta_group_infos; 2319 int num_meta_group_infos_max; 2320 int array_size; 2321 struct ext4_group_desc *desc; 2322 2323 /* This is the number of blocks used by GDT */ 2324 num_meta_group_infos = (ngroups + EXT4_DESC_PER_BLOCK(sb) - 2325 1) >> EXT4_DESC_PER_BLOCK_BITS(sb); 2326 2327 /* 2328 * This is the total number of blocks used by GDT including 2329 * the number of reserved blocks for GDT. 2330 * The s_group_info array is allocated with this value 2331 * to allow a clean online resize without a complex 2332 * manipulation of pointer. 2333 * The drawback is the unused memory when no resize 2334 * occurs but it's very low in terms of pages 2335 * (see comments below) 2336 * Need to handle this properly when META_BG resizing is allowed 2337 */ 2338 num_meta_group_infos_max = num_meta_group_infos + 2339 le16_to_cpu(es->s_reserved_gdt_blocks); 2340 2341 /* 2342 * array_size is the size of s_group_info array. We round it 2343 * to the next power of two because this approximation is done 2344 * internally by kmalloc so we can have some more memory 2345 * for free here (e.g. may be used for META_BG resize). 2346 */ 2347 array_size = 1; 2348 while (array_size < sizeof(*sbi->s_group_info) * 2349 num_meta_group_infos_max) 2350 array_size = array_size << 1; 2351 /* An 8TB filesystem with 64-bit pointers requires a 4096 byte 2352 * kmalloc. A 128kb malloc should suffice for a 256TB filesystem. 
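	 * (Worked example: an 8TB filesystem with 4K blocks has 2^31
	 * blocks; at 2^15 blocks per group that is 2^16 groups, and
	 * with 128 descriptors per descriptor block the table needs
	 * 2^9 = 512 pointers, i.e. 512 * 8 = 4096 bytes.)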
2353 * So a two level scheme suffices for now. */ 2354 sbi->s_group_info = kmalloc(array_size, GFP_KERNEL); 2355 if (sbi->s_group_info == NULL) { 2356 printk(KERN_ERR "EXT4-fs: can't allocate buddy meta group\n"); 2357 return -ENOMEM; 2358 } 2359 sbi->s_buddy_cache = new_inode(sb); 2360 if (sbi->s_buddy_cache == NULL) { 2361 printk(KERN_ERR "EXT4-fs: can't get new inode\n"); 2362 goto err_freesgi; 2363 } 2364 EXT4_I(sbi->s_buddy_cache)->i_disksize = 0; 2365 for (i = 0; i < ngroups; i++) { 2366 desc = ext4_get_group_desc(sb, i, NULL); 2367 if (desc == NULL) { 2368 printk(KERN_ERR 2369 "EXT4-fs: can't read descriptor %u\n", i); 2370 goto err_freebuddy; 2371 } 2372 if (ext4_mb_add_groupinfo(sb, i, desc) != 0) 2373 goto err_freebuddy; 2374 } 2375 2376 return 0; 2377 2378err_freebuddy: 2379 while (i-- > 0) 2380 kfree(ext4_get_group_info(sb, i)); 2381 i = num_meta_group_infos; 2382 while (i-- > 0) 2383 kfree(sbi->s_group_info[i]); 2384 iput(sbi->s_buddy_cache); 2385err_freesgi: 2386 kfree(sbi->s_group_info); 2387 return -ENOMEM; 2388} 2389 2390int ext4_mb_init(struct super_block *sb, int needs_recovery) 2391{ 2392 struct ext4_sb_info *sbi = EXT4_SB(sb); 2393 unsigned i, j; 2394 unsigned offset; 2395 unsigned max; 2396 int ret; 2397 2398 i = (sb->s_blocksize_bits + 2) * sizeof(*sbi->s_mb_offsets); 2399 2400 sbi->s_mb_offsets = kmalloc(i, GFP_KERNEL); 2401 if (sbi->s_mb_offsets == NULL) { 2402 return -ENOMEM; 2403 } 2404 2405 i = (sb->s_blocksize_bits + 2) * sizeof(*sbi->s_mb_maxs); 2406 sbi->s_mb_maxs = kmalloc(i, GFP_KERNEL); 2407 if (sbi->s_mb_maxs == NULL) { 2408 kfree(sbi->s_mb_offsets); 2409 return -ENOMEM; 2410 } 2411 2412 /* order 0 is regular bitmap */ 2413 sbi->s_mb_maxs[0] = sb->s_blocksize << 3; 2414 sbi->s_mb_offsets[0] = 0; 2415 2416 i = 1; 2417 offset = 0; 2418 max = sb->s_blocksize << 2; 2419 do { 2420 sbi->s_mb_offsets[i] = offset; 2421 sbi->s_mb_maxs[i] = max; 2422 offset += 1 << (sb->s_blocksize_bits - i); 2423 max = max >> 1; 2424 i++; 2425 } while (i <= sb->s_blocksize_bits + 1); 2426 2427 /* init file for buddy data */ 2428 ret = ext4_mb_init_backend(sb); 2429 if (ret != 0) { 2430 kfree(sbi->s_mb_offsets); 2431 kfree(sbi->s_mb_maxs); 2432 return ret; 2433 } 2434 2435 spin_lock_init(&sbi->s_md_lock); 2436 spin_lock_init(&sbi->s_bal_lock); 2437 2438 sbi->s_mb_max_to_scan = MB_DEFAULT_MAX_TO_SCAN; 2439 sbi->s_mb_min_to_scan = MB_DEFAULT_MIN_TO_SCAN; 2440 sbi->s_mb_stats = MB_DEFAULT_STATS; 2441 sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD; 2442 sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS; 2443 sbi->s_mb_group_prealloc = MB_DEFAULT_GROUP_PREALLOC; 2444 2445 sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group); 2446 if (sbi->s_locality_groups == NULL) { 2447 kfree(sbi->s_mb_offsets); 2448 kfree(sbi->s_mb_maxs); 2449 return -ENOMEM; 2450 } 2451 for_each_possible_cpu(i) { 2452 struct ext4_locality_group *lg; 2453 lg = per_cpu_ptr(sbi->s_locality_groups, i); 2454 mutex_init(&lg->lg_mutex); 2455 for (j = 0; j < PREALLOC_TB_SIZE; j++) 2456 INIT_LIST_HEAD(&lg->lg_prealloc_list[j]); 2457 spin_lock_init(&lg->lg_prealloc_lock); 2458 } 2459 2460 if (sbi->s_proc) 2461 proc_create_data("mb_groups", S_IRUGO, sbi->s_proc, 2462 &ext4_mb_seq_groups_fops, sb); 2463 2464 if (sbi->s_journal) 2465 sbi->s_journal->j_commit_callback = release_blocks_on_commit; 2466 return 0; 2467} 2468 2469/* need to called with the ext4 group lock held */ 2470static void ext4_mb_cleanup_pa(struct ext4_group_info *grp) 2471{ 2472 struct ext4_prealloc_space *pa; 2473 struct list_head *cur, 
*tmp; 2474 int count = 0; 2475 2476 list_for_each_safe(cur, tmp, &grp->bb_prealloc_list) { 2477 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list); 2478 list_del(&pa->pa_group_list); 2479 count++; 2480 kmem_cache_free(ext4_pspace_cachep, pa); 2481 } 2482 if (count) 2483 mb_debug(1, "mballoc: %u PAs left\n", count); 2484 2485} 2486 2487int ext4_mb_release(struct super_block *sb) 2488{ 2489 ext4_group_t ngroups = ext4_get_groups_count(sb); 2490 ext4_group_t i; 2491 int num_meta_group_infos; 2492 struct ext4_group_info *grinfo; 2493 struct ext4_sb_info *sbi = EXT4_SB(sb); 2494 2495 if (sbi->s_group_info) { 2496 for (i = 0; i < ngroups; i++) { 2497 grinfo = ext4_get_group_info(sb, i); 2498#ifdef DOUBLE_CHECK 2499 kfree(grinfo->bb_bitmap); 2500#endif 2501 ext4_lock_group(sb, i); 2502 ext4_mb_cleanup_pa(grinfo); 2503 ext4_unlock_group(sb, i); 2504 kfree(grinfo); 2505 } 2506 num_meta_group_infos = (ngroups + 2507 EXT4_DESC_PER_BLOCK(sb) - 1) >> 2508 EXT4_DESC_PER_BLOCK_BITS(sb); 2509 for (i = 0; i < num_meta_group_infos; i++) 2510 kfree(sbi->s_group_info[i]); 2511 kfree(sbi->s_group_info); 2512 } 2513 kfree(sbi->s_mb_offsets); 2514 kfree(sbi->s_mb_maxs); 2515 if (sbi->s_buddy_cache) 2516 iput(sbi->s_buddy_cache); 2517 if (sbi->s_mb_stats) { 2518 printk(KERN_INFO 2519 "EXT4-fs: mballoc: %u blocks %u reqs (%u success)\n", 2520 atomic_read(&sbi->s_bal_allocated), 2521 atomic_read(&sbi->s_bal_reqs), 2522 atomic_read(&sbi->s_bal_success)); 2523 printk(KERN_INFO 2524 "EXT4-fs: mballoc: %u extents scanned, %u goal hits, " 2525 "%u 2^N hits, %u breaks, %u lost\n", 2526 atomic_read(&sbi->s_bal_ex_scanned), 2527 atomic_read(&sbi->s_bal_goals), 2528 atomic_read(&sbi->s_bal_2orders), 2529 atomic_read(&sbi->s_bal_breaks), 2530 atomic_read(&sbi->s_mb_lost_chunks)); 2531 printk(KERN_INFO 2532 "EXT4-fs: mballoc: %lu generated and it took %Lu\n", 2533 sbi->s_mb_buddies_generated++, 2534 sbi->s_mb_generation_time); 2535 printk(KERN_INFO 2536 "EXT4-fs: mballoc: %u preallocated, %u discarded\n", 2537 atomic_read(&sbi->s_mb_preallocated), 2538 atomic_read(&sbi->s_mb_discarded)); 2539 } 2540 2541 free_percpu(sbi->s_locality_groups); 2542 if (sbi->s_proc) 2543 remove_proc_entry("mb_groups", sbi->s_proc); 2544 2545 return 0; 2546} 2547 2548static inline void ext4_issue_discard(struct super_block *sb, 2549 ext4_group_t block_group, ext4_grpblk_t block, int count) 2550{ 2551 int ret; 2552 ext4_fsblk_t discard_block; 2553 2554 discard_block = block + ext4_group_first_block_no(sb, block_group); 2555 trace_ext4_discard_blocks(sb, 2556 (unsigned long long) discard_block, count); 2557 ret = sb_issue_discard(sb, discard_block, count); 2558 if (ret == EOPNOTSUPP) { 2559 ext4_warning(sb, "discard not supported, disabling"); 2560 clear_opt(EXT4_SB(sb)->s_mount_opt, DISCARD); 2561 } 2562} 2563 2564/* 2565 * This function is called by the jbd2 layer once the commit has finished, 2566 * so we know we can free the blocks that were released with that commit. 
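 *
 * Below is a minimal sketch (plain C, not kernel code) of the
 * traversal pattern this callback relies on: remember the successor
 * before the current entry is destroyed, which is exactly what
 * list_for_each_safe() does. The names free_extent, drain_free_list
 * and dispose are invented for illustration.
 */

struct free_extent {			/* stand-in for struct ext4_free_data */
	unsigned int group;
	unsigned int start;
	unsigned int count;
	struct free_extent *next;
};

static void drain_free_list(struct free_extent *head,
			    void (*dispose)(struct free_extent *))
{
	struct free_extent *cur = head;
	struct free_extent *tmp;

	while (cur) {
		tmp = cur->next;	/* grab the successor first ... */
		dispose(cur);		/* ... because cur is freed here */
		cur = tmp;
	}
}

/* sketch ends; the real commit callback follows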
2567 */ 2568static void release_blocks_on_commit(journal_t *journal, transaction_t *txn) 2569{ 2570 struct super_block *sb = journal->j_private; 2571 struct ext4_buddy e4b; 2572 struct ext4_group_info *db; 2573 int err, count = 0, count2 = 0; 2574 struct ext4_free_data *entry; 2575 struct list_head *l, *ltmp; 2576 2577 list_for_each_safe(l, ltmp, &txn->t_private_list) { 2578 entry = list_entry(l, struct ext4_free_data, list); 2579 2580 mb_debug(1, "gonna free %u blocks in group %u (0x%p):", 2581 entry->count, entry->group, entry); 2582 2583 if (test_opt(sb, DISCARD)) 2584 ext4_issue_discard(sb, entry->group, 2585 entry->start_blk, entry->count); 2586 2587 err = ext4_mb_load_buddy(sb, entry->group, &e4b); 2588 /* we expect to find existing buddy because it's pinned */ 2589 BUG_ON(err != 0); 2590 2591 db = e4b.bd_info; 2592 /* there are blocks to put in buddy to make them really free */ 2593 count += entry->count; 2594 count2++; 2595 ext4_lock_group(sb, entry->group); 2596 /* Take it out of per group rb tree */ 2597 rb_erase(&entry->node, &(db->bb_free_root)); 2598 mb_free_blocks(NULL, &e4b, entry->start_blk, entry->count); 2599 2600 if (!db->bb_free_root.rb_node) { 2601 /* No more items in the per group rb tree 2602 * balance refcounts from ext4_mb_free_metadata() 2603 */ 2604 page_cache_release(e4b.bd_buddy_page); 2605 page_cache_release(e4b.bd_bitmap_page); 2606 } 2607 ext4_unlock_group(sb, entry->group); 2608 kmem_cache_free(ext4_free_ext_cachep, entry); 2609 ext4_mb_unload_buddy(&e4b); 2610 } 2611 2612 mb_debug(1, "freed %u blocks in %u structures\n", count, count2); 2613} 2614 2615#ifdef CONFIG_EXT4_DEBUG 2616u8 mb_enable_debug __read_mostly; 2617 2618static struct dentry *debugfs_dir; 2619static struct dentry *debugfs_debug; 2620 2621static void __init ext4_create_debugfs_entry(void) 2622{ 2623 debugfs_dir = debugfs_create_dir("ext4", NULL); 2624 if (debugfs_dir) 2625 debugfs_debug = debugfs_create_u8("mballoc-debug", 2626 S_IRUGO | S_IWUSR, 2627 debugfs_dir, 2628 &mb_enable_debug); 2629} 2630 2631static void ext4_remove_debugfs_entry(void) 2632{ 2633 debugfs_remove(debugfs_debug); 2634 debugfs_remove(debugfs_dir); 2635} 2636 2637#else 2638 2639static void __init ext4_create_debugfs_entry(void) 2640{ 2641} 2642 2643static void ext4_remove_debugfs_entry(void) 2644{ 2645} 2646 2647#endif 2648 2649int __init init_ext4_mballoc(void) 2650{ 2651 ext4_pspace_cachep = 2652 kmem_cache_create("ext4_prealloc_space", 2653 sizeof(struct ext4_prealloc_space), 2654 0, SLAB_RECLAIM_ACCOUNT, NULL); 2655 if (ext4_pspace_cachep == NULL) 2656 return -ENOMEM; 2657 2658 ext4_ac_cachep = 2659 kmem_cache_create("ext4_alloc_context", 2660 sizeof(struct ext4_allocation_context), 2661 0, SLAB_RECLAIM_ACCOUNT, NULL); 2662 if (ext4_ac_cachep == NULL) { 2663 kmem_cache_destroy(ext4_pspace_cachep); 2664 return -ENOMEM; 2665 } 2666 2667 ext4_free_ext_cachep = 2668 kmem_cache_create("ext4_free_block_extents", 2669 sizeof(struct ext4_free_data), 2670 0, SLAB_RECLAIM_ACCOUNT, NULL); 2671 if (ext4_free_ext_cachep == NULL) { 2672 kmem_cache_destroy(ext4_pspace_cachep); 2673 kmem_cache_destroy(ext4_ac_cachep); 2674 return -ENOMEM; 2675 } 2676 ext4_create_debugfs_entry(); 2677 return 0; 2678} 2679 2680void exit_ext4_mballoc(void) 2681{ 2682 /* 2683 * Wait for completion of call_rcu()'s on ext4_pspace_cachep 2684 * before destroying the slab cache. 
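	 * (rcu_barrier() blocks until all RCU callbacks queued so far,
	 * i.e. the pending ext4_mb_pa_callback() frees, have run;
	 * destroying the cache any earlier could free objects into a
	 * dead cache.)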
2685 */ 2686 rcu_barrier(); 2687 kmem_cache_destroy(ext4_pspace_cachep); 2688 kmem_cache_destroy(ext4_ac_cachep); 2689 kmem_cache_destroy(ext4_free_ext_cachep); 2690 ext4_remove_debugfs_entry(); 2691} 2692 2693 2694/* 2695 * Check quota and mark chosen space (ac->ac_b_ex) non-free in bitmaps 2696 * Returns 0 if success or error code 2697 */ 2698static noinline_for_stack int 2699ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac, 2700 handle_t *handle, unsigned int reserv_blks) 2701{ 2702 struct buffer_head *bitmap_bh = NULL; 2703 struct ext4_group_desc *gdp; 2704 struct buffer_head *gdp_bh; 2705 struct ext4_sb_info *sbi; 2706 struct super_block *sb; 2707 ext4_fsblk_t block; 2708 int err, len; 2709 2710 BUG_ON(ac->ac_status != AC_STATUS_FOUND); 2711 BUG_ON(ac->ac_b_ex.fe_len <= 0); 2712 2713 sb = ac->ac_sb; 2714 sbi = EXT4_SB(sb); 2715 2716 err = -EIO; 2717 bitmap_bh = ext4_read_block_bitmap(sb, ac->ac_b_ex.fe_group); 2718 if (!bitmap_bh) 2719 goto out_err; 2720 2721 err = ext4_journal_get_write_access(handle, bitmap_bh); 2722 if (err) 2723 goto out_err; 2724 2725 err = -EIO; 2726 gdp = ext4_get_group_desc(sb, ac->ac_b_ex.fe_group, &gdp_bh); 2727 if (!gdp) 2728 goto out_err; 2729 2730 ext4_debug("using block group %u(%d)\n", ac->ac_b_ex.fe_group, 2731 ext4_free_blks_count(sb, gdp)); 2732 2733 err = ext4_journal_get_write_access(handle, gdp_bh); 2734 if (err) 2735 goto out_err; 2736 2737 block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); 2738 2739 len = ac->ac_b_ex.fe_len; 2740 if (!ext4_data_block_valid(sbi, block, len)) { 2741 ext4_error(sb, "Allocating blocks %llu-%llu which overlap " 2742 "fs metadata\n", block, block+len); 2743 /* File system mounted not to panic on error 2744 * Fix the bitmap and repeat the block allocation 2745 * We leak some of the blocks here. 2746 */ 2747 ext4_lock_group(sb, ac->ac_b_ex.fe_group); 2748 mb_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start, 2749 ac->ac_b_ex.fe_len); 2750 ext4_unlock_group(sb, ac->ac_b_ex.fe_group); 2751 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh); 2752 if (!err) 2753 err = -EAGAIN; 2754 goto out_err; 2755 } 2756 2757 ext4_lock_group(sb, ac->ac_b_ex.fe_group); 2758#ifdef AGGRESSIVE_CHECK 2759 { 2760 int i; 2761 for (i = 0; i < ac->ac_b_ex.fe_len; i++) { 2762 BUG_ON(mb_test_bit(ac->ac_b_ex.fe_start + i, 2763 bitmap_bh->b_data)); 2764 } 2765 } 2766#endif 2767 mb_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,ac->ac_b_ex.fe_len); 2768 if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) { 2769 gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT); 2770 ext4_free_blks_set(sb, gdp, 2771 ext4_free_blocks_after_init(sb, 2772 ac->ac_b_ex.fe_group, gdp)); 2773 } 2774 len = ext4_free_blks_count(sb, gdp) - ac->ac_b_ex.fe_len; 2775 ext4_free_blks_set(sb, gdp, len); 2776 gdp->bg_checksum = ext4_group_desc_csum(sbi, ac->ac_b_ex.fe_group, gdp); 2777 2778 ext4_unlock_group(sb, ac->ac_b_ex.fe_group); 2779 percpu_counter_sub(&sbi->s_freeblocks_counter, ac->ac_b_ex.fe_len); 2780 /* 2781 * Now reduce the dirty block count also. 
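	 * (the dirty-block counter holds delalloc reservations taken
	 * at write time, which these blocks are now consuming).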
Should not go negative 2782 */ 2783 if (!(ac->ac_flags & EXT4_MB_DELALLOC_RESERVED)) 2784 /* release all the reserved blocks if non delalloc */ 2785 percpu_counter_sub(&sbi->s_dirtyblocks_counter, reserv_blks); 2786 2787 if (sbi->s_log_groups_per_flex) { 2788 ext4_group_t flex_group = ext4_flex_group(sbi, 2789 ac->ac_b_ex.fe_group); 2790 atomic_sub(ac->ac_b_ex.fe_len, 2791 &sbi->s_flex_groups[flex_group].free_blocks); 2792 } 2793 2794 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh); 2795 if (err) 2796 goto out_err; 2797 err = ext4_handle_dirty_metadata(handle, NULL, gdp_bh); 2798 2799out_err: 2800 ext4_mark_super_dirty(sb); 2801 brelse(bitmap_bh); 2802 return err; 2803} 2804 2805static void ext4_mb_normalize_group_request(struct ext4_allocation_context *ac) 2806{ 2807 struct super_block *sb = ac->ac_sb; 2808 struct ext4_locality_group *lg = ac->ac_lg; 2809 2810 BUG_ON(lg == NULL); 2811 if (EXT4_SB(sb)->s_stripe) 2812 ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_stripe; 2813 else 2814 ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_mb_group_prealloc; 2815 mb_debug(1, "#%u: goal %u blocks for locality group\n", 2816 current->pid, ac->ac_g_ex.fe_len); 2817} 2818 2819/* 2820 * Normalization means making request better in terms of 2821 * size and alignment 2822 */ 2823static noinline_for_stack void 2824ext4_mb_normalize_request(struct ext4_allocation_context *ac, 2825 struct ext4_allocation_request *ar) 2826{ 2827 int bsbits, max; 2828 ext4_lblk_t end; 2829 loff_t size, orig_size, start_off; 2830 ext4_lblk_t start; 2831 struct ext4_inode_info *ei = EXT4_I(ac->ac_inode); 2832 struct ext4_prealloc_space *pa; 2833 2834 /* do normalize only data requests, metadata requests 2835 do not need preallocation */ 2836 if (!(ac->ac_flags & EXT4_MB_HINT_DATA)) 2837 return; 2838 2839 /* sometime caller may want exact blocks */ 2840 if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY)) 2841 return; 2842 2843 /* caller may indicate that preallocation isn't 2844 * required (it's a tail, for example) */ 2845 if (ac->ac_flags & EXT4_MB_HINT_NOPREALLOC) 2846 return; 2847 2848 if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) { 2849 ext4_mb_normalize_group_request(ac); 2850 return ; 2851 } 2852 2853 bsbits = ac->ac_sb->s_blocksize_bits; 2854 2855 /* first, let's learn actual file size 2856 * given current request is allocated */ 2857 size = ac->ac_o_ex.fe_logical + ac->ac_o_ex.fe_len; 2858 size = size << bsbits; 2859 if (size < i_size_read(ac->ac_inode)) 2860 size = i_size_read(ac->ac_inode); 2861 orig_size = size; 2862 2863 /* max size of free chunks */ 2864 max = 2 << bsbits; 2865 2866#define NRL_CHECK_SIZE(req, size, max, chunk_size) \ 2867 (req <= (size) || max <= (chunk_size)) 2868 2869 /* first, try to predict filesize */ 2870 start_off = 0; 2871 if (size <= 16 * 1024) { 2872 size = 16 * 1024; 2873 } else if (size <= 32 * 1024) { 2874 size = 32 * 1024; 2875 } else if (size <= 64 * 1024) { 2876 size = 64 * 1024; 2877 } else if (size <= 128 * 1024) { 2878 size = 128 * 1024; 2879 } else if (size <= 256 * 1024) { 2880 size = 256 * 1024; 2881 } else if (size <= 512 * 1024) { 2882 size = 512 * 1024; 2883 } else if (size <= 1024 * 1024) { 2884 size = 1024 * 1024; 2885 } else if (NRL_CHECK_SIZE(size, 4 * 1024 * 1024, max, 2 * 1024)) { 2886 start_off = ((loff_t)ac->ac_o_ex.fe_logical >> 2887 (21 - bsbits)) << 21; 2888 size = 2 * 1024 * 1024; 2889 } else if (NRL_CHECK_SIZE(size, 8 * 1024 * 1024, max, 4 * 1024)) { 2890 start_off = ((loff_t)ac->ac_o_ex.fe_logical >> 2891 (22 - bsbits)) << 22; 2892 size = 4 * 1024 * 1024; 2893 } else if 
(NRL_CHECK_SIZE(ac->ac_o_ex.fe_len, 2894 (8<<20)>>bsbits, max, 8 * 1024)) { 2895 start_off = ((loff_t)ac->ac_o_ex.fe_logical >> 2896 (23 - bsbits)) << 23; 2897 size = 8 * 1024 * 1024; 2898 } else { 2899 start_off = (loff_t)ac->ac_o_ex.fe_logical << bsbits; 2900 size = ac->ac_o_ex.fe_len << bsbits; 2901 } 2902 size = size >> bsbits; 2903 start = start_off >> bsbits; 2904 2905 /* don't cover already allocated blocks in selected range */ 2906 if (ar->pleft && start <= ar->lleft) { 2907 size -= ar->lleft + 1 - start; 2908 start = ar->lleft + 1; 2909 } 2910 if (ar->pright && start + size - 1 >= ar->lright) 2911 size -= start + size - ar->lright; 2912 2913 end = start + size; 2914 2915 /* check we don't cross already preallocated blocks */ 2916 rcu_read_lock(); 2917 list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) { 2918 ext4_lblk_t pa_end; 2919 2920 if (pa->pa_deleted) 2921 continue; 2922 spin_lock(&pa->pa_lock); 2923 if (pa->pa_deleted) { 2924 spin_unlock(&pa->pa_lock); 2925 continue; 2926 } 2927 2928 pa_end = pa->pa_lstart + pa->pa_len; 2929 2930 /* PA must not overlap original request */ 2931 BUG_ON(!(ac->ac_o_ex.fe_logical >= pa_end || 2932 ac->ac_o_ex.fe_logical < pa->pa_lstart)); 2933 2934 /* skip PAs this normalized request doesn't overlap with */ 2935 if (pa->pa_lstart >= end || pa_end <= start) { 2936 spin_unlock(&pa->pa_lock); 2937 continue; 2938 } 2939 BUG_ON(pa->pa_lstart <= start && pa_end >= end); 2940 2941 /* adjust start or end to be adjacent to this pa */ 2942 if (pa_end <= ac->ac_o_ex.fe_logical) { 2943 BUG_ON(pa_end < start); 2944 start = pa_end; 2945 } else if (pa->pa_lstart > ac->ac_o_ex.fe_logical) { 2946 BUG_ON(pa->pa_lstart > end); 2947 end = pa->pa_lstart; 2948 } 2949 spin_unlock(&pa->pa_lock); 2950 } 2951 rcu_read_unlock(); 2952 size = end - start; 2953 2954 rcu_read_lock(); 2955 list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) { 2956 ext4_lblk_t pa_end; 2957 spin_lock(&pa->pa_lock); 2958 if (pa->pa_deleted == 0) { 2959 pa_end = pa->pa_lstart + pa->pa_len; 2960 BUG_ON(!(start >= pa_end || end <= pa->pa_lstart)); 2961 } 2962 spin_unlock(&pa->pa_lock); 2963 } 2964 rcu_read_unlock(); 2965 2966 if (start + size <= ac->ac_o_ex.fe_logical && 2967 start > ac->ac_o_ex.fe_logical) { 2968 printk(KERN_ERR "start %lu, size %lu, fe_logical %lu\n", 2969 (unsigned long) start, (unsigned long) size, 2970 (unsigned long) ac->ac_o_ex.fe_logical); 2971 } 2972 BUG_ON(start + size <= ac->ac_o_ex.fe_logical && 2973 start > ac->ac_o_ex.fe_logical); 2974 BUG_ON(size <= 0 || size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb)); 2975 2976 /* now prepare goal request */ 2977 2978 ac->ac_g_ex.fe_logical = start; 2979 ac->ac_g_ex.fe_len = size; 2980 2981 /* define goal start in order to merge */ 2982 if (ar->pright && (ar->lright == (start + size))) { 2983 /* merge to the right */ 2984 ext4_get_group_no_and_offset(ac->ac_sb, ar->pright - size, 2985 &ac->ac_f_ex.fe_group, 2986 &ac->ac_f_ex.fe_start); 2987 ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL; 2988 } 2989 if (ar->pleft && (ar->lleft + 1 == start)) { 2990 /* merge to the left */ 2991 ext4_get_group_no_and_offset(ac->ac_sb, ar->pleft + 1, 2992 &ac->ac_f_ex.fe_group, 2993 &ac->ac_f_ex.fe_start); 2994 ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL; 2995 } 2996 2997 mb_debug(1, "goal: %u(was %u) blocks at %u\n", (unsigned) size, 2998 (unsigned) orig_size, (unsigned) start); 2999} 3000 3001static void ext4_mb_collect_stats(struct ext4_allocation_context *ac) 3002{ 3003 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 3004 3005 if 
(sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) { 3006 atomic_inc(&sbi->s_bal_reqs); 3007 atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated); 3008 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len) 3009 atomic_inc(&sbi->s_bal_success); 3010 atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned); 3011 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start && 3012 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group) 3013 atomic_inc(&sbi->s_bal_goals); 3014 if (ac->ac_found > sbi->s_mb_max_to_scan) 3015 atomic_inc(&sbi->s_bal_breaks); 3016 } 3017 3018 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC) 3019 trace_ext4_mballoc_alloc(ac); 3020 else 3021 trace_ext4_mballoc_prealloc(ac); 3022} 3023 3024/* 3025 * Called on failure; free up any blocks from the inode PA for this 3026 * context. We don't need this for MB_GROUP_PA because we only change 3027 * pa_free in ext4_mb_release_context(), but on failure, we've already 3028 * zeroed out ac->ac_b_ex.fe_len, so group_pa->pa_free is not changed. 3029 */ 3030static void ext4_discard_allocated_blocks(struct ext4_allocation_context *ac) 3031{ 3032 struct ext4_prealloc_space *pa = ac->ac_pa; 3033 int len; 3034 3035 if (pa && pa->pa_type == MB_INODE_PA) { 3036 len = ac->ac_b_ex.fe_len; 3037 pa->pa_free += len; 3038 } 3039 3040} 3041 3042/* 3043 * use blocks preallocated to inode 3044 */ 3045static void ext4_mb_use_inode_pa(struct ext4_allocation_context *ac, 3046 struct ext4_prealloc_space *pa) 3047{ 3048 ext4_fsblk_t start; 3049 ext4_fsblk_t end; 3050 int len; 3051 3052 /* found preallocated blocks, use them */ 3053 start = pa->pa_pstart + (ac->ac_o_ex.fe_logical - pa->pa_lstart); 3054 end = min(pa->pa_pstart + pa->pa_len, start + ac->ac_o_ex.fe_len); 3055 len = end - start; 3056 ext4_get_group_no_and_offset(ac->ac_sb, start, &ac->ac_b_ex.fe_group, 3057 &ac->ac_b_ex.fe_start); 3058 ac->ac_b_ex.fe_len = len; 3059 ac->ac_status = AC_STATUS_FOUND; 3060 ac->ac_pa = pa; 3061 3062 BUG_ON(start < pa->pa_pstart); 3063 BUG_ON(start + len > pa->pa_pstart + pa->pa_len); 3064 BUG_ON(pa->pa_free < len); 3065 pa->pa_free -= len; 3066 3067 mb_debug(1, "use %llu/%u from inode pa %p\n", start, len, pa); 3068} 3069 3070/* 3071 * use blocks preallocated to locality group 3072 */ 3073static void ext4_mb_use_group_pa(struct ext4_allocation_context *ac, 3074 struct ext4_prealloc_space *pa) 3075{ 3076 unsigned int len = ac->ac_o_ex.fe_len; 3077 3078 ext4_get_group_no_and_offset(ac->ac_sb, pa->pa_pstart, 3079 &ac->ac_b_ex.fe_group, 3080 &ac->ac_b_ex.fe_start); 3081 ac->ac_b_ex.fe_len = len; 3082 ac->ac_status = AC_STATUS_FOUND; 3083 ac->ac_pa = pa; 3084 3085 /* we don't correct pa_pstart or pa_plen here to avoid 3086 * possible race when the group is being loaded concurrently 3087 * instead we correct pa later, after blocks are marked 3088 * in on-disk bitmap -- see ext4_mb_release_context() 3089 * Other CPUs are prevented from allocating from this pa by lg_mutex 3090 */ 3091 mb_debug(1, "use %u/%u from group pa %p\n", pa->pa_lstart-len, len, pa); 3092} 3093 3094/* 3095 * Return the prealloc space that have minimal distance 3096 * from the goal block. @cpa is the prealloc 3097 * space that is having currently known minimal distance 3098 * from the goal block. 
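 *
 * A userspace sketch of the selection rule, with invented names
 * (absdiff, pick_closer) that are not kernel helpers; the branchy
 * absolute difference avoids unsigned wrap-around:
 */

static unsigned long long absdiff(unsigned long long a,
				  unsigned long long b)
{
	return a > b ? a - b : b - a;
}

/* Keep the current best only if it is strictly closer to the goal;
 * on a tie the new candidate wins, matching the function below. */
static unsigned long long pick_closer(unsigned long long goal,
				      unsigned long long best,
				      unsigned long long candidate)
{
	if (absdiff(goal, best) < absdiff(goal, candidate))
		return best;
	return candidate;
}

/* sketch ends; ext4_mb_check_group_pa() follows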
3099 */ 3100static struct ext4_prealloc_space * 3101ext4_mb_check_group_pa(ext4_fsblk_t goal_block, 3102 struct ext4_prealloc_space *pa, 3103 struct ext4_prealloc_space *cpa) 3104{ 3105 ext4_fsblk_t cur_distance, new_distance; 3106 3107 if (cpa == NULL) { 3108 atomic_inc(&pa->pa_count); 3109 return pa; 3110 } 3111 cur_distance = abs(goal_block - cpa->pa_pstart); 3112 new_distance = abs(goal_block - pa->pa_pstart); 3113 3114 if (cur_distance < new_distance) 3115 return cpa; 3116 3117 /* drop the previous reference */ 3118 atomic_dec(&cpa->pa_count); 3119 atomic_inc(&pa->pa_count); 3120 return pa; 3121} 3122 3123/* 3124 * search goal blocks in preallocated space 3125 */ 3126static noinline_for_stack int 3127ext4_mb_use_preallocated(struct ext4_allocation_context *ac) 3128{ 3129 int order, i; 3130 struct ext4_inode_info *ei = EXT4_I(ac->ac_inode); 3131 struct ext4_locality_group *lg; 3132 struct ext4_prealloc_space *pa, *cpa = NULL; 3133 ext4_fsblk_t goal_block; 3134 3135 /* only data can be preallocated */ 3136 if (!(ac->ac_flags & EXT4_MB_HINT_DATA)) 3137 return 0; 3138 3139 /* first, try per-file preallocation */ 3140 rcu_read_lock(); 3141 list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) { 3142 3143 /* all fields in this condition don't change, 3144 * so we can skip locking for them */ 3145 if (ac->ac_o_ex.fe_logical < pa->pa_lstart || 3146 ac->ac_o_ex.fe_logical >= pa->pa_lstart + pa->pa_len) 3147 continue; 3148 3149 /* non-extent files can't have physical blocks past 2^32 */ 3150 if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)) && 3151 pa->pa_pstart + pa->pa_len > EXT4_MAX_BLOCK_FILE_PHYS) 3152 continue; 3153 3154 /* found preallocated blocks, use them */ 3155 spin_lock(&pa->pa_lock); 3156 if (pa->pa_deleted == 0 && pa->pa_free) { 3157 atomic_inc(&pa->pa_count); 3158 ext4_mb_use_inode_pa(ac, pa); 3159 spin_unlock(&pa->pa_lock); 3160 ac->ac_criteria = 10; 3161 rcu_read_unlock(); 3162 return 1; 3163 } 3164 spin_unlock(&pa->pa_lock); 3165 } 3166 rcu_read_unlock(); 3167 3168 /* can we use group allocation? */ 3169 if (!(ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)) 3170 return 0; 3171 3172 /* inode may have no locality group for some reason */ 3173 lg = ac->ac_lg; 3174 if (lg == NULL) 3175 return 0; 3176 order = fls(ac->ac_o_ex.fe_len) - 1; 3177 if (order > PREALLOC_TB_SIZE - 1) 3178 /* The max size of hash table is PREALLOC_TB_SIZE */ 3179 order = PREALLOC_TB_SIZE - 1; 3180 3181 goal_block = ext4_grp_offs_to_block(ac->ac_sb, &ac->ac_g_ex); 3182 /* 3183 * search for the prealloc space that is having 3184 * minimal distance from the goal block. 3185 */ 3186 for (i = order; i < PREALLOC_TB_SIZE; i++) { 3187 rcu_read_lock(); 3188 list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[i], 3189 pa_inode_list) { 3190 spin_lock(&pa->pa_lock); 3191 if (pa->pa_deleted == 0 && 3192 pa->pa_free >= ac->ac_o_ex.fe_len) { 3193 3194 cpa = ext4_mb_check_group_pa(goal_block, 3195 pa, cpa); 3196 } 3197 spin_unlock(&pa->pa_lock); 3198 } 3199 rcu_read_unlock(); 3200 } 3201 if (cpa) { 3202 ext4_mb_use_group_pa(ac, cpa); 3203 ac->ac_criteria = 20; 3204 return 1; 3205 } 3206 return 0; 3207} 3208 3209/* 3210 * the function goes through all block freed in the group 3211 * but not yet committed and marks them used in in-core bitmap. 
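 * (such blocks must stay unavailable until the transaction commits,
 * so the buddy generated from this bitmap never hands them out to
 * new allocations).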
 * buddy must be generated from this bitmap
 * Needs to be called with the ext4 group lock held
 */
static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
						ext4_group_t group)
{
	struct rb_node *n;
	struct ext4_group_info *grp;
	struct ext4_free_data *entry;

	grp = ext4_get_group_info(sb, group);
	n = rb_first(&(grp->bb_free_root));

	while (n) {
		entry = rb_entry(n, struct ext4_free_data, node);
		mb_set_bits(bitmap, entry->start_blk, entry->count);
		n = rb_next(n);
	}
	return;
}

/*
 * the function goes through all preallocations in this group and marks them
 * used in the in-core bitmap. buddy must be generated from this bitmap
 * Needs to be called with the ext4 group lock held
 */
static noinline_for_stack
void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
					ext4_group_t group)
{
	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
	struct ext4_prealloc_space *pa;
	struct list_head *cur;
	ext4_group_t groupnr;
	ext4_grpblk_t start;
	int preallocated = 0;
	int count = 0;
	int len;

	/* every discard of preallocations loads the group first,
	 * so the only competing code is preallocation use.
	 * we don't need any locking here.
	 * notice we do NOT ignore preallocations with pa_deleted set;
	 * otherwise we could leave used blocks available for
	 * allocation in buddy while a concurrent ext4_mb_put_pa()
	 * is dropping the preallocation
	 */
	list_for_each(cur, &grp->bb_prealloc_list) {
		pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
		spin_lock(&pa->pa_lock);
		ext4_get_group_no_and_offset(sb, pa->pa_pstart,
						&groupnr, &start);
		len = pa->pa_len;
		spin_unlock(&pa->pa_lock);
		if (unlikely(len == 0))
			continue;
		BUG_ON(groupnr != group);
		mb_set_bits(bitmap, start, len);
		preallocated += len;
		count++;
	}
	mb_debug(1, "preallocated %u for group %u\n", preallocated, group);
}

static void ext4_mb_pa_callback(struct rcu_head *head)
{
	struct ext4_prealloc_space *pa;
	pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu);
	kmem_cache_free(ext4_pspace_cachep, pa);
}

/*
 * drops a reference to preallocated space descriptor
 * if this was the last reference and the space is consumed
 */
static void ext4_mb_put_pa(struct ext4_allocation_context *ac,
			struct super_block *sb, struct ext4_prealloc_space *pa)
{
	ext4_group_t grp;
	ext4_fsblk_t grp_blk;

	if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0)
		return;

	/* in this short window concurrent discard can set pa_deleted */
	spin_lock(&pa->pa_lock);
	if (pa->pa_deleted == 1) {
		spin_unlock(&pa->pa_lock);
		return;
	}

	pa->pa_deleted = 1;
	spin_unlock(&pa->pa_lock);

	grp_blk = pa->pa_pstart;
	/*
	 * If doing group-based preallocation, pa_pstart may be in the
	 * next group when pa is used up
	 */
	if (pa->pa_type == MB_GROUP_PA)
		grp_blk--;

	ext4_get_group_no_and_offset(sb, grp_blk, &grp, NULL);

	/*
	 * possible race:
	 *
	 *  P1 (buddy init)			P2 (regular allocation)
	 *					find block B in PA
	 *  copy on-disk bitmap to buddy
	 *					mark B in on-disk bitmap
	 *					drop PA from group
	 *  mark all PAs in buddy
	 *
	 * thus, P1 initializes buddy with B available.
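	 * (block B would then be free in buddy but allocated on disk);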
to prevent this 3327 * we make "copy" and "mark all PAs" atomic and serialize "drop PA" 3328 * against that pair 3329 */ 3330 ext4_lock_group(sb, grp); 3331 list_del(&pa->pa_group_list); 3332 ext4_unlock_group(sb, grp); 3333 3334 spin_lock(pa->pa_obj_lock); 3335 list_del_rcu(&pa->pa_inode_list); 3336 spin_unlock(pa->pa_obj_lock); 3337 3338 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback); 3339} 3340 3341/* 3342 * creates new preallocated space for given inode 3343 */ 3344static noinline_for_stack int 3345ext4_mb_new_inode_pa(struct ext4_allocation_context *ac) 3346{ 3347 struct super_block *sb = ac->ac_sb; 3348 struct ext4_prealloc_space *pa; 3349 struct ext4_group_info *grp; 3350 struct ext4_inode_info *ei; 3351 3352 /* preallocate only when found space is larger then requested */ 3353 BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len); 3354 BUG_ON(ac->ac_status != AC_STATUS_FOUND); 3355 BUG_ON(!S_ISREG(ac->ac_inode->i_mode)); 3356 3357 pa = kmem_cache_alloc(ext4_pspace_cachep, GFP_NOFS); 3358 if (pa == NULL) 3359 return -ENOMEM; 3360 3361 if (ac->ac_b_ex.fe_len < ac->ac_g_ex.fe_len) { 3362 int winl; 3363 int wins; 3364 int win; 3365 int offs; 3366 3367 /* we can't allocate as much as normalizer wants. 3368 * so, found space must get proper lstart 3369 * to cover original request */ 3370 BUG_ON(ac->ac_g_ex.fe_logical > ac->ac_o_ex.fe_logical); 3371 BUG_ON(ac->ac_g_ex.fe_len < ac->ac_o_ex.fe_len); 3372 3373 /* we're limited by original request in that 3374 * logical block must be covered any way 3375 * winl is window we can move our chunk within */ 3376 winl = ac->ac_o_ex.fe_logical - ac->ac_g_ex.fe_logical; 3377 3378 /* also, we should cover whole original request */ 3379 wins = ac->ac_b_ex.fe_len - ac->ac_o_ex.fe_len; 3380 3381 /* the smallest one defines real window */ 3382 win = min(winl, wins); 3383 3384 offs = ac->ac_o_ex.fe_logical % ac->ac_b_ex.fe_len; 3385 if (offs && offs < win) 3386 win = offs; 3387 3388 ac->ac_b_ex.fe_logical = ac->ac_o_ex.fe_logical - win; 3389 BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical); 3390 BUG_ON(ac->ac_o_ex.fe_len > ac->ac_b_ex.fe_len); 3391 } 3392 3393 /* preallocation can change ac_b_ex, thus we store actually 3394 * allocated blocks for history */ 3395 ac->ac_f_ex = ac->ac_b_ex; 3396 3397 pa->pa_lstart = ac->ac_b_ex.fe_logical; 3398 pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); 3399 pa->pa_len = ac->ac_b_ex.fe_len; 3400 pa->pa_free = pa->pa_len; 3401 atomic_set(&pa->pa_count, 1); 3402 spin_lock_init(&pa->pa_lock); 3403 INIT_LIST_HEAD(&pa->pa_inode_list); 3404 INIT_LIST_HEAD(&pa->pa_group_list); 3405 pa->pa_deleted = 0; 3406 pa->pa_type = MB_INODE_PA; 3407 3408 mb_debug(1, "new inode pa %p: %llu/%u for %u\n", pa, 3409 pa->pa_pstart, pa->pa_len, pa->pa_lstart); 3410 trace_ext4_mb_new_inode_pa(ac, pa); 3411 3412 ext4_mb_use_inode_pa(ac, pa); 3413 atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated); 3414 3415 ei = EXT4_I(ac->ac_inode); 3416 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group); 3417 3418 pa->pa_obj_lock = &ei->i_prealloc_lock; 3419 pa->pa_inode = ac->ac_inode; 3420 3421 ext4_lock_group(sb, ac->ac_b_ex.fe_group); 3422 list_add(&pa->pa_group_list, &grp->bb_prealloc_list); 3423 ext4_unlock_group(sb, ac->ac_b_ex.fe_group); 3424 3425 spin_lock(pa->pa_obj_lock); 3426 list_add_rcu(&pa->pa_inode_list, &ei->i_prealloc_list); 3427 spin_unlock(pa->pa_obj_lock); 3428 3429 return 0; 3430} 3431 3432/* 3433 * creates new preallocated space for locality group inodes belongs to 3434 */ 3435static noinline_for_stack int 
ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
{
	struct super_block *sb = ac->ac_sb;
	struct ext4_locality_group *lg;
	struct ext4_prealloc_space *pa;
	struct ext4_group_info *grp;

	/* preallocate only when found space is larger than requested */
	BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
	BUG_ON(ac->ac_status != AC_STATUS_FOUND);
	BUG_ON(!S_ISREG(ac->ac_inode->i_mode));

	BUG_ON(ext4_pspace_cachep == NULL);
	pa = kmem_cache_alloc(ext4_pspace_cachep, GFP_NOFS);
	if (pa == NULL)
		return -ENOMEM;

	/* preallocation can change ac_b_ex, thus we store actually
	 * allocated blocks for history */
	ac->ac_f_ex = ac->ac_b_ex;

	pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
	pa->pa_lstart = pa->pa_pstart;
	pa->pa_len = ac->ac_b_ex.fe_len;
	pa->pa_free = pa->pa_len;
	atomic_set(&pa->pa_count, 1);
	spin_lock_init(&pa->pa_lock);
	INIT_LIST_HEAD(&pa->pa_inode_list);
	INIT_LIST_HEAD(&pa->pa_group_list);
	pa->pa_deleted = 0;
	pa->pa_type = MB_GROUP_PA;

	mb_debug(1, "new group pa %p: %llu/%u for %u\n", pa,
			pa->pa_pstart, pa->pa_len, pa->pa_lstart);
	trace_ext4_mb_new_group_pa(ac, pa);

	ext4_mb_use_group_pa(ac, pa);
	atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);

	grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
	lg = ac->ac_lg;
	BUG_ON(lg == NULL);

	pa->pa_obj_lock = &lg->lg_prealloc_lock;
	pa->pa_inode = NULL;

	ext4_lock_group(sb, ac->ac_b_ex.fe_group);
	list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
	ext4_unlock_group(sb, ac->ac_b_ex.fe_group);

	/*
	 * We will later add the new pa to the right bucket
	 * after updating the pa_free in ext4_mb_release_context
	 */
	return 0;
}

static int ext4_mb_new_preallocation(struct ext4_allocation_context *ac)
{
	int err;

	if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
		err = ext4_mb_new_group_pa(ac);
	else
		err = ext4_mb_new_inode_pa(ac);
	return err;
}

/*
 * finds all unused blocks in on-disk bitmap, frees them in
 * in-core bitmap and buddy.
 * @pa must be unlinked from inode and group lists, so that
 * nobody else can find/use it.
 * the caller MUST hold group/inode locks.
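 *
 * A userspace sketch of the zero-run walk performed below over the
 * on-disk bitmap; next_bit_state() and for_each_zero_run() are
 * invented names, not the kernel's mb_find_next_*bit() helpers:
 */

static unsigned int next_bit_state(const unsigned char *map,
				   unsigned int size, unsigned int from,
				   int want_set)
{
	/* advance until the bit at 'from' matches want_set or we hit size */
	while (from < size &&
	       !!(map[from / 8] & (1 << (from % 8))) != !!want_set)
		from++;
	return from;
}

static void for_each_zero_run(const unsigned char *map, unsigned int size,
			      void (*fn)(unsigned int start, unsigned int len))
{
	unsigned int bit = 0, next;

	while (bit < size) {
		bit = next_bit_state(map, size, bit, 0);  /* run start */
		if (bit >= size)
			break;
		next = next_bit_state(map, size, bit, 1); /* run end */
		fn(bit, next - bit);
		bit = next;
	}
}

/* sketch ends; the header comment's remaining note: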
3510 * TODO: optimize the case when there are no in-core structures yet 3511 */ 3512static noinline_for_stack int 3513ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh, 3514 struct ext4_prealloc_space *pa, 3515 struct ext4_allocation_context *ac) 3516{ 3517 struct super_block *sb = e4b->bd_sb; 3518 struct ext4_sb_info *sbi = EXT4_SB(sb); 3519 unsigned int end; 3520 unsigned int next; 3521 ext4_group_t group; 3522 ext4_grpblk_t bit; 3523 unsigned long long grp_blk_start; 3524 int err = 0; 3525 int free = 0; 3526 3527 BUG_ON(pa->pa_deleted == 0); 3528 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit); 3529 grp_blk_start = pa->pa_pstart - bit; 3530 BUG_ON(group != e4b->bd_group && pa->pa_len != 0); 3531 end = bit + pa->pa_len; 3532 3533 if (ac) { 3534 ac->ac_sb = sb; 3535 ac->ac_inode = pa->pa_inode; 3536 } 3537 3538 while (bit < end) { 3539 bit = mb_find_next_zero_bit(bitmap_bh->b_data, end, bit); 3540 if (bit >= end) 3541 break; 3542 next = mb_find_next_bit(bitmap_bh->b_data, end, bit); 3543 mb_debug(1, " free preallocated %u/%u in group %u\n", 3544 (unsigned) ext4_group_first_block_no(sb, group) + bit, 3545 (unsigned) next - bit, (unsigned) group); 3546 free += next - bit; 3547 3548 if (ac) { 3549 ac->ac_b_ex.fe_group = group; 3550 ac->ac_b_ex.fe_start = bit; 3551 ac->ac_b_ex.fe_len = next - bit; 3552 ac->ac_b_ex.fe_logical = 0; 3553 trace_ext4_mballoc_discard(ac); 3554 } 3555 3556 trace_ext4_mb_release_inode_pa(sb, ac, pa, grp_blk_start + bit, 3557 next - bit); 3558 mb_free_blocks(pa->pa_inode, e4b, bit, next - bit); 3559 bit = next + 1; 3560 } 3561 if (free != pa->pa_free) { 3562 printk(KERN_CRIT "pa %p: logic %lu, phys. %lu, len %lu\n", 3563 pa, (unsigned long) pa->pa_lstart, 3564 (unsigned long) pa->pa_pstart, 3565 (unsigned long) pa->pa_len); 3566 ext4_grp_locked_error(sb, group, 0, 0, "free %u, pa_free %u", 3567 free, pa->pa_free); 3568 /* 3569 * pa is already deleted so we use the value obtained 3570 * from the bitmap and continue. 
3571 */ 3572 } 3573 atomic_add(free, &sbi->s_mb_discarded); 3574 3575 return err; 3576} 3577 3578static noinline_for_stack int 3579ext4_mb_release_group_pa(struct ext4_buddy *e4b, 3580 struct ext4_prealloc_space *pa, 3581 struct ext4_allocation_context *ac) 3582{ 3583 struct super_block *sb = e4b->bd_sb; 3584 ext4_group_t group; 3585 ext4_grpblk_t bit; 3586 3587 trace_ext4_mb_release_group_pa(sb, ac, pa); 3588 BUG_ON(pa->pa_deleted == 0); 3589 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit); 3590 BUG_ON(group != e4b->bd_group && pa->pa_len != 0); 3591 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len); 3592 atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded); 3593 3594 if (ac) { 3595 ac->ac_sb = sb; 3596 ac->ac_inode = NULL; 3597 ac->ac_b_ex.fe_group = group; 3598 ac->ac_b_ex.fe_start = bit; 3599 ac->ac_b_ex.fe_len = pa->pa_len; 3600 ac->ac_b_ex.fe_logical = 0; 3601 trace_ext4_mballoc_discard(ac); 3602 } 3603 3604 return 0; 3605} 3606 3607/* 3608 * releases all preallocations in given group 3609 * 3610 * first, we need to decide discard policy: 3611 * - when do we discard 3612 * 1) ENOSPC 3613 * - how many do we discard 3614 * 1) how many requested 3615 */ 3616static noinline_for_stack int 3617ext4_mb_discard_group_preallocations(struct super_block *sb, 3618 ext4_group_t group, int needed) 3619{ 3620 struct ext4_group_info *grp = ext4_get_group_info(sb, group); 3621 struct buffer_head *bitmap_bh = NULL; 3622 struct ext4_prealloc_space *pa, *tmp; 3623 struct ext4_allocation_context *ac; 3624 struct list_head list; 3625 struct ext4_buddy e4b; 3626 int err; 3627 int busy = 0; 3628 int free = 0; 3629 3630 mb_debug(1, "discard preallocation for group %u\n", group); 3631 3632 if (list_empty(&grp->bb_prealloc_list)) 3633 return 0; 3634 3635 bitmap_bh = ext4_read_block_bitmap(sb, group); 3636 if (bitmap_bh == NULL) { 3637 ext4_error(sb, "Error reading block bitmap for %u", group); 3638 return 0; 3639 } 3640 3641 err = ext4_mb_load_buddy(sb, group, &e4b); 3642 if (err) { 3643 ext4_error(sb, "Error loading buddy information for %u", group); 3644 put_bh(bitmap_bh); 3645 return 0; 3646 } 3647 3648 if (needed == 0) 3649 needed = EXT4_BLOCKS_PER_GROUP(sb) + 1; 3650 3651 INIT_LIST_HEAD(&list); 3652 ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS); 3653 if (ac) 3654 ac->ac_sb = sb; 3655repeat: 3656 ext4_lock_group(sb, group); 3657 list_for_each_entry_safe(pa, tmp, 3658 &grp->bb_prealloc_list, pa_group_list) { 3659 spin_lock(&pa->pa_lock); 3660 if (atomic_read(&pa->pa_count)) { 3661 spin_unlock(&pa->pa_lock); 3662 busy = 1; 3663 continue; 3664 } 3665 if (pa->pa_deleted) { 3666 spin_unlock(&pa->pa_lock); 3667 continue; 3668 } 3669 3670 /* seems this one can be freed ... */ 3671 pa->pa_deleted = 1; 3672 3673 /* we can trust pa_free ... */ 3674 free += pa->pa_free; 3675 3676 spin_unlock(&pa->pa_lock); 3677 3678 list_del(&pa->pa_group_list); 3679 list_add(&pa->u.pa_tmp_list, &list); 3680 } 3681 3682 /* if we still need more blocks and some PAs were used, try again */ 3683 if (free < needed && busy) { 3684 busy = 0; 3685 ext4_unlock_group(sb, group); 3686 /* 3687 * Yield the CPU here so that we don't get soft lockup 3688 * in non preempt case. 3689 */ 3690 yield(); 3691 goto repeat; 3692 } 3693 3694 /* found anything to free? 
*/ 3695 if (list_empty(&list)) { 3696 BUG_ON(free != 0); 3697 goto out; 3698 } 3699 3700 /* now free all selected PAs */ 3701 list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) { 3702 3703 /* remove from object (inode or locality group) */ 3704 spin_lock(pa->pa_obj_lock); 3705 list_del_rcu(&pa->pa_inode_list); 3706 spin_unlock(pa->pa_obj_lock); 3707 3708 if (pa->pa_type == MB_GROUP_PA) 3709 ext4_mb_release_group_pa(&e4b, pa, ac); 3710 else 3711 ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa, ac); 3712 3713 list_del(&pa->u.pa_tmp_list); 3714 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback); 3715 } 3716 3717out: 3718 ext4_unlock_group(sb, group); 3719 if (ac) 3720 kmem_cache_free(ext4_ac_cachep, ac); 3721 ext4_mb_unload_buddy(&e4b); 3722 put_bh(bitmap_bh); 3723 return free; 3724} 3725 3726void ext4_discard_preallocations(struct inode *inode) 3727{ 3728 struct ext4_inode_info *ei = EXT4_I(inode); 3729 struct super_block *sb = inode->i_sb; 3730 struct buffer_head *bitmap_bh = NULL; 3731 struct ext4_prealloc_space *pa, *tmp; 3732 struct ext4_allocation_context *ac; 3733 ext4_group_t group = 0; 3734 struct list_head list; 3735 struct ext4_buddy e4b; 3736 int err; 3737 3738 if (!S_ISREG(inode->i_mode)) { 3739 /*BUG_ON(!list_empty(&ei->i_prealloc_list));*/ 3740 return; 3741 } 3742 3743 mb_debug(1, "discard preallocation for inode %lu\n", inode->i_ino); 3744 trace_ext4_discard_preallocations(inode); 3745 3746 INIT_LIST_HEAD(&list); 3747 3748 ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS); 3749 if (ac) { 3750 ac->ac_sb = sb; 3751 ac->ac_inode = inode; 3752 } 3753repeat: 3754 /* first, collect all pa's in the inode */ 3755 spin_lock(&ei->i_prealloc_lock); 3756 while (!list_empty(&ei->i_prealloc_list)) { 3757 pa = list_entry(ei->i_prealloc_list.next, 3758 struct ext4_prealloc_space, pa_inode_list); 3759 BUG_ON(pa->pa_obj_lock != &ei->i_prealloc_lock); 3760 spin_lock(&pa->pa_lock); 3761 if (atomic_read(&pa->pa_count)) { 3762 /* this shouldn't happen often - nobody should 3763 * use preallocation while we're discarding it */ 3764 spin_unlock(&pa->pa_lock); 3765 spin_unlock(&ei->i_prealloc_lock); 3766 printk(KERN_ERR "uh-oh! used pa while discarding\n"); 3767 WARN_ON(1); 3768 schedule_timeout_uninterruptible(HZ); 3769 goto repeat; 3770 3771 } 3772 if (pa->pa_deleted == 0) { 3773 pa->pa_deleted = 1; 3774 spin_unlock(&pa->pa_lock); 3775 list_del_rcu(&pa->pa_inode_list); 3776 list_add(&pa->u.pa_tmp_list, &list); 3777 continue; 3778 } 3779 3780 /* someone is deleting pa right now */ 3781 spin_unlock(&pa->pa_lock); 3782 spin_unlock(&ei->i_prealloc_lock); 3783 3784 /* we have to wait here because pa_deleted 3785 * doesn't mean pa is already unlinked from 3786 * the list. 
as we might be called from 3787 * ->clear_inode() the inode will get freed 3788 * and concurrent thread which is unlinking 3789 * pa from inode's list may access already 3790 * freed memory, bad-bad-bad */ 3791 3792 schedule_timeout_uninterruptible(HZ); 3793 goto repeat; 3794 } 3795 spin_unlock(&ei->i_prealloc_lock); 3796 3797 list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) { 3798 BUG_ON(pa->pa_type != MB_INODE_PA); 3799 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, NULL); 3800 3801 err = ext4_mb_load_buddy(sb, group, &e4b); 3802 if (err) { 3803 ext4_error(sb, "Error loading buddy information for %u", 3804 group); 3805 continue; 3806 } 3807 3808 bitmap_bh = ext4_read_block_bitmap(sb, group); 3809 if (bitmap_bh == NULL) { 3810 ext4_error(sb, "Error reading block bitmap for %u", 3811 group); 3812 ext4_mb_unload_buddy(&e4b); 3813 continue; 3814 } 3815 3816 ext4_lock_group(sb, group); 3817 list_del(&pa->pa_group_list); 3818 ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa, ac); 3819 ext4_unlock_group(sb, group); 3820 3821 ext4_mb_unload_buddy(&e4b); 3822 put_bh(bitmap_bh); 3823 3824 list_del(&pa->u.pa_tmp_list); 3825 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback); 3826 } 3827 if (ac) 3828 kmem_cache_free(ext4_ac_cachep, ac); 3829} 3830 3831static void ext4_mb_return_to_preallocation(struct inode *inode, 3832 struct ext4_buddy *e4b, 3833 sector_t block, int count) 3834{ 3835 BUG_ON(!list_empty(&EXT4_I(inode)->i_prealloc_list)); 3836} 3837#ifdef CONFIG_EXT4_DEBUG 3838static void ext4_mb_show_ac(struct ext4_allocation_context *ac) 3839{ 3840 struct super_block *sb = ac->ac_sb; 3841 ext4_group_t ngroups, i; 3842 3843 if (EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED) 3844 return; 3845 3846 printk(KERN_ERR "EXT4-fs: Can't allocate:" 3847 " Allocation context details:\n"); 3848 printk(KERN_ERR "EXT4-fs: status %d flags %d\n", 3849 ac->ac_status, ac->ac_flags); 3850 printk(KERN_ERR "EXT4-fs: orig %lu/%lu/%lu@%lu, goal %lu/%lu/%lu@%lu, " 3851 "best %lu/%lu/%lu@%lu cr %d\n", 3852 (unsigned long)ac->ac_o_ex.fe_group, 3853 (unsigned long)ac->ac_o_ex.fe_start, 3854 (unsigned long)ac->ac_o_ex.fe_len, 3855 (unsigned long)ac->ac_o_ex.fe_logical, 3856 (unsigned long)ac->ac_g_ex.fe_group, 3857 (unsigned long)ac->ac_g_ex.fe_start, 3858 (unsigned long)ac->ac_g_ex.fe_len, 3859 (unsigned long)ac->ac_g_ex.fe_logical, 3860 (unsigned long)ac->ac_b_ex.fe_group, 3861 (unsigned long)ac->ac_b_ex.fe_start, 3862 (unsigned long)ac->ac_b_ex.fe_len, 3863 (unsigned long)ac->ac_b_ex.fe_logical, 3864 (int)ac->ac_criteria); 3865 printk(KERN_ERR "EXT4-fs: %lu scanned, %d found\n", ac->ac_ex_scanned, 3866 ac->ac_found); 3867 printk(KERN_ERR "EXT4-fs: groups: \n"); 3868 ngroups = ext4_get_groups_count(sb); 3869 for (i = 0; i < ngroups; i++) { 3870 struct ext4_group_info *grp = ext4_get_group_info(sb, i); 3871 struct ext4_prealloc_space *pa; 3872 ext4_grpblk_t start; 3873 struct list_head *cur; 3874 ext4_lock_group(sb, i); 3875 list_for_each(cur, &grp->bb_prealloc_list) { 3876 pa = list_entry(cur, struct ext4_prealloc_space, 3877 pa_group_list); 3878 spin_lock(&pa->pa_lock); 3879 ext4_get_group_no_and_offset(sb, pa->pa_pstart, 3880 NULL, &start); 3881 spin_unlock(&pa->pa_lock); 3882 printk(KERN_ERR "PA:%u:%d:%u \n", i, 3883 start, pa->pa_len); 3884 } 3885 ext4_unlock_group(sb, i); 3886 3887 if (grp->bb_free == 0) 3888 continue; 3889 printk(KERN_ERR "%u: %d/%d \n", 3890 i, grp->bb_free, grp->bb_fragments); 3891 } 3892 printk(KERN_ERR "\n"); 3893} 3894#else 3895static inline void ext4_mb_show_ac(struct 
ext4_allocation_context *ac) 3896{ 3897 return; 3898} 3899#endif 3900 3901/* 3902 * We use locality group preallocation for small files. The size of the 3903 * file is determined by the current size or the resulting size after 3904 * allocation, whichever is larger 3905 * 3906 * One can tune this size via /sys/fs/ext4/<partition>/mb_stream_req 3907 */ 3908static void ext4_mb_group_or_file(struct ext4_allocation_context *ac) 3909{ 3910 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 3911 int bsbits = ac->ac_sb->s_blocksize_bits; 3912 loff_t size, isize; 3913 3914 if (!(ac->ac_flags & EXT4_MB_HINT_DATA)) 3915 return; 3916 3917 if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY)) 3918 return; 3919 3920 size = ac->ac_o_ex.fe_logical + ac->ac_o_ex.fe_len; 3921 isize = (i_size_read(ac->ac_inode) + ac->ac_sb->s_blocksize - 1) 3922 >> bsbits; 3923 3924 if ((size == isize) && 3925 !ext4_fs_is_busy(sbi) && 3926 (atomic_read(&ac->ac_inode->i_writecount) == 0)) { 3927 ac->ac_flags |= EXT4_MB_HINT_NOPREALLOC; 3928 return; 3929 } 3930 3931 /* don't use group allocation for large files */ 3932 size = max(size, isize); 3933 if (size > sbi->s_mb_stream_request) { 3934 ac->ac_flags |= EXT4_MB_STREAM_ALLOC; 3935 return; 3936 } 3937 3938 BUG_ON(ac->ac_lg != NULL); 3939 /* 3940 * The locality group prealloc space is per-CPU. The reason for having 3941 * a per-CPU locality group is to reduce the contention between block 3942 * requests from multiple CPUs. 3943 */ 3944 ac->ac_lg = __this_cpu_ptr(sbi->s_locality_groups); 3945 3946 /* we're going to use group allocation */ 3947 ac->ac_flags |= EXT4_MB_HINT_GROUP_ALLOC; 3948 3949 /* serialize all allocations in the group */ 3950 mutex_lock(&ac->ac_lg->lg_mutex); 3951} 3952 3953static noinline_for_stack int 3954ext4_mb_initialize_context(struct ext4_allocation_context *ac, 3955 struct ext4_allocation_request *ar) 3956{ 3957 struct super_block *sb = ar->inode->i_sb; 3958 struct ext4_sb_info *sbi = EXT4_SB(sb); 3959 struct ext4_super_block *es = sbi->s_es; 3960 ext4_group_t group; 3961 unsigned int len; 3962 ext4_fsblk_t goal; 3963 ext4_grpblk_t block; 3964 3965 /* we can't allocate > group size */ 3966 len = ar->len; 3967 3968 /* just a dirty hack to filter too big requests */ 3969 if (len >= EXT4_BLOCKS_PER_GROUP(sb) - 10) 3970 len = EXT4_BLOCKS_PER_GROUP(sb) - 10; 3971 3972 /* start searching from the goal */ 3973 goal = ar->goal; 3974 if (goal < le32_to_cpu(es->s_first_data_block) || 3975 goal >= ext4_blocks_count(es)) 3976 goal = le32_to_cpu(es->s_first_data_block); 3977 ext4_get_group_no_and_offset(sb, goal, &group, &block); 3978 3979 /* set up allocation goals */ 3980 memset(ac, 0, sizeof(struct ext4_allocation_context)); 3981 ac->ac_b_ex.fe_logical = ar->logical; 3982 ac->ac_status = AC_STATUS_CONTINUE; 3983 ac->ac_sb = sb; 3984 ac->ac_inode = ar->inode; 3985 ac->ac_o_ex.fe_logical = ar->logical; 3986 ac->ac_o_ex.fe_group = group; 3987 ac->ac_o_ex.fe_start = block; 3988 ac->ac_o_ex.fe_len = len; 3989 ac->ac_g_ex.fe_logical = ar->logical; 3990 ac->ac_g_ex.fe_group = group; 3991 ac->ac_g_ex.fe_start = block; 3992 ac->ac_g_ex.fe_len = len; 3993 ac->ac_flags = ar->flags; 3994 3995 /* we have to define the context: will we work with a file or a 3996 * locality group.
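 * The choice is driven by the size heuristic in ext4_mb_group_or_file()
 * above and can be tuned from userspace; for example (the device name
 * here is only illustrative):
 *
 *	echo 64 > /sys/fs/ext4/sda1/mb_stream_req
 *
 * makes files smaller than 64 blocks use the per-CPU locality group
 * preallocation, while anything larger takes the stream (per-inode
 * preallocation) path.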
this is a policy, actually */ 3997 ext4_mb_group_or_file(ac); 3998 3999 mb_debug(1, "init ac: %u blocks @ %u, goal %u, flags %x, 2^%d, " 4000 "left: %u/%u, right %u/%u to %swritable\n", 4001 (unsigned) ar->len, (unsigned) ar->logical, 4002 (unsigned) ar->goal, ac->ac_flags, ac->ac_2order, 4003 (unsigned) ar->lleft, (unsigned) ar->pleft, 4004 (unsigned) ar->lright, (unsigned) ar->pright, 4005 atomic_read(&ar->inode->i_writecount) ? "" : "non-"); 4006 return 0; 4007 4008} 4009 4010static noinline_for_stack void 4011ext4_mb_discard_lg_preallocations(struct super_block *sb, 4012 struct ext4_locality_group *lg, 4013 int order, int total_entries) 4014{ 4015 ext4_group_t group = 0; 4016 struct ext4_buddy e4b; 4017 struct list_head discard_list; 4018 struct ext4_prealloc_space *pa, *tmp; 4019 struct ext4_allocation_context *ac; 4020 4021 mb_debug(1, "discard locality group preallocation\n"); 4022 4023 INIT_LIST_HEAD(&discard_list); 4024 ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS); 4025 if (ac) 4026 ac->ac_sb = sb; 4027 4028 spin_lock(&lg->lg_prealloc_lock); 4029 list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[order], 4030 pa_inode_list) { 4031 spin_lock(&pa->pa_lock); 4032 if (atomic_read(&pa->pa_count)) { 4033 /* 4034 * This is the pa that we just used 4035 * for block allocation. So don't 4036 * free it 4037 */ 4038 spin_unlock(&pa->pa_lock); 4039 continue; 4040 } 4041 if (pa->pa_deleted) { 4042 spin_unlock(&pa->pa_lock); 4043 continue; 4044 } 4045 /* only lg prealloc space */ 4046 BUG_ON(pa->pa_type != MB_GROUP_PA); 4047 4048 /* seems this one can be freed ... */ 4049 pa->pa_deleted = 1; 4050 spin_unlock(&pa->pa_lock); 4051 4052 list_del_rcu(&pa->pa_inode_list); 4053 list_add(&pa->u.pa_tmp_list, &discard_list); 4054 4055 total_entries--; 4056 if (total_entries <= 5) { 4057 /* 4058 * we want to keep only 5 entries 4059 * allowing it to grow to 8. This 4060 * makes sure we don't call discard 4061 * soon for this list. 4062 */ 4063 break; 4064 } 4065 } 4066 spin_unlock(&lg->lg_prealloc_lock); 4067 4068 list_for_each_entry_safe(pa, tmp, &discard_list, u.pa_tmp_list) { 4069 4070 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, NULL); 4071 if (ext4_mb_load_buddy(sb, group, &e4b)) { 4072 ext4_error(sb, "Error loading buddy information for %u", 4073 group); 4074 continue; 4075 } 4076 ext4_lock_group(sb, group); 4077 list_del(&pa->pa_group_list); 4078 ext4_mb_release_group_pa(&e4b, pa, ac); 4079 ext4_unlock_group(sb, group); 4080 4081 ext4_mb_unload_buddy(&e4b); 4082 list_del(&pa->u.pa_tmp_list); 4083 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback); 4084 } 4085 if (ac) 4086 kmem_cache_free(ext4_ac_cachep, ac); 4087} 4088 4089/* 4090 * We have incremented pa_count. So it cannot be freed at this 4091 * point. Also we hold lg_mutex. So no parallel allocation is 4092 * possible from this lg. That means pa_free cannot be updated. 4093 * 4094 * A parallel ext4_mb_discard_group_preallocations is possible, 4095 * which can cause the lg_prealloc_list to be updated.
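 *
 * Which list a PA belongs on is decided by the highest set bit of its
 * free block count; a minimal sketch of the computation performed in
 * ext4_mb_add_n_trim() below:
 *
 *	int order = fls(pa->pa_free) - 1;
 *	if (order > PREALLOC_TB_SIZE - 1)
 *		order = PREALLOC_TB_SIZE - 1;	// clamp to the table size
 *
 * so a PA with, say, 200 free blocks (fls() == 8) lands on list 7, and
 * the insertion below keeps each list ordered by pa_free.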
4096 */ 4097 4098static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac) 4099{ 4100 int order, added = 0, lg_prealloc_count = 1; 4101 struct super_block *sb = ac->ac_sb; 4102 struct ext4_locality_group *lg = ac->ac_lg; 4103 struct ext4_prealloc_space *tmp_pa, *pa = ac->ac_pa; 4104 4105 order = fls(pa->pa_free) - 1; 4106 if (order > PREALLOC_TB_SIZE - 1) 4107 /* The max size of the hash table is PREALLOC_TB_SIZE */ 4108 order = PREALLOC_TB_SIZE - 1; 4109 /* Add the prealloc space to lg */ 4110 rcu_read_lock(); 4111 list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[order], 4112 pa_inode_list) { 4113 spin_lock(&tmp_pa->pa_lock); 4114 if (tmp_pa->pa_deleted) { 4115 spin_unlock(&tmp_pa->pa_lock); 4116 continue; 4117 } 4118 if (!added && pa->pa_free < tmp_pa->pa_free) { 4119 /* Add to the tail of the previous entry */ 4120 list_add_tail_rcu(&pa->pa_inode_list, 4121 &tmp_pa->pa_inode_list); 4122 added = 1; 4123 /* 4124 * we want to count the total 4125 * number of entries in the list 4126 */ 4127 } 4128 spin_unlock(&tmp_pa->pa_lock); 4129 lg_prealloc_count++; 4130 } 4131 if (!added) 4132 list_add_tail_rcu(&pa->pa_inode_list, 4133 &lg->lg_prealloc_list[order]); 4134 rcu_read_unlock(); 4135 4136 /* Now trim the list so it has no more than 8 elements */ 4137 if (lg_prealloc_count > 8) { 4138 ext4_mb_discard_lg_preallocations(sb, lg, 4139 order, lg_prealloc_count); 4140 return; 4141 } 4142 return; 4143} 4144 4145/* 4146 * release all resources we used in allocation 4147 */ 4148static int ext4_mb_release_context(struct ext4_allocation_context *ac) 4149{ 4150 struct ext4_prealloc_space *pa = ac->ac_pa; 4151 if (pa) { 4152 if (pa->pa_type == MB_GROUP_PA) { 4153 /* see comment in ext4_mb_use_group_pa() */ 4154 spin_lock(&pa->pa_lock); 4155 pa->pa_pstart += ac->ac_b_ex.fe_len; 4156 pa->pa_lstart += ac->ac_b_ex.fe_len; 4157 pa->pa_free -= ac->ac_b_ex.fe_len; 4158 pa->pa_len -= ac->ac_b_ex.fe_len; 4159 spin_unlock(&pa->pa_lock); 4160 } 4161 } 4162 if (ac->alloc_semp) 4163 up_read(ac->alloc_semp); 4164 if (pa) { 4165 /* 4166 * We want to add the pa to the right bucket. 4167 * Remove it from the list and while adding 4168 * make sure the list to which we are adding 4169 * doesn't grow big.
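 * (The "right bucket" depends on pa_free, which was just reduced above:
 * a group PA is consumed from the front, pa_pstart/pa_lstart advancing
 * while pa_len/pa_free shrink, so a 512-block PA that served a 16-block
 * allocation now starts 16 blocks further in with 496 blocks left.)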
We need to release 4170 * alloc_semp before calling ext4_mb_add_n_trim() 4171 */ 4172 if ((pa->pa_type == MB_GROUP_PA) && likely(pa->pa_free)) { 4173 spin_lock(pa->pa_obj_lock); 4174 list_del_rcu(&pa->pa_inode_list); 4175 spin_unlock(pa->pa_obj_lock); 4176 ext4_mb_add_n_trim(ac); 4177 } 4178 ext4_mb_put_pa(ac, ac->ac_sb, pa); 4179 } 4180 if (ac->ac_bitmap_page) 4181 page_cache_release(ac->ac_bitmap_page); 4182 if (ac->ac_buddy_page) 4183 page_cache_release(ac->ac_buddy_page); 4184 if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) 4185 mutex_unlock(&ac->ac_lg->lg_mutex); 4186 ext4_mb_collect_stats(ac); 4187 return 0; 4188} 4189 4190static int ext4_mb_discard_preallocations(struct super_block *sb, int needed) 4191{ 4192 ext4_group_t i, ngroups = ext4_get_groups_count(sb); 4193 int ret; 4194 int freed = 0; 4195 4196 trace_ext4_mb_discard_preallocations(sb, needed); 4197 for (i = 0; i < ngroups && needed > 0; i++) { 4198 ret = ext4_mb_discard_group_preallocations(sb, i, needed); 4199 freed += ret; 4200 needed -= ret; 4201 } 4202 4203 return freed; 4204} 4205 4206/* 4207 * Main entry point into mballoc to allocate blocks; 4208 * it tries to use preallocation first, then falls back 4209 * to the usual allocation 4210 */ 4211ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle, 4212 struct ext4_allocation_request *ar, int *errp) 4213{ 4214 int freed; 4215 struct ext4_allocation_context *ac = NULL; 4216 struct ext4_sb_info *sbi; 4217 struct super_block *sb; 4218 ext4_fsblk_t block = 0; 4219 unsigned int inquota = 0; 4220 unsigned int reserv_blks = 0; 4221 4222 sb = ar->inode->i_sb; 4223 sbi = EXT4_SB(sb); 4224 4225 trace_ext4_request_blocks(ar); 4226 4227 /* 4228 * For delayed allocation, we can skip the ENOSPC and 4229 * EDQUOT checks, as blocks and quotas were already 4230 * reserved when the data was copied into the pagecache. 4231 */ 4232 if (EXT4_I(ar->inode)->i_delalloc_reserved_flag) 4233 ar->flags |= EXT4_MB_DELALLOC_RESERVED; 4234 else { 4235 /* Without delayed allocation we need to verify 4236 * there are enough free blocks to do the block allocation 4237 * and verify that the allocation doesn't exceed the quota limits.
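 *
 * The claim below shrinks the request geometrically until it fits; a
 * condensed sketch of that fallback loop, with illustrative names:
 *
 *	while (len && claim_free_blocks(sbi, len)) {
 *		yield();	// give others a chance to free space
 *		len >>= 1;
 *	}
 *	if (!len)
 *		return -ENOSPC;
 *
 * so a 256-block request is retried as 128, 64, ... 1 before giving up.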
4238 */ 4239 while (ar->len && ext4_claim_free_blocks(sbi, ar->len)) { 4240 /* let others free the space */ 4241 yield(); 4242 ar->len = ar->len >> 1; 4243 } 4244 if (!ar->len) { 4245 *errp = -ENOSPC; 4246 return 0; 4247 } 4248 reserv_blks = ar->len; 4249 while (ar->len && dquot_alloc_block(ar->inode, ar->len)) { 4250 ar->flags |= EXT4_MB_HINT_NOPREALLOC; 4251 ar->len--; 4252 } 4253 inquota = ar->len; 4254 if (ar->len == 0) { 4255 *errp = -EDQUOT; 4256 goto out; 4257 } 4258 } 4259 4260 ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS); 4261 if (!ac) { 4262 ar->len = 0; 4263 *errp = -ENOMEM; 4264 goto out; 4265 } 4266 4267 *errp = ext4_mb_initialize_context(ac, ar); 4268 if (*errp) { 4269 ar->len = 0; 4270 goto out; 4271 } 4272 4273 ac->ac_op = EXT4_MB_HISTORY_PREALLOC; 4274 if (!ext4_mb_use_preallocated(ac)) { 4275 ac->ac_op = EXT4_MB_HISTORY_ALLOC; 4276 ext4_mb_normalize_request(ac, ar); 4277repeat: 4278 /* allocate space in core */ 4279 *errp = ext4_mb_regular_allocator(ac); 4280 if (*errp) 4281 goto errout; 4282 4283 /* as we've just preallocated more space than 4284 * the user originally requested, we store the allocated 4285 * space in a special descriptor */ 4286 if (ac->ac_status == AC_STATUS_FOUND && 4287 ac->ac_o_ex.fe_len < ac->ac_b_ex.fe_len) 4288 ext4_mb_new_preallocation(ac); 4289 } 4290 if (likely(ac->ac_status == AC_STATUS_FOUND)) { 4291 *errp = ext4_mb_mark_diskspace_used(ac, handle, reserv_blks); 4292 if (*errp == -EAGAIN) { 4293 /* 4294 * drop the reference that we took 4295 * in ext4_mb_use_best_found 4296 */ 4297 ext4_mb_release_context(ac); 4298 ac->ac_b_ex.fe_group = 0; 4299 ac->ac_b_ex.fe_start = 0; 4300 ac->ac_b_ex.fe_len = 0; 4301 ac->ac_status = AC_STATUS_CONTINUE; 4302 goto repeat; 4303 } else if (*errp) 4304 errout: 4305 ext4_discard_allocated_blocks(ac); 4306 else { 4307 block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); 4308 ar->len = ac->ac_b_ex.fe_len; 4309 } 4310 } else { 4311 freed = ext4_mb_discard_preallocations(sb, ac->ac_o_ex.fe_len); 4312 if (freed) 4313 goto repeat; 4314 *errp = -ENOSPC; 4315 } 4316 4317 if (*errp) { 4318 ac->ac_b_ex.fe_len = 0; 4319 ar->len = 0; 4320 ext4_mb_show_ac(ac); 4321 } 4322 ext4_mb_release_context(ac); 4323out: 4324 if (ac) 4325 kmem_cache_free(ext4_ac_cachep, ac); 4326 if (inquota && ar->len < inquota) 4327 dquot_free_block(ar->inode, inquota - ar->len); 4328 if (!ar->len) { 4329 if (!EXT4_I(ar->inode)->i_delalloc_reserved_flag) 4330 /* release all the reserved blocks if non-delalloc */ 4331 percpu_counter_sub(&sbi->s_dirtyblocks_counter, 4332 reserv_blks); 4333 } 4334 4335 trace_ext4_allocate_blocks(ar, (unsigned long long)block); 4336 4337 return block; 4338} 4339 4340/* 4341 * We can merge two free data extents only if the physical blocks 4342 * are contiguous, AND the extents were freed by the same transaction, 4343 * AND the blocks are associated with the same group.
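 *
 * For example, an entry covering blocks 100-107 of group 5 freed under
 * tid 42 merges with one covering blocks 108-115 of group 5 under
 * tid 42 into a single 100-115 extent; the same pair freed by different
 * transactions must stay separate, because each extent may only be
 * reused once its own transaction has committed.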
4344 */ 4345static int can_merge(struct ext4_free_data *entry1, 4346 struct ext4_free_data *entry2) 4347{ 4348 if ((entry1->t_tid == entry2->t_tid) && 4349 (entry1->group == entry2->group) && 4350 ((entry1->start_blk + entry1->count) == entry2->start_blk)) 4351 return 1; 4352 return 0; 4353} 4354 4355static noinline_for_stack int 4356ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b, 4357 struct ext4_free_data *new_entry) 4358{ 4359 ext4_group_t group = e4b->bd_group; 4360 ext4_grpblk_t block; 4361 struct ext4_free_data *entry; 4362 struct ext4_group_info *db = e4b->bd_info; 4363 struct super_block *sb = e4b->bd_sb; 4364 struct ext4_sb_info *sbi = EXT4_SB(sb); 4365 struct rb_node **n = &db->bb_free_root.rb_node, *node; 4366 struct rb_node *parent = NULL, *new_node; 4367 4368 BUG_ON(!ext4_handle_valid(handle)); 4369 BUG_ON(e4b->bd_bitmap_page == NULL); 4370 BUG_ON(e4b->bd_buddy_page == NULL); 4371 4372 new_node = &new_entry->node; 4373 block = new_entry->start_blk; 4374 4375 if (!*n) { 4376 /* first free block extent. We need to 4377 * protect the buddy cache from being freed, 4378 * otherwise we'll refresh it from the 4379 * on-disk bitmap and lose not-yet-available 4380 * blocks */ 4381 page_cache_get(e4b->bd_buddy_page); 4382 page_cache_get(e4b->bd_bitmap_page); 4383 } 4384 while (*n) { 4385 parent = *n; 4386 entry = rb_entry(parent, struct ext4_free_data, node); 4387 if (block < entry->start_blk) 4388 n = &(*n)->rb_left; 4389 else if (block >= (entry->start_blk + entry->count)) 4390 n = &(*n)->rb_right; 4391 else { 4392 ext4_grp_locked_error(sb, group, 0, 4393 ext4_group_first_block_no(sb, group) + block, 4394 "Block already on to-be-freed list"); 4395 return 0; 4396 } 4397 } 4398 4399 rb_link_node(new_node, parent, n); 4400 rb_insert_color(new_node, &db->bb_free_root); 4401 4402 /* Now see if the extent can be merged to the left and right */ 4403 node = rb_prev(new_node); 4404 if (node) { 4405 entry = rb_entry(node, struct ext4_free_data, node); 4406 if (can_merge(entry, new_entry)) { 4407 new_entry->start_blk = entry->start_blk; 4408 new_entry->count += entry->count; 4409 rb_erase(node, &(db->bb_free_root)); 4410 spin_lock(&sbi->s_md_lock); 4411 list_del(&entry->list); 4412 spin_unlock(&sbi->s_md_lock); 4413 kmem_cache_free(ext4_free_ext_cachep, entry); 4414 } 4415 } 4416 4417 node = rb_next(new_node); 4418 if (node) { 4419 entry = rb_entry(node, struct ext4_free_data, node); 4420 if (can_merge(new_entry, entry)) { 4421 new_entry->count += entry->count; 4422 rb_erase(node, &(db->bb_free_root)); 4423 spin_lock(&sbi->s_md_lock); 4424 list_del(&entry->list); 4425 spin_unlock(&sbi->s_md_lock); 4426 kmem_cache_free(ext4_free_ext_cachep, entry); 4427 } 4428 } 4429 /* Add the extent to the transaction's private list */ 4430 spin_lock(&sbi->s_md_lock); 4431 list_add(&new_entry->list, &handle->h_transaction->t_private_list); 4432 spin_unlock(&sbi->s_md_lock); 4433 return 0; 4434} 4435 4436/** 4437 * ext4_free_blocks() -- Free given blocks and update quota 4438 * @handle: handle for this transaction 4439 * @inode: inode 4440 * @bh: buffer head of the block being freed, if any 4441 * @block: start physical block to free 4442 * @count: number of blocks to free * @flags: EXT4_FREE_BLOCKS_* flags controlling the free 4443 */ 4444void ext4_free_blocks(handle_t *handle, struct inode *inode, 4445 struct buffer_head *bh, ext4_fsblk_t block, 4446 unsigned long count, int flags) 4447{ 4448 struct buffer_head *bitmap_bh = NULL; 4449 struct super_block *sb = inode->i_sb; 4450 struct ext4_allocation_context *ac = NULL; 4451 struct ext4_group_desc *gdp; 4452 unsigned long
freed = 0; 4453 unsigned int overflow; 4454 ext4_grpblk_t bit; 4455 struct buffer_head *gd_bh; 4456 ext4_group_t block_group; 4457 struct ext4_sb_info *sbi; 4458 struct ext4_buddy e4b; 4459 int err = 0; 4460 int ret; 4461 4462 if (bh) { 4463 if (block) 4464 BUG_ON(block != bh->b_blocknr); 4465 else 4466 block = bh->b_blocknr; 4467 } 4468 4469 sbi = EXT4_SB(sb); 4470 if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) && 4471 !ext4_data_block_valid(sbi, block, count)) { 4472 ext4_error(sb, "Freeing blocks not in datazone - " 4473 "block = %llu, count = %lu", block, count); 4474 goto error_return; 4475 } 4476 4477 ext4_debug("freeing block %llu\n", block); 4478 trace_ext4_free_blocks(inode, block, count, flags); 4479 4480 if (flags & EXT4_FREE_BLOCKS_FORGET) { 4481 struct buffer_head *tbh = bh; 4482 int i; 4483 4484 BUG_ON(bh && (count > 1)); 4485 4486 for (i = 0; i < count; i++) { 4487 if (!bh) 4488 tbh = sb_find_get_block(inode->i_sb, 4489 block + i); 4490 ext4_forget(handle, flags & EXT4_FREE_BLOCKS_METADATA, 4491 inode, tbh, block + i); 4492 } 4493 } 4494 4495 /* 4496 * We need to make sure we don't reuse the freed block until 4497 * after the transaction is committed, which we can do by 4498 * treating the block as metadata, below. We make an 4499 * exception if the inode is to be written in writeback mode 4500 * since writeback mode has weak data consistency guarantees. 4501 */ 4502 if (!ext4_should_writeback_data(inode)) 4503 flags |= EXT4_FREE_BLOCKS_METADATA; 4504 4505 ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS); 4506 if (ac) { 4507 ac->ac_inode = inode; 4508 ac->ac_sb = sb; 4509 } 4510 4511do_more: 4512 overflow = 0; 4513 ext4_get_group_no_and_offset(sb, block, &block_group, &bit); 4514 4515 /* 4516 * Check to see if we are freeing blocks across a group 4517 * boundary. 4518 */ 4519 if (bit + count > EXT4_BLOCKS_PER_GROUP(sb)) { 4520 overflow = bit + count - EXT4_BLOCKS_PER_GROUP(sb); 4521 count -= overflow; 4522 } 4523 bitmap_bh = ext4_read_block_bitmap(sb, block_group); 4524 if (!bitmap_bh) { 4525 err = -EIO; 4526 goto error_return; 4527 } 4528 gdp = ext4_get_group_desc(sb, block_group, &gd_bh); 4529 if (!gdp) { 4530 err = -EIO; 4531 goto error_return; 4532 } 4533 4534 if (in_range(ext4_block_bitmap(sb, gdp), block, count) || 4535 in_range(ext4_inode_bitmap(sb, gdp), block, count) || 4536 in_range(block, ext4_inode_table(sb, gdp), 4537 EXT4_SB(sb)->s_itb_per_group) || 4538 in_range(block + count - 1, ext4_inode_table(sb, gdp), 4539 EXT4_SB(sb)->s_itb_per_group)) { 4540 4541 ext4_error(sb, "Freeing blocks in system zone - " 4542 "Block = %llu, count = %lu", block, count); 4543 /* err = 0. ext4_std_error should be a no op */ 4544 goto error_return; 4545 } 4546 4547 BUFFER_TRACE(bitmap_bh, "getting write access"); 4548 err = ext4_journal_get_write_access(handle, bitmap_bh); 4549 if (err) 4550 goto error_return; 4551 4552 /* 4553 * We are about to modify some metadata. 
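 * Both the bitmap buffer and the group descriptor buffer follow the
 * same journaling protocol: get write access before touching ->b_data,
 * make the change, then mark the buffer dirty via
 * ext4_handle_dirty_metadata() once the update is complete.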
Call the journal APIs 4554 * to unshare ->b_data if a currently-committing transaction is 4555 * using it 4556 */ 4557 BUFFER_TRACE(gd_bh, "get_write_access"); 4558 err = ext4_journal_get_write_access(handle, gd_bh); 4559 if (err) 4560 goto error_return; 4561#ifdef AGGRESSIVE_CHECK 4562 { 4563 int i; 4564 for (i = 0; i < count; i++) 4565 BUG_ON(!mb_test_bit(bit + i, bitmap_bh->b_data)); 4566 } 4567#endif 4568 if (ac) { 4569 ac->ac_b_ex.fe_group = block_group; 4570 ac->ac_b_ex.fe_start = bit; 4571 ac->ac_b_ex.fe_len = count; 4572 trace_ext4_mballoc_free(ac); 4573 } 4574 4575 err = ext4_mb_load_buddy(sb, block_group, &e4b); 4576 if (err) 4577 goto error_return; 4578 4579 if ((flags & EXT4_FREE_BLOCKS_METADATA) && ext4_handle_valid(handle)) { 4580 struct ext4_free_data *new_entry; 4581 /* 4582 * The blocks being freed are metadata; they shouldn't 4583 * be reused until this transaction is committed 4584 */ 4585 new_entry = kmem_cache_alloc(ext4_free_ext_cachep, GFP_NOFS); if (unlikely(!new_entry)) { /* don't dereference a failed allocation */ err = -ENOMEM; ext4_mb_unload_buddy(&e4b); goto error_return; } 4586 new_entry->start_blk = bit; 4587 new_entry->group = block_group; 4588 new_entry->count = count; 4589 new_entry->t_tid = handle->h_transaction->t_tid; 4590 4591 ext4_lock_group(sb, block_group); 4592 mb_clear_bits(bitmap_bh->b_data, bit, count); 4593 ext4_mb_free_metadata(handle, &e4b, new_entry); 4594 } else { 4595 /* We need to update group_info->bb_free and the bitmap 4596 * with the group lock held; generate_buddy looks at 4597 * them with the group lock held as well 4598 */ 4599 ext4_lock_group(sb, block_group); 4600 mb_clear_bits(bitmap_bh->b_data, bit, count); 4601 mb_free_blocks(inode, &e4b, bit, count); 4602 ext4_mb_return_to_preallocation(inode, &e4b, block, count); 4603 if (test_opt(sb, DISCARD)) 4604 ext4_issue_discard(sb, block_group, bit, count); 4605 } 4606 4607 ret = ext4_free_blks_count(sb, gdp) + count; 4608 ext4_free_blks_set(sb, gdp, ret); 4609 gdp->bg_checksum = ext4_group_desc_csum(sbi, block_group, gdp); 4610 ext4_unlock_group(sb, block_group); 4611 percpu_counter_add(&sbi->s_freeblocks_counter, count); 4612 4613 if (sbi->s_log_groups_per_flex) { 4614 ext4_group_t flex_group = ext4_flex_group(sbi, block_group); 4615 atomic_add(count, &sbi->s_flex_groups[flex_group].free_blocks); 4616 } 4617 4618 ext4_mb_unload_buddy(&e4b); 4619 4620 freed += count; 4621 4622 /* We dirtied the bitmap block */ 4623 BUFFER_TRACE(bitmap_bh, "dirtied bitmap block"); 4624 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh); 4625 4626 /* And the group descriptor block */ 4627 BUFFER_TRACE(gd_bh, "dirtied group descriptor block"); 4628 ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh); 4629 if (!err) 4630 err = ret; 4631 4632 if (overflow && !err) { 4633 block += count; 4634 count = overflow; 4635 put_bh(bitmap_bh); 4636 goto do_more; 4637 } 4638 ext4_mark_super_dirty(sb); 4639error_return: 4640 if (freed) 4641 dquot_free_block(inode, freed); 4642 brelse(bitmap_bh); 4643 ext4_std_error(sb, err); 4644 if (ac) 4645 kmem_cache_free(ext4_ac_cachep, ac); 4646 return; 4647} 4648
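
/*
 * A condensed sketch of the group-boundary handling in ext4_free_blocks()
 * above (variables are illustrative): when the run of blocks to free
 * crosses into the next block group, only the in-group part is freed per
 * pass and the tail is handled by looping back to do_more:
 *
 *	overflow = 0;
 *	if (bit + count > EXT4_BLOCKS_PER_GROUP(sb)) {
 *		overflow = bit + count - EXT4_BLOCKS_PER_GROUP(sb);
 *		count -= overflow;	// free only this group's part now
 *	}
 *	// ... free 'count' blocks at (block_group, bit) ...
 *	if (overflow && !err) {
 *		block += count;		// first block of the next group
 *		count = overflow;
 *		goto do_more;		// reread bitmap and descriptor
 *	}
 *
 * e.g. freeing 100 blocks that start 40 blocks before a group boundary
 * frees 40 blocks in the first group and, on the second pass, the
 * remaining 60 in the next.
 */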