1/* 2 * Copyright 2010, Ingo Weinhold, ingo_weinhold@gmx.de. 3 * Distributed under the terms of the MIT License. 4 */ 5 6 7#include "BlockAllocator.h" 8 9#include <string.h> 10 11#include <algorithm> 12 13#include <util/AutoLock.h> 14 15#include "Block.h" 16#include "checksumfs.h" 17#include "DebugSupport.h" 18#include "SuperBlock.h" 19#include "Volume.h" 20 21 22// This is a simple block allocator implementation. It manages the on-disk 23// block bitmap structures. Those consist of: 24// * The block bitmap itself: A contiguous run of blocks with one bit for each 25// of the blocks accessible by the file system, indicating whether the block 26// is used or free. The bit is set for a used, clear for a free block. For 27// faster access the bits are grouped in 32 bit integers (host endian ATM). 28// * The allocation groups blocks: An allocation group consists of the blocks 29// that 2048 block bitmap blocks refer to. Each allocation group is 30// represented by a single block which contains 2048 16 bit numbers (host 31// endian ATM), each of which referring to a block bitmap block and encoding 32// how many free blocks the block bitmap block refers to. 33// The blocks for the allocation groups are directly followed by the block 34// bitmap blocks. The blocks beyond the blocks accessible by the file system 35// that the last block bitmap block may refer to are marked used. Non-existing 36// block bitmap blocks referred to by the last allocation group block are set 37// to have 0 free blocks. 
38 39 40static const uint32 kBlocksPerBitmapBlock = B_PAGE_SIZE * 8; 41static const uint32 kBitmapBlocksPerGroup = B_PAGE_SIZE / 2; 42static const uint32 kBlocksPerGroup 43 = kBitmapBlocksPerGroup * kBlocksPerBitmapBlock; 44 45 46BlockAllocator::BlockAllocator(Volume* volume) 47 : 48 fVolume(volume), 49 fTotalBlocks(volume->TotalBlocks()), 50 fBitmapBlockCount((fTotalBlocks + B_PAGE_SIZE - 1) / B_PAGE_SIZE) 51{ 52 fAllocationGroupCount = (fBitmapBlockCount + kBitmapBlocksPerGroup - 1) 53 / kBitmapBlocksPerGroup; 54 mutex_init(&fLock, "checksumfs block allocator"); 55} 56 57 58BlockAllocator::~BlockAllocator() 59{ 60 mutex_destroy(&fLock); 61} 62 63 64status_t 65BlockAllocator::Init(uint64 blockBitmap, uint64 freeBlocks) 66{ 67 if (blockBitmap <= kCheckSumFSSuperBlockOffset / B_PAGE_SIZE 68 || blockBitmap >= fTotalBlocks 69 || fTotalBlocks - blockBitmap < fAllocationGroupCount) { 70 return B_BAD_DATA; 71 } 72 73 fFreeBlocks = freeBlocks; 74 fAllocationGroupBlock = blockBitmap; 75 fBitmapBlock = blockBitmap + fAllocationGroupCount; 76 77 return B_OK; 78} 79 80 81status_t 82BlockAllocator::Initialize(Transaction& transaction) 83{ 84 status_t error = Init(kCheckSumFSSuperBlockOffset / B_PAGE_SIZE + 1, 85 fTotalBlocks); 86 if (error != B_OK) 87 return error; 88 89 PRINT("BlockAllocator::Initialize():\n"); 90 PRINT(" fTotalBlocks: %" B_PRIu64 "\n", fTotalBlocks); 91 PRINT(" fFreeBlocks: %" B_PRIu64 "\n", fFreeBlocks); 92 PRINT(" fAllocationGroupBlock: %" B_PRIu64 "\n", fAllocationGroupBlock); 93 PRINT(" fAllocationGroupCount: %" B_PRIu64 "\n", fAllocationGroupCount); 94 PRINT(" fBitmapBlock: %" B_PRIu64 "\n", fBitmapBlock); 95 PRINT(" fBitmapBlockCount: %" B_PRIu64 "\n", fBitmapBlockCount); 96 97 // clear the block bitmap 98 for (uint64 i = 0; i < fBitmapBlockCount; i++) { 99 Block block; 100 if (!block.GetZero(fVolume, fBitmapBlock + i, transaction)) 101 return B_ERROR; 102 } 103 104 // the last block of the block bitmap may be partial -- mark the blocks 105 // beyond 
the end used 106 uint32 partialBitmapBlock = fTotalBlocks % kBlocksPerBitmapBlock; 107 if (partialBitmapBlock != 0) { 108 Block block; 109 if (!block.GetZero(fVolume, fBitmapBlock + fBitmapBlockCount - 1, 110 transaction)) { 111 return B_ERROR; 112 } 113 114 // set full uint32s 115 uint32* bits = (uint32*)block.Data(); 116 uint32 offset = (partialBitmapBlock + 31) / 32; 117 if (partialBitmapBlock <= B_PAGE_SIZE - 4) 118 memset(bits + offset, 0xff, B_PAGE_SIZE - offset * 4); 119 120 // set the partial uint32 121 uint32 bitOffset = partialBitmapBlock % 32; 122 if (bitOffset != 0) 123 bits[offset] = ~(((uint32)1 << bitOffset) - 1); 124 } 125 126 // init the allocation groups 127 uint32 partialGroup = fTotalBlocks % kBlocksPerGroup; 128 for (uint64 i = 0; i < fAllocationGroupCount; i++) { 129 Block block; 130 if (!block.GetZero(fVolume, fAllocationGroupBlock + i, transaction)) 131 return B_ERROR; 132 133 uint16* counts = (uint16*)block.Data(); 134 135 if (i < fAllocationGroupCount - 1 || partialGroup == 0) { 136 // not the last group or the last group is not partial -- all 137 // blocks are free 138 for (uint32 i = 0; i < kBitmapBlocksPerGroup; i++) 139 counts[i] = kBlocksPerBitmapBlock; 140 } else { 141 // the last, partial group 142 if (partialGroup != 0) { 143 uint32 offset = partialGroup / kBlocksPerBitmapBlock; 144 for (uint32 i = 0; i < offset; i++) 145 counts[i] = kBlocksPerBitmapBlock; 146 counts[offset] = partialBitmapBlock; 147 } 148 } 149 } 150 151 // mark all blocks we already use used 152 error = AllocateExactly(0, fBitmapBlock + fBitmapBlockCount, transaction); 153 if (error != B_OK) 154 return error; 155 156 PRINT("BlockAllocator::Initialize() done:\n"); 157 PRINT(" fFreeBlocks: %" B_PRIu64 "\n", fFreeBlocks); 158 159 return B_OK; 160} 161 162 163status_t 164BlockAllocator::Allocate(uint64 baseHint, uint64 count, 165 Transaction& transaction, uint64& _allocatedBase, uint64& _allocatedCount) 166{ 167 MutexLocker locker(fLock); 168 169 
PRINT("BlockAllocator::Allocate(%" B_PRIu64 ", %" B_PRIu64 ")\n", baseHint, 170 count); 171 172 if (fFreeBlocks == 0) 173 return B_DEVICE_FULL; 174 175 if (baseHint >= fTotalBlocks) 176 baseHint = 0; 177 178 // search from base hint to end 179 status_t error = _Allocate(baseHint, fTotalBlocks, count, transaction, 180 &_allocatedBase, _allocatedCount); 181 if (error == B_OK) 182 return _UpdateSuperBlock(transaction); 183 if (baseHint == 0) 184 return error; 185 186 // search from 0 to hint 187 error = _Allocate(0, baseHint, count, transaction, &_allocatedBase, 188 _allocatedCount); 189 if (error != B_OK) 190 return error; 191 192 return _UpdateSuperBlock(transaction); 193} 194 195 196status_t 197BlockAllocator::AllocateExactly(uint64 base, uint64 count, 198 Transaction& transaction) 199{ 200 MutexLocker locker(fLock); 201 202 PRINT("BlockAllocator::AllocateExactly(%" B_PRIu64 ", %" B_PRIu64 ")\n", 203 base, count); 204 205 uint64 allocated; 206 status_t error = _Allocate(base, fTotalBlocks, count, transaction, NULL, 207 allocated); 208 if (error != B_OK) 209 return error; 210 211 if (allocated < count) 212 return B_BUSY; 213 214 return _UpdateSuperBlock(transaction); 215} 216 217 218status_t 219BlockAllocator::Free(uint64 base, uint64 count, Transaction& transaction) 220{ 221 MutexLocker locker(fLock); 222 223 status_t error = _Free(base, count, transaction); 224 if (error != B_OK) 225 return error; 226 227 return _UpdateSuperBlock(transaction); 228} 229 230 231void 232BlockAllocator::ResetFreeBlocks(uint64 count) 233{ 234 MutexLocker locker(fLock); 235 236 fFreeBlocks = count; 237} 238 239 240/*! Allocates contiguous blocks. 241 242 Might allocate fewer block than requested. If no block could be allocated 243 at all, an error is returned. 244 If \a _allocatedBase is not \c NULL, the method may move the base up, if it 245 isn't able to allocate anything at the given base. 246 247 \param base The first potential block to allocate. 
248 \param searchEnd A hint for the method where to stop searching for free 249 blocks. 250 \param count The maximum number of blocks to allocate. 251 \param _allocatedBase If not \c NULL, the may allocate at a greater base. 252 The base of the actual allocation is returned via this variable. 253 \param _allocatedCount On success the variable will be set to the number of 254 blocks actually allocated. 255 \return \c B_OK, if one or more blocks could be allocated, another error 256 code otherwise. 257*/ 258status_t 259BlockAllocator::_Allocate(uint64 base, uint64 searchEnd, uint64 count, 260 Transaction& transaction, uint64* _allocatedBase, uint64& _allocatedCount) 261{ 262 ASSERT(base <= fTotalBlocks); 263 ASSERT(searchEnd <= fTotalBlocks); 264 265 if (base >= searchEnd || fFreeBlocks == 0) 266 RETURN_ERROR(B_BUSY); 267 268 uint64 groupOffset = base % kBlocksPerGroup; 269 uint64 remaining = count; 270 271 // If we're allowed to move the base, loop until we allocate something. 272 if (_allocatedBase != NULL) { 273 while (count > 0 && base < searchEnd) { 274 uint64 toAllocate = std::min(count, kBlocksPerGroup - groupOffset); 275 276 uint32 allocated; 277 status_t error = _AllocateInGroup(base, searchEnd, toAllocate, 278 transaction, _allocatedBase, allocated); 279 if (error == B_OK) { 280 fFreeBlocks -= toAllocate; 281 282 if (allocated == toAllocate) { 283 _allocatedCount = allocated; 284 return B_OK; 285 } 286 287 count = std::min(count, 288 searchEnd - (uint32)*_allocatedBase % kBlocksPerGroup); 289 _allocatedBase = NULL; 290 remaining = count - allocated; 291 292 break; 293 } 294 295 // nothing yet -- continue with the next group 296 count -= toAllocate; 297 base += toAllocate; 298 groupOffset = 0; 299 } 300 301 if (_allocatedBase != NULL) 302 return B_BUSY; 303 } 304 305 // We're not/no longer allowed to move the base. Loop as long as we can 306 // allocate what ask for. 
307 while (remaining > 0 && base < searchEnd) { 308 uint64 toAllocate = std::min(remaining, kBlocksPerGroup - groupOffset); 309 uint32 allocated; 310 status_t error = _AllocateInGroup(base, searchEnd, toAllocate, 311 transaction, NULL, allocated); 312 if (error != B_OK) 313 break; 314 315 fFreeBlocks -= toAllocate; 316 remaining -= allocated; 317 318 if (allocated < toAllocate) 319 break; 320 321 base += toAllocate; 322 groupOffset = 0; 323 } 324 325 if (remaining == count) 326 RETURN_ERROR(B_BUSY); 327 328 _allocatedCount = count - remaining; 329 return B_OK; 330} 331 332 333/*! Allocates contiguous blocks in an allocation group. 334 335 The range specified by \a base and \a count must lie fully within a single 336 allocation group. 337 338 Might allocate fewer block than requested. If no block could be allocated 339 at all, an error is returned. 340 If \a _allocatedBase is not \c NULL, the method may move the base up, if it 341 isn't able to allocate anything at the given base. 342 343 \param base The first potential block to allocate. 344 \param searchEnd A hint for the method where to stop searching for free 345 blocks. 346 \param count The maximum number of blocks to allocate. 347 \param _allocatedBase If not \c NULL, the may allocate at a greater base. 348 The base of the actual allocation is returned via this variable. 349 \param _allocatedCount On success the variable will be set to the number of 350 blocks actually allocated. 351 \return \c B_OK, if one or more blocks could be allocated, another error 352 code otherwise. 
*/
status_t
BlockAllocator::_AllocateInGroup(uint64 base, uint64 searchEnd, uint32 count,
	Transaction& transaction, uint64* _allocatedBase, uint32& _allocatedCount)
{
	PRINT("BlockAllocator::_AllocateInGroup(%" B_PRIu64 ", %" B_PRIu32 ")\n",
		base, count);

	ASSERT(count <= kBlocksPerGroup);
	ASSERT(base % kBlocksPerGroup + count <= kBlocksPerGroup);

	if (base >= searchEnd)
		RETURN_ERROR(B_BUSY);

	// Get the allocation group block: one uint16 free-count per bitmap block
	// of this group. Note: fFreeBlocks bookkeeping is done by the caller
	// (_Allocate), not here.
	Block block;
	if (!block.GetWritable(fVolume,
			fAllocationGroupBlock + base / kBlocksPerGroup, transaction)) {
		RETURN_ERROR(B_ERROR);
	}

	uint16* counts = (uint16*)block.Data();

	// index of the bitmap block within this group and offset within it
	uint32 blockIndex = base / kBlocksPerBitmapBlock % kBitmapBlocksPerGroup;
	uint64 inBlockOffset = base % kBlocksPerBitmapBlock;
	uint64 remaining = count;

	// If we're allowed to move the base, skip used blocks.
	if (_allocatedBase != NULL) {
		// check partial block -- base doesn't start on a bitmap block
		// boundary
		if (inBlockOffset != 0) {
			if (counts[blockIndex] > 0) {
				uint32 allocated;
				if (_AllocateInBitmapBlock(base, count, transaction,
						_allocatedBase, allocated) == B_OK) {
					counts[blockIndex] -= allocated;

					// Done if the allocation ended before the bitmap block
					// boundary (can't continue contiguously) or satisfied
					// the full request.
					// NOTE(review): inBlockOffset refers to the original
					// base, but the allocation may have started later (at
					// *_allocatedBase) -- confirm this boundary test is
					// intended to use the original offset.
					if (inBlockOffset + allocated < kBlocksPerBitmapBlock
						|| allocated == remaining) {
						_allocatedCount = allocated;
						return B_OK;
					}

					// continue in fixed-base mode; clamp to the capacity
					// left in this group after the moved base
					count = std::min(count,
						kBlocksPerGroup
							- (uint32)*_allocatedBase % kBlocksPerGroup);
					_allocatedBase = NULL;
					remaining = count - allocated;
				}
			}

			// advance to the next bitmap block boundary
			base += kBlocksPerBitmapBlock - inBlockOffset;
			inBlockOffset = 0;
			blockIndex++;
		}

		// skip completely used blocks
		if (_allocatedBase != NULL) {
			while (blockIndex < kBitmapBlocksPerGroup && base < searchEnd) {
				if (counts[blockIndex] > 0)
					break;

				base += kBlocksPerBitmapBlock;
				blockIndex++;
			}
		}

		// ran off the end of the group without finding a free block
		if (blockIndex == kBitmapBlocksPerGroup)
			return B_BUSY;

		// Clamp the count to allocate, if we have moved the base too far. Do
		// this only, if we haven't allocated anything so far.
		if (_allocatedBase != NULL) {
			count = std::min(count,
				kBlocksPerGroup - (uint32)base % kBlocksPerGroup);
			remaining = count;
		}
	}

	// Allocate as many of the requested blocks as we can.
	while (remaining > 0 && base < searchEnd) {
		// a zero count means the bitmap block is completely used
		if (counts[blockIndex] == 0)
			break;

		uint32 toAllocate = std::min(remaining,
			kBlocksPerBitmapBlock - inBlockOffset);

		uint32 allocated;
		status_t error = _AllocateInBitmapBlock(base, toAllocate, transaction,
			_allocatedBase, allocated);
		if (error != B_OK)
			break;

		counts[blockIndex] -= allocated;
		remaining -= allocated;

		// stopped short of the bitmap block boundary -- no contiguous
		// continuation possible
		if (allocated < toAllocate)
			break;

		base += allocated;
		blockIndex++;
		inBlockOffset = 0;
		// after the first (possibly base-moving) iteration, continue in
		// fixed-base mode
		_allocatedBase = NULL;
	}

	if (remaining == count)
		return B_BUSY;

	_allocatedCount = count - remaining;
	return B_OK;
}


/*!	Allocates contiguous blocks in a bitmap block.

	The range specified by \a base and \a count must lie fully within a single
	bitmap block.

	Might allocate fewer blocks than requested. If no block could be allocated
	at all, an error is returned.
	If \a _allocatedBase is not \c NULL, the method may move the base up, if it
	isn't able to allocate anything at the given base.

	\param base The first potential block to allocate.
	\param count The maximum number of blocks to allocate.
	\param _allocatedBase If not \c NULL, the method may allocate at a greater
		base. The base of the actual allocation is returned via this variable.
	\param _allocatedCount On success the variable will be set to the number of
		blocks actually allocated.
	\return \c B_OK, if one or more blocks could be allocated, another error
		code otherwise.
483*/ 484status_t 485BlockAllocator::_AllocateInBitmapBlock(uint64 base, uint32 count, 486 Transaction& transaction, uint64* _allocatedBase, uint32& _allocatedCount) 487{ 488 PRINT("BlockAllocator::_AllocateInBitmapBlock(%" B_PRIu64 ", %" B_PRIu32 489 ")\n", base, count); 490 491 ASSERT(count <= kBlocksPerBitmapBlock); 492 ASSERT(base % kBlocksPerBitmapBlock + count <= kBlocksPerBitmapBlock); 493 494 Block block; 495 if (!block.GetWritable(fVolume, 496 fBitmapBlock + base / kBlocksPerBitmapBlock, transaction)) { 497 RETURN_ERROR(B_ERROR); 498 } 499 500 uint32* bits = (uint32*)block.Data() 501 + base % kBlocksPerBitmapBlock / 32; 502 uint32* const bitsEnd = (uint32*)block.Data() + kBlocksPerBitmapBlock / 32; 503 uint32 bitOffset = base % 32; 504 505 // If we're allowed to move the base, skip used blocks. 506 if (_allocatedBase != NULL) { 507 // check partial uint32 at the beginning 508 bool foundBase = false; 509 if (bitOffset > 0) { 510 uint32 mask = ~(((uint32)1 << bitOffset) - 1); 511 if ((*bits & mask) != mask) { 512 while ((*bits & ((uint32)1 << bitOffset)) != 0) { 513 bitOffset++; 514 base++; 515 } 516 foundBase = true; 517 } else { 518 // all used -- skip 519 bits++; 520 base += 32 - bitOffset; 521 bitOffset = 0; 522 } 523 } 524 525 // check complete uint32s 526 if (!foundBase) { 527 while (bits < bitsEnd) { 528 if (*bits != 0xffffffff) { 529 bitOffset = 0; 530 while ((*bits & ((uint32)1 << bitOffset)) != 0) { 531 bitOffset++; 532 base++; 533 } 534 foundBase = true; 535 break; 536 } 537 538 bits++; 539 base += 32; 540 } 541 } 542 543 if (!foundBase) 544 return B_BUSY; 545 546 // Clamp the count to allocate, if we have moved the base too far. 547 if (base % kBlocksPerBitmapBlock + count > kBlocksPerBitmapBlock) 548 count = kBlocksPerBitmapBlock - base % kBlocksPerBitmapBlock; 549 } 550 551 // Allocate as many of the requested blocks as we can, starting at the base 552 // we have. 
553 uint32 remaining = count; 554 555 while (remaining > 0 && bits < bitsEnd) { 556 PRINT(" remaining: %" B_PRIu32 ", index: %" B_PRIu32 ".%" B_PRIu32 557 ", bits: %#" B_PRIx32 "\n", remaining, 558 kBlocksPerBitmapBlock - (bitsEnd - bits), bitOffset, *bits); 559 560 // TODO: Not particularly efficient for large allocations. 561 uint32 endOffset = std::min(bitOffset + remaining, (uint32)32); 562 for (; bitOffset < endOffset; bitOffset++) { 563 if ((*bits & ((uint32)1 << bitOffset)) != 0) { 564 bits = bitsEnd; 565 break; 566 } 567 568 *bits |= (uint32)1 << bitOffset; 569 remaining--; 570 } 571 572 bits++; 573 bitOffset = 0; 574 } 575 576 if (remaining == count) 577 RETURN_ERROR(B_BUSY); 578 579 _allocatedCount = count - remaining; 580 if (_allocatedBase != NULL) 581 *_allocatedBase = base; 582 583 return B_OK; 584} 585 586 587status_t 588BlockAllocator::_Free(uint64 base, uint64 count, Transaction& transaction) 589{ 590 if (count == 0) 591 return B_OK; 592 593 PRINT("BlockAllocator::_Free(%" B_PRIu64 ", %" B_PRIu64 ")\n", base, count); 594 595 ASSERT(count <= fTotalBlocks - fFreeBlocks); 596 ASSERT(base < fTotalBlocks && fTotalBlocks - base >= count); 597 598 uint64 groupOffset = base % kBlocksPerGroup; 599 uint64 remaining = count; 600 601 while (remaining > 0) { 602 uint64 toFree = std::min(remaining, kBlocksPerGroup - groupOffset); 603 status_t error = _FreeInGroup(base, toFree, transaction); 604 if (error != B_OK) 605 RETURN_ERROR(error); 606 607 fFreeBlocks += toFree; 608 remaining -= toFree; 609 base += toFree; 610 groupOffset = 0; 611 } 612 613 return B_OK; 614} 615 616 617status_t 618BlockAllocator::_FreeInGroup(uint64 base, uint32 count, 619 Transaction& transaction) 620{ 621 if (count == 0) 622 return B_OK; 623 624 PRINT("BlockAllocator::_FreeInGroup(%" B_PRIu64 ", %" B_PRIu32 ")\n", 625 base, count); 626 627 ASSERT(count <= kBlocksPerGroup); 628 ASSERT(base % kBlocksPerGroup + count <= kBlocksPerGroup); 629 630 Block block; 631 if 
(!block.GetWritable(fVolume, 632 fAllocationGroupBlock + base / kBlocksPerGroup, transaction)) { 633 RETURN_ERROR(B_ERROR); 634 } 635 636 uint16* counts = (uint16*)block.Data(); 637 638 uint32 blockIndex = base / kBlocksPerBitmapBlock % kBitmapBlocksPerGroup; 639 uint64 inBlockOffset = base % kBlocksPerBitmapBlock; 640 uint64 remaining = count; 641 642 while (remaining > 0) { 643 uint32 toFree = std::min(remaining, 644 kBlocksPerBitmapBlock - inBlockOffset); 645 646 if (counts[blockIndex] + toFree > kBlocksPerBitmapBlock) 647 RETURN_ERROR(B_BAD_VALUE); 648 649 status_t error = _FreeInBitmapBlock(base, toFree, transaction); 650 if (error != B_OK) 651 RETURN_ERROR(error); 652 653 counts[blockIndex] += toFree; 654 remaining -= toFree; 655 base += toFree; 656 blockIndex++; 657 inBlockOffset = 0; 658 } 659 660 return B_OK; 661} 662 663 664status_t 665BlockAllocator::_FreeInBitmapBlock(uint64 base, uint32 count, 666 Transaction& transaction) 667{ 668 PRINT("BlockAllocator::_FreeInBitmapBlock(%" B_PRIu64 ", %" B_PRIu32 ")\n", 669 base, count); 670 671 ASSERT(count <= kBlocksPerBitmapBlock); 672 ASSERT(base % kBlocksPerBitmapBlock + count <= kBlocksPerBitmapBlock); 673 674 Block block; 675 if (!block.GetWritable(fVolume, 676 fBitmapBlock + base / kBlocksPerBitmapBlock, transaction)) { 677 RETURN_ERROR(B_ERROR); 678 } 679 680 uint32* bits = (uint32*)block.Data() + base % kBlocksPerBitmapBlock / 32; 681 uint32 bitOffset = base % 32; 682 uint32 remaining = count; 683 684 // handle partial uint32 at the beginning 685 if (bitOffset > 0) { 686 uint32 endOffset = std::min(bitOffset + remaining, (uint32)32); 687 688 uint32 mask = ~(((uint32)1 << bitOffset) - 1); 689 if (endOffset < 32) 690 mask &= ((uint32)1 << endOffset) - 1; 691 692 if ((*bits & mask) != mask) 693 RETURN_ERROR(B_BAD_VALUE); 694 695 *bits &= ~mask; 696 remaining -= endOffset - bitOffset; 697 } 698 699 // handle complete uint32s in the middle 700 while (remaining >= 32) { 701 if (*bits != 0xffffffff) 702 
RETURN_ERROR(B_BUSY); 703 704 *bits = 0; 705 remaining -= 32; 706 } 707 708 // handle partial uint32 at the end 709 if (remaining > 0) { 710 uint32 mask = ((uint32)1 << remaining) - 1; 711 712 if ((*bits & mask) != mask) 713 return B_BUSY; 714 715 *bits &= ~mask; 716 } 717 718 return B_OK; 719} 720 721 722status_t 723BlockAllocator::_UpdateSuperBlock(Transaction& transaction) 724{ 725 // write the superblock 726 Block block; 727 if (!block.GetWritable(fVolume, kCheckSumFSSuperBlockOffset / B_PAGE_SIZE, 728 transaction)) { 729 return B_ERROR; 730 } 731 732 SuperBlock* superBlock = (SuperBlock*)block.Data(); 733 superBlock->SetFreeBlocks(fFreeBlocks); 734 735 block.Put(); 736 737 return B_OK; 738} 739