Lines Matching defs:numBlocks
Occurrences of the numBlocks symbol in the HFS volume allocation code, listed by source line number.

354 	register u_int32_t	numBlocks);
359 u_int32_t numBlocks,
366 u_int32_t numBlocks, struct jnl_trim_list *list);
402 u_int32_t numBlocks);
407 u_int32_t numBlocks);
412 u_int32_t numBlocks,
425 u_int32_t numBlocks,
501 ; numBlocks - The number of allocation blocks of the extent being freed.
504 static void hfs_unmap_free_extent(struct hfsmount *hfsmp, u_int32_t startingBlock, u_int32_t numBlocks)
512 KERNEL_DEBUG_CONSTANT(HFSDBG_UNMAP_FREE | DBG_FUNC_START, startingBlock, numBlocks, 0, 0, 0);
515 if (hfs_isallocated(hfsmp, startingBlock, numBlocks)) {
516 panic("hfs: %p: (%u,%u) unmapping allocated blocks", hfsmp, startingBlock, numBlocks);
523 length = (u_int64_t) numBlocks * hfsmp->blockSize;
568 ; numBlocks - The number of allocation blocks of the extent being freed.
573 u_int32_t numBlocks, struct jnl_trim_list *list) {
582 length = (u_int64_t) numBlocks * hfsmp->blockSize;
642 ; numBlocks - The number of allocation blocks being allocated.
645 static void hfs_unmap_alloc_extent(struct hfsmount *hfsmp, u_int32_t startingBlock, u_int32_t numBlocks)
652 KERNEL_DEBUG_CONSTANT(HFSDBG_UNMAP_ALLOC | DBG_FUNC_START, startingBlock, numBlocks, 0, 0, 0);
656 length = (u_int64_t) numBlocks * hfsmp->blockSize;
701 uint32_t startBlock, numBlocks;
710 numBlocks = extents[i].length / hfsmp->blockSize;
711 (void) add_free_extent_cache(hfsmp, startBlock, numBlocks);
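
The unmap helpers above convert an extent expressed in allocation blocks into a 64-bit byte range (lines 523, 582, 656), and the trim-completion loop converts completed byte extents back into blocks before repopulating the free extent cache (lines 710-711). A minimal standalone sketch of both conversions, assuming the byte offsets have already been rebased to allocation block 0 (the listing does not show the volume's base byte offset):

    #include <stdint.h>

    struct byte_extent  { uint64_t offset, length; };   /* device byte range */
    struct block_extent { uint32_t start,  count;  };   /* allocation blocks */

    /* Widen to 64 bits before multiplying, as at lines 523/582/656, so a large
     * numBlocks cannot overflow a 32-bit product. */
    static struct byte_extent
    blocks_to_bytes(uint32_t startingBlock, uint32_t numBlocks, uint32_t blockSize)
    {
        struct byte_extent ext;
        ext.offset = (uint64_t)startingBlock * blockSize;
        ext.length = (uint64_t)numBlocks * blockSize;
        return ext;
    }

    /* Inverse conversion used when a trim completes (lines 710-711); the caller
     * would hand the result to add_free_extent_cache(). */
    static struct block_extent
    bytes_to_blocks(struct byte_extent ext, uint32_t blockSize)
    {
        struct block_extent blk;
        blk.start = (uint32_t)(ext.offset / blockSize);
        blk.count = (uint32_t)(ext.length / blockSize);
        return blk;
    }
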
1109 ; numBlocks - Number of allocation blocks to free up (must be > 0!)
1123 u_int32_t numBlocks, // Number of contiguous blocks to deallocate
1131 KERNEL_DEBUG_CONSTANT(HFSDBG_BLOCK_DEALLOCATE | DBG_FUNC_START, firstBlock, numBlocks, flags, 0, 0);
1136 if (numBlocks == 0) {
1147 if ((firstBlock + numBlocks) >= hfsmp->totalBlocks) {
1175 err = BlockMarkFreeRBTree(vcb, firstBlock, numBlocks);
1179 err = BlockMarkFreeInternal(vcb, firstBlock, numBlocks, true);
1183 err = BlockMarkFreeInternal(vcb, firstBlock, numBlocks, true);
1198 vcb->freeBlocks += numBlocks;
1201 vcb->hfs_freed_block_count += numBlocks;
1203 if (vcb->nextAllocation == (firstBlock + numBlocks)) {
1204 HFS_UPDATE_NEXT_ALLOCATION(vcb, (vcb->nextAllocation - numBlocks));
1212 (void) add_free_extent_cache(vcb, firstBlock, numBlocks);
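
Lines 1131-1212 are the core of BlockDeallocate: sanity-check the range, clear the bitmap bits, then update the volume counters and the free extent cache. A condensed sketch of just the bookkeeping, with a stand-in struct for the relevant ExtendedVCB fields, the bitmap call elided, and a placeholder error value for a bad range:

    #include <stdint.h>
    #include <errno.h>

    struct vcb_counters {
        uint32_t totalBlocks;
        uint32_t freeBlocks;
        uint32_t nextAllocation;
    };

    static int
    deallocate_bookkeeping(struct vcb_counters *vcb,
                           uint32_t firstBlock, uint32_t numBlocks)
    {
        if (numBlocks == 0)
            return 0;                                 /* nothing to free */
        if (firstBlock + numBlocks >= vcb->totalBlocks)
            return EINVAL;                            /* bounds check as at line 1147 */

        /* ... BlockMarkFreeInternal(vcb, firstBlock, numBlocks, true) ... */

        vcb->freeBlocks += numBlocks;
        if (vcb->nextAllocation == firstBlock + numBlocks)
            vcb->nextAllocation -= numBlocks;         /* back nextAllocation up over
                                                         the freed range (line 1204) */

        /* ... add_free_extent_cache(vcb, firstBlock, numBlocks) ... */
        return 0;
    }
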
2194 register u_int32_t numBlocks)
2212 err = BlockMarkAllocatedRBTree(vcb, startingBlock, numBlocks);
2216 if (!hfs_isallocated(hfsmp, startingBlock, numBlocks)) {
2218 startingBlock, numBlocks);
2220 check_rbtree_extents (hfsmp, startingBlock, numBlocks, ASSERT_ALLOC);
2229 return BlockMarkAllocatedInternal(vcb, startingBlock, numBlocks);
2251 numBlocks Number of blocks to mark as allocated
2258 register u_int32_t numBlocks)
2274 KERNEL_DEBUG_CONSTANT(HFSDBG_MARK_ALLOC_BITMAP | DBG_FUNC_START, startingBlock, numBlocks, 0, 0, 0);
2276 hfs_unmap_alloc_extent(vcb, startingBlock, numBlocks);
2313 if (numBits > numBlocks) {
2314 numBits = numBlocks; // entire allocation is inside this one word
2323 numBlocks -= numBits; // adjust number of blocks left to allocate
2334 while (numBlocks >= kBitsPerWord) {
2361 numBlocks -= kBitsPerWord;
2371 if (numBlocks != 0) {
2372 bitMask = ~(kAllBitsSetInWord >> numBlocks); // set first numBlocks bits
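
Lines 2313-2372 show the word-at-a-time structure of BlockMarkAllocatedInternal: a partial leading word, a run of whole words, then a partial trailing word masked with ~(kAllBitsSetInWord >> numBlocks). A self-contained sketch of that walk over an in-memory bitmap, assuming 32-bit words with the most significant bit standing for the lowest-numbered block, and ignoring the on-disk endian handling and bitmap-block boundaries the real code deals with:

    #include <stdint.h>

    #define kBitsPerWord       32u
    #define kAllBitsSetInWord  0xFFFFFFFFu

    static void
    set_bit_range(uint32_t *bitmap, uint32_t startBit, uint32_t numBits)
    {
        uint32_t *word     = bitmap + (startBit / kBitsPerWord);
        uint32_t  firstBit = startBit % kBitsPerWord;

        if (firstBit != 0) {                              /* partial leading word */
            uint32_t bits = kBitsPerWord - firstBit;      /* bits left in this word */
            uint32_t mask = kAllBitsSetInWord >> firstBit;
            if (bits > numBits) {                         /* range ends in this word */
                bits = numBits;
                mask &= ~(kAllBitsSetInWord >> (firstBit + numBits));
            }
            *word++ |= mask;
            numBits -= bits;
        }
        while (numBits >= kBitsPerWord) {                 /* whole words */
            *word++ = kAllBitsSetInWord;
            numBits -= kBitsPerWord;
        }
        if (numBits != 0)                                 /* partial trailing word */
            *word |= ~(kAllBitsSetInWord >> numBits);     /* set first numBits bits */
    }
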
2427 u_int32_t numBlocks)
2436 if (hfs_isallocated(hfsmp, startingBlock, numBlocks)) {
2438 startingBlock, numBlocks);
2440 check_rbtree_extents (VCBTOHFS(vcb), startingBlock, numBlocks, ASSERT_FREE);
2443 err = BlockMarkAllocatedInternal (vcb, startingBlock, numBlocks);
2448 if (!hfs_isallocated(hfsmp, startingBlock, numBlocks)) {
2450 startingBlock, numBlocks);
2457 rb_err = extent_tree_offset_alloc_space(&hfsmp->offset_tree, numBlocks, startingBlock);
2475 rb_err = extent_tree_offset_alloc_unaligned (&hfsmp->offset_tree, numBlocks, startingBlock);
2485 check_rbtree_extents (VCBTOHFS(vcb), startingBlock, numBlocks, ASSERT_ALLOC);
2523 register u_int32_t numBlocks)
2540 err = BlockMarkFreeRBTree(vcb, startingBlock, numBlocks);
2544 if (hfs_isallocated(hfsmp, startingBlock, numBlocks)) {
2546 startingBlock, numBlocks);
2548 check_rbtree_extents (hfsmp, startingBlock, numBlocks, ASSERT_FREE);
2555 return BlockMarkFreeInternal(vcb, startingBlock, numBlocks, true);
2580 * numBlocks: Number of blocks in the range to mark unused
2584 OSErr BlockMarkFreeUnused(ExtendedVCB *vcb, u_int32_t startingBlock, register u_int32_t numBlocks)
2606 if (curNumBlocks > numBlocks) {
2607 curNumBlocks = numBlocks;
2614 numBlocks -= curNumBlocks;
2629 while (numBlocks) {
2630 if (numBlocks >= bitsPerBlock) {
2633 curNumBlocks = numBlocks;
2642 numBlocks -= curNumBlocks;
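
Lines 2606-2642 show how BlockMarkFreeUnused splits a large range into per-bitmap-block chunks: the first chunk is clamped so it ends at a bitmap-block boundary (or at the end of the range), then whole blocks follow, then the tail. A sketch of just that chunking, with the per-chunk buffer handling replaced by a callback; the boundary arithmetic for the first chunk is an assumption, since the listing only shows the clamp:

    #include <stdint.h>

    typedef void (*chunk_fn)(uint32_t startBlock, uint32_t numBlocks);

    static void
    walk_range_in_chunks(uint32_t startingBlock, uint32_t numBlocks,
                         uint32_t bitsPerBlock, chunk_fn process)
    {
        if (numBlocks == 0)
            return;

        /* first chunk: run to the next bitmap-block boundary, clamped to the
         * range (the clamp is lines 2606-2607) */
        uint32_t curNumBlocks = bitsPerBlock - (startingBlock % bitsPerBlock);
        if (curNumBlocks > numBlocks)
            curNumBlocks = numBlocks;
        process(startingBlock, curNumBlocks);
        startingBlock += curNumBlocks;
        numBlocks     -= curNumBlocks;

        while (numBlocks) {                               /* lines 2629-2642 */
            curNumBlocks = (numBlocks >= bitsPerBlock) ? bitsPerBlock : numBlocks;
            process(startingBlock, curNumBlocks);
            startingBlock += curNumBlocks;
            numBlocks     -= curNumBlocks;
        }
    }
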
2660 numBlocks Number of blocks to mark as freed
2676 u_int32_t numBlocks = numBlocks_in;
2700 (startingBlock + numBlocks > vcb->totalBlocks)) {
2702 panic ("BlockMarkFreeInternal() free non-existent blocks at %u (numBlocks=%u) on vol %s\n", startingBlock, numBlocks, vcb->vcbVN);
2705 printf ("hfs: BlockMarkFreeInternal() trying to free non-existent blocks starting at %u (numBlocks=%u) on volume %s\n", startingBlock, numBlocks, vcb->vcbVN);
2764 if (numBits > numBlocks) {
2765 numBits = numBlocks; // entire allocation is inside this one word
2773 numBlocks -= numBits; // adjust number of blocks left to free
2783 while (numBlocks >= kBitsPerWord) {
2809 numBlocks -= kBitsPerWord;
2819 if (numBlocks != 0) {
2820 bitMask = ~(kAllBitsSetInWord >> numBlocks); // set first numBlocks bits
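
The free path in BlockMarkFreeInternal (lines 2764-2820) walks the identical leading/whole/trailing word pattern but clears the bits; under the same bit-order assumptions as the allocation sketch above, the only difference is how each mask is applied:

    #include <stdint.h>

    static void
    clear_bit_range(uint32_t *bitmap, uint32_t startBit, uint32_t numBits)
    {
        uint32_t *word     = bitmap + (startBit / 32);
        uint32_t  firstBit = startBit % 32;

        if (firstBit != 0) {                              /* partial leading word */
            uint32_t bits = 32 - firstBit;
            uint32_t mask = 0xFFFFFFFFu >> firstBit;
            if (bits > numBits) {
                bits = numBits;
                mask &= ~(0xFFFFFFFFu >> (firstBit + numBits));
            }
            *word++ &= ~mask;                             /* clear instead of set */
            numBits -= bits;
        }
        while (numBits >= 32) {                           /* whole words */
            *word++ = 0;
            numBits -= 32;
        }
        if (numBits != 0)                                 /* partial trailing word */
            *word &= 0xFFFFFFFFu >> numBits;              /* clear first numBits bits */
    }
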
2913 register u_int32_t numBlocks)
2921 if (!hfs_isallocated(hfsmp, startingBlock, numBlocks)) {
2923 startingBlock, numBlocks);
2925 check_rbtree_extents (VCBTOHFS(vcb), startingBlock, numBlocks, ASSERT_ALLOC);
2928 err = BlockMarkFreeInternal(vcb, startingBlock, numBlocks, true);
2950 if (hfs_isallocated(hfsmp, startingBlock, numBlocks)) {
2952 startingBlock, numBlocks);
2969 newnode = extent_tree_free_space(&hfsmp->offset_tree, numBlocks, startingBlock);
2976 check_rbtree_extents (VCBTOHFS(vcb), startingBlock, numBlocks, ASSERT_FREE);
3370 u_int32_t numBlocks, int shouldBeFree) {
3375 alloc = hfs_isrbtree_allocated (hfsmp, startBlocks, numBlocks, &node1);
3391 node1, off1, len1, startBlocks, numBlocks);
3402 node1, off1, len1, startBlocks, numBlocks);
3488 u_int32_t numBlocks, extent_node_t **ret_node) {
3499 search_sentinel.length = numBlocks;
3517 if ((node->offset + node->length) >= (startBlock + numBlocks)) {
3565 * numBlocks Total number of blocks that need to be scanned.
3583 u_int32_t numBlocks, Boolean stop_on_first, u_int32_t *allocCount)
3598 KERNEL_DEBUG_CONSTANT(HFSDBG_IS_ALLOCATED | DBG_FUNC_START, startingBlock, numBlocks, stop_on_first, 0, 0);
3628 if (numBits > numBlocks) {
3629 numBits = numBlocks;
3639 numBlocks -= numBits;
3647 while (numBlocks >= kBitsPerWord) {
3670 numBlocks -= kBitsPerWord;
3678 if (numBlocks != 0) {
3679 bitMask = ~(kAllBitsSetInWord >> numBlocks);
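
hfs_isallocated_internal (lines 3628-3679) reuses the same word walk but only reads: it counts allocated blocks in the range and, when stop_on_first is set, returns as soon as it finds one. A sketch under the same bit-order assumptions; the popcount is used here for brevity and is not necessarily how the real code counts:

    #include <stdbool.h>
    #include <stdint.h>

    static uint32_t
    count_allocated(const uint32_t *bitmap, uint32_t startBit, uint32_t numBits,
                    bool stop_on_first)
    {
        const uint32_t *word = bitmap + (startBit / 32);
        uint32_t firstBit    = startBit % 32;
        uint32_t count       = 0;

        if (firstBit != 0) {                              /* partial leading word */
            uint32_t bits = 32 - firstBit;
            uint32_t mask = 0xFFFFFFFFu >> firstBit;
            if (bits > numBits) {
                bits = numBits;
                mask &= ~(0xFFFFFFFFu >> (firstBit + numBits));
            }
            count += (uint32_t)__builtin_popcount(*word++ & mask);  /* GCC/Clang builtin */
            if (stop_on_first && count)
                return count;
            numBits -= bits;
        }
        while (numBits >= 32) {                           /* whole words */
            count += (uint32_t)__builtin_popcount(*word++);
            if (stop_on_first && count)
                return count;
            numBits -= 32;
        }
        if (numBits != 0)                                 /* partial trailing word */
            count += (uint32_t)__builtin_popcount(*word & ~(0xFFFFFFFFu >> numBits));

        return count;
    }
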
3730 u_int32_t numBlocks, u_int32_t *allocCount)
3732 return hfs_isallocated_internal(hfsmp, startBlock, numBlocks, false, allocCount);
3752 hfs_isallocated(struct hfsmount *hfsmp, u_int32_t startingBlock, u_int32_t numBlocks)
3757 error = hfs_isallocated_internal(hfsmp, startingBlock, numBlocks, true, &allocCount);
3942 u_int32_t numBlocks = 1;
3979 if (numBits > numBlocks) {
3980 numBits = numBlocks;
3987 numBlocks -= numBits;