Lines Matching refs:block

48  * map is only updated on allocation.  Each metadata block contains
309 * @block: block of interest
313 * Check to see if the allocation can fit in the block's contig hint.
314 * Note, a chunk uses the same hints as a block so this can also check against
317 static bool pcpu_check_block_hint(struct pcpu_block_md *block, int bits,
320 int bit_off = ALIGN(block->contig_hint_start, align) -
321 block->contig_hint_start;
323 return bit_off + bits <= block->contig_hint;
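
The check excerpted above needs nothing else from the allocator, so it can be exercised on its own. A minimal userspace sketch, assuming a cut-down stand-in for struct pcpu_block_md with only the two fields the check reads:

        #include <assert.h>
        #include <stdbool.h>
        #include <stddef.h>

        #define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))   /* userspace stand-in for the kernel macro */

        struct block_hint {                  /* cut-down stand-in for pcpu_block_md */
                int contig_hint_start;       /* bit offset of the largest known free run */
                int contig_hint;             /* length of that run in bits */
        };

        /* mirrors pcpu_check_block_hint(): can @bits fit in the hinted run at @align? */
        static bool check_block_hint(const struct block_hint *b, int bits, size_t align)
        {
                int bit_off = ALIGN(b->contig_hint_start, align) - b->contig_hint_start;

                return bit_off + bits <= b->contig_hint;
        }

        int main(void)
        {
                struct block_hint b = { .contig_hint_start = 5, .contig_hint = 8 };

                assert(check_block_hint(&b, 6, 1));    /* 6 unaligned bits fit in the 8-bit run */
                assert(!check_block_hint(&b, 6, 4));   /* aligning 5 up to 8 wastes 3 bits; 6 no longer fit */
                return 0;
        }

The point of the hint is exactly this: a cheap, possibly pessimistic answer to "can the request fit here at this alignment" without touching the bitmap.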
328 * @block: block of interest
337 static int pcpu_next_hint(struct pcpu_block_md *block, int alloc_bits)
346 if (block->scan_hint &&
347 block->contig_hint_start > block->scan_hint_start &&
348 alloc_bits > block->scan_hint)
349 return block->scan_hint_start + block->scan_hint;
351 return block->first_free;
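
pcpu_next_hint() picks where a scan should start: past the scan_hint run when the request is too big to fit there anyway, otherwise at first_free. A sketch mirroring that decision, again with a simplified struct:

        #include <assert.h>

        struct hints {                        /* simplified stand-in for pcpu_block_md */
                int first_free;               /* first free bit in the block */
                int scan_hint_start;          /* a known free run sitting before the contig hint */
                int scan_hint;                /* ... and its length in bits */
                int contig_hint_start;
        };

        /* mirrors pcpu_next_hint(): pick the lowest offset worth scanning from */
        static int next_hint(const struct hints *h, int alloc_bits)
        {
                /*
                 * If the request cannot fit in the scan_hint run and that run sits
                 * below the contig hint, there is no point rescanning it.
                 */
                if (h->scan_hint &&
                    h->contig_hint_start > h->scan_hint_start &&
                    alloc_bits > h->scan_hint)
                        return h->scan_hint_start + h->scan_hint;

                return h->first_free;
        }

        int main(void)
        {
                struct hints h = {
                        .first_free = 2,
                        .scan_hint_start = 4, .scan_hint = 3,
                        .contig_hint_start = 20,
                };

                assert(next_hint(&h, 8) == 7);   /* 8 bits cannot fit in the 3-bit run: skip past it */
                assert(next_hint(&h, 2) == 2);   /* 2 bits might fit earlier: start at first_free */
                return 0;
        }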
361 * block->contig_hint and performs aggregation across blocks to find the
370 struct pcpu_block_md *block;
373 for (block = chunk->md_blocks + i; i < pcpu_chunk_nr_blocks(chunk);
374 block++, i++) {
377 *bits += block->left_free;
378 if (block->left_free == PCPU_BITMAP_BLOCK_BITS)
388 * the next block and should be handled by the contig area
391 *bits = block->contig_hint;
392 if (*bits && block->contig_hint_start >= block_off &&
393 *bits + block->contig_hint_start < PCPU_BITMAP_BLOCK_BITS) {
395 block->contig_hint_start);
401 *bits = block->right_free;
402 *bit_off = (i + 1) * PCPU_BITMAP_BLOCK_BITS - block->right_free;
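
This iterator (and the fit-region iterator excerpted next) stitches free space together across block boundaries: a region that ends one block continues into the next when the first block's right_free meets the next block's left_free, and entirely free blocks contribute their full width. A toy userspace illustration of that aggregation, with an assumed tiny block size:

        #include <assert.h>

        #define BLOCK_BITS 16                 /* toy block size, just for the sketch */

        struct blk {                          /* per-block summary, as in the listing */
                int left_free;                /* free bits touching the block's start */
                int right_free;               /* free bits touching the block's end */
        };

        /*
         * Toy cross-block aggregation: a free region spanning blocks is the first
         * block's right_free plus the following blocks' left_free, where a fully
         * free block contributes BLOCK_BITS and lets the region keep growing.
         */
        static int spanning_free(const struct blk *blocks, int i, int nr)
        {
                int bits = blocks[i].right_free;

                for (i++; i < nr; i++) {
                        bits += blocks[i].left_free;
                        if (blocks[i].left_free != BLOCK_BITS)
                                break;                     /* region stops inside this block */
                }
                return bits;
        }

        int main(void)
        {
                /* block 0 ends with 4 free bits, block 1 is entirely free,
                 * block 2 starts with 6 free bits: one 26-bit region */
                struct blk blocks[] = {
                        { .left_free = 0,          .right_free = 4 },
                        { .left_free = BLOCK_BITS, .right_free = BLOCK_BITS },
                        { .left_free = 6,          .right_free = 0 },
                };

                assert(spanning_free(blocks, 0, 3) == 26);
                return 0;
        }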
416 * allocation. block->first_free is returned if the allocation request fits
417 * within the block to see if the request can be fulfilled prior to the contig
425 struct pcpu_block_md *block;
428 for (block = chunk->md_blocks + i; i < pcpu_chunk_nr_blocks(chunk);
429 block++, i++) {
432 *bits += block->left_free;
435 if (block->left_free == PCPU_BITMAP_BLOCK_BITS)
439 /* check block->contig_hint */
440 *bits = ALIGN(block->contig_hint_start, align) -
441 block->contig_hint_start;
443 * This uses the block offset to determine if this has been
446 if (block->contig_hint &&
447 block->contig_hint_start >= block_off &&
448 block->contig_hint >= *bits + alloc_bits) {
449 int start = pcpu_next_hint(block, alloc_bits);
451 *bits += alloc_bits + block->contig_hint_start -
459 *bit_off = ALIGN(PCPU_BITMAP_BLOCK_BITS - block->right_free,
595 * a md_block covers a page. The hint update functions recognize if a block
621 * pcpu_block_update - updates a block given a free area
622 * @block: block of interest
623 * @start: start offset in block
624 * @end: end offset in block
626 * Updates a block given a known free area. The region [start, end) is
627 * expected to be the entirety of the free area within a block. Chooses
630 static void pcpu_block_update(struct pcpu_block_md *block, int start, int end)
634 block->first_free = min(block->first_free, start);
636 block->left_free = contig;
638 if (end == block->nr_bits)
639 block->right_free = contig;
641 if (contig > block->contig_hint) {
643 if (start > block->contig_hint_start) {
644 if (block->contig_hint > block->scan_hint) {
645 block->scan_hint_start =
646 block->contig_hint_start;
647 block->scan_hint = block->contig_hint;
648 } else if (start < block->scan_hint_start) {
654 block->scan_hint = 0;
657 block->scan_hint = 0;
659 block->contig_hint_start = start;
660 block->contig_hint = contig;
661 } else if (contig == block->contig_hint) {
662 if (block->contig_hint_start &&
664 __ffs(start) > __ffs(block->contig_hint_start))) {
666 block->contig_hint_start = start;
667 if (start < block->scan_hint_start &&
668 block->contig_hint > block->scan_hint)
669 block->scan_hint = 0;
670 } else if (start > block->scan_hint_start ||
671 block->contig_hint > block->scan_hint) {
677 block->scan_hint_start = start;
678 block->scan_hint = contig;
686 if ((start < block->contig_hint_start &&
687 (contig > block->scan_hint ||
688 (contig == block->scan_hint &&
689 start > block->scan_hint_start)))) {
690 block->scan_hint_start = start;
691 block->scan_hint = contig;
697 * pcpu_block_update_scan - update a block given a free area from a scan
703 * to find a block that can hold the allocation and then pcpu_alloc_area()
708 * This takes a given free area hole and updates a block as it may change the
718 struct pcpu_block_md *block;
724 block = chunk->md_blocks + s_index;
730 pcpu_block_update(block, s_off, e_off);
769 * @index: index of the metadata block
771 * Scans over the block beginning at first_free and updates the block
776 struct pcpu_block_md *block = chunk->md_blocks + index;
781 if (block->scan_hint) {
782 start = block->scan_hint_start + block->scan_hint;
783 block->contig_hint_start = block->scan_hint_start;
784 block->contig_hint = block->scan_hint;
785 block->scan_hint = 0;
787 start = block->first_free;
788 block->contig_hint = 0;
791 block->right_free = 0;
795 pcpu_block_update(block, start, end);
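
pcpu_block_refresh_hint() rebuilds a block's hints by walking the allocation bitmap, and, as the lines above show, it resumes from scan_hint_start + scan_hint when a scan hint survives rather than from first_free. A toy userspace scan over a single 64-bit word (set bits = allocated in this sketch) showing just the run-finding part, with everything else the real refresh does omitted:

        #include <assert.h>
        #include <stdint.h>

        /* rebuild the contig hint of a 64-bit "block" by scanning free (zero) runs
         * starting at @start; @start stands in for first_free or the scan hint end */
        static void refresh_hint(uint64_t used, int start, int *hint_start, int *hint)
        {
                int run_start = -1;

                *hint = 0;
                for (int bit = start; bit < 64; bit++) {
                        int is_free = !((used >> bit) & 1);

                        if (is_free && run_start < 0)
                                run_start = bit;                   /* a free run begins */
                        if ((!is_free || bit == 63) && run_start >= 0) {
                                int len = (is_free ? bit + 1 : bit) - run_start;

                                if (len > *hint) {                 /* keep the largest run */
                                        *hint = len;
                                        *hint_start = run_start;
                                }
                                run_start = -1;
                        }
                }
        }

        int main(void)
        {
                /* bits 0-3 and 10-15 allocated; the largest free run past bit 4 is 16..63 */
                uint64_t used = 0xFULL | (0x3FULL << 10);
                int hint_start, hint;

                refresh_hint(used, 4, &hint_start, &hint);
                assert(hint_start == 16 && hint == 48);
                return 0;
        }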
806 * scans are required if the block's contig hint is broken.
813 struct pcpu_block_md *s_block, *e_block, *block;
814 int s_index, e_index; /* block indexes of the freed allocation */
815 int s_off, e_off; /* block offsets of the freed allocation */
818 * Calculate per block offsets.
820 * are [start, end). e_index always points to the last block in the
838 * block->first_free must be updated if the allocation takes its place.
859 /* block contig hint is broken - scan to fix it */
889 /* reset the block */
908 for (block = s_block + 1; block < e_block; block++) {
909 block->scan_hint = 0;
910 block->contig_hint = 0;
911 block->left_free = 0;
912 block->right_free = 0;
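
The loop above wipes the hints of every block strictly between s_block and e_block because, as the comments note, e_index always points to the last block in the range, so any block in between is fully consumed by the allocation. A sketch of the per-block offset math that produces s_index/e_index/s_off/e_off, assuming PCPU_BITMAP_BLOCK_BITS works out to 1024 (4 KiB pages, 4-byte minimum allocations):

        #include <assert.h>

        #define BLOCK_BITS 1024    /* assumed PCPU_BITMAP_BLOCK_BITS for this sketch */

        /* split a [bit_off, bit_off + bits) region into start/end block and offset */
        static void split_region(int bit_off, int bits,
                                 int *s_index, int *e_index, int *s_off, int *e_off)
        {
                *s_index = bit_off / BLOCK_BITS;
                *e_index = (bit_off + bits - 1) / BLOCK_BITS;   /* last block touched */
                *s_off = bit_off % BLOCK_BITS;
                *e_off = (bit_off + bits - 1) % BLOCK_BITS + 1;
        }

        int main(void)
        {
                int s_index, e_index, s_off, e_off;

                /* 2500 bits starting at bit 1000 span blocks 0..3: block 0 keeps its
                 * head, blocks 1 and 2 are fully used, block 3 keeps bits from 428 on */
                split_region(1000, 2500, &s_index, &e_index, &s_off, &e_off);
                assert(s_index == 0 && e_index == 3);
                assert(s_off == 1000 && e_off == 428);
                return 0;
        }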
946 * pcpu_block_update_hint_free - updates the block hints on the free path
951 * Updates metadata for the allocation path. This avoids a blind block
952 * refresh by making use of the block contig hints. If this fails, it scans
956 * A chunk update is triggered if a page becomes free, a block becomes free,
958 * over the block metadata to update chunk_md->contig_hint.
960 * than the available space. If the contig hint is contained in one block, it
967 struct pcpu_block_md *s_block, *e_block, *block;
968 int s_index, e_index; /* block indexes of the freed allocation */
969 int s_off, e_off; /* block offsets of the freed allocation */
973 * Calculate per block offsets.
975 * are [start, end). e_index always points to the last block in the
987 * Check if the freed area aligns with the block->contig_hint.
994 * or end of the block.
1024 /* freeing in the same block */
1033 for (block = s_block + 1; block < e_block; block++) {
1034 block->first_free = 0;
1035 block->scan_hint = 0;
1036 block->contig_hint_start = 0;
1037 block->contig_hint = PCPU_BITMAP_BLOCK_BITS;
1038 block->left_free = PCPU_BITMAP_BLOCK_BITS;
1039 block->right_free = PCPU_BITMAP_BLOCK_BITS;
1047 * Refresh chunk metadata when the free makes a block free or spans
1049 * the contig_hint is contained in a block, it will be accurate with
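
Putting the free-path comments above together: the chunk-wide hints only need a full rescan when the free spans metadata blocks or leaves a block (and hence the page it covers) entirely free; otherwise the cheaper single-block update is enough. A hedged sketch of one reading of that trigger, with an assumed block size:

        #include <assert.h>
        #include <stdbool.h>

        #define BLOCK_BITS 1024    /* one metadata block covers one page in this scheme */

        /* hedged reading of the trigger described above, not the kernel's exact test */
        static bool need_chunk_refresh(int s_index, int e_index, int s_block_contig_hint)
        {
                return s_index != e_index || s_block_contig_hint == BLOCK_BITS;
        }

        int main(void)
        {
                assert(need_chunk_refresh(2, 3, 100));          /* free spans blocks */
                assert(need_chunk_refresh(2, 2, BLOCK_BITS));   /* block is now entirely free */
                assert(!need_chunk_refresh(2, 2, 100));         /* contained and partial: no rescan */
                return 0;
        }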
1092 * pcpu_find_block_fit - finds the block index to start searching
1102 * of a block or chunk, it is skipped. This errs on the side of caution
1206 * the allocation map because if it fits within the block's contig hint,
1207 * @start will be block->first_free. This is an attempt to fill the
1309 static void pcpu_init_md_block(struct pcpu_block_md *block, int nr_bits)
1311 block->scan_hint = 0;
1312 block->contig_hint = nr_bits;
1313 block->left_free = nr_bits;
1314 block->right_free = nr_bits;
1315 block->first_free = 0;
1316 block->nr_bits = nr_bits;
1323 /* init the chunk's block */
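
pcpu_init_md_block() simply describes an entirely free block: one contig run covering all of nr_bits, free space touching both edges, nothing allocated yet. A small sketch of that, noting that this is exactly the state the reclaim scan near the end of this listing (contig_hint == PCPU_BITMAP_BLOCK_BITS) looks for, with BLOCK_BITS an assumed value:

        #include <assert.h>

        #define BLOCK_BITS 1024                /* assumed PCPU_BITMAP_BLOCK_BITS */

        struct blk {
                int first_free;
                int contig_hint_start, contig_hint;
                int scan_hint_start, scan_hint;
                int left_free, right_free;
                int nr_bits;
        };

        /* mirrors pcpu_init_md_block(): a fresh block is one big free run */
        static void init_md_block(struct blk *b, int nr_bits)
        {
                b->scan_hint = 0;
                b->contig_hint = nr_bits;
                b->left_free = nr_bits;
                b->right_free = nr_bits;
                b->first_free = 0;
                b->nr_bits = nr_bits;
        }

        int main(void)
        {
                struct blk b = { 0 };

                init_md_block(&b, BLOCK_BITS);
                /* a fully free block is indistinguishable from a freshly initialized
                 * one, which is what the contig_hint == BLOCK_BITS check exploits */
                assert(b.contig_hint == BLOCK_BITS);
                assert(b.left_free == BLOCK_BITS && b.right_free == BLOCK_BITS);
                return 0;
        }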
1920 * @gfp doesn't contain %GFP_KERNEL, the allocation doesn't block and can
2129 struct pcpu_block_md *block;
2171 block = chunk->md_blocks + i;
2172 if (block->contig_hint == PCPU_BITMAP_BLOCK_BITS &&