Lines Matching defs:heap

14 #include <heap.h>
225 out.Print("heap allocate: 0x%08lx (%lu bytes)", fAddress, fSize);
246 out.Print("heap reallocate: 0x%08lx -> 0x%08lx (%lu bytes)",
267 out.Print("heap free: 0x%08lx", fAddress);
339 dump_bin_list(heap_allocator *heap)
341 for (uint32 i = 0; i < heap->bin_count; i++)
342 dump_bin(&heap->bins[i]);
348 dump_allocator_areas(heap_allocator *heap)
350 heap_area *area = heap->all_areas;
365 dump_allocator(heap_allocator *heap, bool areas, bool bins)
369 "empty_areas: %" B_PRIu32 "\n", heap, heap->name, heap->page_size,
370 heap->bin_count, heap->total_pages, heap->total_free_pages,
371 heap->empty_areas);
374 dump_allocator_areas(heap);
376 dump_bin_list(heap);
385 // only dump dedicated grow heap info
386 kprintf("dedicated grow heap:\n");
415 // dump specified heap
443 heap_allocator *heap = sHeaps[heapIndex];
445 heap = (heap_allocator *)(addr_t)heapAddress;
448 heap_allocator *heap = (heap_allocator *)(addr_t)heapAddress;
449 if (heap == NULL) {
459 heap_area *area = heap->all_areas;
466 addr_t base = area->base + i * heap->page_size;
467 if (page->bin_index < heap->bin_count) {
471 = heap->bins[page->bin_index].element_size;
501 == heap->bin_count
506 size_t size = pageCount * heap->page_size;
563 heap_allocator *heap = sHeaps[heapIndex];
566 heap_area *area = heap->all_areas;
574 addr_t base = area->base + i * heap->page_size;
575 if (page->bin_index < heap->bin_count) {
579 = heap->bins[page->bin_index].element_size;
620 == heap->bin_count
626 * heap->page_size - sizeof(heap_leak_check_info));
700 analyze_allocation_callers(heap_allocator *heap)
703 heap_area *area = heap->all_areas;
711 addr_t base = area->base + i * heap->page_size;
712 if (page->bin_index < heap->bin_count) {
715 size_t elementSize = heap->bins[page->bin_index].element_size;
748 == heap->bin_count
755 * heap->page_size - sizeof(heap_leak_check_info));
782 heap_allocator *heap = NULL;
795 heap = (heap_allocator*)(addr_t)heapAddress;
804 if (heap != NULL) {
805 if (!analyze_allocation_callers(heap))
849 heap_validate_heap(heap_allocator *heap)
851 ReadLocker areaReadLocker(heap->area_lock);
852 for (uint32 i = 0; i < heap->bin_count; i++)
853 mutex_lock(&heap->bins[i].lock);
854 MutexLocker pageLocker(heap->page_lock);
858 heap_area *area = heap->all_areas;
908 area = heap->areas;
924 area = heap->all_areas;
934 for (uint32 i = 0; i < heap->bin_count; i++) {
935 heap_bin *bin = &heap->bins[i];
940 area = heap->all_areas;
982 addr_t pageBase = area->base + page->index * heap->page_size;
985 || (addr_t)element >= pageBase + heap->page_size)
1012 for (uint32 i = 0; i < heap->bin_count; i++)
1013 mutex_unlock(&heap->bins[i].lock);
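
heap_validate_heap (lines 849-1013) takes the area read lock first, then every bin lock in ascending index order, and finally the page lock, releasing them in reverse when done. A minimal sketch of that ordering, using standard C++ lock types as stand-ins for the kernel's rw_lock and mutex primitives:

#include <array>
#include <mutex>
#include <shared_mutex>

// Illustrative stand-in for the allocator's lock set; sizes and types
// are assumptions, not the kernel's.
struct heap_locks_sketch {
    std::shared_mutex area_lock;
    std::array<std::mutex, 4> bin_locks;   // one per bin, ascending order
    std::mutex page_lock;
};

void validate_sketch(heap_locks_sketch &heap)
{
    // same ordering as lines 851-854: area read lock, then every bin
    // lock in index order, then the page lock
    std::shared_lock<std::shared_mutex> areaLocker(heap.area_lock);
    for (std::mutex &bin : heap.bin_locks)
        bin.lock();
    std::unique_lock<std::mutex> pageLocker(heap.page_lock);

    // ... walk areas, pages and bin free lists here ...

    pageLocker.unlock();
    for (std::mutex &bin : heap.bin_locks)  // mirrors lines 1012-1013
        bin.unlock();
}
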
1023 heap_add_area(heap_allocator *heap, area_id areaID, addr_t base, size_t size)
1031 uint32 pageCount = size / heap->page_size;
1042 pageCount = area->size / heap->page_size;
1062 WriteLocker areaWriteLocker(heap->area_lock);
1063 MutexLocker pageLocker(heap->page_lock);
1064 if (heap->areas == NULL) {
1065 // it's the only (empty) area in that heap
1067 heap->areas = area;
1070 heap_area *lastArea = heap->areas;
1079 if (heap->all_areas == NULL || heap->all_areas->base < area->base) {
1080 area->all_next = heap->all_areas;
1081 heap->all_areas = area;
1083 heap_area *insert = heap->all_areas;
1091 heap->total_pages += area->page_count;
1092 heap->total_free_pages += area->free_page_count;
1097 heap->empty_areas++;
1103 dprintf("heap_add_area: area %" B_PRId32 " added to %s heap %p - usable "
1104 "range %p - %p\n", area->area, heap->name, heap, (void *)area->base,
1110 heap_remove_area(heap_allocator *heap, heap_area *area)
1113 panic("tried removing heap area that still has pages in use");
1118 panic("tried removing the last non-full heap area");
1122 if (heap->areas == area)
1123 heap->areas = area->next;
1129 if (heap->all_areas == area)
1130 heap->all_areas = area->all_next;
1132 heap_area *previous = heap->all_areas;
1143 panic("removing heap area that is not in all list");
1146 heap->total_pages -= area->page_count;
1147 heap->total_free_pages -= area->free_page_count;
1150 "from %s heap %p\n", area->area, (void *)area->base,
1151 (void *)(area->base + area->size), heap->name, heap);
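
heap_remove_area (lines 1110-1151) unlinks the area from two singly linked lists, the free-list areas and the address-ordered all_areas, panicking when the area is still in use or cannot be found. A sketch of the unlink against the all_areas list, with an illustrative node type:

// Illustrative node type; the kernel's heap_area carries more fields.
struct heap_area_sketch {
    heap_area_sketch *all_next;
};

// Mirrors lines 1129-1143: unlink from the address-ordered all_areas
// list, handling the list head specially; a miss is a panic() upstream.
bool unlink_all_areas_sketch(heap_area_sketch *&all_areas,
    heap_area_sketch *area)
{
    if (all_areas == area) {
        all_areas = area->all_next;
        return true;
    }
    for (heap_area_sketch *previous = all_areas; previous != nullptr;
            previous = previous->all_next) {
        if (previous->all_next == area) {
            previous->all_next = area->all_next;
            return true;
        }
    }
    return false;   // "removing heap area that is not in all list"
}
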
1161 heap_allocator *heap;
1163 // allocate separately on the heap
1164 heap = (heap_allocator *)malloc(sizeof(heap_allocator)
1168 heap = (heap_allocator *)base;
1173 heap->name = name;
1174 heap->page_size = heapClass->page_size;
1175 heap->total_pages = heap->total_free_pages = heap->empty_areas = 0;
1176 heap->areas = heap->all_areas = NULL;
1177 heap->bins = (heap_bin *)((addr_t)heap + sizeof(heap_allocator));
1180 heap->get_caller = &get_caller;
1183 heap->bin_count = 0;
1185 uint32 count = heap->page_size / heapClass->min_bin_size;
1187 if (heap->bin_count >= MAX_BIN_COUNT)
1188 panic("heap configuration invalid - max bin count reached\n");
1190 binSize = (heap->page_size / count) & ~(heapClass->bin_alignment - 1);
1193 if (heap->page_size - count * binSize > heapClass->max_waste_per_page)
1196 heap_bin *bin = &heap->bins[heap->bin_count];
1197 mutex_init(&bin->lock, "heap bin lock");
1199 bin->max_free_count = heap->page_size / binSize;
1201 heap->bin_count++;
1205 base += heap->bin_count * sizeof(heap_bin);
1206 size -= heap->bin_count * sizeof(heap_bin);
1209 rw_lock_init(&heap->area_lock, "heap area rw lock");
1210 mutex_init(&heap->page_lock, "heap page lock");
1212 heap_add_area(heap, -1, base, size);
1213 return heap;
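
The bin table built in heap_create_allocator (lines 1183-1206) starts from the densest packing the heap class allows and derives each element size as page_size / count rounded down to the bin alignment, skipping candidates that repeat the previous size or leave more page tail than max_waste_per_page. A standalone sketch with invented example numbers; heap_class_sketch and the lower loop bound are assumptions:

#include <cstdint>
#include <cstdio>

// Invented container for the class parameters named in the fragments.
struct heap_class_sketch {
    uint32_t page_size;
    uint32_t min_bin_size;
    uint32_t bin_alignment;        // power of two
    uint32_t max_waste_per_page;
};

int main()
{
    heap_class_sketch c = { 4096, 8, 8, 16 };   // example numbers
    uint32_t lastSize = 0;
    // walk from the densest packing toward fewer, larger elements; the
    // real cut-off for the largest bin depends on the heap class and is
    // not shown in the fragments above
    for (uint32_t count = c.page_size / c.min_bin_size; count > 1; count--) {
        uint32_t binSize = (c.page_size / count) & ~(c.bin_alignment - 1);
        if (binSize == lastSize)
            continue;               // same size as the previous bin
        if (c.page_size - count * binSize > c.max_waste_per_page)
            continue;               // too much unused tail per page
        std::printf("bin: %u elements of %u bytes\n", count, binSize);
        lastSize = binSize;
    }
    return 0;
}
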
1218 heap_free_pages_added(heap_allocator *heap, heap_area *area, uint32 pageCount)
1221 heap->total_free_pages += pageCount;
1224 // we need to add ourselves to the area list of the heap
1226 area->next = heap->areas;
1229 heap->areas = area;
1243 if (heap->areas == area)
1244 heap->areas = area->next;
1255 heap->empty_areas++;
1260 heap_free_pages_removed(heap_allocator *heap, heap_area *area, uint32 pageCount)
1264 heap->empty_areas--;
1268 heap->total_free_pages -= pageCount;
1276 if (heap->areas == area)
1277 heap->areas = area->next;
1297 if (heap->areas == insert)
1298 heap->areas = area;
1332 heap_allocate_contiguous_pages(heap_allocator *heap, uint32 pageCount,
1335 MutexLocker pageLocker(heap->page_lock);
1336 heap_area *area = heap->areas;
1347 if (alignment > heap->page_size) {
1349 / heap->page_size;
1350 step = alignment / heap->page_size;
1380 page->bin_index = heap->bin_count;
1389 heap_free_pages_removed(heap, area, pageCount);
1399 heap_add_leak_check_info(heap_allocator *heap, addr_t address, size_t allocated,
1407 info->caller = heap->get_caller();
1413 heap_raw_alloc(heap_allocator *heap, size_t size, size_t alignment)
1415 TRACE(("heap %p: allocate %lu bytes from raw pages with alignment %lu\n",
1416 heap, size, alignment));
1418 uint32 pageCount = (size + heap->page_size - 1) / heap->page_size;
1419 heap_page *firstPage = heap_allocate_contiguous_pages(heap, pageCount,
1422 TRACE(("heap %p: found no contiguous pages to allocate %ld bytes\n",
1423 heap, size));
1427 addr_t address = firstPage->area->base + firstPage->index * heap->page_size;
1429 heap_add_leak_check_info(heap, address, pageCount * heap->page_size, size);
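
heap_raw_alloc (lines 1413-1429) rounds the request up to whole pages, grabs that many contiguous pages, computes the address from the area base and the first page's index, and records the allocated and requested sizes for leak checking. A tiny sketch of the arithmetic with invented values:

#include <cstddef>
#include <cstdint>
#include <cstdio>

int main()
{
    const size_t pageSize = 4096;    // assumed heap->page_size
    size_t size = 10000;             // requested bytes

    // line 1418: round the request up to whole pages (ceiling division)
    size_t pageCount = (size + pageSize - 1) / pageSize;   // -> 3

    // line 1427: the address is page-granular within the area
    uintptr_t areaBase = 0x80000000; // invented firstPage->area->base
    uint32_t firstIndex = 5;         // invented firstPage->index
    uintptr_t address = areaBase + firstIndex * pageSize;

    std::printf("%zu pages at %#lx, %zu of %zu bytes used\n",
        pageCount, (unsigned long)address, size, pageCount * pageSize);
    return 0;
}
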
1436 heap_allocate_from_bin(heap_allocator *heap, uint32 binIndex, size_t size)
1438 heap_bin *bin = &heap->bins[binIndex];
1439 TRACE(("heap %p: allocate %lu bytes from bin %lu with element_size %lu\n",
1440 heap, size, binIndex, bin->element_size));
1445 MutexLocker pageLocker(heap->page_lock);
1446 heap_area *area = heap->areas;
1448 TRACE(("heap %p: no free pages to allocate %lu bytes\n", heap,
1460 heap_free_pages_removed(heap, area, 1);
1484 address = (void *)(page->area->base + page->index * heap->page_size
1500 heap_add_leak_check_info(heap, (addr_t)address, bin->element_size, size);
1515 heap_should_grow(heap_allocator *heap)
1518 return heap->total_free_pages * heap->page_size < HEAP_GROW_SIZE / 5;
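
The heuristic at line 1518 reports that a heap should grow once its remaining free space falls under one fifth of HEAP_GROW_SIZE. A worked sketch, assuming an 8 MB grow size and 4 KB pages (both assumptions, not the kernel's constants):

#include <cstddef>

const size_t kGrowSizeSketch = 8 * 1024 * 1024;  // assumed HEAP_GROW_SIZE
const size_t kPageSizeSketch = 4096;             // assumed heap->page_size

// line 1518: grow once free space drops under one fifth of the grow
// size; with the numbers above that is ~1.6 MB, i.e. fewer than
// ~410 free pages
bool should_grow_sketch(size_t totalFreePages)
{
    return totalFreePages * kPageSizeSketch < kGrowSizeSketch / 5;
}
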
1523 heap_memalign(heap_allocator *heap, size_t alignment, size_t size)
1546 for (uint32 i = 0; i < heap->bin_count; i++) {
1547 if (size <= heap->bins[i].element_size
1548 && is_valid_alignment(heap->bins[i].element_size)) {
1549 address = heap_allocate_from_bin(heap, i, size);
1554 for (uint32 i = 0; i < heap->bin_count; i++) {
1555 if (size <= heap->bins[i].element_size) {
1556 address = heap_allocate_from_bin(heap, i, size);
1564 address = heap_raw_alloc(heap, size, alignment);
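
heap_memalign (lines 1523-1564) walks the bins in ascending element-size order and takes the first one that fits; on the aligned path the element size must itself be a valid (power-of-two) alignment, and anything no bin can serve falls through to heap_raw_alloc. A dispatch sketch; the helper names and the initial size round-up are assumptions:

#include <cstddef>
#include <cstdint>

static bool is_valid_alignment_sketch(size_t n)
{
    return n != 0 && (n & (n - 1)) == 0;   // power of two
}

// Returns the chosen bin index, or binCount when only heap_raw_alloc
// can serve the request. binSizes stands in for the ascending
// heap->bins[].element_size array.
uint32_t pick_bin_sketch(const size_t *binSizes, uint32_t binCount,
    size_t alignment, size_t size)
{
    if (alignment != 0) {
        // assumption: the size is first rounded up to the (power-of-two)
        // alignment, so a power-of-two element size yields an aligned
        // address within the page
        size = (size + alignment - 1) & ~(alignment - 1);
        for (uint32_t i = 0; i < binCount; i++) {
            if (size <= binSizes[i]
                && is_valid_alignment_sketch(binSizes[i]))
                return i;   // first fitting bin wins (lines 1546-1549)
        }
    } else {
        for (uint32_t i = 0; i < binCount; i++) {
            if (size <= binSizes[i])
                return i;   // lines 1554-1556
        }
    }
    return binCount;        // fall through to raw pages (line 1564)
}
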
1593 heap_free(heap_allocator *heap, void *address)
1598 ReadLocker areaReadLocker(heap->area_lock);
1599 heap_area *area = heap->all_areas;
1626 / heap->page_size];
1631 if (page->bin_index > heap->bin_count) {
1636 if (page->bin_index < heap->bin_count) {
1638 heap_bin *bin = &heap->bins[page->bin_index];
1664 * heap->page_size) % bin->element_size != 0) {
1677 MutexLocker pageLocker(heap->page_lock);
1681 heap_free_pages_added(heap, area, 1);
1709 MutexLocker pageLocker(heap->page_lock);
1712 if (!page[i].in_use || page[i].bin_index != heap->bin_count
1725 heap_free_pages_added(heap, area, pageCount);
1731 if (heap->empty_areas > 1) {
1732 WriteLocker areaWriteLocker(heap->area_lock);
1733 MutexLocker pageLocker(heap->page_lock);
1735 area_id areasToDelete[heap->empty_areas - 1];
1738 area = heap->areas;
1739 while (area != NULL && heap->empty_areas > 1) {
1743 && heap_remove_area(heap, area) == B_OK) {
1745 heap->empty_areas--;
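
After a free, lines 1731-1745 trim surplus empty areas while always keeping one in reserve; the area IDs are collected under the heap locks and the areas themselves are deleted only after the locks are dropped. A sketch of the keep-one policy with illustrative types:

#include <vector>

struct area_sketch {
    area_sketch *next;
    bool empty;     // stand-in for page_count == free_page_count
};

// Collect surplus empty areas while keeping one in reserve; the kernel
// records area IDs here and deletes the areas after releasing its locks.
std::vector<area_sketch *> collect_surplus_sketch(area_sketch *&areas,
    unsigned &emptyAreas)
{
    std::vector<area_sketch *> toDelete;
    area_sketch *previous = nullptr;
    area_sketch *area = areas;
    while (area != nullptr && emptyAreas > 1) {   // always keep one
        area_sketch *next = area->next;
        if (area->empty) {
            if (previous == nullptr)
                areas = next;           // unlink the list head
            else
                previous->next = next;
            toDelete.push_back(area);
            emptyAreas--;               // mirrors line 1745
        } else
            previous = area;
        area = next;
    }
    return toDelete;
}
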
1764 heap_set_get_caller(heap_allocator* heap, addr_t (*getCaller)())
1766 heap->get_caller = getCaller;
1775 heap_realloc(heap_allocator *heap, void *address, void **newAddress,
1778 ReadLocker areaReadLocker(heap->area_lock);
1779 heap_area *area = heap->all_areas;
1807 / heap->page_size];
1808 if (page->bin_index > heap->bin_count) {
1817 if (page->bin_index < heap->bin_count) {
1819 heap_bin *bin = &heap->bins[page->bin_index];
1822 minSize = heap->bins[page->bin_index - 1].element_size + 1;
1827 maxSize = heap->page_size;
1829 MutexLocker pageLocker(heap->page_lock);
1831 if (!page[i].in_use || page[i].bin_index != heap->bin_count
1835 minSize += heap->page_size;
1836 maxSize += heap->page_size;
1876 heap_free(heap, address);
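
heap_realloc (lines 1775-1876) first computes the size window the existing allocation already covers: for a binned allocation, from one byte past the next-smaller bin's element size up to its own element size (lines 1819-1822); for a page run, whole multiples of page_size (lines 1827-1836). Only when newSize leaves that window does it allocate, copy, and free the old block. A sketch of the binned case:

#include <cstddef>
#include <cstdint>

// binSizes stands in for heap->bins[].element_size, binIndex for
// page->bin_index of the existing allocation.
bool fits_in_place_sketch(const size_t *binSizes, uint32_t binIndex,
    size_t newSize)
{
    size_t maxSize = binSizes[binIndex];                 // line 1819 ff.
    size_t minSize = binIndex > 0
        ? binSizes[binIndex - 1] + 1 : 0;                // line 1822
    // below the window a smaller bin would do; above it the element
    // no longer fits, so the allocation must move
    return newSize >= minSize && newSize <= maxSize;
}
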
1906 // hopefully the heap grower will manage to create a new heap
1908 dprintf("heap: requesting new grow heap\n");
1924 heap_allocator *heap = sHeaps[heapIndex];
1925 result = heap_memalign(heap, alignment, size);
1932 panic("heap: all heaps have run out of memory while growing\n");
1934 dprintf("heap: all heaps have run out of memory\n");
1941 heap_create_new_heap_area(heap_allocator *heap, const char *name, size_t size)
1948 TRACE(("heap: couldn't allocate heap area \"%s\"\n", name));
1952 heap_add_area(heap, heapArea, (addr_t)address, size);
1954 heap_validate_heap(heap);
1964 // wait for a request to grow the heap list
1969 // the grow heap is going to run full soon, try to allocate a new
1972 if (heap_create_new_heap_area(sGrowHeap, "additional grow heap",
1974 dprintf("heap_grower: failed to create new grow heap area\n");
1978 heap_allocator *heap = sHeaps[i];
1980 || heap_should_grow(heap)) {
1981 // grow this heap if it is nearly full or if a grow was
1982 // explicitly requested for this heap (happens when a large
1985 if (heap_create_new_heap_area(heap, "additional heap",
1987 dprintf("heap_grower: failed to create new heap area\n");
2048 add_debugger_command_etc("heap", &dump_heap_list,
2049 "Dump infos about the kernel heap(s)",
2050 "[(\"grow\" | \"stats\" | <heap>)]\n"
2051 "Dump infos about the kernel heap(s). If \"grow\" is specified, only\n"
2052 "infos about the dedicated grow heap are printed. If \"stats\" is\n"
2053 "given as the argument, currently only the heap count is printed.\n"
2054 "If <heap> is given, it is interpreted as the address of the heap to\n"
2058 "Dump current heap allocations",
2059 "[\"stats\"] [<heap>]\n"
2063 "If a specific heap address is given, only allocations of this\n"
2067 "Dump current heap allocations",
2077 "Dump current heap allocations summed up per caller",
2078 "[ \"-c\" ] [ -h <heap> ]\n"
2081 "specified, by allocation count. If given <heap> specifies the\n"
2082 "address of the heap for which to print the allocations.\n", 0);
2092 area_id growHeapArea = create_area("dedicated grow heap", &address,
2096 panic("heap_init_post_area(): couldn't allocate dedicated grow heap "
2104 panic("heap_init_post_area(): failed to create dedicated grow heap\n");
2108 // create the VIP heap
2120 area_id vipHeapArea = create_area("VIP heap", &address,
2124 panic("heap_init_post_area(): couldn't allocate VIP heap area");
2128 sVIPHeap = heap_create_allocator("VIP heap", (addr_t)address,
2131 panic("heap_init_post_area(): failed to create VIP heap\n");
2135 dprintf("heap_init_post_area(): created VIP heap: %p\n", sVIPHeap);
2146 panic("heap_init_post_sem(): failed to create heap grow sem\n");
2152 panic("heap_init_post_sem(): failed to create heap grown notify sem\n");
2167 sHeapGrowThread = spawn_kernel_thread(heap_grow_thread, "heap grower",
2170 panic("heap_init_post_thread(): cannot create heap grow thread\n");
2180 area_id perCPUHeapArea = create_area("per cpu initial heap",
2203 add_debugger_command_etc("heap", &dump_heap_list,
2204 "Dump infos about a specific heap",
2205 "[\"stats\"] <heap>\n"
2206 "Dump infos about the specified kernel heap. If \"stats\" is given\n"
2207 "as the argument, currently only the heap count is printed.\n", 0);
2210 "Dump current heap allocations",
2211 "[\"stats\"] <heap>\n"
2253 dprintf("heap: failed to create area for huge allocation\n");
2272 TRACE(("heap: allocated area %ld for huge allocation of %lu bytes\n",
2290 heap_allocator *heap = sHeaps[heapIndex];
2291 result = heap_memalign(heap, alignment, size);
2293 shouldGrow = heap_should_grow(heap);
2298 heap_validate_heap(heap);
2319 panic("heap: kernel heap has run out of memory\n");
2366 heap_allocator *heap = sHeaps[(i + offset) % sHeapCount];
2367 if (heap_free(heap, address) == B_OK) {
2369 heap_validate_heap(heap);
2375 // maybe it was allocated from the dedicated grow heap
2379 // or maybe it was allocated from the VIP heap
2423 heap_allocator *heap = sHeaps[(i + offset) % sHeapCount];
2424 if (heap_realloc(heap, address, &newAddress, newSize, flags) == B_OK) {
2426 heap_validate_heap(heap);
2432 // maybe it was allocated from the dedicated grow heap
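
The closing fragments (lines 2366-2432) show how free and realloc locate the owning allocator: every heap in sHeaps is tried once, starting from a per-call offset, before falling back to the dedicated grow heap and then the VIP heap. A round-robin sketch; the count, the offset choice, and the stubbed helper are assumptions:

// Stub standing in for heap_free() on sHeaps[heapIndex]; returns
// whether that allocator owned the address.
static bool heap_free_sketch(int /*heapIndex*/, void * /*address*/)
{
    return false;
}

const int kHeapCountSketch = 4;     // stand-in for sHeapCount

bool free_dispatch_sketch(void *address, int offset)
{
    // try every heap once, starting at a per-call offset and wrapping,
    // as in lines 2366-2367; on a miss the caller falls back to the
    // dedicated grow heap and then the VIP heap
    for (int i = 0; i < kHeapCountSketch; i++) {
        if (heap_free_sketch((i + offset) % kHeapCountSketch, address))
            return true;
    }
    return false;
}
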