Lines matching refs:area

177 	area_id				area;
225 guarded_heap_page_protect(guarded_heap_area& area, size_t pageIndex,
228 addr_t address = area.base + pageIndex * B_PAGE_SIZE;
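
The matched line at 228 computes a page's virtual address from the area base. A minimal sketch of how the surrounding helper could look, assuming the userland heap maps its protection argument onto POSIX mprotect() (0 meaning PROT_NONE for guard pages); the exact flag translation is an assumption:

#include <sys/mman.h>

// Sketch only: translate a page index into an mprotect() call. The
// protection mapping (0 -> PROT_NONE, otherwise read/write) is an
// assumption about how the flags are encoded.
static void
guarded_heap_page_protect(guarded_heap_area& area, size_t pageIndex,
	uint32 protection)
{
	addr_t address = area.base + pageIndex * B_PAGE_SIZE;
	int prot = protection == 0 ? PROT_NONE : (PROT_READ | PROT_WRITE);
	mprotect((void*)address, B_PAGE_SIZE, prot);
}
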
306 guarded_heap_page_allocate(guarded_heap_area& area, size_t startPageIndex,
317 guarded_heap_page& page = area.pages[startPageIndex + i];
340 list_remove_item(&area.free_list, &page);
344 guarded_heap_page_protect(area, startPageIndex + i, 0);
346 guarded_heap_page_protect(area, startPageIndex + i,
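
Lines 306-346 mark a run of pages as used and re-protect them. A condensed sketch of that loop, assuming the first page carries GUARDED_HEAP_PAGE_FLAG_FIRST and the last page of the run serves as the inaccessible guard page (the GUARDED_HEAP_PAGE_FLAG_GUARD name is an assumption); the real function takes further bookkeeping parameters that are omitted here:

// Sketch: claim pagesNeeded consecutive pages for one allocation.
static void
guarded_heap_page_allocate(guarded_heap_area& area, size_t startPageIndex,
	size_t pagesNeeded, size_t size)
{
	for (size_t i = 0; i < pagesNeeded; i++) {
		guarded_heap_page& page = area.pages[startPageIndex + i];
		page.flags = GUARDED_HEAP_PAGE_FLAG_USED;
		if (i == 0)
			page.flags |= GUARDED_HEAP_PAGE_FLAG_FIRST;

		bool isGuardPage = (i == pagesNeeded - 1);
		if (isGuardPage)
			page.flags |= GUARDED_HEAP_PAGE_FLAG_GUARD;

		list_remove_item(&area.free_list, &page);

		// The guard page is made inaccessible so an overrun faults on
		// the first out-of-bounds byte; payload pages become read/write.
		if (isGuardPage)
			guarded_heap_page_protect(area, startPageIndex + i, 0);
		else
			guarded_heap_page_protect(area, startPageIndex + i,
				B_READ_AREA | B_WRITE_AREA);
	}
}
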
354 guarded_heap_free_page(guarded_heap_area& area, size_t pageIndex,
357 guarded_heap_page& page = area.pages[pageIndex];
359 if (area.heap->reuse_memory || force)
366 list_add_item(&area.free_list, &page);
368 guarded_heap_page_protect(area, pageIndex, 0);
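
Lines 354-368 show the release path: the page returns to the area's free list and is immediately protected away so stale pointers fault. A sketch, assuming a GUARDED_HEAP_PAGE_FLAG_DEAD marker (the flag name is an assumption) is used when the heap is configured not to reuse memory:

// Sketch: release one page. When memory is deliberately not reused,
// the page is only marked dead so use-after-free keeps faulting.
static void
guarded_heap_free_page(guarded_heap_area& area, size_t pageIndex,
	bool force = false)
{
	guarded_heap_page& page = area.pages[pageIndex];

	if (area.heap->reuse_memory || force)
		page.flags = 0;
	else
		page.flags |= GUARDED_HEAP_PAGE_FLAG_DEAD;

	list_add_item(&area.free_list, &page);

	// Unmap the page contents; any further access faults.
	guarded_heap_page_protect(area, pageIndex, 0);
}
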
380 guarded_heap_area_allocate(guarded_heap_area& area, size_t pagesNeeded,
383 if (pagesNeeded > area.page_count - area.used_pages)
390 = (guarded_heap_page*)list_get_next_item(&area.free_list, NULL);
393 page = (guarded_heap_page*)list_get_next_item(&area.free_list, page)) {
398 size_t pageIndex = page - area.pages;
399 if (pageIndex > area.page_count - pagesNeeded)
406 if ((area.pages[pageIndex + j].flags & GUARDED_HEAP_PAGE_FLAG_USED)
417 void* result = (void*)((area.base + pageIndex * B_PAGE_SIZE
420 guarded_heap_page_allocate(area, pageIndex, pagesNeeded, size,
423 area.used_pages += pagesNeeded;
424 guarded_heap_pages_allocated(*area.heap, pagesNeeded);
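
Lines 380-424 implement a first-fit scan over the free list: for each free page the code checks whether pagesNeeded consecutive pages (payload plus guard) are unused, then places the allocation at the end of the run so it abuts the guard page. A sketch of that search, with the alignment handling from line 417 simplified away:

// Sketch: first-fit search for a run of free pages. Placing the result
// at the tail of the run, directly before the guard page, is what makes
// buffer overruns fault immediately.
static void*
guarded_heap_area_allocate(guarded_heap_area& area, size_t pagesNeeded,
	size_t size)
{
	if (pagesNeeded > area.page_count - area.used_pages)
		return NULL;

	for (guarded_heap_page* page
			= (guarded_heap_page*)list_get_next_item(&area.free_list, NULL);
		page != NULL;
		page = (guarded_heap_page*)list_get_next_item(&area.free_list, page)) {

		size_t pageIndex = page - area.pages;
		if (pageIndex > area.page_count - pagesNeeded)
			continue;

		// Verify that the whole run is unused, not just this page.
		bool candidate = true;
		for (size_t j = 1; j < pagesNeeded; j++) {
			if ((area.pages[pageIndex + j].flags
					& GUARDED_HEAP_PAGE_FLAG_USED) != 0) {
				candidate = false;
				break;
			}
		}

		if (!candidate)
			continue;

		// End of the payload pages minus the request size.
		void* result = (void*)(area.base + pageIndex * B_PAGE_SIZE
			+ (pagesNeeded - 1) * B_PAGE_SIZE - size);

		guarded_heap_page_allocate(area, pageIndex, pagesNeeded, size);

		area.used_pages += pagesNeeded;
		guarded_heap_pages_allocated(*area.heap, pagesNeeded);
		return result;
	}

	return NULL;
}
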
436 guarded_heap_area* area = (guarded_heap_area*)baseAddress;
437 area->heap = &heap;
438 area->area = id;
439 area->size = size;
440 area->page_count = area->size / B_PAGE_SIZE;
441 area->used_pages = 0;
444 + area->page_count * sizeof(guarded_heap_page)
447 area->page_count -= pagesNeeded;
448 area->size = area->page_count * B_PAGE_SIZE;
449 area->base = (addr_t)baseAddress + pagesNeeded * B_PAGE_SIZE;
451 mutex_init(&area->lock, "guarded_heap_area_lock");
453 list_init_etc(&area->free_list,
456 for (size_t i = 0; i < area->page_count; i++)
457 guarded_heap_free_page(*area, i, true);
459 area->next = heap.areas;
460 heap.areas = area;
461 heap.page_count += area->page_count;
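
Lines 436-461 carve the bookkeeping out of the new area itself: the guarded_heap_area header and the per-page guarded_heap_page array live in the first pages of the mapping, and only the remainder becomes allocatable. A sketch of that metadata split; the free_list_link member name is an assumption:

// Sketch: initialize a freshly created area. Header and bookkeeping
// array are stored in-band, so page_count shrinks by the pages they
// occupy and base moves past them.
static void
guarded_heap_area_init(guarded_heap& heap, area_id id, void* baseAddress,
	size_t size)
{
	guarded_heap_area* area = (guarded_heap_area*)baseAddress;
	area->heap = &heap;
	area->area = id;
	area->size = size;
	area->page_count = area->size / B_PAGE_SIZE;
	area->used_pages = 0;
	area->pages = (guarded_heap_page*)(area + 1);

	size_t pagesNeeded = (sizeof(guarded_heap_area)
		+ area->page_count * sizeof(guarded_heap_page)
		+ B_PAGE_SIZE - 1) / B_PAGE_SIZE;

	area->page_count -= pagesNeeded;
	area->size = area->page_count * B_PAGE_SIZE;
	area->base = (addr_t)baseAddress + pagesNeeded * B_PAGE_SIZE;

	mutex_init(&area->lock, "guarded_heap_area_lock");
	list_init_etc(&area->free_list,
		offsetof(guarded_heap_page, free_list_link));

	// Every page starts out free; the force flag also clears the flags.
	for (size_t i = 0; i < area->page_count; i++)
		guarded_heap_free_page(*area, i, true);

	area->next = heap.areas;
	heap.areas = area;
	heap.page_count += area->page_count;
}
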
486 panic("failed to allocate a new heap area");
514 area_id area = create_area("guarded_heap_huge_allocation", &address,
517 if (area < 0) {
518 panic("failed to create area for allocation of %" B_PRIuSIZE " pages",
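
Line 514 shows oversized requests bypassing the heap areas entirely and getting a dedicated area. A sketch of that path using the public create_area() API; the B_NO_LOCK and read/write protection arguments match the matched line, while the surrounding function shape is an assumption:

// Sketch: satisfy a huge allocation with its own area. Deleting the
// area later unmaps everything at once, so a use-after-free of a huge
// block faults as well.
static void*
guarded_heap_allocate_with_area(size_t size)
{
	void* address = NULL;
	area_id area = create_area("guarded_heap_huge_allocation", &address,
		B_ANY_ADDRESS, size, B_NO_LOCK, B_READ_AREA | B_WRITE_AREA);
	if (area < 0) {
		panic("failed to create area for allocation of %" B_PRIuSIZE " pages",
			size / B_PAGE_SIZE);
		return NULL;
	}

	return address;
}
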
572 // Don't bother, use an area directly. Since it will also fault once
580 for (guarded_heap_area* area = heap.areas; area != NULL;
581 area = area->next) {
583 MutexLocker locker(area->lock);
584 result = guarded_heap_area_allocate(*area, pagesNeeded, size,
609 for (guarded_heap_area* area = heap.areas; area != NULL;
610 area = area->next) {
611 if ((addr_t)address < area->base)
614 if ((addr_t)address >= area->base + area->size)
617 mutex_lock(&area->lock);
618 return area;
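
Lines 609-618 locate the owning area with a simple range check and return it with its mutex already held; the caller then adopts that lock (see the MutexLocker construction at line 746 below). A sketch assembled from the matched lines:

// Sketch: find the area containing address and return it locked. The
// caller is responsible for unlocking, typically by handing the mutex
// to a MutexLocker with alreadyLocked == true.
static guarded_heap_area*
guarded_heap_get_locked_area_for(guarded_heap& heap, void* address)
{
	for (guarded_heap_area* area = heap.areas; area != NULL;
			area = area->next) {
		if ((addr_t)address < area->base)
			continue;
		if ((addr_t)address >= area->base + area->size)
			continue;

		mutex_lock(&area->lock);
		return area;
	}

	return NULL;
}
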
626 guarded_heap_area_page_index_for(guarded_heap_area& area, void* address)
628 size_t pageIndex = ((addr_t)address - area.base) / B_PAGE_SIZE;
629 guarded_heap_page& page = area.pages[pageIndex];
633 return area.page_count;
639 return area.page_count;
645 return area.page_count;
651 return area.page_count;
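
Lines 626-651 validate a pointer before freeing it; each of the four early returns at 633/639/645/651 reports area.page_count as an out-of-range sentinel, which callers detect with pageIndex >= area.page_count. A sketch of the checks; the specific conditions (guard page, non-first page, mismatched allocation_base) and the panic wording are assumptions inferred from the flags used elsewhere in the listing:

// Sketch: map a user pointer back to its first page and sanity-check
// it. Returning area.page_count (one past the last valid index)
// signals an invalid free.
static size_t
guarded_heap_area_page_index_for(guarded_heap_area& area, void* address)
{
	size_t pageIndex = ((addr_t)address - area.base) / B_PAGE_SIZE;
	guarded_heap_page& page = area.pages[pageIndex];

	if ((page.flags & GUARDED_HEAP_PAGE_FLAG_USED) == 0) {
		panic("free of unused page at %p", address);
		return area.page_count;
	}

	if ((page.flags & GUARDED_HEAP_PAGE_FLAG_GUARD) != 0) {
		panic("free of guard page at %p", address);
		return area.page_count;
	}

	if ((page.flags & GUARDED_HEAP_PAGE_FLAG_FIRST) == 0) {
		panic("free of non-first allocation page at %p", address);
		return area.page_count;
	}

	if (page.allocation_base != address) {
		panic("free of pointer %p not matching the allocation base", address);
		return area.page_count;
	}

	return pageIndex;
}
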
659 guarded_heap_area_free(guarded_heap_area& area, void* address)
661 size_t pageIndex = guarded_heap_area_page_index_for(area, address);
662 if (pageIndex >= area.page_count)
666 guarded_heap_page* page = &area.pages[pageIndex];
669 guarded_heap_free_page(area, pageIndex);
681 page = &area.pages[pageIndex];
685 guarded_heap_free_page(area, pageIndex);
688 if (area.heap->reuse_memory) {
689 area.used_pages -= pagesFreed;
690 atomic_add((int32*)&area.heap->used_pages, -pagesFreed);
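
Lines 659-690: freeing walks forward from the first page through the trailing guard page, releasing each one, and adjusts the counters only when memory is actually reused. A sketch assembled from the matched lines:

// Sketch: free a whole allocation. The heap-wide counter is updated
// atomically because each area only holds its own lock.
static bool
guarded_heap_area_free(guarded_heap_area& area, void* address)
{
	size_t pageIndex = guarded_heap_area_page_index_for(area, address);
	if (pageIndex >= area.page_count)
		return false;

	size_t pagesFreed = 0;
	guarded_heap_page* page = &area.pages[pageIndex];
	while ((page->flags & GUARDED_HEAP_PAGE_FLAG_GUARD) == 0) {
		guarded_heap_free_page(area, pageIndex);
		pagesFreed++;
		pageIndex++;
		page = &area.pages[pageIndex];
	}

	// Release the guard page as well.
	guarded_heap_free_page(area, pageIndex);
	pagesFreed++;

	if (area.heap->reuse_memory) {
		area.used_pages -= pagesFreed;
		atomic_add((int32*)&area.heap->used_pages, -pagesFreed);
	}

	return true;
}
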
741 guarded_heap_area* area = guarded_heap_get_locked_area_for(sGuardedHeap,
743 if (area == NULL)
746 MutexLocker locker(area->lock, true);
747 return guarded_heap_area_free(*area, address);
754 guarded_heap_area* area = guarded_heap_get_locked_area_for(sGuardedHeap,
759 if (area != NULL) {
760 MutexLocker locker(area->lock, true);
761 size_t pageIndex = guarded_heap_area_page_index_for(*area, address);
762 if (pageIndex >= area->page_count)
765 guarded_heap_page& page = area->pages[pageIndex];
790 MutexLocker locker(area->lock);
791 guarded_heap_area_free(*area, address);
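
Lines 741-791 show the lock-adoption pattern on the free path: guarded_heap_get_locked_area_for() returns with the area mutex held, and MutexLocker's second argument (true) takes over that existing lock instead of acquiring it again. A sketch of the top-level free built from lines 741-747:

// Sketch: top-level free. The MutexLocker is constructed with
// alreadyLocked == true, so it only unlocks on scope exit and never
// double-locks the mutex returned by the lookup.
static bool
guarded_heap_free(void* address)
{
	guarded_heap_area* area = guarded_heap_get_locked_area_for(sGuardedHeap,
		address);
	if (area == NULL)
		return false;

	MutexLocker locker(area->lock, true);
	return guarded_heap_area_free(*area, address);
}
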
826 // Find the area that contains this page.
827 guarded_heap_area* area = NULL;
836 area = candidate;
840 if (area == NULL) {
841 panic("didn't find area for address %p\n", address);
845 size_t pageIndex = ((addr_t)address - area->base) / B_PAGE_SIZE;
846 guarded_heap_page& page = area->pages[pageIndex];
852 guarded_heap_page& candidate = area->pages[candidateIndex];
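
Lines 826-852 belong to the crash analysis: given a faulting address, the code first finds the containing area, then derives the page index and inspects neighbouring pages to tell an overrun from a use-after-free. A sketch of the lookup portion; the helper name guarded_heap_find_page is hypothetical:

// Sketch: locate the page a faulting address falls into. Examining the
// pages around it lets the report distinguish an overrun into a guard
// page from an access to already-freed memory.
static guarded_heap_page*
guarded_heap_find_page(guarded_heap& heap, void* address)
{
	guarded_heap_area* area = NULL;
	for (guarded_heap_area* candidate = heap.areas; candidate != NULL;
			candidate = candidate->next) {
		if ((addr_t)address >= candidate->base
			&& (addr_t)address < candidate->base + candidate->size) {
			area = candidate;
			break;
		}
	}

	if (area == NULL) {
		panic("didn't find area for address %p\n", address);
		return NULL;
	}

	size_t pageIndex = ((addr_t)address - area->base) / B_PAGE_SIZE;
	return &area->pages[pageIndex];
}
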
890 dump_guarded_heap_area(guarded_heap_area& area)
892 printf("guarded heap area: %p\n", &area);
893 printf("next heap area: %p\n", area.next);
894 printf("guarded heap: %p\n", area.heap);
895 printf("area id: %" B_PRId32 "\n", area.area);
896 printf("base: 0x%" B_PRIxADDR "\n", area.base);
897 printf("size: %" B_PRIuSIZE "\n", area.size);
898 printf("page count: %" B_PRIuSIZE "\n", area.page_count);
899 printf("used pages: %" B_PRIuSIZE "\n", area.used_pages);
900 printf("lock: %p\n", &area.lock);
903 void* item = list_get_next_item(&area.free_list, NULL);
912 item = list_get_next_item(&area.free_list, item);
915 printf("free_list: %p (%" B_PRIuSIZE " free)\n", &area.free_list,
921 for (size_t i = 0; i <= area.page_count; i++) {
922 guarded_heap_page& page = area.pages[i];
923 if (i == area.page_count
933 for (size_t j = 1; j < area.page_count - i; j++) {
934 if ((area.pages[i + j].flags & GUARDED_HEAP_PAGE_FLAG_USED) != 0)
946 printf("pages: %p\n", area.pages);
957 printf("area creation counter: %" B_PRIu32 "\n",
961 guarded_heap_area* area = heap.areas;
962 while (area != NULL) {
964 area = area->next;
978 for (guarded_heap_area* area = heap.areas; area != NULL;
979 area = area->next) {
981 MutexLocker areaLocker(area->lock);
982 for (size_t i = 0; i < area->page_count; i++) {
983 guarded_heap_page& page = area->pages[i];
1059 for (guarded_heap_area* area = sGuardedHeap.areas; area != NULL;
1060 area = area->next) {
1061 MutexLocker areaLocker(area->lock);
1062 dump_guarded_heap_area(*area);
1067 for (size_t i = 0; i < area->page_count; i++) {
1068 dump_guarded_heap_page(area->pages[i]);
1069 if ((area->pages[i].flags & GUARDED_HEAP_PAGE_FLAG_FIRST) != 0)
1070 guarded_heap_print_stack_traces(area->pages[i]);
1120 // but the area ids have changed.
1121 for (guarded_heap_area* area = sGuardedHeap.areas; area != NULL;
1122 area = area->next) {
1123 area->area = area_for(area);
1124 if (area->area < 0)
1125 panic("failed to find area for heap area %p after fork", area);
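
Lines 1120-1125: after a fork() the child keeps the memory mappings, but every mapping receives a fresh area id, so each cached id must be refreshed via area_for(), which resolves an address to the id of the area containing it and returns a negative error code if none does. A minimal usage sketch:

// Sketch: area_for() maps any address inside an area back to its id.
// The guarded_heap_area header itself lies inside the area it
// describes, so its own pointer is a valid lookup key.
area_id id = area_for(area);
if (id < 0)
	panic("failed to find area for heap area %p after fork", area);
area->area = id;
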