Lines matching refs:area (each entry below is a source line from the guarded heap allocator that references "area", prefixed with its line number in the file)

53 	area_id				area;
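
Line 53 sits in the per-area bookkeeping header. Pieced together from the fields this listing touches elsewhere, the structure plausibly looks like the sketch below; field order and any types not visible in the matched lines are assumptions:

	struct guarded_heap_area {
		guarded_heap*		heap;				// owning heap (line 334)
		guarded_heap_area*	next;				// area list link (line 362)
		area_id				area;				// kernel area id, < 0 until post-area init
		addr_t				base;				// first allocatable page (line 346)
		size_t				size;				// allocatable bytes (line 336)
		size_t				page_count;			// pages covered (line 337)
		size_t				used_pages;			// pages in use (line 338)
		void*				protection_cookie;	// for the debug protection calls
		mutex				lock;				// guards pages and free_list (line 353)
		struct list			free_list;			// free page descriptors (line 355)
		guarded_heap_page	pages[0];			// trailing descriptor array, sized at line 341
	};

The metadata size computed at line 341 (header plus one guarded_heap_page per page) is consistent with such a trailing descriptor array.
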
156 guarded_heap_page_protect(guarded_heap_area& area, size_t pageIndex,
159 if (area.area < 0)
162 addr_t address = area.base + pageIndex * B_PAGE_SIZE;
163 vm_set_kernel_area_debug_protection(area.protection_cookie, (void*)address,
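
Lines 156-163 are the helper that grants or revokes access to a single page. Filling in the elided pieces (the protection parameter and the B_PAGE_SIZE length argument are assumptions consistent with the surrounding calls), it plausibly reads:

	static void
	guarded_heap_page_protect(guarded_heap_area& area, size_t pageIndex,
		uint32 protection)
	{
		// Early-boot areas have no valid area id yet (area < 0), so
		// protections cannot be applied here; they are replayed later,
		// see lines 872-893.
		if (area.area < 0)
			return;

		addr_t address = area.base + pageIndex * B_PAGE_SIZE;
		vm_set_kernel_area_debug_protection(area.protection_cookie,
			(void*)address, B_PAGE_SIZE, protection);
	}
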
169 guarded_heap_page_allocate(guarded_heap_area& area, size_t startPageIndex,
180 guarded_heap_page& page = area.pages[startPageIndex + i];
206 list_remove_item(&area.free_list, &page);
210 guarded_heap_page_protect(area, startPageIndex + i, 0);
212 guarded_heap_page_protect(area, startPageIndex + i,
216 T(Allocate(area.heap,
217 (void*)(area.base + (startPageIndex + i) * B_PAGE_SIZE),
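
The two protection calls at lines 210 and 212 are the core of the guard scheme: within an allocation's page run, the last page becomes the guard page and loses all access rights, while the payload pages stay readable and writable. The branch shape below is an assumption inferred from those two calls:

	if (i == pagesNeeded - 1) {
		// Guard page: protection 0, so running off the end of the
		// allocation faults immediately.
		guarded_heap_page_protect(area, startPageIndex + i, 0);
	} else {
		guarded_heap_page_protect(area, startPageIndex + i,
			B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
	}
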
224 guarded_heap_free_page(guarded_heap_area& area, size_t pageIndex,
227 guarded_heap_page& page = area.pages[pageIndex];
230 if (force || area.area < 0)
247 list_add_item(&area.free_list, &page);
249 guarded_heap_page_protect(area, pageIndex, 0);
251 T(Free(area.heap, (void*)(area.base + pageIndex * B_PAGE_SIZE)));
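
Freeing a page (lines 224-251) is the mirror image: the descriptor goes back on the free list and the page loses all access rights, so any use after free faults. Condensed, with the flag handling around line 230 (the force/early-boot case) abbreviated:

	guarded_heap_page& page = area.pages[pageIndex];
	page.flags = GUARDED_HEAP_PAGE_FLAG_DEAD;		// exact flag updates elided
	list_add_item(&area.free_list, &page);
	guarded_heap_page_protect(area, pageIndex, 0);	// revoke all access
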
265 guarded_heap_area_allocate(guarded_heap_area& area, size_t size,
274 if (pagesNeeded > area.page_count - area.used_pages)
277 if (pagesNeeded > area.page_count)
284 = (guarded_heap_page*)list_get_first_item(&area.free_list);
287 page = (guarded_heap_page*)list_get_next_item(&area.free_list, page)) {
292 size_t pageIndex = page - area.pages;
293 if (pageIndex > area.page_count - pagesNeeded)
300 if ((area.pages[pageIndex + j].flags & GUARDED_HEAP_PAGE_FLAG_USED)
314 void* result = (void*)((area.base + pageIndex * B_PAGE_SIZE
317 guarded_heap_page_allocate(area, pageIndex, pagesNeeded, size,
320 area.used_pages += pagesNeeded;
321 grow = guarded_heap_pages_allocated(*area.heap, pagesNeeded);
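
Lines 265-321 are the first-fit scan. Every allocation reserves one extra page for the guard, and scanning the free list from the head means the pages freed longest ago are reused first, which keeps recently freed pages protected for as long as possible. A sketch of the scan; the rounding formula and names outside the matched lines are assumptions:

	// Whole pages, plus one guard page per allocation.
	uint32 pagesNeeded = (size + B_PAGE_SIZE - 1) / B_PAGE_SIZE + 1;
	if (pagesNeeded > area.page_count - area.used_pages)
		return NULL;

	for (guarded_heap_page* page
			= (guarded_heap_page*)list_get_first_item(&area.free_list);
		page != NULL;
		page = (guarded_heap_page*)list_get_next_item(&area.free_list,
			page)) {
		// Pointer arithmetic recovers the page index (line 292)...
		size_t pageIndex = page - area.pages;
		if (pageIndex > area.page_count - pagesNeeded)
			continue;
		// ...then line 300 verifies that pagesNeeded consecutive
		// descriptors are all unused before committing the run.
	}
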
333 guarded_heap_area* area = (guarded_heap_area*)baseAddress;
334 area->heap = &heap;
335 area->area = id;
336 area->size = size;
337 area->page_count = area->size / B_PAGE_SIZE;
338 area->used_pages = 0;
341 + area->page_count * sizeof(guarded_heap_page)
344 area->page_count -= pagesNeeded;
345 area->size = area->page_count * B_PAGE_SIZE;
346 area->base = (addr_t)baseAddress + pagesNeeded * B_PAGE_SIZE;
348 if (area->area >= 0 && vm_prepare_kernel_area_debug_protection(area->area,
349 &area->protection_cookie) != B_OK) {
353 mutex_init(&area->lock, "guarded_heap_area_lock");
355 list_init_etc(&area->free_list,
358 for (size_t i = 0; i < area->page_count; i++)
359 guarded_heap_free_page(*area, i, true);
362 area->next = heap.areas;
363 heap.areas = area;
364 heap.page_count += area->page_count;
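
Lines 333-364 show that an area is fully self-describing: the guarded_heap_area header and the per-page descriptor array live in the first pages of the area itself, and lines 344-346 shrink the allocatable range to exclude them. A sketch of that carve-out; the rounding at line 341 is completed by assumption:

	size_t pagesNeeded = (sizeof(guarded_heap_area)
		+ area->page_count * sizeof(guarded_heap_page)
		+ B_PAGE_SIZE - 1) / B_PAGE_SIZE;

	area->page_count -= pagesNeeded;	// metadata pages are not allocatable
	area->size = area->page_count * B_PAGE_SIZE;
	area->base = (addr_t)baseAddress + pagesNeeded * B_PAGE_SIZE;

Afterwards every remaining page is pushed onto the free list via guarded_heap_free_page(*area, i, true) (line 359) and the area is linked into heap.areas (lines 362-364).
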
390 panic("failed to allocate a new heap area");
419 for (guarded_heap_area* area = heap.areas; area != NULL;
420 area = area->next) {
422 MutexLocker locker(area->lock);
423 result = guarded_heap_area_allocate(*area, size, alignment, flags,
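
The top-level allocation path (lines 419-423) simply tries each area in turn under its lock; only when all areas are exhausted is the heap grown, and the panic at line 390 fires if growing fails. With assumed surrounding names:

	void* result = NULL;
	for (guarded_heap_area* area = heap.areas; area != NULL;
			area = area->next) {
		MutexLocker locker(area->lock);
		result = guarded_heap_area_allocate(*area, size, alignment, flags,
			grow);
		if (result != NULL)
			break;
	}
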
449 for (guarded_heap_area* area = heap.areas; area != NULL;
450 area = area->next) {
451 if ((addr_t)address < area->base)
454 if ((addr_t)address >= area->base + area->size)
457 mutex_lock(&area->lock);
458 return area;
461 panic("guarded heap area for address %p not found", address);
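
Lines 449-461 reconstruct almost completely into the area lookup. Note that it returns with the area's mutex already held; the callers adopt it (lines 539/552). The signature is inferred from the call sites at lines 534/547, and only the braces and the trailing return are assumed:

	static guarded_heap_area*
	guarded_heap_get_locked_area_for(guarded_heap& heap, void* address)
	{
		for (guarded_heap_area* area = heap.areas; area != NULL;
				area = area->next) {
			if ((addr_t)address < area->base)
				continue;
			if ((addr_t)address >= area->base + area->size)
				continue;

			mutex_lock(&area->lock);
			return area;
		}

		panic("guarded heap area for address %p not found", address);
		return NULL;
	}
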
467 guarded_heap_area_page_index_for(guarded_heap_area& area, void* address)
469 size_t pageIndex = ((addr_t)address - area.base) / B_PAGE_SIZE;
470 guarded_heap_page& page = area.pages[pageIndex];
474 return area.page_count;
480 return area.page_count;
486 return area.page_count;
492 return area.page_count;
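
In lines 467-492 the function validates that the address is the first page of a live allocation; page_count doubles as the "invalid index" sentinel, which is why each failure path returns area.page_count. Presumably each of the four returns (lines 474, 480, 486, 492) follows a diagnostic for a distinct misuse (not in use, a guard page, not the first page, already dead); the flag checks themselves are elided in this listing. Caller-side use of the sentinel (cf. lines 502-503):

	size_t pageIndex = guarded_heap_area_page_index_for(area, address);
	if (pageIndex >= area.page_count)
		return;		// address did not pass validation
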
500 guarded_heap_area_free(guarded_heap_area& area, void* address, uint32 flags)
502 size_t pageIndex = guarded_heap_area_page_index_for(area, address);
503 if (pageIndex >= area.page_count)
507 guarded_heap_page* page = &area.pages[pageIndex];
510 guarded_heap_free_page(area, pageIndex);
514 page = &area.pages[pageIndex];
518 guarded_heap_free_page(area, pageIndex);
522 area.used_pages -= pagesFreed;
523 atomic_add((int32*)&area.heap->used_pages, -pagesFreed);
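
The free path (lines 500-523) walks the allocation page by page until it has also freed the trailing guard page, then settles the counters. The loop condition and counter handling below are assumptions consistent with the descriptor reloads at lines 507/514:

	size_t pagesFreed = 0;
	guarded_heap_page* page = &area.pages[pageIndex];
	while ((page->flags & GUARDED_HEAP_PAGE_FLAG_GUARD) == 0) {
		guarded_heap_free_page(area, pageIndex);
		pagesFreed++;
		pageIndex++;
		page = &area.pages[pageIndex];
	}

	// Free the guard page as well.
	guarded_heap_free_page(area, pageIndex);
	pagesFreed++;

	area.used_pages -= pagesFreed;
	atomic_add((int32*)&area.heap->used_pages, -pagesFreed);
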
534 guarded_heap_area* area = guarded_heap_get_locked_area_for(sGuardedHeap,
536 if (area == NULL)
539 MutexLocker locker(area->lock, true);
540 guarded_heap_area_free(*area, address, flags);
547 guarded_heap_area* area = guarded_heap_get_locked_area_for(sGuardedHeap,
549 if (area == NULL)
552 MutexLocker locker(area->lock, true);
554 size_t pageIndex = guarded_heap_area_page_index_for(*area, address);
555 if (pageIndex >= area->page_count)
558 guarded_heap_page& page = area->pages[pageIndex];
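
Both the free and the realloc entry points (lines 534-558) follow the same locking pattern: guarded_heap_get_locked_area_for() returns with the area's mutex held, and the second MutexLocker argument (true, meaning "already locked") adopts it so it is released on scope exit:

	guarded_heap_area* area = guarded_heap_get_locked_area_for(sGuardedHeap,
		address);
	if (area == NULL)
		return;

	MutexLocker locker(area->lock, true);	// adopt the held lock
	guarded_heap_area_free(*area, address, flags);
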
615 // Find the area that contains this page.
616 guarded_heap_area* area = NULL;
625 area = candidate;
629 if (area == NULL) {
630 kprintf("didn't find area for address\n");
634 size_t pageIndex = ((addr_t)address - area->base) / B_PAGE_SIZE;
635 guarded_heap_page& page = area->pages[pageIndex];
670 // Find the area that contains this page.
671 guarded_heap_area* area = NULL;
682 area = candidate;
686 if (area == NULL) {
687 kprintf("didn't find area for address\n");
691 kprintf("guarded heap area: %p\n", area);
692 kprintf("next heap area: %p\n", area->next);
693 kprintf("guarded heap: %p\n", area->heap);
694 kprintf("area id: %" B_PRId32 "\n", area->area);
695 kprintf("base: 0x%" B_PRIxADDR "\n", area->base);
696 kprintf("size: %" B_PRIuSIZE "\n", area->size);
697 kprintf("page count: %" B_PRIuSIZE "\n", area->page_count);
698 kprintf("used pages: %" B_PRIuSIZE "\n", area->used_pages);
699 kprintf("protection cookie: %p\n", area->protection_cookie);
700 kprintf("lock: %p\n", &area->lock);
703 void* item = list_get_first_item(&area->free_list);
712 item = list_get_next_item(&area->free_list, item);
715 kprintf("free_list: %p (%" B_PRIuSIZE " free)\n", &area->free_list,
721 for (size_t i = 0; i <= area->page_count; i++) {
722 guarded_heap_page& page = area->pages[i];
723 if (i == area->page_count
733 for (size_t j = 1; j < area->page_count - i; j++) {
734 if ((area->pages[i + j].flags & GUARDED_HEAP_PAGE_FLAG_USED) != 0)
746 kprintf("pages: %p\n", area->pages);
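
The kernel debugger commands (lines 615-746) locate the containing area by scanning the area list by hand rather than calling guarded_heap_get_locked_area_for(), presumably because debugger commands must not block on locks. Note also that the dump loop at line 721 deliberately runs to i == area->page_count so the final run of used pages gets printed; the short-circuit at line 723 keeps the out-of-range descriptor from being inspected. The free-page count printed at line 715 comes from walking the free list; a sketch with an assumed counter name:

	size_t freeCount = 0;
	void* item = list_get_first_item(&area->free_list);
	while (item != NULL) {
		freeCount++;
		item = list_get_next_item(&area->free_list, item);
	}

	kprintf("free_list: %p (%" B_PRIuSIZE " free)\n", &area->free_list,
		freeCount);
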
769 kprintf("area creation counter: %" B_PRId32 "\n",
773 guarded_heap_area* area = heap->areas;
774 while (area != NULL) {
776 area = area->next;
816 guarded_heap_area* area = sGuardedHeap.areas;
817 while (area != NULL) {
818 for (size_t i = 0; i < area->page_count; i++) {
819 guarded_heap_page& page = area->pages[i];
842 area = area->next;
872 for (guarded_heap_area* area = sGuardedHeap.areas; area != NULL;
873 area = area->next) {
874 if (area->area >= 0)
877 area_id id = area_for((void*)area->base);
879 &area->protection_cookie) != B_OK) {
884 area->area = id;
885 for (size_t i = 0; i < area->page_count; i++) {
886 guarded_heap_page& page = area->pages[i];
890 guarded_heap_page_protect(*area, i,
893 guarded_heap_page_protect(*area, i, 0);
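
Lines 872-893 are the late-init fixup for areas created before the VM was fully up: area_for() recovers the real area id, the protection cookie is prepared, and the per-page protections that were skipped while area->area < 0 are replayed. The flag test below is an assumption inferred from the two protect calls at lines 890/893 (the real check may also exclude dead pages):

	for (size_t i = 0; i < area->page_count; i++) {
		guarded_heap_page& page = area->pages[i];
		if ((page.flags & GUARDED_HEAP_PAGE_FLAG_USED) != 0
			&& (page.flags & GUARDED_HEAP_PAGE_FLAG_GUARD) == 0) {
			guarded_heap_page_protect(*area, i,
				B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
		} else
			guarded_heap_page_protect(*area, i, 0);
	}
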
900 "Dump info about a guarded heap area",
901 "<address>\nDump info about guarded heap area containing address.\n",