Lines Matching defs:pages

473 	// unmap and free unused pages

567 MemoryManager::Free(void* pages, uint32 flags)
569 TRACE("MemoryManager::Free(%p, %#" B_PRIx32 ")\n", pages, flags);
571 T(Free(pages, flags));
574 Area* area = _AreaForAddress((addr_t)pages);
576 ((addr_t)pages % SLAB_AREA_SIZE) / SLAB_CHUNK_SIZE_LARGE];
579 ASSERT((addr_t)pages >= metaChunk->chunkBase);
580 ASSERT(((addr_t)pages % metaChunk->chunkSize) == 0);
583 uint16 chunkIndex = _ChunkIndexForAddress(metaChunk, (addr_t)pages);
593 _FreeChunk(area, metaChunk, chunk, (addr_t)pages, false, flags);
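
The hits above are the slab memory manager's Free() path: rounding the pointer down to the area size recovers the Area bookkeeping (574), the offset within the area selects the metachunk (576), and the offset within the metachunk gives the chunk index (583). A minimal sketch of that arithmetic, with assumed power-of-two sizes standing in for SLAB_AREA_SIZE and SLAB_CHUNK_SIZE_LARGE:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uintptr_t addr_t;

    // Assumed sizes; the real SLAB_* constants live with the slab code.
    static const addr_t kPageSize = 4096;
    static const addr_t kAreaSize = 2048 * kPageSize;       // SLAB_AREA_SIZE (assumed)
    static const addr_t kChunkSizeLarge = 128 * kPageSize;  // SLAB_CHUNK_SIZE_LARGE (assumed)

    int
    main()
    {
        addr_t pages = 0x80000000 + 3 * kChunkSizeLarge + 5 * kPageSize;
        size_t chunkSize = kPageSize;    // pretend a small-chunk metachunk

        // round down to the area size to find the Area (line 574)
        addr_t areaBase = pages & ~(kAreaSize - 1);

        // the offset within the area selects the metachunk (lines 576/728)
        size_t metaChunkIndex = (pages % kAreaSize) / kChunkSizeLarge;

        // the offset within the metachunk gives the chunk index (line 583);
        // this ignores the admin header that offsets the first metachunk's
        // chunkBase in the real code
        addr_t chunkBase = areaBase + metaChunkIndex * kChunkSizeLarge;
        assert(pages % chunkSize == 0);  // the alignment ASSERT (line 580)
        size_t chunkIndex = (pages - chunkBase) / chunkSize;

        printf("area %#lx, metachunk %zu, chunk %zu\n",
            (unsigned long)areaBase, metaChunkIndex, chunkIndex);
        return 0;
    }

Because the sizes are powers of two, every step is a mask or a division by a constant; no lookup table is consulted until the metachunk itself is in hand.
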
693 MemoryManager::FreeRawOrReturnCache(void* pages, uint32 flags)
695 TRACE("MemoryManager::FreeRawOrReturnCache(%p, %#" B_PRIx32 ")\n", pages,
698 T(FreeRawOrReturnCache(pages, flags));
706 addr_t areaBase = _AreaBaseAddressForAddress((addr_t)pages);
716 VMArea* area = addressSpace->LookupArea((addr_t)pages);
719 if (area != NULL && (addr_t)pages == area->Base())
722 panic("freeing unknown block %p from area %p", pages, area);
728 ((addr_t)pages % SLAB_AREA_SIZE) / SLAB_CHUNK_SIZE_LARGE];
732 ASSERT((addr_t)pages >= metaChunk->chunkBase);
733 uint16 chunkIndex = _ChunkIndexForAddress(metaChunk, (addr_t)pages);
741 ASSERT((addr_t)pages == _ChunkAddress(metaChunk, chunk));
742 ASSERT(reference > (addr_t)pages);
744 size_t size = reference - (addr_t)pages + 1;
748 _UnmapChunk(area->vmArea, (addr_t)pages, size, flags);
754 _FreeChunk(area, metaChunk, chunk + i, (addr_t)pages, true, flags);
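
FreeRawOrReturnCache() (693) distinguishes two cases. A pointer that is not chunk-backed is looked up in the address space (716); if it is the base of its own VMArea (719) it was a large raw allocation and is handed back to the VM whole, otherwise the panic at 722 fires. For a multi-chunk raw allocation inside a slab area, the assertions pin the pointer to a chunk start (741) and the stored reference to an address past it (742): the reference holds the address of the allocation's last byte, so the size comes back as reference - pages + 1 (744), the whole range is unmapped at once (748), and the chunks are freed one by one (754). A small, self-contained sketch of the size recovery, with a stand-in Chunk type:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uintptr_t addr_t;

    // Stand-in for the slab Chunk; for a multi-chunk raw allocation the
    // first chunk's reference stores the address of the last byte (an
    // assumption consistent with the ASSERTs above).
    struct Chunk { addr_t reference; };

    int
    main()
    {
        const size_t chunkSize = 4096;
        addr_t pages = 0x100000;                      // base of a 3-chunk allocation
        Chunk chunk = { pages + 3 * chunkSize - 1 };  // reference = last byte

        assert(chunk.reference > pages);              // the ASSERT at line 742
        size_t size = chunk.reference - pages + 1;    // line 744
        size_t chunkCount = size / chunkSize;

        printf("%zu bytes in %zu chunks\n", size, chunkCount);
        // The real code then unmaps the whole range once (line 748) and
        // frees the chunks individually (line 754).
        return 0;
    }
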
1459 // reserve the pages we need now
1473 // map the pages
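
The hits at 1459/1473 are from the chunk-mapping path: physical pages are reserved up front, so the mapping loop that follows cannot fail partway for lack of memory. A sketch of that reserve-then-map ordering, with hypothetical stubs in place of the kernel's page-reservation and translation-map calls:

    #include <stdint.h>
    #include <stdio.h>

    typedef uintptr_t addr_t;
    static const size_t kPageSize = 4096;

    // Hypothetical stubs; the real code reserves through the VM's page
    // reservation mechanism and maps through the area's translation map.
    static bool
    reserve_pages(size_t count)
    {
        printf("reserved %zu pages\n", count);
        return true;
    }

    static void
    map_page(addr_t virtualAddress)
    {
        printf("mapped page at %#lx\n", (unsigned long)virtualAddress);
    }

    // Sketch of the ordering behind the hits at 1459/1473.
    static bool
    MapChunk(addr_t address, size_t size)
    {
        size_t pageCount = size / kPageSize;

        // reserve the pages we need now: once this succeeds, the loop
        // below cannot fail for lack of physical memory
        if (!reserve_pages(pageCount))
            return false;

        // map the pages into the chunk's address range, one by one
        for (size_t i = 0; i < pageCount; i++)
            map_page(address + i * kPageSize);

        return true;
    }

    int
    main()
    {
        MapChunk(0x100000, 2 * kPageSize);
        return 0;
    }
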
1519 // unmap the pages
1525 // free the pages
1528 VMCachePagesTree::Iterator it = cache->pages.GetIterator(
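
Teardown runs in the opposite order (1519, 1525): unmap first, then walk the cache's page tree from the chunk's first offset and free each page. The GetIterator() arguments are truncated in the hit, but the pattern is a forward walk from the first page at or past a given offset. A sketch of that iteration, using a std::map keyed by page offset as a stand-in for VMCachePagesTree:

    #include <stdint.h>
    #include <stdio.h>
    #include <map>

    // Stand-in for vm_page; only the cache offset matters for the walk.
    struct Page { uint64_t cacheOffset; };

    int
    main()
    {
        // the pages of a cache, keyed by offset: a stand-in for cache->pages
        std::map<uint64_t, Page> pages = {
            {0, {0}}, {1, {1}}, {2, {2}}, {3, {3}}
        };

        uint64_t firstOffset = 1;   // first page of the range to free
        uint64_t endOffset = 3;     // one past the last page

        // like GetIterator(first, true, true): start at the first page
        // with offset >= firstOffset, then walk forward
        for (auto it = pages.lower_bound(firstOffset);
                it != pages.end() && it->first < endOffset;) {
            printf("freeing page at offset %llu\n",
                (unsigned long long)it->first);
            it = pages.erase(it);   // free the page, advance the iterator
        }
        return 0;
    }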