Lines Matching refs:page — each entry gives the original source line number followed by the matching line.

24 	2) segment + page translation. The first mechanism does this directly
35 (page table) |
48 setting a register to another page directory, since we only have one
49 page table containing both kernel and user address mappings. Instead we
70 * The current locking scheme is insufficient. The page table is a resource
158 vm_page* page = vm_lookup_page(address / B_PAGE_SIZE);
159 if (!page)
160 panic("destroy_tmap: didn't find pgtable page\n");
161 DEBUG_PAGE_ACCESS_START(page);
162 vm_page_set_state(page, PAGE_STATE_FREE);
223 // allocate a physical page mapper
230 // allocate the page directory
236 // look up the page directory's physical address
247 // get the physical page mapper
292 // Search for the page table entry using the primary hash value
369 // Search for a free page table slot using the primary hash value
416 // check to see if a page table exists for this range
420 vm_page *page;
423 page = vm_page_allocate_page(reservation,
426 DEBUG_PAGE_ACCESS_END(page);
428 pgtable = (phys_addr_t)page->physical_page_number * B_PAGE_SIZE;
430 TRACE("map_tmap: asked for free page for pgtable. 0x%lx\n", pgtable);
438 // update any other page directories, if it maps kernel space
511 // no page table here, move the start up to access the next page
526 // page mapping not valid
530 TRACE("unmap_tmap: removing page 0x%lx\n", start);
567 // get the area's first physical page
604 // no page table here, move the start up to access the next page
647 /*! Caller must have locked the cache of the page to be unmapped.
711 // page mapping not valid
724 // NOTE: Between clearing the page table entry and Flush() other
726 // same team) could still access the page in question via their cached
728 // effect that the page looks unmodified (and might thus be recycled),
772 // no page table here, move the start up to access the next page
801 // get the page
802 vm_page* page = vm_lookup_page(
804 ASSERT(page != NULL);
806 DEBUG_PAGE_ACCESS_START(page);
808 // transfer the accessed/dirty flags to the page
810 page->accessed = true;
812 page->modified = true;
815 // page
819 = page->mappings.GetIterator();
828 page->mappings.Remove(mapping);
831 page->DecrementWiredCount();
833 if (!page->IsMapped()) {
837 if (page->Cache()->temporary)
838 vm_page_set_state(page, PAGE_STATE_INACTIVE);
839 else if (page->modified)
840 vm_page_set_state(page, PAGE_STATE_MODIFIED);
842 vm_page_set_state(page, PAGE_STATE_CACHED);
846 DEBUG_PAGE_ACCESS_END(page);
854 // TODO: As in UnmapPage() we can lose page dirty flags here. ATM it's not
894 vm_page* page = mapping->page;
895 page->mappings.Remove(mapping);
897 VMCache* cache = page->Cache();
900 if (!page->IsMapped()) {
907 + ((page->cache_offset * B_PAGE_SIZE) - area->cache_offset);
911 panic("page %p has mapping for area %p (%#" B_PRIxADDR "), but "
912 "has no page dir entry", page, area, address);
928 panic("page %p has mapping for area %p (%#" B_PRIxADDR "), but "
929 "has no page table entry", page, area, address);
933 // transfer the accessed/dirty flags to the page and invalidate
936 page->accessed = true;
943 page->modified = true;
946 DEBUG_PAGE_ACCESS_START(page);
949 vm_page_set_state(page, PAGE_STATE_INACTIVE);
950 else if (page->modified)
951 vm_page_set_state(page, PAGE_STATE_MODIFIED);
953 vm_page_set_state(page, PAGE_STATE_CACHED);
955 DEBUG_PAGE_ACCESS_END(page);
1025 // read in the page state flags
1063 // map page table entry
1071 // read in the page state flags
1116 // no page table here, move the start up to access the next page
1132 // page mapping not valid
1136 TRACE("protect_tmap: protect page 0x%lx\n", start);
1264 // page could have been accessed. We try to compensate by considering
1269 vm_page* page = vm_lookup_page(physicalAddress / B_PAGE_SIZE);
1270 if (page == NULL)
1273 _modified |= page->modified;
1275 return page->accessed;
1303 // page mapping not valid
1308 // page was accessed -- just clear the flags
1314 // page hasn't been accessed -- unmap it
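The matches at lines 292 and 369 appear to refer to the hashed page table of the classic 32-bit PowerPC MMU: a lookup first probes the PTE group selected by the primary hash, and falls back to the secondary hash when that group has no matching or free slot. Below is a minimal sketch of the two hash functions, assuming the standard architectural layout; the function names are illustrative and do not appear in this file.

#include <stdint.h>

// Primary hash for the classic 32-bit PowerPC hashed page table: XOR of the
// low-order 19 bits of the VSID (taken from the segment register) with the
// 16-bit page index taken from the effective address.
static uint32_t
primary_hash(uint32_t vsid, uint32_t effectiveAddress)
{
	uint32_t pageIndex = (effectiveAddress >> 12) & 0xffff;
	return (vsid & 0x7ffff) ^ pageIndex;
}

// Secondary hash, tried when the primary PTE group is full (compare the
// "free page table slot" search at line 369): the one's complement of the
// primary hash, kept to 19 bits.
static uint32_t
secondary_hash(uint32_t vsid, uint32_t effectiveAddress)
{
	return ~primary_hash(vsid, effectiveAddress) & 0x7ffff;
}

Either hash value, combined with the HTABORG/HTABMASK fields of SDR1, selects an eight-entry PTE group (PTEG) in the hash table, which is then scanned for the wanted entry or a free slot.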