Lines Matching refs:page

5  * x86_64 specific management of page tables
10 * sized page is the number of page table levels + 1.
13 * Warning: Additional slots will be required to map a BASE_PAGE_SIZE size page,
67 // returns whether va1 and va2 share a page directory entry
74 // returns whether va1 and va2 share a page directory pointer table entry
79 // returns whether va1 and va2 share a page map level 4 entry
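The three predicates above boil down to comparing the high-order address bits that select the corresponding entry. A minimal sketch, assuming the standard x86_64 4-level layout (one page-directory entry covers 2 MiB, one PDPT entry 1 GiB, one PML4 entry 512 GiB); the actual helpers may additionally treat the second address as an exclusive range end, which this sketch ignores:

    #include <stdbool.h>
    #include <stdint.h>

    typedef uint64_t genvaddr_t;

    /* Span covered by one entry at each level of the x86_64 hierarchy. */
    #define PDIR_ENTRY_BITS  21   /* one page-directory entry maps 2 MiB  */
    #define PDPT_ENTRY_BITS  30   /* one PDPT entry maps 1 GiB            */
    #define PML4_ENTRY_BITS  39   /* one PML4 entry maps 512 GiB          */

    /* Same page-directory entry (and hence same leaf page table) iff the
     * two addresses fall into the same 2 MiB region. */
    static inline bool is_same_pdir(genvaddr_t va1, genvaddr_t va2)
    {
        return (va1 >> PDIR_ENTRY_BITS) == (va2 >> PDIR_ENTRY_BITS);
    }

    static inline bool is_same_pdpt(genvaddr_t va1, genvaddr_t va2)
    {
        return (va1 >> PDPT_ENTRY_BITS) == (va2 >> PDPT_ENTRY_BITS);
    }

    static inline bool is_same_pml4(genvaddr_t va1, genvaddr_t va2)
    {
        return (va1 >> PML4_ENTRY_BITS) == (va2 >> PML4_ENTRY_BITS);
    }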
136 * \brief Returns the vnode for the page directory mapping a given vspace
213 * \brief Returns the vnode for the page directory pointer table mapping for a
226 * \brief Returns the vnode for the page directory mapping a given vspace
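These get_* helpers return the intermediate vnode (page directory or page directory pointer table) covering a given vspace address, presumably creating and mapping it on demand when it does not exist yet. A rough sketch of the lookup half only, reusing pmap_find_vnode() and X86_64_PML4_BASE() from the fragments below; the on-demand allocation is omitted and the root-vnode parameter is an assumption:

    /* Sketch: find the PDPT vnode covering vaddr among the children of the
     * PML4 (root) vnode.  Returns NULL if no PDPT has been installed yet. */
    static struct vnode *lookup_pdpt(struct vnode *pml4, genvaddr_t vaddr)
    {
        return pmap_find_vnode(pml4, X86_64_PML4_BASE(vaddr));
    }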
306 printf("page already exists in 0x%"
310 // clean out empty page tables. We do this here because we benefit
311 // from having the page tables in place when doing lots of small
318 struct vnode *page = slab_alloc(&pmap->p.m.slab);
319 assert(page);
320 page->v.is_vnode = false;
321 page->is_cloned = false;
322 page->v.entry = table_base;
323 page->v.cap = frame;
324 page->v.u.frame.offset = offset;
325 page->v.u.frame.flags = flags;
326 page->v.u.frame.pte_count = pte_count;
327 page->u.frame.vaddr = vaddr;
328 page->u.frame.cloned_count = 0;
331 pmap_vnode_insert_child(ptable, page);
333 set_mapping_cap(&pmap->p, page, ptable, table_base);
338 assert(!capref_is_null(page->v.mapping));
340 pmap_flags, offset, pte_count, page->v.mapping);
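Read together, the fragments from source lines 318-340 record one new frame mapping: a metadata vnode is taken from the slab, filled in with the frame capability and mapping parameters, hooked below the leaf page-table vnode, given a slot for its mapping capability, and only then handed to the kernel. A condensed, annotated restatement of those lines (error handling elided):

    struct vnode *page = slab_alloc(&pmap->p.m.slab);   /* metadata for this mapping   */
    page->v.is_vnode          = false;       /* a leaf frame mapping, not a page table */
    page->v.entry             = table_base;  /* first slot used in the leaf page table */
    page->v.cap               = frame;       /* frame capability being mapped          */
    page->v.u.frame.offset    = offset;      /* offset of the mapping within the frame */
    page->v.u.frame.flags     = flags;       /* requested mapping flags                */
    page->v.u.frame.pte_count = pte_count;   /* number of PTEs this mapping occupies   */
    pmap_vnode_insert_child(ptable, page);   /* attach below the leaf page-table vnode */
    set_mapping_cap(&pmap->p, page, ptable, table_base); /* slot for the mapping cap   */
    /* ... vnode_map() then installs the PTEs and fills page->v.mapping */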
359 // determine page size and relevant address part
378 // huge page branch (1GB)
390 // large page branch (2MB)
400 // round to the next full page and calculate end address and #ptes
418 "; pte_count = %zd; frame bytes = 0x%zx; page size = 0x%zx\n",
437 else { // multiple leaf page tables
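The branch comments above select the largest page size the request permits and then round the byte count up to whole pages of that size; when the resulting PTE range does not fit into a single leaf table, the multi-table path at source line 437 takes over. A worked sketch of the size selection, assuming the usual x86_64 sizes; the alignment checks and the requirement that the caller explicitly asks for large or huge pages via flags are assumptions about the surrounding code:

    #include <stddef.h>
    #include <stdint.h>

    #define BASE_PAGE_SIZE   (4UL * 1024)                  /* 4 KiB */
    #define LARGE_PAGE_SIZE  (2UL * 1024 * 1024)           /* 2 MiB */
    #define HUGE_PAGE_SIZE   (1UL * 1024 * 1024 * 1024)    /* 1 GiB */

    #define DIVIDE_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

    /* Pick a page size for mapping `size` bytes at `vaddr`/`offset` and
     * compute how many PTEs of that size the mapping needs. */
    static size_t choose_page_size(uint64_t vaddr, uint64_t offset, size_t size,
                                   size_t *pte_count)
    {
        size_t page_size = BASE_PAGE_SIZE;
        if (size >= HUGE_PAGE_SIZE &&
            vaddr % HUGE_PAGE_SIZE == 0 && offset % HUGE_PAGE_SIZE == 0) {
            page_size = HUGE_PAGE_SIZE;    /* huge page branch (1GB)  */
        } else if (size >= LARGE_PAGE_SIZE &&
                   vaddr % LARGE_PAGE_SIZE == 0 && offset % LARGE_PAGE_SIZE == 0) {
            page_size = LARGE_PAGE_SIZE;   /* large page branch (2MB) */
        }
        /* round to the next full page: e.g. 5 KiB of 4 KiB pages -> 2 PTEs */
        *pte_count = DIVIDE_ROUND_UP(size, page_size);
        return page_size;
    }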
538 * \brief Create page mappings
565 // Adjust the parameters to page boundaries
608 struct vnode *page;
618 * \arg pt the last-level page table meta-data we found if any
619 * \arg page the page meta-data we found if any
625 struct vnode *pdpt = NULL, *pdir = NULL, *pt = NULL, *page = NULL;
631 // find page and last-level page table (can be pdir or pdpt)
633 page = pmap_find_vnode(pdpt, X86_64_PDPT_BASE(vaddr));
634 if (page && page->v.is_vnode) { // not 1G pages
635 pdir = page;
636 page = pmap_find_vnode(pdir, X86_64_PDIR_BASE(vaddr));
637 if (page && page->v.is_vnode) { // not 2M pages
638 pt = page;
639 page = pmap_find_vnode(pt, X86_64_PTABLE_BASE(vaddr));
643 } else if (page) {
644 assert(is_large_page(page));
650 } else if (page) {
651 assert(is_huge_page(page));
660 info->page = page;
665 if (pt && page) {
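The descent in source lines 633-651 reads as one loop unrolled by hand: start at the PDPT covering vaddr, keep stepping down while the entry found is itself a page table (v.is_vnode), and stop early when it is a 1 GiB or 2 MiB leaf mapping. The same walk, condensed and annotated with names as in the fragments above:

    page = pmap_find_vnode(pdpt, X86_64_PDPT_BASE(vaddr));
    if (page && page->v.is_vnode) {                 /* entry is a page directory      */
        pdir = page;
        page = pmap_find_vnode(pdir, X86_64_PDIR_BASE(vaddr));
        if (page && page->v.is_vnode) {             /* entry is a leaf page table     */
            pt   = page;
            page = pmap_find_vnode(pt, X86_64_PTABLE_BASE(vaddr)); /* 4 KiB mapping   */
        }                                           /* else: 2 MiB mapping under pdir */
    }                                               /* else: 1 GiB mapping under pdpt */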
681 assert(info.page_table && info.page_table->v.is_vnode && info.page && !info.page->v.is_vnode);
683 if (info.page->v.u.frame.pte_count == pte_count) {
684 err = vnode_unmap(info.page_table->v.cap, info.page->v.mapping);
691 // delete & free page->v.mapping after doing vnode_unmap()
692 err = cap_delete(info.page->v.mapping);
697 err = pmap->p.slot_alloc->free(pmap->p.slot_alloc, info.page->v.mapping);
706 pmap_remove_vnode(info.page_table, info.page);
707 slab_free(&pmap->p.m.slab, info.page);
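The teardown order visible above matters: the kernel-level mapping is undone first, only then is the now-stale mapping capability deleted and its slot returned, and the user-space bookkeeping is dropped last. Condensed from the fragments, with error handling elided:

    /* 1. remove the PTEs: unmap the frame from its leaf page table */
    err = vnode_unmap(info.page_table->v.cap, info.page->v.mapping);
    /* 2. delete & free the mapping capability only after the unmap  */
    err = cap_delete(info.page->v.mapping);
    err = pmap->p.slot_alloc->free(pmap->p.slot_alloc, info.page->v.mapping);
    /* 3. finally drop the user-space metadata for the mapping       */
    pmap_remove_vnode(info.page_table, info.page);
    slab_free(&pmap->p.m.slab, info.page);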
714 * \brief Remove page mappings
729 // determine if we unmap a larger page
738 assert(!info.page->v.is_vnode);
740 if (info.page->v.entry > info.table_base) {
752 (is_same_pdpt(vaddr, vend) && is_large_page(info.page)) ||
753 (is_same_pml4(vaddr, vend) && is_huge_page(info.page)))
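This guard decides whether the whole [vaddr, vend) range is covered by a single mapping object: 4 KiB mappings must stay within one leaf page table (one page-directory entry), 2 MiB mappings within one page directory (one PDPT entry), and 1 GiB mappings within one PDPT (one PML4 entry). Spelled out as one condition; the is_same_pdir clause for base pages does not appear in the quoted fragments and is inferred from the pattern of the other two, so treat it as an assumption:

    bool single_mapping =
           is_same_pdir(vaddr, vend)                                /* 4 KiB pages */
        || (is_same_pdpt(vaddr, vend) && is_large_page(info.page))  /* 2 MiB pages */
        || (is_same_pml4(vaddr, vend) && is_huge_page(info.page));  /* 1 GiB pages */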
824 assert(info.page_table && info.page_table->v.is_vnode && info.page && !info.page->v.is_vnode);
830 // page (as offset from first page in mapping), #affected
833 size_t off = info.table_base - info.page->v.entry;
848 * default strategy is to only use selective flushing for single page
851 // do assisted selective flush for single page
855 err = invoke_mapping_modify_flags(info.page->v.mapping, off, pages,
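For a flag change, the code first works out where inside the existing mapping the affected range begins and how many pages it touches, and only requests an assisted selective TLB flush when exactly one page is affected. A sketch of that arithmetic; the DIVIDE_ROUND_UP step and the convention of passing 0 as the virtual-address hint to mean "no selective flush" are assumptions:

    /* offset of the first affected page, counted from the start of the mapping */
    size_t off   = info.table_base - info.page->v.entry;
    /* number of pages whose flags are modified */
    size_t pages = DIVIDE_ROUND_UP(size, info.page_size);
    /* selective flushing only pays off for a single page; otherwise let the
     * kernel flush more broadly (hint of 0 = no per-page flush)              */
    genvaddr_t va_hint = (pages == 1) ? vaddr : 0;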
869 * \brief Modify page mapping
884 // determine if we modify flags on a larger page
892 assert(info.page && !info.page->v.is_vnode);
905 (is_same_pdpt(vaddr, vend) && is_large_page(info.page)) ||
906 (is_same_pml4(vaddr, vend) && is_huge_page(info.page))) {
960 * \brief Query existing page mapping
987 info->vaddr = find_info.page->u.frame.vaddr;
988 info->size = find_info.page_size * find_info.page->v.u.frame.pte_count;
989 info->cap = find_info.page->v.cap;
990 info->offset = find_info.page->v.u.frame.offset;
991 info->flags = find_info.page->v.u.frame.flags;
992 info->mapping = find_info.page->v.mapping;
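The query path simply copies the bookkeeping of the leaf vnode it found back to the caller. Note that the reported size is the mapped extent, i.e. the page size at that level times the PTE count: a mapping of 16 base pages is reported as 16 * 4 KiB = 64 KiB, and a single large-page mapping as 1 * 2 MiB = 2 MiB.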
1101 * creates pinned page table entries
1140 /* do the actual creation of the page tables */
1154 /* map the page-table read only for access to status bits */
1161 /* copy the page-table capability */
1183 /* get the page table of the reserved range and map the PT */
1212 // find page and last-level page table (can be pdir or pdpt)
1357 * provide the mapping cnode for the first half of the root page table as