/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2003 Peter Wemm.
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Derived from hp300 version by Mike Hibler, this version by William
 * Jolitz uses a recursive map [a pde points to the page directory] to
 * map the page tables using the pagetables themselves.  This is done to
 * reduce the impact on kernel virtual memory for lots of sparse address
 * space, and to reduce the cost of memory to each process.
 *
 * from: hp300: @(#)pmap.h	7.2 (Berkeley) 12/16/90
 * from: @(#)pmap.h	7.4 (Berkeley) 5/12/91
 * $FreeBSD$
 */

#ifndef _MACHINE_PMAP_H_
#define	_MACHINE_PMAP_H_

/*
 * Page-directory and page-table entries follow this format, with a few
 * of the fields not present here and there, depending on a lot of things.
 */
	/* ---- Intel Nomenclature ---- */
#define	X86_PG_V	0x001	/* P	Valid			*/
#define	X86_PG_RW	0x002	/* R/W	Read/Write		*/
#define	X86_PG_U	0x004	/* U/S	User/Supervisor		*/
#define	X86_PG_NC_PWT	0x008	/* PWT	Write through		*/
#define	X86_PG_NC_PCD	0x010	/* PCD	Cache disable		*/
#define	X86_PG_A	0x020	/* A	Accessed		*/
#define	X86_PG_M	0x040	/* D	Dirty			*/
#define	X86_PG_PS	0x080	/* PS	Page size (0=4k,1=2M)	*/
#define	X86_PG_PTE_PAT	0x080	/* PAT	PAT index		*/
#define	X86_PG_G	0x100	/* G	Global			*/
#define	X86_PG_AVAIL1	0x200	/*    /	Available for system	*/
#define	X86_PG_AVAIL2	0x400	/*   <	programmers use		*/
#define	X86_PG_AVAIL3	0x800	/*    \				*/
#define	X86_PG_PDE_PAT	0x1000	/* PAT	PAT index		*/
#define	X86_PG_PKU(idx)	((pt_entry_t)(idx) << 59)
#define	X86_PG_NX	(1ul << 63) /* No-execute */
#define	X86_PG_AVAIL(x)	(1ul << (x))

/* Page level cache control fields used to determine the PAT type */
#define	X86_PG_PDE_CACHE (X86_PG_PDE_PAT | X86_PG_NC_PWT | X86_PG_NC_PCD)
#define	X86_PG_PTE_CACHE (X86_PG_PTE_PAT | X86_PG_NC_PWT | X86_PG_NC_PCD)

/* Protection key indexes */
#define	PMAP_MAX_PKRU_IDX	0xf
#define	X86_PG_PKU_MASK		X86_PG_PKU(PMAP_MAX_PKRU_IDX)

/*
 * Intel extended page table (EPT) bit definitions.
 */
#define	EPT_PG_READ		0x001	/* R	Read		*/
#define	EPT_PG_WRITE		0x002	/* W	Write		*/
#define	EPT_PG_EXECUTE		0x004	/* X	Execute		*/
#define	EPT_PG_IGNORE_PAT	0x040	/* IPAT	Ignore PAT	*/
#define	EPT_PG_PS		0x080	/* PS	Page size	*/
#define	EPT_PG_A		0x100	/* A	Accessed	*/
#define	EPT_PG_M		0x200	/* D	Dirty		*/
#define	EPT_PG_MEMORY_TYPE(x)	((x) << 3) /* MT Memory Type */

/*
 * Define the PG_xx macros in terms of the bits on x86 PTEs.
 */
#define	PG_V		X86_PG_V
#define	PG_RW		X86_PG_RW
#define	PG_U		X86_PG_U
#define	PG_NC_PWT	X86_PG_NC_PWT
#define	PG_NC_PCD	X86_PG_NC_PCD
#define	PG_A		X86_PG_A
#define	PG_M		X86_PG_M
#define	PG_PS		X86_PG_PS
#define	PG_PTE_PAT	X86_PG_PTE_PAT
#define	PG_G		X86_PG_G
#define	PG_AVAIL1	X86_PG_AVAIL1
#define	PG_AVAIL2	X86_PG_AVAIL2
#define	PG_AVAIL3	X86_PG_AVAIL3
#define	PG_PDE_PAT	X86_PG_PDE_PAT
#define	PG_NX		X86_PG_NX
#define	PG_PDE_CACHE	X86_PG_PDE_CACHE
#define	PG_PTE_CACHE	X86_PG_PTE_CACHE
#define	PG_PKU_MASK	X86_PG_PKU_MASK

/* Our various interpretations of the above */
#define	PG_W		X86_PG_AVAIL3	/* "Wired" pseudoflag */
#define	PG_MANAGED	X86_PG_AVAIL2
#define	EPT_PG_EMUL_V	X86_PG_AVAIL(52)
#define	EPT_PG_EMUL_RW	X86_PG_AVAIL(53)
#define	PG_PROMOTED	X86_PG_AVAIL(54)	/* PDE only */
#define	PG_FRAME	(0x000ffffffffff000ul)
#define	PG_PS_FRAME	(0x000fffffffe00000ul)
#define	PG_PS_PDP_FRAME	(0x000fffffc0000000ul)

/*
 * Promotion to a 2MB (PDE) page mapping requires that the corresponding 4KB
 * (PTE) page mappings have identical settings for the following fields:
 */
#define	PG_PTE_PROMOTE	(PG_NX | PG_MANAGED | PG_W | PG_G | PG_PTE_CACHE | \
	    PG_M | PG_A | PG_U | PG_RW | PG_V | PG_PKU_MASK)
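
/*
 * Example (editor's sketch, not taken from the original source): a
 * valid, writable, global 4KB kernel mapping of the physical page at
 * "pa" is composed by OR'ing the frame address with the bits above:
 *
 *	pt_entry_t pte = (pa & PG_FRAME) | X86_PG_RW | X86_PG_G | X86_PG_V;
 *
 * Note that X86_PG_PTE_PAT and X86_PG_PS share bit 7: in a PTE that bit
 * selects the high PAT index bit, while in a PDE it selects a 2MB page,
 * which is why the PDE's PAT bit lives at bit 12 (X86_PG_PDE_PAT).
 */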

/*
 * Page Protection Exception bits
 */

#define	PGEX_P		0x01	/* Protection violation vs. not present */
#define	PGEX_W		0x02	/* during a Write cycle */
#define	PGEX_U		0x04	/* access from User mode (UPL) */
#define	PGEX_RSV	0x08	/* reserved PTE field is non-zero */
#define	PGEX_I		0x10	/* during an instruction fetch */
#define	PGEX_PK		0x20	/* protection key violation */
#define	PGEX_SGX	0x8000	/* SGX-related */

/*
 * Undef the PG_xx macros that define bits in the regular x86 PTEs that
 * have a different position in nested PTEs.  This is done when compiling
 * code that needs to be aware of the differences between regular x86 and
 * nested PTEs.
 *
 * The appropriate bitmask will be calculated at runtime based on the pmap
 * type.
 */
#ifdef AMD64_NPT_AWARE
#undef PG_AVAIL1		/* X86_PG_AVAIL1 aliases with EPT_PG_M */
#undef PG_G
#undef PG_A
#undef PG_M
#undef PG_PDE_PAT
#undef PG_PDE_CACHE
#undef PG_PTE_PAT
#undef PG_PTE_CACHE
#undef PG_RW
#undef PG_V
#endif

/*
 * Pte related macros.  This is complicated by having to deal with
 * the sign extension of the 48th bit.
 */
#define	KVADDR(l4, l3, l2, l1) ( \
	((unsigned long)-1 << 47) | \
	((unsigned long)(l4) << PML4SHIFT) | \
	((unsigned long)(l3) << PDPSHIFT) | \
	((unsigned long)(l2) << PDRSHIFT) | \
	((unsigned long)(l1) << PAGE_SHIFT))

#define	UVADDR(l4, l3, l2, l1) ( \
	((unsigned long)(l4) << PML4SHIFT) | \
	((unsigned long)(l3) << PDPSHIFT) | \
	((unsigned long)(l2) << PDRSHIFT) | \
	((unsigned long)(l1) << PAGE_SHIFT))
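
/*
 * Example (editor's arithmetic, assuming the shift values from
 * <machine/param.h>: PML4SHIFT = 39, PDPSHIFT = 30, PDRSHIFT = 21,
 * PAGE_SHIFT = 12, NPML4EPG = 512): PML4 slot NPML4EPG/2 = 256 is the
 * first slot whose addresses have bit 47 set, so bits 63..48 must also
 * be set for the result to be canonical; ((unsigned long)-1 << 47)
 * supplies them.  Thus
 *
 *	KVADDR(256, 0, 0, 0) = 0xffff800000000000ul | (256ul << 39)
 *	                     = 0xffff800000000000ul,
 *
 * the base of the recursive map (addr_PTmap below).
 */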

/*
 * Number of kernel PML4 slots.  Can be anywhere from 1 to 64 or so,
 * but setting it larger than NDMPML4E makes no sense.
 *
 * Each slot provides .5 TB of kernel virtual space.
 */
#define	NKPML4E		4

#define	NUPML4E		(NPML4EPG/2)	/* number of userland PML4 pages */
#define	NUPDPE		(NUPML4E*NPDPEPG)/* number of userland PDP pages */
#define	NUPDE		(NUPDPE*NPDEPG)	/* number of userland PD entries */

/*
 * NDMPML4E is the maximum number of PML4 entries that will be
 * used to implement the direct map.  It must be a power of two,
 * and should generally exceed NKPML4E.  The maximum possible
 * value is 64; using 128 will make the direct map intrude into
 * the recursive page table map.
 */
#define	NDMPML4E	8

/*
 * These values control the layout of virtual memory.  The starting address
 * of the direct map, which is controlled by DMPML4I, must be a multiple of
 * its size.  (See the PHYS_TO_DMAP() and DMAP_TO_PHYS() macros.)
 *
 * Note: KPML4I is the index of the (single) level 4 page that maps
 * the KVA that holds KERNBASE, while KPML4BASE is the index of the
 * first level 4 page that maps VM_MIN_KERNEL_ADDRESS.  If NKPML4E
 * is 1, these are the same, otherwise KPML4BASE < KPML4I and extra
 * level 4 PDEs are needed to map from VM_MIN_KERNEL_ADDRESS up to
 * KERNBASE.
 *
 * (KPML4I combines with KPDPI to choose where KERNBASE starts.
 * Or, in other words, KPML4I provides bits 39..47 of KERNBASE,
 * and KPDPI provides bits 30..38.)
 */
#define	PML4PML4I	(NPML4EPG/2)	/* Index of recursive pml4 mapping */

#define	KPML4BASE	(NPML4EPG-NKPML4E) /* KVM at highest addresses */
#define	DMPML4I		rounddown(KPML4BASE-NDMPML4E, NDMPML4E) /* Below KVM */

#define	KPML4I		(NPML4EPG-1)
#define	KPDPI		(NPDPEPG-2)	/* kernbase at -2GB */

/* Large map: index of the first and max last pml4 entry */
#define	LMSPML4I	(PML4PML4I + 1)
#define	LMEPML4I	(DMPML4I - 1)

/*
 * XXX doesn't really belong here I guess...
 */
#define	ISA_HOLE_START	  0xa0000
#define	ISA_HOLE_LENGTH	  (0x100000-ISA_HOLE_START)

#define	PMAP_PCID_NONE		0xffffffff
#define	PMAP_PCID_KERN		0
#define	PMAP_PCID_OVERMAX	0x1000
#define	PMAP_PCID_OVERMAX_KERN	0x800
#define	PMAP_PCID_USER_PT	0x800

#define	PMAP_NO_CR3		0xffffffffffffffff

#ifndef LOCORE

#include <sys/queue.h>
#include <sys/_cpuset.h>
#include <sys/_lock.h>
#include <sys/_mutex.h>
#include <sys/_pctrie.h>
#include <sys/_rangeset.h>

#include <vm/_vm_radix.h>

typedef u_int64_t pd_entry_t;
typedef u_int64_t pt_entry_t;
typedef u_int64_t pdp_entry_t;
typedef u_int64_t pml4_entry_t;

/*
 * Address of current address space page table maps and directories.
 */
#ifdef _KERNEL
#define	addr_PTmap	(KVADDR(PML4PML4I, 0, 0, 0))
#define	addr_PDmap	(KVADDR(PML4PML4I, PML4PML4I, 0, 0))
#define	addr_PDPmap	(KVADDR(PML4PML4I, PML4PML4I, PML4PML4I, 0))
#define	addr_PML4map	(KVADDR(PML4PML4I, PML4PML4I, PML4PML4I, PML4PML4I))
#define	addr_PML4pml4e	(addr_PML4map + (PML4PML4I * sizeof(pml4_entry_t)))
#define	PTmap		((pt_entry_t *)(addr_PTmap))
#define	PDmap		((pd_entry_t *)(addr_PDmap))
#define	PDPmap		((pd_entry_t *)(addr_PDPmap))
#define	PML4map		((pd_entry_t *)(addr_PML4map))
#define	PML4pml4e	((pd_entry_t *)(addr_PML4pml4e))

extern int nkpt;		/* Initial number of kernel page tables */
extern u_int64_t KPML4phys;	/* physical address of kernel level 4 */

/*
 * virtual address to page table entry and
 * to physical address.
 * Note: these work recursively, thus vtopte of a pte will give
 * the corresponding pde that in turn maps it.
 */
pt_entry_t *vtopte(vm_offset_t);
#define	vtophys(va)	pmap_kextract(((vm_offset_t) (va)))
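
/*
 * Sketch (editor's addition) of how vtopte() can be realized with the
 * recursive window; the in-tree definition lives in
 * sys/amd64/amd64/pmap.c.  Because the recursive PML4 slot makes every
 * PTE appear in a linear array at PTmap, the entry for a kernel VA is
 * found by indexing with the VA's page number, masked to the combined
 * width of the four index fields:
 *
 *	pt_entry_t *
 *	vtopte(vm_offset_t va)
 *	{
 *		u_int64_t mask = ((1ul << (NPTEPGSHIFT + NPDEPGSHIFT +
 *		    NPDPEPGSHIFT + NPML4EPGSHIFT)) - 1);
 *
 *		return (PTmap + ((va >> PAGE_SHIFT) & mask));
 *	}
 */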

#define	pte_load_store(ptep, pte)	atomic_swap_long(ptep, pte)
#define	pte_load_clear(ptep)		atomic_swap_long(ptep, 0)
#define	pte_store(ptep, pte) do { \
	*(u_long *)(ptep) = (u_long)(pte); \
} while (0)
#define	pte_clear(ptep)			pte_store(ptep, 0)

#define	pde_store(pdep, pde)		pte_store(pdep, pde)

extern pt_entry_t pg_nx;

#endif /* _KERNEL */

/*
 * Pmap stuff
 */
struct	pv_entry;
struct	pv_chunk;

/*
 * Locks
 * (p) PV list lock
 */
struct md_page {
	TAILQ_HEAD(, pv_entry)	pv_list;  /* (p) */
	int			pv_gen;   /* (p) */
	int			pat_mode;
};

enum pmap_type {
	PT_X86,			/* regular x86 page tables */
	PT_EPT,			/* Intel's nested page tables */
	PT_RVI,			/* AMD's nested page tables */
};

struct pmap_pcids {
	uint32_t	pm_pcid;
	uint32_t	pm_gen;
};

/*
 * The kernel virtual address (KVA) of the level 4 page table page is always
 * within the direct map (DMAP) region.
 */
struct pmap {
	struct mtx		pm_mtx;
	pml4_entry_t		*pm_pml4;	/* KVA of level 4 page table */
	pml4_entry_t		*pm_pml4u;	/* KVA of user l4 page table */
	uint64_t		pm_cr3;
	uint64_t		pm_ucr3;
	TAILQ_HEAD(,pv_chunk)	pm_pvchunk;	/* list of mappings in pmap */
	cpuset_t		pm_active;	/* active on cpus */
	enum pmap_type		pm_type;	/* regular or nested tables */
	struct pmap_statistics	pm_stats;	/* pmap statistics */
	struct vm_radix		pm_root;	/* spare page table pages */
	long			pm_eptgen;	/* EPT pmap generation id */
	int			pm_flags;
	struct pmap_pcids	pm_pcids[MAXCPU];
	struct rangeset		pm_pkru;
};

/* flags */
#define	PMAP_NESTED_IPIMASK	0xff
#define	PMAP_PDE_SUPERPAGE	(1 << 8)	/* supports 2MB superpages */
#define	PMAP_EMULATE_AD_BITS	(1 << 9)	/* needs A/D bits emulation */
#define	PMAP_SUPPORTS_EXEC_ONLY	(1 << 10)	/* execute only mappings ok */

typedef struct pmap	*pmap_t;

#ifdef _KERNEL
extern struct pmap	kernel_pmap_store;
#define	kernel_pmap	(&kernel_pmap_store)

#define	PMAP_LOCK(pmap)		mtx_lock(&(pmap)->pm_mtx)
#define	PMAP_LOCK_ASSERT(pmap, type) \
				mtx_assert(&(pmap)->pm_mtx, (type))
#define	PMAP_LOCK_DESTROY(pmap)	mtx_destroy(&(pmap)->pm_mtx)
#define	PMAP_LOCK_INIT(pmap)	mtx_init(&(pmap)->pm_mtx, "pmap", \
				    NULL, MTX_DEF | MTX_DUPOK)
#define	PMAP_LOCKED(pmap)	mtx_owned(&(pmap)->pm_mtx)
#define	PMAP_MTX(pmap)		(&(pmap)->pm_mtx)
#define	PMAP_TRYLOCK(pmap)	mtx_trylock(&(pmap)->pm_mtx)
#define	PMAP_UNLOCK(pmap)	mtx_unlock(&(pmap)->pm_mtx)

int	pmap_pinit_type(pmap_t pmap, enum pmap_type pm_type, int flags);
int	pmap_emulate_accessed_dirty(pmap_t pmap, vm_offset_t va, int ftype);
#endif
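
/*
 * Usage sketch (editor's addition; pmap_lookup_pte() is a hypothetical
 * helper, not a real KPI): the pmap lock brackets any walk or update of
 * a pmap's page table structures:
 *
 *	PMAP_LOCK(pmap);
 *	pte = pmap_lookup_pte(pmap, va);
 *	...
 *	PMAP_UNLOCK(pmap);
 *
 * MTX_DUPOK is passed to mtx_init() in PMAP_LOCK_INIT() because some
 * operations, such as copying mappings between two address spaces, hold
 * two locks of the "pmap" class at once.
 */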

/*
 * For each vm_page_t, there is a list of all currently valid virtual
 * mappings of that page.  An entry is a pv_entry_t, the list is pv_list.
 */
typedef struct pv_entry {
	vm_offset_t	pv_va;		/* virtual address for mapping */
	TAILQ_ENTRY(pv_entry)	pv_next;
} *pv_entry_t;

/*
 * pv_entries are allocated in chunks per-process.  This avoids the
 * need to track per-pmap assignments.
 */
#define	_NPCM	3
#define	_NPCPV	168
#define	PV_CHUNK_HEADER							\
	pmap_t			pc_pmap;				\
	TAILQ_ENTRY(pv_chunk)	pc_list;				\
	uint64_t		pc_map[_NPCM];	/* bitmap; 1 = free */	\
	TAILQ_ENTRY(pv_chunk)	pc_lru;

struct pv_chunk_header {
	PV_CHUNK_HEADER
};

struct pv_chunk {
	PV_CHUNK_HEADER
	struct pv_entry		pc_pventry[_NPCPV];
};
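
/*
 * Sizing note (editor's arithmetic, assuming 8-byte pointers and
 * vm_offset_t): the chunk header is 8 (pc_pmap) + 16 (pc_list) +
 * 24 (pc_map) + 16 (pc_lru) = 64 bytes, and each pv_entry is
 * 8 + 16 = 24 bytes, so a chunk is 64 + 168 * 24 = 4096 bytes,
 * exactly one 4KB page.  The _NPCM = 3 bitmap words provide 192 bits,
 * enough to track the _NPCPV = 168 entries.
 */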

#ifdef	_KERNEL

extern caddr_t	CADDR1;
extern pt_entry_t *CMAP1;
extern vm_paddr_t phys_avail[];
extern vm_paddr_t dump_avail[];
extern vm_offset_t virtual_avail;
extern vm_offset_t virtual_end;
extern vm_paddr_t dmaplimit;
extern int pmap_pcid_enabled;
extern int invpcid_works;

#define	pmap_page_get_memattr(m)	((vm_memattr_t)(m)->md.pat_mode)
#define	pmap_page_is_write_mapped(m)	(((m)->aflags & PGA_WRITEABLE) != 0)
#define	pmap_unmapbios(va, sz)		pmap_unmapdev((va), (sz))

struct thread;

void	pmap_activate_boot(pmap_t pmap);
void	pmap_activate_sw(struct thread *);
void	pmap_allow_2m_x_ept_recalculate(void);
void	pmap_bootstrap(vm_paddr_t *);
int	pmap_cache_bits(pmap_t pmap, int mode, boolean_t is_pde);
int	pmap_change_attr(vm_offset_t, vm_size_t, int);
int	pmap_change_prot(vm_offset_t, vm_size_t, vm_prot_t);
void	pmap_demote_DMAP(vm_paddr_t base, vm_size_t len, boolean_t invalidate);
void	pmap_flush_cache_range(vm_offset_t, vm_offset_t);
void	pmap_flush_cache_phys_range(vm_paddr_t, vm_paddr_t, vm_memattr_t);
void	pmap_init_pat(void);
void	pmap_kenter(vm_offset_t va, vm_paddr_t pa);
void	*pmap_kenter_temporary(vm_paddr_t pa, int i);
vm_paddr_t pmap_kextract(vm_offset_t);
void	pmap_kremove(vm_offset_t);
int	pmap_large_map(vm_paddr_t, vm_size_t, void **, vm_memattr_t);
void	pmap_large_map_wb(void *sva, vm_size_t len);
void	pmap_large_unmap(void *sva, vm_size_t len);
void	*pmap_mapbios(vm_paddr_t, vm_size_t);
void	*pmap_mapdev(vm_paddr_t, vm_size_t);
void	*pmap_mapdev_attr(vm_paddr_t, vm_size_t, int);
void	*pmap_mapdev_pciecfg(vm_paddr_t pa, vm_size_t size);
bool	pmap_not_in_di(void);
boolean_t pmap_page_is_mapped(vm_page_t m);
void	pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma);
void	pmap_pinit_pml4(vm_page_t);
bool	pmap_ps_enabled(pmap_t pmap);
void	pmap_unmapdev(vm_offset_t, vm_size_t);
void	pmap_invalidate_page(pmap_t, vm_offset_t);
void	pmap_invalidate_range(pmap_t, vm_offset_t, vm_offset_t);
void	pmap_invalidate_all(pmap_t);
void	pmap_invalidate_cache(void);
void	pmap_invalidate_cache_pages(vm_page_t *pages, int count);
void	pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva);
void	pmap_force_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva);
void	pmap_get_mapping(pmap_t pmap, vm_offset_t va, uint64_t *ptr, int *num);
boolean_t pmap_map_io_transient(vm_page_t *, vm_offset_t *, int, boolean_t);
void	pmap_unmap_io_transient(vm_page_t *, vm_offset_t *, int, boolean_t);
void	pmap_pti_add_kva(vm_offset_t sva, vm_offset_t eva, bool exec);
void	pmap_pti_remove_kva(vm_offset_t sva, vm_offset_t eva);
void	pmap_pti_pcid_invalidate(uint64_t ucr3, uint64_t kcr3);
void	pmap_pti_pcid_invlpg(uint64_t ucr3, uint64_t kcr3, vm_offset_t va);
void	pmap_pti_pcid_invlrng(uint64_t ucr3, uint64_t kcr3, vm_offset_t sva,
	    vm_offset_t eva);
int	pmap_pkru_clear(pmap_t pmap, vm_offset_t sva, vm_offset_t eva);
int	pmap_pkru_set(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
	    u_int keyidx, int flags);
void	pmap_thread_init_invl_gen(struct thread *td);
int	pmap_vmspace_copy(pmap_t dst_pmap, pmap_t src_pmap);
#endif /* _KERNEL */

/* Return various clipped indexes for a given VA */
static __inline vm_pindex_t
pmap_pte_index(vm_offset_t va)
{

	return ((va >> PAGE_SHIFT) & ((1ul << NPTEPGSHIFT) - 1));
}

static __inline vm_pindex_t
pmap_pde_index(vm_offset_t va)
{

	return ((va >> PDRSHIFT) & ((1ul << NPDEPGSHIFT) - 1));
}

static __inline vm_pindex_t
pmap_pdpe_index(vm_offset_t va)
{

	return ((va >> PDPSHIFT) & ((1ul << NPDPEPGSHIFT) - 1));
}

static __inline vm_pindex_t
pmap_pml4e_index(vm_offset_t va)
{

	return ((va >> PML4SHIFT) & ((1ul << NPML4EPGSHIFT) - 1));
}

#endif /* !LOCORE */

#endif /* !_MACHINE_PMAP_H_ */