/*-
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 254Srgrimes * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 264Srgrimes * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 274Srgrimes * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 284Srgrimes * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 294Srgrimes * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 304Srgrimes * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 314Srgrimes * SUCH DAMAGE. 324Srgrimes * 334Srgrimes * Derived from hp300 version by Mike Hibler, this version by William 344Srgrimes * Jolitz uses a recursive map [a pde points to the page directory] to 354Srgrimes * map the page tables using the pagetables themselves. This is done to 364Srgrimes * reduce the impact on kernel virtual memory for lots of sparse address 374Srgrimes * space, and to reduce the cost of memory to each process. 384Srgrimes * 39607Srgrimes * from: hp300: @(#)pmap.h 7.2 (Berkeley) 12/16/90 40607Srgrimes * from: @(#)pmap.h 7.4 (Berkeley) 5/12/91 4150477Speter * $FreeBSD: releng/10.3/sys/i386/include/pmap.h 282065 2015-04-27 08:02:12Z kib $ 424Srgrimes */ 434Srgrimes 444471Sbde#ifndef _MACHINE_PMAP_H_ 454471Sbde#define _MACHINE_PMAP_H_ 464Srgrimes 4715565Sphk/* 48153179Sjhb * Page-directory and page-table entries follow this format, with a few 4915565Sphk * of the fields not present here and there, depending on a lot of things. 
5015565Sphk */ 5115565Sphk /* ---- Intel Nomenclature ---- */ 5215565Sphk#define PG_V 0x001 /* P Valid */ 5315565Sphk#define PG_RW 0x002 /* R/W Read/Write */ 5415565Sphk#define PG_U 0x004 /* U/S User/Supervisor */ 5515565Sphk#define PG_NC_PWT 0x008 /* PWT Write through */ 5615565Sphk#define PG_NC_PCD 0x010 /* PCD Cache disable */ 5715565Sphk#define PG_A 0x020 /* A Accessed */ 5815565Sphk#define PG_M 0x040 /* D Dirty */ 5915565Sphk#define PG_PS 0x080 /* PS Page size (0=4k,1=4M) */ 60158238Sjhb#define PG_PTE_PAT 0x080 /* PAT PAT index */ 6115565Sphk#define PG_G 0x100 /* G Global */ 6215565Sphk#define PG_AVAIL1 0x200 /* / Available for system */ 6315565Sphk#define PG_AVAIL2 0x400 /* < programmers use */ 6415565Sphk#define PG_AVAIL3 0x800 /* \ */ 65158238Sjhb#define PG_PDE_PAT 0x1000 /* PAT PAT index */ 66282065Skib#if defined(PAE) || defined(PAE_TABLES) 67168439Sru#define PG_NX (1ull<<63) /* No-execute */ 68168439Sru#endif 694Srgrimes 704Srgrimes 7115565Sphk/* Our various interpretations of the above */ 7215565Sphk#define PG_W PG_AVAIL1 /* "Wired" pseudoflag */ 7315809Sdyson#define PG_MANAGED PG_AVAIL2 74282065Skib#if defined(PAE) || defined(PAE_TABLES) 75168439Sru#define PG_FRAME (0x000ffffffffff000ull) 76168439Sru#define PG_PS_FRAME (0x000fffffffe00000ull) 77168439Sru#else 78168439Sru#define PG_FRAME (~PAGE_MASK) 79168439Sru#define PG_PS_FRAME (0xffc00000) 80168439Sru#endif 8115565Sphk#define PG_PROT (PG_RW|PG_U) /* all protection bits . 
#define	PG_N		(PG_NC_PWT|PG_NC_PCD)	/* Non-cacheable */

/* Page level cache control fields used to determine the PAT type */
#define	PG_PDE_CACHE	(PG_PDE_PAT | PG_NC_PWT | PG_NC_PCD)
#define	PG_PTE_CACHE	(PG_PTE_PAT | PG_NC_PWT | PG_NC_PCD)

/*
 * Promotion to a 2 or 4MB (PDE) page mapping requires that the corresponding
 * 4KB (PTE) page mappings have identical settings for the following fields:
 */
#define	PG_PTE_PROMOTE	(PG_MANAGED | PG_W | PG_G | PG_PTE_PAT | \
	    PG_M | PG_A | PG_NC_PCD | PG_NC_PWT | PG_U | PG_RW | PG_V)

/*
 * Page Protection Exception bits
 */

#define	PGEX_P		0x01	/* Protection violation vs. not present */
#define	PGEX_W		0x02	/* during a Write cycle */
#define	PGEX_U		0x04	/* access from User mode (UPL) */
#define	PGEX_RSV	0x08	/* reserved PTE field is non-zero */
#define	PGEX_I		0x10	/* during an instruction fetch */

/*
 * Size of Kernel address space.  This is the number of page table pages
 * (4MB each) to use for the kernel.  256 pages == 1 Gigabyte.
 * This **MUST** be a multiple of 4 (eg: 252, 256, 260, etc).
 * For PAE, the page table page unit size is 2MB.  This means that 512 pages
 * is 1 Gigabyte.  Double everything.  It must be a multiple of 8 for PAE.
 */
#ifndef KVA_PAGES
#if defined(PAE) || defined(PAE_TABLES)
#define KVA_PAGES	512
#else
#define KVA_PAGES	256
#endif
#endif

/*
 * Pte related macros
 */
#define VADDR(pdi, pti) ((vm_offset_t)(((pdi)<<PDRSHIFT)|((pti)<<PAGE_SHIFT)))

/*
 * The initial number of kernel page table pages that are constructed
 * by locore must be sufficient to map vm_page_array.  That number can
 * be calculated as follows:
 *     max_phys / PAGE_SIZE * sizeof(struct vm_page) / NBPDR
 * PAE:	max_phys 16G, sizeof(vm_page) 76, NBPDR 2M, 152 page table pages.
 * PAE_TABLES: max_phys 4G, sizeof(vm_page) 68, NBPDR 2M, 36 page table pages.
 * Non-PAE: max_phys 4G, sizeof(vm_page) 68, NBPDR 4M, 18 page table pages.
 */
#ifndef NKPT
#if defined(PAE)
#define	NKPT		240
#elif defined(PAE_TABLES)
#define	NKPT		60
#else
#define	NKPT		30
#endif
#endif

#ifndef NKPDE
#define NKPDE	(KVA_PAGES)	/* number of page tables/pde's */
#endif

/*
 * The *PTDI values control the layout of virtual memory
 *
 * XXX This works for now, but I am not real happy with it, I'll fix it
 * right after I fix locore.s and the magic 28K hole
 */
#define	KPTDI		(NPDEPTD-NKPDE)	/* start of kernel virtual pde's */
#define	PTDPTDI		(KPTDI-NPGPTD)	/* ptd entry that points to ptd! */

/*
 * XXX doesn't really belong here I guess...
 */
15915565Sphk */ 16015565Sphk#define ISA_HOLE_START 0xa0000 16115565Sphk#define ISA_HOLE_LENGTH (0x100000-ISA_HOLE_START) 16215565Sphk 16315543Sphk#ifndef LOCORE 16418163Sdyson 165130755Sbde#include <sys/queue.h> 166222813Sattilio#include <sys/_cpuset.h> 167130573Salc#include <sys/_lock.h> 168130573Salc#include <sys/_mutex.h> 16918163Sdyson 170248449Sattilio#include <vm/_vm_radix.h> 171248449Sattilio 172282065Skib#if defined(PAE) || defined(PAE_TABLES) 17315543Sphk 174112841Sjaketypedef uint64_t pdpt_entry_t; 175112841Sjaketypedef uint64_t pd_entry_t; 176112841Sjaketypedef uint64_t pt_entry_t; 177112841Sjake 178112841Sjake#define PTESHIFT (3) 179112841Sjake#define PDESHIFT (3) 180112841Sjake 181112841Sjake#else 182112841Sjake 183112841Sjaketypedef uint32_t pd_entry_t; 184112841Sjaketypedef uint32_t pt_entry_t; 185112841Sjake 186111299Sjake#define PTESHIFT (2) 187111299Sjake#define PDESHIFT (2) 188757Sdg 189112841Sjake#endif 190112841Sjake 1914Srgrimes/* 192190272Salc * Address of current address space page table maps and directories. 1934Srgrimes */ 19455205Speter#ifdef _KERNEL 195113040Sjakeextern pt_entry_t PTmap[]; 196113040Sjakeextern pd_entry_t PTD[]; 197113040Sjakeextern pd_entry_t PTDpde[]; 1984Srgrimes 199282065Skib#if defined(PAE) || defined(PAE_TABLES) 200112841Sjakeextern pdpt_entry_t *IdlePDPT; 201112841Sjake#endif 20299862Speterextern pd_entry_t *IdlePTD; /* physical address of "Idle" state directory */ 2034Srgrimes 2044Srgrimes/* 205213455Salc * Translate a virtual address to the kernel virtual address of its page table 206213455Salc * entry (PTE). This can be used recursively. If the address of a PTE as 207213455Salc * previously returned by this macro is itself given as the argument, then the 208213455Salc * address of the page directory entry (PDE) that maps the PTE will be 209213455Salc * returned. 210213455Salc * 211213455Salc * This macro may be used before pmap_bootstrap() is called. 
2124Srgrimes */ 2134Srgrimes#define vtopte(va) (PTmap + i386_btop(va)) 214213455Salc 215213455Salc/* 216213455Salc * Translate a virtual address to its physical address. 217213455Salc * 218213455Salc * This macro may be used before pmap_bootstrap() is called. 219213455Salc */ 220153179Sjhb#define vtophys(va) pmap_kextract((vm_offset_t)(va)) 2214Srgrimes 222216956Srwatson#if defined(XEN) 223181775Skmacy#include <sys/param.h> 224255040Sgibbs 225255040Sgibbs#include <xen/xen-os.h> 226255040Sgibbs 227181775Skmacy#include <machine/xen/xenvar.h> 228181775Skmacy#include <machine/xen/xenpmap.h> 229181775Skmacy 230181775Skmacyextern pt_entry_t pg_nx; 231181775Skmacy 232181775Skmacy#define PG_KERNEL (PG_V | PG_A | PG_RW | PG_M) 233181775Skmacy 234181775Skmacy#define MACH_TO_VM_PAGE(ma) PHYS_TO_VM_PAGE(xpmap_mtop((ma))) 235181775Skmacy#define VM_PAGE_TO_MACH(m) xpmap_ptom(VM_PAGE_TO_PHYS((m))) 236181775Skmacy 237215587Scperciva#define VTOM(va) xpmap_ptom(VTOP(va)) 238215587Scperciva 239181775Skmacystatic __inline vm_paddr_t 240181775Skmacypmap_kextract_ma(vm_offset_t va) 241181775Skmacy{ 242181775Skmacy vm_paddr_t ma; 243181775Skmacy if ((ma = PTD[va >> PDRSHIFT]) & PG_PS) { 244181775Skmacy ma = (ma & ~(NBPDR - 1)) | (va & (NBPDR - 1)); 245181775Skmacy } else { 246181775Skmacy ma = (*vtopte(va) & PG_FRAME) | (va & PAGE_MASK); 247181775Skmacy } 248181775Skmacy return ma; 249181775Skmacy} 250181775Skmacy 251181775Skmacystatic __inline vm_paddr_t 252181775Skmacypmap_kextract(vm_offset_t va) 253181775Skmacy{ 254181775Skmacy return xpmap_mtop(pmap_kextract_ma(va)); 255181775Skmacy} 256181775Skmacy#define vtomach(va) pmap_kextract_ma(((vm_offset_t) (va))) 257181775Skmacy 258181775Skmacyvm_paddr_t pmap_extract_ma(struct pmap *pmap, vm_offset_t va); 259181775Skmacy 260181775Skmacyvoid pmap_kenter_ma(vm_offset_t va, vm_paddr_t pa); 261181775Skmacyvoid pmap_map_readonly(struct pmap *pmap, vm_offset_t va, int len); 262181775Skmacyvoid pmap_map_readwrite(struct pmap *pmap, vm_offset_t 
va, int len); 263181775Skmacy 264181775Skmacystatic __inline pt_entry_t 265181775Skmacypte_load_store(pt_entry_t *ptep, pt_entry_t v) 266181775Skmacy{ 267181775Skmacy pt_entry_t r; 268181775Skmacy 269181775Skmacy r = *ptep; 270181775Skmacy PT_SET_VA(ptep, v, TRUE); 271181775Skmacy return (r); 272181775Skmacy} 273181775Skmacy 274181775Skmacystatic __inline pt_entry_t 275181775Skmacypte_load_store_ma(pt_entry_t *ptep, pt_entry_t v) 276181775Skmacy{ 277181775Skmacy pt_entry_t r; 278181775Skmacy 279181775Skmacy r = *ptep; 280181775Skmacy PT_SET_VA_MA(ptep, v, TRUE); 281181775Skmacy return (r); 282181775Skmacy} 283181775Skmacy 284181775Skmacy#define pte_load_clear(ptep) pte_load_store((ptep), (pt_entry_t)0ULL) 285181775Skmacy 286181775Skmacy#define pte_store(ptep, pte) pte_load_store((ptep), (pt_entry_t)pte) 287181775Skmacy#define pte_store_ma(ptep, pte) pte_load_store_ma((ptep), (pt_entry_t)pte) 288181775Skmacy#define pde_store_ma(ptep, pte) pte_load_store_ma((ptep), (pt_entry_t)pte) 289181775Skmacy 290181775Skmacy#elif !defined(XEN) 291202894Salc 2924Srgrimes/* 293202894Salc * KPTmap is a linear mapping of the kernel page table. It differs from the 294202894Salc * recursive mapping in two ways: (1) it only provides access to kernel page 295202894Salc * table pages, and not user page table pages, and (2) it provides access to 296202894Salc * a kernel page table page after the corresponding virtual addresses have 297202894Salc * been promoted to a 2/4MB page mapping. 298213455Salc * 299213455Salc * KPTmap is first initialized by locore to support just NPKT page table 300213455Salc * pages. Later, it is reinitialized by pmap_bootstrap() to allow for 301213455Salc * expansion of the kernel page table. 302202894Salc */ 303202894Salcextern pt_entry_t *KPTmap; 304202894Salc 305202894Salc/* 306213455Salc * Extract from the kernel page table the physical address that is mapped by 307213455Salc * the given virtual address "va". 
308213455Salc * 309213455Salc * This function may be used before pmap_bootstrap() is called. 3101307Sdg */ 311112569Sjakestatic __inline vm_paddr_t 3124471Sbdepmap_kextract(vm_offset_t va) 3131307Sdg{ 314112569Sjake vm_paddr_t pa; 315112569Sjake 316113266Sjake if ((pa = PTD[va >> PDRSHIFT]) & PG_PS) { 317168439Sru pa = (pa & PG_PS_FRAME) | (va & PDRMASK); 31827464Sdyson } else { 319202894Salc /* 320202894Salc * Beware of a concurrent promotion that changes the PDE at 321202894Salc * this point! For example, vtopte() must not be used to 322202894Salc * access the PTE because it would use the new PDE. It is, 323202894Salc * however, safe to use the old PDE because the page table 324202894Salc * page is preserved by the promotion. 325202894Salc */ 326202894Salc pa = KPTmap[i386_btop(va)]; 32727464Sdyson pa = (pa & PG_FRAME) | (va & PAGE_MASK); 32827464Sdyson } 329202894Salc return (pa); 3301307Sdg} 331216956Srwatson#endif 332181854Skmacy 333216956Srwatson#if !defined(XEN) 334181854Skmacy#define PT_UPDATES_FLUSH() 335181775Skmacy#endif 33627464Sdyson 337282065Skib#if (defined(PAE) || defined(PAE_TABLES)) && !defined(XEN) 338112841Sjake 339254623Sjkim#define pde_cmpset(pdep, old, new) atomic_cmpset_64_i586(pdep, old, new) 340254623Sjkim#define pte_load_store(ptep, pte) atomic_swap_64_i586(ptep, pte) 341254623Sjkim#define pte_load_clear(ptep) atomic_swap_64_i586(ptep, 0) 342254623Sjkim#define pte_store(ptep, pte) atomic_store_rel_64_i586(ptep, pte) 343177659Salc 344168439Sruextern pt_entry_t pg_nx; 345168439Sru 346282065Skib#elif !defined(PAE) && !defined(PAE_TABLES) && !defined(XEN) 347114177Sjake 348254623Sjkim#define pde_cmpset(pdep, old, new) atomic_cmpset_int(pdep, old, new) 349254623Sjkim#define pte_load_store(ptep, pte) atomic_swap_int(ptep, pte) 350254623Sjkim#define pte_load_clear(ptep) atomic_swap_int(ptep, 0) 351254623Sjkim#define pte_store(ptep, pte) do { \ 352254623Sjkim *(u_int *)(ptep) = (u_int)(pte); \ 353254623Sjkim} while (0) 354177659Salc 
355114177Sjake#endif /* PAE */ 3561307Sdg 357254623Sjkim#define pte_clear(ptep) pte_store(ptep, 0) 358112841Sjake 359254623Sjkim#define pde_store(pdep, pde) pte_store(pdep, pde) 360114177Sjake 361114177Sjake#endif /* _KERNEL */ 362114177Sjake 3631307Sdg/* 3644Srgrimes * Pmap stuff 3654Srgrimes */ 36618163Sdysonstruct pv_entry; 367158060Speterstruct pv_chunk; 36860755Speter 36960755Speterstruct md_page { 37060938Sjake TAILQ_HEAD(,pv_entry) pv_list; 371195649Salc int pat_mode; 37260755Speter}; 3734Srgrimes 3744Srgrimesstruct pmap { 375130573Salc struct mtx pm_mtx; 3764Srgrimes pd_entry_t *pm_pdir; /* KVA of page directory */ 377158060Speter TAILQ_HEAD(,pv_chunk) pm_pvchunk; /* list of mappings in pmap */ 378222813Sattilio cpuset_t pm_active; /* active on cpus */ 3794Srgrimes struct pmap_statistics pm_stats; /* pmap statistics */ 38064728Stegge LIST_ENTRY(pmap) pm_list; /* List of all pmaps */ 381282065Skib#if defined(PAE) || defined(PAE_TABLES) 382282065Skib pdpt_entry_t *pm_pdpt; /* KVA of page directory pointer 383112841Sjake table */ 384112841Sjake#endif 385248449Sattilio struct vm_radix pm_root; /* spare page table pages */ 3864Srgrimes}; 3874Srgrimes 3884Srgrimestypedef struct pmap *pmap_t; 3894Srgrimes 39055205Speter#ifdef _KERNEL 39195710Speterextern struct pmap kernel_pmap_store; 39295710Speter#define kernel_pmap (&kernel_pmap_store) 393130573Salc 394130573Salc#define PMAP_LOCK(pmap) mtx_lock(&(pmap)->pm_mtx) 395130573Salc#define PMAP_LOCK_ASSERT(pmap, type) \ 396130573Salc mtx_assert(&(pmap)->pm_mtx, (type)) 397130573Salc#define PMAP_LOCK_DESTROY(pmap) mtx_destroy(&(pmap)->pm_mtx) 398130573Salc#define PMAP_LOCK_INIT(pmap) mtx_init(&(pmap)->pm_mtx, "pmap", \ 399135939Salc NULL, MTX_DEF | MTX_DUPOK) 400130573Salc#define PMAP_LOCKED(pmap) mtx_owned(&(pmap)->pm_mtx) 401130573Salc#define PMAP_MTX(pmap) (&(pmap)->pm_mtx) 402130573Salc#define PMAP_TRYLOCK(pmap) mtx_trylock(&(pmap)->pm_mtx) 403130573Salc#define PMAP_UNLOCK(pmap) mtx_unlock(&(pmap)->pm_mtx) 
4044Srgrimes#endif 4054Srgrimes 4064Srgrimes/* 4074Srgrimes * For each vm_page_t, there is a list of all currently valid virtual 408164250Sru * mappings of that page. An entry is a pv_entry_t, the list is pv_list. 4094Srgrimes */ 4104Srgrimestypedef struct pv_entry { 4114Srgrimes vm_offset_t pv_va; /* virtual address for mapping */ 412247622Sattilio TAILQ_ENTRY(pv_entry) pv_next; 4134Srgrimes} *pv_entry_t; 4144Srgrimes 415158060Speter/* 416158060Speter * pv_entries are allocated in chunks per-process. This avoids the 417158060Speter * need to track per-pmap assignments. 418158060Speter */ 419158060Speter#define _NPCM 11 420158060Speter#define _NPCPV 336 421158060Speterstruct pv_chunk { 422158060Speter pmap_t pc_pmap; 423158060Speter TAILQ_ENTRY(pv_chunk) pc_list; 424158060Speter uint32_t pc_map[_NPCM]; /* bitmap; 1 = free */ 425236045Salc TAILQ_ENTRY(pv_chunk) pc_lru; 426158060Speter struct pv_entry pc_pventry[_NPCPV]; 427158060Speter}; 428158060Speter 42955205Speter#ifdef _KERNEL 4304Srgrimes 431267964Sjhbextern caddr_t CADDR3; 432267964Sjhbextern pt_entry_t *CMAP3; 433112569Sjakeextern vm_paddr_t phys_avail[]; 434147671Speterextern vm_paddr_t dump_avail[]; 435120654Speterextern int pseflag; 436120654Speterextern int pgeflag; 43731321Sbdeextern char *ptvmmap; /* poor name! */ 4387090Sbdeextern vm_offset_t virtual_avail; 4397090Sbdeextern vm_offset_t virtual_end; 4404Srgrimes 441195649Salc#define pmap_page_get_memattr(m) ((vm_memattr_t)(m)->md.pat_mode) 442237168Salc#define pmap_page_is_write_mapped(m) (((m)->aflags & PGA_WRITEABLE) != 0) 443161223Sjhb#define pmap_unmapbios(va, sz) pmap_unmapdev((va), (sz)) 444130399Salc 445213455Salc/* 446213455Salc * Only the following functions or macros may be used before pmap_bootstrap() 447213455Salc * is called: pmap_kenter(), pmap_kextract(), pmap_kremove(), vtophys(), and 448213455Salc * vtopte(). 
449213455Salc */ 450167668Salcvoid pmap_bootstrap(vm_paddr_t); 451195940Skibint pmap_cache_bits(int mode, boolean_t is_pde); 452161223Sjhbint pmap_change_attr(vm_offset_t, vm_size_t, int); 453158238Sjhbvoid pmap_init_pat(void); 454112569Sjakevoid pmap_kenter(vm_offset_t va, vm_paddr_t pa); 455128098Salcvoid *pmap_kenter_temporary(vm_paddr_t pa, int i); 456112312Sjakevoid pmap_kremove(vm_offset_t); 457161223Sjhbvoid *pmap_mapbios(vm_paddr_t, vm_size_t); 458112569Sjakevoid *pmap_mapdev(vm_paddr_t, vm_size_t); 459161223Sjhbvoid *pmap_mapdev_attr(vm_paddr_t, vm_size_t, int); 460177659Salcboolean_t pmap_page_is_mapped(vm_page_t m); 461195649Salcvoid pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma); 46292761Salfredvoid pmap_unmapdev(vm_offset_t, vm_size_t); 463122284Salcpt_entry_t *pmap_pte(pmap_t, vm_offset_t) __pure2; 46499862Spetervoid pmap_invalidate_page(pmap_t, vm_offset_t); 46599862Spetervoid pmap_invalidate_range(pmap_t, vm_offset_t, vm_offset_t); 46699862Spetervoid pmap_invalidate_all(pmap_t); 467158236Sjhbvoid pmap_invalidate_cache(void); 468220803Skibvoid pmap_invalidate_cache_pages(vm_page_t *pages, int count); 469273136Skibvoid pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva, 470273136Skib boolean_t force); 4714471Sbde 47255205Speter#endif /* _KERNEL */ 47318896Sbde 47415543Sphk#endif /* !LOCORE */ 4754Srgrimes 4764471Sbde#endif /* !_MACHINE_PMAP_H_ */ 477