/*-
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Derived from the hp300 version by Mike Hibler; this version by William
 * Jolitz uses a recursive map [a pde points to the page directory] to
 * map the page tables using the page tables themselves.  This is done to
 * reduce the impact of large, sparse address spaces on kernel virtual
 * memory, and to reduce the per-process memory cost.
 *
 * from: hp300: @(#)pmap.h	7.2 (Berkeley) 12/16/90
 * from: @(#)pmap.h	7.4 (Berkeley) 5/12/91
 * $FreeBSD: stable/11/sys/i386/include/pmap.h 342927 2019-01-11 02:09:48Z kib $
 */

#ifndef _MACHINE_PMAP_H_
#define	_MACHINE_PMAP_H_

/*
 * Page-directory and page-table entries follow this format, with a few
 * of the fields absent depending on the paging mode (PAE or not) and the
 * entry type (PDE or PTE).
 */
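
/*
 * Illustrative example (hypothetical value, not part of the original
 * header), using the flag definitions below: a 4KB PTE holding 0x00123063
 * decodes as 0x00123000 | PG_M | PG_A | PG_RW | PG_V, i.e. a valid,
 * writable, accessed, and dirty supervisor mapping of the physical frame
 * at 0x00123000.
 */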
 /* ---- Intel Nomenclature ---- */
#define	PG_V		0x001	/* P	Valid			*/
#define	PG_RW		0x002	/* R/W	Read/Write		*/
#define	PG_U		0x004	/* U/S	User/Supervisor		*/
#define	PG_NC_PWT	0x008	/* PWT	Write through		*/
#define	PG_NC_PCD	0x010	/* PCD	Cache disable		*/
#define	PG_A		0x020	/* A	Accessed		*/
#define	PG_M		0x040	/* D	Dirty			*/
#define	PG_PS		0x080	/* PS	Page size (0=4k,1=4M)	*/
#define	PG_PTE_PAT	0x080	/* PAT	PAT index		*/
#define	PG_G		0x100	/* G	Global			*/
#define	PG_AVAIL1	0x200	/*    /	Available for system	*/
#define	PG_AVAIL2	0x400	/*   <	programmers use		*/
#define	PG_AVAIL3	0x800	/*    \				*/
#define	PG_PDE_PAT	0x1000	/* PAT	PAT index		*/
#if defined(PAE) || defined(PAE_TABLES)
#define	PG_NX		(1ull<<63) /* No-execute */
#endif

/* Our various interpretations of the above */
#define	PG_W		PG_AVAIL1	/* "Wired" pseudoflag */
#define	PG_MANAGED	PG_AVAIL2
#define	PG_PROMOTED	PG_AVAIL3	/* PDE only */
#if defined(PAE) || defined(PAE_TABLES)
#define	PG_FRAME	(0x000ffffffffff000ull)
#define	PG_PS_FRAME	(0x000fffffffe00000ull)
#else
#define	PG_FRAME	(~PAGE_MASK)
#define	PG_PS_FRAME	(0xffc00000)
#endif
#define	PG_PROT		(PG_RW|PG_U)	/* all protection bits */
#define	PG_N		(PG_NC_PWT|PG_NC_PCD)	/* Non-cacheable */

/* Page level cache control fields used to determine the PAT type */
#define	PG_PDE_CACHE	(PG_PDE_PAT | PG_NC_PWT | PG_NC_PCD)
#define	PG_PTE_CACHE	(PG_PTE_PAT | PG_NC_PWT | PG_NC_PCD)

/*
 * Promotion to a 2 or 4MB (PDE) page mapping requires that the corresponding
 * 4KB (PTE) page mappings have identical settings for the following fields:
 */
#define	PG_PTE_PROMOTE	(PG_MANAGED | PG_W | PG_G | PG_PTE_PAT | \
	    PG_M | PG_A | PG_NC_PCD | PG_NC_PWT | PG_U | PG_RW | PG_V)

/*
 * Page Protection Exception bits
 */
#define	PGEX_P		0x01	/* Protection violation vs. not present */
#define	PGEX_W		0x02	/* during a Write cycle */
#define	PGEX_U		0x04	/* access from User mode (UPL) */
#define	PGEX_RSV	0x08	/* reserved PTE field is non-zero */
#define	PGEX_I		0x10	/* during an instruction fetch */

/*
 * Size of Kernel address space.  This is the number of page table pages
 * (4MB each) to use for the kernel.  256 pages == 1 Gigabyte.
 * This **MUST** be a multiple of 4 (e.g., 252, 256, 260, etc.).
 * For PAE, the page table page unit size is 2MB.  This means that 512 pages
 * is 1 Gigabyte.  Double everything.  It must be a multiple of 8 for PAE.
 */
#ifndef KVA_PAGES
#if defined(PAE) || defined(PAE_TABLES)
#define	KVA_PAGES	512
#else
#define	KVA_PAGES	256
#endif
#endif

/*
 * Pte related macros
 */
#define	VADDR(pdi, pti) ((vm_offset_t)(((pdi)<<PDRSHIFT)|((pti)<<PAGE_SHIFT)))

/*
 * The initial number of kernel page table pages that are constructed
 * by locore must be sufficient to map vm_page_array.  That number can
 * be calculated as follows:
 *     max_phys / PAGE_SIZE * sizeof(struct vm_page) / NBPDR
 * PAE:        max_phys 16G, sizeof(vm_page) 76, NBPDR 2M, 152 page table pages.
 * PAE_TABLES: max_phys 4G, sizeof(vm_page) 68, NBPDR 2M, 36 page table pages.
 * Non-PAE:    max_phys 4G, sizeof(vm_page) 68, NBPDR 4M, 18 page table pages.
 */
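
/*
 * Worked instance of the formula above for the PAE case (a sketch using
 * only the constants quoted in the comment): 16G / 4K = 4194304 pages;
 * 4194304 * 76 bytes = 318767104 bytes of vm_page structures;
 * 318767104 / 2M = 152 page table pages.  The PAE NKPT default of 240
 * (below) therefore covers this minimum with room to spare.
 */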
#ifndef NKPT
#if defined(PAE)
#define	NKPT		240
#elif defined(PAE_TABLES)
#define	NKPT		60
#else
#define	NKPT		30
#endif
#endif

#ifndef NKPDE
#define	NKPDE	(KVA_PAGES)	/* number of page tables/pde's */
#endif

/*
 * The *PTDI values control the layout of virtual memory
 *
 * XXX This works for now, but I am not real happy with it, I'll fix it
 * right after I fix locore.s and the magic 28K hole
 */
#define	KPTDI		(NPDEPTD-NKPDE)	/* start of kernel virtual pde's */
#define	PTDPTDI		(KPTDI-NPGPTD)	/* ptd entry that points to ptd! */

/*
 * XXX doesn't really belong here I guess...
 */
#define	ISA_HOLE_START	0xa0000
#define	ISA_HOLE_LENGTH	(0x100000-ISA_HOLE_START)

#ifndef LOCORE

#include <sys/queue.h>
#include <sys/_cpuset.h>
#include <sys/_lock.h>
#include <sys/_mutex.h>

#include <vm/_vm_radix.h>

#if defined(PAE) || defined(PAE_TABLES)

typedef uint64_t pdpt_entry_t;
typedef uint64_t pd_entry_t;
typedef uint64_t pt_entry_t;

#define	PTESHIFT	(3)
#define	PDESHIFT	(3)

#else

typedef uint32_t pd_entry_t;
typedef uint32_t pt_entry_t;

#define	PTESHIFT	(2)
#define	PDESHIFT	(2)

#endif

/*
 * Address of current address space page table maps and directories.
 */
#ifdef _KERNEL
#include <machine/atomic.h>

extern pt_entry_t PTmap[];
extern pd_entry_t PTD[];
extern pd_entry_t PTDpde[];

#if defined(PAE) || defined(PAE_TABLES)
extern pdpt_entry_t *IdlePDPT;
#endif
extern pd_entry_t *IdlePTD;	/* physical address of "Idle" state directory */

/*
 * Translate a virtual address to the kernel virtual address of its page table
 * entry (PTE).  This can be used recursively.  If the address of a PTE as
 * previously returned by this macro is itself given as the argument, then the
 * address of the page directory entry (PDE) that maps the PTE will be
 * returned.
 *
 * This macro may be used before pmap_bootstrap() is called.
 */
#define	vtopte(va)	(PTmap + i386_btop(va))

/*
 * Translate a virtual address to its physical address.
 *
 * This macro may be used before pmap_bootstrap() is called.
 */
#define	vtophys(va)	pmap_kextract((vm_offset_t)(va))
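
/*
 * A minimal sketch (illustrative, not part of the original header) of the
 * recursion described above: applying vtopte() to the address returned by
 * vtopte() yields the address of the PDE that maps the PTE.
 */
#if 0	/* example only, not compiled */
static __inline pd_entry_t *
vtopde_demo(vm_offset_t va)
{
	pt_entry_t *pte;

	pte = vtopte(va);	/* PTE that maps "va" */
	/* The PTE itself lives in the recursive map, so map it again. */
	return ((pd_entry_t *)vtopte((vm_offset_t)pte));
}
#endif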
/*
 * KPTmap is a linear mapping of the kernel page table.  It differs from the
 * recursive mapping in two ways: (1) it only provides access to kernel page
 * table pages, and not user page table pages, and (2) it provides access to
 * a kernel page table page after the corresponding virtual addresses have
 * been promoted to a 2/4MB page mapping.
 *
 * KPTmap is first initialized by locore to support just NKPT page table
 * pages.  Later, it is reinitialized by pmap_bootstrap() to allow for
 * expansion of the kernel page table.
 */
extern pt_entry_t *KPTmap;

#if (defined(PAE) || defined(PAE_TABLES))

#define	pde_cmpset(pdep, old, new)	atomic_cmpset_64_i586(pdep, old, new)
#define	pte_load_store(ptep, pte)	atomic_swap_64_i586(ptep, pte)
#define	pte_load_clear(ptep)		atomic_swap_64_i586(ptep, 0)
#define	pte_store(ptep, pte)		atomic_store_rel_64_i586(ptep, pte)
#define	pte_load(ptep)			atomic_load_acq_64_i586(ptep)

extern pt_entry_t pg_nx;

#else /* !(PAE || PAE_TABLES) */

#define	pde_cmpset(pdep, old, new)	atomic_cmpset_int(pdep, old, new)
#define	pte_load_store(ptep, pte)	atomic_swap_int(ptep, pte)
#define	pte_load_clear(ptep)		atomic_swap_int(ptep, 0)
#define	pte_store(ptep, pte) do { \
	*(u_int *)(ptep) = (u_int)(pte); \
} while (0)
#define	pte_load(ptep)			atomic_load_acq_int(ptep)

#endif /* !(PAE || PAE_TABLES) */

#define	pte_clear(ptep)			pte_store(ptep, 0)

#define	pde_store(pdep, pde)		pte_store(pdep, pde)

/*
 * Extract from the kernel page table the physical address that is mapped by
 * the given virtual address "va".
 *
 * This function may be used before pmap_bootstrap() is called.
 */
static __inline vm_paddr_t
pmap_kextract(vm_offset_t va)
{
	vm_paddr_t pa;

	if ((pa = pte_load(&PTD[va >> PDRSHIFT])) & PG_PS) {
		pa = (pa & PG_PS_FRAME) | (va & PDRMASK);
	} else {
		/*
		 * Beware of a concurrent promotion that changes the PDE at
		 * this point!  For example, vtopte() must not be used to
		 * access the PTE because it would use the new PDE.  It is,
		 * however, safe to use the old PDE because the page table
		 * page is preserved by the promotion.
		 */
		pa = KPTmap[i386_btop(va)];
		pa = (pa & PG_FRAME) | (va & PAGE_MASK);
	}
	return (pa);
}
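
/*
 * Usage sketch for the accessors above: a minimal, hypothetical
 * kenter_demo() showing how a kernel mapping is installed and torn down.
 * Illustrative only; the real pmap_kenter() in pmap.c also ORs in the
 * global-page flag (pgeflag) and leaves TLB invalidation to its callers.
 */
#if 0	/* example only, not compiled */
static __inline void
kenter_demo(vm_offset_t va, vm_paddr_t pa)
{
	pt_entry_t *pte;

	pte = vtopte(va);
	pte_store(pte, pa | PG_RW | PG_V);	/* install writable mapping */
	/* ... use the mapping ... */
	pte_clear(pte);				/* tear it down again */
	invlpg(va);				/* flush the stale TLB entry */
}
#endif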
#endif /* _KERNEL */

/*
 * Pmap stuff
 */
struct	pv_entry;
struct	pv_chunk;

struct md_page {
	TAILQ_HEAD(,pv_entry)	pv_list;
	int			pat_mode;
};

struct pmap {
	struct mtx		pm_mtx;
	pd_entry_t		*pm_pdir;	/* KVA of page directory */
	TAILQ_HEAD(,pv_chunk)	pm_pvchunk;	/* list of mappings in pmap */
	cpuset_t		pm_active;	/* active on cpus */
	struct pmap_statistics	pm_stats;	/* pmap statistics */
	LIST_ENTRY(pmap)	pm_list;	/* List of all pmaps */
#if defined(PAE) || defined(PAE_TABLES)
	pdpt_entry_t		*pm_pdpt;	/* KVA of page directory pointer
						   table */
#endif
	struct vm_radix		pm_root;	/* spare page table pages */
};

typedef struct pmap	*pmap_t;

#ifdef _KERNEL
extern struct pmap	kernel_pmap_store;
#define	kernel_pmap	(&kernel_pmap_store)

#define	PMAP_LOCK(pmap)		mtx_lock(&(pmap)->pm_mtx)
#define	PMAP_LOCK_ASSERT(pmap, type) \
				mtx_assert(&(pmap)->pm_mtx, (type))
#define	PMAP_LOCK_DESTROY(pmap)	mtx_destroy(&(pmap)->pm_mtx)
#define	PMAP_LOCK_INIT(pmap)	mtx_init(&(pmap)->pm_mtx, "pmap", \
				    NULL, MTX_DEF | MTX_DUPOK)
#define	PMAP_LOCKED(pmap)	mtx_owned(&(pmap)->pm_mtx)
#define	PMAP_MTX(pmap)		(&(pmap)->pm_mtx)
#define	PMAP_TRYLOCK(pmap)	mtx_trylock(&(pmap)->pm_mtx)
#define	PMAP_UNLOCK(pmap)	mtx_unlock(&(pmap)->pm_mtx)
#endif

/*
 * For each vm_page_t, there is a list of all currently valid virtual
 * mappings of that page.  An entry is a pv_entry_t, the list is pv_list.
 */
typedef struct pv_entry {
	vm_offset_t	pv_va;		/* virtual address for mapping */
	TAILQ_ENTRY(pv_entry)	pv_next;
} *pv_entry_t;

/*
 * pv_entries are allocated in chunks per-process.  This avoids the
 * need to track per-pmap assignments.
 */
#define	_NPCM	11
#define	_NPCPV	336
struct pv_chunk {
	pmap_t			pc_pmap;
	TAILQ_ENTRY(pv_chunk)	pc_list;
	uint32_t		pc_map[_NPCM];	/* bitmap; 1 = free */
	TAILQ_ENTRY(pv_chunk)	pc_lru;
	struct pv_entry		pc_pventry[_NPCPV];
};

#ifdef _KERNEL

extern caddr_t CADDR3;
extern pt_entry_t *CMAP3;
extern vm_paddr_t phys_avail[];
extern vm_paddr_t dump_avail[];
extern int pseflag;
extern int pgeflag;
extern char *ptvmmap;		/* poor name! */
extern vm_offset_t virtual_avail;
extern vm_offset_t virtual_end;

#define	pmap_page_get_memattr(m)	((vm_memattr_t)(m)->md.pat_mode)
#define	pmap_page_is_write_mapped(m)	(((m)->aflags & PGA_WRITEABLE) != 0)
#define	pmap_unmapbios(va, sz)		pmap_unmapdev((va), (sz))

/*
 * Only the following functions or macros may be used before pmap_bootstrap()
 * is called: pmap_kenter(), pmap_kextract(), pmap_kremove(), vtophys(), and
 * vtopte().
 */
void	pmap_activate_boot(pmap_t pmap);
void	pmap_bootstrap(vm_paddr_t);
int	pmap_cache_bits(int mode, boolean_t is_pde);
int	pmap_change_attr(vm_offset_t, vm_size_t, int);
void	pmap_init_pat(void);
void	pmap_kenter(vm_offset_t va, vm_paddr_t pa);
void	*pmap_kenter_temporary(vm_paddr_t pa, int i);
void	pmap_kremove(vm_offset_t);
void	*pmap_mapbios(vm_paddr_t, vm_size_t);
void	*pmap_mapdev(vm_paddr_t, vm_size_t);
void	*pmap_mapdev_attr(vm_paddr_t, vm_size_t, int);
boolean_t pmap_page_is_mapped(vm_page_t m);
void	pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma);
void	pmap_unmapdev(vm_offset_t, vm_size_t);
pt_entry_t *pmap_pte(pmap_t, vm_offset_t) __pure2;
void	pmap_invalidate_page(pmap_t, vm_offset_t);
void	pmap_invalidate_range(pmap_t, vm_offset_t, vm_offset_t);
void	pmap_invalidate_all(pmap_t);
void	pmap_invalidate_cache(void);
void	pmap_invalidate_cache_pages(vm_page_t *pages, int count);
void	pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva,
	    boolean_t force);

void	invltlb_glob(void);

#endif /* _KERNEL */

#endif /* !LOCORE */

#endif /* !_MACHINE_PMAP_H_ */
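
/*
 * Sizing note for struct pv_chunk above (a worked sketch assuming the
 * ILP32 i386 layout, not text from the original header): the chunk header
 * is 4 (pc_pmap) + 8 (pc_list) + 44 (pc_map, 11 * 4) + 8 (pc_lru) = 64
 * bytes, and 336 pv_entries of 12 bytes each add 4032 bytes, for a total
 * of exactly 4096 bytes, i.e. one chunk per 4KB page.  The _NPCM (11)
 * 32-bit words provide 352 bitmap bits, enough to cover the 336 entries.
 */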