/*-
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Derived from the hp300 version by Mike Hibler; this version by William
 * Jolitz uses a recursive map [a pde points to the page directory] to
 * map the page tables using the page tables themselves.  This is done to
 * reduce the impact on kernel virtual memory of large sparse address
 * spaces, and to reduce the cost of memory to each process.
 *
 * from: hp300: @(#)pmap.h	7.2 (Berkeley) 12/16/90
 * from: @(#)pmap.h	7.4 (Berkeley) 5/12/91
 * from: FreeBSD: src/sys/i386/include/pmap.h,v 1.70 2000/11/30
 *
 * $FreeBSD: stable/11/sys/arm/include/pmap-v4.h 314530 2017-03-02 01:18:46Z ian $
 */

#ifndef _MACHINE_PMAP_V4_H_
#define _MACHINE_PMAP_V4_H_

#include <machine/pte-v4.h>

/*
 * Define the MMU types we support based on the CPU types.  While the code
 * has some theoretical support for multiple MMU types in a single kernel,
 * there are no actual working configurations that use that feature.
 */
#if (defined(CPU_ARM9) || defined(CPU_ARM9E) || defined(CPU_FA526))
#define	ARM_MMU_GENERIC		1
#else
#define	ARM_MMU_GENERIC		0
#endif

#if (defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
    defined(CPU_XSCALE_81342))
#define	ARM_MMU_XSCALE		1
#else
#define	ARM_MMU_XSCALE		0
#endif

#define	ARM_NMMUS		(ARM_MMU_GENERIC + ARM_MMU_XSCALE)
#if ARM_NMMUS == 0 && !defined(KLD_MODULE) && defined(_KERNEL)
#error ARM_NMMUS is 0
#endif

/*
 * Pte related macros
 */
#define	PTE_NOCACHE	1
#define	PTE_CACHE	2
#define	PTE_DEVICE	PTE_NOCACHE
#define	PTE_PAGETABLE	3

enum mem_type {
	STRONG_ORD = 0,
	DEVICE_NOSHARE,
	DEVICE_SHARE,
	NRML_NOCACHE,
	NRML_IWT_OWT,
	NRML_IWB_OWB,
	NRML_IWBA_OWBA
};

#ifndef LOCORE

#include <sys/queue.h>
#include <sys/_cpuset.h>
#include <sys/_lock.h>
#include <sys/_mutex.h>

#define	PDESIZE		sizeof(pd_entry_t)	/* for assembly files */
#define	PTESIZE		sizeof(pt_entry_t)	/* for assembly files */

#define	pmap_page_get_memattr(m)	((m)->md.pv_memattr)
#define	pmap_page_is_mapped(m)	(!TAILQ_EMPTY(&(m)->md.pv_list))

/*
 * Pmap stuff
 */

/*
 * This structure is used to hold a virtual<->physical address
 * association and is used mostly by bootstrap code.
 */
struct pv_addr {
	SLIST_ENTRY(pv_addr) pv_list;
	vm_offset_t	pv_va;
	vm_paddr_t	pv_pa;
};

struct pv_entry;
struct pv_chunk;

struct md_page {
	int		pvh_attrs;
	vm_memattr_t	pv_memattr;
	vm_offset_t	pv_kva;		/* first kernel VA mapping */
	TAILQ_HEAD(,pv_entry)	pv_list;
};

struct l1_ttable;
struct l2_dtable;

/*
 * The number of L2 descriptor tables which can be tracked by an l2_dtable.
 * A bucket size of 16 provides for 16MB of contiguous virtual address
 * space per l2_dtable.  Most processes will, therefore, require only two or
 * three of these to map their whole working set.
 */
#define	L2_BUCKET_LOG2	4
#define	L2_BUCKET_SIZE	(1 << L2_BUCKET_LOG2)

/*
 * Given the above "L2-descriptors-per-l2_dtable" constant, the number
 * of l2_dtable structures required to track all possible page descriptors
 * mappable by an L1 translation table is given by the following constants:
 */
#define	L2_LOG2		((32 - L1_S_SHIFT) - L2_BUCKET_LOG2)
#define	L2_SIZE		(1 << L2_LOG2)

struct pmap {
	struct mtx	pm_mtx;
	u_int8_t	pm_domain;
	struct l1_ttable *pm_l1;
	struct l2_dtable *pm_l2[L2_SIZE];
	cpuset_t	pm_active;	/* active on cpus */
	struct pmap_statistics	pm_stats;	/* pmap statistics */
	TAILQ_HEAD(,pv_entry)	pm_pvlist;	/* list of mappings in pmap */
};

typedef struct pmap *pmap_t;

#ifdef _KERNEL
extern struct pmap	kernel_pmap_store;
#define	kernel_pmap	(&kernel_pmap_store)

#define	PMAP_ASSERT_LOCKED(pmap) \
				mtx_assert(&(pmap)->pm_mtx, MA_OWNED)
#define	PMAP_LOCK(pmap)		mtx_lock(&(pmap)->pm_mtx)
#define	PMAP_LOCK_DESTROY(pmap)	mtx_destroy(&(pmap)->pm_mtx)
#define	PMAP_LOCK_INIT(pmap)	mtx_init(&(pmap)->pm_mtx, "pmap", \
				    NULL, MTX_DEF | MTX_DUPOK)
#define	PMAP_OWNED(pmap)	mtx_owned(&(pmap)->pm_mtx)
#define	PMAP_MTX(pmap)		(&(pmap)->pm_mtx)
#define	PMAP_TRYLOCK(pmap)	mtx_trylock(&(pmap)->pm_mtx)
#define	PMAP_UNLOCK(pmap)	mtx_unlock(&(pmap)->pm_mtx)
#endif
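
/*
 * For example, a caller brackets any update of per-pmap state with the
 * lock macros above.  A minimal sketch (example_update() is a hypothetical
 * helper, shown for illustration only):
 *
 *	static void
 *	example_update(pmap_t pm)
 *	{
 *		PMAP_LOCK(pm);
 *		pm->pm_stats.resident_count++;
 *		PMAP_UNLOCK(pm);
 *	}
 */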

/*
 * For each vm_page_t, there is a list of all currently valid virtual
 * mappings of that page.  An entry is a pv_entry_t, the list is pv_list.
 */
typedef struct pv_entry {
	vm_offset_t	pv_va;		/* virtual address for mapping */
	TAILQ_ENTRY(pv_entry)	pv_list;
	int		pv_flags;	/* flags (wired, etc...) */
	pmap_t		pv_pmap;	/* pmap where mapping lies */
	TAILQ_ENTRY(pv_entry)	pv_plist;
} *pv_entry_t;

/*
 * pv_entries are allocated in chunks per-process.  This avoids the
 * need to track per-pmap assignments.
 */
#define	_NPCM	8
#define	_NPCPV	252

struct pv_chunk {
	pmap_t			pc_pmap;
	TAILQ_ENTRY(pv_chunk)	pc_list;
	uint32_t		pc_map[_NPCM];	/* bitmap; 1 = free */
	uint32_t		pc_dummy[3];	/* aligns pv_chunk to 4KB */
	TAILQ_ENTRY(pv_chunk)	pc_lru;
	struct pv_entry		pc_pventry[_NPCPV];
};

#ifdef _KERNEL

boolean_t pmap_get_pde_pte(pmap_t, vm_offset_t, pd_entry_t **, pt_entry_t **);

/*
 * Translate a virtual address to its page table entry, and from there to
 * a physical address.  Note that on this platform the lookup below walks
 * the kernel pmap via pmap_get_pde_pte() rather than using a recursive
 * mapping.
 */

/*
 * The current top of kernel VM.
 */
extern vm_offset_t pmap_curmaxkvaddr;

/* Virtual address to page table entry */
static __inline pt_entry_t *
vtopte(vm_offset_t va)
{
	pd_entry_t *pdep;
	pt_entry_t *ptep;

	if (pmap_get_pde_pte(kernel_pmap, va, &pdep, &ptep) == FALSE)
		return (NULL);
	return (ptep);
}

void	pmap_bootstrap(vm_offset_t firstaddr, struct pv_addr *l1pt);
int	pmap_change_attr(vm_offset_t, vm_size_t, int);
void	pmap_kenter(vm_offset_t va, vm_paddr_t pa);
void	pmap_kenter_nocache(vm_offset_t va, vm_paddr_t pa);
void	pmap_kenter_user(vm_offset_t va, vm_paddr_t pa);
vm_paddr_t pmap_dump_kextract(vm_offset_t, pt2_entry_t *);
void	pmap_kremove(vm_offset_t);
vm_page_t pmap_use_pt(pmap_t, vm_offset_t);
void	pmap_debug(int);
void	pmap_map_section(vm_offset_t, vm_offset_t, vm_offset_t, int, int);
void	pmap_link_l2pt(vm_offset_t, vm_offset_t, struct pv_addr *);
vm_size_t pmap_map_chunk(vm_offset_t, vm_offset_t, vm_offset_t, vm_size_t,
	    int, int);
void	pmap_map_entry(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa,
	    int prot, int cache);
int	pmap_fault_fixup(pmap_t, vm_offset_t, vm_prot_t, int);

/*
 * Definitions for MMU domains
 */
#define	PMAP_DOMAINS		15	/* 15 'user' domains (1-15) */
#define	PMAP_DOMAIN_KERNEL	0	/* The kernel uses domain #0 */

/*
 * The new pmap ensures that page tables are always mapped write-through.
 * Thus, on some platforms we can run fast and loose and avoid syncing PTEs
 * on every change.
 *
 * Unfortunately, not all CPUs have a write-through cache mode.  So we
 * define PMAP_NEEDS_PTE_SYNC for C code to conditionally do PTE syncs,
 * and if there is the chance for PTE syncs to be needed, we define
 * PMAP_INCLUDE_PTE_SYNC so e.g. assembly code can include (and run)
 * the code.
 */
extern int pmap_needs_pte_sync;
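
/*
 * A typical PTE update in C first locates the entry, modifies it, then
 * performs the conditional sync.  A minimal sketch, assuming va maps a
 * valid kernel page (PTE_SYNC itself is defined later in this file):
 *
 *	pt_entry_t *ptep = vtopte(va);
 *	if (ptep != NULL) {
 *		*ptep |= L2_S_PROT_W;	- e.g. grant write permission
 *		PTE_SYNC(ptep);		- no-op unless PMAP_NEEDS_PTE_SYNC
 *	}
 */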

/*
 * These macros define the various bit masks in the PTE.
 *
 * We use these macros since we use different bits on different processor
 * models.
 */

#define	L1_S_CACHE_MASK_generic	(L1_S_B|L1_S_C)
#define	L1_S_CACHE_MASK_xscale	(L1_S_B|L1_S_C|L1_S_XSCALE_TEX(TEX_XSCALE_X)|\
				    L1_S_XSCALE_TEX(TEX_XSCALE_T))

#define	L2_L_CACHE_MASK_generic	(L2_B|L2_C)
#define	L2_L_CACHE_MASK_xscale	(L2_B|L2_C|L2_XSCALE_L_TEX(TEX_XSCALE_X) | \
				    L2_XSCALE_L_TEX(TEX_XSCALE_T))

#define	L2_S_PROT_U_generic	(L2_AP(AP_U))
#define	L2_S_PROT_W_generic	(L2_AP(AP_W))
#define	L2_S_PROT_MASK_generic	(L2_S_PROT_U|L2_S_PROT_W)

#define	L2_S_PROT_U_xscale	(L2_AP0(AP_U))
#define	L2_S_PROT_W_xscale	(L2_AP0(AP_W))
#define	L2_S_PROT_MASK_xscale	(L2_S_PROT_U|L2_S_PROT_W)

#define	L2_S_CACHE_MASK_generic	(L2_B|L2_C)
#define	L2_S_CACHE_MASK_xscale	(L2_B|L2_C|L2_XSCALE_T_TEX(TEX_XSCALE_X)| \
				    L2_XSCALE_T_TEX(TEX_XSCALE_X))

#define	L1_S_PROTO_generic	(L1_TYPE_S | L1_S_IMP)
#define	L1_S_PROTO_xscale	(L1_TYPE_S)

#define	L1_C_PROTO_generic	(L1_TYPE_C | L1_C_IMP2)
#define	L1_C_PROTO_xscale	(L1_TYPE_C)

#define	L2_L_PROTO		(L2_TYPE_L)

#define	L2_S_PROTO_generic	(L2_TYPE_S)
#define	L2_S_PROTO_xscale	(L2_TYPE_XSCALE_XS)

/*
 * User-visible names for the ones that vary with MMU class.
 */
#define	L2_AP(x)	(L2_AP0(x) | L2_AP1(x) | L2_AP2(x) | L2_AP3(x))

#if ARM_NMMUS > 1
/* More than one MMU class configured; use variables. */
#define	L2_S_PROT_U		pte_l2_s_prot_u
#define	L2_S_PROT_W		pte_l2_s_prot_w
#define	L2_S_PROT_MASK		pte_l2_s_prot_mask

#define	L1_S_CACHE_MASK		pte_l1_s_cache_mask
#define	L2_L_CACHE_MASK		pte_l2_l_cache_mask
#define	L2_S_CACHE_MASK		pte_l2_s_cache_mask

#define	L1_S_PROTO		pte_l1_s_proto
#define	L1_C_PROTO		pte_l1_c_proto
#define	L2_S_PROTO		pte_l2_s_proto

#elif ARM_MMU_GENERIC != 0
#define	L2_S_PROT_U		L2_S_PROT_U_generic
#define	L2_S_PROT_W		L2_S_PROT_W_generic
#define	L2_S_PROT_MASK		L2_S_PROT_MASK_generic

#define	L1_S_CACHE_MASK		L1_S_CACHE_MASK_generic
#define	L2_L_CACHE_MASK		L2_L_CACHE_MASK_generic
#define	L2_S_CACHE_MASK		L2_S_CACHE_MASK_generic

#define	L1_S_PROTO		L1_S_PROTO_generic
#define	L1_C_PROTO		L1_C_PROTO_generic
#define	L2_S_PROTO		L2_S_PROTO_generic

#elif ARM_MMU_XSCALE == 1
#define	L2_S_PROT_U		L2_S_PROT_U_xscale
#define	L2_S_PROT_W		L2_S_PROT_W_xscale
#define	L2_S_PROT_MASK		L2_S_PROT_MASK_xscale

#define	L1_S_CACHE_MASK		L1_S_CACHE_MASK_xscale
#define	L2_L_CACHE_MASK		L2_L_CACHE_MASK_xscale
#define	L2_S_CACHE_MASK		L2_S_CACHE_MASK_xscale

#define	L1_S_PROTO		L1_S_PROTO_xscale
#define	L1_C_PROTO		L1_C_PROTO_xscale
#define	L2_S_PROTO		L2_S_PROTO_xscale

#endif /* ARM_NMMUS > 1 */

#if defined(CPU_XSCALE_81342)
#define	PMAP_NEEDS_PTE_SYNC	1
#define	PMAP_INCLUDE_PTE_SYNC
#else
#define	PMAP_NEEDS_PTE_SYNC	0
#endif

/*
 * These macros return various bits based on kernel/user and protection.
 * Note that the compiler will usually fold these at compile time.
 */
#define	L1_S_PROT_U		(L1_S_AP(AP_U))
#define	L1_S_PROT_W		(L1_S_AP(AP_W))
#define	L1_S_PROT_MASK		(L1_S_PROT_U|L1_S_PROT_W)
#define	L1_S_WRITABLE(pd)	((pd) & L1_S_PROT_W)

#define	L1_S_PROT(ku, pr)	((((ku) == PTE_USER) ? L1_S_PROT_U : 0) | \
				    (((pr) & VM_PROT_WRITE) ? L1_S_PROT_W : 0))
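
/*
 * For instance, the access-permission bits for a writable kernel section
 * mapping fold to a single constant (illustrative):
 *
 *	L1_S_PROT(PTE_KERNEL, VM_PROT_READ | VM_PROT_WRITE) == L1_S_PROT_W
 */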

#define	L2_L_PROT_U		(L2_AP(AP_U))
#define	L2_L_PROT_W		(L2_AP(AP_W))
#define	L2_L_PROT_MASK		(L2_L_PROT_U|L2_L_PROT_W)

#define	L2_L_PROT(ku, pr)	((((ku) == PTE_USER) ? L2_L_PROT_U : 0) | \
				    (((pr) & VM_PROT_WRITE) ? L2_L_PROT_W : 0))

#define	L2_S_PROT(ku, pr)	((((ku) == PTE_USER) ? L2_S_PROT_U : 0) | \
				    (((pr) & VM_PROT_WRITE) ? L2_S_PROT_W : 0))

/*
 * Macros to test if a mapping is mappable with an L1 Section mapping
 * or an L2 Large Page mapping.
 */
#define	L1_S_MAPPABLE_P(va, pa, size)					\
	((((va) | (pa)) & L1_S_OFFSET) == 0 && (size) >= L1_S_SIZE)

#define	L2_L_MAPPABLE_P(va, pa, size)					\
	((((va) | (pa)) & L2_L_OFFSET) == 0 && (size) >= L2_L_SIZE)
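
/*
 * pmap_map_chunk() can use tests like these to pick the largest mapping
 * size for each piece of a range.  A minimal sketch of that decision:
 *
 *	if (L1_S_MAPPABLE_P(va, pa, resid))
 *		map a 1MB section
 *	else if (L2_L_MAPPABLE_P(va, pa, resid))
 *		map a 64KB large page
 *	else
 *		map 4KB small pages
 */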

/*
 * Provide a fallback in case we were not able to determine it at
 * compile-time.
 */
#ifndef PMAP_NEEDS_PTE_SYNC
#define	PMAP_NEEDS_PTE_SYNC	pmap_needs_pte_sync
#define	PMAP_INCLUDE_PTE_SYNC
#endif

#ifdef ARM_L2_PIPT
#define	_sync_l2(pte, size)	cpu_l2cache_wb_range(vtophys(pte), size)
#else
#define	_sync_l2(pte, size)	cpu_l2cache_wb_range(pte, size)
#endif

#define	PTE_SYNC(pte)							\
do {									\
	if (PMAP_NEEDS_PTE_SYNC) {					\
		cpu_dcache_wb_range((vm_offset_t)(pte), sizeof(pt_entry_t));\
		cpu_drain_writebuf();					\
		_sync_l2((vm_offset_t)(pte), sizeof(pt_entry_t));	\
	} else								\
		cpu_drain_writebuf();					\
} while (/*CONSTCOND*/0)

#define	PTE_SYNC_RANGE(pte, cnt)					\
do {									\
	if (PMAP_NEEDS_PTE_SYNC) {					\
		cpu_dcache_wb_range((vm_offset_t)(pte),			\
		    (cnt) << 2); /* (cnt) * sizeof(pt_entry_t) */	\
		cpu_drain_writebuf();					\
		_sync_l2((vm_offset_t)(pte),				\
		    (cnt) << 2); /* (cnt) * sizeof(pt_entry_t) */	\
	} else								\
		cpu_drain_writebuf();					\
} while (/*CONSTCOND*/0)

extern pt_entry_t		pte_l1_s_cache_mode;
extern pt_entry_t		pte_l1_s_cache_mask;

extern pt_entry_t		pte_l2_l_cache_mode;
extern pt_entry_t		pte_l2_l_cache_mask;

extern pt_entry_t		pte_l2_s_cache_mode;
extern pt_entry_t		pte_l2_s_cache_mask;

extern pt_entry_t		pte_l1_s_cache_mode_pt;
extern pt_entry_t		pte_l2_l_cache_mode_pt;
extern pt_entry_t		pte_l2_s_cache_mode_pt;

extern pt_entry_t		pte_l2_s_prot_u;
extern pt_entry_t		pte_l2_s_prot_w;
extern pt_entry_t		pte_l2_s_prot_mask;

extern pt_entry_t		pte_l1_s_proto;
extern pt_entry_t		pte_l1_c_proto;
extern pt_entry_t		pte_l2_s_proto;

extern void (*pmap_copy_page_func)(vm_paddr_t, vm_paddr_t);
extern void (*pmap_copy_page_offs_func)(vm_paddr_t a_phys,
    vm_offset_t a_offs, vm_paddr_t b_phys, vm_offset_t b_offs, int cnt);
extern void (*pmap_zero_page_func)(vm_paddr_t, int, int);

#if ARM_MMU_GENERIC != 0 || defined(CPU_XSCALE_81342)
void	pmap_copy_page_generic(vm_paddr_t, vm_paddr_t);
void	pmap_zero_page_generic(vm_paddr_t, int, int);

void	pmap_pte_init_generic(void);
#endif /* ARM_MMU_GENERIC != 0 || CPU_XSCALE_81342 */

#if ARM_MMU_XSCALE == 1
void	pmap_copy_page_xscale(vm_paddr_t, vm_paddr_t);
void	pmap_zero_page_xscale(vm_paddr_t, int, int);

void	pmap_pte_init_xscale(void);

void	xscale_setup_minidata(vm_offset_t, vm_offset_t, vm_offset_t);

void	pmap_use_minicache(vm_offset_t, vm_size_t);
#endif /* ARM_MMU_XSCALE == 1 */

#if defined(CPU_XSCALE_81342)
#define	ARM_HAVE_SUPERSECTIONS
#endif

#define	PTE_KERNEL	0
#define	PTE_USER	1

#define	l1pte_valid(pde)	((pde) != 0)
#define	l1pte_section_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_S)
#define	l1pte_page_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_C)
#define	l1pte_fpage_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_F)

#define	l2pte_index(v)		(((v) & L1_S_OFFSET) >> L2_S_SHIFT)
#define	l2pte_valid(pte)	((pte) != 0)
#define	l2pte_pa(pte)		((pte) & L2_S_FRAME)
#define	l2pte_minidata(pte)	(((pte) & \
				    (L2_B | L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X)))\
				    == (L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X)))

/* L1 and L2 page table macros */
#define	pmap_pde_v(pde)		l1pte_valid(*(pde))
#define	pmap_pde_section(pde)	l1pte_section_p(*(pde))
#define	pmap_pde_page(pde)	l1pte_page_p(*(pde))
#define	pmap_pde_fpage(pde)	l1pte_fpage_p(*(pde))

#define	pmap_pte_v(pte)		l2pte_valid(*(pte))
#define	pmap_pte_pa(pte)	l2pte_pa(*(pte))

/*
 * Flags that indicate attributes of pages or mappings of pages.
 *
 * The PVF_MOD and PVF_REF flags are stored in the mdpage for each
 * page.  PVF_WIRED, PVF_WRITE, and PVF_NC are kept in individual
 * pv_entry's for each page.  They live in the same "namespace" so
 * that we can clear multiple attributes at a time.
 *
 * Note the "non-cacheable" flag generally means the page has
 * multiple mappings in a given address space.
 */
#define	PVF_MOD		0x01	/* page is modified */
#define	PVF_REF		0x02	/* page is referenced */
#define	PVF_WIRED	0x04	/* mapping is wired */
#define	PVF_WRITE	0x08	/* mapping is writable */
#define	PVF_EXEC	0x10	/* mapping is executable */
#define	PVF_NC		0x20	/* mapping is non-cacheable */
#define	PVF_MWC		0x40	/* mapping is used multiple times in userland */
#define	PVF_UNMAN	0x80	/* mapping is unmanaged */

void	vector_page_setprot(int);

#define	SECTION_CACHE	0x1
#define	SECTION_PT	0x2
void	pmap_kenter_section(vm_offset_t, vm_paddr_t, int flags);
#ifdef ARM_HAVE_SUPERSECTIONS
void	pmap_kenter_supersection(vm_offset_t, uint64_t, int flags);
#endif

void	pmap_postinit(void);

#endif /* _KERNEL */

#endif /* !LOCORE */

#endif /* !_MACHINE_PMAP_V4_H_ */