/*-
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Derived from hp300 version by Mike Hibler, this version by William
 * Jolitz uses a recursive map [a pde points to the page directory] to
 * map the page tables using the pagetables themselves. This is done to
 * reduce the impact on kernel virtual memory for lots of sparse address
 * space, and to reduce the cost of memory to each process.
42129198Scognet * 43129198Scognet * from: hp300: @(#)pmap.h 7.2 (Berkeley) 12/16/90 44129198Scognet * from: @(#)pmap.h 7.4 (Berkeley) 5/12/91 45129198Scognet * from: FreeBSD: src/sys/i386/include/pmap.h,v 1.70 2000/11/30 46129198Scognet * 47129198Scognet * $FreeBSD: releng/11.0/sys/arm/include/pmap-v4.h 295801 2016-02-19 09:23:32Z skra $ 48129198Scognet */ 49295036Smmel 50295798Sskra#ifndef _MACHINE_PMAP_V4_H_ 51295798Sskra#define _MACHINE_PMAP_V4_H_ 52129198Scognet 53295801Sskra#include <machine/pte-v4.h> 54159100Scognet#include <machine/cpuconf.h> 55129198Scognet/* 56129198Scognet * Pte related macros 57129198Scognet */ 58239268Sgonzo#define PTE_NOCACHE 1 59239268Sgonzo#define PTE_CACHE 2 60257672Sian#define PTE_DEVICE PTE_NOCACHE 61239268Sgonzo#define PTE_PAGETABLE 3 62236992Simp 63239268Sgonzoenum mem_type { 64239268Sgonzo STRONG_ORD = 0, 65239268Sgonzo DEVICE_NOSHARE, 66239268Sgonzo DEVICE_SHARE, 67239268Sgonzo NRML_NOCACHE, 68239268Sgonzo NRML_IWT_OWT, 69239268Sgonzo NRML_IWB_OWB, 70239268Sgonzo NRML_IWBA_OWBA 71239268Sgonzo}; 72239268Sgonzo 73129198Scognet#ifndef LOCORE 74129198Scognet 75129198Scognet#include <sys/queue.h> 76222813Sattilio#include <sys/_cpuset.h> 77159325Salc#include <sys/_lock.h> 78159325Salc#include <sys/_mutex.h> 79129198Scognet 80129198Scognet#define PDESIZE sizeof(pd_entry_t) /* for assembly files */ 81129198Scognet#define PTESIZE sizeof(pt_entry_t) /* for assembly files */ 82129198Scognet 83244414Scognet#define pmap_page_get_memattr(m) ((m)->md.pv_memattr) 84135641Scognet#define pmap_page_is_mapped(m) (!TAILQ_EMPTY(&(m)->md.pv_list)) 85195649Salc 86129198Scognet/* 87137362Scognet * Pmap stuff 88129198Scognet */ 89129198Scognet 90129198Scognet/* 91129198Scognet * This structure is used to hold a virtual<->physical address 92129198Scognet * association and is used mostly by bootstrap code 93129198Scognet */ 94129198Scognetstruct pv_addr { 95129198Scognet SLIST_ENTRY(pv_addr) pv_list; 96129198Scognet vm_offset_t pv_va; 97129198Scognet 
vm_paddr_t pv_pa; 98129198Scognet}; 99129198Scognet 100129198Scognetstruct pv_entry; 101250634Sgberstruct pv_chunk; 102129198Scognet 103129198Scognetstruct md_page { 104129198Scognet int pvh_attrs; 105244414Scognet vm_memattr_t pv_memattr; 106194459Sthompsa vm_offset_t pv_kva; /* first kernel VA mapping */ 107129198Scognet TAILQ_HEAD(,pv_entry) pv_list; 108129198Scognet}; 109129198Scognet 110129198Scognetstruct l1_ttable; 111129198Scognetstruct l2_dtable; 112129198Scognet 113129198Scognet 114129198Scognet/* 115129198Scognet * The number of L2 descriptor tables which can be tracked by an l2_dtable. 116129198Scognet * A bucket size of 16 provides for 16MB of contiguous virtual address 117129198Scognet * space per l2_dtable. Most processes will, therefore, require only two or 118129198Scognet * three of these to map their whole working set. 119129198Scognet */ 120129198Scognet#define L2_BUCKET_LOG2 4 121129198Scognet#define L2_BUCKET_SIZE (1 << L2_BUCKET_LOG2) 122129198Scognet/* 123129198Scognet * Given the above "L2-descriptors-per-l2_dtable" constant, the number 124129198Scognet * of l2_dtable structures required to track all possible page descriptors 125129198Scognet * mappable by an L1 translation table is given by the following constants: 126129198Scognet */ 127129198Scognet#define L2_LOG2 ((32 - L1_S_SHIFT) - L2_BUCKET_LOG2) 128129198Scognet#define L2_SIZE (1 << L2_LOG2) 129129198Scognet 130129198Scognetstruct pmap { 131159325Salc struct mtx pm_mtx; 132129198Scognet u_int8_t pm_domain; 133129198Scognet struct l1_ttable *pm_l1; 134129198Scognet struct l2_dtable *pm_l2[L2_SIZE]; 135222813Sattilio cpuset_t pm_active; /* active on cpus */ 136129198Scognet struct pmap_statistics pm_stats; /* pmap statictics */ 137144760Scognet TAILQ_HEAD(,pv_entry) pm_pvlist; /* list of mappings in pmap */ 138129198Scognet}; 139129198Scognet 140129198Scognettypedef struct pmap *pmap_t; 141129198Scognet 142129198Scognet#ifdef _KERNEL 143191873Salcextern struct pmap kernel_pmap_store; 
144191873Salc#define kernel_pmap (&kernel_pmap_store) 145137362Scognet 146159325Salc#define PMAP_ASSERT_LOCKED(pmap) \ 147159325Salc mtx_assert(&(pmap)->pm_mtx, MA_OWNED) 148159325Salc#define PMAP_LOCK(pmap) mtx_lock(&(pmap)->pm_mtx) 149159325Salc#define PMAP_LOCK_DESTROY(pmap) mtx_destroy(&(pmap)->pm_mtx) 150159325Salc#define PMAP_LOCK_INIT(pmap) mtx_init(&(pmap)->pm_mtx, "pmap", \ 151159325Salc NULL, MTX_DEF | MTX_DUPOK) 152159325Salc#define PMAP_OWNED(pmap) mtx_owned(&(pmap)->pm_mtx) 153159325Salc#define PMAP_MTX(pmap) (&(pmap)->pm_mtx) 154159325Salc#define PMAP_TRYLOCK(pmap) mtx_trylock(&(pmap)->pm_mtx) 155159325Salc#define PMAP_UNLOCK(pmap) mtx_unlock(&(pmap)->pm_mtx) 156129198Scognet#endif 157129198Scognet 158129198Scognet/* 159129198Scognet * For each vm_page_t, there is a list of all currently valid virtual 160164250Sru * mappings of that page. An entry is a pv_entry_t, the list is pv_list. 161129198Scognet */ 162129198Scognettypedef struct pv_entry { 163138413Scognet vm_offset_t pv_va; /* virtual address for mapping */ 164138413Scognet TAILQ_ENTRY(pv_entry) pv_list; 165250634Sgber int pv_flags; /* flags (wired, etc...) */ 166250634Sgber pmap_t pv_pmap; /* pmap where mapping lies */ 167144760Scognet TAILQ_ENTRY(pv_entry) pv_plist; 168129198Scognet} *pv_entry_t; 169129198Scognet 170250634Sgber/* 171250634Sgber * pv_entries are allocated in chunks per-process. This avoids the 172250634Sgber * need to track per-pmap assignments. 
173250634Sgber */ 174250634Sgber#define _NPCM 8 175250634Sgber#define _NPCPV 252 176250634Sgber 177250634Sgberstruct pv_chunk { 178250634Sgber pmap_t pc_pmap; 179250634Sgber TAILQ_ENTRY(pv_chunk) pc_list; 180250634Sgber uint32_t pc_map[_NPCM]; /* bitmap; 1 = free */ 181250634Sgber uint32_t pc_dummy[3]; /* aligns pv_chunk to 4KB */ 182250634Sgber TAILQ_ENTRY(pv_chunk) pc_lru; 183250634Sgber struct pv_entry pc_pventry[_NPCPV]; 184250634Sgber}; 185250634Sgber 186129198Scognet#ifdef _KERNEL 187129198Scognet 188129198Scognetboolean_t pmap_get_pde_pte(pmap_t, vm_offset_t, pd_entry_t **, pt_entry_t **); 189129198Scognet 190129198Scognet/* 191129198Scognet * virtual address to page table entry and 192129198Scognet * to physical address. Likewise for alternate address space. 193129198Scognet * Note: these work recursively, thus vtopte of a pte will give 194129198Scognet * the corresponding pde that in turn maps it. 195129198Scognet */ 196129198Scognet 197135641Scognet/* 198135641Scognet * The current top of kernel VM. 
199135641Scognet */ 200135641Scognetextern vm_offset_t pmap_curmaxkvaddr; 201135641Scognet 202129198Scognet/* Virtual address to page table entry */ 203129198Scognetstatic __inline pt_entry_t * 204129198Scognetvtopte(vm_offset_t va) 205129198Scognet{ 206129198Scognet pd_entry_t *pdep; 207129198Scognet pt_entry_t *ptep; 208129198Scognet 209295042Sskra if (pmap_get_pde_pte(kernel_pmap, va, &pdep, &ptep) == FALSE) 210129198Scognet return (NULL); 211129198Scognet return (ptep); 212129198Scognet} 213129198Scognet 214247046Salcvoid pmap_bootstrap(vm_offset_t firstaddr, struct pv_addr *l1pt); 215239268Sgonzoint pmap_change_attr(vm_offset_t, vm_size_t, int); 216129198Scognetvoid pmap_kenter(vm_offset_t va, vm_paddr_t pa); 217156191Scognetvoid pmap_kenter_nocache(vm_offset_t va, vm_paddr_t pa); 218142570Scognetvoid pmap_kenter_user(vm_offset_t va, vm_paddr_t pa); 219294722Sskravm_paddr_t pmap_dump_kextract(vm_offset_t, pt2_entry_t *); 220129198Scognetvoid pmap_kremove(vm_offset_t); 221129198Scognetvm_page_t pmap_use_pt(pmap_t, vm_offset_t); 222129198Scognetvoid pmap_debug(int); 223129198Scognetvoid pmap_map_section(vm_offset_t, vm_offset_t, vm_offset_t, int, int); 224129198Scognetvoid pmap_link_l2pt(vm_offset_t, vm_offset_t, struct pv_addr *); 225129198Scognetvm_size_t pmap_map_chunk(vm_offset_t, vm_offset_t, vm_offset_t, vm_size_t, int, int); 226129198Scognetvoid 227129198Scognetpmap_map_entry(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa, int prot, 228129198Scognet int cache); 229129198Scognetint pmap_fault_fixup(pmap_t, vm_offset_t, vm_prot_t, int); 230129198Scognet 231129198Scognet/* 232129198Scognet * Definitions for MMU domains 233129198Scognet */ 234169756Scognet#define PMAP_DOMAINS 15 /* 15 'user' domains (1-15) */ 235169756Scognet#define PMAP_DOMAIN_KERNEL 0 /* The kernel uses domain #0 */ 236129198Scognet 237129198Scognet/* 238129198Scognet * The new pmap ensures that page-tables are always mapping Write-Thru. 
239129198Scognet * Thus, on some platforms we can run fast and loose and avoid syncing PTEs 240129198Scognet * on every change. 241129198Scognet * 242129198Scognet * Unfortunately, not all CPUs have a write-through cache mode. So we 243129198Scognet * define PMAP_NEEDS_PTE_SYNC for C code to conditionally do PTE syncs, 244129198Scognet * and if there is the chance for PTE syncs to be needed, we define 245129198Scognet * PMAP_INCLUDE_PTE_SYNC so e.g. assembly code can include (and run) 246129198Scognet * the code. 247129198Scognet */ 248129198Scognetextern int pmap_needs_pte_sync; 249129198Scognet 250129198Scognet/* 251129198Scognet * These macros define the various bit masks in the PTE. 252129198Scognet * 253129198Scognet * We use these macros since we use different bits on different processor 254129198Scognet * models. 255129198Scognet */ 256129198Scognet 257129198Scognet#define L1_S_CACHE_MASK_generic (L1_S_B|L1_S_C) 258171620Scognet#define L1_S_CACHE_MASK_xscale (L1_S_B|L1_S_C|L1_S_XSCALE_TEX(TEX_XSCALE_X)|\ 259171620Scognet L1_S_XSCALE_TEX(TEX_XSCALE_T)) 260129198Scognet 261129198Scognet#define L2_L_CACHE_MASK_generic (L2_B|L2_C) 262171620Scognet#define L2_L_CACHE_MASK_xscale (L2_B|L2_C|L2_XSCALE_L_TEX(TEX_XSCALE_X) | \ 263171620Scognet L2_XSCALE_L_TEX(TEX_XSCALE_T)) 264129198Scognet 265129198Scognet#define L2_S_PROT_U_generic (L2_AP(AP_U)) 266129198Scognet#define L2_S_PROT_W_generic (L2_AP(AP_W)) 267129198Scognet#define L2_S_PROT_MASK_generic (L2_S_PROT_U|L2_S_PROT_W) 268129198Scognet 269129198Scognet#define L2_S_PROT_U_xscale (L2_AP0(AP_U)) 270129198Scognet#define L2_S_PROT_W_xscale (L2_AP0(AP_W)) 271129198Scognet#define L2_S_PROT_MASK_xscale (L2_S_PROT_U|L2_S_PROT_W) 272129198Scognet 273129198Scognet#define L2_S_CACHE_MASK_generic (L2_B|L2_C) 274171620Scognet#define L2_S_CACHE_MASK_xscale (L2_B|L2_C|L2_XSCALE_T_TEX(TEX_XSCALE_X)| \ 275171620Scognet L2_XSCALE_T_TEX(TEX_XSCALE_X)) 276129198Scognet 277129198Scognet#define L1_S_PROTO_generic (L1_TYPE_S | 
L1_S_IMP) 278129198Scognet#define L1_S_PROTO_xscale (L1_TYPE_S) 279129198Scognet 280129198Scognet#define L1_C_PROTO_generic (L1_TYPE_C | L1_C_IMP2) 281129198Scognet#define L1_C_PROTO_xscale (L1_TYPE_C) 282129198Scognet 283129198Scognet#define L2_L_PROTO (L2_TYPE_L) 284129198Scognet 285129198Scognet#define L2_S_PROTO_generic (L2_TYPE_S) 286129198Scognet#define L2_S_PROTO_xscale (L2_TYPE_XSCALE_XS) 287129198Scognet 288129198Scognet/* 289129198Scognet * User-visible names for the ones that vary with MMU class. 290129198Scognet */ 291239268Sgonzo#define L2_AP(x) (L2_AP0(x) | L2_AP1(x) | L2_AP2(x) | L2_AP3(x)) 292129198Scognet 293295129Sskra#if ARM_NMMUS > 1 294263676Sandrew/* More than one MMU class configured; use variables. */ 295263676Sandrew#define L2_S_PROT_U pte_l2_s_prot_u 296263676Sandrew#define L2_S_PROT_W pte_l2_s_prot_w 297263676Sandrew#define L2_S_PROT_MASK pte_l2_s_prot_mask 298263676Sandrew 299263676Sandrew#define L1_S_CACHE_MASK pte_l1_s_cache_mask 300263676Sandrew#define L2_L_CACHE_MASK pte_l2_l_cache_mask 301263676Sandrew#define L2_S_CACHE_MASK pte_l2_s_cache_mask 302263676Sandrew 303263676Sandrew#define L1_S_PROTO pte_l1_s_proto 304263676Sandrew#define L1_C_PROTO pte_l1_c_proto 305263676Sandrew#define L2_S_PROTO pte_l2_s_proto 306263676Sandrew 307263676Sandrew#elif ARM_MMU_GENERIC != 0 308263676Sandrew#define L2_S_PROT_U L2_S_PROT_U_generic 309263676Sandrew#define L2_S_PROT_W L2_S_PROT_W_generic 310263676Sandrew#define L2_S_PROT_MASK L2_S_PROT_MASK_generic 311263676Sandrew 312263676Sandrew#define L1_S_CACHE_MASK L1_S_CACHE_MASK_generic 313263676Sandrew#define L2_L_CACHE_MASK L2_L_CACHE_MASK_generic 314263676Sandrew#define L2_S_CACHE_MASK L2_S_CACHE_MASK_generic 315263676Sandrew 316263676Sandrew#define L1_S_PROTO L1_S_PROTO_generic 317263676Sandrew#define L1_C_PROTO L1_C_PROTO_generic 318263676Sandrew#define L2_S_PROTO L2_S_PROTO_generic 319263676Sandrew 320263676Sandrew#elif ARM_MMU_XSCALE == 1 321263676Sandrew#define L2_S_PROT_U L2_S_PROT_U_xscale 
322263676Sandrew#define L2_S_PROT_W L2_S_PROT_W_xscale 323263676Sandrew#define L2_S_PROT_MASK L2_S_PROT_MASK_xscale 324263676Sandrew 325263676Sandrew#define L1_S_CACHE_MASK L1_S_CACHE_MASK_xscale 326263676Sandrew#define L2_L_CACHE_MASK L2_L_CACHE_MASK_xscale 327263676Sandrew#define L2_S_CACHE_MASK L2_S_CACHE_MASK_xscale 328263676Sandrew 329263676Sandrew#define L1_S_PROTO L1_S_PROTO_xscale 330263676Sandrew#define L1_C_PROTO L1_C_PROTO_xscale 331263676Sandrew#define L2_S_PROTO L2_S_PROTO_xscale 332263676Sandrew 333129198Scognet#endif /* ARM_NMMUS > 1 */ 334129198Scognet 335295129Sskra#if defined(CPU_XSCALE_81342) 336171620Scognet#define PMAP_NEEDS_PTE_SYNC 1 337171620Scognet#define PMAP_INCLUDE_PTE_SYNC 338262958Sian#else 339129198Scognet#define PMAP_NEEDS_PTE_SYNC 0 340129198Scognet#endif 341129198Scognet 342129198Scognet/* 343129198Scognet * These macros return various bits based on kernel/user and protection. 344129198Scognet * Note that the compiler will usually fold these at compile time. 345129198Scognet */ 346239268Sgonzo#define L1_S_PROT_U (L1_S_AP(AP_U)) 347239268Sgonzo#define L1_S_PROT_W (L1_S_AP(AP_W)) 348239268Sgonzo#define L1_S_PROT_MASK (L1_S_PROT_U|L1_S_PROT_W) 349239268Sgonzo#define L1_S_WRITABLE(pd) ((pd) & L1_S_PROT_W) 350239268Sgonzo 351129198Scognet#define L1_S_PROT(ku, pr) ((((ku) == PTE_USER) ? L1_S_PROT_U : 0) | \ 352129198Scognet (((pr) & VM_PROT_WRITE) ? L1_S_PROT_W : 0)) 353129198Scognet 354239268Sgonzo#define L2_L_PROT_U (L2_AP(AP_U)) 355239268Sgonzo#define L2_L_PROT_W (L2_AP(AP_W)) 356239268Sgonzo#define L2_L_PROT_MASK (L2_L_PROT_U|L2_L_PROT_W) 357239268Sgonzo 358129198Scognet#define L2_L_PROT(ku, pr) ((((ku) == PTE_USER) ? L2_L_PROT_U : 0) | \ 359129198Scognet (((pr) & VM_PROT_WRITE) ? L2_L_PROT_W : 0)) 360129198Scognet 361129198Scognet#define L2_S_PROT(ku, pr) ((((ku) == PTE_USER) ? L2_S_PROT_U : 0) | \ 362129198Scognet (((pr) & VM_PROT_WRITE) ? 
L2_S_PROT_W : 0)) 363129198Scognet 364129198Scognet/* 365129198Scognet * Macros to test if a mapping is mappable with an L1 Section mapping 366129198Scognet * or an L2 Large Page mapping. 367129198Scognet */ 368129198Scognet#define L1_S_MAPPABLE_P(va, pa, size) \ 369129198Scognet ((((va) | (pa)) & L1_S_OFFSET) == 0 && (size) >= L1_S_SIZE) 370129198Scognet 371129198Scognet#define L2_L_MAPPABLE_P(va, pa, size) \ 372129198Scognet ((((va) | (pa)) & L2_L_OFFSET) == 0 && (size) >= L2_L_SIZE) 373129198Scognet 374129198Scognet/* 375129198Scognet * Provide a fallback in case we were not able to determine it at 376129198Scognet * compile-time. 377129198Scognet */ 378129198Scognet#ifndef PMAP_NEEDS_PTE_SYNC 379129198Scognet#define PMAP_NEEDS_PTE_SYNC pmap_needs_pte_sync 380129198Scognet#define PMAP_INCLUDE_PTE_SYNC 381129198Scognet#endif 382129198Scognet 383256707Scognet#ifdef ARM_L2_PIPT 384256707Scognet#define _sync_l2(pte, size) cpu_l2cache_wb_range(vtophys(pte), size) 385256707Scognet#else 386256708Scognet#define _sync_l2(pte, size) cpu_l2cache_wb_range(pte, size) 387256707Scognet#endif 388256707Scognet 389129198Scognet#define PTE_SYNC(pte) \ 390129198Scognetdo { \ 391171620Scognet if (PMAP_NEEDS_PTE_SYNC) { \ 392129198Scognet cpu_dcache_wb_range((vm_offset_t)(pte), sizeof(pt_entry_t));\ 393256707Scognet cpu_drain_writebuf(); \ 394256707Scognet _sync_l2((vm_offset_t)(pte), sizeof(pt_entry_t));\ 395228530Sraj } else \ 396228530Sraj cpu_drain_writebuf(); \ 397129198Scognet} while (/*CONSTCOND*/0) 398129198Scognet 399129198Scognet#define PTE_SYNC_RANGE(pte, cnt) \ 400129198Scognetdo { \ 401129198Scognet if (PMAP_NEEDS_PTE_SYNC) { \ 402129198Scognet cpu_dcache_wb_range((vm_offset_t)(pte), \ 403129198Scognet (cnt) << 2); /* * sizeof(pt_entry_t) */ \ 404256707Scognet cpu_drain_writebuf(); \ 405256707Scognet _sync_l2((vm_offset_t)(pte), \ 406171620Scognet (cnt) << 2); /* * sizeof(pt_entry_t) */ \ 407228530Sraj } else \ 408228530Sraj cpu_drain_writebuf(); \ 409129198Scognet} 
while (/*CONSTCOND*/0) 410129198Scognet 411129198Scognetextern pt_entry_t pte_l1_s_cache_mode; 412129198Scognetextern pt_entry_t pte_l1_s_cache_mask; 413129198Scognet 414129198Scognetextern pt_entry_t pte_l2_l_cache_mode; 415129198Scognetextern pt_entry_t pte_l2_l_cache_mask; 416129198Scognet 417129198Scognetextern pt_entry_t pte_l2_s_cache_mode; 418129198Scognetextern pt_entry_t pte_l2_s_cache_mask; 419129198Scognet 420129198Scognetextern pt_entry_t pte_l1_s_cache_mode_pt; 421129198Scognetextern pt_entry_t pte_l2_l_cache_mode_pt; 422129198Scognetextern pt_entry_t pte_l2_s_cache_mode_pt; 423129198Scognet 424129198Scognetextern pt_entry_t pte_l2_s_prot_u; 425129198Scognetextern pt_entry_t pte_l2_s_prot_w; 426129198Scognetextern pt_entry_t pte_l2_s_prot_mask; 427236992Simp 428129198Scognetextern pt_entry_t pte_l1_s_proto; 429129198Scognetextern pt_entry_t pte_l1_c_proto; 430129198Scognetextern pt_entry_t pte_l2_s_proto; 431129198Scognet 432129198Scognetextern void (*pmap_copy_page_func)(vm_paddr_t, vm_paddr_t); 433248280Skibextern void (*pmap_copy_page_offs_func)(vm_paddr_t a_phys, 434248280Skib vm_offset_t a_offs, vm_paddr_t b_phys, vm_offset_t b_offs, int cnt); 435129198Scognetextern void (*pmap_zero_page_func)(vm_paddr_t, int, int); 436129198Scognet 437295129Sskra#if ARM_MMU_GENERIC != 0 || defined(CPU_XSCALE_81342) 438129198Scognetvoid pmap_copy_page_generic(vm_paddr_t, vm_paddr_t); 439129198Scognetvoid pmap_zero_page_generic(vm_paddr_t, int, int); 440129198Scognet 441129198Scognetvoid pmap_pte_init_generic(void); 442295129Sskra#endif /* ARM_MMU_GENERIC != 0 */ 443129198Scognet 444129198Scognet#if ARM_MMU_XSCALE == 1 445129198Scognetvoid pmap_copy_page_xscale(vm_paddr_t, vm_paddr_t); 446129198Scognetvoid pmap_zero_page_xscale(vm_paddr_t, int, int); 447129198Scognet 448129198Scognetvoid pmap_pte_init_xscale(void); 449129198Scognet 450129198Scognetvoid xscale_setup_minidata(vm_offset_t, vm_offset_t, vm_offset_t); 451129198Scognet 452135641Scognetvoid 
pmap_use_minicache(vm_offset_t, vm_size_t); 453129198Scognet#endif /* ARM_MMU_XSCALE == 1 */ 454171620Scognet#if defined(CPU_XSCALE_81342) 455171620Scognet#define ARM_HAVE_SUPERSECTIONS 456171620Scognet#endif 457171620Scognet 458129198Scognet#define PTE_KERNEL 0 459129198Scognet#define PTE_USER 1 460129198Scognet#define l1pte_valid(pde) ((pde) != 0) 461129198Scognet#define l1pte_section_p(pde) (((pde) & L1_TYPE_MASK) == L1_TYPE_S) 462129198Scognet#define l1pte_page_p(pde) (((pde) & L1_TYPE_MASK) == L1_TYPE_C) 463129198Scognet#define l1pte_fpage_p(pde) (((pde) & L1_TYPE_MASK) == L1_TYPE_F) 464129198Scognet 465295752Sskra#define l2pte_index(v) (((v) & L1_S_OFFSET) >> L2_S_SHIFT) 466129198Scognet#define l2pte_valid(pte) ((pte) != 0) 467129198Scognet#define l2pte_pa(pte) ((pte) & L2_S_FRAME) 468129198Scognet#define l2pte_minidata(pte) (((pte) & \ 469129198Scognet (L2_B | L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X)))\ 470129198Scognet == (L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X))) 471129198Scognet 472129198Scognet/* L1 and L2 page table macros */ 473129198Scognet#define pmap_pde_v(pde) l1pte_valid(*(pde)) 474129198Scognet#define pmap_pde_section(pde) l1pte_section_p(*(pde)) 475129198Scognet#define pmap_pde_page(pde) l1pte_page_p(*(pde)) 476129198Scognet#define pmap_pde_fpage(pde) l1pte_fpage_p(*(pde)) 477129198Scognet 478129198Scognet#define pmap_pte_v(pte) l2pte_valid(*(pte)) 479129198Scognet#define pmap_pte_pa(pte) l2pte_pa(*(pte)) 480129198Scognet 481129198Scognet/* 482129198Scognet * Flags that indicate attributes of pages or mappings of pages. 483129198Scognet * 484129198Scognet * The PVF_MOD and PVF_REF flags are stored in the mdpage for each 485129198Scognet * page. PVF_WIRED, PVF_WRITE, and PVF_NC are kept in individual 486129198Scognet * pv_entry's for each page. They live in the same "namespace" so 487129198Scognet * that we can clear multiple attributes at a time. 
488129198Scognet * 489129198Scognet * Note the "non-cacheable" flag generally means the page has 490129198Scognet * multiple mappings in a given address space. 491129198Scognet */ 492129198Scognet#define PVF_MOD 0x01 /* page is modified */ 493129198Scognet#define PVF_REF 0x02 /* page is referenced */ 494129198Scognet#define PVF_WIRED 0x04 /* mapping is wired */ 495129198Scognet#define PVF_WRITE 0x08 /* mapping is writable */ 496129198Scognet#define PVF_EXEC 0x10 /* mapping is executable */ 497175840Scognet#define PVF_NC 0x20 /* mapping is non-cacheable */ 498175840Scognet#define PVF_MWC 0x40 /* mapping is used multiple times in userland */ 499194459Sthompsa#define PVF_UNMAN 0x80 /* mapping is unmanaged */ 500129198Scognet 501129198Scognetvoid vector_page_setprot(int); 502135641Scognet 503147114Scognet#define SECTION_CACHE 0x1 504147114Scognet#define SECTION_PT 0x2 505147114Scognetvoid pmap_kenter_section(vm_offset_t, vm_paddr_t, int flags); 506171620Scognet#ifdef ARM_HAVE_SUPERSECTIONS 507170582Scognetvoid pmap_kenter_supersection(vm_offset_t, uint64_t, int flags); 508171620Scognet#endif 509147114Scognet 510152128Scognetvoid pmap_postinit(void); 511152128Scognet 512129198Scognet#endif /* _KERNEL */ 513129198Scognet 514129198Scognet#endif /* !LOCORE */ 515129198Scognet 516295798Sskra#endif /* !_MACHINE_PMAP_V4_H_ */ 517