/* pmap-v4.h — FreeBSD sys/arm/include/pmap.h, SVN revision 250930 */
1139735Simp/*- 2129198Scognet * Copyright (c) 1991 Regents of the University of California. 3129198Scognet * All rights reserved. 4129198Scognet * 5129198Scognet * This code is derived from software contributed to Berkeley by 6129198Scognet * the Systems Programming Group of the University of Utah Computer 7129198Scognet * Science Department and William Jolitz of UUNET Technologies Inc. 8129198Scognet * 9129198Scognet * Redistribution and use in source and binary forms, with or without 10129198Scognet * modification, are permitted provided that the following conditions 11129198Scognet * are met: 12129198Scognet * 1. Redistributions of source code must retain the above copyright 13129198Scognet * notice, this list of conditions and the following disclaimer. 14129198Scognet * 2. Redistributions in binary form must reproduce the above copyright 15129198Scognet * notice, this list of conditions and the following disclaimer in the 16129198Scognet * documentation and/or other materials provided with the distribution. 17129198Scognet * 3. All advertising materials mentioning features or use of this software 18129198Scognet * must display the following acknowledgement: 19129198Scognet * This product includes software developed by the University of 20129198Scognet * California, Berkeley and its contributors. 21129198Scognet * 4. Neither the name of the University nor the names of its contributors 22129198Scognet * may be used to endorse or promote products derived from this software 23129198Scognet * without specific prior written permission. 24129198Scognet * 25129198Scognet * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 26129198Scognet * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 27129198Scognet * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 28129198Scognet * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 29129198Scognet * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 30129198Scognet * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 31129198Scognet * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 32129198Scognet * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 33129198Scognet * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 34129198Scognet * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 35129198Scognet * SUCH DAMAGE. 36129198Scognet * 37129198Scognet * Derived from hp300 version by Mike Hibler, this version by William 38129198Scognet * Jolitz uses a recursive map [a pde points to the page directory] to 39129198Scognet * map the page tables using the pagetables themselves. This is done to 40129198Scognet * reduce the impact on kernel virtual memory for lots of sparse address 41129198Scognet * space, and to reduce the cost of memory to each process. 
42129198Scognet * 43129198Scognet * from: hp300: @(#)pmap.h 7.2 (Berkeley) 12/16/90 44129198Scognet * from: @(#)pmap.h 7.4 (Berkeley) 5/12/91 45129198Scognet * from: FreeBSD: src/sys/i386/include/pmap.h,v 1.70 2000/11/30 46129198Scognet * 47129198Scognet * $FreeBSD: head/sys/arm/include/pmap.h 250930 2013-05-23 12:23:18Z gber $ 48129198Scognet */ 49129198Scognet 50129198Scognet#ifndef _MACHINE_PMAP_H_ 51129198Scognet#define _MACHINE_PMAP_H_ 52129198Scognet 53129198Scognet#include <machine/pte.h> 54159100Scognet#include <machine/cpuconf.h> 55129198Scognet/* 56129198Scognet * Pte related macros 57129198Scognet */ 58239268Sgonzo#if ARM_ARCH_6 || ARM_ARCH_7A 59239268Sgonzo#ifdef SMP 60239268Sgonzo#define PTE_NOCACHE 2 61239268Sgonzo#else 62239268Sgonzo#define PTE_NOCACHE 1 63239268Sgonzo#endif 64245147Sgonzo#define PTE_CACHE 6 65239268Sgonzo#define PTE_DEVICE 2 66239268Sgonzo#define PTE_PAGETABLE 4 67239268Sgonzo#else 68239268Sgonzo#define PTE_NOCACHE 1 69239268Sgonzo#define PTE_CACHE 2 70239268Sgonzo#define PTE_PAGETABLE 3 71239268Sgonzo#endif 72236992Simp 73239268Sgonzoenum mem_type { 74239268Sgonzo STRONG_ORD = 0, 75239268Sgonzo DEVICE_NOSHARE, 76239268Sgonzo DEVICE_SHARE, 77239268Sgonzo NRML_NOCACHE, 78239268Sgonzo NRML_IWT_OWT, 79239268Sgonzo NRML_IWB_OWB, 80239268Sgonzo NRML_IWBA_OWBA 81239268Sgonzo}; 82239268Sgonzo 83129198Scognet#ifndef LOCORE 84129198Scognet 85129198Scognet#include <sys/queue.h> 86222813Sattilio#include <sys/_cpuset.h> 87159325Salc#include <sys/_lock.h> 88159325Salc#include <sys/_mutex.h> 89129198Scognet 90129198Scognet#define PDESIZE sizeof(pd_entry_t) /* for assembly files */ 91129198Scognet#define PTESIZE sizeof(pt_entry_t) /* for assembly files */ 92129198Scognet 93129198Scognet#ifdef _KERNEL 94129198Scognet 95240983Salc#define vtophys(va) pmap_kextract((vm_offset_t)(va)) 96129198Scognet 97129198Scognet#endif 98129198Scognet 99244414Scognet#define pmap_page_get_memattr(m) ((m)->md.pv_memattr) 100135641Scognet#define pmap_page_is_mapped(m) 
(!TAILQ_EMPTY(&(m)->md.pv_list)) 101237168Salc#define pmap_page_is_write_mapped(m) (((m)->aflags & PGA_WRITEABLE) != 0) 102244414Scognetvoid pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma); 103195649Salc 104129198Scognet/* 105137362Scognet * Pmap stuff 106129198Scognet */ 107129198Scognet 108129198Scognet/* 109129198Scognet * This structure is used to hold a virtual<->physical address 110129198Scognet * association and is used mostly by bootstrap code 111129198Scognet */ 112129198Scognetstruct pv_addr { 113129198Scognet SLIST_ENTRY(pv_addr) pv_list; 114129198Scognet vm_offset_t pv_va; 115129198Scognet vm_paddr_t pv_pa; 116129198Scognet}; 117129198Scognet 118129198Scognetstruct pv_entry; 119250634Sgberstruct pv_chunk; 120129198Scognet 121129198Scognetstruct md_page { 122129198Scognet int pvh_attrs; 123244414Scognet vm_memattr_t pv_memattr; 124194459Sthompsa vm_offset_t pv_kva; /* first kernel VA mapping */ 125129198Scognet TAILQ_HEAD(,pv_entry) pv_list; 126129198Scognet}; 127129198Scognet 128129198Scognetstruct l1_ttable; 129129198Scognetstruct l2_dtable; 130129198Scognet 131129198Scognet 132129198Scognet/* 133129198Scognet * The number of L2 descriptor tables which can be tracked by an l2_dtable. 134129198Scognet * A bucket size of 16 provides for 16MB of contiguous virtual address 135129198Scognet * space per l2_dtable. Most processes will, therefore, require only two or 136129198Scognet * three of these to map their whole working set. 
137129198Scognet */ 138129198Scognet#define L2_BUCKET_LOG2 4 139129198Scognet#define L2_BUCKET_SIZE (1 << L2_BUCKET_LOG2) 140129198Scognet/* 141129198Scognet * Given the above "L2-descriptors-per-l2_dtable" constant, the number 142129198Scognet * of l2_dtable structures required to track all possible page descriptors 143129198Scognet * mappable by an L1 translation table is given by the following constants: 144129198Scognet */ 145129198Scognet#define L2_LOG2 ((32 - L1_S_SHIFT) - L2_BUCKET_LOG2) 146129198Scognet#define L2_SIZE (1 << L2_LOG2) 147129198Scognet 148129198Scognetstruct pmap { 149159325Salc struct mtx pm_mtx; 150129198Scognet u_int8_t pm_domain; 151129198Scognet struct l1_ttable *pm_l1; 152129198Scognet struct l2_dtable *pm_l2[L2_SIZE]; 153222813Sattilio cpuset_t pm_active; /* active on cpus */ 154129198Scognet struct pmap_statistics pm_stats; /* pmap statictics */ 155250634Sgber#if (ARM_MMU_V6 + ARM_MMU_V7) != 0 156250634Sgber TAILQ_HEAD(,pv_chunk) pm_pvchunk; /* list of mappings in pmap */ 157250634Sgber#else 158144760Scognet TAILQ_HEAD(,pv_entry) pm_pvlist; /* list of mappings in pmap */ 159250634Sgber#endif 160129198Scognet}; 161129198Scognet 162129198Scognettypedef struct pmap *pmap_t; 163129198Scognet 164129198Scognet#ifdef _KERNEL 165191873Salcextern struct pmap kernel_pmap_store; 166191873Salc#define kernel_pmap (&kernel_pmap_store) 167129198Scognet#define pmap_kernel() kernel_pmap 168137362Scognet 169159325Salc#define PMAP_ASSERT_LOCKED(pmap) \ 170159325Salc mtx_assert(&(pmap)->pm_mtx, MA_OWNED) 171159325Salc#define PMAP_LOCK(pmap) mtx_lock(&(pmap)->pm_mtx) 172159325Salc#define PMAP_LOCK_DESTROY(pmap) mtx_destroy(&(pmap)->pm_mtx) 173159325Salc#define PMAP_LOCK_INIT(pmap) mtx_init(&(pmap)->pm_mtx, "pmap", \ 174159325Salc NULL, MTX_DEF | MTX_DUPOK) 175159325Salc#define PMAP_OWNED(pmap) mtx_owned(&(pmap)->pm_mtx) 176159325Salc#define PMAP_MTX(pmap) (&(pmap)->pm_mtx) 177159325Salc#define PMAP_TRYLOCK(pmap) mtx_trylock(&(pmap)->pm_mtx) 
178159325Salc#define PMAP_UNLOCK(pmap) mtx_unlock(&(pmap)->pm_mtx) 179129198Scognet#endif 180129198Scognet 181135641Scognet 182129198Scognet/* 183129198Scognet * For each vm_page_t, there is a list of all currently valid virtual 184164250Sru * mappings of that page. An entry is a pv_entry_t, the list is pv_list. 185129198Scognet */ 186129198Scognettypedef struct pv_entry { 187138413Scognet vm_offset_t pv_va; /* virtual address for mapping */ 188138413Scognet TAILQ_ENTRY(pv_entry) pv_list; 189250634Sgber int pv_flags; /* flags (wired, etc...) */ 190250634Sgber#if (ARM_MMU_V6 + ARM_MMU_V7) == 0 191250634Sgber pmap_t pv_pmap; /* pmap where mapping lies */ 192144760Scognet TAILQ_ENTRY(pv_entry) pv_plist; 193250634Sgber#endif 194129198Scognet} *pv_entry_t; 195129198Scognet 196250634Sgber/* 197250634Sgber * pv_entries are allocated in chunks per-process. This avoids the 198250634Sgber * need to track per-pmap assignments. 199250634Sgber */ 200250634Sgber#define _NPCM 8 201250634Sgber#define _NPCPV 252 202250634Sgber 203250634Sgberstruct pv_chunk { 204250634Sgber pmap_t pc_pmap; 205250634Sgber TAILQ_ENTRY(pv_chunk) pc_list; 206250634Sgber uint32_t pc_map[_NPCM]; /* bitmap; 1 = free */ 207250634Sgber uint32_t pc_dummy[3]; /* aligns pv_chunk to 4KB */ 208250634Sgber TAILQ_ENTRY(pv_chunk) pc_lru; 209250634Sgber struct pv_entry pc_pventry[_NPCPV]; 210250634Sgber}; 211250634Sgber 212129198Scognet#ifdef _KERNEL 213129198Scognet 214129198Scognetboolean_t pmap_get_pde_pte(pmap_t, vm_offset_t, pd_entry_t **, pt_entry_t **); 215129198Scognet 216129198Scognet/* 217129198Scognet * virtual address to page table entry and 218129198Scognet * to physical address. Likewise for alternate address space. 219129198Scognet * Note: these work recursively, thus vtopte of a pte will give 220129198Scognet * the corresponding pde that in turn maps it. 221129198Scognet */ 222129198Scognet 223135641Scognet/* 224135641Scognet * The current top of kernel VM. 
225135641Scognet */ 226135641Scognetextern vm_offset_t pmap_curmaxkvaddr; 227135641Scognet 228132056Scognetstruct pcb; 229132056Scognet 230129198Scognetvoid pmap_set_pcb_pagedir(pmap_t, struct pcb *); 231129198Scognet/* Virtual address to page table entry */ 232129198Scognetstatic __inline pt_entry_t * 233129198Scognetvtopte(vm_offset_t va) 234129198Scognet{ 235129198Scognet pd_entry_t *pdep; 236129198Scognet pt_entry_t *ptep; 237129198Scognet 238129198Scognet if (pmap_get_pde_pte(pmap_kernel(), va, &pdep, &ptep) == FALSE) 239129198Scognet return (NULL); 240129198Scognet return (ptep); 241129198Scognet} 242129198Scognet 243218311Simpextern vm_paddr_t phys_avail[]; 244129198Scognetextern vm_offset_t virtual_avail; 245129198Scognetextern vm_offset_t virtual_end; 246129198Scognet 247247046Salcvoid pmap_bootstrap(vm_offset_t firstaddr, struct pv_addr *l1pt); 248239268Sgonzoint pmap_change_attr(vm_offset_t, vm_size_t, int); 249129198Scognetvoid pmap_kenter(vm_offset_t va, vm_paddr_t pa); 250156191Scognetvoid pmap_kenter_nocache(vm_offset_t va, vm_paddr_t pa); 251184728Srajvoid *pmap_kenter_temp(vm_paddr_t pa, int i); 252142570Scognetvoid pmap_kenter_user(vm_offset_t va, vm_paddr_t pa); 253240983Salcvm_paddr_t pmap_kextract(vm_offset_t va); 254129198Scognetvoid pmap_kremove(vm_offset_t); 255129198Scognetvoid *pmap_mapdev(vm_offset_t, vm_size_t); 256129198Scognetvoid pmap_unmapdev(vm_offset_t, vm_size_t); 257129198Scognetvm_page_t pmap_use_pt(pmap_t, vm_offset_t); 258129198Scognetvoid pmap_debug(int); 259129198Scognetvoid pmap_map_section(vm_offset_t, vm_offset_t, vm_offset_t, int, int); 260129198Scognetvoid pmap_link_l2pt(vm_offset_t, vm_offset_t, struct pv_addr *); 261129198Scognetvm_size_t pmap_map_chunk(vm_offset_t, vm_offset_t, vm_offset_t, vm_size_t, int, int); 262129198Scognetvoid 263129198Scognetpmap_map_entry(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa, int prot, 264129198Scognet int cache); 265129198Scognetint pmap_fault_fixup(pmap_t, vm_offset_t, 
vm_prot_t, int); 266239268Sgonzoint pmap_dmap_iscurrent(pmap_t pmap); 267129198Scognet 268129198Scognet/* 269129198Scognet * Definitions for MMU domains 270129198Scognet */ 271169756Scognet#define PMAP_DOMAINS 15 /* 15 'user' domains (1-15) */ 272169756Scognet#define PMAP_DOMAIN_KERNEL 0 /* The kernel uses domain #0 */ 273129198Scognet 274129198Scognet/* 275129198Scognet * The new pmap ensures that page-tables are always mapping Write-Thru. 276129198Scognet * Thus, on some platforms we can run fast and loose and avoid syncing PTEs 277129198Scognet * on every change. 278129198Scognet * 279129198Scognet * Unfortunately, not all CPUs have a write-through cache mode. So we 280129198Scognet * define PMAP_NEEDS_PTE_SYNC for C code to conditionally do PTE syncs, 281129198Scognet * and if there is the chance for PTE syncs to be needed, we define 282129198Scognet * PMAP_INCLUDE_PTE_SYNC so e.g. assembly code can include (and run) 283129198Scognet * the code. 284129198Scognet */ 285129198Scognetextern int pmap_needs_pte_sync; 286129198Scognet 287129198Scognet/* 288129198Scognet * These macros define the various bit masks in the PTE. 289129198Scognet * 290129198Scognet * We use these macros since we use different bits on different processor 291129198Scognet * models. 
292129198Scognet */ 293129198Scognet 294129198Scognet#define L1_S_CACHE_MASK_generic (L1_S_B|L1_S_C) 295171620Scognet#define L1_S_CACHE_MASK_xscale (L1_S_B|L1_S_C|L1_S_XSCALE_TEX(TEX_XSCALE_X)|\ 296171620Scognet L1_S_XSCALE_TEX(TEX_XSCALE_T)) 297129198Scognet 298129198Scognet#define L2_L_CACHE_MASK_generic (L2_B|L2_C) 299171620Scognet#define L2_L_CACHE_MASK_xscale (L2_B|L2_C|L2_XSCALE_L_TEX(TEX_XSCALE_X) | \ 300171620Scognet L2_XSCALE_L_TEX(TEX_XSCALE_T)) 301129198Scognet 302129198Scognet#define L2_S_PROT_U_generic (L2_AP(AP_U)) 303129198Scognet#define L2_S_PROT_W_generic (L2_AP(AP_W)) 304129198Scognet#define L2_S_PROT_MASK_generic (L2_S_PROT_U|L2_S_PROT_W) 305129198Scognet 306129198Scognet#define L2_S_PROT_U_xscale (L2_AP0(AP_U)) 307129198Scognet#define L2_S_PROT_W_xscale (L2_AP0(AP_W)) 308129198Scognet#define L2_S_PROT_MASK_xscale (L2_S_PROT_U|L2_S_PROT_W) 309129198Scognet 310129198Scognet#define L2_S_CACHE_MASK_generic (L2_B|L2_C) 311171620Scognet#define L2_S_CACHE_MASK_xscale (L2_B|L2_C|L2_XSCALE_T_TEX(TEX_XSCALE_X)| \ 312171620Scognet L2_XSCALE_T_TEX(TEX_XSCALE_X)) 313129198Scognet 314129198Scognet#define L1_S_PROTO_generic (L1_TYPE_S | L1_S_IMP) 315129198Scognet#define L1_S_PROTO_xscale (L1_TYPE_S) 316129198Scognet 317129198Scognet#define L1_C_PROTO_generic (L1_TYPE_C | L1_C_IMP2) 318129198Scognet#define L1_C_PROTO_xscale (L1_TYPE_C) 319129198Scognet 320129198Scognet#define L2_L_PROTO (L2_TYPE_L) 321129198Scognet 322129198Scognet#define L2_S_PROTO_generic (L2_TYPE_S) 323129198Scognet#define L2_S_PROTO_xscale (L2_TYPE_XSCALE_XS) 324129198Scognet 325129198Scognet/* 326129198Scognet * User-visible names for the ones that vary with MMU class. 
327129198Scognet */ 328239268Sgonzo#if (ARM_MMU_V6 + ARM_MMU_V7) != 0 329239268Sgonzo#define L2_AP(x) (L2_AP0(x)) 330239268Sgonzo#else 331239268Sgonzo#define L2_AP(x) (L2_AP0(x) | L2_AP1(x) | L2_AP2(x) | L2_AP3(x)) 332239268Sgonzo#endif 333129198Scognet 334129198Scognet#if ARM_NMMUS > 1 335129198Scognet/* More than one MMU class configured; use variables. */ 336129198Scognet#define L2_S_PROT_U pte_l2_s_prot_u 337129198Scognet#define L2_S_PROT_W pte_l2_s_prot_w 338129198Scognet#define L2_S_PROT_MASK pte_l2_s_prot_mask 339129198Scognet 340129198Scognet#define L1_S_CACHE_MASK pte_l1_s_cache_mask 341129198Scognet#define L2_L_CACHE_MASK pte_l2_l_cache_mask 342129198Scognet#define L2_S_CACHE_MASK pte_l2_s_cache_mask 343129198Scognet 344129198Scognet#define L1_S_PROTO pte_l1_s_proto 345129198Scognet#define L1_C_PROTO pte_l1_c_proto 346129198Scognet#define L2_S_PROTO pte_l2_s_proto 347129198Scognet 348129198Scognet#elif (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0 349129198Scognet#define L2_S_PROT_U L2_S_PROT_U_generic 350129198Scognet#define L2_S_PROT_W L2_S_PROT_W_generic 351129198Scognet#define L2_S_PROT_MASK L2_S_PROT_MASK_generic 352129198Scognet 353129198Scognet#define L1_S_CACHE_MASK L1_S_CACHE_MASK_generic 354129198Scognet#define L2_L_CACHE_MASK L2_L_CACHE_MASK_generic 355129198Scognet#define L2_S_CACHE_MASK L2_S_CACHE_MASK_generic 356129198Scognet 357129198Scognet#define L1_S_PROTO L1_S_PROTO_generic 358129198Scognet#define L1_C_PROTO L1_C_PROTO_generic 359129198Scognet#define L2_S_PROTO L2_S_PROTO_generic 360129198Scognet 361129198Scognet#elif ARM_MMU_XSCALE == 1 362129198Scognet#define L2_S_PROT_U L2_S_PROT_U_xscale 363129198Scognet#define L2_S_PROT_W L2_S_PROT_W_xscale 364129198Scognet#define L2_S_PROT_MASK L2_S_PROT_MASK_xscale 365129198Scognet 366129198Scognet#define L1_S_CACHE_MASK L1_S_CACHE_MASK_xscale 367129198Scognet#define L2_L_CACHE_MASK L2_L_CACHE_MASK_xscale 368129198Scognet#define L2_S_CACHE_MASK L2_S_CACHE_MASK_xscale 369129198Scognet 
370129198Scognet#define L1_S_PROTO L1_S_PROTO_xscale 371129198Scognet#define L1_C_PROTO L1_C_PROTO_xscale 372129198Scognet#define L2_S_PROTO L2_S_PROTO_xscale 373129198Scognet 374239268Sgonzo#elif (ARM_MMU_V6 + ARM_MMU_V7) != 0 375250928Sgber/* 376250928Sgber * AP[2:1] access permissions model: 377250928Sgber * 378250928Sgber * AP[2](APX) - Write Disable 379250928Sgber * AP[1] - User Enable 380250928Sgber * AP[0] - Reference Flag 381250928Sgber * 382250928Sgber * AP[2] AP[1] Kernel User 383250928Sgber * 0 0 R/W N 384250928Sgber * 0 1 R/W R/W 385250928Sgber * 1 0 R N 386250928Sgber * 1 1 R R 387250928Sgber * 388250928Sgber */ 389250928Sgber#define L2_S_PROT_R (0) /* kernel read */ 390250928Sgber#define L2_S_PROT_U (L2_AP0(2)) /* user read */ 391250928Sgber#define L2_S_REF (L2_AP0(1)) /* reference flag */ 392239268Sgonzo 393239268Sgonzo#define L2_S_PROT_MASK (L2_S_PROT_U|L2_S_PROT_R) 394250930Sgber#define L2_S_EXECUTABLE(pte) (!(pte & L2_XN)) 395239268Sgonzo#define L2_S_WRITABLE(pte) (!(pte & L2_APX)) 396250928Sgber#define L2_S_REFERENCED(pte) (!!(pte & L2_S_REF)) 397239268Sgonzo 398239268Sgonzo#ifndef SMP 399239268Sgonzo#define L1_S_CACHE_MASK (L1_S_TEX_MASK|L1_S_B|L1_S_C) 400239268Sgonzo#define L2_L_CACHE_MASK (L2_L_TEX_MASK|L2_B|L2_C) 401239268Sgonzo#define L2_S_CACHE_MASK (L2_S_TEX_MASK|L2_B|L2_C) 402239268Sgonzo#else 403239268Sgonzo#define L1_S_CACHE_MASK (L1_S_TEX_MASK|L1_S_B|L1_S_C|L1_SHARED) 404239268Sgonzo#define L2_L_CACHE_MASK (L2_L_TEX_MASK|L2_B|L2_C|L2_SHARED) 405239268Sgonzo#define L2_S_CACHE_MASK (L2_S_TEX_MASK|L2_B|L2_C|L2_SHARED) 406239268Sgonzo#endif /* SMP */ 407239268Sgonzo 408239268Sgonzo#define L1_S_PROTO (L1_TYPE_S) 409239268Sgonzo#define L1_C_PROTO (L1_TYPE_C) 410239268Sgonzo#define L2_S_PROTO (L2_TYPE_S) 411239268Sgonzo 412239268Sgonzo#ifndef SMP 413239268Sgonzo#define ARM_L1S_STRONG_ORD (0) 414239268Sgonzo#define ARM_L1S_DEVICE_NOSHARE (L1_S_TEX(2)) 415239268Sgonzo#define ARM_L1S_DEVICE_SHARE (L1_S_B) 416239268Sgonzo#define 
ARM_L1S_NRML_NOCACHE (L1_S_TEX(1)) 417239268Sgonzo#define ARM_L1S_NRML_IWT_OWT (L1_S_C) 418239268Sgonzo#define ARM_L1S_NRML_IWB_OWB (L1_S_C|L1_S_B) 419239268Sgonzo#define ARM_L1S_NRML_IWBA_OWBA (L1_S_TEX(1)|L1_S_C|L1_S_B) 420239268Sgonzo 421239268Sgonzo#define ARM_L2L_STRONG_ORD (0) 422239268Sgonzo#define ARM_L2L_DEVICE_NOSHARE (L2_L_TEX(2)) 423239268Sgonzo#define ARM_L2L_DEVICE_SHARE (L2_B) 424239268Sgonzo#define ARM_L2L_NRML_NOCACHE (L2_L_TEX(1)) 425239268Sgonzo#define ARM_L2L_NRML_IWT_OWT (L2_C) 426239268Sgonzo#define ARM_L2L_NRML_IWB_OWB (L2_C|L2_B) 427239268Sgonzo#define ARM_L2L_NRML_IWBA_OWBA (L2_L_TEX(1)|L2_C|L2_B) 428239268Sgonzo 429239268Sgonzo#define ARM_L2S_STRONG_ORD (0) 430239268Sgonzo#define ARM_L2S_DEVICE_NOSHARE (L2_S_TEX(2)) 431239268Sgonzo#define ARM_L2S_DEVICE_SHARE (L2_B) 432239268Sgonzo#define ARM_L2S_NRML_NOCACHE (L2_S_TEX(1)) 433239268Sgonzo#define ARM_L2S_NRML_IWT_OWT (L2_C) 434239268Sgonzo#define ARM_L2S_NRML_IWB_OWB (L2_C|L2_B) 435239268Sgonzo#define ARM_L2S_NRML_IWBA_OWBA (L2_S_TEX(1)|L2_C|L2_B) 436239268Sgonzo#else 437239268Sgonzo#define ARM_L1S_STRONG_ORD (0) 438239268Sgonzo#define ARM_L1S_DEVICE_NOSHARE (L1_S_TEX(2)) 439239268Sgonzo#define ARM_L1S_DEVICE_SHARE (L1_S_B) 440239268Sgonzo#define ARM_L1S_NRML_NOCACHE (L1_S_TEX(1)|L1_SHARED) 441239268Sgonzo#define ARM_L1S_NRML_IWT_OWT (L1_S_C|L1_SHARED) 442239268Sgonzo#define ARM_L1S_NRML_IWB_OWB (L1_S_C|L1_S_B|L1_SHARED) 443239268Sgonzo#define ARM_L1S_NRML_IWBA_OWBA (L1_S_TEX(1)|L1_S_C|L1_S_B|L1_SHARED) 444239268Sgonzo 445239268Sgonzo#define ARM_L2L_STRONG_ORD (0) 446239268Sgonzo#define ARM_L2L_DEVICE_NOSHARE (L2_L_TEX(2)) 447239268Sgonzo#define ARM_L2L_DEVICE_SHARE (L2_B) 448239268Sgonzo#define ARM_L2L_NRML_NOCACHE (L2_L_TEX(1)|L2_SHARED) 449239268Sgonzo#define ARM_L2L_NRML_IWT_OWT (L2_C|L2_SHARED) 450239268Sgonzo#define ARM_L2L_NRML_IWB_OWB (L2_C|L2_B|L2_SHARED) 451239268Sgonzo#define ARM_L2L_NRML_IWBA_OWBA (L2_L_TEX(1)|L2_C|L2_B|L2_SHARED) 452239268Sgonzo 453239268Sgonzo#define 
ARM_L2S_STRONG_ORD (0) 454239268Sgonzo#define ARM_L2S_DEVICE_NOSHARE (L2_S_TEX(2)) 455239268Sgonzo#define ARM_L2S_DEVICE_SHARE (L2_B) 456239268Sgonzo#define ARM_L2S_NRML_NOCACHE (L2_S_TEX(1)|L2_SHARED) 457239268Sgonzo#define ARM_L2S_NRML_IWT_OWT (L2_C|L2_SHARED) 458239268Sgonzo#define ARM_L2S_NRML_IWB_OWB (L2_C|L2_B|L2_SHARED) 459239268Sgonzo#define ARM_L2S_NRML_IWBA_OWBA (L2_S_TEX(1)|L2_C|L2_B|L2_SHARED) 460239268Sgonzo#endif /* SMP */ 461129198Scognet#endif /* ARM_NMMUS > 1 */ 462129198Scognet 463129198Scognet#if (ARM_MMU_SA1 == 1) && (ARM_NMMUS == 1) 464129198Scognet#define PMAP_NEEDS_PTE_SYNC 1 465129198Scognet#define PMAP_INCLUDE_PTE_SYNC 466171620Scognet#elif defined(CPU_XSCALE_81342) 467171620Scognet#define PMAP_NEEDS_PTE_SYNC 1 468171620Scognet#define PMAP_INCLUDE_PTE_SYNC 469129198Scognet#elif (ARM_MMU_SA1 == 0) 470129198Scognet#define PMAP_NEEDS_PTE_SYNC 0 471129198Scognet#endif 472129198Scognet 473129198Scognet/* 474129198Scognet * These macros return various bits based on kernel/user and protection. 475129198Scognet * Note that the compiler will usually fold these at compile time. 476129198Scognet */ 477239268Sgonzo#if (ARM_MMU_V6 + ARM_MMU_V7) == 0 478239268Sgonzo 479239268Sgonzo#define L1_S_PROT_U (L1_S_AP(AP_U)) 480239268Sgonzo#define L1_S_PROT_W (L1_S_AP(AP_W)) 481239268Sgonzo#define L1_S_PROT_MASK (L1_S_PROT_U|L1_S_PROT_W) 482239268Sgonzo#define L1_S_WRITABLE(pd) ((pd) & L1_S_PROT_W) 483239268Sgonzo 484129198Scognet#define L1_S_PROT(ku, pr) ((((ku) == PTE_USER) ? L1_S_PROT_U : 0) | \ 485129198Scognet (((pr) & VM_PROT_WRITE) ? L1_S_PROT_W : 0)) 486129198Scognet 487239268Sgonzo#define L2_L_PROT_U (L2_AP(AP_U)) 488239268Sgonzo#define L2_L_PROT_W (L2_AP(AP_W)) 489239268Sgonzo#define L2_L_PROT_MASK (L2_L_PROT_U|L2_L_PROT_W) 490239268Sgonzo 491129198Scognet#define L2_L_PROT(ku, pr) ((((ku) == PTE_USER) ? L2_L_PROT_U : 0) | \ 492129198Scognet (((pr) & VM_PROT_WRITE) ? 
L2_L_PROT_W : 0)) 493129198Scognet 494129198Scognet#define L2_S_PROT(ku, pr) ((((ku) == PTE_USER) ? L2_S_PROT_U : 0) | \ 495129198Scognet (((pr) & VM_PROT_WRITE) ? L2_S_PROT_W : 0)) 496239268Sgonzo#else 497239268Sgonzo#define L1_S_PROT_U (L1_S_AP(AP_U)) 498239268Sgonzo#define L1_S_PROT_MASK (L1_S_APX|L1_S_AP(0x3)) 499239268Sgonzo#define L1_S_WRITABLE(pd) (!((pd) & L1_S_APX)) 500129198Scognet 501239268Sgonzo#define L1_S_PROT(ku, pr) (L1_S_PROT_MASK & ~((((ku) == PTE_KERNEL) ? L1_S_PROT_U : 0) | \ 502239268Sgonzo (((pr) & VM_PROT_WRITE) ? L1_S_APX : 0))) 503239268Sgonzo 504239268Sgonzo#define L2_L_PROT_MASK (L2_APX|L2_AP0(0x3)) 505239268Sgonzo#define L2_L_PROT(ku, pr) (L2_L_PROT_MASK & ~((((ku) == PTE_KERNEL) ? L2_S_PROT_U : 0) | \ 506239268Sgonzo (((pr) & VM_PROT_WRITE) ? L2_APX : 0))) 507239268Sgonzo 508239268Sgonzo#define L2_S_PROT(ku, pr) (L2_S_PROT_MASK & ~((((ku) == PTE_KERNEL) ? L2_S_PROT_U : 0) | \ 509239268Sgonzo (((pr) & VM_PROT_WRITE) ? L2_APX : 0))) 510239268Sgonzo 511239268Sgonzo#endif 512239268Sgonzo 513129198Scognet/* 514129198Scognet * Macros to test if a mapping is mappable with an L1 Section mapping 515129198Scognet * or an L2 Large Page mapping. 516129198Scognet */ 517129198Scognet#define L1_S_MAPPABLE_P(va, pa, size) \ 518129198Scognet ((((va) | (pa)) & L1_S_OFFSET) == 0 && (size) >= L1_S_SIZE) 519129198Scognet 520129198Scognet#define L2_L_MAPPABLE_P(va, pa, size) \ 521129198Scognet ((((va) | (pa)) & L2_L_OFFSET) == 0 && (size) >= L2_L_SIZE) 522129198Scognet 523129198Scognet/* 524129198Scognet * Provide a fallback in case we were not able to determine it at 525129198Scognet * compile-time. 
526129198Scognet */ 527129198Scognet#ifndef PMAP_NEEDS_PTE_SYNC 528129198Scognet#define PMAP_NEEDS_PTE_SYNC pmap_needs_pte_sync 529129198Scognet#define PMAP_INCLUDE_PTE_SYNC 530129198Scognet#endif 531129198Scognet 532129198Scognet#define PTE_SYNC(pte) \ 533129198Scognetdo { \ 534171620Scognet if (PMAP_NEEDS_PTE_SYNC) { \ 535129198Scognet cpu_dcache_wb_range((vm_offset_t)(pte), sizeof(pt_entry_t));\ 536171620Scognet cpu_l2cache_wb_range((vm_offset_t)(pte), sizeof(pt_entry_t));\ 537228530Sraj } else \ 538228530Sraj cpu_drain_writebuf(); \ 539129198Scognet} while (/*CONSTCOND*/0) 540129198Scognet 541129198Scognet#define PTE_SYNC_RANGE(pte, cnt) \ 542129198Scognetdo { \ 543129198Scognet if (PMAP_NEEDS_PTE_SYNC) { \ 544129198Scognet cpu_dcache_wb_range((vm_offset_t)(pte), \ 545129198Scognet (cnt) << 2); /* * sizeof(pt_entry_t) */ \ 546171620Scognet cpu_l2cache_wb_range((vm_offset_t)(pte), \ 547171620Scognet (cnt) << 2); /* * sizeof(pt_entry_t) */ \ 548228530Sraj } else \ 549228530Sraj cpu_drain_writebuf(); \ 550129198Scognet} while (/*CONSTCOND*/0) 551129198Scognet 552129198Scognetextern pt_entry_t pte_l1_s_cache_mode; 553129198Scognetextern pt_entry_t pte_l1_s_cache_mask; 554129198Scognet 555129198Scognetextern pt_entry_t pte_l2_l_cache_mode; 556129198Scognetextern pt_entry_t pte_l2_l_cache_mask; 557129198Scognet 558129198Scognetextern pt_entry_t pte_l2_s_cache_mode; 559129198Scognetextern pt_entry_t pte_l2_s_cache_mask; 560129198Scognet 561129198Scognetextern pt_entry_t pte_l1_s_cache_mode_pt; 562129198Scognetextern pt_entry_t pte_l2_l_cache_mode_pt; 563129198Scognetextern pt_entry_t pte_l2_s_cache_mode_pt; 564129198Scognet 565129198Scognetextern pt_entry_t pte_l2_s_prot_u; 566129198Scognetextern pt_entry_t pte_l2_s_prot_w; 567129198Scognetextern pt_entry_t pte_l2_s_prot_mask; 568236992Simp 569129198Scognetextern pt_entry_t pte_l1_s_proto; 570129198Scognetextern pt_entry_t pte_l1_c_proto; 571129198Scognetextern pt_entry_t pte_l2_s_proto; 572129198Scognet 
573129198Scognetextern void (*pmap_copy_page_func)(vm_paddr_t, vm_paddr_t); 574248280Skibextern void (*pmap_copy_page_offs_func)(vm_paddr_t a_phys, 575248280Skib vm_offset_t a_offs, vm_paddr_t b_phys, vm_offset_t b_offs, int cnt); 576129198Scognetextern void (*pmap_zero_page_func)(vm_paddr_t, int, int); 577129198Scognet 578239268Sgonzo#if (ARM_MMU_GENERIC + ARM_MMU_V6 + ARM_MMU_V7 + ARM_MMU_SA1) != 0 || defined(CPU_XSCALE_81342) 579129198Scognetvoid pmap_copy_page_generic(vm_paddr_t, vm_paddr_t); 580129198Scognetvoid pmap_zero_page_generic(vm_paddr_t, int, int); 581129198Scognet 582129198Scognetvoid pmap_pte_init_generic(void); 583129198Scognet#if defined(CPU_ARM8) 584129198Scognetvoid pmap_pte_init_arm8(void); 585129198Scognet#endif 586129198Scognet#if defined(CPU_ARM9) 587129198Scognetvoid pmap_pte_init_arm9(void); 588129198Scognet#endif /* CPU_ARM9 */ 589129198Scognet#if defined(CPU_ARM10) 590129198Scognetvoid pmap_pte_init_arm10(void); 591129198Scognet#endif /* CPU_ARM10 */ 592239268Sgonzo#if (ARM_MMU_V6 + ARM_MMU_V7) != 0 593239268Sgonzovoid pmap_pte_init_mmu_v6(void); 594244476Sgonzo#endif /* (ARM_MMU_V6 + ARM_MMU_V7) != 0 */ 595129198Scognet#endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0 */ 596129198Scognet 597129198Scognet#if /* ARM_MMU_SA1 == */1 598129198Scognetvoid pmap_pte_init_sa1(void); 599129198Scognet#endif /* ARM_MMU_SA1 == 1 */ 600129198Scognet 601129198Scognet#if ARM_MMU_XSCALE == 1 602129198Scognetvoid pmap_copy_page_xscale(vm_paddr_t, vm_paddr_t); 603129198Scognetvoid pmap_zero_page_xscale(vm_paddr_t, int, int); 604129198Scognet 605129198Scognetvoid pmap_pte_init_xscale(void); 606129198Scognet 607129198Scognetvoid xscale_setup_minidata(vm_offset_t, vm_offset_t, vm_offset_t); 608129198Scognet 609135641Scognetvoid pmap_use_minicache(vm_offset_t, vm_size_t); 610129198Scognet#endif /* ARM_MMU_XSCALE == 1 */ 611171620Scognet#if defined(CPU_XSCALE_81342) 612171620Scognet#define ARM_HAVE_SUPERSECTIONS 613171620Scognet#endif 614171620Scognet 
615129198Scognet#define PTE_KERNEL 0 616129198Scognet#define PTE_USER 1 617129198Scognet#define l1pte_valid(pde) ((pde) != 0) 618129198Scognet#define l1pte_section_p(pde) (((pde) & L1_TYPE_MASK) == L1_TYPE_S) 619129198Scognet#define l1pte_page_p(pde) (((pde) & L1_TYPE_MASK) == L1_TYPE_C) 620129198Scognet#define l1pte_fpage_p(pde) (((pde) & L1_TYPE_MASK) == L1_TYPE_F) 621129198Scognet 622129198Scognet#define l2pte_index(v) (((v) & L2_ADDR_BITS) >> L2_S_SHIFT) 623129198Scognet#define l2pte_valid(pte) ((pte) != 0) 624129198Scognet#define l2pte_pa(pte) ((pte) & L2_S_FRAME) 625129198Scognet#define l2pte_minidata(pte) (((pte) & \ 626129198Scognet (L2_B | L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X)))\ 627129198Scognet == (L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X))) 628129198Scognet 629129198Scognet/* L1 and L2 page table macros */ 630129198Scognet#define pmap_pde_v(pde) l1pte_valid(*(pde)) 631129198Scognet#define pmap_pde_section(pde) l1pte_section_p(*(pde)) 632129198Scognet#define pmap_pde_page(pde) l1pte_page_p(*(pde)) 633129198Scognet#define pmap_pde_fpage(pde) l1pte_fpage_p(*(pde)) 634129198Scognet 635129198Scognet#define pmap_pte_v(pte) l2pte_valid(*(pte)) 636129198Scognet#define pmap_pte_pa(pte) l2pte_pa(*(pte)) 637129198Scognet 638129198Scognet/* 639129198Scognet * Flags that indicate attributes of pages or mappings of pages. 640129198Scognet * 641129198Scognet * The PVF_MOD and PVF_REF flags are stored in the mdpage for each 642129198Scognet * page. PVF_WIRED, PVF_WRITE, and PVF_NC are kept in individual 643129198Scognet * pv_entry's for each page. They live in the same "namespace" so 644129198Scognet * that we can clear multiple attributes at a time. 645129198Scognet * 646129198Scognet * Note the "non-cacheable" flag generally means the page has 647129198Scognet * multiple mappings in a given address space. 
648129198Scognet */ 649129198Scognet#define PVF_MOD 0x01 /* page is modified */ 650129198Scognet#define PVF_REF 0x02 /* page is referenced */ 651129198Scognet#define PVF_WIRED 0x04 /* mapping is wired */ 652129198Scognet#define PVF_WRITE 0x08 /* mapping is writable */ 653129198Scognet#define PVF_EXEC 0x10 /* mapping is executable */ 654175840Scognet#define PVF_NC 0x20 /* mapping is non-cacheable */ 655175840Scognet#define PVF_MWC 0x40 /* mapping is used multiple times in userland */ 656194459Sthompsa#define PVF_UNMAN 0x80 /* mapping is unmanaged */ 657129198Scognet 658129198Scognetvoid vector_page_setprot(int); 659135641Scognet 660129198Scognet/* 661135641Scognet * This structure is used by machine-dependent code to describe 662135641Scognet * static mappings of devices, created at bootstrap time. 663129198Scognet */ 664135641Scognetstruct pmap_devmap { 665135641Scognet vm_offset_t pd_va; /* virtual address */ 666135641Scognet vm_paddr_t pd_pa; /* physical address */ 667135641Scognet vm_size_t pd_size; /* size of region */ 668135641Scognet vm_prot_t pd_prot; /* protection code */ 669135641Scognet int pd_cache; /* cache attributes */ 670135641Scognet}; 671129198Scognet 672135641Scognetconst struct pmap_devmap *pmap_devmap_find_pa(vm_paddr_t, vm_size_t); 673135641Scognetconst struct pmap_devmap *pmap_devmap_find_va(vm_offset_t, vm_size_t); 674129198Scognet 675135641Scognetvoid pmap_devmap_bootstrap(vm_offset_t, const struct pmap_devmap *); 676135641Scognetvoid pmap_devmap_register(const struct pmap_devmap *); 677137362Scognet 678147114Scognet#define SECTION_CACHE 0x1 679147114Scognet#define SECTION_PT 0x2 680147114Scognetvoid pmap_kenter_section(vm_offset_t, vm_paddr_t, int flags); 681171620Scognet#ifdef ARM_HAVE_SUPERSECTIONS 682170582Scognetvoid pmap_kenter_supersection(vm_offset_t, uint64_t, int flags); 683171620Scognet#endif 684147114Scognet 685137362Scognetextern char *_tmppt; 686137362Scognet 687152128Scognetvoid pmap_postinit(void); 688152128Scognet 
689147114Scognet#ifdef ARM_USE_SMALL_ALLOC 690147114Scognetvoid arm_add_smallalloc_pages(void *, void *, int, int); 691161105Scognetvm_offset_t arm_ptovirt(vm_paddr_t); 692161105Scognetvoid arm_init_smallalloc(void); 693147114Scognetstruct arm_small_page { 694147114Scognet void *addr; 695147114Scognet TAILQ_ENTRY(arm_small_page) pg_list; 696147114Scognet}; 697150867Scognet 698150936Scognet#endif 699156191Scognet 700166063Scognet#define ARM_NOCACHE_KVA_SIZE 0x1000000 701156191Scognetextern vm_offset_t arm_nocache_startaddr; 702156191Scognetvoid *arm_remap_nocache(void *, vm_size_t); 703156191Scognetvoid arm_unmap_nocache(void *, vm_size_t); 704156191Scognet 705150867Scognetextern vm_paddr_t dump_avail[]; 706129198Scognet#endif /* _KERNEL */ 707129198Scognet 708129198Scognet#endif /* !LOCORE */ 709129198Scognet 710129198Scognet#endif /* !_MACHINE_PMAP_H_ */ 711