pmap-v4.h revision 295036
/*-
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Derived from hp300 version by Mike Hibler, this version by William
 * Jolitz uses a recursive map [a pde points to the page directory] to
 * map the page tables using the pagetables themselves. This is done to
 * reduce the impact on kernel virtual memory for lots of sparse address
 * space, and to reduce the cost of memory to each process.
 *
 * from: hp300: @(#)pmap.h	7.2 (Berkeley) 12/16/90
 * from: @(#)pmap.h	7.4 (Berkeley) 5/12/91
 * from: FreeBSD: src/sys/i386/include/pmap.h,v 1.70 2000/11/30
 *
 * $FreeBSD: head/sys/arm/include/pmap.h 295036 2016-01-29 10:31:54Z mmel $
 */

#include <machine/acle-compat.h>

#if __ARM_ARCH >= 6
#include <machine/pmap-v6.h>
#else /* __ARM_ARCH >= 6 */

#ifndef _MACHINE_PMAP_H_
#define _MACHINE_PMAP_H_

#include <machine/pte.h>
#include <machine/cpuconf.h>

/*
 * Pte related macros
 */
#if ARM_ARCH_6 || ARM_ARCH_7A
#ifdef SMP
#define PTE_NOCACHE	2
#else
#define PTE_NOCACHE	1
#endif
#define PTE_CACHE	6
#define PTE_DEVICE	2
#define PTE_PAGETABLE	6
#else
#define PTE_NOCACHE	1
#define PTE_CACHE	2
#define PTE_DEVICE	PTE_NOCACHE
#define PTE_PAGETABLE	3
#endif

enum mem_type {
	STRONG_ORD = 0,
	DEVICE_NOSHARE,
	DEVICE_SHARE,
	NRML_NOCACHE,
	NRML_IWT_OWT,
	NRML_IWB_OWB,
	NRML_IWBA_OWBA
};

#ifndef LOCORE

#include <sys/queue.h>
#include <sys/_cpuset.h>
#include <sys/_lock.h>
#include <sys/_mutex.h>

#define PDESIZE		sizeof(pd_entry_t)	/* for assembly files */
#define PTESIZE		sizeof(pt_entry_t)	/* for assembly files */

#ifdef _KERNEL

#define vtophys(va)	pmap_kextract((vm_offset_t)(va))

#endif

#define pmap_page_get_memattr(m)	((m)->md.pv_memattr)
#define pmap_page_is_write_mapped(m)	(((m)->aflags & PGA_WRITEABLE) != 0)
#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
boolean_t pmap_page_is_mapped(vm_page_t);
#else
#define pmap_page_is_mapped(m)	(!TAILQ_EMPTY(&(m)->md.pv_list))
#endif
void pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma);

/*
 * Pmap stuff
 */

/*
 * This structure is used to hold a virtual<->physical address
 * association and is used mostly by bootstrap code
 */
struct pv_addr {
	SLIST_ENTRY(pv_addr)	pv_list;
	vm_offset_t		pv_va;
	vm_paddr_t		pv_pa;
};

struct pv_entry;
struct pv_chunk;

struct md_page {
	int		pvh_attrs;
	vm_memattr_t	pv_memattr;
#if (ARM_MMU_V6 + ARM_MMU_V7) == 0
	vm_offset_t	pv_kva;		/* first kernel VA mapping */
#endif
	TAILQ_HEAD(,pv_entry)	pv_list;
};

struct l1_ttable;
struct l2_dtable;

/*
 * The number of L2 descriptor tables which can be tracked by an l2_dtable.
 * A bucket size of 16 provides for 16MB of contiguous virtual address
 * space per l2_dtable. Most processes will, therefore, require only two or
 * three of these to map their whole working set.
 */
#define L2_BUCKET_LOG2	4
#define L2_BUCKET_SIZE	(1 << L2_BUCKET_LOG2)

/*
 * Given the above "L2-descriptors-per-l2_dtable" constant, the number
 * of l2_dtable structures required to track all possible page descriptors
 * mappable by an L1 translation table is given by the following constants:
 */
#define L2_LOG2		((32 - L1_S_SHIFT) - L2_BUCKET_LOG2)
#define L2_SIZE		(1 << L2_LOG2)
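/*
 * Worked example (an illustrative note, not in the original header):
 * with the usual 1MB sections, L1_S_SHIFT is 20, so
 * L2_LOG2 = (32 - 20) - 4 = 8 and L2_SIZE = 256.  Each of the 256
 * l2_dtable slots in a pmap thus covers 16 * 1MB = 16MB of virtual
 * address space, matching the bucket size described above, and
 * 256 * 16MB spans the full 4GB address space.
 */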
struct pmap {
	struct mtx		pm_mtx;
	u_int8_t		pm_domain;
	struct l1_ttable	*pm_l1;
	struct l2_dtable	*pm_l2[L2_SIZE];
	cpuset_t		pm_active;	/* active on cpus */
	struct pmap_statistics	pm_stats;	/* pmap statistics */
#if (ARM_MMU_V6 + ARM_MMU_V7) != 0
	TAILQ_HEAD(,pv_chunk)	pm_pvchunk;	/* list of mappings in pmap */
#else
	TAILQ_HEAD(,pv_entry)	pm_pvlist;	/* list of mappings in pmap */
#endif
};

typedef struct pmap *pmap_t;

#ifdef _KERNEL
extern struct pmap	kernel_pmap_store;
#define kernel_pmap	(&kernel_pmap_store)
#define pmap_kernel()	kernel_pmap

#define PMAP_ASSERT_LOCKED(pmap) \
				mtx_assert(&(pmap)->pm_mtx, MA_OWNED)
#define PMAP_LOCK(pmap)		mtx_lock(&(pmap)->pm_mtx)
#define PMAP_LOCK_DESTROY(pmap)	mtx_destroy(&(pmap)->pm_mtx)
#define PMAP_LOCK_INIT(pmap)	mtx_init(&(pmap)->pm_mtx, "pmap", \
				    NULL, MTX_DEF | MTX_DUPOK)
#define PMAP_OWNED(pmap)	mtx_owned(&(pmap)->pm_mtx)
#define PMAP_MTX(pmap)		(&(pmap)->pm_mtx)
#define PMAP_TRYLOCK(pmap)	mtx_trylock(&(pmap)->pm_mtx)
#define PMAP_UNLOCK(pmap)	mtx_unlock(&(pmap)->pm_mtx)
#endif

/*
 * For each vm_page_t, there is a list of all currently valid virtual
 * mappings of that page.  An entry is a pv_entry_t, the list is pv_list.
 */
typedef struct pv_entry {
	vm_offset_t	pv_va;		/* virtual address for mapping */
	TAILQ_ENTRY(pv_entry)	pv_list;
	int		pv_flags;	/* flags (wired, etc...) */
#if (ARM_MMU_V6 + ARM_MMU_V7) == 0
	pmap_t		pv_pmap;	/* pmap where mapping lies */
	TAILQ_ENTRY(pv_entry)	pv_plist;
#endif
} *pv_entry_t;

/*
 * pv_entries are allocated in chunks per-process.  This avoids the
 * need to track per-pmap assignments.
 */
#define _NPCM	8
#define _NPCPV	252

struct pv_chunk {
	pmap_t			pc_pmap;
	TAILQ_ENTRY(pv_chunk)	pc_list;
	uint32_t		pc_map[_NPCM];	/* bitmap; 1 = free */
	uint32_t		pc_dummy[3];	/* aligns pv_chunk to 4KB */
	TAILQ_ENTRY(pv_chunk)	pc_lru;
	struct pv_entry		pc_pventry[_NPCPV];
};
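/*
 * Size check (an illustrative note, not in the original header): on
 * ILP32 ARM with the ARMv6/v7 pv_entry layout (pv_va + TAILQ_ENTRY +
 * pv_flags = 16 bytes), the header fields above occupy
 * 4 + 8 + 32 + 12 + 8 = 64 bytes and the entry array occupies
 * 252 * 16 = 4032 bytes, so a pv_chunk is exactly 4096 bytes.  The
 * 8 * 32 = 256 bits in pc_map cover the 252 entries with 4 to spare.
 */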
#ifdef _KERNEL

boolean_t pmap_get_pde_pte(pmap_t, vm_offset_t, pd_entry_t **, pt_entry_t **);

/*
 * virtual address to page table entry and
 * to physical address.  Likewise for alternate address space.
 * Note: these work recursively, thus vtopte of a pte will give
 * the corresponding pde that in turn maps it.
 */

/*
 * The current top of kernel VM.
 */
extern vm_offset_t pmap_curmaxkvaddr;

struct pcb;

void	pmap_set_pcb_pagedir(pmap_t, struct pcb *);

/* Virtual address to page table entry */
static __inline pt_entry_t *
vtopte(vm_offset_t va)
{
	pd_entry_t *pdep;
	pt_entry_t *ptep;

	if (pmap_get_pde_pte(pmap_kernel(), va, &pdep, &ptep) == FALSE)
		return (NULL);
	return (ptep);
}

extern vm_paddr_t phys_avail[];
extern vm_offset_t virtual_avail;
extern vm_offset_t virtual_end;

void	pmap_bootstrap(vm_offset_t firstaddr, struct pv_addr *l1pt);
int	pmap_change_attr(vm_offset_t, vm_size_t, int);
void	pmap_kenter(vm_offset_t va, vm_paddr_t pa);
void	pmap_kenter_nocache(vm_offset_t va, vm_paddr_t pa);
void	pmap_kenter_device(vm_offset_t, vm_size_t, vm_paddr_t);
void	pmap_kremove_device(vm_offset_t, vm_size_t);
void	*pmap_kenter_temporary(vm_paddr_t pa, int i);
void	pmap_kenter_user(vm_offset_t va, vm_paddr_t pa);
vm_paddr_t pmap_kextract(vm_offset_t va);
vm_paddr_t pmap_dump_kextract(vm_offset_t, pt2_entry_t *);
void	pmap_kremove(vm_offset_t);
void	*pmap_mapdev(vm_offset_t, vm_size_t);
void	pmap_unmapdev(vm_offset_t, vm_size_t);
vm_page_t pmap_use_pt(pmap_t, vm_offset_t);
void	pmap_debug(int);
#if (ARM_MMU_V6 + ARM_MMU_V7) == 0
void	pmap_map_section(vm_offset_t, vm_offset_t, vm_offset_t, int, int);
#endif
void	pmap_link_l2pt(vm_offset_t, vm_offset_t, struct pv_addr *);
vm_size_t pmap_map_chunk(vm_offset_t, vm_offset_t, vm_offset_t, vm_size_t,
	    int, int);
void	pmap_map_entry(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa,
	    int prot, int cache);
int	pmap_fault_fixup(pmap_t, vm_offset_t, vm_prot_t, int);

/*
 * Definitions for MMU domains
 */
#define PMAP_DOMAINS		15	/* 15 'user' domains (1-15) */
#define PMAP_DOMAIN_KERNEL	0	/* The kernel uses domain #0 */

/*
 * The new pmap ensures that page-tables are always mapping Write-Thru.
 * Thus, on some platforms we can run fast and loose and avoid syncing PTEs
 * on every change.
 *
 * Unfortunately, not all CPUs have a write-through cache mode.  So we
 * define PMAP_NEEDS_PTE_SYNC for C code to conditionally do PTE syncs,
 * and if there is the chance for PTE syncs to be needed, we define
 * PMAP_INCLUDE_PTE_SYNC so e.g. assembly code can include (and run)
 * the code.
 */
extern int pmap_needs_pte_sync;
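/*
 * Typical use (an illustrative sketch, not part of the original header):
 * a caller stores a new PTE and then pushes it past the data cache so
 * the hardware table walker sees it, e.g.
 *
 *	*ptep = L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) |
 *	    pte_l2_s_cache_mode;
 *	PTE_SYNC(ptep);
 *
 * where PTE_SYNC (defined further below) degenerates to a plain
 * write-buffer drain when PMAP_NEEDS_PTE_SYNC is 0.
 */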
/*
 * These macros define the various bit masks in the PTE.
 *
 * We use these macros since we use different bits on different processor
 * models.
 */

#define L1_S_CACHE_MASK_generic	(L1_S_B|L1_S_C)
#define L1_S_CACHE_MASK_xscale	(L1_S_B|L1_S_C|L1_S_XSCALE_TEX(TEX_XSCALE_X)|\
				L1_S_XSCALE_TEX(TEX_XSCALE_T))

#define L2_L_CACHE_MASK_generic	(L2_B|L2_C)
#define L2_L_CACHE_MASK_xscale	(L2_B|L2_C|L2_XSCALE_L_TEX(TEX_XSCALE_X) | \
				L2_XSCALE_L_TEX(TEX_XSCALE_T))

#define L2_S_PROT_U_generic	(L2_AP(AP_U))
#define L2_S_PROT_W_generic	(L2_AP(AP_W))
#define L2_S_PROT_MASK_generic	(L2_S_PROT_U|L2_S_PROT_W)

#define L2_S_PROT_U_xscale	(L2_AP0(AP_U))
#define L2_S_PROT_W_xscale	(L2_AP0(AP_W))
#define L2_S_PROT_MASK_xscale	(L2_S_PROT_U|L2_S_PROT_W)

#define L2_S_CACHE_MASK_generic	(L2_B|L2_C)
#define L2_S_CACHE_MASK_xscale	(L2_B|L2_C|L2_XSCALE_T_TEX(TEX_XSCALE_X)| \
				 L2_XSCALE_T_TEX(TEX_XSCALE_X))

#define L1_S_PROTO_generic	(L1_TYPE_S | L1_S_IMP)
#define L1_S_PROTO_xscale	(L1_TYPE_S)

#define L1_C_PROTO_generic	(L1_TYPE_C | L1_C_IMP2)
#define L1_C_PROTO_xscale	(L1_TYPE_C)

#define L2_L_PROTO		(L2_TYPE_L)

#define L2_S_PROTO_generic	(L2_TYPE_S)
#define L2_S_PROTO_xscale	(L2_TYPE_XSCALE_XS)

/*
 * User-visible names for the ones that vary with MMU class.
 */
#if (ARM_MMU_V6 + ARM_MMU_V7) != 0
#define L2_AP(x)	(L2_AP0(x))
#else
#define L2_AP(x)	(L2_AP0(x) | L2_AP1(x) | L2_AP2(x) | L2_AP3(x))
#endif

#if (ARM_MMU_V6 + ARM_MMU_V7) != 0
/*
 * AP[2:1] access permissions model:
 *
 * AP[2](APX)	- Write Disable
 * AP[1]	- User Enable
 * AP[0]	- Reference Flag
 *
 * AP[2]	AP[1]	Kernel	User
 *  0		0	R/W	N
 *  0		1	R/W	R/W
 *  1		0	R	N
 *  1		1	R	R
 */
#define L2_S_PROT_R		(0)		/* kernel read */
#define L2_S_PROT_U		(L2_AP0(2))	/* user read */
#define L2_S_REF		(L2_AP0(1))	/* reference flag */

#define L2_S_PROT_MASK		(L2_S_PROT_U|L2_S_PROT_R|L2_APX)
#define L2_S_EXECUTABLE(pte)	(!(pte & L2_XN))
#define L2_S_WRITABLE(pte)	(!(pte & L2_APX))
#define L2_S_REFERENCED(pte)	(!!(pte & L2_S_REF))

#ifndef SMP
#define L1_S_CACHE_MASK		(L1_S_TEX_MASK|L1_S_B|L1_S_C)
#define L2_L_CACHE_MASK		(L2_L_TEX_MASK|L2_B|L2_C)
#define L2_S_CACHE_MASK		(L2_S_TEX_MASK|L2_B|L2_C)
#else
#define L1_S_CACHE_MASK		(L1_S_TEX_MASK|L1_S_B|L1_S_C|L1_SHARED)
#define L2_L_CACHE_MASK		(L2_L_TEX_MASK|L2_B|L2_C|L2_SHARED)
#define L2_S_CACHE_MASK		(L2_S_TEX_MASK|L2_B|L2_C|L2_SHARED)
#endif /* SMP */

#define L1_S_PROTO		(L1_TYPE_S)
#define L1_C_PROTO		(L1_TYPE_C)
#define L2_S_PROTO		(L2_TYPE_S)

/*
 * Promotion to a 1MB (SECTION) mapping requires that the corresponding
 * 4KB (SMALL) page mappings have identical settings for the following fields:
 */
#define L2_S_PROMOTE		(L2_S_REF | L2_SHARED | L2_S_PROT_MASK | \
				 L2_XN | L2_S_PROTO)
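/*
 * Why the shifts in L1_S_DEMOTE below work (an illustrative note, not in
 * the original header): in a section descriptor the shareable bit
 * L1_SHARED is bit 16 while the small-page L2_SHARED is bit 10, and the
 * AP/APX permission and reference bits likewise sit 6 bit positions
 * higher in the L1 entry than in the L2 entry, hence the ">> 6" terms;
 * execute-never is bit 4 in a section but bit 0 in a small page, hence
 * the ">> 4".
 */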
/*
 * In order to compare 1MB (SECTION) entry settings with the 4KB (SMALL)
 * page mapping it is necessary to read and shift appropriate bits from
 * L1 entry to positions of the corresponding bits in the L2 entry.
 */
#define L1_S_DEMOTE(l1pd)	((((l1pd) & L1_S_PROTO) >> 0) | \
				(((l1pd) & L1_SHARED) >> 6) | \
				(((l1pd) & L1_S_REF) >> 6) | \
				(((l1pd) & L1_S_PROT_MASK) >> 6) | \
				(((l1pd) & L1_S_XN) >> 4))

#ifndef SMP
#define ARM_L1S_STRONG_ORD	(0)
#define ARM_L1S_DEVICE_NOSHARE	(L1_S_TEX(2))
#define ARM_L1S_DEVICE_SHARE	(L1_S_B)
#define ARM_L1S_NRML_NOCACHE	(L1_S_TEX(1))
#define ARM_L1S_NRML_IWT_OWT	(L1_S_C)
#define ARM_L1S_NRML_IWB_OWB	(L1_S_C|L1_S_B)
#define ARM_L1S_NRML_IWBA_OWBA	(L1_S_TEX(1)|L1_S_C|L1_S_B)

#define ARM_L2L_STRONG_ORD	(0)
#define ARM_L2L_DEVICE_NOSHARE	(L2_L_TEX(2))
#define ARM_L2L_DEVICE_SHARE	(L2_B)
#define ARM_L2L_NRML_NOCACHE	(L2_L_TEX(1))
#define ARM_L2L_NRML_IWT_OWT	(L2_C)
#define ARM_L2L_NRML_IWB_OWB	(L2_C|L2_B)
#define ARM_L2L_NRML_IWBA_OWBA	(L2_L_TEX(1)|L2_C|L2_B)

#define ARM_L2S_STRONG_ORD	(0)
#define ARM_L2S_DEVICE_NOSHARE	(L2_S_TEX(2))
#define ARM_L2S_DEVICE_SHARE	(L2_B)
#define ARM_L2S_NRML_NOCACHE	(L2_S_TEX(1))
#define ARM_L2S_NRML_IWT_OWT	(L2_C)
#define ARM_L2S_NRML_IWB_OWB	(L2_C|L2_B)
#define ARM_L2S_NRML_IWBA_OWBA	(L2_S_TEX(1)|L2_C|L2_B)
#else
#define ARM_L1S_STRONG_ORD	(0)
#define ARM_L1S_DEVICE_NOSHARE	(L1_S_TEX(2))
#define ARM_L1S_DEVICE_SHARE	(L1_S_B)
#define ARM_L1S_NRML_NOCACHE	(L1_S_TEX(1)|L1_SHARED)
#define ARM_L1S_NRML_IWT_OWT	(L1_S_C|L1_SHARED)
#define ARM_L1S_NRML_IWB_OWB	(L1_S_C|L1_S_B|L1_SHARED)
#define ARM_L1S_NRML_IWBA_OWBA	(L1_S_TEX(1)|L1_S_C|L1_S_B|L1_SHARED)

#define ARM_L2L_STRONG_ORD	(0)
#define ARM_L2L_DEVICE_NOSHARE	(L2_L_TEX(2))
#define ARM_L2L_DEVICE_SHARE	(L2_B)
#define ARM_L2L_NRML_NOCACHE	(L2_L_TEX(1)|L2_SHARED)
#define ARM_L2L_NRML_IWT_OWT	(L2_C|L2_SHARED)
#define ARM_L2L_NRML_IWB_OWB	(L2_C|L2_B|L2_SHARED)
#define ARM_L2L_NRML_IWBA_OWBA	(L2_L_TEX(1)|L2_C|L2_B|L2_SHARED)

#define ARM_L2S_STRONG_ORD	(0)
#define ARM_L2S_DEVICE_NOSHARE	(L2_S_TEX(2))
#define ARM_L2S_DEVICE_SHARE	(L2_B)
#define ARM_L2S_NRML_NOCACHE	(L2_S_TEX(1)|L2_SHARED)
#define ARM_L2S_NRML_IWT_OWT	(L2_C|L2_SHARED)
#define ARM_L2S_NRML_IWB_OWB	(L2_C|L2_B|L2_SHARED)
#define ARM_L2S_NRML_IWBA_OWBA	(L2_S_TEX(1)|L2_C|L2_B|L2_SHARED)
#endif /* SMP */
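/*
 * Note (an illustrative observation, not in the original header): the
 * SMP tables above differ from the UP tables only in OR-ing the
 * shareable bit (L1_SHARED/L2_SHARED) into the normal-memory encodings;
 * strongly-ordered and device memory keep identical encodings, and each
 * row corresponds by name to one enum mem_type value (STRONG_ORD
 * through NRML_IWBA_OWBA) defined earlier in this file.
 */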
#elif ARM_NMMUS > 1
/* More than one MMU class configured; use variables. */
#define L2_S_PROT_U		pte_l2_s_prot_u
#define L2_S_PROT_W		pte_l2_s_prot_w
#define L2_S_PROT_MASK		pte_l2_s_prot_mask

#define L1_S_CACHE_MASK		pte_l1_s_cache_mask
#define L2_L_CACHE_MASK		pte_l2_l_cache_mask
#define L2_S_CACHE_MASK		pte_l2_s_cache_mask

#define L1_S_PROTO		pte_l1_s_proto
#define L1_C_PROTO		pte_l1_c_proto
#define L2_S_PROTO		pte_l2_s_proto

#elif ARM_MMU_GENERIC != 0
#define L2_S_PROT_U		L2_S_PROT_U_generic
#define L2_S_PROT_W		L2_S_PROT_W_generic
#define L2_S_PROT_MASK		L2_S_PROT_MASK_generic

#define L1_S_CACHE_MASK		L1_S_CACHE_MASK_generic
#define L2_L_CACHE_MASK		L2_L_CACHE_MASK_generic
#define L2_S_CACHE_MASK		L2_S_CACHE_MASK_generic

#define L1_S_PROTO		L1_S_PROTO_generic
#define L1_C_PROTO		L1_C_PROTO_generic
#define L2_S_PROTO		L2_S_PROTO_generic

#elif ARM_MMU_XSCALE == 1
#define L2_S_PROT_U		L2_S_PROT_U_xscale
#define L2_S_PROT_W		L2_S_PROT_W_xscale
#define L2_S_PROT_MASK		L2_S_PROT_MASK_xscale

#define L1_S_CACHE_MASK		L1_S_CACHE_MASK_xscale
#define L2_L_CACHE_MASK		L2_L_CACHE_MASK_xscale
#define L2_S_CACHE_MASK		L2_S_CACHE_MASK_xscale

#define L1_S_PROTO		L1_S_PROTO_xscale
#define L1_C_PROTO		L1_C_PROTO_xscale
#define L2_S_PROTO		L2_S_PROTO_xscale

#endif /* ARM_NMMUS > 1 */

#if defined(CPU_XSCALE_81342) || ARM_ARCH_6 || ARM_ARCH_7A
#define PMAP_NEEDS_PTE_SYNC	1
#define PMAP_INCLUDE_PTE_SYNC
#else
#define PMAP_NEEDS_PTE_SYNC	0
#endif

/*
 * These macros return various bits based on kernel/user and protection.
 * Note that the compiler will usually fold these at compile time.
 */
#if (ARM_MMU_V6 + ARM_MMU_V7) == 0

#define L1_S_PROT_U		(L1_S_AP(AP_U))
#define L1_S_PROT_W		(L1_S_AP(AP_W))
#define L1_S_PROT_MASK		(L1_S_PROT_U|L1_S_PROT_W)
#define L1_S_WRITABLE(pd)	((pd) & L1_S_PROT_W)

#define L1_S_PROT(ku, pr)	((((ku) == PTE_USER) ? L1_S_PROT_U : 0) | \
				 (((pr) & VM_PROT_WRITE) ? L1_S_PROT_W : 0))

#define L2_L_PROT_U		(L2_AP(AP_U))
#define L2_L_PROT_W		(L2_AP(AP_W))
#define L2_L_PROT_MASK		(L2_L_PROT_U|L2_L_PROT_W)

#define L2_L_PROT(ku, pr)	((((ku) == PTE_USER) ? L2_L_PROT_U : 0) | \
				 (((pr) & VM_PROT_WRITE) ? L2_L_PROT_W : 0))

#define L2_S_PROT(ku, pr)	((((ku) == PTE_USER) ? L2_S_PROT_U : 0) | \
				 (((pr) & VM_PROT_WRITE) ? L2_S_PROT_W : 0))
#else
#define L1_S_PROT_U		(L1_S_AP(AP_U))
#define L1_S_PROT_W		(L1_S_APX)		/* Write disable */
#define L1_S_PROT_MASK		(L1_S_PROT_W|L1_S_PROT_U)
#define L1_S_REF		(L1_S_AP(AP_REF))	/* Reference flag */
#define L1_S_WRITABLE(pd)	(!((pd) & L1_S_PROT_W))
#define L1_S_EXECUTABLE(pd)	(!((pd) & L1_S_XN))
#define L1_S_REFERENCED(pd)	((pd) & L1_S_REF)

#define L1_S_PROT(ku, pr)	(((((ku) == PTE_KERNEL) ? 0 : L1_S_PROT_U) | \
				 (((pr) & VM_PROT_WRITE) ? 0 : L1_S_PROT_W) | \
				 (((pr) & VM_PROT_EXECUTE) ? 0 : L1_S_XN)))

#define L2_L_PROT_MASK		(L2_APX|L2_AP0(0x3))
#define L2_L_PROT(ku, pr)	(L2_L_PROT_MASK & ~((((ku) == PTE_KERNEL) ? L2_S_PROT_U : 0) | \
				 (((pr) & VM_PROT_WRITE) ? L2_APX : 0)))

#define L2_S_PROT(ku, pr)	(L2_S_PROT_MASK & ~((((ku) == PTE_KERNEL) ? L2_S_PROT_U : 0) | \
				 (((pr) & VM_PROT_WRITE) ? L2_APX : 0)))

#endif
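/*
 * Worked expansion (an illustrative note, ARMv6/v7 branch above): for a
 * kernel read/write mapping, L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE)
 * strips both L2_S_PROT_U and L2_APX out of L2_S_PROT_MASK, leaving
 * AP[2] = 0 and AP[1] = 0, i.e. kernel R/W with no user access per the
 * AP[2:1] table earlier in this file; the reference bit AP[0] is
 * managed separately via L2_S_REF.
 */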
/*
 * Macros to test if a mapping is mappable with an L1 Section mapping
 * or an L2 Large Page mapping.
 */
#define L1_S_MAPPABLE_P(va, pa, size)					\
	((((va) | (pa)) & L1_S_OFFSET) == 0 && (size) >= L1_S_SIZE)

#define L2_L_MAPPABLE_P(va, pa, size)					\
	((((va) | (pa)) & L2_L_OFFSET) == 0 && (size) >= L2_L_SIZE)
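/*
 * Worked example (illustrative): va = 0xc0100000, pa = 0x80100000,
 * size = 1MB satisfies L1_S_MAPPABLE_P because both addresses are
 * 1MB-aligned (no bits set under L1_S_OFFSET) and the size reaches
 * L1_S_SIZE; the same range trivially passes the 64KB-granule
 * L2_L_MAPPABLE_P test as well.
 */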
/*
 * Provide a fallback in case we were not able to determine it at
 * compile-time.
 */
#ifndef PMAP_NEEDS_PTE_SYNC
#define PMAP_NEEDS_PTE_SYNC	pmap_needs_pte_sync
#define PMAP_INCLUDE_PTE_SYNC
#endif

#ifdef ARM_L2_PIPT
#define _sync_l2(pte, size)	cpu_l2cache_wb_range(vtophys(pte), size)
#else
#define _sync_l2(pte, size)	cpu_l2cache_wb_range(pte, size)
#endif

#define PTE_SYNC(pte)							\
do {									\
	if (PMAP_NEEDS_PTE_SYNC) {					\
		cpu_dcache_wb_range((vm_offset_t)(pte), sizeof(pt_entry_t));\
		cpu_drain_writebuf();					\
		_sync_l2((vm_offset_t)(pte), sizeof(pt_entry_t));	\
	} else								\
		cpu_drain_writebuf();					\
} while (/*CONSTCOND*/0)

#define PTE_SYNC_RANGE(pte, cnt)					\
do {									\
	if (PMAP_NEEDS_PTE_SYNC) {					\
		cpu_dcache_wb_range((vm_offset_t)(pte),			\
		    (cnt) << 2); /* * sizeof(pt_entry_t) */		\
		cpu_drain_writebuf();					\
		_sync_l2((vm_offset_t)(pte),				\
		    (cnt) << 2); /* * sizeof(pt_entry_t) */		\
	} else								\
		cpu_drain_writebuf();					\
} while (/*CONSTCOND*/0)

extern pt_entry_t	pte_l1_s_cache_mode;
extern pt_entry_t	pte_l1_s_cache_mask;

extern pt_entry_t	pte_l2_l_cache_mode;
extern pt_entry_t	pte_l2_l_cache_mask;

extern pt_entry_t	pte_l2_s_cache_mode;
extern pt_entry_t	pte_l2_s_cache_mask;

extern pt_entry_t	pte_l1_s_cache_mode_pt;
extern pt_entry_t	pte_l2_l_cache_mode_pt;
extern pt_entry_t	pte_l2_s_cache_mode_pt;

extern pt_entry_t	pte_l2_s_prot_u;
extern pt_entry_t	pte_l2_s_prot_w;
extern pt_entry_t	pte_l2_s_prot_mask;

extern pt_entry_t	pte_l1_s_proto;
extern pt_entry_t	pte_l1_c_proto;
extern pt_entry_t	pte_l2_s_proto;

extern void (*pmap_copy_page_func)(vm_paddr_t, vm_paddr_t);
extern void (*pmap_copy_page_offs_func)(vm_paddr_t a_phys,
    vm_offset_t a_offs, vm_paddr_t b_phys, vm_offset_t b_offs, int cnt);
extern void (*pmap_zero_page_func)(vm_paddr_t, int, int);

#if (ARM_MMU_GENERIC + ARM_MMU_V6 + ARM_MMU_V7) != 0 || defined(CPU_XSCALE_81342)
void	pmap_copy_page_generic(vm_paddr_t, vm_paddr_t);
void	pmap_zero_page_generic(vm_paddr_t, int, int);

void	pmap_pte_init_generic(void);
#if (ARM_MMU_V6 + ARM_MMU_V7) != 0
void	pmap_pte_init_mmu_v6(void);
#endif /* (ARM_MMU_V6 + ARM_MMU_V7) != 0 */
#endif /* (ARM_MMU_GENERIC + ARM_MMU_V6 + ARM_MMU_V7) != 0 */

#if ARM_MMU_XSCALE == 1
void	pmap_copy_page_xscale(vm_paddr_t, vm_paddr_t);
void	pmap_zero_page_xscale(vm_paddr_t, int, int);

void	pmap_pte_init_xscale(void);

void	xscale_setup_minidata(vm_offset_t, vm_offset_t, vm_offset_t);

void	pmap_use_minicache(vm_offset_t, vm_size_t);
#endif /* ARM_MMU_XSCALE == 1 */

#if defined(CPU_XSCALE_81342)
#define ARM_HAVE_SUPERSECTIONS
#endif

#define PTE_KERNEL	0
#define PTE_USER	1

#define l1pte_valid(pde)	((pde) != 0)
#define l1pte_section_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_S)
#define l1pte_page_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_C)
#define l1pte_fpage_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_F)

#define l2pte_index(v)		(((v) & L2_ADDR_BITS) >> L2_S_SHIFT)
#define l2pte_valid(pte)	((pte) != 0)
#define l2pte_pa(pte)		((pte) & L2_S_FRAME)
#define l2pte_minidata(pte)	(((pte) & \
				 (L2_B | L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X)))\
				 == (L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X)))

/* L1 and L2 page table macros */
#define pmap_pde_v(pde)		l1pte_valid(*(pde))
#define pmap_pde_section(pde)	l1pte_section_p(*(pde))
#define pmap_pde_page(pde)	l1pte_page_p(*(pde))
#define pmap_pde_fpage(pde)	l1pte_fpage_p(*(pde))

#define pmap_pte_v(pte)		l2pte_valid(*(pte))
#define pmap_pte_pa(pte)	l2pte_pa(*(pte))

/*
 * Flags that indicate attributes of pages or mappings of pages.
 *
 * The PVF_MOD and PVF_REF flags are stored in the mdpage for each
 * page.  PVF_WIRED, PVF_WRITE, and PVF_NC are kept in individual
 * pv_entry's for each page.  They live in the same "namespace" so
 * that we can clear multiple attributes at a time.
 *
 * Note the "non-cacheable" flag generally means the page has
 * multiple mappings in a given address space.
 */
#define PVF_MOD		0x01	/* page is modified */
#define PVF_REF		0x02	/* page is referenced */
#define PVF_WIRED	0x04	/* mapping is wired */
#define PVF_WRITE	0x08	/* mapping is writable */
#define PVF_EXEC	0x10	/* mapping is executable */
#define PVF_NC		0x20	/* mapping is non-cacheable */
#define PVF_MWC		0x40	/* mapping is used multiple times in userland */
#define PVF_UNMAN	0x80	/* mapping is unmanaged */

void vector_page_setprot(int);

#define SECTION_CACHE	0x1
#define SECTION_PT	0x2
void	pmap_kenter_section(vm_offset_t, vm_paddr_t, int flags);
#ifdef ARM_HAVE_SUPERSECTIONS
void	pmap_kenter_supersection(vm_offset_t, uint64_t, int flags);
#endif

extern char *_tmppt;

void	pmap_postinit(void);

extern vm_paddr_t dump_avail[];
#endif	/* _KERNEL */

#endif	/* !LOCORE */

#endif	/* !_MACHINE_PMAP_H_ */
#endif	/* __ARM_ARCH >= 6 */