1/*- 2 * Copyright (c) 1991 Regents of the University of California. 3 * All rights reserved. 4 * 5 * This code is derived from software contributed to Berkeley by 6 * the Systems Programming Group of the University of Utah Computer 7 * Science Department and William Jolitz of UUNET Technologies Inc. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 3. All advertising materials mentioning features or use of this software 18 * must display the following acknowledgement: 19 * This product includes software developed by the University of 20 * California, Berkeley and its contributors. 21 * 4. Neither the name of the University nor the names of its contributors 22 * may be used to endorse or promote products derived from this software 23 * without specific prior written permission. 24 * 25 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 28 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 30 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 31 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 35 * SUCH DAMAGE. 36 * 37 * Derived from hp300 version by Mike Hibler, this version by William 38 * Jolitz uses a recursive map [a pde points to the page directory] to 39 * map the page tables using the pagetables themselves. This is done to 40 * reduce the impact on kernel virtual memory for lots of sparse address 41 * space, and to reduce the cost of memory to each process. 42 * 43 * from: hp300: @(#)pmap.h 7.2 (Berkeley) 12/16/90 44 * from: @(#)pmap.h 7.4 (Berkeley) 5/12/91 45 * from: FreeBSD: src/sys/i386/include/pmap.h,v 1.70 2000/11/30 46 * 47 * $FreeBSD$ 48 */ 49 50#ifndef _MACHINE_PMAP_V4_H_ 51#define _MACHINE_PMAP_V4_H_ 52 53#include <machine/pte-v4.h> 54 55/* 56 * Pte related macros 57 */ 58#define PTE_NOCACHE 1 59#define PTE_CACHE 2 60#define PTE_DEVICE PTE_NOCACHE 61#define PTE_PAGETABLE 3 62 63enum mem_type { 64 STRONG_ORD = 0, 65 DEVICE_NOSHARE, 66 DEVICE_SHARE, 67 NRML_NOCACHE, 68 NRML_IWT_OWT, 69 NRML_IWB_OWB, 70 NRML_IWBA_OWBA 71}; 72 73#ifndef LOCORE 74 75#include <sys/queue.h> 76#include <sys/_cpuset.h> 77#include <sys/_lock.h> 78#include <sys/_mutex.h> 79 80#define PDESIZE sizeof(pd_entry_t) /* for assembly files */ 81#define PTESIZE sizeof(pt_entry_t) /* for assembly files */ 82 83#define pmap_page_get_memattr(m) ((m)->md.pv_memattr) 84#define pmap_page_is_mapped(m) (!TAILQ_EMPTY(&(m)->md.pv_list)) 85 86/* 87 * Pmap stuff 88 */ 89 90/* 91 * This structure is used to hold a virtual<->physical address 92 * association and is used mostly by bootstrap code 93 */ 94struct pv_addr { 95 SLIST_ENTRY(pv_addr) pv_list; 96 vm_offset_t pv_va; 97 vm_paddr_t pv_pa; 98}; 99 100struct pv_entry; 101struct pv_chunk; 102 103struct md_page { 104 int pvh_attrs; 105 vm_memattr_t pv_memattr; 106 vm_offset_t pv_kva; /* first kernel VA mapping */ 107 TAILQ_HEAD(,pv_entry) pv_list; 108}; 109 110struct l1_ttable; 111struct l2_dtable; 112 113 114/* 115 * The number of L2 descriptor tables which can be tracked by an l2_dtable. 116 * A bucket size of 16 provides for 16MB of contiguous virtual address 117 * space per l2_dtable. Most processes will, therefore, require only two or 118 * three of these to map their whole working set. 119 */ 120#define L2_BUCKET_LOG2 4 121#define L2_BUCKET_SIZE (1 << L2_BUCKET_LOG2) 122/* 123 * Given the above "L2-descriptors-per-l2_dtable" constant, the number 124 * of l2_dtable structures required to track all possible page descriptors 125 * mappable by an L1 translation table is given by the following constants: 126 */ 127#define L2_LOG2 ((32 - L1_S_SHIFT) - L2_BUCKET_LOG2) 128#define L2_SIZE (1 << L2_LOG2) 129 130struct pmap { 131 struct mtx pm_mtx; 132 u_int8_t pm_domain; 133 struct l1_ttable *pm_l1; 134 struct l2_dtable *pm_l2[L2_SIZE]; 135 cpuset_t pm_active; /* active on cpus */ 136 struct pmap_statistics pm_stats; /* pmap statictics */ 137 TAILQ_HEAD(,pv_entry) pm_pvlist; /* list of mappings in pmap */ 138}; 139 140typedef struct pmap *pmap_t; 141 142#ifdef _KERNEL 143extern struct pmap kernel_pmap_store; 144#define kernel_pmap (&kernel_pmap_store) 145 146#define PMAP_ASSERT_LOCKED(pmap) \ 147 mtx_assert(&(pmap)->pm_mtx, MA_OWNED) 148#define PMAP_LOCK(pmap) mtx_lock(&(pmap)->pm_mtx) 149#define PMAP_LOCK_DESTROY(pmap) mtx_destroy(&(pmap)->pm_mtx) 150#define PMAP_LOCK_INIT(pmap) mtx_init(&(pmap)->pm_mtx, "pmap", \ 151 NULL, MTX_DEF | MTX_DUPOK) 152#define PMAP_OWNED(pmap) mtx_owned(&(pmap)->pm_mtx) 153#define PMAP_MTX(pmap) (&(pmap)->pm_mtx) 154#define PMAP_TRYLOCK(pmap) mtx_trylock(&(pmap)->pm_mtx) 155#define PMAP_UNLOCK(pmap) mtx_unlock(&(pmap)->pm_mtx) 156#endif 157 158/* 159 * For each vm_page_t, there is a list of all currently valid virtual 160 * mappings of that page. An entry is a pv_entry_t, the list is pv_list. 161 */ 162typedef struct pv_entry { 163 vm_offset_t pv_va; /* virtual address for mapping */ 164 TAILQ_ENTRY(pv_entry) pv_list; 165 int pv_flags; /* flags (wired, etc...) */ 166 pmap_t pv_pmap; /* pmap where mapping lies */ 167 TAILQ_ENTRY(pv_entry) pv_plist; 168} *pv_entry_t; 169 170/* 171 * pv_entries are allocated in chunks per-process. This avoids the 172 * need to track per-pmap assignments. 173 */ 174#define _NPCM 8 175#define _NPCPV 252 176 177struct pv_chunk { 178 pmap_t pc_pmap; 179 TAILQ_ENTRY(pv_chunk) pc_list; 180 uint32_t pc_map[_NPCM]; /* bitmap; 1 = free */ 181 uint32_t pc_dummy[3]; /* aligns pv_chunk to 4KB */ 182 TAILQ_ENTRY(pv_chunk) pc_lru; 183 struct pv_entry pc_pventry[_NPCPV]; 184}; 185 186#ifdef _KERNEL 187 188boolean_t pmap_get_pde_pte(pmap_t, vm_offset_t, pd_entry_t **, pt_entry_t **); 189 190/* 191 * virtual address to page table entry and 192 * to physical address. Likewise for alternate address space. 193 * Note: these work recursively, thus vtopte of a pte will give 194 * the corresponding pde that in turn maps it. 195 */ 196 197/* 198 * The current top of kernel VM. 199 */ 200extern vm_offset_t pmap_curmaxkvaddr; 201 202/* Virtual address to page table entry */ 203static __inline pt_entry_t * 204vtopte(vm_offset_t va) 205{ 206 pd_entry_t *pdep; 207 pt_entry_t *ptep; 208 209 if (pmap_get_pde_pte(kernel_pmap, va, &pdep, &ptep) == FALSE) 210 return (NULL); 211 return (ptep); 212} 213 214void pmap_bootstrap(vm_offset_t firstaddr, struct pv_addr *l1pt); 215int pmap_change_attr(vm_offset_t, vm_size_t, int); 216void pmap_kenter(vm_offset_t va, vm_paddr_t pa); 217void pmap_kenter_nocache(vm_offset_t va, vm_paddr_t pa); 218void pmap_kenter_user(vm_offset_t va, vm_paddr_t pa); 219vm_paddr_t pmap_dump_kextract(vm_offset_t, pt2_entry_t *); 220void pmap_kremove(vm_offset_t); 221vm_page_t pmap_use_pt(pmap_t, vm_offset_t); 222void pmap_debug(int); 223void pmap_map_section(vm_offset_t, vm_offset_t, vm_offset_t, int, int); 224void pmap_link_l2pt(vm_offset_t, vm_offset_t, struct pv_addr *); 225vm_size_t pmap_map_chunk(vm_offset_t, vm_offset_t, vm_offset_t, vm_size_t, int, int); 226void 227pmap_map_entry(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa, int prot, 228 int cache); 229int pmap_fault_fixup(pmap_t, vm_offset_t, vm_prot_t, int); 230 231/* 232 * Definitions for MMU domains 233 */ 234#define PMAP_DOMAINS 15 /* 15 'user' domains (1-15) */ 235#define PMAP_DOMAIN_KERNEL 0 /* The kernel uses domain #0 */ 236 237/* 238 * The new pmap ensures that page-tables are always mapping Write-Thru. 239 * Thus, on some platforms we can run fast and loose and avoid syncing PTEs 240 * on every change. 241 * 242 * Unfortunately, not all CPUs have a write-through cache mode. So we 243 * define PMAP_NEEDS_PTE_SYNC for C code to conditionally do PTE syncs, 244 * and if there is the chance for PTE syncs to be needed, we define 245 * PMAP_INCLUDE_PTE_SYNC so e.g. assembly code can include (and run) 246 * the code. 247 */ 248extern int pmap_needs_pte_sync; 249 250/* 251 * These macros define the various bit masks in the PTE. 252 */ 253 254#define L1_S_CACHE_MASK (L1_S_B|L1_S_C) 255#define L2_L_CACHE_MASK (L2_B|L2_C) 256#define L2_S_PROT_U (L2_AP(AP_U)) 257#define L2_S_PROT_W (L2_AP(AP_W)) 258#define L2_S_PROT_MASK (L2_S_PROT_U|L2_S_PROT_W) 259#define L2_S_CACHE_MASK (L2_B|L2_C) 260#define L1_S_PROTO (L1_TYPE_S | L1_S_IMP) 261#define L1_C_PROTO (L1_TYPE_C | L1_C_IMP2) 262#define L2_L_PROTO (L2_TYPE_L) 263#define L2_S_PROTO (L2_TYPE_S) 264 265/* 266 * User-visible names for the ones that vary with MMU class. 267 */ 268#define L2_AP(x) (L2_AP0(x) | L2_AP1(x) | L2_AP2(x) | L2_AP3(x)) 269 270#if defined(CPU_XSCALE_81342) 271#define CPU_XSCALE_CORE3 272#define PMAP_NEEDS_PTE_SYNC 1 273#define PMAP_INCLUDE_PTE_SYNC 274#else 275#define PMAP_NEEDS_PTE_SYNC 0 276#endif 277 278/* 279 * These macros return various bits based on kernel/user and protection. 280 * Note that the compiler will usually fold these at compile time. 281 */ 282#define L1_S_PROT_U (L1_S_AP(AP_U)) 283#define L1_S_PROT_W (L1_S_AP(AP_W)) 284#define L1_S_PROT_MASK (L1_S_PROT_U|L1_S_PROT_W) 285#define L1_S_WRITABLE(pd) ((pd) & L1_S_PROT_W) 286 287#define L1_S_PROT(ku, pr) ((((ku) == PTE_USER) ? L1_S_PROT_U : 0) | \ 288 (((pr) & VM_PROT_WRITE) ? L1_S_PROT_W : 0)) 289 290#define L2_L_PROT_U (L2_AP(AP_U)) 291#define L2_L_PROT_W (L2_AP(AP_W)) 292#define L2_L_PROT_MASK (L2_L_PROT_U|L2_L_PROT_W) 293 294#define L2_L_PROT(ku, pr) ((((ku) == PTE_USER) ? L2_L_PROT_U : 0) | \ 295 (((pr) & VM_PROT_WRITE) ? L2_L_PROT_W : 0)) 296 297#define L2_S_PROT(ku, pr) ((((ku) == PTE_USER) ? L2_S_PROT_U : 0) | \ 298 (((pr) & VM_PROT_WRITE) ? L2_S_PROT_W : 0)) 299 300/* 301 * Macros to test if a mapping is mappable with an L1 Section mapping 302 * or an L2 Large Page mapping. 303 */ 304#define L1_S_MAPPABLE_P(va, pa, size) \ 305 ((((va) | (pa)) & L1_S_OFFSET) == 0 && (size) >= L1_S_SIZE) 306 307#define L2_L_MAPPABLE_P(va, pa, size) \ 308 ((((va) | (pa)) & L2_L_OFFSET) == 0 && (size) >= L2_L_SIZE) 309 310/* 311 * Provide a fallback in case we were not able to determine it at 312 * compile-time. 313 */ 314#ifndef PMAP_NEEDS_PTE_SYNC 315#define PMAP_NEEDS_PTE_SYNC pmap_needs_pte_sync 316#define PMAP_INCLUDE_PTE_SYNC 317#endif 318 319#ifdef ARM_L2_PIPT 320#define _sync_l2(pte, size) cpu_l2cache_wb_range(vtophys(pte), size) 321#else 322#define _sync_l2(pte, size) cpu_l2cache_wb_range(pte, size) 323#endif 324 325#define PTE_SYNC(pte) \ 326do { \ 327 if (PMAP_NEEDS_PTE_SYNC) { \ 328 cpu_dcache_wb_range((vm_offset_t)(pte), sizeof(pt_entry_t));\ 329 cpu_drain_writebuf(); \ 330 _sync_l2((vm_offset_t)(pte), sizeof(pt_entry_t));\ 331 } else \ 332 cpu_drain_writebuf(); \ 333} while (/*CONSTCOND*/0) 334 335#define PTE_SYNC_RANGE(pte, cnt) \ 336do { \ 337 if (PMAP_NEEDS_PTE_SYNC) { \ 338 cpu_dcache_wb_range((vm_offset_t)(pte), \ 339 (cnt) << 2); /* * sizeof(pt_entry_t) */ \ 340 cpu_drain_writebuf(); \ 341 _sync_l2((vm_offset_t)(pte), \ 342 (cnt) << 2); /* * sizeof(pt_entry_t) */ \ 343 } else \ 344 cpu_drain_writebuf(); \ 345} while (/*CONSTCOND*/0) 346 347void pmap_pte_init_generic(void); 348 349#define PTE_KERNEL 0 350#define PTE_USER 1 351 352/* 353 * Flags that indicate attributes of pages or mappings of pages. 354 * 355 * The PVF_MOD and PVF_REF flags are stored in the mdpage for each 356 * page. PVF_WIRED, PVF_WRITE, and PVF_NC are kept in individual 357 * pv_entry's for each page. They live in the same "namespace" so 358 * that we can clear multiple attributes at a time. 359 * 360 * Note the "non-cacheable" flag generally means the page has 361 * multiple mappings in a given address space. 362 */ 363#define PVF_MOD 0x01 /* page is modified */ 364#define PVF_REF 0x02 /* page is referenced */ 365#define PVF_WIRED 0x04 /* mapping is wired */ 366#define PVF_WRITE 0x08 /* mapping is writable */ 367#define PVF_EXEC 0x10 /* mapping is executable */ 368#define PVF_NC 0x20 /* mapping is non-cacheable */ 369#define PVF_MWC 0x40 /* mapping is used multiple times in userland */ 370#define PVF_UNMAN 0x80 /* mapping is unmanaged */ 371 372void vector_page_setprot(int); 373 374#define SECTION_CACHE 0x1 375#define SECTION_PT 0x2 376void pmap_postinit(void); 377 378#endif /* _KERNEL */ 379 380#endif /* !LOCORE */ 381 382#endif /* !_MACHINE_PMAP_V4_H_ */ 383