pmap-v4.h revision 194459
/*-
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Derived from the hp300 version by Mike Hibler; this version by William
 * Jolitz uses a recursive map [a pde points to the page directory] to
 * map the page tables using the page tables themselves.  This is done to
 * reduce the impact on kernel virtual memory of large sparse address
 * spaces, and to reduce the cost of memory to each process.
 *
 * from: hp300: @(#)pmap.h	7.2 (Berkeley) 12/16/90
 * from: @(#)pmap.h	7.4 (Berkeley) 5/12/91
 * from: FreeBSD: src/sys/i386/include/pmap.h,v 1.70 2000/11/30
 *
 * $FreeBSD: head/sys/arm/include/pmap.h 194459 2009-06-18 20:42:37Z thompsa $
 */

#ifndef _MACHINE_PMAP_H_
#define	_MACHINE_PMAP_H_

#include <machine/pte.h>
#include <machine/cpuconf.h>

/*
 * Pte related macros
 */
#define	PTE_NOCACHE	0
#define	PTE_CACHE	1
#define	PTE_PAGETABLE	2

#ifndef LOCORE

#include <sys/queue.h>
#include <sys/_lock.h>
#include <sys/_mutex.h>

#define	PDESIZE		sizeof(pd_entry_t)	/* for assembly files */
#define	PTESIZE		sizeof(pt_entry_t)	/* for assembly files */

#ifdef _KERNEL

#define	vtophys(va)		pmap_extract(pmap_kernel(), (vm_offset_t)(va))
#define	pmap_kextract(va)	pmap_extract(pmap_kernel(), (vm_offset_t)(va))

#endif

#define	pmap_page_is_mapped(m)	(!TAILQ_EMPTY(&(m)->md.pv_list))

/*
 * Pmap stuff
 */

/*
 * This structure is used to hold a virtual<->physical address
 * association and is used mostly by bootstrap code.
 */
struct pv_addr {
	SLIST_ENTRY(pv_addr) pv_list;
	vm_offset_t	pv_va;
	vm_paddr_t	pv_pa;
};

struct pv_entry;

struct md_page {
	int			pvh_attrs;
	vm_offset_t		pv_kva;		/* first kernel VA mapping */
	TAILQ_HEAD(,pv_entry)	pv_list;
};

#define	VM_MDPAGE_INIT(pg)						\
do {									\
	TAILQ_INIT(&(pg)->md.pv_list);					\
	mtx_init(&(pg)->md.pvh_mtx, "MDPAGE Mutex", NULL, MTX_DEF);	\
	(pg)->md.pvh_attrs = 0;						\
} while (/*CONSTCOND*/0)

struct l1_ttable;
struct l2_dtable;

/*
 * The number of L2 descriptor tables which can be tracked by an l2_dtable.
 * A bucket size of 16 provides for 16MB of contiguous virtual address
 * space per l2_dtable (16 x 1MB L1 sections).  Most processes will,
 * therefore, require only two or three of these to map their whole
 * working set.
 */
#define	L2_BUCKET_LOG2	4
#define	L2_BUCKET_SIZE	(1 << L2_BUCKET_LOG2)

/*
 * Given the above "L2-descriptors-per-l2_dtable" constant, the number
 * of l2_dtable structures required to track all possible page descriptors
 * mappable by an L1 translation table is given by the following constants.
 * With L1_S_SHIFT == 20, L2_LOG2 is (32 - 20) - 4 == 8, so one L1
 * translation table needs at most L2_SIZE == 256 l2_dtables.
 */
#define	L2_LOG2		((32 - L1_S_SHIFT) - L2_BUCKET_LOG2)
#define	L2_SIZE		(1 << L2_LOG2)

struct pmap {
	struct mtx		pm_mtx;
	u_int8_t		pm_domain;
	struct l1_ttable	*pm_l1;
	struct l2_dtable	*pm_l2[L2_SIZE];
	pd_entry_t		*pm_pdir;	/* KVA of page directory */
	int			pm_active;	/* active on cpus */
	struct pmap_statistics	pm_stats;	/* pmap statistics */
	TAILQ_HEAD(,pv_entry)	pm_pvlist;	/* list of mappings in pmap */
};

typedef struct pmap *pmap_t;

#ifdef _KERNEL
extern struct pmap	kernel_pmap_store;
#define	kernel_pmap	(&kernel_pmap_store)
#define	pmap_kernel()	kernel_pmap

#define	PMAP_ASSERT_LOCKED(pmap) \
				mtx_assert(&(pmap)->pm_mtx, MA_OWNED)
#define	PMAP_LOCK(pmap)		mtx_lock(&(pmap)->pm_mtx)
#define	PMAP_LOCK_DESTROY(pmap)	mtx_destroy(&(pmap)->pm_mtx)
#define	PMAP_LOCK_INIT(pmap)	mtx_init(&(pmap)->pm_mtx, "pmap", \
				    NULL, MTX_DEF | MTX_DUPOK)
#define	PMAP_OWNED(pmap)	mtx_owned(&(pmap)->pm_mtx)
#define	PMAP_MTX(pmap)		(&(pmap)->pm_mtx)
#define	PMAP_TRYLOCK(pmap)	mtx_trylock(&(pmap)->pm_mtx)
#define	PMAP_UNLOCK(pmap)	mtx_unlock(&(pmap)->pm_mtx)
#endif
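
/*
 * Editorial usage sketch (not part of the original header): the pmap
 * mutex brackets any inspection or update of a pmap's software state,
 * e.g. its pm_l2[] array or pm_stats counters:
 *
 *	PMAP_LOCK(pm);
 *	... walk or modify pm->pm_l2[] / pm->pm_stats ...
 *	PMAP_UNLOCK(pm);
 *
 * MTX_DUPOK in PMAP_LOCK_INIT allows two different pmap locks to be
 * held at once, as happens when copying between address spaces.
 */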

/*
 * For each vm_page_t, there is a list of all currently valid virtual
 * mappings of that page.  An entry is a pv_entry_t, the list is pv_list.
 */
typedef struct pv_entry {
	pmap_t		pv_pmap;	/* pmap where mapping lies */
	vm_offset_t	pv_va;		/* virtual address for mapping */
	TAILQ_ENTRY(pv_entry)	pv_list;
	TAILQ_ENTRY(pv_entry)	pv_plist;
	int		pv_flags;	/* flags (wired, etc...) */
} *pv_entry_t;

#ifdef _KERNEL

boolean_t pmap_get_pde_pte(pmap_t, vm_offset_t, pd_entry_t **, pt_entry_t **);

/*
 * virtual address to page table entry and
 * to physical address. Likewise for alternate address space.
 * Note: these work recursively, thus vtopte of a pte will give
 * the corresponding pde that in turn maps it.
 */

/*
 * The current top of kernel VM.
 */
extern vm_offset_t pmap_curmaxkvaddr;

struct pcb;

void	pmap_set_pcb_pagedir(pmap_t, struct pcb *);

/* Virtual address to page table entry */
static __inline pt_entry_t *
vtopte(vm_offset_t va)
{
	pd_entry_t *pdep;
	pt_entry_t *ptep;

	if (pmap_get_pde_pte(pmap_kernel(), va, &pdep, &ptep) == FALSE)
		return (NULL);
	return (ptep);
}

extern vm_offset_t phys_avail[];
extern vm_offset_t virtual_avail;
extern vm_offset_t virtual_end;

void	pmap_bootstrap(vm_offset_t, vm_offset_t, struct pv_addr *);
void	pmap_kenter(vm_offset_t va, vm_paddr_t pa);
void	pmap_kenter_nocache(vm_offset_t va, vm_paddr_t pa);
void	*pmap_kenter_temp(vm_paddr_t pa, int i);
void	pmap_kenter_user(vm_offset_t va, vm_paddr_t pa);
void	pmap_kremove(vm_offset_t);
void	*pmap_mapdev(vm_offset_t, vm_size_t);
void	pmap_unmapdev(vm_offset_t, vm_size_t);
vm_page_t	pmap_use_pt(pmap_t, vm_offset_t);
void	pmap_debug(int);
void	pmap_map_section(vm_offset_t, vm_offset_t, vm_offset_t, int, int);
void	pmap_link_l2pt(vm_offset_t, vm_offset_t, struct pv_addr *);
vm_size_t	pmap_map_chunk(vm_offset_t, vm_offset_t, vm_offset_t,
	    vm_size_t, int, int);
void	pmap_map_entry(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa,
	    int prot, int cache);
int	pmap_fault_fixup(pmap_t, vm_offset_t, vm_prot_t, int);

/*
 * Definitions for MMU domains
 */
#define	PMAP_DOMAINS		15	/* 15 'user' domains (1-15) */
#define	PMAP_DOMAIN_KERNEL	0	/* The kernel uses domain #0 */
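
/*
 * Editorial sketch of how a domain is consumed (drawn from the arm pmap
 * implementation, not defined in this header): each user pmap is given
 * a domain in 1..PMAP_DOMAINS, and that number is folded into the L1
 * entries pointing at the pmap's L2 tables, e.g.
 *
 *	*pdep = L1_C_PROTO | l2_pa | L1_C_DOM(pm->pm_domain);
 *
 * L1_C_DOM() comes from <machine/pte.h>; "l2_pa" here is a hypothetical
 * physical address of an L2 page table.
 */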

/*
 * The new pmap ensures that page-tables are always mapping Write-Thru.
 * Thus, on some platforms we can run fast and loose and avoid syncing PTEs
 * on every change.
 *
 * Unfortunately, not all CPUs have a write-through cache mode.  So we
 * define PMAP_NEEDS_PTE_SYNC for C code to conditionally do PTE syncs,
 * and if there is the chance for PTE syncs to be needed, we define
 * PMAP_INCLUDE_PTE_SYNC so e.g. assembly code can include (and run)
 * the code.
 */
extern int pmap_needs_pte_sync;

/*
 * These macros define the various bit masks in the PTE.
 *
 * We use these macros since we use different bits on different processor
 * models.
 */
#define	L1_S_PROT_U		(L1_S_AP(AP_U))
#define	L1_S_PROT_W		(L1_S_AP(AP_W))
#define	L1_S_PROT_MASK		(L1_S_PROT_U|L1_S_PROT_W)

#define	L1_S_CACHE_MASK_generic	(L1_S_B|L1_S_C)
#define	L1_S_CACHE_MASK_xscale	(L1_S_B|L1_S_C|L1_S_XSCALE_TEX(TEX_XSCALE_X)|\
				    L1_S_XSCALE_TEX(TEX_XSCALE_T))

#define	L2_L_PROT_U		(L2_AP(AP_U))
#define	L2_L_PROT_W		(L2_AP(AP_W))
#define	L2_L_PROT_MASK		(L2_L_PROT_U|L2_L_PROT_W)

#define	L2_L_CACHE_MASK_generic	(L2_B|L2_C)
#define	L2_L_CACHE_MASK_xscale	(L2_B|L2_C|L2_XSCALE_L_TEX(TEX_XSCALE_X) | \
				    L2_XSCALE_L_TEX(TEX_XSCALE_T))

#define	L2_S_PROT_U_generic	(L2_AP(AP_U))
#define	L2_S_PROT_W_generic	(L2_AP(AP_W))
#define	L2_S_PROT_MASK_generic	(L2_S_PROT_U|L2_S_PROT_W)

#define	L2_S_PROT_U_xscale	(L2_AP0(AP_U))
#define	L2_S_PROT_W_xscale	(L2_AP0(AP_W))
#define	L2_S_PROT_MASK_xscale	(L2_S_PROT_U|L2_S_PROT_W)

#define	L2_S_CACHE_MASK_generic	(L2_B|L2_C)
#define	L2_S_CACHE_MASK_xscale	(L2_B|L2_C|L2_XSCALE_T_TEX(TEX_XSCALE_X)| \
				    L2_XSCALE_T_TEX(TEX_XSCALE_X))

#define	L1_S_PROTO_generic	(L1_TYPE_S | L1_S_IMP)
#define	L1_S_PROTO_xscale	(L1_TYPE_S)

#define	L1_C_PROTO_generic	(L1_TYPE_C | L1_C_IMP2)
#define	L1_C_PROTO_xscale	(L1_TYPE_C)

#define	L2_L_PROTO		(L2_TYPE_L)

#define	L2_S_PROTO_generic	(L2_TYPE_S)
#define	L2_S_PROTO_xscale	(L2_TYPE_XSCALE_XS)

/*
 * User-visible names for the ones that vary with MMU class.
 */

#if ARM_NMMUS > 1
/* More than one MMU class configured; use variables. */
#define	L2_S_PROT_U		pte_l2_s_prot_u
#define	L2_S_PROT_W		pte_l2_s_prot_w
#define	L2_S_PROT_MASK		pte_l2_s_prot_mask

#define	L1_S_CACHE_MASK		pte_l1_s_cache_mask
#define	L2_L_CACHE_MASK		pte_l2_l_cache_mask
#define	L2_S_CACHE_MASK		pte_l2_s_cache_mask

#define	L1_S_PROTO		pte_l1_s_proto
#define	L1_C_PROTO		pte_l1_c_proto
#define	L2_S_PROTO		pte_l2_s_proto

#elif (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0
#define	L2_S_PROT_U		L2_S_PROT_U_generic
#define	L2_S_PROT_W		L2_S_PROT_W_generic
#define	L2_S_PROT_MASK		L2_S_PROT_MASK_generic

#define	L1_S_CACHE_MASK		L1_S_CACHE_MASK_generic
#define	L2_L_CACHE_MASK		L2_L_CACHE_MASK_generic
#define	L2_S_CACHE_MASK		L2_S_CACHE_MASK_generic

#define	L1_S_PROTO		L1_S_PROTO_generic
#define	L1_C_PROTO		L1_C_PROTO_generic
#define	L2_S_PROTO		L2_S_PROTO_generic

#elif ARM_MMU_XSCALE == 1
#define	L2_S_PROT_U		L2_S_PROT_U_xscale
#define	L2_S_PROT_W		L2_S_PROT_W_xscale
#define	L2_S_PROT_MASK		L2_S_PROT_MASK_xscale

#define	L1_S_CACHE_MASK		L1_S_CACHE_MASK_xscale
#define	L2_L_CACHE_MASK		L2_L_CACHE_MASK_xscale
#define	L2_S_CACHE_MASK		L2_S_CACHE_MASK_xscale

#define	L1_S_PROTO		L1_S_PROTO_xscale
#define	L1_C_PROTO		L1_C_PROTO_xscale
#define	L2_S_PROTO		L2_S_PROTO_xscale

#endif /* ARM_NMMUS > 1 */

#ifdef SKYEYE_WORKAROUNDS
#define	PMAP_NEEDS_PTE_SYNC	1
#define	PMAP_INCLUDE_PTE_SYNC
#else
#if (ARM_MMU_SA1 == 1) && (ARM_NMMUS == 1)
#define	PMAP_NEEDS_PTE_SYNC	1
#define	PMAP_INCLUDE_PTE_SYNC
#elif defined(CPU_XSCALE_81342)
#define	PMAP_NEEDS_PTE_SYNC	1
#define	PMAP_INCLUDE_PTE_SYNC
#elif (ARM_MMU_SA1 == 0)
#define	PMAP_NEEDS_PTE_SYNC	0
#endif
#endif
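
/*
 * Editorial note (not in the original header): on a kernel built for a
 * single MMU class, the user-visible names above fold to constants at
 * compile time; with ARM_NMMUS > 1 they resolve to the pte_* variables
 * declared later in this file, which boot code fills in through the
 * matching pmap_pte_init_*() routine, e.g.
 *
 *	pmap_pte_init_xscale();    (sets pte_l2_s_proto, cache masks, ...)
 *	pte = L2_S_PROTO | ...;    (now reads the runtime value)
 */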

/*
 * These macros return various bits based on kernel/user and protection.
 * Note that the compiler will usually fold these at compile time.
 */
#define	L1_S_PROT(ku, pr)	((((ku) == PTE_USER) ? L1_S_PROT_U : 0) | \
				    (((pr) & VM_PROT_WRITE) ? L1_S_PROT_W : 0))

#define	L2_L_PROT(ku, pr)	((((ku) == PTE_USER) ? L2_L_PROT_U : 0) | \
				    (((pr) & VM_PROT_WRITE) ? L2_L_PROT_W : 0))

#define	L2_S_PROT(ku, pr)	((((ku) == PTE_USER) ? L2_S_PROT_U : 0) | \
				    (((pr) & VM_PROT_WRITE) ? L2_S_PROT_W : 0))

/*
 * Macros to test if a mapping is mappable with an L1 Section mapping
 * or an L2 Large Page mapping.
 */
#define	L1_S_MAPPABLE_P(va, pa, size)					\
	((((va) | (pa)) & L1_S_OFFSET) == 0 && (size) >= L1_S_SIZE)

#define	L2_L_MAPPABLE_P(va, pa, size)					\
	((((va) | (pa)) & L2_L_OFFSET) == 0 && (size) >= L2_L_SIZE)

/*
 * Provide a fallback in case we were not able to determine it at
 * compile-time.
 */
#ifndef PMAP_NEEDS_PTE_SYNC
#define	PMAP_NEEDS_PTE_SYNC	pmap_needs_pte_sync
#define	PMAP_INCLUDE_PTE_SYNC
#endif

#define	PTE_SYNC(pte)							\
do {									\
	if (PMAP_NEEDS_PTE_SYNC) {					\
		cpu_dcache_wb_range((vm_offset_t)(pte), sizeof(pt_entry_t));\
		cpu_l2cache_wb_range((vm_offset_t)(pte), sizeof(pt_entry_t));\
	}								\
} while (/*CONSTCOND*/0)

#define	PTE_SYNC_RANGE(pte, cnt)					\
do {									\
	if (PMAP_NEEDS_PTE_SYNC) {					\
		cpu_dcache_wb_range((vm_offset_t)(pte),			\
		    (cnt) << 2); /* * sizeof(pt_entry_t) */		\
		cpu_l2cache_wb_range((vm_offset_t)(pte),		\
		    (cnt) << 2); /* * sizeof(pt_entry_t) */		\
	}								\
} while (/*CONSTCOND*/0)

extern pt_entry_t		pte_l1_s_cache_mode;
extern pt_entry_t		pte_l1_s_cache_mask;

extern pt_entry_t		pte_l2_l_cache_mode;
extern pt_entry_t		pte_l2_l_cache_mask;

extern pt_entry_t		pte_l2_s_cache_mode;
extern pt_entry_t		pte_l2_s_cache_mask;

extern pt_entry_t		pte_l1_s_cache_mode_pt;
extern pt_entry_t		pte_l2_l_cache_mode_pt;
extern pt_entry_t		pte_l2_s_cache_mode_pt;

extern pt_entry_t		pte_l2_s_prot_u;
extern pt_entry_t		pte_l2_s_prot_w;
extern pt_entry_t		pte_l2_s_prot_mask;

extern pt_entry_t		pte_l1_s_proto;
extern pt_entry_t		pte_l1_c_proto;
extern pt_entry_t		pte_l2_s_proto;

extern void (*pmap_copy_page_func)(vm_paddr_t, vm_paddr_t);
extern void (*pmap_zero_page_func)(vm_paddr_t, int, int);

#if (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0 || defined(CPU_XSCALE_81342)
void	pmap_copy_page_generic(vm_paddr_t, vm_paddr_t);
void	pmap_zero_page_generic(vm_paddr_t, int, int);

void	pmap_pte_init_generic(void);
#if defined(CPU_ARM8)
void	pmap_pte_init_arm8(void);
#endif
#if defined(CPU_ARM9)
void	pmap_pte_init_arm9(void);
#endif /* CPU_ARM9 */
#if defined(CPU_ARM10)
void	pmap_pte_init_arm10(void);
#endif /* CPU_ARM10 */
#endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0 */

#if /* ARM_MMU_SA1 == */1
void	pmap_pte_init_sa1(void);
#endif /* ARM_MMU_SA1 == 1 */

#if ARM_MMU_XSCALE == 1
void	pmap_copy_page_xscale(vm_paddr_t, vm_paddr_t);
void	pmap_zero_page_xscale(vm_paddr_t, int, int);

void	pmap_pte_init_xscale(void);

void	xscale_setup_minidata(vm_offset_t, vm_offset_t, vm_offset_t);

void	pmap_use_minicache(vm_offset_t, vm_size_t);
#endif /* ARM_MMU_XSCALE == 1 */

#if defined(CPU_XSCALE_81342)
#define	ARM_HAVE_SUPERSECTIONS
#endif

#define	PTE_KERNEL	0
#define	PTE_USER	1

#define	l1pte_valid(pde)	((pde) != 0)
#define	l1pte_section_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_S)
#define	l1pte_page_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_C)
#define	l1pte_fpage_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_F)
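
/*
 * Editorial usage sketch (not part of the original header): a typical
 * small-page mapping combines a proto, the physical address, protection
 * bits and a cache mode, then syncs the PTE and shoots down the TLB
 * entry.  "ptep" here is a hypothetical pointer into an L2 table:
 *
 *	*ptep = L2_S_PROTO | pa |
 *	    L2_S_PROT(PTE_KERNEL, VM_PROT_READ | VM_PROT_WRITE) |
 *	    pte_l2_s_cache_mode;
 *	PTE_SYNC(ptep);
 *	cpu_tlb_flushD_SE(va);
 */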

#define	l2pte_index(v)		(((v) & L2_ADDR_BITS) >> L2_S_SHIFT)
#define	l2pte_valid(pte)	((pte) != 0)
#define	l2pte_pa(pte)		((pte) & L2_S_FRAME)
#define	l2pte_minidata(pte)	(((pte) & \
				    (L2_B | L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X)))\
				    == (L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X)))

/* L1 and L2 page table macros */
#define	pmap_pde_v(pde)		l1pte_valid(*(pde))
#define	pmap_pde_section(pde)	l1pte_section_p(*(pde))
#define	pmap_pde_page(pde)	l1pte_page_p(*(pde))
#define	pmap_pde_fpage(pde)	l1pte_fpage_p(*(pde))

#define	pmap_pte_v(pte)		l2pte_valid(*(pte))
#define	pmap_pte_pa(pte)	l2pte_pa(*(pte))

/*
 * Flags that indicate attributes of pages or mappings of pages.
 *
 * The PVF_MOD and PVF_REF flags are stored in the mdpage for each
 * page.  PVF_WIRED, PVF_WRITE, and PVF_NC are kept in individual
 * pv_entry's for each page.  They live in the same "namespace" so
 * that we can clear multiple attributes at a time.
 *
 * Note the "non-cacheable" flag generally means the page has
 * multiple mappings in a given address space.
 */
#define	PVF_MOD		0x01	/* page is modified */
#define	PVF_REF		0x02	/* page is referenced */
#define	PVF_WIRED	0x04	/* mapping is wired */
#define	PVF_WRITE	0x08	/* mapping is writable */
#define	PVF_EXEC	0x10	/* mapping is executable */
#define	PVF_NC		0x20	/* mapping is non-cacheable */
#define	PVF_MWC		0x40	/* mapping is used multiple times in userland */
#define	PVF_UNMAN	0x80	/* mapping is unmanaged */

void vector_page_setprot(int);

void pmap_update(pmap_t);

/*
 * This structure is used by machine-dependent code to describe
 * static mappings of devices, created at bootstrap time.
 */
struct pmap_devmap {
	vm_offset_t	pd_va;		/* virtual address */
	vm_paddr_t	pd_pa;		/* physical address */
	vm_size_t	pd_size;	/* size of region */
	vm_prot_t	pd_prot;	/* protection code */
	int		pd_cache;	/* cache attributes */
};

const struct pmap_devmap *pmap_devmap_find_pa(vm_paddr_t, vm_size_t);
const struct pmap_devmap *pmap_devmap_find_va(vm_offset_t, vm_size_t);

void	pmap_devmap_bootstrap(vm_offset_t, const struct pmap_devmap *);
void	pmap_devmap_register(const struct pmap_devmap *);

#define	SECTION_CACHE	0x1
#define	SECTION_PT	0x2
void	pmap_kenter_section(vm_offset_t, vm_paddr_t, int flags);
#ifdef ARM_HAVE_SUPERSECTIONS
void	pmap_kenter_supersection(vm_offset_t, uint64_t, int flags);
#endif

extern char *_tmppt;

void	pmap_postinit(void);

#ifdef ARM_USE_SMALL_ALLOC
void	arm_add_smallalloc_pages(void *, void *, int, int);
vm_offset_t arm_ptovirt(vm_paddr_t);
void	arm_init_smallalloc(void);
struct arm_small_page {
	void	*addr;
	TAILQ_ENTRY(arm_small_page) pg_list;
};

#endif

#define	ARM_NOCACHE_KVA_SIZE	0x1000000
extern vm_offset_t arm_nocache_startaddr;
void	*arm_remap_nocache(void *, vm_size_t);
void	arm_unmap_nocache(void *, vm_size_t);

extern vm_paddr_t dump_avail[];
#endif	/* _KERNEL */

#endif	/* !LOCORE */

#endif	/* !_MACHINE_PMAP_H_ */
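
/*
 * Editorial usage sketch (appended; not part of the original header):
 * a platform's machdep code typically describes its static device
 * mappings with a zero-terminated pmap_devmap array and hands it to
 * pmap_devmap_bootstrap() along with the kernel L1 table.  All names
 * prefixed FOO_ are hypothetical:
 *
 *	static const struct pmap_devmap foo_devmap[] = {
 *		{ FOO_UART_VA, FOO_UART_PA, FOO_UART_SIZE,
 *		    VM_PROT_READ | VM_PROT_WRITE, PTE_NOCACHE },
 *		{ 0, 0, 0, 0, 0 }
 *	};
 *
 *	pmap_devmap_bootstrap(l1pagetable, foo_devmap);
 */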