pmap-v4.h revision 132056
1/* 2 * Copyright (c) 1991 Regents of the University of California. 3 * All rights reserved. 4 * 5 * This code is derived from software contributed to Berkeley by 6 * the Systems Programming Group of the University of Utah Computer 7 * Science Department and William Jolitz of UUNET Technologies Inc. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 3. All advertising materials mentioning features or use of this software 18 * must display the following acknowledgement: 19 * This product includes software developed by the University of 20 * California, Berkeley and its contributors. 21 * 4. Neither the name of the University nor the names of its contributors 22 * may be used to endorse or promote products derived from this software 23 * without specific prior written permission. 24 * 25 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 28 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 30 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 31 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 35 * SUCH DAMAGE. 36 * 37 * Derived from hp300 version by Mike Hibler, this version by William 38 * Jolitz uses a recursive map [a pde points to the page directory] to 39 * map the page tables using the pagetables themselves. This is done to 40 * reduce the impact on kernel virtual memory for lots of sparse address 41 * space, and to reduce the cost of memory to each process. 42 * 43 * from: hp300: @(#)pmap.h 7.2 (Berkeley) 12/16/90 44 * from: @(#)pmap.h 7.4 (Berkeley) 5/12/91 45 * from: FreeBSD: src/sys/i386/include/pmap.h,v 1.70 2000/11/30 46 * 47 * $FreeBSD: head/sys/arm/include/pmap.h 132056 2004-07-12 21:22:40Z cognet $ 48 */ 49 50#ifndef _MACHINE_PMAP_H_ 51#define _MACHINE_PMAP_H_ 52 53#include <machine/pte.h> 54 55/* 56 * Pte related macros 57 */ 58#define PTE_NOCACHE 0 59#define PTE_CACHE 1 60 61#define VADDR(pdi, pti) ((vm_offset_t)(((pdi)<<PDR_SHIFT)+((pti)<<PAGE_SHIFT))) 62#define PTDIPDE(ptd) ((ptd)/1024) 63#define PTDIPTE(ptd) ((ptd)%256) 64 65#ifndef NKPT 66#define NKPT 120 /* actual number of kernel page tables */ 67#endif 68 69#ifndef NKPDE 70#define NKPDE 1019 /* Maximum number of kernel PDE */ 71#endif 72 73#define NPDEPTD 16 /* Number of PDE in each PTD */ 74 75/* 76 * The *PTDI values control the layout of virtual memory 77 */ 78 79#define KPTDI (NPDEPG-NKPDE) /* ptd entry for kernel space begin */ 80#define PTDPTDI (KPTDI-1) /* ptd entry that points to ptd! */ 81#define KPTPTDI (PTDPTDI-1) /* ptd entry for kernel PTEs */ 82#define UPTPTDI (KPTPTDI-3) /* ptd entry for uspace PTEs */ 83#define UMAXPTDI (UPTPTDI-1) /* ptd entry for user space end */ 84#define UMAXPTEOFF (NPTEPG) /* pte entry for user space end */ 85 86#ifndef LOCORE 87 88#include <sys/queue.h> 89 90#define PDESIZE sizeof(pd_entry_t) /* for assembly files */ 91#define PTESIZE sizeof(pt_entry_t) /* for assembly files */ 92 93#ifdef _KERNEL 94#define ARM_PTE_TO_PFN(pte) ((pt_entry_t)(pte) >> PAGE_SHIFT) 95#define ARM_PDE_TO_PFN(pde) ((pd_entry_t)(pde) >> 10) 96#define ARM_PHYS_TO_KSPACE(x) ((vm_offset_t) (x) | (UPTPTDI << PDR_SHIFT)) 97#define ARM_KSPACE_TO_PHYS(x) ((vm_offset_t) (x) & ~(UPTPTDI << PDR_SHIFT)) 98 99extern pt_entry_t PTmap[], APTmap; 100extern pd_entry_t PTD[], APTD, PTDpde, APTDpde; 101 102extern pd_entry_t IdlePTD; /* physical address of "Idle" state directory */ 103 104 105 106#if 0 107static __inline vm_offset_t 108pmap_akextract(vm_offset_t va) 109{ 110 vm_offset_t pa; 111 pa = *(vm_offset_t *)avtopte(va); 112 pa = (pa & PG_FRAME) | (va & PAGE_MASK); 113 return pa; 114} 115#endif 116#define vtophys(va) pmap_kextract(((vm_offset_t) (va))) 117 118#define avtophys(va) pmap_akextract(((vm_offset_t) (va))) 119 120#endif 121 122/* 123 * Pmap sutff 124 */ 125 126/* 127 * This structure is used to hold a virtual<->physical address 128 * association and is used mostly by bootstrap code 129 */ 130struct pv_addr { 131 SLIST_ENTRY(pv_addr) pv_list; 132 vm_offset_t pv_va; 133 vm_paddr_t pv_pa; 134}; 135 136struct pv_entry; 137 138struct md_page { 139 int pvh_attrs; 140 u_int uro_mappings; 141 u_int urw_mappings; 142 union { 143 u_short s_mappings[2]; /* Assume kernel count <= 65535 */ 144 u_int i_mappings; 145 } k_u; 146#define kro_mappings k_u.s_mappings[0] 147#define krw_mappings k_u.s_mappings[1] 148#define k_mappings k_u.i_mappings 149 int pv_list_count; 150 TAILQ_HEAD(,pv_entry) pv_list; 151}; 152 153#define VM_MDPAGE_INIT(pg) \ 154do { \ 155 TAILQ_INIT(&pg->pv_list); \ 156 mtx_init(&(pg)->md_page.pvh_mtx, "MDPAGE Mutex", NULL, MTX_DEV);\ 157 (pg)->mdpage.pvh_attrs = 0; \ 158 (pg)->mdpage.uro_mappings = 0; \ 159 (pg)->mdpage.urw_mappings = 0; \ 160 (pg)->mdpage.k_mappings = 0; \ 161} while (/*CONSTCOND*/0) 162 163struct l1_ttable; 164struct l2_dtable; 165 166/* 167 * Track cache/tlb occupancy using the following structure 168 */ 169union pmap_cache_state { 170 struct { 171 union { 172 u_int8_t csu_cache_b[2]; 173 u_int16_t csu_cache; 174 } cs_cache_u; 175 176 union { 177 u_int8_t csu_tlb_b[2]; 178 u_int16_t csu_tlb; 179 } cs_tlb_u; 180 } cs_s; 181 u_int32_t cs_all; 182}; 183#define cs_cache_id cs_s.cs_cache_u.csu_cache_b[0] 184#define cs_cache_d cs_s.cs_cache_u.csu_cache_b[1] 185#define cs_cache cs_s.cs_cache_u.csu_cache 186#define cs_tlb_id cs_s.cs_tlb_u.csu_tlb_b[0] 187#define cs_tlb_d cs_s.cs_tlb_u.csu_tlb_b[1] 188#define cs_tlb cs_s.cs_tlb_u.csu_tlb 189 190/* 191 * Assigned to cs_all to force cacheops to work for a particular pmap 192 */ 193#define PMAP_CACHE_STATE_ALL 0xffffffffu 194 195/* 196 * The number of L2 descriptor tables which can be tracked by an l2_dtable. 197 * A bucket size of 16 provides for 16MB of contiguous virtual address 198 * space per l2_dtable. Most processes will, therefore, require only two or 199 * three of these to map their whole working set. 200 */ 201#define L2_BUCKET_LOG2 4 202#define L2_BUCKET_SIZE (1 << L2_BUCKET_LOG2) 203/* 204 * Given the above "L2-descriptors-per-l2_dtable" constant, the number 205 * of l2_dtable structures required to track all possible page descriptors 206 * mappable by an L1 translation table is given by the following constants: 207 */ 208#define L2_LOG2 ((32 - L1_S_SHIFT) - L2_BUCKET_LOG2) 209#define L2_SIZE (1 << L2_LOG2) 210 211struct pmap { 212 u_int8_t pm_domain; 213 struct l1_ttable *pm_l1; 214 struct l2_dtable *pm_l2[L2_SIZE]; 215 pd_entry_t *pm_pdir; /* KVA of page directory */ 216 TAILQ_HEAD(,pv_entry) pm_pvlist; /* list of mappings in pmap */ 217 struct pv_addr pm_ptpt; /* pagetable of pagetables */ 218 int pm_count; /* reference count */ 219 int pm_active; /* active on cpus */ 220 struct pmap_statistics pm_stats; /* pmap statictics */ 221 struct vm_page *pm_ptphint; /* pmap ptp hint */ 222 union pmap_cache_state pm_cstate; 223 LIST_ENTRY(pmap) pm_list; /* List of all pmaps */ 224}; 225 226typedef struct pmap *pmap_t; 227 228#ifdef _KERNEL 229extern pmap_t kernel_pmap; 230#define pmap_kernel() kernel_pmap 231#endif 232 233/* 234 * For each vm_page_t, there is a list of all currently valid virtual 235 * mappings of that page. An entry is a pv_entry_t, the list is pv_table. 236 */ 237typedef struct pv_entry { 238 pmap_t pv_pmap; /* pmap where mapping lies */ 239 vm_offset_t pv_va; /* virtual address for mapping */ 240 TAILQ_ENTRY(pv_entry) pv_list; 241 TAILQ_ENTRY(pv_entry) pv_plist; 242 vm_page_t pv_ptem; /* VM page for pte */ 243 int pv_flags; /* flags (wired, etc...) */ 244} *pv_entry_t; 245 246#define PV_ENTRY_NULL ((pv_entry_t) 0) 247 248#define PV_CI 0x01 /* all entries must be cache inhibited */ 249#define PV_PTPAGE 0x02 /* entry maps a page table page */ 250 251/* 252 * Page hooks. 253 * For speed we store the both the virtual address and the page table 254 * entry address for each page hook. 255 */ 256typedef struct { 257 vm_offset_t va; 258 pt_entry_t *pte; 259} pagehook_t; 260 261 262#ifdef _KERNEL 263 264boolean_t pmap_get_pde_pte(pmap_t, vm_offset_t, pd_entry_t **, pt_entry_t **); 265 266/* 267 * virtual address to page table entry and 268 * to physical address. Likewise for alternate address space. 269 * Note: these work recursively, thus vtopte of a pte will give 270 * the corresponding pde that in turn maps it. 271 */ 272 273struct pcb; 274 275void pmap_set_pcb_pagedir(pmap_t, struct pcb *); 276/* Virtual address to page table entry */ 277static __inline pt_entry_t * 278vtopte(vm_offset_t va) 279{ 280 pd_entry_t *pdep; 281 pt_entry_t *ptep; 282 283 if (pmap_get_pde_pte(pmap_kernel(), va, &pdep, &ptep) == FALSE) 284 return (NULL); 285 return (ptep); 286} 287 288extern vm_offset_t avail_end; 289extern vm_offset_t clean_eva; 290extern vm_offset_t clean_sva; 291extern vm_offset_t phys_avail[]; 292extern vm_offset_t virtual_avail; 293extern vm_offset_t virtual_end; 294 295void pmap_bootstrap(vm_offset_t, vm_offset_t, struct pv_addr *); 296void pmap_kenter(vm_offset_t va, vm_paddr_t pa); 297void pmap_kremove(vm_offset_t); 298void *pmap_mapdev(vm_offset_t, vm_size_t); 299void pmap_unmapdev(vm_offset_t, vm_size_t); 300vm_page_t pmap_use_pt(pmap_t, vm_offset_t); 301void pmap_debug(int); 302void pmap_map_section(vm_offset_t, vm_offset_t, vm_offset_t, int, int); 303void pmap_link_l2pt(vm_offset_t, vm_offset_t, struct pv_addr *); 304vm_size_t pmap_map_chunk(vm_offset_t, vm_offset_t, vm_offset_t, vm_size_t, int, int); 305void 306pmap_map_entry(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa, int prot, 307 int cache); 308int pmap_fault_fixup(pmap_t, vm_offset_t, vm_prot_t, int); 309 310/* 311 * Definitions for MMU domains 312 */ 313#define PMAP_DOMAINS 15 /* 15 'user' domains (0-14) */ 314#define PMAP_DOMAIN_KERNEL 15 /* The kernel uses domain #15 */ 315 316/* 317 * The new pmap ensures that page-tables are always mapping Write-Thru. 318 * Thus, on some platforms we can run fast and loose and avoid syncing PTEs 319 * on every change. 320 * 321 * Unfortunately, not all CPUs have a write-through cache mode. So we 322 * define PMAP_NEEDS_PTE_SYNC for C code to conditionally do PTE syncs, 323 * and if there is the chance for PTE syncs to be needed, we define 324 * PMAP_INCLUDE_PTE_SYNC so e.g. assembly code can include (and run) 325 * the code. 326 */ 327extern int pmap_needs_pte_sync; 328 329/* 330 * These macros define the various bit masks in the PTE. 331 * 332 * We use these macros since we use different bits on different processor 333 * models. 334 */ 335#define L1_S_PROT_U (L1_S_AP(AP_U)) 336#define L1_S_PROT_W (L1_S_AP(AP_W)) 337#define L1_S_PROT_MASK (L1_S_PROT_U|L1_S_PROT_W) 338 339#define L1_S_CACHE_MASK_generic (L1_S_B|L1_S_C) 340#define L1_S_CACHE_MASK_xscale (L1_S_B|L1_S_C|L1_S_XSCALE_TEX(TEX_XSCALE_X)) 341 342#define L2_L_PROT_U (L2_AP(AP_U)) 343#define L2_L_PROT_W (L2_AP(AP_W)) 344#define L2_L_PROT_MASK (L2_L_PROT_U|L2_L_PROT_W) 345 346#define L2_L_CACHE_MASK_generic (L2_B|L2_C) 347#define L2_L_CACHE_MASK_xscale (L2_B|L2_C|L2_XSCALE_L_TEX(TEX_XSCALE_X)) 348 349#define L2_S_PROT_U_generic (L2_AP(AP_U)) 350#define L2_S_PROT_W_generic (L2_AP(AP_W)) 351#define L2_S_PROT_MASK_generic (L2_S_PROT_U|L2_S_PROT_W) 352 353#define L2_S_PROT_U_xscale (L2_AP0(AP_U)) 354#define L2_S_PROT_W_xscale (L2_AP0(AP_W)) 355#define L2_S_PROT_MASK_xscale (L2_S_PROT_U|L2_S_PROT_W) 356 357#define L2_S_CACHE_MASK_generic (L2_B|L2_C) 358#define L2_S_CACHE_MASK_xscale (L2_B|L2_C|L2_XSCALE_T_TEX(TEX_XSCALE_X)) 359 360#define L1_S_PROTO_generic (L1_TYPE_S | L1_S_IMP) 361#define L1_S_PROTO_xscale (L1_TYPE_S) 362 363#define L1_C_PROTO_generic (L1_TYPE_C | L1_C_IMP2) 364#define L1_C_PROTO_xscale (L1_TYPE_C) 365 366#define L2_L_PROTO (L2_TYPE_L) 367 368#define L2_S_PROTO_generic (L2_TYPE_S) 369#define L2_S_PROTO_xscale (L2_TYPE_XSCALE_XS) 370 371/* 372 * User-visible names for the ones that vary with MMU class. 373 */ 374 375#if ARM_NMMUS > 1 376/* More than one MMU class configured; use variables. */ 377#define L2_S_PROT_U pte_l2_s_prot_u 378#define L2_S_PROT_W pte_l2_s_prot_w 379#define L2_S_PROT_MASK pte_l2_s_prot_mask 380 381#define L1_S_CACHE_MASK pte_l1_s_cache_mask 382#define L2_L_CACHE_MASK pte_l2_l_cache_mask 383#define L2_S_CACHE_MASK pte_l2_s_cache_mask 384 385#define L1_S_PROTO pte_l1_s_proto 386#define L1_C_PROTO pte_l1_c_proto 387#define L2_S_PROTO pte_l2_s_proto 388 389#elif (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0 390#define L2_S_PROT_U L2_S_PROT_U_generic 391#define L2_S_PROT_W L2_S_PROT_W_generic 392#define L2_S_PROT_MASK L2_S_PROT_MASK_generic 393 394#define L1_S_CACHE_MASK L1_S_CACHE_MASK_generic 395#define L2_L_CACHE_MASK L2_L_CACHE_MASK_generic 396#define L2_S_CACHE_MASK L2_S_CACHE_MASK_generic 397 398#define L1_S_PROTO L1_S_PROTO_generic 399#define L1_C_PROTO L1_C_PROTO_generic 400#define L2_S_PROTO L2_S_PROTO_generic 401 402#elif ARM_MMU_XSCALE == 1 403#define L2_S_PROT_U L2_S_PROT_U_xscale 404#define L2_S_PROT_W L2_S_PROT_W_xscale 405#define L2_S_PROT_MASK L2_S_PROT_MASK_xscale 406 407#define L1_S_CACHE_MASK L1_S_CACHE_MASK_xscale 408#define L2_L_CACHE_MASK L2_L_CACHE_MASK_xscale 409#define L2_S_CACHE_MASK L2_S_CACHE_MASK_xscale 410 411#define L1_S_PROTO L1_S_PROTO_xscale 412#define L1_C_PROTO L1_C_PROTO_xscale 413#define L2_S_PROTO L2_S_PROTO_xscale 414 415#endif /* ARM_NMMUS > 1 */ 416 417#if (ARM_MMU_SA1 == 1) && (ARM_NMMUS == 1) 418#define PMAP_NEEDS_PTE_SYNC 1 419#define PMAP_INCLUDE_PTE_SYNC 420#elif (ARM_MMU_SA1 == 0) 421#define PMAP_NEEDS_PTE_SYNC 0 422#endif 423 424/* 425 * These macros return various bits based on kernel/user and protection. 426 * Note that the compiler will usually fold these at compile time. 427 */ 428#define L1_S_PROT(ku, pr) ((((ku) == PTE_USER) ? L1_S_PROT_U : 0) | \ 429 (((pr) & VM_PROT_WRITE) ? L1_S_PROT_W : 0)) 430 431#define L2_L_PROT(ku, pr) ((((ku) == PTE_USER) ? L2_L_PROT_U : 0) | \ 432 (((pr) & VM_PROT_WRITE) ? L2_L_PROT_W : 0)) 433 434#define L2_S_PROT(ku, pr) ((((ku) == PTE_USER) ? L2_S_PROT_U : 0) | \ 435 (((pr) & VM_PROT_WRITE) ? L2_S_PROT_W : 0)) 436 437/* 438 * Macros to test if a mapping is mappable with an L1 Section mapping 439 * or an L2 Large Page mapping. 440 */ 441#define L1_S_MAPPABLE_P(va, pa, size) \ 442 ((((va) | (pa)) & L1_S_OFFSET) == 0 && (size) >= L1_S_SIZE) 443 444#define L2_L_MAPPABLE_P(va, pa, size) \ 445 ((((va) | (pa)) & L2_L_OFFSET) == 0 && (size) >= L2_L_SIZE) 446 447/* 448 * Provide a fallback in case we were not able to determine it at 449 * compile-time. 450 */ 451#ifndef PMAP_NEEDS_PTE_SYNC 452#define PMAP_NEEDS_PTE_SYNC pmap_needs_pte_sync 453#define PMAP_INCLUDE_PTE_SYNC 454#endif 455 456#define PTE_SYNC(pte) \ 457do { \ 458 if (PMAP_NEEDS_PTE_SYNC) \ 459 cpu_dcache_wb_range((vm_offset_t)(pte), sizeof(pt_entry_t));\ 460} while (/*CONSTCOND*/0) 461 462#define PTE_SYNC_RANGE(pte, cnt) \ 463do { \ 464 if (PMAP_NEEDS_PTE_SYNC) { \ 465 cpu_dcache_wb_range((vm_offset_t)(pte), \ 466 (cnt) << 2); /* * sizeof(pt_entry_t) */ \ 467 } \ 468} while (/*CONSTCOND*/0) 469 470extern pt_entry_t pte_l1_s_cache_mode; 471extern pt_entry_t pte_l1_s_cache_mask; 472 473extern pt_entry_t pte_l2_l_cache_mode; 474extern pt_entry_t pte_l2_l_cache_mask; 475 476extern pt_entry_t pte_l2_s_cache_mode; 477extern pt_entry_t pte_l2_s_cache_mask; 478 479extern pt_entry_t pte_l1_s_cache_mode_pt; 480extern pt_entry_t pte_l2_l_cache_mode_pt; 481extern pt_entry_t pte_l2_s_cache_mode_pt; 482 483extern pt_entry_t pte_l2_s_prot_u; 484extern pt_entry_t pte_l2_s_prot_w; 485extern pt_entry_t pte_l2_s_prot_mask; 486 487extern pt_entry_t pte_l1_s_proto; 488extern pt_entry_t pte_l1_c_proto; 489extern pt_entry_t pte_l2_s_proto; 490 491extern void (*pmap_copy_page_func)(vm_paddr_t, vm_paddr_t); 492extern void (*pmap_zero_page_func)(vm_paddr_t, int, int); 493 494#if (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0 495void pmap_copy_page_generic(vm_paddr_t, vm_paddr_t); 496void pmap_zero_page_generic(vm_paddr_t, int, int); 497 498void pmap_pte_init_generic(void); 499#if defined(CPU_ARM8) 500void pmap_pte_init_arm8(void); 501#endif 502#if defined(CPU_ARM9) 503void pmap_pte_init_arm9(void); 504#endif /* CPU_ARM9 */ 505#if defined(CPU_ARM10) 506void pmap_pte_init_arm10(void); 507#endif /* CPU_ARM10 */ 508#endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0 */ 509 510#if /* ARM_MMU_SA1 == */1 511void pmap_pte_init_sa1(void); 512#endif /* ARM_MMU_SA1 == 1 */ 513 514#if ARM_MMU_XSCALE == 1 515void pmap_copy_page_xscale(vm_paddr_t, vm_paddr_t); 516void pmap_zero_page_xscale(vm_paddr_t, int, int); 517 518void pmap_pte_init_xscale(void); 519 520void xscale_setup_minidata(vm_offset_t, vm_offset_t, vm_offset_t); 521 522#define PMAP_UAREA(va) pmap_uarea(va) 523void pmap_uarea(vm_offset_t); 524#endif /* ARM_MMU_XSCALE == 1 */ 525#define PTE_KERNEL 0 526#define PTE_USER 1 527#define l1pte_valid(pde) ((pde) != 0) 528#define l1pte_section_p(pde) (((pde) & L1_TYPE_MASK) == L1_TYPE_S) 529#define l1pte_page_p(pde) (((pde) & L1_TYPE_MASK) == L1_TYPE_C) 530#define l1pte_fpage_p(pde) (((pde) & L1_TYPE_MASK) == L1_TYPE_F) 531 532#define l2pte_index(v) (((v) & L2_ADDR_BITS) >> L2_S_SHIFT) 533#define l2pte_valid(pte) ((pte) != 0) 534#define l2pte_pa(pte) ((pte) & L2_S_FRAME) 535#define l2pte_minidata(pte) (((pte) & \ 536 (L2_B | L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X)))\ 537 == (L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X))) 538 539/* L1 and L2 page table macros */ 540#define pmap_pde_v(pde) l1pte_valid(*(pde)) 541#define pmap_pde_section(pde) l1pte_section_p(*(pde)) 542#define pmap_pde_page(pde) l1pte_page_p(*(pde)) 543#define pmap_pde_fpage(pde) l1pte_fpage_p(*(pde)) 544 545#define pmap_pte_v(pte) l2pte_valid(*(pte)) 546#define pmap_pte_pa(pte) l2pte_pa(*(pte)) 547 548/* Size of the kernel part of the L1 page table */ 549#define KERNEL_PD_SIZE \ 550 (L1_TABLE_SIZE - (KERNEL_BASE >> L1_S_SHIFT) * sizeof(pd_entry_t)) 551#define PTE_PAGETABLE 2 552 553/* 554 * Flags that indicate attributes of pages or mappings of pages. 555 * 556 * The PVF_MOD and PVF_REF flags are stored in the mdpage for each 557 * page. PVF_WIRED, PVF_WRITE, and PVF_NC are kept in individual 558 * pv_entry's for each page. They live in the same "namespace" so 559 * that we can clear multiple attributes at a time. 560 * 561 * Note the "non-cacheable" flag generally means the page has 562 * multiple mappings in a given address space. 563 */ 564#define PVF_MOD 0x01 /* page is modified */ 565#define PVF_REF 0x02 /* page is referenced */ 566#define PVF_WIRED 0x04 /* mapping is wired */ 567#define PVF_WRITE 0x08 /* mapping is writable */ 568#define PVF_EXEC 0x10 /* mapping is executable */ 569#define PVF_UNC 0x20 /* mapping is 'user' non-cacheable */ 570#define PVF_KNC 0x40 /* mapping is 'kernel' non-cacheable */ 571#define PVF_NC (PVF_UNC|PVF_KNC) 572 573void vector_page_setprot(int); 574/* 575 * Routine: pmap_kextract 576 * Function: 577 * Extract the physical page address associated 578 * kernel virtual address. 579 */ 580 581vm_paddr_t pmap_kextract(vm_offset_t); 582 583void pmap_update(pmap_t); 584#endif /* _KERNEL */ 585 586#endif /* !LOCORE */ 587 588#endif /* !_MACHINE_PMAP_H_ */ 589