/*	$NetBSD: pmap.h,v 1.5 2000/04/25 12:17:06 tsubai Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgment:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * pmap.h: see pmap.c for the history of this pmap module.
 */

#ifndef _SH3_PMAP_H_
#define _SH3_PMAP_H_

#include <machine/cpufunc.h>
#include <machine/pte.h>
#include <uvm/uvm_object.h>

/*
 * see pte.h for a description of i386 MMU terminology and hardware
 * interface.
 *
 * a pmap describes a process's 4GB virtual address space.  this
 * virtual address space can be broken up into 1024 4MB regions which
 * are described by PDEs in the PDP.  the PDEs are defined as follows:
 *
 * (ranges are inclusive -> exclusive, just like vm_map_entry start/end)
 * (the following assumes that KERNBASE is 0xf0000000)
 *
 * PDE#s	VA range		usage
 * 0->959	0x0 -> 0xefc00000	user address space, note that the
 *					max user address is 0xefbfe000.
 *					the final two pages in the last 4MB
 *					used to be reserved for the UAREA
 *					but now are no longer used.
 * 959		0xefc00000->		recursive mapping of PDP (used for
 *		0xf0000000		linear mapping of PTPs)
 * 960->1023	0xf0000000->		kernel address space (constant
 *		0xffc00000		across all pmaps/processes)
 * 1023		0xffc00000->		"alternate" recursive PDP mapping
 *		<end>			(for other pmaps)
 *
 *
 * note: a recursive PDP mapping provides a way to map all the PTEs for
 * a 4GB address space into a linear chunk of virtual memory.  in other
 * words, the PTE for page 0 is the first int mapped into the 4MB recursive
 * area.  the PTE for page 1 is the second int.  the very last int in the
 * 4MB range is the PTE that maps VA 0xffffe000 (the last page in a 4GB
 * address space).
 *
 * all pmaps' PDs must have the same values in slots 960->1023 so that
 * the kernel is always mapped in every process.  these values are loaded
 * into the PD at pmap creation time.
 *
 * at any one time only one pmap can be active on a processor.  this is
 * the pmap whose PDP is pointed to by processor register %cr3.  this pmap
 * will have all its PTEs mapped into memory at the recursive mapping
 * point (slot #959 as shown above).  when the pmap code wants to find the
 * PTE for a virtual address, all it has to do is the following:
 *
 *	address of PTE = (959 * 4MB) + (VA / NBPG) * sizeof(pt_entry_t)
 *		       = 0xefc00000 + (VA / 4096) * 4
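 *
 * for example, using the formula above (the addresses follow the
 * 959-slot/0xefc00000 layout described in this comment):
 *
 *	VA = 0x12345678
 *	address of PTE = 0xefc00000 + (0x12345678 / 4096) * 4
 *		       = 0xefc00000 + (0x12345 * 4)
 *		       = 0xefc48d14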
 *
 * what happens if the pmap layer is asked to perform an operation
 * on a pmap that is not the one which is currently active?  in that
 * case we take the PA of the PDP of the non-active pmap and put it in
 * slot 1023 of the active pmap.  this causes the non-active pmap's
 * PTEs to get mapped in the final 4MB of the 4GB address space
 * (e.g. starting at 0xffc00000).
 *
 * the following figure shows the effects of the recursive PDP mapping:
 *
 *   PDP (%cr3)
 *   +----+
 *   |   0| -> PTP#0 that maps VA 0x0 -> 0x400000
 *   |    |
 *   |    |
 *   | 959| -> points back to PDP (%cr3) mapping VA 0xefc00000 -> 0xf0000000
 *   | 960| -> first kernel PTP (maps 0xf0000000 -> 0xf0400000)
 *   |    |
 *   |1023| -> points to alternate pmap's PDP (maps 0xffc00000 -> end)
 *   +----+
 *
 * note that the PDE#959 VA (0xefc00000) is defined as "PTE_BASE"
 * note that the PDE#1023 VA (0xffc00000) is defined as "APTE_BASE"
 *
 * starting at VA 0xefc00000 the current active PDP (%cr3) acts as a
 * PTP:
 *
 *   PTP#959 == PDP(%cr3) => maps VA 0xefc00000 -> 0xf0000000
 *   +----+
 *   |   0| -> maps the contents of PTP#0 at VA 0xefc00000->0xefc01000
 *   |    |
 *   |    |
 *   | 959| -> maps contents of PTP#959 (the PDP) at VA 0xeffbf000
 *   | 960| -> maps contents of first kernel PTP
 *   |    |
 *   |1023|
 *   +----+
 *
 * note that mapping of the PDP at PTP#959's VA (0xeffbf000) is
 * defined as "PDP_BASE"....  within that mapping there are two
 * defines:
 *   "PDP_PDE" (0xeffbfefc) is the VA of the PDE in the PDP
 *	which points back to itself.
 *   "APDP_PDE" (0xeffbfffc) is the VA of the PDE in the PDP which
 *	establishes the recursive mapping of the alternate pmap.
 *	to set the alternate PDP, one just has to put the correct
 *	PA info in *APDP_PDE.
 *
 * note that in the APTE_BASE space, the APDP appears at VA
 * "APDP_BASE" (0xfffff000).
 */

/*
 * the following defines identify the slots used as described above.
 */

#define PDSLOT_PTE	((u_int)0x33f)	/* PTDPTDI for recursive PDP map */
#define PDSLOT_KERN	((u_int)0x340)	/* KPTDI start of kernel space */
#define PDSLOT_APTE	((u_int)0x37f)	/* alternative recursive slot */

/*
 * the following defines give the virtual addresses of various MMU
 * data structures:
 * PTE_BASE and APTE_BASE: the base VA of the linear PTE mappings
 * PDP_BASE and APDP_BASE: the base VA of the recursive mapping of the PDP
 * PDP_PDE and APDP_PDE: the VA of the PDE that points back to the PDP/APDP
 */

#define PTE_BASE	((pt_entry_t *) (PDSLOT_PTE * NBPD) )
#define APTE_BASE	((pt_entry_t *) (PDSLOT_APTE * NBPD) )
#define PDP_BASE  ((pd_entry_t *)(((char *)PTE_BASE) + (PDSLOT_PTE * NBPG)))
#define APDP_BASE ((pd_entry_t *)(((char *)APTE_BASE) + (PDSLOT_APTE * NBPG)))
#define PDP_PDE		(PDP_BASE + PDSLOT_PTE)
#define APDP_PDE	(PDP_BASE + PDSLOT_APTE)
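/*
 * a worked expansion of the defines above (assuming NBPG == 4096 and
 * NBPD == 4MB == 0x400000, per the PTP macros below; the slot numbers
 * here are 0x33f/0x340/0x37f, so the resulting addresses differ from
 * the i386-derived 959/960/1023 figures in the big comment above):
 *
 *	PTE_BASE = 0x33f * 0x400000              = 0xcfc00000
 *	PDP_BASE = 0xcfc00000 + (0x33f * 0x1000) = 0xcff3f000
 *	PDP_PDE  = PDP_BASE + 0x33f entries      = VA 0xcff3fcfc
 */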
/*
 * XXXCDC: tmp xlate from old names:
 * PTDPTDI -> PDSLOT_PTE
 * KPTDI -> PDSLOT_KERN
 * APTDPTDI -> PDSLOT_APTE
 */

/*
 * the following define determines how many PTPs should be set up for the
 * kernel by locore.s at boot time.  this should be large enough to
 * get the VM system running.  once the VM system is running, the
 * pmap module can add more PTPs to the kernel area on demand.
 */

#ifndef NKPTP
#define NKPTP		8	/* 32MB to start */
#endif
#define NKPTP_MIN	8	/* smallest value we allow */
#define NKPTP_MAX	63	/* (1024 - (0xd0000000/NBPD) - 1) */
				/* largest value (-1 for APTP space) */

/*
 * various address macros
 *
 *  vtopte: return a pointer to the PTE mapping a VA
 *  kvtopte: same as above (takes a KVA, but doesn't matter with this pmap)
 *  ptetov: given a pointer to a PTE, return the VA that it maps
 *  vtophys: translate a VA to the PA mapped to it
 *
 * plus alternative versions of the above
 */

#define vtopte(VA)	(PTE_BASE + sh3_btop(VA))
#define kvtopte(VA)	vtopte(VA)
#define ptetov(PT)	(sh3_ptob(PT - PTE_BASE))
#define avtopte(VA)	(APTE_BASE + sh3_btop(VA))
#define ptetoav(PT)	(sh3_ptob(PT - APTE_BASE))
#define avtophys(VA)	((*avtopte(VA) & PG_FRAME) | \
			 ((unsigned)(VA) & ~PG_FRAME))

/*
 * pdei/ptei: generate index into PDP/PTP from a VA
 */
#define pdei(VA)	(((VA) & PD_MASK) >> PDSHIFT)
#define ptei(VA)	(((VA) & PT_MASK) >> PGSHIFT)

/*
 * PTP macros:
 *   a PTP's index is the PD index of the PDE that points to it
 *   a PTP's offset is the byte-offset in the PTE space that this PTP is at
 *   a PTP's VA is the first VA mapped by that PTP
 *
 * note that NBPG == number of bytes in a PTP (4096 bytes == 1024 entries)
 *           NBPD == number of bytes a PTP can map (4MB)
 */

#define ptp_i2o(I)	((I) * NBPG)	/* index => offset */
#define ptp_o2i(O)	((O) / NBPG)	/* offset => index */
#define ptp_i2v(I)	((I) * NBPD)	/* index => VA */
#define ptp_v2i(V)	((V) / NBPD)	/* VA => index (same as pdei) */
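/*
 * a worked example of the address macros above (assuming the usual
 * 4KB-page/4MB-PTP geometry, i.e. PGSHIFT == 12 and PDSHIFT == 22):
 *
 *	VA = 0x12345678
 *	pdei(VA)    = 0x12345678 >> 22             = 0x48  (slot in PDP)
 *	ptei(VA)    = (0x12345678 & PT_MASK) >> 12 = 0x345 (slot in PTP)
 *	vtopte(VA)  = PTE_BASE + 0x12345     (== 0x48 * 1024 + 0x345)
 *	ptp_v2i(VA) = 0x48, and ptp_i2v(0x48) = 0x12000000
 */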
/*
 * PG_AVAIL usage: we make use of the ignored bits of the PTE
 */

#define PG_PVLIST	PG_AVAIL1	/* mapping has entry on pvlist */

#ifdef _KERNEL
/*
 * pmap data structures: see pmap.c for details of locking.
 */

struct pmap;
typedef struct pmap *pmap_t;

/*
 * we maintain a list of all non-kernel pmaps
 */

LIST_HEAD(pmap_head, pmap); /* struct pmap_head: head of a pmap list */

/*
 * the pmap structure
 *
 * note that the pm_obj contains the simple_lock, the reference count,
 * page list, and number of PTPs within the pmap.
 */

struct pmap {
	struct uvm_object pm_obj;	/* object (lck by object lock) */
#define	pm_lock	pm_obj.vmobjlock
	LIST_ENTRY(pmap) pm_list;	/* list (lck by pm_list lock) */
	pd_entry_t *pm_pdir;		/* VA of PD (lck by object lock) */
	u_int32_t pm_pdirpa;		/* PA of PD (read-only after create) */
	struct vm_page *pm_ptphint;	/* pointer to a PTP in our pmap */
	struct pmap_statistics pm_stats; /* pmap stats (lck by object lock) */

	int pm_flags;			/* see below */
};

/* pm_flags */
#define PMF_USER_LDT	0x01	/* pmap has user-set LDT */

/*
 * for each managed physical page we maintain a list of <PMAP,VA>'s
 * which it is mapped at.  the list is headed by a pv_head structure.
 * there is one pv_head per managed phys page (allocated at boot time).
 * the pv_head structure points to a list of pv_entry structures (each
 * describes one mapping).
 */

struct pv_entry;

struct pv_head {
	simple_lock_data_t pvh_lock;	/* locks every pv on this list */
	struct pv_entry *pvh_list;	/* head of list (locked by pvh_lock) */
};

struct pv_entry {			/* locked by its list's pvh_lock */
	struct pv_entry *pv_next;	/* next entry */
	struct pmap *pv_pmap;		/* the pmap */
	vaddr_t pv_va;			/* the virtual address */
	struct vm_page *pv_ptp;		/* the vm_page of the PTP */
};

/*
 * pv_entrys are dynamically allocated in chunks from a single page.
 * we keep track of how many pv_entrys are in use for each page and
 * we can free pv_entry pages if needed.  there is one lock for the
 * entire allocation system.
 */

struct pv_page_info {
	TAILQ_ENTRY(pv_page) pvpi_list;
	struct pv_entry *pvpi_pvfree;
	int pvpi_nfree;
};

/*
 * number of pv_entry's in a pv_page
 * (note: won't work on systems where NBPG isn't a constant)
 */

#define PVE_PER_PVPAGE	((NBPG - sizeof(struct pv_page_info)) / \
			 sizeof(struct pv_entry))

/*
 * a pv_page: where pv_entrys are allocated from
 */

struct pv_page {
	struct pv_page_info pvinfo;
	struct pv_entry pvents[PVE_PER_PVPAGE];
};
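/*
 * illustrative sketch (not part of the pmap API): to visit every
 * <pmap,VA> pair a managed page is mapped at, walk its pv list with
 * the pv_head lock held.  "pvh" is assumed to have been looked up by
 * the caller for the page in question:
 *
 *	struct pv_entry *pve;
 *
 *	simple_lock(&pvh->pvh_lock);
 *	for (pve = pvh->pvh_list; pve != NULL; pve = pve->pv_next)
 *		... use pve->pv_pmap and pve->pv_va here ...
 *	simple_unlock(&pvh->pvh_lock);
 */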
/*
 * pmap_remove_record: a record of VAs that have been unmapped, used to
 * flush the TLB.  if we have more than PMAP_RR_MAX then we stop recording.
 */

#define PMAP_RR_MAX	16	/* max of 16 pages (64K) */

struct pmap_remove_record {
	int prr_npages;
	vaddr_t prr_vas[PMAP_RR_MAX];
};

/*
 * pmap_transfer_location: used to pass the current location in the
 * pmap between pmap_transfer and pmap_transfer_ptes [e.g. during
 * a pmap_copy].
 */

struct pmap_transfer_location {
	vaddr_t addr;			/* the address (page-aligned) */
	pt_entry_t *pte;		/* the PTE that maps address */
	struct vm_page *ptp;		/* the PTP that the PTE lives in */
};

/*
 * global kernel variables
 */

/* PTDpaddr: the physical address of the kernel's PDP */
extern u_long PTDpaddr;

extern struct pmap kernel_pmap_store;	/* kernel pmap */
extern int nkpde;			/* current # of PDEs for kernel */
extern int pmap_pg_g;			/* do we support PG_G? */

/*
 * macros
 */

#define pmap_kernel()			(&kernel_pmap_store)
#define pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
#ifdef SH4
#define pmap_update()			(cacheflush(), tlbflush())
#else
#define pmap_update()			tlbflush()
#endif

#define pmap_clear_modify(pg)		pmap_change_attrs(pg, 0, PG_M)
#define pmap_clear_reference(pg)	pmap_change_attrs(pg, 0, PG_U)
#define pmap_copy(DP,SP,D,L,S)		pmap_transfer(DP,SP,D,L,S, FALSE)
#define pmap_is_modified(pg)		pmap_test_attrs(pg, PG_M)
#ifdef notyet
#define pmap_is_referenced(pg)		pmap_test_attrs(pg, PG_U)
#else
#define pmap_is_referenced(pg)		1
#endif
#define pmap_move(DP,SP,D,L,S)		pmap_transfer(DP,SP,D,L,S, TRUE)
#define pmap_phys_address(ppn)		sh3_ptob(ppn)
#define pmap_valid_entry(E)		((E) & PG_V) /* is PDE or PTE valid? */


/*
 * prototypes
 */

void		pmap_activate __P((struct proc *));
void		pmap_bootstrap __P((vaddr_t));
boolean_t	pmap_change_attrs __P((struct vm_page *, int, int));
void		pmap_deactivate __P((struct proc *));
static void	pmap_page_protect __P((struct vm_page *, vm_prot_t));
void		pmap_page_remove __P((struct vm_page *));
static void	pmap_protect __P((struct pmap *, vaddr_t,
				  vaddr_t, vm_prot_t));
void		pmap_remove __P((struct pmap *, vaddr_t, vaddr_t));
boolean_t	pmap_test_attrs __P((struct vm_page *, int));
void		pmap_transfer __P((struct pmap *, struct pmap *, vaddr_t,
				   vsize_t, vaddr_t, boolean_t));
static void	pmap_update_pg __P((vaddr_t));
static void	pmap_update_2pg __P((vaddr_t, vaddr_t));
void		pmap_write_protect __P((struct pmap *, vaddr_t,
					vaddr_t, vm_prot_t));

vaddr_t reserve_dumppages __P((vaddr_t)); /* XXX: not a pmap fn */

#define PMAP_GROWKERNEL		/* turn on pmap_growkernel interface */

/*
 * Alternate mapping hooks for pool pages.  Avoids thrashing the TLB.
 */
#define	PMAP_MAP_POOLPAGE(pa)	SH3_PHYS_TO_P1SEG((pa))
#define	PMAP_UNMAP_POOLPAGE(va)	SH3_P1SEG_TO_PHYS((va))

/*
 * inline functions
 */

/*
 * pmap_update_pg: flush one page from the TLB
 */

__inline static void
pmap_update_pg(va)
	vaddr_t va;
{
#ifdef SH4
#if 1
	tlbflush();
	cacheflush();
#else
	u_int32_t *addr, data;

	addr = (void *)(0xf6000080 | (va & 0x00003f00));	/* 13-8 */
	data = (0x00000000 | (va & 0xfffff000));		/* 31-12 */
	*addr = data;
#endif
#else
	u_int32_t *addr, data;

	addr = (void *)(0xf2000080 | (va & 0x0001f000));	/* 16-12 */
	data = (0x00000000 | (va & 0xfffe0c00));		/* 31-17, 11-10 */

	*addr = data;
#endif
}

/*
 * pmap_update_2pg: flush two pages from the TLB
 */

__inline static void
pmap_update_2pg(va, vb)
	vaddr_t va, vb;
{
#ifdef SH4
	tlbflush();
	cacheflush();
#else
	pmap_update_pg(va);
	pmap_update_pg(vb);
#endif
}
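/*
 * illustrative sketch (not part of the pmap API): a typical pattern
 * for the pieces above is to edit a PTE through the recursive mapping
 * and then flush the stale TLB entry for that VA, e.g. to
 * write-protect one kernel page ("va" is assumed to be a page-aligned
 * KVA owned by the caller):
 *
 *	pt_entry_t *pte = kvtopte(va);
 *
 *	if (pmap_valid_entry(*pte)) {
 *		*pte = (*pte & ~PG_RW) | PG_RO;
 *		pmap_update_pg(va);
 *	}
 */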
/*
 * pmap_page_protect: change the protection of all recorded mappings
 *	of a managed page
 *
 * => this function is a frontend for pmap_page_remove/pmap_change_attrs
 * => we only have to worry about making the page more protected.
 *	unprotecting a page is done on-demand at fault time.
 */

__inline static void
pmap_page_protect(pg, prot)
	struct vm_page *pg;
	vm_prot_t prot;
{
	if ((prot & VM_PROT_WRITE) == 0) {
		if (prot & (VM_PROT_READ|VM_PROT_EXECUTE)) {
			(void) pmap_change_attrs(pg, PG_RO, PG_RW);
		} else {
			pmap_page_remove(pg);
		}
	}
}

/*
 * pmap_protect: change the protection of pages in a pmap
 *
 * => this function is a frontend for pmap_remove/pmap_write_protect
 * => we only have to worry about making the page more protected.
 *	unprotecting a page is done on-demand at fault time.
 */

__inline static void
pmap_protect(pmap, sva, eva, prot)
	struct pmap *pmap;
	vaddr_t sva, eva;
	vm_prot_t prot;
{
	if ((prot & VM_PROT_WRITE) == 0) {
		if (prot & (VM_PROT_READ|VM_PROT_EXECUTE)) {
			pmap_write_protect(pmap, sva, eva, prot);
		} else {
			pmap_remove(pmap, sva, eva);
		}
	}
}

vaddr_t	pmap_map __P((vaddr_t, paddr_t, paddr_t, vm_prot_t));
paddr_t	vtophys __P((vaddr_t));
void	pmap_emulate_reference __P((struct proc *, vaddr_t, int, int));

/* XXX */
#define PG_U	0	/* referenced bit */

#endif /* _KERNEL */
#endif /* _SH3_PMAP_H_ */