/*	$NetBSD: pmap.h,v 1.21 2002/04/04 18:12:23 uch Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgment:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * pmap.h: see pmap.c for the history of this pmap module.
 */

#ifndef _SH3_PMAP_H_
#define _SH3_PMAP_H_

#include <sh3/cache.h>
#include <sh3/psl.h>
#include <sh3/pte.h>
#include <uvm/uvm_object.h>

/*
 * see pte.h for a description of i386 MMU terminology and hardware
 * interface.
 *
 * a pmap describes a process's 4GB virtual address space.  this
 * virtual address space can be broken up into 1024 4MB regions which
 * are described by PDEs in the PDP.  the PDEs are defined as follows:
 *
 * (ranges are inclusive -> exclusive, just like vm_map_entry start/end)
 * (the following assumes that KERNBASE is 0xf0000000)
 *
 * PDE#s	VA range		usage
 * 0->959	0x0 -> 0xefc00000	user address space; note that the
 *					max user address is 0xefbfe000.
 *					the final two pages in the last 4MB
 *					used to be reserved for the UAREA
 *					but are no longer used.
 * 959		0xefc00000->		recursive mapping of PDP (used for
 *		0xf0000000		linear mapping of PTPs)
 * 960->1023	0xf0000000->		kernel address space (constant
 *		0xffc00000		across all pmaps/processes)
 * 1023		0xffc00000->		"alternate" recursive PDP mapping
 *		<end>			(for other pmaps)
 *
 *
 * note: a recursive PDP mapping provides a way to map all the PTEs for
 * a 4GB address space into a linear chunk of virtual memory.  in other
 * words, the PTE for page 0 is the first int mapped into the 4MB recursive
 * area.  the PTE for page 1 is the second int.  the very last int in the
 * 4MB range is the PTE that maps VA 0xffffe000 (the last page in a 4GB
 * address space).
 *
 * all pmaps' PDs must have the same values in slots 960->1023 so that
 * the kernel is always mapped in every process.  these values are loaded
 * into the PD at pmap creation time.
 *
 * at any one time only one pmap can be active on a processor.  this is
 * the pmap whose PDP is pointed to by processor register %cr3.  this pmap
 * will have all its PTEs mapped into memory at the recursive mapping
 * point (slot #959 as shown above).  when the pmap code wants to find the
 * PTE for a virtual address, all it has to do is the following:
 *
 * address of PTE = (959 * 4MB) + (VA / NBPG) * sizeof(pt_entry_t)
 *                = 0xefc00000 + (VA / 4096) * 4
 *
 * (a short illustrative sketch of this calculation follows this comment.)
 *
 * what happens if the pmap layer is asked to perform an operation
 * on a pmap that is not the one which is currently active?  in that
 * case we take the PA of the PDP of the non-active pmap and put it in
 * slot 1023 of the active pmap.  this causes the non-active pmap's
 * PTEs to get mapped in the final 4MB of the 4GB address space
 * (e.g. starting at 0xffc00000).
 *
 * the following figure shows the effects of the recursive PDP mapping:
 *
 *   PDP (%cr3)
 *   +----+
 *   |   0| -> PTP#0 that maps VA 0x0 -> 0x400000
 *   |    |
 *   |    |
 *   | 959| -> points back to PDP (%cr3) mapping VA 0xefc00000 -> 0xf0000000
 *   | 960| -> first kernel PTP (maps 0xf0000000 -> 0xf0400000)
 *   |    |
 *   |1023| -> points to alternate pmap's PDP (maps 0xffc00000 -> end)
 *   +----+
 *
 * note that the PDE#959 VA (0xefc00000) is defined as "PTE_BASE"
 * note that the PDE#1023 VA (0xffc00000) is defined as "APTE_BASE"
 *
 * starting at VA 0xefc00000 the current active PDP (%cr3) acts as a
 * PTP:
 *
 *   PTP#959 == PDP(%cr3) => maps VA 0xefc00000 -> 0xf0000000
 *   +----+
 *   |   0| -> maps the contents of PTP#0 at VA 0xefc00000->0xefc01000
 *   |    |
 *   |    |
 *   | 959| -> maps contents of PTP#959 (the PDP) at VA 0xeffbf000
 *   | 960| -> maps contents of first kernel PTP
 *   |    |
 *   |1023|
 *   +----+
 *
 * note that the mapping of the PDP at PTP#959's VA (0xeffbf000) is
 * defined as "PDP_BASE"....  within that mapping there are two
 * defines:
 *   "PDP_PDE" (0xeffbfefc) is the VA of the PDE in the PDP
 *      which points back to itself.
 *   "APDP_PDE" (0xeffbfffc) is the VA of the PDE in the PDP which
 *      establishes the recursive mapping of the alternate pmap.
 *      to set the alternate PDP, one just has to put the correct
 *      PA info in *APDP_PDE.
 *
 * note that in the APTE_BASE space, the APDP appears at VA
 * "APDP_BASE" (0xfffff000).
 */

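/*
 * The following is a minimal illustrative sketch (not compiled, and not
 * part of the pmap interface) of the PTE-address calculation described
 * in the comment above.  The function name is hypothetical and the
 * constants are the example values used above (recursive slot 959,
 * 4MB per PDE, 4KB pages); the real lookup is done with the vtopte()
 * macro defined below.
 */
#if 0
static __inline pt_entry_t *
example_va_to_pte(vaddr_t va)
{
	/* address of PTE = (959 * 4MB) + (VA / NBPG) * sizeof(pt_entry_t) */
	return ((pt_entry_t *)(959UL * 0x400000UL)) + (va / 4096);
}
#endif
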
/*
 * the following defines identify the slots used as described above.
 */

#define PDSLOT_PTE	((u_int)0x33f)	/* PTDPTDI for recursive PDP map */
#define PDSLOT_KERN	((u_int)0x340)	/* KPTDI start of kernel space */
#define PDSLOT_APTE	((u_int)0x37f)	/* alternative recursive slot */

/*
 * the following defines give the virtual addresses of various MMU
 * data structures:
 *   PTE_BASE and APTE_BASE: the base VA of the linear PTE mappings
 *   PDP_BASE and APDP_BASE: the base VA of the recursive mapping of the PDP
 *   PDP_PDE and APDP_PDE: the VA of the PDE that points back to the PDP/APDP
 */

#define PTE_BASE	((pt_entry_t *) (PDSLOT_PTE * NBPD) )
#define APTE_BASE	((pt_entry_t *) (PDSLOT_APTE * NBPD) )
#define PDP_BASE	((pd_entry_t *)(((char *)PTE_BASE) + (PDSLOT_PTE * NBPG)))
#define APDP_BASE	((pd_entry_t *)(((char *)APTE_BASE) + (PDSLOT_APTE * NBPG)))
#define PDP_PDE		(PDP_BASE + PDSLOT_PTE)
#define APDP_PDE	(PDP_BASE + PDSLOT_APTE)

/*
 * XXXCDC: tmp xlate from old names:
 *   PTDPTDI -> PDSLOT_PTE
 *   KPTDI -> PDSLOT_KERN
 *   APTDPTDI -> PDSLOT_APTE
 */

/*
 * the following define determines how many PTPs should be set up for the
 * kernel by locore.s at boot time.  this should be large enough to
 * get the VM system running.  once the VM system is running, the
 * pmap module can add more PTPs to the kernel area on demand.
 */

#ifndef NKPTP
#define NKPTP		8	/* 32MB to start */
#endif
#define NKPTP_MIN	8	/* smallest value we allow */
#define NKPTP_MAX	63	/* (1024 - (0xd0000000/NBPD) - 1) */
				/* largest value (-1 for APTP space) */

/*
 * various address macros
 *
 *   vtopte: return a pointer to the PTE mapping a VA
 *   kvtopte: same as above (takes a KVA, but doesn't matter with this pmap)
 *   ptetov: given a pointer to a PTE, return the VA that it maps
 *   vtophys: translate a VA to the PA mapped to it
 *
 * plus alternative versions of the above
 */

#define vtopte(VA)	(PTE_BASE + sh3_btop(VA))
#define kvtopte(VA)	vtopte(VA)
#define ptetov(PT)	(sh3_ptob(PT - PTE_BASE))
#define avtopte(VA)	(APTE_BASE + sh3_btop(VA))
#define ptetoav(PT)	(sh3_ptob(PT - APTE_BASE))
#define avtophys(VA)	((*avtopte(VA) & PG_FRAME) | \
			 ((unsigned)(VA) & ~PG_FRAME))

/*
 * pdei/ptei: generate index into PDP/PTP from a VA
 */
#define pdei(VA)	(((VA) & PD_MASK) >> PDSHIFT)
#define ptei(VA)	(((VA) & PT_MASK) >> PGSHIFT)

/*
 * PTP macros:
 *   a PTP's index is the PD index of the PDE that points to it
 *   a PTP's offset is the byte-offset in the PTE space that this PTP is at
 *   a PTP's VA is the first VA mapped by that PTP
 *
 * note that NBPG == number of bytes in a PTP (4096 bytes == 1024 entries)
 *           NBPD == number of bytes a PTP can map (4MB)
 */

#define ptp_i2o(I)	((I) * NBPG)	/* index => offset */
#define ptp_o2i(O)	((O) / NBPG)	/* offset => index */
#define ptp_i2v(I)	((I) * NBPD)	/* index => VA */
#define ptp_v2i(V)	((V) / NBPD)	/* VA => index (same as pdei) */

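/*
 * A minimal sketch (not compiled; the helper name is hypothetical) of
 * how the macros above fit together: the PDE for a VA is fetched
 * through the recursive mapping at PDP_BASE and checked for validity
 * before the linear PTE area is dereferenced via vtopte().
 */
#if 0
static __inline pt_entry_t *
example_active_pte_lookup(vaddr_t va)
{
	pd_entry_t pde = PDP_BASE[pdei(va)];	/* PDE of the active pmap */

	if ((pde & PG_V) == 0)		/* no PTP mapped => no PTE to read */
		return (NULL);
	return (vtopte(va));		/* i.e. &PTE_BASE[sh3_btop(va)] */
}
#endif
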
#ifdef _KERNEL
/*
 * pmap data structures: see pmap.c for details of locking.
 */

struct pmap;
typedef struct pmap *pmap_t;

/*
 * we maintain a list of all non-kernel pmaps
 */

LIST_HEAD(pmap_head, pmap); /* struct pmap_head: head of a pmap list */

/*
 * the pmap structure
 *
 * note that the pm_obj contains the simple_lock, the reference count,
 * page list, and number of PTPs within the pmap.
 */

struct pmap {
	struct uvm_object pm_obj;	/* object (lck by object lock) */
#define	pm_lock	pm_obj.vmobjlock
	LIST_ENTRY(pmap) pm_list;	/* list (lck by pm_list lock) */
	pd_entry_t *pm_pdir;		/* VA of PD (lck by object lock) */
	u_int32_t pm_pdirpa;		/* PA of PD (read-only after create) */
	struct vm_page *pm_ptphint;	/* pointer to a PTP in our pmap */
	struct pmap_statistics pm_stats; /* pmap stats (lck by object lock) */

	int pm_flags;			/* see below */
};

/* pm_flags */
#define PMF_USER_LDT	0x01	/* pmap has user-set LDT */

/*
 * for each managed physical page we maintain a list of <PMAP,VA>'s
 * which it is mapped at.  the list is headed by a pv_head structure.
 * there is one pv_head per managed phys page (allocated at boot time).
 * the pv_head structure points to a list of pv_entry structures (each
 * describes one mapping).
 */

struct pv_entry;

struct pv_head {
	struct simplelock pvh_lock;	/* locks every pv on this list */
	struct pv_entry *pvh_list;	/* head of list (locked by pvh_lock) */
};

/* These are kept in the vm_physseg array. */
#define PGA_REFERENCED	0x01	/* page is referenced */
#define PGA_MODIFIED	0x02	/* page is modified */

struct pv_entry {			/* locked by its list's pvh_lock */
	struct pv_entry *pv_next;	/* next entry */
	struct pmap *pv_pmap;		/* the pmap */
	vaddr_t pv_va;			/* the virtual address */
	struct vm_page *pv_ptp;		/* the vm_page of the PTP */
};

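/*
 * Illustrative sketch only (the helper name is hypothetical): walking
 * the pv list of a managed page.  As noted above, every pv_entry on the
 * list is protected by the pv_head's pvh_lock.
 */
#if 0
static __inline int
example_count_mappings(struct pv_head *pvh)
{
	struct pv_entry *pve;
	int n = 0;

	simple_lock(&pvh->pvh_lock);
	for (pve = pvh->pvh_list; pve != NULL; pve = pve->pv_next)
		n++;			/* one <pmap, va> pair per pv_entry */
	simple_unlock(&pvh->pvh_lock);

	return (n);
}
#endif
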
/*
 * pv_entrys are dynamically allocated in chunks from a single page.
 * we keep track of how many pv_entrys are in use for each page and
 * we can free pv_entry pages if needed.  there is one lock for the
 * entire allocation system.
 */

struct pv_page_info {
	TAILQ_ENTRY(pv_page) pvpi_list;
	struct pv_entry *pvpi_pvfree;
	int pvpi_nfree;
};

/*
 * number of pv_entry's in a pv_page
 * (note: won't work on systems where NBPG isn't a constant)
 */

#define PVE_PER_PVPAGE ((NBPG - sizeof(struct pv_page_info)) / \
			sizeof(struct pv_entry))

/*
 * a pv_page: where pv_entrys are allocated from
 */

struct pv_page {
	struct pv_page_info pvinfo;
	struct pv_entry pvents[PVE_PER_PVPAGE];
};

/*
 * pmap_remove_record: a record of VAs that have been unmapped, used to
 * flush the TLB.  if we have more than PMAP_RR_MAX then we stop recording.
 */

#define PMAP_RR_MAX	16	/* max of 16 pages (64K) */

struct pmap_remove_record {
	int prr_npages;
	vaddr_t prr_vas[PMAP_RR_MAX];
};

/*
 * global kernel variables
 */

/* PTDpaddr: is the physical address of the kernel's PDP */
extern u_long PTDpaddr;

extern struct pmap kernel_pmap_store;	/* kernel pmap */
extern int nkpde;			/* current # of PDEs for kernel */
extern int pmap_pg_g;			/* do we support PG_G? */

/*
 * macros
 */

#define pmap_kernel()			(&kernel_pmap_store)
#define pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
#define pmap_wired_count(pmap)		((pmap)->pm_stats.wired_count)
#define pmap_update(pmap)		/* nothing (yet) */

#define pmap_is_referenced(pg)		pmap_test_attrs(pg, PGA_REFERENCED)
#define pmap_is_modified(pg)		pmap_test_attrs(pg, PGA_MODIFIED)

#define pmap_copy(DP,SP,D,L,S)
#define pmap_move(DP,SP,D,L,S)
#define pmap_phys_address(ppn)		sh3_ptob(ppn)
#define pmap_valid_entry(E)		((E) & PG_V) /* is PDE or PTE valid? */

/*
 * prototypes
 */

void		pmap_activate(struct proc *);
void		pmap_bootstrap(vaddr_t);
boolean_t	pmap_change_attrs(struct vm_page *, int, int);
void		pmap_deactivate(struct proc *);
void		pmap_page_remove(struct vm_page *);
void		pmap_protect(struct pmap *, vaddr_t, vaddr_t, vm_prot_t);
void		pmap_remove(struct pmap *, vaddr_t, vaddr_t);
boolean_t	pmap_test_attrs(struct vm_page *, int);
void		pmap_update_pg(vaddr_t);
void		pmap_update_2pg(vaddr_t, vaddr_t);
void		pmap_write_protect(struct pmap *, vaddr_t, vaddr_t, vm_prot_t);

vaddr_t		reserve_dumppages(vaddr_t); /* XXX: not a pmap fn */

#define PMAP_GROWKERNEL	/* turn on pmap_growkernel interface */

/*
 * Alternate mapping hooks for pool pages.  Avoids thrashing the TLB.
 */
/*
 * XXX Indeed, first, we should refine the physical address vs. virtual
 * address mapping.
 * See
 *	uvm_km.c:uvm_km_free_poolpage1,
 *	vm_page.h:PHYS_TO_VM_PAGE, vm_physseg_find
 *	machdep.c:pmap_bootstrap (uvm_page_physload, etc)
 */
/* XXX broken */
#define PMAP_MAP_POOLPAGE(pa)	(pa)
#define PMAP_UNMAP_POOLPAGE(va)	(va)

vaddr_t	pmap_map(vaddr_t, paddr_t, paddr_t, vm_prot_t);
paddr_t	vtophys(vaddr_t);
void	pmap_emulate_reference(struct proc *, vaddr_t, int, int);

#endif /* _KERNEL */
#endif /* _SH3_PMAP_H_ */