pmap.c revision 15088
/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from:	@(#)pmap.c	7.7 (Berkeley)	5/12/91
 *	$Id: pmap.c,v 1.84 1996/03/31 23:00:32 davidg Exp $
 */

/*
 * Derived from hp300 version by Mike Hibler, this version by William
 * Jolitz uses a recursive map [a pde points to the page directory] to
 * map the page tables using the pagetables themselves. This is done to
 * reduce the impact on kernel virtual memory for lots of sparse address
 * space, and to reduce the cost of memory to each process.
 *
 *	Derived from: hp300/@(#)pmap.c	7.1 (Berkeley) 12/5/90
 */

/*
 * Manages physical address maps.
 *
 * In addition to hardware address maps, this
 * module is called upon to provide software-use-only
 * maps which may or may not be stored in the same
 * form as hardware maps.  These pseudo-maps are
 * used to store intermediate results from copy
 * operations to and from address spaces.
 *
 * Since the information managed by this module is
 * also stored by the logical address mapping module,
 * this module may throw away valid virtual-to-physical
 * mappings at almost any time.  However, invalidations
 * of virtual-to-physical mappings must be done as
 * requested.
 *
 * In order to cope with hardware architectures which
 * make virtual-to-physical map invalidates expensive,
 * this module may delay invalidate or reduced protection
 * operations until such time as they are actually
 * necessary.  This module is given full information as
 * to which processors are currently using which maps,
 * and to when physical maps must be made correct.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/msgbuf.h>
#include <sys/queue.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <vm/lock.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>

#include <machine/pcb.h>
#include <machine/cputypes.h>
#include <machine/md_var.h>

#include <i386/isa/isa.h>

#define PMAP_KEEP_PDIRS

#if defined(DIAGNOSTIC)
#define PMAP_DIAGNOSTIC
#endif

static void init_pv_entries __P((int));

/*
 * Get PDEs and PTEs for user/kernel address space
 */
#define pmap_pde(m, v)	(&((m)->pm_pdir[((vm_offset_t)(v) >> PD_SHIFT)&1023]))
#define pdir_pde(m, v)	(m[((vm_offset_t)(v) >> PD_SHIFT)&1023])

#define pmap_pte_pa(pte)	(*(int *)(pte) & PG_FRAME)

#define pmap_pde_v(pte)		((*(int *)pte & PG_V) != 0)
#define pmap_pte_w(pte)		((*(int *)pte & PG_W) != 0)
#define pmap_pte_m(pte)		((*(int *)pte & PG_M) != 0)
#define pmap_pte_u(pte)		((*(int *)pte & PG_U) != 0)
#define pmap_pte_v(pte)		((*(int *)pte & PG_V) != 0)

#define pmap_pte_set_w(pte, v)		((v)?(*(int *)pte |= PG_W):(*(int *)pte &= ~PG_W))
#define pmap_pte_set_prot(pte, v)	((*(int *)pte &= ~PG_PROT), (*(int *)pte |= (v)))

/*
 * Given a map and a machine independent protection code,
 * convert to a vax protection code.
 */
#define pte_prot(m, p)	(protection_codes[p])
static int protection_codes[8];

static struct pmap kernel_pmap_store;
pmap_t kernel_pmap;

vm_offset_t avail_start;	/* PA of first available physical page */
vm_offset_t avail_end;		/* PA of last available physical page */
vm_offset_t virtual_avail;	/* VA of first avail page (after kernel bss) */
vm_offset_t virtual_end;	/* VA of last avail page (end of kernel AS) */
static boolean_t pmap_initialized = FALSE;	/* Has pmap_init completed?
*/ 145static vm_offset_t vm_first_phys; 146 147static int nkpt; 148 149extern vm_offset_t clean_sva, clean_eva; 150extern int cpu_class; 151 152/* 153 * All those kernel PT submaps that BSD is so fond of 154 */ 155pt_entry_t *CMAP1; 156static pt_entry_t *CMAP2, *ptmmap; 157static pv_entry_t pv_table; 158caddr_t CADDR1, ptvmmap; 159static caddr_t CADDR2; 160static pt_entry_t *msgbufmap; 161struct msgbuf *msgbufp; 162 163static void free_pv_entry __P((pv_entry_t pv)); 164pt_entry_t * 165 get_ptbase __P((pmap_t pmap)); 166static pv_entry_t 167 get_pv_entry __P((void)); 168static void i386_protection_init __P((void)); 169static void pmap_alloc_pv_entry __P((void)); 170static void pmap_changebit __P((vm_offset_t pa, int bit, boolean_t setem)); 171static void pmap_enter_quick __P((pmap_t pmap, vm_offset_t va, 172 vm_offset_t pa)); 173static int pmap_is_managed __P((vm_offset_t pa)); 174static void pmap_remove_all __P((vm_offset_t pa)); 175static void pmap_remove_page __P((struct pmap *pmap, vm_offset_t va)); 176static __inline void pmap_remove_entry __P((struct pmap *pmap, pv_entry_t pv, 177 vm_offset_t va)); 178static void pmap_remove_pte __P((struct pmap *pmap, pt_entry_t *ptq, 179 vm_offset_t sva)); 180static vm_page_t 181 pmap_pte_vm_page __P((pmap_t pmap, vm_offset_t pt)); 182static boolean_t 183 pmap_testbit __P((vm_offset_t pa, int bit)); 184static void * pmap_getpdir __P((void)); 185 186 187#if defined(PMAP_DIAGNOSTIC) 188 189/* 190 * This code checks for non-writeable/modified pages. 191 * This should be an invalid condition. 192 */ 193static int 194pmap_nw_modified(pt_entry_t ptea) { 195 int pte; 196 197 pte = (int) ptea; 198 199 if ((pte & (PG_M|PG_RW)) == PG_M) 200 return 1; 201 else 202 return 0; 203} 204#endif 205 206/* 207 * The below are finer grained pmap_update routines. These eliminate 208 * the gratuitious tlb flushes on non-i386 architectures. 209 */ 210static __inline void 211pmap_update_1pg( vm_offset_t va) { 212#if defined(I386_CPU) 213 if (cpu_class == CPUCLASS_386) 214 pmap_update(); 215 else 216#endif 217 __asm __volatile(".byte 0xf,0x1,0x38": :"a" (va)); 218} 219 220static __inline void 221pmap_update_2pg( vm_offset_t va1, vm_offset_t va2) { 222#if defined(I386_CPU) 223 if (cpu_class == CPUCLASS_386) { 224 pmap_update(); 225 } else 226#endif 227 { 228 __asm __volatile(".byte 0xf,0x1,0x38": :"a" (va1)); 229 __asm __volatile(".byte 0xf,0x1,0x38": :"a" (va2)); 230 } 231} 232 233/* 234 * Routine: pmap_pte 235 * Function: 236 * Extract the page table entry associated 237 * with the given map/virtual_address pair. 238 * [ what about induced faults -wfj] 239 */ 240 241__inline pt_entry_t * __pure 242pmap_pte(pmap, va) 243 register pmap_t pmap; 244 vm_offset_t va; 245{ 246 247 if (pmap && *pmap_pde(pmap, va)) { 248 vm_offset_t frame = (int) pmap->pm_pdir[PTDPTDI] & PG_FRAME; 249 250 /* are we current address space or kernel? */ 251 if ((pmap == kernel_pmap) || (frame == ((int) PTDpde & PG_FRAME))) 252 return ((pt_entry_t *) vtopte(va)); 253 /* otherwise, we are alternate address space */ 254 else { 255 if (frame != ((int) APTDpde & PG_FRAME)) { 256 APTDpde = pmap->pm_pdir[PTDPTDI]; 257 pmap_update(); 258 } 259 return ((pt_entry_t *) avtopte(va)); 260 } 261 } 262 return (0); 263} 264 265/* 266 * Routine: pmap_extract 267 * Function: 268 * Extract the physical page address associated 269 * with the given map/virtual_address pair. 
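 *
 *	The result is composed from the page frame found in the pte plus
 *	the page offset taken from the virtual address, i.e.
 *	(*pte & PG_FRAME) | (va & ~PG_FRAME).  Illustrative example
 *	(addresses hypothetical): for va 0xefbfe123 whose pte holds
 *	0x00345067, the routine returns 0x00345123.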
270 */ 271 272vm_offset_t 273pmap_extract(pmap, va) 274 register pmap_t pmap; 275 vm_offset_t va; 276{ 277 vm_offset_t pa; 278 279 if (pmap && *pmap_pde(pmap, va)) { 280 vm_offset_t frame = (int) pmap->pm_pdir[PTDPTDI] & PG_FRAME; 281 282 /* are we current address space or kernel? */ 283 if ((pmap == kernel_pmap) 284 || (frame == ((int) PTDpde & PG_FRAME))) { 285 pa = *(int *) vtopte(va); 286 /* otherwise, we are alternate address space */ 287 } else { 288 if (frame != ((int) APTDpde & PG_FRAME)) { 289 APTDpde = pmap->pm_pdir[PTDPTDI]; 290 pmap_update(); 291 } 292 pa = *(int *) avtopte(va); 293 } 294 return ((pa & PG_FRAME) | (va & ~PG_FRAME)); 295 } 296 return 0; 297 298} 299 300/* 301 * determine if a page is managed (memory vs. device) 302 */ 303static __inline int 304pmap_is_managed(pa) 305 vm_offset_t pa; 306{ 307 int i; 308 309 if (!pmap_initialized) 310 return 0; 311 312 for (i = 0; phys_avail[i + 1]; i += 2) { 313 if (pa < phys_avail[i + 1] && pa >= phys_avail[i]) 314 return 1; 315 } 316 return 0; 317} 318 319vm_page_t 320pmap_use_pt(pmap, va) 321 pmap_t pmap; 322 vm_offset_t va; 323{ 324 vm_offset_t ptepa; 325 vm_page_t mpte; 326 327 if (va >= UPT_MIN_ADDRESS) 328 return NULL; 329 330 ptepa = ((vm_offset_t) *pmap_pde(pmap, va)) & PG_FRAME; 331#if defined(PMAP_DIAGNOSTIC) 332 if (!ptepa) 333 panic("pmap_use_pt: pagetable page missing, va: 0x%x", va); 334#endif 335 336 mpte = PHYS_TO_VM_PAGE(ptepa); 337 ++mpte->hold_count; 338 return mpte; 339} 340 341#if !defined(PMAP_DIAGNOSTIC) 342__inline 343#endif 344void 345pmap_unuse_pt(pmap, va, mpte) 346 pmap_t pmap; 347 vm_offset_t va; 348 vm_page_t mpte; 349{ 350 if (va >= UPT_MIN_ADDRESS) 351 return; 352 353 if (mpte == NULL) { 354 vm_offset_t ptepa; 355 ptepa = ((vm_offset_t) *pmap_pde(pmap, va)) & PG_FRAME; 356#if defined(PMAP_DIAGNOSTIC) 357 if (!ptepa) 358 panic("pmap_unuse_pt: pagetable page missing, va: 0x%x", va); 359#endif 360 mpte = PHYS_TO_VM_PAGE(ptepa); 361 } 362 363#if defined(PMAP_DIAGNOSTIC) 364 if (mpte->hold_count == 0) { 365 panic("pmap_unuse_pt: hold count < 0, va: 0x%x", va); 366 } 367#endif 368 369 vm_page_unhold(mpte); 370 371 if ((mpte->hold_count == 0) && 372 (mpte->wire_count == 0) && 373 (pmap != kernel_pmap) && 374 (va < KPT_MIN_ADDRESS)) { 375/* 376 * We don't free page-table-pages anymore because it can have a negative 377 * impact on perf at times. Now we just deactivate, and it'll get cleaned 378 * up if needed... Also, if the page ends up getting used, it will fault 379 * back into the process address space and be reactivated. 380 */ 381#if defined(PMAP_FREE_OLD_PTES) 382 pmap_page_protect(VM_PAGE_TO_PHYS(mpte), VM_PROT_NONE); 383 vm_page_free(mpte); 384#else 385 mpte->dirty = 0; 386 vm_page_deactivate(mpte); 387#endif 388 } 389} 390 391/* 392 * Bootstrap the system enough to run with virtual memory. 393 * 394 * On the i386 this is called after mapping has already been enabled 395 * and just syncs the pmap module with what has already been done. 396 * [We can't call it easily with mapping off since the kernel is not 397 * mapped with PA == VA, hence we would have to relocate every address 398 * from the linked base (virtual) address "KERNBASE" to the actual 399 * (physical) address starting relative to 0] 400 */ 401void 402pmap_bootstrap(firstaddr, loadaddr) 403 vm_offset_t firstaddr; 404 vm_offset_t loadaddr; 405{ 406 vm_offset_t va; 407 pt_entry_t *pte; 408 409 avail_start = firstaddr; 410 411 /* 412 * XXX The calculation of virtual_avail is wrong. It's NKPT*PAGE_SIZE too 413 * large. 
It should instead be correctly calculated in locore.s and 414 * not based on 'first' (which is a physical address, not a virtual 415 * address, for the start of unused physical memory). The kernel 416 * page tables are NOT double mapped and thus should not be included 417 * in this calculation. 418 */ 419 virtual_avail = (vm_offset_t) KERNBASE + firstaddr; 420 virtual_end = VM_MAX_KERNEL_ADDRESS; 421 422 /* 423 * Initialize protection array. 424 */ 425 i386_protection_init(); 426 427 /* 428 * The kernel's pmap is statically allocated so we don't have to use 429 * pmap_create, which is unlikely to work correctly at this part of 430 * the boot sequence (XXX and which no longer exists). 431 */ 432 kernel_pmap = &kernel_pmap_store; 433 434 kernel_pmap->pm_pdir = (pd_entry_t *) (KERNBASE + IdlePTD); 435 436 kernel_pmap->pm_count = 1; 437 nkpt = NKPT; 438 439 /* 440 * Reserve some special page table entries/VA space for temporary 441 * mapping of pages. 442 */ 443#define SYSMAP(c, p, v, n) \ 444 v = (c)va; va += ((n)*PAGE_SIZE); p = pte; pte += (n); 445 446 va = virtual_avail; 447 pte = pmap_pte(kernel_pmap, va); 448 449 /* 450 * CMAP1/CMAP2 are used for zeroing and copying pages. 451 */ 452 SYSMAP(caddr_t, CMAP1, CADDR1, 1) 453 SYSMAP(caddr_t, CMAP2, CADDR2, 1) 454 455 /* 456 * ptmmap is used for reading arbitrary physical pages via /dev/mem. 457 */ 458 SYSMAP(caddr_t, ptmmap, ptvmmap, 1) 459 460 /* 461 * msgbufmap is used to map the system message buffer. 462 */ 463 SYSMAP(struct msgbuf *, msgbufmap, msgbufp, 1) 464 465 virtual_avail = va; 466 467 *(int *) CMAP1 = *(int *) CMAP2 = *(int *) PTD = 0; 468 pmap_update(); 469} 470 471/* 472 * Initialize the pmap module. 473 * Called by vm_init, to initialize any structures that the pmap 474 * system needs to map virtual memory. 475 * pmap_init has been enhanced to support in a fairly consistant 476 * way, discontiguous physical memory. 477 */ 478void 479pmap_init(phys_start, phys_end) 480 vm_offset_t phys_start, phys_end; 481{ 482 vm_offset_t addr; 483 vm_size_t npg, s; 484 int i; 485 486 /* 487 * calculate the number of pv_entries needed 488 */ 489 vm_first_phys = phys_avail[0]; 490 for (i = 0; phys_avail[i + 1]; i += 2); 491 npg = (phys_avail[(i - 2) + 1] - vm_first_phys) / PAGE_SIZE; 492 493 /* 494 * Allocate memory for random pmap data structures. Includes the 495 * pv_head_table. 496 */ 497 s = (vm_size_t) (sizeof(struct pv_entry) * npg); 498 s = round_page(s); 499 addr = (vm_offset_t) kmem_alloc(kernel_map, s); 500 pv_table = (pv_entry_t) addr; 501 502 /* 503 * init the pv free list 504 */ 505 init_pv_entries(npg); 506 /* 507 * Now it is safe to enable pv_table recording. 508 */ 509 pmap_initialized = TRUE; 510} 511 512/* 513 * Used to map a range of physical addresses into kernel 514 * virtual address space. 515 * 516 * For now, VM is already on, we only need to map the 517 * specified memory. 
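 *
 * An illustrative (hypothetical) call from machine-dependent startup
 * code, mapping the physical range [pa, pa + size) at the next free
 * kernel virtual address:
 *
 *	va = pmap_map(virtual_avail, pa, pa + size,
 *	    VM_PROT_READ | VM_PROT_WRITE);
 *
 * The return value is the first virtual address beyond the mappings
 * just entered.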
518 */ 519vm_offset_t 520pmap_map(virt, start, end, prot) 521 vm_offset_t virt; 522 vm_offset_t start; 523 vm_offset_t end; 524 int prot; 525{ 526 while (start < end) { 527 pmap_enter(kernel_pmap, virt, start, prot, FALSE); 528 virt += PAGE_SIZE; 529 start += PAGE_SIZE; 530 } 531 return (virt); 532} 533 534#if defined(PMAP_KEEP_PDIRS) 535int nfreepdir; 536caddr_t *pdirlist; 537#define NFREEPDIR 3 538 539static void * 540pmap_getpdir() { 541 caddr_t *pdir; 542 if (pdirlist) { 543 --nfreepdir; 544 pdir = pdirlist; 545 pdirlist = (caddr_t *) *pdir; 546#if 0 /* Not needed anymore */ 547 bzero( (caddr_t) pdir, PAGE_SIZE); 548#endif 549 } else { 550 pdir = (caddr_t *) kmem_alloc(kernel_map, PAGE_SIZE); 551 } 552 553 return (void *) pdir; 554} 555 556static void 557pmap_freepdir(void *pdir) { 558 if (nfreepdir > NFREEPDIR) { 559 kmem_free(kernel_map, (vm_offset_t) pdir, PAGE_SIZE); 560 } else { 561 int i; 562 pt_entry_t *s; 563 s = (pt_entry_t *) pdir; 564 565 /* 566 * remove wired in kernel mappings 567 */ 568 bzero(s + KPTDI, nkpt * PTESIZE); 569 s[APTDPTDI] = 0; 570 s[PTDPTDI] = 0; 571 572#if defined(PMAP_DIAGNOSTIC) 573 for(i=0;i<PAGE_SIZE/4;i++,s++) { 574 if (*s) { 575 printf("pmap_freepdir: index %d not zero: %lx\n", i, *s); 576 } 577 } 578#endif 579 * (caddr_t *) pdir = (caddr_t) pdirlist; 580 pdirlist = (caddr_t *) pdir; 581 ++nfreepdir; 582 } 583} 584#endif 585 586/* 587 * Initialize a preallocated and zeroed pmap structure, 588 * such as one in a vmspace structure. 589 */ 590void 591pmap_pinit(pmap) 592 register struct pmap *pmap; 593{ 594 /* 595 * No need to allocate page table space yet but we do need a valid 596 * page directory table. 597 */ 598 599#if defined(PMAP_KEEP_PDIRS) 600 pmap->pm_pdir = pmap_getpdir(); 601#else 602 pmap->pm_pdir = (pd_entry_t *) kmem_alloc(kernel_map, PAGE_SIZE); 603#endif 604 605 /* wire in kernel global address entries */ 606 bcopy(PTD + KPTDI, pmap->pm_pdir + KPTDI, nkpt * PTESIZE); 607 608 /* install self-referential address mapping entry */ 609 *(int *) (pmap->pm_pdir + PTDPTDI) = 610 ((int) pmap_kextract((vm_offset_t) pmap->pm_pdir)) | PG_V | PG_KW; 611 612 pmap->pm_count = 1; 613} 614 615/* 616 * grow the number of kernel page table entries, if needed 617 */ 618 619static vm_page_t nkpg; 620vm_offset_t kernel_vm_end; 621 622void 623pmap_growkernel(vm_offset_t addr) 624{ 625 struct proc *p; 626 struct pmap *pmap; 627 int s; 628 629 s = splhigh(); 630 if (kernel_vm_end == 0) { 631 kernel_vm_end = KERNBASE; 632 nkpt = 0; 633 while (pdir_pde(PTD, kernel_vm_end)) { 634 kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1); 635 ++nkpt; 636 } 637 } 638 addr = (addr + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1); 639 while (kernel_vm_end < addr) { 640 if (pdir_pde(PTD, kernel_vm_end)) { 641 kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1); 642 continue; 643 } 644 ++nkpt; 645 if (!nkpg) { 646 nkpg = vm_page_alloc(kernel_object, 0, VM_ALLOC_SYSTEM); 647 if (!nkpg) 648 panic("pmap_growkernel: no memory to grow kernel"); 649 vm_page_wire(nkpg); 650 vm_page_remove(nkpg); 651 pmap_zero_page(VM_PAGE_TO_PHYS(nkpg)); 652 } 653 pdir_pde(PTD, kernel_vm_end) = (pd_entry_t) (VM_PAGE_TO_PHYS(nkpg) | PG_V | PG_KW); 654 nkpg = NULL; 655 656 for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) { 657 if (p->p_vmspace) { 658 pmap = &p->p_vmspace->vm_pmap; 659 *pmap_pde(pmap, kernel_vm_end) = pdir_pde(PTD, kernel_vm_end); 660 } 661 } 662 *pmap_pde(kernel_pmap, kernel_vm_end) = pdir_pde(PTD, kernel_vm_end); 663 
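		/*
		 * Advance kernel_vm_end to the boundary covered by the page
		 * table page just installed: one PT page maps NPTEPG ptes of
		 * PAGE_SIZE bytes each (4MB on the i386), so round up to the
		 * next multiple of PAGE_SIZE * NPTEPG.
		 */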
kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1); 664 } 665 splx(s); 666} 667 668/* 669 * Retire the given physical map from service. 670 * Should only be called if the map contains 671 * no valid mappings. 672 */ 673void 674pmap_destroy(pmap) 675 register pmap_t pmap; 676{ 677 int count; 678 679 if (pmap == NULL) 680 return; 681 682 count = --pmap->pm_count; 683 if (count == 0) { 684 pmap_release(pmap); 685 free((caddr_t) pmap, M_VMPMAP); 686 } 687} 688 689/* 690 * Release any resources held by the given physical map. 691 * Called when a pmap initialized by pmap_pinit is being released. 692 * Should only be called if the map contains no valid mappings. 693 */ 694void 695pmap_release(pmap) 696 register struct pmap *pmap; 697{ 698#if defined(PMAP_KEEP_PDIRS) 699 pmap_freepdir( (void *)pmap->pm_pdir); 700#else 701 kmem_free(kernel_map, (vm_offset_t) pmap->pm_pdir, PAGE_SIZE); 702#endif 703} 704 705/* 706 * Add a reference to the specified pmap. 707 */ 708void 709pmap_reference(pmap) 710 pmap_t pmap; 711{ 712 if (pmap != NULL) { 713 pmap->pm_count++; 714 } 715} 716 717#define PV_FREELIST_MIN ((PAGE_SIZE / sizeof (struct pv_entry)) / 2) 718 719/* 720 * Data for the pv entry allocation mechanism 721 */ 722static int pv_freelistcnt; 723static pv_entry_t pv_freelist; 724static vm_offset_t pvva; 725static int npvvapg; 726 727/* 728 * free the pv_entry back to the free list 729 */ 730static __inline void 731free_pv_entry(pv) 732 pv_entry_t pv; 733{ 734 if (!pv) 735 return; 736 ++pv_freelistcnt; 737 pv->pv_next = pv_freelist; 738 pv_freelist = pv; 739} 740 741/* 742 * get a new pv_entry, allocating a block from the system 743 * when needed. 744 * the memory allocation is performed bypassing the malloc code 745 * because of the possibility of allocations at interrupt time. 746 */ 747static __inline pv_entry_t 748get_pv_entry() 749{ 750 pv_entry_t tmp; 751 752 /* 753 * get more pv_entry pages if needed 754 */ 755 if (pv_freelistcnt < PV_FREELIST_MIN || pv_freelist == 0) { 756 pmap_alloc_pv_entry(); 757 } 758 /* 759 * get a pv_entry off of the free list 760 */ 761 --pv_freelistcnt; 762 tmp = pv_freelist; 763 pv_freelist = tmp->pv_next; 764 return tmp; 765} 766 767/* 768 * this *strange* allocation routine *statistically* eliminates the 769 * *possibility* of a malloc failure (*FATAL*) for a pv_entry_t data structure. 770 * also -- this code is MUCH MUCH faster than the malloc equiv... 771 */ 772static void 773pmap_alloc_pv_entry() 774{ 775 /* 776 * do we have any pre-allocated map-pages left? 
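	 *
	 * pvva/npvvapg describe a window of pageable kernel VA reserved up
	 * front by init_pv_entries().  Pages are plugged into that window
	 * here one at a time with vm_page_alloc(..., VM_ALLOC_INTERRUPT)
	 * and pmap_kenter(), so this (possibly interrupt-time) path never
	 * has to call into malloc().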
777 */ 778 if (npvvapg) { 779 vm_page_t m; 780 781 /* 782 * we do this to keep recursion away 783 */ 784 pv_freelistcnt += PV_FREELIST_MIN; 785 /* 786 * allocate a physical page out of the vm system 787 */ 788 m = vm_page_alloc(kernel_object, 789 OFF_TO_IDX(pvva - vm_map_min(kernel_map)), 790 VM_ALLOC_INTERRUPT); 791 if (m) { 792 int newentries; 793 int i; 794 pv_entry_t entry; 795 796 newentries = (PAGE_SIZE / sizeof(struct pv_entry)); 797 /* 798 * wire the page 799 */ 800 vm_page_wire(m); 801 m->flags &= ~PG_BUSY; 802 /* 803 * let the kernel see it 804 */ 805 pmap_kenter(pvva, VM_PAGE_TO_PHYS(m)); 806 807 entry = (pv_entry_t) pvva; 808 /* 809 * update the allocation pointers 810 */ 811 pvva += PAGE_SIZE; 812 --npvvapg; 813 814 /* 815 * free the entries into the free list 816 */ 817 for (i = 0; i < newentries; i++) { 818 free_pv_entry(entry); 819 entry++; 820 } 821 } 822 pv_freelistcnt -= PV_FREELIST_MIN; 823 } 824 if (!pv_freelist) 825 panic("get_pv_entry: cannot get a pv_entry_t"); 826} 827 828 829 830/* 831 * init the pv_entry allocation system 832 */ 833#define PVSPERPAGE 64 834void 835init_pv_entries(npg) 836 int npg; 837{ 838 /* 839 * allocate enough kvm space for PVSPERPAGE entries per page (lots) 840 * kvm space is fairly cheap, be generous!!! (the system can panic if 841 * this is too small.) 842 */ 843 npvvapg = ((npg * PVSPERPAGE) * sizeof(struct pv_entry) 844 + PAGE_SIZE - 1) / PAGE_SIZE; 845 pvva = kmem_alloc_pageable(kernel_map, npvvapg * PAGE_SIZE); 846 /* 847 * get the first batch of entries 848 */ 849 free_pv_entry(get_pv_entry()); 850} 851 852__inline pt_entry_t * 853get_ptbase(pmap) 854 pmap_t pmap; 855{ 856 vm_offset_t frame = (int) pmap->pm_pdir[PTDPTDI] & PG_FRAME; 857 858 /* are we current address space or kernel? */ 859 if (pmap == kernel_pmap || frame == ((int) PTDpde & PG_FRAME)) { 860 return PTmap; 861 } 862 /* otherwise, we are alternate address space */ 863 if (frame != ((int) APTDpde & PG_FRAME)) { 864 APTDpde = pmap->pm_pdir[PTDPTDI]; 865 pmap_update(); 866 } 867 return APTmap; 868} 869 870/* 871 * If it is the first entry on the list, it is actually 872 * in the header and we must copy the following entry up 873 * to the header. Otherwise we must search the list for 874 * the entry. In either case we free the now unused entry. 
875 */ 876static __inline void 877pmap_remove_entry(pmap, pv, va) 878 struct pmap *pmap; 879 pv_entry_t pv; 880 vm_offset_t va; 881{ 882 pv_entry_t npv; 883 int s; 884 s = splhigh(); 885 if (pmap == pv->pv_pmap && va == pv->pv_va) { 886 pmap_unuse_pt(pmap, va, pv->pv_ptem); 887 npv = pv->pv_next; 888 if (npv) { 889 *pv = *npv; 890 free_pv_entry(npv); 891 } else { 892 pv->pv_pmap = NULL; 893 } 894 } else { 895 for (npv = pv->pv_next; npv; (pv = npv, npv = pv->pv_next)) { 896 if (pmap == npv->pv_pmap && va == npv->pv_va) { 897 pmap_unuse_pt(pmap, va, npv->pv_ptem); 898 pv->pv_next = npv->pv_next; 899 free_pv_entry(npv); 900 break; 901 } 902 } 903 } 904 splx(s); 905} 906 907/* 908 * pmap_remove_pte: do the things to unmap a page in a process 909 */ 910static void 911pmap_remove_pte(pmap, ptq, sva) 912 struct pmap *pmap; 913 pt_entry_t *ptq; 914 vm_offset_t sva; 915{ 916 pt_entry_t oldpte; 917 vm_offset_t pa; 918 pv_entry_t pv; 919 920 oldpte = *ptq; 921 if (((int)oldpte) & PG_W) 922 pmap->pm_stats.wired_count--; 923 pmap->pm_stats.resident_count--; 924 925 pa = ((vm_offset_t)oldpte) & PG_FRAME; 926 if (pmap_is_managed(pa)) { 927 if ((int) oldpte & PG_M) { 928#if defined(PMAP_DIAGNOSTIC) 929 if (pmap_nw_modified(oldpte)) { 930 printf("pmap_remove: modified page not writable: va: 0x%lx, pte: 0x%lx\n", sva, (int) oldpte); 931 } 932#endif 933 934 if (sva < USRSTACK + (UPAGES * PAGE_SIZE) || 935 (sva >= KERNBASE && (sva < clean_sva || sva >= clean_eva))) { 936 PHYS_TO_VM_PAGE(pa)->dirty = VM_PAGE_BITS_ALL; 937 } 938 } 939 pv = pa_to_pvh(pa); 940 pmap_remove_entry(pmap, pv, sva); 941 } else { 942 pmap_unuse_pt(pmap, sva, NULL); 943 } 944 945 *ptq = 0; 946 return; 947} 948 949/* 950 * Remove a single page from a process address space 951 */ 952static __inline void 953pmap_remove_page(pmap, va) 954 struct pmap *pmap; 955 register vm_offset_t va; 956{ 957 register pt_entry_t *ptbase, *ptq; 958 /* 959 * if there is no pte for this address, just skip it!!! 960 */ 961 if (*pmap_pde(pmap, va) == 0) 962 return; 963 /* 964 * get a local va for mappings for this pmap. 965 */ 966 ptbase = get_ptbase(pmap); 967 ptq = ptbase + i386_btop(va); 968 if (*ptq) { 969 pmap_remove_pte(pmap, ptq, va); 970 pmap_update_1pg(va); 971 } 972 return; 973} 974 975/* 976 * Remove the given range of addresses from the specified map. 977 * 978 * It is assumed that the start and end are properly 979 * rounded to the page size. 980 */ 981void 982pmap_remove(pmap, sva, eva) 983 struct pmap *pmap; 984 register vm_offset_t sva; 985 register vm_offset_t eva; 986{ 987 register pt_entry_t *ptbase; 988 vm_offset_t va; 989 vm_offset_t pdnxt; 990 vm_offset_t ptpaddr; 991 vm_offset_t sindex, eindex; 992 vm_page_t mpte; 993 994 if (pmap == NULL) 995 return; 996 997 /* 998 * special handling of removing one page. a very 999 * common operation and easy to short circuit some 1000 * code. 1001 */ 1002 if ((sva + PAGE_SIZE) == eva) { 1003 pmap_remove_page(pmap, sva); 1004 return; 1005 } 1006 1007 /* 1008 * Get a local virtual address for the mappings that are being 1009 * worked with. 1010 */ 1011 ptbase = get_ptbase(pmap); 1012 1013 sindex = i386_btop(sva); 1014 eindex = i386_btop(eva); 1015 1016 for (; sindex < eindex; sindex = pdnxt) { 1017 1018 /* 1019 * Calculate index for next page table. 1020 */ 1021 pdnxt = ((sindex + NPTEPG) & ~(NPTEPG - 1)); 1022 ptpaddr = (vm_offset_t) *pmap_pde(pmap, i386_ptob(sindex)); 1023 1024 /* 1025 * Weed out invalid mappings. 
Note: we assume that the page 1026 * directory table is always allocated, and in kernel virtual. 1027 */ 1028 if (ptpaddr == 0) 1029 continue; 1030 1031 /* 1032 * get the vm_page_t for the page table page 1033 */ 1034 mpte = PHYS_TO_VM_PAGE(ptpaddr); 1035 1036 /* 1037 * if the pte isn't wired or held, just skip it. 1038 */ 1039 if ((mpte->hold_count == 0) && (mpte->wire_count == 0)) 1040 continue; 1041 1042 /* 1043 * Limit our scan to either the end of the va represented 1044 * by the current page table page, or to the end of the 1045 * range being removed. 1046 */ 1047 if (pdnxt > eindex) { 1048 pdnxt = eindex; 1049 } 1050 1051 for ( ;sindex != pdnxt; sindex++) { 1052 if (ptbase[sindex] == 0) 1053 continue; 1054 pmap_remove_pte(pmap, ptbase + sindex, i386_ptob(sindex)); 1055 if (mpte->hold_count == 0 && mpte->wire_count == 0) 1056 break; 1057 } 1058 } 1059 pmap_update(); 1060} 1061 1062/* 1063 * Routine: pmap_remove_all 1064 * Function: 1065 * Removes this physical page from 1066 * all physical maps in which it resides. 1067 * Reflects back modify bits to the pager. 1068 * 1069 * Notes: 1070 * Original versions of this routine were very 1071 * inefficient because they iteratively called 1072 * pmap_remove (slow...) 1073 */ 1074static void 1075pmap_remove_all(pa) 1076 vm_offset_t pa; 1077{ 1078 register pv_entry_t pv, opv, npv; 1079 register pt_entry_t *pte, *ptbase; 1080 vm_offset_t va; 1081 struct pmap *pmap; 1082 vm_page_t m; 1083 int s; 1084 int anyvalid = 0; 1085 1086#if defined(PMAP_DIAGNOSTIC) 1087 /* 1088 * XXX this makes pmap_page_protect(NONE) illegal for non-managed 1089 * pages! 1090 */ 1091 if (!pmap_is_managed(pa)) { 1092 panic("pmap_page_protect: illegal for unmanaged page, va: 0x%lx", pa); 1093 } 1094#endif 1095 1096 pa = pa & PG_FRAME; 1097 opv = pa_to_pvh(pa); 1098 if (opv->pv_pmap == NULL) 1099 return; 1100 1101 m = PHYS_TO_VM_PAGE(pa); 1102 s = splhigh(); 1103 pv = opv; 1104 while (pv && ((pmap = pv->pv_pmap) != NULL)) { 1105 int tpte; 1106 ptbase = get_ptbase(pmap); 1107 va = pv->pv_va; 1108 pte = ptbase + i386_btop(va); 1109 if (tpte = ((int) *pte)) { 1110 *pte = 0; 1111 if (tpte & PG_W) 1112 pmap->pm_stats.wired_count--; 1113 pmap->pm_stats.resident_count--; 1114 anyvalid = 1; 1115 1116 /* 1117 * Update the vm_page_t clean and reference bits. 1118 */ 1119 if ((tpte & PG_M) != 0) { 1120#if defined(PMAP_DIAGNOSTIC) 1121 if (pmap_nw_modified((pt_entry_t) tpte)) { 1122 printf("pmap_remove_all: modified page not writable: va: 0x%lx, pte: 0x%lx\n", va, tpte); 1123 } 1124#endif 1125 if (va < USRSTACK + (UPAGES * PAGE_SIZE) || 1126 (va >= KERNBASE && (va < clean_sva || va >= clean_eva))) { 1127 m->dirty = VM_PAGE_BITS_ALL; 1128 } 1129 } 1130 } 1131 pv = pv->pv_next; 1132 } 1133 1134 if (opv->pv_pmap != NULL) { 1135 pmap_unuse_pt(opv->pv_pmap, opv->pv_va, opv->pv_ptem); 1136 for (pv = opv->pv_next; pv; pv = npv) { 1137 npv = pv->pv_next; 1138 pmap_unuse_pt(pv->pv_pmap, pv->pv_va, pv->pv_ptem); 1139 free_pv_entry(pv); 1140 } 1141 } 1142 1143 opv->pv_pmap = NULL; 1144 opv->pv_next = NULL; 1145 1146 splx(s); 1147 if (anyvalid) 1148 pmap_update(); 1149} 1150 1151 1152/* 1153 * Set the physical protection on the 1154 * specified range of this map as requested. 
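 *
 *	Write access is revoked by clearing PG_RW (and PG_M) in each pte;
 *	a set PG_M bit is first folded into the vm_page's dirty field so
 *	no modification state is lost.  Removing read access entirely is
 *	handled by falling back to pmap_remove().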
1155 */ 1156void 1157pmap_protect(pmap, sva, eva, prot) 1158 register pmap_t pmap; 1159 vm_offset_t sva, eva; 1160 vm_prot_t prot; 1161{ 1162 register pt_entry_t *pte; 1163 register vm_offset_t va; 1164 register pt_entry_t *ptbase; 1165 vm_offset_t pdnxt; 1166 vm_offset_t ptpaddr; 1167 vm_offset_t sindex, eindex; 1168 vm_page_t mpte; 1169 int anychanged; 1170 1171 1172 if (pmap == NULL) 1173 return; 1174 1175 if ((prot & VM_PROT_READ) == VM_PROT_NONE) { 1176 pmap_remove(pmap, sva, eva); 1177 return; 1178 } 1179 if (prot & VM_PROT_WRITE) 1180 return; 1181 1182 anychanged = 0; 1183 1184 ptbase = get_ptbase(pmap); 1185 1186 sindex = i386_btop(sva); 1187 eindex = i386_btop(eva); 1188 1189 for (; sindex < eindex; sindex = pdnxt) { 1190 int pprot; 1191 int pbits; 1192 1193 pdnxt = ((sindex + NPTEPG) & ~(NPTEPG - 1)); 1194 ptpaddr = (vm_offset_t) *pmap_pde(pmap, i386_ptob(sindex)); 1195 1196 /* 1197 * Weed out invalid mappings. Note: we assume that the page 1198 * directory table is always allocated, and in kernel virtual. 1199 */ 1200 if (ptpaddr == 0) 1201 continue; 1202 1203 mpte = PHYS_TO_VM_PAGE(ptpaddr); 1204 1205 if ((mpte->hold_count == 0) && (mpte->wire_count == 0)) 1206 continue; 1207 1208 if (pdnxt > eindex) { 1209 pdnxt = eindex; 1210 } 1211 1212 for (; sindex != pdnxt; sindex++) { 1213 if (ptbase[sindex] == 0) 1214 continue; 1215 pte = ptbase + sindex; 1216 pbits = *(int *)pte; 1217 if (pbits & PG_RW) { 1218 if (pbits & PG_M) { 1219 vm_page_t m; 1220 vm_offset_t pa = pbits & PG_FRAME; 1221 m = PHYS_TO_VM_PAGE(pa); 1222 m->dirty = VM_PAGE_BITS_ALL; 1223 } 1224 *(int *)pte &= ~(PG_M|PG_RW); 1225 anychanged=1; 1226 } 1227 } 1228 } 1229 if (anychanged) 1230 pmap_update(); 1231} 1232 1233/* 1234 * Insert the given physical page (p) at 1235 * the specified virtual address (v) in the 1236 * target physical map with the protection requested. 1237 * 1238 * If specified, the page will be wired down, meaning 1239 * that the related pte can not be reclaimed. 1240 * 1241 * NB: This is the only routine which MAY NOT lazy-evaluate 1242 * or lose information. That is, this routine must actually 1243 * insert this page into the given map NOW. 1244 */ 1245void 1246pmap_enter(pmap, va, pa, prot, wired) 1247 register pmap_t pmap; 1248 vm_offset_t va; 1249 register vm_offset_t pa; 1250 vm_prot_t prot; 1251 boolean_t wired; 1252{ 1253 register pt_entry_t *pte; 1254 vm_offset_t opa; 1255 register pv_entry_t pv, npv; 1256 int ptevalid; 1257 vm_offset_t origpte, newpte; 1258 1259 if (pmap == NULL) 1260 return; 1261 1262 pv = NULL; 1263 1264 va = va & PG_FRAME; 1265 if (va > VM_MAX_KERNEL_ADDRESS) 1266 panic("pmap_enter: toobig"); 1267 1268 /* 1269 * In the case that a page table page is not 1270 * resident, we are creating it here. 
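	 * The trick used below: vtopte(va) is itself a virtual address
	 * inside the recursively mapped page table area, so taking a
	 * write fault on it via vm_fault() makes the VM system allocate
	 * and enter the needed page table page for us.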
1271 */ 1272 if ((va < VM_MIN_KERNEL_ADDRESS) && 1273 (curproc != NULL) && 1274 (pmap->pm_map->pmap == pmap)) { 1275 vm_offset_t v; 1276 1277 v = (vm_offset_t) vtopte(va); 1278 /* Fault the pte only if needed: */ 1279 if (*((int *)vtopte(v)) == 0) 1280 (void) vm_fault(pmap->pm_map, 1281 trunc_page(v), VM_PROT_WRITE, FALSE); 1282 } 1283 1284 /* 1285 * Page Directory table entry not valid, we need a new PT page 1286 */ 1287 pte = pmap_pte(pmap, va); 1288 if (pte == NULL) { 1289 printf("kernel page directory invalid pdir=%p, va=0x%lx\n", 1290 pmap->pm_pdir[PTDPTDI], va); 1291 panic("invalid kernel page directory"); 1292 } 1293 1294 origpte = *(vm_offset_t *)pte; 1295 opa = origpte & PG_FRAME; 1296 1297 pa = pa & PG_FRAME; 1298 1299 /* 1300 * Mapping has not changed, must be protection or wiring change. 1301 */ 1302 if (opa == pa) { 1303 /* 1304 * Wiring change, just update stats. We don't worry about 1305 * wiring PT pages as they remain resident as long as there 1306 * are valid mappings in them. Hence, if a user page is wired, 1307 * the PT page will be also. 1308 */ 1309 if (wired && ((origpte & PG_W) == 0)) 1310 pmap->pm_stats.wired_count++; 1311 else if (!wired && (origpte & PG_W)) 1312 pmap->pm_stats.wired_count--; 1313 1314#if defined(PMAP_DIAGNOSTIC) 1315 if (pmap_nw_modified((pt_entry_t) origpte)) { 1316 printf("pmap_enter: modified page not writable: va: 0x%lx, pte: 0x%lx\n", va, origpte); 1317 } 1318#endif 1319 1320 /* 1321 * We might be turning off write access to the page, 1322 * so we go ahead and sense modify status. 1323 */ 1324 if (origpte & PG_M) { 1325 vm_page_t m; 1326 m = PHYS_TO_VM_PAGE(pa); 1327 m->dirty = VM_PAGE_BITS_ALL; 1328 } 1329 goto validate; 1330 } 1331 /* 1332 * Mapping has changed, invalidate old range and fall through to 1333 * handle validating new mapping. 1334 */ 1335 if (opa) { 1336 pmap_remove_page(pmap, va); 1337 opa = 0; 1338 origpte = 0; 1339 } 1340 /* 1341 * Enter on the PV list if part of our managed memory Note that we 1342 * raise IPL while manipulating pv_table since pmap_enter can be 1343 * called at interrupt time. 1344 */ 1345 if (pmap_is_managed(pa)) { 1346 int s; 1347 1348 pv = pa_to_pvh(pa); 1349 s = splhigh(); 1350 /* 1351 * No entries yet, use header as the first entry 1352 */ 1353 if (pv->pv_pmap == NULL) { 1354 pv->pv_va = va; 1355 pv->pv_pmap = pmap; 1356 pv->pv_next = NULL; 1357 pv->pv_ptem = NULL; 1358 } 1359 /* 1360 * There is at least one other VA mapping this page. Place 1361 * this entry after the header. 1362 */ 1363 else { 1364 npv = get_pv_entry(); 1365 npv->pv_va = va; 1366 npv->pv_pmap = pmap; 1367 npv->pv_next = pv->pv_next; 1368 pv->pv_next = npv; 1369 pv = npv; 1370 pv->pv_ptem = NULL; 1371 } 1372 splx(s); 1373 } 1374 1375 /* 1376 * Increment counters 1377 */ 1378 pmap->pm_stats.resident_count++; 1379 if (wired) 1380 pmap->pm_stats.wired_count++; 1381 1382validate: 1383 /* 1384 * Now validate mapping with desired protection/wiring. 1385 */ 1386 newpte = (vm_offset_t) (pa | pte_prot(pmap, prot) | PG_V); 1387 1388 if (wired) 1389 newpte |= PG_W; 1390 if (va < UPT_MIN_ADDRESS) 1391 newpte |= PG_u; 1392 else if (va < UPT_MAX_ADDRESS) 1393 newpte |= PG_u | PG_RW; 1394 1395 /* 1396 * if the mapping or permission bits are different, we need 1397 * to update the pte. 
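	 * PG_M and PG_U are masked out of the comparison because the
	 * hardware may have set them behind our back; a difference in
	 * those bits alone requires neither a pte store nor a TLB flush.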
1398 */ 1399 if ((origpte & ~(PG_M|PG_U)) != newpte) { 1400 *pte = (pt_entry_t) newpte; 1401 if (origpte) 1402 pmap_update_1pg(va); 1403 } 1404 1405 if (origpte == 0) { 1406 vm_page_t mpte; 1407 mpte = pmap_use_pt(pmap, va); 1408 if (pv) 1409 pv->pv_ptem = mpte; 1410 } 1411} 1412 1413/* 1414 * Add a list of wired pages to the kva 1415 * this routine is only used for temporary 1416 * kernel mappings that do not need to have 1417 * page modification or references recorded. 1418 * Note that old mappings are simply written 1419 * over. The page *must* be wired. 1420 */ 1421void 1422pmap_qenter(va, m, count) 1423 vm_offset_t va; 1424 vm_page_t *m; 1425 int count; 1426{ 1427 int i; 1428 int anyvalid = 0; 1429 register pt_entry_t *pte; 1430 1431 for (i = 0; i < count; i++) { 1432 vm_offset_t tva = va + i * PAGE_SIZE; 1433 pt_entry_t npte = (pt_entry_t) ((int) (VM_PAGE_TO_PHYS(m[i]) | PG_RW | PG_V)); 1434 pt_entry_t opte; 1435 pte = vtopte(tva); 1436 opte = *pte; 1437 *pte = npte; 1438 if (opte) pmap_update_1pg(tva); 1439 } 1440} 1441/* 1442 * this routine jerks page mappings from the 1443 * kernel -- it is meant only for temporary mappings. 1444 */ 1445void 1446pmap_qremove(va, count) 1447 vm_offset_t va; 1448 int count; 1449{ 1450 int i; 1451 register pt_entry_t *pte; 1452 1453 for (i = 0; i < count; i++) { 1454 vm_offset_t tva = va + i * PAGE_SIZE; 1455 pte = vtopte(tva); 1456 *pte = 0; 1457 pmap_update_1pg(tva); 1458 } 1459} 1460 1461/* 1462 * add a wired page to the kva 1463 * note that in order for the mapping to take effect -- you 1464 * should do a pmap_update after doing the pmap_kenter... 1465 */ 1466void 1467pmap_kenter(va, pa) 1468 vm_offset_t va; 1469 register vm_offset_t pa; 1470{ 1471 register pt_entry_t *pte; 1472 pt_entry_t npte, opte; 1473 1474 npte = (pt_entry_t) ((int) (pa | PG_RW | PG_V)); 1475 pte = vtopte(va); 1476 opte = *pte; 1477 *pte = npte; 1478 if (opte) pmap_update_1pg(va); 1479} 1480 1481/* 1482 * remove a page from the kernel pagetables 1483 */ 1484void 1485pmap_kremove(va) 1486 vm_offset_t va; 1487{ 1488 register pt_entry_t *pte; 1489 1490 pte = vtopte(va); 1491 *pte = (pt_entry_t) 0; 1492 pmap_update_1pg(va); 1493} 1494 1495/* 1496 * this code makes some *MAJOR* assumptions: 1497 * 1. Current pmap & pmap exists. 1498 * 2. Not wired. 1499 * 3. Read access. 1500 * 4. No page table pages. 1501 * 5. Tlbflush is deferred to calling procedure. 1502 * 6. Page IS managed. 1503 * but is *MUCH* faster than pmap_enter... 1504 */ 1505 1506static void 1507pmap_enter_quick(pmap, va, pa) 1508 register pmap_t pmap; 1509 vm_offset_t va; 1510 register vm_offset_t pa; 1511{ 1512 register pt_entry_t *pte; 1513 register pv_entry_t pv, npv; 1514 int s; 1515 1516 /* 1517 * Enter on the PV list if part of our managed memory Note that we 1518 * raise IPL while manipulating pv_table since pmap_enter can be 1519 * called at interrupt time. 1520 */ 1521 1522 pte = vtopte(va); 1523 /* a fault on the page table might occur here */ 1524 if (*pte) { 1525 pmap_remove_page(pmap, va); 1526 } 1527 1528 pv = pa_to_pvh(pa); 1529 s = splhigh(); 1530 /* 1531 * No entries yet, use header as the first entry 1532 */ 1533 if (pv->pv_pmap == NULL) { 1534 pv->pv_pmap = pmap; 1535 pv->pv_va = va; 1536 pv->pv_next = NULL; 1537 } 1538 /* 1539 * There is at least one other VA mapping this page. Place this entry 1540 * after the header. 
1541 */ 1542 else { 1543 npv = get_pv_entry(); 1544 npv->pv_va = va; 1545 npv->pv_pmap = pmap; 1546 npv->pv_next = pv->pv_next; 1547 pv->pv_next = npv; 1548 pv = npv; 1549 } 1550 splx(s); 1551 pv->pv_ptem = pmap_use_pt(pmap, va); 1552 1553 /* 1554 * Increment counters 1555 */ 1556 pmap->pm_stats.resident_count++; 1557 1558 /* 1559 * Now validate mapping with RO protection 1560 */ 1561 *pte = (pt_entry_t) ((int) (pa | PG_V | PG_u)); 1562 1563 return; 1564} 1565 1566#define MAX_INIT_PT (96) 1567/* 1568 * pmap_object_init_pt preloads the ptes for a given object 1569 * into the specified pmap. This eliminates the blast of soft 1570 * faults on process startup and immediately after an mmap. 1571 */ 1572void 1573pmap_object_init_pt(pmap, addr, object, pindex, size) 1574 pmap_t pmap; 1575 vm_offset_t addr; 1576 vm_object_t object; 1577 vm_pindex_t pindex; 1578 vm_size_t size; 1579{ 1580 vm_offset_t tmpidx; 1581 int psize; 1582 vm_page_t p; 1583 int objpgs; 1584 1585 psize = (size >> PAGE_SHIFT); 1586 1587 if (!pmap || (object->type != OBJT_VNODE) || 1588 ((psize > MAX_INIT_PT) && 1589 (object->resident_page_count > MAX_INIT_PT))) { 1590 return; 1591 } 1592 1593 /* 1594 * remove any already used mappings 1595 */ 1596 pmap_remove( pmap, trunc_page(addr), round_page(addr + size)); 1597 1598 /* 1599 * if we are processing a major portion of the object, then scan the 1600 * entire thing. 1601 */ 1602 if (psize > (object->size >> 2)) { 1603 objpgs = psize; 1604 1605 for (p = object->memq.tqh_first; 1606 ((objpgs > 0) && (p != NULL)); 1607 p = p->listq.tqe_next) { 1608 1609 tmpidx = p->pindex; 1610 if (tmpidx < pindex) { 1611 continue; 1612 } 1613 tmpidx -= pindex; 1614 if (tmpidx >= psize) { 1615 continue; 1616 } 1617 if (((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) && 1618 (p->busy == 0) && 1619 (p->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) { 1620 if (p->queue == PQ_CACHE) 1621 vm_page_deactivate(p); 1622 vm_page_hold(p); 1623 p->flags |= PG_MAPPED; 1624 pmap_enter_quick(pmap, 1625 addr + (tmpidx << PAGE_SHIFT), 1626 VM_PAGE_TO_PHYS(p)); 1627 vm_page_unhold(p); 1628 } 1629 objpgs -= 1; 1630 } 1631 } else { 1632 /* 1633 * else lookup the pages one-by-one. 1634 */ 1635 for (tmpidx = 0; tmpidx < psize; tmpidx += 1) { 1636 p = vm_page_lookup(object, tmpidx + pindex); 1637 if (p && (p->busy == 0) && 1638 ((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) && 1639 (p->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) { 1640 if (p->queue == PQ_CACHE) 1641 vm_page_deactivate(p); 1642 vm_page_hold(p); 1643 p->flags |= PG_MAPPED; 1644 pmap_enter_quick(pmap, 1645 addr + (tmpidx << PAGE_SHIFT), 1646 VM_PAGE_TO_PHYS(p)); 1647 vm_page_unhold(p); 1648 } 1649 } 1650 } 1651 return; 1652} 1653 1654/* 1655 * pmap_prefault provides a quick way of clustering 1656 * pagefaults into a processes address space. It is a "cousin" 1657 * of pmap_object_init_pt, except it runs at page fault time instead 1658 * of mmap time. 
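 *
 * The pmap_prefault_pageorder[] table below visits the pages at -1, +1,
 * -2 and +2 pages from the faulting address (PFBAK/PFFOR pages back and
 * forward), entering read-only mappings for any that are already
 * resident and fully valid.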
1659 */ 1660#define PFBAK 2 1661#define PFFOR 2 1662#define PAGEORDER_SIZE (PFBAK+PFFOR) 1663 1664static int pmap_prefault_pageorder[] = { 1665 -NBPG, NBPG, -2 * NBPG, 2 * NBPG 1666}; 1667 1668void 1669pmap_prefault(pmap, addra, entry, object) 1670 pmap_t pmap; 1671 vm_offset_t addra; 1672 vm_map_entry_t entry; 1673 vm_object_t object; 1674{ 1675 int i; 1676 vm_offset_t starta; 1677 vm_offset_t addr; 1678 vm_pindex_t pindex; 1679 vm_page_t m; 1680 int pageorder_index; 1681 1682 if (entry->object.vm_object != object) 1683 return; 1684 1685 if (!curproc || (pmap != &curproc->p_vmspace->vm_pmap)) 1686 return; 1687 1688 starta = addra - PFBAK * PAGE_SIZE; 1689 if (starta < entry->start) { 1690 starta = entry->start; 1691 } else if (starta > addra) { 1692 starta = 0; 1693 } 1694 1695 for (i = 0; i < PAGEORDER_SIZE; i++) { 1696 vm_object_t lobject; 1697 pt_entry_t *pte; 1698 1699 addr = addra + pmap_prefault_pageorder[i]; 1700 if (addr < starta || addr >= entry->end) 1701 continue; 1702 1703 pte = vtopte(addr); 1704 if (*pte) 1705 continue; 1706 1707 pindex = ((addr - entry->start) + entry->offset) >> PAGE_SHIFT; 1708 lobject = object; 1709 for (m = vm_page_lookup(lobject, pindex); 1710 (!m && (lobject->type == OBJT_DEFAULT) && (lobject->backing_object)); 1711 lobject = lobject->backing_object) { 1712 if (lobject->backing_object_offset & PAGE_MASK) 1713 break; 1714 pindex += (lobject->backing_object_offset >> PAGE_SHIFT); 1715 m = vm_page_lookup(lobject->backing_object, pindex); 1716 } 1717 1718 /* 1719 * give-up when a page is not in memory 1720 */ 1721 if (m == NULL) 1722 break; 1723 1724 if (((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) && 1725 (m->busy == 0) && 1726 (m->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) { 1727 1728 if (m->queue == PQ_CACHE) { 1729 if ((cnt.v_free_count + cnt.v_cache_count) < 1730 cnt.v_free_min) 1731 break; 1732 vm_page_deactivate(m); 1733 } 1734 vm_page_hold(m); 1735 m->flags |= PG_MAPPED; 1736 pmap_enter_quick(pmap, addr, VM_PAGE_TO_PHYS(m)); 1737 vm_page_unhold(m); 1738 1739 } 1740 } 1741} 1742 1743/* 1744 * Routine: pmap_change_wiring 1745 * Function: Change the wiring attribute for a map/virtual-address 1746 * pair. 1747 * In/out conditions: 1748 * The mapping must already exist in the pmap. 1749 */ 1750void 1751pmap_change_wiring(pmap, va, wired) 1752 register pmap_t pmap; 1753 vm_offset_t va; 1754 boolean_t wired; 1755{ 1756 register pt_entry_t *pte; 1757 1758 if (pmap == NULL) 1759 return; 1760 1761 pte = pmap_pte(pmap, va); 1762 1763 if (wired && !pmap_pte_w(pte)) 1764 pmap->pm_stats.wired_count++; 1765 else if (!wired && pmap_pte_w(pte)) 1766 pmap->pm_stats.wired_count--; 1767 1768 /* 1769 * Wiring is not a hardware characteristic so there is no need to 1770 * invalidate TLB. 1771 */ 1772 pmap_pte_set_w(pte, wired); 1773} 1774 1775 1776 1777/* 1778 * Copy the range specified by src_addr/len 1779 * from the source map to the range dst_addr/len 1780 * in the destination map. 1781 * 1782 * This routine is only advisory and need not do anything. 1783 */ 1784void 1785pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr) 1786 pmap_t dst_pmap, src_pmap; 1787 vm_offset_t dst_addr; 1788 vm_size_t len; 1789 vm_offset_t src_addr; 1790{ 1791} 1792 1793/* 1794 * Routine: pmap_kernel 1795 * Function: 1796 * Returns the physical map handle for the kernel. 
1797 */ 1798pmap_t 1799pmap_kernel() 1800{ 1801 return (kernel_pmap); 1802} 1803 1804/* 1805 * pmap_zero_page zeros the specified (machine independent) 1806 * page by mapping the page into virtual memory and using 1807 * bzero to clear its contents, one machine dependent page 1808 * at a time. 1809 */ 1810void 1811pmap_zero_page(phys) 1812 vm_offset_t phys; 1813{ 1814 if (*(int *) CMAP2) 1815 panic("pmap_zero_page: CMAP busy"); 1816 1817 *(int *) CMAP2 = PG_V | PG_KW | (phys & PG_FRAME); 1818 bzero(CADDR2, PAGE_SIZE); 1819 1820 *(int *) CMAP2 = 0; 1821 pmap_update_1pg((vm_offset_t) CADDR2); 1822} 1823 1824/* 1825 * pmap_copy_page copies the specified (machine independent) 1826 * page by mapping the page into virtual memory and using 1827 * bcopy to copy the page, one machine dependent page at a 1828 * time. 1829 */ 1830void 1831pmap_copy_page(src, dst) 1832 vm_offset_t src; 1833 vm_offset_t dst; 1834{ 1835 if (*(int *) CMAP1 || *(int *) CMAP2) 1836 panic("pmap_copy_page: CMAP busy"); 1837 1838 *(int *) CMAP1 = PG_V | PG_KW | (src & PG_FRAME); 1839 *(int *) CMAP2 = PG_V | PG_KW | (dst & PG_FRAME); 1840 1841#if __GNUC__ > 1 1842 memcpy(CADDR2, CADDR1, PAGE_SIZE); 1843#else 1844 bcopy(CADDR1, CADDR2, PAGE_SIZE); 1845#endif 1846 *(int *) CMAP1 = 0; 1847 *(int *) CMAP2 = 0; 1848 pmap_update_2pg( (vm_offset_t) CADDR1, (vm_offset_t) CADDR2); 1849} 1850 1851 1852/* 1853 * Routine: pmap_pageable 1854 * Function: 1855 * Make the specified pages (by pmap, offset) 1856 * pageable (or not) as requested. 1857 * 1858 * A page which is not pageable may not take 1859 * a fault; therefore, its page table entry 1860 * must remain valid for the duration. 1861 * 1862 * This routine is merely advisory; pmap_enter 1863 * will specify that these pages are to be wired 1864 * down (or not) as appropriate. 1865 */ 1866void 1867pmap_pageable(pmap, sva, eva, pageable) 1868 pmap_t pmap; 1869 vm_offset_t sva, eva; 1870 boolean_t pageable; 1871{ 1872} 1873 1874/* 1875 * this routine returns true if a physical page resides 1876 * in the given pmap. 1877 */ 1878boolean_t 1879pmap_page_exists(pmap, pa) 1880 pmap_t pmap; 1881 vm_offset_t pa; 1882{ 1883 register pv_entry_t pv; 1884 int s; 1885 1886 if (!pmap_is_managed(pa)) 1887 return FALSE; 1888 1889 pv = pa_to_pvh(pa); 1890 s = splhigh(); 1891 1892 /* 1893 * Not found, check current mappings returning immediately if found. 1894 */ 1895 if (pv->pv_pmap != NULL) { 1896 for (; pv; pv = pv->pv_next) { 1897 if (pv->pv_pmap == pmap) { 1898 splx(s); 1899 return TRUE; 1900 } 1901 } 1902 } 1903 splx(s); 1904 return (FALSE); 1905} 1906 1907/* 1908 * pmap_testbit tests bits in pte's 1909 * note that the testbit/changebit routines are inline, 1910 * and a lot of things compile-time evaluate. 1911 */ 1912static __inline boolean_t 1913pmap_testbit(pa, bit) 1914 register vm_offset_t pa; 1915 int bit; 1916{ 1917 register pv_entry_t pv; 1918 pt_entry_t *pte; 1919 int s; 1920 1921 if (!pmap_is_managed(pa)) 1922 return FALSE; 1923 1924 pv = pa_to_pvh(pa); 1925 s = splhigh(); 1926 1927 /* 1928 * Not found, check current mappings returning immediately if found. 1929 */ 1930 if (pv->pv_pmap != NULL) { 1931 for (; pv; pv = pv->pv_next) { 1932 /* 1933 * if the bit being tested is the modified bit, then 1934 * mark UPAGES as always modified, and ptes as never 1935 * modified. 
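			 * Mappings whose va falls in the clean_sva..clean_eva
			 * window are skipped below, so their pte bits are
			 * never reported against the managed physical page.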
1936 */ 1937 if (bit & (PG_U|PG_M)) { 1938 if ((pv->pv_va >= clean_sva) && (pv->pv_va < clean_eva)) { 1939 continue; 1940 } 1941 } 1942 if (!pv->pv_pmap) { 1943#if defined(PMAP_DIAGNOSTIC) 1944 printf("Null pmap (tb) at va: 0x%lx\n", pv->pv_va); 1945#endif 1946 continue; 1947 } 1948 pte = pmap_pte(pv->pv_pmap, pv->pv_va); 1949 if ((int) *pte & bit) { 1950 splx(s); 1951 return TRUE; 1952 } 1953 } 1954 } 1955 splx(s); 1956 return (FALSE); 1957} 1958 1959/* 1960 * this routine is used to modify bits in ptes 1961 */ 1962static __inline void 1963pmap_changebit(pa, bit, setem) 1964 vm_offset_t pa; 1965 int bit; 1966 boolean_t setem; 1967{ 1968 register pv_entry_t pv; 1969 register pt_entry_t *pte, npte; 1970 vm_offset_t va; 1971 int changed; 1972 int s; 1973 1974 if (!pmap_is_managed(pa)) 1975 return; 1976 1977 pv = pa_to_pvh(pa); 1978 s = splhigh(); 1979 1980 /* 1981 * Loop over all current mappings setting/clearing as appropos If 1982 * setting RO do we need to clear the VAC? 1983 */ 1984 if (pv->pv_pmap != NULL) { 1985 for (; pv; pv = pv->pv_next) { 1986 va = pv->pv_va; 1987 1988 /* 1989 * don't write protect pager mappings 1990 */ 1991 if (!setem && (bit == PG_RW)) { 1992 if (va >= clean_sva && va < clean_eva) 1993 continue; 1994 } 1995 if (!pv->pv_pmap) { 1996#if defined(PMAP_DIAGNOSTIC) 1997 printf("Null pmap (cb) at va: 0x%lx\n", va); 1998#endif 1999 continue; 2000 } 2001 2002 pte = pmap_pte(pv->pv_pmap, va); 2003 if (setem) { 2004 *(int *)pte |= bit; 2005 } else { 2006 if (bit == PG_RW) { 2007 vm_offset_t pbits = *(vm_offset_t *)pte; 2008 if (pbits & PG_M) { 2009 vm_page_t m; 2010 vm_offset_t pa = pbits & PG_FRAME; 2011 m = PHYS_TO_VM_PAGE(pa); 2012 m->dirty = VM_PAGE_BITS_ALL; 2013 } 2014 *(int *)pte &= ~(PG_M|PG_RW); 2015 } else { 2016 *(int *)pte &= ~bit; 2017 } 2018 } 2019 } 2020 } 2021 splx(s); 2022 pmap_update(); 2023} 2024 2025/* 2026 * pmap_page_protect: 2027 * 2028 * Lower the permission for all mappings to a given page. 2029 */ 2030void 2031pmap_page_protect(phys, prot) 2032 vm_offset_t phys; 2033 vm_prot_t prot; 2034{ 2035 if ((prot & VM_PROT_WRITE) == 0) { 2036 if (prot & (VM_PROT_READ | VM_PROT_EXECUTE)) 2037 pmap_changebit(phys, PG_RW, FALSE); 2038 else 2039 pmap_remove_all(phys); 2040 } 2041} 2042 2043vm_offset_t 2044pmap_phys_address(ppn) 2045 int ppn; 2046{ 2047 return (i386_ptob(ppn)); 2048} 2049 2050/* 2051 * pmap_is_referenced: 2052 * 2053 * Return whether or not the specified physical page was referenced 2054 * by any physical maps. 2055 */ 2056boolean_t 2057pmap_is_referenced(vm_offset_t pa) 2058{ 2059 return pmap_testbit((pa), PG_U); 2060} 2061 2062/* 2063 * pmap_is_modified: 2064 * 2065 * Return whether or not the specified physical page was modified 2066 * in any physical maps. 2067 */ 2068boolean_t 2069pmap_is_modified(vm_offset_t pa) 2070{ 2071 return pmap_testbit((pa), PG_M); 2072} 2073 2074/* 2075 * Clear the modify bits on the specified physical page. 2076 */ 2077void 2078pmap_clear_modify(vm_offset_t pa) 2079{ 2080 pmap_changebit((pa), PG_M, FALSE); 2081} 2082 2083/* 2084 * pmap_clear_reference: 2085 * 2086 * Clear the reference bit on the specified physical page. 
2087 */ 2088void 2089pmap_clear_reference(vm_offset_t pa) 2090{ 2091 pmap_changebit((pa), PG_U, FALSE); 2092} 2093 2094/* 2095 * Miscellaneous support routines follow 2096 */ 2097 2098static void 2099i386_protection_init() 2100{ 2101 register int *kp, prot; 2102 2103 kp = protection_codes; 2104 for (prot = 0; prot < 8; prot++) { 2105 switch (prot) { 2106 case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_NONE: 2107 /* 2108 * Read access is also 0. There isn't any execute bit, 2109 * so just make it readable. 2110 */ 2111 case VM_PROT_READ | VM_PROT_NONE | VM_PROT_NONE: 2112 case VM_PROT_READ | VM_PROT_NONE | VM_PROT_EXECUTE: 2113 case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_EXECUTE: 2114 *kp++ = 0; 2115 break; 2116 case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_NONE: 2117 case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_EXECUTE: 2118 case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_NONE: 2119 case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE: 2120 *kp++ = PG_RW; 2121 break; 2122 } 2123 } 2124} 2125 2126/* 2127 * Map a set of physical memory pages into the kernel virtual 2128 * address space. Return a pointer to where it is mapped. This 2129 * routine is intended to be used for mapping device memory, 2130 * NOT real memory. The non-cacheable bits are set on each 2131 * mapped page. 2132 */ 2133void * 2134pmap_mapdev(pa, size) 2135 vm_offset_t pa; 2136 vm_size_t size; 2137{ 2138 vm_offset_t va, tmpva; 2139 pt_entry_t *pte; 2140 2141 size = roundup(size, PAGE_SIZE); 2142 2143 va = kmem_alloc_pageable(kernel_map, size); 2144 if (!va) 2145 panic("pmap_mapdev: Couldn't alloc kernel virtual memory"); 2146 2147 pa = pa & PG_FRAME; 2148 for (tmpva = va; size > 0;) { 2149 pte = vtopte(tmpva); 2150 *pte = (pt_entry_t) ((int) (pa | PG_RW | PG_V | PG_N)); 2151 size -= PAGE_SIZE; 2152 tmpva += PAGE_SIZE; 2153 pa += PAGE_SIZE; 2154 } 2155 pmap_update(); 2156 2157 return ((void *) va); 2158} 2159 2160#if defined(PMAP_DEBUG) 2161pmap_pid_dump(int pid) { 2162 pmap_t pmap; 2163 struct proc *p; 2164 int npte = 0; 2165 int index; 2166 for (p = allproc.lh_first; p != NULL; p = p->p_list.le_next) { 2167 if (p->p_pid != pid) 2168 continue; 2169 2170 if (p->p_vmspace) { 2171 int i,j; 2172 index = 0; 2173 pmap = &p->p_vmspace->vm_pmap; 2174 for(i=0;i<1024;i++) { 2175 pd_entry_t *pde; 2176 pt_entry_t *pte; 2177 unsigned base = i << PD_SHIFT; 2178 2179 pde = &pmap->pm_pdir[i]; 2180 if (pde && pmap_pde_v(pde)) { 2181 for(j=0;j<1024;j++) { 2182 unsigned va = base + (j << PG_SHIFT); 2183 if (va >= (vm_offset_t) VM_MIN_KERNEL_ADDRESS) { 2184 if (index) { 2185 index = 0; 2186 printf("\n"); 2187 } 2188 return npte; 2189 } 2190 pte = pmap_pte( pmap, va); 2191 if (pte && pmap_pte_v(pte)) { 2192 vm_offset_t pa; 2193 vm_page_t m; 2194 pa = *(int *)pte; 2195 m = PHYS_TO_VM_PAGE((pa & PG_FRAME)); 2196 printf("va: 0x%x, pt: 0x%x, h: %d, w: %d, f: 0x%x", 2197 va, pa, m->hold_count, m->wire_count, m->flags); 2198 npte++; 2199 index++; 2200 if (index >= 2) { 2201 index = 0; 2202 printf("\n"); 2203 } else { 2204 printf(" "); 2205 } 2206 } 2207 } 2208 } 2209 } 2210 } 2211 } 2212 return npte; 2213} 2214#endif 2215 2216#if defined(DEBUG) 2217 2218static void pads __P((pmap_t pm)); 2219static void pmap_pvdump __P((vm_offset_t pa)); 2220 2221/* print address space of pmap*/ 2222static void 2223pads(pm) 2224 pmap_t pm; 2225{ 2226 unsigned va, i, j; 2227 pt_entry_t *ptep; 2228 2229 if (pm == kernel_pmap) 2230 return; 2231 for (i = 0; i < 1024; i++) 2232 if (pm->pm_pdir[i]) 2233 for (j = 0; j < 1024; j++) { 2234 va = (i << PD_SHIFT) + (j << PG_SHIFT); 2235 if 
(pm == kernel_pmap && va < KERNBASE) 2236 continue; 2237 if (pm != kernel_pmap && va > UPT_MAX_ADDRESS) 2238 continue; 2239 ptep = pmap_pte(pm, va); 2240 if (pmap_pte_v(ptep)) 2241 printf("%x:%x ", va, *(int *) ptep); 2242 }; 2243 2244} 2245 2246static void 2247pmap_pvdump(pa) 2248 vm_offset_t pa; 2249{ 2250 register pv_entry_t pv; 2251 2252 printf("pa %x", pa); 2253 for (pv = pa_to_pvh(pa); pv; pv = pv->pv_next) { 2254#ifdef used_to_be 2255 printf(" -> pmap %x, va %x, flags %x", 2256 pv->pv_pmap, pv->pv_va, pv->pv_flags); 2257#endif 2258 printf(" -> pmap %x, va %x", 2259 pv->pv_pmap, pv->pv_va); 2260 pads(pv->pv_pmap); 2261 } 2262 printf(" "); 2263} 2264#endif 2265
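
/*
 * Illustrative sketch (not compiled): how the recursive page-directory
 * entry described at the top of this file turns the page tables into an
 * ordinary virtual array.  Because the pde at index PTDPTDI points back
 * at the page directory itself, the page tables of the current address
 * space appear as a linear array of ptes at the fixed virtual window
 * PTmap, and the page directory itself appears within that window as PTD.
 * vtopte_example() is hypothetical and only spells out the arithmetic
 * behind the vtopte()/avtopte() macros used throughout this file; it
 * assumes the usual i386 layout of 4K pages with NPTEPG (1024) ptes per
 * page table page.
 */
#if 0
static pt_entry_t *
vtopte_example(vm_offset_t va)
{
	/* one pte per page: index of the pte that maps va */
	unsigned pteidx = va >> PG_SHIFT;

	/*
	 * The recursive pde makes every page table of the current
	 * address space visible at PTmap, so the pte for va is simply
	 * the pteidx'th element of that array.
	 */
	return (&PTmap[pteidx]);
}
#endif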