/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from:	@(#)pmap.c	7.7 (Berkeley)	5/12/91
 *	$Id: pmap.c,v 1.120 1996/09/28 04:22:10 dyson Exp $
 */

/*
 * Manages physical address maps.
 *
 * In addition to hardware address maps, this
 * module is called upon to provide software-use-only
 * maps which may or may not be stored in the same
 * form as hardware maps.  These pseudo-maps are
 * used to store intermediate results from copy
 * operations to and from address spaces.
 *
 * Since the information managed by this module is
 * also stored by the logical address mapping module,
 * this module may throw away valid virtual-to-physical
 * mappings at almost any time.  However, invalidations
 * of virtual-to-physical mappings must be done as
 * requested.
 *
 * In order to cope with hardware architectures which
 * make virtual-to-physical map invalidates expensive,
 * this module may delay invalidate or reduced protection
 * operations until such time as they are actually
 * necessary.  This module is given full information as
 * to which processors are currently using which maps,
 * and to when physical maps must be made correct.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/msgbuf.h>
#include <sys/queue.h>
#include <sys/vmmeter.h>
#include <sys/mman.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <vm/lock.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>

#include <machine/pcb.h>
#include <machine/cputypes.h>
#include <machine/md_var.h>

#define PMAP_KEEP_PDIRS

#if defined(DIAGNOSTIC)
#define PMAP_DIAGNOSTIC
#endif

#if !defined(PMAP_DIAGNOSTIC)
#define PMAP_INLINE __inline
#else
#define PMAP_INLINE
#endif

#define PTPHINT

static void	init_pv_entries __P((int));

/*
 * Get PDEs and PTEs for user/kernel address space
 */
#define	pmap_pde(m, v)	(&((m)->pm_pdir[(vm_offset_t)(v) >> PDRSHIFT]))
#define pdir_pde(m, v)	(m[(vm_offset_t)(v) >> PDRSHIFT])

#define pmap_pde_v(pte)		((*(int *)pte & PG_V) != 0)
#define pmap_pte_w(pte)		((*(int *)pte & PG_W) != 0)
#define pmap_pte_m(pte)		((*(int *)pte & PG_M) != 0)
#define pmap_pte_u(pte)		((*(int *)pte & PG_A) != 0)
#define pmap_pte_v(pte)		((*(int *)pte & PG_V) != 0)

#define pmap_pte_set_w(pte, v) ((v)?(*(int *)pte |= PG_W):(*(int *)pte &= ~PG_W))
#define pmap_pte_set_prot(pte, v) ((*(int *)pte &= ~PG_PROT), (*(int *)pte |= (v)))
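
/*
 * For example, with 4K pages (PAGE_SIZE == 4096, PDRSHIFT == 22,
 * NPTEPG == 1024), a virtual address decomposes as:
 *
 *	va = 0x0823F123
 *	pde index = va >> PDRSHIFT                    = 0x20
 *	pte index = (va >> PAGE_SHIFT) & (NPTEPG - 1) = 0x23F
 *	offset    = va & PAGE_MASK                    = 0x123
 *
 * so pmap_pde(m, va) selects pm_pdir[0x20] (each PDE covers 4MB), and
 * the PTE for va is entry 0x23F in the page table that PDE points at.
 */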
/*
 * Given a map and a machine independent protection code,
 * convert to an i386 protection code.
 */
#define pte_prot(m, p)	(protection_codes[p])
static int protection_codes[8];

static struct pmap kernel_pmap_store;
pmap_t kernel_pmap;

vm_offset_t avail_start;	/* PA of first available physical page */
vm_offset_t avail_end;		/* PA of last available physical page */
vm_offset_t virtual_avail;	/* VA of first avail page (after kernel bss) */
vm_offset_t virtual_end;	/* VA of last avail page (end of kernel AS) */
static boolean_t pmap_initialized = FALSE;	/* Has pmap_init completed? */
static vm_offset_t vm_first_phys;

static int nkpt;
static vm_page_t nkpg;
vm_offset_t kernel_vm_end;

extern vm_offset_t clean_sva, clean_eva;
extern int cpu_class;

#define PV_FREELIST_MIN ((PAGE_SIZE / sizeof (struct pv_entry)) / 2)

/*
 * Data for the pv entry allocation mechanism
 */
static int pv_freelistcnt;
TAILQ_HEAD(, pv_entry) pv_freelist;
static vm_offset_t pvva;
static int npvvapg;

/*
 * All those kernel PT submaps that BSD is so fond of
 */
pt_entry_t *CMAP1;
static pt_entry_t *CMAP2, *ptmmap;
caddr_t CADDR1, ptvmmap;
static caddr_t CADDR2;
static pt_entry_t *msgbufmap;
struct msgbuf *msgbufp;

pt_entry_t *PMAP1;
unsigned *PADDR1;

static void	free_pv_entry __P((pv_entry_t pv));
static unsigned * get_ptbase __P((pmap_t pmap));
static pv_entry_t get_pv_entry __P((void));
static void	i386_protection_init __P((void));
static void	pmap_alloc_pv_entry __P((void));
static void	pmap_changebit __P((vm_offset_t pa, int bit, boolean_t setem));

static int	pmap_is_managed __P((vm_offset_t pa));
static void	pmap_remove_all __P((vm_offset_t pa));
static vm_page_t pmap_enter_quick __P((pmap_t pmap, vm_offset_t va,
				       vm_offset_t pa, vm_page_t mpte));
static int	pmap_remove_pte __P((struct pmap *pmap, unsigned *ptq,
				       vm_offset_t sva));
static void	pmap_remove_page __P((struct pmap *pmap, vm_offset_t va));
static int	pmap_remove_entry __P((struct pmap *pmap, pv_table_t *pv,
				       vm_offset_t va));
static boolean_t pmap_testbit __P((vm_offset_t pa, int bit));
static void	pmap_insert_entry __P((pmap_t pmap, vm_offset_t va,
				       vm_page_t mpte, vm_offset_t pa));

static vm_page_t pmap_allocpte __P((pmap_t pmap, vm_offset_t va));

static int pmap_release_free_page __P((pmap_t pmap, vm_page_t p));
static vm_page_t _pmap_allocpte __P((pmap_t pmap, unsigned ptepindex));
static unsigned * pmap_pte_quick __P((pmap_t pmap, vm_offset_t va));
static vm_page_t pmap_page_alloc __P((vm_object_t object, vm_pindex_t pindex));
static PMAP_INLINE void pmap_lock __P((pmap_t pmap));
static PMAP_INLINE void pmap_unlock __P((pmap_t pmap));
static void pmap_lock2 __P((pmap_t pmap1, pmap_t pmap2));

#define PDSTACKMAX 6
static vm_offset_t pdstack[PDSTACKMAX];
static int pdstackptr;

/*
 * Bootstrap the system enough to run with virtual memory.
 *
 * On the i386 this is called after mapping has already been enabled
 * and just syncs the pmap module with what has already been done.
 * [We can't call it easily with mapping off since the kernel is not
 * mapped with PA == VA, hence we would have to relocate every address
 * from the linked base (virtual) address "KERNBASE" to the actual
 * (physical) address starting relative to 0]
 */
void
pmap_bootstrap(firstaddr, loadaddr)
	vm_offset_t firstaddr;
	vm_offset_t loadaddr;
{
	vm_offset_t va;
	pt_entry_t *pte;

	avail_start = firstaddr;

	/*
	 * XXX The calculation of virtual_avail is wrong.  It's NKPT*PAGE_SIZE
	 * too large.  It should instead be correctly calculated in locore.s
	 * and not based on 'first' (which is a physical address, not a
	 * virtual address, for the start of unused physical memory).  The
	 * kernel page tables are NOT double mapped and thus should not be
	 * included in this calculation.
	 */
	virtual_avail = (vm_offset_t) KERNBASE + firstaddr;
	virtual_end = VM_MAX_KERNEL_ADDRESS;

	/*
	 * Initialize protection array.
	 */
	i386_protection_init();

	/*
	 * The kernel's pmap is statically allocated so we don't have to use
	 * pmap_create, which is unlikely to work correctly at this part of
	 * the boot sequence (XXX and which no longer exists).
	 */
	kernel_pmap = &kernel_pmap_store;

	kernel_pmap->pm_pdir = (pd_entry_t *) (KERNBASE + IdlePTD);

	kernel_pmap->pm_count = 1;
	TAILQ_INIT(&kernel_pmap->pm_pvlist);
	nkpt = NKPT;

	/*
	 * Reserve some special page table entries/VA space for temporary
	 * mapping of pages.
	 */
#define	SYSMAP(c, p, v, n)	\
	v = (c)va; va += ((n)*PAGE_SIZE); p = pte; pte += (n);

	va = virtual_avail;
	pte = (pt_entry_t *) pmap_pte(kernel_pmap, va);

	/*
	 * CMAP1/CMAP2 are used for zeroing and copying pages.
	 */
	SYSMAP(caddr_t, CMAP1, CADDR1, 1)
	SYSMAP(caddr_t, CMAP2, CADDR2, 1)

	/*
	 * ptmmap is used for reading arbitrary physical pages via /dev/mem.
	 */
	SYSMAP(caddr_t, ptmmap, ptvmmap, 1)

	/*
	 * msgbufmap is used to map the system message buffer.
	 */
	SYSMAP(struct msgbuf *, msgbufmap, msgbufp, 1)

	/*
	 * PMAP1/PADDR1 are used by pmap_pte_quick.
	 */
	SYSMAP(unsigned *, PMAP1, PADDR1, 1);

	virtual_avail = va;

	*(int *) CMAP1 = *(int *) CMAP2 = *(int *) PTD = 0;
	pmap_update();

}

/*
 * Initialize the pmap module.
 * Called by vm_init, to initialize any structures that the pmap
 * system needs to map virtual memory.
 * pmap_init has been enhanced to support discontiguous physical
 * memory in a fairly consistent way.
 */
void
pmap_init(phys_start, phys_end)
	vm_offset_t phys_start, phys_end;
{
	vm_offset_t addr;
	vm_size_t npg, s;
	int i;

	/*
	 * calculate the number of pv_entries needed
	 */
	vm_first_phys = phys_avail[0];
	for (i = 0; phys_avail[i + 1]; i += 2);
	npg = (phys_avail[(i - 2) + 1] - vm_first_phys) / PAGE_SIZE;

	/*
	 * Allocate memory for random pmap data structures.  Includes the
	 * pv_head_table.
	 */
	s = (vm_size_t) (sizeof(pv_table_t) * npg);
	s = round_page(s);

	addr = (vm_offset_t) kmem_alloc(kernel_map, s);
	pv_table = (pv_table_t *) addr;
	for (i = 0; i < npg; i++) {
		vm_offset_t pa;
		TAILQ_INIT(&pv_table[i].pv_list);
		pv_table[i].pv_list_count = 0;
		pa = vm_first_phys + i * PAGE_SIZE;
		pv_table[i].pv_vm_page = PHYS_TO_VM_PAGE(pa);
	}
	TAILQ_INIT(&pv_freelist);

	/*
	 * init the pv free list
	 */
	init_pv_entries(npg);
	/*
	 * Now it is safe to enable pv_table recording.
	 */
	pmap_initialized = TRUE;
}

/*
 * Used to map a range of physical addresses into kernel
 * virtual address space.
 *
 * For now, VM is already on, we only need to map the
 * specified memory.
 */
vm_offset_t
pmap_map(virt, start, end, prot)
	vm_offset_t virt;
	vm_offset_t start;
	vm_offset_t end;
	int prot;
{
	while (start < end) {
		pmap_enter(kernel_pmap, virt, start, prot, FALSE);
		virt += PAGE_SIZE;
		start += PAGE_SIZE;
	}
	return (virt);
}

/***************************************************
 * Low level helper routines.....
 ***************************************************/
#if defined(PMAP_DIAGNOSTIC)

/*
 * This code checks for non-writeable/modified pages.
 * This should be an invalid condition.
 */
static int
pmap_nw_modified(pt_entry_t ptea) {
	int pte;

	pte = (int) ptea;

	if ((pte & (PG_M|PG_RW)) == PG_M)
		return 1;
	else
		return 0;
}
#endif

/*
 * this routine defines the region(s) of memory that should
 * not be tested for the modified bit.
 */
static PMAP_INLINE int
pmap_track_modified(vm_offset_t va) {
	if ((va < clean_sva) || (va >= clean_eva))
		return 1;
	else
		return 0;
}

static PMAP_INLINE void
pmap_update_2pg(vm_offset_t va1, vm_offset_t va2) {
#if defined(I386_CPU)
	if (cpu_class == CPUCLASS_386) {
		pmap_update();
	} else
#endif
	{
		pmap_update_1pg(va1);
		pmap_update_1pg(va2);
	}
}

static PMAP_INLINE void
pmap_lock(pmap)
pmap_t pmap;
{
	int s;
	if (pmap == kernel_pmap)
		return;
	s = splhigh();
	while (pmap->pm_flags & PM_FLAG_LOCKED) {
		pmap->pm_flags |= PM_FLAG_WANTED;
		tsleep(pmap, PVM - 1, "pmaplk", 0);
	}
	pmap->pm_flags |= PM_FLAG_LOCKED;
	splx(s);
}

static PMAP_INLINE void
pmap_unlock(pmap)
pmap_t pmap;
{
	int s;
	if (pmap == kernel_pmap)
		return;
	s = splhigh();
	pmap->pm_flags &= ~PM_FLAG_LOCKED;
	if (pmap->pm_flags & PM_FLAG_WANTED) {
		pmap->pm_flags &= ~PM_FLAG_WANTED;
		wakeup(pmap);
	}
	splx(s);
}

static void
pmap_lock2(pmap1, pmap2)
pmap_t pmap1, pmap2;
{
	int s;
	if (pmap1 == kernel_pmap || pmap2 == kernel_pmap)
		return;
	s = splhigh();
	while ((pmap1->pm_flags | pmap2->pm_flags) & PM_FLAG_LOCKED) {
		while (pmap1->pm_flags & PM_FLAG_LOCKED) {
			pmap1->pm_flags |= PM_FLAG_WANTED;
			tsleep(pmap1, PVM - 1, "pmapl1", 0);
		}
		while (pmap2->pm_flags & PM_FLAG_LOCKED) {
			pmap2->pm_flags |= PM_FLAG_WANTED;
			tsleep(pmap2, PVM - 1, "pmapl2", 0);
		}
	}
	pmap1->pm_flags |= PM_FLAG_LOCKED;
	pmap2->pm_flags |= PM_FLAG_LOCKED;
	splx(s);
}

static unsigned *
get_ptbase(pmap)
	pmap_t pmap;
{
	unsigned frame = (unsigned) pmap->pm_pdir[PTDPTDI] & PG_FRAME;

	/* are we current address space or kernel? */
	if (pmap == kernel_pmap || frame == (((unsigned) PTDpde) & PG_FRAME)) {
		return (unsigned *) PTmap;
	}
	/* otherwise, we are alternate address space */
	if (frame != (((unsigned) APTDpde) & PG_FRAME)) {
		APTDpde = (pd_entry_t) (frame | PG_RW | PG_V);
		pmap_update();
	}
	return (unsigned *) APTmap;
}
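
/*
 * A sketch of the recursive mapping that makes PTmap/APTmap work:
 * pm_pdir[PTDPTDI] points back at the page directory itself, so the
 * 4MB window starting at VA (PTDPTDI << PDRSHIFT) (== PTmap) exposes
 * the current address space's page tables as one linear array.  The
 * PTE for any va is then simply:
 *
 *	PTmap[va >> PAGE_SHIFT]
 *
 * and the page directory itself shows up inside that window as PTD.
 * APTDpde/APTmap are an identical "alternate" window that can be
 * aimed at another pmap's page directory, which is what get_ptbase
 * does above for non-current address spaces.
 */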
/*
 * Routine:	pmap_pte
 * Function:
 *	Extract the page table entry associated
 *	with the given map/virtual_address pair.
 */

PMAP_INLINE unsigned *
pmap_pte(pmap, va)
	register pmap_t pmap;
	vm_offset_t va;
{
	if (pmap && *pmap_pde(pmap, va)) {
		return get_ptbase(pmap) + i386_btop(va);
	}
	return (0);
}

/*
 * Super fast pmap_pte routine best used when scanning
 * the pv lists.  This eliminates many coarse-grained
 * pmap_update calls.  Note that many of the pv list
 * scans are across different pmaps.  It is very wasteful
 * to do an entire pmap_update for checking a single mapping.
 */

unsigned *
pmap_pte_quick(pmap, va)
	register pmap_t pmap;
	vm_offset_t va;
{
	unsigned pde, newpf;
	if (pde = (unsigned) pmap->pm_pdir[va >> PDRSHIFT]) {
		unsigned frame = (unsigned) pmap->pm_pdir[PTDPTDI] & PG_FRAME;
		unsigned index = i386_btop(va);
		/* are we current address space or kernel? */
		if ((pmap == kernel_pmap) ||
			(frame == (((unsigned) PTDpde) & PG_FRAME))) {
			return (unsigned *) PTmap + index;
		}
		newpf = pde & PG_FRAME;
		if (((*(unsigned *) PMAP1) & PG_FRAME) != newpf) {
			*(unsigned *) PMAP1 = newpf | PG_RW | PG_V;
			pmap_update_1pg((vm_offset_t) PADDR1);
		}
		return PADDR1 + ((unsigned) index & (NPTEPG - 1));
	}
	return (0);
}

/*
 * Routine:	pmap_extract
 * Function:
 *	Extract the physical page address associated
 *	with the given map/virtual_address pair.
 */
vm_offset_t
pmap_extract(pmap, va)
	register pmap_t pmap;
	vm_offset_t va;
{
	vm_offset_t rtval;
	pmap_lock(pmap);
	if (pmap && *pmap_pde(pmap, va)) {
		unsigned *pte;
		pte = get_ptbase(pmap) + i386_btop(va);
		rtval = ((*pte & PG_FRAME) | (va & PAGE_MASK));
		pmap_unlock(pmap);
		return rtval;
	}
	pmap_unlock(pmap);
	return 0;
}

/*
 * determine if a page is managed (memory vs. device)
 */
static PMAP_INLINE int
pmap_is_managed(pa)
	vm_offset_t pa;
{
	int i;

	if (!pmap_initialized)
		return 0;

	for (i = 0; phys_avail[i + 1]; i += 2) {
		if (pa < phys_avail[i + 1] && pa >= phys_avail[i])
			return 1;
	}
	return 0;
}

/***************************************************
 * Low level mapping routines.....
 ***************************************************/

/*
 * Add a list of wired pages to the kva
 * this routine is only used for temporary
 * kernel mappings that do not need to have
 * page modification or references recorded.
 * Note that old mappings are simply written
 * over.  The page *must* be wired.
 */
void
pmap_qenter(va, m, count)
	vm_offset_t va;
	vm_page_t *m;
	int count;
{
	int i;
	register unsigned *pte;

	for (i = 0; i < count; i++) {
		vm_offset_t tva = va + i * PAGE_SIZE;
		unsigned npte = VM_PAGE_TO_PHYS(m[i]) | PG_RW | PG_V;
		unsigned opte;
		pte = (unsigned *)vtopte(tva);
		opte = *pte;
		*pte = npte;
		if (opte)
			pmap_update_1pg(tva);
	}
}

/*
 * this routine jerks page mappings from the
 * kernel -- it is meant only for temporary mappings.
 */
void
pmap_qremove(va, count)
	vm_offset_t va;
	int count;
{
	int i;
	register unsigned *pte;

	for (i = 0; i < count; i++) {
		pte = (unsigned *)vtopte(va);
		*pte = 0;
		pmap_update_1pg(va);
		va += PAGE_SIZE;
	}
}
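
/*
 * A typical (hypothetical) use of the qenter/qremove pair: map a short
 * run of wired pages into scratch kva, touch the data, then tear the
 * mappings back down:
 *
 *	vm_page_t pages[4];
 *	vm_offset_t kva;
 *	...
 *	kva = kmem_alloc_pageable(kernel_map, 4 * PAGE_SIZE);
 *	pmap_qenter(kva, pages, 4);
 *	bcopy((caddr_t) kva, buffer, 4 * PAGE_SIZE);
 *	pmap_qremove(kva, 4);
 *
 * No pv entries are created and no modified/referenced state is
 * recorded, which is why the pages must already be wired.
 */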
/*
 * add a wired page to the kva
 * note that in order for the mapping to take effect -- you
 * should do a pmap_update after doing the pmap_kenter...
 */
PMAP_INLINE void
pmap_kenter(va, pa)
	vm_offset_t va;
	register vm_offset_t pa;
{
	register unsigned *pte;
	unsigned npte, opte;

	npte = pa | PG_RW | PG_V;
	pte = (unsigned *)vtopte(va);
	opte = *pte;
	*pte = npte;
	if (opte)
		pmap_update_1pg(va);
}

/*
 * remove a page from the kernel pagetables
 */
PMAP_INLINE void
pmap_kremove(va)
	vm_offset_t va;
{
	register unsigned *pte;

	pte = (unsigned *)vtopte(va);
	*pte = 0;
	pmap_update_1pg(va);
}

static vm_page_t
pmap_page_alloc(object, pindex)
	vm_object_t object;
	vm_pindex_t pindex;
{
	vm_page_t m;
	m = vm_page_alloc(object, pindex, VM_ALLOC_ZERO);
	if (m == NULL) {
		VM_WAIT;
	}
	return m;
}

vm_page_t
pmap_page_lookup(object, pindex)
	vm_object_t object;
	vm_pindex_t pindex;
{
	vm_page_t m;
retry:
	m = vm_page_lookup(object, pindex);
	if (m) {
		if (m->flags & PG_BUSY) {
			m->flags |= PG_WANTED;
			tsleep(m, PVM, "pplookp", 0);
			goto retry;
		}
	}
	return m;
}

/***************************************************
 * Page table page management routines.....
 ***************************************************/

/*
 * This routine unholds page table pages, and if the hold count
 * drops to zero, then it decrements the wire count.
 */
static int
pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m) {
	int s;

	vm_page_unhold(m);

	s = splvm();
	while (m->flags & PG_BUSY) {
		m->flags |= PG_WANTED;
		tsleep(m, PVM, "pmuwpt", 0);
	}
	splx(s);

	if (m->hold_count == 0) {
		vm_offset_t pteva;
		/*
		 * unmap the page table page
		 */
		pmap->pm_pdir[m->pindex] = 0;
		--pmap->pm_stats.resident_count;
		if ((((unsigned)pmap->pm_pdir[PTDPTDI]) & PG_FRAME) ==
			(((unsigned) PTDpde) & PG_FRAME)) {
			/*
			 * Do a pmap_update to make the invalidated mapping
			 * take effect immediately.
			 */
			pteva = UPT_MIN_ADDRESS + i386_ptob(m->pindex);
			pmap_update_1pg(pteva);
		}

#if defined(PTPHINT)
		if (pmap->pm_ptphint == m)
			pmap->pm_ptphint = NULL;
#endif

		/*
		 * If the page is finally unwired, simply free it.
		 */
		--m->wire_count;
		if (m->wire_count == 0) {

			if (m->flags & PG_WANTED) {
				m->flags &= ~PG_WANTED;
				wakeup(m);
			}

			vm_page_free_zero(m);
			--cnt.v_wire_count;
		}
		return 1;
	}
	return 0;
}

/*
 * After removing a page table entry, this routine is used to
 * conditionally free the page, and manage the hold/wire counts.
 */
int
pmap_unuse_pt(pmap, va, mpte)
	pmap_t pmap;
	vm_offset_t va;
	vm_page_t mpte;
{
	unsigned ptepindex;
	if (va >= UPT_MIN_ADDRESS)
		return 0;

	if (mpte == NULL) {
		ptepindex = (va >> PDRSHIFT);
#if defined(PTPHINT)
		if (pmap->pm_ptphint &&
			(pmap->pm_ptphint->pindex == ptepindex)) {
			mpte = pmap->pm_ptphint;
		} else {
			mpte = pmap_page_lookup(pmap->pm_pteobj, ptepindex);
			pmap->pm_ptphint = mpte;
		}
#else
		mpte = pmap_page_lookup(pmap->pm_pteobj, ptepindex);
#endif
	}

	return pmap_unwire_pte_hold(pmap, mpte);
}
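
/*
 * The two counts kept on a page table page work like this: hold_count
 * is bumped once for every valid mapping entered beneath that page
 * (see pmap_allocpte), and wire_count keeps the page resident.  As a
 * hypothetical example, a process with three pages mapped in the same
 * 4MB region has hold_count == 3 on that page table page; when
 * pmap_remove_pte drops the last of them, pmap_unuse_pt brings the
 * hold count to zero and pmap_unwire_pte_hold unwires and frees the
 * page.
 */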
/*
 * Initialize a preallocated and zeroed pmap structure,
 * such as one in a vmspace structure.
 */
void
pmap_pinit(pmap)
	register struct pmap *pmap;
{
	vm_page_t ptdpg;
	/*
	 * No need to allocate page table space yet but we do need a valid
	 * page directory table.
	 */

	if (pdstackptr > 0) {
		--pdstackptr;
		pmap->pm_pdir = (pd_entry_t *)pdstack[pdstackptr];
	} else {
		pmap->pm_pdir =
			(pd_entry_t *)kmem_alloc_pageable(kernel_map, PAGE_SIZE);
	}

	/*
	 * allocate object for the ptes
	 */
	pmap->pm_pteobj = vm_object_allocate(OBJT_DEFAULT, PTDPTDI + 1);

	/*
	 * allocate the page directory page
	 */
retry:
	ptdpg = pmap_page_alloc(pmap->pm_pteobj, PTDPTDI);
	if (ptdpg == NULL)
		goto retry;

	ptdpg->wire_count = 1;
	++cnt.v_wire_count;

	ptdpg->flags &= ~(PG_MAPPED|PG_BUSY);	/* not mapped normally */
	ptdpg->valid = VM_PAGE_BITS_ALL;

	pmap_kenter((vm_offset_t) pmap->pm_pdir, VM_PAGE_TO_PHYS(ptdpg));
	if ((ptdpg->flags & PG_ZERO) == 0)
		bzero(pmap->pm_pdir, PAGE_SIZE);

	/* wire in kernel global address entries */
	bcopy(PTD + KPTDI, pmap->pm_pdir + KPTDI, nkpt * PTESIZE);

	/* install self-referential address mapping entry */
	*(unsigned *) (pmap->pm_pdir + PTDPTDI) =
		VM_PAGE_TO_PHYS(ptdpg) | PG_V | PG_RW;

	pmap->pm_flags = 0;
	pmap->pm_count = 1;
	pmap->pm_ptphint = NULL;
	TAILQ_INIT(&pmap->pm_pvlist);
}

static int
pmap_release_free_page(pmap, p)
	struct pmap *pmap;
	vm_page_t p;
{
	int s;
	unsigned *pde = (unsigned *) pmap->pm_pdir;
	/*
	 * This code optimizes the case of freeing non-busy
	 * page-table pages.  Those pages are zero now, and
	 * might as well be placed directly into the zero queue.
	 */
	s = splvm();
	if (p->flags & PG_BUSY) {
		p->flags |= PG_WANTED;
		tsleep(p, PVM, "pmaprl", 0);
		splx(s);
		return 0;
	}

	if (p->flags & PG_WANTED) {
		p->flags &= ~PG_WANTED;
		wakeup(p);
	}

	/*
	 * Remove the page table page from the process's address space.
	 */
	pde[p->pindex] = 0;
	--pmap->pm_stats.resident_count;

	if (p->hold_count) {
		panic("pmap_release: freeing held page table page");
	}
	/*
	 * Page directory pages need to have the kernel
	 * stuff cleared, so they can go into the zero queue also.
	 */
	if (p->pindex == PTDPTDI) {
		bzero(pde + KPTDI, nkpt * PTESIZE);
		pde[APTDPTDI] = 0;
		pmap_kremove((vm_offset_t) pmap->pm_pdir);
	}

#if defined(PTPHINT)
	if (pmap->pm_ptphint &&
		(pmap->pm_ptphint->pindex == p->pindex))
		pmap->pm_ptphint = NULL;
#endif

	vm_page_free_zero(p);
	splx(s);
	return 1;
}

/*
 * this routine is called if the page table page is not
 * mapped correctly.
 */
static vm_page_t
_pmap_allocpte(pmap, ptepindex)
	pmap_t pmap;
	unsigned ptepindex;
{
	vm_offset_t pteva, ptepa;
	vm_page_t m;
	int needszero = 0;

	/*
	 * Find or fabricate a new pagetable page
	 */
retry:
	m = vm_page_lookup(pmap->pm_pteobj, ptepindex);
	if (m == NULL) {
		m = pmap_page_alloc(pmap->pm_pteobj, ptepindex);
		if (m == NULL)
			goto retry;
		if ((m->flags & PG_ZERO) == 0)
			needszero = 1;
		m->flags &= ~(PG_ZERO|PG_BUSY);
		m->valid = VM_PAGE_BITS_ALL;
	} else {
		if ((m->flags & PG_BUSY) || m->busy) {
			m->flags |= PG_WANTED;
			tsleep(m, PVM, "ptewai", 0);
			goto retry;
		}
	}

	if (m->queue != PQ_NONE) {
		int s = splvm();
		vm_page_unqueue(m);
		splx(s);
	}

	if (m->wire_count == 0)
		++cnt.v_wire_count;
	++m->wire_count;

	/*
	 * Increment the hold count for the page table page
	 * (denoting a new mapping.)
	 */
	++m->hold_count;

	/*
	 * Map the pagetable page into the process address space, if
	 * it isn't already there.
	 */

	pmap->pm_stats.resident_count++;

	ptepa = VM_PAGE_TO_PHYS(m);
	pmap->pm_pdir[ptepindex] = (pd_entry_t) (ptepa | PG_U | PG_RW | PG_V);

#if defined(PTPHINT)
	/*
	 * Set the page table hint
	 */
	pmap->pm_ptphint = m;
#endif

	/*
	 * Try to use the new mapping, but if we cannot, then
	 * do it with the routine that maps the page explicitly.
	 */
	if (needszero) {
		if ((((unsigned)pmap->pm_pdir[PTDPTDI]) & PG_FRAME) ==
			(((unsigned) PTDpde) & PG_FRAME)) {
			pteva = UPT_MIN_ADDRESS + i386_ptob(ptepindex);
			bzero((caddr_t) pteva, PAGE_SIZE);
		} else {
			pmap_zero_page(ptepa);
		}
	}

	m->valid = VM_PAGE_BITS_ALL;
	m->flags |= PG_MAPPED;

	return m;
}

static vm_page_t
pmap_allocpte(pmap, va)
	pmap_t pmap;
	vm_offset_t va;
{
	unsigned ptepindex;
	vm_offset_t ptepa;
	vm_page_t m;

	/*
	 * Calculate pagetable page index
	 */
	ptepindex = va >> PDRSHIFT;

	/*
	 * Get the page directory entry
	 */
	ptepa = (vm_offset_t) pmap->pm_pdir[ptepindex];

	/*
	 * If the page table page is mapped, we just increment the
	 * hold count, and activate it.
	 */
	if (ptepa) {
#if defined(PTPHINT)
		/*
		 * In order to get the page table page, try the
		 * hint first.
		 */
		if (pmap->pm_ptphint &&
			(pmap->pm_ptphint->pindex == ptepindex)) {
			m = pmap->pm_ptphint;
		} else {
			m = pmap_page_lookup(pmap->pm_pteobj, ptepindex);
			pmap->pm_ptphint = m;
		}
#else
		m = pmap_page_lookup(pmap->pm_pteobj, ptepindex);
#endif
		++m->hold_count;
		return m;
	}
	/*
	 * Here if the pte page isn't mapped, or if it has been deallocated.
	 */
	return _pmap_allocpte(pmap, ptepindex);
}
/***************************************************
 * Pmap allocation/deallocation routines.
 ***************************************************/

/*
 * Release any resources held by the given physical map.
 * Called when a pmap initialized by pmap_pinit is being released.
 * Should only be called if the map contains no valid mappings.
 */
void
pmap_release(pmap)
	register struct pmap *pmap;
{
	vm_page_t p, n, ptdpg;
	vm_object_t object = pmap->pm_pteobj;

	if (object->ref_count != 1)
		panic("pmap_release: pteobj reference count != 1");

	pmap_lock(pmap);
	ptdpg = NULL;
retry:
	for (p = TAILQ_FIRST(&object->memq); p != NULL; p = n) {
		n = TAILQ_NEXT(p, listq);
		if (p->pindex == PTDPTDI) {
			ptdpg = p;
			continue;
		}
		if (!pmap_release_free_page(pmap, p))
			goto retry;
	}

	if (ptdpg && !pmap_release_free_page(pmap, ptdpg))
		goto retry;

	vm_object_deallocate(object);
	if (pdstackptr < PDSTACKMAX) {
		pdstack[pdstackptr] = (vm_offset_t) pmap->pm_pdir;
		++pdstackptr;
	} else {
		kmem_free(kernel_map, (vm_offset_t) pmap->pm_pdir, PAGE_SIZE);
	}
	pmap->pm_pdir = 0;
}

/*
 * grow the number of kernel page table entries, if needed
 */
void
pmap_growkernel(vm_offset_t addr)
{
	struct proc *p;
	struct pmap *pmap;
	int s;

	s = splhigh();
	if (kernel_vm_end == 0) {
		kernel_vm_end = KERNBASE;
		nkpt = 0;
		while (pdir_pde(PTD, kernel_vm_end)) {
			kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
			++nkpt;
		}
	}
	addr = (addr + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
	while (kernel_vm_end < addr) {
		if (pdir_pde(PTD, kernel_vm_end)) {
			kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
			continue;
		}
		++nkpt;
		if (!nkpg) {
			vm_offset_t ptpkva = (vm_offset_t) vtopte(addr);
			/*
			 * This index is bogus, but out of the way
			 */
			vm_pindex_t ptpidx = (ptpkva >> PAGE_SHIFT);
			nkpg = vm_page_alloc(kernel_object,
				ptpidx, VM_ALLOC_SYSTEM);
			if (!nkpg)
				panic("pmap_growkernel: no memory to grow kernel");
			vm_page_wire(nkpg);
			vm_page_remove(nkpg);
			pmap_zero_page(VM_PAGE_TO_PHYS(nkpg));
		}
		pdir_pde(PTD, kernel_vm_end) = (pd_entry_t) (VM_PAGE_TO_PHYS(nkpg) | PG_V | PG_RW);
		nkpg = NULL;

		for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
			if (p->p_vmspace) {
				pmap = &p->p_vmspace->vm_pmap;
				*pmap_pde(pmap, kernel_vm_end) = pdir_pde(PTD, kernel_vm_end);
			}
		}
		*pmap_pde(kernel_pmap, kernel_vm_end) = pdir_pde(PTD, kernel_vm_end);
		kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
	}
	splx(s);
}

/*
 * Retire the given physical map from service.
 * Should only be called if the map contains
 * no valid mappings.
 */
void
pmap_destroy(pmap)
	register pmap_t pmap;
{
	int count;

	if (pmap == NULL)
		return;

	count = --pmap->pm_count;
	if (count == 0) {
		pmap_release(pmap);
		free((caddr_t) pmap, M_VMPMAP);
	}
}

/*
 * Add a reference to the specified pmap.
 */
void
pmap_reference(pmap)
	pmap_t pmap;
{
	if (pmap != NULL) {
		pmap->pm_count++;
	}
}

/***************************************************
 * page management routines.
 ***************************************************/
/*
 * free the pv_entry back to the free list
 */
static PMAP_INLINE void
free_pv_entry(pv)
	pv_entry_t pv;
{
	++pv_freelistcnt;
	TAILQ_INSERT_HEAD(&pv_freelist, pv, pv_list);
}

/*
 * get a new pv_entry, allocating a block from the system
 * when needed.
 * the memory allocation is performed bypassing the malloc code
 * because of the possibility of allocations at interrupt time.
 */
static pv_entry_t
get_pv_entry()
{
	pv_entry_t tmp;

	/*
	 * get more pv_entry pages if needed
	 */
	if (pv_freelistcnt < PV_FREELIST_MIN || !TAILQ_FIRST(&pv_freelist)) {
		pmap_alloc_pv_entry();
	}
	/*
	 * get a pv_entry off of the free list
	 */
	--pv_freelistcnt;
	tmp = TAILQ_FIRST(&pv_freelist);
	TAILQ_REMOVE(&pv_freelist, tmp, pv_list);
	return tmp;
}

/*
 * This *strange* allocation routine eliminates the possibility of a malloc
 * failure (*FATAL*) for a pv_entry_t data structure.
 * also -- this code is MUCH MUCH faster than the malloc equiv...
 * We really need to do the slab allocator thingie here.
 */
static void
pmap_alloc_pv_entry()
{
	/*
	 * do we have any pre-allocated map-pages left?
	 */
	if (npvvapg) {
		vm_page_t m;

		/*
		 * allocate a physical page out of the vm system
		 */
		m = vm_page_alloc(kernel_object,
			OFF_TO_IDX(pvva - vm_map_min(kernel_map)),
			VM_ALLOC_INTERRUPT);
		if (m) {
			int newentries;
			int i;
			pv_entry_t entry;

			newentries = (PAGE_SIZE / sizeof(struct pv_entry));
			/*
			 * wire the page
			 */
			vm_page_wire(m);
			m->flags &= ~PG_BUSY;
			/*
			 * let the kernel see it
			 */
			pmap_kenter(pvva, VM_PAGE_TO_PHYS(m));

			entry = (pv_entry_t) pvva;
			/*
			 * update the allocation pointers
			 */
			pvva += PAGE_SIZE;
			--npvvapg;

			/*
			 * free the entries into the free list
			 */
			for (i = 0; i < newentries; i++) {
				free_pv_entry(entry);
				entry++;
			}
		}
	}
	if (!TAILQ_FIRST(&pv_freelist))
		panic("get_pv_entry: cannot get a pv_entry_t");
}

/*
 * init the pv_entry allocation system
 */
#define PVSPERPAGE 64
void
init_pv_entries(npg)
	int npg;
{
	/*
	 * Allocate enough kvm space for PVSPERPAGE entries per page (lots).
	 * kvm space is fairly cheap, be generous!  (The system can panic if
	 * this is too small.)
	 */
	npvvapg = ((npg * PVSPERPAGE) * sizeof(struct pv_entry)
		+ PAGE_SIZE - 1) / PAGE_SIZE;
	pvva = kmem_alloc_pageable(kernel_map, npvvapg * PAGE_SIZE);
	/*
	 * get the first batch of entries
	 */
	pmap_alloc_pv_entry();
}
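
/*
 * One pv_entry exists for every (pmap, va) mapping of every managed
 * physical page.  For example, a text page shared read-only by two
 * processes carries two pv entries: each is on that page's pv_list
 * (reached through pa_to_pvh) and on the owning pmap's pm_pvlist.
 * This is what lets pmap_remove_all and the testbit/changebit loops
 * find every mapping of a page without scanning page tables.
 */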
/*
 * Remove the pv entry for the given (pmap, va) from the physical
 * page's pv list.  Whichever of the page's pv list and the pmap's
 * pv list is shorter is the one that gets searched.  The now unused
 * entry is freed.
 */
static int
pmap_remove_entry(pmap, ppv, va)
	struct pmap *pmap;
	pv_table_t *ppv;
	vm_offset_t va;
{
	pv_entry_t pv;
	int rtval;
	int s;

	s = splvm();
	if (ppv->pv_list_count < pmap->pm_stats.resident_count) {
		for (pv = TAILQ_FIRST(&ppv->pv_list);
			pv;
			pv = TAILQ_NEXT(pv, pv_list)) {
			if (pmap == pv->pv_pmap && va == pv->pv_va)
				break;
		}
	} else {
		for (pv = TAILQ_FIRST(&pmap->pm_pvlist);
			pv;
			pv = TAILQ_NEXT(pv, pv_plist)) {
			if (va == pv->pv_va)
				break;
		}
	}

	rtval = 0;
	if (pv) {
		rtval = pmap_unuse_pt(pmap, va, pv->pv_ptem);
		TAILQ_REMOVE(&ppv->pv_list, pv, pv_list);
		--ppv->pv_list_count;
		TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist);
		free_pv_entry(pv);
	}

	splx(s);
	return rtval;
}

/*
 * Create a pv entry for page at pa for
 * (pmap, va).
 */
static void
pmap_insert_entry(pmap, va, mpte, pa)
	pmap_t pmap;
	vm_offset_t va;
	vm_page_t mpte;
	vm_offset_t pa;
{
	int s;
	pv_entry_t pv;
	pv_table_t *ppv;

	s = splvm();
	pv = get_pv_entry();
	pv->pv_va = va;
	pv->pv_pmap = pmap;
	pv->pv_ptem = mpte;

	TAILQ_INSERT_TAIL(&pmap->pm_pvlist, pv, pv_plist);

	ppv = pa_to_pvh(pa);
	TAILQ_INSERT_TAIL(&ppv->pv_list, pv, pv_list);
	++ppv->pv_list_count;

	splx(s);
}

/*
 * pmap_remove_pte: do the things to unmap a page in a process
 */
static int
pmap_remove_pte(pmap, ptq, va)
	struct pmap *pmap;
	unsigned *ptq;
	vm_offset_t va;
{
	unsigned oldpte;
	pv_table_t *ppv;

	oldpte = *ptq;
	*ptq = 0;
	if (oldpte & PG_W)
		pmap->pm_stats.wired_count -= 1;
	pmap->pm_stats.resident_count -= 1;
	if (oldpte & PG_MANAGED) {
		ppv = pa_to_pvh(oldpte);
		if (oldpte & PG_M) {
#if defined(PMAP_DIAGNOSTIC)
			if (pmap_nw_modified((pt_entry_t) oldpte)) {
				printf("pmap_remove: modified page not writable: va: 0x%lx, pte: 0x%lx\n", va, (int) oldpte);
			}
#endif
			if (pmap_track_modified(va))
				ppv->pv_vm_page->dirty = VM_PAGE_BITS_ALL;
		}
		return pmap_remove_entry(pmap, ppv, va);
	} else {
		return pmap_unuse_pt(pmap, va, NULL);
	}
}

/*
 * Remove a single page from a process address space
 */
static void
pmap_remove_page(pmap, va)
	struct pmap *pmap;
	register vm_offset_t va;
{
	register unsigned *ptq;

	/*
	 * if there is no pte for this address, just skip it!!!
	 */
	if (*pmap_pde(pmap, va) == 0) {
		return;
	}

	/*
	 * get a local va for mappings for this pmap.
	 */
	ptq = get_ptbase(pmap) + i386_btop(va);
	if (*ptq) {
		(void) pmap_remove_pte(pmap, ptq, va);
		pmap_update_1pg(va);
	}
	return;
}

/*
 * Remove the given range of addresses from the specified map.
 *
 * It is assumed that the start and end are properly
 * rounded to the page size.
 */
void
pmap_remove(pmap, sva, eva)
	struct pmap *pmap;
	register vm_offset_t sva;
	register vm_offset_t eva;
{
	register unsigned *ptbase;
	vm_offset_t pdnxt;
	vm_offset_t ptpaddr;
	vm_offset_t sindex, eindex;
	int anyvalid;

	if (pmap == NULL)
		return;

	pmap_lock(pmap);
	/*
	 * special handling of removing one page.  a very
	 * common operation and easy to short circuit some
	 * code.
	 */
	if ((sva + PAGE_SIZE) == eva) {
		pmap_remove_page(pmap, sva);
		pmap_unlock(pmap);
		return;
	}

	anyvalid = 0;

	/*
	 * Get a local virtual address for the mappings that are being
	 * worked with.
	 */
	ptbase = get_ptbase(pmap);

	sindex = i386_btop(sva);
	eindex = i386_btop(eva);

	for (; sindex < eindex; sindex = pdnxt) {

		/*
		 * Calculate index for next page table.
		 */
		pdnxt = ((sindex + NPTEPG) & ~(NPTEPG - 1));
		ptpaddr = (vm_offset_t) *pmap_pde(pmap, i386_ptob(sindex));

		/*
		 * Weed out invalid mappings. Note: we assume that the page
		 * directory table is always allocated, and in kernel virtual.
		 */
		if (ptpaddr == 0)
			continue;

		/*
		 * Limit our scan to either the end of the va represented
		 * by the current page table page, or to the end of the
		 * range being removed.
		 */
		if (pdnxt > eindex) {
			pdnxt = eindex;
		}

		for (; sindex != pdnxt; sindex++) {
			vm_offset_t va;
			if (ptbase[sindex] == 0) {
				continue;
			}
			va = i386_ptob(sindex);

			anyvalid++;
			if (pmap_remove_pte(pmap,
				ptbase + sindex, va))
				break;
		}
	}

	if (anyvalid) {
		pmap_update();
	}
	pmap_unlock(pmap);
}
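
/*
 * In pmap_remove above, for instance, removing the (hypothetical)
 * range 0x00400000-0x01000000 visits at most three page table pages:
 * on each outer iteration pdnxt is rounded up to the next 4MB
 * boundary, and if the PDE for the current 4MB piece is zero, the
 * NPTEPG PTE slots under it are skipped without being touched.
 */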
/*
 * Routine:	pmap_remove_all
 * Function:
 *		Removes this physical page from
 *		all physical maps in which it resides.
 *		Reflects back modify bits to the pager.
 *
 * Notes:
 *		Original versions of this routine were very
 *		inefficient because they iteratively called
 *		pmap_remove (slow...)
 */

static void
pmap_remove_all(pa)
	vm_offset_t pa;
{
	register pv_entry_t pv;
	pv_table_t *ppv;
	register unsigned *pte, tpte;
	int nmodify;
	int update_needed;
	int s;

	nmodify = 0;
	update_needed = 0;
#if defined(PMAP_DIAGNOSTIC)
	/*
	 * XXX this makes pmap_page_protect(NONE) illegal for non-managed
	 * pages!
	 */
	if (!pmap_is_managed(pa)) {
		panic("pmap_page_protect: illegal for unmanaged page, pa: 0x%lx", pa);
	}
#endif

	s = splvm();
	ppv = pa_to_pvh(pa);
	while ((pv = TAILQ_FIRST(&ppv->pv_list)) != NULL) {
		pmap_lock(pv->pv_pmap);
		pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va);

		pv->pv_pmap->pm_stats.resident_count--;

		tpte = *pte;
		*pte = 0;
		if (tpte & PG_W)
			pv->pv_pmap->pm_stats.wired_count--;
		/*
		 * Update the vm_page_t clean and reference bits.
		 */
		if (tpte & PG_M) {
#if defined(PMAP_DIAGNOSTIC)
			if (pmap_nw_modified((pt_entry_t) tpte)) {
				printf("pmap_remove_all: modified page not writable: va: 0x%lx, pte: 0x%lx\n", pv->pv_va, tpte);
			}
#endif
			if (pmap_track_modified(pv->pv_va))
				ppv->pv_vm_page->dirty = VM_PAGE_BITS_ALL;
		}
		if (!update_needed &&
			((!curproc || (&curproc->p_vmspace->vm_pmap == pv->pv_pmap)) ||
			(pv->pv_pmap == kernel_pmap))) {
			update_needed = 1;
		}

		TAILQ_REMOVE(&pv->pv_pmap->pm_pvlist, pv, pv_plist);
		TAILQ_REMOVE(&ppv->pv_list, pv, pv_list);
		--ppv->pv_list_count;
		pmap_unuse_pt(pv->pv_pmap, pv->pv_va, pv->pv_ptem);
		pmap_unlock(pv->pv_pmap);
		free_pv_entry(pv);
	}

	if (update_needed)
		pmap_update();
	splx(s);
	return;
}

/*
 * Set the physical protection on the
 * specified range of this map as requested.
 */
void
pmap_protect(pmap, sva, eva, prot)
	register pmap_t pmap;
	vm_offset_t sva, eva;
	vm_prot_t prot;
{
	register unsigned *ptbase;
	vm_offset_t pdnxt;
	vm_offset_t ptpaddr;
	vm_offset_t sindex, eindex;
	int anychanged;

	if (pmap == NULL)
		return;

	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
		pmap_remove(pmap, sva, eva);
		return;
	}
	if (prot & VM_PROT_WRITE) {
		return;
	}

	pmap_lock(pmap);
	anychanged = 0;

	ptbase = get_ptbase(pmap);

	sindex = i386_btop(sva);
	eindex = i386_btop(eva);

	for (; sindex < eindex; sindex = pdnxt) {

		pdnxt = ((sindex + NPTEPG) & ~(NPTEPG - 1));
		ptpaddr = (vm_offset_t) *pmap_pde(pmap, i386_ptob(sindex));

		/*
		 * Weed out invalid mappings. Note: we assume that the page
		 * directory table is always allocated, and in kernel virtual.
		 */
		if (ptpaddr == 0)
			continue;

		if (pdnxt > eindex) {
			pdnxt = eindex;
		}

		for (; sindex != pdnxt; sindex++) {

			unsigned pbits = ptbase[sindex];

			if (pbits & PG_RW) {
				if (pbits & PG_M) {
					vm_offset_t sva = i386_ptob(sindex);
					if (pmap_track_modified(sva)) {
						vm_page_t m = PHYS_TO_VM_PAGE(pbits);
						m->dirty = VM_PAGE_BITS_ALL;
					}
				}
				ptbase[sindex] = pbits & ~(PG_M|PG_RW);
				anychanged = 1;
			}
		}
	}
	pmap_unlock(pmap);
	if (anychanged)
		pmap_update();
}

/*
 * Insert the given physical page (p) at
 * the specified virtual address (v) in the
 * target physical map with the protection requested.
 *
 * If specified, the page will be wired down, meaning
 * that the related pte can not be reclaimed.
 *
 * NB:  This is the only routine which MAY NOT lazy-evaluate
 * or lose information.  That is, this routine must actually
 * insert this page into the given map NOW.
 */
void
pmap_enter(pmap, va, pa, prot, wired)
	register pmap_t pmap;
	vm_offset_t va;
	register vm_offset_t pa;
	vm_prot_t prot;
	boolean_t wired;
{
	register unsigned *pte;
	vm_offset_t opa;
	vm_offset_t origpte, newpte;
	vm_page_t mpte;

	if (pmap == NULL)
		return;

	pmap_lock(pmap);
	va &= PG_FRAME;
#ifdef PMAP_DIAGNOSTIC
	if (va > VM_MAX_KERNEL_ADDRESS)
		panic("pmap_enter: toobig");
	if ((va >= UPT_MIN_ADDRESS) && (va < UPT_MAX_ADDRESS))
		panic("pmap_enter: invalid to pmap_enter page table pages (va: 0x%x)", va);
#endif

	mpte = NULL;
	/*
	 * In the case that a page table page is not
	 * resident, we are creating it here.
	 */
	if (va < UPT_MIN_ADDRESS)
		mpte = pmap_allocpte(pmap, va);

	pte = pmap_pte(pmap, va);
	/*
	 * Page Directory table entry not valid, we need a new PT page
	 */
	if (pte == NULL) {
		panic("pmap_enter: invalid page directory, pdir=%p, va=0x%lx\n",
			pmap->pm_pdir[PTDPTDI], va);
	}

	origpte = *(vm_offset_t *)pte;
	pa &= PG_FRAME;
	opa = origpte & PG_FRAME;

	/*
	 * Mapping has not changed, must be protection or wiring change.
	 */
	if (origpte && (opa == pa)) {
		/*
		 * Wiring change, just update stats. We don't worry about
		 * wiring PT pages as they remain resident as long as there
		 * are valid mappings in them. Hence, if a user page is wired,
		 * the PT page will be also.
		 */
		if (wired && ((origpte & PG_W) == 0))
			pmap->pm_stats.wired_count++;
		else if (!wired && (origpte & PG_W))
			pmap->pm_stats.wired_count--;

#if defined(PMAP_DIAGNOSTIC)
		if (pmap_nw_modified((pt_entry_t) origpte)) {
			printf("pmap_enter: modified page not writable: va: 0x%lx, pte: 0x%lx\n", va, origpte);
		}
#endif

		/*
		 * We might be turning off write access to the page,
		 * so we go ahead and sense modify status.
		 */
		if (origpte & PG_MANAGED) {
			vm_page_t m;
			if (origpte & PG_M) {
				if (pmap_track_modified(va)) {
					m = PHYS_TO_VM_PAGE(pa);
					m->dirty = VM_PAGE_BITS_ALL;
				}
			}
			pa |= PG_MANAGED;
		}

		if (mpte)
			--mpte->hold_count;

		goto validate;
	}
	/*
	 * Mapping has changed, invalidate old range and fall through to
	 * handle validating new mapping.
	 */
	if (opa) {
		int err;
		err = pmap_remove_pte(pmap, pte, va);
		if (err)
			panic("pmap_enter: pte vanished, va: 0x%x", va);
	}

	/*
	 * Enter on the PV list if part of our managed memory.  Note that we
	 * raise IPL while manipulating pv_table since pmap_enter can be
	 * called at interrupt time.
	 */
	if (pmap_is_managed(pa)) {
		pmap_insert_entry(pmap, va, mpte, pa);
		pa |= PG_MANAGED;
	}

	/*
	 * Increment counters
	 */
	pmap->pm_stats.resident_count++;
	if (wired)
		pmap->pm_stats.wired_count++;

validate:
	/*
	 * Now validate mapping with desired protection/wiring.
	 */
	newpte = (vm_offset_t) (pa | pte_prot(pmap, prot) | PG_V);

	if (wired)
		newpte |= PG_W;
	if (va < UPT_MIN_ADDRESS)
		newpte |= PG_U;

	/*
	 * If the mapping or permission bits are different, we need
	 * to update the pte.
	 */
	if ((origpte & ~(PG_M|PG_A)) != newpte) {
		*pte = newpte;
		if (origpte)
			pmap_update_1pg(va);
	}
	pmap_unlock(pmap);
}
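
/*
 * A minimal sketch of the call as the rest of the system uses
 * pmap_enter (compare pmap_map above): establish one wired, writable
 * kernel mapping for the physical page at "pa":
 *
 *	pmap_enter(kernel_pmap, va, pa,
 *	    VM_PROT_READ | VM_PROT_WRITE, TRUE);
 *
 * vm_fault does the same for user pmaps, normally with "wired" FALSE
 * so the mapping can later be torn down by the pageout code.
 */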
/*
 * this code makes some *MAJOR* assumptions:
 * 1. Current pmap & pmap exists.
 * 2. Not wired.
 * 3. Read access.
 * 4. No page table pages.
 * 5. Tlbflush is deferred to calling procedure.
 * 6. Page IS managed.
 * but is *MUCH* faster than pmap_enter...
 */

static vm_page_t
pmap_enter_quick(pmap, va, pa, mpte)
	register pmap_t pmap;
	vm_offset_t va;
	register vm_offset_t pa;
	vm_page_t mpte;
{
	register unsigned *pte;

	/*
	 * In the case that a page table page is not
	 * resident, we are creating it here.
	 */
	if (va < UPT_MIN_ADDRESS) {
		unsigned ptepindex;
		vm_offset_t ptepa;

		/*
		 * Calculate pagetable page index
		 */
		ptepindex = va >> PDRSHIFT;
		if (mpte && (mpte->pindex == ptepindex)) {
			++mpte->hold_count;
		} else {
retry:
			/*
			 * Get the page directory entry
			 */
			ptepa = (vm_offset_t) pmap->pm_pdir[ptepindex];

			/*
			 * If the page table page is mapped, we just increment
			 * the hold count, and activate it.
			 */
			if (ptepa) {
#if defined(PTPHINT)
				if (pmap->pm_ptphint &&
					(pmap->pm_ptphint->pindex == ptepindex)) {
					mpte = pmap->pm_ptphint;
				} else {
					mpte = pmap_page_lookup(pmap->pm_pteobj, ptepindex);
					pmap->pm_ptphint = mpte;
				}
#else
				mpte = pmap_page_lookup(pmap->pm_pteobj, ptepindex);
#endif
				if (mpte == NULL)
					goto retry;
				++mpte->hold_count;
			} else {
				mpte = _pmap_allocpte(pmap, ptepindex);
			}
		}
	} else {
		mpte = NULL;
	}

	/*
	 * This call to vtopte makes the assumption that we are
	 * entering the page into the current pmap.  In order to support
	 * quick entry into any pmap, one would likely use pmap_pte_quick.
	 * But that isn't as quick as vtopte.
	 */
	pte = (unsigned *)vtopte(va);
	if (*pte) {
		if (mpte)
			pmap_unwire_pte_hold(pmap, mpte);
		return 0;
	}

	/*
	 * Enter on the PV list if part of our managed memory.  Note that we
	 * raise IPL while manipulating pv_table since pmap_enter can be
	 * called at interrupt time.
	 */
	pmap_insert_entry(pmap, va, mpte, pa);

	/*
	 * Increment counters
	 */
	pmap->pm_stats.resident_count++;

	/*
	 * Now validate mapping with RO protection
	 */
	*pte = pa | PG_V | PG_U | PG_MANAGED;

	return mpte;
}

#define MAX_INIT_PT (96)
/*
 * pmap_object_init_pt preloads the ptes for a given object
 * into the specified pmap.  This eliminates the blast of soft
 * faults on process startup and immediately after an mmap.
 */
void
pmap_object_init_pt(pmap, addr, object, pindex, size, limit)
	pmap_t pmap;
	vm_offset_t addr;
	vm_object_t object;
	vm_pindex_t pindex;
	vm_size_t size;
	int limit;
{
	vm_offset_t tmpidx;
	int psize;
	vm_page_t p, mpte;
	int objpgs;

	psize = i386_btop(size);

	if (!pmap || (object->type != OBJT_VNODE) ||
		(limit && (psize > MAX_INIT_PT) &&
			(object->resident_page_count > MAX_INIT_PT))) {
		return;
	}

	pmap_lock(pmap);
	if (psize + pindex > object->size)
		psize = object->size - pindex;

	mpte = NULL;
	/*
	 * if we are processing a major portion of the object, then scan the
	 * entire thing.
	 */
	if (psize > (object->size >> 2)) {
		objpgs = psize;

		for (p = TAILQ_FIRST(&object->memq);
			((objpgs > 0) && (p != NULL));
			p = TAILQ_NEXT(p, listq)) {

			tmpidx = p->pindex;
			if (tmpidx < pindex) {
				continue;
			}
			tmpidx -= pindex;
			if (tmpidx >= psize) {
				continue;
			}
			if (((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
				(p->busy == 0) &&
			    (p->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {
				if ((p->queue - p->pc) == PQ_CACHE)
					vm_page_deactivate(p);
				p->flags |= PG_BUSY;
				mpte = pmap_enter_quick(pmap,
					addr + i386_ptob(tmpidx),
					VM_PAGE_TO_PHYS(p), mpte);
				p->flags |= PG_MAPPED;
				PAGE_WAKEUP(p);
			}
			objpgs -= 1;
		}
	} else {
		/*
		 * else lookup the pages one-by-one.
		 */
		for (tmpidx = 0; tmpidx < psize; tmpidx += 1) {
			p = vm_page_lookup(object, tmpidx + pindex);
			if (p &&
				((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
				(p->busy == 0) &&
			    (p->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {
				if ((p->queue - p->pc) == PQ_CACHE)
					vm_page_deactivate(p);
				p->flags |= PG_BUSY;
				mpte = pmap_enter_quick(pmap,
					addr + i386_ptob(tmpidx),
					VM_PAGE_TO_PHYS(p), mpte);
				p->flags |= PG_MAPPED;
				PAGE_WAKEUP(p);
			}
		}
	}
	pmap_unlock(pmap);
	return;
}

/*
 * pmap_prefault provides a quick way of clustering
 * pagefaults into a process's address space.  It is a "cousin"
 * of pmap_object_init_pt, except it runs at page fault time instead
 * of mmap time.
 */
#define PFBAK 2
#define PFFOR 2
#define PAGEORDER_SIZE (PFBAK+PFFOR)

static int pmap_prefault_pageorder[] = {
	-PAGE_SIZE, PAGE_SIZE, -2 * PAGE_SIZE, 2 * PAGE_SIZE
};

void
pmap_prefault(pmap, addra, entry, object)
	pmap_t pmap;
	vm_offset_t addra;
	vm_map_entry_t entry;
	vm_object_t object;
{
	int i;
	vm_offset_t starta;
	vm_offset_t addr;
	vm_pindex_t pindex;
	vm_page_t m, mpte;

	if (entry->object.vm_object != object)
		return;

	if (!curproc || (pmap != &curproc->p_vmspace->vm_pmap))
		return;

	pmap_lock(pmap);
	starta = addra - PFBAK * PAGE_SIZE;
	if (starta < entry->start) {
		starta = entry->start;
	} else if (starta > addra) {
		starta = 0;
	}

	mpte = NULL;
	for (i = 0; i < PAGEORDER_SIZE; i++) {
		vm_object_t lobject;
		unsigned *pte;

		addr = addra + pmap_prefault_pageorder[i];
		if (addr < starta || addr >= entry->end)
			continue;

		if ((*pmap_pde(pmap, addr)) == NULL)
			continue;

		pte = (unsigned *) vtopte(addr);
		if (*pte)
			continue;

		pindex = ((addr - entry->start) + entry->offset) >> PAGE_SHIFT;
		lobject = object;
		for (m = vm_page_lookup(lobject, pindex);
		    (!m && (lobject->type == OBJT_DEFAULT) && (lobject->backing_object));
		    lobject = lobject->backing_object) {
			if (lobject->backing_object_offset & PAGE_MASK)
				break;
			pindex += (lobject->backing_object_offset >> PAGE_SHIFT);
			m = vm_page_lookup(lobject->backing_object, pindex);
		}

		/*
		 * give-up when a page is not in memory
		 */
		if (m == NULL)
			break;

		if (((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
			(m->busy == 0) &&
		    (m->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {

			if ((m->queue - m->pc) == PQ_CACHE) {
				vm_page_deactivate(m);
			}
			m->flags |= PG_BUSY;
			mpte = pmap_enter_quick(pmap, addr,
				VM_PAGE_TO_PHYS(m), mpte);
			m->flags |= PG_MAPPED;
			PAGE_WAKEUP(m);
		}
	}
	pmap_unlock(pmap);
}

/*
 * Routine:	pmap_change_wiring
 * Function:	Change the wiring attribute for a map/virtual-address
 *		pair.
 * In/out conditions:
 *		The mapping must already exist in the pmap.
 */
void
pmap_change_wiring(pmap, va, wired)
	register pmap_t pmap;
	vm_offset_t va;
	boolean_t wired;
{
	register unsigned *pte;

	if (pmap == NULL)
		return;

	pmap_lock(pmap);
	pte = pmap_pte(pmap, va);

	if (wired && !pmap_pte_w(pte))
		pmap->pm_stats.wired_count++;
	else if (!wired && pmap_pte_w(pte))
		pmap->pm_stats.wired_count--;

	/*
	 * Wiring is not a hardware characteristic so there is no need to
	 * invalidate TLB.
	 */
	pmap_pte_set_w(pte, wired);
	pmap_unlock(pmap);
}

/*
 * Copy the range specified by src_addr/len
 * from the source map to the range dst_addr/len
 * in the destination map.
 *
 * This routine is only advisory and need not do anything.
 */
void
pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
	pmap_t dst_pmap, src_pmap;
	vm_offset_t dst_addr;
	vm_size_t len;
	vm_offset_t src_addr;
{
	vm_offset_t addr;
	vm_offset_t end_addr = src_addr + len;
	vm_offset_t pdnxt;
	unsigned src_frame, dst_frame;

	if (dst_addr != src_addr)
		return;

	pmap_lock2(src_pmap, dst_pmap);
	src_frame = ((unsigned) src_pmap->pm_pdir[PTDPTDI]) & PG_FRAME;
	if (src_frame != (((unsigned) PTDpde) & PG_FRAME)) {
		pmap_unlock(src_pmap);
		pmap_unlock(dst_pmap);
		return;
	}

	dst_frame = ((unsigned) dst_pmap->pm_pdir[PTDPTDI]) & PG_FRAME;
	if (dst_frame != (((unsigned) APTDpde) & PG_FRAME)) {
		APTDpde = (pd_entry_t) (dst_frame | PG_RW | PG_V);
		pmap_update();
	}

	for (addr = src_addr; addr < end_addr; addr = pdnxt) {
		unsigned *src_pte, *dst_pte;
		vm_page_t dstmpte, srcmpte;
		vm_offset_t srcptepaddr;
		unsigned ptepindex;

		if (addr >= UPT_MIN_ADDRESS)
			panic("pmap_copy: invalid to pmap_copy page tables\n");

		pdnxt = ((addr + PAGE_SIZE*NPTEPG) & ~(PAGE_SIZE*NPTEPG - 1));
		ptepindex = addr >> PDRSHIFT;

		srcptepaddr = (vm_offset_t) src_pmap->pm_pdir[ptepindex];
		if (srcptepaddr == 0)
			continue;

		srcmpte = vm_page_lookup(src_pmap->pm_pteobj, ptepindex);
		if ((srcmpte->hold_count == 0) || (srcmpte->flags & PG_BUSY))
			continue;

		if (pdnxt > end_addr)
			pdnxt = end_addr;

		src_pte = (unsigned *) vtopte(addr);
		dst_pte = (unsigned *) avtopte(addr);
		while (addr < pdnxt) {
			unsigned ptetemp;
			ptetemp = *src_pte;
			/*
			 * we only virtual copy managed pages
			 */
			if ((ptetemp & PG_MANAGED) != 0) {
				/*
				 * We have to check after allocpte for the
				 * pte still being around...  allocpte can
				 * block.
				 */
				dstmpte = pmap_allocpte(dst_pmap, addr);
				if ((*dst_pte == 0) && (ptetemp = *src_pte)) {
					/*
					 * Clear the modified and
					 * accessed (referenced) bits
					 * during the copy.
					 */
					*dst_pte = ptetemp & ~(PG_M|PG_A);
					dst_pmap->pm_stats.resident_count++;
					pmap_insert_entry(dst_pmap, addr,
						dstmpte,
						(ptetemp & PG_FRAME));
				} else {
					pmap_unwire_pte_hold(dst_pmap, dstmpte);
				}
				if (dstmpte->hold_count >= srcmpte->hold_count)
					break;
			}
			addr += PAGE_SIZE;
			++src_pte;
			++dst_pte;
		}
	}
	pmap_unlock(src_pmap);
	pmap_unlock(dst_pmap);
}

/*
 * Routine:	pmap_kernel
 * Function:
 *	Returns the physical map handle for the kernel.
 */
pmap_t
pmap_kernel()
{
	return (kernel_pmap);
}

/*
 * pmap_zero_page zeros the specified (machine independent)
 * page by mapping the page into virtual memory and using
 * bzero to clear its contents, one machine dependent page
 * at a time.
 */
void
pmap_zero_page(phys)
	vm_offset_t phys;
{
	if (*(int *) CMAP2)
		panic("pmap_zero_page: CMAP busy");

	*(int *) CMAP2 = PG_V | PG_RW | (phys & PG_FRAME);
	bzero(CADDR2, PAGE_SIZE);
	*(int *) CMAP2 = 0;
	pmap_update_1pg((vm_offset_t) CADDR2);
}

/*
 * pmap_copy_page copies the specified (machine independent)
 * page by mapping the page into virtual memory and using
 * bcopy to copy the page, one machine dependent page at a
 * time.
 */
/*
 *  pmap_copy_page copies the specified (machine independent)
 *  page by mapping the page into virtual memory and using
 *  bcopy to copy the page, one machine dependent page at a
 *  time.
 */
void
pmap_copy_page(src, dst)
    vm_offset_t src;
    vm_offset_t dst;
{
    if (*(int *) CMAP1 || *(int *) CMAP2)
        panic("pmap_copy_page: CMAP busy");

    *(int *) CMAP1 = PG_V | PG_RW | (src & PG_FRAME);
    *(int *) CMAP2 = PG_V | PG_RW | (dst & PG_FRAME);

    bcopy(CADDR1, CADDR2, PAGE_SIZE);

    *(int *) CMAP1 = 0;
    *(int *) CMAP2 = 0;
    pmap_update_2pg((vm_offset_t) CADDR1, (vm_offset_t) CADDR2);
}

/*
 *  Routine:    pmap_pageable
 *  Function:
 *      Make the specified pages (by pmap, offset)
 *      pageable (or not) as requested.
 *
 *      A page which is not pageable may not take
 *      a fault; therefore, its page table entry
 *      must remain valid for the duration.
 *
 *      This routine is merely advisory; pmap_enter
 *      will specify that these pages are to be wired
 *      down (or not) as appropriate.
 */
void
pmap_pageable(pmap, sva, eva, pageable)
    pmap_t pmap;
    vm_offset_t sva, eva;
    boolean_t pageable;
{
}

/*
 * This routine returns true if a physical page resides
 * in the given pmap.
 */
boolean_t
pmap_page_exists(pmap, pa)
    pmap_t pmap;
    vm_offset_t pa;
{
    register pv_entry_t pv;
    pv_table_t *ppv;
    int s;

    if (!pmap_is_managed(pa))
        return FALSE;

    s = splvm();

    ppv = pa_to_pvh(pa);
    /*
     * Check current mappings, returning immediately if one
     * belongs to the given pmap.
     */
    for (pv = TAILQ_FIRST(&ppv->pv_list);
        pv;
        pv = TAILQ_NEXT(pv, pv_list)) {
        if (pv->pv_pmap == pmap) {
            splx(s);
            return TRUE;
        }
    }
    splx(s);
    return (FALSE);
}

#define PMAP_REMOVE_PAGES_CURPROC_ONLY
/*
 * Remove all pages from the specified address space; this aids process
 * exit speed.  The code is special-cased for the current process only,
 * but the more generic (and slightly slower) mode can be enabled.  This
 * is much faster than pmap_remove in the case of running down an entire
 * address space.
 */
void
pmap_remove_pages(pmap, sva, eva)
    pmap_t pmap;
    vm_offset_t sva, eva;
{
    unsigned *pte, tpte;
    pv_table_t *ppv;
    pv_entry_t pv, npv;
    int s;

#ifdef PMAP_REMOVE_PAGES_CURPROC_ONLY
    if (!curproc || (pmap != &curproc->p_vmspace->vm_pmap)) {
        printf("warning: pmap_remove_pages called with non-current pmap\n");
        return;
    }
#endif

    pmap_lock(pmap);
    s = splhigh();

    for (pv = TAILQ_FIRST(&pmap->pm_pvlist);
        pv;
        pv = npv) {

        if (pv->pv_va >= eva || pv->pv_va < sva) {
            npv = TAILQ_NEXT(pv, pv_plist);
            continue;
        }

#ifdef PMAP_REMOVE_PAGES_CURPROC_ONLY
        pte = (unsigned *)vtopte(pv->pv_va);
#else
        pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va);
#endif
        tpte = *pte;
        *pte = 0;

        ppv = pa_to_pvh(tpte);

        if (tpte) {
            pv->pv_pmap->pm_stats.resident_count--;
            if (tpte & PG_W)
                pv->pv_pmap->pm_stats.wired_count--;
            /*
             * Update the vm_page_t clean and reference bits.
             */
            if (tpte & PG_M) {
                ppv->pv_vm_page->dirty = VM_PAGE_BITS_ALL;
            }
        }

        npv = TAILQ_NEXT(pv, pv_plist);
        TAILQ_REMOVE(&pv->pv_pmap->pm_pvlist, pv, pv_plist);

        --ppv->pv_list_count;
        TAILQ_REMOVE(&ppv->pv_list, pv, pv_list);

        pmap_unuse_pt(pv->pv_pmap, pv->pv_va, pv->pv_ptem);
        free_pv_entry(pv);
    }
    splx(s);
    pmap_update();
    pmap_unlock(pmap);
}
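/*
 * Illustrative sketch, not part of the original source: the intended
 * use of pmap_remove_pages() is tearing down an entire user address
 * space at process exit, which is much cheaper than a pmap_remove()
 * walk.  The call site and the use of the i386 user VA range constants
 * here are hypothetical.
 */
#if 0
static void
example_reap_user_mappings(struct proc *p)
{
    /*
     * Only valid for the current process when
     * PMAP_REMOVE_PAGES_CURPROC_ONLY is defined (see above).
     */
    pmap_remove_pages(&p->p_vmspace->vm_pmap,
        VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS);
}
#endif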
/*
 * pmap_testbit tests bits in ptes.
 * Note that the testbit/changebit routines are inline,
 * and a lot of things compile-time evaluate.
 */
static boolean_t
pmap_testbit(pa, bit)
    register vm_offset_t pa;
    int bit;
{
    register pv_entry_t pv;
    pv_table_t *ppv;
    unsigned *pte;
    int s;

    if (!pmap_is_managed(pa))
        return FALSE;

    ppv = pa_to_pvh(pa);
    if (TAILQ_FIRST(&ppv->pv_list) == NULL)
        return FALSE;

    s = splvm();

    for (pv = TAILQ_FIRST(&ppv->pv_list);
        pv;
        pv = TAILQ_NEXT(pv, pv_list)) {

        /*
         * If the bit being tested is the modified or referenced
         * bit, only consider mappings whose modify status is
         * actually tracked; transient pager mappings are skipped.
         */
        if (bit & (PG_A|PG_M)) {
            if (!pmap_track_modified(pv->pv_va))
                continue;
        }

#if defined(PMAP_DIAGNOSTIC)
        if (!pv->pv_pmap) {
            printf("Null pmap (tb) at va: 0x%lx\n", pv->pv_va);
            continue;
        }
#endif
        pmap_lock(pv->pv_pmap);
        pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va);
        if (pte == NULL) {
            pmap_unlock(pv->pv_pmap);
            continue;
        }
        if (*pte & bit) {
            pmap_unlock(pv->pv_pmap);
            splx(s);
            return TRUE;
        }
        pmap_unlock(pv->pv_pmap);
    }
    splx(s);
    return (FALSE);
}

/*
 * This routine is used to modify bits in ptes.
 */
static void
pmap_changebit(pa, bit, setem)
    vm_offset_t pa;
    int bit;
    boolean_t setem;
{
    register pv_entry_t pv;
    pv_table_t *ppv;
    register unsigned *pte;
    int changed;
    int s;

    if (!pmap_is_managed(pa))
        return;

    s = splvm();
    changed = 0;
    ppv = pa_to_pvh(pa);

    /*
     * Loop over all current mappings, setting or clearing as
     * appropriate.  (If setting read-only, do we need to clear
     * the VAC?)
     */
    for (pv = TAILQ_FIRST(&ppv->pv_list);
        pv;
        pv = TAILQ_NEXT(pv, pv_list)) {

        /*
         * Don't write-protect pager mappings.
         */
        if (!setem && (bit == PG_RW)) {
            if (!pmap_track_modified(pv->pv_va))
                continue;
        }

#if defined(PMAP_DIAGNOSTIC)
        if (!pv->pv_pmap) {
            printf("Null pmap (cb) at va: 0x%lx\n", pv->pv_va);
            continue;
        }
#endif

        pmap_lock(pv->pv_pmap);
        pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va);
        if (pte == NULL) {
            pmap_unlock(pv->pv_pmap);
            continue;
        }
        if (setem) {
            *(int *)pte |= bit;
            changed = 1;
        } else {
            vm_offset_t pbits = *(vm_offset_t *)pte;
            if (pbits & bit) {
                changed = 1;
                if (bit == PG_RW) {
                    if (pbits & PG_M) {
                        ppv->pv_vm_page->dirty = VM_PAGE_BITS_ALL;
                    }
                    *(int *)pte = pbits & ~(PG_M|PG_RW);
                } else {
                    *(int *)pte = pbits & ~bit;
                }
            }
        }
        pmap_unlock(pv->pv_pmap);
    }
    splx(s);
    if (changed)
        pmap_update();
}
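/*
 * Illustrative sketch, not part of the original source: pmap_testbit()
 * and pmap_changebit() are the primitives behind the public bit
 * interfaces that follow (pmap_is_modified(), pmap_clear_modify(), and
 * so on).  A hypothetical "test-and-clear modified" operation would
 * compose them like this; the helper name is invented for illustration.
 */
#if 0
static boolean_t
example_test_and_clear_modified(vm_offset_t pa)
{
    boolean_t was_modified;

    was_modified = pmap_testbit(pa, PG_M);
    if (was_modified)
        pmap_changebit(pa, PG_M, FALSE);
    return (was_modified);
}
#endif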
/*
 *  pmap_page_protect:
 *
 *  Lower the permission for all mappings to a given page.
 */
void
pmap_page_protect(phys, prot)
    vm_offset_t phys;
    vm_prot_t prot;
{
    if ((prot & VM_PROT_WRITE) == 0) {
        if (prot & (VM_PROT_READ | VM_PROT_EXECUTE)) {
            pmap_changebit(phys, PG_RW, FALSE);
        } else {
            pmap_remove_all(phys);
        }
    }
}

vm_offset_t
pmap_phys_address(ppn)
    int ppn;
{
    return (i386_ptob(ppn));
}

/*
 *  pmap_is_referenced:
 *
 *  Return whether or not the specified physical page was referenced
 *  by any physical maps.
 */
boolean_t
pmap_is_referenced(vm_offset_t pa)
{
    register pv_entry_t pv;
    pv_table_t *ppv;
    unsigned *pte;
    int s;

    if (!pmap_is_managed(pa))
        return FALSE;

    ppv = pa_to_pvh(pa);

    s = splvm();
    /*
     * Check current mappings, returning immediately if the
     * referenced bit is found set.
     */
    for (pv = TAILQ_FIRST(&ppv->pv_list);
        pv;
        pv = TAILQ_NEXT(pv, pv_list)) {

        /*
         * Don't count references from mappings whose modify/
         * reference status is not tracked (e.g. transient pager
         * mappings).
         */
        if (!pmap_track_modified(pv->pv_va))
            continue;

        pmap_lock(pv->pv_pmap);
        pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va);
        if (pte == NULL) {
            pmap_unlock(pv->pv_pmap);
            continue;
        }
        if ((int) *pte & PG_A) {
            pmap_unlock(pv->pv_pmap);
            splx(s);
            return TRUE;
        }
        pmap_unlock(pv->pv_pmap);
    }
    splx(s);
    return (FALSE);
}

/*
 *  pmap_ts_referenced:
 *
 *  Return the count of reference bits for a page, clearing all of them.
 */
int
pmap_ts_referenced(vm_offset_t pa)
{
    register pv_entry_t pv;
    pv_table_t *ppv;
    unsigned *pte;
    int s;
    int rtval = 0;

    if (!pmap_is_managed(pa))
        return (0);

    s = splvm();

    ppv = pa_to_pvh(pa);

    if (TAILQ_FIRST(&ppv->pv_list) == NULL) {
        splx(s);
        return (0);
    }

    /*
     * Loop over current mappings, counting and clearing the
     * referenced bit as we go.
     */
    for (pv = TAILQ_FIRST(&ppv->pv_list);
        pv;
        pv = TAILQ_NEXT(pv, pv_list)) {
        /*
         * Don't count references from mappings whose modify/
         * reference status is not tracked.
         */
        if (!pmap_track_modified(pv->pv_va))
            continue;

        pmap_lock(pv->pv_pmap);
        pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va);
        if (pte == NULL) {
            pmap_unlock(pv->pv_pmap);
            continue;
        }
        if (*pte & PG_A) {
            rtval++;
            *pte &= ~PG_A;
        }
        pmap_unlock(pv->pv_pmap);
    }
    splx(s);
    if (rtval) {
        pmap_update();
    }
    return (rtval);
}

/*
 *  pmap_is_modified:
 *
 *  Return whether or not the specified physical page was modified
 *  in any physical maps.
 */
boolean_t
pmap_is_modified(vm_offset_t pa)
{
    return pmap_testbit((pa), PG_M);
}

/*
 *  Clear the modify bits on the specified physical page.
 */
void
pmap_clear_modify(vm_offset_t pa)
{
    pmap_changebit((pa), PG_M, FALSE);
}

/*
 *  pmap_clear_reference:
 *
 *  Clear the reference bit on the specified physical page.
 */
void
pmap_clear_reference(vm_offset_t pa)
{
    pmap_changebit((pa), PG_A, FALSE);
}
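/*
 * Illustrative sketch, not part of the original source: a typical
 * consumer of the reference/modify interfaces above is the pageout
 * daemon, which ages pages by sampling and clearing the referenced
 * bit.  The helper below is hypothetical.
 */
#if 0
static int
example_page_was_active(vm_offset_t pa)
{
    int refs;

    /* Count and clear all referenced bits across every mapping. */
    refs = pmap_ts_referenced(pa);

    /* Remember any dirtiness before clearing the modify bit. */
    if (pmap_is_modified(pa)) {
        PHYS_TO_VM_PAGE(pa)->dirty = VM_PAGE_BITS_ALL;
        pmap_clear_modify(pa);
    }
    return (refs != 0);
}
#endif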
#if 0
void
pmap_update_map(pmap_t pmap) {
    unsigned frame = (unsigned) pmap->pm_pdir[PTDPTDI] & PG_FRAME;
    if ((pmap == kernel_pmap) ||
        (frame == (((unsigned) PTDpde) & PG_FRAME))) {
        pmap_update();
    }
}
#endif

/*
 * Miscellaneous support routines follow
 */

static void
i386_protection_init()
{
    register int *kp, prot;

    kp = protection_codes;
    for (prot = 0; prot < 8; prot++) {
        switch (prot) {
        case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_NONE:
            /*
             * Read access is also 0.  There isn't any execute
             * bit, so just make it readable.
             */
        case VM_PROT_READ | VM_PROT_NONE | VM_PROT_NONE:
        case VM_PROT_READ | VM_PROT_NONE | VM_PROT_EXECUTE:
        case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_EXECUTE:
            *kp++ = 0;
            break;
        case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_NONE:
        case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_EXECUTE:
        case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_NONE:
        case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE:
            *kp++ = PG_RW;
            break;
        }
    }
}

/*
 * Map a set of physical memory pages into the kernel virtual
 * address space.  Return a pointer to where it is mapped.  This
 * routine is intended to be used for mapping device memory,
 * NOT real memory.  The non-cacheable bits are set on each
 * mapped page.
 */
void *
pmap_mapdev(pa, size)
    vm_offset_t pa;
    vm_size_t size;
{
    vm_offset_t va, tmpva;
    unsigned *pte;

    size = roundup(size, PAGE_SIZE);

    va = kmem_alloc_pageable(kernel_map, size);
    if (!va)
        panic("pmap_mapdev: Couldn't alloc kernel virtual memory");

    pa = pa & PG_FRAME;
    for (tmpva = va; size > 0;) {
        pte = (unsigned *)vtopte(tmpva);
        *pte = pa | PG_RW | PG_V | PG_N;
        size -= PAGE_SIZE;
        tmpva += PAGE_SIZE;
        pa += PAGE_SIZE;
    }
    pmap_update();

    return ((void *) va);
}

/*
 * Perform the pmap work for mincore.
 */
int
pmap_mincore(pmap, addr)
    pmap_t pmap;
    vm_offset_t addr;
{

    unsigned *ptep, pte;
    int val = 0;

    pmap_lock(pmap);
    ptep = pmap_pte(pmap, addr);
    if (ptep == 0) {
        pmap_unlock(pmap);
        return 0;
    }

    if ((pte = *ptep) != 0) {
        vm_offset_t pa;
        val = MINCORE_INCORE;
        pa = pte & PG_FRAME;

        /*
         * Modified by us
         */
        if (pte & PG_M)
            val |= MINCORE_MODIFIED|MINCORE_MODIFIED_OTHER;
        /*
         * Modified by someone
         */
        else if (PHYS_TO_VM_PAGE(pa)->dirty ||
            pmap_is_modified(pa))
            val |= MINCORE_MODIFIED_OTHER;
        /*
         * Referenced by us
         */
        if (pte & PG_A)
            val |= MINCORE_REFERENCED|MINCORE_REFERENCED_OTHER;
        /*
         * Referenced by someone
         */
        else if ((PHYS_TO_VM_PAGE(pa)->flags & PG_REFERENCED) ||
            pmap_is_referenced(pa))
            val |= MINCORE_REFERENCED_OTHER;
    }
    pmap_unlock(pmap);
    return val;
}
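/*
 * Illustrative sketch, not part of the original source: a device driver
 * would use pmap_mapdev() roughly as below to get an uncached kernel
 * virtual window onto memory-mapped registers.  The physical address
 * and size are made-up example values.
 */
#if 0
static volatile unsigned *example_regs;

static void
example_attach(void)
{
    /*
     * Map one page of device register space; the PG_N bit set by
     * pmap_mapdev() keeps accesses uncached.
     */
    example_regs = (volatile unsigned *)
        pmap_mapdev((vm_offset_t) 0xfe000000, PAGE_SIZE);
}
#endif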
#if defined(PMAP_DEBUG)
int
pmap_pid_dump(int pid)
{
    pmap_t pmap;
    struct proc *p;
    int npte = 0;
    int index;

    for (p = allproc.lh_first; p != NULL; p = p->p_list.le_next) {
        if (p->p_pid != pid)
            continue;

        if (p->p_vmspace) {
            int i, j;
            index = 0;
            pmap = &p->p_vmspace->vm_pmap;
            for (i = 0; i < 1024; i++) {
                pd_entry_t *pde;
                unsigned *pte;
                unsigned base = i << PDRSHIFT;

                pde = &pmap->pm_pdir[i];
                if (pde && pmap_pde_v(pde)) {
                    for (j = 0; j < 1024; j++) {
                        unsigned va = base + (j << PAGE_SHIFT);
                        if (va >= (vm_offset_t) VM_MIN_KERNEL_ADDRESS) {
                            if (index) {
                                index = 0;
                                printf("\n");
                            }
                            return npte;
                        }
                        pte = pmap_pte_quick(pmap, va);
                        if (pte && pmap_pte_v(pte)) {
                            vm_offset_t pa;
                            vm_page_t m;
                            pa = *(int *)pte;
                            m = PHYS_TO_VM_PAGE((pa & PG_FRAME));
                            printf("va: 0x%x, pt: 0x%x, h: %d, w: %d, f: 0x%x",
                                va, pa, m->hold_count, m->wire_count, m->flags);
                            npte++;
                            index++;
                            if (index >= 2) {
                                index = 0;
                                printf("\n");
                            } else {
                                printf(" ");
                            }
                        }
                    }
                }
            }
        }
    }
    return npte;
}
#endif

#if defined(DEBUG)

static void pads __P((pmap_t pm));
static void pmap_pvdump __P((vm_offset_t pa));

/* print address space of pmap */
static void
pads(pm)
    pmap_t pm;
{
    unsigned va, i, j;
    unsigned *ptep;

    if (pm == kernel_pmap)
        return;
    for (i = 0; i < 1024; i++)
        if (pm->pm_pdir[i])
            for (j = 0; j < 1024; j++) {
                va = (i << PDRSHIFT) + (j << PAGE_SHIFT);
                if (pm == kernel_pmap && va < KERNBASE)
                    continue;
                if (pm != kernel_pmap && va > UPT_MAX_ADDRESS)
                    continue;
                ptep = pmap_pte_quick(pm, va);
                if (pmap_pte_v(ptep))
                    printf("%x:%x ", va, *(int *) ptep);
            }
}

static void
pmap_pvdump(pa)
    vm_offset_t pa;
{
    pv_table_t *ppv;
    register pv_entry_t pv;

    printf("pa %x", pa);
    ppv = pa_to_pvh(pa);
    for (pv = TAILQ_FIRST(&ppv->pv_list);
        pv;
        pv = TAILQ_NEXT(pv, pv_list)) {
#ifdef used_to_be
        printf(" -> pmap %x, va %x, flags %x",
            pv->pv_pmap, pv->pv_va, pv->pv_flags);
#endif
        printf(" -> pmap %x, va %x",
            pv->pv_pmap, pv->pv_va);
        pads(pv->pv_pmap);
    }
    printf(" ");
}
#endif