pmap.c revision 18842
/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from:	@(#)pmap.c	7.7 (Berkeley)	5/12/91
 *	$Id: pmap.c,v 1.122 1996/09/28 22:37:38 dyson Exp $
 */

/*
 * Manages physical address maps.
 *
 * In addition to hardware address maps, this
 * module is called upon to provide software-use-only
 * maps which may or may not be stored in the same
 * form as hardware maps.  These pseudo-maps are
 * used to store intermediate results from copy
 * operations to and from address spaces.
 *
 * Since the information managed by this module is
 * also stored by the logical address mapping module,
 * this module may throw away valid virtual-to-physical
 * mappings at almost any time.  However, invalidations
 * of virtual-to-physical mappings must be done as
 * requested.
 *
 * In order to cope with hardware architectures which
 * make virtual-to-physical map invalidates expensive,
 * this module may delay invalidate or reduced protection
 * operations until such time as they are actually
 * necessary.  This module is given full information as
 * to which processors are currently using which maps,
 * and to when physical maps must be made correct.
 */

#include "opt_cpu.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/msgbuf.h>
#include <sys/queue.h>
#include <sys/vmmeter.h>
#include <sys/mman.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <vm/lock.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>

#include <machine/pcb.h>
#include <machine/cputypes.h>
#include <machine/md_var.h>

#define PMAP_KEEP_PDIRS

#if defined(DIAGNOSTIC)
#define PMAP_DIAGNOSTIC
#endif

#if !defined(PMAP_DIAGNOSTIC)
#define PMAP_INLINE __inline
#else
#define PMAP_INLINE
#endif

#define PTPHINT

static void	init_pv_entries __P((int));

/*
 * Get PDEs and PTEs for user/kernel address space
 */
#define	pmap_pde(m, v)	(&((m)->pm_pdir[(vm_offset_t)(v) >> PDRSHIFT]))
#define pdir_pde(m, v)	(m[(vm_offset_t)(v) >> PDRSHIFT])

#define pmap_pde_v(pte)		((*(int *)pte & PG_V) != 0)
#define pmap_pte_w(pte)		((*(int *)pte & PG_W) != 0)
#define pmap_pte_m(pte)		((*(int *)pte & PG_M) != 0)
#define pmap_pte_u(pte)		((*(int *)pte & PG_A) != 0)
#define pmap_pte_v(pte)		((*(int *)pte & PG_V) != 0)

#define pmap_pte_set_w(pte, v)	((v)?(*(int *)pte |= PG_W):(*(int *)pte &= ~PG_W))
#define pmap_pte_set_prot(pte, v)	((*(int *)pte &= ~PG_PROT), (*(int *)pte |= (v)))

/*
 * Given a map and a machine independent protection code,
 * convert to a vax protection code.
 */
#define pte_prot(m, p)	(protection_codes[p])
static int protection_codes[8];

static struct pmap kernel_pmap_store;
pmap_t kernel_pmap;

vm_offset_t avail_start;	/* PA of first available physical page */
vm_offset_t avail_end;		/* PA of last available physical page */
vm_offset_t virtual_avail;	/* VA of first avail page (after kernel bss) */
vm_offset_t virtual_end;	/* VA of last avail page (end of kernel AS) */
static boolean_t pmap_initialized = FALSE;	/* Has pmap_init completed? */
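/*
 * Illustrative sketch (not part of the original file): how a virtual
 * address decomposes on the i386 under the pmap_pde/pdir_pde macros
 * above, assuming 4KB pages (PAGE_SHIFT == 12) and 1024-entry page
 * tables (PDRSHIFT == 22, NPTEPG == 1024).
 */
#if 0
	vm_offset_t va = 0xdeadb000;
	unsigned pdi = va >> PDRSHIFT;		/* page directory index: top 10 bits */
	unsigned pti = (va >> PAGE_SHIFT) & (NPTEPG - 1); /* page table index */
	pd_entry_t *pde = pmap_pde(kernel_pmap, va);	/* == &pm_pdir[pdi] */
#endif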
static vm_offset_t vm_first_phys;

static int nkpt;
static vm_page_t nkpg;
vm_offset_t kernel_vm_end;

extern vm_offset_t clean_sva, clean_eva;
extern int cpu_class;

#define PV_FREELIST_MIN ((PAGE_SIZE / sizeof (struct pv_entry)) / 2)

/*
 * Data for the pv entry allocation mechanism
 */
static int pv_freelistcnt;
TAILQ_HEAD (,pv_entry) pv_freelist;
static vm_offset_t pvva;
static int npvvapg;

/*
 * All those kernel PT submaps that BSD is so fond of
 */
pt_entry_t *CMAP1;
static pt_entry_t *CMAP2, *ptmmap;
caddr_t CADDR1, ptvmmap;
static caddr_t CADDR2;
static pt_entry_t *msgbufmap;
struct msgbuf *msgbufp;

pt_entry_t *PMAP1;
unsigned *PADDR1;

static void	free_pv_entry __P((pv_entry_t pv));
static unsigned * get_ptbase __P((pmap_t pmap));
static pv_entry_t get_pv_entry __P((void));
static void	i386_protection_init __P((void));
static void	pmap_alloc_pv_entry __P((void));
static void	pmap_changebit __P((vm_offset_t pa, int bit, boolean_t setem));

static int	pmap_is_managed __P((vm_offset_t pa));
static void	pmap_remove_all __P((vm_offset_t pa));
static vm_page_t pmap_enter_quick __P((pmap_t pmap, vm_offset_t va,
				       vm_offset_t pa, vm_page_t mpte));
static int	pmap_remove_pte __P((struct pmap *pmap, unsigned *ptq,
				     vm_offset_t sva));
static void	pmap_remove_page __P((struct pmap *pmap, vm_offset_t va));
static int	pmap_remove_entry __P((struct pmap *pmap, pv_table_t *pv,
				       vm_offset_t va));
static boolean_t pmap_testbit __P((vm_offset_t pa, int bit));
static void	pmap_insert_entry __P((pmap_t pmap, vm_offset_t va,
				       vm_page_t mpte, vm_offset_t pa));

static vm_page_t pmap_allocpte __P((pmap_t pmap, vm_offset_t va));

static int pmap_release_free_page __P((pmap_t pmap, vm_page_t p));
static vm_page_t _pmap_allocpte __P((pmap_t pmap, unsigned ptepindex));
static unsigned * pmap_pte_quick __P((pmap_t pmap, vm_offset_t va));
static vm_page_t pmap_page_alloc __P((vm_object_t object, vm_pindex_t pindex));
static PMAP_INLINE void	pmap_lock __P((pmap_t pmap));
static PMAP_INLINE void	pmap_unlock __P((pmap_t pmap));
static void	pmap_lock2 __P((pmap_t pmap1, pmap_t pmap2));

#define PDSTACKMAX 6
static vm_offset_t pdstack[PDSTACKMAX];
static int pdstackptr;

/*
 * Bootstrap the system enough to run with virtual memory.
 *
 * On the i386 this is called after mapping has already been enabled
 * and just syncs the pmap module with what has already been done.
 * [We can't call it easily with mapping off since the kernel is not
 * mapped with PA == VA, hence we would have to relocate every address
 * from the linked base (virtual) address "KERNBASE" to the actual
 * (physical) address starting relative to 0]
 */
void
pmap_bootstrap(firstaddr, loadaddr)
	vm_offset_t firstaddr;
	vm_offset_t loadaddr;
{
	vm_offset_t va;
	pt_entry_t *pte;

	avail_start = firstaddr;

	/*
	 * XXX The calculation of virtual_avail is wrong. It's NKPT*PAGE_SIZE too
	 * large. It should instead be correctly calculated in locore.s and
	 * not based on 'first' (which is a physical address, not a virtual
	 * address, for the start of unused physical memory). The kernel
	 * page tables are NOT double mapped and thus should not be included
	 * in this calculation.
	 */
	virtual_avail = (vm_offset_t) KERNBASE + firstaddr;
	virtual_end = VM_MAX_KERNEL_ADDRESS;

	/*
	 * Initialize protection array.
	 */
	i386_protection_init();

	/*
	 * The kernel's pmap is statically allocated so we don't have to use
	 * pmap_create, which is unlikely to work correctly at this part of
	 * the boot sequence (XXX and which no longer exists).
	 */
	kernel_pmap = &kernel_pmap_store;

	kernel_pmap->pm_pdir = (pd_entry_t *) (KERNBASE + IdlePTD);

	kernel_pmap->pm_count = 1;
	TAILQ_INIT(&kernel_pmap->pm_pvlist);
	nkpt = NKPT;

	/*
	 * Reserve some special page table entries/VA space for temporary
	 * mapping of pages.
	 */
#define	SYSMAP(c, p, v, n)	\
	v = (c)va; va += ((n)*PAGE_SIZE); p = pte; pte += (n);

	va = virtual_avail;
	pte = (pt_entry_t *) pmap_pte(kernel_pmap, va);

	/*
	 * CMAP1/CMAP2 are used for zeroing and copying pages.
	 */
	SYSMAP(caddr_t, CMAP1, CADDR1, 1)
	SYSMAP(caddr_t, CMAP2, CADDR2, 1)

	/*
	 * ptmmap is used for reading arbitrary physical pages via /dev/mem.
	 */
	SYSMAP(caddr_t, ptmmap, ptvmmap, 1)

	/*
	 * msgbufmap is used to map the system message buffer.
	 */
	SYSMAP(struct msgbuf *, msgbufmap, msgbufp, 1)

	/*
	 * ptemap is used for pmap_pte_quick
	 */
	SYSMAP(unsigned *, PMAP1, PADDR1, 1);

	virtual_avail = va;

	*(int *) CMAP1 = *(int *) CMAP2 = *(int *) PTD = 0;
	invltlb();
}

/*
 * Initialize the pmap module.
 * Called by vm_init, to initialize any structures that the pmap
 * system needs to map virtual memory.
 * pmap_init has been enhanced to support discontiguous physical
 * memory in a fairly consistent way.
 */
void
pmap_init(phys_start, phys_end)
	vm_offset_t phys_start, phys_end;
{
	vm_offset_t addr;
	vm_size_t npg, s;
	int i;

	/*
	 * calculate the number of pv_entries needed
	 */
	vm_first_phys = phys_avail[0];
	for (i = 0; phys_avail[i + 1]; i += 2);
	npg = (phys_avail[(i - 2) + 1] - vm_first_phys) / PAGE_SIZE;

	/*
	 * Allocate memory for random pmap data structures.  Includes the
	 * pv_head_table.
	 */
	s = (vm_size_t) (sizeof(pv_table_t) * npg);
	s = round_page(s);

	addr = (vm_offset_t) kmem_alloc(kernel_map, s);
	pv_table = (pv_table_t *) addr;
	for (i = 0; i < npg; i++) {
		vm_offset_t pa;
		TAILQ_INIT(&pv_table[i].pv_list);
		pv_table[i].pv_list_count = 0;
		pa = vm_first_phys + i * PAGE_SIZE;
		pv_table[i].pv_vm_page = PHYS_TO_VM_PAGE(pa);
	}
	TAILQ_INIT(&pv_freelist);

	/*
	 * init the pv free list
	 */
	init_pv_entries(npg);
	/*
	 * Now it is safe to enable pv_table recording.
	 */
	pmap_initialized = TRUE;
}

/*
 * Used to map a range of physical addresses into kernel
 * virtual address space.
 *
 * For now, VM is already on, we only need to map the
 * specified memory.
 */
vm_offset_t
pmap_map(virt, start, end, prot)
	vm_offset_t virt;
	vm_offset_t start;
	vm_offset_t end;
	int prot;
{
	while (start < end) {
		pmap_enter(kernel_pmap, virt, start, prot, FALSE);
		virt += PAGE_SIZE;
		start += PAGE_SIZE;
	}
	return (virt);
}
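/*
 * Usage sketch (illustrative, not part of the original file; the
 * variables are hypothetical): pmap_map() consumes kernel VA a page at
 * a time and returns the first VA past the new mappings.
 */
#if 0
	vm_offset_t va, pa;

	va = pmap_map(va, pa, pa + 2 * PAGE_SIZE,
	    VM_PROT_READ | VM_PROT_WRITE);
#endif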
/***************************************************
 * Low level helper routines.....
 ***************************************************/

#if defined(PMAP_DIAGNOSTIC)

/*
 * This code checks for non-writeable/modified pages.
 * This should be an invalid condition.
 */
static int
pmap_nw_modified(pt_entry_t ptea) {
	int pte;

	pte = (int) ptea;

	if ((pte & (PG_M|PG_RW)) == PG_M)
		return 1;
	else
		return 0;
}
#endif

/*
 * this routine defines the region(s) of memory that should
 * not be tested for the modified bit.
 */
static PMAP_INLINE int
pmap_track_modified(vm_offset_t va) {
	if ((va < clean_sva) || (va >= clean_eva))
		return 1;
	else
		return 0;
}

static PMAP_INLINE void
invltlb_1pg(vm_offset_t va) {
#if defined(I386_CPU)
	if (cpu_class == CPUCLASS_386) {
		invltlb();
	} else
#endif
	{
		invlpg(va);
	}
}

static PMAP_INLINE void
invltlb_2pg(vm_offset_t va1, vm_offset_t va2) {
#if defined(I386_CPU)
	if (cpu_class == CPUCLASS_386) {
		invltlb();
	} else
#endif
	{
		invlpg(va1);
		invlpg(va2);
	}
}

static PMAP_INLINE void
pmap_lock(pmap)
pmap_t pmap;
{
	int s;

	if (pmap == kernel_pmap)
		return;
	s = splhigh();
	while (pmap->pm_flags & PM_FLAG_LOCKED) {
		pmap->pm_flags |= PM_FLAG_WANTED;
		tsleep(pmap, PVM - 1, "pmaplk", 0);
	}
	pmap->pm_flags |= PM_FLAG_LOCKED;
	splx(s);
}

static PMAP_INLINE void
pmap_unlock(pmap)
pmap_t pmap;
{
	int s;

	if (pmap == kernel_pmap)
		return;
	s = splhigh();
	pmap->pm_flags &= ~PM_FLAG_LOCKED;
	if (pmap->pm_flags & PM_FLAG_WANTED) {
		pmap->pm_flags &= ~PM_FLAG_WANTED;
		wakeup(pmap);
	}
	splx(s);
}

static void
pmap_lock2(pmap1, pmap2)
pmap_t pmap1, pmap2;
{
	int s;

	if (pmap1 == kernel_pmap || pmap2 == kernel_pmap)
		return;
	s = splhigh();
	while ((pmap1->pm_flags | pmap2->pm_flags) & PM_FLAG_LOCKED) {
		while (pmap1->pm_flags & PM_FLAG_LOCKED) {
			pmap1->pm_flags |= PM_FLAG_WANTED;
			tsleep(pmap1, PVM - 1, "pmapl1", 0);
		}
		while (pmap2->pm_flags & PM_FLAG_LOCKED) {
			pmap2->pm_flags |= PM_FLAG_WANTED;
			tsleep(pmap2, PVM - 1, "pmapl2", 0);
		}
	}
	pmap1->pm_flags |= PM_FLAG_LOCKED;
	pmap2->pm_flags |= PM_FLAG_LOCKED;
	splx(s);
}

static unsigned *
get_ptbase(pmap)
	pmap_t pmap;
{
	unsigned frame = (unsigned) pmap->pm_pdir[PTDPTDI] & PG_FRAME;

	/* are we current address space or kernel? */
	if (pmap == kernel_pmap || frame == (((unsigned) PTDpde) & PG_FRAME)) {
		return (unsigned *) PTmap;
	}
	/* otherwise, we are alternate address space */
	if (frame != (((unsigned) APTDpde) & PG_FRAME)) {
		APTDpde = (pd_entry_t) (frame | PG_RW | PG_V);
		invltlb();
	}
	return (unsigned *) APTmap;
}

/*
 * Routine:	pmap_pte
 * Function:
 *	Extract the page table entry associated
 *	with the given map/virtual_address pair.
 */

PMAP_INLINE unsigned *
pmap_pte(pmap, va)
	register pmap_t pmap;
	vm_offset_t va;
{
	if (pmap && *pmap_pde(pmap, va)) {
		return get_ptbase(pmap) + i386_btop(va);
	}
	return (0);
}
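/*
 * Illustrative note (not part of the original file): get_ptbase() and
 * pmap_pte() lean on the recursive mapping installed at PTDPTDI.  With
 * the page directory doubling as a page table, every PTE of the current
 * address space appears at a fixed virtual location:
 */
#if 0
	/* for the current pmap, vtopte(va) is morally: */
	unsigned *ptep = (unsigned *) PTmap + i386_btop(va);
#endif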
/*
 * Super fast pmap_pte routine best used when scanning
 * the pv lists.  This eliminates many coarse-grained
 * invltlb calls.  Note that many of the pv list
 * scans are across different pmaps.  It is very wasteful
 * to do an entire invltlb for checking a single mapping.
 */

unsigned *
pmap_pte_quick(pmap, va)
	register pmap_t pmap;
	vm_offset_t va;
{
	unsigned pde, newpf;

	if ((pde = (unsigned) pmap->pm_pdir[va >> PDRSHIFT]) != 0) {
		unsigned frame = (unsigned) pmap->pm_pdir[PTDPTDI] & PG_FRAME;
		unsigned index = i386_btop(va);
		/* are we current address space or kernel? */
		if ((pmap == kernel_pmap) ||
			(frame == (((unsigned) PTDpde) & PG_FRAME))) {
			return (unsigned *) PTmap + index;
		}
		newpf = pde & PG_FRAME;
		if (((*(unsigned *) PMAP1) & PG_FRAME) != newpf) {
			*(unsigned *) PMAP1 = newpf | PG_RW | PG_V;
			invltlb_1pg((vm_offset_t) PADDR1);
		}
		return PADDR1 + ((unsigned) index & (NPTEPG - 1));
	}
	return (0);
}

/*
 * Routine:	pmap_extract
 * Function:
 *	Extract the physical page address associated
 *	with the given map/virtual_address pair.
 */
vm_offset_t
pmap_extract(pmap, va)
	register pmap_t pmap;
	vm_offset_t va;
{
	vm_offset_t rtval;

	pmap_lock(pmap);
	if (pmap && *pmap_pde(pmap, va)) {
		unsigned *pte;
		pte = get_ptbase(pmap) + i386_btop(va);
		rtval = ((*pte & PG_FRAME) | (va & PAGE_MASK));
		pmap_unlock(pmap);
		return rtval;
	}
	pmap_unlock(pmap);
	return 0;
}

/*
 * determine if a page is managed (memory vs. device)
 */
static PMAP_INLINE int
pmap_is_managed(pa)
	vm_offset_t pa;
{
	int i;

	if (!pmap_initialized)
		return 0;

	for (i = 0; phys_avail[i + 1]; i += 2) {
		if (pa < phys_avail[i + 1] && pa >= phys_avail[i])
			return 1;
	}
	return 0;
}


/***************************************************
 * Low level mapping routines.....
 ***************************************************/

/*
 * Add a list of wired pages to the kva.
 * This routine is only used for temporary
 * kernel mappings that do not need to have
 * page modification or references recorded.
 * Note that old mappings are simply written
 * over.  The page *must* be wired.
 */
void
pmap_qenter(va, m, count)
	vm_offset_t va;
	vm_page_t *m;
	int count;
{
	int i;
	register unsigned *pte;

	for (i = 0; i < count; i++) {
		vm_offset_t tva = va + i * PAGE_SIZE;
		unsigned npte = VM_PAGE_TO_PHYS(m[i]) | PG_RW | PG_V;
		unsigned opte;
		pte = (unsigned *)vtopte(tva);
		opte = *pte;
		*pte = npte;
		if (opte)
			invltlb_1pg(tva);
	}
}

/*
 * this routine jerks page mappings from the
 * kernel -- it is meant only for temporary mappings.
 */
void
pmap_qremove(va, count)
	vm_offset_t va;
	int count;
{
	int i;
	register unsigned *pte;

	for (i = 0; i < count; i++) {
		pte = (unsigned *)vtopte(va);
		*pte = 0;
		invltlb_1pg(va);
		va += PAGE_SIZE;
	}
}
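/*
 * Usage sketch (illustrative, not part of the original file; kva, pages
 * and dest are hypothetical): a typical pmap_qenter()/pmap_qremove()
 * consumer maps an array of wired pages into a reserved KVA window,
 * operates on the window, then tears it down.
 */
#if 0
	vm_page_t pages[4];
	vm_offset_t kva;
	caddr_t dest;

	pmap_qenter(kva, pages, 4);
	bcopy((caddr_t) kva, dest, 4 * PAGE_SIZE);
	pmap_qremove(kva, 4);
#endif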
/*
 * add a wired page to the kva
 * note that in order for the mapping to take effect -- you
 * should do an invltlb after doing the pmap_kenter...
 */
PMAP_INLINE void
pmap_kenter(va, pa)
	vm_offset_t va;
	register vm_offset_t pa;
{
	register unsigned *pte;
	unsigned npte, opte;

	npte = pa | PG_RW | PG_V;
	pte = (unsigned *)vtopte(va);
	opte = *pte;
	*pte = npte;
	if (opte)
		invltlb_1pg(va);
}

/*
 * remove a page from the kernel pagetables
 */
PMAP_INLINE void
pmap_kremove(va)
	vm_offset_t va;
{
	register unsigned *pte;

	pte = (unsigned *)vtopte(va);
	*pte = 0;
	invltlb_1pg(va);
}

static vm_page_t
pmap_page_alloc(object, pindex)
	vm_object_t object;
	vm_pindex_t pindex;
{
	vm_page_t m;

	m = vm_page_alloc(object, pindex, VM_ALLOC_ZERO);
	if (m == NULL) {
		VM_WAIT;
	}
	return m;
}

vm_page_t
pmap_page_lookup(object, pindex)
	vm_object_t object;
	vm_pindex_t pindex;
{
	vm_page_t m;
retry:
	m = vm_page_lookup(object, pindex);
	if (m) {
		if (m->flags & PG_BUSY) {
			m->flags |= PG_WANTED;
			tsleep(m, PVM, "pplookp", 0);
			goto retry;
		}
	}

	return m;
}


/***************************************************
 * Page table page management routines.....
 ***************************************************/

/*
 * This routine unholds page table pages, and if the hold count
 * drops to zero, then it decrements the wire count.
 */
static int
pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m) {
	int s;

	vm_page_unhold(m);

	s = splvm();
	while (m->flags & PG_BUSY) {
		m->flags |= PG_WANTED;
		tsleep(m, PVM, "pmuwpt", 0);
	}
	splx(s);

	if (m->hold_count == 0) {
		vm_offset_t pteva;
		/*
		 * unmap the page table page
		 */
		pmap->pm_pdir[m->pindex] = 0;
		--pmap->pm_stats.resident_count;
		if ((((unsigned)pmap->pm_pdir[PTDPTDI]) & PG_FRAME) ==
			(((unsigned) PTDpde) & PG_FRAME)) {
			/*
			 * Do an invltlb to make the invalidated mapping
			 * take effect immediately.
			 */
			pteva = UPT_MIN_ADDRESS + i386_ptob(m->pindex);
			invltlb_1pg(pteva);
		}

#if defined(PTPHINT)
		if (pmap->pm_ptphint == m)
			pmap->pm_ptphint = NULL;
#endif

		/*
		 * If the page is finally unwired, simply free it.
		 */
		--m->wire_count;
		if (m->wire_count == 0) {

			if (m->flags & PG_WANTED) {
				m->flags &= ~PG_WANTED;
				wakeup(m);
			}

			vm_page_free_zero(m);
			--cnt.v_wire_count;
		}
		return 1;
	}
	return 0;
}

/*
 * After removing a page table entry, this routine is used to
 * conditionally free the page, and manage the hold/wire counts.
 */
int
pmap_unuse_pt(pmap, va, mpte)
	pmap_t pmap;
	vm_offset_t va;
	vm_page_t mpte;
{
	unsigned ptepindex;

	if (va >= UPT_MIN_ADDRESS)
		return 0;

	if (mpte == NULL) {
		ptepindex = (va >> PDRSHIFT);
#if defined(PTPHINT)
		if (pmap->pm_ptphint &&
			(pmap->pm_ptphint->pindex == ptepindex)) {
			mpte = pmap->pm_ptphint;
		} else {
			mpte = pmap_page_lookup(pmap->pm_pteobj, ptepindex);
			pmap->pm_ptphint = mpte;
		}
#else
		mpte = pmap_page_lookup(pmap->pm_pteobj, ptepindex);
#endif
	}

	return pmap_unwire_pte_hold(pmap, mpte);
}
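/*
 * Bookkeeping recap (illustrative, not part of the original file): a
 * page table page's hold_count counts the mappings entered through it
 * (pmap_allocpte() adds one per PTE), while wire_count keeps the page
 * resident.  pmap_unwire_pte_hold() above tears the page down only once
 * both counts drain to zero, handing it to the zeroed-page queue.
 */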
/*
 * Initialize a preallocated and zeroed pmap structure,
 * such as one in a vmspace structure.
 */
void
pmap_pinit(pmap)
	register struct pmap *pmap;
{
	vm_page_t ptdpg;

	/*
	 * No need to allocate page table space yet but we do need a valid
	 * page directory table.
	 */

	if (pdstackptr > 0) {
		--pdstackptr;
		pmap->pm_pdir = (pd_entry_t *)pdstack[pdstackptr];
	} else {
		pmap->pm_pdir =
			(pd_entry_t *)kmem_alloc_pageable(kernel_map, PAGE_SIZE);
	}

	/*
	 * allocate object for the ptes
	 */
	pmap->pm_pteobj = vm_object_allocate(OBJT_DEFAULT, PTDPTDI + 1);

	/*
	 * allocate the page directory page
	 */
retry:
	ptdpg = pmap_page_alloc(pmap->pm_pteobj, PTDPTDI);
	if (ptdpg == NULL)
		goto retry;

	ptdpg->wire_count = 1;
	++cnt.v_wire_count;

	ptdpg->flags &= ~(PG_MAPPED | PG_BUSY);	/* not mapped normally */
	ptdpg->valid = VM_PAGE_BITS_ALL;

	pmap_kenter((vm_offset_t) pmap->pm_pdir, VM_PAGE_TO_PHYS(ptdpg));
	if ((ptdpg->flags & PG_ZERO) == 0)
		bzero(pmap->pm_pdir, PAGE_SIZE);

	/* wire in kernel global address entries */
	bcopy(PTD + KPTDI, pmap->pm_pdir + KPTDI, nkpt * PTESIZE);

	/* install self-referential address mapping entry */
	*(unsigned *) (pmap->pm_pdir + PTDPTDI) =
		VM_PAGE_TO_PHYS(ptdpg) | PG_V | PG_RW;

	pmap->pm_flags = 0;
	pmap->pm_count = 1;
	pmap->pm_ptphint = NULL;
	TAILQ_INIT(&pmap->pm_pvlist);
}

static int
pmap_release_free_page(pmap, p)
	struct pmap *pmap;
	vm_page_t p;
{
	int s;
	unsigned *pde = (unsigned *) pmap->pm_pdir;

	/*
	 * This code optimizes the case of freeing non-busy
	 * page-table pages.  Those pages are zero now, and
	 * might as well be placed directly into the zero queue.
	 */
	s = splvm();
	if (p->flags & PG_BUSY) {
		p->flags |= PG_WANTED;
		tsleep(p, PVM, "pmaprl", 0);
		splx(s);
		return 0;
	}

	if (p->flags & PG_WANTED) {
		p->flags &= ~PG_WANTED;
		wakeup(p);
	}

	/*
	 * Remove the page table page from the process's address space.
	 */
	pde[p->pindex] = 0;
	--pmap->pm_stats.resident_count;

	if (p->hold_count) {
		panic("pmap_release: freeing held page table page");
	}
	/*
	 * Page directory pages need to have the kernel
	 * stuff cleared, so they can go into the zero queue also.
	 */
	if (p->pindex == PTDPTDI) {
		bzero(pde + KPTDI, nkpt * PTESIZE);
		pde[APTDPTDI] = 0;
		pmap_kremove((vm_offset_t) pmap->pm_pdir);
	}

#if defined(PTPHINT)
	if (pmap->pm_ptphint &&
		(pmap->pm_ptphint->pindex == p->pindex))
		pmap->pm_ptphint = NULL;
#endif

	vm_page_free_zero(p);
	splx(s);
	return 1;
}
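/*
 * Layout recap (illustrative, not part of the original file): a page
 * directory as built by pmap_pinit() above, in rough index order:
 *
 *	low PDEs		user page tables, filled lazily
 *	KPTDI .. KPTDI+nkpt-1	kernel page tables copied from PTD
 *	PTDPTDI			self-reference; makes PTmap/vtopte work
 *	APTDPTDI		alternate self-map slot, set by get_ptbase()
 */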
/*
 * this routine is called if the page table page is not
 * mapped correctly.
 */
static vm_page_t
_pmap_allocpte(pmap, ptepindex)
	pmap_t pmap;
	unsigned ptepindex;
{
	vm_offset_t pteva, ptepa;
	vm_page_t m;
	int needszero = 0;

	/*
	 * Find or fabricate a new pagetable page
	 */
retry:
	m = vm_page_lookup(pmap->pm_pteobj, ptepindex);
	if (m == NULL) {
		m = pmap_page_alloc(pmap->pm_pteobj, ptepindex);
		if (m == NULL)
			goto retry;
		if ((m->flags & PG_ZERO) == 0)
			needszero = 1;
		m->flags &= ~(PG_ZERO | PG_BUSY);
		m->valid = VM_PAGE_BITS_ALL;
	} else {
		if ((m->flags & PG_BUSY) || m->busy) {
			m->flags |= PG_WANTED;
			tsleep(m, PVM, "ptewai", 0);
			goto retry;
		}
	}

	if (m->queue != PQ_NONE) {
		int s = splvm();
		vm_page_unqueue(m);
		splx(s);
	}

	if (m->wire_count == 0)
		++cnt.v_wire_count;
	++m->wire_count;

	/*
	 * Increment the hold count for the page table page
	 * (denoting a new mapping.)
	 */
	++m->hold_count;

	/*
	 * Map the pagetable page into the process address space, if
	 * it isn't already there.
	 */

	pmap->pm_stats.resident_count++;

	ptepa = VM_PAGE_TO_PHYS(m);
	pmap->pm_pdir[ptepindex] = (pd_entry_t) (ptepa | PG_U | PG_RW | PG_V);

#if defined(PTPHINT)
	/*
	 * Set the page table hint
	 */
	pmap->pm_ptphint = m;
#endif

	/*
	 * Try to use the new mapping, but if we cannot, then
	 * do it with the routine that maps the page explicitly.
	 */
	if (needszero) {
		if ((((unsigned)pmap->pm_pdir[PTDPTDI]) & PG_FRAME) ==
			(((unsigned) PTDpde) & PG_FRAME)) {
			pteva = UPT_MIN_ADDRESS + i386_ptob(ptepindex);
			bzero((caddr_t) pteva, PAGE_SIZE);
		} else {
			pmap_zero_page(ptepa);
		}
	}

	m->valid = VM_PAGE_BITS_ALL;
	m->flags |= PG_MAPPED;

	return m;
}

static vm_page_t
pmap_allocpte(pmap, va)
	pmap_t pmap;
	vm_offset_t va;
{
	unsigned ptepindex;
	vm_offset_t ptepa;
	vm_page_t m;

	/*
	 * Calculate pagetable page index
	 */
	ptepindex = va >> PDRSHIFT;

	/*
	 * Get the page directory entry
	 */
	ptepa = (vm_offset_t) pmap->pm_pdir[ptepindex];

	/*
	 * If the page table page is mapped, we just increment the
	 * hold count, and activate it.
	 */
	if (ptepa) {
#if defined(PTPHINT)
		/*
		 * In order to get the page table page, try the
		 * hint first.
		 */
		if (pmap->pm_ptphint &&
			(pmap->pm_ptphint->pindex == ptepindex)) {
			m = pmap->pm_ptphint;
		} else {
			m = pmap_page_lookup(pmap->pm_pteobj, ptepindex);
			pmap->pm_ptphint = m;
		}
#else
		m = pmap_page_lookup(pmap->pm_pteobj, ptepindex);
#endif
		++m->hold_count;
		return m;
	}
	/*
	 * Here if the pte page isn't mapped, or if it has been deallocated.
	 */
	return _pmap_allocpte(pmap, ptepindex);
}


/***************************************************
 * Pmap allocation/deallocation routines.
 ***************************************************/

/*
 * Release any resources held by the given physical map.
 * Called when a pmap initialized by pmap_pinit is being released.
 * Should only be called if the map contains no valid mappings.
 */
void
pmap_release(pmap)
	register struct pmap *pmap;
{
	vm_page_t p, n, ptdpg;
	vm_object_t object = pmap->pm_pteobj;

	if (object->ref_count != 1)
		panic("pmap_release: pteobj reference count != 1");

	pmap_lock(pmap);
	ptdpg = NULL;
retry:
	for (p = TAILQ_FIRST(&object->memq); p != NULL; p = n) {
		n = TAILQ_NEXT(p, listq);
		if (p->pindex == PTDPTDI) {
			ptdpg = p;
			continue;
		}
		if (!pmap_release_free_page(pmap, p))
			goto retry;
	}

	if (ptdpg && !pmap_release_free_page(pmap, ptdpg))
		goto retry;

	vm_object_deallocate(object);
	if (pdstackptr < PDSTACKMAX) {
		pdstack[pdstackptr] = (vm_offset_t) pmap->pm_pdir;
		++pdstackptr;
	} else {
		kmem_free(kernel_map, (vm_offset_t) pmap->pm_pdir, PAGE_SIZE);
	}
	pmap->pm_pdir = 0;
	pmap_unlock(pmap);
}

/*
 * grow the number of kernel page table entries, if needed
 */
void
pmap_growkernel(vm_offset_t addr)
{
	struct proc *p;
	struct pmap *pmap;
	int s;

	s = splhigh();
	if (kernel_vm_end == 0) {
		kernel_vm_end = KERNBASE;
		nkpt = 0;
		while (pdir_pde(PTD, kernel_vm_end)) {
			kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
			++nkpt;
		}
	}
	addr = (addr + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
	while (kernel_vm_end < addr) {
		if (pdir_pde(PTD, kernel_vm_end)) {
			kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
			continue;
		}
		++nkpt;
		if (!nkpg) {
			vm_offset_t ptpkva = (vm_offset_t) vtopte(addr);
			/*
			 * This index is bogus, but out of the way
			 */
			vm_pindex_t ptpidx = (ptpkva >> PAGE_SHIFT);

			nkpg = vm_page_alloc(kernel_object,
				ptpidx, VM_ALLOC_SYSTEM);
			if (!nkpg)
				panic("pmap_growkernel: no memory to grow kernel");
			vm_page_wire(nkpg);
			vm_page_remove(nkpg);
			pmap_zero_page(VM_PAGE_TO_PHYS(nkpg));
		}
		pdir_pde(PTD, kernel_vm_end) = (pd_entry_t) (VM_PAGE_TO_PHYS(nkpg) | PG_V | PG_RW);
		nkpg = NULL;

		for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
			if (p->p_vmspace) {
				pmap = &p->p_vmspace->vm_pmap;
				*pmap_pde(pmap, kernel_vm_end) = pdir_pde(PTD, kernel_vm_end);
			}
		}
		*pmap_pde(kernel_pmap, kernel_vm_end) = pdir_pde(PTD, kernel_vm_end);
		kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
	}
	splx(s);
}
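/*
 * Worked example (illustrative, not part of the original file): with
 * 4KB pages and NPTEPG == 1024, PAGE_SIZE * NPTEPG is 4MB, so
 *
 *	(addr + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1)
 *
 * in pmap_growkernel() rounds addr up to the next 4MB boundary, the
 * span covered by one page directory entry; e.g. 0xc0500000 rounds to
 * 0xc0800000.
 */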
/*
 * Retire the given physical map from service.
 * Should only be called if the map contains
 * no valid mappings.
 */
void
pmap_destroy(pmap)
	register pmap_t pmap;
{
	int count;

	if (pmap == NULL)
		return;

	count = --pmap->pm_count;
	if (count == 0) {
		pmap_release(pmap);
		free((caddr_t) pmap, M_VMPMAP);
	}
}

/*
 * Add a reference to the specified pmap.
 */
void
pmap_reference(pmap)
	pmap_t pmap;
{
	if (pmap != NULL) {
		pmap->pm_count++;
	}
}

/***************************************************
 * page management routines.
 ***************************************************/

/*
 * free the pv_entry back to the free list
 */
static PMAP_INLINE void
free_pv_entry(pv)
	pv_entry_t pv;
{
	++pv_freelistcnt;
	TAILQ_INSERT_HEAD(&pv_freelist, pv, pv_list);
}

/*
 * get a new pv_entry, allocating a block from the system
 * when needed.
 * the memory allocation is performed bypassing the malloc code
 * because of the possibility of allocations at interrupt time.
 */
static pv_entry_t
get_pv_entry()
{
	pv_entry_t tmp;

	/*
	 * get more pv_entry pages if needed
	 */
	if (pv_freelistcnt < PV_FREELIST_MIN || !TAILQ_FIRST(&pv_freelist)) {
		pmap_alloc_pv_entry();
	}
	/*
	 * get a pv_entry off of the free list
	 */
	--pv_freelistcnt;
	tmp = TAILQ_FIRST(&pv_freelist);
	TAILQ_REMOVE(&pv_freelist, tmp, pv_list);
	return tmp;
}

/*
 * This *strange* allocation routine eliminates the possibility of a malloc
 * failure (*FATAL*) for a pv_entry_t data structure.
 * also -- this code is MUCH MUCH faster than the malloc equiv...
 * We really need to do the slab allocator thingie here.
 */
static void
pmap_alloc_pv_entry()
{
	/*
	 * do we have any pre-allocated map-pages left?
	 */
	if (npvvapg) {
		vm_page_t m;

		/*
		 * allocate a physical page out of the vm system
		 */
		m = vm_page_alloc(kernel_object,
			OFF_TO_IDX(pvva - vm_map_min(kernel_map)),
			VM_ALLOC_INTERRUPT);
		if (m) {
			int newentries;
			int i;
			pv_entry_t entry;

			newentries = (PAGE_SIZE / sizeof(struct pv_entry));
			/*
			 * wire the page
			 */
			vm_page_wire(m);
			m->flags &= ~PG_BUSY;
			/*
			 * let the kernel see it
			 */
			pmap_kenter(pvva, VM_PAGE_TO_PHYS(m));

			entry = (pv_entry_t) pvva;
			/*
			 * update the allocation pointers
			 */
			pvva += PAGE_SIZE;
			--npvvapg;

			/*
			 * free the entries into the free list
			 */
			for (i = 0; i < newentries; i++) {
				free_pv_entry(entry);
				entry++;
			}
		}
	}
	if (!TAILQ_FIRST(&pv_freelist))
		panic("get_pv_entry: cannot get a pv_entry_t");
}

/*
 * init the pv_entry allocation system
 */
#define PVSPERPAGE 64
void
init_pv_entries(npg)
	int npg;
{
	/*
	 * allocate enough kvm space for PVSPERPAGE entries per page (lots)
	 * kvm space is fairly cheap, be generous!!! (the system can panic if
	 * this is too small.)
	 */
	npvvapg = ((npg * PVSPERPAGE) * sizeof(struct pv_entry)
		+ PAGE_SIZE - 1) / PAGE_SIZE;
	pvva = kmem_alloc_pageable(kernel_map, npvvapg * PAGE_SIZE);
	/*
	 * get the first batch of entries
	 */
	pmap_alloc_pv_entry();
}
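/*
 * Sizing sketch (illustrative, not part of the original file; the entry
 * size is an assumption): init_pv_entries() budgets PVSPERPAGE (64)
 * entries per physical page.  Taking a pv_entry as roughly 28 bytes, a
 * 32MB machine (8192 pages) reserves about
 *
 *	8192 * 64 * 28 / PAGE_SIZE =~ 3584 pages
 *
 * of pageable KVA, of which only pages actually touched by
 * pmap_alloc_pv_entry() ever receive physical backing.
 */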
/*
 * If it is the first entry on the list, it is actually
 * in the header and we must copy the following entry up
 * to the header.  Otherwise we must search the list for
 * the entry.  In either case we free the now unused entry.
 */
static int
pmap_remove_entry(pmap, ppv, va)
	struct pmap *pmap;
	pv_table_t *ppv;
	vm_offset_t va;
{
	pv_entry_t pv;
	int rtval;
	int s;

	s = splvm();
	if (ppv->pv_list_count < pmap->pm_stats.resident_count) {
		for (pv = TAILQ_FIRST(&ppv->pv_list);
			pv;
			pv = TAILQ_NEXT(pv, pv_list)) {
			if (pmap == pv->pv_pmap && va == pv->pv_va)
				break;
		}
	} else {
		for (pv = TAILQ_FIRST(&pmap->pm_pvlist);
			pv;
			pv = TAILQ_NEXT(pv, pv_plist)) {
			if (va == pv->pv_va)
				break;
		}
	}

	rtval = 0;
	if (pv) {
		rtval = pmap_unuse_pt(pmap, va, pv->pv_ptem);
		TAILQ_REMOVE(&ppv->pv_list, pv, pv_list);
		--ppv->pv_list_count;
		TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist);
		free_pv_entry(pv);
	}

	splx(s);
	return rtval;
}

/*
 * Create a pv entry for page at pa for
 * (pmap, va).
 */
static void
pmap_insert_entry(pmap, va, mpte, pa)
	pmap_t pmap;
	vm_offset_t va;
	vm_page_t mpte;
	vm_offset_t pa;
{
	int s;
	pv_entry_t pv;
	pv_table_t *ppv;

	s = splvm();
	pv = get_pv_entry();
	pv->pv_va = va;
	pv->pv_pmap = pmap;
	pv->pv_ptem = mpte;

	TAILQ_INSERT_TAIL(&pmap->pm_pvlist, pv, pv_plist);

	ppv = pa_to_pvh(pa);
	TAILQ_INSERT_TAIL(&ppv->pv_list, pv, pv_list);
	++ppv->pv_list_count;

	splx(s);
}

/*
 * pmap_remove_pte: do the things to unmap a page in a process
 */
static int
pmap_remove_pte(pmap, ptq, va)
	struct pmap *pmap;
	unsigned *ptq;
	vm_offset_t va;
{
	unsigned oldpte;
	pv_table_t *ppv;

	oldpte = *ptq;
	*ptq = 0;
	if (oldpte & PG_W)
		pmap->pm_stats.wired_count -= 1;
	pmap->pm_stats.resident_count -= 1;
	if (oldpte & PG_MANAGED) {
		ppv = pa_to_pvh(oldpte);
		if (oldpte & PG_M) {
#if defined(PMAP_DIAGNOSTIC)
			if (pmap_nw_modified((pt_entry_t) oldpte)) {
				printf("pmap_remove: modified page not writable: va: 0x%lx, pte: 0x%lx\n", va, (int) oldpte);
			}
#endif
			if (pmap_track_modified(va))
				ppv->pv_vm_page->dirty = VM_PAGE_BITS_ALL;
		}
		return pmap_remove_entry(pmap, ppv, va);
	} else {
		return pmap_unuse_pt(pmap, va, NULL);
	}
}

/*
 * Remove a single page from a process address space
 */
static void
pmap_remove_page(pmap, va)
	struct pmap *pmap;
	register vm_offset_t va;
{
	register unsigned *ptq;

	/*
	 * if there is no pte for this address, just skip it!!!
	 */
	if (*pmap_pde(pmap, va) == 0) {
		return;
	}

	/*
	 * get a local va for mappings for this pmap.
	 */
	ptq = get_ptbase(pmap) + i386_btop(va);
	if (*ptq) {
		(void) pmap_remove_pte(pmap, ptq, va);
		invltlb_1pg(va);
	}
	return;
}
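/*
 * Illustrative note (not part of the original file): every pv_entry is
 * threaded onto two lists at once -- the per-page list rooted in
 * pv_table (pv_list) and the per-pmap list rooted in the pmap
 * (pm_pvlist).  pmap_remove_entry() above exploits this by searching
 * whichever list is expected to be shorter.
 */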
/*
 * Remove the given range of addresses from the specified map.
 *
 * It is assumed that the start and end are properly
 * rounded to the page size.
 */
void
pmap_remove(pmap, sva, eva)
	struct pmap *pmap;
	register vm_offset_t sva;
	register vm_offset_t eva;
{
	register unsigned *ptbase;
	vm_offset_t pdnxt;
	vm_offset_t ptpaddr;
	vm_offset_t sindex, eindex;
	int anyvalid;

	if (pmap == NULL)
		return;

	pmap_lock(pmap);
	/*
	 * special handling of removing one page.  a very
	 * common operation and easy to short circuit some
	 * code.
	 */
	if ((sva + PAGE_SIZE) == eva) {
		pmap_remove_page(pmap, sva);
		pmap_unlock(pmap);
		return;
	}

	anyvalid = 0;

	/*
	 * Get a local virtual address for the mappings that are being
	 * worked with.
	 */
	ptbase = get_ptbase(pmap);

	sindex = i386_btop(sva);
	eindex = i386_btop(eva);

	for (; sindex < eindex; sindex = pdnxt) {

		/*
		 * Calculate index for next page table.
		 */
		pdnxt = ((sindex + NPTEPG) & ~(NPTEPG - 1));
		ptpaddr = (vm_offset_t) *pmap_pde(pmap, i386_ptob(sindex));

		/*
		 * Weed out invalid mappings. Note: we assume that the page
		 * directory table is always allocated, and in kernel virtual.
		 */
		if (ptpaddr == 0)
			continue;

		/*
		 * Limit our scan to either the end of the va represented
		 * by the current page table page, or to the end of the
		 * range being removed.
		 */
		if (pdnxt > eindex) {
			pdnxt = eindex;
		}

		for (; sindex != pdnxt; sindex++) {
			vm_offset_t va;
			if (ptbase[sindex] == 0) {
				continue;
			}
			va = i386_ptob(sindex);

			anyvalid++;
			if (pmap_remove_pte(pmap,
				ptbase + sindex, va))
				break;
		}
	}

	if (anyvalid) {
		invltlb();
	}
	pmap_unlock(pmap);
}
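/*
 * Design note (illustrative, not part of the original file): in the
 * multi-page path pmap_remove() above does not invalidate the TLB per
 * page; it counts removed mappings in anyvalid and issues one global
 * invltlb() at the end.  The single-page fast path instead uses the
 * targeted invltlb_1pg() via pmap_remove_page().
 */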
/*
 * Routine:	pmap_remove_all
 * Function:
 *		Removes this physical page from
 *		all physical maps in which it resides.
 *		Reflects back modify bits to the pager.
 *
 * Notes:
 *		Original versions of this routine were very
 *		inefficient because they iteratively called
 *		pmap_remove (slow...)
 */

static void
pmap_remove_all(pa)
	vm_offset_t pa;
{
	register pv_entry_t pv;
	pv_table_t *ppv;
	register unsigned *pte, tpte;
	int nmodify;
	int update_needed;
	int s;

	nmodify = 0;
	update_needed = 0;
#if defined(PMAP_DIAGNOSTIC)
	/*
	 * XXX this makes pmap_page_protect(NONE) illegal for non-managed
	 * pages!
	 */
	if (!pmap_is_managed(pa)) {
		panic("pmap_page_protect: illegal for unmanaged page, va: 0x%lx", pa);
	}
#endif

	s = splvm();
	ppv = pa_to_pvh(pa);
	while ((pv = TAILQ_FIRST(&ppv->pv_list)) != NULL) {
		pmap_lock(pv->pv_pmap);
		pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va);

		pv->pv_pmap->pm_stats.resident_count--;

		tpte = *pte;
		*pte = 0;
		if (tpte & PG_W)
			pv->pv_pmap->pm_stats.wired_count--;
		/*
		 * Update the vm_page_t clean and reference bits.
		 */
		if (tpte & PG_M) {
#if defined(PMAP_DIAGNOSTIC)
			if (pmap_nw_modified((pt_entry_t) tpte)) {
				printf("pmap_remove_all: modified page not writable: va: 0x%lx, pte: 0x%lx\n", pv->pv_va, tpte);
			}
#endif
			if (pmap_track_modified(pv->pv_va))
				ppv->pv_vm_page->dirty = VM_PAGE_BITS_ALL;
		}
		if (!update_needed &&
			((!curproc || (&curproc->p_vmspace->vm_pmap == pv->pv_pmap)) ||
			(pv->pv_pmap == kernel_pmap))) {
			update_needed = 1;
		}

		TAILQ_REMOVE(&pv->pv_pmap->pm_pvlist, pv, pv_plist);
		TAILQ_REMOVE(&ppv->pv_list, pv, pv_list);
		--ppv->pv_list_count;
		pmap_unuse_pt(pv->pv_pmap, pv->pv_va, pv->pv_ptem);
		pmap_unlock(pv->pv_pmap);
		free_pv_entry(pv);
	}

	if (update_needed)
		invltlb();
	splx(s);
	return;
}

/*
 * Set the physical protection on the
 * specified range of this map as requested.
 */
void
pmap_protect(pmap, sva, eva, prot)
	register pmap_t pmap;
	vm_offset_t sva, eva;
	vm_prot_t prot;
{
	register unsigned *ptbase;
	vm_offset_t pdnxt;
	vm_offset_t ptpaddr;
	vm_offset_t sindex, eindex;
	int anychanged;

	if (pmap == NULL)
		return;

	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
		pmap_remove(pmap, sva, eva);
		return;
	}
	if (prot & VM_PROT_WRITE) {
		return;
	}

	pmap_lock(pmap);
	anychanged = 0;

	ptbase = get_ptbase(pmap);

	sindex = i386_btop(sva);
	eindex = i386_btop(eva);

	for (; sindex < eindex; sindex = pdnxt) {

		pdnxt = ((sindex + NPTEPG) & ~(NPTEPG - 1));
		ptpaddr = (vm_offset_t) *pmap_pde(pmap, i386_ptob(sindex));

		/*
		 * Weed out invalid mappings. Note: we assume that the page
		 * directory table is always allocated, and in kernel virtual.
		 */
		if (ptpaddr == 0)
			continue;

		if (pdnxt > eindex) {
			pdnxt = eindex;
		}

		for (; sindex != pdnxt; sindex++) {

			unsigned pbits = ptbase[sindex];

			if (pbits & PG_RW) {
				if (pbits & PG_M) {
					vm_offset_t sva = i386_ptob(sindex);
					if (pmap_track_modified(sva)) {
						vm_page_t m = PHYS_TO_VM_PAGE(pbits);
						m->dirty = VM_PAGE_BITS_ALL;
					}
				}
				ptbase[sindex] = pbits & ~(PG_M|PG_RW);
				anychanged = 1;
			}
		}
	}
	pmap_unlock(pmap);
	if (anychanged)
		invltlb();
}
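/*
 * Usage sketch (illustrative, not part of the original file): the only
 * downgrade pmap_protect() can express is write -> read-only, so calls
 * reduce to three cases:
 */
#if 0
	pmap_protect(pm, sva, eva, VM_PROT_NONE);	/* becomes pmap_remove() */
	pmap_protect(pm, sva, eva, VM_PROT_READ);	/* clears PG_RW and PG_M */
	pmap_protect(pm, sva, eva, VM_PROT_READ | VM_PROT_WRITE); /* no-op */
#endif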
/*
 * Insert the given physical page (p) at
 * the specified virtual address (v) in the
 * target physical map with the protection requested.
 *
 * If specified, the page will be wired down, meaning
 * that the related pte can not be reclaimed.
 *
 * NB:  This is the only routine which MAY NOT lazy-evaluate
 * or lose information.  That is, this routine must actually
 * insert this page into the given map NOW.
 */
void
pmap_enter(pmap, va, pa, prot, wired)
	register pmap_t pmap;
	vm_offset_t va;
	register vm_offset_t pa;
	vm_prot_t prot;
	boolean_t wired;
{
	register unsigned *pte;
	vm_offset_t opa;
	vm_offset_t origpte, newpte;
	vm_page_t mpte;

	if (pmap == NULL)
		return;

	pmap_lock(pmap);
	va &= PG_FRAME;
#ifdef PMAP_DIAGNOSTIC
	if (va > VM_MAX_KERNEL_ADDRESS)
		panic("pmap_enter: toobig");
	if ((va >= UPT_MIN_ADDRESS) && (va < UPT_MAX_ADDRESS))
		panic("pmap_enter: invalid to pmap_enter page table pages (va: 0x%x)", va);
#endif

	mpte = NULL;
	/*
	 * In the case that a page table page is not
	 * resident, we are creating it here.
	 */
	if (va < UPT_MIN_ADDRESS)
		mpte = pmap_allocpte(pmap, va);

	pte = pmap_pte(pmap, va);
	/*
	 * Page Directory table entry not valid, we need a new PT page
	 */
	if (pte == NULL) {
		panic("pmap_enter: invalid page directory, pdir=%p, va=0x%lx\n",
			pmap->pm_pdir[PTDPTDI], va);
	}

	origpte = *(vm_offset_t *)pte;
	pa &= PG_FRAME;
	opa = origpte & PG_FRAME;

	/*
	 * Mapping has not changed, must be protection or wiring change.
	 */
	if (origpte && (opa == pa)) {
		/*
		 * Wiring change, just update stats. We don't worry about
		 * wiring PT pages as they remain resident as long as there
		 * are valid mappings in them. Hence, if a user page is wired,
		 * the PT page will be also.
		 */
		if (wired && ((origpte & PG_W) == 0))
			pmap->pm_stats.wired_count++;
		else if (!wired && (origpte & PG_W))
			pmap->pm_stats.wired_count--;

#if defined(PMAP_DIAGNOSTIC)
		if (pmap_nw_modified((pt_entry_t) origpte)) {
			printf("pmap_enter: modified page not writable: va: 0x%lx, pte: 0x%lx\n", va, origpte);
		}
#endif

		/*
		 * We might be turning off write access to the page,
		 * so we go ahead and sense modify status.
		 */
		if (origpte & PG_MANAGED) {
			vm_page_t m;
			if (origpte & PG_M) {
				if (pmap_track_modified(va)) {
					m = PHYS_TO_VM_PAGE(pa);
					m->dirty = VM_PAGE_BITS_ALL;
				}
			}
			pa |= PG_MANAGED;
		}

		if (mpte)
			--mpte->hold_count;

		goto validate;
	}
	/*
	 * Mapping has changed, invalidate old range and fall through to
	 * handle validating new mapping.
	 */
	if (opa) {
		int err;
		err = pmap_remove_pte(pmap, pte, va);
		if (err)
			panic("pmap_enter: pte vanished, va: 0x%x", va);
	}

	/*
	 * Enter on the PV list if part of our managed memory.  Note that we
	 * raise IPL while manipulating pv_table since pmap_enter can be
	 * called at interrupt time.
	 */
	if (pmap_is_managed(pa)) {
		pmap_insert_entry(pmap, va, mpte, pa);
		pa |= PG_MANAGED;
	}

	/*
	 * Increment counters
	 */
	pmap->pm_stats.resident_count++;
	if (wired)
		pmap->pm_stats.wired_count++;

validate:
	/*
	 * Now validate mapping with desired protection/wiring.
	 */
	newpte = (vm_offset_t) (pa | pte_prot(pmap, prot) | PG_V);

	if (wired)
		newpte |= PG_W;
	if (va < UPT_MIN_ADDRESS)
		newpte |= PG_U;

	/*
	 * if the mapping or permission bits are different, we need
	 * to update the pte.
	 */
	if ((origpte & ~(PG_M|PG_A)) != newpte) {
		*pte = newpte;
		if (origpte)
			invltlb_1pg(va);
	}
	pmap_unlock(pmap);
}
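/*
 * Usage sketch (illustrative, not part of the original file): a wired,
 * writable kernel mapping of one physical page -- comparable to the
 * per-page call pmap_map() makes earlier in this file:
 */
#if 0
	pmap_enter(kernel_pmap, va, pa, VM_PROT_READ | VM_PROT_WRITE, TRUE);
#endif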
/*
 * this code makes some *MAJOR* assumptions:
 * 1. Current pmap & pmap exists.
 * 2. Not wired.
 * 3. Read access.
 * 4. No page table pages.
 * 5. Tlbflush is deferred to calling procedure.
 * 6. Page IS managed.
 * but is *MUCH* faster than pmap_enter...
 */

static vm_page_t
pmap_enter_quick(pmap, va, pa, mpte)
	register pmap_t pmap;
	vm_offset_t va;
	register vm_offset_t pa;
	vm_page_t mpte;
{
	register unsigned *pte;

	/*
	 * In the case that a page table page is not
	 * resident, we are creating it here.
	 */
	if (va < UPT_MIN_ADDRESS) {
		unsigned ptepindex;
		vm_offset_t ptepa;

		/*
		 * Calculate pagetable page index
		 */
		ptepindex = va >> PDRSHIFT;
		if (mpte && (mpte->pindex == ptepindex)) {
			++mpte->hold_count;
		} else {
retry:
			/*
			 * Get the page directory entry
			 */
			ptepa = (vm_offset_t) pmap->pm_pdir[ptepindex];

			/*
			 * If the page table page is mapped, we just increment
			 * the hold count, and activate it.
			 */
			if (ptepa) {
#if defined(PTPHINT)
				if (pmap->pm_ptphint &&
					(pmap->pm_ptphint->pindex == ptepindex)) {
					mpte = pmap->pm_ptphint;
				} else {
					mpte = pmap_page_lookup(pmap->pm_pteobj, ptepindex);
					pmap->pm_ptphint = mpte;
				}
#else
				mpte = pmap_page_lookup(pmap->pm_pteobj, ptepindex);
#endif
				if (mpte == NULL)
					goto retry;
				++mpte->hold_count;
			} else {
				mpte = _pmap_allocpte(pmap, ptepindex);
			}
		}
	} else {
		mpte = NULL;
	}

	/*
	 * This call to vtopte makes the assumption that we are
	 * entering the page into the current pmap.  In order to support
	 * quick entry into any pmap, one would likely use pmap_pte_quick.
	 * But that isn't as quick as vtopte.
	 */
	pte = (unsigned *)vtopte(va);
	if (*pte) {
		if (mpte)
			pmap_unwire_pte_hold(pmap, mpte);
		return 0;
	}

	/*
	 * Enter on the PV list if part of our managed memory.  Note that we
	 * raise IPL while manipulating pv_table since pmap_enter can be
	 * called at interrupt time.
	 */
	pmap_insert_entry(pmap, va, mpte, pa);

	/*
	 * Increment counters
	 */
	pmap->pm_stats.resident_count++;

	/*
	 * Now validate mapping with RO protection
	 */
	*pte = pa | PG_V | PG_U | PG_MANAGED;

	return mpte;
}
#define MAX_INIT_PT (96)
/*
 * pmap_object_init_pt preloads the ptes for a given object
 * into the specified pmap.  This eliminates the blast of soft
 * faults on process startup and immediately after an mmap.
 */
void
pmap_object_init_pt(pmap, addr, object, pindex, size, limit)
	pmap_t pmap;
	vm_offset_t addr;
	vm_object_t object;
	vm_pindex_t pindex;
	vm_size_t size;
	int limit;
{
	vm_offset_t tmpidx;
	int psize;
	vm_page_t p, mpte;
	int objpgs;

	psize = i386_btop(size);

	if (!pmap || (object->type != OBJT_VNODE) ||
		(limit && (psize > MAX_INIT_PT) &&
		(object->resident_page_count > MAX_INIT_PT))) {
		return;
	}

	pmap_lock(pmap);
	if (psize + pindex > object->size)
		psize = object->size - pindex;

	mpte = NULL;
	/*
	 * if we are processing a major portion of the object, then scan the
	 * entire thing.
	 */
	if (psize > (object->size >> 2)) {
		objpgs = psize;

		for (p = TAILQ_FIRST(&object->memq);
			((objpgs > 0) && (p != NULL));
			p = TAILQ_NEXT(p, listq)) {

			tmpidx = p->pindex;
			if (tmpidx < pindex) {
				continue;
			}
			tmpidx -= pindex;
			if (tmpidx >= psize) {
				continue;
			}
			if (((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
			    (p->busy == 0) &&
			    (p->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {
				if ((p->queue - p->pc) == PQ_CACHE)
					vm_page_deactivate(p);
				p->flags |= PG_BUSY;
				mpte = pmap_enter_quick(pmap,
					addr + i386_ptob(tmpidx),
					VM_PAGE_TO_PHYS(p), mpte);
				p->flags |= PG_MAPPED;
				PAGE_WAKEUP(p);
			}
			objpgs -= 1;
		}
	} else {
		/*
		 * else lookup the pages one-by-one.
		 */
		for (tmpidx = 0; tmpidx < psize; tmpidx += 1) {
			p = vm_page_lookup(object, tmpidx + pindex);
			if (p &&
			    ((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
			    (p->busy == 0) &&
			    (p->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {
				if ((p->queue - p->pc) == PQ_CACHE)
					vm_page_deactivate(p);
				p->flags |= PG_BUSY;
				mpte = pmap_enter_quick(pmap,
					addr + i386_ptob(tmpidx),
					VM_PAGE_TO_PHYS(p), mpte);
				p->flags |= PG_MAPPED;
				PAGE_WAKEUP(p);
			}
		}
	}
	pmap_unlock(pmap);
	return;
}
/*
 * pmap_prefault provides a quick way of clustering
 * pagefaults into a process's address space.  It is a "cousin"
 * of pmap_object_init_pt, except it runs at page fault time instead
 * of mmap time.
 */
#define PFBAK 2
#define PFFOR 2
#define PAGEORDER_SIZE (PFBAK+PFFOR)

static int pmap_prefault_pageorder[] = {
	-PAGE_SIZE, PAGE_SIZE, -2 * PAGE_SIZE, 2 * PAGE_SIZE
};

void
pmap_prefault(pmap, addra, entry, object)
	pmap_t pmap;
	vm_offset_t addra;
	vm_map_entry_t entry;
	vm_object_t object;
{
	int i;
	vm_offset_t starta;
	vm_offset_t addr;
	vm_pindex_t pindex;
	vm_page_t m, mpte;

	if (entry->object.vm_object != object)
		return;

	if (!curproc || (pmap != &curproc->p_vmspace->vm_pmap))
		return;

	pmap_lock(pmap);
	starta = addra - PFBAK * PAGE_SIZE;
	if (starta < entry->start) {
		starta = entry->start;
	} else if (starta > addra) {
		starta = 0;
	}

	mpte = NULL;
	for (i = 0; i < PAGEORDER_SIZE; i++) {
		vm_object_t lobject;
		unsigned *pte;

		addr = addra + pmap_prefault_pageorder[i];
		if (addr < starta || addr >= entry->end)
			continue;

		if ((*pmap_pde(pmap, addr)) == NULL)
			continue;

		pte = (unsigned *) vtopte(addr);
		if (*pte)
			continue;

		pindex = ((addr - entry->start) + entry->offset) >> PAGE_SHIFT;
		lobject = object;
		for (m = vm_page_lookup(lobject, pindex);
		    (!m && (lobject->type == OBJT_DEFAULT) && (lobject->backing_object));
		    lobject = lobject->backing_object) {
			if (lobject->backing_object_offset & PAGE_MASK)
				break;
			pindex += (lobject->backing_object_offset >> PAGE_SHIFT);
			m = vm_page_lookup(lobject->backing_object, pindex);
		}

		/*
		 * give-up when a page is not in memory
		 */
		if (m == NULL)
			break;

		if (((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
		    (m->busy == 0) &&
		    (m->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {

			if ((m->queue - m->pc) == PQ_CACHE) {
				vm_page_deactivate(m);
			}
			m->flags |= PG_BUSY;
			mpte = pmap_enter_quick(pmap, addr,
				VM_PAGE_TO_PHYS(m), mpte);
			m->flags |= PG_MAPPED;
			PAGE_WAKEUP(m);
		}
	}
	pmap_unlock(pmap);
}
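/*
 * Worked example (illustrative, not part of the original file): for a
 * fault at address A, pmap_prefault_pageorder makes pmap_prefault()
 * probe the neighbors in the order A-4K, A+4K, A-8K, A+8K, nearest
 * first, skipping any that fall outside the map entry, lack a page
 * table, are already mapped, or are not resident.
 */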
/*
 * Routine:	pmap_change_wiring
 * Function:	Change the wiring attribute for a map/virtual-address
 *		pair.
 * In/out conditions:
 *		The mapping must already exist in the pmap.
 */
void
pmap_change_wiring(pmap, va, wired)
	register pmap_t pmap;
	vm_offset_t va;
	boolean_t wired;
{
	register unsigned *pte;

	if (pmap == NULL)
		return;

	pmap_lock(pmap);
	pte = pmap_pte(pmap, va);

	if (wired && !pmap_pte_w(pte))
		pmap->pm_stats.wired_count++;
	else if (!wired && pmap_pte_w(pte))
		pmap->pm_stats.wired_count--;

	/*
	 * Wiring is not a hardware characteristic so there is no need to
	 * invalidate TLB.
	 */
	pmap_pte_set_w(pte, wired);
	pmap_unlock(pmap);
}



/*
 * Copy the range specified by src_addr/len
 * from the source map to the range dst_addr/len
 * in the destination map.
 *
 * This routine is only advisory and need not do anything.
 */

void
pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
	pmap_t dst_pmap, src_pmap;
	vm_offset_t dst_addr;
	vm_size_t len;
	vm_offset_t src_addr;
{
	vm_offset_t addr;
	vm_offset_t end_addr = src_addr + len;
	vm_offset_t pdnxt;
	unsigned src_frame, dst_frame;

	if (dst_addr != src_addr)
		return;

	pmap_lock2(src_pmap, dst_pmap);
	src_frame = ((unsigned) src_pmap->pm_pdir[PTDPTDI]) & PG_FRAME;
	if (src_frame != (((unsigned) PTDpde) & PG_FRAME)) {
		pmap_unlock(src_pmap);
		pmap_unlock(dst_pmap);
		return;
	}

	dst_frame = ((unsigned) dst_pmap->pm_pdir[PTDPTDI]) & PG_FRAME;
	if (dst_frame != (((unsigned) APTDpde) & PG_FRAME)) {
		APTDpde = (pd_entry_t) (dst_frame | PG_RW | PG_V);
		invltlb();
	}

	for (addr = src_addr; addr < end_addr; addr = pdnxt) {
		unsigned *src_pte, *dst_pte;
		vm_page_t dstmpte, srcmpte;
		vm_offset_t srcptepaddr;
		unsigned ptepindex;

		if (addr >= UPT_MIN_ADDRESS)
			panic("pmap_copy: invalid to pmap_copy page tables\n");

		pdnxt = ((addr + PAGE_SIZE*NPTEPG) & ~(PAGE_SIZE*NPTEPG - 1));
		ptepindex = addr >> PDRSHIFT;

		srcptepaddr = (vm_offset_t) src_pmap->pm_pdir[ptepindex];
		if (srcptepaddr == 0)
			continue;

		srcmpte = vm_page_lookup(src_pmap->pm_pteobj, ptepindex);
		if ((srcmpte->hold_count == 0) || (srcmpte->flags & PG_BUSY))
			continue;

		if (pdnxt > end_addr)
			pdnxt = end_addr;

		src_pte = (unsigned *) vtopte(addr);
		dst_pte = (unsigned *) avtopte(addr);
		while (addr < pdnxt) {
			unsigned ptetemp;
			ptetemp = *src_pte;
			/*
			 * we only virtual copy managed pages
			 */
			if ((ptetemp & PG_MANAGED) != 0) {
				/*
				 * We have to check after allocpte for the
				 * pte still being around...  allocpte can
				 * block.
				 */
				dstmpte = pmap_allocpte(dst_pmap, addr);
				if ((*dst_pte == 0) && (ptetemp = *src_pte)) {
					/*
					 * Clear the modified and
					 * accessed (referenced) bits
					 * during the copy.
					 */
					*dst_pte = ptetemp & ~(PG_M|PG_A);
					dst_pmap->pm_stats.resident_count++;
					pmap_insert_entry(dst_pmap, addr,
						dstmpte,
						(ptetemp & PG_FRAME));
				} else {
					pmap_unwire_pte_hold(dst_pmap, dstmpte);
				}
				if (dstmpte->hold_count >= srcmpte->hold_count)
					break;
			}
			addr += PAGE_SIZE;
			++src_pte;
			++dst_pte;
		}
	}
	pmap_unlock(src_pmap);
	pmap_unlock(dst_pmap);
}

/*
 * Routine:	pmap_kernel
 * Function:
 *	Returns the physical map handle for the kernel.
 */
pmap_t
pmap_kernel()
{
	return (kernel_pmap);
}

/*
 * pmap_zero_page zeros the specified (machine independent)
 * page by mapping the page into virtual memory and using
 * bzero to clear its contents, one machine dependent page
 * at a time.
 */
void
pmap_zero_page(phys)
	vm_offset_t phys;
{
	if (*(int *) CMAP2)
		panic("pmap_zero_page: CMAP busy");

	*(int *) CMAP2 = PG_V | PG_RW | (phys & PG_FRAME);
	bzero(CADDR2, PAGE_SIZE);

	*(int *) CMAP2 = 0;
	invltlb_1pg((vm_offset_t) CADDR2);
}
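/*
 * Design note (illustrative, not part of the original file): CMAP1 and
 * CMAP2 are the single-page PTE windows reserved in pmap_bootstrap().
 * pmap_zero_page() above and pmap_copy_page() below aim them at
 * arbitrary physical pages so the kernel can bzero/bcopy memory it has
 * no permanent mapping for, then tear the windows down again.
 */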
2304 */ 2305void 2306pmap_copy_page(src, dst) 2307 vm_offset_t src; 2308 vm_offset_t dst; 2309{ 2310 if (*(int *) CMAP1 || *(int *) CMAP2) 2311 panic("pmap_copy_page: CMAP busy"); 2312 2313 *(int *) CMAP1 = PG_V | PG_RW | (src & PG_FRAME); 2314 *(int *) CMAP2 = PG_V | PG_RW | (dst & PG_FRAME); 2315 2316 bcopy(CADDR1, CADDR2, PAGE_SIZE); 2317 2318 *(int *) CMAP1 = 0; 2319 *(int *) CMAP2 = 0; 2320 invltlb_2pg((vm_offset_t) CADDR1, (vm_offset_t) CADDR2); 2321} 2322 2323 2324/* 2325 * Routine: pmap_pageable 2326 * Function: 2327 * Make the specified pages (by pmap, offset) 2328 * pageable (or not) as requested. 2329 * 2330 * A page which is not pageable may not take 2331 * a fault; therefore, its page table entry 2332 * must remain valid for the duration. 2333 * 2334 * This routine is merely advisory; pmap_enter 2335 * will specify that these pages are to be wired 2336 * down (or not) as appropriate. 2337 */ 2338void 2339pmap_pageable(pmap, sva, eva, pageable) 2340 pmap_t pmap; 2341 vm_offset_t sva, eva; 2342 boolean_t pageable; 2343{ 2344} 2345 2346/* 2347 * this routine returns true if a physical page resides 2348 * in the given pmap. 2349 */ 2350boolean_t 2351pmap_page_exists(pmap, pa) 2352 pmap_t pmap; 2353 vm_offset_t pa; 2354{ 2355 register pv_entry_t pv; 2356 pv_table_t *ppv; 2357 int s; 2358 2359 if (!pmap_is_managed(pa)) 2360 return FALSE; 2361 2362 s = splvm(); 2363 2364 ppv = pa_to_pvh(pa); 2365 /* 2366 * Check current mappings, returning immediately if one is found. 2367 */ 2368 for (pv = TAILQ_FIRST(&ppv->pv_list); 2369 pv; 2370 pv = TAILQ_NEXT(pv, pv_list)) { 2371 if (pv->pv_pmap == pmap) { 2372 splx(s); 2373 return TRUE; 2374 } 2375 } 2376 splx(s); 2377 return (FALSE); 2378} 2379 2380#define PMAP_REMOVE_PAGES_CURPROC_ONLY 2381/* 2382 * Remove all pages from the specified address space; 2383 * this aids process exit speed. Also, this code 2384 * is special-cased for the current process only, but 2385 * the more generic (and slightly slower) mode can 2386 * be enabled. This is much faster than pmap_remove 2387 * in the case of running down an entire address space. 2388 */ 2389void 2390pmap_remove_pages(pmap, sva, eva) 2391 pmap_t pmap; 2392 vm_offset_t sva, eva; 2393{ 2394 unsigned *pte, tpte; 2395 pv_table_t *ppv; 2396 pv_entry_t pv, npv; 2397 int s; 2398 2399#ifdef PMAP_REMOVE_PAGES_CURPROC_ONLY 2400 if (!curproc || (pmap != &curproc->p_vmspace->vm_pmap)) { 2401 printf("warning: pmap_remove_pages called with non-current pmap\n"); 2402 return; 2403 } 2404#endif 2405 2406 pmap_lock(pmap); 2407 s = splhigh(); 2408 2409 for(pv = TAILQ_FIRST(&pmap->pm_pvlist); 2410 pv; 2411 pv = npv) { 2412 2413 if (pv->pv_va >= eva || pv->pv_va < sva) { 2414 npv = TAILQ_NEXT(pv, pv_plist); 2415 continue; 2416 } 2417 2418#ifdef PMAP_REMOVE_PAGES_CURPROC_ONLY 2419 pte = (unsigned *)vtopte(pv->pv_va); 2420#else 2421 pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va); 2422#endif 2423 tpte = *pte; 2424 *pte = 0; 2425 2426 ppv = pa_to_pvh(tpte); 2427 2428 if (tpte) { 2429 pv->pv_pmap->pm_stats.resident_count--; 2430 if (tpte & PG_W) 2431 pv->pv_pmap->pm_stats.wired_count--; 2432 /* 2433 * Update the vm_page_t clean and reference bits.
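 * A set PG_M bit means the hardware dirtied the page through this mapping, so that state must be recorded in the vm_page_t before the pte is destroyed.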
2434 */ 2435 if (tpte & PG_M) { 2436 ppv->pv_vm_page->dirty = VM_PAGE_BITS_ALL; 2437 } 2438 } 2439 2440 npv = TAILQ_NEXT(pv, pv_plist); 2441 TAILQ_REMOVE(&pv->pv_pmap->pm_pvlist, pv, pv_plist); 2442 2443 --ppv->pv_list_count; 2444 TAILQ_REMOVE(&ppv->pv_list, pv, pv_list); 2445 2446 pmap_unuse_pt(pv->pv_pmap, pv->pv_va, pv->pv_ptem); 2447 free_pv_entry(pv); 2448 } 2449 splx(s); 2450 invltlb(); 2451 pmap_unlock(pmap); 2452} 2453 2454/* 2455 * pmap_testbit tests bits in ptes. 2456 * Note that the testbit/changebit routines are inline, 2457 * and a lot of the checks are evaluated at compile time. 2458 */ 2459static boolean_t 2460pmap_testbit(pa, bit) 2461 register vm_offset_t pa; 2462 int bit; 2463{ 2464 register pv_entry_t pv; 2465 pv_table_t *ppv; 2466 unsigned *pte; 2467 int s; 2468 2469 if (!pmap_is_managed(pa)) 2470 return FALSE; 2471 2472 ppv = pa_to_pvh(pa); 2473 if (TAILQ_FIRST(&ppv->pv_list) == NULL) 2474 return FALSE; 2475 2476 s = splvm(); 2477 2478 for (pv = TAILQ_FIRST(&ppv->pv_list); 2479 pv; 2480 pv = TAILQ_NEXT(pv, pv_list)) { 2481 2482 /* 2483 * When testing the modified or referenced bit, skip 2484 * mappings (such as pager mappings) whose state is 2485 * not tracked. 2486 */ 2487 if (bit & (PG_A|PG_M)) { 2488 if (!pmap_track_modified(pv->pv_va)) 2489 continue; 2490 } 2491 2492#if defined(PMAP_DIAGNOSTIC) 2493 if (!pv->pv_pmap) { 2494 printf("Null pmap (tb) at va: 0x%lx\n", pv->pv_va); 2495 continue; 2496 } 2497#endif 2498 pmap_lock(pv->pv_pmap); 2499 pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va); 2500 if (pte == NULL) { 2501 pmap_unlock(pv->pv_pmap); 2502 continue; 2503 } 2504 if (*pte & bit) { 2505 pmap_unlock(pv->pv_pmap); 2506 splx(s); 2507 return TRUE; 2508 } 2509 pmap_unlock(pv->pv_pmap); 2510 } 2511 splx(s); 2512 return (FALSE); 2513} 2514 2515/* 2516 * this routine is used to modify bits in ptes 2517 */ 2518static void 2519pmap_changebit(pa, bit, setem) 2520 vm_offset_t pa; 2521 int bit; 2522 boolean_t setem; 2523{ 2524 register pv_entry_t pv; 2525 pv_table_t *ppv; 2526 register unsigned *pte; 2527 int changed; 2528 int s; 2529 2530 if (!pmap_is_managed(pa)) 2531 return; 2532 2533 s = splvm(); 2534 changed = 0; 2535 ppv = pa_to_pvh(pa); 2536 2537 /* 2538 * Loop over all current mappings, setting/clearing as appropriate. 2539 * If setting RO, do we need to clear the VAC? 2540 */ 2541 for (pv = TAILQ_FIRST(&ppv->pv_list); 2542 pv; 2543 pv = TAILQ_NEXT(pv, pv_list)) { 2544 2545 /* 2546 * don't write-protect pager mappings 2547 */ 2548 if (!setem && (bit == PG_RW)) { 2549 if (!pmap_track_modified(pv->pv_va)) 2550 continue; 2551 } 2552 2553#if defined(PMAP_DIAGNOSTIC) 2554 if (!pv->pv_pmap) { 2555 printf("Null pmap (cb) at va: 0x%lx\n", pv->pv_va); 2556 continue; 2557 } 2558#endif 2559 2560 pmap_lock(pv->pv_pmap); 2561 pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va); 2562 if (pte == NULL) { 2563 pmap_unlock(pv->pv_pmap); 2564 continue; 2565 } 2566 if (setem) { 2567 *(int *)pte |= bit; 2568 changed = 1; 2569 } else { 2570 vm_offset_t pbits = *(vm_offset_t *)pte; 2571 if (pbits & bit) { 2572 changed = 1; 2573 if (bit == PG_RW) { 2574 if (pbits & PG_M) { 2575 ppv->pv_vm_page->dirty = VM_PAGE_BITS_ALL; 2576 } 2577 *(int *)pte = pbits & ~(PG_M|PG_RW); 2578 } else { 2579 *(int *)pte = pbits & ~bit; 2580 } 2581 } 2582 } 2583 pmap_unlock(pv->pv_pmap); 2584 } 2585 splx(s); 2586 if (changed) 2587 invltlb(); 2588} 2589 2590/* 2591 * pmap_page_protect: 2592 * 2593 * Lower the permission for all mappings to a given page.
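 * Write access is revoked by clearing PG_RW on each mapping of the page; if no access at all is to remain, every mapping is removed outright via pmap_remove_all.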
2594 */ 2595void 2596pmap_page_protect(phys, prot) 2597 vm_offset_t phys; 2598 vm_prot_t prot; 2599{ 2600 if ((prot & VM_PROT_WRITE) == 0) { 2601 if (prot & (VM_PROT_READ | VM_PROT_EXECUTE)) { 2602 pmap_changebit(phys, PG_RW, FALSE); 2603 } else { 2604 pmap_remove_all(phys); 2605 } 2606 } 2607} 2608 2609vm_offset_t 2610pmap_phys_address(ppn) 2611 int ppn; 2612{ 2613 return (i386_ptob(ppn)); 2614} 2615 2616/* 2617 * pmap_is_referenced: 2618 * 2619 * Return whether or not the specified physical page was referenced 2620 * by any physical maps. 2621 */ 2622boolean_t 2623pmap_is_referenced(vm_offset_t pa) 2624{ 2625 register pv_entry_t pv; 2626 pv_table_t *ppv; 2627 unsigned *pte; 2628 int s; 2629 2630 if (!pmap_is_managed(pa)) 2631 return FALSE; 2632 2633 ppv = pa_to_pvh(pa); 2634 2635 s = splvm(); 2636 /* 2637 * Check current mappings, returning immediately if one is found. 2638 */ 2639 for (pv = TAILQ_FIRST(&ppv->pv_list); 2640 pv; 2641 pv = TAILQ_NEXT(pv, pv_list)) { 2642 2643 /* 2644 * Skip mappings (such as pager mappings) whose 2645 * referenced/modified state is not 2646 * tracked. 2647 */ 2648 if (!pmap_track_modified(pv->pv_va)) 2649 continue; 2650 2651 pmap_lock(pv->pv_pmap); 2652 pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va); 2653 if (pte == NULL) { 2654 pmap_unlock(pv->pv_pmap); 2655 continue; 2656 } 2657 if ((int) *pte & PG_A) { 2658 pmap_unlock(pv->pv_pmap); 2659 splx(s); 2660 return TRUE; 2661 } 2662 pmap_unlock(pv->pv_pmap); 2663 } 2664 splx(s); 2665 return (FALSE); 2666} 2667 2668/* 2669 * pmap_ts_referenced: 2670 * 2671 * Return the count of reference bits for a page, clearing all of them. 2672 * 2673 */ 2674int 2675pmap_ts_referenced(vm_offset_t pa) 2676{ 2677 register pv_entry_t pv; 2678 pv_table_t *ppv; 2679 unsigned *pte; 2680 int s; 2681 int rtval = 0; 2682 2683 if (!pmap_is_managed(pa)) 2684 return 0; 2685 2686 s = splvm(); 2687 2688 ppv = pa_to_pvh(pa); 2689 2690 if (TAILQ_FIRST(&ppv->pv_list) == NULL) { 2691 splx(s); 2692 return 0; 2693 } 2694 2695 /* 2696 * Loop over all current mappings, counting and clearing the referenced bits. 2697 */ 2698 for (pv = TAILQ_FIRST(&ppv->pv_list); 2699 pv; 2700 pv = TAILQ_NEXT(pv, pv_list)) { 2701 /* 2702 * Skip mappings (such as pager mappings) whose 2703 * referenced/modified state is not 2704 * tracked. 2705 */ 2706 if (!pmap_track_modified(pv->pv_va)) 2707 continue; 2708 2709 pmap_lock(pv->pv_pmap); 2710 pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va); 2711 if (pte == NULL) { 2712 pmap_unlock(pv->pv_pmap); 2713 continue; 2714 } 2715 if (*pte & PG_A) { 2716 rtval++; 2717 *pte &= ~PG_A; 2718 } 2719 pmap_unlock(pv->pv_pmap); 2720 } 2721 splx(s); 2722 if (rtval) { 2723 invltlb(); 2724 } 2725 return (rtval); 2726} 2727 2728/* 2729 * pmap_is_modified: 2730 * 2731 * Return whether or not the specified physical page was modified 2732 * in any physical maps. 2733 */ 2734boolean_t 2735pmap_is_modified(vm_offset_t pa) 2736{ 2737 return pmap_testbit((pa), PG_M); 2738} 2739 2740/* 2741 * Clear the modify bit on the specified physical page. 2742 */ 2743void 2744pmap_clear_modify(vm_offset_t pa) 2745{ 2746 pmap_changebit((pa), PG_M, FALSE); 2747} 2748 2749/* 2750 * pmap_clear_reference: 2751 * 2752 * Clear the reference bit on the specified physical page.
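 * Clearing PG_A in every mapping allows a subsequent pmap_is_referenced() or pmap_ts_referenced() to detect whether the page has been touched again.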
2753 */ 2754void 2755pmap_clear_reference(vm_offset_t pa) 2756{ 2757 pmap_changebit((pa), PG_A, FALSE); 2758} 2759 2760/* 2761 * Miscellaneous support routines follow 2762 */ 2763 2764static void 2765i386_protection_init() 2766{ 2767 register int *kp, prot; 2768 2769 kp = protection_codes; 2770 for (prot = 0; prot < 8; prot++) { 2771 switch (prot) { 2772 case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_NONE: 2773 /* 2774 * Read access is also 0. There isn't any execute bit, 2775 * so just make it readable. 2776 */ 2777 case VM_PROT_READ | VM_PROT_NONE | VM_PROT_NONE: 2778 case VM_PROT_READ | VM_PROT_NONE | VM_PROT_EXECUTE: 2779 case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_EXECUTE: 2780 *kp++ = 0; 2781 break; 2782 case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_NONE: 2783 case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_EXECUTE: 2784 case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_NONE: 2785 case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE: 2786 *kp++ = PG_RW; 2787 break; 2788 } 2789 } 2790} 2791 2792/* 2793 * Map a set of physical memory pages into the kernel virtual 2794 * address space. Return a pointer to where it is mapped. This 2795 * routine is intended to be used for mapping device memory, 2796 * NOT real memory. The non-cacheable bits are set on each 2797 * mapped page. 2798 */ 2799void * 2800pmap_mapdev(pa, size) 2801 vm_offset_t pa; 2802 vm_size_t size; 2803{ 2804 vm_offset_t va, tmpva; 2805 unsigned *pte; 2806 2807 size = roundup(size, PAGE_SIZE); 2808 2809 va = kmem_alloc_pageable(kernel_map, size); 2810 if (!va) 2811 panic("pmap_mapdev: Couldn't alloc kernel virtual memory"); 2812 2813 pa = pa & PG_FRAME; 2814 for (tmpva = va; size > 0;) { 2815 pte = (unsigned *)vtopte(tmpva); 2816 *pte = pa | PG_RW | PG_V | PG_N; 2817 size -= PAGE_SIZE; 2818 tmpva += PAGE_SIZE; 2819 pa += PAGE_SIZE; 2820 } 2821 invltlb(); 2822 2823 return ((void *) va); 2824} 2825 2826/* 2827 * perform the pmap work for mincore 2828 */ 2829int 2830pmap_mincore(pmap, addr) 2831 pmap_t pmap; 2832 vm_offset_t addr; 2833{ 2834 2835 unsigned *ptep, pte; 2836 int val = 0; 2837 2838 pmap_lock(pmap); 2839 ptep = pmap_pte(pmap, addr); 2840 if (ptep == 0) { 2841 pmap_unlock(pmap); 2842 return 0; 2843 } 2844 2845 if ((pte = *ptep) != 0) { 2846 vm_offset_t pa; 2847 val = MINCORE_INCORE; 2848 pa = pte & PG_FRAME; 2849 2850 /* 2851 * Modified by us 2852 */ 2853 if (pte & PG_M) 2854 val |= MINCORE_MODIFIED|MINCORE_MODIFIED_OTHER; 2855 /* 2856 * Modified by someone 2857 */ 2858 else if (PHYS_TO_VM_PAGE(pa)->dirty || 2859 pmap_is_modified(pa)) 2860 val |= MINCORE_MODIFIED_OTHER; 2861 /* 2862 * Referenced by us 2863 */ 2864 if (pte & PG_A) 2865 val |= MINCORE_REFERENCED|MINCORE_REFERENCED_OTHER; 2866 2867 /* 2868 * Referenced by someone 2869 */ 2870 else if ((PHYS_TO_VM_PAGE(pa)->flags & PG_REFERENCED) || 2871 pmap_is_referenced(pa)) 2872 val |= MINCORE_REFERENCED_OTHER; 2873 } 2874 pmap_unlock(pmap); 2875 return val; 2876} 2877 2878#if defined(PMAP_DEBUG) 2879int pmap_pid_dump(int pid) { 2880 pmap_t pmap; 2881 struct proc *p; 2882 int npte = 0; 2883 int index; 2884 for (p = allproc.lh_first; p != NULL; p = p->p_list.le_next) { 2885 if (p->p_pid != pid) 2886 continue; 2887 2888 if (p->p_vmspace) { 2889 int i,j; 2890 index = 0; 2891 pmap = &p->p_vmspace->vm_pmap; 2892 for(i=0;i<1024;i++) { 2893 pd_entry_t *pde; 2894 unsigned *pte; 2895 unsigned base = i << PDRSHIFT; 2896 2897 pde = &pmap->pm_pdir[i]; 2898 if (pde && pmap_pde_v(pde)) { 2899 for(j=0;j<1024;j++) { 2900 unsigned va = base + (j << PAGE_SHIFT); 2901 if (va >= (vm_offset_t) VM_MIN_KERNEL_ADDRESS
{ 2902 if (index) { 2903 index = 0; 2904 printf("\n"); 2905 } 2906 return npte; 2907 } 2908 pte = pmap_pte_quick(pmap, va); 2909 if (pte && pmap_pte_v(pte)) { 2910 vm_offset_t pa; 2911 vm_page_t m; 2912 pa = *(int *)pte; 2913 m = PHYS_TO_VM_PAGE((pa & PG_FRAME)); 2914 printf("va: 0x%x, pt: 0x%x, h: %d, w: %d, f: 0x%x", 2915 va, pa, m->hold_count, m->wire_count, m->flags); 2916 npte++; 2917 index++; 2918 if (index >= 2) { 2919 index = 0; 2920 printf("\n"); 2921 } else { 2922 printf(" "); 2923 } 2924 } 2925 } 2926 } 2927 } 2928 } 2929 } 2930 return npte; 2931} 2932#endif 2933 2934#if defined(DEBUG) 2935 2936static void pads __P((pmap_t pm)); 2937static void pmap_pvdump __P((vm_offset_t pa)); 2938 2939/* print address space of pmap */ 2940static void 2941pads(pm) 2942 pmap_t pm; 2943{ 2944 unsigned va, i, j; 2945 unsigned *ptep; 2946 2947 if (pm == kernel_pmap) 2948 return; 2949 for (i = 0; i < 1024; i++) 2950 if (pm->pm_pdir[i]) 2951 for (j = 0; j < 1024; j++) { 2952 va = (i << PDRSHIFT) + (j << PAGE_SHIFT); 2953 if (pm == kernel_pmap && va < KERNBASE) 2954 continue; 2955 if (pm != kernel_pmap && va > UPT_MAX_ADDRESS) 2956 continue; 2957 ptep = pmap_pte_quick(pm, va); 2958 if (pmap_pte_v(ptep)) 2959 printf("%x:%x ", va, *(int *) ptep); 2960 } 2961 2962} 2963 2964static void 2965pmap_pvdump(pa) 2966 vm_offset_t pa; 2967{ 2968 pv_table_t *ppv; 2969 register pv_entry_t pv; 2970 2971 printf("pa %x", pa); 2972 ppv = pa_to_pvh(pa); 2973 for (pv = TAILQ_FIRST(&ppv->pv_list); 2974 pv; 2975 pv = TAILQ_NEXT(pv, pv_list)) { 2976#ifdef used_to_be 2977 printf(" -> pmap %x, va %x, flags %x", 2978 pv->pv_pmap, pv->pv_va, pv->pv_flags); 2979#endif 2980 printf(" -> pmap %x, va %x", 2981 pv->pv_pmap, pv->pv_va); 2982 pads(pv->pv_pmap); 2983 } 2984 printf(" "); 2985} 2986#endif 2987
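/*
 * Illustrative sketch only, compiled out: pmap_example_page_state is
 * an invented helper, not part of the pmap interface, showing how the
 * referenced/modified primitives above compose.
 */
#if 0
static int
pmap_example_page_state(pa)
	vm_offset_t pa;
{
	int state = 0;

	if (!pmap_is_managed(pa))
		return (state);
	/*
	 * pmap_ts_referenced() counts the mappings with PG_A set and
	 * clears the bit as it goes, so an immediate second call
	 * returns zero.
	 */
	if (pmap_ts_referenced(pa))
		state |= 1;	/* page was recently referenced */
	/*
	 * pmap_is_modified() only tests PG_M; pmap_clear_modify() must
	 * be called separately once the dirty state has been handled.
	 */
	if (pmap_is_modified(pa))
		state |= 2;	/* page has unsaved modifications */
	return (state);
}
#endif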