/* pmap.c revision 13908 */
1/* 2 * Copyright (c) 1991 Regents of the University of California. 3 * All rights reserved. 4 * Copyright (c) 1994 John S. Dyson 5 * All rights reserved. 6 * Copyright (c) 1994 David Greenman 7 * All rights reserved. 8 * 9 * This code is derived from software contributed to Berkeley by 10 * the Systems Programming Group of the University of Utah Computer 11 * Science Department and William Jolitz of UUNET Technologies Inc. 12 * 13 * Redistribution and use in source and binary forms, with or without 14 * modification, are permitted provided that the following conditions 15 * are met: 16 * 1. Redistributions of source code must retain the above copyright 17 * notice, this list of conditions and the following disclaimer. 18 * 2. Redistributions in binary form must reproduce the above copyright 19 * notice, this list of conditions and the following disclaimer in the 20 * documentation and/or other materials provided with the distribution. 21 * 3. All advertising materials mentioning features or use of this software 22 * must display the following acknowledgement: 23 * This product includes software developed by the University of 24 * California, Berkeley and its contributors. 25 * 4. Neither the name of the University nor the names of its contributors 26 * may be used to endorse or promote products derived from this software 27 * without specific prior written permission. 28 * 29 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 30 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 31 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 32 * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 33 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 34 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 35 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 36 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 37 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 38 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 39 * SUCH DAMAGE. 40 * 41 * from: @(#)pmap.c 7.7 (Berkeley) 5/12/91 42 * $Id: pmap.c,v 1.74 1996/01/19 14:19:56 peter Exp $ 43 */ 44 45/* 46 * Derived from hp300 version by Mike Hibler, this version by William 47 * Jolitz uses a recursive map [a pde points to the page directory] to 48 * map the page tables using the pagetables themselves. This is done to 49 * reduce the impact on kernel virtual memory for lots of sparse address 50 * space, and to reduce the cost of memory to each process. 51 * 52 * Derived from: hp300/@(#)pmap.c 7.1 (Berkeley) 12/5/90 53 */ 54/* 55 * Major modifications by John S. Dyson primarily to support 56 * pageable page tables, eliminating pmap_attributes, 57 * discontiguous memory pages, and using more efficient string 58 * instructions. Jan 13, 1994. Further modifications on Mar 2, 1994, 59 * general clean-up and efficiency mods. 60 */ 61 62/* 63 * Manages physical address maps. 64 * 65 * In addition to hardware address maps, this 66 * module is called upon to provide software-use-only 67 * maps which may or may not be stored in the same 68 * form as hardware maps. These pseudo-maps are 69 * used to store intermediate results from copy 70 * operations to and from address spaces. 71 * 72 * Since the information managed by this module is 73 * also stored by the logical address mapping module, 74 * this module may throw away valid virtual-to-physical 75 * mappings at almost any time. 
However, invalidations 76 * of virtual-to-physical mappings must be done as 77 * requested. 78 * 79 * In order to cope with hardware architectures which 80 * make virtual-to-physical map invalidates expensive, 81 * this module may delay invalidate or reduced protection 82 * operations until such time as they are actually 83 * necessary. This module is given full information as 84 * to which processors are currently using which maps, 85 * and to when physical maps must be made correct. 86 */ 87 88#include <sys/param.h> 89#include <sys/systm.h> 90#include <sys/proc.h> 91#include <sys/malloc.h> 92#include <sys/msgbuf.h> 93#include <sys/queue.h> 94#include <sys/vmmeter.h> 95 96#include <vm/vm.h> 97#include <vm/vm_param.h> 98#include <vm/vm_prot.h> 99#include <vm/lock.h> 100#include <vm/vm_kern.h> 101#include <vm/vm_page.h> 102#include <vm/vm_map.h> 103#include <vm/vm_object.h> 104#include <vm/vm_extern.h> 105 106#include <machine/pcb.h> 107#include <machine/cputypes.h> 108#include <machine/md_var.h> 109 110#include <i386/isa/isa.h> 111 112#define PMAP_KEEP_PDIRS 113 114static void init_pv_entries __P((int)); 115 116/* 117 * Get PDEs and PTEs for user/kernel address space 118 */ 119#define pmap_pde(m, v) (&((m)->pm_pdir[((vm_offset_t)(v) >> PD_SHIFT)&1023])) 120#define pdir_pde(m, v) (m[((vm_offset_t)(v) >> PD_SHIFT)&1023]) 121 122#define pmap_pte_pa(pte) (*(int *)(pte) & PG_FRAME) 123 124#define pmap_pde_v(pte) ((*(int *)pte & PG_V) != 0) 125#define pmap_pte_w(pte) ((*(int *)pte & PG_W) != 0) 126#define pmap_pte_m(pte) ((*(int *)pte & PG_M) != 0) 127#define pmap_pte_u(pte) ((*(int *)pte & PG_U) != 0) 128#define pmap_pte_v(pte) ((*(int *)pte & PG_V) != 0) 129 130#define pmap_pte_set_w(pte, v) ((v)?(*(int *)pte |= PG_W):(*(int *)pte &= ~PG_W)) 131#define pmap_pte_set_prot(pte, v) ((*(int *)pte &= ~PG_PROT), (*(int *)pte |= (v))) 132 133/* 134 * Given a map and a machine independent protection code, 135 * convert to a vax protection code. 
136 */ 137#define pte_prot(m, p) (protection_codes[p]) 138static int protection_codes[8]; 139 140static struct pmap kernel_pmap_store; 141pmap_t kernel_pmap; 142 143vm_offset_t avail_start; /* PA of first available physical page */ 144vm_offset_t avail_end; /* PA of last available physical page */ 145vm_offset_t virtual_avail; /* VA of first avail page (after kernel bss) */ 146vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */ 147static boolean_t pmap_initialized = FALSE; /* Has pmap_init completed? */ 148static vm_offset_t vm_first_phys; 149 150static int nkpt; 151 152extern vm_offset_t clean_sva, clean_eva; 153extern int cpu_class; 154 155/* 156 * All those kernel PT submaps that BSD is so fond of 157 */ 158pt_entry_t *CMAP1; 159static pt_entry_t *CMAP2, *ptmmap; 160static pv_entry_t pv_table; 161caddr_t CADDR1, ptvmmap; 162static caddr_t CADDR2; 163static pt_entry_t *msgbufmap; 164struct msgbuf *msgbufp; 165 166static void free_pv_entry __P((pv_entry_t pv)); 167static pt_entry_t * 168 get_pt_entry __P((pmap_t pmap)); 169static pv_entry_t 170 get_pv_entry __P((void)); 171static void i386_protection_init __P((void)); 172static void pmap_alloc_pv_entry __P((void)); 173static void pmap_changebit __P((vm_offset_t pa, int bit, boolean_t setem)); 174static void pmap_enter_quick __P((pmap_t pmap, vm_offset_t va, 175 vm_offset_t pa)); 176static int pmap_is_managed __P((vm_offset_t pa)); 177static void pmap_remove_all __P((vm_offset_t pa)); 178static void pmap_remove_entry __P((struct pmap *pmap, pv_entry_t pv, 179 vm_offset_t va)); 180static vm_page_t 181 pmap_pte_vm_page __P((pmap_t pmap, vm_offset_t pt)); 182static boolean_t 183 pmap_testbit __P((vm_offset_t pa, int bit)); 184static void * pmap_getpdir __P((void)); 185static void pmap_prefault __P((pmap_t pmap, vm_offset_t addra, 186 vm_map_entry_t entry, vm_object_t object)); 187 188/* 189 * The below are finer grained pmap_update routines. 
These eliminate 190 * the gratuitious tlb flushes on non-i386 architectures. 191 */ 192static __inline void 193pmap_update_1pg( vm_offset_t va) { 194#if defined(I386_CPU) 195 if (cpu_class == CPUCLASS_386) 196 pmap_update(); 197 else 198#endif 199 __asm __volatile(".byte 0xf,0x1,0x38": :"a" (va)); 200} 201 202static __inline void 203pmap_update_2pg( vm_offset_t va1, vm_offset_t va2) { 204#if defined(I386_CPU) 205 if (cpu_class == CPUCLASS_386) { 206 pmap_update(); 207 } else 208#endif 209 { 210 __asm __volatile(".byte 0xf,0x1,0x38": :"a" (va1)); 211 __asm __volatile(".byte 0xf,0x1,0x38": :"a" (va2)); 212 } 213} 214 215/* 216 * Routine: pmap_pte 217 * Function: 218 * Extract the page table entry associated 219 * with the given map/virtual_address pair. 220 * [ what about induced faults -wfj] 221 */ 222 223__inline pt_entry_t * __pure 224pmap_pte(pmap, va) 225 register pmap_t pmap; 226 vm_offset_t va; 227{ 228 229 if (pmap && *pmap_pde(pmap, va)) { 230 vm_offset_t frame = (int) pmap->pm_pdir[PTDPTDI] & PG_FRAME; 231 232 /* are we current address space or kernel? */ 233 if ((pmap == kernel_pmap) || (frame == ((int) PTDpde & PG_FRAME))) 234 return ((pt_entry_t *) vtopte(va)); 235 /* otherwise, we are alternate address space */ 236 else { 237 if (frame != ((int) APTDpde & PG_FRAME)) { 238 APTDpde = pmap->pm_pdir[PTDPTDI]; 239 pmap_update(); 240 } 241 return ((pt_entry_t *) avtopte(va)); 242 } 243 } 244 return (0); 245} 246 247/* 248 * Routine: pmap_extract 249 * Function: 250 * Extract the physical page address associated 251 * with the given map/virtual_address pair. 252 */ 253 254vm_offset_t 255pmap_extract(pmap, va) 256 register pmap_t pmap; 257 vm_offset_t va; 258{ 259 vm_offset_t pa; 260 261 if (pmap && *pmap_pde(pmap, va)) { 262 vm_offset_t frame = (int) pmap->pm_pdir[PTDPTDI] & PG_FRAME; 263 264 /* are we current address space or kernel? 
*/ 265 if ((pmap == kernel_pmap) 266 || (frame == ((int) PTDpde & PG_FRAME))) { 267 pa = *(int *) vtopte(va); 268 /* otherwise, we are alternate address space */ 269 } else { 270 if (frame != ((int) APTDpde & PG_FRAME)) { 271 APTDpde = pmap->pm_pdir[PTDPTDI]; 272 pmap_update(); 273 } 274 pa = *(int *) avtopte(va); 275 } 276 return ((pa & PG_FRAME) | (va & ~PG_FRAME)); 277 } 278 return 0; 279 280} 281 282/* 283 * determine if a page is managed (memory vs. device) 284 */ 285static __inline int 286pmap_is_managed(pa) 287 vm_offset_t pa; 288{ 289 int i; 290 291 if (!pmap_initialized) 292 return 0; 293 294 for (i = 0; phys_avail[i + 1]; i += 2) { 295 if (pa >= phys_avail[i] && pa < phys_avail[i + 1]) 296 return 1; 297 } 298 return 0; 299} 300 301/* 302 * find the vm_page_t of a pte (only) given va of pte and pmap 303 */ 304static __inline vm_page_t 305pmap_pte_vm_page(pmap, pt) 306 pmap_t pmap; 307 vm_offset_t pt; 308{ 309 vm_page_t m; 310 311 pt = trunc_page(pt); 312 pt = (pt - UPT_MIN_ADDRESS) / PAGE_SIZE; 313 pt = ((vm_offset_t) pmap->pm_pdir[pt]) & PG_FRAME; 314 m = PHYS_TO_VM_PAGE(pt); 315 return m; 316} 317 318/* 319 * Wire a page table page 320 */ 321__inline void 322pmap_use_pt(pmap, va) 323 pmap_t pmap; 324 vm_offset_t va; 325{ 326 vm_offset_t pt; 327 328 if ((va >= UPT_MIN_ADDRESS) || !pmap_initialized) 329 return; 330 331 pt = (vm_offset_t) vtopte(va); 332 vm_page_hold(pmap_pte_vm_page(pmap, pt)); 333} 334 335/* 336 * Unwire a page table page 337 */ 338__inline void 339pmap_unuse_pt(pmap, va) 340 pmap_t pmap; 341 vm_offset_t va; 342{ 343 vm_offset_t pt; 344 vm_page_t m; 345 346 if ((va >= UPT_MIN_ADDRESS) || !pmap_initialized) 347 return; 348 349 pt = (vm_offset_t) vtopte(va); 350 m = pmap_pte_vm_page(pmap, pt); 351 vm_page_unhold(m); 352 if (pmap != kernel_pmap && 353 (m->hold_count == 0) && 354 (m->wire_count == 0) && 355 (va < KPT_MIN_ADDRESS)) { 356/* 357 * We don't free page-table-pages anymore because it can have a negative 358 * impact on perf at 
times. Now we just deactivate, and it'll get cleaned 359 * up if needed... Also, if the page ends up getting used, it will fault 360 * back into the process address space and be reactivated. 361 */ 362#ifdef PMAP_FREE_OLD_PTES 363 pmap_page_protect(VM_PAGE_TO_PHYS(m), VM_PROT_NONE); 364 vm_page_free(m); 365#else 366 m->dirty = 0; 367 vm_page_deactivate(m); 368#endif 369 } 370} 371 372/* 373 * Bootstrap the system enough to run with virtual memory. 374 * 375 * On the i386 this is called after mapping has already been enabled 376 * and just syncs the pmap module with what has already been done. 377 * [We can't call it easily with mapping off since the kernel is not 378 * mapped with PA == VA, hence we would have to relocate every address 379 * from the linked base (virtual) address "KERNBASE" to the actual 380 * (physical) address starting relative to 0] 381 */ 382void 383pmap_bootstrap(firstaddr, loadaddr) 384 vm_offset_t firstaddr; 385 vm_offset_t loadaddr; 386{ 387 vm_offset_t va; 388 pt_entry_t *pte; 389 390 avail_start = firstaddr; 391 392 /* 393 * XXX The calculation of virtual_avail is wrong. It's NKPT*PAGE_SIZE too 394 * large. It should instead be correctly calculated in locore.s and 395 * not based on 'first' (which is a physical address, not a virtual 396 * address, for the start of unused physical memory). The kernel 397 * page tables are NOT double mapped and thus should not be included 398 * in this calculation. 399 */ 400 virtual_avail = (vm_offset_t) KERNBASE + firstaddr; 401 virtual_end = VM_MAX_KERNEL_ADDRESS; 402 403 /* 404 * Initialize protection array. 405 */ 406 i386_protection_init(); 407 408 /* 409 * The kernel's pmap is statically allocated so we don't have to use 410 * pmap_create, which is unlikely to work correctly at this part of 411 * the boot sequence (XXX and which no longer exists). 
412 */ 413 kernel_pmap = &kernel_pmap_store; 414 415 kernel_pmap->pm_pdir = (pd_entry_t *) (KERNBASE + IdlePTD); 416 417 kernel_pmap->pm_count = 1; 418 nkpt = NKPT; 419 420 /* 421 * Reserve some special page table entries/VA space for temporary 422 * mapping of pages. 423 */ 424#define SYSMAP(c, p, v, n) \ 425 v = (c)va; va += ((n)*PAGE_SIZE); p = pte; pte += (n); 426 427 va = virtual_avail; 428 pte = pmap_pte(kernel_pmap, va); 429 430 /* 431 * CMAP1/CMAP2 are used for zeroing and copying pages. 432 */ 433 SYSMAP(caddr_t, CMAP1, CADDR1, 1) 434 SYSMAP(caddr_t, CMAP2, CADDR2, 1) 435 436 /* 437 * ptmmap is used for reading arbitrary physical pages via /dev/mem. 438 */ 439 SYSMAP(caddr_t, ptmmap, ptvmmap, 1) 440 441 /* 442 * msgbufmap is used to map the system message buffer. 443 */ 444 SYSMAP(struct msgbuf *, msgbufmap, msgbufp, 1) 445 446 virtual_avail = va; 447 448 *(int *) CMAP1 = *(int *) CMAP2 = *(int *) PTD = 0; 449 pmap_update(); 450} 451 452/* 453 * Initialize the pmap module. 454 * Called by vm_init, to initialize any structures that the pmap 455 * system needs to map virtual memory. 456 * pmap_init has been enhanced to support in a fairly consistant 457 * way, discontiguous physical memory. 458 */ 459void 460pmap_init(phys_start, phys_end) 461 vm_offset_t phys_start, phys_end; 462{ 463 vm_offset_t addr; 464 vm_size_t npg, s; 465 int i; 466 467 /* 468 * calculate the number of pv_entries needed 469 */ 470 vm_first_phys = phys_avail[0]; 471 for (i = 0; phys_avail[i + 1]; i += 2); 472 npg = (phys_avail[(i - 2) + 1] - vm_first_phys) / PAGE_SIZE; 473 474 /* 475 * Allocate memory for random pmap data structures. Includes the 476 * pv_head_table. 477 */ 478 s = (vm_size_t) (sizeof(struct pv_entry) * npg); 479 s = round_page(s); 480 addr = (vm_offset_t) kmem_alloc(kernel_map, s); 481 pv_table = (pv_entry_t) addr; 482 483 /* 484 * init the pv free list 485 */ 486 init_pv_entries(npg); 487 /* 488 * Now it is safe to enable pv_table recording. 
489 */ 490 pmap_initialized = TRUE; 491} 492 493/* 494 * Used to map a range of physical addresses into kernel 495 * virtual address space. 496 * 497 * For now, VM is already on, we only need to map the 498 * specified memory. 499 */ 500vm_offset_t 501pmap_map(virt, start, end, prot) 502 vm_offset_t virt; 503 vm_offset_t start; 504 vm_offset_t end; 505 int prot; 506{ 507 while (start < end) { 508 pmap_enter(kernel_pmap, virt, start, prot, FALSE); 509 virt += PAGE_SIZE; 510 start += PAGE_SIZE; 511 } 512 return (virt); 513} 514 515#ifdef PMAP_KEEP_PDIRS 516int nfreepdir; 517caddr_t *pdirlist; 518#define NFREEPDIR 3 519 520static void * 521pmap_getpdir() { 522 caddr_t *pdir; 523 if (pdirlist) { 524 --nfreepdir; 525 pdir = pdirlist; 526 pdirlist = (caddr_t *) *pdir; 527 bzero( (caddr_t) pdir, PAGE_SIZE); 528 } else { 529 pdir = (caddr_t *) kmem_alloc(kernel_map, PAGE_SIZE); 530 } 531 532 return (void *) pdir; 533} 534 535static void 536pmap_freepdir(void *pdir) { 537 if (nfreepdir > NFREEPDIR) { 538 kmem_free(kernel_map, (vm_offset_t) pdir, PAGE_SIZE); 539 } else { 540 * (caddr_t *) pdir = (caddr_t) pdirlist; 541 pdirlist = (caddr_t *) pdir; 542 ++nfreepdir; 543 } 544} 545#endif 546 547/* 548 * Initialize a preallocated and zeroed pmap structure, 549 * such as one in a vmspace structure. 550 */ 551void 552pmap_pinit(pmap) 553 register struct pmap *pmap; 554{ 555 /* 556 * No need to allocate page table space yet but we do need a valid 557 * page directory table. 
558 */ 559 560#ifdef PMAP_KEEP_PDIRS 561 pmap->pm_pdir = pmap_getpdir(); 562#else 563 pmap->pm_pdir = (pd_entry_t *) kmem_alloc(kernel_map, PAGE_SIZE); 564#endif 565 566 /* wire in kernel global address entries */ 567 bcopy(PTD + KPTDI, pmap->pm_pdir + KPTDI, nkpt * PTESIZE); 568 569 /* install self-referential address mapping entry */ 570 *(int *) (pmap->pm_pdir + PTDPTDI) = 571 ((int) pmap_kextract((vm_offset_t) pmap->pm_pdir)) | PG_V | PG_KW; 572 573 pmap->pm_count = 1; 574} 575 576/* 577 * grow the number of kernel page table entries, if needed 578 */ 579 580static vm_page_t nkpg; 581vm_offset_t kernel_vm_end; 582 583void 584pmap_growkernel(vm_offset_t addr) 585{ 586 struct proc *p; 587 struct pmap *pmap; 588 int s; 589 590 s = splhigh(); 591 if (kernel_vm_end == 0) { 592 kernel_vm_end = KERNBASE; 593 nkpt = 0; 594 while (pdir_pde(PTD, kernel_vm_end)) { 595 kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1); 596 ++nkpt; 597 } 598 } 599 addr = (addr + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1); 600 while (kernel_vm_end < addr) { 601 if (pdir_pde(PTD, kernel_vm_end)) { 602 kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1); 603 continue; 604 } 605 ++nkpt; 606 if (!nkpg) { 607 nkpg = vm_page_alloc(kernel_object, 0, VM_ALLOC_SYSTEM); 608 if (!nkpg) 609 panic("pmap_growkernel: no memory to grow kernel"); 610 vm_page_wire(nkpg); 611 vm_page_remove(nkpg); 612 pmap_zero_page(VM_PAGE_TO_PHYS(nkpg)); 613 } 614 pdir_pde(PTD, kernel_vm_end) = (pd_entry_t) (VM_PAGE_TO_PHYS(nkpg) | PG_V | PG_KW); 615 nkpg = NULL; 616 617 for (p = (struct proc *) allproc; p != NULL; p = p->p_next) { 618 if (p->p_vmspace) { 619 pmap = &p->p_vmspace->vm_pmap; 620 *pmap_pde(pmap, kernel_vm_end) = pdir_pde(PTD, kernel_vm_end); 621 } 622 } 623 *pmap_pde(kernel_pmap, kernel_vm_end) = pdir_pde(PTD, kernel_vm_end); 624 kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1); 625 } 626 splx(s); 627} 628 629/* 630 
* Retire the given physical map from service. 631 * Should only be called if the map contains 632 * no valid mappings. 633 */ 634void 635pmap_destroy(pmap) 636 register pmap_t pmap; 637{ 638 int count; 639 640 if (pmap == NULL) 641 return; 642 643 count = --pmap->pm_count; 644 if (count == 0) { 645 pmap_release(pmap); 646 free((caddr_t) pmap, M_VMPMAP); 647 } 648} 649 650/* 651 * Release any resources held by the given physical map. 652 * Called when a pmap initialized by pmap_pinit is being released. 653 * Should only be called if the map contains no valid mappings. 654 */ 655void 656pmap_release(pmap) 657 register struct pmap *pmap; 658{ 659#ifdef PMAP_KEEP_PDIRS 660 pmap_freepdir( (void *)pmap->pm_pdir); 661#else 662 kmem_free(kernel_map, (vm_offset_t) pmap->pm_pdir, PAGE_SIZE); 663#endif 664} 665 666/* 667 * Add a reference to the specified pmap. 668 */ 669void 670pmap_reference(pmap) 671 pmap_t pmap; 672{ 673 if (pmap != NULL) { 674 pmap->pm_count++; 675 } 676} 677 678#define PV_FREELIST_MIN ((PAGE_SIZE / sizeof (struct pv_entry)) / 2) 679 680/* 681 * Data for the pv entry allocation mechanism 682 */ 683static int pv_freelistcnt; 684static pv_entry_t pv_freelist; 685static vm_offset_t pvva; 686static int npvvapg; 687 688/* 689 * free the pv_entry back to the free list 690 */ 691static __inline void 692free_pv_entry(pv) 693 pv_entry_t pv; 694{ 695 if (!pv) 696 return; 697 ++pv_freelistcnt; 698 pv->pv_next = pv_freelist; 699 pv_freelist = pv; 700} 701 702/* 703 * get a new pv_entry, allocating a block from the system 704 * when needed. 705 * the memory allocation is performed bypassing the malloc code 706 * because of the possibility of allocations at interrupt time. 
707 */ 708static __inline pv_entry_t 709get_pv_entry() 710{ 711 pv_entry_t tmp; 712 713 /* 714 * get more pv_entry pages if needed 715 */ 716 if (pv_freelistcnt < PV_FREELIST_MIN || pv_freelist == 0) { 717 pmap_alloc_pv_entry(); 718 } 719 /* 720 * get a pv_entry off of the free list 721 */ 722 --pv_freelistcnt; 723 tmp = pv_freelist; 724 pv_freelist = tmp->pv_next; 725 return tmp; 726} 727 728/* 729 * this *strange* allocation routine *statistically* eliminates the 730 * *possibility* of a malloc failure (*FATAL*) for a pv_entry_t data structure. 731 * also -- this code is MUCH MUCH faster than the malloc equiv... 732 */ 733static void 734pmap_alloc_pv_entry() 735{ 736 /* 737 * do we have any pre-allocated map-pages left? 738 */ 739 if (npvvapg) { 740 vm_page_t m; 741 742 /* 743 * we do this to keep recursion away 744 */ 745 pv_freelistcnt += PV_FREELIST_MIN; 746 /* 747 * allocate a physical page out of the vm system 748 */ 749 m = vm_page_alloc(kernel_object, 750 OFF_TO_IDX(pvva - vm_map_min(kernel_map)), 751 VM_ALLOC_INTERRUPT); 752 if (m) { 753 int newentries; 754 int i; 755 pv_entry_t entry; 756 757 newentries = (PAGE_SIZE / sizeof(struct pv_entry)); 758 /* 759 * wire the page 760 */ 761 vm_page_wire(m); 762 m->flags &= ~PG_BUSY; 763 /* 764 * let the kernel see it 765 */ 766 pmap_kenter(pvva, VM_PAGE_TO_PHYS(m)); 767 768 entry = (pv_entry_t) pvva; 769 /* 770 * update the allocation pointers 771 */ 772 pvva += PAGE_SIZE; 773 --npvvapg; 774 775 /* 776 * free the entries into the free list 777 */ 778 for (i = 0; i < newentries; i++) { 779 free_pv_entry(entry); 780 entry++; 781 } 782 } 783 pv_freelistcnt -= PV_FREELIST_MIN; 784 } 785 if (!pv_freelist) 786 panic("get_pv_entry: cannot get a pv_entry_t"); 787} 788 789 790 791/* 792 * init the pv_entry allocation system 793 */ 794#define PVSPERPAGE 64 795void 796init_pv_entries(npg) 797 int npg; 798{ 799 /* 800 * allocate enough kvm space for PVSPERPAGE entries per page (lots) 801 * kvm space is fairly cheap, be 
generous!!! (the system can panic if 802 * this is too small.) 803 */ 804 npvvapg = ((npg * PVSPERPAGE) * sizeof(struct pv_entry) 805 + PAGE_SIZE - 1) / PAGE_SIZE; 806 pvva = kmem_alloc_pageable(kernel_map, npvvapg * PAGE_SIZE); 807 /* 808 * get the first batch of entries 809 */ 810 free_pv_entry(get_pv_entry()); 811} 812 813static pt_entry_t * 814get_pt_entry(pmap) 815 pmap_t pmap; 816{ 817 vm_offset_t frame = (int) pmap->pm_pdir[PTDPTDI] & PG_FRAME; 818 819 /* are we current address space or kernel? */ 820 if (pmap == kernel_pmap || frame == ((int) PTDpde & PG_FRAME)) { 821 return PTmap; 822 } 823 /* otherwise, we are alternate address space */ 824 if (frame != ((int) APTDpde & PG_FRAME)) { 825 APTDpde = pmap->pm_pdir[PTDPTDI]; 826 pmap_update(); 827 } 828 return APTmap; 829} 830 831/* 832 * If it is the first entry on the list, it is actually 833 * in the header and we must copy the following entry up 834 * to the header. Otherwise we must search the list for 835 * the entry. In either case we free the now unused entry. 836 */ 837static void 838pmap_remove_entry(pmap, pv, va) 839 struct pmap *pmap; 840 pv_entry_t pv; 841 vm_offset_t va; 842{ 843 pv_entry_t npv; 844 int s; 845 846 s = splhigh(); 847 if (pmap == pv->pv_pmap && va == pv->pv_va) { 848 npv = pv->pv_next; 849 if (npv) { 850 *pv = *npv; 851 free_pv_entry(npv); 852 } else { 853 pv->pv_pmap = NULL; 854 } 855 } else { 856 for (npv = pv->pv_next; npv; (pv = npv, npv = pv->pv_next)) { 857 if (pmap == npv->pv_pmap && va == npv->pv_va) { 858 break; 859 } 860 } 861 if (npv) { 862 pv->pv_next = npv->pv_next; 863 free_pv_entry(npv); 864 } 865 } 866 splx(s); 867} 868 869/* 870 * Remove the given range of addresses from the specified map. 871 * 872 * It is assumed that the start and end are properly 873 * rounded to the page size. 
874 */ 875void 876pmap_remove(pmap, sva, eva) 877 struct pmap *pmap; 878 register vm_offset_t sva; 879 register vm_offset_t eva; 880{ 881 register pt_entry_t *ptp, *ptq; 882 vm_offset_t pa; 883 register pv_entry_t pv; 884 vm_offset_t va; 885 pt_entry_t oldpte; 886 887 if (pmap == NULL) 888 return; 889 890 ptp = get_pt_entry(pmap); 891 892 /* 893 * special handling of removing one page. a very 894 * common operation and easy to short circuit some 895 * code. 896 */ 897 if ((sva + PAGE_SIZE) == eva) { 898 899 if (*pmap_pde(pmap, sva) == 0) 900 return; 901 902 ptq = ptp + i386_btop(sva); 903 904 if (!*ptq) 905 return; 906 /* 907 * Update statistics 908 */ 909 if (pmap_pte_w(ptq)) 910 pmap->pm_stats.wired_count--; 911 pmap->pm_stats.resident_count--; 912 913 pa = pmap_pte_pa(ptq); 914 oldpte = *ptq; 915 *ptq = 0; 916 917 if (pmap_is_managed(pa)) { 918 if ((int) oldpte & PG_M) { 919 if (sva < USRSTACK + (UPAGES * PAGE_SIZE) || 920 (sva >= KERNBASE && (sva < clean_sva || sva >= clean_eva))) { 921 PHYS_TO_VM_PAGE(pa)->dirty |= VM_PAGE_BITS_ALL; 922 } 923 } 924 pv = pa_to_pvh(pa); 925 pmap_remove_entry(pmap, pv, sva); 926 } 927 pmap_unuse_pt(pmap, sva); 928 pmap_update_1pg(sva); 929 return; 930 } 931 sva = i386_btop(sva); 932 eva = i386_btop(eva); 933 934 while (sva < eva) { 935 /* 936 * Weed out invalid mappings. Note: we assume that the page 937 * directory table is always allocated, and in kernel virtual. 938 */ 939 940 if (*pmap_pde(pmap, i386_ptob(sva)) == 0) { 941 /* We can race ahead here, straight to next pde.. */ 942 sva = ((sva + NPTEPG) & ~(NPTEPG - 1)); 943 continue; 944 } 945 ptq = ptp + sva; 946 947 /* 948 * search for page table entries, use string operations that 949 * are much faster than explicitly scanning when page tables 950 * are not fully populated. 
951 */ 952 if (*ptq == 0) { 953 vm_offset_t pdnxt = ((sva + NPTEPG) & ~(NPTEPG - 1)); 954 vm_offset_t nscan = pdnxt - sva; 955 int found = 0; 956 957 if ((nscan + sva) > eva) 958 nscan = eva - sva; 959 960 asm("xorl %%eax,%%eax;cld;repe;scasl;jz 1f;incl %%eax;1:;" : 961 "=D"(ptq), "=a"(found) : "c"(nscan), "0"(ptq) : "cx"); 962 963 if (!found) { 964 sva = pdnxt; 965 continue; 966 } 967 ptq -= 1; 968 969 sva = ptq - ptp; 970 } 971 /* 972 * Update statistics 973 */ 974 oldpte = *ptq; 975 if (((int) oldpte) & PG_W) 976 pmap->pm_stats.wired_count--; 977 pmap->pm_stats.resident_count--; 978 979 /* 980 * Invalidate the PTEs. XXX: should cluster them up and 981 * invalidate as many as possible at once. 982 */ 983 *ptq = 0; 984 985 va = i386_ptob(sva); 986 987 /* 988 * Remove from the PV table (raise IPL since we may be called 989 * at interrupt time). 990 */ 991 pa = ((int) oldpte) & PG_FRAME; 992 if (!pmap_is_managed(pa)) { 993 pmap_unuse_pt(pmap, (vm_offset_t) va); 994 ++sva; 995 continue; 996 } 997 if ((int) oldpte & PG_M) { 998 if (sva < USRSTACK + (UPAGES * PAGE_SIZE) || 999 (sva >= KERNBASE && (sva < clean_sva || sva >= clean_eva))) { 1000 PHYS_TO_VM_PAGE(pa)->dirty |= VM_PAGE_BITS_ALL; 1001 } 1002 } 1003 pv = pa_to_pvh(pa); 1004 pmap_remove_entry(pmap, pv, va); 1005 pmap_unuse_pt(pmap, va); 1006 ++sva; 1007 } 1008 pmap_update(); 1009} 1010 1011/* 1012 * Routine: pmap_remove_all 1013 * Function: 1014 * Removes this physical page from 1015 * all physical maps in which it resides. 1016 * Reflects back modify bits to the pager. 1017 * 1018 * Notes: 1019 * Original versions of this routine were very 1020 * inefficient because they iteratively called 1021 * pmap_remove (slow...) 
1022 */ 1023static void 1024pmap_remove_all(pa) 1025 vm_offset_t pa; 1026{ 1027 register pv_entry_t pv, opv, npv; 1028 register pt_entry_t *pte, *ptp; 1029 vm_offset_t va; 1030 struct pmap *pmap; 1031 vm_page_t m; 1032 int s; 1033 int anyvalid = 0; 1034 1035 /* 1036 * Not one of ours 1037 */ 1038 /* 1039 * XXX this makes pmap_page_protect(NONE) illegal for non-managed 1040 * pages! 1041 */ 1042 if (!pmap_is_managed(pa)) 1043 return; 1044 1045 pa = trunc_page(pa); 1046 opv = pa_to_pvh(pa); 1047 if (opv->pv_pmap == NULL) 1048 return; 1049 1050 m = PHYS_TO_VM_PAGE(pa); 1051 s = splhigh(); 1052 pv = opv; 1053 while (pv && ((pmap = pv->pv_pmap) != NULL)) { 1054 ptp = get_pt_entry(pmap); 1055 va = pv->pv_va; 1056 pte = ptp + i386_btop(va); 1057 if (pmap_pte_w(pte)) 1058 pmap->pm_stats.wired_count--; 1059 if (*pte) { 1060 pmap->pm_stats.resident_count--; 1061 if (curproc != pageproc) 1062 anyvalid++; 1063 1064 /* 1065 * Update the vm_page_t clean and reference bits. 1066 */ 1067 if ((int) *pte & PG_M) { 1068 if (va < USRSTACK + (UPAGES * PAGE_SIZE) || 1069 (va >= KERNBASE && (va < clean_sva || va >= clean_eva))) { 1070 PHYS_TO_VM_PAGE(pa)->dirty |= VM_PAGE_BITS_ALL; 1071 } 1072 } 1073 *pte = 0; 1074 pmap_unuse_pt(pmap, va); 1075 } 1076 pv = pv->pv_next; 1077 } 1078 1079 for (pv = opv->pv_next; pv; pv = npv) { 1080 npv = pv->pv_next; 1081 free_pv_entry(pv); 1082 } 1083 1084 opv->pv_pmap = NULL; 1085 opv->pv_next = NULL; 1086 1087 splx(s); 1088 if (anyvalid) 1089 pmap_update(); 1090} 1091 1092 1093/* 1094 * Set the physical protection on the 1095 * specified range of this map as requested. 
1096 */ 1097void 1098pmap_protect(pmap, sva, eva, prot) 1099 register pmap_t pmap; 1100 vm_offset_t sva, eva; 1101 vm_prot_t prot; 1102{ 1103 register pt_entry_t *pte; 1104 register vm_offset_t va; 1105 int i386prot; 1106 register pt_entry_t *ptp; 1107 int evap = i386_btop(eva); 1108 int anyvalid = 0;; 1109 1110 if (pmap == NULL) 1111 return; 1112 1113 if ((prot & VM_PROT_READ) == VM_PROT_NONE) { 1114 pmap_remove(pmap, sva, eva); 1115 return; 1116 } 1117 if (prot & VM_PROT_WRITE) 1118 return; 1119 1120 ptp = get_pt_entry(pmap); 1121 1122 va = sva; 1123 while (va < eva) { 1124 int found = 0; 1125 int svap; 1126 vm_offset_t nscan; 1127 1128 /* 1129 * Page table page is not allocated. Skip it, we don't want to 1130 * force allocation of unnecessary PTE pages just to set the 1131 * protection. 1132 */ 1133 if (!*pmap_pde(pmap, va)) { 1134 /* XXX: avoid address wrap around */ 1135 nextpde: 1136 if (va >= i386_trunc_pdr((vm_offset_t) - 1)) 1137 break; 1138 va = i386_round_pdr(va + PAGE_SIZE); 1139 continue; 1140 } 1141 pte = ptp + i386_btop(va); 1142 1143 if (*pte == 0) { 1144 /* 1145 * scan for a non-empty pte 1146 */ 1147 svap = pte - ptp; 1148 nscan = ((svap + NPTEPG) & ~(NPTEPG - 1)) - svap; 1149 1150 if (nscan + svap > evap) 1151 nscan = evap - svap; 1152 1153 found = 0; 1154 if (nscan) 1155 asm("xorl %%eax,%%eax;cld;repe;scasl;jz 1f;incl %%eax;1:;" : 1156 "=D"(pte), "=a"(found) : "c"(nscan), "0"(pte) : "cx"); 1157 1158 if (!found) 1159 goto nextpde; 1160 1161 pte -= 1; 1162 svap = pte - ptp; 1163 1164 va = i386_ptob(svap); 1165 } 1166 anyvalid++; 1167 1168 i386prot = pte_prot(pmap, prot); 1169 if (va < UPT_MAX_ADDRESS) { 1170 i386prot |= PG_u; 1171 if (va >= UPT_MIN_ADDRESS) 1172 i386prot |= PG_RW; 1173 } 1174 pmap_pte_set_prot(pte, i386prot); 1175 va += PAGE_SIZE; 1176 } 1177 if (anyvalid) 1178 pmap_update(); 1179} 1180 1181/* 1182 * Insert the given physical page (p) at 1183 * the specified virtual address (v) in the 1184 * target physical map with the protection 
 *	requested.
 *
 *	If specified, the page will be wired down, meaning
 *	that the related pte can not be reclaimed.
 *
 *	NB:  This is the only routine which MAY NOT lazy-evaluate
 *	or lose information.  That is, this routine must actually
 *	insert this page into the given map NOW.
 */
void
pmap_enter(pmap, va, pa, prot, wired)
	register pmap_t pmap;
	vm_offset_t va;
	register vm_offset_t pa;
	vm_prot_t prot;
	boolean_t wired;
{
	register pt_entry_t *pte;
	register pt_entry_t npte;
	vm_offset_t opa;
	int ptevalid = 0;

	if (pmap == NULL)
		return;

	va = trunc_page(va);
	pa = trunc_page(pa);
	if (va > VM_MAX_KERNEL_ADDRESS)
		panic("pmap_enter: toobig");

	/*
	 * Page Directory table entry not valid, we need a new PT page
	 */
	if (*pmap_pde(pmap, va) == 0) {
		printf("kernel page directory invalid pdir=%p, va=0x%lx\n",
			pmap->pm_pdir[PTDPTDI], va);
		panic("invalid kernel page directory");
	}
	pte = pmap_pte(pmap, va);
	opa = pmap_pte_pa(pte);

	/*
	 * Mapping has not changed, must be protection or wiring change.
	 */
	if (opa == pa) {
		/*
		 * Wiring change, just update stats. We don't worry about
		 * wiring PT pages as they remain resident as long as there
		 * are valid mappings in them. Hence, if a user page is wired,
		 * the PT page will be also.
		 */
		if (wired && !pmap_pte_w(pte))
			pmap->pm_stats.wired_count++;
		else if (!wired && pmap_pte_w(pte))
			pmap->pm_stats.wired_count--;

		goto validate;
	}
	/*
	 * Mapping has changed, invalidate old range and fall through to
	 * handle validating new mapping.
	 */
	if (opa) {
		pmap_remove(pmap, va, va + PAGE_SIZE);
	}
	/*
	 * Enter on the PV list if part of our managed memory Note that we
	 * raise IPL while manipulating pv_table since pmap_enter can be
	 * called at interrupt time.
	 */
	if (pmap_is_managed(pa)) {
		register pv_entry_t pv, npv;
		int s;

		pv = pa_to_pvh(pa);
		s = splhigh();
		/*
		 * No entries yet, use header as the first entry
		 */
		if (pv->pv_pmap == NULL) {
			pv->pv_va = va;
			pv->pv_pmap = pmap;
			pv->pv_next = NULL;
		}
		/*
		 * There is at least one other VA mapping this page. Place
		 * this entry after the header.
		 */
		else {
			npv = get_pv_entry();
			npv->pv_va = va;
			npv->pv_pmap = pmap;
			npv->pv_next = pv->pv_next;
			pv->pv_next = npv;
		}
		splx(s);
	}

	/*
	 * Increment counters
	 */
	pmap->pm_stats.resident_count++;
	if (wired)
		pmap->pm_stats.wired_count++;

validate:
	/*
	 * Now validate mapping with desired protection/wiring.
	 */
	npte = (pt_entry_t) ((int) (pa | pte_prot(pmap, prot) | PG_V));

	/*
	 * When forking (copy-on-write, etc): A process will turn off write
	 * permissions for any of its writable pages. If the data (object) is
	 * only referred to by one process, the processes map is modified
	 * directly as opposed to using the object manipulation routine. When
	 * using pmap_protect, the modified bits are not kept in the vm_page_t
	 * data structure. Therefore, when using pmap_enter in vm_fault to
	 * bring back writability of a page, there has been no memory of the
	 * modified or referenced bits except at the pte level. this clause
	 * supports the carryover of the modified and used (referenced) bits.
	 */
	/*
	 * NOTE(review): "(int) npte |= ..." below relies on the GCC
	 * cast-as-lvalue extension; it is not portable C.
	 */
	if (pa == opa)
		(int) npte |= (int) *pte & (PG_M | PG_U);

	if (wired)
		(int) npte |= PG_W;
	if (va < UPT_MIN_ADDRESS)
		(int) npte |= PG_u;
	else if (va < UPT_MAX_ADDRESS)
		/* pte pages in the user page-table area must stay writable */
		(int) npte |= PG_u | PG_RW;

	if (*pte != npte) {
		if (*pte)
			ptevalid++;	/* replacing a live mapping: flush TLB */
		*pte = npte;
	}
	if (ptevalid) {
		pmap_update_1pg(va);
	} else {
		/* brand-new mapping: account a reference on the pte page */
		pmap_use_pt(pmap, va);
	}
}

/*
 * Add a list of wired pages to the kva
 * this routine is only used for temporary
 * kernel mappings that do not need to have
 * page modification or references recorded.
 * Note that old mappings are simply written
 * over.  The page *must* be wired.
 */
void
pmap_qenter(va, m, count)
	vm_offset_t va;
	vm_page_t *m;
	int count;
{
	int i;
	int anyvalid = 0;	/* XXX unused -- kept for compatibility */
	register pt_entry_t *pte;

	for (i = 0; i < count; i++) {
		vm_offset_t tva = va + i * PAGE_SIZE;
		pt_entry_t npte = (pt_entry_t) ((int) (VM_PAGE_TO_PHYS(m[i]) | PG_RW | PG_V));
		pte = vtopte(tva);
		/* flush a differing old translation before installing */
		if (*pte && (*pte != npte))
			pmap_update_1pg(tva);
		*pte = npte;
	}
}
/*
 * this routine jerks page mappings from the
 * kernel -- it is meant only for temporary mappings.
 */
void
pmap_qremove(va, count)
	vm_offset_t va;
	int count;
{
	int i;
	register pt_entry_t *pte;

	for (i = 0; i < count; i++) {
		vm_offset_t tva = va + i * PAGE_SIZE;
		pte = vtopte(tva);
		*pte = 0;
		pmap_update_1pg(tva);
	}
}

/*
 * add a wired page to the kva
 * note that in order for the mapping to take effect -- you
 * should do a pmap_update after doing the pmap_kenter...
 */
void
pmap_kenter(va, pa)
	vm_offset_t va;
	register vm_offset_t pa;
{
	register pt_entry_t *pte;
	int wasvalid = 0;

	pte = vtopte(va);

	if (*pte)
		wasvalid++;

	*pte = (pt_entry_t) ((int) (pa | PG_RW | PG_V));

	/* only flush if we overwrote a previously valid translation */
	if (wasvalid)
		pmap_update_1pg(va);
}

/*
 * remove a page from the kernel pagetables
 */
void
pmap_kremove(va)
	vm_offset_t va;
{
	register pt_entry_t *pte;

	pte = vtopte(va);

	*pte = (pt_entry_t) 0;
	pmap_update_1pg(va);
}

/*
 * this code makes some *MAJOR* assumptions:
 * 1. Current pmap & pmap exists.
 * 2. Not wired.
 * 3. Read access.
 * 4. No page table pages.
 * 5. Tlbflush is deferred to calling procedure.
 * 6. Page IS managed.
 * but is *MUCH* faster than pmap_enter...
 */

static __inline void
pmap_enter_quick(pmap, va, pa)
	register pmap_t pmap;
	vm_offset_t va;
	register vm_offset_t pa;
{
	register pt_entry_t *pte;
	register pv_entry_t pv, npv;
	int s;

	/*
	 * Enter on the PV list if part of our managed memory Note that we
	 * raise IPL while manipulating pv_table since pmap_enter can be
	 * called at interrupt time.
	 */

	pte = vtopte(va);

	/* a fault on the page table might occur here */
	if (*pte) {
		pmap_remove(pmap, va, va + PAGE_SIZE);
	}
	pv = pa_to_pvh(pa);
	s = splhigh();
	/*
	 * No entries yet, use header as the first entry
	 */
	if (pv->pv_pmap == NULL) {
		pv->pv_pmap = pmap;
		pv->pv_va = va;
		pv->pv_next = NULL;
	}
	/*
	 * There is at least one other VA mapping this page. Place this entry
	 * after the header.
	 */
	else {
		npv = get_pv_entry();
		npv->pv_va = va;
		npv->pv_pmap = pmap;
		npv->pv_next = pv->pv_next;
		pv->pv_next = npv;
	}
	splx(s);

	/*
	 * Increment counters
	 */
	pmap->pm_stats.resident_count++;

	/*
	 * Now validate mapping with desired protection/wiring.
	 * Read-only user mapping (assumption 3 above); caller flushes TLB.
	 */
	*pte = (pt_entry_t) ((int) (pa | PG_V | PG_u));

	pmap_use_pt(pmap, va);

	return;
}

#define MAX_INIT_PT (512)
/*
 * pmap_object_init_pt preloads the ptes for a given object
 * into the specified pmap.  This eliminates the blast of soft
 * faults on process startup and immediately after an mmap.
 */
void
pmap_object_init_pt(pmap, addr, object, pindex, size)
	pmap_t pmap;
	vm_offset_t addr;
	vm_object_t object;
	vm_pindex_t pindex;
	vm_size_t size;
{
	vm_offset_t tmpidx;
	int psize;
	vm_page_t p;
	int objpgs;

	psize = (size >> PAGE_SHIFT);

	/* punt on huge requests against objects with many resident pages */
	if (!pmap || ((psize > MAX_INIT_PT) &&
		(object->resident_page_count > MAX_INIT_PT))) {
		return;
	}

	/*
	 * if we are processing a major portion of the object, then scan the
	 * entire thing.
	 */
	if (psize > (object->size >> 2)) {
		objpgs = psize;

		for (p = object->memq.tqh_first;
		    ((objpgs > 0) && (p != NULL));
		    p = p->listq.tqe_next) {

			tmpidx = p->pindex;
			if (tmpidx < pindex) {
				continue;
			}
			tmpidx -= pindex;
			if (tmpidx >= psize) {
				continue;
			}
			/* only map fully-valid, unbusied, real pages */
			if (((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
			    (p->busy == 0) &&
			    (p->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {
				if (p->queue == PQ_CACHE)
					vm_page_deactivate(p);
				vm_page_hold(p);
				p->flags |= PG_MAPPED;
				pmap_enter_quick(pmap,
					addr + (tmpidx << PAGE_SHIFT),
					VM_PAGE_TO_PHYS(p));
				vm_page_unhold(p);
			}
			/*
			 * NOTE(review): budget is decremented even for pages
			 * skipped by the continues above -- confirm intended.
			 */
			objpgs -= 1;
		}
	} else {
		/*
		 * else lookup the pages one-by-one.
		 */
		for (tmpidx = 0; tmpidx < psize; tmpidx += 1) {
			p = vm_page_lookup(object, tmpidx + pindex);
			if (p && (p->busy == 0) &&
			    ((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
			    (p->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {
				if (p->queue == PQ_CACHE)
					vm_page_deactivate(p);
				vm_page_hold(p);
				p->flags |= PG_MAPPED;
				pmap_enter_quick(pmap,
					addr + (tmpidx << PAGE_SHIFT),
					VM_PAGE_TO_PHYS(p));
				vm_page_unhold(p);
			}
		}
	}
}

/*
 * pmap_prefault provides a quick way of clustering
 * pagefaults into a processes address space.  It is a "cousin"
 * of pmap_object_init_pt, except it runs at page fault time instead
 * of mmap time.
 */
#define PFBAK 2
#define PFFOR 2
#define PAGEORDER_SIZE (PFBAK+PFFOR)

/* probe order around the faulting address: nearest neighbors first */
static int pmap_prefault_pageorder[] = {
	-NBPG, NBPG, -2 * NBPG, 2 * NBPG
};

static void
pmap_prefault(pmap, addra, entry, object)
	pmap_t pmap;
	vm_offset_t addra;
	vm_map_entry_t entry;
	vm_object_t object;
{
	int i;
	vm_offset_t starta;
	vm_offset_t addr;
	vm_pindex_t pindex;
	vm_page_t m;
	int pageorder_index;

	if (entry->object.vm_object != object)
		return;

	/* only prefault into the current process's address space */
	if (!curproc || (pmap != &curproc->p_vmspace->vm_pmap))
		return;

	starta = addra - PFBAK * PAGE_SIZE;
	if (starta < entry->start) {
		starta = entry->start;
	} else if (starta > addra) {
		/* subtraction wrapped below zero */
		starta = 0;
	}

	for (i = 0; i < PAGEORDER_SIZE; i++) {
		vm_object_t lobject;
		pt_entry_t *pte;

		addr = addra + pmap_prefault_pageorder[i];
		if (addr < starta || addr >= entry->end)
			continue;

		/* already mapped -- nothing to do */
		pte = vtopte(addr);
		if (*pte)
			continue;

		pindex = ((addr - entry->start) + entry->offset) >> PAGE_SHIFT;
		lobject = object;
		/* chase the shadow chain looking for a resident page */
		for (m = vm_page_lookup(lobject, pindex);
		    (!m && (lobject->type == OBJT_DEFAULT) && (lobject->backing_object));
		    lobject = lobject->backing_object) {
			/*
			 * NOTE(review): (PAGE_MASK-1) looks like it should be
			 * PAGE_MASK for a page-alignment test -- confirm.
			 */
			if (lobject->backing_object_offset & (PAGE_MASK-1))
				break;
			pindex += (lobject->backing_object_offset >> PAGE_SHIFT);
			m = vm_page_lookup(lobject->backing_object, pindex);
		}

		/*
		 * give-up when a page is not in memory
		 */
		if (m == NULL)
			break;

		if (((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
		    (m->busy == 0) &&
		    (m->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {

			if (m->queue == PQ_CACHE) {
				/* don't drain the cache below the free floor */
				if (cnt.v_free_count + cnt.v_cache_count <
				    cnt.v_free_min)
					break;
				vm_page_deactivate(m);
			}
			vm_page_hold(m);
			m->flags |= PG_MAPPED;
			pmap_enter_quick(pmap, addr,
				VM_PAGE_TO_PHYS(m));
			vm_page_unhold(m);
		}
	}
}

/*
 *	Routine:	pmap_change_wiring
 *	Function:	Change the wiring attribute for a map/virtual-address
 *			pair.
 *	In/out conditions:
 *			The mapping must already exist in the pmap.
 */
void
pmap_change_wiring(pmap, va, wired)
	register pmap_t pmap;
	vm_offset_t va;
	boolean_t wired;
{
	register pt_entry_t *pte;

	if (pmap == NULL)
		return;

	pte = pmap_pte(pmap, va);

	if (wired && !pmap_pte_w(pte))
		pmap->pm_stats.wired_count++;
	else if (!wired && pmap_pte_w(pte))
		pmap->pm_stats.wired_count--;

	/*
	 * Wiring is not a hardware characteristic so there is no need to
	 * invalidate TLB.
	 */
	pmap_pte_set_w(pte, wired);
}



/*
 *	Copy the range specified by src_addr/len
 *	from the source map to the range dst_addr/len
 *	in the destination map.
 *
 *	This routine is only advisory and need not do anything.
 */
void
pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
	pmap_t dst_pmap, src_pmap;
	vm_offset_t dst_addr;
	vm_size_t len;
	vm_offset_t src_addr;
{
}

/*
 *	Routine:	pmap_kernel
 *	Function:
 *		Returns the physical map handle for the kernel.
 */
pmap_t
pmap_kernel()
{
	return (kernel_pmap);
}

/*
 *	pmap_zero_page zeros the specified (machine independent)
 *	page by mapping the page into virtual memory and using
 *	bzero to clear its contents, one machine dependent page
 *	at a time.
 */
void
pmap_zero_page(phys)
	vm_offset_t phys;
{
	if (*(int *) CMAP2)
		panic("pmap_zero_page: CMAP busy");

	/* map the target page at the CADDR2 window, zero it, unmap */
	*(int *) CMAP2 = PG_V | PG_KW | trunc_page(phys);
	bzero(CADDR2, PAGE_SIZE);

	*(int *) CMAP2 = 0;
	pmap_update_1pg((vm_offset_t) CADDR2);
}

/*
 *	pmap_copy_page copies the specified (machine independent)
 *	page by mapping the page into virtual memory and using
 *	bcopy to copy the page, one machine dependent page at a
 *	time.
 */
void
pmap_copy_page(src, dst)
	vm_offset_t src;
	vm_offset_t dst;
{
	if (*(int *) CMAP1 || *(int *) CMAP2)
		panic("pmap_copy_page: CMAP busy");

	/* map source at CADDR1 and destination at CADDR2 */
	*(int *) CMAP1 = PG_V | PG_KW | trunc_page(src);
	*(int *) CMAP2 = PG_V | PG_KW | trunc_page(dst);

#if __GNUC__ > 1
	memcpy(CADDR2, CADDR1, PAGE_SIZE);
#else
	bcopy(CADDR1, CADDR2, PAGE_SIZE);
#endif
	*(int *) CMAP1 = 0;
	*(int *) CMAP2 = 0;
	pmap_update_2pg( (vm_offset_t) CADDR1, (vm_offset_t) CADDR2);
}


/*
 *	Routine:	pmap_pageable
 *	Function:
 *		Make the specified pages (by pmap, offset)
 *		pageable (or not) as requested.
 *
 *		A page which is not pageable may not take
 *		a fault; therefore, its page table entry
 *		must remain valid for the duration.
 *
 *		This routine is merely advisory; pmap_enter
 *		will specify that these pages are to be wired
 *		down (or not) as appropriate.
 */
void
pmap_pageable(pmap, sva, eva, pageable)
	pmap_t pmap;
	vm_offset_t sva, eva;
	boolean_t pageable;
{
}

/*
 * this routine returns true if a physical page resides
 * in the given pmap.
 */
boolean_t
pmap_page_exists(pmap, pa)
	pmap_t pmap;
	vm_offset_t pa;
{
	register pv_entry_t pv;
	int s;

	if (!pmap_is_managed(pa))
		return FALSE;

	pv = pa_to_pvh(pa);
	s = splhigh();

	/*
	 * Not found, check current mappings returning immediately if found.
	 */
	if (pv->pv_pmap != NULL) {
		for (; pv; pv = pv->pv_next) {
			if (pv->pv_pmap == pmap) {
				splx(s);
				return TRUE;
			}
		}
	}
	splx(s);
	return (FALSE);
}

/*
 * pmap_testbit tests bits in pte's
 * note that the testbit/changebit routines are inline,
 * and a lot of things compile-time evaluate.
 */
static __inline boolean_t
pmap_testbit(pa, bit)
	register vm_offset_t pa;
	int bit;
{
	register pv_entry_t pv;
	pt_entry_t *pte;
	int s;

	if (!pmap_is_managed(pa))
		return FALSE;

	pv = pa_to_pvh(pa);
	s = splhigh();

	/*
	 * Not found, check current mappings returning immediately if found.
	 */
	if (pv->pv_pmap != NULL) {
		for (; pv; pv = pv->pv_next) {
			/*
			 * if the bit being tested is the modified bit, then
			 * mark UPAGES as always modified, and ptes as never
			 * modified.
			 */
			if (bit & (PG_U|PG_M)) {
				/* skip pager-window mappings */
				if ((pv->pv_va >= clean_sva) && (pv->pv_va < clean_eva)) {
					continue;
				}
			}
			if (!pv->pv_pmap) {
				printf("Null pmap (tb) at va: 0x%lx\n", pv->pv_va);
				continue;
			}
			pte = pmap_pte(pv->pv_pmap, pv->pv_va);
			if ((int) *pte & bit) {
				splx(s);
				return TRUE;
			}
		}
	}
	splx(s);
	return (FALSE);
}

/*
 * this routine is used to modify bits in ptes
 */
static __inline void
pmap_changebit(pa, bit, setem)
	vm_offset_t pa;
	int bit;
	boolean_t setem;
{
	register pv_entry_t pv;
	register pt_entry_t *pte, npte;
	vm_offset_t va;
	int changed;	/* XXX unused */
	int s;

	if (!pmap_is_managed(pa))
		return;

	pv = pa_to_pvh(pa);
	s = splhigh();

	/*
	 * Loop over all current mappings setting/clearing as appropos If
	 * setting RO do we need to clear the VAC?
	 */
	if (pv->pv_pmap != NULL) {
		for (; pv; pv = pv->pv_next) {
			va = pv->pv_va;

			/*
			 * don't write protect pager mappings
			 */
			if (!setem && (bit == PG_RW)) {
				if (va >= clean_sva && va < clean_eva)
					continue;
			}
			if (!pv->pv_pmap) {
				printf("Null pmap (cb) at va: 0x%lx\n", va);
				continue;
			}
			pte = pmap_pte(pv->pv_pmap, va);
			/*
			 * NOTE(review): "(int) npte = ..." relies on the GCC
			 * cast-as-lvalue extension; not portable C.
			 */
			if (setem) {
				(int) npte = (int) *pte | bit;
			} else {
				(int) npte = (int) *pte & ~bit;
			}
			*pte = npte;
		}
	}
	splx(s);
	if (curproc != pageproc)
		pmap_update();
}

/*
 *      pmap_page_protect:
 *
 *      Lower the permission for all mappings to a given page.
 */
void
pmap_page_protect(phys, prot)
	vm_offset_t phys;
	vm_prot_t prot;
{
	if ((prot & VM_PROT_WRITE) == 0) {
		if (prot & (VM_PROT_READ | VM_PROT_EXECUTE))
			pmap_changebit(phys, PG_RW, FALSE);
		else
			pmap_remove_all(phys);
	}
}

/* convert a page frame number to its physical address */
vm_offset_t
pmap_phys_address(ppn)
	int ppn;
{
	return (i386_ptob(ppn));
}

/*
 *	pmap_is_referenced:
 *
 *	Return whether or not the specified physical page was referenced
 *	by any physical maps.
 */
boolean_t
pmap_is_referenced(vm_offset_t pa)
{
	return pmap_testbit((pa), PG_U);
}

/*
 *	pmap_is_modified:
 *
 *	Return whether or not the specified physical page was modified
 *	in any physical maps.
 */
boolean_t
pmap_is_modified(vm_offset_t pa)
{
	return pmap_testbit((pa), PG_M);
}

/*
 *	Clear the modify bits on the specified physical page.
 */
void
pmap_clear_modify(vm_offset_t pa)
{
	pmap_changebit((pa), PG_M, FALSE);
}

/*
 *	pmap_clear_reference:
 *
 *	Clear the reference bit on the specified physical page.
 */
void
pmap_clear_reference(vm_offset_t pa)
{
	pmap_changebit((pa), PG_U, FALSE);
}

/*
 * Miscellaneous support routines follow
 */

/* build the VM_PROT_* -> i386 pte-bit translation table */
static void
i386_protection_init()
{
	register int *kp, prot;

	kp = protection_codes;
	for (prot = 0; prot < 8; prot++) {
		switch (prot) {
		case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_NONE:
			/*
			 * Read access is also 0. There isn't any execute bit,
			 * so just make it readable.
			 */
		case VM_PROT_READ | VM_PROT_NONE | VM_PROT_NONE:
		case VM_PROT_READ | VM_PROT_NONE | VM_PROT_EXECUTE:
		case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_EXECUTE:
			*kp++ = 0;
			break;
		case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_NONE:
		case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_EXECUTE:
		case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_NONE:
		case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE:
			*kp++ = PG_RW;
			break;
		}
	}
}

/*
 * Map a set of physical memory pages into the kernel virtual
 * address space. Return a pointer to where it is mapped. This
 * routine is intended to be used for mapping device memory,
 * NOT real memory. The non-cacheable bits are set on each
 * mapped page.
 */
void *
pmap_mapdev(pa, size)
	vm_offset_t pa;
	vm_size_t size;
{
	vm_offset_t va, tmpva;
	pt_entry_t *pte;

	pa = trunc_page(pa);
	size = roundup(size, PAGE_SIZE);

	va = kmem_alloc_pageable(kernel_map, size);
	if (!va)
		panic("pmap_mapdev: Couldn't alloc kernel virtual memory");

	for (tmpva = va; size > 0;) {
		pte = vtopte(tmpva);
		/* PG_N marks the mapping non-cacheable (device memory) */
		*pte = (pt_entry_t) ((int) (pa | PG_RW | PG_V | PG_N));
		size -= PAGE_SIZE;
		tmpva += PAGE_SIZE;
		pa += PAGE_SIZE;
	}
	pmap_update();

	return ((void *) va);
}

#ifdef PMAP_DEBUG
/*
 * Debugging: dump every valid user pte of the process with the given
 * pid; returns the number of ptes printed.  (Implicit int return type.)
 */
pmap_pid_dump(int pid) {
	pmap_t pmap;
	struct proc *p;
	int npte = 0;
	int index;
	for (p = (struct proc *) allproc; p != NULL; p = p->p_next) {
		if (p->p_pid != pid)
			continue;

		if (p->p_vmspace) {
			int i,j;
			index = 0;
			pmap = &p->p_vmspace->vm_pmap;
			for(i=0;i<1024;i++) {
				pd_entry_t *pde;
				pt_entry_t *pte;
				unsigned base = i << PD_SHIFT;

				pde = &pmap->pm_pdir[i];
				if (pde && pmap_pde_v(pde)) {
					for(j=0;j<1024;j++) {
						unsigned va = base + (j << PG_SHIFT);
						/* stop once we reach kernel space */
						if (va >= (vm_offset_t) VM_MIN_KERNEL_ADDRESS) {
							if (index) {
								index = 0;
								printf("\n");
							}
							return npte;
						}
						pte = pmap_pte( pmap, va);
						if (pte && pmap_pte_v(pte)) {
							vm_offset_t pa;
							vm_page_t m;
							pa = *(int *)pte;
							m = PHYS_TO_VM_PAGE((pa & PG_FRAME));
							printf("va: 0x%x, pt: 0x%x, h: %d, w: %d, f: 0x%x",
								va, pa, m->hold_count, m->wire_count, m->flags);
							npte++;
							index++;
							/* two entries per output line */
							if (index >= 2) {
								index = 0;
								printf("\n");
							} else {
								printf(" ");
							}
						}
					}
				}
			}
		}
	}
	return npte;
}
#endif

#ifdef DEBUG

static void	pads __P((pmap_t pm));
static void	pmap_pvdump __P((vm_offset_t pa));

/* print address space of pmap*/
static void
pads(pm)
	pmap_t pm;
{
	unsigned va, i, j;
	pt_entry_t *ptep;

	if (pm == kernel_pmap)
		return;
	for (i = 0; i < 1024; i++)
		if (pm->pm_pdir[i])
			for (j = 0; j < 1024; j++) {
				va = (i << PD_SHIFT) + (j << PG_SHIFT);
				/*
				 * NOTE(review): dead check -- kernel_pmap
				 * already returned above.
				 */
				if (pm == kernel_pmap && va < KERNBASE)
					continue;
				if (pm != kernel_pmap && va > UPT_MAX_ADDRESS)
					continue;
				ptep = pmap_pte(pm, va);
				if (pmap_pte_v(ptep))
					printf("%x:%x ", va, *(int *) ptep);
			};

}

/* dump the pv chain (and each mapping pmap) for a physical page */
static void
pmap_pvdump(pa)
	vm_offset_t pa;
{
	register pv_entry_t pv;

	printf("pa %x", pa);
	for (pv = pa_to_pvh(pa); pv; pv = pv->pv_next) {
#ifdef used_to_be
		printf(" -> pmap %x, va %x, flags %x",
		    pv->pv_pmap, pv->pv_va, pv->pv_flags);
#endif
		printf(" -> pmap %x, va %x",
		    pv->pv_pmap, pv->pv_va);
		pads(pv->pv_pmap);
	}
	printf(" ");
}
#endif