pmap.c revision 13495
/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from:	@(#)pmap.c	7.7 (Berkeley)	5/12/91
 *	$Id: pmap.c,v 1.73 1996/01/19 03:57:40 dyson Exp $
 */

/*
 * Derived from hp300 version by Mike Hibler, this version by William
 * Jolitz uses a recursive map [a pde points to the page directory] to
 * map the page tables using the pagetables themselves. This is done to
 * reduce the impact on kernel virtual memory for lots of sparse address
 * space, and to reduce the cost of memory to each process.
 *
 *	Derived from: hp300/@(#)pmap.c	7.1 (Berkeley) 12/5/90
 */
/*
 * Major modifications by John S. Dyson primarily to support
 * pageable page tables, eliminating pmap_attributes,
 * discontiguous memory pages, and using more efficient string
 * instructions. Jan 13, 1994.  Further modifications on Mar 2, 1994,
 * general clean-up and efficiency mods.
 */

/*
 *	Manages physical address maps.
 *
 *	In addition to hardware address maps, this
 *	module is called upon to provide software-use-only
 *	maps which may or may not be stored in the same
 *	form as hardware maps.  These pseudo-maps are
 *	used to store intermediate results from copy
 *	operations to and from address spaces.
 *
 *	Since the information managed by this module is
 *	also stored by the logical address mapping module,
 *	this module may throw away valid virtual-to-physical
 *	mappings at almost any time.  However, invalidations
 *	of virtual-to-physical mappings must be done as
 *	requested.
 *
 *	In order to cope with hardware architectures which
 *	make virtual-to-physical map invalidates expensive,
 *	this module may delay invalidate or reduced protection
 *	operations until such time as they are actually
 *	necessary.  This module is given full information as
 *	to which processors are currently using which maps,
 *	and to when physical maps must be made correct.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/msgbuf.h>
#include <sys/queue.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <vm/lock.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>

#include <machine/pcb.h>
#include <machine/cputypes.h>
#include <machine/md_var.h>

#include <i386/isa/isa.h>

#define PMAP_KEEP_PDIRS

static void	init_pv_entries __P((int));

/*
 * Get PDEs and PTEs for user/kernel address space
 */
#define pmap_pde(m, v)	(&((m)->pm_pdir[((vm_offset_t)(v) >> PD_SHIFT)&1023]))
#define pdir_pde(m, v)	(m[((vm_offset_t)(v) >> PD_SHIFT)&1023])

#define pmap_pte_pa(pte)	(*(int *)(pte) & PG_FRAME)

#define pmap_pde_v(pte)		((*(int *)pte & PG_V) != 0)
#define pmap_pte_w(pte)		((*(int *)pte & PG_W) != 0)
#define pmap_pte_m(pte)		((*(int *)pte & PG_M) != 0)
#define pmap_pte_u(pte)		((*(int *)pte & PG_U) != 0)
#define pmap_pte_v(pte)		((*(int *)pte & PG_V) != 0)

#define pmap_pte_set_w(pte, v)		((v)?(*(int *)pte |= PG_W):(*(int *)pte &= ~PG_W))
#define pmap_pte_set_prot(pte, v)	((*(int *)pte &= ~PG_PROT), (*(int *)pte |= (v)))

/*
 * Given a map and a machine independent protection code,
 * convert to a vax protection code.
 */
#define pte_prot(m, p)	(protection_codes[p])
static int protection_codes[8];

static struct pmap kernel_pmap_store;
pmap_t kernel_pmap;

vm_offset_t avail_start;	/* PA of first available physical page */
vm_offset_t avail_end;		/* PA of last available physical page */
vm_offset_t virtual_avail;	/* VA of first avail page (after kernel bss) */
vm_offset_t virtual_end;	/* VA of last avail page (end of kernel AS) */
static boolean_t pmap_initialized = FALSE;	/* Has pmap_init completed? */
static vm_offset_t vm_first_phys;

static int nkpt;

extern vm_offset_t clean_sva, clean_eva;
extern int cpu_class;

/*
 * All those kernel PT submaps that BSD is so fond of
 */
pt_entry_t *CMAP1;
static pt_entry_t *CMAP2, *ptmmap;
static pv_entry_t pv_table;
caddr_t CADDR1, ptvmmap;
static caddr_t CADDR2;
static pt_entry_t *msgbufmap;
struct msgbuf *msgbufp;

static void	free_pv_entry __P((pv_entry_t pv));
static pt_entry_t *
		get_pt_entry __P((pmap_t pmap));
static pv_entry_t
		get_pv_entry __P((void));
static void	i386_protection_init __P((void));
static void	pmap_alloc_pv_entry __P((void));
static void	pmap_changebit __P((vm_offset_t pa, int bit, boolean_t setem));
static void	pmap_enter_quick __P((pmap_t pmap, vm_offset_t va,
			vm_offset_t pa));
static int	pmap_is_managed __P((vm_offset_t pa));
static void	pmap_remove_all __P((vm_offset_t pa));
static void	pmap_remove_entry __P((struct pmap *pmap, pv_entry_t pv,
			vm_offset_t va));
static vm_page_t
		pmap_pte_vm_page __P((pmap_t pmap, vm_offset_t pt));
static boolean_t
		pmap_testbit __P((vm_offset_t pa, int bit));
static void *	pmap_getpdir __P((void));
static void	pmap_prefault __P((pmap_t pmap, vm_offset_t addra,
			vm_map_entry_t entry, vm_object_t object));

/*
 * The routines below are finer-grained pmap_update routines.  They eliminate
 * the gratuitous TLB flushes on non-i386 (i.e. 486 and later) CPUs.
 */
static __inline void
pmap_update_1pg( vm_offset_t va) {
#if defined(I386_CPU)
	if (cpu_class == CPUCLASS_386)
		pmap_update();
	else
#endif
		__asm __volatile(".byte 0xf,0x1,0x38": :"a" (va));
}

static __inline void
pmap_update_2pg( vm_offset_t va1, vm_offset_t va2) {
#if defined(I386_CPU)
	if (cpu_class == CPUCLASS_386) {
		pmap_update();
	} else
#endif
	{
		__asm __volatile(".byte 0xf,0x1,0x38": :"a" (va1));
		__asm __volatile(".byte 0xf,0x1,0x38": :"a" (va2));
	}
}

/*
 *	Routine:	pmap_pte
 *	Function:
 *		Extract the page table entry associated
 *		with the given map/virtual_address pair.
 *	[ what about induced faults -wfj]
 */

__inline pt_entry_t * __pure
pmap_pte(pmap, va)
	register pmap_t pmap;
	vm_offset_t va;
{

	if (pmap && *pmap_pde(pmap, va)) {
		vm_offset_t frame = (int) pmap->pm_pdir[PTDPTDI] & PG_FRAME;

		/* are we current address space or kernel? */
		if ((pmap == kernel_pmap) || (frame == ((int) PTDpde & PG_FRAME)))
			return ((pt_entry_t *) vtopte(va));
		/* otherwise, we are alternate address space */
		else {
			if (frame != ((int) APTDpde & PG_FRAME)) {
				APTDpde = pmap->pm_pdir[PTDPTDI];
				pmap_update();
			}
			return ((pt_entry_t *) avtopte(va));
		}
	}
	return (0);
}

/*
 *	Routine:	pmap_extract
 *	Function:
 *		Extract the physical page address associated
 *		with the given map/virtual_address pair.
 */

vm_offset_t
pmap_extract(pmap, va)
	register pmap_t pmap;
	vm_offset_t va;
{
	vm_offset_t pa;

	if (pmap && *pmap_pde(pmap, va)) {
		vm_offset_t frame = (int) pmap->pm_pdir[PTDPTDI] & PG_FRAME;

		/* are we current address space or kernel?
*/ 265 if ((pmap == kernel_pmap) 266 || (frame == ((int) PTDpde & PG_FRAME))) { 267 pa = *(int *) vtopte(va); 268 /* otherwise, we are alternate address space */ 269 } else { 270 if (frame != ((int) APTDpde & PG_FRAME)) { 271 APTDpde = pmap->pm_pdir[PTDPTDI]; 272 pmap_update(); 273 } 274 pa = *(int *) avtopte(va); 275 } 276 return ((pa & PG_FRAME) | (va & ~PG_FRAME)); 277 } 278 return 0; 279 280} 281 282/* 283 * determine if a page is managed (memory vs. device) 284 */ 285static __inline int 286pmap_is_managed(pa) 287 vm_offset_t pa; 288{ 289 int i; 290 291 if (!pmap_initialized) 292 return 0; 293 294 for (i = 0; phys_avail[i + 1]; i += 2) { 295 if (pa >= phys_avail[i] && pa < phys_avail[i + 1]) 296 return 1; 297 } 298 return 0; 299} 300 301/* 302 * find the vm_page_t of a pte (only) given va of pte and pmap 303 */ 304static __inline vm_page_t 305pmap_pte_vm_page(pmap, pt) 306 pmap_t pmap; 307 vm_offset_t pt; 308{ 309 vm_page_t m; 310 311 pt = trunc_page(pt); 312 pt = (pt - UPT_MIN_ADDRESS) / PAGE_SIZE; 313 pt = ((vm_offset_t) pmap->pm_pdir[pt]) & PG_FRAME; 314 m = PHYS_TO_VM_PAGE(pt); 315 return m; 316} 317 318/* 319 * Wire a page table page 320 */ 321__inline void 322pmap_use_pt(pmap, va) 323 pmap_t pmap; 324 vm_offset_t va; 325{ 326 vm_offset_t pt; 327 328 if ((va >= UPT_MIN_ADDRESS) || !pmap_initialized) 329 return; 330 331 pt = (vm_offset_t) vtopte(va); 332 vm_page_hold(pmap_pte_vm_page(pmap, pt)); 333} 334 335/* 336 * Unwire a page table page 337 */ 338__inline void 339pmap_unuse_pt(pmap, va) 340 pmap_t pmap; 341 vm_offset_t va; 342{ 343 vm_offset_t pt; 344 vm_page_t m; 345 346 if ((va >= UPT_MIN_ADDRESS) || !pmap_initialized) 347 return; 348 349 pt = (vm_offset_t) vtopte(va); 350 m = pmap_pte_vm_page(pmap, pt); 351 vm_page_unhold(m); 352 if (pmap != kernel_pmap && 353 (m->hold_count == 0) && 354 (m->wire_count == 0) && 355 (va < KPT_MIN_ADDRESS)) { 356/* 357 * We don't free page-table-pages anymore because it can have a negative 358 * impact on perf at times. Now we just deactivate, and it'll get cleaned 359 * up if needed... Also, if the page ends up getting used, it will fault 360 * back into the process address space and be reactivated. 361 */ 362#ifdef PMAP_FREE_OLD_PTES 363 pmap_page_protect(VM_PAGE_TO_PHYS(m), VM_PROT_NONE); 364 vm_page_free(m); 365#else 366 m->dirty = 0; 367 vm_page_deactivate(m); 368#endif 369 } 370} 371 372/* [ macro again?, should I force kstack into user map here? -wfj ] */ 373void 374pmap_activate(pmap, pcbp) 375 register pmap_t pmap; 376 struct pcb *pcbp; 377{ 378 PMAP_ACTIVATE(pmap, pcbp); 379} 380 381/* 382 * Bootstrap the system enough to run with virtual memory. 383 * 384 * On the i386 this is called after mapping has already been enabled 385 * and just syncs the pmap module with what has already been done. 386 * [We can't call it easily with mapping off since the kernel is not 387 * mapped with PA == VA, hence we would have to relocate every address 388 * from the linked base (virtual) address "KERNBASE" to the actual 389 * (physical) address starting relative to 0] 390 */ 391void 392pmap_bootstrap(firstaddr, loadaddr) 393 vm_offset_t firstaddr; 394 vm_offset_t loadaddr; 395{ 396 vm_offset_t va; 397 pt_entry_t *pte; 398 399 avail_start = firstaddr; 400 401 /* 402 * XXX The calculation of virtual_avail is wrong. It's NKPT*PAGE_SIZE too 403 * large. It should instead be correctly calculated in locore.s and 404 * not based on 'first' (which is a physical address, not a virtual 405 * address, for the start of unused physical memory). 
The kernel 406 * page tables are NOT double mapped and thus should not be included 407 * in this calculation. 408 */ 409 virtual_avail = (vm_offset_t) KERNBASE + firstaddr; 410 virtual_end = VM_MAX_KERNEL_ADDRESS; 411 412 /* 413 * Initialize protection array. 414 */ 415 i386_protection_init(); 416 417 /* 418 * The kernel's pmap is statically allocated so we don't have to use 419 * pmap_create, which is unlikely to work correctly at this part of 420 * the boot sequence (XXX and which no longer exists). 421 */ 422 kernel_pmap = &kernel_pmap_store; 423 424 kernel_pmap->pm_pdir = (pd_entry_t *) (KERNBASE + IdlePTD); 425 426 kernel_pmap->pm_count = 1; 427 nkpt = NKPT; 428 429 /* 430 * Reserve some special page table entries/VA space for temporary 431 * mapping of pages. 432 */ 433#define SYSMAP(c, p, v, n) \ 434 v = (c)va; va += ((n)*PAGE_SIZE); p = pte; pte += (n); 435 436 va = virtual_avail; 437 pte = pmap_pte(kernel_pmap, va); 438 439 /* 440 * CMAP1/CMAP2 are used for zeroing and copying pages. 441 */ 442 SYSMAP(caddr_t, CMAP1, CADDR1, 1) 443 SYSMAP(caddr_t, CMAP2, CADDR2, 1) 444 445 /* 446 * ptmmap is used for reading arbitrary physical pages via /dev/mem. 447 */ 448 SYSMAP(caddr_t, ptmmap, ptvmmap, 1) 449 450 /* 451 * msgbufmap is used to map the system message buffer. 452 */ 453 SYSMAP(struct msgbuf *, msgbufmap, msgbufp, 1) 454 455 virtual_avail = va; 456 457 *(int *) CMAP1 = *(int *) CMAP2 = *(int *) PTD = 0; 458 pmap_update(); 459} 460 461/* 462 * Initialize the pmap module. 463 * Called by vm_init, to initialize any structures that the pmap 464 * system needs to map virtual memory. 465 * pmap_init has been enhanced to support in a fairly consistant 466 * way, discontiguous physical memory. 467 */ 468void 469pmap_init(phys_start, phys_end) 470 vm_offset_t phys_start, phys_end; 471{ 472 vm_offset_t addr; 473 vm_size_t npg, s; 474 int i; 475 476 /* 477 * calculate the number of pv_entries needed 478 */ 479 vm_first_phys = phys_avail[0]; 480 for (i = 0; phys_avail[i + 1]; i += 2); 481 npg = (phys_avail[(i - 2) + 1] - vm_first_phys) / PAGE_SIZE; 482 483 /* 484 * Allocate memory for random pmap data structures. Includes the 485 * pv_head_table. 486 */ 487 s = (vm_size_t) (sizeof(struct pv_entry) * npg); 488 s = round_page(s); 489 addr = (vm_offset_t) kmem_alloc(kernel_map, s); 490 pv_table = (pv_entry_t) addr; 491 492 /* 493 * init the pv free list 494 */ 495 init_pv_entries(npg); 496 /* 497 * Now it is safe to enable pv_table recording. 498 */ 499 pmap_initialized = TRUE; 500} 501 502/* 503 * Used to map a range of physical addresses into kernel 504 * virtual address space. 505 * 506 * For now, VM is already on, we only need to map the 507 * specified memory. 
508 */ 509vm_offset_t 510pmap_map(virt, start, end, prot) 511 vm_offset_t virt; 512 vm_offset_t start; 513 vm_offset_t end; 514 int prot; 515{ 516 while (start < end) { 517 pmap_enter(kernel_pmap, virt, start, prot, FALSE); 518 virt += PAGE_SIZE; 519 start += PAGE_SIZE; 520 } 521 return (virt); 522} 523 524#ifdef PMAP_KEEP_PDIRS 525int nfreepdir; 526caddr_t *pdirlist; 527#define NFREEPDIR 3 528 529static void * 530pmap_getpdir() { 531 caddr_t *pdir; 532 if (pdirlist) { 533 --nfreepdir; 534 pdir = pdirlist; 535 pdirlist = (caddr_t *) *pdir; 536 bzero( (caddr_t) pdir, PAGE_SIZE); 537 } else { 538 pdir = (caddr_t *) kmem_alloc(kernel_map, PAGE_SIZE); 539 } 540 541 return (void *) pdir; 542} 543 544static void 545pmap_freepdir(void *pdir) { 546 if (nfreepdir > NFREEPDIR) { 547 kmem_free(kernel_map, (vm_offset_t) pdir, PAGE_SIZE); 548 } else { 549 * (caddr_t *) pdir = (caddr_t) pdirlist; 550 pdirlist = (caddr_t *) pdir; 551 ++nfreepdir; 552 } 553} 554#endif 555 556/* 557 * Initialize a preallocated and zeroed pmap structure, 558 * such as one in a vmspace structure. 559 */ 560void 561pmap_pinit(pmap) 562 register struct pmap *pmap; 563{ 564 /* 565 * No need to allocate page table space yet but we do need a valid 566 * page directory table. 567 */ 568 569#ifdef PMAP_KEEP_PDIRS 570 pmap->pm_pdir = pmap_getpdir(); 571#else 572 pmap->pm_pdir = (pd_entry_t *) kmem_alloc(kernel_map, PAGE_SIZE); 573#endif 574 575 /* wire in kernel global address entries */ 576 bcopy(PTD + KPTDI, pmap->pm_pdir + KPTDI, nkpt * PTESIZE); 577 578 /* install self-referential address mapping entry */ 579 *(int *) (pmap->pm_pdir + PTDPTDI) = 580 ((int) pmap_kextract((vm_offset_t) pmap->pm_pdir)) | PG_V | PG_KW; 581 582 pmap->pm_count = 1; 583} 584 585/* 586 * grow the number of kernel page table entries, if needed 587 */ 588 589static vm_page_t nkpg; 590vm_offset_t kernel_vm_end; 591 592void 593pmap_growkernel(vm_offset_t addr) 594{ 595 struct proc *p; 596 struct pmap *pmap; 597 int s; 598 599 s = splhigh(); 600 if (kernel_vm_end == 0) { 601 kernel_vm_end = KERNBASE; 602 nkpt = 0; 603 while (pdir_pde(PTD, kernel_vm_end)) { 604 kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1); 605 ++nkpt; 606 } 607 } 608 addr = (addr + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1); 609 while (kernel_vm_end < addr) { 610 if (pdir_pde(PTD, kernel_vm_end)) { 611 kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1); 612 continue; 613 } 614 ++nkpt; 615 if (!nkpg) { 616 nkpg = vm_page_alloc(kernel_object, 0, VM_ALLOC_SYSTEM); 617 if (!nkpg) 618 panic("pmap_growkernel: no memory to grow kernel"); 619 vm_page_wire(nkpg); 620 vm_page_remove(nkpg); 621 pmap_zero_page(VM_PAGE_TO_PHYS(nkpg)); 622 } 623 pdir_pde(PTD, kernel_vm_end) = (pd_entry_t) (VM_PAGE_TO_PHYS(nkpg) | PG_V | PG_KW); 624 nkpg = NULL; 625 626 for (p = (struct proc *) allproc; p != NULL; p = p->p_next) { 627 if (p->p_vmspace) { 628 pmap = &p->p_vmspace->vm_pmap; 629 *pmap_pde(pmap, kernel_vm_end) = pdir_pde(PTD, kernel_vm_end); 630 } 631 } 632 *pmap_pde(kernel_pmap, kernel_vm_end) = pdir_pde(PTD, kernel_vm_end); 633 kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1); 634 } 635 splx(s); 636} 637 638/* 639 * Retire the given physical map from service. 640 * Should only be called if the map contains 641 * no valid mappings. 
642 */ 643void 644pmap_destroy(pmap) 645 register pmap_t pmap; 646{ 647 int count; 648 649 if (pmap == NULL) 650 return; 651 652 count = --pmap->pm_count; 653 if (count == 0) { 654 pmap_release(pmap); 655 free((caddr_t) pmap, M_VMPMAP); 656 } 657} 658 659/* 660 * Release any resources held by the given physical map. 661 * Called when a pmap initialized by pmap_pinit is being released. 662 * Should only be called if the map contains no valid mappings. 663 */ 664void 665pmap_release(pmap) 666 register struct pmap *pmap; 667{ 668#ifdef PMAP_KEEP_PDIRS 669 pmap_freepdir( (void *)pmap->pm_pdir); 670#else 671 kmem_free(kernel_map, (vm_offset_t) pmap->pm_pdir, PAGE_SIZE); 672#endif 673} 674 675/* 676 * Add a reference to the specified pmap. 677 */ 678void 679pmap_reference(pmap) 680 pmap_t pmap; 681{ 682 if (pmap != NULL) { 683 pmap->pm_count++; 684 } 685} 686 687#define PV_FREELIST_MIN ((PAGE_SIZE / sizeof (struct pv_entry)) / 2) 688 689/* 690 * Data for the pv entry allocation mechanism 691 */ 692static int pv_freelistcnt; 693static pv_entry_t pv_freelist; 694static vm_offset_t pvva; 695static int npvvapg; 696 697/* 698 * free the pv_entry back to the free list 699 */ 700static __inline void 701free_pv_entry(pv) 702 pv_entry_t pv; 703{ 704 if (!pv) 705 return; 706 ++pv_freelistcnt; 707 pv->pv_next = pv_freelist; 708 pv_freelist = pv; 709} 710 711/* 712 * get a new pv_entry, allocating a block from the system 713 * when needed. 714 * the memory allocation is performed bypassing the malloc code 715 * because of the possibility of allocations at interrupt time. 716 */ 717static __inline pv_entry_t 718get_pv_entry() 719{ 720 pv_entry_t tmp; 721 722 /* 723 * get more pv_entry pages if needed 724 */ 725 if (pv_freelistcnt < PV_FREELIST_MIN || pv_freelist == 0) { 726 pmap_alloc_pv_entry(); 727 } 728 /* 729 * get a pv_entry off of the free list 730 */ 731 --pv_freelistcnt; 732 tmp = pv_freelist; 733 pv_freelist = tmp->pv_next; 734 return tmp; 735} 736 737/* 738 * this *strange* allocation routine *statistically* eliminates the 739 * *possibility* of a malloc failure (*FATAL*) for a pv_entry_t data structure. 740 * also -- this code is MUCH MUCH faster than the malloc equiv... 741 */ 742static void 743pmap_alloc_pv_entry() 744{ 745 /* 746 * do we have any pre-allocated map-pages left? 
747 */ 748 if (npvvapg) { 749 vm_page_t m; 750 751 /* 752 * we do this to keep recursion away 753 */ 754 pv_freelistcnt += PV_FREELIST_MIN; 755 /* 756 * allocate a physical page out of the vm system 757 */ 758 m = vm_page_alloc(kernel_object, 759 OFF_TO_IDX(pvva - vm_map_min(kernel_map)), 760 VM_ALLOC_INTERRUPT); 761 if (m) { 762 int newentries; 763 int i; 764 pv_entry_t entry; 765 766 newentries = (PAGE_SIZE / sizeof(struct pv_entry)); 767 /* 768 * wire the page 769 */ 770 vm_page_wire(m); 771 m->flags &= ~PG_BUSY; 772 /* 773 * let the kernel see it 774 */ 775 pmap_kenter(pvva, VM_PAGE_TO_PHYS(m)); 776 777 entry = (pv_entry_t) pvva; 778 /* 779 * update the allocation pointers 780 */ 781 pvva += PAGE_SIZE; 782 --npvvapg; 783 784 /* 785 * free the entries into the free list 786 */ 787 for (i = 0; i < newentries; i++) { 788 free_pv_entry(entry); 789 entry++; 790 } 791 } 792 pv_freelistcnt -= PV_FREELIST_MIN; 793 } 794 if (!pv_freelist) 795 panic("get_pv_entry: cannot get a pv_entry_t"); 796} 797 798 799 800/* 801 * init the pv_entry allocation system 802 */ 803#define PVSPERPAGE 64 804void 805init_pv_entries(npg) 806 int npg; 807{ 808 /* 809 * allocate enough kvm space for PVSPERPAGE entries per page (lots) 810 * kvm space is fairly cheap, be generous!!! (the system can panic if 811 * this is too small.) 812 */ 813 npvvapg = ((npg * PVSPERPAGE) * sizeof(struct pv_entry) 814 + PAGE_SIZE - 1) / PAGE_SIZE; 815 pvva = kmem_alloc_pageable(kernel_map, npvvapg * PAGE_SIZE); 816 /* 817 * get the first batch of entries 818 */ 819 free_pv_entry(get_pv_entry()); 820} 821 822static pt_entry_t * 823get_pt_entry(pmap) 824 pmap_t pmap; 825{ 826 vm_offset_t frame = (int) pmap->pm_pdir[PTDPTDI] & PG_FRAME; 827 828 /* are we current address space or kernel? */ 829 if (pmap == kernel_pmap || frame == ((int) PTDpde & PG_FRAME)) { 830 return PTmap; 831 } 832 /* otherwise, we are alternate address space */ 833 if (frame != ((int) APTDpde & PG_FRAME)) { 834 APTDpde = pmap->pm_pdir[PTDPTDI]; 835 pmap_update(); 836 } 837 return APTmap; 838} 839 840/* 841 * If it is the first entry on the list, it is actually 842 * in the header and we must copy the following entry up 843 * to the header. Otherwise we must search the list for 844 * the entry. In either case we free the now unused entry. 845 */ 846static void 847pmap_remove_entry(pmap, pv, va) 848 struct pmap *pmap; 849 pv_entry_t pv; 850 vm_offset_t va; 851{ 852 pv_entry_t npv; 853 int s; 854 855 s = splhigh(); 856 if (pmap == pv->pv_pmap && va == pv->pv_va) { 857 npv = pv->pv_next; 858 if (npv) { 859 *pv = *npv; 860 free_pv_entry(npv); 861 } else { 862 pv->pv_pmap = NULL; 863 } 864 } else { 865 for (npv = pv->pv_next; npv; (pv = npv, npv = pv->pv_next)) { 866 if (pmap == npv->pv_pmap && va == npv->pv_va) { 867 break; 868 } 869 } 870 if (npv) { 871 pv->pv_next = npv->pv_next; 872 free_pv_entry(npv); 873 } 874 } 875 splx(s); 876} 877 878/* 879 * Remove the given range of addresses from the specified map. 880 * 881 * It is assumed that the start and end are properly 882 * rounded to the page size. 883 */ 884void 885pmap_remove(pmap, sva, eva) 886 struct pmap *pmap; 887 register vm_offset_t sva; 888 register vm_offset_t eva; 889{ 890 register pt_entry_t *ptp, *ptq; 891 vm_offset_t pa; 892 register pv_entry_t pv; 893 vm_offset_t va; 894 pt_entry_t oldpte; 895 896 if (pmap == NULL) 897 return; 898 899 ptp = get_pt_entry(pmap); 900 901 /* 902 * special handling of removing one page. a very 903 * common operation and easy to short circuit some 904 * code. 
905 */ 906 if ((sva + PAGE_SIZE) == eva) { 907 908 if (*pmap_pde(pmap, sva) == 0) 909 return; 910 911 ptq = ptp + i386_btop(sva); 912 913 if (!*ptq) 914 return; 915 /* 916 * Update statistics 917 */ 918 if (pmap_pte_w(ptq)) 919 pmap->pm_stats.wired_count--; 920 pmap->pm_stats.resident_count--; 921 922 pa = pmap_pte_pa(ptq); 923 oldpte = *ptq; 924 *ptq = 0; 925 926 if (pmap_is_managed(pa)) { 927 if ((int) oldpte & PG_M) { 928 if (sva < USRSTACK + (UPAGES * PAGE_SIZE) || 929 (sva >= KERNBASE && (sva < clean_sva || sva >= clean_eva))) { 930 PHYS_TO_VM_PAGE(pa)->dirty |= VM_PAGE_BITS_ALL; 931 } 932 } 933 pv = pa_to_pvh(pa); 934 pmap_remove_entry(pmap, pv, sva); 935 } 936 pmap_unuse_pt(pmap, sva); 937 pmap_update_1pg(sva); 938 return; 939 } 940 sva = i386_btop(sva); 941 eva = i386_btop(eva); 942 943 while (sva < eva) { 944 /* 945 * Weed out invalid mappings. Note: we assume that the page 946 * directory table is always allocated, and in kernel virtual. 947 */ 948 949 if (*pmap_pde(pmap, i386_ptob(sva)) == 0) { 950 /* We can race ahead here, straight to next pde.. */ 951 sva = ((sva + NPTEPG) & ~(NPTEPG - 1)); 952 continue; 953 } 954 ptq = ptp + sva; 955 956 /* 957 * search for page table entries, use string operations that 958 * are much faster than explicitly scanning when page tables 959 * are not fully populated. 960 */ 961 if (*ptq == 0) { 962 vm_offset_t pdnxt = ((sva + NPTEPG) & ~(NPTEPG - 1)); 963 vm_offset_t nscan = pdnxt - sva; 964 int found = 0; 965 966 if ((nscan + sva) > eva) 967 nscan = eva - sva; 968 969 asm("xorl %%eax,%%eax;cld;repe;scasl;jz 1f;incl %%eax;1:;" : 970 "=D"(ptq), "=a"(found) : "c"(nscan), "0"(ptq) : "cx"); 971 972 if (!found) { 973 sva = pdnxt; 974 continue; 975 } 976 ptq -= 1; 977 978 sva = ptq - ptp; 979 } 980 /* 981 * Update statistics 982 */ 983 oldpte = *ptq; 984 if (((int) oldpte) & PG_W) 985 pmap->pm_stats.wired_count--; 986 pmap->pm_stats.resident_count--; 987 988 /* 989 * Invalidate the PTEs. XXX: should cluster them up and 990 * invalidate as many as possible at once. 991 */ 992 *ptq = 0; 993 994 va = i386_ptob(sva); 995 996 /* 997 * Remove from the PV table (raise IPL since we may be called 998 * at interrupt time). 999 */ 1000 pa = ((int) oldpte) & PG_FRAME; 1001 if (!pmap_is_managed(pa)) { 1002 pmap_unuse_pt(pmap, (vm_offset_t) va); 1003 ++sva; 1004 continue; 1005 } 1006 if ((int) oldpte & PG_M) { 1007 if (sva < USRSTACK + (UPAGES * PAGE_SIZE) || 1008 (sva >= KERNBASE && (sva < clean_sva || sva >= clean_eva))) { 1009 PHYS_TO_VM_PAGE(pa)->dirty |= VM_PAGE_BITS_ALL; 1010 } 1011 } 1012 pv = pa_to_pvh(pa); 1013 pmap_remove_entry(pmap, pv, va); 1014 pmap_unuse_pt(pmap, va); 1015 ++sva; 1016 } 1017 pmap_update(); 1018} 1019 1020/* 1021 * Routine: pmap_remove_all 1022 * Function: 1023 * Removes this physical page from 1024 * all physical maps in which it resides. 1025 * Reflects back modify bits to the pager. 1026 * 1027 * Notes: 1028 * Original versions of this routine were very 1029 * inefficient because they iteratively called 1030 * pmap_remove (slow...) 1031 */ 1032static void 1033pmap_remove_all(pa) 1034 vm_offset_t pa; 1035{ 1036 register pv_entry_t pv, opv, npv; 1037 register pt_entry_t *pte, *ptp; 1038 vm_offset_t va; 1039 struct pmap *pmap; 1040 vm_page_t m; 1041 int s; 1042 int anyvalid = 0; 1043 1044 /* 1045 * Not one of ours 1046 */ 1047 /* 1048 * XXX this makes pmap_page_protect(NONE) illegal for non-managed 1049 * pages! 
1050 */ 1051 if (!pmap_is_managed(pa)) 1052 return; 1053 1054 pa = trunc_page(pa); 1055 opv = pa_to_pvh(pa); 1056 if (opv->pv_pmap == NULL) 1057 return; 1058 1059 m = PHYS_TO_VM_PAGE(pa); 1060 s = splhigh(); 1061 pv = opv; 1062 while (pv && ((pmap = pv->pv_pmap) != NULL)) { 1063 ptp = get_pt_entry(pmap); 1064 va = pv->pv_va; 1065 pte = ptp + i386_btop(va); 1066 if (pmap_pte_w(pte)) 1067 pmap->pm_stats.wired_count--; 1068 if (*pte) { 1069 pmap->pm_stats.resident_count--; 1070 if (curproc != pageproc) 1071 anyvalid++; 1072 1073 /* 1074 * Update the vm_page_t clean and reference bits. 1075 */ 1076 if ((int) *pte & PG_M) { 1077 if (va < USRSTACK + (UPAGES * PAGE_SIZE) || 1078 (va >= KERNBASE && (va < clean_sva || va >= clean_eva))) { 1079 PHYS_TO_VM_PAGE(pa)->dirty |= VM_PAGE_BITS_ALL; 1080 } 1081 } 1082 *pte = 0; 1083 pmap_unuse_pt(pmap, va); 1084 } 1085 pv = pv->pv_next; 1086 } 1087 1088 for (pv = opv->pv_next; pv; pv = npv) { 1089 npv = pv->pv_next; 1090 free_pv_entry(pv); 1091 } 1092 1093 opv->pv_pmap = NULL; 1094 opv->pv_next = NULL; 1095 1096 splx(s); 1097 if (anyvalid) 1098 pmap_update(); 1099} 1100 1101 1102/* 1103 * Set the physical protection on the 1104 * specified range of this map as requested. 1105 */ 1106void 1107pmap_protect(pmap, sva, eva, prot) 1108 register pmap_t pmap; 1109 vm_offset_t sva, eva; 1110 vm_prot_t prot; 1111{ 1112 register pt_entry_t *pte; 1113 register vm_offset_t va; 1114 int i386prot; 1115 register pt_entry_t *ptp; 1116 int evap = i386_btop(eva); 1117 int anyvalid = 0;; 1118 1119 if (pmap == NULL) 1120 return; 1121 1122 if ((prot & VM_PROT_READ) == VM_PROT_NONE) { 1123 pmap_remove(pmap, sva, eva); 1124 return; 1125 } 1126 if (prot & VM_PROT_WRITE) 1127 return; 1128 1129 ptp = get_pt_entry(pmap); 1130 1131 va = sva; 1132 while (va < eva) { 1133 int found = 0; 1134 int svap; 1135 vm_offset_t nscan; 1136 1137 /* 1138 * Page table page is not allocated. Skip it, we don't want to 1139 * force allocation of unnecessary PTE pages just to set the 1140 * protection. 1141 */ 1142 if (!*pmap_pde(pmap, va)) { 1143 /* XXX: avoid address wrap around */ 1144 nextpde: 1145 if (va >= i386_trunc_pdr((vm_offset_t) - 1)) 1146 break; 1147 va = i386_round_pdr(va + PAGE_SIZE); 1148 continue; 1149 } 1150 pte = ptp + i386_btop(va); 1151 1152 if (*pte == 0) { 1153 /* 1154 * scan for a non-empty pte 1155 */ 1156 svap = pte - ptp; 1157 nscan = ((svap + NPTEPG) & ~(NPTEPG - 1)) - svap; 1158 1159 if (nscan + svap > evap) 1160 nscan = evap - svap; 1161 1162 found = 0; 1163 if (nscan) 1164 asm("xorl %%eax,%%eax;cld;repe;scasl;jz 1f;incl %%eax;1:;" : 1165 "=D"(pte), "=a"(found) : "c"(nscan), "0"(pte) : "cx"); 1166 1167 if (!found) 1168 goto nextpde; 1169 1170 pte -= 1; 1171 svap = pte - ptp; 1172 1173 va = i386_ptob(svap); 1174 } 1175 anyvalid++; 1176 1177 i386prot = pte_prot(pmap, prot); 1178 if (va < UPT_MAX_ADDRESS) { 1179 i386prot |= PG_u; 1180 if (va >= UPT_MIN_ADDRESS) 1181 i386prot |= PG_RW; 1182 } 1183 pmap_pte_set_prot(pte, i386prot); 1184 va += PAGE_SIZE; 1185 } 1186 if (anyvalid) 1187 pmap_update(); 1188} 1189 1190/* 1191 * Insert the given physical page (p) at 1192 * the specified virtual address (v) in the 1193 * target physical map with the protection requested. 1194 * 1195 * If specified, the page will be wired down, meaning 1196 * that the related pte can not be reclaimed. 1197 * 1198 * NB: This is the only routine which MAY NOT lazy-evaluate 1199 * or lose information. That is, this routine must actually 1200 * insert this page into the given map NOW. 
1201 */ 1202void 1203pmap_enter(pmap, va, pa, prot, wired) 1204 register pmap_t pmap; 1205 vm_offset_t va; 1206 register vm_offset_t pa; 1207 vm_prot_t prot; 1208 boolean_t wired; 1209{ 1210 register pt_entry_t *pte; 1211 register pt_entry_t npte; 1212 vm_offset_t opa; 1213 int ptevalid = 0; 1214 1215 if (pmap == NULL) 1216 return; 1217 1218 va = trunc_page(va); 1219 pa = trunc_page(pa); 1220 if (va > VM_MAX_KERNEL_ADDRESS) 1221 panic("pmap_enter: toobig"); 1222 1223 /* 1224 * Page Directory table entry not valid, we need a new PT page 1225 */ 1226 if (*pmap_pde(pmap, va) == 0) { 1227 printf("kernel page directory invalid pdir=%p, va=0x%lx\n", 1228 pmap->pm_pdir[PTDPTDI], va); 1229 panic("invalid kernel page directory"); 1230 } 1231 pte = pmap_pte(pmap, va); 1232 opa = pmap_pte_pa(pte); 1233 1234 /* 1235 * Mapping has not changed, must be protection or wiring change. 1236 */ 1237 if (opa == pa) { 1238 /* 1239 * Wiring change, just update stats. We don't worry about 1240 * wiring PT pages as they remain resident as long as there 1241 * are valid mappings in them. Hence, if a user page is wired, 1242 * the PT page will be also. 1243 */ 1244 if (wired && !pmap_pte_w(pte)) 1245 pmap->pm_stats.wired_count++; 1246 else if (!wired && pmap_pte_w(pte)) 1247 pmap->pm_stats.wired_count--; 1248 1249 goto validate; 1250 } 1251 /* 1252 * Mapping has changed, invalidate old range and fall through to 1253 * handle validating new mapping. 1254 */ 1255 if (opa) { 1256 pmap_remove(pmap, va, va + PAGE_SIZE); 1257 } 1258 /* 1259 * Enter on the PV list if part of our managed memory Note that we 1260 * raise IPL while manipulating pv_table since pmap_enter can be 1261 * called at interrupt time. 1262 */ 1263 if (pmap_is_managed(pa)) { 1264 register pv_entry_t pv, npv; 1265 int s; 1266 1267 pv = pa_to_pvh(pa); 1268 s = splhigh(); 1269 /* 1270 * No entries yet, use header as the first entry 1271 */ 1272 if (pv->pv_pmap == NULL) { 1273 pv->pv_va = va; 1274 pv->pv_pmap = pmap; 1275 pv->pv_next = NULL; 1276 } 1277 /* 1278 * There is at least one other VA mapping this page. Place 1279 * this entry after the header. 1280 */ 1281 else { 1282 npv = get_pv_entry(); 1283 npv->pv_va = va; 1284 npv->pv_pmap = pmap; 1285 npv->pv_next = pv->pv_next; 1286 pv->pv_next = npv; 1287 } 1288 splx(s); 1289 } 1290 1291 /* 1292 * Increment counters 1293 */ 1294 pmap->pm_stats.resident_count++; 1295 if (wired) 1296 pmap->pm_stats.wired_count++; 1297 1298validate: 1299 /* 1300 * Now validate mapping with desired protection/wiring. 1301 */ 1302 npte = (pt_entry_t) ((int) (pa | pte_prot(pmap, prot) | PG_V)); 1303 1304 /* 1305 * When forking (copy-on-write, etc): A process will turn off write 1306 * permissions for any of its writable pages. If the data (object) is 1307 * only referred to by one process, the processes map is modified 1308 * directly as opposed to using the object manipulation routine. When 1309 * using pmap_protect, the modified bits are not kept in the vm_page_t 1310 * data structure. Therefore, when using pmap_enter in vm_fault to 1311 * bring back writability of a page, there has been no memory of the 1312 * modified or referenced bits except at the pte level. this clause 1313 * supports the carryover of the modified and used (referenced) bits. 
1314 */ 1315 if (pa == opa) 1316 (int) npte |= (int) *pte & (PG_M | PG_U); 1317 1318 if (wired) 1319 (int) npte |= PG_W; 1320 if (va < UPT_MIN_ADDRESS) 1321 (int) npte |= PG_u; 1322 else if (va < UPT_MAX_ADDRESS) 1323 (int) npte |= PG_u | PG_RW; 1324 1325 if (*pte != npte) { 1326 if (*pte) 1327 ptevalid++; 1328 *pte = npte; 1329 } 1330 if (ptevalid) { 1331 pmap_update_1pg(va); 1332 } else { 1333 pmap_use_pt(pmap, va); 1334 } 1335} 1336 1337/* 1338 * Add a list of wired pages to the kva 1339 * this routine is only used for temporary 1340 * kernel mappings that do not need to have 1341 * page modification or references recorded. 1342 * Note that old mappings are simply written 1343 * over. The page *must* be wired. 1344 */ 1345void 1346pmap_qenter(va, m, count) 1347 vm_offset_t va; 1348 vm_page_t *m; 1349 int count; 1350{ 1351 int i; 1352 int anyvalid = 0; 1353 register pt_entry_t *pte; 1354 1355 for (i = 0; i < count; i++) { 1356 vm_offset_t tva = va + i * PAGE_SIZE; 1357 pt_entry_t npte = (pt_entry_t) ((int) (VM_PAGE_TO_PHYS(m[i]) | PG_RW | PG_V)); 1358 pte = vtopte(tva); 1359 if (*pte && (*pte != npte)) 1360 pmap_update_1pg(tva); 1361 *pte = npte; 1362 } 1363} 1364/* 1365 * this routine jerks page mappings from the 1366 * kernel -- it is meant only for temporary mappings. 1367 */ 1368void 1369pmap_qremove(va, count) 1370 vm_offset_t va; 1371 int count; 1372{ 1373 int i; 1374 register pt_entry_t *pte; 1375 1376 for (i = 0; i < count; i++) { 1377 vm_offset_t tva = va + i * PAGE_SIZE; 1378 pte = vtopte(tva); 1379 *pte = 0; 1380 pmap_update_1pg(tva); 1381 } 1382} 1383 1384/* 1385 * add a wired page to the kva 1386 * note that in order for the mapping to take effect -- you 1387 * should do a pmap_update after doing the pmap_kenter... 1388 */ 1389void 1390pmap_kenter(va, pa) 1391 vm_offset_t va; 1392 register vm_offset_t pa; 1393{ 1394 register pt_entry_t *pte; 1395 int wasvalid = 0; 1396 1397 pte = vtopte(va); 1398 1399 if (*pte) 1400 wasvalid++; 1401 1402 *pte = (pt_entry_t) ((int) (pa | PG_RW | PG_V)); 1403 1404 if (wasvalid) 1405 pmap_update_1pg(va); 1406} 1407 1408/* 1409 * remove a page from the kernel pagetables 1410 */ 1411void 1412pmap_kremove(va) 1413 vm_offset_t va; 1414{ 1415 register pt_entry_t *pte; 1416 1417 pte = vtopte(va); 1418 1419 *pte = (pt_entry_t) 0; 1420 pmap_update_1pg(va); 1421} 1422 1423/* 1424 * this code makes some *MAJOR* assumptions: 1425 * 1. Current pmap & pmap exists. 1426 * 2. Not wired. 1427 * 3. Read access. 1428 * 4. No page table pages. 1429 * 5. Tlbflush is deferred to calling procedure. 1430 * 6. Page IS managed. 1431 * but is *MUCH* faster than pmap_enter... 1432 */ 1433 1434static __inline void 1435pmap_enter_quick(pmap, va, pa) 1436 register pmap_t pmap; 1437 vm_offset_t va; 1438 register vm_offset_t pa; 1439{ 1440 register pt_entry_t *pte; 1441 register pv_entry_t pv, npv; 1442 int s; 1443 1444 /* 1445 * Enter on the PV list if part of our managed memory Note that we 1446 * raise IPL while manipulating pv_table since pmap_enter can be 1447 * called at interrupt time. 1448 */ 1449 1450 pte = vtopte(va); 1451 1452 /* a fault on the page table might occur here */ 1453 if (*pte) { 1454 pmap_remove(pmap, va, va + PAGE_SIZE); 1455 } 1456 pv = pa_to_pvh(pa); 1457 s = splhigh(); 1458 /* 1459 * No entries yet, use header as the first entry 1460 */ 1461 if (pv->pv_pmap == NULL) { 1462 pv->pv_pmap = pmap; 1463 pv->pv_va = va; 1464 pv->pv_next = NULL; 1465 } 1466 /* 1467 * There is at least one other VA mapping this page. 
Place this entry 1468 * after the header. 1469 */ 1470 else { 1471 npv = get_pv_entry(); 1472 npv->pv_va = va; 1473 npv->pv_pmap = pmap; 1474 npv->pv_next = pv->pv_next; 1475 pv->pv_next = npv; 1476 } 1477 splx(s); 1478 1479 /* 1480 * Increment counters 1481 */ 1482 pmap->pm_stats.resident_count++; 1483 1484 /* 1485 * Now validate mapping with desired protection/wiring. 1486 */ 1487 *pte = (pt_entry_t) ((int) (pa | PG_V | PG_u)); 1488 1489 pmap_use_pt(pmap, va); 1490 1491 return; 1492} 1493 1494#define MAX_INIT_PT (512) 1495/* 1496 * pmap_object_init_pt preloads the ptes for a given object 1497 * into the specified pmap. This eliminates the blast of soft 1498 * faults on process startup and immediately after an mmap. 1499 */ 1500void 1501pmap_object_init_pt(pmap, addr, object, pindex, size) 1502 pmap_t pmap; 1503 vm_offset_t addr; 1504 vm_object_t object; 1505 vm_pindex_t pindex; 1506 vm_size_t size; 1507{ 1508 vm_offset_t tmpidx; 1509 int psize; 1510 vm_page_t p; 1511 int objpgs; 1512 1513 psize = (size >> PAGE_SHIFT); 1514 1515 if (!pmap || ((psize > MAX_INIT_PT) && 1516 (object->resident_page_count > MAX_INIT_PT))) { 1517 return; 1518 } 1519 1520 /* 1521 * if we are processing a major portion of the object, then scan the 1522 * entire thing. 1523 */ 1524 if (psize > (object->size >> 2)) { 1525 objpgs = psize; 1526 1527 for (p = object->memq.tqh_first; 1528 ((objpgs > 0) && (p != NULL)); 1529 p = p->listq.tqe_next) { 1530 1531 tmpidx = p->pindex; 1532 if (tmpidx < pindex) { 1533 continue; 1534 } 1535 tmpidx -= pindex; 1536 if (tmpidx >= psize) { 1537 continue; 1538 } 1539 if (((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) && 1540 (p->busy == 0) && 1541 (p->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) { 1542 if (p->queue == PQ_CACHE) 1543 vm_page_deactivate(p); 1544 vm_page_hold(p); 1545 p->flags |= PG_MAPPED; 1546 pmap_enter_quick(pmap, 1547 addr + (tmpidx << PAGE_SHIFT), 1548 VM_PAGE_TO_PHYS(p)); 1549 vm_page_unhold(p); 1550 } 1551 objpgs -= 1; 1552 } 1553 } else { 1554 /* 1555 * else lookup the pages one-by-one. 1556 */ 1557 for (tmpidx = 0; tmpidx < psize; tmpidx += 1) { 1558 p = vm_page_lookup(object, tmpidx + pindex); 1559 if (p && (p->busy == 0) && 1560 ((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) && 1561 (p->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) { 1562 if (p->queue == PQ_CACHE) 1563 vm_page_deactivate(p); 1564 vm_page_hold(p); 1565 p->flags |= PG_MAPPED; 1566 pmap_enter_quick(pmap, 1567 addr + (tmpidx << PAGE_SHIFT), 1568 VM_PAGE_TO_PHYS(p)); 1569 vm_page_unhold(p); 1570 } 1571 } 1572 } 1573} 1574 1575/* 1576 * pmap_prefault provides a quick way of clustering 1577 * pagefaults into a processes address space. It is a "cousin" 1578 * of pmap_object_init_pt, except it runs at page fault time instead 1579 * of mmap time. 
1580 */ 1581#define PFBAK 2 1582#define PFFOR 2 1583#define PAGEORDER_SIZE (PFBAK+PFFOR) 1584 1585static int pmap_prefault_pageorder[] = { 1586 -NBPG, NBPG, -2 * NBPG, 2 * NBPG 1587}; 1588 1589static void 1590pmap_prefault(pmap, addra, entry, object) 1591 pmap_t pmap; 1592 vm_offset_t addra; 1593 vm_map_entry_t entry; 1594 vm_object_t object; 1595{ 1596 int i; 1597 vm_offset_t starta; 1598 vm_offset_t addr; 1599 vm_pindex_t pindex; 1600 vm_page_t m; 1601 int pageorder_index; 1602 1603 if (entry->object.vm_object != object) 1604 return; 1605 1606 if (!curproc || (pmap != &curproc->p_vmspace->vm_pmap)) 1607 return; 1608 1609 starta = addra - PFBAK * PAGE_SIZE; 1610 if (starta < entry->start) { 1611 starta = entry->start; 1612 } else if (starta > addra) { 1613 starta = 0; 1614 } 1615 1616 for (i = 0; i < PAGEORDER_SIZE; i++) { 1617 vm_object_t lobject; 1618 pt_entry_t *pte; 1619 1620 addr = addra + pmap_prefault_pageorder[i]; 1621 if (addr < starta || addr >= entry->end) 1622 continue; 1623 1624 pte = vtopte(addr); 1625 if (*pte) 1626 continue; 1627 1628 pindex = ((addr - entry->start) + entry->offset) >> PAGE_SHIFT; 1629 lobject = object; 1630 for (m = vm_page_lookup(lobject, pindex); 1631 (!m && (lobject->type == OBJT_DEFAULT) && (lobject->backing_object)); 1632 lobject = lobject->backing_object) { 1633 if (lobject->backing_object_offset & (PAGE_MASK-1)) 1634 break; 1635 pindex += (lobject->backing_object_offset >> PAGE_SHIFT); 1636 m = vm_page_lookup(lobject->backing_object, pindex); 1637 } 1638 1639 /* 1640 * give-up when a page is not in memory 1641 */ 1642 if (m == NULL) 1643 break; 1644 1645 if (((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) && 1646 (m->busy == 0) && 1647 (m->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) { 1648 1649 if (m->queue == PQ_CACHE) { 1650 if (cnt.v_free_count + cnt.v_cache_count < 1651 cnt.v_free_min) 1652 break; 1653 vm_page_deactivate(m); 1654 } 1655 vm_page_hold(m); 1656 m->flags |= PG_MAPPED; 1657 pmap_enter_quick(pmap, addr, VM_PAGE_TO_PHYS(m)); 1658 vm_page_unhold(m); 1659 } 1660 } 1661} 1662 1663/* 1664 * Routine: pmap_change_wiring 1665 * Function: Change the wiring attribute for a map/virtual-address 1666 * pair. 1667 * In/out conditions: 1668 * The mapping must already exist in the pmap. 1669 */ 1670void 1671pmap_change_wiring(pmap, va, wired) 1672 register pmap_t pmap; 1673 vm_offset_t va; 1674 boolean_t wired; 1675{ 1676 register pt_entry_t *pte; 1677 1678 if (pmap == NULL) 1679 return; 1680 1681 pte = pmap_pte(pmap, va); 1682 1683 if (wired && !pmap_pte_w(pte)) 1684 pmap->pm_stats.wired_count++; 1685 else if (!wired && pmap_pte_w(pte)) 1686 pmap->pm_stats.wired_count--; 1687 1688 /* 1689 * Wiring is not a hardware characteristic so there is no need to 1690 * invalidate TLB. 1691 */ 1692 pmap_pte_set_w(pte, wired); 1693} 1694 1695 1696 1697/* 1698 * Copy the range specified by src_addr/len 1699 * from the source map to the range dst_addr/len 1700 * in the destination map. 1701 * 1702 * This routine is only advisory and need not do anything. 1703 */ 1704void 1705pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr) 1706 pmap_t dst_pmap, src_pmap; 1707 vm_offset_t dst_addr; 1708 vm_size_t len; 1709 vm_offset_t src_addr; 1710{ 1711} 1712 1713/* 1714 * Routine: pmap_kernel 1715 * Function: 1716 * Returns the physical map handle for the kernel. 
1717 */ 1718pmap_t 1719pmap_kernel() 1720{ 1721 return (kernel_pmap); 1722} 1723 1724/* 1725 * pmap_zero_page zeros the specified (machine independent) 1726 * page by mapping the page into virtual memory and using 1727 * bzero to clear its contents, one machine dependent page 1728 * at a time. 1729 */ 1730void 1731pmap_zero_page(phys) 1732 vm_offset_t phys; 1733{ 1734 if (*(int *) CMAP2) 1735 panic("pmap_zero_page: CMAP busy"); 1736 1737 *(int *) CMAP2 = PG_V | PG_KW | trunc_page(phys); 1738 bzero(CADDR2, PAGE_SIZE); 1739 1740 *(int *) CMAP2 = 0; 1741 pmap_update_1pg((vm_offset_t) CADDR2); 1742} 1743 1744/* 1745 * pmap_copy_page copies the specified (machine independent) 1746 * page by mapping the page into virtual memory and using 1747 * bcopy to copy the page, one machine dependent page at a 1748 * time. 1749 */ 1750void 1751pmap_copy_page(src, dst) 1752 vm_offset_t src; 1753 vm_offset_t dst; 1754{ 1755 if (*(int *) CMAP1 || *(int *) CMAP2) 1756 panic("pmap_copy_page: CMAP busy"); 1757 1758 *(int *) CMAP1 = PG_V | PG_KW | trunc_page(src); 1759 *(int *) CMAP2 = PG_V | PG_KW | trunc_page(dst); 1760 1761#if __GNUC__ > 1 1762 memcpy(CADDR2, CADDR1, PAGE_SIZE); 1763#else 1764 bcopy(CADDR1, CADDR2, PAGE_SIZE); 1765#endif 1766 *(int *) CMAP1 = 0; 1767 *(int *) CMAP2 = 0; 1768 pmap_update_2pg( (vm_offset_t) CADDR1, (vm_offset_t) CADDR2); 1769} 1770 1771 1772/* 1773 * Routine: pmap_pageable 1774 * Function: 1775 * Make the specified pages (by pmap, offset) 1776 * pageable (or not) as requested. 1777 * 1778 * A page which is not pageable may not take 1779 * a fault; therefore, its page table entry 1780 * must remain valid for the duration. 1781 * 1782 * This routine is merely advisory; pmap_enter 1783 * will specify that these pages are to be wired 1784 * down (or not) as appropriate. 1785 */ 1786void 1787pmap_pageable(pmap, sva, eva, pageable) 1788 pmap_t pmap; 1789 vm_offset_t sva, eva; 1790 boolean_t pageable; 1791{ 1792} 1793 1794/* 1795 * this routine returns true if a physical page resides 1796 * in the given pmap. 1797 */ 1798boolean_t 1799pmap_page_exists(pmap, pa) 1800 pmap_t pmap; 1801 vm_offset_t pa; 1802{ 1803 register pv_entry_t pv; 1804 int s; 1805 1806 if (!pmap_is_managed(pa)) 1807 return FALSE; 1808 1809 pv = pa_to_pvh(pa); 1810 s = splhigh(); 1811 1812 /* 1813 * Not found, check current mappings returning immediately if found. 1814 */ 1815 if (pv->pv_pmap != NULL) { 1816 for (; pv; pv = pv->pv_next) { 1817 if (pv->pv_pmap == pmap) { 1818 splx(s); 1819 return TRUE; 1820 } 1821 } 1822 } 1823 splx(s); 1824 return (FALSE); 1825} 1826 1827/* 1828 * pmap_testbit tests bits in pte's 1829 * note that the testbit/changebit routines are inline, 1830 * and a lot of things compile-time evaluate. 1831 */ 1832static __inline boolean_t 1833pmap_testbit(pa, bit) 1834 register vm_offset_t pa; 1835 int bit; 1836{ 1837 register pv_entry_t pv; 1838 pt_entry_t *pte; 1839 int s; 1840 1841 if (!pmap_is_managed(pa)) 1842 return FALSE; 1843 1844 pv = pa_to_pvh(pa); 1845 s = splhigh(); 1846 1847 /* 1848 * Not found, check current mappings returning immediately if found. 1849 */ 1850 if (pv->pv_pmap != NULL) { 1851 for (; pv; pv = pv->pv_next) { 1852 /* 1853 * if the bit being tested is the modified bit, then 1854 * mark UPAGES as always modified, and ptes as never 1855 * modified. 
1856 */ 1857 if (bit & (PG_U|PG_M)) { 1858 if ((pv->pv_va >= clean_sva) && (pv->pv_va < clean_eva)) { 1859 continue; 1860 } 1861 } 1862 if (!pv->pv_pmap) { 1863 printf("Null pmap (tb) at va: 0x%lx\n", pv->pv_va); 1864 continue; 1865 } 1866 pte = pmap_pte(pv->pv_pmap, pv->pv_va); 1867 if ((int) *pte & bit) { 1868 splx(s); 1869 return TRUE; 1870 } 1871 } 1872 } 1873 splx(s); 1874 return (FALSE); 1875} 1876 1877/* 1878 * this routine is used to modify bits in ptes 1879 */ 1880static __inline void 1881pmap_changebit(pa, bit, setem) 1882 vm_offset_t pa; 1883 int bit; 1884 boolean_t setem; 1885{ 1886 register pv_entry_t pv; 1887 register pt_entry_t *pte, npte; 1888 vm_offset_t va; 1889 int changed; 1890 int s; 1891 1892 if (!pmap_is_managed(pa)) 1893 return; 1894 1895 pv = pa_to_pvh(pa); 1896 s = splhigh(); 1897 1898 /* 1899 * Loop over all current mappings setting/clearing as appropos If 1900 * setting RO do we need to clear the VAC? 1901 */ 1902 if (pv->pv_pmap != NULL) { 1903 for (; pv; pv = pv->pv_next) { 1904 va = pv->pv_va; 1905 1906 /* 1907 * don't write protect pager mappings 1908 */ 1909 if (!setem && (bit == PG_RW)) { 1910 if (va >= clean_sva && va < clean_eva) 1911 continue; 1912 } 1913 if (!pv->pv_pmap) { 1914 printf("Null pmap (cb) at va: 0x%lx\n", va); 1915 continue; 1916 } 1917 pte = pmap_pte(pv->pv_pmap, va); 1918 if (setem) { 1919 (int) npte = (int) *pte | bit; 1920 } else { 1921 (int) npte = (int) *pte & ~bit; 1922 } 1923 *pte = npte; 1924 } 1925 } 1926 splx(s); 1927 if (curproc != pageproc) 1928 pmap_update(); 1929} 1930 1931/* 1932 * pmap_page_protect: 1933 * 1934 * Lower the permission for all mappings to a given page. 1935 */ 1936void 1937pmap_page_protect(phys, prot) 1938 vm_offset_t phys; 1939 vm_prot_t prot; 1940{ 1941 if ((prot & VM_PROT_WRITE) == 0) { 1942 if (prot & (VM_PROT_READ | VM_PROT_EXECUTE)) 1943 pmap_changebit(phys, PG_RW, FALSE); 1944 else 1945 pmap_remove_all(phys); 1946 } 1947} 1948 1949vm_offset_t 1950pmap_phys_address(ppn) 1951 int ppn; 1952{ 1953 return (i386_ptob(ppn)); 1954} 1955 1956/* 1957 * pmap_is_referenced: 1958 * 1959 * Return whether or not the specified physical page was referenced 1960 * by any physical maps. 1961 */ 1962boolean_t 1963pmap_is_referenced(vm_offset_t pa) 1964{ 1965 return pmap_testbit((pa), PG_U); 1966} 1967 1968/* 1969 * pmap_is_modified: 1970 * 1971 * Return whether or not the specified physical page was modified 1972 * in any physical maps. 1973 */ 1974boolean_t 1975pmap_is_modified(vm_offset_t pa) 1976{ 1977 return pmap_testbit((pa), PG_M); 1978} 1979 1980/* 1981 * Clear the modify bits on the specified physical page. 1982 */ 1983void 1984pmap_clear_modify(vm_offset_t pa) 1985{ 1986 pmap_changebit((pa), PG_M, FALSE); 1987} 1988 1989/* 1990 * pmap_clear_reference: 1991 * 1992 * Clear the reference bit on the specified physical page. 1993 */ 1994void 1995pmap_clear_reference(vm_offset_t pa) 1996{ 1997 pmap_changebit((pa), PG_U, FALSE); 1998} 1999 2000/* 2001 * Miscellaneous support routines follow 2002 */ 2003 2004static void 2005i386_protection_init() 2006{ 2007 register int *kp, prot; 2008 2009 kp = protection_codes; 2010 for (prot = 0; prot < 8; prot++) { 2011 switch (prot) { 2012 case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_NONE: 2013 /* 2014 * Read access is also 0. There isn't any execute bit, 2015 * so just make it readable. 
2016 */ 2017 case VM_PROT_READ | VM_PROT_NONE | VM_PROT_NONE: 2018 case VM_PROT_READ | VM_PROT_NONE | VM_PROT_EXECUTE: 2019 case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_EXECUTE: 2020 *kp++ = 0; 2021 break; 2022 case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_NONE: 2023 case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_EXECUTE: 2024 case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_NONE: 2025 case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE: 2026 *kp++ = PG_RW; 2027 break; 2028 } 2029 } 2030} 2031 2032/* 2033 * Map a set of physical memory pages into the kernel virtual 2034 * address space. Return a pointer to where it is mapped. This 2035 * routine is intended to be used for mapping device memory, 2036 * NOT real memory. The non-cacheable bits are set on each 2037 * mapped page. 2038 */ 2039void * 2040pmap_mapdev(pa, size) 2041 vm_offset_t pa; 2042 vm_size_t size; 2043{ 2044 vm_offset_t va, tmpva; 2045 pt_entry_t *pte; 2046 2047 pa = trunc_page(pa); 2048 size = roundup(size, PAGE_SIZE); 2049 2050 va = kmem_alloc_pageable(kernel_map, size); 2051 if (!va) 2052 panic("pmap_mapdev: Couldn't alloc kernel virtual memory"); 2053 2054 for (tmpva = va; size > 0;) { 2055 pte = vtopte(tmpva); 2056 *pte = (pt_entry_t) ((int) (pa | PG_RW | PG_V | PG_N)); 2057 size -= PAGE_SIZE; 2058 tmpva += PAGE_SIZE; 2059 pa += PAGE_SIZE; 2060 } 2061 pmap_update(); 2062 2063 return ((void *) va); 2064} 2065 2066#ifdef PMAP_DEBUG 2067pmap_pid_dump(int pid) { 2068 pmap_t pmap; 2069 struct proc *p; 2070 int npte = 0; 2071 int index; 2072 for (p = (struct proc *) allproc; p != NULL; p = p->p_next) { 2073 if (p->p_pid != pid) 2074 continue; 2075 2076 if (p->p_vmspace) { 2077 int i,j; 2078 index = 0; 2079 pmap = &p->p_vmspace->vm_pmap; 2080 for(i=0;i<1024;i++) { 2081 pd_entry_t *pde; 2082 pt_entry_t *pte; 2083 unsigned base = i << PD_SHIFT; 2084 2085 pde = &pmap->pm_pdir[i]; 2086 if (pde && pmap_pde_v(pde)) { 2087 for(j=0;j<1024;j++) { 2088 unsigned va = base + (j << PG_SHIFT); 2089 if (va >= (vm_offset_t) VM_MIN_KERNEL_ADDRESS) { 2090 if (index) { 2091 index = 0; 2092 printf("\n"); 2093 } 2094 return npte; 2095 } 2096 pte = pmap_pte( pmap, va); 2097 if (pte && pmap_pte_v(pte)) { 2098 vm_offset_t pa; 2099 vm_page_t m; 2100 pa = *(int *)pte; 2101 m = PHYS_TO_VM_PAGE((pa & PG_FRAME)); 2102 printf("va: 0x%x, pt: 0x%x, h: %d, w: %d, f: 0x%x", 2103 va, pa, m->hold_count, m->wire_count, m->flags); 2104 npte++; 2105 index++; 2106 if (index >= 2) { 2107 index = 0; 2108 printf("\n"); 2109 } else { 2110 printf(" "); 2111 } 2112 } 2113 } 2114 } 2115 } 2116 } 2117 } 2118 return npte; 2119} 2120#endif 2121 2122#ifdef DEBUG 2123 2124static void pads __P((pmap_t pm)); 2125static void pmap_pvdump __P((vm_offset_t pa)); 2126 2127/* print address space of pmap*/ 2128static void 2129pads(pm) 2130 pmap_t pm; 2131{ 2132 unsigned va, i, j; 2133 pt_entry_t *ptep; 2134 2135 if (pm == kernel_pmap) 2136 return; 2137 for (i = 0; i < 1024; i++) 2138 if (pm->pm_pdir[i]) 2139 for (j = 0; j < 1024; j++) { 2140 va = (i << PD_SHIFT) + (j << PG_SHIFT); 2141 if (pm == kernel_pmap && va < KERNBASE) 2142 continue; 2143 if (pm != kernel_pmap && va > UPT_MAX_ADDRESS) 2144 continue; 2145 ptep = pmap_pte(pm, va); 2146 if (pmap_pte_v(ptep)) 2147 printf("%x:%x ", va, *(int *) ptep); 2148 }; 2149 2150} 2151 2152static void 2153pmap_pvdump(pa) 2154 vm_offset_t pa; 2155{ 2156 register pv_entry_t pv; 2157 2158 printf("pa %x", pa); 2159 for (pv = pa_to_pvh(pa); pv; pv = pv->pv_next) { 2160#ifdef used_to_be 2161 printf(" -> pmap %x, va %x, flags %x", 2162 pv->pv_pmap, pv->pv_va, 
pv->pv_flags); 2163#endif 2164 printf(" -> pmap %x, va %x", 2165 pv->pv_pmap, pv->pv_va); 2166 pads(pv->pv_pmap); 2167 } 2168 printf(" "); 2169} 2170#endif 2171
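
A minimal user-space sketch of the recursive-map arithmetic that the header comment describes and that vtopte()/pmap_pte() above depend on: one page-directory entry points back at the page directory itself, so every PTE of the current address space becomes visible through a fixed linear window of kernel virtual addresses. The constants below are illustrative stand-ins rather than the real values from <machine/pmap.h>; SELF_REF_INDEX plays the role of PTDPTDI, and 4 KB pages with 1024 four-byte PTEs per page table are assumed.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12		/* assumed: 4 KB pages */
#define PDR_SHIFT	22		/* assumed: 4 MB of VA per page-directory entry */
#define SELF_REF_INDEX	0x3ffUL		/* illustrative stand-in for PTDPTDI */

/*
 * With pm_pdir[SELF_REF_INDEX] pointing back at the page directory,
 * the page tables appear as one linear array of four-byte PTEs that
 * starts at PT_BASE; the page directory itself shows up inside that
 * window at PT_BASE + (SELF_REF_INDEX << PAGE_SHIFT).
 */
#define PT_BASE		(SELF_REF_INDEX << PDR_SHIFT)

static uintptr_t
pte_va(uintptr_t va)
{
	/* counterpart of vtopte(): index the linear PTE window by the VPN */
	return (PT_BASE + ((va >> PAGE_SHIFT) * sizeof(uint32_t)));
}

int
main(void)
{
	uintptr_t va = 0x08048000UL;	/* arbitrary example address */

	printf("PTE for 0x%08lx would live at VA 0x%08lx\n",
	    (unsigned long)va, (unsigned long)pte_va(va));
	return (0);
}

In the kernel proper the same trick provides two such windows: pmap_pte() compares pm_pdir[PTDPTDI] with PTDpde to decide whether the current window (vtopte) can be used directly, or whether the alternate window (APTDpde/avtopte) must first be pointed at the other address space.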