pmap.c revision 528
/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from:	@(#)pmap.c	7.7 (Berkeley)	5/12/91
 * $Id$
 */
static char rcsid[] = "$Id$";

/*
 * Derived from hp300 version by Mike Hibler, this version by William
 * Jolitz uses a recursive map [a pde points to the page directory] to
 * map the page tables using the pagetables themselves.  This is done to
 * reduce the impact on kernel virtual memory for lots of sparse address
 * space, and to reduce the cost of memory to each process.
 *
 * Derived from: hp300/@(#)pmap.c	7.1 (Berkeley) 12/5/90
 */

/*
 * Reno i386 version, from Mike Hibler's hp300 version.
 */

/*
 * Manages physical address maps.
 *
 * In addition to hardware address maps, this
 * module is called upon to provide software-use-only
 * maps which may or may not be stored in the same
 * form as hardware maps.  These pseudo-maps are
 * used to store intermediate results from copy
 * operations to and from address spaces.
 *
 * Since the information managed by this module is
 * also stored by the logical address mapping module,
 * this module may throw away valid virtual-to-physical
 * mappings at almost any time.  However, invalidations
 * of virtual-to-physical mappings must be done as
 * requested.
 *
 * In order to cope with hardware architectures which
 * make virtual-to-physical map invalidates expensive,
 * this module may delay invalidate or reduced protection
 * operations until such time as they are actually
 * necessary.  This module is given full information as
 * to which processors are currently using which maps,
 * and to when physical maps must be made correct.
 */
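
/*
 * Editor's note (a minimal sketch, not part of the original source):
 * what the recursive map described above buys us.  With the 386's
 * two-level 10/10/12 address split, installing the page directory's
 * own physical address in directory slot PTDPTDI makes every PTE
 * addressable at a fixed virtual address, with no extra mapping work:
 *
 *	pte_va = (PTDPTDI << 22) + ((va >> 12) * sizeof(struct pte));
 *
 * This is the arithmetic behind the PTmap/vtopte() machinery used
 * throughout this file; the names above are the ones used below.
 */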

#include "param.h"
#include "proc.h"
#include "malloc.h"
#include "user.h"

#include "vm/vm.h"
#include "vm/vm_kern.h"
#include "vm/vm_page.h"
/*#include "vm/vm_pageout.h"*/

#include "i386/isa/isa.h"

/*
 * Allocate various and sundry SYSMAPs used in the days of old VM
 * and not yet converted.  XXX.
 */
#define BSDVM_COMPAT	1

#ifdef DEBUG
struct {
	int kernel;	/* entering kernel mapping */
	int user;	/* entering user mapping */
	int ptpneeded;	/* needed to allocate a PT page */
	int pwchange;	/* no mapping change, just wiring or protection */
	int wchange;	/* no mapping change, just wiring */
	int mchange;	/* was mapped but mapping to different page */
	int managed;	/* a managed page */
	int firstpv;	/* first mapping for this PA */
	int secondpv;	/* second mapping for this PA */
	int ci;		/* cache inhibited */
	int unmanaged;	/* not a managed page */
	int flushes;	/* cache flushes */
} enter_stats;
struct {
	int calls;
	int removes;
	int pvfirst;
	int pvsearch;
	int ptinvalid;
	int uflushes;
	int sflushes;
} remove_stats;

int debugmap = 0;
int pmapdebug = 0 /* 0xffff */;
#define PDB_FOLLOW	0x0001
#define PDB_INIT	0x0002
#define PDB_ENTER	0x0004
#define PDB_REMOVE	0x0008
#define PDB_CREATE	0x0010
#define PDB_PTPAGE	0x0020
#define PDB_CACHE	0x0040
#define PDB_BITS	0x0080
#define PDB_COLLECT	0x0100
#define PDB_PROTECT	0x0200
#define PDB_PDRTAB	0x0400
#define PDB_PARANOIA	0x2000
#define PDB_WIRING	0x4000
#define PDB_PVDUMP	0x8000

int pmapvacflush = 0;
#define PVF_ENTER	0x01
#define PVF_REMOVE	0x02
#define PVF_PROTECT	0x04
#define PVF_TOTAL	0x80
#endif

/*
 * Get PDEs and PTEs for user/kernel address space
 */
#define pmap_pde(m, v)	(&((m)->pm_pdir[((vm_offset_t)(v) >> PD_SHIFT)&1023]))

#define pmap_pte_pa(pte)	(*(int *)(pte) & PG_FRAME)

#define pmap_pde_v(pte)		((pte)->pd_v)
#define pmap_pte_w(pte)		((pte)->pg_w)
/* #define pmap_pte_ci(pte)	((pte)->pg_ci) */
#define pmap_pte_m(pte)		((pte)->pg_m)
#define pmap_pte_u(pte)		((pte)->pg_u)
#define pmap_pte_v(pte)		((pte)->pg_v)
#define pmap_pte_set_w(pte, v)		((pte)->pg_w = (v))
#define pmap_pte_set_prot(pte, v)	((pte)->pg_prot = (v))
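
/*
 * Editor's note (illustrative, not in the original): pmap_pde() simply
 * picks one of the 1024 directory slots by the top 10 address bits.
 * For example, with PD_SHIFT == 22, the kernel's linked base 0xFE000000
 * indexes slot (0xFE000000 >> 22) & 1023 == 0x3F8 (1016).
 */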

/*
 * Given a map and a machine independent protection code,
 * convert to an i386 protection code.
 */
#define pte_prot(m, p)	(protection_codes[p])
int	protection_codes[8];

struct pmap	kernel_pmap_store;
pmap_t		kernel_pmap;

vm_offset_t	avail_start;	/* PA of first available physical page */
vm_offset_t	avail_end;	/* PA of last available physical page */
vm_size_t	mem_size;	/* memory size in bytes */
vm_offset_t	virtual_avail;	/* VA of first avail page (after kernel bss)*/
vm_offset_t	virtual_end;	/* VA of last avail page (end of kernel AS) */
vm_offset_t	vm_first_phys;	/* PA of first managed page */
vm_offset_t	vm_last_phys;	/* PA just past last managed page */
int		i386pagesperpage;	/* PAGE_SIZE / I386_PAGE_SIZE */
boolean_t	pmap_initialized = FALSE;	/* Has pmap_init completed? */
char		*pmap_attributes;	/* reference and modify bits */

boolean_t	pmap_testbit();
void		pmap_clear_modify();

#if BSDVM_COMPAT
#include "msgbuf.h"

/*
 * All those kernel PT submaps that BSD is so fond of
 */
struct pte	*CMAP1, *CMAP2, *mmap;
caddr_t		CADDR1, CADDR2, vmmap;
struct pte	*msgbufmap;
struct msgbuf	*msgbufp;
#endif

/*
 * Bootstrap the system enough to run with virtual memory.
 * Map the kernel's code and data, and allocate the system page table.
 *
 * On the I386 this is called after mapping has already been enabled
 * and just syncs the pmap module with what has already been done.
 * [We can't call it easily with mapping off since the kernel is not
 * mapped with PA == VA, hence we would have to relocate every address
 * from the linked base (virtual) address 0xFE000000 to the actual
 * (physical) address starting relative to 0]
 */
struct pte *pmap_pte();

void
pmap_bootstrap(firstaddr, loadaddr)
	vm_offset_t firstaddr;
	vm_offset_t loadaddr;
{
#if BSDVM_COMPAT
	vm_offset_t va;
	struct pte *pte;
#endif
	extern vm_offset_t maxmem, physmem;
	extern int IdlePTD;

	avail_start = firstaddr + 8 * NBPG;
	avail_end = maxmem << PG_SHIFT;

	/* XXX: allow for msgbuf */
	avail_end -= i386_round_page(sizeof(struct msgbuf));

	mem_size = physmem << PG_SHIFT;
	virtual_avail = (vm_offset_t)atdevbase + 0x100000 - 0xa0000 + 10*NBPG;
	virtual_end = VM_MAX_KERNEL_ADDRESS;
	i386pagesperpage = PAGE_SIZE / I386_PAGE_SIZE;

	/*
	 * Initialize protection array.
	 */
	i386_protection_init();

	/*
	 * The kernel's pmap is statically allocated so we don't
	 * have to use pmap_create, which is unlikely to work
	 * correctly at this point of the boot sequence.
	 */
	kernel_pmap = &kernel_pmap_store;

#ifdef notdef
	/*
	 * Create Kernel page directory table and page maps.
	 * [ currently done in locore.  i have wild and crazy ideas -wfj ]
	 * XXX IF THIS IS EVER USED, IT MUST BE MOVED TO THE TOP
	 * OF THIS ROUTINE -- cgd
	 */
	bzero(firstaddr, 4*NBPG);
	kernel_pmap->pm_pdir = firstaddr + VM_MIN_KERNEL_ADDRESS;
	kernel_pmap->pm_ptab = firstaddr + VM_MIN_KERNEL_ADDRESS + NBPG;

	firstaddr += NBPG;
	for (x = i386_btod(VM_MIN_KERNEL_ADDRESS);
	     x < i386_btod(VM_MIN_KERNEL_ADDRESS)+3; x++) {
		struct pde *pde;
		pde = kernel_pmap->pm_pdir + x;
		*(int *)pde = firstaddr + x*NBPG | PG_V | PG_KW;
	}
#else
	kernel_pmap->pm_pdir = (pd_entry_t *)(0xfe000000 + IdlePTD);
#endif

	simple_lock_init(&kernel_pmap->pm_lock);
	kernel_pmap->pm_count = 1;

#if BSDVM_COMPAT
	/*
	 * Allocate all the submaps we need
	 */
#define	SYSMAP(c, p, v, n) \
	v = (c)va; va += ((n)*I386_PAGE_SIZE); p = pte; pte += (n);

	va = virtual_avail;
	pte = pmap_pte(kernel_pmap, va);

	SYSMAP(caddr_t		,CMAP1		,CADDR1	 ,1	)
	SYSMAP(caddr_t		,CMAP2		,CADDR2	 ,1	)
	SYSMAP(caddr_t		,mmap		,vmmap	 ,1	)
	SYSMAP(struct msgbuf *	,msgbufmap	,msgbufp ,1	)
	virtual_avail = va;
#endif
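
	/*
	 * Editor's note (illustrative, not in the original): each SYSMAP
	 * line above expands to plain pointer bookkeeping.  For instance
	 * SYSMAP(caddr_t, CMAP1, CADDR1, 1) becomes:
	 *
	 *	CADDR1 = (caddr_t)va; va += 1*I386_PAGE_SIZE;
	 *	CMAP1 = pte; pte += 1;
	 *
	 * i.e. it reserves one page of kernel VA and remembers the PTE
	 * that maps it, so the page can later be pointed at arbitrary
	 * physical memory.
	 */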
	/*
	 * Reserve a special hunk of memory for use by bus dma as a bounce
	 * buffer (contiguous virtual *and* physical memory).  Do it from
	 * firstaddr -> firstaddr+8 pages.  Note that avail_start was
	 * bumped up 8 pages, above, to accommodate this.
	 */
	{
		extern vm_offset_t isaphysmem;

		isaphysmem = va;
		virtual_avail = pmap_map(va, firstaddr, firstaddr + 8*NBPG,
		    VM_PROT_ALL);
	}

	*(int *)PTD = 0;
	load_cr3(rcr3());
}

/*
 * Initialize the pmap module.
 * Called by vm_init, to initialize any structures that the pmap
 * system needs to map virtual memory.
 */
void
pmap_init(phys_start, phys_end)
	vm_offset_t phys_start, phys_end;
{
	vm_offset_t addr, addr2;
	vm_size_t npg, s;
	int rv;
	extern int KPTphys;

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_init(%x, %x)\n", phys_start, phys_end);
#endif
	/*
	 * Now that kernel map has been allocated, we can mark as
	 * unavailable regions which we have mapped in locore.
	 */
	addr = atdevbase;
	(void) vm_map_find(kernel_map, NULL, (vm_offset_t) 0,
	    &addr, (0x100000-0xa0000), FALSE);

	addr = (vm_offset_t) 0xfe000000+KPTphys/* *NBPG */;
	vm_object_reference(kernel_object);
	(void) vm_map_find(kernel_map, kernel_object, addr,
	    &addr, 2*NBPG, FALSE);

	/*
	 * Allocate memory for random pmap data structures.  Includes the
	 * pv_head_table and pmap_attributes.
	 */
	npg = atop(phys_end - phys_start);
	s = (vm_size_t) (sizeof(struct pv_entry) * npg + npg);
	s = round_page(s);
	addr = (vm_offset_t) kmem_alloc(kernel_map, s);
	pv_table = (pv_entry_t) addr;
	addr += sizeof(struct pv_entry) * npg;
	pmap_attributes = (char *) addr;
#ifdef DEBUG
	if (pmapdebug & PDB_INIT)
		printf("pmap_init: %x bytes (%x pgs): tbl %x attr %x\n",
		    s, npg, pv_table, pmap_attributes);
#endif

	/*
	 * Now it is safe to enable pv_table recording.
	 */
	vm_first_phys = phys_start;
	vm_last_phys = phys_end;
	pmap_initialized = TRUE;
}
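
/*
 * Editor's note (illustrative, not in the original): the single
 * allocation in pmap_init() above carves out one pv_entry plus one
 * attribute byte per managed page, laid out back to back:
 *
 *	s = npg * sizeof(struct pv_entry)   -> pv_table[0..npg-1]
 *	  + npg * sizeof(char)              -> pmap_attributes[0..npg-1]
 *
 * so pa_index(pa) can index both arrays with the same value.
 */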

/*
 * Used to map a range of physical addresses into kernel
 * virtual address space.
 *
 * For now, VM is already on, we only need to map the
 * specified memory.
 */
vm_offset_t
pmap_map(virt, start, end, prot)
	vm_offset_t virt;
	vm_offset_t start;
	vm_offset_t end;
	int prot;
{
#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_map(%x, %x, %x, %x)\n", virt, start, end, prot);
#endif
	while (start < end) {
		pmap_enter(kernel_pmap, virt, start, prot, FALSE);
		virt += PAGE_SIZE;
		start += PAGE_SIZE;
	}
	return (virt);
}

/*
 * Create and return a physical map.
 *
 * If the size specified for the map
 * is zero, the map is an actual physical
 * map, and may be referenced by the
 * hardware.
 *
 * If the size specified is non-zero,
 * the map will be used in software only, and
 * is bounded by that size.
 *
 * [ just allocate a ptd and mark it uninitialized -- should we track
 *   with a table which process has which ptd? -wfj ]
 */
pmap_t
pmap_create(size)
	vm_size_t size;
{
	register pmap_t pmap;

#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_CREATE))
		printf("pmap_create(%x)\n", size);
#endif
	/*
	 * Software use map does not need a pmap
	 */
	if (size)
		return (NULL);

	/* XXX: is it ok to wait here? */
	pmap = (pmap_t) malloc(sizeof *pmap, M_VMPMAP, M_WAITOK);
#ifdef notifwewait
	if (pmap == NULL)
		panic("pmap_create: cannot allocate a pmap");
#endif
	bzero(pmap, sizeof(*pmap));
	pmap_pinit(pmap);
	return (pmap);
}

/*
 * Initialize a preallocated and zeroed pmap structure,
 * such as one in a vmspace structure.
 */
void
pmap_pinit(pmap)
	register struct pmap *pmap;
{

#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_CREATE))
		pg("pmap_pinit(%x)\n", pmap);
#endif

	/*
	 * No need to allocate page table space yet but we do need a
	 * valid page directory table.
	 */
	pmap->pm_pdir = (pd_entry_t *) kmem_alloc(kernel_map, NBPG);

	/* wire in kernel global address entries */
	bcopy(PTD+KPTDI_FIRST, pmap->pm_pdir+KPTDI_FIRST,
	    (KPTDI_LAST-KPTDI_FIRST+1)*4);

	/* install self-referential address mapping entry */
	*(int *)(pmap->pm_pdir+PTDPTDI) =
	    (int)pmap_extract(kernel_pmap, pmap->pm_pdir) | PG_V | PG_KW;

	pmap->pm_count = 1;
	simple_lock_init(&pmap->pm_lock);
}

/*
 * Retire the given physical map from service.
 * Should only be called if the map contains
 * no valid mappings.
 */
void
pmap_destroy(pmap)
	register pmap_t pmap;
{
	int count;

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_destroy(%x)\n", pmap);
#endif
	if (pmap == NULL)
		return;

	simple_lock(&pmap->pm_lock);
	count = --pmap->pm_count;
	simple_unlock(&pmap->pm_lock);
	if (count == 0) {
		pmap_release(pmap);
		free((caddr_t)pmap, M_VMPMAP);
	}
}

/*
 * Release any resources held by the given physical map.
 * Called when a pmap initialized by pmap_pinit is being released.
 * Should only be called if the map contains no valid mappings.
 */
void
pmap_release(pmap)
	register struct pmap *pmap;
{

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		pg("pmap_release(%x)\n", pmap);
#endif
#ifdef notdef /* DIAGNOSTIC */
	/* count would be 0 from pmap_destroy... */
	simple_lock(&pmap->pm_lock);
	if (pmap->pm_count != 1)
		panic("pmap_release count");
#endif
	kmem_free(kernel_map, (vm_offset_t)pmap->pm_pdir, NBPG);
}
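
/*
 * Editor's note (illustrative, not in the original): pmap_pinit() above
 * is where each new address space gets its own recursive mapping.  The
 * directory entry it installs is, spelled out:
 *
 *	pm_pdir[PTDPTDI] = physaddr(pm_pdir) | PG_V | PG_KW;
 *
 * Once this pmap is loaded into %cr3, its own page tables appear at
 * PTmap and its directory at PTD, exactly as for the kernel pmap.
 */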

/*
 * Add a reference to the specified pmap.
 */
void
pmap_reference(pmap)
	pmap_t pmap;
{
#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_reference(%x)", pmap);
#endif
	if (pmap != NULL) {
		simple_lock(&pmap->pm_lock);
		pmap->pm_count++;
		simple_unlock(&pmap->pm_lock);
	}
}

/*
 * Remove the given range of addresses from the specified map.
 *
 * It is assumed that the start and end are properly
 * rounded to the page size.
 */
void
pmap_remove(pmap, sva, eva)
	struct pmap *pmap;
	register vm_offset_t sva;
	register vm_offset_t eva;
{
	register pt_entry_t *ptp, *ptq;
	vm_offset_t va;
	vm_offset_t pa;
	pt_entry_t *pte;
	pv_entry_t pv, npv;
	int ix;
	int s, bits;
#ifdef DEBUG
	pt_entry_t opte;

	if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT))
		pg("pmap_remove(%x, %x, %x)", pmap, sva, eva);
#endif

	if (pmap == NULL)
		return;

	/* are we current address space or kernel? */
	if (pmap->pm_pdir[PTDPTDI].pd_pfnum == PTDpde.pd_pfnum
	    || pmap == kernel_pmap)
		ptp = PTmap;

	/* otherwise, we are alternate address space */
	else {
		if (pmap->pm_pdir[PTDPTDI].pd_pfnum
		    != APTDpde.pd_pfnum) {
			APTDpde = pmap->pm_pdir[PTDPTDI];
			tlbflush();
		}
		ptp = APTmap;
	}
#ifdef DEBUG
	remove_stats.calls++;
#endif

	/* this is essential since we must check the PDE(sva) for presence */
	while (sva <= eva && !pmap_pde_v(pmap_pde(pmap, sva)))
		sva = (sva & PD_MASK) + (1<<PD_SHIFT);
	sva = i386_btop(sva);
	eva = i386_btop(eva);

	for (; sva < eva; sva++) {
		/*
		 * Weed out invalid mappings.
		 * Note: we assume that the page directory table is
		 * always allocated, and in kernel virtual.
		 */
		ptq = ptp + sva;
		while ((sva & 0x3ff) && !pmap_pte_pa(ptq)) {
			if (++sva >= eva)
				return;
			ptq++;
		}

		if (!(sva & 0x3ff)) {	/* Only check once in a while */
			if (!pmap_pde_v(pmap_pde(pmap, i386_ptob(sva)))) {
				/* We can race ahead here, straight to the next pde.. */
				sva = (sva & 0xffc00) + (1<<10) - 1;
				continue;
			}
		}
		if (!pmap_pte_pa(ptp+sva))
			continue;

		pte = ptp + sva;
		pa = pmap_pte_pa(pte);
		va = i386_ptob(sva);
#ifdef DEBUG
		opte = *pte;
		remove_stats.removes++;
#endif
		/*
		 * Update statistics
		 */
		if (pmap_pte_w(pte))
			pmap->pm_stats.wired_count--;
		pmap->pm_stats.resident_count--;

		/*
		 * Invalidate the PTEs.
		 * XXX: should cluster them up and invalidate as many
		 * as possible at once.
		 */
#ifdef DEBUG
		if (pmapdebug & PDB_REMOVE)
			printf("remove: inv %x ptes at %x(%x) ",
			    i386pagesperpage, pte, *(int *)pte);
#endif
		bits = ix = 0;
		do {
			bits |= *(int *)pte & (PG_U|PG_M);
			*(int *)pte++ = 0;
			/*TBIS(va + ix * I386_PAGE_SIZE);*/
		} while (++ix != i386pagesperpage);
		if (curproc && pmap == &curproc->p_vmspace->vm_pmap)
			pmap_activate(pmap, (struct pcb *)curproc->p_addr);
		/* are we current address space or kernel? */
		/*if (pmap->pm_pdir[PTDPTDI].pd_pfnum == PTDpde.pd_pfnum
		    || pmap == kernel_pmap)
			load_cr3(curpcb->pcb_ptd);*/
		tlbflush();
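		/*
		 * Editor's note (an assumption, not in the original): the
		 * commented-out TBIS() calls above would invalidate one
		 * TLB entry at a time; tlbflush() instead reloads %cr3
		 * and drops the whole TLB, presumably because the 386
		 * has no single-entry invalidate (INVLPG arrived with
		 * the 486).
		 */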

#ifdef needednotdone
reduce wiring count on page table pages as references drop
#endif

		/*
		 * Remove from the PV table (raise IPL since we
		 * may be called at interrupt time).
		 */
		if (pa < vm_first_phys || pa >= vm_last_phys)
			continue;
		pv = pa_to_pvh(pa);
		s = splimp();
		/*
		 * If it is the first entry on the list, it is actually
		 * in the header and we must copy the following entry up
		 * to the header.  Otherwise we must search the list for
		 * the entry.  In either case we free the now unused entry.
		 */
		if (pmap == pv->pv_pmap && va == pv->pv_va) {
			npv = pv->pv_next;
			if (npv) {
				*pv = *npv;
				free((caddr_t)npv, M_VMPVENT);
			} else
				pv->pv_pmap = NULL;
#ifdef DEBUG
			remove_stats.pvfirst++;
#endif
		} else {
			for (npv = pv->pv_next; npv; npv = npv->pv_next) {
#ifdef DEBUG
				remove_stats.pvsearch++;
#endif
				if (pmap == npv->pv_pmap && va == npv->pv_va)
					break;
				pv = npv;
			}
#ifdef DEBUG
			if (npv == NULL)
				panic("pmap_remove: PA not in pv_tab");
#endif
			pv->pv_next = npv->pv_next;
			free((caddr_t)npv, M_VMPVENT);
			pv = pa_to_pvh(pa);
		}

#ifdef notdef
[tally number of pagetable pages, if sharing of ptpages adjust here]
#endif
		/*
		 * Update saved attributes for managed page
		 */
		pmap_attributes[pa_index(pa)] |= bits;
		splx(s);
	}
#ifdef notdef
[cache and tlb flushing, if needed]
#endif
}

/*
 * Routine:	pmap_remove_all
 * Function:
 *	Removes this physical page from
 *	all physical maps in which it resides.
 *	Reflects back modify bits to the pager.
 */
void
pmap_remove_all(pa)
	vm_offset_t pa;
{
	register pv_entry_t pv;
	int s;

#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT))
		printf("pmap_remove_all(%x)", pa);
	/*pmap_pvdump(pa);*/
#endif
	/*
	 * Not one of ours
	 */
	if (pa < vm_first_phys || pa >= vm_last_phys)
		return;

	pv = pa_to_pvh(pa);
	s = splimp();
	/*
	 * Do it the easy way for now
	 */
	while (pv->pv_pmap != NULL) {
#ifdef DEBUG
		if (!pmap_pde_v(pmap_pde(pv->pv_pmap, pv->pv_va)) ||
		    pmap_pte_pa(pmap_pte(pv->pv_pmap, pv->pv_va)) != pa)
			panic("pmap_remove_all: bad mapping");
#endif
		pmap_remove(pv->pv_pmap, pv->pv_va, pv->pv_va + PAGE_SIZE);
	}
	splx(s);
}

/*
 * Routine:	pmap_copy_on_write
 * Function:
 *	Remove write privileges from all
 *	physical maps for this physical page.
 */
void
pmap_copy_on_write(pa)
	vm_offset_t pa;
{
#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_PROTECT))
		printf("pmap_copy_on_write(%x)", pa);
#endif
	pmap_changebit(pa, PG_RO, TRUE);
}
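
/*
 * Editor's note (an assumption, not in the original): write protection
 * via PG_RO is only half the story on this CPU generation.  The 80386
 * ignores the R/W bit when running in ring 0, so a kernel-mode store
 * still writes through a read-only user PTE; the CR0.WP bit that makes
 * supervisor code honor write protection first appeared on the 486.
 */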

/*
 * Set the physical protection on the
 * specified range of this map as requested.
 */
void
pmap_protect(pmap, sva, eva, prot)
	register pmap_t pmap;
	vm_offset_t sva, eva;
	vm_prot_t prot;
{
	register pt_entry_t *pte;
	register vm_offset_t va;
	register int ix;
	int i386prot;
	boolean_t firstpage = TRUE;
	register pt_entry_t *ptp;

#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_PROTECT))
		printf("pmap_protect(%x, %x, %x, %x)", pmap, sva, eva, prot);
#endif
	if (pmap == NULL)
		return;

	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
		pmap_remove(pmap, sva, eva);
		return;
	}
	if (prot & VM_PROT_WRITE)
		return;

	/* are we current address space or kernel? */
	if (pmap->pm_pdir[PTDPTDI].pd_pfnum == PTDpde.pd_pfnum
	    || pmap == kernel_pmap)
		ptp = PTmap;

	/* otherwise, we are alternate address space */
	else {
		if (pmap->pm_pdir[PTDPTDI].pd_pfnum
		    != APTDpde.pd_pfnum) {
			APTDpde = pmap->pm_pdir[PTDPTDI];
			tlbflush();
		}
		ptp = APTmap;
	}
	for (va = sva; va < eva; va += PAGE_SIZE) {
		/*
		 * Page table page is not allocated.
		 * Skip it, we don't want to force allocation
		 * of unnecessary PTE pages just to set the protection.
		 */
		if (!pmap_pde_v(pmap_pde(pmap, va))) {
			/* XXX: avoid address wrap around */
			if (va >= i386_trunc_pdr((vm_offset_t)-1))
				break;
			va = i386_round_pdr(va + PAGE_SIZE) - PAGE_SIZE;
			continue;
		}

		pte = ptp + i386_btop(va);

		/*
		 * Page not valid.  Again, skip it.
		 * Should we do this?  Or set protection anyway?
		 */
		if (!pmap_pte_v(pte))
			continue;

		ix = 0;
		i386prot = pte_prot(pmap, prot);
		if (va < UPT_MAX_ADDRESS)
			i386prot |= 2 /*PG_u*/;
		do {
			/* clear VAC here if PG_RO? */
			pmap_pte_set_prot(pte++, i386prot);
			/*TBIS(va + ix * I386_PAGE_SIZE);*/
		} while (++ix != i386pagesperpage);
	}
	if (curproc && pmap == &curproc->p_vmspace->vm_pmap)
		pmap_activate(pmap, (struct pcb *)curproc->p_addr);
}
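
/*
 * Editor's note (illustrative, not in the original): pte_prot() is just
 * a table lookup into protection_codes[], filled in by
 * i386_protection_init() near the bottom of this file.  The net effect
 * on the i386, which has no execute permission bit, is:
 *
 *	VM_PROT_NONE                    -> 0
 *	any READ and/or EXECUTE combo   -> PG_RO
 *	anything including WRITE        -> PG_RW
 */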

/*
 * Insert the given physical page (p) at
 * the specified virtual address (v) in the
 * target physical map with the protection requested.
 *
 * If specified, the page will be wired down, meaning
 * that the related pte can not be reclaimed.
 *
 * NB:  This is the only routine which MAY NOT lazy-evaluate
 * or lose information.  That is, this routine must actually
 * insert this page into the given map NOW.
 */
void
pmap_enter(pmap, va, pa, prot, wired)
	register pmap_t pmap;
	vm_offset_t va;
	register vm_offset_t pa;
	vm_prot_t prot;
	boolean_t wired;
{
	register pt_entry_t *pte;
	register int npte, ix;
	vm_offset_t opa;
	boolean_t cacheable = TRUE;
	boolean_t checkpv = TRUE;

#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_ENTER))
		printf("pmap_enter(%x, %x, %x, %x, %x)",
		    pmap, va, pa, prot, wired);
#endif
	if (pmap == NULL)
		return;

	if (va > VM_MAX_KERNEL_ADDRESS)
		panic("pmap_enter: toobig");
	/* also, should not muck with PTD va! */

#ifdef DEBUG
	if (pmap == kernel_pmap)
		enter_stats.kernel++;
	else
		enter_stats.user++;
#endif

	/*
	 * Page Directory table entry not valid, we need a new PT page
	 */
	if (!pmap_pde_v(pmap_pde(pmap, va))) {
		printf("ptdi %x\n", pmap->pm_pdir[PTDPTDI]);
		panic("Page Table Directory Invalid (ptdi)");
	}

	pte = pmap_pte(pmap, va);
	opa = pmap_pte_pa(pte);
#ifdef DEBUG
	if (pmapdebug & PDB_ENTER)
		printf("enter: pte %x, *pte %x ", pte, *(int *)pte);
#endif

	/*
	 * Mapping has not changed, must be protection or wiring change.
	 */
	if (opa == pa) {
#ifdef DEBUG
		enter_stats.pwchange++;
#endif
		/*
		 * Wiring change, just update stats.
		 * We don't worry about wiring PT pages as they remain
		 * resident as long as there are valid mappings in them.
		 * Hence, if a user page is wired, the PT page will be also.
		 */
		if ((wired && !pmap_pte_w(pte)) || (!wired && pmap_pte_w(pte))) {
#ifdef DEBUG
			if (pmapdebug & PDB_ENTER)
				pg("enter: wiring change -> %x ", wired);
#endif
			if (wired)
				pmap->pm_stats.wired_count++;
			else
				pmap->pm_stats.wired_count--;
#ifdef DEBUG
			enter_stats.wchange++;
#endif
		}
		goto validate;
	}

	/*
	 * Mapping has changed, invalidate old range and fall through to
	 * handle validating new mapping.
	 */
	if (opa) {
#ifdef DEBUG
		if (pmapdebug & PDB_ENTER)
			printf("enter: removing old mapping %x pa %x ", va, opa);
#endif
		pmap_remove(pmap, va, va + PAGE_SIZE);
#ifdef DEBUG
		enter_stats.mchange++;
#endif
	}

	/*
	 * Enter on the PV list if part of our managed memory
	 * Note that we raise IPL while manipulating pv_table
	 * since pmap_enter can be called at interrupt time.
	 */
	if (pa >= vm_first_phys && pa < vm_last_phys) {
		register pv_entry_t pv, npv;
		int s;

#ifdef DEBUG
		enter_stats.managed++;
#endif
		pv = pa_to_pvh(pa);
		s = splimp();
#ifdef DEBUG
		if (pmapdebug & PDB_ENTER)
			printf("enter: pv at %x: %x/%x/%x ",
			    pv, pv->pv_va, pv->pv_pmap, pv->pv_next);
#endif
		/*
		 * No entries yet, use header as the first entry
		 */
		if (pv->pv_pmap == NULL) {
#ifdef DEBUG
			enter_stats.firstpv++;
#endif
			pv->pv_va = va;
			pv->pv_pmap = pmap;
			pv->pv_next = NULL;
			pv->pv_flags = 0;
		}
		/*
		 * There is at least one other VA mapping this page.
		 * Place this entry after the header.
		 */
		else {
			/*printf("second time: ");*/
#ifdef DEBUG
			for (npv = pv; npv; npv = npv->pv_next)
				if (pmap == npv->pv_pmap && va == npv->pv_va)
					panic("pmap_enter: already in pv_tab");
#endif
			npv = (pv_entry_t)
			    malloc(sizeof *npv, M_VMPVENT, M_NOWAIT);
			npv->pv_va = va;
			npv->pv_pmap = pmap;
			npv->pv_next = pv->pv_next;
			pv->pv_next = npv;
#ifdef DEBUG
			if (!npv->pv_next)
				enter_stats.secondpv++;
#endif
		}
		splx(s);
	}
	/*
	 * Assumption: if it is not part of our managed memory
	 * then it must be device memory which may be volatile.
	 */
	if (pmap_initialized) {
		checkpv = cacheable = FALSE;
#ifdef DEBUG
		enter_stats.unmanaged++;
#endif
	}

	/*
	 * Increment counters
	 */
	pmap->pm_stats.resident_count++;
	if (wired)
		pmap->pm_stats.wired_count++;

validate:
	/*
	 * Now validate mapping with desired protection/wiring.
	 * Assume uniform modified and referenced status for all
	 * I386 pages in a MACH page.
	 */
	npte = (pa & PG_FRAME) | pte_prot(pmap, prot) | PG_V;
	npte |= (*(int *)pte & (PG_M|PG_U));
	if (wired)
		npte |= PG_W;
	if (va < UPT_MIN_ADDRESS)
		npte |= PG_u;
	else if (va < UPT_MAX_ADDRESS)
		npte |= PG_u | PG_RW;
#ifdef DEBUG
	if (pmapdebug & PDB_ENTER)
		printf("enter: new pte value %x ", npte);
#endif
	ix = 0;
	do {
		*(int *)pte++ = npte;
		/*TBIS(va);*/
		npte += I386_PAGE_SIZE;
		va += I386_PAGE_SIZE;
	} while (++ix != i386pagesperpage);
	pte--;
#ifdef DEBUGx
cache, tlb flushes
#endif
/*pads(pmap);*/
	/*load_cr3(((struct pcb *)curproc->p_addr)->pcb_ptd);*/
	tlbflush();
}
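
/*
 * Editor's note (illustrative, not in the original): the PTE assembled
 * in the validate: section above packs, in one 32-bit word:
 *
 *	(pa & PG_FRAME)         page frame number (upper 20 bits)
 *	| pte_prot(pmap, prot)  PG_RO or PG_RW
 *	| PG_V                  valid
 *	| old PG_M/PG_U bits    preserved modify/reference state
 *	| PG_W if wired         software-only wiring flag
 *	| PG_u                  for user addresses (the user page-table
 *	                        range additionally gets PG_RW)
 */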

/*
 * pmap_page_protect:
 *
 * Lower the permission for all mappings to a given page.
 */
void
pmap_page_protect(phys, prot)
	vm_offset_t phys;
	vm_prot_t prot;
{
	switch (prot) {
	case VM_PROT_READ:
	case VM_PROT_READ|VM_PROT_EXECUTE:
		pmap_copy_on_write(phys);
		break;
	case VM_PROT_ALL:
		break;
	default:
		pmap_remove_all(phys);
		break;
	}
}

/*
 * Routine:	pmap_change_wiring
 * Function:	Change the wiring attribute for a map/virtual-address
 *		pair.
 * In/out conditions:
 *		The mapping must already exist in the pmap.
 */
void
pmap_change_wiring(pmap, va, wired)
	register pmap_t pmap;
	vm_offset_t va;
	boolean_t wired;
{
	register pt_entry_t *pte;
	register int ix;

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_change_wiring(%x, %x, %x)", pmap, va, wired);
#endif
	if (pmap == NULL)
		return;

	pte = pmap_pte(pmap, va);
#ifdef DEBUG
	/*
	 * Page table page is not allocated.
	 * Should this ever happen?  Ignore it for now,
	 * we don't want to force allocation of unnecessary PTE pages.
	 */
	if (!pmap_pde_v(pmap_pde(pmap, va))) {
		if (pmapdebug & PDB_PARANOIA)
			pg("pmap_change_wiring: invalid PDE for %x ", va);
		return;
	}
	/*
	 * Page not valid.  Should this ever happen?
	 * Just continue and change wiring anyway.
	 */
	if (!pmap_pte_v(pte)) {
		if (pmapdebug & PDB_PARANOIA)
			pg("pmap_change_wiring: invalid PTE for %x ", va);
	}
#endif
	if ((wired && !pmap_pte_w(pte)) || (!wired && pmap_pte_w(pte))) {
		if (wired)
			pmap->pm_stats.wired_count++;
		else
			pmap->pm_stats.wired_count--;
	}
	/*
	 * Wiring is not a hardware characteristic so there is no need
	 * to invalidate TLB.
	 */
	ix = 0;
	do {
		pmap_pte_set_w(pte++, wired);
	} while (++ix != i386pagesperpage);
}

/*
 * Routine:	pmap_pte
 * Function:
 *	Extract the page table entry associated
 *	with the given map/virtual_address pair.
 * [ what about induced faults -wfj ]
 */
struct pte *
pmap_pte(pmap, va)
	register pmap_t pmap;
	vm_offset_t va;
{

#ifdef DEBUGx
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_pte(%x, %x) ->\n", pmap, va);
#endif
	if (pmap && pmap_pde_v(pmap_pde(pmap, va))) {

		/* are we current address space or kernel? */
		if (pmap->pm_pdir[PTDPTDI].pd_pfnum == PTDpde.pd_pfnum
		    || pmap == kernel_pmap)
			return ((struct pte *) vtopte(va));

		/* otherwise, we are alternate address space */
		else {
			if (pmap->pm_pdir[PTDPTDI].pd_pfnum
			    != APTDpde.pd_pfnum) {
				APTDpde = pmap->pm_pdir[PTDPTDI];
				tlbflush();
			}
			return ((struct pte *) avtopte(va));
		}
	}
	return (0);
}
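
/*
 * Editor's note (illustrative, not in the original): the "alternate"
 * path above is how one pmap inspects another's page tables.  Pointing
 * the second recursive slot (APTDpde) at the target pmap's directory
 * and flushing the TLB makes that pmap's PTEs appear at APTmap, so
 * avtopte(va) addresses the foreign PTE just as vtopte(va) addresses
 * our own.  Only one foreign pmap can be visible this way at a time.
 */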

/*
 * Routine:	pmap_extract
 * Function:
 *	Extract the physical page address associated
 *	with the given map/virtual_address pair.
 */
vm_offset_t
pmap_extract(pmap, va)
	register pmap_t pmap;
	vm_offset_t va;
{
	register vm_offset_t pa;

#ifdef DEBUGx
	if (pmapdebug & PDB_FOLLOW)
		pg("pmap_extract(%x, %x) -> ", pmap, va);
#endif
	pa = 0;
	if (pmap && pmap_pde_v(pmap_pde(pmap, va))) {
		pa = *(int *) pmap_pte(pmap, va);
	}
	if (pa)
		pa = (pa & PG_FRAME) | (va & ~PG_FRAME);
#ifdef DEBUGx
	if (pmapdebug & PDB_FOLLOW)
		printf("%x\n", pa);
#endif
	return (pa);
}

/*
 * Copy the range specified by src_addr/len
 * from the source map to the range dst_addr/len
 * in the destination map.
 *
 * This routine is only advisory and need not do anything.
 */
void
pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
	pmap_t dst_pmap;
	pmap_t src_pmap;
	vm_offset_t dst_addr;
	vm_size_t len;
	vm_offset_t src_addr;
{
#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_copy(%x, %x, %x, %x, %x)",
		    dst_pmap, src_pmap, dst_addr, len, src_addr);
#endif
}

/*
 * Require that all active physical maps contain no
 * incorrect entries NOW.  [This update includes
 * forcing updates of any address map caching.]
 *
 * Generally used to ensure that a thread about
 * to run will see a semantically correct world.
 */
void
pmap_update()
{
#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_update()");
#endif
	tlbflush();
}

/*
 * Routine:	pmap_collect
 * Function:
 *	Garbage collects the physical map system for
 *	pages which are no longer used.
 *	Success need not be guaranteed -- that is, there
 *	may well be pages which are not referenced, but
 *	others may be collected.
 * Usage:
 *	Called by the pageout daemon when pages are scarce.
 * [ needs to be written -wfj ]
 */
void
pmap_collect(pmap)
	pmap_t pmap;
{
	register vm_offset_t pa;
	register pv_entry_t pv;
	register int *pte;
	vm_offset_t kpa;
	int s;

#ifdef DEBUG
	int *pde;
	int opmapdebug;
	printf("pmap_collect(%x) ", pmap);
#endif
	if (pmap != kernel_pmap)
		return;

}

/* [ macro again?, should I force kstack into user map here? -wfj ] */
void
pmap_activate(pmap, pcbp)
	register pmap_t pmap;
	struct pcb *pcbp;
{
	int x;
#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_PDRTAB))
		pg("pmap_activate(%x, %x) ", pmap, pcbp);
#endif
	PMAP_ACTIVATE(pmap, pcbp);
	/*printf("pde ");
	for (x = 0x3f6; x < 0x3fA; x++)
		printf("%x ", pmap->pm_pdir[x]);*/
	/*pads(pmap);*/
	/*pg(" pcb_cr3 %x", pcbp->pcb_cr3);*/
}

/*
 * Routine:	pmap_kernel
 * Function:
 *	Returns the physical map handle for the kernel.
 */
pmap_t
pmap_kernel()
{
	return (kernel_pmap);
}
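
/*
 * Editor's note (an assumption, not in the original): PMAP_ACTIVATE()
 * used in pmap_activate() above is a machine-dependent macro defined
 * elsewhere; from its use here and the commented-out pcb_cr3 trace, it
 * evidently records the pmap's page directory base in the pcb and, for
 * the running process, loads it into %cr3, which is what actually
 * switches address spaces.
 */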

/*
 * pmap_zero_page zeros the specified (machine independent)
 * page by mapping the page into virtual memory and using
 * bzero to clear its contents, one machine dependent page
 * at a time.
 */
pmap_zero_page(phys)
	register vm_offset_t phys;
{
	register int ix;

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_zero_page(%x)", phys);
#endif
	phys >>= PG_SHIFT;
	ix = 0;
	do {
		clearseg(phys++);
	} while (++ix != i386pagesperpage);
}

/*
 * pmap_copy_page copies the specified (machine independent)
 * page by mapping the page into virtual memory and using
 * bcopy to copy the page, one machine dependent page at a
 * time.
 */
pmap_copy_page(src, dst)
	register vm_offset_t src, dst;
{
	register int ix;

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_copy_page(%x, %x)", src, dst);
#endif
	src >>= PG_SHIFT;
	dst >>= PG_SHIFT;
	ix = 0;
	do {
		physcopyseg(src++, dst++);
	} while (++ix != i386pagesperpage);
}

/*
 * Routine:	pmap_pageable
 * Function:
 *	Make the specified pages (by pmap, offset)
 *	pageable (or not) as requested.
 *
 *	A page which is not pageable may not take
 *	a fault; therefore, its page table entry
 *	must remain valid for the duration.
 *
 *	This routine is merely advisory; pmap_enter
 *	will specify that these pages are to be wired
 *	down (or not) as appropriate.
 */
pmap_pageable(pmap, sva, eva, pageable)
	pmap_t pmap;
	vm_offset_t sva, eva;
	boolean_t pageable;
{
#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_pageable(%x, %x, %x, %x)",
		    pmap, sva, eva, pageable);
#endif
	/*
	 * If we are making a PT page pageable then all valid
	 * mappings must be gone from that page.  Hence it should
	 * be all zeros and there is no need to clean it.
	 * Assumptions:
	 *	- we are called with only one page at a time
	 *	- PT pages have only one pv_table entry
	 */
	if (pmap == kernel_pmap && pageable && sva + PAGE_SIZE == eva) {
		register pv_entry_t pv;
		register vm_offset_t pa;

#ifdef DEBUG
		if ((pmapdebug & (PDB_FOLLOW|PDB_PTPAGE)) == PDB_PTPAGE)
			printf("pmap_pageable(%x, %x, %x, %x)",
			    pmap, sva, eva, pageable);
#endif
		/*if (!pmap_pde_v(pmap_pde(pmap, sva)))
			return;*/
		if (pmap_pte(pmap, sva) == 0)
			return;
		pa = pmap_pte_pa(pmap_pte(pmap, sva));
		if (pa < vm_first_phys || pa >= vm_last_phys)
			return;
		pv = pa_to_pvh(pa);
		/*if (!ispt(pv->pv_va))
			return;*/
#ifdef DEBUG
		if (pv->pv_va != sva || pv->pv_next) {
			pg("pmap_pageable: bad PT page va %x next %x\n",
			    pv->pv_va, pv->pv_next);
			return;
		}
#endif
		/*
		 * Mark it unmodified to avoid pageout
		 */
		pmap_clear_modify(pa);
#ifdef needsomethinglikethis
		if (pmapdebug & PDB_PTPAGE)
			pg("pmap_pageable: PT page %x(%x) unmodified\n",
			    sva, *(int *)pmap_pte(pmap, sva));
		if (pmapdebug & PDB_WIRING)
			pmap_check_wiring("pageable", sva);
#endif
	}
}

/*
 * Clear the modify bits on the specified physical page.
 */
void
pmap_clear_modify(pa)
	vm_offset_t pa;
{
#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_clear_modify(%x)", pa);
#endif
	pmap_changebit(pa, PG_M, FALSE);
}
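
/*
 * Editor's note (illustrative, not in the original): the small
 * attribute routines above and below are thin wrappers over one pair
 * of primitives, e.g.:
 *
 *	pmap_clear_modify(pa)   -> pmap_changebit(pa, PG_M, FALSE)
 *	pmap_is_modified(pa)    -> pmap_testbit(pa, PG_M)
 *
 * with PG_U playing the same role for the reference bit.  Clear-then-
 * test round trips over these are how the VM system typically detects
 * pages touched or dirtied between pageout scans.
 */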

/*
 * pmap_clear_reference:
 *
 * Clear the reference bit on the specified physical page.
 */
void
pmap_clear_reference(pa)
	vm_offset_t pa;
{
#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_clear_reference(%x)", pa);
#endif
	pmap_changebit(pa, PG_U, FALSE);
}

/*
 * pmap_is_referenced:
 *
 * Return whether or not the specified physical page is referenced
 * by any physical maps.
 */
boolean_t
pmap_is_referenced(pa)
	vm_offset_t pa;
{
#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW) {
		boolean_t rv = pmap_testbit(pa, PG_U);
		printf("pmap_is_referenced(%x) -> %c", pa, "FT"[rv]);
		return (rv);
	}
#endif
	return (pmap_testbit(pa, PG_U));
}

/*
 * pmap_is_modified:
 *
 * Return whether or not the specified physical page is modified
 * by any physical maps.
 */
boolean_t
pmap_is_modified(pa)
	vm_offset_t pa;
{
#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW) {
		boolean_t rv = pmap_testbit(pa, PG_M);
		printf("pmap_is_modified(%x) -> %c", pa, "FT"[rv]);
		return (rv);
	}
#endif
	return (pmap_testbit(pa, PG_M));
}

vm_offset_t
pmap_phys_address(ppn)
	int ppn;
{
	return (i386_ptob(ppn));
}

/*
 * Miscellaneous support routines follow
 */

i386_protection_init()
{
	register int *kp, prot;

	kp = protection_codes;
	for (prot = 0; prot < 8; prot++) {
		switch (prot) {
		case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_NONE:
			*kp++ = 0;
			break;
		case VM_PROT_READ | VM_PROT_NONE | VM_PROT_NONE:
		case VM_PROT_READ | VM_PROT_NONE | VM_PROT_EXECUTE:
		case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_EXECUTE:
			*kp++ = PG_RO;
			break;
		case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_NONE:
		case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_EXECUTE:
		case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_NONE:
		case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE:
			*kp++ = PG_RW;
			break;
		}
	}
}

boolean_t
pmap_testbit(pa, bit)
	register vm_offset_t pa;
	int bit;
{
	register pv_entry_t pv;
	register int *pte, ix;
	int s;

	if (pa < vm_first_phys || pa >= vm_last_phys)
		return (FALSE);

	pv = pa_to_pvh(pa);
	s = splimp();
	/*
	 * Check saved info first
	 */
	if (pmap_attributes[pa_index(pa)] & bit) {
		splx(s);
		return (TRUE);
	}
	/*
	 * Not found, check current mappings returning
	 * immediately if found.
	 */
	if (pv->pv_pmap != NULL) {
		for (; pv; pv = pv->pv_next) {
			pte = (int *) pmap_pte(pv->pv_pmap, pv->pv_va);
			ix = 0;
			do {
				if (*pte++ & bit) {
					splx(s);
					return (TRUE);
				}
			} while (++ix != i386pagesperpage);
		}
	}
	splx(s);
	return (FALSE);
}
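
/*
 * Editor's note (illustrative, not in the original): pmap_testbit()
 * consults two places because attribute state lives in two places.
 * Hardware sets PG_U/PG_M only in the live PTEs; when a mapping is
 * torn down, pmap_remove() folds those bits into pmap_attributes[].
 * So a page is considered referenced/modified if either the saved
 * byte or any current mapping's PTE says so.
 */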
"set" : "clear"); 1597#endif 1598 if (pa < vm_first_phys || pa >= vm_last_phys) 1599 return; 1600 1601 pv = pa_to_pvh(pa); 1602 s = splimp(); 1603 /* 1604 * Clear saved attributes (modify, reference) 1605 */ 1606 if (!setem) 1607 pmap_attributes[pa_index(pa)] &= ~bit; 1608 /* 1609 * Loop over all current mappings setting/clearing as appropos 1610 * If setting RO do we need to clear the VAC? 1611 */ 1612 if (pv->pv_pmap != NULL) { 1613#ifdef DEBUG 1614 int toflush = 0; 1615#endif 1616 for (; pv; pv = pv->pv_next) { 1617#ifdef DEBUG 1618 toflush |= (pv->pv_pmap == kernel_pmap) ? 2 : 1; 1619#endif 1620 va = pv->pv_va; 1621 1622 /* 1623 * XXX don't write protect pager mappings 1624 */ 1625 if (bit == PG_RO) { 1626 extern vm_offset_t pager_sva, pager_eva; 1627 1628 if (va >= pager_sva && va < pager_eva) 1629 continue; 1630 } 1631 1632 pte = (int *) pmap_pte(pv->pv_pmap, va); 1633 ix = 0; 1634 do { 1635 if (setem) 1636 npte = *pte | bit; 1637 else 1638 npte = *pte & ~bit; 1639 if (*pte != npte) { 1640 *pte = npte; 1641 /*TBIS(va);*/ 1642 } 1643 va += I386_PAGE_SIZE; 1644 pte++; 1645 } while (++ix != i386pagesperpage); 1646 1647 if (curproc && pv->pv_pmap == &curproc->p_vmspace->vm_pmap) 1648 pmap_activate(pv->pv_pmap, (struct pcb *)curproc->p_addr); 1649 } 1650#ifdef somethinglikethis 1651 if (setem && bit == PG_RO && (pmapvacflush & PVF_PROTECT)) { 1652 if ((pmapvacflush & PVF_TOTAL) || toflush == 3) 1653 DCIA(); 1654 else if (toflush == 2) 1655 DCIS(); 1656 else 1657 DCIU(); 1658 } 1659#endif 1660 } 1661 splx(s); 1662} 1663 1664#ifdef DEBUG 1665pmap_pvdump(pa) 1666 vm_offset_t pa; 1667{ 1668 register pv_entry_t pv; 1669 1670 printf("pa %x", pa); 1671 for (pv = pa_to_pvh(pa); pv; pv = pv->pv_next) { 1672 printf(" -> pmap %x, va %x, flags %x", 1673 pv->pv_pmap, pv->pv_va, pv->pv_flags); 1674 pads(pv->pv_pmap); 1675 } 1676 printf(" "); 1677} 1678 1679#ifdef notyet 1680pmap_check_wiring(str, va) 1681 char *str; 1682 vm_offset_t va; 1683{ 1684 vm_map_entry_t entry; 1685 register int count, *pte; 1686 1687 va = trunc_page(va); 1688 if (!pmap_pde_v(pmap_pde(kernel_pmap, va)) || 1689 !pmap_pte_v(pmap_pte(kernel_pmap, va))) 1690 return; 1691 1692 if (!vm_map_lookup_entry(pt_map, va, &entry)) { 1693 pg("wired_check: entry for %x not found\n", va); 1694 return; 1695 } 1696 count = 0; 1697 for (pte = (int *)va; pte < (int *)(va+PAGE_SIZE); pte++) 1698 if (*pte) 1699 count++; 1700 if (entry->wired_count != count) 1701 pg("*%s*: %x: w%d/a%d\n", 1702 str, va, entry->wired_count, count); 1703} 1704#endif 1705 1706/* print address space of pmap*/ 1707pads(pm) pmap_t pm; { 1708 unsigned va, i, j; 1709 struct pte *ptep; 1710 1711 if(pm == kernel_pmap) return; 1712 for (i = 0; i < 1024; i++) 1713 if(pm->pm_pdir[i].pd_v) 1714 for (j = 0; j < 1024 ; j++) { 1715 va = (i<<22)+(j<<12); 1716 if (pm == kernel_pmap && va < 0xfe000000) 1717 continue; 1718 if (pm != kernel_pmap && va > UPT_MAX_ADDRESS) 1719 continue; 1720 ptep = pmap_pte(pm, va); 1721 if(pmap_pte_v(ptep)) 1722 printf("%x:%x ", va, *(int *)ptep); 1723 } ; 1724 1725} 1726#endif 1727