pmap.c revision 1890
1/* 2 * Copyright (c) 1991 Regents of the University of California. 3 * All rights reserved. 4 * Copyright (c) 1994 John S. Dyson 5 * All rights reserved. 6 * Copyright (c) 1994 David Greenman 7 * All rights reserved. 8 * 9 * This code is derived from software contributed to Berkeley by 10 * the Systems Programming Group of the University of Utah Computer 11 * Science Department and William Jolitz of UUNET Technologies Inc. 12 * 13 * Redistribution and use in source and binary forms, with or without 14 * modification, are permitted provided that the following conditions 15 * are met: 16 * 1. Redistributions of source code must retain the above copyright 17 * notice, this list of conditions and the following disclaimer. 18 * 2. Redistributions in binary form must reproduce the above copyright 19 * notice, this list of conditions and the following disclaimer in the 20 * documentation and/or other materials provided with the distribution. 21 * 3. All advertising materials mentioning features or use of this software 22 * must display the following acknowledgement: 23 * This product includes software developed by the University of 24 * California, Berkeley and its contributors. 25 * 4. Neither the name of the University nor the names of its contributors 26 * may be used to endorse or promote products derived from this software 27 * without specific prior written permission. 28 * 29 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 30 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 31 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 32 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 33 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 34 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 35 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 36 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 37 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 38 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 39 * SUCH DAMAGE. 40 * 41 * from: @(#)pmap.c 7.7 (Berkeley) 5/12/91 42 * $Id: pmap.c,v 1.28 1994/08/06 09:15:15 davidg Exp $ 43 */ 44 45/* 46 * Derived from hp300 version by Mike Hibler, this version by William 47 * Jolitz uses a recursive map [a pde points to the page directory] to 48 * map the page tables using the pagetables themselves. This is done to 49 * reduce the impact on kernel virtual memory for lots of sparse address 50 * space, and to reduce the cost of memory to each process. 51 * 52 * Derived from: hp300/@(#)pmap.c 7.1 (Berkeley) 12/5/90 53 */ 54/* 55 * Major modifications by John S. Dyson primarily to support 56 * pageable page tables, eliminating pmap_attributes, 57 * discontiguous memory pages, and using more efficient string 58 * instructions. Jan 13, 1994. Further modifications on Mar 2, 1994, 59 * general clean-up and efficiency mods. 60 */ 61 62/* 63 * Manages physical address maps. 64 * 65 * In addition to hardware address maps, this 66 * module is called upon to provide software-use-only 67 * maps which may or may not be stored in the same 68 * form as hardware maps. These pseudo-maps are 69 * used to store intermediate results from copy 70 * operations to and from address spaces. 
71 * 72 * Since the information managed by this module is 73 * also stored by the logical address mapping module, 74 * this module may throw away valid virtual-to-physical 75 * mappings at almost any time. However, invalidations 76 * of virtual-to-physical mappings must be done as 77 * requested. 78 * 79 * In order to cope with hardware architectures which 80 * make virtual-to-physical map invalidates expensive, 81 * this module may delay invalidate or reduced protection 82 * operations until such time as they are actually 83 * necessary. This module is given full information as 84 * to which processors are currently using which maps, 85 * and to when physical maps must be made correct. 86 */ 87 88#include <sys/param.h> 89#include <sys/proc.h> 90#include <sys/malloc.h> 91#include <sys/user.h> 92 93#include <vm/vm.h> 94#include <vm/vm_kern.h> 95#include <vm/vm_page.h> 96 97#include <i386/include/cpufunc.h> 98#include <i386/include/cputypes.h> 99 100#include <i386/isa/isa.h> 101 102/* 103 * Allocate various and sundry SYSMAPs used in the days of old VM 104 * and not yet converted. XXX. 105 */ 106#define BSDVM_COMPAT 1 107 108/* 109 * Get PDEs and PTEs for user/kernel address space 110 */ 111#define pmap_pde(m, v) (&((m)->pm_pdir[((vm_offset_t)(v) >> PD_SHIFT)&1023])) 112#define pdir_pde(m, v) (m[((vm_offset_t)(v) >> PD_SHIFT)&1023]) 113 114#define pmap_pte_pa(pte) (*(int *)(pte) & PG_FRAME) 115 116#define pmap_pde_v(pte) ((*(int *)pte & PG_V) != 0) 117#define pmap_pte_w(pte) ((*(int *)pte & PG_W) != 0) 118#define pmap_pte_m(pte) ((*(int *)pte & PG_M) != 0) 119#define pmap_pte_u(pte) ((*(int *)pte & PG_U) != 0) 120#define pmap_pte_v(pte) ((*(int *)pte & PG_V) != 0) 121 122#define pmap_pte_set_w(pte, v) ((v)?(*(int *)pte |= PG_W):(*(int *)pte &= ~PG_W)) 123#define pmap_pte_set_prot(pte, v) ((*(int *)pte &= ~PG_PROT), (*(int *)pte |= (v))) 124 125/* 126 * Given a map and a machine independent protection code, 127 * convert to a vax protection code. 128 */ 129#define pte_prot(m, p) (protection_codes[p]) 130int protection_codes[8]; 131 132struct pmap kernel_pmap_store; 133pmap_t kernel_pmap; 134 135vm_offset_t phys_avail[6]; /* 2 entries + 1 null */ 136vm_offset_t avail_start; /* PA of first available physical page */ 137vm_offset_t avail_end; /* PA of last available physical page */ 138vm_size_t mem_size; /* memory size in bytes */ 139vm_offset_t virtual_avail; /* VA of first avail page (after kernel bss)*/ 140vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */ 141int i386pagesperpage; /* PAGE_SIZE / I386_PAGE_SIZE */ 142boolean_t pmap_initialized = FALSE; /* Has pmap_init completed? 
*/ 143vm_offset_t vm_first_phys, vm_last_phys; 144 145static inline boolean_t pmap_testbit(); 146static inline void pmap_changebit(); 147static inline int pmap_is_managed(); 148static inline void * vm_get_pmap(); 149static inline void vm_put_pmap(); 150static void i386_protection_init(); 151static void pmap_alloc_pv_entry(); 152static inline pv_entry_t get_pv_entry(); 153static inline void pmap_use_pt(); 154static inline void pmap_unuse_pt(); 155 156inline pt_entry_t * pmap_pte(); 157 158extern vm_offset_t clean_sva, clean_eva; 159extern int cpu_class; 160 161#if BSDVM_COMPAT 162#include "msgbuf.h" 163 164/* 165 * All those kernel PT submaps that BSD is so fond of 166 */ 167pt_entry_t *CMAP1, *CMAP2, *ptmmap; 168caddr_t CADDR1, CADDR2, ptvmmap; 169pt_entry_t *msgbufmap; 170struct msgbuf *msgbufp; 171#endif 172 173void init_pv_entries(int) ; 174 175/* 176 * Routine: pmap_pte 177 * Function: 178 * Extract the page table entry associated 179 * with the given map/virtual_address pair. 180 * [ what about induced faults -wfj] 181 */ 182 183inline pt_entry_t * 184const pmap_pte(pmap, va) 185 register pmap_t pmap; 186 vm_offset_t va; 187{ 188 189 if (pmap && *pmap_pde(pmap, va)) { 190 vm_offset_t frame = (int) pmap->pm_pdir[PTDPTDI] & PG_FRAME; 191 /* are we current address space or kernel? */ 192 if ( (pmap == kernel_pmap) || (frame == ((int) PTDpde & PG_FRAME))) 193 return ((pt_entry_t *) vtopte(va)); 194 /* otherwise, we are alternate address space */ 195 else { 196 if ( frame != ((int) APTDpde & PG_FRAME) ) { 197 APTDpde = pmap->pm_pdir[PTDPTDI]; 198 tlbflush(); 199 } 200 return((pt_entry_t *) avtopte(va)); 201 } 202 } 203 return(0); 204} 205 206/* 207 * Routine: pmap_extract 208 * Function: 209 * Extract the physical page address associated 210 * with the given map/virtual_address pair. 211 */ 212 213vm_offset_t 214pmap_extract(pmap, va) 215 register pmap_t pmap; 216 vm_offset_t va; 217{ 218 pd_entry_t save; 219 vm_offset_t pa; 220 int s; 221 222 if (pmap && *pmap_pde(pmap, va)) { 223 vm_offset_t frame = (int) pmap->pm_pdir[PTDPTDI] & PG_FRAME; 224 /* are we current address space or kernel? */ 225 if ( (pmap == kernel_pmap) 226 || (frame == ((int) PTDpde & PG_FRAME)) ) { 227 pa = *(int *) vtopte(va); 228 /* otherwise, we are alternate address space */ 229 } else { 230 if ( frame != ((int) APTDpde & PG_FRAME)) { 231 APTDpde = pmap->pm_pdir[PTDPTDI]; 232 tlbflush(); 233 } 234 pa = *(int *) avtopte(va); 235 } 236 pa = (pa & PG_FRAME) | (va & ~PG_FRAME); 237 return pa; 238 } 239 return 0; 240 241} 242 243/* 244 * determine if a page is managed (memory vs. 
device) 245 */ 246static inline int 247pmap_is_managed(pa) 248 vm_offset_t pa; 249{ 250 int i; 251 252 if (!pmap_initialized) 253 return 0; 254 255 for (i = 0; phys_avail[i + 1]; i += 2) { 256 if (pa >= phys_avail[i] && pa < phys_avail[i + 1]) 257 return 1; 258 } 259 return 0; 260} 261 262/* 263 * find the vm_page_t of a pte (only) given va of pte and pmap 264 */ 265inline vm_page_t 266pmap_pte_vm_page(pmap, pt) 267 pmap_t pmap; 268 vm_offset_t pt; 269{ 270 pt = i386_trunc_page( pt); 271 pt = (pt - UPT_MIN_ADDRESS) / NBPG; 272 pt = ((vm_offset_t) pmap->pm_pdir[pt]) & PG_FRAME; 273 return PHYS_TO_VM_PAGE(pt); 274} 275 276/* 277 * Wire a page table page 278 */ 279inline void 280pmap_use_pt(pmap, va) 281 pmap_t pmap; 282 vm_offset_t va; 283{ 284 vm_offset_t pt; 285 286 if (va >= VM_MAX_ADDRESS || !pmap_initialized) 287 return; 288 289 pt = (vm_offset_t) vtopte(va); 290 vm_page_hold( pmap_pte_vm_page(pmap, pt)); 291} 292 293/* 294 * Unwire a page table page 295 */ 296inline void 297pmap_unuse_pt(pmap, va) 298 pmap_t pmap; 299 vm_offset_t va; 300{ 301 vm_offset_t pt; 302 303 if (va >= VM_MAX_ADDRESS || !pmap_initialized) 304 return; 305 306 pt = (vm_offset_t) vtopte(va); 307 vm_page_unhold( pmap_pte_vm_page(pmap, pt)); 308} 309 310/* [ macro again?, should I force kstack into user map here? -wfj ] */ 311void 312pmap_activate(pmap, pcbp) 313 register pmap_t pmap; 314 struct pcb *pcbp; 315{ 316 PMAP_ACTIVATE(pmap, pcbp); 317} 318 319/* 320 * Bootstrap the system enough to run with virtual memory. 321 * Map the kernel's code and data, and allocate the system page table. 322 * 323 * On the I386 this is called after mapping has already been enabled 324 * and just syncs the pmap module with what has already been done. 325 * [We can't call it easily with mapping off since the kernel is not 326 * mapped with PA == VA, hence we would have to relocate every address 327 * from the linked base (virtual) address "KERNBASE" to the actual 328 * (physical) address starting relative to 0] 329 */ 330 331#define DMAPAGES 8 332void 333pmap_bootstrap(firstaddr, loadaddr) 334 vm_offset_t firstaddr; 335 vm_offset_t loadaddr; 336{ 337#if BSDVM_COMPAT 338 vm_offset_t va; 339 pt_entry_t *pte; 340#endif 341 extern int IdlePTD; 342 343 avail_start = firstaddr + DMAPAGES*NBPG; 344 345 virtual_avail = (vm_offset_t) KERNBASE + avail_start; 346 virtual_end = VM_MAX_KERNEL_ADDRESS; 347 i386pagesperpage = PAGE_SIZE / NBPG; 348 349 /* 350 * Initialize protection array. 351 */ 352 i386_protection_init(); 353 354 /* 355 * The kernel's pmap is statically allocated so we don't 356 * have to use pmap_create, which is unlikely to work 357 * correctly at this part of the boot sequence. 358 */ 359 kernel_pmap = &kernel_pmap_store; 360 361 kernel_pmap->pm_pdir = (pd_entry_t *)(KERNBASE + IdlePTD); 362 363 simple_lock_init(&kernel_pmap->pm_lock); 364 kernel_pmap->pm_count = 1; 365 366#if BSDVM_COMPAT 367 /* 368 * Allocate all the submaps we need 369 */ 370#define SYSMAP(c, p, v, n) \ 371 v = (c)va; va += ((n)*NBPG); p = pte; pte += (n); 372 373 va = virtual_avail; 374 pte = pmap_pte(kernel_pmap, va); 375 376 SYSMAP(caddr_t ,CMAP1 ,CADDR1 ,1 ) 377 SYSMAP(caddr_t ,CMAP2 ,CADDR2 ,1 ) 378 SYSMAP(caddr_t ,ptmmap ,ptvmmap ,1 ) 379 SYSMAP(struct msgbuf * ,msgbufmap ,msgbufp ,1 ) 380 virtual_avail = va; 381#endif 382 /* 383 * reserve special hunk of memory for use by bus dma as a bounce 384 * buffer (contiguous virtual *and* physical memory). 
for now, 385 * assume vm does not use memory beneath hole, and we know that 386 * the bootstrap uses top 32k of base memory. -wfj 387 */ 388 { 389 extern vm_offset_t isaphysmem; 390 isaphysmem = va; 391 392 virtual_avail = pmap_map(va, firstaddr, 393 firstaddr + DMAPAGES*NBPG, VM_PROT_ALL); 394 } 395 396 *(int *)CMAP1 = *(int *)CMAP2 = *(int *)PTD = 0; 397 tlbflush(); 398 399} 400 401/* 402 * Initialize the pmap module. 403 * Called by vm_init, to initialize any structures that the pmap 404 * system needs to map virtual memory. 405 * pmap_init has been enhanced to support in a fairly consistant 406 * way, discontiguous physical memory. 407 */ 408void 409pmap_init(phys_start, phys_end) 410 vm_offset_t phys_start, phys_end; 411{ 412 vm_offset_t addr, addr2; 413 vm_size_t npg, s; 414 int rv; 415 int i; 416 extern int KPTphys; 417 extern int IdlePTD; 418 419 /* 420 * Now that kernel map has been allocated, we can mark as 421 * unavailable regions which we have mapped in locore. 422 */ 423 addr = atdevbase; 424 (void) vm_map_find(kernel_map, NULL, (vm_offset_t) 0, 425 &addr, (0x100000-0xa0000), FALSE); 426 427 addr = (vm_offset_t) KERNBASE + IdlePTD; 428 vm_object_reference(kernel_object); 429 (void) vm_map_find(kernel_map, kernel_object, addr, 430 &addr, (4 + NKPT) * NBPG, FALSE); 431 432 433 /* 434 * calculate the number of pv_entries needed 435 */ 436 vm_first_phys = phys_avail[0]; 437 for (i = 0; phys_avail[i + 1]; i += 2) ; 438 npg = (phys_avail[(i - 2) + 1] - vm_first_phys) / NBPG; 439 440 /* 441 * Allocate memory for random pmap data structures. Includes the 442 * pv_head_table. 443 */ 444 s = (vm_size_t) (sizeof(struct pv_entry) * npg); 445 s = i386_round_page(s); 446 addr = (vm_offset_t) kmem_alloc(kernel_map, s); 447 pv_table = (pv_entry_t) addr; 448 449 /* 450 * init the pv free list 451 */ 452 init_pv_entries(npg); 453 /* 454 * Now it is safe to enable pv_table recording. 455 */ 456 pmap_initialized = TRUE; 457} 458 459/* 460 * Used to map a range of physical addresses into kernel 461 * virtual address space. 462 * 463 * For now, VM is already on, we only need to map the 464 * specified memory. 465 */ 466vm_offset_t 467pmap_map(virt, start, end, prot) 468 vm_offset_t virt; 469 vm_offset_t start; 470 vm_offset_t end; 471 int prot; 472{ 473 while (start < end) { 474 pmap_enter(kernel_pmap, virt, start, prot, FALSE); 475 virt += PAGE_SIZE; 476 start += PAGE_SIZE; 477 } 478 return(virt); 479} 480 481/* 482 * Create and return a physical map. 483 * 484 * If the size specified for the map 485 * is zero, the map is an actual physical 486 * map, and may be referenced by the 487 * hardware. 488 * 489 * If the size specified is non-zero, 490 * the map will be used in software only, and 491 * is bounded by that size. 492 * 493 * [ just allocate a ptd and mark it uninitialize -- should we track 494 * with a table which process has which ptd? 
-wfj ] 495 */ 496 497pmap_t 498pmap_create(size) 499 vm_size_t size; 500{ 501 register pmap_t pmap; 502 503 /* 504 * Software use map does not need a pmap 505 */ 506 if (size) 507 return(NULL); 508 509 pmap = (pmap_t) malloc(sizeof *pmap, M_VMPMAP, M_WAITOK); 510 bzero(pmap, sizeof(*pmap)); 511 pmap_pinit(pmap); 512 return (pmap); 513} 514 515 516struct pmaplist { 517 struct pmaplist *next; 518}; 519 520static inline void * 521vm_get_pmap() 522{ 523 struct pmaplist *rtval; 524 525 rtval = (struct pmaplist *)kmem_alloc(kernel_map, ctob(1)); 526 bzero(rtval, ctob(1)); 527 return rtval; 528} 529 530static inline void 531vm_put_pmap(up) 532 struct pmaplist *up; 533{ 534 kmem_free(kernel_map, (vm_offset_t)up, ctob(1)); 535} 536 537/* 538 * Initialize a preallocated and zeroed pmap structure, 539 * such as one in a vmspace structure. 540 */ 541void 542pmap_pinit(pmap) 543 register struct pmap *pmap; 544{ 545 /* 546 * No need to allocate page table space yet but we do need a 547 * valid page directory table. 548 */ 549 pmap->pm_pdir = (pd_entry_t *) vm_get_pmap(); 550 551 /* wire in kernel global address entries */ 552 bcopy(PTD+KPTDI, pmap->pm_pdir+KPTDI, NKPT*PTESIZE); 553 554 /* install self-referential address mapping entry */ 555 *(int *)(pmap->pm_pdir+PTDPTDI) = 556 ((int)pmap_kextract((vm_offset_t)pmap->pm_pdir)) | PG_V | PG_KW; 557 558 pmap->pm_count = 1; 559 simple_lock_init(&pmap->pm_lock); 560} 561 562/* 563 * Retire the given physical map from service. 564 * Should only be called if the map contains 565 * no valid mappings. 566 */ 567void 568pmap_destroy(pmap) 569 register pmap_t pmap; 570{ 571 int count; 572 573 if (pmap == NULL) 574 return; 575 576 simple_lock(&pmap->pm_lock); 577 count = --pmap->pm_count; 578 simple_unlock(&pmap->pm_lock); 579 if (count == 0) { 580 pmap_release(pmap); 581 free((caddr_t)pmap, M_VMPMAP); 582 } 583} 584 585/* 586 * Release any resources held by the given physical map. 587 * Called when a pmap initialized by pmap_pinit is being released. 588 * Should only be called if the map contains no valid mappings. 589 */ 590void 591pmap_release(pmap) 592 register struct pmap *pmap; 593{ 594 vm_put_pmap((struct pmaplist *) pmap->pm_pdir); 595} 596 597/* 598 * Add a reference to the specified pmap. 599 */ 600void 601pmap_reference(pmap) 602 pmap_t pmap; 603{ 604 if (pmap != NULL) { 605 simple_lock(&pmap->pm_lock); 606 pmap->pm_count++; 607 simple_unlock(&pmap->pm_lock); 608 } 609} 610 611#define PV_FREELIST_MIN ((NBPG / sizeof (struct pv_entry)) / 2) 612 613/* 614 * Data for the pv entry allocation mechanism 615 */ 616int pv_freelistcnt; 617pv_entry_t pv_freelist; 618vm_offset_t pvva; 619int npvvapg; 620 621/* 622 * free the pv_entry back to the free list 623 */ 624inline static void 625free_pv_entry(pv) 626 pv_entry_t pv; 627{ 628 if (!pv) return; 629 ++pv_freelistcnt; 630 pv->pv_next = pv_freelist; 631 pv_freelist = pv; 632} 633 634/* 635 * get a new pv_entry, allocating a block from the system 636 * when needed. 637 * the memory allocation is performed bypassing the malloc code 638 * because of the possibility of allocations at interrupt time. 
639 */ 640static inline pv_entry_t 641get_pv_entry() 642{ 643 pv_entry_t tmp; 644 645 /* 646 * get more pv_entry pages if needed 647 */ 648 while (pv_freelistcnt < PV_FREELIST_MIN || pv_freelist == 0) { 649 pmap_alloc_pv_entry(); 650 } 651 652 /* 653 * get a pv_entry off of the free list 654 */ 655 --pv_freelistcnt; 656 tmp = pv_freelist; 657 pv_freelist = tmp->pv_next; 658 tmp->pv_pmap = 0; 659 tmp->pv_va = 0; 660 tmp->pv_next = 0; 661 return tmp; 662} 663 664/* 665 * this *strange* allocation routine *statistically* eliminates the 666 * *possibility* of a malloc failure (*FATAL*) for a pv_entry_t data structure. 667 * also -- this code is MUCH MUCH faster than the malloc equiv... 668 */ 669static void 670pmap_alloc_pv_entry() 671{ 672 /* 673 * do we have any pre-allocated map-pages left? 674 */ 675 if (npvvapg) { 676 vm_page_t m; 677 /* 678 * we do this to keep recursion away 679 */ 680 pv_freelistcnt += PV_FREELIST_MIN; 681 /* 682 * allocate a physical page out of the vm system 683 */ 684 if (m = vm_page_alloc(kernel_object, pvva-vm_map_min(kernel_map))) { 685 int newentries; 686 int i; 687 pv_entry_t entry; 688 newentries = (NBPG/sizeof (struct pv_entry)); 689 /* 690 * wire the page 691 */ 692 vm_page_wire(m); 693 m->flags &= ~PG_BUSY; 694 /* 695 * let the kernel see it 696 */ 697 pmap_kenter(pvva, VM_PAGE_TO_PHYS(m)); 698 699 entry = (pv_entry_t) pvva; 700 /* 701 * update the allocation pointers 702 */ 703 pvva += NBPG; 704 --npvvapg; 705 706 /* 707 * free the entries into the free list 708 */ 709 for (i = 0; i < newentries; i++) { 710 free_pv_entry(entry); 711 entry++; 712 } 713 } 714 pv_freelistcnt -= PV_FREELIST_MIN; 715 } 716 if (!pv_freelist) 717 panic("get_pv_entry: cannot get a pv_entry_t"); 718} 719 720 721 722/* 723 * init the pv_entry allocation system 724 */ 725#define PVSPERPAGE 64 726void 727init_pv_entries(npg) 728 int npg; 729{ 730 /* 731 * allocate enough kvm space for PVSPERPAGE entries per page (lots) 732 * kvm space is fairly cheap, be generous!!! (the system can panic 733 * if this is too small.) 734 */ 735 npvvapg = ((npg*PVSPERPAGE) * sizeof(struct pv_entry) + NBPG - 1)/NBPG; 736 pvva = kmem_alloc_pageable(kernel_map, npvvapg * NBPG); 737 /* 738 * get the first batch of entries 739 */ 740 free_pv_entry(get_pv_entry()); 741} 742 743static pt_entry_t * 744get_pt_entry(pmap) 745 pmap_t pmap; 746{ 747 pt_entry_t *ptp; 748 vm_offset_t frame = (int) pmap->pm_pdir[PTDPTDI] & PG_FRAME; 749 /* are we current address space or kernel? */ 750 if (pmap == kernel_pmap || frame == ((int) PTDpde & PG_FRAME)) { 751 ptp=PTmap; 752 /* otherwise, we are alternate address space */ 753 } else { 754 if ( frame != ((int) APTDpde & PG_FRAME)) { 755 APTDpde = pmap->pm_pdir[PTDPTDI]; 756 tlbflush(); 757 } 758 ptp=APTmap; 759 } 760 return ptp; 761} 762 763/* 764 * If it is the first entry on the list, it is actually 765 * in the header and we must copy the following entry up 766 * to the header. Otherwise we must search the list for 767 * the entry. In either case we free the now unused entry. 
768 */ 769void 770pmap_remove_entry(pmap, pv, va) 771 struct pmap *pmap; 772 pv_entry_t pv; 773 vm_offset_t va; 774{ 775 pv_entry_t npv; 776 int wired; 777 int s; 778 s = splhigh(); 779 if (pmap == pv->pv_pmap && va == pv->pv_va) { 780 npv = pv->pv_next; 781 if (npv) { 782 *pv = *npv; 783 free_pv_entry(npv); 784 } else { 785 pv->pv_pmap = NULL; 786 } 787 } else { 788 for (npv = pv->pv_next; npv; npv = npv->pv_next) { 789 if (pmap == npv->pv_pmap && va == npv->pv_va) { 790 break; 791 } 792 pv = npv; 793 } 794 if (npv) { 795 pv->pv_next = npv->pv_next; 796 free_pv_entry(npv); 797 } 798 } 799 splx(s); 800} 801 802/* 803 * Remove the given range of addresses from the specified map. 804 * 805 * It is assumed that the start and end are properly 806 * rounded to the page size. 807 */ 808void 809pmap_remove(pmap, sva, eva) 810 struct pmap *pmap; 811 register vm_offset_t sva; 812 register vm_offset_t eva; 813{ 814 register pt_entry_t *ptp,*ptq; 815 vm_offset_t pa; 816 register pv_entry_t pv; 817 vm_offset_t va; 818 vm_page_t m; 819 pt_entry_t oldpte; 820 821 if (pmap == NULL) 822 return; 823 824 ptp = get_pt_entry(pmap); 825 826/* 827 * special handling of removing one page. a very 828 * common operation and easy to short circuit some 829 * code. 830 */ 831 if( (sva + NBPG) == eva) { 832 833 if( *pmap_pde( pmap, sva) == 0) 834 return; 835 836 ptq = ptp + i386_btop(sva); 837 838 if( !*ptq) 839 return; 840 /* 841 * Update statistics 842 */ 843 if (pmap_pte_w(ptq)) 844 pmap->pm_stats.wired_count--; 845 pmap->pm_stats.resident_count--; 846 847 pa = pmap_pte_pa(ptq); 848 oldpte = *ptq; 849 *ptq = 0; 850 851 if (pmap_is_managed(pa)) { 852 if ((int) oldpte & (PG_M | PG_U)) { 853 if ((sva < USRSTACK || sva > UPT_MAX_ADDRESS) || 854 (sva >= USRSTACK && sva < USRSTACK+(UPAGES*NBPG))) { 855 if (sva < clean_sva || sva >= clean_eva) { 856 m = PHYS_TO_VM_PAGE(pa); 857 if ((int) oldpte & PG_M) { 858 m->flags &= ~PG_CLEAN; 859 } 860 if ((int) oldpte & PG_U) { 861 m->flags |= PG_REFERENCED; 862 } 863 } 864 } 865 } 866 867 pv = pa_to_pvh(pa); 868 pmap_remove_entry(pmap, pv, sva); 869 pmap_unuse_pt(pmap, sva); 870 } 871 tlbflush(); 872 return; 873 } 874 875 sva = i386_btop(sva); 876 eva = i386_btop(eva); 877 878 while (sva < eva) { 879 /* 880 * Weed out invalid mappings. 881 * Note: we assume that the page directory table is 882 * always allocated, and in kernel virtual. 883 */ 884 885 if ( *pmap_pde(pmap, i386_ptob(sva)) == 0 ) { 886 /* We can race ahead here, straight to next pde.. */ 887 nextpde: 888 sva = ((sva + NPTEPG) & ~(NPTEPG - 1)); 889 continue; 890 } 891 892 ptq = ptp + sva; 893 894 /* 895 * search for page table entries, use string operations 896 * that are much faster than 897 * explicitly scanning when page tables are not fully 898 * populated. 899 */ 900 if ( *ptq == 0) { 901 vm_offset_t pdnxt = ((sva + NPTEPG) & ~(NPTEPG - 1)); 902 vm_offset_t nscan = pdnxt - sva; 903 int found = 0; 904 905 if ((nscan + sva) > eva) 906 nscan = eva - sva; 907 908 asm("xorl %%eax,%%eax;cld;repe;scasl;jz 1f;incl %%eax;1:;" 909 :"=D"(ptq),"=a"(found) 910 :"c"(nscan),"0"(ptq) 911 :"cx"); 912 913 if( !found) { 914 sva = pdnxt; 915 continue; 916 } 917 ptq -= 1; 918 919 sva = ptq - ptp; 920 } 921 922 /* 923 * Update statistics 924 */ 925 oldpte = *ptq; 926 if (((int)oldpte) & PG_W) 927 pmap->pm_stats.wired_count--; 928 pmap->pm_stats.resident_count--; 929 930 /* 931 * Invalidate the PTEs. 932 * XXX: should cluster them up and invalidate as many 933 * as possible at once. 
934 */ 935 *ptq = 0; 936 937 va = i386_ptob(sva); 938 939 /* 940 * Remove from the PV table (raise IPL since we 941 * may be called at interrupt time). 942 */ 943 pa = ((int)oldpte) & PG_FRAME; 944 if (!pmap_is_managed(pa)) { 945 ++sva; 946 continue; 947 } 948 949 if ((((int) oldpte & PG_M) && (va < USRSTACK || va > UPT_MAX_ADDRESS)) 950 || (va >= USRSTACK && va < USRSTACK+(UPAGES*NBPG))) { 951 if (va < clean_sva || va >= clean_eva ) { 952 m = PHYS_TO_VM_PAGE(pa); 953 m->flags &= ~PG_CLEAN; 954 } 955 } 956 957 pv = pa_to_pvh(pa); 958 pmap_remove_entry(pmap, pv, va); 959 pmap_unuse_pt(pmap, va); 960 ++sva; 961 } 962 tlbflush(); 963} 964 965/* 966 * Routine: pmap_remove_all 967 * Function: 968 * Removes this physical page from 969 * all physical maps in which it resides. 970 * Reflects back modify bits to the pager. 971 * 972 * Notes: 973 * Original versions of this routine were very 974 * inefficient because they iteratively called 975 * pmap_remove (slow...) 976 */ 977void 978pmap_remove_all(pa) 979 vm_offset_t pa; 980{ 981 register pv_entry_t pv, npv; 982 register pt_entry_t *pte, *ptp; 983 vm_offset_t va; 984 struct pmap *pmap; 985 struct map *map; 986 vm_page_t m; 987 int s; 988 int anyvalid = 0; 989 990 /* 991 * Not one of ours 992 */ 993 if (!pmap_is_managed(pa)) 994 return; 995 996 pa = i386_trunc_page(pa); 997 pv = pa_to_pvh(pa); 998 m = PHYS_TO_VM_PAGE(pa); 999 1000 s = splhigh(); 1001 while (pv->pv_pmap != NULL) { 1002 pmap = pv->pv_pmap; 1003 ptp = get_pt_entry(pmap); 1004 va = i386_btop(pv->pv_va); 1005 pte = ptp + va; 1006 if (pmap_pte_w(pte)) 1007 pmap->pm_stats.wired_count--; 1008 if ( *pte) { 1009 pmap->pm_stats.resident_count--; 1010 anyvalid++; 1011 1012 /* 1013 * update the vm_page_t clean bit 1014 */ 1015 if ( (m->flags & PG_CLEAN) && 1016 ((((int) *pte) & PG_M) && (pv->pv_va < USRSTACK || pv->pv_va > UPT_MAX_ADDRESS)) 1017 || (pv->pv_va >= USRSTACK && pv->pv_va < USRSTACK+(UPAGES*NBPG))) { 1018 if (pv->pv_va < clean_sva || pv->pv_va >= clean_eva) { 1019 m->flags &= ~PG_CLEAN; 1020 } 1021 } 1022 1023 *pte = 0; 1024 } 1025 pmap_unuse_pt(pmap, pv->pv_va); 1026 1027 npv = pv->pv_next; 1028 if (npv) { 1029 *pv = *npv; 1030 free_pv_entry(npv); 1031 } else { 1032 pv->pv_pmap = NULL; 1033 } 1034 } 1035 splx(s); 1036 if (anyvalid) 1037 tlbflush(); 1038} 1039 1040 1041/* 1042 * Set the physical protection on the 1043 * specified range of this map as requested. 1044 */ 1045void 1046pmap_protect(pmap, sva, eva, prot) 1047 register pmap_t pmap; 1048 vm_offset_t sva, eva; 1049 vm_prot_t prot; 1050{ 1051 register pt_entry_t *pte; 1052 register vm_offset_t va; 1053 int i386prot; 1054 register pt_entry_t *ptp; 1055 int evap = i386_btop(eva); 1056 int s; 1057 int anyvalid = 0;; 1058 1059 if (pmap == NULL) 1060 return; 1061 1062 if ((prot & VM_PROT_READ) == VM_PROT_NONE) { 1063 pmap_remove(pmap, sva, eva); 1064 return; 1065 } 1066 if (prot & VM_PROT_WRITE) 1067 return; 1068 1069 ptp = get_pt_entry(pmap); 1070 1071 va = sva; 1072 while (va < eva) { 1073 int found=0; 1074 int svap; 1075 vm_offset_t nscan; 1076 /* 1077 * Page table page is not allocated. 1078 * Skip it, we don't want to force allocation 1079 * of unnecessary PTE pages just to set the protection. 1080 */ 1081 if (! 
*pmap_pde(pmap, va)) { 1082 /* XXX: avoid address wrap around */ 1083nextpde: 1084 if (va >= i386_trunc_pdr((vm_offset_t)-1)) 1085 break; 1086 va = i386_round_pdr(va + PAGE_SIZE); 1087 continue; 1088 } 1089 1090 pte = ptp + i386_btop(va); 1091 1092 if( *pte == 0) { 1093 /* 1094 * scan for a non-empty pte 1095 */ 1096 svap = pte - ptp; 1097 nscan = ((svap + NPTEPG) & ~(NPTEPG - 1)) - svap; 1098 1099 if (nscan + svap > evap) 1100 nscan = evap - svap; 1101 1102 found = 0; 1103 if (nscan) 1104 asm("xorl %%eax,%%eax;cld;repe;scasl;jz 1f;incl %%eax;1:;" 1105 :"=D"(pte),"=a"(found) 1106 :"c"(nscan),"0"(pte):"cx"); 1107 1108 if( !found) 1109 goto nextpde; 1110 1111 pte -= 1; 1112 svap = pte - ptp; 1113 1114 va = i386_ptob(svap); 1115 } 1116 1117 anyvalid++; 1118 1119 i386prot = pte_prot(pmap, prot); 1120 if (va < UPT_MAX_ADDRESS) { 1121 i386prot |= PG_u; 1122 if( va >= UPT_MIN_ADDRESS) 1123 i386prot |= PG_RW; 1124 } 1125 pmap_pte_set_prot(pte, i386prot); 1126 va += PAGE_SIZE; 1127 } 1128 if (anyvalid) 1129 tlbflush(); 1130} 1131 1132/* 1133 * Insert the given physical page (p) at 1134 * the specified virtual address (v) in the 1135 * target physical map with the protection requested. 1136 * 1137 * If specified, the page will be wired down, meaning 1138 * that the related pte can not be reclaimed. 1139 * 1140 * NB: This is the only routine which MAY NOT lazy-evaluate 1141 * or lose information. That is, this routine must actually 1142 * insert this page into the given map NOW. 1143 */ 1144void 1145pmap_enter(pmap, va, pa, prot, wired) 1146 register pmap_t pmap; 1147 vm_offset_t va; 1148 register vm_offset_t pa; 1149 vm_prot_t prot; 1150 boolean_t wired; 1151{ 1152 register pt_entry_t *pte; 1153 register pt_entry_t npte; 1154 vm_offset_t opa; 1155 int cacheable=1; 1156 int ptevalid = 0; 1157 1158 if (pmap == NULL) 1159 return; 1160 1161 va = i386_trunc_page(va); 1162 pa = i386_trunc_page(pa); 1163 if (va > VM_MAX_KERNEL_ADDRESS)panic("pmap_enter: toobig"); 1164 1165 /* 1166 * Page Directory table entry not valid, we need a new PT page 1167 */ 1168 if ( *pmap_pde(pmap, va) == 0) { 1169 pg("ptdi %x, va %x", pmap->pm_pdir[PTDPTDI], va); 1170 } 1171 1172 pte = pmap_pte(pmap, va); 1173 opa = pmap_pte_pa(pte); 1174 1175 /* 1176 * Mapping has not changed, must be protection or wiring change. 1177 */ 1178 if (opa == pa) { 1179 /* 1180 * Wiring change, just update stats. 1181 * We don't worry about wiring PT pages as they remain 1182 * resident as long as there are valid mappings in them. 1183 * Hence, if a user page is wired, the PT page will be also. 1184 */ 1185 if (wired && !pmap_pte_w(pte) || !wired && pmap_pte_w(pte)) { 1186 if (wired) 1187 pmap->pm_stats.wired_count++; 1188 else 1189 pmap->pm_stats.wired_count--; 1190 } 1191 goto validate; 1192 } 1193 1194 /* 1195 * Mapping has changed, invalidate old range and fall through to 1196 * handle validating new mapping. 1197 */ 1198 if (opa) { 1199 pmap_remove(pmap, va, va + PAGE_SIZE); 1200 } 1201 1202 /* 1203 * Enter on the PV list if part of our managed memory 1204 * Note that we raise IPL while manipulating pv_table 1205 * since pmap_enter can be called at interrupt time. 1206 */ 1207 if (pmap_is_managed(pa)) { 1208 register pv_entry_t pv, npv; 1209 int s; 1210 1211 pv = pa_to_pvh(pa); 1212 s = splhigh(); 1213 /* 1214 * No entries yet, use header as the first entry 1215 */ 1216 if (pv->pv_pmap == NULL) { 1217 pv->pv_va = va; 1218 pv->pv_pmap = pmap; 1219 pv->pv_next = NULL; 1220 } 1221 /* 1222 * There is at least one other VA mapping this page. 
1223 * Place this entry after the header. 1224 */ 1225 else { 1226 npv = get_pv_entry(); 1227 npv->pv_va = va; 1228 npv->pv_pmap = pmap; 1229 npv->pv_next = pv->pv_next; 1230 pv->pv_next = npv; 1231 } 1232 splx(s); 1233 cacheable = 1; 1234 } else { 1235 cacheable = 0; 1236 } 1237 1238 pmap_use_pt(pmap, va); 1239 1240 /* 1241 * Increment counters 1242 */ 1243 pmap->pm_stats.resident_count++; 1244 if (wired) 1245 pmap->pm_stats.wired_count++; 1246 1247validate: 1248 /* 1249 * Now validate mapping with desired protection/wiring. 1250 */ 1251 npte = (pt_entry_t) ( (int) (pa | pte_prot(pmap, prot) | PG_V)); 1252 /* 1253 * for correctness: 1254 */ 1255 if( !cacheable) 1256 (int) npte |= PG_N; 1257 1258 /* 1259 * When forking (copy-on-write, etc): 1260 * A process will turn off write permissions for any of its writable 1261 * pages. If the data (object) is only referred to by one process, the 1262 * processes map is modified directly as opposed to using the 1263 * object manipulation routine. When using pmap_protect, the 1264 * modified bits are not kept in the vm_page_t data structure. 1265 * Therefore, when using pmap_enter in vm_fault to bring back 1266 * writability of a page, there has been no memory of the 1267 * modified or referenced bits except at the pte level. 1268 * this clause supports the carryover of the modified and 1269 * used (referenced) bits. 1270 */ 1271 if (pa == opa) 1272 (int) npte |= (int) *pte & (PG_M|PG_U); 1273 1274 1275 if (wired) 1276 (int) npte |= PG_W; 1277 if (va < UPT_MIN_ADDRESS) 1278 (int) npte |= PG_u; 1279 else if (va < UPT_MAX_ADDRESS) 1280 (int) npte |= PG_u | PG_RW; 1281 1282 if(*pte != npte) { 1283 if (*pte) 1284 ptevalid++; 1285 *pte = npte; 1286 } 1287 if (ptevalid) 1288 tlbflush(); 1289} 1290 1291/* 1292 * Add a list of wired pages to the kva 1293 * this routine is only used for temporary 1294 * kernel mappings that do not need to have 1295 * page modification or references recorded. 1296 * Note that old mappings are simply written 1297 * over. The page *must* be wired. 1298 */ 1299void 1300pmap_qenter(va, m, count) 1301 vm_offset_t va; 1302 vm_page_t *m; 1303 int count; 1304{ 1305 int i; 1306 int anyvalid = 0; 1307 register pt_entry_t *pte; 1308 1309 for(i=0;i<count;i++) { 1310 pte = vtopte(va + i * NBPG); 1311 if (*pte) 1312 anyvalid++; 1313 *pte = (pt_entry_t) ( (int) (VM_PAGE_TO_PHYS(m[i]) | PG_RW | PG_V | PG_W)); 1314 } 1315 if (anyvalid) 1316 tlbflush(); 1317} 1318/* 1319 * this routine jerks page mappings from the 1320 * kernel -- it is meant only for temporary mappings. 1321 */ 1322void 1323pmap_qremove(va, count) 1324 vm_offset_t va; 1325 int count; 1326{ 1327 int i; 1328 register pt_entry_t *pte; 1329 for(i=0;i<count;i++) { 1330 pte = vtopte(va + i * NBPG); 1331 *pte = 0; 1332 } 1333 tlbflush(); 1334} 1335 1336/* 1337 * add a wired page to the kva 1338 * note that in order for the mapping to take effect -- you 1339 * should do a tlbflush after doing the pmap_kenter... 1340 */ 1341void 1342pmap_kenter(va, pa) 1343 vm_offset_t va; 1344 register vm_offset_t pa; 1345{ 1346 register pt_entry_t *pte; 1347 pte = vtopte(va); 1348 1349 *pte = (pt_entry_t) ( (int) (pa | PG_RW | PG_V | PG_W)); 1350} 1351 1352/* 1353 * remove a page from the kernel pagetables 1354 */ 1355void 1356pmap_kremove( va) 1357 vm_offset_t va; 1358{ 1359 register pt_entry_t *pte; 1360 pte = vtopte(va); 1361 1362 *pte = (pt_entry_t) 0; 1363 tlbflush(); 1364} 1365 1366/* 1367 * this code makes some *MAJOR* assumptions: 1368 * 1. Current pmap & pmap exists. 1369 * 2. Not wired. 
1370 * 3. Read access. 1371 * 4. No page table pages. 1372 * 5. Tlbflush is deferred to calling procedure. 1373 * 6. Page IS managed. 1374 * but is *MUCH* faster than pmap_enter... 1375 */ 1376 1377static inline int 1378pmap_enter_quick(pmap, va, pa) 1379 register pmap_t pmap; 1380 vm_offset_t va; 1381 register vm_offset_t pa; 1382{ 1383 register pt_entry_t *pte; 1384 register pv_entry_t pv, npv; 1385 int s; 1386 int anyvalid = 0; 1387 1388 /* 1389 * Enter on the PV list if part of our managed memory 1390 * Note that we raise IPL while manipulating pv_table 1391 * since pmap_enter can be called at interrupt time. 1392 */ 1393 1394 pte = vtopte(va); 1395 if (pmap_pte_pa(pte)) { 1396 pmap_remove(pmap, va, va + PAGE_SIZE); 1397 } 1398 1399 pv = pa_to_pvh(pa); 1400 s = splhigh(); 1401 /* 1402 * No entries yet, use header as the first entry 1403 */ 1404 if (pv->pv_pmap == NULL) { 1405 pv->pv_va = va; 1406 pv->pv_pmap = pmap; 1407 pv->pv_next = NULL; 1408 } 1409 /* 1410 * There is at least one other VA mapping this page. 1411 * Place this entry after the header. 1412 */ 1413 else { 1414 npv = get_pv_entry(); 1415 npv->pv_va = va; 1416 npv->pv_pmap = pmap; 1417 npv->pv_next = pv->pv_next; 1418 pv->pv_next = npv; 1419 } 1420 splx(s); 1421 1422 pmap_use_pt(pmap, va); 1423 1424 /* 1425 * Increment counters 1426 */ 1427 pmap->pm_stats.resident_count++; 1428 1429validate: 1430 1431 if (*pte) 1432 anyvalid++; 1433 /* 1434 * Now validate mapping with desired protection/wiring. 1435 */ 1436 *pte = (pt_entry_t) ( (int) (pa | PG_V | PG_u)); 1437 1438 return (anyvalid); 1439} 1440 1441/* 1442 * pmap_object_init_pt preloads the ptes for a given object 1443 * into the specified pmap. This eliminates the blast of soft 1444 * faults on process startup and immediately after an mmap. 1445 */ 1446void 1447pmap_object_init_pt(pmap, addr, object, offset, size) 1448 pmap_t pmap; 1449 vm_offset_t addr; 1450 vm_object_t object; 1451 vm_offset_t offset; 1452 vm_offset_t size; 1453{ 1454 1455 vm_offset_t tmpoff; 1456 vm_page_t p; 1457 int s; 1458 vm_offset_t v, lastv=0; 1459 pt_entry_t pte; 1460 extern vm_map_t kernel_map; 1461 vm_offset_t objbytes; 1462 int anyvalid = 0; 1463 1464 if (!pmap) 1465 return; 1466 1467 /* 1468 * if we are processing a major portion of the object, then 1469 * scan the entire thing. 1470 */ 1471 if( size > object->size / 2) { 1472 objbytes = size; 1473 p = object->memq.tqh_first; 1474 while ((p != NULL) && (objbytes != 0)) { 1475 tmpoff = p->offset; 1476 if( tmpoff < offset) { 1477 p = p->listq.tqe_next; 1478 continue; 1479 } 1480 tmpoff -= offset; 1481 if( tmpoff >= size) { 1482 p = p->listq.tqe_next; 1483 continue; 1484 } 1485 1486 if ((p->flags & (PG_BUSY|PG_FICTITIOUS)) == 0 ) { 1487 vm_page_hold(p); 1488 v = i386_trunc_page(((vm_offset_t)vtopte( addr+tmpoff))); 1489 /* a fault might occur here */ 1490 *(volatile char *)v += 0; 1491 vm_page_unhold(p); 1492 anyvalid += pmap_enter_quick(pmap, addr+tmpoff, VM_PAGE_TO_PHYS(p)); 1493 } 1494 p = p->listq.tqe_next; 1495 objbytes -= NBPG; 1496 } 1497 } else { 1498 /* 1499 * else lookup the pages one-by-one. 
1500 */ 1501 for(tmpoff = 0; tmpoff < size; tmpoff += NBPG) { 1502 if( p = vm_page_lookup(object, tmpoff + offset)) { 1503 if( (p->flags & (PG_BUSY|PG_FICTITIOUS)) == 0) { 1504 vm_page_hold(p); 1505 v = i386_trunc_page(((vm_offset_t)vtopte( addr+tmpoff))); 1506 /* a fault might occur here */ 1507 *(volatile char *)v += 0; 1508 vm_page_unhold(p); 1509 anyvalid += pmap_enter_quick(pmap, addr+tmpoff, VM_PAGE_TO_PHYS(p)); 1510 } 1511 } 1512 } 1513 } 1514 1515 if (anyvalid) 1516 tlbflush(); 1517} 1518 1519/* 1520 * Routine: pmap_change_wiring 1521 * Function: Change the wiring attribute for a map/virtual-address 1522 * pair. 1523 * In/out conditions: 1524 * The mapping must already exist in the pmap. 1525 */ 1526void 1527pmap_change_wiring(pmap, va, wired) 1528 register pmap_t pmap; 1529 vm_offset_t va; 1530 boolean_t wired; 1531{ 1532 register pt_entry_t *pte; 1533 1534 if (pmap == NULL) 1535 return; 1536 1537 pte = pmap_pte(pmap, va); 1538 if (wired && !pmap_pte_w(pte) || !wired && pmap_pte_w(pte)) { 1539 if (wired) 1540 pmap->pm_stats.wired_count++; 1541 else 1542 pmap->pm_stats.wired_count--; 1543 } 1544 /* 1545 * Wiring is not a hardware characteristic so there is no need 1546 * to invalidate TLB. 1547 */ 1548 pmap_pte_set_w(pte, wired); 1549 /* 1550 * When unwiring, set the modified bit in the pte -- could have 1551 * been changed by the kernel 1552 */ 1553 if (!wired) 1554 (int) *pte |= PG_M; 1555} 1556 1557 1558 1559/* 1560 * Copy the range specified by src_addr/len 1561 * from the source map to the range dst_addr/len 1562 * in the destination map. 1563 * 1564 * This routine is only advisory and need not do anything. 1565 */ 1566void 1567pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr) 1568 pmap_t dst_pmap, src_pmap; 1569 vm_offset_t dst_addr; 1570 vm_size_t len; 1571 vm_offset_t src_addr; 1572{ 1573} 1574/* 1575 * Require that all active physical maps contain no 1576 * incorrect entries NOW. [This update includes 1577 * forcing updates of any address map caching.] 1578 * 1579 * Generally used to insure that a thread about 1580 * to run will see a semantically correct world. 1581 */ 1582void 1583pmap_update() 1584{ 1585 tlbflush(); 1586} 1587 1588/* 1589 * Routine: pmap_kernel 1590 * Function: 1591 * Returns the physical map handle for the kernel. 1592 */ 1593pmap_t 1594pmap_kernel() 1595{ 1596 return (kernel_pmap); 1597} 1598 1599/* 1600 * pmap_zero_page zeros the specified (machine independent) 1601 * page by mapping the page into virtual memory and using 1602 * bzero to clear its contents, one machine dependent page 1603 * at a time. 1604 */ 1605void 1606pmap_zero_page(phys) 1607 vm_offset_t phys; 1608{ 1609 if (*(int *)CMAP2) 1610 panic("pmap_zero_page: CMAP busy"); 1611 1612 *(int *)CMAP2 = PG_V | PG_KW | i386_trunc_page(phys); 1613 bzero(CADDR2,NBPG); 1614 1615 *(int *)CMAP2 = 0; 1616 tlbflush(); 1617} 1618 1619/* 1620 * pmap_copy_page copies the specified (machine independent) 1621 * page by mapping the page into virtual memory and using 1622 * bcopy to copy the page, one machine dependent page at a 1623 * time. 
1624 */ 1625void 1626pmap_copy_page(src, dst) 1627 vm_offset_t src; 1628 vm_offset_t dst; 1629{ 1630 if (*(int *)CMAP1 || *(int *)CMAP2) 1631 panic("pmap_copy_page: CMAP busy"); 1632 1633 *(int *)CMAP1 = PG_V | PG_KW | i386_trunc_page(src); 1634 *(int *)CMAP2 = PG_V | PG_KW | i386_trunc_page(dst); 1635 1636#if __GNUC__ > 1 1637 memcpy(CADDR2, CADDR1, NBPG); 1638#else 1639 bcopy(CADDR1, CADDR2, NBPG); 1640#endif 1641 *(int *)CMAP1 = 0; 1642 *(int *)CMAP2 = 0; 1643 tlbflush(); 1644} 1645 1646 1647/* 1648 * Routine: pmap_pageable 1649 * Function: 1650 * Make the specified pages (by pmap, offset) 1651 * pageable (or not) as requested. 1652 * 1653 * A page which is not pageable may not take 1654 * a fault; therefore, its page table entry 1655 * must remain valid for the duration. 1656 * 1657 * This routine is merely advisory; pmap_enter 1658 * will specify that these pages are to be wired 1659 * down (or not) as appropriate. 1660 */ 1661void 1662pmap_pageable(pmap, sva, eva, pageable) 1663 pmap_t pmap; 1664 vm_offset_t sva, eva; 1665 boolean_t pageable; 1666{ 1667} 1668 1669/* 1670 * this routine returns true if a physical page resides 1671 * in the given pmap. 1672 */ 1673boolean_t 1674pmap_page_exists(pmap, pa) 1675 pmap_t pmap; 1676 vm_offset_t pa; 1677{ 1678 register pv_entry_t pv; 1679 int s; 1680 1681 if (!pmap_is_managed(pa)) 1682 return FALSE; 1683 1684 pv = pa_to_pvh(pa); 1685 s = splhigh(); 1686 1687 /* 1688 * Not found, check current mappings returning 1689 * immediately if found. 1690 */ 1691 if (pv->pv_pmap != NULL) { 1692 for (; pv; pv = pv->pv_next) { 1693 if (pv->pv_pmap == pmap) { 1694 splx(s); 1695 return TRUE; 1696 } 1697 } 1698 } 1699 splx(s); 1700 return(FALSE); 1701} 1702 1703/* 1704 * pmap_testbit tests bits in pte's 1705 * note that the testbit/changebit routines are inline, 1706 * and a lot of things compile-time evaluate. 1707 */ 1708static inline boolean_t 1709pmap_testbit(pa, bit) 1710 register vm_offset_t pa; 1711 int bit; 1712{ 1713 register pv_entry_t pv; 1714 pt_entry_t *pte; 1715 int s; 1716 1717 if (!pmap_is_managed(pa)) 1718 return FALSE; 1719 1720 pv = pa_to_pvh(pa); 1721 s = splhigh(); 1722 1723 /* 1724 * Not found, check current mappings returning 1725 * immediately if found. 1726 */ 1727 if (pv->pv_pmap != NULL) { 1728 for (; pv; pv = pv->pv_next) { 1729 /* 1730 * if the bit being tested is the modified bit, 1731 * then mark UPAGES as always modified, and 1732 * ptes as never modified. 
1733 */ 1734 if (bit & PG_U ) { 1735 if ((pv->pv_va >= clean_sva) && (pv->pv_va < clean_eva)) { 1736 continue; 1737 } 1738 } 1739 if (bit & PG_M ) { 1740 if (pv->pv_va >= USRSTACK) { 1741 if (pv->pv_va >= clean_sva && pv->pv_va < clean_eva) { 1742 continue; 1743 } 1744 if (pv->pv_va < USRSTACK+(UPAGES*NBPG)) { 1745 splx(s); 1746 return TRUE; 1747 } 1748 else if (pv->pv_va < UPT_MAX_ADDRESS) { 1749 splx(s); 1750 return FALSE; 1751 } 1752 } 1753 } 1754 if( !pv->pv_pmap) { 1755 printf("Null pmap (tb) at va: 0x%lx\n", pv->pv_va); 1756 continue; 1757 } 1758 pte = pmap_pte(pv->pv_pmap, pv->pv_va); 1759 if ((int) *pte & bit) { 1760 splx(s); 1761 return TRUE; 1762 } 1763 } 1764 } 1765 splx(s); 1766 return(FALSE); 1767} 1768 1769/* 1770 * this routine is used to modify bits in ptes 1771 */ 1772static inline void 1773pmap_changebit(pa, bit, setem) 1774 vm_offset_t pa; 1775 int bit; 1776 boolean_t setem; 1777{ 1778 register pv_entry_t pv; 1779 register pt_entry_t *pte, npte; 1780 vm_offset_t va; 1781 int s; 1782 1783 if (!pmap_is_managed(pa)) 1784 return; 1785 1786 pv = pa_to_pvh(pa); 1787 s = splhigh(); 1788 1789 /* 1790 * Loop over all current mappings setting/clearing as appropos 1791 * If setting RO do we need to clear the VAC? 1792 */ 1793 if (pv->pv_pmap != NULL) { 1794 for (; pv; pv = pv->pv_next) { 1795 va = pv->pv_va; 1796 1797 /* 1798 * don't write protect pager mappings 1799 */ 1800 if (!setem && (bit == PG_RW)) { 1801 if (va >= clean_sva && va < clean_eva) 1802 continue; 1803 } 1804 1805 if( !pv->pv_pmap) { 1806 printf("Null pmap (cb) at va: 0x%lx\n", va); 1807 continue; 1808 } 1809 pte = pmap_pte(pv->pv_pmap, va); 1810 if (setem) 1811 (int) npte = (int) *pte | bit; 1812 else 1813 (int) npte = (int) *pte & ~bit; 1814 *pte = npte; 1815 } 1816 } 1817 splx(s); 1818 tlbflush(); 1819} 1820 1821/* 1822 * pmap_page_protect: 1823 * 1824 * Lower the permission for all mappings to a given page. 1825 */ 1826void 1827pmap_page_protect(phys, prot) 1828 vm_offset_t phys; 1829 vm_prot_t prot; 1830{ 1831 if ((prot & VM_PROT_WRITE) == 0) { 1832 if (prot & (VM_PROT_READ | VM_PROT_EXECUTE)) 1833 pmap_changebit(phys, PG_RW, FALSE); 1834 else 1835 pmap_remove_all(phys); 1836 } 1837} 1838 1839/* 1840 * Clear the modify bits on the specified physical page. 1841 */ 1842void 1843pmap_clear_modify(pa) 1844 vm_offset_t pa; 1845{ 1846 pmap_changebit(pa, PG_M, FALSE); 1847} 1848 1849/* 1850 * pmap_clear_reference: 1851 * 1852 * Clear the reference bit on the specified physical page. 1853 */ 1854void 1855pmap_clear_reference(pa) 1856 vm_offset_t pa; 1857{ 1858 pmap_changebit(pa, PG_U, FALSE); 1859} 1860 1861/* 1862 * pmap_is_referenced: 1863 * 1864 * Return whether or not the specified physical page is referenced 1865 * by any physical maps. 1866 */ 1867 1868boolean_t 1869pmap_is_referenced(pa) 1870 vm_offset_t pa; 1871{ 1872 return(pmap_testbit(pa, PG_U)); 1873} 1874 1875/* 1876 * pmap_is_modified: 1877 * 1878 * Return whether or not the specified physical page is modified 1879 * by any physical maps. 1880 */ 1881 1882boolean_t 1883pmap_is_modified(pa) 1884 vm_offset_t pa; 1885{ 1886 return(pmap_testbit(pa, PG_M)); 1887} 1888 1889/* 1890 * Routine: pmap_copy_on_write 1891 * Function: 1892 * Remove write privileges from all 1893 * physical maps for this physical page. 
1894 */ 1895void 1896pmap_copy_on_write(pa) 1897 vm_offset_t pa; 1898{ 1899 pmap_changebit(pa, PG_RW, FALSE); 1900} 1901 1902 1903vm_offset_t 1904pmap_phys_address(ppn) 1905 int ppn; 1906{ 1907 return(i386_ptob(ppn)); 1908} 1909 1910/* 1911 * Miscellaneous support routines follow 1912 */ 1913 1914void 1915i386_protection_init() 1916{ 1917 register int *kp, prot; 1918 1919 kp = protection_codes; 1920 for (prot = 0; prot < 8; prot++) { 1921 switch (prot) { 1922 case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_NONE: 1923 /* 1924 * Read access is also 0. There isn't any execute 1925 * bit, so just make it readable. 1926 */ 1927 case VM_PROT_READ | VM_PROT_NONE | VM_PROT_NONE: 1928 case VM_PROT_READ | VM_PROT_NONE | VM_PROT_EXECUTE: 1929 case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_EXECUTE: 1930 *kp++ = 0; 1931 break; 1932 case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_NONE: 1933 case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_EXECUTE: 1934 case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_NONE: 1935 case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE: 1936 *kp++ = PG_RW; 1937 break; 1938 } 1939 } 1940} 1941 1942#ifdef DEBUG 1943/* print address space of pmap*/ 1944void 1945pads(pm) 1946 pmap_t pm; 1947{ 1948 unsigned va, i, j; 1949 pt_entry_t *ptep; 1950 1951 if (pm == kernel_pmap) return; 1952 for (i = 0; i < 1024; i++) 1953 if (pm->pm_pdir[i]) 1954 for (j = 0; j < 1024 ; j++) { 1955 va = (i<<PD_SHIFT)+(j<<PG_SHIFT); 1956 if (pm == kernel_pmap && va < KERNBASE) 1957 continue; 1958 if (pm != kernel_pmap && va > UPT_MAX_ADDRESS) 1959 continue; 1960 ptep = pmap_pte(pm, va); 1961 if (pmap_pte_v(ptep)) 1962 printf("%x:%x ", va, *(int *)ptep); 1963 } ; 1964 1965} 1966 1967void 1968pmap_pvdump(pa) 1969 vm_offset_t pa; 1970{ 1971 register pv_entry_t pv; 1972 1973 printf("pa %x", pa); 1974 for (pv = pa_to_pvh(pa); pv; pv = pv->pv_next) { 1975#ifdef used_to_be 1976 printf(" -> pmap %x, va %x, flags %x", 1977 pv->pv_pmap, pv->pv_va, pv->pv_flags); 1978#endif 1979 printf(" -> pmap %x, va %x", 1980 pv->pv_pmap, pv->pv_va); 1981 pads(pv->pv_pmap); 1982 } 1983 printf(" "); 1984} 1985#endif 1986 1987
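/*
 * Illustrative sketch (not part of pmap.c): the recursive page-directory
 * mapping described in the header comment of this file.  Because
 * pm_pdir[PTDPTDI] points back at the page directory itself, the 4MB of
 * kernel virtual space starting at (PTDPTDI << PD_SHIFT) becomes a linear
 * window onto every page-table page of the current address space, which is
 * what lets pmap_pte() return vtopte(va) for the current pmap and
 * avtopte(va) after reloading APTDpde for an alternate pmap.  The real
 * macros live in the i386 include files; the SKETCH_* names and the slot
 * value below are placeholders chosen for illustration only, not the
 * authoritative definitions.
 */
#define SKETCH_PD_SHIFT	22		/* one pde maps 4MB on the i386 */
#define SKETCH_PG_SHIFT	12		/* one pte maps 4KB */
#define SKETCH_PTDPTDI	0x3f7		/* example self-reference pde slot */

typedef unsigned int sketch_pt_entry_t;

/* base of the recursively mapped page-table window */
#define SKETCH_PTmap \
	((sketch_pt_entry_t *)(SKETCH_PTDPTDI << SKETCH_PD_SHIFT))

/* analogue of vtopte(): address of the pte that maps va */
#define sketch_vtopte(va) \
	(SKETCH_PTmap + (((unsigned int)(va)) >> SKETCH_PG_SHIFT))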