pmap.c revision 12417
/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)pmap.c 7.7 (Berkeley) 5/12/91
 * $Id: pmap.c,v 1.64 1995/10/23 02:31:29 davidg Exp $
 */

/*
 * Derived from hp300 version by Mike Hibler, this version by William
 * Jolitz uses a recursive map [a pde points to the page directory] to
 * map the page tables using the pagetables themselves. This is done to
 * reduce the impact on kernel virtual memory for lots of sparse address
 * space, and to reduce the cost of memory to each process.
 *
 * Derived from: hp300/@(#)pmap.c 7.1 (Berkeley) 12/5/90
 */
/*
 * Major modifications by John S. Dyson primarily to support
 * pageable page tables, eliminating pmap_attributes,
 * discontiguous memory pages, and using more efficient string
 * instructions. Jan 13, 1994.  Further modifications on Mar 2, 1994,
 * general clean-up and efficiency mods.
 */

/*
 * Manages physical address maps.
 *
 * In addition to hardware address maps, this
 * module is called upon to provide software-use-only
 * maps which may or may not be stored in the same
 * form as hardware maps.  These pseudo-maps are
 * used to store intermediate results from copy
 * operations to and from address spaces.
 *
 * Since the information managed by this module is
 * also stored by the logical address mapping module,
 * this module may throw away valid virtual-to-physical
 * mappings at almost any time.  However, invalidations
 * of virtual-to-physical mappings must be done as
 * requested.
 *
 * In order to cope with hardware architectures which
 * make virtual-to-physical map invalidates expensive,
 * this module may delay invalidate or reduced protection
 * operations until such time as they are actually
 * necessary.  This module is given full information as
 * to which processors are currently using which maps,
 * and to when physical maps must be made correct.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/user.h>
#include <sys/msgbuf.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>

#include <machine/cputypes.h>
#include <machine/md_var.h>

#include <i386/isa/isa.h>

/*
 * Get PDEs and PTEs for user/kernel address space
 */
#define pmap_pde(m, v)  (&((m)->pm_pdir[((vm_offset_t)(v) >> PD_SHIFT)&1023]))
#define pdir_pde(m, v)  (m[((vm_offset_t)(v) >> PD_SHIFT)&1023])

#define pmap_pte_pa(pte)        (*(int *)(pte) & PG_FRAME)

#define pmap_pde_v(pte)         ((*(int *)pte & PG_V) != 0)
#define pmap_pte_w(pte)         ((*(int *)pte & PG_W) != 0)
#define pmap_pte_m(pte)         ((*(int *)pte & PG_M) != 0)
#define pmap_pte_u(pte)         ((*(int *)pte & PG_U) != 0)
#define pmap_pte_v(pte)         ((*(int *)pte & PG_V) != 0)

#define pmap_pte_set_w(pte, v)          ((v) ? (*(int *)pte |= PG_W) : (*(int *)pte &= ~PG_W))
#define pmap_pte_set_prot(pte, v)       ((*(int *)pte &= ~PG_PROT), (*(int *)pte |= (v)))

/*
 * Given a map and a machine independent protection code,
 * convert to an i386 protection code.
 */
#define pte_prot(m, p)  (protection_codes[p])
int protection_codes[8];

struct pmap kernel_pmap_store;
pmap_t kernel_pmap;

vm_offset_t avail_start;        /* PA of first available physical page */
vm_offset_t avail_end;          /* PA of last available physical page */
vm_size_t mem_size;             /* memory size in bytes */
vm_offset_t virtual_avail;      /* VA of first avail page (after kernel bss) */
vm_offset_t virtual_end;        /* VA of last avail page (end of kernel AS) */
boolean_t pmap_initialized = FALSE;     /* Has pmap_init completed? */
vm_offset_t vm_first_phys, vm_last_phys;

static inline int pmap_is_managed();
static void i386_protection_init();
static void pmap_alloc_pv_entry();
static inline pv_entry_t get_pv_entry();
int nkpt;

extern vm_offset_t clean_sva, clean_eva;
extern int cpu_class;

/*
 * All those kernel PT submaps that BSD is so fond of
 */
pt_entry_t *CMAP1, *CMAP2, *ptmmap;
pv_entry_t pv_table;
caddr_t CADDR1, CADDR2, ptvmmap;
pt_entry_t *msgbufmap;
struct msgbuf *msgbufp;


void
init_pv_entries(int);

/*
 * Routine:     pmap_pte
 * Function:
 *      Extract the page table entry associated
 *      with the given map/virtual_address pair.
 *      [ what about induced faults -wfj]
 */

inline pt_entry_t * const
pmap_pte(pmap, va)
    register pmap_t pmap;
    vm_offset_t va;
{

    if (pmap && *pmap_pde(pmap, va)) {
        vm_offset_t frame = (int) pmap->pm_pdir[PTDPTDI] & PG_FRAME;

        /* are we current address space or kernel? */
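        /*
         * Note: the page tables are reached through the recursive PDE
         * installed at slot PTDPTDI (the "recursive map" described in the
         * header comment above).  vtopte() is valid only when that slot
         * belongs to the current or kernel address space; otherwise the
         * alternate map (APTDpde/avtopte) is loaded below before the PTE
         * is touched.
         */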
        if ((pmap == kernel_pmap) || (frame == ((int) PTDpde & PG_FRAME)))
            return ((pt_entry_t *) vtopte(va));
        /* otherwise, we are alternate address space */
        else {
            if (frame != ((int) APTDpde & PG_FRAME)) {
                APTDpde = pmap->pm_pdir[PTDPTDI];
                pmap_update();
            }
            return ((pt_entry_t *) avtopte(va));
        }
    }
    return (0);
}

/*
 * Routine:     pmap_extract
 * Function:
 *      Extract the physical page address associated
 *      with the given map/virtual_address pair.
 */

vm_offset_t
pmap_extract(pmap, va)
    register pmap_t pmap;
    vm_offset_t va;
{
    vm_offset_t pa;

    if (pmap && *pmap_pde(pmap, va)) {
        vm_offset_t frame = (int) pmap->pm_pdir[PTDPTDI] & PG_FRAME;

        /* are we current address space or kernel? */
        if ((pmap == kernel_pmap)
            || (frame == ((int) PTDpde & PG_FRAME))) {
            pa = *(int *) vtopte(va);
            /* otherwise, we are alternate address space */
        } else {
            if (frame != ((int) APTDpde & PG_FRAME)) {
                APTDpde = pmap->pm_pdir[PTDPTDI];
                pmap_update();
            }
            pa = *(int *) avtopte(va);
        }
        return ((pa & PG_FRAME) | (va & ~PG_FRAME));
    }
    return 0;

}

/*
 * determine if a page is managed (memory vs. device)
 */
static inline int
pmap_is_managed(pa)
    vm_offset_t pa;
{
    int i;

    if (!pmap_initialized)
        return 0;

    for (i = 0; phys_avail[i + 1]; i += 2) {
        if (pa >= phys_avail[i] && pa < phys_avail[i + 1])
            return 1;
    }
    return 0;
}

/*
 * find the vm_page_t of a pte (only) given va of pte and pmap
 */
__inline vm_page_t
pmap_pte_vm_page(pmap, pt)
    pmap_t pmap;
    vm_offset_t pt;
{
    vm_page_t m;

    pt = i386_trunc_page(pt);
    pt = (pt - UPT_MIN_ADDRESS) / NBPG;
    pt = ((vm_offset_t) pmap->pm_pdir[pt]) & PG_FRAME;
    m = PHYS_TO_VM_PAGE(pt);
    return m;
}

/*
 * Wire a page table page
 */
__inline void
pmap_use_pt(pmap, va)
    pmap_t pmap;
    vm_offset_t va;
{
    vm_offset_t pt;

    if ((va >= UPT_MIN_ADDRESS) || !pmap_initialized)
        return;

    pt = (vm_offset_t) vtopte(va);
    vm_page_hold(pmap_pte_vm_page(pmap, pt));
}

/*
 * Unwire a page table page
 */
inline void
pmap_unuse_pt(pmap, va)
    pmap_t pmap;
    vm_offset_t va;
{
    vm_offset_t pt;
    vm_page_t m;

    if ((va >= UPT_MIN_ADDRESS) || !pmap_initialized)
        return;

    pt = (vm_offset_t) vtopte(va);
    m = pmap_pte_vm_page(pmap, pt);
    vm_page_unhold(m);
    if (pmap != kernel_pmap &&
        (m->hold_count == 0) &&
        (m->wire_count == 0) &&
        (va < KPT_MIN_ADDRESS)) {
        pmap_page_protect(VM_PAGE_TO_PHYS(m), VM_PROT_NONE);
        vm_page_free(m);
    }
}

/* [ macro again?, should I force kstack into user map here? -wfj ] */
void
pmap_activate(pmap, pcbp)
    register pmap_t pmap;
    struct pcb *pcbp;
{
    PMAP_ACTIVATE(pmap, pcbp);
}

/*
 * Bootstrap the system enough to run with virtual memory.
 *
 * On the i386 this is called after mapping has already been enabled
 * and just syncs the pmap module with what has already been done.
 * [We can't call it easily with mapping off since the kernel is not
 * mapped with PA == VA, hence we would have to relocate every address
 * from the linked base (virtual) address "KERNBASE" to the actual
 * (physical) address starting relative to 0]
 */
void
pmap_bootstrap(firstaddr, loadaddr)
    vm_offset_t firstaddr;
    vm_offset_t loadaddr;
{
    vm_offset_t va;
    pt_entry_t *pte;

    avail_start = firstaddr;

    /*
     * XXX The calculation of virtual_avail is wrong. It's NKPT*NBPG too
     * large. It should instead be correctly calculated in locore.s and
     * not based on 'first' (which is a physical address, not a virtual
     * address, for the start of unused physical memory). The kernel
     * page tables are NOT double mapped and thus should not be included
     * in this calculation.
     */
    virtual_avail = (vm_offset_t) KERNBASE + firstaddr;
    virtual_end = VM_MAX_KERNEL_ADDRESS;

    /*
     * Initialize protection array.
     */
    i386_protection_init();

    /*
     * The kernel's pmap is statically allocated so we don't have to use
     * pmap_create, which is unlikely to work correctly at this part of
     * the boot sequence.
     */
    kernel_pmap = &kernel_pmap_store;

    kernel_pmap->pm_pdir = (pd_entry_t *) (KERNBASE + IdlePTD);

    kernel_pmap->pm_count = 1;
    nkpt = NKPT;

    /*
     * Reserve some special page table entries/VA space for temporary
     * mapping of pages.
     */
#define SYSMAP(c, p, v, n)      \
    v = (c)va; va += ((n)*NBPG); p = pte; pte += (n);

    va = virtual_avail;
    pte = pmap_pte(kernel_pmap, va);

    /*
     * CMAP1/CMAP2 are used for zeroing and copying pages.
     */
    SYSMAP(caddr_t, CMAP1, CADDR1, 1)
    SYSMAP(caddr_t, CMAP2, CADDR2, 1)

    /*
     * ptmmap is used for reading arbitrary physical pages via /dev/mem.
     */
    SYSMAP(caddr_t, ptmmap, ptvmmap, 1)

    /*
     * msgbufmap is used to map the system message buffer.
     */
    SYSMAP(struct msgbuf *, msgbufmap, msgbufp, 1)

    virtual_avail = va;

    *(int *) CMAP1 = *(int *) CMAP2 = *(int *) PTD = 0;
    pmap_update();
}

/*
 * Initialize the pmap module.
 * Called by vm_init, to initialize any structures that the pmap
 * system needs to map virtual memory.
 * pmap_init has been enhanced to support in a fairly consistent
 * way, discontiguous physical memory.
 */
void
pmap_init(phys_start, phys_end)
    vm_offset_t phys_start, phys_end;
{
    vm_offset_t addr;
    vm_size_t npg, s;
    int i;

    /*
     * calculate the number of pv_entries needed
     */
    vm_first_phys = phys_avail[0];
    for (i = 0; phys_avail[i + 1]; i += 2);
    npg = (phys_avail[(i - 2) + 1] - vm_first_phys) / NBPG;

    /*
     * Allocate memory for random pmap data structures.  Includes the
     * pv_head_table.
     */
    s = (vm_size_t) (sizeof(struct pv_entry) * npg);
    s = i386_round_page(s);
    addr = (vm_offset_t) kmem_alloc(kernel_map, s);
    pv_table = (pv_entry_t) addr;

    /*
     * init the pv free list
     */
    init_pv_entries(npg);
    /*
     * Now it is safe to enable pv_table recording.
     */
    pmap_initialized = TRUE;
}

/*
 * Used to map a range of physical addresses into kernel
 * virtual address space.
 *
 * For now, VM is already on, we only need to map the
 * specified memory.
 */
vm_offset_t
pmap_map(virt, start, end, prot)
    vm_offset_t virt;
    vm_offset_t start;
    vm_offset_t end;
    int prot;
{
    while (start < end) {
        pmap_enter(kernel_pmap, virt, start, prot, FALSE);
        virt += PAGE_SIZE;
        start += PAGE_SIZE;
    }
    return (virt);
}

/*
 * Create and return a physical map.
 *
 * If the size specified for the map
 * is zero, the map is an actual physical
 * map, and may be referenced by the
 * hardware.
 *
 * If the size specified is non-zero,
 * the map will be used in software only, and
 * is bounded by that size.
 *
 */

pmap_t
pmap_create(size)
    vm_size_t size;
{
    register pmap_t pmap;

    /*
     * Software use map does not need a pmap
     */
    if (size)
        return (NULL);

    pmap = (pmap_t) malloc(sizeof *pmap, M_VMPMAP, M_WAITOK);
    bzero(pmap, sizeof(*pmap));
    pmap_pinit(pmap);
    return (pmap);
}

/*
 * Initialize a preallocated and zeroed pmap structure,
 * such as one in a vmspace structure.
 */
void
pmap_pinit(pmap)
    register struct pmap *pmap;
{
    /*
     * No need to allocate page table space yet but we do need a valid
     * page directory table.
     */
    pmap->pm_pdir = (pd_entry_t *) kmem_alloc(kernel_map, PAGE_SIZE);

    /* wire in kernel global address entries */
    bcopy(PTD + KPTDI, pmap->pm_pdir + KPTDI, nkpt * PTESIZE);

    /* install self-referential address mapping entry */
    *(int *) (pmap->pm_pdir + PTDPTDI) =
        ((int) pmap_kextract((vm_offset_t) pmap->pm_pdir)) | PG_V | PG_KW;

    pmap->pm_count = 1;
}

/*
 * grow the number of kernel page table entries, if needed
 */

vm_page_t nkpg;
vm_offset_t kernel_vm_end;

void
pmap_growkernel(vm_offset_t addr)
{
    struct proc *p;
    struct pmap *pmap;
    int s;

    s = splhigh();
    if (kernel_vm_end == 0) {
        kernel_vm_end = KERNBASE;
        nkpt = 0;
        while (pdir_pde(PTD, kernel_vm_end)) {
            kernel_vm_end = (kernel_vm_end + NBPG * NPTEPG) & ~(NBPG * NPTEPG - 1);
            ++nkpt;
        }
    }
    addr = (addr + NBPG * NPTEPG) & ~(NBPG * NPTEPG - 1);
    while (kernel_vm_end < addr) {
        if (pdir_pde(PTD, kernel_vm_end)) {
            kernel_vm_end = (kernel_vm_end + NBPG * NPTEPG) & ~(NBPG * NPTEPG - 1);
            continue;
        }
        ++nkpt;
        if (!nkpg) {
            nkpg = vm_page_alloc(kernel_object, 0, VM_ALLOC_SYSTEM);
            if (!nkpg)
                panic("pmap_growkernel: no memory to grow kernel");
            vm_page_wire(nkpg);
            vm_page_remove(nkpg);
            pmap_zero_page(VM_PAGE_TO_PHYS(nkpg));
        }
        pdir_pde(PTD, kernel_vm_end) = (pd_entry_t) (VM_PAGE_TO_PHYS(nkpg) | PG_V | PG_KW);
        nkpg = NULL;

        for (p = (struct proc *) allproc; p != NULL; p = p->p_next) {
            if (p->p_vmspace) {
                pmap = &p->p_vmspace->vm_pmap;
                *pmap_pde(pmap, kernel_vm_end) = pdir_pde(PTD, kernel_vm_end);
            }
        }
        *pmap_pde(kernel_pmap, kernel_vm_end) = pdir_pde(PTD, kernel_vm_end);
        kernel_vm_end = (kernel_vm_end + NBPG * NPTEPG) & ~(NBPG * NPTEPG - 1);
    }
    splx(s);
}

/*
 * Retire the given physical map from service.
 * Should only be called if the map contains
 * no valid mappings.
 */
void
pmap_destroy(pmap)
    register pmap_t pmap;
{
    int count;

    if (pmap == NULL)
        return;

    count = --pmap->pm_count;
    if (count == 0) {
        pmap_release(pmap);
        free((caddr_t) pmap, M_VMPMAP);
    }
}

/*
 * Release any resources held by the given physical map.
 * Called when a pmap initialized by pmap_pinit is being released.
 * Should only be called if the map contains no valid mappings.
 */
void
pmap_release(pmap)
    register struct pmap *pmap;
{
    kmem_free(kernel_map, (vm_offset_t) pmap->pm_pdir, PAGE_SIZE);
}

/*
 * Add a reference to the specified pmap.
 */
void
pmap_reference(pmap)
    pmap_t pmap;
{
    if (pmap != NULL) {
        pmap->pm_count++;
    }
}

#define PV_FREELIST_MIN ((NBPG / sizeof (struct pv_entry)) / 2)

/*
 * Data for the pv entry allocation mechanism
 */
int pv_freelistcnt;
pv_entry_t pv_freelist;
vm_offset_t pvva;
int npvvapg;

/*
 * free the pv_entry back to the free list
 */
inline static void
free_pv_entry(pv)
    pv_entry_t pv;
{
    if (!pv)
        return;
    ++pv_freelistcnt;
    pv->pv_next = pv_freelist;
    pv_freelist = pv;
}

/*
 * get a new pv_entry, allocating a block from the system
 * when needed.
 * the memory allocation is performed bypassing the malloc code
 * because of the possibility of allocations at interrupt time.
 */
static inline pv_entry_t
get_pv_entry()
{
    pv_entry_t tmp;

    /*
     * get more pv_entry pages if needed
     */
    if (pv_freelistcnt < PV_FREELIST_MIN || pv_freelist == 0) {
        pmap_alloc_pv_entry();
    }
    /*
     * get a pv_entry off of the free list
     */
    --pv_freelistcnt;
    tmp = pv_freelist;
    pv_freelist = tmp->pv_next;
    return tmp;
}

/*
 * this *strange* allocation routine *statistically* eliminates the
 * *possibility* of a malloc failure (*FATAL*) for a pv_entry_t data structure.
 * also -- this code is MUCH MUCH faster than the malloc equiv...
 */
static void
pmap_alloc_pv_entry()
{
    /*
     * do we have any pre-allocated map-pages left?
     */
    if (npvvapg) {
        vm_page_t m;

        /*
         * we do this to keep recursion away
         */
        pv_freelistcnt += PV_FREELIST_MIN;
        /*
         * allocate a physical page out of the vm system
         */
        m = vm_page_alloc(kernel_object,
            pvva - vm_map_min(kernel_map), VM_ALLOC_INTERRUPT);
        if (m) {
            int newentries;
            int i;
            pv_entry_t entry;

            newentries = (NBPG / sizeof(struct pv_entry));
            /*
             * wire the page
             */
            vm_page_wire(m);
            m->flags &= ~PG_BUSY;
            /*
             * let the kernel see it
             */
            pmap_kenter(pvva, VM_PAGE_TO_PHYS(m));

            entry = (pv_entry_t) pvva;
            /*
             * update the allocation pointers
             */
            pvva += NBPG;
            --npvvapg;

            /*
             * free the entries into the free list
             */
            for (i = 0; i < newentries; i++) {
                free_pv_entry(entry);
                entry++;
            }
        }
        pv_freelistcnt -= PV_FREELIST_MIN;
    }
    if (!pv_freelist)
        panic("get_pv_entry: cannot get a pv_entry_t");
}



/*
 * init the pv_entry allocation system
 */
#define PVSPERPAGE 64
void
init_pv_entries(npg)
    int npg;
{
    /*
     * allocate enough kvm space for PVSPERPAGE entries per page (lots)
     * kvm space is fairly cheap, be generous!!! (the system can panic if
     * this is too small.)
     */
    npvvapg = ((npg * PVSPERPAGE) * sizeof(struct pv_entry) + NBPG - 1) / NBPG;
    pvva = kmem_alloc_pageable(kernel_map, npvvapg * NBPG);
    /*
     * get the first batch of entries
     */
    free_pv_entry(get_pv_entry());
}

static pt_entry_t *
get_pt_entry(pmap)
    pmap_t pmap;
{
    vm_offset_t frame = (int) pmap->pm_pdir[PTDPTDI] & PG_FRAME;

    /* are we current address space or kernel? */
    if (pmap == kernel_pmap || frame == ((int) PTDpde & PG_FRAME)) {
        return PTmap;
    }
    /* otherwise, we are alternate address space */
    if (frame != ((int) APTDpde & PG_FRAME)) {
        APTDpde = pmap->pm_pdir[PTDPTDI];
        pmap_update();
    }
    return APTmap;
}

/*
 * If it is the first entry on the list, it is actually
 * in the header and we must copy the following entry up
 * to the header.  Otherwise we must search the list for
 * the entry.  In either case we free the now unused entry.
 */
void
pmap_remove_entry(pmap, pv, va)
    struct pmap *pmap;
    pv_entry_t pv;
    vm_offset_t va;
{
    pv_entry_t npv;
    int s;

    s = splhigh();
    if (pmap == pv->pv_pmap && va == pv->pv_va) {
        npv = pv->pv_next;
        if (npv) {
            *pv = *npv;
            free_pv_entry(npv);
        } else {
            pv->pv_pmap = NULL;
        }
    } else {
        for (npv = pv->pv_next; npv; npv = npv->pv_next) {
            if (pmap == npv->pv_pmap && va == npv->pv_va) {
                break;
            }
            pv = npv;
        }
        if (npv) {
            pv->pv_next = npv->pv_next;
            free_pv_entry(npv);
        }
    }
    splx(s);
}

/*
 * Remove the given range of addresses from the specified map.
 *
 * It is assumed that the start and end are properly
 * rounded to the page size.
 */
void
pmap_remove(pmap, sva, eva)
    struct pmap *pmap;
    register vm_offset_t sva;
    register vm_offset_t eva;
{
    register pt_entry_t *ptp, *ptq;
    vm_offset_t pa;
    register pv_entry_t pv;
    vm_offset_t va;
    pt_entry_t oldpte;

    if (pmap == NULL)
        return;

    ptp = get_pt_entry(pmap);

    /*
     * special handling of removing one page.  a very
     * common operation and easy to short circuit some
     * code.
     */
    if ((sva + NBPG) == eva) {

        if (*pmap_pde(pmap, sva) == 0)
            return;

        ptq = ptp + i386_btop(sva);

        if (!*ptq)
            return;
        /*
         * Update statistics
         */
        if (pmap_pte_w(ptq))
            pmap->pm_stats.wired_count--;
        pmap->pm_stats.resident_count--;

        pa = pmap_pte_pa(ptq);
        oldpte = *ptq;
        *ptq = 0;

        if (pmap_is_managed(pa)) {
            if ((int) oldpte & PG_M) {
                if (sva < USRSTACK + (UPAGES * NBPG) ||
                    (sva >= KERNBASE && (sva < clean_sva || sva >= clean_eva))) {
                    PHYS_TO_VM_PAGE(pa)->dirty |= VM_PAGE_BITS_ALL;
                }
            }
            pv = pa_to_pvh(pa);
            pmap_remove_entry(pmap, pv, sva);
        }
        pmap_unuse_pt(pmap, sva);
        pmap_update();
        return;
    }
    sva = i386_btop(sva);
    eva = i386_btop(eva);

    while (sva < eva) {
        /*
         * Weed out invalid mappings. Note: we assume that the page
         * directory table is always allocated, and in kernel virtual.
         */

        if (*pmap_pde(pmap, i386_ptob(sva)) == 0) {
            /* We can race ahead here, straight to next pde.. */
            sva = ((sva + NPTEPG) & ~(NPTEPG - 1));
            continue;
        }
        ptq = ptp + sva;

        /*
         * search for page table entries, use string operations that
         * are much faster than explicitly scanning when page tables
         * are not fully populated.
         */
        if (*ptq == 0) {
            vm_offset_t pdnxt = ((sva + NPTEPG) & ~(NPTEPG - 1));
            vm_offset_t nscan = pdnxt - sva;
            int found = 0;

            if ((nscan + sva) > eva)
                nscan = eva - sva;

            asm("xorl %%eax,%%eax;cld;repe;scasl;jz 1f;incl %%eax;1:;" :
                "=D"(ptq), "=a"(found) : "c"(nscan), "0"(ptq) : "cx");

            if (!found) {
                sva = pdnxt;
                continue;
            }
            ptq -= 1;

            sva = ptq - ptp;
        }
        /*
         * Update statistics
         */
        oldpte = *ptq;
        if (((int) oldpte) & PG_W)
            pmap->pm_stats.wired_count--;
        pmap->pm_stats.resident_count--;

        /*
         * Invalidate the PTEs. XXX: should cluster them up and
         * invalidate as many as possible at once.
         */
        *ptq = 0;

        va = i386_ptob(sva);

        /*
         * Remove from the PV table (raise IPL since we may be called
         * at interrupt time).
         */
        pa = ((int) oldpte) & PG_FRAME;
        if (!pmap_is_managed(pa)) {
            pmap_unuse_pt(pmap, va);
            ++sva;
            continue;
        }
        if ((int) oldpte & PG_M) {
            if (sva < USRSTACK + (UPAGES * NBPG) ||
                (sva >= KERNBASE && (sva < clean_sva || sva >= clean_eva))) {
                PHYS_TO_VM_PAGE(pa)->dirty |= VM_PAGE_BITS_ALL;
            }
        }
        pv = pa_to_pvh(pa);
        pmap_remove_entry(pmap, pv, va);
        pmap_unuse_pt(pmap, va);
        ++sva;
    }
    pmap_update();
}

/*
 * Routine:     pmap_remove_all
 * Function:
 *      Removes this physical page from
 *      all physical maps in which it resides.
 *      Reflects back modify bits to the pager.
 *
 * Notes:
 *      Original versions of this routine were very
 *      inefficient because they iteratively called
 *      pmap_remove (slow...)
 */
void
pmap_remove_all(pa)
    vm_offset_t pa;
{
    register pv_entry_t pv, npv;
    register pt_entry_t *pte, *ptp;
    vm_offset_t va;
    struct pmap *pmap;
    vm_page_t m;
    int s;
    int anyvalid = 0;

    /*
     * Not one of ours
     */
    /*
     * XXX this makes pmap_page_protect(NONE) illegal for non-managed
     * pages!
     */
    if (!pmap_is_managed(pa))
        return;

    pa = i386_trunc_page(pa);
    pv = pa_to_pvh(pa);
    m = PHYS_TO_VM_PAGE(pa);

    s = splhigh();
    while (pv->pv_pmap != NULL) {
        pmap = pv->pv_pmap;
        ptp = get_pt_entry(pmap);
        va = pv->pv_va;
        pte = ptp + i386_btop(va);
        if (pmap_pte_w(pte))
            pmap->pm_stats.wired_count--;
        if (*pte) {
            pmap->pm_stats.resident_count--;
            anyvalid++;

            /*
             * Update the vm_page_t clean and reference bits.
             */
            if ((int) *pte & PG_M) {
                if (va < USRSTACK + (UPAGES * NBPG) ||
                    (va >= KERNBASE && (va < clean_sva || va >= clean_eva))) {
                    PHYS_TO_VM_PAGE(pa)->dirty |= VM_PAGE_BITS_ALL;
                }
            }
            *pte = 0;
            pmap_unuse_pt(pmap, va);
        }
        npv = pv->pv_next;
        if (npv) {
            *pv = *npv;
            free_pv_entry(npv);
        } else {
            pv->pv_pmap = NULL;
        }
    }
    splx(s);
    if (anyvalid)
        pmap_update();
}


/*
 * Set the physical protection on the
 * specified range of this map as requested.
 */
void
pmap_protect(pmap, sva, eva, prot)
    register pmap_t pmap;
    vm_offset_t sva, eva;
    vm_prot_t prot;
{
    register pt_entry_t *pte;
    register vm_offset_t va;
    int i386prot;
    register pt_entry_t *ptp;
    int evap = i386_btop(eva);
    int anyvalid = 0;

    if (pmap == NULL)
        return;

    if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
        pmap_remove(pmap, sva, eva);
        return;
    }
    if (prot & VM_PROT_WRITE)
        return;

    ptp = get_pt_entry(pmap);

    va = sva;
    while (va < eva) {
        int found = 0;
        int svap;
        vm_offset_t nscan;

        /*
         * Page table page is not allocated. Skip it, we don't want to
         * force allocation of unnecessary PTE pages just to set the
         * protection.
         */
        if (!*pmap_pde(pmap, va)) {
            /* XXX: avoid address wrap around */
nextpde:
            if (va >= i386_trunc_pdr((vm_offset_t) - 1))
                break;
            va = i386_round_pdr(va + PAGE_SIZE);
            continue;
        }
        pte = ptp + i386_btop(va);

        if (*pte == 0) {
            /*
             * scan for a non-empty pte
             */
            svap = pte - ptp;
            nscan = ((svap + NPTEPG) & ~(NPTEPG - 1)) - svap;

            if (nscan + svap > evap)
                nscan = evap - svap;

            found = 0;
            if (nscan)
                asm("xorl %%eax,%%eax;cld;repe;scasl;jz 1f;incl %%eax;1:;" :
                    "=D"(pte), "=a"(found) : "c"(nscan), "0"(pte) : "cx");

            if (!found)
                goto nextpde;

            pte -= 1;
            svap = pte - ptp;

            va = i386_ptob(svap);
        }
        anyvalid++;

        i386prot = pte_prot(pmap, prot);
        if (va < UPT_MAX_ADDRESS) {
            i386prot |= PG_u;
            if (va >= UPT_MIN_ADDRESS)
                i386prot |= PG_RW;
        }
        pmap_pte_set_prot(pte, i386prot);
        va += PAGE_SIZE;
    }
    if (anyvalid)
        pmap_update();
}

/*
 * Insert the given physical page (p) at
 * the specified virtual address (v) in the
 * target physical map with the protection requested.
 *
 * If specified, the page will be wired down, meaning
 * that the related pte can not be reclaimed.
 *
 * NB:  This is the only routine which MAY NOT lazy-evaluate
 * or lose information.  That is, this routine must actually
 * insert this page into the given map NOW.
 */
void
pmap_enter(pmap, va, pa, prot, wired)
    register pmap_t pmap;
    vm_offset_t va;
    register vm_offset_t pa;
    vm_prot_t prot;
    boolean_t wired;
{
    register pt_entry_t *pte;
    register pt_entry_t npte;
    vm_offset_t opa;
    int ptevalid = 0;

    if (pmap == NULL)
        return;

    va = i386_trunc_page(va);
    pa = i386_trunc_page(pa);
    if (va > VM_MAX_KERNEL_ADDRESS)
        panic("pmap_enter: toobig");

    /*
     * Page Directory table entry not valid, we need a new PT page
     */
    if (*pmap_pde(pmap, va) == 0) {
        printf("kernel page directory invalid pdir=%p, va=0x%lx\n",
            pmap->pm_pdir[PTDPTDI], va);
        panic("invalid kernel page directory");
    }
    pte = pmap_pte(pmap, va);
    opa = pmap_pte_pa(pte);

    /*
     * Mapping has not changed, must be protection or wiring change.
     */
    if (opa == pa) {
        /*
         * Wiring change, just update stats. We don't worry about
         * wiring PT pages as they remain resident as long as there
         * are valid mappings in them. Hence, if a user page is wired,
         * the PT page will be also.
         */
        if (wired && !pmap_pte_w(pte))
            pmap->pm_stats.wired_count++;
        else if (!wired && pmap_pte_w(pte))
            pmap->pm_stats.wired_count--;

        goto validate;
    }
    /*
     * Mapping has changed, invalidate old range and fall through to
     * handle validating new mapping.
     */
    if (opa) {
        pmap_remove(pmap, va, va + PAGE_SIZE);
    }
    /*
     * Enter on the PV list if part of our managed memory.  Note that we
     * raise IPL while manipulating pv_table since pmap_enter can be
     * called at interrupt time.
     */
    if (pmap_is_managed(pa)) {
        register pv_entry_t pv, npv;
        int s;

        pv = pa_to_pvh(pa);
        s = splhigh();
        /*
         * No entries yet, use header as the first entry
         */
        if (pv->pv_pmap == NULL) {
            pv->pv_va = va;
            pv->pv_pmap = pmap;
            pv->pv_next = NULL;
        }
        /*
         * There is at least one other VA mapping this page. Place
         * this entry after the header.
         */
        else {
            npv = get_pv_entry();
            npv->pv_va = va;
            npv->pv_pmap = pmap;
            npv->pv_next = pv->pv_next;
            pv->pv_next = npv;
        }
        splx(s);
    }

    /*
     * Increment counters
     */
    pmap->pm_stats.resident_count++;
    if (wired)
        pmap->pm_stats.wired_count++;

validate:
    /*
     * Now validate mapping with desired protection/wiring.
     */
    npte = (pt_entry_t) ((int) (pa | pte_prot(pmap, prot) | PG_V));

    /*
     * When forking (copy-on-write, etc): A process will turn off write
     * permissions for any of its writable pages.  If the data (object) is
     * only referred to by one process, the processes map is modified
     * directly as opposed to using the object manipulation routine.  When
     * using pmap_protect, the modified bits are not kept in the vm_page_t
     * data structure.  Therefore, when using pmap_enter in vm_fault to
     * bring back writability of a page, there has been no memory of the
     * modified or referenced bits except at the pte level.  this clause
     * supports the carryover of the modified and used (referenced) bits.
     */
    if (pa == opa)
        (int) npte |= (int) *pte & (PG_M | PG_U);

    if (wired)
        (int) npte |= PG_W;
    if (va < UPT_MIN_ADDRESS)
        (int) npte |= PG_u;
    else if (va < UPT_MAX_ADDRESS)
        (int) npte |= PG_u | PG_RW;

    if (*pte != npte) {
        if (*pte)
            ptevalid++;
        *pte = npte;
    }
    if (ptevalid) {
        pmap_update();
    } else {
        pmap_use_pt(pmap, va);
    }
}

/*
 * Add a list of wired pages to the kva
 * this routine is only used for temporary
 * kernel mappings that do not need to have
 * page modification or references recorded.
 * Note that old mappings are simply written
 * over.  The page *must* be wired.
 */
void
pmap_qenter(va, m, count)
    vm_offset_t va;
    vm_page_t *m;
    int count;
{
    int i;
    int anyvalid = 0;
    register pt_entry_t *pte;

    for (i = 0; i < count; i++) {
        pte = vtopte(va + i * NBPG);
        if (*pte)
            anyvalid++;
        *pte = (pt_entry_t) ((int) (VM_PAGE_TO_PHYS(m[i]) | PG_RW | PG_V));
    }
    if (anyvalid)
        pmap_update();
}
/*
 * this routine jerks page mappings from the
 * kernel -- it is meant only for temporary mappings.
 */
void
pmap_qremove(va, count)
    vm_offset_t va;
    int count;
{
    int i;
    register pt_entry_t *pte;

    for (i = 0; i < count; i++) {
        pte = vtopte(va + i * NBPG);
        *pte = 0;
    }
    pmap_update();
}

/*
 * add a wired page to the kva
 * note that in order for the mapping to take effect -- you
 * should do a pmap_update after doing the pmap_kenter...
 */
void
pmap_kenter(va, pa)
    vm_offset_t va;
    register vm_offset_t pa;
{
    register pt_entry_t *pte;
    int wasvalid = 0;

    pte = vtopte(va);

    if (*pte)
        wasvalid++;

    *pte = (pt_entry_t) ((int) (pa | PG_RW | PG_V));

    if (wasvalid)
        pmap_update();
}

/*
 * remove a page from the kernel pagetables
 */
void
pmap_kremove(va)
    vm_offset_t va;
{
    register pt_entry_t *pte;

    pte = vtopte(va);

    *pte = (pt_entry_t) 0;
    pmap_update();
}

/*
 * this code makes some *MAJOR* assumptions:
 * 1. Current pmap & pmap exists.
 * 2. Not wired.
 * 3. Read access.
 * 4. No page table pages.
 * 5. Tlbflush is deferred to calling procedure.
 * 6. Page IS managed.
 * but is *MUCH* faster than pmap_enter...
 */

static inline void
pmap_enter_quick(pmap, va, pa)
    register pmap_t pmap;
    vm_offset_t va;
    register vm_offset_t pa;
{
    register pt_entry_t *pte;
    register pv_entry_t pv, npv;
    int s;

    /*
     * Enter on the PV list if part of our managed memory.  Note that we
     * raise IPL while manipulating pv_table since pmap_enter can be
     * called at interrupt time.
     */

    pte = vtopte(va);

    /* a fault on the page table might occur here */
    if (*pte) {
        pmap_remove(pmap, va, va + PAGE_SIZE);
    }
    pv = pa_to_pvh(pa);
    s = splhigh();
    /*
     * No entries yet, use header as the first entry
     */
    if (pv->pv_pmap == NULL) {
        pv->pv_pmap = pmap;
        pv->pv_va = va;
        pv->pv_next = NULL;
    }
    /*
     * There is at least one other VA mapping this page. Place this entry
     * after the header.
     */
    else {
        npv = get_pv_entry();
        npv->pv_va = va;
        npv->pv_pmap = pmap;
        npv->pv_next = pv->pv_next;
        pv->pv_next = npv;
    }
    splx(s);

    /*
     * Increment counters
     */
    pmap->pm_stats.resident_count++;

    /*
     * Now validate mapping with desired protection/wiring.
     */
    *pte = (pt_entry_t) ((int) (pa | PG_V | PG_u));

    pmap_use_pt(pmap, va);

    return;
}

#define MAX_INIT_PT (1024*2048)
/*
 * pmap_object_init_pt preloads the ptes for a given object
 * into the specified pmap.  This eliminates the blast of soft
 * faults on process startup and immediately after an mmap.
 */
void
pmap_object_init_pt(pmap, addr, object, offset, size)
    pmap_t pmap;
    vm_offset_t addr;
    vm_object_t object;
    vm_offset_t offset;
    vm_offset_t size;
{
    vm_offset_t tmpoff;
    vm_page_t p;
    int objbytes;

    if (!pmap || ((size > MAX_INIT_PT) &&
        (object->resident_page_count > (MAX_INIT_PT / NBPG)))) {
        return;
    }

    /*
     * if we are processing a major portion of the object, then scan the
     * entire thing.
     */
    if (size > (object->size >> 2)) {
        objbytes = size;

        for (p = object->memq.tqh_first;
            ((objbytes > 0) && (p != NULL));
            p = p->listq.tqe_next) {

            tmpoff = p->offset;
            if (tmpoff < offset) {
                continue;
            }
            tmpoff -= offset;
            if (tmpoff >= size) {
                continue;
            }
            if (((p->flags & (PG_ACTIVE | PG_INACTIVE)) != 0) &&
                ((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
                (p->bmapped == 0) &&
                (p->busy == 0) &&
                (p->flags & (PG_BUSY | PG_FICTITIOUS | PG_CACHE)) == 0) {
                vm_page_hold(p);
                p->flags |= PG_MAPPED;
                pmap_enter_quick(pmap, addr + tmpoff, VM_PAGE_TO_PHYS(p));
                vm_page_unhold(p);
            }
            objbytes -= NBPG;
        }
    } else {
        /*
         * else lookup the pages one-by-one.
         */
        for (tmpoff = 0; tmpoff < size; tmpoff += NBPG) {
            p = vm_page_lookup(object, tmpoff + offset);
            if (p && ((p->flags & (PG_ACTIVE | PG_INACTIVE)) != 0) &&
                (p->bmapped == 0) && (p->busy == 0) &&
                ((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
                (p->flags & (PG_BUSY | PG_FICTITIOUS | PG_CACHE)) == 0) {
                vm_page_hold(p);
                p->flags |= PG_MAPPED;
                pmap_enter_quick(pmap, addr + tmpoff, VM_PAGE_TO_PHYS(p));
                vm_page_unhold(p);
            }
        }
    }
}

/*
 * Routine:     pmap_change_wiring
 * Function:    Change the wiring attribute for a map/virtual-address
 *              pair.
 * In/out conditions:
 *              The mapping must already exist in the pmap.
 */
void
pmap_change_wiring(pmap, va, wired)
    register pmap_t pmap;
    vm_offset_t va;
    boolean_t wired;
{
    register pt_entry_t *pte;

    if (pmap == NULL)
        return;

    pte = pmap_pte(pmap, va);

    if (wired && !pmap_pte_w(pte))
        pmap->pm_stats.wired_count++;
    else if (!wired && pmap_pte_w(pte))
        pmap->pm_stats.wired_count--;

    /*
     * Wiring is not a hardware characteristic so there is no need to
     * invalidate TLB.
     */
    pmap_pte_set_w(pte, wired);
    /*
     * When unwiring, set the modified bit in the pte -- could have been
     * changed by the kernel
     */
    if (!wired)
        (int) *pte |= PG_M;
}



/*
 * Copy the range specified by src_addr/len
 * from the source map to the range dst_addr/len
 * in the destination map.
 *
 * This routine is only advisory and need not do anything.
 */
void
pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
    pmap_t dst_pmap, src_pmap;
    vm_offset_t dst_addr;
    vm_size_t len;
    vm_offset_t src_addr;
{
}

/*
 * Routine:     pmap_kernel
 * Function:
 *      Returns the physical map handle for the kernel.
 */
pmap_t
pmap_kernel()
{
    return (kernel_pmap);
}

/*
 * pmap_zero_page zeros the specified (machine independent)
 * page by mapping the page into virtual memory and using
 * bzero to clear its contents, one machine dependent page
 * at a time.
 */
void
pmap_zero_page(phys)
    vm_offset_t phys;
{
    if (*(int *) CMAP2)
        panic("pmap_zero_page: CMAP busy");

    *(int *) CMAP2 = PG_V | PG_KW | i386_trunc_page(phys);
    bzero(CADDR2, NBPG);

    *(int *) CMAP2 = 0;
    pmap_update();
}

/*
 * pmap_copy_page copies the specified (machine independent)
 * page by mapping the page into virtual memory and using
 * bcopy to copy the page, one machine dependent page at a
 * time.
 */
void
pmap_copy_page(src, dst)
    vm_offset_t src;
    vm_offset_t dst;
{
    if (*(int *) CMAP1 || *(int *) CMAP2)
        panic("pmap_copy_page: CMAP busy");

    *(int *) CMAP1 = PG_V | PG_KW | i386_trunc_page(src);
    *(int *) CMAP2 = PG_V | PG_KW | i386_trunc_page(dst);

#if __GNUC__ > 1
    memcpy(CADDR2, CADDR1, NBPG);
#else
    bcopy(CADDR1, CADDR2, NBPG);
#endif
    *(int *) CMAP1 = 0;
    *(int *) CMAP2 = 0;
    pmap_update();
}


/*
 * Routine:     pmap_pageable
 * Function:
 *      Make the specified pages (by pmap, offset)
 *      pageable (or not) as requested.
 *
 *      A page which is not pageable may not take
 *      a fault; therefore, its page table entry
 *      must remain valid for the duration.
 *
 *      This routine is merely advisory; pmap_enter
 *      will specify that these pages are to be wired
 *      down (or not) as appropriate.
 */
void
pmap_pageable(pmap, sva, eva, pageable)
    pmap_t pmap;
    vm_offset_t sva, eva;
    boolean_t pageable;
{
}

/*
 * this routine returns true if a physical page resides
 * in the given pmap.
 */
boolean_t
pmap_page_exists(pmap, pa)
    pmap_t pmap;
    vm_offset_t pa;
{
    register pv_entry_t pv;
    int s;

    if (!pmap_is_managed(pa))
        return FALSE;

    pv = pa_to_pvh(pa);
    s = splhigh();

    /*
     * Not found, check current mappings returning immediately if found.
     */
    if (pv->pv_pmap != NULL) {
        for (; pv; pv = pv->pv_next) {
            if (pv->pv_pmap == pmap) {
                splx(s);
                return TRUE;
            }
        }
    }
    splx(s);
    return (FALSE);
}

/*
 * pmap_testbit tests bits in pte's
 * note that the testbit/changebit routines are inline,
 * and a lot of things compile-time evaluate.
 */
static __inline boolean_t
pmap_testbit(pa, bit)
    register vm_offset_t pa;
    int bit;
{
    register pv_entry_t pv;
    pt_entry_t *pte;
    int s;

    if (!pmap_is_managed(pa))
        return FALSE;

    pv = pa_to_pvh(pa);
    s = splhigh();

    /*
     * Not found, check current mappings returning immediately if found.
     */
    if (pv->pv_pmap != NULL) {
        for (; pv; pv = pv->pv_next) {
            /*
             * if the bit being tested is the modified bit, then
             * mark UPAGES as always modified, and ptes as never
             * modified.
             */
            if (bit & PG_U) {
                if ((pv->pv_va >= clean_sva) && (pv->pv_va < clean_eva)) {
                    continue;
                }
            }
            if (bit & PG_M) {
                if (pv->pv_va >= USRSTACK) {
                    if (pv->pv_va >= clean_sva && pv->pv_va < clean_eva) {
                        continue;
                    }
                    if (pv->pv_va < USRSTACK + (UPAGES * NBPG)) {
                        splx(s);
                        return TRUE;
                    } else if (pv->pv_va < KERNBASE) {
                        splx(s);
                        return FALSE;
                    }
                }
            }
            if (!pv->pv_pmap) {
                printf("Null pmap (tb) at va: 0x%lx\n", pv->pv_va);
                continue;
            }
            pte = pmap_pte(pv->pv_pmap, pv->pv_va);
            if ((int) *pte & bit) {
                splx(s);
                return TRUE;
            }
        }
    }
    splx(s);
    return (FALSE);
}

/*
 * this routine is used to modify bits in ptes
 */
static __inline void
pmap_changebit(pa, bit, setem)
    vm_offset_t pa;
    int bit;
    boolean_t setem;
{
    register pv_entry_t pv;
    register pt_entry_t *pte, npte;
    vm_offset_t va;
    int s;

    if (!pmap_is_managed(pa))
        return;

    pv = pa_to_pvh(pa);
    s = splhigh();

    /*
     * Loop over all current mappings setting/clearing as appropriate.  If
     * setting RO do we need to clear the VAC?
     */
    if (pv->pv_pmap != NULL) {
        for (; pv; pv = pv->pv_next) {
            va = pv->pv_va;

            /*
             * don't write protect pager mappings
             */
            if (!setem && (bit == PG_RW)) {
                if (va >= clean_sva && va < clean_eva)
                    continue;
            }
            if (!pv->pv_pmap) {
                printf("Null pmap (cb) at va: 0x%lx\n", va);
                continue;
            }
            pte = pmap_pte(pv->pv_pmap, va);
            if (setem)
                (int) npte = (int) *pte | bit;
            else
                (int) npte = (int) *pte & ~bit;
            *pte = npte;
        }
    }
    splx(s);
    pmap_update();
}

/*
 * pmap_page_protect:
 *
 * Lower the permission for all mappings to a given page.
 */
void
pmap_page_protect(phys, prot)
    vm_offset_t phys;
    vm_prot_t prot;
{
    if ((prot & VM_PROT_WRITE) == 0) {
        if (prot & (VM_PROT_READ | VM_PROT_EXECUTE))
            pmap_changebit(phys, PG_RW, FALSE);
        else
            pmap_remove_all(phys);
    }
}

vm_offset_t
pmap_phys_address(ppn)
    int ppn;
{
    return (i386_ptob(ppn));
}

/*
 * pmap_is_referenced:
 *
 * Return whether or not the specified physical page was referenced
 * by any physical maps.
 */
boolean_t
pmap_is_referenced(vm_offset_t pa)
{
    return pmap_testbit((pa), PG_U);
}

/*
 * pmap_is_modified:
 *
 * Return whether or not the specified physical page was modified
 * in any physical maps.
 */
boolean_t
pmap_is_modified(vm_offset_t pa)
{
    return pmap_testbit((pa), PG_M);
}

/*
 * Clear the modify bits on the specified physical page.
 */
void
pmap_clear_modify(vm_offset_t pa)
{
    pmap_changebit((pa), PG_M, FALSE);
}

/*
 * pmap_clear_reference:
 *
 * Clear the reference bit on the specified physical page.
 */
void
pmap_clear_reference(vm_offset_t pa)
{
    pmap_changebit((pa), PG_U, FALSE);
}

/*
 * Routine:     pmap_copy_on_write
 * Function:
 *      Remove write privileges from all
 *      physical maps for this physical page.
 */
void
pmap_copy_on_write(vm_offset_t pa)
{
    pmap_changebit((pa), PG_RW, FALSE);
}

/*
 * Miscellaneous support routines follow
 */

void
i386_protection_init()
{
    register int *kp, prot;

    kp = protection_codes;
    for (prot = 0; prot < 8; prot++) {
        switch (prot) {
        case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_NONE:
            /*
             * Read access is also 0. There isn't any execute bit,
             * so just make it readable.
             */
        case VM_PROT_READ | VM_PROT_NONE | VM_PROT_NONE:
        case VM_PROT_READ | VM_PROT_NONE | VM_PROT_EXECUTE:
        case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_EXECUTE:
            *kp++ = 0;
            break;
        case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_NONE:
        case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_EXECUTE:
        case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_NONE:
        case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE:
            *kp++ = PG_RW;
            break;
        }
    }
}

/*
 * Map a set of physical memory pages into the kernel virtual
 * address space. Return a pointer to where it is mapped. This
 * routine is intended to be used for mapping device memory,
 * NOT real memory. The non-cacheable bits are set on each
 * mapped page.
 */
void *
pmap_mapdev(pa, size)
    vm_offset_t pa;
    vm_size_t size;
{
    vm_offset_t va, tmpva;
    pt_entry_t *pte;

    pa = trunc_page(pa);
    size = roundup(size, PAGE_SIZE);

    va = kmem_alloc_pageable(kernel_map, size);
    if (!va)
        panic("pmap_mapdev: Couldn't alloc kernel virtual memory");

    for (tmpva = va; size > 0;) {
        pte = vtopte(tmpva);
        *pte = (pt_entry_t) ((int) (pa | PG_RW | PG_V | PG_N));
        size -= PAGE_SIZE;
        tmpva += PAGE_SIZE;
        pa += PAGE_SIZE;
    }
    pmap_update();

    return ((void *) va);
}

#ifdef DEBUG
/* print address space of pmap */
void
pads(pm)
    pmap_t pm;
{
    unsigned va, i, j;
    pt_entry_t *ptep;

    if (pm == kernel_pmap)
        return;
    for (i = 0; i < 1024; i++)
        if (pm->pm_pdir[i])
            for (j = 0; j < 1024; j++) {
                va = (i << PD_SHIFT) + (j << PG_SHIFT);
                if (pm == kernel_pmap && va < KERNBASE)
                    continue;
                if (pm != kernel_pmap && va > UPT_MAX_ADDRESS)
                    continue;
                ptep = pmap_pte(pm, va);
                if (pmap_pte_v(ptep))
                    printf("%x:%x ", va, *(int *) ptep);
            };

}

void
pmap_pvdump(pa)
    vm_offset_t pa;
{
    register pv_entry_t pv;

    printf("pa %x", pa);
    for (pv = pa_to_pvh(pa); pv; pv = pv->pv_next) {
#ifdef used_to_be
        printf(" -> pmap %x, va %x, flags %x",
            pv->pv_pmap, pv->pv_va, pv->pv_flags);
#endif
        printf(" -> pmap %x, va %x",
            pv->pv_pmap, pv->pv_va);
        pads(pv->pv_pmap);
    }
    printf(" ");
}
#endif