pmap.c revision 111299
1/* 2 * Copyright (c) 1991 Regents of the University of California. 3 * All rights reserved. 4 * Copyright (c) 1994 John S. Dyson 5 * All rights reserved. 6 * Copyright (c) 1994 David Greenman 7 * All rights reserved. 8 * 9 * This code is derived from software contributed to Berkeley by 10 * the Systems Programming Group of the University of Utah Computer 11 * Science Department and William Jolitz of UUNET Technologies Inc. 12 * 13 * Redistribution and use in source and binary forms, with or without 14 * modification, are permitted provided that the following conditions 15 * are met: 16 * 1. Redistributions of source code must retain the above copyright 17 * notice, this list of conditions and the following disclaimer. 18 * 2. Redistributions in binary form must reproduce the above copyright 19 * notice, this list of conditions and the following disclaimer in the 20 * documentation and/or other materials provided with the distribution. 21 * 3. All advertising materials mentioning features or use of this software 22 * must display the following acknowledgement: 23 * This product includes software developed by the University of 24 * California, Berkeley and its contributors. 25 * 4. Neither the name of the University nor the names of its contributors 26 * may be used to endorse or promote products derived from this software 27 * without specific prior written permission. 28 * 29 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 30 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 31 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 32 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 33 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 34 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 35 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 36 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 37 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 38 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 39 * SUCH DAMAGE. 40 * 41 * from: @(#)pmap.c 7.7 (Berkeley) 5/12/91 42 * $FreeBSD: head/sys/i386/i386/pmap.c 111299 2003-02-23 09:45:50Z jake $ 43 */ 44 45/* 46 * Manages physical address maps. 47 * 48 * In addition to hardware address maps, this 49 * module is called upon to provide software-use-only 50 * maps which may or may not be stored in the same 51 * form as hardware maps. These pseudo-maps are 52 * used to store intermediate results from copy 53 * operations to and from address spaces. 54 * 55 * Since the information managed by this module is 56 * also stored by the logical address mapping module, 57 * this module may throw away valid virtual-to-physical 58 * mappings at almost any time. However, invalidations 59 * of virtual-to-physical mappings must be done as 60 * requested. 61 * 62 * In order to cope with hardware architectures which 63 * make virtual-to-physical map invalidates expensive, 64 * this module may delay invalidate or reduced protection 65 * operations until such time as they are actually 66 * necessary. This module is given full information as 67 * to which processors are currently using which maps, 68 * and to when physical maps must be made correct. 
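 *
 * On the i386, concretely, a 32-bit linear address is decoded in two
 * levels (this breakdown is only an illustration of the arithmetic the
 * pde/pte macros below rely on):
 *
 *	pd_index = va >> PDRSHIFT;			top 10 bits (PDRSHIFT == 22)
 *	pt_index = (va >> PAGE_SHIFT) & (NPTEPG - 1);	next 10 bits
 *	offset   = va & PAGE_MASK;			low 12 bits
 *
 * e.g. va 0xc0123456 gives pd_index 0x300, pt_index 0x123, offset 0x456.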
69 */ 70 71#include "opt_pmap.h" 72#include "opt_msgbuf.h" 73#include "opt_kstack_pages.h" 74 75#include <sys/param.h> 76#include <sys/systm.h> 77#include <sys/kernel.h> 78#include <sys/lock.h> 79#include <sys/mman.h> 80#include <sys/msgbuf.h> 81#include <sys/mutex.h> 82#include <sys/proc.h> 83#include <sys/sx.h> 84#include <sys/user.h> 85#include <sys/vmmeter.h> 86#include <sys/sysctl.h> 87#ifdef SMP 88#include <sys/smp.h> 89#endif 90 91#include <vm/vm.h> 92#include <vm/vm_param.h> 93#include <vm/vm_kern.h> 94#include <vm/vm_page.h> 95#include <vm/vm_map.h> 96#include <vm/vm_object.h> 97#include <vm/vm_extern.h> 98#include <vm/vm_pageout.h> 99#include <vm/vm_pager.h> 100#include <vm/uma.h> 101 102#include <machine/cpu.h> 103#include <machine/cputypes.h> 104#include <machine/md_var.h> 105#include <machine/specialreg.h> 106#if defined(SMP) || defined(APIC_IO) 107#include <machine/smp.h> 108#include <machine/apic.h> 109#include <machine/segments.h> 110#include <machine/tss.h> 111#endif /* SMP || APIC_IO */ 112 113#define PMAP_KEEP_PDIRS 114#ifndef PMAP_SHPGPERPROC 115#define PMAP_SHPGPERPROC 200 116#endif 117 118#if defined(DIAGNOSTIC) 119#define PMAP_DIAGNOSTIC 120#endif 121 122#define MINPV 2048 123 124#if !defined(PMAP_DIAGNOSTIC) 125#define PMAP_INLINE __inline 126#else 127#define PMAP_INLINE 128#endif 129 130/* 131 * Get PDEs and PTEs for user/kernel address space 132 */ 133#define pmap_pde(m, v) (&((m)->pm_pdir[(vm_offset_t)(v) >> PDRSHIFT])) 134#define pdir_pde(m, v) (m[(vm_offset_t)(v) >> PDRSHIFT]) 135 136#define pmap_pde_v(pte) ((*(int *)pte & PG_V) != 0) 137#define pmap_pte_w(pte) ((*(int *)pte & PG_W) != 0) 138#define pmap_pte_m(pte) ((*(int *)pte & PG_M) != 0) 139#define pmap_pte_u(pte) ((*(int *)pte & PG_A) != 0) 140#define pmap_pte_v(pte) ((*(int *)pte & PG_V) != 0) 141 142#define pmap_pte_set_w(pte, v) ((v)?(*(int *)pte |= PG_W):(*(int *)pte &= ~PG_W)) 143#define pmap_pte_set_prot(pte, v) ((*(int *)pte &= ~PG_PROT), (*(int *)pte |= (v))) 144 145/* 146 * Given a map and a machine independent protection code, 147 * convert to a vax protection code. 148 */ 149#define pte_prot(m, p) (protection_codes[p]) 150static int protection_codes[8]; 151 152struct pmap kernel_pmap_store; 153LIST_HEAD(pmaplist, pmap); 154static struct pmaplist allpmaps; 155static struct mtx allpmaps_lock; 156 157vm_offset_t avail_start; /* PA of first available physical page */ 158vm_offset_t avail_end; /* PA of last available physical page */ 159vm_offset_t virtual_avail; /* VA of first avail page (after kernel bss) */ 160vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */ 161static boolean_t pmap_initialized = FALSE; /* Has pmap_init completed? */ 162static int pgeflag; /* PG_G or-in */ 163static int pseflag; /* PG_PS or-in */ 164 165static int nkpt; 166vm_offset_t kernel_vm_end; 167extern u_int32_t KERNend; 168 169/* 170 * Data for the pv entry allocation mechanism 171 */ 172static uma_zone_t pvzone; 173static struct vm_object pvzone_obj; 174static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0; 175int pmap_pagedaemon_waken; 176 177/* 178 * All those kernel PT submaps that BSD is so fond of 179 */ 180pt_entry_t *CMAP1 = 0; 181static pt_entry_t *CMAP2, *CMAP3, *ptmmap; 182caddr_t CADDR1 = 0, ptvmmap = 0; 183static caddr_t CADDR2, CADDR3; 184static struct mtx CMAPCADDR12_lock; 185static pt_entry_t *msgbufmap; 186struct msgbuf *msgbufp = 0; 187 188/* 189 * Crashdump maps. 
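 * These give the dump code a small window of KVA (MAXDUMPPGS pages at
 * crashdumpmap) that pmap_kenter_temporary() can point at any physical
 * page while a kernel core is being written.  A caller might do, with
 * illustrative names:
 *
 *	buf = pmap_kenter_temporary(trunc_page(pa), 0);
 *	(buf then aliases that physical page for PAGE_SIZE bytes)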
190 */ 191static pt_entry_t *pt_crashdumpmap; 192static caddr_t crashdumpmap; 193 194#ifdef SMP 195extern pt_entry_t *SMPpt; 196#endif 197static pt_entry_t *PMAP1 = 0; 198static pt_entry_t *PADDR1 = 0; 199 200static PMAP_INLINE void free_pv_entry(pv_entry_t pv); 201static pt_entry_t *get_ptbase(pmap_t pmap); 202static pv_entry_t get_pv_entry(void); 203static void i386_protection_init(void); 204static __inline void pmap_changebit(vm_page_t m, int bit, boolean_t setem); 205 206static vm_page_t pmap_enter_quick(pmap_t pmap, vm_offset_t va, 207 vm_page_t m, vm_page_t mpte); 208static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t sva); 209static void pmap_remove_page(struct pmap *pmap, vm_offset_t va); 210static int pmap_remove_entry(struct pmap *pmap, vm_page_t m, 211 vm_offset_t va); 212static void pmap_insert_entry(pmap_t pmap, vm_offset_t va, 213 vm_page_t mpte, vm_page_t m); 214 215static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va); 216 217static int pmap_release_free_page(pmap_t pmap, vm_page_t p); 218static vm_page_t _pmap_allocpte(pmap_t pmap, unsigned ptepindex); 219static pt_entry_t *pmap_pte_quick(pmap_t pmap, vm_offset_t va); 220static vm_page_t pmap_page_lookup(vm_object_t object, vm_pindex_t pindex); 221static int pmap_unuse_pt(pmap_t, vm_offset_t, vm_page_t); 222static vm_offset_t pmap_kmem_choose(vm_offset_t addr); 223static void *pmap_allocf(uma_zone_t zone, int bytes, u_int8_t *flags, int wait); 224 225static pd_entry_t pdir4mb; 226 227CTASSERT(1 << PDESHIFT == sizeof(pd_entry_t)); 228CTASSERT(1 << PTESHIFT == sizeof(pt_entry_t)); 229 230/* 231 * Routine: pmap_pte 232 * Function: 233 * Extract the page table entry associated 234 * with the given map/virtual_address pair. 235 */ 236 237PMAP_INLINE pt_entry_t * 238pmap_pte(pmap, va) 239 register pmap_t pmap; 240 vm_offset_t va; 241{ 242 pd_entry_t *pdeaddr; 243 244 if (pmap) { 245 pdeaddr = pmap_pde(pmap, va); 246 if (*pdeaddr & PG_PS) 247 return pdeaddr; 248 if (*pdeaddr) { 249 return get_ptbase(pmap) + i386_btop(va); 250 } 251 } 252 return (0); 253} 254 255/* 256 * Move the kernel virtual free pointer to the next 257 * 4MB. This is used to help improve performance 258 * by using a large (4MB) page for much of the kernel 259 * (.text, .data, .bss) 260 */ 261static vm_offset_t 262pmap_kmem_choose(vm_offset_t addr) 263{ 264 vm_offset_t newaddr = addr; 265 266#ifdef I686_CPU_not /* Problem seems to have gone away */ 267 /* Deal with un-resolved Pentium4 issues */ 268 if (cpu_class == CPUCLASS_686 && 269 strcmp(cpu_vendor, "GenuineIntel") == 0 && 270 (cpu_id & 0xf00) == 0xf00) 271 return newaddr; 272#endif 273#ifndef DISABLE_PSE 274 if (cpu_feature & CPUID_PSE) 275 newaddr = (addr + (NBPDR - 1)) & ~(NBPDR - 1); 276#endif 277 return newaddr; 278} 279 280/* 281 * Bootstrap the system enough to run with virtual memory. 282 * 283 * On the i386 this is called after mapping has already been enabled 284 * and just syncs the pmap module with what has already been done. 285 * [We can't call it easily with mapping off since the kernel is not 286 * mapped with PA == VA, hence we would have to relocate every address 287 * from the linked base (virtual) address "KERNBASE" to the actual 288 * (physical) address starting relative to 0] 289 */ 290void 291pmap_bootstrap(firstaddr, loadaddr) 292 vm_offset_t firstaddr; 293 vm_offset_t loadaddr; 294{ 295 vm_offset_t va; 296 pt_entry_t *pte; 297 int i; 298 299 avail_start = firstaddr; 300 301 /* 302 * XXX The calculation of virtual_avail is wrong. 
It's NKPT*PAGE_SIZE too 303 * large. It should instead be correctly calculated in locore.s and 304 * not based on 'first' (which is a physical address, not a virtual 305 * address, for the start of unused physical memory). The kernel 306 * page tables are NOT double mapped and thus should not be included 307 * in this calculation. 308 */ 309 virtual_avail = (vm_offset_t) KERNBASE + firstaddr; 310 virtual_avail = pmap_kmem_choose(virtual_avail); 311 312 virtual_end = VM_MAX_KERNEL_ADDRESS; 313 314 /* 315 * Initialize protection array. 316 */ 317 i386_protection_init(); 318 319 /* 320 * Initialize the kernel pmap (which is statically allocated). 321 */ 322 kernel_pmap->pm_pdir = (pd_entry_t *) (KERNBASE + (u_int)IdlePTD); 323 kernel_pmap->pm_active = -1; /* don't allow deactivation */ 324 TAILQ_INIT(&kernel_pmap->pm_pvlist); 325 LIST_INIT(&allpmaps); 326 mtx_init(&allpmaps_lock, "allpmaps", NULL, MTX_SPIN); 327 mtx_lock_spin(&allpmaps_lock); 328 LIST_INSERT_HEAD(&allpmaps, kernel_pmap, pm_list); 329 mtx_unlock_spin(&allpmaps_lock); 330 nkpt = NKPT; 331 332 /* 333 * Reserve some special page table entries/VA space for temporary 334 * mapping of pages. 335 */ 336#define SYSMAP(c, p, v, n) \ 337 v = (c)va; va += ((n)*PAGE_SIZE); p = pte; pte += (n); 338 339 va = virtual_avail; 340 pte = (pt_entry_t *) pmap_pte(kernel_pmap, va); 341 342 /* 343 * CMAP1/CMAP2 are used for zeroing and copying pages. 344 * CMAP3 is used for the idle process page zeroing. 345 */ 346 SYSMAP(caddr_t, CMAP1, CADDR1, 1) 347 SYSMAP(caddr_t, CMAP2, CADDR2, 1) 348 SYSMAP(caddr_t, CMAP3, CADDR3, 1) 349 350 mtx_init(&CMAPCADDR12_lock, "CMAPCADDR12", NULL, MTX_DEF); 351 352 /* 353 * Crashdump maps. 354 */ 355 SYSMAP(caddr_t, pt_crashdumpmap, crashdumpmap, MAXDUMPPGS); 356 357 /* 358 * ptvmmap is used for reading arbitrary physical pages via /dev/mem. 359 * XXX ptmmap is not used. 360 */ 361 SYSMAP(caddr_t, ptmmap, ptvmmap, 1) 362 363 /* 364 * msgbufp is used to map the system message buffer. 365 * XXX msgbufmap is not used. 366 */ 367 SYSMAP(struct msgbuf *, msgbufmap, msgbufp, 368 atop(round_page(MSGBUF_SIZE))) 369 370 /* 371 * ptemap is used for pmap_pte_quick 372 */ 373 SYSMAP(pt_entry_t *, PMAP1, PADDR1, 1); 374 375 virtual_avail = va; 376 377 *CMAP1 = *CMAP2 = 0; 378 for (i = 0; i < NKPT; i++) 379 PTD[i] = 0; 380 381 pgeflag = 0; 382#ifndef DISABLE_PG_G 383 if (cpu_feature & CPUID_PGE) 384 pgeflag = PG_G; 385#endif 386#ifdef I686_CPU_not /* Problem seems to have gone away */ 387 /* Deal with un-resolved Pentium4 issues */ 388 if (cpu_class == CPUCLASS_686 && 389 strcmp(cpu_vendor, "GenuineIntel") == 0 && 390 (cpu_id & 0xf00) == 0xf00) { 391 printf("Warning: Pentium 4 cpu: PG_G disabled (global flag)\n"); 392 pgeflag = 0; 393 } 394#endif 395 396/* 397 * Initialize the 4MB page size flag 398 */ 399 pseflag = 0; 400/* 401 * The 4MB page version of the initial 402 * kernel page mapping. 
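 * With PSE, one page-directory entry maps 4MB directly; roughly:
 *
 *	bits 31..22		physical frame of the 4MB page (NBPDR-aligned)
 *	PG_PS			marks the entry as a superpage
 *	PG_V | PG_RW | PG_U	the usual valid/writable/user bits
 *	pgeflag			PG_G if the CPU supports global pages
 *
 * pdir4mb is built this way below from the existing 4K mapping of
 * KERNBASE and later installed at KPTDI by pmap_set_opt().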
403 */ 404 pdir4mb = 0; 405 406#ifndef DISABLE_PSE 407 if (cpu_feature & CPUID_PSE) 408 pseflag = PG_PS; 409#endif 410#ifdef I686_CPU_not /* Problem seems to have gone away */ 411 /* Deal with un-resolved Pentium4 issues */ 412 if (cpu_class == CPUCLASS_686 && 413 strcmp(cpu_vendor, "GenuineIntel") == 0 && 414 (cpu_id & 0xf00) == 0xf00) { 415 printf("Warning: Pentium 4 cpu: PG_PS disabled (4MB pages)\n"); 416 pseflag = 0; 417 } 418#endif 419#ifndef DISABLE_PSE 420 if (pseflag) { 421 pd_entry_t ptditmp; 422 /* 423 * Note that we have enabled PSE mode 424 */ 425 ptditmp = *(PTmap + i386_btop(KERNBASE)); 426 ptditmp &= ~(NBPDR - 1); 427 ptditmp |= PG_V | PG_RW | PG_PS | PG_U | pgeflag; 428 pdir4mb = ptditmp; 429 } 430#endif 431#ifndef SMP 432 /* 433 * Turn on PGE/PSE. SMP does this later on since the 434 * 4K page tables are required for AP boot (for now). 435 * XXX fixme. 436 */ 437 pmap_set_opt(); 438#endif 439#ifdef SMP 440 if (cpu_apic_address == 0) 441 panic("pmap_bootstrap: no local apic! (non-SMP hardware?)"); 442 443 /* local apic is mapped on last page */ 444 SMPpt[NPTEPG - 1] = (pt_entry_t)(PG_V | PG_RW | PG_N | pgeflag | 445 (cpu_apic_address & PG_FRAME)); 446#endif 447 invltlb(); 448} 449 450/* 451 * Enable 4MB page mode for MP startup. Turn on PG_G support. 452 * BSP will run this after all the AP's have started up. 453 */ 454void 455pmap_set_opt(void) 456{ 457 pt_entry_t *pte; 458 vm_offset_t va, endva; 459 460 if (pgeflag && (cpu_feature & CPUID_PGE)) { 461 load_cr4(rcr4() | CR4_PGE); 462 invltlb(); /* Insurance */ 463 } 464#ifndef DISABLE_PSE 465 if (pseflag && (cpu_feature & CPUID_PSE)) { 466 load_cr4(rcr4() | CR4_PSE); 467 invltlb(); /* Insurance */ 468 } 469#endif 470 if (PCPU_GET(cpuid) == 0) { 471#ifndef DISABLE_PSE 472 if (pdir4mb) { 473 kernel_pmap->pm_pdir[KPTDI] = PTD[KPTDI] = pdir4mb; 474 invltlb(); /* Insurance */ 475 } 476#endif 477 if (pgeflag) { 478 /* Turn on PG_G for text, data, bss pages. */ 479 va = (vm_offset_t)btext; 480#ifndef DISABLE_PSE 481 if (pseflag && (cpu_feature & CPUID_PSE)) { 482 if (va < KERNBASE + (1 << PDRSHIFT)) 483 va = KERNBASE + (1 << PDRSHIFT); 484 } 485#endif 486 endva = KERNBASE + KERNend; 487 while (va < endva) { 488 pte = vtopte(va); 489 if (*pte) 490 *pte |= pgeflag; 491 va += PAGE_SIZE; 492 } 493 invltlb(); /* Insurance */ 494 } 495 /* 496 * We do not need to broadcast the invltlb here, because 497 * each AP does it the moment it is released from the boot 498 * lock. See ap_init(). 499 */ 500 } 501} 502 503static void * 504pmap_allocf(uma_zone_t zone, int bytes, u_int8_t *flags, int wait) 505{ 506 *flags = UMA_SLAB_PRIV; 507 return (void *)kmem_alloc(kernel_map, bytes); 508} 509 510/* 511 * Initialize the pmap module. 512 * Called by vm_init, to initialize any structures that the pmap 513 * system needs to map virtual memory. 514 * pmap_init has been enhanced to support in a fairly consistant 515 * way, discontiguous physical memory. 516 */ 517void 518pmap_init(phys_start, phys_end) 519 vm_offset_t phys_start, phys_end; 520{ 521 int i; 522 int initial_pvs; 523 524 /* 525 * Allocate memory for random pmap data structures. Includes the 526 * pv_head_table. 
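 *
 * Each managed mapping is tracked by a pv_entry linked on two lists at
 * once: the owning pmap's pm_pvlist and the vm_page's md.pv_list.  In
 * outline (this is what pmap_insert_entry() does later):
 *
 *	pv = get_pv_entry();
 *	pv->pv_va = va; pv->pv_pmap = pmap; pv->pv_ptem = mpte;
 *	TAILQ_INSERT_TAIL(&pmap->pm_pvlist, pv, pv_plist);
 *	TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
 *
 * The loop below only primes the per-page list heads.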
527 */ 528 529 for(i = 0; i < vm_page_array_size; i++) { 530 vm_page_t m; 531 532 m = &vm_page_array[i]; 533 TAILQ_INIT(&m->md.pv_list); 534 m->md.pv_list_count = 0; 535 } 536 537 /* 538 * init the pv free list 539 */ 540 initial_pvs = vm_page_array_size; 541 if (initial_pvs < MINPV) 542 initial_pvs = MINPV; 543 pvzone = uma_zcreate("PV ENTRY", sizeof (struct pv_entry), NULL, NULL, 544 NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM); 545 uma_zone_set_allocf(pvzone, pmap_allocf); 546 uma_prealloc(pvzone, initial_pvs); 547 548 /* 549 * Now it is safe to enable pv_table recording. 550 */ 551 pmap_initialized = TRUE; 552} 553 554/* 555 * Initialize the address space (zone) for the pv_entries. Set a 556 * high water mark so that the system can recover from excessive 557 * numbers of pv entries. 558 */ 559void 560pmap_init2() 561{ 562 int shpgperproc = PMAP_SHPGPERPROC; 563 564 TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc); 565 pv_entry_max = shpgperproc * maxproc + vm_page_array_size; 566 TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max); 567 pv_entry_high_water = 9 * (pv_entry_max / 10); 568 uma_zone_set_obj(pvzone, &pvzone_obj, pv_entry_max); 569} 570 571 572/*************************************************** 573 * Low level helper routines..... 574 ***************************************************/ 575 576#if defined(PMAP_DIAGNOSTIC) 577 578/* 579 * This code checks for non-writeable/modified pages. 580 * This should be an invalid condition. 581 */ 582static int 583pmap_nw_modified(pt_entry_t ptea) 584{ 585 int pte; 586 587 pte = (int) ptea; 588 589 if ((pte & (PG_M|PG_RW)) == PG_M) 590 return 1; 591 else 592 return 0; 593} 594#endif 595 596 597/* 598 * this routine defines the region(s) of memory that should 599 * not be tested for the modified bit. 600 */ 601static PMAP_INLINE int 602pmap_track_modified(vm_offset_t va) 603{ 604 if ((va < kmi.clean_sva) || (va >= kmi.clean_eva)) 605 return 1; 606 else 607 return 0; 608} 609 610#ifdef I386_CPU 611/* 612 * i386 only has "invalidate everything" and no SMP to worry about. 613 */ 614PMAP_INLINE void 615pmap_invalidate_page(pmap_t pmap, vm_offset_t va) 616{ 617 618 if (pmap == kernel_pmap || pmap->pm_active) 619 invltlb(); 620} 621 622PMAP_INLINE void 623pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) 624{ 625 626 if (pmap == kernel_pmap || pmap->pm_active) 627 invltlb(); 628} 629 630PMAP_INLINE void 631pmap_invalidate_all(pmap_t pmap) 632{ 633 634 if (pmap == kernel_pmap || pmap->pm_active) 635 invltlb(); 636} 637#else /* !I386_CPU */ 638#ifdef SMP 639/* 640 * For SMP, these functions have to use the IPI mechanism for coherence. 641 */ 642void 643pmap_invalidate_page(pmap_t pmap, vm_offset_t va) 644{ 645 u_int cpumask; 646 u_int other_cpus; 647 648 critical_enter(); 649 /* 650 * We need to disable interrupt preemption but MUST NOT have 651 * interrupts disabled here. 
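 * pm_active is a bit mask with one bit per CPU; the kernel pmap uses
 * -1 (all bits set) so it is always considered active everywhere.  If
 * every CPU is using this pmap the shootdown is broadcast; otherwise
 * invlpg runs locally only when our own bit is set and an IPI goes to
 * just the other CPUs in the mask (e.g. a mask of 0x5 on a 4-way box
 * interrupts only CPUs 0 and 2).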
652 * XXX we may need to hold schedlock to get a coherent pm_active 653 */ 654 if (pmap->pm_active == -1 || pmap->pm_active == all_cpus) { 655 invlpg(va); 656 smp_invlpg(va); 657 } else { 658 cpumask = PCPU_GET(cpumask); 659 other_cpus = PCPU_GET(other_cpus); 660 if (pmap->pm_active & cpumask) 661 invlpg(va); 662 if (pmap->pm_active & other_cpus) 663 smp_masked_invlpg(pmap->pm_active & other_cpus, va); 664 } 665 critical_exit(); 666} 667 668void 669pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) 670{ 671 u_int cpumask; 672 u_int other_cpus; 673 vm_offset_t addr; 674 675 critical_enter(); 676 /* 677 * We need to disable interrupt preemption but MUST NOT have 678 * interrupts disabled here. 679 * XXX we may need to hold schedlock to get a coherent pm_active 680 */ 681 if (pmap->pm_active == -1 || pmap->pm_active == all_cpus) { 682 for (addr = sva; addr < eva; addr += PAGE_SIZE) 683 invlpg(addr); 684 smp_invlpg_range(sva, eva); 685 } else { 686 cpumask = PCPU_GET(cpumask); 687 other_cpus = PCPU_GET(other_cpus); 688 if (pmap->pm_active & cpumask) 689 for (addr = sva; addr < eva; addr += PAGE_SIZE) 690 invlpg(addr); 691 if (pmap->pm_active & other_cpus) 692 smp_masked_invlpg_range(pmap->pm_active & other_cpus, 693 sva, eva); 694 } 695 critical_exit(); 696} 697 698void 699pmap_invalidate_all(pmap_t pmap) 700{ 701 u_int cpumask; 702 u_int other_cpus; 703 704#ifdef SWTCH_OPTIM_STATS 705 tlb_flush_count++; 706#endif 707 critical_enter(); 708 /* 709 * We need to disable interrupt preemption but MUST NOT have 710 * interrupts disabled here. 711 * XXX we may need to hold schedlock to get a coherent pm_active 712 */ 713 if (pmap->pm_active == -1 || pmap->pm_active == all_cpus) { 714 invltlb(); 715 smp_invltlb(); 716 } else { 717 cpumask = PCPU_GET(cpumask); 718 other_cpus = PCPU_GET(other_cpus); 719 if (pmap->pm_active & cpumask) 720 invltlb(); 721 if (pmap->pm_active & other_cpus) 722 smp_masked_invltlb(pmap->pm_active & other_cpus); 723 } 724 critical_exit(); 725} 726#else /* !SMP */ 727/* 728 * Normal, non-SMP, 486+ invalidation functions. 729 * We inline these within pmap.c for speed. 730 */ 731PMAP_INLINE void 732pmap_invalidate_page(pmap_t pmap, vm_offset_t va) 733{ 734 735 if (pmap == kernel_pmap || pmap->pm_active) 736 invlpg(va); 737} 738 739PMAP_INLINE void 740pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) 741{ 742 vm_offset_t addr; 743 744 if (pmap == kernel_pmap || pmap->pm_active) 745 for (addr = sva; addr < eva; addr += PAGE_SIZE) 746 invlpg(addr); 747} 748 749PMAP_INLINE void 750pmap_invalidate_all(pmap_t pmap) 751{ 752 753 if (pmap == kernel_pmap || pmap->pm_active) 754 invltlb(); 755} 756#endif /* !SMP */ 757#endif /* !I386_CPU */ 758 759/* 760 * Return an address which is the base of the Virtual mapping of 761 * all the PTEs for the given pmap. Note this doesn't say that 762 * all the PTEs will be present or that the pages there are valid. 763 * The PTEs are made available by the recursive mapping trick. 764 * It will map in the alternate PTE space if needed. 765 */ 766static pt_entry_t * 767get_ptbase(pmap) 768 pmap_t pmap; 769{ 770 pd_entry_t frame; 771 772 /* are we current address space or kernel? 
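	 * The recursive slot PTDPTDI makes the page directory double as
	 * a page table: through it, PTmap is a 4MB window in which
	 * PTmap[i386_btop(va)] is the PTE mapping va in the current
	 * address space.  For a non-current pmap the same trick is
	 * replayed at APTmap by pointing APTDpde at that pmap's
	 * directory, which is what happens below.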
*/ 773 if (pmap == kernel_pmap) 774 return PTmap; 775 frame = pmap->pm_pdir[PTDPTDI] & PG_FRAME; 776 if (frame == (PTDpde & PG_FRAME)) 777 return PTmap; 778 /* otherwise, we are alternate address space */ 779 if (frame != (APTDpde & PG_FRAME)) { 780 APTDpde = (pd_entry_t) (frame | PG_RW | PG_V); 781 pmap_invalidate_all(kernel_pmap); /* XXX Bandaid */ 782 } 783 return APTmap; 784} 785 786/* 787 * Super fast pmap_pte routine best used when scanning 788 * the pv lists. This eliminates many coarse-grained 789 * invltlb calls. Note that many of the pv list 790 * scans are across different pmaps. It is very wasteful 791 * to do an entire invltlb for checking a single mapping. 792 */ 793 794static pt_entry_t * 795pmap_pte_quick(pmap, va) 796 register pmap_t pmap; 797 vm_offset_t va; 798{ 799 pd_entry_t pde, newpf; 800 pde = pmap->pm_pdir[va >> PDRSHIFT]; 801 if (pde != 0) { 802 pd_entry_t frame = pmap->pm_pdir[PTDPTDI] & PG_FRAME; 803 unsigned index = i386_btop(va); 804 /* are we current address space or kernel? */ 805 if (pmap == kernel_pmap || frame == (PTDpde & PG_FRAME)) 806 return PTmap + index; 807 newpf = pde & PG_FRAME; 808 if (((*PMAP1) & PG_FRAME) != newpf) { 809 *PMAP1 = newpf | PG_RW | PG_V; 810 pmap_invalidate_page(kernel_pmap, (vm_offset_t)PADDR1); 811 } 812 return PADDR1 + (index & (NPTEPG - 1)); 813 } 814 return (0); 815} 816 817/* 818 * Routine: pmap_extract 819 * Function: 820 * Extract the physical page address associated 821 * with the given map/virtual_address pair. 822 */ 823vm_offset_t 824pmap_extract(pmap, va) 825 register pmap_t pmap; 826 vm_offset_t va; 827{ 828 vm_offset_t rtval; /* XXX FIXME */ 829 vm_offset_t pdirindex; 830 831 if (pmap == 0) 832 return 0; 833 pdirindex = va >> PDRSHIFT; 834 rtval = pmap->pm_pdir[pdirindex]; 835 if (rtval != 0) { 836 pt_entry_t *pte; 837 if ((rtval & PG_PS) != 0) { 838 rtval &= ~(NBPDR - 1); 839 rtval |= va & (NBPDR - 1); 840 return rtval; 841 } 842 pte = get_ptbase(pmap) + i386_btop(va); 843 rtval = ((*pte & PG_FRAME) | (va & PAGE_MASK)); 844 return rtval; 845 } 846 return 0; 847 848} 849 850/*************************************************** 851 * Low level mapping routines..... 852 ***************************************************/ 853 854/* 855 * Add a wired page to the kva. 856 * Note: not SMP coherent. 857 */ 858PMAP_INLINE void 859pmap_kenter(vm_offset_t va, vm_offset_t pa) 860{ 861 pt_entry_t *pte; 862 863 pte = vtopte(va); 864 *pte = pa | PG_RW | PG_V | pgeflag; 865} 866 867/* 868 * Remove a page from the kernel pagetables. 869 * Note: not SMP coherent. 870 */ 871PMAP_INLINE void 872pmap_kremove(vm_offset_t va) 873{ 874 pt_entry_t *pte; 875 876 pte = vtopte(va); 877 *pte = 0; 878} 879 880/* 881 * Used to map a range of physical addresses into kernel 882 * virtual address space. 883 * 884 * The value passed in '*virt' is a suggested virtual address for 885 * the mapping. Architectures which can support a direct-mapped 886 * physical to virtual region can return the appropriate address 887 * within that region, leaving '*virt' unchanged. Other 888 * architectures should map the pages starting at '*virt' and 889 * update '*virt' with the first usable address after the mapped 890 * region. 
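 * A typical bootstrap use, sketched with illustrative names:
 *
 *	vm_offset_t va = virtual_avail;
 *	base = pmap_map(&va, phys_start, phys_end,
 *	    VM_PROT_READ | VM_PROT_WRITE);
 *	virtual_avail = va;
 *
 * On the i386 there is no direct map, so the pages really are entered
 * at '*virt' and the pointer is advanced past them.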
891 */ 892vm_offset_t 893pmap_map(vm_offset_t *virt, vm_offset_t start, vm_offset_t end, int prot) 894{ 895 vm_offset_t va, sva; 896 897 va = sva = *virt; 898 while (start < end) { 899 pmap_kenter(va, start); 900 va += PAGE_SIZE; 901 start += PAGE_SIZE; 902 } 903 pmap_invalidate_range(kernel_pmap, sva, va); 904 *virt = va; 905 return (sva); 906} 907 908 909/* 910 * Add a list of wired pages to the kva 911 * this routine is only used for temporary 912 * kernel mappings that do not need to have 913 * page modification or references recorded. 914 * Note that old mappings are simply written 915 * over. The page *must* be wired. 916 * Note: SMP coherent. Uses a ranged shootdown IPI. 917 */ 918void 919pmap_qenter(vm_offset_t sva, vm_page_t *m, int count) 920{ 921 vm_offset_t va; 922 923 va = sva; 924 while (count-- > 0) { 925 pmap_kenter(va, VM_PAGE_TO_PHYS(*m)); 926 va += PAGE_SIZE; 927 m++; 928 } 929 pmap_invalidate_range(kernel_pmap, sva, va); 930} 931 932/* 933 * This routine tears out page mappings from the 934 * kernel -- it is meant only for temporary mappings. 935 * Note: SMP coherent. Uses a ranged shootdown IPI. 936 */ 937void 938pmap_qremove(vm_offset_t sva, int count) 939{ 940 vm_offset_t va; 941 942 va = sva; 943 while (count-- > 0) { 944 pmap_kremove(va); 945 va += PAGE_SIZE; 946 } 947 pmap_invalidate_range(kernel_pmap, sva, va); 948} 949 950static vm_page_t 951pmap_page_lookup(vm_object_t object, vm_pindex_t pindex) 952{ 953 vm_page_t m; 954 955retry: 956 m = vm_page_lookup(object, pindex); 957 if (m != NULL) { 958 vm_page_lock_queues(); 959 if (vm_page_sleep_if_busy(m, FALSE, "pplookp")) 960 goto retry; 961 vm_page_unlock_queues(); 962 } 963 return m; 964} 965 966#ifndef KSTACK_MAX_PAGES 967#define KSTACK_MAX_PAGES 32 968#endif 969 970/* 971 * Create the kernel stack (including pcb for i386) for a new thread. 972 * This routine directly affects the fork perf for a process and 973 * create performance for a thread. 974 */ 975void 976pmap_new_thread(struct thread *td, int pages) 977{ 978 int i; 979 vm_page_t ma[KSTACK_MAX_PAGES]; 980 vm_object_t ksobj; 981 vm_page_t m; 982 vm_offset_t ks; 983 984 /* Bounds check */ 985 if (pages <= 1) 986 pages = KSTACK_PAGES; 987 else if (pages > KSTACK_MAX_PAGES) 988 pages = KSTACK_MAX_PAGES; 989 990 /* 991 * allocate object for the kstack 992 */ 993 ksobj = vm_object_allocate(OBJT_DEFAULT, pages); 994 td->td_kstack_obj = ksobj; 995 996 /* get a kernel virtual address for the kstack for this thread */ 997#ifdef KSTACK_GUARD 998 ks = kmem_alloc_nofault(kernel_map, (pages + 1) * PAGE_SIZE); 999 if (ks == 0) 1000 panic("pmap_new_thread: kstack allocation failed"); 1001 if (*vtopte(ks) != 0) 1002 pmap_qremove(ks, 1); 1003 ks += PAGE_SIZE; 1004 td->td_kstack = ks; 1005#else 1006 /* get a kernel virtual address for the kstack for this thread */ 1007 ks = kmem_alloc_nofault(kernel_map, pages * PAGE_SIZE); 1008 if (ks == 0) 1009 panic("pmap_new_thread: kstack allocation failed"); 1010 td->td_kstack = ks; 1011#endif 1012 /* 1013 * Knowing the number of pages allocated is useful when you 1014 * want to deallocate them. 1015 */ 1016 td->td_kstack_pages = pages; 1017 1018 /* 1019 * For the length of the stack, link in a real page of ram for each 1020 * page of stack. 
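	 * Note that with KSTACK_GUARD (handled above) the KVA chunk is
	 * (pages + 1) pages long and the lowest page is left unmapped,
	 * so a runaway stack hits an unmapped guard page and faults
	 * instead of silently overwriting whatever sits below the stack:
	 *
	 *	ks = kmem_alloc_nofault(kernel_map, (pages + 1) * PAGE_SIZE);
	 *	ks += PAGE_SIZE;	td_kstack starts above the guard page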
1021 */ 1022 for (i = 0; i < pages; i++) { 1023 /* 1024 * Get a kernel stack page 1025 */ 1026 m = vm_page_grab(ksobj, i, 1027 VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED); 1028 ma[i] = m; 1029 1030 vm_page_lock_queues(); 1031 vm_page_wakeup(m); 1032 vm_page_flag_clear(m, PG_ZERO); 1033 m->valid = VM_PAGE_BITS_ALL; 1034 vm_page_unlock_queues(); 1035 } 1036 pmap_qenter(ks, ma, pages); 1037} 1038 1039/* 1040 * Dispose the kernel stack for a thread that has exited. 1041 * This routine directly impacts the exit perf of a process and thread. 1042 */ 1043void 1044pmap_dispose_thread(td) 1045 struct thread *td; 1046{ 1047 int i; 1048 int pages; 1049 vm_object_t ksobj; 1050 vm_offset_t ks; 1051 vm_page_t m; 1052 1053 pages = td->td_kstack_pages; 1054 ksobj = td->td_kstack_obj; 1055 ks = td->td_kstack; 1056 pmap_qremove(ks, pages); 1057 for (i = 0; i < pages; i++) { 1058 m = vm_page_lookup(ksobj, i); 1059 if (m == NULL) 1060 panic("pmap_dispose_thread: kstack already missing?"); 1061 vm_page_lock_queues(); 1062 vm_page_busy(m); 1063 vm_page_unwire(m, 0); 1064 vm_page_free(m); 1065 vm_page_unlock_queues(); 1066 } 1067 /* 1068 * Free the space that this stack was mapped to in the kernel 1069 * address map. 1070 */ 1071#ifdef KSTACK_GUARD 1072 kmem_free(kernel_map, ks - PAGE_SIZE, (pages + 1) * PAGE_SIZE); 1073#else 1074 kmem_free(kernel_map, ks, pages * PAGE_SIZE); 1075#endif 1076 vm_object_deallocate(ksobj); 1077} 1078 1079/* 1080 * Set up a variable sized alternate kstack. Though it may look MI, it may 1081 * need to be different on certain arches like ia64. 1082 */ 1083void 1084pmap_new_altkstack(struct thread *td, int pages) 1085{ 1086 /* shuffle the original stack */ 1087 td->td_altkstack_obj = td->td_kstack_obj; 1088 td->td_altkstack = td->td_kstack; 1089 td->td_altkstack_pages = td->td_kstack_pages; 1090 1091 pmap_new_thread(td, pages); 1092} 1093 1094void 1095pmap_dispose_altkstack(td) 1096 struct thread *td; 1097{ 1098 pmap_dispose_thread(td); 1099 1100 /* restore the original kstack */ 1101 td->td_kstack = td->td_altkstack; 1102 td->td_kstack_obj = td->td_altkstack_obj; 1103 td->td_kstack_pages = td->td_altkstack_pages; 1104 td->td_altkstack = 0; 1105 td->td_altkstack_obj = NULL; 1106 td->td_altkstack_pages = 0; 1107} 1108 1109/* 1110 * Allow the Kernel stack for a thread to be prejudicially paged out. 1111 */ 1112void 1113pmap_swapout_thread(td) 1114 struct thread *td; 1115{ 1116 int i; 1117 int pages; 1118 vm_object_t ksobj; 1119 vm_offset_t ks; 1120 vm_page_t m; 1121 1122 pages = td->td_kstack_pages; 1123 ksobj = td->td_kstack_obj; 1124 ks = td->td_kstack; 1125 pmap_qremove(ks, pages); 1126 for (i = 0; i < pages; i++) { 1127 m = vm_page_lookup(ksobj, i); 1128 if (m == NULL) 1129 panic("pmap_swapout_thread: kstack already missing?"); 1130 vm_page_lock_queues(); 1131 vm_page_dirty(m); 1132 vm_page_unwire(m, 0); 1133 vm_page_unlock_queues(); 1134 } 1135} 1136 1137/* 1138 * Bring the kernel stack for a specified thread back in. 
1139 */ 1140void 1141pmap_swapin_thread(td) 1142 struct thread *td; 1143{ 1144 int i, rv; 1145 int pages; 1146 vm_page_t ma[KSTACK_MAX_PAGES]; 1147 vm_object_t ksobj; 1148 vm_offset_t ks; 1149 vm_page_t m; 1150 1151 pages = td->td_kstack_pages; 1152 ksobj = td->td_kstack_obj; 1153 ks = td->td_kstack; 1154 for (i = 0; i < pages; i++) { 1155 m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY); 1156 if (m->valid != VM_PAGE_BITS_ALL) { 1157 rv = vm_pager_get_pages(ksobj, &m, 1, 0); 1158 if (rv != VM_PAGER_OK) 1159 panic("pmap_swapin_thread: cannot get kstack for proc: %d\n", td->td_proc->p_pid); 1160 m = vm_page_lookup(ksobj, i); 1161 m->valid = VM_PAGE_BITS_ALL; 1162 } 1163 ma[i] = m; 1164 vm_page_lock_queues(); 1165 vm_page_wire(m); 1166 vm_page_wakeup(m); 1167 vm_page_unlock_queues(); 1168 } 1169 pmap_qenter(ks, ma, pages); 1170} 1171 1172/*************************************************** 1173 * Page table page management routines..... 1174 ***************************************************/ 1175 1176/* 1177 * This routine unholds page table pages, and if the hold count 1178 * drops to zero, then it decrements the wire count. 1179 */ 1180static int 1181_pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m) 1182{ 1183 1184 while (vm_page_sleep_if_busy(m, FALSE, "pmuwpt")) 1185 vm_page_lock_queues(); 1186 1187 if (m->hold_count == 0) { 1188 vm_offset_t pteva; 1189 /* 1190 * unmap the page table page 1191 */ 1192 pmap->pm_pdir[m->pindex] = 0; 1193 --pmap->pm_stats.resident_count; 1194 if ((pmap->pm_pdir[PTDPTDI] & PG_FRAME) == 1195 (PTDpde & PG_FRAME)) { 1196 /* 1197 * Do an invltlb to make the invalidated mapping 1198 * take effect immediately. 1199 */ 1200 pteva = VM_MAXUSER_ADDRESS + i386_ptob(m->pindex); 1201 pmap_invalidate_page(pmap, pteva); 1202 } 1203 1204 /* 1205 * If the page is finally unwired, simply free it. 1206 */ 1207 --m->wire_count; 1208 if (m->wire_count == 0) { 1209 vm_page_busy(m); 1210 vm_page_free_zero(m); 1211 atomic_subtract_int(&cnt.v_wire_count, 1); 1212 } 1213 return 1; 1214 } 1215 return 0; 1216} 1217 1218static PMAP_INLINE int 1219pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m) 1220{ 1221 vm_page_unhold(m); 1222 if (m->hold_count == 0) 1223 return _pmap_unwire_pte_hold(pmap, m); 1224 else 1225 return 0; 1226} 1227 1228/* 1229 * After removing a page table entry, this routine is used to 1230 * conditionally free the page, and manage the hold/wire counts. 
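 * The accounting, in outline: every mapping entered through a user
 * page table page holds that page once (pmap_allocpte() increments
 * hold_count), and the page stays wired from allocation.  Removing a
 * PTE releases one hold here; when the last hold goes away,
 * _pmap_unwire_pte_hold() clears the PDE, drops the wire count and
 * frees the now-empty page via vm_page_free_zero().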
1231 */ 1232static int 1233pmap_unuse_pt(pmap_t pmap, vm_offset_t va, vm_page_t mpte) 1234{ 1235 unsigned ptepindex; 1236 if (va >= VM_MAXUSER_ADDRESS) 1237 return 0; 1238 1239 if (mpte == NULL) { 1240 ptepindex = (va >> PDRSHIFT); 1241 if (pmap->pm_pteobj->root && 1242 (pmap->pm_pteobj->root->pindex == ptepindex)) { 1243 mpte = pmap->pm_pteobj->root; 1244 } else { 1245 while ((mpte = vm_page_lookup(pmap->pm_pteobj, ptepindex)) != NULL && 1246 vm_page_sleep_if_busy(mpte, FALSE, "pulook")) 1247 vm_page_lock_queues(); 1248 } 1249 } 1250 1251 return pmap_unwire_pte_hold(pmap, mpte); 1252} 1253 1254void 1255pmap_pinit0(pmap) 1256 struct pmap *pmap; 1257{ 1258 pmap->pm_pdir = 1259 (pd_entry_t *)kmem_alloc_pageable(kernel_map, PAGE_SIZE); 1260 pmap_kenter((vm_offset_t)pmap->pm_pdir, (vm_offset_t)IdlePTD); 1261#ifndef I386_CPU 1262 invlpg((vm_offset_t)pmap->pm_pdir); 1263#else 1264 invltlb(); 1265#endif 1266 pmap->pm_active = 0; 1267 TAILQ_INIT(&pmap->pm_pvlist); 1268 bzero(&pmap->pm_stats, sizeof pmap->pm_stats); 1269 mtx_lock_spin(&allpmaps_lock); 1270 LIST_INSERT_HEAD(&allpmaps, pmap, pm_list); 1271 mtx_unlock_spin(&allpmaps_lock); 1272} 1273 1274/* 1275 * Initialize a preallocated and zeroed pmap structure, 1276 * such as one in a vmspace structure. 1277 */ 1278void 1279pmap_pinit(pmap) 1280 register struct pmap *pmap; 1281{ 1282 vm_page_t ptdpg; 1283 1284 /* 1285 * No need to allocate page table space yet but we do need a valid 1286 * page directory table. 1287 */ 1288 if (pmap->pm_pdir == NULL) 1289 pmap->pm_pdir = 1290 (pd_entry_t *)kmem_alloc_pageable(kernel_map, PAGE_SIZE); 1291 1292 /* 1293 * allocate object for the ptes 1294 */ 1295 if (pmap->pm_pteobj == NULL) 1296 pmap->pm_pteobj = vm_object_allocate(OBJT_DEFAULT, PTDPTDI + 1); 1297 1298 /* 1299 * allocate the page directory page 1300 */ 1301 ptdpg = vm_page_grab(pmap->pm_pteobj, PTDPTDI, 1302 VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED | VM_ALLOC_ZERO); 1303 vm_page_lock_queues(); 1304 vm_page_flag_clear(ptdpg, PG_BUSY); 1305 ptdpg->valid = VM_PAGE_BITS_ALL; 1306 vm_page_unlock_queues(); 1307 1308 pmap_qenter((vm_offset_t) pmap->pm_pdir, &ptdpg, 1); 1309 if ((ptdpg->flags & PG_ZERO) == 0) 1310 bzero(pmap->pm_pdir, PAGE_SIZE); 1311 1312 mtx_lock_spin(&allpmaps_lock); 1313 LIST_INSERT_HEAD(&allpmaps, pmap, pm_list); 1314 mtx_unlock_spin(&allpmaps_lock); 1315 /* Wire in kernel global address entries. */ 1316 /* XXX copies current process, does not fill in MPPTDI */ 1317 bcopy(PTD + KPTDI, pmap->pm_pdir + KPTDI, nkpt * sizeof(pd_entry_t)); 1318#ifdef SMP 1319 pmap->pm_pdir[MPPTDI] = PTD[MPPTDI]; 1320#endif 1321 1322 /* install self-referential address mapping entry */ 1323 pmap->pm_pdir[PTDPTDI] = 1324 VM_PAGE_TO_PHYS(ptdpg) | PG_V | PG_RW | PG_A | PG_M; 1325 1326 pmap->pm_active = 0; 1327 TAILQ_INIT(&pmap->pm_pvlist); 1328 bzero(&pmap->pm_stats, sizeof pmap->pm_stats); 1329} 1330 1331/* 1332 * Wire in kernel global address entries. To avoid a race condition 1333 * between pmap initialization and pmap_growkernel, this procedure 1334 * should be called after the vmspace is attached to the process 1335 * but before this pmap is activated. 1336 */ 1337void 1338pmap_pinit2(pmap) 1339 struct pmap *pmap; 1340{ 1341 /* XXX: Remove this stub when no longer called */ 1342} 1343 1344static int 1345pmap_release_free_page(pmap_t pmap, vm_page_t p) 1346{ 1347 pd_entry_t *pde = pmap->pm_pdir; 1348 1349 /* 1350 * This code optimizes the case of freeing non-busy 1351 * page-table pages. 
Those pages are zero now, and 1352 * might as well be placed directly into the zero queue. 1353 */ 1354 vm_page_lock_queues(); 1355 if (vm_page_sleep_if_busy(p, FALSE, "pmaprl")) 1356 return (0); 1357 vm_page_busy(p); 1358 1359 /* 1360 * Remove the page table page from the processes address space. 1361 */ 1362 pde[p->pindex] = 0; 1363 pmap->pm_stats.resident_count--; 1364 1365 if (p->hold_count) { 1366 panic("pmap_release: freeing held page table page"); 1367 } 1368 /* 1369 * Page directory pages need to have the kernel 1370 * stuff cleared, so they can go into the zero queue also. 1371 */ 1372 if (p->pindex == PTDPTDI) { 1373 bzero(pde + KPTDI, nkpt * sizeof(pd_entry_t)); 1374#ifdef SMP 1375 pde[MPPTDI] = 0; 1376#endif 1377 pde[APTDPTDI] = 0; 1378 pmap_kremove((vm_offset_t) pmap->pm_pdir); 1379 } 1380 1381 p->wire_count--; 1382 atomic_subtract_int(&cnt.v_wire_count, 1); 1383 vm_page_free_zero(p); 1384 vm_page_unlock_queues(); 1385 return 1; 1386} 1387 1388/* 1389 * this routine is called if the page table page is not 1390 * mapped correctly. 1391 */ 1392static vm_page_t 1393_pmap_allocpte(pmap, ptepindex) 1394 pmap_t pmap; 1395 unsigned ptepindex; 1396{ 1397 vm_offset_t pteva, ptepa; /* XXXPA */ 1398 vm_page_t m; 1399 1400 /* 1401 * Find or fabricate a new pagetable page 1402 */ 1403 m = vm_page_grab(pmap->pm_pteobj, ptepindex, 1404 VM_ALLOC_WIRED | VM_ALLOC_ZERO | VM_ALLOC_RETRY); 1405 1406 KASSERT(m->queue == PQ_NONE, 1407 ("_pmap_allocpte: %p->queue != PQ_NONE", m)); 1408 1409 /* 1410 * Increment the hold count for the page table page 1411 * (denoting a new mapping.) 1412 */ 1413 m->hold_count++; 1414 1415 /* 1416 * Map the pagetable page into the process address space, if 1417 * it isn't already there. 1418 */ 1419 1420 pmap->pm_stats.resident_count++; 1421 1422 ptepa = VM_PAGE_TO_PHYS(m); 1423 pmap->pm_pdir[ptepindex] = 1424 (pd_entry_t) (ptepa | PG_U | PG_RW | PG_V | PG_A | PG_M); 1425 1426 /* 1427 * Try to use the new mapping, but if we cannot, then 1428 * do it with the routine that maps the page explicitly. 1429 */ 1430 if ((m->flags & PG_ZERO) == 0) { 1431 if ((pmap->pm_pdir[PTDPTDI] & PG_FRAME) == 1432 (PTDpde & PG_FRAME)) { 1433 pteva = VM_MAXUSER_ADDRESS + i386_ptob(ptepindex); 1434 bzero((caddr_t) pteva, PAGE_SIZE); 1435 } else { 1436 pmap_zero_page(m); 1437 } 1438 } 1439 vm_page_lock_queues(); 1440 m->valid = VM_PAGE_BITS_ALL; 1441 vm_page_flag_clear(m, PG_ZERO); 1442 vm_page_wakeup(m); 1443 vm_page_unlock_queues(); 1444 1445 return m; 1446} 1447 1448static vm_page_t 1449pmap_allocpte(pmap_t pmap, vm_offset_t va) 1450{ 1451 unsigned ptepindex; 1452 pd_entry_t ptepa; 1453 vm_page_t m; 1454 1455 /* 1456 * Calculate pagetable page index 1457 */ 1458 ptepindex = va >> PDRSHIFT; 1459 1460 /* 1461 * Get the page directory entry 1462 */ 1463 ptepa = (vm_offset_t) pmap->pm_pdir[ptepindex]; 1464 1465 /* 1466 * This supports switching from a 4MB page to a 1467 * normal 4K page. 1468 */ 1469 if (ptepa & PG_PS) { 1470 pmap->pm_pdir[ptepindex] = 0; 1471 ptepa = 0; 1472 pmap_invalidate_all(kernel_pmap); 1473 } 1474 1475 /* 1476 * If the page table page is mapped, we just increment the 1477 * hold count, and activate it. 1478 */ 1479 if (ptepa) { 1480 /* 1481 * In order to get the page table page, try the 1482 * hint first. 
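		 * The hint is pm_pteobj->root, the root of the object's
		 * resident-page tree; a recent lookup tends to leave the
		 * wanted page table page there, letting us skip the full
		 * vm_page_lookup().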
1483 */ 1484 if (pmap->pm_pteobj->root && 1485 (pmap->pm_pteobj->root->pindex == ptepindex)) { 1486 m = pmap->pm_pteobj->root; 1487 } else { 1488 m = pmap_page_lookup(pmap->pm_pteobj, ptepindex); 1489 } 1490 m->hold_count++; 1491 return m; 1492 } 1493 /* 1494 * Here if the pte page isn't mapped, or if it has been deallocated. 1495 */ 1496 return _pmap_allocpte(pmap, ptepindex); 1497} 1498 1499 1500/*************************************************** 1501* Pmap allocation/deallocation routines. 1502 ***************************************************/ 1503 1504/* 1505 * Release any resources held by the given physical map. 1506 * Called when a pmap initialized by pmap_pinit is being released. 1507 * Should only be called if the map contains no valid mappings. 1508 */ 1509void 1510pmap_release(pmap_t pmap) 1511{ 1512 vm_page_t p,n,ptdpg; 1513 vm_object_t object = pmap->pm_pteobj; 1514 int curgeneration; 1515 1516#if defined(DIAGNOSTIC) 1517 if (object->ref_count != 1) 1518 panic("pmap_release: pteobj reference count != 1"); 1519#endif 1520 1521 ptdpg = NULL; 1522 mtx_lock_spin(&allpmaps_lock); 1523 LIST_REMOVE(pmap, pm_list); 1524 mtx_unlock_spin(&allpmaps_lock); 1525retry: 1526 curgeneration = object->generation; 1527 for (p = TAILQ_FIRST(&object->memq); p != NULL; p = n) { 1528 n = TAILQ_NEXT(p, listq); 1529 if (p->pindex == PTDPTDI) { 1530 ptdpg = p; 1531 continue; 1532 } 1533 while (1) { 1534 if (!pmap_release_free_page(pmap, p) && 1535 (object->generation != curgeneration)) 1536 goto retry; 1537 } 1538 } 1539 1540 if (ptdpg && !pmap_release_free_page(pmap, ptdpg)) 1541 goto retry; 1542} 1543 1544static int 1545kvm_size(SYSCTL_HANDLER_ARGS) 1546{ 1547 unsigned long ksize = VM_MAX_KERNEL_ADDRESS - KERNBASE; 1548 1549 return sysctl_handle_long(oidp, &ksize, 0, req); 1550} 1551SYSCTL_PROC(_vm, OID_AUTO, kvm_size, CTLTYPE_LONG|CTLFLAG_RD, 1552 0, 0, kvm_size, "IU", "Size of KVM"); 1553 1554static int 1555kvm_free(SYSCTL_HANDLER_ARGS) 1556{ 1557 unsigned long kfree = VM_MAX_KERNEL_ADDRESS - kernel_vm_end; 1558 1559 return sysctl_handle_long(oidp, &kfree, 0, req); 1560} 1561SYSCTL_PROC(_vm, OID_AUTO, kvm_free, CTLTYPE_LONG|CTLFLAG_RD, 1562 0, 0, kvm_free, "IU", "Amount of KVM free"); 1563 1564/* 1565 * grow the number of kernel page table entries, if needed 1566 */ 1567void 1568pmap_growkernel(vm_offset_t addr) 1569{ 1570 struct pmap *pmap; 1571 int s; 1572 vm_offset_t ptppaddr; 1573 vm_page_t nkpg; 1574 pd_entry_t newpdir; 1575 1576 s = splhigh(); 1577 mtx_assert(&kernel_map->system_mtx, MA_OWNED); 1578 if (kernel_vm_end == 0) { 1579 kernel_vm_end = KERNBASE; 1580 nkpt = 0; 1581 while (pdir_pde(PTD, kernel_vm_end)) { 1582 kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1); 1583 nkpt++; 1584 } 1585 } 1586 addr = roundup2(addr, PAGE_SIZE * NPTEPG); 1587 while (kernel_vm_end < addr) { 1588 if (pdir_pde(PTD, kernel_vm_end)) { 1589 kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1); 1590 continue; 1591 } 1592 1593 /* 1594 * This index is bogus, but out of the way 1595 */ 1596 nkpg = vm_page_alloc(NULL, nkpt, 1597 VM_ALLOC_NOOBJ | VM_ALLOC_SYSTEM | VM_ALLOC_WIRED); 1598 if (!nkpg) 1599 panic("pmap_growkernel: no memory to grow kernel"); 1600 1601 nkpt++; 1602 1603 pmap_zero_page(nkpg); 1604 ptppaddr = VM_PAGE_TO_PHYS(nkpg); 1605 newpdir = (pd_entry_t) (ptppaddr | PG_V | PG_RW | PG_A | PG_M); 1606 pdir_pde(PTD, kernel_vm_end) = newpdir; 1607 1608 mtx_lock_spin(&allpmaps_lock); 1609 LIST_FOREACH(pmap, &allpmaps, pm_list) { 1610 
*pmap_pde(pmap, kernel_vm_end) = newpdir; 1611 } 1612 mtx_unlock_spin(&allpmaps_lock); 1613 kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1); 1614 } 1615 splx(s); 1616} 1617 1618 1619/*************************************************** 1620 * page management routines. 1621 ***************************************************/ 1622 1623/* 1624 * free the pv_entry back to the free list 1625 */ 1626static PMAP_INLINE void 1627free_pv_entry(pv_entry_t pv) 1628{ 1629 pv_entry_count--; 1630 uma_zfree(pvzone, pv); 1631} 1632 1633/* 1634 * get a new pv_entry, allocating a block from the system 1635 * when needed. 1636 * the memory allocation is performed bypassing the malloc code 1637 * because of the possibility of allocations at interrupt time. 1638 */ 1639static pv_entry_t 1640get_pv_entry(void) 1641{ 1642 pv_entry_count++; 1643 if (pv_entry_high_water && 1644 (pv_entry_count > pv_entry_high_water) && 1645 (pmap_pagedaemon_waken == 0)) { 1646 pmap_pagedaemon_waken = 1; 1647 wakeup (&vm_pages_needed); 1648 } 1649 return uma_zalloc(pvzone, M_NOWAIT); 1650} 1651 1652/* 1653 * If it is the first entry on the list, it is actually 1654 * in the header and we must copy the following entry up 1655 * to the header. Otherwise we must search the list for 1656 * the entry. In either case we free the now unused entry. 1657 */ 1658 1659static int 1660pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va) 1661{ 1662 pv_entry_t pv; 1663 int rtval; 1664 int s; 1665 1666 s = splvm(); 1667 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 1668 if (m->md.pv_list_count < pmap->pm_stats.resident_count) { 1669 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { 1670 if (pmap == pv->pv_pmap && va == pv->pv_va) 1671 break; 1672 } 1673 } else { 1674 TAILQ_FOREACH(pv, &pmap->pm_pvlist, pv_plist) { 1675 if (va == pv->pv_va) 1676 break; 1677 } 1678 } 1679 1680 rtval = 0; 1681 if (pv) { 1682 rtval = pmap_unuse_pt(pmap, va, pv->pv_ptem); 1683 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list); 1684 m->md.pv_list_count--; 1685 if (TAILQ_FIRST(&m->md.pv_list) == NULL) 1686 vm_page_flag_clear(m, PG_WRITEABLE); 1687 1688 TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist); 1689 free_pv_entry(pv); 1690 } 1691 1692 splx(s); 1693 return rtval; 1694} 1695 1696/* 1697 * Create a pv entry for page at pa for 1698 * (pmap, va). 1699 */ 1700static void 1701pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t mpte, vm_page_t m) 1702{ 1703 1704 int s; 1705 pv_entry_t pv; 1706 1707 s = splvm(); 1708 pv = get_pv_entry(); 1709 pv->pv_va = va; 1710 pv->pv_pmap = pmap; 1711 pv->pv_ptem = mpte; 1712 1713 TAILQ_INSERT_TAIL(&pmap->pm_pvlist, pv, pv_plist); 1714 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list); 1715 m->md.pv_list_count++; 1716 1717 splx(s); 1718} 1719 1720/* 1721 * pmap_remove_pte: do the things to unmap a page in a process 1722 */ 1723static int 1724pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t va) 1725{ 1726 pt_entry_t oldpte; 1727 vm_page_t m; 1728 1729 oldpte = atomic_readandclear_int(ptq); 1730 if (oldpte & PG_W) 1731 pmap->pm_stats.wired_count -= 1; 1732 /* 1733 * Machines that don't support invlpg, also don't support 1734 * PG_G. 
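	 * Global mappings also survive the implicit TLB flush done by a
	 * CR3 reload, so a PG_G pte must be shot down explicitly, and in
	 * the kernel pmap, since PG_G (via pgeflag) is only ever set on
	 * kernel entries.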
1735 */ 1736 if (oldpte & PG_G) 1737 pmap_invalidate_page(kernel_pmap, va); 1738 pmap->pm_stats.resident_count -= 1; 1739 if (oldpte & PG_MANAGED) { 1740 m = PHYS_TO_VM_PAGE(oldpte); 1741 if (oldpte & PG_M) { 1742#if defined(PMAP_DIAGNOSTIC) 1743 if (pmap_nw_modified((pt_entry_t) oldpte)) { 1744 printf( 1745 "pmap_remove: modified page not writable: va: 0x%x, pte: 0x%x\n", 1746 va, oldpte); 1747 } 1748#endif 1749 if (pmap_track_modified(va)) 1750 vm_page_dirty(m); 1751 } 1752 if (oldpte & PG_A) 1753 vm_page_flag_set(m, PG_REFERENCED); 1754 return pmap_remove_entry(pmap, m, va); 1755 } else { 1756 return pmap_unuse_pt(pmap, va, NULL); 1757 } 1758 1759 return 0; 1760} 1761 1762/* 1763 * Remove a single page from a process address space 1764 */ 1765static void 1766pmap_remove_page(pmap_t pmap, vm_offset_t va) 1767{ 1768 register pt_entry_t *ptq; 1769 1770 /* 1771 * if there is no pte for this address, just skip it!!! 1772 */ 1773 if (*pmap_pde(pmap, va) == 0) { 1774 return; 1775 } 1776 1777 /* 1778 * get a local va for mappings for this pmap. 1779 */ 1780 ptq = get_ptbase(pmap) + i386_btop(va); 1781 if (*ptq) { 1782 (void) pmap_remove_pte(pmap, ptq, va); 1783 pmap_invalidate_page(pmap, va); 1784 } 1785 return; 1786} 1787 1788/* 1789 * Remove the given range of addresses from the specified map. 1790 * 1791 * It is assumed that the start and end are properly 1792 * rounded to the page size. 1793 */ 1794void 1795pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) 1796{ 1797 register pt_entry_t *ptbase; 1798 vm_offset_t pdnxt; 1799 pd_entry_t ptpaddr; 1800 vm_offset_t sindex, eindex; 1801 int anyvalid; 1802 1803 if (pmap == NULL) 1804 return; 1805 1806 if (pmap->pm_stats.resident_count == 0) 1807 return; 1808 1809 /* 1810 * special handling of removing one page. a very 1811 * common operation and easy to short circuit some 1812 * code. 1813 */ 1814 if ((sva + PAGE_SIZE == eva) && 1815 ((pmap->pm_pdir[(sva >> PDRSHIFT)] & PG_PS) == 0)) { 1816 pmap_remove_page(pmap, sva); 1817 return; 1818 } 1819 1820 anyvalid = 0; 1821 1822 /* 1823 * Get a local virtual address for the mappings that are being 1824 * worked with. 1825 */ 1826 ptbase = get_ptbase(pmap); 1827 1828 sindex = i386_btop(sva); 1829 eindex = i386_btop(eva); 1830 1831 for (; sindex < eindex; sindex = pdnxt) { 1832 unsigned pdirindex; 1833 1834 /* 1835 * Calculate index for next page table. 1836 */ 1837 pdnxt = ((sindex + NPTEPG) & ~(NPTEPG - 1)); 1838 if (pmap->pm_stats.resident_count == 0) 1839 break; 1840 1841 pdirindex = sindex / NPDEPG; 1842 ptpaddr = pmap->pm_pdir[pdirindex]; 1843 if ((ptpaddr & PG_PS) != 0) { 1844 pmap->pm_pdir[pdirindex] = 0; 1845 pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE; 1846 anyvalid++; 1847 continue; 1848 } 1849 1850 /* 1851 * Weed out invalid mappings. Note: we assume that the page 1852 * directory table is always allocated, and in kernel virtual. 1853 */ 1854 if (ptpaddr == 0) 1855 continue; 1856 1857 /* 1858 * Limit our scan to either the end of the va represented 1859 * by the current page table page, or to the end of the 1860 * range being removed. 
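		 * sindex, eindex and pdnxt are PTE indices (va >> PAGE_SHIFT),
		 * so pdnxt = (sindex + NPTEPG) & ~(NPTEPG - 1) rounds up to
		 * the next multiple of NPTEPG (1024), i.e. the next 4MB
		 * boundary; e.g. sindex 0x12345 yields pdnxt 0x12400.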
1861 */ 1862 if (pdnxt > eindex) { 1863 pdnxt = eindex; 1864 } 1865 1866 for (; sindex != pdnxt; sindex++) { 1867 vm_offset_t va; 1868 if (ptbase[sindex] == 0) { 1869 continue; 1870 } 1871 va = i386_ptob(sindex); 1872 1873 anyvalid++; 1874 if (pmap_remove_pte(pmap, ptbase + sindex, va)) 1875 break; 1876 } 1877 } 1878 1879 if (anyvalid) 1880 pmap_invalidate_all(pmap); 1881} 1882 1883/* 1884 * Routine: pmap_remove_all 1885 * Function: 1886 * Removes this physical page from 1887 * all physical maps in which it resides. 1888 * Reflects back modify bits to the pager. 1889 * 1890 * Notes: 1891 * Original versions of this routine were very 1892 * inefficient because they iteratively called 1893 * pmap_remove (slow...) 1894 */ 1895 1896void 1897pmap_remove_all(vm_page_t m) 1898{ 1899 register pv_entry_t pv; 1900 pt_entry_t *pte, tpte; 1901 int s; 1902 1903#if defined(PMAP_DIAGNOSTIC) 1904 /* 1905 * XXX This makes pmap_remove_all() illegal for non-managed pages! 1906 */ 1907 if (!pmap_initialized || (m->flags & PG_FICTITIOUS)) { 1908 panic("pmap_remove_all: illegal for unmanaged page, va: 0x%x", 1909 VM_PAGE_TO_PHYS(m)); 1910 } 1911#endif 1912 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 1913 s = splvm(); 1914 while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) { 1915 pv->pv_pmap->pm_stats.resident_count--; 1916 pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va); 1917 tpte = atomic_readandclear_int(pte); 1918 if (tpte & PG_W) 1919 pv->pv_pmap->pm_stats.wired_count--; 1920 if (tpte & PG_A) 1921 vm_page_flag_set(m, PG_REFERENCED); 1922 1923 /* 1924 * Update the vm_page_t clean and reference bits. 1925 */ 1926 if (tpte & PG_M) { 1927#if defined(PMAP_DIAGNOSTIC) 1928 if (pmap_nw_modified((pt_entry_t) tpte)) { 1929 printf( 1930 "pmap_remove_all: modified page not writable: va: 0x%x, pte: 0x%x\n", 1931 pv->pv_va, tpte); 1932 } 1933#endif 1934 if (pmap_track_modified(pv->pv_va)) 1935 vm_page_dirty(m); 1936 } 1937 pmap_invalidate_page(pv->pv_pmap, pv->pv_va); 1938 TAILQ_REMOVE(&pv->pv_pmap->pm_pvlist, pv, pv_plist); 1939 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list); 1940 m->md.pv_list_count--; 1941 pmap_unuse_pt(pv->pv_pmap, pv->pv_va, pv->pv_ptem); 1942 free_pv_entry(pv); 1943 } 1944 vm_page_flag_clear(m, PG_WRITEABLE); 1945 splx(s); 1946} 1947 1948/* 1949 * Set the physical protection on the 1950 * specified range of this map as requested. 1951 */ 1952void 1953pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot) 1954{ 1955 register pt_entry_t *ptbase; 1956 vm_offset_t pdnxt; 1957 pd_entry_t ptpaddr; 1958 vm_offset_t sindex, eindex; 1959 int anychanged; 1960 1961 if (pmap == NULL) 1962 return; 1963 1964 if ((prot & VM_PROT_READ) == VM_PROT_NONE) { 1965 pmap_remove(pmap, sva, eva); 1966 return; 1967 } 1968 1969 if (prot & VM_PROT_WRITE) 1970 return; 1971 1972 anychanged = 0; 1973 1974 ptbase = get_ptbase(pmap); 1975 1976 sindex = i386_btop(sva); 1977 eindex = i386_btop(eva); 1978 1979 for (; sindex < eindex; sindex = pdnxt) { 1980 1981 unsigned pdirindex; 1982 1983 pdnxt = ((sindex + NPTEPG) & ~(NPTEPG - 1)); 1984 1985 pdirindex = sindex / NPDEPG; 1986 ptpaddr = pmap->pm_pdir[pdirindex]; 1987 if ((ptpaddr & PG_PS) != 0) { 1988 pmap->pm_pdir[pdirindex] &= ~(PG_M|PG_RW); 1989 pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE; 1990 anychanged++; 1991 continue; 1992 } 1993 1994 /* 1995 * Weed out invalid mappings. Note: we assume that the page 1996 * directory table is always allocated, and in kernel virtual. 
1997 */ 1998 if (ptpaddr == 0) 1999 continue; 2000 2001 if (pdnxt > eindex) { 2002 pdnxt = eindex; 2003 } 2004 2005 for (; sindex != pdnxt; sindex++) { 2006 2007 pt_entry_t pbits; 2008 vm_page_t m; 2009 2010 pbits = ptbase[sindex]; 2011 2012 if (pbits & PG_MANAGED) { 2013 m = NULL; 2014 if (pbits & PG_A) { 2015 m = PHYS_TO_VM_PAGE(pbits); 2016 vm_page_flag_set(m, PG_REFERENCED); 2017 pbits &= ~PG_A; 2018 } 2019 if (pbits & PG_M) { 2020 if (pmap_track_modified(i386_ptob(sindex))) { 2021 if (m == NULL) 2022 m = PHYS_TO_VM_PAGE(pbits); 2023 vm_page_dirty(m); 2024 pbits &= ~PG_M; 2025 } 2026 } 2027 } 2028 2029 pbits &= ~PG_RW; 2030 2031 if (pbits != ptbase[sindex]) { 2032 ptbase[sindex] = pbits; 2033 anychanged = 1; 2034 } 2035 } 2036 } 2037 if (anychanged) 2038 pmap_invalidate_all(pmap); 2039} 2040 2041/* 2042 * Insert the given physical page (p) at 2043 * the specified virtual address (v) in the 2044 * target physical map with the protection requested. 2045 * 2046 * If specified, the page will be wired down, meaning 2047 * that the related pte can not be reclaimed. 2048 * 2049 * NB: This is the only routine which MAY NOT lazy-evaluate 2050 * or lose information. That is, this routine must actually 2051 * insert this page into the given map NOW. 2052 */ 2053void 2054pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, 2055 boolean_t wired) 2056{ 2057 vm_offset_t pa; 2058 register pt_entry_t *pte; 2059 vm_offset_t opa; 2060 pt_entry_t origpte, newpte; 2061 vm_page_t mpte; 2062 2063 if (pmap == NULL) 2064 return; 2065 2066 va &= PG_FRAME; 2067#ifdef PMAP_DIAGNOSTIC 2068 if (va > VM_MAX_KERNEL_ADDRESS) 2069 panic("pmap_enter: toobig"); 2070 if ((va >= UPT_MIN_ADDRESS) && (va < UPT_MAX_ADDRESS)) 2071 panic("pmap_enter: invalid to pmap_enter page table pages (va: 0x%x)", va); 2072#endif 2073 2074 mpte = NULL; 2075 /* 2076 * In the case that a page table page is not 2077 * resident, we are creating it here. 2078 */ 2079 if (va < VM_MAXUSER_ADDRESS) { 2080 mpte = pmap_allocpte(pmap, va); 2081 } 2082#if 0 && defined(PMAP_DIAGNOSTIC) 2083 else { 2084 pd_entry_t *pdeaddr = pmap_pde(pmap, va); 2085 origpte = *pdeaddr; 2086 if ((origpte & PG_V) == 0) { 2087 panic("pmap_enter: invalid kernel page table page, pdir=%p, pde=%p, va=%p\n", 2088 pmap->pm_pdir[PTDPTDI], origpte, va); 2089 } 2090 } 2091#endif 2092 2093 pte = pmap_pte(pmap, va); 2094 2095 /* 2096 * Page Directory table entry not valid, we need a new PT page 2097 */ 2098 if (pte == NULL) { 2099 panic("pmap_enter: invalid page directory, pdir=%p, va=0x%x\n", 2100 (void *)pmap->pm_pdir[PTDPTDI], va); 2101 } 2102 2103 pa = VM_PAGE_TO_PHYS(m) & PG_FRAME; 2104 origpte = *(vm_offset_t *)pte; 2105 opa = origpte & PG_FRAME; 2106 2107 if (origpte & PG_PS) 2108 panic("pmap_enter: attempted pmap_enter on 4MB page"); 2109 2110 /* 2111 * Mapping has not changed, must be protection or wiring change. 2112 */ 2113 if (origpte && (opa == pa)) { 2114 /* 2115 * Wiring change, just update stats. We don't worry about 2116 * wiring PT pages as they remain resident as long as there 2117 * are valid mappings in them. Hence, if a user page is wired, 2118 * the PT page will be also. 
2119 */ 2120 if (wired && ((origpte & PG_W) == 0)) 2121 pmap->pm_stats.wired_count++; 2122 else if (!wired && (origpte & PG_W)) 2123 pmap->pm_stats.wired_count--; 2124 2125#if defined(PMAP_DIAGNOSTIC) 2126 if (pmap_nw_modified((pt_entry_t) origpte)) { 2127 printf( 2128 "pmap_enter: modified page not writable: va: 0x%x, pte: 0x%x\n", 2129 va, origpte); 2130 } 2131#endif 2132 2133 /* 2134 * Remove extra pte reference 2135 */ 2136 if (mpte) 2137 mpte->hold_count--; 2138 2139 if ((prot & VM_PROT_WRITE) && (origpte & PG_V)) { 2140 if ((origpte & PG_RW) == 0) { 2141 *pte |= PG_RW; 2142 pmap_invalidate_page(pmap, va); 2143 } 2144 return; 2145 } 2146 2147 /* 2148 * We might be turning off write access to the page, 2149 * so we go ahead and sense modify status. 2150 */ 2151 if (origpte & PG_MANAGED) { 2152 if ((origpte & PG_M) && pmap_track_modified(va)) { 2153 vm_page_t om; 2154 om = PHYS_TO_VM_PAGE(opa); 2155 vm_page_dirty(om); 2156 } 2157 pa |= PG_MANAGED; 2158 } 2159 goto validate; 2160 } 2161 /* 2162 * Mapping has changed, invalidate old range and fall through to 2163 * handle validating new mapping. 2164 */ 2165 if (opa) { 2166 int err; 2167 vm_page_lock_queues(); 2168 err = pmap_remove_pte(pmap, pte, va); 2169 vm_page_unlock_queues(); 2170 if (err) 2171 panic("pmap_enter: pte vanished, va: 0x%x", va); 2172 } 2173 2174 /* 2175 * Enter on the PV list if part of our managed memory. Note that we 2176 * raise IPL while manipulating pv_table since pmap_enter can be 2177 * called at interrupt time. 2178 */ 2179 if (pmap_initialized && 2180 (m->flags & (PG_FICTITIOUS|PG_UNMANAGED)) == 0) { 2181 pmap_insert_entry(pmap, va, mpte, m); 2182 pa |= PG_MANAGED; 2183 } 2184 2185 /* 2186 * Increment counters 2187 */ 2188 pmap->pm_stats.resident_count++; 2189 if (wired) 2190 pmap->pm_stats.wired_count++; 2191 2192validate: 2193 /* 2194 * Now validate mapping with desired protection/wiring. 2195 */ 2196 newpte = (vm_offset_t) (pa | pte_prot(pmap, prot) | PG_V); 2197 2198 if (wired) 2199 newpte |= PG_W; 2200 if (va < VM_MAXUSER_ADDRESS) 2201 newpte |= PG_U; 2202 if (pmap == kernel_pmap) 2203 newpte |= pgeflag; 2204 2205 /* 2206 * if the mapping or permission bits are different, we need 2207 * to update the pte. 2208 */ 2209 if ((origpte & ~(PG_M|PG_A)) != newpte) { 2210 *pte = newpte | PG_A; 2211 /*if (origpte)*/ { 2212 pmap_invalidate_page(pmap, va); 2213 } 2214 } 2215} 2216 2217/* 2218 * this code makes some *MAJOR* assumptions: 2219 * 1. Current pmap & pmap exists. 2220 * 2. Not wired. 2221 * 3. Read access. 2222 * 4. No page table pages. 2223 * 5. Tlbflush is deferred to calling procedure. 2224 * 6. Page IS managed. 2225 * but is *MUCH* faster than pmap_enter... 2226 */ 2227 2228static vm_page_t 2229pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t mpte) 2230{ 2231 pt_entry_t *pte; 2232 vm_offset_t pa; 2233 2234 /* 2235 * In the case that a page table page is not 2236 * resident, we are creating it here. 2237 */ 2238 if (va < VM_MAXUSER_ADDRESS) { 2239 unsigned ptepindex; 2240 pd_entry_t ptepa; 2241 2242 /* 2243 * Calculate pagetable page index 2244 */ 2245 ptepindex = va >> PDRSHIFT; 2246 if (mpte && (mpte->pindex == ptepindex)) { 2247 mpte->hold_count++; 2248 } else { 2249retry: 2250 /* 2251 * Get the page directory entry 2252 */ 2253 ptepa = pmap->pm_pdir[ptepindex]; 2254 2255 /* 2256 * If the page table page is mapped, we just increment 2257 * the hold count, and activate it. 
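 * The object's root page is checked first as a cheap hit for the
 * most recently used page table page; otherwise pmap_page_lookup()
 * is used, and a NULL result sends us back to re-read the page
 * directory entry.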
2258 */ 2259 if (ptepa) { 2260 if (ptepa & PG_PS) 2261 panic("pmap_enter_quick: unexpected mapping into 4MB page"); 2262 if (pmap->pm_pteobj->root && 2263 (pmap->pm_pteobj->root->pindex == ptepindex)) { 2264 mpte = pmap->pm_pteobj->root; 2265 } else { 2266 mpte = pmap_page_lookup(pmap->pm_pteobj, ptepindex); 2267 } 2268 if (mpte == NULL) 2269 goto retry; 2270 mpte->hold_count++; 2271 } else { 2272 mpte = _pmap_allocpte(pmap, ptepindex); 2273 } 2274 } 2275 } else { 2276 mpte = NULL; 2277 } 2278 2279 /* 2280 * This call to vtopte makes the assumption that we are 2281 * entering the page into the current pmap. In order to support 2282 * quick entry into any pmap, one would likely use pmap_pte_quick. 2283 * But that isn't as quick as vtopte. 2284 */ 2285 pte = vtopte(va); 2286 if (*pte) { 2287 if (mpte != NULL) { 2288 vm_page_lock_queues(); 2289 pmap_unwire_pte_hold(pmap, mpte); 2290 vm_page_unlock_queues(); 2291 } 2292 return 0; 2293 } 2294 2295 /* 2296 * Enter on the PV list if part of our managed memory. Note that we 2297 * raise IPL while manipulating pv_table since pmap_enter can be 2298 * called at interrupt time. 2299 */ 2300 if ((m->flags & (PG_FICTITIOUS|PG_UNMANAGED)) == 0) 2301 pmap_insert_entry(pmap, va, mpte, m); 2302 2303 /* 2304 * Increment counters 2305 */ 2306 pmap->pm_stats.resident_count++; 2307 2308 pa = VM_PAGE_TO_PHYS(m); 2309 2310 /* 2311 * Now validate mapping with RO protection 2312 */ 2313 if (m->flags & (PG_FICTITIOUS|PG_UNMANAGED)) 2314 *pte = pa | PG_V | PG_U; 2315 else 2316 *pte = pa | PG_V | PG_U | PG_MANAGED; 2317 2318 return mpte; 2319} 2320 2321/* 2322 * Make a temporary mapping for a physical address. This is only intended 2323 * to be used for panic dumps. 2324 */ 2325void * 2326pmap_kenter_temporary(vm_offset_t pa, int i) 2327{ 2328 vm_offset_t va; 2329 2330 va = (vm_offset_t)crashdumpmap + (i * PAGE_SIZE); 2331 pmap_kenter(va, pa); 2332#ifndef I386_CPU 2333 invlpg(va); 2334#else 2335 invltlb(); 2336#endif 2337 return ((void *)crashdumpmap); 2338} 2339 2340#define MAX_INIT_PT (96) 2341/* 2342 * pmap_object_init_pt preloads the ptes for a given object 2343 * into the specified pmap. This eliminates the blast of soft 2344 * faults on process startup and immediately after an mmap. 2345 */ 2346void 2347pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, 2348 vm_object_t object, vm_pindex_t pindex, 2349 vm_size_t size, int limit) 2350{ 2351 vm_offset_t tmpidx; 2352 int psize; 2353 vm_page_t p, mpte; 2354 2355 if (pmap == NULL || object == NULL) 2356 return; 2357 2358 /* 2359 * This code maps large physical mmap regions into the 2360 * processor address space. Note that some shortcuts 2361 * are taken, but the code works. 
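 *
 * The large physical mapping path below is taken only when the
 * processor supports 4MB pages (pseflag is set), the backing object
 * is a device object, and both addr and size are NBPDR (4MB)
 * aligned; the region is then mapped with PG_PS page directory
 * entries instead of individual ptes.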
2362 */ 2363 if (pseflag && (object->type == OBJT_DEVICE) && 2364 ((addr & (NBPDR - 1)) == 0) && ((size & (NBPDR - 1)) == 0)) { 2365 int i; 2366 vm_page_t m[1]; 2367 unsigned int ptepindex; 2368 int npdes; 2369 pd_entry_t ptepa; 2370 2371 if (pmap->pm_pdir[ptepindex = (addr >> PDRSHIFT)]) 2372 return; 2373 2374retry: 2375 p = vm_page_lookup(object, pindex); 2376 if (p != NULL) { 2377 vm_page_lock_queues(); 2378 if (vm_page_sleep_if_busy(p, FALSE, "init4p")) 2379 goto retry; 2380 } else { 2381 p = vm_page_alloc(object, pindex, VM_ALLOC_NORMAL); 2382 if (p == NULL) 2383 return; 2384 m[0] = p; 2385 2386 if (vm_pager_get_pages(object, m, 1, 0) != VM_PAGER_OK) { 2387 vm_page_lock_queues(); 2388 vm_page_free(p); 2389 vm_page_unlock_queues(); 2390 return; 2391 } 2392 2393 p = vm_page_lookup(object, pindex); 2394 vm_page_lock_queues(); 2395 vm_page_wakeup(p); 2396 } 2397 vm_page_unlock_queues(); 2398 2399 ptepa = VM_PAGE_TO_PHYS(p); 2400 if (ptepa & (NBPDR - 1)) { 2401 return; 2402 } 2403 2404 p->valid = VM_PAGE_BITS_ALL; 2405 2406 pmap->pm_stats.resident_count += size >> PAGE_SHIFT; 2407 npdes = size >> PDRSHIFT; 2408 for(i = 0; i < npdes; i++) { 2409 pmap->pm_pdir[ptepindex] = 2410 ptepa | PG_U | PG_RW | PG_V | PG_PS; 2411 ptepa += NBPDR; 2412 ptepindex += 1; 2413 } 2414 pmap_invalidate_all(kernel_pmap); 2415 return; 2416 } 2417 2418 psize = i386_btop(size); 2419 2420 if ((object->type != OBJT_VNODE) || 2421 ((limit & MAP_PREFAULT_PARTIAL) && (psize > MAX_INIT_PT) && 2422 (object->resident_page_count > MAX_INIT_PT))) { 2423 return; 2424 } 2425 2426 if (psize + pindex > object->size) { 2427 if (object->size < pindex) 2428 return; 2429 psize = object->size - pindex; 2430 } 2431 2432 mpte = NULL; 2433 2434 if ((p = TAILQ_FIRST(&object->memq)) != NULL) { 2435 if (p->pindex < pindex) { 2436 p = vm_page_splay(pindex, object->root); 2437 if ((object->root = p)->pindex < pindex) 2438 p = TAILQ_NEXT(p, listq); 2439 } 2440 } 2441 /* 2442 * Assert: the variable p is either (1) the page with the 2443 * least pindex greater than or equal to the parameter pindex 2444 * or (2) NULL. 2445 */ 2446 for (; 2447 p != NULL && (tmpidx = p->pindex - pindex) < psize; 2448 p = TAILQ_NEXT(p, listq)) { 2449 /* 2450 * don't allow an madvise to blow away our really 2451 * free pages allocating pv entries. 2452 */ 2453 if ((limit & MAP_PREFAULT_MADVISE) && 2454 cnt.v_free_count < cnt.v_free_reserved) { 2455 break; 2456 } 2457 vm_page_lock_queues(); 2458 if ((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL && 2459 (p->busy == 0) && 2460 (p->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) { 2461 if ((p->queue - p->pc) == PQ_CACHE) 2462 vm_page_deactivate(p); 2463 vm_page_busy(p); 2464 vm_page_unlock_queues(); 2465 mpte = pmap_enter_quick(pmap, 2466 addr + i386_ptob(tmpidx), p, mpte); 2467 vm_page_lock_queues(); 2468 vm_page_wakeup(p); 2469 } 2470 vm_page_unlock_queues(); 2471 } 2472 return; 2473} 2474 2475/* 2476 * pmap_prefault provides a quick way of clustering 2477 * pagefaults into a processes address space. It is a "cousin" 2478 * of pmap_object_init_pt, except it runs at page fault time instead 2479 * of mmap time. 
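 *
 * The candidate pages come from pmap_prefault_pageorder below:
 * alternating offsets of -1, +1, -2, +2, ... pages around the
 * faulting address, up to PFBAK pages behind and PFFOR pages ahead,
 * skipping addresses that fall outside the map entry or that are
 * already mapped.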
2480 */ 2481#define PFBAK 4 2482#define PFFOR 4 2483#define PAGEORDER_SIZE (PFBAK+PFFOR) 2484 2485static int pmap_prefault_pageorder[] = { 2486 -1 * PAGE_SIZE, 1 * PAGE_SIZE, 2487 -2 * PAGE_SIZE, 2 * PAGE_SIZE, 2488 -3 * PAGE_SIZE, 3 * PAGE_SIZE, 2489 -4 * PAGE_SIZE, 4 * PAGE_SIZE 2490}; 2491 2492void 2493pmap_prefault(pmap, addra, entry) 2494 pmap_t pmap; 2495 vm_offset_t addra; 2496 vm_map_entry_t entry; 2497{ 2498 int i; 2499 vm_offset_t starta; 2500 vm_offset_t addr; 2501 vm_pindex_t pindex; 2502 vm_page_t m, mpte; 2503 vm_object_t object; 2504 2505 if (!curthread || (pmap != vmspace_pmap(curthread->td_proc->p_vmspace))) 2506 return; 2507 2508 object = entry->object.vm_object; 2509 2510 starta = addra - PFBAK * PAGE_SIZE; 2511 if (starta < entry->start) { 2512 starta = entry->start; 2513 } else if (starta > addra) { 2514 starta = 0; 2515 } 2516 2517 mpte = NULL; 2518 for (i = 0; i < PAGEORDER_SIZE; i++) { 2519 vm_object_t lobject; 2520 pt_entry_t *pte; 2521 2522 addr = addra + pmap_prefault_pageorder[i]; 2523 if (addr > addra + (PFFOR * PAGE_SIZE)) 2524 addr = 0; 2525 2526 if (addr < starta || addr >= entry->end) 2527 continue; 2528 2529 if ((*pmap_pde(pmap, addr)) == 0) 2530 continue; 2531 2532 pte = vtopte(addr); 2533 if (*pte) 2534 continue; 2535 2536 pindex = ((addr - entry->start) + entry->offset) >> PAGE_SHIFT; 2537 lobject = object; 2538 for (m = vm_page_lookup(lobject, pindex); 2539 (!m && (lobject->type == OBJT_DEFAULT) && (lobject->backing_object)); 2540 lobject = lobject->backing_object) { 2541 if (lobject->backing_object_offset & PAGE_MASK) 2542 break; 2543 pindex += (lobject->backing_object_offset >> PAGE_SHIFT); 2544 m = vm_page_lookup(lobject->backing_object, pindex); 2545 } 2546 2547 /* 2548 * give-up when a page is not in memory 2549 */ 2550 if (m == NULL) 2551 break; 2552 vm_page_lock_queues(); 2553 if (((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) && 2554 (m->busy == 0) && 2555 (m->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) { 2556 2557 if ((m->queue - m->pc) == PQ_CACHE) { 2558 vm_page_deactivate(m); 2559 } 2560 vm_page_busy(m); 2561 vm_page_unlock_queues(); 2562 mpte = pmap_enter_quick(pmap, addr, m, mpte); 2563 vm_page_lock_queues(); 2564 vm_page_wakeup(m); 2565 } 2566 vm_page_unlock_queues(); 2567 } 2568} 2569 2570/* 2571 * Routine: pmap_change_wiring 2572 * Function: Change the wiring attribute for a map/virtual-address 2573 * pair. 2574 * In/out conditions: 2575 * The mapping must already exist in the pmap. 2576 */ 2577void 2578pmap_change_wiring(pmap, va, wired) 2579 register pmap_t pmap; 2580 vm_offset_t va; 2581 boolean_t wired; 2582{ 2583 register pt_entry_t *pte; 2584 2585 if (pmap == NULL) 2586 return; 2587 2588 pte = pmap_pte(pmap, va); 2589 2590 if (wired && !pmap_pte_w(pte)) 2591 pmap->pm_stats.wired_count++; 2592 else if (!wired && pmap_pte_w(pte)) 2593 pmap->pm_stats.wired_count--; 2594 2595 /* 2596 * Wiring is not a hardware characteristic so there is no need to 2597 * invalidate TLB. 2598 */ 2599 pmap_pte_set_w(pte, wired); 2600} 2601 2602 2603 2604/* 2605 * Copy the range specified by src_addr/len 2606 * from the source map to the range dst_addr/len 2607 * in the destination map. 2608 * 2609 * This routine is only advisory and need not do anything. 
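 *
 * Because it is only advisory, the loop below gives up early when
 * free pages fall below the reserve or pv entries exceed their high
 * water mark, and nothing is copied at all unless dst_addr and
 * src_addr are identical.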
2610 */ 2611 2612void 2613pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len, 2614 vm_offset_t src_addr) 2615{ 2616 vm_offset_t addr; 2617 vm_offset_t end_addr = src_addr + len; 2618 vm_offset_t pdnxt; 2619 pd_entry_t src_frame, dst_frame; 2620 vm_page_t m; 2621 2622 if (dst_addr != src_addr) 2623 return; 2624 2625 src_frame = src_pmap->pm_pdir[PTDPTDI] & PG_FRAME; 2626 if (src_frame != (PTDpde & PG_FRAME)) 2627 return; 2628 2629 dst_frame = dst_pmap->pm_pdir[PTDPTDI] & PG_FRAME; 2630 for (addr = src_addr; addr < end_addr; addr = pdnxt) { 2631 pt_entry_t *src_pte, *dst_pte; 2632 vm_page_t dstmpte, srcmpte; 2633 pd_entry_t srcptepaddr; 2634 unsigned ptepindex; 2635 2636 if (addr >= UPT_MIN_ADDRESS) 2637 panic("pmap_copy: invalid to pmap_copy page tables\n"); 2638 2639 /* 2640 * Don't let optional prefaulting of pages make us go 2641 * way below the low water mark of free pages or way 2642 * above high water mark of used pv entries. 2643 */ 2644 if (cnt.v_free_count < cnt.v_free_reserved || 2645 pv_entry_count > pv_entry_high_water) 2646 break; 2647 2648 pdnxt = ((addr + PAGE_SIZE*NPTEPG) & ~(PAGE_SIZE*NPTEPG - 1)); 2649 ptepindex = addr >> PDRSHIFT; 2650 2651 srcptepaddr = src_pmap->pm_pdir[ptepindex]; 2652 if (srcptepaddr == 0) 2653 continue; 2654 2655 if (srcptepaddr & PG_PS) { 2656 if (dst_pmap->pm_pdir[ptepindex] == 0) { 2657 dst_pmap->pm_pdir[ptepindex] = srcptepaddr; 2658 dst_pmap->pm_stats.resident_count += NBPDR / PAGE_SIZE; 2659 } 2660 continue; 2661 } 2662 2663 srcmpte = vm_page_lookup(src_pmap->pm_pteobj, ptepindex); 2664 if ((srcmpte == NULL) || 2665 (srcmpte->hold_count == 0) || (srcmpte->flags & PG_BUSY)) 2666 continue; 2667 2668 if (pdnxt > end_addr) 2669 pdnxt = end_addr; 2670 2671 /* 2672 * Have to recheck this before every avtopte() call below 2673 * in case we have blocked and something else used APTDpde. 2674 */ 2675 if (dst_frame != (APTDpde & PG_FRAME)) { 2676 APTDpde = dst_frame | PG_RW | PG_V; 2677 pmap_invalidate_all(kernel_pmap); /* XXX Bandaid */ 2678 } 2679 src_pte = vtopte(addr); 2680 dst_pte = avtopte(addr); 2681 while (addr < pdnxt) { 2682 pt_entry_t ptetemp; 2683 ptetemp = *src_pte; 2684 /* 2685 * we only virtual copy managed pages 2686 */ 2687 if ((ptetemp & PG_MANAGED) != 0) { 2688 /* 2689 * We have to check after allocpte for the 2690 * pte still being around... allocpte can 2691 * block. 2692 */ 2693 dstmpte = pmap_allocpte(dst_pmap, addr); 2694 if ((*dst_pte == 0) && (ptetemp = *src_pte)) { 2695 /* 2696 * Clear the modified and 2697 * accessed (referenced) bits 2698 * during the copy. 2699 */ 2700 m = PHYS_TO_VM_PAGE(ptetemp); 2701 *dst_pte = ptetemp & ~(PG_M | PG_A); 2702 dst_pmap->pm_stats.resident_count++; 2703 pmap_insert_entry(dst_pmap, addr, 2704 dstmpte, m); 2705 } else { 2706 vm_page_lock_queues(); 2707 pmap_unwire_pte_hold(dst_pmap, dstmpte); 2708 vm_page_unlock_queues(); 2709 } 2710 if (dstmpte->hold_count >= srcmpte->hold_count) 2711 break; 2712 } 2713 addr += PAGE_SIZE; 2714 src_pte++; 2715 dst_pte++; 2716 } 2717 } 2718} 2719 2720#ifdef SMP 2721 2722/* 2723 * pmap_zpi_switchin*() 2724 * 2725 * These functions allow us to avoid doing IPIs altogether in certain 2726 * temporary page-mapping situations (page zeroing). Instead, to deal 2727 * with being preempted and moved onto a different cpu, we invalidate 2728 * the page when the scheduler switches us in. This does not occur 2729 * very often so we remain relatively optimal with very little effort.
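 *
 * The pattern used by the zero/copy routines below is: install the
 * temporary CMAP mapping, point curthread->td_switchin at the
 * matching pmap_zpi_switchin*() routine, do the bzero/bcopy, then
 * clear td_switchin and the mapping.  Each time the thread is
 * switched back in, the hook invalidates the CADDR translation on
 * the current CPU, so a migration to another CPU cannot leave a
 * stale TLB entry in use.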
2730 */ 2731static void 2732pmap_zpi_switchin12(void) 2733{ 2734 invlpg((u_int)CADDR1); 2735 invlpg((u_int)CADDR2); 2736} 2737 2738static void 2739pmap_zpi_switchin2(void) 2740{ 2741 invlpg((u_int)CADDR2); 2742} 2743 2744static void 2745pmap_zpi_switchin3(void) 2746{ 2747 invlpg((u_int)CADDR3); 2748} 2749 2750#endif 2751 2752/* 2753 * pmap_zero_page zeros the specified hardware page by mapping 2754 * the page into KVM and using bzero to clear its contents. 2755 */ 2756void 2757pmap_zero_page(vm_page_t m) 2758{ 2759 2760 mtx_lock(&CMAPCADDR12_lock); 2761 if (*CMAP2) 2762 panic("pmap_zero_page: CMAP2 busy"); 2763 *CMAP2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M; 2764#ifdef I386_CPU 2765 invltlb(); 2766#else 2767#ifdef SMP 2768 curthread->td_switchin = pmap_zpi_switchin2; 2769#endif 2770 invlpg((u_int)CADDR2); 2771#endif 2772#if defined(I686_CPU) 2773 if (cpu_class == CPUCLASS_686) 2774 i686_pagezero(CADDR2); 2775 else 2776#endif 2777 bzero(CADDR2, PAGE_SIZE); 2778#ifdef SMP 2779 curthread->td_switchin = NULL; 2780#endif 2781 *CMAP2 = 0; 2782 mtx_unlock(&CMAPCADDR12_lock); 2783} 2784 2785/* 2786 * pmap_zero_page_area zeros the specified hardware page by mapping 2787 * the page into KVM and using bzero to clear its contents. 2788 * 2789 * off and size may not cover an area beyond a single hardware page. 2790 */ 2791void 2792pmap_zero_page_area(vm_page_t m, int off, int size) 2793{ 2794 2795 mtx_lock(&CMAPCADDR12_lock); 2796 if (*CMAP2) 2797 panic("pmap_zero_page_area: CMAP2 busy"); 2798 *CMAP2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M; 2799#ifdef I386_CPU 2800 invltlb(); 2801#else 2802#ifdef SMP 2803 curthread->td_switchin = pmap_zpi_switchin2; 2804#endif 2805 invlpg((u_int)CADDR2); 2806#endif 2807#if defined(I686_CPU) 2808 if (cpu_class == CPUCLASS_686 && off == 0 && size == PAGE_SIZE) 2809 i686_pagezero(CADDR2); 2810 else 2811#endif 2812 bzero((char *)CADDR2 + off, size); 2813#ifdef SMP 2814 curthread->td_switchin = NULL; 2815#endif 2816 *CMAP2 = 0; 2817 mtx_unlock(&CMAPCADDR12_lock); 2818} 2819 2820/* 2821 * pmap_zero_page_idle zeros the specified hardware page by mapping 2822 * the page into KVM and using bzero to clear its contents. This 2823 * is intended to be called from the vm_pagezero process only and 2824 * outside of Giant. 2825 */ 2826void 2827pmap_zero_page_idle(vm_page_t m) 2828{ 2829 2830 if (*CMAP3) 2831 panic("pmap_zero_page_idle: CMAP3 busy"); 2832 *CMAP3 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M; 2833#ifdef I386_CPU 2834 invltlb(); 2835#else 2836#ifdef SMP 2837 curthread->td_switchin = pmap_zpi_switchin3; 2838#endif 2839 invlpg((u_int)CADDR3); 2840#endif 2841#if defined(I686_CPU) 2842 if (cpu_class == CPUCLASS_686) 2843 i686_pagezero(CADDR3); 2844 else 2845#endif 2846 bzero(CADDR3, PAGE_SIZE); 2847#ifdef SMP 2848 curthread->td_switchin = NULL; 2849#endif 2850 *CMAP3 = 0; 2851} 2852 2853/* 2854 * pmap_copy_page copies the specified (machine independent) 2855 * page by mapping the page into virtual memory and using 2856 * bcopy to copy the page, one machine dependent page at a 2857 * time.
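 *
 * CMAP1/CADDR1 provide the read-only source window and CMAP2/CADDR2
 * the writable destination window; CMAPCADDR12_lock serializes their
 * use so that only one zero or copy operation owns them at a time.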
2858 */ 2859void 2860pmap_copy_page(vm_page_t src, vm_page_t dst) 2861{ 2862 2863 mtx_lock(&CMAPCADDR12_lock); 2864 if (*CMAP1) 2865 panic("pmap_copy_page: CMAP1 busy"); 2866 if (*CMAP2) 2867 panic("pmap_copy_page: CMAP2 busy"); 2868 *CMAP1 = PG_V | VM_PAGE_TO_PHYS(src) | PG_A; 2869 *CMAP2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(dst) | PG_A | PG_M; 2870#ifdef I386_CPU 2871 invltlb(); 2872#else 2873#ifdef SMP 2874 curthread->td_switchin = pmap_zpi_switchin12; 2875#endif 2876 invlpg((u_int)CADDR1); 2877 invlpg((u_int)CADDR2); 2878#endif 2879 bcopy(CADDR1, CADDR2, PAGE_SIZE); 2880#ifdef SMP 2881 curthread->td_switchin = NULL; 2882#endif 2883 *CMAP1 = 0; 2884 *CMAP2 = 0; 2885 mtx_unlock(&CMAPCADDR12_lock); 2886} 2887 2888/* 2889 * Returns true if the pmap's pv is one of the first 2890 * 16 pvs linked to from this page. This count may 2891 * be changed upwards or downwards in the future; it 2892 * is only necessary that true be returned for a small 2893 * subset of pmaps for proper page aging. 2894 */ 2895boolean_t 2896pmap_page_exists_quick(pmap, m) 2897 pmap_t pmap; 2898 vm_page_t m; 2899{ 2900 pv_entry_t pv; 2901 int loops = 0; 2902 int s; 2903 2904 if (!pmap_initialized || (m->flags & PG_FICTITIOUS)) 2905 return FALSE; 2906 2907 s = splvm(); 2908 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2909 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { 2910 if (pv->pv_pmap == pmap) { 2911 splx(s); 2912 return TRUE; 2913 } 2914 loops++; 2915 if (loops >= 16) 2916 break; 2917 } 2918 splx(s); 2919 return (FALSE); 2920} 2921 2922#define PMAP_REMOVE_PAGES_CURPROC_ONLY 2923/* 2924 * Remove all pages from specified address space 2925 * this aids process exit speeds. Also, this code 2926 * is special cased for current process only, but 2927 * can have the more generic (and slightly slower) 2928 * mode enabled. This is much faster than pmap_remove 2929 * in the case of running down an entire address space. 2930 */ 2931void 2932pmap_remove_pages(pmap, sva, eva) 2933 pmap_t pmap; 2934 vm_offset_t sva, eva; 2935{ 2936 pt_entry_t *pte, tpte; 2937 vm_page_t m; 2938 pv_entry_t pv, npv; 2939 int s; 2940 2941#ifdef PMAP_REMOVE_PAGES_CURPROC_ONLY 2942 if (!curthread || (pmap != vmspace_pmap(curthread->td_proc->p_vmspace))) { 2943 printf("warning: pmap_remove_pages called with non-current pmap\n"); 2944 return; 2945 } 2946#endif 2947 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2948 s = splvm(); 2949 for (pv = TAILQ_FIRST(&pmap->pm_pvlist); pv; pv = npv) { 2950 2951 if (pv->pv_va >= eva || pv->pv_va < sva) { 2952 npv = TAILQ_NEXT(pv, pv_plist); 2953 continue; 2954 } 2955 2956#ifdef PMAP_REMOVE_PAGES_CURPROC_ONLY 2957 pte = vtopte(pv->pv_va); 2958#else 2959 pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va); 2960#endif 2961 tpte = *pte; 2962 2963 if (tpte == 0) { 2964 printf("TPTE at %p IS ZERO @ VA %08x\n", 2965 pte, pv->pv_va); 2966 panic("bad pte"); 2967 } 2968 2969/* 2970 * We cannot remove wired pages from a process' mapping at this time 2971 */ 2972 if (tpte & PG_W) { 2973 npv = TAILQ_NEXT(pv, pv_plist); 2974 continue; 2975 } 2976 2977 m = PHYS_TO_VM_PAGE(tpte); 2978 KASSERT(m->phys_addr == (tpte & PG_FRAME), 2979 ("vm_page_t %p phys_addr mismatch %08x %08x", 2980 m, m->phys_addr, tpte)); 2981 2982 KASSERT(m < &vm_page_array[vm_page_array_size], 2983 ("pmap_remove_pages: bad tpte %x", tpte)); 2984 2985 pv->pv_pmap->pm_stats.resident_count--; 2986 2987 *pte = 0; 2988 2989 /* 2990 * Update the vm_page_t clean and reference bits. 
2991 */ 2992 if (tpte & PG_M) { 2993 vm_page_dirty(m); 2994 } 2995 2996 npv = TAILQ_NEXT(pv, pv_plist); 2997 TAILQ_REMOVE(&pv->pv_pmap->pm_pvlist, pv, pv_plist); 2998 2999 m->md.pv_list_count--; 3000 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list); 3001 if (TAILQ_FIRST(&m->md.pv_list) == NULL) { 3002 vm_page_flag_clear(m, PG_WRITEABLE); 3003 } 3004 3005 pmap_unuse_pt(pv->pv_pmap, pv->pv_va, pv->pv_ptem); 3006 free_pv_entry(pv); 3007 } 3008 splx(s); 3009 pmap_invalidate_all(pmap); 3010} 3011 3012/* 3013 * pmap_is_modified: 3014 * 3015 * Return whether or not the specified physical page was modified 3016 * in any physical maps. 3017 */ 3018boolean_t 3019pmap_is_modified(vm_page_t m) 3020{ 3021 pv_entry_t pv; 3022 pt_entry_t *pte; 3023 int s; 3024 3025 if (!pmap_initialized || (m->flags & PG_FICTITIOUS)) 3026 return FALSE; 3027 3028 s = splvm(); 3029 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 3030 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { 3031 /* 3032 * if the bit being tested is the modified bit, then 3033 * mark clean_map and ptes as never 3034 * modified. 3035 */ 3036 if (!pmap_track_modified(pv->pv_va)) 3037 continue; 3038#if defined(PMAP_DIAGNOSTIC) 3039 if (!pv->pv_pmap) { 3040 printf("Null pmap (tb) at va: 0x%x\n", pv->pv_va); 3041 continue; 3042 } 3043#endif 3044 pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va); 3045 if (*pte & PG_M) { 3046 splx(s); 3047 return TRUE; 3048 } 3049 } 3050 splx(s); 3051 return (FALSE); 3052} 3053 3054/* 3055 * this routine is used to modify bits in ptes 3056 */ 3057static __inline void 3058pmap_changebit(vm_page_t m, int bit, boolean_t setem) 3059{ 3060 register pv_entry_t pv; 3061 register pt_entry_t *pte; 3062 int s; 3063 3064 if (!pmap_initialized || (m->flags & PG_FICTITIOUS) || 3065 (!setem && bit == PG_RW && (m->flags & PG_WRITEABLE) == 0)) 3066 return; 3067 3068 s = splvm(); 3069 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 3070 /* 3071 * Loop over all current mappings setting/clearing as appropos If 3072 * setting RO do we need to clear the VAC? 3073 */ 3074 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { 3075 /* 3076 * don't write protect pager mappings 3077 */ 3078 if (!setem && (bit == PG_RW)) { 3079 if (!pmap_track_modified(pv->pv_va)) 3080 continue; 3081 } 3082 3083#if defined(PMAP_DIAGNOSTIC) 3084 if (!pv->pv_pmap) { 3085 printf("Null pmap (cb) at va: 0x%x\n", pv->pv_va); 3086 continue; 3087 } 3088#endif 3089 3090 pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va); 3091 3092 if (setem) { 3093 *pte |= bit; 3094 pmap_invalidate_page(pv->pv_pmap, pv->pv_va); 3095 } else { 3096 pt_entry_t pbits = *pte; 3097 if (pbits & bit) { 3098 if (bit == PG_RW) { 3099 if (pbits & PG_M) { 3100 vm_page_dirty(m); 3101 } 3102 *pte = pbits & ~(PG_M|PG_RW); 3103 } else { 3104 *pte = pbits & ~bit; 3105 } 3106 pmap_invalidate_page(pv->pv_pmap, pv->pv_va); 3107 } 3108 } 3109 } 3110 if (!setem && bit == PG_RW) 3111 vm_page_flag_clear(m, PG_WRITEABLE); 3112 splx(s); 3113} 3114 3115/* 3116 * pmap_page_protect: 3117 * 3118 * Lower the permission for all mappings to a given page. 3119 */ 3120void 3121pmap_page_protect(vm_page_t m, vm_prot_t prot) 3122{ 3123 if ((prot & VM_PROT_WRITE) == 0) { 3124 if (prot & (VM_PROT_READ | VM_PROT_EXECUTE)) { 3125 pmap_changebit(m, PG_RW, FALSE); 3126 } else { 3127 pmap_remove_all(m); 3128 } 3129 } 3130} 3131 3132vm_offset_t 3133pmap_phys_address(ppn) 3134 int ppn; 3135{ 3136 return (i386_ptob(ppn)); 3137} 3138 3139/* 3140 * pmap_ts_referenced: 3141 * 3142 * Return a count of reference bits for a page, clearing those bits. 
3143 * It is not necessary for every reference bit to be cleared, but it 3144 * is necessary that 0 only be returned when there are truly no 3145 * reference bits set. 3146 * 3147 * XXX: The exact number of bits to check and clear is a matter that 3148 * should be tested and standardized at some point in the future for 3149 * optimal aging of shared pages. 3150 */ 3151int 3152pmap_ts_referenced(vm_page_t m) 3153{ 3154 register pv_entry_t pv, pvf, pvn; 3155 pt_entry_t *pte; 3156 int s; 3157 int rtval = 0; 3158 3159 if (!pmap_initialized || (m->flags & PG_FICTITIOUS)) 3160 return (rtval); 3161 3162 s = splvm(); 3163 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 3164 if ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) { 3165 3166 pvf = pv; 3167 3168 do { 3169 pvn = TAILQ_NEXT(pv, pv_list); 3170 3171 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list); 3172 3173 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list); 3174 3175 if (!pmap_track_modified(pv->pv_va)) 3176 continue; 3177 3178 pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va); 3179 3180 if (pte && (*pte & PG_A)) { 3181 *pte &= ~PG_A; 3182 3183 pmap_invalidate_page(pv->pv_pmap, pv->pv_va); 3184 3185 rtval++; 3186 if (rtval > 4) { 3187 break; 3188 } 3189 } 3190 } while ((pv = pvn) != NULL && pv != pvf); 3191 } 3192 splx(s); 3193 3194 return (rtval); 3195} 3196 3197/* 3198 * Clear the modify bits on the specified physical page. 3199 */ 3200void 3201pmap_clear_modify(vm_page_t m) 3202{ 3203 pmap_changebit(m, PG_M, FALSE); 3204} 3205 3206/* 3207 * pmap_clear_reference: 3208 * 3209 * Clear the reference bit on the specified physical page. 3210 */ 3211void 3212pmap_clear_reference(vm_page_t m) 3213{ 3214 pmap_changebit(m, PG_A, FALSE); 3215} 3216 3217/* 3218 * Miscellaneous support routines follow 3219 */ 3220 3221static void 3222i386_protection_init() 3223{ 3224 register int *kp, prot; 3225 3226 kp = protection_codes; 3227 for (prot = 0; prot < 8; prot++) { 3228 switch (prot) { 3229 case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_NONE: 3230 /* 3231 * Read access is also 0. There isn't any execute bit, 3232 * so just make it readable. 3233 */ 3234 case VM_PROT_READ | VM_PROT_NONE | VM_PROT_NONE: 3235 case VM_PROT_READ | VM_PROT_NONE | VM_PROT_EXECUTE: 3236 case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_EXECUTE: 3237 *kp++ = 0; 3238 break; 3239 case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_NONE: 3240 case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_EXECUTE: 3241 case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_NONE: 3242 case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE: 3243 *kp++ = PG_RW; 3244 break; 3245 } 3246 } 3247} 3248 3249/* 3250 * Map a set of physical memory pages into the kernel virtual 3251 * address space. Return a pointer to where it is mapped. This 3252 * routine is intended to be used for mapping device memory, 3253 * NOT real memory. 
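 *
 * A typical use from a driver, sketched here with placeholder
 * REGBASE/REGSIZE values standing in for a device register window,
 * might look like:
 *
 *	void *regs = pmap_mapdev(REGBASE, REGSIZE);
 *	...
 *	pmap_unmapdev((vm_offset_t)regs, REGSIZE);
 *
 * The returned pointer already includes the sub-page offset of pa,
 * so callers need not add it back; pmap_unmapdev() tears down the
 * mappings and releases the kernel virtual address range.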
3254 */ 3255void * 3256pmap_mapdev(pa, size) 3257 vm_offset_t pa; 3258 vm_size_t size; 3259{ 3260 vm_offset_t va, tmpva, offset; 3261 3262 offset = pa & PAGE_MASK; 3263 size = roundup(offset + size, PAGE_SIZE); 3264 3265 GIANT_REQUIRED; 3266 3267 va = kmem_alloc_pageable(kernel_map, size); 3268 if (!va) 3269 panic("pmap_mapdev: Couldn't alloc kernel virtual memory"); 3270 3271 pa = pa & PG_FRAME; 3272 for (tmpva = va; size > 0; ) { 3273 pmap_kenter(tmpva, pa); 3274 size -= PAGE_SIZE; 3275 tmpva += PAGE_SIZE; 3276 pa += PAGE_SIZE; 3277 } 3278 pmap_invalidate_range(kernel_pmap, va, tmpva); 3279 return ((void *)(va + offset)); 3280} 3281 3282void 3283pmap_unmapdev(va, size) 3284 vm_offset_t va; 3285 vm_size_t size; 3286{ 3287 vm_offset_t base, offset, tmpva; 3288 pt_entry_t *pte; 3289 3290 base = va & PG_FRAME; 3291 offset = va & PAGE_MASK; 3292 size = roundup(offset + size, PAGE_SIZE); 3293 for (tmpva = base; tmpva < (base + size); tmpva += PAGE_SIZE) { 3294 pte = vtopte(tmpva); 3295 *pte = 0; 3296 } 3297 pmap_invalidate_range(kernel_pmap, va, tmpva); 3298 kmem_free(kernel_map, base, size); 3299} 3300 3301/* 3302 * perform the pmap work for mincore 3303 */ 3304int 3305pmap_mincore(pmap, addr) 3306 pmap_t pmap; 3307 vm_offset_t addr; 3308{ 3309 pt_entry_t *ptep, pte; 3310 vm_page_t m; 3311 int val = 0; 3312 3313 ptep = pmap_pte(pmap, addr); 3314 if (ptep == 0) { 3315 return 0; 3316 } 3317 3318 if ((pte = *ptep) != 0) { 3319 vm_offset_t pa; 3320 3321 val = MINCORE_INCORE; 3322 if ((pte & PG_MANAGED) == 0) 3323 return val; 3324 3325 pa = pte & PG_FRAME; 3326 3327 m = PHYS_TO_VM_PAGE(pa); 3328 3329 /* 3330 * Modified by us 3331 */ 3332 if (pte & PG_M) 3333 val |= MINCORE_MODIFIED|MINCORE_MODIFIED_OTHER; 3334 else { 3335 /* 3336 * Modified by someone else 3337 */ 3338 vm_page_lock_queues(); 3339 if (m->dirty || pmap_is_modified(m)) 3340 val |= MINCORE_MODIFIED_OTHER; 3341 vm_page_unlock_queues(); 3342 } 3343 /* 3344 * Referenced by us 3345 */ 3346 if (pte & PG_A) 3347 val |= MINCORE_REFERENCED|MINCORE_REFERENCED_OTHER; 3348 else { 3349 /* 3350 * Referenced by someone else 3351 */ 3352 vm_page_lock_queues(); 3353 if ((m->flags & PG_REFERENCED) || 3354 pmap_ts_referenced(m)) { 3355 val |= MINCORE_REFERENCED_OTHER; 3356 vm_page_flag_set(m, PG_REFERENCED); 3357 } 3358 vm_page_unlock_queues(); 3359 } 3360 } 3361 return val; 3362} 3363 3364void 3365pmap_activate(struct thread *td) 3366{ 3367 struct proc *p = td->td_proc; 3368 pmap_t pmap; 3369 u_int32_t cr3; 3370 3371 pmap = vmspace_pmap(td->td_proc->p_vmspace); 3372#if defined(SMP) 3373 pmap->pm_active |= PCPU_GET(cpumask); 3374#else 3375 pmap->pm_active |= 1; 3376#endif 3377 cr3 = vtophys(pmap->pm_pdir); 3378 /* XXXKSE this is wrong. 3379 * pmap_activate is for the current thread on the current cpu 3380 */ 3381 if (p->p_flag & P_KSES) { 3382 /* Make sure all other cr3 entries are updated. */ 3383 /* what if they are running? 
XXXKSE (maybe abort them) */ 3384 FOREACH_THREAD_IN_PROC(p, td) { 3385 td->td_pcb->pcb_cr3 = cr3; 3386 } 3387 } else { 3388 td->td_pcb->pcb_cr3 = cr3; 3389 } 3390 load_cr3(cr3); 3391#ifdef SWTCH_OPTIM_STATS 3392 tlb_flush_count++; 3393#endif 3394} 3395 3396vm_offset_t 3397pmap_addr_hint(vm_object_t obj, vm_offset_t addr, vm_size_t size) 3398{ 3399 3400 if ((obj == NULL) || (size < NBPDR) || (obj->type != OBJT_DEVICE)) { 3401 return addr; 3402 } 3403 3404 addr = (addr + (NBPDR - 1)) & ~(NBPDR - 1); 3405 return addr; 3406} 3407 3408 3409#if defined(PMAP_DEBUG) 3410pmap_pid_dump(int pid) 3411{ 3412 pmap_t pmap; 3413 struct proc *p; 3414 int npte = 0; 3415 int index; 3416 3417 sx_slock(&allproc_lock); 3418 LIST_FOREACH(p, &allproc, p_list) { 3419 if (p->p_pid != pid) 3420 continue; 3421 3422 if (p->p_vmspace) { 3423 int i,j; 3424 index = 0; 3425 pmap = vmspace_pmap(p->p_vmspace); 3426 for (i = 0; i < NPDEPG; i++) { 3427 pd_entry_t *pde; 3428 pt_entry_t *pte; 3429 vm_offset_t base = i << PDRSHIFT; 3430 3431 pde = &pmap->pm_pdir[i]; 3432 if (pde && pmap_pde_v(pde)) { 3433 for (j = 0; j < NPTEPG; j++) { 3434 vm_offset_t va = base + (j << PAGE_SHIFT); 3435 if (va >= (vm_offset_t) VM_MIN_KERNEL_ADDRESS) { 3436 if (index) { 3437 index = 0; 3438 printf("\n"); 3439 } 3440 sx_sunlock(&allproc_lock); 3441 return npte; 3442 } 3443 pte = pmap_pte_quick(pmap, va); 3444 if (pte && pmap_pte_v(pte)) { 3445 pt_entry_t pa; 3446 vm_page_t m; 3447 pa = *pte; 3448 m = PHYS_TO_VM_PAGE(pa); 3449 printf("va: 0x%x, pt: 0x%x, h: %d, w: %d, f: 0x%x", 3450 va, pa, m->hold_count, m->wire_count, m->flags); 3451 npte++; 3452 index++; 3453 if (index >= 2) { 3454 index = 0; 3455 printf("\n"); 3456 } else { 3457 printf(" "); 3458 } 3459 } 3460 } 3461 } 3462 } 3463 } 3464 } 3465 sx_sunlock(&allproc_lock); 3466 return npte; 3467} 3468#endif 3469 3470#if defined(DEBUG) 3471 3472static void pads(pmap_t pm); 3473void pmap_pvdump(vm_offset_t pa); 3474 3475/* print address space of pmap*/ 3476static void 3477pads(pm) 3478 pmap_t pm; 3479{ 3480 int i, j; 3481 vm_offset_t va; 3482 pt_entry_t *ptep; 3483 3484 if (pm == kernel_pmap) 3485 return; 3486 for (i = 0; i < NPDEPG; i++) 3487 if (pm->pm_pdir[i]) 3488 for (j = 0; j < NPTEPG; j++) { 3489 va = (i << PDRSHIFT) + (j << PAGE_SHIFT); 3490 if (pm == kernel_pmap && va < KERNBASE) 3491 continue; 3492 if (pm != kernel_pmap && va > UPT_MAX_ADDRESS) 3493 continue; 3494 ptep = pmap_pte_quick(pm, va); 3495 if (pmap_pte_v(ptep)) 3496 printf("%x:%x ", va, *ptep); 3497 }; 3498 3499} 3500 3501void 3502pmap_pvdump(pa) 3503 vm_offset_t pa; 3504{ 3505 pv_entry_t pv; 3506 vm_page_t m; 3507 3508 printf("pa %x", pa); 3509 m = PHYS_TO_VM_PAGE(pa); 3510 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { 3511 printf(" -> pmap %p, va %x", (void *)pv->pv_pmap, pv->pv_va); 3512 pads(pv->pv_pmap); 3513 } 3514 printf(" "); 3515} 3516#endif 3517