pmap.c revision 158226
1/*- 2 * Copyright (c) 1991 Regents of the University of California. 3 * All rights reserved. 4 * Copyright (c) 1994 John S. Dyson 5 * All rights reserved. 6 * Copyright (c) 1994 David Greenman 7 * All rights reserved. 8 * Copyright (c) 2005 Alan L. Cox <alc@cs.rice.edu> 9 * All rights reserved. 10 * 11 * This code is derived from software contributed to Berkeley by 12 * the Systems Programming Group of the University of Utah Computer 13 * Science Department and William Jolitz of UUNET Technologies Inc. 14 * 15 * Redistribution and use in source and binary forms, with or without 16 * modification, are permitted provided that the following conditions 17 * are met: 18 * 1. Redistributions of source code must retain the above copyright 19 * notice, this list of conditions and the following disclaimer. 20 * 2. Redistributions in binary form must reproduce the above copyright 21 * notice, this list of conditions and the following disclaimer in the 22 * documentation and/or other materials provided with the distribution. 23 * 3. All advertising materials mentioning features or use of this software 24 * must display the following acknowledgement: 25 * This product includes software developed by the University of 26 * California, Berkeley and its contributors. 27 * 4. Neither the name of the University nor the names of its contributors 28 * may be used to endorse or promote products derived from this software 29 * without specific prior written permission. 30 * 31 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 32 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 33 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 34 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 35 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 36 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 37 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 38 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 39 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 40 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 41 * SUCH DAMAGE. 42 * 43 * from: @(#)pmap.c 7.7 (Berkeley) 5/12/91 44 */ 45/*- 46 * Copyright (c) 2003 Networks Associates Technology, Inc. 47 * All rights reserved. 48 * 49 * This software was developed for the FreeBSD Project by Jake Burkholder, 50 * Safeport Network Services, and Network Associates Laboratories, the 51 * Security Research Division of Network Associates, Inc. under 52 * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA 53 * CHATS research program. 54 * 55 * Redistribution and use in source and binary forms, with or without 56 * modification, are permitted provided that the following conditions 57 * are met: 58 * 1. Redistributions of source code must retain the above copyright 59 * notice, this list of conditions and the following disclaimer. 60 * 2. Redistributions in binary form must reproduce the above copyright 61 * notice, this list of conditions and the following disclaimer in the 62 * documentation and/or other materials provided with the distribution. 63 * 64 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 65 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 66 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 67 * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 68 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 69 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 70 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 71 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 72 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 73 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 74 * SUCH DAMAGE. 75 */ 76 77#include <sys/cdefs.h> 78__FBSDID("$FreeBSD: head/sys/i386/i386/pmap.c 158226 2006-05-01 19:57:00Z peter $"); 79 80/* 81 * Manages physical address maps. 82 * 83 * In addition to hardware address maps, this 84 * module is called upon to provide software-use-only 85 * maps which may or may not be stored in the same 86 * form as hardware maps. These pseudo-maps are 87 * used to store intermediate results from copy 88 * operations to and from address spaces. 89 * 90 * Since the information managed by this module is 91 * also stored by the logical address mapping module, 92 * this module may throw away valid virtual-to-physical 93 * mappings at almost any time. However, invalidations 94 * of virtual-to-physical mappings must be done as 95 * requested. 96 * 97 * In order to cope with hardware architectures which 98 * make virtual-to-physical map invalidates expensive, 99 * this module may delay invalidate or reduced protection 100 * operations until such time as they are actually 101 * necessary. This module is given full information as 102 * to which processors are currently using which maps, 103 * and to when physical maps must be made correct. 104 */ 105 106#include "opt_cpu.h" 107#include "opt_pmap.h" 108#include "opt_msgbuf.h" 109#include "opt_smp.h" 110#include "opt_xbox.h" 111 112#include <sys/param.h> 113#include <sys/systm.h> 114#include <sys/kernel.h> 115#include <sys/lock.h> 116#include <sys/malloc.h> 117#include <sys/mman.h> 118#include <sys/msgbuf.h> 119#include <sys/mutex.h> 120#include <sys/proc.h> 121#include <sys/sx.h> 122#include <sys/vmmeter.h> 123#include <sys/sched.h> 124#include <sys/sysctl.h> 125#ifdef SMP 126#include <sys/smp.h> 127#endif 128 129#include <vm/vm.h> 130#include <vm/vm_param.h> 131#include <vm/vm_kern.h> 132#include <vm/vm_page.h> 133#include <vm/vm_map.h> 134#include <vm/vm_object.h> 135#include <vm/vm_extern.h> 136#include <vm/vm_pageout.h> 137#include <vm/vm_pager.h> 138#include <vm/uma.h> 139 140#include <machine/cpu.h> 141#include <machine/cputypes.h> 142#include <machine/md_var.h> 143#include <machine/pcb.h> 144#include <machine/specialreg.h> 145#ifdef SMP 146#include <machine/smp.h> 147#endif 148 149#ifdef XBOX 150#include <machine/xbox.h> 151#endif 152 153#if !defined(CPU_DISABLE_SSE) && defined(I686_CPU) 154#define CPU_ENABLE_SSE 155#endif 156 157#ifndef PMAP_SHPGPERPROC 158#define PMAP_SHPGPERPROC 200 159#endif 160 161#if defined(DIAGNOSTIC) 162#define PMAP_DIAGNOSTIC 163#endif 164 165#if !defined(PMAP_DIAGNOSTIC) 166#define PMAP_INLINE __inline 167#else 168#define PMAP_INLINE 169#endif 170 171#define PV_STATS 172#ifdef PV_STATS 173#define PV_STAT(x) do { x ; } while (0) 174#else 175#define PV_STAT(x) do { } while (0) 176#endif 177 178/* 179 * Get PDEs and PTEs for user/kernel address space 180 */ 181#define pmap_pde(m, v) (&((m)->pm_pdir[(vm_offset_t)(v) >> PDRSHIFT])) 182#define pdir_pde(m, v) (m[(vm_offset_t)(v) >> PDRSHIFT]) 183 184#define pmap_pde_v(pte) ((*(int *)pte & PG_V) != 0) 185#define 
pmap_pte_w(pte) ((*(int *)pte & PG_W) != 0) 186#define pmap_pte_m(pte) ((*(int *)pte & PG_M) != 0) 187#define pmap_pte_u(pte) ((*(int *)pte & PG_A) != 0) 188#define pmap_pte_v(pte) ((*(int *)pte & PG_V) != 0) 189 190#define pmap_pte_set_w(pte, v) ((v) ? atomic_set_int((u_int *)(pte), PG_W) : \ 191 atomic_clear_int((u_int *)(pte), PG_W)) 192#define pmap_pte_set_prot(pte, v) ((*(int *)pte &= ~PG_PROT), (*(int *)pte |= (v))) 193 194struct pmap kernel_pmap_store; 195LIST_HEAD(pmaplist, pmap); 196static struct pmaplist allpmaps; 197static struct mtx allpmaps_lock; 198 199vm_paddr_t avail_end; /* PA of last available physical page */ 200vm_offset_t virtual_avail; /* VA of first avail page (after kernel bss) */ 201vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */ 202int pgeflag = 0; /* PG_G or-in */ 203int pseflag = 0; /* PG_PS or-in */ 204 205static int nkpt; 206vm_offset_t kernel_vm_end; 207extern u_int32_t KERNend; 208 209#ifdef PAE 210static uma_zone_t pdptzone; 211#endif 212 213/* 214 * Data for the pv entry allocation mechanism 215 */ 216static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0; 217static int shpgperproc = PMAP_SHPGPERPROC; 218 219TAILQ_HEAD(,pv_chunk) pv_freechunks; /* Freelist of chunk pages */ 220struct pv_chunk *pv_chunkbase; /* KVA block for pv_chunks */ 221int pv_maxchunks; /* How many chunks we have KVA for */ 222int pv_nextindex; /* Where to map the next page */ 223 224/* 225 * All those kernel PT submaps that BSD is so fond of 226 */ 227struct sysmaps { 228 struct mtx lock; 229 pt_entry_t *CMAP1; 230 pt_entry_t *CMAP2; 231 caddr_t CADDR1; 232 caddr_t CADDR2; 233}; 234static struct sysmaps sysmaps_pcpu[MAXCPU]; 235pt_entry_t *CMAP1 = 0; 236static pt_entry_t *CMAP3; 237caddr_t CADDR1 = 0, ptvmmap = 0; 238static caddr_t CADDR3; 239struct msgbuf *msgbufp = 0; 240 241/* 242 * Crashdump maps. 
243 */ 244static caddr_t crashdumpmap; 245 246#ifdef SMP 247extern pt_entry_t *SMPpt; 248#endif 249static pt_entry_t *PMAP1 = 0, *PMAP2; 250static pt_entry_t *PADDR1 = 0, *PADDR2; 251#ifdef SMP 252static int PMAP1cpu; 253static int PMAP1changedcpu; 254SYSCTL_INT(_debug, OID_AUTO, PMAP1changedcpu, CTLFLAG_RD, 255 &PMAP1changedcpu, 0, 256 "Number of times pmap_pte_quick changed CPU with same PMAP1"); 257#endif 258static int PMAP1changed; 259SYSCTL_INT(_debug, OID_AUTO, PMAP1changed, CTLFLAG_RD, 260 &PMAP1changed, 0, 261 "Number of times pmap_pte_quick changed PMAP1"); 262static int PMAP1unchanged; 263SYSCTL_INT(_debug, OID_AUTO, PMAP1unchanged, CTLFLAG_RD, 264 &PMAP1unchanged, 0, 265 "Number of times pmap_pte_quick didn't change PMAP1"); 266static struct mtx PMAP2mutex; 267 268static void free_pv_entry(pmap_t pmap, pv_entry_t pv); 269static pv_entry_t get_pv_entry(pmap_t locked_pmap, int try); 270static void pmap_clear_ptes(vm_page_t m, int bit); 271 272static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t sva); 273static void pmap_remove_page(struct pmap *pmap, vm_offset_t va); 274static void pmap_remove_entry(struct pmap *pmap, vm_page_t m, 275 vm_offset_t va); 276static void pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m); 277static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, 278 vm_page_t m); 279 280static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags); 281 282static vm_page_t _pmap_allocpte(pmap_t pmap, unsigned ptepindex, int flags); 283static int _pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m); 284static pt_entry_t *pmap_pte_quick(pmap_t pmap, vm_offset_t va); 285static void pmap_pte_release(pt_entry_t *pte); 286static int pmap_unuse_pt(pmap_t, vm_offset_t); 287static vm_offset_t pmap_kmem_choose(vm_offset_t addr); 288#ifdef PAE 289static void *pmap_pdpt_allocf(uma_zone_t zone, int bytes, u_int8_t *flags, int wait); 290#endif 291 292CTASSERT(1 << PDESHIFT == sizeof(pd_entry_t)); 293CTASSERT(1 << PTESHIFT == sizeof(pt_entry_t)); 294 295/* 296 * Move the kernel virtual free pointer to the next 297 * 4MB. This is used to help improve performance 298 * by using a large (4MB) page for much of the kernel 299 * (.text, .data, .bss) 300 */ 301static vm_offset_t 302pmap_kmem_choose(vm_offset_t addr) 303{ 304 vm_offset_t newaddr = addr; 305 306#ifndef DISABLE_PSE 307 if (cpu_feature & CPUID_PSE) 308 newaddr = (addr + PDRMASK) & ~PDRMASK; 309#endif 310 return newaddr; 311} 312 313/* 314 * Bootstrap the system enough to run with virtual memory. 315 * 316 * On the i386 this is called after mapping has already been enabled 317 * and just syncs the pmap module with what has already been done. 318 * [We can't call it easily with mapping off since the kernel is not 319 * mapped with PA == VA, hence we would have to relocate every address 320 * from the linked base (virtual) address "KERNBASE" to the actual 321 * (physical) address starting relative to 0] 322 */ 323void 324pmap_bootstrap(firstaddr, loadaddr) 325 vm_paddr_t firstaddr; 326 vm_paddr_t loadaddr; 327{ 328 vm_offset_t va; 329 pt_entry_t *pte, *unused; 330 struct sysmaps *sysmaps; 331 int i; 332 333 /* 334 * XXX The calculation of virtual_avail is wrong. It's NKPT*PAGE_SIZE too 335 * large. It should instead be correctly calculated in locore.s and 336 * not based on 'first' (which is a physical address, not a virtual 337 * address, for the start of unused physical memory). 
The kernel 338 * page tables are NOT double mapped and thus should not be included 339 * in this calculation. 340 */ 341 virtual_avail = (vm_offset_t) KERNBASE + firstaddr; 342 virtual_avail = pmap_kmem_choose(virtual_avail); 343 344 virtual_end = VM_MAX_KERNEL_ADDRESS; 345 346 /* 347 * Initialize the kernel pmap (which is statically allocated). 348 */ 349 PMAP_LOCK_INIT(kernel_pmap); 350 kernel_pmap->pm_pdir = (pd_entry_t *) (KERNBASE + (u_int)IdlePTD); 351#ifdef PAE 352 kernel_pmap->pm_pdpt = (pdpt_entry_t *) (KERNBASE + (u_int)IdlePDPT); 353#endif 354 kernel_pmap->pm_active = -1; /* don't allow deactivation */ 355 TAILQ_INIT(&kernel_pmap->pm_pvchunk); 356 LIST_INIT(&allpmaps); 357 mtx_init(&allpmaps_lock, "allpmaps", NULL, MTX_SPIN); 358 mtx_lock_spin(&allpmaps_lock); 359 LIST_INSERT_HEAD(&allpmaps, kernel_pmap, pm_list); 360 mtx_unlock_spin(&allpmaps_lock); 361 nkpt = NKPT; 362 363 /* 364 * Reserve some special page table entries/VA space for temporary 365 * mapping of pages. 366 */ 367#define SYSMAP(c, p, v, n) \ 368 v = (c)va; va += ((n)*PAGE_SIZE); p = pte; pte += (n); 369 370 va = virtual_avail; 371 pte = vtopte(va); 372 373 /* 374 * CMAP1/CMAP2 are used for zeroing and copying pages. 375 * CMAP3 is used for the idle process page zeroing. 376 */ 377 for (i = 0; i < MAXCPU; i++) { 378 sysmaps = &sysmaps_pcpu[i]; 379 mtx_init(&sysmaps->lock, "SYSMAPS", NULL, MTX_DEF); 380 SYSMAP(caddr_t, sysmaps->CMAP1, sysmaps->CADDR1, 1) 381 SYSMAP(caddr_t, sysmaps->CMAP2, sysmaps->CADDR2, 1) 382 } 383 SYSMAP(caddr_t, CMAP1, CADDR1, 1) 384 SYSMAP(caddr_t, CMAP3, CADDR3, 1) 385 *CMAP3 = 0; 386 387 /* 388 * Crashdump maps. 389 */ 390 SYSMAP(caddr_t, unused, crashdumpmap, MAXDUMPPGS) 391 392 /* 393 * ptvmmap is used for reading arbitrary physical pages via /dev/mem. 394 */ 395 SYSMAP(caddr_t, unused, ptvmmap, 1) 396 397 /* 398 * msgbufp is used to map the system message buffer. 399 */ 400 SYSMAP(struct msgbuf *, unused, msgbufp, atop(round_page(MSGBUF_SIZE))) 401 402 /* 403 * ptemap is used for pmap_pte_quick 404 */ 405 SYSMAP(pt_entry_t *, PMAP1, PADDR1, 1); 406 SYSMAP(pt_entry_t *, PMAP2, PADDR2, 1); 407 408 mtx_init(&PMAP2mutex, "PMAP2", NULL, MTX_DEF); 409 410 virtual_avail = va; 411 412 *CMAP1 = 0; 413 414#ifdef XBOX 415 /* FIXME: This is gross, but needed for the XBOX. Since we are in such 416 * an early stadium, we cannot yet neatly map video memory ... :-( 417 * Better fixes are very welcome! */ 418 if (!arch_i386_is_xbox) 419#endif 420 for (i = 0; i < NKPT; i++) 421 PTD[i] = 0; 422 423 /* Turn on PG_G on kernel page(s) */ 424 pmap_set_pg(); 425} 426 427/* 428 * Set PG_G on kernel pages. Only the BSP calls this when SMP is turned on. 429 */ 430void 431pmap_set_pg(void) 432{ 433 pd_entry_t pdir; 434 pt_entry_t *pte; 435 vm_offset_t va, endva; 436 int i; 437 438 if (pgeflag == 0) 439 return; 440 441 i = KERNLOAD/NBPDR; 442 endva = KERNBASE + KERNend; 443 444 if (pseflag) { 445 va = KERNBASE + KERNLOAD; 446 while (va < endva) { 447 pdir = kernel_pmap->pm_pdir[KPTDI+i]; 448 pdir |= pgeflag; 449 kernel_pmap->pm_pdir[KPTDI+i] = PTD[KPTDI+i] = pdir; 450 invltlb(); /* Play it safe, invltlb() every time */ 451 i++; 452 va += NBPDR; 453 } 454 } else { 455 va = (vm_offset_t)btext; 456 while (va < endva) { 457 pte = vtopte(va); 458 if (*pte) 459 *pte |= pgeflag; 460 invltlb(); /* Play it safe, invltlb() every time */ 461 va += PAGE_SIZE; 462 } 463 } 464} 465 466/* 467 * Initialize a vm_page's machine-dependent fields. 
468 */ 469void 470pmap_page_init(vm_page_t m) 471{ 472 473 TAILQ_INIT(&m->md.pv_list); 474 m->md.pv_list_count = 0; 475} 476 477#ifdef PAE 478 479static MALLOC_DEFINE(M_PMAPPDPT, "pmap", "pmap pdpt"); 480 481static void * 482pmap_pdpt_allocf(uma_zone_t zone, int bytes, u_int8_t *flags, int wait) 483{ 484 *flags = UMA_SLAB_PRIV; 485 return (contigmalloc(PAGE_SIZE, M_PMAPPDPT, 0, 0x0ULL, 0xffffffffULL, 486 1, 0)); 487} 488#endif 489 490/* 491 * Initialize the pmap module. 492 * Called by vm_init, to initialize any structures that the pmap 493 * system needs to map virtual memory. 494 */ 495void 496pmap_init(void) 497{ 498 499 TAILQ_INIT(&pv_freechunks); 500 /* 501 * Initialize the address space (zone) for the pv entries. Set a 502 * high water mark so that the system can recover from excessive 503 * numbers of pv entries. 504 */ 505 TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc); 506 pv_entry_max = shpgperproc * maxproc + cnt.v_page_count; 507 TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max); 508 pv_entry_max = roundup(pv_entry_max, _NPCPV); 509 pv_entry_high_water = 9 * (pv_entry_max / 10); 510 511 pv_maxchunks = pv_entry_max / _NPCPV; 512 pv_chunkbase = (struct pv_chunk *)kmem_alloc_nofault(kernel_map, 513 PAGE_SIZE * pv_maxchunks); 514 if (pv_chunkbase == NULL) 515 panic("pmap_init: not enough kvm for pv chunks"); 516 pv_nextindex = 0; 517#ifdef PAE 518 pdptzone = uma_zcreate("PDPT", NPGPTD * sizeof(pdpt_entry_t), NULL, 519 NULL, NULL, NULL, (NPGPTD * sizeof(pdpt_entry_t)) - 1, 520 UMA_ZONE_VM | UMA_ZONE_NOFREE); 521 uma_zone_set_allocf(pdptzone, pmap_pdpt_allocf); 522#endif 523} 524 525 526SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD, 0, "VM/pmap parameters"); 527SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_max, CTLFLAG_RD, &pv_entry_max, 0, 528 "Max number of PV entries"); 529SYSCTL_INT(_vm_pmap, OID_AUTO, shpgperproc, CTLFLAG_RD, &shpgperproc, 0, 530 "Page share factor per proc"); 531 532/*************************************************** 533 * Low level helper routines..... 534 ***************************************************/ 535 536#ifdef SMP 537/* 538 * For SMP, these functions have to use the IPI mechanism for coherence. 539 */ 540void 541pmap_invalidate_page(pmap_t pmap, vm_offset_t va) 542{ 543 u_int cpumask; 544 u_int other_cpus; 545 546 if (smp_started) { 547 if (!(read_eflags() & PSL_I)) 548 panic("%s: interrupts disabled", __func__); 549 mtx_lock_spin(&smp_ipi_mtx); 550 } else 551 critical_enter(); 552 /* 553 * We need to disable interrupt preemption but MUST NOT have 554 * interrupts disabled here. 
555 * XXX we may need to hold schedlock to get a coherent pm_active 556 * XXX critical sections disable interrupts again 557 */ 558 if (pmap == kernel_pmap || pmap->pm_active == all_cpus) { 559 invlpg(va); 560 smp_invlpg(va); 561 } else { 562 cpumask = PCPU_GET(cpumask); 563 other_cpus = PCPU_GET(other_cpus); 564 if (pmap->pm_active & cpumask) 565 invlpg(va); 566 if (pmap->pm_active & other_cpus) 567 smp_masked_invlpg(pmap->pm_active & other_cpus, va); 568 } 569 if (smp_started) 570 mtx_unlock_spin(&smp_ipi_mtx); 571 else 572 critical_exit(); 573} 574 575void 576pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) 577{ 578 u_int cpumask; 579 u_int other_cpus; 580 vm_offset_t addr; 581 582 if (smp_started) { 583 if (!(read_eflags() & PSL_I)) 584 panic("%s: interrupts disabled", __func__); 585 mtx_lock_spin(&smp_ipi_mtx); 586 } else 587 critical_enter(); 588 /* 589 * We need to disable interrupt preemption but MUST NOT have 590 * interrupts disabled here. 591 * XXX we may need to hold schedlock to get a coherent pm_active 592 * XXX critical sections disable interrupts again 593 */ 594 if (pmap == kernel_pmap || pmap->pm_active == all_cpus) { 595 for (addr = sva; addr < eva; addr += PAGE_SIZE) 596 invlpg(addr); 597 smp_invlpg_range(sva, eva); 598 } else { 599 cpumask = PCPU_GET(cpumask); 600 other_cpus = PCPU_GET(other_cpus); 601 if (pmap->pm_active & cpumask) 602 for (addr = sva; addr < eva; addr += PAGE_SIZE) 603 invlpg(addr); 604 if (pmap->pm_active & other_cpus) 605 smp_masked_invlpg_range(pmap->pm_active & other_cpus, 606 sva, eva); 607 } 608 if (smp_started) 609 mtx_unlock_spin(&smp_ipi_mtx); 610 else 611 critical_exit(); 612} 613 614void 615pmap_invalidate_all(pmap_t pmap) 616{ 617 u_int cpumask; 618 u_int other_cpus; 619 620 if (smp_started) { 621 if (!(read_eflags() & PSL_I)) 622 panic("%s: interrupts disabled", __func__); 623 mtx_lock_spin(&smp_ipi_mtx); 624 } else 625 critical_enter(); 626 /* 627 * We need to disable interrupt preemption but MUST NOT have 628 * interrupts disabled here. 629 * XXX we may need to hold schedlock to get a coherent pm_active 630 * XXX critical sections disable interrupts again 631 */ 632 if (pmap == kernel_pmap || pmap->pm_active == all_cpus) { 633 invltlb(); 634 smp_invltlb(); 635 } else { 636 cpumask = PCPU_GET(cpumask); 637 other_cpus = PCPU_GET(other_cpus); 638 if (pmap->pm_active & cpumask) 639 invltlb(); 640 if (pmap->pm_active & other_cpus) 641 smp_masked_invltlb(pmap->pm_active & other_cpus); 642 } 643 if (smp_started) 644 mtx_unlock_spin(&smp_ipi_mtx); 645 else 646 critical_exit(); 647} 648#else /* !SMP */ 649/* 650 * Normal, non-SMP, 486+ invalidation functions. 651 * We inline these within pmap.c for speed. 652 */ 653PMAP_INLINE void 654pmap_invalidate_page(pmap_t pmap, vm_offset_t va) 655{ 656 657 if (pmap == kernel_pmap || pmap->pm_active) 658 invlpg(va); 659} 660 661PMAP_INLINE void 662pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) 663{ 664 vm_offset_t addr; 665 666 if (pmap == kernel_pmap || pmap->pm_active) 667 for (addr = sva; addr < eva; addr += PAGE_SIZE) 668 invlpg(addr); 669} 670 671PMAP_INLINE void 672pmap_invalidate_all(pmap_t pmap) 673{ 674 675 if (pmap == kernel_pmap || pmap->pm_active) 676 invltlb(); 677} 678#endif /* !SMP */ 679 680/* 681 * Are we current address space or kernel? N.B. We return FALSE when 682 * a pmap's page table is in use because a kernel thread is borrowing 683 * it. 
The borrowed page table can change spontaneously, making any 684 * dependence on its continued use subject to a race condition. 685 */ 686static __inline int 687pmap_is_current(pmap_t pmap) 688{ 689 690 return (pmap == kernel_pmap || 691 (pmap == vmspace_pmap(curthread->td_proc->p_vmspace) && 692 (pmap->pm_pdir[PTDPTDI] & PG_FRAME) == (PTDpde[0] & PG_FRAME))); 693} 694 695/* 696 * If the given pmap is not the current or kernel pmap, the returned pte must 697 * be released by passing it to pmap_pte_release(). 698 */ 699pt_entry_t * 700pmap_pte(pmap_t pmap, vm_offset_t va) 701{ 702 pd_entry_t newpf; 703 pd_entry_t *pde; 704 705 pde = pmap_pde(pmap, va); 706 if (*pde & PG_PS) 707 return (pde); 708 if (*pde != 0) { 709 /* are we current address space or kernel? */ 710 if (pmap_is_current(pmap)) 711 return (vtopte(va)); 712 mtx_lock(&PMAP2mutex); 713 newpf = *pde & PG_FRAME; 714 if ((*PMAP2 & PG_FRAME) != newpf) { 715 *PMAP2 = newpf | PG_RW | PG_V | PG_A | PG_M; 716 pmap_invalidate_page(kernel_pmap, (vm_offset_t)PADDR2); 717 } 718 return (PADDR2 + (i386_btop(va) & (NPTEPG - 1))); 719 } 720 return (0); 721} 722 723/* 724 * Releases a pte that was obtained from pmap_pte(). Be prepared for the pte 725 * being NULL. 726 */ 727static __inline void 728pmap_pte_release(pt_entry_t *pte) 729{ 730 731 if ((pt_entry_t *)((vm_offset_t)pte & ~PAGE_MASK) == PADDR2) 732 mtx_unlock(&PMAP2mutex); 733} 734 735static __inline void 736invlcaddr(void *caddr) 737{ 738 739 invlpg((u_int)caddr); 740} 741 742/* 743 * Super fast pmap_pte routine best used when scanning 744 * the pv lists. This eliminates many coarse-grained 745 * invltlb calls. Note that many of the pv list 746 * scans are across different pmaps. It is very wasteful 747 * to do an entire invltlb for checking a single mapping. 748 * 749 * If the given pmap is not the current pmap, vm_page_queue_mtx 750 * must be held and curthread pinned to a CPU. 751 */ 752static pt_entry_t * 753pmap_pte_quick(pmap_t pmap, vm_offset_t va) 754{ 755 pd_entry_t newpf; 756 pd_entry_t *pde; 757 758 pde = pmap_pde(pmap, va); 759 if (*pde & PG_PS) 760 return (pde); 761 if (*pde != 0) { 762 /* are we current address space or kernel? */ 763 if (pmap_is_current(pmap)) 764 return (vtopte(va)); 765 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 766 KASSERT(curthread->td_pinned > 0, ("curthread not pinned")); 767 newpf = *pde & PG_FRAME; 768 if ((*PMAP1 & PG_FRAME) != newpf) { 769 *PMAP1 = newpf | PG_RW | PG_V | PG_A | PG_M; 770#ifdef SMP 771 PMAP1cpu = PCPU_GET(cpuid); 772#endif 773 invlcaddr(PADDR1); 774 PMAP1changed++; 775 } else 776#ifdef SMP 777 if (PMAP1cpu != PCPU_GET(cpuid)) { 778 PMAP1cpu = PCPU_GET(cpuid); 779 invlcaddr(PADDR1); 780 PMAP1changedcpu++; 781 } else 782#endif 783 PMAP1unchanged++; 784 return (PADDR1 + (i386_btop(va) & (NPTEPG - 1))); 785 } 786 return (0); 787} 788 789/* 790 * Routine: pmap_extract 791 * Function: 792 * Extract the physical page address associated 793 * with the given map/virtual_address pair. 
 */
vm_paddr_t
pmap_extract(pmap_t pmap, vm_offset_t va)
{
	vm_paddr_t rtval;
	pt_entry_t *pte;
	pd_entry_t pde;

	rtval = 0;
	PMAP_LOCK(pmap);
	pde = pmap->pm_pdir[va >> PDRSHIFT];
	if (pde != 0) {
		if ((pde & PG_PS) != 0) {
			rtval = (pde & ~PDRMASK) | (va & PDRMASK);
			PMAP_UNLOCK(pmap);
			return rtval;
		}
		pte = pmap_pte(pmap, va);
		rtval = (*pte & PG_FRAME) | (va & PAGE_MASK);
		pmap_pte_release(pte);
	}
	PMAP_UNLOCK(pmap);
	return (rtval);
}

/*
 *	Routine:	pmap_extract_and_hold
 *	Function:
 *		Atomically extract and hold the physical page
 *		with the given pmap and virtual address pair
 *		if that mapping permits the given protection.
 */
vm_page_t
pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
{
	pd_entry_t pde;
	pt_entry_t pte;
	vm_page_t m;

	m = NULL;
	vm_page_lock_queues();
	PMAP_LOCK(pmap);
	pde = *pmap_pde(pmap, va);
	if (pde != 0) {
		if (pde & PG_PS) {
			if ((pde & PG_RW) || (prot & VM_PROT_WRITE) == 0) {
				m = PHYS_TO_VM_PAGE((pde & ~PDRMASK) |
				    (va & PDRMASK));
				vm_page_hold(m);
			}
		} else {
			sched_pin();
			pte = *pmap_pte_quick(pmap, va);
			if (pte != 0 &&
			    ((pte & PG_RW) || (prot & VM_PROT_WRITE) == 0)) {
				m = PHYS_TO_VM_PAGE(pte & PG_FRAME);
				vm_page_hold(m);
			}
			sched_unpin();
		}
	}
	vm_page_unlock_queues();
	PMAP_UNLOCK(pmap);
	return (m);
}

/***************************************************
 * Low level mapping routines.....
 ***************************************************/

/*
 * Add a wired page to the kva.
 * Note: not SMP coherent.
 */
PMAP_INLINE void
pmap_kenter(vm_offset_t va, vm_paddr_t pa)
{
	pt_entry_t *pte;

	pte = vtopte(va);
	pte_store(pte, pa | PG_RW | PG_V | pgeflag);
}

/*
 * Remove a page from the kernel pagetables.
 * Note: not SMP coherent.
 */
PMAP_INLINE void
pmap_kremove(vm_offset_t va)
{
	pt_entry_t *pte;

	pte = vtopte(va);
	pte_clear(pte);
}

/*
 *	Used to map a range of physical addresses into kernel
 *	virtual address space.
 *
 *	The value passed in '*virt' is a suggested virtual address for
 *	the mapping. Architectures which can support a direct-mapped
 *	physical to virtual region can return the appropriate address
 *	within that region, leaving '*virt' unchanged.  Other
 *	architectures should map the pages starting at '*virt' and
 *	update '*virt' with the first usable address after the mapped
 *	region.
 */
vm_offset_t
pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
{
	vm_offset_t va, sva;

	va = sva = *virt;
	while (start < end) {
		pmap_kenter(va, start);
		va += PAGE_SIZE;
		start += PAGE_SIZE;
	}
	pmap_invalidate_range(kernel_pmap, sva, va);
	*virt = va;
	return (sva);
}
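/*
 * Illustrative sketch (not part of the original file, never compiled):
 * how a caller that already owns a free page of kernel virtual address
 * space might use the single-page primitives above.  The names
 * "example_va" and "example_m" are hypothetical.  pmap_kenter() and
 * pmap_kremove() are not SMP coherent on their own, so the sketch
 * performs its own invalidation, mirroring what pmap_qenter() and
 * pmap_qremove() do for ranges.
 */
#if 0
static void
pmap_kenter_example(vm_offset_t example_va, vm_page_t example_m)
{

	/* Map the page and make the new mapping visible to this pmap. */
	pmap_kenter(example_va, VM_PAGE_TO_PHYS(example_m));
	pmap_invalidate_page(kernel_pmap, example_va);

	/* ... access the page through the KVA window at example_va ... */

	/* Tear the temporary mapping down again. */
	pmap_kremove(example_va);
	pmap_invalidate_page(kernel_pmap, example_va);
}
#endif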
/*
 * Add a list of wired pages to the kva
 * this routine is only used for temporary
 * kernel mappings that do not need to have
 * page modification or references recorded.
 * Note that old mappings are simply written
 * over.  The page *must* be wired.
 * Note: SMP coherent.  Uses a ranged shootdown IPI.
 */
void
pmap_qenter(vm_offset_t sva, vm_page_t *m, int count)
{
	vm_offset_t va;

	va = sva;
	while (count-- > 0) {
		pmap_kenter(va, VM_PAGE_TO_PHYS(*m));
		va += PAGE_SIZE;
		m++;
	}
	pmap_invalidate_range(kernel_pmap, sva, va);
}

/*
 * This routine tears out page mappings from the
 * kernel -- it is meant only for temporary mappings.
 * Note: SMP coherent.  Uses a ranged shootdown IPI.
 */
void
pmap_qremove(vm_offset_t sva, int count)
{
	vm_offset_t va;

	va = sva;
	while (count-- > 0) {
		pmap_kremove(va);
		va += PAGE_SIZE;
	}
	pmap_invalidate_range(kernel_pmap, sva, va);
}

/***************************************************
 * Page table page management routines.....
 ***************************************************/

/*
 * This routine unholds page table pages, and if the hold count
 * drops to zero, then it decrements the wire count.
 */
static PMAP_INLINE int
pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m)
{

	--m->wire_count;
	if (m->wire_count == 0)
		return _pmap_unwire_pte_hold(pmap, m);
	else
		return 0;
}

static int
_pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m)
{
	vm_offset_t pteva;

	/*
	 * unmap the page table page
	 */
	pmap->pm_pdir[m->pindex] = 0;
	--pmap->pm_stats.resident_count;

	/*
	 * Do an invltlb to make the invalidated mapping
	 * take effect immediately.
	 */
	pteva = VM_MAXUSER_ADDRESS + i386_ptob(m->pindex);
	pmap_invalidate_page(pmap, pteva);

	vm_page_free_zero(m);
	atomic_subtract_int(&cnt.v_wire_count, 1);
	return 1;
}

/*
 * After removing a page table entry, this routine is used to
 * conditionally free the page, and manage the hold/wire counts.
 */
static int
pmap_unuse_pt(pmap_t pmap, vm_offset_t va)
{
	pd_entry_t ptepde;
	vm_page_t mpte;

	if (va >= VM_MAXUSER_ADDRESS)
		return 0;
	ptepde = *pmap_pde(pmap, va);
	mpte = PHYS_TO_VM_PAGE(ptepde & PG_FRAME);
	return pmap_unwire_pte_hold(pmap, mpte);
}

void
pmap_pinit0(pmap)
	struct pmap *pmap;
{

	PMAP_LOCK_INIT(pmap);
	pmap->pm_pdir = (pd_entry_t *)(KERNBASE + (vm_offset_t)IdlePTD);
#ifdef PAE
	pmap->pm_pdpt = (pdpt_entry_t *)(KERNBASE + (vm_offset_t)IdlePDPT);
#endif
	pmap->pm_active = 0;
	PCPU_SET(curpmap, pmap);
	TAILQ_INIT(&pmap->pm_pvchunk);
	bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
	mtx_lock_spin(&allpmaps_lock);
	LIST_INSERT_HEAD(&allpmaps, pmap, pm_list);
	mtx_unlock_spin(&allpmaps_lock);
}

/*
 * Initialize a preallocated and zeroed pmap structure,
 * such as one in a vmspace structure.
 */
void
pmap_pinit(pmap)
	register struct pmap *pmap;
{
	vm_page_t m, ptdpg[NPGPTD];
	vm_paddr_t pa;
	static int color;
	int i;

	PMAP_LOCK_INIT(pmap);

	/*
	 * No need to allocate page table space yet but we do need a valid
	 * page directory table.
1056 */ 1057 if (pmap->pm_pdir == NULL) { 1058 pmap->pm_pdir = (pd_entry_t *)kmem_alloc_nofault(kernel_map, 1059 NBPTD); 1060#ifdef PAE 1061 pmap->pm_pdpt = uma_zalloc(pdptzone, M_WAITOK | M_ZERO); 1062 KASSERT(((vm_offset_t)pmap->pm_pdpt & 1063 ((NPGPTD * sizeof(pdpt_entry_t)) - 1)) == 0, 1064 ("pmap_pinit: pdpt misaligned")); 1065 KASSERT(pmap_kextract((vm_offset_t)pmap->pm_pdpt) < (4ULL<<30), 1066 ("pmap_pinit: pdpt above 4g")); 1067#endif 1068 } 1069 1070 /* 1071 * allocate the page directory page(s) 1072 */ 1073 for (i = 0; i < NPGPTD;) { 1074 m = vm_page_alloc(NULL, color++, 1075 VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | 1076 VM_ALLOC_ZERO); 1077 if (m == NULL) 1078 VM_WAIT; 1079 else { 1080 ptdpg[i++] = m; 1081 } 1082 } 1083 1084 pmap_qenter((vm_offset_t)pmap->pm_pdir, ptdpg, NPGPTD); 1085 1086 for (i = 0; i < NPGPTD; i++) { 1087 if ((ptdpg[i]->flags & PG_ZERO) == 0) 1088 bzero(pmap->pm_pdir + (i * NPDEPG), PAGE_SIZE); 1089 } 1090 1091 mtx_lock_spin(&allpmaps_lock); 1092 LIST_INSERT_HEAD(&allpmaps, pmap, pm_list); 1093 mtx_unlock_spin(&allpmaps_lock); 1094 /* Wire in kernel global address entries. */ 1095 /* XXX copies current process, does not fill in MPPTDI */ 1096 bcopy(PTD + KPTDI, pmap->pm_pdir + KPTDI, nkpt * sizeof(pd_entry_t)); 1097#ifdef SMP 1098 pmap->pm_pdir[MPPTDI] = PTD[MPPTDI]; 1099#endif 1100 1101 /* install self-referential address mapping entry(s) */ 1102 for (i = 0; i < NPGPTD; i++) { 1103 pa = VM_PAGE_TO_PHYS(ptdpg[i]); 1104 pmap->pm_pdir[PTDPTDI + i] = pa | PG_V | PG_RW | PG_A | PG_M; 1105#ifdef PAE 1106 pmap->pm_pdpt[i] = pa | PG_V; 1107#endif 1108 } 1109 1110 pmap->pm_active = 0; 1111 TAILQ_INIT(&pmap->pm_pvchunk); 1112 bzero(&pmap->pm_stats, sizeof pmap->pm_stats); 1113} 1114 1115/* 1116 * this routine is called if the page table page is not 1117 * mapped correctly. 1118 */ 1119static vm_page_t 1120_pmap_allocpte(pmap_t pmap, unsigned ptepindex, int flags) 1121{ 1122 vm_paddr_t ptepa; 1123 vm_page_t m; 1124 1125 KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT || 1126 (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK, 1127 ("_pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK")); 1128 1129 /* 1130 * Allocate a page table page. 1131 */ 1132 if ((m = vm_page_alloc(NULL, ptepindex, VM_ALLOC_NOOBJ | 1133 VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) { 1134 if (flags & M_WAITOK) { 1135 PMAP_UNLOCK(pmap); 1136 vm_page_unlock_queues(); 1137 VM_WAIT; 1138 vm_page_lock_queues(); 1139 PMAP_LOCK(pmap); 1140 } 1141 1142 /* 1143 * Indicate the need to retry. While waiting, the page table 1144 * page may have been allocated. 1145 */ 1146 return (NULL); 1147 } 1148 if ((m->flags & PG_ZERO) == 0) 1149 pmap_zero_page(m); 1150 1151 /* 1152 * Map the pagetable page into the process address space, if 1153 * it isn't already there. 
1154 */ 1155 1156 pmap->pm_stats.resident_count++; 1157 1158 ptepa = VM_PAGE_TO_PHYS(m); 1159 pmap->pm_pdir[ptepindex] = 1160 (pd_entry_t) (ptepa | PG_U | PG_RW | PG_V | PG_A | PG_M); 1161 1162 return m; 1163} 1164 1165static vm_page_t 1166pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags) 1167{ 1168 unsigned ptepindex; 1169 pd_entry_t ptepa; 1170 vm_page_t m; 1171 1172 KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT || 1173 (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK, 1174 ("pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK")); 1175 1176 /* 1177 * Calculate pagetable page index 1178 */ 1179 ptepindex = va >> PDRSHIFT; 1180retry: 1181 /* 1182 * Get the page directory entry 1183 */ 1184 ptepa = pmap->pm_pdir[ptepindex]; 1185 1186 /* 1187 * This supports switching from a 4MB page to a 1188 * normal 4K page. 1189 */ 1190 if (ptepa & PG_PS) { 1191 pmap->pm_pdir[ptepindex] = 0; 1192 ptepa = 0; 1193 pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE; 1194 pmap_invalidate_all(kernel_pmap); 1195 } 1196 1197 /* 1198 * If the page table page is mapped, we just increment the 1199 * hold count, and activate it. 1200 */ 1201 if (ptepa) { 1202 m = PHYS_TO_VM_PAGE(ptepa); 1203 m->wire_count++; 1204 } else { 1205 /* 1206 * Here if the pte page isn't mapped, or if it has 1207 * been deallocated. 1208 */ 1209 m = _pmap_allocpte(pmap, ptepindex, flags); 1210 if (m == NULL && (flags & M_WAITOK)) 1211 goto retry; 1212 } 1213 return (m); 1214} 1215 1216 1217/*************************************************** 1218* Pmap allocation/deallocation routines. 1219 ***************************************************/ 1220 1221#ifdef SMP 1222/* 1223 * Deal with a SMP shootdown of other users of the pmap that we are 1224 * trying to dispose of. This can be a bit hairy. 1225 */ 1226static u_int *lazymask; 1227static u_int lazyptd; 1228static volatile u_int lazywait; 1229 1230void pmap_lazyfix_action(void); 1231 1232void 1233pmap_lazyfix_action(void) 1234{ 1235 u_int mymask = PCPU_GET(cpumask); 1236 1237#ifdef COUNT_IPIS 1238 *ipi_lazypmap_counts[PCPU_GET(cpuid)]++; 1239#endif 1240 if (rcr3() == lazyptd) 1241 load_cr3(PCPU_GET(curpcb)->pcb_cr3); 1242 atomic_clear_int(lazymask, mymask); 1243 atomic_store_rel_int(&lazywait, 1); 1244} 1245 1246static void 1247pmap_lazyfix_self(u_int mymask) 1248{ 1249 1250 if (rcr3() == lazyptd) 1251 load_cr3(PCPU_GET(curpcb)->pcb_cr3); 1252 atomic_clear_int(lazymask, mymask); 1253} 1254 1255 1256static void 1257pmap_lazyfix(pmap_t pmap) 1258{ 1259 u_int mymask; 1260 u_int mask; 1261 register u_int spins; 1262 1263 while ((mask = pmap->pm_active) != 0) { 1264 spins = 50000000; 1265 mask = mask & -mask; /* Find least significant set bit */ 1266 mtx_lock_spin(&smp_ipi_mtx); 1267#ifdef PAE 1268 lazyptd = vtophys(pmap->pm_pdpt); 1269#else 1270 lazyptd = vtophys(pmap->pm_pdir); 1271#endif 1272 mymask = PCPU_GET(cpumask); 1273 if (mask == mymask) { 1274 lazymask = &pmap->pm_active; 1275 pmap_lazyfix_self(mymask); 1276 } else { 1277 atomic_store_rel_int((u_int *)&lazymask, 1278 (u_int)&pmap->pm_active); 1279 atomic_store_rel_int(&lazywait, 0); 1280 ipi_selected(mask, IPI_LAZYPMAP); 1281 while (lazywait == 0) { 1282 ia32_pause(); 1283 if (--spins == 0) 1284 break; 1285 } 1286 } 1287 mtx_unlock_spin(&smp_ipi_mtx); 1288 if (spins == 0) 1289 printf("pmap_lazyfix: spun for 50000000\n"); 1290 } 1291} 1292 1293#else /* SMP */ 1294 1295/* 1296 * Cleaning up on uniprocessor is easy. 
For various reasons, we're 1297 * unlikely to have to even execute this code, including the fact 1298 * that the cleanup is deferred until the parent does a wait(2), which 1299 * means that another userland process has run. 1300 */ 1301static void 1302pmap_lazyfix(pmap_t pmap) 1303{ 1304 u_int cr3; 1305 1306 cr3 = vtophys(pmap->pm_pdir); 1307 if (cr3 == rcr3()) { 1308 load_cr3(PCPU_GET(curpcb)->pcb_cr3); 1309 pmap->pm_active &= ~(PCPU_GET(cpumask)); 1310 } 1311} 1312#endif /* SMP */ 1313 1314/* 1315 * Release any resources held by the given physical map. 1316 * Called when a pmap initialized by pmap_pinit is being released. 1317 * Should only be called if the map contains no valid mappings. 1318 */ 1319void 1320pmap_release(pmap_t pmap) 1321{ 1322 vm_page_t m, ptdpg[NPGPTD]; 1323 int i; 1324 1325 KASSERT(pmap->pm_stats.resident_count == 0, 1326 ("pmap_release: pmap resident count %ld != 0", 1327 pmap->pm_stats.resident_count)); 1328 1329 pmap_lazyfix(pmap); 1330 mtx_lock_spin(&allpmaps_lock); 1331 LIST_REMOVE(pmap, pm_list); 1332 mtx_unlock_spin(&allpmaps_lock); 1333 1334 for (i = 0; i < NPGPTD; i++) 1335 ptdpg[i] = PHYS_TO_VM_PAGE(pmap->pm_pdir[PTDPTDI + i]); 1336 1337 bzero(pmap->pm_pdir + PTDPTDI, (nkpt + NPGPTD) * 1338 sizeof(*pmap->pm_pdir)); 1339#ifdef SMP 1340 pmap->pm_pdir[MPPTDI] = 0; 1341#endif 1342 1343 pmap_qremove((vm_offset_t)pmap->pm_pdir, NPGPTD); 1344 1345 vm_page_lock_queues(); 1346 for (i = 0; i < NPGPTD; i++) { 1347 m = ptdpg[i]; 1348#ifdef PAE 1349 KASSERT(VM_PAGE_TO_PHYS(m) == (pmap->pm_pdpt[i] & PG_FRAME), 1350 ("pmap_release: got wrong ptd page")); 1351#endif 1352 m->wire_count--; 1353 atomic_subtract_int(&cnt.v_wire_count, 1); 1354 vm_page_free_zero(m); 1355 } 1356 vm_page_unlock_queues(); 1357 PMAP_LOCK_DESTROY(pmap); 1358} 1359 1360static int 1361kvm_size(SYSCTL_HANDLER_ARGS) 1362{ 1363 unsigned long ksize = VM_MAX_KERNEL_ADDRESS - KERNBASE; 1364 1365 return sysctl_handle_long(oidp, &ksize, 0, req); 1366} 1367SYSCTL_PROC(_vm, OID_AUTO, kvm_size, CTLTYPE_LONG|CTLFLAG_RD, 1368 0, 0, kvm_size, "IU", "Size of KVM"); 1369 1370static int 1371kvm_free(SYSCTL_HANDLER_ARGS) 1372{ 1373 unsigned long kfree = VM_MAX_KERNEL_ADDRESS - kernel_vm_end; 1374 1375 return sysctl_handle_long(oidp, &kfree, 0, req); 1376} 1377SYSCTL_PROC(_vm, OID_AUTO, kvm_free, CTLTYPE_LONG|CTLFLAG_RD, 1378 0, 0, kvm_free, "IU", "Amount of KVM free"); 1379 1380/* 1381 * grow the number of kernel page table entries, if needed 1382 */ 1383void 1384pmap_growkernel(vm_offset_t addr) 1385{ 1386 struct pmap *pmap; 1387 vm_paddr_t ptppaddr; 1388 vm_page_t nkpg; 1389 pd_entry_t newpdir; 1390 pt_entry_t *pde; 1391 1392 mtx_assert(&kernel_map->system_mtx, MA_OWNED); 1393 if (kernel_vm_end == 0) { 1394 kernel_vm_end = KERNBASE; 1395 nkpt = 0; 1396 while (pdir_pde(PTD, kernel_vm_end)) { 1397 kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1); 1398 nkpt++; 1399 if (kernel_vm_end - 1 >= kernel_map->max_offset) { 1400 kernel_vm_end = kernel_map->max_offset; 1401 break; 1402 } 1403 } 1404 } 1405 addr = roundup2(addr, PAGE_SIZE * NPTEPG); 1406 if (addr - 1 >= kernel_map->max_offset) 1407 addr = kernel_map->max_offset; 1408 while (kernel_vm_end < addr) { 1409 if (pdir_pde(PTD, kernel_vm_end)) { 1410 kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1); 1411 if (kernel_vm_end - 1 >= kernel_map->max_offset) { 1412 kernel_vm_end = kernel_map->max_offset; 1413 break; 1414 } 1415 continue; 1416 } 1417 1418 /* 1419 * This index is bogus, but out of the way 1420 */ 
		nkpg = vm_page_alloc(NULL, nkpt,
		    VM_ALLOC_NOOBJ | VM_ALLOC_SYSTEM | VM_ALLOC_WIRED);
		if (!nkpg)
			panic("pmap_growkernel: no memory to grow kernel");

		nkpt++;

		pmap_zero_page(nkpg);
		ptppaddr = VM_PAGE_TO_PHYS(nkpg);
		newpdir = (pd_entry_t) (ptppaddr | PG_V | PG_RW | PG_A | PG_M);
		pdir_pde(PTD, kernel_vm_end) = newpdir;

		mtx_lock_spin(&allpmaps_lock);
		LIST_FOREACH(pmap, &allpmaps, pm_list) {
			pde = pmap_pde(pmap, kernel_vm_end);
			pde_store(pde, newpdir);
		}
		mtx_unlock_spin(&allpmaps_lock);
		kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
		if (kernel_vm_end - 1 >= kernel_map->max_offset) {
			kernel_vm_end = kernel_map->max_offset;
			break;
		}
	}
}


/***************************************************
 * page management routines.
 ***************************************************/

CTASSERT(sizeof(struct pv_chunk) == PAGE_SIZE);
CTASSERT(_NPCM == 11);

static __inline struct pv_chunk *
pv_to_chunk(pv_entry_t pv)
{

	return (struct pv_chunk *)((uintptr_t)pv & ~(uintptr_t)PAGE_MASK);
}

#define PV_PMAP(pv) (pv_to_chunk(pv)->pc_pmap)

#define	PC_FREE0_9	0xfffffffful	/* Free values for index 0 through 9 */
#define	PC_FREE10	0x0000fffful	/* Free values for index 10 */

static uint64_t pc_freemask[11] = {
	PC_FREE0_9, PC_FREE0_9, PC_FREE0_9,
	PC_FREE0_9, PC_FREE0_9, PC_FREE0_9,
	PC_FREE0_9, PC_FREE0_9, PC_FREE0_9,
	PC_FREE0_9, PC_FREE10
};

SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_count, CTLFLAG_RD, &pv_entry_count, 0,
	"Current number of pv entries");

#ifdef PV_STATS
static int pc_chunk_count, pc_chunk_allocs, pc_chunk_frees, pc_chunk_tryfail;
static int pc_chunk_spare;

SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_count, CTLFLAG_RD, &pc_chunk_count, 0,
	"Current number of pv entry chunks");
SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_allocs, CTLFLAG_RD, &pc_chunk_allocs, 0,
	"Current number of pv entry chunks allocated");
SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_frees, CTLFLAG_RD, &pc_chunk_frees, 0,
	"Current number of pv entry chunks frees");
SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_tryfail, CTLFLAG_RD, &pc_chunk_tryfail, 0,
	"Number of times tried to get a chunk page but failed.");
SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_spare, CTLFLAG_RD, &pc_chunk_spare, 0,
	"Current number of spare pv entry chunks allocated");

static long pv_entry_frees, pv_entry_allocs;
static int pv_entry_spare;

SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_frees, CTLFLAG_RD, &pv_entry_frees, 0,
	"Current number of pv entry frees");
SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_allocs, CTLFLAG_RD, &pv_entry_allocs, 0,
	"Current number of pv entry allocs");
SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_spare, CTLFLAG_RD, &pv_entry_spare, 0,
	"Current number of spare pv entries");

static int pmap_collect_inactive, pmap_collect_active;

SYSCTL_INT(_vm_pmap, OID_AUTO, pmap_collect_inactive, CTLFLAG_RD, &pmap_collect_inactive, 0,
	"Current number times pmap_collect called on inactive queue");
SYSCTL_INT(_vm_pmap, OID_AUTO, pmap_collect_active, CTLFLAG_RD, &pmap_collect_active, 0,
	"Current number times pmap_collect called on active queue");
#endif
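/*
 * Illustrative sketch (not part of the original file, never compiled):
 * the pc_map[] bookkeeping used by free_pv_entry() and get_pv_entry()
 * below.  Each bit of pc_map tracks one pv_entry within the page-sized
 * chunk; the masks above leave ten full 32-bit words plus 16 bits in
 * word 10, i.e. 336 usable entries per chunk.  For example, the entry
 * at index 40 is tracked by pc_map word 40 / 32 = 1, bit 40 % 32 = 8.
 */
#if 0
static __inline void
pv_chunk_mark_free_example(struct pv_chunk *pc, int idx)
{
	int field, bit;

	field = idx / 32;			/* which pc_map word */
	bit = idx % 32;				/* which bit within that word */
	pc->pc_map[field] |= 1ul << bit;	/* entry idx is now free */
}
#endif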
/*
 * We are in a serious low memory condition.  Resort to
 * drastic measures to free some pages so we can allocate
 * another pv entry chunk.  This is normally called to
 * unmap inactive pages, and if necessary, active pages.
 */
static void
pmap_collect(pmap_t locked_pmap, struct vpgqueues *vpq)
{
	pmap_t pmap;
	pt_entry_t *pte, tpte;
	pv_entry_t next_pv, pv;
	vm_offset_t va;
	vm_page_t m;

	sched_pin();
	TAILQ_FOREACH(m, &vpq->pl, pageq) {
		if (m->hold_count || m->busy || (m->flags & PG_BUSY))
			continue;
		TAILQ_FOREACH_SAFE(pv, &m->md.pv_list, pv_list, next_pv) {
			va = pv->pv_va;
			pmap = PV_PMAP(pv);
			/* Avoid deadlock and lock recursion. */
			if (pmap > locked_pmap)
				PMAP_LOCK(pmap);
			else if (pmap != locked_pmap && !PMAP_TRYLOCK(pmap))
				continue;
			pmap->pm_stats.resident_count--;
			pte = pmap_pte_quick(pmap, va);
			tpte = pte_load_clear(pte);
			KASSERT((tpte & PG_W) == 0,
			    ("pmap_collect: wired pte %#jx", (uintmax_t)tpte));
			if (tpte & PG_A)
				vm_page_flag_set(m, PG_REFERENCED);
			if (tpte & PG_M) {
				KASSERT((tpte & PG_RW),
	("pmap_collect: modified page not writable: va: %#x, pte: %#jx",
				    va, (uintmax_t)tpte));
				vm_page_dirty(m);
			}
			pmap_invalidate_page(pmap, va);
			TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
			if (TAILQ_EMPTY(&m->md.pv_list))
				vm_page_flag_clear(m, PG_WRITEABLE);
			m->md.pv_list_count--;
			pmap_unuse_pt(pmap, va);
			if (pmap != locked_pmap)
				PMAP_UNLOCK(pmap);
			free_pv_entry(locked_pmap, pv);
		}
	}
	sched_unpin();
}


/*
 * free the pv_entry back to the free list
 */
static void
free_pv_entry(pmap_t pmap, pv_entry_t pv)
{
	struct pv_chunk *pc;
	int idx, field, bit;

	PV_STAT(pv_entry_frees++);
	PV_STAT(pv_entry_spare++);
	pv_entry_count--;
	pc = pv_to_chunk(pv);
	idx = pv - &pc->pc_pventry[0];
	field = idx / 32;
	bit = idx % 32;
	pc->pc_map[field] |= 1ul << bit;
	/* move to head of list */
	TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
	TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
	for (idx = 0; idx < _NPCM; idx++)
		if (pc->pc_map[idx] != pc_freemask[idx])
			return;
	PV_STAT(pv_entry_spare -= _NPCPV);
	PV_STAT(pc_chunk_count--);
	PV_STAT(pc_chunk_frees++);
	/* entire chunk is free, return it to freelist */
	TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
	TAILQ_INSERT_HEAD(&pv_freechunks, pc, pc_list);
	PV_STAT(pc_chunk_spare++);
}
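/*
 * Illustrative sketch (not part of the original file, never compiled):
 * the allocation side of this interface.  get_pv_entry() below asserts
 * that the caller holds both the pmap lock and vm_page_queue_mtx;
 * pmap_insert_entry() later in this file is the typical consumer.  A
 * hypothetical caller would look roughly like this, with "FALSE"
 * requesting the blocking/reclaiming behaviour rather than a NULL
 * return on shortage:
 */
#if 0
static void
pv_alloc_example(pmap_t example_pmap, vm_offset_t example_va,
    vm_page_t example_m)
{
	pv_entry_t pv;

	PMAP_LOCK_ASSERT(example_pmap, MA_OWNED);
	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	pv = get_pv_entry(example_pmap, FALSE);
	pv->pv_va = example_va;
	TAILQ_INSERT_TAIL(&example_m->md.pv_list, pv, pv_list);
	example_m->md.pv_list_count++;
}
#endif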
/*
 * get a new pv_entry, allocating a block from the system
 * when needed.
 */
static pv_entry_t
get_pv_entry(pmap_t pmap, int try)
{
	static const struct timeval printinterval = { 60, 0 };
	static struct timeval lastprint;
	static vm_pindex_t colour;
	int bit, field;
	pv_entry_t pv;
	struct pv_chunk *pc;
	vm_page_t m;

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	PV_STAT(pv_entry_allocs++);
	pv_entry_count++;
	if (pv_entry_count > pv_entry_high_water)
		pagedaemon_wakeup();
	pc = TAILQ_FIRST(&pmap->pm_pvchunk);
	if (pc != NULL) {
		for (field = 0; field < _NPCM; field++) {
			if (pc->pc_map[field]) {
				bit = bsfl(pc->pc_map[field]);
				break;
			}
		}
		if (field < _NPCM) {
			pv = &pc->pc_pventry[field * 32 + bit];
			pc->pc_map[field] &= ~(1ul << bit);
			/* If this was the last item, move it to tail */
			for (field = 0; field < _NPCM; field++)
				if (pc->pc_map[field] != 0) {
					PV_STAT(pv_entry_spare--);
					return (pv);	/* not full, return */
				}
			TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
			TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list);
			PV_STAT(pv_entry_spare--);
			return (pv);
		}
	}
	/* See if we have a preallocated chunk */
	pc = TAILQ_FIRST(&pv_freechunks);
	if (pc) {
		/* Take a preallocated one from the freelist */
		TAILQ_REMOVE(&pv_freechunks, pc, pc_list);
		PV_STAT(pc_chunk_spare--);
	} else {
		/* No free items, allocate another chunk */
		m = vm_page_alloc(NULL, colour, VM_ALLOC_SYSTEM |
		    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED);
		if (m == NULL) {
			if (try) {
				pv_entry_count--;
				PV_STAT(pc_chunk_tryfail++);
				return (NULL);
			}
			/*
			 * Reclaim pv entries: At first, destroy mappings to
			 * inactive pages.  After that, if a pv chunk entry
			 * is still needed, destroy mappings to active pages.
1661 */ 1662 if (ratecheck(&lastprint, &printinterval)) 1663 printf("Approaching the limit on PV entries, " 1664 "consider increasing tunables " 1665 "vm.pmap.shpgperproc or " 1666 "vm.pmap.pv_entry_max\n"); 1667 PV_STAT(pmap_collect_inactive++); 1668 pmap_collect(pmap, &vm_page_queues[PQ_INACTIVE]); 1669 m = vm_page_alloc(NULL, colour, VM_ALLOC_SYSTEM | 1670 VM_ALLOC_NOOBJ | VM_ALLOC_WIRED); 1671 if (m == NULL) { 1672 PV_STAT(pmap_collect_active++); 1673 pmap_collect(pmap, &vm_page_queues[PQ_ACTIVE]); 1674 m = vm_page_alloc(NULL, colour, 1675 VM_ALLOC_SYSTEM | VM_ALLOC_NOOBJ | 1676 VM_ALLOC_WIRED); 1677 if (m == NULL) 1678 panic("get_pv_entry: increase vm.pmap.shpgperproc"); 1679 } 1680 } 1681 colour++; 1682 pc = pv_chunkbase + pv_nextindex; /* Scaled */ 1683 pv_nextindex++; 1684 pmap_qenter((vm_offset_t)pc, &m, 1); 1685 } 1686 PV_STAT(pc_chunk_count++); 1687 PV_STAT(pc_chunk_allocs++); 1688 pc->pc_pmap = pmap; 1689 pc->pc_map[0] = pc_freemask[0] & ~1ul; /* preallocated bit 0 */ 1690 for (field = 1; field < _NPCM; field++) 1691 pc->pc_map[field] = pc_freemask[field]; 1692 pv = &pc->pc_pventry[0]; 1693 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list); 1694 PV_STAT(pv_entry_spare += _NPCPV - 1); 1695 return (pv); 1696} 1697 1698static void 1699pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va) 1700{ 1701 pv_entry_t pv; 1702 1703 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 1704 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 1705 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { 1706 if (pmap == PV_PMAP(pv) && va == pv->pv_va) 1707 break; 1708 } 1709 KASSERT(pv != NULL, ("pmap_remove_entry: pv not found")); 1710 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list); 1711 m->md.pv_list_count--; 1712 if (TAILQ_EMPTY(&m->md.pv_list)) 1713 vm_page_flag_clear(m, PG_WRITEABLE); 1714 free_pv_entry(pmap, pv); 1715} 1716 1717/* 1718 * Create a pv entry for page at pa for 1719 * (pmap, va). 1720 */ 1721static void 1722pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m) 1723{ 1724 pv_entry_t pv; 1725 1726 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 1727 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 1728 pv = get_pv_entry(pmap, FALSE); 1729 pv->pv_va = va; 1730 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list); 1731 m->md.pv_list_count++; 1732} 1733 1734/* 1735 * Conditionally create a pv entry. 1736 */ 1737static boolean_t 1738pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m) 1739{ 1740 pv_entry_t pv; 1741 1742 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 1743 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 1744 if (pv_entry_count < pv_entry_high_water && 1745 (pv = get_pv_entry(pmap, TRUE)) != NULL) { 1746 pv->pv_va = va; 1747 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list); 1748 m->md.pv_list_count++; 1749 return (TRUE); 1750 } else 1751 return (FALSE); 1752} 1753 1754/* 1755 * pmap_remove_pte: do the things to unmap a page in a process 1756 */ 1757static int 1758pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t va) 1759{ 1760 pt_entry_t oldpte; 1761 vm_page_t m; 1762 1763 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 1764 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 1765 oldpte = pte_load_clear(ptq); 1766 if (oldpte & PG_W) 1767 pmap->pm_stats.wired_count -= 1; 1768 /* 1769 * Machines that don't support invlpg, also don't support 1770 * PG_G. 
1771 */ 1772 if (oldpte & PG_G) 1773 pmap_invalidate_page(kernel_pmap, va); 1774 pmap->pm_stats.resident_count -= 1; 1775 if (oldpte & PG_MANAGED) { 1776 m = PHYS_TO_VM_PAGE(oldpte); 1777 if (oldpte & PG_M) { 1778 KASSERT((oldpte & PG_RW), 1779 ("pmap_remove_pte: modified page not writable: va: %#x, pte: %#jx", 1780 va, (uintmax_t)oldpte)); 1781 vm_page_dirty(m); 1782 } 1783 if (oldpte & PG_A) 1784 vm_page_flag_set(m, PG_REFERENCED); 1785 pmap_remove_entry(pmap, m, va); 1786 } 1787 return (pmap_unuse_pt(pmap, va)); 1788} 1789 1790/* 1791 * Remove a single page from a process address space 1792 */ 1793static void 1794pmap_remove_page(pmap_t pmap, vm_offset_t va) 1795{ 1796 pt_entry_t *pte; 1797 1798 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 1799 KASSERT(curthread->td_pinned > 0, ("curthread not pinned")); 1800 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 1801 if ((pte = pmap_pte_quick(pmap, va)) == NULL || *pte == 0) 1802 return; 1803 pmap_remove_pte(pmap, pte, va); 1804 pmap_invalidate_page(pmap, va); 1805} 1806 1807/* 1808 * Remove the given range of addresses from the specified map. 1809 * 1810 * It is assumed that the start and end are properly 1811 * rounded to the page size. 1812 */ 1813void 1814pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) 1815{ 1816 vm_offset_t pdnxt; 1817 pd_entry_t ptpaddr; 1818 pt_entry_t *pte; 1819 int anyvalid; 1820 1821 /* 1822 * Perform an unsynchronized read. This is, however, safe. 1823 */ 1824 if (pmap->pm_stats.resident_count == 0) 1825 return; 1826 1827 anyvalid = 0; 1828 1829 vm_page_lock_queues(); 1830 sched_pin(); 1831 PMAP_LOCK(pmap); 1832 1833 /* 1834 * special handling of removing one page. a very 1835 * common operation and easy to short circuit some 1836 * code. 1837 */ 1838 if ((sva + PAGE_SIZE == eva) && 1839 ((pmap->pm_pdir[(sva >> PDRSHIFT)] & PG_PS) == 0)) { 1840 pmap_remove_page(pmap, sva); 1841 goto out; 1842 } 1843 1844 for (; sva < eva; sva = pdnxt) { 1845 unsigned pdirindex; 1846 1847 /* 1848 * Calculate index for next page table. 1849 */ 1850 pdnxt = (sva + NBPDR) & ~PDRMASK; 1851 if (pmap->pm_stats.resident_count == 0) 1852 break; 1853 1854 pdirindex = sva >> PDRSHIFT; 1855 ptpaddr = pmap->pm_pdir[pdirindex]; 1856 1857 /* 1858 * Weed out invalid mappings. Note: we assume that the page 1859 * directory table is always allocated, and in kernel virtual. 1860 */ 1861 if (ptpaddr == 0) 1862 continue; 1863 1864 /* 1865 * Check for large page. 1866 */ 1867 if ((ptpaddr & PG_PS) != 0) { 1868 pmap->pm_pdir[pdirindex] = 0; 1869 pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE; 1870 anyvalid = 1; 1871 continue; 1872 } 1873 1874 /* 1875 * Limit our scan to either the end of the va represented 1876 * by the current page table page, or to the end of the 1877 * range being removed. 1878 */ 1879 if (pdnxt > eva) 1880 pdnxt = eva; 1881 1882 for (pte = pmap_pte_quick(pmap, sva); sva != pdnxt; pte++, 1883 sva += PAGE_SIZE) { 1884 if (*pte == 0) 1885 continue; 1886 1887 /* 1888 * The TLB entry for a PG_G mapping is invalidated 1889 * by pmap_remove_pte(). 1890 */ 1891 if ((*pte & PG_G) == 0) 1892 anyvalid = 1; 1893 if (pmap_remove_pte(pmap, pte, sva)) 1894 break; 1895 } 1896 } 1897out: 1898 sched_unpin(); 1899 vm_page_unlock_queues(); 1900 if (anyvalid) 1901 pmap_invalidate_all(pmap); 1902 PMAP_UNLOCK(pmap); 1903} 1904 1905/* 1906 * Routine: pmap_remove_all 1907 * Function: 1908 * Removes this physical page from 1909 * all physical maps in which it resides. 1910 * Reflects back modify bits to the pager. 
1911 * 1912 * Notes: 1913 * Original versions of this routine were very 1914 * inefficient because they iteratively called 1915 * pmap_remove (slow...) 1916 */ 1917 1918void 1919pmap_remove_all(vm_page_t m) 1920{ 1921 register pv_entry_t pv; 1922 pmap_t pmap; 1923 pt_entry_t *pte, tpte; 1924 1925#if defined(PMAP_DIAGNOSTIC) 1926 /* 1927 * XXX This makes pmap_remove_all() illegal for non-managed pages! 1928 */ 1929 if (m->flags & PG_FICTITIOUS) { 1930 panic("pmap_remove_all: illegal for unmanaged page, va: 0x%x", 1931 VM_PAGE_TO_PHYS(m)); 1932 } 1933#endif 1934 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 1935 sched_pin(); 1936 while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) { 1937 pmap = PV_PMAP(pv); 1938 PMAP_LOCK(pmap); 1939 pmap->pm_stats.resident_count--; 1940 pte = pmap_pte_quick(pmap, pv->pv_va); 1941 tpte = pte_load_clear(pte); 1942 if (tpte & PG_W) 1943 pmap->pm_stats.wired_count--; 1944 if (tpte & PG_A) 1945 vm_page_flag_set(m, PG_REFERENCED); 1946 1947 /* 1948 * Update the vm_page_t clean and reference bits. 1949 */ 1950 if (tpte & PG_M) { 1951 KASSERT((tpte & PG_RW), 1952 ("pmap_remove_all: modified page not writable: va: %#x, pte: %#jx", 1953 pv->pv_va, (uintmax_t)tpte)); 1954 vm_page_dirty(m); 1955 } 1956 pmap_invalidate_page(pmap, pv->pv_va); 1957 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list); 1958 m->md.pv_list_count--; 1959 pmap_unuse_pt(pmap, pv->pv_va); 1960 PMAP_UNLOCK(pmap); 1961 free_pv_entry(pmap, pv); 1962 } 1963 vm_page_flag_clear(m, PG_WRITEABLE); 1964 sched_unpin(); 1965} 1966 1967/* 1968 * Set the physical protection on the 1969 * specified range of this map as requested. 1970 */ 1971void 1972pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot) 1973{ 1974 vm_offset_t pdnxt; 1975 pd_entry_t ptpaddr; 1976 pt_entry_t *pte; 1977 int anychanged; 1978 1979 if ((prot & VM_PROT_READ) == VM_PROT_NONE) { 1980 pmap_remove(pmap, sva, eva); 1981 return; 1982 } 1983 1984 if (prot & VM_PROT_WRITE) 1985 return; 1986 1987 anychanged = 0; 1988 1989 vm_page_lock_queues(); 1990 sched_pin(); 1991 PMAP_LOCK(pmap); 1992 for (; sva < eva; sva = pdnxt) { 1993 unsigned obits, pbits, pdirindex; 1994 1995 pdnxt = (sva + NBPDR) & ~PDRMASK; 1996 1997 pdirindex = sva >> PDRSHIFT; 1998 ptpaddr = pmap->pm_pdir[pdirindex]; 1999 2000 /* 2001 * Weed out invalid mappings. Note: we assume that the page 2002 * directory table is always allocated, and in kernel virtual. 2003 */ 2004 if (ptpaddr == 0) 2005 continue; 2006 2007 /* 2008 * Check for large page. 2009 */ 2010 if ((ptpaddr & PG_PS) != 0) { 2011 pmap->pm_pdir[pdirindex] &= ~(PG_M|PG_RW); 2012 anychanged = 1; 2013 continue; 2014 } 2015 2016 if (pdnxt > eva) 2017 pdnxt = eva; 2018 2019 for (pte = pmap_pte_quick(pmap, sva); sva != pdnxt; pte++, 2020 sva += PAGE_SIZE) { 2021 vm_page_t m; 2022 2023retry: 2024 /* 2025 * Regardless of whether a pte is 32 or 64 bits in 2026 * size, PG_RW, PG_A, and PG_M are among the least 2027 * significant 32 bits. 
2028 */ 2029 obits = pbits = *(u_int *)pte; 2030 if (pbits & PG_MANAGED) { 2031 m = NULL; 2032 if (pbits & PG_A) { 2033 m = PHYS_TO_VM_PAGE(*pte); 2034 vm_page_flag_set(m, PG_REFERENCED); 2035 pbits &= ~PG_A; 2036 } 2037 if ((pbits & PG_M) != 0) { 2038 if (m == NULL) 2039 m = PHYS_TO_VM_PAGE(*pte); 2040 vm_page_dirty(m); 2041 } 2042 } 2043 2044 pbits &= ~(PG_RW | PG_M); 2045 2046 if (pbits != obits) { 2047 if (!atomic_cmpset_int((u_int *)pte, obits, 2048 pbits)) 2049 goto retry; 2050 if (obits & PG_G) 2051 pmap_invalidate_page(pmap, sva); 2052 else 2053 anychanged = 1; 2054 } 2055 } 2056 } 2057 sched_unpin(); 2058 vm_page_unlock_queues(); 2059 if (anychanged) 2060 pmap_invalidate_all(pmap); 2061 PMAP_UNLOCK(pmap); 2062} 2063 2064/* 2065 * Insert the given physical page (p) at 2066 * the specified virtual address (v) in the 2067 * target physical map with the protection requested. 2068 * 2069 * If specified, the page will be wired down, meaning 2070 * that the related pte can not be reclaimed. 2071 * 2072 * NB: This is the only routine which MAY NOT lazy-evaluate 2073 * or lose information. That is, this routine must actually 2074 * insert this page into the given map NOW. 2075 */ 2076void 2077pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, 2078 boolean_t wired) 2079{ 2080 vm_paddr_t pa; 2081 pd_entry_t *pde; 2082 register pt_entry_t *pte; 2083 vm_paddr_t opa; 2084 pt_entry_t origpte, newpte; 2085 vm_page_t mpte, om; 2086 boolean_t invlva; 2087 2088 va &= PG_FRAME; 2089#ifdef PMAP_DIAGNOSTIC 2090 if (va > VM_MAX_KERNEL_ADDRESS) 2091 panic("pmap_enter: toobig"); 2092 if ((va >= UPT_MIN_ADDRESS) && (va < UPT_MAX_ADDRESS)) 2093 panic("pmap_enter: invalid to pmap_enter page table pages (va: 0x%x)", va); 2094#endif 2095 2096 mpte = NULL; 2097 2098 vm_page_lock_queues(); 2099 PMAP_LOCK(pmap); 2100 sched_pin(); 2101 2102 /* 2103 * In the case that a page table page is not 2104 * resident, we are creating it here. 2105 */ 2106 if (va < VM_MAXUSER_ADDRESS) { 2107 mpte = pmap_allocpte(pmap, va, M_WAITOK); 2108 } 2109#if 0 && defined(PMAP_DIAGNOSTIC) 2110 else { 2111 pd_entry_t *pdeaddr = pmap_pde(pmap, va); 2112 origpte = *pdeaddr; 2113 if ((origpte & PG_V) == 0) { 2114 panic("pmap_enter: invalid kernel page table page, pdir=%p, pde=%p, va=%p\n", 2115 pmap->pm_pdir[PTDPTDI], origpte, va); 2116 } 2117 } 2118#endif 2119 2120 pde = pmap_pde(pmap, va); 2121 if ((*pde & PG_PS) != 0) 2122 panic("pmap_enter: attempted pmap_enter on 4MB page"); 2123 pte = pmap_pte_quick(pmap, va); 2124 2125 /* 2126 * Page Directory table entry not valid, we need a new PT page 2127 */ 2128 if (pte == NULL) { 2129 panic("pmap_enter: invalid page directory pdir=%#jx, va=%#x\n", 2130 (uintmax_t)pmap->pm_pdir[PTDPTDI], va); 2131 } 2132 2133 pa = VM_PAGE_TO_PHYS(m); 2134 om = NULL; 2135 origpte = *pte; 2136 opa = origpte & PG_FRAME; 2137 2138 /* 2139 * Mapping has not changed, must be protection or wiring change. 2140 */ 2141 if (origpte && (opa == pa)) { 2142 /* 2143 * Wiring change, just update stats. We don't worry about 2144 * wiring PT pages as they remain resident as long as there 2145 * are valid mappings in them. Hence, if a user page is wired, 2146 * the PT page will be also. 
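 *
 * Only the wired-count statistic is adjusted here; the PG_W bit itself
 * is rewritten below, at validate:, together with the new protection
 * bits.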
2147 */ 2148 if (wired && ((origpte & PG_W) == 0)) 2149 pmap->pm_stats.wired_count++; 2150 else if (!wired && (origpte & PG_W)) 2151 pmap->pm_stats.wired_count--; 2152 2153 /* 2154 * Remove extra pte reference 2155 */ 2156 if (mpte) 2157 mpte->wire_count--; 2158 2159 /* 2160 * We might be turning off write access to the page, 2161 * so we go ahead and sense modify status. 2162 */ 2163 if (origpte & PG_MANAGED) { 2164 om = m; 2165 pa |= PG_MANAGED; 2166 } 2167 goto validate; 2168 } 2169 /* 2170 * Mapping has changed, invalidate old range and fall through to 2171 * handle validating new mapping. 2172 */ 2173 if (opa) { 2174 if (origpte & PG_W) 2175 pmap->pm_stats.wired_count--; 2176 if (origpte & PG_MANAGED) { 2177 om = PHYS_TO_VM_PAGE(opa); 2178 pmap_remove_entry(pmap, om, va); 2179 } 2180 if (mpte != NULL) { 2181 mpte->wire_count--; 2182 KASSERT(mpte->wire_count > 0, 2183 ("pmap_enter: missing reference to page table page," 2184 " va: 0x%x", va)); 2185 } 2186 } else 2187 pmap->pm_stats.resident_count++; 2188 2189 /* 2190 * Enter on the PV list if part of our managed memory. 2191 */ 2192 if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) { 2193 KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva, 2194 ("pmap_enter: managed mapping within the clean submap")); 2195 pmap_insert_entry(pmap, va, m); 2196 pa |= PG_MANAGED; 2197 } 2198 2199 /* 2200 * Increment counters 2201 */ 2202 if (wired) 2203 pmap->pm_stats.wired_count++; 2204 2205validate: 2206 /* 2207 * Now validate mapping with desired protection/wiring. 2208 */ 2209 newpte = (pt_entry_t)(pa | PG_V); 2210 if ((prot & VM_PROT_WRITE) != 0) 2211 newpte |= PG_RW; 2212 if (wired) 2213 newpte |= PG_W; 2214 if (va < VM_MAXUSER_ADDRESS) 2215 newpte |= PG_U; 2216 if (pmap == kernel_pmap) 2217 newpte |= pgeflag; 2218 2219 /* 2220 * if the mapping or permission bits are different, we need 2221 * to update the pte. 2222 */ 2223 if ((origpte & ~(PG_M|PG_A)) != newpte) { 2224 if (origpte & PG_V) { 2225 invlva = FALSE; 2226 origpte = pte_load_store(pte, newpte | PG_A); 2227 if (origpte & PG_A) { 2228 if (origpte & PG_MANAGED) 2229 vm_page_flag_set(om, PG_REFERENCED); 2230 if (opa != VM_PAGE_TO_PHYS(m)) 2231 invlva = TRUE; 2232 } 2233 if (origpte & PG_M) { 2234 KASSERT((origpte & PG_RW), 2235 ("pmap_enter: modified page not writable: va: %#x, pte: %#jx", 2236 va, (uintmax_t)origpte)); 2237 if ((origpte & PG_MANAGED) != 0) 2238 vm_page_dirty(om); 2239 if ((prot & VM_PROT_WRITE) == 0) 2240 invlva = TRUE; 2241 } 2242 if (invlva) 2243 pmap_invalidate_page(pmap, va); 2244 } else 2245 pte_store(pte, newpte | PG_A); 2246 } 2247 sched_unpin(); 2248 vm_page_unlock_queues(); 2249 PMAP_UNLOCK(pmap); 2250} 2251 2252/* 2253 * this code makes some *MAJOR* assumptions: 2254 * 1. Current pmap & pmap exists. 2255 * 2. Not wired. 2256 * 3. Read access. 2257 * 4. No page table pages. 2258 * but is *MUCH* faster than pmap_enter... 2259 */ 2260 2261vm_page_t 2262pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, 2263 vm_page_t mpte) 2264{ 2265 pt_entry_t *pte; 2266 vm_paddr_t pa; 2267 2268 KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva || 2269 (m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0, 2270 ("pmap_enter_quick: managed mapping within the clean submap")); 2271 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2272 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 2273 PMAP_LOCK(pmap); 2274 2275 /* 2276 * In the case that a page table page is not 2277 * resident, we are creating it here. 
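 *
 * For user addresses the page table page is allocated with M_NOWAIT;
 * if that fails, every lock is dropped, VM_WAIT sleeps until pages are
 * freed, and the lookup is retried, since this path must not sleep
 * while the pmap, page queues, and object locks are held.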
2278 */ 2279 if (va < VM_MAXUSER_ADDRESS) { 2280 unsigned ptepindex; 2281 pd_entry_t ptepa; 2282 2283 /* 2284 * Calculate pagetable page index 2285 */ 2286 ptepindex = va >> PDRSHIFT; 2287 if (mpte && (mpte->pindex == ptepindex)) { 2288 mpte->wire_count++; 2289 } else { 2290retry: 2291 /* 2292 * Get the page directory entry 2293 */ 2294 ptepa = pmap->pm_pdir[ptepindex]; 2295 2296 /* 2297 * If the page table page is mapped, we just increment 2298 * the hold count, and activate it. 2299 */ 2300 if (ptepa) { 2301 if (ptepa & PG_PS) 2302 panic("pmap_enter_quick: unexpected mapping into 4MB page"); 2303 mpte = PHYS_TO_VM_PAGE(ptepa); 2304 mpte->wire_count++; 2305 } else { 2306 mpte = _pmap_allocpte(pmap, ptepindex, 2307 M_NOWAIT); 2308 if (mpte == NULL) { 2309 PMAP_UNLOCK(pmap); 2310 vm_page_busy(m); 2311 vm_page_unlock_queues(); 2312 VM_OBJECT_UNLOCK(m->object); 2313 VM_WAIT; 2314 VM_OBJECT_LOCK(m->object); 2315 vm_page_lock_queues(); 2316 vm_page_wakeup(m); 2317 PMAP_LOCK(pmap); 2318 goto retry; 2319 } 2320 } 2321 } 2322 } else { 2323 mpte = NULL; 2324 } 2325 2326 /* 2327 * This call to vtopte makes the assumption that we are 2328 * entering the page into the current pmap. In order to support 2329 * quick entry into any pmap, one would likely use pmap_pte_quick. 2330 * But that isn't as quick as vtopte. 2331 */ 2332 pte = vtopte(va); 2333 if (*pte) { 2334 if (mpte != NULL) { 2335 pmap_unwire_pte_hold(pmap, mpte); 2336 mpte = NULL; 2337 } 2338 goto out; 2339 } 2340 2341 /* 2342 * Enter on the PV list if part of our managed memory. Note that we 2343 * raise IPL while manipulating pv_table since pmap_enter can be 2344 * called at interrupt time. 2345 */ 2346 if ((m->flags & (PG_FICTITIOUS|PG_UNMANAGED)) == 0) 2347 pmap_insert_entry(pmap, va, m); 2348 2349 /* 2350 * Increment counters 2351 */ 2352 pmap->pm_stats.resident_count++; 2353 2354 pa = VM_PAGE_TO_PHYS(m); 2355 2356 /* 2357 * Now validate mapping with RO protection 2358 */ 2359 if (m->flags & (PG_FICTITIOUS|PG_UNMANAGED)) 2360 pte_store(pte, pa | PG_V | PG_U); 2361 else 2362 pte_store(pte, pa | PG_V | PG_U | PG_MANAGED); 2363out: 2364 PMAP_UNLOCK(pmap); 2365 return mpte; 2366} 2367 2368/* 2369 * Make a temporary mapping for a physical address. This is only intended 2370 * to be used for panic dumps. 2371 */ 2372void * 2373pmap_kenter_temporary(vm_paddr_t pa, int i) 2374{ 2375 vm_offset_t va; 2376 2377 va = (vm_offset_t)crashdumpmap + (i * PAGE_SIZE); 2378 pmap_kenter(va, pa); 2379 invlpg(va); 2380 return ((void *)crashdumpmap); 2381} 2382 2383/* 2384 * This code maps large physical mmap regions into the 2385 * processor address space. Note that some shortcuts 2386 * are taken, but the code works. 
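 *
 *	This fast path only does anything when the processor supports
 *	4MB pages (pseflag) and both addr and size are NBPDR aligned;
 *	the object must be an OBJT_DEVICE object, which is asserted.
 *	Otherwise the mappings are simply created later, on demand.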
2387 */ 2388void 2389pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, 2390 vm_object_t object, vm_pindex_t pindex, 2391 vm_size_t size) 2392{ 2393 vm_page_t p; 2394 2395 VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); 2396 KASSERT(object->type == OBJT_DEVICE, 2397 ("pmap_object_init_pt: non-device object")); 2398 if (pseflag && 2399 ((addr & (NBPDR - 1)) == 0) && ((size & (NBPDR - 1)) == 0)) { 2400 int i; 2401 vm_page_t m[1]; 2402 unsigned int ptepindex; 2403 int npdes; 2404 pd_entry_t ptepa; 2405 2406 PMAP_LOCK(pmap); 2407 if (pmap->pm_pdir[ptepindex = (addr >> PDRSHIFT)]) 2408 goto out; 2409 PMAP_UNLOCK(pmap); 2410retry: 2411 p = vm_page_lookup(object, pindex); 2412 if (p != NULL) { 2413 vm_page_lock_queues(); 2414 if (vm_page_sleep_if_busy(p, FALSE, "init4p")) 2415 goto retry; 2416 } else { 2417 p = vm_page_alloc(object, pindex, VM_ALLOC_NORMAL); 2418 if (p == NULL) 2419 return; 2420 m[0] = p; 2421 2422 if (vm_pager_get_pages(object, m, 1, 0) != VM_PAGER_OK) { 2423 vm_page_lock_queues(); 2424 vm_page_free(p); 2425 vm_page_unlock_queues(); 2426 return; 2427 } 2428 2429 p = vm_page_lookup(object, pindex); 2430 vm_page_lock_queues(); 2431 vm_page_wakeup(p); 2432 } 2433 vm_page_unlock_queues(); 2434 2435 ptepa = VM_PAGE_TO_PHYS(p); 2436 if (ptepa & (NBPDR - 1)) 2437 return; 2438 2439 p->valid = VM_PAGE_BITS_ALL; 2440 2441 PMAP_LOCK(pmap); 2442 pmap->pm_stats.resident_count += size >> PAGE_SHIFT; 2443 npdes = size >> PDRSHIFT; 2444 for(i = 0; i < npdes; i++) { 2445 pde_store(&pmap->pm_pdir[ptepindex], 2446 ptepa | PG_U | PG_RW | PG_V | PG_PS); 2447 ptepa += NBPDR; 2448 ptepindex += 1; 2449 } 2450 pmap_invalidate_all(pmap); 2451out: 2452 PMAP_UNLOCK(pmap); 2453 } 2454} 2455 2456/* 2457 * Routine: pmap_change_wiring 2458 * Function: Change the wiring attribute for a map/virtual-address 2459 * pair. 2460 * In/out conditions: 2461 * The mapping must already exist in the pmap. 2462 */ 2463void 2464pmap_change_wiring(pmap, va, wired) 2465 register pmap_t pmap; 2466 vm_offset_t va; 2467 boolean_t wired; 2468{ 2469 register pt_entry_t *pte; 2470 2471 PMAP_LOCK(pmap); 2472 pte = pmap_pte(pmap, va); 2473 2474 if (wired && !pmap_pte_w(pte)) 2475 pmap->pm_stats.wired_count++; 2476 else if (!wired && pmap_pte_w(pte)) 2477 pmap->pm_stats.wired_count--; 2478 2479 /* 2480 * Wiring is not a hardware characteristic so there is no need to 2481 * invalidate TLB. 2482 */ 2483 pmap_pte_set_w(pte, wired); 2484 pmap_pte_release(pte); 2485 PMAP_UNLOCK(pmap); 2486} 2487 2488 2489 2490/* 2491 * Copy the range specified by src_addr/len 2492 * from the source map to the range dst_addr/len 2493 * in the destination map. 2494 * 2495 * This routine is only advisory and need not do anything. 
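 *
 *	As implemented below, copying only happens when the destination
 *	range equals the source range and the source pmap is the current
 *	one.  A minimal sketch of a typical call (names are illustrative
 *	only):
 *
 *		pmap_copy(dst_pmap, src_pmap, entry->start,
 *		    entry->end - entry->start, entry->start);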
2496 */ 2497 2498void 2499pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len, 2500 vm_offset_t src_addr) 2501{ 2502 vm_offset_t addr; 2503 vm_offset_t end_addr = src_addr + len; 2504 vm_offset_t pdnxt; 2505 2506 if (dst_addr != src_addr) 2507 return; 2508 2509 if (!pmap_is_current(src_pmap)) 2510 return; 2511 2512 vm_page_lock_queues(); 2513 if (dst_pmap < src_pmap) { 2514 PMAP_LOCK(dst_pmap); 2515 PMAP_LOCK(src_pmap); 2516 } else { 2517 PMAP_LOCK(src_pmap); 2518 PMAP_LOCK(dst_pmap); 2519 } 2520 sched_pin(); 2521 for (addr = src_addr; addr < end_addr; addr = pdnxt) { 2522 pt_entry_t *src_pte, *dst_pte; 2523 vm_page_t dstmpte, srcmpte; 2524 pd_entry_t srcptepaddr; 2525 unsigned ptepindex; 2526 2527 if (addr >= UPT_MIN_ADDRESS) 2528 panic("pmap_copy: invalid to pmap_copy page tables"); 2529 2530 pdnxt = (addr + NBPDR) & ~PDRMASK; 2531 ptepindex = addr >> PDRSHIFT; 2532 2533 srcptepaddr = src_pmap->pm_pdir[ptepindex]; 2534 if (srcptepaddr == 0) 2535 continue; 2536 2537 if (srcptepaddr & PG_PS) { 2538 if (dst_pmap->pm_pdir[ptepindex] == 0) { 2539 dst_pmap->pm_pdir[ptepindex] = srcptepaddr; 2540 dst_pmap->pm_stats.resident_count += 2541 NBPDR / PAGE_SIZE; 2542 } 2543 continue; 2544 } 2545 2546 srcmpte = PHYS_TO_VM_PAGE(srcptepaddr); 2547 if (srcmpte->wire_count == 0) 2548 panic("pmap_copy: source page table page is unused"); 2549 2550 if (pdnxt > end_addr) 2551 pdnxt = end_addr; 2552 2553 src_pte = vtopte(addr); 2554 while (addr < pdnxt) { 2555 pt_entry_t ptetemp; 2556 ptetemp = *src_pte; 2557 /* 2558 * we only virtual copy managed pages 2559 */ 2560 if ((ptetemp & PG_MANAGED) != 0) { 2561 /* 2562 * We have to check after allocpte for the 2563 * pte still being around... allocpte can 2564 * block. 2565 */ 2566 dstmpte = pmap_allocpte(dst_pmap, addr, 2567 M_NOWAIT); 2568 if (dstmpte == NULL) 2569 break; 2570 dst_pte = pmap_pte_quick(dst_pmap, addr); 2571 if (*dst_pte == 0 && 2572 pmap_try_insert_pv_entry(dst_pmap, addr, 2573 PHYS_TO_VM_PAGE(ptetemp & PG_FRAME))) { 2574 /* 2575 * Clear the modified and 2576 * accessed (referenced) bits 2577 * during the copy. 2578 */ 2579 *dst_pte = ptetemp & ~(PG_M | PG_A); 2580 dst_pmap->pm_stats.resident_count++; 2581 } else 2582 pmap_unwire_pte_hold(dst_pmap, dstmpte); 2583 if (dstmpte->wire_count >= srcmpte->wire_count) 2584 break; 2585 } 2586 addr += PAGE_SIZE; 2587 src_pte++; 2588 } 2589 } 2590 sched_unpin(); 2591 vm_page_unlock_queues(); 2592 PMAP_UNLOCK(src_pmap); 2593 PMAP_UNLOCK(dst_pmap); 2594} 2595 2596static __inline void 2597pagezero(void *page) 2598{ 2599#if defined(I686_CPU) 2600 if (cpu_class == CPUCLASS_686) { 2601#if defined(CPU_ENABLE_SSE) 2602 if (cpu_feature & CPUID_SSE2) 2603 sse2_pagezero(page); 2604 else 2605#endif 2606 i686_pagezero(page); 2607 } else 2608#endif 2609 bzero(page, PAGE_SIZE); 2610} 2611 2612/* 2613 * pmap_zero_page zeros the specified hardware page by mapping 2614 * the page into KVM and using bzero to clear its contents. 
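 *
 *	The page is mapped through the per-CPU CMAP2/CADDR2 window, so
 *	the thread pins itself to its CPU (sched_pin()) for the duration
 *	and the window is serialized by the per-CPU sysmaps lock.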
2615 */ 2616void 2617pmap_zero_page(vm_page_t m) 2618{ 2619 struct sysmaps *sysmaps; 2620 2621 sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)]; 2622 mtx_lock(&sysmaps->lock); 2623 if (*sysmaps->CMAP2) 2624 panic("pmap_zero_page: CMAP2 busy"); 2625 sched_pin(); 2626 *sysmaps->CMAP2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M; 2627 invlcaddr(sysmaps->CADDR2); 2628 pagezero(sysmaps->CADDR2); 2629 *sysmaps->CMAP2 = 0; 2630 sched_unpin(); 2631 mtx_unlock(&sysmaps->lock); 2632} 2633 2634/* 2635 * pmap_zero_page_area zeros the specified hardware page by mapping 2636 * the page into KVM and using bzero to clear its contents. 2637 * 2638 * off and size may not cover an area beyond a single hardware page. 2639 */ 2640void 2641pmap_zero_page_area(vm_page_t m, int off, int size) 2642{ 2643 struct sysmaps *sysmaps; 2644 2645 sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)]; 2646 mtx_lock(&sysmaps->lock); 2647 if (*sysmaps->CMAP2) 2648 panic("pmap_zero_page: CMAP2 busy"); 2649 sched_pin(); 2650 *sysmaps->CMAP2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M; 2651 invlcaddr(sysmaps->CADDR2); 2652 if (off == 0 && size == PAGE_SIZE) 2653 pagezero(sysmaps->CADDR2); 2654 else 2655 bzero((char *)sysmaps->CADDR2 + off, size); 2656 *sysmaps->CMAP2 = 0; 2657 sched_unpin(); 2658 mtx_unlock(&sysmaps->lock); 2659} 2660 2661/* 2662 * pmap_zero_page_idle zeros the specified hardware page by mapping 2663 * the page into KVM and using bzero to clear its contents. This 2664 * is intended to be called from the vm_pagezero process only and 2665 * outside of Giant. 2666 */ 2667void 2668pmap_zero_page_idle(vm_page_t m) 2669{ 2670 2671 if (*CMAP3) 2672 panic("pmap_zero_page: CMAP3 busy"); 2673 sched_pin(); 2674 *CMAP3 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M; 2675 invlcaddr(CADDR3); 2676 pagezero(CADDR3); 2677 *CMAP3 = 0; 2678 sched_unpin(); 2679} 2680 2681/* 2682 * pmap_copy_page copies the specified (machine independent) 2683 * page by mapping the page into virtual memory and using 2684 * bcopy to copy the page, one machine dependent page at a 2685 * time. 2686 */ 2687void 2688pmap_copy_page(vm_page_t src, vm_page_t dst) 2689{ 2690 struct sysmaps *sysmaps; 2691 2692 sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)]; 2693 mtx_lock(&sysmaps->lock); 2694 if (*sysmaps->CMAP1) 2695 panic("pmap_copy_page: CMAP1 busy"); 2696 if (*sysmaps->CMAP2) 2697 panic("pmap_copy_page: CMAP2 busy"); 2698 sched_pin(); 2699 invlpg((u_int)sysmaps->CADDR1); 2700 invlpg((u_int)sysmaps->CADDR2); 2701 *sysmaps->CMAP1 = PG_V | VM_PAGE_TO_PHYS(src) | PG_A; 2702 *sysmaps->CMAP2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(dst) | PG_A | PG_M; 2703 bcopy(sysmaps->CADDR1, sysmaps->CADDR2, PAGE_SIZE); 2704 *sysmaps->CMAP1 = 0; 2705 *sysmaps->CMAP2 = 0; 2706 sched_unpin(); 2707 mtx_unlock(&sysmaps->lock); 2708} 2709 2710/* 2711 * Returns true if the pmap's pv is one of the first 2712 * 16 pvs linked to from this page. This count may 2713 * be changed upwards or downwards in the future; it 2714 * is only necessary that true be returned for a small 2715 * subset of pmaps for proper page aging. 
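 *
 *	Fictitious (unmanaged) pages carry no pv list and always report
 *	FALSE.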
2716 */ 2717boolean_t 2718pmap_page_exists_quick(pmap, m) 2719 pmap_t pmap; 2720 vm_page_t m; 2721{ 2722 pv_entry_t pv; 2723 int loops = 0; 2724 2725 if (m->flags & PG_FICTITIOUS) 2726 return FALSE; 2727 2728 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2729 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { 2730 if (PV_PMAP(pv) == pmap) { 2731 return TRUE; 2732 } 2733 loops++; 2734 if (loops >= 16) 2735 break; 2736 } 2737 return (FALSE); 2738} 2739 2740/* 2741 * Remove all pages from specified address space 2742 * this aids process exit speeds. Also, this code 2743 * is special cased for current process only, but 2744 * can have the more generic (and slightly slower) 2745 * mode enabled. This is much faster than pmap_remove 2746 * in the case of running down an entire address space. 2747 */ 2748void 2749pmap_remove_pages(pmap_t pmap) 2750{ 2751 pt_entry_t *pte, tpte; 2752 vm_page_t m; 2753 pv_entry_t pv; 2754 struct pv_chunk *pc, *npc; 2755 int field, idx; 2756 int32_t bit; 2757 uint32_t inuse, bitmask; 2758 int allfree; 2759 2760 if (pmap != vmspace_pmap(curthread->td_proc->p_vmspace)) { 2761 printf("warning: pmap_remove_pages called with non-current pmap\n"); 2762 return; 2763 } 2764 vm_page_lock_queues(); 2765 PMAP_LOCK(pmap); 2766 sched_pin(); 2767 TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) { 2768 allfree = 1; 2769 for (field = 0; field < _NPCM; field++) { 2770 inuse = (~(pc->pc_map[field])) & pc_freemask[field]; 2771 while (inuse != 0) { 2772 bit = bsfl(inuse); 2773 bitmask = 1UL << bit; 2774 idx = field * 32 + bit; 2775 pv = &pc->pc_pventry[idx]; 2776 inuse &= ~bitmask; 2777 2778 pte = vtopte(pv->pv_va); 2779 tpte = *pte; 2780 2781 if (tpte == 0) { 2782 printf( 2783 "TPTE at %p IS ZERO @ VA %08x\n", 2784 pte, pv->pv_va); 2785 panic("bad pte"); 2786 } 2787 2788/* 2789 * We cannot remove wired pages from a process' mapping at this time 2790 */ 2791 if (tpte & PG_W) { 2792 allfree = 0; 2793 continue; 2794 } 2795 2796 m = PHYS_TO_VM_PAGE(tpte); 2797 KASSERT(m->phys_addr == (tpte & PG_FRAME), 2798 ("vm_page_t %p phys_addr mismatch %016jx %016jx", 2799 m, (uintmax_t)m->phys_addr, 2800 (uintmax_t)tpte)); 2801 2802 KASSERT(m < &vm_page_array[vm_page_array_size], 2803 ("pmap_remove_pages: bad tpte %#jx", 2804 (uintmax_t)tpte)); 2805 2806 pmap->pm_stats.resident_count--; 2807 2808 pte_clear(pte); 2809 2810 /* 2811 * Update the vm_page_t clean/reference bits. 2812 */ 2813 if (tpte & PG_M) 2814 vm_page_dirty(m); 2815 2816 /* Mark free */ 2817 PV_STAT(pv_entry_frees++); 2818 PV_STAT(pv_entry_spare++); 2819 pv_entry_count--; 2820 pc->pc_map[field] |= bitmask; 2821 m->md.pv_list_count--; 2822 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list); 2823 if (TAILQ_EMPTY(&m->md.pv_list)) 2824 vm_page_flag_clear(m, PG_WRITEABLE); 2825 2826 pmap_unuse_pt(pmap, pv->pv_va); 2827 } 2828 } 2829 if (allfree) { 2830 PV_STAT(pv_entry_spare -= _NPCPV); 2831 PV_STAT(pc_chunk_count--); 2832 PV_STAT(pc_chunk_frees++); 2833 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 2834 /* Return to freelist */ 2835 TAILQ_INSERT_HEAD(&pv_freechunks, pc, pc_list); 2836 PV_STAT(pc_chunk_spare++); 2837 } 2838 } 2839 sched_unpin(); 2840 pmap_invalidate_all(pmap); 2841 PMAP_UNLOCK(pmap); 2842 vm_page_unlock_queues(); 2843} 2844 2845/* 2846 * pmap_is_modified: 2847 * 2848 * Return whether or not the specified physical page was modified 2849 * in any physical maps. 
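 *
 *	The scan over the page's pv list stops at the first mapping whose
 *	pte has PG_M set; fictitious pages are never reported modified.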
2850 */ 2851boolean_t 2852pmap_is_modified(vm_page_t m) 2853{ 2854 pv_entry_t pv; 2855 pt_entry_t *pte; 2856 pmap_t pmap; 2857 boolean_t rv; 2858 2859 rv = FALSE; 2860 if (m->flags & PG_FICTITIOUS) 2861 return (rv); 2862 2863 sched_pin(); 2864 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2865 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { 2866 pmap = PV_PMAP(pv); 2867 PMAP_LOCK(pmap); 2868 pte = pmap_pte_quick(pmap, pv->pv_va); 2869 rv = (*pte & PG_M) != 0; 2870 PMAP_UNLOCK(pmap); 2871 if (rv) 2872 break; 2873 } 2874 sched_unpin(); 2875 return (rv); 2876} 2877 2878/* 2879 * pmap_is_prefaultable: 2880 * 2881 * Return whether or not the specified virtual address is elgible 2882 * for prefault. 2883 */ 2884boolean_t 2885pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr) 2886{ 2887 pt_entry_t *pte; 2888 boolean_t rv; 2889 2890 rv = FALSE; 2891 PMAP_LOCK(pmap); 2892 if (*pmap_pde(pmap, addr)) { 2893 pte = vtopte(addr); 2894 rv = *pte == 0; 2895 } 2896 PMAP_UNLOCK(pmap); 2897 return (rv); 2898} 2899 2900/* 2901 * Clear the given bit in each of the given page's ptes. The bit is 2902 * expressed as a 32-bit mask. Consequently, if the pte is 64 bits in 2903 * size, only a bit within the least significant 32 can be cleared. 2904 */ 2905static __inline void 2906pmap_clear_ptes(vm_page_t m, int bit) 2907{ 2908 register pv_entry_t pv; 2909 pmap_t pmap; 2910 pt_entry_t pbits, *pte; 2911 2912 if ((m->flags & PG_FICTITIOUS) || 2913 (bit == PG_RW && (m->flags & PG_WRITEABLE) == 0)) 2914 return; 2915 2916 sched_pin(); 2917 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2918 /* 2919 * Loop over all current mappings setting/clearing as appropos If 2920 * setting RO do we need to clear the VAC? 2921 */ 2922 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { 2923 pmap = PV_PMAP(pv); 2924 PMAP_LOCK(pmap); 2925 pte = pmap_pte_quick(pmap, pv->pv_va); 2926retry: 2927 pbits = *pte; 2928 if (pbits & bit) { 2929 if (bit == PG_RW) { 2930 /* 2931 * Regardless of whether a pte is 32 or 64 bits 2932 * in size, PG_RW and PG_M are among the least 2933 * significant 32 bits. 2934 */ 2935 if (!atomic_cmpset_int((u_int *)pte, pbits, 2936 pbits & ~(PG_RW | PG_M))) 2937 goto retry; 2938 if (pbits & PG_M) { 2939 vm_page_dirty(m); 2940 } 2941 } else { 2942 atomic_clear_int((u_int *)pte, bit); 2943 } 2944 pmap_invalidate_page(pmap, pv->pv_va); 2945 } 2946 PMAP_UNLOCK(pmap); 2947 } 2948 if (bit == PG_RW) 2949 vm_page_flag_clear(m, PG_WRITEABLE); 2950 sched_unpin(); 2951} 2952 2953/* 2954 * pmap_page_protect: 2955 * 2956 * Lower the permission for all mappings to a given page. 2957 */ 2958void 2959pmap_page_protect(vm_page_t m, vm_prot_t prot) 2960{ 2961 if ((prot & VM_PROT_WRITE) == 0) { 2962 if (prot & (VM_PROT_READ | VM_PROT_EXECUTE)) { 2963 pmap_clear_ptes(m, PG_RW); 2964 } else { 2965 pmap_remove_all(m); 2966 } 2967 } 2968} 2969 2970/* 2971 * pmap_ts_referenced: 2972 * 2973 * Return a count of reference bits for a page, clearing those bits. 2974 * It is not necessary for every reference bit to be cleared, but it 2975 * is necessary that 0 only be returned when there are truly no 2976 * reference bits set. 2977 * 2978 * XXX: The exact number of bits to check and clear is a matter that 2979 * should be tested and standardized at some point in the future for 2980 * optimal aging of shared pages. 
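 *
 *	The implementation below clears at most five reference bits per
 *	call and rotates each examined pv entry to the tail of the pv
 *	list, so successive calls start with different mappings.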
2981 */ 2982int 2983pmap_ts_referenced(vm_page_t m) 2984{ 2985 register pv_entry_t pv, pvf, pvn; 2986 pmap_t pmap; 2987 pt_entry_t *pte; 2988 pt_entry_t v; 2989 int rtval = 0; 2990 2991 if (m->flags & PG_FICTITIOUS) 2992 return (rtval); 2993 2994 sched_pin(); 2995 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2996 if ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) { 2997 2998 pvf = pv; 2999 3000 do { 3001 pvn = TAILQ_NEXT(pv, pv_list); 3002 3003 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list); 3004 3005 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list); 3006 3007 pmap = PV_PMAP(pv); 3008 PMAP_LOCK(pmap); 3009 pte = pmap_pte_quick(pmap, pv->pv_va); 3010 3011 if (pte && ((v = pte_load(pte)) & PG_A) != 0) { 3012 atomic_clear_int((u_int *)pte, PG_A); 3013 pmap_invalidate_page(pmap, pv->pv_va); 3014 3015 rtval++; 3016 if (rtval > 4) { 3017 PMAP_UNLOCK(pmap); 3018 break; 3019 } 3020 } 3021 PMAP_UNLOCK(pmap); 3022 } while ((pv = pvn) != NULL && pv != pvf); 3023 } 3024 sched_unpin(); 3025 3026 return (rtval); 3027} 3028 3029/* 3030 * Clear the modify bits on the specified physical page. 3031 */ 3032void 3033pmap_clear_modify(vm_page_t m) 3034{ 3035 pmap_clear_ptes(m, PG_M); 3036} 3037 3038/* 3039 * pmap_clear_reference: 3040 * 3041 * Clear the reference bit on the specified physical page. 3042 */ 3043void 3044pmap_clear_reference(vm_page_t m) 3045{ 3046 pmap_clear_ptes(m, PG_A); 3047} 3048 3049/* 3050 * Miscellaneous support routines follow 3051 */ 3052 3053/* 3054 * Map a set of physical memory pages into the kernel virtual 3055 * address space. Return a pointer to where it is mapped. This 3056 * routine is intended to be used for mapping device memory, 3057 * NOT real memory. 3058 */ 3059void * 3060pmap_mapdev(pa, size) 3061 vm_paddr_t pa; 3062 vm_size_t size; 3063{ 3064 vm_offset_t va, tmpva, offset; 3065 3066 offset = pa & PAGE_MASK; 3067 size = roundup(offset + size, PAGE_SIZE); 3068 pa = pa & PG_FRAME; 3069 3070 if (pa < KERNLOAD && pa + size <= KERNLOAD) 3071 va = KERNBASE + pa; 3072 else 3073 va = kmem_alloc_nofault(kernel_map, size); 3074 if (!va) 3075 panic("pmap_mapdev: Couldn't alloc kernel virtual memory"); 3076 3077 for (tmpva = va; size > 0; ) { 3078 pmap_kenter(tmpva, pa); 3079 size -= PAGE_SIZE; 3080 tmpva += PAGE_SIZE; 3081 pa += PAGE_SIZE; 3082 } 3083 pmap_invalidate_range(kernel_pmap, va, tmpva); 3084 return ((void *)(va + offset)); 3085} 3086 3087void 3088pmap_unmapdev(va, size) 3089 vm_offset_t va; 3090 vm_size_t size; 3091{ 3092 vm_offset_t base, offset, tmpva; 3093 3094 if (va >= KERNBASE && va + size <= KERNBASE + KERNLOAD) 3095 return; 3096 base = va & PG_FRAME; 3097 offset = va & PAGE_MASK; 3098 size = roundup(offset + size, PAGE_SIZE); 3099 for (tmpva = base; tmpva < (base + size); tmpva += PAGE_SIZE) 3100 pmap_kremove(tmpva); 3101 pmap_invalidate_range(kernel_pmap, va, tmpva); 3102 kmem_free(kernel_map, base, size); 3103} 3104 3105/* 3106 * perform the pmap work for mincore 3107 */ 3108int 3109pmap_mincore(pmap, addr) 3110 pmap_t pmap; 3111 vm_offset_t addr; 3112{ 3113 pt_entry_t *ptep, pte; 3114 vm_page_t m; 3115 int val = 0; 3116 3117 PMAP_LOCK(pmap); 3118 ptep = pmap_pte(pmap, addr); 3119 pte = (ptep != NULL) ? 
*ptep : 0; 3120 pmap_pte_release(ptep); 3121 PMAP_UNLOCK(pmap); 3122 3123 if (pte != 0) { 3124 vm_paddr_t pa; 3125 3126 val = MINCORE_INCORE; 3127 if ((pte & PG_MANAGED) == 0) 3128 return val; 3129 3130 pa = pte & PG_FRAME; 3131 3132 m = PHYS_TO_VM_PAGE(pa); 3133 3134 /* 3135 * Modified by us 3136 */ 3137 if (pte & PG_M) 3138 val |= MINCORE_MODIFIED|MINCORE_MODIFIED_OTHER; 3139 else { 3140 /* 3141 * Modified by someone else 3142 */ 3143 vm_page_lock_queues(); 3144 if (m->dirty || pmap_is_modified(m)) 3145 val |= MINCORE_MODIFIED_OTHER; 3146 vm_page_unlock_queues(); 3147 } 3148 /* 3149 * Referenced by us 3150 */ 3151 if (pte & PG_A) 3152 val |= MINCORE_REFERENCED|MINCORE_REFERENCED_OTHER; 3153 else { 3154 /* 3155 * Referenced by someone else 3156 */ 3157 vm_page_lock_queues(); 3158 if ((m->flags & PG_REFERENCED) || 3159 pmap_ts_referenced(m)) { 3160 val |= MINCORE_REFERENCED_OTHER; 3161 vm_page_flag_set(m, PG_REFERENCED); 3162 } 3163 vm_page_unlock_queues(); 3164 } 3165 } 3166 return val; 3167} 3168 3169void 3170pmap_activate(struct thread *td) 3171{ 3172 pmap_t pmap, oldpmap; 3173 u_int32_t cr3; 3174 3175 critical_enter(); 3176 pmap = vmspace_pmap(td->td_proc->p_vmspace); 3177 oldpmap = PCPU_GET(curpmap); 3178#if defined(SMP) 3179 atomic_clear_int(&oldpmap->pm_active, PCPU_GET(cpumask)); 3180 atomic_set_int(&pmap->pm_active, PCPU_GET(cpumask)); 3181#else 3182 oldpmap->pm_active &= ~1; 3183 pmap->pm_active |= 1; 3184#endif 3185#ifdef PAE 3186 cr3 = vtophys(pmap->pm_pdpt); 3187#else 3188 cr3 = vtophys(pmap->pm_pdir); 3189#endif 3190 /* 3191 * pmap_activate is for the current thread on the current cpu 3192 */ 3193 td->td_pcb->pcb_cr3 = cr3; 3194 load_cr3(cr3); 3195 PCPU_SET(curpmap, pmap); 3196 critical_exit(); 3197} 3198 3199vm_offset_t 3200pmap_addr_hint(vm_object_t obj, vm_offset_t addr, vm_size_t size) 3201{ 3202 3203 if ((obj == NULL) || (size < NBPDR) || (obj->type != OBJT_DEVICE)) { 3204 return addr; 3205 } 3206 3207 addr = (addr + PDRMASK) & ~PDRMASK; 3208 return addr; 3209} 3210 3211 3212#if defined(PMAP_DEBUG) 3213pmap_pid_dump(int pid) 3214{ 3215 pmap_t pmap; 3216 struct proc *p; 3217 int npte = 0; 3218 int index; 3219 3220 sx_slock(&allproc_lock); 3221 LIST_FOREACH(p, &allproc, p_list) { 3222 if (p->p_pid != pid) 3223 continue; 3224 3225 if (p->p_vmspace) { 3226 int i,j; 3227 index = 0; 3228 pmap = vmspace_pmap(p->p_vmspace); 3229 for (i = 0; i < NPDEPTD; i++) { 3230 pd_entry_t *pde; 3231 pt_entry_t *pte; 3232 vm_offset_t base = i << PDRSHIFT; 3233 3234 pde = &pmap->pm_pdir[i]; 3235 if (pde && pmap_pde_v(pde)) { 3236 for (j = 0; j < NPTEPG; j++) { 3237 vm_offset_t va = base + (j << PAGE_SHIFT); 3238 if (va >= (vm_offset_t) VM_MIN_KERNEL_ADDRESS) { 3239 if (index) { 3240 index = 0; 3241 printf("\n"); 3242 } 3243 sx_sunlock(&allproc_lock); 3244 return npte; 3245 } 3246 pte = pmap_pte(pmap, va); 3247 if (pte && pmap_pte_v(pte)) { 3248 pt_entry_t pa; 3249 vm_page_t m; 3250 pa = *pte; 3251 m = PHYS_TO_VM_PAGE(pa); 3252 printf("va: 0x%x, pt: 0x%x, h: %d, w: %d, f: 0x%x", 3253 va, pa, m->hold_count, m->wire_count, m->flags); 3254 npte++; 3255 index++; 3256 if (index >= 2) { 3257 index = 0; 3258 printf("\n"); 3259 } else { 3260 printf(" "); 3261 } 3262 } 3263 } 3264 } 3265 } 3266 } 3267 } 3268 sx_sunlock(&allproc_lock); 3269 return npte; 3270} 3271#endif 3272 3273#if defined(DEBUG) 3274 3275static void pads(pmap_t pm); 3276void pmap_pvdump(vm_offset_t pa); 3277 3278/* print address space of pmap*/ 3279static void 3280pads(pm) 3281 pmap_t pm; 3282{ 3283 int i, j; 3284 vm_paddr_t va; 3285 
pt_entry_t *ptep; 3286 3287 if (pm == kernel_pmap) 3288 return; 3289 for (i = 0; i < NPDEPTD; i++) 3290 if (pm->pm_pdir[i]) 3291 for (j = 0; j < NPTEPG; j++) { 3292 va = (i << PDRSHIFT) + (j << PAGE_SHIFT); 3293 if (pm == kernel_pmap && va < KERNBASE) 3294 continue; 3295 if (pm != kernel_pmap && va > UPT_MAX_ADDRESS) 3296 continue; 3297 ptep = pmap_pte(pm, va); 3298 if (pmap_pte_v(ptep)) 3299 printf("%x:%x ", va, *ptep); 3300 }; 3301 3302} 3303 3304void 3305pmap_pvdump(pa) 3306 vm_paddr_t pa; 3307{ 3308 pv_entry_t pv; 3309 pmap_t pmap; 3310 vm_page_t m; 3311 3312 printf("pa %x", pa); 3313 m = PHYS_TO_VM_PAGE(pa); 3314 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { 3315 pmap = PV_PMAP(pv); 3316 printf(" -> pmap %p, va %x", (void *)pmap, pv->pv_va); 3317 pads(pmap); 3318 } 3319 printf(" "); 3320} 3321#endif 3322