pmap.c revision 215525
/*-
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 * Copyright (c) 2005 Alan L. Cox <alc@cs.rice.edu>
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from:	@(#)pmap.c	7.7 (Berkeley)	5/12/91
 */
/*-
 * Copyright (c) 2003 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Jake Burkholder,
 * Safeport Network Services, and Network Associates Laboratories, the
 * Security Research Division of Network Associates, Inc. under
 * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA
 * CHATS research program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/i386/xen/pmap.c 215525 2010-11-19 15:12:19Z cperciva $");

/*
 *	Manages physical address maps.
 *
 *	In addition to hardware address maps, this
 *	module is called upon to provide software-use-only
 *	maps which may or may not be stored in the same
 *	form as hardware maps.  These pseudo-maps are
 *	used to store intermediate results from copy
 *	operations to and from address spaces.
 *
 *	Since the information managed by this module is
 *	also stored by the logical address mapping module,
 *	this module may throw away valid virtual-to-physical
 *	mappings at almost any time.  However, invalidations
 *	of virtual-to-physical mappings must be done as
 *	requested.
 *
 *	In order to cope with hardware architectures which
 *	make virtual-to-physical map invalidates expensive,
 *	this module may delay invalidate or reduced protection
 *	operations until such time as they are actually
 *	necessary.  This module is given full information as
 *	to which processors are currently using which maps,
 *	and to when physical maps must be made correct.
 */

#include "opt_cpu.h"
#include "opt_pmap.h"
#include "opt_msgbuf.h"
#include "opt_smp.h"
#include "opt_xbox.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sf_buf.h>
#include <sys/sx.h>
#include <sys/vmmeter.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#ifdef SMP
#include <sys/smp.h>
#endif

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/uma.h>

#include <machine/cpu.h>
#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/specialreg.h>
#ifdef SMP
#include <machine/smp.h>
#endif

#ifdef XBOX
#include <machine/xbox.h>
#endif

#include <xen/interface/xen.h>
#include <xen/hypervisor.h>
#include <machine/xen/hypercall.h>
#include <machine/xen/xenvar.h>
#include <machine/xen/xenfunc.h>

#if !defined(CPU_DISABLE_SSE) && defined(I686_CPU)
#define CPU_ENABLE_SSE
#endif

#ifndef PMAP_SHPGPERPROC
#define PMAP_SHPGPERPROC 200
#endif

#define DIAGNOSTIC

#if !defined(DIAGNOSTIC)
#ifdef __GNUC_GNU_INLINE__
#define PMAP_INLINE	__attribute__((__gnu_inline__)) inline
#else
#define PMAP_INLINE	extern inline
#endif
#else
#define PMAP_INLINE
#endif

#define PV_STATS
#ifdef PV_STATS
#define PV_STAT(x)	do { x ; } while (0)
#else
#define PV_STAT(x)	do { } while (0)
#endif

#define	pa_index(pa)	((pa) >> PDRSHIFT)
#define	pa_to_pvh(pa)	(&pv_table[pa_index(pa)])

/*
 * Get PDEs and PTEs for user/kernel address space
 */
#define	pmap_pde(m, v)	(&((m)->pm_pdir[(vm_offset_t)(v) >> PDRSHIFT]))
#define pdir_pde(m, v)	(m[(vm_offset_t)(v) >> PDRSHIFT])

#define pmap_pde_v(pte)		((*(int *)pte & PG_V) != 0)
#define pmap_pte_w(pte)		((*(int *)pte & PG_W) != 0)
#define pmap_pte_m(pte)		((*(int *)pte & PG_M) != 0)
#define pmap_pte_u(pte)		((*(int *)pte & PG_A) != 0)
#define pmap_pte_v(pte)		((*(int *)pte & PG_V) != 0)

#define pmap_pte_set_prot(pte, v) ((*(int *)pte &= ~PG_PROT), (*(int *)pte |= (v)))

struct pmap kernel_pmap_store;
LIST_HEAD(pmaplist, pmap);
static struct pmaplist allpmaps;
static struct mtx allpmaps_lock;

vm_offset_t virtual_avail;	/* VA of first avail page (after kernel bss) */
vm_offset_t virtual_end;	/* VA of last avail page (end of kernel AS) */
int pgeflag = 0;		/* PG_G or-in */
int pseflag = 0;		/* PG_PS or-in */

int nkpt;
vm_offset_t kernel_vm_end;
extern u_int32_t KERNend;

#ifdef PAE
pt_entry_t pg_nx;
#if !defined(XEN)
static uma_zone_t pdptzone;
#endif
#endif

static int pat_works;		/* Is page attribute table sane? */

/*
 * Data for the pv entry allocation mechanism
 */
static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;
static struct md_page *pv_table;
static int shpgperproc = PMAP_SHPGPERPROC;

struct pv_chunk *pv_chunkbase;	/* KVA block for pv_chunks */
int pv_maxchunks;		/* How many chunks we have KVA for */
vm_offset_t pv_vafree;		/* freelist stored in the PTE */

/*
 * All those kernel PT submaps that BSD is so fond of
 */
struct sysmaps {
	struct mtx lock;
	pt_entry_t *CMAP1;
	pt_entry_t *CMAP2;
	caddr_t	CADDR1;
	caddr_t	CADDR2;
};
static struct sysmaps sysmaps_pcpu[MAXCPU];
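/*
 * Each CPU gets a private CMAP1/CMAP2 pair guarded by its own mutex, so
 * that page zeroing and page copying on different CPUs do not contend
 * for a single temporary mapping.
 */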
static pt_entry_t *CMAP3;
caddr_t ptvmmap = 0;
static caddr_t CADDR3;
struct msgbuf *msgbufp = 0;

/*
 * Crashdump maps.
 */
static caddr_t crashdumpmap;

static pt_entry_t *PMAP1 = 0, *PMAP2;
static pt_entry_t *PADDR1 = 0, *PADDR2;
#ifdef SMP
static int PMAP1cpu;
static int PMAP1changedcpu;
SYSCTL_INT(_debug, OID_AUTO, PMAP1changedcpu, CTLFLAG_RD,
	   &PMAP1changedcpu, 0,
	   "Number of times pmap_pte_quick changed CPU with same PMAP1");
#endif
static int PMAP1changed;
SYSCTL_INT(_debug, OID_AUTO, PMAP1changed, CTLFLAG_RD,
	   &PMAP1changed, 0,
	   "Number of times pmap_pte_quick changed PMAP1");
static int PMAP1unchanged;
SYSCTL_INT(_debug, OID_AUTO, PMAP1unchanged, CTLFLAG_RD,
	   &PMAP1unchanged, 0,
	   "Number of times pmap_pte_quick didn't change PMAP1");
static struct mtx PMAP2mutex;

SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD, 0, "VM/pmap parameters");
static int pg_ps_enabled;
SYSCTL_INT(_vm_pmap, OID_AUTO, pg_ps_enabled, CTLFLAG_RDTUN, &pg_ps_enabled, 0,
    "Are large page mappings enabled?");

SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_max, CTLFLAG_RD, &pv_entry_max, 0,
	"Max number of PV entries");
SYSCTL_INT(_vm_pmap, OID_AUTO, shpgperproc, CTLFLAG_RD, &shpgperproc, 0,
	"Page share factor per proc");
SYSCTL_NODE(_vm_pmap, OID_AUTO, pde, CTLFLAG_RD, 0,
    "2/4MB page mapping counters");

static u_long pmap_pde_mappings;
SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, mappings, CTLFLAG_RD,
    &pmap_pde_mappings, 0, "2/4MB page mappings");

static void	free_pv_entry(pmap_t pmap, pv_entry_t pv);
static pv_entry_t get_pv_entry(pmap_t locked_pmap, int try);
static void	pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va);
static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap,
		    vm_offset_t va);

static vm_page_t pmap_enter_quick_locked(multicall_entry_t **mcl, int *count,
    pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, vm_page_t mpte);
static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t sva,
    vm_page_t *free);
static void pmap_remove_page(struct pmap *pmap, vm_offset_t va,
    vm_page_t *free);
static void pmap_remove_entry(struct pmap *pmap, vm_page_t m,
    vm_offset_t va);
static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va,
    vm_page_t m);

static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags);

static vm_page_t _pmap_allocpte(pmap_t pmap, unsigned ptepindex, int flags);
static int _pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m, vm_page_t *free);
static pt_entry_t *pmap_pte_quick(pmap_t pmap, vm_offset_t va);
static void pmap_pte_release(pt_entry_t *pte);
static int pmap_unuse_pt(pmap_t, vm_offset_t, vm_page_t *);
static vm_offset_t pmap_kmem_choose(vm_offset_t addr);
static boolean_t pmap_is_prefaultable_locked(pmap_t pmap, vm_offset_t addr);
static void pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode);

static __inline void pagezero(void *page);

#if defined(PAE) && !defined(XEN)
static void *pmap_pdpt_allocf(uma_zone_t zone, int bytes, u_int8_t *flags, int wait);
#endif
#ifndef XEN
static void pmap_set_pg(void);
#endif

CTASSERT(1 << PDESHIFT == sizeof(pd_entry_t));
CTASSERT(1 << PTESHIFT == sizeof(pt_entry_t));

/*
 * If you get an error here, then you set KVA_PAGES wrong! See the
 * description of KVA_PAGES in sys/i386/include/pmap.h. It must be a
 * multiple of 4 for a normal kernel, or a multiple of 8 for a PAE kernel.
 */
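/*
 * KERNBASE must be aligned on a 16MB (1 << 24) boundary.
 */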
CTASSERT(KERNBASE % (1 << 24) == 0);

void
pd_set(struct pmap *pmap, int ptepindex, vm_paddr_t val, int type)
{
	vm_paddr_t pdir_ma = vtomach(&pmap->pm_pdir[ptepindex]);

	switch (type) {
	case SH_PD_SET_VA:
#if 0
		xen_queue_pt_update(shadow_pdir_ma,
				    xpmap_ptom(val & ~(PG_RW)));
#endif
		xen_queue_pt_update(pdir_ma,
				    xpmap_ptom(val));
		break;
	case SH_PD_SET_VA_MA:
#if 0
		xen_queue_pt_update(shadow_pdir_ma,
				    val & ~(PG_RW));
#endif
		xen_queue_pt_update(pdir_ma, val);
		break;
	case SH_PD_SET_VA_CLEAR:
#if 0
		xen_queue_pt_update(shadow_pdir_ma, 0);
#endif
		xen_queue_pt_update(pdir_ma, 0);
		break;
	}
}

/*
 * Move the kernel virtual free pointer to the next
 * 4MB.  This is used to help improve performance
 * by using a large (4MB) page for much of the kernel
 * (.text, .data, .bss)
 */
static vm_offset_t
pmap_kmem_choose(vm_offset_t addr)
{
	vm_offset_t newaddr = addr;

#ifndef DISABLE_PSE
	if (cpu_feature & CPUID_PSE)
		newaddr = (addr + PDRMASK) & ~PDRMASK;
#endif
	return newaddr;
}

/*
 *	Bootstrap the system enough to run with virtual memory.
 *
 *	On the i386 this is called after mapping has already been enabled
 *	and just syncs the pmap module with what has already been done.
 *	[We can't call it easily with mapping off since the kernel is not
 *	mapped with PA == VA, hence we would have to relocate every address
 *	from the linked base (virtual) address "KERNBASE" to the actual
 *	(physical) address starting relative to 0]
 */
void
pmap_bootstrap(vm_paddr_t firstaddr)
{
	vm_offset_t va;
	pt_entry_t *pte, *unused;
	struct sysmaps *sysmaps;
	int i;

	/*
	 * XXX The calculation of virtual_avail is wrong. It's NKPT*PAGE_SIZE too
	 * large. It should instead be correctly calculated in locore.s and
	 * not based on 'first' (which is a physical address, not a virtual
	 * address, for the start of unused physical memory). The kernel
	 * page tables are NOT double mapped and thus should not be included
	 * in this calculation.
	 */
	virtual_avail = (vm_offset_t) KERNBASE + firstaddr;
	virtual_avail = pmap_kmem_choose(virtual_avail);

	virtual_end = VM_MAX_KERNEL_ADDRESS;

	/*
	 * Initialize the kernel pmap (which is statically allocated).
	 */
	PMAP_LOCK_INIT(kernel_pmap);
	kernel_pmap->pm_pdir = (pd_entry_t *) (KERNBASE + (u_int)IdlePTD);
#ifdef PAE
	kernel_pmap->pm_pdpt = (pdpt_entry_t *) (KERNBASE + (u_int)IdlePDPT);
#endif
	kernel_pmap->pm_active = -1;	/* don't allow deactivation */
	TAILQ_INIT(&kernel_pmap->pm_pvchunk);
	LIST_INIT(&allpmaps);
	mtx_init(&allpmaps_lock, "allpmaps", NULL, MTX_SPIN);
	mtx_lock_spin(&allpmaps_lock);
	LIST_INSERT_HEAD(&allpmaps, kernel_pmap, pm_list);
	mtx_unlock_spin(&allpmaps_lock);
	if (nkpt == 0)
		nkpt = NKPT;

	/*
	 * Reserve some special page table entries/VA space for temporary
	 * mapping of pages.
	 */
#define	SYSMAP(c, p, v, n)	\
	v = (c)va; va += ((n)*PAGE_SIZE); p = pte; pte += (n);

	va = virtual_avail;
	pte = vtopte(va);

	/*
	 * CMAP1/CMAP2 are used for zeroing and copying pages.
	 * CMAP3 is used for the idle process page zeroing.
	 */
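	/*
	 * SYSMAP carves 'n' pages of VA out of 'va' and hands back both the
	 * VA ('v') and a pointer to its first PTE ('p'); 'va' and 'pte'
	 * advance in lock step as the reservations below are made.
	 */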
	for (i = 0; i < MAXCPU; i++) {
		sysmaps = &sysmaps_pcpu[i];
		mtx_init(&sysmaps->lock, "SYSMAPS", NULL, MTX_DEF);
		SYSMAP(caddr_t, sysmaps->CMAP1, sysmaps->CADDR1, 1)
		SYSMAP(caddr_t, sysmaps->CMAP2, sysmaps->CADDR2, 1)
		PT_SET_MA(sysmaps->CADDR1, 0);
		PT_SET_MA(sysmaps->CADDR2, 0);
	}
	SYSMAP(caddr_t, CMAP3, CADDR3, 1)
	PT_SET_MA(CADDR3, 0);

	/*
	 * Crashdump maps.
	 */
	SYSMAP(caddr_t, unused, crashdumpmap, MAXDUMPPGS)

	/*
	 * ptvmmap is used for reading arbitrary physical pages via /dev/mem.
	 */
	SYSMAP(caddr_t, unused, ptvmmap, 1)

	/*
	 * msgbufp is used to map the system message buffer.
	 */
	SYSMAP(struct msgbuf *, unused, msgbufp, atop(round_page(MSGBUF_SIZE)))

	/*
	 * ptemap is used for pmap_pte_quick
	 */
	SYSMAP(pt_entry_t *, PMAP1, PADDR1, 1);
	SYSMAP(pt_entry_t *, PMAP2, PADDR2, 1);

	mtx_init(&PMAP2mutex, "PMAP2", NULL, MTX_DEF);

	virtual_avail = va;

	/*
	 * Leave in place an identity mapping (virt == phys) for the low 1 MB
	 * physical memory region that is used by the ACPI wakeup code.  This
	 * mapping must not have PG_G set.
	 */
#ifndef XEN
	/*
	 * leave here deliberately to show that this is not supported
	 */
#ifdef XBOX
	/* FIXME: This is gross, but needed for the XBOX. Since we are at such
	 * an early stage, we cannot yet neatly map video memory ... :-(
	 * Better fixes are very welcome! */
	if (!arch_i386_is_xbox)
#endif
	for (i = 1; i < NKPT; i++)
		PTD[i] = 0;

	/* Initialize the PAT MSR if present. */
	pmap_init_pat();

	/* Turn on PG_G on kernel page(s) */
	pmap_set_pg();
#endif
}

/*
 * Setup the PAT MSR.
 */
void
pmap_init_pat(void)
{
	uint64_t pat_msr;

	/* Bail if this CPU doesn't implement PAT. */
	if (!(cpu_feature & CPUID_PAT))
		return;

	if (cpu_vendor_id != CPU_VENDOR_INTEL ||
	    (CPUID_TO_FAMILY(cpu_id) == 6 && CPUID_TO_MODEL(cpu_id) >= 0xe)) {
		/*
		 * Leave the indices 0-3 at the default of WB, WT, UC, and UC-.
		 * Program 4 and 5 as WP and WC.
		 * Leave 6 and 7 as UC and UC-.
		 */
		pat_msr = rdmsr(MSR_PAT);
		pat_msr &= ~(PAT_MASK(4) | PAT_MASK(5));
		pat_msr |= PAT_VALUE(4, PAT_WRITE_PROTECTED) |
		    PAT_VALUE(5, PAT_WRITE_COMBINING);
		pat_works = 1;
	} else {
		/*
		 * Due to some Intel errata, we can only safely use the lower 4
		 * PAT entries.  Thus, just replace PAT Index 2 with WC instead
		 * of UC-.
		 *
		 *   Intel Pentium III Processor Specification Update
		 * Errata E.27 (Upper Four PAT Entries Not Usable With Mode B
		 * or Mode C Paging)
		 *
		 *   Intel Pentium IV Processor Specification Update
		 * Errata N46 (PAT Index MSB May Be Calculated Incorrectly)
		 */
		pat_msr = rdmsr(MSR_PAT);
		pat_msr &= ~PAT_MASK(2);
		pat_msr |= PAT_VALUE(2, PAT_WRITE_COMBINING);
		pat_works = 0;
	}
	wrmsr(MSR_PAT, pat_msr);
}

#ifndef XEN
/*
 * Set PG_G on kernel pages.  Only the BSP calls this when SMP is turned on.
 */
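/*
 * PG_G marks a mapping as global: it is not flushed from the TLB on a
 * %cr3 reload, so kernel mappings survive context switches.
 */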
static void
pmap_set_pg(void)
{
	pd_entry_t pdir;
	pt_entry_t *pte;
	vm_offset_t va, endva;
	int i;

	if (pgeflag == 0)
		return;

	i = KERNLOAD/NBPDR;
	endva = KERNBASE + KERNend;

	if (pseflag) {
		va = KERNBASE + KERNLOAD;
		while (va < endva) {
			pdir = kernel_pmap->pm_pdir[KPTDI+i];
			pdir |= pgeflag;
			kernel_pmap->pm_pdir[KPTDI+i] = PTD[KPTDI+i] = pdir;
			invltlb();	/* Play it safe, invltlb() every time */
			i++;
			va += NBPDR;
		}
	} else {
		va = (vm_offset_t)btext;
		while (va < endva) {
			pte = vtopte(va);
			if (*pte & PG_V)
				*pte |= pgeflag;
			invltlb();	/* Play it safe, invltlb() every time */
			va += PAGE_SIZE;
		}
	}
}
#endif

/*
 *	Initialize a vm_page's machine-dependent fields.
 */
void
pmap_page_init(vm_page_t m)
{

	TAILQ_INIT(&m->md.pv_list);
	m->md.pat_mode = PAT_WRITE_BACK;
}

#if defined(PAE) && !defined(XEN)
static void *
pmap_pdpt_allocf(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
{

	/* Inform UMA that this allocator uses kernel_map/object. */
	*flags = UMA_SLAB_KERNEL;
	return ((void *)kmem_alloc_contig(kernel_map, bytes, wait, 0x0ULL,
	    0xffffffffULL, 1, 0, VM_MEMATTR_DEFAULT));
}
#endif

/*
 * Abuse the pte nodes for unmapped kva to thread a kva freelist through.
 * Requirements:
 *  - Must deal with pages in order to ensure that none of the PG_* bits
 *    are ever set, PG_V in particular.
 *  - Assumes we can write to ptes without pte_store() atomic ops, even
 *    on PAE systems.  This should be ok.
 *  - Assumes nothing will ever test these addresses for 0 to indicate
 *    no mapping instead of correctly checking PG_V.
 *  - Assumes a vm_offset_t will fit in a pte (true for i386).
 * Because PG_V is never set, there can be no mappings to invalidate.
 */
static int ptelist_count = 0;
static vm_offset_t
pmap_ptelist_alloc(vm_offset_t *head)
{
	vm_offset_t va;
	vm_offset_t *phead = (vm_offset_t *)*head;

	if (ptelist_count == 0) {
		printf("out of memory!!!!!!\n");
		return (0);	/* Out of memory */
	}
	ptelist_count--;
	va = phead[ptelist_count];
	return (va);
}

static void
pmap_ptelist_free(vm_offset_t *head, vm_offset_t va)
{
	vm_offset_t *phead = (vm_offset_t *)*head;

	phead[ptelist_count++] = va;
}

static void
pmap_ptelist_init(vm_offset_t *head, void *base, int npages)
{
	int i, nstackpages;
	vm_offset_t va;
	vm_page_t m;

	nstackpages = (npages + PAGE_SIZE/sizeof(vm_offset_t) - 1)/ (PAGE_SIZE/sizeof(vm_offset_t));
	for (i = 0; i < nstackpages; i++) {
		va = (vm_offset_t)base + i * PAGE_SIZE;
		m = vm_page_alloc(NULL, i,
		    VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
		    VM_ALLOC_ZERO);
		pmap_qenter(va, &m, 1);
	}

	*head = (vm_offset_t)base;
	for (i = npages - 1; i >= nstackpages; i--) {
		va = (vm_offset_t)base + i * PAGE_SIZE;
		pmap_ptelist_free(head, va);
	}
}


/*
 *	Initialize the pmap module.
 *	Called by vm_init, to initialize any structures that the pmap
 *	system needs to map virtual memory.
 */
void
pmap_init(void)
{
	vm_page_t mpte;
	vm_size_t s;
	int i, pv_npg;

	/*
	 * Initialize the vm page array entries for the kernel pmap's
	 * page table pages.
	 */
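	/*
	 * Under Xen the PTD holds machine addresses, so each entry is run
	 * through xpmap_mtop() to recover the pseudo-physical address that
	 * the vm_page array is indexed by.
	 */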
	for (i = 0; i < nkpt; i++) {
		mpte = PHYS_TO_VM_PAGE(xpmap_mtop(PTD[i + KPTDI] & PG_FRAME));
		KASSERT(mpte >= vm_page_array &&
		    mpte < &vm_page_array[vm_page_array_size],
		    ("pmap_init: page table page is out of range"));
		mpte->pindex = i + KPTDI;
		mpte->phys_addr = xpmap_mtop(PTD[i + KPTDI] & PG_FRAME);
	}

	/*
	 * Initialize the address space (zone) for the pv entries.  Set a
	 * high water mark so that the system can recover from excessive
	 * numbers of pv entries.
	 */
	TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
	pv_entry_max = shpgperproc * maxproc + cnt.v_page_count;
	TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max);
	pv_entry_max = roundup(pv_entry_max, _NPCPV);
	pv_entry_high_water = 9 * (pv_entry_max / 10);

	/*
	 * Are large page mappings enabled?
	 */
	TUNABLE_INT_FETCH("vm.pmap.pg_ps_enabled", &pg_ps_enabled);

	/*
	 * Calculate the size of the pv head table for superpages.
	 */
	for (i = 0; phys_avail[i + 1]; i += 2);
	pv_npg = round_4mpage(phys_avail[(i - 2) + 1]) / NBPDR;

	/*
	 * Allocate memory for the pv head table for superpages.
	 */
	s = (vm_size_t)(pv_npg * sizeof(struct md_page));
	s = round_page(s);
	pv_table = (struct md_page *)kmem_alloc(kernel_map, s);
	for (i = 0; i < pv_npg; i++)
		TAILQ_INIT(&pv_table[i].pv_list);

	pv_maxchunks = MAX(pv_entry_max / _NPCPV, maxproc);
	pv_chunkbase = (struct pv_chunk *)kmem_alloc_nofault(kernel_map,
	    PAGE_SIZE * pv_maxchunks);
	if (pv_chunkbase == NULL)
		panic("pmap_init: not enough kvm for pv chunks");
	pmap_ptelist_init(&pv_vafree, pv_chunkbase, pv_maxchunks);
#if defined(PAE) && !defined(XEN)
	pdptzone = uma_zcreate("PDPT", NPGPTD * sizeof(pdpt_entry_t), NULL,
	    NULL, NULL, NULL, (NPGPTD * sizeof(pdpt_entry_t)) - 1,
	    UMA_ZONE_VM | UMA_ZONE_NOFREE);
	uma_zone_set_allocf(pdptzone, pmap_pdpt_allocf);
#endif
}


/***************************************************
 * Low level helper routines.....
 ***************************************************/

/*
 * Determine the appropriate bits to set in a PTE or PDE for a specified
 * caching mode.
 */
int
pmap_cache_bits(int mode, boolean_t is_pde)
{
	int pat_flag, pat_index, cache_bits;

	/* The PAT bit is different for PTE's and PDE's. */
	pat_flag = is_pde ? PG_PDE_PAT : PG_PTE_PAT;

	/* If we don't support PAT, map extended modes to older ones. */
	if (!(cpu_feature & CPUID_PAT)) {
		switch (mode) {
		case PAT_UNCACHEABLE:
		case PAT_WRITE_THROUGH:
		case PAT_WRITE_BACK:
			break;
		case PAT_UNCACHED:
		case PAT_WRITE_COMBINING:
		case PAT_WRITE_PROTECTED:
			mode = PAT_UNCACHEABLE;
			break;
		}
	}

	/* Map the caching mode to a PAT index. */
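	/*
	 * The PAT index chosen below is a 3-bit value that is later split
	 * into the PAT (bit 2), PCD (bit 1), and PWT (bit 0) fields of the
	 * PTE/PDE, exactly the decomposition done at the end of this
	 * function.
	 */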
	if (pat_works) {
		switch (mode) {
		case PAT_UNCACHEABLE:
			pat_index = 3;
			break;
		case PAT_WRITE_THROUGH:
			pat_index = 1;
			break;
		case PAT_WRITE_BACK:
			pat_index = 0;
			break;
		case PAT_UNCACHED:
			pat_index = 2;
			break;
		case PAT_WRITE_COMBINING:
			pat_index = 5;
			break;
		case PAT_WRITE_PROTECTED:
			pat_index = 4;
			break;
		default:
			panic("Unknown caching mode %d\n", mode);
		}
	} else {
		switch (mode) {
		case PAT_UNCACHED:
		case PAT_UNCACHEABLE:
		case PAT_WRITE_PROTECTED:
			pat_index = 3;
			break;
		case PAT_WRITE_THROUGH:
			pat_index = 1;
			break;
		case PAT_WRITE_BACK:
			pat_index = 0;
			break;
		case PAT_WRITE_COMBINING:
			pat_index = 2;
			break;
		default:
			panic("Unknown caching mode %d\n", mode);
		}
	}

	/* Map the 3-bit index value into the PAT, PCD, and PWT bits. */
	cache_bits = 0;
	if (pat_index & 0x4)
		cache_bits |= pat_flag;
	if (pat_index & 0x2)
		cache_bits |= PG_NC_PCD;
	if (pat_index & 0x1)
		cache_bits |= PG_NC_PWT;
	return (cache_bits);
}
#ifdef SMP
/*
 * For SMP, these functions have to use the IPI mechanism for coherence.
 *
 * N.B.: Before calling any of the following TLB invalidation functions,
 * the calling processor must ensure that all stores updating a non-
 * kernel page table are globally performed.  Otherwise, another
 * processor could cache an old, pre-update entry without being
 * invalidated.  This can happen one of two ways: (1) The pmap becomes
 * active on another processor after its pm_active field is checked by
 * one of the following functions but before a store updating the page
 * table is globally performed. (2) The pmap becomes active on another
 * processor before its pm_active field is checked but due to
 * speculative loads one of the following functions still reads the
 * pmap as inactive on the other processor.
 *
 * The kernel page table is exempt because its pm_active field is
 * immutable.  The kernel page table is always active on every
 * processor.
 */
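/*
 * pm_active is a bitmask with one bit per CPU; comparing it against the
 * current CPU's mask (or all_cpus) below decides whether a local
 * invalidation, a targeted IPI, or a broadcast IPI is required.
 */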
void
pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
{
	cpumask_t cpumask, other_cpus;

	CTR2(KTR_PMAP, "pmap_invalidate_page: pmap=%p va=0x%x",
	    pmap, va);

	sched_pin();
	if (pmap == kernel_pmap || pmap->pm_active == all_cpus) {
		invlpg(va);
		smp_invlpg(va);
	} else {
		cpumask = PCPU_GET(cpumask);
		other_cpus = PCPU_GET(other_cpus);
		if (pmap->pm_active & cpumask)
			invlpg(va);
		if (pmap->pm_active & other_cpus)
			smp_masked_invlpg(pmap->pm_active & other_cpus, va);
	}
	sched_unpin();
	PT_UPDATES_FLUSH();
}

void
pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{
	cpumask_t cpumask, other_cpus;
	vm_offset_t addr;

	CTR3(KTR_PMAP, "pmap_invalidate_range: pmap=%p sva=0x%x eva=0x%x",
	    pmap, sva, eva);

	sched_pin();
	if (pmap == kernel_pmap || pmap->pm_active == all_cpus) {
		for (addr = sva; addr < eva; addr += PAGE_SIZE)
			invlpg(addr);
		smp_invlpg_range(sva, eva);
	} else {
		cpumask = PCPU_GET(cpumask);
		other_cpus = PCPU_GET(other_cpus);
		if (pmap->pm_active & cpumask)
			for (addr = sva; addr < eva; addr += PAGE_SIZE)
				invlpg(addr);
		if (pmap->pm_active & other_cpus)
			smp_masked_invlpg_range(pmap->pm_active & other_cpus,
			    sva, eva);
	}
	sched_unpin();
	PT_UPDATES_FLUSH();
}

void
pmap_invalidate_all(pmap_t pmap)
{
	cpumask_t cpumask, other_cpus;

	CTR1(KTR_PMAP, "pmap_invalidate_all: pmap=%p", pmap);

	sched_pin();
	if (pmap == kernel_pmap || pmap->pm_active == all_cpus) {
		invltlb();
		smp_invltlb();
	} else {
		cpumask = PCPU_GET(cpumask);
		other_cpus = PCPU_GET(other_cpus);
		if (pmap->pm_active & cpumask)
			invltlb();
		if (pmap->pm_active & other_cpus)
			smp_masked_invltlb(pmap->pm_active & other_cpus);
	}
	sched_unpin();
}

void
pmap_invalidate_cache(void)
{

	sched_pin();
	wbinvd();
	smp_cache_flush();
	sched_unpin();
}
#else /* !SMP */
/*
 * Normal, non-SMP, 486+ invalidation functions.
 * We inline these within pmap.c for speed.
 */
PMAP_INLINE void
pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
{
	CTR2(KTR_PMAP, "pmap_invalidate_page: pmap=%p va=0x%x",
	    pmap, va);

	if (pmap == kernel_pmap || pmap->pm_active)
		invlpg(va);
	PT_UPDATES_FLUSH();
}

PMAP_INLINE void
pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{
	vm_offset_t addr;

	if (eva - sva > PAGE_SIZE)
		CTR3(KTR_PMAP, "pmap_invalidate_range: pmap=%p sva=0x%x eva=0x%x",
		    pmap, sva, eva);

	if (pmap == kernel_pmap || pmap->pm_active)
		for (addr = sva; addr < eva; addr += PAGE_SIZE)
			invlpg(addr);
	PT_UPDATES_FLUSH();
}

PMAP_INLINE void
pmap_invalidate_all(pmap_t pmap)
{

	CTR1(KTR_PMAP, "pmap_invalidate_all: pmap=%p", pmap);

	if (pmap == kernel_pmap || pmap->pm_active)
		invltlb();
}

PMAP_INLINE void
pmap_invalidate_cache(void)
{

	wbinvd();
}
#endif /* !SMP */

void
pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva)
{

	KASSERT((sva & PAGE_MASK) == 0,
	    ("pmap_invalidate_cache_range: sva not page-aligned"));
	KASSERT((eva & PAGE_MASK) == 0,
	    ("pmap_invalidate_cache_range: eva not page-aligned"));
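	/*
	 * Three strategies, in decreasing order of preference: a
	 * self-snooping CPU needs no flush at all, a CPU with CLFLUSH can
	 * flush just the affected lines, and anything else falls back to a
	 * full wbinvd().
	 */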
	if (cpu_feature & CPUID_SS)
		; /* If "Self Snoop" is supported, do nothing. */
	else if (cpu_feature & CPUID_CLFSH) {

		/*
		 * Otherwise, do per-cache line flush.  Use the mfence
		 * instruction to ensure that previous stores are
		 * included in the write-back.  The processor
		 * propagates flush to other processors in the cache
		 * coherence domain.
		 */
		mfence();
		for (; sva < eva; sva += cpu_clflush_line_size)
			clflush(sva);
		mfence();
	} else {

		/*
		 * No targeted cache flush methods are supported by CPU,
		 * globally invalidate cache as a last resort.
		 */
		pmap_invalidate_cache();
	}
}

/*
 * Are we current address space or kernel?  N.B. We return FALSE when
 * a pmap's page table is in use because a kernel thread is borrowing
 * it.  The borrowed page table can change spontaneously, making any
 * dependence on its continued use subject to a race condition.
 */
static __inline int
pmap_is_current(pmap_t pmap)
{

	return (pmap == kernel_pmap ||
	    (pmap == vmspace_pmap(curthread->td_proc->p_vmspace) &&
	    (pmap->pm_pdir[PTDPTDI] & PG_FRAME) == (PTDpde[0] & PG_FRAME)));
}

/*
 * If the given pmap is not the current or kernel pmap, the returned pte must
 * be released by passing it to pmap_pte_release().
 */
pt_entry_t *
pmap_pte(pmap_t pmap, vm_offset_t va)
{
	pd_entry_t newpf;
	pd_entry_t *pde;

	pde = pmap_pde(pmap, va);
	if (*pde & PG_PS)
		return (pde);
	if (*pde != 0) {
		/* are we current address space or kernel? */
		if (pmap_is_current(pmap))
			return (vtopte(va));
		mtx_lock(&PMAP2mutex);
		newpf = *pde & PG_FRAME;
		if ((*PMAP2 & PG_FRAME) != newpf) {
			vm_page_lock_queues();
			PT_SET_MA(PADDR2, newpf | PG_V | PG_A | PG_M);
			vm_page_unlock_queues();
			CTR3(KTR_PMAP, "pmap_pte: pmap=%p va=0x%x newpte=0x%08x",
			    pmap, va, (*PMAP2 & 0xffffffff));
		}

		return (PADDR2 + (i386_btop(va) & (NPTEPG - 1)));
	}
	return (0);
}

/*
 * Releases a pte that was obtained from pmap_pte().  Be prepared for the pte
 * being NULL.
 */
static __inline void
pmap_pte_release(pt_entry_t *pte)
{

	if ((pt_entry_t *)((vm_offset_t)pte & ~PAGE_MASK) == PADDR2) {
		CTR1(KTR_PMAP, "pmap_pte_release: pte=0x%jx",
		    *PMAP2);
		PT_SET_VA(PMAP2, 0, TRUE);
		mtx_unlock(&PMAP2mutex);
	}
}

static __inline void
invlcaddr(void *caddr)
{

	invlpg((u_int)caddr);
	PT_UPDATES_FLUSH();
}

/*
 * Super fast pmap_pte routine best used when scanning
 * the pv lists.  This eliminates many coarse-grained
 * invltlb calls.  Note that many of the pv list
 * scans are across different pmaps.  It is very wasteful
 * to do an entire invltlb for checking a single mapping.
 *
 * If the given pmap is not the current pmap, vm_page_queue_mtx
 * must be held and curthread pinned to a CPU.
 */
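/*
 * PMAP1 points at a reserved PTE and PADDR1 at the VA it maps;
 * retargeting that single PTE is what lets pmap_pte_quick() peek into
 * another pmap's page table without a full remap.
 */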
static pt_entry_t *
pmap_pte_quick(pmap_t pmap, vm_offset_t va)
{
	pd_entry_t newpf;
	pd_entry_t *pde;

	pde = pmap_pde(pmap, va);
	if (*pde & PG_PS)
		return (pde);
	if (*pde != 0) {
		/* are we current address space or kernel? */
		if (pmap_is_current(pmap))
			return (vtopte(va));
		mtx_assert(&vm_page_queue_mtx, MA_OWNED);
		KASSERT(curthread->td_pinned > 0, ("curthread not pinned"));
		newpf = *pde & PG_FRAME;
		if ((*PMAP1 & PG_FRAME) != newpf) {
			PT_SET_MA(PADDR1, newpf | PG_V | PG_A | PG_M);
			CTR3(KTR_PMAP, "pmap_pte_quick: pmap=%p va=0x%x newpte=0x%08x",
			    pmap, va, (u_long)*PMAP1);

#ifdef SMP
			PMAP1cpu = PCPU_GET(cpuid);
#endif
			PMAP1changed++;
		} else
#ifdef SMP
		if (PMAP1cpu != PCPU_GET(cpuid)) {
			PMAP1cpu = PCPU_GET(cpuid);
			invlcaddr(PADDR1);
			PMAP1changedcpu++;
		} else
#endif
			PMAP1unchanged++;
		return (PADDR1 + (i386_btop(va) & (NPTEPG - 1)));
	}
	return (0);
}

/*
 *	Routine:	pmap_extract
 *	Function:
 *		Extract the physical page address associated
 *		with the given map/virtual_address pair.
 */
vm_paddr_t
pmap_extract(pmap_t pmap, vm_offset_t va)
{
	vm_paddr_t rtval;
	pt_entry_t *pte;
	pd_entry_t pde;
	pt_entry_t pteval;

	rtval = 0;
	PMAP_LOCK(pmap);
	pde = pmap->pm_pdir[va >> PDRSHIFT];
	if (pde != 0) {
		if ((pde & PG_PS) != 0) {
			rtval = xpmap_mtop(pde & PG_PS_FRAME) | (va & PDRMASK);
			PMAP_UNLOCK(pmap);
			return rtval;
		}
		pte = pmap_pte(pmap, va);
		pteval = *pte ? xpmap_mtop(*pte) : 0;
		rtval = (pteval & PG_FRAME) | (va & PAGE_MASK);
		pmap_pte_release(pte);
	}
	PMAP_UNLOCK(pmap);
	return (rtval);
}

/*
 *	Routine:	pmap_extract_ma
 *	Function:
 *		Like pmap_extract, but returns machine address
 */
vm_paddr_t
pmap_extract_ma(pmap_t pmap, vm_offset_t va)
{
	vm_paddr_t rtval;
	pt_entry_t *pte;
	pd_entry_t pde;

	rtval = 0;
	PMAP_LOCK(pmap);
	pde = pmap->pm_pdir[va >> PDRSHIFT];
	if (pde != 0) {
		if ((pde & PG_PS) != 0) {
			rtval = (pde & ~PDRMASK) | (va & PDRMASK);
			PMAP_UNLOCK(pmap);
			return rtval;
		}
		pte = pmap_pte(pmap, va);
		rtval = (*pte & PG_FRAME) | (va & PAGE_MASK);
		pmap_pte_release(pte);
	}
	PMAP_UNLOCK(pmap);
	return (rtval);
}

/*
 *	Routine:	pmap_extract_and_hold
 *	Function:
 *		Atomically extract and hold the physical page
 *		with the given pmap and virtual address pair
 *		if that mapping permits the given protection.
 */
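/*
 * vm_page_pa_tryrelock() may drop and reacquire the pmap lock while
 * taking the page lock; when that happens the PDE/PTE must be re-read,
 * hence the retry loop below.
 */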
vm_page_t
pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
{
	pd_entry_t pde;
	pt_entry_t pte;
	vm_page_t m;
	vm_paddr_t pa;

	pa = 0;
	m = NULL;
	PMAP_LOCK(pmap);
retry:
	pde = PT_GET(pmap_pde(pmap, va));
	if (pde != 0) {
		if (pde & PG_PS) {
			if ((pde & PG_RW) || (prot & VM_PROT_WRITE) == 0) {
				if (vm_page_pa_tryrelock(pmap, (pde & PG_PS_FRAME) |
				       (va & PDRMASK), &pa))
					goto retry;
				m = PHYS_TO_VM_PAGE((pde & PG_PS_FRAME) |
				    (va & PDRMASK));
				vm_page_hold(m);
			}
		} else {
			sched_pin();
			pte = PT_GET(pmap_pte_quick(pmap, va));
			if (*PMAP1)
				PT_SET_MA(PADDR1, 0);
			if ((pte & PG_V) &&
			    ((pte & PG_RW) || (prot & VM_PROT_WRITE) == 0)) {
				if (vm_page_pa_tryrelock(pmap, pte & PG_FRAME, &pa))
					goto retry;
				m = PHYS_TO_VM_PAGE(pte & PG_FRAME);
				vm_page_hold(m);
			}
			sched_unpin();
		}
	}
	PA_UNLOCK_COND(pa);
	PMAP_UNLOCK(pmap);
	return (m);
}

/***************************************************
 * Low level mapping routines.....
 ***************************************************/

/*
 * Add a wired page to the kva.
 * Note: not SMP coherent.
 */
void
pmap_kenter(vm_offset_t va, vm_paddr_t pa)
{
	PT_SET_MA(va, xpmap_ptom(pa)| PG_RW | PG_V | pgeflag);
}

void
pmap_kenter_ma(vm_offset_t va, vm_paddr_t ma)
{
	pt_entry_t *pte;

	pte = vtopte(va);
	pte_store_ma(pte, ma | PG_RW | PG_V | pgeflag);
}


static __inline void
pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode)
{
	PT_SET_MA(va, pa | PG_RW | PG_V | pgeflag | pmap_cache_bits(mode, 0));
}

/*
 * Remove a page from the kernel pagetables.
 * Note: not SMP coherent.
 */
PMAP_INLINE void
pmap_kremove(vm_offset_t va)
{
	pt_entry_t *pte;

	pte = vtopte(va);
	PT_CLEAR_VA(pte, FALSE);
}

/*
 *	Used to map a range of physical addresses into kernel
 *	virtual address space.
 *
 *	The value passed in '*virt' is a suggested virtual address for
 *	the mapping. Architectures which can support a direct-mapped
 *	physical to virtual region can return the appropriate address
 *	within that region, leaving '*virt' unchanged. Other
 *	architectures should map the pages starting at '*virt' and
 *	update '*virt' with the first usable address after the mapped
 *	region.
 */
vm_offset_t
pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
{
	vm_offset_t va, sva;

	va = sva = *virt;
	CTR4(KTR_PMAP, "pmap_map: va=0x%x start=0x%jx end=0x%jx prot=0x%x",
	    va, start, end, prot);
	while (start < end) {
		pmap_kenter(va, start);
		va += PAGE_SIZE;
		start += PAGE_SIZE;
	}
	pmap_invalidate_range(kernel_pmap, sva, va);
	*virt = va;
	return (sva);
}


/*
 * Add a list of wired pages to the kva
 * this routine is only used for temporary
 * kernel mappings that do not need to have
 * page modification or references recorded.
 * Note that old mappings are simply written
 * over.  The page *must* be wired.
 * Note: SMP coherent.  Uses a ranged shootdown IPI.
 */
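/*
 * Rather than touching the PTEs directly, pmap_qenter() batches Xen
 * update_va_mapping operations into a 16-entry multicall, so a whole
 * group of mappings costs a single hypercall.
 */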
void
pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count)
{
	pt_entry_t *endpte, *pte;
	vm_paddr_t pa;
	vm_offset_t va = sva;
	int mclcount = 0;
	multicall_entry_t mcl[16];
	multicall_entry_t *mclp = mcl;
	int error;

	CTR2(KTR_PMAP, "pmap_qenter:sva=0x%x count=%d", va, count);
	pte = vtopte(sva);
	endpte = pte + count;
	while (pte < endpte) {
		pa = xpmap_ptom(VM_PAGE_TO_PHYS(*ma)) | pgeflag | PG_RW | PG_V | PG_M | PG_A;

		mclp->op = __HYPERVISOR_update_va_mapping;
		mclp->args[0] = va;
		mclp->args[1] = (uint32_t)(pa & 0xffffffff);
		mclp->args[2] = (uint32_t)(pa >> 32);
		mclp->args[3] = (*pte & PG_V) ? UVMF_INVLPG|UVMF_ALL : 0;

		va += PAGE_SIZE;
		pte++;
		ma++;
		mclp++;
		mclcount++;
		if (mclcount == 16) {
			error = HYPERVISOR_multicall(mcl, mclcount);
			mclp = mcl;
			mclcount = 0;
			KASSERT(error == 0, ("bad multicall %d", error));
		}
	}
	if (mclcount) {
		error = HYPERVISOR_multicall(mcl, mclcount);
		KASSERT(error == 0, ("bad multicall %d", error));
	}

#ifdef INVARIANTS
	for (pte = vtopte(sva), mclcount = 0; mclcount < count; mclcount++, pte++)
		KASSERT(*pte, ("pte not set for va=0x%x", sva + mclcount*PAGE_SIZE));
#endif
}


/*
 * This routine tears out page mappings from the
 * kernel -- it is meant only for temporary mappings.
 * Note: SMP coherent.  Uses a ranged shootdown IPI.
 */
void
pmap_qremove(vm_offset_t sva, int count)
{
	vm_offset_t va;

	CTR2(KTR_PMAP, "pmap_qremove: sva=0x%x count=%d", sva, count);
	va = sva;
	vm_page_lock_queues();
	critical_enter();
	while (count-- > 0) {
		pmap_kremove(va);
		va += PAGE_SIZE;
	}
	pmap_invalidate_range(kernel_pmap, sva, va);
	critical_exit();
	vm_page_unlock_queues();
}

/***************************************************
 * Page table page management routines.....
 ***************************************************/
static __inline void
pmap_free_zero_pages(vm_page_t free)
{
	vm_page_t m;

	while (free != NULL) {
		m = free;
		free = m->right;
		vm_page_free_zero(m);
	}
}

/*
 * This routine unholds page table pages, and if the hold count
 * drops to zero, then it decrements the wire count.
 */
static __inline int
pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m, vm_page_t *free)
{

	--m->wire_count;
	if (m->wire_count == 0)
		return _pmap_unwire_pte_hold(pmap, m, free);
	else
		return 0;
}

static int
_pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m, vm_page_t *free)
{
	vm_offset_t pteva;

	PT_UPDATES_FLUSH();
	/*
	 * unmap the page table page
	 */
	xen_pt_unpin(pmap->pm_pdir[m->pindex]);
	/*
	 * page *might* contain residual mapping :-/
	 */
	PD_CLEAR_VA(pmap, m->pindex, TRUE);
	pmap_zero_page(m);
	--pmap->pm_stats.resident_count;

	/*
	 * This is a release store so that the ordinary store unmapping
	 * the page table page is globally performed before TLB shoot-
	 * down is begun.
	 */
	atomic_subtract_rel_int(&cnt.v_wire_count, 1);

	/*
	 * Do an invltlb to make the invalidated mapping
	 * take effect immediately.
	 */
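	/*
	 * The page table page is visible through the recursive page table
	 * map just above the user address space; compute that VA so the
	 * stale mapping of it can be shot down.
	 */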
	pteva = VM_MAXUSER_ADDRESS + i386_ptob(m->pindex);
	pmap_invalidate_page(pmap, pteva);

	/*
	 * Put page on a list so that it is released after
	 * *ALL* TLB shootdown is done
	 */
	m->right = *free;
	*free = m;

	return 1;
}

/*
 * After removing a page table entry, this routine is used to
 * conditionally free the page, and manage the hold/wire counts.
 */
static int
pmap_unuse_pt(pmap_t pmap, vm_offset_t va, vm_page_t *free)
{
	pd_entry_t ptepde;
	vm_page_t mpte;

	if (va >= VM_MAXUSER_ADDRESS)
		return 0;
	ptepde = PT_GET(pmap_pde(pmap, va));
	mpte = PHYS_TO_VM_PAGE(ptepde & PG_FRAME);
	return pmap_unwire_pte_hold(pmap, mpte, free);
}

void
pmap_pinit0(pmap_t pmap)
{

	PMAP_LOCK_INIT(pmap);
	pmap->pm_pdir = (pd_entry_t *)(KERNBASE + (vm_offset_t)IdlePTD);
#ifdef PAE
	pmap->pm_pdpt = (pdpt_entry_t *)(KERNBASE + (vm_offset_t)IdlePDPT);
#endif
	pmap->pm_active = 0;
	PCPU_SET(curpmap, pmap);
	TAILQ_INIT(&pmap->pm_pvchunk);
	bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
	mtx_lock_spin(&allpmaps_lock);
	LIST_INSERT_HEAD(&allpmaps, pmap, pm_list);
	mtx_unlock_spin(&allpmaps_lock);
}

/*
 * Initialize a preallocated and zeroed pmap structure,
 * such as one in a vmspace structure.
 */
int
pmap_pinit(pmap_t pmap)
{
	vm_page_t m, ptdpg[NPGPTD + 1];
	int npgptd = NPGPTD + 1;
	static int color;
	int i;

	PMAP_LOCK_INIT(pmap);

	/*
	 * No need to allocate page table space yet but we do need a valid
	 * page directory table.
	 */
	if (pmap->pm_pdir == NULL) {
		pmap->pm_pdir = (pd_entry_t *)kmem_alloc_nofault(kernel_map,
		    NBPTD);
		if (pmap->pm_pdir == NULL) {
			PMAP_LOCK_DESTROY(pmap);
			return (0);
		}
#if defined(XEN) && defined(PAE)
		pmap->pm_pdpt = (pd_entry_t *)kmem_alloc_nofault(kernel_map, 1);
#endif

#if defined(PAE) && !defined(XEN)
		pmap->pm_pdpt = uma_zalloc(pdptzone, M_WAITOK | M_ZERO);
		KASSERT(((vm_offset_t)pmap->pm_pdpt &
		    ((NPGPTD * sizeof(pdpt_entry_t)) - 1)) == 0,
		    ("pmap_pinit: pdpt misaligned"));
		KASSERT(pmap_kextract((vm_offset_t)pmap->pm_pdpt) < (4ULL<<30),
		    ("pmap_pinit: pdpt above 4g"));
#endif
	}

	/*
	 * allocate the page directory page(s)
	 */
	for (i = 0; i < npgptd;) {
		m = vm_page_alloc(NULL, color++,
		    VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
		    VM_ALLOC_ZERO);
		if (m == NULL)
			VM_WAIT;
		else {
			ptdpg[i++] = m;
		}
	}
	pmap_qenter((vm_offset_t)pmap->pm_pdir, ptdpg, NPGPTD);
	for (i = 0; i < NPGPTD; i++) {
		if ((ptdpg[i]->flags & PG_ZERO) == 0)
			pagezero(&pmap->pm_pdir[i*NPTEPG]);
	}

	mtx_lock_spin(&allpmaps_lock);
	LIST_INSERT_HEAD(&allpmaps, pmap, pm_list);
	mtx_unlock_spin(&allpmaps_lock);
	/* Wire in kernel global address entries. */
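	/*
	 * The nkpt kernel PDEs starting at KPTDI are copied into every
	 * pmap, so all address spaces share the same kernel page tables.
	 */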

	bcopy(PTD + KPTDI, pmap->pm_pdir + KPTDI, nkpt * sizeof(pd_entry_t));
#ifdef PAE
#ifdef XEN
	pmap_qenter((vm_offset_t)pmap->pm_pdpt, &ptdpg[NPGPTD], 1);
	if ((ptdpg[NPGPTD]->flags & PG_ZERO) == 0)
		bzero(pmap->pm_pdpt, PAGE_SIZE);
#endif
	for (i = 0; i < NPGPTD; i++) {
		vm_paddr_t ma;

		ma = xpmap_ptom(VM_PAGE_TO_PHYS(ptdpg[i]));
		pmap->pm_pdpt[i] = ma | PG_V;

	}
#endif
#ifdef XEN
	for (i = 0; i < NPGPTD; i++) {
		pt_entry_t *pd;
		vm_paddr_t ma;

		ma = xpmap_ptom(VM_PAGE_TO_PHYS(ptdpg[i]));
		pd = pmap->pm_pdir + (i * NPDEPG);
		PT_SET_MA(pd, *vtopte((vm_offset_t)pd) & ~(PG_M|PG_A|PG_U|PG_RW));
#if 0
		xen_pgd_pin(ma);
#endif
	}

#ifdef PAE
	PT_SET_MA(pmap->pm_pdpt, *vtopte((vm_offset_t)pmap->pm_pdpt) & ~PG_RW);
#endif
	vm_page_lock_queues();
	xen_flush_queue();
	xen_pgdpt_pin(xpmap_ptom(VM_PAGE_TO_PHYS(ptdpg[NPGPTD])));
	for (i = 0; i < NPGPTD; i++) {
		vm_paddr_t ma = xpmap_ptom(VM_PAGE_TO_PHYS(ptdpg[i]));
		PT_SET_VA_MA(&pmap->pm_pdir[PTDPTDI + i], ma | PG_V | PG_A, FALSE);
	}
	xen_flush_queue();
	vm_page_unlock_queues();
#endif
	pmap->pm_active = 0;
	TAILQ_INIT(&pmap->pm_pvchunk);
	bzero(&pmap->pm_stats, sizeof pmap->pm_stats);

	return (1);
}

/*
 * this routine is called if the page table page is not
 * mapped correctly.
 */
static vm_page_t
_pmap_allocpte(pmap_t pmap, unsigned int ptepindex, int flags)
{
	vm_paddr_t ptema;
	vm_page_t m;

	KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT ||
	    (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK,
	    ("_pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK"));

	/*
	 * Allocate a page table page.
	 */
	if ((m = vm_page_alloc(NULL, ptepindex, VM_ALLOC_NOOBJ |
	    VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
		if (flags & M_WAITOK) {
			PMAP_UNLOCK(pmap);
			vm_page_unlock_queues();
			VM_WAIT;
			vm_page_lock_queues();
			PMAP_LOCK(pmap);
		}

		/*
		 * Indicate the need to retry.  While waiting, the page table
		 * page may have been allocated.
		 */
		return (NULL);
	}
	if ((m->flags & PG_ZERO) == 0)
		pmap_zero_page(m);

	/*
	 * Map the pagetable page into the process address space, if
	 * it isn't already there.
	 */
	pmap->pm_stats.resident_count++;

	ptema = xpmap_ptom(VM_PAGE_TO_PHYS(m));
	xen_pt_pin(ptema);
	PT_SET_VA_MA(&pmap->pm_pdir[ptepindex],
		(ptema | PG_U | PG_RW | PG_V | PG_A | PG_M), TRUE);

	KASSERT(pmap->pm_pdir[ptepindex],
	    ("_pmap_allocpte: ptepindex=%d did not get mapped", ptepindex));
	return (m);
}

static vm_page_t
pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags)
{
	unsigned ptepindex;
	pd_entry_t ptema;
	vm_page_t m;

	KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT ||
	    (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK,
	    ("pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK"));

	/*
	 * Calculate pagetable page index
	 */
	ptepindex = va >> PDRSHIFT;
retry:
	/*
	 * Get the page directory entry
	 */
	ptema = pmap->pm_pdir[ptepindex];

	/*
	 * This supports switching from a 4MB page to a
	 * normal 4K page.
	 */
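	/*
	 * The 4MB mapping is discarded outright: the PDE is cleared, the
	 * resident count is adjusted by NBPDR / PAGE_SIZE pages, and the
	 * TLB is flushed.
	 */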
1706 */ 1707 if (ptema & PG_PS) { 1708 /* 1709 * XXX 1710 */ 1711 pmap->pm_pdir[ptepindex] = 0; 1712 ptema = 0; 1713 pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE; 1714 pmap_invalidate_all(kernel_pmap); 1715 } 1716 1717 /* 1718 * If the page table page is mapped, we just increment the 1719 * hold count, and activate it. 1720 */ 1721 if (ptema & PG_V) { 1722 m = PHYS_TO_VM_PAGE(xpmap_mtop(ptema) & PG_FRAME); 1723 m->wire_count++; 1724 } else { 1725 /* 1726 * Here if the pte page isn't mapped, or if it has 1727 * been deallocated. 1728 */ 1729 CTR3(KTR_PMAP, "pmap_allocpte: pmap=%p va=0x%08x flags=0x%x", 1730 pmap, va, flags); 1731 m = _pmap_allocpte(pmap, ptepindex, flags); 1732 if (m == NULL && (flags & M_WAITOK)) 1733 goto retry; 1734 1735 KASSERT(pmap->pm_pdir[ptepindex], ("ptepindex=%d did not get mapped", ptepindex)); 1736 } 1737 return (m); 1738} 1739 1740 1741/*************************************************** 1742* Pmap allocation/deallocation routines. 1743 ***************************************************/ 1744 1745#ifdef SMP 1746/* 1747 * Deal with a SMP shootdown of other users of the pmap that we are 1748 * trying to dispose of. This can be a bit hairy. 1749 */ 1750static cpumask_t *lazymask; 1751static u_int lazyptd; 1752static volatile u_int lazywait; 1753 1754void pmap_lazyfix_action(void); 1755 1756void 1757pmap_lazyfix_action(void) 1758{ 1759 cpumask_t mymask = PCPU_GET(cpumask); 1760 1761#ifdef COUNT_IPIS 1762 (*ipi_lazypmap_counts[PCPU_GET(cpuid)])++; 1763#endif 1764 if (rcr3() == lazyptd) 1765 load_cr3(PCPU_GET(curpcb)->pcb_cr3); 1766 atomic_clear_int(lazymask, mymask); 1767 atomic_store_rel_int(&lazywait, 1); 1768} 1769 1770static void 1771pmap_lazyfix_self(cpumask_t mymask) 1772{ 1773 1774 if (rcr3() == lazyptd) 1775 load_cr3(PCPU_GET(curpcb)->pcb_cr3); 1776 atomic_clear_int(lazymask, mymask); 1777} 1778 1779 1780static void 1781pmap_lazyfix(pmap_t pmap) 1782{ 1783 cpumask_t mymask, mask; 1784 u_int spins; 1785 1786 while ((mask = pmap->pm_active) != 0) { 1787 spins = 50000000; 1788 mask = mask & -mask; /* Find least significant set bit */ 1789 mtx_lock_spin(&smp_ipi_mtx); 1790#ifdef PAE 1791 lazyptd = vtophys(pmap->pm_pdpt); 1792#else 1793 lazyptd = vtophys(pmap->pm_pdir); 1794#endif 1795 mymask = PCPU_GET(cpumask); 1796 if (mask == mymask) { 1797 lazymask = &pmap->pm_active; 1798 pmap_lazyfix_self(mymask); 1799 } else { 1800 atomic_store_rel_int((u_int *)&lazymask, 1801 (u_int)&pmap->pm_active); 1802 atomic_store_rel_int(&lazywait, 0); 1803 ipi_selected(mask, IPI_LAZYPMAP); 1804 while (lazywait == 0) { 1805 ia32_pause(); 1806 if (--spins == 0) 1807 break; 1808 } 1809 } 1810 mtx_unlock_spin(&smp_ipi_mtx); 1811 if (spins == 0) 1812 printf("pmap_lazyfix: spun for 50000000\n"); 1813 } 1814} 1815 1816#else /* SMP */ 1817 1818/* 1819 * Cleaning up on uniprocessor is easy. For various reasons, we're 1820 * unlikely to have to even execute this code, including the fact 1821 * that the cleanup is deferred until the parent does a wait(2), which 1822 * means that another userland process has run. 1823 */ 1824static void 1825pmap_lazyfix(pmap_t pmap) 1826{ 1827 u_int cr3; 1828 1829 cr3 = vtophys(pmap->pm_pdir); 1830 if (cr3 == rcr3()) { 1831 load_cr3(PCPU_GET(curpcb)->pcb_cr3); 1832 pmap->pm_active &= ~(PCPU_GET(cpumask)); 1833 } 1834} 1835#endif /* SMP */ 1836 1837/* 1838 * Release any resources held by the given physical map. 1839 * Called when a pmap initialized by pmap_pinit is being released. 1840 * Should only be called if the map contains no valid mappings. 
void
pmap_release(pmap_t pmap)
{
	vm_page_t m, ptdpg[2*NPGPTD+1];
	vm_paddr_t ma;
	int i;
#ifdef XEN
#ifdef PAE
	int npgptd = NPGPTD + 1;
#else
	int npgptd = NPGPTD;
#endif
#else
	int npgptd = NPGPTD;
#endif
	KASSERT(pmap->pm_stats.resident_count == 0,
	    ("pmap_release: pmap resident count %ld != 0",
	    pmap->pm_stats.resident_count));
	PT_UPDATES_FLUSH();

	pmap_lazyfix(pmap);
	mtx_lock_spin(&allpmaps_lock);
	LIST_REMOVE(pmap, pm_list);
	mtx_unlock_spin(&allpmaps_lock);

	for (i = 0; i < NPGPTD; i++)
		ptdpg[i] = PHYS_TO_VM_PAGE(vtophys(pmap->pm_pdir + (i*NPDEPG)) & PG_FRAME);
	pmap_qremove((vm_offset_t)pmap->pm_pdir, NPGPTD);
#if defined(PAE) && defined(XEN)
	ptdpg[NPGPTD] = PHYS_TO_VM_PAGE(vtophys(pmap->pm_pdpt));
#endif

	for (i = 0; i < npgptd; i++) {
		m = ptdpg[i];
		ma = xpmap_ptom(VM_PAGE_TO_PHYS(m));
		/* unpinning L1 and L2 treated the same */
#if 0
		xen_pgd_unpin(ma);
#else
		if (i == NPGPTD)
			xen_pgd_unpin(ma);
#endif
#ifdef PAE
		if (i < NPGPTD)
			KASSERT(xpmap_ptom(VM_PAGE_TO_PHYS(m)) == (pmap->pm_pdpt[i] & PG_FRAME),
			    ("pmap_release: got wrong ptd page"));
#endif
		m->wire_count--;
		atomic_subtract_int(&cnt.v_wire_count, 1);
		vm_page_free(m);
	}
#ifdef PAE
	pmap_qremove((vm_offset_t)pmap->pm_pdpt, 1);
#endif
	PMAP_LOCK_DESTROY(pmap);
}

static int
kvm_size(SYSCTL_HANDLER_ARGS)
{
	unsigned long ksize = VM_MAX_KERNEL_ADDRESS - KERNBASE;

	return sysctl_handle_long(oidp, &ksize, 0, req);
}
SYSCTL_PROC(_vm, OID_AUTO, kvm_size, CTLTYPE_LONG|CTLFLAG_RD,
    0, 0, kvm_size, "IU", "Size of KVM");

static int
kvm_free(SYSCTL_HANDLER_ARGS)
{
	unsigned long kfree = VM_MAX_KERNEL_ADDRESS - kernel_vm_end;

	return sysctl_handle_long(oidp, &kfree, 0, req);
}
SYSCTL_PROC(_vm, OID_AUTO, kvm_free, CTLTYPE_LONG|CTLFLAG_RD,
    0, 0, kvm_free, "IU", "Amount of KVM free");

/*
 * grow the number of kernel page table entries, if needed
 */
void
pmap_growkernel(vm_offset_t addr)
{
	struct pmap *pmap;
	vm_paddr_t ptppaddr;
	vm_page_t nkpg;
	pd_entry_t newpdir;

	mtx_assert(&kernel_map->system_mtx, MA_OWNED);
	if (kernel_vm_end == 0) {
		kernel_vm_end = KERNBASE;
		nkpt = 0;
		while (pdir_pde(PTD, kernel_vm_end)) {
			kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
			nkpt++;
			if (kernel_vm_end - 1 >= kernel_map->max_offset) {
				kernel_vm_end = kernel_map->max_offset;
				break;
			}
		}
	}
	addr = roundup2(addr, PAGE_SIZE * NPTEPG);
	if (addr - 1 >= kernel_map->max_offset)
		addr = kernel_map->max_offset;
	while (kernel_vm_end < addr) {
		if (pdir_pde(PTD, kernel_vm_end)) {
			kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
			if (kernel_vm_end - 1 >= kernel_map->max_offset) {
				kernel_vm_end = kernel_map->max_offset;
				break;
			}
			continue;
		}

		/*
		 * This index is bogus, but out of the way
		 */
		nkpg = vm_page_alloc(NULL, nkpt,
		    VM_ALLOC_NOOBJ | VM_ALLOC_SYSTEM | VM_ALLOC_WIRED);
		if (!nkpg)
			panic("pmap_growkernel: no memory to grow kernel");

		nkpt++;

		pmap_zero_page(nkpg);
		ptppaddr = VM_PAGE_TO_PHYS(nkpg);
		newpdir = (pd_entry_t) (ptppaddr | PG_V | PG_RW | PG_A | PG_M);
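		/*
		 * The new kernel PDE must be installed in every pmap on the
		 * allpmaps list, not just the kernel pmap, since each address
		 * space carries its own copy of the kernel's page directory
		 * entries.
		 */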
PG_A | PG_M); 1969 vm_page_lock_queues(); 1970 PD_SET_VA(kernel_pmap, (kernel_vm_end >> PDRSHIFT), newpdir, TRUE); 1971 mtx_lock_spin(&allpmaps_lock); 1972 LIST_FOREACH(pmap, &allpmaps, pm_list) 1973 PD_SET_VA(pmap, (kernel_vm_end >> PDRSHIFT), newpdir, TRUE); 1974 1975 mtx_unlock_spin(&allpmaps_lock); 1976 vm_page_unlock_queues(); 1977 1978 kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1); 1979 if (kernel_vm_end - 1 >= kernel_map->max_offset) { 1980 kernel_vm_end = kernel_map->max_offset; 1981 break; 1982 } 1983 } 1984} 1985 1986 1987/*************************************************** 1988 * page management routines. 1989 ***************************************************/ 1990 1991CTASSERT(sizeof(struct pv_chunk) == PAGE_SIZE); 1992CTASSERT(_NPCM == 11); 1993 1994static __inline struct pv_chunk * 1995pv_to_chunk(pv_entry_t pv) 1996{ 1997 1998 return (struct pv_chunk *)((uintptr_t)pv & ~(uintptr_t)PAGE_MASK); 1999} 2000 2001#define PV_PMAP(pv) (pv_to_chunk(pv)->pc_pmap) 2002 2003#define PC_FREE0_9 0xfffffffful /* Free values for index 0 through 9 */ 2004#define PC_FREE10 0x0000fffful /* Free values for index 10 */ 2005 2006static uint32_t pc_freemask[11] = { 2007 PC_FREE0_9, PC_FREE0_9, PC_FREE0_9, 2008 PC_FREE0_9, PC_FREE0_9, PC_FREE0_9, 2009 PC_FREE0_9, PC_FREE0_9, PC_FREE0_9, 2010 PC_FREE0_9, PC_FREE10 2011}; 2012 2013SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_count, CTLFLAG_RD, &pv_entry_count, 0, 2014 "Current number of pv entries"); 2015 2016#ifdef PV_STATS 2017static int pc_chunk_count, pc_chunk_allocs, pc_chunk_frees, pc_chunk_tryfail; 2018 2019SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_count, CTLFLAG_RD, &pc_chunk_count, 0, 2020 "Current number of pv entry chunks"); 2021SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_allocs, CTLFLAG_RD, &pc_chunk_allocs, 0, 2022 "Total number of pv entry chunks allocated"); 2023SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_frees, CTLFLAG_RD, &pc_chunk_frees, 0, 2024 "Total number of pv entry chunks freed"); 2025SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_tryfail, CTLFLAG_RD, &pc_chunk_tryfail, 0, 2026 "Number of failed attempts to allocate a pv entry chunk page"); 2027 2028static long pv_entry_frees, pv_entry_allocs; 2029static int pv_entry_spare; 2030 2031SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_frees, CTLFLAG_RD, &pv_entry_frees, 0, 2032 "Total number of pv entries freed"); 2033SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_allocs, CTLFLAG_RD, &pv_entry_allocs, 0, 2034 "Total number of pv entries allocated"); 2035SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_spare, CTLFLAG_RD, &pv_entry_spare, 0, 2036 "Current number of spare pv entries"); 2037 2038static int pmap_collect_inactive, pmap_collect_active; 2039 2040SYSCTL_INT(_vm_pmap, OID_AUTO, pmap_collect_inactive, CTLFLAG_RD, &pmap_collect_inactive, 0, 2041 "Number of times pmap_collect was called on the inactive queue"); 2042SYSCTL_INT(_vm_pmap, OID_AUTO, pmap_collect_active, CTLFLAG_RD, &pmap_collect_active, 0, 2043 "Number of times pmap_collect was called on the active queue"); 2044#endif 2045 2046/* 2047 * We are in a serious low memory condition. Resort to 2048 * drastic measures to free some pages so we can allocate 2049 * another pv entry chunk. This is normally called to 2050 * unmap inactive pages, and if necessary, active pages.
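 */

/*
 * Illustrative sketch (not part of the original source): the ordered
 * locking idiom used by pmap_collect() below, factored into a
 * hypothetical helper.  A pmap whose address sorts above the lock we
 * already hold may be locked unconditionally; otherwise only a trylock
 * is safe, or two threads reclaiming at once could deadlock.
 */
#ifdef notyet
static __inline int
pmap_lock_ordered(pmap_t pmap, pmap_t locked_pmap)
{

	if (pmap == locked_pmap)
		return (1);
	if (pmap > locked_pmap) {
		PMAP_LOCK(pmap);
		return (1);
	}
	return (PMAP_TRYLOCK(pmap));
}
#endif

/*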
2051 */ 2052static void 2053pmap_collect(pmap_t locked_pmap, struct vpgqueues *vpq) 2054{ 2055 pmap_t pmap; 2056 pt_entry_t *pte, tpte; 2057 pv_entry_t next_pv, pv; 2058 vm_offset_t va; 2059 vm_page_t m, free; 2060 2061 sched_pin(); 2062 TAILQ_FOREACH(m, &vpq->pl, pageq) { 2063 if (m->hold_count || m->busy) 2064 continue; 2065 TAILQ_FOREACH_SAFE(pv, &m->md.pv_list, pv_list, next_pv) { 2066 va = pv->pv_va; 2067 pmap = PV_PMAP(pv); 2068 /* Avoid deadlock and lock recursion. */ 2069 if (pmap > locked_pmap) 2070 PMAP_LOCK(pmap); 2071 else if (pmap != locked_pmap && !PMAP_TRYLOCK(pmap)) 2072 continue; 2073 pmap->pm_stats.resident_count--; 2074 pte = pmap_pte_quick(pmap, va); 2075 tpte = pte_load_clear(pte); 2076 KASSERT((tpte & PG_W) == 0, 2077 ("pmap_collect: wired pte %#jx", (uintmax_t)tpte)); 2078 if (tpte & PG_A) 2079 vm_page_flag_set(m, PG_REFERENCED); 2080 if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) 2081 vm_page_dirty(m); 2082 free = NULL; 2083 pmap_unuse_pt(pmap, va, &free); 2084 pmap_invalidate_page(pmap, va); 2085 pmap_free_zero_pages(free); 2086 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list); 2087 free_pv_entry(pmap, pv); 2088 if (pmap != locked_pmap) 2089 PMAP_UNLOCK(pmap); 2090 } 2091 if (TAILQ_EMPTY(&m->md.pv_list)) 2092 vm_page_flag_clear(m, PG_WRITEABLE); 2093 } 2094 sched_unpin(); 2095} 2096 2097 2098/* 2099 * free the pv_entry back to the free list 2100 */ 2101static void 2102free_pv_entry(pmap_t pmap, pv_entry_t pv) 2103{ 2104 vm_page_t m; 2105 struct pv_chunk *pc; 2106 int idx, field, bit; 2107 2108 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2109 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2110 PV_STAT(pv_entry_frees++); 2111 PV_STAT(pv_entry_spare++); 2112 pv_entry_count--; 2113 pc = pv_to_chunk(pv); 2114 idx = pv - &pc->pc_pventry[0]; 2115 field = idx / 32; 2116 bit = idx % 32; 2117 pc->pc_map[field] |= 1ul << bit; 2118 /* move to head of list */ 2119 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 2120 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list); 2121 for (idx = 0; idx < _NPCM; idx++) 2122 if (pc->pc_map[idx] != pc_freemask[idx]) 2123 return; 2124 PV_STAT(pv_entry_spare -= _NPCPV); 2125 PV_STAT(pc_chunk_count--); 2126 PV_STAT(pc_chunk_frees++); 2127 /* entire chunk is free, return it */ 2128 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 2129 m = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)pc)); 2130 pmap_qremove((vm_offset_t)pc, 1); 2131 vm_page_unwire(m, 0); 2132 vm_page_free(m); 2133 pmap_ptelist_free(&pv_vafree, (vm_offset_t)pc); 2134} 2135 2136/* 2137 * get a new pv_entry, allocating a block from the system 2138 * when needed. 
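 */

/*
 * Illustrative sketch (not part of the original source): the pv chunk
 * bitmap arithmetic used by free_pv_entry() above and get_pv_entry()
 * below.  A chunk packs _NPCPV pv entries plus an _NPCM-word free
 * bitmap into a single page, so entry "idx" corresponds to bit
 * (idx % 32) of word (idx / 32).
 */
#ifdef notyet
static __inline void
pv_chunk_mark_free(struct pv_chunk *pc, pv_entry_t pv)
{
	int idx = pv - &pc->pc_pventry[0];

	/* Setting the bit marks the slot free, as in free_pv_entry(). */
	pc->pc_map[idx / 32] |= 1ul << (idx % 32);
}
#endif

/*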
2139 */ 2140static pv_entry_t 2141get_pv_entry(pmap_t pmap, int try) 2142{ 2143 static const struct timeval printinterval = { 60, 0 }; 2144 static struct timeval lastprint; 2145 static vm_pindex_t colour; 2146 struct vpgqueues *pq; 2147 int bit, field; 2148 pv_entry_t pv; 2149 struct pv_chunk *pc; 2150 vm_page_t m; 2151 2152 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2153 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2154 PV_STAT(pv_entry_allocs++); 2155 pv_entry_count++; 2156 if (pv_entry_count > pv_entry_high_water) 2157 if (ratecheck(&lastprint, &printinterval)) 2158 printf("Approaching the limit on PV entries, consider " 2159 "increasing either the vm.pmap.shpgperproc or the " 2160 "vm.pmap.pv_entry_max tunable.\n"); 2161 pq = NULL; 2162retry: 2163 pc = TAILQ_FIRST(&pmap->pm_pvchunk); 2164 if (pc != NULL) { 2165 for (field = 0; field < _NPCM; field++) { 2166 if (pc->pc_map[field]) { 2167 bit = bsfl(pc->pc_map[field]); 2168 break; 2169 } 2170 } 2171 if (field < _NPCM) { 2172 pv = &pc->pc_pventry[field * 32 + bit]; 2173 pc->pc_map[field] &= ~(1ul << bit); 2174 /* If this was the last item, move it to tail */ 2175 for (field = 0; field < _NPCM; field++) 2176 if (pc->pc_map[field] != 0) { 2177 PV_STAT(pv_entry_spare--); 2178 return (pv); /* not full, return */ 2179 } 2180 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 2181 TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list); 2182 PV_STAT(pv_entry_spare--); 2183 return (pv); 2184 } 2185 } 2186 /* 2187 * Access to the ptelist "pv_vafree" is synchronized by the page 2188 * queues lock. If "pv_vafree" is currently non-empty, it will 2189 * remain non-empty until pmap_ptelist_alloc() completes. 2190 */ 2191 if (pv_vafree == 0 || (m = vm_page_alloc(NULL, colour, (pq == 2192 &vm_page_queues[PQ_ACTIVE] ? VM_ALLOC_SYSTEM : VM_ALLOC_NORMAL) | 2193 VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) { 2194 if (try) { 2195 pv_entry_count--; 2196 PV_STAT(pc_chunk_tryfail++); 2197 return (NULL); 2198 } 2199 /* 2200 * Reclaim pv entries: At first, destroy mappings to 2201 * inactive pages. After that, if a pv chunk entry 2202 * is still needed, destroy mappings to active pages. 
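 *
 * Schematically, the escalation ladder below is:
 *
 *	pq == NULL        -> reclaim from PQ_INACTIVE
 *	pq == PQ_INACTIVE -> reclaim from PQ_ACTIVE
 *	pq == PQ_ACTIVE   -> panic, the pv entry limit must be raised
 *
 * so at most two reclamation passes are made before giving up.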
2203 */ 2204 if (pq == NULL) { 2205 PV_STAT(pmap_collect_inactive++); 2206 pq = &vm_page_queues[PQ_INACTIVE]; 2207 } else if (pq == &vm_page_queues[PQ_INACTIVE]) { 2208 PV_STAT(pmap_collect_active++); 2209 pq = &vm_page_queues[PQ_ACTIVE]; 2210 } else 2211 panic("get_pv_entry: increase vm.pmap.shpgperproc"); 2212 pmap_collect(pmap, pq); 2213 goto retry; 2214 } 2215 PV_STAT(pc_chunk_count++); 2216 PV_STAT(pc_chunk_allocs++); 2217 colour++; 2218 pc = (struct pv_chunk *)pmap_ptelist_alloc(&pv_vafree); 2219 pmap_qenter((vm_offset_t)pc, &m, 1); 2220 if ((m->flags & PG_ZERO) == 0) 2221 pagezero(pc); 2222 pc->pc_pmap = pmap; 2223 pc->pc_map[0] = pc_freemask[0] & ~1ul; /* preallocated bit 0 */ 2224 for (field = 1; field < _NPCM; field++) 2225 pc->pc_map[field] = pc_freemask[field]; 2226 pv = &pc->pc_pventry[0]; 2227 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list); 2228 PV_STAT(pv_entry_spare += _NPCPV - 1); 2229 return (pv); 2230} 2231 2232static __inline pv_entry_t 2233pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va) 2234{ 2235 pv_entry_t pv; 2236 2237 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2238 TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) { 2239 if (pmap == PV_PMAP(pv) && va == pv->pv_va) { 2240 TAILQ_REMOVE(&pvh->pv_list, pv, pv_list); 2241 break; 2242 } 2243 } 2244 return (pv); 2245} 2246 2247static void 2248pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va) 2249{ 2250 pv_entry_t pv; 2251 2252 pv = pmap_pvh_remove(pvh, pmap, va); 2253 KASSERT(pv != NULL, ("pmap_pvh_free: pv not found")); 2254 free_pv_entry(pmap, pv); 2255} 2256 2257static void 2258pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va) 2259{ 2260 2261 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2262 pmap_pvh_free(&m->md, pmap, va); 2263 if (TAILQ_EMPTY(&m->md.pv_list)) 2264 vm_page_flag_clear(m, PG_WRITEABLE); 2265} 2266 2267/* 2268 * Conditionally create a pv entry. 2269 */ 2270static boolean_t 2271pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m) 2272{ 2273 pv_entry_t pv; 2274 2275 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2276 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2277 if (pv_entry_count < pv_entry_high_water && 2278 (pv = get_pv_entry(pmap, TRUE)) != NULL) { 2279 pv->pv_va = va; 2280 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list); 2281 return (TRUE); 2282 } else 2283 return (FALSE); 2284} 2285 2286/* 2287 * pmap_remove_pte: do the work to unmap a single page in a process 2288 */ 2289static int 2290pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t va, vm_page_t *free) 2291{ 2292 pt_entry_t oldpte; 2293 vm_page_t m; 2294 2295 CTR3(KTR_PMAP, "pmap_remove_pte: pmap=%p *ptq=0x%x va=0x%x", 2296 pmap, (u_long)*ptq, va); 2297 2298 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2299 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2300 oldpte = *ptq; 2301 PT_SET_VA_MA(ptq, 0, TRUE); 2302 if (oldpte & PG_W) 2303 pmap->pm_stats.wired_count -= 1; 2304 /* 2305 * Machines that don't support invlpg also don't support 2306 * PG_G. 2307 */ 2308 if (oldpte & PG_G) 2309 pmap_invalidate_page(kernel_pmap, va); 2310 pmap->pm_stats.resident_count -= 1; 2311 /* 2312 * XXX This is not strictly correct, but somewhere along the line 2313 * we are losing the managed bit on some pages. It is unclear to me 2314 * why, but I think the most likely explanation is that xen's writable 2315 * page table implementation doesn't respect the unused bits.
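 *
 * The fallback below therefore treats any valid user-space pte as if it
 * were managed, logging the inconsistency instead of leaking a pv entry.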
2316 */ 2317 if ((oldpte & PG_MANAGED) || ((oldpte & PG_V) && (va < VM_MAXUSER_ADDRESS)) 2318 ) { 2319 m = PHYS_TO_VM_PAGE(xpmap_mtop(oldpte) & PG_FRAME); 2320 2321 if (!(oldpte & PG_MANAGED)) 2322 printf("va=0x%x is unmanaged :-( pte=0x%llx\n", va, oldpte); 2323 2324 if ((oldpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) 2325 vm_page_dirty(m); 2326 if (oldpte & PG_A) 2327 vm_page_flag_set(m, PG_REFERENCED); 2328 pmap_remove_entry(pmap, m, va); 2329 } else if ((va < VM_MAXUSER_ADDRESS) && (oldpte & PG_V)) 2330 printf("va=0x%x is unmanaged :-( pte=0x%llx\n", va, oldpte); 2331 2332 return (pmap_unuse_pt(pmap, va, free)); 2333} 2334 2335/* 2336 * Remove a single page from a process address space 2337 */ 2338static void 2339pmap_remove_page(pmap_t pmap, vm_offset_t va, vm_page_t *free) 2340{ 2341 pt_entry_t *pte; 2342 2343 CTR2(KTR_PMAP, "pmap_remove_page: pmap=%p va=0x%x", 2344 pmap, va); 2345 2346 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2347 KASSERT(curthread->td_pinned > 0, ("curthread not pinned")); 2348 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2349 if ((pte = pmap_pte_quick(pmap, va)) == NULL || (*pte & PG_V) == 0) 2350 return; 2351 pmap_remove_pte(pmap, pte, va, free); 2352 pmap_invalidate_page(pmap, va); 2353 if (*PMAP1) 2354 PT_SET_MA(PADDR1, 0); 2355 2356} 2357 2358/* 2359 * Remove the given range of addresses from the specified map. 2360 * 2361 * It is assumed that the start and end are properly 2362 * rounded to the page size. 2363 */ 2364void 2365pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) 2366{ 2367 vm_offset_t pdnxt; 2368 pd_entry_t ptpaddr; 2369 pt_entry_t *pte; 2370 vm_page_t free = NULL; 2371 int anyvalid; 2372 2373 CTR3(KTR_PMAP, "pmap_remove: pmap=%p sva=0x%x eva=0x%x", 2374 pmap, sva, eva); 2375 2376 /* 2377 * Perform an unsynchronized read. This is, however, safe. 2378 */ 2379 if (pmap->pm_stats.resident_count == 0) 2380 return; 2381 2382 anyvalid = 0; 2383 2384 vm_page_lock_queues(); 2385 sched_pin(); 2386 PMAP_LOCK(pmap); 2387 2388 /* 2389 * special handling of removing one page. a very 2390 * common operation and easy to short circuit some 2391 * code. 2392 */ 2393 if ((sva + PAGE_SIZE == eva) && 2394 ((pmap->pm_pdir[(sva >> PDRSHIFT)] & PG_PS) == 0)) { 2395 pmap_remove_page(pmap, sva, &free); 2396 goto out; 2397 } 2398 2399 for (; sva < eva; sva = pdnxt) { 2400 unsigned pdirindex; 2401 2402 /* 2403 * Calculate index for next page table. 2404 */ 2405 pdnxt = (sva + NBPDR) & ~PDRMASK; 2406 if (pmap->pm_stats.resident_count == 0) 2407 break; 2408 2409 pdirindex = sva >> PDRSHIFT; 2410 ptpaddr = pmap->pm_pdir[pdirindex]; 2411 2412 /* 2413 * Weed out invalid mappings. Note: we assume that the page 2414 * directory table is always allocated, and in kernel virtual. 2415 */ 2416 if (ptpaddr == 0) 2417 continue; 2418 2419 /* 2420 * Check for large page. 2421 */ 2422 if ((ptpaddr & PG_PS) != 0) { 2423 PD_CLEAR_VA(pmap, pdirindex, TRUE); 2424 pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE; 2425 anyvalid = 1; 2426 continue; 2427 } 2428 2429 /* 2430 * Limit our scan to either the end of the va represented 2431 * by the current page table page, or to the end of the 2432 * range being removed. 2433 */ 2434 if (pdnxt > eva) 2435 pdnxt = eva; 2436 2437 for (pte = pmap_pte_quick(pmap, sva); sva != pdnxt; pte++, 2438 sva += PAGE_SIZE) { 2439 if ((*pte & PG_V) == 0) 2440 continue; 2441 2442 /* 2443 * The TLB entry for a PG_G mapping is invalidated 2444 * by pmap_remove_pte(). 
2445 */ 2446 if ((*pte & PG_G) == 0) 2447 anyvalid = 1; 2448 if (pmap_remove_pte(pmap, pte, sva, &free)) 2449 break; 2450 } 2451 } 2452 PT_UPDATES_FLUSH(); 2453 if (*PMAP1) 2454 PT_SET_VA_MA(PMAP1, 0, TRUE); 2455out: 2456 if (anyvalid) 2457 pmap_invalidate_all(pmap); 2458 sched_unpin(); 2459 vm_page_unlock_queues(); 2460 PMAP_UNLOCK(pmap); 2461 pmap_free_zero_pages(free); 2462} 2463 2464/* 2465 * Routine: pmap_remove_all 2466 * Function: 2467 * Removes this physical page from 2468 * all physical maps in which it resides. 2469 * Reflects back modify bits to the pager. 2470 * 2471 * Notes: 2472 * Original versions of this routine were very 2473 * inefficient because they iteratively called 2474 * pmap_remove (slow...) 2475 */ 2476 2477void 2478pmap_remove_all(vm_page_t m) 2479{ 2480 pv_entry_t pv; 2481 pmap_t pmap; 2482 pt_entry_t *pte, tpte; 2483 vm_page_t free; 2484 2485 KASSERT((m->flags & PG_FICTITIOUS) == 0, 2486 ("pmap_remove_all: page %p is fictitious", m)); 2487 free = NULL; 2488 vm_page_lock_queues(); 2489 sched_pin(); 2490 while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) { 2491 pmap = PV_PMAP(pv); 2492 PMAP_LOCK(pmap); 2493 pmap->pm_stats.resident_count--; 2494 pte = pmap_pte_quick(pmap, pv->pv_va); 2495 2496 tpte = *pte; 2497 PT_SET_VA_MA(pte, 0, TRUE); 2498 if (tpte & PG_W) 2499 pmap->pm_stats.wired_count--; 2500 if (tpte & PG_A) 2501 vm_page_flag_set(m, PG_REFERENCED); 2502 2503 /* 2504 * Update the vm_page_t clean and reference bits. 2505 */ 2506 if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) 2507 vm_page_dirty(m); 2508 pmap_unuse_pt(pmap, pv->pv_va, &free); 2509 pmap_invalidate_page(pmap, pv->pv_va); 2510 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list); 2511 free_pv_entry(pmap, pv); 2512 PMAP_UNLOCK(pmap); 2513 } 2514 vm_page_flag_clear(m, PG_WRITEABLE); 2515 PT_UPDATES_FLUSH(); 2516 if (*PMAP1) 2517 PT_SET_MA(PADDR1, 0); 2518 sched_unpin(); 2519 vm_page_unlock_queues(); 2520 pmap_free_zero_pages(free); 2521} 2522 2523/* 2524 * Set the physical protection on the 2525 * specified range of this map as requested. 2526 */ 2527void 2528pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot) 2529{ 2530 vm_offset_t pdnxt; 2531 pd_entry_t ptpaddr; 2532 pt_entry_t *pte; 2533 int anychanged; 2534 2535 CTR4(KTR_PMAP, "pmap_protect: pmap=%p sva=0x%x eva=0x%x prot=0x%x", 2536 pmap, sva, eva, prot); 2537 2538 if ((prot & VM_PROT_READ) == VM_PROT_NONE) { 2539 pmap_remove(pmap, sva, eva); 2540 return; 2541 } 2542 2543#ifdef PAE 2544 if ((prot & (VM_PROT_WRITE|VM_PROT_EXECUTE)) == 2545 (VM_PROT_WRITE|VM_PROT_EXECUTE)) 2546 return; 2547#else 2548 if (prot & VM_PROT_WRITE) 2549 return; 2550#endif 2551 2552 anychanged = 0; 2553 2554 vm_page_lock_queues(); 2555 sched_pin(); 2556 PMAP_LOCK(pmap); 2557 for (; sva < eva; sva = pdnxt) { 2558 pt_entry_t obits, pbits; 2559 unsigned pdirindex; 2560 2561 pdnxt = (sva + NBPDR) & ~PDRMASK; 2562 2563 pdirindex = sva >> PDRSHIFT; 2564 ptpaddr = pmap->pm_pdir[pdirindex]; 2565 2566 /* 2567 * Weed out invalid mappings. Note: we assume that the page 2568 * directory table is always allocated, and in kernel virtual. 2569 */ 2570 if (ptpaddr == 0) 2571 continue; 2572 2573 /* 2574 * Check for large page. 
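 *
 * A 2/4MB mapping is adjusted on the pde itself: write protection
 * clears PG_M and PG_RW in place and, under PAE, may also set pg_nx.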
2575 */ 2576 if ((ptpaddr & PG_PS) != 0) { 2577 if ((prot & VM_PROT_WRITE) == 0) 2578 pmap->pm_pdir[pdirindex] &= ~(PG_M|PG_RW); 2579#ifdef PAE 2580 if ((prot & VM_PROT_EXECUTE) == 0) 2581 pmap->pm_pdir[pdirindex] |= pg_nx; 2582#endif 2583 anychanged = 1; 2584 continue; 2585 } 2586 2587 if (pdnxt > eva) 2588 pdnxt = eva; 2589 2590 for (pte = pmap_pte_quick(pmap, sva); sva != pdnxt; pte++, 2591 sva += PAGE_SIZE) { 2592 vm_page_t m; 2593 2594retry: 2595 /* 2596 * Regardless of whether a pte is 32 or 64 bits in 2597 * size, PG_RW, PG_A, and PG_M are among the least 2598 * significant 32 bits. 2599 */ 2600 obits = pbits = *pte; 2601 if ((pbits & PG_V) == 0) 2602 continue; 2603 2604 if ((prot & VM_PROT_WRITE) == 0) { 2605 if ((pbits & (PG_MANAGED | PG_M | PG_RW)) == 2606 (PG_MANAGED | PG_M | PG_RW)) { 2607 m = PHYS_TO_VM_PAGE(xpmap_mtop(pbits) & 2608 PG_FRAME); 2609 vm_page_dirty(m); 2610 } 2611 pbits &= ~(PG_RW | PG_M); 2612 } 2613#ifdef PAE 2614 if ((prot & VM_PROT_EXECUTE) == 0) 2615 pbits |= pg_nx; 2616#endif 2617 2618 if (pbits != obits) { 2619#ifdef XEN 2620 obits = *pte; 2621 PT_SET_VA_MA(pte, pbits, TRUE); 2622 if (*pte != pbits) 2623 goto retry; 2624#else 2625#ifdef PAE 2626 if (!atomic_cmpset_64(pte, obits, pbits)) 2627 goto retry; 2628#else 2629 if (!atomic_cmpset_int((u_int *)pte, obits, 2630 pbits)) 2631 goto retry; 2632#endif 2633#endif 2634 if (obits & PG_G) 2635 pmap_invalidate_page(pmap, sva); 2636 else 2637 anychanged = 1; 2638 } 2639 } 2640 } 2641 PT_UPDATES_FLUSH(); 2642 if (*PMAP1) 2643 PT_SET_VA_MA(PMAP1, 0, TRUE); 2644 if (anychanged) 2645 pmap_invalidate_all(pmap); 2646 sched_unpin(); 2647 vm_page_unlock_queues(); 2648 PMAP_UNLOCK(pmap); 2649} 2650 2651/* 2652 * Insert the given physical page (p) at 2653 * the specified virtual address (v) in the 2654 * target physical map with the protection requested. 2655 * 2656 * If specified, the page will be wired down, meaning 2657 * that the related pte can not be reclaimed. 2658 * 2659 * NB: This is the only routine which MAY NOT lazy-evaluate 2660 * or lose information. That is, this routine must actually 2661 * insert this page into the given map NOW. 2662 */ 2663void 2664pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m, 2665 vm_prot_t prot, boolean_t wired) 2666{ 2667 pd_entry_t *pde; 2668 pt_entry_t *pte; 2669 pt_entry_t newpte, origpte; 2670 pv_entry_t pv; 2671 vm_paddr_t opa, pa; 2672 vm_page_t mpte, om; 2673 boolean_t invlva; 2674 2675 CTR6(KTR_PMAP, "pmap_enter: pmap=%08p va=0x%08x access=0x%x ma=0x%08x prot=0x%x wired=%d", 2676 pmap, va, access, xpmap_ptom(VM_PAGE_TO_PHYS(m)), prot, wired); 2677 va = trunc_page(va); 2678 KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig")); 2679 KASSERT(va < UPT_MIN_ADDRESS || va >= UPT_MAX_ADDRESS, 2680 ("pmap_enter: invalid to pmap_enter page table pages (va: 0x%x)", 2681 va)); 2682 KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 || 2683 (m->oflags & VPO_BUSY) != 0, 2684 ("pmap_enter: page %p is not busy", m)); 2685 2686 mpte = NULL; 2687 2688 vm_page_lock_queues(); 2689 PMAP_LOCK(pmap); 2690 sched_pin(); 2691 2692 /* 2693 * In the case that a page table page is not 2694 * resident, we are creating it here. 
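 *
 * Only user addresses take this path; kernel page table pages are
 * created by pmap_growkernel() and are never allocated on demand here.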
2695 */ 2696 if (va < VM_MAXUSER_ADDRESS) { 2697 mpte = pmap_allocpte(pmap, va, M_WAITOK); 2698 } 2699 2700 pde = pmap_pde(pmap, va); 2701 if ((*pde & PG_PS) != 0) 2702 panic("pmap_enter: attempted pmap_enter on 4MB page"); 2703 pte = pmap_pte_quick(pmap, va); 2704 2705 /* 2706 * Page Directory table entry not valid, we need a new PT page 2707 */ 2708 if (pte == NULL) { 2709 panic("pmap_enter: invalid page directory pdir=%#jx, va=%#x", 2710 (uintmax_t)pmap->pm_pdir[va >> PDRSHIFT], va); 2711 } 2712 2713 pa = VM_PAGE_TO_PHYS(m); 2714 om = NULL; 2715 opa = origpte = 0; 2716 2717#if 0 2718 KASSERT((*pte & PG_V) || (*pte == 0), ("address set but not valid pte=%p *pte=0x%016jx", 2719 pte, *pte)); 2720#endif 2721 origpte = *pte; 2722 if (origpte) 2723 origpte = xpmap_mtop(origpte); 2724 opa = origpte & PG_FRAME; 2725 2726 /* 2727 * Mapping has not changed, must be protection or wiring change. 2728 */ 2729 if (origpte && (opa == pa)) { 2730 /* 2731 * Wiring change, just update stats. We don't worry about 2732 * wiring PT pages as they remain resident as long as there 2733 * are valid mappings in them. Hence, if a user page is wired, 2734 * the PT page will be also. 2735 */ 2736 if (wired && ((origpte & PG_W) == 0)) 2737 pmap->pm_stats.wired_count++; 2738 else if (!wired && (origpte & PG_W)) 2739 pmap->pm_stats.wired_count--; 2740 2741 /* 2742 * Remove extra pte reference 2743 */ 2744 if (mpte) 2745 mpte->wire_count--; 2746 2747 if (origpte & PG_MANAGED) { 2748 om = m; 2749 pa |= PG_MANAGED; 2750 } 2751 goto validate; 2752 } 2753 2754 pv = NULL; 2755 2756 /* 2757 * Mapping has changed, invalidate old range and fall through to 2758 * handle validating new mapping. 2759 */ 2760 if (opa) { 2761 if (origpte & PG_W) 2762 pmap->pm_stats.wired_count--; 2763 if (origpte & PG_MANAGED) { 2764 om = PHYS_TO_VM_PAGE(opa); 2765 pv = pmap_pvh_remove(&om->md, pmap, va); 2766 } else if (va < VM_MAXUSER_ADDRESS) 2767 printf("va=0x%x is unmanaged :-( \n", va); 2768 2769 if (mpte != NULL) { 2770 mpte->wire_count--; 2771 KASSERT(mpte->wire_count > 0, 2772 ("pmap_enter: missing reference to page table page," 2773 " va: 0x%x", va)); 2774 } 2775 } else 2776 pmap->pm_stats.resident_count++; 2777 2778 /* 2779 * Enter on the PV list if part of our managed memory. 2780 */ 2781 if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) { 2782 KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva, 2783 ("pmap_enter: managed mapping within the clean submap")); 2784 if (pv == NULL) 2785 pv = get_pv_entry(pmap, FALSE); 2786 pv->pv_va = va; 2787 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list); 2788 pa |= PG_MANAGED; 2789 } else if (pv != NULL) 2790 free_pv_entry(pmap, pv); 2791 2792 /* 2793 * Increment counters 2794 */ 2795 if (wired) 2796 pmap->pm_stats.wired_count++; 2797 2798validate: 2799 /* 2800 * Now validate mapping with desired protection/wiring. 2801 */ 2802 newpte = (pt_entry_t)(pa | PG_V); 2803 if ((prot & VM_PROT_WRITE) != 0) { 2804 newpte |= PG_RW; 2805 if ((newpte & PG_MANAGED) != 0) 2806 vm_page_flag_set(m, PG_WRITEABLE); 2807 } 2808#ifdef PAE 2809 if ((prot & VM_PROT_EXECUTE) == 0) 2810 newpte |= pg_nx; 2811#endif 2812 if (wired) 2813 newpte |= PG_W; 2814 if (va < VM_MAXUSER_ADDRESS) 2815 newpte |= PG_U; 2816 if (pmap == kernel_pmap) 2817 newpte |= pgeflag; 2818 2819 critical_enter(); 2820 /* 2821 * if the mapping or permission bits are different, we need 2822 * to update the pte. 
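 *
 * PG_M and PG_A are masked out of the comparison because the hardware
 * may set them at any time; by themselves they never force a rewrite.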
2823 */ 2824 if ((origpte & ~(PG_M|PG_A)) != newpte) { 2825 if (origpte) { 2826 invlva = FALSE; 2827 origpte = *pte; 2828 PT_SET_VA(pte, newpte | PG_A, FALSE); 2829 if (origpte & PG_A) { 2830 if (origpte & PG_MANAGED) 2831 vm_page_flag_set(om, PG_REFERENCED); 2832 if (opa != VM_PAGE_TO_PHYS(m)) 2833 invlva = TRUE; 2834#ifdef PAE 2835 if ((origpte & PG_NX) == 0 && 2836 (newpte & PG_NX) != 0) 2837 invlva = TRUE; 2838#endif 2839 } 2840 if ((origpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) { 2841 if ((origpte & PG_MANAGED) != 0) 2842 vm_page_dirty(om); 2843 if ((prot & VM_PROT_WRITE) == 0) 2844 invlva = TRUE; 2845 } 2846 if ((origpte & PG_MANAGED) != 0 && 2847 TAILQ_EMPTY(&om->md.pv_list)) 2848 vm_page_flag_clear(om, PG_WRITEABLE); 2849 if (invlva) 2850 pmap_invalidate_page(pmap, va); 2851 } else{ 2852 PT_SET_VA(pte, newpte | PG_A, FALSE); 2853 } 2854 2855 } 2856 PT_UPDATES_FLUSH(); 2857 critical_exit(); 2858 if (*PMAP1) 2859 PT_SET_VA_MA(PMAP1, 0, TRUE); 2860 sched_unpin(); 2861 vm_page_unlock_queues(); 2862 PMAP_UNLOCK(pmap); 2863} 2864 2865/* 2866 * Maps a sequence of resident pages belonging to the same object. 2867 * The sequence begins with the given page m_start. This page is 2868 * mapped at the given virtual address start. Each subsequent page is 2869 * mapped at a virtual address that is offset from start by the same 2870 * amount as the page is offset from m_start within the object. The 2871 * last page in the sequence is the page with the largest offset from 2872 * m_start that can be mapped at a virtual address less than the given 2873 * virtual address end. Not every virtual page between start and end 2874 * is mapped; only those for which a resident page exists with the 2875 * corresponding offset from m_start are mapped. 2876 */ 2877void 2878pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end, 2879 vm_page_t m_start, vm_prot_t prot) 2880{ 2881 vm_page_t m, mpte; 2882 vm_pindex_t diff, psize; 2883 multicall_entry_t mcl[16]; 2884 multicall_entry_t *mclp = mcl; 2885 int error, count = 0; 2886 2887 VM_OBJECT_LOCK_ASSERT(m_start->object, MA_OWNED); 2888 psize = atop(end - start); 2889 2890 mpte = NULL; 2891 m = m_start; 2892 vm_page_lock_queues(); 2893 PMAP_LOCK(pmap); 2894 while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) { 2895 mpte = pmap_enter_quick_locked(&mclp, &count, pmap, start + ptoa(diff), m, 2896 prot, mpte); 2897 m = TAILQ_NEXT(m, listq); 2898 if (count == 16) { 2899 error = HYPERVISOR_multicall(mcl, count); 2900 KASSERT(error == 0, ("bad multicall %d", error)); 2901 mclp = mcl; 2902 count = 0; 2903 } 2904 } 2905 if (count) { 2906 error = HYPERVISOR_multicall(mcl, count); 2907 KASSERT(error == 0, ("bad multicall %d", error)); 2908 } 2909 vm_page_unlock_queues(); 2910 PMAP_UNLOCK(pmap); 2911} 2912 2913/* 2914 * this code makes some *MAJOR* assumptions: 2915 * 1. Current pmap & pmap exists. 2916 * 2. Not wired. 2917 * 3. Read access. 2918 * 4. No page table pages. 2919 * but is *MUCH* faster than pmap_enter... 
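 */

/*
 * For illustration only (not part of the original source): a caller
 * honoring the assumptions above would prefault read-only into the
 * current pmap and fall back to the full pmap_enter() path otherwise.
 * The function below is a hypothetical example, not an existing API.
 */
#ifdef notyet
static void
pmap_prefault_example(pmap_t pmap, vm_offset_t va, vm_page_t m)
{

	/* Read access, not wired, current pmap: the quick path applies. */
	pmap_enter_quick(pmap, va, m, VM_PROT_READ);
}
#endif

/*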
2920 */ 2921 2922void 2923pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot) 2924{ 2925 multicall_entry_t mcl, *mclp; 2926 int count = 0; 2927 mclp = &mcl; 2928 2929 CTR4(KTR_PMAP, "pmap_enter_quick: pmap=%p va=0x%x m=%p prot=0x%x", 2930 pmap, va, m, prot); 2931 2932 vm_page_lock_queues(); 2933 PMAP_LOCK(pmap); 2934 (void)pmap_enter_quick_locked(&mclp, &count, pmap, va, m, prot, NULL); 2935 if (count) 2936 HYPERVISOR_multicall(&mcl, count); 2937 vm_page_unlock_queues(); 2938 PMAP_UNLOCK(pmap); 2939} 2940 2941#ifdef notyet 2942void 2943pmap_enter_quick_range(pmap_t pmap, vm_offset_t *addrs, vm_page_t *pages, vm_prot_t *prots, int count) 2944{ 2945 int i, error, index = 0; 2946 multicall_entry_t mcl[16]; 2947 multicall_entry_t *mclp = mcl; 2948 2949 PMAP_LOCK(pmap); 2950 for (i = 0; i < count; i++, addrs++, pages++, prots++) { 2951 if (!pmap_is_prefaultable_locked(pmap, *addrs)) 2952 continue; 2953 2954 (void) pmap_enter_quick_locked(&mclp, &index, pmap, *addrs, *pages, *prots, NULL); 2955 if (index == 16) { 2956 error = HYPERVISOR_multicall(mcl, index); 2957 mclp = mcl; 2958 index = 0; 2959 KASSERT(error == 0, ("bad multicall %d", error)); 2960 } 2961 } 2962 if (index) { 2963 error = HYPERVISOR_multicall(mcl, index); 2964 KASSERT(error == 0, ("bad multicall %d", error)); 2965 } 2966 2967 PMAP_UNLOCK(pmap); 2968} 2969#endif 2970 2971static vm_page_t 2972pmap_enter_quick_locked(multicall_entry_t **mclpp, int *count, pmap_t pmap, vm_offset_t va, vm_page_t m, 2973 vm_prot_t prot, vm_page_t mpte) 2974{ 2975 pt_entry_t *pte; 2976 vm_paddr_t pa; 2977 vm_page_t free; 2978 multicall_entry_t *mcl = *mclpp; 2979 2980 KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva || 2981 (m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0, 2982 ("pmap_enter_quick_locked: managed mapping within the clean submap")); 2983 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2984 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2985 2986 /* 2987 * In the case that a page table page is not 2988 * resident, we are creating it here. 2989 */ 2990 if (va < VM_MAXUSER_ADDRESS) { 2991 unsigned ptepindex; 2992 pd_entry_t ptema; 2993 2994 /* 2995 * Calculate pagetable page index 2996 */ 2997 ptepindex = va >> PDRSHIFT; 2998 if (mpte && (mpte->pindex == ptepindex)) { 2999 mpte->wire_count++; 3000 } else { 3001 /* 3002 * Get the page directory entry 3003 */ 3004 ptema = pmap->pm_pdir[ptepindex]; 3005 3006 /* 3007 * If the page table page is mapped, we just increment 3008 * the hold count, and activate it. 3009 */ 3010 if (ptema & PG_V) { 3011 if (ptema & PG_PS) 3012 panic("pmap_enter_quick: unexpected mapping into 4MB page"); 3013 mpte = PHYS_TO_VM_PAGE(xpmap_mtop(ptema) & PG_FRAME); 3014 mpte->wire_count++; 3015 } else { 3016 mpte = _pmap_allocpte(pmap, ptepindex, 3017 M_NOWAIT); 3018 if (mpte == NULL) 3019 return (mpte); 3020 } 3021 } 3022 } else { 3023 mpte = NULL; 3024 } 3025 3026 /* 3027 * This call to vtopte makes the assumption that we are 3028 * entering the page into the current pmap. In order to support 3029 * quick entry into any pmap, one would likely use pmap_pte_quick. 3030 * But that isn't as quick as vtopte. 3031 */ 3032 KASSERT(pmap_is_current(pmap), ("entering pages in non-current pmap")); 3033 pte = vtopte(va); 3034 if (*pte & PG_V) { 3035 if (mpte != NULL) { 3036 mpte->wire_count--; 3037 mpte = NULL; 3038 } 3039 return (mpte); 3040 } 3041 3042 /* 3043 * Enter on the PV list if part of our managed memory. 
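 *
 * If no pv entry can be had, the freshly wired page table page (if
 * any) is released below and the mapping is abandoned.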
3044 */ 3045 if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0 && 3046 !pmap_try_insert_pv_entry(pmap, va, m)) { 3047 if (mpte != NULL) { 3048 free = NULL; 3049 if (pmap_unwire_pte_hold(pmap, mpte, &free)) { 3050 pmap_invalidate_page(pmap, va); 3051 pmap_free_zero_pages(free); 3052 } 3053 3054 mpte = NULL; 3055 } 3056 return (mpte); 3057 } 3058 3059 /* 3060 * Increment counters 3061 */ 3062 pmap->pm_stats.resident_count++; 3063 3064 pa = VM_PAGE_TO_PHYS(m); 3065#ifdef PAE 3066 if ((prot & VM_PROT_EXECUTE) == 0) 3067 pa |= pg_nx; 3068#endif 3069 3070#if 0 3071 /* 3072 * Now validate mapping with RO protection 3073 */ 3074 if (m->flags & (PG_FICTITIOUS|PG_UNMANAGED)) 3075 pte_store(pte, pa | PG_V | PG_U); 3076 else 3077 pte_store(pte, pa | PG_V | PG_U | PG_MANAGED); 3078#else 3079 /* 3080 * Now validate mapping with RO protection 3081 */ 3082 if (m->flags & (PG_FICTITIOUS|PG_UNMANAGED)) 3083 pa = xpmap_ptom(pa | PG_V | PG_U); 3084 else 3085 pa = xpmap_ptom(pa | PG_V | PG_U | PG_MANAGED); 3086 3087 mcl->op = __HYPERVISOR_update_va_mapping; 3088 mcl->args[0] = va; 3089 mcl->args[1] = (uint32_t)(pa & 0xffffffff); 3090 mcl->args[2] = (uint32_t)(pa >> 32); 3091 mcl->args[3] = 0; 3092 *mclpp = mcl + 1; 3093 *count = *count + 1; 3094#endif 3095 return mpte; 3096} 3097 3098/* 3099 * Make a temporary mapping for a physical address. This is only intended 3100 * to be used for panic dumps. 3101 */ 3102void * 3103pmap_kenter_temporary(vm_paddr_t pa, int i) 3104{ 3105 vm_offset_t va; 3106 vm_paddr_t ma = xpmap_ptom(pa); 3107 3108 va = (vm_offset_t)crashdumpmap + (i * PAGE_SIZE); 3109 PT_SET_MA(va, (ma & ~PAGE_MASK) | PG_V | pgeflag); 3110 invlpg(va); 3111 return ((void *)crashdumpmap); 3112} 3113 3114/* 3115 * This code maps large physical mmap regions into the 3116 * processor address space. Note that some shortcuts 3117 * are taken, but the code works. 3118 */ 3119void 3120pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, 3121 vm_object_t object, vm_pindex_t pindex, 3122 vm_size_t size) 3123{ 3124 pd_entry_t *pde; 3125 vm_paddr_t pa, ptepa; 3126 vm_page_t p; 3127 int pat_mode; 3128 3129 VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); 3130 KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG, 3131 ("pmap_object_init_pt: non-device object")); 3132 if (pseflag && 3133 (addr & (NBPDR - 1)) == 0 && (size & (NBPDR - 1)) == 0) { 3134 if (!vm_object_populate(object, pindex, pindex + atop(size))) 3135 return; 3136 p = vm_page_lookup(object, pindex); 3137 KASSERT(p->valid == VM_PAGE_BITS_ALL, 3138 ("pmap_object_init_pt: invalid page %p", p)); 3139 pat_mode = p->md.pat_mode; 3140 /* 3141 * Abort the mapping if the first page is not physically 3142 * aligned to a 2/4MB page boundary. 3143 */ 3144 ptepa = VM_PAGE_TO_PHYS(p); 3145 if (ptepa & (NBPDR - 1)) 3146 return; 3147 /* 3148 * Skip the first page. Abort the mapping if the rest of 3149 * the pages are not physically contiguous or have differing 3150 * memory attributes. 3151 */ 3152 p = TAILQ_NEXT(p, listq); 3153 for (pa = ptepa + PAGE_SIZE; pa < ptepa + size; 3154 pa += PAGE_SIZE) { 3155 KASSERT(p->valid == VM_PAGE_BITS_ALL, 3156 ("pmap_object_init_pt: invalid page %p", p)); 3157 if (pa != VM_PAGE_TO_PHYS(p) || 3158 pat_mode != p->md.pat_mode) 3159 return; 3160 p = TAILQ_NEXT(p, listq); 3161 } 3162 /* Map using 2/4MB pages. 
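 * Each iteration installs one superpage pde; entries that are already
 * valid are left untouched rather than overwritten.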
*/ 3163 PMAP_LOCK(pmap); 3164 for (pa = ptepa | pmap_cache_bits(pat_mode, 1); pa < ptepa + 3165 size; pa += NBPDR) { 3166 pde = pmap_pde(pmap, addr); 3167 if (*pde == 0) { 3168 pde_store(pde, pa | PG_PS | PG_M | PG_A | 3169 PG_U | PG_RW | PG_V); 3170 pmap->pm_stats.resident_count += NBPDR / 3171 PAGE_SIZE; 3172 pmap_pde_mappings++; 3173 } 3174 /* Else continue on if the PDE is already valid. */ 3175 addr += NBPDR; 3176 } 3177 PMAP_UNLOCK(pmap); 3178 } 3179} 3180 3181/* 3182 * Routine: pmap_change_wiring 3183 * Function: Change the wiring attribute for a map/virtual-address 3184 * pair. 3185 * In/out conditions: 3186 * The mapping must already exist in the pmap. 3187 */ 3188void 3189pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired) 3190{ 3191 pt_entry_t *pte; 3192 3193 vm_page_lock_queues(); 3194 PMAP_LOCK(pmap); 3195 pte = pmap_pte(pmap, va); 3196 3197 if (wired && !pmap_pte_w(pte)) { 3198 PT_SET_VA_MA((pte), *(pte) | PG_W, TRUE); 3199 pmap->pm_stats.wired_count++; 3200 } else if (!wired && pmap_pte_w(pte)) { 3201 PT_SET_VA_MA((pte), *(pte) & ~PG_W, TRUE); 3202 pmap->pm_stats.wired_count--; 3203 } 3204 3205 /* 3206 * Wiring is not a hardware characteristic so there is no need to 3207 * invalidate TLB. 3208 */ 3209 pmap_pte_release(pte); 3210 PMAP_UNLOCK(pmap); 3211 vm_page_unlock_queues(); 3212} 3213 3214 3215 3216/* 3217 * Copy the range specified by src_addr/len 3218 * from the source map to the range dst_addr/len 3219 * in the destination map. 3220 * 3221 * This routine is only advisory and need not do anything. 3222 */ 3223 3224void 3225pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len, 3226 vm_offset_t src_addr) 3227{ 3228 vm_page_t free; 3229 vm_offset_t addr; 3230 vm_offset_t end_addr = src_addr + len; 3231 vm_offset_t pdnxt; 3232 3233 if (dst_addr != src_addr) 3234 return; 3235 3236 if (!pmap_is_current(src_pmap)) { 3237 CTR2(KTR_PMAP, 3238 "pmap_copy, skipping: pdir[PTDPTDI]=0x%jx PTDpde[0]=0x%jx", 3239 (src_pmap->pm_pdir[PTDPTDI] & PG_FRAME), (PTDpde[0] & PG_FRAME)); 3240 3241 return; 3242 } 3243 CTR5(KTR_PMAP, "pmap_copy: dst_pmap=%p src_pmap=%p dst_addr=0x%x len=%d src_addr=0x%x", 3244 dst_pmap, src_pmap, dst_addr, len, src_addr); 3245 3246 vm_page_lock_queues(); 3247 if (dst_pmap < src_pmap) { 3248 PMAP_LOCK(dst_pmap); 3249 PMAP_LOCK(src_pmap); 3250 } else { 3251 PMAP_LOCK(src_pmap); 3252 PMAP_LOCK(dst_pmap); 3253 } 3254 sched_pin(); 3255 for (addr = src_addr; addr < end_addr; addr = pdnxt) { 3256 pt_entry_t *src_pte, *dst_pte; 3257 vm_page_t dstmpte, srcmpte; 3258 pd_entry_t srcptepaddr; 3259 unsigned ptepindex; 3260 3261 KASSERT(addr < UPT_MIN_ADDRESS, 3262 ("pmap_copy: invalid to pmap_copy page tables")); 3263 3264 pdnxt = (addr + NBPDR) & ~PDRMASK; 3265 ptepindex = addr >> PDRSHIFT; 3266 3267 srcptepaddr = PT_GET(&src_pmap->pm_pdir[ptepindex]); 3268 if (srcptepaddr == 0) 3269 continue; 3270 3271 if (srcptepaddr & PG_PS) { 3272 if (dst_pmap->pm_pdir[ptepindex] == 0) { 3273 PD_SET_VA(dst_pmap, ptepindex, srcptepaddr & ~PG_W, TRUE); 3274 dst_pmap->pm_stats.resident_count += 3275 NBPDR / PAGE_SIZE; 3276 } 3277 continue; 3278 } 3279 3280 srcmpte = PHYS_TO_VM_PAGE(srcptepaddr & PG_FRAME); 3281 KASSERT(srcmpte->wire_count > 0, 3282 ("pmap_copy: source page table page is unused")); 3283 3284 if (pdnxt > end_addr) 3285 pdnxt = end_addr; 3286 3287 src_pte = vtopte(addr); 3288 while (addr < pdnxt) { 3289 pt_entry_t ptetemp; 3290 ptetemp = *src_pte; 3291 /* 3292 * we only virtual copy managed pages 3293 */ 3294 if ((ptetemp & PG_MANAGED) != 
0) { 3295 dstmpte = pmap_allocpte(dst_pmap, addr, 3296 M_NOWAIT); 3297 if (dstmpte == NULL) 3298 break; 3299 dst_pte = pmap_pte_quick(dst_pmap, addr); 3300 if (*dst_pte == 0 && 3301 pmap_try_insert_pv_entry(dst_pmap, addr, 3302 PHYS_TO_VM_PAGE(xpmap_mtop(ptetemp) & PG_FRAME))) { 3303 /* 3304 * Clear the wired, modified, and 3305 * accessed (referenced) bits 3306 * during the copy. 3307 */ 3308 KASSERT(ptetemp != 0, ("src_pte not set")); 3309 PT_SET_VA_MA(dst_pte, ptetemp & ~(PG_W | PG_M | PG_A), TRUE /* XXX debug */); 3310 KASSERT(*dst_pte == (ptetemp & ~(PG_W | PG_M | PG_A)), 3311 ("no pmap copy expected: 0x%jx saw: 0x%jx", 3312 ptetemp & ~(PG_W | PG_M | PG_A), *dst_pte)); 3313 dst_pmap->pm_stats.resident_count++; 3314 } else { 3315 free = NULL; 3316 if (pmap_unwire_pte_hold(dst_pmap, 3317 dstmpte, &free)) { 3318 pmap_invalidate_page(dst_pmap, 3319 addr); 3320 pmap_free_zero_pages(free); 3321 } 3322 } 3323 if (dstmpte->wire_count >= srcmpte->wire_count) 3324 break; 3325 } 3326 addr += PAGE_SIZE; 3327 src_pte++; 3328 } 3329 } 3330 PT_UPDATES_FLUSH(); 3331 sched_unpin(); 3332 vm_page_unlock_queues(); 3333 PMAP_UNLOCK(src_pmap); 3334 PMAP_UNLOCK(dst_pmap); 3335} 3336 3337static __inline void 3338pagezero(void *page) 3339{ 3340#if defined(I686_CPU) 3341 if (cpu_class == CPUCLASS_686) { 3342#if defined(CPU_ENABLE_SSE) 3343 if (cpu_feature & CPUID_SSE2) 3344 sse2_pagezero(page); 3345 else 3346#endif 3347 i686_pagezero(page); 3348 } else 3349#endif 3350 bzero(page, PAGE_SIZE); 3351} 3352 3353/* 3354 * pmap_zero_page zeros the specified hardware page by mapping 3355 * the page into KVM and using bzero to clear its contents. 3356 */ 3357void 3358pmap_zero_page(vm_page_t m) 3359{ 3360 struct sysmaps *sysmaps; 3361 3362 sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)]; 3363 mtx_lock(&sysmaps->lock); 3364 if (*sysmaps->CMAP2) 3365 panic("pmap_zero_page: CMAP2 busy"); 3366 sched_pin(); 3367 PT_SET_MA(sysmaps->CADDR2, PG_V | PG_RW | xpmap_ptom(VM_PAGE_TO_PHYS(m)) | PG_A | PG_M); 3368 pagezero(sysmaps->CADDR2); 3369 PT_SET_MA(sysmaps->CADDR2, 0); 3370 sched_unpin(); 3371 mtx_unlock(&sysmaps->lock); 3372} 3373 3374/* 3375 * pmap_zero_page_area zeros the specified hardware page by mapping 3376 * the page into KVM and using bzero to clear its contents. 3377 * 3378 * off and size may not cover an area beyond a single hardware page. 3379 */ 3380void 3381pmap_zero_page_area(vm_page_t m, int off, int size) 3382{ 3383 struct sysmaps *sysmaps; 3384 3385 sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)]; 3386 mtx_lock(&sysmaps->lock); 3387 if (*sysmaps->CMAP2) 3388 panic("pmap_zero_page: CMAP2 busy"); 3389 sched_pin(); 3390 PT_SET_MA(sysmaps->CADDR2, PG_V | PG_RW | xpmap_ptom(VM_PAGE_TO_PHYS(m)) | PG_A | PG_M); 3391 3392 if (off == 0 && size == PAGE_SIZE) 3393 pagezero(sysmaps->CADDR2); 3394 else 3395 bzero((char *)sysmaps->CADDR2 + off, size); 3396 PT_SET_MA(sysmaps->CADDR2, 0); 3397 sched_unpin(); 3398 mtx_unlock(&sysmaps->lock); 3399} 3400 3401/* 3402 * pmap_zero_page_idle zeros the specified hardware page by mapping 3403 * the page into KVM and using bzero to clear its contents. This 3404 * is intended to be called from the vm_pagezero process only and 3405 * outside of Giant. 
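 */

/*
 * Illustrative sketch (not part of the original source): the transient
 * mapping idiom shared by the zero/copy routines here.  A machine
 * address is installed in a reserved pte via PT_SET_MA(), the page is
 * touched through the matching VA, and the pte is torn down again.
 * "va" stands in for a CMAP/CADDR-style slot owned by the caller.
 */
#ifdef notyet
static void
pmap_with_transient_mapping(caddr_t va, vm_page_t m, void (*op)(void *))
{

	sched_pin();
	PT_SET_MA(va, PG_V | PG_RW | xpmap_ptom(VM_PAGE_TO_PHYS(m)) |
	    PG_A | PG_M);
	(*op)(va);
	PT_SET_MA(va, 0);
	sched_unpin();
}
#endif

/*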
3406 */ 3407void 3408pmap_zero_page_idle(vm_page_t m) 3409{ 3410 3411 if (*CMAP3) 3412 panic("pmap_zero_page: CMAP3 busy"); 3413 sched_pin(); 3414 PT_SET_MA(CADDR3, PG_V | PG_RW | xpmap_ptom(VM_PAGE_TO_PHYS(m)) | PG_A | PG_M); 3415 pagezero(CADDR3); 3416 PT_SET_MA(CADDR3, 0); 3417 sched_unpin(); 3418} 3419 3420/* 3421 * pmap_copy_page copies the specified (machine independent) 3422 * page by mapping the page into virtual memory and using 3423 * bcopy to copy the page, one machine dependent page at a 3424 * time. 3425 */ 3426void 3427pmap_copy_page(vm_page_t src, vm_page_t dst) 3428{ 3429 struct sysmaps *sysmaps; 3430 3431 sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)]; 3432 mtx_lock(&sysmaps->lock); 3433 if (*sysmaps->CMAP1) 3434 panic("pmap_copy_page: CMAP1 busy"); 3435 if (*sysmaps->CMAP2) 3436 panic("pmap_copy_page: CMAP2 busy"); 3437 sched_pin(); 3438 PT_SET_MA(sysmaps->CADDR1, PG_V | xpmap_ptom(VM_PAGE_TO_PHYS(src)) | PG_A); 3439 PT_SET_MA(sysmaps->CADDR2, PG_V | PG_RW | xpmap_ptom(VM_PAGE_TO_PHYS(dst)) | PG_A | PG_M); 3440 bcopy(sysmaps->CADDR1, sysmaps->CADDR2, PAGE_SIZE); 3441 PT_SET_MA(sysmaps->CADDR1, 0); 3442 PT_SET_MA(sysmaps->CADDR2, 0); 3443 sched_unpin(); 3444 mtx_unlock(&sysmaps->lock); 3445} 3446 3447/* 3448 * Returns true if the pmap's pv is one of the first 3449 * 16 pvs linked to from this page. This count may 3450 * be changed upwards or downwards in the future; it 3451 * is only necessary that true be returned for a small 3452 * subset of pmaps for proper page aging. 3453 */ 3454boolean_t 3455pmap_page_exists_quick(pmap_t pmap, vm_page_t m) 3456{ 3457 pv_entry_t pv; 3458 int loops = 0; 3459 boolean_t rv; 3460 3461 KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0, 3462 ("pmap_page_exists_quick: page %p is not managed", m)); 3463 rv = FALSE; 3464 vm_page_lock_queues(); 3465 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { 3466 if (PV_PMAP(pv) == pmap) { 3467 rv = TRUE; 3468 break; 3469 } 3470 loops++; 3471 if (loops >= 16) 3472 break; 3473 } 3474 vm_page_unlock_queues(); 3475 return (rv); 3476} 3477 3478/* 3479 * pmap_page_wired_mappings: 3480 * 3481 * Return the number of managed mappings to the given physical page 3482 * that are wired. 3483 */ 3484int 3485pmap_page_wired_mappings(vm_page_t m) 3486{ 3487 pv_entry_t pv; 3488 pt_entry_t *pte; 3489 pmap_t pmap; 3490 int count; 3491 3492 count = 0; 3493 if ((m->flags & PG_FICTITIOUS) != 0) 3494 return (count); 3495 vm_page_lock_queues(); 3496 sched_pin(); 3497 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { 3498 pmap = PV_PMAP(pv); 3499 PMAP_LOCK(pmap); 3500 pte = pmap_pte_quick(pmap, pv->pv_va); 3501 if ((*pte & PG_W) != 0) 3502 count++; 3503 PMAP_UNLOCK(pmap); 3504 } 3505 sched_unpin(); 3506 vm_page_unlock_queues(); 3507 return (count); 3508} 3509 3510/* 3511 * Returns TRUE if the given page is mapped individually or as part of 3512 * a 4mpage. Otherwise, returns FALSE. 3513 */ 3514boolean_t 3515pmap_page_is_mapped(vm_page_t m) 3516{ 3517 boolean_t rv; 3518 3519 if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0) 3520 return (FALSE); 3521 vm_page_lock_queues(); 3522 rv = !TAILQ_EMPTY(&m->md.pv_list) || 3523 !TAILQ_EMPTY(&pa_to_pvh(VM_PAGE_TO_PHYS(m))->pv_list); 3524 vm_page_unlock_queues(); 3525 return (rv); 3526} 3527 3528/* 3529 * Remove all pages from specified address space 3530 * this aids process exit speeds. Also, this code 3531 * is special cased for current process only, but 3532 * can have the more generic (and slightly slower) 3533 * mode enabled. 
This is much faster than pmap_remove 3534 * in the case of running down an entire address space. 3535 */ 3536void 3537pmap_remove_pages(pmap_t pmap) 3538{ 3539 pt_entry_t *pte, tpte; 3540 vm_page_t m, free = NULL; 3541 pv_entry_t pv; 3542 struct pv_chunk *pc, *npc; 3543 int field, idx; 3544 int32_t bit; 3545 uint32_t inuse, bitmask; 3546 int allfree; 3547 3548 CTR1(KTR_PMAP, "pmap_remove_pages: pmap=%p", pmap); 3549 3550 if (pmap != vmspace_pmap(curthread->td_proc->p_vmspace)) { 3551 printf("warning: pmap_remove_pages called with non-current pmap\n"); 3552 return; 3553 } 3554 vm_page_lock_queues(); 3555 KASSERT(pmap_is_current(pmap), ("removing pages from non-current pmap")); 3556 PMAP_LOCK(pmap); 3557 sched_pin(); 3558 TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) { 3559 allfree = 1; 3560 for (field = 0; field < _NPCM; field++) { 3561 inuse = (~(pc->pc_map[field])) & pc_freemask[field]; 3562 while (inuse != 0) { 3563 bit = bsfl(inuse); 3564 bitmask = 1UL << bit; 3565 idx = field * 32 + bit; 3566 pv = &pc->pc_pventry[idx]; 3567 inuse &= ~bitmask; 3568 3569 pte = vtopte(pv->pv_va); 3570 tpte = *pte ? xpmap_mtop(*pte) : 0; 3571 3572 if (tpte == 0) { 3573 printf( 3574 "TPTE at %p IS ZERO @ VA %08x\n", 3575 pte, pv->pv_va); 3576 panic("bad pte"); 3577 } 3578 3579/* 3580 * We cannot remove wired pages from a process' mapping at this time 3581 */ 3582 if (tpte & PG_W) { 3583 allfree = 0; 3584 continue; 3585 } 3586 3587 m = PHYS_TO_VM_PAGE(tpte & PG_FRAME); 3588 KASSERT(m->phys_addr == (tpte & PG_FRAME), 3589 ("vm_page_t %p phys_addr mismatch %016jx %016jx", 3590 m, (uintmax_t)m->phys_addr, 3591 (uintmax_t)tpte)); 3592 3593 KASSERT(m < &vm_page_array[vm_page_array_size], 3594 ("pmap_remove_pages: bad tpte %#jx", 3595 (uintmax_t)tpte)); 3596 3597 3598 PT_CLEAR_VA(pte, FALSE); 3599 3600 /* 3601 * Update the vm_page_t clean/reference bits. 3602 */ 3603 if (tpte & PG_M) 3604 vm_page_dirty(m); 3605 3606 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list); 3607 if (TAILQ_EMPTY(&m->md.pv_list)) 3608 vm_page_flag_clear(m, PG_WRITEABLE); 3609 3610 pmap_unuse_pt(pmap, pv->pv_va, &free); 3611 3612 /* Mark free */ 3613 PV_STAT(pv_entry_frees++); 3614 PV_STAT(pv_entry_spare++); 3615 pv_entry_count--; 3616 pc->pc_map[field] |= bitmask; 3617 pmap->pm_stats.resident_count--; 3618 } 3619 } 3620 PT_UPDATES_FLUSH(); 3621 if (allfree) { 3622 PV_STAT(pv_entry_spare -= _NPCPV); 3623 PV_STAT(pc_chunk_count--); 3624 PV_STAT(pc_chunk_frees++); 3625 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 3626 m = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)pc)); 3627 pmap_qremove((vm_offset_t)pc, 1); 3628 vm_page_unwire(m, 0); 3629 vm_page_free(m); 3630 pmap_ptelist_free(&pv_vafree, (vm_offset_t)pc); 3631 } 3632 } 3633 PT_UPDATES_FLUSH(); 3634 if (*PMAP1) 3635 PT_SET_MA(PADDR1, 0); 3636 3637 sched_unpin(); 3638 pmap_invalidate_all(pmap); 3639 vm_page_unlock_queues(); 3640 PMAP_UNLOCK(pmap); 3641 pmap_free_zero_pages(free); 3642} 3643 3644/* 3645 * pmap_is_modified: 3646 * 3647 * Return whether or not the specified physical page was modified 3648 * in any physical maps. 3649 */ 3650boolean_t 3651pmap_is_modified(vm_page_t m) 3652{ 3653 pv_entry_t pv; 3654 pt_entry_t *pte; 3655 pmap_t pmap; 3656 boolean_t rv; 3657 3658 KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0, 3659 ("pmap_is_modified: page %p is not managed", m)); 3660 rv = FALSE; 3661 3662 /* 3663 * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be 3664 * concurrently set while the object is locked. 
Thus, if PG_WRITEABLE 3665 * is clear, no PTEs can have PG_M set. 3666 */ 3667 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 3668 if ((m->oflags & VPO_BUSY) == 0 && 3669 (m->flags & PG_WRITEABLE) == 0) 3670 return (rv); 3671 vm_page_lock_queues(); 3672 sched_pin(); 3673 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { 3674 pmap = PV_PMAP(pv); 3675 PMAP_LOCK(pmap); 3676 pte = pmap_pte_quick(pmap, pv->pv_va); 3677 rv = (*pte & PG_M) != 0; 3678 PMAP_UNLOCK(pmap); 3679 if (rv) 3680 break; 3681 } 3682 if (*PMAP1) 3683 PT_SET_MA(PADDR1, 0); 3684 sched_unpin(); 3685 vm_page_unlock_queues(); 3686 return (rv); 3687} 3688 3689/* 3690 * pmap_is_prefaultable: 3691 * 3692 * Return whether or not the specified virtual address is eligible 3693 * for prefault. 3694 */ 3695static boolean_t 3696pmap_is_prefaultable_locked(pmap_t pmap, vm_offset_t addr) 3697{ 3698 pt_entry_t *pte; 3699 boolean_t rv = FALSE; 3700 3701 return (rv); /* XXX prefaulting is disabled; the code below is intentionally unreachable */ 3702 3703 if (pmap_is_current(pmap) && *pmap_pde(pmap, addr)) { 3704 pte = vtopte(addr); 3705 rv = (*pte == 0); 3706 } 3707 return (rv); 3708} 3709 3710boolean_t 3711pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr) 3712{ 3713 boolean_t rv; 3714 3715 PMAP_LOCK(pmap); 3716 rv = pmap_is_prefaultable_locked(pmap, addr); 3717 PMAP_UNLOCK(pmap); 3718 return (rv); 3719} 3720 3721boolean_t 3722pmap_is_referenced(vm_page_t m) 3723{ 3724 pv_entry_t pv; 3725 pt_entry_t *pte; 3726 pmap_t pmap; 3727 boolean_t rv; 3728 3729 KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0, 3730 ("pmap_is_referenced: page %p is not managed", m)); 3731 rv = FALSE; 3732 vm_page_lock_queues(); 3733 sched_pin(); 3734 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { 3735 pmap = PV_PMAP(pv); 3736 PMAP_LOCK(pmap); 3737 pte = pmap_pte_quick(pmap, pv->pv_va); 3738 rv = (*pte & (PG_A | PG_V)) == (PG_A | PG_V); 3739 PMAP_UNLOCK(pmap); 3740 if (rv) 3741 break; 3742 } 3743 if (*PMAP1) 3744 PT_SET_MA(PADDR1, 0); 3745 sched_unpin(); 3746 vm_page_unlock_queues(); 3747 return (rv); 3748} 3749 3750void 3751pmap_map_readonly(pmap_t pmap, vm_offset_t va, int len) 3752{ 3753 int i, npages = round_page(len) >> PAGE_SHIFT; 3754 for (i = 0; i < npages; i++) { 3755 pt_entry_t *pte; 3756 pte = pmap_pte(pmap, (vm_offset_t)(va + i*PAGE_SIZE)); 3757 pte_store(pte, xpmap_mtop(*pte & ~(PG_RW|PG_M))); 3758 PMAP_MARK_PRIV(xpmap_mtop(*pte)); 3759 pmap_pte_release(pte); 3760 } 3761} 3762 3763void 3764pmap_map_readwrite(pmap_t pmap, vm_offset_t va, int len) 3765{ 3766 int i, npages = round_page(len) >> PAGE_SHIFT; 3767 for (i = 0; i < npages; i++) { 3768 pt_entry_t *pte; 3769 pte = pmap_pte(pmap, (vm_offset_t)(va + i*PAGE_SIZE)); 3770 PMAP_MARK_UNPRIV(xpmap_mtop(*pte)); 3771 pte_store(pte, xpmap_mtop(*pte) | (PG_RW|PG_M)); 3772 pmap_pte_release(pte); 3773 } 3774} 3775 3776/* 3777 * Clear the write and modified bits in each of the given page's mappings. 3778 */ 3779void 3780pmap_remove_write(vm_page_t m) 3781{ 3782 pv_entry_t pv; 3783 pmap_t pmap; 3784 pt_entry_t oldpte, *pte; 3785 3786 KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0, 3787 ("pmap_remove_write: page %p is not managed", m)); 3788 3789 /* 3790 * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be set by 3791 * another thread while the object is locked. Thus, if PG_WRITEABLE 3792 * is clear, no page table entries need updating.
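 *
 * The unlocked test below is an optimization only; taking the page
 * queues lock and walking an empty pv list would be correct, merely
 * wasted work.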
3793 */ 3794 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 3795 if ((m->oflags & VPO_BUSY) == 0 && 3796 (m->flags & PG_WRITEABLE) == 0) 3797 return; 3798 vm_page_lock_queues(); 3799 sched_pin(); 3800 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { 3801 pmap = PV_PMAP(pv); 3802 PMAP_LOCK(pmap); 3803 pte = pmap_pte_quick(pmap, pv->pv_va); 3804retry: 3805 oldpte = *pte; 3806 if ((oldpte & PG_RW) != 0) { 3807 vm_paddr_t newpte = oldpte & ~(PG_RW | PG_M); 3808 3809 /* 3810 * Regardless of whether a pte is 32 or 64 bits 3811 * in size, PG_RW and PG_M are among the least 3812 * significant 32 bits. 3813 */ 3814 PT_SET_VA_MA(pte, newpte, TRUE); 3815 if (*pte != newpte) 3816 goto retry; 3817 3818 if ((oldpte & PG_M) != 0) 3819 vm_page_dirty(m); 3820 pmap_invalidate_page(pmap, pv->pv_va); 3821 } 3822 PMAP_UNLOCK(pmap); 3823 } 3824 vm_page_flag_clear(m, PG_WRITEABLE); 3825 PT_UPDATES_FLUSH(); 3826 if (*PMAP1) 3827 PT_SET_MA(PADDR1, 0); 3828 sched_unpin(); 3829 vm_page_unlock_queues(); 3830} 3831 3832/* 3833 * pmap_ts_referenced: 3834 * 3835 * Return a count of reference bits for a page, clearing those bits. 3836 * It is not necessary for every reference bit to be cleared, but it 3837 * is necessary that 0 only be returned when there are truly no 3838 * reference bits set. 3839 * 3840 * XXX: The exact number of bits to check and clear is a matter that 3841 * should be tested and standardized at some point in the future for 3842 * optimal aging of shared pages. 3843 */ 3844int 3845pmap_ts_referenced(vm_page_t m) 3846{ 3847 pv_entry_t pv, pvf, pvn; 3848 pmap_t pmap; 3849 pt_entry_t *pte; 3850 int rtval = 0; 3851 3852 KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0, 3853 ("pmap_ts_referenced: page %p is not managed", m)); 3854 vm_page_lock_queues(); 3855 sched_pin(); 3856 if ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) { 3857 pvf = pv; 3858 do { 3859 pvn = TAILQ_NEXT(pv, pv_list); 3860 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list); 3861 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list); 3862 pmap = PV_PMAP(pv); 3863 PMAP_LOCK(pmap); 3864 pte = pmap_pte_quick(pmap, pv->pv_va); 3865 if ((*pte & PG_A) != 0) { 3866 PT_SET_VA_MA(pte, *pte & ~PG_A, FALSE); 3867 pmap_invalidate_page(pmap, pv->pv_va); 3868 rtval++; 3869 if (rtval > 4) 3870 pvn = NULL; 3871 } 3872 PMAP_UNLOCK(pmap); 3873 } while ((pv = pvn) != NULL && pv != pvf); 3874 } 3875 PT_UPDATES_FLUSH(); 3876 if (*PMAP1) 3877 PT_SET_MA(PADDR1, 0); 3878 3879 sched_unpin(); 3880 vm_page_unlock_queues(); 3881 return (rtval); 3882} 3883 3884/* 3885 * Clear the modify bits on the specified physical page. 3886 */ 3887void 3888pmap_clear_modify(vm_page_t m) 3889{ 3890 pv_entry_t pv; 3891 pmap_t pmap; 3892 pt_entry_t *pte; 3893 3894 KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0, 3895 ("pmap_clear_modify: page %p is not managed", m)); 3896 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 3897 KASSERT((m->oflags & VPO_BUSY) == 0, 3898 ("pmap_clear_modify: page %p is busy", m)); 3899 3900 /* 3901 * If the page is not PG_WRITEABLE, then no PTEs can have PG_M set. 3902 * If the object containing the page is locked and the page is not 3903 * VPO_BUSY, then PG_WRITEABLE cannot be concurrently set. 
3904 */ 3905 if ((m->flags & PG_WRITEABLE) == 0) 3906 return; 3907 vm_page_lock_queues(); 3908 sched_pin(); 3909 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { 3910 pmap = PV_PMAP(pv); 3911 PMAP_LOCK(pmap); 3912 pte = pmap_pte_quick(pmap, pv->pv_va); 3913 if ((*pte & PG_M) != 0) { 3914 /* 3915 * Regardless of whether a pte is 32 or 64 bits 3916 * in size, PG_M is among the least significant 3917 * 32 bits. 3918 */ 3919 PT_SET_VA_MA(pte, *pte & ~PG_M, FALSE); 3920 pmap_invalidate_page(pmap, pv->pv_va); 3921 } 3922 PMAP_UNLOCK(pmap); 3923 } 3924 sched_unpin(); 3925 vm_page_unlock_queues(); 3926} 3927 3928/* 3929 * pmap_clear_reference: 3930 * 3931 * Clear the reference bit on the specified physical page. 3932 */ 3933void 3934pmap_clear_reference(vm_page_t m) 3935{ 3936 pv_entry_t pv; 3937 pmap_t pmap; 3938 pt_entry_t *pte; 3939 3940 KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0, 3941 ("pmap_clear_reference: page %p is not managed", m)); 3942 vm_page_lock_queues(); 3943 sched_pin(); 3944 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { 3945 pmap = PV_PMAP(pv); 3946 PMAP_LOCK(pmap); 3947 pte = pmap_pte_quick(pmap, pv->pv_va); 3948 if ((*pte & PG_A) != 0) { 3949 /* 3950 * Regardless of whether a pte is 32 or 64 bits 3951 * in size, PG_A is among the least significant 3952 * 32 bits. 3953 */ 3954 PT_SET_VA_MA(pte, *pte & ~PG_A, FALSE); 3955 pmap_invalidate_page(pmap, pv->pv_va); 3956 } 3957 PMAP_UNLOCK(pmap); 3958 } 3959 sched_unpin(); 3960 vm_page_unlock_queues(); 3961} 3962 3963/* 3964 * Miscellaneous support routines follow 3965 */ 3966 3967/* 3968 * Map a set of physical memory pages into the kernel virtual 3969 * address space. Return a pointer to where it is mapped. This 3970 * routine is intended to be used for mapping device memory, 3971 * NOT real memory. 3972 */ 3973void * 3974pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, int mode) 3975{ 3976 vm_offset_t va, offset; 3977 vm_size_t tmpsize; 3978 3979 offset = pa & PAGE_MASK; 3980 size = roundup(offset + size, PAGE_SIZE); 3981 pa = pa & PG_FRAME; 3982 3983 if (pa < KERNLOAD && pa + size <= KERNLOAD) 3984 va = KERNBASE + pa; 3985 else 3986 va = kmem_alloc_nofault(kernel_map, size); 3987 if (!va) 3988 panic("pmap_mapdev: Couldn't alloc kernel virtual memory"); 3989 3990 for (tmpsize = 0; tmpsize < size; tmpsize += PAGE_SIZE) 3991 pmap_kenter_attr(va + tmpsize, pa + tmpsize, mode); 3992 pmap_invalidate_range(kernel_pmap, va, va + tmpsize); 3993 pmap_invalidate_cache_range(va, va + size); 3994 return ((void *)(va + offset)); 3995} 3996 3997void * 3998pmap_mapdev(vm_paddr_t pa, vm_size_t size) 3999{ 4000 4001 return (pmap_mapdev_attr(pa, size, PAT_UNCACHEABLE)); 4002} 4003 4004void * 4005pmap_mapbios(vm_paddr_t pa, vm_size_t size) 4006{ 4007 4008 return (pmap_mapdev_attr(pa, size, PAT_WRITE_BACK)); 4009} 4010 4011void 4012pmap_unmapdev(vm_offset_t va, vm_size_t size) 4013{ 4014 vm_offset_t base, offset, tmpva; 4015 4016 if (va >= KERNBASE && va + size <= KERNBASE + KERNLOAD) 4017 return; 4018 base = trunc_page(va); 4019 offset = va & PAGE_MASK; 4020 size = roundup(offset + size, PAGE_SIZE); 4021 critical_enter(); 4022 for (tmpva = base; tmpva < (base + size); tmpva += PAGE_SIZE) 4023 pmap_kremove(tmpva); 4024 pmap_invalidate_range(kernel_pmap, va, tmpva); 4025 critical_exit(); 4026 kmem_free(kernel_map, base, size); 4027} 4028 4029/* 4030 * Sets the memory attribute for the specified page. 
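 */

/*
 * For illustration only (not part of the original source): a
 * driver-style use of the device mapping routines above.  "MY_BAR_PA"
 * and "MY_BAR_SIZE" are hypothetical placeholders for a device's
 * register window.
 */
#ifdef notyet
static void *
pmap_mapdev_example(void)
{

	/* Uncacheable mapping; release it later with pmap_unmapdev(). */
	return (pmap_mapdev_attr(MY_BAR_PA, MY_BAR_SIZE, PAT_UNCACHEABLE));
}
#endif

/*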
/*
 * Sets the memory attribute for the specified page.
 */
void
pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
{
	struct sysmaps *sysmaps;
	vm_offset_t sva, eva;

	m->md.pat_mode = ma;
	if ((m->flags & PG_FICTITIOUS) != 0)
		return;

	/*
	 * If "m" is a normal page, flush it from the cache.
	 * See pmap_invalidate_cache_range().
	 *
	 * First, try to find an existing mapping of the page by sf
	 * buffer.  sf_buf_invalidate_cache() modifies mapping and
	 * flushes the cache.
	 */
	if (sf_buf_invalidate_cache(m))
		return;

	/*
	 * If page is not mapped by sf buffer, but CPU does not
	 * support self snoop, map the page transient and do
	 * invalidation.  In the worst case, whole cache is flushed by
	 * pmap_invalidate_cache_range().
	 */
	if ((cpu_feature & (CPUID_SS|CPUID_CLFSH)) == CPUID_CLFSH) {
		sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)];
		mtx_lock(&sysmaps->lock);
		if (*sysmaps->CMAP2)
			panic("pmap_page_set_memattr: CMAP2 busy");
		sched_pin();
		PT_SET_MA(sysmaps->CADDR2, PG_V | PG_RW |
		    xpmap_ptom(VM_PAGE_TO_PHYS(m)) | PG_A | PG_M |
		    pmap_cache_bits(m->md.pat_mode, 0));
		invlcaddr(sysmaps->CADDR2);
		sva = (vm_offset_t)sysmaps->CADDR2;
		eva = sva + PAGE_SIZE;
	} else
		sva = eva = 0; /* gcc */
	pmap_invalidate_cache_range(sva, eva);
	if (sva != 0) {
		PT_SET_MA(sysmaps->CADDR2, 0);
		sched_unpin();
		mtx_unlock(&sysmaps->lock);
	}
}

int
pmap_change_attr(vm_offset_t va, vm_size_t size, int mode)
{
	vm_offset_t base, offset, tmpva;
	pt_entry_t *pte;
	u_int opte, npte;
	pd_entry_t *pde;
	boolean_t changed;

	base = trunc_page(va);
	offset = va & PAGE_MASK;
	size = roundup(offset + size, PAGE_SIZE);

	/* Only supported on kernel virtual addresses. */
	if (base <= VM_MAXUSER_ADDRESS)
		return (EINVAL);

	/* 4MB pages and pages that aren't mapped aren't supported. */
	for (tmpva = base; tmpva < (base + size); tmpva += PAGE_SIZE) {
		pde = pmap_pde(kernel_pmap, tmpva);
		if (*pde & PG_PS)
			return (EINVAL);
		if ((*pde & PG_V) == 0)
			return (EINVAL);
		pte = vtopte(tmpva);
		if ((*pte & PG_V) == 0)
			return (EINVAL);
	}

	changed = FALSE;

	/*
	 * Ok, all the pages exist and are 4k, so run through them updating
	 * their cache mode.
	 */
	for (tmpva = base; size > 0; ) {
		pte = vtopte(tmpva);

		/*
		 * The cache mode bits are all in the low 32-bits of the
		 * PTE, so we can just spin on updating the low 32-bits.
		 */
		do {
			opte = *(u_int *)pte;
			npte = opte & ~(PG_PTE_PAT | PG_NC_PCD | PG_NC_PWT);
			npte |= pmap_cache_bits(mode, 0);
			PT_SET_VA_MA(pte, npte, TRUE);
		} while (npte != opte && (*pte != npte));
		if (npte != opte)
			changed = TRUE;
		tmpva += PAGE_SIZE;
		size -= PAGE_SIZE;
	}

	/*
	 * Flush CPU caches to make sure any data isn't cached that shouldn't
	 * be, etc.
	 */
	if (changed) {
		pmap_invalidate_range(kernel_pmap, base, tmpva);
		pmap_invalidate_cache_range(base, tmpva);
	}
	return (0);
}
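
/*
 * Illustrative sketch (not compiled): switching one already-mapped
 * kernel page to write-back caching with pmap_change_attr().  The
 * address is assumed to be a valid 4KB kernel mapping; otherwise the
 * checks above return EINVAL.
 */
#if 0
static int
example_set_writeback(vm_offset_t va)
{

	return (pmap_change_attr(va, PAGE_SIZE, PAT_WRITE_BACK));
}
#endif
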
/*
 * Perform the pmap work for mincore(2).
 */
int
pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
{
	pt_entry_t *ptep, pte;
	vm_paddr_t pa;
	int val;

	PMAP_LOCK(pmap);
retry:
	ptep = pmap_pte(pmap, addr);
	pte = (ptep != NULL) ? PT_GET(ptep) : 0;
	pmap_pte_release(ptep);
	val = 0;
	if ((pte & PG_V) != 0) {
		val |= MINCORE_INCORE;
		if ((pte & (PG_M | PG_RW)) == (PG_M | PG_RW))
			val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
		if ((pte & PG_A) != 0)
			val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
	}
	if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) !=
	    (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) &&
	    (pte & (PG_MANAGED | PG_V)) == (PG_MANAGED | PG_V)) {
		pa = pte & PG_FRAME;
		/* Ensure that "PHYS_TO_VM_PAGE(pa)->object" doesn't change. */
		if (vm_page_pa_tryrelock(pmap, pa, locked_pa))
			goto retry;
	} else
		PA_UNLOCK_COND(*locked_pa);
	PMAP_UNLOCK(pmap);
	return (val);
}

void
pmap_activate(struct thread *td)
{
	pmap_t pmap, oldpmap;
	u_int32_t cr3;

	critical_enter();
	pmap = vmspace_pmap(td->td_proc->p_vmspace);
	oldpmap = PCPU_GET(curpmap);
#if defined(SMP)
	atomic_clear_int(&oldpmap->pm_active, PCPU_GET(cpumask));
	atomic_set_int(&pmap->pm_active, PCPU_GET(cpumask));
#else
	oldpmap->pm_active &= ~1;
	pmap->pm_active |= 1;
#endif
#ifdef PAE
	cr3 = vtophys(pmap->pm_pdpt);
#else
	cr3 = vtophys(pmap->pm_pdir);
#endif
	/*
	 * pmap_activate is for the current thread on the current cpu.
	 */
	td->td_pcb->pcb_cr3 = cr3;
	PT_UPDATES_FLUSH();
	load_cr3(cr3);
	PCPU_SET(curpmap, pmap);
	critical_exit();
}

void
pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
{

	/* Nothing to do: i386 instruction caches are coherent. */
}

/*
 * Increase the starting virtual address of the given mapping if a
 * different alignment might result in more superpage mappings.
 */
void
pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
    vm_offset_t *addr, vm_size_t size)
{
	vm_offset_t superpage_offset;

	if (size < NBPDR)
		return;
	if (object != NULL && (object->flags & OBJ_COLORED) != 0)
		offset += ptoa(object->pg_color);
	superpage_offset = offset & PDRMASK;
	if (size - ((NBPDR - superpage_offset) & PDRMASK) < NBPDR ||
	    (*addr & PDRMASK) == superpage_offset)
		return;
	if ((*addr & PDRMASK) < superpage_offset)
		*addr = (*addr & ~PDRMASK) + superpage_offset;
	else
		*addr = ((*addr + PDRMASK) & ~PDRMASK) + superpage_offset;
}
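
/*
 * Worked example for pmap_align_superpage() (assuming non-PAE i386,
 * where NBPDR is 4MB and PDRMASK is 0x3fffff): an 8MB mapping at object
 * offset 0x123000 has superpage_offset 0x123000.  Given a candidate
 * *addr of 0x40000000, (*addr & PDRMASK) is 0, which is less than
 * 0x123000, so *addr becomes 0x40123000.  The mapping's pages then fall
 * on the same 4MB boundaries as they do in the object, which is what
 * later superpage promotion requires.
 */
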
#ifdef XEN

void
pmap_suspend(void)
{
	pmap_t pmap;
	int i, pdir, offset;
	vm_paddr_t pdirma;
	mmu_update_t mu[4];

	/*
	 * We need to remove the recursive mapping structure from all
	 * our pmaps so that Xen doesn't get confused when it restores
	 * the page tables.  The recursive map lives at page directory
	 * index PTDPTDI.  We assume that the suspend code has stopped
	 * the other vcpus (if any).
	 */
	LIST_FOREACH(pmap, &allpmaps, pm_list) {
		for (i = 0; i < 4; i++) {
			/*
			 * Figure out which page directory (L2) page
			 * contains this bit of the recursive map and
			 * the offset within that page of the map
			 * entry.
			 */
			pdir = (PTDPTDI + i) / NPDEPG;
			offset = (PTDPTDI + i) % NPDEPG;
			pdirma = pmap->pm_pdpt[pdir] & PG_FRAME;
			mu[i].ptr = pdirma + offset * sizeof(pd_entry_t);
			mu[i].val = 0;
		}
		HYPERVISOR_mmu_update(mu, 4, NULL, DOMID_SELF);
	}
}

void
pmap_resume(void)
{
	pmap_t pmap;
	int i, pdir, offset;
	vm_paddr_t pdirma;
	mmu_update_t mu[4];

	/*
	 * Restore the recursive map that we removed on suspend.
	 */
	LIST_FOREACH(pmap, &allpmaps, pm_list) {
		for (i = 0; i < 4; i++) {
			/*
			 * Figure out which page directory (L2) page
			 * contains this bit of the recursive map and
			 * the offset within that page of the map
			 * entry.
			 */
			pdir = (PTDPTDI + i) / NPDEPG;
			offset = (PTDPTDI + i) % NPDEPG;
			pdirma = pmap->pm_pdpt[pdir] & PG_FRAME;
			mu[i].ptr = pdirma + offset * sizeof(pd_entry_t);
			mu[i].val = (pmap->pm_pdpt[i] & PG_FRAME) | PG_V;
		}
		HYPERVISOR_mmu_update(mu, 4, NULL, DOMID_SELF);
	}
}

#endif

#if defined(PMAP_DEBUG)
int
pmap_pid_dump(int pid)
{
	pmap_t pmap;
	struct proc *p;
	int npte = 0;
	int index;

	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		if (p->p_pid != pid)
			continue;

		if (p->p_vmspace) {
			int i, j;

			index = 0;
			pmap = vmspace_pmap(p->p_vmspace);
			for (i = 0; i < NPDEPTD; i++) {
				pd_entry_t *pde;
				pt_entry_t *pte;
				vm_offset_t base = i << PDRSHIFT;

				pde = &pmap->pm_pdir[i];
				if (pde && pmap_pde_v(pde)) {
					for (j = 0; j < NPTEPG; j++) {
						vm_offset_t va = base + (j << PAGE_SHIFT);
						if (va >= (vm_offset_t)VM_MIN_KERNEL_ADDRESS) {
							if (index) {
								index = 0;
								printf("\n");
							}
							sx_sunlock(&allproc_lock);
							return (npte);
						}
						pte = pmap_pte(pmap, va);
						if (pte && pmap_pte_v(pte)) {
							pt_entry_t pa;
							vm_page_t m;

							pa = PT_GET(pte);
							m = PHYS_TO_VM_PAGE(pa & PG_FRAME);
							printf("va: 0x%x, pt: 0x%jx, h: %d, w: %d, f: 0x%x",
							    va, (uintmax_t)pa, m->hold_count,
							    m->wire_count, m->flags);
							npte++;
							index++;
							if (index >= 2) {
								index = 0;
								printf("\n");
							} else {
								printf(" ");
							}
						}
					}
				}
			}
		}
	}
	sx_sunlock(&allproc_lock);
	return (npte);
}
#endif
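
/*
 * Illustrative sketch (not compiled): the index arithmetic the debug
 * walkers above and below rely on.  A 32-bit virtual address splits
 * into a page directory index and a page table index; PDRSHIFT,
 * PAGE_SHIFT, and NPTEPG account for PAE automatically.
 */
#if 0
static void
example_decompose_va(vm_offset_t va, int *pdi, int *pti)
{

	*pdi = va >> PDRSHIFT;				/* index into pm_pdir[] */
	*pti = (va >> PAGE_SHIFT) & (NPTEPG - 1);	/* PTE within that page table */
}
#endif
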
%p, va %x", (void *)pmap, pv->pv_va); 4414 pads(pmap); 4415 } 4416 printf(" "); 4417} 4418#endif 4419