pmap.c revision 236240
1/*- 2 * Copyright (c) 1991 Regents of the University of California. 3 * All rights reserved. 4 * Copyright (c) 1994 John S. Dyson 5 * All rights reserved. 6 * Copyright (c) 1994 David Greenman 7 * All rights reserved. 8 * Copyright (c) 2005 Alan L. Cox <alc@cs.rice.edu> 9 * All rights reserved. 10 * 11 * This code is derived from software contributed to Berkeley by 12 * the Systems Programming Group of the University of Utah Computer 13 * Science Department and William Jolitz of UUNET Technologies Inc. 14 * 15 * Redistribution and use in source and binary forms, with or without 16 * modification, are permitted provided that the following conditions 17 * are met: 18 * 1. Redistributions of source code must retain the above copyright 19 * notice, this list of conditions and the following disclaimer. 20 * 2. Redistributions in binary form must reproduce the above copyright 21 * notice, this list of conditions and the following disclaimer in the 22 * documentation and/or other materials provided with the distribution. 23 * 3. All advertising materials mentioning features or use of this software 24 * must display the following acknowledgement: 25 * This product includes software developed by the University of 26 * California, Berkeley and its contributors. 27 * 4. Neither the name of the University nor the names of its contributors 28 * may be used to endorse or promote products derived from this software 29 * without specific prior written permission. 30 * 31 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 32 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 33 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 34 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 35 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 36 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 37 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 38 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 39 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 40 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 41 * SUCH DAMAGE. 42 * 43 * from: @(#)pmap.c 7.7 (Berkeley) 5/12/91 44 */ 45/*- 46 * Copyright (c) 2003 Networks Associates Technology, Inc. 47 * All rights reserved. 48 * 49 * This software was developed for the FreeBSD Project by Jake Burkholder, 50 * Safeport Network Services, and Network Associates Laboratories, the 51 * Security Research Division of Network Associates, Inc. under 52 * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA 53 * CHATS research program. 54 * 55 * Redistribution and use in source and binary forms, with or without 56 * modification, are permitted provided that the following conditions 57 * are met: 58 * 1. Redistributions of source code must retain the above copyright 59 * notice, this list of conditions and the following disclaimer. 60 * 2. Redistributions in binary form must reproduce the above copyright 61 * notice, this list of conditions and the following disclaimer in the 62 * documentation and/or other materials provided with the distribution. 63 * 64 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 65 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 66 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 67 * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 68 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 69 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 70 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 71 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 72 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 73 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 74 * SUCH DAMAGE. 75 */ 76 77#include <sys/cdefs.h> 78__FBSDID("$FreeBSD: head/sys/i386/xen/pmap.c 236240 2012-05-29 15:41:20Z alc $"); 79 80/* 81 * Manages physical address maps. 82 * 83 * In addition to hardware address maps, this 84 * module is called upon to provide software-use-only 85 * maps which may or may not be stored in the same 86 * form as hardware maps. These pseudo-maps are 87 * used to store intermediate results from copy 88 * operations to and from address spaces. 89 * 90 * Since the information managed by this module is 91 * also stored by the logical address mapping module, 92 * this module may throw away valid virtual-to-physical 93 * mappings at almost any time. However, invalidations 94 * of virtual-to-physical mappings must be done as 95 * requested. 96 * 97 * In order to cope with hardware architectures which 98 * make virtual-to-physical map invalidates expensive, 99 * this module may delay invalidate or reduced protection 100 * operations until such time as they are actually 101 * necessary. This module is given full information as 102 * to which processors are currently using which maps, 103 * and to when physical maps must be made correct. 104 */ 105 106#include "opt_cpu.h" 107#include "opt_pmap.h" 108#include "opt_smp.h" 109#include "opt_xbox.h" 110 111#include <sys/param.h> 112#include <sys/systm.h> 113#include <sys/kernel.h> 114#include <sys/ktr.h> 115#include <sys/lock.h> 116#include <sys/malloc.h> 117#include <sys/mman.h> 118#include <sys/msgbuf.h> 119#include <sys/mutex.h> 120#include <sys/proc.h> 121#include <sys/sf_buf.h> 122#include <sys/sx.h> 123#include <sys/vmmeter.h> 124#include <sys/sched.h> 125#include <sys/sysctl.h> 126#ifdef SMP 127#include <sys/smp.h> 128#else 129#include <sys/cpuset.h> 130#endif 131 132#include <vm/vm.h> 133#include <vm/vm_param.h> 134#include <vm/vm_kern.h> 135#include <vm/vm_page.h> 136#include <vm/vm_map.h> 137#include <vm/vm_object.h> 138#include <vm/vm_extern.h> 139#include <vm/vm_pageout.h> 140#include <vm/vm_pager.h> 141#include <vm/uma.h> 142 143#include <machine/cpu.h> 144#include <machine/cputypes.h> 145#include <machine/md_var.h> 146#include <machine/pcb.h> 147#include <machine/specialreg.h> 148#ifdef SMP 149#include <machine/smp.h> 150#endif 151 152#ifdef XBOX 153#include <machine/xbox.h> 154#endif 155 156#include <xen/interface/xen.h> 157#include <xen/hypervisor.h> 158#include <machine/xen/hypercall.h> 159#include <machine/xen/xenvar.h> 160#include <machine/xen/xenfunc.h> 161 162#if !defined(CPU_DISABLE_SSE) && defined(I686_CPU) 163#define CPU_ENABLE_SSE 164#endif 165 166#ifndef PMAP_SHPGPERPROC 167#define PMAP_SHPGPERPROC 200 168#endif 169 170#define DIAGNOSTIC 171 172#if !defined(DIAGNOSTIC) 173#ifdef __GNUC_GNU_INLINE__ 174#define PMAP_INLINE __attribute__((__gnu_inline__)) inline 175#else 176#define PMAP_INLINE extern inline 177#endif 178#else 179#define PMAP_INLINE 180#endif 181 182#define PV_STATS 183#ifdef PV_STATS 184#define PV_STAT(x) do { x ; } while (0) 185#else 186#define PV_STAT(x) do { } 
while (0) 187#endif 188 189/* 190 * Get PDEs and PTEs for user/kernel address space 191 */ 192#define pmap_pde(m, v) (&((m)->pm_pdir[(vm_offset_t)(v) >> PDRSHIFT])) 193#define pdir_pde(m, v) (m[(vm_offset_t)(v) >> PDRSHIFT]) 194 195#define pmap_pde_v(pte) ((*(int *)pte & PG_V) != 0) 196#define pmap_pte_w(pte) ((*(int *)pte & PG_W) != 0) 197#define pmap_pte_m(pte) ((*(int *)pte & PG_M) != 0) 198#define pmap_pte_u(pte) ((*(int *)pte & PG_A) != 0) 199#define pmap_pte_v(pte) ((*(int *)pte & PG_V) != 0) 200 201#define pmap_pte_set_prot(pte, v) ((*(int *)pte &= ~PG_PROT), (*(int *)pte |= (v))) 202 203#define HAMFISTED_LOCKING 204#ifdef HAMFISTED_LOCKING 205static struct mtx createdelete_lock; 206#endif 207 208struct pmap kernel_pmap_store; 209LIST_HEAD(pmaplist, pmap); 210static struct pmaplist allpmaps; 211static struct mtx allpmaps_lock; 212 213vm_offset_t virtual_avail; /* VA of first avail page (after kernel bss) */ 214vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */ 215int pgeflag = 0; /* PG_G or-in */ 216int pseflag = 0; /* PG_PS or-in */ 217 218int nkpt; 219vm_offset_t kernel_vm_end; 220extern u_int32_t KERNend; 221 222#ifdef PAE 223pt_entry_t pg_nx; 224#endif 225 226static SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD, 0, "VM/pmap parameters"); 227 228static int pat_works; /* Is page attribute table sane? */ 229 230/* 231 * Data for the pv entry allocation mechanism 232 */ 233static TAILQ_HEAD(pch, pv_chunk) pv_chunks = TAILQ_HEAD_INITIALIZER(pv_chunks); 234static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0; 235static int shpgperproc = PMAP_SHPGPERPROC; 236 237struct pv_chunk *pv_chunkbase; /* KVA block for pv_chunks */ 238int pv_maxchunks; /* How many chunks we have KVA for */ 239vm_offset_t pv_vafree; /* freelist stored in the PTE */ 240 241/* 242 * All those kernel PT submaps that BSD is so fond of 243 */ 244struct sysmaps { 245 struct mtx lock; 246 pt_entry_t *CMAP1; 247 pt_entry_t *CMAP2; 248 caddr_t CADDR1; 249 caddr_t CADDR2; 250}; 251static struct sysmaps sysmaps_pcpu[MAXCPU]; 252static pt_entry_t *CMAP3; 253caddr_t ptvmmap = 0; 254static caddr_t CADDR3; 255struct msgbuf *msgbufp = 0; 256 257/* 258 * Crashdump maps. 
259 */ 260static caddr_t crashdumpmap; 261 262static pt_entry_t *PMAP1 = 0, *PMAP2; 263static pt_entry_t *PADDR1 = 0, *PADDR2; 264#ifdef SMP 265static int PMAP1cpu; 266static int PMAP1changedcpu; 267SYSCTL_INT(_debug, OID_AUTO, PMAP1changedcpu, CTLFLAG_RD, 268 &PMAP1changedcpu, 0, 269 "Number of times pmap_pte_quick changed CPU with same PMAP1"); 270#endif 271static int PMAP1changed; 272SYSCTL_INT(_debug, OID_AUTO, PMAP1changed, CTLFLAG_RD, 273 &PMAP1changed, 0, 274 "Number of times pmap_pte_quick changed PMAP1"); 275static int PMAP1unchanged; 276SYSCTL_INT(_debug, OID_AUTO, PMAP1unchanged, CTLFLAG_RD, 277 &PMAP1unchanged, 0, 278 "Number of times pmap_pte_quick didn't change PMAP1"); 279static struct mtx PMAP2mutex; 280 281static void free_pv_entry(pmap_t pmap, pv_entry_t pv); 282static pv_entry_t get_pv_entry(pmap_t locked_pmap, int try); 283static void pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va); 284static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, 285 vm_offset_t va); 286 287static vm_page_t pmap_enter_quick_locked(multicall_entry_t **mcl, int *count, pmap_t pmap, vm_offset_t va, 288 vm_page_t m, vm_prot_t prot, vm_page_t mpte); 289static void pmap_flush_page(vm_page_t m); 290static void pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode); 291static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t sva, 292 vm_page_t *free); 293static void pmap_remove_page(struct pmap *pmap, vm_offset_t va, 294 vm_page_t *free); 295static void pmap_remove_entry(struct pmap *pmap, vm_page_t m, 296 vm_offset_t va); 297static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, 298 vm_page_t m); 299 300static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags); 301 302static vm_page_t _pmap_allocpte(pmap_t pmap, u_int ptepindex, int flags); 303static int _pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m, vm_page_t *free); 304static pt_entry_t *pmap_pte_quick(pmap_t pmap, vm_offset_t va); 305static void pmap_pte_release(pt_entry_t *pte); 306static int pmap_unuse_pt(pmap_t, vm_offset_t, vm_page_t *); 307static boolean_t pmap_is_prefaultable_locked(pmap_t pmap, vm_offset_t addr); 308 309static __inline void pagezero(void *page); 310 311CTASSERT(1 << PDESHIFT == sizeof(pd_entry_t)); 312CTASSERT(1 << PTESHIFT == sizeof(pt_entry_t)); 313 314/* 315 * If you get an error here, then you set KVA_PAGES wrong! See the 316 * description of KVA_PAGES in sys/i386/include/pmap.h. It must be 317 * multiple of 4 for a normal kernel, or a multiple of 8 for a PAE. 318 */ 319CTASSERT(KERNBASE % (1 << 24) == 0); 320 321void 322pd_set(struct pmap *pmap, int ptepindex, vm_paddr_t val, int type) 323{ 324 vm_paddr_t pdir_ma = vtomach(&pmap->pm_pdir[ptepindex]); 325 326 switch (type) { 327 case SH_PD_SET_VA: 328#if 0 329 xen_queue_pt_update(shadow_pdir_ma, 330 xpmap_ptom(val & ~(PG_RW))); 331#endif 332 xen_queue_pt_update(pdir_ma, 333 xpmap_ptom(val)); 334 break; 335 case SH_PD_SET_VA_MA: 336#if 0 337 xen_queue_pt_update(shadow_pdir_ma, 338 val & ~(PG_RW)); 339#endif 340 xen_queue_pt_update(pdir_ma, val); 341 break; 342 case SH_PD_SET_VA_CLEAR: 343#if 0 344 xen_queue_pt_update(shadow_pdir_ma, 0); 345#endif 346 xen_queue_pt_update(pdir_ma, 0); 347 break; 348 } 349} 350 351/* 352 * Bootstrap the system enough to run with virtual memory. 353 * 354 * On the i386 this is called after mapping has already been enabled 355 * and just syncs the pmap module with what has already been done. 
356 * [We can't call it easily with mapping off since the kernel is not 357 * mapped with PA == VA, hence we would have to relocate every address 358 * from the linked base (virtual) address "KERNBASE" to the actual 359 * (physical) address starting relative to 0] 360 */ 361void 362pmap_bootstrap(vm_paddr_t firstaddr) 363{ 364 vm_offset_t va; 365 pt_entry_t *pte, *unused; 366 struct sysmaps *sysmaps; 367 int i; 368 369 /* 370 * Initialize the first available kernel virtual address. However, 371 * using "firstaddr" may waste a few pages of the kernel virtual 372 * address space, because locore may not have mapped every physical 373 * page that it allocated. Preferably, locore would provide a first 374 * unused virtual address in addition to "firstaddr". 375 */ 376 virtual_avail = (vm_offset_t) KERNBASE + firstaddr; 377 378 virtual_end = VM_MAX_KERNEL_ADDRESS; 379 380 /* 381 * Initialize the kernel pmap (which is statically allocated). 382 */ 383 PMAP_LOCK_INIT(kernel_pmap); 384 kernel_pmap->pm_pdir = (pd_entry_t *) (KERNBASE + (u_int)IdlePTD); 385#ifdef PAE 386 kernel_pmap->pm_pdpt = (pdpt_entry_t *) (KERNBASE + (u_int)IdlePDPT); 387#endif 388 CPU_FILL(&kernel_pmap->pm_active); /* don't allow deactivation */ 389 TAILQ_INIT(&kernel_pmap->pm_pvchunk); 390 LIST_INIT(&allpmaps); 391 mtx_init(&allpmaps_lock, "allpmaps", NULL, MTX_SPIN); 392 mtx_lock_spin(&allpmaps_lock); 393 LIST_INSERT_HEAD(&allpmaps, kernel_pmap, pm_list); 394 mtx_unlock_spin(&allpmaps_lock); 395 if (nkpt == 0) 396 nkpt = NKPT; 397 398 /* 399 * Reserve some special page table entries/VA space for temporary 400 * mapping of pages. 401 */ 402#define SYSMAP(c, p, v, n) \ 403 v = (c)va; va += ((n)*PAGE_SIZE); p = pte; pte += (n); 404 405 va = virtual_avail; 406 pte = vtopte(va); 407 408 /* 409 * CMAP1/CMAP2 are used for zeroing and copying pages. 410 * CMAP3 is used for the idle process page zeroing. 411 */ 412 for (i = 0; i < MAXCPU; i++) { 413 sysmaps = &sysmaps_pcpu[i]; 414 mtx_init(&sysmaps->lock, "SYSMAPS", NULL, MTX_DEF); 415 SYSMAP(caddr_t, sysmaps->CMAP1, sysmaps->CADDR1, 1) 416 SYSMAP(caddr_t, sysmaps->CMAP2, sysmaps->CADDR2, 1) 417 PT_SET_MA(sysmaps->CADDR1, 0); 418 PT_SET_MA(sysmaps->CADDR2, 0); 419 } 420 SYSMAP(caddr_t, CMAP3, CADDR3, 1) 421 PT_SET_MA(CADDR3, 0); 422 423 /* 424 * Crashdump maps. 425 */ 426 SYSMAP(caddr_t, unused, crashdumpmap, MAXDUMPPGS) 427 428 /* 429 * ptvmmap is used for reading arbitrary physical pages via /dev/mem. 430 */ 431 SYSMAP(caddr_t, unused, ptvmmap, 1) 432 433 /* 434 * msgbufp is used to map the system message buffer. 435 */ 436 SYSMAP(struct msgbuf *, unused, msgbufp, atop(round_page(msgbufsize))) 437 438 /* 439 * ptemap is used for pmap_pte_quick 440 */ 441 SYSMAP(pt_entry_t *, PMAP1, PADDR1, 1) 442 SYSMAP(pt_entry_t *, PMAP2, PADDR2, 1) 443 444 mtx_init(&PMAP2mutex, "PMAP2", NULL, MTX_DEF); 445 446 virtual_avail = va; 447 448 /* 449 * Leave in place an identity mapping (virt == phys) for the low 1 MB 450 * physical memory region that is used by the ACPI wakeup code. This 451 * mapping must not have PG_G set. 452 */ 453#ifndef XEN 454 /* 455 * leave here deliberately to show that this is not supported 456 */ 457#ifdef XBOX 458 /* FIXME: This is gross, but needed for the XBOX. Since we are in such 459 * an early stadium, we cannot yet neatly map video memory ... :-( 460 * Better fixes are very welcome! */ 461 if (!arch_i386_is_xbox) 462#endif 463 for (i = 1; i < NKPT; i++) 464 PTD[i] = 0; 465 466 /* Initialize the PAT MSR if present. 
*/ 467 pmap_init_pat(); 468 469 /* Turn on PG_G on kernel page(s) */ 470 pmap_set_pg(); 471#endif 472 473#ifdef HAMFISTED_LOCKING 474 mtx_init(&createdelete_lock, "pmap create/delete", NULL, MTX_DEF); 475#endif 476} 477 478/* 479 * Setup the PAT MSR. 480 */ 481void 482pmap_init_pat(void) 483{ 484 uint64_t pat_msr; 485 486 /* Bail if this CPU doesn't implement PAT. */ 487 if (!(cpu_feature & CPUID_PAT)) 488 return; 489 490 if (cpu_vendor_id != CPU_VENDOR_INTEL || 491 (CPUID_TO_FAMILY(cpu_id) == 6 && CPUID_TO_MODEL(cpu_id) >= 0xe)) { 492 /* 493 * Leave the indices 0-3 at the default of WB, WT, UC, and UC-. 494 * Program 4 and 5 as WP and WC. 495 * Leave 6 and 7 as UC and UC-. 496 */ 497 pat_msr = rdmsr(MSR_PAT); 498 pat_msr &= ~(PAT_MASK(4) | PAT_MASK(5)); 499 pat_msr |= PAT_VALUE(4, PAT_WRITE_PROTECTED) | 500 PAT_VALUE(5, PAT_WRITE_COMBINING); 501 pat_works = 1; 502 } else { 503 /* 504 * Due to some Intel errata, we can only safely use the lower 4 505 * PAT entries. Thus, just replace PAT Index 2 with WC instead 506 * of UC-. 507 * 508 * Intel Pentium III Processor Specification Update 509 * Errata E.27 (Upper Four PAT Entries Not Usable With Mode B 510 * or Mode C Paging) 511 * 512 * Intel Pentium IV Processor Specification Update 513 * Errata N46 (PAT Index MSB May Be Calculated Incorrectly) 514 */ 515 pat_msr = rdmsr(MSR_PAT); 516 pat_msr &= ~PAT_MASK(2); 517 pat_msr |= PAT_VALUE(2, PAT_WRITE_COMBINING); 518 pat_works = 0; 519 } 520 wrmsr(MSR_PAT, pat_msr); 521} 522 523/* 524 * Initialize a vm_page's machine-dependent fields. 525 */ 526void 527pmap_page_init(vm_page_t m) 528{ 529 530 TAILQ_INIT(&m->md.pv_list); 531 m->md.pat_mode = PAT_WRITE_BACK; 532} 533 534/* 535 * ABuse the pte nodes for unmapped kva to thread a kva freelist through. 536 * Requirements: 537 * - Must deal with pages in order to ensure that none of the PG_* bits 538 * are ever set, PG_V in particular. 539 * - Assumes we can write to ptes without pte_store() atomic ops, even 540 * on PAE systems. This should be ok. 541 * - Assumes nothing will ever test these addresses for 0 to indicate 542 * no mapping instead of correctly checking PG_V. 543 * - Assumes a vm_offset_t will fit in a pte (true for i386). 544 * Because PG_V is never set, there can be no mappings to invalidate. 545 */ 546static int ptelist_count = 0; 547static vm_offset_t 548pmap_ptelist_alloc(vm_offset_t *head) 549{ 550 vm_offset_t va; 551 vm_offset_t *phead = (vm_offset_t *)*head; 552 553 if (ptelist_count == 0) { 554 printf("out of memory!!!!!!\n"); 555 return (0); /* Out of memory */ 556 } 557 ptelist_count--; 558 va = phead[ptelist_count]; 559 return (va); 560} 561 562static void 563pmap_ptelist_free(vm_offset_t *head, vm_offset_t va) 564{ 565 vm_offset_t *phead = (vm_offset_t *)*head; 566 567 phead[ptelist_count++] = va; 568} 569 570static void 571pmap_ptelist_init(vm_offset_t *head, void *base, int npages) 572{ 573 int i, nstackpages; 574 vm_offset_t va; 575 vm_page_t m; 576 577 nstackpages = (npages + PAGE_SIZE/sizeof(vm_offset_t) - 1)/ (PAGE_SIZE/sizeof(vm_offset_t)); 578 for (i = 0; i < nstackpages; i++) { 579 va = (vm_offset_t)base + i * PAGE_SIZE; 580 m = vm_page_alloc(NULL, i, 581 VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | 582 VM_ALLOC_ZERO); 583 pmap_qenter(va, &m, 1); 584 } 585 586 *head = (vm_offset_t)base; 587 for (i = npages - 1; i >= nstackpages; i--) { 588 va = (vm_offset_t)base + i * PAGE_SIZE; 589 pmap_ptelist_free(head, va); 590 } 591} 592 593 594/* 595 * Initialize the pmap module. 
596 * Called by vm_init, to initialize any structures that the pmap 597 * system needs to map virtual memory. 598 */ 599void 600pmap_init(void) 601{ 602 603 /* 604 * Initialize the address space (zone) for the pv entries. Set a 605 * high water mark so that the system can recover from excessive 606 * numbers of pv entries. 607 */ 608 TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc); 609 pv_entry_max = shpgperproc * maxproc + cnt.v_page_count; 610 TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max); 611 pv_entry_max = roundup(pv_entry_max, _NPCPV); 612 pv_entry_high_water = 9 * (pv_entry_max / 10); 613 614 pv_maxchunks = MAX(pv_entry_max / _NPCPV, maxproc); 615 pv_chunkbase = (struct pv_chunk *)kmem_alloc_nofault(kernel_map, 616 PAGE_SIZE * pv_maxchunks); 617 if (pv_chunkbase == NULL) 618 panic("pmap_init: not enough kvm for pv chunks"); 619 pmap_ptelist_init(&pv_vafree, pv_chunkbase, pv_maxchunks); 620} 621 622 623SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_max, CTLFLAG_RD, &pv_entry_max, 0, 624 "Max number of PV entries"); 625SYSCTL_INT(_vm_pmap, OID_AUTO, shpgperproc, CTLFLAG_RD, &shpgperproc, 0, 626 "Page share factor per proc"); 627 628static SYSCTL_NODE(_vm_pmap, OID_AUTO, pde, CTLFLAG_RD, 0, 629 "2/4MB page mapping counters"); 630 631static u_long pmap_pde_mappings; 632SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, mappings, CTLFLAG_RD, 633 &pmap_pde_mappings, 0, "2/4MB page mappings"); 634 635/*************************************************** 636 * Low level helper routines..... 637 ***************************************************/ 638 639/* 640 * Determine the appropriate bits to set in a PTE or PDE for a specified 641 * caching mode. 642 */ 643int 644pmap_cache_bits(int mode, boolean_t is_pde) 645{ 646 int pat_flag, pat_index, cache_bits; 647 648 /* The PAT bit is different for PTE's and PDE's. */ 649 pat_flag = is_pde ? PG_PDE_PAT : PG_PTE_PAT; 650 651 /* If we don't support PAT, map extended modes to older ones. */ 652 if (!(cpu_feature & CPUID_PAT)) { 653 switch (mode) { 654 case PAT_UNCACHEABLE: 655 case PAT_WRITE_THROUGH: 656 case PAT_WRITE_BACK: 657 break; 658 case PAT_UNCACHED: 659 case PAT_WRITE_COMBINING: 660 case PAT_WRITE_PROTECTED: 661 mode = PAT_UNCACHEABLE; 662 break; 663 } 664 } 665 666 /* Map the caching mode to a PAT index. */ 667 if (pat_works) { 668 switch (mode) { 669 case PAT_UNCACHEABLE: 670 pat_index = 3; 671 break; 672 case PAT_WRITE_THROUGH: 673 pat_index = 1; 674 break; 675 case PAT_WRITE_BACK: 676 pat_index = 0; 677 break; 678 case PAT_UNCACHED: 679 pat_index = 2; 680 break; 681 case PAT_WRITE_COMBINING: 682 pat_index = 5; 683 break; 684 case PAT_WRITE_PROTECTED: 685 pat_index = 4; 686 break; 687 default: 688 panic("Unknown caching mode %d\n", mode); 689 } 690 } else { 691 switch (mode) { 692 case PAT_UNCACHED: 693 case PAT_UNCACHEABLE: 694 case PAT_WRITE_PROTECTED: 695 pat_index = 3; 696 break; 697 case PAT_WRITE_THROUGH: 698 pat_index = 1; 699 break; 700 case PAT_WRITE_BACK: 701 pat_index = 0; 702 break; 703 case PAT_WRITE_COMBINING: 704 pat_index = 2; 705 break; 706 default: 707 panic("Unknown caching mode %d\n", mode); 708 } 709 } 710 711 /* Map the 3-bit index value into the PAT, PCD, and PWT bits. */ 712 cache_bits = 0; 713 if (pat_index & 0x4) 714 cache_bits |= pat_flag; 715 if (pat_index & 0x2) 716 cache_bits |= PG_NC_PCD; 717 if (pat_index & 0x1) 718 cache_bits |= PG_NC_PWT; 719 return (cache_bits); 720} 721#ifdef SMP 722/* 723 * For SMP, these functions have to use the IPI mechanism for coherence. 
 *
 * N.B.: Before calling any of the following TLB invalidation functions,
 * the calling processor must ensure that all stores updating a non-
 * kernel page table are globally performed.  Otherwise, another
 * processor could cache an old, pre-update entry without being
 * invalidated.  This can happen one of two ways: (1) The pmap becomes
 * active on another processor after its pm_active field is checked by
 * one of the following functions but before a store updating the page
 * table is globally performed.  (2) The pmap becomes active on another
 * processor before its pm_active field is checked but due to
 * speculative loads one of the following functions still reads the
 * pmap as inactive on the other processor.
 *
 * The kernel page table is exempt because its pm_active field is
 * immutable.  The kernel page table is always active on every
 * processor.
 */
void
pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
{
	cpuset_t other_cpus;
	u_int cpuid;

	CTR2(KTR_PMAP, "pmap_invalidate_page: pmap=%p va=0x%x",
	    pmap, va);

	sched_pin();
	if (pmap == kernel_pmap || !CPU_CMP(&pmap->pm_active, &all_cpus)) {
		invlpg(va);
		smp_invlpg(va);
	} else {
		cpuid = PCPU_GET(cpuid);
		other_cpus = all_cpus;
		CPU_CLR(cpuid, &other_cpus);
		if (CPU_ISSET(cpuid, &pmap->pm_active))
			invlpg(va);
		CPU_AND(&other_cpus, &pmap->pm_active);
		if (!CPU_EMPTY(&other_cpus))
			smp_masked_invlpg(other_cpus, va);
	}
	sched_unpin();
	PT_UPDATES_FLUSH();
}

void
pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{
	cpuset_t other_cpus;
	vm_offset_t addr;
	u_int cpuid;

	CTR3(KTR_PMAP, "pmap_invalidate_range: pmap=%p sva=0x%x eva=0x%x",
	    pmap, sva, eva);

	sched_pin();
	if (pmap == kernel_pmap || !CPU_CMP(&pmap->pm_active, &all_cpus)) {
		for (addr = sva; addr < eva; addr += PAGE_SIZE)
			invlpg(addr);
		smp_invlpg_range(sva, eva);
	} else {
		cpuid = PCPU_GET(cpuid);
		other_cpus = all_cpus;
		CPU_CLR(cpuid, &other_cpus);
		if (CPU_ISSET(cpuid, &pmap->pm_active))
			for (addr = sva; addr < eva; addr += PAGE_SIZE)
				invlpg(addr);
		CPU_AND(&other_cpus, &pmap->pm_active);
		if (!CPU_EMPTY(&other_cpus))
			smp_masked_invlpg_range(other_cpus, sva, eva);
	}
	sched_unpin();
	PT_UPDATES_FLUSH();
}

void
pmap_invalidate_all(pmap_t pmap)
{
	cpuset_t other_cpus;
	u_int cpuid;

	CTR1(KTR_PMAP, "pmap_invalidate_all: pmap=%p", pmap);

	sched_pin();
	if (pmap == kernel_pmap || !CPU_CMP(&pmap->pm_active, &all_cpus)) {
		invltlb();
		smp_invltlb();
	} else {
		cpuid = PCPU_GET(cpuid);
		other_cpus = all_cpus;
		CPU_CLR(cpuid, &other_cpus);
		if (CPU_ISSET(cpuid, &pmap->pm_active))
			invltlb();
		CPU_AND(&other_cpus, &pmap->pm_active);
		if (!CPU_EMPTY(&other_cpus))
			smp_masked_invltlb(other_cpus);
	}
	sched_unpin();
}

void
pmap_invalidate_cache(void)
{

	sched_pin();
	wbinvd();
	smp_cache_flush();
	sched_unpin();
}
#else /* !SMP */
/*
 * Normal, non-SMP, 486+ invalidation functions.
 * We inline these within pmap.c for speed.
836 */ 837PMAP_INLINE void 838pmap_invalidate_page(pmap_t pmap, vm_offset_t va) 839{ 840 CTR2(KTR_PMAP, "pmap_invalidate_page: pmap=%p va=0x%x", 841 pmap, va); 842 843 if (pmap == kernel_pmap || !CPU_EMPTY(&pmap->pm_active)) 844 invlpg(va); 845 PT_UPDATES_FLUSH(); 846} 847 848PMAP_INLINE void 849pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) 850{ 851 vm_offset_t addr; 852 853 if (eva - sva > PAGE_SIZE) 854 CTR3(KTR_PMAP, "pmap_invalidate_range: pmap=%p sva=0x%x eva=0x%x", 855 pmap, sva, eva); 856 857 if (pmap == kernel_pmap || !CPU_EMPTY(&pmap->pm_active)) 858 for (addr = sva; addr < eva; addr += PAGE_SIZE) 859 invlpg(addr); 860 PT_UPDATES_FLUSH(); 861} 862 863PMAP_INLINE void 864pmap_invalidate_all(pmap_t pmap) 865{ 866 867 CTR1(KTR_PMAP, "pmap_invalidate_all: pmap=%p", pmap); 868 869 if (pmap == kernel_pmap || !CPU_EMPTY(&pmap->pm_active)) 870 invltlb(); 871} 872 873PMAP_INLINE void 874pmap_invalidate_cache(void) 875{ 876 877 wbinvd(); 878} 879#endif /* !SMP */ 880 881#define PMAP_CLFLUSH_THRESHOLD (2 * 1024 * 1024) 882 883void 884pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva) 885{ 886 887 KASSERT((sva & PAGE_MASK) == 0, 888 ("pmap_invalidate_cache_range: sva not page-aligned")); 889 KASSERT((eva & PAGE_MASK) == 0, 890 ("pmap_invalidate_cache_range: eva not page-aligned")); 891 892 if (cpu_feature & CPUID_SS) 893 ; /* If "Self Snoop" is supported, do nothing. */ 894 else if ((cpu_feature & CPUID_CLFSH) != 0 && 895 eva - sva < PMAP_CLFLUSH_THRESHOLD) { 896 897 /* 898 * Otherwise, do per-cache line flush. Use the mfence 899 * instruction to insure that previous stores are 900 * included in the write-back. The processor 901 * propagates flush to other processors in the cache 902 * coherence domain. 903 */ 904 mfence(); 905 for (; sva < eva; sva += cpu_clflush_line_size) 906 clflush(sva); 907 mfence(); 908 } else { 909 910 /* 911 * No targeted cache flush methods are supported by CPU, 912 * or the supplied range is bigger than 2MB. 913 * Globally invalidate cache. 914 */ 915 pmap_invalidate_cache(); 916 } 917} 918 919void 920pmap_invalidate_cache_pages(vm_page_t *pages, int count) 921{ 922 int i; 923 924 if (count >= PMAP_CLFLUSH_THRESHOLD / PAGE_SIZE || 925 (cpu_feature & CPUID_CLFSH) == 0) { 926 pmap_invalidate_cache(); 927 } else { 928 for (i = 0; i < count; i++) 929 pmap_flush_page(pages[i]); 930 } 931} 932 933/* 934 * Are we current address space or kernel? N.B. We return FALSE when 935 * a pmap's page table is in use because a kernel thread is borrowing 936 * it. The borrowed page table can change spontaneously, making any 937 * dependence on its continued use subject to a race condition. 938 */ 939static __inline int 940pmap_is_current(pmap_t pmap) 941{ 942 943 return (pmap == kernel_pmap || 944 (pmap == vmspace_pmap(curthread->td_proc->p_vmspace) && 945 (pmap->pm_pdir[PTDPTDI] & PG_FRAME) == (PTDpde[0] & PG_FRAME))); 946} 947 948/* 949 * If the given pmap is not the current or kernel pmap, the returned pte must 950 * be released by passing it to pmap_pte_release(). 951 */ 952pt_entry_t * 953pmap_pte(pmap_t pmap, vm_offset_t va) 954{ 955 pd_entry_t newpf; 956 pd_entry_t *pde; 957 958 pde = pmap_pde(pmap, va); 959 if (*pde & PG_PS) 960 return (pde); 961 if (*pde != 0) { 962 /* are we current address space or kernel? 
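		 * If not, the pte is reached through the PMAP2/PADDR2 window
		 * below: PMAP2 is repointed at the page table page's frame
		 * while PMAP2mutex is held, so the caller must later pass the
		 * returned pte to pmap_pte_release(), which clears the window
		 * and drops the mutex.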
*/ 963 if (pmap_is_current(pmap)) 964 return (vtopte(va)); 965 mtx_lock(&PMAP2mutex); 966 newpf = *pde & PG_FRAME; 967 if ((*PMAP2 & PG_FRAME) != newpf) { 968 vm_page_lock_queues(); 969 PT_SET_MA(PADDR2, newpf | PG_V | PG_A | PG_M); 970 vm_page_unlock_queues(); 971 CTR3(KTR_PMAP, "pmap_pte: pmap=%p va=0x%x newpte=0x%08x", 972 pmap, va, (*PMAP2 & 0xffffffff)); 973 } 974 return (PADDR2 + (i386_btop(va) & (NPTEPG - 1))); 975 } 976 return (NULL); 977} 978 979/* 980 * Releases a pte that was obtained from pmap_pte(). Be prepared for the pte 981 * being NULL. 982 */ 983static __inline void 984pmap_pte_release(pt_entry_t *pte) 985{ 986 987 if ((pt_entry_t *)((vm_offset_t)pte & ~PAGE_MASK) == PADDR2) { 988 CTR1(KTR_PMAP, "pmap_pte_release: pte=0x%jx", 989 *PMAP2); 990 vm_page_lock_queues(); 991 PT_SET_VA(PMAP2, 0, TRUE); 992 vm_page_unlock_queues(); 993 mtx_unlock(&PMAP2mutex); 994 } 995} 996 997static __inline void 998invlcaddr(void *caddr) 999{ 1000 1001 invlpg((u_int)caddr); 1002 PT_UPDATES_FLUSH(); 1003} 1004 1005/* 1006 * Super fast pmap_pte routine best used when scanning 1007 * the pv lists. This eliminates many coarse-grained 1008 * invltlb calls. Note that many of the pv list 1009 * scans are across different pmaps. It is very wasteful 1010 * to do an entire invltlb for checking a single mapping. 1011 * 1012 * If the given pmap is not the current pmap, vm_page_queue_mtx 1013 * must be held and curthread pinned to a CPU. 1014 */ 1015static pt_entry_t * 1016pmap_pte_quick(pmap_t pmap, vm_offset_t va) 1017{ 1018 pd_entry_t newpf; 1019 pd_entry_t *pde; 1020 1021 pde = pmap_pde(pmap, va); 1022 if (*pde & PG_PS) 1023 return (pde); 1024 if (*pde != 0) { 1025 /* are we current address space or kernel? */ 1026 if (pmap_is_current(pmap)) 1027 return (vtopte(va)); 1028 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 1029 KASSERT(curthread->td_pinned > 0, ("curthread not pinned")); 1030 newpf = *pde & PG_FRAME; 1031 if ((*PMAP1 & PG_FRAME) != newpf) { 1032 PT_SET_MA(PADDR1, newpf | PG_V | PG_A | PG_M); 1033 CTR3(KTR_PMAP, "pmap_pte_quick: pmap=%p va=0x%x newpte=0x%08x", 1034 pmap, va, (u_long)*PMAP1); 1035 1036#ifdef SMP 1037 PMAP1cpu = PCPU_GET(cpuid); 1038#endif 1039 PMAP1changed++; 1040 } else 1041#ifdef SMP 1042 if (PMAP1cpu != PCPU_GET(cpuid)) { 1043 PMAP1cpu = PCPU_GET(cpuid); 1044 invlcaddr(PADDR1); 1045 PMAP1changedcpu++; 1046 } else 1047#endif 1048 PMAP1unchanged++; 1049 return (PADDR1 + (i386_btop(va) & (NPTEPG - 1))); 1050 } 1051 return (0); 1052} 1053 1054/* 1055 * Routine: pmap_extract 1056 * Function: 1057 * Extract the physical page address associated 1058 * with the given map/virtual_address pair. 1059 */ 1060vm_paddr_t 1061pmap_extract(pmap_t pmap, vm_offset_t va) 1062{ 1063 vm_paddr_t rtval; 1064 pt_entry_t *pte; 1065 pd_entry_t pde; 1066 pt_entry_t pteval; 1067 1068 rtval = 0; 1069 PMAP_LOCK(pmap); 1070 pde = pmap->pm_pdir[va >> PDRSHIFT]; 1071 if (pde != 0) { 1072 if ((pde & PG_PS) != 0) { 1073 rtval = xpmap_mtop(pde & PG_PS_FRAME) | (va & PDRMASK); 1074 PMAP_UNLOCK(pmap); 1075 return rtval; 1076 } 1077 pte = pmap_pte(pmap, va); 1078 pteval = *pte ? 
xpmap_mtop(*pte) : 0; 1079 rtval = (pteval & PG_FRAME) | (va & PAGE_MASK); 1080 pmap_pte_release(pte); 1081 } 1082 PMAP_UNLOCK(pmap); 1083 return (rtval); 1084} 1085 1086/* 1087 * Routine: pmap_extract_ma 1088 * Function: 1089 * Like pmap_extract, but returns machine address 1090 */ 1091vm_paddr_t 1092pmap_extract_ma(pmap_t pmap, vm_offset_t va) 1093{ 1094 vm_paddr_t rtval; 1095 pt_entry_t *pte; 1096 pd_entry_t pde; 1097 1098 rtval = 0; 1099 PMAP_LOCK(pmap); 1100 pde = pmap->pm_pdir[va >> PDRSHIFT]; 1101 if (pde != 0) { 1102 if ((pde & PG_PS) != 0) { 1103 rtval = (pde & ~PDRMASK) | (va & PDRMASK); 1104 PMAP_UNLOCK(pmap); 1105 return rtval; 1106 } 1107 pte = pmap_pte(pmap, va); 1108 rtval = (*pte & PG_FRAME) | (va & PAGE_MASK); 1109 pmap_pte_release(pte); 1110 } 1111 PMAP_UNLOCK(pmap); 1112 return (rtval); 1113} 1114 1115/* 1116 * Routine: pmap_extract_and_hold 1117 * Function: 1118 * Atomically extract and hold the physical page 1119 * with the given pmap and virtual address pair 1120 * if that mapping permits the given protection. 1121 */ 1122vm_page_t 1123pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot) 1124{ 1125 pd_entry_t pde; 1126 pt_entry_t pte, *ptep; 1127 vm_page_t m; 1128 vm_paddr_t pa; 1129 1130 pa = 0; 1131 m = NULL; 1132 PMAP_LOCK(pmap); 1133retry: 1134 pde = PT_GET(pmap_pde(pmap, va)); 1135 if (pde != 0) { 1136 if (pde & PG_PS) { 1137 if ((pde & PG_RW) || (prot & VM_PROT_WRITE) == 0) { 1138 if (vm_page_pa_tryrelock(pmap, (pde & 1139 PG_PS_FRAME) | (va & PDRMASK), &pa)) 1140 goto retry; 1141 m = PHYS_TO_VM_PAGE((pde & PG_PS_FRAME) | 1142 (va & PDRMASK)); 1143 vm_page_hold(m); 1144 } 1145 } else { 1146 ptep = pmap_pte(pmap, va); 1147 pte = PT_GET(ptep); 1148 pmap_pte_release(ptep); 1149 if (pte != 0 && 1150 ((pte & PG_RW) || (prot & VM_PROT_WRITE) == 0)) { 1151 if (vm_page_pa_tryrelock(pmap, pte & PG_FRAME, 1152 &pa)) 1153 goto retry; 1154 m = PHYS_TO_VM_PAGE(pte & PG_FRAME); 1155 vm_page_hold(m); 1156 } 1157 } 1158 } 1159 PA_UNLOCK_COND(pa); 1160 PMAP_UNLOCK(pmap); 1161 return (m); 1162} 1163 1164/*************************************************** 1165 * Low level mapping routines..... 1166 ***************************************************/ 1167 1168/* 1169 * Add a wired page to the kva. 1170 * Note: not SMP coherent. 1171 * 1172 * This function may be used before pmap_bootstrap() is called. 1173 */ 1174void 1175pmap_kenter(vm_offset_t va, vm_paddr_t pa) 1176{ 1177 1178 PT_SET_MA(va, xpmap_ptom(pa)| PG_RW | PG_V | pgeflag); 1179} 1180 1181void 1182pmap_kenter_ma(vm_offset_t va, vm_paddr_t ma) 1183{ 1184 pt_entry_t *pte; 1185 1186 pte = vtopte(va); 1187 pte_store_ma(pte, ma | PG_RW | PG_V | pgeflag); 1188} 1189 1190static __inline void 1191pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode) 1192{ 1193 1194 PT_SET_MA(va, pa | PG_RW | PG_V | pgeflag | pmap_cache_bits(mode, 0)); 1195} 1196 1197/* 1198 * Remove a page from the kernel pagetables. 1199 * Note: not SMP coherent. 1200 * 1201 * This function may be used before pmap_bootstrap() is called. 1202 */ 1203PMAP_INLINE void 1204pmap_kremove(vm_offset_t va) 1205{ 1206 pt_entry_t *pte; 1207 1208 pte = vtopte(va); 1209 PT_CLEAR_VA(pte, FALSE); 1210} 1211 1212/* 1213 * Used to map a range of physical addresses into kernel 1214 * virtual address space. 1215 * 1216 * The value passed in '*virt' is a suggested virtual address for 1217 * the mapping. 
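 *
 * A hypothetical use, for illustration only (the variable names below are
 * invented and are not part of this file):
 *
 *	vm_offset_t va = kva_cursor;
 *	vm_offset_t sva = pmap_map(&va, start_pa, end_pa,
 *	    VM_PROT_READ | VM_PROT_WRITE);
 *
 * Afterwards the physical range [start_pa, end_pa) is mapped starting at
 * 'sva', and 'va' has been advanced past the new mappings.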
Architectures which can support a direct-mapped 1218 * physical to virtual region can return the appropriate address 1219 * within that region, leaving '*virt' unchanged. Other 1220 * architectures should map the pages starting at '*virt' and 1221 * update '*virt' with the first usable address after the mapped 1222 * region. 1223 */ 1224vm_offset_t 1225pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot) 1226{ 1227 vm_offset_t va, sva; 1228 1229 va = sva = *virt; 1230 CTR4(KTR_PMAP, "pmap_map: va=0x%x start=0x%jx end=0x%jx prot=0x%x", 1231 va, start, end, prot); 1232 while (start < end) { 1233 pmap_kenter(va, start); 1234 va += PAGE_SIZE; 1235 start += PAGE_SIZE; 1236 } 1237 pmap_invalidate_range(kernel_pmap, sva, va); 1238 *virt = va; 1239 return (sva); 1240} 1241 1242 1243/* 1244 * Add a list of wired pages to the kva 1245 * this routine is only used for temporary 1246 * kernel mappings that do not need to have 1247 * page modification or references recorded. 1248 * Note that old mappings are simply written 1249 * over. The page *must* be wired. 1250 * Note: SMP coherent. Uses a ranged shootdown IPI. 1251 */ 1252void 1253pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count) 1254{ 1255 pt_entry_t *endpte, *pte; 1256 vm_paddr_t pa; 1257 vm_offset_t va = sva; 1258 int mclcount = 0; 1259 multicall_entry_t mcl[16]; 1260 multicall_entry_t *mclp = mcl; 1261 int error; 1262 1263 CTR2(KTR_PMAP, "pmap_qenter:sva=0x%x count=%d", va, count); 1264 pte = vtopte(sva); 1265 endpte = pte + count; 1266 while (pte < endpte) { 1267 pa = VM_PAGE_TO_MACH(*ma) | pgeflag | PG_RW | PG_V | PG_M | PG_A; 1268 1269 mclp->op = __HYPERVISOR_update_va_mapping; 1270 mclp->args[0] = va; 1271 mclp->args[1] = (uint32_t)(pa & 0xffffffff); 1272 mclp->args[2] = (uint32_t)(pa >> 32); 1273 mclp->args[3] = (*pte & PG_V) ? UVMF_INVLPG|UVMF_ALL : 0; 1274 1275 va += PAGE_SIZE; 1276 pte++; 1277 ma++; 1278 mclp++; 1279 mclcount++; 1280 if (mclcount == 16) { 1281 error = HYPERVISOR_multicall(mcl, mclcount); 1282 mclp = mcl; 1283 mclcount = 0; 1284 KASSERT(error == 0, ("bad multicall %d", error)); 1285 } 1286 } 1287 if (mclcount) { 1288 error = HYPERVISOR_multicall(mcl, mclcount); 1289 KASSERT(error == 0, ("bad multicall %d", error)); 1290 } 1291 1292#ifdef INVARIANTS 1293 for (pte = vtopte(sva), mclcount = 0; mclcount < count; mclcount++, pte++) 1294 KASSERT(*pte, ("pte not set for va=0x%x", sva + mclcount*PAGE_SIZE)); 1295#endif 1296} 1297 1298/* 1299 * This routine tears out page mappings from the 1300 * kernel -- it is meant only for temporary mappings. 1301 * Note: SMP coherent. Uses a ranged shootdown IPI. 1302 */ 1303void 1304pmap_qremove(vm_offset_t sva, int count) 1305{ 1306 vm_offset_t va; 1307 1308 CTR2(KTR_PMAP, "pmap_qremove: sva=0x%x count=%d", sva, count); 1309 va = sva; 1310 vm_page_lock_queues(); 1311 critical_enter(); 1312 while (count-- > 0) { 1313 pmap_kremove(va); 1314 va += PAGE_SIZE; 1315 } 1316 PT_UPDATES_FLUSH(); 1317 pmap_invalidate_range(kernel_pmap, sva, va); 1318 critical_exit(); 1319 vm_page_unlock_queues(); 1320} 1321 1322/*************************************************** 1323 * Page table page management routines..... 
1324 ***************************************************/ 1325static __inline void 1326pmap_free_zero_pages(vm_page_t free) 1327{ 1328 vm_page_t m; 1329 1330 while (free != NULL) { 1331 m = free; 1332 free = m->right; 1333 vm_page_free_zero(m); 1334 } 1335} 1336 1337/* 1338 * This routine unholds page table pages, and if the hold count 1339 * drops to zero, then it decrements the wire count. 1340 */ 1341static __inline int 1342pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m, vm_page_t *free) 1343{ 1344 1345 --m->wire_count; 1346 if (m->wire_count == 0) 1347 return (_pmap_unwire_pte_hold(pmap, m, free)); 1348 else 1349 return (0); 1350} 1351 1352static int 1353_pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m, vm_page_t *free) 1354{ 1355 vm_offset_t pteva; 1356 1357 PT_UPDATES_FLUSH(); 1358 /* 1359 * unmap the page table page 1360 */ 1361 xen_pt_unpin(pmap->pm_pdir[m->pindex]); 1362 /* 1363 * page *might* contain residual mapping :-/ 1364 */ 1365 PD_CLEAR_VA(pmap, m->pindex, TRUE); 1366 pmap_zero_page(m); 1367 --pmap->pm_stats.resident_count; 1368 1369 /* 1370 * This is a release store so that the ordinary store unmapping 1371 * the page table page is globally performed before TLB shoot- 1372 * down is begun. 1373 */ 1374 atomic_subtract_rel_int(&cnt.v_wire_count, 1); 1375 1376 /* 1377 * Do an invltlb to make the invalidated mapping 1378 * take effect immediately. 1379 */ 1380 pteva = VM_MAXUSER_ADDRESS + i386_ptob(m->pindex); 1381 pmap_invalidate_page(pmap, pteva); 1382 1383 /* 1384 * Put page on a list so that it is released after 1385 * *ALL* TLB shootdown is done 1386 */ 1387 m->right = *free; 1388 *free = m; 1389 1390 return (1); 1391} 1392 1393/* 1394 * After removing a page table entry, this routine is used to 1395 * conditionally free the page, and manage the hold/wire counts. 1396 */ 1397static int 1398pmap_unuse_pt(pmap_t pmap, vm_offset_t va, vm_page_t *free) 1399{ 1400 pd_entry_t ptepde; 1401 vm_page_t mpte; 1402 1403 if (va >= VM_MAXUSER_ADDRESS) 1404 return (0); 1405 ptepde = PT_GET(pmap_pde(pmap, va)); 1406 mpte = PHYS_TO_VM_PAGE(ptepde & PG_FRAME); 1407 return (pmap_unwire_pte_hold(pmap, mpte, free)); 1408} 1409 1410/* 1411 * Initialize the pmap for the swapper process. 1412 */ 1413void 1414pmap_pinit0(pmap_t pmap) 1415{ 1416 1417 PMAP_LOCK_INIT(pmap); 1418 /* 1419 * Since the page table directory is shared with the kernel pmap, 1420 * which is already included in the list "allpmaps", this pmap does 1421 * not need to be inserted into that list. 1422 */ 1423 pmap->pm_pdir = (pd_entry_t *)(KERNBASE + (vm_offset_t)IdlePTD); 1424#ifdef PAE 1425 pmap->pm_pdpt = (pdpt_entry_t *)(KERNBASE + (vm_offset_t)IdlePDPT); 1426#endif 1427 CPU_ZERO(&pmap->pm_active); 1428 PCPU_SET(curpmap, pmap); 1429 TAILQ_INIT(&pmap->pm_pvchunk); 1430 bzero(&pmap->pm_stats, sizeof pmap->pm_stats); 1431} 1432 1433/* 1434 * Initialize a preallocated and zeroed pmap structure, 1435 * such as one in a vmspace structure. 1436 */ 1437int 1438pmap_pinit(pmap_t pmap) 1439{ 1440 vm_page_t m, ptdpg[NPGPTD + 1]; 1441 int npgptd = NPGPTD + 1; 1442 int i; 1443 1444#ifdef HAMFISTED_LOCKING 1445 mtx_lock(&createdelete_lock); 1446#endif 1447 1448 PMAP_LOCK_INIT(pmap); 1449 1450 /* 1451 * No need to allocate page table space yet but we do need a valid 1452 * page directory table. 
1453 */ 1454 if (pmap->pm_pdir == NULL) { 1455 pmap->pm_pdir = (pd_entry_t *)kmem_alloc_nofault(kernel_map, 1456 NBPTD); 1457 if (pmap->pm_pdir == NULL) { 1458 PMAP_LOCK_DESTROY(pmap); 1459#ifdef HAMFISTED_LOCKING 1460 mtx_unlock(&createdelete_lock); 1461#endif 1462 return (0); 1463 } 1464#ifdef PAE 1465 pmap->pm_pdpt = (pd_entry_t *)kmem_alloc_nofault(kernel_map, 1); 1466#endif 1467 } 1468 1469 /* 1470 * allocate the page directory page(s) 1471 */ 1472 for (i = 0; i < npgptd;) { 1473 m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | 1474 VM_ALLOC_WIRED | VM_ALLOC_ZERO); 1475 if (m == NULL) 1476 VM_WAIT; 1477 else { 1478 ptdpg[i++] = m; 1479 } 1480 } 1481 1482 pmap_qenter((vm_offset_t)pmap->pm_pdir, ptdpg, NPGPTD); 1483 1484 for (i = 0; i < NPGPTD; i++) 1485 if ((ptdpg[i]->flags & PG_ZERO) == 0) 1486 pagezero(pmap->pm_pdir + (i * NPDEPG)); 1487 1488 mtx_lock_spin(&allpmaps_lock); 1489 LIST_INSERT_HEAD(&allpmaps, pmap, pm_list); 1490 /* Copy the kernel page table directory entries. */ 1491 bcopy(PTD + KPTDI, pmap->pm_pdir + KPTDI, nkpt * sizeof(pd_entry_t)); 1492 mtx_unlock_spin(&allpmaps_lock); 1493 1494#ifdef PAE 1495 pmap_qenter((vm_offset_t)pmap->pm_pdpt, &ptdpg[NPGPTD], 1); 1496 if ((ptdpg[NPGPTD]->flags & PG_ZERO) == 0) 1497 bzero(pmap->pm_pdpt, PAGE_SIZE); 1498 for (i = 0; i < NPGPTD; i++) { 1499 vm_paddr_t ma; 1500 1501 ma = VM_PAGE_TO_MACH(ptdpg[i]); 1502 pmap->pm_pdpt[i] = ma | PG_V; 1503 1504 } 1505#endif 1506 for (i = 0; i < NPGPTD; i++) { 1507 pt_entry_t *pd; 1508 vm_paddr_t ma; 1509 1510 ma = VM_PAGE_TO_MACH(ptdpg[i]); 1511 pd = pmap->pm_pdir + (i * NPDEPG); 1512 PT_SET_MA(pd, *vtopte((vm_offset_t)pd) & ~(PG_M|PG_A|PG_U|PG_RW)); 1513#if 0 1514 xen_pgd_pin(ma); 1515#endif 1516 } 1517 1518#ifdef PAE 1519 PT_SET_MA(pmap->pm_pdpt, *vtopte((vm_offset_t)pmap->pm_pdpt) & ~PG_RW); 1520#endif 1521 vm_page_lock_queues(); 1522 xen_flush_queue(); 1523 xen_pgdpt_pin(VM_PAGE_TO_MACH(ptdpg[NPGPTD])); 1524 for (i = 0; i < NPGPTD; i++) { 1525 vm_paddr_t ma = VM_PAGE_TO_MACH(ptdpg[i]); 1526 PT_SET_VA_MA(&pmap->pm_pdir[PTDPTDI + i], ma | PG_V | PG_A, FALSE); 1527 } 1528 xen_flush_queue(); 1529 vm_page_unlock_queues(); 1530 CPU_ZERO(&pmap->pm_active); 1531 TAILQ_INIT(&pmap->pm_pvchunk); 1532 bzero(&pmap->pm_stats, sizeof pmap->pm_stats); 1533 1534#ifdef HAMFISTED_LOCKING 1535 mtx_unlock(&createdelete_lock); 1536#endif 1537 return (1); 1538} 1539 1540/* 1541 * this routine is called if the page table page is not 1542 * mapped correctly. 1543 */ 1544static vm_page_t 1545_pmap_allocpte(pmap_t pmap, u_int ptepindex, int flags) 1546{ 1547 vm_paddr_t ptema; 1548 vm_page_t m; 1549 1550 KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT || 1551 (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK, 1552 ("_pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK")); 1553 1554 /* 1555 * Allocate a page table page. 1556 */ 1557 if ((m = vm_page_alloc(NULL, ptepindex, VM_ALLOC_NOOBJ | 1558 VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) { 1559 if (flags & M_WAITOK) { 1560 PMAP_UNLOCK(pmap); 1561 vm_page_unlock_queues(); 1562 VM_WAIT; 1563 vm_page_lock_queues(); 1564 PMAP_LOCK(pmap); 1565 } 1566 1567 /* 1568 * Indicate the need to retry. While waiting, the page table 1569 * page may have been allocated. 1570 */ 1571 return (NULL); 1572 } 1573 if ((m->flags & PG_ZERO) == 0) 1574 pmap_zero_page(m); 1575 1576 /* 1577 * Map the pagetable page into the process address space, if 1578 * it isn't already there. 
1579 */ 1580 1581 pmap->pm_stats.resident_count++; 1582 1583 ptema = VM_PAGE_TO_MACH(m); 1584 xen_pt_pin(ptema); 1585 PT_SET_VA_MA(&pmap->pm_pdir[ptepindex], 1586 (ptema | PG_U | PG_RW | PG_V | PG_A | PG_M), TRUE); 1587 1588 KASSERT(pmap->pm_pdir[ptepindex], 1589 ("_pmap_allocpte: ptepindex=%d did not get mapped", ptepindex)); 1590 return (m); 1591} 1592 1593static vm_page_t 1594pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags) 1595{ 1596 u_int ptepindex; 1597 pd_entry_t ptema; 1598 vm_page_t m; 1599 1600 KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT || 1601 (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK, 1602 ("pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK")); 1603 1604 /* 1605 * Calculate pagetable page index 1606 */ 1607 ptepindex = va >> PDRSHIFT; 1608retry: 1609 /* 1610 * Get the page directory entry 1611 */ 1612 ptema = pmap->pm_pdir[ptepindex]; 1613 1614 /* 1615 * This supports switching from a 4MB page to a 1616 * normal 4K page. 1617 */ 1618 if (ptema & PG_PS) { 1619 /* 1620 * XXX 1621 */ 1622 pmap->pm_pdir[ptepindex] = 0; 1623 ptema = 0; 1624 pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE; 1625 pmap_invalidate_all(kernel_pmap); 1626 } 1627 1628 /* 1629 * If the page table page is mapped, we just increment the 1630 * hold count, and activate it. 1631 */ 1632 if (ptema & PG_V) { 1633 m = PHYS_TO_VM_PAGE(xpmap_mtop(ptema) & PG_FRAME); 1634 m->wire_count++; 1635 } else { 1636 /* 1637 * Here if the pte page isn't mapped, or if it has 1638 * been deallocated. 1639 */ 1640 CTR3(KTR_PMAP, "pmap_allocpte: pmap=%p va=0x%08x flags=0x%x", 1641 pmap, va, flags); 1642 m = _pmap_allocpte(pmap, ptepindex, flags); 1643 if (m == NULL && (flags & M_WAITOK)) 1644 goto retry; 1645 1646 KASSERT(pmap->pm_pdir[ptepindex], ("ptepindex=%d did not get mapped", ptepindex)); 1647 } 1648 return (m); 1649} 1650 1651 1652/*************************************************** 1653* Pmap allocation/deallocation routines. 1654 ***************************************************/ 1655 1656#ifdef SMP 1657/* 1658 * Deal with a SMP shootdown of other users of the pmap that we are 1659 * trying to dispose of. This can be a bit hairy. 1660 */ 1661static cpuset_t *lazymask; 1662static u_int lazyptd; 1663static volatile u_int lazywait; 1664 1665void pmap_lazyfix_action(void); 1666 1667void 1668pmap_lazyfix_action(void) 1669{ 1670 1671#ifdef COUNT_IPIS 1672 (*ipi_lazypmap_counts[PCPU_GET(cpuid)])++; 1673#endif 1674 if (rcr3() == lazyptd) 1675 load_cr3(PCPU_GET(curpcb)->pcb_cr3); 1676 CPU_CLR_ATOMIC(PCPU_GET(cpuid), lazymask); 1677 atomic_store_rel_int(&lazywait, 1); 1678} 1679 1680static void 1681pmap_lazyfix_self(u_int cpuid) 1682{ 1683 1684 if (rcr3() == lazyptd) 1685 load_cr3(PCPU_GET(curpcb)->pcb_cr3); 1686 CPU_CLR_ATOMIC(cpuid, lazymask); 1687} 1688 1689 1690static void 1691pmap_lazyfix(pmap_t pmap) 1692{ 1693 cpuset_t mymask, mask; 1694 u_int cpuid, spins; 1695 int lsb; 1696 1697 mask = pmap->pm_active; 1698 while (!CPU_EMPTY(&mask)) { 1699 spins = 50000000; 1700 1701 /* Find least significant set bit. */ 1702 lsb = cpusetobj_ffs(&mask); 1703 MPASS(lsb != 0); 1704 lsb--; 1705 CPU_SETOF(lsb, &mask); 1706 mtx_lock_spin(&smp_ipi_mtx); 1707#ifdef PAE 1708 lazyptd = vtophys(pmap->pm_pdpt); 1709#else 1710 lazyptd = vtophys(pmap->pm_pdir); 1711#endif 1712 cpuid = PCPU_GET(cpuid); 1713 1714 /* Use a cpuset just for having an easy check. 
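		 * CPU_SETOF() builds a set holding only the current CPU, so
		 * the CPU_CMP() test below asks whether we are the sole
		 * remaining user of the pmap.  If so, the fix-up is done
		 * locally; otherwise IPI_LAZYPMAP is sent to the remaining
		 * users and we spin (bounded by 'spins') until they
		 * acknowledge through 'lazywait'.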
*/ 1715 CPU_SETOF(cpuid, &mymask); 1716 if (!CPU_CMP(&mask, &mymask)) { 1717 lazymask = &pmap->pm_active; 1718 pmap_lazyfix_self(cpuid); 1719 } else { 1720 atomic_store_rel_int((u_int *)&lazymask, 1721 (u_int)&pmap->pm_active); 1722 atomic_store_rel_int(&lazywait, 0); 1723 ipi_selected(mask, IPI_LAZYPMAP); 1724 while (lazywait == 0) { 1725 ia32_pause(); 1726 if (--spins == 0) 1727 break; 1728 } 1729 } 1730 mtx_unlock_spin(&smp_ipi_mtx); 1731 if (spins == 0) 1732 printf("pmap_lazyfix: spun for 50000000\n"); 1733 mask = pmap->pm_active; 1734 } 1735} 1736 1737#else /* SMP */ 1738 1739/* 1740 * Cleaning up on uniprocessor is easy. For various reasons, we're 1741 * unlikely to have to even execute this code, including the fact 1742 * that the cleanup is deferred until the parent does a wait(2), which 1743 * means that another userland process has run. 1744 */ 1745static void 1746pmap_lazyfix(pmap_t pmap) 1747{ 1748 u_int cr3; 1749 1750 cr3 = vtophys(pmap->pm_pdir); 1751 if (cr3 == rcr3()) { 1752 load_cr3(PCPU_GET(curpcb)->pcb_cr3); 1753 CPU_CLR(PCPU_GET(cpuid), &pmap->pm_active); 1754 } 1755} 1756#endif /* SMP */ 1757 1758/* 1759 * Release any resources held by the given physical map. 1760 * Called when a pmap initialized by pmap_pinit is being released. 1761 * Should only be called if the map contains no valid mappings. 1762 */ 1763void 1764pmap_release(pmap_t pmap) 1765{ 1766 vm_page_t m, ptdpg[2*NPGPTD+1]; 1767 vm_paddr_t ma; 1768 int i; 1769#ifdef PAE 1770 int npgptd = NPGPTD + 1; 1771#else 1772 int npgptd = NPGPTD; 1773#endif 1774 1775 KASSERT(pmap->pm_stats.resident_count == 0, 1776 ("pmap_release: pmap resident count %ld != 0", 1777 pmap->pm_stats.resident_count)); 1778 PT_UPDATES_FLUSH(); 1779 1780#ifdef HAMFISTED_LOCKING 1781 mtx_lock(&createdelete_lock); 1782#endif 1783 1784 pmap_lazyfix(pmap); 1785 mtx_lock_spin(&allpmaps_lock); 1786 LIST_REMOVE(pmap, pm_list); 1787 mtx_unlock_spin(&allpmaps_lock); 1788 1789 for (i = 0; i < NPGPTD; i++) 1790 ptdpg[i] = PHYS_TO_VM_PAGE(vtophys(pmap->pm_pdir + (i*NPDEPG)) & PG_FRAME); 1791 pmap_qremove((vm_offset_t)pmap->pm_pdir, NPGPTD); 1792#ifdef PAE 1793 ptdpg[NPGPTD] = PHYS_TO_VM_PAGE(vtophys(pmap->pm_pdpt)); 1794#endif 1795 1796 for (i = 0; i < npgptd; i++) { 1797 m = ptdpg[i]; 1798 ma = VM_PAGE_TO_MACH(m); 1799 /* unpinning L1 and L2 treated the same */ 1800#if 0 1801 xen_pgd_unpin(ma); 1802#else 1803 if (i == NPGPTD) 1804 xen_pgd_unpin(ma); 1805#endif 1806#ifdef PAE 1807 if (i < NPGPTD) 1808 KASSERT(VM_PAGE_TO_MACH(m) == (pmap->pm_pdpt[i] & PG_FRAME), 1809 ("pmap_release: got wrong ptd page")); 1810#endif 1811 m->wire_count--; 1812 atomic_subtract_int(&cnt.v_wire_count, 1); 1813 vm_page_free(m); 1814 } 1815#ifdef PAE 1816 pmap_qremove((vm_offset_t)pmap->pm_pdpt, 1); 1817#endif 1818 PMAP_LOCK_DESTROY(pmap); 1819 1820#ifdef HAMFISTED_LOCKING 1821 mtx_unlock(&createdelete_lock); 1822#endif 1823} 1824 1825static int 1826kvm_size(SYSCTL_HANDLER_ARGS) 1827{ 1828 unsigned long ksize = VM_MAX_KERNEL_ADDRESS - KERNBASE; 1829 1830 return (sysctl_handle_long(oidp, &ksize, 0, req)); 1831} 1832SYSCTL_PROC(_vm, OID_AUTO, kvm_size, CTLTYPE_LONG|CTLFLAG_RD, 1833 0, 0, kvm_size, "IU", "Size of KVM"); 1834 1835static int 1836kvm_free(SYSCTL_HANDLER_ARGS) 1837{ 1838 unsigned long kfree = VM_MAX_KERNEL_ADDRESS - kernel_vm_end; 1839 1840 return (sysctl_handle_long(oidp, &kfree, 0, req)); 1841} 1842SYSCTL_PROC(_vm, OID_AUTO, kvm_free, CTLTYPE_LONG|CTLFLAG_RD, 1843 0, 0, kvm_free, "IU", "Amount of KVM free"); 1844 1845/* 1846 * grow the number of kernel page table 
entries, if needed 1847 */ 1848void 1849pmap_growkernel(vm_offset_t addr) 1850{ 1851 struct pmap *pmap; 1852 vm_paddr_t ptppaddr; 1853 vm_page_t nkpg; 1854 pd_entry_t newpdir; 1855 1856 mtx_assert(&kernel_map->system_mtx, MA_OWNED); 1857 if (kernel_vm_end == 0) { 1858 kernel_vm_end = KERNBASE; 1859 nkpt = 0; 1860 while (pdir_pde(PTD, kernel_vm_end)) { 1861 kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1); 1862 nkpt++; 1863 if (kernel_vm_end - 1 >= kernel_map->max_offset) { 1864 kernel_vm_end = kernel_map->max_offset; 1865 break; 1866 } 1867 } 1868 } 1869 addr = roundup2(addr, NBPDR); 1870 if (addr - 1 >= kernel_map->max_offset) 1871 addr = kernel_map->max_offset; 1872 while (kernel_vm_end < addr) { 1873 if (pdir_pde(PTD, kernel_vm_end)) { 1874 kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK; 1875 if (kernel_vm_end - 1 >= kernel_map->max_offset) { 1876 kernel_vm_end = kernel_map->max_offset; 1877 break; 1878 } 1879 continue; 1880 } 1881 1882 nkpg = vm_page_alloc(NULL, kernel_vm_end >> PDRSHIFT, 1883 VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | 1884 VM_ALLOC_ZERO); 1885 if (nkpg == NULL) 1886 panic("pmap_growkernel: no memory to grow kernel"); 1887 1888 nkpt++; 1889 1890 if ((nkpg->flags & PG_ZERO) == 0) 1891 pmap_zero_page(nkpg); 1892 ptppaddr = VM_PAGE_TO_PHYS(nkpg); 1893 newpdir = (pd_entry_t) (ptppaddr | PG_V | PG_RW | PG_A | PG_M); 1894 vm_page_lock_queues(); 1895 PD_SET_VA(kernel_pmap, (kernel_vm_end >> PDRSHIFT), newpdir, TRUE); 1896 mtx_lock_spin(&allpmaps_lock); 1897 LIST_FOREACH(pmap, &allpmaps, pm_list) 1898 PD_SET_VA(pmap, (kernel_vm_end >> PDRSHIFT), newpdir, TRUE); 1899 1900 mtx_unlock_spin(&allpmaps_lock); 1901 vm_page_unlock_queues(); 1902 1903 kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK; 1904 if (kernel_vm_end - 1 >= kernel_map->max_offset) { 1905 kernel_vm_end = kernel_map->max_offset; 1906 break; 1907 } 1908 } 1909} 1910 1911 1912/*************************************************** 1913 * page management routines. 
1914 ***************************************************/ 1915 1916CTASSERT(sizeof(struct pv_chunk) == PAGE_SIZE); 1917CTASSERT(_NPCM == 11); 1918 1919static __inline struct pv_chunk * 1920pv_to_chunk(pv_entry_t pv) 1921{ 1922 1923 return ((struct pv_chunk *)((uintptr_t)pv & ~(uintptr_t)PAGE_MASK)); 1924} 1925 1926#define PV_PMAP(pv) (pv_to_chunk(pv)->pc_pmap) 1927 1928#define PC_FREE0_9 0xfffffffful /* Free values for index 0 through 9 */ 1929#define PC_FREE10 0x0000fffful /* Free values for index 10 */ 1930 1931static uint32_t pc_freemask[11] = { 1932 PC_FREE0_9, PC_FREE0_9, PC_FREE0_9, 1933 PC_FREE0_9, PC_FREE0_9, PC_FREE0_9, 1934 PC_FREE0_9, PC_FREE0_9, PC_FREE0_9, 1935 PC_FREE0_9, PC_FREE10 1936}; 1937 1938SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_count, CTLFLAG_RD, &pv_entry_count, 0, 1939 "Current number of pv entries"); 1940 1941#ifdef PV_STATS 1942static int pc_chunk_count, pc_chunk_allocs, pc_chunk_frees, pc_chunk_tryfail; 1943 1944SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_count, CTLFLAG_RD, &pc_chunk_count, 0, 1945 "Current number of pv entry chunks"); 1946SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_allocs, CTLFLAG_RD, &pc_chunk_allocs, 0, 1947 "Current number of pv entry chunks allocated"); 1948SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_frees, CTLFLAG_RD, &pc_chunk_frees, 0, 1949 "Current number of pv entry chunks frees"); 1950SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_tryfail, CTLFLAG_RD, &pc_chunk_tryfail, 0, 1951 "Number of times tried to get a chunk page but failed."); 1952 1953static long pv_entry_frees, pv_entry_allocs; 1954static int pv_entry_spare; 1955 1956SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_frees, CTLFLAG_RD, &pv_entry_frees, 0, 1957 "Current number of pv entry frees"); 1958SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_allocs, CTLFLAG_RD, &pv_entry_allocs, 0, 1959 "Current number of pv entry allocs"); 1960SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_spare, CTLFLAG_RD, &pv_entry_spare, 0, 1961 "Current number of spare pv entries"); 1962#endif 1963 1964/* 1965 * We are in a serious low memory condition. Resort to 1966 * drastic measures to free some pages so we can allocate 1967 * another pv entry chunk. 1968 */ 1969static vm_page_t 1970pmap_pv_reclaim(pmap_t locked_pmap) 1971{ 1972 struct pch newtail; 1973 struct pv_chunk *pc; 1974 pmap_t pmap; 1975 pt_entry_t *pte, tpte; 1976 pv_entry_t pv; 1977 vm_offset_t va; 1978 vm_page_t free, m, m_pc; 1979 uint32_t inuse, freemask; 1980 int bit, field, freed; 1981 1982 PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED); 1983 pmap = NULL; 1984 free = m_pc = NULL; 1985 TAILQ_INIT(&newtail); 1986 sched_pin(); 1987 while ((pc = TAILQ_FIRST(&pv_chunks)) != NULL && (pv_vafree == 0 || 1988 free == NULL)) { 1989 TAILQ_REMOVE(&pv_chunks, pc, pc_lru); 1990 if (pmap != pc->pc_pmap) { 1991 if (pmap != NULL) { 1992 pmap_invalidate_all(pmap); 1993 if (pmap != locked_pmap) 1994 PMAP_UNLOCK(pmap); 1995 } 1996 pmap = pc->pc_pmap; 1997 /* Avoid deadlock and lock recursion. */ 1998 if (pmap > locked_pmap) 1999 PMAP_LOCK(pmap); 2000 else if (pmap != locked_pmap && !PMAP_TRYLOCK(pmap)) { 2001 pmap = NULL; 2002 TAILQ_INSERT_TAIL(&newtail, pc, pc_lru); 2003 continue; 2004 } 2005 } 2006 2007 /* 2008 * Destroy every non-wired, 4 KB page mapping in the chunk. 
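		 *
		 * The scan below visits only the allocated entries: each of
		 * the _NPCM (11) 32-bit pc_map[] words covers 32 pv entries,
		 * with pc_freemask[] (PC_FREE0_9/PC_FREE10) marking the valid
		 * bits, so "~pc->pc_map[field] & pc_freemask[field]" yields
		 * the in-use entries of that word.  As a worked example with
		 * hypothetical values: if pc_map[0] == 0xfffffff0, then inuse
		 * == 0x0000000f and bsfl() visits bits 0 through 3.  Each
		 * entry whose mapping is torn down is recorded in "freemask"
		 * and merged back into the word once it has been scanned;
		 * wired (PG_W) mappings are skipped.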
2009 */ 2010 freed = 0; 2011 for (field = 0; field < _NPCM; field++) { 2012 freemask = 0; 2013 for (inuse = ~pc->pc_map[field] & pc_freemask[field]; 2014 inuse != 0; inuse &= ~(1UL << bit)) { 2015 bit = bsfl(inuse); 2016 pv = &pc->pc_pventry[field * 32 + bit]; 2017 va = pv->pv_va; 2018 pte = pmap_pte_quick(pmap, va); 2019 if ((*pte & PG_W) != 0) 2020 continue; 2021 tpte = pte_load_clear(pte); 2022 if ((tpte & PG_G) != 0) 2023 pmap_invalidate_page(pmap, va); 2024 m = PHYS_TO_VM_PAGE(tpte & PG_FRAME); 2025 if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) 2026 vm_page_dirty(m); 2027 if ((tpte & PG_A) != 0) 2028 vm_page_aflag_set(m, PGA_REFERENCED); 2029 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list); 2030 if (TAILQ_EMPTY(&m->md.pv_list)) 2031 vm_page_aflag_clear(m, PGA_WRITEABLE); 2032 pmap_unuse_pt(pmap, va, &free); 2033 freemask |= 1UL << bit; 2034 freed++; 2035 } 2036 pc->pc_map[field] |= freemask; 2037 } 2038 if (freed == 0) { 2039 TAILQ_INSERT_TAIL(&newtail, pc, pc_lru); 2040 continue; 2041 } 2042 pmap->pm_stats.resident_count -= freed; 2043 PV_STAT(pv_entry_frees += freed); 2044 PV_STAT(pv_entry_spare += freed); 2045 pv_entry_count -= freed; 2046 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 2047 for (field = 0; field < _NPCM; field++) 2048 if (pc->pc_map[field] != pc_freemask[field]) { 2049 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, 2050 pc_list); 2051 TAILQ_INSERT_TAIL(&newtail, pc, pc_lru); 2052 2053 /* 2054 * One freed pv entry in locked_pmap is 2055 * sufficient. 2056 */ 2057 if (pmap == locked_pmap) 2058 goto out; 2059 break; 2060 } 2061 if (field == _NPCM) { 2062 PV_STAT(pv_entry_spare -= _NPCPV); 2063 PV_STAT(pc_chunk_count--); 2064 PV_STAT(pc_chunk_frees++); 2065 /* Entire chunk is free; return it. */ 2066 m_pc = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)pc)); 2067 pmap_qremove((vm_offset_t)pc, 1); 2068 pmap_ptelist_free(&pv_vafree, (vm_offset_t)pc); 2069 break; 2070 } 2071 } 2072out: 2073 sched_unpin(); 2074 TAILQ_CONCAT(&pv_chunks, &newtail, pc_lru); 2075 if (pmap != NULL) { 2076 pmap_invalidate_all(pmap); 2077 if (pmap != locked_pmap) 2078 PMAP_UNLOCK(pmap); 2079 } 2080 if (m_pc == NULL && pv_vafree != 0 && free != NULL) { 2081 m_pc = free; 2082 free = m_pc->right; 2083 /* Recycle a freed page table page. 
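 *
 * The page was handed to the local "free" list by pmap_unuse_pt()
 * above; taking it from that list to back a new pv chunk means it
 * must be re-wired, hence the wire_count and v_wire_count updates
 * that follow.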
*/ 2084 m_pc->wire_count = 1; 2085 atomic_add_int(&cnt.v_wire_count, 1); 2086 } 2087 pmap_free_zero_pages(free); 2088 return (m_pc); 2089} 2090 2091 2092/* 2093 * free the pv_entry back to the free list 2094 */ 2095static void 2096free_pv_entry(pmap_t pmap, pv_entry_t pv) 2097{ 2098 vm_page_t m; 2099 struct pv_chunk *pc; 2100 int idx, field, bit; 2101 2102 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2103 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2104 PV_STAT(pv_entry_frees++); 2105 PV_STAT(pv_entry_spare++); 2106 pv_entry_count--; 2107 pc = pv_to_chunk(pv); 2108 idx = pv - &pc->pc_pventry[0]; 2109 field = idx / 32; 2110 bit = idx % 32; 2111 pc->pc_map[field] |= 1ul << bit; 2112 /* move to head of list */ 2113 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 2114 for (idx = 0; idx < _NPCM; idx++) 2115 if (pc->pc_map[idx] != pc_freemask[idx]) { 2116 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list); 2117 return; 2118 } 2119 TAILQ_REMOVE(&pv_chunks, pc, pc_lru); 2120 PV_STAT(pv_entry_spare -= _NPCPV); 2121 PV_STAT(pc_chunk_count--); 2122 PV_STAT(pc_chunk_frees++); 2123 /* entire chunk is free, return it */ 2124 m = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)pc)); 2125 pmap_qremove((vm_offset_t)pc, 1); 2126 vm_page_unwire(m, 0); 2127 vm_page_free(m); 2128 pmap_ptelist_free(&pv_vafree, (vm_offset_t)pc); 2129} 2130 2131/* 2132 * get a new pv_entry, allocating a block from the system 2133 * when needed. 2134 */ 2135static pv_entry_t 2136get_pv_entry(pmap_t pmap, int try) 2137{ 2138 static const struct timeval printinterval = { 60, 0 }; 2139 static struct timeval lastprint; 2140 int bit, field; 2141 pv_entry_t pv; 2142 struct pv_chunk *pc; 2143 vm_page_t m; 2144 2145 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2146 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2147 PV_STAT(pv_entry_allocs++); 2148 pv_entry_count++; 2149 if (pv_entry_count > pv_entry_high_water) 2150 if (ratecheck(&lastprint, &printinterval)) 2151 printf("Approaching the limit on PV entries, consider " 2152 "increasing either the vm.pmap.shpgperproc or the " 2153 "vm.pmap.pv_entry_max tunable.\n"); 2154retry: 2155 pc = TAILQ_FIRST(&pmap->pm_pvchunk); 2156 if (pc != NULL) { 2157 for (field = 0; field < _NPCM; field++) { 2158 if (pc->pc_map[field]) { 2159 bit = bsfl(pc->pc_map[field]); 2160 break; 2161 } 2162 } 2163 if (field < _NPCM) { 2164 pv = &pc->pc_pventry[field * 32 + bit]; 2165 pc->pc_map[field] &= ~(1ul << bit); 2166 /* If this was the last item, move it to tail */ 2167 for (field = 0; field < _NPCM; field++) 2168 if (pc->pc_map[field] != 0) { 2169 PV_STAT(pv_entry_spare--); 2170 return (pv); /* not full, return */ 2171 } 2172 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 2173 TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list); 2174 if (pc != TAILQ_LAST(&pv_chunks, pch)) { 2175 TAILQ_REMOVE(&pv_chunks, pc, pc_lru); 2176 TAILQ_INSERT_TAIL(&pv_chunks, pc, pc_lru); 2177 } 2178 PV_STAT(pv_entry_spare--); 2179 return (pv); 2180 } 2181 } 2182 /* 2183 * Access to the ptelist "pv_vafree" is synchronized by the page 2184 * queues lock. If "pv_vafree" is currently non-empty, it will 2185 * remain non-empty until pmap_ptelist_alloc() completes. 
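 *
 * If either the reserved kernel VA or a physical page cannot be
 * obtained, a non-blocking caller ("try") backs out; otherwise
 * pmap_pv_reclaim() is asked to free pv entries.  A NULL return
 * means no whole chunk page was recovered, so the code retries,
 * hoping that the reclaim opened up a slot in one of this pmap's
 * existing chunks.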
2186 */ 2187 if (pv_vafree == 0 || (m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | 2188 VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) { 2189 if (try) { 2190 pv_entry_count--; 2191 PV_STAT(pc_chunk_tryfail++); 2192 return (NULL); 2193 } 2194 m = pmap_pv_reclaim(pmap); 2195 if (m == NULL) 2196 goto retry; 2197 } 2198 PV_STAT(pc_chunk_count++); 2199 PV_STAT(pc_chunk_allocs++); 2200 pc = (struct pv_chunk *)pmap_ptelist_alloc(&pv_vafree); 2201 pmap_qenter((vm_offset_t)pc, &m, 1); 2202 if ((m->flags & PG_ZERO) == 0) 2203 pagezero(pc); 2204 pc->pc_pmap = pmap; 2205 pc->pc_map[0] = pc_freemask[0] & ~1ul; /* preallocated bit 0 */ 2206 for (field = 1; field < _NPCM; field++) 2207 pc->pc_map[field] = pc_freemask[field]; 2208 TAILQ_INSERT_TAIL(&pv_chunks, pc, pc_lru); 2209 pv = &pc->pc_pventry[0]; 2210 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list); 2211 PV_STAT(pv_entry_spare += _NPCPV - 1); 2212 return (pv); 2213} 2214 2215static __inline pv_entry_t 2216pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va) 2217{ 2218 pv_entry_t pv; 2219 2220 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2221 TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) { 2222 if (pmap == PV_PMAP(pv) && va == pv->pv_va) { 2223 TAILQ_REMOVE(&pvh->pv_list, pv, pv_list); 2224 break; 2225 } 2226 } 2227 return (pv); 2228} 2229 2230static void 2231pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va) 2232{ 2233 pv_entry_t pv; 2234 2235 pv = pmap_pvh_remove(pvh, pmap, va); 2236 KASSERT(pv != NULL, ("pmap_pvh_free: pv not found")); 2237 free_pv_entry(pmap, pv); 2238} 2239 2240static void 2241pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va) 2242{ 2243 2244 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2245 pmap_pvh_free(&m->md, pmap, va); 2246 if (TAILQ_EMPTY(&m->md.pv_list)) 2247 vm_page_aflag_clear(m, PGA_WRITEABLE); 2248} 2249 2250/* 2251 * Conditionally create a pv entry. 2252 */ 2253static boolean_t 2254pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m) 2255{ 2256 pv_entry_t pv; 2257 2258 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2259 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2260 if (pv_entry_count < pv_entry_high_water && 2261 (pv = get_pv_entry(pmap, TRUE)) != NULL) { 2262 pv->pv_va = va; 2263 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list); 2264 return (TRUE); 2265 } else 2266 return (FALSE); 2267} 2268 2269/* 2270 * pmap_remove_pte: do the things to unmap a page in a process 2271 */ 2272static int 2273pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t va, vm_page_t *free) 2274{ 2275 pt_entry_t oldpte; 2276 vm_page_t m; 2277 2278 CTR3(KTR_PMAP, "pmap_remove_pte: pmap=%p *ptq=0x%x va=0x%x", 2279 pmap, (u_long)*ptq, va); 2280 2281 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2282 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2283 oldpte = *ptq; 2284 PT_SET_VA_MA(ptq, 0, TRUE); 2285 if (oldpte & PG_W) 2286 pmap->pm_stats.wired_count -= 1; 2287 /* 2288 * Machines that don't support invlpg, also don't support 2289 * PG_G. 
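 *
 * Global (PG_G) entries are created only for the kernel pmap (see
 * pmap_enter()), so such a mapping is flushed against kernel_pmap
 * right away instead of being left to the caller's batched
 * invalidation.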
2290 */ 2291 if (oldpte & PG_G) 2292 pmap_invalidate_page(kernel_pmap, va); 2293 pmap->pm_stats.resident_count -= 1; 2294 if (oldpte & PG_MANAGED) { 2295 m = PHYS_TO_VM_PAGE(xpmap_mtop(oldpte) & PG_FRAME); 2296 if ((oldpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) 2297 vm_page_dirty(m); 2298 if (oldpte & PG_A) 2299 vm_page_aflag_set(m, PGA_REFERENCED); 2300 pmap_remove_entry(pmap, m, va); 2301 } 2302 return (pmap_unuse_pt(pmap, va, free)); 2303} 2304 2305/* 2306 * Remove a single page from a process address space 2307 */ 2308static void 2309pmap_remove_page(pmap_t pmap, vm_offset_t va, vm_page_t *free) 2310{ 2311 pt_entry_t *pte; 2312 2313 CTR2(KTR_PMAP, "pmap_remove_page: pmap=%p va=0x%x", 2314 pmap, va); 2315 2316 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2317 KASSERT(curthread->td_pinned > 0, ("curthread not pinned")); 2318 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2319 if ((pte = pmap_pte_quick(pmap, va)) == NULL || (*pte & PG_V) == 0) 2320 return; 2321 pmap_remove_pte(pmap, pte, va, free); 2322 pmap_invalidate_page(pmap, va); 2323 if (*PMAP1) 2324 PT_SET_MA(PADDR1, 0); 2325 2326} 2327 2328/* 2329 * Remove the given range of addresses from the specified map. 2330 * 2331 * It is assumed that the start and end are properly 2332 * rounded to the page size. 2333 */ 2334void 2335pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) 2336{ 2337 vm_offset_t pdnxt; 2338 pd_entry_t ptpaddr; 2339 pt_entry_t *pte; 2340 vm_page_t free = NULL; 2341 int anyvalid; 2342 2343 CTR3(KTR_PMAP, "pmap_remove: pmap=%p sva=0x%x eva=0x%x", 2344 pmap, sva, eva); 2345 2346 /* 2347 * Perform an unsynchronized read. This is, however, safe. 2348 */ 2349 if (pmap->pm_stats.resident_count == 0) 2350 return; 2351 2352 anyvalid = 0; 2353 2354 vm_page_lock_queues(); 2355 sched_pin(); 2356 PMAP_LOCK(pmap); 2357 2358 /* 2359 * special handling of removing one page. a very 2360 * common operation and easy to short circuit some 2361 * code. 2362 */ 2363 if ((sva + PAGE_SIZE == eva) && 2364 ((pmap->pm_pdir[(sva >> PDRSHIFT)] & PG_PS) == 0)) { 2365 pmap_remove_page(pmap, sva, &free); 2366 goto out; 2367 } 2368 2369 for (; sva < eva; sva = pdnxt) { 2370 u_int pdirindex; 2371 2372 /* 2373 * Calculate index for next page table. 2374 */ 2375 pdnxt = (sva + NBPDR) & ~PDRMASK; 2376 if (pdnxt < sva) 2377 pdnxt = eva; 2378 if (pmap->pm_stats.resident_count == 0) 2379 break; 2380 2381 pdirindex = sva >> PDRSHIFT; 2382 ptpaddr = pmap->pm_pdir[pdirindex]; 2383 2384 /* 2385 * Weed out invalid mappings. Note: we assume that the page 2386 * directory table is always allocated, and in kernel virtual. 2387 */ 2388 if (ptpaddr == 0) 2389 continue; 2390 2391 /* 2392 * Check for large page. 2393 */ 2394 if ((ptpaddr & PG_PS) != 0) { 2395 PD_CLEAR_VA(pmap, pdirindex, TRUE); 2396 pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE; 2397 anyvalid = 1; 2398 continue; 2399 } 2400 2401 /* 2402 * Limit our scan to either the end of the va represented 2403 * by the current page table page, or to the end of the 2404 * range being removed. 2405 */ 2406 if (pdnxt > eva) 2407 pdnxt = eva; 2408 2409 for (pte = pmap_pte_quick(pmap, sva); sva != pdnxt; pte++, 2410 sva += PAGE_SIZE) { 2411 if ((*pte & PG_V) == 0) 2412 continue; 2413 2414 /* 2415 * The TLB entry for a PG_G mapping is invalidated 2416 * by pmap_remove_pte(). 
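 *
 * Non-global mappings are instead invalidated lazily: "anyvalid"
 * is noted here and a single pmap_invalidate_all() is issued once
 * the whole range has been torn down (see "out:" below).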
2417 */ 2418 if ((*pte & PG_G) == 0) 2419 anyvalid = 1; 2420 if (pmap_remove_pte(pmap, pte, sva, &free)) 2421 break; 2422 } 2423 } 2424 PT_UPDATES_FLUSH(); 2425 if (*PMAP1) 2426 PT_SET_VA_MA(PMAP1, 0, TRUE); 2427out: 2428 if (anyvalid) 2429 pmap_invalidate_all(pmap); 2430 sched_unpin(); 2431 vm_page_unlock_queues(); 2432 PMAP_UNLOCK(pmap); 2433 pmap_free_zero_pages(free); 2434} 2435 2436/* 2437 * Routine: pmap_remove_all 2438 * Function: 2439 * Removes this physical page from 2440 * all physical maps in which it resides. 2441 * Reflects back modify bits to the pager. 2442 * 2443 * Notes: 2444 * Original versions of this routine were very 2445 * inefficient because they iteratively called 2446 * pmap_remove (slow...) 2447 */ 2448 2449void 2450pmap_remove_all(vm_page_t m) 2451{ 2452 pv_entry_t pv; 2453 pmap_t pmap; 2454 pt_entry_t *pte, tpte; 2455 vm_page_t free; 2456 2457 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 2458 ("pmap_remove_all: page %p is not managed", m)); 2459 free = NULL; 2460 vm_page_lock_queues(); 2461 sched_pin(); 2462 while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) { 2463 pmap = PV_PMAP(pv); 2464 PMAP_LOCK(pmap); 2465 pmap->pm_stats.resident_count--; 2466 pte = pmap_pte_quick(pmap, pv->pv_va); 2467 tpte = *pte; 2468 PT_SET_VA_MA(pte, 0, TRUE); 2469 if (tpte & PG_W) 2470 pmap->pm_stats.wired_count--; 2471 if (tpte & PG_A) 2472 vm_page_aflag_set(m, PGA_REFERENCED); 2473 2474 /* 2475 * Update the vm_page_t clean and reference bits. 2476 */ 2477 if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) 2478 vm_page_dirty(m); 2479 pmap_unuse_pt(pmap, pv->pv_va, &free); 2480 pmap_invalidate_page(pmap, pv->pv_va); 2481 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list); 2482 free_pv_entry(pmap, pv); 2483 PMAP_UNLOCK(pmap); 2484 } 2485 vm_page_aflag_clear(m, PGA_WRITEABLE); 2486 PT_UPDATES_FLUSH(); 2487 if (*PMAP1) 2488 PT_SET_MA(PADDR1, 0); 2489 sched_unpin(); 2490 vm_page_unlock_queues(); 2491 pmap_free_zero_pages(free); 2492} 2493 2494/* 2495 * Set the physical protection on the 2496 * specified range of this map as requested. 2497 */ 2498void 2499pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot) 2500{ 2501 vm_offset_t pdnxt; 2502 pd_entry_t ptpaddr; 2503 pt_entry_t *pte; 2504 int anychanged; 2505 2506 CTR4(KTR_PMAP, "pmap_protect: pmap=%p sva=0x%x eva=0x%x prot=0x%x", 2507 pmap, sva, eva, prot); 2508 2509 if ((prot & VM_PROT_READ) == VM_PROT_NONE) { 2510 pmap_remove(pmap, sva, eva); 2511 return; 2512 } 2513 2514#ifdef PAE 2515 if ((prot & (VM_PROT_WRITE|VM_PROT_EXECUTE)) == 2516 (VM_PROT_WRITE|VM_PROT_EXECUTE)) 2517 return; 2518#else 2519 if (prot & VM_PROT_WRITE) 2520 return; 2521#endif 2522 2523 anychanged = 0; 2524 2525 vm_page_lock_queues(); 2526 sched_pin(); 2527 PMAP_LOCK(pmap); 2528 for (; sva < eva; sva = pdnxt) { 2529 pt_entry_t obits, pbits; 2530 u_int pdirindex; 2531 2532 pdnxt = (sva + NBPDR) & ~PDRMASK; 2533 if (pdnxt < sva) 2534 pdnxt = eva; 2535 2536 pdirindex = sva >> PDRSHIFT; 2537 ptpaddr = pmap->pm_pdir[pdirindex]; 2538 2539 /* 2540 * Weed out invalid mappings. Note: we assume that the page 2541 * directory table is always allocated, and in kernel virtual. 2542 */ 2543 if (ptpaddr == 0) 2544 continue; 2545 2546 /* 2547 * Check for large page. 
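 *
 * A 2/4MB (PG_PS) mapping has no 4KB page table behind it, so the
 * protection change is applied directly to the page directory
 * entry.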
2548 */ 2549 if ((ptpaddr & PG_PS) != 0) { 2550 if ((prot & VM_PROT_WRITE) == 0) 2551 pmap->pm_pdir[pdirindex] &= ~(PG_M|PG_RW); 2552#ifdef PAE 2553 if ((prot & VM_PROT_EXECUTE) == 0) 2554 pmap->pm_pdir[pdirindex] |= pg_nx; 2555#endif 2556 anychanged = 1; 2557 continue; 2558 } 2559 2560 if (pdnxt > eva) 2561 pdnxt = eva; 2562 2563 for (pte = pmap_pte_quick(pmap, sva); sva != pdnxt; pte++, 2564 sva += PAGE_SIZE) { 2565 vm_page_t m; 2566 2567retry: 2568 /* 2569 * Regardless of whether a pte is 32 or 64 bits in 2570 * size, PG_RW, PG_A, and PG_M are among the least 2571 * significant 32 bits. 2572 */ 2573 obits = pbits = *pte; 2574 if ((pbits & PG_V) == 0) 2575 continue; 2576 2577 if ((prot & VM_PROT_WRITE) == 0) { 2578 if ((pbits & (PG_MANAGED | PG_M | PG_RW)) == 2579 (PG_MANAGED | PG_M | PG_RW)) { 2580 m = PHYS_TO_VM_PAGE(xpmap_mtop(pbits) & 2581 PG_FRAME); 2582 vm_page_dirty(m); 2583 } 2584 pbits &= ~(PG_RW | PG_M); 2585 } 2586#ifdef PAE 2587 if ((prot & VM_PROT_EXECUTE) == 0) 2588 pbits |= pg_nx; 2589#endif 2590 2591 if (pbits != obits) { 2592 obits = *pte; 2593 PT_SET_VA_MA(pte, pbits, TRUE); 2594 if (*pte != pbits) 2595 goto retry; 2596 if (obits & PG_G) 2597 pmap_invalidate_page(pmap, sva); 2598 else 2599 anychanged = 1; 2600 } 2601 } 2602 } 2603 PT_UPDATES_FLUSH(); 2604 if (*PMAP1) 2605 PT_SET_VA_MA(PMAP1, 0, TRUE); 2606 if (anychanged) 2607 pmap_invalidate_all(pmap); 2608 sched_unpin(); 2609 vm_page_unlock_queues(); 2610 PMAP_UNLOCK(pmap); 2611} 2612 2613/* 2614 * Insert the given physical page (p) at 2615 * the specified virtual address (v) in the 2616 * target physical map with the protection requested. 2617 * 2618 * If specified, the page will be wired down, meaning 2619 * that the related pte can not be reclaimed. 2620 * 2621 * NB: This is the only routine which MAY NOT lazy-evaluate 2622 * or lose information. That is, this routine must actually 2623 * insert this page into the given map NOW. 2624 */ 2625void 2626pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m, 2627 vm_prot_t prot, boolean_t wired) 2628{ 2629 pd_entry_t *pde; 2630 pt_entry_t *pte; 2631 pt_entry_t newpte, origpte; 2632 pv_entry_t pv; 2633 vm_paddr_t opa, pa; 2634 vm_page_t mpte, om; 2635 boolean_t invlva; 2636 2637 CTR6(KTR_PMAP, "pmap_enter: pmap=%08p va=0x%08x access=0x%x ma=0x%08x prot=0x%x wired=%d", 2638 pmap, va, access, VM_PAGE_TO_MACH(m), prot, wired); 2639 va = trunc_page(va); 2640 KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig")); 2641 KASSERT(va < UPT_MIN_ADDRESS || va >= UPT_MAX_ADDRESS, 2642 ("pmap_enter: invalid to pmap_enter page table pages (va: 0x%x)", 2643 va)); 2644 KASSERT((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) != 0 || 2645 VM_OBJECT_LOCKED(m->object), 2646 ("pmap_enter: page %p is not busy", m)); 2647 2648 mpte = NULL; 2649 2650 vm_page_lock_queues(); 2651 PMAP_LOCK(pmap); 2652 sched_pin(); 2653 2654 /* 2655 * In the case that a page table page is not 2656 * resident, we are creating it here. 
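 *
 * Only user addresses need this: kernel page table pages are
 * preallocated by pmap_growkernel(), so for kernel VAs the
 * directory entry is expected to exist already.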
2657 */ 2658 if (va < VM_MAXUSER_ADDRESS) { 2659 mpte = pmap_allocpte(pmap, va, M_WAITOK); 2660 } 2661 2662 pde = pmap_pde(pmap, va); 2663 if ((*pde & PG_PS) != 0) 2664 panic("pmap_enter: attempted pmap_enter on 4MB page"); 2665 pte = pmap_pte_quick(pmap, va); 2666 2667 /* 2668 * Page Directory table entry not valid, we need a new PT page 2669 */ 2670 if (pte == NULL) { 2671 panic("pmap_enter: invalid page directory pdir=%#jx, va=%#x", 2672 (uintmax_t)pmap->pm_pdir[va >> PDRSHIFT], va); 2673 } 2674 2675 pa = VM_PAGE_TO_PHYS(m); 2676 om = NULL; 2677 opa = origpte = 0; 2678 2679#if 0 2680 KASSERT((*pte & PG_V) || (*pte == 0), ("address set but not valid pte=%p *pte=0x%016jx", 2681 pte, *pte)); 2682#endif 2683 origpte = *pte; 2684 if (origpte) 2685 origpte = xpmap_mtop(origpte); 2686 opa = origpte & PG_FRAME; 2687 2688 /* 2689 * Mapping has not changed, must be protection or wiring change. 2690 */ 2691 if (origpte && (opa == pa)) { 2692 /* 2693 * Wiring change, just update stats. We don't worry about 2694 * wiring PT pages as they remain resident as long as there 2695 * are valid mappings in them. Hence, if a user page is wired, 2696 * the PT page will be also. 2697 */ 2698 if (wired && ((origpte & PG_W) == 0)) 2699 pmap->pm_stats.wired_count++; 2700 else if (!wired && (origpte & PG_W)) 2701 pmap->pm_stats.wired_count--; 2702 2703 /* 2704 * Remove extra pte reference 2705 */ 2706 if (mpte) 2707 mpte->wire_count--; 2708 2709 if (origpte & PG_MANAGED) { 2710 om = m; 2711 pa |= PG_MANAGED; 2712 } 2713 goto validate; 2714 } 2715 2716 pv = NULL; 2717 2718 /* 2719 * Mapping has changed, invalidate old range and fall through to 2720 * handle validating new mapping. 2721 */ 2722 if (opa) { 2723 if (origpte & PG_W) 2724 pmap->pm_stats.wired_count--; 2725 if (origpte & PG_MANAGED) { 2726 om = PHYS_TO_VM_PAGE(opa); 2727 pv = pmap_pvh_remove(&om->md, pmap, va); 2728 } else if (va < VM_MAXUSER_ADDRESS) 2729 printf("va=0x%x is unmanaged :-( \n", va); 2730 2731 if (mpte != NULL) { 2732 mpte->wire_count--; 2733 KASSERT(mpte->wire_count > 0, 2734 ("pmap_enter: missing reference to page table page," 2735 " va: 0x%x", va)); 2736 } 2737 } else 2738 pmap->pm_stats.resident_count++; 2739 2740 /* 2741 * Enter on the PV list if part of our managed memory. 2742 */ 2743 if ((m->oflags & VPO_UNMANAGED) == 0) { 2744 KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva, 2745 ("pmap_enter: managed mapping within the clean submap")); 2746 if (pv == NULL) 2747 pv = get_pv_entry(pmap, FALSE); 2748 pv->pv_va = va; 2749 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list); 2750 pa |= PG_MANAGED; 2751 } else if (pv != NULL) 2752 free_pv_entry(pmap, pv); 2753 2754 /* 2755 * Increment counters 2756 */ 2757 if (wired) 2758 pmap->pm_stats.wired_count++; 2759 2760validate: 2761 /* 2762 * Now validate mapping with desired protection/wiring. 2763 */ 2764 newpte = (pt_entry_t)(pa | PG_V); 2765 if ((prot & VM_PROT_WRITE) != 0) { 2766 newpte |= PG_RW; 2767 if ((newpte & PG_MANAGED) != 0) 2768 vm_page_aflag_set(m, PGA_WRITEABLE); 2769 } 2770#ifdef PAE 2771 if ((prot & VM_PROT_EXECUTE) == 0) 2772 newpte |= pg_nx; 2773#endif 2774 if (wired) 2775 newpte |= PG_W; 2776 if (va < VM_MAXUSER_ADDRESS) 2777 newpte |= PG_U; 2778 if (pmap == kernel_pmap) 2779 newpte |= pgeflag; 2780 2781 critical_enter(); 2782 /* 2783 * if the mapping or permission bits are different, we need 2784 * to update the pte. 
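 *
 * PG_M and PG_A are masked out of the comparison because the
 * hardware may set them asynchronously; a mapping that differs
 * only in those bits does not need to be rewritten.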
2785 */ 2786 if ((origpte & ~(PG_M|PG_A)) != newpte) { 2787 if (origpte) { 2788 invlva = FALSE; 2789 origpte = *pte; 2790 PT_SET_VA(pte, newpte | PG_A, FALSE); 2791 if (origpte & PG_A) { 2792 if (origpte & PG_MANAGED) 2793 vm_page_aflag_set(om, PGA_REFERENCED); 2794 if (opa != VM_PAGE_TO_PHYS(m)) 2795 invlva = TRUE; 2796#ifdef PAE 2797 if ((origpte & PG_NX) == 0 && 2798 (newpte & PG_NX) != 0) 2799 invlva = TRUE; 2800#endif 2801 } 2802 if ((origpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) { 2803 if ((origpte & PG_MANAGED) != 0) 2804 vm_page_dirty(om); 2805 if ((prot & VM_PROT_WRITE) == 0) 2806 invlva = TRUE; 2807 } 2808 if ((origpte & PG_MANAGED) != 0 && 2809 TAILQ_EMPTY(&om->md.pv_list)) 2810 vm_page_aflag_clear(om, PGA_WRITEABLE); 2811 if (invlva) 2812 pmap_invalidate_page(pmap, va); 2813 } else{ 2814 PT_SET_VA(pte, newpte | PG_A, FALSE); 2815 } 2816 2817 } 2818 PT_UPDATES_FLUSH(); 2819 critical_exit(); 2820 if (*PMAP1) 2821 PT_SET_VA_MA(PMAP1, 0, TRUE); 2822 sched_unpin(); 2823 vm_page_unlock_queues(); 2824 PMAP_UNLOCK(pmap); 2825} 2826 2827/* 2828 * Maps a sequence of resident pages belonging to the same object. 2829 * The sequence begins with the given page m_start. This page is 2830 * mapped at the given virtual address start. Each subsequent page is 2831 * mapped at a virtual address that is offset from start by the same 2832 * amount as the page is offset from m_start within the object. The 2833 * last page in the sequence is the page with the largest offset from 2834 * m_start that can be mapped at a virtual address less than the given 2835 * virtual address end. Not every virtual page between start and end 2836 * is mapped; only those for which a resident page exists with the 2837 * corresponding offset from m_start are mapped. 2838 */ 2839void 2840pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end, 2841 vm_page_t m_start, vm_prot_t prot) 2842{ 2843 vm_page_t m, mpte; 2844 vm_pindex_t diff, psize; 2845 multicall_entry_t mcl[16]; 2846 multicall_entry_t *mclp = mcl; 2847 int error, count = 0; 2848 2849 VM_OBJECT_LOCK_ASSERT(m_start->object, MA_OWNED); 2850 psize = atop(end - start); 2851 mpte = NULL; 2852 m = m_start; 2853 vm_page_lock_queues(); 2854 PMAP_LOCK(pmap); 2855 while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) { 2856 mpte = pmap_enter_quick_locked(&mclp, &count, pmap, start + ptoa(diff), m, 2857 prot, mpte); 2858 m = TAILQ_NEXT(m, listq); 2859 if (count == 16) { 2860 error = HYPERVISOR_multicall(mcl, count); 2861 KASSERT(error == 0, ("bad multicall %d", error)); 2862 mclp = mcl; 2863 count = 0; 2864 } 2865 } 2866 if (count) { 2867 error = HYPERVISOR_multicall(mcl, count); 2868 KASSERT(error == 0, ("bad multicall %d", error)); 2869 } 2870 vm_page_unlock_queues(); 2871 PMAP_UNLOCK(pmap); 2872} 2873 2874/* 2875 * this code makes some *MAJOR* assumptions: 2876 * 1. Current pmap & pmap exists. 2877 * 2. Not wired. 2878 * 3. Read access. 2879 * 4. No page table pages. 2880 * but is *MUCH* faster than pmap_enter... 
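 *
 * In this Xen pmap the PTE store is not issued directly:
 * pmap_enter_quick_locked() queues an __HYPERVISOR_update_va_mapping
 * multicall entry, and the callers flush the queue (in batches of
 * up to 16 entries) with HYPERVISOR_multicall().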
2881 */ 2882 2883void 2884pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot) 2885{ 2886 multicall_entry_t mcl, *mclp; 2887 int count = 0; 2888 mclp = &mcl; 2889 2890 CTR4(KTR_PMAP, "pmap_enter_quick: pmap=%p va=0x%x m=%p prot=0x%x", 2891 pmap, va, m, prot); 2892 2893 vm_page_lock_queues(); 2894 PMAP_LOCK(pmap); 2895 (void)pmap_enter_quick_locked(&mclp, &count, pmap, va, m, prot, NULL); 2896 if (count) 2897 HYPERVISOR_multicall(&mcl, count); 2898 vm_page_unlock_queues(); 2899 PMAP_UNLOCK(pmap); 2900} 2901 2902#ifdef notyet 2903void 2904pmap_enter_quick_range(pmap_t pmap, vm_offset_t *addrs, vm_page_t *pages, vm_prot_t *prots, int count) 2905{ 2906 int i, error, index = 0; 2907 multicall_entry_t mcl[16]; 2908 multicall_entry_t *mclp = mcl; 2909 2910 PMAP_LOCK(pmap); 2911 for (i = 0; i < count; i++, addrs++, pages++, prots++) { 2912 if (!pmap_is_prefaultable_locked(pmap, *addrs)) 2913 continue; 2914 2915 (void) pmap_enter_quick_locked(&mclp, &index, pmap, *addrs, *pages, *prots, NULL); 2916 if (index == 16) { 2917 error = HYPERVISOR_multicall(mcl, index); 2918 mclp = mcl; 2919 index = 0; 2920 KASSERT(error == 0, ("bad multicall %d", error)); 2921 } 2922 } 2923 if (index) { 2924 error = HYPERVISOR_multicall(mcl, index); 2925 KASSERT(error == 0, ("bad multicall %d", error)); 2926 } 2927 2928 PMAP_UNLOCK(pmap); 2929} 2930#endif 2931 2932static vm_page_t 2933pmap_enter_quick_locked(multicall_entry_t **mclpp, int *count, pmap_t pmap, vm_offset_t va, vm_page_t m, 2934 vm_prot_t prot, vm_page_t mpte) 2935{ 2936 pt_entry_t *pte; 2937 vm_paddr_t pa; 2938 vm_page_t free; 2939 multicall_entry_t *mcl = *mclpp; 2940 2941 KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva || 2942 (m->oflags & VPO_UNMANAGED) != 0, 2943 ("pmap_enter_quick_locked: managed mapping within the clean submap")); 2944 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2945 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2946 2947 /* 2948 * In the case that a page table page is not 2949 * resident, we are creating it here. 2950 */ 2951 if (va < VM_MAXUSER_ADDRESS) { 2952 u_int ptepindex; 2953 pd_entry_t ptema; 2954 2955 /* 2956 * Calculate pagetable page index 2957 */ 2958 ptepindex = va >> PDRSHIFT; 2959 if (mpte && (mpte->pindex == ptepindex)) { 2960 mpte->wire_count++; 2961 } else { 2962 /* 2963 * Get the page directory entry 2964 */ 2965 ptema = pmap->pm_pdir[ptepindex]; 2966 2967 /* 2968 * If the page table page is mapped, we just increment 2969 * the hold count, and activate it. 2970 */ 2971 if (ptema & PG_V) { 2972 if (ptema & PG_PS) 2973 panic("pmap_enter_quick: unexpected mapping into 4MB page"); 2974 mpte = PHYS_TO_VM_PAGE(xpmap_mtop(ptema) & PG_FRAME); 2975 mpte->wire_count++; 2976 } else { 2977 mpte = _pmap_allocpte(pmap, ptepindex, 2978 M_NOWAIT); 2979 if (mpte == NULL) 2980 return (mpte); 2981 } 2982 } 2983 } else { 2984 mpte = NULL; 2985 } 2986 2987 /* 2988 * This call to vtopte makes the assumption that we are 2989 * entering the page into the current pmap. In order to support 2990 * quick entry into any pmap, one would likely use pmap_pte_quick. 2991 * But that isn't as quick as vtopte. 2992 */ 2993 KASSERT(pmap_is_current(pmap), ("entering pages in non-current pmap")); 2994 pte = vtopte(va); 2995 if (*pte & PG_V) { 2996 if (mpte != NULL) { 2997 mpte->wire_count--; 2998 mpte = NULL; 2999 } 3000 return (mpte); 3001 } 3002 3003 /* 3004 * Enter on the PV list if part of our managed memory. 
3005 */ 3006 if ((m->oflags & VPO_UNMANAGED) == 0 && 3007 !pmap_try_insert_pv_entry(pmap, va, m)) { 3008 if (mpte != NULL) { 3009 free = NULL; 3010 if (pmap_unwire_pte_hold(pmap, mpte, &free)) { 3011 pmap_invalidate_page(pmap, va); 3012 pmap_free_zero_pages(free); 3013 } 3014 3015 mpte = NULL; 3016 } 3017 return (mpte); 3018 } 3019 3020 /* 3021 * Increment counters 3022 */ 3023 pmap->pm_stats.resident_count++; 3024 3025 pa = VM_PAGE_TO_PHYS(m); 3026#ifdef PAE 3027 if ((prot & VM_PROT_EXECUTE) == 0) 3028 pa |= pg_nx; 3029#endif 3030 3031#if 0 3032 /* 3033 * Now validate mapping with RO protection 3034 */ 3035 if ((m->oflags & VPO_UNMANAGED) != 0) 3036 pte_store(pte, pa | PG_V | PG_U); 3037 else 3038 pte_store(pte, pa | PG_V | PG_U | PG_MANAGED); 3039#else 3040 /* 3041 * Now validate mapping with RO protection 3042 */ 3043 if ((m->oflags & VPO_UNMANAGED) != 0) 3044 pa = xpmap_ptom(pa | PG_V | PG_U); 3045 else 3046 pa = xpmap_ptom(pa | PG_V | PG_U | PG_MANAGED); 3047 3048 mcl->op = __HYPERVISOR_update_va_mapping; 3049 mcl->args[0] = va; 3050 mcl->args[1] = (uint32_t)(pa & 0xffffffff); 3051 mcl->args[2] = (uint32_t)(pa >> 32); 3052 mcl->args[3] = 0; 3053 *mclpp = mcl + 1; 3054 *count = *count + 1; 3055#endif 3056 return (mpte); 3057} 3058 3059/* 3060 * Make a temporary mapping for a physical address. This is only intended 3061 * to be used for panic dumps. 3062 */ 3063void * 3064pmap_kenter_temporary(vm_paddr_t pa, int i) 3065{ 3066 vm_offset_t va; 3067 vm_paddr_t ma = xpmap_ptom(pa); 3068 3069 va = (vm_offset_t)crashdumpmap + (i * PAGE_SIZE); 3070 PT_SET_MA(va, (ma & ~PAGE_MASK) | PG_V | pgeflag); 3071 invlpg(va); 3072 return ((void *)crashdumpmap); 3073} 3074 3075/* 3076 * This code maps large physical mmap regions into the 3077 * processor address space. Note that some shortcuts 3078 * are taken, but the code works. 3079 */ 3080void 3081pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object, 3082 vm_pindex_t pindex, vm_size_t size) 3083{ 3084 pd_entry_t *pde; 3085 vm_paddr_t pa, ptepa; 3086 vm_page_t p; 3087 int pat_mode; 3088 3089 VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); 3090 KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG, 3091 ("pmap_object_init_pt: non-device object")); 3092 if (pseflag && 3093 (addr & (NBPDR - 1)) == 0 && (size & (NBPDR - 1)) == 0) { 3094 if (!vm_object_populate(object, pindex, pindex + atop(size))) 3095 return; 3096 p = vm_page_lookup(object, pindex); 3097 KASSERT(p->valid == VM_PAGE_BITS_ALL, 3098 ("pmap_object_init_pt: invalid page %p", p)); 3099 pat_mode = p->md.pat_mode; 3100 3101 /* 3102 * Abort the mapping if the first page is not physically 3103 * aligned to a 2/4MB page boundary. 3104 */ 3105 ptepa = VM_PAGE_TO_PHYS(p); 3106 if (ptepa & (NBPDR - 1)) 3107 return; 3108 3109 /* 3110 * Skip the first page. Abort the mapping if the rest of 3111 * the pages are not physically contiguous or have differing 3112 * memory attributes. 3113 */ 3114 p = TAILQ_NEXT(p, listq); 3115 for (pa = ptepa + PAGE_SIZE; pa < ptepa + size; 3116 pa += PAGE_SIZE) { 3117 KASSERT(p->valid == VM_PAGE_BITS_ALL, 3118 ("pmap_object_init_pt: invalid page %p", p)); 3119 if (pa != VM_PAGE_TO_PHYS(p) || 3120 pat_mode != p->md.pat_mode) 3121 return; 3122 p = TAILQ_NEXT(p, listq); 3123 } 3124 3125 /* 3126 * Map using 2/4MB pages. Since "ptepa" is 2/4M aligned and 3127 * "size" is a multiple of 2/4M, adding the PAT setting to 3128 * "pa" will not affect the termination of this loop. 
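 *
 * (The cache bits are small flag values, far smaller than NBPDR,
 * so ORing them into "pa" can never carry the sum across an extra
 * 2/4MB boundary; the loop still executes exactly size / NBPDR
 * times.)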
3129 */ 3130 PMAP_LOCK(pmap); 3131 for (pa = ptepa | pmap_cache_bits(pat_mode, 1); pa < ptepa + 3132 size; pa += NBPDR) { 3133 pde = pmap_pde(pmap, addr); 3134 if (*pde == 0) { 3135 pde_store(pde, pa | PG_PS | PG_M | PG_A | 3136 PG_U | PG_RW | PG_V); 3137 pmap->pm_stats.resident_count += NBPDR / 3138 PAGE_SIZE; 3139 pmap_pde_mappings++; 3140 } 3141 /* Else continue on if the PDE is already valid. */ 3142 addr += NBPDR; 3143 } 3144 PMAP_UNLOCK(pmap); 3145 } 3146} 3147 3148/* 3149 * Routine: pmap_change_wiring 3150 * Function: Change the wiring attribute for a map/virtual-address 3151 * pair. 3152 * In/out conditions: 3153 * The mapping must already exist in the pmap. 3154 */ 3155void 3156pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired) 3157{ 3158 pt_entry_t *pte; 3159 3160 vm_page_lock_queues(); 3161 PMAP_LOCK(pmap); 3162 pte = pmap_pte(pmap, va); 3163 3164 if (wired && !pmap_pte_w(pte)) { 3165 PT_SET_VA_MA((pte), *(pte) | PG_W, TRUE); 3166 pmap->pm_stats.wired_count++; 3167 } else if (!wired && pmap_pte_w(pte)) { 3168 PT_SET_VA_MA((pte), *(pte) & ~PG_W, TRUE); 3169 pmap->pm_stats.wired_count--; 3170 } 3171 3172 /* 3173 * Wiring is not a hardware characteristic so there is no need to 3174 * invalidate TLB. 3175 */ 3176 pmap_pte_release(pte); 3177 PMAP_UNLOCK(pmap); 3178 vm_page_unlock_queues(); 3179} 3180 3181 3182 3183/* 3184 * Copy the range specified by src_addr/len 3185 * from the source map to the range dst_addr/len 3186 * in the destination map. 3187 * 3188 * This routine is only advisory and need not do anything. 3189 */ 3190 3191void 3192pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len, 3193 vm_offset_t src_addr) 3194{ 3195 vm_page_t free; 3196 vm_offset_t addr; 3197 vm_offset_t end_addr = src_addr + len; 3198 vm_offset_t pdnxt; 3199 3200 if (dst_addr != src_addr) 3201 return; 3202 3203 if (!pmap_is_current(src_pmap)) { 3204 CTR2(KTR_PMAP, 3205 "pmap_copy, skipping: pdir[PTDPTDI]=0x%jx PTDpde[0]=0x%jx", 3206 (src_pmap->pm_pdir[PTDPTDI] & PG_FRAME), (PTDpde[0] & PG_FRAME)); 3207 3208 return; 3209 } 3210 CTR5(KTR_PMAP, "pmap_copy: dst_pmap=%p src_pmap=%p dst_addr=0x%x len=%d src_addr=0x%x", 3211 dst_pmap, src_pmap, dst_addr, len, src_addr); 3212 3213#ifdef HAMFISTED_LOCKING 3214 mtx_lock(&createdelete_lock); 3215#endif 3216 3217 vm_page_lock_queues(); 3218 if (dst_pmap < src_pmap) { 3219 PMAP_LOCK(dst_pmap); 3220 PMAP_LOCK(src_pmap); 3221 } else { 3222 PMAP_LOCK(src_pmap); 3223 PMAP_LOCK(dst_pmap); 3224 } 3225 sched_pin(); 3226 for (addr = src_addr; addr < end_addr; addr = pdnxt) { 3227 pt_entry_t *src_pte, *dst_pte; 3228 vm_page_t dstmpte, srcmpte; 3229 pd_entry_t srcptepaddr; 3230 u_int ptepindex; 3231 3232 KASSERT(addr < UPT_MIN_ADDRESS, 3233 ("pmap_copy: invalid to pmap_copy page tables")); 3234 3235 pdnxt = (addr + NBPDR) & ~PDRMASK; 3236 if (pdnxt < addr) 3237 pdnxt = end_addr; 3238 ptepindex = addr >> PDRSHIFT; 3239 3240 srcptepaddr = PT_GET(&src_pmap->pm_pdir[ptepindex]); 3241 if (srcptepaddr == 0) 3242 continue; 3243 3244 if (srcptepaddr & PG_PS) { 3245 if (dst_pmap->pm_pdir[ptepindex] == 0) { 3246 PD_SET_VA(dst_pmap, ptepindex, srcptepaddr & ~PG_W, TRUE); 3247 dst_pmap->pm_stats.resident_count += 3248 NBPDR / PAGE_SIZE; 3249 } 3250 continue; 3251 } 3252 3253 srcmpte = PHYS_TO_VM_PAGE(srcptepaddr & PG_FRAME); 3254 KASSERT(srcmpte->wire_count > 0, 3255 ("pmap_copy: source page table page is unused")); 3256 3257 if (pdnxt > end_addr) 3258 pdnxt = end_addr; 3259 3260 src_pte = vtopte(addr); 3261 while (addr < pdnxt) { 3262 
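			/*
			 * Walking the source with vtopte() is safe here:
			 * pmap_copy() returned early above unless src_pmap
			 * is the current pmap.  Only managed, 4KB mappings
			 * are copied below.
			 */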
pt_entry_t ptetemp; 3263 ptetemp = *src_pte; 3264 /* 3265 * we only virtual copy managed pages 3266 */ 3267 if ((ptetemp & PG_MANAGED) != 0) { 3268 dstmpte = pmap_allocpte(dst_pmap, addr, 3269 M_NOWAIT); 3270 if (dstmpte == NULL) 3271 goto out; 3272 dst_pte = pmap_pte_quick(dst_pmap, addr); 3273 if (*dst_pte == 0 && 3274 pmap_try_insert_pv_entry(dst_pmap, addr, 3275 PHYS_TO_VM_PAGE(xpmap_mtop(ptetemp) & PG_FRAME))) { 3276 /* 3277 * Clear the wired, modified, and 3278 * accessed (referenced) bits 3279 * during the copy. 3280 */ 3281 KASSERT(ptetemp != 0, ("src_pte not set")); 3282 PT_SET_VA_MA(dst_pte, ptetemp & ~(PG_W | PG_M | PG_A), TRUE /* XXX debug */); 3283 KASSERT(*dst_pte == (ptetemp & ~(PG_W | PG_M | PG_A)), 3284 ("no pmap copy expected: 0x%jx saw: 0x%jx", 3285 ptetemp & ~(PG_W | PG_M | PG_A), *dst_pte)); 3286 dst_pmap->pm_stats.resident_count++; 3287 } else { 3288 free = NULL; 3289 if (pmap_unwire_pte_hold(dst_pmap, 3290 dstmpte, &free)) { 3291 pmap_invalidate_page(dst_pmap, 3292 addr); 3293 pmap_free_zero_pages(free); 3294 } 3295 goto out; 3296 } 3297 if (dstmpte->wire_count >= srcmpte->wire_count) 3298 break; 3299 } 3300 addr += PAGE_SIZE; 3301 src_pte++; 3302 } 3303 } 3304out: 3305 PT_UPDATES_FLUSH(); 3306 sched_unpin(); 3307 vm_page_unlock_queues(); 3308 PMAP_UNLOCK(src_pmap); 3309 PMAP_UNLOCK(dst_pmap); 3310 3311#ifdef HAMFISTED_LOCKING 3312 mtx_unlock(&createdelete_lock); 3313#endif 3314} 3315 3316static __inline void 3317pagezero(void *page) 3318{ 3319#if defined(I686_CPU) 3320 if (cpu_class == CPUCLASS_686) { 3321#if defined(CPU_ENABLE_SSE) 3322 if (cpu_feature & CPUID_SSE2) 3323 sse2_pagezero(page); 3324 else 3325#endif 3326 i686_pagezero(page); 3327 } else 3328#endif 3329 bzero(page, PAGE_SIZE); 3330} 3331 3332/* 3333 * pmap_zero_page zeros the specified hardware page by mapping 3334 * the page into KVM and using bzero to clear its contents. 3335 */ 3336void 3337pmap_zero_page(vm_page_t m) 3338{ 3339 struct sysmaps *sysmaps; 3340 3341 sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)]; 3342 mtx_lock(&sysmaps->lock); 3343 if (*sysmaps->CMAP2) 3344 panic("pmap_zero_page: CMAP2 busy"); 3345 sched_pin(); 3346 PT_SET_MA(sysmaps->CADDR2, PG_V | PG_RW | VM_PAGE_TO_MACH(m) | PG_A | PG_M); 3347 pagezero(sysmaps->CADDR2); 3348 PT_SET_MA(sysmaps->CADDR2, 0); 3349 sched_unpin(); 3350 mtx_unlock(&sysmaps->lock); 3351} 3352 3353/* 3354 * pmap_zero_page_area zeros the specified hardware page by mapping 3355 * the page into KVM and using bzero to clear its contents. 3356 * 3357 * off and size may not cover an area beyond a single hardware page. 3358 */ 3359void 3360pmap_zero_page_area(vm_page_t m, int off, int size) 3361{ 3362 struct sysmaps *sysmaps; 3363 3364 sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)]; 3365 mtx_lock(&sysmaps->lock); 3366 if (*sysmaps->CMAP2) 3367 panic("pmap_zero_page_area: CMAP2 busy"); 3368 sched_pin(); 3369 PT_SET_MA(sysmaps->CADDR2, PG_V | PG_RW | VM_PAGE_TO_MACH(m) | PG_A | PG_M); 3370 3371 if (off == 0 && size == PAGE_SIZE) 3372 pagezero(sysmaps->CADDR2); 3373 else 3374 bzero((char *)sysmaps->CADDR2 + off, size); 3375 PT_SET_MA(sysmaps->CADDR2, 0); 3376 sched_unpin(); 3377 mtx_unlock(&sysmaps->lock); 3378} 3379 3380/* 3381 * pmap_zero_page_idle zeros the specified hardware page by mapping 3382 * the page into KVM and using bzero to clear its contents. This 3383 * is intended to be called from the vm_pagezero process only and 3384 * outside of Giant. 
3385 */ 3386void 3387pmap_zero_page_idle(vm_page_t m) 3388{ 3389 3390 if (*CMAP3) 3391 panic("pmap_zero_page_idle: CMAP3 busy"); 3392 sched_pin(); 3393 PT_SET_MA(CADDR3, PG_V | PG_RW | VM_PAGE_TO_MACH(m) | PG_A | PG_M); 3394 pagezero(CADDR3); 3395 PT_SET_MA(CADDR3, 0); 3396 sched_unpin(); 3397} 3398 3399/* 3400 * pmap_copy_page copies the specified (machine independent) 3401 * page by mapping the page into virtual memory and using 3402 * bcopy to copy the page, one machine dependent page at a 3403 * time. 3404 */ 3405void 3406pmap_copy_page(vm_page_t src, vm_page_t dst) 3407{ 3408 struct sysmaps *sysmaps; 3409 3410 sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)]; 3411 mtx_lock(&sysmaps->lock); 3412 if (*sysmaps->CMAP1) 3413 panic("pmap_copy_page: CMAP1 busy"); 3414 if (*sysmaps->CMAP2) 3415 panic("pmap_copy_page: CMAP2 busy"); 3416 sched_pin(); 3417 PT_SET_MA(sysmaps->CADDR1, PG_V | VM_PAGE_TO_MACH(src) | PG_A); 3418 PT_SET_MA(sysmaps->CADDR2, PG_V | PG_RW | VM_PAGE_TO_MACH(dst) | PG_A | PG_M); 3419 bcopy(sysmaps->CADDR1, sysmaps->CADDR2, PAGE_SIZE); 3420 PT_SET_MA(sysmaps->CADDR1, 0); 3421 PT_SET_MA(sysmaps->CADDR2, 0); 3422 sched_unpin(); 3423 mtx_unlock(&sysmaps->lock); 3424} 3425 3426/* 3427 * Returns true if the pmap's pv is one of the first 3428 * 16 pvs linked to from this page. This count may 3429 * be changed upwards or downwards in the future; it 3430 * is only necessary that true be returned for a small 3431 * subset of pmaps for proper page aging. 3432 */ 3433boolean_t 3434pmap_page_exists_quick(pmap_t pmap, vm_page_t m) 3435{ 3436 pv_entry_t pv; 3437 int loops = 0; 3438 boolean_t rv; 3439 3440 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 3441 ("pmap_page_exists_quick: page %p is not managed", m)); 3442 rv = FALSE; 3443 vm_page_lock_queues(); 3444 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { 3445 if (PV_PMAP(pv) == pmap) { 3446 rv = TRUE; 3447 break; 3448 } 3449 loops++; 3450 if (loops >= 16) 3451 break; 3452 } 3453 vm_page_unlock_queues(); 3454 return (rv); 3455} 3456 3457/* 3458 * pmap_page_wired_mappings: 3459 * 3460 * Return the number of managed mappings to the given physical page 3461 * that are wired. 3462 */ 3463int 3464pmap_page_wired_mappings(vm_page_t m) 3465{ 3466 pv_entry_t pv; 3467 pt_entry_t *pte; 3468 pmap_t pmap; 3469 int count; 3470 3471 count = 0; 3472 if ((m->oflags & VPO_UNMANAGED) != 0) 3473 return (count); 3474 vm_page_lock_queues(); 3475 sched_pin(); 3476 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { 3477 pmap = PV_PMAP(pv); 3478 PMAP_LOCK(pmap); 3479 pte = pmap_pte_quick(pmap, pv->pv_va); 3480 if ((*pte & PG_W) != 0) 3481 count++; 3482 PMAP_UNLOCK(pmap); 3483 } 3484 sched_unpin(); 3485 vm_page_unlock_queues(); 3486 return (count); 3487} 3488 3489/* 3490 * Returns TRUE if the given page is mapped. Otherwise, returns FALSE. 3491 */ 3492boolean_t 3493pmap_page_is_mapped(vm_page_t m) 3494{ 3495 3496 if ((m->oflags & VPO_UNMANAGED) != 0) 3497 return (FALSE); 3498 return (!TAILQ_EMPTY(&m->md.pv_list)); 3499} 3500 3501/* 3502 * Remove all pages from specified address space 3503 * this aids process exit speeds. Also, this code 3504 * is special cased for current process only, but 3505 * can have the more generic (and slightly slower) 3506 * mode enabled. This is much faster than pmap_remove 3507 * in the case of running down an entire address space. 
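 *
 * The speedup comes from walking the pmap's pv chunks directly:
 * each chunk enumerates this pmap's managed mappings, so they can
 * be torn down (and whole chunks freed) without scanning the page
 * tables one virtual address at a time.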
3508 */ 3509void 3510pmap_remove_pages(pmap_t pmap) 3511{ 3512 pt_entry_t *pte, tpte; 3513 vm_page_t m, free = NULL; 3514 pv_entry_t pv; 3515 struct pv_chunk *pc, *npc; 3516 int field, idx; 3517 int32_t bit; 3518 uint32_t inuse, bitmask; 3519 int allfree; 3520 3521 CTR1(KTR_PMAP, "pmap_remove_pages: pmap=%p", pmap); 3522 3523 if (pmap != vmspace_pmap(curthread->td_proc->p_vmspace)) { 3524 printf("warning: pmap_remove_pages called with non-current pmap\n"); 3525 return; 3526 } 3527 vm_page_lock_queues(); 3528 KASSERT(pmap_is_current(pmap), ("removing pages from non-current pmap")); 3529 PMAP_LOCK(pmap); 3530 sched_pin(); 3531 TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) { 3532 allfree = 1; 3533 for (field = 0; field < _NPCM; field++) { 3534 inuse = (~(pc->pc_map[field])) & pc_freemask[field]; 3535 while (inuse != 0) { 3536 bit = bsfl(inuse); 3537 bitmask = 1UL << bit; 3538 idx = field * 32 + bit; 3539 pv = &pc->pc_pventry[idx]; 3540 inuse &= ~bitmask; 3541 3542 pte = vtopte(pv->pv_va); 3543 tpte = *pte ? xpmap_mtop(*pte) : 0; 3544 3545 if (tpte == 0) { 3546 printf( 3547 "TPTE at %p IS ZERO @ VA %08x\n", 3548 pte, pv->pv_va); 3549 panic("bad pte"); 3550 } 3551 3552/* 3553 * We cannot remove wired pages from a process' mapping at this time 3554 */ 3555 if (tpte & PG_W) { 3556 allfree = 0; 3557 continue; 3558 } 3559 3560 m = PHYS_TO_VM_PAGE(tpte & PG_FRAME); 3561 KASSERT(m->phys_addr == (tpte & PG_FRAME), 3562 ("vm_page_t %p phys_addr mismatch %016jx %016jx", 3563 m, (uintmax_t)m->phys_addr, 3564 (uintmax_t)tpte)); 3565 3566 KASSERT(m < &vm_page_array[vm_page_array_size], 3567 ("pmap_remove_pages: bad tpte %#jx", 3568 (uintmax_t)tpte)); 3569 3570 3571 PT_CLEAR_VA(pte, FALSE); 3572 3573 /* 3574 * Update the vm_page_t clean/reference bits. 3575 */ 3576 if (tpte & PG_M) 3577 vm_page_dirty(m); 3578 3579 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list); 3580 if (TAILQ_EMPTY(&m->md.pv_list)) 3581 vm_page_aflag_clear(m, PGA_WRITEABLE); 3582 3583 pmap_unuse_pt(pmap, pv->pv_va, &free); 3584 3585 /* Mark free */ 3586 PV_STAT(pv_entry_frees++); 3587 PV_STAT(pv_entry_spare++); 3588 pv_entry_count--; 3589 pc->pc_map[field] |= bitmask; 3590 pmap->pm_stats.resident_count--; 3591 } 3592 } 3593 PT_UPDATES_FLUSH(); 3594 if (allfree) { 3595 PV_STAT(pv_entry_spare -= _NPCPV); 3596 PV_STAT(pc_chunk_count--); 3597 PV_STAT(pc_chunk_frees++); 3598 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 3599 TAILQ_REMOVE(&pv_chunks, pc, pc_lru); 3600 m = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)pc)); 3601 pmap_qremove((vm_offset_t)pc, 1); 3602 vm_page_unwire(m, 0); 3603 vm_page_free(m); 3604 pmap_ptelist_free(&pv_vafree, (vm_offset_t)pc); 3605 } 3606 } 3607 PT_UPDATES_FLUSH(); 3608 if (*PMAP1) 3609 PT_SET_MA(PADDR1, 0); 3610 3611 sched_unpin(); 3612 pmap_invalidate_all(pmap); 3613 vm_page_unlock_queues(); 3614 PMAP_UNLOCK(pmap); 3615 pmap_free_zero_pages(free); 3616} 3617 3618/* 3619 * pmap_is_modified: 3620 * 3621 * Return whether or not the specified physical page was modified 3622 * in any physical maps. 3623 */ 3624boolean_t 3625pmap_is_modified(vm_page_t m) 3626{ 3627 pv_entry_t pv; 3628 pt_entry_t *pte; 3629 pmap_t pmap; 3630 boolean_t rv; 3631 3632 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 3633 ("pmap_is_modified: page %p is not managed", m)); 3634 rv = FALSE; 3635 3636 /* 3637 * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be 3638 * concurrently set while the object is locked. Thus, if PGA_WRITEABLE 3639 * is clear, no PTEs can have PG_M set. 
3640 */ 3641 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 3642 if ((m->oflags & VPO_BUSY) == 0 && 3643 (m->aflags & PGA_WRITEABLE) == 0) 3644 return (rv); 3645 vm_page_lock_queues(); 3646 sched_pin(); 3647 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { 3648 pmap = PV_PMAP(pv); 3649 PMAP_LOCK(pmap); 3650 pte = pmap_pte_quick(pmap, pv->pv_va); 3651 rv = (*pte & PG_M) != 0; 3652 PMAP_UNLOCK(pmap); 3653 if (rv) 3654 break; 3655 } 3656 if (*PMAP1) 3657 PT_SET_MA(PADDR1, 0); 3658 sched_unpin(); 3659 vm_page_unlock_queues(); 3660 return (rv); 3661} 3662 3663/* 3664 * pmap_is_prefaultable: 3665 * 3666 * Return whether or not the specified virtual address is elgible 3667 * for prefault. 3668 */ 3669static boolean_t 3670pmap_is_prefaultable_locked(pmap_t pmap, vm_offset_t addr) 3671{ 3672 pt_entry_t *pte; 3673 boolean_t rv = FALSE; 3674 3675 return (rv); 3676 3677 if (pmap_is_current(pmap) && *pmap_pde(pmap, addr)) { 3678 pte = vtopte(addr); 3679 rv = (*pte == 0); 3680 } 3681 return (rv); 3682} 3683 3684boolean_t 3685pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr) 3686{ 3687 boolean_t rv; 3688 3689 PMAP_LOCK(pmap); 3690 rv = pmap_is_prefaultable_locked(pmap, addr); 3691 PMAP_UNLOCK(pmap); 3692 return (rv); 3693} 3694 3695boolean_t 3696pmap_is_referenced(vm_page_t m) 3697{ 3698 pv_entry_t pv; 3699 pt_entry_t *pte; 3700 pmap_t pmap; 3701 boolean_t rv; 3702 3703 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 3704 ("pmap_is_referenced: page %p is not managed", m)); 3705 rv = FALSE; 3706 vm_page_lock_queues(); 3707 sched_pin(); 3708 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { 3709 pmap = PV_PMAP(pv); 3710 PMAP_LOCK(pmap); 3711 pte = pmap_pte_quick(pmap, pv->pv_va); 3712 rv = (*pte & (PG_A | PG_V)) == (PG_A | PG_V); 3713 PMAP_UNLOCK(pmap); 3714 if (rv) 3715 break; 3716 } 3717 if (*PMAP1) 3718 PT_SET_MA(PADDR1, 0); 3719 sched_unpin(); 3720 vm_page_unlock_queues(); 3721 return (rv); 3722} 3723 3724void 3725pmap_map_readonly(pmap_t pmap, vm_offset_t va, int len) 3726{ 3727 int i, npages = round_page(len) >> PAGE_SHIFT; 3728 for (i = 0; i < npages; i++) { 3729 pt_entry_t *pte; 3730 pte = pmap_pte(pmap, (vm_offset_t)(va + i*PAGE_SIZE)); 3731 vm_page_lock_queues(); 3732 pte_store(pte, xpmap_mtop(*pte & ~(PG_RW|PG_M))); 3733 vm_page_unlock_queues(); 3734 PMAP_MARK_PRIV(xpmap_mtop(*pte)); 3735 pmap_pte_release(pte); 3736 } 3737} 3738 3739void 3740pmap_map_readwrite(pmap_t pmap, vm_offset_t va, int len) 3741{ 3742 int i, npages = round_page(len) >> PAGE_SHIFT; 3743 for (i = 0; i < npages; i++) { 3744 pt_entry_t *pte; 3745 pte = pmap_pte(pmap, (vm_offset_t)(va + i*PAGE_SIZE)); 3746 PMAP_MARK_UNPRIV(xpmap_mtop(*pte)); 3747 vm_page_lock_queues(); 3748 pte_store(pte, xpmap_mtop(*pte) | (PG_RW|PG_M)); 3749 vm_page_unlock_queues(); 3750 pmap_pte_release(pte); 3751 } 3752} 3753 3754/* 3755 * Clear the write and modified bits in each of the given page's mappings. 3756 */ 3757void 3758pmap_remove_write(vm_page_t m) 3759{ 3760 pv_entry_t pv; 3761 pmap_t pmap; 3762 pt_entry_t oldpte, *pte; 3763 3764 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 3765 ("pmap_remove_write: page %p is not managed", m)); 3766 3767 /* 3768 * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be set by 3769 * another thread while the object is locked. Thus, if PGA_WRITEABLE 3770 * is clear, no page table entries need updating. 
3771 */ 3772 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 3773 if ((m->oflags & VPO_BUSY) == 0 && 3774 (m->aflags & PGA_WRITEABLE) == 0) 3775 return; 3776 vm_page_lock_queues(); 3777 sched_pin(); 3778 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { 3779 pmap = PV_PMAP(pv); 3780 PMAP_LOCK(pmap); 3781 pte = pmap_pte_quick(pmap, pv->pv_va); 3782retry: 3783 oldpte = *pte; 3784 if ((oldpte & PG_RW) != 0) { 3785 vm_paddr_t newpte = oldpte & ~(PG_RW | PG_M); 3786 3787 /* 3788 * Regardless of whether a pte is 32 or 64 bits 3789 * in size, PG_RW and PG_M are among the least 3790 * significant 32 bits. 3791 */ 3792 PT_SET_VA_MA(pte, newpte, TRUE); 3793 if (*pte != newpte) 3794 goto retry; 3795 3796 if ((oldpte & PG_M) != 0) 3797 vm_page_dirty(m); 3798 pmap_invalidate_page(pmap, pv->pv_va); 3799 } 3800 PMAP_UNLOCK(pmap); 3801 } 3802 vm_page_aflag_clear(m, PGA_WRITEABLE); 3803 PT_UPDATES_FLUSH(); 3804 if (*PMAP1) 3805 PT_SET_MA(PADDR1, 0); 3806 sched_unpin(); 3807 vm_page_unlock_queues(); 3808} 3809 3810/* 3811 * pmap_ts_referenced: 3812 * 3813 * Return a count of reference bits for a page, clearing those bits. 3814 * It is not necessary for every reference bit to be cleared, but it 3815 * is necessary that 0 only be returned when there are truly no 3816 * reference bits set. 3817 * 3818 * XXX: The exact number of bits to check and clear is a matter that 3819 * should be tested and standardized at some point in the future for 3820 * optimal aging of shared pages. 3821 */ 3822int 3823pmap_ts_referenced(vm_page_t m) 3824{ 3825 pv_entry_t pv, pvf, pvn; 3826 pmap_t pmap; 3827 pt_entry_t *pte; 3828 int rtval = 0; 3829 3830 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 3831 ("pmap_ts_referenced: page %p is not managed", m)); 3832 vm_page_lock_queues(); 3833 sched_pin(); 3834 if ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) { 3835 pvf = pv; 3836 do { 3837 pvn = TAILQ_NEXT(pv, pv_list); 3838 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list); 3839 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list); 3840 pmap = PV_PMAP(pv); 3841 PMAP_LOCK(pmap); 3842 pte = pmap_pte_quick(pmap, pv->pv_va); 3843 if ((*pte & PG_A) != 0) { 3844 PT_SET_VA_MA(pte, *pte & ~PG_A, FALSE); 3845 pmap_invalidate_page(pmap, pv->pv_va); 3846 rtval++; 3847 if (rtval > 4) 3848 pvn = NULL; 3849 } 3850 PMAP_UNLOCK(pmap); 3851 } while ((pv = pvn) != NULL && pv != pvf); 3852 } 3853 PT_UPDATES_FLUSH(); 3854 if (*PMAP1) 3855 PT_SET_MA(PADDR1, 0); 3856 sched_unpin(); 3857 vm_page_unlock_queues(); 3858 return (rtval); 3859} 3860 3861/* 3862 * Clear the modify bits on the specified physical page. 3863 */ 3864void 3865pmap_clear_modify(vm_page_t m) 3866{ 3867 pv_entry_t pv; 3868 pmap_t pmap; 3869 pt_entry_t *pte; 3870 3871 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 3872 ("pmap_clear_modify: page %p is not managed", m)); 3873 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 3874 KASSERT((m->oflags & VPO_BUSY) == 0, 3875 ("pmap_clear_modify: page %p is busy", m)); 3876 3877 /* 3878 * If the page is not PGA_WRITEABLE, then no PTEs can have PG_M set. 3879 * If the object containing the page is locked and the page is not 3880 * VPO_BUSY, then PGA_WRITEABLE cannot be concurrently set. 
3881 */ 3882 if ((m->aflags & PGA_WRITEABLE) == 0) 3883 return; 3884 vm_page_lock_queues(); 3885 sched_pin(); 3886 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { 3887 pmap = PV_PMAP(pv); 3888 PMAP_LOCK(pmap); 3889 pte = pmap_pte_quick(pmap, pv->pv_va); 3890 if ((*pte & (PG_M | PG_RW)) == (PG_M | PG_RW)) { 3891 /* 3892 * Regardless of whether a pte is 32 or 64 bits 3893 * in size, PG_M is among the least significant 3894 * 32 bits. 3895 */ 3896 PT_SET_VA_MA(pte, *pte & ~PG_M, FALSE); 3897 pmap_invalidate_page(pmap, pv->pv_va); 3898 } 3899 PMAP_UNLOCK(pmap); 3900 } 3901 sched_unpin(); 3902 vm_page_unlock_queues(); 3903} 3904 3905/* 3906 * pmap_clear_reference: 3907 * 3908 * Clear the reference bit on the specified physical page. 3909 */ 3910void 3911pmap_clear_reference(vm_page_t m) 3912{ 3913 pv_entry_t pv; 3914 pmap_t pmap; 3915 pt_entry_t *pte; 3916 3917 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 3918 ("pmap_clear_reference: page %p is not managed", m)); 3919 vm_page_lock_queues(); 3920 sched_pin(); 3921 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { 3922 pmap = PV_PMAP(pv); 3923 PMAP_LOCK(pmap); 3924 pte = pmap_pte_quick(pmap, pv->pv_va); 3925 if ((*pte & PG_A) != 0) { 3926 /* 3927 * Regardless of whether a pte is 32 or 64 bits 3928 * in size, PG_A is among the least significant 3929 * 32 bits. 3930 */ 3931 PT_SET_VA_MA(pte, *pte & ~PG_A, FALSE); 3932 pmap_invalidate_page(pmap, pv->pv_va); 3933 } 3934 PMAP_UNLOCK(pmap); 3935 } 3936 sched_unpin(); 3937 vm_page_unlock_queues(); 3938} 3939 3940/* 3941 * Miscellaneous support routines follow 3942 */ 3943 3944/* 3945 * Map a set of physical memory pages into the kernel virtual 3946 * address space. Return a pointer to where it is mapped. This 3947 * routine is intended to be used for mapping device memory, 3948 * NOT real memory. 3949 */ 3950void * 3951pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, int mode) 3952{ 3953 vm_offset_t va, offset; 3954 vm_size_t tmpsize; 3955 3956 offset = pa & PAGE_MASK; 3957 size = roundup(offset + size, PAGE_SIZE); 3958 pa = pa & PG_FRAME; 3959 3960 if (pa < KERNLOAD && pa + size <= KERNLOAD) 3961 va = KERNBASE + pa; 3962 else 3963 va = kmem_alloc_nofault(kernel_map, size); 3964 if (!va) 3965 panic("pmap_mapdev: Couldn't alloc kernel virtual memory"); 3966 3967 for (tmpsize = 0; tmpsize < size; tmpsize += PAGE_SIZE) 3968 pmap_kenter_attr(va + tmpsize, pa + tmpsize, mode); 3969 pmap_invalidate_range(kernel_pmap, va, va + tmpsize); 3970 pmap_invalidate_cache_range(va, va + size); 3971 return ((void *)(va + offset)); 3972} 3973 3974void * 3975pmap_mapdev(vm_paddr_t pa, vm_size_t size) 3976{ 3977 3978 return (pmap_mapdev_attr(pa, size, PAT_UNCACHEABLE)); 3979} 3980 3981void * 3982pmap_mapbios(vm_paddr_t pa, vm_size_t size) 3983{ 3984 3985 return (pmap_mapdev_attr(pa, size, PAT_WRITE_BACK)); 3986} 3987 3988void 3989pmap_unmapdev(vm_offset_t va, vm_size_t size) 3990{ 3991 vm_offset_t base, offset, tmpva; 3992 3993 if (va >= KERNBASE && va + size <= KERNBASE + KERNLOAD) 3994 return; 3995 base = trunc_page(va); 3996 offset = va & PAGE_MASK; 3997 size = roundup(offset + size, PAGE_SIZE); 3998 critical_enter(); 3999 for (tmpva = base; tmpva < (base + size); tmpva += PAGE_SIZE) 4000 pmap_kremove(tmpva); 4001 pmap_invalidate_range(kernel_pmap, va, tmpva); 4002 critical_exit(); 4003 kmem_free(kernel_map, base, size); 4004} 4005 4006/* 4007 * Sets the memory attribute for the specified page. 
4008 */ 4009void 4010pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma) 4011{ 4012 4013 m->md.pat_mode = ma; 4014 if ((m->flags & PG_FICTITIOUS) != 0) 4015 return; 4016 4017 /* 4018 * If "m" is a normal page, flush it from the cache. 4019 * See pmap_invalidate_cache_range(). 4020 * 4021 * First, try to find an existing mapping of the page by sf 4022 * buffer. sf_buf_invalidate_cache() modifies mapping and 4023 * flushes the cache. 4024 */ 4025 if (sf_buf_invalidate_cache(m)) 4026 return; 4027 4028 /* 4029 * If page is not mapped by sf buffer, but CPU does not 4030 * support self snoop, map the page transient and do 4031 * invalidation. In the worst case, whole cache is flushed by 4032 * pmap_invalidate_cache_range(). 4033 */ 4034 if ((cpu_feature & CPUID_SS) == 0) 4035 pmap_flush_page(m); 4036} 4037 4038static void 4039pmap_flush_page(vm_page_t m) 4040{ 4041 struct sysmaps *sysmaps; 4042 vm_offset_t sva, eva; 4043 4044 if ((cpu_feature & CPUID_CLFSH) != 0) { 4045 sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)]; 4046 mtx_lock(&sysmaps->lock); 4047 if (*sysmaps->CMAP2) 4048 panic("pmap_flush_page: CMAP2 busy"); 4049 sched_pin(); 4050 PT_SET_MA(sysmaps->CADDR2, PG_V | PG_RW | 4051 VM_PAGE_TO_MACH(m) | PG_A | PG_M | 4052 pmap_cache_bits(m->md.pat_mode, 0)); 4053 invlcaddr(sysmaps->CADDR2); 4054 sva = (vm_offset_t)sysmaps->CADDR2; 4055 eva = sva + PAGE_SIZE; 4056 4057 /* 4058 * Use mfence despite the ordering implied by 4059 * mtx_{un,}lock() because clflush is not guaranteed 4060 * to be ordered by any other instruction. 4061 */ 4062 mfence(); 4063 for (; sva < eva; sva += cpu_clflush_line_size) 4064 clflush(sva); 4065 mfence(); 4066 PT_SET_MA(sysmaps->CADDR2, 0); 4067 sched_unpin(); 4068 mtx_unlock(&sysmaps->lock); 4069 } else 4070 pmap_invalidate_cache(); 4071} 4072 4073/* 4074 * Changes the specified virtual address range's memory type to that given by 4075 * the parameter "mode". The specified virtual address range must be 4076 * completely contained within either the kernel map. 4077 * 4078 * Returns zero if the change completed successfully, and either EINVAL or 4079 * ENOMEM if the change failed. Specifically, EINVAL is returned if some part 4080 * of the virtual address range was not mapped, and ENOMEM is returned if 4081 * there was insufficient memory available to complete the change. 4082 */ 4083int 4084pmap_change_attr(vm_offset_t va, vm_size_t size, int mode) 4085{ 4086 vm_offset_t base, offset, tmpva; 4087 pt_entry_t *pte; 4088 u_int opte, npte; 4089 pd_entry_t *pde; 4090 boolean_t changed; 4091 4092 base = trunc_page(va); 4093 offset = va & PAGE_MASK; 4094 size = roundup(offset + size, PAGE_SIZE); 4095 4096 /* Only supported on kernel virtual addresses. */ 4097 if (base <= VM_MAXUSER_ADDRESS) 4098 return (EINVAL); 4099 4100 /* 4MB pages and pages that aren't mapped aren't supported. */ 4101 for (tmpva = base; tmpva < (base + size); tmpva += PAGE_SIZE) { 4102 pde = pmap_pde(kernel_pmap, tmpva); 4103 if (*pde & PG_PS) 4104 return (EINVAL); 4105 if ((*pde & PG_V) == 0) 4106 return (EINVAL); 4107 pte = vtopte(va); 4108 if ((*pte & PG_V) == 0) 4109 return (EINVAL); 4110 } 4111 4112 changed = FALSE; 4113 4114 /* 4115 * Ok, all the pages exist and are 4k, so run through them updating 4116 * their cache mode. 4117 */ 4118 for (tmpva = base; size > 0; ) { 4119 pte = vtopte(tmpva); 4120 4121 /* 4122 * The cache mode bits are all in the low 32-bits of the 4123 * PTE, so we can just spin on updating the low 32-bits. 
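 *
 * The loop below re-reads the PTE and retries the store whenever a
 * change was required (npte != opte) but the entry no longer holds
 * the value just written, e.g. because the accessed or dirty bits
 * were set concurrently.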
/*
 * Changes the specified virtual address range's memory type to that given by
 * the parameter "mode".  The specified virtual address range must be
 * completely contained within the kernel map.
 *
 * Returns zero if the change completed successfully, and either EINVAL or
 * ENOMEM if the change failed.  Specifically, EINVAL is returned if some part
 * of the virtual address range was not mapped, and ENOMEM is returned if
 * there was insufficient memory available to complete the change.
 */
int
pmap_change_attr(vm_offset_t va, vm_size_t size, int mode)
{
	vm_offset_t base, offset, tmpva;
	pt_entry_t *pte;
	u_int opte, npte;
	pd_entry_t *pde;
	boolean_t changed;

	base = trunc_page(va);
	offset = va & PAGE_MASK;
	size = roundup(offset + size, PAGE_SIZE);

	/* Only supported on kernel virtual addresses. */
	if (base <= VM_MAXUSER_ADDRESS)
		return (EINVAL);

	/* 4MB pages and pages that aren't mapped aren't supported. */
	for (tmpva = base; tmpva < (base + size); tmpva += PAGE_SIZE) {
		pde = pmap_pde(kernel_pmap, tmpva);
		if (*pde & PG_PS)
			return (EINVAL);
		if ((*pde & PG_V) == 0)
			return (EINVAL);
		pte = vtopte(tmpva);
		if ((*pte & PG_V) == 0)
			return (EINVAL);
	}

	changed = FALSE;

	/*
	 * Ok, all the pages exist and are 4k, so run through them updating
	 * their cache mode.
	 */
	for (tmpva = base; size > 0; ) {
		pte = vtopte(tmpva);

		/*
		 * The cache mode bits are all in the low 32-bits of the
		 * PTE, so we can just spin on updating the low 32-bits.
		 */
		do {
			opte = *(u_int *)pte;
			npte = opte & ~(PG_PTE_PAT | PG_NC_PCD | PG_NC_PWT);
			npte |= pmap_cache_bits(mode, 0);
			PT_SET_VA_MA(pte, npte, TRUE);
		} while (npte != opte && (*pte != npte));
		if (npte != opte)
			changed = TRUE;
		tmpva += PAGE_SIZE;
		size -= PAGE_SIZE;
	}

	/*
	 * Flush CPU caches to make sure any data isn't cached that
	 * shouldn't be, etc.
	 */
	if (changed) {
		pmap_invalidate_range(kernel_pmap, base, tmpva);
		pmap_invalidate_cache_range(base, tmpva);
	}
	return (0);
}

/*
 * Perform the pmap work for mincore.
 */
int
pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
{
	pt_entry_t *ptep, pte;
	vm_paddr_t pa;
	int val;

	PMAP_LOCK(pmap);
retry:
	ptep = pmap_pte(pmap, addr);
	pte = (ptep != NULL) ? PT_GET(ptep) : 0;
	pmap_pte_release(ptep);
	val = 0;
	if ((pte & PG_V) != 0) {
		val |= MINCORE_INCORE;
		if ((pte & (PG_M | PG_RW)) == (PG_M | PG_RW))
			val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
		if ((pte & PG_A) != 0)
			val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
	}
	if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) !=
	    (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) &&
	    (pte & (PG_MANAGED | PG_V)) == (PG_MANAGED | PG_V)) {
		pa = pte & PG_FRAME;
		/* Ensure that "PHYS_TO_VM_PAGE(pa)->object" doesn't change. */
		if (vm_page_pa_tryrelock(pmap, pa, locked_pa))
			goto retry;
	} else
		PA_UNLOCK_COND(*locked_pa);
	PMAP_UNLOCK(pmap);
	return (val);
}

void
pmap_activate(struct thread *td)
{
	pmap_t pmap, oldpmap;
	u_int cpuid;
	u_int32_t cr3;

	critical_enter();
	pmap = vmspace_pmap(td->td_proc->p_vmspace);
	oldpmap = PCPU_GET(curpmap);
	cpuid = PCPU_GET(cpuid);
#if defined(SMP)
	CPU_CLR_ATOMIC(cpuid, &oldpmap->pm_active);
	CPU_SET_ATOMIC(cpuid, &pmap->pm_active);
#else
	CPU_CLR(cpuid, &oldpmap->pm_active);
	CPU_SET(cpuid, &pmap->pm_active);
#endif
#ifdef PAE
	cr3 = vtophys(pmap->pm_pdpt);
#else
	cr3 = vtophys(pmap->pm_pdir);
#endif
	/*
	 * pmap_activate is for the current thread on the current cpu.
	 */
	td->td_pcb->pcb_cr3 = cr3;
	PT_UPDATES_FLUSH();
	load_cr3(cr3);
	PCPU_SET(curpmap, pmap);
	critical_exit();
}

void
pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
{
}
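/*
 * Illustrative sketch (not part of the original pmap code): how a caller
 * might use pmap_change_attr() above, honoring its documented return values
 * (0 on success, EINVAL or ENOMEM on failure).  The guard macro and helper
 * name are hypothetical; PAT_UNCACHEABLE is the same mode constant already
 * used by pmap_mapdev() in this file.
 */
#ifdef PMAP_CHANGE_ATTR_EXAMPLE
static int
pmap_change_attr_example(vm_offset_t kva, vm_size_t len)
{
	int error;

	/* Make an already mapped kernel virtual range uncacheable. */
	error = pmap_change_attr(kva, len, PAT_UNCACHEABLE);
	if (error != 0)
		printf("pmap_change_attr failed: %d\n", error);
	return (error);
}
#endif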
/*
 * Increase the starting virtual address of the given mapping if a
 * different alignment might result in more superpage mappings.
 */
void
pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
    vm_offset_t *addr, vm_size_t size)
{
	vm_offset_t superpage_offset;

	if (size < NBPDR)
		return;
	if (object != NULL && (object->flags & OBJ_COLORED) != 0)
		offset += ptoa(object->pg_color);
	superpage_offset = offset & PDRMASK;
	if (size - ((NBPDR - superpage_offset) & PDRMASK) < NBPDR ||
	    (*addr & PDRMASK) == superpage_offset)
		return;
	if ((*addr & PDRMASK) < superpage_offset)
		*addr = (*addr & ~PDRMASK) + superpage_offset;
	else
		*addr = ((*addr + PDRMASK) & ~PDRMASK) + superpage_offset;
}

void
pmap_suspend()
{
	pmap_t pmap;
	int i, pdir, offset;
	vm_paddr_t pdirma;
	mmu_update_t mu[4];

	/*
	 * We need to remove the recursive mapping structure from all
	 * our pmaps so that Xen doesn't get confused when it restores
	 * the page tables.  The recursive map lives at page directory
	 * index PTDPTDI.  We assume that the suspend code has stopped
	 * the other vcpus (if any).
	 */
	LIST_FOREACH(pmap, &allpmaps, pm_list) {
		for (i = 0; i < 4; i++) {
			/*
			 * Figure out which page directory (L2) page
			 * contains this bit of the recursive map and
			 * the offset within that page of the map
			 * entry.
			 */
			pdir = (PTDPTDI + i) / NPDEPG;
			offset = (PTDPTDI + i) % NPDEPG;
			pdirma = pmap->pm_pdpt[pdir] & PG_FRAME;
			mu[i].ptr = pdirma + offset * sizeof(pd_entry_t);
			mu[i].val = 0;
		}
		HYPERVISOR_mmu_update(mu, 4, NULL, DOMID_SELF);
	}
}
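/*
 * Illustrative sketch (not part of the original pmap code): the slot
 * address computation that pmap_suspend() above and pmap_resume() below
 * both perform inline.  (PTDPTDI + i) / NPDEPG selects the page directory
 * page that holds recursive-map entry i, (PTDPTDI + i) % NPDEPG is the
 * entry's index within that page, and the machine address handed to the
 * hypervisor is that page's frame plus the index scaled by the entry size.
 * The guard macro and helper name are hypothetical.
 */
#ifdef PMAP_RECURSIVE_MAP_EXAMPLE
static vm_paddr_t
pmap_recursive_slot_ma(pmap_t pmap, int i)
{
	int pdir, offset;

	pdir = (PTDPTDI + i) / NPDEPG;
	offset = (PTDPTDI + i) % NPDEPG;
	return ((pmap->pm_pdpt[pdir] & PG_FRAME) +
	    offset * sizeof(pd_entry_t));
}
#endif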
void
pmap_resume()
{
	pmap_t pmap;
	int i, pdir, offset;
	vm_paddr_t pdirma;
	mmu_update_t mu[4];

	/*
	 * Restore the recursive map that we removed on suspend.
	 */
	LIST_FOREACH(pmap, &allpmaps, pm_list) {
		for (i = 0; i < 4; i++) {
			/*
			 * Figure out which page directory (L2) page
			 * contains this bit of the recursive map and
			 * the offset within that page of the map
			 * entry.
			 */
			pdir = (PTDPTDI + i) / NPDEPG;
			offset = (PTDPTDI + i) % NPDEPG;
			pdirma = pmap->pm_pdpt[pdir] & PG_FRAME;
			mu[i].ptr = pdirma + offset * sizeof(pd_entry_t);
			mu[i].val = (pmap->pm_pdpt[i] & PG_FRAME) | PG_V;
		}
		HYPERVISOR_mmu_update(mu, 4, NULL, DOMID_SELF);
	}
}

#if defined(PMAP_DEBUG)
int
pmap_pid_dump(int pid)
{
	pmap_t pmap;
	struct proc *p;
	int npte = 0;
	int index;

	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		if (p->p_pid != pid)
			continue;

		if (p->p_vmspace) {
			int i, j;

			index = 0;
			pmap = vmspace_pmap(p->p_vmspace);
			for (i = 0; i < NPDEPTD; i++) {
				pd_entry_t *pde;
				pt_entry_t *pte;
				vm_offset_t base = i << PDRSHIFT;

				pde = &pmap->pm_pdir[i];
				if (pde && pmap_pde_v(pde)) {
					for (j = 0; j < NPTEPG; j++) {
						vm_offset_t va = base + (j << PAGE_SHIFT);
						if (va >= (vm_offset_t)VM_MIN_KERNEL_ADDRESS) {
							if (index) {
								index = 0;
								printf("\n");
							}
							sx_sunlock(&allproc_lock);
							return (npte);
						}
						pte = pmap_pte(pmap, va);
						if (pte && pmap_pte_v(pte)) {
							pt_entry_t pa;
							vm_page_t m;

							pa = PT_GET(pte);
							m = PHYS_TO_VM_PAGE(pa & PG_FRAME);
							printf("va: 0x%x, pt: 0x%x, h: %d, w: %d, f: 0x%x",
							    va, pa, m->hold_count, m->wire_count, m->flags);
							npte++;
							index++;
							if (index >= 2) {
								index = 0;
								printf("\n");
							} else {
								printf(" ");
							}
						}
					}
				}
			}
		}
	}
	sx_sunlock(&allproc_lock);
	return (npte);
}
#endif

#if defined(DEBUG)

static void pads(pmap_t pm);
void pmap_pvdump(vm_paddr_t pa);

/* print address space of pmap */
static void
pads(pmap_t pm)
{
	int i, j;
	vm_paddr_t va;
	pt_entry_t *ptep;

	if (pm == kernel_pmap)
		return;
	for (i = 0; i < NPDEPTD; i++)
		if (pm->pm_pdir[i])
			for (j = 0; j < NPTEPG; j++) {
				va = (i << PDRSHIFT) + (j << PAGE_SHIFT);
				if (pm == kernel_pmap && va < KERNBASE)
					continue;
				if (pm != kernel_pmap && va > UPT_MAX_ADDRESS)
					continue;
				ptep = pmap_pte(pm, va);
				if (pmap_pte_v(ptep))
					printf("%x:%x ", va, *ptep);
			}
}

void
pmap_pvdump(vm_paddr_t pa)
{
	pv_entry_t pv;
	pmap_t pmap;
	vm_page_t m;

	printf("pa %x", pa);
	m = PHYS_TO_VM_PAGE(pa);
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
		pmap = PV_PMAP(pv);
		printf(" -> pmap %p, va %x", (void *)pmap, pv->pv_va);
		pads(pmap);
	}
	printf(" ");
}
#endif