pmap.c revision 239171
1/*- 2 * Copyright (c) 1991 Regents of the University of California. 3 * All rights reserved. 4 * Copyright (c) 1994 John S. Dyson 5 * All rights reserved. 6 * Copyright (c) 1994 David Greenman 7 * All rights reserved. 8 * Copyright (c) 2005 Alan L. Cox <alc@cs.rice.edu> 9 * All rights reserved. 10 * 11 * This code is derived from software contributed to Berkeley by 12 * the Systems Programming Group of the University of Utah Computer 13 * Science Department and William Jolitz of UUNET Technologies Inc. 14 * 15 * Redistribution and use in source and binary forms, with or without 16 * modification, are permitted provided that the following conditions 17 * are met: 18 * 1. Redistributions of source code must retain the above copyright 19 * notice, this list of conditions and the following disclaimer. 20 * 2. Redistributions in binary form must reproduce the above copyright 21 * notice, this list of conditions and the following disclaimer in the 22 * documentation and/or other materials provided with the distribution. 23 * 3. All advertising materials mentioning features or use of this software 24 * must display the following acknowledgement: 25 * This product includes software developed by the University of 26 * California, Berkeley and its contributors. 27 * 4. Neither the name of the University nor the names of its contributors 28 * may be used to endorse or promote products derived from this software 29 * without specific prior written permission. 30 * 31 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 32 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 33 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 34 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 35 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 36 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 37 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 38 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 39 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 40 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 41 * SUCH DAMAGE. 42 * 43 * from: @(#)pmap.c 7.7 (Berkeley) 5/12/91 44 */ 45/*- 46 * Copyright (c) 2003 Networks Associates Technology, Inc. 47 * All rights reserved. 48 * 49 * This software was developed for the FreeBSD Project by Jake Burkholder, 50 * Safeport Network Services, and Network Associates Laboratories, the 51 * Security Research Division of Network Associates, Inc. under 52 * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA 53 * CHATS research program. 54 * 55 * Redistribution and use in source and binary forms, with or without 56 * modification, are permitted provided that the following conditions 57 * are met: 58 * 1. Redistributions of source code must retain the above copyright 59 * notice, this list of conditions and the following disclaimer. 60 * 2. Redistributions in binary form must reproduce the above copyright 61 * notice, this list of conditions and the following disclaimer in the 62 * documentation and/or other materials provided with the distribution. 63 * 64 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 65 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 66 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 67 * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 68 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 69 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 70 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 71 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 72 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 73 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 74 * SUCH DAMAGE. 75 */ 76 77#include <sys/cdefs.h> 78__FBSDID("$FreeBSD: head/sys/i386/xen/pmap.c 239171 2012-08-10 05:47:04Z alc $"); 79 80/* 81 * Manages physical address maps. 82 * 83 * In addition to hardware address maps, this 84 * module is called upon to provide software-use-only 85 * maps which may or may not be stored in the same 86 * form as hardware maps. These pseudo-maps are 87 * used to store intermediate results from copy 88 * operations to and from address spaces. 89 * 90 * Since the information managed by this module is 91 * also stored by the logical address mapping module, 92 * this module may throw away valid virtual-to-physical 93 * mappings at almost any time. However, invalidations 94 * of virtual-to-physical mappings must be done as 95 * requested. 96 * 97 * In order to cope with hardware architectures which 98 * make virtual-to-physical map invalidates expensive, 99 * this module may delay invalidate or reduced protection 100 * operations until such time as they are actually 101 * necessary. This module is given full information as 102 * to which processors are currently using which maps, 103 * and to when physical maps must be made correct. 104 */ 105 106#include "opt_cpu.h" 107#include "opt_pmap.h" 108#include "opt_smp.h" 109#include "opt_xbox.h" 110 111#include <sys/param.h> 112#include <sys/systm.h> 113#include <sys/kernel.h> 114#include <sys/ktr.h> 115#include <sys/lock.h> 116#include <sys/malloc.h> 117#include <sys/mman.h> 118#include <sys/msgbuf.h> 119#include <sys/mutex.h> 120#include <sys/proc.h> 121#include <sys/sf_buf.h> 122#include <sys/sx.h> 123#include <sys/vmmeter.h> 124#include <sys/sched.h> 125#include <sys/sysctl.h> 126#ifdef SMP 127#include <sys/smp.h> 128#else 129#include <sys/cpuset.h> 130#endif 131 132#include <vm/vm.h> 133#include <vm/vm_param.h> 134#include <vm/vm_kern.h> 135#include <vm/vm_page.h> 136#include <vm/vm_map.h> 137#include <vm/vm_object.h> 138#include <vm/vm_extern.h> 139#include <vm/vm_pageout.h> 140#include <vm/vm_pager.h> 141#include <vm/uma.h> 142 143#include <machine/cpu.h> 144#include <machine/cputypes.h> 145#include <machine/md_var.h> 146#include <machine/pcb.h> 147#include <machine/specialreg.h> 148#ifdef SMP 149#include <machine/smp.h> 150#endif 151 152#ifdef XBOX 153#include <machine/xbox.h> 154#endif 155 156#include <xen/interface/xen.h> 157#include <xen/hypervisor.h> 158#include <machine/xen/hypercall.h> 159#include <machine/xen/xenvar.h> 160#include <machine/xen/xenfunc.h> 161 162#if !defined(CPU_DISABLE_SSE) && defined(I686_CPU) 163#define CPU_ENABLE_SSE 164#endif 165 166#ifndef PMAP_SHPGPERPROC 167#define PMAP_SHPGPERPROC 200 168#endif 169 170#define DIAGNOSTIC 171 172#if !defined(DIAGNOSTIC) 173#ifdef __GNUC_GNU_INLINE__ 174#define PMAP_INLINE __attribute__((__gnu_inline__)) inline 175#else 176#define PMAP_INLINE extern inline 177#endif 178#else 179#define PMAP_INLINE 180#endif 181 182#ifdef PV_STATS 183#define PV_STAT(x) do { x ; } while (0) 184#else 185#define PV_STAT(x) do { } while (0) 186#endif 
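/*
 * Usage sketch for PV_STAT(): pv entry statistics updates elsewhere in this
 * file are written, for example, as
 *
 *	PV_STAT(pv_entry_allocs++);
 *	PV_STAT(pc_chunk_frees++);
 *
 * so that they are compiled in only when the kernel is built with the
 * PV_STATS option and cost nothing otherwise.
 */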
187 188/* 189 * Get PDEs and PTEs for user/kernel address space 190 */ 191#define pmap_pde(m, v) (&((m)->pm_pdir[(vm_offset_t)(v) >> PDRSHIFT])) 192#define pdir_pde(m, v) (m[(vm_offset_t)(v) >> PDRSHIFT]) 193 194#define pmap_pde_v(pte) ((*(int *)pte & PG_V) != 0) 195#define pmap_pte_w(pte) ((*(int *)pte & PG_W) != 0) 196#define pmap_pte_m(pte) ((*(int *)pte & PG_M) != 0) 197#define pmap_pte_u(pte) ((*(int *)pte & PG_A) != 0) 198#define pmap_pte_v(pte) ((*(int *)pte & PG_V) != 0) 199 200#define pmap_pte_set_prot(pte, v) ((*(int *)pte &= ~PG_PROT), (*(int *)pte |= (v))) 201 202#define HAMFISTED_LOCKING 203#ifdef HAMFISTED_LOCKING 204static struct mtx createdelete_lock; 205#endif 206 207struct pmap kernel_pmap_store; 208LIST_HEAD(pmaplist, pmap); 209static struct pmaplist allpmaps; 210static struct mtx allpmaps_lock; 211 212vm_offset_t virtual_avail; /* VA of first avail page (after kernel bss) */ 213vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */ 214int pgeflag = 0; /* PG_G or-in */ 215int pseflag = 0; /* PG_PS or-in */ 216 217int nkpt; 218vm_offset_t kernel_vm_end; 219extern u_int32_t KERNend; 220 221#ifdef PAE 222pt_entry_t pg_nx; 223#endif 224 225static SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD, 0, "VM/pmap parameters"); 226 227static int pat_works; /* Is page attribute table sane? */ 228 229/* 230 * Data for the pv entry allocation mechanism 231 */ 232static TAILQ_HEAD(pch, pv_chunk) pv_chunks = TAILQ_HEAD_INITIALIZER(pv_chunks); 233static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0; 234static int shpgperproc = PMAP_SHPGPERPROC; 235 236struct pv_chunk *pv_chunkbase; /* KVA block for pv_chunks */ 237int pv_maxchunks; /* How many chunks we have KVA for */ 238vm_offset_t pv_vafree; /* freelist stored in the PTE */ 239 240/* 241 * All those kernel PT submaps that BSD is so fond of 242 */ 243struct sysmaps { 244 struct mtx lock; 245 pt_entry_t *CMAP1; 246 pt_entry_t *CMAP2; 247 caddr_t CADDR1; 248 caddr_t CADDR2; 249}; 250static struct sysmaps sysmaps_pcpu[MAXCPU]; 251static pt_entry_t *CMAP3; 252caddr_t ptvmmap = 0; 253static caddr_t CADDR3; 254struct msgbuf *msgbufp = 0; 255 256/* 257 * Crashdump maps. 
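 *
 * crashdumpmap is a window of MAXDUMPPGS pages of KVA, reserved below in
 * pmap_bootstrap(), for use by the crash dump code.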
258 */ 259static caddr_t crashdumpmap; 260 261static pt_entry_t *PMAP1 = 0, *PMAP2; 262static pt_entry_t *PADDR1 = 0, *PADDR2; 263#ifdef SMP 264static int PMAP1cpu; 265static int PMAP1changedcpu; 266SYSCTL_INT(_debug, OID_AUTO, PMAP1changedcpu, CTLFLAG_RD, 267 &PMAP1changedcpu, 0, 268 "Number of times pmap_pte_quick changed CPU with same PMAP1"); 269#endif 270static int PMAP1changed; 271SYSCTL_INT(_debug, OID_AUTO, PMAP1changed, CTLFLAG_RD, 272 &PMAP1changed, 0, 273 "Number of times pmap_pte_quick changed PMAP1"); 274static int PMAP1unchanged; 275SYSCTL_INT(_debug, OID_AUTO, PMAP1unchanged, CTLFLAG_RD, 276 &PMAP1unchanged, 0, 277 "Number of times pmap_pte_quick didn't change PMAP1"); 278static struct mtx PMAP2mutex; 279 280static void free_pv_chunk(struct pv_chunk *pc); 281static void free_pv_entry(pmap_t pmap, pv_entry_t pv); 282static pv_entry_t get_pv_entry(pmap_t pmap, boolean_t try); 283static void pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va); 284static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, 285 vm_offset_t va); 286 287static vm_page_t pmap_enter_quick_locked(multicall_entry_t **mcl, int *count, pmap_t pmap, vm_offset_t va, 288 vm_page_t m, vm_prot_t prot, vm_page_t mpte); 289static void pmap_flush_page(vm_page_t m); 290static void pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode); 291static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t sva, 292 vm_page_t *free); 293static void pmap_remove_page(struct pmap *pmap, vm_offset_t va, 294 vm_page_t *free); 295static void pmap_remove_entry(struct pmap *pmap, vm_page_t m, 296 vm_offset_t va); 297static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, 298 vm_page_t m); 299 300static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags); 301 302static vm_page_t _pmap_allocpte(pmap_t pmap, u_int ptepindex, int flags); 303static int _pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m, vm_page_t *free); 304static pt_entry_t *pmap_pte_quick(pmap_t pmap, vm_offset_t va); 305static void pmap_pte_release(pt_entry_t *pte); 306static int pmap_unuse_pt(pmap_t, vm_offset_t, vm_page_t *); 307static boolean_t pmap_is_prefaultable_locked(pmap_t pmap, vm_offset_t addr); 308 309static __inline void pagezero(void *page); 310 311CTASSERT(1 << PDESHIFT == sizeof(pd_entry_t)); 312CTASSERT(1 << PTESHIFT == sizeof(pt_entry_t)); 313 314/* 315 * If you get an error here, then you set KVA_PAGES wrong! See the 316 * description of KVA_PAGES in sys/i386/include/pmap.h. It must be 317 * multiple of 4 for a normal kernel, or a multiple of 8 for a PAE. 318 */ 319CTASSERT(KERNBASE % (1 << 24) == 0); 320 321void 322pd_set(struct pmap *pmap, int ptepindex, vm_paddr_t val, int type) 323{ 324 vm_paddr_t pdir_ma = vtomach(&pmap->pm_pdir[ptepindex]); 325 326 switch (type) { 327 case SH_PD_SET_VA: 328#if 0 329 xen_queue_pt_update(shadow_pdir_ma, 330 xpmap_ptom(val & ~(PG_RW))); 331#endif 332 xen_queue_pt_update(pdir_ma, 333 xpmap_ptom(val)); 334 break; 335 case SH_PD_SET_VA_MA: 336#if 0 337 xen_queue_pt_update(shadow_pdir_ma, 338 val & ~(PG_RW)); 339#endif 340 xen_queue_pt_update(pdir_ma, val); 341 break; 342 case SH_PD_SET_VA_CLEAR: 343#if 0 344 xen_queue_pt_update(shadow_pdir_ma, 0); 345#endif 346 xen_queue_pt_update(pdir_ma, 0); 347 break; 348 } 349} 350 351/* 352 * Bootstrap the system enough to run with virtual memory. 353 * 354 * On the i386 this is called after mapping has already been enabled 355 * and just syncs the pmap module with what has already been done. 
356 * [We can't call it easily with mapping off since the kernel is not 357 * mapped with PA == VA, hence we would have to relocate every address 358 * from the linked base (virtual) address "KERNBASE" to the actual 359 * (physical) address starting relative to 0] 360 */ 361void 362pmap_bootstrap(vm_paddr_t firstaddr) 363{ 364 vm_offset_t va; 365 pt_entry_t *pte, *unused; 366 struct sysmaps *sysmaps; 367 int i; 368 369 /* 370 * Initialize the first available kernel virtual address. However, 371 * using "firstaddr" may waste a few pages of the kernel virtual 372 * address space, because locore may not have mapped every physical 373 * page that it allocated. Preferably, locore would provide a first 374 * unused virtual address in addition to "firstaddr". 375 */ 376 virtual_avail = (vm_offset_t) KERNBASE + firstaddr; 377 378 virtual_end = VM_MAX_KERNEL_ADDRESS; 379 380 /* 381 * Initialize the kernel pmap (which is statically allocated). 382 */ 383 PMAP_LOCK_INIT(kernel_pmap); 384 kernel_pmap->pm_pdir = (pd_entry_t *) (KERNBASE + (u_int)IdlePTD); 385#ifdef PAE 386 kernel_pmap->pm_pdpt = (pdpt_entry_t *) (KERNBASE + (u_int)IdlePDPT); 387#endif 388 CPU_FILL(&kernel_pmap->pm_active); /* don't allow deactivation */ 389 TAILQ_INIT(&kernel_pmap->pm_pvchunk); 390 LIST_INIT(&allpmaps); 391 mtx_init(&allpmaps_lock, "allpmaps", NULL, MTX_SPIN); 392 mtx_lock_spin(&allpmaps_lock); 393 LIST_INSERT_HEAD(&allpmaps, kernel_pmap, pm_list); 394 mtx_unlock_spin(&allpmaps_lock); 395 if (nkpt == 0) 396 nkpt = NKPT; 397 398 /* 399 * Reserve some special page table entries/VA space for temporary 400 * mapping of pages. 401 */ 402#define SYSMAP(c, p, v, n) \ 403 v = (c)va; va += ((n)*PAGE_SIZE); p = pte; pte += (n); 404 405 va = virtual_avail; 406 pte = vtopte(va); 407 408 /* 409 * CMAP1/CMAP2 are used for zeroing and copying pages. 410 * CMAP3 is used for the idle process page zeroing. 411 */ 412 for (i = 0; i < MAXCPU; i++) { 413 sysmaps = &sysmaps_pcpu[i]; 414 mtx_init(&sysmaps->lock, "SYSMAPS", NULL, MTX_DEF); 415 SYSMAP(caddr_t, sysmaps->CMAP1, sysmaps->CADDR1, 1) 416 SYSMAP(caddr_t, sysmaps->CMAP2, sysmaps->CADDR2, 1) 417 PT_SET_MA(sysmaps->CADDR1, 0); 418 PT_SET_MA(sysmaps->CADDR2, 0); 419 } 420 SYSMAP(caddr_t, CMAP3, CADDR3, 1) 421 PT_SET_MA(CADDR3, 0); 422 423 /* 424 * Crashdump maps. 425 */ 426 SYSMAP(caddr_t, unused, crashdumpmap, MAXDUMPPGS) 427 428 /* 429 * ptvmmap is used for reading arbitrary physical pages via /dev/mem. 430 */ 431 SYSMAP(caddr_t, unused, ptvmmap, 1) 432 433 /* 434 * msgbufp is used to map the system message buffer. 435 */ 436 SYSMAP(struct msgbuf *, unused, msgbufp, atop(round_page(msgbufsize))) 437 438 /* 439 * ptemap is used for pmap_pte_quick 440 */ 441 SYSMAP(pt_entry_t *, PMAP1, PADDR1, 1) 442 SYSMAP(pt_entry_t *, PMAP2, PADDR2, 1) 443 444 mtx_init(&PMAP2mutex, "PMAP2", NULL, MTX_DEF); 445 446 virtual_avail = va; 447 448 /* 449 * Leave in place an identity mapping (virt == phys) for the low 1 MB 450 * physical memory region that is used by the ACPI wakeup code. This 451 * mapping must not have PG_G set. 452 */ 453#ifndef XEN 454 /* 455 * leave here deliberately to show that this is not supported 456 */ 457#ifdef XBOX 458 /* FIXME: This is gross, but needed for the XBOX. Since we are in such 459 * an early stadium, we cannot yet neatly map video memory ... :-( 460 * Better fixes are very welcome! */ 461 if (!arch_i386_is_xbox) 462#endif 463 for (i = 1; i < NKPT; i++) 464 PTD[i] = 0; 465 466 /* Initialize the PAT MSR if present. 
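 * pmap_init_pat() keeps the architected defaults in PAT indices 0-3 and,
 * where the CPU allows it, programs write-protected and write-combining
 * entries (or, on CPUs with the relevant errata, remaps index 2 to
 * write-combining).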
*/ 467 pmap_init_pat(); 468 469 /* Turn on PG_G on kernel page(s) */ 470 pmap_set_pg(); 471#endif 472 473#ifdef HAMFISTED_LOCKING 474 mtx_init(&createdelete_lock, "pmap create/delete", NULL, MTX_DEF); 475#endif 476} 477 478/* 479 * Setup the PAT MSR. 480 */ 481void 482pmap_init_pat(void) 483{ 484 uint64_t pat_msr; 485 486 /* Bail if this CPU doesn't implement PAT. */ 487 if (!(cpu_feature & CPUID_PAT)) 488 return; 489 490 if (cpu_vendor_id != CPU_VENDOR_INTEL || 491 (CPUID_TO_FAMILY(cpu_id) == 6 && CPUID_TO_MODEL(cpu_id) >= 0xe)) { 492 /* 493 * Leave the indices 0-3 at the default of WB, WT, UC, and UC-. 494 * Program 4 and 5 as WP and WC. 495 * Leave 6 and 7 as UC and UC-. 496 */ 497 pat_msr = rdmsr(MSR_PAT); 498 pat_msr &= ~(PAT_MASK(4) | PAT_MASK(5)); 499 pat_msr |= PAT_VALUE(4, PAT_WRITE_PROTECTED) | 500 PAT_VALUE(5, PAT_WRITE_COMBINING); 501 pat_works = 1; 502 } else { 503 /* 504 * Due to some Intel errata, we can only safely use the lower 4 505 * PAT entries. Thus, just replace PAT Index 2 with WC instead 506 * of UC-. 507 * 508 * Intel Pentium III Processor Specification Update 509 * Errata E.27 (Upper Four PAT Entries Not Usable With Mode B 510 * or Mode C Paging) 511 * 512 * Intel Pentium IV Processor Specification Update 513 * Errata N46 (PAT Index MSB May Be Calculated Incorrectly) 514 */ 515 pat_msr = rdmsr(MSR_PAT); 516 pat_msr &= ~PAT_MASK(2); 517 pat_msr |= PAT_VALUE(2, PAT_WRITE_COMBINING); 518 pat_works = 0; 519 } 520 wrmsr(MSR_PAT, pat_msr); 521} 522 523/* 524 * Initialize a vm_page's machine-dependent fields. 525 */ 526void 527pmap_page_init(vm_page_t m) 528{ 529 530 TAILQ_INIT(&m->md.pv_list); 531 m->md.pat_mode = PAT_WRITE_BACK; 532} 533 534/* 535 * ABuse the pte nodes for unmapped kva to thread a kva freelist through. 536 * Requirements: 537 * - Must deal with pages in order to ensure that none of the PG_* bits 538 * are ever set, PG_V in particular. 539 * - Assumes we can write to ptes without pte_store() atomic ops, even 540 * on PAE systems. This should be ok. 541 * - Assumes nothing will ever test these addresses for 0 to indicate 542 * no mapping instead of correctly checking PG_V. 543 * - Assumes a vm_offset_t will fit in a pte (true for i386). 544 * Because PG_V is never set, there can be no mappings to invalidate. 545 */ 546static int ptelist_count = 0; 547static vm_offset_t 548pmap_ptelist_alloc(vm_offset_t *head) 549{ 550 vm_offset_t va; 551 vm_offset_t *phead = (vm_offset_t *)*head; 552 553 if (ptelist_count == 0) { 554 printf("out of memory!!!!!!\n"); 555 return (0); /* Out of memory */ 556 } 557 ptelist_count--; 558 va = phead[ptelist_count]; 559 return (va); 560} 561 562static void 563pmap_ptelist_free(vm_offset_t *head, vm_offset_t va) 564{ 565 vm_offset_t *phead = (vm_offset_t *)*head; 566 567 phead[ptelist_count++] = va; 568} 569 570static void 571pmap_ptelist_init(vm_offset_t *head, void *base, int npages) 572{ 573 int i, nstackpages; 574 vm_offset_t va; 575 vm_page_t m; 576 577 nstackpages = (npages + PAGE_SIZE/sizeof(vm_offset_t) - 1)/ (PAGE_SIZE/sizeof(vm_offset_t)); 578 for (i = 0; i < nstackpages; i++) { 579 va = (vm_offset_t)base + i * PAGE_SIZE; 580 m = vm_page_alloc(NULL, i, 581 VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | 582 VM_ALLOC_ZERO); 583 pmap_qenter(va, &m, 1); 584 } 585 586 *head = (vm_offset_t)base; 587 for (i = npages - 1; i >= nstackpages; i--) { 588 va = (vm_offset_t)base + i * PAGE_SIZE; 589 pmap_ptelist_free(head, va); 590 } 591} 592 593 594/* 595 * Initialize the pmap module. 
596 * Called by vm_init, to initialize any structures that the pmap 597 * system needs to map virtual memory. 598 */ 599void 600pmap_init(void) 601{ 602 603 /* 604 * Initialize the address space (zone) for the pv entries. Set a 605 * high water mark so that the system can recover from excessive 606 * numbers of pv entries. 607 */ 608 TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc); 609 pv_entry_max = shpgperproc * maxproc + cnt.v_page_count; 610 TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max); 611 pv_entry_max = roundup(pv_entry_max, _NPCPV); 612 pv_entry_high_water = 9 * (pv_entry_max / 10); 613 614 pv_maxchunks = MAX(pv_entry_max / _NPCPV, maxproc); 615 pv_chunkbase = (struct pv_chunk *)kmem_alloc_nofault(kernel_map, 616 PAGE_SIZE * pv_maxchunks); 617 if (pv_chunkbase == NULL) 618 panic("pmap_init: not enough kvm for pv chunks"); 619 pmap_ptelist_init(&pv_vafree, pv_chunkbase, pv_maxchunks); 620} 621 622 623SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_max, CTLFLAG_RD, &pv_entry_max, 0, 624 "Max number of PV entries"); 625SYSCTL_INT(_vm_pmap, OID_AUTO, shpgperproc, CTLFLAG_RD, &shpgperproc, 0, 626 "Page share factor per proc"); 627 628static SYSCTL_NODE(_vm_pmap, OID_AUTO, pde, CTLFLAG_RD, 0, 629 "2/4MB page mapping counters"); 630 631static u_long pmap_pde_mappings; 632SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, mappings, CTLFLAG_RD, 633 &pmap_pde_mappings, 0, "2/4MB page mappings"); 634 635/*************************************************** 636 * Low level helper routines..... 637 ***************************************************/ 638 639/* 640 * Determine the appropriate bits to set in a PTE or PDE for a specified 641 * caching mode. 642 */ 643int 644pmap_cache_bits(int mode, boolean_t is_pde) 645{ 646 int pat_flag, pat_index, cache_bits; 647 648 /* The PAT bit is different for PTE's and PDE's. */ 649 pat_flag = is_pde ? PG_PDE_PAT : PG_PTE_PAT; 650 651 /* If we don't support PAT, map extended modes to older ones. */ 652 if (!(cpu_feature & CPUID_PAT)) { 653 switch (mode) { 654 case PAT_UNCACHEABLE: 655 case PAT_WRITE_THROUGH: 656 case PAT_WRITE_BACK: 657 break; 658 case PAT_UNCACHED: 659 case PAT_WRITE_COMBINING: 660 case PAT_WRITE_PROTECTED: 661 mode = PAT_UNCACHEABLE; 662 break; 663 } 664 } 665 666 /* Map the caching mode to a PAT index. */ 667 if (pat_works) { 668 switch (mode) { 669 case PAT_UNCACHEABLE: 670 pat_index = 3; 671 break; 672 case PAT_WRITE_THROUGH: 673 pat_index = 1; 674 break; 675 case PAT_WRITE_BACK: 676 pat_index = 0; 677 break; 678 case PAT_UNCACHED: 679 pat_index = 2; 680 break; 681 case PAT_WRITE_COMBINING: 682 pat_index = 5; 683 break; 684 case PAT_WRITE_PROTECTED: 685 pat_index = 4; 686 break; 687 default: 688 panic("Unknown caching mode %d\n", mode); 689 } 690 } else { 691 switch (mode) { 692 case PAT_UNCACHED: 693 case PAT_UNCACHEABLE: 694 case PAT_WRITE_PROTECTED: 695 pat_index = 3; 696 break; 697 case PAT_WRITE_THROUGH: 698 pat_index = 1; 699 break; 700 case PAT_WRITE_BACK: 701 pat_index = 0; 702 break; 703 case PAT_WRITE_COMBINING: 704 pat_index = 2; 705 break; 706 default: 707 panic("Unknown caching mode %d\n", mode); 708 } 709 } 710 711 /* Map the 3-bit index value into the PAT, PCD, and PWT bits. */ 712 cache_bits = 0; 713 if (pat_index & 0x4) 714 cache_bits |= pat_flag; 715 if (pat_index & 0x2) 716 cache_bits |= PG_NC_PCD; 717 if (pat_index & 0x1) 718 cache_bits |= PG_NC_PWT; 719 return (cache_bits); 720} 721#ifdef SMP 722/* 723 * For SMP, these functions have to use the IPI mechanism for coherence. 
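 *
 * In outline, each of the pmap_invalidate_*() functions below follows the
 * same pattern (sketch only):
 *
 *	sched_pin();
 *	if (pmap == kernel_pmap || pmap is active on all CPUs) {
 *		invalidate locally;
 *		broadcast the invalidation (smp_invlpg(), smp_invltlb(), ...);
 *	} else {
 *		if (pmap is active on this CPU)
 *			invalidate locally;
 *		send a masked IPI to the other CPUs in pm_active;
 *	}
 *	sched_unpin();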
724 * 725 * N.B.: Before calling any of the following TLB invalidation functions, 726 * the calling processor must ensure that all stores updating a non- 727 * kernel page table are globally performed. Otherwise, another 728 * processor could cache an old, pre-update entry without being 729 * invalidated. This can happen one of two ways: (1) The pmap becomes 730 * active on another processor after its pm_active field is checked by 731 * one of the following functions but before a store updating the page 732 * table is globally performed. (2) The pmap becomes active on another 733 * processor before its pm_active field is checked but due to 734 * speculative loads one of the following functions stills reads the 735 * pmap as inactive on the other processor. 736 * 737 * The kernel page table is exempt because its pm_active field is 738 * immutable. The kernel page table is always active on every 739 * processor. 740 */ 741void 742pmap_invalidate_page(pmap_t pmap, vm_offset_t va) 743{ 744 cpuset_t other_cpus; 745 u_int cpuid; 746 747 CTR2(KTR_PMAP, "pmap_invalidate_page: pmap=%p va=0x%x", 748 pmap, va); 749 750 sched_pin(); 751 if (pmap == kernel_pmap || !CPU_CMP(&pmap->pm_active, &all_cpus)) { 752 invlpg(va); 753 smp_invlpg(va); 754 } else { 755 cpuid = PCPU_GET(cpuid); 756 other_cpus = all_cpus; 757 CPU_CLR(cpuid, &other_cpus); 758 if (CPU_ISSET(cpuid, &pmap->pm_active)) 759 invlpg(va); 760 CPU_AND(&other_cpus, &pmap->pm_active); 761 if (!CPU_EMPTY(&other_cpus)) 762 smp_masked_invlpg(other_cpus, va); 763 } 764 sched_unpin(); 765 PT_UPDATES_FLUSH(); 766} 767 768void 769pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) 770{ 771 cpuset_t other_cpus; 772 vm_offset_t addr; 773 u_int cpuid; 774 775 CTR3(KTR_PMAP, "pmap_invalidate_page: pmap=%p eva=0x%x sva=0x%x", 776 pmap, sva, eva); 777 778 sched_pin(); 779 if (pmap == kernel_pmap || !CPU_CMP(&pmap->pm_active, &all_cpus)) { 780 for (addr = sva; addr < eva; addr += PAGE_SIZE) 781 invlpg(addr); 782 smp_invlpg_range(sva, eva); 783 } else { 784 cpuid = PCPU_GET(cpuid); 785 other_cpus = all_cpus; 786 CPU_CLR(cpuid, &other_cpus); 787 if (CPU_ISSET(cpuid, &pmap->pm_active)) 788 for (addr = sva; addr < eva; addr += PAGE_SIZE) 789 invlpg(addr); 790 CPU_AND(&other_cpus, &pmap->pm_active); 791 if (!CPU_EMPTY(&other_cpus)) 792 smp_masked_invlpg_range(other_cpus, sva, eva); 793 } 794 sched_unpin(); 795 PT_UPDATES_FLUSH(); 796} 797 798void 799pmap_invalidate_all(pmap_t pmap) 800{ 801 cpuset_t other_cpus; 802 u_int cpuid; 803 804 CTR1(KTR_PMAP, "pmap_invalidate_page: pmap=%p", pmap); 805 806 sched_pin(); 807 if (pmap == kernel_pmap || !CPU_CMP(&pmap->pm_active, &all_cpus)) { 808 invltlb(); 809 smp_invltlb(); 810 } else { 811 cpuid = PCPU_GET(cpuid); 812 other_cpus = all_cpus; 813 CPU_CLR(cpuid, &other_cpus); 814 if (CPU_ISSET(cpuid, &pmap->pm_active)) 815 invltlb(); 816 CPU_AND(&other_cpus, &pmap->pm_active); 817 if (!CPU_EMPTY(&other_cpus)) 818 smp_masked_invltlb(other_cpus); 819 } 820 sched_unpin(); 821} 822 823void 824pmap_invalidate_cache(void) 825{ 826 827 sched_pin(); 828 wbinvd(); 829 smp_cache_flush(); 830 sched_unpin(); 831} 832#else /* !SMP */ 833/* 834 * Normal, non-SMP, 486+ invalidation functions. 835 * We inline these within pmap.c for speed. 
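 *
 * With no other processors to notify, each function simply issues
 * invlpg() or invltlb() directly when the pmap is the kernel pmap or is
 * marked active on this CPU.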
836 */ 837PMAP_INLINE void 838pmap_invalidate_page(pmap_t pmap, vm_offset_t va) 839{ 840 CTR2(KTR_PMAP, "pmap_invalidate_page: pmap=%p va=0x%x", 841 pmap, va); 842 843 if (pmap == kernel_pmap || !CPU_EMPTY(&pmap->pm_active)) 844 invlpg(va); 845 PT_UPDATES_FLUSH(); 846} 847 848PMAP_INLINE void 849pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) 850{ 851 vm_offset_t addr; 852 853 if (eva - sva > PAGE_SIZE) 854 CTR3(KTR_PMAP, "pmap_invalidate_range: pmap=%p sva=0x%x eva=0x%x", 855 pmap, sva, eva); 856 857 if (pmap == kernel_pmap || !CPU_EMPTY(&pmap->pm_active)) 858 for (addr = sva; addr < eva; addr += PAGE_SIZE) 859 invlpg(addr); 860 PT_UPDATES_FLUSH(); 861} 862 863PMAP_INLINE void 864pmap_invalidate_all(pmap_t pmap) 865{ 866 867 CTR1(KTR_PMAP, "pmap_invalidate_all: pmap=%p", pmap); 868 869 if (pmap == kernel_pmap || !CPU_EMPTY(&pmap->pm_active)) 870 invltlb(); 871} 872 873PMAP_INLINE void 874pmap_invalidate_cache(void) 875{ 876 877 wbinvd(); 878} 879#endif /* !SMP */ 880 881#define PMAP_CLFLUSH_THRESHOLD (2 * 1024 * 1024) 882 883void 884pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva) 885{ 886 887 KASSERT((sva & PAGE_MASK) == 0, 888 ("pmap_invalidate_cache_range: sva not page-aligned")); 889 KASSERT((eva & PAGE_MASK) == 0, 890 ("pmap_invalidate_cache_range: eva not page-aligned")); 891 892 if (cpu_feature & CPUID_SS) 893 ; /* If "Self Snoop" is supported, do nothing. */ 894 else if ((cpu_feature & CPUID_CLFSH) != 0 && 895 eva - sva < PMAP_CLFLUSH_THRESHOLD) { 896 897 /* 898 * Otherwise, do per-cache line flush. Use the mfence 899 * instruction to insure that previous stores are 900 * included in the write-back. The processor 901 * propagates flush to other processors in the cache 902 * coherence domain. 903 */ 904 mfence(); 905 for (; sva < eva; sva += cpu_clflush_line_size) 906 clflush(sva); 907 mfence(); 908 } else { 909 910 /* 911 * No targeted cache flush methods are supported by CPU, 912 * or the supplied range is bigger than 2MB. 913 * Globally invalidate cache. 914 */ 915 pmap_invalidate_cache(); 916 } 917} 918 919void 920pmap_invalidate_cache_pages(vm_page_t *pages, int count) 921{ 922 int i; 923 924 if (count >= PMAP_CLFLUSH_THRESHOLD / PAGE_SIZE || 925 (cpu_feature & CPUID_CLFSH) == 0) { 926 pmap_invalidate_cache(); 927 } else { 928 for (i = 0; i < count; i++) 929 pmap_flush_page(pages[i]); 930 } 931} 932 933/* 934 * Are we current address space or kernel? N.B. We return FALSE when 935 * a pmap's page table is in use because a kernel thread is borrowing 936 * it. The borrowed page table can change spontaneously, making any 937 * dependence on its continued use subject to a race condition. 938 */ 939static __inline int 940pmap_is_current(pmap_t pmap) 941{ 942 943 return (pmap == kernel_pmap || 944 (pmap == vmspace_pmap(curthread->td_proc->p_vmspace) && 945 (pmap->pm_pdir[PTDPTDI] & PG_FRAME) == (PTDpde[0] & PG_FRAME))); 946} 947 948/* 949 * If the given pmap is not the current or kernel pmap, the returned pte must 950 * be released by passing it to pmap_pte_release(). 951 */ 952pt_entry_t * 953pmap_pte(pmap_t pmap, vm_offset_t va) 954{ 955 pd_entry_t newpf; 956 pd_entry_t *pde; 957 958 pde = pmap_pde(pmap, va); 959 if (*pde & PG_PS) 960 return (pde); 961 if (*pde != 0) { 962 /* are we current address space or kernel? 
*/ 963 if (pmap_is_current(pmap)) 964 return (vtopte(va)); 965 mtx_lock(&PMAP2mutex); 966 newpf = *pde & PG_FRAME; 967 if ((*PMAP2 & PG_FRAME) != newpf) { 968 PT_SET_MA(PADDR2, newpf | PG_V | PG_A | PG_M); 969 CTR3(KTR_PMAP, "pmap_pte: pmap=%p va=0x%x newpte=0x%08x", 970 pmap, va, (*PMAP2 & 0xffffffff)); 971 } 972 return (PADDR2 + (i386_btop(va) & (NPTEPG - 1))); 973 } 974 return (NULL); 975} 976 977/* 978 * Releases a pte that was obtained from pmap_pte(). Be prepared for the pte 979 * being NULL. 980 */ 981static __inline void 982pmap_pte_release(pt_entry_t *pte) 983{ 984 985 if ((pt_entry_t *)((vm_offset_t)pte & ~PAGE_MASK) == PADDR2) { 986 CTR1(KTR_PMAP, "pmap_pte_release: pte=0x%jx", 987 *PMAP2); 988 vm_page_lock_queues(); 989 PT_SET_VA(PMAP2, 0, TRUE); 990 vm_page_unlock_queues(); 991 mtx_unlock(&PMAP2mutex); 992 } 993} 994 995static __inline void 996invlcaddr(void *caddr) 997{ 998 999 invlpg((u_int)caddr); 1000 PT_UPDATES_FLUSH(); 1001} 1002 1003/* 1004 * Super fast pmap_pte routine best used when scanning 1005 * the pv lists. This eliminates many coarse-grained 1006 * invltlb calls. Note that many of the pv list 1007 * scans are across different pmaps. It is very wasteful 1008 * to do an entire invltlb for checking a single mapping. 1009 * 1010 * If the given pmap is not the current pmap, vm_page_queue_mtx 1011 * must be held and curthread pinned to a CPU. 1012 */ 1013static pt_entry_t * 1014pmap_pte_quick(pmap_t pmap, vm_offset_t va) 1015{ 1016 pd_entry_t newpf; 1017 pd_entry_t *pde; 1018 1019 pde = pmap_pde(pmap, va); 1020 if (*pde & PG_PS) 1021 return (pde); 1022 if (*pde != 0) { 1023 /* are we current address space or kernel? */ 1024 if (pmap_is_current(pmap)) 1025 return (vtopte(va)); 1026 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 1027 KASSERT(curthread->td_pinned > 0, ("curthread not pinned")); 1028 newpf = *pde & PG_FRAME; 1029 if ((*PMAP1 & PG_FRAME) != newpf) { 1030 PT_SET_MA(PADDR1, newpf | PG_V | PG_A | PG_M); 1031 CTR3(KTR_PMAP, "pmap_pte_quick: pmap=%p va=0x%x newpte=0x%08x", 1032 pmap, va, (u_long)*PMAP1); 1033 1034#ifdef SMP 1035 PMAP1cpu = PCPU_GET(cpuid); 1036#endif 1037 PMAP1changed++; 1038 } else 1039#ifdef SMP 1040 if (PMAP1cpu != PCPU_GET(cpuid)) { 1041 PMAP1cpu = PCPU_GET(cpuid); 1042 invlcaddr(PADDR1); 1043 PMAP1changedcpu++; 1044 } else 1045#endif 1046 PMAP1unchanged++; 1047 return (PADDR1 + (i386_btop(va) & (NPTEPG - 1))); 1048 } 1049 return (0); 1050} 1051 1052/* 1053 * Routine: pmap_extract 1054 * Function: 1055 * Extract the physical page address associated 1056 * with the given map/virtual_address pair. 1057 */ 1058vm_paddr_t 1059pmap_extract(pmap_t pmap, vm_offset_t va) 1060{ 1061 vm_paddr_t rtval; 1062 pt_entry_t *pte; 1063 pd_entry_t pde; 1064 pt_entry_t pteval; 1065 1066 rtval = 0; 1067 PMAP_LOCK(pmap); 1068 pde = pmap->pm_pdir[va >> PDRSHIFT]; 1069 if (pde != 0) { 1070 if ((pde & PG_PS) != 0) { 1071 rtval = xpmap_mtop(pde & PG_PS_FRAME) | (va & PDRMASK); 1072 PMAP_UNLOCK(pmap); 1073 return rtval; 1074 } 1075 pte = pmap_pte(pmap, va); 1076 pteval = *pte ? 
xpmap_mtop(*pte) : 0; 1077 rtval = (pteval & PG_FRAME) | (va & PAGE_MASK); 1078 pmap_pte_release(pte); 1079 } 1080 PMAP_UNLOCK(pmap); 1081 return (rtval); 1082} 1083 1084/* 1085 * Routine: pmap_extract_ma 1086 * Function: 1087 * Like pmap_extract, but returns machine address 1088 */ 1089vm_paddr_t 1090pmap_extract_ma(pmap_t pmap, vm_offset_t va) 1091{ 1092 vm_paddr_t rtval; 1093 pt_entry_t *pte; 1094 pd_entry_t pde; 1095 1096 rtval = 0; 1097 PMAP_LOCK(pmap); 1098 pde = pmap->pm_pdir[va >> PDRSHIFT]; 1099 if (pde != 0) { 1100 if ((pde & PG_PS) != 0) { 1101 rtval = (pde & ~PDRMASK) | (va & PDRMASK); 1102 PMAP_UNLOCK(pmap); 1103 return rtval; 1104 } 1105 pte = pmap_pte(pmap, va); 1106 rtval = (*pte & PG_FRAME) | (va & PAGE_MASK); 1107 pmap_pte_release(pte); 1108 } 1109 PMAP_UNLOCK(pmap); 1110 return (rtval); 1111} 1112 1113/* 1114 * Routine: pmap_extract_and_hold 1115 * Function: 1116 * Atomically extract and hold the physical page 1117 * with the given pmap and virtual address pair 1118 * if that mapping permits the given protection. 1119 */ 1120vm_page_t 1121pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot) 1122{ 1123 pd_entry_t pde; 1124 pt_entry_t pte, *ptep; 1125 vm_page_t m; 1126 vm_paddr_t pa; 1127 1128 pa = 0; 1129 m = NULL; 1130 PMAP_LOCK(pmap); 1131retry: 1132 pde = PT_GET(pmap_pde(pmap, va)); 1133 if (pde != 0) { 1134 if (pde & PG_PS) { 1135 if ((pde & PG_RW) || (prot & VM_PROT_WRITE) == 0) { 1136 if (vm_page_pa_tryrelock(pmap, (pde & 1137 PG_PS_FRAME) | (va & PDRMASK), &pa)) 1138 goto retry; 1139 m = PHYS_TO_VM_PAGE((pde & PG_PS_FRAME) | 1140 (va & PDRMASK)); 1141 vm_page_hold(m); 1142 } 1143 } else { 1144 ptep = pmap_pte(pmap, va); 1145 pte = PT_GET(ptep); 1146 pmap_pte_release(ptep); 1147 if (pte != 0 && 1148 ((pte & PG_RW) || (prot & VM_PROT_WRITE) == 0)) { 1149 if (vm_page_pa_tryrelock(pmap, pte & PG_FRAME, 1150 &pa)) 1151 goto retry; 1152 m = PHYS_TO_VM_PAGE(pte & PG_FRAME); 1153 vm_page_hold(m); 1154 } 1155 } 1156 } 1157 PA_UNLOCK_COND(pa); 1158 PMAP_UNLOCK(pmap); 1159 return (m); 1160} 1161 1162/*************************************************** 1163 * Low level mapping routines..... 1164 ***************************************************/ 1165 1166/* 1167 * Add a wired page to the kva. 1168 * Note: not SMP coherent. 1169 * 1170 * This function may be used before pmap_bootstrap() is called. 1171 */ 1172void 1173pmap_kenter(vm_offset_t va, vm_paddr_t pa) 1174{ 1175 1176 PT_SET_MA(va, xpmap_ptom(pa)| PG_RW | PG_V | pgeflag); 1177} 1178 1179void 1180pmap_kenter_ma(vm_offset_t va, vm_paddr_t ma) 1181{ 1182 pt_entry_t *pte; 1183 1184 pte = vtopte(va); 1185 pte_store_ma(pte, ma | PG_RW | PG_V | pgeflag); 1186} 1187 1188static __inline void 1189pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode) 1190{ 1191 1192 PT_SET_MA(va, pa | PG_RW | PG_V | pgeflag | pmap_cache_bits(mode, 0)); 1193} 1194 1195/* 1196 * Remove a page from the kernel pagetables. 1197 * Note: not SMP coherent. 1198 * 1199 * This function may be used before pmap_bootstrap() is called. 1200 */ 1201PMAP_INLINE void 1202pmap_kremove(vm_offset_t va) 1203{ 1204 pt_entry_t *pte; 1205 1206 pte = vtopte(va); 1207 PT_CLEAR_VA(pte, FALSE); 1208} 1209 1210/* 1211 * Used to map a range of physical addresses into kernel 1212 * virtual address space. 1213 * 1214 * The value passed in '*virt' is a suggested virtual address for 1215 * the mapping. 
Architectures which can support a direct-mapped 1216 * physical to virtual region can return the appropriate address 1217 * within that region, leaving '*virt' unchanged. Other 1218 * architectures should map the pages starting at '*virt' and 1219 * update '*virt' with the first usable address after the mapped 1220 * region. 1221 */ 1222vm_offset_t 1223pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot) 1224{ 1225 vm_offset_t va, sva; 1226 1227 va = sva = *virt; 1228 CTR4(KTR_PMAP, "pmap_map: va=0x%x start=0x%jx end=0x%jx prot=0x%x", 1229 va, start, end, prot); 1230 while (start < end) { 1231 pmap_kenter(va, start); 1232 va += PAGE_SIZE; 1233 start += PAGE_SIZE; 1234 } 1235 pmap_invalidate_range(kernel_pmap, sva, va); 1236 *virt = va; 1237 return (sva); 1238} 1239 1240 1241/* 1242 * Add a list of wired pages to the kva 1243 * this routine is only used for temporary 1244 * kernel mappings that do not need to have 1245 * page modification or references recorded. 1246 * Note that old mappings are simply written 1247 * over. The page *must* be wired. 1248 * Note: SMP coherent. Uses a ranged shootdown IPI. 1249 */ 1250void 1251pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count) 1252{ 1253 pt_entry_t *endpte, *pte; 1254 vm_paddr_t pa; 1255 vm_offset_t va = sva; 1256 int mclcount = 0; 1257 multicall_entry_t mcl[16]; 1258 multicall_entry_t *mclp = mcl; 1259 int error; 1260 1261 CTR2(KTR_PMAP, "pmap_qenter:sva=0x%x count=%d", va, count); 1262 pte = vtopte(sva); 1263 endpte = pte + count; 1264 while (pte < endpte) { 1265 pa = VM_PAGE_TO_MACH(*ma) | pgeflag | PG_RW | PG_V | PG_M | PG_A; 1266 1267 mclp->op = __HYPERVISOR_update_va_mapping; 1268 mclp->args[0] = va; 1269 mclp->args[1] = (uint32_t)(pa & 0xffffffff); 1270 mclp->args[2] = (uint32_t)(pa >> 32); 1271 mclp->args[3] = (*pte & PG_V) ? UVMF_INVLPG|UVMF_ALL : 0; 1272 1273 va += PAGE_SIZE; 1274 pte++; 1275 ma++; 1276 mclp++; 1277 mclcount++; 1278 if (mclcount == 16) { 1279 error = HYPERVISOR_multicall(mcl, mclcount); 1280 mclp = mcl; 1281 mclcount = 0; 1282 KASSERT(error == 0, ("bad multicall %d", error)); 1283 } 1284 } 1285 if (mclcount) { 1286 error = HYPERVISOR_multicall(mcl, mclcount); 1287 KASSERT(error == 0, ("bad multicall %d", error)); 1288 } 1289 1290#ifdef INVARIANTS 1291 for (pte = vtopte(sva), mclcount = 0; mclcount < count; mclcount++, pte++) 1292 KASSERT(*pte, ("pte not set for va=0x%x", sva + mclcount*PAGE_SIZE)); 1293#endif 1294} 1295 1296/* 1297 * This routine tears out page mappings from the 1298 * kernel -- it is meant only for temporary mappings. 1299 * Note: SMP coherent. Uses a ranged shootdown IPI. 1300 */ 1301void 1302pmap_qremove(vm_offset_t sva, int count) 1303{ 1304 vm_offset_t va; 1305 1306 CTR2(KTR_PMAP, "pmap_qremove: sva=0x%x count=%d", sva, count); 1307 va = sva; 1308 vm_page_lock_queues(); 1309 critical_enter(); 1310 while (count-- > 0) { 1311 pmap_kremove(va); 1312 va += PAGE_SIZE; 1313 } 1314 PT_UPDATES_FLUSH(); 1315 pmap_invalidate_range(kernel_pmap, sva, va); 1316 critical_exit(); 1317 vm_page_unlock_queues(); 1318} 1319 1320/*************************************************** 1321 * Page table page management routines..... 
1322 ***************************************************/ 1323static __inline void 1324pmap_free_zero_pages(vm_page_t free) 1325{ 1326 vm_page_t m; 1327 1328 while (free != NULL) { 1329 m = free; 1330 free = m->right; 1331 vm_page_free_zero(m); 1332 } 1333} 1334 1335/* 1336 * This routine unholds page table pages, and if the hold count 1337 * drops to zero, then it decrements the wire count. 1338 */ 1339static __inline int 1340pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m, vm_page_t *free) 1341{ 1342 1343 --m->wire_count; 1344 if (m->wire_count == 0) 1345 return (_pmap_unwire_pte_hold(pmap, m, free)); 1346 else 1347 return (0); 1348} 1349 1350static int 1351_pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m, vm_page_t *free) 1352{ 1353 vm_offset_t pteva; 1354 1355 PT_UPDATES_FLUSH(); 1356 /* 1357 * unmap the page table page 1358 */ 1359 xen_pt_unpin(pmap->pm_pdir[m->pindex]); 1360 /* 1361 * page *might* contain residual mapping :-/ 1362 */ 1363 PD_CLEAR_VA(pmap, m->pindex, TRUE); 1364 pmap_zero_page(m); 1365 --pmap->pm_stats.resident_count; 1366 1367 /* 1368 * This is a release store so that the ordinary store unmapping 1369 * the page table page is globally performed before TLB shoot- 1370 * down is begun. 1371 */ 1372 atomic_subtract_rel_int(&cnt.v_wire_count, 1); 1373 1374 /* 1375 * Do an invltlb to make the invalidated mapping 1376 * take effect immediately. 1377 */ 1378 pteva = VM_MAXUSER_ADDRESS + i386_ptob(m->pindex); 1379 pmap_invalidate_page(pmap, pteva); 1380 1381 /* 1382 * Put page on a list so that it is released after 1383 * *ALL* TLB shootdown is done 1384 */ 1385 m->right = *free; 1386 *free = m; 1387 1388 return (1); 1389} 1390 1391/* 1392 * After removing a page table entry, this routine is used to 1393 * conditionally free the page, and manage the hold/wire counts. 1394 */ 1395static int 1396pmap_unuse_pt(pmap_t pmap, vm_offset_t va, vm_page_t *free) 1397{ 1398 pd_entry_t ptepde; 1399 vm_page_t mpte; 1400 1401 if (va >= VM_MAXUSER_ADDRESS) 1402 return (0); 1403 ptepde = PT_GET(pmap_pde(pmap, va)); 1404 mpte = PHYS_TO_VM_PAGE(ptepde & PG_FRAME); 1405 return (pmap_unwire_pte_hold(pmap, mpte, free)); 1406} 1407 1408/* 1409 * Initialize the pmap for the swapper process. 1410 */ 1411void 1412pmap_pinit0(pmap_t pmap) 1413{ 1414 1415 PMAP_LOCK_INIT(pmap); 1416 /* 1417 * Since the page table directory is shared with the kernel pmap, 1418 * which is already included in the list "allpmaps", this pmap does 1419 * not need to be inserted into that list. 1420 */ 1421 pmap->pm_pdir = (pd_entry_t *)(KERNBASE + (vm_offset_t)IdlePTD); 1422#ifdef PAE 1423 pmap->pm_pdpt = (pdpt_entry_t *)(KERNBASE + (vm_offset_t)IdlePDPT); 1424#endif 1425 CPU_ZERO(&pmap->pm_active); 1426 PCPU_SET(curpmap, pmap); 1427 TAILQ_INIT(&pmap->pm_pvchunk); 1428 bzero(&pmap->pm_stats, sizeof pmap->pm_stats); 1429} 1430 1431/* 1432 * Initialize a preallocated and zeroed pmap structure, 1433 * such as one in a vmspace structure. 1434 */ 1435int 1436pmap_pinit(pmap_t pmap) 1437{ 1438 vm_page_t m, ptdpg[NPGPTD + 1]; 1439 int npgptd = NPGPTD + 1; 1440 int i; 1441 1442#ifdef HAMFISTED_LOCKING 1443 mtx_lock(&createdelete_lock); 1444#endif 1445 1446 PMAP_LOCK_INIT(pmap); 1447 1448 /* 1449 * No need to allocate page table space yet but we do need a valid 1450 * page directory table. 
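 *
 * The code below allocates NBPTD bytes of KVA for pm_pdir and NPGPTD + 1
 * physical pages; the extra page is used for the PAE page directory
 * pointer table (pm_pdpt).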
1451 */ 1452 if (pmap->pm_pdir == NULL) { 1453 pmap->pm_pdir = (pd_entry_t *)kmem_alloc_nofault(kernel_map, 1454 NBPTD); 1455 if (pmap->pm_pdir == NULL) { 1456 PMAP_LOCK_DESTROY(pmap); 1457#ifdef HAMFISTED_LOCKING 1458 mtx_unlock(&createdelete_lock); 1459#endif 1460 return (0); 1461 } 1462#ifdef PAE 1463 pmap->pm_pdpt = (pd_entry_t *)kmem_alloc_nofault(kernel_map, 1); 1464#endif 1465 } 1466 1467 /* 1468 * allocate the page directory page(s) 1469 */ 1470 for (i = 0; i < npgptd;) { 1471 m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | 1472 VM_ALLOC_WIRED | VM_ALLOC_ZERO); 1473 if (m == NULL) 1474 VM_WAIT; 1475 else { 1476 ptdpg[i++] = m; 1477 } 1478 } 1479 1480 pmap_qenter((vm_offset_t)pmap->pm_pdir, ptdpg, NPGPTD); 1481 1482 for (i = 0; i < NPGPTD; i++) 1483 if ((ptdpg[i]->flags & PG_ZERO) == 0) 1484 pagezero(pmap->pm_pdir + (i * NPDEPG)); 1485 1486 mtx_lock_spin(&allpmaps_lock); 1487 LIST_INSERT_HEAD(&allpmaps, pmap, pm_list); 1488 /* Copy the kernel page table directory entries. */ 1489 bcopy(PTD + KPTDI, pmap->pm_pdir + KPTDI, nkpt * sizeof(pd_entry_t)); 1490 mtx_unlock_spin(&allpmaps_lock); 1491 1492#ifdef PAE 1493 pmap_qenter((vm_offset_t)pmap->pm_pdpt, &ptdpg[NPGPTD], 1); 1494 if ((ptdpg[NPGPTD]->flags & PG_ZERO) == 0) 1495 bzero(pmap->pm_pdpt, PAGE_SIZE); 1496 for (i = 0; i < NPGPTD; i++) { 1497 vm_paddr_t ma; 1498 1499 ma = VM_PAGE_TO_MACH(ptdpg[i]); 1500 pmap->pm_pdpt[i] = ma | PG_V; 1501 1502 } 1503#endif 1504 for (i = 0; i < NPGPTD; i++) { 1505 pt_entry_t *pd; 1506 vm_paddr_t ma; 1507 1508 ma = VM_PAGE_TO_MACH(ptdpg[i]); 1509 pd = pmap->pm_pdir + (i * NPDEPG); 1510 PT_SET_MA(pd, *vtopte((vm_offset_t)pd) & ~(PG_M|PG_A|PG_U|PG_RW)); 1511#if 0 1512 xen_pgd_pin(ma); 1513#endif 1514 } 1515 1516#ifdef PAE 1517 PT_SET_MA(pmap->pm_pdpt, *vtopte((vm_offset_t)pmap->pm_pdpt) & ~PG_RW); 1518#endif 1519 vm_page_lock_queues(); 1520 xen_flush_queue(); 1521 xen_pgdpt_pin(VM_PAGE_TO_MACH(ptdpg[NPGPTD])); 1522 for (i = 0; i < NPGPTD; i++) { 1523 vm_paddr_t ma = VM_PAGE_TO_MACH(ptdpg[i]); 1524 PT_SET_VA_MA(&pmap->pm_pdir[PTDPTDI + i], ma | PG_V | PG_A, FALSE); 1525 } 1526 xen_flush_queue(); 1527 vm_page_unlock_queues(); 1528 CPU_ZERO(&pmap->pm_active); 1529 TAILQ_INIT(&pmap->pm_pvchunk); 1530 bzero(&pmap->pm_stats, sizeof pmap->pm_stats); 1531 1532#ifdef HAMFISTED_LOCKING 1533 mtx_unlock(&createdelete_lock); 1534#endif 1535 return (1); 1536} 1537 1538/* 1539 * this routine is called if the page table page is not 1540 * mapped correctly. 1541 */ 1542static vm_page_t 1543_pmap_allocpte(pmap_t pmap, u_int ptepindex, int flags) 1544{ 1545 vm_paddr_t ptema; 1546 vm_page_t m; 1547 1548 KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT || 1549 (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK, 1550 ("_pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK")); 1551 1552 /* 1553 * Allocate a page table page. 1554 */ 1555 if ((m = vm_page_alloc(NULL, ptepindex, VM_ALLOC_NOOBJ | 1556 VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) { 1557 if (flags & M_WAITOK) { 1558 PMAP_UNLOCK(pmap); 1559 vm_page_unlock_queues(); 1560 VM_WAIT; 1561 vm_page_lock_queues(); 1562 PMAP_LOCK(pmap); 1563 } 1564 1565 /* 1566 * Indicate the need to retry. While waiting, the page table 1567 * page may have been allocated. 1568 */ 1569 return (NULL); 1570 } 1571 if ((m->flags & PG_ZERO) == 0) 1572 pmap_zero_page(m); 1573 1574 /* 1575 * Map the pagetable page into the process address space, if 1576 * it isn't already there. 
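 *
 * Under Xen the new page is first pinned as a page table page
 * (xen_pt_pin()) and its machine address is then installed in the page
 * directory with PT_SET_VA_MA().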
1577 */ 1578 1579 pmap->pm_stats.resident_count++; 1580 1581 ptema = VM_PAGE_TO_MACH(m); 1582 xen_pt_pin(ptema); 1583 PT_SET_VA_MA(&pmap->pm_pdir[ptepindex], 1584 (ptema | PG_U | PG_RW | PG_V | PG_A | PG_M), TRUE); 1585 1586 KASSERT(pmap->pm_pdir[ptepindex], 1587 ("_pmap_allocpte: ptepindex=%d did not get mapped", ptepindex)); 1588 return (m); 1589} 1590 1591static vm_page_t 1592pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags) 1593{ 1594 u_int ptepindex; 1595 pd_entry_t ptema; 1596 vm_page_t m; 1597 1598 KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT || 1599 (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK, 1600 ("pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK")); 1601 1602 /* 1603 * Calculate pagetable page index 1604 */ 1605 ptepindex = va >> PDRSHIFT; 1606retry: 1607 /* 1608 * Get the page directory entry 1609 */ 1610 ptema = pmap->pm_pdir[ptepindex]; 1611 1612 /* 1613 * This supports switching from a 4MB page to a 1614 * normal 4K page. 1615 */ 1616 if (ptema & PG_PS) { 1617 /* 1618 * XXX 1619 */ 1620 pmap->pm_pdir[ptepindex] = 0; 1621 ptema = 0; 1622 pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE; 1623 pmap_invalidate_all(kernel_pmap); 1624 } 1625 1626 /* 1627 * If the page table page is mapped, we just increment the 1628 * hold count, and activate it. 1629 */ 1630 if (ptema & PG_V) { 1631 m = PHYS_TO_VM_PAGE(xpmap_mtop(ptema) & PG_FRAME); 1632 m->wire_count++; 1633 } else { 1634 /* 1635 * Here if the pte page isn't mapped, or if it has 1636 * been deallocated. 1637 */ 1638 CTR3(KTR_PMAP, "pmap_allocpte: pmap=%p va=0x%08x flags=0x%x", 1639 pmap, va, flags); 1640 m = _pmap_allocpte(pmap, ptepindex, flags); 1641 if (m == NULL && (flags & M_WAITOK)) 1642 goto retry; 1643 1644 KASSERT(pmap->pm_pdir[ptepindex], ("ptepindex=%d did not get mapped", ptepindex)); 1645 } 1646 return (m); 1647} 1648 1649 1650/*************************************************** 1651* Pmap allocation/deallocation routines. 1652 ***************************************************/ 1653 1654#ifdef SMP 1655/* 1656 * Deal with a SMP shootdown of other users of the pmap that we are 1657 * trying to dispose of. This can be a bit hairy. 1658 */ 1659static cpuset_t *lazymask; 1660static u_int lazyptd; 1661static volatile u_int lazywait; 1662 1663void pmap_lazyfix_action(void); 1664 1665void 1666pmap_lazyfix_action(void) 1667{ 1668 1669#ifdef COUNT_IPIS 1670 (*ipi_lazypmap_counts[PCPU_GET(cpuid)])++; 1671#endif 1672 if (rcr3() == lazyptd) 1673 load_cr3(PCPU_GET(curpcb)->pcb_cr3); 1674 CPU_CLR_ATOMIC(PCPU_GET(cpuid), lazymask); 1675 atomic_store_rel_int(&lazywait, 1); 1676} 1677 1678static void 1679pmap_lazyfix_self(u_int cpuid) 1680{ 1681 1682 if (rcr3() == lazyptd) 1683 load_cr3(PCPU_GET(curpcb)->pcb_cr3); 1684 CPU_CLR_ATOMIC(cpuid, lazymask); 1685} 1686 1687 1688static void 1689pmap_lazyfix(pmap_t pmap) 1690{ 1691 cpuset_t mymask, mask; 1692 u_int cpuid, spins; 1693 int lsb; 1694 1695 mask = pmap->pm_active; 1696 while (!CPU_EMPTY(&mask)) { 1697 spins = 50000000; 1698 1699 /* Find least significant set bit. */ 1700 lsb = cpusetobj_ffs(&mask); 1701 MPASS(lsb != 0); 1702 lsb--; 1703 CPU_SETOF(lsb, &mask); 1704 mtx_lock_spin(&smp_ipi_mtx); 1705#ifdef PAE 1706 lazyptd = vtophys(pmap->pm_pdpt); 1707#else 1708 lazyptd = vtophys(pmap->pm_pdir); 1709#endif 1710 cpuid = PCPU_GET(cpuid); 1711 1712 /* Use a cpuset just for having an easy check. 
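 * CPU_SETOF() builds a set containing only the current CPU so that
 * CPU_CMP() can check whether the CPU picked out of pm_active is in
 * fact this CPU, in which case pmap_lazyfix_self() is called directly
 * instead of sending an IPI.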
*/ 1713 CPU_SETOF(cpuid, &mymask); 1714 if (!CPU_CMP(&mask, &mymask)) { 1715 lazymask = &pmap->pm_active; 1716 pmap_lazyfix_self(cpuid); 1717 } else { 1718 atomic_store_rel_int((u_int *)&lazymask, 1719 (u_int)&pmap->pm_active); 1720 atomic_store_rel_int(&lazywait, 0); 1721 ipi_selected(mask, IPI_LAZYPMAP); 1722 while (lazywait == 0) { 1723 ia32_pause(); 1724 if (--spins == 0) 1725 break; 1726 } 1727 } 1728 mtx_unlock_spin(&smp_ipi_mtx); 1729 if (spins == 0) 1730 printf("pmap_lazyfix: spun for 50000000\n"); 1731 mask = pmap->pm_active; 1732 } 1733} 1734 1735#else /* SMP */ 1736 1737/* 1738 * Cleaning up on uniprocessor is easy. For various reasons, we're 1739 * unlikely to have to even execute this code, including the fact 1740 * that the cleanup is deferred until the parent does a wait(2), which 1741 * means that another userland process has run. 1742 */ 1743static void 1744pmap_lazyfix(pmap_t pmap) 1745{ 1746 u_int cr3; 1747 1748 cr3 = vtophys(pmap->pm_pdir); 1749 if (cr3 == rcr3()) { 1750 load_cr3(PCPU_GET(curpcb)->pcb_cr3); 1751 CPU_CLR(PCPU_GET(cpuid), &pmap->pm_active); 1752 } 1753} 1754#endif /* SMP */ 1755 1756/* 1757 * Release any resources held by the given physical map. 1758 * Called when a pmap initialized by pmap_pinit is being released. 1759 * Should only be called if the map contains no valid mappings. 1760 */ 1761void 1762pmap_release(pmap_t pmap) 1763{ 1764 vm_page_t m, ptdpg[2*NPGPTD+1]; 1765 vm_paddr_t ma; 1766 int i; 1767#ifdef PAE 1768 int npgptd = NPGPTD + 1; 1769#else 1770 int npgptd = NPGPTD; 1771#endif 1772 1773 KASSERT(pmap->pm_stats.resident_count == 0, 1774 ("pmap_release: pmap resident count %ld != 0", 1775 pmap->pm_stats.resident_count)); 1776 PT_UPDATES_FLUSH(); 1777 1778#ifdef HAMFISTED_LOCKING 1779 mtx_lock(&createdelete_lock); 1780#endif 1781 1782 pmap_lazyfix(pmap); 1783 mtx_lock_spin(&allpmaps_lock); 1784 LIST_REMOVE(pmap, pm_list); 1785 mtx_unlock_spin(&allpmaps_lock); 1786 1787 for (i = 0; i < NPGPTD; i++) 1788 ptdpg[i] = PHYS_TO_VM_PAGE(vtophys(pmap->pm_pdir + (i*NPDEPG)) & PG_FRAME); 1789 pmap_qremove((vm_offset_t)pmap->pm_pdir, NPGPTD); 1790#ifdef PAE 1791 ptdpg[NPGPTD] = PHYS_TO_VM_PAGE(vtophys(pmap->pm_pdpt)); 1792#endif 1793 1794 for (i = 0; i < npgptd; i++) { 1795 m = ptdpg[i]; 1796 ma = VM_PAGE_TO_MACH(m); 1797 /* unpinning L1 and L2 treated the same */ 1798#if 0 1799 xen_pgd_unpin(ma); 1800#else 1801 if (i == NPGPTD) 1802 xen_pgd_unpin(ma); 1803#endif 1804#ifdef PAE 1805 if (i < NPGPTD) 1806 KASSERT(VM_PAGE_TO_MACH(m) == (pmap->pm_pdpt[i] & PG_FRAME), 1807 ("pmap_release: got wrong ptd page")); 1808#endif 1809 m->wire_count--; 1810 atomic_subtract_int(&cnt.v_wire_count, 1); 1811 vm_page_free(m); 1812 } 1813#ifdef PAE 1814 pmap_qremove((vm_offset_t)pmap->pm_pdpt, 1); 1815#endif 1816 PMAP_LOCK_DESTROY(pmap); 1817 1818#ifdef HAMFISTED_LOCKING 1819 mtx_unlock(&createdelete_lock); 1820#endif 1821} 1822 1823static int 1824kvm_size(SYSCTL_HANDLER_ARGS) 1825{ 1826 unsigned long ksize = VM_MAX_KERNEL_ADDRESS - KERNBASE; 1827 1828 return (sysctl_handle_long(oidp, &ksize, 0, req)); 1829} 1830SYSCTL_PROC(_vm, OID_AUTO, kvm_size, CTLTYPE_LONG|CTLFLAG_RD, 1831 0, 0, kvm_size, "IU", "Size of KVM"); 1832 1833static int 1834kvm_free(SYSCTL_HANDLER_ARGS) 1835{ 1836 unsigned long kfree = VM_MAX_KERNEL_ADDRESS - kernel_vm_end; 1837 1838 return (sysctl_handle_long(oidp, &kfree, 0, req)); 1839} 1840SYSCTL_PROC(_vm, OID_AUTO, kvm_free, CTLTYPE_LONG|CTLFLAG_RD, 1841 0, 0, kvm_free, "IU", "Amount of KVM free"); 1842 1843/* 1844 * grow the number of kernel page table 
entries, if needed 1845 */ 1846void 1847pmap_growkernel(vm_offset_t addr) 1848{ 1849 struct pmap *pmap; 1850 vm_paddr_t ptppaddr; 1851 vm_page_t nkpg; 1852 pd_entry_t newpdir; 1853 1854 mtx_assert(&kernel_map->system_mtx, MA_OWNED); 1855 if (kernel_vm_end == 0) { 1856 kernel_vm_end = KERNBASE; 1857 nkpt = 0; 1858 while (pdir_pde(PTD, kernel_vm_end)) { 1859 kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1); 1860 nkpt++; 1861 if (kernel_vm_end - 1 >= kernel_map->max_offset) { 1862 kernel_vm_end = kernel_map->max_offset; 1863 break; 1864 } 1865 } 1866 } 1867 addr = roundup2(addr, NBPDR); 1868 if (addr - 1 >= kernel_map->max_offset) 1869 addr = kernel_map->max_offset; 1870 while (kernel_vm_end < addr) { 1871 if (pdir_pde(PTD, kernel_vm_end)) { 1872 kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK; 1873 if (kernel_vm_end - 1 >= kernel_map->max_offset) { 1874 kernel_vm_end = kernel_map->max_offset; 1875 break; 1876 } 1877 continue; 1878 } 1879 1880 nkpg = vm_page_alloc(NULL, kernel_vm_end >> PDRSHIFT, 1881 VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | 1882 VM_ALLOC_ZERO); 1883 if (nkpg == NULL) 1884 panic("pmap_growkernel: no memory to grow kernel"); 1885 1886 nkpt++; 1887 1888 if ((nkpg->flags & PG_ZERO) == 0) 1889 pmap_zero_page(nkpg); 1890 ptppaddr = VM_PAGE_TO_PHYS(nkpg); 1891 newpdir = (pd_entry_t) (ptppaddr | PG_V | PG_RW | PG_A | PG_M); 1892 vm_page_lock_queues(); 1893 PD_SET_VA(kernel_pmap, (kernel_vm_end >> PDRSHIFT), newpdir, TRUE); 1894 mtx_lock_spin(&allpmaps_lock); 1895 LIST_FOREACH(pmap, &allpmaps, pm_list) 1896 PD_SET_VA(pmap, (kernel_vm_end >> PDRSHIFT), newpdir, TRUE); 1897 1898 mtx_unlock_spin(&allpmaps_lock); 1899 vm_page_unlock_queues(); 1900 1901 kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK; 1902 if (kernel_vm_end - 1 >= kernel_map->max_offset) { 1903 kernel_vm_end = kernel_map->max_offset; 1904 break; 1905 } 1906 } 1907} 1908 1909 1910/*************************************************** 1911 * page management routines. 
1912 ***************************************************/ 1913 1914CTASSERT(sizeof(struct pv_chunk) == PAGE_SIZE); 1915CTASSERT(_NPCM == 11); 1916CTASSERT(_NPCPV == 336); 1917 1918static __inline struct pv_chunk * 1919pv_to_chunk(pv_entry_t pv) 1920{ 1921 1922 return ((struct pv_chunk *)((uintptr_t)pv & ~(uintptr_t)PAGE_MASK)); 1923} 1924 1925#define PV_PMAP(pv) (pv_to_chunk(pv)->pc_pmap) 1926 1927#define PC_FREE0_9 0xfffffffful /* Free values for index 0 through 9 */ 1928#define PC_FREE10 0x0000fffful /* Free values for index 10 */ 1929 1930static const uint32_t pc_freemask[_NPCM] = { 1931 PC_FREE0_9, PC_FREE0_9, PC_FREE0_9, 1932 PC_FREE0_9, PC_FREE0_9, PC_FREE0_9, 1933 PC_FREE0_9, PC_FREE0_9, PC_FREE0_9, 1934 PC_FREE0_9, PC_FREE10 1935}; 1936 1937SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_count, CTLFLAG_RD, &pv_entry_count, 0, 1938 "Current number of pv entries"); 1939 1940#ifdef PV_STATS 1941static int pc_chunk_count, pc_chunk_allocs, pc_chunk_frees, pc_chunk_tryfail; 1942 1943SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_count, CTLFLAG_RD, &pc_chunk_count, 0, 1944 "Current number of pv entry chunks"); 1945SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_allocs, CTLFLAG_RD, &pc_chunk_allocs, 0, 1946 "Current number of pv entry chunks allocated"); 1947SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_frees, CTLFLAG_RD, &pc_chunk_frees, 0, 1948 "Current number of pv entry chunks frees"); 1949SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_tryfail, CTLFLAG_RD, &pc_chunk_tryfail, 0, 1950 "Number of times tried to get a chunk page but failed."); 1951 1952static long pv_entry_frees, pv_entry_allocs; 1953static int pv_entry_spare; 1954 1955SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_frees, CTLFLAG_RD, &pv_entry_frees, 0, 1956 "Current number of pv entry frees"); 1957SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_allocs, CTLFLAG_RD, &pv_entry_allocs, 0, 1958 "Current number of pv entry allocs"); 1959SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_spare, CTLFLAG_RD, &pv_entry_spare, 0, 1960 "Current number of spare pv entries"); 1961#endif 1962 1963/* 1964 * We are in a serious low memory condition. Resort to 1965 * drastic measures to free some pages so we can allocate 1966 * another pv entry chunk. 1967 */ 1968static vm_page_t 1969pmap_pv_reclaim(pmap_t locked_pmap) 1970{ 1971 struct pch newtail; 1972 struct pv_chunk *pc; 1973 pmap_t pmap; 1974 pt_entry_t *pte, tpte; 1975 pv_entry_t pv; 1976 vm_offset_t va; 1977 vm_page_t free, m, m_pc; 1978 uint32_t inuse; 1979 int bit, field, freed; 1980 1981 PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED); 1982 pmap = NULL; 1983 free = m_pc = NULL; 1984 TAILQ_INIT(&newtail); 1985 sched_pin(); 1986 while ((pc = TAILQ_FIRST(&pv_chunks)) != NULL && (pv_vafree == 0 || 1987 free == NULL)) { 1988 TAILQ_REMOVE(&pv_chunks, pc, pc_lru); 1989 if (pmap != pc->pc_pmap) { 1990 if (pmap != NULL) { 1991 pmap_invalidate_all(pmap); 1992 if (pmap != locked_pmap) 1993 PMAP_UNLOCK(pmap); 1994 } 1995 pmap = pc->pc_pmap; 1996 /* Avoid deadlock and lock recursion. */ 1997 if (pmap > locked_pmap) 1998 PMAP_LOCK(pmap); 1999 else if (pmap != locked_pmap && !PMAP_TRYLOCK(pmap)) { 2000 pmap = NULL; 2001 TAILQ_INSERT_TAIL(&newtail, pc, pc_lru); 2002 continue; 2003 } 2004 } 2005 2006 /* 2007 * Destroy every non-wired, 4 KB page mapping in the chunk. 
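 *
 * The scan below inverts each pc_map[] word against pc_freemask[] so
 * that set bits in "inuse" identify pv entries that are still
 * allocated; bsfl() picks off each such entry, and once its mapping
 * has been destroyed the bit is set again in pc_map[] to return the
 * entry to the chunk's free list.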
2008 */ 2009 freed = 0; 2010 for (field = 0; field < _NPCM; field++) { 2011 for (inuse = ~pc->pc_map[field] & pc_freemask[field]; 2012 inuse != 0; inuse &= ~(1UL << bit)) { 2013 bit = bsfl(inuse); 2014 pv = &pc->pc_pventry[field * 32 + bit]; 2015 va = pv->pv_va; 2016 pte = pmap_pte_quick(pmap, va); 2017 if ((*pte & PG_W) != 0) 2018 continue; 2019 tpte = pte_load_clear(pte); 2020 if ((tpte & PG_G) != 0) 2021 pmap_invalidate_page(pmap, va); 2022 m = PHYS_TO_VM_PAGE(tpte & PG_FRAME); 2023 if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) 2024 vm_page_dirty(m); 2025 if ((tpte & PG_A) != 0) 2026 vm_page_aflag_set(m, PGA_REFERENCED); 2027 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list); 2028 if (TAILQ_EMPTY(&m->md.pv_list)) 2029 vm_page_aflag_clear(m, PGA_WRITEABLE); 2030 pc->pc_map[field] |= 1UL << bit; 2031 pmap_unuse_pt(pmap, va, &free); 2032 freed++; 2033 } 2034 } 2035 if (freed == 0) { 2036 TAILQ_INSERT_TAIL(&newtail, pc, pc_lru); 2037 continue; 2038 } 2039 /* Every freed mapping is for a 4 KB page. */ 2040 pmap->pm_stats.resident_count -= freed; 2041 PV_STAT(pv_entry_frees += freed); 2042 PV_STAT(pv_entry_spare += freed); 2043 pv_entry_count -= freed; 2044 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 2045 for (field = 0; field < _NPCM; field++) 2046 if (pc->pc_map[field] != pc_freemask[field]) { 2047 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, 2048 pc_list); 2049 TAILQ_INSERT_TAIL(&newtail, pc, pc_lru); 2050 2051 /* 2052 * One freed pv entry in locked_pmap is 2053 * sufficient. 2054 */ 2055 if (pmap == locked_pmap) 2056 goto out; 2057 break; 2058 } 2059 if (field == _NPCM) { 2060 PV_STAT(pv_entry_spare -= _NPCPV); 2061 PV_STAT(pc_chunk_count--); 2062 PV_STAT(pc_chunk_frees++); 2063 /* Entire chunk is free; return it. */ 2064 m_pc = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)pc)); 2065 pmap_qremove((vm_offset_t)pc, 1); 2066 pmap_ptelist_free(&pv_vafree, (vm_offset_t)pc); 2067 break; 2068 } 2069 } 2070out: 2071 sched_unpin(); 2072 TAILQ_CONCAT(&pv_chunks, &newtail, pc_lru); 2073 if (pmap != NULL) { 2074 pmap_invalidate_all(pmap); 2075 if (pmap != locked_pmap) 2076 PMAP_UNLOCK(pmap); 2077 } 2078 if (m_pc == NULL && pv_vafree != 0 && free != NULL) { 2079 m_pc = free; 2080 free = m_pc->right; 2081 /* Recycle a freed page table page. */ 2082 m_pc->wire_count = 1; 2083 atomic_add_int(&cnt.v_wire_count, 1); 2084 } 2085 pmap_free_zero_pages(free); 2086 return (m_pc); 2087} 2088 2089/* 2090 * free the pv_entry back to the free list 2091 */ 2092static void 2093free_pv_entry(pmap_t pmap, pv_entry_t pv) 2094{ 2095 struct pv_chunk *pc; 2096 int idx, field, bit; 2097 2098 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2099 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2100 PV_STAT(pv_entry_frees++); 2101 PV_STAT(pv_entry_spare++); 2102 pv_entry_count--; 2103 pc = pv_to_chunk(pv); 2104 idx = pv - &pc->pc_pventry[0]; 2105 field = idx / 32; 2106 bit = idx % 32; 2107 pc->pc_map[field] |= 1ul << bit; 2108 for (idx = 0; idx < _NPCM; idx++) 2109 if (pc->pc_map[idx] != pc_freemask[idx]) { 2110 /* 2111 * 98% of the time, pc is already at the head of the 2112 * list. If it isn't already, move it to the head. 
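 * Keeping chunks that still have free slots at the head lets
 * get_pv_entry() find a free slot without walking the list.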
2113 */ 2114 if (__predict_false(TAILQ_FIRST(&pmap->pm_pvchunk) != 2115 pc)) { 2116 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 2117 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, 2118 pc_list); 2119 } 2120 return; 2121 } 2122 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 2123 free_pv_chunk(pc); 2124} 2125 2126static void 2127free_pv_chunk(struct pv_chunk *pc) 2128{ 2129 vm_page_t m; 2130 2131 TAILQ_REMOVE(&pv_chunks, pc, pc_lru); 2132 PV_STAT(pv_entry_spare -= _NPCPV); 2133 PV_STAT(pc_chunk_count--); 2134 PV_STAT(pc_chunk_frees++); 2135 /* entire chunk is free, return it */ 2136 m = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)pc)); 2137 pmap_qremove((vm_offset_t)pc, 1); 2138 vm_page_unwire(m, 0); 2139 vm_page_free(m); 2140 pmap_ptelist_free(&pv_vafree, (vm_offset_t)pc); 2141} 2142 2143/* 2144 * get a new pv_entry, allocating a block from the system 2145 * when needed. 2146 */ 2147static pv_entry_t 2148get_pv_entry(pmap_t pmap, boolean_t try) 2149{ 2150 static const struct timeval printinterval = { 60, 0 }; 2151 static struct timeval lastprint; 2152 int bit, field; 2153 pv_entry_t pv; 2154 struct pv_chunk *pc; 2155 vm_page_t m; 2156 2157 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2158 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2159 PV_STAT(pv_entry_allocs++); 2160 pv_entry_count++; 2161 if (pv_entry_count > pv_entry_high_water) 2162 if (ratecheck(&lastprint, &printinterval)) 2163 printf("Approaching the limit on PV entries, consider " 2164 "increasing either the vm.pmap.shpgperproc or the " 2165 "vm.pmap.pv_entry_max tunable.\n"); 2166retry: 2167 pc = TAILQ_FIRST(&pmap->pm_pvchunk); 2168 if (pc != NULL) { 2169 for (field = 0; field < _NPCM; field++) { 2170 if (pc->pc_map[field]) { 2171 bit = bsfl(pc->pc_map[field]); 2172 break; 2173 } 2174 } 2175 if (field < _NPCM) { 2176 pv = &pc->pc_pventry[field * 32 + bit]; 2177 pc->pc_map[field] &= ~(1ul << bit); 2178 /* If this was the last item, move it to tail */ 2179 for (field = 0; field < _NPCM; field++) 2180 if (pc->pc_map[field] != 0) { 2181 PV_STAT(pv_entry_spare--); 2182 return (pv); /* not full, return */ 2183 } 2184 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 2185 TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list); 2186 PV_STAT(pv_entry_spare--); 2187 return (pv); 2188 } 2189 } 2190 /* 2191 * Access to the ptelist "pv_vafree" is synchronized by the page 2192 * queues lock. If "pv_vafree" is currently non-empty, it will 2193 * remain non-empty until pmap_ptelist_alloc() completes. 
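 * Hence checking "pv_vafree" here, before the chunk page is
 * allocated, is sufficient to guarantee a kernel virtual address
 * for the pmap_qenter() below.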
2194 */ 2195 if (pv_vafree == 0 || (m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | 2196 VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) { 2197 if (try) { 2198 pv_entry_count--; 2199 PV_STAT(pc_chunk_tryfail++); 2200 return (NULL); 2201 } 2202 m = pmap_pv_reclaim(pmap); 2203 if (m == NULL) 2204 goto retry; 2205 } 2206 PV_STAT(pc_chunk_count++); 2207 PV_STAT(pc_chunk_allocs++); 2208 pc = (struct pv_chunk *)pmap_ptelist_alloc(&pv_vafree); 2209 pmap_qenter((vm_offset_t)pc, &m, 1); 2210 if ((m->flags & PG_ZERO) == 0) 2211 pagezero(pc); 2212 pc->pc_pmap = pmap; 2213 pc->pc_map[0] = pc_freemask[0] & ~1ul; /* preallocated bit 0 */ 2214 for (field = 1; field < _NPCM; field++) 2215 pc->pc_map[field] = pc_freemask[field]; 2216 TAILQ_INSERT_TAIL(&pv_chunks, pc, pc_lru); 2217 pv = &pc->pc_pventry[0]; 2218 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list); 2219 PV_STAT(pv_entry_spare += _NPCPV - 1); 2220 return (pv); 2221} 2222 2223static __inline pv_entry_t 2224pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va) 2225{ 2226 pv_entry_t pv; 2227 2228 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2229 TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) { 2230 if (pmap == PV_PMAP(pv) && va == pv->pv_va) { 2231 TAILQ_REMOVE(&pvh->pv_list, pv, pv_list); 2232 break; 2233 } 2234 } 2235 return (pv); 2236} 2237 2238static void 2239pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va) 2240{ 2241 pv_entry_t pv; 2242 2243 pv = pmap_pvh_remove(pvh, pmap, va); 2244 KASSERT(pv != NULL, ("pmap_pvh_free: pv not found")); 2245 free_pv_entry(pmap, pv); 2246} 2247 2248static void 2249pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va) 2250{ 2251 2252 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2253 pmap_pvh_free(&m->md, pmap, va); 2254 if (TAILQ_EMPTY(&m->md.pv_list)) 2255 vm_page_aflag_clear(m, PGA_WRITEABLE); 2256} 2257 2258/* 2259 * Conditionally create a pv entry. 2260 */ 2261static boolean_t 2262pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m) 2263{ 2264 pv_entry_t pv; 2265 2266 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2267 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2268 if (pv_entry_count < pv_entry_high_water && 2269 (pv = get_pv_entry(pmap, TRUE)) != NULL) { 2270 pv->pv_va = va; 2271 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list); 2272 return (TRUE); 2273 } else 2274 return (FALSE); 2275} 2276 2277/* 2278 * pmap_remove_pte: do the things to unmap a page in a process 2279 */ 2280static int 2281pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t va, vm_page_t *free) 2282{ 2283 pt_entry_t oldpte; 2284 vm_page_t m; 2285 2286 CTR3(KTR_PMAP, "pmap_remove_pte: pmap=%p *ptq=0x%x va=0x%x", 2287 pmap, (u_long)*ptq, va); 2288 2289 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2290 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2291 oldpte = *ptq; 2292 PT_SET_VA_MA(ptq, 0, TRUE); 2293 if (oldpte & PG_W) 2294 pmap->pm_stats.wired_count -= 1; 2295 /* 2296 * Machines that don't support invlpg, also don't support 2297 * PG_G. 
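 * A global (PG_G) mapping can therefore be invalidated immediately
 * below with a single-page invalidation of the kernel pmap.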
2298 */ 2299 if (oldpte & PG_G) 2300 pmap_invalidate_page(kernel_pmap, va); 2301 pmap->pm_stats.resident_count -= 1; 2302 if (oldpte & PG_MANAGED) { 2303 m = PHYS_TO_VM_PAGE(xpmap_mtop(oldpte) & PG_FRAME); 2304 if ((oldpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) 2305 vm_page_dirty(m); 2306 if (oldpte & PG_A) 2307 vm_page_aflag_set(m, PGA_REFERENCED); 2308 pmap_remove_entry(pmap, m, va); 2309 } 2310 return (pmap_unuse_pt(pmap, va, free)); 2311} 2312 2313/* 2314 * Remove a single page from a process address space 2315 */ 2316static void 2317pmap_remove_page(pmap_t pmap, vm_offset_t va, vm_page_t *free) 2318{ 2319 pt_entry_t *pte; 2320 2321 CTR2(KTR_PMAP, "pmap_remove_page: pmap=%p va=0x%x", 2322 pmap, va); 2323 2324 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2325 KASSERT(curthread->td_pinned > 0, ("curthread not pinned")); 2326 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2327 if ((pte = pmap_pte_quick(pmap, va)) == NULL || (*pte & PG_V) == 0) 2328 return; 2329 pmap_remove_pte(pmap, pte, va, free); 2330 pmap_invalidate_page(pmap, va); 2331 if (*PMAP1) 2332 PT_SET_MA(PADDR1, 0); 2333 2334} 2335 2336/* 2337 * Remove the given range of addresses from the specified map. 2338 * 2339 * It is assumed that the start and end are properly 2340 * rounded to the page size. 2341 */ 2342void 2343pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) 2344{ 2345 vm_offset_t pdnxt; 2346 pd_entry_t ptpaddr; 2347 pt_entry_t *pte; 2348 vm_page_t free = NULL; 2349 int anyvalid; 2350 2351 CTR3(KTR_PMAP, "pmap_remove: pmap=%p sva=0x%x eva=0x%x", 2352 pmap, sva, eva); 2353 2354 /* 2355 * Perform an unsynchronized read. This is, however, safe. 2356 */ 2357 if (pmap->pm_stats.resident_count == 0) 2358 return; 2359 2360 anyvalid = 0; 2361 2362 vm_page_lock_queues(); 2363 sched_pin(); 2364 PMAP_LOCK(pmap); 2365 2366 /* 2367 * special handling of removing one page. a very 2368 * common operation and easy to short circuit some 2369 * code. 2370 */ 2371 if ((sva + PAGE_SIZE == eva) && 2372 ((pmap->pm_pdir[(sva >> PDRSHIFT)] & PG_PS) == 0)) { 2373 pmap_remove_page(pmap, sva, &free); 2374 goto out; 2375 } 2376 2377 for (; sva < eva; sva = pdnxt) { 2378 u_int pdirindex; 2379 2380 /* 2381 * Calculate index for next page table. 2382 */ 2383 pdnxt = (sva + NBPDR) & ~PDRMASK; 2384 if (pdnxt < sva) 2385 pdnxt = eva; 2386 if (pmap->pm_stats.resident_count == 0) 2387 break; 2388 2389 pdirindex = sva >> PDRSHIFT; 2390 ptpaddr = pmap->pm_pdir[pdirindex]; 2391 2392 /* 2393 * Weed out invalid mappings. Note: we assume that the page 2394 * directory table is always allocated, and in kernel virtual. 2395 */ 2396 if (ptpaddr == 0) 2397 continue; 2398 2399 /* 2400 * Check for large page. 2401 */ 2402 if ((ptpaddr & PG_PS) != 0) { 2403 PD_CLEAR_VA(pmap, pdirindex, TRUE); 2404 pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE; 2405 anyvalid = 1; 2406 continue; 2407 } 2408 2409 /* 2410 * Limit our scan to either the end of the va represented 2411 * by the current page table page, or to the end of the 2412 * range being removed. 2413 */ 2414 if (pdnxt > eva) 2415 pdnxt = eva; 2416 2417 for (pte = pmap_pte_quick(pmap, sva); sva != pdnxt; pte++, 2418 sva += PAGE_SIZE) { 2419 if ((*pte & PG_V) == 0) 2420 continue; 2421 2422 /* 2423 * The TLB entry for a PG_G mapping is invalidated 2424 * by pmap_remove_pte(). 
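 * All other removed mappings are flushed by the single
 * pmap_invalidate_all() that is performed when "anyvalid" is set.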
2425 */ 2426 if ((*pte & PG_G) == 0) 2427 anyvalid = 1; 2428 if (pmap_remove_pte(pmap, pte, sva, &free)) 2429 break; 2430 } 2431 } 2432 PT_UPDATES_FLUSH(); 2433 if (*PMAP1) 2434 PT_SET_VA_MA(PMAP1, 0, TRUE); 2435out: 2436 if (anyvalid) 2437 pmap_invalidate_all(pmap); 2438 sched_unpin(); 2439 vm_page_unlock_queues(); 2440 PMAP_UNLOCK(pmap); 2441 pmap_free_zero_pages(free); 2442} 2443 2444/* 2445 * Routine: pmap_remove_all 2446 * Function: 2447 * Removes this physical page from 2448 * all physical maps in which it resides. 2449 * Reflects back modify bits to the pager. 2450 * 2451 * Notes: 2452 * Original versions of this routine were very 2453 * inefficient because they iteratively called 2454 * pmap_remove (slow...) 2455 */ 2456 2457void 2458pmap_remove_all(vm_page_t m) 2459{ 2460 pv_entry_t pv; 2461 pmap_t pmap; 2462 pt_entry_t *pte, tpte; 2463 vm_page_t free; 2464 2465 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 2466 ("pmap_remove_all: page %p is not managed", m)); 2467 free = NULL; 2468 vm_page_lock_queues(); 2469 sched_pin(); 2470 while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) { 2471 pmap = PV_PMAP(pv); 2472 PMAP_LOCK(pmap); 2473 pmap->pm_stats.resident_count--; 2474 pte = pmap_pte_quick(pmap, pv->pv_va); 2475 tpte = *pte; 2476 PT_SET_VA_MA(pte, 0, TRUE); 2477 if (tpte & PG_W) 2478 pmap->pm_stats.wired_count--; 2479 if (tpte & PG_A) 2480 vm_page_aflag_set(m, PGA_REFERENCED); 2481 2482 /* 2483 * Update the vm_page_t clean and reference bits. 2484 */ 2485 if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) 2486 vm_page_dirty(m); 2487 pmap_unuse_pt(pmap, pv->pv_va, &free); 2488 pmap_invalidate_page(pmap, pv->pv_va); 2489 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list); 2490 free_pv_entry(pmap, pv); 2491 PMAP_UNLOCK(pmap); 2492 } 2493 vm_page_aflag_clear(m, PGA_WRITEABLE); 2494 PT_UPDATES_FLUSH(); 2495 if (*PMAP1) 2496 PT_SET_MA(PADDR1, 0); 2497 sched_unpin(); 2498 vm_page_unlock_queues(); 2499 pmap_free_zero_pages(free); 2500} 2501 2502/* 2503 * Set the physical protection on the 2504 * specified range of this map as requested. 2505 */ 2506void 2507pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot) 2508{ 2509 vm_offset_t pdnxt; 2510 pd_entry_t ptpaddr; 2511 pt_entry_t *pte; 2512 int anychanged; 2513 2514 CTR4(KTR_PMAP, "pmap_protect: pmap=%p sva=0x%x eva=0x%x prot=0x%x", 2515 pmap, sva, eva, prot); 2516 2517 if ((prot & VM_PROT_READ) == VM_PROT_NONE) { 2518 pmap_remove(pmap, sva, eva); 2519 return; 2520 } 2521 2522#ifdef PAE 2523 if ((prot & (VM_PROT_WRITE|VM_PROT_EXECUTE)) == 2524 (VM_PROT_WRITE|VM_PROT_EXECUTE)) 2525 return; 2526#else 2527 if (prot & VM_PROT_WRITE) 2528 return; 2529#endif 2530 2531 anychanged = 0; 2532 2533 vm_page_lock_queues(); 2534 sched_pin(); 2535 PMAP_LOCK(pmap); 2536 for (; sva < eva; sva = pdnxt) { 2537 pt_entry_t obits, pbits; 2538 u_int pdirindex; 2539 2540 pdnxt = (sva + NBPDR) & ~PDRMASK; 2541 if (pdnxt < sva) 2542 pdnxt = eva; 2543 2544 pdirindex = sva >> PDRSHIFT; 2545 ptpaddr = pmap->pm_pdir[pdirindex]; 2546 2547 /* 2548 * Weed out invalid mappings. Note: we assume that the page 2549 * directory table is always allocated, and in kernel virtual. 2550 */ 2551 if (ptpaddr == 0) 2552 continue; 2553 2554 /* 2555 * Check for large page. 
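 * Write permission is removed from a 2/4MB page by clearing PG_M
 * and PG_RW directly in the page directory entry.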
2556 */ 2557 if ((ptpaddr & PG_PS) != 0) { 2558 if ((prot & VM_PROT_WRITE) == 0) 2559 pmap->pm_pdir[pdirindex] &= ~(PG_M|PG_RW); 2560#ifdef PAE 2561 if ((prot & VM_PROT_EXECUTE) == 0) 2562 pmap->pm_pdir[pdirindex] |= pg_nx; 2563#endif 2564 anychanged = 1; 2565 continue; 2566 } 2567 2568 if (pdnxt > eva) 2569 pdnxt = eva; 2570 2571 for (pte = pmap_pte_quick(pmap, sva); sva != pdnxt; pte++, 2572 sva += PAGE_SIZE) { 2573 vm_page_t m; 2574 2575retry: 2576 /* 2577 * Regardless of whether a pte is 32 or 64 bits in 2578 * size, PG_RW, PG_A, and PG_M are among the least 2579 * significant 32 bits. 2580 */ 2581 obits = pbits = *pte; 2582 if ((pbits & PG_V) == 0) 2583 continue; 2584 2585 if ((prot & VM_PROT_WRITE) == 0) { 2586 if ((pbits & (PG_MANAGED | PG_M | PG_RW)) == 2587 (PG_MANAGED | PG_M | PG_RW)) { 2588 m = PHYS_TO_VM_PAGE(xpmap_mtop(pbits) & 2589 PG_FRAME); 2590 vm_page_dirty(m); 2591 } 2592 pbits &= ~(PG_RW | PG_M); 2593 } 2594#ifdef PAE 2595 if ((prot & VM_PROT_EXECUTE) == 0) 2596 pbits |= pg_nx; 2597#endif 2598 2599 if (pbits != obits) { 2600 obits = *pte; 2601 PT_SET_VA_MA(pte, pbits, TRUE); 2602 if (*pte != pbits) 2603 goto retry; 2604 if (obits & PG_G) 2605 pmap_invalidate_page(pmap, sva); 2606 else 2607 anychanged = 1; 2608 } 2609 } 2610 } 2611 PT_UPDATES_FLUSH(); 2612 if (*PMAP1) 2613 PT_SET_VA_MA(PMAP1, 0, TRUE); 2614 if (anychanged) 2615 pmap_invalidate_all(pmap); 2616 sched_unpin(); 2617 vm_page_unlock_queues(); 2618 PMAP_UNLOCK(pmap); 2619} 2620 2621/* 2622 * Insert the given physical page (p) at 2623 * the specified virtual address (v) in the 2624 * target physical map with the protection requested. 2625 * 2626 * If specified, the page will be wired down, meaning 2627 * that the related pte can not be reclaimed. 2628 * 2629 * NB: This is the only routine which MAY NOT lazy-evaluate 2630 * or lose information. That is, this routine must actually 2631 * insert this page into the given map NOW. 2632 */ 2633void 2634pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m, 2635 vm_prot_t prot, boolean_t wired) 2636{ 2637 pd_entry_t *pde; 2638 pt_entry_t *pte; 2639 pt_entry_t newpte, origpte; 2640 pv_entry_t pv; 2641 vm_paddr_t opa, pa; 2642 vm_page_t mpte, om; 2643 boolean_t invlva; 2644 2645 CTR6(KTR_PMAP, "pmap_enter: pmap=%08p va=0x%08x access=0x%x ma=0x%08x prot=0x%x wired=%d", 2646 pmap, va, access, VM_PAGE_TO_MACH(m), prot, wired); 2647 va = trunc_page(va); 2648 KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig")); 2649 KASSERT(va < UPT_MIN_ADDRESS || va >= UPT_MAX_ADDRESS, 2650 ("pmap_enter: invalid to pmap_enter page table pages (va: 0x%x)", 2651 va)); 2652 KASSERT((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) != 0 || 2653 VM_OBJECT_LOCKED(m->object), 2654 ("pmap_enter: page %p is not busy", m)); 2655 2656 mpte = NULL; 2657 2658 vm_page_lock_queues(); 2659 PMAP_LOCK(pmap); 2660 sched_pin(); 2661 2662 /* 2663 * In the case that a page table page is not 2664 * resident, we are creating it here. 
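 * Only user addresses need this; kernel page table pages are kept
 * resident by pmap_growkernel().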
2665 */ 2666 if (va < VM_MAXUSER_ADDRESS) { 2667 mpte = pmap_allocpte(pmap, va, M_WAITOK); 2668 } 2669 2670 pde = pmap_pde(pmap, va); 2671 if ((*pde & PG_PS) != 0) 2672 panic("pmap_enter: attempted pmap_enter on 4MB page"); 2673 pte = pmap_pte_quick(pmap, va); 2674 2675 /* 2676 * Page Directory table entry not valid, we need a new PT page 2677 */ 2678 if (pte == NULL) { 2679 panic("pmap_enter: invalid page directory pdir=%#jx, va=%#x", 2680 (uintmax_t)pmap->pm_pdir[va >> PDRSHIFT], va); 2681 } 2682 2683 pa = VM_PAGE_TO_PHYS(m); 2684 om = NULL; 2685 opa = origpte = 0; 2686 2687#if 0 2688 KASSERT((*pte & PG_V) || (*pte == 0), ("address set but not valid pte=%p *pte=0x%016jx", 2689 pte, *pte)); 2690#endif 2691 origpte = *pte; 2692 if (origpte) 2693 origpte = xpmap_mtop(origpte); 2694 opa = origpte & PG_FRAME; 2695 2696 /* 2697 * Mapping has not changed, must be protection or wiring change. 2698 */ 2699 if (origpte && (opa == pa)) { 2700 /* 2701 * Wiring change, just update stats. We don't worry about 2702 * wiring PT pages as they remain resident as long as there 2703 * are valid mappings in them. Hence, if a user page is wired, 2704 * the PT page will be also. 2705 */ 2706 if (wired && ((origpte & PG_W) == 0)) 2707 pmap->pm_stats.wired_count++; 2708 else if (!wired && (origpte & PG_W)) 2709 pmap->pm_stats.wired_count--; 2710 2711 /* 2712 * Remove extra pte reference 2713 */ 2714 if (mpte) 2715 mpte->wire_count--; 2716 2717 if (origpte & PG_MANAGED) { 2718 om = m; 2719 pa |= PG_MANAGED; 2720 } 2721 goto validate; 2722 } 2723 2724 pv = NULL; 2725 2726 /* 2727 * Mapping has changed, invalidate old range and fall through to 2728 * handle validating new mapping. 2729 */ 2730 if (opa) { 2731 if (origpte & PG_W) 2732 pmap->pm_stats.wired_count--; 2733 if (origpte & PG_MANAGED) { 2734 om = PHYS_TO_VM_PAGE(opa); 2735 pv = pmap_pvh_remove(&om->md, pmap, va); 2736 } else if (va < VM_MAXUSER_ADDRESS) 2737 printf("va=0x%x is unmanaged :-( \n", va); 2738 2739 if (mpte != NULL) { 2740 mpte->wire_count--; 2741 KASSERT(mpte->wire_count > 0, 2742 ("pmap_enter: missing reference to page table page," 2743 " va: 0x%x", va)); 2744 } 2745 } else 2746 pmap->pm_stats.resident_count++; 2747 2748 /* 2749 * Enter on the PV list if part of our managed memory. 2750 */ 2751 if ((m->oflags & VPO_UNMANAGED) == 0) { 2752 KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva, 2753 ("pmap_enter: managed mapping within the clean submap")); 2754 if (pv == NULL) 2755 pv = get_pv_entry(pmap, FALSE); 2756 pv->pv_va = va; 2757 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list); 2758 pa |= PG_MANAGED; 2759 } else if (pv != NULL) 2760 free_pv_entry(pmap, pv); 2761 2762 /* 2763 * Increment counters 2764 */ 2765 if (wired) 2766 pmap->pm_stats.wired_count++; 2767 2768validate: 2769 /* 2770 * Now validate mapping with desired protection/wiring. 2771 */ 2772 newpte = (pt_entry_t)(pa | PG_V); 2773 if ((prot & VM_PROT_WRITE) != 0) { 2774 newpte |= PG_RW; 2775 if ((newpte & PG_MANAGED) != 0) 2776 vm_page_aflag_set(m, PGA_WRITEABLE); 2777 } 2778#ifdef PAE 2779 if ((prot & VM_PROT_EXECUTE) == 0) 2780 newpte |= pg_nx; 2781#endif 2782 if (wired) 2783 newpte |= PG_W; 2784 if (va < VM_MAXUSER_ADDRESS) 2785 newpte |= PG_U; 2786 if (pmap == kernel_pmap) 2787 newpte |= pgeflag; 2788 2789 critical_enter(); 2790 /* 2791 * if the mapping or permission bits are different, we need 2792 * to update the pte. 
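 * PG_M and PG_A are masked out of the comparison because the
 * hardware may set them at any time.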
2793 */ 2794 if ((origpte & ~(PG_M|PG_A)) != newpte) { 2795 if (origpte) { 2796 invlva = FALSE; 2797 origpte = *pte; 2798 PT_SET_VA(pte, newpte | PG_A, FALSE); 2799 if (origpte & PG_A) { 2800 if (origpte & PG_MANAGED) 2801 vm_page_aflag_set(om, PGA_REFERENCED); 2802 if (opa != VM_PAGE_TO_PHYS(m)) 2803 invlva = TRUE; 2804#ifdef PAE 2805 if ((origpte & PG_NX) == 0 && 2806 (newpte & PG_NX) != 0) 2807 invlva = TRUE; 2808#endif 2809 } 2810 if ((origpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) { 2811 if ((origpte & PG_MANAGED) != 0) 2812 vm_page_dirty(om); 2813 if ((prot & VM_PROT_WRITE) == 0) 2814 invlva = TRUE; 2815 } 2816 if ((origpte & PG_MANAGED) != 0 && 2817 TAILQ_EMPTY(&om->md.pv_list)) 2818 vm_page_aflag_clear(om, PGA_WRITEABLE); 2819 if (invlva) 2820 pmap_invalidate_page(pmap, va); 2821 } else{ 2822 PT_SET_VA(pte, newpte | PG_A, FALSE); 2823 } 2824 2825 } 2826 PT_UPDATES_FLUSH(); 2827 critical_exit(); 2828 if (*PMAP1) 2829 PT_SET_VA_MA(PMAP1, 0, TRUE); 2830 sched_unpin(); 2831 vm_page_unlock_queues(); 2832 PMAP_UNLOCK(pmap); 2833} 2834 2835/* 2836 * Maps a sequence of resident pages belonging to the same object. 2837 * The sequence begins with the given page m_start. This page is 2838 * mapped at the given virtual address start. Each subsequent page is 2839 * mapped at a virtual address that is offset from start by the same 2840 * amount as the page is offset from m_start within the object. The 2841 * last page in the sequence is the page with the largest offset from 2842 * m_start that can be mapped at a virtual address less than the given 2843 * virtual address end. Not every virtual page between start and end 2844 * is mapped; only those for which a resident page exists with the 2845 * corresponding offset from m_start are mapped. 2846 */ 2847void 2848pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end, 2849 vm_page_t m_start, vm_prot_t prot) 2850{ 2851 vm_page_t m, mpte; 2852 vm_pindex_t diff, psize; 2853 multicall_entry_t mcl[16]; 2854 multicall_entry_t *mclp = mcl; 2855 int error, count = 0; 2856 2857 VM_OBJECT_LOCK_ASSERT(m_start->object, MA_OWNED); 2858 psize = atop(end - start); 2859 mpte = NULL; 2860 m = m_start; 2861 vm_page_lock_queues(); 2862 PMAP_LOCK(pmap); 2863 while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) { 2864 mpte = pmap_enter_quick_locked(&mclp, &count, pmap, start + ptoa(diff), m, 2865 prot, mpte); 2866 m = TAILQ_NEXT(m, listq); 2867 if (count == 16) { 2868 error = HYPERVISOR_multicall(mcl, count); 2869 KASSERT(error == 0, ("bad multicall %d", error)); 2870 mclp = mcl; 2871 count = 0; 2872 } 2873 } 2874 if (count) { 2875 error = HYPERVISOR_multicall(mcl, count); 2876 KASSERT(error == 0, ("bad multicall %d", error)); 2877 } 2878 vm_page_unlock_queues(); 2879 PMAP_UNLOCK(pmap); 2880} 2881 2882/* 2883 * this code makes some *MAJOR* assumptions: 2884 * 1. Current pmap & pmap exists. 2885 * 2. Not wired. 2886 * 3. Read access. 2887 * 4. No page table pages. 2888 * but is *MUCH* faster than pmap_enter... 
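 * The mapping is created read-only and is simply skipped if the
 * required resources (a page table page or pv entry) are not
 * immediately available.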
2889 */ 2890 2891void 2892pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot) 2893{ 2894 multicall_entry_t mcl, *mclp; 2895 int count = 0; 2896 mclp = &mcl; 2897 2898 CTR4(KTR_PMAP, "pmap_enter_quick: pmap=%p va=0x%x m=%p prot=0x%x", 2899 pmap, va, m, prot); 2900 2901 vm_page_lock_queues(); 2902 PMAP_LOCK(pmap); 2903 (void)pmap_enter_quick_locked(&mclp, &count, pmap, va, m, prot, NULL); 2904 if (count) 2905 HYPERVISOR_multicall(&mcl, count); 2906 vm_page_unlock_queues(); 2907 PMAP_UNLOCK(pmap); 2908} 2909 2910#ifdef notyet 2911void 2912pmap_enter_quick_range(pmap_t pmap, vm_offset_t *addrs, vm_page_t *pages, vm_prot_t *prots, int count) 2913{ 2914 int i, error, index = 0; 2915 multicall_entry_t mcl[16]; 2916 multicall_entry_t *mclp = mcl; 2917 2918 PMAP_LOCK(pmap); 2919 for (i = 0; i < count; i++, addrs++, pages++, prots++) { 2920 if (!pmap_is_prefaultable_locked(pmap, *addrs)) 2921 continue; 2922 2923 (void) pmap_enter_quick_locked(&mclp, &index, pmap, *addrs, *pages, *prots, NULL); 2924 if (index == 16) { 2925 error = HYPERVISOR_multicall(mcl, index); 2926 mclp = mcl; 2927 index = 0; 2928 KASSERT(error == 0, ("bad multicall %d", error)); 2929 } 2930 } 2931 if (index) { 2932 error = HYPERVISOR_multicall(mcl, index); 2933 KASSERT(error == 0, ("bad multicall %d", error)); 2934 } 2935 2936 PMAP_UNLOCK(pmap); 2937} 2938#endif 2939 2940static vm_page_t 2941pmap_enter_quick_locked(multicall_entry_t **mclpp, int *count, pmap_t pmap, vm_offset_t va, vm_page_t m, 2942 vm_prot_t prot, vm_page_t mpte) 2943{ 2944 pt_entry_t *pte; 2945 vm_paddr_t pa; 2946 vm_page_t free; 2947 multicall_entry_t *mcl = *mclpp; 2948 2949 KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva || 2950 (m->oflags & VPO_UNMANAGED) != 0, 2951 ("pmap_enter_quick_locked: managed mapping within the clean submap")); 2952 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2953 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2954 2955 /* 2956 * In the case that a page table page is not 2957 * resident, we are creating it here. 2958 */ 2959 if (va < VM_MAXUSER_ADDRESS) { 2960 u_int ptepindex; 2961 pd_entry_t ptema; 2962 2963 /* 2964 * Calculate pagetable page index 2965 */ 2966 ptepindex = va >> PDRSHIFT; 2967 if (mpte && (mpte->pindex == ptepindex)) { 2968 mpte->wire_count++; 2969 } else { 2970 /* 2971 * Get the page directory entry 2972 */ 2973 ptema = pmap->pm_pdir[ptepindex]; 2974 2975 /* 2976 * If the page table page is mapped, we just increment 2977 * the hold count, and activate it. 2978 */ 2979 if (ptema & PG_V) { 2980 if (ptema & PG_PS) 2981 panic("pmap_enter_quick: unexpected mapping into 4MB page"); 2982 mpte = PHYS_TO_VM_PAGE(xpmap_mtop(ptema) & PG_FRAME); 2983 mpte->wire_count++; 2984 } else { 2985 mpte = _pmap_allocpte(pmap, ptepindex, 2986 M_NOWAIT); 2987 if (mpte == NULL) 2988 return (mpte); 2989 } 2990 } 2991 } else { 2992 mpte = NULL; 2993 } 2994 2995 /* 2996 * This call to vtopte makes the assumption that we are 2997 * entering the page into the current pmap. In order to support 2998 * quick entry into any pmap, one would likely use pmap_pte_quick. 2999 * But that isn't as quick as vtopte. 3000 */ 3001 KASSERT(pmap_is_current(pmap), ("entering pages in non-current pmap")); 3002 pte = vtopte(va); 3003 if (*pte & PG_V) { 3004 if (mpte != NULL) { 3005 mpte->wire_count--; 3006 mpte = NULL; 3007 } 3008 return (mpte); 3009 } 3010 3011 /* 3012 * Enter on the PV list if part of our managed memory. 
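 * If no pv entry can be allocated, the reference on the page table
 * page is dropped and the mapping is abandoned.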
3013 */ 3014 if ((m->oflags & VPO_UNMANAGED) == 0 && 3015 !pmap_try_insert_pv_entry(pmap, va, m)) { 3016 if (mpte != NULL) { 3017 free = NULL; 3018 if (pmap_unwire_pte_hold(pmap, mpte, &free)) { 3019 pmap_invalidate_page(pmap, va); 3020 pmap_free_zero_pages(free); 3021 } 3022 3023 mpte = NULL; 3024 } 3025 return (mpte); 3026 } 3027 3028 /* 3029 * Increment counters 3030 */ 3031 pmap->pm_stats.resident_count++; 3032 3033 pa = VM_PAGE_TO_PHYS(m); 3034#ifdef PAE 3035 if ((prot & VM_PROT_EXECUTE) == 0) 3036 pa |= pg_nx; 3037#endif 3038 3039#if 0 3040 /* 3041 * Now validate mapping with RO protection 3042 */ 3043 if ((m->oflags & VPO_UNMANAGED) != 0) 3044 pte_store(pte, pa | PG_V | PG_U); 3045 else 3046 pte_store(pte, pa | PG_V | PG_U | PG_MANAGED); 3047#else 3048 /* 3049 * Now validate mapping with RO protection 3050 */ 3051 if ((m->oflags & VPO_UNMANAGED) != 0) 3052 pa = xpmap_ptom(pa | PG_V | PG_U); 3053 else 3054 pa = xpmap_ptom(pa | PG_V | PG_U | PG_MANAGED); 3055 3056 mcl->op = __HYPERVISOR_update_va_mapping; 3057 mcl->args[0] = va; 3058 mcl->args[1] = (uint32_t)(pa & 0xffffffff); 3059 mcl->args[2] = (uint32_t)(pa >> 32); 3060 mcl->args[3] = 0; 3061 *mclpp = mcl + 1; 3062 *count = *count + 1; 3063#endif 3064 return (mpte); 3065} 3066 3067/* 3068 * Make a temporary mapping for a physical address. This is only intended 3069 * to be used for panic dumps. 3070 */ 3071void * 3072pmap_kenter_temporary(vm_paddr_t pa, int i) 3073{ 3074 vm_offset_t va; 3075 vm_paddr_t ma = xpmap_ptom(pa); 3076 3077 va = (vm_offset_t)crashdumpmap + (i * PAGE_SIZE); 3078 PT_SET_MA(va, (ma & ~PAGE_MASK) | PG_V | pgeflag); 3079 invlpg(va); 3080 return ((void *)crashdumpmap); 3081} 3082 3083/* 3084 * This code maps large physical mmap regions into the 3085 * processor address space. Note that some shortcuts 3086 * are taken, but the code works. 3087 */ 3088void 3089pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object, 3090 vm_pindex_t pindex, vm_size_t size) 3091{ 3092 pd_entry_t *pde; 3093 vm_paddr_t pa, ptepa; 3094 vm_page_t p; 3095 int pat_mode; 3096 3097 VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); 3098 KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG, 3099 ("pmap_object_init_pt: non-device object")); 3100 if (pseflag && 3101 (addr & (NBPDR - 1)) == 0 && (size & (NBPDR - 1)) == 0) { 3102 if (!vm_object_populate(object, pindex, pindex + atop(size))) 3103 return; 3104 p = vm_page_lookup(object, pindex); 3105 KASSERT(p->valid == VM_PAGE_BITS_ALL, 3106 ("pmap_object_init_pt: invalid page %p", p)); 3107 pat_mode = p->md.pat_mode; 3108 3109 /* 3110 * Abort the mapping if the first page is not physically 3111 * aligned to a 2/4MB page boundary. 3112 */ 3113 ptepa = VM_PAGE_TO_PHYS(p); 3114 if (ptepa & (NBPDR - 1)) 3115 return; 3116 3117 /* 3118 * Skip the first page. Abort the mapping if the rest of 3119 * the pages are not physically contiguous or have differing 3120 * memory attributes. 3121 */ 3122 p = TAILQ_NEXT(p, listq); 3123 for (pa = ptepa + PAGE_SIZE; pa < ptepa + size; 3124 pa += PAGE_SIZE) { 3125 KASSERT(p->valid == VM_PAGE_BITS_ALL, 3126 ("pmap_object_init_pt: invalid page %p", p)); 3127 if (pa != VM_PAGE_TO_PHYS(p) || 3128 pat_mode != p->md.pat_mode) 3129 return; 3130 p = TAILQ_NEXT(p, listq); 3131 } 3132 3133 /* 3134 * Map using 2/4MB pages. Since "ptepa" is 2/4M aligned and 3135 * "size" is a multiple of 2/4M, adding the PAT setting to 3136 * "pa" will not affect the termination of this loop. 
3137 */ 3138 PMAP_LOCK(pmap); 3139 for (pa = ptepa | pmap_cache_bits(pat_mode, 1); pa < ptepa + 3140 size; pa += NBPDR) { 3141 pde = pmap_pde(pmap, addr); 3142 if (*pde == 0) { 3143 pde_store(pde, pa | PG_PS | PG_M | PG_A | 3144 PG_U | PG_RW | PG_V); 3145 pmap->pm_stats.resident_count += NBPDR / 3146 PAGE_SIZE; 3147 pmap_pde_mappings++; 3148 } 3149 /* Else continue on if the PDE is already valid. */ 3150 addr += NBPDR; 3151 } 3152 PMAP_UNLOCK(pmap); 3153 } 3154} 3155 3156/* 3157 * Routine: pmap_change_wiring 3158 * Function: Change the wiring attribute for a map/virtual-address 3159 * pair. 3160 * In/out conditions: 3161 * The mapping must already exist in the pmap. 3162 */ 3163void 3164pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired) 3165{ 3166 pt_entry_t *pte; 3167 3168 vm_page_lock_queues(); 3169 PMAP_LOCK(pmap); 3170 pte = pmap_pte(pmap, va); 3171 3172 if (wired && !pmap_pte_w(pte)) { 3173 PT_SET_VA_MA((pte), *(pte) | PG_W, TRUE); 3174 pmap->pm_stats.wired_count++; 3175 } else if (!wired && pmap_pte_w(pte)) { 3176 PT_SET_VA_MA((pte), *(pte) & ~PG_W, TRUE); 3177 pmap->pm_stats.wired_count--; 3178 } 3179 3180 /* 3181 * Wiring is not a hardware characteristic so there is no need to 3182 * invalidate TLB. 3183 */ 3184 pmap_pte_release(pte); 3185 PMAP_UNLOCK(pmap); 3186 vm_page_unlock_queues(); 3187} 3188 3189 3190 3191/* 3192 * Copy the range specified by src_addr/len 3193 * from the source map to the range dst_addr/len 3194 * in the destination map. 3195 * 3196 * This routine is only advisory and need not do anything. 3197 */ 3198 3199void 3200pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len, 3201 vm_offset_t src_addr) 3202{ 3203 vm_page_t free; 3204 vm_offset_t addr; 3205 vm_offset_t end_addr = src_addr + len; 3206 vm_offset_t pdnxt; 3207 3208 if (dst_addr != src_addr) 3209 return; 3210 3211 if (!pmap_is_current(src_pmap)) { 3212 CTR2(KTR_PMAP, 3213 "pmap_copy, skipping: pdir[PTDPTDI]=0x%jx PTDpde[0]=0x%jx", 3214 (src_pmap->pm_pdir[PTDPTDI] & PG_FRAME), (PTDpde[0] & PG_FRAME)); 3215 3216 return; 3217 } 3218 CTR5(KTR_PMAP, "pmap_copy: dst_pmap=%p src_pmap=%p dst_addr=0x%x len=%d src_addr=0x%x", 3219 dst_pmap, src_pmap, dst_addr, len, src_addr); 3220 3221#ifdef HAMFISTED_LOCKING 3222 mtx_lock(&createdelete_lock); 3223#endif 3224 3225 vm_page_lock_queues(); 3226 if (dst_pmap < src_pmap) { 3227 PMAP_LOCK(dst_pmap); 3228 PMAP_LOCK(src_pmap); 3229 } else { 3230 PMAP_LOCK(src_pmap); 3231 PMAP_LOCK(dst_pmap); 3232 } 3233 sched_pin(); 3234 for (addr = src_addr; addr < end_addr; addr = pdnxt) { 3235 pt_entry_t *src_pte, *dst_pte; 3236 vm_page_t dstmpte, srcmpte; 3237 pd_entry_t srcptepaddr; 3238 u_int ptepindex; 3239 3240 KASSERT(addr < UPT_MIN_ADDRESS, 3241 ("pmap_copy: invalid to pmap_copy page tables")); 3242 3243 pdnxt = (addr + NBPDR) & ~PDRMASK; 3244 if (pdnxt < addr) 3245 pdnxt = end_addr; 3246 ptepindex = addr >> PDRSHIFT; 3247 3248 srcptepaddr = PT_GET(&src_pmap->pm_pdir[ptepindex]); 3249 if (srcptepaddr == 0) 3250 continue; 3251 3252 if (srcptepaddr & PG_PS) { 3253 if (dst_pmap->pm_pdir[ptepindex] == 0) { 3254 PD_SET_VA(dst_pmap, ptepindex, srcptepaddr & ~PG_W, TRUE); 3255 dst_pmap->pm_stats.resident_count += 3256 NBPDR / PAGE_SIZE; 3257 } 3258 continue; 3259 } 3260 3261 srcmpte = PHYS_TO_VM_PAGE(srcptepaddr & PG_FRAME); 3262 KASSERT(srcmpte->wire_count > 0, 3263 ("pmap_copy: source page table page is unused")); 3264 3265 if (pdnxt > end_addr) 3266 pdnxt = end_addr; 3267 3268 src_pte = vtopte(addr); 3269 while (addr < pdnxt) { 3270 
pt_entry_t ptetemp; 3271 ptetemp = *src_pte; 3272 /* 3273 * we only virtual copy managed pages 3274 */ 3275 if ((ptetemp & PG_MANAGED) != 0) { 3276 dstmpte = pmap_allocpte(dst_pmap, addr, 3277 M_NOWAIT); 3278 if (dstmpte == NULL) 3279 goto out; 3280 dst_pte = pmap_pte_quick(dst_pmap, addr); 3281 if (*dst_pte == 0 && 3282 pmap_try_insert_pv_entry(dst_pmap, addr, 3283 PHYS_TO_VM_PAGE(xpmap_mtop(ptetemp) & PG_FRAME))) { 3284 /* 3285 * Clear the wired, modified, and 3286 * accessed (referenced) bits 3287 * during the copy. 3288 */ 3289 KASSERT(ptetemp != 0, ("src_pte not set")); 3290 PT_SET_VA_MA(dst_pte, ptetemp & ~(PG_W | PG_M | PG_A), TRUE /* XXX debug */); 3291 KASSERT(*dst_pte == (ptetemp & ~(PG_W | PG_M | PG_A)), 3292 ("no pmap copy expected: 0x%jx saw: 0x%jx", 3293 ptetemp & ~(PG_W | PG_M | PG_A), *dst_pte)); 3294 dst_pmap->pm_stats.resident_count++; 3295 } else { 3296 free = NULL; 3297 if (pmap_unwire_pte_hold(dst_pmap, 3298 dstmpte, &free)) { 3299 pmap_invalidate_page(dst_pmap, 3300 addr); 3301 pmap_free_zero_pages(free); 3302 } 3303 goto out; 3304 } 3305 if (dstmpte->wire_count >= srcmpte->wire_count) 3306 break; 3307 } 3308 addr += PAGE_SIZE; 3309 src_pte++; 3310 } 3311 } 3312out: 3313 PT_UPDATES_FLUSH(); 3314 sched_unpin(); 3315 vm_page_unlock_queues(); 3316 PMAP_UNLOCK(src_pmap); 3317 PMAP_UNLOCK(dst_pmap); 3318 3319#ifdef HAMFISTED_LOCKING 3320 mtx_unlock(&createdelete_lock); 3321#endif 3322} 3323 3324static __inline void 3325pagezero(void *page) 3326{ 3327#if defined(I686_CPU) 3328 if (cpu_class == CPUCLASS_686) { 3329#if defined(CPU_ENABLE_SSE) 3330 if (cpu_feature & CPUID_SSE2) 3331 sse2_pagezero(page); 3332 else 3333#endif 3334 i686_pagezero(page); 3335 } else 3336#endif 3337 bzero(page, PAGE_SIZE); 3338} 3339 3340/* 3341 * pmap_zero_page zeros the specified hardware page by mapping 3342 * the page into KVM and using bzero to clear its contents. 3343 */ 3344void 3345pmap_zero_page(vm_page_t m) 3346{ 3347 struct sysmaps *sysmaps; 3348 3349 sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)]; 3350 mtx_lock(&sysmaps->lock); 3351 if (*sysmaps->CMAP2) 3352 panic("pmap_zero_page: CMAP2 busy"); 3353 sched_pin(); 3354 PT_SET_MA(sysmaps->CADDR2, PG_V | PG_RW | VM_PAGE_TO_MACH(m) | PG_A | PG_M); 3355 pagezero(sysmaps->CADDR2); 3356 PT_SET_MA(sysmaps->CADDR2, 0); 3357 sched_unpin(); 3358 mtx_unlock(&sysmaps->lock); 3359} 3360 3361/* 3362 * pmap_zero_page_area zeros the specified hardware page by mapping 3363 * the page into KVM and using bzero to clear its contents. 3364 * 3365 * off and size may not cover an area beyond a single hardware page. 3366 */ 3367void 3368pmap_zero_page_area(vm_page_t m, int off, int size) 3369{ 3370 struct sysmaps *sysmaps; 3371 3372 sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)]; 3373 mtx_lock(&sysmaps->lock); 3374 if (*sysmaps->CMAP2) 3375 panic("pmap_zero_page_area: CMAP2 busy"); 3376 sched_pin(); 3377 PT_SET_MA(sysmaps->CADDR2, PG_V | PG_RW | VM_PAGE_TO_MACH(m) | PG_A | PG_M); 3378 3379 if (off == 0 && size == PAGE_SIZE) 3380 pagezero(sysmaps->CADDR2); 3381 else 3382 bzero((char *)sysmaps->CADDR2 + off, size); 3383 PT_SET_MA(sysmaps->CADDR2, 0); 3384 sched_unpin(); 3385 mtx_unlock(&sysmaps->lock); 3386} 3387 3388/* 3389 * pmap_zero_page_idle zeros the specified hardware page by mapping 3390 * the page into KVM and using bzero to clear its contents. This 3391 * is intended to be called from the vm_pagezero process only and 3392 * outside of Giant. 
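 * It uses the dedicated CMAP3/CADDR3 window, so the sysmaps lock is
 * not needed.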
3393 */ 3394void 3395pmap_zero_page_idle(vm_page_t m) 3396{ 3397 3398 if (*CMAP3) 3399 panic("pmap_zero_page_idle: CMAP3 busy"); 3400 sched_pin(); 3401 PT_SET_MA(CADDR3, PG_V | PG_RW | VM_PAGE_TO_MACH(m) | PG_A | PG_M); 3402 pagezero(CADDR3); 3403 PT_SET_MA(CADDR3, 0); 3404 sched_unpin(); 3405} 3406 3407/* 3408 * pmap_copy_page copies the specified (machine independent) 3409 * page by mapping the page into virtual memory and using 3410 * bcopy to copy the page, one machine dependent page at a 3411 * time. 3412 */ 3413void 3414pmap_copy_page(vm_page_t src, vm_page_t dst) 3415{ 3416 struct sysmaps *sysmaps; 3417 3418 sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)]; 3419 mtx_lock(&sysmaps->lock); 3420 if (*sysmaps->CMAP1) 3421 panic("pmap_copy_page: CMAP1 busy"); 3422 if (*sysmaps->CMAP2) 3423 panic("pmap_copy_page: CMAP2 busy"); 3424 sched_pin(); 3425 PT_SET_MA(sysmaps->CADDR1, PG_V | VM_PAGE_TO_MACH(src) | PG_A); 3426 PT_SET_MA(sysmaps->CADDR2, PG_V | PG_RW | VM_PAGE_TO_MACH(dst) | PG_A | PG_M); 3427 bcopy(sysmaps->CADDR1, sysmaps->CADDR2, PAGE_SIZE); 3428 PT_SET_MA(sysmaps->CADDR1, 0); 3429 PT_SET_MA(sysmaps->CADDR2, 0); 3430 sched_unpin(); 3431 mtx_unlock(&sysmaps->lock); 3432} 3433 3434/* 3435 * Returns true if the pmap's pv is one of the first 3436 * 16 pvs linked to from this page. This count may 3437 * be changed upwards or downwards in the future; it 3438 * is only necessary that true be returned for a small 3439 * subset of pmaps for proper page aging. 3440 */ 3441boolean_t 3442pmap_page_exists_quick(pmap_t pmap, vm_page_t m) 3443{ 3444 pv_entry_t pv; 3445 int loops = 0; 3446 boolean_t rv; 3447 3448 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 3449 ("pmap_page_exists_quick: page %p is not managed", m)); 3450 rv = FALSE; 3451 vm_page_lock_queues(); 3452 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { 3453 if (PV_PMAP(pv) == pmap) { 3454 rv = TRUE; 3455 break; 3456 } 3457 loops++; 3458 if (loops >= 16) 3459 break; 3460 } 3461 vm_page_unlock_queues(); 3462 return (rv); 3463} 3464 3465/* 3466 * pmap_page_wired_mappings: 3467 * 3468 * Return the number of managed mappings to the given physical page 3469 * that are wired. 3470 */ 3471int 3472pmap_page_wired_mappings(vm_page_t m) 3473{ 3474 pv_entry_t pv; 3475 pt_entry_t *pte; 3476 pmap_t pmap; 3477 int count; 3478 3479 count = 0; 3480 if ((m->oflags & VPO_UNMANAGED) != 0) 3481 return (count); 3482 vm_page_lock_queues(); 3483 sched_pin(); 3484 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { 3485 pmap = PV_PMAP(pv); 3486 PMAP_LOCK(pmap); 3487 pte = pmap_pte_quick(pmap, pv->pv_va); 3488 if ((*pte & PG_W) != 0) 3489 count++; 3490 PMAP_UNLOCK(pmap); 3491 } 3492 sched_unpin(); 3493 vm_page_unlock_queues(); 3494 return (count); 3495} 3496 3497/* 3498 * Returns TRUE if the given page is mapped. Otherwise, returns FALSE. 3499 */ 3500boolean_t 3501pmap_page_is_mapped(vm_page_t m) 3502{ 3503 3504 if ((m->oflags & VPO_UNMANAGED) != 0) 3505 return (FALSE); 3506 return (!TAILQ_EMPTY(&m->md.pv_list)); 3507} 3508 3509/* 3510 * Remove all pages from specified address space 3511 * this aids process exit speeds. Also, this code 3512 * is special cased for current process only, but 3513 * can have the more generic (and slightly slower) 3514 * mode enabled. This is much faster than pmap_remove 3515 * in the case of running down an entire address space. 
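 * The pv chunks are scanned directly instead of the page tables, so
 * only mappings that actually exist are visited.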
3516 */ 3517void 3518pmap_remove_pages(pmap_t pmap) 3519{ 3520 pt_entry_t *pte, tpte; 3521 vm_page_t m, free = NULL; 3522 pv_entry_t pv; 3523 struct pv_chunk *pc, *npc; 3524 int field, idx; 3525 int32_t bit; 3526 uint32_t inuse, bitmask; 3527 int allfree; 3528 3529 CTR1(KTR_PMAP, "pmap_remove_pages: pmap=%p", pmap); 3530 3531 if (pmap != vmspace_pmap(curthread->td_proc->p_vmspace)) { 3532 printf("warning: pmap_remove_pages called with non-current pmap\n"); 3533 return; 3534 } 3535 vm_page_lock_queues(); 3536 KASSERT(pmap_is_current(pmap), ("removing pages from non-current pmap")); 3537 PMAP_LOCK(pmap); 3538 sched_pin(); 3539 TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) { 3540 allfree = 1; 3541 for (field = 0; field < _NPCM; field++) { 3542 inuse = ~pc->pc_map[field] & pc_freemask[field]; 3543 while (inuse != 0) { 3544 bit = bsfl(inuse); 3545 bitmask = 1UL << bit; 3546 idx = field * 32 + bit; 3547 pv = &pc->pc_pventry[idx]; 3548 inuse &= ~bitmask; 3549 3550 pte = vtopte(pv->pv_va); 3551 tpte = *pte ? xpmap_mtop(*pte) : 0; 3552 3553 if (tpte == 0) { 3554 printf( 3555 "TPTE at %p IS ZERO @ VA %08x\n", 3556 pte, pv->pv_va); 3557 panic("bad pte"); 3558 } 3559 3560/* 3561 * We cannot remove wired pages from a process' mapping at this time 3562 */ 3563 if (tpte & PG_W) { 3564 allfree = 0; 3565 continue; 3566 } 3567 3568 m = PHYS_TO_VM_PAGE(tpte & PG_FRAME); 3569 KASSERT(m->phys_addr == (tpte & PG_FRAME), 3570 ("vm_page_t %p phys_addr mismatch %016jx %016jx", 3571 m, (uintmax_t)m->phys_addr, 3572 (uintmax_t)tpte)); 3573 3574 KASSERT(m < &vm_page_array[vm_page_array_size], 3575 ("pmap_remove_pages: bad tpte %#jx", 3576 (uintmax_t)tpte)); 3577 3578 3579 PT_CLEAR_VA(pte, FALSE); 3580 3581 /* 3582 * Update the vm_page_t clean/reference bits. 3583 */ 3584 if (tpte & PG_M) 3585 vm_page_dirty(m); 3586 3587 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list); 3588 if (TAILQ_EMPTY(&m->md.pv_list)) 3589 vm_page_aflag_clear(m, PGA_WRITEABLE); 3590 3591 pmap_unuse_pt(pmap, pv->pv_va, &free); 3592 3593 /* Mark free */ 3594 PV_STAT(pv_entry_frees++); 3595 PV_STAT(pv_entry_spare++); 3596 pv_entry_count--; 3597 pc->pc_map[field] |= bitmask; 3598 pmap->pm_stats.resident_count--; 3599 } 3600 } 3601 PT_UPDATES_FLUSH(); 3602 if (allfree) { 3603 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 3604 free_pv_chunk(pc); 3605 } 3606 } 3607 PT_UPDATES_FLUSH(); 3608 if (*PMAP1) 3609 PT_SET_MA(PADDR1, 0); 3610 3611 sched_unpin(); 3612 pmap_invalidate_all(pmap); 3613 vm_page_unlock_queues(); 3614 PMAP_UNLOCK(pmap); 3615 pmap_free_zero_pages(free); 3616} 3617 3618/* 3619 * pmap_is_modified: 3620 * 3621 * Return whether or not the specified physical page was modified 3622 * in any physical maps. 3623 */ 3624boolean_t 3625pmap_is_modified(vm_page_t m) 3626{ 3627 pv_entry_t pv; 3628 pt_entry_t *pte; 3629 pmap_t pmap; 3630 boolean_t rv; 3631 3632 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 3633 ("pmap_is_modified: page %p is not managed", m)); 3634 rv = FALSE; 3635 3636 /* 3637 * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be 3638 * concurrently set while the object is locked. Thus, if PGA_WRITEABLE 3639 * is clear, no PTEs can have PG_M set. 
3640 */ 3641 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 3642 if ((m->oflags & VPO_BUSY) == 0 && 3643 (m->aflags & PGA_WRITEABLE) == 0) 3644 return (rv); 3645 vm_page_lock_queues(); 3646 sched_pin(); 3647 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { 3648 pmap = PV_PMAP(pv); 3649 PMAP_LOCK(pmap); 3650 pte = pmap_pte_quick(pmap, pv->pv_va); 3651 rv = (*pte & PG_M) != 0; 3652 PMAP_UNLOCK(pmap); 3653 if (rv) 3654 break; 3655 } 3656 if (*PMAP1) 3657 PT_SET_MA(PADDR1, 0); 3658 sched_unpin(); 3659 vm_page_unlock_queues(); 3660 return (rv); 3661} 3662 3663/* 3664 * pmap_is_prefaultable: 3665 * 3666 * Return whether or not the specified virtual address is eligible 3667 * for prefault. 3668 */ 3669static boolean_t 3670pmap_is_prefaultable_locked(pmap_t pmap, vm_offset_t addr) 3671{ 3672 pt_entry_t *pte; 3673 boolean_t rv = FALSE; 3674 /* Prefaulting is effectively disabled here: always report FALSE. */ 3675 return (rv); 3676 3677 if (pmap_is_current(pmap) && *pmap_pde(pmap, addr)) { 3678 pte = vtopte(addr); 3679 rv = (*pte == 0); 3680 } 3681 return (rv); 3682} 3683 3684boolean_t 3685pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr) 3686{ 3687 boolean_t rv; 3688 3689 PMAP_LOCK(pmap); 3690 rv = pmap_is_prefaultable_locked(pmap, addr); 3691 PMAP_UNLOCK(pmap); 3692 return (rv); 3693} 3694 3695boolean_t 3696pmap_is_referenced(vm_page_t m) 3697{ 3698 pv_entry_t pv; 3699 pt_entry_t *pte; 3700 pmap_t pmap; 3701 boolean_t rv; 3702 3703 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 3704 ("pmap_is_referenced: page %p is not managed", m)); 3705 rv = FALSE; 3706 vm_page_lock_queues(); 3707 sched_pin(); 3708 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { 3709 pmap = PV_PMAP(pv); 3710 PMAP_LOCK(pmap); 3711 pte = pmap_pte_quick(pmap, pv->pv_va); 3712 rv = (*pte & (PG_A | PG_V)) == (PG_A | PG_V); 3713 PMAP_UNLOCK(pmap); 3714 if (rv) 3715 break; 3716 } 3717 if (*PMAP1) 3718 PT_SET_MA(PADDR1, 0); 3719 sched_unpin(); 3720 vm_page_unlock_queues(); 3721 return (rv); 3722} 3723 3724void 3725pmap_map_readonly(pmap_t pmap, vm_offset_t va, int len) 3726{ 3727 int i, npages = round_page(len) >> PAGE_SHIFT; 3728 for (i = 0; i < npages; i++) { 3729 pt_entry_t *pte; 3730 pte = pmap_pte(pmap, (vm_offset_t)(va + i*PAGE_SIZE)); 3731 vm_page_lock_queues(); 3732 pte_store(pte, xpmap_mtop(*pte & ~(PG_RW|PG_M))); 3733 vm_page_unlock_queues(); 3734 PMAP_MARK_PRIV(xpmap_mtop(*pte)); 3735 pmap_pte_release(pte); 3736 } 3737} 3738 3739void 3740pmap_map_readwrite(pmap_t pmap, vm_offset_t va, int len) 3741{ 3742 int i, npages = round_page(len) >> PAGE_SHIFT; 3743 for (i = 0; i < npages; i++) { 3744 pt_entry_t *pte; 3745 pte = pmap_pte(pmap, (vm_offset_t)(va + i*PAGE_SIZE)); 3746 PMAP_MARK_UNPRIV(xpmap_mtop(*pte)); 3747 vm_page_lock_queues(); 3748 pte_store(pte, xpmap_mtop(*pte) | (PG_RW|PG_M)); 3749 vm_page_unlock_queues(); 3750 pmap_pte_release(pte); 3751 } 3752} 3753 3754/* 3755 * Clear the write and modified bits in each of the given page's mappings. 3756 */ 3757void 3758pmap_remove_write(vm_page_t m) 3759{ 3760 pv_entry_t pv; 3761 pmap_t pmap; 3762 pt_entry_t oldpte, *pte; 3763 3764 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 3765 ("pmap_remove_write: page %p is not managed", m)); 3766 3767 /* 3768 * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be set by 3769 * another thread while the object is locked. Thus, if PGA_WRITEABLE 3770 * is clear, no page table entries need updating.
3771 */ 3772 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 3773 if ((m->oflags & VPO_BUSY) == 0 && 3774 (m->aflags & PGA_WRITEABLE) == 0) 3775 return; 3776 vm_page_lock_queues(); 3777 sched_pin(); 3778 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { 3779 pmap = PV_PMAP(pv); 3780 PMAP_LOCK(pmap); 3781 pte = pmap_pte_quick(pmap, pv->pv_va); 3782retry: 3783 oldpte = *pte; 3784 if ((oldpte & PG_RW) != 0) { 3785 vm_paddr_t newpte = oldpte & ~(PG_RW | PG_M); 3786 3787 /* 3788 * Regardless of whether a pte is 32 or 64 bits 3789 * in size, PG_RW and PG_M are among the least 3790 * significant 32 bits. 3791 */ 3792 PT_SET_VA_MA(pte, newpte, TRUE); 3793 if (*pte != newpte) 3794 goto retry; 3795 3796 if ((oldpte & PG_M) != 0) 3797 vm_page_dirty(m); 3798 pmap_invalidate_page(pmap, pv->pv_va); 3799 } 3800 PMAP_UNLOCK(pmap); 3801 } 3802 vm_page_aflag_clear(m, PGA_WRITEABLE); 3803 PT_UPDATES_FLUSH(); 3804 if (*PMAP1) 3805 PT_SET_MA(PADDR1, 0); 3806 sched_unpin(); 3807 vm_page_unlock_queues(); 3808} 3809 3810/* 3811 * pmap_ts_referenced: 3812 * 3813 * Return a count of reference bits for a page, clearing those bits. 3814 * It is not necessary for every reference bit to be cleared, but it 3815 * is necessary that 0 only be returned when there are truly no 3816 * reference bits set. 3817 * 3818 * XXX: The exact number of bits to check and clear is a matter that 3819 * should be tested and standardized at some point in the future for 3820 * optimal aging of shared pages. 3821 */ 3822int 3823pmap_ts_referenced(vm_page_t m) 3824{ 3825 pv_entry_t pv, pvf, pvn; 3826 pmap_t pmap; 3827 pt_entry_t *pte; 3828 int rtval = 0; 3829 3830 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 3831 ("pmap_ts_referenced: page %p is not managed", m)); 3832 vm_page_lock_queues(); 3833 sched_pin(); 3834 if ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) { 3835 pvf = pv; 3836 do { 3837 pvn = TAILQ_NEXT(pv, pv_list); 3838 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list); 3839 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list); 3840 pmap = PV_PMAP(pv); 3841 PMAP_LOCK(pmap); 3842 pte = pmap_pte_quick(pmap, pv->pv_va); 3843 if ((*pte & PG_A) != 0) { 3844 PT_SET_VA_MA(pte, *pte & ~PG_A, FALSE); 3845 pmap_invalidate_page(pmap, pv->pv_va); 3846 rtval++; 3847 if (rtval > 4) 3848 pvn = NULL; 3849 } 3850 PMAP_UNLOCK(pmap); 3851 } while ((pv = pvn) != NULL && pv != pvf); 3852 } 3853 PT_UPDATES_FLUSH(); 3854 if (*PMAP1) 3855 PT_SET_MA(PADDR1, 0); 3856 sched_unpin(); 3857 vm_page_unlock_queues(); 3858 return (rtval); 3859} 3860 3861/* 3862 * Clear the modify bits on the specified physical page. 3863 */ 3864void 3865pmap_clear_modify(vm_page_t m) 3866{ 3867 pv_entry_t pv; 3868 pmap_t pmap; 3869 pt_entry_t *pte; 3870 3871 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 3872 ("pmap_clear_modify: page %p is not managed", m)); 3873 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 3874 KASSERT((m->oflags & VPO_BUSY) == 0, 3875 ("pmap_clear_modify: page %p is busy", m)); 3876 3877 /* 3878 * If the page is not PGA_WRITEABLE, then no PTEs can have PG_M set. 3879 * If the object containing the page is locked and the page is not 3880 * VPO_BUSY, then PGA_WRITEABLE cannot be concurrently set. 
3881 */ 3882 if ((m->aflags & PGA_WRITEABLE) == 0) 3883 return; 3884 vm_page_lock_queues(); 3885 sched_pin(); 3886 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { 3887 pmap = PV_PMAP(pv); 3888 PMAP_LOCK(pmap); 3889 pte = pmap_pte_quick(pmap, pv->pv_va); 3890 if ((*pte & (PG_M | PG_RW)) == (PG_M | PG_RW)) { 3891 /* 3892 * Regardless of whether a pte is 32 or 64 bits 3893 * in size, PG_M is among the least significant 3894 * 32 bits. 3895 */ 3896 PT_SET_VA_MA(pte, *pte & ~PG_M, FALSE); 3897 pmap_invalidate_page(pmap, pv->pv_va); 3898 } 3899 PMAP_UNLOCK(pmap); 3900 } 3901 sched_unpin(); 3902 vm_page_unlock_queues(); 3903} 3904 3905/* 3906 * pmap_clear_reference: 3907 * 3908 * Clear the reference bit on the specified physical page. 3909 */ 3910void 3911pmap_clear_reference(vm_page_t m) 3912{ 3913 pv_entry_t pv; 3914 pmap_t pmap; 3915 pt_entry_t *pte; 3916 3917 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 3918 ("pmap_clear_reference: page %p is not managed", m)); 3919 vm_page_lock_queues(); 3920 sched_pin(); 3921 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { 3922 pmap = PV_PMAP(pv); 3923 PMAP_LOCK(pmap); 3924 pte = pmap_pte_quick(pmap, pv->pv_va); 3925 if ((*pte & PG_A) != 0) { 3926 /* 3927 * Regardless of whether a pte is 32 or 64 bits 3928 * in size, PG_A is among the least significant 3929 * 32 bits. 3930 */ 3931 PT_SET_VA_MA(pte, *pte & ~PG_A, FALSE); 3932 pmap_invalidate_page(pmap, pv->pv_va); 3933 } 3934 PMAP_UNLOCK(pmap); 3935 } 3936 sched_unpin(); 3937 vm_page_unlock_queues(); 3938} 3939 3940/* 3941 * Miscellaneous support routines follow 3942 */ 3943 3944/* 3945 * Map a set of physical memory pages into the kernel virtual 3946 * address space. Return a pointer to where it is mapped. This 3947 * routine is intended to be used for mapping device memory, 3948 * NOT real memory. 3949 */ 3950void * 3951pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, int mode) 3952{ 3953 vm_offset_t va, offset; 3954 vm_size_t tmpsize; 3955 3956 offset = pa & PAGE_MASK; 3957 size = roundup(offset + size, PAGE_SIZE); 3958 pa = pa & PG_FRAME; 3959 3960 if (pa < KERNLOAD && pa + size <= KERNLOAD) 3961 va = KERNBASE + pa; 3962 else 3963 va = kmem_alloc_nofault(kernel_map, size); 3964 if (!va) 3965 panic("pmap_mapdev: Couldn't alloc kernel virtual memory"); 3966 3967 for (tmpsize = 0; tmpsize < size; tmpsize += PAGE_SIZE) 3968 pmap_kenter_attr(va + tmpsize, pa + tmpsize, mode); 3969 pmap_invalidate_range(kernel_pmap, va, va + tmpsize); 3970 pmap_invalidate_cache_range(va, va + size); 3971 return ((void *)(va + offset)); 3972} 3973 3974void * 3975pmap_mapdev(vm_paddr_t pa, vm_size_t size) 3976{ 3977 3978 return (pmap_mapdev_attr(pa, size, PAT_UNCACHEABLE)); 3979} 3980 3981void * 3982pmap_mapbios(vm_paddr_t pa, vm_size_t size) 3983{ 3984 3985 return (pmap_mapdev_attr(pa, size, PAT_WRITE_BACK)); 3986} 3987 3988void 3989pmap_unmapdev(vm_offset_t va, vm_size_t size) 3990{ 3991 vm_offset_t base, offset, tmpva; 3992 3993 if (va >= KERNBASE && va + size <= KERNBASE + KERNLOAD) 3994 return; 3995 base = trunc_page(va); 3996 offset = va & PAGE_MASK; 3997 size = roundup(offset + size, PAGE_SIZE); 3998 critical_enter(); 3999 for (tmpva = base; tmpva < (base + size); tmpva += PAGE_SIZE) 4000 pmap_kremove(tmpva); 4001 pmap_invalidate_range(kernel_pmap, va, tmpva); 4002 critical_exit(); 4003 kmem_free(kernel_map, base, size); 4004} 4005 4006/* 4007 * Sets the memory attribute for the specified page. 
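 * Any cached data for the page is flushed so that a later mapping
 * with the new attribute does not observe stale contents.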
4008 */ 4009void 4010pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma) 4011{ 4012 4013 m->md.pat_mode = ma; 4014 if ((m->flags & PG_FICTITIOUS) != 0) 4015 return; 4016 4017 /* 4018 * If "m" is a normal page, flush it from the cache. 4019 * See pmap_invalidate_cache_range(). 4020 * 4021 * First, try to find an existing mapping of the page by sf 4022 * buffer. sf_buf_invalidate_cache() modifies mapping and 4023 * flushes the cache. 4024 */ 4025 if (sf_buf_invalidate_cache(m)) 4026 return; 4027 4028 /* 4029 * If page is not mapped by sf buffer, but CPU does not 4030 * support self snoop, map the page transient and do 4031 * invalidation. In the worst case, whole cache is flushed by 4032 * pmap_invalidate_cache_range(). 4033 */ 4034 if ((cpu_feature & CPUID_SS) == 0) 4035 pmap_flush_page(m); 4036} 4037 4038static void 4039pmap_flush_page(vm_page_t m) 4040{ 4041 struct sysmaps *sysmaps; 4042 vm_offset_t sva, eva; 4043 4044 if ((cpu_feature & CPUID_CLFSH) != 0) { 4045 sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)]; 4046 mtx_lock(&sysmaps->lock); 4047 if (*sysmaps->CMAP2) 4048 panic("pmap_flush_page: CMAP2 busy"); 4049 sched_pin(); 4050 PT_SET_MA(sysmaps->CADDR2, PG_V | PG_RW | 4051 VM_PAGE_TO_MACH(m) | PG_A | PG_M | 4052 pmap_cache_bits(m->md.pat_mode, 0)); 4053 invlcaddr(sysmaps->CADDR2); 4054 sva = (vm_offset_t)sysmaps->CADDR2; 4055 eva = sva + PAGE_SIZE; 4056 4057 /* 4058 * Use mfence despite the ordering implied by 4059 * mtx_{un,}lock() because clflush is not guaranteed 4060 * to be ordered by any other instruction. 4061 */ 4062 mfence(); 4063 for (; sva < eva; sva += cpu_clflush_line_size) 4064 clflush(sva); 4065 mfence(); 4066 PT_SET_MA(sysmaps->CADDR2, 0); 4067 sched_unpin(); 4068 mtx_unlock(&sysmaps->lock); 4069 } else 4070 pmap_invalidate_cache(); 4071} 4072 4073/* 4074 * Changes the specified virtual address range's memory type to that given by 4075 * the parameter "mode". The specified virtual address range must be 4076 * completely contained within the kernel map. 4077 * 4078 * Returns zero if the change completed successfully, and either EINVAL or 4079 * ENOMEM if the change failed. Specifically, EINVAL is returned if some part 4080 * of the virtual address range was not mapped, and ENOMEM is returned if 4081 * there was insufficient memory available to complete the change. 4082 */ 4083int 4084pmap_change_attr(vm_offset_t va, vm_size_t size, int mode) 4085{ 4086 vm_offset_t base, offset, tmpva; 4087 pt_entry_t *pte; 4088 u_int opte, npte; 4089 pd_entry_t *pde; 4090 boolean_t changed; 4091 4092 base = trunc_page(va); 4093 offset = va & PAGE_MASK; 4094 size = roundup(offset + size, PAGE_SIZE); 4095 4096 /* Only supported on kernel virtual addresses. */ 4097 if (base <= VM_MAXUSER_ADDRESS) 4098 return (EINVAL); 4099 4100 /* 4MB pages and pages that aren't mapped aren't supported. */ 4101 for (tmpva = base; tmpva < (base + size); tmpva += PAGE_SIZE) { 4102 pde = pmap_pde(kernel_pmap, tmpva); 4103 if (*pde & PG_PS) 4104 return (EINVAL); 4105 if ((*pde & PG_V) == 0) 4106 return (EINVAL); 4107 pte = vtopte(tmpva); 4108 if ((*pte & PG_V) == 0) 4109 return (EINVAL); 4110 } 4111 4112 changed = FALSE; 4113 4114 /* 4115 * Ok, all the pages exist and are 4k, so run through them updating 4116 * their cache mode. 4117 */ 4118 for (tmpva = base; size > 0; ) { 4119 pte = vtopte(tmpva); 4120 4121 /* 4122 * The cache mode bits are all in the low 32-bits of the 4123 * PTE, so we can just spin on updating the low 32-bits.
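 * The loop below retries until the PTE holds the new value,
 * tolerating concurrent updates such as the hardware setting PG_A
 * or PG_M.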
/*
 * Changes the specified virtual address range's memory type to that given by
 * the parameter "mode".  The specified virtual address range must be
 * completely contained within the kernel map.
 *
 * Returns zero if the change completed successfully, and either EINVAL or
 * ENOMEM if the change failed.  Specifically, EINVAL is returned if some part
 * of the virtual address range was not mapped, and ENOMEM is returned if
 * there was insufficient memory available to complete the change.
 */
int
pmap_change_attr(vm_offset_t va, vm_size_t size, int mode)
{
	vm_offset_t base, offset, tmpva;
	pt_entry_t *pte;
	u_int opte, npte;
	pd_entry_t *pde;
	boolean_t changed;

	base = trunc_page(va);
	offset = va & PAGE_MASK;
	size = roundup(offset + size, PAGE_SIZE);

	/* Only supported on kernel virtual addresses. */
	if (base <= VM_MAXUSER_ADDRESS)
		return (EINVAL);

	/* 4MB pages and pages that aren't mapped aren't supported. */
	for (tmpva = base; tmpva < (base + size); tmpva += PAGE_SIZE) {
		pde = pmap_pde(kernel_pmap, tmpva);
		if (*pde & PG_PS)
			return (EINVAL);
		if ((*pde & PG_V) == 0)
			return (EINVAL);
		pte = vtopte(tmpva);
		if ((*pte & PG_V) == 0)
			return (EINVAL);
	}

	changed = FALSE;

	/*
	 * OK, all the pages exist and are 4K, so run through them updating
	 * their cache mode.
	 */
	for (tmpva = base; size > 0; ) {
		pte = vtopte(tmpva);

		/*
		 * The cache mode bits are all in the low 32 bits of the
		 * PTE, so we can just spin on updating the low 32 bits.
		 */
		do {
			opte = *(u_int *)pte;
			npte = opte & ~(PG_PTE_PAT | PG_NC_PCD | PG_NC_PWT);
			npte |= pmap_cache_bits(mode, 0);
			PT_SET_VA_MA(pte, npte, TRUE);
		} while (npte != opte && (*pte != npte));
		if (npte != opte)
			changed = TRUE;
		tmpva += PAGE_SIZE;
		size -= PAGE_SIZE;
	}

	/*
	 * If any mappings changed, flush the CPU caches so that no stale
	 * data remains cached with the old memory type.
	 */
	if (changed) {
		pmap_invalidate_range(kernel_pmap, base, tmpva);
		pmap_invalidate_cache_range(base, tmpva);
	}
	return (0);
}

/*
 * Perform the pmap work for mincore(2).
 */
int
pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
{
	pt_entry_t *ptep, pte;
	vm_paddr_t pa;
	int val;

	PMAP_LOCK(pmap);
retry:
	ptep = pmap_pte(pmap, addr);
	pte = (ptep != NULL) ? PT_GET(ptep) : 0;
	pmap_pte_release(ptep);
	val = 0;
	if ((pte & PG_V) != 0) {
		val |= MINCORE_INCORE;
		if ((pte & (PG_M | PG_RW)) == (PG_M | PG_RW))
			val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
		if ((pte & PG_A) != 0)
			val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
	}
	if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) !=
	    (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) &&
	    (pte & (PG_MANAGED | PG_V)) == (PG_MANAGED | PG_V)) {
		pa = pte & PG_FRAME;
		/* Ensure that "PHYS_TO_VM_PAGE(pa)->object" doesn't change. */
		if (vm_page_pa_tryrelock(pmap, pa, locked_pa))
			goto retry;
	} else
		PA_UNLOCK_COND(*locked_pa);
	PMAP_UNLOCK(pmap);
	return (val);
}

void
pmap_activate(struct thread *td)
{
	pmap_t pmap, oldpmap;
	u_int cpuid;
	u_int32_t cr3;

	critical_enter();
	pmap = vmspace_pmap(td->td_proc->p_vmspace);
	oldpmap = PCPU_GET(curpmap);
	cpuid = PCPU_GET(cpuid);
#if defined(SMP)
	CPU_CLR_ATOMIC(cpuid, &oldpmap->pm_active);
	CPU_SET_ATOMIC(cpuid, &pmap->pm_active);
#else
	CPU_CLR(cpuid, &oldpmap->pm_active);
	CPU_SET(cpuid, &pmap->pm_active);
#endif
#ifdef PAE
	cr3 = vtophys(pmap->pm_pdpt);
#else
	cr3 = vtophys(pmap->pm_pdir);
#endif
	/*
	 * pmap_activate is called for the current thread on the current CPU.
	 */
	td->td_pcb->pcb_cr3 = cr3;
	PT_UPDATES_FLUSH();
	load_cr3(cr3);
	PCPU_SET(curpmap, pmap);
	critical_exit();
}

void
pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
{
}
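/*
 * Illustrative sketch (not compiled): checking the result of
 * pmap_change_attr(), as described in the comment above that function
 * earlier in this file.  The helper name and its arguments are
 * hypothetical.
 */
#if 0
static int
example_make_range_uncacheable(vm_offset_t va, vm_size_t len)
{
	int error;

	/* Only kernel virtual addresses backed by 4K mappings are accepted. */
	error = pmap_change_attr(va, len, PAT_UNCACHEABLE);
	if (error != 0)
		printf("pmap_change_attr(%#x, %#x): error %d\n",
		    va, len, error);
	return (error);
}
#endif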
/*
 * Increase the starting virtual address of the given mapping if a
 * different alignment might result in more superpage mappings.
 */
void
pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
    vm_offset_t *addr, vm_size_t size)
{
	vm_offset_t superpage_offset;

	if (size < NBPDR)
		return;
	if (object != NULL && (object->flags & OBJ_COLORED) != 0)
		offset += ptoa(object->pg_color);
	superpage_offset = offset & PDRMASK;
	if (size - ((NBPDR - superpage_offset) & PDRMASK) < NBPDR ||
	    (*addr & PDRMASK) == superpage_offset)
		return;
	if ((*addr & PDRMASK) < superpage_offset)
		*addr = (*addr & ~PDRMASK) + superpage_offset;
	else
		*addr = ((*addr + PDRMASK) & ~PDRMASK) + superpage_offset;
}

void
pmap_suspend(void)
{
	pmap_t pmap;
	int i, pdir, offset;
	vm_paddr_t pdirma;
	mmu_update_t mu[4];

	/*
	 * We need to remove the recursive mapping structure from all
	 * our pmaps so that Xen doesn't get confused when it restores
	 * the page tables.  The recursive map lives at page directory
	 * index PTDPTDI.  We assume that the suspend code has stopped
	 * the other vcpus (if any).
	 */
	LIST_FOREACH(pmap, &allpmaps, pm_list) {
		for (i = 0; i < 4; i++) {
			/*
			 * Figure out which page directory (L2) page
			 * contains this bit of the recursive map and
			 * the offset within that page of the map
			 * entry.
			 */
			pdir = (PTDPTDI + i) / NPDEPG;
			offset = (PTDPTDI + i) % NPDEPG;
			pdirma = pmap->pm_pdpt[pdir] & PG_FRAME;
			mu[i].ptr = pdirma + offset * sizeof(pd_entry_t);
			mu[i].val = 0;
		}
		HYPERVISOR_mmu_update(mu, 4, NULL, DOMID_SELF);
	}
}
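/*
 * Worked example of the index arithmetic used in pmap_suspend() above and
 * pmap_resume() below, with illustrative numbers only: under PAE each page
 * directory (L2) page holds NPDEPG == 512 eight-byte entries, so if PTDPTDI
 * happened to be 2044, recursive slot i == 1 would give
 * (2044 + 1) / 512 == 3 and (2044 + 1) % 512 == 509, i.e. that slot lives in
 * the fourth L2 page at entry 509.  The actual PTDPTDI value is configuration
 * dependent; the arithmetic, not the numbers, is the point.
 */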
void
pmap_resume(void)
{
	pmap_t pmap;
	int i, pdir, offset;
	vm_paddr_t pdirma;
	mmu_update_t mu[4];

	/*
	 * Restore the recursive map that we removed on suspend.
	 */
	LIST_FOREACH(pmap, &allpmaps, pm_list) {
		for (i = 0; i < 4; i++) {
			/*
			 * Figure out which page directory (L2) page
			 * contains this bit of the recursive map and
			 * the offset within that page of the map
			 * entry.
			 */
			pdir = (PTDPTDI + i) / NPDEPG;
			offset = (PTDPTDI + i) % NPDEPG;
			pdirma = pmap->pm_pdpt[pdir] & PG_FRAME;
			mu[i].ptr = pdirma + offset * sizeof(pd_entry_t);
			mu[i].val = (pmap->pm_pdpt[i] & PG_FRAME) | PG_V;
		}
		HYPERVISOR_mmu_update(mu, 4, NULL, DOMID_SELF);
	}
}

#if defined(PMAP_DEBUG)
int
pmap_pid_dump(int pid)
{
	pmap_t pmap;
	struct proc *p;
	int npte = 0;
	int index;

	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		if (p->p_pid != pid)
			continue;

		if (p->p_vmspace) {
			int i, j;
			index = 0;
			pmap = vmspace_pmap(p->p_vmspace);
			for (i = 0; i < NPDEPTD; i++) {
				pd_entry_t *pde;
				pt_entry_t *pte;
				vm_offset_t base = i << PDRSHIFT;

				pde = &pmap->pm_pdir[i];
				if (pde && pmap_pde_v(pde)) {
					for (j = 0; j < NPTEPG; j++) {
						vm_offset_t va = base + (j << PAGE_SHIFT);
						if (va >= (vm_offset_t)VM_MIN_KERNEL_ADDRESS) {
							if (index) {
								index = 0;
								printf("\n");
							}
							sx_sunlock(&allproc_lock);
							return (npte);
						}
						pte = pmap_pte(pmap, va);
						if (pte && pmap_pte_v(pte)) {
							pt_entry_t pa;
							vm_page_t m;
							pa = PT_GET(pte);
							m = PHYS_TO_VM_PAGE(pa & PG_FRAME);
							printf("va: 0x%x, pt: 0x%x, h: %d, w: %d, f: 0x%x",
							    va, pa, m->hold_count, m->wire_count, m->flags);
							npte++;
							index++;
							if (index >= 2) {
								index = 0;
								printf("\n");
							} else {
								printf(" ");
							}
						}
					}
				}
			}
		}
	}
	sx_sunlock(&allproc_lock);
	return (npte);
}
#endif

#if defined(DEBUG)

static void	pads(pmap_t pm);
void	pmap_pvdump(vm_paddr_t pa);

/* Print the address space of the pmap. */
static void
pads(pmap_t pm)
{
	int i, j;
	vm_paddr_t va;
	pt_entry_t *ptep;

	if (pm == kernel_pmap)
		return;
	for (i = 0; i < NPDEPTD; i++)
		if (pm->pm_pdir[i])
			for (j = 0; j < NPTEPG; j++) {
				va = (i << PDRSHIFT) + (j << PAGE_SHIFT);
				if (pm == kernel_pmap && va < KERNBASE)
					continue;
				if (pm != kernel_pmap && va > UPT_MAX_ADDRESS)
					continue;
				ptep = pmap_pte(pm, va);
				if (pmap_pte_v(ptep))
					printf("%x:%x ", va, *ptep);
			}
}

void
pmap_pvdump(vm_paddr_t pa)
{
	pv_entry_t pv;
	pmap_t pmap;
	vm_page_t m;

	printf("pa %x", pa);
	m = PHYS_TO_VM_PAGE(pa);
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
		pmap = PV_PMAP(pv);
		printf(" -> pmap %p, va %x", (void *)pmap, pv->pv_va);
		pads(pmap);
	}
	printf(" ");
}
#endif