pmap.c revision 236378
1/*- 2 * Copyright (c) 1991 Regents of the University of California. 3 * All rights reserved. 4 * Copyright (c) 1994 John S. Dyson 5 * All rights reserved. 6 * Copyright (c) 1994 David Greenman 7 * All rights reserved. 8 * Copyright (c) 2005 Alan L. Cox <alc@cs.rice.edu> 9 * All rights reserved. 10 * 11 * This code is derived from software contributed to Berkeley by 12 * the Systems Programming Group of the University of Utah Computer 13 * Science Department and William Jolitz of UUNET Technologies Inc. 14 * 15 * Redistribution and use in source and binary forms, with or without 16 * modification, are permitted provided that the following conditions 17 * are met: 18 * 1. Redistributions of source code must retain the above copyright 19 * notice, this list of conditions and the following disclaimer. 20 * 2. Redistributions in binary form must reproduce the above copyright 21 * notice, this list of conditions and the following disclaimer in the 22 * documentation and/or other materials provided with the distribution. 23 * 3. All advertising materials mentioning features or use of this software 24 * must display the following acknowledgement: 25 * This product includes software developed by the University of 26 * California, Berkeley and its contributors. 27 * 4. Neither the name of the University nor the names of its contributors 28 * may be used to endorse or promote products derived from this software 29 * without specific prior written permission. 30 * 31 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 32 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 33 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 34 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 35 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 36 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 37 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 38 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 39 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 40 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 41 * SUCH DAMAGE. 42 * 43 * from: @(#)pmap.c 7.7 (Berkeley) 5/12/91 44 */ 45/*- 46 * Copyright (c) 2003 Networks Associates Technology, Inc. 47 * All rights reserved. 48 * 49 * This software was developed for the FreeBSD Project by Jake Burkholder, 50 * Safeport Network Services, and Network Associates Laboratories, the 51 * Security Research Division of Network Associates, Inc. under 52 * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA 53 * CHATS research program. 54 * 55 * Redistribution and use in source and binary forms, with or without 56 * modification, are permitted provided that the following conditions 57 * are met: 58 * 1. Redistributions of source code must retain the above copyright 59 * notice, this list of conditions and the following disclaimer. 60 * 2. Redistributions in binary form must reproduce the above copyright 61 * notice, this list of conditions and the following disclaimer in the 62 * documentation and/or other materials provided with the distribution. 63 * 64 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 65 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 66 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 67 * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 68 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 69 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 70 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 71 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 72 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 73 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 74 * SUCH DAMAGE. 75 */ 76 77#include <sys/cdefs.h> 78__FBSDID("$FreeBSD: head/sys/i386/xen/pmap.c 236378 2012-06-01 04:26:50Z alc $"); 79 80/* 81 * Manages physical address maps. 82 * 83 * In addition to hardware address maps, this 84 * module is called upon to provide software-use-only 85 * maps which may or may not be stored in the same 86 * form as hardware maps. These pseudo-maps are 87 * used to store intermediate results from copy 88 * operations to and from address spaces. 89 * 90 * Since the information managed by this module is 91 * also stored by the logical address mapping module, 92 * this module may throw away valid virtual-to-physical 93 * mappings at almost any time. However, invalidations 94 * of virtual-to-physical mappings must be done as 95 * requested. 96 * 97 * In order to cope with hardware architectures which 98 * make virtual-to-physical map invalidates expensive, 99 * this module may delay invalidate or reduced protection 100 * operations until such time as they are actually 101 * necessary. This module is given full information as 102 * to which processors are currently using which maps, 103 * and to when physical maps must be made correct. 104 */ 105 106#include "opt_cpu.h" 107#include "opt_pmap.h" 108#include "opt_smp.h" 109#include "opt_xbox.h" 110 111#include <sys/param.h> 112#include <sys/systm.h> 113#include <sys/kernel.h> 114#include <sys/ktr.h> 115#include <sys/lock.h> 116#include <sys/malloc.h> 117#include <sys/mman.h> 118#include <sys/msgbuf.h> 119#include <sys/mutex.h> 120#include <sys/proc.h> 121#include <sys/sf_buf.h> 122#include <sys/sx.h> 123#include <sys/vmmeter.h> 124#include <sys/sched.h> 125#include <sys/sysctl.h> 126#ifdef SMP 127#include <sys/smp.h> 128#else 129#include <sys/cpuset.h> 130#endif 131 132#include <vm/vm.h> 133#include <vm/vm_param.h> 134#include <vm/vm_kern.h> 135#include <vm/vm_page.h> 136#include <vm/vm_map.h> 137#include <vm/vm_object.h> 138#include <vm/vm_extern.h> 139#include <vm/vm_pageout.h> 140#include <vm/vm_pager.h> 141#include <vm/uma.h> 142 143#include <machine/cpu.h> 144#include <machine/cputypes.h> 145#include <machine/md_var.h> 146#include <machine/pcb.h> 147#include <machine/specialreg.h> 148#ifdef SMP 149#include <machine/smp.h> 150#endif 151 152#ifdef XBOX 153#include <machine/xbox.h> 154#endif 155 156#include <xen/interface/xen.h> 157#include <xen/hypervisor.h> 158#include <machine/xen/hypercall.h> 159#include <machine/xen/xenvar.h> 160#include <machine/xen/xenfunc.h> 161 162#if !defined(CPU_DISABLE_SSE) && defined(I686_CPU) 163#define CPU_ENABLE_SSE 164#endif 165 166#ifndef PMAP_SHPGPERPROC 167#define PMAP_SHPGPERPROC 200 168#endif 169 170#define DIAGNOSTIC 171 172#if !defined(DIAGNOSTIC) 173#ifdef __GNUC_GNU_INLINE__ 174#define PMAP_INLINE __attribute__((__gnu_inline__)) inline 175#else 176#define PMAP_INLINE extern inline 177#endif 178#else 179#define PMAP_INLINE 180#endif 181 182#ifdef PV_STATS 183#define PV_STAT(x) do { x ; } while (0) 184#else 185#define PV_STAT(x) do { } while (0) 186#endif 
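/*
 * For example, a statistics update written as
 *
 *	PV_STAT(pc_chunk_frees++);
 *
 * increments the counter only in kernels built with PV_STATS; in any
 * other build the macro expands to an empty statement, so the pv entry
 * counters kept later in this file cost nothing.
 */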
187 188/* 189 * Get PDEs and PTEs for user/kernel address space 190 */ 191#define pmap_pde(m, v) (&((m)->pm_pdir[(vm_offset_t)(v) >> PDRSHIFT])) 192#define pdir_pde(m, v) (m[(vm_offset_t)(v) >> PDRSHIFT]) 193 194#define pmap_pde_v(pte) ((*(int *)pte & PG_V) != 0) 195#define pmap_pte_w(pte) ((*(int *)pte & PG_W) != 0) 196#define pmap_pte_m(pte) ((*(int *)pte & PG_M) != 0) 197#define pmap_pte_u(pte) ((*(int *)pte & PG_A) != 0) 198#define pmap_pte_v(pte) ((*(int *)pte & PG_V) != 0) 199 200#define pmap_pte_set_prot(pte, v) ((*(int *)pte &= ~PG_PROT), (*(int *)pte |= (v))) 201 202#define HAMFISTED_LOCKING 203#ifdef HAMFISTED_LOCKING 204static struct mtx createdelete_lock; 205#endif 206 207struct pmap kernel_pmap_store; 208LIST_HEAD(pmaplist, pmap); 209static struct pmaplist allpmaps; 210static struct mtx allpmaps_lock; 211 212vm_offset_t virtual_avail; /* VA of first avail page (after kernel bss) */ 213vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */ 214int pgeflag = 0; /* PG_G or-in */ 215int pseflag = 0; /* PG_PS or-in */ 216 217int nkpt; 218vm_offset_t kernel_vm_end; 219extern u_int32_t KERNend; 220 221#ifdef PAE 222pt_entry_t pg_nx; 223#endif 224 225static SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD, 0, "VM/pmap parameters"); 226 227static int pat_works; /* Is page attribute table sane? */ 228 229/* 230 * Data for the pv entry allocation mechanism 231 */ 232static TAILQ_HEAD(pch, pv_chunk) pv_chunks = TAILQ_HEAD_INITIALIZER(pv_chunks); 233static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0; 234static int shpgperproc = PMAP_SHPGPERPROC; 235 236struct pv_chunk *pv_chunkbase; /* KVA block for pv_chunks */ 237int pv_maxchunks; /* How many chunks we have KVA for */ 238vm_offset_t pv_vafree; /* freelist stored in the PTE */ 239 240/* 241 * All those kernel PT submaps that BSD is so fond of 242 */ 243struct sysmaps { 244 struct mtx lock; 245 pt_entry_t *CMAP1; 246 pt_entry_t *CMAP2; 247 caddr_t CADDR1; 248 caddr_t CADDR2; 249}; 250static struct sysmaps sysmaps_pcpu[MAXCPU]; 251static pt_entry_t *CMAP3; 252caddr_t ptvmmap = 0; 253static caddr_t CADDR3; 254struct msgbuf *msgbufp = 0; 255 256/* 257 * Crashdump maps. 
258 */ 259static caddr_t crashdumpmap; 260 261static pt_entry_t *PMAP1 = 0, *PMAP2; 262static pt_entry_t *PADDR1 = 0, *PADDR2; 263#ifdef SMP 264static int PMAP1cpu; 265static int PMAP1changedcpu; 266SYSCTL_INT(_debug, OID_AUTO, PMAP1changedcpu, CTLFLAG_RD, 267 &PMAP1changedcpu, 0, 268 "Number of times pmap_pte_quick changed CPU with same PMAP1"); 269#endif 270static int PMAP1changed; 271SYSCTL_INT(_debug, OID_AUTO, PMAP1changed, CTLFLAG_RD, 272 &PMAP1changed, 0, 273 "Number of times pmap_pte_quick changed PMAP1"); 274static int PMAP1unchanged; 275SYSCTL_INT(_debug, OID_AUTO, PMAP1unchanged, CTLFLAG_RD, 276 &PMAP1unchanged, 0, 277 "Number of times pmap_pte_quick didn't change PMAP1"); 278static struct mtx PMAP2mutex; 279 280static void free_pv_chunk(struct pv_chunk *pc); 281static void free_pv_entry(pmap_t pmap, pv_entry_t pv); 282static pv_entry_t get_pv_entry(pmap_t pmap, boolean_t try); 283static void pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va); 284static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, 285 vm_offset_t va); 286 287static vm_page_t pmap_enter_quick_locked(multicall_entry_t **mcl, int *count, pmap_t pmap, vm_offset_t va, 288 vm_page_t m, vm_prot_t prot, vm_page_t mpte); 289static void pmap_flush_page(vm_page_t m); 290static void pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode); 291static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t sva, 292 vm_page_t *free); 293static void pmap_remove_page(struct pmap *pmap, vm_offset_t va, 294 vm_page_t *free); 295static void pmap_remove_entry(struct pmap *pmap, vm_page_t m, 296 vm_offset_t va); 297static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, 298 vm_page_t m); 299 300static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags); 301 302static vm_page_t _pmap_allocpte(pmap_t pmap, u_int ptepindex, int flags); 303static int _pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m, vm_page_t *free); 304static pt_entry_t *pmap_pte_quick(pmap_t pmap, vm_offset_t va); 305static void pmap_pte_release(pt_entry_t *pte); 306static int pmap_unuse_pt(pmap_t, vm_offset_t, vm_page_t *); 307static boolean_t pmap_is_prefaultable_locked(pmap_t pmap, vm_offset_t addr); 308 309static __inline void pagezero(void *page); 310 311CTASSERT(1 << PDESHIFT == sizeof(pd_entry_t)); 312CTASSERT(1 << PTESHIFT == sizeof(pt_entry_t)); 313 314/* 315 * If you get an error here, then you set KVA_PAGES wrong! See the 316 * description of KVA_PAGES in sys/i386/include/pmap.h. It must be 317 * multiple of 4 for a normal kernel, or a multiple of 8 for a PAE. 318 */ 319CTASSERT(KERNBASE % (1 << 24) == 0); 320 321void 322pd_set(struct pmap *pmap, int ptepindex, vm_paddr_t val, int type) 323{ 324 vm_paddr_t pdir_ma = vtomach(&pmap->pm_pdir[ptepindex]); 325 326 switch (type) { 327 case SH_PD_SET_VA: 328#if 0 329 xen_queue_pt_update(shadow_pdir_ma, 330 xpmap_ptom(val & ~(PG_RW))); 331#endif 332 xen_queue_pt_update(pdir_ma, 333 xpmap_ptom(val)); 334 break; 335 case SH_PD_SET_VA_MA: 336#if 0 337 xen_queue_pt_update(shadow_pdir_ma, 338 val & ~(PG_RW)); 339#endif 340 xen_queue_pt_update(pdir_ma, val); 341 break; 342 case SH_PD_SET_VA_CLEAR: 343#if 0 344 xen_queue_pt_update(shadow_pdir_ma, 0); 345#endif 346 xen_queue_pt_update(pdir_ma, 0); 347 break; 348 } 349} 350 351/* 352 * Bootstrap the system enough to run with virtual memory. 353 * 354 * On the i386 this is called after mapping has already been enabled 355 * and just syncs the pmap module with what has already been done. 
356 * [We can't call it easily with mapping off since the kernel is not 357 * mapped with PA == VA, hence we would have to relocate every address 358 * from the linked base (virtual) address "KERNBASE" to the actual 359 * (physical) address starting relative to 0] 360 */ 361void 362pmap_bootstrap(vm_paddr_t firstaddr) 363{ 364 vm_offset_t va; 365 pt_entry_t *pte, *unused; 366 struct sysmaps *sysmaps; 367 int i; 368 369 /* 370 * Initialize the first available kernel virtual address. However, 371 * using "firstaddr" may waste a few pages of the kernel virtual 372 * address space, because locore may not have mapped every physical 373 * page that it allocated. Preferably, locore would provide a first 374 * unused virtual address in addition to "firstaddr". 375 */ 376 virtual_avail = (vm_offset_t) KERNBASE + firstaddr; 377 378 virtual_end = VM_MAX_KERNEL_ADDRESS; 379 380 /* 381 * Initialize the kernel pmap (which is statically allocated). 382 */ 383 PMAP_LOCK_INIT(kernel_pmap); 384 kernel_pmap->pm_pdir = (pd_entry_t *) (KERNBASE + (u_int)IdlePTD); 385#ifdef PAE 386 kernel_pmap->pm_pdpt = (pdpt_entry_t *) (KERNBASE + (u_int)IdlePDPT); 387#endif 388 CPU_FILL(&kernel_pmap->pm_active); /* don't allow deactivation */ 389 TAILQ_INIT(&kernel_pmap->pm_pvchunk); 390 LIST_INIT(&allpmaps); 391 mtx_init(&allpmaps_lock, "allpmaps", NULL, MTX_SPIN); 392 mtx_lock_spin(&allpmaps_lock); 393 LIST_INSERT_HEAD(&allpmaps, kernel_pmap, pm_list); 394 mtx_unlock_spin(&allpmaps_lock); 395 if (nkpt == 0) 396 nkpt = NKPT; 397 398 /* 399 * Reserve some special page table entries/VA space for temporary 400 * mapping of pages. 401 */ 402#define SYSMAP(c, p, v, n) \ 403 v = (c)va; va += ((n)*PAGE_SIZE); p = pte; pte += (n); 404 405 va = virtual_avail; 406 pte = vtopte(va); 407 408 /* 409 * CMAP1/CMAP2 are used for zeroing and copying pages. 410 * CMAP3 is used for the idle process page zeroing. 411 */ 412 for (i = 0; i < MAXCPU; i++) { 413 sysmaps = &sysmaps_pcpu[i]; 414 mtx_init(&sysmaps->lock, "SYSMAPS", NULL, MTX_DEF); 415 SYSMAP(caddr_t, sysmaps->CMAP1, sysmaps->CADDR1, 1) 416 SYSMAP(caddr_t, sysmaps->CMAP2, sysmaps->CADDR2, 1) 417 PT_SET_MA(sysmaps->CADDR1, 0); 418 PT_SET_MA(sysmaps->CADDR2, 0); 419 } 420 SYSMAP(caddr_t, CMAP3, CADDR3, 1) 421 PT_SET_MA(CADDR3, 0); 422 423 /* 424 * Crashdump maps. 425 */ 426 SYSMAP(caddr_t, unused, crashdumpmap, MAXDUMPPGS) 427 428 /* 429 * ptvmmap is used for reading arbitrary physical pages via /dev/mem. 430 */ 431 SYSMAP(caddr_t, unused, ptvmmap, 1) 432 433 /* 434 * msgbufp is used to map the system message buffer. 435 */ 436 SYSMAP(struct msgbuf *, unused, msgbufp, atop(round_page(msgbufsize))) 437 438 /* 439 * ptemap is used for pmap_pte_quick 440 */ 441 SYSMAP(pt_entry_t *, PMAP1, PADDR1, 1) 442 SYSMAP(pt_entry_t *, PMAP2, PADDR2, 1) 443 444 mtx_init(&PMAP2mutex, "PMAP2", NULL, MTX_DEF); 445 446 virtual_avail = va; 447 448 /* 449 * Leave in place an identity mapping (virt == phys) for the low 1 MB 450 * physical memory region that is used by the ACPI wakeup code. This 451 * mapping must not have PG_G set. 452 */ 453#ifndef XEN 454 /* 455 * leave here deliberately to show that this is not supported 456 */ 457#ifdef XBOX 458 /* FIXME: This is gross, but needed for the XBOX. Since we are in such 459 * an early stadium, we cannot yet neatly map video memory ... :-( 460 * Better fixes are very welcome! */ 461 if (!arch_i386_is_xbox) 462#endif 463 for (i = 1; i < NKPT; i++) 464 PTD[i] = 0; 465 466 /* Initialize the PAT MSR if present. 
 */
	pmap_init_pat();

	/* Turn on PG_G on kernel page(s) */
	pmap_set_pg();
#endif

#ifdef HAMFISTED_LOCKING
	mtx_init(&createdelete_lock, "pmap create/delete", NULL, MTX_DEF);
#endif
}

/*
 * Set up the PAT MSR.
 */
void
pmap_init_pat(void)
{
	uint64_t pat_msr;

	/* Bail if this CPU doesn't implement PAT. */
	if (!(cpu_feature & CPUID_PAT))
		return;

	if (cpu_vendor_id != CPU_VENDOR_INTEL ||
	    (CPUID_TO_FAMILY(cpu_id) == 6 && CPUID_TO_MODEL(cpu_id) >= 0xe)) {
		/*
		 * Leave the indices 0-3 at the default of WB, WT, UC, and UC-.
		 * Program 4 and 5 as WP and WC.
		 * Leave 6 and 7 as UC and UC-.
		 */
		pat_msr = rdmsr(MSR_PAT);
		pat_msr &= ~(PAT_MASK(4) | PAT_MASK(5));
		pat_msr |= PAT_VALUE(4, PAT_WRITE_PROTECTED) |
		    PAT_VALUE(5, PAT_WRITE_COMBINING);
		pat_works = 1;
	} else {
		/*
		 * Due to some Intel errata, we can only safely use the lower 4
		 * PAT entries.  Thus, just replace PAT Index 2 with WC instead
		 * of UC-.
		 *
		 *   Intel Pentium III Processor Specification Update
		 *   Errata E.27 (Upper Four PAT Entries Not Usable With Mode B
		 *   or Mode C Paging)
		 *
		 *   Intel Pentium IV Processor Specification Update
		 *   Errata N46 (PAT Index MSB May Be Calculated Incorrectly)
		 */
		pat_msr = rdmsr(MSR_PAT);
		pat_msr &= ~PAT_MASK(2);
		pat_msr |= PAT_VALUE(2, PAT_WRITE_COMBINING);
		pat_works = 0;
	}
	wrmsr(MSR_PAT, pat_msr);
}

/*
 * Initialize a vm_page's machine-dependent fields.
 */
void
pmap_page_init(vm_page_t m)
{

	TAILQ_INIT(&m->md.pv_list);
	m->md.pat_mode = PAT_WRITE_BACK;
}

/*
 * Abuse the pte nodes for unmapped kva to thread a kva freelist through.
 * Requirements:
 *  - Must deal with pages in order to ensure that none of the PG_* bits
 *    are ever set, PG_V in particular.
 *  - Assumes we can write to ptes without pte_store() atomic ops, even
 *    on PAE systems.  This should be ok.
 *  - Assumes nothing will ever test these addresses for 0 to indicate
 *    no mapping instead of correctly checking PG_V.
 *  - Assumes a vm_offset_t will fit in a pte (true for i386).
 * Because PG_V is never set, there can be no mappings to invalidate.
 */
static int ptelist_count = 0;
static vm_offset_t
pmap_ptelist_alloc(vm_offset_t *head)
{
	vm_offset_t va;
	vm_offset_t *phead = (vm_offset_t *)*head;

	if (ptelist_count == 0) {
		printf("out of memory!!!!!!\n");
		return (0); /* Out of memory */
	}
	ptelist_count--;
	va = phead[ptelist_count];
	return (va);
}

static void
pmap_ptelist_free(vm_offset_t *head, vm_offset_t va)
{
	vm_offset_t *phead = (vm_offset_t *)*head;

	phead[ptelist_count++] = va;
}

static void
pmap_ptelist_init(vm_offset_t *head, void *base, int npages)
{
	int i, nstackpages;
	vm_offset_t va;
	vm_page_t m;

	/*
	 * The first nstackpages pages of the range are mapped with real,
	 * zeroed pages and hold the freelist array itself.
	 */
	nstackpages = (npages + PAGE_SIZE/sizeof(vm_offset_t) - 1)/ (PAGE_SIZE/sizeof(vm_offset_t));
	for (i = 0; i < nstackpages; i++) {
		va = (vm_offset_t)base + i * PAGE_SIZE;
		m = vm_page_alloc(NULL, i,
		    VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
		    VM_ALLOC_ZERO);
		pmap_qenter(va, &m, 1);
	}

	/* The remaining addresses are handed to the freelist unmapped. */
	*head = (vm_offset_t)base;
	for (i = npages - 1; i >= nstackpages; i--) {
		va = (vm_offset_t)base + i * PAGE_SIZE;
		pmap_ptelist_free(head, va);
	}
}


/*
 * Initialize the pmap module.
596 * Called by vm_init, to initialize any structures that the pmap 597 * system needs to map virtual memory. 598 */ 599void 600pmap_init(void) 601{ 602 603 /* 604 * Initialize the address space (zone) for the pv entries. Set a 605 * high water mark so that the system can recover from excessive 606 * numbers of pv entries. 607 */ 608 TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc); 609 pv_entry_max = shpgperproc * maxproc + cnt.v_page_count; 610 TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max); 611 pv_entry_max = roundup(pv_entry_max, _NPCPV); 612 pv_entry_high_water = 9 * (pv_entry_max / 10); 613 614 pv_maxchunks = MAX(pv_entry_max / _NPCPV, maxproc); 615 pv_chunkbase = (struct pv_chunk *)kmem_alloc_nofault(kernel_map, 616 PAGE_SIZE * pv_maxchunks); 617 if (pv_chunkbase == NULL) 618 panic("pmap_init: not enough kvm for pv chunks"); 619 pmap_ptelist_init(&pv_vafree, pv_chunkbase, pv_maxchunks); 620} 621 622 623SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_max, CTLFLAG_RD, &pv_entry_max, 0, 624 "Max number of PV entries"); 625SYSCTL_INT(_vm_pmap, OID_AUTO, shpgperproc, CTLFLAG_RD, &shpgperproc, 0, 626 "Page share factor per proc"); 627 628static SYSCTL_NODE(_vm_pmap, OID_AUTO, pde, CTLFLAG_RD, 0, 629 "2/4MB page mapping counters"); 630 631static u_long pmap_pde_mappings; 632SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, mappings, CTLFLAG_RD, 633 &pmap_pde_mappings, 0, "2/4MB page mappings"); 634 635/*************************************************** 636 * Low level helper routines..... 637 ***************************************************/ 638 639/* 640 * Determine the appropriate bits to set in a PTE or PDE for a specified 641 * caching mode. 642 */ 643int 644pmap_cache_bits(int mode, boolean_t is_pde) 645{ 646 int pat_flag, pat_index, cache_bits; 647 648 /* The PAT bit is different for PTE's and PDE's. */ 649 pat_flag = is_pde ? PG_PDE_PAT : PG_PTE_PAT; 650 651 /* If we don't support PAT, map extended modes to older ones. */ 652 if (!(cpu_feature & CPUID_PAT)) { 653 switch (mode) { 654 case PAT_UNCACHEABLE: 655 case PAT_WRITE_THROUGH: 656 case PAT_WRITE_BACK: 657 break; 658 case PAT_UNCACHED: 659 case PAT_WRITE_COMBINING: 660 case PAT_WRITE_PROTECTED: 661 mode = PAT_UNCACHEABLE; 662 break; 663 } 664 } 665 666 /* Map the caching mode to a PAT index. */ 667 if (pat_works) { 668 switch (mode) { 669 case PAT_UNCACHEABLE: 670 pat_index = 3; 671 break; 672 case PAT_WRITE_THROUGH: 673 pat_index = 1; 674 break; 675 case PAT_WRITE_BACK: 676 pat_index = 0; 677 break; 678 case PAT_UNCACHED: 679 pat_index = 2; 680 break; 681 case PAT_WRITE_COMBINING: 682 pat_index = 5; 683 break; 684 case PAT_WRITE_PROTECTED: 685 pat_index = 4; 686 break; 687 default: 688 panic("Unknown caching mode %d\n", mode); 689 } 690 } else { 691 switch (mode) { 692 case PAT_UNCACHED: 693 case PAT_UNCACHEABLE: 694 case PAT_WRITE_PROTECTED: 695 pat_index = 3; 696 break; 697 case PAT_WRITE_THROUGH: 698 pat_index = 1; 699 break; 700 case PAT_WRITE_BACK: 701 pat_index = 0; 702 break; 703 case PAT_WRITE_COMBINING: 704 pat_index = 2; 705 break; 706 default: 707 panic("Unknown caching mode %d\n", mode); 708 } 709 } 710 711 /* Map the 3-bit index value into the PAT, PCD, and PWT bits. */ 712 cache_bits = 0; 713 if (pat_index & 0x4) 714 cache_bits |= pat_flag; 715 if (pat_index & 0x2) 716 cache_bits |= PG_NC_PCD; 717 if (pat_index & 0x1) 718 cache_bits |= PG_NC_PWT; 719 return (cache_bits); 720} 721#ifdef SMP 722/* 723 * For SMP, these functions have to use the IPI mechanism for coherence. 
 *
 * N.B.: Before calling any of the following TLB invalidation functions,
 * the calling processor must ensure that all stores updating a non-
 * kernel page table are globally performed.  Otherwise, another
 * processor could cache an old, pre-update entry without being
 * invalidated.  This can happen one of two ways: (1) The pmap becomes
 * active on another processor after its pm_active field is checked by
 * one of the following functions but before a store updating the page
 * table is globally performed. (2) The pmap becomes active on another
 * processor before its pm_active field is checked but due to
 * speculative loads one of the following functions still reads the
 * pmap as inactive on the other processor.
 *
 * The kernel page table is exempt because its pm_active field is
 * immutable.  The kernel page table is always active on every
 * processor.
 */
void
pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
{
	cpuset_t other_cpus;
	u_int cpuid;

	CTR2(KTR_PMAP, "pmap_invalidate_page: pmap=%p va=0x%x",
	    pmap, va);

	sched_pin();
	if (pmap == kernel_pmap || !CPU_CMP(&pmap->pm_active, &all_cpus)) {
		invlpg(va);
		smp_invlpg(va);
	} else {
		cpuid = PCPU_GET(cpuid);
		other_cpus = all_cpus;
		CPU_CLR(cpuid, &other_cpus);
		if (CPU_ISSET(cpuid, &pmap->pm_active))
			invlpg(va);
		CPU_AND(&other_cpus, &pmap->pm_active);
		if (!CPU_EMPTY(&other_cpus))
			smp_masked_invlpg(other_cpus, va);
	}
	sched_unpin();
	PT_UPDATES_FLUSH();
}

void
pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{
	cpuset_t other_cpus;
	vm_offset_t addr;
	u_int cpuid;

	CTR3(KTR_PMAP, "pmap_invalidate_range: pmap=%p sva=0x%x eva=0x%x",
	    pmap, sva, eva);

	sched_pin();
	if (pmap == kernel_pmap || !CPU_CMP(&pmap->pm_active, &all_cpus)) {
		for (addr = sva; addr < eva; addr += PAGE_SIZE)
			invlpg(addr);
		smp_invlpg_range(sva, eva);
	} else {
		cpuid = PCPU_GET(cpuid);
		other_cpus = all_cpus;
		CPU_CLR(cpuid, &other_cpus);
		if (CPU_ISSET(cpuid, &pmap->pm_active))
			for (addr = sva; addr < eva; addr += PAGE_SIZE)
				invlpg(addr);
		CPU_AND(&other_cpus, &pmap->pm_active);
		if (!CPU_EMPTY(&other_cpus))
			smp_masked_invlpg_range(other_cpus, sva, eva);
	}
	sched_unpin();
	PT_UPDATES_FLUSH();
}

void
pmap_invalidate_all(pmap_t pmap)
{
	cpuset_t other_cpus;
	u_int cpuid;

	CTR1(KTR_PMAP, "pmap_invalidate_all: pmap=%p", pmap);

	sched_pin();
	if (pmap == kernel_pmap || !CPU_CMP(&pmap->pm_active, &all_cpus)) {
		invltlb();
		smp_invltlb();
	} else {
		cpuid = PCPU_GET(cpuid);
		other_cpus = all_cpus;
		CPU_CLR(cpuid, &other_cpus);
		if (CPU_ISSET(cpuid, &pmap->pm_active))
			invltlb();
		CPU_AND(&other_cpus, &pmap->pm_active);
		if (!CPU_EMPTY(&other_cpus))
			smp_masked_invltlb(other_cpus);
	}
	sched_unpin();
}

void
pmap_invalidate_cache(void)
{

	sched_pin();
	wbinvd();
	smp_cache_flush();
	sched_unpin();
}
#else /* !SMP */
/*
 * Normal, non-SMP, 486+ invalidation functions.
 * We inline these within pmap.c for speed.
836 */ 837PMAP_INLINE void 838pmap_invalidate_page(pmap_t pmap, vm_offset_t va) 839{ 840 CTR2(KTR_PMAP, "pmap_invalidate_page: pmap=%p va=0x%x", 841 pmap, va); 842 843 if (pmap == kernel_pmap || !CPU_EMPTY(&pmap->pm_active)) 844 invlpg(va); 845 PT_UPDATES_FLUSH(); 846} 847 848PMAP_INLINE void 849pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) 850{ 851 vm_offset_t addr; 852 853 if (eva - sva > PAGE_SIZE) 854 CTR3(KTR_PMAP, "pmap_invalidate_range: pmap=%p sva=0x%x eva=0x%x", 855 pmap, sva, eva); 856 857 if (pmap == kernel_pmap || !CPU_EMPTY(&pmap->pm_active)) 858 for (addr = sva; addr < eva; addr += PAGE_SIZE) 859 invlpg(addr); 860 PT_UPDATES_FLUSH(); 861} 862 863PMAP_INLINE void 864pmap_invalidate_all(pmap_t pmap) 865{ 866 867 CTR1(KTR_PMAP, "pmap_invalidate_all: pmap=%p", pmap); 868 869 if (pmap == kernel_pmap || !CPU_EMPTY(&pmap->pm_active)) 870 invltlb(); 871} 872 873PMAP_INLINE void 874pmap_invalidate_cache(void) 875{ 876 877 wbinvd(); 878} 879#endif /* !SMP */ 880 881#define PMAP_CLFLUSH_THRESHOLD (2 * 1024 * 1024) 882 883void 884pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva) 885{ 886 887 KASSERT((sva & PAGE_MASK) == 0, 888 ("pmap_invalidate_cache_range: sva not page-aligned")); 889 KASSERT((eva & PAGE_MASK) == 0, 890 ("pmap_invalidate_cache_range: eva not page-aligned")); 891 892 if (cpu_feature & CPUID_SS) 893 ; /* If "Self Snoop" is supported, do nothing. */ 894 else if ((cpu_feature & CPUID_CLFSH) != 0 && 895 eva - sva < PMAP_CLFLUSH_THRESHOLD) { 896 897 /* 898 * Otherwise, do per-cache line flush. Use the mfence 899 * instruction to insure that previous stores are 900 * included in the write-back. The processor 901 * propagates flush to other processors in the cache 902 * coherence domain. 903 */ 904 mfence(); 905 for (; sva < eva; sva += cpu_clflush_line_size) 906 clflush(sva); 907 mfence(); 908 } else { 909 910 /* 911 * No targeted cache flush methods are supported by CPU, 912 * or the supplied range is bigger than 2MB. 913 * Globally invalidate cache. 914 */ 915 pmap_invalidate_cache(); 916 } 917} 918 919void 920pmap_invalidate_cache_pages(vm_page_t *pages, int count) 921{ 922 int i; 923 924 if (count >= PMAP_CLFLUSH_THRESHOLD / PAGE_SIZE || 925 (cpu_feature & CPUID_CLFSH) == 0) { 926 pmap_invalidate_cache(); 927 } else { 928 for (i = 0; i < count; i++) 929 pmap_flush_page(pages[i]); 930 } 931} 932 933/* 934 * Are we current address space or kernel? N.B. We return FALSE when 935 * a pmap's page table is in use because a kernel thread is borrowing 936 * it. The borrowed page table can change spontaneously, making any 937 * dependence on its continued use subject to a race condition. 938 */ 939static __inline int 940pmap_is_current(pmap_t pmap) 941{ 942 943 return (pmap == kernel_pmap || 944 (pmap == vmspace_pmap(curthread->td_proc->p_vmspace) && 945 (pmap->pm_pdir[PTDPTDI] & PG_FRAME) == (PTDpde[0] & PG_FRAME))); 946} 947 948/* 949 * If the given pmap is not the current or kernel pmap, the returned pte must 950 * be released by passing it to pmap_pte_release(). 951 */ 952pt_entry_t * 953pmap_pte(pmap_t pmap, vm_offset_t va) 954{ 955 pd_entry_t newpf; 956 pd_entry_t *pde; 957 958 pde = pmap_pde(pmap, va); 959 if (*pde & PG_PS) 960 return (pde); 961 if (*pde != 0) { 962 /* are we current address space or kernel? 
*/ 963 if (pmap_is_current(pmap)) 964 return (vtopte(va)); 965 mtx_lock(&PMAP2mutex); 966 newpf = *pde & PG_FRAME; 967 if ((*PMAP2 & PG_FRAME) != newpf) { 968 vm_page_lock_queues(); 969 PT_SET_MA(PADDR2, newpf | PG_V | PG_A | PG_M); 970 vm_page_unlock_queues(); 971 CTR3(KTR_PMAP, "pmap_pte: pmap=%p va=0x%x newpte=0x%08x", 972 pmap, va, (*PMAP2 & 0xffffffff)); 973 } 974 return (PADDR2 + (i386_btop(va) & (NPTEPG - 1))); 975 } 976 return (NULL); 977} 978 979/* 980 * Releases a pte that was obtained from pmap_pte(). Be prepared for the pte 981 * being NULL. 982 */ 983static __inline void 984pmap_pte_release(pt_entry_t *pte) 985{ 986 987 if ((pt_entry_t *)((vm_offset_t)pte & ~PAGE_MASK) == PADDR2) { 988 CTR1(KTR_PMAP, "pmap_pte_release: pte=0x%jx", 989 *PMAP2); 990 vm_page_lock_queues(); 991 PT_SET_VA(PMAP2, 0, TRUE); 992 vm_page_unlock_queues(); 993 mtx_unlock(&PMAP2mutex); 994 } 995} 996 997static __inline void 998invlcaddr(void *caddr) 999{ 1000 1001 invlpg((u_int)caddr); 1002 PT_UPDATES_FLUSH(); 1003} 1004 1005/* 1006 * Super fast pmap_pte routine best used when scanning 1007 * the pv lists. This eliminates many coarse-grained 1008 * invltlb calls. Note that many of the pv list 1009 * scans are across different pmaps. It is very wasteful 1010 * to do an entire invltlb for checking a single mapping. 1011 * 1012 * If the given pmap is not the current pmap, vm_page_queue_mtx 1013 * must be held and curthread pinned to a CPU. 1014 */ 1015static pt_entry_t * 1016pmap_pte_quick(pmap_t pmap, vm_offset_t va) 1017{ 1018 pd_entry_t newpf; 1019 pd_entry_t *pde; 1020 1021 pde = pmap_pde(pmap, va); 1022 if (*pde & PG_PS) 1023 return (pde); 1024 if (*pde != 0) { 1025 /* are we current address space or kernel? */ 1026 if (pmap_is_current(pmap)) 1027 return (vtopte(va)); 1028 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 1029 KASSERT(curthread->td_pinned > 0, ("curthread not pinned")); 1030 newpf = *pde & PG_FRAME; 1031 if ((*PMAP1 & PG_FRAME) != newpf) { 1032 PT_SET_MA(PADDR1, newpf | PG_V | PG_A | PG_M); 1033 CTR3(KTR_PMAP, "pmap_pte_quick: pmap=%p va=0x%x newpte=0x%08x", 1034 pmap, va, (u_long)*PMAP1); 1035 1036#ifdef SMP 1037 PMAP1cpu = PCPU_GET(cpuid); 1038#endif 1039 PMAP1changed++; 1040 } else 1041#ifdef SMP 1042 if (PMAP1cpu != PCPU_GET(cpuid)) { 1043 PMAP1cpu = PCPU_GET(cpuid); 1044 invlcaddr(PADDR1); 1045 PMAP1changedcpu++; 1046 } else 1047#endif 1048 PMAP1unchanged++; 1049 return (PADDR1 + (i386_btop(va) & (NPTEPG - 1))); 1050 } 1051 return (0); 1052} 1053 1054/* 1055 * Routine: pmap_extract 1056 * Function: 1057 * Extract the physical page address associated 1058 * with the given map/virtual_address pair. 1059 */ 1060vm_paddr_t 1061pmap_extract(pmap_t pmap, vm_offset_t va) 1062{ 1063 vm_paddr_t rtval; 1064 pt_entry_t *pte; 1065 pd_entry_t pde; 1066 pt_entry_t pteval; 1067 1068 rtval = 0; 1069 PMAP_LOCK(pmap); 1070 pde = pmap->pm_pdir[va >> PDRSHIFT]; 1071 if (pde != 0) { 1072 if ((pde & PG_PS) != 0) { 1073 rtval = xpmap_mtop(pde & PG_PS_FRAME) | (va & PDRMASK); 1074 PMAP_UNLOCK(pmap); 1075 return rtval; 1076 } 1077 pte = pmap_pte(pmap, va); 1078 pteval = *pte ? 
xpmap_mtop(*pte) : 0; 1079 rtval = (pteval & PG_FRAME) | (va & PAGE_MASK); 1080 pmap_pte_release(pte); 1081 } 1082 PMAP_UNLOCK(pmap); 1083 return (rtval); 1084} 1085 1086/* 1087 * Routine: pmap_extract_ma 1088 * Function: 1089 * Like pmap_extract, but returns machine address 1090 */ 1091vm_paddr_t 1092pmap_extract_ma(pmap_t pmap, vm_offset_t va) 1093{ 1094 vm_paddr_t rtval; 1095 pt_entry_t *pte; 1096 pd_entry_t pde; 1097 1098 rtval = 0; 1099 PMAP_LOCK(pmap); 1100 pde = pmap->pm_pdir[va >> PDRSHIFT]; 1101 if (pde != 0) { 1102 if ((pde & PG_PS) != 0) { 1103 rtval = (pde & ~PDRMASK) | (va & PDRMASK); 1104 PMAP_UNLOCK(pmap); 1105 return rtval; 1106 } 1107 pte = pmap_pte(pmap, va); 1108 rtval = (*pte & PG_FRAME) | (va & PAGE_MASK); 1109 pmap_pte_release(pte); 1110 } 1111 PMAP_UNLOCK(pmap); 1112 return (rtval); 1113} 1114 1115/* 1116 * Routine: pmap_extract_and_hold 1117 * Function: 1118 * Atomically extract and hold the physical page 1119 * with the given pmap and virtual address pair 1120 * if that mapping permits the given protection. 1121 */ 1122vm_page_t 1123pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot) 1124{ 1125 pd_entry_t pde; 1126 pt_entry_t pte, *ptep; 1127 vm_page_t m; 1128 vm_paddr_t pa; 1129 1130 pa = 0; 1131 m = NULL; 1132 PMAP_LOCK(pmap); 1133retry: 1134 pde = PT_GET(pmap_pde(pmap, va)); 1135 if (pde != 0) { 1136 if (pde & PG_PS) { 1137 if ((pde & PG_RW) || (prot & VM_PROT_WRITE) == 0) { 1138 if (vm_page_pa_tryrelock(pmap, (pde & 1139 PG_PS_FRAME) | (va & PDRMASK), &pa)) 1140 goto retry; 1141 m = PHYS_TO_VM_PAGE((pde & PG_PS_FRAME) | 1142 (va & PDRMASK)); 1143 vm_page_hold(m); 1144 } 1145 } else { 1146 ptep = pmap_pte(pmap, va); 1147 pte = PT_GET(ptep); 1148 pmap_pte_release(ptep); 1149 if (pte != 0 && 1150 ((pte & PG_RW) || (prot & VM_PROT_WRITE) == 0)) { 1151 if (vm_page_pa_tryrelock(pmap, pte & PG_FRAME, 1152 &pa)) 1153 goto retry; 1154 m = PHYS_TO_VM_PAGE(pte & PG_FRAME); 1155 vm_page_hold(m); 1156 } 1157 } 1158 } 1159 PA_UNLOCK_COND(pa); 1160 PMAP_UNLOCK(pmap); 1161 return (m); 1162} 1163 1164/*************************************************** 1165 * Low level mapping routines..... 1166 ***************************************************/ 1167 1168/* 1169 * Add a wired page to the kva. 1170 * Note: not SMP coherent. 1171 * 1172 * This function may be used before pmap_bootstrap() is called. 1173 */ 1174void 1175pmap_kenter(vm_offset_t va, vm_paddr_t pa) 1176{ 1177 1178 PT_SET_MA(va, xpmap_ptom(pa)| PG_RW | PG_V | pgeflag); 1179} 1180 1181void 1182pmap_kenter_ma(vm_offset_t va, vm_paddr_t ma) 1183{ 1184 pt_entry_t *pte; 1185 1186 pte = vtopte(va); 1187 pte_store_ma(pte, ma | PG_RW | PG_V | pgeflag); 1188} 1189 1190static __inline void 1191pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode) 1192{ 1193 1194 PT_SET_MA(va, pa | PG_RW | PG_V | pgeflag | pmap_cache_bits(mode, 0)); 1195} 1196 1197/* 1198 * Remove a page from the kernel pagetables. 1199 * Note: not SMP coherent. 1200 * 1201 * This function may be used before pmap_bootstrap() is called. 1202 */ 1203PMAP_INLINE void 1204pmap_kremove(vm_offset_t va) 1205{ 1206 pt_entry_t *pte; 1207 1208 pte = vtopte(va); 1209 PT_CLEAR_VA(pte, FALSE); 1210} 1211 1212/* 1213 * Used to map a range of physical addresses into kernel 1214 * virtual address space. 1215 * 1216 * The value passed in '*virt' is a suggested virtual address for 1217 * the mapping. 
Architectures which can support a direct-mapped 1218 * physical to virtual region can return the appropriate address 1219 * within that region, leaving '*virt' unchanged. Other 1220 * architectures should map the pages starting at '*virt' and 1221 * update '*virt' with the first usable address after the mapped 1222 * region. 1223 */ 1224vm_offset_t 1225pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot) 1226{ 1227 vm_offset_t va, sva; 1228 1229 va = sva = *virt; 1230 CTR4(KTR_PMAP, "pmap_map: va=0x%x start=0x%jx end=0x%jx prot=0x%x", 1231 va, start, end, prot); 1232 while (start < end) { 1233 pmap_kenter(va, start); 1234 va += PAGE_SIZE; 1235 start += PAGE_SIZE; 1236 } 1237 pmap_invalidate_range(kernel_pmap, sva, va); 1238 *virt = va; 1239 return (sva); 1240} 1241 1242 1243/* 1244 * Add a list of wired pages to the kva 1245 * this routine is only used for temporary 1246 * kernel mappings that do not need to have 1247 * page modification or references recorded. 1248 * Note that old mappings are simply written 1249 * over. The page *must* be wired. 1250 * Note: SMP coherent. Uses a ranged shootdown IPI. 1251 */ 1252void 1253pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count) 1254{ 1255 pt_entry_t *endpte, *pte; 1256 vm_paddr_t pa; 1257 vm_offset_t va = sva; 1258 int mclcount = 0; 1259 multicall_entry_t mcl[16]; 1260 multicall_entry_t *mclp = mcl; 1261 int error; 1262 1263 CTR2(KTR_PMAP, "pmap_qenter:sva=0x%x count=%d", va, count); 1264 pte = vtopte(sva); 1265 endpte = pte + count; 1266 while (pte < endpte) { 1267 pa = VM_PAGE_TO_MACH(*ma) | pgeflag | PG_RW | PG_V | PG_M | PG_A; 1268 1269 mclp->op = __HYPERVISOR_update_va_mapping; 1270 mclp->args[0] = va; 1271 mclp->args[1] = (uint32_t)(pa & 0xffffffff); 1272 mclp->args[2] = (uint32_t)(pa >> 32); 1273 mclp->args[3] = (*pte & PG_V) ? UVMF_INVLPG|UVMF_ALL : 0; 1274 1275 va += PAGE_SIZE; 1276 pte++; 1277 ma++; 1278 mclp++; 1279 mclcount++; 1280 if (mclcount == 16) { 1281 error = HYPERVISOR_multicall(mcl, mclcount); 1282 mclp = mcl; 1283 mclcount = 0; 1284 KASSERT(error == 0, ("bad multicall %d", error)); 1285 } 1286 } 1287 if (mclcount) { 1288 error = HYPERVISOR_multicall(mcl, mclcount); 1289 KASSERT(error == 0, ("bad multicall %d", error)); 1290 } 1291 1292#ifdef INVARIANTS 1293 for (pte = vtopte(sva), mclcount = 0; mclcount < count; mclcount++, pte++) 1294 KASSERT(*pte, ("pte not set for va=0x%x", sva + mclcount*PAGE_SIZE)); 1295#endif 1296} 1297 1298/* 1299 * This routine tears out page mappings from the 1300 * kernel -- it is meant only for temporary mappings. 1301 * Note: SMP coherent. Uses a ranged shootdown IPI. 1302 */ 1303void 1304pmap_qremove(vm_offset_t sva, int count) 1305{ 1306 vm_offset_t va; 1307 1308 CTR2(KTR_PMAP, "pmap_qremove: sva=0x%x count=%d", sva, count); 1309 va = sva; 1310 vm_page_lock_queues(); 1311 critical_enter(); 1312 while (count-- > 0) { 1313 pmap_kremove(va); 1314 va += PAGE_SIZE; 1315 } 1316 PT_UPDATES_FLUSH(); 1317 pmap_invalidate_range(kernel_pmap, sva, va); 1318 critical_exit(); 1319 vm_page_unlock_queues(); 1320} 1321 1322/*************************************************** 1323 * Page table page management routines..... 
1324 ***************************************************/ 1325static __inline void 1326pmap_free_zero_pages(vm_page_t free) 1327{ 1328 vm_page_t m; 1329 1330 while (free != NULL) { 1331 m = free; 1332 free = m->right; 1333 vm_page_free_zero(m); 1334 } 1335} 1336 1337/* 1338 * This routine unholds page table pages, and if the hold count 1339 * drops to zero, then it decrements the wire count. 1340 */ 1341static __inline int 1342pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m, vm_page_t *free) 1343{ 1344 1345 --m->wire_count; 1346 if (m->wire_count == 0) 1347 return (_pmap_unwire_pte_hold(pmap, m, free)); 1348 else 1349 return (0); 1350} 1351 1352static int 1353_pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m, vm_page_t *free) 1354{ 1355 vm_offset_t pteva; 1356 1357 PT_UPDATES_FLUSH(); 1358 /* 1359 * unmap the page table page 1360 */ 1361 xen_pt_unpin(pmap->pm_pdir[m->pindex]); 1362 /* 1363 * page *might* contain residual mapping :-/ 1364 */ 1365 PD_CLEAR_VA(pmap, m->pindex, TRUE); 1366 pmap_zero_page(m); 1367 --pmap->pm_stats.resident_count; 1368 1369 /* 1370 * This is a release store so that the ordinary store unmapping 1371 * the page table page is globally performed before TLB shoot- 1372 * down is begun. 1373 */ 1374 atomic_subtract_rel_int(&cnt.v_wire_count, 1); 1375 1376 /* 1377 * Do an invltlb to make the invalidated mapping 1378 * take effect immediately. 1379 */ 1380 pteva = VM_MAXUSER_ADDRESS + i386_ptob(m->pindex); 1381 pmap_invalidate_page(pmap, pteva); 1382 1383 /* 1384 * Put page on a list so that it is released after 1385 * *ALL* TLB shootdown is done 1386 */ 1387 m->right = *free; 1388 *free = m; 1389 1390 return (1); 1391} 1392 1393/* 1394 * After removing a page table entry, this routine is used to 1395 * conditionally free the page, and manage the hold/wire counts. 1396 */ 1397static int 1398pmap_unuse_pt(pmap_t pmap, vm_offset_t va, vm_page_t *free) 1399{ 1400 pd_entry_t ptepde; 1401 vm_page_t mpte; 1402 1403 if (va >= VM_MAXUSER_ADDRESS) 1404 return (0); 1405 ptepde = PT_GET(pmap_pde(pmap, va)); 1406 mpte = PHYS_TO_VM_PAGE(ptepde & PG_FRAME); 1407 return (pmap_unwire_pte_hold(pmap, mpte, free)); 1408} 1409 1410/* 1411 * Initialize the pmap for the swapper process. 1412 */ 1413void 1414pmap_pinit0(pmap_t pmap) 1415{ 1416 1417 PMAP_LOCK_INIT(pmap); 1418 /* 1419 * Since the page table directory is shared with the kernel pmap, 1420 * which is already included in the list "allpmaps", this pmap does 1421 * not need to be inserted into that list. 1422 */ 1423 pmap->pm_pdir = (pd_entry_t *)(KERNBASE + (vm_offset_t)IdlePTD); 1424#ifdef PAE 1425 pmap->pm_pdpt = (pdpt_entry_t *)(KERNBASE + (vm_offset_t)IdlePDPT); 1426#endif 1427 CPU_ZERO(&pmap->pm_active); 1428 PCPU_SET(curpmap, pmap); 1429 TAILQ_INIT(&pmap->pm_pvchunk); 1430 bzero(&pmap->pm_stats, sizeof pmap->pm_stats); 1431} 1432 1433/* 1434 * Initialize a preallocated and zeroed pmap structure, 1435 * such as one in a vmspace structure. 1436 */ 1437int 1438pmap_pinit(pmap_t pmap) 1439{ 1440 vm_page_t m, ptdpg[NPGPTD + 1]; 1441 int npgptd = NPGPTD + 1; 1442 int i; 1443 1444#ifdef HAMFISTED_LOCKING 1445 mtx_lock(&createdelete_lock); 1446#endif 1447 1448 PMAP_LOCK_INIT(pmap); 1449 1450 /* 1451 * No need to allocate page table space yet but we do need a valid 1452 * page directory table. 
1453 */ 1454 if (pmap->pm_pdir == NULL) { 1455 pmap->pm_pdir = (pd_entry_t *)kmem_alloc_nofault(kernel_map, 1456 NBPTD); 1457 if (pmap->pm_pdir == NULL) { 1458 PMAP_LOCK_DESTROY(pmap); 1459#ifdef HAMFISTED_LOCKING 1460 mtx_unlock(&createdelete_lock); 1461#endif 1462 return (0); 1463 } 1464#ifdef PAE 1465 pmap->pm_pdpt = (pd_entry_t *)kmem_alloc_nofault(kernel_map, 1); 1466#endif 1467 } 1468 1469 /* 1470 * allocate the page directory page(s) 1471 */ 1472 for (i = 0; i < npgptd;) { 1473 m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | 1474 VM_ALLOC_WIRED | VM_ALLOC_ZERO); 1475 if (m == NULL) 1476 VM_WAIT; 1477 else { 1478 ptdpg[i++] = m; 1479 } 1480 } 1481 1482 pmap_qenter((vm_offset_t)pmap->pm_pdir, ptdpg, NPGPTD); 1483 1484 for (i = 0; i < NPGPTD; i++) 1485 if ((ptdpg[i]->flags & PG_ZERO) == 0) 1486 pagezero(pmap->pm_pdir + (i * NPDEPG)); 1487 1488 mtx_lock_spin(&allpmaps_lock); 1489 LIST_INSERT_HEAD(&allpmaps, pmap, pm_list); 1490 /* Copy the kernel page table directory entries. */ 1491 bcopy(PTD + KPTDI, pmap->pm_pdir + KPTDI, nkpt * sizeof(pd_entry_t)); 1492 mtx_unlock_spin(&allpmaps_lock); 1493 1494#ifdef PAE 1495 pmap_qenter((vm_offset_t)pmap->pm_pdpt, &ptdpg[NPGPTD], 1); 1496 if ((ptdpg[NPGPTD]->flags & PG_ZERO) == 0) 1497 bzero(pmap->pm_pdpt, PAGE_SIZE); 1498 for (i = 0; i < NPGPTD; i++) { 1499 vm_paddr_t ma; 1500 1501 ma = VM_PAGE_TO_MACH(ptdpg[i]); 1502 pmap->pm_pdpt[i] = ma | PG_V; 1503 1504 } 1505#endif 1506 for (i = 0; i < NPGPTD; i++) { 1507 pt_entry_t *pd; 1508 vm_paddr_t ma; 1509 1510 ma = VM_PAGE_TO_MACH(ptdpg[i]); 1511 pd = pmap->pm_pdir + (i * NPDEPG); 1512 PT_SET_MA(pd, *vtopte((vm_offset_t)pd) & ~(PG_M|PG_A|PG_U|PG_RW)); 1513#if 0 1514 xen_pgd_pin(ma); 1515#endif 1516 } 1517 1518#ifdef PAE 1519 PT_SET_MA(pmap->pm_pdpt, *vtopte((vm_offset_t)pmap->pm_pdpt) & ~PG_RW); 1520#endif 1521 vm_page_lock_queues(); 1522 xen_flush_queue(); 1523 xen_pgdpt_pin(VM_PAGE_TO_MACH(ptdpg[NPGPTD])); 1524 for (i = 0; i < NPGPTD; i++) { 1525 vm_paddr_t ma = VM_PAGE_TO_MACH(ptdpg[i]); 1526 PT_SET_VA_MA(&pmap->pm_pdir[PTDPTDI + i], ma | PG_V | PG_A, FALSE); 1527 } 1528 xen_flush_queue(); 1529 vm_page_unlock_queues(); 1530 CPU_ZERO(&pmap->pm_active); 1531 TAILQ_INIT(&pmap->pm_pvchunk); 1532 bzero(&pmap->pm_stats, sizeof pmap->pm_stats); 1533 1534#ifdef HAMFISTED_LOCKING 1535 mtx_unlock(&createdelete_lock); 1536#endif 1537 return (1); 1538} 1539 1540/* 1541 * this routine is called if the page table page is not 1542 * mapped correctly. 1543 */ 1544static vm_page_t 1545_pmap_allocpte(pmap_t pmap, u_int ptepindex, int flags) 1546{ 1547 vm_paddr_t ptema; 1548 vm_page_t m; 1549 1550 KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT || 1551 (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK, 1552 ("_pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK")); 1553 1554 /* 1555 * Allocate a page table page. 1556 */ 1557 if ((m = vm_page_alloc(NULL, ptepindex, VM_ALLOC_NOOBJ | 1558 VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) { 1559 if (flags & M_WAITOK) { 1560 PMAP_UNLOCK(pmap); 1561 vm_page_unlock_queues(); 1562 VM_WAIT; 1563 vm_page_lock_queues(); 1564 PMAP_LOCK(pmap); 1565 } 1566 1567 /* 1568 * Indicate the need to retry. While waiting, the page table 1569 * page may have been allocated. 1570 */ 1571 return (NULL); 1572 } 1573 if ((m->flags & PG_ZERO) == 0) 1574 pmap_zero_page(m); 1575 1576 /* 1577 * Map the pagetable page into the process address space, if 1578 * it isn't already there. 
1579 */ 1580 1581 pmap->pm_stats.resident_count++; 1582 1583 ptema = VM_PAGE_TO_MACH(m); 1584 xen_pt_pin(ptema); 1585 PT_SET_VA_MA(&pmap->pm_pdir[ptepindex], 1586 (ptema | PG_U | PG_RW | PG_V | PG_A | PG_M), TRUE); 1587 1588 KASSERT(pmap->pm_pdir[ptepindex], 1589 ("_pmap_allocpte: ptepindex=%d did not get mapped", ptepindex)); 1590 return (m); 1591} 1592 1593static vm_page_t 1594pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags) 1595{ 1596 u_int ptepindex; 1597 pd_entry_t ptema; 1598 vm_page_t m; 1599 1600 KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT || 1601 (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK, 1602 ("pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK")); 1603 1604 /* 1605 * Calculate pagetable page index 1606 */ 1607 ptepindex = va >> PDRSHIFT; 1608retry: 1609 /* 1610 * Get the page directory entry 1611 */ 1612 ptema = pmap->pm_pdir[ptepindex]; 1613 1614 /* 1615 * This supports switching from a 4MB page to a 1616 * normal 4K page. 1617 */ 1618 if (ptema & PG_PS) { 1619 /* 1620 * XXX 1621 */ 1622 pmap->pm_pdir[ptepindex] = 0; 1623 ptema = 0; 1624 pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE; 1625 pmap_invalidate_all(kernel_pmap); 1626 } 1627 1628 /* 1629 * If the page table page is mapped, we just increment the 1630 * hold count, and activate it. 1631 */ 1632 if (ptema & PG_V) { 1633 m = PHYS_TO_VM_PAGE(xpmap_mtop(ptema) & PG_FRAME); 1634 m->wire_count++; 1635 } else { 1636 /* 1637 * Here if the pte page isn't mapped, or if it has 1638 * been deallocated. 1639 */ 1640 CTR3(KTR_PMAP, "pmap_allocpte: pmap=%p va=0x%08x flags=0x%x", 1641 pmap, va, flags); 1642 m = _pmap_allocpte(pmap, ptepindex, flags); 1643 if (m == NULL && (flags & M_WAITOK)) 1644 goto retry; 1645 1646 KASSERT(pmap->pm_pdir[ptepindex], ("ptepindex=%d did not get mapped", ptepindex)); 1647 } 1648 return (m); 1649} 1650 1651 1652/*************************************************** 1653* Pmap allocation/deallocation routines. 1654 ***************************************************/ 1655 1656#ifdef SMP 1657/* 1658 * Deal with a SMP shootdown of other users of the pmap that we are 1659 * trying to dispose of. This can be a bit hairy. 1660 */ 1661static cpuset_t *lazymask; 1662static u_int lazyptd; 1663static volatile u_int lazywait; 1664 1665void pmap_lazyfix_action(void); 1666 1667void 1668pmap_lazyfix_action(void) 1669{ 1670 1671#ifdef COUNT_IPIS 1672 (*ipi_lazypmap_counts[PCPU_GET(cpuid)])++; 1673#endif 1674 if (rcr3() == lazyptd) 1675 load_cr3(PCPU_GET(curpcb)->pcb_cr3); 1676 CPU_CLR_ATOMIC(PCPU_GET(cpuid), lazymask); 1677 atomic_store_rel_int(&lazywait, 1); 1678} 1679 1680static void 1681pmap_lazyfix_self(u_int cpuid) 1682{ 1683 1684 if (rcr3() == lazyptd) 1685 load_cr3(PCPU_GET(curpcb)->pcb_cr3); 1686 CPU_CLR_ATOMIC(cpuid, lazymask); 1687} 1688 1689 1690static void 1691pmap_lazyfix(pmap_t pmap) 1692{ 1693 cpuset_t mymask, mask; 1694 u_int cpuid, spins; 1695 int lsb; 1696 1697 mask = pmap->pm_active; 1698 while (!CPU_EMPTY(&mask)) { 1699 spins = 50000000; 1700 1701 /* Find least significant set bit. */ 1702 lsb = cpusetobj_ffs(&mask); 1703 MPASS(lsb != 0); 1704 lsb--; 1705 CPU_SETOF(lsb, &mask); 1706 mtx_lock_spin(&smp_ipi_mtx); 1707#ifdef PAE 1708 lazyptd = vtophys(pmap->pm_pdpt); 1709#else 1710 lazyptd = vtophys(pmap->pm_pdir); 1711#endif 1712 cpuid = PCPU_GET(cpuid); 1713 1714 /* Use a cpuset just for having an easy check. 
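	 * "mymask" holds only the current CPU, so when the comparison below
	 * shows that the remaining mask equals it, this CPU is the sole
	 * remaining user and pmap_lazyfix_self() can fix things up locally
	 * without sending an IPI.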
*/ 1715 CPU_SETOF(cpuid, &mymask); 1716 if (!CPU_CMP(&mask, &mymask)) { 1717 lazymask = &pmap->pm_active; 1718 pmap_lazyfix_self(cpuid); 1719 } else { 1720 atomic_store_rel_int((u_int *)&lazymask, 1721 (u_int)&pmap->pm_active); 1722 atomic_store_rel_int(&lazywait, 0); 1723 ipi_selected(mask, IPI_LAZYPMAP); 1724 while (lazywait == 0) { 1725 ia32_pause(); 1726 if (--spins == 0) 1727 break; 1728 } 1729 } 1730 mtx_unlock_spin(&smp_ipi_mtx); 1731 if (spins == 0) 1732 printf("pmap_lazyfix: spun for 50000000\n"); 1733 mask = pmap->pm_active; 1734 } 1735} 1736 1737#else /* SMP */ 1738 1739/* 1740 * Cleaning up on uniprocessor is easy. For various reasons, we're 1741 * unlikely to have to even execute this code, including the fact 1742 * that the cleanup is deferred until the parent does a wait(2), which 1743 * means that another userland process has run. 1744 */ 1745static void 1746pmap_lazyfix(pmap_t pmap) 1747{ 1748 u_int cr3; 1749 1750 cr3 = vtophys(pmap->pm_pdir); 1751 if (cr3 == rcr3()) { 1752 load_cr3(PCPU_GET(curpcb)->pcb_cr3); 1753 CPU_CLR(PCPU_GET(cpuid), &pmap->pm_active); 1754 } 1755} 1756#endif /* SMP */ 1757 1758/* 1759 * Release any resources held by the given physical map. 1760 * Called when a pmap initialized by pmap_pinit is being released. 1761 * Should only be called if the map contains no valid mappings. 1762 */ 1763void 1764pmap_release(pmap_t pmap) 1765{ 1766 vm_page_t m, ptdpg[2*NPGPTD+1]; 1767 vm_paddr_t ma; 1768 int i; 1769#ifdef PAE 1770 int npgptd = NPGPTD + 1; 1771#else 1772 int npgptd = NPGPTD; 1773#endif 1774 1775 KASSERT(pmap->pm_stats.resident_count == 0, 1776 ("pmap_release: pmap resident count %ld != 0", 1777 pmap->pm_stats.resident_count)); 1778 PT_UPDATES_FLUSH(); 1779 1780#ifdef HAMFISTED_LOCKING 1781 mtx_lock(&createdelete_lock); 1782#endif 1783 1784 pmap_lazyfix(pmap); 1785 mtx_lock_spin(&allpmaps_lock); 1786 LIST_REMOVE(pmap, pm_list); 1787 mtx_unlock_spin(&allpmaps_lock); 1788 1789 for (i = 0; i < NPGPTD; i++) 1790 ptdpg[i] = PHYS_TO_VM_PAGE(vtophys(pmap->pm_pdir + (i*NPDEPG)) & PG_FRAME); 1791 pmap_qremove((vm_offset_t)pmap->pm_pdir, NPGPTD); 1792#ifdef PAE 1793 ptdpg[NPGPTD] = PHYS_TO_VM_PAGE(vtophys(pmap->pm_pdpt)); 1794#endif 1795 1796 for (i = 0; i < npgptd; i++) { 1797 m = ptdpg[i]; 1798 ma = VM_PAGE_TO_MACH(m); 1799 /* unpinning L1 and L2 treated the same */ 1800#if 0 1801 xen_pgd_unpin(ma); 1802#else 1803 if (i == NPGPTD) 1804 xen_pgd_unpin(ma); 1805#endif 1806#ifdef PAE 1807 if (i < NPGPTD) 1808 KASSERT(VM_PAGE_TO_MACH(m) == (pmap->pm_pdpt[i] & PG_FRAME), 1809 ("pmap_release: got wrong ptd page")); 1810#endif 1811 m->wire_count--; 1812 atomic_subtract_int(&cnt.v_wire_count, 1); 1813 vm_page_free(m); 1814 } 1815#ifdef PAE 1816 pmap_qremove((vm_offset_t)pmap->pm_pdpt, 1); 1817#endif 1818 PMAP_LOCK_DESTROY(pmap); 1819 1820#ifdef HAMFISTED_LOCKING 1821 mtx_unlock(&createdelete_lock); 1822#endif 1823} 1824 1825static int 1826kvm_size(SYSCTL_HANDLER_ARGS) 1827{ 1828 unsigned long ksize = VM_MAX_KERNEL_ADDRESS - KERNBASE; 1829 1830 return (sysctl_handle_long(oidp, &ksize, 0, req)); 1831} 1832SYSCTL_PROC(_vm, OID_AUTO, kvm_size, CTLTYPE_LONG|CTLFLAG_RD, 1833 0, 0, kvm_size, "IU", "Size of KVM"); 1834 1835static int 1836kvm_free(SYSCTL_HANDLER_ARGS) 1837{ 1838 unsigned long kfree = VM_MAX_KERNEL_ADDRESS - kernel_vm_end; 1839 1840 return (sysctl_handle_long(oidp, &kfree, 0, req)); 1841} 1842SYSCTL_PROC(_vm, OID_AUTO, kvm_free, CTLTYPE_LONG|CTLFLAG_RD, 1843 0, 0, kvm_free, "IU", "Amount of KVM free"); 1844 1845/* 1846 * grow the number of kernel page table 
entries, if needed 1847 */ 1848void 1849pmap_growkernel(vm_offset_t addr) 1850{ 1851 struct pmap *pmap; 1852 vm_paddr_t ptppaddr; 1853 vm_page_t nkpg; 1854 pd_entry_t newpdir; 1855 1856 mtx_assert(&kernel_map->system_mtx, MA_OWNED); 1857 if (kernel_vm_end == 0) { 1858 kernel_vm_end = KERNBASE; 1859 nkpt = 0; 1860 while (pdir_pde(PTD, kernel_vm_end)) { 1861 kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1); 1862 nkpt++; 1863 if (kernel_vm_end - 1 >= kernel_map->max_offset) { 1864 kernel_vm_end = kernel_map->max_offset; 1865 break; 1866 } 1867 } 1868 } 1869 addr = roundup2(addr, NBPDR); 1870 if (addr - 1 >= kernel_map->max_offset) 1871 addr = kernel_map->max_offset; 1872 while (kernel_vm_end < addr) { 1873 if (pdir_pde(PTD, kernel_vm_end)) { 1874 kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK; 1875 if (kernel_vm_end - 1 >= kernel_map->max_offset) { 1876 kernel_vm_end = kernel_map->max_offset; 1877 break; 1878 } 1879 continue; 1880 } 1881 1882 nkpg = vm_page_alloc(NULL, kernel_vm_end >> PDRSHIFT, 1883 VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | 1884 VM_ALLOC_ZERO); 1885 if (nkpg == NULL) 1886 panic("pmap_growkernel: no memory to grow kernel"); 1887 1888 nkpt++; 1889 1890 if ((nkpg->flags & PG_ZERO) == 0) 1891 pmap_zero_page(nkpg); 1892 ptppaddr = VM_PAGE_TO_PHYS(nkpg); 1893 newpdir = (pd_entry_t) (ptppaddr | PG_V | PG_RW | PG_A | PG_M); 1894 vm_page_lock_queues(); 1895 PD_SET_VA(kernel_pmap, (kernel_vm_end >> PDRSHIFT), newpdir, TRUE); 1896 mtx_lock_spin(&allpmaps_lock); 1897 LIST_FOREACH(pmap, &allpmaps, pm_list) 1898 PD_SET_VA(pmap, (kernel_vm_end >> PDRSHIFT), newpdir, TRUE); 1899 1900 mtx_unlock_spin(&allpmaps_lock); 1901 vm_page_unlock_queues(); 1902 1903 kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK; 1904 if (kernel_vm_end - 1 >= kernel_map->max_offset) { 1905 kernel_vm_end = kernel_map->max_offset; 1906 break; 1907 } 1908 } 1909} 1910 1911 1912/*************************************************** 1913 * page management routines. 
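 *
 * pv entries are kept in page-sized chunks: each struct pv_chunk carries
 * _NPCM (11) 32-bit free maps describing _NPCPV (336) entries, i.e. ten
 * full 32-bit words plus the low 16 bits of the last one (PC_FREE10),
 * and the CTASSERTs below check these values and that a chunk fills
 * exactly one page.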
1914 ***************************************************/ 1915 1916CTASSERT(sizeof(struct pv_chunk) == PAGE_SIZE); 1917CTASSERT(_NPCM == 11); 1918CTASSERT(_NPCPV == 336); 1919 1920static __inline struct pv_chunk * 1921pv_to_chunk(pv_entry_t pv) 1922{ 1923 1924 return ((struct pv_chunk *)((uintptr_t)pv & ~(uintptr_t)PAGE_MASK)); 1925} 1926 1927#define PV_PMAP(pv) (pv_to_chunk(pv)->pc_pmap) 1928 1929#define PC_FREE0_9 0xfffffffful /* Free values for index 0 through 9 */ 1930#define PC_FREE10 0x0000fffful /* Free values for index 10 */ 1931 1932static uint32_t pc_freemask[_NPCM] = { 1933 PC_FREE0_9, PC_FREE0_9, PC_FREE0_9, 1934 PC_FREE0_9, PC_FREE0_9, PC_FREE0_9, 1935 PC_FREE0_9, PC_FREE0_9, PC_FREE0_9, 1936 PC_FREE0_9, PC_FREE10 1937}; 1938 1939SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_count, CTLFLAG_RD, &pv_entry_count, 0, 1940 "Current number of pv entries"); 1941 1942#ifdef PV_STATS 1943static int pc_chunk_count, pc_chunk_allocs, pc_chunk_frees, pc_chunk_tryfail; 1944 1945SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_count, CTLFLAG_RD, &pc_chunk_count, 0, 1946 "Current number of pv entry chunks"); 1947SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_allocs, CTLFLAG_RD, &pc_chunk_allocs, 0, 1948 "Current number of pv entry chunks allocated"); 1949SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_frees, CTLFLAG_RD, &pc_chunk_frees, 0, 1950 "Current number of pv entry chunks frees"); 1951SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_tryfail, CTLFLAG_RD, &pc_chunk_tryfail, 0, 1952 "Number of times tried to get a chunk page but failed."); 1953 1954static long pv_entry_frees, pv_entry_allocs; 1955static int pv_entry_spare; 1956 1957SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_frees, CTLFLAG_RD, &pv_entry_frees, 0, 1958 "Current number of pv entry frees"); 1959SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_allocs, CTLFLAG_RD, &pv_entry_allocs, 0, 1960 "Current number of pv entry allocs"); 1961SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_spare, CTLFLAG_RD, &pv_entry_spare, 0, 1962 "Current number of spare pv entries"); 1963#endif 1964 1965/* 1966 * We are in a serious low memory condition. Resort to 1967 * drastic measures to free some pages so we can allocate 1968 * another pv entry chunk. 1969 */ 1970static vm_page_t 1971pmap_pv_reclaim(pmap_t locked_pmap) 1972{ 1973 struct pch newtail; 1974 struct pv_chunk *pc; 1975 pmap_t pmap; 1976 pt_entry_t *pte, tpte; 1977 pv_entry_t pv; 1978 vm_offset_t va; 1979 vm_page_t free, m, m_pc; 1980 uint32_t inuse, freemask; 1981 int bit, field, freed; 1982 1983 PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED); 1984 pmap = NULL; 1985 free = m_pc = NULL; 1986 TAILQ_INIT(&newtail); 1987 sched_pin(); 1988 while ((pc = TAILQ_FIRST(&pv_chunks)) != NULL && (pv_vafree == 0 || 1989 free == NULL)) { 1990 TAILQ_REMOVE(&pv_chunks, pc, pc_lru); 1991 if (pmap != pc->pc_pmap) { 1992 if (pmap != NULL) { 1993 pmap_invalidate_all(pmap); 1994 if (pmap != locked_pmap) 1995 PMAP_UNLOCK(pmap); 1996 } 1997 pmap = pc->pc_pmap; 1998 /* Avoid deadlock and lock recursion. */ 1999 if (pmap > locked_pmap) 2000 PMAP_LOCK(pmap); 2001 else if (pmap != locked_pmap && !PMAP_TRYLOCK(pmap)) { 2002 pmap = NULL; 2003 TAILQ_INSERT_TAIL(&newtail, pc, pc_lru); 2004 continue; 2005 } 2006 } 2007 2008 /* 2009 * Destroy every non-wired, 4 KB page mapping in the chunk. 
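		 * A set bit in pc_map[] marks a free slot, so the expression
		 * "~pc->pc_map[field] & pc_freemask[field]" selects the entries
		 * still in use.  bsfl() visits them lowest bit first; wired
		 * mappings are skipped, and every entry that is torn down is
		 * recorded in "freemask" and handed back to the chunk once the
		 * field has been scanned.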
2010 */ 2011 freed = 0; 2012 for (field = 0; field < _NPCM; field++) { 2013 freemask = 0; 2014 for (inuse = ~pc->pc_map[field] & pc_freemask[field]; 2015 inuse != 0; inuse &= ~(1UL << bit)) { 2016 bit = bsfl(inuse); 2017 pv = &pc->pc_pventry[field * 32 + bit]; 2018 va = pv->pv_va; 2019 pte = pmap_pte_quick(pmap, va); 2020 if ((*pte & PG_W) != 0) 2021 continue; 2022 tpte = pte_load_clear(pte); 2023 if ((tpte & PG_G) != 0) 2024 pmap_invalidate_page(pmap, va); 2025 m = PHYS_TO_VM_PAGE(tpte & PG_FRAME); 2026 if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) 2027 vm_page_dirty(m); 2028 if ((tpte & PG_A) != 0) 2029 vm_page_aflag_set(m, PGA_REFERENCED); 2030 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list); 2031 if (TAILQ_EMPTY(&m->md.pv_list)) 2032 vm_page_aflag_clear(m, PGA_WRITEABLE); 2033 pmap_unuse_pt(pmap, va, &free); 2034 freemask |= 1UL << bit; 2035 freed++; 2036 } 2037 pc->pc_map[field] |= freemask; 2038 } 2039 if (freed == 0) { 2040 TAILQ_INSERT_TAIL(&newtail, pc, pc_lru); 2041 continue; 2042 } 2043 pmap->pm_stats.resident_count -= freed; 2044 PV_STAT(pv_entry_frees += freed); 2045 PV_STAT(pv_entry_spare += freed); 2046 pv_entry_count -= freed; 2047 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 2048 for (field = 0; field < _NPCM; field++) 2049 if (pc->pc_map[field] != pc_freemask[field]) { 2050 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, 2051 pc_list); 2052 TAILQ_INSERT_TAIL(&newtail, pc, pc_lru); 2053 2054 /* 2055 * One freed pv entry in locked_pmap is 2056 * sufficient. 2057 */ 2058 if (pmap == locked_pmap) 2059 goto out; 2060 break; 2061 } 2062 if (field == _NPCM) { 2063 PV_STAT(pv_entry_spare -= _NPCPV); 2064 PV_STAT(pc_chunk_count--); 2065 PV_STAT(pc_chunk_frees++); 2066 /* Entire chunk is free; return it. */ 2067 m_pc = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)pc)); 2068 pmap_qremove((vm_offset_t)pc, 1); 2069 pmap_ptelist_free(&pv_vafree, (vm_offset_t)pc); 2070 break; 2071 } 2072 } 2073out: 2074 sched_unpin(); 2075 TAILQ_CONCAT(&pv_chunks, &newtail, pc_lru); 2076 if (pmap != NULL) { 2077 pmap_invalidate_all(pmap); 2078 if (pmap != locked_pmap) 2079 PMAP_UNLOCK(pmap); 2080 } 2081 if (m_pc == NULL && pv_vafree != 0 && free != NULL) { 2082 m_pc = free; 2083 free = m_pc->right; 2084 /* Recycle a freed page table page. 
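 * If no whole chunk could be freed but the teardown above released at
 * least one page table page, hand one of those back instead: pop it off
 * the "free" list and re-wire it so the caller can map it as a new pv
 * chunk.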
*/ 2085 m_pc->wire_count = 1; 2086 atomic_add_int(&cnt.v_wire_count, 1); 2087 } 2088 pmap_free_zero_pages(free); 2089 return (m_pc); 2090} 2091 2092/* 2093 * free the pv_entry back to the free list 2094 */ 2095static void 2096free_pv_entry(pmap_t pmap, pv_entry_t pv) 2097{ 2098 struct pv_chunk *pc; 2099 int idx, field, bit; 2100 2101 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2102 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2103 PV_STAT(pv_entry_frees++); 2104 PV_STAT(pv_entry_spare++); 2105 pv_entry_count--; 2106 pc = pv_to_chunk(pv); 2107 idx = pv - &pc->pc_pventry[0]; 2108 field = idx / 32; 2109 bit = idx % 32; 2110 pc->pc_map[field] |= 1ul << bit; 2111 /* move to head of list */ 2112 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 2113 for (idx = 0; idx < _NPCM; idx++) 2114 if (pc->pc_map[idx] != pc_freemask[idx]) { 2115 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list); 2116 return; 2117 } 2118 free_pv_chunk(pc); 2119} 2120 2121static void 2122free_pv_chunk(struct pv_chunk *pc) 2123{ 2124 vm_page_t m; 2125 2126 TAILQ_REMOVE(&pv_chunks, pc, pc_lru); 2127 PV_STAT(pv_entry_spare -= _NPCPV); 2128 PV_STAT(pc_chunk_count--); 2129 PV_STAT(pc_chunk_frees++); 2130 /* entire chunk is free, return it */ 2131 m = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)pc)); 2132 pmap_qremove((vm_offset_t)pc, 1); 2133 vm_page_unwire(m, 0); 2134 vm_page_free(m); 2135 pmap_ptelist_free(&pv_vafree, (vm_offset_t)pc); 2136} 2137 2138/* 2139 * get a new pv_entry, allocating a block from the system 2140 * when needed. 2141 */ 2142static pv_entry_t 2143get_pv_entry(pmap_t pmap, boolean_t try) 2144{ 2145 static const struct timeval printinterval = { 60, 0 }; 2146 static struct timeval lastprint; 2147 int bit, field; 2148 pv_entry_t pv; 2149 struct pv_chunk *pc; 2150 vm_page_t m; 2151 2152 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2153 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2154 PV_STAT(pv_entry_allocs++); 2155 pv_entry_count++; 2156 if (pv_entry_count > pv_entry_high_water) 2157 if (ratecheck(&lastprint, &printinterval)) 2158 printf("Approaching the limit on PV entries, consider " 2159 "increasing either the vm.pmap.shpgperproc or the " 2160 "vm.pmap.pv_entry_max tunable.\n"); 2161retry: 2162 pc = TAILQ_FIRST(&pmap->pm_pvchunk); 2163 if (pc != NULL) { 2164 for (field = 0; field < _NPCM; field++) { 2165 if (pc->pc_map[field]) { 2166 bit = bsfl(pc->pc_map[field]); 2167 break; 2168 } 2169 } 2170 if (field < _NPCM) { 2171 pv = &pc->pc_pventry[field * 32 + bit]; 2172 pc->pc_map[field] &= ~(1ul << bit); 2173 /* If this was the last item, move it to tail */ 2174 for (field = 0; field < _NPCM; field++) 2175 if (pc->pc_map[field] != 0) { 2176 PV_STAT(pv_entry_spare--); 2177 return (pv); /* not full, return */ 2178 } 2179 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 2180 TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list); 2181 if (pc != TAILQ_LAST(&pv_chunks, pch)) { 2182 TAILQ_REMOVE(&pv_chunks, pc, pc_lru); 2183 TAILQ_INSERT_TAIL(&pv_chunks, pc, pc_lru); 2184 } 2185 PV_STAT(pv_entry_spare--); 2186 return (pv); 2187 } 2188 } 2189 /* 2190 * Access to the ptelist "pv_vafree" is synchronized by the page 2191 * queues lock. If "pv_vafree" is currently non-empty, it will 2192 * remain non-empty until pmap_ptelist_alloc() completes. 
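 * In other words, once a non-empty "pv_vafree" has been observed here a
 * VA can safely be taken from it below.  If either the VA list or the
 * physical page allocation fails, a "try" caller simply backs out, while
 * everyone else falls back to pmap_pv_reclaim() and retries.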
2193 */ 2194 if (pv_vafree == 0 || (m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | 2195 VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) { 2196 if (try) { 2197 pv_entry_count--; 2198 PV_STAT(pc_chunk_tryfail++); 2199 return (NULL); 2200 } 2201 m = pmap_pv_reclaim(pmap); 2202 if (m == NULL) 2203 goto retry; 2204 } 2205 PV_STAT(pc_chunk_count++); 2206 PV_STAT(pc_chunk_allocs++); 2207 pc = (struct pv_chunk *)pmap_ptelist_alloc(&pv_vafree); 2208 pmap_qenter((vm_offset_t)pc, &m, 1); 2209 if ((m->flags & PG_ZERO) == 0) 2210 pagezero(pc); 2211 pc->pc_pmap = pmap; 2212 pc->pc_map[0] = pc_freemask[0] & ~1ul; /* preallocated bit 0 */ 2213 for (field = 1; field < _NPCM; field++) 2214 pc->pc_map[field] = pc_freemask[field]; 2215 TAILQ_INSERT_TAIL(&pv_chunks, pc, pc_lru); 2216 pv = &pc->pc_pventry[0]; 2217 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list); 2218 PV_STAT(pv_entry_spare += _NPCPV - 1); 2219 return (pv); 2220} 2221 2222static __inline pv_entry_t 2223pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va) 2224{ 2225 pv_entry_t pv; 2226 2227 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2228 TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) { 2229 if (pmap == PV_PMAP(pv) && va == pv->pv_va) { 2230 TAILQ_REMOVE(&pvh->pv_list, pv, pv_list); 2231 break; 2232 } 2233 } 2234 return (pv); 2235} 2236 2237static void 2238pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va) 2239{ 2240 pv_entry_t pv; 2241 2242 pv = pmap_pvh_remove(pvh, pmap, va); 2243 KASSERT(pv != NULL, ("pmap_pvh_free: pv not found")); 2244 free_pv_entry(pmap, pv); 2245} 2246 2247static void 2248pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va) 2249{ 2250 2251 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2252 pmap_pvh_free(&m->md, pmap, va); 2253 if (TAILQ_EMPTY(&m->md.pv_list)) 2254 vm_page_aflag_clear(m, PGA_WRITEABLE); 2255} 2256 2257/* 2258 * Conditionally create a pv entry. 2259 */ 2260static boolean_t 2261pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m) 2262{ 2263 pv_entry_t pv; 2264 2265 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2266 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2267 if (pv_entry_count < pv_entry_high_water && 2268 (pv = get_pv_entry(pmap, TRUE)) != NULL) { 2269 pv->pv_va = va; 2270 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list); 2271 return (TRUE); 2272 } else 2273 return (FALSE); 2274} 2275 2276/* 2277 * pmap_remove_pte: do the things to unmap a page in a process 2278 */ 2279static int 2280pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t va, vm_page_t *free) 2281{ 2282 pt_entry_t oldpte; 2283 vm_page_t m; 2284 2285 CTR3(KTR_PMAP, "pmap_remove_pte: pmap=%p *ptq=0x%x va=0x%x", 2286 pmap, (u_long)*ptq, va); 2287 2288 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2289 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2290 oldpte = *ptq; 2291 PT_SET_VA_MA(ptq, 0, TRUE); 2292 if (oldpte & PG_W) 2293 pmap->pm_stats.wired_count -= 1; 2294 /* 2295 * Machines that don't support invlpg, also don't support 2296 * PG_G. 
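 * Consequently a PG_G (global) mapping can always be flushed with an
 * explicit invlpg.  Global TLB entries survive CR3 reloads, so the entry
 * must be invalidated here, against the kernel pmap where global
 * mappings live, rather than being left to a later context switch.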
2297 */ 2298 if (oldpte & PG_G) 2299 pmap_invalidate_page(kernel_pmap, va); 2300 pmap->pm_stats.resident_count -= 1; 2301 if (oldpte & PG_MANAGED) { 2302 m = PHYS_TO_VM_PAGE(xpmap_mtop(oldpte) & PG_FRAME); 2303 if ((oldpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) 2304 vm_page_dirty(m); 2305 if (oldpte & PG_A) 2306 vm_page_aflag_set(m, PGA_REFERENCED); 2307 pmap_remove_entry(pmap, m, va); 2308 } 2309 return (pmap_unuse_pt(pmap, va, free)); 2310} 2311 2312/* 2313 * Remove a single page from a process address space 2314 */ 2315static void 2316pmap_remove_page(pmap_t pmap, vm_offset_t va, vm_page_t *free) 2317{ 2318 pt_entry_t *pte; 2319 2320 CTR2(KTR_PMAP, "pmap_remove_page: pmap=%p va=0x%x", 2321 pmap, va); 2322 2323 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2324 KASSERT(curthread->td_pinned > 0, ("curthread not pinned")); 2325 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2326 if ((pte = pmap_pte_quick(pmap, va)) == NULL || (*pte & PG_V) == 0) 2327 return; 2328 pmap_remove_pte(pmap, pte, va, free); 2329 pmap_invalidate_page(pmap, va); 2330 if (*PMAP1) 2331 PT_SET_MA(PADDR1, 0); 2332 2333} 2334 2335/* 2336 * Remove the given range of addresses from the specified map. 2337 * 2338 * It is assumed that the start and end are properly 2339 * rounded to the page size. 2340 */ 2341void 2342pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) 2343{ 2344 vm_offset_t pdnxt; 2345 pd_entry_t ptpaddr; 2346 pt_entry_t *pte; 2347 vm_page_t free = NULL; 2348 int anyvalid; 2349 2350 CTR3(KTR_PMAP, "pmap_remove: pmap=%p sva=0x%x eva=0x%x", 2351 pmap, sva, eva); 2352 2353 /* 2354 * Perform an unsynchronized read. This is, however, safe. 2355 */ 2356 if (pmap->pm_stats.resident_count == 0) 2357 return; 2358 2359 anyvalid = 0; 2360 2361 vm_page_lock_queues(); 2362 sched_pin(); 2363 PMAP_LOCK(pmap); 2364 2365 /* 2366 * special handling of removing one page. a very 2367 * common operation and easy to short circuit some 2368 * code. 2369 */ 2370 if ((sva + PAGE_SIZE == eva) && 2371 ((pmap->pm_pdir[(sva >> PDRSHIFT)] & PG_PS) == 0)) { 2372 pmap_remove_page(pmap, sva, &free); 2373 goto out; 2374 } 2375 2376 for (; sva < eva; sva = pdnxt) { 2377 u_int pdirindex; 2378 2379 /* 2380 * Calculate index for next page table. 2381 */ 2382 pdnxt = (sva + NBPDR) & ~PDRMASK; 2383 if (pdnxt < sva) 2384 pdnxt = eva; 2385 if (pmap->pm_stats.resident_count == 0) 2386 break; 2387 2388 pdirindex = sva >> PDRSHIFT; 2389 ptpaddr = pmap->pm_pdir[pdirindex]; 2390 2391 /* 2392 * Weed out invalid mappings. Note: we assume that the page 2393 * directory table is always allocated, and in kernel virtual. 2394 */ 2395 if (ptpaddr == 0) 2396 continue; 2397 2398 /* 2399 * Check for large page. 2400 */ 2401 if ((ptpaddr & PG_PS) != 0) { 2402 PD_CLEAR_VA(pmap, pdirindex, TRUE); 2403 pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE; 2404 anyvalid = 1; 2405 continue; 2406 } 2407 2408 /* 2409 * Limit our scan to either the end of the va represented 2410 * by the current page table page, or to the end of the 2411 * range being removed. 2412 */ 2413 if (pdnxt > eva) 2414 pdnxt = eva; 2415 2416 for (pte = pmap_pte_quick(pmap, sva); sva != pdnxt; pte++, 2417 sva += PAGE_SIZE) { 2418 if ((*pte & PG_V) == 0) 2419 continue; 2420 2421 /* 2422 * The TLB entry for a PG_G mapping is invalidated 2423 * by pmap_remove_pte(). 
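 * Only non-global removals need to set "anyvalid"; those are batched
 * into the single pmap_invalidate_all() call at "out:" instead of being
 * flushed one page at a time.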
2424 */ 2425 if ((*pte & PG_G) == 0) 2426 anyvalid = 1; 2427 if (pmap_remove_pte(pmap, pte, sva, &free)) 2428 break; 2429 } 2430 } 2431 PT_UPDATES_FLUSH(); 2432 if (*PMAP1) 2433 PT_SET_VA_MA(PMAP1, 0, TRUE); 2434out: 2435 if (anyvalid) 2436 pmap_invalidate_all(pmap); 2437 sched_unpin(); 2438 vm_page_unlock_queues(); 2439 PMAP_UNLOCK(pmap); 2440 pmap_free_zero_pages(free); 2441} 2442 2443/* 2444 * Routine: pmap_remove_all 2445 * Function: 2446 * Removes this physical page from 2447 * all physical maps in which it resides. 2448 * Reflects back modify bits to the pager. 2449 * 2450 * Notes: 2451 * Original versions of this routine were very 2452 * inefficient because they iteratively called 2453 * pmap_remove (slow...) 2454 */ 2455 2456void 2457pmap_remove_all(vm_page_t m) 2458{ 2459 pv_entry_t pv; 2460 pmap_t pmap; 2461 pt_entry_t *pte, tpte; 2462 vm_page_t free; 2463 2464 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 2465 ("pmap_remove_all: page %p is not managed", m)); 2466 free = NULL; 2467 vm_page_lock_queues(); 2468 sched_pin(); 2469 while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) { 2470 pmap = PV_PMAP(pv); 2471 PMAP_LOCK(pmap); 2472 pmap->pm_stats.resident_count--; 2473 pte = pmap_pte_quick(pmap, pv->pv_va); 2474 tpte = *pte; 2475 PT_SET_VA_MA(pte, 0, TRUE); 2476 if (tpte & PG_W) 2477 pmap->pm_stats.wired_count--; 2478 if (tpte & PG_A) 2479 vm_page_aflag_set(m, PGA_REFERENCED); 2480 2481 /* 2482 * Update the vm_page_t clean and reference bits. 2483 */ 2484 if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) 2485 vm_page_dirty(m); 2486 pmap_unuse_pt(pmap, pv->pv_va, &free); 2487 pmap_invalidate_page(pmap, pv->pv_va); 2488 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list); 2489 free_pv_entry(pmap, pv); 2490 PMAP_UNLOCK(pmap); 2491 } 2492 vm_page_aflag_clear(m, PGA_WRITEABLE); 2493 PT_UPDATES_FLUSH(); 2494 if (*PMAP1) 2495 PT_SET_MA(PADDR1, 0); 2496 sched_unpin(); 2497 vm_page_unlock_queues(); 2498 pmap_free_zero_pages(free); 2499} 2500 2501/* 2502 * Set the physical protection on the 2503 * specified range of this map as requested. 2504 */ 2505void 2506pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot) 2507{ 2508 vm_offset_t pdnxt; 2509 pd_entry_t ptpaddr; 2510 pt_entry_t *pte; 2511 int anychanged; 2512 2513 CTR4(KTR_PMAP, "pmap_protect: pmap=%p sva=0x%x eva=0x%x prot=0x%x", 2514 pmap, sva, eva, prot); 2515 2516 if ((prot & VM_PROT_READ) == VM_PROT_NONE) { 2517 pmap_remove(pmap, sva, eva); 2518 return; 2519 } 2520 2521#ifdef PAE 2522 if ((prot & (VM_PROT_WRITE|VM_PROT_EXECUTE)) == 2523 (VM_PROT_WRITE|VM_PROT_EXECUTE)) 2524 return; 2525#else 2526 if (prot & VM_PROT_WRITE) 2527 return; 2528#endif 2529 2530 anychanged = 0; 2531 2532 vm_page_lock_queues(); 2533 sched_pin(); 2534 PMAP_LOCK(pmap); 2535 for (; sva < eva; sva = pdnxt) { 2536 pt_entry_t obits, pbits; 2537 u_int pdirindex; 2538 2539 pdnxt = (sva + NBPDR) & ~PDRMASK; 2540 if (pdnxt < sva) 2541 pdnxt = eva; 2542 2543 pdirindex = sva >> PDRSHIFT; 2544 ptpaddr = pmap->pm_pdir[pdirindex]; 2545 2546 /* 2547 * Weed out invalid mappings. Note: we assume that the page 2548 * directory table is always allocated, and in kernel virtual. 2549 */ 2550 if (ptpaddr == 0) 2551 continue; 2552 2553 /* 2554 * Check for large page. 
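 * For a 2/4MB mapping the protection change is applied directly to the
 * page directory entry: clearing PG_RW/PG_M revokes write access and,
 * under PAE, setting pg_nx revokes execute access.  No demotion to 4KB
 * pages is attempted here.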
2555 */ 2556 if ((ptpaddr & PG_PS) != 0) { 2557 if ((prot & VM_PROT_WRITE) == 0) 2558 pmap->pm_pdir[pdirindex] &= ~(PG_M|PG_RW); 2559#ifdef PAE 2560 if ((prot & VM_PROT_EXECUTE) == 0) 2561 pmap->pm_pdir[pdirindex] |= pg_nx; 2562#endif 2563 anychanged = 1; 2564 continue; 2565 } 2566 2567 if (pdnxt > eva) 2568 pdnxt = eva; 2569 2570 for (pte = pmap_pte_quick(pmap, sva); sva != pdnxt; pte++, 2571 sva += PAGE_SIZE) { 2572 vm_page_t m; 2573 2574retry: 2575 /* 2576 * Regardless of whether a pte is 32 or 64 bits in 2577 * size, PG_RW, PG_A, and PG_M are among the least 2578 * significant 32 bits. 2579 */ 2580 obits = pbits = *pte; 2581 if ((pbits & PG_V) == 0) 2582 continue; 2583 2584 if ((prot & VM_PROT_WRITE) == 0) { 2585 if ((pbits & (PG_MANAGED | PG_M | PG_RW)) == 2586 (PG_MANAGED | PG_M | PG_RW)) { 2587 m = PHYS_TO_VM_PAGE(xpmap_mtop(pbits) & 2588 PG_FRAME); 2589 vm_page_dirty(m); 2590 } 2591 pbits &= ~(PG_RW | PG_M); 2592 } 2593#ifdef PAE 2594 if ((prot & VM_PROT_EXECUTE) == 0) 2595 pbits |= pg_nx; 2596#endif 2597 2598 if (pbits != obits) { 2599 obits = *pte; 2600 PT_SET_VA_MA(pte, pbits, TRUE); 2601 if (*pte != pbits) 2602 goto retry; 2603 if (obits & PG_G) 2604 pmap_invalidate_page(pmap, sva); 2605 else 2606 anychanged = 1; 2607 } 2608 } 2609 } 2610 PT_UPDATES_FLUSH(); 2611 if (*PMAP1) 2612 PT_SET_VA_MA(PMAP1, 0, TRUE); 2613 if (anychanged) 2614 pmap_invalidate_all(pmap); 2615 sched_unpin(); 2616 vm_page_unlock_queues(); 2617 PMAP_UNLOCK(pmap); 2618} 2619 2620/* 2621 * Insert the given physical page (p) at 2622 * the specified virtual address (v) in the 2623 * target physical map with the protection requested. 2624 * 2625 * If specified, the page will be wired down, meaning 2626 * that the related pte can not be reclaimed. 2627 * 2628 * NB: This is the only routine which MAY NOT lazy-evaluate 2629 * or lose information. That is, this routine must actually 2630 * insert this page into the given map NOW. 2631 */ 2632void 2633pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m, 2634 vm_prot_t prot, boolean_t wired) 2635{ 2636 pd_entry_t *pde; 2637 pt_entry_t *pte; 2638 pt_entry_t newpte, origpte; 2639 pv_entry_t pv; 2640 vm_paddr_t opa, pa; 2641 vm_page_t mpte, om; 2642 boolean_t invlva; 2643 2644 CTR6(KTR_PMAP, "pmap_enter: pmap=%08p va=0x%08x access=0x%x ma=0x%08x prot=0x%x wired=%d", 2645 pmap, va, access, VM_PAGE_TO_MACH(m), prot, wired); 2646 va = trunc_page(va); 2647 KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig")); 2648 KASSERT(va < UPT_MIN_ADDRESS || va >= UPT_MAX_ADDRESS, 2649 ("pmap_enter: invalid to pmap_enter page table pages (va: 0x%x)", 2650 va)); 2651 KASSERT((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) != 0 || 2652 VM_OBJECT_LOCKED(m->object), 2653 ("pmap_enter: page %p is not busy", m)); 2654 2655 mpte = NULL; 2656 2657 vm_page_lock_queues(); 2658 PMAP_LOCK(pmap); 2659 sched_pin(); 2660 2661 /* 2662 * In the case that a page table page is not 2663 * resident, we are creating it here. 
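 * Only user addresses get a page table page allocated (or referenced)
 * here via pmap_allocpte(); kernel addresses are expected to have had
 * their page table pages created by pmap_growkernel(), which is why a
 * NULL pte lookup below is treated as a panic rather than a retry.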
2664 */ 2665 if (va < VM_MAXUSER_ADDRESS) { 2666 mpte = pmap_allocpte(pmap, va, M_WAITOK); 2667 } 2668 2669 pde = pmap_pde(pmap, va); 2670 if ((*pde & PG_PS) != 0) 2671 panic("pmap_enter: attempted pmap_enter on 4MB page"); 2672 pte = pmap_pte_quick(pmap, va); 2673 2674 /* 2675 * Page Directory table entry not valid, we need a new PT page 2676 */ 2677 if (pte == NULL) { 2678 panic("pmap_enter: invalid page directory pdir=%#jx, va=%#x", 2679 (uintmax_t)pmap->pm_pdir[va >> PDRSHIFT], va); 2680 } 2681 2682 pa = VM_PAGE_TO_PHYS(m); 2683 om = NULL; 2684 opa = origpte = 0; 2685 2686#if 0 2687 KASSERT((*pte & PG_V) || (*pte == 0), ("address set but not valid pte=%p *pte=0x%016jx", 2688 pte, *pte)); 2689#endif 2690 origpte = *pte; 2691 if (origpte) 2692 origpte = xpmap_mtop(origpte); 2693 opa = origpte & PG_FRAME; 2694 2695 /* 2696 * Mapping has not changed, must be protection or wiring change. 2697 */ 2698 if (origpte && (opa == pa)) { 2699 /* 2700 * Wiring change, just update stats. We don't worry about 2701 * wiring PT pages as they remain resident as long as there 2702 * are valid mappings in them. Hence, if a user page is wired, 2703 * the PT page will be also. 2704 */ 2705 if (wired && ((origpte & PG_W) == 0)) 2706 pmap->pm_stats.wired_count++; 2707 else if (!wired && (origpte & PG_W)) 2708 pmap->pm_stats.wired_count--; 2709 2710 /* 2711 * Remove extra pte reference 2712 */ 2713 if (mpte) 2714 mpte->wire_count--; 2715 2716 if (origpte & PG_MANAGED) { 2717 om = m; 2718 pa |= PG_MANAGED; 2719 } 2720 goto validate; 2721 } 2722 2723 pv = NULL; 2724 2725 /* 2726 * Mapping has changed, invalidate old range and fall through to 2727 * handle validating new mapping. 2728 */ 2729 if (opa) { 2730 if (origpte & PG_W) 2731 pmap->pm_stats.wired_count--; 2732 if (origpte & PG_MANAGED) { 2733 om = PHYS_TO_VM_PAGE(opa); 2734 pv = pmap_pvh_remove(&om->md, pmap, va); 2735 } else if (va < VM_MAXUSER_ADDRESS) 2736 printf("va=0x%x is unmanaged :-( \n", va); 2737 2738 if (mpte != NULL) { 2739 mpte->wire_count--; 2740 KASSERT(mpte->wire_count > 0, 2741 ("pmap_enter: missing reference to page table page," 2742 " va: 0x%x", va)); 2743 } 2744 } else 2745 pmap->pm_stats.resident_count++; 2746 2747 /* 2748 * Enter on the PV list if part of our managed memory. 2749 */ 2750 if ((m->oflags & VPO_UNMANAGED) == 0) { 2751 KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva, 2752 ("pmap_enter: managed mapping within the clean submap")); 2753 if (pv == NULL) 2754 pv = get_pv_entry(pmap, FALSE); 2755 pv->pv_va = va; 2756 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list); 2757 pa |= PG_MANAGED; 2758 } else if (pv != NULL) 2759 free_pv_entry(pmap, pv); 2760 2761 /* 2762 * Increment counters 2763 */ 2764 if (wired) 2765 pmap->pm_stats.wired_count++; 2766 2767validate: 2768 /* 2769 * Now validate mapping with desired protection/wiring. 2770 */ 2771 newpte = (pt_entry_t)(pa | PG_V); 2772 if ((prot & VM_PROT_WRITE) != 0) { 2773 newpte |= PG_RW; 2774 if ((newpte & PG_MANAGED) != 0) 2775 vm_page_aflag_set(m, PGA_WRITEABLE); 2776 } 2777#ifdef PAE 2778 if ((prot & VM_PROT_EXECUTE) == 0) 2779 newpte |= pg_nx; 2780#endif 2781 if (wired) 2782 newpte |= PG_W; 2783 if (va < VM_MAXUSER_ADDRESS) 2784 newpte |= PG_U; 2785 if (pmap == kernel_pmap) 2786 newpte |= pgeflag; 2787 2788 critical_enter(); 2789 /* 2790 * if the mapping or permission bits are different, we need 2791 * to update the pte. 
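 * When the old mapping was valid, a TLB flush is only required if the
 * old pte could actually be cached and the new pte is more restrictive
 * or points elsewhere: the frame changed while PG_A was set, NX was
 * added under PAE, or write permission was removed from a mapping that
 * had been dirtied.  That is what the "invlva" logic below tracks.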
2792 */ 2793 if ((origpte & ~(PG_M|PG_A)) != newpte) { 2794 if (origpte) { 2795 invlva = FALSE; 2796 origpte = *pte; 2797 PT_SET_VA(pte, newpte | PG_A, FALSE); 2798 if (origpte & PG_A) { 2799 if (origpte & PG_MANAGED) 2800 vm_page_aflag_set(om, PGA_REFERENCED); 2801 if (opa != VM_PAGE_TO_PHYS(m)) 2802 invlva = TRUE; 2803#ifdef PAE 2804 if ((origpte & PG_NX) == 0 && 2805 (newpte & PG_NX) != 0) 2806 invlva = TRUE; 2807#endif 2808 } 2809 if ((origpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) { 2810 if ((origpte & PG_MANAGED) != 0) 2811 vm_page_dirty(om); 2812 if ((prot & VM_PROT_WRITE) == 0) 2813 invlva = TRUE; 2814 } 2815 if ((origpte & PG_MANAGED) != 0 && 2816 TAILQ_EMPTY(&om->md.pv_list)) 2817 vm_page_aflag_clear(om, PGA_WRITEABLE); 2818 if (invlva) 2819 pmap_invalidate_page(pmap, va); 2820 } else{ 2821 PT_SET_VA(pte, newpte | PG_A, FALSE); 2822 } 2823 2824 } 2825 PT_UPDATES_FLUSH(); 2826 critical_exit(); 2827 if (*PMAP1) 2828 PT_SET_VA_MA(PMAP1, 0, TRUE); 2829 sched_unpin(); 2830 vm_page_unlock_queues(); 2831 PMAP_UNLOCK(pmap); 2832} 2833 2834/* 2835 * Maps a sequence of resident pages belonging to the same object. 2836 * The sequence begins with the given page m_start. This page is 2837 * mapped at the given virtual address start. Each subsequent page is 2838 * mapped at a virtual address that is offset from start by the same 2839 * amount as the page is offset from m_start within the object. The 2840 * last page in the sequence is the page with the largest offset from 2841 * m_start that can be mapped at a virtual address less than the given 2842 * virtual address end. Not every virtual page between start and end 2843 * is mapped; only those for which a resident page exists with the 2844 * corresponding offset from m_start are mapped. 2845 */ 2846void 2847pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end, 2848 vm_page_t m_start, vm_prot_t prot) 2849{ 2850 vm_page_t m, mpte; 2851 vm_pindex_t diff, psize; 2852 multicall_entry_t mcl[16]; 2853 multicall_entry_t *mclp = mcl; 2854 int error, count = 0; 2855 2856 VM_OBJECT_LOCK_ASSERT(m_start->object, MA_OWNED); 2857 psize = atop(end - start); 2858 mpte = NULL; 2859 m = m_start; 2860 vm_page_lock_queues(); 2861 PMAP_LOCK(pmap); 2862 while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) { 2863 mpte = pmap_enter_quick_locked(&mclp, &count, pmap, start + ptoa(diff), m, 2864 prot, mpte); 2865 m = TAILQ_NEXT(m, listq); 2866 if (count == 16) { 2867 error = HYPERVISOR_multicall(mcl, count); 2868 KASSERT(error == 0, ("bad multicall %d", error)); 2869 mclp = mcl; 2870 count = 0; 2871 } 2872 } 2873 if (count) { 2874 error = HYPERVISOR_multicall(mcl, count); 2875 KASSERT(error == 0, ("bad multicall %d", error)); 2876 } 2877 vm_page_unlock_queues(); 2878 PMAP_UNLOCK(pmap); 2879} 2880 2881/* 2882 * this code makes some *MAJOR* assumptions: 2883 * 1. Current pmap & pmap exists. 2884 * 2. Not wired. 2885 * 3. Read access. 2886 * 4. No page table pages. 2887 * but is *MUCH* faster than pmap_enter... 
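 * On this Xen port the pte writes are not issued one hypercall at a
 * time: the quick-entry paths queue __HYPERVISOR_update_va_mapping
 * operations into a multicall array and hand them to the hypervisor in
 * batches (up to 16 at a time in pmap_enter_object() above), keeping
 * hypercall overhead down.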
2888 */ 2889 2890void 2891pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot) 2892{ 2893 multicall_entry_t mcl, *mclp; 2894 int count = 0; 2895 mclp = &mcl; 2896 2897 CTR4(KTR_PMAP, "pmap_enter_quick: pmap=%p va=0x%x m=%p prot=0x%x", 2898 pmap, va, m, prot); 2899 2900 vm_page_lock_queues(); 2901 PMAP_LOCK(pmap); 2902 (void)pmap_enter_quick_locked(&mclp, &count, pmap, va, m, prot, NULL); 2903 if (count) 2904 HYPERVISOR_multicall(&mcl, count); 2905 vm_page_unlock_queues(); 2906 PMAP_UNLOCK(pmap); 2907} 2908 2909#ifdef notyet 2910void 2911pmap_enter_quick_range(pmap_t pmap, vm_offset_t *addrs, vm_page_t *pages, vm_prot_t *prots, int count) 2912{ 2913 int i, error, index = 0; 2914 multicall_entry_t mcl[16]; 2915 multicall_entry_t *mclp = mcl; 2916 2917 PMAP_LOCK(pmap); 2918 for (i = 0; i < count; i++, addrs++, pages++, prots++) { 2919 if (!pmap_is_prefaultable_locked(pmap, *addrs)) 2920 continue; 2921 2922 (void) pmap_enter_quick_locked(&mclp, &index, pmap, *addrs, *pages, *prots, NULL); 2923 if (index == 16) { 2924 error = HYPERVISOR_multicall(mcl, index); 2925 mclp = mcl; 2926 index = 0; 2927 KASSERT(error == 0, ("bad multicall %d", error)); 2928 } 2929 } 2930 if (index) { 2931 error = HYPERVISOR_multicall(mcl, index); 2932 KASSERT(error == 0, ("bad multicall %d", error)); 2933 } 2934 2935 PMAP_UNLOCK(pmap); 2936} 2937#endif 2938 2939static vm_page_t 2940pmap_enter_quick_locked(multicall_entry_t **mclpp, int *count, pmap_t pmap, vm_offset_t va, vm_page_t m, 2941 vm_prot_t prot, vm_page_t mpte) 2942{ 2943 pt_entry_t *pte; 2944 vm_paddr_t pa; 2945 vm_page_t free; 2946 multicall_entry_t *mcl = *mclpp; 2947 2948 KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva || 2949 (m->oflags & VPO_UNMANAGED) != 0, 2950 ("pmap_enter_quick_locked: managed mapping within the clean submap")); 2951 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2952 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2953 2954 /* 2955 * In the case that a page table page is not 2956 * resident, we are creating it here. 2957 */ 2958 if (va < VM_MAXUSER_ADDRESS) { 2959 u_int ptepindex; 2960 pd_entry_t ptema; 2961 2962 /* 2963 * Calculate pagetable page index 2964 */ 2965 ptepindex = va >> PDRSHIFT; 2966 if (mpte && (mpte->pindex == ptepindex)) { 2967 mpte->wire_count++; 2968 } else { 2969 /* 2970 * Get the page directory entry 2971 */ 2972 ptema = pmap->pm_pdir[ptepindex]; 2973 2974 /* 2975 * If the page table page is mapped, we just increment 2976 * the hold count, and activate it. 2977 */ 2978 if (ptema & PG_V) { 2979 if (ptema & PG_PS) 2980 panic("pmap_enter_quick: unexpected mapping into 4MB page"); 2981 mpte = PHYS_TO_VM_PAGE(xpmap_mtop(ptema) & PG_FRAME); 2982 mpte->wire_count++; 2983 } else { 2984 mpte = _pmap_allocpte(pmap, ptepindex, 2985 M_NOWAIT); 2986 if (mpte == NULL) 2987 return (mpte); 2988 } 2989 } 2990 } else { 2991 mpte = NULL; 2992 } 2993 2994 /* 2995 * This call to vtopte makes the assumption that we are 2996 * entering the page into the current pmap. In order to support 2997 * quick entry into any pmap, one would likely use pmap_pte_quick. 2998 * But that isn't as quick as vtopte. 2999 */ 3000 KASSERT(pmap_is_current(pmap), ("entering pages in non-current pmap")); 3001 pte = vtopte(va); 3002 if (*pte & PG_V) { 3003 if (mpte != NULL) { 3004 mpte->wire_count--; 3005 mpte = NULL; 3006 } 3007 return (mpte); 3008 } 3009 3010 /* 3011 * Enter on the PV list if part of our managed memory. 
3012 */ 3013 if ((m->oflags & VPO_UNMANAGED) == 0 && 3014 !pmap_try_insert_pv_entry(pmap, va, m)) { 3015 if (mpte != NULL) { 3016 free = NULL; 3017 if (pmap_unwire_pte_hold(pmap, mpte, &free)) { 3018 pmap_invalidate_page(pmap, va); 3019 pmap_free_zero_pages(free); 3020 } 3021 3022 mpte = NULL; 3023 } 3024 return (mpte); 3025 } 3026 3027 /* 3028 * Increment counters 3029 */ 3030 pmap->pm_stats.resident_count++; 3031 3032 pa = VM_PAGE_TO_PHYS(m); 3033#ifdef PAE 3034 if ((prot & VM_PROT_EXECUTE) == 0) 3035 pa |= pg_nx; 3036#endif 3037 3038#if 0 3039 /* 3040 * Now validate mapping with RO protection 3041 */ 3042 if ((m->oflags & VPO_UNMANAGED) != 0) 3043 pte_store(pte, pa | PG_V | PG_U); 3044 else 3045 pte_store(pte, pa | PG_V | PG_U | PG_MANAGED); 3046#else 3047 /* 3048 * Now validate mapping with RO protection 3049 */ 3050 if ((m->oflags & VPO_UNMANAGED) != 0) 3051 pa = xpmap_ptom(pa | PG_V | PG_U); 3052 else 3053 pa = xpmap_ptom(pa | PG_V | PG_U | PG_MANAGED); 3054 3055 mcl->op = __HYPERVISOR_update_va_mapping; 3056 mcl->args[0] = va; 3057 mcl->args[1] = (uint32_t)(pa & 0xffffffff); 3058 mcl->args[2] = (uint32_t)(pa >> 32); 3059 mcl->args[3] = 0; 3060 *mclpp = mcl + 1; 3061 *count = *count + 1; 3062#endif 3063 return (mpte); 3064} 3065 3066/* 3067 * Make a temporary mapping for a physical address. This is only intended 3068 * to be used for panic dumps. 3069 */ 3070void * 3071pmap_kenter_temporary(vm_paddr_t pa, int i) 3072{ 3073 vm_offset_t va; 3074 vm_paddr_t ma = xpmap_ptom(pa); 3075 3076 va = (vm_offset_t)crashdumpmap + (i * PAGE_SIZE); 3077 PT_SET_MA(va, (ma & ~PAGE_MASK) | PG_V | pgeflag); 3078 invlpg(va); 3079 return ((void *)crashdumpmap); 3080} 3081 3082/* 3083 * This code maps large physical mmap regions into the 3084 * processor address space. Note that some shortcuts 3085 * are taken, but the code works. 3086 */ 3087void 3088pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object, 3089 vm_pindex_t pindex, vm_size_t size) 3090{ 3091 pd_entry_t *pde; 3092 vm_paddr_t pa, ptepa; 3093 vm_page_t p; 3094 int pat_mode; 3095 3096 VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); 3097 KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG, 3098 ("pmap_object_init_pt: non-device object")); 3099 if (pseflag && 3100 (addr & (NBPDR - 1)) == 0 && (size & (NBPDR - 1)) == 0) { 3101 if (!vm_object_populate(object, pindex, pindex + atop(size))) 3102 return; 3103 p = vm_page_lookup(object, pindex); 3104 KASSERT(p->valid == VM_PAGE_BITS_ALL, 3105 ("pmap_object_init_pt: invalid page %p", p)); 3106 pat_mode = p->md.pat_mode; 3107 3108 /* 3109 * Abort the mapping if the first page is not physically 3110 * aligned to a 2/4MB page boundary. 3111 */ 3112 ptepa = VM_PAGE_TO_PHYS(p); 3113 if (ptepa & (NBPDR - 1)) 3114 return; 3115 3116 /* 3117 * Skip the first page. Abort the mapping if the rest of 3118 * the pages are not physically contiguous or have differing 3119 * memory attributes. 3120 */ 3121 p = TAILQ_NEXT(p, listq); 3122 for (pa = ptepa + PAGE_SIZE; pa < ptepa + size; 3123 pa += PAGE_SIZE) { 3124 KASSERT(p->valid == VM_PAGE_BITS_ALL, 3125 ("pmap_object_init_pt: invalid page %p", p)); 3126 if (pa != VM_PAGE_TO_PHYS(p) || 3127 pat_mode != p->md.pat_mode) 3128 return; 3129 p = TAILQ_NEXT(p, listq); 3130 } 3131 3132 /* 3133 * Map using 2/4MB pages. Since "ptepa" is 2/4M aligned and 3134 * "size" is a multiple of 2/4M, adding the PAT setting to 3135 * "pa" will not affect the termination of this loop. 
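 * Concretely: the cache bits are all below NBPDR, so after k iterations
 * pa == ptepa + k * NBPDR + (cache bits), and the test pa < ptepa + size
 * holds exactly for k = 0 .. size/NBPDR - 1.  The loop therefore maps
 * exactly size/NBPDR large pages, PAT bits or not.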
3136 */ 3137 PMAP_LOCK(pmap); 3138 for (pa = ptepa | pmap_cache_bits(pat_mode, 1); pa < ptepa + 3139 size; pa += NBPDR) { 3140 pde = pmap_pde(pmap, addr); 3141 if (*pde == 0) { 3142 pde_store(pde, pa | PG_PS | PG_M | PG_A | 3143 PG_U | PG_RW | PG_V); 3144 pmap->pm_stats.resident_count += NBPDR / 3145 PAGE_SIZE; 3146 pmap_pde_mappings++; 3147 } 3148 /* Else continue on if the PDE is already valid. */ 3149 addr += NBPDR; 3150 } 3151 PMAP_UNLOCK(pmap); 3152 } 3153} 3154 3155/* 3156 * Routine: pmap_change_wiring 3157 * Function: Change the wiring attribute for a map/virtual-address 3158 * pair. 3159 * In/out conditions: 3160 * The mapping must already exist in the pmap. 3161 */ 3162void 3163pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired) 3164{ 3165 pt_entry_t *pte; 3166 3167 vm_page_lock_queues(); 3168 PMAP_LOCK(pmap); 3169 pte = pmap_pte(pmap, va); 3170 3171 if (wired && !pmap_pte_w(pte)) { 3172 PT_SET_VA_MA((pte), *(pte) | PG_W, TRUE); 3173 pmap->pm_stats.wired_count++; 3174 } else if (!wired && pmap_pte_w(pte)) { 3175 PT_SET_VA_MA((pte), *(pte) & ~PG_W, TRUE); 3176 pmap->pm_stats.wired_count--; 3177 } 3178 3179 /* 3180 * Wiring is not a hardware characteristic so there is no need to 3181 * invalidate TLB. 3182 */ 3183 pmap_pte_release(pte); 3184 PMAP_UNLOCK(pmap); 3185 vm_page_unlock_queues(); 3186} 3187 3188 3189 3190/* 3191 * Copy the range specified by src_addr/len 3192 * from the source map to the range dst_addr/len 3193 * in the destination map. 3194 * 3195 * This routine is only advisory and need not do anything. 3196 */ 3197 3198void 3199pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len, 3200 vm_offset_t src_addr) 3201{ 3202 vm_page_t free; 3203 vm_offset_t addr; 3204 vm_offset_t end_addr = src_addr + len; 3205 vm_offset_t pdnxt; 3206 3207 if (dst_addr != src_addr) 3208 return; 3209 3210 if (!pmap_is_current(src_pmap)) { 3211 CTR2(KTR_PMAP, 3212 "pmap_copy, skipping: pdir[PTDPTDI]=0x%jx PTDpde[0]=0x%jx", 3213 (src_pmap->pm_pdir[PTDPTDI] & PG_FRAME), (PTDpde[0] & PG_FRAME)); 3214 3215 return; 3216 } 3217 CTR5(KTR_PMAP, "pmap_copy: dst_pmap=%p src_pmap=%p dst_addr=0x%x len=%d src_addr=0x%x", 3218 dst_pmap, src_pmap, dst_addr, len, src_addr); 3219 3220#ifdef HAMFISTED_LOCKING 3221 mtx_lock(&createdelete_lock); 3222#endif 3223 3224 vm_page_lock_queues(); 3225 if (dst_pmap < src_pmap) { 3226 PMAP_LOCK(dst_pmap); 3227 PMAP_LOCK(src_pmap); 3228 } else { 3229 PMAP_LOCK(src_pmap); 3230 PMAP_LOCK(dst_pmap); 3231 } 3232 sched_pin(); 3233 for (addr = src_addr; addr < end_addr; addr = pdnxt) { 3234 pt_entry_t *src_pte, *dst_pte; 3235 vm_page_t dstmpte, srcmpte; 3236 pd_entry_t srcptepaddr; 3237 u_int ptepindex; 3238 3239 KASSERT(addr < UPT_MIN_ADDRESS, 3240 ("pmap_copy: invalid to pmap_copy page tables")); 3241 3242 pdnxt = (addr + NBPDR) & ~PDRMASK; 3243 if (pdnxt < addr) 3244 pdnxt = end_addr; 3245 ptepindex = addr >> PDRSHIFT; 3246 3247 srcptepaddr = PT_GET(&src_pmap->pm_pdir[ptepindex]); 3248 if (srcptepaddr == 0) 3249 continue; 3250 3251 if (srcptepaddr & PG_PS) { 3252 if (dst_pmap->pm_pdir[ptepindex] == 0) { 3253 PD_SET_VA(dst_pmap, ptepindex, srcptepaddr & ~PG_W, TRUE); 3254 dst_pmap->pm_stats.resident_count += 3255 NBPDR / PAGE_SIZE; 3256 } 3257 continue; 3258 } 3259 3260 srcmpte = PHYS_TO_VM_PAGE(srcptepaddr & PG_FRAME); 3261 KASSERT(srcmpte->wire_count > 0, 3262 ("pmap_copy: source page table page is unused")); 3263 3264 if (pdnxt > end_addr) 3265 pdnxt = end_addr; 3266 3267 src_pte = vtopte(addr); 3268 while (addr < pdnxt) { 3269 
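			/*
			 * Both pmaps are locked at this point, so the
			 * destination page table page is allocated with
			 * M_NOWAIT below; if that fails we give up on the
			 * (purely advisory) copy via "goto out" rather than
			 * sleeping with the locks held.
			 */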
pt_entry_t ptetemp; 3270 ptetemp = *src_pte; 3271 /* 3272 * we only virtual copy managed pages 3273 */ 3274 if ((ptetemp & PG_MANAGED) != 0) { 3275 dstmpte = pmap_allocpte(dst_pmap, addr, 3276 M_NOWAIT); 3277 if (dstmpte == NULL) 3278 goto out; 3279 dst_pte = pmap_pte_quick(dst_pmap, addr); 3280 if (*dst_pte == 0 && 3281 pmap_try_insert_pv_entry(dst_pmap, addr, 3282 PHYS_TO_VM_PAGE(xpmap_mtop(ptetemp) & PG_FRAME))) { 3283 /* 3284 * Clear the wired, modified, and 3285 * accessed (referenced) bits 3286 * during the copy. 3287 */ 3288 KASSERT(ptetemp != 0, ("src_pte not set")); 3289 PT_SET_VA_MA(dst_pte, ptetemp & ~(PG_W | PG_M | PG_A), TRUE /* XXX debug */); 3290 KASSERT(*dst_pte == (ptetemp & ~(PG_W | PG_M | PG_A)), 3291 ("no pmap copy expected: 0x%jx saw: 0x%jx", 3292 ptetemp & ~(PG_W | PG_M | PG_A), *dst_pte)); 3293 dst_pmap->pm_stats.resident_count++; 3294 } else { 3295 free = NULL; 3296 if (pmap_unwire_pte_hold(dst_pmap, 3297 dstmpte, &free)) { 3298 pmap_invalidate_page(dst_pmap, 3299 addr); 3300 pmap_free_zero_pages(free); 3301 } 3302 goto out; 3303 } 3304 if (dstmpte->wire_count >= srcmpte->wire_count) 3305 break; 3306 } 3307 addr += PAGE_SIZE; 3308 src_pte++; 3309 } 3310 } 3311out: 3312 PT_UPDATES_FLUSH(); 3313 sched_unpin(); 3314 vm_page_unlock_queues(); 3315 PMAP_UNLOCK(src_pmap); 3316 PMAP_UNLOCK(dst_pmap); 3317 3318#ifdef HAMFISTED_LOCKING 3319 mtx_unlock(&createdelete_lock); 3320#endif 3321} 3322 3323static __inline void 3324pagezero(void *page) 3325{ 3326#if defined(I686_CPU) 3327 if (cpu_class == CPUCLASS_686) { 3328#if defined(CPU_ENABLE_SSE) 3329 if (cpu_feature & CPUID_SSE2) 3330 sse2_pagezero(page); 3331 else 3332#endif 3333 i686_pagezero(page); 3334 } else 3335#endif 3336 bzero(page, PAGE_SIZE); 3337} 3338 3339/* 3340 * pmap_zero_page zeros the specified hardware page by mapping 3341 * the page into KVM and using bzero to clear its contents. 3342 */ 3343void 3344pmap_zero_page(vm_page_t m) 3345{ 3346 struct sysmaps *sysmaps; 3347 3348 sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)]; 3349 mtx_lock(&sysmaps->lock); 3350 if (*sysmaps->CMAP2) 3351 panic("pmap_zero_page: CMAP2 busy"); 3352 sched_pin(); 3353 PT_SET_MA(sysmaps->CADDR2, PG_V | PG_RW | VM_PAGE_TO_MACH(m) | PG_A | PG_M); 3354 pagezero(sysmaps->CADDR2); 3355 PT_SET_MA(sysmaps->CADDR2, 0); 3356 sched_unpin(); 3357 mtx_unlock(&sysmaps->lock); 3358} 3359 3360/* 3361 * pmap_zero_page_area zeros the specified hardware page by mapping 3362 * the page into KVM and using bzero to clear its contents. 3363 * 3364 * off and size may not cover an area beyond a single hardware page. 3365 */ 3366void 3367pmap_zero_page_area(vm_page_t m, int off, int size) 3368{ 3369 struct sysmaps *sysmaps; 3370 3371 sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)]; 3372 mtx_lock(&sysmaps->lock); 3373 if (*sysmaps->CMAP2) 3374 panic("pmap_zero_page_area: CMAP2 busy"); 3375 sched_pin(); 3376 PT_SET_MA(sysmaps->CADDR2, PG_V | PG_RW | VM_PAGE_TO_MACH(m) | PG_A | PG_M); 3377 3378 if (off == 0 && size == PAGE_SIZE) 3379 pagezero(sysmaps->CADDR2); 3380 else 3381 bzero((char *)sysmaps->CADDR2 + off, size); 3382 PT_SET_MA(sysmaps->CADDR2, 0); 3383 sched_unpin(); 3384 mtx_unlock(&sysmaps->lock); 3385} 3386 3387/* 3388 * pmap_zero_page_idle zeros the specified hardware page by mapping 3389 * the page into KVM and using bzero to clear its contents. This 3390 * is intended to be called from the vm_pagezero process only and 3391 * outside of Giant. 
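 * Unlike pmap_zero_page(), it uses the dedicated CMAP3/CADDR3 window and
 * therefore takes no sysmaps lock; pinning the thread is sufficient.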
3392 */ 3393void 3394pmap_zero_page_idle(vm_page_t m) 3395{ 3396 3397 if (*CMAP3) 3398 panic("pmap_zero_page_idle: CMAP3 busy"); 3399 sched_pin(); 3400 PT_SET_MA(CADDR3, PG_V | PG_RW | VM_PAGE_TO_MACH(m) | PG_A | PG_M); 3401 pagezero(CADDR3); 3402 PT_SET_MA(CADDR3, 0); 3403 sched_unpin(); 3404} 3405 3406/* 3407 * pmap_copy_page copies the specified (machine independent) 3408 * page by mapping the page into virtual memory and using 3409 * bcopy to copy the page, one machine dependent page at a 3410 * time. 3411 */ 3412void 3413pmap_copy_page(vm_page_t src, vm_page_t dst) 3414{ 3415 struct sysmaps *sysmaps; 3416 3417 sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)]; 3418 mtx_lock(&sysmaps->lock); 3419 if (*sysmaps->CMAP1) 3420 panic("pmap_copy_page: CMAP1 busy"); 3421 if (*sysmaps->CMAP2) 3422 panic("pmap_copy_page: CMAP2 busy"); 3423 sched_pin(); 3424 PT_SET_MA(sysmaps->CADDR1, PG_V | VM_PAGE_TO_MACH(src) | PG_A); 3425 PT_SET_MA(sysmaps->CADDR2, PG_V | PG_RW | VM_PAGE_TO_MACH(dst) | PG_A | PG_M); 3426 bcopy(sysmaps->CADDR1, sysmaps->CADDR2, PAGE_SIZE); 3427 PT_SET_MA(sysmaps->CADDR1, 0); 3428 PT_SET_MA(sysmaps->CADDR2, 0); 3429 sched_unpin(); 3430 mtx_unlock(&sysmaps->lock); 3431} 3432 3433/* 3434 * Returns true if the pmap's pv is one of the first 3435 * 16 pvs linked to from this page. This count may 3436 * be changed upwards or downwards in the future; it 3437 * is only necessary that true be returned for a small 3438 * subset of pmaps for proper page aging. 3439 */ 3440boolean_t 3441pmap_page_exists_quick(pmap_t pmap, vm_page_t m) 3442{ 3443 pv_entry_t pv; 3444 int loops = 0; 3445 boolean_t rv; 3446 3447 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 3448 ("pmap_page_exists_quick: page %p is not managed", m)); 3449 rv = FALSE; 3450 vm_page_lock_queues(); 3451 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { 3452 if (PV_PMAP(pv) == pmap) { 3453 rv = TRUE; 3454 break; 3455 } 3456 loops++; 3457 if (loops >= 16) 3458 break; 3459 } 3460 vm_page_unlock_queues(); 3461 return (rv); 3462} 3463 3464/* 3465 * pmap_page_wired_mappings: 3466 * 3467 * Return the number of managed mappings to the given physical page 3468 * that are wired. 3469 */ 3470int 3471pmap_page_wired_mappings(vm_page_t m) 3472{ 3473 pv_entry_t pv; 3474 pt_entry_t *pte; 3475 pmap_t pmap; 3476 int count; 3477 3478 count = 0; 3479 if ((m->oflags & VPO_UNMANAGED) != 0) 3480 return (count); 3481 vm_page_lock_queues(); 3482 sched_pin(); 3483 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { 3484 pmap = PV_PMAP(pv); 3485 PMAP_LOCK(pmap); 3486 pte = pmap_pte_quick(pmap, pv->pv_va); 3487 if ((*pte & PG_W) != 0) 3488 count++; 3489 PMAP_UNLOCK(pmap); 3490 } 3491 sched_unpin(); 3492 vm_page_unlock_queues(); 3493 return (count); 3494} 3495 3496/* 3497 * Returns TRUE if the given page is mapped. Otherwise, returns FALSE. 3498 */ 3499boolean_t 3500pmap_page_is_mapped(vm_page_t m) 3501{ 3502 3503 if ((m->oflags & VPO_UNMANAGED) != 0) 3504 return (FALSE); 3505 return (!TAILQ_EMPTY(&m->md.pv_list)); 3506} 3507 3508/* 3509 * Remove all pages from specified address space 3510 * this aids process exit speeds. Also, this code 3511 * is special cased for current process only, but 3512 * can have the more generic (and slightly slower) 3513 * mode enabled. This is much faster than pmap_remove 3514 * in the case of running down an entire address space. 
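 * It walks the pmap's own pv chunks instead of scanning the page tables,
 * so it only works on the current pmap (note the vtopte() use below),
 * and wired mappings are deliberately left in place.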
3515 */ 3516void 3517pmap_remove_pages(pmap_t pmap) 3518{ 3519 pt_entry_t *pte, tpte; 3520 vm_page_t m, free = NULL; 3521 pv_entry_t pv; 3522 struct pv_chunk *pc, *npc; 3523 int field, idx; 3524 int32_t bit; 3525 uint32_t inuse, bitmask; 3526 int allfree; 3527 3528 CTR1(KTR_PMAP, "pmap_remove_pages: pmap=%p", pmap); 3529 3530 if (pmap != vmspace_pmap(curthread->td_proc->p_vmspace)) { 3531 printf("warning: pmap_remove_pages called with non-current pmap\n"); 3532 return; 3533 } 3534 vm_page_lock_queues(); 3535 KASSERT(pmap_is_current(pmap), ("removing pages from non-current pmap")); 3536 PMAP_LOCK(pmap); 3537 sched_pin(); 3538 TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) { 3539 allfree = 1; 3540 for (field = 0; field < _NPCM; field++) { 3541 inuse = (~(pc->pc_map[field])) & pc_freemask[field]; 3542 while (inuse != 0) { 3543 bit = bsfl(inuse); 3544 bitmask = 1UL << bit; 3545 idx = field * 32 + bit; 3546 pv = &pc->pc_pventry[idx]; 3547 inuse &= ~bitmask; 3548 3549 pte = vtopte(pv->pv_va); 3550 tpte = *pte ? xpmap_mtop(*pte) : 0; 3551 3552 if (tpte == 0) { 3553 printf( 3554 "TPTE at %p IS ZERO @ VA %08x\n", 3555 pte, pv->pv_va); 3556 panic("bad pte"); 3557 } 3558 3559/* 3560 * We cannot remove wired pages from a process' mapping at this time 3561 */ 3562 if (tpte & PG_W) { 3563 allfree = 0; 3564 continue; 3565 } 3566 3567 m = PHYS_TO_VM_PAGE(tpte & PG_FRAME); 3568 KASSERT(m->phys_addr == (tpte & PG_FRAME), 3569 ("vm_page_t %p phys_addr mismatch %016jx %016jx", 3570 m, (uintmax_t)m->phys_addr, 3571 (uintmax_t)tpte)); 3572 3573 KASSERT(m < &vm_page_array[vm_page_array_size], 3574 ("pmap_remove_pages: bad tpte %#jx", 3575 (uintmax_t)tpte)); 3576 3577 3578 PT_CLEAR_VA(pte, FALSE); 3579 3580 /* 3581 * Update the vm_page_t clean/reference bits. 3582 */ 3583 if (tpte & PG_M) 3584 vm_page_dirty(m); 3585 3586 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list); 3587 if (TAILQ_EMPTY(&m->md.pv_list)) 3588 vm_page_aflag_clear(m, PGA_WRITEABLE); 3589 3590 pmap_unuse_pt(pmap, pv->pv_va, &free); 3591 3592 /* Mark free */ 3593 PV_STAT(pv_entry_frees++); 3594 PV_STAT(pv_entry_spare++); 3595 pv_entry_count--; 3596 pc->pc_map[field] |= bitmask; 3597 pmap->pm_stats.resident_count--; 3598 } 3599 } 3600 PT_UPDATES_FLUSH(); 3601 if (allfree) { 3602 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 3603 free_pv_chunk(pc); 3604 } 3605 } 3606 PT_UPDATES_FLUSH(); 3607 if (*PMAP1) 3608 PT_SET_MA(PADDR1, 0); 3609 3610 sched_unpin(); 3611 pmap_invalidate_all(pmap); 3612 vm_page_unlock_queues(); 3613 PMAP_UNLOCK(pmap); 3614 pmap_free_zero_pages(free); 3615} 3616 3617/* 3618 * pmap_is_modified: 3619 * 3620 * Return whether or not the specified physical page was modified 3621 * in any physical maps. 3622 */ 3623boolean_t 3624pmap_is_modified(vm_page_t m) 3625{ 3626 pv_entry_t pv; 3627 pt_entry_t *pte; 3628 pmap_t pmap; 3629 boolean_t rv; 3630 3631 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 3632 ("pmap_is_modified: page %p is not managed", m)); 3633 rv = FALSE; 3634 3635 /* 3636 * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be 3637 * concurrently set while the object is locked. Thus, if PGA_WRITEABLE 3638 * is clear, no PTEs can have PG_M set. 
3639 */ 3640 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 3641 if ((m->oflags & VPO_BUSY) == 0 && 3642 (m->aflags & PGA_WRITEABLE) == 0) 3643 return (rv); 3644 vm_page_lock_queues(); 3645 sched_pin(); 3646 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { 3647 pmap = PV_PMAP(pv); 3648 PMAP_LOCK(pmap); 3649 pte = pmap_pte_quick(pmap, pv->pv_va); 3650 rv = (*pte & PG_M) != 0; 3651 PMAP_UNLOCK(pmap); 3652 if (rv) 3653 break; 3654 } 3655 if (*PMAP1) 3656 PT_SET_MA(PADDR1, 0); 3657 sched_unpin(); 3658 vm_page_unlock_queues(); 3659 return (rv); 3660} 3661 3662/* 3663 * pmap_is_prefaultable: 3664 * 3665 * Return whether or not the specified virtual address is elgible 3666 * for prefault. 3667 */ 3668static boolean_t 3669pmap_is_prefaultable_locked(pmap_t pmap, vm_offset_t addr) 3670{ 3671 pt_entry_t *pte; 3672 boolean_t rv = FALSE; 3673 3674 return (rv); 3675 3676 if (pmap_is_current(pmap) && *pmap_pde(pmap, addr)) { 3677 pte = vtopte(addr); 3678 rv = (*pte == 0); 3679 } 3680 return (rv); 3681} 3682 3683boolean_t 3684pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr) 3685{ 3686 boolean_t rv; 3687 3688 PMAP_LOCK(pmap); 3689 rv = pmap_is_prefaultable_locked(pmap, addr); 3690 PMAP_UNLOCK(pmap); 3691 return (rv); 3692} 3693 3694boolean_t 3695pmap_is_referenced(vm_page_t m) 3696{ 3697 pv_entry_t pv; 3698 pt_entry_t *pte; 3699 pmap_t pmap; 3700 boolean_t rv; 3701 3702 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 3703 ("pmap_is_referenced: page %p is not managed", m)); 3704 rv = FALSE; 3705 vm_page_lock_queues(); 3706 sched_pin(); 3707 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { 3708 pmap = PV_PMAP(pv); 3709 PMAP_LOCK(pmap); 3710 pte = pmap_pte_quick(pmap, pv->pv_va); 3711 rv = (*pte & (PG_A | PG_V)) == (PG_A | PG_V); 3712 PMAP_UNLOCK(pmap); 3713 if (rv) 3714 break; 3715 } 3716 if (*PMAP1) 3717 PT_SET_MA(PADDR1, 0); 3718 sched_unpin(); 3719 vm_page_unlock_queues(); 3720 return (rv); 3721} 3722 3723void 3724pmap_map_readonly(pmap_t pmap, vm_offset_t va, int len) 3725{ 3726 int i, npages = round_page(len) >> PAGE_SHIFT; 3727 for (i = 0; i < npages; i++) { 3728 pt_entry_t *pte; 3729 pte = pmap_pte(pmap, (vm_offset_t)(va + i*PAGE_SIZE)); 3730 vm_page_lock_queues(); 3731 pte_store(pte, xpmap_mtop(*pte & ~(PG_RW|PG_M))); 3732 vm_page_unlock_queues(); 3733 PMAP_MARK_PRIV(xpmap_mtop(*pte)); 3734 pmap_pte_release(pte); 3735 } 3736} 3737 3738void 3739pmap_map_readwrite(pmap_t pmap, vm_offset_t va, int len) 3740{ 3741 int i, npages = round_page(len) >> PAGE_SHIFT; 3742 for (i = 0; i < npages; i++) { 3743 pt_entry_t *pte; 3744 pte = pmap_pte(pmap, (vm_offset_t)(va + i*PAGE_SIZE)); 3745 PMAP_MARK_UNPRIV(xpmap_mtop(*pte)); 3746 vm_page_lock_queues(); 3747 pte_store(pte, xpmap_mtop(*pte) | (PG_RW|PG_M)); 3748 vm_page_unlock_queues(); 3749 pmap_pte_release(pte); 3750 } 3751} 3752 3753/* 3754 * Clear the write and modified bits in each of the given page's mappings. 3755 */ 3756void 3757pmap_remove_write(vm_page_t m) 3758{ 3759 pv_entry_t pv; 3760 pmap_t pmap; 3761 pt_entry_t oldpte, *pte; 3762 3763 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 3764 ("pmap_remove_write: page %p is not managed", m)); 3765 3766 /* 3767 * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be set by 3768 * another thread while the object is locked. Thus, if PGA_WRITEABLE 3769 * is clear, no page table entries need updating. 
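 * The loop below re-reads the pte after writing it and retries if the
 * value changed underneath us (e.g. because the accessed/modified bits
 * were set concurrently), so no modification is lost while write access
 * is being revoked.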
3770 */ 3771 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 3772 if ((m->oflags & VPO_BUSY) == 0 && 3773 (m->aflags & PGA_WRITEABLE) == 0) 3774 return; 3775 vm_page_lock_queues(); 3776 sched_pin(); 3777 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { 3778 pmap = PV_PMAP(pv); 3779 PMAP_LOCK(pmap); 3780 pte = pmap_pte_quick(pmap, pv->pv_va); 3781retry: 3782 oldpte = *pte; 3783 if ((oldpte & PG_RW) != 0) { 3784 vm_paddr_t newpte = oldpte & ~(PG_RW | PG_M); 3785 3786 /* 3787 * Regardless of whether a pte is 32 or 64 bits 3788 * in size, PG_RW and PG_M are among the least 3789 * significant 32 bits. 3790 */ 3791 PT_SET_VA_MA(pte, newpte, TRUE); 3792 if (*pte != newpte) 3793 goto retry; 3794 3795 if ((oldpte & PG_M) != 0) 3796 vm_page_dirty(m); 3797 pmap_invalidate_page(pmap, pv->pv_va); 3798 } 3799 PMAP_UNLOCK(pmap); 3800 } 3801 vm_page_aflag_clear(m, PGA_WRITEABLE); 3802 PT_UPDATES_FLUSH(); 3803 if (*PMAP1) 3804 PT_SET_MA(PADDR1, 0); 3805 sched_unpin(); 3806 vm_page_unlock_queues(); 3807} 3808 3809/* 3810 * pmap_ts_referenced: 3811 * 3812 * Return a count of reference bits for a page, clearing those bits. 3813 * It is not necessary for every reference bit to be cleared, but it 3814 * is necessary that 0 only be returned when there are truly no 3815 * reference bits set. 3816 * 3817 * XXX: The exact number of bits to check and clear is a matter that 3818 * should be tested and standardized at some point in the future for 3819 * optimal aging of shared pages. 3820 */ 3821int 3822pmap_ts_referenced(vm_page_t m) 3823{ 3824 pv_entry_t pv, pvf, pvn; 3825 pmap_t pmap; 3826 pt_entry_t *pte; 3827 int rtval = 0; 3828 3829 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 3830 ("pmap_ts_referenced: page %p is not managed", m)); 3831 vm_page_lock_queues(); 3832 sched_pin(); 3833 if ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) { 3834 pvf = pv; 3835 do { 3836 pvn = TAILQ_NEXT(pv, pv_list); 3837 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list); 3838 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list); 3839 pmap = PV_PMAP(pv); 3840 PMAP_LOCK(pmap); 3841 pte = pmap_pte_quick(pmap, pv->pv_va); 3842 if ((*pte & PG_A) != 0) { 3843 PT_SET_VA_MA(pte, *pte & ~PG_A, FALSE); 3844 pmap_invalidate_page(pmap, pv->pv_va); 3845 rtval++; 3846 if (rtval > 4) 3847 pvn = NULL; 3848 } 3849 PMAP_UNLOCK(pmap); 3850 } while ((pv = pvn) != NULL && pv != pvf); 3851 } 3852 PT_UPDATES_FLUSH(); 3853 if (*PMAP1) 3854 PT_SET_MA(PADDR1, 0); 3855 sched_unpin(); 3856 vm_page_unlock_queues(); 3857 return (rtval); 3858} 3859 3860/* 3861 * Clear the modify bits on the specified physical page. 3862 */ 3863void 3864pmap_clear_modify(vm_page_t m) 3865{ 3866 pv_entry_t pv; 3867 pmap_t pmap; 3868 pt_entry_t *pte; 3869 3870 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 3871 ("pmap_clear_modify: page %p is not managed", m)); 3872 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 3873 KASSERT((m->oflags & VPO_BUSY) == 0, 3874 ("pmap_clear_modify: page %p is busy", m)); 3875 3876 /* 3877 * If the page is not PGA_WRITEABLE, then no PTEs can have PG_M set. 3878 * If the object containing the page is locked and the page is not 3879 * VPO_BUSY, then PGA_WRITEABLE cannot be concurrently set. 
3880 */ 3881 if ((m->aflags & PGA_WRITEABLE) == 0) 3882 return; 3883 vm_page_lock_queues(); 3884 sched_pin(); 3885 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { 3886 pmap = PV_PMAP(pv); 3887 PMAP_LOCK(pmap); 3888 pte = pmap_pte_quick(pmap, pv->pv_va); 3889 if ((*pte & (PG_M | PG_RW)) == (PG_M | PG_RW)) { 3890 /* 3891 * Regardless of whether a pte is 32 or 64 bits 3892 * in size, PG_M is among the least significant 3893 * 32 bits. 3894 */ 3895 PT_SET_VA_MA(pte, *pte & ~PG_M, FALSE); 3896 pmap_invalidate_page(pmap, pv->pv_va); 3897 } 3898 PMAP_UNLOCK(pmap); 3899 } 3900 sched_unpin(); 3901 vm_page_unlock_queues(); 3902} 3903 3904/* 3905 * pmap_clear_reference: 3906 * 3907 * Clear the reference bit on the specified physical page. 3908 */ 3909void 3910pmap_clear_reference(vm_page_t m) 3911{ 3912 pv_entry_t pv; 3913 pmap_t pmap; 3914 pt_entry_t *pte; 3915 3916 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 3917 ("pmap_clear_reference: page %p is not managed", m)); 3918 vm_page_lock_queues(); 3919 sched_pin(); 3920 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { 3921 pmap = PV_PMAP(pv); 3922 PMAP_LOCK(pmap); 3923 pte = pmap_pte_quick(pmap, pv->pv_va); 3924 if ((*pte & PG_A) != 0) { 3925 /* 3926 * Regardless of whether a pte is 32 or 64 bits 3927 * in size, PG_A is among the least significant 3928 * 32 bits. 3929 */ 3930 PT_SET_VA_MA(pte, *pte & ~PG_A, FALSE); 3931 pmap_invalidate_page(pmap, pv->pv_va); 3932 } 3933 PMAP_UNLOCK(pmap); 3934 } 3935 sched_unpin(); 3936 vm_page_unlock_queues(); 3937} 3938 3939/* 3940 * Miscellaneous support routines follow 3941 */ 3942 3943/* 3944 * Map a set of physical memory pages into the kernel virtual 3945 * address space. Return a pointer to where it is mapped. This 3946 * routine is intended to be used for mapping device memory, 3947 * NOT real memory. 3948 */ 3949void * 3950pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, int mode) 3951{ 3952 vm_offset_t va, offset; 3953 vm_size_t tmpsize; 3954 3955 offset = pa & PAGE_MASK; 3956 size = roundup(offset + size, PAGE_SIZE); 3957 pa = pa & PG_FRAME; 3958 3959 if (pa < KERNLOAD && pa + size <= KERNLOAD) 3960 va = KERNBASE + pa; 3961 else 3962 va = kmem_alloc_nofault(kernel_map, size); 3963 if (!va) 3964 panic("pmap_mapdev: Couldn't alloc kernel virtual memory"); 3965 3966 for (tmpsize = 0; tmpsize < size; tmpsize += PAGE_SIZE) 3967 pmap_kenter_attr(va + tmpsize, pa + tmpsize, mode); 3968 pmap_invalidate_range(kernel_pmap, va, va + tmpsize); 3969 pmap_invalidate_cache_range(va, va + size); 3970 return ((void *)(va + offset)); 3971} 3972 3973void * 3974pmap_mapdev(vm_paddr_t pa, vm_size_t size) 3975{ 3976 3977 return (pmap_mapdev_attr(pa, size, PAT_UNCACHEABLE)); 3978} 3979 3980void * 3981pmap_mapbios(vm_paddr_t pa, vm_size_t size) 3982{ 3983 3984 return (pmap_mapdev_attr(pa, size, PAT_WRITE_BACK)); 3985} 3986 3987void 3988pmap_unmapdev(vm_offset_t va, vm_size_t size) 3989{ 3990 vm_offset_t base, offset, tmpva; 3991 3992 if (va >= KERNBASE && va + size <= KERNBASE + KERNLOAD) 3993 return; 3994 base = trunc_page(va); 3995 offset = va & PAGE_MASK; 3996 size = roundup(offset + size, PAGE_SIZE); 3997 critical_enter(); 3998 for (tmpva = base; tmpva < (base + size); tmpva += PAGE_SIZE) 3999 pmap_kremove(tmpva); 4000 pmap_invalidate_range(kernel_pmap, va, tmpva); 4001 critical_exit(); 4002 kmem_free(kernel_map, base, size); 4003} 4004 4005/* 4006 * Sets the memory attribute for the specified page. 
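 * The new mode is recorded in the machine-dependent page fields and any
 * stale cached data is then flushed: first through an existing sf_buf
 * mapping if there is one, otherwise (when the CPU lacks self-snoop) by
 * mapping the page transiently in pmap_flush_page().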
4007 */ 4008void 4009pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma) 4010{ 4011 4012 m->md.pat_mode = ma; 4013 if ((m->flags & PG_FICTITIOUS) != 0) 4014 return; 4015 4016 /* 4017 * If "m" is a normal page, flush it from the cache. 4018 * See pmap_invalidate_cache_range(). 4019 * 4020 * First, try to find an existing mapping of the page by sf 4021 * buffer. sf_buf_invalidate_cache() modifies mapping and 4022 * flushes the cache. 4023 */ 4024 if (sf_buf_invalidate_cache(m)) 4025 return; 4026 4027 /* 4028 * If page is not mapped by sf buffer, but CPU does not 4029 * support self snoop, map the page transient and do 4030 * invalidation. In the worst case, whole cache is flushed by 4031 * pmap_invalidate_cache_range(). 4032 */ 4033 if ((cpu_feature & CPUID_SS) == 0) 4034 pmap_flush_page(m); 4035} 4036 4037static void 4038pmap_flush_page(vm_page_t m) 4039{ 4040 struct sysmaps *sysmaps; 4041 vm_offset_t sva, eva; 4042 4043 if ((cpu_feature & CPUID_CLFSH) != 0) { 4044 sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)]; 4045 mtx_lock(&sysmaps->lock); 4046 if (*sysmaps->CMAP2) 4047 panic("pmap_flush_page: CMAP2 busy"); 4048 sched_pin(); 4049 PT_SET_MA(sysmaps->CADDR2, PG_V | PG_RW | 4050 VM_PAGE_TO_MACH(m) | PG_A | PG_M | 4051 pmap_cache_bits(m->md.pat_mode, 0)); 4052 invlcaddr(sysmaps->CADDR2); 4053 sva = (vm_offset_t)sysmaps->CADDR2; 4054 eva = sva + PAGE_SIZE; 4055 4056 /* 4057 * Use mfence despite the ordering implied by 4058 * mtx_{un,}lock() because clflush is not guaranteed 4059 * to be ordered by any other instruction. 4060 */ 4061 mfence(); 4062 for (; sva < eva; sva += cpu_clflush_line_size) 4063 clflush(sva); 4064 mfence(); 4065 PT_SET_MA(sysmaps->CADDR2, 0); 4066 sched_unpin(); 4067 mtx_unlock(&sysmaps->lock); 4068 } else 4069 pmap_invalidate_cache(); 4070} 4071 4072/* 4073 * Changes the specified virtual address range's memory type to that given by 4074 * the parameter "mode". The specified virtual address range must be 4075 * completely contained within either the kernel map. 4076 * 4077 * Returns zero if the change completed successfully, and either EINVAL or 4078 * ENOMEM if the change failed. Specifically, EINVAL is returned if some part 4079 * of the virtual address range was not mapped, and ENOMEM is returned if 4080 * there was insufficient memory available to complete the change. 4081 */ 4082int 4083pmap_change_attr(vm_offset_t va, vm_size_t size, int mode) 4084{ 4085 vm_offset_t base, offset, tmpva; 4086 pt_entry_t *pte; 4087 u_int opte, npte; 4088 pd_entry_t *pde; 4089 boolean_t changed; 4090 4091 base = trunc_page(va); 4092 offset = va & PAGE_MASK; 4093 size = roundup(offset + size, PAGE_SIZE); 4094 4095 /* Only supported on kernel virtual addresses. */ 4096 if (base <= VM_MAXUSER_ADDRESS) 4097 return (EINVAL); 4098 4099 /* 4MB pages and pages that aren't mapped aren't supported. */ 4100 for (tmpva = base; tmpva < (base + size); tmpva += PAGE_SIZE) { 4101 pde = pmap_pde(kernel_pmap, tmpva); 4102 if (*pde & PG_PS) 4103 return (EINVAL); 4104 if ((*pde & PG_V) == 0) 4105 return (EINVAL); 4106 pte = vtopte(va); 4107 if ((*pte & PG_V) == 0) 4108 return (EINVAL); 4109 } 4110 4111 changed = FALSE; 4112 4113 /* 4114 * Ok, all the pages exist and are 4k, so run through them updating 4115 * their cache mode. 4116 */ 4117 for (tmpva = base; size > 0; ) { 4118 pte = vtopte(tmpva); 4119 4120 /* 4121 * The cache mode bits are all in the low 32-bits of the 4122 * PTE, so we can just spin on updating the low 32-bits. 
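 * The do/while below keeps re-reading the pte, recomputing the desired
 * low word and re-writing it until either nothing needed to change
 * (npte == opte) or the write is observed to have stuck (*pte == npte);
 * "changed" is only set when some pte actually had its mode bits
 * altered.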
		 */
		do {
			opte = *(u_int *)pte;
			npte = opte & ~(PG_PTE_PAT | PG_NC_PCD | PG_NC_PWT);
			npte |= pmap_cache_bits(mode, 0);
			PT_SET_VA_MA(pte, npte, TRUE);
		} while (npte != opte && (*pte != npte));
		if (npte != opte)
			changed = TRUE;
		tmpva += PAGE_SIZE;
		size -= PAGE_SIZE;
	}

	/*
	 * Flush CPU caches to make sure any data isn't cached that
	 * shouldn't be, etc.
	 */
	if (changed) {
		pmap_invalidate_range(kernel_pmap, base, tmpva);
		pmap_invalidate_cache_range(base, tmpva);
	}
	return (0);
}

/*
 * perform the pmap work for mincore
 */
int
pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
{
	pt_entry_t *ptep, pte;
	vm_paddr_t pa;
	int val;

	PMAP_LOCK(pmap);
retry:
	ptep = pmap_pte(pmap, addr);
	pte = (ptep != NULL) ? PT_GET(ptep) : 0;
	pmap_pte_release(ptep);
	val = 0;
	if ((pte & PG_V) != 0) {
		val |= MINCORE_INCORE;
		if ((pte & (PG_M | PG_RW)) == (PG_M | PG_RW))
			val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
		if ((pte & PG_A) != 0)
			val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
	}
	if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) !=
	    (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) &&
	    (pte & (PG_MANAGED | PG_V)) == (PG_MANAGED | PG_V)) {
		pa = pte & PG_FRAME;
		/* Ensure that "PHYS_TO_VM_PAGE(pa)->object" doesn't change. */
		if (vm_page_pa_tryrelock(pmap, pa, locked_pa))
			goto retry;
	} else
		PA_UNLOCK_COND(*locked_pa);
	PMAP_UNLOCK(pmap);
	return (val);
}

void
pmap_activate(struct thread *td)
{
	pmap_t pmap, oldpmap;
	u_int cpuid;
	u_int32_t cr3;

	critical_enter();
	pmap = vmspace_pmap(td->td_proc->p_vmspace);
	oldpmap = PCPU_GET(curpmap);
	cpuid = PCPU_GET(cpuid);
#if defined(SMP)
	CPU_CLR_ATOMIC(cpuid, &oldpmap->pm_active);
	CPU_SET_ATOMIC(cpuid, &pmap->pm_active);
#else
	CPU_CLR(cpuid, &oldpmap->pm_active);
	CPU_SET(cpuid, &pmap->pm_active);
#endif
#ifdef PAE
	cr3 = vtophys(pmap->pm_pdpt);
#else
	cr3 = vtophys(pmap->pm_pdir);
#endif
	/*
	 * pmap_activate is for the current thread on the current cpu
	 */
	td->td_pcb->pcb_cr3 = cr3;
	PT_UPDATES_FLUSH();
	load_cr3(cr3);
	PCPU_SET(curpmap, pmap);
	critical_exit();
}

void
pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
{
}

/*
 * Increase the starting virtual address of the given mapping if a
 * different alignment might result in more superpage mappings.
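 *
 * The address is adjusted so that it has the same offset modulo NBPDR as
 * the (color-adjusted) object offset, which allows superpage mappings to
 * be created when the backing memory permits it.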
 */
void
pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
    vm_offset_t *addr, vm_size_t size)
{
	vm_offset_t superpage_offset;

	if (size < NBPDR)
		return;
	if (object != NULL && (object->flags & OBJ_COLORED) != 0)
		offset += ptoa(object->pg_color);
	superpage_offset = offset & PDRMASK;
	if (size - ((NBPDR - superpage_offset) & PDRMASK) < NBPDR ||
	    (*addr & PDRMASK) == superpage_offset)
		return;
	if ((*addr & PDRMASK) < superpage_offset)
		*addr = (*addr & ~PDRMASK) + superpage_offset;
	else
		*addr = ((*addr + PDRMASK) & ~PDRMASK) + superpage_offset;
}

void
pmap_suspend()
{
	pmap_t pmap;
	int i, pdir, offset;
	vm_paddr_t pdirma;
	mmu_update_t mu[4];

	/*
	 * We need to remove the recursive mapping structure from all
	 * our pmaps so that Xen doesn't get confused when it restores
	 * the page tables.  The recursive map lives at page directory
	 * index PTDPTDI.  We assume that the suspend code has stopped
	 * the other vcpus (if any).
	 */
	LIST_FOREACH(pmap, &allpmaps, pm_list) {
		for (i = 0; i < 4; i++) {
			/*
			 * Figure out which page directory (L2) page
			 * contains this bit of the recursive map and
			 * the offset within that page of the map
			 * entry.
			 */
			pdir = (PTDPTDI + i) / NPDEPG;
			offset = (PTDPTDI + i) % NPDEPG;
			pdirma = pmap->pm_pdpt[pdir] & PG_FRAME;
			mu[i].ptr = pdirma + offset * sizeof(pd_entry_t);
			mu[i].val = 0;
		}
		HYPERVISOR_mmu_update(mu, 4, NULL, DOMID_SELF);
	}
}

void
pmap_resume()
{
	pmap_t pmap;
	int i, pdir, offset;
	vm_paddr_t pdirma;
	mmu_update_t mu[4];

	/*
	 * Restore the recursive map that we removed on suspend.
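	 * Each recursive entry at index PTDPTDI + i is pointed back at
	 * the pmap's i-th page directory page and marked valid again.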
	 */
	LIST_FOREACH(pmap, &allpmaps, pm_list) {
		for (i = 0; i < 4; i++) {
			/*
			 * Figure out which page directory (L2) page
			 * contains this bit of the recursive map and
			 * the offset within that page of the map
			 * entry.
			 */
			pdir = (PTDPTDI + i) / NPDEPG;
			offset = (PTDPTDI + i) % NPDEPG;
			pdirma = pmap->pm_pdpt[pdir] & PG_FRAME;
			mu[i].ptr = pdirma + offset * sizeof(pd_entry_t);
			mu[i].val = (pmap->pm_pdpt[i] & PG_FRAME) | PG_V;
		}
		HYPERVISOR_mmu_update(mu, 4, NULL, DOMID_SELF);
	}
}

#if defined(PMAP_DEBUG)
int
pmap_pid_dump(int pid)
{
	pmap_t pmap;
	struct proc *p;
	int npte = 0;
	int index;

	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		if (p->p_pid != pid)
			continue;

		if (p->p_vmspace) {
			int i, j;

			index = 0;
			pmap = vmspace_pmap(p->p_vmspace);
			for (i = 0; i < NPDEPTD; i++) {
				pd_entry_t *pde;
				pt_entry_t *pte;
				vm_offset_t base = i << PDRSHIFT;

				pde = &pmap->pm_pdir[i];
				if (pde && pmap_pde_v(pde)) {
					for (j = 0; j < NPTEPG; j++) {
						vm_offset_t va = base +
						    (j << PAGE_SHIFT);
						if (va >= (vm_offset_t)
						    VM_MIN_KERNEL_ADDRESS) {
							if (index) {
								index = 0;
								printf("\n");
							}
							sx_sunlock(&allproc_lock);
							return (npte);
						}
						pte = pmap_pte(pmap, va);
						if (pte && pmap_pte_v(pte)) {
							pt_entry_t pa;
							vm_page_t m;

							pa = PT_GET(pte);
							m = PHYS_TO_VM_PAGE(pa &
							    PG_FRAME);
							printf("va: 0x%x, pt: 0x%x, h: %d, w: %d, f: 0x%x",
							    va, pa, m->hold_count,
							    m->wire_count, m->flags);
							npte++;
							index++;
							if (index >= 2) {
								index = 0;
								printf("\n");
							} else {
								printf(" ");
							}
						}
					}
				}
			}
		}
	}
	sx_sunlock(&allproc_lock);
	return (npte);
}
#endif

#if defined(DEBUG)

static void pads(pmap_t pm);
void pmap_pvdump(vm_paddr_t pa);

/* print address space of pmap */
static void
pads(pmap_t pm)
{
	int i, j;
	vm_paddr_t va;
	pt_entry_t *ptep;

	if (pm == kernel_pmap)
		return;
	for (i = 0; i < NPDEPTD; i++)
		if (pm->pm_pdir[i])
			for (j = 0; j < NPTEPG; j++) {
				va = (i << PDRSHIFT) + (j << PAGE_SHIFT);
				if (pm == kernel_pmap && va < KERNBASE)
					continue;
				if (pm != kernel_pmap && va > UPT_MAX_ADDRESS)
					continue;
				ptep = pmap_pte(pm, va);
				if (pmap_pte_v(ptep))
					printf("%x:%x ", va, *ptep);
			}
}

void
pmap_pvdump(vm_paddr_t pa)
{
	pv_entry_t pv;
	pmap_t pmap;
	vm_page_t m;

	printf("pa %x", pa);
	m = PHYS_TO_VM_PAGE(pa);
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
		pmap = PV_PMAP(pv);
		printf(" -> pmap %p, va %x", (void *)pmap, pv->pv_va);
		pads(pmap);
	}
	printf(" ");
}
#endif