pmap.c revision 228923
/*-
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 * Copyright (c) 2005 Alan L. Cox <alc@cs.rice.edu>
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from:	@(#)pmap.c	7.7 (Berkeley)	5/12/91
 */
/*-
 * Copyright (c) 2003 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Jake Burkholder,
 * Safeport Network Services, and Network Associates Laboratories, the
 * Security Research Division of Network Associates, Inc. under
 * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA
 * CHATS research program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/i386/xen/pmap.c 228923 2011-12-27 23:53:00Z alc $");

/*
 * Manages physical address maps.
 *
 * In addition to hardware address maps, this
 * module is called upon to provide software-use-only
 * maps which may or may not be stored in the same
 * form as hardware maps.  These pseudo-maps are
 * used to store intermediate results from copy
 * operations to and from address spaces.
 *
 * Since the information managed by this module is
 * also stored by the logical address mapping module,
 * this module may throw away valid virtual-to-physical
 * mappings at almost any time.  However, invalidations
 * of virtual-to-physical mappings must be done as
 * requested.
 *
 * In order to cope with hardware architectures which
 * make virtual-to-physical map invalidates expensive,
 * this module may delay invalidate or reduced protection
 * operations until such time as they are actually
 * necessary.  This module is given full information as
 * to which processors are currently using which maps,
 * and to when physical maps must be made correct.
 */

#include "opt_cpu.h"
#include "opt_pmap.h"
#include "opt_smp.h"
#include "opt_xbox.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sf_buf.h>
#include <sys/sx.h>
#include <sys/vmmeter.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#ifdef SMP
#include <sys/smp.h>
#else
#include <sys/cpuset.h>
#endif

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/uma.h>

#include <machine/cpu.h>
#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/specialreg.h>
#ifdef SMP
#include <machine/smp.h>
#endif

#ifdef XBOX
#include <machine/xbox.h>
#endif

#include <xen/interface/xen.h>
#include <xen/hypervisor.h>
#include <machine/xen/hypercall.h>
#include <machine/xen/xenvar.h>
#include <machine/xen/xenfunc.h>

#if !defined(CPU_DISABLE_SSE) && defined(I686_CPU)
#define CPU_ENABLE_SSE
#endif

#ifndef PMAP_SHPGPERPROC
#define PMAP_SHPGPERPROC 200
#endif

#define DIAGNOSTIC

#if !defined(DIAGNOSTIC)
#ifdef __GNUC_GNU_INLINE__
#define PMAP_INLINE	__attribute__((__gnu_inline__)) inline
#else
#define PMAP_INLINE	extern inline
#endif
#else
#define PMAP_INLINE
#endif

#define PV_STATS
#ifdef PV_STATS
#define PV_STAT(x)	do { x ; } while (0)
#else
#define PV_STAT(x)	do { } while (0)
#endif

/*
 * Get PDEs and PTEs for user/kernel address space
 */
#define	pmap_pde(m, v)	(&((m)->pm_pdir[(vm_offset_t)(v) >> PDRSHIFT]))
#define pdir_pde(m, v) (m[(vm_offset_t)(v) >> PDRSHIFT])

#define pmap_pde_v(pte)		((*(int *)pte & PG_V) != 0)
#define pmap_pte_w(pte)		((*(int *)pte & PG_W) != 0)
#define pmap_pte_m(pte)		((*(int *)pte & PG_M) != 0)
#define pmap_pte_u(pte)		((*(int *)pte & PG_A) != 0)
#define pmap_pte_v(pte)		((*(int *)pte & PG_V) != 0)

#define pmap_pte_set_prot(pte, v) ((*(int *)pte &= ~PG_PROT), (*(int *)pte |= (v)))

#define HAMFISTED_LOCKING
#ifdef HAMFISTED_LOCKING
static struct mtx createdelete_lock;
#endif

struct pmap kernel_pmap_store;
LIST_HEAD(pmaplist, pmap);
static struct pmaplist allpmaps;
static struct mtx allpmaps_lock;

vm_offset_t virtual_avail;	/* VA of first avail page (after kernel bss) */
vm_offset_t virtual_end;	/* VA of last avail page (end of kernel AS) */
int pgeflag = 0;		/* PG_G or-in */
int pseflag = 0;		/* PG_PS or-in */

int nkpt;
vm_offset_t kernel_vm_end;
extern u_int32_t KERNend;

#ifdef PAE
pt_entry_t pg_nx;
#endif

static SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD, 0, "VM/pmap parameters");

static int pat_works;			/* Is page attribute table sane? */

/*
 * Data for the pv entry allocation mechanism
 */
static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;
static int shpgperproc = PMAP_SHPGPERPROC;

struct pv_chunk *pv_chunkbase;		/* KVA block for pv_chunks */
int pv_maxchunks;			/* How many chunks we have KVA for */
vm_offset_t pv_vafree;			/* freelist stored in the PTE */

/*
 * All those kernel PT submaps that BSD is so fond of
 */
struct sysmaps {
	struct	mtx lock;
	pt_entry_t *CMAP1;
	pt_entry_t *CMAP2;
	caddr_t	CADDR1;
	caddr_t	CADDR2;
};
static struct sysmaps sysmaps_pcpu[MAXCPU];
static pt_entry_t *CMAP3;
caddr_t ptvmmap = 0;
static caddr_t CADDR3;
struct msgbuf *msgbufp = 0;
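/*
 * Editorial note (assumption, not taken from the original source): each CPU
 * gets its own pair of temporary kernel mapping windows (CMAP1/CADDR1 and
 * CMAP2/CADDR2, guarded by the per-CPU sysmaps lock) so that page zeroing
 * and page copying can proceed on different processors without contending
 * for a single mapping slot.  CMAP3/CADDR3 is one extra window reserved for
 * idle-loop page zeroing.
 */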
/*
 * Crashdump maps.
 */
static caddr_t crashdumpmap;

static pt_entry_t *PMAP1 = 0, *PMAP2;
static pt_entry_t *PADDR1 = 0, *PADDR2;
#ifdef SMP
static int PMAP1cpu;
static int PMAP1changedcpu;
SYSCTL_INT(_debug, OID_AUTO, PMAP1changedcpu, CTLFLAG_RD,
	   &PMAP1changedcpu, 0,
	   "Number of times pmap_pte_quick changed CPU with same PMAP1");
#endif
static int PMAP1changed;
SYSCTL_INT(_debug, OID_AUTO, PMAP1changed, CTLFLAG_RD,
	   &PMAP1changed, 0,
	   "Number of times pmap_pte_quick changed PMAP1");
static int PMAP1unchanged;
SYSCTL_INT(_debug, OID_AUTO, PMAP1unchanged, CTLFLAG_RD,
	   &PMAP1unchanged, 0,
	   "Number of times pmap_pte_quick didn't change PMAP1");
static struct mtx PMAP2mutex;

static void	free_pv_entry(pmap_t pmap, pv_entry_t pv);
static pv_entry_t get_pv_entry(pmap_t locked_pmap, int try);
static void	pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va);
static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap,
		    vm_offset_t va);

static vm_page_t pmap_enter_quick_locked(multicall_entry_t **mcl, int *count,
		    pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
		    vm_page_t mpte);
static void pmap_flush_page(vm_page_t m);
static void pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode);
static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t sva,
		    vm_page_t *free);
static void pmap_remove_page(struct pmap *pmap, vm_offset_t va,
		    vm_page_t *free);
static void pmap_remove_entry(struct pmap *pmap, vm_page_t m,
		    vm_offset_t va);
static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va,
		    vm_page_t m);

static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags);

static vm_page_t _pmap_allocpte(pmap_t pmap, u_int ptepindex, int flags);
static int _pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m, vm_page_t *free);
static pt_entry_t *pmap_pte_quick(pmap_t pmap, vm_offset_t va);
static void pmap_pte_release(pt_entry_t *pte);
static int pmap_unuse_pt(pmap_t, vm_offset_t, vm_page_t *);
static boolean_t pmap_is_prefaultable_locked(pmap_t pmap, vm_offset_t addr);

static __inline void pagezero(void *page);

CTASSERT(1 << PDESHIFT == sizeof(pd_entry_t));
CTASSERT(1 << PTESHIFT == sizeof(pt_entry_t));

/*
 * If you get an error here, then you set KVA_PAGES wrong!  See the
 * description of KVA_PAGES in sys/i386/include/pmap.h.  It must be a
 * multiple of 4 for a normal kernel, or a multiple of 8 for a PAE kernel.
 */
CTASSERT(KERNBASE % (1 << 24) == 0);

void
pd_set(struct pmap *pmap, int ptepindex, vm_paddr_t val, int type)
{
	vm_paddr_t pdir_ma = vtomach(&pmap->pm_pdir[ptepindex]);

	switch (type) {
	case SH_PD_SET_VA:
#if 0
		xen_queue_pt_update(shadow_pdir_ma,
		    xpmap_ptom(val & ~(PG_RW)));
#endif
		xen_queue_pt_update(pdir_ma,
		    xpmap_ptom(val));
		break;
	case SH_PD_SET_VA_MA:
#if 0
		xen_queue_pt_update(shadow_pdir_ma,
		    val & ~(PG_RW));
#endif
		xen_queue_pt_update(pdir_ma, val);
		break;
	case SH_PD_SET_VA_CLEAR:
#if 0
		xen_queue_pt_update(shadow_pdir_ma, 0);
#endif
		xen_queue_pt_update(pdir_ma, 0);
		break;
	}
}
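/*
 * Editorial note (assumption, not taken from the original source): pd_set()
 * is the choke point through which page-directory entries are modified.
 * Under Xen the page-directory pages have been validated by the hypervisor
 * and cannot be written directly, so the new value is queued with
 * xen_queue_pt_update() and takes effect when the update queue is flushed.
 * SH_PD_SET_VA converts a physical address to a machine address with
 * xpmap_ptom(), SH_PD_SET_VA_MA installs a machine address as given, and
 * SH_PD_SET_VA_CLEAR zeroes the entry.
 */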
/*
 * Bootstrap the system enough to run with virtual memory.
 *
 * On the i386 this is called after mapping has already been enabled
 * and just syncs the pmap module with what has already been done.
 * [We can't call it easily with mapping off since the kernel is not
 * mapped with PA == VA, hence we would have to relocate every address
 * from the linked base (virtual) address "KERNBASE" to the actual
 * (physical) address starting relative to 0]
 */
void
pmap_bootstrap(vm_paddr_t firstaddr)
{
	vm_offset_t va;
	pt_entry_t *pte, *unused;
	struct sysmaps *sysmaps;
	int i;

	/*
	 * Initialize the first available kernel virtual address.  However,
	 * using "firstaddr" may waste a few pages of the kernel virtual
	 * address space, because locore may not have mapped every physical
	 * page that it allocated.  Preferably, locore would provide a first
	 * unused virtual address in addition to "firstaddr".
	 */
	virtual_avail = (vm_offset_t) KERNBASE + firstaddr;

	virtual_end = VM_MAX_KERNEL_ADDRESS;

	/*
	 * Initialize the kernel pmap (which is statically allocated).
	 */
	PMAP_LOCK_INIT(kernel_pmap);
	kernel_pmap->pm_pdir = (pd_entry_t *) (KERNBASE + (u_int)IdlePTD);
#ifdef PAE
	kernel_pmap->pm_pdpt = (pdpt_entry_t *) (KERNBASE + (u_int)IdlePDPT);
#endif
	CPU_FILL(&kernel_pmap->pm_active);	/* don't allow deactivation */
	TAILQ_INIT(&kernel_pmap->pm_pvchunk);
	LIST_INIT(&allpmaps);
	mtx_init(&allpmaps_lock, "allpmaps", NULL, MTX_SPIN);
	mtx_lock_spin(&allpmaps_lock);
	LIST_INSERT_HEAD(&allpmaps, kernel_pmap, pm_list);
	mtx_unlock_spin(&allpmaps_lock);
	if (nkpt == 0)
		nkpt = NKPT;

	/*
	 * Reserve some special page table entries/VA space for temporary
	 * mapping of pages.
	 */
#define	SYSMAP(c, p, v, n)	\
	v = (c)va; va += ((n)*PAGE_SIZE); p = pte; pte += (n);

	va = virtual_avail;
	pte = vtopte(va);

	/*
	 * CMAP1/CMAP2 are used for zeroing and copying pages.
	 * CMAP3 is used for the idle process page zeroing.
	 */
	for (i = 0; i < MAXCPU; i++) {
		sysmaps = &sysmaps_pcpu[i];
		mtx_init(&sysmaps->lock, "SYSMAPS", NULL, MTX_DEF);
		SYSMAP(caddr_t, sysmaps->CMAP1, sysmaps->CADDR1, 1)
		SYSMAP(caddr_t, sysmaps->CMAP2, sysmaps->CADDR2, 1)
		PT_SET_MA(sysmaps->CADDR1, 0);
		PT_SET_MA(sysmaps->CADDR2, 0);
	}
	SYSMAP(caddr_t, CMAP3, CADDR3, 1)
	PT_SET_MA(CADDR3, 0);

	/*
	 * Crashdump maps.
	 */
	SYSMAP(caddr_t, unused, crashdumpmap, MAXDUMPPGS)

	/*
	 * ptvmmap is used for reading arbitrary physical pages via /dev/mem.
	 */
	SYSMAP(caddr_t, unused, ptvmmap, 1)

	/*
	 * msgbufp is used to map the system message buffer.
	 */
	SYSMAP(struct msgbuf *, unused, msgbufp, atop(round_page(msgbufsize)))

	/*
	 * ptemap is used for pmap_pte_quick
	 */
	SYSMAP(pt_entry_t *, PMAP1, PADDR1, 1)
	SYSMAP(pt_entry_t *, PMAP2, PADDR2, 1)

	mtx_init(&PMAP2mutex, "PMAP2", NULL, MTX_DEF);

	virtual_avail = va;

	/*
	 * Leave in place an identity mapping (virt == phys) for the low 1 MB
	 * physical memory region that is used by the ACPI wakeup code.  This
	 * mapping must not have PG_G set.
	 */
#ifndef XEN
	/*
	 * leave here deliberately to show that this is not supported
	 */
#ifdef XBOX
	/* FIXME: This is gross, but needed for the XBOX. Since we are at such
	 * an early stage, we cannot yet neatly map video memory ... :-(
	 * Better fixes are very welcome! */
	if (!arch_i386_is_xbox)
#endif
	for (i = 1; i < NKPT; i++)
		PTD[i] = 0;
	/* Initialize the PAT MSR if present. */
	pmap_init_pat();

	/* Turn on PG_G on kernel page(s) */
	pmap_set_pg();
#endif

#ifdef HAMFISTED_LOCKING
	mtx_init(&createdelete_lock, "pmap create/delete", NULL, MTX_DEF);
#endif
}

/*
 * Setup the PAT MSR.
 */
void
pmap_init_pat(void)
{
	uint64_t pat_msr;

	/* Bail if this CPU doesn't implement PAT. */
	if (!(cpu_feature & CPUID_PAT))
		return;

	if (cpu_vendor_id != CPU_VENDOR_INTEL ||
	    (CPUID_TO_FAMILY(cpu_id) == 6 && CPUID_TO_MODEL(cpu_id) >= 0xe)) {
		/*
		 * Leave the indices 0-3 at the default of WB, WT, UC, and UC-.
		 * Program 4 and 5 as WP and WC.
		 * Leave 6 and 7 as UC and UC-.
		 */
		pat_msr = rdmsr(MSR_PAT);
		pat_msr &= ~(PAT_MASK(4) | PAT_MASK(5));
		pat_msr |= PAT_VALUE(4, PAT_WRITE_PROTECTED) |
		    PAT_VALUE(5, PAT_WRITE_COMBINING);
		pat_works = 1;
	} else {
		/*
		 * Due to some Intel errata, we can only safely use the lower 4
		 * PAT entries.  Thus, just replace PAT Index 2 with WC instead
		 * of UC-.
		 *
		 *   Intel Pentium III Processor Specification Update
		 * Errata E.27 (Upper Four PAT Entries Not Usable With Mode B
		 * or Mode C Paging)
		 *
		 *   Intel Pentium IV Processor Specification Update
		 * Errata N46 (PAT Index MSB May Be Calculated Incorrectly)
		 */
		pat_msr = rdmsr(MSR_PAT);
		pat_msr &= ~PAT_MASK(2);
		pat_msr |= PAT_VALUE(2, PAT_WRITE_COMBINING);
		pat_works = 0;
	}
	wrmsr(MSR_PAT, pat_msr);
}

/*
 * Initialize a vm_page's machine-dependent fields.
 */
void
pmap_page_init(vm_page_t m)
{

	TAILQ_INIT(&m->md.pv_list);
	m->md.pat_mode = PAT_WRITE_BACK;
}

/*
 * Abuse the pte nodes for unmapped kva to thread a kva freelist through.
 * Requirements:
 *  - Must deal with pages in order to ensure that none of the PG_* bits
 *    are ever set, PG_V in particular.
 *  - Assumes we can write to ptes without pte_store() atomic ops, even
 *    on PAE systems.  This should be ok.
 *  - Assumes nothing will ever test these addresses for 0 to indicate
 *    no mapping instead of correctly checking PG_V.
 *  - Assumes a vm_offset_t will fit in a pte (true for i386).
 * Because PG_V is never set, there can be no mappings to invalidate.
 */
static int ptelist_count = 0;
static vm_offset_t
pmap_ptelist_alloc(vm_offset_t *head)
{
	vm_offset_t va;
	vm_offset_t *phead = (vm_offset_t *)*head;

	if (ptelist_count == 0) {
		printf("out of memory!!!!!!\n");
		return (0);	/* Out of memory */
	}
	ptelist_count--;
	va = phead[ptelist_count];
	return (va);
}

static void
pmap_ptelist_free(vm_offset_t *head, vm_offset_t va)
{
	vm_offset_t *phead = (vm_offset_t *)*head;

	phead[ptelist_count++] = va;
}

static void
pmap_ptelist_init(vm_offset_t *head, void *base, int npages)
{
	int i, nstackpages;
	vm_offset_t va;
	vm_page_t m;

	nstackpages = (npages + PAGE_SIZE/sizeof(vm_offset_t) - 1) /
	    (PAGE_SIZE/sizeof(vm_offset_t));
	for (i = 0; i < nstackpages; i++) {
		va = (vm_offset_t)base + i * PAGE_SIZE;
		m = vm_page_alloc(NULL, i,
		    VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
		    VM_ALLOC_ZERO);
		pmap_qenter(va, &m, 1);
	}

	*head = (vm_offset_t)base;
	for (i = npages - 1; i >= nstackpages; i--) {
		va = (vm_offset_t)base + i * PAGE_SIZE;
		pmap_ptelist_free(head, va);
	}
}
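/*
 * Editorial note (assumption, not taken from the original source): the
 * freelist above keeps the spare VAs in an array rather than threading them
 * through the invalid PTEs.  pmap_ptelist_init() backs the first
 * nstackpages pages of the reserved KVA with real, wired pages and uses
 * them as that array; every remaining page's VA is pushed onto the array by
 * pmap_ptelist_free().  pmap_ptelist_alloc() simply pops the last entry, so
 * both operations are O(1) and never create a mapping that would need to be
 * invalidated.
 */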
/*
 * Initialize the pmap module.
 * Called by vm_init, to initialize any structures that the pmap
 * system needs to map virtual memory.
 */
void
pmap_init(void)
{

	/*
	 * Initialize the address space (zone) for the pv entries.  Set a
	 * high water mark so that the system can recover from excessive
	 * numbers of pv entries.
	 */
	TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
	pv_entry_max = shpgperproc * maxproc + cnt.v_page_count;
	TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max);
	pv_entry_max = roundup(pv_entry_max, _NPCPV);
	pv_entry_high_water = 9 * (pv_entry_max / 10);

	pv_maxchunks = MAX(pv_entry_max / _NPCPV, maxproc);
	pv_chunkbase = (struct pv_chunk *)kmem_alloc_nofault(kernel_map,
	    PAGE_SIZE * pv_maxchunks);
	if (pv_chunkbase == NULL)
		panic("pmap_init: not enough kvm for pv chunks");
	pmap_ptelist_init(&pv_vafree, pv_chunkbase, pv_maxchunks);
}


SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_max, CTLFLAG_RD, &pv_entry_max, 0,
	"Max number of PV entries");
SYSCTL_INT(_vm_pmap, OID_AUTO, shpgperproc, CTLFLAG_RD, &shpgperproc, 0,
	"Page share factor per proc");

static SYSCTL_NODE(_vm_pmap, OID_AUTO, pde, CTLFLAG_RD, 0,
    "2/4MB page mapping counters");

static u_long pmap_pde_mappings;
SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, mappings, CTLFLAG_RD,
    &pmap_pde_mappings, 0, "2/4MB page mappings");

/***************************************************
 * Low level helper routines.....
 ***************************************************/

/*
 * Determine the appropriate bits to set in a PTE or PDE for a specified
 * caching mode.
 */
int
pmap_cache_bits(int mode, boolean_t is_pde)
{
	int pat_flag, pat_index, cache_bits;

	/* The PAT bit is different for PTE's and PDE's. */
	pat_flag = is_pde ? PG_PDE_PAT : PG_PTE_PAT;

	/* If we don't support PAT, map extended modes to older ones. */
	if (!(cpu_feature & CPUID_PAT)) {
		switch (mode) {
		case PAT_UNCACHEABLE:
		case PAT_WRITE_THROUGH:
		case PAT_WRITE_BACK:
			break;
		case PAT_UNCACHED:
		case PAT_WRITE_COMBINING:
		case PAT_WRITE_PROTECTED:
			mode = PAT_UNCACHEABLE;
			break;
		}
	}

	/* Map the caching mode to a PAT index. */
	if (pat_works) {
		switch (mode) {
		case PAT_UNCACHEABLE:
			pat_index = 3;
			break;
		case PAT_WRITE_THROUGH:
			pat_index = 1;
			break;
		case PAT_WRITE_BACK:
			pat_index = 0;
			break;
		case PAT_UNCACHED:
			pat_index = 2;
			break;
		case PAT_WRITE_COMBINING:
			pat_index = 5;
			break;
		case PAT_WRITE_PROTECTED:
			pat_index = 4;
			break;
		default:
			panic("Unknown caching mode %d\n", mode);
		}
	} else {
		switch (mode) {
		case PAT_UNCACHED:
		case PAT_UNCACHEABLE:
		case PAT_WRITE_PROTECTED:
			pat_index = 3;
			break;
		case PAT_WRITE_THROUGH:
			pat_index = 1;
			break;
		case PAT_WRITE_BACK:
			pat_index = 0;
			break;
		case PAT_WRITE_COMBINING:
			pat_index = 2;
			break;
		default:
			panic("Unknown caching mode %d\n", mode);
		}
	}

	/* Map the 3-bit index value into the PAT, PCD, and PWT bits. */
	cache_bits = 0;
	if (pat_index & 0x4)
		cache_bits |= pat_flag;
	if (pat_index & 0x2)
		cache_bits |= PG_NC_PCD;
	if (pat_index & 0x1)
		cache_bits |= PG_NC_PWT;
	return (cache_bits);
}
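/*
 * Editorial note (assumption, not taken from the original source): the PAT
 * index computed above is encoded in the page-table entry as the 3-bit
 * value {PAT, PCD, PWT}.  For example, with pat_works set,
 * PAT_WRITE_COMBINING selects index 5 (binary 101), so the entry receives
 * pat_flag | PG_NC_PWT, while PAT_WRITE_BACK selects index 0 and
 * contributes no extra bits.
 */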
#ifdef SMP
/*
 * For SMP, these functions have to use the IPI mechanism for coherence.
 *
 * N.B.: Before calling any of the following TLB invalidation functions,
 * the calling processor must ensure that all stores updating a non-
 * kernel page table are globally performed.  Otherwise, another
 * processor could cache an old, pre-update entry without being
 * invalidated.  This can happen one of two ways: (1) The pmap becomes
 * active on another processor after its pm_active field is checked by
 * one of the following functions but before a store updating the page
 * table is globally performed. (2) The pmap becomes active on another
 * processor before its pm_active field is checked but due to
 * speculative loads one of the following functions still reads the
 * pmap as inactive on the other processor.
 *
 * The kernel page table is exempt because its pm_active field is
 * immutable.  The kernel page table is always active on every
 * processor.
 */
void
pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
{
	cpuset_t other_cpus;
	u_int cpuid;

	CTR2(KTR_PMAP, "pmap_invalidate_page: pmap=%p va=0x%x",
	    pmap, va);

	sched_pin();
	if (pmap == kernel_pmap || !CPU_CMP(&pmap->pm_active, &all_cpus)) {
		invlpg(va);
		smp_invlpg(va);
	} else {
		cpuid = PCPU_GET(cpuid);
		other_cpus = all_cpus;
		CPU_CLR(cpuid, &other_cpus);
		if (CPU_ISSET(cpuid, &pmap->pm_active))
			invlpg(va);
		CPU_AND(&other_cpus, &pmap->pm_active);
		if (!CPU_EMPTY(&other_cpus))
			smp_masked_invlpg(other_cpus, va);
	}
	sched_unpin();
	PT_UPDATES_FLUSH();
}

void
pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{
	cpuset_t other_cpus;
	vm_offset_t addr;
	u_int cpuid;

	CTR3(KTR_PMAP, "pmap_invalidate_range: pmap=%p sva=0x%x eva=0x%x",
	    pmap, sva, eva);

	sched_pin();
	if (pmap == kernel_pmap || !CPU_CMP(&pmap->pm_active, &all_cpus)) {
		for (addr = sva; addr < eva; addr += PAGE_SIZE)
			invlpg(addr);
		smp_invlpg_range(sva, eva);
	} else {
		cpuid = PCPU_GET(cpuid);
		other_cpus = all_cpus;
		CPU_CLR(cpuid, &other_cpus);
		if (CPU_ISSET(cpuid, &pmap->pm_active))
			for (addr = sva; addr < eva; addr += PAGE_SIZE)
				invlpg(addr);
		CPU_AND(&other_cpus, &pmap->pm_active);
		if (!CPU_EMPTY(&other_cpus))
			smp_masked_invlpg_range(other_cpus, sva, eva);
	}
	sched_unpin();
	PT_UPDATES_FLUSH();
}

void
pmap_invalidate_all(pmap_t pmap)
{
	cpuset_t other_cpus;
	u_int cpuid;

	CTR1(KTR_PMAP, "pmap_invalidate_all: pmap=%p", pmap);

	sched_pin();
	if (pmap == kernel_pmap || !CPU_CMP(&pmap->pm_active, &all_cpus)) {
		invltlb();
		smp_invltlb();
	} else {
		cpuid = PCPU_GET(cpuid);
		other_cpus = all_cpus;
		CPU_CLR(cpuid, &other_cpus);
		if (CPU_ISSET(cpuid, &pmap->pm_active))
			invltlb();
		CPU_AND(&other_cpus, &pmap->pm_active);
		if (!CPU_EMPTY(&other_cpus))
			smp_masked_invltlb(other_cpus);
	}
	sched_unpin();
}

void
pmap_invalidate_cache(void)
{

	sched_pin();
	wbinvd();
	smp_cache_flush();
	sched_unpin();
}
#else /* !SMP */
/*
 * Normal, non-SMP, 486+ invalidation functions.
 * We inline these within pmap.c for speed.
 */
PMAP_INLINE void
pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
{
	CTR2(KTR_PMAP, "pmap_invalidate_page: pmap=%p va=0x%x",
	    pmap, va);

	if (pmap == kernel_pmap || !CPU_EMPTY(&pmap->pm_active))
		invlpg(va);
	PT_UPDATES_FLUSH();
}

PMAP_INLINE void
pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{
	vm_offset_t addr;

	if (eva - sva > PAGE_SIZE)
		CTR3(KTR_PMAP, "pmap_invalidate_range: pmap=%p sva=0x%x eva=0x%x",
		    pmap, sva, eva);

	if (pmap == kernel_pmap || !CPU_EMPTY(&pmap->pm_active))
		for (addr = sva; addr < eva; addr += PAGE_SIZE)
			invlpg(addr);
	PT_UPDATES_FLUSH();
}

PMAP_INLINE void
pmap_invalidate_all(pmap_t pmap)
{

	CTR1(KTR_PMAP, "pmap_invalidate_all: pmap=%p", pmap);

	if (pmap == kernel_pmap || !CPU_EMPTY(&pmap->pm_active))
		invltlb();
}

PMAP_INLINE void
pmap_invalidate_cache(void)
{

	wbinvd();
}
#endif /* !SMP */

#define	PMAP_CLFLUSH_THRESHOLD	(2 * 1024 * 1024)

void
pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva)
{

	KASSERT((sva & PAGE_MASK) == 0,
	    ("pmap_invalidate_cache_range: sva not page-aligned"));
	KASSERT((eva & PAGE_MASK) == 0,
	    ("pmap_invalidate_cache_range: eva not page-aligned"));

	if (cpu_feature & CPUID_SS)
		; /* If "Self Snoop" is supported, do nothing. */
	else if ((cpu_feature & CPUID_CLFSH) != 0 &&
	    eva - sva < PMAP_CLFLUSH_THRESHOLD) {

		/*
		 * Otherwise, do per-cache line flush.  Use the mfence
		 * instruction to insure that previous stores are
		 * included in the write-back.  The processor
		 * propagates flush to other processors in the cache
		 * coherence domain.
		 */
		mfence();
		for (; sva < eva; sva += cpu_clflush_line_size)
			clflush(sva);
		mfence();
	} else {

		/*
		 * No targeted cache flush methods are supported by CPU,
		 * or the supplied range is bigger than 2MB.
		 * Globally invalidate cache.
		 */
		pmap_invalidate_cache();
	}
}

void
pmap_invalidate_cache_pages(vm_page_t *pages, int count)
{
	int i;

	if (count >= PMAP_CLFLUSH_THRESHOLD / PAGE_SIZE ||
	    (cpu_feature & CPUID_CLFSH) == 0) {
		pmap_invalidate_cache();
	} else {
		for (i = 0; i < count; i++)
			pmap_flush_page(pages[i]);
	}
}

/*
 * Are we current address space or kernel?  N.B. We return FALSE when
 * a pmap's page table is in use because a kernel thread is borrowing
 * it.  The borrowed page table can change spontaneously, making any
 * dependence on its continued use subject to a race condition.
 */
static __inline int
pmap_is_current(pmap_t pmap)
{

	return (pmap == kernel_pmap ||
	    (pmap == vmspace_pmap(curthread->td_proc->p_vmspace) &&
	    (pmap->pm_pdir[PTDPTDI] & PG_FRAME) == (PTDpde[0] & PG_FRAME)));
}
/*
 * If the given pmap is not the current or kernel pmap, the returned pte must
 * be released by passing it to pmap_pte_release().
 */
pt_entry_t *
pmap_pte(pmap_t pmap, vm_offset_t va)
{
	pd_entry_t newpf;
	pd_entry_t *pde;

	pde = pmap_pde(pmap, va);
	if (*pde & PG_PS)
		return (pde);
	if (*pde != 0) {
		/* are we current address space or kernel? */
		if (pmap_is_current(pmap))
			return (vtopte(va));
		mtx_lock(&PMAP2mutex);
		newpf = *pde & PG_FRAME;
		if ((*PMAP2 & PG_FRAME) != newpf) {
			vm_page_lock_queues();
			PT_SET_MA(PADDR2, newpf | PG_V | PG_A | PG_M);
			vm_page_unlock_queues();
			CTR3(KTR_PMAP, "pmap_pte: pmap=%p va=0x%x newpte=0x%08x",
			    pmap, va, (*PMAP2 & 0xffffffff));
		}
		return (PADDR2 + (i386_btop(va) & (NPTEPG - 1)));
	}
	return (NULL);
}

/*
 * Releases a pte that was obtained from pmap_pte().  Be prepared for the pte
 * being NULL.
 */
static __inline void
pmap_pte_release(pt_entry_t *pte)
{

	if ((pt_entry_t *)((vm_offset_t)pte & ~PAGE_MASK) == PADDR2) {
		CTR1(KTR_PMAP, "pmap_pte_release: pte=0x%jx",
		    *PMAP2);
		vm_page_lock_queues();
		PT_SET_VA(PMAP2, 0, TRUE);
		vm_page_unlock_queues();
		mtx_unlock(&PMAP2mutex);
	}
}

static __inline void
invlcaddr(void *caddr)
{

	invlpg((u_int)caddr);
	PT_UPDATES_FLUSH();
}

/*
 * Super fast pmap_pte routine best used when scanning
 * the pv lists.  This eliminates many coarse-grained
 * invltlb calls.  Note that many of the pv list
 * scans are across different pmaps.  It is very wasteful
 * to do an entire invltlb for checking a single mapping.
 *
 * If the given pmap is not the current pmap, vm_page_queue_mtx
 * must be held and curthread pinned to a CPU.
 */
static pt_entry_t *
pmap_pte_quick(pmap_t pmap, vm_offset_t va)
{
	pd_entry_t newpf;
	pd_entry_t *pde;

	pde = pmap_pde(pmap, va);
	if (*pde & PG_PS)
		return (pde);
	if (*pde != 0) {
		/* are we current address space or kernel? */
		if (pmap_is_current(pmap))
			return (vtopte(va));
		mtx_assert(&vm_page_queue_mtx, MA_OWNED);
		KASSERT(curthread->td_pinned > 0, ("curthread not pinned"));
		newpf = *pde & PG_FRAME;
		if ((*PMAP1 & PG_FRAME) != newpf) {
			PT_SET_MA(PADDR1, newpf | PG_V | PG_A | PG_M);
			CTR3(KTR_PMAP, "pmap_pte_quick: pmap=%p va=0x%x newpte=0x%08x",
			    pmap, va, (u_long)*PMAP1);

#ifdef SMP
			PMAP1cpu = PCPU_GET(cpuid);
#endif
			PMAP1changed++;
		} else
#ifdef SMP
		if (PMAP1cpu != PCPU_GET(cpuid)) {
			PMAP1cpu = PCPU_GET(cpuid);
			invlcaddr(PADDR1);
			PMAP1changedcpu++;
		} else
#endif
			PMAP1unchanged++;
		return (PADDR1 + (i386_btop(va) & (NPTEPG - 1)));
	}
	return (0);
}
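/*
 * Editorial note (assumption, not taken from the original source):
 * PMAP1/PADDR1 and PMAP2/PADDR2 are the single-page "windows" reserved in
 * pmap_bootstrap() for peeking at another pmap's page-table pages.
 * pmap_pte_quick() uses the PMAP1 window and relies on the page queues lock
 * plus a pinned thread instead of a dedicated mutex, which is what the
 * PMAP1changed/PMAP1changedcpu/PMAP1unchanged counters track; pmap_pte()
 * uses the PMAP2 window under PMAP2mutex and must therefore be paired with
 * pmap_pte_release().
 */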
/*
 * Routine:	pmap_extract
 * Function:
 *	Extract the physical page address associated
 *	with the given map/virtual_address pair.
 */
vm_paddr_t
pmap_extract(pmap_t pmap, vm_offset_t va)
{
	vm_paddr_t rtval;
	pt_entry_t *pte;
	pd_entry_t pde;
	pt_entry_t pteval;

	rtval = 0;
	PMAP_LOCK(pmap);
	pde = pmap->pm_pdir[va >> PDRSHIFT];
	if (pde != 0) {
		if ((pde & PG_PS) != 0) {
			rtval = xpmap_mtop(pde & PG_PS_FRAME) | (va & PDRMASK);
			PMAP_UNLOCK(pmap);
			return rtval;
		}
		pte = pmap_pte(pmap, va);
		pteval = *pte ? xpmap_mtop(*pte) : 0;
		rtval = (pteval & PG_FRAME) | (va & PAGE_MASK);
		pmap_pte_release(pte);
	}
	PMAP_UNLOCK(pmap);
	return (rtval);
}

/*
 * Routine:	pmap_extract_ma
 * Function:
 *	Like pmap_extract, but returns machine address
 */
vm_paddr_t
pmap_extract_ma(pmap_t pmap, vm_offset_t va)
{
	vm_paddr_t rtval;
	pt_entry_t *pte;
	pd_entry_t pde;

	rtval = 0;
	PMAP_LOCK(pmap);
	pde = pmap->pm_pdir[va >> PDRSHIFT];
	if (pde != 0) {
		if ((pde & PG_PS) != 0) {
			rtval = (pde & ~PDRMASK) | (va & PDRMASK);
			PMAP_UNLOCK(pmap);
			return rtval;
		}
		pte = pmap_pte(pmap, va);
		rtval = (*pte & PG_FRAME) | (va & PAGE_MASK);
		pmap_pte_release(pte);
	}
	PMAP_UNLOCK(pmap);
	return (rtval);
}

/*
 * Routine:	pmap_extract_and_hold
 * Function:
 *	Atomically extract and hold the physical page
 *	with the given pmap and virtual address pair
 *	if that mapping permits the given protection.
 */
vm_page_t
pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
{
	pd_entry_t pde;
	pt_entry_t pte;
	vm_page_t m;
	vm_paddr_t pa;

	pa = 0;
	m = NULL;
	PMAP_LOCK(pmap);
retry:
	pde = PT_GET(pmap_pde(pmap, va));
	if (pde != 0) {
		if (pde & PG_PS) {
			if ((pde & PG_RW) || (prot & VM_PROT_WRITE) == 0) {
				if (vm_page_pa_tryrelock(pmap, (pde &
				    PG_PS_FRAME) | (va & PDRMASK), &pa))
					goto retry;
				m = PHYS_TO_VM_PAGE((pde & PG_PS_FRAME) |
				    (va & PDRMASK));
				vm_page_hold(m);
			}
		} else {
			sched_pin();
			pte = PT_GET(pmap_pte_quick(pmap, va));
			if (*PMAP1)
				PT_SET_MA(PADDR1, 0);
			if ((pte & PG_V) &&
			    ((pte & PG_RW) || (prot & VM_PROT_WRITE) == 0)) {
				if (vm_page_pa_tryrelock(pmap, pte & PG_FRAME, &pa))
					goto retry;
				m = PHYS_TO_VM_PAGE(pte & PG_FRAME);
				vm_page_hold(m);
			}
			sched_unpin();
		}
	}
	PA_UNLOCK_COND(pa);
	PMAP_UNLOCK(pmap);
	return (m);
}

/***************************************************
 * Low level mapping routines.....
 ***************************************************/

/*
 * Add a wired page to the kva.
 * Note: not SMP coherent.
 *
 * This function may be used before pmap_bootstrap() is called.
 */
void
pmap_kenter(vm_offset_t va, vm_paddr_t pa)
{

	PT_SET_MA(va, xpmap_ptom(pa) | PG_RW | PG_V | pgeflag);
}

void
pmap_kenter_ma(vm_offset_t va, vm_paddr_t ma)
{
	pt_entry_t *pte;

	pte = vtopte(va);
	pte_store_ma(pte, ma | PG_RW | PG_V | pgeflag);
}

static __inline void
pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode)
{

	PT_SET_MA(va, pa | PG_RW | PG_V | pgeflag | pmap_cache_bits(mode, 0));
}

/*
 * Remove a page from the kernel pagetables.
 * Note: not SMP coherent.
 *
 * This function may be used before pmap_bootstrap() is called.
 */
PMAP_INLINE void
pmap_kremove(vm_offset_t va)
{
	pt_entry_t *pte;

	pte = vtopte(va);
	PT_CLEAR_VA(pte, FALSE);
}
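/*
 * Editorial note (assumption, not taken from the original source): under
 * Xen the page-table entries hold machine (hypervisor-level) frame numbers,
 * not the guest's pseudo-physical frame numbers.  xpmap_mtop() converts
 * machine to pseudo-physical and xpmap_ptom() the reverse, while
 * VM_PAGE_TO_MACH() yields a vm_page's machine address.  That is why
 * pmap_extract() translates with xpmap_mtop() before returning a physical
 * address, whereas pmap_extract_ma() and pmap_kenter_ma() work directly in
 * machine addresses.
 */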
/*
 * Used to map a range of physical addresses into kernel
 * virtual address space.
 *
 * The value passed in '*virt' is a suggested virtual address for
 * the mapping.  Architectures which can support a direct-mapped
 * physical to virtual region can return the appropriate address
 * within that region, leaving '*virt' unchanged.  Other
 * architectures should map the pages starting at '*virt' and
 * update '*virt' with the first usable address after the mapped
 * region.
 */
vm_offset_t
pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
{
	vm_offset_t va, sva;

	va = sva = *virt;
	CTR4(KTR_PMAP, "pmap_map: va=0x%x start=0x%jx end=0x%jx prot=0x%x",
	    va, start, end, prot);
	while (start < end) {
		pmap_kenter(va, start);
		va += PAGE_SIZE;
		start += PAGE_SIZE;
	}
	pmap_invalidate_range(kernel_pmap, sva, va);
	*virt = va;
	return (sva);
}


/*
 * Add a list of wired pages to the kva
 * this routine is only used for temporary
 * kernel mappings that do not need to have
 * page modification or references recorded.
 * Note that old mappings are simply written
 * over.  The page *must* be wired.
 * Note: SMP coherent.  Uses a ranged shootdown IPI.
 */
void
pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count)
{
	pt_entry_t *endpte, *pte;
	vm_paddr_t pa;
	vm_offset_t va = sva;
	int mclcount = 0;
	multicall_entry_t mcl[16];
	multicall_entry_t *mclp = mcl;
	int error;

	CTR2(KTR_PMAP, "pmap_qenter:sva=0x%x count=%d", va, count);
	pte = vtopte(sva);
	endpte = pte + count;
	while (pte < endpte) {
		pa = VM_PAGE_TO_MACH(*ma) | pgeflag | PG_RW | PG_V | PG_M | PG_A;

		mclp->op = __HYPERVISOR_update_va_mapping;
		mclp->args[0] = va;
		mclp->args[1] = (uint32_t)(pa & 0xffffffff);
		mclp->args[2] = (uint32_t)(pa >> 32);
		mclp->args[3] = (*pte & PG_V) ? UVMF_INVLPG|UVMF_ALL : 0;

		va += PAGE_SIZE;
		pte++;
		ma++;
		mclp++;
		mclcount++;
		if (mclcount == 16) {
			error = HYPERVISOR_multicall(mcl, mclcount);
			mclp = mcl;
			mclcount = 0;
			KASSERT(error == 0, ("bad multicall %d", error));
		}
	}
	if (mclcount) {
		error = HYPERVISOR_multicall(mcl, mclcount);
		KASSERT(error == 0, ("bad multicall %d", error));
	}

#ifdef INVARIANTS
	for (pte = vtopte(sva), mclcount = 0; mclcount < count; mclcount++, pte++)
		KASSERT(*pte, ("pte not set for va=0x%x", sva + mclcount*PAGE_SIZE));
#endif
}

/*
 * This routine tears out page mappings from the
 * kernel -- it is meant only for temporary mappings.
 * Note: SMP coherent.  Uses a ranged shootdown IPI.
 */
void
pmap_qremove(vm_offset_t sva, int count)
{
	vm_offset_t va;

	CTR2(KTR_PMAP, "pmap_qremove: sva=0x%x count=%d", sva, count);
	va = sva;
	vm_page_lock_queues();
	critical_enter();
	while (count-- > 0) {
		pmap_kremove(va);
		va += PAGE_SIZE;
	}
	PT_UPDATES_FLUSH();
	pmap_invalidate_range(kernel_pmap, sva, va);
	critical_exit();
	vm_page_unlock_queues();
}

/***************************************************
 * Page table page management routines.....
1324 ***************************************************/ 1325static __inline void 1326pmap_free_zero_pages(vm_page_t free) 1327{ 1328 vm_page_t m; 1329 1330 while (free != NULL) { 1331 m = free; 1332 free = m->right; 1333 vm_page_free_zero(m); 1334 } 1335} 1336 1337/* 1338 * This routine unholds page table pages, and if the hold count 1339 * drops to zero, then it decrements the wire count. 1340 */ 1341static __inline int 1342pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m, vm_page_t *free) 1343{ 1344 1345 --m->wire_count; 1346 if (m->wire_count == 0) 1347 return (_pmap_unwire_pte_hold(pmap, m, free)); 1348 else 1349 return (0); 1350} 1351 1352static int 1353_pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m, vm_page_t *free) 1354{ 1355 vm_offset_t pteva; 1356 1357 PT_UPDATES_FLUSH(); 1358 /* 1359 * unmap the page table page 1360 */ 1361 xen_pt_unpin(pmap->pm_pdir[m->pindex]); 1362 /* 1363 * page *might* contain residual mapping :-/ 1364 */ 1365 PD_CLEAR_VA(pmap, m->pindex, TRUE); 1366 pmap_zero_page(m); 1367 --pmap->pm_stats.resident_count; 1368 1369 /* 1370 * This is a release store so that the ordinary store unmapping 1371 * the page table page is globally performed before TLB shoot- 1372 * down is begun. 1373 */ 1374 atomic_subtract_rel_int(&cnt.v_wire_count, 1); 1375 1376 /* 1377 * Do an invltlb to make the invalidated mapping 1378 * take effect immediately. 1379 */ 1380 pteva = VM_MAXUSER_ADDRESS + i386_ptob(m->pindex); 1381 pmap_invalidate_page(pmap, pteva); 1382 1383 /* 1384 * Put page on a list so that it is released after 1385 * *ALL* TLB shootdown is done 1386 */ 1387 m->right = *free; 1388 *free = m; 1389 1390 return (1); 1391} 1392 1393/* 1394 * After removing a page table entry, this routine is used to 1395 * conditionally free the page, and manage the hold/wire counts. 1396 */ 1397static int 1398pmap_unuse_pt(pmap_t pmap, vm_offset_t va, vm_page_t *free) 1399{ 1400 pd_entry_t ptepde; 1401 vm_page_t mpte; 1402 1403 if (va >= VM_MAXUSER_ADDRESS) 1404 return (0); 1405 ptepde = PT_GET(pmap_pde(pmap, va)); 1406 mpte = PHYS_TO_VM_PAGE(ptepde & PG_FRAME); 1407 return (pmap_unwire_pte_hold(pmap, mpte, free)); 1408} 1409 1410/* 1411 * Initialize the pmap for the swapper process. 1412 */ 1413void 1414pmap_pinit0(pmap_t pmap) 1415{ 1416 1417 PMAP_LOCK_INIT(pmap); 1418 /* 1419 * Since the page table directory is shared with the kernel pmap, 1420 * which is already included in the list "allpmaps", this pmap does 1421 * not need to be inserted into that list. 1422 */ 1423 pmap->pm_pdir = (pd_entry_t *)(KERNBASE + (vm_offset_t)IdlePTD); 1424#ifdef PAE 1425 pmap->pm_pdpt = (pdpt_entry_t *)(KERNBASE + (vm_offset_t)IdlePDPT); 1426#endif 1427 CPU_ZERO(&pmap->pm_active); 1428 PCPU_SET(curpmap, pmap); 1429 TAILQ_INIT(&pmap->pm_pvchunk); 1430 bzero(&pmap->pm_stats, sizeof pmap->pm_stats); 1431} 1432 1433/* 1434 * Initialize a preallocated and zeroed pmap structure, 1435 * such as one in a vmspace structure. 1436 */ 1437int 1438pmap_pinit(pmap_t pmap) 1439{ 1440 vm_page_t m, ptdpg[NPGPTD + 1]; 1441 int npgptd = NPGPTD + 1; 1442 int i; 1443 1444#ifdef HAMFISTED_LOCKING 1445 mtx_lock(&createdelete_lock); 1446#endif 1447 1448 PMAP_LOCK_INIT(pmap); 1449 1450 /* 1451 * No need to allocate page table space yet but we do need a valid 1452 * page directory table. 
1453 */ 1454 if (pmap->pm_pdir == NULL) { 1455 pmap->pm_pdir = (pd_entry_t *)kmem_alloc_nofault(kernel_map, 1456 NBPTD); 1457 if (pmap->pm_pdir == NULL) { 1458 PMAP_LOCK_DESTROY(pmap); 1459#ifdef HAMFISTED_LOCKING 1460 mtx_unlock(&createdelete_lock); 1461#endif 1462 return (0); 1463 } 1464#ifdef PAE 1465 pmap->pm_pdpt = (pd_entry_t *)kmem_alloc_nofault(kernel_map, 1); 1466#endif 1467 } 1468 1469 /* 1470 * allocate the page directory page(s) 1471 */ 1472 for (i = 0; i < npgptd;) { 1473 m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | 1474 VM_ALLOC_WIRED | VM_ALLOC_ZERO); 1475 if (m == NULL) 1476 VM_WAIT; 1477 else { 1478 ptdpg[i++] = m; 1479 } 1480 } 1481 1482 pmap_qenter((vm_offset_t)pmap->pm_pdir, ptdpg, NPGPTD); 1483 1484 for (i = 0; i < NPGPTD; i++) 1485 if ((ptdpg[i]->flags & PG_ZERO) == 0) 1486 pagezero(pmap->pm_pdir + (i * NPDEPG)); 1487 1488 mtx_lock_spin(&allpmaps_lock); 1489 LIST_INSERT_HEAD(&allpmaps, pmap, pm_list); 1490 /* Copy the kernel page table directory entries. */ 1491 bcopy(PTD + KPTDI, pmap->pm_pdir + KPTDI, nkpt * sizeof(pd_entry_t)); 1492 mtx_unlock_spin(&allpmaps_lock); 1493 1494#ifdef PAE 1495 pmap_qenter((vm_offset_t)pmap->pm_pdpt, &ptdpg[NPGPTD], 1); 1496 if ((ptdpg[NPGPTD]->flags & PG_ZERO) == 0) 1497 bzero(pmap->pm_pdpt, PAGE_SIZE); 1498 for (i = 0; i < NPGPTD; i++) { 1499 vm_paddr_t ma; 1500 1501 ma = VM_PAGE_TO_MACH(ptdpg[i]); 1502 pmap->pm_pdpt[i] = ma | PG_V; 1503 1504 } 1505#endif 1506 for (i = 0; i < NPGPTD; i++) { 1507 pt_entry_t *pd; 1508 vm_paddr_t ma; 1509 1510 ma = VM_PAGE_TO_MACH(ptdpg[i]); 1511 pd = pmap->pm_pdir + (i * NPDEPG); 1512 PT_SET_MA(pd, *vtopte((vm_offset_t)pd) & ~(PG_M|PG_A|PG_U|PG_RW)); 1513#if 0 1514 xen_pgd_pin(ma); 1515#endif 1516 } 1517 1518#ifdef PAE 1519 PT_SET_MA(pmap->pm_pdpt, *vtopte((vm_offset_t)pmap->pm_pdpt) & ~PG_RW); 1520#endif 1521 vm_page_lock_queues(); 1522 xen_flush_queue(); 1523 xen_pgdpt_pin(VM_PAGE_TO_MACH(ptdpg[NPGPTD])); 1524 for (i = 0; i < NPGPTD; i++) { 1525 vm_paddr_t ma = VM_PAGE_TO_MACH(ptdpg[i]); 1526 PT_SET_VA_MA(&pmap->pm_pdir[PTDPTDI + i], ma | PG_V | PG_A, FALSE); 1527 } 1528 xen_flush_queue(); 1529 vm_page_unlock_queues(); 1530 CPU_ZERO(&pmap->pm_active); 1531 TAILQ_INIT(&pmap->pm_pvchunk); 1532 bzero(&pmap->pm_stats, sizeof pmap->pm_stats); 1533 1534#ifdef HAMFISTED_LOCKING 1535 mtx_unlock(&createdelete_lock); 1536#endif 1537 return (1); 1538} 1539 1540/* 1541 * this routine is called if the page table page is not 1542 * mapped correctly. 1543 */ 1544static vm_page_t 1545_pmap_allocpte(pmap_t pmap, u_int ptepindex, int flags) 1546{ 1547 vm_paddr_t ptema; 1548 vm_page_t m; 1549 1550 KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT || 1551 (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK, 1552 ("_pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK")); 1553 1554 /* 1555 * Allocate a page table page. 1556 */ 1557 if ((m = vm_page_alloc(NULL, ptepindex, VM_ALLOC_NOOBJ | 1558 VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) { 1559 if (flags & M_WAITOK) { 1560 PMAP_UNLOCK(pmap); 1561 vm_page_unlock_queues(); 1562 VM_WAIT; 1563 vm_page_lock_queues(); 1564 PMAP_LOCK(pmap); 1565 } 1566 1567 /* 1568 * Indicate the need to retry. While waiting, the page table 1569 * page may have been allocated. 1570 */ 1571 return (NULL); 1572 } 1573 if ((m->flags & PG_ZERO) == 0) 1574 pmap_zero_page(m); 1575 1576 /* 1577 * Map the pagetable page into the process address space, if 1578 * it isn't already there. 
1579 */ 1580 1581 pmap->pm_stats.resident_count++; 1582 1583 ptema = VM_PAGE_TO_MACH(m); 1584 xen_pt_pin(ptema); 1585 PT_SET_VA_MA(&pmap->pm_pdir[ptepindex], 1586 (ptema | PG_U | PG_RW | PG_V | PG_A | PG_M), TRUE); 1587 1588 KASSERT(pmap->pm_pdir[ptepindex], 1589 ("_pmap_allocpte: ptepindex=%d did not get mapped", ptepindex)); 1590 return (m); 1591} 1592 1593static vm_page_t 1594pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags) 1595{ 1596 u_int ptepindex; 1597 pd_entry_t ptema; 1598 vm_page_t m; 1599 1600 KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT || 1601 (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK, 1602 ("pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK")); 1603 1604 /* 1605 * Calculate pagetable page index 1606 */ 1607 ptepindex = va >> PDRSHIFT; 1608retry: 1609 /* 1610 * Get the page directory entry 1611 */ 1612 ptema = pmap->pm_pdir[ptepindex]; 1613 1614 /* 1615 * This supports switching from a 4MB page to a 1616 * normal 4K page. 1617 */ 1618 if (ptema & PG_PS) { 1619 /* 1620 * XXX 1621 */ 1622 pmap->pm_pdir[ptepindex] = 0; 1623 ptema = 0; 1624 pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE; 1625 pmap_invalidate_all(kernel_pmap); 1626 } 1627 1628 /* 1629 * If the page table page is mapped, we just increment the 1630 * hold count, and activate it. 1631 */ 1632 if (ptema & PG_V) { 1633 m = PHYS_TO_VM_PAGE(xpmap_mtop(ptema) & PG_FRAME); 1634 m->wire_count++; 1635 } else { 1636 /* 1637 * Here if the pte page isn't mapped, or if it has 1638 * been deallocated. 1639 */ 1640 CTR3(KTR_PMAP, "pmap_allocpte: pmap=%p va=0x%08x flags=0x%x", 1641 pmap, va, flags); 1642 m = _pmap_allocpte(pmap, ptepindex, flags); 1643 if (m == NULL && (flags & M_WAITOK)) 1644 goto retry; 1645 1646 KASSERT(pmap->pm_pdir[ptepindex], ("ptepindex=%d did not get mapped", ptepindex)); 1647 } 1648 return (m); 1649} 1650 1651 1652/*************************************************** 1653* Pmap allocation/deallocation routines. 1654 ***************************************************/ 1655 1656#ifdef SMP 1657/* 1658 * Deal with a SMP shootdown of other users of the pmap that we are 1659 * trying to dispose of. This can be a bit hairy. 1660 */ 1661static cpuset_t *lazymask; 1662static u_int lazyptd; 1663static volatile u_int lazywait; 1664 1665void pmap_lazyfix_action(void); 1666 1667void 1668pmap_lazyfix_action(void) 1669{ 1670 1671#ifdef COUNT_IPIS 1672 (*ipi_lazypmap_counts[PCPU_GET(cpuid)])++; 1673#endif 1674 if (rcr3() == lazyptd) 1675 load_cr3(PCPU_GET(curpcb)->pcb_cr3); 1676 CPU_CLR_ATOMIC(PCPU_GET(cpuid), lazymask); 1677 atomic_store_rel_int(&lazywait, 1); 1678} 1679 1680static void 1681pmap_lazyfix_self(u_int cpuid) 1682{ 1683 1684 if (rcr3() == lazyptd) 1685 load_cr3(PCPU_GET(curpcb)->pcb_cr3); 1686 CPU_CLR_ATOMIC(cpuid, lazymask); 1687} 1688 1689 1690static void 1691pmap_lazyfix(pmap_t pmap) 1692{ 1693 cpuset_t mymask, mask; 1694 u_int cpuid, spins; 1695 int lsb; 1696 1697 mask = pmap->pm_active; 1698 while (!CPU_EMPTY(&mask)) { 1699 spins = 50000000; 1700 1701 /* Find least significant set bit. */ 1702 lsb = cpusetobj_ffs(&mask); 1703 MPASS(lsb != 0); 1704 lsb--; 1705 CPU_SETOF(lsb, &mask); 1706 mtx_lock_spin(&smp_ipi_mtx); 1707#ifdef PAE 1708 lazyptd = vtophys(pmap->pm_pdpt); 1709#else 1710 lazyptd = vtophys(pmap->pm_pdir); 1711#endif 1712 cpuid = PCPU_GET(cpuid); 1713 1714 /* Use a cpuset just for having an easy check. 
*/ 1715 CPU_SETOF(cpuid, &mymask); 1716 if (!CPU_CMP(&mask, &mymask)) { 1717 lazymask = &pmap->pm_active; 1718 pmap_lazyfix_self(cpuid); 1719 } else { 1720 atomic_store_rel_int((u_int *)&lazymask, 1721 (u_int)&pmap->pm_active); 1722 atomic_store_rel_int(&lazywait, 0); 1723 ipi_selected(mask, IPI_LAZYPMAP); 1724 while (lazywait == 0) { 1725 ia32_pause(); 1726 if (--spins == 0) 1727 break; 1728 } 1729 } 1730 mtx_unlock_spin(&smp_ipi_mtx); 1731 if (spins == 0) 1732 printf("pmap_lazyfix: spun for 50000000\n"); 1733 mask = pmap->pm_active; 1734 } 1735} 1736 1737#else /* SMP */ 1738 1739/* 1740 * Cleaning up on uniprocessor is easy. For various reasons, we're 1741 * unlikely to have to even execute this code, including the fact 1742 * that the cleanup is deferred until the parent does a wait(2), which 1743 * means that another userland process has run. 1744 */ 1745static void 1746pmap_lazyfix(pmap_t pmap) 1747{ 1748 u_int cr3; 1749 1750 cr3 = vtophys(pmap->pm_pdir); 1751 if (cr3 == rcr3()) { 1752 load_cr3(PCPU_GET(curpcb)->pcb_cr3); 1753 CPU_CLR(PCPU_GET(cpuid), &pmap->pm_active); 1754 } 1755} 1756#endif /* SMP */ 1757 1758/* 1759 * Release any resources held by the given physical map. 1760 * Called when a pmap initialized by pmap_pinit is being released. 1761 * Should only be called if the map contains no valid mappings. 1762 */ 1763void 1764pmap_release(pmap_t pmap) 1765{ 1766 vm_page_t m, ptdpg[2*NPGPTD+1]; 1767 vm_paddr_t ma; 1768 int i; 1769#ifdef PAE 1770 int npgptd = NPGPTD + 1; 1771#else 1772 int npgptd = NPGPTD; 1773#endif 1774 1775 KASSERT(pmap->pm_stats.resident_count == 0, 1776 ("pmap_release: pmap resident count %ld != 0", 1777 pmap->pm_stats.resident_count)); 1778 PT_UPDATES_FLUSH(); 1779 1780#ifdef HAMFISTED_LOCKING 1781 mtx_lock(&createdelete_lock); 1782#endif 1783 1784 pmap_lazyfix(pmap); 1785 mtx_lock_spin(&allpmaps_lock); 1786 LIST_REMOVE(pmap, pm_list); 1787 mtx_unlock_spin(&allpmaps_lock); 1788 1789 for (i = 0; i < NPGPTD; i++) 1790 ptdpg[i] = PHYS_TO_VM_PAGE(vtophys(pmap->pm_pdir + (i*NPDEPG)) & PG_FRAME); 1791 pmap_qremove((vm_offset_t)pmap->pm_pdir, NPGPTD); 1792#ifdef PAE 1793 ptdpg[NPGPTD] = PHYS_TO_VM_PAGE(vtophys(pmap->pm_pdpt)); 1794#endif 1795 1796 for (i = 0; i < npgptd; i++) { 1797 m = ptdpg[i]; 1798 ma = VM_PAGE_TO_MACH(m); 1799 /* unpinning L1 and L2 treated the same */ 1800#if 0 1801 xen_pgd_unpin(ma); 1802#else 1803 if (i == NPGPTD) 1804 xen_pgd_unpin(ma); 1805#endif 1806#ifdef PAE 1807 if (i < NPGPTD) 1808 KASSERT(VM_PAGE_TO_MACH(m) == (pmap->pm_pdpt[i] & PG_FRAME), 1809 ("pmap_release: got wrong ptd page")); 1810#endif 1811 m->wire_count--; 1812 atomic_subtract_int(&cnt.v_wire_count, 1); 1813 vm_page_free(m); 1814 } 1815#ifdef PAE 1816 pmap_qremove((vm_offset_t)pmap->pm_pdpt, 1); 1817#endif 1818 PMAP_LOCK_DESTROY(pmap); 1819 1820#ifdef HAMFISTED_LOCKING 1821 mtx_unlock(&createdelete_lock); 1822#endif 1823} 1824 1825static int 1826kvm_size(SYSCTL_HANDLER_ARGS) 1827{ 1828 unsigned long ksize = VM_MAX_KERNEL_ADDRESS - KERNBASE; 1829 1830 return (sysctl_handle_long(oidp, &ksize, 0, req)); 1831} 1832SYSCTL_PROC(_vm, OID_AUTO, kvm_size, CTLTYPE_LONG|CTLFLAG_RD, 1833 0, 0, kvm_size, "IU", "Size of KVM"); 1834 1835static int 1836kvm_free(SYSCTL_HANDLER_ARGS) 1837{ 1838 unsigned long kfree = VM_MAX_KERNEL_ADDRESS - kernel_vm_end; 1839 1840 return (sysctl_handle_long(oidp, &kfree, 0, req)); 1841} 1842SYSCTL_PROC(_vm, OID_AUTO, kvm_free, CTLTYPE_LONG|CTLFLAG_RD, 1843 0, 0, kvm_free, "IU", "Amount of KVM free"); 1844 1845/* 1846 * grow the number of kernel page table 
entries, if needed 1847 */ 1848void 1849pmap_growkernel(vm_offset_t addr) 1850{ 1851 struct pmap *pmap; 1852 vm_paddr_t ptppaddr; 1853 vm_page_t nkpg; 1854 pd_entry_t newpdir; 1855 1856 mtx_assert(&kernel_map->system_mtx, MA_OWNED); 1857 if (kernel_vm_end == 0) { 1858 kernel_vm_end = KERNBASE; 1859 nkpt = 0; 1860 while (pdir_pde(PTD, kernel_vm_end)) { 1861 kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1); 1862 nkpt++; 1863 if (kernel_vm_end - 1 >= kernel_map->max_offset) { 1864 kernel_vm_end = kernel_map->max_offset; 1865 break; 1866 } 1867 } 1868 } 1869 addr = roundup2(addr, NBPDR); 1870 if (addr - 1 >= kernel_map->max_offset) 1871 addr = kernel_map->max_offset; 1872 while (kernel_vm_end < addr) { 1873 if (pdir_pde(PTD, kernel_vm_end)) { 1874 kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK; 1875 if (kernel_vm_end - 1 >= kernel_map->max_offset) { 1876 kernel_vm_end = kernel_map->max_offset; 1877 break; 1878 } 1879 continue; 1880 } 1881 1882 nkpg = vm_page_alloc(NULL, kernel_vm_end >> PDRSHIFT, 1883 VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | 1884 VM_ALLOC_ZERO); 1885 if (nkpg == NULL) 1886 panic("pmap_growkernel: no memory to grow kernel"); 1887 1888 nkpt++; 1889 1890 if ((nkpg->flags & PG_ZERO) == 0) 1891 pmap_zero_page(nkpg); 1892 ptppaddr = VM_PAGE_TO_PHYS(nkpg); 1893 newpdir = (pd_entry_t) (ptppaddr | PG_V | PG_RW | PG_A | PG_M); 1894 vm_page_lock_queues(); 1895 PD_SET_VA(kernel_pmap, (kernel_vm_end >> PDRSHIFT), newpdir, TRUE); 1896 mtx_lock_spin(&allpmaps_lock); 1897 LIST_FOREACH(pmap, &allpmaps, pm_list) 1898 PD_SET_VA(pmap, (kernel_vm_end >> PDRSHIFT), newpdir, TRUE); 1899 1900 mtx_unlock_spin(&allpmaps_lock); 1901 vm_page_unlock_queues(); 1902 1903 kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK; 1904 if (kernel_vm_end - 1 >= kernel_map->max_offset) { 1905 kernel_vm_end = kernel_map->max_offset; 1906 break; 1907 } 1908 } 1909} 1910 1911 1912/*************************************************** 1913 * page management routines. 
1914 ***************************************************/ 1915 1916CTASSERT(sizeof(struct pv_chunk) == PAGE_SIZE); 1917CTASSERT(_NPCM == 11); 1918 1919static __inline struct pv_chunk * 1920pv_to_chunk(pv_entry_t pv) 1921{ 1922 1923 return ((struct pv_chunk *)((uintptr_t)pv & ~(uintptr_t)PAGE_MASK)); 1924} 1925 1926#define PV_PMAP(pv) (pv_to_chunk(pv)->pc_pmap) 1927 1928#define PC_FREE0_9 0xfffffffful /* Free values for index 0 through 9 */ 1929#define PC_FREE10 0x0000fffful /* Free values for index 10 */ 1930 1931static uint32_t pc_freemask[11] = { 1932 PC_FREE0_9, PC_FREE0_9, PC_FREE0_9, 1933 PC_FREE0_9, PC_FREE0_9, PC_FREE0_9, 1934 PC_FREE0_9, PC_FREE0_9, PC_FREE0_9, 1935 PC_FREE0_9, PC_FREE10 1936}; 1937 1938SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_count, CTLFLAG_RD, &pv_entry_count, 0, 1939 "Current number of pv entries"); 1940 1941#ifdef PV_STATS 1942static int pc_chunk_count, pc_chunk_allocs, pc_chunk_frees, pc_chunk_tryfail; 1943 1944SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_count, CTLFLAG_RD, &pc_chunk_count, 0, 1945 "Current number of pv entry chunks"); 1946SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_allocs, CTLFLAG_RD, &pc_chunk_allocs, 0, 1947 "Current number of pv entry chunks allocated"); 1948SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_frees, CTLFLAG_RD, &pc_chunk_frees, 0, 1949 "Current number of pv entry chunks frees"); 1950SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_tryfail, CTLFLAG_RD, &pc_chunk_tryfail, 0, 1951 "Number of times tried to get a chunk page but failed."); 1952 1953static long pv_entry_frees, pv_entry_allocs; 1954static int pv_entry_spare; 1955 1956SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_frees, CTLFLAG_RD, &pv_entry_frees, 0, 1957 "Current number of pv entry frees"); 1958SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_allocs, CTLFLAG_RD, &pv_entry_allocs, 0, 1959 "Current number of pv entry allocs"); 1960SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_spare, CTLFLAG_RD, &pv_entry_spare, 0, 1961 "Current number of spare pv entries"); 1962 1963static int pmap_collect_inactive, pmap_collect_active; 1964 1965SYSCTL_INT(_vm_pmap, OID_AUTO, pmap_collect_inactive, CTLFLAG_RD, &pmap_collect_inactive, 0, 1966 "Current number times pmap_collect called on inactive queue"); 1967SYSCTL_INT(_vm_pmap, OID_AUTO, pmap_collect_active, CTLFLAG_RD, &pmap_collect_active, 0, 1968 "Current number times pmap_collect called on active queue"); 1969#endif 1970 1971/* 1972 * We are in a serious low memory condition. Resort to 1973 * drastic measures to free some pages so we can allocate 1974 * another pv entry chunk. This is normally called to 1975 * unmap inactive pages, and if necessary, active pages. 1976 */ 1977static void 1978pmap_collect(pmap_t locked_pmap, struct vpgqueues *vpq) 1979{ 1980 pmap_t pmap; 1981 pt_entry_t *pte, tpte; 1982 pv_entry_t next_pv, pv; 1983 vm_offset_t va; 1984 vm_page_t m, free; 1985 1986 sched_pin(); 1987 TAILQ_FOREACH(m, &vpq->pl, pageq) { 1988 if ((m->flags & PG_MARKER) != 0 || m->hold_count || m->busy) 1989 continue; 1990 TAILQ_FOREACH_SAFE(pv, &m->md.pv_list, pv_list, next_pv) { 1991 va = pv->pv_va; 1992 pmap = PV_PMAP(pv); 1993 /* Avoid deadlock and lock recursion. 
*/ 1994 if (pmap > locked_pmap) 1995 PMAP_LOCK(pmap); 1996 else if (pmap != locked_pmap && !PMAP_TRYLOCK(pmap)) 1997 continue; 1998 pmap->pm_stats.resident_count--; 1999 pte = pmap_pte_quick(pmap, va); 2000 tpte = pte_load_clear(pte); 2001 KASSERT((tpte & PG_W) == 0, 2002 ("pmap_collect: wired pte %#jx", (uintmax_t)tpte)); 2003 if (tpte & PG_A) 2004 vm_page_aflag_set(m, PGA_REFERENCED); 2005 if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) 2006 vm_page_dirty(m); 2007 free = NULL; 2008 pmap_unuse_pt(pmap, va, &free); 2009 pmap_invalidate_page(pmap, va); 2010 pmap_free_zero_pages(free); 2011 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list); 2012 free_pv_entry(pmap, pv); 2013 if (pmap != locked_pmap) 2014 PMAP_UNLOCK(pmap); 2015 } 2016 if (TAILQ_EMPTY(&m->md.pv_list)) 2017 vm_page_aflag_clear(m, PGA_WRITEABLE); 2018 } 2019 sched_unpin(); 2020} 2021 2022 2023/* 2024 * free the pv_entry back to the free list 2025 */ 2026static void 2027free_pv_entry(pmap_t pmap, pv_entry_t pv) 2028{ 2029 vm_page_t m; 2030 struct pv_chunk *pc; 2031 int idx, field, bit; 2032 2033 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2034 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2035 PV_STAT(pv_entry_frees++); 2036 PV_STAT(pv_entry_spare++); 2037 pv_entry_count--; 2038 pc = pv_to_chunk(pv); 2039 idx = pv - &pc->pc_pventry[0]; 2040 field = idx / 32; 2041 bit = idx % 32; 2042 pc->pc_map[field] |= 1ul << bit; 2043 /* move to head of list */ 2044 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 2045 for (idx = 0; idx < _NPCM; idx++) 2046 if (pc->pc_map[idx] != pc_freemask[idx]) { 2047 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list); 2048 return; 2049 } 2050 PV_STAT(pv_entry_spare -= _NPCPV); 2051 PV_STAT(pc_chunk_count--); 2052 PV_STAT(pc_chunk_frees++); 2053 /* entire chunk is free, return it */ 2054 m = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)pc)); 2055 pmap_qremove((vm_offset_t)pc, 1); 2056 vm_page_unwire(m, 0); 2057 vm_page_free(m); 2058 pmap_ptelist_free(&pv_vafree, (vm_offset_t)pc); 2059} 2060 2061/* 2062 * get a new pv_entry, allocating a block from the system 2063 * when needed. 
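 * pv entries are carved out of page-sized pv chunks kept on the
 * pmap's pm_pvchunk list; a fresh chunk page is allocated and mapped
 * into the pv_vafree KVA range only when every existing chunk is full.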
2064 */ 2065static pv_entry_t 2066get_pv_entry(pmap_t pmap, int try) 2067{ 2068 static const struct timeval printinterval = { 60, 0 }; 2069 static struct timeval lastprint; 2070 struct vpgqueues *pq; 2071 int bit, field; 2072 pv_entry_t pv; 2073 struct pv_chunk *pc; 2074 vm_page_t m; 2075 2076 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2077 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2078 PV_STAT(pv_entry_allocs++); 2079 pv_entry_count++; 2080 if (pv_entry_count > pv_entry_high_water) 2081 if (ratecheck(&lastprint, &printinterval)) 2082 printf("Approaching the limit on PV entries, consider " 2083 "increasing either the vm.pmap.shpgperproc or the " 2084 "vm.pmap.pv_entry_max tunable.\n"); 2085 pq = NULL; 2086retry: 2087 pc = TAILQ_FIRST(&pmap->pm_pvchunk); 2088 if (pc != NULL) { 2089 for (field = 0; field < _NPCM; field++) { 2090 if (pc->pc_map[field]) { 2091 bit = bsfl(pc->pc_map[field]); 2092 break; 2093 } 2094 } 2095 if (field < _NPCM) { 2096 pv = &pc->pc_pventry[field * 32 + bit]; 2097 pc->pc_map[field] &= ~(1ul << bit); 2098 /* If this was the last item, move it to tail */ 2099 for (field = 0; field < _NPCM; field++) 2100 if (pc->pc_map[field] != 0) { 2101 PV_STAT(pv_entry_spare--); 2102 return (pv); /* not full, return */ 2103 } 2104 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 2105 TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list); 2106 PV_STAT(pv_entry_spare--); 2107 return (pv); 2108 } 2109 } 2110 /* 2111 * Access to the ptelist "pv_vafree" is synchronized by the page 2112 * queues lock. If "pv_vafree" is currently non-empty, it will 2113 * remain non-empty until pmap_ptelist_alloc() completes. 2114 */ 2115 if (pv_vafree == 0 || (m = vm_page_alloc(NULL, 0, (pq == 2116 &vm_page_queues[PQ_ACTIVE] ? VM_ALLOC_SYSTEM : VM_ALLOC_NORMAL) | 2117 VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) { 2118 if (try) { 2119 pv_entry_count--; 2120 PV_STAT(pc_chunk_tryfail++); 2121 return (NULL); 2122 } 2123 /* 2124 * Reclaim pv entries: At first, destroy mappings to 2125 * inactive pages. After that, if a pv chunk entry 2126 * is still needed, destroy mappings to active pages. 
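 * If both passes fail to free up a chunk entry, the allocator panics
 * and asks for vm.pmap.shpgperproc to be raised.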
2127 */ 2128 if (pq == NULL) { 2129 PV_STAT(pmap_collect_inactive++); 2130 pq = &vm_page_queues[PQ_INACTIVE]; 2131 } else if (pq == &vm_page_queues[PQ_INACTIVE]) { 2132 PV_STAT(pmap_collect_active++); 2133 pq = &vm_page_queues[PQ_ACTIVE]; 2134 } else 2135 panic("get_pv_entry: increase vm.pmap.shpgperproc"); 2136 pmap_collect(pmap, pq); 2137 goto retry; 2138 } 2139 PV_STAT(pc_chunk_count++); 2140 PV_STAT(pc_chunk_allocs++); 2141 pc = (struct pv_chunk *)pmap_ptelist_alloc(&pv_vafree); 2142 pmap_qenter((vm_offset_t)pc, &m, 1); 2143 if ((m->flags & PG_ZERO) == 0) 2144 pagezero(pc); 2145 pc->pc_pmap = pmap; 2146 pc->pc_map[0] = pc_freemask[0] & ~1ul; /* preallocated bit 0 */ 2147 for (field = 1; field < _NPCM; field++) 2148 pc->pc_map[field] = pc_freemask[field]; 2149 pv = &pc->pc_pventry[0]; 2150 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list); 2151 PV_STAT(pv_entry_spare += _NPCPV - 1); 2152 return (pv); 2153} 2154 2155static __inline pv_entry_t 2156pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va) 2157{ 2158 pv_entry_t pv; 2159 2160 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2161 TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) { 2162 if (pmap == PV_PMAP(pv) && va == pv->pv_va) { 2163 TAILQ_REMOVE(&pvh->pv_list, pv, pv_list); 2164 break; 2165 } 2166 } 2167 return (pv); 2168} 2169 2170static void 2171pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va) 2172{ 2173 pv_entry_t pv; 2174 2175 pv = pmap_pvh_remove(pvh, pmap, va); 2176 KASSERT(pv != NULL, ("pmap_pvh_free: pv not found")); 2177 free_pv_entry(pmap, pv); 2178} 2179 2180static void 2181pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va) 2182{ 2183 2184 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2185 pmap_pvh_free(&m->md, pmap, va); 2186 if (TAILQ_EMPTY(&m->md.pv_list)) 2187 vm_page_aflag_clear(m, PGA_WRITEABLE); 2188} 2189 2190/* 2191 * Conditionally create a pv entry. 2192 */ 2193static boolean_t 2194pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m) 2195{ 2196 pv_entry_t pv; 2197 2198 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2199 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2200 if (pv_entry_count < pv_entry_high_water && 2201 (pv = get_pv_entry(pmap, TRUE)) != NULL) { 2202 pv->pv_va = va; 2203 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list); 2204 return (TRUE); 2205 } else 2206 return (FALSE); 2207} 2208 2209/* 2210 * pmap_remove_pte: do the things to unmap a page in a process 2211 */ 2212static int 2213pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t va, vm_page_t *free) 2214{ 2215 pt_entry_t oldpte; 2216 vm_page_t m; 2217 2218 CTR3(KTR_PMAP, "pmap_remove_pte: pmap=%p *ptq=0x%x va=0x%x", 2219 pmap, (u_long)*ptq, va); 2220 2221 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2222 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2223 oldpte = *ptq; 2224 PT_SET_VA_MA(ptq, 0, TRUE); 2225 if (oldpte & PG_W) 2226 pmap->pm_stats.wired_count -= 1; 2227 /* 2228 * Machines that don't support invlpg, also don't support 2229 * PG_G. 
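 * Consequently, a global mapping removed here can always be flushed
 * immediately with pmap_invalidate_page().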
2230 */ 2231 if (oldpte & PG_G) 2232 pmap_invalidate_page(kernel_pmap, va); 2233 pmap->pm_stats.resident_count -= 1; 2234 if (oldpte & PG_MANAGED) { 2235 m = PHYS_TO_VM_PAGE(xpmap_mtop(oldpte) & PG_FRAME); 2236 if ((oldpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) 2237 vm_page_dirty(m); 2238 if (oldpte & PG_A) 2239 vm_page_aflag_set(m, PGA_REFERENCED); 2240 pmap_remove_entry(pmap, m, va); 2241 } 2242 return (pmap_unuse_pt(pmap, va, free)); 2243} 2244 2245/* 2246 * Remove a single page from a process address space 2247 */ 2248static void 2249pmap_remove_page(pmap_t pmap, vm_offset_t va, vm_page_t *free) 2250{ 2251 pt_entry_t *pte; 2252 2253 CTR2(KTR_PMAP, "pmap_remove_page: pmap=%p va=0x%x", 2254 pmap, va); 2255 2256 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2257 KASSERT(curthread->td_pinned > 0, ("curthread not pinned")); 2258 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2259 if ((pte = pmap_pte_quick(pmap, va)) == NULL || (*pte & PG_V) == 0) 2260 return; 2261 pmap_remove_pte(pmap, pte, va, free); 2262 pmap_invalidate_page(pmap, va); 2263 if (*PMAP1) 2264 PT_SET_MA(PADDR1, 0); 2265 2266} 2267 2268/* 2269 * Remove the given range of addresses from the specified map. 2270 * 2271 * It is assumed that the start and end are properly 2272 * rounded to the page size. 2273 */ 2274void 2275pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) 2276{ 2277 vm_offset_t pdnxt; 2278 pd_entry_t ptpaddr; 2279 pt_entry_t *pte; 2280 vm_page_t free = NULL; 2281 int anyvalid; 2282 2283 CTR3(KTR_PMAP, "pmap_remove: pmap=%p sva=0x%x eva=0x%x", 2284 pmap, sva, eva); 2285 2286 /* 2287 * Perform an unsynchronized read. This is, however, safe. 2288 */ 2289 if (pmap->pm_stats.resident_count == 0) 2290 return; 2291 2292 anyvalid = 0; 2293 2294 vm_page_lock_queues(); 2295 sched_pin(); 2296 PMAP_LOCK(pmap); 2297 2298 /* 2299 * special handling of removing one page. a very 2300 * common operation and easy to short circuit some 2301 * code. 2302 */ 2303 if ((sva + PAGE_SIZE == eva) && 2304 ((pmap->pm_pdir[(sva >> PDRSHIFT)] & PG_PS) == 0)) { 2305 pmap_remove_page(pmap, sva, &free); 2306 goto out; 2307 } 2308 2309 for (; sva < eva; sva = pdnxt) { 2310 u_int pdirindex; 2311 2312 /* 2313 * Calculate index for next page table. 2314 */ 2315 pdnxt = (sva + NBPDR) & ~PDRMASK; 2316 if (pmap->pm_stats.resident_count == 0) 2317 break; 2318 2319 pdirindex = sva >> PDRSHIFT; 2320 ptpaddr = pmap->pm_pdir[pdirindex]; 2321 2322 /* 2323 * Weed out invalid mappings. Note: we assume that the page 2324 * directory table is always allocated, and in kernel virtual. 2325 */ 2326 if (ptpaddr == 0) 2327 continue; 2328 2329 /* 2330 * Check for large page. 2331 */ 2332 if ((ptpaddr & PG_PS) != 0) { 2333 PD_CLEAR_VA(pmap, pdirindex, TRUE); 2334 pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE; 2335 anyvalid = 1; 2336 continue; 2337 } 2338 2339 /* 2340 * Limit our scan to either the end of the va represented 2341 * by the current page table page, or to the end of the 2342 * range being removed. 2343 */ 2344 if (pdnxt > eva) 2345 pdnxt = eva; 2346 2347 for (pte = pmap_pte_quick(pmap, sva); sva != pdnxt; pte++, 2348 sva += PAGE_SIZE) { 2349 if ((*pte & PG_V) == 0) 2350 continue; 2351 2352 /* 2353 * The TLB entry for a PG_G mapping is invalidated 2354 * by pmap_remove_pte(). 
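 * Only non-global mappings set "anyvalid" and rely on the single
 * pmap_invalidate_all() issued after the loop.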
2355 */ 2356 if ((*pte & PG_G) == 0) 2357 anyvalid = 1; 2358 if (pmap_remove_pte(pmap, pte, sva, &free)) 2359 break; 2360 } 2361 } 2362 PT_UPDATES_FLUSH(); 2363 if (*PMAP1) 2364 PT_SET_VA_MA(PMAP1, 0, TRUE); 2365out: 2366 if (anyvalid) 2367 pmap_invalidate_all(pmap); 2368 sched_unpin(); 2369 vm_page_unlock_queues(); 2370 PMAP_UNLOCK(pmap); 2371 pmap_free_zero_pages(free); 2372} 2373 2374/* 2375 * Routine: pmap_remove_all 2376 * Function: 2377 * Removes this physical page from 2378 * all physical maps in which it resides. 2379 * Reflects back modify bits to the pager. 2380 * 2381 * Notes: 2382 * Original versions of this routine were very 2383 * inefficient because they iteratively called 2384 * pmap_remove (slow...) 2385 */ 2386 2387void 2388pmap_remove_all(vm_page_t m) 2389{ 2390 pv_entry_t pv; 2391 pmap_t pmap; 2392 pt_entry_t *pte, tpte; 2393 vm_page_t free; 2394 2395 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 2396 ("pmap_remove_all: page %p is not managed", m)); 2397 free = NULL; 2398 vm_page_lock_queues(); 2399 sched_pin(); 2400 while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) { 2401 pmap = PV_PMAP(pv); 2402 PMAP_LOCK(pmap); 2403 pmap->pm_stats.resident_count--; 2404 pte = pmap_pte_quick(pmap, pv->pv_va); 2405 tpte = *pte; 2406 PT_SET_VA_MA(pte, 0, TRUE); 2407 if (tpte & PG_W) 2408 pmap->pm_stats.wired_count--; 2409 if (tpte & PG_A) 2410 vm_page_aflag_set(m, PGA_REFERENCED); 2411 2412 /* 2413 * Update the vm_page_t clean and reference bits. 2414 */ 2415 if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) 2416 vm_page_dirty(m); 2417 pmap_unuse_pt(pmap, pv->pv_va, &free); 2418 pmap_invalidate_page(pmap, pv->pv_va); 2419 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list); 2420 free_pv_entry(pmap, pv); 2421 PMAP_UNLOCK(pmap); 2422 } 2423 vm_page_aflag_clear(m, PGA_WRITEABLE); 2424 PT_UPDATES_FLUSH(); 2425 if (*PMAP1) 2426 PT_SET_MA(PADDR1, 0); 2427 sched_unpin(); 2428 vm_page_unlock_queues(); 2429 pmap_free_zero_pages(free); 2430} 2431 2432/* 2433 * Set the physical protection on the 2434 * specified range of this map as requested. 2435 */ 2436void 2437pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot) 2438{ 2439 vm_offset_t pdnxt; 2440 pd_entry_t ptpaddr; 2441 pt_entry_t *pte; 2442 int anychanged; 2443 2444 CTR4(KTR_PMAP, "pmap_protect: pmap=%p sva=0x%x eva=0x%x prot=0x%x", 2445 pmap, sva, eva, prot); 2446 2447 if ((prot & VM_PROT_READ) == VM_PROT_NONE) { 2448 pmap_remove(pmap, sva, eva); 2449 return; 2450 } 2451 2452#ifdef PAE 2453 if ((prot & (VM_PROT_WRITE|VM_PROT_EXECUTE)) == 2454 (VM_PROT_WRITE|VM_PROT_EXECUTE)) 2455 return; 2456#else 2457 if (prot & VM_PROT_WRITE) 2458 return; 2459#endif 2460 2461 anychanged = 0; 2462 2463 vm_page_lock_queues(); 2464 sched_pin(); 2465 PMAP_LOCK(pmap); 2466 for (; sva < eva; sva = pdnxt) { 2467 pt_entry_t obits, pbits; 2468 u_int pdirindex; 2469 2470 pdnxt = (sva + NBPDR) & ~PDRMASK; 2471 2472 pdirindex = sva >> PDRSHIFT; 2473 ptpaddr = pmap->pm_pdir[pdirindex]; 2474 2475 /* 2476 * Weed out invalid mappings. Note: we assume that the page 2477 * directory table is always allocated, and in kernel virtual. 2478 */ 2479 if (ptpaddr == 0) 2480 continue; 2481 2482 /* 2483 * Check for large page. 
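 * A 2/4MB mapping is write-protected (and, under PAE, made
 * non-executable) directly in its page directory entry.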
2484 */ 2485 if ((ptpaddr & PG_PS) != 0) { 2486 if ((prot & VM_PROT_WRITE) == 0) 2487 pmap->pm_pdir[pdirindex] &= ~(PG_M|PG_RW); 2488#ifdef PAE 2489 if ((prot & VM_PROT_EXECUTE) == 0) 2490 pmap->pm_pdir[pdirindex] |= pg_nx; 2491#endif 2492 anychanged = 1; 2493 continue; 2494 } 2495 2496 if (pdnxt > eva) 2497 pdnxt = eva; 2498 2499 for (pte = pmap_pte_quick(pmap, sva); sva != pdnxt; pte++, 2500 sva += PAGE_SIZE) { 2501 vm_page_t m; 2502 2503retry: 2504 /* 2505 * Regardless of whether a pte is 32 or 64 bits in 2506 * size, PG_RW, PG_A, and PG_M are among the least 2507 * significant 32 bits. 2508 */ 2509 obits = pbits = *pte; 2510 if ((pbits & PG_V) == 0) 2511 continue; 2512 2513 if ((prot & VM_PROT_WRITE) == 0) { 2514 if ((pbits & (PG_MANAGED | PG_M | PG_RW)) == 2515 (PG_MANAGED | PG_M | PG_RW)) { 2516 m = PHYS_TO_VM_PAGE(xpmap_mtop(pbits) & 2517 PG_FRAME); 2518 vm_page_dirty(m); 2519 } 2520 pbits &= ~(PG_RW | PG_M); 2521 } 2522#ifdef PAE 2523 if ((prot & VM_PROT_EXECUTE) == 0) 2524 pbits |= pg_nx; 2525#endif 2526 2527 if (pbits != obits) { 2528 obits = *pte; 2529 PT_SET_VA_MA(pte, pbits, TRUE); 2530 if (*pte != pbits) 2531 goto retry; 2532 if (obits & PG_G) 2533 pmap_invalidate_page(pmap, sva); 2534 else 2535 anychanged = 1; 2536 } 2537 } 2538 } 2539 PT_UPDATES_FLUSH(); 2540 if (*PMAP1) 2541 PT_SET_VA_MA(PMAP1, 0, TRUE); 2542 if (anychanged) 2543 pmap_invalidate_all(pmap); 2544 sched_unpin(); 2545 vm_page_unlock_queues(); 2546 PMAP_UNLOCK(pmap); 2547} 2548 2549/* 2550 * Insert the given physical page (p) at 2551 * the specified virtual address (v) in the 2552 * target physical map with the protection requested. 2553 * 2554 * If specified, the page will be wired down, meaning 2555 * that the related pte can not be reclaimed. 2556 * 2557 * NB: This is the only routine which MAY NOT lazy-evaluate 2558 * or lose information. That is, this routine must actually 2559 * insert this page into the given map NOW. 2560 */ 2561void 2562pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m, 2563 vm_prot_t prot, boolean_t wired) 2564{ 2565 pd_entry_t *pde; 2566 pt_entry_t *pte; 2567 pt_entry_t newpte, origpte; 2568 pv_entry_t pv; 2569 vm_paddr_t opa, pa; 2570 vm_page_t mpte, om; 2571 boolean_t invlva; 2572 2573 CTR6(KTR_PMAP, "pmap_enter: pmap=%08p va=0x%08x access=0x%x ma=0x%08x prot=0x%x wired=%d", 2574 pmap, va, access, VM_PAGE_TO_MACH(m), prot, wired); 2575 va = trunc_page(va); 2576 KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig")); 2577 KASSERT(va < UPT_MIN_ADDRESS || va >= UPT_MAX_ADDRESS, 2578 ("pmap_enter: invalid to pmap_enter page table pages (va: 0x%x)", 2579 va)); 2580 KASSERT((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) != 0 || 2581 VM_OBJECT_LOCKED(m->object), 2582 ("pmap_enter: page %p is not busy", m)); 2583 2584 mpte = NULL; 2585 2586 vm_page_lock_queues(); 2587 PMAP_LOCK(pmap); 2588 sched_pin(); 2589 2590 /* 2591 * In the case that a page table page is not 2592 * resident, we are creating it here. 
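 * This is only necessary for user addresses; kernel addresses are
 * covered by the page table pages grown in pmap_growkernel().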
2593 */ 2594 if (va < VM_MAXUSER_ADDRESS) { 2595 mpte = pmap_allocpte(pmap, va, M_WAITOK); 2596 } 2597 2598 pde = pmap_pde(pmap, va); 2599 if ((*pde & PG_PS) != 0) 2600 panic("pmap_enter: attempted pmap_enter on 4MB page"); 2601 pte = pmap_pte_quick(pmap, va); 2602 2603 /* 2604 * Page Directory table entry not valid, we need a new PT page 2605 */ 2606 if (pte == NULL) { 2607 panic("pmap_enter: invalid page directory pdir=%#jx, va=%#x", 2608 (uintmax_t)pmap->pm_pdir[va >> PDRSHIFT], va); 2609 } 2610 2611 pa = VM_PAGE_TO_PHYS(m); 2612 om = NULL; 2613 opa = origpte = 0; 2614 2615#if 0 2616 KASSERT((*pte & PG_V) || (*pte == 0), ("address set but not valid pte=%p *pte=0x%016jx", 2617 pte, *pte)); 2618#endif 2619 origpte = *pte; 2620 if (origpte) 2621 origpte = xpmap_mtop(origpte); 2622 opa = origpte & PG_FRAME; 2623 2624 /* 2625 * Mapping has not changed, must be protection or wiring change. 2626 */ 2627 if (origpte && (opa == pa)) { 2628 /* 2629 * Wiring change, just update stats. We don't worry about 2630 * wiring PT pages as they remain resident as long as there 2631 * are valid mappings in them. Hence, if a user page is wired, 2632 * the PT page will be also. 2633 */ 2634 if (wired && ((origpte & PG_W) == 0)) 2635 pmap->pm_stats.wired_count++; 2636 else if (!wired && (origpte & PG_W)) 2637 pmap->pm_stats.wired_count--; 2638 2639 /* 2640 * Remove extra pte reference 2641 */ 2642 if (mpte) 2643 mpte->wire_count--; 2644 2645 if (origpte & PG_MANAGED) { 2646 om = m; 2647 pa |= PG_MANAGED; 2648 } 2649 goto validate; 2650 } 2651 2652 pv = NULL; 2653 2654 /* 2655 * Mapping has changed, invalidate old range and fall through to 2656 * handle validating new mapping. 2657 */ 2658 if (opa) { 2659 if (origpte & PG_W) 2660 pmap->pm_stats.wired_count--; 2661 if (origpte & PG_MANAGED) { 2662 om = PHYS_TO_VM_PAGE(opa); 2663 pv = pmap_pvh_remove(&om->md, pmap, va); 2664 } else if (va < VM_MAXUSER_ADDRESS) 2665 printf("va=0x%x is unmanaged :-( \n", va); 2666 2667 if (mpte != NULL) { 2668 mpte->wire_count--; 2669 KASSERT(mpte->wire_count > 0, 2670 ("pmap_enter: missing reference to page table page," 2671 " va: 0x%x", va)); 2672 } 2673 } else 2674 pmap->pm_stats.resident_count++; 2675 2676 /* 2677 * Enter on the PV list if part of our managed memory. 2678 */ 2679 if ((m->oflags & VPO_UNMANAGED) == 0) { 2680 KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva, 2681 ("pmap_enter: managed mapping within the clean submap")); 2682 if (pv == NULL) 2683 pv = get_pv_entry(pmap, FALSE); 2684 pv->pv_va = va; 2685 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list); 2686 pa |= PG_MANAGED; 2687 } else if (pv != NULL) 2688 free_pv_entry(pmap, pv); 2689 2690 /* 2691 * Increment counters 2692 */ 2693 if (wired) 2694 pmap->pm_stats.wired_count++; 2695 2696validate: 2697 /* 2698 * Now validate mapping with desired protection/wiring. 2699 */ 2700 newpte = (pt_entry_t)(pa | PG_V); 2701 if ((prot & VM_PROT_WRITE) != 0) { 2702 newpte |= PG_RW; 2703 if ((newpte & PG_MANAGED) != 0) 2704 vm_page_aflag_set(m, PGA_WRITEABLE); 2705 } 2706#ifdef PAE 2707 if ((prot & VM_PROT_EXECUTE) == 0) 2708 newpte |= pg_nx; 2709#endif 2710 if (wired) 2711 newpte |= PG_W; 2712 if (va < VM_MAXUSER_ADDRESS) 2713 newpte |= PG_U; 2714 if (pmap == kernel_pmap) 2715 newpte |= pgeflag; 2716 2717 critical_enter(); 2718 /* 2719 * if the mapping or permission bits are different, we need 2720 * to update the pte. 
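 * PG_M and PG_A are masked out of the comparison because the
 * hardware may set them asynchronously.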
2721 */ 2722 if ((origpte & ~(PG_M|PG_A)) != newpte) { 2723 if (origpte) { 2724 invlva = FALSE; 2725 origpte = *pte; 2726 PT_SET_VA(pte, newpte | PG_A, FALSE); 2727 if (origpte & PG_A) { 2728 if (origpte & PG_MANAGED) 2729 vm_page_aflag_set(om, PGA_REFERENCED); 2730 if (opa != VM_PAGE_TO_PHYS(m)) 2731 invlva = TRUE; 2732#ifdef PAE 2733 if ((origpte & PG_NX) == 0 && 2734 (newpte & PG_NX) != 0) 2735 invlva = TRUE; 2736#endif 2737 } 2738 if ((origpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) { 2739 if ((origpte & PG_MANAGED) != 0) 2740 vm_page_dirty(om); 2741 if ((prot & VM_PROT_WRITE) == 0) 2742 invlva = TRUE; 2743 } 2744 if ((origpte & PG_MANAGED) != 0 && 2745 TAILQ_EMPTY(&om->md.pv_list)) 2746 vm_page_aflag_clear(om, PGA_WRITEABLE); 2747 if (invlva) 2748 pmap_invalidate_page(pmap, va); 2749 } else{ 2750 PT_SET_VA(pte, newpte | PG_A, FALSE); 2751 } 2752 2753 } 2754 PT_UPDATES_FLUSH(); 2755 critical_exit(); 2756 if (*PMAP1) 2757 PT_SET_VA_MA(PMAP1, 0, TRUE); 2758 sched_unpin(); 2759 vm_page_unlock_queues(); 2760 PMAP_UNLOCK(pmap); 2761} 2762 2763/* 2764 * Maps a sequence of resident pages belonging to the same object. 2765 * The sequence begins with the given page m_start. This page is 2766 * mapped at the given virtual address start. Each subsequent page is 2767 * mapped at a virtual address that is offset from start by the same 2768 * amount as the page is offset from m_start within the object. The 2769 * last page in the sequence is the page with the largest offset from 2770 * m_start that can be mapped at a virtual address less than the given 2771 * virtual address end. Not every virtual page between start and end 2772 * is mapped; only those for which a resident page exists with the 2773 * corresponding offset from m_start are mapped. 2774 */ 2775void 2776pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end, 2777 vm_page_t m_start, vm_prot_t prot) 2778{ 2779 vm_page_t m, mpte; 2780 vm_pindex_t diff, psize; 2781 multicall_entry_t mcl[16]; 2782 multicall_entry_t *mclp = mcl; 2783 int error, count = 0; 2784 2785 VM_OBJECT_LOCK_ASSERT(m_start->object, MA_OWNED); 2786 psize = atop(end - start); 2787 mpte = NULL; 2788 m = m_start; 2789 vm_page_lock_queues(); 2790 PMAP_LOCK(pmap); 2791 while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) { 2792 mpte = pmap_enter_quick_locked(&mclp, &count, pmap, start + ptoa(diff), m, 2793 prot, mpte); 2794 m = TAILQ_NEXT(m, listq); 2795 if (count == 16) { 2796 error = HYPERVISOR_multicall(mcl, count); 2797 KASSERT(error == 0, ("bad multicall %d", error)); 2798 mclp = mcl; 2799 count = 0; 2800 } 2801 } 2802 if (count) { 2803 error = HYPERVISOR_multicall(mcl, count); 2804 KASSERT(error == 0, ("bad multicall %d", error)); 2805 } 2806 vm_page_unlock_queues(); 2807 PMAP_UNLOCK(pmap); 2808} 2809 2810/* 2811 * this code makes some *MAJOR* assumptions: 2812 * 1. Current pmap & pmap exists. 2813 * 2. Not wired. 2814 * 3. Read access. 2815 * 4. No page table pages. 2816 * but is *MUCH* faster than pmap_enter... 
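 * Under Xen the update is queued as a multicall entry, so callers
 * such as pmap_enter_object() can install a batch of mappings with
 * a single hypercall.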
2817 */ 2818 2819void 2820pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot) 2821{ 2822 multicall_entry_t mcl, *mclp; 2823 int count = 0; 2824 mclp = &mcl; 2825 2826 CTR4(KTR_PMAP, "pmap_enter_quick: pmap=%p va=0x%x m=%p prot=0x%x", 2827 pmap, va, m, prot); 2828 2829 vm_page_lock_queues(); 2830 PMAP_LOCK(pmap); 2831 (void)pmap_enter_quick_locked(&mclp, &count, pmap, va, m, prot, NULL); 2832 if (count) 2833 HYPERVISOR_multicall(&mcl, count); 2834 vm_page_unlock_queues(); 2835 PMAP_UNLOCK(pmap); 2836} 2837 2838#ifdef notyet 2839void 2840pmap_enter_quick_range(pmap_t pmap, vm_offset_t *addrs, vm_page_t *pages, vm_prot_t *prots, int count) 2841{ 2842 int i, error, index = 0; 2843 multicall_entry_t mcl[16]; 2844 multicall_entry_t *mclp = mcl; 2845 2846 PMAP_LOCK(pmap); 2847 for (i = 0; i < count; i++, addrs++, pages++, prots++) { 2848 if (!pmap_is_prefaultable_locked(pmap, *addrs)) 2849 continue; 2850 2851 (void) pmap_enter_quick_locked(&mclp, &index, pmap, *addrs, *pages, *prots, NULL); 2852 if (index == 16) { 2853 error = HYPERVISOR_multicall(mcl, index); 2854 mclp = mcl; 2855 index = 0; 2856 KASSERT(error == 0, ("bad multicall %d", error)); 2857 } 2858 } 2859 if (index) { 2860 error = HYPERVISOR_multicall(mcl, index); 2861 KASSERT(error == 0, ("bad multicall %d", error)); 2862 } 2863 2864 PMAP_UNLOCK(pmap); 2865} 2866#endif 2867 2868static vm_page_t 2869pmap_enter_quick_locked(multicall_entry_t **mclpp, int *count, pmap_t pmap, vm_offset_t va, vm_page_t m, 2870 vm_prot_t prot, vm_page_t mpte) 2871{ 2872 pt_entry_t *pte; 2873 vm_paddr_t pa; 2874 vm_page_t free; 2875 multicall_entry_t *mcl = *mclpp; 2876 2877 KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva || 2878 (m->oflags & VPO_UNMANAGED) != 0, 2879 ("pmap_enter_quick_locked: managed mapping within the clean submap")); 2880 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2881 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2882 2883 /* 2884 * In the case that a page table page is not 2885 * resident, we are creating it here. 2886 */ 2887 if (va < VM_MAXUSER_ADDRESS) { 2888 u_int ptepindex; 2889 pd_entry_t ptema; 2890 2891 /* 2892 * Calculate pagetable page index 2893 */ 2894 ptepindex = va >> PDRSHIFT; 2895 if (mpte && (mpte->pindex == ptepindex)) { 2896 mpte->wire_count++; 2897 } else { 2898 /* 2899 * Get the page directory entry 2900 */ 2901 ptema = pmap->pm_pdir[ptepindex]; 2902 2903 /* 2904 * If the page table page is mapped, we just increment 2905 * the hold count, and activate it. 2906 */ 2907 if (ptema & PG_V) { 2908 if (ptema & PG_PS) 2909 panic("pmap_enter_quick: unexpected mapping into 4MB page"); 2910 mpte = PHYS_TO_VM_PAGE(xpmap_mtop(ptema) & PG_FRAME); 2911 mpte->wire_count++; 2912 } else { 2913 mpte = _pmap_allocpte(pmap, ptepindex, 2914 M_NOWAIT); 2915 if (mpte == NULL) 2916 return (mpte); 2917 } 2918 } 2919 } else { 2920 mpte = NULL; 2921 } 2922 2923 /* 2924 * This call to vtopte makes the assumption that we are 2925 * entering the page into the current pmap. In order to support 2926 * quick entry into any pmap, one would likely use pmap_pte_quick. 2927 * But that isn't as quick as vtopte. 2928 */ 2929 KASSERT(pmap_is_current(pmap), ("entering pages in non-current pmap")); 2930 pte = vtopte(va); 2931 if (*pte & PG_V) { 2932 if (mpte != NULL) { 2933 mpte->wire_count--; 2934 mpte = NULL; 2935 } 2936 return (mpte); 2937 } 2938 2939 /* 2940 * Enter on the PV list if part of our managed memory. 
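 * If no pv entry can be allocated without blocking, the mapping is
 * abandoned and any newly allocated page table page is released.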
2941 */ 2942 if ((m->oflags & VPO_UNMANAGED) == 0 && 2943 !pmap_try_insert_pv_entry(pmap, va, m)) { 2944 if (mpte != NULL) { 2945 free = NULL; 2946 if (pmap_unwire_pte_hold(pmap, mpte, &free)) { 2947 pmap_invalidate_page(pmap, va); 2948 pmap_free_zero_pages(free); 2949 } 2950 2951 mpte = NULL; 2952 } 2953 return (mpte); 2954 } 2955 2956 /* 2957 * Increment counters 2958 */ 2959 pmap->pm_stats.resident_count++; 2960 2961 pa = VM_PAGE_TO_PHYS(m); 2962#ifdef PAE 2963 if ((prot & VM_PROT_EXECUTE) == 0) 2964 pa |= pg_nx; 2965#endif 2966 2967#if 0 2968 /* 2969 * Now validate mapping with RO protection 2970 */ 2971 if ((m->oflags & VPO_UNMANAGED) != 0) 2972 pte_store(pte, pa | PG_V | PG_U); 2973 else 2974 pte_store(pte, pa | PG_V | PG_U | PG_MANAGED); 2975#else 2976 /* 2977 * Now validate mapping with RO protection 2978 */ 2979 if ((m->oflags & VPO_UNMANAGED) != 0) 2980 pa = xpmap_ptom(pa | PG_V | PG_U); 2981 else 2982 pa = xpmap_ptom(pa | PG_V | PG_U | PG_MANAGED); 2983 2984 mcl->op = __HYPERVISOR_update_va_mapping; 2985 mcl->args[0] = va; 2986 mcl->args[1] = (uint32_t)(pa & 0xffffffff); 2987 mcl->args[2] = (uint32_t)(pa >> 32); 2988 mcl->args[3] = 0; 2989 *mclpp = mcl + 1; 2990 *count = *count + 1; 2991#endif 2992 return (mpte); 2993} 2994 2995/* 2996 * Make a temporary mapping for a physical address. This is only intended 2997 * to be used for panic dumps. 2998 */ 2999void * 3000pmap_kenter_temporary(vm_paddr_t pa, int i) 3001{ 3002 vm_offset_t va; 3003 vm_paddr_t ma = xpmap_ptom(pa); 3004 3005 va = (vm_offset_t)crashdumpmap + (i * PAGE_SIZE); 3006 PT_SET_MA(va, (ma & ~PAGE_MASK) | PG_V | pgeflag); 3007 invlpg(va); 3008 return ((void *)crashdumpmap); 3009} 3010 3011/* 3012 * This code maps large physical mmap regions into the 3013 * processor address space. Note that some shortcuts 3014 * are taken, but the code works. 3015 */ 3016void 3017pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object, 3018 vm_pindex_t pindex, vm_size_t size) 3019{ 3020 pd_entry_t *pde; 3021 vm_paddr_t pa, ptepa; 3022 vm_page_t p; 3023 int pat_mode; 3024 3025 VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); 3026 KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG, 3027 ("pmap_object_init_pt: non-device object")); 3028 if (pseflag && 3029 (addr & (NBPDR - 1)) == 0 && (size & (NBPDR - 1)) == 0) { 3030 if (!vm_object_populate(object, pindex, pindex + atop(size))) 3031 return; 3032 p = vm_page_lookup(object, pindex); 3033 KASSERT(p->valid == VM_PAGE_BITS_ALL, 3034 ("pmap_object_init_pt: invalid page %p", p)); 3035 pat_mode = p->md.pat_mode; 3036 3037 /* 3038 * Abort the mapping if the first page is not physically 3039 * aligned to a 2/4MB page boundary. 3040 */ 3041 ptepa = VM_PAGE_TO_PHYS(p); 3042 if (ptepa & (NBPDR - 1)) 3043 return; 3044 3045 /* 3046 * Skip the first page. Abort the mapping if the rest of 3047 * the pages are not physically contiguous or have differing 3048 * memory attributes. 3049 */ 3050 p = TAILQ_NEXT(p, listq); 3051 for (pa = ptepa + PAGE_SIZE; pa < ptepa + size; 3052 pa += PAGE_SIZE) { 3053 KASSERT(p->valid == VM_PAGE_BITS_ALL, 3054 ("pmap_object_init_pt: invalid page %p", p)); 3055 if (pa != VM_PAGE_TO_PHYS(p) || 3056 pat_mode != p->md.pat_mode) 3057 return; 3058 p = TAILQ_NEXT(p, listq); 3059 } 3060 3061 /* 3062 * Map using 2/4MB pages. Since "ptepa" is 2/4M aligned and 3063 * "size" is a multiple of 2/4M, adding the PAT setting to 3064 * "pa" will not affect the termination of this loop. 
3065 */ 3066 PMAP_LOCK(pmap); 3067 for (pa = ptepa | pmap_cache_bits(pat_mode, 1); pa < ptepa + 3068 size; pa += NBPDR) { 3069 pde = pmap_pde(pmap, addr); 3070 if (*pde == 0) { 3071 pde_store(pde, pa | PG_PS | PG_M | PG_A | 3072 PG_U | PG_RW | PG_V); 3073 pmap->pm_stats.resident_count += NBPDR / 3074 PAGE_SIZE; 3075 pmap_pde_mappings++; 3076 } 3077 /* Else continue on if the PDE is already valid. */ 3078 addr += NBPDR; 3079 } 3080 PMAP_UNLOCK(pmap); 3081 } 3082} 3083 3084/* 3085 * Routine: pmap_change_wiring 3086 * Function: Change the wiring attribute for a map/virtual-address 3087 * pair. 3088 * In/out conditions: 3089 * The mapping must already exist in the pmap. 3090 */ 3091void 3092pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired) 3093{ 3094 pt_entry_t *pte; 3095 3096 vm_page_lock_queues(); 3097 PMAP_LOCK(pmap); 3098 pte = pmap_pte(pmap, va); 3099 3100 if (wired && !pmap_pte_w(pte)) { 3101 PT_SET_VA_MA((pte), *(pte) | PG_W, TRUE); 3102 pmap->pm_stats.wired_count++; 3103 } else if (!wired && pmap_pte_w(pte)) { 3104 PT_SET_VA_MA((pte), *(pte) & ~PG_W, TRUE); 3105 pmap->pm_stats.wired_count--; 3106 } 3107 3108 /* 3109 * Wiring is not a hardware characteristic so there is no need to 3110 * invalidate TLB. 3111 */ 3112 pmap_pte_release(pte); 3113 PMAP_UNLOCK(pmap); 3114 vm_page_unlock_queues(); 3115} 3116 3117 3118 3119/* 3120 * Copy the range specified by src_addr/len 3121 * from the source map to the range dst_addr/len 3122 * in the destination map. 3123 * 3124 * This routine is only advisory and need not do anything. 3125 */ 3126 3127void 3128pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len, 3129 vm_offset_t src_addr) 3130{ 3131 vm_page_t free; 3132 vm_offset_t addr; 3133 vm_offset_t end_addr = src_addr + len; 3134 vm_offset_t pdnxt; 3135 3136 if (dst_addr != src_addr) 3137 return; 3138 3139 if (!pmap_is_current(src_pmap)) { 3140 CTR2(KTR_PMAP, 3141 "pmap_copy, skipping: pdir[PTDPTDI]=0x%jx PTDpde[0]=0x%jx", 3142 (src_pmap->pm_pdir[PTDPTDI] & PG_FRAME), (PTDpde[0] & PG_FRAME)); 3143 3144 return; 3145 } 3146 CTR5(KTR_PMAP, "pmap_copy: dst_pmap=%p src_pmap=%p dst_addr=0x%x len=%d src_addr=0x%x", 3147 dst_pmap, src_pmap, dst_addr, len, src_addr); 3148 3149#ifdef HAMFISTED_LOCKING 3150 mtx_lock(&createdelete_lock); 3151#endif 3152 3153 vm_page_lock_queues(); 3154 if (dst_pmap < src_pmap) { 3155 PMAP_LOCK(dst_pmap); 3156 PMAP_LOCK(src_pmap); 3157 } else { 3158 PMAP_LOCK(src_pmap); 3159 PMAP_LOCK(dst_pmap); 3160 } 3161 sched_pin(); 3162 for (addr = src_addr; addr < end_addr; addr = pdnxt) { 3163 pt_entry_t *src_pte, *dst_pte; 3164 vm_page_t dstmpte, srcmpte; 3165 pd_entry_t srcptepaddr; 3166 u_int ptepindex; 3167 3168 KASSERT(addr < UPT_MIN_ADDRESS, 3169 ("pmap_copy: invalid to pmap_copy page tables")); 3170 3171 pdnxt = (addr + NBPDR) & ~PDRMASK; 3172 ptepindex = addr >> PDRSHIFT; 3173 3174 srcptepaddr = PT_GET(&src_pmap->pm_pdir[ptepindex]); 3175 if (srcptepaddr == 0) 3176 continue; 3177 3178 if (srcptepaddr & PG_PS) { 3179 if (dst_pmap->pm_pdir[ptepindex] == 0) { 3180 PD_SET_VA(dst_pmap, ptepindex, srcptepaddr & ~PG_W, TRUE); 3181 dst_pmap->pm_stats.resident_count += 3182 NBPDR / PAGE_SIZE; 3183 } 3184 continue; 3185 } 3186 3187 srcmpte = PHYS_TO_VM_PAGE(srcptepaddr & PG_FRAME); 3188 KASSERT(srcmpte->wire_count > 0, 3189 ("pmap_copy: source page table page is unused")); 3190 3191 if (pdnxt > end_addr) 3192 pdnxt = end_addr; 3193 3194 src_pte = vtopte(addr); 3195 while (addr < pdnxt) { 3196 pt_entry_t ptetemp; 3197 ptetemp = *src_pte; 3198 /* 
3199 * we only virtual copy managed pages 3200 */ 3201 if ((ptetemp & PG_MANAGED) != 0) { 3202 dstmpte = pmap_allocpte(dst_pmap, addr, 3203 M_NOWAIT); 3204 if (dstmpte == NULL) 3205 goto out; 3206 dst_pte = pmap_pte_quick(dst_pmap, addr); 3207 if (*dst_pte == 0 && 3208 pmap_try_insert_pv_entry(dst_pmap, addr, 3209 PHYS_TO_VM_PAGE(xpmap_mtop(ptetemp) & PG_FRAME))) { 3210 /* 3211 * Clear the wired, modified, and 3212 * accessed (referenced) bits 3213 * during the copy. 3214 */ 3215 KASSERT(ptetemp != 0, ("src_pte not set")); 3216 PT_SET_VA_MA(dst_pte, ptetemp & ~(PG_W | PG_M | PG_A), TRUE /* XXX debug */); 3217 KASSERT(*dst_pte == (ptetemp & ~(PG_W | PG_M | PG_A)), 3218 ("no pmap copy expected: 0x%jx saw: 0x%jx", 3219 ptetemp & ~(PG_W | PG_M | PG_A), *dst_pte)); 3220 dst_pmap->pm_stats.resident_count++; 3221 } else { 3222 free = NULL; 3223 if (pmap_unwire_pte_hold(dst_pmap, 3224 dstmpte, &free)) { 3225 pmap_invalidate_page(dst_pmap, 3226 addr); 3227 pmap_free_zero_pages(free); 3228 } 3229 goto out; 3230 } 3231 if (dstmpte->wire_count >= srcmpte->wire_count) 3232 break; 3233 } 3234 addr += PAGE_SIZE; 3235 src_pte++; 3236 } 3237 } 3238out: 3239 PT_UPDATES_FLUSH(); 3240 sched_unpin(); 3241 vm_page_unlock_queues(); 3242 PMAP_UNLOCK(src_pmap); 3243 PMAP_UNLOCK(dst_pmap); 3244 3245#ifdef HAMFISTED_LOCKING 3246 mtx_unlock(&createdelete_lock); 3247#endif 3248} 3249 3250static __inline void 3251pagezero(void *page) 3252{ 3253#if defined(I686_CPU) 3254 if (cpu_class == CPUCLASS_686) { 3255#if defined(CPU_ENABLE_SSE) 3256 if (cpu_feature & CPUID_SSE2) 3257 sse2_pagezero(page); 3258 else 3259#endif 3260 i686_pagezero(page); 3261 } else 3262#endif 3263 bzero(page, PAGE_SIZE); 3264} 3265 3266/* 3267 * pmap_zero_page zeros the specified hardware page by mapping 3268 * the page into KVM and using bzero to clear its contents. 3269 */ 3270void 3271pmap_zero_page(vm_page_t m) 3272{ 3273 struct sysmaps *sysmaps; 3274 3275 sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)]; 3276 mtx_lock(&sysmaps->lock); 3277 if (*sysmaps->CMAP2) 3278 panic("pmap_zero_page: CMAP2 busy"); 3279 sched_pin(); 3280 PT_SET_MA(sysmaps->CADDR2, PG_V | PG_RW | VM_PAGE_TO_MACH(m) | PG_A | PG_M); 3281 pagezero(sysmaps->CADDR2); 3282 PT_SET_MA(sysmaps->CADDR2, 0); 3283 sched_unpin(); 3284 mtx_unlock(&sysmaps->lock); 3285} 3286 3287/* 3288 * pmap_zero_page_area zeros the specified hardware page by mapping 3289 * the page into KVM and using bzero to clear its contents. 3290 * 3291 * off and size may not cover an area beyond a single hardware page. 3292 */ 3293void 3294pmap_zero_page_area(vm_page_t m, int off, int size) 3295{ 3296 struct sysmaps *sysmaps; 3297 3298 sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)]; 3299 mtx_lock(&sysmaps->lock); 3300 if (*sysmaps->CMAP2) 3301 panic("pmap_zero_page_area: CMAP2 busy"); 3302 sched_pin(); 3303 PT_SET_MA(sysmaps->CADDR2, PG_V | PG_RW | VM_PAGE_TO_MACH(m) | PG_A | PG_M); 3304 3305 if (off == 0 && size == PAGE_SIZE) 3306 pagezero(sysmaps->CADDR2); 3307 else 3308 bzero((char *)sysmaps->CADDR2 + off, size); 3309 PT_SET_MA(sysmaps->CADDR2, 0); 3310 sched_unpin(); 3311 mtx_unlock(&sysmaps->lock); 3312} 3313 3314/* 3315 * pmap_zero_page_idle zeros the specified hardware page by mapping 3316 * the page into KVM and using bzero to clear its contents. This 3317 * is intended to be called from the vm_pagezero process only and 3318 * outside of Giant. 
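 * It uses the dedicated CMAP3/CADDR3 mapping window, so the sysmaps
 * lock is never taken.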
3319 */ 3320void 3321pmap_zero_page_idle(vm_page_t m) 3322{ 3323 3324 if (*CMAP3) 3325 panic("pmap_zero_page_idle: CMAP3 busy"); 3326 sched_pin(); 3327 PT_SET_MA(CADDR3, PG_V | PG_RW | VM_PAGE_TO_MACH(m) | PG_A | PG_M); 3328 pagezero(CADDR3); 3329 PT_SET_MA(CADDR3, 0); 3330 sched_unpin(); 3331} 3332 3333/* 3334 * pmap_copy_page copies the specified (machine independent) 3335 * page by mapping the page into virtual memory and using 3336 * bcopy to copy the page, one machine dependent page at a 3337 * time. 3338 */ 3339void 3340pmap_copy_page(vm_page_t src, vm_page_t dst) 3341{ 3342 struct sysmaps *sysmaps; 3343 3344 sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)]; 3345 mtx_lock(&sysmaps->lock); 3346 if (*sysmaps->CMAP1) 3347 panic("pmap_copy_page: CMAP1 busy"); 3348 if (*sysmaps->CMAP2) 3349 panic("pmap_copy_page: CMAP2 busy"); 3350 sched_pin(); 3351 PT_SET_MA(sysmaps->CADDR1, PG_V | VM_PAGE_TO_MACH(src) | PG_A); 3352 PT_SET_MA(sysmaps->CADDR2, PG_V | PG_RW | VM_PAGE_TO_MACH(dst) | PG_A | PG_M); 3353 bcopy(sysmaps->CADDR1, sysmaps->CADDR2, PAGE_SIZE); 3354 PT_SET_MA(sysmaps->CADDR1, 0); 3355 PT_SET_MA(sysmaps->CADDR2, 0); 3356 sched_unpin(); 3357 mtx_unlock(&sysmaps->lock); 3358} 3359 3360/* 3361 * Returns true if the pmap's pv is one of the first 3362 * 16 pvs linked to from this page. This count may 3363 * be changed upwards or downwards in the future; it 3364 * is only necessary that true be returned for a small 3365 * subset of pmaps for proper page aging. 3366 */ 3367boolean_t 3368pmap_page_exists_quick(pmap_t pmap, vm_page_t m) 3369{ 3370 pv_entry_t pv; 3371 int loops = 0; 3372 boolean_t rv; 3373 3374 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 3375 ("pmap_page_exists_quick: page %p is not managed", m)); 3376 rv = FALSE; 3377 vm_page_lock_queues(); 3378 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { 3379 if (PV_PMAP(pv) == pmap) { 3380 rv = TRUE; 3381 break; 3382 } 3383 loops++; 3384 if (loops >= 16) 3385 break; 3386 } 3387 vm_page_unlock_queues(); 3388 return (rv); 3389} 3390 3391/* 3392 * pmap_page_wired_mappings: 3393 * 3394 * Return the number of managed mappings to the given physical page 3395 * that are wired. 3396 */ 3397int 3398pmap_page_wired_mappings(vm_page_t m) 3399{ 3400 pv_entry_t pv; 3401 pt_entry_t *pte; 3402 pmap_t pmap; 3403 int count; 3404 3405 count = 0; 3406 if ((m->oflags & VPO_UNMANAGED) != 0) 3407 return (count); 3408 vm_page_lock_queues(); 3409 sched_pin(); 3410 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { 3411 pmap = PV_PMAP(pv); 3412 PMAP_LOCK(pmap); 3413 pte = pmap_pte_quick(pmap, pv->pv_va); 3414 if ((*pte & PG_W) != 0) 3415 count++; 3416 PMAP_UNLOCK(pmap); 3417 } 3418 sched_unpin(); 3419 vm_page_unlock_queues(); 3420 return (count); 3421} 3422 3423/* 3424 * Returns TRUE if the given page is mapped. Otherwise, returns FALSE. 3425 */ 3426boolean_t 3427pmap_page_is_mapped(vm_page_t m) 3428{ 3429 3430 if ((m->oflags & VPO_UNMANAGED) != 0) 3431 return (FALSE); 3432 return (!TAILQ_EMPTY(&m->md.pv_list)); 3433} 3434 3435/* 3436 * Remove all pages from specified address space 3437 * this aids process exit speeds. Also, this code 3438 * is special cased for current process only, but 3439 * can have the more generic (and slightly slower) 3440 * mode enabled. This is much faster than pmap_remove 3441 * in the case of running down an entire address space. 
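 * It walks the pmap's pv chunks rather than scanning the page tables,
 * and wired mappings are deliberately left in place.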
3442 */ 3443void 3444pmap_remove_pages(pmap_t pmap) 3445{ 3446 pt_entry_t *pte, tpte; 3447 vm_page_t m, free = NULL; 3448 pv_entry_t pv; 3449 struct pv_chunk *pc, *npc; 3450 int field, idx; 3451 int32_t bit; 3452 uint32_t inuse, bitmask; 3453 int allfree; 3454 3455 CTR1(KTR_PMAP, "pmap_remove_pages: pmap=%p", pmap); 3456 3457 if (pmap != vmspace_pmap(curthread->td_proc->p_vmspace)) { 3458 printf("warning: pmap_remove_pages called with non-current pmap\n"); 3459 return; 3460 } 3461 vm_page_lock_queues(); 3462 KASSERT(pmap_is_current(pmap), ("removing pages from non-current pmap")); 3463 PMAP_LOCK(pmap); 3464 sched_pin(); 3465 TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) { 3466 allfree = 1; 3467 for (field = 0; field < _NPCM; field++) { 3468 inuse = (~(pc->pc_map[field])) & pc_freemask[field]; 3469 while (inuse != 0) { 3470 bit = bsfl(inuse); 3471 bitmask = 1UL << bit; 3472 idx = field * 32 + bit; 3473 pv = &pc->pc_pventry[idx]; 3474 inuse &= ~bitmask; 3475 3476 pte = vtopte(pv->pv_va); 3477 tpte = *pte ? xpmap_mtop(*pte) : 0; 3478 3479 if (tpte == 0) { 3480 printf( 3481 "TPTE at %p IS ZERO @ VA %08x\n", 3482 pte, pv->pv_va); 3483 panic("bad pte"); 3484 } 3485 3486/* 3487 * We cannot remove wired pages from a process' mapping at this time 3488 */ 3489 if (tpte & PG_W) { 3490 allfree = 0; 3491 continue; 3492 } 3493 3494 m = PHYS_TO_VM_PAGE(tpte & PG_FRAME); 3495 KASSERT(m->phys_addr == (tpte & PG_FRAME), 3496 ("vm_page_t %p phys_addr mismatch %016jx %016jx", 3497 m, (uintmax_t)m->phys_addr, 3498 (uintmax_t)tpte)); 3499 3500 KASSERT(m < &vm_page_array[vm_page_array_size], 3501 ("pmap_remove_pages: bad tpte %#jx", 3502 (uintmax_t)tpte)); 3503 3504 3505 PT_CLEAR_VA(pte, FALSE); 3506 3507 /* 3508 * Update the vm_page_t clean/reference bits. 3509 */ 3510 if (tpte & PG_M) 3511 vm_page_dirty(m); 3512 3513 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list); 3514 if (TAILQ_EMPTY(&m->md.pv_list)) 3515 vm_page_aflag_clear(m, PGA_WRITEABLE); 3516 3517 pmap_unuse_pt(pmap, pv->pv_va, &free); 3518 3519 /* Mark free */ 3520 PV_STAT(pv_entry_frees++); 3521 PV_STAT(pv_entry_spare++); 3522 pv_entry_count--; 3523 pc->pc_map[field] |= bitmask; 3524 pmap->pm_stats.resident_count--; 3525 } 3526 } 3527 PT_UPDATES_FLUSH(); 3528 if (allfree) { 3529 PV_STAT(pv_entry_spare -= _NPCPV); 3530 PV_STAT(pc_chunk_count--); 3531 PV_STAT(pc_chunk_frees++); 3532 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 3533 m = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)pc)); 3534 pmap_qremove((vm_offset_t)pc, 1); 3535 vm_page_unwire(m, 0); 3536 vm_page_free(m); 3537 pmap_ptelist_free(&pv_vafree, (vm_offset_t)pc); 3538 } 3539 } 3540 PT_UPDATES_FLUSH(); 3541 if (*PMAP1) 3542 PT_SET_MA(PADDR1, 0); 3543 3544 sched_unpin(); 3545 pmap_invalidate_all(pmap); 3546 vm_page_unlock_queues(); 3547 PMAP_UNLOCK(pmap); 3548 pmap_free_zero_pages(free); 3549} 3550 3551/* 3552 * pmap_is_modified: 3553 * 3554 * Return whether or not the specified physical page was modified 3555 * in any physical maps. 3556 */ 3557boolean_t 3558pmap_is_modified(vm_page_t m) 3559{ 3560 pv_entry_t pv; 3561 pt_entry_t *pte; 3562 pmap_t pmap; 3563 boolean_t rv; 3564 3565 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 3566 ("pmap_is_modified: page %p is not managed", m)); 3567 rv = FALSE; 3568 3569 /* 3570 * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be 3571 * concurrently set while the object is locked. Thus, if PGA_WRITEABLE 3572 * is clear, no PTEs can have PG_M set. 
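 * In that case the pv list walk below is skipped entirely.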
3573 */ 3574 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 3575 if ((m->oflags & VPO_BUSY) == 0 && 3576 (m->aflags & PGA_WRITEABLE) == 0) 3577 return (rv); 3578 vm_page_lock_queues(); 3579 sched_pin(); 3580 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { 3581 pmap = PV_PMAP(pv); 3582 PMAP_LOCK(pmap); 3583 pte = pmap_pte_quick(pmap, pv->pv_va); 3584 rv = (*pte & PG_M) != 0; 3585 PMAP_UNLOCK(pmap); 3586 if (rv) 3587 break; 3588 } 3589 if (*PMAP1) 3590 PT_SET_MA(PADDR1, 0); 3591 sched_unpin(); 3592 vm_page_unlock_queues(); 3593 return (rv); 3594} 3595 3596/* 3597 * pmap_is_prefaultable: 3598 * 3599 * Return whether or not the specified virtual address is elgible 3600 * for prefault. 3601 */ 3602static boolean_t 3603pmap_is_prefaultable_locked(pmap_t pmap, vm_offset_t addr) 3604{ 3605 pt_entry_t *pte; 3606 boolean_t rv = FALSE; 3607 3608 return (rv); 3609 3610 if (pmap_is_current(pmap) && *pmap_pde(pmap, addr)) { 3611 pte = vtopte(addr); 3612 rv = (*pte == 0); 3613 } 3614 return (rv); 3615} 3616 3617boolean_t 3618pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr) 3619{ 3620 boolean_t rv; 3621 3622 PMAP_LOCK(pmap); 3623 rv = pmap_is_prefaultable_locked(pmap, addr); 3624 PMAP_UNLOCK(pmap); 3625 return (rv); 3626} 3627 3628boolean_t 3629pmap_is_referenced(vm_page_t m) 3630{ 3631 pv_entry_t pv; 3632 pt_entry_t *pte; 3633 pmap_t pmap; 3634 boolean_t rv; 3635 3636 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 3637 ("pmap_is_referenced: page %p is not managed", m)); 3638 rv = FALSE; 3639 vm_page_lock_queues(); 3640 sched_pin(); 3641 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { 3642 pmap = PV_PMAP(pv); 3643 PMAP_LOCK(pmap); 3644 pte = pmap_pte_quick(pmap, pv->pv_va); 3645 rv = (*pte & (PG_A | PG_V)) == (PG_A | PG_V); 3646 PMAP_UNLOCK(pmap); 3647 if (rv) 3648 break; 3649 } 3650 if (*PMAP1) 3651 PT_SET_MA(PADDR1, 0); 3652 sched_unpin(); 3653 vm_page_unlock_queues(); 3654 return (rv); 3655} 3656 3657void 3658pmap_map_readonly(pmap_t pmap, vm_offset_t va, int len) 3659{ 3660 int i, npages = round_page(len) >> PAGE_SHIFT; 3661 for (i = 0; i < npages; i++) { 3662 pt_entry_t *pte; 3663 pte = pmap_pte(pmap, (vm_offset_t)(va + i*PAGE_SIZE)); 3664 vm_page_lock_queues(); 3665 pte_store(pte, xpmap_mtop(*pte & ~(PG_RW|PG_M))); 3666 vm_page_unlock_queues(); 3667 PMAP_MARK_PRIV(xpmap_mtop(*pte)); 3668 pmap_pte_release(pte); 3669 } 3670} 3671 3672void 3673pmap_map_readwrite(pmap_t pmap, vm_offset_t va, int len) 3674{ 3675 int i, npages = round_page(len) >> PAGE_SHIFT; 3676 for (i = 0; i < npages; i++) { 3677 pt_entry_t *pte; 3678 pte = pmap_pte(pmap, (vm_offset_t)(va + i*PAGE_SIZE)); 3679 PMAP_MARK_UNPRIV(xpmap_mtop(*pte)); 3680 vm_page_lock_queues(); 3681 pte_store(pte, xpmap_mtop(*pte) | (PG_RW|PG_M)); 3682 vm_page_unlock_queues(); 3683 pmap_pte_release(pte); 3684 } 3685} 3686 3687/* 3688 * Clear the write and modified bits in each of the given page's mappings. 3689 */ 3690void 3691pmap_remove_write(vm_page_t m) 3692{ 3693 pv_entry_t pv; 3694 pmap_t pmap; 3695 pt_entry_t oldpte, *pte; 3696 3697 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 3698 ("pmap_remove_write: page %p is not managed", m)); 3699 3700 /* 3701 * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be set by 3702 * another thread while the object is locked. Thus, if PGA_WRITEABLE 3703 * is clear, no page table entries need updating. 
3704 */ 3705 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 3706 if ((m->oflags & VPO_BUSY) == 0 && 3707 (m->aflags & PGA_WRITEABLE) == 0) 3708 return; 3709 vm_page_lock_queues(); 3710 sched_pin(); 3711 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { 3712 pmap = PV_PMAP(pv); 3713 PMAP_LOCK(pmap); 3714 pte = pmap_pte_quick(pmap, pv->pv_va); 3715retry: 3716 oldpte = *pte; 3717 if ((oldpte & PG_RW) != 0) { 3718 vm_paddr_t newpte = oldpte & ~(PG_RW | PG_M); 3719 3720 /* 3721 * Regardless of whether a pte is 32 or 64 bits 3722 * in size, PG_RW and PG_M are among the least 3723 * significant 32 bits. 3724 */ 3725 PT_SET_VA_MA(pte, newpte, TRUE); 3726 if (*pte != newpte) 3727 goto retry; 3728 3729 if ((oldpte & PG_M) != 0) 3730 vm_page_dirty(m); 3731 pmap_invalidate_page(pmap, pv->pv_va); 3732 } 3733 PMAP_UNLOCK(pmap); 3734 } 3735 vm_page_aflag_clear(m, PGA_WRITEABLE); 3736 PT_UPDATES_FLUSH(); 3737 if (*PMAP1) 3738 PT_SET_MA(PADDR1, 0); 3739 sched_unpin(); 3740 vm_page_unlock_queues(); 3741} 3742 3743/* 3744 * pmap_ts_referenced: 3745 * 3746 * Return a count of reference bits for a page, clearing those bits. 3747 * It is not necessary for every reference bit to be cleared, but it 3748 * is necessary that 0 only be returned when there are truly no 3749 * reference bits set. 3750 * 3751 * XXX: The exact number of bits to check and clear is a matter that 3752 * should be tested and standardized at some point in the future for 3753 * optimal aging of shared pages. 3754 */ 3755int 3756pmap_ts_referenced(vm_page_t m) 3757{ 3758 pv_entry_t pv, pvf, pvn; 3759 pmap_t pmap; 3760 pt_entry_t *pte; 3761 int rtval = 0; 3762 3763 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 3764 ("pmap_ts_referenced: page %p is not managed", m)); 3765 vm_page_lock_queues(); 3766 sched_pin(); 3767 if ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) { 3768 pvf = pv; 3769 do { 3770 pvn = TAILQ_NEXT(pv, pv_list); 3771 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list); 3772 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list); 3773 pmap = PV_PMAP(pv); 3774 PMAP_LOCK(pmap); 3775 pte = pmap_pte_quick(pmap, pv->pv_va); 3776 if ((*pte & PG_A) != 0) { 3777 PT_SET_VA_MA(pte, *pte & ~PG_A, FALSE); 3778 pmap_invalidate_page(pmap, pv->pv_va); 3779 rtval++; 3780 if (rtval > 4) 3781 pvn = NULL; 3782 } 3783 PMAP_UNLOCK(pmap); 3784 } while ((pv = pvn) != NULL && pv != pvf); 3785 } 3786 PT_UPDATES_FLUSH(); 3787 if (*PMAP1) 3788 PT_SET_MA(PADDR1, 0); 3789 sched_unpin(); 3790 vm_page_unlock_queues(); 3791 return (rtval); 3792} 3793 3794/* 3795 * Clear the modify bits on the specified physical page. 3796 */ 3797void 3798pmap_clear_modify(vm_page_t m) 3799{ 3800 pv_entry_t pv; 3801 pmap_t pmap; 3802 pt_entry_t *pte; 3803 3804 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 3805 ("pmap_clear_modify: page %p is not managed", m)); 3806 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 3807 KASSERT((m->oflags & VPO_BUSY) == 0, 3808 ("pmap_clear_modify: page %p is busy", m)); 3809 3810 /* 3811 * If the page is not PGA_WRITEABLE, then no PTEs can have PG_M set. 3812 * If the object containing the page is locked and the page is not 3813 * VPO_BUSY, then PGA_WRITEABLE cannot be concurrently set. 
3814 */ 3815 if ((m->aflags & PGA_WRITEABLE) == 0) 3816 return; 3817 vm_page_lock_queues(); 3818 sched_pin(); 3819 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { 3820 pmap = PV_PMAP(pv); 3821 PMAP_LOCK(pmap); 3822 pte = pmap_pte_quick(pmap, pv->pv_va); 3823 if ((*pte & (PG_M | PG_RW)) == (PG_M | PG_RW)) { 3824 /* 3825 * Regardless of whether a pte is 32 or 64 bits 3826 * in size, PG_M is among the least significant 3827 * 32 bits. 3828 */ 3829 PT_SET_VA_MA(pte, *pte & ~PG_M, FALSE); 3830 pmap_invalidate_page(pmap, pv->pv_va); 3831 } 3832 PMAP_UNLOCK(pmap); 3833 } 3834 sched_unpin(); 3835 vm_page_unlock_queues(); 3836} 3837 3838/* 3839 * pmap_clear_reference: 3840 * 3841 * Clear the reference bit on the specified physical page. 3842 */ 3843void 3844pmap_clear_reference(vm_page_t m) 3845{ 3846 pv_entry_t pv; 3847 pmap_t pmap; 3848 pt_entry_t *pte; 3849 3850 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 3851 ("pmap_clear_reference: page %p is not managed", m)); 3852 vm_page_lock_queues(); 3853 sched_pin(); 3854 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { 3855 pmap = PV_PMAP(pv); 3856 PMAP_LOCK(pmap); 3857 pte = pmap_pte_quick(pmap, pv->pv_va); 3858 if ((*pte & PG_A) != 0) { 3859 /* 3860 * Regardless of whether a pte is 32 or 64 bits 3861 * in size, PG_A is among the least significant 3862 * 32 bits. 3863 */ 3864 PT_SET_VA_MA(pte, *pte & ~PG_A, FALSE); 3865 pmap_invalidate_page(pmap, pv->pv_va); 3866 } 3867 PMAP_UNLOCK(pmap); 3868 } 3869 sched_unpin(); 3870 vm_page_unlock_queues(); 3871} 3872 3873/* 3874 * Miscellaneous support routines follow 3875 */ 3876 3877/* 3878 * Map a set of physical memory pages into the kernel virtual 3879 * address space. Return a pointer to where it is mapped. This 3880 * routine is intended to be used for mapping device memory, 3881 * NOT real memory. 3882 */ 3883void * 3884pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, int mode) 3885{ 3886 vm_offset_t va, offset; 3887 vm_size_t tmpsize; 3888 3889 offset = pa & PAGE_MASK; 3890 size = roundup(offset + size, PAGE_SIZE); 3891 pa = pa & PG_FRAME; 3892 3893 if (pa < KERNLOAD && pa + size <= KERNLOAD) 3894 va = KERNBASE + pa; 3895 else 3896 va = kmem_alloc_nofault(kernel_map, size); 3897 if (!va) 3898 panic("pmap_mapdev: Couldn't alloc kernel virtual memory"); 3899 3900 for (tmpsize = 0; tmpsize < size; tmpsize += PAGE_SIZE) 3901 pmap_kenter_attr(va + tmpsize, pa + tmpsize, mode); 3902 pmap_invalidate_range(kernel_pmap, va, va + tmpsize); 3903 pmap_invalidate_cache_range(va, va + size); 3904 return ((void *)(va + offset)); 3905} 3906 3907void * 3908pmap_mapdev(vm_paddr_t pa, vm_size_t size) 3909{ 3910 3911 return (pmap_mapdev_attr(pa, size, PAT_UNCACHEABLE)); 3912} 3913 3914void * 3915pmap_mapbios(vm_paddr_t pa, vm_size_t size) 3916{ 3917 3918 return (pmap_mapdev_attr(pa, size, PAT_WRITE_BACK)); 3919} 3920 3921void 3922pmap_unmapdev(vm_offset_t va, vm_size_t size) 3923{ 3924 vm_offset_t base, offset, tmpva; 3925 3926 if (va >= KERNBASE && va + size <= KERNBASE + KERNLOAD) 3927 return; 3928 base = trunc_page(va); 3929 offset = va & PAGE_MASK; 3930 size = roundup(offset + size, PAGE_SIZE); 3931 critical_enter(); 3932 for (tmpva = base; tmpva < (base + size); tmpva += PAGE_SIZE) 3933 pmap_kremove(tmpva); 3934 pmap_invalidate_range(kernel_pmap, va, tmpva); 3935 critical_exit(); 3936 kmem_free(kernel_map, base, size); 3937} 3938 3939/* 3940 * Sets the memory attribute for the specified page. 
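 * Only future mappings observe the new attribute; data already cached
 * for the page is flushed below, through an existing sf buffer mapping
 * if possible, otherwise through a transient CMAP2 mapping (unless the
 * CPU self-snoops).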
3941 */ 3942void 3943pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma) 3944{ 3945 3946 m->md.pat_mode = ma; 3947 if ((m->flags & PG_FICTITIOUS) != 0) 3948 return; 3949 3950 /* 3951 * If "m" is a normal page, flush it from the cache. 3952 * See pmap_invalidate_cache_range(). 3953 * 3954 * First, try to find an existing mapping of the page by sf 3955 * buffer. sf_buf_invalidate_cache() modifies mapping and 3956 * flushes the cache. 3957 */ 3958 if (sf_buf_invalidate_cache(m)) 3959 return; 3960 3961 /* 3962 * If page is not mapped by sf buffer, but CPU does not 3963 * support self snoop, map the page transient and do 3964 * invalidation. In the worst case, whole cache is flushed by 3965 * pmap_invalidate_cache_range(). 3966 */ 3967 if ((cpu_feature & CPUID_SS) == 0) 3968 pmap_flush_page(m); 3969} 3970 3971static void 3972pmap_flush_page(vm_page_t m) 3973{ 3974 struct sysmaps *sysmaps; 3975 vm_offset_t sva, eva; 3976 3977 if ((cpu_feature & CPUID_CLFSH) != 0) { 3978 sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)]; 3979 mtx_lock(&sysmaps->lock); 3980 if (*sysmaps->CMAP2) 3981 panic("pmap_flush_page: CMAP2 busy"); 3982 sched_pin(); 3983 PT_SET_MA(sysmaps->CADDR2, PG_V | PG_RW | 3984 VM_PAGE_TO_MACH(m) | PG_A | PG_M | 3985 pmap_cache_bits(m->md.pat_mode, 0)); 3986 invlcaddr(sysmaps->CADDR2); 3987 sva = (vm_offset_t)sysmaps->CADDR2; 3988 eva = sva + PAGE_SIZE; 3989 3990 /* 3991 * Use mfence despite the ordering implied by 3992 * mtx_{un,}lock() because clflush is not guaranteed 3993 * to be ordered by any other instruction. 3994 */ 3995 mfence(); 3996 for (; sva < eva; sva += cpu_clflush_line_size) 3997 clflush(sva); 3998 mfence(); 3999 PT_SET_MA(sysmaps->CADDR2, 0); 4000 sched_unpin(); 4001 mtx_unlock(&sysmaps->lock); 4002 } else 4003 pmap_invalidate_cache(); 4004} 4005 4006/* 4007 * Changes the specified virtual address range's memory type to that given by 4008 * the parameter "mode". The specified virtual address range must be 4009 * completely contained within either the kernel map. 4010 * 4011 * Returns zero if the change completed successfully, and either EINVAL or 4012 * ENOMEM if the change failed. Specifically, EINVAL is returned if some part 4013 * of the virtual address range was not mapped, and ENOMEM is returned if 4014 * there was insufficient memory available to complete the change. 4015 */ 4016int 4017pmap_change_attr(vm_offset_t va, vm_size_t size, int mode) 4018{ 4019 vm_offset_t base, offset, tmpva; 4020 pt_entry_t *pte; 4021 u_int opte, npte; 4022 pd_entry_t *pde; 4023 boolean_t changed; 4024 4025 base = trunc_page(va); 4026 offset = va & PAGE_MASK; 4027 size = roundup(offset + size, PAGE_SIZE); 4028 4029 /* Only supported on kernel virtual addresses. */ 4030 if (base <= VM_MAXUSER_ADDRESS) 4031 return (EINVAL); 4032 4033 /* 4MB pages and pages that aren't mapped aren't supported. */ 4034 for (tmpva = base; tmpva < (base + size); tmpva += PAGE_SIZE) { 4035 pde = pmap_pde(kernel_pmap, tmpva); 4036 if (*pde & PG_PS) 4037 return (EINVAL); 4038 if ((*pde & PG_V) == 0) 4039 return (EINVAL); 4040 pte = vtopte(va); 4041 if ((*pte & PG_V) == 0) 4042 return (EINVAL); 4043 } 4044 4045 changed = FALSE; 4046 4047 /* 4048 * Ok, all the pages exist and are 4k, so run through them updating 4049 * their cache mode. 4050 */ 4051 for (tmpva = base; size > 0; ) { 4052 pte = vtopte(tmpva); 4053 4054 /* 4055 * The cache mode bits are all in the low 32-bits of the 4056 * PTE, so we can just spin on updating the low 32-bits. 
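 * The store is retried if the PTE changes underneath us before the
 * new value is observed.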

/*
 * Changes the specified virtual address range's memory type to that given by
 * the parameter "mode".  The specified virtual address range must be
 * completely contained within the kernel map.
 *
 * Returns zero if the change completed successfully, and either EINVAL or
 * ENOMEM if the change failed.  Specifically, EINVAL is returned if some part
 * of the virtual address range was not mapped, and ENOMEM is returned if
 * there was insufficient memory available to complete the change.
 */
int
pmap_change_attr(vm_offset_t va, vm_size_t size, int mode)
{
	vm_offset_t base, offset, tmpva;
	pt_entry_t *pte;
	u_int opte, npte;
	pd_entry_t *pde;
	boolean_t changed;

	base = trunc_page(va);
	offset = va & PAGE_MASK;
	size = roundup(offset + size, PAGE_SIZE);

	/* Only supported on kernel virtual addresses. */
	if (base <= VM_MAXUSER_ADDRESS)
		return (EINVAL);

	/* 4MB pages and pages that aren't mapped aren't supported. */
	for (tmpva = base; tmpva < (base + size); tmpva += PAGE_SIZE) {
		pde = pmap_pde(kernel_pmap, tmpva);
		if (*pde & PG_PS)
			return (EINVAL);
		if ((*pde & PG_V) == 0)
			return (EINVAL);
		pte = vtopte(tmpva);
		if ((*pte & PG_V) == 0)
			return (EINVAL);
	}

	changed = FALSE;

	/*
	 * Ok, all the pages exist and are 4k, so run through them updating
	 * their cache mode.
	 */
	for (tmpva = base; size > 0; ) {
		pte = vtopte(tmpva);

		/*
		 * The cache mode bits are all in the low 32-bits of the
		 * PTE, so we can just spin on updating the low 32-bits.
		 */
		do {
			opte = *(u_int *)pte;
			npte = opte & ~(PG_PTE_PAT | PG_NC_PCD | PG_NC_PWT);
			npte |= pmap_cache_bits(mode, 0);
			PT_SET_VA_MA(pte, npte, TRUE);
		} while (npte != opte && (*pte != npte));
		if (npte != opte)
			changed = TRUE;
		tmpva += PAGE_SIZE;
		size -= PAGE_SIZE;
	}

	/*
	 * Flush CPU caches to make sure any data isn't cached that
	 * shouldn't be, etc.
	 */
	if (changed) {
		pmap_invalidate_range(kernel_pmap, base, tmpva);
		pmap_invalidate_cache_range(base, tmpva);
	}
	return (0);
}
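
/*
 * Illustrative usage only (the variables are hypothetical): a driver
 * that has already mapped a framebuffer into the kernel map could
 * switch the range to write-combining with
 *
 *	error = pmap_change_attr(fb_va, fb_size, PAT_WRITE_COMBINING);
 *
 * As described above, the range must consist entirely of valid 4K
 * kernel mappings or EINVAL is returned.
 */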

/*
 * perform the pmap work for mincore
 */
int
pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
{
	pt_entry_t *ptep, pte;
	vm_paddr_t pa;
	int val;

	PMAP_LOCK(pmap);
retry:
	ptep = pmap_pte(pmap, addr);
	pte = (ptep != NULL) ? PT_GET(ptep) : 0;
	pmap_pte_release(ptep);
	val = 0;
	if ((pte & PG_V) != 0) {
		val |= MINCORE_INCORE;
		if ((pte & (PG_M | PG_RW)) == (PG_M | PG_RW))
			val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
		if ((pte & PG_A) != 0)
			val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
	}
	if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) !=
	    (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) &&
	    (pte & (PG_MANAGED | PG_V)) == (PG_MANAGED | PG_V)) {
		pa = pte & PG_FRAME;
		/* Ensure that "PHYS_TO_VM_PAGE(pa)->object" doesn't change. */
		if (vm_page_pa_tryrelock(pmap, pa, locked_pa))
			goto retry;
	} else
		PA_UNLOCK_COND(*locked_pa);
	PMAP_UNLOCK(pmap);
	return (val);
}

void
pmap_activate(struct thread *td)
{
	pmap_t pmap, oldpmap;
	u_int cpuid;
	u_int32_t cr3;

	critical_enter();
	pmap = vmspace_pmap(td->td_proc->p_vmspace);
	oldpmap = PCPU_GET(curpmap);
	cpuid = PCPU_GET(cpuid);
#if defined(SMP)
	CPU_CLR_ATOMIC(cpuid, &oldpmap->pm_active);
	CPU_SET_ATOMIC(cpuid, &pmap->pm_active);
#else
	CPU_CLR(cpuid, &oldpmap->pm_active);
	CPU_SET(cpuid, &pmap->pm_active);
#endif
#ifdef PAE
	cr3 = vtophys(pmap->pm_pdpt);
#else
	cr3 = vtophys(pmap->pm_pdir);
#endif
	/*
	 * pmap_activate is for the current thread on the current cpu
	 */
	td->td_pcb->pcb_cr3 = cr3;
	PT_UPDATES_FLUSH();
	load_cr3(cr3);
	PCPU_SET(curpmap, pmap);
	critical_exit();
}

void
pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
{
}

/*
 * Increase the starting virtual address of the given mapping if a
 * different alignment might result in more superpage mappings.
 */
void
pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
    vm_offset_t *addr, vm_size_t size)
{
	vm_offset_t superpage_offset;

	if (size < NBPDR)
		return;
	if (object != NULL && (object->flags & OBJ_COLORED) != 0)
		offset += ptoa(object->pg_color);
	superpage_offset = offset & PDRMASK;
	if (size - ((NBPDR - superpage_offset) & PDRMASK) < NBPDR ||
	    (*addr & PDRMASK) == superpage_offset)
		return;
	if ((*addr & PDRMASK) < superpage_offset)
		*addr = (*addr & ~PDRMASK) + superpage_offset;
	else
		*addr = ((*addr + PDRMASK) & ~PDRMASK) + superpage_offset;
}
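
/*
 * Worked example (illustrative; assumes 4MB superpages, i.e.
 * NBPDR == 0x400000 and PDRMASK == 0x3fffff): for a 16MB mapping of an
 * object at offset 0x100000 with a proposed *addr of 0x20000000,
 * superpage_offset is 0x100000, so *addr is advanced to 0x20100000.
 * Virtual addresses 0x20400000-0x20800000 then back object offsets
 * 0x400000-0x800000, making that portion of the mapping superpage
 * aligned on both sides and eligible for promotion.
 */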

void
pmap_suspend(void)
{
	pmap_t pmap;
	int i, pdir, offset;
	vm_paddr_t pdirma;
	mmu_update_t mu[4];

	/*
	 * We need to remove the recursive mapping structure from all
	 * our pmaps so that Xen doesn't get confused when it restores
	 * the page tables.  The recursive map lives at page directory
	 * index PTDPTDI.  We assume that the suspend code has stopped
	 * the other vcpus (if any).
	 */
	LIST_FOREACH(pmap, &allpmaps, pm_list) {
		for (i = 0; i < 4; i++) {
			/*
			 * Figure out which page directory (L2) page
			 * contains this bit of the recursive map and
			 * the offset within that page of the map
			 * entry.
			 */
			pdir = (PTDPTDI + i) / NPDEPG;
			offset = (PTDPTDI + i) % NPDEPG;
			pdirma = pmap->pm_pdpt[pdir] & PG_FRAME;
			mu[i].ptr = pdirma + offset * sizeof(pd_entry_t);
			mu[i].val = 0;
		}
		HYPERVISOR_mmu_update(mu, 4, NULL, DOMID_SELF);
	}
}

void
pmap_resume(void)
{
	pmap_t pmap;
	int i, pdir, offset;
	vm_paddr_t pdirma;
	mmu_update_t mu[4];

	/*
	 * Restore the recursive map that we removed on suspend.
	 */
	LIST_FOREACH(pmap, &allpmaps, pm_list) {
		for (i = 0; i < 4; i++) {
			/*
			 * Figure out which page directory (L2) page
			 * contains this bit of the recursive map and
			 * the offset within that page of the map
			 * entry.
			 */
			pdir = (PTDPTDI + i) / NPDEPG;
			offset = (PTDPTDI + i) % NPDEPG;
			pdirma = pmap->pm_pdpt[pdir] & PG_FRAME;
			mu[i].ptr = pdirma + offset * sizeof(pd_entry_t);
			mu[i].val = (pmap->pm_pdpt[i] & PG_FRAME) | PG_V;
		}
		HYPERVISOR_mmu_update(mu, 4, NULL, DOMID_SELF);
	}
}

#if defined(PMAP_DEBUG)
int
pmap_pid_dump(int pid)
{
	pmap_t pmap;
	struct proc *p;
	int npte = 0;
	int index;

	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		if (p->p_pid != pid)
			continue;

		if (p->p_vmspace) {
			int i, j;
			index = 0;
			pmap = vmspace_pmap(p->p_vmspace);
			for (i = 0; i < NPDEPTD; i++) {
				pd_entry_t *pde;
				pt_entry_t *pte;
				vm_offset_t base = i << PDRSHIFT;

				pde = &pmap->pm_pdir[i];
				if (pde && pmap_pde_v(pde)) {
					for (j = 0; j < NPTEPG; j++) {
						vm_offset_t va = base + (j << PAGE_SHIFT);
						if (va >= (vm_offset_t)VM_MIN_KERNEL_ADDRESS) {
							if (index) {
								index = 0;
								printf("\n");
							}
							sx_sunlock(&allproc_lock);
							return (npte);
						}
						pte = pmap_pte(pmap, va);
						if (pte && pmap_pte_v(pte)) {
							pt_entry_t pa;
							vm_page_t m;
							pa = PT_GET(pte);
							m = PHYS_TO_VM_PAGE(pa & PG_FRAME);
							printf("va: 0x%x, pt: 0x%x, h: %d, w: %d, f: 0x%x",
							    va, pa, m->hold_count, m->wire_count,
							    m->flags);
							npte++;
							index++;
							if (index >= 2) {
								index = 0;
								printf("\n");
							} else {
								printf(" ");
							}
						}
					}
				}
			}
		}
	}
	sx_sunlock(&allproc_lock);
	return (npte);
}
#endif

#if defined(DEBUG)

static void	pads(pmap_t pm);
void		pmap_pvdump(vm_paddr_t pa);

/* Print the address space of the pmap. */
static void
pads(pmap_t pm)
{
	int i, j;
	vm_paddr_t va;
	pt_entry_t *ptep;

	if (pm == kernel_pmap)
		return;
	for (i = 0; i < NPDEPTD; i++)
		if (pm->pm_pdir[i])
			for (j = 0; j < NPTEPG; j++) {
				va = (i << PDRSHIFT) + (j << PAGE_SHIFT);
				if (pm == kernel_pmap && va < KERNBASE)
					continue;
				if (pm != kernel_pmap && va > UPT_MAX_ADDRESS)
					continue;
				ptep = pmap_pte(pm, va);
				if (pmap_pte_v(ptep))
					printf("%x:%x ", va, *ptep);
			}
}

void
pmap_pvdump(vm_paddr_t pa)
{
	pv_entry_t pv;
	pmap_t pmap;
	vm_page_t m;

	printf("pa %x", pa);
	m = PHYS_TO_VM_PAGE(pa);
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
		pmap = PV_PMAP(pv);
		printf(" -> pmap %p, va %x", (void *)pmap, pv->pv_va);
		pads(pmap);
	}
	printf(" ");
}
#endif
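
/*
 * The PMAP_DEBUG and DEBUG helpers above are debugging aids meant to
 * be invoked by hand, for example from the ddb(4) prompt (the physical
 * address shown is hypothetical):
 *
 *	call pmap_pvdump(0x1234000)
 *
 * pads() is static and is reached only through pmap_pvdump().
 */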