1/*- 2 * Copyright (c) 1991 Regents of the University of California. 3 * All rights reserved. 4 * Copyright (c) 1994 John S. Dyson 5 * All rights reserved. 6 * Copyright (c) 1994 David Greenman 7 * All rights reserved. 8 * Copyright (c) 2005 Alan L. Cox <alc@cs.rice.edu> 9 * All rights reserved. 10 * 11 * This code is derived from software contributed to Berkeley by 12 * the Systems Programming Group of the University of Utah Computer 13 * Science Department and William Jolitz of UUNET Technologies Inc. 14 * 15 * Redistribution and use in source and binary forms, with or without 16 * modification, are permitted provided that the following conditions 17 * are met: 18 * 1. Redistributions of source code must retain the above copyright 19 * notice, this list of conditions and the following disclaimer. 20 * 2. Redistributions in binary form must reproduce the above copyright 21 * notice, this list of conditions and the following disclaimer in the 22 * documentation and/or other materials provided with the distribution. 23 * 3. All advertising materials mentioning features or use of this software 24 * must display the following acknowledgement: 25 * This product includes software developed by the University of 26 * California, Berkeley and its contributors. 27 * 4. Neither the name of the University nor the names of its contributors 28 * may be used to endorse or promote products derived from this software 29 * without specific prior written permission. 30 * 31 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 32 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 33 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 34 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 35 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 36 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 37 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 38 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 39 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 40 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 41 * SUCH DAMAGE. 42 * 43 * from: @(#)pmap.c 7.7 (Berkeley) 5/12/91 44 */ 45/*- 46 * Copyright (c) 2003 Networks Associates Technology, Inc. 47 * All rights reserved. 48 * 49 * This software was developed for the FreeBSD Project by Jake Burkholder, 50 * Safeport Network Services, and Network Associates Laboratories, the 51 * Security Research Division of Network Associates, Inc. under 52 * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA 53 * CHATS research program. 54 * 55 * Redistribution and use in source and binary forms, with or without 56 * modification, are permitted provided that the following conditions 57 * are met: 58 * 1. Redistributions of source code must retain the above copyright 59 * notice, this list of conditions and the following disclaimer. 60 * 2. Redistributions in binary form must reproduce the above copyright 61 * notice, this list of conditions and the following disclaimer in the 62 * documentation and/or other materials provided with the distribution. 63 * 64 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 65 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 66 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 67 * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 68 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 69 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 70 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 71 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 72 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 73 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 74 * SUCH DAMAGE. 75 */ 76 77#include <sys/cdefs.h> 78__FBSDID("$FreeBSD: head/sys/i386/xen/pmap.c 181747 2008-08-15 04:00:44Z kmacy $"); 79 80/* 81 * Manages physical address maps. 82 * 83 * In addition to hardware address maps, this 84 * module is called upon to provide software-use-only 85 * maps which may or may not be stored in the same 86 * form as hardware maps. These pseudo-maps are 87 * used to store intermediate results from copy 88 * operations to and from address spaces. 89 * 90 * Since the information managed by this module is 91 * also stored by the logical address mapping module, 92 * this module may throw away valid virtual-to-physical 93 * mappings at almost any time. However, invalidations 94 * of virtual-to-physical mappings must be done as 95 * requested. 96 * 97 * In order to cope with hardware architectures which 98 * make virtual-to-physical map invalidates expensive, 99 * this module may delay invalidate or reduced protection 100 * operations until such time as they are actually 101 * necessary. This module is given full information as 102 * to which processors are currently using which maps, 103 * and to when physical maps must be made correct. 104 */ 105 106#define PMAP_DIAGNOSTIC 107 108#include "opt_cpu.h" 109#include "opt_pmap.h" 110#include "opt_msgbuf.h" 111#include "opt_smp.h" 112#include "opt_xbox.h" 113 114#include <sys/param.h> 115#include <sys/systm.h> 116#include <sys/kernel.h> 117#include <sys/ktr.h> 118#include <sys/lock.h> 119#include <sys/malloc.h> 120#include <sys/mman.h> 121#include <sys/msgbuf.h> 122#include <sys/mutex.h> 123#include <sys/proc.h> 124#include <sys/sx.h> 125#include <sys/vmmeter.h> 126#include <sys/sched.h> 127#include <sys/sysctl.h> 128#ifdef SMP 129#include <sys/smp.h> 130#endif 131 132#include <vm/vm.h> 133#include <vm/vm_param.h> 134#include <vm/vm_kern.h> 135#include <vm/vm_page.h> 136#include <vm/vm_map.h> 137#include <vm/vm_object.h> 138#include <vm/vm_extern.h> 139#include <vm/vm_pageout.h> 140#include <vm/vm_pager.h> 141#include <vm/uma.h> 142 143#include <machine/cpu.h> 144#include <machine/cputypes.h> 145#include <machine/md_var.h> 146#include <machine/pcb.h> 147#include <machine/specialreg.h> 148#ifdef SMP 149#include <machine/smp.h> 150#endif 151 152#ifdef XBOX 153#include <machine/xbox.h> 154#endif 155 156#include <xen/interface/xen.h> 157#include <machine/xen/hypervisor.h> 158#include <machine/xen/hypercall.h> 159#include <machine/xen/xenvar.h> 160#include <machine/xen/xenfunc.h> 161 162#if !defined(CPU_DISABLE_SSE) && defined(I686_CPU) 163#define CPU_ENABLE_SSE 164#endif 165 166#ifndef PMAP_SHPGPERPROC 167#define PMAP_SHPGPERPROC 200 168#endif 169 170#if defined(DIAGNOSTIC) 171#define PMAP_DIAGNOSTIC 172#endif 173 174#if !defined(PMAP_DIAGNOSTIC) 175#define PMAP_INLINE __gnu89_inline 176#else 177#define PMAP_INLINE 178#endif 179 180#define PV_STATS 181#ifdef PV_STATS 182#define PV_STAT(x) do { x ; } while (0) 183#else 184#define PV_STAT(x) do { } while (0) 185#endif 186 187#define pa_index(pa) ((pa) >> 
PDRSHIFT) 188#define pa_to_pvh(pa) (&pv_table[pa_index(pa)]) 189 190/* 191 * Get PDEs and PTEs for user/kernel address space 192 */ 193#define pmap_pde(m, v) (&((m)->pm_pdir[(vm_offset_t)(v) >> PDRSHIFT])) 194#define pdir_pde(m, v) (m[(vm_offset_t)(v) >> PDRSHIFT]) 195 196#define pmap_pde_v(pte) ((*(int *)pte & PG_V) != 0) 197#define pmap_pte_w(pte) ((*(int *)pte & PG_W) != 0) 198#define pmap_pte_m(pte) ((*(int *)pte & PG_M) != 0) 199#define pmap_pte_u(pte) ((*(int *)pte & PG_A) != 0) 200#define pmap_pte_v(pte) ((*(int *)pte & PG_V) != 0) 201 202#define pmap_pte_set_prot(pte, v) ((*(int *)pte &= ~PG_PROT), (*(int *)pte |= (v))) 203 204struct pmap kernel_pmap_store; 205LIST_HEAD(pmaplist, pmap); 206static struct pmaplist allpmaps; 207static struct mtx allpmaps_lock; 208 209vm_offset_t virtual_avail; /* VA of first avail page (after kernel bss) */ 210vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */ 211int pgeflag = 0; /* PG_G or-in */ 212int pseflag = 0; /* PG_PS or-in */ 213 214static int nkpt; 215vm_offset_t kernel_vm_end; 216extern u_int32_t KERNend; 217 218#ifdef PAE 219pt_entry_t pg_nx; 220#if !defined(XEN) 221static uma_zone_t pdptzone; 222#endif 223#endif 224 225/* 226 * Data for the pv entry allocation mechanism 227 */ 228static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0; 229static struct md_page *pv_table; 230static int shpgperproc = PMAP_SHPGPERPROC; 231 232struct pv_chunk *pv_chunkbase; /* KVA block for pv_chunks */ 233int pv_maxchunks; /* How many chunks we have KVA for */ 234vm_offset_t pv_vafree; /* freelist stored in the PTE */ 235 236/* 237 * All those kernel PT submaps that BSD is so fond of 238 */ 239struct sysmaps { 240 struct mtx lock; 241 pt_entry_t *CMAP1; 242 pt_entry_t *CMAP2; 243 caddr_t CADDR1; 244 caddr_t CADDR2; 245}; 246static struct sysmaps sysmaps_pcpu[MAXCPU]; 247pt_entry_t *CMAP1 = 0; 248static pt_entry_t *CMAP3; 249caddr_t CADDR1 = 0, ptvmmap = 0; 250static caddr_t CADDR3; 251struct msgbuf *msgbufp = 0; 252 253/* 254 * Crashdump maps. 
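 * crashdumpmap provides MAXDUMPPGS pages of KVA that the crash dump
 * code uses to map arbitrary physical pages while a dump is written.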
255 */ 256static caddr_t crashdumpmap; 257 258static pt_entry_t *PMAP1 = 0, *PMAP2; 259static pt_entry_t *PADDR1 = 0, *PADDR2; 260#ifdef SMP 261static int PMAP1cpu; 262static int PMAP1changedcpu; 263SYSCTL_INT(_debug, OID_AUTO, PMAP1changedcpu, CTLFLAG_RD, 264 &PMAP1changedcpu, 0, 265 "Number of times pmap_pte_quick changed CPU with same PMAP1"); 266#endif 267static int PMAP1changed; 268SYSCTL_INT(_debug, OID_AUTO, PMAP1changed, CTLFLAG_RD, 269 &PMAP1changed, 0, 270 "Number of times pmap_pte_quick changed PMAP1"); 271static int PMAP1unchanged; 272SYSCTL_INT(_debug, OID_AUTO, PMAP1unchanged, CTLFLAG_RD, 273 &PMAP1unchanged, 0, 274 "Number of times pmap_pte_quick didn't change PMAP1"); 275static struct mtx PMAP2mutex; 276 277SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD, 0, "VM/pmap parameters"); 278static int pg_ps_enabled; 279SYSCTL_INT(_vm_pmap, OID_AUTO, pg_ps_enabled, CTLFLAG_RD, &pg_ps_enabled, 0, 280 "Are large page mappings enabled?"); 281 282SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_max, CTLFLAG_RD, &pv_entry_max, 0, 283 "Max number of PV entries"); 284SYSCTL_INT(_vm_pmap, OID_AUTO, shpgperproc, CTLFLAG_RD, &shpgperproc, 0, 285 "Page share factor per proc"); 286 287static void free_pv_entry(pmap_t pmap, pv_entry_t pv); 288static pv_entry_t get_pv_entry(pmap_t locked_pmap, int try); 289 290static vm_page_t pmap_enter_quick_locked(multicall_entry_t **mcl, int *count, pmap_t pmap, vm_offset_t va, 291 vm_page_t m, vm_prot_t prot, vm_page_t mpte); 292static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t sva, 293 vm_page_t *free); 294static void pmap_remove_page(struct pmap *pmap, vm_offset_t va, 295 vm_page_t *free); 296static void pmap_remove_entry(struct pmap *pmap, vm_page_t m, 297 vm_offset_t va); 298static void pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m); 299static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, 300 vm_page_t m); 301 302static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags); 303 304static vm_page_t _pmap_allocpte(pmap_t pmap, unsigned ptepindex, int flags); 305static int _pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m, vm_page_t *free); 306static pt_entry_t *pmap_pte_quick(pmap_t pmap, vm_offset_t va); 307static void pmap_pte_release(pt_entry_t *pte); 308static int pmap_unuse_pt(pmap_t, vm_offset_t, vm_page_t *); 309static vm_offset_t pmap_kmem_choose(vm_offset_t addr); 310static boolean_t pmap_is_prefaultable_locked(pmap_t pmap, vm_offset_t addr); 311static void pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode); 312 313 314#if defined(PAE) && !defined(XEN) 315static void *pmap_pdpt_allocf(uma_zone_t zone, int bytes, u_int8_t *flags, int wait); 316#endif 317 318CTASSERT(1 << PDESHIFT == sizeof(pd_entry_t)); 319CTASSERT(1 << PTESHIFT == sizeof(pt_entry_t)); 320 321/* 322 * If you get an error here, then you set KVA_PAGES wrong! See the 323 * description of KVA_PAGES in sys/i386/include/pmap.h. It must be 324 * multiple of 4 for a normal kernel, or a multiple of 8 for a PAE. 
325 */ 326CTASSERT(KERNBASE % (1 << 24) == 0); 327 328 329 330static __inline void 331pagezero(void *page) 332{ 333#if defined(I686_CPU) 334 if (cpu_class == CPUCLASS_686) { 335#if defined(CPU_ENABLE_SSE) 336 if (cpu_feature & CPUID_SSE2) 337 sse2_pagezero(page); 338 else 339#endif 340 i686_pagezero(page); 341 } else 342#endif 343 bzero(page, PAGE_SIZE); 344} 345 346void 347pd_set(struct pmap *pmap, int ptepindex, vm_paddr_t val, int type) 348{ 349 vm_paddr_t pdir_ma = vtomach(&pmap->pm_pdir[ptepindex]); 350 351 switch (type) { 352 case SH_PD_SET_VA: 353#if 0 354 xen_queue_pt_update(shadow_pdir_ma, 355 xpmap_ptom(val & ~(PG_RW))); 356#endif 357 xen_queue_pt_update(pdir_ma, 358 xpmap_ptom(val)); 359 break; 360 case SH_PD_SET_VA_MA: 361#if 0 362 xen_queue_pt_update(shadow_pdir_ma, 363 val & ~(PG_RW)); 364#endif 365 xen_queue_pt_update(pdir_ma, val); 366 break; 367 case SH_PD_SET_VA_CLEAR: 368#if 0 369 xen_queue_pt_update(shadow_pdir_ma, 0); 370#endif 371 xen_queue_pt_update(pdir_ma, 0); 372 break; 373 } 374} 375 376/* 377 * Move the kernel virtual free pointer to the next 378 * 4MB. This is used to help improve performance 379 * by using a large (4MB) page for much of the kernel 380 * (.text, .data, .bss) 381 */ 382static vm_offset_t 383pmap_kmem_choose(vm_offset_t addr) 384{ 385 vm_offset_t newaddr = addr; 386 387#ifndef DISABLE_PSE 388 if (cpu_feature & CPUID_PSE) 389 newaddr = (addr + PDRMASK) & ~PDRMASK; 390#endif 391 return newaddr; 392} 393 394/* 395 * Bootstrap the system enough to run with virtual memory. 396 * 397 * On the i386 this is called after mapping has already been enabled 398 * and just syncs the pmap module with what has already been done. 399 * [We can't call it easily with mapping off since the kernel is not 400 * mapped with PA == VA, hence we would have to relocate every address 401 * from the linked base (virtual) address "KERNBASE" to the actual 402 * (physical) address starting relative to 0] 403 */ 404void 405pmap_bootstrap(vm_paddr_t firstaddr) 406{ 407 vm_offset_t va; 408 pt_entry_t *pte, *unused; 409 struct sysmaps *sysmaps; 410 int i; 411 412 /* 413 * XXX The calculation of virtual_avail is wrong. It's NKPT*PAGE_SIZE too 414 * large. It should instead be correctly calculated in locore.s and 415 * not based on 'first' (which is a physical address, not a virtual 416 * address, for the start of unused physical memory). The kernel 417 * page tables are NOT double mapped and thus should not be included 418 * in this calculation. 419 */ 420 virtual_avail = (vm_offset_t) KERNBASE + firstaddr; 421 virtual_avail = pmap_kmem_choose(virtual_avail); 422 423 virtual_end = VM_MAX_KERNEL_ADDRESS; 424 425 /* 426 * Initialize the kernel pmap (which is statically allocated). 427 */ 428 PMAP_LOCK_INIT(kernel_pmap); 429 kernel_pmap->pm_pdir = (pd_entry_t *) (KERNBASE + (u_int)IdlePTD); 430#ifdef PAE 431 kernel_pmap->pm_pdpt = (pdpt_entry_t *) (KERNBASE + (u_int)IdlePDPT); 432#endif 433 kernel_pmap->pm_active = -1; /* don't allow deactivation */ 434 TAILQ_INIT(&kernel_pmap->pm_pvchunk); 435 LIST_INIT(&allpmaps); 436 mtx_init(&allpmaps_lock, "allpmaps", NULL, MTX_SPIN); 437 mtx_lock_spin(&allpmaps_lock); 438 LIST_INSERT_HEAD(&allpmaps, kernel_pmap, pm_list); 439 mtx_unlock_spin(&allpmaps_lock); 440 nkpt = NKPT; 441 442 /* 443 * Reserve some special page table entries/VA space for temporary 444 * mapping of pages. 
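 *
 * Each SYSMAP() invocation below hands out "n" pages of KVA from the
 * cursor "va" together with a pointer to the matching page table
 * entries: the virtual window lands in "v" and the PTE pointer in
 * "p".  For example, SYSMAP(caddr_t, CMAP1, CADDR1, 1) leaves CADDR1
 * pointing at a one-page window and CMAP1 at the PTE that maps it,
 * so the window can later be retargeted with a single PTE store.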
445 */ 446#define SYSMAP(c, p, v, n) \ 447 v = (c)va; va += ((n)*PAGE_SIZE); p = pte; pte += (n); 448 449 va = virtual_avail; 450 pte = vtopte(va); 451 452 /* 453 * CMAP1/CMAP2 are used for zeroing and copying pages. 454 * CMAP3 is used for the idle process page zeroing. 455 */ 456 for (i = 0; i < MAXCPU; i++) { 457 sysmaps = &sysmaps_pcpu[i]; 458 mtx_init(&sysmaps->lock, "SYSMAPS", NULL, MTX_DEF); 459 SYSMAP(caddr_t, sysmaps->CMAP1, sysmaps->CADDR1, 1) 460 SYSMAP(caddr_t, sysmaps->CMAP2, sysmaps->CADDR2, 1) 461 } 462 SYSMAP(caddr_t, CMAP1, CADDR1, 1) 463 SYSMAP(caddr_t, CMAP3, CADDR3, 1) 464 PT_SET_MA(CADDR3, 0); 465 466 /* 467 * Crashdump maps. 468 */ 469 SYSMAP(caddr_t, unused, crashdumpmap, MAXDUMPPGS) 470 471 /* 472 * ptvmmap is used for reading arbitrary physical pages via /dev/mem. 473 */ 474 SYSMAP(caddr_t, unused, ptvmmap, 1) 475 476 /* 477 * msgbufp is used to map the system message buffer. 478 */ 479 SYSMAP(struct msgbuf *, unused, msgbufp, atop(round_page(MSGBUF_SIZE))) 480 481 /* 482 * ptemap is used for pmap_pte_quick 483 */ 484 SYSMAP(pt_entry_t *, PMAP1, PADDR1, 1); 485 SYSMAP(pt_entry_t *, PMAP2, PADDR2, 1); 486 487 mtx_init(&PMAP2mutex, "PMAP2", NULL, MTX_DEF); 488 489 virtual_avail = va; 490 PT_SET_MA(CADDR1, 0); 491 492 /* 493 * Leave in place an identity mapping (virt == phys) for the low 1 MB 494 * physical memory region that is used by the ACPI wakeup code. This 495 * mapping must not have PG_G set. 496 */ 497#ifndef XEN 498 /* 499 * leave here deliberately to show that this is not supported 500 */ 501#ifdef XBOX 502 /* FIXME: This is gross, but needed for the XBOX. Since we are in such 503 * an early stadium, we cannot yet neatly map video memory ... :-( 504 * Better fixes are very welcome! */ 505 if (!arch_i386_is_xbox) 506#endif 507 for (i = 1; i < NKPT; i++) 508 PTD[i] = 0; 509 510 /* Initialize the PAT MSR if present. */ 511 pmap_init_pat(); 512 513 /* Turn on PG_G on kernel page(s) */ 514 pmap_set_pg(); 515#endif 516} 517 518/* 519 * Setup the PAT MSR. 520 */ 521void 522pmap_init_pat(void) 523{ 524 uint64_t pat_msr; 525 526 /* Bail if this CPU doesn't implement PAT. */ 527 if (!(cpu_feature & CPUID_PAT)) 528 return; 529 530#ifdef PAT_WORKS 531 /* 532 * Leave the indices 0-3 at the default of WB, WT, UC, and UC-. 533 * Program 4 and 5 as WP and WC. 534 * Leave 6 and 7 as UC and UC-. 535 */ 536 pat_msr = rdmsr(MSR_PAT); 537 pat_msr &= ~(PAT_MASK(4) | PAT_MASK(5)); 538 pat_msr |= PAT_VALUE(4, PAT_WRITE_PROTECTED) | 539 PAT_VALUE(5, PAT_WRITE_COMBINING); 540#else 541 /* 542 * Due to some Intel errata, we can only safely use the lower 4 543 * PAT entries. Thus, just replace PAT Index 2 with WC instead 544 * of UC-. 545 * 546 * Intel Pentium III Processor Specification Update 547 * Errata E.27 (Upper Four PAT Entries Not Usable With Mode B 548 * or Mode C Paging) 549 * 550 * Intel Pentium IV Processor Specification Update 551 * Errata N46 (PAT Index MSB May Be Calculated Incorrectly) 552 */ 553 pat_msr = rdmsr(MSR_PAT); 554 pat_msr &= ~PAT_MASK(2); 555 pat_msr |= PAT_VALUE(2, PAT_WRITE_COMBINING); 556#endif 557 wrmsr(MSR_PAT, pat_msr); 558} 559 560/* 561 * Set PG_G on kernel pages. Only the BSP calls this when SMP is turned on. 
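 *
 * PG_G marks a mapping as global, so its TLB entry survives %cr3
 * reloads on context switch and is only flushed by invlpg or by
 * toggling CR4.PGE.  pgeflag is non-zero only when the CPU
 * advertises PGE; depending on whether PSE mappings are in use
 * (pseflag), either whole NBPDR-sized page directory entries or
 * individual 4K PTEs are tagged below.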
562 */ 563void 564pmap_set_pg(void) 565{ 566 pd_entry_t pdir; 567 pt_entry_t *pte; 568 vm_offset_t va, endva; 569 int i; 570 571 if (pgeflag == 0) 572 return; 573 574 i = KERNLOAD/NBPDR; 575 endva = KERNBASE + KERNend; 576 577 if (pseflag) { 578 va = KERNBASE + KERNLOAD; 579 while (va < endva) { 580 pdir = kernel_pmap->pm_pdir[KPTDI+i]; 581 pdir |= pgeflag; 582 kernel_pmap->pm_pdir[KPTDI+i] = PTD[KPTDI+i] = pdir; 583 invltlb(); /* Play it safe, invltlb() every time */ 584 i++; 585 va += NBPDR; 586 } 587 } else { 588 va = (vm_offset_t)btext; 589 while (va < endva) { 590 pte = vtopte(va); 591 if (*pte & PG_V) 592 *pte |= pgeflag; 593 invltlb(); /* Play it safe, invltlb() every time */ 594 va += PAGE_SIZE; 595 } 596 } 597} 598 599/* 600 * Initialize a vm_page's machine-dependent fields. 601 */ 602void 603pmap_page_init(vm_page_t m) 604{ 605 606 TAILQ_INIT(&m->md.pv_list); 607} 608 609#if defined(PAE) && !defined(XEN) 610 611static MALLOC_DEFINE(M_PMAPPDPT, "pmap", "pmap pdpt"); 612 613static void * 614pmap_pdpt_allocf(uma_zone_t zone, int bytes, u_int8_t *flags, int wait) 615{ 616 *flags = UMA_SLAB_PRIV; 617 return (contigmalloc(PAGE_SIZE, M_PMAPPDPT, 0, 0x0ULL, 0xffffffffULL, 618 1, 0)); 619} 620#endif 621 622/* 623 * ABuse the pte nodes for unmapped kva to thread a kva freelist through. 624 * Requirements: 625 * - Must deal with pages in order to ensure that none of the PG_* bits 626 * are ever set, PG_V in particular. 627 * - Assumes we can write to ptes without pte_store() atomic ops, even 628 * on PAE systems. This should be ok. 629 * - Assumes nothing will ever test these addresses for 0 to indicate 630 * no mapping instead of correctly checking PG_V. 631 * - Assumes a vm_offset_t will fit in a pte (true for i386). 632 * Because PG_V is never set, there can be no mappings to invalidate. 633 */ 634static int ptelist_count = 0; 635static vm_offset_t 636pmap_ptelist_alloc(vm_offset_t *head) 637{ 638 vm_offset_t va; 639 vm_offset_t *phead = (vm_offset_t *)*head; 640 641 if (ptelist_count == 0) { 642 printf("out of memory!!!!!!\n"); 643 return (0); /* Out of memory */ 644 } 645 ptelist_count--; 646 va = phead[ptelist_count]; 647 return (va); 648} 649 650static void 651pmap_ptelist_free(vm_offset_t *head, vm_offset_t va) 652{ 653 vm_offset_t *phead = (vm_offset_t *)*head; 654 655 phead[ptelist_count++] = va; 656} 657 658static void 659pmap_ptelist_init(vm_offset_t *head, void *base, int npages) 660{ 661 int i, nstackpages; 662 vm_offset_t va; 663 vm_page_t m; 664 665 nstackpages = (npages + PAGE_SIZE/sizeof(vm_offset_t) - 1)/ (PAGE_SIZE/sizeof(vm_offset_t)); 666 for (i = 0; i < nstackpages; i++) { 667 va = (vm_offset_t)base + i * PAGE_SIZE; 668 m = vm_page_alloc(NULL, i, 669 VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | 670 VM_ALLOC_ZERO); 671 pmap_qenter(va, &m, 1); 672 } 673 674 *head = (vm_offset_t)base; 675 for (i = npages - 1; i >= nstackpages; i--) { 676 va = (vm_offset_t)base + i * PAGE_SIZE; 677 pmap_ptelist_free(head, va); 678 } 679} 680 681 682/* 683 * Initialize the pmap module. 684 * Called by vm_init, to initialize any structures that the pmap 685 * system needs to map virtual memory. 686 */ 687void 688pmap_init(void) 689{ 690 vm_page_t mpte; 691 vm_size_t s; 692 int i, pv_npg; 693 694 695 /* 696 * Initialize the vm page array entries for the kernel pmap's 697 * page table pages. 
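 * These page table pages were set up early in boot, before the
 * vm_page array existed, so their pindex and phys_addr fields are
 * filled in by hand here.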
698 */ 699 for (i = 0; i < nkpt; i++) { 700 mpte = PHYS_TO_VM_PAGE(PTD[i + KPTDI] & PG_FRAME); 701 KASSERT(mpte >= vm_page_array && 702 mpte < &vm_page_array[vm_page_array_size], 703 ("pmap_init: page table page is out of range")); 704 mpte->pindex = i + KPTDI; 705 mpte->phys_addr = PTD[i + KPTDI] & PG_FRAME; 706 } 707 708 /* 709 * Initialize the address space (zone) for the pv entries. Set a 710 * high water mark so that the system can recover from excessive 711 * numbers of pv entries. 712 */ 713 TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc); 714 pv_entry_max = shpgperproc * maxproc + cnt.v_page_count; 715 TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max); 716 pv_entry_max = roundup(pv_entry_max, _NPCPV); 717 pv_entry_high_water = 9 * (pv_entry_max / 10); 718 719 /* 720 * Are large page mappings enabled? 721 */ 722 TUNABLE_INT_FETCH("vm.pmap.pg_ps_enabled", &pg_ps_enabled); 723 724 /* 725 * Calculate the size of the pv head table for superpages. 726 */ 727 for (i = 0; phys_avail[i + 1]; i += 2); 728 pv_npg = round_4mpage(phys_avail[(i - 2) + 1]) / NBPDR; 729 730 /* 731 * Allocate memory for the pv head table for superpages. 732 */ 733 s = (vm_size_t)(pv_npg * sizeof(struct md_page)); 734 s = round_page(s); 735 pv_table = (struct md_page *)kmem_alloc(kernel_map, s); 736 for (i = 0; i < pv_npg; i++) 737 TAILQ_INIT(&pv_table[i].pv_list); 738 739 pv_maxchunks = MAX(pv_entry_max / _NPCPV, maxproc); 740 pv_chunkbase = (struct pv_chunk *)kmem_alloc_nofault(kernel_map, 741 PAGE_SIZE * pv_maxchunks); 742 if (pv_chunkbase == NULL) 743 panic("pmap_init: not enough kvm for pv chunks"); 744 pmap_ptelist_init(&pv_vafree, pv_chunkbase, pv_maxchunks); 745#if defined(PAE) && !defined(XEN) 746 pdptzone = uma_zcreate("PDPT", NPGPTD * sizeof(pdpt_entry_t), NULL, 747 NULL, NULL, NULL, (NPGPTD * sizeof(pdpt_entry_t)) - 1, 748 UMA_ZONE_VM | UMA_ZONE_NOFREE); 749 uma_zone_set_allocf(pdptzone, pmap_pdpt_allocf); 750#endif 751} 752 753 754/*************************************************** 755 * Low level helper routines..... 756 ***************************************************/ 757 758/* 759 * Determine the appropriate bits to set in a PTE or PDE for a specified 760 * caching mode. 761 */ 762static int 763pmap_cache_bits(int mode, boolean_t is_pde) 764{ 765 int pat_flag, pat_index, cache_bits; 766 767 /* The PAT bit is different for PTE's and PDE's. */ 768 pat_flag = is_pde ? PG_PDE_PAT : PG_PTE_PAT; 769 770 /* If we don't support PAT, map extended modes to older ones. */ 771 if (!(cpu_feature & CPUID_PAT)) { 772 switch (mode) { 773 case PAT_UNCACHEABLE: 774 case PAT_WRITE_THROUGH: 775 case PAT_WRITE_BACK: 776 break; 777 case PAT_UNCACHED: 778 case PAT_WRITE_COMBINING: 779 case PAT_WRITE_PROTECTED: 780 mode = PAT_UNCACHEABLE; 781 break; 782 } 783 } 784 785 /* Map the caching mode to a PAT index. 
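	 * The 3-bit index is encoded into the PTE/PDE as the PAT flag
	 * (bit 2), PG_NC_PCD (bit 1) and PG_NC_PWT (bit 0).  For
	 * example, PAT_WRITE_COMBINING maps to index 2 here (PAT entry
	 * 2 is reprogrammed to WC by pmap_init_pat()), which sets
	 * PG_NC_PCD and leaves PWT and the PAT flag clear.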
*/ 786 switch (mode) { 787#ifdef PAT_WORKS 788 case PAT_UNCACHEABLE: 789 pat_index = 3; 790 break; 791 case PAT_WRITE_THROUGH: 792 pat_index = 1; 793 break; 794 case PAT_WRITE_BACK: 795 pat_index = 0; 796 break; 797 case PAT_UNCACHED: 798 pat_index = 2; 799 break; 800 case PAT_WRITE_COMBINING: 801 pat_index = 5; 802 break; 803 case PAT_WRITE_PROTECTED: 804 pat_index = 4; 805 break; 806#else 807 case PAT_UNCACHED: 808 case PAT_UNCACHEABLE: 809 case PAT_WRITE_PROTECTED: 810 pat_index = 3; 811 break; 812 case PAT_WRITE_THROUGH: 813 pat_index = 1; 814 break; 815 case PAT_WRITE_BACK: 816 pat_index = 0; 817 break; 818 case PAT_WRITE_COMBINING: 819 pat_index = 2; 820 break; 821#endif 822 default: 823 panic("Unknown caching mode %d\n", mode); 824 } 825 826 /* Map the 3-bit index value into the PAT, PCD, and PWT bits. */ 827 cache_bits = 0; 828 if (pat_index & 0x4) 829 cache_bits |= pat_flag; 830 if (pat_index & 0x2) 831 cache_bits |= PG_NC_PCD; 832 if (pat_index & 0x1) 833 cache_bits |= PG_NC_PWT; 834 return (cache_bits); 835} 836#ifdef SMP 837/* 838 * For SMP, these functions have to use the IPI mechanism for coherence. 839 * 840 * N.B.: Before calling any of the following TLB invalidation functions, 841 * the calling processor must ensure that all stores updating a non- 842 * kernel page table are globally performed. Otherwise, another 843 * processor could cache an old, pre-update entry without being 844 * invalidated. This can happen one of two ways: (1) The pmap becomes 845 * active on another processor after its pm_active field is checked by 846 * one of the following functions but before a store updating the page 847 * table is globally performed. (2) The pmap becomes active on another 848 * processor before its pm_active field is checked but due to 849 * speculative loads one of the following functions stills reads the 850 * pmap as inactive on the other processor. 851 * 852 * The kernel page table is exempt because its pm_active field is 853 * immutable. The kernel page table is always active on every 854 * processor. 
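 *
 * In this Xen pmap, pmap_invalidate_page() and pmap_invalidate_range()
 * additionally finish with PT_UPDATES_FLUSH() so that any page table
 * writes still queued for the hypervisor are pushed out as well.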
855 */ 856void 857pmap_invalidate_page(pmap_t pmap, vm_offset_t va) 858{ 859 u_int cpumask; 860 u_int other_cpus; 861 862 CTR2(KTR_PMAP, "pmap_invalidate_page: pmap=%p va=0x%x", 863 pmap, va); 864 865 sched_pin(); 866 if (pmap == kernel_pmap || pmap->pm_active == all_cpus) { 867 invlpg(va); 868 smp_invlpg(va); 869 } else { 870 cpumask = PCPU_GET(cpumask); 871 other_cpus = PCPU_GET(other_cpus); 872 if (pmap->pm_active & cpumask) 873 invlpg(va); 874 if (pmap->pm_active & other_cpus) 875 smp_masked_invlpg(pmap->pm_active & other_cpus, va); 876 } 877 sched_unpin(); 878 PT_UPDATES_FLUSH(); 879} 880 881void 882pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) 883{ 884 u_int cpumask; 885 u_int other_cpus; 886 vm_offset_t addr; 887 888 CTR3(KTR_PMAP, "pmap_invalidate_page: pmap=%p eva=0x%x sva=0x%x", 889 pmap, sva, eva); 890 891 sched_pin(); 892 if (pmap == kernel_pmap || pmap->pm_active == all_cpus) { 893 for (addr = sva; addr < eva; addr += PAGE_SIZE) 894 invlpg(addr); 895 smp_invlpg_range(sva, eva); 896 } else { 897 cpumask = PCPU_GET(cpumask); 898 other_cpus = PCPU_GET(other_cpus); 899 if (pmap->pm_active & cpumask) 900 for (addr = sva; addr < eva; addr += PAGE_SIZE) 901 invlpg(addr); 902 if (pmap->pm_active & other_cpus) 903 smp_masked_invlpg_range(pmap->pm_active & other_cpus, 904 sva, eva); 905 } 906 sched_unpin(); 907 PT_UPDATES_FLUSH(); 908} 909 910void 911pmap_invalidate_all(pmap_t pmap) 912{ 913 u_int cpumask; 914 u_int other_cpus; 915 916 CTR1(KTR_PMAP, "pmap_invalidate_page: pmap=%p", pmap); 917 918 sched_pin(); 919 if (pmap == kernel_pmap || pmap->pm_active == all_cpus) { 920 invltlb(); 921 smp_invltlb(); 922 } else { 923 cpumask = PCPU_GET(cpumask); 924 other_cpus = PCPU_GET(other_cpus); 925 if (pmap->pm_active & cpumask) 926 invltlb(); 927 if (pmap->pm_active & other_cpus) 928 smp_masked_invltlb(pmap->pm_active & other_cpus); 929 } 930 sched_unpin(); 931} 932 933void 934pmap_invalidate_cache(void) 935{ 936 937 sched_pin(); 938 wbinvd(); 939 smp_cache_flush(); 940 sched_unpin(); 941} 942#else /* !SMP */ 943/* 944 * Normal, non-SMP, 486+ invalidation functions. 945 * We inline these within pmap.c for speed. 946 */ 947PMAP_INLINE void 948pmap_invalidate_page(pmap_t pmap, vm_offset_t va) 949{ 950 CTR2(KTR_PMAP, "pmap_invalidate_page: pmap=%p va=0x%x", 951 pmap, va); 952 953 if (pmap == kernel_pmap || pmap->pm_active) 954 invlpg(va); 955 PT_UPDATES_FLUSH(); 956} 957 958PMAP_INLINE void 959pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) 960{ 961 vm_offset_t addr; 962 963 if (eva - sva > PAGE_SIZE) 964 CTR3(KTR_PMAP, "pmap_invalidate_range: pmap=%p sva=0x%x eva=0x%x", 965 pmap, sva, eva); 966 967 if (pmap == kernel_pmap || pmap->pm_active) 968 for (addr = sva; addr < eva; addr += PAGE_SIZE) 969 invlpg(addr); 970 PT_UPDATES_FLUSH(); 971} 972 973PMAP_INLINE void 974pmap_invalidate_all(pmap_t pmap) 975{ 976 977 CTR1(KTR_PMAP, "pmap_invalidate_all: pmap=%p", pmap); 978 979 if (pmap == kernel_pmap || pmap->pm_active) 980 invltlb(); 981} 982 983PMAP_INLINE void 984pmap_invalidate_cache(void) 985{ 986 987 wbinvd(); 988} 989#endif /* !SMP */ 990 991/* 992 * Are we current address space or kernel? N.B. We return FALSE when 993 * a pmap's page table is in use because a kernel thread is borrowing 994 * it. The borrowed page table can change spontaneously, making any 995 * dependence on its continued use subject to a race condition. 
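 * The check below compares the frame of this pmap's recursive
 * page directory slot (pm_pdir[PTDPTDI]) with the one currently
 * installed (PTDpde[0]); they match only when this pmap's page
 * tables are the ones reachable through the recursive mapping.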
996 */ 997static __inline int 998pmap_is_current(pmap_t pmap) 999{ 1000 1001 return (pmap == kernel_pmap || 1002 (pmap == vmspace_pmap(curthread->td_proc->p_vmspace) && 1003 (pmap->pm_pdir[PTDPTDI] & PG_FRAME) == (PTDpde[0] & PG_FRAME))); 1004} 1005 1006/* 1007 * If the given pmap is not the current or kernel pmap, the returned pte must 1008 * be released by passing it to pmap_pte_release(). 1009 */ 1010pt_entry_t * 1011pmap_pte(pmap_t pmap, vm_offset_t va) 1012{ 1013 pd_entry_t newpf; 1014 pd_entry_t *pde; 1015 1016 pde = pmap_pde(pmap, va); 1017 if (*pde & PG_PS) 1018 return (pde); 1019 if (*pde != 0) { 1020 /* are we current address space or kernel? */ 1021 if (pmap_is_current(pmap)) 1022 return (vtopte(va)); 1023 mtx_lock(&PMAP2mutex); 1024 newpf = *pde & PG_FRAME; 1025 if ((*PMAP2 & PG_FRAME) != newpf) { 1026 PT_SET_MA(PADDR2, newpf | PG_V | PG_A | PG_M); 1027 CTR3(KTR_PMAP, "pmap_pte: pmap=%p va=0x%x newpte=0x%08x", 1028 pmap, va, (*PMAP2 & 0xffffffff)); 1029 } 1030 1031 return (PADDR2 + (i386_btop(va) & (NPTEPG - 1))); 1032 } 1033 return (0); 1034} 1035 1036/* 1037 * Releases a pte that was obtained from pmap_pte(). Be prepared for the pte 1038 * being NULL. 1039 */ 1040static __inline void 1041pmap_pte_release(pt_entry_t *pte) 1042{ 1043 1044 if ((pt_entry_t *)((vm_offset_t)pte & ~PAGE_MASK) == PADDR2) { 1045 CTR1(KTR_PMAP, "pmap_pte_release: pte=0x%jx", 1046 *PMAP2); 1047 PT_SET_VA(PMAP2, 0, TRUE); 1048 mtx_unlock(&PMAP2mutex); 1049 } 1050} 1051 1052static __inline void 1053invlcaddr(void *caddr) 1054{ 1055 1056 invlpg((u_int)caddr); 1057 PT_UPDATES_FLUSH(); 1058} 1059 1060/* 1061 * Super fast pmap_pte routine best used when scanning 1062 * the pv lists. This eliminates many coarse-grained 1063 * invltlb calls. Note that many of the pv list 1064 * scans are across different pmaps. It is very wasteful 1065 * to do an entire invltlb for checking a single mapping. 1066 * 1067 * If the given pmap is not the current pmap, vm_page_queue_mtx 1068 * must be held and curthread pinned to a CPU. 1069 */ 1070static pt_entry_t * 1071pmap_pte_quick(pmap_t pmap, vm_offset_t va) 1072{ 1073 pd_entry_t newpf; 1074 pd_entry_t *pde; 1075 1076 pde = pmap_pde(pmap, va); 1077 if (*pde & PG_PS) 1078 return (pde); 1079 if (*pde != 0) { 1080 /* are we current address space or kernel? */ 1081 if (pmap_is_current(pmap)) 1082 return (vtopte(va)); 1083 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 1084 KASSERT(curthread->td_pinned > 0, ("curthread not pinned")); 1085 newpf = *pde & PG_FRAME; 1086 if ((*PMAP1 & PG_FRAME) != newpf) { 1087 PT_SET_MA(PADDR1, newpf | PG_V | PG_A | PG_M); 1088 CTR3(KTR_PMAP, "pmap_pte_quick: pmap=%p va=0x%x newpte=0x%08x", 1089 pmap, va, (u_long)*PMAP1); 1090 1091#ifdef SMP 1092 PMAP1cpu = PCPU_GET(cpuid); 1093#endif 1094 PMAP1changed++; 1095 } else 1096#ifdef SMP 1097 if (PMAP1cpu != PCPU_GET(cpuid)) { 1098 PMAP1cpu = PCPU_GET(cpuid); 1099 invlcaddr(PADDR1); 1100 PMAP1changedcpu++; 1101 } else 1102#endif 1103 PMAP1unchanged++; 1104 return (PADDR1 + (i386_btop(va) & (NPTEPG - 1))); 1105 } 1106 return (0); 1107} 1108 1109/* 1110 * Routine: pmap_extract 1111 * Function: 1112 * Extract the physical page address associated 1113 * with the given map/virtual_address pair. 
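 *
 *	A return value of zero means no valid mapping exists.  Under
 *	Xen the PTE holds a machine frame, so xpmap_mtop() is applied
 *	before the pseudo-physical address is returned; see
 *	pmap_extract_ma() for the unconverted variant.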
1114 */ 1115vm_paddr_t 1116pmap_extract(pmap_t pmap, vm_offset_t va) 1117{ 1118 vm_paddr_t rtval; 1119 pt_entry_t *pte; 1120 pd_entry_t pde; 1121 pt_entry_t pteval; 1122 1123 rtval = 0; 1124 PMAP_LOCK(pmap); 1125 pde = pmap->pm_pdir[va >> PDRSHIFT]; 1126 if (pde != 0) { 1127 if ((pde & PG_PS) != 0) { 1128 rtval = xpmap_mtop(pde & PG_PS_FRAME) | (va & PDRMASK); 1129 PMAP_UNLOCK(pmap); 1130 return rtval; 1131 } 1132 pte = pmap_pte(pmap, va); 1133 pteval = *pte ? xpmap_mtop(*pte) : 0; 1134 rtval = (pteval & PG_FRAME) | (va & PAGE_MASK); 1135 pmap_pte_release(pte); 1136 } 1137 PMAP_UNLOCK(pmap); 1138 return (rtval); 1139} 1140 1141/* 1142 * Routine: pmap_extract_ma 1143 * Function: 1144 * Like pmap_extract, but returns machine address 1145 */ 1146vm_paddr_t 1147pmap_extract_ma(pmap_t pmap, vm_offset_t va) 1148{ 1149 vm_paddr_t rtval; 1150 pt_entry_t *pte; 1151 pd_entry_t pde; 1152 1153 rtval = 0; 1154 PMAP_LOCK(pmap); 1155 pde = pmap->pm_pdir[va >> PDRSHIFT]; 1156 if (pde != 0) { 1157 if ((pde & PG_PS) != 0) { 1158 rtval = (pde & ~PDRMASK) | (va & PDRMASK); 1159 PMAP_UNLOCK(pmap); 1160 return rtval; 1161 } 1162 pte = pmap_pte(pmap, va); 1163 rtval = (*pte & PG_FRAME) | (va & PAGE_MASK); 1164 pmap_pte_release(pte); 1165 } 1166 PMAP_UNLOCK(pmap); 1167 return (rtval); 1168} 1169 1170/* 1171 * Routine: pmap_extract_and_hold 1172 * Function: 1173 * Atomically extract and hold the physical page 1174 * with the given pmap and virtual address pair 1175 * if that mapping permits the given protection. 1176 */ 1177vm_page_t 1178pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot) 1179{ 1180 pd_entry_t pde; 1181 pt_entry_t pte; 1182 vm_page_t m; 1183 1184 m = NULL; 1185 vm_page_lock_queues(); 1186 PMAP_LOCK(pmap); 1187 pde = PT_GET(pmap_pde(pmap, va)); 1188 if (pde != 0) { 1189 if (pde & PG_PS) { 1190 if ((pde & PG_RW) || (prot & VM_PROT_WRITE) == 0) { 1191 m = PHYS_TO_VM_PAGE((pde & PG_PS_FRAME) | 1192 (va & PDRMASK)); 1193 vm_page_hold(m); 1194 } 1195 } else { 1196 sched_pin(); 1197 pte = PT_GET(pmap_pte_quick(pmap, va)); 1198 if (*PMAP1) 1199 PT_SET_MA(PADDR1, 0); 1200 if ((pte & PG_V) && 1201 ((pte & PG_RW) || (prot & VM_PROT_WRITE) == 0)) { 1202 m = PHYS_TO_VM_PAGE(pte & PG_FRAME); 1203 vm_page_hold(m); 1204 } 1205 sched_unpin(); 1206 } 1207 } 1208 vm_page_unlock_queues(); 1209 PMAP_UNLOCK(pmap); 1210 return (m); 1211} 1212 1213/*************************************************** 1214 * Low level mapping routines..... 1215 ***************************************************/ 1216 1217/* 1218 * Add a wired page to the kva. 1219 * Note: not SMP coherent. 1220 */ 1221void 1222pmap_kenter(vm_offset_t va, vm_paddr_t pa) 1223{ 1224 PT_SET_MA(va, xpmap_ptom(pa)| PG_RW | PG_V | pgeflag); 1225} 1226 1227void 1228pmap_kenter_ma(vm_offset_t va, vm_paddr_t ma) 1229{ 1230 pt_entry_t *pte; 1231 1232 pte = vtopte(va); 1233 pte_store_ma(pte, ma | PG_RW | PG_V | pgeflag); 1234} 1235 1236 1237static __inline void 1238pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode) 1239{ 1240 PT_SET_MA(va, pa | PG_RW | PG_V | pgeflag | pmap_cache_bits(mode, 0)); 1241} 1242 1243/* 1244 * Remove a page from the kernel pagetables. 1245 * Note: not SMP coherent. 1246 */ 1247PMAP_INLINE void 1248pmap_kremove(vm_offset_t va) 1249{ 1250 pt_entry_t *pte; 1251 1252 pte = vtopte(va); 1253 PT_CLEAR_VA(pte, FALSE); 1254} 1255 1256/* 1257 * Used to map a range of physical addresses into kernel 1258 * virtual address space. 1259 * 1260 * The value passed in '*virt' is a suggested virtual address for 1261 * the mapping. 
Architectures which can support a direct-mapped 1262 * physical to virtual region can return the appropriate address 1263 * within that region, leaving '*virt' unchanged. Other 1264 * architectures should map the pages starting at '*virt' and 1265 * update '*virt' with the first usable address after the mapped 1266 * region. 1267 */ 1268vm_offset_t 1269pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot) 1270{ 1271 vm_offset_t va, sva; 1272 1273 va = sva = *virt; 1274 CTR4(KTR_PMAP, "pmap_map: va=0x%x start=0x%jx end=0x%jx prot=0x%x", 1275 va, start, end, prot); 1276 while (start < end) { 1277 pmap_kenter(va, start); 1278 va += PAGE_SIZE; 1279 start += PAGE_SIZE; 1280 } 1281 pmap_invalidate_range(kernel_pmap, sva, va); 1282 *virt = va; 1283 return (sva); 1284} 1285 1286 1287/* 1288 * Add a list of wired pages to the kva 1289 * this routine is only used for temporary 1290 * kernel mappings that do not need to have 1291 * page modification or references recorded. 1292 * Note that old mappings are simply written 1293 * over. The page *must* be wired. 1294 * Note: SMP coherent. Uses a ranged shootdown IPI. 1295 */ 1296void 1297pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count) 1298{ 1299 pt_entry_t *endpte, *pte; 1300 vm_paddr_t pa; 1301 vm_offset_t va = sva; 1302 int mclcount = 0; 1303 multicall_entry_t mcl[16]; 1304 multicall_entry_t *mclp = mcl; 1305 int error; 1306 1307 CTR2(KTR_PMAP, "pmap_qenter:sva=0x%x count=%d", va, count); 1308 pte = vtopte(sva); 1309 endpte = pte + count; 1310 while (pte < endpte) { 1311 pa = xpmap_ptom(VM_PAGE_TO_PHYS(*ma)) | pgeflag | PG_RW | PG_V | PG_M | PG_A; 1312 1313 mclp->op = __HYPERVISOR_update_va_mapping; 1314 mclp->args[0] = va; 1315 mclp->args[1] = (uint32_t)(pa & 0xffffffff); 1316 mclp->args[2] = (uint32_t)(pa >> 32); 1317 mclp->args[3] = (*pte & PG_V) ? UVMF_INVLPG|UVMF_ALL : 0; 1318 1319 va += PAGE_SIZE; 1320 pte++; 1321 ma++; 1322 mclp++; 1323 mclcount++; 1324 if (mclcount == 16) { 1325 error = HYPERVISOR_multicall(mcl, mclcount); 1326 mclp = mcl; 1327 mclcount = 0; 1328 KASSERT(error == 0, ("bad multicall %d", error)); 1329 } 1330 } 1331 if (mclcount) { 1332 error = HYPERVISOR_multicall(mcl, mclcount); 1333 KASSERT(error == 0, ("bad multicall %d", error)); 1334 } 1335 1336#ifdef INVARIANTS 1337 for (pte = vtopte(sva), mclcount = 0; mclcount < count; mclcount++, pte++) 1338 KASSERT(*pte, ("pte not set for va=0x%x", sva + mclcount*PAGE_SIZE)); 1339#endif 1340} 1341 1342 1343/* 1344 * This routine tears out page mappings from the 1345 * kernel -- it is meant only for temporary mappings. 1346 * Note: SMP coherent. Uses a ranged shootdown IPI. 1347 */ 1348void 1349pmap_qremove(vm_offset_t sva, int count) 1350{ 1351 vm_offset_t va; 1352 1353 CTR2(KTR_PMAP, "pmap_qremove: sva=0x%x count=%d", sva, count); 1354 va = sva; 1355 vm_page_lock_queues(); 1356 critical_enter(); 1357 while (count-- > 0) { 1358 pmap_kremove(va); 1359 va += PAGE_SIZE; 1360 } 1361 pmap_invalidate_range(kernel_pmap, sva, va); 1362 critical_exit(); 1363 vm_page_unlock_queues(); 1364} 1365 1366/*************************************************** 1367 * Page table page management routines..... 
1368 ***************************************************/ 1369static __inline void 1370pmap_free_zero_pages(vm_page_t free) 1371{ 1372 vm_page_t m; 1373 1374 while (free != NULL) { 1375 m = free; 1376 free = m->right; 1377 vm_page_free_zero(m); 1378 } 1379} 1380 1381/* 1382 * This routine unholds page table pages, and if the hold count 1383 * drops to zero, then it decrements the wire count. 1384 */ 1385static __inline int 1386pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m, vm_page_t *free) 1387{ 1388 1389 --m->wire_count; 1390 if (m->wire_count == 0) 1391 return _pmap_unwire_pte_hold(pmap, m, free); 1392 else 1393 return 0; 1394} 1395 1396static int 1397_pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m, vm_page_t *free) 1398{ 1399 vm_offset_t pteva; 1400 1401 PT_UPDATES_FLUSH(); 1402 /* 1403 * unmap the page table page 1404 */ 1405 xen_pt_unpin(pmap->pm_pdir[m->pindex]); 1406 /* 1407 * page *might* contain residual mapping :-/ 1408 */ 1409 PD_CLEAR_VA(pmap, m->pindex, TRUE); 1410 pmap_zero_page(m); 1411 --pmap->pm_stats.resident_count; 1412 1413 /* 1414 * This is a release store so that the ordinary store unmapping 1415 * the page table page is globally performed before TLB shoot- 1416 * down is begun. 1417 */ 1418 atomic_subtract_rel_int(&cnt.v_wire_count, 1); 1419 1420 /* 1421 * Do an invltlb to make the invalidated mapping 1422 * take effect immediately. 1423 */ 1424 pteva = VM_MAXUSER_ADDRESS + i386_ptob(m->pindex); 1425 pmap_invalidate_page(pmap, pteva); 1426 1427 /* 1428 * Put page on a list so that it is released after 1429 * *ALL* TLB shootdown is done 1430 */ 1431 m->right = *free; 1432 *free = m; 1433 1434 return 1; 1435} 1436 1437/* 1438 * After removing a page table entry, this routine is used to 1439 * conditionally free the page, and manage the hold/wire counts. 1440 */ 1441static int 1442pmap_unuse_pt(pmap_t pmap, vm_offset_t va, vm_page_t *free) 1443{ 1444 pd_entry_t ptepde; 1445 vm_page_t mpte; 1446 1447 if (va >= VM_MAXUSER_ADDRESS) 1448 return 0; 1449 ptepde = PT_GET(pmap_pde(pmap, va)); 1450 mpte = PHYS_TO_VM_PAGE(ptepde & PG_FRAME); 1451 return pmap_unwire_pte_hold(pmap, mpte, free); 1452} 1453 1454void 1455pmap_pinit0(pmap_t pmap) 1456{ 1457 1458 PMAP_LOCK_INIT(pmap); 1459 pmap->pm_pdir = (pd_entry_t *)(KERNBASE + (vm_offset_t)IdlePTD); 1460#ifdef PAE 1461 pmap->pm_pdpt = (pdpt_entry_t *)(KERNBASE + (vm_offset_t)IdlePDPT); 1462#endif 1463 pmap->pm_active = 0; 1464 PCPU_SET(curpmap, pmap); 1465 TAILQ_INIT(&pmap->pm_pvchunk); 1466 bzero(&pmap->pm_stats, sizeof pmap->pm_stats); 1467 mtx_lock_spin(&allpmaps_lock); 1468 LIST_INSERT_HEAD(&allpmaps, pmap, pm_list); 1469 mtx_unlock_spin(&allpmaps_lock); 1470} 1471 1472/* 1473 * Initialize a preallocated and zeroed pmap structure, 1474 * such as one in a vmspace structure. 1475 */ 1476int 1477pmap_pinit(pmap_t pmap) 1478{ 1479 vm_page_t m, ptdpg[NPGPTD + 1]; 1480 int npgptd = NPGPTD + 1; 1481 static int color; 1482 int i; 1483 1484 PMAP_LOCK_INIT(pmap); 1485 1486 /* 1487 * No need to allocate page table space yet but we do need a valid 1488 * page directory table. 
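	 * The NBPTD bytes of KVA for the page directory are only
	 * allocated the first time this pmap structure is used
	 * (pm_pdir == NULL); the mapping is kept and reused when the
	 * structure is recycled.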
1489 */ 1490 if (pmap->pm_pdir == NULL) { 1491 pmap->pm_pdir = (pd_entry_t *)kmem_alloc_nofault(kernel_map, 1492 NBPTD); 1493 if (pmap->pm_pdir == NULL) { 1494 PMAP_LOCK_DESTROY(pmap); 1495 return (0); 1496 } 1497#if defined(XEN) && defined(PAE) 1498 pmap->pm_pdpt = (pd_entry_t *)kmem_alloc_nofault(kernel_map, 1); 1499#endif 1500 1501#if defined(PAE) && !defined(XEN) 1502 pmap->pm_pdpt = uma_zalloc(pdptzone, M_WAITOK | M_ZERO); 1503 KASSERT(((vm_offset_t)pmap->pm_pdpt & 1504 ((NPGPTD * sizeof(pdpt_entry_t)) - 1)) == 0, 1505 ("pmap_pinit: pdpt misaligned")); 1506 KASSERT(pmap_kextract((vm_offset_t)pmap->pm_pdpt) < (4ULL<<30), 1507 ("pmap_pinit: pdpt above 4g")); 1508#endif 1509 } 1510 1511 /* 1512 * allocate the page directory page(s) 1513 */ 1514 for (i = 0; i < npgptd;) { 1515 m = vm_page_alloc(NULL, color++, 1516 VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | 1517 VM_ALLOC_ZERO); 1518 if (m == NULL) 1519 VM_WAIT; 1520 else { 1521 ptdpg[i++] = m; 1522 } 1523 } 1524 pmap_qenter((vm_offset_t)pmap->pm_pdir, ptdpg, NPGPTD); 1525 for (i = 0; i < NPGPTD; i++) { 1526 if ((ptdpg[i]->flags & PG_ZERO) == 0) 1527 pagezero(&pmap->pm_pdir[i*NPTEPG]); 1528 } 1529 1530 mtx_lock_spin(&allpmaps_lock); 1531 LIST_INSERT_HEAD(&allpmaps, pmap, pm_list); 1532 mtx_unlock_spin(&allpmaps_lock); 1533 /* Wire in kernel global address entries. */ 1534 1535 bcopy(PTD + KPTDI, pmap->pm_pdir + KPTDI, nkpt * sizeof(pd_entry_t)); 1536#ifdef PAE 1537#ifdef XEN 1538 pmap_qenter((vm_offset_t)pmap->pm_pdpt, &ptdpg[NPGPTD], 1); 1539 if ((ptdpg[NPGPTD]->flags & PG_ZERO) == 0) 1540 bzero(pmap->pm_pdpt, PAGE_SIZE); 1541#endif 1542 for (i = 0; i < NPGPTD; i++) { 1543 vm_paddr_t ma; 1544 1545 ma = xpmap_ptom(VM_PAGE_TO_PHYS(ptdpg[i])); 1546 pmap->pm_pdpt[i] = ma | PG_V; 1547 1548 } 1549#endif 1550#ifdef XEN 1551 for (i = 0; i < NPGPTD; i++) { 1552 pt_entry_t *pd; 1553 vm_paddr_t ma; 1554 1555 ma = xpmap_ptom(VM_PAGE_TO_PHYS(ptdpg[i])); 1556 pd = pmap->pm_pdir + (i * NPDEPG); 1557 PT_SET_MA(pd, *vtopte((vm_offset_t)pd) & ~(PG_M|PG_A|PG_U|PG_RW)); 1558#if 0 1559 xen_pgd_pin(ma); 1560#endif 1561 } 1562 1563#ifdef PAE 1564 PT_SET_MA(pmap->pm_pdpt, *vtopte((vm_offset_t)pmap->pm_pdpt) & ~PG_RW); 1565#endif 1566 vm_page_lock_queues(); 1567 xen_flush_queue(); 1568 xen_pgdpt_pin(xpmap_ptom(VM_PAGE_TO_PHYS(ptdpg[NPGPTD]))); 1569 for (i = 0; i < NPGPTD; i++) { 1570 vm_paddr_t ma = xpmap_ptom(VM_PAGE_TO_PHYS(ptdpg[i])); 1571 PT_SET_VA_MA(&pmap->pm_pdir[PTDPTDI + i], ma | PG_V | PG_A, FALSE); 1572 } 1573 xen_flush_queue(); 1574 vm_page_unlock_queues(); 1575#endif 1576 pmap->pm_active = 0; 1577 TAILQ_INIT(&pmap->pm_pvchunk); 1578 bzero(&pmap->pm_stats, sizeof pmap->pm_stats); 1579 1580 return (1); 1581} 1582 1583/* 1584 * this routine is called if the page table page is not 1585 * mapped correctly. 1586 */ 1587static vm_page_t 1588_pmap_allocpte(pmap_t pmap, unsigned int ptepindex, int flags) 1589{ 1590 vm_paddr_t ptema; 1591 vm_page_t m; 1592 1593 KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT || 1594 (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK, 1595 ("_pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK")); 1596 1597 /* 1598 * Allocate a page table page. 1599 */ 1600 if ((m = vm_page_alloc(NULL, ptepindex, VM_ALLOC_NOOBJ | 1601 VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) { 1602 if (flags & M_WAITOK) { 1603 PMAP_UNLOCK(pmap); 1604 vm_page_unlock_queues(); 1605 VM_WAIT; 1606 vm_page_lock_queues(); 1607 PMAP_LOCK(pmap); 1608 } 1609 1610 /* 1611 * Indicate the need to retry. 
While waiting, the page table 1612 * page may have been allocated. 1613 */ 1614 return (NULL); 1615 } 1616 if ((m->flags & PG_ZERO) == 0) 1617 pmap_zero_page(m); 1618 1619 /* 1620 * Map the pagetable page into the process address space, if 1621 * it isn't already there. 1622 */ 1623 pmap->pm_stats.resident_count++; 1624 1625 ptema = xpmap_ptom(VM_PAGE_TO_PHYS(m)); 1626 xen_pt_pin(ptema); 1627 PT_SET_VA_MA(&pmap->pm_pdir[ptepindex], 1628 (ptema | PG_U | PG_RW | PG_V | PG_A | PG_M), TRUE); 1629 1630 KASSERT(pmap->pm_pdir[ptepindex], 1631 ("_pmap_allocpte: ptepindex=%d did not get mapped", ptepindex)); 1632 return (m); 1633} 1634 1635static vm_page_t 1636pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags) 1637{ 1638 unsigned ptepindex; 1639 pd_entry_t ptema; 1640 vm_page_t m; 1641 1642 KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT || 1643 (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK, 1644 ("pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK")); 1645 1646 /* 1647 * Calculate pagetable page index 1648 */ 1649 ptepindex = va >> PDRSHIFT; 1650retry: 1651 /* 1652 * Get the page directory entry 1653 */ 1654 ptema = pmap->pm_pdir[ptepindex]; 1655 1656 /* 1657 * This supports switching from a 4MB page to a 1658 * normal 4K page. 1659 */ 1660 if (ptema & PG_PS) { 1661 /* 1662 * XXX 1663 */ 1664 pmap->pm_pdir[ptepindex] = 0; 1665 ptema = 0; 1666 pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE; 1667 pmap_invalidate_all(kernel_pmap); 1668 } 1669 1670 /* 1671 * If the page table page is mapped, we just increment the 1672 * hold count, and activate it. 1673 */ 1674 if (ptema & PG_V) { 1675 m = PHYS_TO_VM_PAGE(xpmap_mtop(ptema) & PG_FRAME); 1676 m->wire_count++; 1677 } else { 1678 /* 1679 * Here if the pte page isn't mapped, or if it has 1680 * been deallocated. 1681 */ 1682 CTR3(KTR_PMAP, "pmap_allocpte: pmap=%p va=0x%08x flags=0x%x", 1683 pmap, va, flags); 1684 m = _pmap_allocpte(pmap, ptepindex, flags); 1685 if (m == NULL && (flags & M_WAITOK)) 1686 goto retry; 1687 1688 KASSERT(pmap->pm_pdir[ptepindex], ("ptepindex=%d did not get mapped", ptepindex)); 1689 } 1690 return (m); 1691} 1692 1693 1694/*************************************************** 1695* Pmap allocation/deallocation routines. 1696 ***************************************************/ 1697 1698#ifdef SMP 1699/* 1700 * Deal with a SMP shootdown of other users of the pmap that we are 1701 * trying to dispose of. This can be a bit hairy. 
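 *
 * A remote CPU may still have this pmap's page directory loaded in
 * %cr3 even though its owner has exited, because the switch away
 * from a dead process is done lazily.  pmap_lazyfix() forces each
 * such CPU, via IPI_LAZYPMAP, to reload its own pcb_cr3 and clear
 * itself from pm_active before the page directory pages are freed.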
1702 */ 1703static u_int *lazymask; 1704static u_int lazyptd; 1705static volatile u_int lazywait; 1706 1707void pmap_lazyfix_action(void); 1708 1709void 1710pmap_lazyfix_action(void) 1711{ 1712 u_int mymask = PCPU_GET(cpumask); 1713 1714#ifdef COUNT_IPIS 1715 (*ipi_lazypmap_counts[PCPU_GET(cpuid)])++; 1716#endif 1717 if (rcr3() == lazyptd) 1718 load_cr3(PCPU_GET(curpcb)->pcb_cr3); 1719 atomic_clear_int(lazymask, mymask); 1720 atomic_store_rel_int(&lazywait, 1); 1721} 1722 1723static void 1724pmap_lazyfix_self(u_int mymask) 1725{ 1726 1727 if (rcr3() == lazyptd) 1728 load_cr3(PCPU_GET(curpcb)->pcb_cr3); 1729 atomic_clear_int(lazymask, mymask); 1730} 1731 1732 1733static void 1734pmap_lazyfix(pmap_t pmap) 1735{ 1736 u_int mymask; 1737 u_int mask; 1738 u_int spins; 1739 1740 while ((mask = pmap->pm_active) != 0) { 1741 spins = 50000000; 1742 mask = mask & -mask; /* Find least significant set bit */ 1743 mtx_lock_spin(&smp_ipi_mtx); 1744#ifdef PAE 1745 lazyptd = vtophys(pmap->pm_pdpt); 1746#else 1747 lazyptd = vtophys(pmap->pm_pdir); 1748#endif 1749 mymask = PCPU_GET(cpumask); 1750 if (mask == mymask) { 1751 lazymask = &pmap->pm_active; 1752 pmap_lazyfix_self(mymask); 1753 } else { 1754 atomic_store_rel_int((u_int *)&lazymask, 1755 (u_int)&pmap->pm_active); 1756 atomic_store_rel_int(&lazywait, 0); 1757 ipi_selected(mask, IPI_LAZYPMAP); 1758 while (lazywait == 0) { 1759 ia32_pause(); 1760 if (--spins == 0) 1761 break; 1762 } 1763 } 1764 mtx_unlock_spin(&smp_ipi_mtx); 1765 if (spins == 0) 1766 printf("pmap_lazyfix: spun for 50000000\n"); 1767 } 1768} 1769 1770#else /* SMP */ 1771 1772/* 1773 * Cleaning up on uniprocessor is easy. For various reasons, we're 1774 * unlikely to have to even execute this code, including the fact 1775 * that the cleanup is deferred until the parent does a wait(2), which 1776 * means that another userland process has run. 1777 */ 1778static void 1779pmap_lazyfix(pmap_t pmap) 1780{ 1781 u_int cr3; 1782 1783 cr3 = vtophys(pmap->pm_pdir); 1784 if (cr3 == rcr3()) { 1785 load_cr3(PCPU_GET(curpcb)->pcb_cr3); 1786 pmap->pm_active &= ~(PCPU_GET(cpumask)); 1787 } 1788} 1789#endif /* SMP */ 1790 1791/* 1792 * Release any resources held by the given physical map. 1793 * Called when a pmap initialized by pmap_pinit is being released. 1794 * Should only be called if the map contains no valid mappings. 
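 *
 * Under Xen the page directory pages (and, with PAE, the PDPT page)
 * are pinned as page table pages by the hypervisor, so they are
 * unpinned with xen_pgd_unpin() below before being freed back to
 * the VM system.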
1795 */ 1796void 1797pmap_release(pmap_t pmap) 1798{ 1799 vm_page_t m, ptdpg[2*NPGPTD+1]; 1800 vm_paddr_t ma; 1801 int i; 1802#ifdef XEN 1803#ifdef PAE 1804 int npgptd = NPGPTD + 1; 1805#else 1806 int npgptd = NPGPTD; 1807#endif 1808#else 1809 int npgptd = NPGPTD; 1810#endif 1811 KASSERT(pmap->pm_stats.resident_count == 0, 1812 ("pmap_release: pmap resident count %ld != 0", 1813 pmap->pm_stats.resident_count)); 1814 PT_UPDATES_FLUSH(); 1815 1816 pmap_lazyfix(pmap); 1817 mtx_lock_spin(&allpmaps_lock); 1818 LIST_REMOVE(pmap, pm_list); 1819 mtx_unlock_spin(&allpmaps_lock); 1820 1821 for (i = 0; i < NPGPTD; i++) 1822 ptdpg[i] = PHYS_TO_VM_PAGE(vtophys(pmap->pm_pdir + (i*NPDEPG)) & PG_FRAME); 1823 pmap_qremove((vm_offset_t)pmap->pm_pdir, NPGPTD); 1824#if defined(PAE) && defined(XEN) 1825 ptdpg[NPGPTD] = PHYS_TO_VM_PAGE(vtophys(pmap->pm_pdpt)); 1826#endif 1827 1828 for (i = 0; i < npgptd; i++) { 1829 m = ptdpg[i]; 1830 ma = xpmap_ptom(VM_PAGE_TO_PHYS(m)); 1831 /* unpinning L1 and L2 treated the same */ 1832 xen_pgd_unpin(ma); 1833#ifdef PAE 1834 KASSERT(xpmap_ptom(VM_PAGE_TO_PHYS(m)) == (pmap->pm_pdpt[i] & PG_FRAME), 1835 ("pmap_release: got wrong ptd page")); 1836#endif 1837 m->wire_count--; 1838 atomic_subtract_int(&cnt.v_wire_count, 1); 1839 vm_page_free(m); 1840 } 1841 PMAP_LOCK_DESTROY(pmap); 1842} 1843 1844static int 1845kvm_size(SYSCTL_HANDLER_ARGS) 1846{ 1847 unsigned long ksize = VM_MAX_KERNEL_ADDRESS - KERNBASE; 1848 1849 return sysctl_handle_long(oidp, &ksize, 0, req); 1850} 1851SYSCTL_PROC(_vm, OID_AUTO, kvm_size, CTLTYPE_LONG|CTLFLAG_RD, 1852 0, 0, kvm_size, "IU", "Size of KVM"); 1853 1854static int 1855kvm_free(SYSCTL_HANDLER_ARGS) 1856{ 1857 unsigned long kfree = VM_MAX_KERNEL_ADDRESS - kernel_vm_end; 1858 1859 return sysctl_handle_long(oidp, &kfree, 0, req); 1860} 1861SYSCTL_PROC(_vm, OID_AUTO, kvm_free, CTLTYPE_LONG|CTLFLAG_RD, 1862 0, 0, kvm_free, "IU", "Amount of KVM free"); 1863 1864/* 1865 * grow the number of kernel page table entries, if needed 1866 */ 1867void 1868pmap_growkernel(vm_offset_t addr) 1869{ 1870 struct pmap *pmap; 1871 vm_paddr_t ptppaddr; 1872 vm_page_t nkpg; 1873 pd_entry_t newpdir; 1874 1875 mtx_assert(&kernel_map->system_mtx, MA_OWNED); 1876 if (kernel_vm_end == 0) { 1877 kernel_vm_end = KERNBASE; 1878 nkpt = 0; 1879 while (pdir_pde(PTD, kernel_vm_end)) { 1880 kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1); 1881 nkpt++; 1882 if (kernel_vm_end - 1 >= kernel_map->max_offset) { 1883 kernel_vm_end = kernel_map->max_offset; 1884 break; 1885 } 1886 } 1887 } 1888 addr = roundup2(addr, PAGE_SIZE * NPTEPG); 1889 if (addr - 1 >= kernel_map->max_offset) 1890 addr = kernel_map->max_offset; 1891 while (kernel_vm_end < addr) { 1892 if (pdir_pde(PTD, kernel_vm_end)) { 1893 kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1); 1894 if (kernel_vm_end - 1 >= kernel_map->max_offset) { 1895 kernel_vm_end = kernel_map->max_offset; 1896 break; 1897 } 1898 continue; 1899 } 1900 1901 /* 1902 * This index is bogus, but out of the way 1903 */ 1904 nkpg = vm_page_alloc(NULL, nkpt, 1905 VM_ALLOC_NOOBJ | VM_ALLOC_SYSTEM | VM_ALLOC_WIRED); 1906 if (!nkpg) 1907 panic("pmap_growkernel: no memory to grow kernel"); 1908 1909 nkpt++; 1910 1911 pmap_zero_page(nkpg); 1912 ptppaddr = VM_PAGE_TO_PHYS(nkpg); 1913 newpdir = (pd_entry_t) (ptppaddr | PG_V | PG_RW | PG_A | PG_M); 1914 PD_SET_VA(kernel_pmap, (kernel_vm_end >> PDRSHIFT), newpdir, TRUE); 1915 1916 mtx_lock_spin(&allpmaps_lock); 1917 LIST_FOREACH(pmap, &allpmaps, pm_list) 
1918 PD_SET_VA(pmap, (kernel_vm_end >> PDRSHIFT), newpdir, TRUE); 1919 1920 mtx_unlock_spin(&allpmaps_lock); 1921 kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1); 1922 if (kernel_vm_end - 1 >= kernel_map->max_offset) { 1923 kernel_vm_end = kernel_map->max_offset; 1924 break; 1925 } 1926 } 1927} 1928 1929 1930/*************************************************** 1931 * page management routines. 1932 ***************************************************/ 1933 1934CTASSERT(sizeof(struct pv_chunk) == PAGE_SIZE); 1935CTASSERT(_NPCM == 11); 1936 1937static __inline struct pv_chunk * 1938pv_to_chunk(pv_entry_t pv) 1939{ 1940 1941 return (struct pv_chunk *)((uintptr_t)pv & ~(uintptr_t)PAGE_MASK); 1942} 1943 1944#define PV_PMAP(pv) (pv_to_chunk(pv)->pc_pmap) 1945 1946#define PC_FREE0_9 0xfffffffful /* Free values for index 0 through 9 */ 1947#define PC_FREE10 0x0000fffful /* Free values for index 10 */ 1948 1949static uint32_t pc_freemask[11] = { 1950 PC_FREE0_9, PC_FREE0_9, PC_FREE0_9, 1951 PC_FREE0_9, PC_FREE0_9, PC_FREE0_9, 1952 PC_FREE0_9, PC_FREE0_9, PC_FREE0_9, 1953 PC_FREE0_9, PC_FREE10 1954}; 1955 1956SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_count, CTLFLAG_RD, &pv_entry_count, 0, 1957 "Current number of pv entries"); 1958 1959#ifdef PV_STATS 1960static int pc_chunk_count, pc_chunk_allocs, pc_chunk_frees, pc_chunk_tryfail; 1961 1962SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_count, CTLFLAG_RD, &pc_chunk_count, 0, 1963 "Current number of pv entry chunks"); 1964SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_allocs, CTLFLAG_RD, &pc_chunk_allocs, 0, 1965 "Current number of pv entry chunks allocated"); 1966SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_frees, CTLFLAG_RD, &pc_chunk_frees, 0, 1967 "Current number of pv entry chunks frees"); 1968SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_tryfail, CTLFLAG_RD, &pc_chunk_tryfail, 0, 1969 "Number of times tried to get a chunk page but failed."); 1970 1971static long pv_entry_frees, pv_entry_allocs; 1972static int pv_entry_spare; 1973 1974SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_frees, CTLFLAG_RD, &pv_entry_frees, 0, 1975 "Current number of pv entry frees"); 1976SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_allocs, CTLFLAG_RD, &pv_entry_allocs, 0, 1977 "Current number of pv entry allocs"); 1978SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_spare, CTLFLAG_RD, &pv_entry_spare, 0, 1979 "Current number of spare pv entries"); 1980 1981static int pmap_collect_inactive, pmap_collect_active; 1982 1983SYSCTL_INT(_vm_pmap, OID_AUTO, pmap_collect_inactive, CTLFLAG_RD, &pmap_collect_inactive, 0, 1984 "Current number times pmap_collect called on inactive queue"); 1985SYSCTL_INT(_vm_pmap, OID_AUTO, pmap_collect_active, CTLFLAG_RD, &pmap_collect_active, 0, 1986 "Current number times pmap_collect called on active queue"); 1987#endif 1988 1989/* 1990 * We are in a serious low memory condition. Resort to 1991 * drastic measures to free some pages so we can allocate 1992 * another pv entry chunk. This is normally called to 1993 * unmap inactive pages, and if necessary, active pages. 1994 */ 1995static void 1996pmap_collect(pmap_t locked_pmap, struct vpgqueues *vpq) 1997{ 1998 pmap_t pmap; 1999 pt_entry_t *pte, tpte; 2000 pv_entry_t next_pv, pv; 2001 vm_offset_t va; 2002 vm_page_t m, free; 2003 2004 sched_pin(); 2005 TAILQ_FOREACH(m, &vpq->pl, pageq) { 2006 if (m->hold_count || m->busy) 2007 continue; 2008 TAILQ_FOREACH_SAFE(pv, &m->md.pv_list, pv_list, next_pv) { 2009 va = pv->pv_va; 2010 pmap = PV_PMAP(pv); 2011 /* Avoid deadlock and lock recursion. 
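			 * Pmap locks are taken in ascending address
			 * order: a pmap that sorts after the already
			 * held locked_pmap may be locked
			 * unconditionally, any other pmap is only
			 * try-locked and skipped on failure, and
			 * locked_pmap itself is never re-acquired.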
*/ 2012 if (pmap > locked_pmap) 2013 PMAP_LOCK(pmap); 2014 else if (pmap != locked_pmap && !PMAP_TRYLOCK(pmap)) 2015 continue; 2016 pmap->pm_stats.resident_count--; 2017 pte = pmap_pte_quick(pmap, va); 2018 tpte = pte_load_clear(pte); 2019 KASSERT((tpte & PG_W) == 0, 2020 ("pmap_collect: wired pte %#jx", (uintmax_t)tpte)); 2021 if (tpte & PG_A) 2022 vm_page_flag_set(m, PG_REFERENCED); 2023 if (tpte & PG_M) { 2024 KASSERT((tpte & PG_RW), 2025 ("pmap_collect: modified page not writable: va: %#x, pte: %#jx", 2026 va, (uintmax_t)tpte)); 2027 vm_page_dirty(m); 2028 } 2029 free = NULL; 2030 pmap_unuse_pt(pmap, va, &free); 2031 pmap_invalidate_page(pmap, va); 2032 pmap_free_zero_pages(free); 2033 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list); 2034 if (TAILQ_EMPTY(&m->md.pv_list)) 2035 vm_page_flag_clear(m, PG_WRITEABLE); 2036 free_pv_entry(pmap, pv); 2037 if (pmap != locked_pmap) 2038 PMAP_UNLOCK(pmap); 2039 } 2040 } 2041 sched_unpin(); 2042} 2043 2044 2045/* 2046 * free the pv_entry back to the free list 2047 */ 2048static void 2049free_pv_entry(pmap_t pmap, pv_entry_t pv) 2050{ 2051 vm_page_t m; 2052 struct pv_chunk *pc; 2053 int idx, field, bit; 2054 2055 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2056 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2057 PV_STAT(pv_entry_frees++); 2058 PV_STAT(pv_entry_spare++); 2059 pv_entry_count--; 2060 pc = pv_to_chunk(pv); 2061 idx = pv - &pc->pc_pventry[0]; 2062 field = idx / 32; 2063 bit = idx % 32; 2064 pc->pc_map[field] |= 1ul << bit; 2065 /* move to head of list */ 2066 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 2067 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list); 2068 for (idx = 0; idx < _NPCM; idx++) 2069 if (pc->pc_map[idx] != pc_freemask[idx]) 2070 return; 2071 PV_STAT(pv_entry_spare -= _NPCPV); 2072 PV_STAT(pc_chunk_count--); 2073 PV_STAT(pc_chunk_frees++); 2074 /* entire chunk is free, return it */ 2075 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 2076 m = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)pc)); 2077 pmap_qremove((vm_offset_t)pc, 1); 2078 vm_page_unwire(m, 0); 2079 vm_page_free(m); 2080 pmap_ptelist_free(&pv_vafree, (vm_offset_t)pc); 2081} 2082 2083/* 2084 * get a new pv_entry, allocating a block from the system 2085 * when needed. 
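 *
 * Background sketch (not a verbatim copy of the code): each pv_chunk is a
 * single wired page holding _NPCPV pv_entry slots, and the free slots are
 * tracked by the eleven-word pc_map bitmap.  Converting a pv_entry pointer
 * to its bitmap position is plain index arithmetic, e.g. (slot number
 * chosen arbitrarily for illustration):
 *
 *	idx   = pv - &pc->pc_pventry[0];	idx   == 70
 *	field = idx / 32;			field == 2
 *	bit   = idx % 32;			bit   == 6
 *	pc->pc_map[field] |= 1ul << bit;	slot 70 marked free again
 *
 * free_pv_entry() above uses exactly this scheme; once every pc_map word
 * matches pc_freemask the whole chunk page is returned to the system.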
2086 */ 2087static pv_entry_t 2088get_pv_entry(pmap_t pmap, int try) 2089{ 2090 static const struct timeval printinterval = { 60, 0 }; 2091 static struct timeval lastprint; 2092 static vm_pindex_t colour; 2093 struct vpgqueues *pq; 2094 int bit, field; 2095 pv_entry_t pv; 2096 struct pv_chunk *pc; 2097 vm_page_t m; 2098 2099 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2100 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2101 PV_STAT(pv_entry_allocs++); 2102 pv_entry_count++; 2103 if (pv_entry_count > pv_entry_high_water) 2104 if (ratecheck(&lastprint, &printinterval)) 2105 printf("Approaching the limit on PV entries, consider " 2106 "increasing either the vm.pmap.shpgperproc or the " 2107 "vm.pmap.pv_entry_max tunable.\n"); 2108 pq = NULL; 2109retry: 2110 pc = TAILQ_FIRST(&pmap->pm_pvchunk); 2111 if (pc != NULL) { 2112 for (field = 0; field < _NPCM; field++) { 2113 if (pc->pc_map[field]) { 2114 bit = bsfl(pc->pc_map[field]); 2115 break; 2116 } 2117 } 2118 if (field < _NPCM) { 2119 pv = &pc->pc_pventry[field * 32 + bit]; 2120 pc->pc_map[field] &= ~(1ul << bit); 2121 /* If this was the last item, move it to tail */ 2122 for (field = 0; field < _NPCM; field++) 2123 if (pc->pc_map[field] != 0) { 2124 PV_STAT(pv_entry_spare--); 2125 return (pv); /* not full, return */ 2126 } 2127 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 2128 TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list); 2129 PV_STAT(pv_entry_spare--); 2130 return (pv); 2131 } 2132 } 2133 /* 2134 * Access to the ptelist "pv_vafree" is synchronized by the page 2135 * queues lock. If "pv_vafree" is currently non-empty, it will 2136 * remain non-empty until pmap_ptelist_alloc() completes. 2137 */ 2138 if (pv_vafree == 0 || (m = vm_page_alloc(NULL, colour, (pq == 2139 &vm_page_queues[PQ_ACTIVE] ? VM_ALLOC_SYSTEM : VM_ALLOC_NORMAL) | 2140 VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) { 2141 if (try) { 2142 pv_entry_count--; 2143 PV_STAT(pc_chunk_tryfail++); 2144 return (NULL); 2145 } 2146 /* 2147 * Reclaim pv entries: At first, destroy mappings to 2148 * inactive pages. After that, if a pv chunk entry 2149 * is still needed, destroy mappings to active pages. 
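		 * The escalation proceeds in at most two passes (paraphrased
		 * from the code below):
		 *
		 *	pq == NULL			start with PQ_INACTIVE
		 *	pq == PQ_INACTIVE queue		move on to PQ_ACTIVE
		 *	pq == PQ_ACTIVE queue		panic, suggesting a
		 *					larger vm.pmap.shpgperproc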
2150 */ 2151 if (pq == NULL) { 2152 PV_STAT(pmap_collect_inactive++); 2153 pq = &vm_page_queues[PQ_INACTIVE]; 2154 } else if (pq == &vm_page_queues[PQ_INACTIVE]) { 2155 PV_STAT(pmap_collect_active++); 2156 pq = &vm_page_queues[PQ_ACTIVE]; 2157 } else 2158 panic("get_pv_entry: increase vm.pmap.shpgperproc"); 2159 pmap_collect(pmap, pq); 2160 goto retry; 2161 } 2162 PV_STAT(pc_chunk_count++); 2163 PV_STAT(pc_chunk_allocs++); 2164 colour++; 2165 pc = (struct pv_chunk *)pmap_ptelist_alloc(&pv_vafree); 2166 pmap_qenter((vm_offset_t)pc, &m, 1); 2167 if ((m->flags & PG_ZERO) == 0) 2168 pagezero(pc); 2169 pc->pc_pmap = pmap; 2170 pc->pc_map[0] = pc_freemask[0] & ~1ul; /* preallocated bit 0 */ 2171 for (field = 1; field < _NPCM; field++) 2172 pc->pc_map[field] = pc_freemask[field]; 2173 pv = &pc->pc_pventry[0]; 2174 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list); 2175 PV_STAT(pv_entry_spare += _NPCPV - 1); 2176 return (pv); 2177} 2178 2179static void 2180pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va) 2181{ 2182 pv_entry_t pv; 2183 2184 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2185 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2186 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { 2187 if (pmap == PV_PMAP(pv) && va == pv->pv_va) 2188 break; 2189 } 2190 KASSERT(pv != NULL, ("pmap_remove_entry: pv not found")); 2191 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list); 2192 if (TAILQ_EMPTY(&m->md.pv_list)) 2193 vm_page_flag_clear(m, PG_WRITEABLE); 2194 free_pv_entry(pmap, pv); 2195} 2196 2197/* 2198 * Create a pv entry for page at pa for 2199 * (pmap, va). 2200 */ 2201static void 2202pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m) 2203{ 2204 pv_entry_t pv; 2205 2206 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2207 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2208 pv = get_pv_entry(pmap, FALSE); 2209 pv->pv_va = va; 2210 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list); 2211} 2212 2213/* 2214 * Conditionally create a pv entry. 2215 */ 2216static boolean_t 2217pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m) 2218{ 2219 pv_entry_t pv; 2220 2221 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2222 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2223 if (pv_entry_count < pv_entry_high_water && 2224 (pv = get_pv_entry(pmap, TRUE)) != NULL) { 2225 pv->pv_va = va; 2226 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list); 2227 return (TRUE); 2228 } else 2229 return (FALSE); 2230} 2231 2232/* 2233 * pmap_remove_pte: do the things to unmap a page in a process 2234 */ 2235static int 2236pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t va, vm_page_t *free) 2237{ 2238 pt_entry_t oldpte; 2239 vm_page_t m; 2240 2241 CTR3(KTR_PMAP, "pmap_remove_pte: pmap=%p *ptq=0x%x va=0x%x", 2242 pmap, (u_long)*ptq, va); 2243 2244 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2245 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2246 oldpte = *ptq; 2247 PT_SET_VA_MA(ptq, 0, TRUE); 2248 if (oldpte & PG_W) 2249 pmap->pm_stats.wired_count -= 1; 2250 /* 2251 * Machines that don't support invlpg, also don't support 2252 * PG_G. 2253 */ 2254 if (oldpte & PG_G) 2255 pmap_invalidate_page(kernel_pmap, va); 2256 pmap->pm_stats.resident_count -= 1; 2257 /* 2258 * XXX This is not strictly correctly, but somewhere along the line 2259 * we are losing the managed bit on some pages. It is unclear to me 2260 * why, but I think the most likely explanation is that xen's writable 2261 * page table implementation doesn't respect the unused bits. 
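	 * As a workaround, the test below also treats any valid user-space
	 * PTE as managed and reports the inconsistency with a printf instead
	 * of skipping the pv list cleanup.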
2262 */ 2263 if ((oldpte & PG_MANAGED) || ((oldpte & PG_V) && (va < VM_MAXUSER_ADDRESS)) 2264 ) { 2265 m = PHYS_TO_VM_PAGE(xpmap_mtop(oldpte) & PG_FRAME); 2266 2267 if (!(oldpte & PG_MANAGED)) 2268 printf("va=0x%x is unmanaged :-( pte=0x%llx\n", va, oldpte); 2269 2270 if (oldpte & PG_M) { 2271 KASSERT((oldpte & PG_RW), 2272 ("pmap_remove_pte: modified page not writable: va: %#x, pte: %#jx", 2273 va, (uintmax_t)oldpte)); 2274 vm_page_dirty(m); 2275 } 2276 if (oldpte & PG_A) 2277 vm_page_flag_set(m, PG_REFERENCED); 2278 pmap_remove_entry(pmap, m, va); 2279 } else if ((va < VM_MAXUSER_ADDRESS) && (oldpte & PG_V)) 2280 printf("va=0x%x is unmanaged :-( pte=0x%llx\n", va, oldpte); 2281 2282 return (pmap_unuse_pt(pmap, va, free)); 2283} 2284 2285/* 2286 * Remove a single page from a process address space 2287 */ 2288static void 2289pmap_remove_page(pmap_t pmap, vm_offset_t va, vm_page_t *free) 2290{ 2291 pt_entry_t *pte; 2292 2293 CTR2(KTR_PMAP, "pmap_remove_page: pmap=%p va=0x%x", 2294 pmap, va); 2295 2296 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2297 KASSERT(curthread->td_pinned > 0, ("curthread not pinned")); 2298 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2299 if ((pte = pmap_pte_quick(pmap, va)) == NULL || (*pte & PG_V) == 0) 2300 return; 2301 pmap_remove_pte(pmap, pte, va, free); 2302 pmap_invalidate_page(pmap, va); 2303 if (*PMAP1) 2304 PT_SET_MA(PADDR1, 0); 2305 2306} 2307 2308/* 2309 * Remove the given range of addresses from the specified map. 2310 * 2311 * It is assumed that the start and end are properly 2312 * rounded to the page size. 2313 */ 2314void 2315pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) 2316{ 2317 vm_offset_t pdnxt; 2318 pd_entry_t ptpaddr; 2319 pt_entry_t *pte; 2320 vm_page_t free = NULL; 2321 int anyvalid; 2322 2323 CTR3(KTR_PMAP, "pmap_remove: pmap=%p sva=0x%x eva=0x%x", 2324 pmap, sva, eva); 2325 2326 /* 2327 * Perform an unsynchronized read. This is, however, safe. 2328 */ 2329 if (pmap->pm_stats.resident_count == 0) 2330 return; 2331 2332 anyvalid = 0; 2333 2334 vm_page_lock_queues(); 2335 sched_pin(); 2336 PMAP_LOCK(pmap); 2337 2338 /* 2339 * special handling of removing one page. a very 2340 * common operation and easy to short circuit some 2341 * code. 2342 */ 2343 if ((sva + PAGE_SIZE == eva) && 2344 ((pmap->pm_pdir[(sva >> PDRSHIFT)] & PG_PS) == 0)) { 2345 pmap_remove_page(pmap, sva, &free); 2346 goto out; 2347 } 2348 2349 for (; sva < eva; sva = pdnxt) { 2350 unsigned pdirindex; 2351 2352 /* 2353 * Calculate index for next page table. 2354 */ 2355 pdnxt = (sva + NBPDR) & ~PDRMASK; 2356 if (pmap->pm_stats.resident_count == 0) 2357 break; 2358 2359 pdirindex = sva >> PDRSHIFT; 2360 ptpaddr = pmap->pm_pdir[pdirindex]; 2361 2362 /* 2363 * Weed out invalid mappings. Note: we assume that the page 2364 * directory table is always allocated, and in kernel virtual. 2365 */ 2366 if (ptpaddr == 0) 2367 continue; 2368 2369 /* 2370 * Check for large page. 2371 */ 2372 if ((ptpaddr & PG_PS) != 0) { 2373 PD_CLEAR_VA(pmap, pdirindex, TRUE); 2374 pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE; 2375 anyvalid = 1; 2376 continue; 2377 } 2378 2379 /* 2380 * Limit our scan to either the end of the va represented 2381 * by the current page table page, or to the end of the 2382 * range being removed. 
2383 */ 2384 if (pdnxt > eva) 2385 pdnxt = eva; 2386 2387 for (pte = pmap_pte_quick(pmap, sva); sva != pdnxt; pte++, 2388 sva += PAGE_SIZE) { 2389 if ((*pte & PG_V) == 0) 2390 continue; 2391 2392 /* 2393 * The TLB entry for a PG_G mapping is invalidated 2394 * by pmap_remove_pte(). 2395 */ 2396 if ((*pte & PG_G) == 0) 2397 anyvalid = 1; 2398 if (pmap_remove_pte(pmap, pte, sva, &free)) 2399 break; 2400 } 2401 } 2402 PT_UPDATES_FLUSH(); 2403 if (*PMAP1) 2404 PT_SET_VA_MA(PMAP1, 0, TRUE); 2405out: 2406 if (anyvalid) 2407 pmap_invalidate_all(pmap); 2408 sched_unpin(); 2409 vm_page_unlock_queues(); 2410 PMAP_UNLOCK(pmap); 2411 pmap_free_zero_pages(free); 2412} 2413 2414/* 2415 * Routine: pmap_remove_all 2416 * Function: 2417 * Removes this physical page from 2418 * all physical maps in which it resides. 2419 * Reflects back modify bits to the pager. 2420 * 2421 * Notes: 2422 * Original versions of this routine were very 2423 * inefficient because they iteratively called 2424 * pmap_remove (slow...) 2425 */ 2426 2427void 2428pmap_remove_all(vm_page_t m) 2429{ 2430 pv_entry_t pv; 2431 pmap_t pmap; 2432 pt_entry_t *pte, tpte; 2433 vm_page_t free; 2434 2435#if defined(PMAP_DIAGNOSTIC) 2436 /* 2437 * XXX This makes pmap_remove_all() illegal for non-managed pages! 2438 */ 2439 if (m->flags & PG_FICTITIOUS) { 2440 panic("pmap_remove_all: illegal for unmanaged page, va: 0x%jx", 2441 VM_PAGE_TO_PHYS(m) & 0xffffffff); 2442 } 2443#endif 2444 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2445 sched_pin(); 2446 while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) { 2447 pmap = PV_PMAP(pv); 2448 PMAP_LOCK(pmap); 2449 pmap->pm_stats.resident_count--; 2450 pte = pmap_pte_quick(pmap, pv->pv_va); 2451 2452 tpte = *pte; 2453 PT_SET_VA_MA(pte, 0, TRUE); 2454 if (tpte & PG_W) 2455 pmap->pm_stats.wired_count--; 2456 if (tpte & PG_A) 2457 vm_page_flag_set(m, PG_REFERENCED); 2458 2459 /* 2460 * Update the vm_page_t clean and reference bits. 2461 */ 2462 if (tpte & PG_M) { 2463 KASSERT((tpte & PG_RW), 2464 ("pmap_remove_all: modified page not writable: va: %#x, pte: %#jx", 2465 pv->pv_va, (uintmax_t)tpte)); 2466 vm_page_dirty(m); 2467 } 2468 free = NULL; 2469 pmap_unuse_pt(pmap, pv->pv_va, &free); 2470 pmap_invalidate_page(pmap, pv->pv_va); 2471 pmap_free_zero_pages(free); 2472 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list); 2473 free_pv_entry(pmap, pv); 2474 PMAP_UNLOCK(pmap); 2475 } 2476 vm_page_flag_clear(m, PG_WRITEABLE); 2477 PT_UPDATES_FLUSH(); 2478 if (*PMAP1) 2479 PT_SET_MA(PADDR1, 0); 2480 sched_unpin(); 2481} 2482 2483/* 2484 * Set the physical protection on the 2485 * specified range of this map as requested. 
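 *
 * A minimal usage sketch (hypothetical call, not taken from this file):
 *
 *	pmap_protect(pmap, va, va + PAGE_SIZE, VM_PROT_READ);
 *
 * downgrades one page worth of mappings to read-only in place, while a
 * protection lacking VM_PROT_READ is handled by pmap_remove() instead, as
 * the code below shows.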
2486 */ 2487void 2488pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot) 2489{ 2490 vm_offset_t pdnxt; 2491 pd_entry_t ptpaddr; 2492 pt_entry_t *pte; 2493 int anychanged; 2494 2495 CTR4(KTR_PMAP, "pmap_protect: pmap=%p sva=0x%x eva=0x%x prot=0x%x", 2496 pmap, sva, eva, prot); 2497 2498 if ((prot & VM_PROT_READ) == VM_PROT_NONE) { 2499 pmap_remove(pmap, sva, eva); 2500 return; 2501 } 2502 2503#ifdef PAE 2504 if ((prot & (VM_PROT_WRITE|VM_PROT_EXECUTE)) == 2505 (VM_PROT_WRITE|VM_PROT_EXECUTE)) 2506 return; 2507#else 2508 if (prot & VM_PROT_WRITE) 2509 return; 2510#endif 2511 2512 anychanged = 0; 2513 2514 vm_page_lock_queues(); 2515 sched_pin(); 2516 PMAP_LOCK(pmap); 2517 for (; sva < eva; sva = pdnxt) { 2518 pt_entry_t obits, pbits; 2519 unsigned pdirindex; 2520 2521 pdnxt = (sva + NBPDR) & ~PDRMASK; 2522 2523 pdirindex = sva >> PDRSHIFT; 2524 ptpaddr = pmap->pm_pdir[pdirindex]; 2525 2526 /* 2527 * Weed out invalid mappings. Note: we assume that the page 2528 * directory table is always allocated, and in kernel virtual. 2529 */ 2530 if (ptpaddr == 0) 2531 continue; 2532 2533 /* 2534 * Check for large page. 2535 */ 2536 if ((ptpaddr & PG_PS) != 0) { 2537 if ((prot & VM_PROT_WRITE) == 0) 2538 pmap->pm_pdir[pdirindex] &= ~(PG_M|PG_RW); 2539#ifdef PAE 2540 if ((prot & VM_PROT_EXECUTE) == 0) 2541 pmap->pm_pdir[pdirindex] |= pg_nx; 2542#endif 2543 anychanged = 1; 2544 continue; 2545 } 2546 2547 if (pdnxt > eva) 2548 pdnxt = eva; 2549 2550 for (pte = pmap_pte_quick(pmap, sva); sva != pdnxt; pte++, 2551 sva += PAGE_SIZE) { 2552 vm_page_t m; 2553 2554retry: 2555 /* 2556 * Regardless of whether a pte is 32 or 64 bits in 2557 * size, PG_RW, PG_A, and PG_M are among the least 2558 * significant 32 bits. 2559 */ 2560 obits = pbits = *pte; 2561 if ((pbits & PG_V) == 0) 2562 continue; 2563 if (pbits & PG_MANAGED) { 2564 m = NULL; 2565 if (pbits & PG_A) { 2566 m = PHYS_TO_VM_PAGE(xpmap_mtop(pbits) & PG_FRAME); 2567 vm_page_flag_set(m, PG_REFERENCED); 2568 pbits &= ~PG_A; 2569 } 2570 if ((pbits & PG_M) != 0) { 2571 if (m == NULL) 2572 m = PHYS_TO_VM_PAGE(xpmap_mtop(pbits) & PG_FRAME); 2573 vm_page_dirty(m); 2574 } 2575 } 2576 2577 if ((prot & VM_PROT_WRITE) == 0) 2578 pbits &= ~(PG_RW | PG_M); 2579#ifdef PAE 2580 if ((prot & VM_PROT_EXECUTE) == 0) 2581 pbits |= pg_nx; 2582#endif 2583 2584 if (pbits != obits) { 2585#ifdef XEN 2586 obits = *pte; 2587 PT_SET_VA_MA(pte, pbits, TRUE); 2588 if (*pte != pbits) 2589 goto retry; 2590#else 2591#ifdef PAE 2592 if (!atomic_cmpset_64(pte, obits, pbits)) 2593 goto retry; 2594#else 2595 if (!atomic_cmpset_int((u_int *)pte, obits, 2596 pbits)) 2597 goto retry; 2598#endif 2599#endif 2600 if (obits & PG_G) 2601 pmap_invalidate_page(pmap, sva); 2602 else 2603 anychanged = 1; 2604 } 2605 } 2606 } 2607 PT_UPDATES_FLUSH(); 2608 if (*PMAP1) 2609 PT_SET_VA_MA(PMAP1, 0, TRUE); 2610 if (anychanged) 2611 pmap_invalidate_all(pmap); 2612 sched_unpin(); 2613 vm_page_unlock_queues(); 2614 PMAP_UNLOCK(pmap); 2615} 2616 2617/* 2618 * Insert the given physical page (p) at 2619 * the specified virtual address (v) in the 2620 * target physical map with the protection requested. 2621 * 2622 * If specified, the page will be wired down, meaning 2623 * that the related pte can not be reclaimed. 2624 * 2625 * NB: This is the only routine which MAY NOT lazy-evaluate 2626 * or lose information. That is, this routine must actually 2627 * insert this page into the given map NOW. 
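 *
 * A minimal usage sketch (hypothetical call site, not taken from this
 * file):
 *
 *	pmap_enter(pmap, va, VM_PROT_READ, m,
 *	    VM_PROT_READ | VM_PROT_WRITE, FALSE);
 *
 * establishes a read/write, unwired mapping of page m at va, allocating
 * the page table page first when one is not already resident.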
2628 */ 2629void 2630pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m, 2631 vm_prot_t prot, boolean_t wired) 2632{ 2633 vm_paddr_t pa; 2634 pd_entry_t *pde; 2635 pt_entry_t *pte; 2636 vm_paddr_t opa; 2637 pt_entry_t origpte, newpte; 2638 vm_page_t mpte, om; 2639 boolean_t invlva; 2640 2641 CTR6(KTR_PMAP, "pmap_enter: pmap=%08p va=0x%08x access=0x%x ma=0x%08x prot=0x%x wired=%d", 2642 pmap, va, access, xpmap_ptom(VM_PAGE_TO_PHYS(m)), prot, wired); 2643 va = trunc_page(va); 2644#ifdef PMAP_DIAGNOSTIC 2645 if (va > VM_MAX_KERNEL_ADDRESS) 2646 panic("pmap_enter: toobig"); 2647 if ((va >= UPT_MIN_ADDRESS) && (va < UPT_MAX_ADDRESS)) 2648 panic("pmap_enter: invalid to pmap_enter page table pages (va: 0x%x)", va); 2649#endif 2650 2651 mpte = NULL; 2652 2653 vm_page_lock_queues(); 2654 PMAP_LOCK(pmap); 2655 sched_pin(); 2656 2657 /* 2658 * In the case that a page table page is not 2659 * resident, we are creating it here. 2660 */ 2661 if (va < VM_MAXUSER_ADDRESS) { 2662 mpte = pmap_allocpte(pmap, va, M_WAITOK); 2663 } 2664#if 0 && defined(PMAP_DIAGNOSTIC) 2665 else { 2666 pd_entry_t *pdeaddr = pmap_pde(pmap, va); 2667 origpte = *pdeaddr; 2668 if ((origpte & PG_V) == 0) { 2669 panic("pmap_enter: invalid kernel page table page, pdir=%p, pde=%p, va=%p\n", 2670 pmap->pm_pdir[PTDPTDI], origpte, va); 2671 } 2672 } 2673#endif 2674 2675 pde = pmap_pde(pmap, va); 2676 if ((*pde & PG_PS) != 0) 2677 panic("pmap_enter: attempted pmap_enter on 4MB page"); 2678 pte = pmap_pte_quick(pmap, va); 2679 2680 /* 2681 * Page Directory table entry not valid, we need a new PT page 2682 */ 2683 if (pte == NULL) { 2684 panic("pmap_enter: invalid page directory pdir=%#jx, va=%#x\n", 2685 (uintmax_t)pmap->pm_pdir[va >> PDRSHIFT], va); 2686 } 2687 2688 pa = VM_PAGE_TO_PHYS(m); 2689 om = NULL; 2690 opa = origpte = 0; 2691 2692#if 0 2693 KASSERT((*pte & PG_V) || (*pte == 0), ("address set but not valid pte=%p *pte=0x%016jx", 2694 pte, *pte)); 2695#endif 2696 origpte = *pte; 2697 if (origpte) 2698 origpte = xpmap_mtop(origpte); 2699 opa = origpte & PG_FRAME; 2700 2701 /* 2702 * Mapping has not changed, must be protection or wiring change. 2703 */ 2704 if (origpte && (opa == pa)) { 2705 /* 2706 * Wiring change, just update stats. We don't worry about 2707 * wiring PT pages as they remain resident as long as there 2708 * are valid mappings in them. Hence, if a user page is wired, 2709 * the PT page will be also. 2710 */ 2711 if (wired && ((origpte & PG_W) == 0)) 2712 pmap->pm_stats.wired_count++; 2713 else if (!wired && (origpte & PG_W)) 2714 pmap->pm_stats.wired_count--; 2715 2716 /* 2717 * Remove extra pte reference 2718 */ 2719 if (mpte) 2720 mpte->wire_count--; 2721 2722 /* 2723 * We might be turning off write access to the page, 2724 * so we go ahead and sense modify status. 2725 */ 2726 if (origpte & PG_MANAGED) { 2727 om = m; 2728 pa |= PG_MANAGED; 2729 } 2730 goto validate; 2731 } 2732 /* 2733 * Mapping has changed, invalidate old range and fall through to 2734 * handle validating new mapping. 
2735 */ 2736 if (opa) { 2737 if (origpte & PG_W) 2738 pmap->pm_stats.wired_count--; 2739 if (origpte & PG_MANAGED) { 2740 om = PHYS_TO_VM_PAGE(opa); 2741 pmap_remove_entry(pmap, om, va); 2742 } else if (va < VM_MAXUSER_ADDRESS) 2743 printf("va=0x%x is unmanaged :-( \n", va); 2744 2745 if (mpte != NULL) { 2746 mpte->wire_count--; 2747 KASSERT(mpte->wire_count > 0, 2748 ("pmap_enter: missing reference to page table page," 2749 " va: 0x%x", va)); 2750 } 2751 } else 2752 pmap->pm_stats.resident_count++; 2753 2754 /* 2755 * Enter on the PV list if part of our managed memory. 2756 */ 2757 if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) { 2758 KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva, 2759 ("pmap_enter: managed mapping within the clean submap")); 2760 pmap_insert_entry(pmap, va, m); 2761 pa |= PG_MANAGED; 2762 } 2763 2764 /* 2765 * Increment counters 2766 */ 2767 if (wired) 2768 pmap->pm_stats.wired_count++; 2769 2770validate: 2771 /* 2772 * Now validate mapping with desired protection/wiring. 2773 */ 2774 newpte = (pt_entry_t)(pa | PG_V); 2775 if ((prot & VM_PROT_WRITE) != 0) { 2776 newpte |= PG_RW; 2777 vm_page_flag_set(m, PG_WRITEABLE); 2778 } 2779#ifdef PAE 2780 if ((prot & VM_PROT_EXECUTE) == 0) 2781 newpte |= pg_nx; 2782#endif 2783 if (wired) 2784 newpte |= PG_W; 2785 if (va < VM_MAXUSER_ADDRESS) 2786 newpte |= PG_U; 2787 if (pmap == kernel_pmap) 2788 newpte |= pgeflag; 2789 2790 critical_enter(); 2791 /* 2792 * if the mapping or permission bits are different, we need 2793 * to update the pte. 2794 */ 2795 if ((origpte & ~(PG_M|PG_A)) != newpte) { 2796 if (origpte) { 2797 invlva = FALSE; 2798 origpte = *pte; 2799 PT_SET_VA(pte, newpte | PG_A, FALSE); 2800 if (origpte & PG_A) { 2801 if (origpte & PG_MANAGED) 2802 vm_page_flag_set(om, PG_REFERENCED); 2803 if (opa != VM_PAGE_TO_PHYS(m)) 2804 invlva = TRUE; 2805#ifdef PAE 2806 if ((origpte & PG_NX) == 0 && 2807 (newpte & PG_NX) != 0) 2808 invlva = TRUE; 2809#endif 2810 } 2811 if (origpte & PG_M) { 2812 KASSERT((origpte & PG_RW), 2813 ("pmap_enter: modified page not writable: va: %#x, pte: %#jx", 2814 va, (uintmax_t)origpte)); 2815 if ((origpte & PG_MANAGED) != 0) 2816 vm_page_dirty(om); 2817 if ((prot & VM_PROT_WRITE) == 0) 2818 invlva = TRUE; 2819 } 2820 if (invlva) 2821 pmap_invalidate_page(pmap, va); 2822 } else{ 2823 PT_SET_VA(pte, newpte | PG_A, FALSE); 2824 } 2825 2826 } 2827 PT_UPDATES_FLUSH(); 2828 critical_exit(); 2829 if (*PMAP1) 2830 PT_SET_VA_MA(PMAP1, 0, TRUE); 2831 sched_unpin(); 2832 vm_page_unlock_queues(); 2833 PMAP_UNLOCK(pmap); 2834} 2835 2836/* 2837 * Maps a sequence of resident pages belonging to the same object. 2838 * The sequence begins with the given page m_start. This page is 2839 * mapped at the given virtual address start. Each subsequent page is 2840 * mapped at a virtual address that is offset from start by the same 2841 * amount as the page is offset from m_start within the object. The 2842 * last page in the sequence is the page with the largest offset from 2843 * m_start that can be mapped at a virtual address less than the given 2844 * virtual address end. Not every virtual page between start and end 2845 * is mapped; only those for which a resident page exists with the 2846 * corresponding offset from m_start are mapped. 
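 *
 * For example (hypothetical values): with m_start->pindex == 10 and the
 * range anchored at start, a resident page whose pindex is 13 is entered
 * at start + ptoa(3), and iteration stops as soon as that offset would
 * reach end.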
2847 */ 2848void 2849pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end, 2850 vm_page_t m_start, vm_prot_t prot) 2851{ 2852 vm_page_t m, mpte; 2853 vm_pindex_t diff, psize; 2854 multicall_entry_t mcl[16]; 2855 multicall_entry_t *mclp = mcl; 2856 int error, count = 0; 2857 2858 VM_OBJECT_LOCK_ASSERT(m_start->object, MA_OWNED); 2859 psize = atop(end - start); 2860 2861 mpte = NULL; 2862 m = m_start; 2863 PMAP_LOCK(pmap); 2864 while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) { 2865 mpte = pmap_enter_quick_locked(&mclp, &count, pmap, start + ptoa(diff), m, 2866 prot, mpte); 2867 m = TAILQ_NEXT(m, listq); 2868 if (count == 16) { 2869 error = HYPERVISOR_multicall(mcl, count); 2870 KASSERT(error == 0, ("bad multicall %d", error)); 2871 mclp = mcl; 2872 count = 0; 2873 } 2874 } 2875 if (count) { 2876 error = HYPERVISOR_multicall(mcl, count); 2877 KASSERT(error == 0, ("bad multicall %d", error)); 2878 } 2879 2880 PMAP_UNLOCK(pmap); 2881} 2882 2883/* 2884 * this code makes some *MAJOR* assumptions: 2885 * 1. Current pmap & pmap exists. 2886 * 2. Not wired. 2887 * 3. Read access. 2888 * 4. No page table pages. 2889 * but is *MUCH* faster than pmap_enter... 2890 */ 2891 2892void 2893pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot) 2894{ 2895 multicall_entry_t mcl, *mclp; 2896 int count = 0; 2897 mclp = &mcl; 2898 2899 CTR4(KTR_PMAP, "pmap_enter_quick: pmap=%p va=0x%x m=%p prot=0x%x", 2900 pmap, va, m, prot); 2901 2902 PMAP_LOCK(pmap); 2903 (void) pmap_enter_quick_locked(&mclp, &count, pmap, va, m, prot, NULL); 2904 if (count) 2905 HYPERVISOR_multicall(&mcl, count); 2906 PMAP_UNLOCK(pmap); 2907} 2908 2909#ifdef notyet 2910void 2911pmap_enter_quick_range(pmap_t pmap, vm_offset_t *addrs, vm_page_t *pages, vm_prot_t *prots, int count) 2912{ 2913 int i, error, index = 0; 2914 multicall_entry_t mcl[16]; 2915 multicall_entry_t *mclp = mcl; 2916 2917 PMAP_LOCK(pmap); 2918 for (i = 0; i < count; i++, addrs++, pages++, prots++) { 2919 if (!pmap_is_prefaultable_locked(pmap, *addrs)) 2920 continue; 2921 2922 (void) pmap_enter_quick_locked(&mclp, &index, pmap, *addrs, *pages, *prots, NULL); 2923 if (index == 16) { 2924 error = HYPERVISOR_multicall(mcl, index); 2925 mclp = mcl; 2926 index = 0; 2927 KASSERT(error == 0, ("bad multicall %d", error)); 2928 } 2929 } 2930 if (index) { 2931 error = HYPERVISOR_multicall(mcl, index); 2932 KASSERT(error == 0, ("bad multicall %d", error)); 2933 } 2934 2935 PMAP_UNLOCK(pmap); 2936} 2937#endif 2938 2939static vm_page_t 2940pmap_enter_quick_locked(multicall_entry_t **mclpp, int *count, pmap_t pmap, vm_offset_t va, vm_page_t m, 2941 vm_prot_t prot, vm_page_t mpte) 2942{ 2943 pt_entry_t *pte; 2944 vm_paddr_t pa; 2945 vm_page_t free; 2946 multicall_entry_t *mcl = *mclpp; 2947 2948 KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva || 2949 (m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0, 2950 ("pmap_enter_quick_locked: managed mapping within the clean submap")); 2951 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2952 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2953 2954 /* 2955 * In the case that a page table page is not 2956 * resident, we are creating it here. 
2957 */ 2958 if (va < VM_MAXUSER_ADDRESS) { 2959 unsigned ptepindex; 2960 pd_entry_t ptema; 2961 2962 /* 2963 * Calculate pagetable page index 2964 */ 2965 ptepindex = va >> PDRSHIFT; 2966 if (mpte && (mpte->pindex == ptepindex)) { 2967 mpte->wire_count++; 2968 } else { 2969 /* 2970 * Get the page directory entry 2971 */ 2972 ptema = pmap->pm_pdir[ptepindex]; 2973 2974 /* 2975 * If the page table page is mapped, we just increment 2976 * the hold count, and activate it. 2977 */ 2978 if (ptema & PG_V) { 2979 if (ptema & PG_PS) 2980 panic("pmap_enter_quick: unexpected mapping into 4MB page"); 2981 mpte = PHYS_TO_VM_PAGE(xpmap_mtop(ptema) & PG_FRAME); 2982 mpte->wire_count++; 2983 } else { 2984 mpte = _pmap_allocpte(pmap, ptepindex, 2985 M_NOWAIT); 2986 if (mpte == NULL) 2987 return (mpte); 2988 } 2989 } 2990 } else { 2991 mpte = NULL; 2992 } 2993 2994 /* 2995 * This call to vtopte makes the assumption that we are 2996 * entering the page into the current pmap. In order to support 2997 * quick entry into any pmap, one would likely use pmap_pte_quick. 2998 * But that isn't as quick as vtopte. 2999 */ 3000 KASSERT(pmap_is_current(pmap), ("entering pages in non-current pmap")); 3001 pte = vtopte(va); 3002 if (*pte & PG_V) { 3003 if (mpte != NULL) { 3004 mpte->wire_count--; 3005 mpte = NULL; 3006 } 3007 return (mpte); 3008 } 3009 3010 /* 3011 * Enter on the PV list if part of our managed memory. 3012 */ 3013 if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0 && 3014 !pmap_try_insert_pv_entry(pmap, va, m)) { 3015 if (mpte != NULL) { 3016 free = NULL; 3017 if (pmap_unwire_pte_hold(pmap, mpte, &free)) { 3018 pmap_invalidate_page(pmap, va); 3019 pmap_free_zero_pages(free); 3020 } 3021 3022 mpte = NULL; 3023 } 3024 return (mpte); 3025 } 3026 3027 /* 3028 * Increment counters 3029 */ 3030 pmap->pm_stats.resident_count++; 3031 3032 pa = VM_PAGE_TO_PHYS(m); 3033#ifdef PAE 3034 if ((prot & VM_PROT_EXECUTE) == 0) 3035 pa |= pg_nx; 3036#endif 3037 3038#if 0 3039 /* 3040 * Now validate mapping with RO protection 3041 */ 3042 if (m->flags & (PG_FICTITIOUS|PG_UNMANAGED)) 3043 pte_store(pte, pa | PG_V | PG_U); 3044 else 3045 pte_store(pte, pa | PG_V | PG_U | PG_MANAGED); 3046#else 3047 /* 3048 * Now validate mapping with RO protection 3049 */ 3050 if (m->flags & (PG_FICTITIOUS|PG_UNMANAGED)) 3051 pa = xpmap_ptom(pa | PG_V | PG_U); 3052 else 3053 pa = xpmap_ptom(pa | PG_V | PG_U | PG_MANAGED); 3054 3055 mcl->op = __HYPERVISOR_update_va_mapping; 3056 mcl->args[0] = va; 3057 mcl->args[1] = (uint32_t)(pa & 0xffffffff); 3058 mcl->args[2] = (uint32_t)(pa >> 32); 3059 mcl->args[3] = 0; 3060 *mclpp = mcl + 1; 3061 *count = *count + 1; 3062#endif 3063 return mpte; 3064} 3065 3066/* 3067 * Make a temporary mapping for a physical address. This is only intended 3068 * to be used for panic dumps. 3069 */ 3070void * 3071pmap_kenter_temporary(vm_paddr_t pa, int i) 3072{ 3073 vm_offset_t va; 3074 3075 va = (vm_offset_t)crashdumpmap + (i * PAGE_SIZE); 3076 pmap_kenter(va, pa); 3077 invlpg(va); 3078 return ((void *)crashdumpmap); 3079} 3080 3081/* 3082 * This code maps large physical mmap regions into the 3083 * processor address space. Note that some shortcuts 3084 * are taken, but the code works. 
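 *
 * The superpage fast path below applies only when PSE is available and
 * both addr and size are superpage (NBPDR) aligned, for instance
 * (hypothetical values) addr == 0x00400000 and size == 0x00800000; whole
 * page directory entries are then installed with PG_PS set.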
3085 */ 3086void 3087pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, 3088 vm_object_t object, vm_pindex_t pindex, 3089 vm_size_t size) 3090{ 3091 vm_page_t p; 3092 3093 VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); 3094 KASSERT(object->type == OBJT_DEVICE, 3095 ("pmap_object_init_pt: non-device object")); 3096 if (pseflag && 3097 ((addr & (NBPDR - 1)) == 0) && ((size & (NBPDR - 1)) == 0)) { 3098 int i; 3099 vm_page_t m[1]; 3100 unsigned int ptepindex; 3101 int npdes; 3102 pd_entry_t ptepa; 3103 3104 PMAP_LOCK(pmap); 3105 if (pmap->pm_pdir[ptepindex = (addr >> PDRSHIFT)]) 3106 goto out; 3107 PMAP_UNLOCK(pmap); 3108retry: 3109 p = vm_page_lookup(object, pindex); 3110 if (p != NULL) { 3111 if (vm_page_sleep_if_busy(p, FALSE, "init4p")) 3112 goto retry; 3113 } else { 3114 p = vm_page_alloc(object, pindex, VM_ALLOC_NORMAL); 3115 if (p == NULL) 3116 return; 3117 m[0] = p; 3118 3119 if (vm_pager_get_pages(object, m, 1, 0) != VM_PAGER_OK) { 3120 vm_page_lock_queues(); 3121 vm_page_free(p); 3122 vm_page_unlock_queues(); 3123 return; 3124 } 3125 3126 p = vm_page_lookup(object, pindex); 3127 vm_page_lock_queues(); 3128 vm_page_wakeup(p); 3129 vm_page_unlock_queues(); 3130 } 3131 3132 ptepa = VM_PAGE_TO_PHYS(p); 3133 if (ptepa & (NBPDR - 1)) 3134 return; 3135 3136 p->valid = VM_PAGE_BITS_ALL; 3137 3138 PMAP_LOCK(pmap); 3139 pmap->pm_stats.resident_count += size >> PAGE_SHIFT; 3140 npdes = size >> PDRSHIFT; 3141 critical_enter(); 3142 for(i = 0; i < npdes; i++) { 3143 PD_SET_VA(pmap, ptepindex, 3144 ptepa | PG_U | PG_M | PG_RW | PG_V | PG_PS, FALSE); 3145 ptepa += NBPDR; 3146 ptepindex += 1; 3147 } 3148 pmap_invalidate_all(pmap); 3149 critical_exit(); 3150out: 3151 PMAP_UNLOCK(pmap); 3152 } 3153} 3154 3155/* 3156 * Routine: pmap_change_wiring 3157 * Function: Change the wiring attribute for a map/virtual-address 3158 * pair. 3159 * In/out conditions: 3160 * The mapping must already exist in the pmap. 3161 */ 3162void 3163pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired) 3164{ 3165 pt_entry_t *pte; 3166 3167 vm_page_lock_queues(); 3168 PMAP_LOCK(pmap); 3169 pte = pmap_pte(pmap, va); 3170 3171 if (wired && !pmap_pte_w(pte)) { 3172 PT_SET_VA_MA((pte), *(pte) | PG_W, TRUE); 3173 pmap->pm_stats.wired_count++; 3174 } else if (!wired && pmap_pte_w(pte)) { 3175 PT_SET_VA_MA((pte), *(pte) & ~PG_W, TRUE); 3176 pmap->pm_stats.wired_count--; 3177 } 3178 3179 /* 3180 * Wiring is not a hardware characteristic so there is no need to 3181 * invalidate TLB. 3182 */ 3183 pmap_pte_release(pte); 3184 PMAP_UNLOCK(pmap); 3185 vm_page_unlock_queues(); 3186} 3187 3188 3189 3190/* 3191 * Copy the range specified by src_addr/len 3192 * from the source map to the range dst_addr/len 3193 * in the destination map. 3194 * 3195 * This routine is only advisory and need not do anything. 
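 *
 * In this Xen pmap the copy is attempted only when dst_addr == src_addr
 * and the source pmap is the current one; a typical advisory call
 * (hypothetical, using a vm_map_entry's bounds) would be:
 *
 *	pmap_copy(dst_pmap, src_pmap, entry->start,
 *	    entry->end - entry->start, entry->start);
 *
 * Any other combination simply returns, which the advisory interface
 * permits.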
3196 */ 3197 3198void 3199pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len, 3200 vm_offset_t src_addr) 3201{ 3202 vm_page_t free; 3203 vm_offset_t addr; 3204 vm_offset_t end_addr = src_addr + len; 3205 vm_offset_t pdnxt; 3206 3207 if (dst_addr != src_addr) 3208 return; 3209 3210 if (!pmap_is_current(src_pmap)) { 3211 CTR2(KTR_PMAP, 3212 "pmap_copy, skipping: pdir[PTDPTDI]=0x%jx PTDpde[0]=0x%jx", 3213 (src_pmap->pm_pdir[PTDPTDI] & PG_FRAME), (PTDpde[0] & PG_FRAME)); 3214 3215 return; 3216 } 3217 CTR5(KTR_PMAP, "pmap_copy: dst_pmap=%p src_pmap=%p dst_addr=0x%x len=%d src_addr=0x%x", 3218 dst_pmap, src_pmap, dst_addr, len, src_addr); 3219 3220 vm_page_lock_queues(); 3221 if (dst_pmap < src_pmap) { 3222 PMAP_LOCK(dst_pmap); 3223 PMAP_LOCK(src_pmap); 3224 } else { 3225 PMAP_LOCK(src_pmap); 3226 PMAP_LOCK(dst_pmap); 3227 } 3228 sched_pin(); 3229 for (addr = src_addr; addr < end_addr; addr = pdnxt) { 3230 pt_entry_t *src_pte, *dst_pte; 3231 vm_page_t dstmpte, srcmpte; 3232 pd_entry_t srcptepaddr; 3233 unsigned ptepindex; 3234 3235 if (addr >= UPT_MIN_ADDRESS) 3236 panic("pmap_copy: invalid to pmap_copy page tables"); 3237 3238 pdnxt = (addr + NBPDR) & ~PDRMASK; 3239 ptepindex = addr >> PDRSHIFT; 3240 3241 srcptepaddr = PT_GET(&src_pmap->pm_pdir[ptepindex]); 3242 if (srcptepaddr == 0) 3243 continue; 3244 3245 if (srcptepaddr & PG_PS) { 3246 if (dst_pmap->pm_pdir[ptepindex] == 0) { 3247 PD_SET_VA(dst_pmap, ptepindex, srcptepaddr & ~PG_W, TRUE); 3248 dst_pmap->pm_stats.resident_count += 3249 NBPDR / PAGE_SIZE; 3250 } 3251 continue; 3252 } 3253 3254 srcmpte = PHYS_TO_VM_PAGE(srcptepaddr & PG_FRAME); 3255 if (srcmpte->wire_count == 0) 3256 panic("pmap_copy: source page table page is unused"); 3257 3258 if (pdnxt > end_addr) 3259 pdnxt = end_addr; 3260 3261 src_pte = vtopte(addr); 3262 while (addr < pdnxt) { 3263 pt_entry_t ptetemp; 3264 ptetemp = *src_pte; 3265 /* 3266 * we only virtual copy managed pages 3267 */ 3268 if ((ptetemp & PG_MANAGED) != 0) { 3269 dstmpte = pmap_allocpte(dst_pmap, addr, 3270 M_NOWAIT); 3271 if (dstmpte == NULL) 3272 break; 3273 dst_pte = pmap_pte_quick(dst_pmap, addr); 3274 if (*dst_pte == 0 && 3275 pmap_try_insert_pv_entry(dst_pmap, addr, 3276 PHYS_TO_VM_PAGE(xpmap_mtop(ptetemp) & PG_FRAME))) { 3277 /* 3278 * Clear the wired, modified, and 3279 * accessed (referenced) bits 3280 * during the copy. 3281 */ 3282 KASSERT(ptetemp != 0, ("src_pte not set")); 3283 PT_SET_VA_MA(dst_pte, ptetemp & ~(PG_W | PG_M | PG_A), TRUE /* XXX debug */); 3284 KASSERT(*dst_pte == (ptetemp & ~(PG_W | PG_M | PG_A)), 3285 ("no pmap copy expected: 0x%jx saw: 0x%jx", 3286 ptetemp & ~(PG_W | PG_M | PG_A), *dst_pte)); 3287 dst_pmap->pm_stats.resident_count++; 3288 } else { 3289 free = NULL; 3290 if (pmap_unwire_pte_hold(dst_pmap, 3291 dstmpte, &free)) { 3292 pmap_invalidate_page(dst_pmap, 3293 addr); 3294 pmap_free_zero_pages(free); 3295 } 3296 } 3297 if (dstmpte->wire_count >= srcmpte->wire_count) 3298 break; 3299 } 3300 addr += PAGE_SIZE; 3301 src_pte++; 3302 } 3303 } 3304 PT_UPDATES_FLUSH(); 3305 sched_unpin(); 3306 vm_page_unlock_queues(); 3307 PMAP_UNLOCK(src_pmap); 3308 PMAP_UNLOCK(dst_pmap); 3309} 3310 3311/* 3312 * pmap_zero_page zeros the specified hardware page by mapping 3313 * the page into KVM and using bzero to clear its contents. 
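 *
 * The per-CPU window follows a simple map/zero/unmap pattern (paraphrased
 * from the code below, where ma is the machine address of the page):
 *
 *	PT_SET_MA(sysmaps->CADDR2, PG_V | PG_RW | ma | PG_A | PG_M);
 *	pagezero(sysmaps->CADDR2);
 *	PT_SET_MA(sysmaps->CADDR2, 0);
 *
 * executed with sched_pin() and the sysmaps lock held so the temporary
 * mapping cannot migrate to another CPU.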
3314 */ 3315void 3316pmap_zero_page(vm_page_t m) 3317{ 3318 struct sysmaps *sysmaps; 3319 3320 sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)]; 3321 mtx_lock(&sysmaps->lock); 3322 if (*sysmaps->CMAP2) 3323 panic("pmap_zero_page: CMAP2 busy"); 3324 sched_pin(); 3325 PT_SET_MA(sysmaps->CADDR2, PG_V | PG_RW | xpmap_ptom(VM_PAGE_TO_PHYS(m)) | PG_A | PG_M); 3326 pagezero(sysmaps->CADDR2); 3327 PT_SET_MA(sysmaps->CADDR2, 0); 3328 sched_unpin(); 3329 mtx_unlock(&sysmaps->lock); 3330} 3331 3332/* 3333 * pmap_zero_page_area zeros the specified hardware page by mapping 3334 * the page into KVM and using bzero to clear its contents. 3335 * 3336 * off and size may not cover an area beyond a single hardware page. 3337 */ 3338void 3339pmap_zero_page_area(vm_page_t m, int off, int size) 3340{ 3341 struct sysmaps *sysmaps; 3342 3343 sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)]; 3344 mtx_lock(&sysmaps->lock); 3345 if (*sysmaps->CMAP2) 3346 panic("pmap_zero_page: CMAP2 busy"); 3347 sched_pin(); 3348 PT_SET_MA(sysmaps->CADDR2, PG_V | PG_RW | xpmap_ptom(VM_PAGE_TO_PHYS(m)) | PG_A | PG_M); 3349 3350 if (off == 0 && size == PAGE_SIZE) 3351 pagezero(sysmaps->CADDR2); 3352 else 3353 bzero((char *)sysmaps->CADDR2 + off, size); 3354 PT_SET_MA(sysmaps->CADDR2, 0); 3355 sched_unpin(); 3356 mtx_unlock(&sysmaps->lock); 3357} 3358 3359/* 3360 * pmap_zero_page_idle zeros the specified hardware page by mapping 3361 * the page into KVM and using bzero to clear its contents. This 3362 * is intended to be called from the vm_pagezero process only and 3363 * outside of Giant. 3364 */ 3365void 3366pmap_zero_page_idle(vm_page_t m) 3367{ 3368 3369 if (*CMAP3) 3370 panic("pmap_zero_page: CMAP3 busy"); 3371 sched_pin(); 3372 PT_SET_MA(CADDR3, PG_V | PG_RW | xpmap_ptom(VM_PAGE_TO_PHYS(m)) | PG_A | PG_M); 3373 pagezero(CADDR3); 3374 PT_SET_MA(CADDR3, 0); 3375 sched_unpin(); 3376} 3377 3378/* 3379 * pmap_copy_page copies the specified (machine independent) 3380 * page by mapping the page into virtual memory and using 3381 * bcopy to copy the page, one machine dependent page at a 3382 * time. 3383 */ 3384void 3385pmap_copy_page(vm_page_t src, vm_page_t dst) 3386{ 3387 struct sysmaps *sysmaps; 3388 3389 sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)]; 3390 mtx_lock(&sysmaps->lock); 3391 if (*sysmaps->CMAP1) 3392 panic("pmap_copy_page: CMAP1 busy"); 3393 if (*sysmaps->CMAP2) 3394 panic("pmap_copy_page: CMAP2 busy"); 3395 sched_pin(); 3396 PT_SET_MA(sysmaps->CADDR1, PG_V | xpmap_ptom(VM_PAGE_TO_PHYS(src)) | PG_A); 3397 PT_SET_MA(sysmaps->CADDR2, PG_V | PG_RW | xpmap_ptom(VM_PAGE_TO_PHYS(dst)) | PG_A | PG_M); 3398 bcopy(sysmaps->CADDR1, sysmaps->CADDR2, PAGE_SIZE); 3399 PT_SET_MA(sysmaps->CADDR1, 0); 3400 PT_SET_MA(sysmaps->CADDR2, 0); 3401 sched_unpin(); 3402 mtx_unlock(&sysmaps->lock); 3403} 3404 3405/* 3406 * Returns true if the pmap's pv is one of the first 3407 * 16 pvs linked to from this page. This count may 3408 * be changed upwards or downwards in the future; it 3409 * is only necessary that true be returned for a small 3410 * subset of pmaps for proper page aging. 
3411 */ 3412boolean_t 3413pmap_page_exists_quick(pmap_t pmap, vm_page_t m) 3414{ 3415 pv_entry_t pv; 3416 int loops = 0; 3417 3418 if (m->flags & PG_FICTITIOUS) 3419 return (FALSE); 3420 3421 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 3422 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { 3423 if (PV_PMAP(pv) == pmap) { 3424 return TRUE; 3425 } 3426 loops++; 3427 if (loops >= 16) 3428 break; 3429 } 3430 return (FALSE); 3431} 3432 3433/* 3434 * pmap_page_wired_mappings: 3435 * 3436 * Return the number of managed mappings to the given physical page 3437 * that are wired. 3438 */ 3439int 3440pmap_page_wired_mappings(vm_page_t m) 3441{ 3442 pv_entry_t pv; 3443 pt_entry_t *pte; 3444 pmap_t pmap; 3445 int count; 3446 3447 count = 0; 3448 if ((m->flags & PG_FICTITIOUS) != 0) 3449 return (count); 3450 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 3451 sched_pin(); 3452 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { 3453 pmap = PV_PMAP(pv); 3454 PMAP_LOCK(pmap); 3455 pte = pmap_pte_quick(pmap, pv->pv_va); 3456 if ((*pte & PG_W) != 0) 3457 count++; 3458 PMAP_UNLOCK(pmap); 3459 } 3460 sched_unpin(); 3461 return (count); 3462} 3463 3464/* 3465 * Returns TRUE if the given page is mapped individually or as part of 3466 * a 4mpage. Otherwise, returns FALSE. 3467 */ 3468boolean_t 3469pmap_page_is_mapped(vm_page_t m) 3470{ 3471 struct md_page *pvh; 3472 3473 if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0) 3474 return (FALSE); 3475 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 3476 if (TAILQ_EMPTY(&m->md.pv_list)) { 3477 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); 3478 return (!TAILQ_EMPTY(&pvh->pv_list)); 3479 } else 3480 return (TRUE); 3481} 3482 3483/* 3484 * Remove all pages from specified address space 3485 * this aids process exit speeds. Also, this code 3486 * is special cased for current process only, but 3487 * can have the more generic (and slightly slower) 3488 * mode enabled. This is much faster than pmap_remove 3489 * in the case of running down an entire address space. 3490 */ 3491void 3492pmap_remove_pages(pmap_t pmap) 3493{ 3494 pt_entry_t *pte, tpte; 3495 vm_page_t m, free = NULL; 3496 pv_entry_t pv; 3497 struct pv_chunk *pc, *npc; 3498 int field, idx; 3499 int32_t bit; 3500 uint32_t inuse, bitmask; 3501 int allfree; 3502 3503 CTR1(KTR_PMAP, "pmap_remove_pages: pmap=%p", pmap); 3504 3505 if (pmap != vmspace_pmap(curthread->td_proc->p_vmspace)) { 3506 printf("warning: pmap_remove_pages called with non-current pmap\n"); 3507 return; 3508 } 3509 vm_page_lock_queues(); 3510 KASSERT(pmap_is_current(pmap), ("removing pages from non-current pmap")); 3511 PMAP_LOCK(pmap); 3512 sched_pin(); 3513 TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) { 3514 allfree = 1; 3515 for (field = 0; field < _NPCM; field++) { 3516 inuse = (~(pc->pc_map[field])) & pc_freemask[field]; 3517 while (inuse != 0) { 3518 bit = bsfl(inuse); 3519 bitmask = 1UL << bit; 3520 idx = field * 32 + bit; 3521 pv = &pc->pc_pventry[idx]; 3522 inuse &= ~bitmask; 3523 3524 pte = vtopte(pv->pv_va); 3525 tpte = *pte ? 
xpmap_mtop(*pte) : 0; 3526 3527 if (tpte == 0) { 3528 printf( 3529 "TPTE at %p IS ZERO @ VA %08x\n", 3530 pte, pv->pv_va); 3531 panic("bad pte"); 3532 } 3533 3534/* 3535 * We cannot remove wired pages from a process' mapping at this time 3536 */ 3537 if (tpte & PG_W) { 3538 allfree = 0; 3539 continue; 3540 } 3541 3542 m = PHYS_TO_VM_PAGE(tpte & PG_FRAME); 3543 KASSERT(m->phys_addr == (tpte & PG_FRAME), 3544 ("vm_page_t %p phys_addr mismatch %016jx %016jx", 3545 m, (uintmax_t)m->phys_addr, 3546 (uintmax_t)tpte)); 3547 3548 KASSERT(m < &vm_page_array[vm_page_array_size], 3549 ("pmap_remove_pages: bad tpte %#jx", 3550 (uintmax_t)tpte)); 3551 3552 3553 PT_CLEAR_VA(pte, FALSE); 3554 3555 /* 3556 * Update the vm_page_t clean/reference bits. 3557 */ 3558 if (tpte & PG_M) 3559 vm_page_dirty(m); 3560 3561 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list); 3562 if (TAILQ_EMPTY(&m->md.pv_list)) 3563 vm_page_flag_clear(m, PG_WRITEABLE); 3564 3565 pmap_unuse_pt(pmap, pv->pv_va, &free); 3566 3567 /* Mark free */ 3568 PV_STAT(pv_entry_frees++); 3569 PV_STAT(pv_entry_spare++); 3570 pv_entry_count--; 3571 pc->pc_map[field] |= bitmask; 3572 pmap->pm_stats.resident_count--; 3573 } 3574 } 3575 PT_UPDATES_FLUSH(); 3576 if (allfree) { 3577 PV_STAT(pv_entry_spare -= _NPCPV); 3578 PV_STAT(pc_chunk_count--); 3579 PV_STAT(pc_chunk_frees++); 3580 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 3581 m = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)pc)); 3582 pmap_qremove((vm_offset_t)pc, 1); 3583 vm_page_unwire(m, 0); 3584 vm_page_free(m); 3585 pmap_ptelist_free(&pv_vafree, (vm_offset_t)pc); 3586 } 3587 } 3588 PT_UPDATES_FLUSH(); 3589 if (*PMAP1) 3590 PT_SET_MA(PADDR1, 0); 3591 3592 sched_unpin(); 3593 pmap_invalidate_all(pmap); 3594 vm_page_unlock_queues(); 3595 PMAP_UNLOCK(pmap); 3596 pmap_free_zero_pages(free); 3597} 3598 3599/* 3600 * pmap_is_modified: 3601 * 3602 * Return whether or not the specified physical page was modified 3603 * in any physical maps. 3604 */ 3605boolean_t 3606pmap_is_modified(vm_page_t m) 3607{ 3608 pv_entry_t pv; 3609 pt_entry_t *pte; 3610 pmap_t pmap; 3611 boolean_t rv; 3612 3613 rv = FALSE; 3614 if (m->flags & PG_FICTITIOUS) 3615 return (rv); 3616 3617 sched_pin(); 3618 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 3619 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { 3620 pmap = PV_PMAP(pv); 3621 PMAP_LOCK(pmap); 3622 pte = pmap_pte_quick(pmap, pv->pv_va); 3623 rv = (*pte & PG_M) != 0; 3624 PMAP_UNLOCK(pmap); 3625 if (rv) 3626 break; 3627 } 3628 if (*PMAP1) 3629 PT_SET_MA(PADDR1, 0); 3630 sched_unpin(); 3631 return (rv); 3632} 3633 3634/* 3635 * pmap_is_prefaultable: 3636 * 3637 * Return whether or not the specified virtual address is elgible 3638 * for prefault. 
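 *
 * Note that the locked helper below returns FALSE before its page table
 * check is ever reached, so prefaulting is effectively disabled in this
 * pmap.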
3639 */ 3640static boolean_t 3641pmap_is_prefaultable_locked(pmap_t pmap, vm_offset_t addr) 3642{ 3643 pt_entry_t *pte; 3644 boolean_t rv = FALSE; 3645 3646 return (rv); 3647 3648 if (pmap_is_current(pmap) && *pmap_pde(pmap, addr)) { 3649 pte = vtopte(addr); 3650 rv = (*pte == 0); 3651 } 3652 return (rv); 3653} 3654 3655boolean_t 3656pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr) 3657{ 3658 boolean_t rv; 3659 3660 PMAP_LOCK(pmap); 3661 rv = pmap_is_prefaultable_locked(pmap, addr); 3662 PMAP_UNLOCK(pmap); 3663 return (rv); 3664} 3665 3666void 3667pmap_map_readonly(pmap_t pmap, vm_offset_t va, int len) 3668{ 3669 int i, npages = round_page(len) >> PAGE_SHIFT; 3670 for (i = 0; i < npages; i++) { 3671 pt_entry_t *pte; 3672 pte = pmap_pte(pmap, (vm_offset_t)(va + i*PAGE_SIZE)); 3673 pte_store(pte, xpmap_mtop(*pte & ~(PG_RW|PG_M))); 3674 PMAP_MARK_PRIV(xpmap_mtop(*pte)); 3675 pmap_pte_release(pte); 3676 } 3677} 3678 3679void 3680pmap_map_readwrite(pmap_t pmap, vm_offset_t va, int len) 3681{ 3682 int i, npages = round_page(len) >> PAGE_SHIFT; 3683 for (i = 0; i < npages; i++) { 3684 pt_entry_t *pte; 3685 pte = pmap_pte(pmap, (vm_offset_t)(va + i*PAGE_SIZE)); 3686 PMAP_MARK_UNPRIV(xpmap_mtop(*pte)); 3687 pte_store(pte, xpmap_mtop(*pte) | (PG_RW|PG_M)); 3688 pmap_pte_release(pte); 3689 } 3690} 3691 3692/* 3693 * Clear the write and modified bits in each of the given page's mappings. 3694 */ 3695void 3696pmap_remove_write(vm_page_t m) 3697{ 3698 pv_entry_t pv; 3699 pmap_t pmap; 3700 pt_entry_t oldpte, *pte; 3701 3702 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 3703 if ((m->flags & PG_FICTITIOUS) != 0 || 3704 (m->flags & PG_WRITEABLE) == 0) 3705 return; 3706 sched_pin(); 3707 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { 3708 pmap = PV_PMAP(pv); 3709 PMAP_LOCK(pmap); 3710 pte = pmap_pte_quick(pmap, pv->pv_va); 3711retry: 3712 oldpte = *pte; 3713 if ((oldpte & PG_RW) != 0) { 3714 /* 3715 * Regardless of whether a pte is 32 or 64 bits 3716 * in size, PG_RW and PG_M are among the least 3717 * significant 32 bits. 3718 */ 3719 if (!atomic_cmpset_int((u_int *)pte, oldpte, 3720 oldpte & ~(PG_RW | PG_M))) 3721 goto retry; 3722 if ((oldpte & PG_M) != 0) 3723 vm_page_dirty(m); 3724 pmap_invalidate_page(pmap, pv->pv_va); 3725 } 3726 PMAP_UNLOCK(pmap); 3727 } 3728 vm_page_flag_clear(m, PG_WRITEABLE); 3729 PT_UPDATES_FLUSH(); 3730 if (*PMAP1) 3731 PT_SET_MA(PADDR1, 0); 3732 sched_unpin(); 3733} 3734 3735/* 3736 * pmap_ts_referenced: 3737 * 3738 * Return a count of reference bits for a page, clearing those bits. 3739 * It is not necessary for every reference bit to be cleared, but it 3740 * is necessary that 0 only be returned when there are truly no 3741 * reference bits set. 3742 * 3743 * XXX: The exact number of bits to check and clear is a matter that 3744 * should be tested and standardized at some point in the future for 3745 * optimal aging of shared pages. 
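 *
 * In the loop below the scan stops early once more than four reference
 * bits have been cleared, keeping the cost bounded while still meeting
 * the contract described above.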
3746 */ 3747int 3748pmap_ts_referenced(vm_page_t m) 3749{ 3750 pv_entry_t pv, pvf, pvn; 3751 pmap_t pmap; 3752 pt_entry_t *pte; 3753 int rtval = 0; 3754 3755 if (m->flags & PG_FICTITIOUS) 3756 return (rtval); 3757 sched_pin(); 3758 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 3759 if ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) { 3760 pvf = pv; 3761 do { 3762 pvn = TAILQ_NEXT(pv, pv_list); 3763 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list); 3764 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list); 3765 pmap = PV_PMAP(pv); 3766 PMAP_LOCK(pmap); 3767 pte = pmap_pte_quick(pmap, pv->pv_va); 3768 if ((*pte & PG_A) != 0) { 3769 PT_SET_VA_MA(pte, *pte & ~PG_A, FALSE); 3770 pmap_invalidate_page(pmap, pv->pv_va); 3771 rtval++; 3772 if (rtval > 4) 3773 pvn = NULL; 3774 } 3775 PMAP_UNLOCK(pmap); 3776 } while ((pv = pvn) != NULL && pv != pvf); 3777 } 3778 PT_UPDATES_FLUSH(); 3779 if (*PMAP1) 3780 PT_SET_MA(PADDR1, 0); 3781 3782 sched_unpin(); 3783 return (rtval); 3784} 3785 3786/* 3787 * Clear the modify bits on the specified physical page. 3788 */ 3789void 3790pmap_clear_modify(vm_page_t m) 3791{ 3792 pv_entry_t pv; 3793 pmap_t pmap; 3794 pt_entry_t *pte; 3795 3796 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 3797 if ((m->flags & PG_FICTITIOUS) != 0) 3798 return; 3799 sched_pin(); 3800 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { 3801 pmap = PV_PMAP(pv); 3802 PMAP_LOCK(pmap); 3803 pte = pmap_pte_quick(pmap, pv->pv_va); 3804 if ((*pte & PG_M) != 0) { 3805 /* 3806 * Regardless of whether a pte is 32 or 64 bits 3807 * in size, PG_M is among the least significant 3808 * 32 bits. 3809 */ 3810 PT_SET_VA_MA(pte, *pte & ~PG_M, FALSE); 3811 pmap_invalidate_page(pmap, pv->pv_va); 3812 } 3813 PMAP_UNLOCK(pmap); 3814 } 3815 sched_unpin(); 3816} 3817 3818/* 3819 * pmap_clear_reference: 3820 * 3821 * Clear the reference bit on the specified physical page. 3822 */ 3823void 3824pmap_clear_reference(vm_page_t m) 3825{ 3826 pv_entry_t pv; 3827 pmap_t pmap; 3828 pt_entry_t *pte; 3829 3830 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 3831 if ((m->flags & PG_FICTITIOUS) != 0) 3832 return; 3833 sched_pin(); 3834 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { 3835 pmap = PV_PMAP(pv); 3836 PMAP_LOCK(pmap); 3837 pte = pmap_pte_quick(pmap, pv->pv_va); 3838 if ((*pte & PG_A) != 0) { 3839 /* 3840 * Regardless of whether a pte is 32 or 64 bits 3841 * in size, PG_A is among the least significant 3842 * 32 bits. 3843 */ 3844 PT_SET_VA_MA(pte, *pte & ~PG_A, FALSE); 3845 pmap_invalidate_page(pmap, pv->pv_va); 3846 } 3847 PMAP_UNLOCK(pmap); 3848 } 3849 sched_unpin(); 3850} 3851 3852/* 3853 * Miscellaneous support routines follow 3854 */ 3855 3856/* 3857 * Map a set of physical memory pages into the kernel virtual 3858 * address space. Return a pointer to where it is mapped. This 3859 * routine is intended to be used for mapping device memory, 3860 * NOT real memory. 
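 *
 * Typical usage (hypothetical register BAR, not taken from this file):
 *
 *	void *regs = pmap_mapdev(0xfebf0000, PAGE_SIZE);
 *	...
 *	pmap_unmapdev((vm_offset_t)regs, PAGE_SIZE);
 *
 * pmap_mapdev() and pmap_mapbios() below are thin wrappers that call
 * pmap_mapdev_attr() with PAT_UNCACHEABLE and PAT_WRITE_BACK respectively.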
3861 */ 3862void * 3863pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, int mode) 3864{ 3865 vm_offset_t va, tmpva, offset; 3866 3867 offset = pa & PAGE_MASK; 3868 size = roundup(offset + size, PAGE_SIZE); 3869 pa = pa & PG_FRAME; 3870 3871 if (pa < KERNLOAD && pa + size <= KERNLOAD) 3872 va = KERNBASE + pa; 3873 else 3874 va = kmem_alloc_nofault(kernel_map, size); 3875 if (!va) 3876 panic("pmap_mapdev: Couldn't alloc kernel virtual memory"); 3877 3878 for (tmpva = va; size > 0; ) { 3879 pmap_kenter_attr(tmpva, pa, mode); 3880 size -= PAGE_SIZE; 3881 tmpva += PAGE_SIZE; 3882 pa += PAGE_SIZE; 3883 } 3884 pmap_invalidate_range(kernel_pmap, va, tmpva); 3885 pmap_invalidate_cache(); 3886 return ((void *)(va + offset)); 3887} 3888 3889void * 3890pmap_mapdev(vm_paddr_t pa, vm_size_t size) 3891{ 3892 3893 return (pmap_mapdev_attr(pa, size, PAT_UNCACHEABLE)); 3894} 3895 3896void * 3897pmap_mapbios(vm_paddr_t pa, vm_size_t size) 3898{ 3899 3900 return (pmap_mapdev_attr(pa, size, PAT_WRITE_BACK)); 3901} 3902 3903void 3904pmap_unmapdev(vm_offset_t va, vm_size_t size) 3905{ 3906 vm_offset_t base, offset, tmpva; 3907 3908 if (va >= KERNBASE && va + size <= KERNBASE + KERNLOAD) 3909 return; 3910 base = trunc_page(va); 3911 offset = va & PAGE_MASK; 3912 size = roundup(offset + size, PAGE_SIZE); 3913 critical_enter(); 3914 for (tmpva = base; tmpva < (base + size); tmpva += PAGE_SIZE) 3915 pmap_kremove(tmpva); 3916 pmap_invalidate_range(kernel_pmap, va, tmpva); 3917 critical_exit(); 3918 kmem_free(kernel_map, base, size); 3919} 3920 3921int 3922pmap_change_attr(va, size, mode) 3923 vm_offset_t va; 3924 vm_size_t size; 3925 int mode; 3926{ 3927 vm_offset_t base, offset, tmpva; 3928 pt_entry_t *pte; 3929 u_int opte, npte; 3930 pd_entry_t *pde; 3931 3932 base = trunc_page(va); 3933 offset = va & PAGE_MASK; 3934 size = roundup(offset + size, PAGE_SIZE); 3935 3936 /* Only supported on kernel virtual addresses. */ 3937 if (base <= VM_MAXUSER_ADDRESS) 3938 return (EINVAL); 3939 3940 /* 4MB pages and pages that aren't mapped aren't supported. */ 3941 for (tmpva = base; tmpva < (base + size); tmpva += PAGE_SIZE) { 3942 pde = pmap_pde(kernel_pmap, tmpva); 3943 if (*pde & PG_PS) 3944 return (EINVAL); 3945 if ((*pde & PG_V) == 0) 3946 return (EINVAL); 3947 pte = vtopte(va); 3948 if ((*pte & PG_V) == 0) 3949 return (EINVAL); 3950 } 3951 3952 /* 3953 * Ok, all the pages exist and are 4k, so run through them updating 3954 * their cache mode. 3955 */ 3956 for (tmpva = base; size > 0; ) { 3957 pte = vtopte(tmpva); 3958 3959 /* 3960 * The cache mode bits are all in the low 32-bits of the 3961 * PTE, so we can just spin on updating the low 32-bits. 3962 */ 3963 do { 3964 opte = *(u_int *)pte; 3965 npte = opte & ~(PG_PTE_PAT | PG_NC_PCD | PG_NC_PWT); 3966 npte |= pmap_cache_bits(mode, 0); 3967 PT_SET_VA_MA(pte, npte, TRUE); 3968 } while (npte != opte && (*pte != npte)); 3969 tmpva += PAGE_SIZE; 3970 size -= PAGE_SIZE; 3971 } 3972 3973 /* 3974 * Flush CPU caches to make sure any data isn't cached that shouldn't 3975 * be, etc. 3976 */ 3977 pmap_invalidate_range(kernel_pmap, base, tmpva); 3978 pmap_invalidate_cache(); 3979 return (0); 3980} 3981 3982/* 3983 * perform the pmap work for mincore 3984 */ 3985int 3986pmap_mincore(pmap_t pmap, vm_offset_t addr) 3987{ 3988 pt_entry_t *ptep, pte; 3989 vm_page_t m; 3990 int val = 0; 3991 3992 PMAP_LOCK(pmap); 3993 ptep = pmap_pte(pmap, addr); 3994 pte = (ptep != NULL) ? 
PT_GET(ptep) : 0; 3995 pmap_pte_release(ptep); 3996 PMAP_UNLOCK(pmap); 3997 3998 if (pte != 0) { 3999 vm_paddr_t pa; 4000 4001 val = MINCORE_INCORE; 4002 if ((pte & PG_MANAGED) == 0) 4003 return val; 4004 4005 pa = pte & PG_FRAME; 4006 4007 m = PHYS_TO_VM_PAGE(pa); 4008 4009 /* 4010 * Modified by us 4011 */ 4012 if (pte & PG_M) 4013 val |= MINCORE_MODIFIED|MINCORE_MODIFIED_OTHER; 4014 else { 4015 /* 4016 * Modified by someone else 4017 */ 4018 vm_page_lock_queues(); 4019 if (m->dirty || pmap_is_modified(m)) 4020 val |= MINCORE_MODIFIED_OTHER; 4021 vm_page_unlock_queues(); 4022 } 4023 /* 4024 * Referenced by us 4025 */ 4026 if (pte & PG_A) 4027 val |= MINCORE_REFERENCED|MINCORE_REFERENCED_OTHER; 4028 else { 4029 /* 4030 * Referenced by someone else 4031 */ 4032 vm_page_lock_queues(); 4033 if ((m->flags & PG_REFERENCED) || 4034 pmap_ts_referenced(m)) { 4035 val |= MINCORE_REFERENCED_OTHER; 4036 vm_page_flag_set(m, PG_REFERENCED); 4037 } 4038 vm_page_unlock_queues(); 4039 } 4040 } 4041 return val; 4042} 4043 4044void 4045pmap_activate(struct thread *td) 4046{ 4047 pmap_t pmap, oldpmap; 4048 u_int32_t cr3; 4049 4050 critical_enter(); 4051 pmap = vmspace_pmap(td->td_proc->p_vmspace); 4052 oldpmap = PCPU_GET(curpmap); 4053#if defined(SMP) 4054 atomic_clear_int(&oldpmap->pm_active, PCPU_GET(cpumask)); 4055 atomic_set_int(&pmap->pm_active, PCPU_GET(cpumask)); 4056#else 4057 oldpmap->pm_active &= ~1; 4058 pmap->pm_active |= 1; 4059#endif 4060#ifdef PAE 4061 cr3 = vtophys(pmap->pm_pdpt); 4062#else 4063 cr3 = vtophys(pmap->pm_pdir); 4064#endif 4065 /* 4066 * pmap_activate is for the current thread on the current cpu 4067 */ 4068 td->td_pcb->pcb_cr3 = cr3; 4069 PT_UPDATES_FLUSH(); 4070 load_cr3(cr3); 4071 4072 PCPU_SET(curpmap, pmap); 4073 critical_exit(); 4074} 4075 4076/* 4077 * Increase the starting virtual address of the given mapping if a 4078 * different alignment might result in more superpage mappings. 
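 *
 * Worked example (hypothetical values, assuming the requested size spans
 * at least one full superpage): with an object offset of 0x123000 and a
 * candidate *addr of 0x20000000, the superpage offset is 0x123000, so
 * *addr is advanced to 0x20123000 and the virtual address then agrees
 * with the object offset modulo the superpage size.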
4079 */ 4080void 4081pmap_align_superpage(vm_object_t object, vm_ooffset_t offset, 4082 vm_offset_t *addr, vm_size_t size) 4083{ 4084 vm_offset_t superpage_offset; 4085 4086 if (size < NBPDR) 4087 return; 4088 if (object != NULL && (object->flags & OBJ_COLORED) != 0) 4089 offset += ptoa(object->pg_color); 4090 superpage_offset = offset & PDRMASK; 4091 if (size - ((NBPDR - superpage_offset) & PDRMASK) < NBPDR || 4092 (*addr & PDRMASK) == superpage_offset) 4093 return; 4094 if ((*addr & PDRMASK) < superpage_offset) 4095 *addr = (*addr & ~PDRMASK) + superpage_offset; 4096 else 4097 *addr = ((*addr + PDRMASK) & ~PDRMASK) + superpage_offset; 4098} 4099 4100#if defined(PMAP_DEBUG) 4101pmap_pid_dump(int pid) 4102{ 4103 pmap_t pmap; 4104 struct proc *p; 4105 int npte = 0; 4106 int index; 4107 4108 sx_slock(&allproc_lock); 4109 FOREACH_PROC_IN_SYSTEM(p) { 4110 if (p->p_pid != pid) 4111 continue; 4112 4113 if (p->p_vmspace) { 4114 int i,j; 4115 index = 0; 4116 pmap = vmspace_pmap(p->p_vmspace); 4117 for (i = 0; i < NPDEPTD; i++) { 4118 pd_entry_t *pde; 4119 pt_entry_t *pte; 4120 vm_offset_t base = i << PDRSHIFT; 4121 4122 pde = &pmap->pm_pdir[i]; 4123 if (pde && pmap_pde_v(pde)) { 4124 for (j = 0; j < NPTEPG; j++) { 4125 vm_offset_t va = base + (j << PAGE_SHIFT); 4126 if (va >= (vm_offset_t) VM_MIN_KERNEL_ADDRESS) { 4127 if (index) { 4128 index = 0; 4129 printf("\n"); 4130 } 4131 sx_sunlock(&allproc_lock); 4132 return npte; 4133 } 4134 pte = pmap_pte(pmap, va); 4135 if (pte && pmap_pte_v(pte)) { 4136 pt_entry_t pa; 4137 vm_page_t m; 4138 pa = PT_GET(pte); 4139 m = PHYS_TO_VM_PAGE(pa & PG_FRAME); 4140 printf("va: 0x%x, pt: 0x%x, h: %d, w: %d, f: 0x%x", 4141 va, pa, m->hold_count, m->wire_count, m->flags); 4142 npte++; 4143 index++; 4144 if (index >= 2) { 4145 index = 0; 4146 printf("\n"); 4147 } else { 4148 printf(" "); 4149 } 4150 } 4151 } 4152 } 4153 } 4154 } 4155 } 4156 sx_sunlock(&allproc_lock); 4157 return npte; 4158} 4159#endif 4160 4161#if defined(DEBUG) 4162 4163static void pads(pmap_t pm); 4164void pmap_pvdump(vm_paddr_t pa); 4165 4166/* print address space of pmap*/ 4167static void 4168pads(pmap_t pm) 4169{ 4170 int i, j; 4171 vm_paddr_t va; 4172 pt_entry_t *ptep; 4173 4174 if (pm == kernel_pmap) 4175 return; 4176 for (i = 0; i < NPDEPTD; i++) 4177 if (pm->pm_pdir[i]) 4178 for (j = 0; j < NPTEPG; j++) { 4179 va = (i << PDRSHIFT) + (j << PAGE_SHIFT); 4180 if (pm == kernel_pmap && va < KERNBASE) 4181 continue; 4182 if (pm != kernel_pmap && va > UPT_MAX_ADDRESS) 4183 continue; 4184 ptep = pmap_pte(pm, va); 4185 if (pmap_pte_v(ptep)) 4186 printf("%x:%x ", va, *ptep); 4187 }; 4188 4189} 4190 4191void 4192pmap_pvdump(vm_paddr_t pa) 4193{ 4194 pv_entry_t pv; 4195 pmap_t pmap; 4196 vm_page_t m; 4197 4198 printf("pa %x", pa); 4199 m = PHYS_TO_VM_PAGE(pa); 4200 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { 4201 pmap = PV_PMAP(pv); 4202 printf(" -> pmap %p, va %x", (void *)pmap, pv->pv_va); 4203 pads(pmap); 4204 } 4205 printf(" "); 4206} 4207#endif 4208