pmap.c revision 199184
1/*- 2 * Copyright (c) 1991 Regents of the University of California. 3 * All rights reserved. 4 * Copyright (c) 1994 John S. Dyson 5 * All rights reserved. 6 * Copyright (c) 1994 David Greenman 7 * All rights reserved. 8 * Copyright (c) 2005-2008 Alan L. Cox <alc@cs.rice.edu> 9 * All rights reserved. 10 * 11 * This code is derived from software contributed to Berkeley by 12 * the Systems Programming Group of the University of Utah Computer 13 * Science Department and William Jolitz of UUNET Technologies Inc. 14 * 15 * Redistribution and use in source and binary forms, with or without 16 * modification, are permitted provided that the following conditions 17 * are met: 18 * 1. Redistributions of source code must retain the above copyright 19 * notice, this list of conditions and the following disclaimer. 20 * 2. Redistributions in binary form must reproduce the above copyright 21 * notice, this list of conditions and the following disclaimer in the 22 * documentation and/or other materials provided with the distribution. 23 * 3. All advertising materials mentioning features or use of this software 24 * must display the following acknowledgement: 25 * This product includes software developed by the University of 26 * California, Berkeley and its contributors. 27 * 4. Neither the name of the University nor the names of its contributors 28 * may be used to endorse or promote products derived from this software 29 * without specific prior written permission. 30 * 31 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 32 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 33 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 34 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 35 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 36 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 37 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 38 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 39 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 40 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 41 * SUCH DAMAGE. 42 * 43 * from: @(#)pmap.c 7.7 (Berkeley) 5/12/91 44 */ 45/*- 46 * Copyright (c) 2003 Networks Associates Technology, Inc. 47 * All rights reserved. 48 * 49 * This software was developed for the FreeBSD Project by Jake Burkholder, 50 * Safeport Network Services, and Network Associates Laboratories, the 51 * Security Research Division of Network Associates, Inc. under 52 * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA 53 * CHATS research program. 54 * 55 * Redistribution and use in source and binary forms, with or without 56 * modification, are permitted provided that the following conditions 57 * are met: 58 * 1. Redistributions of source code must retain the above copyright 59 * notice, this list of conditions and the following disclaimer. 60 * 2. Redistributions in binary form must reproduce the above copyright 61 * notice, this list of conditions and the following disclaimer in the 62 * documentation and/or other materials provided with the distribution. 63 * 64 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 65 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 66 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 67 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 68 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 69 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 70 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 71 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 72 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 73 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 74 * SUCH DAMAGE. 75 */ 76 77#include <sys/cdefs.h> 78__FBSDID("$FreeBSD: head/sys/i386/i386/pmap.c 199184 2009-11-11 14:21:31Z avg $"); 79 80/* 81 * Manages physical address maps. 82 * 83 * In addition to hardware address maps, this 84 * module is called upon to provide software-use-only 85 * maps which may or may not be stored in the same 86 * form as hardware maps. These pseudo-maps are 87 * used to store intermediate results from copy 88 * operations to and from address spaces. 89 * 90 * Since the information managed by this module is 91 * also stored by the logical address mapping module, 92 * this module may throw away valid virtual-to-physical 93 * mappings at almost any time. However, invalidations 94 * of virtual-to-physical mappings must be done as 95 * requested. 96 * 97 * In order to cope with hardware architectures which 98 * make virtual-to-physical map invalidates expensive, 99 * this module may delay invalidate or reduced protection 100 * operations until such time as they are actually 101 * necessary. This module is given full information as 102 * to which processors are currently using which maps, 103 * and to when physical maps must be made correct. 104 */ 105 106#include "opt_cpu.h" 107#include "opt_pmap.h" 108#include "opt_msgbuf.h" 109#include "opt_smp.h" 110#include "opt_xbox.h" 111 112#include <sys/param.h> 113#include <sys/systm.h> 114#include <sys/kernel.h> 115#include <sys/ktr.h> 116#include <sys/lock.h> 117#include <sys/malloc.h> 118#include <sys/mman.h> 119#include <sys/msgbuf.h> 120#include <sys/mutex.h> 121#include <sys/proc.h> 122#include <sys/sf_buf.h> 123#include <sys/sx.h> 124#include <sys/vmmeter.h> 125#include <sys/sched.h> 126#include <sys/sysctl.h> 127#ifdef SMP 128#include <sys/smp.h> 129#endif 130 131#include <vm/vm.h> 132#include <vm/vm_param.h> 133#include <vm/vm_kern.h> 134#include <vm/vm_page.h> 135#include <vm/vm_map.h> 136#include <vm/vm_object.h> 137#include <vm/vm_extern.h> 138#include <vm/vm_pageout.h> 139#include <vm/vm_pager.h> 140#include <vm/vm_reserv.h> 141#include <vm/uma.h> 142 143#include <machine/cpu.h> 144#include <machine/cputypes.h> 145#include <machine/md_var.h> 146#include <machine/pcb.h> 147#include <machine/specialreg.h> 148#ifdef SMP 149#include <machine/smp.h> 150#endif 151 152#ifdef XBOX 153#include <machine/xbox.h> 154#endif 155 156#if !defined(CPU_DISABLE_SSE) && defined(I686_CPU) 157#define CPU_ENABLE_SSE 158#endif 159 160#ifndef PMAP_SHPGPERPROC 161#define PMAP_SHPGPERPROC 200 162#endif 163 164#if !defined(DIAGNOSTIC) 165#define PMAP_INLINE __gnu89_inline 166#else 167#define PMAP_INLINE 168#endif 169 170#define PV_STATS 171#ifdef PV_STATS 172#define PV_STAT(x) do { x ; } while (0) 173#else 174#define PV_STAT(x) do { } while (0) 175#endif 176 177#define pa_index(pa) ((pa) >> PDRSHIFT) 178#define pa_to_pvh(pa) (&pv_table[pa_index(pa)]) 179 180/* 181 * Get PDEs and PTEs for user/kernel address space 182 */ 183#define pmap_pde(m, v) (&((m)->pm_pdir[(vm_offset_t)(v) >> PDRSHIFT])) 184#define pdir_pde(m, v) (m[(vm_offset_t)(v) >> PDRSHIFT]) 185 186#define pmap_pde_v(pte) ((*(int *)pte & PG_V) != 0) 187#define pmap_pte_w(pte) ((*(int *)pte & PG_W) != 0) 188#define pmap_pte_m(pte) ((*(int *)pte & PG_M) != 0) 189#define pmap_pte_u(pte) ((*(int *)pte & PG_A) != 0) 190#define pmap_pte_v(pte) ((*(int *)pte & PG_V) != 0) 191 192#define pmap_pte_set_w(pte, v) ((v) ? atomic_set_int((u_int *)(pte), PG_W) : \ 193 atomic_clear_int((u_int *)(pte), PG_W)) 194#define pmap_pte_set_prot(pte, v) ((*(int *)pte &= ~PG_PROT), (*(int *)pte |= (v))) 195 196struct pmap kernel_pmap_store; 197LIST_HEAD(pmaplist, pmap); 198static struct pmaplist allpmaps; 199static struct mtx allpmaps_lock; 200 201vm_offset_t virtual_avail; /* VA of first avail page (after kernel bss) */ 202vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */ 203int pgeflag = 0; /* PG_G or-in */ 204int pseflag = 0; /* PG_PS or-in */ 205 206static int nkpt; 207vm_offset_t kernel_vm_end; 208extern u_int32_t KERNend; 209 210#ifdef PAE 211pt_entry_t pg_nx; 212static uma_zone_t pdptzone; 213#endif 214 215static int pat_works = 0; /* Is page attribute table sane? */ 216 217SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD, 0, "VM/pmap parameters"); 218 219static int pg_ps_enabled; 220SYSCTL_INT(_vm_pmap, OID_AUTO, pg_ps_enabled, CTLFLAG_RDTUN, &pg_ps_enabled, 0, 221 "Are large page mappings enabled?"); 222 223/* 224 * Data for the pv entry allocation mechanism 225 */ 226static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0; 227static struct md_page *pv_table; 228static int shpgperproc = PMAP_SHPGPERPROC; 229 230struct pv_chunk *pv_chunkbase; /* KVA block for pv_chunks */ 231int pv_maxchunks; /* How many chunks we have KVA for */ 232vm_offset_t pv_vafree; /* freelist stored in the PTE */ 233 234/* 235 * All those kernel PT submaps that BSD is so fond of 236 */ 237struct sysmaps { 238 struct mtx lock; 239 pt_entry_t *CMAP1; 240 pt_entry_t *CMAP2; 241 caddr_t CADDR1; 242 caddr_t CADDR2; 243}; 244static struct sysmaps sysmaps_pcpu[MAXCPU]; 245pt_entry_t *CMAP1 = 0; 246static pt_entry_t *CMAP3; 247caddr_t CADDR1 = 0, ptvmmap = 0; 248static caddr_t CADDR3; 249struct msgbuf *msgbufp = 0; 250 251/* 252 * Crashdump maps. 253 */ 254static caddr_t crashdumpmap; 255 256static pt_entry_t *PMAP1 = 0, *PMAP2; 257static pt_entry_t *PADDR1 = 0, *PADDR2; 258#ifdef SMP 259static int PMAP1cpu; 260static int PMAP1changedcpu; 261SYSCTL_INT(_debug, OID_AUTO, PMAP1changedcpu, CTLFLAG_RD, 262 &PMAP1changedcpu, 0, 263 "Number of times pmap_pte_quick changed CPU with same PMAP1"); 264#endif 265static int PMAP1changed; 266SYSCTL_INT(_debug, OID_AUTO, PMAP1changed, CTLFLAG_RD, 267 &PMAP1changed, 0, 268 "Number of times pmap_pte_quick changed PMAP1"); 269static int PMAP1unchanged; 270SYSCTL_INT(_debug, OID_AUTO, PMAP1unchanged, CTLFLAG_RD, 271 &PMAP1unchanged, 0, 272 "Number of times pmap_pte_quick didn't change PMAP1"); 273static struct mtx PMAP2mutex; 274 275static void free_pv_entry(pmap_t pmap, pv_entry_t pv); 276static pv_entry_t get_pv_entry(pmap_t locked_pmap, int try); 277static void pmap_pv_demote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa); 278static boolean_t pmap_pv_insert_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa); 279static void pmap_pv_promote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa); 280static void pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va); 281static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, 282 vm_offset_t va); 283static int pmap_pvh_wired_mappings(struct md_page *pvh, int count); 284 285static boolean_t pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va); 286static boolean_t pmap_enter_pde(pmap_t pmap, vm_offset_t va, vm_page_t m, 287 vm_prot_t prot); 288static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, 289 vm_page_t m, vm_prot_t prot, vm_page_t mpte); 290static void pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte); 291static void pmap_fill_ptp(pt_entry_t *firstpte, pt_entry_t newpte); 292static boolean_t pmap_is_modified_pvh(struct md_page *pvh); 293static void pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode); 294static vm_page_t pmap_lookup_pt_page(pmap_t pmap, vm_offset_t va); 295static void pmap_pde_attr(pd_entry_t *pde, int cache_bits); 296static void pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va); 297static boolean_t pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva, 298 vm_prot_t prot); 299static void pmap_pte_attr(pt_entry_t *pte, int cache_bits); 300static void pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva, 301 vm_page_t *free); 302static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t sva, 303 vm_page_t *free); 304static void pmap_remove_pt_page(pmap_t pmap, vm_page_t mpte); 305static void pmap_remove_page(struct pmap *pmap, vm_offset_t va, 306 vm_page_t *free); 307static void pmap_remove_entry(struct pmap *pmap, vm_page_t m, 308 vm_offset_t va); 309static void pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m); 310static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, 311 vm_page_t m); 312 313static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags); 314 315static vm_page_t _pmap_allocpte(pmap_t pmap, unsigned ptepindex, int flags); 316static int _pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m, vm_page_t *free); 317static pt_entry_t *pmap_pte_quick(pmap_t pmap, vm_offset_t va); 318static void pmap_pte_release(pt_entry_t *pte); 319static int pmap_unuse_pt(pmap_t, vm_offset_t, vm_page_t *); 320static vm_offset_t pmap_kmem_choose(vm_offset_t addr); 321#ifdef PAE 322static void *pmap_pdpt_allocf(uma_zone_t zone, int bytes, u_int8_t *flags, int wait); 323#endif 324 325CTASSERT(1 << PDESHIFT == sizeof(pd_entry_t)); 326CTASSERT(1 << PTESHIFT == sizeof(pt_entry_t)); 327 328/* 329 * If you get an error here, then you set KVA_PAGES wrong! See the 330 * description of KVA_PAGES in sys/i386/include/pmap.h. It must be 331 * multiple of 4 for a normal kernel, or a multiple of 8 for a PAE. 332 */ 333CTASSERT(KERNBASE % (1 << 24) == 0); 334 335/* 336 * Move the kernel virtual free pointer to the next 337 * 4MB. This is used to help improve performance 338 * by using a large (4MB) page for much of the kernel 339 * (.text, .data, .bss) 340 */ 341static vm_offset_t 342pmap_kmem_choose(vm_offset_t addr) 343{ 344 vm_offset_t newaddr = addr; 345 346#ifndef DISABLE_PSE 347 if (cpu_feature & CPUID_PSE) 348 newaddr = (addr + PDRMASK) & ~PDRMASK; 349#endif 350 return newaddr; 351} 352 353/* 354 * Bootstrap the system enough to run with virtual memory. 355 * 356 * On the i386 this is called after mapping has already been enabled 357 * and just syncs the pmap module with what has already been done. 358 * [We can't call it easily with mapping off since the kernel is not 359 * mapped with PA == VA, hence we would have to relocate every address 360 * from the linked base (virtual) address "KERNBASE" to the actual 361 * (physical) address starting relative to 0] 362 */ 363void 364pmap_bootstrap(vm_paddr_t firstaddr) 365{ 366 vm_offset_t va; 367 pt_entry_t *pte, *unused; 368 struct sysmaps *sysmaps; 369 int i; 370 371 /* 372 * XXX The calculation of virtual_avail is wrong. It's NKPT*PAGE_SIZE too 373 * large. It should instead be correctly calculated in locore.s and 374 * not based on 'first' (which is a physical address, not a virtual 375 * address, for the start of unused physical memory). The kernel 376 * page tables are NOT double mapped and thus should not be included 377 * in this calculation. 378 */ 379 virtual_avail = (vm_offset_t) KERNBASE + firstaddr; 380 virtual_avail = pmap_kmem_choose(virtual_avail); 381 382 virtual_end = VM_MAX_KERNEL_ADDRESS; 383 384 /* 385 * Initialize the kernel pmap (which is statically allocated). 386 */ 387 PMAP_LOCK_INIT(kernel_pmap); 388 kernel_pmap->pm_pdir = (pd_entry_t *) (KERNBASE + (u_int)IdlePTD); 389#ifdef PAE 390 kernel_pmap->pm_pdpt = (pdpt_entry_t *) (KERNBASE + (u_int)IdlePDPT); 391#endif 392 kernel_pmap->pm_root = NULL; 393 kernel_pmap->pm_active = -1; /* don't allow deactivation */ 394 TAILQ_INIT(&kernel_pmap->pm_pvchunk); 395 LIST_INIT(&allpmaps); 396 mtx_init(&allpmaps_lock, "allpmaps", NULL, MTX_SPIN); 397 mtx_lock_spin(&allpmaps_lock); 398 LIST_INSERT_HEAD(&allpmaps, kernel_pmap, pm_list); 399 mtx_unlock_spin(&allpmaps_lock); 400 nkpt = NKPT; 401 402 /* 403 * Reserve some special page table entries/VA space for temporary 404 * mapping of pages. 405 */ 406#define SYSMAP(c, p, v, n) \ 407 v = (c)va; va += ((n)*PAGE_SIZE); p = pte; pte += (n); 408 409 va = virtual_avail; 410 pte = vtopte(va); 411 412 /* 413 * CMAP1/CMAP2 are used for zeroing and copying pages. 414 * CMAP3 is used for the idle process page zeroing. 415 */ 416 for (i = 0; i < MAXCPU; i++) { 417 sysmaps = &sysmaps_pcpu[i]; 418 mtx_init(&sysmaps->lock, "SYSMAPS", NULL, MTX_DEF); 419 SYSMAP(caddr_t, sysmaps->CMAP1, sysmaps->CADDR1, 1) 420 SYSMAP(caddr_t, sysmaps->CMAP2, sysmaps->CADDR2, 1) 421 } 422 SYSMAP(caddr_t, CMAP1, CADDR1, 1) 423 SYSMAP(caddr_t, CMAP3, CADDR3, 1) 424 *CMAP3 = 0; 425 426 /* 427 * Crashdump maps. 428 */ 429 SYSMAP(caddr_t, unused, crashdumpmap, MAXDUMPPGS) 430 431 /* 432 * ptvmmap is used for reading arbitrary physical pages via /dev/mem. 433 */ 434 SYSMAP(caddr_t, unused, ptvmmap, 1) 435 436 /* 437 * msgbufp is used to map the system message buffer. 438 */ 439 SYSMAP(struct msgbuf *, unused, msgbufp, atop(round_page(MSGBUF_SIZE))) 440 441 /* 442 * ptemap is used for pmap_pte_quick 443 */ 444 SYSMAP(pt_entry_t *, PMAP1, PADDR1, 1); 445 SYSMAP(pt_entry_t *, PMAP2, PADDR2, 1); 446 447 mtx_init(&PMAP2mutex, "PMAP2", NULL, MTX_DEF); 448 449 virtual_avail = va; 450 451 *CMAP1 = 0; 452 453 /* 454 * Leave in place an identity mapping (virt == phys) for the low 1 MB 455 * physical memory region that is used by the ACPI wakeup code. This 456 * mapping must not have PG_G set. 457 */ 458#ifdef XBOX 459 /* FIXME: This is gross, but needed for the XBOX. Since we are in such 460 * an early stadium, we cannot yet neatly map video memory ... :-( 461 * Better fixes are very welcome! */ 462 if (!arch_i386_is_xbox) 463#endif 464 for (i = 1; i < NKPT; i++) 465 PTD[i] = 0; 466 467 /* Initialize the PAT MSR if present. */ 468 pmap_init_pat(); 469 470 /* Turn on PG_G on kernel page(s) */ 471 pmap_set_pg(); 472} 473 474/* 475 * Setup the PAT MSR. 476 */ 477void 478pmap_init_pat(void) 479{ 480 uint64_t pat_msr; 481 char *sysenv; 482 static int pat_tested = 0; 483 484 /* Bail if this CPU doesn't implement PAT. */ 485 if (!(cpu_feature & CPUID_PAT)) 486 return; 487 488 /* 489 * Due to some Intel errata, we can only safely use the lower 4 490 * PAT entries. 491 * 492 * Intel Pentium III Processor Specification Update 493 * Errata E.27 (Upper Four PAT Entries Not Usable With Mode B 494 * or Mode C Paging) 495 * 496 * Intel Pentium IV Processor Specification Update 497 * Errata N46 (PAT Index MSB May Be Calculated Incorrectly) 498 * 499 * Some Apple Macs based on nVidia chipsets cannot enter ACPI mode 500 * via SMI# when we use upper 4 PAT entries for unknown reason. 501 */ 502 if (!pat_tested) { 503 if (cpu_vendor_id != CPU_VENDOR_INTEL || 504 (CPUID_TO_FAMILY(cpu_id) == 6 && 505 CPUID_TO_MODEL(cpu_id) >= 0xe)) { 506 pat_works = 1; 507 sysenv = getenv("smbios.system.product"); 508 if (sysenv != NULL) { 509 if (strncmp(sysenv, "MacBook5,1", 10) == 0 || 510 strncmp(sysenv, "MacBookPro5,5", 13) == 0 || 511 strncmp(sysenv, "Macmini3,1", 10) == 0) 512 pat_works = 0; 513 freeenv(sysenv); 514 } 515 } 516 pat_tested = 1; 517 } 518 519 /* Initialize default PAT entries. */ 520 pat_msr = PAT_VALUE(0, PAT_WRITE_BACK) | 521 PAT_VALUE(1, PAT_WRITE_THROUGH) | 522 PAT_VALUE(2, PAT_UNCACHED) | 523 PAT_VALUE(3, PAT_UNCACHEABLE) | 524 PAT_VALUE(4, PAT_WRITE_BACK) | 525 PAT_VALUE(5, PAT_WRITE_THROUGH) | 526 PAT_VALUE(6, PAT_UNCACHED) | 527 PAT_VALUE(7, PAT_UNCACHEABLE); 528 529 if (pat_works) { 530 /* 531 * Leave the indices 0-3 at the default of WB, WT, UC-, and UC. 532 * Program 4 and 5 as WP and WC. 533 * Leave 6 and 7 as UC- and UC. 534 */ 535 pat_msr &= ~(PAT_MASK(4) | PAT_MASK(5)); 536 pat_msr |= PAT_VALUE(4, PAT_WRITE_PROTECTED) | 537 PAT_VALUE(5, PAT_WRITE_COMBINING); 538 } else { 539 /* 540 * Just replace PAT Index 2 with WC instead of UC-. 541 */ 542 pat_msr &= ~PAT_MASK(2); 543 pat_msr |= PAT_VALUE(2, PAT_WRITE_COMBINING); 544 } 545 wrmsr(MSR_PAT, pat_msr); 546} 547 548/* 549 * Set PG_G on kernel pages. Only the BSP calls this when SMP is turned on. 550 */ 551void 552pmap_set_pg(void) 553{ 554 pd_entry_t pdir; 555 pt_entry_t *pte; 556 vm_offset_t va, endva; 557 int i; 558 559 if (pgeflag == 0) 560 return; 561 562 i = KERNLOAD/NBPDR; 563 endva = KERNBASE + KERNend; 564 565 if (pseflag) { 566 va = KERNBASE + KERNLOAD; 567 while (va < endva) { 568 pdir = kernel_pmap->pm_pdir[KPTDI+i]; 569 pdir |= pgeflag; 570 kernel_pmap->pm_pdir[KPTDI+i] = PTD[KPTDI+i] = pdir; 571 invltlb(); /* Play it safe, invltlb() every time */ 572 i++; 573 va += NBPDR; 574 } 575 } else { 576 va = (vm_offset_t)btext; 577 while (va < endva) { 578 pte = vtopte(va); 579 if (*pte) 580 *pte |= pgeflag; 581 invltlb(); /* Play it safe, invltlb() every time */ 582 va += PAGE_SIZE; 583 } 584 } 585} 586 587/* 588 * Initialize a vm_page's machine-dependent fields. 589 */ 590void 591pmap_page_init(vm_page_t m) 592{ 593 594 TAILQ_INIT(&m->md.pv_list); 595 m->md.pat_mode = PAT_WRITE_BACK; 596} 597 598#ifdef PAE 599static void * 600pmap_pdpt_allocf(uma_zone_t zone, int bytes, u_int8_t *flags, int wait) 601{ 602 603 /* Inform UMA that this allocator uses kernel_map/object. */ 604 *flags = UMA_SLAB_KERNEL; 605 return ((void *)kmem_alloc_contig(kernel_map, bytes, wait, 0x0ULL, 606 0xffffffffULL, 1, 0, VM_MEMATTR_DEFAULT)); 607} 608#endif 609 610/* 611 * ABuse the pte nodes for unmapped kva to thread a kva freelist through. 612 * Requirements: 613 * - Must deal with pages in order to ensure that none of the PG_* bits 614 * are ever set, PG_V in particular. 615 * - Assumes we can write to ptes without pte_store() atomic ops, even 616 * on PAE systems. This should be ok. 617 * - Assumes nothing will ever test these addresses for 0 to indicate 618 * no mapping instead of correctly checking PG_V. 619 * - Assumes a vm_offset_t will fit in a pte (true for i386). 620 * Because PG_V is never set, there can be no mappings to invalidate. 621 */ 622static vm_offset_t 623pmap_ptelist_alloc(vm_offset_t *head) 624{ 625 pt_entry_t *pte; 626 vm_offset_t va; 627 628 va = *head; 629 if (va == 0) 630 return (va); /* Out of memory */ 631 pte = vtopte(va); 632 *head = *pte; 633 if (*head & PG_V) 634 panic("pmap_ptelist_alloc: va with PG_V set!"); 635 *pte = 0; 636 return (va); 637} 638 639static void 640pmap_ptelist_free(vm_offset_t *head, vm_offset_t va) 641{ 642 pt_entry_t *pte; 643 644 if (va & PG_V) 645 panic("pmap_ptelist_free: freeing va with PG_V set!"); 646 pte = vtopte(va); 647 *pte = *head; /* virtual! PG_V is 0 though */ 648 *head = va; 649} 650 651static void 652pmap_ptelist_init(vm_offset_t *head, void *base, int npages) 653{ 654 int i; 655 vm_offset_t va; 656 657 *head = 0; 658 for (i = npages - 1; i >= 0; i--) { 659 va = (vm_offset_t)base + i * PAGE_SIZE; 660 pmap_ptelist_free(head, va); 661 } 662} 663 664 665/* 666 * Initialize the pmap module. 667 * Called by vm_init, to initialize any structures that the pmap 668 * system needs to map virtual memory. 669 */ 670void 671pmap_init(void) 672{ 673 vm_page_t mpte; 674 vm_size_t s; 675 int i, pv_npg; 676 677 /* 678 * Initialize the vm page array entries for the kernel pmap's 679 * page table pages. 680 */ 681 for (i = 0; i < nkpt; i++) { 682 mpte = PHYS_TO_VM_PAGE(PTD[i + KPTDI] & PG_FRAME); 683 KASSERT(mpte >= vm_page_array && 684 mpte < &vm_page_array[vm_page_array_size], 685 ("pmap_init: page table page is out of range")); 686 mpte->pindex = i + KPTDI; 687 mpte->phys_addr = PTD[i + KPTDI] & PG_FRAME; 688 } 689 690 /* 691 * Initialize the address space (zone) for the pv entries. Set a 692 * high water mark so that the system can recover from excessive 693 * numbers of pv entries. 694 */ 695 TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc); 696 pv_entry_max = shpgperproc * maxproc + cnt.v_page_count; 697 TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max); 698 pv_entry_max = roundup(pv_entry_max, _NPCPV); 699 pv_entry_high_water = 9 * (pv_entry_max / 10); 700 701 /* 702 * Are large page mappings enabled? 703 */ 704 TUNABLE_INT_FETCH("vm.pmap.pg_ps_enabled", &pg_ps_enabled); 705 if (pg_ps_enabled) { 706 KASSERT(MAXPAGESIZES > 1 && pagesizes[1] == 0, 707 ("pmap_init: can't assign to pagesizes[1]")); 708 pagesizes[1] = NBPDR; 709 } 710 711 /* 712 * Calculate the size of the pv head table for superpages. 713 */ 714 for (i = 0; phys_avail[i + 1]; i += 2); 715 pv_npg = round_4mpage(phys_avail[(i - 2) + 1]) / NBPDR; 716 717 /* 718 * Allocate memory for the pv head table for superpages. 719 */ 720 s = (vm_size_t)(pv_npg * sizeof(struct md_page)); 721 s = round_page(s); 722 pv_table = (struct md_page *)kmem_alloc(kernel_map, s); 723 for (i = 0; i < pv_npg; i++) 724 TAILQ_INIT(&pv_table[i].pv_list); 725 726 pv_maxchunks = MAX(pv_entry_max / _NPCPV, maxproc); 727 pv_chunkbase = (struct pv_chunk *)kmem_alloc_nofault(kernel_map, 728 PAGE_SIZE * pv_maxchunks); 729 if (pv_chunkbase == NULL) 730 panic("pmap_init: not enough kvm for pv chunks"); 731 pmap_ptelist_init(&pv_vafree, pv_chunkbase, pv_maxchunks); 732#ifdef PAE 733 pdptzone = uma_zcreate("PDPT", NPGPTD * sizeof(pdpt_entry_t), NULL, 734 NULL, NULL, NULL, (NPGPTD * sizeof(pdpt_entry_t)) - 1, 735 UMA_ZONE_VM | UMA_ZONE_NOFREE); 736 uma_zone_set_allocf(pdptzone, pmap_pdpt_allocf); 737#endif 738} 739 740 741SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_max, CTLFLAG_RD, &pv_entry_max, 0, 742 "Max number of PV entries"); 743SYSCTL_INT(_vm_pmap, OID_AUTO, shpgperproc, CTLFLAG_RD, &shpgperproc, 0, 744 "Page share factor per proc"); 745 746SYSCTL_NODE(_vm_pmap, OID_AUTO, pde, CTLFLAG_RD, 0, 747 "2/4MB page mapping counters"); 748 749static u_long pmap_pde_demotions; 750SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, demotions, CTLFLAG_RD, 751 &pmap_pde_demotions, 0, "2/4MB page demotions"); 752 753static u_long pmap_pde_mappings; 754SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, mappings, CTLFLAG_RD, 755 &pmap_pde_mappings, 0, "2/4MB page mappings"); 756 757static u_long pmap_pde_p_failures; 758SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, p_failures, CTLFLAG_RD, 759 &pmap_pde_p_failures, 0, "2/4MB page promotion failures"); 760 761static u_long pmap_pde_promotions; 762SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, promotions, CTLFLAG_RD, 763 &pmap_pde_promotions, 0, "2/4MB page promotions"); 764 765/*************************************************** 766 * Low level helper routines..... 767 ***************************************************/ 768 769/* 770 * Determine the appropriate bits to set in a PTE or PDE for a specified 771 * caching mode. 772 */ 773int 774pmap_cache_bits(int mode, boolean_t is_pde) 775{ 776 int pat_flag, pat_index, cache_bits; 777 778 /* The PAT bit is different for PTE's and PDE's. */ 779 pat_flag = is_pde ? PG_PDE_PAT : PG_PTE_PAT; 780 781 /* If we don't support PAT, map extended modes to older ones. */ 782 if (!(cpu_feature & CPUID_PAT)) { 783 switch (mode) { 784 case PAT_UNCACHEABLE: 785 case PAT_WRITE_THROUGH: 786 case PAT_WRITE_BACK: 787 break; 788 case PAT_UNCACHED: 789 case PAT_WRITE_COMBINING: 790 case PAT_WRITE_PROTECTED: 791 mode = PAT_UNCACHEABLE; 792 break; 793 } 794 } 795 796 /* Map the caching mode to a PAT index. */ 797 if (pat_works) { 798 switch (mode) { 799 case PAT_UNCACHEABLE: 800 pat_index = 3; 801 break; 802 case PAT_WRITE_THROUGH: 803 pat_index = 1; 804 break; 805 case PAT_WRITE_BACK: 806 pat_index = 0; 807 break; 808 case PAT_UNCACHED: 809 pat_index = 2; 810 break; 811 case PAT_WRITE_COMBINING: 812 pat_index = 5; 813 break; 814 case PAT_WRITE_PROTECTED: 815 pat_index = 4; 816 break; 817 default: 818 panic("Unknown caching mode %d\n", mode); 819 } 820 } else { 821 switch (mode) { 822 case PAT_UNCACHED: 823 case PAT_UNCACHEABLE: 824 case PAT_WRITE_PROTECTED: 825 pat_index = 3; 826 break; 827 case PAT_WRITE_THROUGH: 828 pat_index = 1; 829 break; 830 case PAT_WRITE_BACK: 831 pat_index = 0; 832 break; 833 case PAT_WRITE_COMBINING: 834 pat_index = 2; 835 break; 836 default: 837 panic("Unknown caching mode %d\n", mode); 838 } 839 } 840 841 /* Map the 3-bit index value into the PAT, PCD, and PWT bits. */ 842 cache_bits = 0; 843 if (pat_index & 0x4) 844 cache_bits |= pat_flag; 845 if (pat_index & 0x2) 846 cache_bits |= PG_NC_PCD; 847 if (pat_index & 0x1) 848 cache_bits |= PG_NC_PWT; 849 return (cache_bits); 850} 851#ifdef SMP 852/* 853 * For SMP, these functions have to use the IPI mechanism for coherence. 854 * 855 * N.B.: Before calling any of the following TLB invalidation functions, 856 * the calling processor must ensure that all stores updating a non- 857 * kernel page table are globally performed. Otherwise, another 858 * processor could cache an old, pre-update entry without being 859 * invalidated. This can happen one of two ways: (1) The pmap becomes 860 * active on another processor after its pm_active field is checked by 861 * one of the following functions but before a store updating the page 862 * table is globally performed. (2) The pmap becomes active on another 863 * processor before its pm_active field is checked but due to 864 * speculative loads one of the following functions stills reads the 865 * pmap as inactive on the other processor. 866 * 867 * The kernel page table is exempt because its pm_active field is 868 * immutable. The kernel page table is always active on every 869 * processor. 870 */ 871void 872pmap_invalidate_page(pmap_t pmap, vm_offset_t va) 873{ 874 u_int cpumask; 875 u_int other_cpus; 876 877 sched_pin(); 878 if (pmap == kernel_pmap || pmap->pm_active == all_cpus) { 879 invlpg(va); 880 smp_invlpg(va); 881 } else { 882 cpumask = PCPU_GET(cpumask); 883 other_cpus = PCPU_GET(other_cpus); 884 if (pmap->pm_active & cpumask) 885 invlpg(va); 886 if (pmap->pm_active & other_cpus) 887 smp_masked_invlpg(pmap->pm_active & other_cpus, va); 888 } 889 sched_unpin(); 890} 891 892void 893pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) 894{ 895 u_int cpumask; 896 u_int other_cpus; 897 vm_offset_t addr; 898 899 sched_pin(); 900 if (pmap == kernel_pmap || pmap->pm_active == all_cpus) { 901 for (addr = sva; addr < eva; addr += PAGE_SIZE) 902 invlpg(addr); 903 smp_invlpg_range(sva, eva); 904 } else { 905 cpumask = PCPU_GET(cpumask); 906 other_cpus = PCPU_GET(other_cpus); 907 if (pmap->pm_active & cpumask) 908 for (addr = sva; addr < eva; addr += PAGE_SIZE) 909 invlpg(addr); 910 if (pmap->pm_active & other_cpus) 911 smp_masked_invlpg_range(pmap->pm_active & other_cpus, 912 sva, eva); 913 } 914 sched_unpin(); 915} 916 917void 918pmap_invalidate_all(pmap_t pmap) 919{ 920 u_int cpumask; 921 u_int other_cpus; 922 923 sched_pin(); 924 if (pmap == kernel_pmap || pmap->pm_active == all_cpus) { 925 invltlb(); 926 smp_invltlb(); 927 } else { 928 cpumask = PCPU_GET(cpumask); 929 other_cpus = PCPU_GET(other_cpus); 930 if (pmap->pm_active & cpumask) 931 invltlb(); 932 if (pmap->pm_active & other_cpus) 933 smp_masked_invltlb(pmap->pm_active & other_cpus); 934 } 935 sched_unpin(); 936} 937 938void 939pmap_invalidate_cache(void) 940{ 941 942 sched_pin(); 943 wbinvd(); 944 smp_cache_flush(); 945 sched_unpin(); 946} 947#else /* !SMP */ 948/* 949 * Normal, non-SMP, 486+ invalidation functions. 950 * We inline these within pmap.c for speed. 951 */ 952PMAP_INLINE void 953pmap_invalidate_page(pmap_t pmap, vm_offset_t va) 954{ 955 956 if (pmap == kernel_pmap || pmap->pm_active) 957 invlpg(va); 958} 959 960PMAP_INLINE void 961pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) 962{ 963 vm_offset_t addr; 964 965 if (pmap == kernel_pmap || pmap->pm_active) 966 for (addr = sva; addr < eva; addr += PAGE_SIZE) 967 invlpg(addr); 968} 969 970PMAP_INLINE void 971pmap_invalidate_all(pmap_t pmap) 972{ 973 974 if (pmap == kernel_pmap || pmap->pm_active) 975 invltlb(); 976} 977 978PMAP_INLINE void 979pmap_invalidate_cache(void) 980{ 981 982 wbinvd(); 983} 984#endif /* !SMP */ 985 986void 987pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva) 988{ 989 990 KASSERT((sva & PAGE_MASK) == 0, 991 ("pmap_invalidate_cache_range: sva not page-aligned")); 992 KASSERT((eva & PAGE_MASK) == 0, 993 ("pmap_invalidate_cache_range: eva not page-aligned")); 994 995 if (cpu_feature & CPUID_SS) 996 ; /* If "Self Snoop" is supported, do nothing. */ 997 else if (cpu_feature & CPUID_CLFSH) { 998 999 /* 1000 * Otherwise, do per-cache line flush. Use the mfence 1001 * instruction to insure that previous stores are 1002 * included in the write-back. The processor 1003 * propagates flush to other processors in the cache 1004 * coherence domain. 1005 */ 1006 mfence(); 1007 for (; sva < eva; sva += cpu_clflush_line_size) 1008 clflush(sva); 1009 mfence(); 1010 } else { 1011 1012 /* 1013 * No targeted cache flush methods are supported by CPU, 1014 * globally invalidate cache as a last resort. 1015 */ 1016 pmap_invalidate_cache(); 1017 } 1018} 1019 1020/* 1021 * Are we current address space or kernel? N.B. We return FALSE when 1022 * a pmap's page table is in use because a kernel thread is borrowing 1023 * it. The borrowed page table can change spontaneously, making any 1024 * dependence on its continued use subject to a race condition. 1025 */ 1026static __inline int 1027pmap_is_current(pmap_t pmap) 1028{ 1029 1030 return (pmap == kernel_pmap || 1031 (pmap == vmspace_pmap(curthread->td_proc->p_vmspace) && 1032 (pmap->pm_pdir[PTDPTDI] & PG_FRAME) == (PTDpde[0] & PG_FRAME))); 1033} 1034 1035/* 1036 * If the given pmap is not the current or kernel pmap, the returned pte must 1037 * be released by passing it to pmap_pte_release(). 1038 */ 1039pt_entry_t * 1040pmap_pte(pmap_t pmap, vm_offset_t va) 1041{ 1042 pd_entry_t newpf; 1043 pd_entry_t *pde; 1044 1045 pde = pmap_pde(pmap, va); 1046 if (*pde & PG_PS) 1047 return (pde); 1048 if (*pde != 0) { 1049 /* are we current address space or kernel? */ 1050 if (pmap_is_current(pmap)) 1051 return (vtopte(va)); 1052 mtx_lock(&PMAP2mutex); 1053 newpf = *pde & PG_FRAME; 1054 if ((*PMAP2 & PG_FRAME) != newpf) { 1055 *PMAP2 = newpf | PG_RW | PG_V | PG_A | PG_M; 1056 pmap_invalidate_page(kernel_pmap, (vm_offset_t)PADDR2); 1057 } 1058 return (PADDR2 + (i386_btop(va) & (NPTEPG - 1))); 1059 } 1060 return (0); 1061} 1062 1063/* 1064 * Releases a pte that was obtained from pmap_pte(). Be prepared for the pte 1065 * being NULL. 1066 */ 1067static __inline void 1068pmap_pte_release(pt_entry_t *pte) 1069{ 1070 1071 if ((pt_entry_t *)((vm_offset_t)pte & ~PAGE_MASK) == PADDR2) 1072 mtx_unlock(&PMAP2mutex); 1073} 1074 1075static __inline void 1076invlcaddr(void *caddr) 1077{ 1078 1079 invlpg((u_int)caddr); 1080} 1081 1082/* 1083 * Super fast pmap_pte routine best used when scanning 1084 * the pv lists. This eliminates many coarse-grained 1085 * invltlb calls. Note that many of the pv list 1086 * scans are across different pmaps. It is very wasteful 1087 * to do an entire invltlb for checking a single mapping. 1088 * 1089 * If the given pmap is not the current pmap, vm_page_queue_mtx 1090 * must be held and curthread pinned to a CPU. 1091 */ 1092static pt_entry_t * 1093pmap_pte_quick(pmap_t pmap, vm_offset_t va) 1094{ 1095 pd_entry_t newpf; 1096 pd_entry_t *pde; 1097 1098 pde = pmap_pde(pmap, va); 1099 if (*pde & PG_PS) 1100 return (pde); 1101 if (*pde != 0) { 1102 /* are we current address space or kernel? */ 1103 if (pmap_is_current(pmap)) 1104 return (vtopte(va)); 1105 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 1106 KASSERT(curthread->td_pinned > 0, ("curthread not pinned")); 1107 newpf = *pde & PG_FRAME; 1108 if ((*PMAP1 & PG_FRAME) != newpf) { 1109 *PMAP1 = newpf | PG_RW | PG_V | PG_A | PG_M; 1110#ifdef SMP 1111 PMAP1cpu = PCPU_GET(cpuid); 1112#endif 1113 invlcaddr(PADDR1); 1114 PMAP1changed++; 1115 } else 1116#ifdef SMP 1117 if (PMAP1cpu != PCPU_GET(cpuid)) { 1118 PMAP1cpu = PCPU_GET(cpuid); 1119 invlcaddr(PADDR1); 1120 PMAP1changedcpu++; 1121 } else 1122#endif 1123 PMAP1unchanged++; 1124 return (PADDR1 + (i386_btop(va) & (NPTEPG - 1))); 1125 } 1126 return (0); 1127} 1128 1129/* 1130 * Routine: pmap_extract 1131 * Function: 1132 * Extract the physical page address associated 1133 * with the given map/virtual_address pair. 1134 */ 1135vm_paddr_t 1136pmap_extract(pmap_t pmap, vm_offset_t va) 1137{ 1138 vm_paddr_t rtval; 1139 pt_entry_t *pte; 1140 pd_entry_t pde; 1141 1142 rtval = 0; 1143 PMAP_LOCK(pmap); 1144 pde = pmap->pm_pdir[va >> PDRSHIFT]; 1145 if (pde != 0) { 1146 if ((pde & PG_PS) != 0) 1147 rtval = (pde & PG_PS_FRAME) | (va & PDRMASK); 1148 else { 1149 pte = pmap_pte(pmap, va); 1150 rtval = (*pte & PG_FRAME) | (va & PAGE_MASK); 1151 pmap_pte_release(pte); 1152 } 1153 } 1154 PMAP_UNLOCK(pmap); 1155 return (rtval); 1156} 1157 1158/* 1159 * Routine: pmap_extract_and_hold 1160 * Function: 1161 * Atomically extract and hold the physical page 1162 * with the given pmap and virtual address pair 1163 * if that mapping permits the given protection. 1164 */ 1165vm_page_t 1166pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot) 1167{ 1168 pd_entry_t pde; 1169 pt_entry_t pte; 1170 vm_page_t m; 1171 1172 m = NULL; 1173 vm_page_lock_queues(); 1174 PMAP_LOCK(pmap); 1175 pde = *pmap_pde(pmap, va); 1176 if (pde != 0) { 1177 if (pde & PG_PS) { 1178 if ((pde & PG_RW) || (prot & VM_PROT_WRITE) == 0) { 1179 m = PHYS_TO_VM_PAGE((pde & PG_PS_FRAME) | 1180 (va & PDRMASK)); 1181 vm_page_hold(m); 1182 } 1183 } else { 1184 sched_pin(); 1185 pte = *pmap_pte_quick(pmap, va); 1186 if (pte != 0 && 1187 ((pte & PG_RW) || (prot & VM_PROT_WRITE) == 0)) { 1188 m = PHYS_TO_VM_PAGE(pte & PG_FRAME); 1189 vm_page_hold(m); 1190 } 1191 sched_unpin(); 1192 } 1193 } 1194 vm_page_unlock_queues(); 1195 PMAP_UNLOCK(pmap); 1196 return (m); 1197} 1198 1199/*************************************************** 1200 * Low level mapping routines..... 1201 ***************************************************/ 1202 1203/* 1204 * Add a wired page to the kva. 1205 * Note: not SMP coherent. 1206 */ 1207PMAP_INLINE void 1208pmap_kenter(vm_offset_t va, vm_paddr_t pa) 1209{ 1210 pt_entry_t *pte; 1211 1212 pte = vtopte(va); 1213 pte_store(pte, pa | PG_RW | PG_V | pgeflag); 1214} 1215 1216static __inline void 1217pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode) 1218{ 1219 pt_entry_t *pte; 1220 1221 pte = vtopte(va); 1222 pte_store(pte, pa | PG_RW | PG_V | pgeflag | pmap_cache_bits(mode, 0)); 1223} 1224 1225/* 1226 * Remove a page from the kernel pagetables. 1227 * Note: not SMP coherent. 1228 */ 1229PMAP_INLINE void 1230pmap_kremove(vm_offset_t va) 1231{ 1232 pt_entry_t *pte; 1233 1234 pte = vtopte(va); 1235 pte_clear(pte); 1236} 1237 1238/* 1239 * Used to map a range of physical addresses into kernel 1240 * virtual address space. 1241 * 1242 * The value passed in '*virt' is a suggested virtual address for 1243 * the mapping. Architectures which can support a direct-mapped 1244 * physical to virtual region can return the appropriate address 1245 * within that region, leaving '*virt' unchanged. Other 1246 * architectures should map the pages starting at '*virt' and 1247 * update '*virt' with the first usable address after the mapped 1248 * region. 1249 */ 1250vm_offset_t 1251pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot) 1252{ 1253 vm_offset_t va, sva; 1254 1255 va = sva = *virt; 1256 while (start < end) { 1257 pmap_kenter(va, start); 1258 va += PAGE_SIZE; 1259 start += PAGE_SIZE; 1260 } 1261 pmap_invalidate_range(kernel_pmap, sva, va); 1262 *virt = va; 1263 return (sva); 1264} 1265 1266 1267/* 1268 * Add a list of wired pages to the kva 1269 * this routine is only used for temporary 1270 * kernel mappings that do not need to have 1271 * page modification or references recorded. 1272 * Note that old mappings are simply written 1273 * over. The page *must* be wired. 1274 * Note: SMP coherent. Uses a ranged shootdown IPI. 1275 */ 1276void 1277pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count) 1278{ 1279 pt_entry_t *endpte, oldpte, *pte; 1280 1281 oldpte = 0; 1282 pte = vtopte(sva); 1283 endpte = pte + count; 1284 while (pte < endpte) { 1285 oldpte |= *pte; 1286 pte_store(pte, VM_PAGE_TO_PHYS(*ma) | pgeflag | 1287 pmap_cache_bits((*ma)->md.pat_mode, 0) | PG_RW | PG_V); 1288 pte++; 1289 ma++; 1290 } 1291 if ((oldpte & PG_V) != 0) 1292 pmap_invalidate_range(kernel_pmap, sva, sva + count * 1293 PAGE_SIZE); 1294} 1295 1296/* 1297 * This routine tears out page mappings from the 1298 * kernel -- it is meant only for temporary mappings. 1299 * Note: SMP coherent. Uses a ranged shootdown IPI. 1300 */ 1301void 1302pmap_qremove(vm_offset_t sva, int count) 1303{ 1304 vm_offset_t va; 1305 1306 va = sva; 1307 while (count-- > 0) { 1308 pmap_kremove(va); 1309 va += PAGE_SIZE; 1310 } 1311 pmap_invalidate_range(kernel_pmap, sva, va); 1312} 1313 1314/*************************************************** 1315 * Page table page management routines..... 1316 ***************************************************/ 1317static __inline void 1318pmap_free_zero_pages(vm_page_t free) 1319{ 1320 vm_page_t m; 1321 1322 while (free != NULL) { 1323 m = free; 1324 free = m->right; 1325 /* Preserve the page's PG_ZERO setting. */ 1326 vm_page_free_toq(m); 1327 } 1328} 1329 1330/* 1331 * Schedule the specified unused page table page to be freed. Specifically, 1332 * add the page to the specified list of pages that will be released to the 1333 * physical memory manager after the TLB has been updated. 1334 */ 1335static __inline void 1336pmap_add_delayed_free_list(vm_page_t m, vm_page_t *free, boolean_t set_PG_ZERO) 1337{ 1338 1339 if (set_PG_ZERO) 1340 m->flags |= PG_ZERO; 1341 else 1342 m->flags &= ~PG_ZERO; 1343 m->right = *free; 1344 *free = m; 1345} 1346 1347/* 1348 * Inserts the specified page table page into the specified pmap's collection 1349 * of idle page table pages. Each of a pmap's page table pages is responsible 1350 * for mapping a distinct range of virtual addresses. The pmap's collection is 1351 * ordered by this virtual address range. 1352 */ 1353static void 1354pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte) 1355{ 1356 vm_page_t root; 1357 1358 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 1359 root = pmap->pm_root; 1360 if (root == NULL) { 1361 mpte->left = NULL; 1362 mpte->right = NULL; 1363 } else { 1364 root = vm_page_splay(mpte->pindex, root); 1365 if (mpte->pindex < root->pindex) { 1366 mpte->left = root->left; 1367 mpte->right = root; 1368 root->left = NULL; 1369 } else if (mpte->pindex == root->pindex) 1370 panic("pmap_insert_pt_page: pindex already inserted"); 1371 else { 1372 mpte->right = root->right; 1373 mpte->left = root; 1374 root->right = NULL; 1375 } 1376 } 1377 pmap->pm_root = mpte; 1378} 1379 1380/* 1381 * Looks for a page table page mapping the specified virtual address in the 1382 * specified pmap's collection of idle page table pages. Returns NULL if there 1383 * is no page table page corresponding to the specified virtual address. 1384 */ 1385static vm_page_t 1386pmap_lookup_pt_page(pmap_t pmap, vm_offset_t va) 1387{ 1388 vm_page_t mpte; 1389 vm_pindex_t pindex = va >> PDRSHIFT; 1390 1391 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 1392 if ((mpte = pmap->pm_root) != NULL && mpte->pindex != pindex) { 1393 mpte = vm_page_splay(pindex, mpte); 1394 if ((pmap->pm_root = mpte)->pindex != pindex) 1395 mpte = NULL; 1396 } 1397 return (mpte); 1398} 1399 1400/* 1401 * Removes the specified page table page from the specified pmap's collection 1402 * of idle page table pages. The specified page table page must be a member of 1403 * the pmap's collection. 1404 */ 1405static void 1406pmap_remove_pt_page(pmap_t pmap, vm_page_t mpte) 1407{ 1408 vm_page_t root; 1409 1410 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 1411 if (mpte != pmap->pm_root) 1412 vm_page_splay(mpte->pindex, pmap->pm_root); 1413 if (mpte->left == NULL) 1414 root = mpte->right; 1415 else { 1416 root = vm_page_splay(mpte->pindex, mpte->left); 1417 root->right = mpte->right; 1418 } 1419 pmap->pm_root = root; 1420} 1421 1422/* 1423 * This routine unholds page table pages, and if the hold count 1424 * drops to zero, then it decrements the wire count. 1425 */ 1426static __inline int 1427pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m, vm_page_t *free) 1428{ 1429 1430 --m->wire_count; 1431 if (m->wire_count == 0) 1432 return _pmap_unwire_pte_hold(pmap, m, free); 1433 else 1434 return 0; 1435} 1436 1437static int 1438_pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m, vm_page_t *free) 1439{ 1440 vm_offset_t pteva; 1441 1442 /* 1443 * unmap the page table page 1444 */ 1445 pmap->pm_pdir[m->pindex] = 0; 1446 --pmap->pm_stats.resident_count; 1447 1448 /* 1449 * This is a release store so that the ordinary store unmapping 1450 * the page table page is globally performed before TLB shoot- 1451 * down is begun. 1452 */ 1453 atomic_subtract_rel_int(&cnt.v_wire_count, 1); 1454 1455 /* 1456 * Do an invltlb to make the invalidated mapping 1457 * take effect immediately. 1458 */ 1459 pteva = VM_MAXUSER_ADDRESS + i386_ptob(m->pindex); 1460 pmap_invalidate_page(pmap, pteva); 1461 1462 /* 1463 * Put page on a list so that it is released after 1464 * *ALL* TLB shootdown is done 1465 */ 1466 pmap_add_delayed_free_list(m, free, TRUE); 1467 1468 return 1; 1469} 1470 1471/* 1472 * After removing a page table entry, this routine is used to 1473 * conditionally free the page, and manage the hold/wire counts. 1474 */ 1475static int 1476pmap_unuse_pt(pmap_t pmap, vm_offset_t va, vm_page_t *free) 1477{ 1478 pd_entry_t ptepde; 1479 vm_page_t mpte; 1480 1481 if (va >= VM_MAXUSER_ADDRESS) 1482 return 0; 1483 ptepde = *pmap_pde(pmap, va); 1484 mpte = PHYS_TO_VM_PAGE(ptepde & PG_FRAME); 1485 return pmap_unwire_pte_hold(pmap, mpte, free); 1486} 1487 1488void 1489pmap_pinit0(pmap_t pmap) 1490{ 1491 1492 PMAP_LOCK_INIT(pmap); 1493 pmap->pm_pdir = (pd_entry_t *)(KERNBASE + (vm_offset_t)IdlePTD); 1494#ifdef PAE 1495 pmap->pm_pdpt = (pdpt_entry_t *)(KERNBASE + (vm_offset_t)IdlePDPT); 1496#endif 1497 pmap->pm_root = NULL; 1498 pmap->pm_active = 0; 1499 PCPU_SET(curpmap, pmap); 1500 TAILQ_INIT(&pmap->pm_pvchunk); 1501 bzero(&pmap->pm_stats, sizeof pmap->pm_stats); 1502 mtx_lock_spin(&allpmaps_lock); 1503 LIST_INSERT_HEAD(&allpmaps, pmap, pm_list); 1504 mtx_unlock_spin(&allpmaps_lock); 1505} 1506 1507/* 1508 * Initialize a preallocated and zeroed pmap structure, 1509 * such as one in a vmspace structure. 1510 */ 1511int 1512pmap_pinit(pmap_t pmap) 1513{ 1514 vm_page_t m, ptdpg[NPGPTD]; 1515 vm_paddr_t pa; 1516 static int color; 1517 int i; 1518 1519 PMAP_LOCK_INIT(pmap); 1520 1521 /* 1522 * No need to allocate page table space yet but we do need a valid 1523 * page directory table. 1524 */ 1525 if (pmap->pm_pdir == NULL) { 1526 pmap->pm_pdir = (pd_entry_t *)kmem_alloc_nofault(kernel_map, 1527 NBPTD); 1528 1529 if (pmap->pm_pdir == NULL) { 1530 PMAP_LOCK_DESTROY(pmap); 1531 return (0); 1532 } 1533#ifdef PAE 1534 pmap->pm_pdpt = uma_zalloc(pdptzone, M_WAITOK | M_ZERO); 1535 KASSERT(((vm_offset_t)pmap->pm_pdpt & 1536 ((NPGPTD * sizeof(pdpt_entry_t)) - 1)) == 0, 1537 ("pmap_pinit: pdpt misaligned")); 1538 KASSERT(pmap_kextract((vm_offset_t)pmap->pm_pdpt) < (4ULL<<30), 1539 ("pmap_pinit: pdpt above 4g")); 1540#endif 1541 pmap->pm_root = NULL; 1542 } 1543 KASSERT(pmap->pm_root == NULL, 1544 ("pmap_pinit: pmap has reserved page table page(s)")); 1545 1546 /* 1547 * allocate the page directory page(s) 1548 */ 1549 for (i = 0; i < NPGPTD;) { 1550 m = vm_page_alloc(NULL, color++, 1551 VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | 1552 VM_ALLOC_ZERO); 1553 if (m == NULL) 1554 VM_WAIT; 1555 else { 1556 ptdpg[i++] = m; 1557 } 1558 } 1559 1560 pmap_qenter((vm_offset_t)pmap->pm_pdir, ptdpg, NPGPTD); 1561 1562 for (i = 0; i < NPGPTD; i++) { 1563 if ((ptdpg[i]->flags & PG_ZERO) == 0) 1564 bzero(pmap->pm_pdir + (i * NPDEPG), PAGE_SIZE); 1565 } 1566 1567 mtx_lock_spin(&allpmaps_lock); 1568 LIST_INSERT_HEAD(&allpmaps, pmap, pm_list); 1569 mtx_unlock_spin(&allpmaps_lock); 1570 /* Wire in kernel global address entries. */ 1571 bcopy(PTD + KPTDI, pmap->pm_pdir + KPTDI, nkpt * sizeof(pd_entry_t)); 1572 1573 /* install self-referential address mapping entry(s) */ 1574 for (i = 0; i < NPGPTD; i++) { 1575 pa = VM_PAGE_TO_PHYS(ptdpg[i]); 1576 pmap->pm_pdir[PTDPTDI + i] = pa | PG_V | PG_RW | PG_A | PG_M; 1577#ifdef PAE 1578 pmap->pm_pdpt[i] = pa | PG_V; 1579#endif 1580 } 1581 1582 pmap->pm_active = 0; 1583 TAILQ_INIT(&pmap->pm_pvchunk); 1584 bzero(&pmap->pm_stats, sizeof pmap->pm_stats); 1585 1586 return (1); 1587} 1588 1589/* 1590 * this routine is called if the page table page is not 1591 * mapped correctly. 1592 */ 1593static vm_page_t 1594_pmap_allocpte(pmap_t pmap, unsigned ptepindex, int flags) 1595{ 1596 vm_paddr_t ptepa; 1597 vm_page_t m; 1598 1599 KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT || 1600 (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK, 1601 ("_pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK")); 1602 1603 /* 1604 * Allocate a page table page. 1605 */ 1606 if ((m = vm_page_alloc(NULL, ptepindex, VM_ALLOC_NOOBJ | 1607 VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) { 1608 if (flags & M_WAITOK) { 1609 PMAP_UNLOCK(pmap); 1610 vm_page_unlock_queues(); 1611 VM_WAIT; 1612 vm_page_lock_queues(); 1613 PMAP_LOCK(pmap); 1614 } 1615 1616 /* 1617 * Indicate the need to retry. While waiting, the page table 1618 * page may have been allocated. 1619 */ 1620 return (NULL); 1621 } 1622 if ((m->flags & PG_ZERO) == 0) 1623 pmap_zero_page(m); 1624 1625 /* 1626 * Map the pagetable page into the process address space, if 1627 * it isn't already there. 1628 */ 1629 1630 pmap->pm_stats.resident_count++; 1631 1632 ptepa = VM_PAGE_TO_PHYS(m); 1633 pmap->pm_pdir[ptepindex] = 1634 (pd_entry_t) (ptepa | PG_U | PG_RW | PG_V | PG_A | PG_M); 1635 1636 return m; 1637} 1638 1639static vm_page_t 1640pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags) 1641{ 1642 unsigned ptepindex; 1643 pd_entry_t ptepa; 1644 vm_page_t m; 1645 1646 KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT || 1647 (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK, 1648 ("pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK")); 1649 1650 /* 1651 * Calculate pagetable page index 1652 */ 1653 ptepindex = va >> PDRSHIFT; 1654retry: 1655 /* 1656 * Get the page directory entry 1657 */ 1658 ptepa = pmap->pm_pdir[ptepindex]; 1659 1660 /* 1661 * This supports switching from a 4MB page to a 1662 * normal 4K page. 1663 */ 1664 if (ptepa & PG_PS) { 1665 (void)pmap_demote_pde(pmap, &pmap->pm_pdir[ptepindex], va); 1666 ptepa = pmap->pm_pdir[ptepindex]; 1667 } 1668 1669 /* 1670 * If the page table page is mapped, we just increment the 1671 * hold count, and activate it. 1672 */ 1673 if (ptepa) { 1674 m = PHYS_TO_VM_PAGE(ptepa & PG_FRAME); 1675 m->wire_count++; 1676 } else { 1677 /* 1678 * Here if the pte page isn't mapped, or if it has 1679 * been deallocated. 1680 */ 1681 m = _pmap_allocpte(pmap, ptepindex, flags); 1682 if (m == NULL && (flags & M_WAITOK)) 1683 goto retry; 1684 } 1685 return (m); 1686} 1687 1688 1689/*************************************************** 1690* Pmap allocation/deallocation routines. 1691 ***************************************************/ 1692 1693#ifdef SMP 1694/* 1695 * Deal with a SMP shootdown of other users of the pmap that we are 1696 * trying to dispose of. This can be a bit hairy. 1697 */ 1698static cpumask_t *lazymask; 1699static u_int lazyptd; 1700static volatile u_int lazywait; 1701 1702void pmap_lazyfix_action(void); 1703 1704void 1705pmap_lazyfix_action(void) 1706{ 1707 cpumask_t mymask = PCPU_GET(cpumask); 1708 1709#ifdef COUNT_IPIS 1710 (*ipi_lazypmap_counts[PCPU_GET(cpuid)])++; 1711#endif 1712 if (rcr3() == lazyptd) 1713 load_cr3(PCPU_GET(curpcb)->pcb_cr3); 1714 atomic_clear_int(lazymask, mymask); 1715 atomic_store_rel_int(&lazywait, 1); 1716} 1717 1718static void 1719pmap_lazyfix_self(cpumask_t mymask) 1720{ 1721 1722 if (rcr3() == lazyptd) 1723 load_cr3(PCPU_GET(curpcb)->pcb_cr3); 1724 atomic_clear_int(lazymask, mymask); 1725} 1726 1727 1728static void 1729pmap_lazyfix(pmap_t pmap) 1730{ 1731 cpumask_t mymask, mask; 1732 u_int spins; 1733 1734 while ((mask = pmap->pm_active) != 0) { 1735 spins = 50000000; 1736 mask = mask & -mask; /* Find least significant set bit */ 1737 mtx_lock_spin(&smp_ipi_mtx); 1738#ifdef PAE 1739 lazyptd = vtophys(pmap->pm_pdpt); 1740#else 1741 lazyptd = vtophys(pmap->pm_pdir); 1742#endif 1743 mymask = PCPU_GET(cpumask); 1744 if (mask == mymask) { 1745 lazymask = &pmap->pm_active; 1746 pmap_lazyfix_self(mymask); 1747 } else { 1748 atomic_store_rel_int((u_int *)&lazymask, 1749 (u_int)&pmap->pm_active); 1750 atomic_store_rel_int(&lazywait, 0); 1751 ipi_selected(mask, IPI_LAZYPMAP); 1752 while (lazywait == 0) { 1753 ia32_pause(); 1754 if (--spins == 0) 1755 break; 1756 } 1757 } 1758 mtx_unlock_spin(&smp_ipi_mtx); 1759 if (spins == 0) 1760 printf("pmap_lazyfix: spun for 50000000\n"); 1761 } 1762} 1763 1764#else /* SMP */ 1765 1766/* 1767 * Cleaning up on uniprocessor is easy. For various reasons, we're 1768 * unlikely to have to even execute this code, including the fact 1769 * that the cleanup is deferred until the parent does a wait(2), which 1770 * means that another userland process has run. 1771 */ 1772static void 1773pmap_lazyfix(pmap_t pmap) 1774{ 1775 u_int cr3; 1776 1777 cr3 = vtophys(pmap->pm_pdir); 1778 if (cr3 == rcr3()) { 1779 load_cr3(PCPU_GET(curpcb)->pcb_cr3); 1780 pmap->pm_active &= ~(PCPU_GET(cpumask)); 1781 } 1782} 1783#endif /* SMP */ 1784 1785/* 1786 * Release any resources held by the given physical map. 1787 * Called when a pmap initialized by pmap_pinit is being released. 1788 * Should only be called if the map contains no valid mappings. 1789 */ 1790void 1791pmap_release(pmap_t pmap) 1792{ 1793 vm_page_t m, ptdpg[NPGPTD]; 1794 int i; 1795 1796 KASSERT(pmap->pm_stats.resident_count == 0, 1797 ("pmap_release: pmap resident count %ld != 0", 1798 pmap->pm_stats.resident_count)); 1799 KASSERT(pmap->pm_root == NULL, 1800 ("pmap_release: pmap has reserved page table page(s)")); 1801 1802 pmap_lazyfix(pmap); 1803 mtx_lock_spin(&allpmaps_lock); 1804 LIST_REMOVE(pmap, pm_list); 1805 mtx_unlock_spin(&allpmaps_lock); 1806 1807 for (i = 0; i < NPGPTD; i++) 1808 ptdpg[i] = PHYS_TO_VM_PAGE(pmap->pm_pdir[PTDPTDI + i] & 1809 PG_FRAME); 1810 1811 bzero(pmap->pm_pdir + PTDPTDI, (nkpt + NPGPTD) * 1812 sizeof(*pmap->pm_pdir)); 1813 1814 pmap_qremove((vm_offset_t)pmap->pm_pdir, NPGPTD); 1815 1816 for (i = 0; i < NPGPTD; i++) { 1817 m = ptdpg[i]; 1818#ifdef PAE 1819 KASSERT(VM_PAGE_TO_PHYS(m) == (pmap->pm_pdpt[i] & PG_FRAME), 1820 ("pmap_release: got wrong ptd page")); 1821#endif 1822 m->wire_count--; 1823 atomic_subtract_int(&cnt.v_wire_count, 1); 1824 vm_page_free_zero(m); 1825 } 1826 PMAP_LOCK_DESTROY(pmap); 1827} 1828 1829static int 1830kvm_size(SYSCTL_HANDLER_ARGS) 1831{ 1832 unsigned long ksize = VM_MAX_KERNEL_ADDRESS - KERNBASE; 1833 1834 return sysctl_handle_long(oidp, &ksize, 0, req); 1835} 1836SYSCTL_PROC(_vm, OID_AUTO, kvm_size, CTLTYPE_LONG|CTLFLAG_RD, 1837 0, 0, kvm_size, "IU", "Size of KVM"); 1838 1839static int 1840kvm_free(SYSCTL_HANDLER_ARGS) 1841{ 1842 unsigned long kfree = VM_MAX_KERNEL_ADDRESS - kernel_vm_end; 1843 1844 return sysctl_handle_long(oidp, &kfree, 0, req); 1845} 1846SYSCTL_PROC(_vm, OID_AUTO, kvm_free, CTLTYPE_LONG|CTLFLAG_RD, 1847 0, 0, kvm_free, "IU", "Amount of KVM free"); 1848 1849/* 1850 * grow the number of kernel page table entries, if needed 1851 */ 1852void 1853pmap_growkernel(vm_offset_t addr) 1854{ 1855 struct pmap *pmap; 1856 vm_paddr_t ptppaddr; 1857 vm_page_t nkpg; 1858 pd_entry_t newpdir; 1859 pt_entry_t *pde; 1860 1861 mtx_assert(&kernel_map->system_mtx, MA_OWNED); 1862 if (kernel_vm_end == 0) { 1863 kernel_vm_end = KERNBASE; 1864 nkpt = 0; 1865 while (pdir_pde(PTD, kernel_vm_end)) { 1866 kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1); 1867 nkpt++; 1868 if (kernel_vm_end - 1 >= kernel_map->max_offset) { 1869 kernel_vm_end = kernel_map->max_offset; 1870 break; 1871 } 1872 } 1873 } 1874 addr = roundup2(addr, PAGE_SIZE * NPTEPG); 1875 if (addr - 1 >= kernel_map->max_offset) 1876 addr = kernel_map->max_offset; 1877 while (kernel_vm_end < addr) { 1878 if (pdir_pde(PTD, kernel_vm_end)) { 1879 kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1); 1880 if (kernel_vm_end - 1 >= kernel_map->max_offset) { 1881 kernel_vm_end = kernel_map->max_offset; 1882 break; 1883 } 1884 continue; 1885 } 1886 1887 nkpg = vm_page_alloc(NULL, kernel_vm_end >> PDRSHIFT, 1888 VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | 1889 VM_ALLOC_ZERO); 1890 if (nkpg == NULL) 1891 panic("pmap_growkernel: no memory to grow kernel"); 1892 1893 nkpt++; 1894 1895 if ((nkpg->flags & PG_ZERO) == 0) 1896 pmap_zero_page(nkpg); 1897 ptppaddr = VM_PAGE_TO_PHYS(nkpg); 1898 newpdir = (pd_entry_t) (ptppaddr | PG_V | PG_RW | PG_A | PG_M); 1899 pdir_pde(PTD, kernel_vm_end) = newpdir; 1900 1901 mtx_lock_spin(&allpmaps_lock); 1902 LIST_FOREACH(pmap, &allpmaps, pm_list) { 1903 pde = pmap_pde(pmap, kernel_vm_end); 1904 pde_store(pde, newpdir); 1905 } 1906 mtx_unlock_spin(&allpmaps_lock); 1907 kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1); 1908 if (kernel_vm_end - 1 >= kernel_map->max_offset) { 1909 kernel_vm_end = kernel_map->max_offset; 1910 break; 1911 } 1912 } 1913} 1914 1915 1916/*************************************************** 1917 * page management routines. 1918 ***************************************************/ 1919 1920CTASSERT(sizeof(struct pv_chunk) == PAGE_SIZE); 1921CTASSERT(_NPCM == 11); 1922 1923static __inline struct pv_chunk * 1924pv_to_chunk(pv_entry_t pv) 1925{ 1926 1927 return (struct pv_chunk *)((uintptr_t)pv & ~(uintptr_t)PAGE_MASK); 1928} 1929 1930#define PV_PMAP(pv) (pv_to_chunk(pv)->pc_pmap) 1931 1932#define PC_FREE0_9 0xfffffffful /* Free values for index 0 through 9 */ 1933#define PC_FREE10 0x0000fffful /* Free values for index 10 */ 1934 1935static uint32_t pc_freemask[11] = { 1936 PC_FREE0_9, PC_FREE0_9, PC_FREE0_9, 1937 PC_FREE0_9, PC_FREE0_9, PC_FREE0_9, 1938 PC_FREE0_9, PC_FREE0_9, PC_FREE0_9, 1939 PC_FREE0_9, PC_FREE10 1940}; 1941 1942SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_count, CTLFLAG_RD, &pv_entry_count, 0, 1943 "Current number of pv entries"); 1944 1945#ifdef PV_STATS 1946static int pc_chunk_count, pc_chunk_allocs, pc_chunk_frees, pc_chunk_tryfail; 1947 1948SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_count, CTLFLAG_RD, &pc_chunk_count, 0, 1949 "Current number of pv entry chunks"); 1950SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_allocs, CTLFLAG_RD, &pc_chunk_allocs, 0, 1951 "Current number of pv entry chunks allocated"); 1952SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_frees, CTLFLAG_RD, &pc_chunk_frees, 0, 1953 "Current number of pv entry chunks frees"); 1954SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_tryfail, CTLFLAG_RD, &pc_chunk_tryfail, 0, 1955 "Number of times tried to get a chunk page but failed."); 1956 1957static long pv_entry_frees, pv_entry_allocs; 1958static int pv_entry_spare; 1959 1960SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_frees, CTLFLAG_RD, &pv_entry_frees, 0, 1961 "Current number of pv entry frees"); 1962SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_allocs, CTLFLAG_RD, &pv_entry_allocs, 0, 1963 "Current number of pv entry allocs"); 1964SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_spare, CTLFLAG_RD, &pv_entry_spare, 0, 1965 "Current number of spare pv entries"); 1966 1967static int pmap_collect_inactive, pmap_collect_active; 1968 1969SYSCTL_INT(_vm_pmap, OID_AUTO, pmap_collect_inactive, CTLFLAG_RD, &pmap_collect_inactive, 0, 1970 "Current number times pmap_collect called on inactive queue"); 1971SYSCTL_INT(_vm_pmap, OID_AUTO, pmap_collect_active, CTLFLAG_RD, &pmap_collect_active, 0, 1972 "Current number times pmap_collect called on active queue"); 1973#endif 1974 1975/* 1976 * We are in a serious low memory condition. Resort to 1977 * drastic measures to free some pages so we can allocate 1978 * another pv entry chunk. This is normally called to 1979 * unmap inactive pages, and if necessary, active pages. 1980 */ 1981static void 1982pmap_collect(pmap_t locked_pmap, struct vpgqueues *vpq) 1983{ 1984 struct md_page *pvh; 1985 pd_entry_t *pde; 1986 pmap_t pmap; 1987 pt_entry_t *pte, tpte; 1988 pv_entry_t next_pv, pv; 1989 vm_offset_t va; 1990 vm_page_t m, free; 1991 1992 sched_pin(); 1993 TAILQ_FOREACH(m, &vpq->pl, pageq) { 1994 if (m->hold_count || m->busy) 1995 continue; 1996 TAILQ_FOREACH_SAFE(pv, &m->md.pv_list, pv_list, next_pv) { 1997 va = pv->pv_va; 1998 pmap = PV_PMAP(pv); 1999 /* Avoid deadlock and lock recursion. */ 2000 if (pmap > locked_pmap) 2001 PMAP_LOCK(pmap); 2002 else if (pmap != locked_pmap && !PMAP_TRYLOCK(pmap)) 2003 continue; 2004 pmap->pm_stats.resident_count--; 2005 pde = pmap_pde(pmap, va); 2006 KASSERT((*pde & PG_PS) == 0, ("pmap_collect: found" 2007 " a 4mpage in page %p's pv list", m)); 2008 pte = pmap_pte_quick(pmap, va); 2009 tpte = pte_load_clear(pte); 2010 KASSERT((tpte & PG_W) == 0, 2011 ("pmap_collect: wired pte %#jx", (uintmax_t)tpte)); 2012 if (tpte & PG_A) 2013 vm_page_flag_set(m, PG_REFERENCED); 2014 if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) 2015 vm_page_dirty(m); 2016 free = NULL; 2017 pmap_unuse_pt(pmap, va, &free); 2018 pmap_invalidate_page(pmap, va); 2019 pmap_free_zero_pages(free); 2020 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list); 2021 if (TAILQ_EMPTY(&m->md.pv_list)) { 2022 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); 2023 if (TAILQ_EMPTY(&pvh->pv_list)) 2024 vm_page_flag_clear(m, PG_WRITEABLE); 2025 } 2026 free_pv_entry(pmap, pv); 2027 if (pmap != locked_pmap) 2028 PMAP_UNLOCK(pmap); 2029 } 2030 } 2031 sched_unpin(); 2032} 2033 2034 2035/* 2036 * free the pv_entry back to the free list 2037 */ 2038static void 2039free_pv_entry(pmap_t pmap, pv_entry_t pv) 2040{ 2041 vm_page_t m; 2042 struct pv_chunk *pc; 2043 int idx, field, bit; 2044 2045 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2046 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2047 PV_STAT(pv_entry_frees++); 2048 PV_STAT(pv_entry_spare++); 2049 pv_entry_count--; 2050 pc = pv_to_chunk(pv); 2051 idx = pv - &pc->pc_pventry[0]; 2052 field = idx / 32; 2053 bit = idx % 32; 2054 pc->pc_map[field] |= 1ul << bit; 2055 /* move to head of list */ 2056 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 2057 for (idx = 0; idx < _NPCM; idx++) 2058 if (pc->pc_map[idx] != pc_freemask[idx]) { 2059 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list); 2060 return; 2061 } 2062 PV_STAT(pv_entry_spare -= _NPCPV); 2063 PV_STAT(pc_chunk_count--); 2064 PV_STAT(pc_chunk_frees++); 2065 /* entire chunk is free, return it */ 2066 m = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)pc)); 2067 pmap_qremove((vm_offset_t)pc, 1); 2068 vm_page_unwire(m, 0); 2069 vm_page_free(m); 2070 pmap_ptelist_free(&pv_vafree, (vm_offset_t)pc); 2071} 2072 2073/* 2074 * get a new pv_entry, allocating a block from the system 2075 * when needed. 2076 */ 2077static pv_entry_t 2078get_pv_entry(pmap_t pmap, int try) 2079{ 2080 static const struct timeval printinterval = { 60, 0 }; 2081 static struct timeval lastprint; 2082 static vm_pindex_t colour; 2083 struct vpgqueues *pq; 2084 int bit, field; 2085 pv_entry_t pv; 2086 struct pv_chunk *pc; 2087 vm_page_t m; 2088 2089 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2090 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2091 PV_STAT(pv_entry_allocs++); 2092 pv_entry_count++; 2093 if (pv_entry_count > pv_entry_high_water) 2094 if (ratecheck(&lastprint, &printinterval)) 2095 printf("Approaching the limit on PV entries, consider " 2096 "increasing either the vm.pmap.shpgperproc or the " 2097 "vm.pmap.pv_entry_max tunable.\n"); 2098 pq = NULL; 2099retry: 2100 pc = TAILQ_FIRST(&pmap->pm_pvchunk); 2101 if (pc != NULL) { 2102 for (field = 0; field < _NPCM; field++) { 2103 if (pc->pc_map[field]) { 2104 bit = bsfl(pc->pc_map[field]); 2105 break; 2106 } 2107 } 2108 if (field < _NPCM) { 2109 pv = &pc->pc_pventry[field * 32 + bit]; 2110 pc->pc_map[field] &= ~(1ul << bit); 2111 /* If this was the last item, move it to tail */ 2112 for (field = 0; field < _NPCM; field++) 2113 if (pc->pc_map[field] != 0) { 2114 PV_STAT(pv_entry_spare--); 2115 return (pv); /* not full, return */ 2116 } 2117 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 2118 TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list); 2119 PV_STAT(pv_entry_spare--); 2120 return (pv); 2121 } 2122 } 2123 /* 2124 * Access to the ptelist "pv_vafree" is synchronized by the page 2125 * queues lock. If "pv_vafree" is currently non-empty, it will 2126 * remain non-empty until pmap_ptelist_alloc() completes. 2127 */ 2128 if (pv_vafree == 0 || (m = vm_page_alloc(NULL, colour, (pq == 2129 &vm_page_queues[PQ_ACTIVE] ? VM_ALLOC_SYSTEM : VM_ALLOC_NORMAL) | 2130 VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) { 2131 if (try) { 2132 pv_entry_count--; 2133 PV_STAT(pc_chunk_tryfail++); 2134 return (NULL); 2135 } 2136 /* 2137 * Reclaim pv entries: At first, destroy mappings to 2138 * inactive pages. After that, if a pv chunk entry 2139 * is still needed, destroy mappings to active pages. 2140 */ 2141 if (pq == NULL) { 2142 PV_STAT(pmap_collect_inactive++); 2143 pq = &vm_page_queues[PQ_INACTIVE]; 2144 } else if (pq == &vm_page_queues[PQ_INACTIVE]) { 2145 PV_STAT(pmap_collect_active++); 2146 pq = &vm_page_queues[PQ_ACTIVE]; 2147 } else 2148 panic("get_pv_entry: increase vm.pmap.shpgperproc"); 2149 pmap_collect(pmap, pq); 2150 goto retry; 2151 } 2152 PV_STAT(pc_chunk_count++); 2153 PV_STAT(pc_chunk_allocs++); 2154 colour++; 2155 pc = (struct pv_chunk *)pmap_ptelist_alloc(&pv_vafree); 2156 pmap_qenter((vm_offset_t)pc, &m, 1); 2157 pc->pc_pmap = pmap; 2158 pc->pc_map[0] = pc_freemask[0] & ~1ul; /* preallocated bit 0 */ 2159 for (field = 1; field < _NPCM; field++) 2160 pc->pc_map[field] = pc_freemask[field]; 2161 pv = &pc->pc_pventry[0]; 2162 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list); 2163 PV_STAT(pv_entry_spare += _NPCPV - 1); 2164 return (pv); 2165} 2166 2167static __inline pv_entry_t 2168pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va) 2169{ 2170 pv_entry_t pv; 2171 2172 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2173 TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) { 2174 if (pmap == PV_PMAP(pv) && va == pv->pv_va) { 2175 TAILQ_REMOVE(&pvh->pv_list, pv, pv_list); 2176 break; 2177 } 2178 } 2179 return (pv); 2180} 2181 2182static void 2183pmap_pv_demote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa) 2184{ 2185 struct md_page *pvh; 2186 pv_entry_t pv; 2187 vm_offset_t va_last; 2188 vm_page_t m; 2189 2190 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2191 KASSERT((pa & PDRMASK) == 0, 2192 ("pmap_pv_demote_pde: pa is not 4mpage aligned")); 2193 2194 /* 2195 * Transfer the 4mpage's pv entry for this mapping to the first 2196 * page's pv list. 2197 */ 2198 pvh = pa_to_pvh(pa); 2199 va = trunc_4mpage(va); 2200 pv = pmap_pvh_remove(pvh, pmap, va); 2201 KASSERT(pv != NULL, ("pmap_pv_demote_pde: pv not found")); 2202 m = PHYS_TO_VM_PAGE(pa); 2203 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list); 2204 /* Instantiate the remaining NPTEPG - 1 pv entries. */ 2205 va_last = va + NBPDR - PAGE_SIZE; 2206 do { 2207 m++; 2208 KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0, 2209 ("pmap_pv_demote_pde: page %p is not managed", m)); 2210 va += PAGE_SIZE; 2211 pmap_insert_entry(pmap, va, m); 2212 } while (va < va_last); 2213} 2214 2215static void 2216pmap_pv_promote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa) 2217{ 2218 struct md_page *pvh; 2219 pv_entry_t pv; 2220 vm_offset_t va_last; 2221 vm_page_t m; 2222 2223 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2224 KASSERT((pa & PDRMASK) == 0, 2225 ("pmap_pv_promote_pde: pa is not 4mpage aligned")); 2226 2227 /* 2228 * Transfer the first page's pv entry for this mapping to the 2229 * 4mpage's pv list. Aside from avoiding the cost of a call 2230 * to get_pv_entry(), a transfer avoids the possibility that 2231 * get_pv_entry() calls pmap_collect() and that pmap_collect() 2232 * removes one of the mappings that is being promoted. 2233 */ 2234 m = PHYS_TO_VM_PAGE(pa); 2235 va = trunc_4mpage(va); 2236 pv = pmap_pvh_remove(&m->md, pmap, va); 2237 KASSERT(pv != NULL, ("pmap_pv_promote_pde: pv not found")); 2238 pvh = pa_to_pvh(pa); 2239 TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_list); 2240 /* Free the remaining NPTEPG - 1 pv entries. */ 2241 va_last = va + NBPDR - PAGE_SIZE; 2242 do { 2243 m++; 2244 va += PAGE_SIZE; 2245 pmap_pvh_free(&m->md, pmap, va); 2246 } while (va < va_last); 2247} 2248 2249static void 2250pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va) 2251{ 2252 pv_entry_t pv; 2253 2254 pv = pmap_pvh_remove(pvh, pmap, va); 2255 KASSERT(pv != NULL, ("pmap_pvh_free: pv not found")); 2256 free_pv_entry(pmap, pv); 2257} 2258 2259static void 2260pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va) 2261{ 2262 struct md_page *pvh; 2263 2264 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2265 pmap_pvh_free(&m->md, pmap, va); 2266 if (TAILQ_EMPTY(&m->md.pv_list)) { 2267 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); 2268 if (TAILQ_EMPTY(&pvh->pv_list)) 2269 vm_page_flag_clear(m, PG_WRITEABLE); 2270 } 2271} 2272 2273/* 2274 * Create a pv entry for page at pa for 2275 * (pmap, va). 2276 */ 2277static void 2278pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m) 2279{ 2280 pv_entry_t pv; 2281 2282 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2283 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2284 pv = get_pv_entry(pmap, FALSE); 2285 pv->pv_va = va; 2286 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list); 2287} 2288 2289/* 2290 * Conditionally create a pv entry. 2291 */ 2292static boolean_t 2293pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m) 2294{ 2295 pv_entry_t pv; 2296 2297 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2298 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2299 if (pv_entry_count < pv_entry_high_water && 2300 (pv = get_pv_entry(pmap, TRUE)) != NULL) { 2301 pv->pv_va = va; 2302 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list); 2303 return (TRUE); 2304 } else 2305 return (FALSE); 2306} 2307 2308/* 2309 * Create the pv entries for each of the pages within a superpage. 2310 */ 2311static boolean_t 2312pmap_pv_insert_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa) 2313{ 2314 struct md_page *pvh; 2315 pv_entry_t pv; 2316 2317 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2318 if (pv_entry_count < pv_entry_high_water && 2319 (pv = get_pv_entry(pmap, TRUE)) != NULL) { 2320 pv->pv_va = va; 2321 pvh = pa_to_pvh(pa); 2322 TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_list); 2323 return (TRUE); 2324 } else 2325 return (FALSE); 2326} 2327 2328/* 2329 * Fills a page table page with mappings to consecutive physical pages. 2330 */ 2331static void 2332pmap_fill_ptp(pt_entry_t *firstpte, pt_entry_t newpte) 2333{ 2334 pt_entry_t *pte; 2335 2336 for (pte = firstpte; pte < firstpte + NPTEPG; pte++) { 2337 *pte = newpte; 2338 newpte += PAGE_SIZE; 2339 } 2340} 2341 2342/* 2343 * Tries to demote a 2- or 4MB page mapping. If demotion fails, the 2344 * 2- or 4MB page mapping is invalidated. 2345 */ 2346static boolean_t 2347pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va) 2348{ 2349 pd_entry_t newpde, oldpde; 2350 pmap_t allpmaps_entry; 2351 pt_entry_t *firstpte, newpte; 2352 vm_paddr_t mptepa; 2353 vm_page_t free, mpte; 2354 2355 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2356 oldpde = *pde; 2357 KASSERT((oldpde & (PG_PS | PG_V)) == (PG_PS | PG_V), 2358 ("pmap_demote_pde: oldpde is missing PG_PS and/or PG_V")); 2359 mpte = pmap_lookup_pt_page(pmap, va); 2360 if (mpte != NULL) 2361 pmap_remove_pt_page(pmap, mpte); 2362 else { 2363 KASSERT((oldpde & PG_W) == 0, 2364 ("pmap_demote_pde: page table page for a wired mapping" 2365 " is missing")); 2366 2367 /* 2368 * Invalidate the 2- or 4MB page mapping and return 2369 * "failure" if the mapping was never accessed or the 2370 * allocation of the new page table page fails. 2371 */ 2372 if ((oldpde & PG_A) == 0 || (mpte = vm_page_alloc(NULL, 2373 va >> PDRSHIFT, VM_ALLOC_NOOBJ | VM_ALLOC_NORMAL | 2374 VM_ALLOC_WIRED)) == NULL) { 2375 free = NULL; 2376 pmap_remove_pde(pmap, pde, trunc_4mpage(va), &free); 2377 pmap_invalidate_page(pmap, trunc_4mpage(va)); 2378 pmap_free_zero_pages(free); 2379 CTR2(KTR_PMAP, "pmap_demote_pde: failure for va %#x" 2380 " in pmap %p", va, pmap); 2381 return (FALSE); 2382 } 2383 if (va < VM_MAXUSER_ADDRESS) 2384 pmap->pm_stats.resident_count++; 2385 } 2386 mptepa = VM_PAGE_TO_PHYS(mpte); 2387 2388 /* 2389 * Temporarily map the page table page (mpte) into the kernel's 2390 * address space at either PADDR1 or PADDR2. 2391 */ 2392 if (curthread->td_pinned > 0 && mtx_owned(&vm_page_queue_mtx)) { 2393 if ((*PMAP1 & PG_FRAME) != mptepa) { 2394 *PMAP1 = mptepa | PG_RW | PG_V | PG_A | PG_M; 2395#ifdef SMP 2396 PMAP1cpu = PCPU_GET(cpuid); 2397#endif 2398 invlcaddr(PADDR1); 2399 PMAP1changed++; 2400 } else 2401#ifdef SMP 2402 if (PMAP1cpu != PCPU_GET(cpuid)) { 2403 PMAP1cpu = PCPU_GET(cpuid); 2404 invlcaddr(PADDR1); 2405 PMAP1changedcpu++; 2406 } else 2407#endif 2408 PMAP1unchanged++; 2409 firstpte = PADDR1; 2410 } else { 2411 mtx_lock(&PMAP2mutex); 2412 if ((*PMAP2 & PG_FRAME) != mptepa) { 2413 *PMAP2 = mptepa | PG_RW | PG_V | PG_A | PG_M; 2414 pmap_invalidate_page(kernel_pmap, (vm_offset_t)PADDR2); 2415 } 2416 firstpte = PADDR2; 2417 } 2418 newpde = mptepa | PG_M | PG_A | (oldpde & PG_U) | PG_RW | PG_V; 2419 KASSERT((oldpde & PG_A) != 0, 2420 ("pmap_demote_pde: oldpde is missing PG_A")); 2421 KASSERT((oldpde & (PG_M | PG_RW)) != PG_RW, 2422 ("pmap_demote_pde: oldpde is missing PG_M")); 2423 newpte = oldpde & ~PG_PS; 2424 if ((newpte & PG_PDE_PAT) != 0) 2425 newpte ^= PG_PDE_PAT | PG_PTE_PAT; 2426 2427 /* 2428 * If the page table page is new, initialize it. 2429 */ 2430 if (mpte->wire_count == 1) { 2431 mpte->wire_count = NPTEPG; 2432 pmap_fill_ptp(firstpte, newpte); 2433 } 2434 KASSERT((*firstpte & PG_FRAME) == (newpte & PG_FRAME), 2435 ("pmap_demote_pde: firstpte and newpte map different physical" 2436 " addresses")); 2437 2438 /* 2439 * If the mapping has changed attributes, update the page table 2440 * entries. 2441 */ 2442 if ((*firstpte & PG_PTE_PROMOTE) != (newpte & PG_PTE_PROMOTE)) 2443 pmap_fill_ptp(firstpte, newpte); 2444 2445 /* 2446 * Demote the mapping. This pmap is locked. The old PDE has 2447 * PG_A set. If the old PDE has PG_RW set, it also has PG_M 2448 * set. Thus, there is no danger of a race with another 2449 * processor changing the setting of PG_A and/or PG_M between 2450 * the read above and the store below. 2451 */ 2452 if (pmap == kernel_pmap) { 2453 /* 2454 * A harmless race exists between this loop and the bcopy() 2455 * in pmap_pinit() that initializes the kernel segment of 2456 * the new page table. Specifically, that bcopy() may copy 2457 * the new PDE from the PTD, which is first in allpmaps, to 2458 * the new page table before this loop updates that new 2459 * page table. 2460 */ 2461 mtx_lock_spin(&allpmaps_lock); 2462 LIST_FOREACH(allpmaps_entry, &allpmaps, pm_list) { 2463 pde = pmap_pde(allpmaps_entry, va); 2464 KASSERT(*pde == newpde || (*pde & PG_PTE_PROMOTE) == 2465 (oldpde & PG_PTE_PROMOTE), 2466 ("pmap_demote_pde: pde was %#jx, expected %#jx", 2467 (uintmax_t)*pde, (uintmax_t)oldpde)); 2468 pde_store(pde, newpde); 2469 } 2470 mtx_unlock_spin(&allpmaps_lock); 2471 } else 2472 pde_store(pde, newpde); 2473 if (firstpte == PADDR2) 2474 mtx_unlock(&PMAP2mutex); 2475 2476 /* 2477 * Invalidate the recursive mapping of the page table page. 2478 */ 2479 pmap_invalidate_page(pmap, (vm_offset_t)vtopte(va)); 2480 2481 /* 2482 * Demote the pv entry. This depends on the earlier demotion 2483 * of the mapping. Specifically, the (re)creation of a per- 2484 * page pv entry might trigger the execution of pmap_collect(), 2485 * which might reclaim a newly (re)created per-page pv entry 2486 * and destroy the associated mapping. In order to destroy 2487 * the mapping, the PDE must have already changed from mapping 2488 * the 2mpage to referencing the page table page. 2489 */ 2490 if ((oldpde & PG_MANAGED) != 0) 2491 pmap_pv_demote_pde(pmap, va, oldpde & PG_PS_FRAME); 2492 2493 pmap_pde_demotions++; 2494 CTR2(KTR_PMAP, "pmap_demote_pde: success for va %#x" 2495 " in pmap %p", va, pmap); 2496 return (TRUE); 2497} 2498 2499/* 2500 * pmap_remove_pde: do the things to unmap a superpage in a process 2501 */ 2502static void 2503pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva, 2504 vm_page_t *free) 2505{ 2506 struct md_page *pvh; 2507 pd_entry_t oldpde; 2508 vm_offset_t eva, va; 2509 vm_page_t m, mpte; 2510 2511 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2512 KASSERT((sva & PDRMASK) == 0, 2513 ("pmap_remove_pde: sva is not 4mpage aligned")); 2514 oldpde = pte_load_clear(pdq); 2515 if (oldpde & PG_W) 2516 pmap->pm_stats.wired_count -= NBPDR / PAGE_SIZE; 2517 2518 /* 2519 * Machines that don't support invlpg, also don't support 2520 * PG_G. 2521 */ 2522 if (oldpde & PG_G) 2523 pmap_invalidate_page(kernel_pmap, sva); 2524 pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE; 2525 if (oldpde & PG_MANAGED) { 2526 pvh = pa_to_pvh(oldpde & PG_PS_FRAME); 2527 pmap_pvh_free(pvh, pmap, sva); 2528 eva = sva + NBPDR; 2529 for (va = sva, m = PHYS_TO_VM_PAGE(oldpde & PG_PS_FRAME); 2530 va < eva; va += PAGE_SIZE, m++) { 2531 if ((oldpde & (PG_M | PG_RW)) == (PG_M | PG_RW)) 2532 vm_page_dirty(m); 2533 if (oldpde & PG_A) 2534 vm_page_flag_set(m, PG_REFERENCED); 2535 if (TAILQ_EMPTY(&m->md.pv_list) && 2536 TAILQ_EMPTY(&pvh->pv_list)) 2537 vm_page_flag_clear(m, PG_WRITEABLE); 2538 } 2539 } 2540 if (pmap == kernel_pmap) { 2541 if (!pmap_demote_pde(pmap, pdq, sva)) 2542 panic("pmap_remove_pde: failed demotion"); 2543 } else { 2544 mpte = pmap_lookup_pt_page(pmap, sva); 2545 if (mpte != NULL) { 2546 pmap_remove_pt_page(pmap, mpte); 2547 pmap->pm_stats.resident_count--; 2548 KASSERT(mpte->wire_count == NPTEPG, 2549 ("pmap_remove_pde: pte page wire count error")); 2550 mpte->wire_count = 0; 2551 pmap_add_delayed_free_list(mpte, free, FALSE); 2552 atomic_subtract_int(&cnt.v_wire_count, 1); 2553 } 2554 } 2555} 2556 2557/* 2558 * pmap_remove_pte: do the things to unmap a page in a process 2559 */ 2560static int 2561pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t va, vm_page_t *free) 2562{ 2563 pt_entry_t oldpte; 2564 vm_page_t m; 2565 2566 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2567 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2568 oldpte = pte_load_clear(ptq); 2569 if (oldpte & PG_W) 2570 pmap->pm_stats.wired_count -= 1; 2571 /* 2572 * Machines that don't support invlpg, also don't support 2573 * PG_G. 2574 */ 2575 if (oldpte & PG_G) 2576 pmap_invalidate_page(kernel_pmap, va); 2577 pmap->pm_stats.resident_count -= 1; 2578 if (oldpte & PG_MANAGED) { 2579 m = PHYS_TO_VM_PAGE(oldpte & PG_FRAME); 2580 if ((oldpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) 2581 vm_page_dirty(m); 2582 if (oldpte & PG_A) 2583 vm_page_flag_set(m, PG_REFERENCED); 2584 pmap_remove_entry(pmap, m, va); 2585 } 2586 return (pmap_unuse_pt(pmap, va, free)); 2587} 2588 2589/* 2590 * Remove a single page from a process address space 2591 */ 2592static void 2593pmap_remove_page(pmap_t pmap, vm_offset_t va, vm_page_t *free) 2594{ 2595 pt_entry_t *pte; 2596 2597 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2598 KASSERT(curthread->td_pinned > 0, ("curthread not pinned")); 2599 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2600 if ((pte = pmap_pte_quick(pmap, va)) == NULL || *pte == 0) 2601 return; 2602 pmap_remove_pte(pmap, pte, va, free); 2603 pmap_invalidate_page(pmap, va); 2604} 2605 2606/* 2607 * Remove the given range of addresses from the specified map. 2608 * 2609 * It is assumed that the start and end are properly 2610 * rounded to the page size. 2611 */ 2612void 2613pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) 2614{ 2615 vm_offset_t pdnxt; 2616 pd_entry_t ptpaddr; 2617 pt_entry_t *pte; 2618 vm_page_t free = NULL; 2619 int anyvalid; 2620 2621 /* 2622 * Perform an unsynchronized read. This is, however, safe. 2623 */ 2624 if (pmap->pm_stats.resident_count == 0) 2625 return; 2626 2627 anyvalid = 0; 2628 2629 vm_page_lock_queues(); 2630 sched_pin(); 2631 PMAP_LOCK(pmap); 2632 2633 /* 2634 * special handling of removing one page. a very 2635 * common operation and easy to short circuit some 2636 * code. 2637 */ 2638 if ((sva + PAGE_SIZE == eva) && 2639 ((pmap->pm_pdir[(sva >> PDRSHIFT)] & PG_PS) == 0)) { 2640 pmap_remove_page(pmap, sva, &free); 2641 goto out; 2642 } 2643 2644 for (; sva < eva; sva = pdnxt) { 2645 unsigned pdirindex; 2646 2647 /* 2648 * Calculate index for next page table. 2649 */ 2650 pdnxt = (sva + NBPDR) & ~PDRMASK; 2651 if (pdnxt < sva) 2652 pdnxt = eva; 2653 if (pmap->pm_stats.resident_count == 0) 2654 break; 2655 2656 pdirindex = sva >> PDRSHIFT; 2657 ptpaddr = pmap->pm_pdir[pdirindex]; 2658 2659 /* 2660 * Weed out invalid mappings. Note: we assume that the page 2661 * directory table is always allocated, and in kernel virtual. 2662 */ 2663 if (ptpaddr == 0) 2664 continue; 2665 2666 /* 2667 * Check for large page. 2668 */ 2669 if ((ptpaddr & PG_PS) != 0) { 2670 /* 2671 * Are we removing the entire large page? If not, 2672 * demote the mapping and fall through. 2673 */ 2674 if (sva + NBPDR == pdnxt && eva >= pdnxt) { 2675 /* 2676 * The TLB entry for a PG_G mapping is 2677 * invalidated by pmap_remove_pde(). 2678 */ 2679 if ((ptpaddr & PG_G) == 0) 2680 anyvalid = 1; 2681 pmap_remove_pde(pmap, 2682 &pmap->pm_pdir[pdirindex], sva, &free); 2683 continue; 2684 } else if (!pmap_demote_pde(pmap, 2685 &pmap->pm_pdir[pdirindex], sva)) { 2686 /* The large page mapping was destroyed. */ 2687 continue; 2688 } 2689 } 2690 2691 /* 2692 * Limit our scan to either the end of the va represented 2693 * by the current page table page, or to the end of the 2694 * range being removed. 2695 */ 2696 if (pdnxt > eva) 2697 pdnxt = eva; 2698 2699 for (pte = pmap_pte_quick(pmap, sva); sva != pdnxt; pte++, 2700 sva += PAGE_SIZE) { 2701 if (*pte == 0) 2702 continue; 2703 2704 /* 2705 * The TLB entry for a PG_G mapping is invalidated 2706 * by pmap_remove_pte(). 2707 */ 2708 if ((*pte & PG_G) == 0) 2709 anyvalid = 1; 2710 if (pmap_remove_pte(pmap, pte, sva, &free)) 2711 break; 2712 } 2713 } 2714out: 2715 sched_unpin(); 2716 if (anyvalid) 2717 pmap_invalidate_all(pmap); 2718 vm_page_unlock_queues(); 2719 PMAP_UNLOCK(pmap); 2720 pmap_free_zero_pages(free); 2721} 2722 2723/* 2724 * Routine: pmap_remove_all 2725 * Function: 2726 * Removes this physical page from 2727 * all physical maps in which it resides. 2728 * Reflects back modify bits to the pager. 2729 * 2730 * Notes: 2731 * Original versions of this routine were very 2732 * inefficient because they iteratively called 2733 * pmap_remove (slow...) 2734 */ 2735 2736void 2737pmap_remove_all(vm_page_t m) 2738{ 2739 struct md_page *pvh; 2740 pv_entry_t pv; 2741 pmap_t pmap; 2742 pt_entry_t *pte, tpte; 2743 pd_entry_t *pde; 2744 vm_offset_t va; 2745 vm_page_t free; 2746 2747 KASSERT((m->flags & PG_FICTITIOUS) == 0, 2748 ("pmap_remove_all: page %p is fictitious", m)); 2749 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2750 sched_pin(); 2751 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); 2752 while ((pv = TAILQ_FIRST(&pvh->pv_list)) != NULL) { 2753 va = pv->pv_va; 2754 pmap = PV_PMAP(pv); 2755 PMAP_LOCK(pmap); 2756 pde = pmap_pde(pmap, va); 2757 (void)pmap_demote_pde(pmap, pde, va); 2758 PMAP_UNLOCK(pmap); 2759 } 2760 while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) { 2761 pmap = PV_PMAP(pv); 2762 PMAP_LOCK(pmap); 2763 pmap->pm_stats.resident_count--; 2764 pde = pmap_pde(pmap, pv->pv_va); 2765 KASSERT((*pde & PG_PS) == 0, ("pmap_remove_all: found" 2766 " a 4mpage in page %p's pv list", m)); 2767 pte = pmap_pte_quick(pmap, pv->pv_va); 2768 tpte = pte_load_clear(pte); 2769 if (tpte & PG_W) 2770 pmap->pm_stats.wired_count--; 2771 if (tpte & PG_A) 2772 vm_page_flag_set(m, PG_REFERENCED); 2773 2774 /* 2775 * Update the vm_page_t clean and reference bits. 2776 */ 2777 if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) 2778 vm_page_dirty(m); 2779 free = NULL; 2780 pmap_unuse_pt(pmap, pv->pv_va, &free); 2781 pmap_invalidate_page(pmap, pv->pv_va); 2782 pmap_free_zero_pages(free); 2783 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list); 2784 free_pv_entry(pmap, pv); 2785 PMAP_UNLOCK(pmap); 2786 } 2787 vm_page_flag_clear(m, PG_WRITEABLE); 2788 sched_unpin(); 2789} 2790 2791/* 2792 * pmap_protect_pde: do the things to protect a 4mpage in a process 2793 */ 2794static boolean_t 2795pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva, vm_prot_t prot) 2796{ 2797 pd_entry_t newpde, oldpde; 2798 vm_offset_t eva, va; 2799 vm_page_t m; 2800 boolean_t anychanged; 2801 2802 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2803 KASSERT((sva & PDRMASK) == 0, 2804 ("pmap_protect_pde: sva is not 4mpage aligned")); 2805 anychanged = FALSE; 2806retry: 2807 oldpde = newpde = *pde; 2808 if (oldpde & PG_MANAGED) { 2809 eva = sva + NBPDR; 2810 for (va = sva, m = PHYS_TO_VM_PAGE(oldpde & PG_PS_FRAME); 2811 va < eva; va += PAGE_SIZE, m++) { 2812 /* 2813 * In contrast to the analogous operation on a 4KB page 2814 * mapping, the mapping's PG_A flag is not cleared and 2815 * the page's PG_REFERENCED flag is not set. The 2816 * reason is that pmap_demote_pde() expects that a 2/4MB 2817 * page mapping with a stored page table page has PG_A 2818 * set. 2819 */ 2820 if ((oldpde & (PG_M | PG_RW)) == (PG_M | PG_RW)) 2821 vm_page_dirty(m); 2822 } 2823 } 2824 if ((prot & VM_PROT_WRITE) == 0) 2825 newpde &= ~(PG_RW | PG_M); 2826#ifdef PAE 2827 if ((prot & VM_PROT_EXECUTE) == 0) 2828 newpde |= pg_nx; 2829#endif 2830 if (newpde != oldpde) { 2831 if (!pde_cmpset(pde, oldpde, newpde)) 2832 goto retry; 2833 if (oldpde & PG_G) 2834 pmap_invalidate_page(pmap, sva); 2835 else 2836 anychanged = TRUE; 2837 } 2838 return (anychanged); 2839} 2840 2841/* 2842 * Set the physical protection on the 2843 * specified range of this map as requested. 2844 */ 2845void 2846pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot) 2847{ 2848 vm_offset_t pdnxt; 2849 pd_entry_t ptpaddr; 2850 pt_entry_t *pte; 2851 int anychanged; 2852 2853 if ((prot & VM_PROT_READ) == VM_PROT_NONE) { 2854 pmap_remove(pmap, sva, eva); 2855 return; 2856 } 2857 2858#ifdef PAE 2859 if ((prot & (VM_PROT_WRITE|VM_PROT_EXECUTE)) == 2860 (VM_PROT_WRITE|VM_PROT_EXECUTE)) 2861 return; 2862#else 2863 if (prot & VM_PROT_WRITE) 2864 return; 2865#endif 2866 2867 anychanged = 0; 2868 2869 vm_page_lock_queues(); 2870 sched_pin(); 2871 PMAP_LOCK(pmap); 2872 for (; sva < eva; sva = pdnxt) { 2873 pt_entry_t obits, pbits; 2874 unsigned pdirindex; 2875 2876 pdnxt = (sva + NBPDR) & ~PDRMASK; 2877 if (pdnxt < sva) 2878 pdnxt = eva; 2879 2880 pdirindex = sva >> PDRSHIFT; 2881 ptpaddr = pmap->pm_pdir[pdirindex]; 2882 2883 /* 2884 * Weed out invalid mappings. Note: we assume that the page 2885 * directory table is always allocated, and in kernel virtual. 2886 */ 2887 if (ptpaddr == 0) 2888 continue; 2889 2890 /* 2891 * Check for large page. 2892 */ 2893 if ((ptpaddr & PG_PS) != 0) { 2894 /* 2895 * Are we protecting the entire large page? If not, 2896 * demote the mapping and fall through. 2897 */ 2898 if (sva + NBPDR == pdnxt && eva >= pdnxt) { 2899 /* 2900 * The TLB entry for a PG_G mapping is 2901 * invalidated by pmap_protect_pde(). 2902 */ 2903 if (pmap_protect_pde(pmap, 2904 &pmap->pm_pdir[pdirindex], sva, prot)) 2905 anychanged = 1; 2906 continue; 2907 } else if (!pmap_demote_pde(pmap, 2908 &pmap->pm_pdir[pdirindex], sva)) { 2909 /* The large page mapping was destroyed. */ 2910 continue; 2911 } 2912 } 2913 2914 if (pdnxt > eva) 2915 pdnxt = eva; 2916 2917 for (pte = pmap_pte_quick(pmap, sva); sva != pdnxt; pte++, 2918 sva += PAGE_SIZE) { 2919 vm_page_t m; 2920 2921retry: 2922 /* 2923 * Regardless of whether a pte is 32 or 64 bits in 2924 * size, PG_RW, PG_A, and PG_M are among the least 2925 * significant 32 bits. 2926 */ 2927 obits = pbits = *pte; 2928 if ((pbits & PG_V) == 0) 2929 continue; 2930 if (pbits & PG_MANAGED) { 2931 m = NULL; 2932 if (pbits & PG_A) { 2933 m = PHYS_TO_VM_PAGE(pbits & PG_FRAME); 2934 vm_page_flag_set(m, PG_REFERENCED); 2935 pbits &= ~PG_A; 2936 } 2937 if ((pbits & (PG_M | PG_RW)) == (PG_M | PG_RW)) { 2938 if (m == NULL) 2939 m = PHYS_TO_VM_PAGE(pbits & PG_FRAME); 2940 vm_page_dirty(m); 2941 } 2942 } 2943 2944 if ((prot & VM_PROT_WRITE) == 0) 2945 pbits &= ~(PG_RW | PG_M); 2946#ifdef PAE 2947 if ((prot & VM_PROT_EXECUTE) == 0) 2948 pbits |= pg_nx; 2949#endif 2950 2951 if (pbits != obits) { 2952#ifdef PAE 2953 if (!atomic_cmpset_64(pte, obits, pbits)) 2954 goto retry; 2955#else 2956 if (!atomic_cmpset_int((u_int *)pte, obits, 2957 pbits)) 2958 goto retry; 2959#endif 2960 if (obits & PG_G) 2961 pmap_invalidate_page(pmap, sva); 2962 else 2963 anychanged = 1; 2964 } 2965 } 2966 } 2967 sched_unpin(); 2968 if (anychanged) 2969 pmap_invalidate_all(pmap); 2970 vm_page_unlock_queues(); 2971 PMAP_UNLOCK(pmap); 2972} 2973 2974/* 2975 * Tries to promote the 512 or 1024, contiguous 4KB page mappings that are 2976 * within a single page table page (PTP) to a single 2- or 4MB page mapping. 2977 * For promotion to occur, two conditions must be met: (1) the 4KB page 2978 * mappings must map aligned, contiguous physical memory and (2) the 4KB page 2979 * mappings must have identical characteristics. 2980 * 2981 * Managed (PG_MANAGED) mappings within the kernel address space are not 2982 * promoted. The reason is that kernel PDEs are replicated in each pmap but 2983 * pmap_clear_ptes() and pmap_ts_referenced() only read the PDE from the kernel 2984 * pmap. 2985 */ 2986static void 2987pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va) 2988{ 2989 pd_entry_t newpde; 2990 pmap_t allpmaps_entry; 2991 pt_entry_t *firstpte, oldpte, pa, *pte; 2992 vm_offset_t oldpteva; 2993 vm_page_t mpte; 2994 2995 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2996 2997 /* 2998 * Examine the first PTE in the specified PTP. Abort if this PTE is 2999 * either invalid, unused, or does not map the first 4KB physical page 3000 * within a 2- or 4MB page. 3001 */ 3002 firstpte = vtopte(trunc_4mpage(va)); 3003setpde: 3004 newpde = *firstpte; 3005 if ((newpde & ((PG_FRAME & PDRMASK) | PG_A | PG_V)) != (PG_A | PG_V)) { 3006 pmap_pde_p_failures++; 3007 CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#x" 3008 " in pmap %p", va, pmap); 3009 return; 3010 } 3011 if ((*firstpte & PG_MANAGED) != 0 && pmap == kernel_pmap) { 3012 pmap_pde_p_failures++; 3013 CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#x" 3014 " in pmap %p", va, pmap); 3015 return; 3016 } 3017 if ((newpde & (PG_M | PG_RW)) == PG_RW) { 3018 /* 3019 * When PG_M is already clear, PG_RW can be cleared without 3020 * a TLB invalidation. 3021 */ 3022 if (!atomic_cmpset_int((u_int *)firstpte, newpde, newpde & 3023 ~PG_RW)) 3024 goto setpde; 3025 newpde &= ~PG_RW; 3026 } 3027 3028 /* 3029 * Examine each of the other PTEs in the specified PTP. Abort if this 3030 * PTE maps an unexpected 4KB physical page or does not have identical 3031 * characteristics to the first PTE. 3032 */ 3033 pa = (newpde & (PG_PS_FRAME | PG_A | PG_V)) + NBPDR - PAGE_SIZE; 3034 for (pte = firstpte + NPTEPG - 1; pte > firstpte; pte--) { 3035setpte: 3036 oldpte = *pte; 3037 if ((oldpte & (PG_FRAME | PG_A | PG_V)) != pa) { 3038 pmap_pde_p_failures++; 3039 CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#x" 3040 " in pmap %p", va, pmap); 3041 return; 3042 } 3043 if ((oldpte & (PG_M | PG_RW)) == PG_RW) { 3044 /* 3045 * When PG_M is already clear, PG_RW can be cleared 3046 * without a TLB invalidation. 3047 */ 3048 if (!atomic_cmpset_int((u_int *)pte, oldpte, 3049 oldpte & ~PG_RW)) 3050 goto setpte; 3051 oldpte &= ~PG_RW; 3052 oldpteva = (oldpte & PG_FRAME & PDRMASK) | 3053 (va & ~PDRMASK); 3054 CTR2(KTR_PMAP, "pmap_promote_pde: protect for va %#x" 3055 " in pmap %p", oldpteva, pmap); 3056 } 3057 if ((oldpte & PG_PTE_PROMOTE) != (newpde & PG_PTE_PROMOTE)) { 3058 pmap_pde_p_failures++; 3059 CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#x" 3060 " in pmap %p", va, pmap); 3061 return; 3062 } 3063 pa -= PAGE_SIZE; 3064 } 3065 3066 /* 3067 * Save the page table page in its current state until the PDE 3068 * mapping the superpage is demoted by pmap_demote_pde() or 3069 * destroyed by pmap_remove_pde(). 3070 */ 3071 mpte = PHYS_TO_VM_PAGE(*pde & PG_FRAME); 3072 KASSERT(mpte >= vm_page_array && 3073 mpte < &vm_page_array[vm_page_array_size], 3074 ("pmap_promote_pde: page table page is out of range")); 3075 KASSERT(mpte->pindex == va >> PDRSHIFT, 3076 ("pmap_promote_pde: page table page's pindex is wrong")); 3077 pmap_insert_pt_page(pmap, mpte); 3078 3079 /* 3080 * Promote the pv entries. 3081 */ 3082 if ((newpde & PG_MANAGED) != 0) 3083 pmap_pv_promote_pde(pmap, va, newpde & PG_PS_FRAME); 3084 3085 /* 3086 * Propagate the PAT index to its proper position. 3087 */ 3088 if ((newpde & PG_PTE_PAT) != 0) 3089 newpde ^= PG_PDE_PAT | PG_PTE_PAT; 3090 3091 /* 3092 * Map the superpage. 3093 */ 3094 if (pmap == kernel_pmap) { 3095 mtx_lock_spin(&allpmaps_lock); 3096 LIST_FOREACH(allpmaps_entry, &allpmaps, pm_list) { 3097 pde = pmap_pde(allpmaps_entry, va); 3098 pde_store(pde, PG_PS | newpde); 3099 } 3100 mtx_unlock_spin(&allpmaps_lock); 3101 } else 3102 pde_store(pde, PG_PS | newpde); 3103 3104 pmap_pde_promotions++; 3105 CTR2(KTR_PMAP, "pmap_promote_pde: success for va %#x" 3106 " in pmap %p", va, pmap); 3107} 3108 3109/* 3110 * Insert the given physical page (p) at 3111 * the specified virtual address (v) in the 3112 * target physical map with the protection requested. 3113 * 3114 * If specified, the page will be wired down, meaning 3115 * that the related pte can not be reclaimed. 3116 * 3117 * NB: This is the only routine which MAY NOT lazy-evaluate 3118 * or lose information. That is, this routine must actually 3119 * insert this page into the given map NOW. 3120 */ 3121void 3122pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m, 3123 vm_prot_t prot, boolean_t wired) 3124{ 3125 vm_paddr_t pa; 3126 pd_entry_t *pde; 3127 pt_entry_t *pte; 3128 vm_paddr_t opa; 3129 pt_entry_t origpte, newpte; 3130 vm_page_t mpte, om; 3131 boolean_t invlva; 3132 3133 va = trunc_page(va); 3134 KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig")); 3135 KASSERT(va < UPT_MIN_ADDRESS || va >= UPT_MAX_ADDRESS, 3136 ("pmap_enter: invalid to pmap_enter page table pages (va: 0x%x)", va)); 3137 3138 mpte = NULL; 3139 3140 vm_page_lock_queues(); 3141 PMAP_LOCK(pmap); 3142 sched_pin(); 3143 3144 /* 3145 * In the case that a page table page is not 3146 * resident, we are creating it here. 3147 */ 3148 if (va < VM_MAXUSER_ADDRESS) { 3149 mpte = pmap_allocpte(pmap, va, M_WAITOK); 3150 } 3151 3152 pde = pmap_pde(pmap, va); 3153 if ((*pde & PG_PS) != 0) 3154 panic("pmap_enter: attempted pmap_enter on 4MB page"); 3155 pte = pmap_pte_quick(pmap, va); 3156 3157 /* 3158 * Page Directory table entry not valid, we need a new PT page 3159 */ 3160 if (pte == NULL) { 3161 panic("pmap_enter: invalid page directory pdir=%#jx, va=%#x", 3162 (uintmax_t)pmap->pm_pdir[PTDPTDI], va); 3163 } 3164 3165 pa = VM_PAGE_TO_PHYS(m); 3166 om = NULL; 3167 origpte = *pte; 3168 opa = origpte & PG_FRAME; 3169 3170 /* 3171 * Mapping has not changed, must be protection or wiring change. 3172 */ 3173 if (origpte && (opa == pa)) { 3174 /* 3175 * Wiring change, just update stats. We don't worry about 3176 * wiring PT pages as they remain resident as long as there 3177 * are valid mappings in them. Hence, if a user page is wired, 3178 * the PT page will be also. 3179 */ 3180 if (wired && ((origpte & PG_W) == 0)) 3181 pmap->pm_stats.wired_count++; 3182 else if (!wired && (origpte & PG_W)) 3183 pmap->pm_stats.wired_count--; 3184 3185 /* 3186 * Remove extra pte reference 3187 */ 3188 if (mpte) 3189 mpte->wire_count--; 3190 3191 /* 3192 * We might be turning off write access to the page, 3193 * so we go ahead and sense modify status. 3194 */ 3195 if (origpte & PG_MANAGED) { 3196 om = m; 3197 pa |= PG_MANAGED; 3198 } 3199 goto validate; 3200 } 3201 /* 3202 * Mapping has changed, invalidate old range and fall through to 3203 * handle validating new mapping. 3204 */ 3205 if (opa) { 3206 if (origpte & PG_W) 3207 pmap->pm_stats.wired_count--; 3208 if (origpte & PG_MANAGED) { 3209 om = PHYS_TO_VM_PAGE(opa); 3210 pmap_remove_entry(pmap, om, va); 3211 } 3212 if (mpte != NULL) { 3213 mpte->wire_count--; 3214 KASSERT(mpte->wire_count > 0, 3215 ("pmap_enter: missing reference to page table page," 3216 " va: 0x%x", va)); 3217 } 3218 } else 3219 pmap->pm_stats.resident_count++; 3220 3221 /* 3222 * Enter on the PV list if part of our managed memory. 3223 */ 3224 if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) { 3225 KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva, 3226 ("pmap_enter: managed mapping within the clean submap")); 3227 pmap_insert_entry(pmap, va, m); 3228 pa |= PG_MANAGED; 3229 } 3230 3231 /* 3232 * Increment counters 3233 */ 3234 if (wired) 3235 pmap->pm_stats.wired_count++; 3236 3237validate: 3238 /* 3239 * Now validate mapping with desired protection/wiring. 3240 */ 3241 newpte = (pt_entry_t)(pa | pmap_cache_bits(m->md.pat_mode, 0) | PG_V); 3242 if ((prot & VM_PROT_WRITE) != 0) { 3243 newpte |= PG_RW; 3244 vm_page_flag_set(m, PG_WRITEABLE); 3245 } 3246#ifdef PAE 3247 if ((prot & VM_PROT_EXECUTE) == 0) 3248 newpte |= pg_nx; 3249#endif 3250 if (wired) 3251 newpte |= PG_W; 3252 if (va < VM_MAXUSER_ADDRESS) 3253 newpte |= PG_U; 3254 if (pmap == kernel_pmap) 3255 newpte |= pgeflag; 3256 3257 /* 3258 * if the mapping or permission bits are different, we need 3259 * to update the pte. 3260 */ 3261 if ((origpte & ~(PG_M|PG_A)) != newpte) { 3262 newpte |= PG_A; 3263 if ((access & VM_PROT_WRITE) != 0) 3264 newpte |= PG_M; 3265 if (origpte & PG_V) { 3266 invlva = FALSE; 3267 origpte = pte_load_store(pte, newpte); 3268 if (origpte & PG_A) { 3269 if (origpte & PG_MANAGED) 3270 vm_page_flag_set(om, PG_REFERENCED); 3271 if (opa != VM_PAGE_TO_PHYS(m)) 3272 invlva = TRUE; 3273#ifdef PAE 3274 if ((origpte & PG_NX) == 0 && 3275 (newpte & PG_NX) != 0) 3276 invlva = TRUE; 3277#endif 3278 } 3279 if ((origpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) { 3280 if ((origpte & PG_MANAGED) != 0) 3281 vm_page_dirty(om); 3282 if ((prot & VM_PROT_WRITE) == 0) 3283 invlva = TRUE; 3284 } 3285 if (invlva) 3286 pmap_invalidate_page(pmap, va); 3287 } else 3288 pte_store(pte, newpte); 3289 } 3290 3291 /* 3292 * If both the page table page and the reservation are fully 3293 * populated, then attempt promotion. 3294 */ 3295 if ((mpte == NULL || mpte->wire_count == NPTEPG) && 3296 pg_ps_enabled && vm_reserv_level_iffullpop(m) == 0) 3297 pmap_promote_pde(pmap, pde, va); 3298 3299 sched_unpin(); 3300 vm_page_unlock_queues(); 3301 PMAP_UNLOCK(pmap); 3302} 3303 3304/* 3305 * Tries to create a 2- or 4MB page mapping. Returns TRUE if successful and 3306 * FALSE otherwise. Fails if (1) a page table page cannot be allocated without 3307 * blocking, (2) a mapping already exists at the specified virtual address, or 3308 * (3) a pv entry cannot be allocated without reclaiming another pv entry. 3309 */ 3310static boolean_t 3311pmap_enter_pde(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot) 3312{ 3313 pd_entry_t *pde, newpde; 3314 3315 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 3316 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 3317 pde = pmap_pde(pmap, va); 3318 if (*pde != 0) { 3319 CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx" 3320 " in pmap %p", va, pmap); 3321 return (FALSE); 3322 } 3323 newpde = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(m->md.pat_mode, 1) | 3324 PG_PS | PG_V; 3325 if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) { 3326 newpde |= PG_MANAGED; 3327 3328 /* 3329 * Abort this mapping if its PV entry could not be created. 3330 */ 3331 if (!pmap_pv_insert_pde(pmap, va, VM_PAGE_TO_PHYS(m))) { 3332 CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx" 3333 " in pmap %p", va, pmap); 3334 return (FALSE); 3335 } 3336 } 3337#ifdef PAE 3338 if ((prot & VM_PROT_EXECUTE) == 0) 3339 newpde |= pg_nx; 3340#endif 3341 if (va < VM_MAXUSER_ADDRESS) 3342 newpde |= PG_U; 3343 3344 /* 3345 * Increment counters. 3346 */ 3347 pmap->pm_stats.resident_count += NBPDR / PAGE_SIZE; 3348 3349 /* 3350 * Map the superpage. 3351 */ 3352 pde_store(pde, newpde); 3353 3354 pmap_pde_mappings++; 3355 CTR2(KTR_PMAP, "pmap_enter_pde: success for va %#lx" 3356 " in pmap %p", va, pmap); 3357 return (TRUE); 3358} 3359 3360/* 3361 * Maps a sequence of resident pages belonging to the same object. 3362 * The sequence begins with the given page m_start. This page is 3363 * mapped at the given virtual address start. Each subsequent page is 3364 * mapped at a virtual address that is offset from start by the same 3365 * amount as the page is offset from m_start within the object. The 3366 * last page in the sequence is the page with the largest offset from 3367 * m_start that can be mapped at a virtual address less than the given 3368 * virtual address end. Not every virtual page between start and end 3369 * is mapped; only those for which a resident page exists with the 3370 * corresponding offset from m_start are mapped. 3371 */ 3372void 3373pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end, 3374 vm_page_t m_start, vm_prot_t prot) 3375{ 3376 vm_offset_t va; 3377 vm_page_t m, mpte; 3378 vm_pindex_t diff, psize; 3379 3380 VM_OBJECT_LOCK_ASSERT(m_start->object, MA_OWNED); 3381 psize = atop(end - start); 3382 mpte = NULL; 3383 m = m_start; 3384 PMAP_LOCK(pmap); 3385 while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) { 3386 va = start + ptoa(diff); 3387 if ((va & PDRMASK) == 0 && va + NBPDR <= end && 3388 (VM_PAGE_TO_PHYS(m) & PDRMASK) == 0 && 3389 pg_ps_enabled && vm_reserv_level_iffullpop(m) == 0 && 3390 pmap_enter_pde(pmap, va, m, prot)) 3391 m = &m[NBPDR / PAGE_SIZE - 1]; 3392 else 3393 mpte = pmap_enter_quick_locked(pmap, va, m, prot, 3394 mpte); 3395 m = TAILQ_NEXT(m, listq); 3396 } 3397 PMAP_UNLOCK(pmap); 3398} 3399 3400/* 3401 * this code makes some *MAJOR* assumptions: 3402 * 1. Current pmap & pmap exists. 3403 * 2. Not wired. 3404 * 3. Read access. 3405 * 4. No page table pages. 3406 * but is *MUCH* faster than pmap_enter... 3407 */ 3408 3409void 3410pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot) 3411{ 3412 3413 PMAP_LOCK(pmap); 3414 (void) pmap_enter_quick_locked(pmap, va, m, prot, NULL); 3415 PMAP_UNLOCK(pmap); 3416} 3417 3418static vm_page_t 3419pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, 3420 vm_prot_t prot, vm_page_t mpte) 3421{ 3422 pt_entry_t *pte; 3423 vm_paddr_t pa; 3424 vm_page_t free; 3425 3426 KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva || 3427 (m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0, 3428 ("pmap_enter_quick_locked: managed mapping within the clean submap")); 3429 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 3430 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 3431 3432 /* 3433 * In the case that a page table page is not 3434 * resident, we are creating it here. 3435 */ 3436 if (va < VM_MAXUSER_ADDRESS) { 3437 unsigned ptepindex; 3438 pd_entry_t ptepa; 3439 3440 /* 3441 * Calculate pagetable page index 3442 */ 3443 ptepindex = va >> PDRSHIFT; 3444 if (mpte && (mpte->pindex == ptepindex)) { 3445 mpte->wire_count++; 3446 } else { 3447 /* 3448 * Get the page directory entry 3449 */ 3450 ptepa = pmap->pm_pdir[ptepindex]; 3451 3452 /* 3453 * If the page table page is mapped, we just increment 3454 * the hold count, and activate it. 3455 */ 3456 if (ptepa) { 3457 if (ptepa & PG_PS) 3458 return (NULL); 3459 mpte = PHYS_TO_VM_PAGE(ptepa & PG_FRAME); 3460 mpte->wire_count++; 3461 } else { 3462 mpte = _pmap_allocpte(pmap, ptepindex, 3463 M_NOWAIT); 3464 if (mpte == NULL) 3465 return (mpte); 3466 } 3467 } 3468 } else { 3469 mpte = NULL; 3470 } 3471 3472 /* 3473 * This call to vtopte makes the assumption that we are 3474 * entering the page into the current pmap. In order to support 3475 * quick entry into any pmap, one would likely use pmap_pte_quick. 3476 * But that isn't as quick as vtopte. 3477 */ 3478 pte = vtopte(va); 3479 if (*pte) { 3480 if (mpte != NULL) { 3481 mpte->wire_count--; 3482 mpte = NULL; 3483 } 3484 return (mpte); 3485 } 3486 3487 /* 3488 * Enter on the PV list if part of our managed memory. 3489 */ 3490 if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0 && 3491 !pmap_try_insert_pv_entry(pmap, va, m)) { 3492 if (mpte != NULL) { 3493 free = NULL; 3494 if (pmap_unwire_pte_hold(pmap, mpte, &free)) { 3495 pmap_invalidate_page(pmap, va); 3496 pmap_free_zero_pages(free); 3497 } 3498 3499 mpte = NULL; 3500 } 3501 return (mpte); 3502 } 3503 3504 /* 3505 * Increment counters 3506 */ 3507 pmap->pm_stats.resident_count++; 3508 3509 pa = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(m->md.pat_mode, 0); 3510#ifdef PAE 3511 if ((prot & VM_PROT_EXECUTE) == 0) 3512 pa |= pg_nx; 3513#endif 3514 3515 /* 3516 * Now validate mapping with RO protection 3517 */ 3518 if (m->flags & (PG_FICTITIOUS|PG_UNMANAGED)) 3519 pte_store(pte, pa | PG_V | PG_U); 3520 else 3521 pte_store(pte, pa | PG_V | PG_U | PG_MANAGED); 3522 return mpte; 3523} 3524 3525/* 3526 * Make a temporary mapping for a physical address. This is only intended 3527 * to be used for panic dumps. 3528 */ 3529void * 3530pmap_kenter_temporary(vm_paddr_t pa, int i) 3531{ 3532 vm_offset_t va; 3533 3534 va = (vm_offset_t)crashdumpmap + (i * PAGE_SIZE); 3535 pmap_kenter(va, pa); 3536 invlpg(va); 3537 return ((void *)crashdumpmap); 3538} 3539 3540/* 3541 * This code maps large physical mmap regions into the 3542 * processor address space. Note that some shortcuts 3543 * are taken, but the code works. 3544 */ 3545void 3546pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object, 3547 vm_pindex_t pindex, vm_size_t size) 3548{ 3549 pd_entry_t *pde; 3550 vm_paddr_t pa, ptepa; 3551 vm_page_t p; 3552 int pat_mode; 3553 3554 VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); 3555 KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG, 3556 ("pmap_object_init_pt: non-device object")); 3557 if (pseflag && 3558 (addr & (NBPDR - 1)) == 0 && (size & (NBPDR - 1)) == 0) { 3559 if (!vm_object_populate(object, pindex, pindex + atop(size))) 3560 return; 3561 p = vm_page_lookup(object, pindex); 3562 KASSERT(p->valid == VM_PAGE_BITS_ALL, 3563 ("pmap_object_init_pt: invalid page %p", p)); 3564 pat_mode = p->md.pat_mode; 3565 3566 /* 3567 * Abort the mapping if the first page is not physically 3568 * aligned to a 2/4MB page boundary. 3569 */ 3570 ptepa = VM_PAGE_TO_PHYS(p); 3571 if (ptepa & (NBPDR - 1)) 3572 return; 3573 3574 /* 3575 * Skip the first page. Abort the mapping if the rest of 3576 * the pages are not physically contiguous or have differing 3577 * memory attributes. 3578 */ 3579 p = TAILQ_NEXT(p, listq); 3580 for (pa = ptepa + PAGE_SIZE; pa < ptepa + size; 3581 pa += PAGE_SIZE) { 3582 KASSERT(p->valid == VM_PAGE_BITS_ALL, 3583 ("pmap_object_init_pt: invalid page %p", p)); 3584 if (pa != VM_PAGE_TO_PHYS(p) || 3585 pat_mode != p->md.pat_mode) 3586 return; 3587 p = TAILQ_NEXT(p, listq); 3588 } 3589 3590 /* 3591 * Map using 2/4MB pages. Since "ptepa" is 2/4M aligned and 3592 * "size" is a multiple of 2/4M, adding the PAT setting to 3593 * "pa" will not affect the termination of this loop. 3594 */ 3595 PMAP_LOCK(pmap); 3596 for (pa = ptepa | pmap_cache_bits(pat_mode, 1); pa < ptepa + 3597 size; pa += NBPDR) { 3598 pde = pmap_pde(pmap, addr); 3599 if (*pde == 0) { 3600 pde_store(pde, pa | PG_PS | PG_M | PG_A | 3601 PG_U | PG_RW | PG_V); 3602 pmap->pm_stats.resident_count += NBPDR / 3603 PAGE_SIZE; 3604 pmap_pde_mappings++; 3605 } 3606 /* Else continue on if the PDE is already valid. */ 3607 addr += NBPDR; 3608 } 3609 PMAP_UNLOCK(pmap); 3610 } 3611} 3612 3613/* 3614 * Routine: pmap_change_wiring 3615 * Function: Change the wiring attribute for a map/virtual-address 3616 * pair. 3617 * In/out conditions: 3618 * The mapping must already exist in the pmap. 3619 */ 3620void 3621pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired) 3622{ 3623 pd_entry_t *pde; 3624 pt_entry_t *pte; 3625 boolean_t are_queues_locked; 3626 3627 are_queues_locked = FALSE; 3628retry: 3629 PMAP_LOCK(pmap); 3630 pde = pmap_pde(pmap, va); 3631 if ((*pde & PG_PS) != 0) { 3632 if (!wired != ((*pde & PG_W) == 0)) { 3633 if (!are_queues_locked) { 3634 are_queues_locked = TRUE; 3635 if (!mtx_trylock(&vm_page_queue_mtx)) { 3636 PMAP_UNLOCK(pmap); 3637 vm_page_lock_queues(); 3638 goto retry; 3639 } 3640 } 3641 if (!pmap_demote_pde(pmap, pde, va)) 3642 panic("pmap_change_wiring: demotion failed"); 3643 } else 3644 goto out; 3645 } 3646 pte = pmap_pte(pmap, va); 3647 3648 if (wired && !pmap_pte_w(pte)) 3649 pmap->pm_stats.wired_count++; 3650 else if (!wired && pmap_pte_w(pte)) 3651 pmap->pm_stats.wired_count--; 3652 3653 /* 3654 * Wiring is not a hardware characteristic so there is no need to 3655 * invalidate TLB. 3656 */ 3657 pmap_pte_set_w(pte, wired); 3658 pmap_pte_release(pte); 3659out: 3660 if (are_queues_locked) 3661 vm_page_unlock_queues(); 3662 PMAP_UNLOCK(pmap); 3663} 3664 3665 3666 3667/* 3668 * Copy the range specified by src_addr/len 3669 * from the source map to the range dst_addr/len 3670 * in the destination map. 3671 * 3672 * This routine is only advisory and need not do anything. 3673 */ 3674 3675void 3676pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len, 3677 vm_offset_t src_addr) 3678{ 3679 vm_page_t free; 3680 vm_offset_t addr; 3681 vm_offset_t end_addr = src_addr + len; 3682 vm_offset_t pdnxt; 3683 3684 if (dst_addr != src_addr) 3685 return; 3686 3687 if (!pmap_is_current(src_pmap)) 3688 return; 3689 3690 vm_page_lock_queues(); 3691 if (dst_pmap < src_pmap) { 3692 PMAP_LOCK(dst_pmap); 3693 PMAP_LOCK(src_pmap); 3694 } else { 3695 PMAP_LOCK(src_pmap); 3696 PMAP_LOCK(dst_pmap); 3697 } 3698 sched_pin(); 3699 for (addr = src_addr; addr < end_addr; addr = pdnxt) { 3700 pt_entry_t *src_pte, *dst_pte; 3701 vm_page_t dstmpte, srcmpte; 3702 pd_entry_t srcptepaddr; 3703 unsigned ptepindex; 3704 3705 KASSERT(addr < UPT_MIN_ADDRESS, 3706 ("pmap_copy: invalid to pmap_copy page tables")); 3707 3708 pdnxt = (addr + NBPDR) & ~PDRMASK; 3709 if (pdnxt < addr) 3710 pdnxt = end_addr; 3711 ptepindex = addr >> PDRSHIFT; 3712 3713 srcptepaddr = src_pmap->pm_pdir[ptepindex]; 3714 if (srcptepaddr == 0) 3715 continue; 3716 3717 if (srcptepaddr & PG_PS) { 3718 if (dst_pmap->pm_pdir[ptepindex] == 0 && 3719 ((srcptepaddr & PG_MANAGED) == 0 || 3720 pmap_pv_insert_pde(dst_pmap, addr, srcptepaddr & 3721 PG_PS_FRAME))) { 3722 dst_pmap->pm_pdir[ptepindex] = srcptepaddr & 3723 ~PG_W; 3724 dst_pmap->pm_stats.resident_count += 3725 NBPDR / PAGE_SIZE; 3726 } 3727 continue; 3728 } 3729 3730 srcmpte = PHYS_TO_VM_PAGE(srcptepaddr & PG_FRAME); 3731 KASSERT(srcmpte->wire_count > 0, 3732 ("pmap_copy: source page table page is unused")); 3733 3734 if (pdnxt > end_addr) 3735 pdnxt = end_addr; 3736 3737 src_pte = vtopte(addr); 3738 while (addr < pdnxt) { 3739 pt_entry_t ptetemp; 3740 ptetemp = *src_pte; 3741 /* 3742 * we only virtual copy managed pages 3743 */ 3744 if ((ptetemp & PG_MANAGED) != 0) { 3745 dstmpte = pmap_allocpte(dst_pmap, addr, 3746 M_NOWAIT); 3747 if (dstmpte == NULL) 3748 goto out; 3749 dst_pte = pmap_pte_quick(dst_pmap, addr); 3750 if (*dst_pte == 0 && 3751 pmap_try_insert_pv_entry(dst_pmap, addr, 3752 PHYS_TO_VM_PAGE(ptetemp & PG_FRAME))) { 3753 /* 3754 * Clear the wired, modified, and 3755 * accessed (referenced) bits 3756 * during the copy. 3757 */ 3758 *dst_pte = ptetemp & ~(PG_W | PG_M | 3759 PG_A); 3760 dst_pmap->pm_stats.resident_count++; 3761 } else { 3762 free = NULL; 3763 if (pmap_unwire_pte_hold(dst_pmap, 3764 dstmpte, &free)) { 3765 pmap_invalidate_page(dst_pmap, 3766 addr); 3767 pmap_free_zero_pages(free); 3768 } 3769 goto out; 3770 } 3771 if (dstmpte->wire_count >= srcmpte->wire_count) 3772 break; 3773 } 3774 addr += PAGE_SIZE; 3775 src_pte++; 3776 } 3777 } 3778out: 3779 sched_unpin(); 3780 vm_page_unlock_queues(); 3781 PMAP_UNLOCK(src_pmap); 3782 PMAP_UNLOCK(dst_pmap); 3783} 3784 3785static __inline void 3786pagezero(void *page) 3787{ 3788#if defined(I686_CPU) 3789 if (cpu_class == CPUCLASS_686) { 3790#if defined(CPU_ENABLE_SSE) 3791 if (cpu_feature & CPUID_SSE2) 3792 sse2_pagezero(page); 3793 else 3794#endif 3795 i686_pagezero(page); 3796 } else 3797#endif 3798 bzero(page, PAGE_SIZE); 3799} 3800 3801/* 3802 * pmap_zero_page zeros the specified hardware page by mapping 3803 * the page into KVM and using bzero to clear its contents. 3804 */ 3805void 3806pmap_zero_page(vm_page_t m) 3807{ 3808 struct sysmaps *sysmaps; 3809 3810 sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)]; 3811 mtx_lock(&sysmaps->lock); 3812 if (*sysmaps->CMAP2) 3813 panic("pmap_zero_page: CMAP2 busy"); 3814 sched_pin(); 3815 *sysmaps->CMAP2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M | 3816 pmap_cache_bits(m->md.pat_mode, 0); 3817 invlcaddr(sysmaps->CADDR2); 3818 pagezero(sysmaps->CADDR2); 3819 *sysmaps->CMAP2 = 0; 3820 sched_unpin(); 3821 mtx_unlock(&sysmaps->lock); 3822} 3823 3824/* 3825 * pmap_zero_page_area zeros the specified hardware page by mapping 3826 * the page into KVM and using bzero to clear its contents. 3827 * 3828 * off and size may not cover an area beyond a single hardware page. 3829 */ 3830void 3831pmap_zero_page_area(vm_page_t m, int off, int size) 3832{ 3833 struct sysmaps *sysmaps; 3834 3835 sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)]; 3836 mtx_lock(&sysmaps->lock); 3837 if (*sysmaps->CMAP2) 3838 panic("pmap_zero_page_area: CMAP2 busy"); 3839 sched_pin(); 3840 *sysmaps->CMAP2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M | 3841 pmap_cache_bits(m->md.pat_mode, 0); 3842 invlcaddr(sysmaps->CADDR2); 3843 if (off == 0 && size == PAGE_SIZE) 3844 pagezero(sysmaps->CADDR2); 3845 else 3846 bzero((char *)sysmaps->CADDR2 + off, size); 3847 *sysmaps->CMAP2 = 0; 3848 sched_unpin(); 3849 mtx_unlock(&sysmaps->lock); 3850} 3851 3852/* 3853 * pmap_zero_page_idle zeros the specified hardware page by mapping 3854 * the page into KVM and using bzero to clear its contents. This 3855 * is intended to be called from the vm_pagezero process only and 3856 * outside of Giant. 3857 */ 3858void 3859pmap_zero_page_idle(vm_page_t m) 3860{ 3861 3862 if (*CMAP3) 3863 panic("pmap_zero_page_idle: CMAP3 busy"); 3864 sched_pin(); 3865 *CMAP3 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M | 3866 pmap_cache_bits(m->md.pat_mode, 0); 3867 invlcaddr(CADDR3); 3868 pagezero(CADDR3); 3869 *CMAP3 = 0; 3870 sched_unpin(); 3871} 3872 3873/* 3874 * pmap_copy_page copies the specified (machine independent) 3875 * page by mapping the page into virtual memory and using 3876 * bcopy to copy the page, one machine dependent page at a 3877 * time. 3878 */ 3879void 3880pmap_copy_page(vm_page_t src, vm_page_t dst) 3881{ 3882 struct sysmaps *sysmaps; 3883 3884 sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)]; 3885 mtx_lock(&sysmaps->lock); 3886 if (*sysmaps->CMAP1) 3887 panic("pmap_copy_page: CMAP1 busy"); 3888 if (*sysmaps->CMAP2) 3889 panic("pmap_copy_page: CMAP2 busy"); 3890 sched_pin(); 3891 invlpg((u_int)sysmaps->CADDR1); 3892 invlpg((u_int)sysmaps->CADDR2); 3893 *sysmaps->CMAP1 = PG_V | VM_PAGE_TO_PHYS(src) | PG_A | 3894 pmap_cache_bits(src->md.pat_mode, 0); 3895 *sysmaps->CMAP2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(dst) | PG_A | PG_M | 3896 pmap_cache_bits(dst->md.pat_mode, 0); 3897 bcopy(sysmaps->CADDR1, sysmaps->CADDR2, PAGE_SIZE); 3898 *sysmaps->CMAP1 = 0; 3899 *sysmaps->CMAP2 = 0; 3900 sched_unpin(); 3901 mtx_unlock(&sysmaps->lock); 3902} 3903 3904/* 3905 * Returns true if the pmap's pv is one of the first 3906 * 16 pvs linked to from this page. This count may 3907 * be changed upwards or downwards in the future; it 3908 * is only necessary that true be returned for a small 3909 * subset of pmaps for proper page aging. 3910 */ 3911boolean_t 3912pmap_page_exists_quick(pmap_t pmap, vm_page_t m) 3913{ 3914 struct md_page *pvh; 3915 pv_entry_t pv; 3916 int loops = 0; 3917 3918 if (m->flags & PG_FICTITIOUS) 3919 return FALSE; 3920 3921 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 3922 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { 3923 if (PV_PMAP(pv) == pmap) { 3924 return TRUE; 3925 } 3926 loops++; 3927 if (loops >= 16) 3928 break; 3929 } 3930 if (loops < 16) { 3931 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); 3932 TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) { 3933 if (PV_PMAP(pv) == pmap) 3934 return (TRUE); 3935 loops++; 3936 if (loops >= 16) 3937 break; 3938 } 3939 } 3940 return (FALSE); 3941} 3942 3943/* 3944 * pmap_page_wired_mappings: 3945 * 3946 * Return the number of managed mappings to the given physical page 3947 * that are wired. 3948 */ 3949int 3950pmap_page_wired_mappings(vm_page_t m) 3951{ 3952 int count; 3953 3954 count = 0; 3955 if ((m->flags & PG_FICTITIOUS) != 0) 3956 return (count); 3957 count = pmap_pvh_wired_mappings(&m->md, count); 3958 return (pmap_pvh_wired_mappings(pa_to_pvh(VM_PAGE_TO_PHYS(m)), count)); 3959} 3960 3961/* 3962 * pmap_pvh_wired_mappings: 3963 * 3964 * Return the updated number "count" of managed mappings that are wired. 3965 */ 3966static int 3967pmap_pvh_wired_mappings(struct md_page *pvh, int count) 3968{ 3969 pmap_t pmap; 3970 pt_entry_t *pte; 3971 pv_entry_t pv; 3972 3973 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 3974 sched_pin(); 3975 TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) { 3976 pmap = PV_PMAP(pv); 3977 PMAP_LOCK(pmap); 3978 pte = pmap_pte_quick(pmap, pv->pv_va); 3979 if ((*pte & PG_W) != 0) 3980 count++; 3981 PMAP_UNLOCK(pmap); 3982 } 3983 sched_unpin(); 3984 return (count); 3985} 3986 3987/* 3988 * Returns TRUE if the given page is mapped individually or as part of 3989 * a 4mpage. Otherwise, returns FALSE. 3990 */ 3991boolean_t 3992pmap_page_is_mapped(vm_page_t m) 3993{ 3994 struct md_page *pvh; 3995 3996 if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0) 3997 return (FALSE); 3998 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 3999 if (TAILQ_EMPTY(&m->md.pv_list)) { 4000 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); 4001 return (!TAILQ_EMPTY(&pvh->pv_list)); 4002 } else 4003 return (TRUE); 4004} 4005 4006/* 4007 * Remove all pages from specified address space 4008 * this aids process exit speeds. Also, this code 4009 * is special cased for current process only, but 4010 * can have the more generic (and slightly slower) 4011 * mode enabled. This is much faster than pmap_remove 4012 * in the case of running down an entire address space. 4013 */ 4014void 4015pmap_remove_pages(pmap_t pmap) 4016{ 4017 pt_entry_t *pte, tpte; 4018 vm_page_t free = NULL; 4019 vm_page_t m, mpte, mt; 4020 pv_entry_t pv; 4021 struct md_page *pvh; 4022 struct pv_chunk *pc, *npc; 4023 int field, idx; 4024 int32_t bit; 4025 uint32_t inuse, bitmask; 4026 int allfree; 4027 4028 if (pmap != vmspace_pmap(curthread->td_proc->p_vmspace)) { 4029 printf("warning: pmap_remove_pages called with non-current pmap\n"); 4030 return; 4031 } 4032 vm_page_lock_queues(); 4033 PMAP_LOCK(pmap); 4034 sched_pin(); 4035 TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) { 4036 allfree = 1; 4037 for (field = 0; field < _NPCM; field++) { 4038 inuse = (~(pc->pc_map[field])) & pc_freemask[field]; 4039 while (inuse != 0) { 4040 bit = bsfl(inuse); 4041 bitmask = 1UL << bit; 4042 idx = field * 32 + bit; 4043 pv = &pc->pc_pventry[idx]; 4044 inuse &= ~bitmask; 4045 4046 pte = pmap_pde(pmap, pv->pv_va); 4047 tpte = *pte; 4048 if ((tpte & PG_PS) == 0) { 4049 pte = vtopte(pv->pv_va); 4050 tpte = *pte & ~PG_PTE_PAT; 4051 } 4052 4053 if (tpte == 0) { 4054 printf( 4055 "TPTE at %p IS ZERO @ VA %08x\n", 4056 pte, pv->pv_va); 4057 panic("bad pte"); 4058 } 4059 4060/* 4061 * We cannot remove wired pages from a process' mapping at this time 4062 */ 4063 if (tpte & PG_W) { 4064 allfree = 0; 4065 continue; 4066 } 4067 4068 m = PHYS_TO_VM_PAGE(tpte & PG_FRAME); 4069 KASSERT(m->phys_addr == (tpte & PG_FRAME), 4070 ("vm_page_t %p phys_addr mismatch %016jx %016jx", 4071 m, (uintmax_t)m->phys_addr, 4072 (uintmax_t)tpte)); 4073 4074 KASSERT(m < &vm_page_array[vm_page_array_size], 4075 ("pmap_remove_pages: bad tpte %#jx", 4076 (uintmax_t)tpte)); 4077 4078 pte_clear(pte); 4079 4080 /* 4081 * Update the vm_page_t clean/reference bits. 4082 */ 4083 if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) { 4084 if ((tpte & PG_PS) != 0) { 4085 for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++) 4086 vm_page_dirty(mt); 4087 } else 4088 vm_page_dirty(m); 4089 } 4090 4091 /* Mark free */ 4092 PV_STAT(pv_entry_frees++); 4093 PV_STAT(pv_entry_spare++); 4094 pv_entry_count--; 4095 pc->pc_map[field] |= bitmask; 4096 if ((tpte & PG_PS) != 0) { 4097 pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE; 4098 pvh = pa_to_pvh(tpte & PG_PS_FRAME); 4099 TAILQ_REMOVE(&pvh->pv_list, pv, pv_list); 4100 if (TAILQ_EMPTY(&pvh->pv_list)) { 4101 for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++) 4102 if (TAILQ_EMPTY(&mt->md.pv_list)) 4103 vm_page_flag_clear(mt, PG_WRITEABLE); 4104 } 4105 mpte = pmap_lookup_pt_page(pmap, pv->pv_va); 4106 if (mpte != NULL) { 4107 pmap_remove_pt_page(pmap, mpte); 4108 pmap->pm_stats.resident_count--; 4109 KASSERT(mpte->wire_count == NPTEPG, 4110 ("pmap_remove_pages: pte page wire count error")); 4111 mpte->wire_count = 0; 4112 pmap_add_delayed_free_list(mpte, &free, FALSE); 4113 atomic_subtract_int(&cnt.v_wire_count, 1); 4114 } 4115 } else { 4116 pmap->pm_stats.resident_count--; 4117 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list); 4118 if (TAILQ_EMPTY(&m->md.pv_list)) { 4119 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); 4120 if (TAILQ_EMPTY(&pvh->pv_list)) 4121 vm_page_flag_clear(m, PG_WRITEABLE); 4122 } 4123 pmap_unuse_pt(pmap, pv->pv_va, &free); 4124 } 4125 } 4126 } 4127 if (allfree) { 4128 PV_STAT(pv_entry_spare -= _NPCPV); 4129 PV_STAT(pc_chunk_count--); 4130 PV_STAT(pc_chunk_frees++); 4131 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 4132 m = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)pc)); 4133 pmap_qremove((vm_offset_t)pc, 1); 4134 vm_page_unwire(m, 0); 4135 vm_page_free(m); 4136 pmap_ptelist_free(&pv_vafree, (vm_offset_t)pc); 4137 } 4138 } 4139 sched_unpin(); 4140 pmap_invalidate_all(pmap); 4141 vm_page_unlock_queues(); 4142 PMAP_UNLOCK(pmap); 4143 pmap_free_zero_pages(free); 4144} 4145 4146/* 4147 * pmap_is_modified: 4148 * 4149 * Return whether or not the specified physical page was modified 4150 * in any physical maps. 4151 */ 4152boolean_t 4153pmap_is_modified(vm_page_t m) 4154{ 4155 4156 if (m->flags & PG_FICTITIOUS) 4157 return (FALSE); 4158 if (pmap_is_modified_pvh(&m->md)) 4159 return (TRUE); 4160 return (pmap_is_modified_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m)))); 4161} 4162 4163/* 4164 * Returns TRUE if any of the given mappings were used to modify 4165 * physical memory. Otherwise, returns FALSE. Both page and 2mpage 4166 * mappings are supported. 4167 */ 4168static boolean_t 4169pmap_is_modified_pvh(struct md_page *pvh) 4170{ 4171 pv_entry_t pv; 4172 pt_entry_t *pte; 4173 pmap_t pmap; 4174 boolean_t rv; 4175 4176 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 4177 rv = FALSE; 4178 sched_pin(); 4179 TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) { 4180 pmap = PV_PMAP(pv); 4181 PMAP_LOCK(pmap); 4182 pte = pmap_pte_quick(pmap, pv->pv_va); 4183 rv = (*pte & (PG_M | PG_RW)) == (PG_M | PG_RW); 4184 PMAP_UNLOCK(pmap); 4185 if (rv) 4186 break; 4187 } 4188 sched_unpin(); 4189 return (rv); 4190} 4191 4192/* 4193 * pmap_is_prefaultable: 4194 * 4195 * Return whether or not the specified virtual address is elgible 4196 * for prefault. 4197 */ 4198boolean_t 4199pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr) 4200{ 4201 pd_entry_t *pde; 4202 pt_entry_t *pte; 4203 boolean_t rv; 4204 4205 rv = FALSE; 4206 PMAP_LOCK(pmap); 4207 pde = pmap_pde(pmap, addr); 4208 if (*pde != 0 && (*pde & PG_PS) == 0) { 4209 pte = vtopte(addr); 4210 rv = *pte == 0; 4211 } 4212 PMAP_UNLOCK(pmap); 4213 return (rv); 4214} 4215 4216/* 4217 * Clear the write and modified bits in each of the given page's mappings. 4218 */ 4219void 4220pmap_remove_write(vm_page_t m) 4221{ 4222 struct md_page *pvh; 4223 pv_entry_t next_pv, pv; 4224 pmap_t pmap; 4225 pd_entry_t *pde; 4226 pt_entry_t oldpte, *pte; 4227 vm_offset_t va; 4228 4229 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 4230 if ((m->flags & PG_FICTITIOUS) != 0 || 4231 (m->flags & PG_WRITEABLE) == 0) 4232 return; 4233 sched_pin(); 4234 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); 4235 TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, next_pv) { 4236 va = pv->pv_va; 4237 pmap = PV_PMAP(pv); 4238 PMAP_LOCK(pmap); 4239 pde = pmap_pde(pmap, va); 4240 if ((*pde & PG_RW) != 0) 4241 (void)pmap_demote_pde(pmap, pde, va); 4242 PMAP_UNLOCK(pmap); 4243 } 4244 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { 4245 pmap = PV_PMAP(pv); 4246 PMAP_LOCK(pmap); 4247 pde = pmap_pde(pmap, pv->pv_va); 4248 KASSERT((*pde & PG_PS) == 0, ("pmap_clear_write: found" 4249 " a 4mpage in page %p's pv list", m)); 4250 pte = pmap_pte_quick(pmap, pv->pv_va); 4251retry: 4252 oldpte = *pte; 4253 if ((oldpte & PG_RW) != 0) { 4254 /* 4255 * Regardless of whether a pte is 32 or 64 bits 4256 * in size, PG_RW and PG_M are among the least 4257 * significant 32 bits. 4258 */ 4259 if (!atomic_cmpset_int((u_int *)pte, oldpte, 4260 oldpte & ~(PG_RW | PG_M))) 4261 goto retry; 4262 if ((oldpte & PG_M) != 0) 4263 vm_page_dirty(m); 4264 pmap_invalidate_page(pmap, pv->pv_va); 4265 } 4266 PMAP_UNLOCK(pmap); 4267 } 4268 vm_page_flag_clear(m, PG_WRITEABLE); 4269 sched_unpin(); 4270} 4271 4272/* 4273 * pmap_ts_referenced: 4274 * 4275 * Return a count of reference bits for a page, clearing those bits. 4276 * It is not necessary for every reference bit to be cleared, but it 4277 * is necessary that 0 only be returned when there are truly no 4278 * reference bits set. 4279 * 4280 * XXX: The exact number of bits to check and clear is a matter that 4281 * should be tested and standardized at some point in the future for 4282 * optimal aging of shared pages. 4283 */ 4284int 4285pmap_ts_referenced(vm_page_t m) 4286{ 4287 struct md_page *pvh; 4288 pv_entry_t pv, pvf, pvn; 4289 pmap_t pmap; 4290 pd_entry_t oldpde, *pde; 4291 pt_entry_t *pte; 4292 vm_offset_t va; 4293 int rtval = 0; 4294 4295 if (m->flags & PG_FICTITIOUS) 4296 return (rtval); 4297 sched_pin(); 4298 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 4299 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); 4300 TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, pvn) { 4301 va = pv->pv_va; 4302 pmap = PV_PMAP(pv); 4303 PMAP_LOCK(pmap); 4304 pde = pmap_pde(pmap, va); 4305 oldpde = *pde; 4306 if ((oldpde & PG_A) != 0) { 4307 if (pmap_demote_pde(pmap, pde, va)) { 4308 if ((oldpde & PG_W) == 0) { 4309 /* 4310 * Remove the mapping to a single page 4311 * so that a subsequent access may 4312 * repromote. Since the underlying 4313 * page table page is fully populated, 4314 * this removal never frees a page 4315 * table page. 4316 */ 4317 va += VM_PAGE_TO_PHYS(m) - (oldpde & 4318 PG_PS_FRAME); 4319 pmap_remove_page(pmap, va, NULL); 4320 rtval++; 4321 if (rtval > 4) { 4322 PMAP_UNLOCK(pmap); 4323 return (rtval); 4324 } 4325 } 4326 } 4327 } 4328 PMAP_UNLOCK(pmap); 4329 } 4330 if ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) { 4331 pvf = pv; 4332 do { 4333 pvn = TAILQ_NEXT(pv, pv_list); 4334 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list); 4335 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list); 4336 pmap = PV_PMAP(pv); 4337 PMAP_LOCK(pmap); 4338 pde = pmap_pde(pmap, pv->pv_va); 4339 KASSERT((*pde & PG_PS) == 0, ("pmap_ts_referenced:" 4340 " found a 4mpage in page %p's pv list", m)); 4341 pte = pmap_pte_quick(pmap, pv->pv_va); 4342 if ((*pte & PG_A) != 0) { 4343 atomic_clear_int((u_int *)pte, PG_A); 4344 pmap_invalidate_page(pmap, pv->pv_va); 4345 rtval++; 4346 if (rtval > 4) 4347 pvn = NULL; 4348 } 4349 PMAP_UNLOCK(pmap); 4350 } while ((pv = pvn) != NULL && pv != pvf); 4351 } 4352 sched_unpin(); 4353 return (rtval); 4354} 4355 4356/* 4357 * Clear the modify bits on the specified physical page. 4358 */ 4359void 4360pmap_clear_modify(vm_page_t m) 4361{ 4362 struct md_page *pvh; 4363 pv_entry_t next_pv, pv; 4364 pmap_t pmap; 4365 pd_entry_t oldpde, *pde; 4366 pt_entry_t oldpte, *pte; 4367 vm_offset_t va; 4368 4369 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 4370 if ((m->flags & PG_FICTITIOUS) != 0) 4371 return; 4372 sched_pin(); 4373 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); 4374 TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, next_pv) { 4375 va = pv->pv_va; 4376 pmap = PV_PMAP(pv); 4377 PMAP_LOCK(pmap); 4378 pde = pmap_pde(pmap, va); 4379 oldpde = *pde; 4380 if ((oldpde & PG_RW) != 0) { 4381 if (pmap_demote_pde(pmap, pde, va)) { 4382 if ((oldpde & PG_W) == 0) { 4383 /* 4384 * Write protect the mapping to a 4385 * single page so that a subsequent 4386 * write access may repromote. 4387 */ 4388 va += VM_PAGE_TO_PHYS(m) - (oldpde & 4389 PG_PS_FRAME); 4390 pte = pmap_pte_quick(pmap, va); 4391 oldpte = *pte; 4392 if ((oldpte & PG_V) != 0) { 4393 /* 4394 * Regardless of whether a pte is 32 or 64 bits 4395 * in size, PG_RW and PG_M are among the least 4396 * significant 32 bits. 4397 */ 4398 while (!atomic_cmpset_int((u_int *)pte, 4399 oldpte, 4400 oldpte & ~(PG_M | PG_RW))) 4401 oldpte = *pte; 4402 vm_page_dirty(m); 4403 pmap_invalidate_page(pmap, va); 4404 } 4405 } 4406 } 4407 } 4408 PMAP_UNLOCK(pmap); 4409 } 4410 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { 4411 pmap = PV_PMAP(pv); 4412 PMAP_LOCK(pmap); 4413 pde = pmap_pde(pmap, pv->pv_va); 4414 KASSERT((*pde & PG_PS) == 0, ("pmap_clear_modify: found" 4415 " a 4mpage in page %p's pv list", m)); 4416 pte = pmap_pte_quick(pmap, pv->pv_va); 4417 if ((*pte & (PG_M | PG_RW)) == (PG_M | PG_RW)) { 4418 /* 4419 * Regardless of whether a pte is 32 or 64 bits 4420 * in size, PG_M is among the least significant 4421 * 32 bits. 4422 */ 4423 atomic_clear_int((u_int *)pte, PG_M); 4424 pmap_invalidate_page(pmap, pv->pv_va); 4425 } 4426 PMAP_UNLOCK(pmap); 4427 } 4428 sched_unpin(); 4429} 4430 4431/* 4432 * pmap_clear_reference: 4433 * 4434 * Clear the reference bit on the specified physical page. 4435 */ 4436void 4437pmap_clear_reference(vm_page_t m) 4438{ 4439 struct md_page *pvh; 4440 pv_entry_t next_pv, pv; 4441 pmap_t pmap; 4442 pd_entry_t oldpde, *pde; 4443 pt_entry_t *pte; 4444 vm_offset_t va; 4445 4446 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 4447 if ((m->flags & PG_FICTITIOUS) != 0) 4448 return; 4449 sched_pin(); 4450 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); 4451 TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, next_pv) { 4452 va = pv->pv_va; 4453 pmap = PV_PMAP(pv); 4454 PMAP_LOCK(pmap); 4455 pde = pmap_pde(pmap, va); 4456 oldpde = *pde; 4457 if ((oldpde & PG_A) != 0) { 4458 if (pmap_demote_pde(pmap, pde, va)) { 4459 /* 4460 * Remove the mapping to a single page so 4461 * that a subsequent access may repromote. 4462 * Since the underlying page table page is 4463 * fully populated, this removal never frees 4464 * a page table page. 4465 */ 4466 va += VM_PAGE_TO_PHYS(m) - (oldpde & 4467 PG_PS_FRAME); 4468 pmap_remove_page(pmap, va, NULL); 4469 } 4470 } 4471 PMAP_UNLOCK(pmap); 4472 } 4473 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { 4474 pmap = PV_PMAP(pv); 4475 PMAP_LOCK(pmap); 4476 pde = pmap_pde(pmap, pv->pv_va); 4477 KASSERT((*pde & PG_PS) == 0, ("pmap_clear_reference: found" 4478 " a 4mpage in page %p's pv list", m)); 4479 pte = pmap_pte_quick(pmap, pv->pv_va); 4480 if ((*pte & PG_A) != 0) { 4481 /* 4482 * Regardless of whether a pte is 32 or 64 bits 4483 * in size, PG_A is among the least significant 4484 * 32 bits. 4485 */ 4486 atomic_clear_int((u_int *)pte, PG_A); 4487 pmap_invalidate_page(pmap, pv->pv_va); 4488 } 4489 PMAP_UNLOCK(pmap); 4490 } 4491 sched_unpin(); 4492} 4493 4494/* 4495 * Miscellaneous support routines follow 4496 */ 4497 4498/* Adjust the cache mode for a 4KB page mapped via a PTE. */ 4499static __inline void 4500pmap_pte_attr(pt_entry_t *pte, int cache_bits) 4501{ 4502 u_int opte, npte; 4503 4504 /* 4505 * The cache mode bits are all in the low 32-bits of the 4506 * PTE, so we can just spin on updating the low 32-bits. 4507 */ 4508 do { 4509 opte = *(u_int *)pte; 4510 npte = opte & ~PG_PTE_CACHE; 4511 npte |= cache_bits; 4512 } while (npte != opte && !atomic_cmpset_int((u_int *)pte, opte, npte)); 4513} 4514 4515/* Adjust the cache mode for a 2/4MB page mapped via a PDE. */ 4516static __inline void 4517pmap_pde_attr(pd_entry_t *pde, int cache_bits) 4518{ 4519 u_int opde, npde; 4520 4521 /* 4522 * The cache mode bits are all in the low 32-bits of the 4523 * PDE, so we can just spin on updating the low 32-bits. 4524 */ 4525 do { 4526 opde = *(u_int *)pde; 4527 npde = opde & ~PG_PDE_CACHE; 4528 npde |= cache_bits; 4529 } while (npde != opde && !atomic_cmpset_int((u_int *)pde, opde, npde)); 4530} 4531 4532/* 4533 * Map a set of physical memory pages into the kernel virtual 4534 * address space. Return a pointer to where it is mapped. This 4535 * routine is intended to be used for mapping device memory, 4536 * NOT real memory. 4537 */ 4538void * 4539pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, int mode) 4540{ 4541 vm_offset_t va, offset; 4542 vm_size_t tmpsize; 4543 4544 offset = pa & PAGE_MASK; 4545 size = roundup(offset + size, PAGE_SIZE); 4546 pa = pa & PG_FRAME; 4547 4548 if (pa < KERNLOAD && pa + size <= KERNLOAD) 4549 va = KERNBASE + pa; 4550 else 4551 va = kmem_alloc_nofault(kernel_map, size); 4552 if (!va) 4553 panic("pmap_mapdev: Couldn't alloc kernel virtual memory"); 4554 4555 for (tmpsize = 0; tmpsize < size; tmpsize += PAGE_SIZE) 4556 pmap_kenter_attr(va + tmpsize, pa + tmpsize, mode); 4557 pmap_invalidate_range(kernel_pmap, va, va + tmpsize); 4558 pmap_invalidate_cache_range(va, va + size); 4559 return ((void *)(va + offset)); 4560} 4561 4562void * 4563pmap_mapdev(vm_paddr_t pa, vm_size_t size) 4564{ 4565 4566 return (pmap_mapdev_attr(pa, size, PAT_UNCACHEABLE)); 4567} 4568 4569void * 4570pmap_mapbios(vm_paddr_t pa, vm_size_t size) 4571{ 4572 4573 return (pmap_mapdev_attr(pa, size, PAT_WRITE_BACK)); 4574} 4575 4576void 4577pmap_unmapdev(vm_offset_t va, vm_size_t size) 4578{ 4579 vm_offset_t base, offset, tmpva; 4580 4581 if (va >= KERNBASE && va + size <= KERNBASE + KERNLOAD) 4582 return; 4583 base = trunc_page(va); 4584 offset = va & PAGE_MASK; 4585 size = roundup(offset + size, PAGE_SIZE); 4586 for (tmpva = base; tmpva < (base + size); tmpva += PAGE_SIZE) 4587 pmap_kremove(tmpva); 4588 pmap_invalidate_range(kernel_pmap, va, tmpva); 4589 kmem_free(kernel_map, base, size); 4590} 4591 4592/* 4593 * Sets the memory attribute for the specified page. 4594 */ 4595void 4596pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma) 4597{ 4598 struct sysmaps *sysmaps; 4599 vm_offset_t sva, eva; 4600 4601 m->md.pat_mode = ma; 4602 if ((m->flags & PG_FICTITIOUS) != 0) 4603 return; 4604 4605 /* 4606 * If "m" is a normal page, flush it from the cache. 4607 * See pmap_invalidate_cache_range(). 4608 * 4609 * First, try to find an existing mapping of the page by sf 4610 * buffer. sf_buf_invalidate_cache() modifies mapping and 4611 * flushes the cache. 4612 */ 4613 if (sf_buf_invalidate_cache(m)) 4614 return; 4615 4616 /* 4617 * If page is not mapped by sf buffer, but CPU does not 4618 * support self snoop, map the page transient and do 4619 * invalidation. In the worst case, whole cache is flushed by 4620 * pmap_invalidate_cache_range(). 4621 */ 4622 if ((cpu_feature & (CPUID_SS|CPUID_CLFSH)) == CPUID_CLFSH) { 4623 sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)]; 4624 mtx_lock(&sysmaps->lock); 4625 if (*sysmaps->CMAP2) 4626 panic("pmap_page_set_memattr: CMAP2 busy"); 4627 sched_pin(); 4628 *sysmaps->CMAP2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | 4629 PG_A | PG_M | pmap_cache_bits(m->md.pat_mode, 0); 4630 invlcaddr(sysmaps->CADDR2); 4631 sva = (vm_offset_t)sysmaps->CADDR2; 4632 eva = sva + PAGE_SIZE; 4633 } else 4634 sva = eva = 0; /* gcc */ 4635 pmap_invalidate_cache_range(sva, eva); 4636 if (sva != 0) { 4637 *sysmaps->CMAP2 = 0; 4638 sched_unpin(); 4639 mtx_unlock(&sysmaps->lock); 4640 } 4641} 4642 4643/* 4644 * Changes the specified virtual address range's memory type to that given by 4645 * the parameter "mode". The specified virtual address range must be 4646 * completely contained within either the kernel map. 4647 * 4648 * Returns zero if the change completed successfully, and either EINVAL or 4649 * ENOMEM if the change failed. Specifically, EINVAL is returned if some part 4650 * of the virtual address range was not mapped, and ENOMEM is returned if 4651 * there was insufficient memory available to complete the change. 4652 */ 4653int 4654pmap_change_attr(vm_offset_t va, vm_size_t size, int mode) 4655{ 4656 vm_offset_t base, offset, tmpva; 4657 pd_entry_t *pde; 4658 pt_entry_t *pte; 4659 int cache_bits_pte, cache_bits_pde; 4660 boolean_t changed; 4661 4662 base = trunc_page(va); 4663 offset = va & PAGE_MASK; 4664 size = roundup(offset + size, PAGE_SIZE); 4665 4666 /* 4667 * Only supported on kernel virtual addresses above the recursive map. 4668 */ 4669 if (base < VM_MIN_KERNEL_ADDRESS) 4670 return (EINVAL); 4671 4672 cache_bits_pde = pmap_cache_bits(mode, 1); 4673 cache_bits_pte = pmap_cache_bits(mode, 0); 4674 changed = FALSE; 4675 4676 /* 4677 * Pages that aren't mapped aren't supported. Also break down 4678 * 2/4MB pages into 4KB pages if required. 4679 */ 4680 PMAP_LOCK(kernel_pmap); 4681 for (tmpva = base; tmpva < base + size; ) { 4682 pde = pmap_pde(kernel_pmap, tmpva); 4683 if (*pde == 0) { 4684 PMAP_UNLOCK(kernel_pmap); 4685 return (EINVAL); 4686 } 4687 if (*pde & PG_PS) { 4688 /* 4689 * If the current 2/4MB page already has 4690 * the required memory type, then we need not 4691 * demote this page. Just increment tmpva to 4692 * the next 2/4MB page frame. 4693 */ 4694 if ((*pde & PG_PDE_CACHE) == cache_bits_pde) { 4695 tmpva = trunc_4mpage(tmpva) + NBPDR; 4696 continue; 4697 } 4698 4699 /* 4700 * If the current offset aligns with a 2/4MB 4701 * page frame and there is at least 2/4MB left 4702 * within the range, then we need not break 4703 * down this page into 4KB pages. 4704 */ 4705 if ((tmpva & PDRMASK) == 0 && 4706 tmpva + PDRMASK < base + size) { 4707 tmpva += NBPDR; 4708 continue; 4709 } 4710 if (!pmap_demote_pde(kernel_pmap, pde, tmpva)) { 4711 PMAP_UNLOCK(kernel_pmap); 4712 return (ENOMEM); 4713 } 4714 } 4715 pte = vtopte(tmpva); 4716 if (*pte == 0) { 4717 PMAP_UNLOCK(kernel_pmap); 4718 return (EINVAL); 4719 } 4720 tmpva += PAGE_SIZE; 4721 } 4722 PMAP_UNLOCK(kernel_pmap); 4723 4724 /* 4725 * Ok, all the pages exist, so run through them updating their 4726 * cache mode if required. 4727 */ 4728 for (tmpva = base; tmpva < base + size; ) { 4729 pde = pmap_pde(kernel_pmap, tmpva); 4730 if (*pde & PG_PS) { 4731 if ((*pde & PG_PDE_CACHE) != cache_bits_pde) { 4732 pmap_pde_attr(pde, cache_bits_pde); 4733 changed = TRUE; 4734 } 4735 tmpva = trunc_4mpage(tmpva) + NBPDR; 4736 } else { 4737 pte = vtopte(tmpva); 4738 if ((*pte & PG_PTE_CACHE) != cache_bits_pte) { 4739 pmap_pte_attr(pte, cache_bits_pte); 4740 changed = TRUE; 4741 } 4742 tmpva += PAGE_SIZE; 4743 } 4744 } 4745 4746 /* 4747 * Flush CPU caches to make sure any data isn't cached that 4748 * shouldn't be, etc. 4749 */ 4750 if (changed) { 4751 pmap_invalidate_range(kernel_pmap, base, tmpva); 4752 pmap_invalidate_cache_range(base, tmpva); 4753 } 4754 return (0); 4755} 4756 4757/* 4758 * perform the pmap work for mincore 4759 */ 4760int 4761pmap_mincore(pmap_t pmap, vm_offset_t addr) 4762{ 4763 pd_entry_t *pdep; 4764 pt_entry_t *ptep, pte; 4765 vm_paddr_t pa; 4766 vm_page_t m; 4767 int val = 0; 4768 4769 PMAP_LOCK(pmap); 4770 pdep = pmap_pde(pmap, addr); 4771 if (*pdep != 0) { 4772 if (*pdep & PG_PS) { 4773 pte = *pdep; 4774 val = MINCORE_SUPER; 4775 /* Compute the physical address of the 4KB page. */ 4776 pa = ((*pdep & PG_PS_FRAME) | (addr & PDRMASK)) & 4777 PG_FRAME; 4778 } else { 4779 ptep = pmap_pte(pmap, addr); 4780 pte = *ptep; 4781 pmap_pte_release(ptep); 4782 pa = pte & PG_FRAME; 4783 } 4784 } else { 4785 pte = 0; 4786 pa = 0; 4787 } 4788 PMAP_UNLOCK(pmap); 4789 4790 if (pte != 0) { 4791 val |= MINCORE_INCORE; 4792 if ((pte & PG_MANAGED) == 0) 4793 return val; 4794 4795 m = PHYS_TO_VM_PAGE(pa); 4796 4797 /* 4798 * Modified by us 4799 */ 4800 if ((pte & (PG_M | PG_RW)) == (PG_M | PG_RW)) 4801 val |= MINCORE_MODIFIED|MINCORE_MODIFIED_OTHER; 4802 else { 4803 /* 4804 * Modified by someone else 4805 */ 4806 vm_page_lock_queues(); 4807 if (m->dirty || pmap_is_modified(m)) 4808 val |= MINCORE_MODIFIED_OTHER; 4809 vm_page_unlock_queues(); 4810 } 4811 /* 4812 * Referenced by us 4813 */ 4814 if (pte & PG_A) 4815 val |= MINCORE_REFERENCED|MINCORE_REFERENCED_OTHER; 4816 else { 4817 /* 4818 * Referenced by someone else 4819 */ 4820 vm_page_lock_queues(); 4821 if ((m->flags & PG_REFERENCED) || 4822 pmap_ts_referenced(m)) { 4823 val |= MINCORE_REFERENCED_OTHER; 4824 vm_page_flag_set(m, PG_REFERENCED); 4825 } 4826 vm_page_unlock_queues(); 4827 } 4828 } 4829 return val; 4830} 4831 4832void 4833pmap_activate(struct thread *td) 4834{ 4835 pmap_t pmap, oldpmap; 4836 u_int32_t cr3; 4837 4838 critical_enter(); 4839 pmap = vmspace_pmap(td->td_proc->p_vmspace); 4840 oldpmap = PCPU_GET(curpmap); 4841#if defined(SMP) 4842 atomic_clear_int(&oldpmap->pm_active, PCPU_GET(cpumask)); 4843 atomic_set_int(&pmap->pm_active, PCPU_GET(cpumask)); 4844#else 4845 oldpmap->pm_active &= ~1; 4846 pmap->pm_active |= 1; 4847#endif 4848#ifdef PAE 4849 cr3 = vtophys(pmap->pm_pdpt); 4850#else 4851 cr3 = vtophys(pmap->pm_pdir); 4852#endif 4853 /* 4854 * pmap_activate is for the current thread on the current cpu 4855 */ 4856 td->td_pcb->pcb_cr3 = cr3; 4857 load_cr3(cr3); 4858 PCPU_SET(curpmap, pmap); 4859 critical_exit(); 4860} 4861 4862void 4863pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz) 4864{ 4865} 4866 4867/* 4868 * Increase the starting virtual address of the given mapping if a 4869 * different alignment might result in more superpage mappings. 4870 */ 4871void 4872pmap_align_superpage(vm_object_t object, vm_ooffset_t offset, 4873 vm_offset_t *addr, vm_size_t size) 4874{ 4875 vm_offset_t superpage_offset; 4876 4877 if (size < NBPDR) 4878 return; 4879 if (object != NULL && (object->flags & OBJ_COLORED) != 0) 4880 offset += ptoa(object->pg_color); 4881 superpage_offset = offset & PDRMASK; 4882 if (size - ((NBPDR - superpage_offset) & PDRMASK) < NBPDR || 4883 (*addr & PDRMASK) == superpage_offset) 4884 return; 4885 if ((*addr & PDRMASK) < superpage_offset) 4886 *addr = (*addr & ~PDRMASK) + superpage_offset; 4887 else 4888 *addr = ((*addr + PDRMASK) & ~PDRMASK) + superpage_offset; 4889} 4890 4891 4892#if defined(PMAP_DEBUG) 4893pmap_pid_dump(int pid) 4894{ 4895 pmap_t pmap; 4896 struct proc *p; 4897 int npte = 0; 4898 int index; 4899 4900 sx_slock(&allproc_lock); 4901 FOREACH_PROC_IN_SYSTEM(p) { 4902 if (p->p_pid != pid) 4903 continue; 4904 4905 if (p->p_vmspace) { 4906 int i,j; 4907 index = 0; 4908 pmap = vmspace_pmap(p->p_vmspace); 4909 for (i = 0; i < NPDEPTD; i++) { 4910 pd_entry_t *pde; 4911 pt_entry_t *pte; 4912 vm_offset_t base = i << PDRSHIFT; 4913 4914 pde = &pmap->pm_pdir[i]; 4915 if (pde && pmap_pde_v(pde)) { 4916 for (j = 0; j < NPTEPG; j++) { 4917 vm_offset_t va = base + (j << PAGE_SHIFT); 4918 if (va >= (vm_offset_t) VM_MIN_KERNEL_ADDRESS) { 4919 if (index) { 4920 index = 0; 4921 printf("\n"); 4922 } 4923 sx_sunlock(&allproc_lock); 4924 return npte; 4925 } 4926 pte = pmap_pte(pmap, va); 4927 if (pte && pmap_pte_v(pte)) { 4928 pt_entry_t pa; 4929 vm_page_t m; 4930 pa = *pte; 4931 m = PHYS_TO_VM_PAGE(pa & PG_FRAME); 4932 printf("va: 0x%x, pt: 0x%x, h: %d, w: %d, f: 0x%x", 4933 va, pa, m->hold_count, m->wire_count, m->flags); 4934 npte++; 4935 index++; 4936 if (index >= 2) { 4937 index = 0; 4938 printf("\n"); 4939 } else { 4940 printf(" "); 4941 } 4942 } 4943 } 4944 } 4945 } 4946 } 4947 } 4948 sx_sunlock(&allproc_lock); 4949 return npte; 4950} 4951#endif 4952 4953#if defined(DEBUG) 4954 4955static void pads(pmap_t pm); 4956void pmap_pvdump(vm_offset_t pa); 4957 4958/* print address space of pmap*/ 4959static void 4960pads(pmap_t pm) 4961{ 4962 int i, j; 4963 vm_paddr_t va; 4964 pt_entry_t *ptep; 4965 4966 if (pm == kernel_pmap) 4967 return; 4968 for (i = 0; i < NPDEPTD; i++) 4969 if (pm->pm_pdir[i]) 4970 for (j = 0; j < NPTEPG; j++) { 4971 va = (i << PDRSHIFT) + (j << PAGE_SHIFT); 4972 if (pm == kernel_pmap && va < KERNBASE) 4973 continue; 4974 if (pm != kernel_pmap && va > UPT_MAX_ADDRESS) 4975 continue; 4976 ptep = pmap_pte(pm, va); 4977 if (pmap_pte_v(ptep)) 4978 printf("%x:%x ", va, *ptep); 4979 }; 4980 4981} 4982 4983void 4984pmap_pvdump(vm_paddr_t pa) 4985{ 4986 pv_entry_t pv; 4987 pmap_t pmap; 4988 vm_page_t m; 4989 4990 printf("pa %x", pa); 4991 m = PHYS_TO_VM_PAGE(pa); 4992 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { 4993 pmap = PV_PMAP(pv); 4994 printf(" -> pmap %p, va %x", (void *)pmap, pv->pv_va); 4995 pads(pmap); 4996 } 4997 printf(" "); 4998} 4999#endif 5000