/*-
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 * Copyright (c) 2005 Alan L. Cox <alc@cs.rice.edu>
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from:	@(#)pmap.c	7.7 (Berkeley)	5/12/91
 */
/*-
 * Copyright (c) 2003 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Jake Burkholder,
 * Safeport Network Services, and Network Associates Laboratories, the
 * Security Research Division of Network Associates, Inc. under
 * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA
 * CHATS research program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/i386/xen/pmap.c 246855 2013-02-15 22:43:08Z jkim $");

/*
 *	Manages physical address maps.
 *
 *	Since the information managed by this module is
 *	also stored by the logical address mapping module,
 *	this module may throw away valid virtual-to-physical
 *	mappings at almost any time.  However, invalidations
 *	of virtual-to-physical mappings must be done as
 *	requested.
 *
 *	In order to cope with hardware architectures which
 *	make virtual-to-physical map invalidates expensive,
 *	this module may delay invalidation or protection
 *	reduction operations until such time as they are actually
 *	necessary.  This module is given full information as
 *	to which processors are currently using which maps,
 *	and to when physical maps must be made correct.
 */
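/*
 * In this Xen port, the delayed-update strategy described above shows up
 * concretely in the page table update queue: stores are typically queued
 * via xen_queue_pt_update() and only handed to the hypervisor when
 * PT_UPDATES_FLUSH() or xen_flush_queue() runs.
 */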

#include "opt_cpu.h"
#include "opt_pmap.h"
#include "opt_smp.h"
#include "opt_xbox.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/sf_buf.h>
#include <sys/sx.h>
#include <sys/vmmeter.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#ifdef SMP
#include <sys/smp.h>
#else
#include <sys/cpuset.h>
#endif

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/uma.h>

#include <machine/cpu.h>
#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/specialreg.h>
#ifdef SMP
#include <machine/smp.h>
#endif

#ifdef XBOX
#include <machine/xbox.h>
#endif

#include <xen/interface/xen.h>
#include <xen/hypervisor.h>
#include <machine/xen/hypercall.h>
#include <machine/xen/xenvar.h>
#include <machine/xen/xenfunc.h>

#if !defined(CPU_DISABLE_SSE) && defined(I686_CPU)
#define CPU_ENABLE_SSE
#endif

#ifndef PMAP_SHPGPERPROC
#define PMAP_SHPGPERPROC 200
#endif

#define DIAGNOSTIC

#if !defined(DIAGNOSTIC)
#ifdef __GNUC_GNU_INLINE__
#define PMAP_INLINE	__attribute__((__gnu_inline__)) inline
#else
#define PMAP_INLINE	extern inline
#endif
#else
#define PMAP_INLINE
#endif

#ifdef PV_STATS
#define PV_STAT(x)	do { x ; } while (0)
#else
#define PV_STAT(x)	do { } while (0)
#endif

/*
 * Get PDEs and PTEs for user/kernel address space
 */
#define	pmap_pde(m, v)	(&((m)->pm_pdir[(vm_offset_t)(v) >> PDRSHIFT]))
#define pdir_pde(m, v) (m[(vm_offset_t)(v) >> PDRSHIFT])

#define pmap_pde_v(pte)		((*(int *)pte & PG_V) != 0)
#define pmap_pte_w(pte)		((*(int *)pte & PG_W) != 0)
#define pmap_pte_m(pte)		((*(int *)pte & PG_M) != 0)
#define pmap_pte_u(pte)		((*(int *)pte & PG_A) != 0)
#define pmap_pte_v(pte)		((*(int *)pte & PG_V) != 0)

#define pmap_pte_set_prot(pte, v) ((*(int *)pte &= ~PG_PROT), (*(int *)pte |= (v)))
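
/*
 * Example (illustrative, non-PAE i386, where PDRSHIFT == 22): for
 * va == 0x00c01000, pmap_pde(pmap, va) is &pmap->pm_pdir[3], since
 * 0x00c01000 >> 22 == 3.
 */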

#define HAMFISTED_LOCKING
#ifdef HAMFISTED_LOCKING
static struct mtx createdelete_lock;
#endif

struct pmap kernel_pmap_store;
LIST_HEAD(pmaplist, pmap);
static struct pmaplist allpmaps;
static struct mtx allpmaps_lock;

vm_offset_t virtual_avail;	/* VA of first avail page (after kernel bss) */
vm_offset_t virtual_end;	/* VA of last avail page (end of kernel AS) */
int pgeflag = 0;		/* PG_G or-in */
int pseflag = 0;		/* PG_PS or-in */

int nkpt;
vm_offset_t kernel_vm_end;
extern u_int32_t KERNend;

#ifdef PAE
pt_entry_t pg_nx;
#endif

static SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD, 0, "VM/pmap parameters");

static int pat_works;		/* Is page attribute table sane? */

/*
 * This lock is defined as static in other pmap implementations.  It cannot,
 * however, be defined as static here, because it is (ab)used to serialize
 * queued page table changes in other source files.
 */
struct rwlock pvh_global_lock;

/*
 * Data for the pv entry allocation mechanism
 */
static TAILQ_HEAD(pch, pv_chunk) pv_chunks = TAILQ_HEAD_INITIALIZER(pv_chunks);
static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;
static int shpgperproc = PMAP_SHPGPERPROC;

struct pv_chunk *pv_chunkbase;	/* KVA block for pv_chunks */
int pv_maxchunks;		/* How many chunks we have KVA for */
vm_offset_t pv_vafree;		/* freelist stored in the PTE */

/*
 * All those kernel PT submaps that BSD is so fond of
 */
struct sysmaps {
	struct	mtx lock;
	pt_entry_t *CMAP1;
	pt_entry_t *CMAP2;
	caddr_t	CADDR1;
	caddr_t	CADDR2;
};
static struct sysmaps sysmaps_pcpu[MAXCPU];
static pt_entry_t *CMAP3;
caddr_t ptvmmap = 0;
static caddr_t CADDR3;
struct msgbuf *msgbufp = 0;
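
/*
 * The submap PTE pointers and VAs above (CMAPx/CADDRx, ptvmmap, msgbufp)
 * are not usable until pmap_bootstrap() assigns them via the SYSMAP()
 * macro defined there.
 */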
/*
 * Crashdump maps.
 */
static caddr_t crashdumpmap;

static pt_entry_t *PMAP1 = 0, *PMAP2;
static pt_entry_t *PADDR1 = 0, *PADDR2;
#ifdef SMP
static int PMAP1cpu;
static int PMAP1changedcpu;
SYSCTL_INT(_debug, OID_AUTO, PMAP1changedcpu, CTLFLAG_RD,
	   &PMAP1changedcpu, 0,
	   "Number of times pmap_pte_quick changed CPU with same PMAP1");
#endif
static int PMAP1changed;
SYSCTL_INT(_debug, OID_AUTO, PMAP1changed, CTLFLAG_RD,
	   &PMAP1changed, 0,
	   "Number of times pmap_pte_quick changed PMAP1");
static int PMAP1unchanged;
SYSCTL_INT(_debug, OID_AUTO, PMAP1unchanged, CTLFLAG_RD,
	   &PMAP1unchanged, 0,
	   "Number of times pmap_pte_quick didn't change PMAP1");
static struct mtx PMAP2mutex;

static void	free_pv_chunk(struct pv_chunk *pc);
static void	free_pv_entry(pmap_t pmap, pv_entry_t pv);
static pv_entry_t get_pv_entry(pmap_t pmap, boolean_t try);
static void	pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va);
static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap,
		    vm_offset_t va);

static vm_page_t pmap_enter_quick_locked(multicall_entry_t **mcl, int *count,
    pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, vm_page_t mpte);
static void pmap_flush_page(vm_page_t m);
static void pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode);
static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t sva,
    vm_page_t *free);
static void pmap_remove_page(struct pmap *pmap, vm_offset_t va,
    vm_page_t *free);
static void pmap_remove_entry(struct pmap *pmap, vm_page_t m,
    vm_offset_t va);
static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va,
    vm_page_t m);

static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags);

static vm_page_t _pmap_allocpte(pmap_t pmap, u_int ptepindex, int flags);
static void _pmap_unwire_ptp(pmap_t pmap, vm_page_t m, vm_page_t *free);
static pt_entry_t *pmap_pte_quick(pmap_t pmap, vm_offset_t va);
static void pmap_pte_release(pt_entry_t *pte);
static int pmap_unuse_pt(pmap_t, vm_offset_t, vm_page_t *);
static boolean_t pmap_is_prefaultable_locked(pmap_t pmap, vm_offset_t addr);

static __inline void pagezero(void *page);

CTASSERT(1 << PDESHIFT == sizeof(pd_entry_t));
CTASSERT(1 << PTESHIFT == sizeof(pt_entry_t));

/*
 * If you get an error here, then you set KVA_PAGES wrong! See the
 * description of KVA_PAGES in sys/i386/include/pmap.h. It must be a
 * multiple of 4 for a normal kernel, or a multiple of 8 for a PAE kernel.
 */
CTASSERT(KERNBASE % (1 << 24) == 0);

void
pd_set(struct pmap *pmap, int ptepindex, vm_paddr_t val, int type)
{
	vm_paddr_t pdir_ma = vtomach(&pmap->pm_pdir[ptepindex]);

	switch (type) {
	case SH_PD_SET_VA:
#if 0
		xen_queue_pt_update(shadow_pdir_ma,
				    xpmap_ptom(val & ~(PG_RW)));
#endif
		xen_queue_pt_update(pdir_ma,
				    xpmap_ptom(val));
		break;
	case SH_PD_SET_VA_MA:
#if 0
		xen_queue_pt_update(shadow_pdir_ma,
				    val & ~(PG_RW));
#endif
		xen_queue_pt_update(pdir_ma, val);
		break;
	case SH_PD_SET_VA_CLEAR:
#if 0
		xen_queue_pt_update(shadow_pdir_ma, 0);
#endif
		xen_queue_pt_update(pdir_ma, 0);
		break;
	}
}

/*
 *	Bootstrap the system enough to run with virtual memory.
 *
 *	On the i386 this is called after mapping has already been enabled
 *	and just syncs the pmap module with what has already been done.
 *	[We can't call it easily with mapping off since the kernel is not
 *	mapped with PA == VA, hence we would have to relocate every address
 *	from the linked base (virtual) address "KERNBASE" to the actual
 *	(physical) address starting relative to 0]
 */
void
pmap_bootstrap(vm_paddr_t firstaddr)
{
	vm_offset_t va;
	pt_entry_t *pte, *unused;
	struct sysmaps *sysmaps;
	int i;

	/*
	 * Initialize the first available kernel virtual address.  However,
	 * using "firstaddr" may waste a few pages of the kernel virtual
	 * address space, because locore may not have mapped every physical
	 * page that it allocated.  Preferably, locore would provide a first
	 * unused virtual address in addition to "firstaddr".
	 */
	virtual_avail = (vm_offset_t) KERNBASE + firstaddr;

	virtual_end = VM_MAX_KERNEL_ADDRESS;

	/*
	 * Initialize the kernel pmap (which is statically allocated).
	 */
	PMAP_LOCK_INIT(kernel_pmap);
	kernel_pmap->pm_pdir = (pd_entry_t *) (KERNBASE + (u_int)IdlePTD);
#ifdef PAE
	kernel_pmap->pm_pdpt = (pdpt_entry_t *) (KERNBASE + (u_int)IdlePDPT);
#endif
	CPU_FILL(&kernel_pmap->pm_active);	/* don't allow deactivation */
	TAILQ_INIT(&kernel_pmap->pm_pvchunk);

	/*
	 * Initialize the global pv list lock.
	 */
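	/*
	 * The lock is created recursable (RW_RECURSE) because, as noted at
	 * its definition above, other source files (ab)use it to serialize
	 * queued page table changes and may acquire it while it is already
	 * held.
	 */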
	rw_init_flags(&pvh_global_lock, "pmap pv global", RW_RECURSE);

	LIST_INIT(&allpmaps);
	mtx_init(&allpmaps_lock, "allpmaps", NULL, MTX_SPIN);
	mtx_lock_spin(&allpmaps_lock);
	LIST_INSERT_HEAD(&allpmaps, kernel_pmap, pm_list);
	mtx_unlock_spin(&allpmaps_lock);
	if (nkpt == 0)
		nkpt = NKPT;

	/*
	 * Reserve some special page table entries/VA space for temporary
	 * mapping of pages.
	 */
#define	SYSMAP(c, p, v, n)	\
	v = (c)va; va += ((n)*PAGE_SIZE); p = pte; pte += (n);

	va = virtual_avail;
	pte = vtopte(va);

	/*
	 * CMAP1/CMAP2 are used for zeroing and copying pages.
	 * CMAP3 is used for the idle process page zeroing.
	 */
	for (i = 0; i < MAXCPU; i++) {
		sysmaps = &sysmaps_pcpu[i];
		mtx_init(&sysmaps->lock, "SYSMAPS", NULL, MTX_DEF);
		SYSMAP(caddr_t, sysmaps->CMAP1, sysmaps->CADDR1, 1)
		SYSMAP(caddr_t, sysmaps->CMAP2, sysmaps->CADDR2, 1)
		PT_SET_MA(sysmaps->CADDR1, 0);
		PT_SET_MA(sysmaps->CADDR2, 0);
	}
	SYSMAP(caddr_t, CMAP3, CADDR3, 1)
	PT_SET_MA(CADDR3, 0);

	/*
	 * Crashdump maps.
	 */
	SYSMAP(caddr_t, unused, crashdumpmap, MAXDUMPPGS)

	/*
	 * ptvmmap is used for reading arbitrary physical pages via /dev/mem.
	 */
	SYSMAP(caddr_t, unused, ptvmmap, 1)

	/*
	 * msgbufp is used to map the system message buffer.
	 */
	SYSMAP(struct msgbuf *, unused, msgbufp, atop(round_page(msgbufsize)))

	/*
	 * PADDR1 and PADDR2 are used by pmap_pte_quick() and pmap_pte(),
	 * respectively.
	 */
	SYSMAP(pt_entry_t *, PMAP1, PADDR1, 1)
	SYSMAP(pt_entry_t *, PMAP2, PADDR2, 1)

	mtx_init(&PMAP2mutex, "PMAP2", NULL, MTX_DEF);

	virtual_avail = va;

	/*
	 * Leave in place an identity mapping (virt == phys) for the low 1 MB
	 * physical memory region that is used by the ACPI wakeup code.  This
	 * mapping must not have PG_G set.
	 */
#ifndef XEN
	/*
	 * leave here deliberately to show that this is not supported
	 */
#ifdef XBOX
	/* FIXME: This is gross, but needed for the XBOX. Since we are at such
	 * an early stage, we cannot yet neatly map video memory ... :-(
	 * Better fixes are very welcome! */
	if (!arch_i386_is_xbox)
#endif
	for (i = 1; i < NKPT; i++)
		PTD[i] = 0;

	/* Initialize the PAT MSR if present. */
	pmap_init_pat();

	/* Turn on PG_G on kernel page(s) */
	pmap_set_pg();
#endif

#ifdef HAMFISTED_LOCKING
	mtx_init(&createdelete_lock, "pmap create/delete", NULL, MTX_DEF);
#endif
}

/*
 * Setup the PAT MSR.
 */
void
pmap_init_pat(void)
{
	uint64_t pat_msr;

	/* Bail if this CPU doesn't implement PAT. */
	if (!(cpu_feature & CPUID_PAT))
		return;

	if (cpu_vendor_id != CPU_VENDOR_INTEL ||
	    (CPUID_TO_FAMILY(cpu_id) == 6 && CPUID_TO_MODEL(cpu_id) >= 0xe)) {
		/*
		 * Leave the indices 0-3 at the default of WB, WT, UC, and UC-.
		 * Program 4 and 5 as WP and WC.
		 * Leave 6 and 7 as UC and UC-.
		 */
		pat_msr = rdmsr(MSR_PAT);
		pat_msr &= ~(PAT_MASK(4) | PAT_MASK(5));
		pat_msr |= PAT_VALUE(4, PAT_WRITE_PROTECTED) |
		    PAT_VALUE(5, PAT_WRITE_COMBINING);
		pat_works = 1;
	} else {
		/*
		 * Due to some Intel errata, we can only safely use the lower 4
		 * PAT entries.  Thus, just replace PAT Index 2 with WC instead
		 * of UC-.
		 *
		 *   Intel Pentium III Processor Specification Update
		 * Errata E.27 (Upper Four PAT Entries Not Usable With Mode B
		 * or Mode C Paging)
		 *
		 *   Intel Pentium IV Processor Specification Update
		 * Errata N46 (PAT Index MSB May Be Calculated Incorrectly)
		 */
		pat_msr = rdmsr(MSR_PAT);
		pat_msr &= ~PAT_MASK(2);
		pat_msr |= PAT_VALUE(2, PAT_WRITE_COMBINING);
		pat_works = 0;
	}
	wrmsr(MSR_PAT, pat_msr);
}

/*
 * Initialize a vm_page's machine-dependent fields.
 */
void
pmap_page_init(vm_page_t m)
{

	TAILQ_INIT(&m->md.pv_list);
	m->md.pat_mode = PAT_WRITE_BACK;
}

/*
 * Abuse the pte nodes for unmapped kva to thread a kva freelist through.
 * Requirements:
 *  - Must deal with pages in order to ensure that none of the PG_* bits
 *    are ever set, PG_V in particular.
 *  - Assumes we can write to ptes without pte_store() atomic ops, even
 *    on PAE systems.  This should be ok.
 *  - Assumes nothing will ever test these addresses for 0 to indicate
 *    no mapping instead of correctly checking PG_V.
 *  - Assumes a vm_offset_t will fit in a pte (true for i386).
 * Because PG_V is never set, there can be no mappings to invalidate.
 */
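/*
 * Concretely, pmap_ptelist_init() below backs the first few pages of the
 * KVA block with real memory and uses them as a simple stack of free VAs;
 * pmap_ptelist_alloc() and pmap_ptelist_free() pop and push entries on
 * that stack.
 */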
static int ptelist_count = 0;
static vm_offset_t
pmap_ptelist_alloc(vm_offset_t *head)
{
	vm_offset_t va;
	vm_offset_t *phead = (vm_offset_t *)*head;

	if (ptelist_count == 0) {
		printf("out of memory!!!!!!\n");
		return (0);	/* Out of memory */
	}
	ptelist_count--;
	va = phead[ptelist_count];
	return (va);
}

static void
pmap_ptelist_free(vm_offset_t *head, vm_offset_t va)
{
	vm_offset_t *phead = (vm_offset_t *)*head;

	phead[ptelist_count++] = va;
}

static void
pmap_ptelist_init(vm_offset_t *head, void *base, int npages)
{
	int i, nstackpages;
	vm_offset_t va;
	vm_page_t m;

	nstackpages = (npages + PAGE_SIZE / sizeof(vm_offset_t) - 1) /
	    (PAGE_SIZE / sizeof(vm_offset_t));
	for (i = 0; i < nstackpages; i++) {
		va = (vm_offset_t)base + i * PAGE_SIZE;
		m = vm_page_alloc(NULL, i,
		    VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
		    VM_ALLOC_ZERO);
		pmap_qenter(va, &m, 1);
	}

	*head = (vm_offset_t)base;
	for (i = npages - 1; i >= nstackpages; i--) {
		va = (vm_offset_t)base + i * PAGE_SIZE;
		pmap_ptelist_free(head, va);
	}
}


/*
 *	Initialize the pmap module.
 *	Called by vm_init, to initialize any structures that the pmap
 *	system needs to map virtual memory.
 */
void
pmap_init(void)
{

	/*
	 * Initialize the address space (zone) for the pv entries.  Set a
	 * high water mark so that the system can recover from excessive
	 * numbers of pv entries.
	 */
	TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
	pv_entry_max = shpgperproc * maxproc + cnt.v_page_count;
	TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max);
	pv_entry_max = roundup(pv_entry_max, _NPCPV);
	pv_entry_high_water = 9 * (pv_entry_max / 10);

	pv_maxchunks = MAX(pv_entry_max / _NPCPV, maxproc);
	pv_chunkbase = (struct pv_chunk *)kmem_alloc_nofault(kernel_map,
	    PAGE_SIZE * pv_maxchunks);
	if (pv_chunkbase == NULL)
		panic("pmap_init: not enough kvm for pv chunks");
	pmap_ptelist_init(&pv_vafree, pv_chunkbase, pv_maxchunks);
}


SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_max, CTLFLAG_RD, &pv_entry_max, 0,
	"Max number of PV entries");
SYSCTL_INT(_vm_pmap, OID_AUTO, shpgperproc, CTLFLAG_RD, &shpgperproc, 0,
	"Page share factor per proc");

static SYSCTL_NODE(_vm_pmap, OID_AUTO, pde, CTLFLAG_RD, 0,
    "2/4MB page mapping counters");

static u_long pmap_pde_mappings;
SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, mappings, CTLFLAG_RD,
    &pmap_pde_mappings, 0, "2/4MB page mappings");

/***************************************************
 * Low level helper routines.....
 ***************************************************/

/*
 * Determine the appropriate bits to set in a PTE or PDE for a specified
 * caching mode.
 */
int
pmap_cache_bits(int mode, boolean_t is_pde)
{
	int pat_flag, pat_index, cache_bits;

	/* The PAT bit is different for PTE's and PDE's. */
	pat_flag = is_pde ? PG_PDE_PAT : PG_PTE_PAT;

	/* If we don't support PAT, map extended modes to older ones. */
	if (!(cpu_feature & CPUID_PAT)) {
		switch (mode) {
		case PAT_UNCACHEABLE:
		case PAT_WRITE_THROUGH:
		case PAT_WRITE_BACK:
			break;
		case PAT_UNCACHED:
		case PAT_WRITE_COMBINING:
		case PAT_WRITE_PROTECTED:
			mode = PAT_UNCACHEABLE;
			break;
		}
	}

	/* Map the caching mode to a PAT index. */
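	/*
	 * For example, with a working PAT, PAT_WRITE_COMBINING maps to
	 * index 5, which pmap_init_pat() programmed as WC; without one,
	 * it falls back to index 2, the entry rewritten from UC- to WC.
	 */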
	if (pat_works) {
		switch (mode) {
		case PAT_UNCACHEABLE:
			pat_index = 3;
			break;
		case PAT_WRITE_THROUGH:
			pat_index = 1;
			break;
		case PAT_WRITE_BACK:
			pat_index = 0;
			break;
		case PAT_UNCACHED:
			pat_index = 2;
			break;
		case PAT_WRITE_COMBINING:
			pat_index = 5;
			break;
		case PAT_WRITE_PROTECTED:
			pat_index = 4;
			break;
		default:
			panic("Unknown caching mode %d\n", mode);
		}
	} else {
		switch (mode) {
		case PAT_UNCACHED:
		case PAT_UNCACHEABLE:
		case PAT_WRITE_PROTECTED:
			pat_index = 3;
			break;
		case PAT_WRITE_THROUGH:
			pat_index = 1;
			break;
		case PAT_WRITE_BACK:
			pat_index = 0;
			break;
		case PAT_WRITE_COMBINING:
			pat_index = 2;
			break;
		default:
			panic("Unknown caching mode %d\n", mode);
		}
	}

	/* Map the 3-bit index value into the PAT, PCD, and PWT bits. */
	cache_bits = 0;
	if (pat_index & 0x4)
		cache_bits |= pat_flag;
	if (pat_index & 0x2)
		cache_bits |= PG_NC_PCD;
	if (pat_index & 0x1)
		cache_bits |= PG_NC_PWT;
	return (cache_bits);
}
#ifdef SMP
/*
 * For SMP, these functions have to use the IPI mechanism for coherence.
 *
 * N.B.: Before calling any of the following TLB invalidation functions,
 * the calling processor must ensure that all stores updating a non-
 * kernel page table are globally performed.  Otherwise, another
 * processor could cache an old, pre-update entry without being
 * invalidated.  This can happen one of two ways: (1) The pmap becomes
 * active on another processor after its pm_active field is checked by
 * one of the following functions but before a store updating the page
 * table is globally performed. (2) The pmap becomes active on another
 * processor before its pm_active field is checked but due to
 * speculative loads one of the following functions still reads the
 * pmap as inactive on the other processor.
 *
 * The kernel page table is exempt because its pm_active field is
 * immutable.  The kernel page table is always active on every
 * processor.
 */
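/*
 * Unlike the native i386 pmap, pmap_invalidate_page() and
 * pmap_invalidate_range() below also call PT_UPDATES_FLUSH() so that
 * queued hypervisor page table updates are pushed out along with the
 * TLB invalidation.
 */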
void
pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
{
	cpuset_t other_cpus;
	u_int cpuid;

	CTR2(KTR_PMAP, "pmap_invalidate_page: pmap=%p va=0x%x",
	    pmap, va);

	sched_pin();
	if (pmap == kernel_pmap || !CPU_CMP(&pmap->pm_active, &all_cpus)) {
		invlpg(va);
		smp_invlpg(va);
	} else {
		cpuid = PCPU_GET(cpuid);
		other_cpus = all_cpus;
		CPU_CLR(cpuid, &other_cpus);
		if (CPU_ISSET(cpuid, &pmap->pm_active))
			invlpg(va);
		CPU_AND(&other_cpus, &pmap->pm_active);
		if (!CPU_EMPTY(&other_cpus))
			smp_masked_invlpg(other_cpus, va);
	}
	sched_unpin();
	PT_UPDATES_FLUSH();
}

void
pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{
	cpuset_t other_cpus;
	vm_offset_t addr;
	u_int cpuid;

	CTR3(KTR_PMAP, "pmap_invalidate_range: pmap=%p sva=0x%x eva=0x%x",
	    pmap, sva, eva);

	sched_pin();
	if (pmap == kernel_pmap || !CPU_CMP(&pmap->pm_active, &all_cpus)) {
		for (addr = sva; addr < eva; addr += PAGE_SIZE)
			invlpg(addr);
		smp_invlpg_range(sva, eva);
	} else {
		cpuid = PCPU_GET(cpuid);
		other_cpus = all_cpus;
		CPU_CLR(cpuid, &other_cpus);
		if (CPU_ISSET(cpuid, &pmap->pm_active))
			for (addr = sva; addr < eva; addr += PAGE_SIZE)
				invlpg(addr);
		CPU_AND(&other_cpus, &pmap->pm_active);
		if (!CPU_EMPTY(&other_cpus))
			smp_masked_invlpg_range(other_cpus, sva, eva);
	}
	sched_unpin();
	PT_UPDATES_FLUSH();
}

void
pmap_invalidate_all(pmap_t pmap)
{
	cpuset_t other_cpus;
	u_int cpuid;

	CTR1(KTR_PMAP, "pmap_invalidate_all: pmap=%p", pmap);

	sched_pin();
	if (pmap == kernel_pmap || !CPU_CMP(&pmap->pm_active, &all_cpus)) {
		invltlb();
		smp_invltlb();
	} else {
		cpuid = PCPU_GET(cpuid);
		other_cpus = all_cpus;
		CPU_CLR(cpuid, &other_cpus);
		if (CPU_ISSET(cpuid, &pmap->pm_active))
			invltlb();
		CPU_AND(&other_cpus, &pmap->pm_active);
		if (!CPU_EMPTY(&other_cpus))
			smp_masked_invltlb(other_cpus);
	}
	sched_unpin();
}

void
pmap_invalidate_cache(void)
{

	sched_pin();
	wbinvd();
	smp_cache_flush();
	sched_unpin();
}
#else /* !SMP */
/*
 * Normal, non-SMP, 486+ invalidation functions.
 * We inline these within pmap.c for speed.
 */
PMAP_INLINE void
pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
{

	CTR2(KTR_PMAP, "pmap_invalidate_page: pmap=%p va=0x%x",
	    pmap, va);

	if (pmap == kernel_pmap || !CPU_EMPTY(&pmap->pm_active))
		invlpg(va);
	PT_UPDATES_FLUSH();
}

PMAP_INLINE void
pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{
	vm_offset_t addr;

	if (eva - sva > PAGE_SIZE)
		CTR3(KTR_PMAP, "pmap_invalidate_range: pmap=%p sva=0x%x eva=0x%x",
		    pmap, sva, eva);

	if (pmap == kernel_pmap || !CPU_EMPTY(&pmap->pm_active))
		for (addr = sva; addr < eva; addr += PAGE_SIZE)
			invlpg(addr);
	PT_UPDATES_FLUSH();
}

PMAP_INLINE void
pmap_invalidate_all(pmap_t pmap)
{

	CTR1(KTR_PMAP, "pmap_invalidate_all: pmap=%p", pmap);

	if (pmap == kernel_pmap || !CPU_EMPTY(&pmap->pm_active))
		invltlb();
}

PMAP_INLINE void
pmap_invalidate_cache(void)
{

	wbinvd();
}
#endif /* !SMP */

#define	PMAP_CLFLUSH_THRESHOLD	(2 * 1024 * 1024)

void
pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva)
{

	KASSERT((sva & PAGE_MASK) == 0,
	    ("pmap_invalidate_cache_range: sva not page-aligned"));
	KASSERT((eva & PAGE_MASK) == 0,
	    ("pmap_invalidate_cache_range: eva not page-aligned"));

	if (cpu_feature & CPUID_SS)
		; /* If "Self Snoop" is supported, do nothing. */
	else if ((cpu_feature & CPUID_CLFSH) != 0 &&
	    eva - sva < PMAP_CLFLUSH_THRESHOLD) {

		/*
		 * Otherwise, do per-cache line flush.  Use the mfence
		 * instruction to ensure that previous stores are
		 * included in the write-back.  The processor
		 * propagates flush to other processors in the cache
		 * coherence domain.
		 */
		mfence();
		for (; sva < eva; sva += cpu_clflush_line_size)
			clflush(sva);
		mfence();
	} else {

		/*
		 * No targeted cache flush methods are supported by CPU,
		 * or the supplied range is bigger than 2MB.
		 * Globally invalidate cache.
		 */
		pmap_invalidate_cache();
	}
}

void
pmap_invalidate_cache_pages(vm_page_t *pages, int count)
{
	int i;

	if (count >= PMAP_CLFLUSH_THRESHOLD / PAGE_SIZE ||
	    (cpu_feature & CPUID_CLFSH) == 0) {
		pmap_invalidate_cache();
	} else {
		for (i = 0; i < count; i++)
			pmap_flush_page(pages[i]);
	}
}

/*
 * Are we current address space or kernel?  N.B. We return FALSE when
 * a pmap's page table is in use because a kernel thread is borrowing
 * it.  The borrowed page table can change spontaneously, making any
 * dependence on its continued use subject to a race condition.
 */
static __inline int
pmap_is_current(pmap_t pmap)
{

	return (pmap == kernel_pmap ||
	    (pmap == vmspace_pmap(curthread->td_proc->p_vmspace) &&
	    (pmap->pm_pdir[PTDPTDI] & PG_FRAME) == (PTDpde[0] & PG_FRAME)));
}

/*
 * If the given pmap is not the current or kernel pmap, the returned pte must
 * be released by passing it to pmap_pte_release().
 */
pt_entry_t *
pmap_pte(pmap_t pmap, vm_offset_t va)
{
	pd_entry_t newpf;
	pd_entry_t *pde;

	pde = pmap_pde(pmap, va);
	if (*pde & PG_PS)
		return (pde);
	if (*pde != 0) {
		/* are we current address space or kernel? */
		if (pmap_is_current(pmap))
			return (vtopte(va));
		mtx_lock(&PMAP2mutex);
		newpf = *pde & PG_FRAME;
		if ((*PMAP2 & PG_FRAME) != newpf) {
			PT_SET_MA(PADDR2, newpf | PG_V | PG_A | PG_M);
			CTR3(KTR_PMAP, "pmap_pte: pmap=%p va=0x%x newpte=0x%08x",
			    pmap, va, (*PMAP2 & 0xffffffff));
		}
		return (PADDR2 + (i386_btop(va) & (NPTEPG - 1)));
	}
	return (NULL);
}

/*
 * Releases a pte that was obtained from pmap_pte().  Be prepared for the pte
 * being NULL.
 */
static __inline void
pmap_pte_release(pt_entry_t *pte)
{

	if ((pt_entry_t *)((vm_offset_t)pte & ~PAGE_MASK) == PADDR2) {
		CTR1(KTR_PMAP, "pmap_pte_release: pte=0x%jx",
		    *PMAP2);
		rw_wlock(&pvh_global_lock);
		PT_SET_VA(PMAP2, 0, TRUE);
		rw_wunlock(&pvh_global_lock);
		mtx_unlock(&PMAP2mutex);
	}
}

static __inline void
invlcaddr(void *caddr)
{

	invlpg((u_int)caddr);
	PT_UPDATES_FLUSH();
}

/*
 * Super fast pmap_pte routine best used when scanning
 * the pv lists.  This eliminates many coarse-grained
 * invltlb calls.  Note that many of the pv list
 * scans are across different pmaps.  It is very wasteful
 * to do an entire invltlb for checking a single mapping.
 *
 * If the given pmap is not the current pmap, pvh_global_lock
 * must be held and curthread pinned to a CPU.
 */
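/*
 * PMAP1 maps the borrowed page table page at PADDR1; the
 * PMAP1changed/PMAP1changedcpu/PMAP1unchanged counters defined above
 * record how often that cached mapping could be reused.
 */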
static pt_entry_t *
pmap_pte_quick(pmap_t pmap, vm_offset_t va)
{
	pd_entry_t newpf;
	pd_entry_t *pde;

	pde = pmap_pde(pmap, va);
	if (*pde & PG_PS)
		return (pde);
	if (*pde != 0) {
		/* are we current address space or kernel? */
		if (pmap_is_current(pmap))
			return (vtopte(va));
		rw_assert(&pvh_global_lock, RA_WLOCKED);
		KASSERT(curthread->td_pinned > 0, ("curthread not pinned"));
		newpf = *pde & PG_FRAME;
		if ((*PMAP1 & PG_FRAME) != newpf) {
			PT_SET_MA(PADDR1, newpf | PG_V | PG_A | PG_M);
			CTR3(KTR_PMAP, "pmap_pte_quick: pmap=%p va=0x%x newpte=0x%08x",
			    pmap, va, (u_long)*PMAP1);

#ifdef SMP
			PMAP1cpu = PCPU_GET(cpuid);
#endif
			PMAP1changed++;
		} else
#ifdef SMP
		if (PMAP1cpu != PCPU_GET(cpuid)) {
			PMAP1cpu = PCPU_GET(cpuid);
			invlcaddr(PADDR1);
			PMAP1changedcpu++;
		} else
#endif
			PMAP1unchanged++;
		return (PADDR1 + (i386_btop(va) & (NPTEPG - 1)));
	}
	return (0);
}

/*
 *	Routine:	pmap_extract
 *	Function:
 *		Extract the physical page address associated
 *		with the given map/virtual_address pair.
 */
vm_paddr_t
pmap_extract(pmap_t pmap, vm_offset_t va)
{
	vm_paddr_t rtval;
	pt_entry_t *pte;
	pd_entry_t pde;
	pt_entry_t pteval;

	rtval = 0;
	PMAP_LOCK(pmap);
	pde = pmap->pm_pdir[va >> PDRSHIFT];
	if (pde != 0) {
		if ((pde & PG_PS) != 0) {
			rtval = xpmap_mtop(pde & PG_PS_FRAME) | (va & PDRMASK);
			PMAP_UNLOCK(pmap);
			return (rtval);
		}
		pte = pmap_pte(pmap, va);
		pteval = *pte ? xpmap_mtop(*pte) : 0;
		rtval = (pteval & PG_FRAME) | (va & PAGE_MASK);
		pmap_pte_release(pte);
	}
	PMAP_UNLOCK(pmap);
	return (rtval);
}

/*
 *	Routine:	pmap_extract_ma
 *	Function:
 *		Like pmap_extract, but returns machine address
 */
vm_paddr_t
pmap_extract_ma(pmap_t pmap, vm_offset_t va)
{
	vm_paddr_t rtval;
	pt_entry_t *pte;
	pd_entry_t pde;

	rtval = 0;
	PMAP_LOCK(pmap);
	pde = pmap->pm_pdir[va >> PDRSHIFT];
	if (pde != 0) {
		if ((pde & PG_PS) != 0) {
			rtval = (pde & ~PDRMASK) | (va & PDRMASK);
			PMAP_UNLOCK(pmap);
			return (rtval);
		}
		pte = pmap_pte(pmap, va);
		rtval = (*pte & PG_FRAME) | (va & PAGE_MASK);
		pmap_pte_release(pte);
	}
	PMAP_UNLOCK(pmap);
	return (rtval);
}

/*
 *	Routine:	pmap_extract_and_hold
 *	Function:
 *		Atomically extract and hold the physical page
 *		with the given pmap and virtual address pair
 *		if that mapping permits the given protection.
 */
vm_page_t
pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
{
	pd_entry_t pde;
	pt_entry_t pte, *ptep;
	vm_page_t m;
	vm_paddr_t pa;

	pa = 0;
	m = NULL;
	PMAP_LOCK(pmap);
retry:
	pde = PT_GET(pmap_pde(pmap, va));
	if (pde != 0) {
		if (pde & PG_PS) {
			if ((pde & PG_RW) || (prot & VM_PROT_WRITE) == 0) {
				if (vm_page_pa_tryrelock(pmap, (pde &
				    PG_PS_FRAME) | (va & PDRMASK), &pa))
					goto retry;
				m = PHYS_TO_VM_PAGE((pde & PG_PS_FRAME) |
				    (va & PDRMASK));
				vm_page_hold(m);
			}
		} else {
			ptep = pmap_pte(pmap, va);
			pte = PT_GET(ptep);
			pmap_pte_release(ptep);
			if (pte != 0 &&
			    ((pte & PG_RW) || (prot & VM_PROT_WRITE) == 0)) {
				if (vm_page_pa_tryrelock(pmap, pte & PG_FRAME,
				    &pa))
					goto retry;
				m = PHYS_TO_VM_PAGE(pte & PG_FRAME);
				vm_page_hold(m);
			}
		}
	}
	PA_UNLOCK_COND(pa);
	PMAP_UNLOCK(pmap);
	return (m);
}

/***************************************************
 * Low level mapping routines.....
 ***************************************************/

/*
 * Add a wired page to the kva.
 * Note: not SMP coherent.
 *
 * This function may be used before pmap_bootstrap() is called.
 */
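/*
 * Note that "pa" here is a physical address; since the hypervisor expects
 * machine addresses, pmap_kenter() translates it with xpmap_ptom(), while
 * pmap_kenter_ma() below takes a machine address directly.
 */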
void
pmap_kenter(vm_offset_t va, vm_paddr_t pa)
{

	PT_SET_MA(va, xpmap_ptom(pa) | PG_RW | PG_V | pgeflag);
}

void
pmap_kenter_ma(vm_offset_t va, vm_paddr_t ma)
{
	pt_entry_t *pte;

	pte = vtopte(va);
	pte_store_ma(pte, ma | PG_RW | PG_V | pgeflag);
}

static __inline void
pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode)
{

	PT_SET_MA(va, pa | PG_RW | PG_V | pgeflag | pmap_cache_bits(mode, 0));
}

/*
 * Remove a page from the kernel pagetables.
 * Note: not SMP coherent.
 *
 * This function may be used before pmap_bootstrap() is called.
 */
PMAP_INLINE void
pmap_kremove(vm_offset_t va)
{
	pt_entry_t *pte;

	pte = vtopte(va);
	PT_CLEAR_VA(pte, FALSE);
}

/*
 * Used to map a range of physical addresses into kernel
 * virtual address space.
 *
 * The value passed in '*virt' is a suggested virtual address for
 * the mapping. Architectures which can support a direct-mapped
 * physical to virtual region can return the appropriate address
 * within that region, leaving '*virt' unchanged. Other
 * architectures should map the pages starting at '*virt' and
 * update '*virt' with the first usable address after the mapped
 * region.
 */
vm_offset_t
pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
{
	vm_offset_t va, sva;

	va = sva = *virt;
	CTR4(KTR_PMAP, "pmap_map: va=0x%x start=0x%jx end=0x%jx prot=0x%x",
	    va, start, end, prot);
	while (start < end) {
		pmap_kenter(va, start);
		va += PAGE_SIZE;
		start += PAGE_SIZE;
	}
	pmap_invalidate_range(kernel_pmap, sva, va);
	*virt = va;
	return (sva);
}


/*
 * Add a list of wired pages to the kva.
 * This routine is only used for temporary
 * kernel mappings that do not need to have
 * page modification or references recorded.
 * Note that old mappings are simply written
 * over.  The page *must* be wired.
 * Note: SMP coherent.  Uses a ranged shootdown IPI.
 */
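/*
 * The Xen implementation batches the PTE stores into
 * HYPERVISOR_multicall() requests, issuing up to 16 update_va_mapping
 * operations per hypercall rather than one hypercall per page.
 */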
void
pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count)
{
	pt_entry_t *endpte, *pte;
	vm_paddr_t pa;
	vm_offset_t va = sva;
	int mclcount = 0;
	multicall_entry_t mcl[16];
	multicall_entry_t *mclp = mcl;
	int error;

	CTR2(KTR_PMAP, "pmap_qenter:sva=0x%x count=%d", va, count);
	pte = vtopte(sva);
	endpte = pte + count;
	while (pte < endpte) {
		pa = VM_PAGE_TO_MACH(*ma) | pgeflag | PG_RW | PG_V | PG_M | PG_A;

		mclp->op = __HYPERVISOR_update_va_mapping;
		mclp->args[0] = va;
		mclp->args[1] = (uint32_t)(pa & 0xffffffff);
		mclp->args[2] = (uint32_t)(pa >> 32);
		mclp->args[3] = (*pte & PG_V) ? UVMF_INVLPG|UVMF_ALL : 0;

		va += PAGE_SIZE;
		pte++;
		ma++;
		mclp++;
		mclcount++;
		if (mclcount == 16) {
			error = HYPERVISOR_multicall(mcl, mclcount);
			mclp = mcl;
			mclcount = 0;
			KASSERT(error == 0, ("bad multicall %d", error));
		}
	}
	if (mclcount) {
		error = HYPERVISOR_multicall(mcl, mclcount);
		KASSERT(error == 0, ("bad multicall %d", error));
	}

#ifdef INVARIANTS
	for (pte = vtopte(sva), mclcount = 0; mclcount < count;
	    mclcount++, pte++)
		KASSERT(*pte, ("pte not set for va=0x%x",
		    sva + mclcount * PAGE_SIZE));
#endif
}

/*
 * This routine tears out page mappings from the
 * kernel -- it is meant only for temporary mappings.
 * Note: SMP coherent.  Uses a ranged shootdown IPI.
 */
void
pmap_qremove(vm_offset_t sva, int count)
{
	vm_offset_t va;

	CTR2(KTR_PMAP, "pmap_qremove: sva=0x%x count=%d", sva, count);
	va = sva;
	rw_wlock(&pvh_global_lock);
	critical_enter();
	while (count-- > 0) {
		pmap_kremove(va);
		va += PAGE_SIZE;
	}
	PT_UPDATES_FLUSH();
	pmap_invalidate_range(kernel_pmap, sva, va);
	critical_exit();
	rw_wunlock(&pvh_global_lock);
}

/***************************************************
 * Page table page management routines.....
 ***************************************************/
static __inline void
pmap_free_zero_pages(vm_page_t free)
{
	vm_page_t m;

	while (free != NULL) {
		m = free;
		free = m->right;
		vm_page_free_zero(m);
	}
}

/*
 * Decrements a page table page's wire count, which is used to record the
 * number of valid page table entries within the page.  If the wire count
 * drops to zero, then the page table page is unmapped.  Returns TRUE if the
 * page table page was unmapped and FALSE otherwise.
 */
static inline boolean_t
pmap_unwire_ptp(pmap_t pmap, vm_page_t m, vm_page_t *free)
{

	--m->wire_count;
	if (m->wire_count == 0) {
		_pmap_unwire_ptp(pmap, m, free);
		return (TRUE);
	} else
		return (FALSE);
}

static void
_pmap_unwire_ptp(pmap_t pmap, vm_page_t m, vm_page_t *free)
{
	vm_offset_t pteva;

	PT_UPDATES_FLUSH();
	/*
	 * unmap the page table page
	 */
	xen_pt_unpin(pmap->pm_pdir[m->pindex]);
	/*
	 * page *might* contain residual mapping :-/
	 */
	PD_CLEAR_VA(pmap, m->pindex, TRUE);
	pmap_zero_page(m);
	--pmap->pm_stats.resident_count;

	/*
	 * This is a release store so that the ordinary store unmapping
	 * the page table page is globally performed before TLB shoot-
	 * down is begun.
	 */
	atomic_subtract_rel_int(&cnt.v_wire_count, 1);

	/*
	 * Do an invltlb to make the invalidated mapping
	 * take effect immediately.
	 */
	pteva = VM_MAXUSER_ADDRESS + i386_ptob(m->pindex);
	pmap_invalidate_page(pmap, pteva);

	/*
	 * Put page on a list so that it is released after
	 * *ALL* TLB shootdown is done
	 */
	m->right = *free;
	*free = m;
}

/*
 * After removing a page table entry, this routine is used to
 * conditionally free the page, and manage the hold/wire counts.
 */
static int
pmap_unuse_pt(pmap_t pmap, vm_offset_t va, vm_page_t *free)
{
	pd_entry_t ptepde;
	vm_page_t mpte;

	if (va >= VM_MAXUSER_ADDRESS)
		return (0);
	ptepde = PT_GET(pmap_pde(pmap, va));
	mpte = PHYS_TO_VM_PAGE(ptepde & PG_FRAME);
	return (pmap_unwire_ptp(pmap, mpte, free));
}

/*
 * Initialize the pmap for the swapper process.
 */
void
pmap_pinit0(pmap_t pmap)
{

	PMAP_LOCK_INIT(pmap);
	/*
	 * Since the page table directory is shared with the kernel pmap,
	 * which is already included in the list "allpmaps", this pmap does
	 * not need to be inserted into that list.
	 */
1429228923Salc */ 1430181641Skmacy pmap->pm_pdir = (pd_entry_t *)(KERNBASE + (vm_offset_t)IdlePTD); 1431181641Skmacy#ifdef PAE 1432181641Skmacy pmap->pm_pdpt = (pdpt_entry_t *)(KERNBASE + (vm_offset_t)IdlePDPT); 1433181641Skmacy#endif 1434222813Sattilio CPU_ZERO(&pmap->pm_active); 1435181641Skmacy PCPU_SET(curpmap, pmap); 1436181641Skmacy TAILQ_INIT(&pmap->pm_pvchunk); 1437181641Skmacy bzero(&pmap->pm_stats, sizeof pmap->pm_stats); 1438181641Skmacy} 1439181641Skmacy 1440181641Skmacy/* 1441181641Skmacy * Initialize a preallocated and zeroed pmap structure, 1442181641Skmacy * such as one in a vmspace structure. 1443181641Skmacy */ 1444181641Skmacyint 1445181641Skmacypmap_pinit(pmap_t pmap) 1446181641Skmacy{ 1447181641Skmacy vm_page_t m, ptdpg[NPGPTD + 1]; 1448181641Skmacy int npgptd = NPGPTD + 1; 1449181641Skmacy int i; 1450181641Skmacy 1451216960Scperciva#ifdef HAMFISTED_LOCKING 1452216960Scperciva mtx_lock(&createdelete_lock); 1453216960Scperciva#endif 1454216960Scperciva 1455181641Skmacy PMAP_LOCK_INIT(pmap); 1456181641Skmacy 1457181641Skmacy /* 1458181641Skmacy * No need to allocate page table space yet but we do need a valid 1459181641Skmacy * page directory table. 1460181641Skmacy */ 1461181641Skmacy if (pmap->pm_pdir == NULL) { 1462181641Skmacy pmap->pm_pdir = (pd_entry_t *)kmem_alloc_nofault(kernel_map, 1463181641Skmacy NBPTD); 1464181641Skmacy if (pmap->pm_pdir == NULL) { 1465181641Skmacy PMAP_LOCK_DESTROY(pmap); 1466216960Scperciva#ifdef HAMFISTED_LOCKING 1467216960Scperciva mtx_unlock(&createdelete_lock); 1468216960Scperciva#endif 1469181641Skmacy return (0); 1470181641Skmacy } 1471215593Scperciva#ifdef PAE 1472181641Skmacy pmap->pm_pdpt = (pd_entry_t *)kmem_alloc_nofault(kernel_map, 1); 1473181641Skmacy#endif 1474181641Skmacy } 1475181641Skmacy 1476181641Skmacy /* 1477181641Skmacy * allocate the page directory page(s) 1478181641Skmacy */ 1479181641Skmacy for (i = 0; i < npgptd;) { 1480226843Salc m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | 1481226843Salc VM_ALLOC_WIRED | VM_ALLOC_ZERO); 1482181641Skmacy if (m == NULL) 1483181641Skmacy VM_WAIT; 1484181641Skmacy else { 1485181641Skmacy ptdpg[i++] = m; 1486181641Skmacy } 1487181641Skmacy } 1488228923Salc 1489181641Skmacy pmap_qenter((vm_offset_t)pmap->pm_pdir, ptdpg, NPGPTD); 1490228923Salc 1491228923Salc for (i = 0; i < NPGPTD; i++) 1492181641Skmacy if ((ptdpg[i]->flags & PG_ZERO) == 0) 1493228923Salc pagezero(pmap->pm_pdir + (i * NPDEPG)); 1494181641Skmacy 1495181641Skmacy mtx_lock_spin(&allpmaps_lock); 1496181641Skmacy LIST_INSERT_HEAD(&allpmaps, pmap, pm_list); 1497228923Salc /* Copy the kernel page table directory entries. 
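	 * Every pmap shares the kernel's page directory entries so that
	 * the kernel stays mapped in all address spaces; the bcopy()
	 * below is equivalent to the following illustrative expansion:
	 *
	 *	for (i = 0; i < nkpt; i++)
	 *		pmap->pm_pdir[KPTDI + i] = PTD[KPTDI + i];
	 *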
*/ 1498228923Salc bcopy(PTD + KPTDI, pmap->pm_pdir + KPTDI, nkpt * sizeof(pd_entry_t)); 1499181641Skmacy mtx_unlock_spin(&allpmaps_lock); 1500181641Skmacy 1501181641Skmacy#ifdef PAE 1502181641Skmacy pmap_qenter((vm_offset_t)pmap->pm_pdpt, &ptdpg[NPGPTD], 1); 1503181641Skmacy if ((ptdpg[NPGPTD]->flags & PG_ZERO) == 0) 1504181641Skmacy bzero(pmap->pm_pdpt, PAGE_SIZE); 1505181641Skmacy for (i = 0; i < NPGPTD; i++) { 1506181641Skmacy vm_paddr_t ma; 1507181641Skmacy 1508215587Scperciva ma = VM_PAGE_TO_MACH(ptdpg[i]); 1509181641Skmacy pmap->pm_pdpt[i] = ma | PG_V; 1510181641Skmacy 1511181641Skmacy } 1512181641Skmacy#endif 1513181641Skmacy for (i = 0; i < NPGPTD; i++) { 1514181641Skmacy pt_entry_t *pd; 1515181641Skmacy vm_paddr_t ma; 1516181641Skmacy 1517215587Scperciva ma = VM_PAGE_TO_MACH(ptdpg[i]); 1518181641Skmacy pd = pmap->pm_pdir + (i * NPDEPG); 1519181641Skmacy PT_SET_MA(pd, *vtopte((vm_offset_t)pd) & ~(PG_M|PG_A|PG_U|PG_RW)); 1520181641Skmacy#if 0 1521181641Skmacy xen_pgd_pin(ma); 1522181641Skmacy#endif 1523181641Skmacy } 1524181641Skmacy 1525181641Skmacy#ifdef PAE 1526181641Skmacy PT_SET_MA(pmap->pm_pdpt, *vtopte((vm_offset_t)pmap->pm_pdpt) & ~PG_RW); 1527181641Skmacy#endif 1528241498Salc rw_wlock(&pvh_global_lock); 1529181641Skmacy xen_flush_queue(); 1530215587Scperciva xen_pgdpt_pin(VM_PAGE_TO_MACH(ptdpg[NPGPTD])); 1531181641Skmacy for (i = 0; i < NPGPTD; i++) { 1532215587Scperciva vm_paddr_t ma = VM_PAGE_TO_MACH(ptdpg[i]); 1533181641Skmacy PT_SET_VA_MA(&pmap->pm_pdir[PTDPTDI + i], ma | PG_V | PG_A, FALSE); 1534181641Skmacy } 1535181641Skmacy xen_flush_queue(); 1536241498Salc rw_wunlock(&pvh_global_lock); 1537222813Sattilio CPU_ZERO(&pmap->pm_active); 1538181641Skmacy TAILQ_INIT(&pmap->pm_pvchunk); 1539181641Skmacy bzero(&pmap->pm_stats, sizeof pmap->pm_stats); 1540181641Skmacy 1541216960Scperciva#ifdef HAMFISTED_LOCKING 1542216960Scperciva mtx_unlock(&createdelete_lock); 1543216960Scperciva#endif 1544181641Skmacy return (1); 1545181641Skmacy} 1546181641Skmacy 1547181641Skmacy/* 1548181641Skmacy * this routine is called if the page table page is not 1549181641Skmacy * mapped correctly. 1550181641Skmacy */ 1551181641Skmacystatic vm_page_t 1552228923Salc_pmap_allocpte(pmap_t pmap, u_int ptepindex, int flags) 1553181641Skmacy{ 1554181641Skmacy vm_paddr_t ptema; 1555181641Skmacy vm_page_t m; 1556181641Skmacy 1557181641Skmacy KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT || 1558181641Skmacy (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK, 1559181641Skmacy ("_pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK")); 1560181641Skmacy 1561181641Skmacy /* 1562181641Skmacy * Allocate a page table page. 1563181641Skmacy */ 1564181641Skmacy if ((m = vm_page_alloc(NULL, ptepindex, VM_ALLOC_NOOBJ | 1565181641Skmacy VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) { 1566181641Skmacy if (flags & M_WAITOK) { 1567181641Skmacy PMAP_UNLOCK(pmap); 1568241498Salc rw_wunlock(&pvh_global_lock); 1569181641Skmacy VM_WAIT; 1570241498Salc rw_wlock(&pvh_global_lock); 1571181641Skmacy PMAP_LOCK(pmap); 1572181641Skmacy } 1573181641Skmacy 1574181641Skmacy /* 1575181641Skmacy * Indicate the need to retry. While waiting, the page table 1576181641Skmacy * page may have been allocated. 1577181641Skmacy */ 1578181641Skmacy return (NULL); 1579181641Skmacy } 1580181641Skmacy if ((m->flags & PG_ZERO) == 0) 1581181641Skmacy pmap_zero_page(m); 1582181641Skmacy 1583181641Skmacy /* 1584181641Skmacy * Map the pagetable page into the process address space, if 1585181641Skmacy * it isn't already there. 
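	 * The directory entry is installed with permissive bits,
	 *
	 *	pde = ptema | PG_U | PG_RW | PG_V | PG_A | PG_M;
	 *
	 * so that access is governed entirely by the individual PTEs
	 * within the new page; under Xen the page must also be pinned
	 * as an L1 table and referenced by its machine address, which
	 * is what xen_pt_pin() and PT_SET_VA_MA() below arrange.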
1586181641Skmacy */ 1587228923Salc 1588181641Skmacy pmap->pm_stats.resident_count++; 1589181641Skmacy 1590215587Scperciva ptema = VM_PAGE_TO_MACH(m); 1591181641Skmacy xen_pt_pin(ptema); 1592181641Skmacy PT_SET_VA_MA(&pmap->pm_pdir[ptepindex], 1593181641Skmacy (ptema | PG_U | PG_RW | PG_V | PG_A | PG_M), TRUE); 1594181641Skmacy 1595181641Skmacy KASSERT(pmap->pm_pdir[ptepindex], 1596181641Skmacy ("_pmap_allocpte: ptepindex=%d did not get mapped", ptepindex)); 1597181641Skmacy return (m); 1598181641Skmacy} 1599181641Skmacy 1600181641Skmacystatic vm_page_t 1601181641Skmacypmap_allocpte(pmap_t pmap, vm_offset_t va, int flags) 1602181641Skmacy{ 1603228923Salc u_int ptepindex; 1604181641Skmacy pd_entry_t ptema; 1605181641Skmacy vm_page_t m; 1606181641Skmacy 1607181641Skmacy KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT || 1608181641Skmacy (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK, 1609181641Skmacy ("pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK")); 1610181641Skmacy 1611181641Skmacy /* 1612181641Skmacy * Calculate pagetable page index 1613181641Skmacy */ 1614181641Skmacy ptepindex = va >> PDRSHIFT; 1615181641Skmacyretry: 1616181641Skmacy /* 1617181641Skmacy * Get the page directory entry 1618181641Skmacy */ 1619181641Skmacy ptema = pmap->pm_pdir[ptepindex]; 1620181641Skmacy 1621181641Skmacy /* 1622181641Skmacy * This supports switching from a 4MB page to a 1623181641Skmacy * normal 4K page. 1624181641Skmacy */ 1625181641Skmacy if (ptema & PG_PS) { 1626181641Skmacy /* 1627181641Skmacy * XXX 1628181641Skmacy */ 1629181641Skmacy pmap->pm_pdir[ptepindex] = 0; 1630181641Skmacy ptema = 0; 1631181641Skmacy pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE; 1632181641Skmacy pmap_invalidate_all(kernel_pmap); 1633181641Skmacy } 1634181641Skmacy 1635181641Skmacy /* 1636181641Skmacy * If the page table page is mapped, we just increment the 1637181641Skmacy * hold count, and activate it. 1638181641Skmacy */ 1639181641Skmacy if (ptema & PG_V) { 1640181641Skmacy m = PHYS_TO_VM_PAGE(xpmap_mtop(ptema) & PG_FRAME); 1641181641Skmacy m->wire_count++; 1642181641Skmacy } else { 1643181641Skmacy /* 1644181641Skmacy * Here if the pte page isn't mapped, or if it has 1645181641Skmacy * been deallocated. 1646181641Skmacy */ 1647181641Skmacy CTR3(KTR_PMAP, "pmap_allocpte: pmap=%p va=0x%08x flags=0x%x", 1648181641Skmacy pmap, va, flags); 1649181641Skmacy m = _pmap_allocpte(pmap, ptepindex, flags); 1650181641Skmacy if (m == NULL && (flags & M_WAITOK)) 1651181641Skmacy goto retry; 1652181641Skmacy 1653181641Skmacy KASSERT(pmap->pm_pdir[ptepindex], ("ptepindex=%d did not get mapped", ptepindex)); 1654181641Skmacy } 1655181641Skmacy return (m); 1656181641Skmacy} 1657181641Skmacy 1658181641Skmacy 1659181641Skmacy/*************************************************** 1660181641Skmacy* Pmap allocation/deallocation routines. 1661181641Skmacy ***************************************************/ 1662181641Skmacy 1663181641Skmacy#ifdef SMP 1664181641Skmacy/* 1665181641Skmacy * Deal with a SMP shootdown of other users of the pmap that we are 1666181641Skmacy * trying to dispose of. This can be a bit hairy. 
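 * The protocol, in sketch form: the disposing CPU publishes the target
 * %cr3 value in "lazyptd" and a pointer to the pmap's active set in
 * "lazymask", sends IPI_LAZYPMAP to the CPUs still in that set, and
 * spins on "lazywait"; each target CPU then runs the equivalent of
 *
 *	if (rcr3() == lazyptd)
 *		load_cr3(PCPU_GET(curpcb)->pcb_cr3);
 *	CPU_CLR_ATOMIC(PCPU_GET(cpuid), lazymask);
 *	atomic_store_rel_int(&lazywait, 1);
 *
 * which is exactly the body of pmap_lazyfix_action() below.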
1667181641Skmacy */ 1668222813Sattiliostatic cpuset_t *lazymask; 1669181641Skmacystatic u_int lazyptd; 1670181641Skmacystatic volatile u_int lazywait; 1671181641Skmacy 1672181641Skmacyvoid pmap_lazyfix_action(void); 1673181641Skmacy 1674181641Skmacyvoid 1675181641Skmacypmap_lazyfix_action(void) 1676181641Skmacy{ 1677181641Skmacy 1678181641Skmacy#ifdef COUNT_IPIS 1679181641Skmacy (*ipi_lazypmap_counts[PCPU_GET(cpuid)])++; 1680181641Skmacy#endif 1681181641Skmacy if (rcr3() == lazyptd) 1682181641Skmacy load_cr3(PCPU_GET(curpcb)->pcb_cr3); 1683222813Sattilio CPU_CLR_ATOMIC(PCPU_GET(cpuid), lazymask); 1684181641Skmacy atomic_store_rel_int(&lazywait, 1); 1685181641Skmacy} 1686181641Skmacy 1687181641Skmacystatic void 1688223758Sattiliopmap_lazyfix_self(u_int cpuid) 1689181641Skmacy{ 1690181641Skmacy 1691181641Skmacy if (rcr3() == lazyptd) 1692181641Skmacy load_cr3(PCPU_GET(curpcb)->pcb_cr3); 1693223758Sattilio CPU_CLR_ATOMIC(cpuid, lazymask); 1694181641Skmacy} 1695181641Skmacy 1696181641Skmacy 1697181641Skmacystatic void 1698181641Skmacypmap_lazyfix(pmap_t pmap) 1699181641Skmacy{ 1700222813Sattilio cpuset_t mymask, mask; 1701223758Sattilio u_int cpuid, spins; 1702222813Sattilio int lsb; 1703181641Skmacy 1704222813Sattilio mask = pmap->pm_active; 1705222813Sattilio while (!CPU_EMPTY(&mask)) { 1706181641Skmacy spins = 50000000; 1707222813Sattilio 1708222813Sattilio /* Find least significant set bit. */ 1709222813Sattilio lsb = cpusetobj_ffs(&mask); 1710222813Sattilio MPASS(lsb != 0); 1711222813Sattilio lsb--; 1712222813Sattilio CPU_SETOF(lsb, &mask); 1713181641Skmacy mtx_lock_spin(&smp_ipi_mtx); 1714181641Skmacy#ifdef PAE 1715181641Skmacy lazyptd = vtophys(pmap->pm_pdpt); 1716181641Skmacy#else 1717181641Skmacy lazyptd = vtophys(pmap->pm_pdir); 1718181641Skmacy#endif 1719223758Sattilio cpuid = PCPU_GET(cpuid); 1720223758Sattilio 1721223758Sattilio /* Use a cpuset just for having an easy check. */ 1722223758Sattilio CPU_SETOF(cpuid, &mymask); 1723222813Sattilio if (!CPU_CMP(&mask, &mymask)) { 1724181641Skmacy lazymask = &pmap->pm_active; 1725223758Sattilio pmap_lazyfix_self(cpuid); 1726181641Skmacy } else { 1727181641Skmacy atomic_store_rel_int((u_int *)&lazymask, 1728181641Skmacy (u_int)&pmap->pm_active); 1729181641Skmacy atomic_store_rel_int(&lazywait, 0); 1730181641Skmacy ipi_selected(mask, IPI_LAZYPMAP); 1731181641Skmacy while (lazywait == 0) { 1732181641Skmacy ia32_pause(); 1733181641Skmacy if (--spins == 0) 1734181641Skmacy break; 1735181641Skmacy } 1736181641Skmacy } 1737181641Skmacy mtx_unlock_spin(&smp_ipi_mtx); 1738181641Skmacy if (spins == 0) 1739181641Skmacy printf("pmap_lazyfix: spun for 50000000\n"); 1740222813Sattilio mask = pmap->pm_active; 1741181641Skmacy } 1742181641Skmacy} 1743181641Skmacy 1744181641Skmacy#else /* SMP */ 1745181641Skmacy 1746181641Skmacy/* 1747181641Skmacy * Cleaning up on uniprocessor is easy. For various reasons, we're 1748181641Skmacy * unlikely to have to even execute this code, including the fact 1749181641Skmacy * that the cleanup is deferred until the parent does a wait(2), which 1750181641Skmacy * means that another userland process has run. 
1751181641Skmacy */ 1752181641Skmacystatic void 1753181641Skmacypmap_lazyfix(pmap_t pmap) 1754181641Skmacy{ 1755181641Skmacy u_int cr3; 1756181641Skmacy 1757181641Skmacy cr3 = vtophys(pmap->pm_pdir); 1758181641Skmacy if (cr3 == rcr3()) { 1759181641Skmacy load_cr3(PCPU_GET(curpcb)->pcb_cr3); 1760222813Sattilio CPU_CLR(PCPU_GET(cpuid), &pmap->pm_active); 1761181641Skmacy } 1762181641Skmacy} 1763181641Skmacy#endif /* SMP */ 1764181641Skmacy 1765181641Skmacy/* 1766181641Skmacy * Release any resources held by the given physical map. 1767181641Skmacy * Called when a pmap initialized by pmap_pinit is being released. 1768181641Skmacy * Should only be called if the map contains no valid mappings. 1769181641Skmacy */ 1770181641Skmacyvoid 1771181641Skmacypmap_release(pmap_t pmap) 1772181641Skmacy{ 1773181641Skmacy vm_page_t m, ptdpg[2*NPGPTD+1]; 1774181641Skmacy vm_paddr_t ma; 1775181641Skmacy int i; 1776181641Skmacy#ifdef PAE 1777181641Skmacy int npgptd = NPGPTD + 1; 1778181641Skmacy#else 1779181641Skmacy int npgptd = NPGPTD; 1780181641Skmacy#endif 1781228923Salc 1782181641Skmacy KASSERT(pmap->pm_stats.resident_count == 0, 1783181641Skmacy ("pmap_release: pmap resident count %ld != 0", 1784181641Skmacy pmap->pm_stats.resident_count)); 1785181641Skmacy PT_UPDATES_FLUSH(); 1786181641Skmacy 1787216960Scperciva#ifdef HAMFISTED_LOCKING 1788216960Scperciva mtx_lock(&createdelete_lock); 1789216960Scperciva#endif 1790216960Scperciva 1791181641Skmacy pmap_lazyfix(pmap); 1792181641Skmacy mtx_lock_spin(&allpmaps_lock); 1793181641Skmacy LIST_REMOVE(pmap, pm_list); 1794181641Skmacy mtx_unlock_spin(&allpmaps_lock); 1795181641Skmacy 1796181641Skmacy for (i = 0; i < NPGPTD; i++) 1797181641Skmacy ptdpg[i] = PHYS_TO_VM_PAGE(vtophys(pmap->pm_pdir + (i*NPDEPG)) & PG_FRAME); 1798181641Skmacy pmap_qremove((vm_offset_t)pmap->pm_pdir, NPGPTD); 1799215593Scperciva#ifdef PAE 1800181641Skmacy ptdpg[NPGPTD] = PHYS_TO_VM_PAGE(vtophys(pmap->pm_pdpt)); 1801181641Skmacy#endif 1802181641Skmacy 1803181641Skmacy for (i = 0; i < npgptd; i++) { 1804181641Skmacy m = ptdpg[i]; 1805215587Scperciva ma = VM_PAGE_TO_MACH(m); 1806181641Skmacy /* unpinning L1 and L2 treated the same */ 1807215525Scperciva#if 0 1808181641Skmacy xen_pgd_unpin(ma); 1809215525Scperciva#else 1810215525Scperciva if (i == NPGPTD) 1811215525Scperciva xen_pgd_unpin(ma); 1812215525Scperciva#endif 1813181641Skmacy#ifdef PAE 1814215470Scperciva if (i < NPGPTD) 1815215587Scperciva KASSERT(VM_PAGE_TO_MACH(m) == (pmap->pm_pdpt[i] & PG_FRAME), 1816215470Scperciva ("pmap_release: got wrong ptd page")); 1817181641Skmacy#endif 1818181641Skmacy m->wire_count--; 1819181641Skmacy atomic_subtract_int(&cnt.v_wire_count, 1); 1820181641Skmacy vm_page_free(m); 1821181641Skmacy } 1822215472Scperciva#ifdef PAE 1823215472Scperciva pmap_qremove((vm_offset_t)pmap->pm_pdpt, 1); 1824215472Scperciva#endif 1825181641Skmacy PMAP_LOCK_DESTROY(pmap); 1826216960Scperciva 1827216960Scperciva#ifdef HAMFISTED_LOCKING 1828216960Scperciva mtx_unlock(&createdelete_lock); 1829216960Scperciva#endif 1830181641Skmacy} 1831181641Skmacy 1832181641Skmacystatic int 1833181641Skmacykvm_size(SYSCTL_HANDLER_ARGS) 1834181641Skmacy{ 1835181641Skmacy unsigned long ksize = VM_MAX_KERNEL_ADDRESS - KERNBASE; 1836181641Skmacy 1837228923Salc return (sysctl_handle_long(oidp, &ksize, 0, req)); 1838181641Skmacy} 1839181641SkmacySYSCTL_PROC(_vm, OID_AUTO, kvm_size, CTLTYPE_LONG|CTLFLAG_RD, 1840181641Skmacy 0, 0, kvm_size, "IU", "Size of KVM"); 1841181641Skmacy 1842181641Skmacystatic int 
1843181641Skmacykvm_free(SYSCTL_HANDLER_ARGS) 1844181641Skmacy{ 1845181641Skmacy unsigned long kfree = VM_MAX_KERNEL_ADDRESS - kernel_vm_end; 1846181641Skmacy 1847228923Salc return (sysctl_handle_long(oidp, &kfree, 0, req)); 1848181641Skmacy} 1849181641SkmacySYSCTL_PROC(_vm, OID_AUTO, kvm_free, CTLTYPE_LONG|CTLFLAG_RD, 1850181641Skmacy 0, 0, kvm_free, "IU", "Amount of KVM free"); 1851181641Skmacy 1852181641Skmacy/* 1853181641Skmacy * grow the number of kernel page table entries, if needed 1854181641Skmacy */ 1855181641Skmacyvoid 1856181641Skmacypmap_growkernel(vm_offset_t addr) 1857181641Skmacy{ 1858181641Skmacy struct pmap *pmap; 1859181641Skmacy vm_paddr_t ptppaddr; 1860181641Skmacy vm_page_t nkpg; 1861181641Skmacy pd_entry_t newpdir; 1862181641Skmacy 1863181641Skmacy mtx_assert(&kernel_map->system_mtx, MA_OWNED); 1864181641Skmacy if (kernel_vm_end == 0) { 1865181641Skmacy kernel_vm_end = KERNBASE; 1866181641Skmacy nkpt = 0; 1867181641Skmacy while (pdir_pde(PTD, kernel_vm_end)) { 1868181641Skmacy kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1); 1869181641Skmacy nkpt++; 1870181641Skmacy if (kernel_vm_end - 1 >= kernel_map->max_offset) { 1871181641Skmacy kernel_vm_end = kernel_map->max_offset; 1872181641Skmacy break; 1873181641Skmacy } 1874181641Skmacy } 1875181641Skmacy } 1876228923Salc addr = roundup2(addr, NBPDR); 1877181641Skmacy if (addr - 1 >= kernel_map->max_offset) 1878181641Skmacy addr = kernel_map->max_offset; 1879181641Skmacy while (kernel_vm_end < addr) { 1880181641Skmacy if (pdir_pde(PTD, kernel_vm_end)) { 1881228923Salc kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK; 1882181641Skmacy if (kernel_vm_end - 1 >= kernel_map->max_offset) { 1883181641Skmacy kernel_vm_end = kernel_map->max_offset; 1884181641Skmacy break; 1885181641Skmacy } 1886181641Skmacy continue; 1887181641Skmacy } 1888181641Skmacy 1889228923Salc nkpg = vm_page_alloc(NULL, kernel_vm_end >> PDRSHIFT, 1890228923Salc VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | 1891228923Salc VM_ALLOC_ZERO); 1892228923Salc if (nkpg == NULL) 1893181641Skmacy panic("pmap_growkernel: no memory to grow kernel"); 1894181641Skmacy 1895181641Skmacy nkpt++; 1896181641Skmacy 1897228923Salc if ((nkpg->flags & PG_ZERO) == 0) 1898228923Salc pmap_zero_page(nkpg); 1899181641Skmacy ptppaddr = VM_PAGE_TO_PHYS(nkpg); 1900181641Skmacy newpdir = (pd_entry_t) (ptppaddr | PG_V | PG_RW | PG_A | PG_M); 1901241498Salc rw_wlock(&pvh_global_lock); 1902181641Skmacy PD_SET_VA(kernel_pmap, (kernel_vm_end >> PDRSHIFT), newpdir, TRUE); 1903181641Skmacy mtx_lock_spin(&allpmaps_lock); 1904181641Skmacy LIST_FOREACH(pmap, &allpmaps, pm_list) 1905181641Skmacy PD_SET_VA(pmap, (kernel_vm_end >> PDRSHIFT), newpdir, TRUE); 1906181641Skmacy 1907181641Skmacy mtx_unlock_spin(&allpmaps_lock); 1908241498Salc rw_wunlock(&pvh_global_lock); 1909181946Skmacy 1910228923Salc kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK; 1911181641Skmacy if (kernel_vm_end - 1 >= kernel_map->max_offset) { 1912181641Skmacy kernel_vm_end = kernel_map->max_offset; 1913181641Skmacy break; 1914181641Skmacy } 1915181641Skmacy } 1916181641Skmacy} 1917181641Skmacy 1918181641Skmacy 1919181641Skmacy/*************************************************** 1920181641Skmacy * page management routines. 
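 *
 * A pv_chunk occupies exactly one page: on i386 a pv entry is 12
 * bytes, so 336 of them plus the 64-byte chunk header fill the
 * 4096-byte page (336 * 12 + 64 == 4096, checked by the CTASSERTs
 * below), and the free bitmap needs 11 32-bit words because
 * 336 == 10 * 32 + 16 -- hence PC_FREE10's low 16 bits.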
1921181641Skmacy ***************************************************/ 1922181641Skmacy 1923181641SkmacyCTASSERT(sizeof(struct pv_chunk) == PAGE_SIZE); 1924181641SkmacyCTASSERT(_NPCM == 11); 1925236291SalcCTASSERT(_NPCPV == 336); 1926181641Skmacy 1927181641Skmacystatic __inline struct pv_chunk * 1928181641Skmacypv_to_chunk(pv_entry_t pv) 1929181641Skmacy{ 1930181641Skmacy 1931228923Salc return ((struct pv_chunk *)((uintptr_t)pv & ~(uintptr_t)PAGE_MASK)); 1932181641Skmacy} 1933181641Skmacy 1934181641Skmacy#define PV_PMAP(pv) (pv_to_chunk(pv)->pc_pmap) 1935181641Skmacy 1936181641Skmacy#define PC_FREE0_9 0xfffffffful /* Free values for index 0 through 9 */ 1937181641Skmacy#define PC_FREE10 0x0000fffful /* Free values for index 10 */ 1938181641Skmacy 1939236534Salcstatic const uint32_t pc_freemask[_NPCM] = { 1940181641Skmacy PC_FREE0_9, PC_FREE0_9, PC_FREE0_9, 1941181641Skmacy PC_FREE0_9, PC_FREE0_9, PC_FREE0_9, 1942181641Skmacy PC_FREE0_9, PC_FREE0_9, PC_FREE0_9, 1943181641Skmacy PC_FREE0_9, PC_FREE10 1944181641Skmacy}; 1945181641Skmacy 1946181641SkmacySYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_count, CTLFLAG_RD, &pv_entry_count, 0, 1947181641Skmacy "Current number of pv entries"); 1948181641Skmacy 1949181641Skmacy#ifdef PV_STATS 1950181641Skmacystatic int pc_chunk_count, pc_chunk_allocs, pc_chunk_frees, pc_chunk_tryfail; 1951181641Skmacy 1952181641SkmacySYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_count, CTLFLAG_RD, &pc_chunk_count, 0, 1953181641Skmacy "Current number of pv entry chunks"); 1954181641SkmacySYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_allocs, CTLFLAG_RD, &pc_chunk_allocs, 0, 1955181641Skmacy "Current number of pv entry chunks allocated"); 1956181641SkmacySYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_frees, CTLFLAG_RD, &pc_chunk_frees, 0, 1957181641Skmacy "Current number of pv entry chunks frees"); 1958181641SkmacySYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_tryfail, CTLFLAG_RD, &pc_chunk_tryfail, 0, 1959181641Skmacy "Number of times tried to get a chunk page but failed."); 1960181641Skmacy 1961181641Skmacystatic long pv_entry_frees, pv_entry_allocs; 1962181641Skmacystatic int pv_entry_spare; 1963181641Skmacy 1964181641SkmacySYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_frees, CTLFLAG_RD, &pv_entry_frees, 0, 1965181641Skmacy "Current number of pv entry frees"); 1966181641SkmacySYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_allocs, CTLFLAG_RD, &pv_entry_allocs, 0, 1967181641Skmacy "Current number of pv entry allocs"); 1968181641SkmacySYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_spare, CTLFLAG_RD, &pv_entry_spare, 0, 1969181641Skmacy "Current number of spare pv entries"); 1970181641Skmacy#endif 1971181641Skmacy 1972181641Skmacy/* 1973181641Skmacy * We are in a serious low memory condition. Resort to 1974181641Skmacy * drastic measures to free some pages so we can allocate 1975236240Salc * another pv entry chunk. 
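 * The walk proceeds over the global "pv_chunks" list in LRU order,
 * tearing down every non-wired 4 KB mapping each chunk describes.
 * Allocated entries are found by inverting the chunk's free bitmap,
 * roughly:
 *
 *	inuse = ~pc->pc_map[field] & pc_freemask[field];
 *	while (inuse != 0) {
 *		bit = bsfl(inuse);
 *		inuse &= ~(1UL << bit);
 *		pv = &pc->pc_pventry[field * 32 + bit];
 *		(tear down the mapping at pv->pv_va)
 *	}
 *
 * A chunk whose entries are all freed has its page handed back to the
 * caller for reuse as a new chunk.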
1976181641Skmacy */ 1977236240Salcstatic vm_page_t 1978236240Salcpmap_pv_reclaim(pmap_t locked_pmap) 1979181641Skmacy{ 1980236240Salc struct pch newtail; 1981236240Salc struct pv_chunk *pc; 1982181641Skmacy pmap_t pmap; 1983181641Skmacy pt_entry_t *pte, tpte; 1984236240Salc pv_entry_t pv; 1985181641Skmacy vm_offset_t va; 1986236240Salc vm_page_t free, m, m_pc; 1987236534Salc uint32_t inuse; 1988236240Salc int bit, field, freed; 1989181641Skmacy 1990236240Salc PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED); 1991236240Salc pmap = NULL; 1992236240Salc free = m_pc = NULL; 1993236240Salc TAILQ_INIT(&newtail); 1994236240Salc while ((pc = TAILQ_FIRST(&pv_chunks)) != NULL && (pv_vafree == 0 || 1995236240Salc free == NULL)) { 1996236240Salc TAILQ_REMOVE(&pv_chunks, pc, pc_lru); 1997236240Salc if (pmap != pc->pc_pmap) { 1998236240Salc if (pmap != NULL) { 1999236240Salc pmap_invalidate_all(pmap); 2000236240Salc if (pmap != locked_pmap) 2001236240Salc PMAP_UNLOCK(pmap); 2002236240Salc } 2003236240Salc pmap = pc->pc_pmap; 2004181641Skmacy /* Avoid deadlock and lock recursion. */ 2005181641Skmacy if (pmap > locked_pmap) 2006181641Skmacy PMAP_LOCK(pmap); 2007236240Salc else if (pmap != locked_pmap && !PMAP_TRYLOCK(pmap)) { 2008236240Salc pmap = NULL; 2009236240Salc TAILQ_INSERT_TAIL(&newtail, pc, pc_lru); 2010181641Skmacy continue; 2011236240Salc } 2012181641Skmacy } 2013236240Salc 2014236240Salc /* 2015236240Salc * Destroy every non-wired, 4 KB page mapping in the chunk. 2016236240Salc */ 2017236240Salc freed = 0; 2018236240Salc for (field = 0; field < _NPCM; field++) { 2019236240Salc for (inuse = ~pc->pc_map[field] & pc_freemask[field]; 2020236240Salc inuse != 0; inuse &= ~(1UL << bit)) { 2021236240Salc bit = bsfl(inuse); 2022236240Salc pv = &pc->pc_pventry[field * 32 + bit]; 2023236240Salc va = pv->pv_va; 2024241353Salc pte = pmap_pte(pmap, va); 2025241353Salc tpte = *pte; 2026241353Salc if ((tpte & PG_W) == 0) 2027241353Salc tpte = pte_load_clear(pte); 2028241353Salc pmap_pte_release(pte); 2029241353Salc if ((tpte & PG_W) != 0) 2030236240Salc continue; 2031241400Salc KASSERT(tpte != 0, 2032241400Salc ("pmap_pv_reclaim: pmap %p va %x zero pte", 2033241400Salc pmap, va)); 2034236240Salc if ((tpte & PG_G) != 0) 2035236240Salc pmap_invalidate_page(pmap, va); 2036236240Salc m = PHYS_TO_VM_PAGE(tpte & PG_FRAME); 2037236240Salc if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) 2038236240Salc vm_page_dirty(m); 2039236240Salc if ((tpte & PG_A) != 0) 2040236240Salc vm_page_aflag_set(m, PGA_REFERENCED); 2041236240Salc TAILQ_REMOVE(&m->md.pv_list, pv, pv_list); 2042236240Salc if (TAILQ_EMPTY(&m->md.pv_list)) 2043236240Salc vm_page_aflag_clear(m, PGA_WRITEABLE); 2044236534Salc pc->pc_map[field] |= 1UL << bit; 2045236240Salc pmap_unuse_pt(pmap, va, &free); 2046236240Salc freed++; 2047236240Salc } 2048236240Salc } 2049236240Salc if (freed == 0) { 2050236240Salc TAILQ_INSERT_TAIL(&newtail, pc, pc_lru); 2051236240Salc continue; 2052236240Salc } 2053236534Salc /* Every freed mapping is for a 4 KB page. 
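		 * No superpage mappings carry pv entries here, so the
		 * resident count can simply drop by the number of
		 * entries freed.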
*/ 2054236240Salc pmap->pm_stats.resident_count -= freed; 2055236240Salc PV_STAT(pv_entry_frees += freed); 2056236240Salc PV_STAT(pv_entry_spare += freed); 2057236240Salc pv_entry_count -= freed; 2058236240Salc TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 2059236240Salc for (field = 0; field < _NPCM; field++) 2060236240Salc if (pc->pc_map[field] != pc_freemask[field]) { 2061236240Salc TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, 2062236240Salc pc_list); 2063236240Salc TAILQ_INSERT_TAIL(&newtail, pc, pc_lru); 2064236240Salc 2065236240Salc /* 2066236240Salc * One freed pv entry in locked_pmap is 2067236240Salc * sufficient. 2068236240Salc */ 2069236240Salc if (pmap == locked_pmap) 2070236240Salc goto out; 2071236240Salc break; 2072236240Salc } 2073236240Salc if (field == _NPCM) { 2074236240Salc PV_STAT(pv_entry_spare -= _NPCPV); 2075236240Salc PV_STAT(pc_chunk_count--); 2076236240Salc PV_STAT(pc_chunk_frees++); 2077236240Salc /* Entire chunk is free; return it. */ 2078236240Salc m_pc = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)pc)); 2079236240Salc pmap_qremove((vm_offset_t)pc, 1); 2080236240Salc pmap_ptelist_free(&pv_vafree, (vm_offset_t)pc); 2081236240Salc break; 2082236240Salc } 2083181641Skmacy } 2084236240Salcout: 2085236240Salc TAILQ_CONCAT(&pv_chunks, &newtail, pc_lru); 2086236240Salc if (pmap != NULL) { 2087236240Salc pmap_invalidate_all(pmap); 2088236240Salc if (pmap != locked_pmap) 2089236240Salc PMAP_UNLOCK(pmap); 2090236240Salc } 2091236240Salc if (m_pc == NULL && pv_vafree != 0 && free != NULL) { 2092236240Salc m_pc = free; 2093236240Salc free = m_pc->right; 2094236240Salc /* Recycle a freed page table page. */ 2095236240Salc m_pc->wire_count = 1; 2096236240Salc atomic_add_int(&cnt.v_wire_count, 1); 2097236240Salc } 2098236240Salc pmap_free_zero_pages(free); 2099236240Salc return (m_pc); 2100181641Skmacy} 2101181641Skmacy 2102181641Skmacy/* 2103181641Skmacy * free the pv_entry back to the free list 2104181641Skmacy */ 2105181641Skmacystatic void 2106181641Skmacyfree_pv_entry(pmap_t pmap, pv_entry_t pv) 2107181641Skmacy{ 2108181641Skmacy struct pv_chunk *pc; 2109181641Skmacy int idx, field, bit; 2110181641Skmacy 2111241498Salc rw_assert(&pvh_global_lock, RA_WLOCKED); 2112181641Skmacy PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2113181641Skmacy PV_STAT(pv_entry_frees++); 2114181641Skmacy PV_STAT(pv_entry_spare++); 2115181641Skmacy pv_entry_count--; 2116181641Skmacy pc = pv_to_chunk(pv); 2117181641Skmacy idx = pv - &pc->pc_pventry[0]; 2118181641Skmacy field = idx / 32; 2119181641Skmacy bit = idx % 32; 2120181641Skmacy pc->pc_map[field] |= 1ul << bit; 2121181641Skmacy for (idx = 0; idx < _NPCM; idx++) 2122228923Salc if (pc->pc_map[idx] != pc_freemask[idx]) { 2123236534Salc /* 2124236534Salc * 98% of the time, pc is already at the head of the 2125236534Salc * list. If it isn't already, move it to the head. 
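		 * Keeping chunks with free slots at the head makes
		 * get_pv_entry() refill them first, which concentrates
		 * the live entries in fewer chunks and lets wholly
		 * free chunks be released sooner.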
2126236534Salc */ 2127236534Salc if (__predict_false(TAILQ_FIRST(&pmap->pm_pvchunk) != 2128236534Salc pc)) { 2129236534Salc TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 2130236534Salc TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, 2131236534Salc pc_list); 2132236534Salc } 2133181641Skmacy return; 2134228923Salc } 2135236534Salc TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 2136236378Salc free_pv_chunk(pc); 2137236378Salc} 2138236378Salc 2139236378Salcstatic void 2140236378Salcfree_pv_chunk(struct pv_chunk *pc) 2141236378Salc{ 2142236378Salc vm_page_t m; 2143236378Salc 2144236240Salc TAILQ_REMOVE(&pv_chunks, pc, pc_lru); 2145181641Skmacy PV_STAT(pv_entry_spare -= _NPCPV); 2146181641Skmacy PV_STAT(pc_chunk_count--); 2147181641Skmacy PV_STAT(pc_chunk_frees++); 2148181641Skmacy /* entire chunk is free, return it */ 2149181641Skmacy m = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)pc)); 2150181641Skmacy pmap_qremove((vm_offset_t)pc, 1); 2151181641Skmacy vm_page_unwire(m, 0); 2152181641Skmacy vm_page_free(m); 2153181641Skmacy pmap_ptelist_free(&pv_vafree, (vm_offset_t)pc); 2154181641Skmacy} 2155181641Skmacy 2156181641Skmacy/* 2157181641Skmacy * get a new pv_entry, allocating a block from the system 2158181641Skmacy * when needed. 2159181641Skmacy */ 2160181641Skmacystatic pv_entry_t 2161236291Salcget_pv_entry(pmap_t pmap, boolean_t try) 2162181641Skmacy{ 2163181641Skmacy static const struct timeval printinterval = { 60, 0 }; 2164181641Skmacy static struct timeval lastprint; 2165181641Skmacy int bit, field; 2166181641Skmacy pv_entry_t pv; 2167181641Skmacy struct pv_chunk *pc; 2168181641Skmacy vm_page_t m; 2169181641Skmacy 2170181641Skmacy PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2171241498Salc rw_assert(&pvh_global_lock, RA_WLOCKED); 2172181641Skmacy PV_STAT(pv_entry_allocs++); 2173181641Skmacy pv_entry_count++; 2174181641Skmacy if (pv_entry_count > pv_entry_high_water) 2175181641Skmacy if (ratecheck(&lastprint, &printinterval)) 2176181641Skmacy printf("Approaching the limit on PV entries, consider " 2177181641Skmacy "increasing either the vm.pmap.shpgperproc or the " 2178181641Skmacy "vm.pmap.pv_entry_max tunable.\n"); 2179181641Skmacyretry: 2180181641Skmacy pc = TAILQ_FIRST(&pmap->pm_pvchunk); 2181181641Skmacy if (pc != NULL) { 2182181641Skmacy for (field = 0; field < _NPCM; field++) { 2183181641Skmacy if (pc->pc_map[field]) { 2184181641Skmacy bit = bsfl(pc->pc_map[field]); 2185181641Skmacy break; 2186181641Skmacy } 2187181641Skmacy } 2188181641Skmacy if (field < _NPCM) { 2189181641Skmacy pv = &pc->pc_pventry[field * 32 + bit]; 2190181641Skmacy pc->pc_map[field] &= ~(1ul << bit); 2191181641Skmacy /* If this was the last item, move it to tail */ 2192181641Skmacy for (field = 0; field < _NPCM; field++) 2193181641Skmacy if (pc->pc_map[field] != 0) { 2194181641Skmacy PV_STAT(pv_entry_spare--); 2195181641Skmacy return (pv); /* not full, return */ 2196181641Skmacy } 2197181641Skmacy TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 2198181641Skmacy TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list); 2199181641Skmacy PV_STAT(pv_entry_spare--); 2200181641Skmacy return (pv); 2201181641Skmacy } 2202181641Skmacy } 2203181641Skmacy /* 2204181641Skmacy * Access to the ptelist "pv_vafree" is synchronized by the page 2205181641Skmacy * queues lock. If "pv_vafree" is currently non-empty, it will 2206181641Skmacy * remain non-empty until pmap_ptelist_alloc() completes. 
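	 * (In this revision the exclusive pvh_global_lock provides
	 * that synchronization, as the rw_assert() above requires.)
	 * The allocation falls back in stages, sketched as:
	 *
	 *	if (pv_vafree == 0 ||
	 *	    (m = vm_page_alloc(...)) == NULL) {
	 *		if (try)
	 *			return (NULL);
	 *		m = pmap_pv_reclaim(pmap);
	 *		if (m == NULL)
	 *			goto retry;
	 *	}
	 *
	 * i.e. reclamation is the last resort before retrying.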
2207181641Skmacy */ 2208236240Salc if (pv_vafree == 0 || (m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | 2209181641Skmacy VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) { 2210181641Skmacy if (try) { 2211181641Skmacy pv_entry_count--; 2212181641Skmacy PV_STAT(pc_chunk_tryfail++); 2213181641Skmacy return (NULL); 2214181641Skmacy } 2215236240Salc m = pmap_pv_reclaim(pmap); 2216236240Salc if (m == NULL) 2217236240Salc goto retry; 2218181641Skmacy } 2219181641Skmacy PV_STAT(pc_chunk_count++); 2220181641Skmacy PV_STAT(pc_chunk_allocs++); 2221181641Skmacy pc = (struct pv_chunk *)pmap_ptelist_alloc(&pv_vafree); 2222181641Skmacy pmap_qenter((vm_offset_t)pc, &m, 1); 2223181641Skmacy if ((m->flags & PG_ZERO) == 0) 2224181641Skmacy pagezero(pc); 2225181641Skmacy pc->pc_pmap = pmap; 2226181641Skmacy pc->pc_map[0] = pc_freemask[0] & ~1ul; /* preallocated bit 0 */ 2227181641Skmacy for (field = 1; field < _NPCM; field++) 2228181641Skmacy pc->pc_map[field] = pc_freemask[field]; 2229236240Salc TAILQ_INSERT_TAIL(&pv_chunks, pc, pc_lru); 2230181641Skmacy pv = &pc->pc_pventry[0]; 2231181641Skmacy TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list); 2232181641Skmacy PV_STAT(pv_entry_spare += _NPCPV - 1); 2233181641Skmacy return (pv); 2234181641Skmacy} 2235181641Skmacy 2236208651Salcstatic __inline pv_entry_t 2237208651Salcpmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va) 2238181641Skmacy{ 2239181641Skmacy pv_entry_t pv; 2240181641Skmacy 2241241498Salc rw_assert(&pvh_global_lock, RA_WLOCKED); 2242208651Salc TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) { 2243208651Salc if (pmap == PV_PMAP(pv) && va == pv->pv_va) { 2244208651Salc TAILQ_REMOVE(&pvh->pv_list, pv, pv_list); 2245181641Skmacy break; 2246208651Salc } 2247181641Skmacy } 2248208651Salc return (pv); 2249181641Skmacy} 2250181641Skmacy 2251181641Skmacystatic void 2252208651Salcpmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va) 2253181641Skmacy{ 2254181641Skmacy pv_entry_t pv; 2255181641Skmacy 2256208651Salc pv = pmap_pvh_remove(pvh, pmap, va); 2257208651Salc KASSERT(pv != NULL, ("pmap_pvh_free: pv not found")); 2258208651Salc free_pv_entry(pmap, pv); 2259208651Salc} 2260208651Salc 2261208651Salcstatic void 2262208651Salcpmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va) 2263208651Salc{ 2264208651Salc 2265241498Salc rw_assert(&pvh_global_lock, RA_WLOCKED); 2266208651Salc pmap_pvh_free(&m->md, pmap, va); 2267208651Salc if (TAILQ_EMPTY(&m->md.pv_list)) 2268225418Skib vm_page_aflag_clear(m, PGA_WRITEABLE); 2269181641Skmacy} 2270181641Skmacy 2271181641Skmacy/* 2272181641Skmacy * Conditionally create a pv entry. 
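 * "Conditionally" means without sleeping or reclaiming: the entry is
 * taken with get_pv_entry(pmap, TRUE), which fails rather than blocks,
 * and no attempt is made at all once pv_entry_high_water has been
 * reached, so a caller such as pmap_enter_quick_locked() can simply
 * decline to enter the mapping.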
2273181641Skmacy */ 2274181641Skmacystatic boolean_t 2275181641Skmacypmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m) 2276181641Skmacy{ 2277181641Skmacy pv_entry_t pv; 2278181641Skmacy 2279181641Skmacy PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2280241498Salc rw_assert(&pvh_global_lock, RA_WLOCKED); 2281181641Skmacy if (pv_entry_count < pv_entry_high_water && 2282181641Skmacy (pv = get_pv_entry(pmap, TRUE)) != NULL) { 2283181641Skmacy pv->pv_va = va; 2284181641Skmacy TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list); 2285181641Skmacy return (TRUE); 2286181641Skmacy } else 2287181641Skmacy return (FALSE); 2288181641Skmacy} 2289181641Skmacy 2290181641Skmacy/* 2291181641Skmacy * pmap_remove_pte: do the things to unmap a page in a process 2292181641Skmacy */ 2293181641Skmacystatic int 2294181641Skmacypmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t va, vm_page_t *free) 2295181641Skmacy{ 2296181641Skmacy pt_entry_t oldpte; 2297181641Skmacy vm_page_t m; 2298181641Skmacy 2299181641Skmacy CTR3(KTR_PMAP, "pmap_remove_pte: pmap=%p *ptq=0x%x va=0x%x", 2300181641Skmacy pmap, (u_long)*ptq, va); 2301181641Skmacy 2302241498Salc rw_assert(&pvh_global_lock, RA_WLOCKED); 2303181641Skmacy PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2304181641Skmacy oldpte = *ptq; 2305181641Skmacy PT_SET_VA_MA(ptq, 0, TRUE); 2306241400Salc KASSERT(oldpte != 0, 2307241400Salc ("pmap_remove_pte: pmap %p va %x zero pte", pmap, va)); 2308181641Skmacy if (oldpte & PG_W) 2309181641Skmacy pmap->pm_stats.wired_count -= 1; 2310181641Skmacy /* 2311181641Skmacy * Machines that don't support invlpg, also don't support 2312181641Skmacy * PG_G. 2313181641Skmacy */ 2314181641Skmacy if (oldpte & PG_G) 2315181641Skmacy pmap_invalidate_page(kernel_pmap, va); 2316181641Skmacy pmap->pm_stats.resident_count -= 1; 2317216762Scperciva if (oldpte & PG_MANAGED) { 2318181641Skmacy m = PHYS_TO_VM_PAGE(xpmap_mtop(oldpte) & PG_FRAME); 2319208651Salc if ((oldpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) 2320181641Skmacy vm_page_dirty(m); 2321181641Skmacy if (oldpte & PG_A) 2322225418Skib vm_page_aflag_set(m, PGA_REFERENCED); 2323181641Skmacy pmap_remove_entry(pmap, m, va); 2324216762Scperciva } 2325181641Skmacy return (pmap_unuse_pt(pmap, va, free)); 2326181641Skmacy} 2327181641Skmacy 2328181641Skmacy/* 2329181641Skmacy * Remove a single page from a process address space 2330181641Skmacy */ 2331181641Skmacystatic void 2332181641Skmacypmap_remove_page(pmap_t pmap, vm_offset_t va, vm_page_t *free) 2333181641Skmacy{ 2334181641Skmacy pt_entry_t *pte; 2335181641Skmacy 2336181641Skmacy CTR2(KTR_PMAP, "pmap_remove_page: pmap=%p va=0x%x", 2337181641Skmacy pmap, va); 2338181641Skmacy 2339241498Salc rw_assert(&pvh_global_lock, RA_WLOCKED); 2340181641Skmacy KASSERT(curthread->td_pinned > 0, ("curthread not pinned")); 2341181641Skmacy PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2342181641Skmacy if ((pte = pmap_pte_quick(pmap, va)) == NULL || (*pte & PG_V) == 0) 2343181641Skmacy return; 2344181641Skmacy pmap_remove_pte(pmap, pte, va, free); 2345181641Skmacy pmap_invalidate_page(pmap, va); 2346181641Skmacy if (*PMAP1) 2347181641Skmacy PT_SET_MA(PADDR1, 0); 2348181641Skmacy 2349181641Skmacy} 2350181641Skmacy 2351181641Skmacy/* 2352181641Skmacy * Remove the given range of addresses from the specified map. 2353181641Skmacy * 2354181641Skmacy * It is assumed that the start and end are properly 2355181641Skmacy * rounded to the page size. 
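 * A (hypothetical) caller that is unsure of its bounds would round
 * them first, e.g.:
 *
 *	pmap_remove(pmap, trunc_page(sva), round_page(eva));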
2356181641Skmacy */ 2357181641Skmacyvoid 2358181641Skmacypmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) 2359181641Skmacy{ 2360181641Skmacy vm_offset_t pdnxt; 2361181641Skmacy pd_entry_t ptpaddr; 2362181641Skmacy pt_entry_t *pte; 2363181641Skmacy vm_page_t free = NULL; 2364181641Skmacy int anyvalid; 2365228923Salc 2366181641Skmacy CTR3(KTR_PMAP, "pmap_remove: pmap=%p sva=0x%x eva=0x%x", 2367181641Skmacy pmap, sva, eva); 2368228923Salc 2369181641Skmacy /* 2370181641Skmacy * Perform an unsynchronized read. This is, however, safe. 2371181641Skmacy */ 2372181641Skmacy if (pmap->pm_stats.resident_count == 0) 2373181641Skmacy return; 2374181641Skmacy 2375181641Skmacy anyvalid = 0; 2376181641Skmacy 2377241498Salc rw_wlock(&pvh_global_lock); 2378181641Skmacy sched_pin(); 2379181641Skmacy PMAP_LOCK(pmap); 2380181641Skmacy 2381181641Skmacy /* 2382181641Skmacy * special handling of removing one page. a very 2383181641Skmacy * common operation and easy to short circuit some 2384181641Skmacy * code. 2385181641Skmacy */ 2386181641Skmacy if ((sva + PAGE_SIZE == eva) && 2387181641Skmacy ((pmap->pm_pdir[(sva >> PDRSHIFT)] & PG_PS) == 0)) { 2388181641Skmacy pmap_remove_page(pmap, sva, &free); 2389181641Skmacy goto out; 2390181641Skmacy } 2391181641Skmacy 2392181641Skmacy for (; sva < eva; sva = pdnxt) { 2393228923Salc u_int pdirindex; 2394181641Skmacy 2395181641Skmacy /* 2396181641Skmacy * Calculate index for next page table. 2397181641Skmacy */ 2398181641Skmacy pdnxt = (sva + NBPDR) & ~PDRMASK; 2399229007Salc if (pdnxt < sva) 2400229007Salc pdnxt = eva; 2401181641Skmacy if (pmap->pm_stats.resident_count == 0) 2402181641Skmacy break; 2403181641Skmacy 2404181641Skmacy pdirindex = sva >> PDRSHIFT; 2405181641Skmacy ptpaddr = pmap->pm_pdir[pdirindex]; 2406181641Skmacy 2407181641Skmacy /* 2408181641Skmacy * Weed out invalid mappings. Note: we assume that the page 2409181641Skmacy * directory table is always allocated, and in kernel virtual. 2410181641Skmacy */ 2411181641Skmacy if (ptpaddr == 0) 2412181641Skmacy continue; 2413181641Skmacy 2414181641Skmacy /* 2415181641Skmacy * Check for large page. 2416181641Skmacy */ 2417181641Skmacy if ((ptpaddr & PG_PS) != 0) { 2418181641Skmacy PD_CLEAR_VA(pmap, pdirindex, TRUE); 2419181641Skmacy pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE; 2420181641Skmacy anyvalid = 1; 2421181641Skmacy continue; 2422181641Skmacy } 2423181641Skmacy 2424181641Skmacy /* 2425181641Skmacy * Limit our scan to either the end of the va represented 2426181641Skmacy * by the current page table page, or to the end of the 2427181641Skmacy * range being removed. 2428181641Skmacy */ 2429181641Skmacy if (pdnxt > eva) 2430181641Skmacy pdnxt = eva; 2431181641Skmacy 2432181641Skmacy for (pte = pmap_pte_quick(pmap, sva); sva != pdnxt; pte++, 2433181641Skmacy sva += PAGE_SIZE) { 2434181641Skmacy if ((*pte & PG_V) == 0) 2435181641Skmacy continue; 2436181641Skmacy 2437181641Skmacy /* 2438181641Skmacy * The TLB entry for a PG_G mapping is invalidated 2439181641Skmacy * by pmap_remove_pte(). 
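			 * (On native i386 a %cr3 reload does not evict
			 * PG_G translations, so global pages must be
			 * flushed individually while everything else
			 * is batched into the single
			 * pmap_invalidate_all() issued under
			 * "anyvalid" at the end.)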
2440181641Skmacy */ 2441181641Skmacy if ((*pte & PG_G) == 0) 2442181641Skmacy anyvalid = 1; 2443181641Skmacy if (pmap_remove_pte(pmap, pte, sva, &free)) 2444181641Skmacy break; 2445181641Skmacy } 2446181641Skmacy } 2447181641Skmacy PT_UPDATES_FLUSH(); 2448181641Skmacy if (*PMAP1) 2449181641Skmacy PT_SET_VA_MA(PMAP1, 0, TRUE); 2450181641Skmacyout: 2451181641Skmacy if (anyvalid) 2452181641Skmacy pmap_invalidate_all(pmap); 2453181641Skmacy sched_unpin(); 2454241498Salc rw_wunlock(&pvh_global_lock); 2455181641Skmacy PMAP_UNLOCK(pmap); 2456181641Skmacy pmap_free_zero_pages(free); 2457181641Skmacy} 2458181641Skmacy 2459181641Skmacy/* 2460181641Skmacy * Routine: pmap_remove_all 2461181641Skmacy * Function: 2462181641Skmacy * Removes this physical page from 2463181641Skmacy * all physical maps in which it resides. 2464181641Skmacy * Reflects back modify bits to the pager. 2465181641Skmacy * 2466181641Skmacy * Notes: 2467181641Skmacy * Original versions of this routine were very 2468181641Skmacy * inefficient because they iteratively called 2469181641Skmacy * pmap_remove (slow...) 2470181641Skmacy */ 2471181641Skmacy 2472181641Skmacyvoid 2473181641Skmacypmap_remove_all(vm_page_t m) 2474181641Skmacy{ 2475181641Skmacy pv_entry_t pv; 2476181641Skmacy pmap_t pmap; 2477181641Skmacy pt_entry_t *pte, tpte; 2478181641Skmacy vm_page_t free; 2479181641Skmacy 2480224746Skib KASSERT((m->oflags & VPO_UNMANAGED) == 0, 2481223677Salc ("pmap_remove_all: page %p is not managed", m)); 2482208651Salc free = NULL; 2483241498Salc rw_wlock(&pvh_global_lock); 2484181641Skmacy sched_pin(); 2485181641Skmacy while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) { 2486181641Skmacy pmap = PV_PMAP(pv); 2487181641Skmacy PMAP_LOCK(pmap); 2488181641Skmacy pmap->pm_stats.resident_count--; 2489181641Skmacy pte = pmap_pte_quick(pmap, pv->pv_va); 2490181641Skmacy tpte = *pte; 2491181641Skmacy PT_SET_VA_MA(pte, 0, TRUE); 2492241400Salc KASSERT(tpte != 0, ("pmap_remove_all: pmap %p va %x zero pte", 2493241400Salc pmap, pv->pv_va)); 2494181641Skmacy if (tpte & PG_W) 2495181641Skmacy pmap->pm_stats.wired_count--; 2496181641Skmacy if (tpte & PG_A) 2497225418Skib vm_page_aflag_set(m, PGA_REFERENCED); 2498181641Skmacy 2499181641Skmacy /* 2500181641Skmacy * Update the vm_page_t clean and reference bits. 2501181641Skmacy */ 2502208651Salc if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) 2503181641Skmacy vm_page_dirty(m); 2504181641Skmacy pmap_unuse_pt(pmap, pv->pv_va, &free); 2505181641Skmacy pmap_invalidate_page(pmap, pv->pv_va); 2506181641Skmacy TAILQ_REMOVE(&m->md.pv_list, pv, pv_list); 2507181641Skmacy free_pv_entry(pmap, pv); 2508181641Skmacy PMAP_UNLOCK(pmap); 2509181641Skmacy } 2510225418Skib vm_page_aflag_clear(m, PGA_WRITEABLE); 2511181641Skmacy PT_UPDATES_FLUSH(); 2512181641Skmacy if (*PMAP1) 2513181641Skmacy PT_SET_MA(PADDR1, 0); 2514181641Skmacy sched_unpin(); 2515241498Salc rw_wunlock(&pvh_global_lock); 2516208651Salc pmap_free_zero_pages(free); 2517181641Skmacy} 2518181641Skmacy 2519181641Skmacy/* 2520181641Skmacy * Set the physical protection on the 2521181641Skmacy * specified range of this map as requested. 
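 * For example (a hypothetical call), write access can be revoked from
 * a single page with:
 *
 *	pmap_protect(pmap, va, va + PAGE_SIZE, VM_PROT_READ);
 *
 * A request that removes all access is forwarded to pmap_remove(), and
 * one that takes nothing away returns immediately, as the checks below
 * show.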
2522181641Skmacy */ 2523181641Skmacyvoid 2524181641Skmacypmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot) 2525181641Skmacy{ 2526181641Skmacy vm_offset_t pdnxt; 2527181641Skmacy pd_entry_t ptpaddr; 2528181641Skmacy pt_entry_t *pte; 2529181641Skmacy int anychanged; 2530181641Skmacy 2531181641Skmacy CTR4(KTR_PMAP, "pmap_protect: pmap=%p sva=0x%x eva=0x%x prot=0x%x", 2532181641Skmacy pmap, sva, eva, prot); 2533181641Skmacy 2534181641Skmacy if ((prot & VM_PROT_READ) == VM_PROT_NONE) { 2535181641Skmacy pmap_remove(pmap, sva, eva); 2536181641Skmacy return; 2537181641Skmacy } 2538181641Skmacy 2539181641Skmacy#ifdef PAE 2540181641Skmacy if ((prot & (VM_PROT_WRITE|VM_PROT_EXECUTE)) == 2541181641Skmacy (VM_PROT_WRITE|VM_PROT_EXECUTE)) 2542181641Skmacy return; 2543181641Skmacy#else 2544181641Skmacy if (prot & VM_PROT_WRITE) 2545181641Skmacy return; 2546181641Skmacy#endif 2547181641Skmacy 2548181641Skmacy anychanged = 0; 2549181641Skmacy 2550241498Salc rw_wlock(&pvh_global_lock); 2551181641Skmacy sched_pin(); 2552181641Skmacy PMAP_LOCK(pmap); 2553181641Skmacy for (; sva < eva; sva = pdnxt) { 2554181641Skmacy pt_entry_t obits, pbits; 2555228923Salc u_int pdirindex; 2556181641Skmacy 2557181641Skmacy pdnxt = (sva + NBPDR) & ~PDRMASK; 2558229007Salc if (pdnxt < sva) 2559229007Salc pdnxt = eva; 2560181641Skmacy 2561181641Skmacy pdirindex = sva >> PDRSHIFT; 2562181641Skmacy ptpaddr = pmap->pm_pdir[pdirindex]; 2563181641Skmacy 2564181641Skmacy /* 2565181641Skmacy * Weed out invalid mappings. Note: we assume that the page 2566181641Skmacy * directory table is always allocated, and in kernel virtual. 2567181641Skmacy */ 2568181641Skmacy if (ptpaddr == 0) 2569181641Skmacy continue; 2570181641Skmacy 2571181641Skmacy /* 2572181641Skmacy * Check for large page. 2573181641Skmacy */ 2574181641Skmacy if ((ptpaddr & PG_PS) != 0) { 2575181641Skmacy if ((prot & VM_PROT_WRITE) == 0) 2576181641Skmacy pmap->pm_pdir[pdirindex] &= ~(PG_M|PG_RW); 2577181641Skmacy#ifdef PAE 2578181641Skmacy if ((prot & VM_PROT_EXECUTE) == 0) 2579181641Skmacy pmap->pm_pdir[pdirindex] |= pg_nx; 2580181641Skmacy#endif 2581181641Skmacy anychanged = 1; 2582181641Skmacy continue; 2583181641Skmacy } 2584181641Skmacy 2585181641Skmacy if (pdnxt > eva) 2586181641Skmacy pdnxt = eva; 2587181641Skmacy 2588181641Skmacy for (pte = pmap_pte_quick(pmap, sva); sva != pdnxt; pte++, 2589181641Skmacy sva += PAGE_SIZE) { 2590181641Skmacy vm_page_t m; 2591181641Skmacy 2592181641Skmacyretry: 2593181641Skmacy /* 2594181641Skmacy * Regardless of whether a pte is 32 or 64 bits in 2595181641Skmacy * size, PG_RW, PG_A, and PG_M are among the least 2596181641Skmacy * significant 32 bits. 
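			 * This is what keeps the compare-and-retry
			 * scheme below safe even for 64-bit PAE PTEs:
			 * a concurrent hardware update of PG_A or PG_M
			 * lands in the low word, is caught by the
			 * re-read of *pte, and simply causes another
			 * retry.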
2597181641Skmacy */ 2598181641Skmacy obits = pbits = *pte; 2599181641Skmacy if ((pbits & PG_V) == 0) 2600181641Skmacy continue; 2601207262Salc 2602207262Salc if ((prot & VM_PROT_WRITE) == 0) { 2603207262Salc if ((pbits & (PG_MANAGED | PG_M | PG_RW)) == 2604207262Salc (PG_MANAGED | PG_M | PG_RW)) { 2605207262Salc m = PHYS_TO_VM_PAGE(xpmap_mtop(pbits) & 2606207262Salc PG_FRAME); 2607181641Skmacy vm_page_dirty(m); 2608181641Skmacy } 2609207262Salc pbits &= ~(PG_RW | PG_M); 2610181641Skmacy } 2611181641Skmacy#ifdef PAE 2612181641Skmacy if ((prot & VM_PROT_EXECUTE) == 0) 2613181641Skmacy pbits |= pg_nx; 2614181641Skmacy#endif 2615181641Skmacy 2616181641Skmacy if (pbits != obits) { 2617181641Skmacy obits = *pte; 2618181641Skmacy PT_SET_VA_MA(pte, pbits, TRUE); 2619181641Skmacy if (*pte != pbits) 2620181641Skmacy goto retry; 2621181641Skmacy if (obits & PG_G) 2622181641Skmacy pmap_invalidate_page(pmap, sva); 2623181641Skmacy else 2624181641Skmacy anychanged = 1; 2625181641Skmacy } 2626181641Skmacy } 2627181641Skmacy } 2628181641Skmacy PT_UPDATES_FLUSH(); 2629181641Skmacy if (*PMAP1) 2630181641Skmacy PT_SET_VA_MA(PMAP1, 0, TRUE); 2631181641Skmacy if (anychanged) 2632181641Skmacy pmap_invalidate_all(pmap); 2633181641Skmacy sched_unpin(); 2634241498Salc rw_wunlock(&pvh_global_lock); 2635181641Skmacy PMAP_UNLOCK(pmap); 2636181641Skmacy} 2637181641Skmacy 2638181641Skmacy/* 2639181641Skmacy * Insert the given physical page (p) at 2640181641Skmacy * the specified virtual address (v) in the 2641181641Skmacy * target physical map with the protection requested. 2642181641Skmacy * 2643181641Skmacy * If specified, the page will be wired down, meaning 2644181641Skmacy * that the related pte can not be reclaimed. 2645181641Skmacy * 2646181641Skmacy * NB: This is the only routine which MAY NOT lazy-evaluate 2647181641Skmacy * or lose information. That is, this routine must actually 2648181641Skmacy * insert this page into the given map NOW. 2649181641Skmacy */ 2650181641Skmacyvoid 2651181641Skmacypmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m, 2652181641Skmacy vm_prot_t prot, boolean_t wired) 2653181641Skmacy{ 2654181641Skmacy pd_entry_t *pde; 2655181641Skmacy pt_entry_t *pte; 2656208651Salc pt_entry_t newpte, origpte; 2657208651Salc pv_entry_t pv; 2658208651Salc vm_paddr_t opa, pa; 2659181641Skmacy vm_page_t mpte, om; 2660181641Skmacy boolean_t invlva; 2661181641Skmacy 2662181641Skmacy CTR6(KTR_PMAP, "pmap_enter: pmap=%08p va=0x%08x access=0x%x ma=0x%08x prot=0x%x wired=%d", 2663215587Scperciva pmap, va, access, VM_PAGE_TO_MACH(m), prot, wired); 2664181641Skmacy va = trunc_page(va); 2665208651Salc KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig")); 2666208651Salc KASSERT(va < UPT_MIN_ADDRESS || va >= UPT_MAX_ADDRESS, 2667208175Salc ("pmap_enter: invalid to pmap_enter page table pages (va: 0x%x)", 2668208175Salc va)); 2669228923Salc KASSERT((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) != 0 || 2670228923Salc VM_OBJECT_LOCKED(m->object), 2671208175Salc ("pmap_enter: page %p is not busy", m)); 2672181641Skmacy 2673181641Skmacy mpte = NULL; 2674181641Skmacy 2675241498Salc rw_wlock(&pvh_global_lock); 2676181641Skmacy PMAP_LOCK(pmap); 2677181641Skmacy sched_pin(); 2678181641Skmacy 2679181641Skmacy /* 2680181641Skmacy * In the case that a page table page is not 2681181641Skmacy * resident, we are creating it here. 
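	 * (For a user address the page table page is allocated with
	 * M_WAITOK, so pmap_enter() may sleep here; kernel page table
	 * pages are expected to exist already.)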
2682181641Skmacy */ 2683181641Skmacy if (va < VM_MAXUSER_ADDRESS) { 2684181641Skmacy mpte = pmap_allocpte(pmap, va, M_WAITOK); 2685181641Skmacy } 2686181641Skmacy 2687181641Skmacy pde = pmap_pde(pmap, va); 2688181641Skmacy if ((*pde & PG_PS) != 0) 2689181641Skmacy panic("pmap_enter: attempted pmap_enter on 4MB page"); 2690181641Skmacy pte = pmap_pte_quick(pmap, va); 2691181641Skmacy 2692181641Skmacy /* 2693181641Skmacy * Page Directory table entry not valid, we need a new PT page 2694181641Skmacy */ 2695181641Skmacy if (pte == NULL) { 2696208651Salc panic("pmap_enter: invalid page directory pdir=%#jx, va=%#x", 2697181641Skmacy (uintmax_t)pmap->pm_pdir[va >> PDRSHIFT], va); 2698181641Skmacy } 2699181641Skmacy 2700181641Skmacy pa = VM_PAGE_TO_PHYS(m); 2701181641Skmacy om = NULL; 2702181641Skmacy opa = origpte = 0; 2703181641Skmacy 2704181641Skmacy#if 0 2705181641Skmacy KASSERT((*pte & PG_V) || (*pte == 0), ("address set but not valid pte=%p *pte=0x%016jx", 2706181641Skmacy pte, *pte)); 2707181641Skmacy#endif 2708181641Skmacy origpte = *pte; 2709181641Skmacy if (origpte) 2710181641Skmacy origpte = xpmap_mtop(origpte); 2711181641Skmacy opa = origpte & PG_FRAME; 2712181641Skmacy 2713181641Skmacy /* 2714181641Skmacy * Mapping has not changed, must be protection or wiring change. 2715181641Skmacy */ 2716181641Skmacy if (origpte && (opa == pa)) { 2717181641Skmacy /* 2718181641Skmacy * Wiring change, just update stats. We don't worry about 2719181641Skmacy * wiring PT pages as they remain resident as long as there 2720181641Skmacy * are valid mappings in them. Hence, if a user page is wired, 2721181641Skmacy * the PT page will be also. 2722181641Skmacy */ 2723181641Skmacy if (wired && ((origpte & PG_W) == 0)) 2724181641Skmacy pmap->pm_stats.wired_count++; 2725181641Skmacy else if (!wired && (origpte & PG_W)) 2726181641Skmacy pmap->pm_stats.wired_count--; 2727181641Skmacy 2728181641Skmacy /* 2729181641Skmacy * Remove extra pte reference 2730181641Skmacy */ 2731181641Skmacy if (mpte) 2732181641Skmacy mpte->wire_count--; 2733181641Skmacy 2734181641Skmacy if (origpte & PG_MANAGED) { 2735181641Skmacy om = m; 2736181641Skmacy pa |= PG_MANAGED; 2737181641Skmacy } 2738181641Skmacy goto validate; 2739181641Skmacy } 2740208651Salc 2741208651Salc pv = NULL; 2742208651Salc 2743181641Skmacy /* 2744181641Skmacy * Mapping has changed, invalidate old range and fall through to 2745181641Skmacy * handle validating new mapping. 2746181641Skmacy */ 2747181641Skmacy if (opa) { 2748181641Skmacy if (origpte & PG_W) 2749181641Skmacy pmap->pm_stats.wired_count--; 2750181641Skmacy if (origpte & PG_MANAGED) { 2751181641Skmacy om = PHYS_TO_VM_PAGE(opa); 2752208651Salc pv = pmap_pvh_remove(&om->md, pmap, va); 2753181641Skmacy } else if (va < VM_MAXUSER_ADDRESS) 2754181641Skmacy printf("va=0x%x is unmanaged :-( \n", va); 2755181641Skmacy 2756181641Skmacy if (mpte != NULL) { 2757181641Skmacy mpte->wire_count--; 2758181641Skmacy KASSERT(mpte->wire_count > 0, 2759181641Skmacy ("pmap_enter: missing reference to page table page," 2760181641Skmacy " va: 0x%x", va)); 2761181641Skmacy } 2762181641Skmacy } else 2763181641Skmacy pmap->pm_stats.resident_count++; 2764181641Skmacy 2765181641Skmacy /* 2766181641Skmacy * Enter on the PV list if part of our managed memory. 
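	 * A pv entry recovered from the old mapping above is reused
	 * when one is available, so replacing one managed mapping with
	 * another avoids pv allocation entirely.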
2767181641Skmacy */ 2768224746Skib if ((m->oflags & VPO_UNMANAGED) == 0) { 2769181641Skmacy KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva, 2770181641Skmacy ("pmap_enter: managed mapping within the clean submap")); 2771208651Salc if (pv == NULL) 2772208651Salc pv = get_pv_entry(pmap, FALSE); 2773208651Salc pv->pv_va = va; 2774208651Salc TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list); 2775181641Skmacy pa |= PG_MANAGED; 2776208651Salc } else if (pv != NULL) 2777208651Salc free_pv_entry(pmap, pv); 2778181641Skmacy 2779181641Skmacy /* 2780181641Skmacy * Increment counters 2781181641Skmacy */ 2782181641Skmacy if (wired) 2783181641Skmacy pmap->pm_stats.wired_count++; 2784181641Skmacy 2785181641Skmacyvalidate: 2786181641Skmacy /* 2787181641Skmacy * Now validate mapping with desired protection/wiring. 2788181641Skmacy */ 2789181641Skmacy newpte = (pt_entry_t)(pa | PG_V); 2790181641Skmacy if ((prot & VM_PROT_WRITE) != 0) { 2791181641Skmacy newpte |= PG_RW; 2792208651Salc if ((newpte & PG_MANAGED) != 0) 2793225418Skib vm_page_aflag_set(m, PGA_WRITEABLE); 2794181641Skmacy } 2795181641Skmacy#ifdef PAE 2796181641Skmacy if ((prot & VM_PROT_EXECUTE) == 0) 2797181641Skmacy newpte |= pg_nx; 2798181641Skmacy#endif 2799181641Skmacy if (wired) 2800181641Skmacy newpte |= PG_W; 2801181641Skmacy if (va < VM_MAXUSER_ADDRESS) 2802181641Skmacy newpte |= PG_U; 2803181641Skmacy if (pmap == kernel_pmap) 2804181641Skmacy newpte |= pgeflag; 2805181641Skmacy 2806181641Skmacy critical_enter(); 2807181641Skmacy /* 2808181641Skmacy * if the mapping or permission bits are different, we need 2809181641Skmacy * to update the pte. 2810181641Skmacy */ 2811181641Skmacy if ((origpte & ~(PG_M|PG_A)) != newpte) { 2812181641Skmacy if (origpte) { 2813181641Skmacy invlva = FALSE; 2814181641Skmacy origpte = *pte; 2815181641Skmacy PT_SET_VA(pte, newpte | PG_A, FALSE); 2816181641Skmacy if (origpte & PG_A) { 2817181641Skmacy if (origpte & PG_MANAGED) 2818225418Skib vm_page_aflag_set(om, PGA_REFERENCED); 2819181641Skmacy if (opa != VM_PAGE_TO_PHYS(m)) 2820181641Skmacy invlva = TRUE; 2821181641Skmacy#ifdef PAE 2822181641Skmacy if ((origpte & PG_NX) == 0 && 2823181641Skmacy (newpte & PG_NX) != 0) 2824181641Skmacy invlva = TRUE; 2825181641Skmacy#endif 2826181641Skmacy } 2827208651Salc if ((origpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) { 2828181641Skmacy if ((origpte & PG_MANAGED) != 0) 2829181641Skmacy vm_page_dirty(om); 2830181641Skmacy if ((prot & VM_PROT_WRITE) == 0) 2831181641Skmacy invlva = TRUE; 2832181641Skmacy } 2833208651Salc if ((origpte & PG_MANAGED) != 0 && 2834208651Salc TAILQ_EMPTY(&om->md.pv_list)) 2835225418Skib vm_page_aflag_clear(om, PGA_WRITEABLE); 2836181641Skmacy if (invlva) 2837181641Skmacy pmap_invalidate_page(pmap, va); 2838181641Skmacy } else{ 2839181641Skmacy PT_SET_VA(pte, newpte | PG_A, FALSE); 2840181641Skmacy } 2841181641Skmacy 2842181641Skmacy } 2843181641Skmacy PT_UPDATES_FLUSH(); 2844181641Skmacy critical_exit(); 2845181641Skmacy if (*PMAP1) 2846181641Skmacy PT_SET_VA_MA(PMAP1, 0, TRUE); 2847181641Skmacy sched_unpin(); 2848241498Salc rw_wunlock(&pvh_global_lock); 2849181641Skmacy PMAP_UNLOCK(pmap); 2850181641Skmacy} 2851181641Skmacy 2852181641Skmacy/* 2853181641Skmacy * Maps a sequence of resident pages belonging to the same object. 2854181641Skmacy * The sequence begins with the given page m_start. This page is 2855181641Skmacy * mapped at the given virtual address start. 
Each subsequent page is 2856181641Skmacy * mapped at a virtual address that is offset from start by the same 2857181641Skmacy * amount as the page is offset from m_start within the object. The 2858181641Skmacy * last page in the sequence is the page with the largest offset from 2859181641Skmacy * m_start that can be mapped at a virtual address less than the given 2860181641Skmacy * virtual address end. Not every virtual page between start and end 2861181641Skmacy * is mapped; only those for which a resident page exists with the 2862181641Skmacy * corresponding offset from m_start are mapped. 2863181641Skmacy */ 2864181641Skmacyvoid 2865181641Skmacypmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end, 2866181641Skmacy vm_page_t m_start, vm_prot_t prot) 2867181641Skmacy{ 2868181641Skmacy vm_page_t m, mpte; 2869181641Skmacy vm_pindex_t diff, psize; 2870181641Skmacy multicall_entry_t mcl[16]; 2871181641Skmacy multicall_entry_t *mclp = mcl; 2872181641Skmacy int error, count = 0; 2873228923Salc 2874181641Skmacy VM_OBJECT_LOCK_ASSERT(m_start->object, MA_OWNED); 2875181641Skmacy psize = atop(end - start); 2876181641Skmacy mpte = NULL; 2877181641Skmacy m = m_start; 2878241498Salc rw_wlock(&pvh_global_lock); 2879181641Skmacy PMAP_LOCK(pmap); 2880181641Skmacy while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) { 2881181641Skmacy mpte = pmap_enter_quick_locked(&mclp, &count, pmap, start + ptoa(diff), m, 2882181641Skmacy prot, mpte); 2883181641Skmacy m = TAILQ_NEXT(m, listq); 2884181641Skmacy if (count == 16) { 2885181641Skmacy error = HYPERVISOR_multicall(mcl, count); 2886181641Skmacy KASSERT(error == 0, ("bad multicall %d", error)); 2887181641Skmacy mclp = mcl; 2888181641Skmacy count = 0; 2889181641Skmacy } 2890181641Skmacy } 2891181641Skmacy if (count) { 2892181641Skmacy error = HYPERVISOR_multicall(mcl, count); 2893181641Skmacy KASSERT(error == 0, ("bad multicall %d", error)); 2894181641Skmacy } 2895241498Salc rw_wunlock(&pvh_global_lock); 2896181641Skmacy PMAP_UNLOCK(pmap); 2897181641Skmacy} 2898181641Skmacy 2899181641Skmacy/* 2900181641Skmacy * this code makes some *MAJOR* assumptions: 2901181641Skmacy * 1. Current pmap & pmap exists. 2902181641Skmacy * 2. Not wired. 2903181641Skmacy * 3. Read access. 2904181641Skmacy * 4. No page table pages. 2905181641Skmacy * but is *MUCH* faster than pmap_enter... 
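 * A typical (hypothetical) prefault loop over resident pages would be:
 *
 *	for (i = 0; i < n; i++)
 *		pmap_enter_quick(pmap, va + ptoa(i), m[i], prot);
 *
 * with pmap_enter() reserved for mappings that must not fail.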
2906181641Skmacy */ 2907181641Skmacy 2908181641Skmacyvoid 2909181641Skmacypmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot) 2910181641Skmacy{ 2911181641Skmacy multicall_entry_t mcl, *mclp; 2912181641Skmacy int count = 0; 2913181641Skmacy mclp = &mcl; 2914228923Salc 2915181641Skmacy CTR4(KTR_PMAP, "pmap_enter_quick: pmap=%p va=0x%x m=%p prot=0x%x", 2916181641Skmacy pmap, va, m, prot); 2917181641Skmacy 2918241498Salc rw_wlock(&pvh_global_lock); 2919181641Skmacy PMAP_LOCK(pmap); 2920207796Salc (void)pmap_enter_quick_locked(&mclp, &count, pmap, va, m, prot, NULL); 2921181641Skmacy if (count) 2922181641Skmacy HYPERVISOR_multicall(&mcl, count); 2923241498Salc rw_wunlock(&pvh_global_lock); 2924181641Skmacy PMAP_UNLOCK(pmap); 2925181641Skmacy} 2926181641Skmacy 2927181747Skmacy#ifdef notyet 2928181641Skmacyvoid 2929181641Skmacypmap_enter_quick_range(pmap_t pmap, vm_offset_t *addrs, vm_page_t *pages, vm_prot_t *prots, int count) 2930181641Skmacy{ 2931181641Skmacy int i, error, index = 0; 2932181641Skmacy multicall_entry_t mcl[16]; 2933181641Skmacy multicall_entry_t *mclp = mcl; 2934181641Skmacy 2935181641Skmacy PMAP_LOCK(pmap); 2936181641Skmacy for (i = 0; i < count; i++, addrs++, pages++, prots++) { 2937181641Skmacy if (!pmap_is_prefaultable_locked(pmap, *addrs)) 2938181641Skmacy continue; 2939181641Skmacy 2940181641Skmacy (void) pmap_enter_quick_locked(&mclp, &index, pmap, *addrs, *pages, *prots, NULL); 2941181641Skmacy if (index == 16) { 2942181641Skmacy error = HYPERVISOR_multicall(mcl, index); 2943181641Skmacy mclp = mcl; 2944181641Skmacy index = 0; 2945181641Skmacy KASSERT(error == 0, ("bad multicall %d", error)); 2946181641Skmacy } 2947181641Skmacy } 2948181641Skmacy if (index) { 2949181641Skmacy error = HYPERVISOR_multicall(mcl, index); 2950181641Skmacy KASSERT(error == 0, ("bad multicall %d", error)); 2951181641Skmacy } 2952181641Skmacy 2953181641Skmacy PMAP_UNLOCK(pmap); 2954181641Skmacy} 2955181747Skmacy#endif 2956181641Skmacy 2957181641Skmacystatic vm_page_t 2958181641Skmacypmap_enter_quick_locked(multicall_entry_t **mclpp, int *count, pmap_t pmap, vm_offset_t va, vm_page_t m, 2959181641Skmacy vm_prot_t prot, vm_page_t mpte) 2960181641Skmacy{ 2961181641Skmacy pt_entry_t *pte; 2962181641Skmacy vm_paddr_t pa; 2963181641Skmacy vm_page_t free; 2964181641Skmacy multicall_entry_t *mcl = *mclpp; 2965228923Salc 2966181641Skmacy KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva || 2967224746Skib (m->oflags & VPO_UNMANAGED) != 0, 2968181641Skmacy ("pmap_enter_quick_locked: managed mapping within the clean submap")); 2969241498Salc rw_assert(&pvh_global_lock, RA_WLOCKED); 2970181641Skmacy PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2971181641Skmacy 2972181641Skmacy /* 2973181641Skmacy * In the case that a page table page is not 2974181641Skmacy * resident, we are creating it here. 2975181641Skmacy */ 2976181641Skmacy if (va < VM_MAXUSER_ADDRESS) { 2977228923Salc u_int ptepindex; 2978181641Skmacy pd_entry_t ptema; 2979181641Skmacy 2980181641Skmacy /* 2981181641Skmacy * Calculate pagetable page index 2982181641Skmacy */ 2983181641Skmacy ptepindex = va >> PDRSHIFT; 2984181641Skmacy if (mpte && (mpte->pindex == ptepindex)) { 2985181641Skmacy mpte->wire_count++; 2986181641Skmacy } else { 2987181641Skmacy /* 2988181641Skmacy * Get the page directory entry 2989181641Skmacy */ 2990181641Skmacy ptema = pmap->pm_pdir[ptepindex]; 2991181641Skmacy 2992181641Skmacy /* 2993181641Skmacy * If the page table page is mapped, we just increment 2994181641Skmacy * the hold count, and activate it. 
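			 * The hold count here is the page table page's
			 * wire_count: each mapping entered under the page
			 * adds a reference, and pmap_unwire_ptp() (used on
			 * the failure paths below) drops one, freeing the
			 * page table page when the last reference goes away.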
2995181641Skmacy */ 2996181641Skmacy if (ptema & PG_V) { 2997181641Skmacy if (ptema & PG_PS) 2998181641Skmacy panic("pmap_enter_quick: unexpected mapping into 4MB page"); 2999181641Skmacy mpte = PHYS_TO_VM_PAGE(xpmap_mtop(ptema) & PG_FRAME); 3000181641Skmacy mpte->wire_count++; 3001181641Skmacy } else { 3002181641Skmacy mpte = _pmap_allocpte(pmap, ptepindex, 3003181641Skmacy M_NOWAIT); 3004181641Skmacy if (mpte == NULL) 3005181641Skmacy return (mpte); 3006181641Skmacy } 3007181641Skmacy } 3008181641Skmacy } else { 3009181641Skmacy mpte = NULL; 3010181641Skmacy } 3011181641Skmacy 3012181641Skmacy /* 3013181641Skmacy * This call to vtopte makes the assumption that we are 3014181641Skmacy * entering the page into the current pmap. In order to support 3015181641Skmacy * quick entry into any pmap, one would likely use pmap_pte_quick. 3016181641Skmacy * But that isn't as quick as vtopte. 3017181641Skmacy */ 3018181641Skmacy KASSERT(pmap_is_current(pmap), ("entering pages in non-current pmap")); 3019181641Skmacy pte = vtopte(va); 3020181641Skmacy if (*pte & PG_V) { 3021181641Skmacy if (mpte != NULL) { 3022181641Skmacy mpte->wire_count--; 3023181641Skmacy mpte = NULL; 3024181641Skmacy } 3025181641Skmacy return (mpte); 3026181641Skmacy } 3027181641Skmacy 3028181641Skmacy /* 3029181641Skmacy * Enter on the PV list if part of our managed memory. 3030181641Skmacy */ 3031224746Skib if ((m->oflags & VPO_UNMANAGED) == 0 && 3032181641Skmacy !pmap_try_insert_pv_entry(pmap, va, m)) { 3033181641Skmacy if (mpte != NULL) { 3034181641Skmacy free = NULL; 3035240126Salc if (pmap_unwire_ptp(pmap, mpte, &free)) { 3036181641Skmacy pmap_invalidate_page(pmap, va); 3037181641Skmacy pmap_free_zero_pages(free); 3038181641Skmacy } 3039181641Skmacy 3040181641Skmacy mpte = NULL; 3041181641Skmacy } 3042181641Skmacy return (mpte); 3043181641Skmacy } 3044181641Skmacy 3045181641Skmacy /* 3046181641Skmacy * Increment counters 3047181641Skmacy */ 3048181641Skmacy pmap->pm_stats.resident_count++; 3049181641Skmacy 3050181641Skmacy pa = VM_PAGE_TO_PHYS(m); 3051181641Skmacy#ifdef PAE 3052181641Skmacy if ((prot & VM_PROT_EXECUTE) == 0) 3053181641Skmacy pa |= pg_nx; 3054181641Skmacy#endif 3055181641Skmacy 3056181641Skmacy#if 0 3057181641Skmacy /* 3058181641Skmacy * Now validate mapping with RO protection 3059181641Skmacy */ 3060224746Skib if ((m->oflags & VPO_UNMANAGED) != 0) 3061181641Skmacy pte_store(pte, pa | PG_V | PG_U); 3062181641Skmacy else 3063181641Skmacy pte_store(pte, pa | PG_V | PG_U | PG_MANAGED); 3064181641Skmacy#else 3065181641Skmacy /* 3066181641Skmacy * Now validate mapping with RO protection 3067181641Skmacy */ 3068224746Skib if ((m->oflags & VPO_UNMANAGED) != 0) 3069181641Skmacy pa = xpmap_ptom(pa | PG_V | PG_U); 3070181641Skmacy else 3071181641Skmacy pa = xpmap_ptom(pa | PG_V | PG_U | PG_MANAGED); 3072181641Skmacy 3073181641Skmacy mcl->op = __HYPERVISOR_update_va_mapping; 3074181641Skmacy mcl->args[0] = va; 3075181641Skmacy mcl->args[1] = (uint32_t)(pa & 0xffffffff); 3076181641Skmacy mcl->args[2] = (uint32_t)(pa >> 32); 3077181641Skmacy mcl->args[3] = 0; 3078181641Skmacy *mclpp = mcl + 1; 3079181641Skmacy *count = *count + 1; 3080181641Skmacy#endif 3081228923Salc return (mpte); 3082181641Skmacy} 3083181641Skmacy 3084181641Skmacy/* 3085181641Skmacy * Make a temporary mapping for a physical address. This is only intended 3086181641Skmacy * to be used for panic dumps. 
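 * An illustrative sketch of the intended use by dump code (the slot
 * index "i", the count "n", and the loop are hypothetical):
 *
 *	for (i = 0; i < n; i++)
 *		(void)pmap_kenter_temporary(pa + ptoa(i), i);
 *	(then read n * PAGE_SIZE bytes starting at crashdumpmap)
 *
 * The return value is always the base of crashdumpmap regardless of
 * "i", and there is no unmap routine; a later call to the same slot
 * simply replaces the mapping.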
3087181641Skmacy */ 3088181641Skmacyvoid * 3089181641Skmacypmap_kenter_temporary(vm_paddr_t pa, int i) 3090181641Skmacy{ 3091181641Skmacy vm_offset_t va; 3092200346Skmacy vm_paddr_t ma = xpmap_ptom(pa); 3093181641Skmacy 3094181641Skmacy va = (vm_offset_t)crashdumpmap + (i * PAGE_SIZE); 3095200346Skmacy PT_SET_MA(va, (ma & ~PAGE_MASK) | PG_V | pgeflag); 3096181641Skmacy invlpg(va); 3097181641Skmacy return ((void *)crashdumpmap); 3098181641Skmacy} 3099181641Skmacy 3100181641Skmacy/* 3101181641Skmacy * This code maps large physical mmap regions into the 3102181641Skmacy * processor address space. Note that some shortcuts 3103181641Skmacy * are taken, but the code works. 3104181641Skmacy */ 3105181641Skmacyvoid 3106228923Salcpmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object, 3107228923Salc vm_pindex_t pindex, vm_size_t size) 3108181641Skmacy{ 3109207419Skmacy pd_entry_t *pde; 3110207419Skmacy vm_paddr_t pa, ptepa; 3111181641Skmacy vm_page_t p; 3112207419Skmacy int pat_mode; 3113181641Skmacy 3114181641Skmacy VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); 3115195840Sjhb KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG, 3116181641Skmacy ("pmap_object_init_pt: non-device object")); 3117181641Skmacy if (pseflag && 3118207419Skmacy (addr & (NBPDR - 1)) == 0 && (size & (NBPDR - 1)) == 0) { 3119207419Skmacy if (!vm_object_populate(object, pindex, pindex + atop(size))) 3120207419Skmacy return; 3121181641Skmacy p = vm_page_lookup(object, pindex); 3122207419Skmacy KASSERT(p->valid == VM_PAGE_BITS_ALL, 3123207419Skmacy ("pmap_object_init_pt: invalid page %p", p)); 3124207419Skmacy pat_mode = p->md.pat_mode; 3125228923Salc 3126207419Skmacy /* 3127207419Skmacy * Abort the mapping if the first page is not physically 3128207419Skmacy * aligned to a 2/4MB page boundary. 3129207419Skmacy */ 3130181641Skmacy ptepa = VM_PAGE_TO_PHYS(p); 3131181641Skmacy if (ptepa & (NBPDR - 1)) 3132181641Skmacy return; 3133228923Salc 3134207419Skmacy /* 3135207419Skmacy * Skip the first page. Abort the mapping if the rest of 3136207419Skmacy * the pages are not physically contiguous or have differing 3137207419Skmacy * memory attributes. 3138207419Skmacy */ 3139207419Skmacy p = TAILQ_NEXT(p, listq); 3140207419Skmacy for (pa = ptepa + PAGE_SIZE; pa < ptepa + size; 3141207419Skmacy pa += PAGE_SIZE) { 3142207419Skmacy KASSERT(p->valid == VM_PAGE_BITS_ALL, 3143207419Skmacy ("pmap_object_init_pt: invalid page %p", p)); 3144207419Skmacy if (pa != VM_PAGE_TO_PHYS(p) || 3145207419Skmacy pat_mode != p->md.pat_mode) 3146207419Skmacy return; 3147207419Skmacy p = TAILQ_NEXT(p, listq); 3148207419Skmacy } 3149228923Salc 3150228923Salc /* 3151228923Salc * Map using 2/4MB pages. Since "ptepa" is 2/4M aligned and 3152228923Salc * "size" is a multiple of 2/4M, adding the PAT setting to 3153228923Salc * "pa" will not affect the termination of this loop. 3154228923Salc */ 3155181641Skmacy PMAP_LOCK(pmap); 3156207419Skmacy for (pa = ptepa | pmap_cache_bits(pat_mode, 1); pa < ptepa + 3157207419Skmacy size; pa += NBPDR) { 3158207419Skmacy pde = pmap_pde(pmap, addr); 3159207419Skmacy if (*pde == 0) { 3160207419Skmacy pde_store(pde, pa | PG_PS | PG_M | PG_A | 3161207419Skmacy PG_U | PG_RW | PG_V); 3162207419Skmacy pmap->pm_stats.resident_count += NBPDR / 3163207419Skmacy PAGE_SIZE; 3164207419Skmacy pmap_pde_mappings++; 3165207419Skmacy } 3166207419Skmacy /* Else continue on if the PDE is already valid. 
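			 * A valid PDE may have been installed by an
			 * earlier call covering an overlapping range,
			 * so an existing mapping is left in place
			 * rather than overwritten.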
*/ 3167207419Skmacy addr += NBPDR; 3168181641Skmacy } 3169181641Skmacy PMAP_UNLOCK(pmap); 3170181641Skmacy } 3171181641Skmacy} 3172181641Skmacy 3173181641Skmacy/* 3174181641Skmacy * Routine: pmap_change_wiring 3175181641Skmacy * Function: Change the wiring attribute for a map/virtual-address 3176181641Skmacy * pair. 3177181641Skmacy * In/out conditions: 3178181641Skmacy * The mapping must already exist in the pmap. 3179181641Skmacy */ 3180181641Skmacyvoid 3181181641Skmacypmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired) 3182181641Skmacy{ 3183181641Skmacy pt_entry_t *pte; 3184181641Skmacy 3185241498Salc rw_wlock(&pvh_global_lock); 3186181641Skmacy PMAP_LOCK(pmap); 3187181641Skmacy pte = pmap_pte(pmap, va); 3188181641Skmacy 3189181641Skmacy if (wired && !pmap_pte_w(pte)) { 3190181641Skmacy PT_SET_VA_MA((pte), *(pte) | PG_W, TRUE); 3191181641Skmacy pmap->pm_stats.wired_count++; 3192181641Skmacy } else if (!wired && pmap_pte_w(pte)) { 3193181641Skmacy PT_SET_VA_MA((pte), *(pte) & ~PG_W, TRUE); 3194181641Skmacy pmap->pm_stats.wired_count--; 3195181641Skmacy } 3196181641Skmacy 3197181641Skmacy /* 3198181641Skmacy * Wiring is not a hardware characteristic so there is no need to 3199181641Skmacy * invalidate TLB. 3200181641Skmacy */ 3201181641Skmacy pmap_pte_release(pte); 3202181641Skmacy PMAP_UNLOCK(pmap); 3203241498Salc rw_wunlock(&pvh_global_lock); 3204181641Skmacy} 3205181641Skmacy 3206181641Skmacy 3207181641Skmacy 3208181641Skmacy/* 3209181641Skmacy * Copy the range specified by src_addr/len 3210181641Skmacy * from the source map to the range dst_addr/len 3211181641Skmacy * in the destination map. 3212181641Skmacy * 3213181641Skmacy * This routine is only advisory and need not do anything. 3214181641Skmacy */ 3215181641Skmacy 3216181641Skmacyvoid 3217181641Skmacypmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len, 3218228923Salc vm_offset_t src_addr) 3219181641Skmacy{ 3220181641Skmacy vm_page_t free; 3221181641Skmacy vm_offset_t addr; 3222181641Skmacy vm_offset_t end_addr = src_addr + len; 3223181641Skmacy vm_offset_t pdnxt; 3224181641Skmacy 3225181641Skmacy if (dst_addr != src_addr) 3226181641Skmacy return; 3227181641Skmacy 3228181641Skmacy if (!pmap_is_current(src_pmap)) { 3229181641Skmacy CTR2(KTR_PMAP, 3230181641Skmacy "pmap_copy, skipping: pdir[PTDPTDI]=0x%jx PTDpde[0]=0x%jx", 3231181641Skmacy (src_pmap->pm_pdir[PTDPTDI] & PG_FRAME), (PTDpde[0] & PG_FRAME)); 3232181641Skmacy 3233181641Skmacy return; 3234181641Skmacy } 3235181641Skmacy CTR5(KTR_PMAP, "pmap_copy: dst_pmap=%p src_pmap=%p dst_addr=0x%x len=%d src_addr=0x%x", 3236181641Skmacy dst_pmap, src_pmap, dst_addr, len, src_addr); 3237181641Skmacy 3238216960Scperciva#ifdef HAMFISTED_LOCKING 3239216960Scperciva mtx_lock(&createdelete_lock); 3240216960Scperciva#endif 3241216960Scperciva 3242241498Salc rw_wlock(&pvh_global_lock); 3243181641Skmacy if (dst_pmap < src_pmap) { 3244181641Skmacy PMAP_LOCK(dst_pmap); 3245181641Skmacy PMAP_LOCK(src_pmap); 3246181641Skmacy } else { 3247181641Skmacy PMAP_LOCK(src_pmap); 3248181641Skmacy PMAP_LOCK(dst_pmap); 3249181641Skmacy } 3250181641Skmacy sched_pin(); 3251181641Skmacy for (addr = src_addr; addr < end_addr; addr = pdnxt) { 3252181641Skmacy pt_entry_t *src_pte, *dst_pte; 3253181641Skmacy vm_page_t dstmpte, srcmpte; 3254181641Skmacy pd_entry_t srcptepaddr; 3255228923Salc u_int ptepindex; 3256181641Skmacy 3257208651Salc KASSERT(addr < UPT_MIN_ADDRESS, 3258208651Salc ("pmap_copy: invalid to pmap_copy page tables")); 3259181641Skmacy 3260181641Skmacy pdnxt = 
(addr + NBPDR) & ~PDRMASK; 3261229007Salc if (pdnxt < addr) 3262229007Salc pdnxt = end_addr; 3263181641Skmacy ptepindex = addr >> PDRSHIFT; 3264181641Skmacy 3265181641Skmacy srcptepaddr = PT_GET(&src_pmap->pm_pdir[ptepindex]); 3266181641Skmacy if (srcptepaddr == 0) 3267181641Skmacy continue; 3268181641Skmacy 3269181641Skmacy if (srcptepaddr & PG_PS) { 3270181641Skmacy if (dst_pmap->pm_pdir[ptepindex] == 0) { 3271181641Skmacy PD_SET_VA(dst_pmap, ptepindex, srcptepaddr & ~PG_W, TRUE); 3272181641Skmacy dst_pmap->pm_stats.resident_count += 3273181641Skmacy NBPDR / PAGE_SIZE; 3274181641Skmacy } 3275181641Skmacy continue; 3276181641Skmacy } 3277181641Skmacy 3278181641Skmacy srcmpte = PHYS_TO_VM_PAGE(srcptepaddr & PG_FRAME); 3279208651Salc KASSERT(srcmpte->wire_count > 0, 3280208651Salc ("pmap_copy: source page table page is unused")); 3281181641Skmacy 3282181641Skmacy if (pdnxt > end_addr) 3283181641Skmacy pdnxt = end_addr; 3284181641Skmacy 3285181641Skmacy src_pte = vtopte(addr); 3286181641Skmacy while (addr < pdnxt) { 3287181641Skmacy pt_entry_t ptetemp; 3288181641Skmacy ptetemp = *src_pte; 3289181641Skmacy /* 3290181641Skmacy * we only virtual copy managed pages 3291181641Skmacy */ 3292181641Skmacy if ((ptetemp & PG_MANAGED) != 0) { 3293181641Skmacy dstmpte = pmap_allocpte(dst_pmap, addr, 3294181641Skmacy M_NOWAIT); 3295181641Skmacy if (dstmpte == NULL) 3296228923Salc goto out; 3297181641Skmacy dst_pte = pmap_pte_quick(dst_pmap, addr); 3298181641Skmacy if (*dst_pte == 0 && 3299181641Skmacy pmap_try_insert_pv_entry(dst_pmap, addr, 3300181641Skmacy PHYS_TO_VM_PAGE(xpmap_mtop(ptetemp) & PG_FRAME))) { 3301181641Skmacy /* 3302181641Skmacy * Clear the wired, modified, and 3303181641Skmacy * accessed (referenced) bits 3304181641Skmacy * during the copy. 3305181641Skmacy */ 3306181641Skmacy KASSERT(ptetemp != 0, ("src_pte not set")); 3307181641Skmacy PT_SET_VA_MA(dst_pte, ptetemp & ~(PG_W | PG_M | PG_A), TRUE /* XXX debug */); 3308181641Skmacy KASSERT(*dst_pte == (ptetemp & ~(PG_W | PG_M | PG_A)), 3309181641Skmacy ("no pmap copy expected: 0x%jx saw: 0x%jx", 3310181641Skmacy ptetemp & ~(PG_W | PG_M | PG_A), *dst_pte)); 3311181641Skmacy dst_pmap->pm_stats.resident_count++; 3312181641Skmacy } else { 3313181641Skmacy free = NULL; 3314240126Salc if (pmap_unwire_ptp(dst_pmap, dstmpte, 3315240126Salc &free)) { 3316181641Skmacy pmap_invalidate_page(dst_pmap, 3317181641Skmacy addr); 3318181641Skmacy pmap_free_zero_pages(free); 3319181641Skmacy } 3320228923Salc goto out; 3321181641Skmacy } 3322181641Skmacy if (dstmpte->wire_count >= srcmpte->wire_count) 3323181641Skmacy break; 3324181641Skmacy } 3325181641Skmacy addr += PAGE_SIZE; 3326181641Skmacy src_pte++; 3327181641Skmacy } 3328181641Skmacy } 3329228923Salcout: 3330181641Skmacy PT_UPDATES_FLUSH(); 3331181641Skmacy sched_unpin(); 3332241498Salc rw_wunlock(&pvh_global_lock); 3333181641Skmacy PMAP_UNLOCK(src_pmap); 3334181641Skmacy PMAP_UNLOCK(dst_pmap); 3335216960Scperciva 3336216960Scperciva#ifdef HAMFISTED_LOCKING 3337216960Scperciva mtx_unlock(&createdelete_lock); 3338216960Scperciva#endif 3339181641Skmacy} 3340181641Skmacy 3341196723Sadrianstatic __inline void 3342196723Sadrianpagezero(void *page) 3343196723Sadrian{ 3344196723Sadrian#if defined(I686_CPU) 3345196723Sadrian if (cpu_class == CPUCLASS_686) { 3346196723Sadrian#if defined(CPU_ENABLE_SSE) 3347196723Sadrian if (cpu_feature & CPUID_SSE2) 3348196723Sadrian sse2_pagezero(page); 3349196723Sadrian else 3350196723Sadrian#endif 3351196723Sadrian i686_pagezero(page); 3352196723Sadrian } else 
3353196723Sadrian#endif 3354196723Sadrian bzero(page, PAGE_SIZE); 3355196723Sadrian} 3356196723Sadrian 3357181641Skmacy/* 3358181641Skmacy * pmap_zero_page zeros the specified hardware page by mapping 3359181641Skmacy * the page into KVM and using bzero to clear its contents. 3360181641Skmacy */ 3361181641Skmacyvoid 3362181641Skmacypmap_zero_page(vm_page_t m) 3363181641Skmacy{ 3364181641Skmacy struct sysmaps *sysmaps; 3365181641Skmacy 3366181641Skmacy sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)]; 3367181641Skmacy mtx_lock(&sysmaps->lock); 3368181641Skmacy if (*sysmaps->CMAP2) 3369181641Skmacy panic("pmap_zero_page: CMAP2 busy"); 3370181641Skmacy sched_pin(); 3371215587Scperciva PT_SET_MA(sysmaps->CADDR2, PG_V | PG_RW | VM_PAGE_TO_MACH(m) | PG_A | PG_M); 3372181641Skmacy pagezero(sysmaps->CADDR2); 3373181641Skmacy PT_SET_MA(sysmaps->CADDR2, 0); 3374181641Skmacy sched_unpin(); 3375181641Skmacy mtx_unlock(&sysmaps->lock); 3376181641Skmacy} 3377181641Skmacy 3378181641Skmacy/* 3379181641Skmacy * pmap_zero_page_area zeros the specified hardware page by mapping 3380181641Skmacy * the page into KVM and using bzero to clear its contents. 3381181641Skmacy * 3382181641Skmacy * off and size may not cover an area beyond a single hardware page. 3383181641Skmacy */ 3384181641Skmacyvoid 3385181641Skmacypmap_zero_page_area(vm_page_t m, int off, int size) 3386181641Skmacy{ 3387181641Skmacy struct sysmaps *sysmaps; 3388181641Skmacy 3389181641Skmacy sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)]; 3390181641Skmacy mtx_lock(&sysmaps->lock); 3391181641Skmacy if (*sysmaps->CMAP2) 3392228923Salc panic("pmap_zero_page_area: CMAP2 busy"); 3393181641Skmacy sched_pin(); 3394215587Scperciva PT_SET_MA(sysmaps->CADDR2, PG_V | PG_RW | VM_PAGE_TO_MACH(m) | PG_A | PG_M); 3395181641Skmacy 3396181641Skmacy if (off == 0 && size == PAGE_SIZE) 3397181641Skmacy pagezero(sysmaps->CADDR2); 3398181641Skmacy else 3399181641Skmacy bzero((char *)sysmaps->CADDR2 + off, size); 3400181641Skmacy PT_SET_MA(sysmaps->CADDR2, 0); 3401181641Skmacy sched_unpin(); 3402181641Skmacy mtx_unlock(&sysmaps->lock); 3403181641Skmacy} 3404181641Skmacy 3405181641Skmacy/* 3406181641Skmacy * pmap_zero_page_idle zeros the specified hardware page by mapping 3407181641Skmacy * the page into KVM and using bzero to clear its contents. This 3408181641Skmacy * is intended to be called from the vm_pagezero process only and 3409181641Skmacy * outside of Giant. 3410181641Skmacy */ 3411181641Skmacyvoid 3412181641Skmacypmap_zero_page_idle(vm_page_t m) 3413181641Skmacy{ 3414181641Skmacy 3415181641Skmacy if (*CMAP3) 3416228923Salc panic("pmap_zero_page_idle: CMAP3 busy"); 3417181641Skmacy sched_pin(); 3418215587Scperciva PT_SET_MA(CADDR3, PG_V | PG_RW | VM_PAGE_TO_MACH(m) | PG_A | PG_M); 3419181641Skmacy pagezero(CADDR3); 3420181641Skmacy PT_SET_MA(CADDR3, 0); 3421181641Skmacy sched_unpin(); 3422181641Skmacy} 3423181641Skmacy 3424181641Skmacy/* 3425181641Skmacy * pmap_copy_page copies the specified (machine independent) 3426181641Skmacy * page by mapping the page into virtual memory and using 3427181641Skmacy * bcopy to copy the page, one machine dependent page at a 3428181641Skmacy * time. 
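 * Both pages are mapped through the per-CPU sysmaps windows (CMAP1
 * for the source, read-only; CMAP2 for the destination, writable)
 * while the thread is pinned to its CPU, so the temporary mappings
 * never require a cross-CPU TLB shootdown.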
3429181641Skmacy */ 3430181641Skmacyvoid 3431181641Skmacypmap_copy_page(vm_page_t src, vm_page_t dst) 3432181641Skmacy{ 3433181641Skmacy struct sysmaps *sysmaps; 3434181641Skmacy 3435181641Skmacy sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)]; 3436181641Skmacy mtx_lock(&sysmaps->lock); 3437181641Skmacy if (*sysmaps->CMAP1) 3438181641Skmacy panic("pmap_copy_page: CMAP1 busy"); 3439181641Skmacy if (*sysmaps->CMAP2) 3440181641Skmacy panic("pmap_copy_page: CMAP2 busy"); 3441181641Skmacy sched_pin(); 3442215587Scperciva PT_SET_MA(sysmaps->CADDR1, PG_V | VM_PAGE_TO_MACH(src) | PG_A); 3443215587Scperciva PT_SET_MA(sysmaps->CADDR2, PG_V | PG_RW | VM_PAGE_TO_MACH(dst) | PG_A | PG_M); 3444181641Skmacy bcopy(sysmaps->CADDR1, sysmaps->CADDR2, PAGE_SIZE); 3445181641Skmacy PT_SET_MA(sysmaps->CADDR1, 0); 3446181641Skmacy PT_SET_MA(sysmaps->CADDR2, 0); 3447181641Skmacy sched_unpin(); 3448181641Skmacy mtx_unlock(&sysmaps->lock); 3449181641Skmacy} 3450181641Skmacy 3451181641Skmacy/* 3452181641Skmacy * Returns true if the pmap's pv is one of the first 3453181641Skmacy * 16 pvs linked to from this page. This count may 3454181641Skmacy * be changed upwards or downwards in the future; it 3455181641Skmacy * is only necessary that true be returned for a small 3456181641Skmacy * subset of pmaps for proper page aging. 3457181641Skmacy */ 3458181641Skmacyboolean_t 3459181641Skmacypmap_page_exists_quick(pmap_t pmap, vm_page_t m) 3460181641Skmacy{ 3461181641Skmacy pv_entry_t pv; 3462181641Skmacy int loops = 0; 3463208990Salc boolean_t rv; 3464181641Skmacy 3465224746Skib KASSERT((m->oflags & VPO_UNMANAGED) == 0, 3466208990Salc ("pmap_page_exists_quick: page %p is not managed", m)); 3467208990Salc rv = FALSE; 3468241498Salc rw_wlock(&pvh_global_lock); 3469181641Skmacy TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { 3470181641Skmacy if (PV_PMAP(pv) == pmap) { 3471208990Salc rv = TRUE; 3472208990Salc break; 3473181641Skmacy } 3474181641Skmacy loops++; 3475181641Skmacy if (loops >= 16) 3476181641Skmacy break; 3477181641Skmacy } 3478241498Salc rw_wunlock(&pvh_global_lock); 3479208990Salc return (rv); 3480181641Skmacy} 3481181641Skmacy 3482181641Skmacy/* 3483181641Skmacy * pmap_page_wired_mappings: 3484181641Skmacy * 3485181641Skmacy * Return the number of managed mappings to the given physical page 3486181641Skmacy * that are wired. 3487181641Skmacy */ 3488181641Skmacyint 3489181641Skmacypmap_page_wired_mappings(vm_page_t m) 3490181641Skmacy{ 3491181641Skmacy pv_entry_t pv; 3492181641Skmacy pt_entry_t *pte; 3493181641Skmacy pmap_t pmap; 3494181641Skmacy int count; 3495181641Skmacy 3496181641Skmacy count = 0; 3497224746Skib if ((m->oflags & VPO_UNMANAGED) != 0) 3498181641Skmacy return (count); 3499241498Salc rw_wlock(&pvh_global_lock); 3500181641Skmacy sched_pin(); 3501181641Skmacy TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { 3502181641Skmacy pmap = PV_PMAP(pv); 3503181641Skmacy PMAP_LOCK(pmap); 3504181641Skmacy pte = pmap_pte_quick(pmap, pv->pv_va); 3505181641Skmacy if ((*pte & PG_W) != 0) 3506181641Skmacy count++; 3507181641Skmacy PMAP_UNLOCK(pmap); 3508181641Skmacy } 3509181641Skmacy sched_unpin(); 3510241498Salc rw_wunlock(&pvh_global_lock); 3511181641Skmacy return (count); 3512181641Skmacy} 3513181641Skmacy 3514181641Skmacy/* 3515228746Salc * Returns TRUE if the given page is mapped. Otherwise, returns FALSE. 
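 * For a managed page this reduces to checking whether the page's pv
 * list is non-empty; unmanaged pages always report FALSE, since no
 * pv entries are maintained for them even when mappings exist.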
3516181747Skmacy */ 3517181747Skmacyboolean_t 3518181747Skmacypmap_page_is_mapped(vm_page_t m) 3519181747Skmacy{ 3520181747Skmacy 3521224746Skib if ((m->oflags & VPO_UNMANAGED) != 0) 3522181747Skmacy return (FALSE); 3523228746Salc return (!TAILQ_EMPTY(&m->md.pv_list)); 3524181747Skmacy} 3525181747Skmacy 3526181747Skmacy/* 3527181641Skmacy * Remove all pages from specified address space 3528181641Skmacy * this aids process exit speeds. Also, this code 3529181641Skmacy * is special cased for current process only, but 3530181641Skmacy * can have the more generic (and slightly slower) 3531181641Skmacy * mode enabled. This is much faster than pmap_remove 3532181641Skmacy * in the case of running down an entire address space. 3533181641Skmacy */ 3534181641Skmacyvoid 3535181641Skmacypmap_remove_pages(pmap_t pmap) 3536181641Skmacy{ 3537181641Skmacy pt_entry_t *pte, tpte; 3538181641Skmacy vm_page_t m, free = NULL; 3539181641Skmacy pv_entry_t pv; 3540181641Skmacy struct pv_chunk *pc, *npc; 3541181641Skmacy int field, idx; 3542181641Skmacy int32_t bit; 3543181641Skmacy uint32_t inuse, bitmask; 3544181641Skmacy int allfree; 3545181641Skmacy 3546181641Skmacy CTR1(KTR_PMAP, "pmap_remove_pages: pmap=%p", pmap); 3547181641Skmacy 3548181641Skmacy if (pmap != vmspace_pmap(curthread->td_proc->p_vmspace)) { 3549181641Skmacy printf("warning: pmap_remove_pages called with non-current pmap\n"); 3550181641Skmacy return; 3551181641Skmacy } 3552241498Salc rw_wlock(&pvh_global_lock); 3553181641Skmacy KASSERT(pmap_is_current(pmap), ("removing pages from non-current pmap")); 3554181641Skmacy PMAP_LOCK(pmap); 3555181641Skmacy sched_pin(); 3556181641Skmacy TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) { 3557241400Salc KASSERT(pc->pc_pmap == pmap, ("Wrong pmap %p %p", pmap, 3558241400Salc pc->pc_pmap)); 3559181641Skmacy allfree = 1; 3560181641Skmacy for (field = 0; field < _NPCM; field++) { 3561236534Salc inuse = ~pc->pc_map[field] & pc_freemask[field]; 3562181641Skmacy while (inuse != 0) { 3563181641Skmacy bit = bsfl(inuse); 3564181641Skmacy bitmask = 1UL << bit; 3565181641Skmacy idx = field * 32 + bit; 3566181641Skmacy pv = &pc->pc_pventry[idx]; 3567181641Skmacy inuse &= ~bitmask; 3568181641Skmacy 3569181641Skmacy pte = vtopte(pv->pv_va); 3570181641Skmacy tpte = *pte ? xpmap_mtop(*pte) : 0; 3571181641Skmacy 3572181641Skmacy if (tpte == 0) { 3573181641Skmacy printf( 3574181641Skmacy "TPTE at %p IS ZERO @ VA %08x\n", 3575181641Skmacy pte, pv->pv_va); 3576181641Skmacy panic("bad pte"); 3577181641Skmacy } 3578181641Skmacy 3579181641Skmacy/* 3580181641Skmacy * We cannot remove wired pages from a process' mapping at this time 3581181641Skmacy */ 3582181641Skmacy if (tpte & PG_W) { 3583181641Skmacy allfree = 0; 3584181641Skmacy continue; 3585181641Skmacy } 3586181641Skmacy 3587181641Skmacy m = PHYS_TO_VM_PAGE(tpte & PG_FRAME); 3588181641Skmacy KASSERT(m->phys_addr == (tpte & PG_FRAME), 3589181641Skmacy ("vm_page_t %p phys_addr mismatch %016jx %016jx", 3590181641Skmacy m, (uintmax_t)m->phys_addr, 3591181641Skmacy (uintmax_t)tpte)); 3592181641Skmacy 3593181641Skmacy KASSERT(m < &vm_page_array[vm_page_array_size], 3594181641Skmacy ("pmap_remove_pages: bad tpte %#jx", 3595181641Skmacy (uintmax_t)tpte)); 3596181641Skmacy 3597181641Skmacy 3598181641Skmacy PT_CLEAR_VA(pte, FALSE); 3599181641Skmacy 3600181641Skmacy /* 3601181641Skmacy * Update the vm_page_t clean/reference bits. 
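				 * A PG_M bit left in the defunct PTE means
				 * the page was written through this mapping,
				 * so the dirty state must be pushed back to
				 * the vm_page before the pv entry is
				 * recycled.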
 */
				if (tpte & PG_M)
					vm_page_dirty(m);

				TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
				if (TAILQ_EMPTY(&m->md.pv_list))
					vm_page_aflag_clear(m, PGA_WRITEABLE);

				pmap_unuse_pt(pmap, pv->pv_va, &free);

				/* Mark free */
				PV_STAT(pv_entry_frees++);
				PV_STAT(pv_entry_spare++);
				pv_entry_count--;
				pc->pc_map[field] |= bitmask;
				pmap->pm_stats.resident_count--;
			}
		}
		PT_UPDATES_FLUSH();
		if (allfree) {
			TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
			free_pv_chunk(pc);
		}
	}
	PT_UPDATES_FLUSH();
	if (*PMAP1)
		PT_SET_MA(PADDR1, 0);

	sched_unpin();
	pmap_invalidate_all(pmap);
	rw_wunlock(&pvh_global_lock);
	PMAP_UNLOCK(pmap);
	pmap_free_zero_pages(free);
}

/*
 * pmap_is_modified:
 *
 *	Return whether or not the specified physical page was modified
 *	in any physical maps.
 */
boolean_t
pmap_is_modified(vm_page_t m)
{
	pv_entry_t pv;
	pt_entry_t *pte;
	pmap_t pmap;
	boolean_t rv;

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("pmap_is_modified: page %p is not managed", m));
	rv = FALSE;

	/*
	 * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be
	 * concurrently set while the object is locked.  Thus, if PGA_WRITEABLE
	 * is clear, no PTEs can have PG_M set.
	 */
	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	if ((m->oflags & VPO_BUSY) == 0 &&
	    (m->aflags & PGA_WRITEABLE) == 0)
		return (rv);
	rw_wlock(&pvh_global_lock);
	sched_pin();
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
		pmap = PV_PMAP(pv);
		PMAP_LOCK(pmap);
		pte = pmap_pte_quick(pmap, pv->pv_va);
		rv = (*pte & PG_M) != 0;
		PMAP_UNLOCK(pmap);
		if (rv)
			break;
	}
	if (*PMAP1)
		PT_SET_MA(PADDR1, 0);
	sched_unpin();
	rw_wunlock(&pvh_global_lock);
	return (rv);
}

/*
 * pmap_is_prefaultable:
 *
 *	Return whether or not the specified virtual address is eligible
 *	for prefault.
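 *
 *	In this Xen pmap the locked helper below currently reports
 *	FALSE for every address (see the early return at the top of
 *	its body), so prefaulting is effectively disabled here.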
3687181641Skmacy */ 3688181641Skmacystatic boolean_t 3689181641Skmacypmap_is_prefaultable_locked(pmap_t pmap, vm_offset_t addr) 3690181641Skmacy{ 3691181641Skmacy pt_entry_t *pte; 3692181641Skmacy boolean_t rv = FALSE; 3693181641Skmacy 3694181641Skmacy return (rv); 3695181641Skmacy 3696181641Skmacy if (pmap_is_current(pmap) && *pmap_pde(pmap, addr)) { 3697181641Skmacy pte = vtopte(addr); 3698181641Skmacy rv = (*pte == 0); 3699181641Skmacy } 3700181641Skmacy return (rv); 3701181641Skmacy} 3702181641Skmacy 3703181641Skmacyboolean_t 3704181641Skmacypmap_is_prefaultable(pmap_t pmap, vm_offset_t addr) 3705181641Skmacy{ 3706181641Skmacy boolean_t rv; 3707181641Skmacy 3708181641Skmacy PMAP_LOCK(pmap); 3709181641Skmacy rv = pmap_is_prefaultable_locked(pmap, addr); 3710181641Skmacy PMAP_UNLOCK(pmap); 3711181641Skmacy return (rv); 3712181641Skmacy} 3713181641Skmacy 3714207155Salcboolean_t 3715207155Salcpmap_is_referenced(vm_page_t m) 3716207155Salc{ 3717207155Salc pv_entry_t pv; 3718207155Salc pt_entry_t *pte; 3719207155Salc pmap_t pmap; 3720207155Salc boolean_t rv; 3721207155Salc 3722224746Skib KASSERT((m->oflags & VPO_UNMANAGED) == 0, 3723208574Salc ("pmap_is_referenced: page %p is not managed", m)); 3724207155Salc rv = FALSE; 3725241498Salc rw_wlock(&pvh_global_lock); 3726207155Salc sched_pin(); 3727207155Salc TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { 3728207155Salc pmap = PV_PMAP(pv); 3729207155Salc PMAP_LOCK(pmap); 3730207155Salc pte = pmap_pte_quick(pmap, pv->pv_va); 3731207155Salc rv = (*pte & (PG_A | PG_V)) == (PG_A | PG_V); 3732207155Salc PMAP_UNLOCK(pmap); 3733207155Salc if (rv) 3734207155Salc break; 3735207155Salc } 3736207155Salc if (*PMAP1) 3737207155Salc PT_SET_MA(PADDR1, 0); 3738207155Salc sched_unpin(); 3739241498Salc rw_wunlock(&pvh_global_lock); 3740207155Salc return (rv); 3741207155Salc} 3742207155Salc 3743181641Skmacyvoid 3744181641Skmacypmap_map_readonly(pmap_t pmap, vm_offset_t va, int len) 3745181641Skmacy{ 3746181641Skmacy int i, npages = round_page(len) >> PAGE_SHIFT; 3747181641Skmacy for (i = 0; i < npages; i++) { 3748181641Skmacy pt_entry_t *pte; 3749181641Skmacy pte = pmap_pte(pmap, (vm_offset_t)(va + i*PAGE_SIZE)); 3750241498Salc rw_wlock(&pvh_global_lock); 3751181641Skmacy pte_store(pte, xpmap_mtop(*pte & ~(PG_RW|PG_M))); 3752241498Salc rw_wunlock(&pvh_global_lock); 3753181641Skmacy PMAP_MARK_PRIV(xpmap_mtop(*pte)); 3754181641Skmacy pmap_pte_release(pte); 3755181641Skmacy } 3756181641Skmacy} 3757181641Skmacy 3758181641Skmacyvoid 3759181641Skmacypmap_map_readwrite(pmap_t pmap, vm_offset_t va, int len) 3760181641Skmacy{ 3761181641Skmacy int i, npages = round_page(len) >> PAGE_SHIFT; 3762181641Skmacy for (i = 0; i < npages; i++) { 3763181641Skmacy pt_entry_t *pte; 3764181641Skmacy pte = pmap_pte(pmap, (vm_offset_t)(va + i*PAGE_SIZE)); 3765181641Skmacy PMAP_MARK_UNPRIV(xpmap_mtop(*pte)); 3766241498Salc rw_wlock(&pvh_global_lock); 3767181641Skmacy pte_store(pte, xpmap_mtop(*pte) | (PG_RW|PG_M)); 3768241498Salc rw_wunlock(&pvh_global_lock); 3769181641Skmacy pmap_pte_release(pte); 3770181641Skmacy } 3771181641Skmacy} 3772181641Skmacy 3773181641Skmacy/* 3774181641Skmacy * Clear the write and modified bits in each of the given page's mappings. 
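 * Each writable PTE is downgraded in a retry loop, since the PTE may
 * be concurrently dirtied between the read and the update; any pending
 * PG_M is transferred to the page via vm_page_dirty(), and
 * PGA_WRITEABLE is cleared once no writable mappings remain.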
3775181641Skmacy */ 3776181641Skmacyvoid 3777181641Skmacypmap_remove_write(vm_page_t m) 3778181641Skmacy{ 3779181641Skmacy pv_entry_t pv; 3780181641Skmacy pmap_t pmap; 3781181641Skmacy pt_entry_t oldpte, *pte; 3782181641Skmacy 3783224746Skib KASSERT((m->oflags & VPO_UNMANAGED) == 0, 3784208175Salc ("pmap_remove_write: page %p is not managed", m)); 3785208175Salc 3786208175Salc /* 3787225418Skib * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be set by 3788225418Skib * another thread while the object is locked. Thus, if PGA_WRITEABLE 3789208175Salc * is clear, no page table entries need updating. 3790208175Salc */ 3791208175Salc VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 3792208175Salc if ((m->oflags & VPO_BUSY) == 0 && 3793225418Skib (m->aflags & PGA_WRITEABLE) == 0) 3794181641Skmacy return; 3795241498Salc rw_wlock(&pvh_global_lock); 3796181641Skmacy sched_pin(); 3797181641Skmacy TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { 3798181641Skmacy pmap = PV_PMAP(pv); 3799181641Skmacy PMAP_LOCK(pmap); 3800181641Skmacy pte = pmap_pte_quick(pmap, pv->pv_va); 3801181641Skmacyretry: 3802181641Skmacy oldpte = *pte; 3803181641Skmacy if ((oldpte & PG_RW) != 0) { 3804188341Skmacy vm_paddr_t newpte = oldpte & ~(PG_RW | PG_M); 3805188341Skmacy 3806181641Skmacy /* 3807181641Skmacy * Regardless of whether a pte is 32 or 64 bits 3808181641Skmacy * in size, PG_RW and PG_M are among the least 3809181641Skmacy * significant 32 bits. 3810181641Skmacy */ 3811188341Skmacy PT_SET_VA_MA(pte, newpte, TRUE); 3812188341Skmacy if (*pte != newpte) 3813181641Skmacy goto retry; 3814188341Skmacy 3815181641Skmacy if ((oldpte & PG_M) != 0) 3816181641Skmacy vm_page_dirty(m); 3817181641Skmacy pmap_invalidate_page(pmap, pv->pv_va); 3818181641Skmacy } 3819181641Skmacy PMAP_UNLOCK(pmap); 3820181641Skmacy } 3821225418Skib vm_page_aflag_clear(m, PGA_WRITEABLE); 3822181641Skmacy PT_UPDATES_FLUSH(); 3823181641Skmacy if (*PMAP1) 3824181641Skmacy PT_SET_MA(PADDR1, 0); 3825181641Skmacy sched_unpin(); 3826241498Salc rw_wunlock(&pvh_global_lock); 3827181641Skmacy} 3828181641Skmacy 3829181641Skmacy/* 3830181641Skmacy * pmap_ts_referenced: 3831181641Skmacy * 3832181641Skmacy * Return a count of reference bits for a page, clearing those bits. 3833181641Skmacy * It is not necessary for every reference bit to be cleared, but it 3834181641Skmacy * is necessary that 0 only be returned when there are truly no 3835181641Skmacy * reference bits set. 3836181641Skmacy * 3837181641Skmacy * XXX: The exact number of bits to check and clear is a matter that 3838181641Skmacy * should be tested and standardized at some point in the future for 3839181641Skmacy * optimal aging of shared pages. 
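 * The implementation below settles for a small sample: it rotates the
 * pv list as it scans, so that successive calls start with different
 * mappings, and it stops once more than four referenced mappings have
 * been found and cleared.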
3840181641Skmacy */ 3841181641Skmacyint 3842181641Skmacypmap_ts_referenced(vm_page_t m) 3843181641Skmacy{ 3844181641Skmacy pv_entry_t pv, pvf, pvn; 3845181641Skmacy pmap_t pmap; 3846181641Skmacy pt_entry_t *pte; 3847181641Skmacy int rtval = 0; 3848181641Skmacy 3849224746Skib KASSERT((m->oflags & VPO_UNMANAGED) == 0, 3850208990Salc ("pmap_ts_referenced: page %p is not managed", m)); 3851241498Salc rw_wlock(&pvh_global_lock); 3852181641Skmacy sched_pin(); 3853181641Skmacy if ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) { 3854181641Skmacy pvf = pv; 3855181641Skmacy do { 3856181641Skmacy pvn = TAILQ_NEXT(pv, pv_list); 3857181641Skmacy TAILQ_REMOVE(&m->md.pv_list, pv, pv_list); 3858181641Skmacy TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list); 3859181641Skmacy pmap = PV_PMAP(pv); 3860181641Skmacy PMAP_LOCK(pmap); 3861181641Skmacy pte = pmap_pte_quick(pmap, pv->pv_va); 3862181641Skmacy if ((*pte & PG_A) != 0) { 3863181641Skmacy PT_SET_VA_MA(pte, *pte & ~PG_A, FALSE); 3864181641Skmacy pmap_invalidate_page(pmap, pv->pv_va); 3865181641Skmacy rtval++; 3866181641Skmacy if (rtval > 4) 3867181641Skmacy pvn = NULL; 3868181641Skmacy } 3869181641Skmacy PMAP_UNLOCK(pmap); 3870181641Skmacy } while ((pv = pvn) != NULL && pv != pvf); 3871181641Skmacy } 3872181641Skmacy PT_UPDATES_FLUSH(); 3873181641Skmacy if (*PMAP1) 3874181641Skmacy PT_SET_MA(PADDR1, 0); 3875181641Skmacy sched_unpin(); 3876241498Salc rw_wunlock(&pvh_global_lock); 3877181641Skmacy return (rtval); 3878181641Skmacy} 3879181641Skmacy 3880181641Skmacy/* 3881181641Skmacy * Clear the modify bits on the specified physical page. 3882181641Skmacy */ 3883181641Skmacyvoid 3884181641Skmacypmap_clear_modify(vm_page_t m) 3885181641Skmacy{ 3886181641Skmacy pv_entry_t pv; 3887181641Skmacy pmap_t pmap; 3888181641Skmacy pt_entry_t *pte; 3889181641Skmacy 3890224746Skib KASSERT((m->oflags & VPO_UNMANAGED) == 0, 3891208504Salc ("pmap_clear_modify: page %p is not managed", m)); 3892208504Salc VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 3893208504Salc KASSERT((m->oflags & VPO_BUSY) == 0, 3894208504Salc ("pmap_clear_modify: page %p is busy", m)); 3895208504Salc 3896208504Salc /* 3897225418Skib * If the page is not PGA_WRITEABLE, then no PTEs can have PG_M set. 3898208504Salc * If the object containing the page is locked and the page is not 3899225418Skib * VPO_BUSY, then PGA_WRITEABLE cannot be concurrently set. 3900208504Salc */ 3901225418Skib if ((m->aflags & PGA_WRITEABLE) == 0) 3902181641Skmacy return; 3903241498Salc rw_wlock(&pvh_global_lock); 3904181641Skmacy sched_pin(); 3905181641Skmacy TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { 3906181641Skmacy pmap = PV_PMAP(pv); 3907181641Skmacy PMAP_LOCK(pmap); 3908181641Skmacy pte = pmap_pte_quick(pmap, pv->pv_va); 3909228923Salc if ((*pte & (PG_M | PG_RW)) == (PG_M | PG_RW)) { 3910181641Skmacy /* 3911181641Skmacy * Regardless of whether a pte is 32 or 64 bits 3912181641Skmacy * in size, PG_M is among the least significant 3913181641Skmacy * 32 bits. 3914181641Skmacy */ 3915181641Skmacy PT_SET_VA_MA(pte, *pte & ~PG_M, FALSE); 3916181641Skmacy pmap_invalidate_page(pmap, pv->pv_va); 3917181641Skmacy } 3918181641Skmacy PMAP_UNLOCK(pmap); 3919181641Skmacy } 3920181641Skmacy sched_unpin(); 3921241498Salc rw_wunlock(&pvh_global_lock); 3922181641Skmacy} 3923181641Skmacy 3924181641Skmacy/* 3925181641Skmacy * pmap_clear_reference: 3926181641Skmacy * 3927181641Skmacy * Clear the reference bit on the specified physical page. 
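 *
 * Unlike pmap_ts_referenced(), this visits every mapping of the page,
 * clearing PG_A in each PTE and invalidating the TLB entry for the
 * mapping's virtual address.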
3928181641Skmacy */ 3929181641Skmacyvoid 3930181641Skmacypmap_clear_reference(vm_page_t m) 3931181641Skmacy{ 3932181641Skmacy pv_entry_t pv; 3933181641Skmacy pmap_t pmap; 3934181641Skmacy pt_entry_t *pte; 3935181641Skmacy 3936224746Skib KASSERT((m->oflags & VPO_UNMANAGED) == 0, 3937208504Salc ("pmap_clear_reference: page %p is not managed", m)); 3938241498Salc rw_wlock(&pvh_global_lock); 3939181641Skmacy sched_pin(); 3940181641Skmacy TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { 3941181641Skmacy pmap = PV_PMAP(pv); 3942181641Skmacy PMAP_LOCK(pmap); 3943181641Skmacy pte = pmap_pte_quick(pmap, pv->pv_va); 3944181641Skmacy if ((*pte & PG_A) != 0) { 3945181641Skmacy /* 3946181641Skmacy * Regardless of whether a pte is 32 or 64 bits 3947181641Skmacy * in size, PG_A is among the least significant 3948181641Skmacy * 32 bits. 3949181641Skmacy */ 3950181641Skmacy PT_SET_VA_MA(pte, *pte & ~PG_A, FALSE); 3951181641Skmacy pmap_invalidate_page(pmap, pv->pv_va); 3952181641Skmacy } 3953181641Skmacy PMAP_UNLOCK(pmap); 3954181641Skmacy } 3955181641Skmacy sched_unpin(); 3956241498Salc rw_wunlock(&pvh_global_lock); 3957181641Skmacy} 3958181641Skmacy 3959181641Skmacy/* 3960181641Skmacy * Miscellaneous support routines follow 3961181641Skmacy */ 3962181641Skmacy 3963181641Skmacy/* 3964181641Skmacy * Map a set of physical memory pages into the kernel virtual 3965181641Skmacy * address space. Return a pointer to where it is mapped. This 3966181641Skmacy * routine is intended to be used for mapping device memory, 3967181641Skmacy * NOT real memory. 3968181641Skmacy */ 3969181641Skmacyvoid * 3970181641Skmacypmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, int mode) 3971181641Skmacy{ 3972195949Skib vm_offset_t va, offset; 3973195949Skib vm_size_t tmpsize; 3974181641Skmacy 3975181641Skmacy offset = pa & PAGE_MASK; 3976246855Sjkim size = round_page(offset + size); 3977181641Skmacy pa = pa & PG_FRAME; 3978181641Skmacy 3979181641Skmacy if (pa < KERNLOAD && pa + size <= KERNLOAD) 3980181641Skmacy va = KERNBASE + pa; 3981181641Skmacy else 3982181641Skmacy va = kmem_alloc_nofault(kernel_map, size); 3983181641Skmacy if (!va) 3984181641Skmacy panic("pmap_mapdev: Couldn't alloc kernel virtual memory"); 3985181641Skmacy 3986195949Skib for (tmpsize = 0; tmpsize < size; tmpsize += PAGE_SIZE) 3987195949Skib pmap_kenter_attr(va + tmpsize, pa + tmpsize, mode); 3988195949Skib pmap_invalidate_range(kernel_pmap, va, va + tmpsize); 3989195949Skib pmap_invalidate_cache_range(va, va + size); 3990181641Skmacy return ((void *)(va + offset)); 3991181641Skmacy} 3992181641Skmacy 3993181641Skmacyvoid * 3994181641Skmacypmap_mapdev(vm_paddr_t pa, vm_size_t size) 3995181641Skmacy{ 3996181641Skmacy 3997181641Skmacy return (pmap_mapdev_attr(pa, size, PAT_UNCACHEABLE)); 3998181641Skmacy} 3999181641Skmacy 4000181641Skmacyvoid * 4001181641Skmacypmap_mapbios(vm_paddr_t pa, vm_size_t size) 4002181641Skmacy{ 4003181641Skmacy 4004181641Skmacy return (pmap_mapdev_attr(pa, size, PAT_WRITE_BACK)); 4005181641Skmacy} 4006181641Skmacy 4007181641Skmacyvoid 4008181641Skmacypmap_unmapdev(vm_offset_t va, vm_size_t size) 4009181641Skmacy{ 4010240317Salc vm_offset_t base, offset; 4011181641Skmacy 4012181641Skmacy if (va >= KERNBASE && va + size <= KERNBASE + KERNLOAD) 4013181641Skmacy return; 4014181641Skmacy base = trunc_page(va); 4015181641Skmacy offset = va & PAGE_MASK; 4016246855Sjkim size = round_page(offset + size); 4017181641Skmacy kmem_free(kernel_map, base, size); 4018181641Skmacy} 4019181641Skmacy 4020195774Salc/* 4021195774Salc * Sets the memory attribute 
for the specified page.
 */
void
pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
{

	m->md.pat_mode = ma;
	if ((m->flags & PG_FICTITIOUS) != 0)
		return;

	/*
	 * If "m" is a normal page, flush it from the cache.
	 * See pmap_invalidate_cache_range().
	 *
	 * First, try to find an existing mapping of the page by sf
	 * buffer.  sf_buf_invalidate_cache() modifies mapping and
	 * flushes the cache.
	 */
	if (sf_buf_invalidate_cache(m))
		return;

	/*
	 * If the page is not mapped by an sf buffer, but the CPU does
	 * not support self snoop, map the page transiently and do the
	 * invalidation.  In the worst case, the whole cache is flushed
	 * by pmap_invalidate_cache_range().
	 */
	if ((cpu_feature & CPUID_SS) == 0)
		pmap_flush_page(m);
}

static void
pmap_flush_page(vm_page_t m)
{
	struct sysmaps *sysmaps;
	vm_offset_t sva, eva;

	if ((cpu_feature & CPUID_CLFSH) != 0) {
		sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)];
		mtx_lock(&sysmaps->lock);
		if (*sysmaps->CMAP2)
			panic("pmap_flush_page: CMAP2 busy");
		sched_pin();
		PT_SET_MA(sysmaps->CADDR2, PG_V | PG_RW |
		    VM_PAGE_TO_MACH(m) | PG_A | PG_M |
		    pmap_cache_bits(m->md.pat_mode, 0));
		invlcaddr(sysmaps->CADDR2);
		sva = (vm_offset_t)sysmaps->CADDR2;
		eva = sva + PAGE_SIZE;

		/*
		 * Use mfence despite the ordering implied by
		 * mtx_{un,}lock() because clflush is not guaranteed
		 * to be ordered by any other instruction.
		 */
		mfence();
		for (; sva < eva; sva += cpu_clflush_line_size)
			clflush(sva);
		mfence();
		PT_SET_MA(sysmaps->CADDR2, 0);
		sched_unpin();
		mtx_unlock(&sysmaps->lock);
	} else
		pmap_invalidate_cache();
}

/*
 * Changes the specified virtual address range's memory type to that given by
 * the parameter "mode".  The specified virtual address range must be
 * completely contained within the kernel map.
 *
 * Returns zero if the change completed successfully, and either EINVAL or
 * ENOMEM if the change failed.  Specifically, EINVAL is returned if some part
 * of the virtual address range was not mapped, and ENOMEM is returned if
 * there was insufficient memory available to complete the change.
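 *
 * An illustrative (hypothetical) caller, mapping a device region and
 * then requesting write-combining for it:
 *
 *	va = pmap_mapdev(bar_pa, bar_len);
 *	error = pmap_change_attr((vm_offset_t)va, bar_len,
 *	    PAT_WRITE_COMBINING);
 *	(on error, continue using the default uncacheable mapping)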
4096228923Salc */ 4097181641Skmacyint 4098228923Salcpmap_change_attr(vm_offset_t va, vm_size_t size, int mode) 4099181641Skmacy{ 4100181641Skmacy vm_offset_t base, offset, tmpva; 4101181641Skmacy pt_entry_t *pte; 4102181641Skmacy u_int opte, npte; 4103181641Skmacy pd_entry_t *pde; 4104195949Skib boolean_t changed; 4105181641Skmacy 4106181641Skmacy base = trunc_page(va); 4107181641Skmacy offset = va & PAGE_MASK; 4108246855Sjkim size = round_page(offset + size); 4109181641Skmacy 4110181641Skmacy /* Only supported on kernel virtual addresses. */ 4111181641Skmacy if (base <= VM_MAXUSER_ADDRESS) 4112181641Skmacy return (EINVAL); 4113181641Skmacy 4114181641Skmacy /* 4MB pages and pages that aren't mapped aren't supported. */ 4115181641Skmacy for (tmpva = base; tmpva < (base + size); tmpva += PAGE_SIZE) { 4116181641Skmacy pde = pmap_pde(kernel_pmap, tmpva); 4117181641Skmacy if (*pde & PG_PS) 4118181641Skmacy return (EINVAL); 4119181641Skmacy if ((*pde & PG_V) == 0) 4120181641Skmacy return (EINVAL); 4121181641Skmacy pte = vtopte(va); 4122181641Skmacy if ((*pte & PG_V) == 0) 4123181641Skmacy return (EINVAL); 4124181641Skmacy } 4125181641Skmacy 4126195949Skib changed = FALSE; 4127195949Skib 4128181641Skmacy /* 4129181641Skmacy * Ok, all the pages exist and are 4k, so run through them updating 4130181641Skmacy * their cache mode. 4131181641Skmacy */ 4132181641Skmacy for (tmpva = base; size > 0; ) { 4133181641Skmacy pte = vtopte(tmpva); 4134181641Skmacy 4135181641Skmacy /* 4136181641Skmacy * The cache mode bits are all in the low 32-bits of the 4137181641Skmacy * PTE, so we can just spin on updating the low 32-bits. 4138181641Skmacy */ 4139181641Skmacy do { 4140181641Skmacy opte = *(u_int *)pte; 4141181641Skmacy npte = opte & ~(PG_PTE_PAT | PG_NC_PCD | PG_NC_PWT); 4142181641Skmacy npte |= pmap_cache_bits(mode, 0); 4143181641Skmacy PT_SET_VA_MA(pte, npte, TRUE); 4144181641Skmacy } while (npte != opte && (*pte != npte)); 4145195949Skib if (npte != opte) 4146195949Skib changed = TRUE; 4147181641Skmacy tmpva += PAGE_SIZE; 4148181641Skmacy size -= PAGE_SIZE; 4149181641Skmacy } 4150181641Skmacy 4151181641Skmacy /* 4152228923Salc * Flush CPU caches to make sure any data isn't cached that 4153228923Salc * shouldn't be, etc. 4154181641Skmacy */ 4155195949Skib if (changed) { 4156195949Skib pmap_invalidate_range(kernel_pmap, base, tmpva); 4157195949Skib pmap_invalidate_cache_range(base, tmpva); 4158195949Skib } 4159181641Skmacy return (0); 4160181641Skmacy} 4161181641Skmacy 4162181641Skmacy/* 4163181641Skmacy * perform the pmap work for mincore 4164181641Skmacy */ 4165181641Skmacyint 4166208504Salcpmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa) 4167181641Skmacy{ 4168181641Skmacy pt_entry_t *ptep, pte; 4169208504Salc vm_paddr_t pa; 4170208504Salc int val; 4171228923Salc 4172181641Skmacy PMAP_LOCK(pmap); 4173208504Salcretry: 4174181641Skmacy ptep = pmap_pte(pmap, addr); 4175181641Skmacy pte = (ptep != NULL) ? 
PT_GET(ptep) : 0; 4176181641Skmacy pmap_pte_release(ptep); 4177208504Salc val = 0; 4178208504Salc if ((pte & PG_V) != 0) { 4179208504Salc val |= MINCORE_INCORE; 4180208504Salc if ((pte & (PG_M | PG_RW)) == (PG_M | PG_RW)) 4181208504Salc val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER; 4182208504Salc if ((pte & PG_A) != 0) 4183208504Salc val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER; 4184208504Salc } 4185208504Salc if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) != 4186208504Salc (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) && 4187208504Salc (pte & (PG_MANAGED | PG_V)) == (PG_MANAGED | PG_V)) { 4188208504Salc pa = pte & PG_FRAME; 4189208504Salc /* Ensure that "PHYS_TO_VM_PAGE(pa)->object" doesn't change. */ 4190208504Salc if (vm_page_pa_tryrelock(pmap, pa, locked_pa)) 4191208504Salc goto retry; 4192208504Salc } else 4193208504Salc PA_UNLOCK_COND(*locked_pa); 4194181641Skmacy PMAP_UNLOCK(pmap); 4195208504Salc return (val); 4196181641Skmacy} 4197181641Skmacy 4198181641Skmacyvoid 4199181641Skmacypmap_activate(struct thread *td) 4200181641Skmacy{ 4201181641Skmacy pmap_t pmap, oldpmap; 4202223758Sattilio u_int cpuid; 4203181641Skmacy u_int32_t cr3; 4204181641Skmacy 4205181641Skmacy critical_enter(); 4206181641Skmacy pmap = vmspace_pmap(td->td_proc->p_vmspace); 4207181641Skmacy oldpmap = PCPU_GET(curpmap); 4208223758Sattilio cpuid = PCPU_GET(cpuid); 4209181641Skmacy#if defined(SMP) 4210223758Sattilio CPU_CLR_ATOMIC(cpuid, &oldpmap->pm_active); 4211223758Sattilio CPU_SET_ATOMIC(cpuid, &pmap->pm_active); 4212181641Skmacy#else 4213223758Sattilio CPU_CLR(cpuid, &oldpmap->pm_active); 4214223758Sattilio CPU_SET(cpuid, &pmap->pm_active); 4215181641Skmacy#endif 4216181641Skmacy#ifdef PAE 4217181641Skmacy cr3 = vtophys(pmap->pm_pdpt); 4218181641Skmacy#else 4219181641Skmacy cr3 = vtophys(pmap->pm_pdir); 4220181641Skmacy#endif 4221181641Skmacy /* 4222181641Skmacy * pmap_activate is for the current thread on the current cpu 4223181641Skmacy */ 4224181641Skmacy td->td_pcb->pcb_cr3 = cr3; 4225181641Skmacy PT_UPDATES_FLUSH(); 4226181641Skmacy load_cr3(cr3); 4227181641Skmacy PCPU_SET(curpmap, pmap); 4228181641Skmacy critical_exit(); 4229181641Skmacy} 4230181641Skmacy 4231198341Smarcelvoid 4232198341Smarcelpmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz) 4233198341Smarcel{ 4234198341Smarcel} 4235198341Smarcel 4236181747Skmacy/* 4237181747Skmacy * Increase the starting virtual address of the given mapping if a 4238181747Skmacy * different alignment might result in more superpage mappings. 
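 *
 * For example, assuming 4MB superpages (NBPDR == 4MB): with
 * offset == 0x00612000 and a sufficiently large size,
 * superpage_offset is 0x212000, so a proposed *addr of 0x30000000 is
 * advanced to 0x30212000; addr and offset then agree modulo NBPDR,
 * and the bulk of the mapping is eligible for superpage promotion.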
4239181747Skmacy */ 4240181747Skmacyvoid 4241181747Skmacypmap_align_superpage(vm_object_t object, vm_ooffset_t offset, 4242181747Skmacy vm_offset_t *addr, vm_size_t size) 4243181641Skmacy{ 4244181747Skmacy vm_offset_t superpage_offset; 4245181641Skmacy 4246181747Skmacy if (size < NBPDR) 4247181747Skmacy return; 4248181747Skmacy if (object != NULL && (object->flags & OBJ_COLORED) != 0) 4249181747Skmacy offset += ptoa(object->pg_color); 4250181747Skmacy superpage_offset = offset & PDRMASK; 4251181747Skmacy if (size - ((NBPDR - superpage_offset) & PDRMASK) < NBPDR || 4252181747Skmacy (*addr & PDRMASK) == superpage_offset) 4253181747Skmacy return; 4254181747Skmacy if ((*addr & PDRMASK) < superpage_offset) 4255181747Skmacy *addr = (*addr & ~PDRMASK) + superpage_offset; 4256181747Skmacy else 4257181747Skmacy *addr = ((*addr + PDRMASK) & ~PDRMASK) + superpage_offset; 4258181641Skmacy} 4259181641Skmacy 4260190627Sdfrvoid 4261190627Sdfrpmap_suspend() 4262190627Sdfr{ 4263190627Sdfr pmap_t pmap; 4264190627Sdfr int i, pdir, offset; 4265190627Sdfr vm_paddr_t pdirma; 4266190627Sdfr mmu_update_t mu[4]; 4267190627Sdfr 4268190627Sdfr /* 4269190627Sdfr * We need to remove the recursive mapping structure from all 4270190627Sdfr * our pmaps so that Xen doesn't get confused when it restores 4271190627Sdfr * the page tables. The recursive map lives at page directory 4272190627Sdfr * index PTDPTDI. We assume that the suspend code has stopped 4273190627Sdfr * the other vcpus (if any). 4274190627Sdfr */ 4275190627Sdfr LIST_FOREACH(pmap, &allpmaps, pm_list) { 4276190627Sdfr for (i = 0; i < 4; i++) { 4277190627Sdfr /* 4278190627Sdfr * Figure out which page directory (L2) page 4279190627Sdfr * contains this bit of the recursive map and 4280190627Sdfr * the offset within that page of the map 4281190627Sdfr * entry 4282190627Sdfr */ 4283190627Sdfr pdir = (PTDPTDI + i) / NPDEPG; 4284190627Sdfr offset = (PTDPTDI + i) % NPDEPG; 4285190627Sdfr pdirma = pmap->pm_pdpt[pdir] & PG_FRAME; 4286190627Sdfr mu[i].ptr = pdirma + offset * sizeof(pd_entry_t); 4287190627Sdfr mu[i].val = 0; 4288190627Sdfr } 4289190627Sdfr HYPERVISOR_mmu_update(mu, 4, NULL, DOMID_SELF); 4290190627Sdfr } 4291190627Sdfr} 4292190627Sdfr 4293190627Sdfrvoid 4294190627Sdfrpmap_resume() 4295190627Sdfr{ 4296190627Sdfr pmap_t pmap; 4297190627Sdfr int i, pdir, offset; 4298190627Sdfr vm_paddr_t pdirma; 4299190627Sdfr mmu_update_t mu[4]; 4300190627Sdfr 4301190627Sdfr /* 4302190627Sdfr * Restore the recursive map that we removed on suspend. 
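	 *
	 * This is the inverse of pmap_suspend(): the same PTDPTDI
	 * slots are rewritten, this time pointing each slot back at
	 * the corresponding page directory page (pm_pdpt[i]) with
	 * PG_V set.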
4303190627Sdfr */ 4304190627Sdfr LIST_FOREACH(pmap, &allpmaps, pm_list) { 4305190627Sdfr for (i = 0; i < 4; i++) { 4306190627Sdfr /* 4307190627Sdfr * Figure out which page directory (L2) page 4308190627Sdfr * contains this bit of the recursive map and 4309190627Sdfr * the offset within that page of the map 4310190627Sdfr * entry 4311190627Sdfr */ 4312190627Sdfr pdir = (PTDPTDI + i) / NPDEPG; 4313190627Sdfr offset = (PTDPTDI + i) % NPDEPG; 4314190627Sdfr pdirma = pmap->pm_pdpt[pdir] & PG_FRAME; 4315190627Sdfr mu[i].ptr = pdirma + offset * sizeof(pd_entry_t); 4316190627Sdfr mu[i].val = (pmap->pm_pdpt[i] & PG_FRAME) | PG_V; 4317190627Sdfr } 4318190627Sdfr HYPERVISOR_mmu_update(mu, 4, NULL, DOMID_SELF); 4319190627Sdfr } 4320190627Sdfr} 4321190627Sdfr 4322181641Skmacy#if defined(PMAP_DEBUG) 4323181641Skmacypmap_pid_dump(int pid) 4324181641Skmacy{ 4325181641Skmacy pmap_t pmap; 4326181641Skmacy struct proc *p; 4327181641Skmacy int npte = 0; 4328181641Skmacy int index; 4329181641Skmacy 4330181641Skmacy sx_slock(&allproc_lock); 4331181641Skmacy FOREACH_PROC_IN_SYSTEM(p) { 4332181641Skmacy if (p->p_pid != pid) 4333181641Skmacy continue; 4334181641Skmacy 4335181641Skmacy if (p->p_vmspace) { 4336181641Skmacy int i,j; 4337181641Skmacy index = 0; 4338181641Skmacy pmap = vmspace_pmap(p->p_vmspace); 4339181641Skmacy for (i = 0; i < NPDEPTD; i++) { 4340181641Skmacy pd_entry_t *pde; 4341181641Skmacy pt_entry_t *pte; 4342181641Skmacy vm_offset_t base = i << PDRSHIFT; 4343181641Skmacy 4344181641Skmacy pde = &pmap->pm_pdir[i]; 4345181641Skmacy if (pde && pmap_pde_v(pde)) { 4346181641Skmacy for (j = 0; j < NPTEPG; j++) { 4347181641Skmacy vm_offset_t va = base + (j << PAGE_SHIFT); 4348181641Skmacy if (va >= (vm_offset_t) VM_MIN_KERNEL_ADDRESS) { 4349181641Skmacy if (index) { 4350181641Skmacy index = 0; 4351181641Skmacy printf("\n"); 4352181641Skmacy } 4353181641Skmacy sx_sunlock(&allproc_lock); 4354228923Salc return (npte); 4355181641Skmacy } 4356181641Skmacy pte = pmap_pte(pmap, va); 4357181641Skmacy if (pte && pmap_pte_v(pte)) { 4358181641Skmacy pt_entry_t pa; 4359181641Skmacy vm_page_t m; 4360181641Skmacy pa = PT_GET(pte); 4361181641Skmacy m = PHYS_TO_VM_PAGE(pa & PG_FRAME); 4362181641Skmacy printf("va: 0x%x, pt: 0x%x, h: %d, w: %d, f: 0x%x", 4363181641Skmacy va, pa, m->hold_count, m->wire_count, m->flags); 4364181641Skmacy npte++; 4365181641Skmacy index++; 4366181641Skmacy if (index >= 2) { 4367181641Skmacy index = 0; 4368181641Skmacy printf("\n"); 4369181641Skmacy } else { 4370181641Skmacy printf(" "); 4371181641Skmacy } 4372181641Skmacy } 4373181641Skmacy } 4374181641Skmacy } 4375181641Skmacy } 4376181641Skmacy } 4377181641Skmacy } 4378181641Skmacy sx_sunlock(&allproc_lock); 4379228923Salc return (npte); 4380181641Skmacy} 4381181641Skmacy#endif 4382181641Skmacy 4383181641Skmacy#if defined(DEBUG) 4384181641Skmacy 4385181641Skmacystatic void pads(pmap_t pm); 4386181641Skmacyvoid pmap_pvdump(vm_paddr_t pa); 4387181641Skmacy 4388181641Skmacy/* print address space of pmap*/ 4389181641Skmacystatic void 4390181641Skmacypads(pmap_t pm) 4391181641Skmacy{ 4392181641Skmacy int i, j; 4393181641Skmacy vm_paddr_t va; 4394181641Skmacy pt_entry_t *ptep; 4395181641Skmacy 4396181641Skmacy if (pm == kernel_pmap) 4397181641Skmacy return; 4398181641Skmacy for (i = 0; i < NPDEPTD; i++) 4399181641Skmacy if (pm->pm_pdir[i]) 4400181641Skmacy for (j = 0; j < NPTEPG; j++) { 4401181641Skmacy va = (i << PDRSHIFT) + (j << PAGE_SHIFT); 4402181641Skmacy if (pm == kernel_pmap && va < KERNBASE) 4403181641Skmacy continue; 4404181641Skmacy 
if (pm != kernel_pmap && va > UPT_MAX_ADDRESS) 4405181641Skmacy continue; 4406181641Skmacy ptep = pmap_pte(pm, va); 4407181641Skmacy if (pmap_pte_v(ptep)) 4408181641Skmacy printf("%x:%x ", va, *ptep); 4409181641Skmacy }; 4410181641Skmacy 4411181641Skmacy} 4412181641Skmacy 4413181641Skmacyvoid 4414181641Skmacypmap_pvdump(vm_paddr_t pa) 4415181641Skmacy{ 4416181641Skmacy pv_entry_t pv; 4417181641Skmacy pmap_t pmap; 4418181641Skmacy vm_page_t m; 4419181641Skmacy 4420181641Skmacy printf("pa %x", pa); 4421181641Skmacy m = PHYS_TO_VM_PAGE(pa); 4422181641Skmacy TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { 4423181641Skmacy pmap = PV_PMAP(pv); 4424181641Skmacy printf(" -> pmap %p, va %x", (void *)pmap, pv->pv_va); 4425181641Skmacy pads(pmap); 4426181641Skmacy } 4427181641Skmacy printf(" "); 4428181641Skmacy} 4429181641Skmacy#endif 4430