/*-
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 * Copyright (c) 2005 Alan L. Cox <alc@cs.rice.edu>
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      from:   @(#)pmap.c      7.7 (Berkeley)  5/12/91
 */
/*-
 * Copyright (c) 2003 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Jake Burkholder,
 * Safeport Network Services, and Network Associates Laboratories, the
 * Security Research Division of Network Associates, Inc. under
 * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA
 * CHATS research program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/i386/xen/pmap.c 225418 2011-09-06 10:30:11Z kib $");

/*
 *      Manages physical address maps.
 *
 *      In addition to hardware address maps, this
 *      module is called upon to provide software-use-only
 *      maps which may or may not be stored in the same
 *      form as hardware maps.  These pseudo-maps are
 *      used to store intermediate results from copy
 *      operations to and from address spaces.
 *
 *      Since the information managed by this module is
 *      also stored by the logical address mapping module,
 *      this module may throw away valid virtual-to-physical
 *      mappings at almost any time.  However, invalidations
 *      of virtual-to-physical mappings must be done as
 *      requested.
 *
 *      In order to cope with hardware architectures which
 *      make virtual-to-physical map invalidates expensive,
 *      this module may delay invalidate or reduce protection
 *      operations until such time as they are actually
 *      necessary.  This module is given full information as
 *      to which processors are currently using which maps,
 *      and to when physical maps must be made correct.
 */
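
/*
 * Illustrative sketch of the "invalidate on request" contract above (not
 * part of the interface itself): after a caller tears down a mapping, the
 * TLB is brought up to date explicitly, e.g.
 *
 *      pmap_remove_page(pmap, va, &free);
 *      pmap_invalidate_page(pmap, va);
 *
 * whereas plain protection reductions may be batched and flushed later.
 */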

#include "opt_cpu.h"
#include "opt_pmap.h"
#include "opt_smp.h"
#include "opt_xbox.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sf_buf.h>
#include <sys/sx.h>
#include <sys/vmmeter.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#ifdef SMP
#include <sys/smp.h>
#endif

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/uma.h>

#include <machine/cpu.h>
#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/specialreg.h>
#ifdef SMP
#include <machine/smp.h>
#endif

#ifdef XBOX
#include <machine/xbox.h>
#endif

#include <xen/interface/xen.h>
#include <xen/hypervisor.h>
#include <machine/xen/hypercall.h>
#include <machine/xen/xenvar.h>
#include <machine/xen/xenfunc.h>

#if !defined(CPU_DISABLE_SSE) && defined(I686_CPU)
#define CPU_ENABLE_SSE
#endif

#ifndef PMAP_SHPGPERPROC
#define PMAP_SHPGPERPROC 200
#endif

#define DIAGNOSTIC

#if !defined(DIAGNOSTIC)
#ifdef __GNUC_GNU_INLINE__
#define PMAP_INLINE     __attribute__((__gnu_inline__)) inline
#else
#define PMAP_INLINE     extern inline
#endif
#else
#define PMAP_INLINE
#endif

#define PV_STATS
#ifdef PV_STATS
#define PV_STAT(x)      do { x ; } while (0)
#else
#define PV_STAT(x)      do { } while (0)
#endif

#define pa_index(pa)    ((pa) >> PDRSHIFT)
#define pa_to_pvh(pa)   (&pv_table[pa_index(pa)])

/*
 * Get PDEs and PTEs for user/kernel address space
 */
#define pmap_pde(m, v)  (&((m)->pm_pdir[(vm_offset_t)(v) >> PDRSHIFT]))
#define pdir_pde(m, v) (m[(vm_offset_t)(v) >> PDRSHIFT])

#define pmap_pde_v(pte)         ((*(int *)pte & PG_V) != 0)
#define pmap_pte_w(pte)         ((*(int *)pte & PG_W) != 0)
#define pmap_pte_m(pte)         ((*(int *)pte & PG_M) != 0)
#define pmap_pte_u(pte)         ((*(int *)pte & PG_A) != 0)
#define pmap_pte_v(pte)         ((*(int *)pte & PG_V) != 0)
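
/*
 * Worked example (illustrative only): on non-PAE i386, PDRSHIFT is 22, so
 * for va = 0xc0401234, va >> PDRSHIFT == 0x301 and pmap_pde(pmap, va)
 * simply returns &pmap->pm_pdir[0x301]; the pmap_p*e_* test macros then
 * probe individual flag bits of the entry it points at.
 */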

#define pmap_pte_set_prot(pte, v) ((*(int *)pte &= ~PG_PROT), (*(int *)pte |= (v)))

#define HAMFISTED_LOCKING
#ifdef HAMFISTED_LOCKING
static struct mtx createdelete_lock;
#endif

struct pmap kernel_pmap_store;
LIST_HEAD(pmaplist, pmap);
static struct pmaplist allpmaps;
static struct mtx allpmaps_lock;

vm_offset_t virtual_avail;      /* VA of first avail page (after kernel bss) */
vm_offset_t virtual_end;        /* VA of last avail page (end of kernel AS) */
int pgeflag = 0;                /* PG_G or-in */
int pseflag = 0;                /* PG_PS or-in */

int nkpt;
vm_offset_t kernel_vm_end;
extern u_int32_t KERNend;

#ifdef PAE
pt_entry_t pg_nx;
#endif

static int pat_works;          /* Is page attribute table sane? */

/*
 * Data for the pv entry allocation mechanism
 */
static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;
static struct md_page *pv_table;
static int shpgperproc = PMAP_SHPGPERPROC;

struct pv_chunk *pv_chunkbase;  /* KVA block for pv_chunks */
int pv_maxchunks;               /* How many chunks we have KVA for */
vm_offset_t pv_vafree;          /* freelist stored in the PTE */

/*
 * All those kernel PT submaps that BSD is so fond of
 */
struct sysmaps {
        struct mtx lock;
        pt_entry_t *CMAP1;
        pt_entry_t *CMAP2;
        caddr_t CADDR1;
        caddr_t CADDR2;
};
static struct sysmaps sysmaps_pcpu[MAXCPU];
static pt_entry_t *CMAP3;
caddr_t ptvmmap = 0;
static caddr_t CADDR3;
struct msgbuf *msgbufp = 0;
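
/*
 * Usage sketch (illustrative, assuming callers follow the native i386 pmap
 * convention): each CPU's CMAP1/CMAP2 slots give it a private window for
 * temporarily mapping one physical page, e.g.
 *
 *      struct sysmaps *sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)];
 *      mtx_lock(&sysmaps->lock);
 *      ... install a pte in *sysmaps->CMAP1, touch it via sysmaps->CADDR1 ...
 *      mtx_unlock(&sysmaps->lock);
 */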

/*
 * Crashdump maps.
 */
static caddr_t crashdumpmap;

static pt_entry_t *PMAP1 = 0, *PMAP2;
static pt_entry_t *PADDR1 = 0, *PADDR2;
#ifdef SMP
static int PMAP1cpu;
static int PMAP1changedcpu;
SYSCTL_INT(_debug, OID_AUTO, PMAP1changedcpu, CTLFLAG_RD,
    &PMAP1changedcpu, 0,
    "Number of times pmap_pte_quick changed CPU with same PMAP1");
#endif
static int PMAP1changed;
SYSCTL_INT(_debug, OID_AUTO, PMAP1changed, CTLFLAG_RD,
    &PMAP1changed, 0,
    "Number of times pmap_pte_quick changed PMAP1");
static int PMAP1unchanged;
SYSCTL_INT(_debug, OID_AUTO, PMAP1unchanged, CTLFLAG_RD,
    &PMAP1unchanged, 0,
    "Number of times pmap_pte_quick didn't change PMAP1");
static struct mtx PMAP2mutex;

SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD, 0, "VM/pmap parameters");
static int pg_ps_enabled;
SYSCTL_INT(_vm_pmap, OID_AUTO, pg_ps_enabled, CTLFLAG_RDTUN, &pg_ps_enabled, 0,
    "Are large page mappings enabled?");

SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_max, CTLFLAG_RD, &pv_entry_max, 0,
    "Max number of PV entries");
SYSCTL_INT(_vm_pmap, OID_AUTO, shpgperproc, CTLFLAG_RD, &shpgperproc, 0,
    "Page share factor per proc");
SYSCTL_NODE(_vm_pmap, OID_AUTO, pde, CTLFLAG_RD, 0,
    "2/4MB page mapping counters");

static u_long pmap_pde_mappings;
SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, mappings, CTLFLAG_RD,
    &pmap_pde_mappings, 0, "2/4MB page mappings");

static void     free_pv_entry(pmap_t pmap, pv_entry_t pv);
static pv_entry_t get_pv_entry(pmap_t locked_pmap, int try);
static void     pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va);
static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap,
                    vm_offset_t va);

static vm_page_t pmap_enter_quick_locked(multicall_entry_t **mcl, int *count,
    pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, vm_page_t mpte);
static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t sva,
    vm_page_t *free);
static void pmap_remove_page(struct pmap *pmap, vm_offset_t va,
    vm_page_t *free);
static void pmap_remove_entry(struct pmap *pmap, vm_page_t m,
    vm_offset_t va);
static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va,
    vm_page_t m);

static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags);

static vm_page_t _pmap_allocpte(pmap_t pmap, unsigned ptepindex, int flags);
static int _pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m, vm_page_t *free);
static pt_entry_t *pmap_pte_quick(pmap_t pmap, vm_offset_t va);
static void pmap_pte_release(pt_entry_t *pte);
static int pmap_unuse_pt(pmap_t, vm_offset_t, vm_page_t *);
static vm_offset_t pmap_kmem_choose(vm_offset_t addr);
static boolean_t pmap_is_prefaultable_locked(pmap_t pmap, vm_offset_t addr);
static void pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode);

static __inline void pagezero(void *page);

CTASSERT(1 << PDESHIFT == sizeof(pd_entry_t));
CTASSERT(1 << PTESHIFT == sizeof(pt_entry_t));

/*
 * If you get an error here, then you set KVA_PAGES wrong! See the
 * description of KVA_PAGES in sys/i386/include/pmap.h. It must be a
 * multiple of 4 for a normal kernel, or a multiple of 8 for PAE.
 */
CTASSERT(KERNBASE % (1 << 24) == 0);

void
pd_set(struct pmap *pmap, int ptepindex, vm_paddr_t val, int type)
{
        vm_paddr_t pdir_ma = vtomach(&pmap->pm_pdir[ptepindex]);

        switch (type) {
        case SH_PD_SET_VA:
#if 0
                xen_queue_pt_update(shadow_pdir_ma,
                    xpmap_ptom(val & ~(PG_RW)));
#endif
                xen_queue_pt_update(pdir_ma,
                    xpmap_ptom(val));
                break;
        case SH_PD_SET_VA_MA:
#if 0
                xen_queue_pt_update(shadow_pdir_ma,
                    val & ~(PG_RW));
#endif
                xen_queue_pt_update(pdir_ma, val);
                break;
        case SH_PD_SET_VA_CLEAR:
#if 0
                xen_queue_pt_update(shadow_pdir_ma, 0);
#endif
                xen_queue_pt_update(pdir_ma, 0);
                break;
        }
}

/*
 * Move the kernel virtual free pointer to the next
 * 4MB.  This is used to help improve performance
 * by using a large (4MB) page for much of the kernel
 * (.text, .data, .bss)
 */
static vm_offset_t
pmap_kmem_choose(vm_offset_t addr)
{
        vm_offset_t newaddr = addr;

#ifndef DISABLE_PSE
        if (cpu_feature & CPUID_PSE)
                newaddr = (addr + PDRMASK) & ~PDRMASK;
#endif
        return newaddr;
}
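
/*
 * Illustrative arithmetic for the round-up above: with 4 MB pages,
 * PDRMASK == 0x3fffff, so e.g. addr = 0xc0123456 becomes
 * (0xc0123456 + 0x3fffff) & ~0x3fffff == 0xc0400000, the next 4 MB
 * boundary; an already-aligned address is returned unchanged.
 */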

/*
 *      Bootstrap the system enough to run with virtual memory.
 *
 *      On the i386 this is called after mapping has already been enabled
 *      and just syncs the pmap module with what has already been done.
 *      [We can't call it easily with mapping off since the kernel is not
 *      mapped with PA == VA, hence we would have to relocate every address
 *      from the linked base (virtual) address "KERNBASE" to the actual
 *      (physical) address starting relative to 0]
 */
void
pmap_bootstrap(vm_paddr_t firstaddr)
{
        vm_offset_t va;
        pt_entry_t *pte, *unused;
        struct sysmaps *sysmaps;
        int i;

        /*
         * XXX The calculation of virtual_avail is wrong.  It's NKPT*PAGE_SIZE
         * too large.  It should instead be correctly calculated in locore.s
         * and not based on 'first' (which is a physical address, not a
         * virtual address, for the start of unused physical memory).  The
         * kernel page tables are NOT double mapped and thus should not be
         * included in this calculation.
         */
        virtual_avail = (vm_offset_t) KERNBASE + firstaddr;
        virtual_avail = pmap_kmem_choose(virtual_avail);

        virtual_end = VM_MAX_KERNEL_ADDRESS;

        /*
         * Initialize the kernel pmap (which is statically allocated).
         */
        PMAP_LOCK_INIT(kernel_pmap);
        kernel_pmap->pm_pdir = (pd_entry_t *) (KERNBASE + (u_int)IdlePTD);
#ifdef PAE
        kernel_pmap->pm_pdpt = (pdpt_entry_t *) (KERNBASE + (u_int)IdlePDPT);
#endif
        CPU_FILL(&kernel_pmap->pm_active);      /* don't allow deactivation */
        TAILQ_INIT(&kernel_pmap->pm_pvchunk);
        LIST_INIT(&allpmaps);
        mtx_init(&allpmaps_lock, "allpmaps", NULL, MTX_SPIN);
        mtx_lock_spin(&allpmaps_lock);
        LIST_INSERT_HEAD(&allpmaps, kernel_pmap, pm_list);
        mtx_unlock_spin(&allpmaps_lock);
        if (nkpt == 0)
                nkpt = NKPT;

        /*
         * Reserve some special page table entries/VA space for temporary
         * mapping of pages.
         */
#define SYSMAP(c, p, v, n)      \
        v = (c)va; va += ((n)*PAGE_SIZE); p = pte; pte += (n);

        va = virtual_avail;
        pte = vtopte(va);

        /*
         * CMAP1/CMAP2 are used for zeroing and copying pages.
         * CMAP3 is used for the idle process page zeroing.
         */
        for (i = 0; i < MAXCPU; i++) {
                sysmaps = &sysmaps_pcpu[i];
                mtx_init(&sysmaps->lock, "SYSMAPS", NULL, MTX_DEF);
                SYSMAP(caddr_t, sysmaps->CMAP1, sysmaps->CADDR1, 1)
                SYSMAP(caddr_t, sysmaps->CMAP2, sysmaps->CADDR2, 1)
                PT_SET_MA(sysmaps->CADDR1, 0);
                PT_SET_MA(sysmaps->CADDR2, 0);
        }
        SYSMAP(caddr_t, CMAP3, CADDR3, 1)
        PT_SET_MA(CADDR3, 0);

        /*
         * Crashdump maps.
         */
        SYSMAP(caddr_t, unused, crashdumpmap, MAXDUMPPGS)

        /*
         * ptvmmap is used for reading arbitrary physical pages via /dev/mem.
         */
        SYSMAP(caddr_t, unused, ptvmmap, 1)

        /*
         * msgbufp is used to map the system message buffer.
         */
        SYSMAP(struct msgbuf *, unused, msgbufp, atop(round_page(msgbufsize)))

        /*
         * ptemap is used for pmap_pte_quick
         */
        SYSMAP(pt_entry_t *, PMAP1, PADDR1, 1);
        SYSMAP(pt_entry_t *, PMAP2, PADDR2, 1);

        mtx_init(&PMAP2mutex, "PMAP2", NULL, MTX_DEF);

        virtual_avail = va;

        /*
         * Leave in place an identity mapping (virt == phys) for the low 1 MB
         * physical memory region that is used by the ACPI wakeup code.  This
         * mapping must not have PG_G set.
         */
#ifndef XEN
        /*
         * leave here deliberately to show that this is not supported
         */
#ifdef XBOX
        /* FIXME: This is gross, but needed for the XBOX. Since we are at such
         * an early stage, we cannot yet neatly map video memory ... :-(
         * Better fixes are very welcome! */
        if (!arch_i386_is_xbox)
#endif
        for (i = 1; i < NKPT; i++)
                PTD[i] = 0;

        /* Initialize the PAT MSR if present. */
        pmap_init_pat();

        /* Turn on PG_G on kernel page(s) */
        pmap_set_pg();
#endif

#ifdef HAMFISTED_LOCKING
        mtx_init(&createdelete_lock, "pmap create/delete", NULL, MTX_DEF);
#endif
}

/*
 * Setup the PAT MSR.
 */
void
pmap_init_pat(void)
{
        uint64_t pat_msr;

        /* Bail if this CPU doesn't implement PAT. */
        if (!(cpu_feature & CPUID_PAT))
                return;

        if (cpu_vendor_id != CPU_VENDOR_INTEL ||
            (CPUID_TO_FAMILY(cpu_id) == 6 && CPUID_TO_MODEL(cpu_id) >= 0xe)) {
                /*
                 * Leave the indices 0-3 at the default of WB, WT, UC, and UC-.
                 * Program 4 and 5 as WP and WC.
                 * Leave 6 and 7 as UC and UC-.
                 */
                pat_msr = rdmsr(MSR_PAT);
                pat_msr &= ~(PAT_MASK(4) | PAT_MASK(5));
                pat_msr |= PAT_VALUE(4, PAT_WRITE_PROTECTED) |
                    PAT_VALUE(5, PAT_WRITE_COMBINING);
                pat_works = 1;
        } else {
                /*
                 * Due to some Intel errata, we can only safely use the lower 4
                 * PAT entries.  Thus, just replace PAT Index 2 with WC instead
                 * of UC-.
                 *
                 *   Intel Pentium III Processor Specification Update
                 * Errata E.27 (Upper Four PAT Entries Not Usable With Mode B
                 * or Mode C Paging)
                 *
                 *   Intel Pentium IV Processor Specification Update
                 * Errata N46 (PAT Index MSB May Be Calculated Incorrectly)
                 */
                pat_msr = rdmsr(MSR_PAT);
                pat_msr &= ~PAT_MASK(2);
                pat_msr |= PAT_VALUE(2, PAT_WRITE_COMBINING);
                pat_works = 0;
        }
        wrmsr(MSR_PAT, pat_msr);
}
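
/*
 * Worked example (illustrative only): PAT_VALUE(i, v) places the memory
 * type v into byte i of the 64-bit PAT MSR (only the low 3 bits of each
 * byte are meaningful).  So programming index 5 as write-combining above
 * sets bits 40-47 of MSR_PAT to 0x01, the WC encoding.
 */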

/*
 *      Initialize a vm_page's machine-dependent fields.
 */
void
pmap_page_init(vm_page_t m)
{

        TAILQ_INIT(&m->md.pv_list);
        m->md.pat_mode = PAT_WRITE_BACK;
}

/*
 * Abuse the pte nodes for unmapped kva to thread a kva freelist through.
 * Requirements:
 *  - Must deal with pages in order to ensure that none of the PG_* bits
 *    are ever set, PG_V in particular.
 *  - Assumes we can write to ptes without pte_store() atomic ops, even
 *    on PAE systems.  This should be ok.
 *  - Assumes nothing will ever test these addresses for 0 to indicate
 *    no mapping instead of correctly checking PG_V.
 *  - Assumes a vm_offset_t will fit in a pte (true for i386).
 * Because PG_V is never set, there can be no mappings to invalidate.
 */
static int ptelist_count = 0;
static vm_offset_t
pmap_ptelist_alloc(vm_offset_t *head)
{
        vm_offset_t va;
        vm_offset_t *phead = (vm_offset_t *)*head;

        if (ptelist_count == 0) {
                printf("out of memory!!!!!!\n");
                return (0);     /* Out of memory */
        }
        ptelist_count--;
        va = phead[ptelist_count];
        return (va);
}

static void
pmap_ptelist_free(vm_offset_t *head, vm_offset_t va)
{
        vm_offset_t *phead = (vm_offset_t *)*head;

        phead[ptelist_count++] = va;
}

static void
pmap_ptelist_init(vm_offset_t *head, void *base, int npages)
{
        int i, nstackpages;
        vm_offset_t va;
        vm_page_t m;

        nstackpages = (npages + PAGE_SIZE/sizeof(vm_offset_t) - 1) /
            (PAGE_SIZE/sizeof(vm_offset_t));
        for (i = 0; i < nstackpages; i++) {
                va = (vm_offset_t)base + i * PAGE_SIZE;
                m = vm_page_alloc(NULL, i,
                    VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
                    VM_ALLOC_ZERO);
                pmap_qenter(va, &m, 1);
        }

        *head = (vm_offset_t)base;
        for (i = npages - 1; i >= nstackpages; i--) {
                va = (vm_offset_t)base + i * PAGE_SIZE;
                pmap_ptelist_free(head, va);
        }
}

/*
 *      Initialize the pmap module.
 *      Called by vm_init, to initialize any structures that the pmap
 *      system needs to map virtual memory.
 */
void
pmap_init(void)
{
        vm_page_t mpte;
        vm_size_t s;
        int i, pv_npg;

        /*
         * Initialize the vm page array entries for the kernel pmap's
         * page table pages.
         */
        for (i = 0; i < nkpt; i++) {
                mpte = PHYS_TO_VM_PAGE(xpmap_mtop(PTD[i + KPTDI] & PG_FRAME));
                KASSERT(mpte >= vm_page_array &&
                    mpte < &vm_page_array[vm_page_array_size],
                    ("pmap_init: page table page is out of range"));
                mpte->pindex = i + KPTDI;
                mpte->phys_addr = xpmap_mtop(PTD[i + KPTDI] & PG_FRAME);
        }

        /*
         * Initialize the address space (zone) for the pv entries.  Set a
         * high water mark so that the system can recover from excessive
         * numbers of pv entries.
         */
        TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
        pv_entry_max = shpgperproc * maxproc + cnt.v_page_count;
        TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max);
        pv_entry_max = roundup(pv_entry_max, _NPCPV);
        pv_entry_high_water = 9 * (pv_entry_max / 10);

        /*
         * Are large page mappings enabled?
         */
        TUNABLE_INT_FETCH("vm.pmap.pg_ps_enabled", &pg_ps_enabled);

        /*
         * Calculate the size of the pv head table for superpages.
         */
        for (i = 0; phys_avail[i + 1]; i += 2);
        pv_npg = round_4mpage(phys_avail[(i - 2) + 1]) / NBPDR;

        /*
         * Allocate memory for the pv head table for superpages.
         */
        s = (vm_size_t)(pv_npg * sizeof(struct md_page));
        s = round_page(s);
        pv_table = (struct md_page *)kmem_alloc(kernel_map, s);
        for (i = 0; i < pv_npg; i++)
                TAILQ_INIT(&pv_table[i].pv_list);

        pv_maxchunks = MAX(pv_entry_max / _NPCPV, maxproc);
        pv_chunkbase = (struct pv_chunk *)kmem_alloc_nofault(kernel_map,
            PAGE_SIZE * pv_maxchunks);
        if (pv_chunkbase == NULL)
                panic("pmap_init: not enough kvm for pv chunks");
        pmap_ptelist_init(&pv_vafree, pv_chunkbase, pv_maxchunks);
}


/***************************************************
 * Low level helper routines.....
 ***************************************************/

/*
 * Determine the appropriate bits to set in a PTE or PDE for a specified
 * caching mode.
 */
int
pmap_cache_bits(int mode, boolean_t is_pde)
{
        int pat_flag, pat_index, cache_bits;

        /* The PAT bit is different for PTE's and PDE's. */
        pat_flag = is_pde ? PG_PDE_PAT : PG_PTE_PAT;

        /* If we don't support PAT, map extended modes to older ones. */
        if (!(cpu_feature & CPUID_PAT)) {
                switch (mode) {
                case PAT_UNCACHEABLE:
                case PAT_WRITE_THROUGH:
                case PAT_WRITE_BACK:
                        break;
                case PAT_UNCACHED:
                case PAT_WRITE_COMBINING:
                case PAT_WRITE_PROTECTED:
                        mode = PAT_UNCACHEABLE;
                        break;
                }
        }

        /* Map the caching mode to a PAT index. */
        if (pat_works) {
                switch (mode) {
                case PAT_UNCACHEABLE:
                        pat_index = 3;
                        break;
                case PAT_WRITE_THROUGH:
                        pat_index = 1;
                        break;
                case PAT_WRITE_BACK:
                        pat_index = 0;
                        break;
                case PAT_UNCACHED:
                        pat_index = 2;
                        break;
                case PAT_WRITE_COMBINING:
                        pat_index = 5;
                        break;
                case PAT_WRITE_PROTECTED:
                        pat_index = 4;
                        break;
                default:
                        panic("Unknown caching mode %d\n", mode);
                }
        } else {
                switch (mode) {
                case PAT_UNCACHED:
                case PAT_UNCACHEABLE:
                case PAT_WRITE_PROTECTED:
                        pat_index = 3;
                        break;
                case PAT_WRITE_THROUGH:
                        pat_index = 1;
                        break;
                case PAT_WRITE_BACK:
                        pat_index = 0;
                        break;
                case PAT_WRITE_COMBINING:
                        pat_index = 2;
                        break;
                default:
                        panic("Unknown caching mode %d\n", mode);
                }
        }

        /* Map the 3-bit index value into the PAT, PCD, and PWT bits. */
        cache_bits = 0;
        if (pat_index & 0x4)
                cache_bits |= pat_flag;
        if (pat_index & 0x2)
                cache_bits |= PG_NC_PCD;
        if (pat_index & 0x1)
                cache_bits |= PG_NC_PWT;
        return (cache_bits);
}
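
/*
 * Worked example (illustrative only): with pat_works set,
 * PAT_WRITE_COMBINING maps to pat_index 5 (binary 101), so
 * pmap_cache_bits() returns pat_flag | PG_NC_PWT; together those bits
 * select PAT entry 5, which pmap_init_pat() programmed as WC.
 */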
#ifdef SMP
/*
 * For SMP, these functions have to use the IPI mechanism for coherence.
 *
 * N.B.: Before calling any of the following TLB invalidation functions,
 * the calling processor must ensure that all stores updating a non-
 * kernel page table are globally performed.  Otherwise, another
 * processor could cache an old, pre-update entry without being
 * invalidated.  This can happen one of two ways: (1) The pmap becomes
 * active on another processor after its pm_active field is checked by
 * one of the following functions but before a store updating the page
 * table is globally performed. (2) The pmap becomes active on another
 * processor before its pm_active field is checked but due to
 * speculative loads one of the following functions still reads the
 * pmap as inactive on the other processor.
 *
 * The kernel page table is exempt because its pm_active field is
 * immutable.  The kernel page table is always active on every
 * processor.
 */
void
pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
{
        cpuset_t other_cpus;
        u_int cpuid;

        CTR2(KTR_PMAP, "pmap_invalidate_page: pmap=%p va=0x%x",
            pmap, va);

        sched_pin();
        if (pmap == kernel_pmap || !CPU_CMP(&pmap->pm_active, &all_cpus)) {
                invlpg(va);
                smp_invlpg(va);
        } else {
                cpuid = PCPU_GET(cpuid);
                other_cpus = all_cpus;
                CPU_CLR(cpuid, &other_cpus);
                if (CPU_ISSET(cpuid, &pmap->pm_active))
                        invlpg(va);
                CPU_AND(&other_cpus, &pmap->pm_active);
                if (!CPU_EMPTY(&other_cpus))
                        smp_masked_invlpg(other_cpus, va);
        }
        sched_unpin();
        PT_UPDATES_FLUSH();
}

void
pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{
        cpuset_t other_cpus;
        vm_offset_t addr;
        u_int cpuid;

        CTR3(KTR_PMAP, "pmap_invalidate_range: pmap=%p sva=0x%x eva=0x%x",
            pmap, sva, eva);

        sched_pin();
        if (pmap == kernel_pmap || !CPU_CMP(&pmap->pm_active, &all_cpus)) {
                for (addr = sva; addr < eva; addr += PAGE_SIZE)
                        invlpg(addr);
                smp_invlpg_range(sva, eva);
        } else {
                cpuid = PCPU_GET(cpuid);
                other_cpus = all_cpus;
                CPU_CLR(cpuid, &other_cpus);
                if (CPU_ISSET(cpuid, &pmap->pm_active))
                        for (addr = sva; addr < eva; addr += PAGE_SIZE)
                                invlpg(addr);
                CPU_AND(&other_cpus, &pmap->pm_active);
                if (!CPU_EMPTY(&other_cpus))
                        smp_masked_invlpg_range(other_cpus, sva, eva);
        }
        sched_unpin();
        PT_UPDATES_FLUSH();
}

void
pmap_invalidate_all(pmap_t pmap)
{
        cpuset_t other_cpus;
        u_int cpuid;

        CTR1(KTR_PMAP, "pmap_invalidate_all: pmap=%p", pmap);

        sched_pin();
        if (pmap == kernel_pmap || !CPU_CMP(&pmap->pm_active, &all_cpus)) {
                invltlb();
                smp_invltlb();
        } else {
                cpuid = PCPU_GET(cpuid);
                other_cpus = all_cpus;
                CPU_CLR(cpuid, &other_cpus);
                if (CPU_ISSET(cpuid, &pmap->pm_active))
                        invltlb();
                CPU_AND(&other_cpus, &pmap->pm_active);
                if (!CPU_EMPTY(&other_cpus))
                        smp_masked_invltlb(other_cpus);
        }
        sched_unpin();
}

void
pmap_invalidate_cache(void)
{

        sched_pin();
        wbinvd();
        smp_cache_flush();
        sched_unpin();
}
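
/*
 * Worked example (illustrative only): on a 4-CPU box with all_cpus =
 * {0,1,2,3}, a caller on CPU 1 invalidating a pmap with pm_active = {1,3}
 * takes the else-branch above: other_cpus starts as {0,2,3}, the
 * CPU_ISSET(1) test runs the local invlpg, and CPU_AND with pm_active
 * leaves {3}, so only CPU 3 receives the shootdown IPI.
 */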
#else /* !SMP */
/*
 * Normal, non-SMP, 486+ invalidation functions.
 * We inline these within pmap.c for speed.
 */
PMAP_INLINE void
pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
{
        CTR2(KTR_PMAP, "pmap_invalidate_page: pmap=%p va=0x%x",
            pmap, va);

        if (pmap == kernel_pmap || !CPU_EMPTY(&pmap->pm_active))
                invlpg(va);
        PT_UPDATES_FLUSH();
}

PMAP_INLINE void
pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{
        vm_offset_t addr;

        if (eva - sva > PAGE_SIZE)
                CTR3(KTR_PMAP, "pmap_invalidate_range: pmap=%p sva=0x%x eva=0x%x",
                    pmap, sva, eva);

        if (pmap == kernel_pmap || !CPU_EMPTY(&pmap->pm_active))
                for (addr = sva; addr < eva; addr += PAGE_SIZE)
                        invlpg(addr);
        PT_UPDATES_FLUSH();
}

PMAP_INLINE void
pmap_invalidate_all(pmap_t pmap)
{

        CTR1(KTR_PMAP, "pmap_invalidate_all: pmap=%p", pmap);

        if (pmap == kernel_pmap || !CPU_EMPTY(&pmap->pm_active))
                invltlb();
}

PMAP_INLINE void
pmap_invalidate_cache(void)
{

        wbinvd();
}
#endif /* !SMP */

void
pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva)
{

        KASSERT((sva & PAGE_MASK) == 0,
            ("pmap_invalidate_cache_range: sva not page-aligned"));
        KASSERT((eva & PAGE_MASK) == 0,
            ("pmap_invalidate_cache_range: eva not page-aligned"));

        if (cpu_feature & CPUID_SS)
                ; /* If "Self Snoop" is supported, do nothing. */
        else if (cpu_feature & CPUID_CLFSH) {

                /*
                 * Otherwise, do per-cache line flush.  Use the mfence
                 * instruction to ensure that previous stores are
                 * included in the write-back.  The processor
                 * propagates flush to other processors in the cache
                 * coherence domain.
                 */
                mfence();
                for (; sva < eva; sva += cpu_clflush_line_size)
                        clflush(sva);
                mfence();
        } else {

                /*
                 * No targeted cache flush methods are supported by the CPU,
                 * so globally invalidate the cache as a last resort.
                 */
                pmap_invalidate_cache();
        }
}
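
/*
 * Illustrative cost note (assuming a typical 64-byte cpu_clflush_line_size):
 * flushing one 4 KB page in the clflush loop above issues 4096/64 = 64
 * clflush instructions bracketed by two mfences, which is usually far
 * cheaper than the whole-cache wbinvd() fallback.
 */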

/*
 * Are we current address space or kernel?  N.B. We return FALSE when
 * a pmap's page table is in use because a kernel thread is borrowing
 * it.  The borrowed page table can change spontaneously, making any
 * dependence on its continued use subject to a race condition.
 */
static __inline int
pmap_is_current(pmap_t pmap)
{

        return (pmap == kernel_pmap ||
            (pmap == vmspace_pmap(curthread->td_proc->p_vmspace) &&
            (pmap->pm_pdir[PTDPTDI] & PG_FRAME) == (PTDpde[0] & PG_FRAME)));
}

/*
 * If the given pmap is not the current or kernel pmap, the returned pte must
 * be released by passing it to pmap_pte_release().
 */
pt_entry_t *
pmap_pte(pmap_t pmap, vm_offset_t va)
{
        pd_entry_t newpf;
        pd_entry_t *pde;

        pde = pmap_pde(pmap, va);
        if (*pde & PG_PS)
                return (pde);
        if (*pde != 0) {
                /* are we current address space or kernel? */
                if (pmap_is_current(pmap))
                        return (vtopte(va));
                mtx_lock(&PMAP2mutex);
                newpf = *pde & PG_FRAME;
                if ((*PMAP2 & PG_FRAME) != newpf) {
                        vm_page_lock_queues();
                        PT_SET_MA(PADDR2, newpf | PG_V | PG_A | PG_M);
                        vm_page_unlock_queues();
                        CTR3(KTR_PMAP, "pmap_pte: pmap=%p va=0x%x newpte=0x%08x",
                            pmap, va, (*PMAP2 & 0xffffffff));
                }

                return (PADDR2 + (i386_btop(va) & (NPTEPG - 1)));
        }
        return (0);
}

/*
 * Releases a pte that was obtained from pmap_pte().  Be prepared for the pte
 * being NULL.
 */
static __inline void
pmap_pte_release(pt_entry_t *pte)
{

        if ((pt_entry_t *)((vm_offset_t)pte & ~PAGE_MASK) == PADDR2) {
                CTR1(KTR_PMAP, "pmap_pte_release: pte=0x%jx",
                    *PMAP2);
                vm_page_lock_queues();
                PT_SET_VA(PMAP2, 0, TRUE);
                vm_page_unlock_queues();
                mtx_unlock(&PMAP2mutex);
        }
}

static __inline void
invlcaddr(void *caddr)
{

        invlpg((u_int)caddr);
        PT_UPDATES_FLUSH();
}
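
/*
 * Usage sketch (illustrative only): a caller inspecting a foreign pmap
 * pairs the two helpers above:
 *
 *      pte = pmap_pte(pmap, va);
 *      if (pte != NULL) {
 *              ... read or modify *pte ...
 *              pmap_pte_release(pte);   -- a no-op unless PADDR2 was used
 *      }
 */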

/*
 * Super fast pmap_pte routine best used when scanning
 * the pv lists.  This eliminates many coarse-grained
 * invltlb calls.  Note that many of the pv list
 * scans are across different pmaps.  It is very wasteful
 * to do an entire invltlb for checking a single mapping.
 *
 * If the given pmap is not the current pmap, vm_page_queue_mtx
 * must be held and curthread pinned to a CPU.
 */
static pt_entry_t *
pmap_pte_quick(pmap_t pmap, vm_offset_t va)
{
        pd_entry_t newpf;
        pd_entry_t *pde;

        pde = pmap_pde(pmap, va);
        if (*pde & PG_PS)
                return (pde);
        if (*pde != 0) {
                /* are we current address space or kernel? */
                if (pmap_is_current(pmap))
                        return (vtopte(va));
                mtx_assert(&vm_page_queue_mtx, MA_OWNED);
                KASSERT(curthread->td_pinned > 0, ("curthread not pinned"));
                newpf = *pde & PG_FRAME;
                if ((*PMAP1 & PG_FRAME) != newpf) {
                        PT_SET_MA(PADDR1, newpf | PG_V | PG_A | PG_M);
                        CTR3(KTR_PMAP, "pmap_pte_quick: pmap=%p va=0x%x newpte=0x%08x",
                            pmap, va, (u_long)*PMAP1);

#ifdef SMP
                        PMAP1cpu = PCPU_GET(cpuid);
#endif
                        PMAP1changed++;
                } else
#ifdef SMP
                if (PMAP1cpu != PCPU_GET(cpuid)) {
                        PMAP1cpu = PCPU_GET(cpuid);
                        invlcaddr(PADDR1);
                        PMAP1changedcpu++;
                } else
#endif
                        PMAP1unchanged++;
                return (PADDR1 + (i386_btop(va) & (NPTEPG - 1)));
        }
        return (0);
}

/*
 *      Routine:        pmap_extract
 *      Function:
 *              Extract the physical page address associated
 *              with the given map/virtual_address pair.
 */
vm_paddr_t
pmap_extract(pmap_t pmap, vm_offset_t va)
{
        vm_paddr_t rtval;
        pt_entry_t *pte;
        pd_entry_t pde;
        pt_entry_t pteval;

        rtval = 0;
        PMAP_LOCK(pmap);
        pde = pmap->pm_pdir[va >> PDRSHIFT];
        if (pde != 0) {
                if ((pde & PG_PS) != 0) {
                        rtval = xpmap_mtop(pde & PG_PS_FRAME) | (va & PDRMASK);
                        PMAP_UNLOCK(pmap);
                        return rtval;
                }
                pte = pmap_pte(pmap, va);
                pteval = *pte ? xpmap_mtop(*pte) : 0;
                rtval = (pteval & PG_FRAME) | (va & PAGE_MASK);
                pmap_pte_release(pte);
        }
        PMAP_UNLOCK(pmap);
        return (rtval);
}

/*
 *      Routine:        pmap_extract_ma
 *      Function:
 *              Like pmap_extract, but returns machine address
 */
vm_paddr_t
pmap_extract_ma(pmap_t pmap, vm_offset_t va)
{
        vm_paddr_t rtval;
        pt_entry_t *pte;
        pd_entry_t pde;

        rtval = 0;
        PMAP_LOCK(pmap);
        pde = pmap->pm_pdir[va >> PDRSHIFT];
        if (pde != 0) {
                if ((pde & PG_PS) != 0) {
                        rtval = (pde & ~PDRMASK) | (va & PDRMASK);
                        PMAP_UNLOCK(pmap);
                        return rtval;
                }
                pte = pmap_pte(pmap, va);
                rtval = (*pte & PG_FRAME) | (va & PAGE_MASK);
                pmap_pte_release(pte);
        }
        PMAP_UNLOCK(pmap);
        return (rtval);
}

/*
 *      Routine:        pmap_extract_and_hold
 *      Function:
 *              Atomically extract and hold the physical page
 *              with the given pmap and virtual address pair
 *              if that mapping permits the given protection.
 */
vm_page_t
pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
{
        pd_entry_t pde;
        pt_entry_t pte;
        vm_page_t m;
        vm_paddr_t pa;

        pa = 0;
        m = NULL;
        PMAP_LOCK(pmap);
retry:
        pde = PT_GET(pmap_pde(pmap, va));
        if (pde != 0) {
                if (pde & PG_PS) {
                        if ((pde & PG_RW) || (prot & VM_PROT_WRITE) == 0) {
                                if (vm_page_pa_tryrelock(pmap,
                                    (pde & PG_PS_FRAME) | (va & PDRMASK), &pa))
                                        goto retry;
                                m = PHYS_TO_VM_PAGE((pde & PG_PS_FRAME) |
                                    (va & PDRMASK));
                                vm_page_hold(m);
                        }
                } else {
                        sched_pin();
                        pte = PT_GET(pmap_pte_quick(pmap, va));
                        if (*PMAP1)
                                PT_SET_MA(PADDR1, 0);
                        if ((pte & PG_V) &&
                            ((pte & PG_RW) || (prot & VM_PROT_WRITE) == 0)) {
                                if (vm_page_pa_tryrelock(pmap, pte & PG_FRAME, &pa))
                                        goto retry;
                                m = PHYS_TO_VM_PAGE(pte & PG_FRAME);
                                vm_page_hold(m);
                        }
                        sched_unpin();
                }
        }
        PA_UNLOCK_COND(pa);
        PMAP_UNLOCK(pmap);
        return (m);
}
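
/*
 * Usage sketch (illustrative only): callers that must keep the page from
 * being freed while they operate on it follow the hold with an unhold:
 *
 *      m = pmap_extract_and_hold(pmap, va, VM_PROT_READ);
 *      if (m != NULL) {
 *              ... safely touch the page contents ...
 *              vm_page_lock(m);
 *              vm_page_unhold(m);
 *              vm_page_unlock(m);
 *      }
 */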

/***************************************************
 * Low level mapping routines.....
 ***************************************************/

/*
 * Add a wired page to the kva.
 * Note: not SMP coherent.
 */
void
pmap_kenter(vm_offset_t va, vm_paddr_t pa)
{

        PT_SET_MA(va, xpmap_ptom(pa) | PG_RW | PG_V | pgeflag);
}

void
pmap_kenter_ma(vm_offset_t va, vm_paddr_t ma)
{
        pt_entry_t *pte;

        pte = vtopte(va);
        pte_store_ma(pte, ma | PG_RW | PG_V | pgeflag);
}


static __inline void
pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode)
{

        PT_SET_MA(va, pa | PG_RW | PG_V | pgeflag | pmap_cache_bits(mode, 0));
}

/*
 * Remove a page from the kernel pagetables.
 * Note: not SMP coherent.
 */
PMAP_INLINE void
pmap_kremove(vm_offset_t va)
{
        pt_entry_t *pte;

        pte = vtopte(va);
        PT_CLEAR_VA(pte, FALSE);
}

/*
 *      Used to map a range of physical addresses into kernel
 *      virtual address space.
 *
 *      The value passed in '*virt' is a suggested virtual address for
 *      the mapping. Architectures which can support a direct-mapped
 *      physical to virtual region can return the appropriate address
 *      within that region, leaving '*virt' unchanged. Other
 *      architectures should map the pages starting at '*virt' and
 *      update '*virt' with the first usable address after the mapped
 *      region.
 */
vm_offset_t
pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
{
        vm_offset_t va, sva;

        va = sva = *virt;
        CTR4(KTR_PMAP, "pmap_map: va=0x%x start=0x%jx end=0x%jx prot=0x%x",
            va, start, end, prot);
        while (start < end) {
                pmap_kenter(va, start);
                va += PAGE_SIZE;
                start += PAGE_SIZE;
        }
        pmap_invalidate_range(kernel_pmap, sva, va);
        *virt = va;
        return (sva);
}

/*
 * Add a list of wired pages to the kva
 * this routine is only used for temporary
 * kernel mappings that do not need to have
 * page modification or references recorded.
 * Note that old mappings are simply written
 * over.  The page *must* be wired.
 * Note: SMP coherent.  Uses a ranged shootdown IPI.
 */
void
pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count)
{
        pt_entry_t *endpte, *pte;
        vm_paddr_t pa;
        vm_offset_t va = sva;
        int mclcount = 0;
        multicall_entry_t mcl[16];
        multicall_entry_t *mclp = mcl;
        int error;

        CTR2(KTR_PMAP, "pmap_qenter:sva=0x%x count=%d", va, count);
        pte = vtopte(sva);
        endpte = pte + count;
        while (pte < endpte) {
                pa = VM_PAGE_TO_MACH(*ma) | pgeflag | PG_RW | PG_V | PG_M | PG_A;

                mclp->op = __HYPERVISOR_update_va_mapping;
                mclp->args[0] = va;
                mclp->args[1] = (uint32_t)(pa & 0xffffffff);
                mclp->args[2] = (uint32_t)(pa >> 32);
                mclp->args[3] = (*pte & PG_V) ? UVMF_INVLPG|UVMF_ALL : 0;

                va += PAGE_SIZE;
                pte++;
                ma++;
                mclp++;
                mclcount++;
                if (mclcount == 16) {
                        error = HYPERVISOR_multicall(mcl, mclcount);
                        mclp = mcl;
                        mclcount = 0;
                        KASSERT(error == 0, ("bad multicall %d", error));
                }
        }
        if (mclcount) {
                error = HYPERVISOR_multicall(mcl, mclcount);
                KASSERT(error == 0, ("bad multicall %d", error));
        }

#ifdef INVARIANTS
        for (pte = vtopte(sva), mclcount = 0; mclcount < count; mclcount++, pte++)
                KASSERT(*pte, ("pte not set for va=0x%x", sva + mclcount*PAGE_SIZE));
#endif
}


/*
 * This routine tears out page mappings from the
 * kernel -- it is meant only for temporary mappings.
 * Note: SMP coherent.  Uses a ranged shootdown IPI.
 */
void
pmap_qremove(vm_offset_t sva, int count)
{
        vm_offset_t va;

        CTR2(KTR_PMAP, "pmap_qremove: sva=0x%x count=%d", sva, count);
        va = sva;
        vm_page_lock_queues();
        critical_enter();
        while (count-- > 0) {
                pmap_kremove(va);
                va += PAGE_SIZE;
        }
        PT_UPDATES_FLUSH();
        pmap_invalidate_range(kernel_pmap, sva, va);
        critical_exit();
        vm_page_unlock_queues();
}
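
/*
 * Usage sketch (illustrative only): pmap_qenter()/pmap_qremove() bracket a
 * temporary kernel mapping of a page array; the hypervisor multicall above
 * batches the Xen PTE updates 16 at a time to limit hypercall overhead:
 *
 *      pmap_qenter(kva, pages, npages);
 *      ... access the pages through kva ...
 *      pmap_qremove(kva, npages);
 */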

/*
 * This routine tears out page mappings from the
 * kernel -- it is meant only for temporary mappings.
 * Note: SMP coherent.  Uses a ranged shootdown IPI.
 */
void
pmap_qremove(vm_offset_t sva, int count)
{
	vm_offset_t va;

	CTR2(KTR_PMAP, "pmap_qremove: sva=0x%x count=%d", sva, count);
	va = sva;
	vm_page_lock_queues();
	critical_enter();
	while (count-- > 0) {
		pmap_kremove(va);
		va += PAGE_SIZE;
	}
	PT_UPDATES_FLUSH();
	pmap_invalidate_range(kernel_pmap, sva, va);
	critical_exit();
	vm_page_unlock_queues();
}

/***************************************************
 * Page table page management routines.....
 ***************************************************/
static __inline void
pmap_free_zero_pages(vm_page_t free)
{
	vm_page_t m;

	while (free != NULL) {
		m = free;
		free = m->right;
		vm_page_free_zero(m);
	}
}

/*
 * This routine unholds page table pages, and if the hold count
 * drops to zero, then it decrements the wire count.
 */
static __inline int
pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m, vm_page_t *free)
{

	--m->wire_count;
	if (m->wire_count == 0)
		return _pmap_unwire_pte_hold(pmap, m, free);
	else
		return 0;
}

static int
_pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m, vm_page_t *free)
{
	vm_offset_t pteva;

	PT_UPDATES_FLUSH();
	/*
	 * unmap the page table page
	 */
	xen_pt_unpin(pmap->pm_pdir[m->pindex]);
	/*
	 * page *might* contain residual mapping :-/
	 */
	PD_CLEAR_VA(pmap, m->pindex, TRUE);
	pmap_zero_page(m);
	--pmap->pm_stats.resident_count;

	/*
	 * This is a release store so that the ordinary store unmapping
	 * the page table page is globally performed before TLB shoot-
	 * down is begun.
	 */
	atomic_subtract_rel_int(&cnt.v_wire_count, 1);

	/*
	 * Do an invltlb to make the invalidated mapping
	 * take effect immediately.
	 */
	pteva = VM_MAXUSER_ADDRESS + i386_ptob(m->pindex);
	pmap_invalidate_page(pmap, pteva);

	/*
	 * Put page on a list so that it is released after
	 * *ALL* TLB shootdown is done
	 */
	m->right = *free;
	*free = m;

	return 1;
}

/*
 * After removing a page table entry, this routine is used to
 * conditionally free the page, and manage the hold/wire counts.
 */
static int
pmap_unuse_pt(pmap_t pmap, vm_offset_t va, vm_page_t *free)
{
	pd_entry_t ptepde;
	vm_page_t mpte;

	if (va >= VM_MAXUSER_ADDRESS)
		return 0;
	ptepde = PT_GET(pmap_pde(pmap, va));
	mpte = PHYS_TO_VM_PAGE(ptepde & PG_FRAME);
	return pmap_unwire_pte_hold(pmap, mpte, free);
}
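
/*
 * Explanatory note: freed page table pages are not returned to the VM
 * system immediately.  _pmap_unwire_pte_hold() chains them through
 * m->right onto the caller-supplied "free" list, and only after every
 * pending TLB shootdown has completed does the caller hand the list to
 * pmap_free_zero_pages().  This keeps a page table page from being
 * reused while another CPU may still hold a stale translation to it.
 */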

void
pmap_pinit0(pmap_t pmap)
{

	PMAP_LOCK_INIT(pmap);
	pmap->pm_pdir = (pd_entry_t *)(KERNBASE + (vm_offset_t)IdlePTD);
#ifdef PAE
	pmap->pm_pdpt = (pdpt_entry_t *)(KERNBASE + (vm_offset_t)IdlePDPT);
#endif
	CPU_ZERO(&pmap->pm_active);
	PCPU_SET(curpmap, pmap);
	TAILQ_INIT(&pmap->pm_pvchunk);
	bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
	mtx_lock_spin(&allpmaps_lock);
	LIST_INSERT_HEAD(&allpmaps, pmap, pm_list);
	mtx_unlock_spin(&allpmaps_lock);
}

/*
 * Initialize a preallocated and zeroed pmap structure,
 * such as one in a vmspace structure.
 */
int
pmap_pinit(pmap_t pmap)
{
	vm_page_t m, ptdpg[NPGPTD + 1];
	int npgptd = NPGPTD + 1;
	static int color;
	int i;

#ifdef HAMFISTED_LOCKING
	mtx_lock(&createdelete_lock);
#endif

	PMAP_LOCK_INIT(pmap);

	/*
	 * No need to allocate page table space yet but we do need a valid
	 * page directory table.
	 */
	if (pmap->pm_pdir == NULL) {
		pmap->pm_pdir = (pd_entry_t *)kmem_alloc_nofault(kernel_map,
		    NBPTD);
		if (pmap->pm_pdir == NULL) {
			PMAP_LOCK_DESTROY(pmap);
#ifdef HAMFISTED_LOCKING
			mtx_unlock(&createdelete_lock);
#endif
			return (0);
		}
#ifdef PAE
		pmap->pm_pdpt = (pd_entry_t *)kmem_alloc_nofault(kernel_map, 1);
#endif
	}

	/*
	 * allocate the page directory page(s)
	 */
	for (i = 0; i < npgptd;) {
		m = vm_page_alloc(NULL, color++,
		    VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
		    VM_ALLOC_ZERO);
		if (m == NULL)
			VM_WAIT;
		else {
			ptdpg[i++] = m;
		}
	}
	pmap_qenter((vm_offset_t)pmap->pm_pdir, ptdpg, NPGPTD);
	for (i = 0; i < NPGPTD; i++) {
		if ((ptdpg[i]->flags & PG_ZERO) == 0)
			pagezero(&pmap->pm_pdir[i*NPTEPG]);
	}

	mtx_lock_spin(&allpmaps_lock);
	LIST_INSERT_HEAD(&allpmaps, pmap, pm_list);
	mtx_unlock_spin(&allpmaps_lock);
	/* Wire in kernel global address entries. */

	bcopy(PTD + KPTDI, pmap->pm_pdir + KPTDI, nkpt * sizeof(pd_entry_t));
#ifdef PAE
	pmap_qenter((vm_offset_t)pmap->pm_pdpt, &ptdpg[NPGPTD], 1);
	if ((ptdpg[NPGPTD]->flags & PG_ZERO) == 0)
		bzero(pmap->pm_pdpt, PAGE_SIZE);
	for (i = 0; i < NPGPTD; i++) {
		vm_paddr_t ma;

		ma = VM_PAGE_TO_MACH(ptdpg[i]);
		pmap->pm_pdpt[i] = ma | PG_V;
	}
#endif
	for (i = 0; i < NPGPTD; i++) {
		pt_entry_t *pd;
		vm_paddr_t ma;

		ma = VM_PAGE_TO_MACH(ptdpg[i]);
		pd = pmap->pm_pdir + (i * NPDEPG);
		PT_SET_MA(pd, *vtopte((vm_offset_t)pd) & ~(PG_M|PG_A|PG_U|PG_RW));
#if 0
		xen_pgd_pin(ma);
#endif
	}

#ifdef PAE
	PT_SET_MA(pmap->pm_pdpt, *vtopte((vm_offset_t)pmap->pm_pdpt) & ~PG_RW);
#endif
	vm_page_lock_queues();
	xen_flush_queue();
	xen_pgdpt_pin(VM_PAGE_TO_MACH(ptdpg[NPGPTD]));
	for (i = 0; i < NPGPTD; i++) {
		vm_paddr_t ma = VM_PAGE_TO_MACH(ptdpg[i]);
		PT_SET_VA_MA(&pmap->pm_pdir[PTDPTDI + i], ma | PG_V | PG_A, FALSE);
	}
	xen_flush_queue();
	vm_page_unlock_queues();
	CPU_ZERO(&pmap->pm_active);
	TAILQ_INIT(&pmap->pm_pvchunk);
	bzero(&pmap->pm_stats, sizeof pmap->pm_stats);

#ifdef HAMFISTED_LOCKING
	mtx_unlock(&createdelete_lock);
#endif
	return (1);
}
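
/*
 * Explanatory note: unlike the native i386 pmap, Xen requires that all
 * active page directory and page table pages be mapped read-only in
 * the guest, so pmap_pinit() strips PG_RW (along with PG_M/PG_A/PG_U)
 * from the kernel mappings of the new page directory pages before
 * handing them to the hypervisor via xen_pgdpt_pin().  All later
 * modifications must go through the queued update machinery
 * (PT_SET_VA_MA() and friends) rather than direct stores.
 */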

/*
 * this routine is called if the page table page is not
 * mapped correctly.
 */
static vm_page_t
_pmap_allocpte(pmap_t pmap, unsigned int ptepindex, int flags)
{
	vm_paddr_t ptema;
	vm_page_t m;

	KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT ||
	    (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK,
	    ("_pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK"));

	/*
	 * Allocate a page table page.
	 */
	if ((m = vm_page_alloc(NULL, ptepindex, VM_ALLOC_NOOBJ |
	    VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
		if (flags & M_WAITOK) {
			PMAP_UNLOCK(pmap);
			vm_page_unlock_queues();
			VM_WAIT;
			vm_page_lock_queues();
			PMAP_LOCK(pmap);
		}

		/*
		 * Indicate the need to retry.  While waiting, the page table
		 * page may have been allocated.
		 */
		return (NULL);
	}
	if ((m->flags & PG_ZERO) == 0)
		pmap_zero_page(m);

	/*
	 * Map the pagetable page into the process address space, if
	 * it isn't already there.
	 */
	pmap->pm_stats.resident_count++;

	ptema = VM_PAGE_TO_MACH(m);
	xen_pt_pin(ptema);
	PT_SET_VA_MA(&pmap->pm_pdir[ptepindex],
	    (ptema | PG_U | PG_RW | PG_V | PG_A | PG_M), TRUE);

	KASSERT(pmap->pm_pdir[ptepindex],
	    ("_pmap_allocpte: ptepindex=%d did not get mapped", ptepindex));
	return (m);
}

static vm_page_t
pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags)
{
	unsigned ptepindex;
	pd_entry_t ptema;
	vm_page_t m;

	KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT ||
	    (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK,
	    ("pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK"));

	/*
	 * Calculate pagetable page index
	 */
	ptepindex = va >> PDRSHIFT;
retry:
	/*
	 * Get the page directory entry
	 */
	ptema = pmap->pm_pdir[ptepindex];

	/*
	 * This supports switching from a 4MB page to a
	 * normal 4K page.
	 */
	if (ptema & PG_PS) {
		/*
		 * XXX
		 */
		pmap->pm_pdir[ptepindex] = 0;
		ptema = 0;
		pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE;
		pmap_invalidate_all(kernel_pmap);
	}

	/*
	 * If the page table page is mapped, we just increment the
	 * hold count, and activate it.
	 */
	if (ptema & PG_V) {
		m = PHYS_TO_VM_PAGE(xpmap_mtop(ptema) & PG_FRAME);
		m->wire_count++;
	} else {
		/*
		 * Here if the pte page isn't mapped, or if it has
		 * been deallocated.
		 */
		CTR3(KTR_PMAP, "pmap_allocpte: pmap=%p va=0x%08x flags=0x%x",
		    pmap, va, flags);
		m = _pmap_allocpte(pmap, ptepindex, flags);
		if (m == NULL && (flags & M_WAITOK))
			goto retry;

		KASSERT(pmap->pm_pdir[ptepindex], ("ptepindex=%d did not get mapped", ptepindex));
	}
	return (m);
}
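
/*
 * Explanatory note: the M_WAITOK retry protocol above means that
 * _pmap_allocpte() may drop and reacquire both the page queues lock
 * and the pmap lock around VM_WAIT.  A NULL return therefore does not
 * mean failure; it tells pmap_allocpte() that the world may have
 * changed while it slept and the page directory entry must be
 * re-examined, hence the "goto retry".
 */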

/***************************************************
* Pmap allocation/deallocation routines.
 ***************************************************/

#ifdef SMP
/*
 * Deal with a SMP shootdown of other users of the pmap that we are
 * trying to dispose of.  This can be a bit hairy.
 */
static cpuset_t *lazymask;
static u_int lazyptd;
static volatile u_int lazywait;

void pmap_lazyfix_action(void);

void
pmap_lazyfix_action(void)
{

#ifdef COUNT_IPIS
	(*ipi_lazypmap_counts[PCPU_GET(cpuid)])++;
#endif
	if (rcr3() == lazyptd)
		load_cr3(PCPU_GET(curpcb)->pcb_cr3);
	CPU_CLR_ATOMIC(PCPU_GET(cpuid), lazymask);
	atomic_store_rel_int(&lazywait, 1);
}

static void
pmap_lazyfix_self(u_int cpuid)
{

	if (rcr3() == lazyptd)
		load_cr3(PCPU_GET(curpcb)->pcb_cr3);
	CPU_CLR_ATOMIC(cpuid, lazymask);
}

static void
pmap_lazyfix(pmap_t pmap)
{
	cpuset_t mymask, mask;
	u_int cpuid, spins;
	int lsb;

	mask = pmap->pm_active;
	while (!CPU_EMPTY(&mask)) {
		spins = 50000000;

		/* Find least significant set bit. */
		lsb = cpusetobj_ffs(&mask);
		MPASS(lsb != 0);
		lsb--;
		CPU_SETOF(lsb, &mask);
		mtx_lock_spin(&smp_ipi_mtx);
#ifdef PAE
		lazyptd = vtophys(pmap->pm_pdpt);
#else
		lazyptd = vtophys(pmap->pm_pdir);
#endif
		cpuid = PCPU_GET(cpuid);

		/* Use a cpuset just for having an easy check. */
		CPU_SETOF(cpuid, &mymask);
		if (!CPU_CMP(&mask, &mymask)) {
			lazymask = &pmap->pm_active;
			pmap_lazyfix_self(cpuid);
		} else {
			atomic_store_rel_int((u_int *)&lazymask,
			    (u_int)&pmap->pm_active);
			atomic_store_rel_int(&lazywait, 0);
			ipi_selected(mask, IPI_LAZYPMAP);
			while (lazywait == 0) {
				ia32_pause();
				if (--spins == 0)
					break;
			}
		}
		mtx_unlock_spin(&smp_ipi_mtx);
		if (spins == 0)
			printf("pmap_lazyfix: spun for 50000000\n");
		mask = pmap->pm_active;
	}
}
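
/*
 * Explanatory note on the handshake above: pmap_lazyfix() picks one
 * CPU at a time out of pm_active, points the target at the dying pmap
 * via the global lazymask/lazyptd pair, and then spins on lazywait.
 * The interrupted CPU runs pmap_lazyfix_action(), reloads %cr3 if it
 * was still lazily using this pmap, clears its bit in lazymask, and
 * posts lazywait with a release store so the initiator observes the
 * bit clear before proceeding to the next CPU.
 */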

#else /* SMP */

/*
 * Cleaning up on uniprocessor is easy.  For various reasons, we're
 * unlikely to have to even execute this code, including the fact
 * that the cleanup is deferred until the parent does a wait(2), which
 * means that another userland process has run.
 */
static void
pmap_lazyfix(pmap_t pmap)
{
	u_int cr3;

	cr3 = vtophys(pmap->pm_pdir);
	if (cr3 == rcr3()) {
		load_cr3(PCPU_GET(curpcb)->pcb_cr3);
		CPU_CLR(PCPU_GET(cpuid), &pmap->pm_active);
	}
}
#endif /* SMP */

/*
 * Release any resources held by the given physical map.
 * Called when a pmap initialized by pmap_pinit is being released.
 * Should only be called if the map contains no valid mappings.
 */
void
pmap_release(pmap_t pmap)
{
	vm_page_t m, ptdpg[2*NPGPTD+1];
	vm_paddr_t ma;
	int i;
#ifdef PAE
	int npgptd = NPGPTD + 1;
#else
	int npgptd = NPGPTD;
#endif
	KASSERT(pmap->pm_stats.resident_count == 0,
	    ("pmap_release: pmap resident count %ld != 0",
	    pmap->pm_stats.resident_count));
	PT_UPDATES_FLUSH();

#ifdef HAMFISTED_LOCKING
	mtx_lock(&createdelete_lock);
#endif

	pmap_lazyfix(pmap);
	mtx_lock_spin(&allpmaps_lock);
	LIST_REMOVE(pmap, pm_list);
	mtx_unlock_spin(&allpmaps_lock);

	for (i = 0; i < NPGPTD; i++)
		ptdpg[i] = PHYS_TO_VM_PAGE(vtophys(pmap->pm_pdir + (i*NPDEPG)) & PG_FRAME);
	pmap_qremove((vm_offset_t)pmap->pm_pdir, NPGPTD);
#ifdef PAE
	ptdpg[NPGPTD] = PHYS_TO_VM_PAGE(vtophys(pmap->pm_pdpt));
#endif

	for (i = 0; i < npgptd; i++) {
		m = ptdpg[i];
		ma = VM_PAGE_TO_MACH(m);
		/* unpinning L1 and L2 treated the same */
#if 0
		xen_pgd_unpin(ma);
#else
		if (i == NPGPTD)
			xen_pgd_unpin(ma);
#endif
#ifdef PAE
		if (i < NPGPTD)
			KASSERT(VM_PAGE_TO_MACH(m) == (pmap->pm_pdpt[i] & PG_FRAME),
			    ("pmap_release: got wrong ptd page"));
#endif
		m->wire_count--;
		atomic_subtract_int(&cnt.v_wire_count, 1);
		vm_page_free(m);
	}
#ifdef PAE
	pmap_qremove((vm_offset_t)pmap->pm_pdpt, 1);
#endif
	PMAP_LOCK_DESTROY(pmap);

#ifdef HAMFISTED_LOCKING
	mtx_unlock(&createdelete_lock);
#endif
}

static int
kvm_size(SYSCTL_HANDLER_ARGS)
{
	unsigned long ksize = VM_MAX_KERNEL_ADDRESS - KERNBASE;

	return sysctl_handle_long(oidp, &ksize, 0, req);
}
SYSCTL_PROC(_vm, OID_AUTO, kvm_size, CTLTYPE_LONG|CTLFLAG_RD,
    0, 0, kvm_size, "IU", "Size of KVM");
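
/*
 * Usage note (illustrative): this sysctl and vm.kvm_free below are
 * read-only and can be inspected from userland, e.g.
 * "sysctl vm.kvm_size vm.kvm_free", to see how much kernel virtual
 * address space is configured and how much of it pmap_growkernel()
 * has not yet backed with page tables.
 */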

static int
kvm_free(SYSCTL_HANDLER_ARGS)
{
	unsigned long kfree = VM_MAX_KERNEL_ADDRESS - kernel_vm_end;

	return sysctl_handle_long(oidp, &kfree, 0, req);
}
SYSCTL_PROC(_vm, OID_AUTO, kvm_free, CTLTYPE_LONG|CTLFLAG_RD,
    0, 0, kvm_free, "IU", "Amount of KVM free");

/*
 * grow the number of kernel page table entries, if needed
 */
void
pmap_growkernel(vm_offset_t addr)
{
	struct pmap *pmap;
	vm_paddr_t ptppaddr;
	vm_page_t nkpg;
	pd_entry_t newpdir;

	mtx_assert(&kernel_map->system_mtx, MA_OWNED);
	if (kernel_vm_end == 0) {
		kernel_vm_end = KERNBASE;
		nkpt = 0;
		while (pdir_pde(PTD, kernel_vm_end)) {
			kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
			nkpt++;
			if (kernel_vm_end - 1 >= kernel_map->max_offset) {
				kernel_vm_end = kernel_map->max_offset;
				break;
			}
		}
	}
	addr = roundup2(addr, PAGE_SIZE * NPTEPG);
	if (addr - 1 >= kernel_map->max_offset)
		addr = kernel_map->max_offset;
	while (kernel_vm_end < addr) {
		if (pdir_pde(PTD, kernel_vm_end)) {
			kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
			if (kernel_vm_end - 1 >= kernel_map->max_offset) {
				kernel_vm_end = kernel_map->max_offset;
				break;
			}
			continue;
		}

		/*
		 * This index is bogus, but out of the way
		 */
		nkpg = vm_page_alloc(NULL, nkpt,
		    VM_ALLOC_NOOBJ | VM_ALLOC_SYSTEM | VM_ALLOC_WIRED);
		if (!nkpg)
			panic("pmap_growkernel: no memory to grow kernel");

		nkpt++;

		pmap_zero_page(nkpg);
		ptppaddr = VM_PAGE_TO_PHYS(nkpg);
		newpdir = (pd_entry_t) (ptppaddr | PG_V | PG_RW | PG_A | PG_M);
		vm_page_lock_queues();
		PD_SET_VA(kernel_pmap, (kernel_vm_end >> PDRSHIFT), newpdir, TRUE);
		mtx_lock_spin(&allpmaps_lock);
		LIST_FOREACH(pmap, &allpmaps, pm_list)
			PD_SET_VA(pmap, (kernel_vm_end >> PDRSHIFT), newpdir, TRUE);

		mtx_unlock_spin(&allpmaps_lock);
		vm_page_unlock_queues();

		kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
		if (kernel_vm_end - 1 >= kernel_map->max_offset) {
			kernel_vm_end = kernel_map->max_offset;
			break;
		}
	}
}
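
/*
 * Worked example (illustrative values): each page table page maps
 * PAGE_SIZE * NPTEPG bytes of KVA (4MB with 4K pages and 1024 PTEs),
 * so pmap_growkernel() rounds the request up to that granularity:
 *
 *	addr = roundup2(0xc1234567, PAGE_SIZE * NPTEPG);
 *	// addr == 0xc1400000, the next 4MB boundary
 *
 * and then allocates one page table page per 4MB step, propagating
 * the new PDE to every pmap on the allpmaps list.
 */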
1950181641Skmacy ***************************************************/ 1951181641Skmacy 1952181641SkmacyCTASSERT(sizeof(struct pv_chunk) == PAGE_SIZE); 1953181641SkmacyCTASSERT(_NPCM == 11); 1954181641Skmacy 1955181641Skmacystatic __inline struct pv_chunk * 1956181641Skmacypv_to_chunk(pv_entry_t pv) 1957181641Skmacy{ 1958181641Skmacy 1959181641Skmacy return (struct pv_chunk *)((uintptr_t)pv & ~(uintptr_t)PAGE_MASK); 1960181641Skmacy} 1961181641Skmacy 1962181641Skmacy#define PV_PMAP(pv) (pv_to_chunk(pv)->pc_pmap) 1963181641Skmacy 1964181641Skmacy#define PC_FREE0_9 0xfffffffful /* Free values for index 0 through 9 */ 1965181641Skmacy#define PC_FREE10 0x0000fffful /* Free values for index 10 */ 1966181641Skmacy 1967181641Skmacystatic uint32_t pc_freemask[11] = { 1968181641Skmacy PC_FREE0_9, PC_FREE0_9, PC_FREE0_9, 1969181641Skmacy PC_FREE0_9, PC_FREE0_9, PC_FREE0_9, 1970181641Skmacy PC_FREE0_9, PC_FREE0_9, PC_FREE0_9, 1971181641Skmacy PC_FREE0_9, PC_FREE10 1972181641Skmacy}; 1973181641Skmacy 1974181641SkmacySYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_count, CTLFLAG_RD, &pv_entry_count, 0, 1975181641Skmacy "Current number of pv entries"); 1976181641Skmacy 1977181641Skmacy#ifdef PV_STATS 1978181641Skmacystatic int pc_chunk_count, pc_chunk_allocs, pc_chunk_frees, pc_chunk_tryfail; 1979181641Skmacy 1980181641SkmacySYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_count, CTLFLAG_RD, &pc_chunk_count, 0, 1981181641Skmacy "Current number of pv entry chunks"); 1982181641SkmacySYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_allocs, CTLFLAG_RD, &pc_chunk_allocs, 0, 1983181641Skmacy "Current number of pv entry chunks allocated"); 1984181641SkmacySYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_frees, CTLFLAG_RD, &pc_chunk_frees, 0, 1985181641Skmacy "Current number of pv entry chunks frees"); 1986181641SkmacySYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_tryfail, CTLFLAG_RD, &pc_chunk_tryfail, 0, 1987181641Skmacy "Number of times tried to get a chunk page but failed."); 1988181641Skmacy 1989181641Skmacystatic long pv_entry_frees, pv_entry_allocs; 1990181641Skmacystatic int pv_entry_spare; 1991181641Skmacy 1992181641SkmacySYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_frees, CTLFLAG_RD, &pv_entry_frees, 0, 1993181641Skmacy "Current number of pv entry frees"); 1994181641SkmacySYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_allocs, CTLFLAG_RD, &pv_entry_allocs, 0, 1995181641Skmacy "Current number of pv entry allocs"); 1996181641SkmacySYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_spare, CTLFLAG_RD, &pv_entry_spare, 0, 1997181641Skmacy "Current number of spare pv entries"); 1998181641Skmacy 1999181641Skmacystatic int pmap_collect_inactive, pmap_collect_active; 2000181641Skmacy 2001181641SkmacySYSCTL_INT(_vm_pmap, OID_AUTO, pmap_collect_inactive, CTLFLAG_RD, &pmap_collect_inactive, 0, 2002181641Skmacy "Current number times pmap_collect called on inactive queue"); 2003181641SkmacySYSCTL_INT(_vm_pmap, OID_AUTO, pmap_collect_active, CTLFLAG_RD, &pmap_collect_active, 0, 2004181641Skmacy "Current number times pmap_collect called on active queue"); 2005181641Skmacy#endif 2006181641Skmacy 2007181641Skmacy/* 2008181641Skmacy * We are in a serious low memory condition. Resort to 2009181641Skmacy * drastic measures to free some pages so we can allocate 2010181641Skmacy * another pv entry chunk. This is normally called to 2011181641Skmacy * unmap inactive pages, and if necessary, active pages. 

/*
 * We are in a serious low memory condition.  Resort to
 * drastic measures to free some pages so we can allocate
 * another pv entry chunk.  This is normally called to
 * unmap inactive pages, and if necessary, active pages.
 */
static void
pmap_collect(pmap_t locked_pmap, struct vpgqueues *vpq)
{
	pmap_t pmap;
	pt_entry_t *pte, tpte;
	pv_entry_t next_pv, pv;
	vm_offset_t va;
	vm_page_t m, free;

	sched_pin();
	TAILQ_FOREACH(m, &vpq->pl, pageq) {
		if ((m->flags & PG_MARKER) != 0 || m->hold_count || m->busy)
			continue;
		TAILQ_FOREACH_SAFE(pv, &m->md.pv_list, pv_list, next_pv) {
			va = pv->pv_va;
			pmap = PV_PMAP(pv);
			/* Avoid deadlock and lock recursion. */
			if (pmap > locked_pmap)
				PMAP_LOCK(pmap);
			else if (pmap != locked_pmap && !PMAP_TRYLOCK(pmap))
				continue;
			pmap->pm_stats.resident_count--;
			pte = pmap_pte_quick(pmap, va);
			tpte = pte_load_clear(pte);
			KASSERT((tpte & PG_W) == 0,
			    ("pmap_collect: wired pte %#jx", (uintmax_t)tpte));
			if (tpte & PG_A)
				vm_page_aflag_set(m, PGA_REFERENCED);
			if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
				vm_page_dirty(m);
			free = NULL;
			pmap_unuse_pt(pmap, va, &free);
			pmap_invalidate_page(pmap, va);
			pmap_free_zero_pages(free);
			TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
			free_pv_entry(pmap, pv);
			if (pmap != locked_pmap)
				PMAP_UNLOCK(pmap);
		}
		if (TAILQ_EMPTY(&m->md.pv_list))
			vm_page_aflag_clear(m, PGA_WRITEABLE);
	}
	sched_unpin();
}

/*
 * free the pv_entry back to the free list
 */
static void
free_pv_entry(pmap_t pmap, pv_entry_t pv)
{
	vm_page_t m;
	struct pv_chunk *pc;
	int idx, field, bit;

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	PV_STAT(pv_entry_frees++);
	PV_STAT(pv_entry_spare++);
	pv_entry_count--;
	pc = pv_to_chunk(pv);
	idx = pv - &pc->pc_pventry[0];
	field = idx / 32;
	bit = idx % 32;
	pc->pc_map[field] |= 1ul << bit;
	/* move to head of list */
	TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
	TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
	for (idx = 0; idx < _NPCM; idx++)
		if (pc->pc_map[idx] != pc_freemask[idx])
			return;
	PV_STAT(pv_entry_spare -= _NPCPV);
	PV_STAT(pc_chunk_count--);
	PV_STAT(pc_chunk_frees++);
	/* entire chunk is free, return it */
	TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
	m = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)pc));
	pmap_qremove((vm_offset_t)pc, 1);
	vm_page_unwire(m, 0);
	vm_page_free(m);
	pmap_ptelist_free(&pv_vafree, (vm_offset_t)pc);
}
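
/*
 * Worked example (illustrative): the index arithmetic above maps a pv
 * entry to one bit of pc_map.  For the 37th entry of a chunk:
 *
 *	idx = 37;
 *	field = idx / 32;		// word 1
 *	bit = idx % 32;			// bit 5
 *	pc->pc_map[1] |= 1ul << 5;	// mark it free again
 *
 * When every word matches pc_freemask[], the chunk holds no live
 * entries, so the whole page is unmapped and freed.
 */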

/*
 * get a new pv_entry, allocating a block from the system
 * when needed.
 */
static pv_entry_t
get_pv_entry(pmap_t pmap, int try)
{
	static const struct timeval printinterval = { 60, 0 };
	static struct timeval lastprint;
	static vm_pindex_t colour;
	struct vpgqueues *pq;
	int bit, field;
	pv_entry_t pv;
	struct pv_chunk *pc;
	vm_page_t m;

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	PV_STAT(pv_entry_allocs++);
	pv_entry_count++;
	if (pv_entry_count > pv_entry_high_water)
		if (ratecheck(&lastprint, &printinterval))
			printf("Approaching the limit on PV entries, consider "
			    "increasing either the vm.pmap.shpgperproc or the "
			    "vm.pmap.pv_entry_max tunable.\n");
	pq = NULL;
retry:
	pc = TAILQ_FIRST(&pmap->pm_pvchunk);
	if (pc != NULL) {
		for (field = 0; field < _NPCM; field++) {
			if (pc->pc_map[field]) {
				bit = bsfl(pc->pc_map[field]);
				break;
			}
		}
		if (field < _NPCM) {
			pv = &pc->pc_pventry[field * 32 + bit];
			pc->pc_map[field] &= ~(1ul << bit);
			/* If this was the last item, move it to tail */
			for (field = 0; field < _NPCM; field++)
				if (pc->pc_map[field] != 0) {
					PV_STAT(pv_entry_spare--);
					return (pv);	/* not full, return */
				}
			TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
			TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list);
			PV_STAT(pv_entry_spare--);
			return (pv);
		}
	}
	/*
	 * Access to the ptelist "pv_vafree" is synchronized by the page
	 * queues lock.  If "pv_vafree" is currently non-empty, it will
	 * remain non-empty until pmap_ptelist_alloc() completes.
	 */
	if (pv_vafree == 0 || (m = vm_page_alloc(NULL, colour, (pq ==
	    &vm_page_queues[PQ_ACTIVE] ? VM_ALLOC_SYSTEM : VM_ALLOC_NORMAL) |
	    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {
		if (try) {
			pv_entry_count--;
			PV_STAT(pc_chunk_tryfail++);
			return (NULL);
		}
		/*
		 * Reclaim pv entries: At first, destroy mappings to
		 * inactive pages.  After that, if a pv chunk entry
		 * is still needed, destroy mappings to active pages.
		 */
		if (pq == NULL) {
			PV_STAT(pmap_collect_inactive++);
			pq = &vm_page_queues[PQ_INACTIVE];
		} else if (pq == &vm_page_queues[PQ_INACTIVE]) {
			PV_STAT(pmap_collect_active++);
			pq = &vm_page_queues[PQ_ACTIVE];
		} else
			panic("get_pv_entry: increase vm.pmap.shpgperproc");
		pmap_collect(pmap, pq);
		goto retry;
	}
	PV_STAT(pc_chunk_count++);
	PV_STAT(pc_chunk_allocs++);
	colour++;
	pc = (struct pv_chunk *)pmap_ptelist_alloc(&pv_vafree);
	pmap_qenter((vm_offset_t)pc, &m, 1);
	if ((m->flags & PG_ZERO) == 0)
		pagezero(pc);
	pc->pc_pmap = pmap;
	pc->pc_map[0] = pc_freemask[0] & ~1ul;	/* preallocated bit 0 */
	for (field = 1; field < _NPCM; field++)
		pc->pc_map[field] = pc_freemask[field];
	pv = &pc->pc_pventry[0];
	TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
	PV_STAT(pv_entry_spare += _NPCPV - 1);
	return (pv);
}
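
/*
 * Explanatory note: allocation failure in get_pv_entry() escalates in
 * two stages.  The first trip through the reclaim path runs
 * pmap_collect() over the inactive queue; if the retry still cannot
 * produce a chunk, it runs over the active queue; a third failure is
 * treated as a tuning bug and panics, pointing the administrator at
 * vm.pmap.shpgperproc.
 */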

static __inline pv_entry_t
pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
{
	pv_entry_t pv;

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
		if (pmap == PV_PMAP(pv) && va == pv->pv_va) {
			TAILQ_REMOVE(&pvh->pv_list, pv, pv_list);
			break;
		}
	}
	return (pv);
}

static void
pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
{
	pv_entry_t pv;

	pv = pmap_pvh_remove(pvh, pmap, va);
	KASSERT(pv != NULL, ("pmap_pvh_free: pv not found"));
	free_pv_entry(pmap, pv);
}

static void
pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va)
{

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	pmap_pvh_free(&m->md, pmap, va);
	if (TAILQ_EMPTY(&m->md.pv_list))
		vm_page_aflag_clear(m, PGA_WRITEABLE);
}

/*
 * Conditionally create a pv entry.
 */
static boolean_t
pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m)
{
	pv_entry_t pv;

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	if (pv_entry_count < pv_entry_high_water &&
	    (pv = get_pv_entry(pmap, TRUE)) != NULL) {
		pv->pv_va = va;
		TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
		return (TRUE);
	} else
		return (FALSE);
}

/*
 * pmap_remove_pte: do the things to unmap a page in a process
 */
static int
pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t va, vm_page_t *free)
{
	pt_entry_t oldpte;
	vm_page_t m;

	CTR3(KTR_PMAP, "pmap_remove_pte: pmap=%p *ptq=0x%x va=0x%x",
	    pmap, (u_long)*ptq, va);

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	oldpte = *ptq;
	PT_SET_VA_MA(ptq, 0, TRUE);
	if (oldpte & PG_W)
		pmap->pm_stats.wired_count -= 1;
	/*
	 * Machines that don't support invlpg, also don't support
	 * PG_G.
	 */
	if (oldpte & PG_G)
		pmap_invalidate_page(kernel_pmap, va);
	pmap->pm_stats.resident_count -= 1;
	if (oldpte & PG_MANAGED) {
		m = PHYS_TO_VM_PAGE(xpmap_mtop(oldpte) & PG_FRAME);
		if ((oldpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
			vm_page_dirty(m);
		if (oldpte & PG_A)
			vm_page_aflag_set(m, PGA_REFERENCED);
		pmap_remove_entry(pmap, m, va);
	}
	return (pmap_unuse_pt(pmap, va, free));
}

/*
 * Remove a single page from a process address space
 */
static void
pmap_remove_page(pmap_t pmap, vm_offset_t va, vm_page_t *free)
{
	pt_entry_t *pte;

	CTR2(KTR_PMAP, "pmap_remove_page: pmap=%p va=0x%x",
	    pmap, va);

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	KASSERT(curthread->td_pinned > 0, ("curthread not pinned"));
	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	if ((pte = pmap_pte_quick(pmap, va)) == NULL || (*pte & PG_V) == 0)
		return;
	pmap_remove_pte(pmap, pte, va, free);
	pmap_invalidate_page(pmap, va);
	if (*PMAP1)
		PT_SET_MA(PADDR1, 0);
}
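
/*
 * Explanatory note: PMAP1/PADDR1 is the transient PTE slot that
 * pmap_pte_quick() uses to reach page table pages of a non-current
 * pmap while the thread is pinned.  The "if (*PMAP1) PT_SET_MA(PADDR1,
 * 0)" idiom seen above tears that window down so a stale mapping is
 * not left installed once the removal is finished.
 */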

/*
 * Remove the given range of addresses from the specified map.
 *
 * It is assumed that the start and end are properly
 * rounded to the page size.
 */
void
pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{
	vm_offset_t pdnxt;
	pd_entry_t ptpaddr;
	pt_entry_t *pte;
	vm_page_t free = NULL;
	int anyvalid;

	CTR3(KTR_PMAP, "pmap_remove: pmap=%p sva=0x%x eva=0x%x",
	    pmap, sva, eva);

	/*
	 * Perform an unsynchronized read.  This is, however, safe.
	 */
	if (pmap->pm_stats.resident_count == 0)
		return;

	anyvalid = 0;

	vm_page_lock_queues();
	sched_pin();
	PMAP_LOCK(pmap);

	/*
	 * special handling of removing one page.  a very
	 * common operation and easy to short circuit some
	 * code.
	 */
	if ((sva + PAGE_SIZE == eva) &&
	    ((pmap->pm_pdir[(sva >> PDRSHIFT)] & PG_PS) == 0)) {
		pmap_remove_page(pmap, sva, &free);
		goto out;
	}

	for (; sva < eva; sva = pdnxt) {
		unsigned pdirindex;

		/*
		 * Calculate index for next page table.
		 */
		pdnxt = (sva + NBPDR) & ~PDRMASK;
		if (pmap->pm_stats.resident_count == 0)
			break;

		pdirindex = sva >> PDRSHIFT;
		ptpaddr = pmap->pm_pdir[pdirindex];

		/*
		 * Weed out invalid mappings. Note: we assume that the page
		 * directory table is always allocated, and in kernel virtual.
		 */
		if (ptpaddr == 0)
			continue;

		/*
		 * Check for large page.
		 */
		if ((ptpaddr & PG_PS) != 0) {
			PD_CLEAR_VA(pmap, pdirindex, TRUE);
			pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE;
			anyvalid = 1;
			continue;
		}

		/*
		 * Limit our scan to either the end of the va represented
		 * by the current page table page, or to the end of the
		 * range being removed.
		 */
		if (pdnxt > eva)
			pdnxt = eva;

		for (pte = pmap_pte_quick(pmap, sva); sva != pdnxt; pte++,
		    sva += PAGE_SIZE) {
			if ((*pte & PG_V) == 0)
				continue;

			/*
			 * The TLB entry for a PG_G mapping is invalidated
			 * by pmap_remove_pte().
			 */
			if ((*pte & PG_G) == 0)
				anyvalid = 1;
			if (pmap_remove_pte(pmap, pte, sva, &free))
				break;
		}
	}
	PT_UPDATES_FLUSH();
	if (*PMAP1)
		PT_SET_VA_MA(PMAP1, 0, TRUE);
out:
	if (anyvalid)
		pmap_invalidate_all(pmap);
	sched_unpin();
	vm_page_unlock_queues();
	PMAP_UNLOCK(pmap);
	pmap_free_zero_pages(free);
}

/*
 * Routine:	pmap_remove_all
 * Function:
 *		Removes this physical page from
 *		all physical maps in which it resides.
 *		Reflects back modify bits to the pager.
 *
 * Notes:
 *		Original versions of this routine were very
 *		inefficient because they iteratively called
 *		pmap_remove (slow...)
 */

void
pmap_remove_all(vm_page_t m)
{
	pv_entry_t pv;
	pmap_t pmap;
	pt_entry_t *pte, tpte;
	vm_page_t free;

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("pmap_remove_all: page %p is not managed", m));
	free = NULL;
	vm_page_lock_queues();
	sched_pin();
	while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
		pmap = PV_PMAP(pv);
		PMAP_LOCK(pmap);
		pmap->pm_stats.resident_count--;
		pte = pmap_pte_quick(pmap, pv->pv_va);

		tpte = *pte;
		PT_SET_VA_MA(pte, 0, TRUE);
		if (tpte & PG_W)
			pmap->pm_stats.wired_count--;
		if (tpte & PG_A)
			vm_page_aflag_set(m, PGA_REFERENCED);

		/*
		 * Update the vm_page_t clean and reference bits.
		 */
		if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
			vm_page_dirty(m);
		pmap_unuse_pt(pmap, pv->pv_va, &free);
		pmap_invalidate_page(pmap, pv->pv_va);
		TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
		free_pv_entry(pmap, pv);
		PMAP_UNLOCK(pmap);
	}
	vm_page_aflag_clear(m, PGA_WRITEABLE);
	PT_UPDATES_FLUSH();
	if (*PMAP1)
		PT_SET_MA(PADDR1, 0);
	sched_unpin();
	vm_page_unlock_queues();
	pmap_free_zero_pages(free);
}
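
/*
 * Explanatory note: pmap_remove_all() is the reverse-map operation.
 * The PV list attached to the vm_page enumerates every (pmap, va)
 * pair that currently maps the page, which is what lets the loop
 * above find and destroy all mappings of one physical page without
 * scanning any address space.
 */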

/*
 * Set the physical protection on the
 * specified range of this map as requested.
 */
void
pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
{
	vm_offset_t pdnxt;
	pd_entry_t ptpaddr;
	pt_entry_t *pte;
	int anychanged;

	CTR4(KTR_PMAP, "pmap_protect: pmap=%p sva=0x%x eva=0x%x prot=0x%x",
	    pmap, sva, eva, prot);

	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
		pmap_remove(pmap, sva, eva);
		return;
	}

#ifdef PAE
	if ((prot & (VM_PROT_WRITE|VM_PROT_EXECUTE)) ==
	    (VM_PROT_WRITE|VM_PROT_EXECUTE))
		return;
#else
	if (prot & VM_PROT_WRITE)
		return;
#endif

	anychanged = 0;

	vm_page_lock_queues();
	sched_pin();
	PMAP_LOCK(pmap);
	for (; sva < eva; sva = pdnxt) {
		pt_entry_t obits, pbits;
		unsigned pdirindex;

		pdnxt = (sva + NBPDR) & ~PDRMASK;

		pdirindex = sva >> PDRSHIFT;
		ptpaddr = pmap->pm_pdir[pdirindex];

		/*
		 * Weed out invalid mappings. Note: we assume that the page
		 * directory table is always allocated, and in kernel virtual.
		 */
		if (ptpaddr == 0)
			continue;

		/*
		 * Check for large page.
		 */
		if ((ptpaddr & PG_PS) != 0) {
			if ((prot & VM_PROT_WRITE) == 0)
				pmap->pm_pdir[pdirindex] &= ~(PG_M|PG_RW);
#ifdef PAE
			if ((prot & VM_PROT_EXECUTE) == 0)
				pmap->pm_pdir[pdirindex] |= pg_nx;
#endif
			anychanged = 1;
			continue;
		}

		if (pdnxt > eva)
			pdnxt = eva;

		for (pte = pmap_pte_quick(pmap, sva); sva != pdnxt; pte++,
		    sva += PAGE_SIZE) {
			vm_page_t m;

retry:
			/*
			 * Regardless of whether a pte is 32 or 64 bits in
			 * size, PG_RW, PG_A, and PG_M are among the least
			 * significant 32 bits.
			 */
			obits = pbits = *pte;
			if ((pbits & PG_V) == 0)
				continue;

			if ((prot & VM_PROT_WRITE) == 0) {
				if ((pbits & (PG_MANAGED | PG_M | PG_RW)) ==
				    (PG_MANAGED | PG_M | PG_RW)) {
					m = PHYS_TO_VM_PAGE(xpmap_mtop(pbits) &
					    PG_FRAME);
					vm_page_dirty(m);
				}
				pbits &= ~(PG_RW | PG_M);
			}
#ifdef PAE
			if ((prot & VM_PROT_EXECUTE) == 0)
				pbits |= pg_nx;
#endif

			if (pbits != obits) {
				obits = *pte;
				PT_SET_VA_MA(pte, pbits, TRUE);
				if (*pte != pbits)
					goto retry;
				if (obits & PG_G)
					pmap_invalidate_page(pmap, sva);
				else
					anychanged = 1;
			}
		}
	}
	PT_UPDATES_FLUSH();
	if (*PMAP1)
		PT_SET_VA_MA(PMAP1, 0, TRUE);
	if (anychanged)
		pmap_invalidate_all(pmap);
	sched_unpin();
	vm_page_unlock_queues();
	PMAP_UNLOCK(pmap);
}

/*
 * Insert the given physical page (p) at
 * the specified virtual address (v) in the
 * target physical map with the protection requested.
 *
 * If specified, the page will be wired down, meaning
 * that the related pte can not be reclaimed.
 *
 * NB:  This is the only routine which MAY NOT lazy-evaluate
 * or lose information.  That is, this routine must actually
 * insert this page into the given map NOW.
 */
void
pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
    vm_prot_t prot, boolean_t wired)
{
	pd_entry_t *pde;
	pt_entry_t *pte;
	pt_entry_t newpte, origpte;
	pv_entry_t pv;
	vm_paddr_t opa, pa;
	vm_page_t mpte, om;
	boolean_t invlva;

	CTR6(KTR_PMAP, "pmap_enter: pmap=%08p va=0x%08x access=0x%x ma=0x%08x prot=0x%x wired=%d",
	    pmap, va, access, VM_PAGE_TO_MACH(m), prot, wired);
	va = trunc_page(va);
	KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig"));
	KASSERT(va < UPT_MIN_ADDRESS || va >= UPT_MAX_ADDRESS,
	    ("pmap_enter: invalid to pmap_enter page table pages (va: 0x%x)",
	    va));
	KASSERT((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) != 0,
	    ("pmap_enter: page %p is not busy", m));

	mpte = NULL;

	vm_page_lock_queues();
	PMAP_LOCK(pmap);
	sched_pin();

	/*
	 * In the case that a page table page is not
	 * resident, we are creating it here.
	 */
	if (va < VM_MAXUSER_ADDRESS) {
		mpte = pmap_allocpte(pmap, va, M_WAITOK);
	}

	pde = pmap_pde(pmap, va);
	if ((*pde & PG_PS) != 0)
		panic("pmap_enter: attempted pmap_enter on 4MB page");
	pte = pmap_pte_quick(pmap, va);

	/*
	 * Page Directory table entry not valid, we need a new PT page
	 */
	if (pte == NULL) {
		panic("pmap_enter: invalid page directory pdir=%#jx, va=%#x",
		    (uintmax_t)pmap->pm_pdir[va >> PDRSHIFT], va);
	}

	pa = VM_PAGE_TO_PHYS(m);
	om = NULL;
	opa = origpte = 0;

#if 0
	KASSERT((*pte & PG_V) || (*pte == 0), ("address set but not valid pte=%p *pte=0x%016jx",
	    pte, *pte));
#endif
	origpte = *pte;
	if (origpte)
		origpte = xpmap_mtop(origpte);
	opa = origpte & PG_FRAME;

	/*
	 * Mapping has not changed, must be protection or wiring change.
	 */
	if (origpte && (opa == pa)) {
		/*
		 * Wiring change, just update stats. We don't worry about
		 * wiring PT pages as they remain resident as long as there
		 * are valid mappings in them. Hence, if a user page is wired,
		 * the PT page will be also.
		 */
		if (wired && ((origpte & PG_W) == 0))
			pmap->pm_stats.wired_count++;
		else if (!wired && (origpte & PG_W))
			pmap->pm_stats.wired_count--;

		/*
		 * Remove extra pte reference
		 */
		if (mpte)
			mpte->wire_count--;

		if (origpte & PG_MANAGED) {
			om = m;
			pa |= PG_MANAGED;
		}
		goto validate;
	}

	pv = NULL;

	/*
	 * Mapping has changed, invalidate old range and fall through to
	 * handle validating new mapping.
	 */
	if (opa) {
		if (origpte & PG_W)
			pmap->pm_stats.wired_count--;
		if (origpte & PG_MANAGED) {
			om = PHYS_TO_VM_PAGE(opa);
			pv = pmap_pvh_remove(&om->md, pmap, va);
		} else if (va < VM_MAXUSER_ADDRESS)
			printf("va=0x%x is unmanaged :-( \n", va);

		if (mpte != NULL) {
			mpte->wire_count--;
			KASSERT(mpte->wire_count > 0,
			    ("pmap_enter: missing reference to page table page,"
			    " va: 0x%x", va));
		}
	} else
		pmap->pm_stats.resident_count++;

	/*
	 * Enter on the PV list if part of our managed memory.
	 */
	if ((m->oflags & VPO_UNMANAGED) == 0) {
		KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva,
		    ("pmap_enter: managed mapping within the clean submap"));
		if (pv == NULL)
			pv = get_pv_entry(pmap, FALSE);
		pv->pv_va = va;
		TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
		pa |= PG_MANAGED;
	} else if (pv != NULL)
		free_pv_entry(pmap, pv);

	/*
	 * Increment counters
	 */
	if (wired)
		pmap->pm_stats.wired_count++;

validate:
	/*
	 * Now validate mapping with desired protection/wiring.
	 */
	newpte = (pt_entry_t)(pa | PG_V);
	if ((prot & VM_PROT_WRITE) != 0) {
		newpte |= PG_RW;
		if ((newpte & PG_MANAGED) != 0)
			vm_page_aflag_set(m, PGA_WRITEABLE);
	}
#ifdef PAE
	if ((prot & VM_PROT_EXECUTE) == 0)
		newpte |= pg_nx;
#endif
	if (wired)
		newpte |= PG_W;
	if (va < VM_MAXUSER_ADDRESS)
		newpte |= PG_U;
	if (pmap == kernel_pmap)
		newpte |= pgeflag;

	critical_enter();
	/*
	 * if the mapping or permission bits are different, we need
	 * to update the pte.
	 */
	if ((origpte & ~(PG_M|PG_A)) != newpte) {
		if (origpte) {
			invlva = FALSE;
			origpte = *pte;
			PT_SET_VA(pte, newpte | PG_A, FALSE);
			if (origpte & PG_A) {
				if (origpte & PG_MANAGED)
					vm_page_aflag_set(om, PGA_REFERENCED);
				if (opa != VM_PAGE_TO_PHYS(m))
					invlva = TRUE;
#ifdef PAE
				if ((origpte & PG_NX) == 0 &&
				    (newpte & PG_NX) != 0)
					invlva = TRUE;
#endif
			}
			if ((origpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
				if ((origpte & PG_MANAGED) != 0)
					vm_page_dirty(om);
				if ((prot & VM_PROT_WRITE) == 0)
					invlva = TRUE;
			}
			if ((origpte & PG_MANAGED) != 0 &&
			    TAILQ_EMPTY(&om->md.pv_list))
				vm_page_aflag_clear(om, PGA_WRITEABLE);
			if (invlva)
				pmap_invalidate_page(pmap, va);
		} else {
			PT_SET_VA(pte, newpte | PG_A, FALSE);
		}
	}
	PT_UPDATES_FLUSH();
	critical_exit();
	if (*PMAP1)
		PT_SET_VA_MA(PMAP1, 0, TRUE);
	sched_unpin();
	vm_page_unlock_queues();
	PMAP_UNLOCK(pmap);
}
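
/*
 * Explanatory note: when an existing mapping is overwritten above, the
 * old PTE is re-read and a TLB invalidation (invlva) is done only when
 * a stale entry could actually be cached and harmful: the old entry
 * was accessed (PG_A) and either the physical page changed or NX was
 * added, or it was dirty (PG_M|PG_RW) and write permission is being
 * revoked.  Otherwise the stale TLB entry is harmless and the flush is
 * skipped.
 */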

/*
 * Maps a sequence of resident pages belonging to the same object.
 * The sequence begins with the given page m_start.  This page is
 * mapped at the given virtual address start.  Each subsequent page is
 * mapped at a virtual address that is offset from start by the same
 * amount as the page is offset from m_start within the object.  The
 * last page in the sequence is the page with the largest offset from
 * m_start that can be mapped at a virtual address less than the given
 * virtual address end.  Not every virtual page between start and end
 * is mapped; only those for which a resident page exists with the
 * corresponding offset from m_start are mapped.
 */
void
pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
    vm_page_t m_start, vm_prot_t prot)
{
	vm_page_t m, mpte;
	vm_pindex_t diff, psize;
	multicall_entry_t mcl[16];
	multicall_entry_t *mclp = mcl;
	int error, count = 0;

	VM_OBJECT_LOCK_ASSERT(m_start->object, MA_OWNED);
	psize = atop(end - start);

	mpte = NULL;
	m = m_start;
	vm_page_lock_queues();
	PMAP_LOCK(pmap);
	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
		mpte = pmap_enter_quick_locked(&mclp, &count, pmap, start + ptoa(diff), m,
		    prot, mpte);
		m = TAILQ_NEXT(m, listq);
		if (count == 16) {
			error = HYPERVISOR_multicall(mcl, count);
			KASSERT(error == 0, ("bad multicall %d", error));
			mclp = mcl;
			count = 0;
		}
	}
	if (count) {
		error = HYPERVISOR_multicall(mcl, count);
		KASSERT(error == 0, ("bad multicall %d", error));
	}
	vm_page_unlock_queues();
	PMAP_UNLOCK(pmap);
}

/*
 * this code makes some *MAJOR* assumptions:
 * 1. Current pmap & pmap exists.
 * 2. Not wired.
 * 3. Read access.
 * 4. No page table pages.
 * but is *MUCH* faster than pmap_enter...
2856181641Skmacy */ 2857181641Skmacy 2858181641Skmacyvoid 2859181641Skmacypmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot) 2860181641Skmacy{ 2861181641Skmacy multicall_entry_t mcl, *mclp; 2862181641Skmacy int count = 0; 2863181641Skmacy mclp = &mcl; 2864181641Skmacy 2865181641Skmacy CTR4(KTR_PMAP, "pmap_enter_quick: pmap=%p va=0x%x m=%p prot=0x%x", 2866181641Skmacy pmap, va, m, prot); 2867181641Skmacy 2868207796Salc vm_page_lock_queues(); 2869181641Skmacy PMAP_LOCK(pmap); 2870207796Salc (void)pmap_enter_quick_locked(&mclp, &count, pmap, va, m, prot, NULL); 2871181641Skmacy if (count) 2872181641Skmacy HYPERVISOR_multicall(&mcl, count); 2873207796Salc vm_page_unlock_queues(); 2874181641Skmacy PMAP_UNLOCK(pmap); 2875181641Skmacy} 2876181641Skmacy 2877181747Skmacy#ifdef notyet 2878181641Skmacyvoid 2879181641Skmacypmap_enter_quick_range(pmap_t pmap, vm_offset_t *addrs, vm_page_t *pages, vm_prot_t *prots, int count) 2880181641Skmacy{ 2881181641Skmacy int i, error, index = 0; 2882181641Skmacy multicall_entry_t mcl[16]; 2883181641Skmacy multicall_entry_t *mclp = mcl; 2884181641Skmacy 2885181641Skmacy PMAP_LOCK(pmap); 2886181641Skmacy for (i = 0; i < count; i++, addrs++, pages++, prots++) { 2887181641Skmacy if (!pmap_is_prefaultable_locked(pmap, *addrs)) 2888181641Skmacy continue; 2889181641Skmacy 2890181641Skmacy (void) pmap_enter_quick_locked(&mclp, &index, pmap, *addrs, *pages, *prots, NULL); 2891181641Skmacy if (index == 16) { 2892181641Skmacy error = HYPERVISOR_multicall(mcl, index); 2893181641Skmacy mclp = mcl; 2894181641Skmacy index = 0; 2895181641Skmacy KASSERT(error == 0, ("bad multicall %d", error)); 2896181641Skmacy } 2897181641Skmacy } 2898181641Skmacy if (index) { 2899181641Skmacy error = HYPERVISOR_multicall(mcl, index); 2900181641Skmacy KASSERT(error == 0, ("bad multicall %d", error)); 2901181641Skmacy } 2902181641Skmacy 2903181641Skmacy PMAP_UNLOCK(pmap); 2904181641Skmacy} 2905181747Skmacy#endif 2906181641Skmacy 2907181641Skmacystatic vm_page_t 2908181641Skmacypmap_enter_quick_locked(multicall_entry_t **mclpp, int *count, pmap_t pmap, vm_offset_t va, vm_page_t m, 2909181641Skmacy vm_prot_t prot, vm_page_t mpte) 2910181641Skmacy{ 2911181641Skmacy pt_entry_t *pte; 2912181641Skmacy vm_paddr_t pa; 2913181641Skmacy vm_page_t free; 2914181641Skmacy multicall_entry_t *mcl = *mclpp; 2915181641Skmacy 2916181641Skmacy KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva || 2917224746Skib (m->oflags & VPO_UNMANAGED) != 0, 2918181641Skmacy ("pmap_enter_quick_locked: managed mapping within the clean submap")); 2919181641Skmacy mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2920181641Skmacy PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2921181641Skmacy 2922181641Skmacy /* 2923181641Skmacy * In the case that a page table page is not 2924181641Skmacy * resident, we are creating it here. 2925181641Skmacy */ 2926181641Skmacy if (va < VM_MAXUSER_ADDRESS) { 2927181641Skmacy unsigned ptepindex; 2928181641Skmacy pd_entry_t ptema; 2929181641Skmacy 2930181641Skmacy /* 2931181641Skmacy * Calculate pagetable page index 2932181641Skmacy */ 2933181641Skmacy ptepindex = va >> PDRSHIFT; 2934181641Skmacy if (mpte && (mpte->pindex == ptepindex)) { 2935181641Skmacy mpte->wire_count++; 2936181641Skmacy } else { 2937181641Skmacy /* 2938181641Skmacy * Get the page directory entry 2939181641Skmacy */ 2940181641Skmacy ptema = pmap->pm_pdir[ptepindex]; 2941181641Skmacy 2942181641Skmacy /* 2943181641Skmacy * If the page table page is mapped, we just increment 2944181641Skmacy * the hold count, and activate it. 
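 *
 * A minimal sketch of the refcounting done through mpte->wire_count:
 * every 4 KB mapping beneath a page-table page takes one reference, and
 * the page becomes reclaimable when the count drops to zero.  The type
 * and helper names here are illustrative only:
 *
 *	struct ptpage { unsigned wire_count; };
 *
 *	static void pt_ref(struct ptpage *pt)   { pt->wire_count++; }
 *	static int  pt_unref(struct ptpage *pt)
 *	{
 *		/* returns 1 when the last mapping went away */
 *		return (--pt->wire_count == 0);
 *	}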
2945181641Skmacy */ 2946181641Skmacy if (ptema & PG_V) { 2947181641Skmacy if (ptema & PG_PS) 2948181641Skmacy panic("pmap_enter_quick: unexpected mapping into 4MB page"); 2949181641Skmacy mpte = PHYS_TO_VM_PAGE(xpmap_mtop(ptema) & PG_FRAME); 2950181641Skmacy mpte->wire_count++; 2951181641Skmacy } else { 2952181641Skmacy mpte = _pmap_allocpte(pmap, ptepindex, 2953181641Skmacy M_NOWAIT); 2954181641Skmacy if (mpte == NULL) 2955181641Skmacy return (mpte); 2956181641Skmacy } 2957181641Skmacy } 2958181641Skmacy } else { 2959181641Skmacy mpte = NULL; 2960181641Skmacy } 2961181641Skmacy 2962181641Skmacy /* 2963181641Skmacy * This call to vtopte makes the assumption that we are 2964181641Skmacy * entering the page into the current pmap. In order to support 2965181641Skmacy * quick entry into any pmap, one would likely use pmap_pte_quick. 2966181641Skmacy * But that isn't as quick as vtopte. 2967181641Skmacy */ 2968181641Skmacy KASSERT(pmap_is_current(pmap), ("entering pages in non-current pmap")); 2969181641Skmacy pte = vtopte(va); 2970181641Skmacy if (*pte & PG_V) { 2971181641Skmacy if (mpte != NULL) { 2972181641Skmacy mpte->wire_count--; 2973181641Skmacy mpte = NULL; 2974181641Skmacy } 2975181641Skmacy return (mpte); 2976181641Skmacy } 2977181641Skmacy 2978181641Skmacy /* 2979181641Skmacy * Enter on the PV list if part of our managed memory. 2980181641Skmacy */ 2981224746Skib if ((m->oflags & VPO_UNMANAGED) == 0 && 2982181641Skmacy !pmap_try_insert_pv_entry(pmap, va, m)) { 2983181641Skmacy if (mpte != NULL) { 2984181641Skmacy free = NULL; 2985181641Skmacy if (pmap_unwire_pte_hold(pmap, mpte, &free)) { 2986181641Skmacy pmap_invalidate_page(pmap, va); 2987181641Skmacy pmap_free_zero_pages(free); 2988181641Skmacy } 2989181641Skmacy 2990181641Skmacy mpte = NULL; 2991181641Skmacy } 2992181641Skmacy return (mpte); 2993181641Skmacy } 2994181641Skmacy 2995181641Skmacy /* 2996181641Skmacy * Increment counters 2997181641Skmacy */ 2998181641Skmacy pmap->pm_stats.resident_count++; 2999181641Skmacy 3000181641Skmacy pa = VM_PAGE_TO_PHYS(m); 3001181641Skmacy#ifdef PAE 3002181641Skmacy if ((prot & VM_PROT_EXECUTE) == 0) 3003181641Skmacy pa |= pg_nx; 3004181641Skmacy#endif 3005181641Skmacy 3006181641Skmacy#if 0 3007181641Skmacy /* 3008181641Skmacy * Now validate mapping with RO protection 3009181641Skmacy */ 3010224746Skib if ((m->oflags & VPO_UNMANAGED) != 0) 3011181641Skmacy pte_store(pte, pa | PG_V | PG_U); 3012181641Skmacy else 3013181641Skmacy pte_store(pte, pa | PG_V | PG_U | PG_MANAGED); 3014181641Skmacy#else 3015181641Skmacy /* 3016181641Skmacy * Now validate mapping with RO protection 3017181641Skmacy */ 3018224746Skib if ((m->oflags & VPO_UNMANAGED) != 0) 3019181641Skmacy pa = xpmap_ptom(pa | PG_V | PG_U); 3020181641Skmacy else 3021181641Skmacy pa = xpmap_ptom(pa | PG_V | PG_U | PG_MANAGED); 3022181641Skmacy 3023181641Skmacy mcl->op = __HYPERVISOR_update_va_mapping; 3024181641Skmacy mcl->args[0] = va; 3025181641Skmacy mcl->args[1] = (uint32_t)(pa & 0xffffffff); 3026181641Skmacy mcl->args[2] = (uint32_t)(pa >> 32); 3027181641Skmacy mcl->args[3] = 0; 3028181641Skmacy *mclpp = mcl + 1; 3029181641Skmacy *count = *count + 1; 3030181641Skmacy#endif 3031181641Skmacy return mpte; 3032181641Skmacy} 3033181641Skmacy 3034181641Skmacy/* 3035181641Skmacy * Make a temporary mapping for a physical address. This is only intended 3036181641Skmacy * to be used for panic dumps. 
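 *
 * Under Xen the PTE written here must hold a machine address, so the
 * pseudo-physical argument is first run through xpmap_ptom().  A toy
 * model of that translation, assuming a simple frame-indexed lookup
 * table; the table and helper are illustrative, not the kernel's:
 *
 *	#define TOY_PAGE_SHIFT	12
 *	#define TOY_PAGE_MASK	0xfffUL
 *
 *	static unsigned long p2m[1024];	/* pseudo-phys frame -> mach frame */
 *
 *	static unsigned long toy_ptom(unsigned long pa)
 *	{
 *		unsigned long low = pa & TOY_PAGE_MASK;	/* keep flag bits */
 *
 *		return ((p2m[pa >> TOY_PAGE_SHIFT] << TOY_PAGE_SHIFT) | low);
 *	}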
3037181641Skmacy */ 3038181641Skmacyvoid * 3039181641Skmacypmap_kenter_temporary(vm_paddr_t pa, int i) 3040181641Skmacy{ 3041181641Skmacy vm_offset_t va; 3042200346Skmacy vm_paddr_t ma = xpmap_ptom(pa); 3043181641Skmacy 3044181641Skmacy va = (vm_offset_t)crashdumpmap + (i * PAGE_SIZE); 3045200346Skmacy PT_SET_MA(va, (ma & ~PAGE_MASK) | PG_V | pgeflag); 3046181641Skmacy invlpg(va); 3047181641Skmacy return ((void *)crashdumpmap); 3048181641Skmacy} 3049181641Skmacy 3050181641Skmacy/* 3051181641Skmacy * This code maps large physical mmap regions into the 3052181641Skmacy * processor address space. Note that some shortcuts 3053181641Skmacy * are taken, but the code works. 3054181641Skmacy */ 3055181641Skmacyvoid 3056181641Skmacypmap_object_init_pt(pmap_t pmap, vm_offset_t addr, 3057181641Skmacy vm_object_t object, vm_pindex_t pindex, 3058181641Skmacy vm_size_t size) 3059181641Skmacy{ 3060207419Skmacy pd_entry_t *pde; 3061207419Skmacy vm_paddr_t pa, ptepa; 3062181641Skmacy vm_page_t p; 3063207419Skmacy int pat_mode; 3064181641Skmacy 3065181641Skmacy VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); 3066195840Sjhb KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG, 3067181641Skmacy ("pmap_object_init_pt: non-device object")); 3068181641Skmacy if (pseflag && 3069207419Skmacy (addr & (NBPDR - 1)) == 0 && (size & (NBPDR - 1)) == 0) { 3070207419Skmacy if (!vm_object_populate(object, pindex, pindex + atop(size))) 3071207419Skmacy return; 3072181641Skmacy p = vm_page_lookup(object, pindex); 3073207419Skmacy KASSERT(p->valid == VM_PAGE_BITS_ALL, 3074207419Skmacy ("pmap_object_init_pt: invalid page %p", p)); 3075207419Skmacy pat_mode = p->md.pat_mode; 3076207419Skmacy /* 3077207419Skmacy * Abort the mapping if the first page is not physically 3078207419Skmacy * aligned to a 2/4MB page boundary. 3079207419Skmacy */ 3080181641Skmacy ptepa = VM_PAGE_TO_PHYS(p); 3081181641Skmacy if (ptepa & (NBPDR - 1)) 3082181641Skmacy return; 3083207419Skmacy /* 3084207419Skmacy * Skip the first page. Abort the mapping if the rest of 3085207419Skmacy * the pages are not physically contiguous or have differing 3086207419Skmacy * memory attributes. 3087207419Skmacy */ 3088207419Skmacy p = TAILQ_NEXT(p, listq); 3089207419Skmacy for (pa = ptepa + PAGE_SIZE; pa < ptepa + size; 3090207419Skmacy pa += PAGE_SIZE) { 3091207419Skmacy KASSERT(p->valid == VM_PAGE_BITS_ALL, 3092207419Skmacy ("pmap_object_init_pt: invalid page %p", p)); 3093207419Skmacy if (pa != VM_PAGE_TO_PHYS(p) || 3094207419Skmacy pat_mode != p->md.pat_mode) 3095207419Skmacy return; 3096207419Skmacy p = TAILQ_NEXT(p, listq); 3097207419Skmacy } 3098207419Skmacy /* Map using 2/4MB pages. */ 3099181641Skmacy PMAP_LOCK(pmap); 3100207419Skmacy for (pa = ptepa | pmap_cache_bits(pat_mode, 1); pa < ptepa + 3101207419Skmacy size; pa += NBPDR) { 3102207419Skmacy pde = pmap_pde(pmap, addr); 3103207419Skmacy if (*pde == 0) { 3104207419Skmacy pde_store(pde, pa | PG_PS | PG_M | PG_A | 3105207419Skmacy PG_U | PG_RW | PG_V); 3106207419Skmacy pmap->pm_stats.resident_count += NBPDR / 3107207419Skmacy PAGE_SIZE; 3108207419Skmacy pmap_pde_mappings++; 3109207419Skmacy } 3110207419Skmacy /* Else continue on if the PDE is already valid. */ 3111207419Skmacy addr += NBPDR; 3112181641Skmacy } 3113181641Skmacy PMAP_UNLOCK(pmap); 3114181641Skmacy } 3115181641Skmacy} 3116181641Skmacy 3117181641Skmacy/* 3118181641Skmacy * Routine: pmap_change_wiring 3119181641Skmacy * Function: Change the wiring attribute for a map/virtual-address 3120181641Skmacy * pair. 
3121181641Skmacy * In/out conditions: 3122181641Skmacy * The mapping must already exist in the pmap. 3123181641Skmacy */ 3124181641Skmacyvoid 3125181641Skmacypmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired) 3126181641Skmacy{ 3127181641Skmacy pt_entry_t *pte; 3128181641Skmacy 3129181641Skmacy vm_page_lock_queues(); 3130181641Skmacy PMAP_LOCK(pmap); 3131181641Skmacy pte = pmap_pte(pmap, va); 3132181641Skmacy 3133181641Skmacy if (wired && !pmap_pte_w(pte)) { 3134181641Skmacy PT_SET_VA_MA((pte), *(pte) | PG_W, TRUE); 3135181641Skmacy pmap->pm_stats.wired_count++; 3136181641Skmacy } else if (!wired && pmap_pte_w(pte)) { 3137181641Skmacy PT_SET_VA_MA((pte), *(pte) & ~PG_W, TRUE); 3138181641Skmacy pmap->pm_stats.wired_count--; 3139181641Skmacy } 3140181641Skmacy 3141181641Skmacy /* 3142181641Skmacy * Wiring is not a hardware characteristic so there is no need to 3143181641Skmacy * invalidate TLB. 3144181641Skmacy */ 3145181641Skmacy pmap_pte_release(pte); 3146181641Skmacy PMAP_UNLOCK(pmap); 3147181641Skmacy vm_page_unlock_queues(); 3148181641Skmacy} 3149181641Skmacy 3150181641Skmacy 3151181641Skmacy 3152181641Skmacy/* 3153181641Skmacy * Copy the range specified by src_addr/len 3154181641Skmacy * from the source map to the range dst_addr/len 3155181641Skmacy * in the destination map. 3156181641Skmacy * 3157181641Skmacy * This routine is only advisory and need not do anything. 3158181641Skmacy */ 3159181641Skmacy 3160181641Skmacyvoid 3161181641Skmacypmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len, 3162181641Skmacy vm_offset_t src_addr) 3163181641Skmacy{ 3164181641Skmacy vm_page_t free; 3165181641Skmacy vm_offset_t addr; 3166181641Skmacy vm_offset_t end_addr = src_addr + len; 3167181641Skmacy vm_offset_t pdnxt; 3168181641Skmacy 3169181641Skmacy if (dst_addr != src_addr) 3170181641Skmacy return; 3171181641Skmacy 3172181641Skmacy if (!pmap_is_current(src_pmap)) { 3173181641Skmacy CTR2(KTR_PMAP, 3174181641Skmacy "pmap_copy, skipping: pdir[PTDPTDI]=0x%jx PTDpde[0]=0x%jx", 3175181641Skmacy (src_pmap->pm_pdir[PTDPTDI] & PG_FRAME), (PTDpde[0] & PG_FRAME)); 3176181641Skmacy 3177181641Skmacy return; 3178181641Skmacy } 3179181641Skmacy CTR5(KTR_PMAP, "pmap_copy: dst_pmap=%p src_pmap=%p dst_addr=0x%x len=%d src_addr=0x%x", 3180181641Skmacy dst_pmap, src_pmap, dst_addr, len, src_addr); 3181181641Skmacy 3182216960Scperciva#ifdef HAMFISTED_LOCKING 3183216960Scperciva mtx_lock(&createdelete_lock); 3184216960Scperciva#endif 3185216960Scperciva 3186181641Skmacy vm_page_lock_queues(); 3187181641Skmacy if (dst_pmap < src_pmap) { 3188181641Skmacy PMAP_LOCK(dst_pmap); 3189181641Skmacy PMAP_LOCK(src_pmap); 3190181641Skmacy } else { 3191181641Skmacy PMAP_LOCK(src_pmap); 3192181641Skmacy PMAP_LOCK(dst_pmap); 3193181641Skmacy } 3194181641Skmacy sched_pin(); 3195181641Skmacy for (addr = src_addr; addr < end_addr; addr = pdnxt) { 3196181641Skmacy pt_entry_t *src_pte, *dst_pte; 3197181641Skmacy vm_page_t dstmpte, srcmpte; 3198181641Skmacy pd_entry_t srcptepaddr; 3199181641Skmacy unsigned ptepindex; 3200181641Skmacy 3201208651Salc KASSERT(addr < UPT_MIN_ADDRESS, 3202208651Salc ("pmap_copy: invalid to pmap_copy page tables")); 3203181641Skmacy 3204181641Skmacy pdnxt = (addr + NBPDR) & ~PDRMASK; 3205181641Skmacy ptepindex = addr >> PDRSHIFT; 3206181641Skmacy 3207181641Skmacy srcptepaddr = PT_GET(&src_pmap->pm_pdir[ptepindex]); 3208181641Skmacy if (srcptepaddr == 0) 3209181641Skmacy continue; 3210181641Skmacy 3211181641Skmacy if (srcptepaddr & PG_PS) { 3212181641Skmacy if 
(dst_pmap->pm_pdir[ptepindex] == 0) { 3213181641Skmacy PD_SET_VA(dst_pmap, ptepindex, srcptepaddr & ~PG_W, TRUE); 3214181641Skmacy dst_pmap->pm_stats.resident_count += 3215181641Skmacy NBPDR / PAGE_SIZE; 3216181641Skmacy } 3217181641Skmacy continue; 3218181641Skmacy } 3219181641Skmacy 3220181641Skmacy srcmpte = PHYS_TO_VM_PAGE(srcptepaddr & PG_FRAME); 3221208651Salc KASSERT(srcmpte->wire_count > 0, 3222208651Salc ("pmap_copy: source page table page is unused")); 3223181641Skmacy 3224181641Skmacy if (pdnxt > end_addr) 3225181641Skmacy pdnxt = end_addr; 3226181641Skmacy 3227181641Skmacy src_pte = vtopte(addr); 3228181641Skmacy while (addr < pdnxt) { 3229181641Skmacy pt_entry_t ptetemp; 3230181641Skmacy ptetemp = *src_pte; 3231181641Skmacy /* 3232181641Skmacy * we only virtual copy managed pages 3233181641Skmacy */ 3234181641Skmacy if ((ptetemp & PG_MANAGED) != 0) { 3235181641Skmacy dstmpte = pmap_allocpte(dst_pmap, addr, 3236181641Skmacy M_NOWAIT); 3237181641Skmacy if (dstmpte == NULL) 3238181641Skmacy break; 3239181641Skmacy dst_pte = pmap_pte_quick(dst_pmap, addr); 3240181641Skmacy if (*dst_pte == 0 && 3241181641Skmacy pmap_try_insert_pv_entry(dst_pmap, addr, 3242181641Skmacy PHYS_TO_VM_PAGE(xpmap_mtop(ptetemp) & PG_FRAME))) { 3243181641Skmacy /* 3244181641Skmacy * Clear the wired, modified, and 3245181641Skmacy * accessed (referenced) bits 3246181641Skmacy * during the copy. 3247181641Skmacy */ 3248181641Skmacy KASSERT(ptetemp != 0, ("src_pte not set")); 3249181641Skmacy PT_SET_VA_MA(dst_pte, ptetemp & ~(PG_W | PG_M | PG_A), TRUE /* XXX debug */); 3250181641Skmacy KASSERT(*dst_pte == (ptetemp & ~(PG_W | PG_M | PG_A)), 3251181641Skmacy ("no pmap copy expected: 0x%jx saw: 0x%jx", 3252181641Skmacy ptetemp & ~(PG_W | PG_M | PG_A), *dst_pte)); 3253181641Skmacy dst_pmap->pm_stats.resident_count++; 3254181641Skmacy } else { 3255181641Skmacy free = NULL; 3256181641Skmacy if (pmap_unwire_pte_hold(dst_pmap, 3257181641Skmacy dstmpte, &free)) { 3258181641Skmacy pmap_invalidate_page(dst_pmap, 3259181641Skmacy addr); 3260181641Skmacy pmap_free_zero_pages(free); 3261181641Skmacy } 3262181641Skmacy } 3263181641Skmacy if (dstmpte->wire_count >= srcmpte->wire_count) 3264181641Skmacy break; 3265181641Skmacy } 3266181641Skmacy addr += PAGE_SIZE; 3267181641Skmacy src_pte++; 3268181641Skmacy } 3269181641Skmacy } 3270181641Skmacy PT_UPDATES_FLUSH(); 3271181641Skmacy sched_unpin(); 3272181641Skmacy vm_page_unlock_queues(); 3273181641Skmacy PMAP_UNLOCK(src_pmap); 3274181641Skmacy PMAP_UNLOCK(dst_pmap); 3275216960Scperciva 3276216960Scperciva#ifdef HAMFISTED_LOCKING 3277216960Scperciva mtx_unlock(&createdelete_lock); 3278216960Scperciva#endif 3279181641Skmacy} 3280181641Skmacy 3281196723Sadrianstatic __inline void 3282196723Sadrianpagezero(void *page) 3283196723Sadrian{ 3284196723Sadrian#if defined(I686_CPU) 3285196723Sadrian if (cpu_class == CPUCLASS_686) { 3286196723Sadrian#if defined(CPU_ENABLE_SSE) 3287196723Sadrian if (cpu_feature & CPUID_SSE2) 3288196723Sadrian sse2_pagezero(page); 3289196723Sadrian else 3290196723Sadrian#endif 3291196723Sadrian i686_pagezero(page); 3292196723Sadrian } else 3293196723Sadrian#endif 3294196723Sadrian bzero(page, PAGE_SIZE); 3295196723Sadrian} 3296196723Sadrian 3297181641Skmacy/* 3298181641Skmacy * pmap_zero_page zeros the specified hardware page by mapping 3299181641Skmacy * the page into KVM and using bzero to clear its contents. 
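 *
 * The zero and copy routines that follow share one discipline: take the
 * per-CPU sysmaps lock, check that the CMAP slot is free, map the frame,
 * operate through the window, unmap, unlock.  A minimal userland sketch
 * of that discipline, assuming POSIX threads; all names are illustrative:
 *
 *	#include <assert.h>
 *	#include <pthread.h>
 *	#include <string.h>
 *
 *	static pthread_mutex_t window_lock = PTHREAD_MUTEX_INITIALIZER;
 *	static char window[4096];	/* plays the role of CADDR2 */
 *	static int window_busy;		/* plays the role of *CMAP2 */
 *
 *	static void zero_via_window(void)
 *	{
 *		pthread_mutex_lock(&window_lock);
 *		assert(!window_busy);	/* the "CMAP2 busy" check */
 *		window_busy = 1;	/* map the target frame */
 *		memset(window, 0, sizeof(window));
 *		window_busy = 0;	/* unmap */
 *		pthread_mutex_unlock(&window_lock);
 *	}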
3300181641Skmacy */ 3301181641Skmacyvoid 3302181641Skmacypmap_zero_page(vm_page_t m) 3303181641Skmacy{ 3304181641Skmacy struct sysmaps *sysmaps; 3305181641Skmacy 3306181641Skmacy sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)]; 3307181641Skmacy mtx_lock(&sysmaps->lock); 3308181641Skmacy if (*sysmaps->CMAP2) 3309181641Skmacy panic("pmap_zero_page: CMAP2 busy"); 3310181641Skmacy sched_pin(); 3311215587Scperciva PT_SET_MA(sysmaps->CADDR2, PG_V | PG_RW | VM_PAGE_TO_MACH(m) | PG_A | PG_M); 3312181641Skmacy pagezero(sysmaps->CADDR2); 3313181641Skmacy PT_SET_MA(sysmaps->CADDR2, 0); 3314181641Skmacy sched_unpin(); 3315181641Skmacy mtx_unlock(&sysmaps->lock); 3316181641Skmacy} 3317181641Skmacy 3318181641Skmacy/* 3319181641Skmacy * pmap_zero_page_area zeros the specified hardware page by mapping 3320181641Skmacy * the page into KVM and using bzero to clear its contents. 3321181641Skmacy * 3322181641Skmacy * off and size may not cover an area beyond a single hardware page. 3323181641Skmacy */ 3324181641Skmacyvoid 3325181641Skmacypmap_zero_page_area(vm_page_t m, int off, int size) 3326181641Skmacy{ 3327181641Skmacy struct sysmaps *sysmaps; 3328181641Skmacy 3329181641Skmacy sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)]; 3330181641Skmacy mtx_lock(&sysmaps->lock); 3331181641Skmacy if (*sysmaps->CMAP2) 3332181641Skmacy panic("pmap_zero_page: CMAP2 busy"); 3333181641Skmacy sched_pin(); 3334215587Scperciva PT_SET_MA(sysmaps->CADDR2, PG_V | PG_RW | VM_PAGE_TO_MACH(m) | PG_A | PG_M); 3335181641Skmacy 3336181641Skmacy if (off == 0 && size == PAGE_SIZE) 3337181641Skmacy pagezero(sysmaps->CADDR2); 3338181641Skmacy else 3339181641Skmacy bzero((char *)sysmaps->CADDR2 + off, size); 3340181641Skmacy PT_SET_MA(sysmaps->CADDR2, 0); 3341181641Skmacy sched_unpin(); 3342181641Skmacy mtx_unlock(&sysmaps->lock); 3343181641Skmacy} 3344181641Skmacy 3345181641Skmacy/* 3346181641Skmacy * pmap_zero_page_idle zeros the specified hardware page by mapping 3347181641Skmacy * the page into KVM and using bzero to clear its contents. This 3348181641Skmacy * is intended to be called from the vm_pagezero process only and 3349181641Skmacy * outside of Giant. 3350181641Skmacy */ 3351181641Skmacyvoid 3352181641Skmacypmap_zero_page_idle(vm_page_t m) 3353181641Skmacy{ 3354181641Skmacy 3355181641Skmacy if (*CMAP3) 3356181641Skmacy panic("pmap_zero_page: CMAP3 busy"); 3357181641Skmacy sched_pin(); 3358215587Scperciva PT_SET_MA(CADDR3, PG_V | PG_RW | VM_PAGE_TO_MACH(m) | PG_A | PG_M); 3359181641Skmacy pagezero(CADDR3); 3360181641Skmacy PT_SET_MA(CADDR3, 0); 3361181641Skmacy sched_unpin(); 3362181641Skmacy} 3363181641Skmacy 3364181641Skmacy/* 3365181641Skmacy * pmap_copy_page copies the specified (machine independent) 3366181641Skmacy * page by mapping the page into virtual memory and using 3367181641Skmacy * bcopy to copy the page, one machine dependent page at a 3368181641Skmacy * time. 
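 *
 * Note the asymmetry in the two windows below: the source is mapped with
 * PG_V | PG_A only, so a stray store through CADDR1 would fault, while
 * the destination adds PG_RW and presets PG_A | PG_M so the copy itself
 * takes no accessed- or dirty-bit faults.  As a sketch, assuming the
 * usual i386 bit values:
 *
 *	unsigned src_pte = src_frame | 0x001u /* PG_V */ | 0x020u /* PG_A */;
 *	unsigned dst_pte = dst_frame | 0x001u | 0x002u /* PG_RW */ |
 *	    0x020u | 0x040u /* PG_M */;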
3369181641Skmacy */ 3370181641Skmacyvoid 3371181641Skmacypmap_copy_page(vm_page_t src, vm_page_t dst) 3372181641Skmacy{ 3373181641Skmacy struct sysmaps *sysmaps; 3374181641Skmacy 3375181641Skmacy sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)]; 3376181641Skmacy mtx_lock(&sysmaps->lock); 3377181641Skmacy if (*sysmaps->CMAP1) 3378181641Skmacy panic("pmap_copy_page: CMAP1 busy"); 3379181641Skmacy if (*sysmaps->CMAP2) 3380181641Skmacy panic("pmap_copy_page: CMAP2 busy"); 3381181641Skmacy sched_pin(); 3382215587Scperciva PT_SET_MA(sysmaps->CADDR1, PG_V | VM_PAGE_TO_MACH(src) | PG_A); 3383215587Scperciva PT_SET_MA(sysmaps->CADDR2, PG_V | PG_RW | VM_PAGE_TO_MACH(dst) | PG_A | PG_M); 3384181641Skmacy bcopy(sysmaps->CADDR1, sysmaps->CADDR2, PAGE_SIZE); 3385181641Skmacy PT_SET_MA(sysmaps->CADDR1, 0); 3386181641Skmacy PT_SET_MA(sysmaps->CADDR2, 0); 3387181641Skmacy sched_unpin(); 3388181641Skmacy mtx_unlock(&sysmaps->lock); 3389181641Skmacy} 3390181641Skmacy 3391181641Skmacy/* 3392181641Skmacy * Returns true if the pmap's pv is one of the first 3393181641Skmacy * 16 pvs linked to from this page. This count may 3394181641Skmacy * be changed upwards or downwards in the future; it 3395181641Skmacy * is only necessary that true be returned for a small 3396181641Skmacy * subset of pmaps for proper page aging. 3397181641Skmacy */ 3398181641Skmacyboolean_t 3399181641Skmacypmap_page_exists_quick(pmap_t pmap, vm_page_t m) 3400181641Skmacy{ 3401181641Skmacy pv_entry_t pv; 3402181641Skmacy int loops = 0; 3403208990Salc boolean_t rv; 3404181641Skmacy 3405224746Skib KASSERT((m->oflags & VPO_UNMANAGED) == 0, 3406208990Salc ("pmap_page_exists_quick: page %p is not managed", m)); 3407208990Salc rv = FALSE; 3408208990Salc vm_page_lock_queues(); 3409181641Skmacy TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { 3410181641Skmacy if (PV_PMAP(pv) == pmap) { 3411208990Salc rv = TRUE; 3412208990Salc break; 3413181641Skmacy } 3414181641Skmacy loops++; 3415181641Skmacy if (loops >= 16) 3416181641Skmacy break; 3417181641Skmacy } 3418208990Salc vm_page_unlock_queues(); 3419208990Salc return (rv); 3420181641Skmacy} 3421181641Skmacy 3422181641Skmacy/* 3423181641Skmacy * pmap_page_wired_mappings: 3424181641Skmacy * 3425181641Skmacy * Return the number of managed mappings to the given physical page 3426181641Skmacy * that are wired. 3427181641Skmacy */ 3428181641Skmacyint 3429181641Skmacypmap_page_wired_mappings(vm_page_t m) 3430181641Skmacy{ 3431181641Skmacy pv_entry_t pv; 3432181641Skmacy pt_entry_t *pte; 3433181641Skmacy pmap_t pmap; 3434181641Skmacy int count; 3435181641Skmacy 3436181641Skmacy count = 0; 3437224746Skib if ((m->oflags & VPO_UNMANAGED) != 0) 3438181641Skmacy return (count); 3439207796Salc vm_page_lock_queues(); 3440181641Skmacy sched_pin(); 3441181641Skmacy TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { 3442181641Skmacy pmap = PV_PMAP(pv); 3443181641Skmacy PMAP_LOCK(pmap); 3444181641Skmacy pte = pmap_pte_quick(pmap, pv->pv_va); 3445181641Skmacy if ((*pte & PG_W) != 0) 3446181641Skmacy count++; 3447181641Skmacy PMAP_UNLOCK(pmap); 3448181641Skmacy } 3449181641Skmacy sched_unpin(); 3450207796Salc vm_page_unlock_queues(); 3451181641Skmacy return (count); 3452181641Skmacy} 3453181641Skmacy 3454181641Skmacy/* 3455181747Skmacy * Returns TRUE if the given page is mapped individually or as part of 3456181747Skmacy * a 4mpage. Otherwise, returns FALSE. 
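 *
 * Like pmap_page_exists_quick() above, the check is deliberately cheap.
 * A standalone sketch of the bounded PV-list scan used there, with
 * illustrative types:
 *
 *	struct pv { struct pv *next; void *pmap; };
 *
 *	static int exists_quick(struct pv *head, void *pmap)
 *	{
 *		int loops = 0;
 *		struct pv *pv;
 *
 *		for (pv = head; pv != NULL; pv = pv->next) {
 *			if (pv->pmap == pmap)
 *				return (1);
 *			if (++loops >= 16)	/* give up after 16 entries */
 *				break;
 *		}
 *		return (0);
 *	}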
3457181747Skmacy */ 3458181747Skmacyboolean_t 3459181747Skmacypmap_page_is_mapped(vm_page_t m) 3460181747Skmacy{ 3461207796Salc boolean_t rv; 3462181747Skmacy 3463224746Skib if ((m->oflags & VPO_UNMANAGED) != 0) 3464181747Skmacy return (FALSE); 3465207796Salc vm_page_lock_queues(); 3466207796Salc rv = !TAILQ_EMPTY(&m->md.pv_list) || 3467207796Salc !TAILQ_EMPTY(&pa_to_pvh(VM_PAGE_TO_PHYS(m))->pv_list); 3468207796Salc vm_page_unlock_queues(); 3469207796Salc return (rv); 3470181747Skmacy} 3471181747Skmacy 3472181747Skmacy/* 3473181641Skmacy * Remove all pages from specified address space 3474181641Skmacy * this aids process exit speeds. Also, this code 3475181641Skmacy * is special cased for current process only, but 3476181641Skmacy * can have the more generic (and slightly slower) 3477181641Skmacy * mode enabled. This is much faster than pmap_remove 3478181641Skmacy * in the case of running down an entire address space. 3479181641Skmacy */ 3480181641Skmacyvoid 3481181641Skmacypmap_remove_pages(pmap_t pmap) 3482181641Skmacy{ 3483181641Skmacy pt_entry_t *pte, tpte; 3484181641Skmacy vm_page_t m, free = NULL; 3485181641Skmacy pv_entry_t pv; 3486181641Skmacy struct pv_chunk *pc, *npc; 3487181641Skmacy int field, idx; 3488181641Skmacy int32_t bit; 3489181641Skmacy uint32_t inuse, bitmask; 3490181641Skmacy int allfree; 3491181641Skmacy 3492181641Skmacy CTR1(KTR_PMAP, "pmap_remove_pages: pmap=%p", pmap); 3493181641Skmacy 3494181641Skmacy if (pmap != vmspace_pmap(curthread->td_proc->p_vmspace)) { 3495181641Skmacy printf("warning: pmap_remove_pages called with non-current pmap\n"); 3496181641Skmacy return; 3497181641Skmacy } 3498181641Skmacy vm_page_lock_queues(); 3499181641Skmacy KASSERT(pmap_is_current(pmap), ("removing pages from non-current pmap")); 3500181641Skmacy PMAP_LOCK(pmap); 3501181641Skmacy sched_pin(); 3502181641Skmacy TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) { 3503181641Skmacy allfree = 1; 3504181641Skmacy for (field = 0; field < _NPCM; field++) { 3505181641Skmacy inuse = (~(pc->pc_map[field])) & pc_freemask[field]; 3506181641Skmacy while (inuse != 0) { 3507181641Skmacy bit = bsfl(inuse); 3508181641Skmacy bitmask = 1UL << bit; 3509181641Skmacy idx = field * 32 + bit; 3510181641Skmacy pv = &pc->pc_pventry[idx]; 3511181641Skmacy inuse &= ~bitmask; 3512181641Skmacy 3513181641Skmacy pte = vtopte(pv->pv_va); 3514181641Skmacy tpte = *pte ? xpmap_mtop(*pte) : 0; 3515181641Skmacy 3516181641Skmacy if (tpte == 0) { 3517181641Skmacy printf( 3518181641Skmacy "TPTE at %p IS ZERO @ VA %08x\n", 3519181641Skmacy pte, pv->pv_va); 3520181641Skmacy panic("bad pte"); 3521181641Skmacy } 3522181641Skmacy 3523181641Skmacy/* 3524181641Skmacy * We cannot remove wired pages from a process' mapping at this time 3525181641Skmacy */ 3526181641Skmacy if (tpte & PG_W) { 3527181641Skmacy allfree = 0; 3528181641Skmacy continue; 3529181641Skmacy } 3530181641Skmacy 3531181641Skmacy m = PHYS_TO_VM_PAGE(tpte & PG_FRAME); 3532181641Skmacy KASSERT(m->phys_addr == (tpte & PG_FRAME), 3533181641Skmacy ("vm_page_t %p phys_addr mismatch %016jx %016jx", 3534181641Skmacy m, (uintmax_t)m->phys_addr, 3535181641Skmacy (uintmax_t)tpte)); 3536181641Skmacy 3537181641Skmacy KASSERT(m < &vm_page_array[vm_page_array_size], 3538181641Skmacy ("pmap_remove_pages: bad tpte %#jx", 3539181641Skmacy (uintmax_t)tpte)); 3540181641Skmacy 3541181641Skmacy 3542181641Skmacy PT_CLEAR_VA(pte, FALSE); 3543181641Skmacy 3544181641Skmacy /* 3545181641Skmacy * Update the vm_page_t clean/reference bits. 
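 *
 * The surrounding chunk walk recovers allocated pv entries from the
 * inverted free bitmap with a find-first-set loop.  The same idiom in
 * standalone C, where __builtin_ctz plays the role of bsfl(); the helper
 * is illustrative only:
 *
 *	static void walk_inuse(unsigned map, unsigned freemask)
 *	{
 *		unsigned inuse = ~map & freemask;
 *
 *		while (inuse != 0) {
 *			int bit = __builtin_ctz(inuse);	/* lowest set bit */
 *
 *			inuse &= inuse - 1;	/* clear that bit */
 *			/* pc_pventry[field * 32 + bit] is allocated */
 *			(void)bit;
 *		}
 *	}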
3546181641Skmacy */
3547181641Skmacy			if (tpte & PG_M)
3548181641Skmacy				vm_page_dirty(m);
3549181641Skmacy
3550181641Skmacy			TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
3551181641Skmacy			if (TAILQ_EMPTY(&m->md.pv_list))
3552225418Skib				vm_page_aflag_clear(m, PGA_WRITEABLE);
3553181641Skmacy
3554181641Skmacy			pmap_unuse_pt(pmap, pv->pv_va, &free);
3555181641Skmacy
3556181641Skmacy			/* Mark free */
3557181641Skmacy			PV_STAT(pv_entry_frees++);
3558181641Skmacy			PV_STAT(pv_entry_spare++);
3559181641Skmacy			pv_entry_count--;
3560181641Skmacy			pc->pc_map[field] |= bitmask;
3561181641Skmacy			pmap->pm_stats.resident_count--;
3562181641Skmacy			}
3563181641Skmacy		}
3564181641Skmacy		PT_UPDATES_FLUSH();
3565181641Skmacy		if (allfree) {
3566181641Skmacy			PV_STAT(pv_entry_spare -= _NPCPV);
3567181641Skmacy			PV_STAT(pc_chunk_count--);
3568181641Skmacy			PV_STAT(pc_chunk_frees++);
3569181641Skmacy			TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
3570181641Skmacy			m = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)pc));
3571181641Skmacy			pmap_qremove((vm_offset_t)pc, 1);
3572181641Skmacy			vm_page_unwire(m, 0);
3573181641Skmacy			vm_page_free(m);
3574181641Skmacy			pmap_ptelist_free(&pv_vafree, (vm_offset_t)pc);
3575181641Skmacy		}
3576181641Skmacy	}
3577181641Skmacy	PT_UPDATES_FLUSH();
3578181641Skmacy	if (*PMAP1)
3579181641Skmacy		PT_SET_MA(PADDR1, 0);
3580181641Skmacy
3581181641Skmacy	sched_unpin();
3582181641Skmacy	pmap_invalidate_all(pmap);
3583181641Skmacy	vm_page_unlock_queues();
3584181641Skmacy	PMAP_UNLOCK(pmap);
3585181641Skmacy	pmap_free_zero_pages(free);
3586181641Skmacy}
3587181641Skmacy
3588181641Skmacy/*
3589181641Skmacy * pmap_is_modified:
3590181641Skmacy *
3591181641Skmacy *	Return whether or not the specified physical page was modified
3592181641Skmacy *	in any physical maps.
3593181641Skmacy */
3594181641Skmacyboolean_t
3595181641Skmacypmap_is_modified(vm_page_t m)
3596181641Skmacy{
3597181641Skmacy	pv_entry_t pv;
3598181641Skmacy	pt_entry_t *pte;
3599181641Skmacy	pmap_t pmap;
3600181641Skmacy	boolean_t rv;
3601181641Skmacy
3602224746Skib	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3603208504Salc	    ("pmap_is_modified: page %p is not managed", m));
3604181641Skmacy	rv = FALSE;
3605208504Salc
3606208504Salc	/*
3607225418Skib	 * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be
3608225418Skib	 * concurrently set while the object is locked.  Thus, if PGA_WRITEABLE
3609208504Salc	 * is clear, no PTEs can have PG_M set.
3610208504Salc	 */
3611208504Salc	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
3612208504Salc	if ((m->oflags & VPO_BUSY) == 0 &&
3613225418Skib	    (m->aflags & PGA_WRITEABLE) == 0)
3614181641Skmacy		return (rv);
3615208504Salc	vm_page_lock_queues();
3616181641Skmacy	sched_pin();
3617181641Skmacy	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
3618181641Skmacy		pmap = PV_PMAP(pv);
3619181641Skmacy		PMAP_LOCK(pmap);
3620181641Skmacy		pte = pmap_pte_quick(pmap, pv->pv_va);
3621181641Skmacy		rv = (*pte & PG_M) != 0;
3622181641Skmacy		PMAP_UNLOCK(pmap);
3623181641Skmacy		if (rv)
3624181641Skmacy			break;
3625181641Skmacy	}
3626181641Skmacy	if (*PMAP1)
3627181641Skmacy		PT_SET_MA(PADDR1, 0);
3628181641Skmacy	sched_unpin();
3629208504Salc	vm_page_unlock_queues();
3630181641Skmacy	return (rv);
3631181641Skmacy}
3632181641Skmacy
3633181641Skmacy/*
3634181641Skmacy * pmap_is_prefaultable:
3635181641Skmacy *
3636181641Skmacy *	Return whether or not the specified virtual address is eligible
3637181641Skmacy *	for prefault.
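 *
 *	The test reduces to "the page-table page exists but the leaf PTE
 *	is still empty".  As a sketch, with illustrative names and PG_V
 *	assumed to be 0x001:
 *
 *		static int prefaultable(unsigned pde, unsigned pte)
 *		{
 *			return ((pde & 0x001u) != 0 && pte == 0);
 *		}
 *
 *	The Xen variant below returns FALSE unconditionally before ever
 *	reaching that check, so prefaulting is effectively disabled here.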
3638181641Skmacy */ 3639181641Skmacystatic boolean_t 3640181641Skmacypmap_is_prefaultable_locked(pmap_t pmap, vm_offset_t addr) 3641181641Skmacy{ 3642181641Skmacy pt_entry_t *pte; 3643181641Skmacy boolean_t rv = FALSE; 3644181641Skmacy 3645181641Skmacy return (rv); 3646181641Skmacy 3647181641Skmacy if (pmap_is_current(pmap) && *pmap_pde(pmap, addr)) { 3648181641Skmacy pte = vtopte(addr); 3649181641Skmacy rv = (*pte == 0); 3650181641Skmacy } 3651181641Skmacy return (rv); 3652181641Skmacy} 3653181641Skmacy 3654181641Skmacyboolean_t 3655181641Skmacypmap_is_prefaultable(pmap_t pmap, vm_offset_t addr) 3656181641Skmacy{ 3657181641Skmacy boolean_t rv; 3658181641Skmacy 3659181641Skmacy PMAP_LOCK(pmap); 3660181641Skmacy rv = pmap_is_prefaultable_locked(pmap, addr); 3661181641Skmacy PMAP_UNLOCK(pmap); 3662181641Skmacy return (rv); 3663181641Skmacy} 3664181641Skmacy 3665207155Salcboolean_t 3666207155Salcpmap_is_referenced(vm_page_t m) 3667207155Salc{ 3668207155Salc pv_entry_t pv; 3669207155Salc pt_entry_t *pte; 3670207155Salc pmap_t pmap; 3671207155Salc boolean_t rv; 3672207155Salc 3673224746Skib KASSERT((m->oflags & VPO_UNMANAGED) == 0, 3674208574Salc ("pmap_is_referenced: page %p is not managed", m)); 3675207155Salc rv = FALSE; 3676208574Salc vm_page_lock_queues(); 3677207155Salc sched_pin(); 3678207155Salc TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { 3679207155Salc pmap = PV_PMAP(pv); 3680207155Salc PMAP_LOCK(pmap); 3681207155Salc pte = pmap_pte_quick(pmap, pv->pv_va); 3682207155Salc rv = (*pte & (PG_A | PG_V)) == (PG_A | PG_V); 3683207155Salc PMAP_UNLOCK(pmap); 3684207155Salc if (rv) 3685207155Salc break; 3686207155Salc } 3687207155Salc if (*PMAP1) 3688207155Salc PT_SET_MA(PADDR1, 0); 3689207155Salc sched_unpin(); 3690208574Salc vm_page_unlock_queues(); 3691207155Salc return (rv); 3692207155Salc} 3693207155Salc 3694181641Skmacyvoid 3695181641Skmacypmap_map_readonly(pmap_t pmap, vm_offset_t va, int len) 3696181641Skmacy{ 3697181641Skmacy int i, npages = round_page(len) >> PAGE_SHIFT; 3698181641Skmacy for (i = 0; i < npages; i++) { 3699181641Skmacy pt_entry_t *pte; 3700181641Skmacy pte = pmap_pte(pmap, (vm_offset_t)(va + i*PAGE_SIZE)); 3701216843Scperciva vm_page_lock_queues(); 3702181641Skmacy pte_store(pte, xpmap_mtop(*pte & ~(PG_RW|PG_M))); 3703216843Scperciva vm_page_unlock_queues(); 3704181641Skmacy PMAP_MARK_PRIV(xpmap_mtop(*pte)); 3705181641Skmacy pmap_pte_release(pte); 3706181641Skmacy } 3707181641Skmacy} 3708181641Skmacy 3709181641Skmacyvoid 3710181641Skmacypmap_map_readwrite(pmap_t pmap, vm_offset_t va, int len) 3711181641Skmacy{ 3712181641Skmacy int i, npages = round_page(len) >> PAGE_SHIFT; 3713181641Skmacy for (i = 0; i < npages; i++) { 3714181641Skmacy pt_entry_t *pte; 3715181641Skmacy pte = pmap_pte(pmap, (vm_offset_t)(va + i*PAGE_SIZE)); 3716181641Skmacy PMAP_MARK_UNPRIV(xpmap_mtop(*pte)); 3717216843Scperciva vm_page_lock_queues(); 3718181641Skmacy pte_store(pte, xpmap_mtop(*pte) | (PG_RW|PG_M)); 3719216843Scperciva vm_page_unlock_queues(); 3720181641Skmacy pmap_pte_release(pte); 3721181641Skmacy } 3722181641Skmacy} 3723181641Skmacy 3724181641Skmacy/* 3725181641Skmacy * Clear the write and modified bits in each of the given page's mappings. 
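 *
 * The update loop below is a read-modify-write that retries when the PTE
 * changes underneath it, i.e. a compare-and-swap.  The equivalent in
 * standalone C11 atomics, with the usual bit values assumed (PG_RW 0x002,
 * PG_M 0x040); the helper name is illustrative:
 *
 *	#include <stdatomic.h>
 *
 *	static void clear_rw(_Atomic unsigned *pte)
 *	{
 *		unsigned old = atomic_load(pte);
 *
 *		while (!atomic_compare_exchange_weak(pte, &old,
 *		    old & ~(0x002u | 0x040u)))
 *			;	/* 'old' was reloaded; retry */
 *	}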
3726181641Skmacy */ 3727181641Skmacyvoid 3728181641Skmacypmap_remove_write(vm_page_t m) 3729181641Skmacy{ 3730181641Skmacy pv_entry_t pv; 3731181641Skmacy pmap_t pmap; 3732181641Skmacy pt_entry_t oldpte, *pte; 3733181641Skmacy 3734224746Skib KASSERT((m->oflags & VPO_UNMANAGED) == 0, 3735208175Salc ("pmap_remove_write: page %p is not managed", m)); 3736208175Salc 3737208175Salc /* 3738225418Skib * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be set by 3739225418Skib * another thread while the object is locked. Thus, if PGA_WRITEABLE 3740208175Salc * is clear, no page table entries need updating. 3741208175Salc */ 3742208175Salc VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 3743208175Salc if ((m->oflags & VPO_BUSY) == 0 && 3744225418Skib (m->aflags & PGA_WRITEABLE) == 0) 3745181641Skmacy return; 3746207796Salc vm_page_lock_queues(); 3747181641Skmacy sched_pin(); 3748181641Skmacy TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { 3749181641Skmacy pmap = PV_PMAP(pv); 3750181641Skmacy PMAP_LOCK(pmap); 3751181641Skmacy pte = pmap_pte_quick(pmap, pv->pv_va); 3752181641Skmacyretry: 3753181641Skmacy oldpte = *pte; 3754181641Skmacy if ((oldpte & PG_RW) != 0) { 3755188341Skmacy vm_paddr_t newpte = oldpte & ~(PG_RW | PG_M); 3756188341Skmacy 3757181641Skmacy /* 3758181641Skmacy * Regardless of whether a pte is 32 or 64 bits 3759181641Skmacy * in size, PG_RW and PG_M are among the least 3760181641Skmacy * significant 32 bits. 3761181641Skmacy */ 3762188341Skmacy PT_SET_VA_MA(pte, newpte, TRUE); 3763188341Skmacy if (*pte != newpte) 3764181641Skmacy goto retry; 3765188341Skmacy 3766181641Skmacy if ((oldpte & PG_M) != 0) 3767181641Skmacy vm_page_dirty(m); 3768181641Skmacy pmap_invalidate_page(pmap, pv->pv_va); 3769181641Skmacy } 3770181641Skmacy PMAP_UNLOCK(pmap); 3771181641Skmacy } 3772225418Skib vm_page_aflag_clear(m, PGA_WRITEABLE); 3773181641Skmacy PT_UPDATES_FLUSH(); 3774181641Skmacy if (*PMAP1) 3775181641Skmacy PT_SET_MA(PADDR1, 0); 3776181641Skmacy sched_unpin(); 3777207796Salc vm_page_unlock_queues(); 3778181641Skmacy} 3779181641Skmacy 3780181641Skmacy/* 3781181641Skmacy * pmap_ts_referenced: 3782181641Skmacy * 3783181641Skmacy * Return a count of reference bits for a page, clearing those bits. 3784181641Skmacy * It is not necessary for every reference bit to be cleared, but it 3785181641Skmacy * is necessary that 0 only be returned when there are truly no 3786181641Skmacy * reference bits set. 3787181641Skmacy * 3788181641Skmacy * XXX: The exact number of bits to check and clear is a matter that 3789181641Skmacy * should be tested and standardized at some point in the future for 3790181641Skmacy * optimal aging of shared pages. 
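 *
 * The implementation below samples at most five mappings per call and
 * rotates each examined pv to the tail of the list, so repeated calls
 * age different mappings.  The same resume-where-you-left-off idea over
 * an array instead of a TAILQ, purely as an illustration:
 *
 *	static int ts_referenced(int *ref, int n, int *start)
 *	{
 *		int i, count = 0;
 *
 *		for (i = 0; i < n && count <= 4; i++) {
 *			int idx = (*start + i) % n;
 *
 *			if (ref[idx]) {		/* test and clear PG_A */
 *				ref[idx] = 0;
 *				count++;
 *			}
 *		}
 *		*start = (*start + i) % n;	/* resume point for next call */
 *		return (count);
 *	}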
3791181641Skmacy */ 3792181641Skmacyint 3793181641Skmacypmap_ts_referenced(vm_page_t m) 3794181641Skmacy{ 3795181641Skmacy pv_entry_t pv, pvf, pvn; 3796181641Skmacy pmap_t pmap; 3797181641Skmacy pt_entry_t *pte; 3798181641Skmacy int rtval = 0; 3799181641Skmacy 3800224746Skib KASSERT((m->oflags & VPO_UNMANAGED) == 0, 3801208990Salc ("pmap_ts_referenced: page %p is not managed", m)); 3802208990Salc vm_page_lock_queues(); 3803181641Skmacy sched_pin(); 3804181641Skmacy if ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) { 3805181641Skmacy pvf = pv; 3806181641Skmacy do { 3807181641Skmacy pvn = TAILQ_NEXT(pv, pv_list); 3808181641Skmacy TAILQ_REMOVE(&m->md.pv_list, pv, pv_list); 3809181641Skmacy TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list); 3810181641Skmacy pmap = PV_PMAP(pv); 3811181641Skmacy PMAP_LOCK(pmap); 3812181641Skmacy pte = pmap_pte_quick(pmap, pv->pv_va); 3813181641Skmacy if ((*pte & PG_A) != 0) { 3814181641Skmacy PT_SET_VA_MA(pte, *pte & ~PG_A, FALSE); 3815181641Skmacy pmap_invalidate_page(pmap, pv->pv_va); 3816181641Skmacy rtval++; 3817181641Skmacy if (rtval > 4) 3818181641Skmacy pvn = NULL; 3819181641Skmacy } 3820181641Skmacy PMAP_UNLOCK(pmap); 3821181641Skmacy } while ((pv = pvn) != NULL && pv != pvf); 3822181641Skmacy } 3823181641Skmacy PT_UPDATES_FLUSH(); 3824181641Skmacy if (*PMAP1) 3825181641Skmacy PT_SET_MA(PADDR1, 0); 3826181641Skmacy 3827181641Skmacy sched_unpin(); 3828208990Salc vm_page_unlock_queues(); 3829181641Skmacy return (rtval); 3830181641Skmacy} 3831181641Skmacy 3832181641Skmacy/* 3833181641Skmacy * Clear the modify bits on the specified physical page. 3834181641Skmacy */ 3835181641Skmacyvoid 3836181641Skmacypmap_clear_modify(vm_page_t m) 3837181641Skmacy{ 3838181641Skmacy pv_entry_t pv; 3839181641Skmacy pmap_t pmap; 3840181641Skmacy pt_entry_t *pte; 3841181641Skmacy 3842224746Skib KASSERT((m->oflags & VPO_UNMANAGED) == 0, 3843208504Salc ("pmap_clear_modify: page %p is not managed", m)); 3844208504Salc VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 3845208504Salc KASSERT((m->oflags & VPO_BUSY) == 0, 3846208504Salc ("pmap_clear_modify: page %p is busy", m)); 3847208504Salc 3848208504Salc /* 3849225418Skib * If the page is not PGA_WRITEABLE, then no PTEs can have PG_M set. 3850208504Salc * If the object containing the page is locked and the page is not 3851225418Skib * VPO_BUSY, then PGA_WRITEABLE cannot be concurrently set. 3852208504Salc */ 3853225418Skib if ((m->aflags & PGA_WRITEABLE) == 0) 3854181641Skmacy return; 3855208504Salc vm_page_lock_queues(); 3856181641Skmacy sched_pin(); 3857181641Skmacy TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { 3858181641Skmacy pmap = PV_PMAP(pv); 3859181641Skmacy PMAP_LOCK(pmap); 3860181641Skmacy pte = pmap_pte_quick(pmap, pv->pv_va); 3861181641Skmacy if ((*pte & PG_M) != 0) { 3862181641Skmacy /* 3863181641Skmacy * Regardless of whether a pte is 32 or 64 bits 3864181641Skmacy * in size, PG_M is among the least significant 3865181641Skmacy * 32 bits. 3866181641Skmacy */ 3867181641Skmacy PT_SET_VA_MA(pte, *pte & ~PG_M, FALSE); 3868181641Skmacy pmap_invalidate_page(pmap, pv->pv_va); 3869181641Skmacy } 3870181641Skmacy PMAP_UNLOCK(pmap); 3871181641Skmacy } 3872181641Skmacy sched_unpin(); 3873208504Salc vm_page_unlock_queues(); 3874181641Skmacy} 3875181641Skmacy 3876181641Skmacy/* 3877181641Skmacy * pmap_clear_reference: 3878181641Skmacy * 3879181641Skmacy * Clear the reference bit on the specified physical page. 
3880181641Skmacy */ 3881181641Skmacyvoid 3882181641Skmacypmap_clear_reference(vm_page_t m) 3883181641Skmacy{ 3884181641Skmacy pv_entry_t pv; 3885181641Skmacy pmap_t pmap; 3886181641Skmacy pt_entry_t *pte; 3887181641Skmacy 3888224746Skib KASSERT((m->oflags & VPO_UNMANAGED) == 0, 3889208504Salc ("pmap_clear_reference: page %p is not managed", m)); 3890208504Salc vm_page_lock_queues(); 3891181641Skmacy sched_pin(); 3892181641Skmacy TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { 3893181641Skmacy pmap = PV_PMAP(pv); 3894181641Skmacy PMAP_LOCK(pmap); 3895181641Skmacy pte = pmap_pte_quick(pmap, pv->pv_va); 3896181641Skmacy if ((*pte & PG_A) != 0) { 3897181641Skmacy /* 3898181641Skmacy * Regardless of whether a pte is 32 or 64 bits 3899181641Skmacy * in size, PG_A is among the least significant 3900181641Skmacy * 32 bits. 3901181641Skmacy */ 3902181641Skmacy PT_SET_VA_MA(pte, *pte & ~PG_A, FALSE); 3903181641Skmacy pmap_invalidate_page(pmap, pv->pv_va); 3904181641Skmacy } 3905181641Skmacy PMAP_UNLOCK(pmap); 3906181641Skmacy } 3907181641Skmacy sched_unpin(); 3908208504Salc vm_page_unlock_queues(); 3909181641Skmacy} 3910181641Skmacy 3911181641Skmacy/* 3912181641Skmacy * Miscellaneous support routines follow 3913181641Skmacy */ 3914181641Skmacy 3915181641Skmacy/* 3916181641Skmacy * Map a set of physical memory pages into the kernel virtual 3917181641Skmacy * address space. Return a pointer to where it is mapped. This 3918181641Skmacy * routine is intended to be used for mapping device memory, 3919181641Skmacy * NOT real memory. 3920181641Skmacy */ 3921181641Skmacyvoid * 3922181641Skmacypmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, int mode) 3923181641Skmacy{ 3924195949Skib vm_offset_t va, offset; 3925195949Skib vm_size_t tmpsize; 3926181641Skmacy 3927181641Skmacy offset = pa & PAGE_MASK; 3928181641Skmacy size = roundup(offset + size, PAGE_SIZE); 3929181641Skmacy pa = pa & PG_FRAME; 3930181641Skmacy 3931181641Skmacy if (pa < KERNLOAD && pa + size <= KERNLOAD) 3932181641Skmacy va = KERNBASE + pa; 3933181641Skmacy else 3934181641Skmacy va = kmem_alloc_nofault(kernel_map, size); 3935181641Skmacy if (!va) 3936181641Skmacy panic("pmap_mapdev: Couldn't alloc kernel virtual memory"); 3937181641Skmacy 3938195949Skib for (tmpsize = 0; tmpsize < size; tmpsize += PAGE_SIZE) 3939195949Skib pmap_kenter_attr(va + tmpsize, pa + tmpsize, mode); 3940195949Skib pmap_invalidate_range(kernel_pmap, va, va + tmpsize); 3941195949Skib pmap_invalidate_cache_range(va, va + size); 3942181641Skmacy return ((void *)(va + offset)); 3943181641Skmacy} 3944181641Skmacy 3945181641Skmacyvoid * 3946181641Skmacypmap_mapdev(vm_paddr_t pa, vm_size_t size) 3947181641Skmacy{ 3948181641Skmacy 3949181641Skmacy return (pmap_mapdev_attr(pa, size, PAT_UNCACHEABLE)); 3950181641Skmacy} 3951181641Skmacy 3952181641Skmacyvoid * 3953181641Skmacypmap_mapbios(vm_paddr_t pa, vm_size_t size) 3954181641Skmacy{ 3955181641Skmacy 3956181641Skmacy return (pmap_mapdev_attr(pa, size, PAT_WRITE_BACK)); 3957181641Skmacy} 3958181641Skmacy 3959181641Skmacyvoid 3960181641Skmacypmap_unmapdev(vm_offset_t va, vm_size_t size) 3961181641Skmacy{ 3962181641Skmacy vm_offset_t base, offset, tmpva; 3963181641Skmacy 3964181641Skmacy if (va >= KERNBASE && va + size <= KERNBASE + KERNLOAD) 3965181641Skmacy return; 3966181641Skmacy base = trunc_page(va); 3967181641Skmacy offset = va & PAGE_MASK; 3968181641Skmacy size = roundup(offset + size, PAGE_SIZE); 3969181641Skmacy critical_enter(); 3970181641Skmacy for (tmpva = base; tmpva < (base + size); tmpva += PAGE_SIZE) 3971181641Skmacy 
pmap_kremove(tmpva); 3972181641Skmacy pmap_invalidate_range(kernel_pmap, va, tmpva); 3973181641Skmacy critical_exit(); 3974181641Skmacy kmem_free(kernel_map, base, size); 3975181641Skmacy} 3976181641Skmacy 3977195774Salc/* 3978195774Salc * Sets the memory attribute for the specified page. 3979195774Salc */ 3980195774Salcvoid 3981195774Salcpmap_page_set_memattr(vm_page_t m, vm_memattr_t ma) 3982195774Salc{ 3983195949Skib struct sysmaps *sysmaps; 3984195949Skib vm_offset_t sva, eva; 3985195774Salc 3986195774Salc m->md.pat_mode = ma; 3987195949Skib if ((m->flags & PG_FICTITIOUS) != 0) 3988195949Skib return; 3989195774Salc 3990195774Salc /* 3991195774Salc * If "m" is a normal page, flush it from the cache. 3992195949Skib * See pmap_invalidate_cache_range(). 3993195949Skib * 3994195949Skib * First, try to find an existing mapping of the page by sf 3995195949Skib * buffer. sf_buf_invalidate_cache() modifies mapping and 3996195949Skib * flushes the cache. 3997195774Salc */ 3998195949Skib if (sf_buf_invalidate_cache(m)) 3999195949Skib return; 4000195949Skib 4001195949Skib /* 4002195949Skib * If page is not mapped by sf buffer, but CPU does not 4003195949Skib * support self snoop, map the page transient and do 4004195949Skib * invalidation. In the worst case, whole cache is flushed by 4005195949Skib * pmap_invalidate_cache_range(). 4006195949Skib */ 4007195949Skib if ((cpu_feature & (CPUID_SS|CPUID_CLFSH)) == CPUID_CLFSH) { 4008195949Skib sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)]; 4009195949Skib mtx_lock(&sysmaps->lock); 4010195949Skib if (*sysmaps->CMAP2) 4011195949Skib panic("pmap_page_set_memattr: CMAP2 busy"); 4012195949Skib sched_pin(); 4013195949Skib PT_SET_MA(sysmaps->CADDR2, PG_V | PG_RW | 4014215587Scperciva VM_PAGE_TO_MACH(m) | PG_A | PG_M | 4015195949Skib pmap_cache_bits(m->md.pat_mode, 0)); 4016195949Skib invlcaddr(sysmaps->CADDR2); 4017195949Skib sva = (vm_offset_t)sysmaps->CADDR2; 4018195949Skib eva = sva + PAGE_SIZE; 4019195949Skib } else 4020195949Skib sva = eva = 0; /* gcc */ 4021195949Skib pmap_invalidate_cache_range(sva, eva); 4022195949Skib if (sva != 0) { 4023195949Skib PT_SET_MA(sysmaps->CADDR2, 0); 4024195949Skib sched_unpin(); 4025195949Skib mtx_unlock(&sysmaps->lock); 4026195774Salc } 4027195774Salc} 4028195774Salc 4029181641Skmacyint 4030181641Skmacypmap_change_attr(va, size, mode) 4031181641Skmacy vm_offset_t va; 4032181641Skmacy vm_size_t size; 4033181641Skmacy int mode; 4034181641Skmacy{ 4035181641Skmacy vm_offset_t base, offset, tmpva; 4036181641Skmacy pt_entry_t *pte; 4037181641Skmacy u_int opte, npte; 4038181641Skmacy pd_entry_t *pde; 4039195949Skib boolean_t changed; 4040181641Skmacy 4041181641Skmacy base = trunc_page(va); 4042181641Skmacy offset = va & PAGE_MASK; 4043181641Skmacy size = roundup(offset + size, PAGE_SIZE); 4044181641Skmacy 4045181641Skmacy /* Only supported on kernel virtual addresses. */ 4046181641Skmacy if (base <= VM_MAXUSER_ADDRESS) 4047181641Skmacy return (EINVAL); 4048181641Skmacy 4049181641Skmacy /* 4MB pages and pages that aren't mapped aren't supported. 
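 *
 * The scan below validates the whole range before anything is modified,
 * so the attribute change is all-or-nothing.  Its shape, as standalone C
 * with illustrative lookup helpers:
 *
 *	extern unsigned lookup_pde(unsigned long va);	// illustrative
 *	extern unsigned lookup_pte(unsigned long va);	// illustrative
 *
 *	static int check_range(unsigned long base, unsigned long size)
 *	{
 *		unsigned long va;
 *
 *		for (va = base; va < base + size; va += 4096) {
 *			unsigned pde = lookup_pde(va);
 *
 *			if ((pde & 0x080u /* PG_PS */) != 0 ||
 *			    (pde & 0x001u /* PG_V */) == 0)
 *				return (-1);	/* 4MB page or hole */
 *			if ((lookup_pte(va) & 0x001u) == 0)
 *				return (-1);	/* unmapped leaf */
 *		}
 *		return (0);
 *	}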
 */
4050181641Skmacy	for (tmpva = base; tmpva < (base + size); tmpva += PAGE_SIZE) {
4051181641Skmacy		pde = pmap_pde(kernel_pmap, tmpva);
4052181641Skmacy		if (*pde & PG_PS)
4053181641Skmacy			return (EINVAL);
4054181641Skmacy		if ((*pde & PG_V) == 0)
4055181641Skmacy			return (EINVAL);
4056181641Skmacy		pte = vtopte(tmpva);
4057181641Skmacy		if ((*pte & PG_V) == 0)
4058181641Skmacy			return (EINVAL);
4059181641Skmacy	}
4060181641Skmacy
4061195949Skib	changed = FALSE;
4062195949Skib
4063181641Skmacy	/*
4064181641Skmacy	 * Ok, all the pages exist and are 4k, so run through them updating
4065181641Skmacy	 * their cache mode.
4066181641Skmacy	 */
4067181641Skmacy	for (tmpva = base; size > 0; ) {
4068181641Skmacy		pte = vtopte(tmpva);
4069181641Skmacy
4070181641Skmacy		/*
4071181641Skmacy		 * The cache mode bits are all in the low 32-bits of the
4072181641Skmacy		 * PTE, so we can just spin on updating the low 32-bits.
4073181641Skmacy		 */
4074181641Skmacy		do {
4075181641Skmacy			opte = *(u_int *)pte;
4076181641Skmacy			npte = opte & ~(PG_PTE_PAT | PG_NC_PCD | PG_NC_PWT);
4077181641Skmacy			npte |= pmap_cache_bits(mode, 0);
4078181641Skmacy			PT_SET_VA_MA(pte, npte, TRUE);
4079181641Skmacy		} while (npte != opte && (*pte != npte));
4080195949Skib		if (npte != opte)
4081195949Skib			changed = TRUE;
4082181641Skmacy		tmpva += PAGE_SIZE;
4083181641Skmacy		size -= PAGE_SIZE;
4084181641Skmacy	}
4085181641Skmacy
4086181641Skmacy	/*
4087181641Skmacy	 * Flush CPU caches to make sure any data isn't cached that shouldn't
4088181641Skmacy	 * be, etc.
4089181641Skmacy	 */
4090195949Skib	if (changed) {
4091195949Skib		pmap_invalidate_range(kernel_pmap, base, tmpva);
4092195949Skib		pmap_invalidate_cache_range(base, tmpva);
4093195949Skib	}
4094181641Skmacy	return (0);
4095181641Skmacy}
4096181641Skmacy
4097181641Skmacy/*
4098181641Skmacy * Perform the pmap work for mincore.
4099181641Skmacy */
4100181641Skmacyint
4101208504Salcpmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
4102181641Skmacy{
4103181641Skmacy	pt_entry_t *ptep, pte;
4104208504Salc	vm_paddr_t pa;
4105208504Salc	int val;
4106181641Skmacy
4107181641Skmacy	PMAP_LOCK(pmap);
4108208504Salcretry:
4109181641Skmacy	ptep = pmap_pte(pmap, addr);
4110181641Skmacy	pte = (ptep != NULL) ? PT_GET(ptep) : 0;
4111181641Skmacy	pmap_pte_release(ptep);
4112208504Salc	val = 0;
4113208504Salc	if ((pte & PG_V) != 0) {
4114208504Salc		val |= MINCORE_INCORE;
4115208504Salc		if ((pte & (PG_M | PG_RW)) == (PG_M | PG_RW))
4116208504Salc			val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
4117208504Salc		if ((pte & PG_A) != 0)
4118208504Salc			val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
4119208504Salc	}
4120208504Salc	if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) !=
4121208504Salc	    (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) &&
4122208504Salc	    (pte & (PG_MANAGED | PG_V)) == (PG_MANAGED | PG_V)) {
4123208504Salc		pa = pte & PG_FRAME;
4124208504Salc		/* Ensure that "PHYS_TO_VM_PAGE(pa)->object" doesn't change.
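		 *
		 * vm_page_pa_tryrelock() may drop and retake locks; when it
		 * reports a race the PTE just read can be stale, hence the
		 * jump back to the retry label above.  The idiom, sketched
		 * with illustrative helper names:
		 *
		 *	do {
		 *		pte = read_pte(pmap, addr);	// illustrative
		 *		pa = pte & frame_mask;
		 *	} while (try_relock(pa) != 0);	// raced: re-walk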
*/ 4125208504Salc if (vm_page_pa_tryrelock(pmap, pa, locked_pa)) 4126208504Salc goto retry; 4127208504Salc } else 4128208504Salc PA_UNLOCK_COND(*locked_pa); 4129181641Skmacy PMAP_UNLOCK(pmap); 4130208504Salc return (val); 4131181641Skmacy} 4132181641Skmacy 4133181641Skmacyvoid 4134181641Skmacypmap_activate(struct thread *td) 4135181641Skmacy{ 4136181641Skmacy pmap_t pmap, oldpmap; 4137223758Sattilio u_int cpuid; 4138181641Skmacy u_int32_t cr3; 4139181641Skmacy 4140181641Skmacy critical_enter(); 4141181641Skmacy pmap = vmspace_pmap(td->td_proc->p_vmspace); 4142181641Skmacy oldpmap = PCPU_GET(curpmap); 4143223758Sattilio cpuid = PCPU_GET(cpuid); 4144181641Skmacy#if defined(SMP) 4145223758Sattilio CPU_CLR_ATOMIC(cpuid, &oldpmap->pm_active); 4146223758Sattilio CPU_SET_ATOMIC(cpuid, &pmap->pm_active); 4147181641Skmacy#else 4148223758Sattilio CPU_CLR(cpuid, &oldpmap->pm_active); 4149223758Sattilio CPU_SET(cpuid, &pmap->pm_active); 4150181641Skmacy#endif 4151181641Skmacy#ifdef PAE 4152181641Skmacy cr3 = vtophys(pmap->pm_pdpt); 4153181641Skmacy#else 4154181641Skmacy cr3 = vtophys(pmap->pm_pdir); 4155181641Skmacy#endif 4156181641Skmacy /* 4157181641Skmacy * pmap_activate is for the current thread on the current cpu 4158181641Skmacy */ 4159181641Skmacy td->td_pcb->pcb_cr3 = cr3; 4160181641Skmacy PT_UPDATES_FLUSH(); 4161181641Skmacy load_cr3(cr3); 4162181641Skmacy PCPU_SET(curpmap, pmap); 4163181641Skmacy critical_exit(); 4164181641Skmacy} 4165181641Skmacy 4166198341Smarcelvoid 4167198341Smarcelpmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz) 4168198341Smarcel{ 4169198341Smarcel} 4170198341Smarcel 4171181747Skmacy/* 4172181747Skmacy * Increase the starting virtual address of the given mapping if a 4173181747Skmacy * different alignment might result in more superpage mappings. 4174181747Skmacy */ 4175181747Skmacyvoid 4176181747Skmacypmap_align_superpage(vm_object_t object, vm_ooffset_t offset, 4177181747Skmacy vm_offset_t *addr, vm_size_t size) 4178181641Skmacy{ 4179181747Skmacy vm_offset_t superpage_offset; 4180181641Skmacy 4181181747Skmacy if (size < NBPDR) 4182181747Skmacy return; 4183181747Skmacy if (object != NULL && (object->flags & OBJ_COLORED) != 0) 4184181747Skmacy offset += ptoa(object->pg_color); 4185181747Skmacy superpage_offset = offset & PDRMASK; 4186181747Skmacy if (size - ((NBPDR - superpage_offset) & PDRMASK) < NBPDR || 4187181747Skmacy (*addr & PDRMASK) == superpage_offset) 4188181747Skmacy return; 4189181747Skmacy if ((*addr & PDRMASK) < superpage_offset) 4190181747Skmacy *addr = (*addr & ~PDRMASK) + superpage_offset; 4191181747Skmacy else 4192181747Skmacy *addr = ((*addr + PDRMASK) & ~PDRMASK) + superpage_offset; 4193181641Skmacy} 4194181641Skmacy 4195190627Sdfrvoid 4196190627Sdfrpmap_suspend() 4197190627Sdfr{ 4198190627Sdfr pmap_t pmap; 4199190627Sdfr int i, pdir, offset; 4200190627Sdfr vm_paddr_t pdirma; 4201190627Sdfr mmu_update_t mu[4]; 4202190627Sdfr 4203190627Sdfr /* 4204190627Sdfr * We need to remove the recursive mapping structure from all 4205190627Sdfr * our pmaps so that Xen doesn't get confused when it restores 4206190627Sdfr * the page tables. The recursive map lives at page directory 4207190627Sdfr * index PTDPTDI. We assume that the suspend code has stopped 4208190627Sdfr * the other vcpus (if any). 
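 *
 * With PAE each of the four L2 page-directory pages holds NPDEPG == 512
 * entries, so the loop below locates each recursive-map slot with a
 * divide and a modulo.  A standalone check of that arithmetic; the
 * PTDPTDI value is invented for illustration:
 *
 *	#include <assert.h>
 *
 *	#define NPDEPG 512
 *
 *	int main(void)
 *	{
 *		unsigned ptdptdi = 1532, i;	/* example index only */
 *
 *		for (i = 0; i < 4; i++) {
 *			unsigned pdir = (ptdptdi + i) / NPDEPG;	/* L2 page */
 *			unsigned off  = (ptdptdi + i) % NPDEPG;	/* slot */
 *
 *			assert(pdir < 4 && off < NPDEPG);
 *		}
 *		return (0);
 *	}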
4209190627Sdfr */ 4210190627Sdfr LIST_FOREACH(pmap, &allpmaps, pm_list) { 4211190627Sdfr for (i = 0; i < 4; i++) { 4212190627Sdfr /* 4213190627Sdfr * Figure out which page directory (L2) page 4214190627Sdfr * contains this bit of the recursive map and 4215190627Sdfr * the offset within that page of the map 4216190627Sdfr * entry 4217190627Sdfr */ 4218190627Sdfr pdir = (PTDPTDI + i) / NPDEPG; 4219190627Sdfr offset = (PTDPTDI + i) % NPDEPG; 4220190627Sdfr pdirma = pmap->pm_pdpt[pdir] & PG_FRAME; 4221190627Sdfr mu[i].ptr = pdirma + offset * sizeof(pd_entry_t); 4222190627Sdfr mu[i].val = 0; 4223190627Sdfr } 4224190627Sdfr HYPERVISOR_mmu_update(mu, 4, NULL, DOMID_SELF); 4225190627Sdfr } 4226190627Sdfr} 4227190627Sdfr 4228190627Sdfrvoid 4229190627Sdfrpmap_resume() 4230190627Sdfr{ 4231190627Sdfr pmap_t pmap; 4232190627Sdfr int i, pdir, offset; 4233190627Sdfr vm_paddr_t pdirma; 4234190627Sdfr mmu_update_t mu[4]; 4235190627Sdfr 4236190627Sdfr /* 4237190627Sdfr * Restore the recursive map that we removed on suspend. 4238190627Sdfr */ 4239190627Sdfr LIST_FOREACH(pmap, &allpmaps, pm_list) { 4240190627Sdfr for (i = 0; i < 4; i++) { 4241190627Sdfr /* 4242190627Sdfr * Figure out which page directory (L2) page 4243190627Sdfr * contains this bit of the recursive map and 4244190627Sdfr * the offset within that page of the map 4245190627Sdfr * entry 4246190627Sdfr */ 4247190627Sdfr pdir = (PTDPTDI + i) / NPDEPG; 4248190627Sdfr offset = (PTDPTDI + i) % NPDEPG; 4249190627Sdfr pdirma = pmap->pm_pdpt[pdir] & PG_FRAME; 4250190627Sdfr mu[i].ptr = pdirma + offset * sizeof(pd_entry_t); 4251190627Sdfr mu[i].val = (pmap->pm_pdpt[i] & PG_FRAME) | PG_V; 4252190627Sdfr } 4253190627Sdfr HYPERVISOR_mmu_update(mu, 4, NULL, DOMID_SELF); 4254190627Sdfr } 4255190627Sdfr} 4256190627Sdfr 4257181641Skmacy#if defined(PMAP_DEBUG) 4258181641Skmacypmap_pid_dump(int pid) 4259181641Skmacy{ 4260181641Skmacy pmap_t pmap; 4261181641Skmacy struct proc *p; 4262181641Skmacy int npte = 0; 4263181641Skmacy int index; 4264181641Skmacy 4265181641Skmacy sx_slock(&allproc_lock); 4266181641Skmacy FOREACH_PROC_IN_SYSTEM(p) { 4267181641Skmacy if (p->p_pid != pid) 4268181641Skmacy continue; 4269181641Skmacy 4270181641Skmacy if (p->p_vmspace) { 4271181641Skmacy int i,j; 4272181641Skmacy index = 0; 4273181641Skmacy pmap = vmspace_pmap(p->p_vmspace); 4274181641Skmacy for (i = 0; i < NPDEPTD; i++) { 4275181641Skmacy pd_entry_t *pde; 4276181641Skmacy pt_entry_t *pte; 4277181641Skmacy vm_offset_t base = i << PDRSHIFT; 4278181641Skmacy 4279181641Skmacy pde = &pmap->pm_pdir[i]; 4280181641Skmacy if (pde && pmap_pde_v(pde)) { 4281181641Skmacy for (j = 0; j < NPTEPG; j++) { 4282181641Skmacy vm_offset_t va = base + (j << PAGE_SHIFT); 4283181641Skmacy if (va >= (vm_offset_t) VM_MIN_KERNEL_ADDRESS) { 4284181641Skmacy if (index) { 4285181641Skmacy index = 0; 4286181641Skmacy printf("\n"); 4287181641Skmacy } 4288181641Skmacy sx_sunlock(&allproc_lock); 4289181641Skmacy return npte; 4290181641Skmacy } 4291181641Skmacy pte = pmap_pte(pmap, va); 4292181641Skmacy if (pte && pmap_pte_v(pte)) { 4293181641Skmacy pt_entry_t pa; 4294181641Skmacy vm_page_t m; 4295181641Skmacy pa = PT_GET(pte); 4296181641Skmacy m = PHYS_TO_VM_PAGE(pa & PG_FRAME); 4297181641Skmacy printf("va: 0x%x, pt: 0x%x, h: %d, w: %d, f: 0x%x", 4298181641Skmacy va, pa, m->hold_count, m->wire_count, m->flags); 4299181641Skmacy npte++; 4300181641Skmacy index++; 4301181641Skmacy if (index >= 2) { 4302181641Skmacy index = 0; 4303181641Skmacy printf("\n"); 4304181641Skmacy } else { 4305181641Skmacy printf(" "); 
4306181641Skmacy						}
4307181641Skmacy					}
4308181641Skmacy				}
4309181641Skmacy			}
4310181641Skmacy		}
4311181641Skmacy	}
4312181641Skmacy	}
4313181641Skmacy	sx_sunlock(&allproc_lock);
4314181641Skmacy	return npte;
4315181641Skmacy}
4316181641Skmacy#endif
4317181641Skmacy
4318181641Skmacy#if defined(DEBUG)
4319181641Skmacy
4320181641Skmacystatic void pads(pmap_t pm);
4321181641Skmacyvoid pmap_pvdump(vm_paddr_t pa);
4322181641Skmacy
4323181641Skmacy/* print address space of pmap */
4324181641Skmacystatic void
4325181641Skmacypads(pmap_t pm)
4326181641Skmacy{
4327181641Skmacy	int i, j;
4328181641Skmacy	vm_paddr_t va;
4329181641Skmacy	pt_entry_t *ptep;
4330181641Skmacy
4331181641Skmacy	if (pm == kernel_pmap)
4332181641Skmacy		return;
4333181641Skmacy	for (i = 0; i < NPDEPTD; i++)
4334181641Skmacy		if (pm->pm_pdir[i])
4335181641Skmacy			for (j = 0; j < NPTEPG; j++) {
4336181641Skmacy				va = (i << PDRSHIFT) + (j << PAGE_SHIFT);
4337181641Skmacy				if (pm == kernel_pmap && va < KERNBASE)
4338181641Skmacy					continue;
4339181641Skmacy				if (pm != kernel_pmap && va > UPT_MAX_ADDRESS)
4340181641Skmacy					continue;
4341181641Skmacy				ptep = pmap_pte(pm, va);
4342181641Skmacy				if (pmap_pte_v(ptep))
4343181641Skmacy					printf("%x:%x ", va, *ptep);
4344181641Skmacy			}
4345181641Skmacy
4346181641Skmacy}
4347181641Skmacy
4348181641Skmacyvoid
4349181641Skmacypmap_pvdump(vm_paddr_t pa)
4350181641Skmacy{
4351181641Skmacy	pv_entry_t pv;
4352181641Skmacy	pmap_t pmap;
4353181641Skmacy	vm_page_t m;
4354181641Skmacy
4355181641Skmacy	printf("pa %x", pa);
4356181641Skmacy	m = PHYS_TO_VM_PAGE(pa);
4357181641Skmacy	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
4358181641Skmacy		pmap = PV_PMAP(pv);
4359181641Skmacy		printf(" -> pmap %p, va %x", (void *)pmap, pv->pv_va);
4360181641Skmacy		pads(pmap);
4361181641Skmacy	}
4362181641Skmacy	printf(" ");
4363181641Skmacy}
4364181641Skmacy#endif