/*-
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 * Copyright (c) 2005 Alan L. Cox <alc@cs.rice.edu>
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from:	@(#)pmap.c	7.7 (Berkeley)	5/12/91
 */
/*-
 * Copyright (c) 2003 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Jake Burkholder,
 * Safeport Network Services, and Network Associates Laboratories, the
 * Security Research Division of Network Associates, Inc. under
 * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA
 * CHATS research program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/i386/xen/pmap.c 195649 2009-07-12 23:31:20Z alc $");

/*
 * Manages physical address maps.
 *
 * In addition to hardware address maps, this module is called upon to
 * provide software-use-only maps which may or may not be stored in the
 * same form as hardware maps.  These pseudo-maps are used to store
 * intermediate results from copy operations to and from address spaces.
 *
 * Since the information managed by this module is also stored by the
 * logical address mapping module, this module may throw away valid
 * virtual-to-physical mappings at almost any time.  However,
 * invalidations of virtual-to-physical mappings must be done as
 * requested.
 *
 * In order to cope with hardware architectures which make
 * virtual-to-physical map invalidates expensive, this module may delay
 * invalidate or reduce-protection operations until such time as they are
 * actually necessary.  This module is given full information as to which
 * processors are currently using which maps, and to when physical maps
 * must be made correct.
 */
#define PMAP_DIAGNOSTIC

#include "opt_cpu.h"
#include "opt_pmap.h"
#include "opt_msgbuf.h"
#include "opt_smp.h"
#include "opt_xbox.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sx.h>
#include <sys/vmmeter.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#ifdef SMP
#include <sys/smp.h>
#endif

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/uma.h>

#include <machine/cpu.h>
#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/specialreg.h>
#ifdef SMP
#include <machine/smp.h>
#endif

#ifdef XBOX
#include <machine/xbox.h>
#endif

#include <xen/interface/xen.h>
#include <xen/hypervisor.h>
#include <machine/xen/hypercall.h>
#include <machine/xen/xenvar.h>
#include <machine/xen/xenfunc.h>

#if !defined(CPU_DISABLE_SSE) && defined(I686_CPU)
#define CPU_ENABLE_SSE
#endif

#ifndef PMAP_SHPGPERPROC
#define PMAP_SHPGPERPROC 200
#endif

#if defined(DIAGNOSTIC)
#define PMAP_DIAGNOSTIC
#endif

#if !defined(PMAP_DIAGNOSTIC)
#define PMAP_INLINE	__gnu89_inline
#else
#define PMAP_INLINE
#endif

#define PV_STATS
#ifdef PV_STATS
#define PV_STAT(x)	do { x ; } while (0)
#else
#define PV_STAT(x)	do { } while (0)
#endif

#define	pa_index(pa)	((pa) >> PDRSHIFT)
#define	pa_to_pvh(pa)	(&pv_table[pa_index(pa)])

/*
 * Get PDEs and PTEs for user/kernel address space
 */
#define	pmap_pde(m, v)	(&((m)->pm_pdir[(vm_offset_t)(v) >> PDRSHIFT]))
#define pdir_pde(m, v)	(m[(vm_offset_t)(v) >> PDRSHIFT])

#define pmap_pde_v(pte)		((*(int *)pte & PG_V) != 0)
#define pmap_pte_w(pte)		((*(int *)pte & PG_W) != 0)
#define pmap_pte_m(pte)		((*(int *)pte & PG_M) != 0)
#define pmap_pte_u(pte)		((*(int *)pte & PG_A) != 0)
#define pmap_pte_v(pte)		((*(int *)pte & PG_V) != 0)

#define pmap_pte_set_prot(pte, v) ((*(int *)pte &= ~PG_PROT), (*(int *)pte |= (v)))
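
/*
 * Illustrative sketch (not compiled): how the macros above decompose a
 * virtual address on non-PAE i386, where PDRSHIFT is 22 and each page
 * directory entry covers 4MB.  The helper name is hypothetical.
 */
#if 0
static void
pmap_va_decompose_example(vm_offset_t va)
{
	unsigned pdi = va >> PDRSHIFT;			  /* page directory index */
	unsigned pti = (va >> PAGE_SHIFT) & (NPTEPG - 1); /* page table index */
	unsigned off = va & PAGE_MASK;			  /* offset within the page */

	printf("va %#x -> pde %u, pte %u, offset %#x\n", va, pdi, pti, off);
}
#endif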
struct pmap kernel_pmap_store;
LIST_HEAD(pmaplist, pmap);
static struct pmaplist allpmaps;
static struct mtx allpmaps_lock;

vm_offset_t virtual_avail;	/* VA of first avail page (after kernel bss) */
vm_offset_t virtual_end;	/* VA of last avail page (end of kernel AS) */
int pgeflag = 0;		/* PG_G or-in */
int pseflag = 0;		/* PG_PS or-in */

int nkpt;
vm_offset_t kernel_vm_end;
extern u_int32_t KERNend;

#ifdef PAE
pt_entry_t pg_nx;
#if !defined(XEN)
static uma_zone_t pdptzone;
#endif
#endif

/*
 * Data for the pv entry allocation mechanism
 */
static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;
static struct md_page *pv_table;
static int shpgperproc = PMAP_SHPGPERPROC;

struct pv_chunk *pv_chunkbase;	/* KVA block for pv_chunks */
int pv_maxchunks;		/* How many chunks we have KVA for */
vm_offset_t pv_vafree;		/* freelist stored in the PTE */

/*
 * All those kernel PT submaps that BSD is so fond of
 */
struct sysmaps {
	struct mtx lock;
	pt_entry_t *CMAP1;
	pt_entry_t *CMAP2;
	caddr_t	CADDR1;
	caddr_t	CADDR2;
};
static struct sysmaps sysmaps_pcpu[MAXCPU];
pt_entry_t *CMAP1 = 0;
static pt_entry_t *CMAP3;
caddr_t CADDR1 = 0, ptvmmap = 0;
static caddr_t CADDR3;
struct msgbuf *msgbufp = 0;
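
/*
 * Sketch (not compiled): how a per-CPU sysmaps window is typically used,
 * following the pattern of pmap_zero_page() later in this file.  It
 * assumes the PT_SET_MA() helper and the Xen xpmap_ptom() translation
 * used throughout this file; the function name is hypothetical.
 */
#if 0
static void
pmap_zero_page_sketch(vm_page_t m)
{
	struct sysmaps *sysmaps;

	sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)];
	mtx_lock(&sysmaps->lock);
	sched_pin();			/* stay on this CPU's window */
	PT_SET_MA(sysmaps->CADDR2, PG_V | PG_RW |
	    xpmap_ptom(VM_PAGE_TO_PHYS(m)) | PG_A | PG_M);
	pagezero(sysmaps->CADDR2);
	PT_SET_MA(sysmaps->CADDR2, 0);	/* tear the mapping back down */
	sched_unpin();
	mtx_unlock(&sysmaps->lock);
}
#endif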
/*
 * Crashdump maps.
 */
static caddr_t crashdumpmap;

static pt_entry_t *PMAP1 = 0, *PMAP2;
static pt_entry_t *PADDR1 = 0, *PADDR2;
#ifdef SMP
static int PMAP1cpu;
static int PMAP1changedcpu;
SYSCTL_INT(_debug, OID_AUTO, PMAP1changedcpu, CTLFLAG_RD,
	   &PMAP1changedcpu, 0,
	   "Number of times pmap_pte_quick changed CPU with same PMAP1");
#endif
static int PMAP1changed;
SYSCTL_INT(_debug, OID_AUTO, PMAP1changed, CTLFLAG_RD,
	   &PMAP1changed, 0,
	   "Number of times pmap_pte_quick changed PMAP1");
static int PMAP1unchanged;
SYSCTL_INT(_debug, OID_AUTO, PMAP1unchanged, CTLFLAG_RD,
	   &PMAP1unchanged, 0,
	   "Number of times pmap_pte_quick didn't change PMAP1");
static struct mtx PMAP2mutex;

SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD, 0, "VM/pmap parameters");
static int pg_ps_enabled;
SYSCTL_INT(_vm_pmap, OID_AUTO, pg_ps_enabled, CTLFLAG_RD, &pg_ps_enabled, 0,
    "Are large page mappings enabled?");

SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_max, CTLFLAG_RD, &pv_entry_max, 0,
	"Max number of PV entries");
SYSCTL_INT(_vm_pmap, OID_AUTO, shpgperproc, CTLFLAG_RD, &shpgperproc, 0,
	"Page share factor per proc");

static void	free_pv_entry(pmap_t pmap, pv_entry_t pv);
static pv_entry_t get_pv_entry(pmap_t locked_pmap, int try);

static vm_page_t pmap_enter_quick_locked(multicall_entry_t **mcl, int *count,
    pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, vm_page_t mpte);
static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t sva,
    vm_page_t *free);
static void pmap_remove_page(struct pmap *pmap, vm_offset_t va,
    vm_page_t *free);
static void pmap_remove_entry(struct pmap *pmap, vm_page_t m,
    vm_offset_t va);
static void pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m);
static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va,
    vm_page_t m);

static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags);

static vm_page_t _pmap_allocpte(pmap_t pmap, unsigned ptepindex, int flags);
static int _pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m, vm_page_t *free);
static pt_entry_t *pmap_pte_quick(pmap_t pmap, vm_offset_t va);
static void pmap_pte_release(pt_entry_t *pte);
static int pmap_unuse_pt(pmap_t, vm_offset_t, vm_page_t *);
static vm_offset_t pmap_kmem_choose(vm_offset_t addr);
static boolean_t pmap_is_prefaultable_locked(pmap_t pmap, vm_offset_t addr);
static void pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode);

#if defined(PAE) && !defined(XEN)
static void *pmap_pdpt_allocf(uma_zone_t zone, int bytes, u_int8_t *flags, int wait);
#endif

CTASSERT(1 << PDESHIFT == sizeof(pd_entry_t));
CTASSERT(1 << PTESHIFT == sizeof(pt_entry_t));
/*
 * If you get an error here, then you set KVA_PAGES wrong!  See the
 * description of KVA_PAGES in sys/i386/include/pmap.h.  It must be a
 * multiple of 4 for a normal kernel, or a multiple of 8 for PAE.
 */
CTASSERT(KERNBASE % (1 << 24) == 0);


static __inline void
pagezero(void *page)
{
#if defined(I686_CPU)
	if (cpu_class == CPUCLASS_686) {
#if defined(CPU_ENABLE_SSE)
		if (cpu_feature & CPUID_SSE2)
			sse2_pagezero(page);
		else
#endif
			i686_pagezero(page);
	} else
#endif
		bzero(page, PAGE_SIZE);
}

void
pd_set(struct pmap *pmap, int ptepindex, vm_paddr_t val, int type)
{
	vm_paddr_t pdir_ma = vtomach(&pmap->pm_pdir[ptepindex]);

	switch (type) {
	case SH_PD_SET_VA:
#if 0
		xen_queue_pt_update(shadow_pdir_ma,
		    xpmap_ptom(val & ~(PG_RW)));
#endif
		xen_queue_pt_update(pdir_ma,
		    xpmap_ptom(val));
		break;
	case SH_PD_SET_VA_MA:
#if 0
		xen_queue_pt_update(shadow_pdir_ma,
		    val & ~(PG_RW));
#endif
		xen_queue_pt_update(pdir_ma, val);
		break;
	case SH_PD_SET_VA_CLEAR:
#if 0
		xen_queue_pt_update(shadow_pdir_ma, 0);
#endif
		xen_queue_pt_update(pdir_ma, 0);
		break;
	}
}

/*
 * Move the kernel virtual free pointer to the next
 * 4MB.  This is used to help improve performance
 * by using a large (4MB) page for much of the kernel
 * (.text, .data, .bss)
 */
static vm_offset_t
pmap_kmem_choose(vm_offset_t addr)
{
	vm_offset_t newaddr = addr;

#ifndef DISABLE_PSE
	if (cpu_feature & CPUID_PSE)
		newaddr = (addr + PDRMASK) & ~PDRMASK;
#endif
	return newaddr;
}
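
/*
 * Worked example (hypothetical numbers, not compiled): with 4MB pages,
 * PDRMASK is 0x3fffff, so the rounding above maps 0xc0123456 to
 * 0xc0400000 -- the next 4MB boundary -- and leaves an already-aligned
 * address such as 0xc0400000 unchanged.
 */
#if 0
vm_offset_t addr = 0xc0123456;
vm_offset_t rounded = (addr + PDRMASK) & ~PDRMASK;	/* 0xc0400000 */
#endif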
/*
 * Bootstrap the system enough to run with virtual memory.
 *
 * On the i386 this is called after mapping has already been enabled
 * and just syncs the pmap module with what has already been done.
 * [We can't call it easily with mapping off since the kernel is not
 * mapped with PA == VA, hence we would have to relocate every address
 * from the linked base (virtual) address "KERNBASE" to the actual
 * (physical) address starting relative to 0]
 */
void
pmap_bootstrap(vm_paddr_t firstaddr)
{
	vm_offset_t va;
	pt_entry_t *pte, *unused;
	struct sysmaps *sysmaps;
	int i;

	/*
	 * XXX The calculation of virtual_avail is wrong.  It's NKPT*PAGE_SIZE
	 * too large.  It should instead be correctly calculated in locore.s and
	 * not based on 'first' (which is a physical address, not a virtual
	 * address, for the start of unused physical memory).  The kernel
	 * page tables are NOT double mapped and thus should not be included
	 * in this calculation.
	 */
	virtual_avail = (vm_offset_t) KERNBASE + firstaddr;
	virtual_avail = pmap_kmem_choose(virtual_avail);

	virtual_end = VM_MAX_KERNEL_ADDRESS;

	/*
	 * Initialize the kernel pmap (which is statically allocated).
	 */
	PMAP_LOCK_INIT(kernel_pmap);
	kernel_pmap->pm_pdir = (pd_entry_t *) (KERNBASE + (u_int)IdlePTD);
#ifdef PAE
	kernel_pmap->pm_pdpt = (pdpt_entry_t *) (KERNBASE + (u_int)IdlePDPT);
#endif
	kernel_pmap->pm_active = -1;	/* don't allow deactivation */
	TAILQ_INIT(&kernel_pmap->pm_pvchunk);
	LIST_INIT(&allpmaps);
	mtx_init(&allpmaps_lock, "allpmaps", NULL, MTX_SPIN);
	mtx_lock_spin(&allpmaps_lock);
	LIST_INSERT_HEAD(&allpmaps, kernel_pmap, pm_list);
	mtx_unlock_spin(&allpmaps_lock);
	if (nkpt == 0)
		nkpt = NKPT;

	/*
	 * Reserve some special page table entries/VA space for temporary
	 * mapping of pages.
	 */
#define	SYSMAP(c, p, v, n)	\
	v = (c)va; va += ((n)*PAGE_SIZE); p = pte; pte += (n);

	va = virtual_avail;
	pte = vtopte(va);
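
	/*
	 * For illustration: SYSMAP(caddr_t, CMAP1, CADDR1, 1) expands to
	 *
	 *	CADDR1 = (caddr_t)va; va += 1 * PAGE_SIZE;
	 *	CMAP1 = pte; pte += 1;
	 *
	 * i.e. each use carves a run of pages out of the bootstrap KVA
	 * window and records the PTE that maps its first page.
	 */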
	/*
	 * CMAP1/CMAP2 are used for zeroing and copying pages.
	 * CMAP3 is used for the idle process page zeroing.
	 */
	for (i = 0; i < MAXCPU; i++) {
		sysmaps = &sysmaps_pcpu[i];
		mtx_init(&sysmaps->lock, "SYSMAPS", NULL, MTX_DEF);
		SYSMAP(caddr_t, sysmaps->CMAP1, sysmaps->CADDR1, 1)
		SYSMAP(caddr_t, sysmaps->CMAP2, sysmaps->CADDR2, 1)
	}
	SYSMAP(caddr_t, CMAP1, CADDR1, 1)
	SYSMAP(caddr_t, CMAP3, CADDR3, 1)
	PT_SET_MA(CADDR3, 0);

	/*
	 * Crashdump maps.
	 */
	SYSMAP(caddr_t, unused, crashdumpmap, MAXDUMPPGS)

	/*
	 * ptvmmap is used for reading arbitrary physical pages via /dev/mem.
	 */
	SYSMAP(caddr_t, unused, ptvmmap, 1)

	/*
	 * msgbufp is used to map the system message buffer.
	 */
	SYSMAP(struct msgbuf *, unused, msgbufp, atop(round_page(MSGBUF_SIZE)))

	/*
	 * ptemap is used for pmap_pte_quick
	 */
	SYSMAP(pt_entry_t *, PMAP1, PADDR1, 1);
	SYSMAP(pt_entry_t *, PMAP2, PADDR2, 1);

	mtx_init(&PMAP2mutex, "PMAP2", NULL, MTX_DEF);

	virtual_avail = va;
	PT_SET_MA(CADDR1, 0);

	/*
	 * Leave in place an identity mapping (virt == phys) for the low 1 MB
	 * physical memory region that is used by the ACPI wakeup code.  This
	 * mapping must not have PG_G set.
	 */
#ifndef XEN
	/*
	 * left here deliberately to show that this is not supported
	 */
#ifdef XBOX
	/* FIXME: This is gross, but needed for the XBOX. Since we are in such
	 * an early stage, we cannot yet neatly map video memory ... :-(
	 * Better fixes are very welcome! */
	if (!arch_i386_is_xbox)
#endif
	for (i = 1; i < NKPT; i++)
		PTD[i] = 0;

	/* Initialize the PAT MSR if present. */
	pmap_init_pat();

	/* Turn on PG_G on kernel page(s) */
	pmap_set_pg();
#endif
}

/*
 * Setup the PAT MSR.
 */
void
pmap_init_pat(void)
{
	uint64_t pat_msr;

	/* Bail if this CPU doesn't implement PAT. */
	if (!(cpu_feature & CPUID_PAT))
		return;

#ifdef PAT_WORKS
	/*
	 * Leave the indices 0-3 at the default of WB, WT, UC, and UC-.
	 * Program 4 and 5 as WP and WC.
	 * Leave 6 and 7 as UC and UC-.
	 */
	pat_msr = rdmsr(MSR_PAT);
	pat_msr &= ~(PAT_MASK(4) | PAT_MASK(5));
	pat_msr |= PAT_VALUE(4, PAT_WRITE_PROTECTED) |
	    PAT_VALUE(5, PAT_WRITE_COMBINING);
#else
	/*
	 * Due to some Intel errata, we can only safely use the lower 4
	 * PAT entries.  Thus, just replace PAT Index 2 with WC instead
	 * of UC-.
	 *
	 *   Intel Pentium III Processor Specification Update
	 * Errata E.27 (Upper Four PAT Entries Not Usable With Mode B
	 * or Mode C Paging)
	 *
	 *   Intel Pentium IV Processor Specification Update
	 * Errata N46 (PAT Index MSB May Be Calculated Incorrectly)
	 */
	pat_msr = rdmsr(MSR_PAT);
	pat_msr &= ~PAT_MASK(2);
	pat_msr |= PAT_VALUE(2, PAT_WRITE_COMBINING);
#endif
	wrmsr(MSR_PAT, pat_msr);
}
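
/*
 * For reference (not stated in the code above, but implied by it): in
 * the non-PAT_WORKS case the effective PAT layout after pmap_init_pat()
 * is
 *
 *	index:	0	1	2	3
 *	type:	WB	WT	WC	UC
 *
 * pmap_cache_bits() below turns one of these indices into the PG_NC_PWT,
 * PG_NC_PCD, and PAT bits of a PTE or PDE.
 */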
/*
 * Set PG_G on kernel pages.  Only the BSP calls this when SMP is turned on.
 */
void
pmap_set_pg(void)
{
	pd_entry_t pdir;
	pt_entry_t *pte;
	vm_offset_t va, endva;
	int i;

	if (pgeflag == 0)
		return;

	i = KERNLOAD/NBPDR;
	endva = KERNBASE + KERNend;

	if (pseflag) {
		va = KERNBASE + KERNLOAD;
		while (va < endva) {
			pdir = kernel_pmap->pm_pdir[KPTDI+i];
			pdir |= pgeflag;
			kernel_pmap->pm_pdir[KPTDI+i] = PTD[KPTDI+i] = pdir;
			invltlb();	/* Play it safe, invltlb() every time */
			i++;
			va += NBPDR;
		}
	} else {
		va = (vm_offset_t)btext;
		while (va < endva) {
			pte = vtopte(va);
			if (*pte & PG_V)
				*pte |= pgeflag;
			invltlb();	/* Play it safe, invltlb() every time */
			va += PAGE_SIZE;
		}
	}
}

/*
 * Initialize a vm_page's machine-dependent fields.
 */
void
pmap_page_init(vm_page_t m)
{

	TAILQ_INIT(&m->md.pv_list);
	m->md.pat_mode = PAT_WRITE_BACK;
}

#if defined(PAE) && !defined(XEN)
static void *
pmap_pdpt_allocf(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
{

	/* Inform UMA that this allocator uses kernel_map/object. */
	*flags = UMA_SLAB_KERNEL;
	return ((void *)kmem_alloc_contig(kernel_map, bytes, wait, 0x0ULL,
	    0xffffffffULL, 1, 0, VM_MEMATTR_DEFAULT));
}
#endif
/*
 * Abuse the pte nodes for unmapped kva to thread a kva freelist through.
 * Requirements:
 *  - Must deal with pages in order to ensure that none of the PG_* bits
 *    are ever set, PG_V in particular.
 *  - Assumes we can write to ptes without pte_store() atomic ops, even
 *    on PAE systems.  This should be ok.
 *  - Assumes nothing will ever test these addresses for 0 to indicate
 *    no mapping instead of correctly checking PG_V.
 *  - Assumes a vm_offset_t will fit in a pte (true for i386).
 * Because PG_V is never set, there can be no mappings to invalidate.
 */
static int ptelist_count = 0;
static vm_offset_t
pmap_ptelist_alloc(vm_offset_t *head)
{
	vm_offset_t va;
	vm_offset_t *phead = (vm_offset_t *)*head;

	if (ptelist_count == 0) {
		printf("out of memory!!!!!!\n");
		return (0);	/* Out of memory */
	}
	ptelist_count--;
	va = phead[ptelist_count];
	return (va);
}

static void
pmap_ptelist_free(vm_offset_t *head, vm_offset_t va)
{
	vm_offset_t *phead = (vm_offset_t *)*head;

	phead[ptelist_count++] = va;
}

static void
pmap_ptelist_init(vm_offset_t *head, void *base, int npages)
{
	int i, nstackpages;
	vm_offset_t va;
	vm_page_t m;

	nstackpages = (npages + PAGE_SIZE/sizeof(vm_offset_t) - 1)/ (PAGE_SIZE/sizeof(vm_offset_t));
	for (i = 0; i < nstackpages; i++) {
		va = (vm_offset_t)base + i * PAGE_SIZE;
		m = vm_page_alloc(NULL, i,
		    VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
		    VM_ALLOC_ZERO);
		pmap_qenter(va, &m, 1);
	}

	*head = (vm_offset_t)base;
	for (i = npages - 1; i >= nstackpages; i--) {
		va = (vm_offset_t)base + i * PAGE_SIZE;
		pmap_ptelist_free(head, va);
	}
}
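
/*
 * Sketch (not compiled): how the freelist above is consumed by the pv
 * chunk allocator later in this file.  A chunk page borrows a KVA slot
 * from pv_vafree and returns it when the chunk is reclaimed.
 */
#if 0
vm_offset_t va;

va = pmap_ptelist_alloc(&pv_vafree);	/* take a free KVA page */
if (va == 0)
	panic("no KVA slots left for pv chunks");
/* ... map a freshly allocated page at va and use it as a pv chunk ... */
pmap_ptelist_free(&pv_vafree, va);	/* give the slot back */
#endif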
/*
 * Initialize the pmap module.
 * Called by vm_init, to initialize any structures that the pmap
 * system needs to map virtual memory.
 */
void
pmap_init(void)
{
	vm_page_t mpte;
	vm_size_t s;
	int i, pv_npg;

	/*
	 * Initialize the vm page array entries for the kernel pmap's
	 * page table pages.
	 */
	for (i = 0; i < nkpt; i++) {
		mpte = PHYS_TO_VM_PAGE(xpmap_mtop(PTD[i + KPTDI] & PG_FRAME));
		KASSERT(mpte >= vm_page_array &&
		    mpte < &vm_page_array[vm_page_array_size],
		    ("pmap_init: page table page is out of range"));
		mpte->pindex = i + KPTDI;
		mpte->phys_addr = xpmap_mtop(PTD[i + KPTDI] & PG_FRAME);
	}

	/*
	 * Initialize the address space (zone) for the pv entries.  Set a
	 * high water mark so that the system can recover from excessive
	 * numbers of pv entries.
	 */
	TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
	pv_entry_max = shpgperproc * maxproc + cnt.v_page_count;
	TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max);
	pv_entry_max = roundup(pv_entry_max, _NPCPV);
	pv_entry_high_water = 9 * (pv_entry_max / 10);

	/*
	 * Are large page mappings enabled?
	 */
	TUNABLE_INT_FETCH("vm.pmap.pg_ps_enabled", &pg_ps_enabled);

	/*
	 * Calculate the size of the pv head table for superpages.
	 */
	for (i = 0; phys_avail[i + 1]; i += 2);
	pv_npg = round_4mpage(phys_avail[(i - 2) + 1]) / NBPDR;

	/*
	 * Allocate memory for the pv head table for superpages.
	 */
	s = (vm_size_t)(pv_npg * sizeof(struct md_page));
	s = round_page(s);
	pv_table = (struct md_page *)kmem_alloc(kernel_map, s);
	for (i = 0; i < pv_npg; i++)
		TAILQ_INIT(&pv_table[i].pv_list);

	pv_maxchunks = MAX(pv_entry_max / _NPCPV, maxproc);
	pv_chunkbase = (struct pv_chunk *)kmem_alloc_nofault(kernel_map,
	    PAGE_SIZE * pv_maxchunks);
	if (pv_chunkbase == NULL)
		panic("pmap_init: not enough kvm for pv chunks");
	pmap_ptelist_init(&pv_vafree, pv_chunkbase, pv_maxchunks);
#if defined(PAE) && !defined(XEN)
	pdptzone = uma_zcreate("PDPT", NPGPTD * sizeof(pdpt_entry_t), NULL,
	    NULL, NULL, NULL, (NPGPTD * sizeof(pdpt_entry_t)) - 1,
	    UMA_ZONE_VM | UMA_ZONE_NOFREE);
	uma_zone_set_allocf(pdptzone, pmap_pdpt_allocf);
#endif
}


/***************************************************
 * Low level helper routines.....
 ***************************************************/

/*
 * Determine the appropriate bits to set in a PTE or PDE for a specified
 * caching mode.
 */
static int
pmap_cache_bits(int mode, boolean_t is_pde)
{
	int pat_flag, pat_index, cache_bits;

	/* The PAT bit is different for PTE's and PDE's. */
	pat_flag = is_pde ? PG_PDE_PAT : PG_PTE_PAT;

	/* If we don't support PAT, map extended modes to older ones. */
	if (!(cpu_feature & CPUID_PAT)) {
		switch (mode) {
		case PAT_UNCACHEABLE:
		case PAT_WRITE_THROUGH:
		case PAT_WRITE_BACK:
			break;
		case PAT_UNCACHED:
		case PAT_WRITE_COMBINING:
		case PAT_WRITE_PROTECTED:
			mode = PAT_UNCACHEABLE;
			break;
		}
	}

	/* Map the caching mode to a PAT index. */
	switch (mode) {
#ifdef PAT_WORKS
	case PAT_UNCACHEABLE:
		pat_index = 3;
		break;
	case PAT_WRITE_THROUGH:
		pat_index = 1;
		break;
	case PAT_WRITE_BACK:
		pat_index = 0;
		break;
	case PAT_UNCACHED:
		pat_index = 2;
		break;
	case PAT_WRITE_COMBINING:
		pat_index = 5;
		break;
	case PAT_WRITE_PROTECTED:
		pat_index = 4;
		break;
#else
	case PAT_UNCACHED:
	case PAT_UNCACHEABLE:
	case PAT_WRITE_PROTECTED:
		pat_index = 3;
		break;
	case PAT_WRITE_THROUGH:
		pat_index = 1;
		break;
	case PAT_WRITE_BACK:
		pat_index = 0;
		break;
	case PAT_WRITE_COMBINING:
		pat_index = 2;
		break;
#endif
	default:
		panic("Unknown caching mode %d\n", mode);
	}

	/* Map the 3-bit index value into the PAT, PCD, and PWT bits. */
	cache_bits = 0;
	if (pat_index & 0x4)
		cache_bits |= pat_flag;
	if (pat_index & 0x2)
		cache_bits |= PG_NC_PCD;
	if (pat_index & 0x1)
		cache_bits |= PG_NC_PWT;
	return (cache_bits);
}
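
/*
 * Worked example (not compiled): in the non-PAT_WORKS case,
 * PAT_WRITE_COMBINING maps to PAT index 2, so for a PTE the function
 * returns PG_NC_PCD; PAT_UNCACHEABLE maps to index 3 and returns
 * PG_NC_PCD | PG_NC_PWT.
 */
#if 0
int wc_bits = pmap_cache_bits(PAT_WRITE_COMBINING, FALSE); /* PG_NC_PCD */
int uc_bits = pmap_cache_bits(PAT_UNCACHEABLE, FALSE); /* PG_NC_PCD | PG_NC_PWT */
#endif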
#ifdef SMP
/*
 * For SMP, these functions have to use the IPI mechanism for coherence.
 *
 * N.B.: Before calling any of the following TLB invalidation functions,
 * the calling processor must ensure that all stores updating a non-
 * kernel page table are globally performed.  Otherwise, another
 * processor could cache an old, pre-update entry without being
 * invalidated.  This can happen one of two ways: (1) The pmap becomes
 * active on another processor after its pm_active field is checked by
 * one of the following functions but before a store updating the page
 * table is globally performed. (2) The pmap becomes active on another
 * processor before its pm_active field is checked but due to
 * speculative loads one of the following functions still reads the
 * pmap as inactive on the other processor.
 *
 * The kernel page table is exempt because its pm_active field is
 * immutable.  The kernel page table is always active on every
 * processor.
 */
void
pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
{
	u_int cpumask;
	u_int other_cpus;

	CTR2(KTR_PMAP, "pmap_invalidate_page: pmap=%p va=0x%x",
	    pmap, va);

	sched_pin();
	if (pmap == kernel_pmap || pmap->pm_active == all_cpus) {
		invlpg(va);
		smp_invlpg(va);
	} else {
		cpumask = PCPU_GET(cpumask);
		other_cpus = PCPU_GET(other_cpus);
		if (pmap->pm_active & cpumask)
			invlpg(va);
		if (pmap->pm_active & other_cpus)
			smp_masked_invlpg(pmap->pm_active & other_cpus, va);
	}
	sched_unpin();
	PT_UPDATES_FLUSH();
}

void
pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{
	u_int cpumask;
	u_int other_cpus;
	vm_offset_t addr;

	CTR3(KTR_PMAP, "pmap_invalidate_range: pmap=%p sva=0x%x eva=0x%x",
	    pmap, sva, eva);

	sched_pin();
	if (pmap == kernel_pmap || pmap->pm_active == all_cpus) {
		for (addr = sva; addr < eva; addr += PAGE_SIZE)
			invlpg(addr);
		smp_invlpg_range(sva, eva);
	} else {
		cpumask = PCPU_GET(cpumask);
		other_cpus = PCPU_GET(other_cpus);
		if (pmap->pm_active & cpumask)
			for (addr = sva; addr < eva; addr += PAGE_SIZE)
				invlpg(addr);
		if (pmap->pm_active & other_cpus)
			smp_masked_invlpg_range(pmap->pm_active & other_cpus,
			    sva, eva);
	}
	sched_unpin();
	PT_UPDATES_FLUSH();
}

void
pmap_invalidate_all(pmap_t pmap)
{
	u_int cpumask;
	u_int other_cpus;

	CTR1(KTR_PMAP, "pmap_invalidate_all: pmap=%p", pmap);

	sched_pin();
	if (pmap == kernel_pmap || pmap->pm_active == all_cpus) {
		invltlb();
		smp_invltlb();
	} else {
		cpumask = PCPU_GET(cpumask);
		other_cpus = PCPU_GET(other_cpus);
		if (pmap->pm_active & cpumask)
			invltlb();
		if (pmap->pm_active & other_cpus)
			smp_masked_invltlb(pmap->pm_active & other_cpus);
	}
	sched_unpin();
}

void
pmap_invalidate_cache(void)
{

	sched_pin();
	wbinvd();
	smp_cache_flush();
	sched_unpin();
}
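
/*
 * Sketch (not compiled): the ordering the note above requires.  The
 * store updating the page table must be globally performed before the
 * shootdown is initiated; pte_store() is the non-Xen idiom, this file
 * mostly uses PT_SET_VA()/PT_SET_MA() instead.
 */
#if 0
pte_store(pte, newpte);			/* 1: update the page table entry */
pmap_invalidate_page(pmap, va);		/* 2: then invalidate stale TLB entries */
#endif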
#else /* !SMP */
/*
 * Normal, non-SMP, 486+ invalidation functions.
 * We inline these within pmap.c for speed.
 */
PMAP_INLINE void
pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
{
	CTR2(KTR_PMAP, "pmap_invalidate_page: pmap=%p va=0x%x",
	    pmap, va);

	if (pmap == kernel_pmap || pmap->pm_active)
		invlpg(va);
	PT_UPDATES_FLUSH();
}

PMAP_INLINE void
pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{
	vm_offset_t addr;

	if (eva - sva > PAGE_SIZE)
		CTR3(KTR_PMAP, "pmap_invalidate_range: pmap=%p sva=0x%x eva=0x%x",
		    pmap, sva, eva);

	if (pmap == kernel_pmap || pmap->pm_active)
		for (addr = sva; addr < eva; addr += PAGE_SIZE)
			invlpg(addr);
	PT_UPDATES_FLUSH();
}

PMAP_INLINE void
pmap_invalidate_all(pmap_t pmap)
{

	CTR1(KTR_PMAP, "pmap_invalidate_all: pmap=%p", pmap);

	if (pmap == kernel_pmap || pmap->pm_active)
		invltlb();
}

PMAP_INLINE void
pmap_invalidate_cache(void)
{

	wbinvd();
}
#endif /* !SMP */

/*
 * Are we current address space or kernel?  N.B. We return FALSE when
 * a pmap's page table is in use because a kernel thread is borrowing
 * it.  The borrowed page table can change spontaneously, making any
 * dependence on its continued use subject to a race condition.
 */
static __inline int
pmap_is_current(pmap_t pmap)
{

	return (pmap == kernel_pmap ||
	    (pmap == vmspace_pmap(curthread->td_proc->p_vmspace) &&
	    (pmap->pm_pdir[PTDPTDI] & PG_FRAME) == (PTDpde[0] & PG_FRAME)));
}

/*
 * If the given pmap is not the current or kernel pmap, the returned pte must
 * be released by passing it to pmap_pte_release().
 */
pt_entry_t *
pmap_pte(pmap_t pmap, vm_offset_t va)
{
	pd_entry_t newpf;
	pd_entry_t *pde;

	pde = pmap_pde(pmap, va);
	if (*pde & PG_PS)
		return (pde);
	if (*pde != 0) {
		/* are we current address space or kernel? */
		if (pmap_is_current(pmap))
			return (vtopte(va));
		mtx_lock(&PMAP2mutex);
		newpf = *pde & PG_FRAME;
		if ((*PMAP2 & PG_FRAME) != newpf) {
			PT_SET_MA(PADDR2, newpf | PG_V | PG_A | PG_M);
			CTR3(KTR_PMAP, "pmap_pte: pmap=%p va=0x%x newpte=0x%08x",
			    pmap, va, (*PMAP2 & 0xffffffff));
		}
		return (PADDR2 + (i386_btop(va) & (NPTEPG - 1)));
	}
	return (0);
}
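
/*
 * Sketch (not compiled): the pmap_pte()/pmap_pte_release() pairing this
 * interface requires when the pmap may be neither current nor the
 * kernel pmap.
 */
#if 0
pt_entry_t *pte, val;

pte = pmap_pte(pmap, va);
if (pte != NULL) {
	val = *pte;
	pmap_pte_release(pte);	/* drops PMAP2mutex if PADDR2 was used */
}
#endif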
/*
 * Releases a pte that was obtained from pmap_pte().  Be prepared for the pte
 * being NULL.
 */
static __inline void
pmap_pte_release(pt_entry_t *pte)
{

	if ((pt_entry_t *)((vm_offset_t)pte & ~PAGE_MASK) == PADDR2) {
		CTR1(KTR_PMAP, "pmap_pte_release: pte=0x%jx",
		    (uintmax_t)*PMAP2);
		PT_SET_VA(PMAP2, 0, TRUE);
		mtx_unlock(&PMAP2mutex);
	}
}

static __inline void
invlcaddr(void *caddr)
{

	invlpg((u_int)caddr);
	PT_UPDATES_FLUSH();
}

/*
 * Super fast pmap_pte routine best used when scanning
 * the pv lists.  This eliminates many coarse-grained
 * invltlb calls.  Note that many of the pv list
 * scans are across different pmaps.  It is very wasteful
 * to do an entire invltlb for checking a single mapping.
 *
 * If the given pmap is not the current pmap, vm_page_queue_mtx
 * must be held and curthread pinned to a CPU.
 */
static pt_entry_t *
pmap_pte_quick(pmap_t pmap, vm_offset_t va)
{
	pd_entry_t newpf;
	pd_entry_t *pde;

	pde = pmap_pde(pmap, va);
	if (*pde & PG_PS)
		return (pde);
	if (*pde != 0) {
		/* are we current address space or kernel? */
		if (pmap_is_current(pmap))
			return (vtopte(va));
		mtx_assert(&vm_page_queue_mtx, MA_OWNED);
		KASSERT(curthread->td_pinned > 0, ("curthread not pinned"));
		newpf = *pde & PG_FRAME;
		if ((*PMAP1 & PG_FRAME) != newpf) {
			PT_SET_MA(PADDR1, newpf | PG_V | PG_A | PG_M);
			CTR3(KTR_PMAP, "pmap_pte_quick: pmap=%p va=0x%x newpte=0x%08x",
			    pmap, va, (u_long)*PMAP1);

#ifdef SMP
			PMAP1cpu = PCPU_GET(cpuid);
#endif
			PMAP1changed++;
		} else
#ifdef SMP
		if (PMAP1cpu != PCPU_GET(cpuid)) {
			PMAP1cpu = PCPU_GET(cpuid);
			invlcaddr(PADDR1);
			PMAP1changedcpu++;
		} else
#endif
			PMAP1unchanged++;
		return (PADDR1 + (i386_btop(va) & (NPTEPG - 1)));
	}
	return (0);
}
/*
 *	Routine:	pmap_extract
 *	Function:
 *		Extract the physical page address associated
 *		with the given map/virtual_address pair.
 */
vm_paddr_t
pmap_extract(pmap_t pmap, vm_offset_t va)
{
	vm_paddr_t rtval;
	pt_entry_t *pte;
	pd_entry_t pde;
	pt_entry_t pteval;

	rtval = 0;
	PMAP_LOCK(pmap);
	pde = pmap->pm_pdir[va >> PDRSHIFT];
	if (pde != 0) {
		if ((pde & PG_PS) != 0) {
			rtval = xpmap_mtop(pde & PG_PS_FRAME) | (va & PDRMASK);
			PMAP_UNLOCK(pmap);
			return rtval;
		}
		pte = pmap_pte(pmap, va);
		pteval = *pte ? xpmap_mtop(*pte) : 0;
		rtval = (pteval & PG_FRAME) | (va & PAGE_MASK);
		pmap_pte_release(pte);
	}
	PMAP_UNLOCK(pmap);
	return (rtval);
}

/*
 *	Routine:	pmap_extract_ma
 *	Function:
 *		Like pmap_extract, but returns machine address
 */
vm_paddr_t
pmap_extract_ma(pmap_t pmap, vm_offset_t va)
{
	vm_paddr_t rtval;
	pt_entry_t *pte;
	pd_entry_t pde;

	rtval = 0;
	PMAP_LOCK(pmap);
	pde = pmap->pm_pdir[va >> PDRSHIFT];
	if (pde != 0) {
		if ((pde & PG_PS) != 0) {
			rtval = (pde & ~PDRMASK) | (va & PDRMASK);
			PMAP_UNLOCK(pmap);
			return rtval;
		}
		pte = pmap_pte(pmap, va);
		rtval = (*pte & PG_FRAME) | (va & PAGE_MASK);
		pmap_pte_release(pte);
	}
	PMAP_UNLOCK(pmap);
	return (rtval);
}
/*
 *	Routine:	pmap_extract_and_hold
 *	Function:
 *		Atomically extract and hold the physical page
 *		with the given pmap and virtual address pair
 *		if that mapping permits the given protection.
 */
vm_page_t
pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
{
	pd_entry_t pde;
	pt_entry_t pte;
	vm_page_t m;

	m = NULL;
	vm_page_lock_queues();
	PMAP_LOCK(pmap);
	pde = PT_GET(pmap_pde(pmap, va));
	if (pde != 0) {
		if (pde & PG_PS) {
			if ((pde & PG_RW) || (prot & VM_PROT_WRITE) == 0) {
				m = PHYS_TO_VM_PAGE((pde & PG_PS_FRAME) |
				    (va & PDRMASK));
				vm_page_hold(m);
			}
		} else {
			sched_pin();
			pte = PT_GET(pmap_pte_quick(pmap, va));
			if (*PMAP1)
				PT_SET_MA(PADDR1, 0);
			if ((pte & PG_V) &&
			    ((pte & PG_RW) || (prot & VM_PROT_WRITE) == 0)) {
				m = PHYS_TO_VM_PAGE(pte & PG_FRAME);
				vm_page_hold(m);
			}
			sched_unpin();
		}
	}
	vm_page_unlock_queues();
	PMAP_UNLOCK(pmap);
	return (m);
}

/***************************************************
 * Low level mapping routines.....
 ***************************************************/

/*
 * Add a wired page to the kva.
 * Note: not SMP coherent.
 */
void
pmap_kenter(vm_offset_t va, vm_paddr_t pa)
{

	PT_SET_MA(va, xpmap_ptom(pa) | PG_RW | PG_V | pgeflag);
}

void
pmap_kenter_ma(vm_offset_t va, vm_paddr_t ma)
{
	pt_entry_t *pte;

	pte = vtopte(va);
	pte_store_ma(pte, ma | PG_RW | PG_V | pgeflag);
}


static __inline void
pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode)
{

	PT_SET_MA(va, pa | PG_RW | PG_V | pgeflag | pmap_cache_bits(mode, 0));
}

/*
 * Remove a page from the kernel pagetables.
 * Note: not SMP coherent.
 */
PMAP_INLINE void
pmap_kremove(vm_offset_t va)
{
	pt_entry_t *pte;

	pte = vtopte(va);
	PT_CLEAR_VA(pte, FALSE);
}
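
/*
 * Sketch (not compiled): because pmap_kenter()/pmap_kremove() are not
 * SMP coherent, callers pair them with an explicit invalidation, as
 * pmap_map() and pmap_qremove() below do for ranges.
 */
#if 0
pmap_kenter(va, pa);			/* write the PTE */
pmap_invalidate_page(kernel_pmap, va);	/* make it visible everywhere */
#endif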
/*
 * Used to map a range of physical addresses into kernel
 * virtual address space.
 *
 * The value passed in '*virt' is a suggested virtual address for
 * the mapping. Architectures which can support a direct-mapped
 * physical to virtual region can return the appropriate address
 * within that region, leaving '*virt' unchanged. Other
 * architectures should map the pages starting at '*virt' and
 * update '*virt' with the first usable address after the mapped
 * region.
 */
vm_offset_t
pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
{
	vm_offset_t va, sva;

	va = sva = *virt;
	CTR4(KTR_PMAP, "pmap_map: va=0x%x start=0x%jx end=0x%jx prot=0x%x",
	    va, start, end, prot);
	while (start < end) {
		pmap_kenter(va, start);
		va += PAGE_SIZE;
		start += PAGE_SIZE;
	}
	pmap_invalidate_range(kernel_pmap, sva, va);
	*virt = va;
	return (sva);
}


/*
 * Add a list of wired pages to the kva
 * this routine is only used for temporary
 * kernel mappings that do not need to have
 * page modification or references recorded.
 * Note that old mappings are simply written
 * over.  The page *must* be wired.
 * Note: SMP coherent.  Uses a ranged shootdown IPI.
 */
void
pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count)
{
	pt_entry_t *endpte, *pte;
	vm_paddr_t pa;
	vm_offset_t va = sva;
	int mclcount = 0;
	multicall_entry_t mcl[16];
	multicall_entry_t *mclp = mcl;
	int error;

	CTR2(KTR_PMAP, "pmap_qenter: sva=0x%x count=%d", va, count);
	pte = vtopte(sva);
	endpte = pte + count;
	while (pte < endpte) {
		pa = xpmap_ptom(VM_PAGE_TO_PHYS(*ma)) | pgeflag | PG_RW | PG_V | PG_M | PG_A;

		mclp->op = __HYPERVISOR_update_va_mapping;
		mclp->args[0] = va;
		mclp->args[1] = (uint32_t)(pa & 0xffffffff);
		mclp->args[2] = (uint32_t)(pa >> 32);
		mclp->args[3] = (*pte & PG_V) ? UVMF_INVLPG|UVMF_ALL : 0;

		va += PAGE_SIZE;
		pte++;
		ma++;
		mclp++;
		mclcount++;
		if (mclcount == 16) {
			error = HYPERVISOR_multicall(mcl, mclcount);
			mclp = mcl;
			mclcount = 0;
			KASSERT(error == 0, ("bad multicall %d", error));
		}
	}
	if (mclcount) {
		error = HYPERVISOR_multicall(mcl, mclcount);
		KASSERT(error == 0, ("bad multicall %d", error));
	}

#ifdef INVARIANTS
	for (pte = vtopte(sva), mclcount = 0; mclcount < count; mclcount++, pte++)
		KASSERT(*pte, ("pte not set for va=0x%x", sva + mclcount*PAGE_SIZE));
#endif
}
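
/*
 * Sketch (not compiled): the usual pmap_qenter()/pmap_qremove() pairing
 * for a temporary kernel mapping of `count' wired pages at a
 * preallocated KVA range `va'.
 */
#if 0
pmap_qenter(va, pages, count);
/* ... access the pages through va ... */
pmap_qremove(va, count);
#endif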
/*
 * This routine tears out page mappings from the
 * kernel -- it is meant only for temporary mappings.
 * Note: SMP coherent.  Uses a ranged shootdown IPI.
 */
void
pmap_qremove(vm_offset_t sva, int count)
{
	vm_offset_t va;

	CTR2(KTR_PMAP, "pmap_qremove: sva=0x%x count=%d", sva, count);
	va = sva;
	vm_page_lock_queues();
	critical_enter();
	while (count-- > 0) {
		pmap_kremove(va);
		va += PAGE_SIZE;
	}
	pmap_invalidate_range(kernel_pmap, sva, va);
	critical_exit();
	vm_page_unlock_queues();
}

/***************************************************
 * Page table page management routines.....
 ***************************************************/
static __inline void
pmap_free_zero_pages(vm_page_t free)
{
	vm_page_t m;

	while (free != NULL) {
		m = free;
		free = m->right;
		vm_page_free_zero(m);
	}
}

/*
 * This routine unholds page table pages, and if the hold count
 * drops to zero, then it decrements the wire count.
 */
static __inline int
pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m, vm_page_t *free)
{

	--m->wire_count;
	if (m->wire_count == 0)
		return _pmap_unwire_pte_hold(pmap, m, free);
	else
		return 0;
}

static int
_pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m, vm_page_t *free)
{
	vm_offset_t pteva;

	PT_UPDATES_FLUSH();
	/*
	 * unmap the page table page
	 */
	xen_pt_unpin(pmap->pm_pdir[m->pindex]);
	/*
	 * page *might* contain residual mapping :-/
	 */
	PD_CLEAR_VA(pmap, m->pindex, TRUE);
	pmap_zero_page(m);
	--pmap->pm_stats.resident_count;

	/*
	 * This is a release store so that the ordinary store unmapping
	 * the page table page is globally performed before TLB shoot-
	 * down is begun.
	 */
	atomic_subtract_rel_int(&cnt.v_wire_count, 1);

	/*
	 * Do an invltlb to make the invalidated mapping
	 * take effect immediately.
	 */
	pteva = VM_MAXUSER_ADDRESS + i386_ptob(m->pindex);
	pmap_invalidate_page(pmap, pteva);

	/*
	 * Put page on a list so that it is released after
	 * *ALL* TLB shootdown is done
	 */
	m->right = *free;
	*free = m;

	return 1;
}
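
/*
 * Sketch (not compiled): the deferred-free pattern used with the `free'
 * list above -- page table pages are only handed back to the allocator
 * once every TLB shootdown has completed.
 */
#if 0
vm_page_t free = NULL;

pmap_unuse_pt(pmap, va, &free);		/* may queue a PT page on `free' */
pmap_invalidate_page(pmap, va);		/* finish the shootdown */
pmap_free_zero_pages(free);		/* now it is safe to free */
#endif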
1440181641Skmacy */ 1441181641Skmacystatic int 1442181641Skmacypmap_unuse_pt(pmap_t pmap, vm_offset_t va, vm_page_t *free) 1443181641Skmacy{ 1444181641Skmacy pd_entry_t ptepde; 1445181641Skmacy vm_page_t mpte; 1446181641Skmacy 1447181641Skmacy if (va >= VM_MAXUSER_ADDRESS) 1448181641Skmacy return 0; 1449181641Skmacy ptepde = PT_GET(pmap_pde(pmap, va)); 1450181641Skmacy mpte = PHYS_TO_VM_PAGE(ptepde & PG_FRAME); 1451181641Skmacy return pmap_unwire_pte_hold(pmap, mpte, free); 1452181641Skmacy} 1453181641Skmacy 1454181641Skmacyvoid 1455181641Skmacypmap_pinit0(pmap_t pmap) 1456181641Skmacy{ 1457181641Skmacy 1458181641Skmacy PMAP_LOCK_INIT(pmap); 1459181641Skmacy pmap->pm_pdir = (pd_entry_t *)(KERNBASE + (vm_offset_t)IdlePTD); 1460181641Skmacy#ifdef PAE 1461181641Skmacy pmap->pm_pdpt = (pdpt_entry_t *)(KERNBASE + (vm_offset_t)IdlePDPT); 1462181641Skmacy#endif 1463181641Skmacy pmap->pm_active = 0; 1464181641Skmacy PCPU_SET(curpmap, pmap); 1465181641Skmacy TAILQ_INIT(&pmap->pm_pvchunk); 1466181641Skmacy bzero(&pmap->pm_stats, sizeof pmap->pm_stats); 1467181641Skmacy mtx_lock_spin(&allpmaps_lock); 1468181641Skmacy LIST_INSERT_HEAD(&allpmaps, pmap, pm_list); 1469181641Skmacy mtx_unlock_spin(&allpmaps_lock); 1470181641Skmacy} 1471181641Skmacy 1472181641Skmacy/* 1473181641Skmacy * Initialize a preallocated and zeroed pmap structure, 1474181641Skmacy * such as one in a vmspace structure. 1475181641Skmacy */ 1476181641Skmacyint 1477181641Skmacypmap_pinit(pmap_t pmap) 1478181641Skmacy{ 1479181641Skmacy vm_page_t m, ptdpg[NPGPTD + 1]; 1480181641Skmacy int npgptd = NPGPTD + 1; 1481181641Skmacy static int color; 1482181641Skmacy int i; 1483181641Skmacy 1484181641Skmacy PMAP_LOCK_INIT(pmap); 1485181641Skmacy 1486181641Skmacy /* 1487181641Skmacy * No need to allocate page table space yet but we do need a valid 1488181641Skmacy * page directory table. 
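 *
 * (Under PAE the directory spans NPGPTD pages plus one more page for
 * the PDPT, which is why the allocations below use NPGPTD + 1.)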
1489181641Skmacy */ 1490181641Skmacy if (pmap->pm_pdir == NULL) { 1491181641Skmacy pmap->pm_pdir = (pd_entry_t *)kmem_alloc_nofault(kernel_map, 1492181641Skmacy NBPTD); 1493181641Skmacy if (pmap->pm_pdir == NULL) { 1494181641Skmacy PMAP_LOCK_DESTROY(pmap); 1495181641Skmacy return (0); 1496181641Skmacy } 1497181641Skmacy#if defined(XEN) && defined(PAE) 1498181641Skmacy pmap->pm_pdpt = (pd_entry_t *)kmem_alloc_nofault(kernel_map, 1); 1499181641Skmacy#endif 1500181641Skmacy 1501181641Skmacy#if defined(PAE) && !defined(XEN) 1502181641Skmacy pmap->pm_pdpt = uma_zalloc(pdptzone, M_WAITOK | M_ZERO); 1503181641Skmacy KASSERT(((vm_offset_t)pmap->pm_pdpt & 1504181641Skmacy ((NPGPTD * sizeof(pdpt_entry_t)) - 1)) == 0, 1505181641Skmacy ("pmap_pinit: pdpt misaligned")); 1506181641Skmacy KASSERT(pmap_kextract((vm_offset_t)pmap->pm_pdpt) < (4ULL<<30), 1507181641Skmacy ("pmap_pinit: pdpt above 4g")); 1508181641Skmacy#endif 1509181641Skmacy } 1510181641Skmacy 1511181641Skmacy /* 1512181641Skmacy * allocate the page directory page(s) 1513181641Skmacy */ 1514181641Skmacy for (i = 0; i < npgptd;) { 1515181641Skmacy m = vm_page_alloc(NULL, color++, 1516181641Skmacy VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | 1517181641Skmacy VM_ALLOC_ZERO); 1518181641Skmacy if (m == NULL) 1519181641Skmacy VM_WAIT; 1520181641Skmacy else { 1521181641Skmacy ptdpg[i++] = m; 1522181641Skmacy } 1523181641Skmacy } 1524181641Skmacy pmap_qenter((vm_offset_t)pmap->pm_pdir, ptdpg, NPGPTD); 1525181641Skmacy for (i = 0; i < NPGPTD; i++) { 1526181641Skmacy if ((ptdpg[i]->flags & PG_ZERO) == 0) 1527181641Skmacy pagezero(&pmap->pm_pdir[i*NPTEPG]); 1528181641Skmacy } 1529181641Skmacy 1530181641Skmacy mtx_lock_spin(&allpmaps_lock); 1531181641Skmacy LIST_INSERT_HEAD(&allpmaps, pmap, pm_list); 1532181641Skmacy mtx_unlock_spin(&allpmaps_lock); 1533181641Skmacy /* Wire in kernel global address entries. 
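 *
 * Every pmap shares the kernel's page tables: the nkpt kernel PDEs
 * starting at KPTDI are copied in below, and pmap_growkernel() later
 * walks the allpmaps list to keep every pmap's copy in sync.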
*/ 1534181641Skmacy 1535181641Skmacy bcopy(PTD + KPTDI, pmap->pm_pdir + KPTDI, nkpt * sizeof(pd_entry_t)); 1536181641Skmacy#ifdef PAE 1537181641Skmacy#ifdef XEN 1538181641Skmacy pmap_qenter((vm_offset_t)pmap->pm_pdpt, &ptdpg[NPGPTD], 1); 1539181641Skmacy if ((ptdpg[NPGPTD]->flags & PG_ZERO) == 0) 1540181641Skmacy bzero(pmap->pm_pdpt, PAGE_SIZE); 1541181641Skmacy#endif 1542181641Skmacy for (i = 0; i < NPGPTD; i++) { 1543181641Skmacy vm_paddr_t ma; 1544181641Skmacy 1545181641Skmacy ma = xpmap_ptom(VM_PAGE_TO_PHYS(ptdpg[i])); 1546181641Skmacy pmap->pm_pdpt[i] = ma | PG_V; 1547181641Skmacy 1548181641Skmacy } 1549181641Skmacy#endif 1550181641Skmacy#ifdef XEN 1551181641Skmacy for (i = 0; i < NPGPTD; i++) { 1552181641Skmacy pt_entry_t *pd; 1553181641Skmacy vm_paddr_t ma; 1554181641Skmacy 1555181641Skmacy ma = xpmap_ptom(VM_PAGE_TO_PHYS(ptdpg[i])); 1556181641Skmacy pd = pmap->pm_pdir + (i * NPDEPG); 1557181641Skmacy PT_SET_MA(pd, *vtopte((vm_offset_t)pd) & ~(PG_M|PG_A|PG_U|PG_RW)); 1558181641Skmacy#if 0 1559181641Skmacy xen_pgd_pin(ma); 1560181641Skmacy#endif 1561181641Skmacy } 1562181641Skmacy 1563181641Skmacy#ifdef PAE 1564181641Skmacy PT_SET_MA(pmap->pm_pdpt, *vtopte((vm_offset_t)pmap->pm_pdpt) & ~PG_RW); 1565181641Skmacy#endif 1566181641Skmacy vm_page_lock_queues(); 1567181641Skmacy xen_flush_queue(); 1568181641Skmacy xen_pgdpt_pin(xpmap_ptom(VM_PAGE_TO_PHYS(ptdpg[NPGPTD]))); 1569181641Skmacy for (i = 0; i < NPGPTD; i++) { 1570181641Skmacy vm_paddr_t ma = xpmap_ptom(VM_PAGE_TO_PHYS(ptdpg[i])); 1571181641Skmacy PT_SET_VA_MA(&pmap->pm_pdir[PTDPTDI + i], ma | PG_V | PG_A, FALSE); 1572181641Skmacy } 1573181641Skmacy xen_flush_queue(); 1574181641Skmacy vm_page_unlock_queues(); 1575181641Skmacy#endif 1576181641Skmacy pmap->pm_active = 0; 1577181641Skmacy TAILQ_INIT(&pmap->pm_pvchunk); 1578181641Skmacy bzero(&pmap->pm_stats, sizeof pmap->pm_stats); 1579181641Skmacy 1580181641Skmacy return (1); 1581181641Skmacy} 1582181641Skmacy 1583181641Skmacy/* 1584181641Skmacy * this routine is called if the page table page is not 1585181641Skmacy * mapped correctly. 1586181641Skmacy */ 1587181641Skmacystatic vm_page_t 1588181641Skmacy_pmap_allocpte(pmap_t pmap, unsigned int ptepindex, int flags) 1589181641Skmacy{ 1590181641Skmacy vm_paddr_t ptema; 1591181641Skmacy vm_page_t m; 1592181641Skmacy 1593181641Skmacy KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT || 1594181641Skmacy (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK, 1595181641Skmacy ("_pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK")); 1596181641Skmacy 1597181641Skmacy /* 1598181641Skmacy * Allocate a page table page. 1599181641Skmacy */ 1600181641Skmacy if ((m = vm_page_alloc(NULL, ptepindex, VM_ALLOC_NOOBJ | 1601181641Skmacy VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) { 1602181641Skmacy if (flags & M_WAITOK) { 1603181641Skmacy PMAP_UNLOCK(pmap); 1604181641Skmacy vm_page_unlock_queues(); 1605181641Skmacy VM_WAIT; 1606181641Skmacy vm_page_lock_queues(); 1607181641Skmacy PMAP_LOCK(pmap); 1608181641Skmacy } 1609181641Skmacy 1610181641Skmacy /* 1611181641Skmacy * Indicate the need to retry. While waiting, the page table 1612181641Skmacy * page may have been allocated. 1613181641Skmacy */ 1614181641Skmacy return (NULL); 1615181641Skmacy } 1616181641Skmacy if ((m->flags & PG_ZERO) == 0) 1617181641Skmacy pmap_zero_page(m); 1618181641Skmacy 1619181641Skmacy /* 1620181641Skmacy * Map the pagetable page into the process address space, if 1621181641Skmacy * it isn't already there. 
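 *
 * Note that the address installed below is a *machine* address: under
 * Xen the hardware PTE must name the machine frame, so the
 * pseudo-physical VM_PAGE_TO_PHYS(m) is translated with xpmap_ptom()
 * and the page is pinned as a page table page with xen_pt_pin()
 * before the directory entry is written.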
1622181641Skmacy */ 1623181641Skmacy pmap->pm_stats.resident_count++; 1624181641Skmacy 1625181641Skmacy ptema = xpmap_ptom(VM_PAGE_TO_PHYS(m)); 1626181641Skmacy xen_pt_pin(ptema); 1627181641Skmacy PT_SET_VA_MA(&pmap->pm_pdir[ptepindex], 1628181641Skmacy (ptema | PG_U | PG_RW | PG_V | PG_A | PG_M), TRUE); 1629181641Skmacy 1630181641Skmacy KASSERT(pmap->pm_pdir[ptepindex], 1631181641Skmacy ("_pmap_allocpte: ptepindex=%d did not get mapped", ptepindex)); 1632181641Skmacy return (m); 1633181641Skmacy} 1634181641Skmacy 1635181641Skmacystatic vm_page_t 1636181641Skmacypmap_allocpte(pmap_t pmap, vm_offset_t va, int flags) 1637181641Skmacy{ 1638181641Skmacy unsigned ptepindex; 1639181641Skmacy pd_entry_t ptema; 1640181641Skmacy vm_page_t m; 1641181641Skmacy 1642181641Skmacy KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT || 1643181641Skmacy (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK, 1644181641Skmacy ("pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK")); 1645181641Skmacy 1646181641Skmacy /* 1647181641Skmacy * Calculate pagetable page index 1648181641Skmacy */ 1649181641Skmacy ptepindex = va >> PDRSHIFT; 1650181641Skmacyretry: 1651181641Skmacy /* 1652181641Skmacy * Get the page directory entry 1653181641Skmacy */ 1654181641Skmacy ptema = pmap->pm_pdir[ptepindex]; 1655181641Skmacy 1656181641Skmacy /* 1657181641Skmacy * This supports switching from a 4MB page to a 1658181641Skmacy * normal 4K page. 1659181641Skmacy */ 1660181641Skmacy if (ptema & PG_PS) { 1661181641Skmacy /* 1662181641Skmacy * XXX 1663181641Skmacy */ 1664181641Skmacy pmap->pm_pdir[ptepindex] = 0; 1665181641Skmacy ptema = 0; 1666181641Skmacy pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE; 1667181641Skmacy pmap_invalidate_all(kernel_pmap); 1668181641Skmacy } 1669181641Skmacy 1670181641Skmacy /* 1671181641Skmacy * If the page table page is mapped, we just increment the 1672181641Skmacy * hold count, and activate it. 1673181641Skmacy */ 1674181641Skmacy if (ptema & PG_V) { 1675181641Skmacy m = PHYS_TO_VM_PAGE(xpmap_mtop(ptema) & PG_FRAME); 1676181641Skmacy m->wire_count++; 1677181641Skmacy } else { 1678181641Skmacy /* 1679181641Skmacy * Here if the pte page isn't mapped, or if it has 1680181641Skmacy * been deallocated. 1681181641Skmacy */ 1682181641Skmacy CTR3(KTR_PMAP, "pmap_allocpte: pmap=%p va=0x%08x flags=0x%x", 1683181641Skmacy pmap, va, flags); 1684181641Skmacy m = _pmap_allocpte(pmap, ptepindex, flags); 1685181641Skmacy if (m == NULL && (flags & M_WAITOK)) 1686181641Skmacy goto retry; 1687181641Skmacy 1688181641Skmacy KASSERT(pmap->pm_pdir[ptepindex], ("ptepindex=%d did not get mapped", ptepindex)); 1689181641Skmacy } 1690181641Skmacy return (m); 1691181641Skmacy} 1692181641Skmacy 1693181641Skmacy 1694181641Skmacy/*************************************************** 1695181641Skmacy* Pmap allocation/deallocation routines. 1696181641Skmacy ***************************************************/ 1697181641Skmacy 1698181641Skmacy#ifdef SMP 1699181641Skmacy/* 1700181641Skmacy * Deal with a SMP shootdown of other users of the pmap that we are 1701181641Skmacy * trying to dispose of. This can be a bit hairy. 
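 *
 * The handshake, in outline: pmap_lazyfix() publishes lazyptd and
 * lazymask, clears lazywait, and sends IPI_LAZYPMAP to one victim CPU
 * at a time; pmap_lazyfix_action() on the victim reloads %cr3 if it
 * is still running on the dying pmap, clears its bit in *lazymask,
 * and sets lazywait to release the initiator's spin loop.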
1702181641Skmacy */ 1703181641Skmacystatic u_int *lazymask; 1704181641Skmacystatic u_int lazyptd; 1705181641Skmacystatic volatile u_int lazywait; 1706181641Skmacy 1707181641Skmacyvoid pmap_lazyfix_action(void); 1708181641Skmacy 1709181641Skmacyvoid 1710181641Skmacypmap_lazyfix_action(void) 1711181641Skmacy{ 1712181641Skmacy u_int mymask = PCPU_GET(cpumask); 1713181641Skmacy 1714181641Skmacy#ifdef COUNT_IPIS 1715181641Skmacy (*ipi_lazypmap_counts[PCPU_GET(cpuid)])++; 1716181641Skmacy#endif 1717181641Skmacy if (rcr3() == lazyptd) 1718181641Skmacy load_cr3(PCPU_GET(curpcb)->pcb_cr3); 1719181641Skmacy atomic_clear_int(lazymask, mymask); 1720181641Skmacy atomic_store_rel_int(&lazywait, 1); 1721181641Skmacy} 1722181641Skmacy 1723181641Skmacystatic void 1724181641Skmacypmap_lazyfix_self(u_int mymask) 1725181641Skmacy{ 1726181641Skmacy 1727181641Skmacy if (rcr3() == lazyptd) 1728181641Skmacy load_cr3(PCPU_GET(curpcb)->pcb_cr3); 1729181641Skmacy atomic_clear_int(lazymask, mymask); 1730181641Skmacy} 1731181641Skmacy 1732181641Skmacy 1733181641Skmacystatic void 1734181641Skmacypmap_lazyfix(pmap_t pmap) 1735181641Skmacy{ 1736181641Skmacy u_int mymask; 1737181641Skmacy u_int mask; 1738181641Skmacy u_int spins; 1739181641Skmacy 1740181641Skmacy while ((mask = pmap->pm_active) != 0) { 1741181641Skmacy spins = 50000000; 1742181641Skmacy mask = mask & -mask; /* Find least significant set bit */ 1743181641Skmacy mtx_lock_spin(&smp_ipi_mtx); 1744181641Skmacy#ifdef PAE 1745181641Skmacy lazyptd = vtophys(pmap->pm_pdpt); 1746181641Skmacy#else 1747181641Skmacy lazyptd = vtophys(pmap->pm_pdir); 1748181641Skmacy#endif 1749181641Skmacy mymask = PCPU_GET(cpumask); 1750181641Skmacy if (mask == mymask) { 1751181641Skmacy lazymask = &pmap->pm_active; 1752181641Skmacy pmap_lazyfix_self(mymask); 1753181641Skmacy } else { 1754181641Skmacy atomic_store_rel_int((u_int *)&lazymask, 1755181641Skmacy (u_int)&pmap->pm_active); 1756181641Skmacy atomic_store_rel_int(&lazywait, 0); 1757181641Skmacy ipi_selected(mask, IPI_LAZYPMAP); 1758181641Skmacy while (lazywait == 0) { 1759181641Skmacy ia32_pause(); 1760181641Skmacy if (--spins == 0) 1761181641Skmacy break; 1762181641Skmacy } 1763181641Skmacy } 1764181641Skmacy mtx_unlock_spin(&smp_ipi_mtx); 1765181641Skmacy if (spins == 0) 1766181641Skmacy printf("pmap_lazyfix: spun for 50000000\n"); 1767181641Skmacy } 1768181641Skmacy} 1769181641Skmacy 1770181641Skmacy#else /* SMP */ 1771181641Skmacy 1772181641Skmacy/* 1773181641Skmacy * Cleaning up on uniprocessor is easy. For various reasons, we're 1774181641Skmacy * unlikely to have to even execute this code, including the fact 1775181641Skmacy * that the cleanup is deferred until the parent does a wait(2), which 1776181641Skmacy * means that another userland process has run. 1777181641Skmacy */ 1778181641Skmacystatic void 1779181641Skmacypmap_lazyfix(pmap_t pmap) 1780181641Skmacy{ 1781181641Skmacy u_int cr3; 1782181641Skmacy 1783181641Skmacy cr3 = vtophys(pmap->pm_pdir); 1784181641Skmacy if (cr3 == rcr3()) { 1785181641Skmacy load_cr3(PCPU_GET(curpcb)->pcb_cr3); 1786181641Skmacy pmap->pm_active &= ~(PCPU_GET(cpumask)); 1787181641Skmacy } 1788181641Skmacy} 1789181641Skmacy#endif /* SMP */ 1790181641Skmacy 1791181641Skmacy/* 1792181641Skmacy * Release any resources held by the given physical map. 1793181641Skmacy * Called when a pmap initialized by pmap_pinit is being released. 1794181641Skmacy * Should only be called if the map contains no valid mappings. 
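 *
 * Under Xen the directory pages must also be unpinned here before
 * they can be freed, since pinned page table pages are kept read-only
 * to the guest.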
1795181641Skmacy */ 1796181641Skmacyvoid 1797181641Skmacypmap_release(pmap_t pmap) 1798181641Skmacy{ 1799181641Skmacy vm_page_t m, ptdpg[2*NPGPTD+1]; 1800181641Skmacy vm_paddr_t ma; 1801181641Skmacy int i; 1802181641Skmacy#ifdef XEN 1803181641Skmacy#ifdef PAE 1804181641Skmacy int npgptd = NPGPTD + 1; 1805181641Skmacy#else 1806181641Skmacy int npgptd = NPGPTD; 1807181641Skmacy#endif 1808181641Skmacy#else 1809181641Skmacy int npgptd = NPGPTD; 1810181641Skmacy#endif 1811181641Skmacy KASSERT(pmap->pm_stats.resident_count == 0, 1812181641Skmacy ("pmap_release: pmap resident count %ld != 0", 1813181641Skmacy pmap->pm_stats.resident_count)); 1814181641Skmacy PT_UPDATES_FLUSH(); 1815181641Skmacy 1816181641Skmacy pmap_lazyfix(pmap); 1817181641Skmacy mtx_lock_spin(&allpmaps_lock); 1818181641Skmacy LIST_REMOVE(pmap, pm_list); 1819181641Skmacy mtx_unlock_spin(&allpmaps_lock); 1820181641Skmacy 1821181641Skmacy for (i = 0; i < NPGPTD; i++) 1822181641Skmacy ptdpg[i] = PHYS_TO_VM_PAGE(vtophys(pmap->pm_pdir + (i*NPDEPG)) & PG_FRAME); 1823181641Skmacy pmap_qremove((vm_offset_t)pmap->pm_pdir, NPGPTD); 1824181641Skmacy#if defined(PAE) && defined(XEN) 1825181641Skmacy ptdpg[NPGPTD] = PHYS_TO_VM_PAGE(vtophys(pmap->pm_pdpt)); 1826181641Skmacy#endif 1827181641Skmacy 1828181641Skmacy for (i = 0; i < npgptd; i++) { 1829181641Skmacy m = ptdpg[i]; 1830181641Skmacy ma = xpmap_ptom(VM_PAGE_TO_PHYS(m)); 1831181641Skmacy /* unpinning L1 and L2 treated the same */ 1832181641Skmacy xen_pgd_unpin(ma); 1833181641Skmacy#ifdef PAE 1834181641Skmacy KASSERT(xpmap_ptom(VM_PAGE_TO_PHYS(m)) == (pmap->pm_pdpt[i] & PG_FRAME), 1835181641Skmacy ("pmap_release: got wrong ptd page")); 1836181641Skmacy#endif 1837181641Skmacy m->wire_count--; 1838181641Skmacy atomic_subtract_int(&cnt.v_wire_count, 1); 1839181641Skmacy vm_page_free(m); 1840181641Skmacy } 1841181641Skmacy PMAP_LOCK_DESTROY(pmap); 1842181641Skmacy} 1843181641Skmacy 1844181641Skmacystatic int 1845181641Skmacykvm_size(SYSCTL_HANDLER_ARGS) 1846181641Skmacy{ 1847181641Skmacy unsigned long ksize = VM_MAX_KERNEL_ADDRESS - KERNBASE; 1848181641Skmacy 1849181641Skmacy return sysctl_handle_long(oidp, &ksize, 0, req); 1850181641Skmacy} 1851181641SkmacySYSCTL_PROC(_vm, OID_AUTO, kvm_size, CTLTYPE_LONG|CTLFLAG_RD, 1852181641Skmacy 0, 0, kvm_size, "IU", "Size of KVM"); 1853181641Skmacy 1854181641Skmacystatic int 1855181641Skmacykvm_free(SYSCTL_HANDLER_ARGS) 1856181641Skmacy{ 1857181641Skmacy unsigned long kfree = VM_MAX_KERNEL_ADDRESS - kernel_vm_end; 1858181641Skmacy 1859181641Skmacy return sysctl_handle_long(oidp, &kfree, 0, req); 1860181641Skmacy} 1861181641SkmacySYSCTL_PROC(_vm, OID_AUTO, kvm_free, CTLTYPE_LONG|CTLFLAG_RD, 1862181641Skmacy 0, 0, kvm_free, "IU", "Amount of KVM free"); 1863181641Skmacy 1864181641Skmacy/* 1865181641Skmacy * grow the number of kernel page table entries, if needed 1866181641Skmacy */ 1867181641Skmacyvoid 1868181641Skmacypmap_growkernel(vm_offset_t addr) 1869181641Skmacy{ 1870181641Skmacy struct pmap *pmap; 1871181641Skmacy vm_paddr_t ptppaddr; 1872181641Skmacy vm_page_t nkpg; 1873181641Skmacy pd_entry_t newpdir; 1874181641Skmacy 1875181641Skmacy mtx_assert(&kernel_map->system_mtx, MA_OWNED); 1876181641Skmacy if (kernel_vm_end == 0) { 1877181641Skmacy kernel_vm_end = KERNBASE; 1878181641Skmacy nkpt = 0; 1879181641Skmacy while (pdir_pde(PTD, kernel_vm_end)) { 1880181641Skmacy kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1); 1881181641Skmacy nkpt++; 1882181641Skmacy if (kernel_vm_end - 1 >= kernel_map->max_offset) { 
1883181641Skmacy kernel_vm_end = kernel_map->max_offset; 1884181641Skmacy break; 1885181641Skmacy } 1886181641Skmacy } 1887181641Skmacy } 1888181641Skmacy addr = roundup2(addr, PAGE_SIZE * NPTEPG); 1889181641Skmacy if (addr - 1 >= kernel_map->max_offset) 1890181641Skmacy addr = kernel_map->max_offset; 1891181641Skmacy while (kernel_vm_end < addr) { 1892181641Skmacy if (pdir_pde(PTD, kernel_vm_end)) { 1893181641Skmacy kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1); 1894181641Skmacy if (kernel_vm_end - 1 >= kernel_map->max_offset) { 1895181641Skmacy kernel_vm_end = kernel_map->max_offset; 1896181641Skmacy break; 1897181641Skmacy } 1898181641Skmacy continue; 1899181641Skmacy } 1900181641Skmacy 1901181641Skmacy /* 1902181641Skmacy * This index is bogus, but out of the way 1903181641Skmacy */ 1904181641Skmacy nkpg = vm_page_alloc(NULL, nkpt, 1905181641Skmacy VM_ALLOC_NOOBJ | VM_ALLOC_SYSTEM | VM_ALLOC_WIRED); 1906181641Skmacy if (!nkpg) 1907181641Skmacy panic("pmap_growkernel: no memory to grow kernel"); 1908181641Skmacy 1909181641Skmacy nkpt++; 1910181641Skmacy 1911181641Skmacy pmap_zero_page(nkpg); 1912181641Skmacy ptppaddr = VM_PAGE_TO_PHYS(nkpg); 1913181641Skmacy newpdir = (pd_entry_t) (ptppaddr | PG_V | PG_RW | PG_A | PG_M); 1914181946Skmacy vm_page_lock_queues(); 1915181641Skmacy PD_SET_VA(kernel_pmap, (kernel_vm_end >> PDRSHIFT), newpdir, TRUE); 1916181641Skmacy mtx_lock_spin(&allpmaps_lock); 1917181641Skmacy LIST_FOREACH(pmap, &allpmaps, pm_list) 1918181641Skmacy PD_SET_VA(pmap, (kernel_vm_end >> PDRSHIFT), newpdir, TRUE); 1919181641Skmacy 1920181641Skmacy mtx_unlock_spin(&allpmaps_lock); 1921181946Skmacy vm_page_unlock_queues(); 1922181946Skmacy 1923181641Skmacy kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1); 1924181641Skmacy if (kernel_vm_end - 1 >= kernel_map->max_offset) { 1925181641Skmacy kernel_vm_end = kernel_map->max_offset; 1926181641Skmacy break; 1927181641Skmacy } 1928181641Skmacy } 1929181641Skmacy} 1930181641Skmacy 1931181641Skmacy 1932181641Skmacy/*************************************************** 1933181641Skmacy * page management routines. 
1934181641Skmacy ***************************************************/ 1935181641Skmacy 1936181641SkmacyCTASSERT(sizeof(struct pv_chunk) == PAGE_SIZE); 1937181641SkmacyCTASSERT(_NPCM == 11); 1938181641Skmacy 1939181641Skmacystatic __inline struct pv_chunk * 1940181641Skmacypv_to_chunk(pv_entry_t pv) 1941181641Skmacy{ 1942181641Skmacy 1943181641Skmacy return (struct pv_chunk *)((uintptr_t)pv & ~(uintptr_t)PAGE_MASK); 1944181641Skmacy} 1945181641Skmacy 1946181641Skmacy#define PV_PMAP(pv) (pv_to_chunk(pv)->pc_pmap) 1947181641Skmacy 1948181641Skmacy#define PC_FREE0_9 0xfffffffful /* Free values for index 0 through 9 */ 1949181641Skmacy#define PC_FREE10 0x0000fffful /* Free values for index 10 */ 1950181641Skmacy 1951181641Skmacystatic uint32_t pc_freemask[11] = { 1952181641Skmacy PC_FREE0_9, PC_FREE0_9, PC_FREE0_9, 1953181641Skmacy PC_FREE0_9, PC_FREE0_9, PC_FREE0_9, 1954181641Skmacy PC_FREE0_9, PC_FREE0_9, PC_FREE0_9, 1955181641Skmacy PC_FREE0_9, PC_FREE10 1956181641Skmacy}; 1957181641Skmacy 1958181641SkmacySYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_count, CTLFLAG_RD, &pv_entry_count, 0, 1959181641Skmacy "Current number of pv entries"); 1960181641Skmacy 1961181641Skmacy#ifdef PV_STATS 1962181641Skmacystatic int pc_chunk_count, pc_chunk_allocs, pc_chunk_frees, pc_chunk_tryfail; 1963181641Skmacy 1964181641SkmacySYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_count, CTLFLAG_RD, &pc_chunk_count, 0, 1965181641Skmacy "Current number of pv entry chunks"); 1966181641SkmacySYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_allocs, CTLFLAG_RD, &pc_chunk_allocs, 0, 1967181641Skmacy "Current number of pv entry chunks allocated"); 1968181641SkmacySYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_frees, CTLFLAG_RD, &pc_chunk_frees, 0, 1969181641Skmacy "Current number of pv entry chunks frees"); 1970181641SkmacySYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_tryfail, CTLFLAG_RD, &pc_chunk_tryfail, 0, 1971181641Skmacy "Number of times tried to get a chunk page but failed."); 1972181641Skmacy 1973181641Skmacystatic long pv_entry_frees, pv_entry_allocs; 1974181641Skmacystatic int pv_entry_spare; 1975181641Skmacy 1976181641SkmacySYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_frees, CTLFLAG_RD, &pv_entry_frees, 0, 1977181641Skmacy "Current number of pv entry frees"); 1978181641SkmacySYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_allocs, CTLFLAG_RD, &pv_entry_allocs, 0, 1979181641Skmacy "Current number of pv entry allocs"); 1980181641SkmacySYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_spare, CTLFLAG_RD, &pv_entry_spare, 0, 1981181641Skmacy "Current number of spare pv entries"); 1982181641Skmacy 1983181641Skmacystatic int pmap_collect_inactive, pmap_collect_active; 1984181641Skmacy 1985181641SkmacySYSCTL_INT(_vm_pmap, OID_AUTO, pmap_collect_inactive, CTLFLAG_RD, &pmap_collect_inactive, 0, 1986181641Skmacy "Current number times pmap_collect called on inactive queue"); 1987181641SkmacySYSCTL_INT(_vm_pmap, OID_AUTO, pmap_collect_active, CTLFLAG_RD, &pmap_collect_active, 0, 1988181641Skmacy "Current number times pmap_collect called on active queue"); 1989181641Skmacy#endif 1990181641Skmacy 1991181641Skmacy/* 1992181641Skmacy * We are in a serious low memory condition. Resort to 1993181641Skmacy * drastic measures to free some pages so we can allocate 1994181641Skmacy * another pv entry chunk. This is normally called to 1995181641Skmacy * unmap inactive pages, and if necessary, active pages. 
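 *
 * (A pv chunk is a single 4K page -- see the CTASSERT above -- whose
 * 11-word pc_map bitmap tracks 10 * 32 + 16 = 336 entries; PC_FREE10
 * masks off the 16 usable bits of the last word.)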
1996181641Skmacy */ 1997181641Skmacystatic void 1998181641Skmacypmap_collect(pmap_t locked_pmap, struct vpgqueues *vpq) 1999181641Skmacy{ 2000181641Skmacy pmap_t pmap; 2001181641Skmacy pt_entry_t *pte, tpte; 2002181641Skmacy pv_entry_t next_pv, pv; 2003181641Skmacy vm_offset_t va; 2004181641Skmacy vm_page_t m, free; 2005181641Skmacy 2006181641Skmacy sched_pin(); 2007181641Skmacy TAILQ_FOREACH(m, &vpq->pl, pageq) { 2008181641Skmacy if (m->hold_count || m->busy) 2009181641Skmacy continue; 2010181641Skmacy TAILQ_FOREACH_SAFE(pv, &m->md.pv_list, pv_list, next_pv) { 2011181641Skmacy va = pv->pv_va; 2012181641Skmacy pmap = PV_PMAP(pv); 2013181641Skmacy /* Avoid deadlock and lock recursion. */ 2014181641Skmacy if (pmap > locked_pmap) 2015181641Skmacy PMAP_LOCK(pmap); 2016181641Skmacy else if (pmap != locked_pmap && !PMAP_TRYLOCK(pmap)) 2017181641Skmacy continue; 2018181641Skmacy pmap->pm_stats.resident_count--; 2019181641Skmacy pte = pmap_pte_quick(pmap, va); 2020181641Skmacy tpte = pte_load_clear(pte); 2021181641Skmacy KASSERT((tpte & PG_W) == 0, 2022181641Skmacy ("pmap_collect: wired pte %#jx", (uintmax_t)tpte)); 2023181641Skmacy if (tpte & PG_A) 2024181641Skmacy vm_page_flag_set(m, PG_REFERENCED); 2025181641Skmacy if (tpte & PG_M) { 2026181641Skmacy KASSERT((tpte & PG_RW), 2027181641Skmacy ("pmap_collect: modified page not writable: va: %#x, pte: %#jx", 2028181641Skmacy va, (uintmax_t)tpte)); 2029181641Skmacy vm_page_dirty(m); 2030181641Skmacy } 2031181641Skmacy free = NULL; 2032181641Skmacy pmap_unuse_pt(pmap, va, &free); 2033181641Skmacy pmap_invalidate_page(pmap, va); 2034181641Skmacy pmap_free_zero_pages(free); 2035181641Skmacy TAILQ_REMOVE(&m->md.pv_list, pv, pv_list); 2036181641Skmacy if (TAILQ_EMPTY(&m->md.pv_list)) 2037181641Skmacy vm_page_flag_clear(m, PG_WRITEABLE); 2038181641Skmacy free_pv_entry(pmap, pv); 2039181641Skmacy if (pmap != locked_pmap) 2040181641Skmacy PMAP_UNLOCK(pmap); 2041181641Skmacy } 2042181641Skmacy } 2043181641Skmacy sched_unpin(); 2044181641Skmacy} 2045181641Skmacy 2046181641Skmacy 2047181641Skmacy/* 2048181641Skmacy * free the pv_entry back to the free list 2049181641Skmacy */ 2050181641Skmacystatic void 2051181641Skmacyfree_pv_entry(pmap_t pmap, pv_entry_t pv) 2052181641Skmacy{ 2053181641Skmacy vm_page_t m; 2054181641Skmacy struct pv_chunk *pc; 2055181641Skmacy int idx, field, bit; 2056181641Skmacy 2057181641Skmacy mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2058181641Skmacy PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2059181641Skmacy PV_STAT(pv_entry_frees++); 2060181641Skmacy PV_STAT(pv_entry_spare++); 2061181641Skmacy pv_entry_count--; 2062181641Skmacy pc = pv_to_chunk(pv); 2063181641Skmacy idx = pv - &pc->pc_pventry[0]; 2064181641Skmacy field = idx / 32; 2065181641Skmacy bit = idx % 32; 2066181641Skmacy pc->pc_map[field] |= 1ul << bit; 2067181641Skmacy /* move to head of list */ 2068181641Skmacy TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 2069181641Skmacy TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list); 2070181641Skmacy for (idx = 0; idx < _NPCM; idx++) 2071181641Skmacy if (pc->pc_map[idx] != pc_freemask[idx]) 2072181641Skmacy return; 2073181641Skmacy PV_STAT(pv_entry_spare -= _NPCPV); 2074181641Skmacy PV_STAT(pc_chunk_count--); 2075181641Skmacy PV_STAT(pc_chunk_frees++); 2076181641Skmacy /* entire chunk is free, return it */ 2077181641Skmacy TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 2078181641Skmacy m = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)pc)); 2079181641Skmacy pmap_qremove((vm_offset_t)pc, 1); 2080181641Skmacy vm_page_unwire(m, 0); 2081181641Skmacy 
vm_page_free(m); 2082181641Skmacy pmap_ptelist_free(&pv_vafree, (vm_offset_t)pc); 2083181641Skmacy} 2084181641Skmacy 2085181641Skmacy/* 2086181641Skmacy * get a new pv_entry, allocating a block from the system 2087181641Skmacy * when needed. 2088181641Skmacy */ 2089181641Skmacystatic pv_entry_t 2090181641Skmacyget_pv_entry(pmap_t pmap, int try) 2091181641Skmacy{ 2092181641Skmacy static const struct timeval printinterval = { 60, 0 }; 2093181641Skmacy static struct timeval lastprint; 2094181641Skmacy static vm_pindex_t colour; 2095181641Skmacy struct vpgqueues *pq; 2096181641Skmacy int bit, field; 2097181641Skmacy pv_entry_t pv; 2098181641Skmacy struct pv_chunk *pc; 2099181641Skmacy vm_page_t m; 2100181641Skmacy 2101181641Skmacy PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2102181641Skmacy mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2103181641Skmacy PV_STAT(pv_entry_allocs++); 2104181641Skmacy pv_entry_count++; 2105181641Skmacy if (pv_entry_count > pv_entry_high_water) 2106181641Skmacy if (ratecheck(&lastprint, &printinterval)) 2107181641Skmacy printf("Approaching the limit on PV entries, consider " 2108181641Skmacy "increasing either the vm.pmap.shpgperproc or the " 2109181641Skmacy "vm.pmap.pv_entry_max tunable.\n"); 2110181641Skmacy pq = NULL; 2111181641Skmacyretry: 2112181641Skmacy pc = TAILQ_FIRST(&pmap->pm_pvchunk); 2113181641Skmacy if (pc != NULL) { 2114181641Skmacy for (field = 0; field < _NPCM; field++) { 2115181641Skmacy if (pc->pc_map[field]) { 2116181641Skmacy bit = bsfl(pc->pc_map[field]); 2117181641Skmacy break; 2118181641Skmacy } 2119181641Skmacy } 2120181641Skmacy if (field < _NPCM) { 2121181641Skmacy pv = &pc->pc_pventry[field * 32 + bit]; 2122181641Skmacy pc->pc_map[field] &= ~(1ul << bit); 2123181641Skmacy /* If this was the last item, move it to tail */ 2124181641Skmacy for (field = 0; field < _NPCM; field++) 2125181641Skmacy if (pc->pc_map[field] != 0) { 2126181641Skmacy PV_STAT(pv_entry_spare--); 2127181641Skmacy return (pv); /* not full, return */ 2128181641Skmacy } 2129181641Skmacy TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 2130181641Skmacy TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list); 2131181641Skmacy PV_STAT(pv_entry_spare--); 2132181641Skmacy return (pv); 2133181641Skmacy } 2134181641Skmacy } 2135181641Skmacy /* 2136181641Skmacy * Access to the ptelist "pv_vafree" is synchronized by the page 2137181641Skmacy * queues lock. If "pv_vafree" is currently non-empty, it will 2138181641Skmacy * remain non-empty until pmap_ptelist_alloc() completes. 2139181641Skmacy */ 2140181641Skmacy if (pv_vafree == 0 || (m = vm_page_alloc(NULL, colour, (pq == 2141181641Skmacy &vm_page_queues[PQ_ACTIVE] ? VM_ALLOC_SYSTEM : VM_ALLOC_NORMAL) | 2142181641Skmacy VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) { 2143181641Skmacy if (try) { 2144181641Skmacy pv_entry_count--; 2145181641Skmacy PV_STAT(pc_chunk_tryfail++); 2146181641Skmacy return (NULL); 2147181641Skmacy } 2148181641Skmacy /* 2149181641Skmacy * Reclaim pv entries: At first, destroy mappings to 2150181641Skmacy * inactive pages. After that, if a pv chunk entry 2151181641Skmacy * is still needed, destroy mappings to active pages. 
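 *
 * Concretely, the retry ladder below steps pq from NULL to
 * PQ_INACTIVE to PQ_ACTIVE: the first failure reclaims from the
 * inactive queue, the next from the active queue, and a third failure
 * panics with advice to raise vm.pmap.shpgperproc.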
2152181641Skmacy */ 2153181641Skmacy if (pq == NULL) { 2154181641Skmacy PV_STAT(pmap_collect_inactive++); 2155181641Skmacy pq = &vm_page_queues[PQ_INACTIVE]; 2156181641Skmacy } else if (pq == &vm_page_queues[PQ_INACTIVE]) { 2157181641Skmacy PV_STAT(pmap_collect_active++); 2158181641Skmacy pq = &vm_page_queues[PQ_ACTIVE]; 2159181641Skmacy } else 2160181641Skmacy panic("get_pv_entry: increase vm.pmap.shpgperproc"); 2161181641Skmacy pmap_collect(pmap, pq); 2162181641Skmacy goto retry; 2163181641Skmacy } 2164181641Skmacy PV_STAT(pc_chunk_count++); 2165181641Skmacy PV_STAT(pc_chunk_allocs++); 2166181641Skmacy colour++; 2167181641Skmacy pc = (struct pv_chunk *)pmap_ptelist_alloc(&pv_vafree); 2168181641Skmacy pmap_qenter((vm_offset_t)pc, &m, 1); 2169181641Skmacy if ((m->flags & PG_ZERO) == 0) 2170181641Skmacy pagezero(pc); 2171181641Skmacy pc->pc_pmap = pmap; 2172181641Skmacy pc->pc_map[0] = pc_freemask[0] & ~1ul; /* preallocated bit 0 */ 2173181641Skmacy for (field = 1; field < _NPCM; field++) 2174181641Skmacy pc->pc_map[field] = pc_freemask[field]; 2175181641Skmacy pv = &pc->pc_pventry[0]; 2176181641Skmacy TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list); 2177181641Skmacy PV_STAT(pv_entry_spare += _NPCPV - 1); 2178181641Skmacy return (pv); 2179181641Skmacy} 2180181641Skmacy 2181181641Skmacystatic void 2182181641Skmacypmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va) 2183181641Skmacy{ 2184181641Skmacy pv_entry_t pv; 2185181641Skmacy 2186181641Skmacy PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2187181641Skmacy mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2188181641Skmacy TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { 2189181641Skmacy if (pmap == PV_PMAP(pv) && va == pv->pv_va) 2190181641Skmacy break; 2191181641Skmacy } 2192181641Skmacy KASSERT(pv != NULL, ("pmap_remove_entry: pv not found")); 2193181641Skmacy TAILQ_REMOVE(&m->md.pv_list, pv, pv_list); 2194181641Skmacy if (TAILQ_EMPTY(&m->md.pv_list)) 2195181641Skmacy vm_page_flag_clear(m, PG_WRITEABLE); 2196181641Skmacy free_pv_entry(pmap, pv); 2197181641Skmacy} 2198181641Skmacy 2199181641Skmacy/* 2200181641Skmacy * Create a pv entry for page at pa for 2201181641Skmacy * (pmap, va). 2202181641Skmacy */ 2203181641Skmacystatic void 2204181641Skmacypmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m) 2205181641Skmacy{ 2206181641Skmacy pv_entry_t pv; 2207181641Skmacy 2208181641Skmacy PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2209181641Skmacy mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2210181641Skmacy pv = get_pv_entry(pmap, FALSE); 2211181641Skmacy pv->pv_va = va; 2212181641Skmacy TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list); 2213181641Skmacy} 2214181641Skmacy 2215181641Skmacy/* 2216181641Skmacy * Conditionally create a pv entry. 
2217181641Skmacy */ 2218181641Skmacystatic boolean_t 2219181641Skmacypmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m) 2220181641Skmacy{ 2221181641Skmacy pv_entry_t pv; 2222181641Skmacy 2223181641Skmacy PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2224181641Skmacy mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2225181641Skmacy if (pv_entry_count < pv_entry_high_water && 2226181641Skmacy (pv = get_pv_entry(pmap, TRUE)) != NULL) { 2227181641Skmacy pv->pv_va = va; 2228181641Skmacy TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list); 2229181641Skmacy return (TRUE); 2230181641Skmacy } else 2231181641Skmacy return (FALSE); 2232181641Skmacy} 2233181641Skmacy 2234181641Skmacy/* 2235181641Skmacy * pmap_remove_pte: unmap a single page within a process's address space 2236181641Skmacy */ 2237181641Skmacystatic int 2238181641Skmacypmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t va, vm_page_t *free) 2239181641Skmacy{ 2240181641Skmacy pt_entry_t oldpte; 2241181641Skmacy vm_page_t m; 2242181641Skmacy 2243181641Skmacy CTR3(KTR_PMAP, "pmap_remove_pte: pmap=%p *ptq=0x%x va=0x%x", 2244181641Skmacy pmap, (u_long)*ptq, va); 2245181641Skmacy 2246181641Skmacy mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2247181641Skmacy PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2248181641Skmacy oldpte = *ptq; 2249181641Skmacy PT_SET_VA_MA(ptq, 0, TRUE); 2250181641Skmacy if (oldpte & PG_W) 2251181641Skmacy pmap->pm_stats.wired_count -= 1; 2252181641Skmacy /* 2253181641Skmacy * Machines that don't support invlpg also don't support 2254181641Skmacy * PG_G. 2255181641Skmacy */ 2256181641Skmacy if (oldpte & PG_G) 2257181641Skmacy pmap_invalidate_page(kernel_pmap, va); 2258181641Skmacy pmap->pm_stats.resident_count -= 1; 2259181641Skmacy /* 2260181641Skmacy * XXX This is not strictly correct, but somewhere along the line 2261181641Skmacy * we are losing the managed bit on some pages. It is unclear to me 2262181641Skmacy * why, but I think the most likely explanation is that Xen's writable 2263181641Skmacy * page table implementation doesn't respect the unused bits.
2264181641Skmacy */ 2265181641Skmacy if ((oldpte & PG_MANAGED) || ((oldpte & PG_V) && (va < VM_MAXUSER_ADDRESS)) 2266181641Skmacy ) { 2267181641Skmacy m = PHYS_TO_VM_PAGE(xpmap_mtop(oldpte) & PG_FRAME); 2268181641Skmacy 2269181641Skmacy if (!(oldpte & PG_MANAGED)) 2270181641Skmacy printf("va=0x%x is unmanaged :-( pte=0x%llx\n", va, oldpte); 2271181641Skmacy 2272181641Skmacy if (oldpte & PG_M) { 2273181641Skmacy KASSERT((oldpte & PG_RW), 2274181641Skmacy ("pmap_remove_pte: modified page not writable: va: %#x, pte: %#jx", 2275181641Skmacy va, (uintmax_t)oldpte)); 2276181641Skmacy vm_page_dirty(m); 2277181641Skmacy } 2278181641Skmacy if (oldpte & PG_A) 2279181641Skmacy vm_page_flag_set(m, PG_REFERENCED); 2280181641Skmacy pmap_remove_entry(pmap, m, va); 2281181641Skmacy } else if ((va < VM_MAXUSER_ADDRESS) && (oldpte & PG_V)) 2282181641Skmacy printf("va=0x%x is unmanaged :-( pte=0x%llx\n", va, oldpte); 2283181641Skmacy 2284181641Skmacy return (pmap_unuse_pt(pmap, va, free)); 2285181641Skmacy} 2286181641Skmacy 2287181641Skmacy/* 2288181641Skmacy * Remove a single page from a process address space 2289181641Skmacy */ 2290181641Skmacystatic void 2291181641Skmacypmap_remove_page(pmap_t pmap, vm_offset_t va, vm_page_t *free) 2292181641Skmacy{ 2293181641Skmacy pt_entry_t *pte; 2294181641Skmacy 2295181641Skmacy CTR2(KTR_PMAP, "pmap_remove_page: pmap=%p va=0x%x", 2296181641Skmacy pmap, va); 2297181641Skmacy 2298181641Skmacy mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2299181641Skmacy KASSERT(curthread->td_pinned > 0, ("curthread not pinned")); 2300181641Skmacy PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2301181641Skmacy if ((pte = pmap_pte_quick(pmap, va)) == NULL || (*pte & PG_V) == 0) 2302181641Skmacy return; 2303181641Skmacy pmap_remove_pte(pmap, pte, va, free); 2304181641Skmacy pmap_invalidate_page(pmap, va); 2305181641Skmacy if (*PMAP1) 2306181641Skmacy PT_SET_MA(PADDR1, 0); 2307181641Skmacy 2308181641Skmacy} 2309181641Skmacy 2310181641Skmacy/* 2311181641Skmacy * Remove the given range of addresses from the specified map. 2312181641Skmacy * 2313181641Skmacy * It is assumed that the start and end are properly 2314181641Skmacy * rounded to the page size. 2315181641Skmacy */ 2316181641Skmacyvoid 2317181641Skmacypmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) 2318181641Skmacy{ 2319181641Skmacy vm_offset_t pdnxt; 2320181641Skmacy pd_entry_t ptpaddr; 2321181641Skmacy pt_entry_t *pte; 2322181641Skmacy vm_page_t free = NULL; 2323181641Skmacy int anyvalid; 2324181641Skmacy 2325181641Skmacy CTR3(KTR_PMAP, "pmap_remove: pmap=%p sva=0x%x eva=0x%x", 2326181641Skmacy pmap, sva, eva); 2327181641Skmacy 2328181641Skmacy /* 2329181641Skmacy * Perform an unsynchronized read. This is, however, safe. 2330181641Skmacy */ 2331181641Skmacy if (pmap->pm_stats.resident_count == 0) 2332181641Skmacy return; 2333181641Skmacy 2334181641Skmacy anyvalid = 0; 2335181641Skmacy 2336181641Skmacy vm_page_lock_queues(); 2337181641Skmacy sched_pin(); 2338181641Skmacy PMAP_LOCK(pmap); 2339181641Skmacy 2340181641Skmacy /* 2341181641Skmacy * special handling of removing one page. a very 2342181641Skmacy * common operation and easy to short circuit some 2343181641Skmacy * code. 
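 *
 * e.g. pmap_remove(pmap, va, va + PAGE_SIZE) on an ordinary
 * (non-PG_PS) mapping takes the pmap_remove_page() shortcut below.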
2344181641Skmacy */ 2345181641Skmacy if ((sva + PAGE_SIZE == eva) && 2346181641Skmacy ((pmap->pm_pdir[(sva >> PDRSHIFT)] & PG_PS) == 0)) { 2347181641Skmacy pmap_remove_page(pmap, sva, &free); 2348181641Skmacy goto out; 2349181641Skmacy } 2350181641Skmacy 2351181641Skmacy for (; sva < eva; sva = pdnxt) { 2352181641Skmacy unsigned pdirindex; 2353181641Skmacy 2354181641Skmacy /* 2355181641Skmacy * Calculate index for next page table. 2356181641Skmacy */ 2357181641Skmacy pdnxt = (sva + NBPDR) & ~PDRMASK; 2358181641Skmacy if (pmap->pm_stats.resident_count == 0) 2359181641Skmacy break; 2360181641Skmacy 2361181641Skmacy pdirindex = sva >> PDRSHIFT; 2362181641Skmacy ptpaddr = pmap->pm_pdir[pdirindex]; 2363181641Skmacy 2364181641Skmacy /* 2365181641Skmacy * Weed out invalid mappings. Note: we assume that the page 2366181641Skmacy * directory table is always allocated, and in kernel virtual. 2367181641Skmacy */ 2368181641Skmacy if (ptpaddr == 0) 2369181641Skmacy continue; 2370181641Skmacy 2371181641Skmacy /* 2372181641Skmacy * Check for large page. 2373181641Skmacy */ 2374181641Skmacy if ((ptpaddr & PG_PS) != 0) { 2375181641Skmacy PD_CLEAR_VA(pmap, pdirindex, TRUE); 2376181641Skmacy pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE; 2377181641Skmacy anyvalid = 1; 2378181641Skmacy continue; 2379181641Skmacy } 2380181641Skmacy 2381181641Skmacy /* 2382181641Skmacy * Limit our scan to either the end of the va represented 2383181641Skmacy * by the current page table page, or to the end of the 2384181641Skmacy * range being removed. 2385181641Skmacy */ 2386181641Skmacy if (pdnxt > eva) 2387181641Skmacy pdnxt = eva; 2388181641Skmacy 2389181641Skmacy for (pte = pmap_pte_quick(pmap, sva); sva != pdnxt; pte++, 2390181641Skmacy sva += PAGE_SIZE) { 2391181641Skmacy if ((*pte & PG_V) == 0) 2392181641Skmacy continue; 2393181641Skmacy 2394181641Skmacy /* 2395181641Skmacy * The TLB entry for a PG_G mapping is invalidated 2396181641Skmacy * by pmap_remove_pte(). 2397181641Skmacy */ 2398181641Skmacy if ((*pte & PG_G) == 0) 2399181641Skmacy anyvalid = 1; 2400181641Skmacy if (pmap_remove_pte(pmap, pte, sva, &free)) 2401181641Skmacy break; 2402181641Skmacy } 2403181641Skmacy } 2404181641Skmacy PT_UPDATES_FLUSH(); 2405181641Skmacy if (*PMAP1) 2406181641Skmacy PT_SET_VA_MA(PMAP1, 0, TRUE); 2407181641Skmacyout: 2408181641Skmacy if (anyvalid) 2409181641Skmacy pmap_invalidate_all(pmap); 2410181641Skmacy sched_unpin(); 2411181641Skmacy vm_page_unlock_queues(); 2412181641Skmacy PMAP_UNLOCK(pmap); 2413181641Skmacy pmap_free_zero_pages(free); 2414181641Skmacy} 2415181641Skmacy 2416181641Skmacy/* 2417181641Skmacy * Routine: pmap_remove_all 2418181641Skmacy * Function: 2419181641Skmacy * Removes this physical page from 2420181641Skmacy * all physical maps in which it resides. 2421181641Skmacy * Reflects back modify bits to the pager. 2422181641Skmacy * 2423181641Skmacy * Notes: 2424181641Skmacy * Original versions of this routine were very 2425181641Skmacy * inefficient because they iteratively called 2426181641Skmacy * pmap_remove (slow...) 2427181641Skmacy */ 2428181641Skmacy 2429181641Skmacyvoid 2430181641Skmacypmap_remove_all(vm_page_t m) 2431181641Skmacy{ 2432181641Skmacy pv_entry_t pv; 2433181641Skmacy pmap_t pmap; 2434181641Skmacy pt_entry_t *pte, tpte; 2435181641Skmacy vm_page_t free; 2436181641Skmacy 2437181641Skmacy#if defined(PMAP_DIAGNOSTIC) 2438181641Skmacy /* 2439181641Skmacy * XXX This makes pmap_remove_all() illegal for non-managed pages! 
2440181641Skmacy */ 2441181641Skmacy if (m->flags & PG_FICTITIOUS) { 2442181641Skmacy panic("pmap_remove_all: illegal for unmanaged page, va: 0x%jx", 2443181641Skmacy VM_PAGE_TO_PHYS(m) & 0xffffffff); 2444181641Skmacy } 2445181641Skmacy#endif 2446181641Skmacy mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2447181641Skmacy sched_pin(); 2448181641Skmacy while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) { 2449181641Skmacy pmap = PV_PMAP(pv); 2450181641Skmacy PMAP_LOCK(pmap); 2451181641Skmacy pmap->pm_stats.resident_count--; 2452181641Skmacy pte = pmap_pte_quick(pmap, pv->pv_va); 2453181641Skmacy 2454181641Skmacy tpte = *pte; 2455181641Skmacy PT_SET_VA_MA(pte, 0, TRUE); 2456181641Skmacy if (tpte & PG_W) 2457181641Skmacy pmap->pm_stats.wired_count--; 2458181641Skmacy if (tpte & PG_A) 2459181641Skmacy vm_page_flag_set(m, PG_REFERENCED); 2460181641Skmacy 2461181641Skmacy /* 2462181641Skmacy * Update the vm_page_t clean and reference bits. 2463181641Skmacy */ 2464181641Skmacy if (tpte & PG_M) { 2465181641Skmacy KASSERT((tpte & PG_RW), 2466181641Skmacy ("pmap_remove_all: modified page not writable: va: %#x, pte: %#jx", 2467181641Skmacy pv->pv_va, (uintmax_t)tpte)); 2468181641Skmacy vm_page_dirty(m); 2469181641Skmacy } 2470181641Skmacy free = NULL; 2471181641Skmacy pmap_unuse_pt(pmap, pv->pv_va, &free); 2472181641Skmacy pmap_invalidate_page(pmap, pv->pv_va); 2473181641Skmacy pmap_free_zero_pages(free); 2474181641Skmacy TAILQ_REMOVE(&m->md.pv_list, pv, pv_list); 2475181641Skmacy free_pv_entry(pmap, pv); 2476181641Skmacy PMAP_UNLOCK(pmap); 2477181641Skmacy } 2478181641Skmacy vm_page_flag_clear(m, PG_WRITEABLE); 2479181641Skmacy PT_UPDATES_FLUSH(); 2480181641Skmacy if (*PMAP1) 2481181641Skmacy PT_SET_MA(PADDR1, 0); 2482181641Skmacy sched_unpin(); 2483181641Skmacy} 2484181641Skmacy 2485181641Skmacy/* 2486181641Skmacy * Set the physical protection on the 2487181641Skmacy * specified range of this map as requested. 2488181641Skmacy */ 2489181641Skmacyvoid 2490181641Skmacypmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot) 2491181641Skmacy{ 2492181641Skmacy vm_offset_t pdnxt; 2493181641Skmacy pd_entry_t ptpaddr; 2494181641Skmacy pt_entry_t *pte; 2495181641Skmacy int anychanged; 2496181641Skmacy 2497181641Skmacy CTR4(KTR_PMAP, "pmap_protect: pmap=%p sva=0x%x eva=0x%x prot=0x%x", 2498181641Skmacy pmap, sva, eva, prot); 2499181641Skmacy 2500181641Skmacy if ((prot & VM_PROT_READ) == VM_PROT_NONE) { 2501181641Skmacy pmap_remove(pmap, sva, eva); 2502181641Skmacy return; 2503181641Skmacy } 2504181641Skmacy 2505181641Skmacy#ifdef PAE 2506181641Skmacy if ((prot & (VM_PROT_WRITE|VM_PROT_EXECUTE)) == 2507181641Skmacy (VM_PROT_WRITE|VM_PROT_EXECUTE)) 2508181641Skmacy return; 2509181641Skmacy#else 2510181641Skmacy if (prot & VM_PROT_WRITE) 2511181641Skmacy return; 2512181641Skmacy#endif 2513181641Skmacy 2514181641Skmacy anychanged = 0; 2515181641Skmacy 2516181641Skmacy vm_page_lock_queues(); 2517181641Skmacy sched_pin(); 2518181641Skmacy PMAP_LOCK(pmap); 2519181641Skmacy for (; sva < eva; sva = pdnxt) { 2520181641Skmacy pt_entry_t obits, pbits; 2521181641Skmacy unsigned pdirindex; 2522181641Skmacy 2523181641Skmacy pdnxt = (sva + NBPDR) & ~PDRMASK; 2524181641Skmacy 2525181641Skmacy pdirindex = sva >> PDRSHIFT; 2526181641Skmacy ptpaddr = pmap->pm_pdir[pdirindex]; 2527181641Skmacy 2528181641Skmacy /* 2529181641Skmacy * Weed out invalid mappings. Note: we assume that the page 2530181641Skmacy * directory table is always allocated, and in kernel virtual. 
2531181641Skmacy */ 2532181641Skmacy if (ptpaddr == 0) 2533181641Skmacy continue; 2534181641Skmacy 2535181641Skmacy /* 2536181641Skmacy * Check for large page. 2537181641Skmacy */ 2538181641Skmacy if ((ptpaddr & PG_PS) != 0) { 2539181641Skmacy if ((prot & VM_PROT_WRITE) == 0) 2540181641Skmacy pmap->pm_pdir[pdirindex] &= ~(PG_M|PG_RW); 2541181641Skmacy#ifdef PAE 2542181641Skmacy if ((prot & VM_PROT_EXECUTE) == 0) 2543181641Skmacy pmap->pm_pdir[pdirindex] |= pg_nx; 2544181641Skmacy#endif 2545181641Skmacy anychanged = 1; 2546181641Skmacy continue; 2547181641Skmacy } 2548181641Skmacy 2549181641Skmacy if (pdnxt > eva) 2550181641Skmacy pdnxt = eva; 2551181641Skmacy 2552181641Skmacy for (pte = pmap_pte_quick(pmap, sva); sva != pdnxt; pte++, 2553181641Skmacy sva += PAGE_SIZE) { 2554181641Skmacy vm_page_t m; 2555181641Skmacy 2556181641Skmacyretry: 2557181641Skmacy /* 2558181641Skmacy * Regardless of whether a pte is 32 or 64 bits in 2559181641Skmacy * size, PG_RW, PG_A, and PG_M are among the least 2560181641Skmacy * significant 32 bits. 2561181641Skmacy */ 2562181641Skmacy obits = pbits = *pte; 2563181641Skmacy if ((pbits & PG_V) == 0) 2564181641Skmacy continue; 2565181641Skmacy if (pbits & PG_MANAGED) { 2566181641Skmacy m = NULL; 2567181641Skmacy if (pbits & PG_A) { 2568181641Skmacy m = PHYS_TO_VM_PAGE(xpmap_mtop(pbits) & PG_FRAME); 2569181641Skmacy vm_page_flag_set(m, PG_REFERENCED); 2570181641Skmacy pbits &= ~PG_A; 2571181641Skmacy } 2572181641Skmacy if ((pbits & PG_M) != 0) { 2573181641Skmacy if (m == NULL) 2574181641Skmacy m = PHYS_TO_VM_PAGE(xpmap_mtop(pbits) & PG_FRAME); 2575181641Skmacy vm_page_dirty(m); 2576181641Skmacy } 2577181641Skmacy } 2578181641Skmacy 2579181641Skmacy if ((prot & VM_PROT_WRITE) == 0) 2580181641Skmacy pbits &= ~(PG_RW | PG_M); 2581181641Skmacy#ifdef PAE 2582181641Skmacy if ((prot & VM_PROT_EXECUTE) == 0) 2583181641Skmacy pbits |= pg_nx; 2584181641Skmacy#endif 2585181641Skmacy 2586181641Skmacy if (pbits != obits) { 2587181641Skmacy#ifdef XEN 2588181641Skmacy obits = *pte; 2589181641Skmacy PT_SET_VA_MA(pte, pbits, TRUE); 2590181641Skmacy if (*pte != pbits) 2591181641Skmacy goto retry; 2592181641Skmacy#else 2593181641Skmacy#ifdef PAE 2594181641Skmacy if (!atomic_cmpset_64(pte, obits, pbits)) 2595181641Skmacy goto retry; 2596181641Skmacy#else 2597181641Skmacy if (!atomic_cmpset_int((u_int *)pte, obits, 2598181641Skmacy pbits)) 2599181641Skmacy goto retry; 2600181641Skmacy#endif 2601181641Skmacy#endif 2602181641Skmacy if (obits & PG_G) 2603181641Skmacy pmap_invalidate_page(pmap, sva); 2604181641Skmacy else 2605181641Skmacy anychanged = 1; 2606181641Skmacy } 2607181641Skmacy } 2608181641Skmacy } 2609181641Skmacy PT_UPDATES_FLUSH(); 2610181641Skmacy if (*PMAP1) 2611181641Skmacy PT_SET_VA_MA(PMAP1, 0, TRUE); 2612181641Skmacy if (anychanged) 2613181641Skmacy pmap_invalidate_all(pmap); 2614181641Skmacy sched_unpin(); 2615181641Skmacy vm_page_unlock_queues(); 2616181641Skmacy PMAP_UNLOCK(pmap); 2617181641Skmacy} 2618181641Skmacy 2619181641Skmacy/* 2620181641Skmacy * Insert the given physical page (p) at 2621181641Skmacy * the specified virtual address (v) in the 2622181641Skmacy * target physical map with the protection requested. 2623181641Skmacy * 2624181641Skmacy * If specified, the page will be wired down, meaning 2625181641Skmacy * that the related pte can not be reclaimed. 2626181641Skmacy * 2627181641Skmacy * NB: This is the only routine which MAY NOT lazy-evaluate 2628181641Skmacy * or lose information. 
That is, this routine must actually 2629181641Skmacy * insert this page into the given map NOW. 2630181641Skmacy */ 2631181641Skmacyvoid 2632181641Skmacypmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m, 2633181641Skmacy vm_prot_t prot, boolean_t wired) 2634181641Skmacy{ 2635181641Skmacy vm_paddr_t pa; 2636181641Skmacy pd_entry_t *pde; 2637181641Skmacy pt_entry_t *pte; 2638181641Skmacy vm_paddr_t opa; 2639181641Skmacy pt_entry_t origpte, newpte; 2640181641Skmacy vm_page_t mpte, om; 2641181641Skmacy boolean_t invlva; 2642181641Skmacy 2643181641Skmacy CTR6(KTR_PMAP, "pmap_enter: pmap=%08p va=0x%08x access=0x%x ma=0x%08x prot=0x%x wired=%d", 2644181641Skmacy pmap, va, access, xpmap_ptom(VM_PAGE_TO_PHYS(m)), prot, wired); 2645181641Skmacy va = trunc_page(va); 2646181641Skmacy#ifdef PMAP_DIAGNOSTIC 2647181641Skmacy if (va > VM_MAX_KERNEL_ADDRESS) 2648181641Skmacy panic("pmap_enter: toobig"); 2649181641Skmacy if ((va >= UPT_MIN_ADDRESS) && (va < UPT_MAX_ADDRESS)) 2650181641Skmacy panic("pmap_enter: invalid to pmap_enter page table pages (va: 0x%x)", va); 2651181641Skmacy#endif 2652181641Skmacy 2653181641Skmacy mpte = NULL; 2654181641Skmacy 2655181641Skmacy vm_page_lock_queues(); 2656181641Skmacy PMAP_LOCK(pmap); 2657181641Skmacy sched_pin(); 2658181641Skmacy 2659181641Skmacy /* 2660181641Skmacy * In the case that a page table page is not 2661181641Skmacy * resident, we are creating it here. 2662181641Skmacy */ 2663181641Skmacy if (va < VM_MAXUSER_ADDRESS) { 2664181641Skmacy mpte = pmap_allocpte(pmap, va, M_WAITOK); 2665181641Skmacy } 2666181641Skmacy#if 0 && defined(PMAP_DIAGNOSTIC) 2667181641Skmacy else { 2668181641Skmacy pd_entry_t *pdeaddr = pmap_pde(pmap, va); 2669181641Skmacy origpte = *pdeaddr; 2670181641Skmacy if ((origpte & PG_V) == 0) { 2671181641Skmacy panic("pmap_enter: invalid kernel page table page, pdir=%p, pde=%p, va=%p\n", 2672181641Skmacy pmap->pm_pdir[PTDPTDI], origpte, va); 2673181641Skmacy } 2674181641Skmacy } 2675181641Skmacy#endif 2676181641Skmacy 2677181641Skmacy pde = pmap_pde(pmap, va); 2678181641Skmacy if ((*pde & PG_PS) != 0) 2679181641Skmacy panic("pmap_enter: attempted pmap_enter on 4MB page"); 2680181641Skmacy pte = pmap_pte_quick(pmap, va); 2681181641Skmacy 2682181641Skmacy /* 2683181641Skmacy * Page Directory table entry not valid, we need a new PT page 2684181641Skmacy */ 2685181641Skmacy if (pte == NULL) { 2686181641Skmacy panic("pmap_enter: invalid page directory pdir=%#jx, va=%#x\n", 2687181641Skmacy (uintmax_t)pmap->pm_pdir[va >> PDRSHIFT], va); 2688181641Skmacy } 2689181641Skmacy 2690181641Skmacy pa = VM_PAGE_TO_PHYS(m); 2691181641Skmacy om = NULL; 2692181641Skmacy opa = origpte = 0; 2693181641Skmacy 2694181641Skmacy#if 0 2695181641Skmacy KASSERT((*pte & PG_V) || (*pte == 0), ("address set but not valid pte=%p *pte=0x%016jx", 2696181641Skmacy pte, *pte)); 2697181641Skmacy#endif 2698181641Skmacy origpte = *pte; 2699181641Skmacy if (origpte) 2700181641Skmacy origpte = xpmap_mtop(origpte); 2701181641Skmacy opa = origpte & PG_FRAME; 2702181641Skmacy 2703181641Skmacy /* 2704181641Skmacy * Mapping has not changed, must be protection or wiring change. 2705181641Skmacy */ 2706181641Skmacy if (origpte && (opa == pa)) { 2707181641Skmacy /* 2708181641Skmacy * Wiring change, just update stats. We don't worry about 2709181641Skmacy * wiring PT pages as they remain resident as long as there 2710181641Skmacy * are valid mappings in them. Hence, if a user page is wired, 2711181641Skmacy * the PT page will be also. 
2712181641Skmacy */ 2713181641Skmacy if (wired && ((origpte & PG_W) == 0)) 2714181641Skmacy pmap->pm_stats.wired_count++; 2715181641Skmacy else if (!wired && (origpte & PG_W)) 2716181641Skmacy pmap->pm_stats.wired_count--; 2717181641Skmacy 2718181641Skmacy /* 2719181641Skmacy * Remove extra pte reference 2720181641Skmacy */ 2721181641Skmacy if (mpte) 2722181641Skmacy mpte->wire_count--; 2723181641Skmacy 2724181641Skmacy /* 2725181641Skmacy * We might be turning off write access to the page, 2726181641Skmacy * so we go ahead and sense modify status. 2727181641Skmacy */ 2728181641Skmacy if (origpte & PG_MANAGED) { 2729181641Skmacy om = m; 2730181641Skmacy pa |= PG_MANAGED; 2731181641Skmacy } 2732181641Skmacy goto validate; 2733181641Skmacy } 2734181641Skmacy /* 2735181641Skmacy * Mapping has changed, invalidate old range and fall through to 2736181641Skmacy * handle validating new mapping. 2737181641Skmacy */ 2738181641Skmacy if (opa) { 2739181641Skmacy if (origpte & PG_W) 2740181641Skmacy pmap->pm_stats.wired_count--; 2741181641Skmacy if (origpte & PG_MANAGED) { 2742181641Skmacy om = PHYS_TO_VM_PAGE(opa); 2743181641Skmacy pmap_remove_entry(pmap, om, va); 2744181641Skmacy } else if (va < VM_MAXUSER_ADDRESS) 2745181641Skmacy printf("va=0x%x is unmanaged :-( \n", va); 2746181641Skmacy 2747181641Skmacy if (mpte != NULL) { 2748181641Skmacy mpte->wire_count--; 2749181641Skmacy KASSERT(mpte->wire_count > 0, 2750181641Skmacy ("pmap_enter: missing reference to page table page," 2751181641Skmacy " va: 0x%x", va)); 2752181641Skmacy } 2753181641Skmacy } else 2754181641Skmacy pmap->pm_stats.resident_count++; 2755181641Skmacy 2756181641Skmacy /* 2757181641Skmacy * Enter on the PV list if part of our managed memory. 2758181641Skmacy */ 2759181641Skmacy if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) { 2760181641Skmacy KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva, 2761181641Skmacy ("pmap_enter: managed mapping within the clean submap")); 2762181641Skmacy pmap_insert_entry(pmap, va, m); 2763181641Skmacy pa |= PG_MANAGED; 2764181641Skmacy } 2765181641Skmacy 2766181641Skmacy /* 2767181641Skmacy * Increment counters 2768181641Skmacy */ 2769181641Skmacy if (wired) 2770181641Skmacy pmap->pm_stats.wired_count++; 2771181641Skmacy 2772181641Skmacyvalidate: 2773181641Skmacy /* 2774181641Skmacy * Now validate mapping with desired protection/wiring. 2775181641Skmacy */ 2776181641Skmacy newpte = (pt_entry_t)(pa | PG_V); 2777181641Skmacy if ((prot & VM_PROT_WRITE) != 0) { 2778181641Skmacy newpte |= PG_RW; 2779181641Skmacy vm_page_flag_set(m, PG_WRITEABLE); 2780181641Skmacy } 2781181641Skmacy#ifdef PAE 2782181641Skmacy if ((prot & VM_PROT_EXECUTE) == 0) 2783181641Skmacy newpte |= pg_nx; 2784181641Skmacy#endif 2785181641Skmacy if (wired) 2786181641Skmacy newpte |= PG_W; 2787181641Skmacy if (va < VM_MAXUSER_ADDRESS) 2788181641Skmacy newpte |= PG_U; 2789181641Skmacy if (pmap == kernel_pmap) 2790181641Skmacy newpte |= pgeflag; 2791181641Skmacy 2792181641Skmacy critical_enter(); 2793181641Skmacy /* 2794181641Skmacy * if the mapping or permission bits are different, we need 2795181641Skmacy * to update the pte. 
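 *
 * (PG_M and PG_A are masked out of the comparison below because the
 * MMU may set them asynchronously; differing only in those bits does
 * not make the existing mapping stale.)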
2796181641Skmacy */ 2797181641Skmacy if ((origpte & ~(PG_M|PG_A)) != newpte) { 2798181641Skmacy if (origpte) { 2799181641Skmacy invlva = FALSE; 2800181641Skmacy origpte = *pte; 2801181641Skmacy PT_SET_VA(pte, newpte | PG_A, FALSE); 2802181641Skmacy if (origpte & PG_A) { 2803181641Skmacy if (origpte & PG_MANAGED) 2804181641Skmacy vm_page_flag_set(om, PG_REFERENCED); 2805181641Skmacy if (opa != VM_PAGE_TO_PHYS(m)) 2806181641Skmacy invlva = TRUE; 2807181641Skmacy#ifdef PAE 2808181641Skmacy if ((origpte & PG_NX) == 0 && 2809181641Skmacy (newpte & PG_NX) != 0) 2810181641Skmacy invlva = TRUE; 2811181641Skmacy#endif 2812181641Skmacy } 2813181641Skmacy if (origpte & PG_M) { 2814181641Skmacy KASSERT((origpte & PG_RW), 2815181641Skmacy ("pmap_enter: modified page not writable: va: %#x, pte: %#jx", 2816181641Skmacy va, (uintmax_t)origpte)); 2817181641Skmacy if ((origpte & PG_MANAGED) != 0) 2818181641Skmacy vm_page_dirty(om); 2819181641Skmacy if ((prot & VM_PROT_WRITE) == 0) 2820181641Skmacy invlva = TRUE; 2821181641Skmacy } 2822181641Skmacy if (invlva) 2823181641Skmacy pmap_invalidate_page(pmap, va); 2824181641Skmacy } else{ 2825181641Skmacy PT_SET_VA(pte, newpte | PG_A, FALSE); 2826181641Skmacy } 2827181641Skmacy 2828181641Skmacy } 2829181641Skmacy PT_UPDATES_FLUSH(); 2830181641Skmacy critical_exit(); 2831181641Skmacy if (*PMAP1) 2832181641Skmacy PT_SET_VA_MA(PMAP1, 0, TRUE); 2833181641Skmacy sched_unpin(); 2834181641Skmacy vm_page_unlock_queues(); 2835181641Skmacy PMAP_UNLOCK(pmap); 2836181641Skmacy} 2837181641Skmacy 2838181641Skmacy/* 2839181641Skmacy * Maps a sequence of resident pages belonging to the same object. 2840181641Skmacy * The sequence begins with the given page m_start. This page is 2841181641Skmacy * mapped at the given virtual address start. Each subsequent page is 2842181641Skmacy * mapped at a virtual address that is offset from start by the same 2843181641Skmacy * amount as the page is offset from m_start within the object. The 2844181641Skmacy * last page in the sequence is the page with the largest offset from 2845181641Skmacy * m_start that can be mapped at a virtual address less than the given 2846181641Skmacy * virtual address end. Not every virtual page between start and end 2847181641Skmacy * is mapped; only those for which a resident page exists with the 2848181641Skmacy * corresponding offset from m_start are mapped. 
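 *
 * An illustrative call (hypothetical caller state): map the resident
 * pages of an object into [start, end):
 *
 *	pmap_enter_object(pmap, start, end, m_start, prot);
 *
 * Under Xen the page table updates are batched sixteen at a time
 * through HYPERVISOR_multicall(), as in pmap_qenter() above.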

/*
 * Maps a sequence of resident pages belonging to the same object.
 * The sequence begins with the given page m_start.  This page is
 * mapped at the given virtual address start.  Each subsequent page is
 * mapped at a virtual address that is offset from start by the same
 * amount as the page is offset from m_start within the object.  The
 * last page in the sequence is the page with the largest offset from
 * m_start that can be mapped at a virtual address less than the given
 * virtual address end.  Not every virtual page between start and end
 * is mapped; only those for which a resident page exists with the
 * corresponding offset from m_start are mapped.
 */
void
pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
    vm_page_t m_start, vm_prot_t prot)
{
	vm_page_t m, mpte;
	vm_pindex_t diff, psize;
	multicall_entry_t mcl[16];
	multicall_entry_t *mclp = mcl;
	int error, count = 0;

	VM_OBJECT_LOCK_ASSERT(m_start->object, MA_OWNED);
	psize = atop(end - start);

	mpte = NULL;
	m = m_start;
	PMAP_LOCK(pmap);
	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
		mpte = pmap_enter_quick_locked(&mclp, &count, pmap,
		    start + ptoa(diff), m, prot, mpte);
		m = TAILQ_NEXT(m, listq);
		if (count == 16) {
			error = HYPERVISOR_multicall(mcl, count);
			KASSERT(error == 0, ("bad multicall %d", error));
			mclp = mcl;
			count = 0;
		}
	}
	if (count) {
		error = HYPERVISOR_multicall(mcl, count);
		KASSERT(error == 0, ("bad multicall %d", error));
	}

	PMAP_UNLOCK(pmap);
}

/*
 * This code makes some *MAJOR* assumptions:
 * 1. The current pmap and the target pmap exist.
 * 2. The mapping is not wired.
 * 3. Only read access is required.
 * 4. No page table pages are needed.
 * It is *MUCH* faster than pmap_enter...
 */
void
pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
{
	multicall_entry_t mcl, *mclp;
	int count = 0;

	mclp = &mcl;

	CTR4(KTR_PMAP, "pmap_enter_quick: pmap=%p va=0x%x m=%p prot=0x%x",
	    pmap, va, m, prot);

	PMAP_LOCK(pmap);
	(void)pmap_enter_quick_locked(&mclp, &count, pmap, va, m, prot, NULL);
	if (count)
		HYPERVISOR_multicall(&mcl, count);
	PMAP_UNLOCK(pmap);
}

#ifdef notyet
void
pmap_enter_quick_range(pmap_t pmap, vm_offset_t *addrs, vm_page_t *pages,
    vm_prot_t *prots, int count)
{
	int i, error, index = 0;
	multicall_entry_t mcl[16];
	multicall_entry_t *mclp = mcl;

	PMAP_LOCK(pmap);
	for (i = 0; i < count; i++, addrs++, pages++, prots++) {
		if (!pmap_is_prefaultable_locked(pmap, *addrs))
			continue;

		(void)pmap_enter_quick_locked(&mclp, &index, pmap, *addrs,
		    *pages, *prots, NULL);
		if (index == 16) {
			error = HYPERVISOR_multicall(mcl, index);
			mclp = mcl;
			index = 0;
			KASSERT(error == 0, ("bad multicall %d", error));
		}
	}
	if (index) {
		error = HYPERVISOR_multicall(mcl, index);
		KASSERT(error == 0, ("bad multicall %d", error));
	}

	PMAP_UNLOCK(pmap);
}
#endif

static vm_page_t
pmap_enter_quick_locked(multicall_entry_t **mclpp, int *count, pmap_t pmap,
    vm_offset_t va, vm_page_t m, vm_prot_t prot, vm_page_t mpte)
{
	pt_entry_t *pte;
	vm_paddr_t pa;
	vm_page_t free;
	multicall_entry_t *mcl = *mclpp;

	KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
	    (m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0,
	    ("pmap_enter_quick_locked: managed mapping within the clean submap"));
	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	PMAP_LOCK_ASSERT(pmap, MA_OWNED);

	/*
	 * In the case that a page table page is not
	 * resident, we are creating it here.
	 */
	if (va < VM_MAXUSER_ADDRESS) {
		unsigned ptepindex;
		pd_entry_t ptema;

		/*
		 * Calculate the pagetable page index.
		 */
		ptepindex = va >> PDRSHIFT;
		if (mpte && (mpte->pindex == ptepindex)) {
			mpte->wire_count++;
		} else {
			/*
			 * Get the page directory entry.
			 */
			ptema = pmap->pm_pdir[ptepindex];

			/*
			 * If the page table page is mapped, we just increment
			 * the wire count, and activate it.
			 */
			if (ptema & PG_V) {
				if (ptema & PG_PS)
					panic("pmap_enter_quick: unexpected mapping into 4MB page");
				mpte = PHYS_TO_VM_PAGE(xpmap_mtop(ptema) & PG_FRAME);
				mpte->wire_count++;
			} else {
				mpte = _pmap_allocpte(pmap, ptepindex,
				    M_NOWAIT);
				if (mpte == NULL)
					return (mpte);
			}
		}
	} else {
		mpte = NULL;
	}

	/*
	 * This call to vtopte makes the assumption that we are
	 * entering the page into the current pmap.  In order to support
	 * quick entry into any pmap, one would likely use pmap_pte_quick.
	 * But that isn't as quick as vtopte.
	 */
	KASSERT(pmap_is_current(pmap), ("entering pages in non-current pmap"));
	pte = vtopte(va);
	if (*pte & PG_V) {
		if (mpte != NULL) {
			mpte->wire_count--;
			mpte = NULL;
		}
		return (mpte);
	}

	/*
	 * Enter on the PV list if part of our managed memory.
	 */
	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0 &&
	    !pmap_try_insert_pv_entry(pmap, va, m)) {
		if (mpte != NULL) {
			free = NULL;
			if (pmap_unwire_pte_hold(pmap, mpte, &free)) {
				pmap_invalidate_page(pmap, va);
				pmap_free_zero_pages(free);
			}
			mpte = NULL;
		}
		return (mpte);
	}

	/*
	 * Increment the resident count.
	 */
	pmap->pm_stats.resident_count++;

	pa = VM_PAGE_TO_PHYS(m);
#ifdef PAE
	if ((prot & VM_PROT_EXECUTE) == 0)
		pa |= pg_nx;
#endif

#if 0
	/*
	 * Now validate mapping with RO protection
	 */
	if (m->flags & (PG_FICTITIOUS | PG_UNMANAGED))
		pte_store(pte, pa | PG_V | PG_U);
	else
		pte_store(pte, pa | PG_V | PG_U | PG_MANAGED);
#else
	/*
	 * Now validate the mapping with RO protection by queueing a
	 * va-mapping update for the hypervisor.
	 */
	if (m->flags & (PG_FICTITIOUS | PG_UNMANAGED))
		pa = xpmap_ptom(pa | PG_V | PG_U);
	else
		pa = xpmap_ptom(pa | PG_V | PG_U | PG_MANAGED);

	mcl->op = __HYPERVISOR_update_va_mapping;
	mcl->args[0] = va;
	mcl->args[1] = (uint32_t)(pa & 0xffffffff);
	mcl->args[2] = (uint32_t)(pa >> 32);
	mcl->args[3] = 0;
	*mclpp = mcl + 1;
	*count = *count + 1;
#endif
	return (mpte);
}

/*
 * Make a temporary mapping for a physical address.  This is only intended
 * to be used for panic dumps.
 */
void *
pmap_kenter_temporary(vm_paddr_t pa, int i)
{
	vm_offset_t va;

	va = (vm_offset_t)crashdumpmap + (i * PAGE_SIZE);
	pmap_kenter(va, pa);
	invlpg(va);
	return ((void *)crashdumpmap);
}
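
#if 0
/*
 * Illustrative sketch (not compiled): a dump routine might use
 * pmap_kenter_temporary() to walk physical memory one page at a time.
 * The names dump_physical_page() and dump_write() are hypothetical.
 */
static void
dump_physical_page(vm_paddr_t pa)
{
	void *va;

	va = pmap_kenter_temporary(pa, 0);	/* map into window slot 0 */
	dump_write(va, PAGE_SIZE);		/* consume the mapping */
}
#endif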

/*
 * This code maps large physical mmap regions into the
 * processor address space.  Note that some shortcuts
 * are taken, but the code works.
 */
void
pmap_object_init_pt(pmap_t pmap, vm_offset_t addr,
    vm_object_t object, vm_pindex_t pindex, vm_size_t size)
{
	vm_page_t p;

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	KASSERT(object->type == OBJT_DEVICE,
	    ("pmap_object_init_pt: non-device object"));
	if (pseflag &&
	    ((addr & (NBPDR - 1)) == 0) && ((size & (NBPDR - 1)) == 0)) {
		int i;
		vm_page_t m[1];
		unsigned int ptepindex;
		int npdes;
		pd_entry_t ptepa;

		PMAP_LOCK(pmap);
		if (pmap->pm_pdir[ptepindex = (addr >> PDRSHIFT)])
			goto out;
		PMAP_UNLOCK(pmap);
retry:
		p = vm_page_lookup(object, pindex);
		if (p != NULL) {
			if (vm_page_sleep_if_busy(p, FALSE, "init4p"))
				goto retry;
		} else {
			p = vm_page_alloc(object, pindex, VM_ALLOC_NORMAL);
			if (p == NULL)
				return;
			m[0] = p;

			if (vm_pager_get_pages(object, m, 1, 0) != VM_PAGER_OK) {
				vm_page_lock_queues();
				vm_page_free(p);
				vm_page_unlock_queues();
				return;
			}

			p = vm_page_lookup(object, pindex);
			vm_page_wakeup(p);
		}

		ptepa = VM_PAGE_TO_PHYS(p);
		if (ptepa & (NBPDR - 1))
			return;

		p->valid = VM_PAGE_BITS_ALL;

		PMAP_LOCK(pmap);
		pmap->pm_stats.resident_count += size >> PAGE_SHIFT;
		npdes = size >> PDRSHIFT;
		critical_enter();
		for (i = 0; i < npdes; i++) {
			PD_SET_VA(pmap, ptepindex,
			    ptepa | PG_U | PG_M | PG_RW | PG_V | PG_PS, FALSE);
			ptepa += NBPDR;
			ptepindex += 1;
		}
		pmap_invalidate_all(pmap);
		critical_exit();
out:
		PMAP_UNLOCK(pmap);
	}
}
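
/*
 * For reference, in pmap_object_init_pt() above: NBPDR is the superpage
 * size (4 MB without PAE, where PDRSHIFT is 22; 2 MB with PAE), so
 * mapping a 16 MB region on a non-PAE kernel installs
 * npdes = 16 MB >> 22 = 4 page directory entries, each marked PG_PS.
 */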

/*
 * Routine:	pmap_change_wiring
 * Function:	Change the wiring attribute for a map/virtual-address
 *		pair.
 * In/out conditions:
 *		The mapping must already exist in the pmap.
 */
void
pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired)
{
	pt_entry_t *pte;

	vm_page_lock_queues();
	PMAP_LOCK(pmap);
	pte = pmap_pte(pmap, va);

	if (wired && !pmap_pte_w(pte)) {
		PT_SET_VA_MA((pte), *(pte) | PG_W, TRUE);
		pmap->pm_stats.wired_count++;
	} else if (!wired && pmap_pte_w(pte)) {
		PT_SET_VA_MA((pte), *(pte) & ~PG_W, TRUE);
		pmap->pm_stats.wired_count--;
	}

	/*
	 * Wiring is not a hardware characteristic, so there is no need
	 * to invalidate the TLB.
	 */
	pmap_pte_release(pte);
	PMAP_UNLOCK(pmap);
	vm_page_unlock_queues();
}

/*
 * Copy the range specified by src_addr/len
 * from the source map to the range dst_addr/len
 * in the destination map.
 *
 * This routine is only advisory and need not do anything.
 */
void
pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
    vm_offset_t src_addr)
{
	vm_page_t free;
	vm_offset_t addr;
	vm_offset_t end_addr = src_addr + len;
	vm_offset_t pdnxt;

	if (dst_addr != src_addr)
		return;

	if (!pmap_is_current(src_pmap)) {
		CTR2(KTR_PMAP,
		    "pmap_copy, skipping: pdir[PTDPTDI]=0x%jx PTDpde[0]=0x%jx",
		    (src_pmap->pm_pdir[PTDPTDI] & PG_FRAME), (PTDpde[0] & PG_FRAME));
		return;
	}
	CTR5(KTR_PMAP, "pmap_copy: dst_pmap=%p src_pmap=%p dst_addr=0x%x len=%d src_addr=0x%x",
	    dst_pmap, src_pmap, dst_addr, len, src_addr);

	vm_page_lock_queues();
	/*
	 * Lock the two pmaps in address order so that two concurrent
	 * copies cannot deadlock against each other.
	 */
	if (dst_pmap < src_pmap) {
		PMAP_LOCK(dst_pmap);
		PMAP_LOCK(src_pmap);
	} else {
		PMAP_LOCK(src_pmap);
		PMAP_LOCK(dst_pmap);
	}
	sched_pin();
	for (addr = src_addr; addr < end_addr; addr = pdnxt) {
		pt_entry_t *src_pte, *dst_pte;
		vm_page_t dstmpte, srcmpte;
		pd_entry_t srcptepaddr;
		unsigned ptepindex;

		if (addr >= UPT_MIN_ADDRESS)
			panic("pmap_copy: invalid to pmap_copy page tables");

		pdnxt = (addr + NBPDR) & ~PDRMASK;
		ptepindex = addr >> PDRSHIFT;

		srcptepaddr = PT_GET(&src_pmap->pm_pdir[ptepindex]);
		if (srcptepaddr == 0)
			continue;

		if (srcptepaddr & PG_PS) {
			if (dst_pmap->pm_pdir[ptepindex] == 0) {
				PD_SET_VA(dst_pmap, ptepindex,
				    srcptepaddr & ~PG_W, TRUE);
				dst_pmap->pm_stats.resident_count +=
				    NBPDR / PAGE_SIZE;
			}
			continue;
		}

		srcmpte = PHYS_TO_VM_PAGE(srcptepaddr & PG_FRAME);
		if (srcmpte->wire_count == 0)
			panic("pmap_copy: source page table page is unused");

		if (pdnxt > end_addr)
			pdnxt = end_addr;

		src_pte = vtopte(addr);
		while (addr < pdnxt) {
			pt_entry_t ptetemp;

			ptetemp = *src_pte;
			/*
			 * We only virtual-copy managed pages.
			 */
			if ((ptetemp & PG_MANAGED) != 0) {
				dstmpte = pmap_allocpte(dst_pmap, addr,
				    M_NOWAIT);
				if (dstmpte == NULL)
					break;
				dst_pte = pmap_pte_quick(dst_pmap, addr);
				if (*dst_pte == 0 &&
				    pmap_try_insert_pv_entry(dst_pmap, addr,
				    PHYS_TO_VM_PAGE(xpmap_mtop(ptetemp) & PG_FRAME))) {
					/*
					 * Clear the wired, modified, and
					 * accessed (referenced) bits
					 * during the copy.
					 */
					KASSERT(ptetemp != 0, ("src_pte not set"));
					PT_SET_VA_MA(dst_pte,
					    ptetemp & ~(PG_W | PG_M | PG_A),
					    TRUE /* XXX debug */);
					KASSERT(*dst_pte == (ptetemp & ~(PG_W | PG_M | PG_A)),
					    ("no pmap copy expected: 0x%jx saw: 0x%jx",
					    ptetemp & ~(PG_W | PG_M | PG_A), *dst_pte));
					dst_pmap->pm_stats.resident_count++;
				} else {
					free = NULL;
					if (pmap_unwire_pte_hold(dst_pmap,
					    dstmpte, &free)) {
						pmap_invalidate_page(dst_pmap,
						    addr);
						pmap_free_zero_pages(free);
					}
				}
				if (dstmpte->wire_count >= srcmpte->wire_count)
					break;
			}
			addr += PAGE_SIZE;
			src_pte++;
		}
	}
	PT_UPDATES_FLUSH();
	sched_unpin();
	vm_page_unlock_queues();
	PMAP_UNLOCK(src_pmap);
	PMAP_UNLOCK(dst_pmap);
}

/*
 * pmap_zero_page zeros the specified hardware page by mapping
 * the page into KVM and using bzero to clear its contents.
 */
void
pmap_zero_page(vm_page_t m)
{
	struct sysmaps *sysmaps;

	sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)];
	mtx_lock(&sysmaps->lock);
	if (*sysmaps->CMAP2)
		panic("pmap_zero_page: CMAP2 busy");
	sched_pin();
	PT_SET_MA(sysmaps->CADDR2, PG_V | PG_RW |
	    xpmap_ptom(VM_PAGE_TO_PHYS(m)) | PG_A | PG_M);
	pagezero(sysmaps->CADDR2);
	PT_SET_MA(sysmaps->CADDR2, 0);
	sched_unpin();
	mtx_unlock(&sysmaps->lock);
}
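
/*
 * The zero/copy helpers in this group (pmap_zero_page() above and the
 * variants below) share a pattern: each CPU owns reserved kernel PTEs
 * (CMAP1/CMAP2) with matching mapped addresses (CADDR1/CADDR2).  A
 * helper pins the thread to its CPU, points the reserved PTE at the
 * target page's machine frame via PT_SET_MA(), performs the
 * pagezero/bzero/bcopy through the window, and then clears the PTE
 * again so the window is free for the next user; the "busy" panics
 * catch a window that was left mapped.
 */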

/*
 * pmap_zero_page_area zeros the specified hardware page by mapping
 * the page into KVM and using bzero to clear its contents.
 *
 * off and size may not cover an area beyond a single hardware page.
 */
void
pmap_zero_page_area(vm_page_t m, int off, int size)
{
	struct sysmaps *sysmaps;

	sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)];
	mtx_lock(&sysmaps->lock);
	if (*sysmaps->CMAP2)
		panic("pmap_zero_page_area: CMAP2 busy");
	sched_pin();
	PT_SET_MA(sysmaps->CADDR2, PG_V | PG_RW |
	    xpmap_ptom(VM_PAGE_TO_PHYS(m)) | PG_A | PG_M);

	if (off == 0 && size == PAGE_SIZE)
		pagezero(sysmaps->CADDR2);
	else
		bzero((char *)sysmaps->CADDR2 + off, size);
	PT_SET_MA(sysmaps->CADDR2, 0);
	sched_unpin();
	mtx_unlock(&sysmaps->lock);
}

/*
 * pmap_zero_page_idle zeros the specified hardware page by mapping
 * the page into KVM and using bzero to clear its contents.  This
 * is intended to be called from the vm_pagezero process only and
 * outside of Giant.
 */
void
pmap_zero_page_idle(vm_page_t m)
{

	if (*CMAP3)
		panic("pmap_zero_page_idle: CMAP3 busy");
	sched_pin();
	PT_SET_MA(CADDR3, PG_V | PG_RW |
	    xpmap_ptom(VM_PAGE_TO_PHYS(m)) | PG_A | PG_M);
	pagezero(CADDR3);
	PT_SET_MA(CADDR3, 0);
	sched_unpin();
}

/*
 * pmap_copy_page copies the specified (machine independent)
 * page by mapping the page into virtual memory and using
 * bcopy to copy the page, one machine dependent page at a
 * time.
 */
void
pmap_copy_page(vm_page_t src, vm_page_t dst)
{
	struct sysmaps *sysmaps;

	sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)];
	mtx_lock(&sysmaps->lock);
	if (*sysmaps->CMAP1)
		panic("pmap_copy_page: CMAP1 busy");
	if (*sysmaps->CMAP2)
		panic("pmap_copy_page: CMAP2 busy");
	sched_pin();
	PT_SET_MA(sysmaps->CADDR1, PG_V |
	    xpmap_ptom(VM_PAGE_TO_PHYS(src)) | PG_A);
	PT_SET_MA(sysmaps->CADDR2, PG_V | PG_RW |
	    xpmap_ptom(VM_PAGE_TO_PHYS(dst)) | PG_A | PG_M);
	bcopy(sysmaps->CADDR1, sysmaps->CADDR2, PAGE_SIZE);
	PT_SET_MA(sysmaps->CADDR1, 0);
	PT_SET_MA(sysmaps->CADDR2, 0);
	sched_unpin();
	mtx_unlock(&sysmaps->lock);
}

/*
 * Returns true if the pmap's pv is one of the first
 * 16 pvs linked to from this page.  This count may
 * be changed upwards or downwards in the future; it
 * is only necessary that true be returned for a small
 * subset of pmaps for proper page aging.
 */
boolean_t
pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
{
	pv_entry_t pv;
	int loops = 0;

	if (m->flags & PG_FICTITIOUS)
		return (FALSE);

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
		if (PV_PMAP(pv) == pmap)
			return (TRUE);
		loops++;
		if (loops >= 16)
			break;
	}
	return (FALSE);
}

/*
 * pmap_page_wired_mappings:
 *
 *	Return the number of managed mappings to the given physical page
 *	that are wired.
 */
int
pmap_page_wired_mappings(vm_page_t m)
{
	pv_entry_t pv;
	pt_entry_t *pte;
	pmap_t pmap;
	int count;

	count = 0;
	if ((m->flags & PG_FICTITIOUS) != 0)
		return (count);
	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	sched_pin();
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
		pmap = PV_PMAP(pv);
		PMAP_LOCK(pmap);
		pte = pmap_pte_quick(pmap, pv->pv_va);
		if ((*pte & PG_W) != 0)
			count++;
		PMAP_UNLOCK(pmap);
	}
	sched_unpin();
	return (count);
}

/*
 * Returns TRUE if the given page is mapped individually or as part of
 * a 4mpage.  Otherwise, returns FALSE.
 */
boolean_t
pmap_page_is_mapped(vm_page_t m)
{
	struct md_page *pvh;

	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
		return (FALSE);
	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	if (TAILQ_EMPTY(&m->md.pv_list)) {
		pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
		return (!TAILQ_EMPTY(&pvh->pv_list));
	} else
		return (TRUE);
}
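
/*
 * pmap_remove_pages() below walks the pmap's pv chunks directly rather
 * than the per-page pv lists.  Each chunk carries a free bitmap in
 * pc_map[]; inverting a 32-bit word and masking it with pc_freemask[]
 * yields the allocated entries, bsfl() selects each set bit in turn,
 * and the pv entry index is recovered as field * 32 + bit.
 */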

/*
 * Remove all pages from the specified address space; this aids process
 * exit speeds.  Also, this code is special cased for the current process
 * only, but can have the more generic (and slightly slower) mode enabled.
 * This is much faster than pmap_remove in the case of running down an
 * entire address space.
 */
void
pmap_remove_pages(pmap_t pmap)
{
	pt_entry_t *pte, tpte;
	vm_page_t m, free = NULL;
	pv_entry_t pv;
	struct pv_chunk *pc, *npc;
	int field, idx;
	int32_t bit;
	uint32_t inuse, bitmask;
	int allfree;

	CTR1(KTR_PMAP, "pmap_remove_pages: pmap=%p", pmap);

	if (pmap != vmspace_pmap(curthread->td_proc->p_vmspace)) {
		printf("warning: pmap_remove_pages called with non-current pmap\n");
		return;
	}
	vm_page_lock_queues();
	KASSERT(pmap_is_current(pmap), ("removing pages from non-current pmap"));
	PMAP_LOCK(pmap);
	sched_pin();
	TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) {
		allfree = 1;
		for (field = 0; field < _NPCM; field++) {
			inuse = (~(pc->pc_map[field])) & pc_freemask[field];
			while (inuse != 0) {
				bit = bsfl(inuse);
				bitmask = 1UL << bit;
				idx = field * 32 + bit;
				pv = &pc->pc_pventry[idx];
				inuse &= ~bitmask;

				pte = vtopte(pv->pv_va);
				tpte = *pte ? xpmap_mtop(*pte) : 0;

				if (tpte == 0) {
					printf(
					    "TPTE at %p IS ZERO @ VA %08x\n",
					    pte, pv->pv_va);
					panic("bad pte");
				}

				/*
				 * We cannot remove wired pages from a
				 * process' mapping at this time.
				 */
				if (tpte & PG_W) {
					allfree = 0;
					continue;
				}

				m = PHYS_TO_VM_PAGE(tpte & PG_FRAME);
				KASSERT(m->phys_addr == (tpte & PG_FRAME),
				    ("vm_page_t %p phys_addr mismatch %016jx %016jx",
				    m, (uintmax_t)m->phys_addr,
				    (uintmax_t)tpte));

				KASSERT(m < &vm_page_array[vm_page_array_size],
				    ("pmap_remove_pages: bad tpte %#jx",
				    (uintmax_t)tpte));

				PT_CLEAR_VA(pte, FALSE);

				/*
				 * Update the vm_page_t clean/reference bits.
				 */
				if (tpte & PG_M)
					vm_page_dirty(m);

				TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
				if (TAILQ_EMPTY(&m->md.pv_list))
					vm_page_flag_clear(m, PG_WRITEABLE);

				pmap_unuse_pt(pmap, pv->pv_va, &free);

				/* Mark the pv entry free. */
				PV_STAT(pv_entry_frees++);
				PV_STAT(pv_entry_spare++);
				pv_entry_count--;
				pc->pc_map[field] |= bitmask;
				pmap->pm_stats.resident_count--;
			}
		}
		PT_UPDATES_FLUSH();
		if (allfree) {
			PV_STAT(pv_entry_spare -= _NPCPV);
			PV_STAT(pc_chunk_count--);
			PV_STAT(pc_chunk_frees++);
			TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
			m = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)pc));
			pmap_qremove((vm_offset_t)pc, 1);
			vm_page_unwire(m, 0);
			vm_page_free(m);
			pmap_ptelist_free(&pv_vafree, (vm_offset_t)pc);
		}
	}
	PT_UPDATES_FLUSH();
	if (*PMAP1)
		PT_SET_MA(PADDR1, 0);

	sched_unpin();
	pmap_invalidate_all(pmap);
	vm_page_unlock_queues();
	PMAP_UNLOCK(pmap);
	pmap_free_zero_pages(free);
}

/*
 *	pmap_is_modified:
 *
 *	Return whether or not the specified physical page was modified
 *	in any physical maps.
 */
boolean_t
pmap_is_modified(vm_page_t m)
{
	pv_entry_t pv;
	pt_entry_t *pte;
	pmap_t pmap;
	boolean_t rv;

	rv = FALSE;
	if (m->flags & PG_FICTITIOUS)
		return (rv);

	sched_pin();
	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
		pmap = PV_PMAP(pv);
		PMAP_LOCK(pmap);
		pte = pmap_pte_quick(pmap, pv->pv_va);
		rv = (*pte & PG_M) != 0;
		PMAP_UNLOCK(pmap);
		if (rv)
			break;
	}
	if (*PMAP1)
		PT_SET_MA(PADDR1, 0);
	sched_unpin();
	return (rv);
}
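
/*
 * A note on the "if (*PMAP1) PT_SET_MA(PADDR1, 0)" idiom used above and
 * elsewhere in this file: pmap_pte_quick() borrows the reserved
 * PMAP1/PADDR1 PTE window, which is only valid while sched_pin() is in
 * effect, to reach page tables of non-current pmaps.  The window is
 * torn down before unpinning so that a stale mapping is never left
 * behind for the next user of the window.
 */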

/*
 *	pmap_is_prefaultable:
 *
 *	Return whether or not the specified virtual address is eligible
 *	for prefault.
 */
static boolean_t
pmap_is_prefaultable_locked(pmap_t pmap, vm_offset_t addr)
{
	pt_entry_t *pte;
	boolean_t rv = FALSE;

	/*
	 * Prefaulting is currently disabled here: the unconditional
	 * return below short-circuits the real check.
	 */
	return (rv);

	if (pmap_is_current(pmap) && *pmap_pde(pmap, addr)) {
		pte = vtopte(addr);
		rv = (*pte == 0);
	}
	return (rv);
}

boolean_t
pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
{
	boolean_t rv;

	PMAP_LOCK(pmap);
	rv = pmap_is_prefaultable_locked(pmap, addr);
	PMAP_UNLOCK(pmap);
	return (rv);
}

void
pmap_map_readonly(pmap_t pmap, vm_offset_t va, int len)
{
	int i, npages = round_page(len) >> PAGE_SHIFT;

	for (i = 0; i < npages; i++) {
		pt_entry_t *pte;

		pte = pmap_pte(pmap, (vm_offset_t)(va + i * PAGE_SIZE));
		pte_store(pte, xpmap_mtop(*pte & ~(PG_RW | PG_M)));
		PMAP_MARK_PRIV(xpmap_mtop(*pte));
		pmap_pte_release(pte);
	}
}

void
pmap_map_readwrite(pmap_t pmap, vm_offset_t va, int len)
{
	int i, npages = round_page(len) >> PAGE_SHIFT;

	for (i = 0; i < npages; i++) {
		pt_entry_t *pte;

		pte = pmap_pte(pmap, (vm_offset_t)(va + i * PAGE_SIZE));
		PMAP_MARK_UNPRIV(xpmap_mtop(*pte));
		pte_store(pte, xpmap_mtop(*pte) | (PG_RW | PG_M));
		pmap_pte_release(pte);
	}
}
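
/*
 * pmap_map_readonly()/pmap_map_readwrite() appear to exist for the
 * benefit of the Xen port: pages handed to the hypervisor (for example
 * as page-table pages) must not remain guest-writable, so the first
 * routine strips PG_RW/PG_M and flags the machine frame via
 * PMAP_MARK_PRIV(), and the second reverses the process.  The precise
 * consumers of these helpers live outside this file.
 */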

/*
 * Clear the write and modified bits in each of the given page's mappings.
 */
void
pmap_remove_write(vm_page_t m)
{
	pv_entry_t pv;
	pmap_t pmap;
	pt_entry_t oldpte, *pte;

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	if ((m->flags & PG_FICTITIOUS) != 0 ||
	    (m->flags & PG_WRITEABLE) == 0)
		return;
	sched_pin();
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
		pmap = PV_PMAP(pv);
		PMAP_LOCK(pmap);
		pte = pmap_pte_quick(pmap, pv->pv_va);
retry:
		oldpte = *pte;
		if ((oldpte & PG_RW) != 0) {
			vm_paddr_t newpte = oldpte & ~(PG_RW | PG_M);

			/*
			 * Regardless of whether a pte is 32 or 64 bits
			 * in size, PG_RW and PG_M are among the least
			 * significant 32 bits.
			 */
			PT_SET_VA_MA(pte, newpte, TRUE);
			if (*pte != newpte)
				goto retry;

			if ((oldpte & PG_M) != 0)
				vm_page_dirty(m);
			pmap_invalidate_page(pmap, pv->pv_va);
		}
		PMAP_UNLOCK(pmap);
	}
	vm_page_flag_clear(m, PG_WRITEABLE);
	PT_UPDATES_FLUSH();
	if (*PMAP1)
		PT_SET_MA(PADDR1, 0);
	sched_unpin();
}

/*
 * pmap_ts_referenced:
 *
 *	Return a count of reference bits for a page, clearing those bits.
 *	It is not necessary for every reference bit to be cleared, but it
 *	is necessary that 0 only be returned when there are truly no
 *	reference bits set.
 *
 *	XXX: The exact number of bits to check and clear is a matter that
 *	should be tested and standardized at some point in the future for
 *	optimal aging of shared pages.
 */
int
pmap_ts_referenced(vm_page_t m)
{
	pv_entry_t pv, pvf, pvn;
	pmap_t pmap;
	pt_entry_t *pte;
	int rtval = 0;

	if (m->flags & PG_FICTITIOUS)
		return (rtval);
	sched_pin();
	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	if ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
		pvf = pv;
		do {
			pvn = TAILQ_NEXT(pv, pv_list);
			TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
			TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
			pmap = PV_PMAP(pv);
			PMAP_LOCK(pmap);
			pte = pmap_pte_quick(pmap, pv->pv_va);
			if ((*pte & PG_A) != 0) {
				PT_SET_VA_MA(pte, *pte & ~PG_A, FALSE);
				pmap_invalidate_page(pmap, pv->pv_va);
				rtval++;
				if (rtval > 4)
					pvn = NULL;
			}
			PMAP_UNLOCK(pmap);
		} while ((pv = pvn) != NULL && pv != pvf);
	}
	PT_UPDATES_FLUSH();
	if (*PMAP1)
		PT_SET_MA(PADDR1, 0);

	sched_unpin();
	return (rtval);
}
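
/*
 * In pmap_ts_referenced() above, each pv entry is rotated to the tail
 * of the page's pv list before its PG_A bit is examined, so successive
 * calls sample different mappings of a heavily shared page; the scan
 * also stops early once more than four referenced mappings have been
 * counted.
 */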

/*
 * Clear the modify bits on the specified physical page.
 */
void
pmap_clear_modify(vm_page_t m)
{
	pv_entry_t pv;
	pmap_t pmap;
	pt_entry_t *pte;

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	if ((m->flags & PG_FICTITIOUS) != 0)
		return;
	sched_pin();
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
		pmap = PV_PMAP(pv);
		PMAP_LOCK(pmap);
		pte = pmap_pte_quick(pmap, pv->pv_va);
		if ((*pte & PG_M) != 0) {
			/*
			 * Regardless of whether a pte is 32 or 64 bits
			 * in size, PG_M is among the least significant
			 * 32 bits.
			 */
			PT_SET_VA_MA(pte, *pte & ~PG_M, FALSE);
			pmap_invalidate_page(pmap, pv->pv_va);
		}
		PMAP_UNLOCK(pmap);
	}
	sched_unpin();
}

/*
 * pmap_clear_reference:
 *
 *	Clear the reference bit on the specified physical page.
 */
void
pmap_clear_reference(vm_page_t m)
{
	pv_entry_t pv;
	pmap_t pmap;
	pt_entry_t *pte;

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	if ((m->flags & PG_FICTITIOUS) != 0)
		return;
	sched_pin();
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
		pmap = PV_PMAP(pv);
		PMAP_LOCK(pmap);
		pte = pmap_pte_quick(pmap, pv->pv_va);
		if ((*pte & PG_A) != 0) {
			/*
			 * Regardless of whether a pte is 32 or 64 bits
			 * in size, PG_A is among the least significant
			 * 32 bits.
			 */
			PT_SET_VA_MA(pte, *pte & ~PG_A, FALSE);
			pmap_invalidate_page(pmap, pv->pv_va);
		}
		PMAP_UNLOCK(pmap);
	}
	sched_unpin();
}

/*
 * Miscellaneous support routines follow.
 */

/*
 * Map a set of physical memory pages into the kernel virtual
 * address space.  Return a pointer to where it is mapped.  This
 * routine is intended to be used for mapping device memory,
 * NOT real memory.
 */
void *
pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, int mode)
{
	vm_offset_t va, tmpva, offset;

	offset = pa & PAGE_MASK;
	size = roundup(offset + size, PAGE_SIZE);
	pa = pa & PG_FRAME;

	if (pa < KERNLOAD && pa + size <= KERNLOAD)
		va = KERNBASE + pa;
	else
		va = kmem_alloc_nofault(kernel_map, size);
	if (!va)
		panic("pmap_mapdev: Couldn't alloc kernel virtual memory");

	for (tmpva = va; size > 0; ) {
		pmap_kenter_attr(tmpva, pa, mode);
		size -= PAGE_SIZE;
		tmpva += PAGE_SIZE;
		pa += PAGE_SIZE;
	}
	pmap_invalidate_range(kernel_pmap, va, tmpva);
	pmap_invalidate_cache();
	return ((void *)(va + offset));
}

void *
pmap_mapdev(vm_paddr_t pa, vm_size_t size)
{

	return (pmap_mapdev_attr(pa, size, PAT_UNCACHEABLE));
}

void *
pmap_mapbios(vm_paddr_t pa, vm_size_t size)
{

	return (pmap_mapdev_attr(pa, size, PAT_WRITE_BACK));
}

void
pmap_unmapdev(vm_offset_t va, vm_size_t size)
{
	vm_offset_t base, offset, tmpva;

	if (va >= KERNBASE && va + size <= KERNBASE + KERNLOAD)
		return;
	base = trunc_page(va);
	offset = va & PAGE_MASK;
	size = roundup(offset + size, PAGE_SIZE);
	critical_enter();
	for (tmpva = base; tmpva < (base + size); tmpva += PAGE_SIZE)
		pmap_kremove(tmpva);
	pmap_invalidate_range(kernel_pmap, va, tmpva);
	critical_exit();
	kmem_free(kernel_map, base, size);
}
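
#if 0
/*
 * Illustrative sketch (not compiled): a typical device driver maps a
 * register window with pmap_mapdev() and releases it symmetrically.
 * The names map_device_registers(), REG_BAR_PA, and REG_BAR_SIZE are
 * hypothetical.
 */
static void
map_device_registers(void)
{
	void *regs;

	regs = pmap_mapdev(REG_BAR_PA, REG_BAR_SIZE);	/* uncacheable */
	/* ... access the registers through 'regs' ... */
	pmap_unmapdev((vm_offset_t)regs, REG_BAR_SIZE);
}
#endif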

int
pmap_change_attr(vm_offset_t va, vm_size_t size, int mode)
{
	vm_offset_t base, offset, tmpva;
	pt_entry_t *pte;
	u_int opte, npte;
	pd_entry_t *pde;

	base = trunc_page(va);
	offset = va & PAGE_MASK;
	size = roundup(offset + size, PAGE_SIZE);

	/* Only supported on kernel virtual addresses. */
	if (base <= VM_MAXUSER_ADDRESS)
		return (EINVAL);

	/* 4MB pages and pages that aren't mapped aren't supported. */
	for (tmpva = base; tmpva < (base + size); tmpva += PAGE_SIZE) {
		pde = pmap_pde(kernel_pmap, tmpva);
		if (*pde & PG_PS)
			return (EINVAL);
		if ((*pde & PG_V) == 0)
			return (EINVAL);
		pte = vtopte(tmpva);
		if ((*pte & PG_V) == 0)
			return (EINVAL);
	}

	/*
	 * Ok, all the pages exist and are 4k, so run through them updating
	 * their cache mode.
	 */
	for (tmpva = base; size > 0; ) {
		pte = vtopte(tmpva);

		/*
		 * The cache mode bits are all in the low 32-bits of the
		 * PTE, so we can just spin on updating the low 32-bits.
		 */
		do {
			opte = *(u_int *)pte;
			npte = opte & ~(PG_PTE_PAT | PG_NC_PCD | PG_NC_PWT);
			npte |= pmap_cache_bits(mode, 0);
			PT_SET_VA_MA(pte, npte, TRUE);
		} while (npte != opte && (*pte != npte));
		tmpva += PAGE_SIZE;
		size -= PAGE_SIZE;
	}

	/*
	 * Flush CPU caches to make sure any data isn't cached that shouldn't
	 * be, etc.
	 */
	pmap_invalidate_range(kernel_pmap, base, tmpva);
	pmap_invalidate_cache();
	return (0);
}
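
#if 0
/*
 * Illustrative sketch (not compiled): switching an existing kernel
 * mapping to uncacheable, e.g. for a frame buffer.  The names
 * make_framebuffer_uncacheable(), fb_va, and fb_size are hypothetical.
 */
static int
make_framebuffer_uncacheable(vm_offset_t fb_va, vm_size_t fb_size)
{

	return (pmap_change_attr(fb_va, fb_size, PAT_UNCACHEABLE));
}
#endif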

/*
 * Perform the pmap work for mincore.
 */
int
pmap_mincore(pmap_t pmap, vm_offset_t addr)
{
	pt_entry_t *ptep, pte;
	vm_page_t m;
	int val = 0;

	PMAP_LOCK(pmap);
	ptep = pmap_pte(pmap, addr);
	pte = (ptep != NULL) ? PT_GET(ptep) : 0;
	pmap_pte_release(ptep);
	PMAP_UNLOCK(pmap);

	if (pte != 0) {
		vm_paddr_t pa;

		val = MINCORE_INCORE;
		if ((pte & PG_MANAGED) == 0)
			return (val);

		pa = pte & PG_FRAME;

		m = PHYS_TO_VM_PAGE(pa);

		/*
		 * Modified by us
		 */
		if (pte & PG_M)
			val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
		else {
			/*
			 * Modified by someone else
			 */
			vm_page_lock_queues();
			if (m->dirty || pmap_is_modified(m))
				val |= MINCORE_MODIFIED_OTHER;
			vm_page_unlock_queues();
		}
		/*
		 * Referenced by us
		 */
		if (pte & PG_A)
			val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
		else {
			/*
			 * Referenced by someone else
			 */
			vm_page_lock_queues();
			if ((m->flags & PG_REFERENCED) ||
			    pmap_ts_referenced(m)) {
				val |= MINCORE_REFERENCED_OTHER;
				vm_page_flag_set(m, PG_REFERENCED);
			}
			vm_page_unlock_queues();
		}
	}
	return (val);
}

void
pmap_activate(struct thread *td)
{
	pmap_t pmap, oldpmap;
	u_int32_t cr3;

	critical_enter();
	pmap = vmspace_pmap(td->td_proc->p_vmspace);
	oldpmap = PCPU_GET(curpmap);
#if defined(SMP)
	atomic_clear_int(&oldpmap->pm_active, PCPU_GET(cpumask));
	atomic_set_int(&pmap->pm_active, PCPU_GET(cpumask));
#else
	oldpmap->pm_active &= ~1;
	pmap->pm_active |= 1;
#endif
#ifdef PAE
	cr3 = vtophys(pmap->pm_pdpt);
#else
	cr3 = vtophys(pmap->pm_pdir);
#endif
	/*
	 * pmap_activate is for the current thread on the current cpu.
	 */
	td->td_pcb->pcb_cr3 = cr3;
	PT_UPDATES_FLUSH();
	load_cr3(cr3);

	PCPU_SET(curpmap, pmap);
	critical_exit();
}
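
/*
 * For the alignment logic below: with 4 MB superpages (NBPDR = 4 MB,
 * PDRMASK = 0x3fffff; 2 MB under PAE), an object whose relevant offset
 * within a superpage is, say, 0x155000 is best mapped at a virtual
 * address whose low-order superpage bits are also 0x155000.  The
 * routine nudges *addr up to the next such address whenever the mapping
 * is large enough to gain a superpage from the move.
 */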

/*
 * Increase the starting virtual address of the given mapping if a
 * different alignment might result in more superpage mappings.
 */
void
pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
    vm_offset_t *addr, vm_size_t size)
{
	vm_offset_t superpage_offset;

	if (size < NBPDR)
		return;
	if (object != NULL && (object->flags & OBJ_COLORED) != 0)
		offset += ptoa(object->pg_color);
	superpage_offset = offset & PDRMASK;
	if (size - ((NBPDR - superpage_offset) & PDRMASK) < NBPDR ||
	    (*addr & PDRMASK) == superpage_offset)
		return;
	if ((*addr & PDRMASK) < superpage_offset)
		*addr = (*addr & ~PDRMASK) + superpage_offset;
	else
		*addr = ((*addr + PDRMASK) & ~PDRMASK) + superpage_offset;
}

#ifdef XEN

void
pmap_suspend(void)
{
	pmap_t pmap;
	int i, pdir, offset;
	vm_paddr_t pdirma;
	mmu_update_t mu[4];

	/*
	 * We need to remove the recursive mapping structure from all
	 * our pmaps so that Xen doesn't get confused when it restores
	 * the page tables.  The recursive map lives at page directory
	 * index PTDPTDI.  We assume that the suspend code has stopped
	 * the other vcpus (if any).
	 */
	LIST_FOREACH(pmap, &allpmaps, pm_list) {
		for (i = 0; i < 4; i++) {
			/*
			 * Figure out which page directory (L2) page
			 * contains this bit of the recursive map and
			 * the offset within that page of the map
			 * entry.
			 */
			pdir = (PTDPTDI + i) / NPDEPG;
			offset = (PTDPTDI + i) % NPDEPG;
			pdirma = pmap->pm_pdpt[pdir] & PG_FRAME;
			mu[i].ptr = pdirma + offset * sizeof(pd_entry_t);
			mu[i].val = 0;
		}
		HYPERVISOR_mmu_update(mu, 4, NULL, DOMID_SELF);
	}
}
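
/*
 * For the arithmetic above and below: NPDEPG page directory entries fit
 * in one L2 page, so recursive-map slot PTDPTDI + i lives in L2 page
 * (PTDPTDI + i) / NPDEPG at entry (PTDPTDI + i) % NPDEPG.  The loop
 * patches the four consecutive recursive-map slots through a single
 * HYPERVISOR_mmu_update() batch per pmap.
 */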

void
pmap_resume(void)
{
	pmap_t pmap;
	int i, pdir, offset;
	vm_paddr_t pdirma;
	mmu_update_t mu[4];

	/*
	 * Restore the recursive map that we removed on suspend.
	 */
	LIST_FOREACH(pmap, &allpmaps, pm_list) {
		for (i = 0; i < 4; i++) {
			/*
			 * Figure out which page directory (L2) page
			 * contains this bit of the recursive map and
			 * the offset within that page of the map
			 * entry.
			 */
			pdir = (PTDPTDI + i) / NPDEPG;
			offset = (PTDPTDI + i) % NPDEPG;
			pdirma = pmap->pm_pdpt[pdir] & PG_FRAME;
			mu[i].ptr = pdirma + offset * sizeof(pd_entry_t);
			mu[i].val = (pmap->pm_pdpt[i] & PG_FRAME) | PG_V;
		}
		HYPERVISOR_mmu_update(mu, 4, NULL, DOMID_SELF);
	}
}

#endif

#if defined(PMAP_DEBUG)
int
pmap_pid_dump(int pid)
{
	pmap_t pmap;
	struct proc *p;
	int npte = 0;
	int index;

	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		if (p->p_pid != pid)
			continue;

		if (p->p_vmspace) {
			int i, j;

			index = 0;
			pmap = vmspace_pmap(p->p_vmspace);
			for (i = 0; i < NPDEPTD; i++) {
				pd_entry_t *pde;
				pt_entry_t *pte;
				vm_offset_t base = i << PDRSHIFT;

				pde = &pmap->pm_pdir[i];
				if (pde && pmap_pde_v(pde)) {
					for (j = 0; j < NPTEPG; j++) {
						vm_offset_t va = base +
						    (j << PAGE_SHIFT);
						if (va >= (vm_offset_t)VM_MIN_KERNEL_ADDRESS) {
							if (index) {
								index = 0;
								printf("\n");
							}
							sx_sunlock(&allproc_lock);
							return (npte);
						}
						pte = pmap_pte(pmap, va);
						if (pte && pmap_pte_v(pte)) {
							pt_entry_t pa;
							vm_page_t m;

							pa = PT_GET(pte);
							m = PHYS_TO_VM_PAGE(pa & PG_FRAME);
							printf("va: 0x%x, pt: 0x%x, h: %d, w: %d, f: 0x%x",
							    va, pa, m->hold_count,
							    m->wire_count, m->flags);
							npte++;
							index++;
							if (index >= 2) {
								index = 0;
								printf("\n");
							} else {
								printf(" ");
							}
						}
					}
				}
			}
		}
	}
	sx_sunlock(&allproc_lock);
	return (npte);
}
#endif

#if defined(DEBUG)

static void pads(pmap_t pm);
void pmap_pvdump(vm_paddr_t pa);

/* Print the address space of a pmap. */
static void
pads(pmap_t pm)
{
	int i, j;
	vm_paddr_t va;
	pt_entry_t *ptep;

	if (pm == kernel_pmap)
		return;
	for (i = 0; i < NPDEPTD; i++)
		if (pm->pm_pdir[i])
			for (j = 0; j < NPTEPG; j++) {
				va = (i << PDRSHIFT) + (j << PAGE_SHIFT);
				if (pm == kernel_pmap && va < KERNBASE)
					continue;
				if (pm != kernel_pmap && va > UPT_MAX_ADDRESS)
					continue;
				ptep = pmap_pte(pm, va);
				if (pmap_pte_v(ptep))
					printf("%x:%x ", va, *ptep);
			}
}

void
pmap_pvdump(vm_paddr_t pa)
{
	pv_entry_t pv;
	pmap_t pmap;
	vm_page_t m;

	printf("pa %x", pa);
	m = PHYS_TO_VM_PAGE(pa);
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
		pmap = PV_PMAP(pv);
		printf(" -> pmap %p, va %x", (void *)pmap, pv->pv_va);
		pads(pmap);
	}
	printf(" ");
}
#endif