/*-
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 * Copyright (c) 2005-2010 Alan L. Cox <alc@cs.rice.edu>
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from:	@(#)pmap.c	7.7 (Berkeley)	5/12/91
 */
/*-
 * Copyright (c) 2003 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Jake Burkholder,
 * Safeport Network Services, and Network Associates Laboratories, the
 * Security Research Division of Network Associates, Inc. under
 * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA
 * CHATS research program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/i386/i386/pmap.c 320429 2017-06-28 04:01:29Z alc $");

/*
 *	Manages physical address maps.
 *
 *	Since the information managed by this module is
 *	also stored by the logical address mapping module,
 *	this module may throw away valid virtual-to-physical
 *	mappings at almost any time.  However, invalidations
 *	of virtual-to-physical mappings must be done as
 *	requested.
 *
 *	In order to cope with hardware architectures that
 *	make virtual-to-physical map invalidations expensive,
 *	this module may delay invalidation or reduced-protection
 *	operations until they are actually necessary.  This
 *	module is given full information as to which processors
 *	are currently using which maps, and when physical maps
 *	must be made correct.
 */

#include "opt_apic.h"
#include "opt_cpu.h"
#include "opt_pmap.h"
#include "opt_smp.h"
#include "opt_xbox.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/sf_buf.h>
#include <sys/sx.h>
#include <sys/vmmeter.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/smp.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_phys.h>
#include <vm/vm_radix.h>
#include <vm/vm_reserv.h>
#include <vm/uma.h>

#ifdef DEV_APIC
#include <sys/bus.h>
#include <machine/intr_machdep.h>
#include <x86/apicvar.h>
#endif
#include <machine/cpu.h>
#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/specialreg.h>
#ifdef SMP
#include <machine/smp.h>
#endif

#ifdef XBOX
#include <machine/xbox.h>
#endif

#ifndef PMAP_SHPGPERPROC
#define PMAP_SHPGPERPROC 200
#endif

#if !defined(DIAGNOSTIC)
#ifdef __GNUC_GNU_INLINE__
#define PMAP_INLINE	__attribute__((__gnu_inline__)) inline
#else
#define PMAP_INLINE	extern inline
#endif
#else
#define PMAP_INLINE
#endif

#ifdef PV_STATS
#define PV_STAT(x)	do { x ; } while (0)
#else
#define PV_STAT(x)	do { } while (0)
#endif

#define	pa_index(pa)	((pa) >> PDRSHIFT)
#define	pa_to_pvh(pa)	(&pv_table[pa_index(pa)])

/*
 * Get PDEs and PTEs for user/kernel address space
 */
#define	pmap_pde(m, v)	(&((m)->pm_pdir[(vm_offset_t)(v) >> PDRSHIFT]))
#define pdir_pde(m, v) (m[(vm_offset_t)(v) >> PDRSHIFT])

#define pmap_pde_v(pte)		((*(int *)pte & PG_V) != 0)
#define pmap_pte_w(pte)		((*(int *)pte & PG_W) != 0)
#define pmap_pte_m(pte)		((*(int *)pte & PG_M) != 0)
#define pmap_pte_u(pte)		((*(int *)pte & PG_A) != 0)
#define pmap_pte_v(pte)		((*(int *)pte & PG_V) != 0)

#define pmap_pte_set_w(pte, v)	((v) ? atomic_set_int((u_int *)(pte), PG_W) : \
    atomic_clear_int((u_int *)(pte), PG_W))
#define pmap_pte_set_prot(pte, v) ((*(int *)pte &= ~PG_PROT), (*(int *)pte |= (v)))

struct pmap kernel_pmap_store;
LIST_HEAD(pmaplist, pmap);
static struct pmaplist allpmaps;
static struct mtx allpmaps_lock;

vm_offset_t virtual_avail;	/* VA of first avail page (after kernel bss) */
vm_offset_t virtual_end;	/* VA of last avail page (end of kernel AS) */
int pgeflag = 0;		/* PG_G or-in */
int pseflag = 0;		/* PG_PS or-in */

static int nkpt = NKPT;
vm_offset_t kernel_vm_end = KERNBASE + NKPT * NBPDR;
extern u_int32_t KERNend;
extern u_int32_t KPTphys;

#if defined(PAE) || defined(PAE_TABLES)
pt_entry_t pg_nx;
static uma_zone_t pdptzone;
#endif

static SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD, 0, "VM/pmap parameters");

static int pat_works = 1;
SYSCTL_INT(_vm_pmap, OID_AUTO, pat_works, CTLFLAG_RD, &pat_works, 1,
    "Is page attribute table fully functional?");

static int pg_ps_enabled = 1;
SYSCTL_INT(_vm_pmap, OID_AUTO, pg_ps_enabled, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
    &pg_ps_enabled, 0, "Are large page mappings enabled?");

#define	PAT_INDEX_SIZE	8
static int pat_index[PAT_INDEX_SIZE];	/* cache mode to PAT index conversion */

/*
 * pmap_mapdev() support prior to pmap initialization (e.g., for the console).
 */
#define	PMAP_PREINIT_MAPPING_COUNT	8
static struct pmap_preinit_mapping {
	vm_paddr_t	pa;
	vm_offset_t	va;
	vm_size_t	sz;
	int		mode;
} pmap_preinit_mapping[PMAP_PREINIT_MAPPING_COUNT];
static int pmap_initialized;

static struct rwlock_padalign pvh_global_lock;

/*
 * Data for the pv entry allocation mechanism
 */
static TAILQ_HEAD(pch, pv_chunk) pv_chunks = TAILQ_HEAD_INITIALIZER(pv_chunks);
static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;
static struct md_page *pv_table;
static int shpgperproc = PMAP_SHPGPERPROC;

struct pv_chunk *pv_chunkbase;		/* KVA block for pv_chunks */
int pv_maxchunks;			/* How many chunks we have KVA for */
vm_offset_t pv_vafree;			/* freelist stored in the PTE */

/*
 * All those kernel PT submaps that BSD is so fond of
 */
pt_entry_t *CMAP3;
static pd_entry_t *KPTD;
caddr_t ptvmmap = 0;
caddr_t CADDR3;
struct msgbuf *msgbufp = NULL;

/*
 * Crashdump maps.
 */
static caddr_t crashdumpmap;

static pt_entry_t *PMAP1 = NULL, *PMAP2;
static pt_entry_t *PADDR1 = NULL, *PADDR2;
#ifdef SMP
static int PMAP1cpu;
static int PMAP1changedcpu;
SYSCTL_INT(_debug, OID_AUTO, PMAP1changedcpu, CTLFLAG_RD,
	   &PMAP1changedcpu, 0,
	   "Number of times pmap_pte_quick changed CPU with same PMAP1");
#endif
static int PMAP1changed;
SYSCTL_INT(_debug, OID_AUTO, PMAP1changed, CTLFLAG_RD,
	   &PMAP1changed, 0,
	   "Number of times pmap_pte_quick changed PMAP1");
static int PMAP1unchanged;
SYSCTL_INT(_debug, OID_AUTO, PMAP1unchanged, CTLFLAG_RD,
	   &PMAP1unchanged, 0,
	   "Number of times pmap_pte_quick didn't change PMAP1");
static struct mtx PMAP2mutex;

static void	free_pv_chunk(struct pv_chunk *pc);
static void	free_pv_entry(pmap_t pmap, pv_entry_t pv);
static pv_entry_t get_pv_entry(pmap_t pmap, boolean_t try);
static void	pmap_pv_demote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa);
static boolean_t pmap_pv_insert_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa);
static void	pmap_pv_promote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa);
static void	pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va);
static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap,
		    vm_offset_t va);
static int	pmap_pvh_wired_mappings(struct md_page *pvh, int count);

static boolean_t pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va);
static boolean_t pmap_enter_pde(pmap_t pmap, vm_offset_t va, vm_page_t m,
    vm_prot_t prot);
static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va,
    vm_page_t m, vm_prot_t prot, vm_page_t mpte);
static void pmap_flush_page(vm_page_t m);
static int pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte);
static void pmap_invalidate_pde_page(pmap_t pmap, vm_offset_t va,
		    pd_entry_t pde);
static void pmap_fill_ptp(pt_entry_t *firstpte, pt_entry_t newpte);
static boolean_t pmap_is_modified_pvh(struct md_page *pvh);
static boolean_t pmap_is_referenced_pvh(struct md_page *pvh);
static void pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode);
static void pmap_kenter_pde(vm_offset_t va, pd_entry_t newpde);
static void pmap_pde_attr(pd_entry_t *pde, int cache_bits);
static void pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va);
static boolean_t pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva,
    vm_prot_t prot);
static void pmap_pte_attr(pt_entry_t *pte, int cache_bits);
static void pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva,
    struct spglist *free);
static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t sva,
    struct spglist *free);
static vm_page_t pmap_remove_pt_page(pmap_t pmap, vm_offset_t va);
static void pmap_remove_page(struct pmap *pmap, vm_offset_t va,
    struct spglist *free);
static void pmap_remove_entry(struct pmap *pmap, vm_page_t m,
					vm_offset_t va);
static void pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m);
static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va,
    vm_page_t m);
static void pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde,
    pd_entry_t newpde);
static void pmap_update_pde_invalidate(vm_offset_t va, pd_entry_t newpde);

static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, u_int flags);

static vm_page_t _pmap_allocpte(pmap_t pmap, u_int ptepindex, u_int flags);
static void _pmap_unwire_ptp(pmap_t pmap, vm_page_t m, struct spglist *free);
static pt_entry_t *pmap_pte_quick(pmap_t pmap, vm_offset_t va);
static void pmap_pte_release(pt_entry_t *pte);
static int pmap_unuse_pt(pmap_t, vm_offset_t, struct spglist *);
#if defined(PAE) || defined(PAE_TABLES)
static void *pmap_pdpt_allocf(uma_zone_t zone, vm_size_t bytes, uint8_t *flags,
    int wait);
#endif
static void pmap_set_pg(void);

static __inline void pagezero(void *page);

CTASSERT(1 << PDESHIFT == sizeof(pd_entry_t));
CTASSERT(1 << PTESHIFT == sizeof(pt_entry_t));

/*
 * If you get an error here, then you set KVA_PAGES wrong! See the
 * description of KVA_PAGES in sys/i386/include/pmap.h.  It must be a
 * multiple of 4 for a normal kernel, or a multiple of 8 for a PAE kernel.
 */
CTASSERT(KERNBASE % (1 << 24) == 0);

/*
 *	Bootstrap the system enough to run with virtual memory.
 *
 *	On the i386 this is called after mapping has already been enabled
 *	and just syncs the pmap module with what has already been done.
 *	[We can't call it easily with mapping off since the kernel is not
 *	mapped with PA == VA, hence we would have to relocate every address
 *	from the linked base (virtual) address "KERNBASE" to the actual
 *	(physical) address starting relative to 0]
 */
void
pmap_bootstrap(vm_paddr_t firstaddr)
{
	vm_offset_t va;
	pt_entry_t *pte, *unused;
	struct pcpu *pc;
	int i;

	/*
	 * Add a physical memory segment (vm_phys_seg) corresponding to the
	 * preallocated kernel page table pages so that vm_page structures
	 * representing these pages will be created.  The vm_page structures
	 * are required for promotion of the corresponding kernel virtual
	 * addresses to superpage mappings.
	 */
	vm_phys_add_seg(KPTphys, KPTphys + ptoa(nkpt));

	/*
	 * Initialize the first available kernel virtual address.  However,
	 * using "firstaddr" may waste a few pages of the kernel virtual
	 * address space, because locore may not have mapped every physical
	 * page that it allocated.  Preferably, locore would provide a first
	 * unused virtual address in addition to "firstaddr".
	 */
	virtual_avail = (vm_offset_t) KERNBASE + firstaddr;

	virtual_end = VM_MAX_KERNEL_ADDRESS;

	/*
	 * Initialize the kernel pmap (which is statically allocated).
	 */
	PMAP_LOCK_INIT(kernel_pmap);
	kernel_pmap->pm_pdir = (pd_entry_t *) (KERNBASE + (u_int)IdlePTD);
#if defined(PAE) || defined(PAE_TABLES)
	kernel_pmap->pm_pdpt = (pdpt_entry_t *) (KERNBASE + (u_int)IdlePDPT);
#endif
	CPU_FILL(&kernel_pmap->pm_active);	/* don't allow deactivation */
	TAILQ_INIT(&kernel_pmap->pm_pvchunk);

 	/*
	 * Initialize the global pv list lock.
	 */
	rw_init(&pvh_global_lock, "pmap pv global");

	LIST_INIT(&allpmaps);

	/*
	 * Request a spin mutex so that changes to allpmaps cannot be
	 * preempted by smp_rendezvous_cpus().  Otherwise,
	 * pmap_update_pde_kernel() could access allpmaps while it is
	 * being changed.
	 */
	mtx_init(&allpmaps_lock, "allpmaps", NULL, MTX_SPIN);
	mtx_lock_spin(&allpmaps_lock);
	LIST_INSERT_HEAD(&allpmaps, kernel_pmap, pm_list);
	mtx_unlock_spin(&allpmaps_lock);

	/*
	 * Reserve some special page table entries/VA space for temporary
	 * mapping of pages.
	 */
#define	SYSMAP(c, p, v, n)	\
	v = (c)va; va += ((n)*PAGE_SIZE); p = pte; pte += (n);
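
	/*
	 * For illustration, the invocation "SYSMAP(caddr_t, CMAP3, CADDR3, 1)"
	 * below expands to:
	 *
	 *	CADDR3 = (caddr_t)va; va += ((1)*PAGE_SIZE);
	 *	CMAP3 = pte; pte += (1);
	 *
	 * i.e., each use hands out "n" pages of KVA starting at the current
	 * "va" cursor, along with a pointer to the matching kernel PTE, and
	 * then advances both cursors.
	 */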

	va = virtual_avail;
	pte = vtopte(va);


	/*
	 * Initialize temporary map objects on the current CPU for use
	 * during early boot.
	 * CMAP1/CMAP2 are used for zeroing and copying pages.
	 * CMAP3 is used for the idle process page zeroing.
	 */
	pc = get_pcpu();
	mtx_init(&pc->pc_cmap_lock, "SYSMAPS", NULL, MTX_DEF);
	SYSMAP(caddr_t, pc->pc_cmap_pte1, pc->pc_cmap_addr1, 1)
	SYSMAP(caddr_t, pc->pc_cmap_pte2, pc->pc_cmap_addr2, 1)
	SYSMAP(vm_offset_t, pte, pc->pc_qmap_addr, 1)

	SYSMAP(caddr_t, CMAP3, CADDR3, 1)

	/*
	 * Crashdump maps.
	 */
	SYSMAP(caddr_t, unused, crashdumpmap, MAXDUMPPGS)

	/*
	 * ptvmmap is used for reading arbitrary physical pages via /dev/mem.
	 */
	SYSMAP(caddr_t, unused, ptvmmap, 1)

	/*
	 * msgbufp is used to map the system message buffer.
	 */
	SYSMAP(struct msgbuf *, unused, msgbufp, atop(round_page(msgbufsize)))

	/*
	 * KPTmap is used by pmap_kextract().
	 *
	 * KPTmap is first initialized by locore.  However, that initial
	 * KPTmap can only support NKPT page table pages.  Here, a larger
	 * KPTmap is created that can support KVA_PAGES page table pages.
	 */
	SYSMAP(pt_entry_t *, KPTD, KPTmap, KVA_PAGES)

	for (i = 0; i < NKPT; i++)
		KPTD[i] = (KPTphys + (i << PAGE_SHIFT)) | pgeflag | PG_RW | PG_V;

	/*
	 * Adjust the start of the KPTD and KPTmap so that the implementation
	 * of pmap_kextract() and pmap_growkernel() can be made simpler.
	 */
	KPTD -= KPTDI;
	KPTmap -= i386_btop(KPTDI << PDRSHIFT);

	/*
	 * PADDR1 and PADDR2 are used by pmap_pte_quick() and pmap_pte(),
	 * respectively.
	 */
	SYSMAP(pt_entry_t *, PMAP1, PADDR1, 1)
	SYSMAP(pt_entry_t *, PMAP2, PADDR2, 1)

	mtx_init(&PMAP2mutex, "PMAP2", NULL, MTX_DEF);

	virtual_avail = va;

	/*
	 * Leave in place an identity mapping (virt == phys) for the low 1 MB
	 * physical memory region that is used by the ACPI wakeup code.  This
	 * mapping must not have PG_G set.
	 */
#ifdef XBOX
	/* FIXME: This is gross, but needed for the XBOX.  Since we are at such
	 * an early stage, we cannot yet neatly map video memory ... :-(
	 * Better fixes are very welcome! */
	if (!arch_i386_is_xbox)
#endif
	for (i = 1; i < NKPT; i++)
		PTD[i] = 0;

	/*
	 * Initialize the PAT MSR if present.
	 * pmap_init_pat() clears and sets CR4_PGE, which, as a
	 * side-effect, invalidates stale PG_G TLB entries that might
	 * have been created in our pre-boot environment.  We assume
	 * that PAT support implies PGE and in reverse, PGE presence
	 * comes with PAT.  Both features were added for Pentium Pro.
	 */
	pmap_init_pat();

	/* Turn on PG_G on kernel page(s) */
	pmap_set_pg();
}

static void
pmap_init_reserved_pages(void)
{
	struct pcpu *pc;
	vm_offset_t pages;
	int i;

	CPU_FOREACH(i) {
		pc = pcpu_find(i);
		/*
		 * Skip if the mapping has already been initialized,
		 * i.e. this is the BSP.
		 */
		if (pc->pc_cmap_addr1 != 0)
			continue;
		mtx_init(&pc->pc_cmap_lock, "SYSMAPS", NULL, MTX_DEF);
		pages = kva_alloc(PAGE_SIZE * 3);
		if (pages == 0)
			panic("%s: unable to allocate KVA", __func__);
		pc->pc_cmap_pte1 = vtopte(pages);
		pc->pc_cmap_pte2 = vtopte(pages + PAGE_SIZE);
		pc->pc_cmap_addr1 = (caddr_t)pages;
		pc->pc_cmap_addr2 = (caddr_t)(pages + PAGE_SIZE);
		pc->pc_qmap_addr = pages + (PAGE_SIZE * 2);
	}
}

SYSINIT(rpages_init, SI_SUB_CPU, SI_ORDER_ANY, pmap_init_reserved_pages, NULL);

/*
 * Setup the PAT MSR.
 */
void
pmap_init_pat(void)
{
	int pat_table[PAT_INDEX_SIZE];
	uint64_t pat_msr;
	u_long cr0, cr4;
	int i;

	/* Set default PAT index table. */
	for (i = 0; i < PAT_INDEX_SIZE; i++)
		pat_table[i] = -1;
	pat_table[PAT_WRITE_BACK] = 0;
	pat_table[PAT_WRITE_THROUGH] = 1;
	pat_table[PAT_UNCACHEABLE] = 3;
	pat_table[PAT_WRITE_COMBINING] = 3;
	pat_table[PAT_WRITE_PROTECTED] = 3;
	pat_table[PAT_UNCACHED] = 3;

	/*
	 * Bail if this CPU doesn't implement PAT.
	 * We assume that PAT support implies PGE.
	 */
	if ((cpu_feature & CPUID_PAT) == 0) {
		for (i = 0; i < PAT_INDEX_SIZE; i++)
			pat_index[i] = pat_table[i];
		pat_works = 0;
		return;
	}

	/*
	 * Due to some Intel errata, we can only safely use the lower 4
	 * PAT entries.
	 *
	 *   Intel Pentium III Processor Specification Update
	 * Errata E.27 (Upper Four PAT Entries Not Usable With Mode B
	 * or Mode C Paging)
	 *
	 *   Intel Pentium IV  Processor Specification Update
	 * Errata N46 (PAT Index MSB May Be Calculated Incorrectly)
	 */
	if (cpu_vendor_id == CPU_VENDOR_INTEL &&
	    !(CPUID_TO_FAMILY(cpu_id) == 6 && CPUID_TO_MODEL(cpu_id) >= 0xe))
		pat_works = 0;

	/* Initialize default PAT entries. */
	pat_msr = PAT_VALUE(0, PAT_WRITE_BACK) |
	    PAT_VALUE(1, PAT_WRITE_THROUGH) |
	    PAT_VALUE(2, PAT_UNCACHED) |
	    PAT_VALUE(3, PAT_UNCACHEABLE) |
	    PAT_VALUE(4, PAT_WRITE_BACK) |
	    PAT_VALUE(5, PAT_WRITE_THROUGH) |
	    PAT_VALUE(6, PAT_UNCACHED) |
	    PAT_VALUE(7, PAT_UNCACHEABLE);

	if (pat_works) {
		/*
		 * Leave the indices 0-3 at the default of WB, WT, UC-, and UC.
		 * Program 5 and 6 as WP and WC.
		 * Leave 4 and 7 as WB and UC.
		 */
		pat_msr &= ~(PAT_MASK(5) | PAT_MASK(6));
		pat_msr |= PAT_VALUE(5, PAT_WRITE_PROTECTED) |
		    PAT_VALUE(6, PAT_WRITE_COMBINING);
		pat_table[PAT_UNCACHED] = 2;
		pat_table[PAT_WRITE_PROTECTED] = 5;
		pat_table[PAT_WRITE_COMBINING] = 6;
	} else {
		/*
		 * Just replace PAT Index 2 with WC instead of UC-.
		 */
		pat_msr &= ~PAT_MASK(2);
		pat_msr |= PAT_VALUE(2, PAT_WRITE_COMBINING);
		pat_table[PAT_WRITE_COMBINING] = 2;
	}
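
	/*
	 * For illustration, the PAT is therefore programmed as:
	 *
	 *	index:      0   1   2    3   4   5   6    7
	 *	pat_works:  WB  WT  UC-  UC  WB  WP  WC   UC
	 *	otherwise:  WB  WT  WC   UC  WB  WT  UC-  UC
	 *
	 * and pat_table[] points WP and WC at indices 5 and 6 in the first
	 * case, or WC at index 2 in the second.
	 */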

	/* Disable PGE. */
	cr4 = rcr4();
	load_cr4(cr4 & ~CR4_PGE);

	/* Disable caches (CD = 1, NW = 0). */
	cr0 = rcr0();
	load_cr0((cr0 & ~CR0_NW) | CR0_CD);

	/* Flushes caches and TLBs. */
	wbinvd();
	invltlb();

	/* Update PAT and index table. */
	wrmsr(MSR_PAT, pat_msr);
	for (i = 0; i < PAT_INDEX_SIZE; i++)
		pat_index[i] = pat_table[i];

	/* Flush caches and TLBs again. */
	wbinvd();
	invltlb();

	/* Restore caches and PGE. */
	load_cr0(cr0);
	load_cr4(cr4);
}

/*
 * Set PG_G on kernel pages.  Only the BSP calls this when SMP is turned on.
 */
static void
pmap_set_pg(void)
{
	pt_entry_t *pte;
	vm_offset_t va, endva;

	if (pgeflag == 0)
		return;

	endva = KERNBASE + KERNend;

	if (pseflag) {
		va = KERNBASE + KERNLOAD;
		while (va < endva) {
			pdir_pde(PTD, va) |= pgeflag;
			invltlb();	/* Flush non-PG_G entries. */
			va += NBPDR;
		}
	} else {
		va = (vm_offset_t)btext;
		while (va < endva) {
			pte = vtopte(va);
			if (*pte)
				*pte |= pgeflag;
			invltlb();	/* Flush non-PG_G entries. */
			va += PAGE_SIZE;
		}
	}
}

/*
 * Initialize a vm_page's machine-dependent fields.
 */
void
pmap_page_init(vm_page_t m)
{

	TAILQ_INIT(&m->md.pv_list);
	m->md.pat_mode = PAT_WRITE_BACK;
}

#if defined(PAE) || defined(PAE_TABLES)
static void *
pmap_pdpt_allocf(uma_zone_t zone, vm_size_t bytes, uint8_t *flags, int wait)
{

	/* Inform UMA that this allocator uses kernel_map/object. */
	*flags = UMA_SLAB_KERNEL;
	return ((void *)kmem_alloc_contig(kernel_arena, bytes, wait, 0x0ULL,
	    0xffffffffULL, 1, 0, VM_MEMATTR_DEFAULT));
}
#endif

/*
 * Abuse the pte nodes for unmapped kva to thread a kva freelist through.
 * Requirements:
 *  - Must deal with pages in order to ensure that none of the PG_* bits
 *    are ever set, PG_V in particular.
 *  - Assumes we can write to ptes without pte_store() atomic ops, even
 *    on PAE systems.  This should be ok.
 *  - Assumes nothing will ever test these addresses for 0 to indicate
 *    no mapping instead of correctly checking PG_V.
 *  - Assumes a vm_offset_t will fit in a pte (true for i386).
 * Because PG_V is never set, there can be no mappings to invalidate.
 */
static vm_offset_t
pmap_ptelist_alloc(vm_offset_t *head)
{
	pt_entry_t *pte;
	vm_offset_t va;

	va = *head;
	if (va == 0)
		panic("pmap_ptelist_alloc: exhausted ptelist KVA");
	pte = vtopte(va);
	*head = *pte;
	if (*head & PG_V)
		panic("pmap_ptelist_alloc: va with PG_V set!");
	*pte = 0;
	return (va);
}

static void
pmap_ptelist_free(vm_offset_t *head, vm_offset_t va)
{
	pt_entry_t *pte;

	if (va & PG_V)
		panic("pmap_ptelist_free: freeing va with PG_V set!");
	pte = vtopte(va);
	*pte = *head;		/* virtual! PG_V is 0 though */
	*head = va;
}

static void
pmap_ptelist_init(vm_offset_t *head, void *base, int npages)
{
	int i;
	vm_offset_t va;

	*head = 0;
	for (i = npages - 1; i >= 0; i--) {
		va = (vm_offset_t)base + i * PAGE_SIZE;
		pmap_ptelist_free(head, va);
	}
}
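
/*
 * For illustration, after pmap_ptelist_init(&head, base, 3) the freelist is
 * threaded through the (never valid) PTEs as follows:
 *
 *	head                          == base
 *	*vtopte(base)                 == base + PAGE_SIZE
 *	*vtopte(base + PAGE_SIZE)     == base + 2 * PAGE_SIZE
 *	*vtopte(base + 2 * PAGE_SIZE) == 0	(end of list)
 *
 * so pmap_ptelist_alloc(&head) returns "base" and advances head to
 * base + PAGE_SIZE.  Since PG_V is never set, none of these entries is ever
 * a live mapping.
 */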


/*
 *	Initialize the pmap module.
 *	Called by vm_init, to initialize any structures that the pmap
 *	system needs to map virtual memory.
 */
void
pmap_init(void)
{
	struct pmap_preinit_mapping *ppim;
	vm_page_t mpte;
	vm_size_t s;
	int i, pv_npg;

	/*
	 * Initialize the vm page array entries for the kernel pmap's
	 * page table pages.
	 */
	for (i = 0; i < NKPT; i++) {
		mpte = PHYS_TO_VM_PAGE(KPTphys + (i << PAGE_SHIFT));
		KASSERT(mpte >= vm_page_array &&
		    mpte < &vm_page_array[vm_page_array_size],
		    ("pmap_init: page table page is out of range"));
		mpte->pindex = i + KPTDI;
		mpte->phys_addr = KPTphys + (i << PAGE_SHIFT);
	}

	/*
	 * Initialize the address space (zone) for the pv entries.  Set a
	 * high water mark so that the system can recover from excessive
	 * numbers of pv entries.
	 */
	TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
	pv_entry_max = shpgperproc * maxproc + vm_cnt.v_page_count;
	TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max);
	pv_entry_max = roundup(pv_entry_max, _NPCPV);
	pv_entry_high_water = 9 * (pv_entry_max / 10);

	/*
	 * If the kernel is running on a virtual machine, then it must assume
	 * that MCA is enabled by the hypervisor.  Moreover, the kernel must
	 * be prepared for the hypervisor changing the vendor and family that
	 * are reported by CPUID.  Consequently, the workaround for AMD Family
	 * 10h Erratum 383 is enabled if the processor's feature set does not
	 * include at least one feature that is only supported by older Intel
	 * or newer AMD processors.
	 */
	if (vm_guest != VM_GUEST_NO && (cpu_feature & CPUID_SS) == 0 &&
	    (cpu_feature2 & (CPUID2_SSSE3 | CPUID2_SSE41 | CPUID2_AESNI |
	    CPUID2_AVX | CPUID2_XSAVE)) == 0 && (amd_feature2 & (AMDID2_XOP |
	    AMDID2_FMA4)) == 0)
		workaround_erratum383 = 1;

	/*
	 * Are large page mappings supported and enabled?
	 */
	TUNABLE_INT_FETCH("vm.pmap.pg_ps_enabled", &pg_ps_enabled);
	if (pseflag == 0)
		pg_ps_enabled = 0;
	else if (pg_ps_enabled) {
		KASSERT(MAXPAGESIZES > 1 && pagesizes[1] == 0,
		    ("pmap_init: can't assign to pagesizes[1]"));
		pagesizes[1] = NBPDR;
	}

	/*
	 * Calculate the size of the pv head table for superpages.
	 * Handle the possibility that "vm_phys_segs[...].end" is zero.
	 */
	pv_npg = trunc_4mpage(vm_phys_segs[vm_phys_nsegs - 1].end -
	    PAGE_SIZE) / NBPDR + 1;

	/*
	 * Allocate memory for the pv head table for superpages.
	 */
	s = (vm_size_t)(pv_npg * sizeof(struct md_page));
	s = round_page(s);
	pv_table = (struct md_page *)kmem_malloc(kernel_arena, s,
	    M_WAITOK | M_ZERO);
	for (i = 0; i < pv_npg; i++)
		TAILQ_INIT(&pv_table[i].pv_list);

	pv_maxchunks = MAX(pv_entry_max / _NPCPV, maxproc);
	pv_chunkbase = (struct pv_chunk *)kva_alloc(PAGE_SIZE * pv_maxchunks);
	if (pv_chunkbase == NULL)
		panic("pmap_init: not enough kvm for pv chunks");
	pmap_ptelist_init(&pv_vafree, pv_chunkbase, pv_maxchunks);
#if defined(PAE) || defined(PAE_TABLES)
	pdptzone = uma_zcreate("PDPT", NPGPTD * sizeof(pdpt_entry_t), NULL,
	    NULL, NULL, NULL, (NPGPTD * sizeof(pdpt_entry_t)) - 1,
	    UMA_ZONE_VM | UMA_ZONE_NOFREE);
	uma_zone_set_allocf(pdptzone, pmap_pdpt_allocf);
#endif

	pmap_initialized = 1;
	if (!bootverbose)
		return;
	for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) {
		ppim = pmap_preinit_mapping + i;
		if (ppim->va == 0)
			continue;
		printf("PPIM %u: PA=%#jx, VA=%#x, size=%#x, mode=%#x\n", i,
		    (uintmax_t)ppim->pa, ppim->va, ppim->sz, ppim->mode);
	}
}


SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_max, CTLFLAG_RD, &pv_entry_max, 0,
	"Max number of PV entries");
SYSCTL_INT(_vm_pmap, OID_AUTO, shpgperproc, CTLFLAG_RD, &shpgperproc, 0,
	"Page share factor per proc");

static SYSCTL_NODE(_vm_pmap, OID_AUTO, pde, CTLFLAG_RD, 0,
    "2/4MB page mapping counters");

static u_long pmap_pde_demotions;
SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, demotions, CTLFLAG_RD,
    &pmap_pde_demotions, 0, "2/4MB page demotions");

static u_long pmap_pde_mappings;
SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, mappings, CTLFLAG_RD,
    &pmap_pde_mappings, 0, "2/4MB page mappings");

static u_long pmap_pde_p_failures;
SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, p_failures, CTLFLAG_RD,
    &pmap_pde_p_failures, 0, "2/4MB page promotion failures");

static u_long pmap_pde_promotions;
SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, promotions, CTLFLAG_RD,
    &pmap_pde_promotions, 0, "2/4MB page promotions");

/***************************************************
 * Low level helper routines.....
 ***************************************************/

/*
 * Determine the appropriate bits to set in a PTE or PDE for a specified
 * caching mode.
 */
int
pmap_cache_bits(int mode, boolean_t is_pde)
{
	int cache_bits, pat_flag, pat_idx;

	if (mode < 0 || mode >= PAT_INDEX_SIZE || pat_index[mode] < 0)
		panic("Unknown caching mode %d\n", mode);

	/* The PAT bit is different for PTE's and PDE's. */
	pat_flag = is_pde ? PG_PDE_PAT : PG_PTE_PAT;

	/* Map the caching mode to a PAT index. */
	pat_idx = pat_index[mode];

	/* Map the 3-bit index value into the PAT, PCD, and PWT bits. */
	cache_bits = 0;
	if (pat_idx & 0x4)
		cache_bits |= pat_flag;
	if (pat_idx & 0x2)
		cache_bits |= PG_NC_PCD;
	if (pat_idx & 0x1)
		cache_bits |= PG_NC_PWT;
	return (cache_bits);
}
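
/*
 * For illustration: with a fully working PAT, PAT_WRITE_COMBINING maps to
 * PAT index 6 (binary 110), so pmap_cache_bits(PAT_WRITE_COMBINING, FALSE)
 * returns PG_PTE_PAT | PG_NC_PCD, and the is_pde variant returns
 * PG_PDE_PAT | PG_NC_PCD.  When the PAT is present but pat_works is 0, the
 * same mode maps to index 2 (binary 010) and yields only PG_NC_PCD.
 */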

/*
 * The caller is responsible for maintaining TLB consistency.
 */
static void
pmap_kenter_pde(vm_offset_t va, pd_entry_t newpde)
{
	pd_entry_t *pde;
	pmap_t pmap;
	boolean_t PTD_updated;

	PTD_updated = FALSE;
	mtx_lock_spin(&allpmaps_lock);
	LIST_FOREACH(pmap, &allpmaps, pm_list) {
		if ((pmap->pm_pdir[PTDPTDI] & PG_FRAME) == (PTDpde[0] &
		    PG_FRAME))
			PTD_updated = TRUE;
		pde = pmap_pde(pmap, va);
		pde_store(pde, newpde);
	}
	mtx_unlock_spin(&allpmaps_lock);
	KASSERT(PTD_updated,
	    ("pmap_kenter_pde: current page table is not in allpmaps"));
}

/*
 * After changing the page size for the specified virtual address in the page
 * table, flush the corresponding entries from the processor's TLB.  Only the
 * calling processor's TLB is affected.
 *
 * The calling thread must be pinned to a processor.
 */
static void
pmap_update_pde_invalidate(vm_offset_t va, pd_entry_t newpde)
{
	u_long cr4;

	if ((newpde & PG_PS) == 0)
		/* Demotion: flush a specific 2MB page mapping. */
		invlpg(va);
	else if ((newpde & PG_G) == 0)
		/*
		 * Promotion: flush every 4KB page mapping from the TLB
		 * because there are too many to flush individually.
		 */
		invltlb();
	else {
		/*
		 * Promotion: flush every 4KB page mapping from the TLB,
		 * including any global (PG_G) mappings.
		 */
		cr4 = rcr4();
		load_cr4(cr4 & ~CR4_PGE);
		/*
		 * Although preemption at this point could be detrimental to
		 * performance, it would not lead to an error.  PG_G is simply
		 * ignored if CR4.PGE is clear.  Moreover, in case this block
		 * is re-entered, the load_cr4() either above or below will
		 * modify CR4.PGE flushing the TLB.
		 */
		load_cr4(cr4 | CR4_PGE);
	}
}

void
invltlb_glob(void)
{
	uint64_t cr4;

	if (pgeflag == 0) {
		invltlb();
	} else {
		cr4 = rcr4();
		load_cr4(cr4 & ~CR4_PGE);
		load_cr4(cr4 | CR4_PGE);
	}
}


#ifdef SMP
/*
 * For SMP, these functions have to use the IPI mechanism for coherence.
 *
 * N.B.: Before calling any of the following TLB invalidation functions,
 * the calling processor must ensure that all stores updating a non-
 * kernel page table are globally performed.  Otherwise, another
 * processor could cache an old, pre-update entry without being
 * invalidated.  This can happen one of two ways: (1) The pmap becomes
 * active on another processor after its pm_active field is checked by
 * one of the following functions but before a store updating the page
 * table is globally performed. (2) The pmap becomes active on another
 * processor before its pm_active field is checked but due to
 * speculative loads one of the following functions still reads the
 * pmap as inactive on the other processor.
 *
 * The kernel page table is exempt because its pm_active field is
 * immutable.  The kernel page table is always active on every
 * processor.
 */
void
pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
{
	cpuset_t *mask, other_cpus;
	u_int cpuid;

	sched_pin();
	if (pmap == kernel_pmap || !CPU_CMP(&pmap->pm_active, &all_cpus)) {
		invlpg(va);
		mask = &all_cpus;
	} else {
		cpuid = PCPU_GET(cpuid);
		other_cpus = all_cpus;
		CPU_CLR(cpuid, &other_cpus);
		if (CPU_ISSET(cpuid, &pmap->pm_active))
			invlpg(va);
		CPU_AND(&other_cpus, &pmap->pm_active);
		mask = &other_cpus;
	}
	smp_masked_invlpg(*mask, va);
	sched_unpin();
}

/* 4k PTEs -- Chosen to exceed the total size of Broadwell L2 TLB */
#define	PMAP_INVLPG_THRESHOLD	(4 * 1024 * PAGE_SIZE)
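
/*
 * For illustration, with 4KB pages this threshold is 4096 * 4096 bytes
 * (16MB) of KVA; pmap_invalidate_range() below falls back to a full TLB
 * flush via pmap_invalidate_all() for any range at least that large instead
 * of issuing thousands of individual INVLPG instructions.
 */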
1052251881Speter
1053251881Spetervoid
1054251881Speterpmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
1055251881Speter{
1056251881Speter	cpuset_t *mask, other_cpus;
1057251881Speter	vm_offset_t addr;
1058251881Speter	u_int cpuid;
1059251881Speter
1060251881Speter	if (eva - sva >= PMAP_INVLPG_THRESHOLD) {
1061251881Speter		pmap_invalidate_all(pmap);
1062251881Speter		return;
1063251881Speter	}
1064251881Speter
1065251881Speter	sched_pin();
1066251881Speter	if (pmap == kernel_pmap || !CPU_CMP(&pmap->pm_active, &all_cpus)) {
1067251881Speter		for (addr = sva; addr < eva; addr += PAGE_SIZE)
1068251881Speter			invlpg(addr);
1069251881Speter		mask = &all_cpus;
1070251881Speter	} else {
1071251881Speter		cpuid = PCPU_GET(cpuid);
1072251881Speter		other_cpus = all_cpus;
1073251881Speter		CPU_CLR(cpuid, &other_cpus);
1074251881Speter		if (CPU_ISSET(cpuid, &pmap->pm_active))
1075251881Speter			for (addr = sva; addr < eva; addr += PAGE_SIZE)
1076251881Speter				invlpg(addr);
1077251881Speter		CPU_AND(&other_cpus, &pmap->pm_active);
1078251881Speter		mask = &other_cpus;
1079251881Speter	}
1080251881Speter	smp_masked_invlpg_range(*mask, sva, eva);
1081251881Speter	sched_unpin();
1082251881Speter}
1083251881Speter
1084251881Spetervoid
1085251881Speterpmap_invalidate_all(pmap_t pmap)
1086251881Speter{
1087251881Speter	cpuset_t *mask, other_cpus;
1088251881Speter	u_int cpuid;
1089251881Speter
1090251881Speter	sched_pin();
1091251881Speter	if (pmap == kernel_pmap) {
1092251881Speter		invltlb_glob();
1093251881Speter		mask = &all_cpus;
1094251881Speter	} else if (!CPU_CMP(&pmap->pm_active, &all_cpus)) {
1095251881Speter		invltlb();
1096251881Speter		mask = &all_cpus;
1097251881Speter	} else {
1098251881Speter		cpuid = PCPU_GET(cpuid);
1099251881Speter		other_cpus = all_cpus;
1100251881Speter		CPU_CLR(cpuid, &other_cpus);
1101251881Speter		if (CPU_ISSET(cpuid, &pmap->pm_active))
1102251881Speter			invltlb();
1103251881Speter		CPU_AND(&other_cpus, &pmap->pm_active);
1104251881Speter		mask = &other_cpus;
1105251881Speter	}
1106251881Speter	smp_masked_invltlb(*mask, pmap);
1107251881Speter	sched_unpin();
1108251881Speter}
1109251881Speter
1110251881Spetervoid
1111251881Speterpmap_invalidate_cache(void)
1112251881Speter{
1113251881Speter
1114251881Speter	sched_pin();
1115251881Speter	wbinvd();
1116251881Speter	smp_cache_flush();
1117251881Speter	sched_unpin();
1118251881Speter}
1119251881Speter
1120251881Speterstruct pde_action {
1121251881Speter	cpuset_t invalidate;	/* processors that invalidate their TLB */
1122251881Speter	vm_offset_t va;
1123251881Speter	pd_entry_t *pde;
1124251881Speter	pd_entry_t newpde;
1125251881Speter	u_int store;		/* processor that updates the PDE */
1126251881Speter};
1127251881Speter
1128251881Speterstatic void
1129251881Speterpmap_update_pde_kernel(void *arg)
1130251881Speter{
1131251881Speter	struct pde_action *act = arg;
1132251881Speter	pd_entry_t *pde;
1133251881Speter	pmap_t pmap;
1134251881Speter
1135251881Speter	if (act->store == PCPU_GET(cpuid)) {
1136251881Speter
1137251881Speter		/*
1138251881Speter		 * Elsewhere, this operation requires allpmaps_lock for
1139251881Speter		 * synchronization.  Here, it does not because it is being
1140251881Speter		 * performed in the context of an all_cpus rendezvous.
1141251881Speter		 */
1142251881Speter		LIST_FOREACH(pmap, &allpmaps, pm_list) {
1143251881Speter			pde = pmap_pde(pmap, act->va);
1144251881Speter			pde_store(pde, act->newpde);
1145251881Speter		}
1146251881Speter	}
1147251881Speter}
1148251881Speter
1149251881Speterstatic void
1150251881Speterpmap_update_pde_user(void *arg)
1151251881Speter{
1152251881Speter	struct pde_action *act = arg;
1153251881Speter
1154251881Speter	if (act->store == PCPU_GET(cpuid))
1155251881Speter		pde_store(act->pde, act->newpde);
1156251881Speter}
1157251881Speter
1158251881Speterstatic void
1159251881Speterpmap_update_pde_teardown(void *arg)
1160251881Speter{
1161251881Speter	struct pde_action *act = arg;
1162251881Speter
1163251881Speter	if (CPU_ISSET(PCPU_GET(cpuid), &act->invalidate))
1164251881Speter		pmap_update_pde_invalidate(act->va, act->newpde);
1165251881Speter}
1166251881Speter
1167251881Speter/*
1168251881Speter * Change the page size for the specified virtual address in a way that
1169251881Speter * prevents any possibility of the TLB ever having two entries that map the
1170251881Speter * same virtual address using different page sizes.  This is the recommended
1171251881Speter * workaround for Erratum 383 on AMD Family 10h processors.  It prevents a
1172251881Speter * machine check exception for a TLB state that is improperly diagnosed as a
1173251881Speter * hardware error.
1174251881Speter */
1175251881Speterstatic void
1176251881Speterpmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde, pd_entry_t newpde)
1177251881Speter{
1178251881Speter	struct pde_action act;
1179251881Speter	cpuset_t active, other_cpus;
1180251881Speter	u_int cpuid;
1181251881Speter
1182251881Speter	sched_pin();
1183251881Speter	cpuid = PCPU_GET(cpuid);
1184251881Speter	other_cpus = all_cpus;
1185251881Speter	CPU_CLR(cpuid, &other_cpus);
1186251881Speter	if (pmap == kernel_pmap)
1187251881Speter		active = all_cpus;
1188251881Speter	else
1189251881Speter		active = pmap->pm_active;
1190251881Speter	if (CPU_OVERLAP(&active, &other_cpus)) {
1191251881Speter		act.store = cpuid;
1192251881Speter		act.invalidate = active;
1193251881Speter		act.va = va;
1194251881Speter		act.pde = pde;
1195251881Speter		act.newpde = newpde;
1196251881Speter		CPU_SET(cpuid, &active);
1197251881Speter		smp_rendezvous_cpus(active,
1198251881Speter		    smp_no_rendevous_barrier, pmap == kernel_pmap ?
1199251881Speter		    pmap_update_pde_kernel : pmap_update_pde_user,
1200251881Speter		    pmap_update_pde_teardown, &act);
1201251881Speter	} else {
1202251881Speter		if (pmap == kernel_pmap)
1203251881Speter			pmap_kenter_pde(va, newpde);
1204251881Speter		else
1205251881Speter			pde_store(pde, newpde);
1206251881Speter		if (CPU_ISSET(cpuid, &active))
1207251881Speter			pmap_update_pde_invalidate(va, newpde);
1208251881Speter	}
1209251881Speter	sched_unpin();
1210251881Speter}
1211251881Speter#else /* !SMP */
1212251881Speter/*
1213251881Speter * Normal, non-SMP, 486+ invalidation functions.
1214251881Speter * We inline these within pmap.c for speed.
1215251881Speter */
1216251881SpeterPMAP_INLINE void
1217251881Speterpmap_invalidate_page(pmap_t pmap, vm_offset_t va)
1218251881Speter{
1219251881Speter
1220251881Speter	if (pmap == kernel_pmap || !CPU_EMPTY(&pmap->pm_active))
1221251881Speter		invlpg(va);
1222251881Speter}
1223251881Speter
1224251881SpeterPMAP_INLINE void
1225251881Speterpmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
1226251881Speter{
1227251881Speter	vm_offset_t addr;
1228251881Speter
1229251881Speter	if (pmap == kernel_pmap || !CPU_EMPTY(&pmap->pm_active))
1230251881Speter		for (addr = sva; addr < eva; addr += PAGE_SIZE)
1231251881Speter			invlpg(addr);
1232251881Speter}
1233251881Speter
1234251881SpeterPMAP_INLINE void
1235251881Speterpmap_invalidate_all(pmap_t pmap)
1236251881Speter{
1237251881Speter
1238251881Speter	if (pmap == kernel_pmap)
1239251881Speter		invltlb_glob();
1240251881Speter	else if (!CPU_EMPTY(&pmap->pm_active))
1241251881Speter		invltlb();
1242251881Speter}
1243251881Speter
1244251881SpeterPMAP_INLINE void
1245251881Speterpmap_invalidate_cache(void)
1246251881Speter{
1247251881Speter
1248251881Speter	wbinvd();
1249251881Speter}
1250251881Speter
1251251881Speterstatic void
1252251881Speterpmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde, pd_entry_t newpde)
1253251881Speter{
1254251881Speter
1255251881Speter	if (pmap == kernel_pmap)
1256251881Speter		pmap_kenter_pde(va, newpde);
1257251881Speter	else
1258251881Speter		pde_store(pde, newpde);
1259251881Speter	if (pmap == kernel_pmap || !CPU_EMPTY(&pmap->pm_active))
1260251881Speter		pmap_update_pde_invalidate(va, newpde);
1261251881Speter}
1262251881Speter#endif /* !SMP */
1263251881Speter
1264251881Speterstatic void
1265251881Speterpmap_invalidate_pde_page(pmap_t pmap, vm_offset_t va, pd_entry_t pde)
1266251881Speter{
1267251881Speter
1268251881Speter	/*
1269251881Speter	 * When the PDE has PG_PROMOTED set, the 2- or 4MB page mapping was
1270251881Speter	 * created by a promotion that did not invalidate the 512 or 1024 4KB
1271251881Speter	 * page mappings that might exist in the TLB.  Consequently, at this
1272251881Speter	 * point, the TLB may hold both 4KB and 2- or 4MB page mappings for
1273251881Speter	 * the address range [va, va + NBPDR).  Therefore, the entire range
1274251881Speter	 * must be invalidated here.  In contrast, when PG_PROMOTED is clear,
1275251881Speter	 * the TLB will not hold any 4KB page mappings for the address range
1276251881Speter	 * [va, va + NBPDR), and so a single INVLPG suffices to invalidate the
1277251881Speter	 * 2- or 4MB page mapping from the TLB.
1278251881Speter	 */
1279251881Speter	if ((pde & PG_PROMOTED) != 0)
1280251881Speter		pmap_invalidate_range(pmap, va, va + NBPDR - 1);
1281251881Speter	else
1282251881Speter		pmap_invalidate_page(pmap, va);
1283251881Speter}
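
/*
 * Editorial illustration (not part of the original source): a caller that has
 * just removed or changed a possibly-promoted 2- or 4MB kernel mapping would
 * let PG_PROMOTED pick between the ranged flush and the single INVLPG:
 *
 *	pd_entry_t pde = *pmap_pde(kernel_pmap, va);
 *
 *	pmap_invalidate_pde_page(kernel_pmap, trunc_4mpage(va), pde);
 */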
1284251881Speter
1285251881Speter#define	PMAP_CLFLUSH_THRESHOLD	(2 * 1024 * 1024)
1286251881Speter
1287251881Spetervoid
1288251881Speterpmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva, boolean_t force)
1289251881Speter{
1290251881Speter
1291251881Speter	if (force) {
1292251881Speter		sva &= ~(vm_offset_t)(cpu_clflush_line_size - 1);
1293251881Speter	} else {
1294251881Speter		KASSERT((sva & PAGE_MASK) == 0,
1295251881Speter		    ("pmap_invalidate_cache_range: sva not page-aligned"));
1296251881Speter		KASSERT((eva & PAGE_MASK) == 0,
1297251881Speter		    ("pmap_invalidate_cache_range: eva not page-aligned"));
1298251881Speter	}
1299251881Speter
1300251881Speter	if ((cpu_feature & CPUID_SS) != 0 && !force)
1301262253Speter		; /* If "Self Snoop" is supported and allowed, do nothing. */
1302251881Speter	else if ((cpu_stdext_feature & CPUID_STDEXT_CLFLUSHOPT) != 0 &&
1303251881Speter	    eva - sva < PMAP_CLFLUSH_THRESHOLD) {
1304251881Speter#ifdef DEV_APIC
1305251881Speter		/*
1306251881Speter		 * XXX: Some CPUs fault, hang, or trash the local APIC
1307251881Speter		 * registers if we use CLFLUSH on the local APIC
1308251881Speter		 * range.  The local APIC is always uncached, so we
1309251881Speter		 * don't need to flush for that range anyway.
1310251881Speter		 */
1311251881Speter		if (pmap_kextract(sva) == lapic_paddr)
1312251881Speter			return;
1313251881Speter#endif
1314251881Speter		/*
1315251881Speter		 * Otherwise, do per-cache line flush.  Use the sfence
1316251881Speter		 * instruction to ensure that previous stores are
1317251881Speter		 * included in the write-back.  The processor
1318251881Speter		 * propagates flush to other processors in the cache
1319251881Speter		 * coherence domain.
1320251881Speter		 */
1321251881Speter		sfence();
1322251881Speter		for (; sva < eva; sva += cpu_clflush_line_size)
1323251881Speter			clflushopt(sva);
1324251881Speter		sfence();
1325251881Speter	} else if ((cpu_feature & CPUID_CLFSH) != 0 &&
1326251881Speter	    eva - sva < PMAP_CLFLUSH_THRESHOLD) {
1327251881Speter#ifdef DEV_APIC
1328251881Speter		if (pmap_kextract(sva) == lapic_paddr)
1329251881Speter			return;
1330251881Speter#endif
1331251881Speter		/*
1332251881Speter		 * Writes are ordered by CLFLUSH on Intel CPUs.
1333251881Speter		 */
1334251881Speter		if (cpu_vendor_id != CPU_VENDOR_INTEL)
1335251881Speter			mfence();
1336251881Speter		for (; sva < eva; sva += cpu_clflush_line_size)
1337251881Speter			clflush(sva);
1338251881Speter		if (cpu_vendor_id != CPU_VENDOR_INTEL)
1339251881Speter			mfence();
1340251881Speter	} else {
1341251881Speter
1342251881Speter		/*
1343251881Speter		 * No targeted cache flush methods are supported by CPU,
1344251881Speter		 * or the supplied range is bigger than 2MB.
1345251881Speter		 * Globally invalidate cache.
1346251881Speter		 */
1347251881Speter		pmap_invalidate_cache();
1348251881Speter	}
1349251881Speter}
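
/*
 * Usage sketch (editorial illustration only): flushing one page-aligned page
 * out of the cache hierarchy, e.g. before handing it to a non-coherent
 * device, could be written as
 *
 *	pmap_invalidate_cache_range(va, va + PAGE_SIZE, FALSE);
 *
 * Passing force = TRUE skips the self-snoop shortcut and relaxes the
 * page-alignment assertions, aligning sva down to a cache line instead.
 */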
1350251881Speter
1351251881Spetervoid
1352251881Speterpmap_invalidate_cache_pages(vm_page_t *pages, int count)
1353251881Speter{
1354251881Speter	int i;
1355251881Speter
1356251881Speter	if (count >= PMAP_CLFLUSH_THRESHOLD / PAGE_SIZE ||
1357251881Speter	    (cpu_feature & CPUID_CLFSH) == 0) {
1358251881Speter		pmap_invalidate_cache();
1359251881Speter	} else {
1360251881Speter		for (i = 0; i < count; i++)
1361251881Speter			pmap_flush_page(pages[i]);
1362251881Speter	}
1363251881Speter}
1364251881Speter
1365251881Speter/*
1366251881Speter * Are we current address space or kernel?
1367251881Speter */
1368251881Speterstatic __inline int
1369251881Speterpmap_is_current(pmap_t pmap)
1370251881Speter{
1371251881Speter
1372251881Speter	return (pmap == kernel_pmap || pmap ==
1373251881Speter	    vmspace_pmap(curthread->td_proc->p_vmspace));
1374251881Speter}
1375251881Speter
1376251881Speter/*
1377251881Speter * If the given pmap is not the current or kernel pmap, the returned pte must
1378251881Speter * be released by passing it to pmap_pte_release().
1379251881Speter */
1380251881Speterpt_entry_t *
1381251881Speterpmap_pte(pmap_t pmap, vm_offset_t va)
1382251881Speter{
1383251881Speter	pd_entry_t newpf;
1384251881Speter	pd_entry_t *pde;
1385251881Speter
1386251881Speter	pde = pmap_pde(pmap, va);
1387251881Speter	if (*pde & PG_PS)
1388251881Speter		return (pde);
1389251881Speter	if (*pde != 0) {
1390251881Speter		/* are we current address space or kernel? */
1391251881Speter		if (pmap_is_current(pmap))
1392251881Speter			return (vtopte(va));
1393251881Speter		mtx_lock(&PMAP2mutex);
1394251881Speter		newpf = *pde & PG_FRAME;
1395251881Speter		if ((*PMAP2 & PG_FRAME) != newpf) {
1396251881Speter			*PMAP2 = newpf | PG_RW | PG_V | PG_A | PG_M;
1397251881Speter			pmap_invalidate_page(kernel_pmap, (vm_offset_t)PADDR2);
1398251881Speter		}
1399251881Speter		return (PADDR2 + (i386_btop(va) & (NPTEPG - 1)));
1400251881Speter	}
1401251881Speter	return (NULL);
1402251881Speter}
1403251881Speter
1404251881Speter/*
1405251881Speter * Releases a pte that was obtained from pmap_pte().  Be prepared for the pte
1406251881Speter * being NULL.
1407251881Speter */
1408251881Speterstatic __inline void
1409251881Speterpmap_pte_release(pt_entry_t *pte)
1410251881Speter{
1411251881Speter
1412251881Speter	if ((pt_entry_t *)((vm_offset_t)pte & ~PAGE_MASK) == PADDR2)
1413251881Speter		mtx_unlock(&PMAP2mutex);
1414251881Speter}
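
/*
 * Editorial illustration (not part of the original source): for a pmap that
 * is neither the kernel pmap nor the current one, the pmap_pte() /
 * pmap_pte_release() contract described above is simply
 *
 *	pte = pmap_pte(pmap, va);
 *	if (pte != NULL) {
 *		... examine or modify *pte ...
 *		pmap_pte_release(pte);
 *	}
 *
 * pmap_pte_release() only drops PMAP2mutex when the PADDR2 window was used,
 * so it is safe to call unconditionally.
 */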
1415251881Speter
1416251881Speter/*
1417251881Speter * NB:  The sequence of updating a page table followed by accesses to the
1418251881Speter * corresponding pages is subject to the situation described in the "AMD64
1419251881Speter * Architecture Programmer's Manual Volume 2: System Programming" rev. 3.23,
1420251881Speter * "7.3.1 Special Coherency Considerations".  Therefore, issuing the INVLPG
1421251881Speter * right after modifying the PTE bits is crucial.
1422251881Speter */
1423251881Speterstatic __inline void
1424251881Speterinvlcaddr(void *caddr)
1425251881Speter{
1426251881Speter
1427251881Speter	invlpg((u_int)caddr);
1428251881Speter}
1429251881Speter
1430251881Speter/*
1431251881Speter * Super fast pmap_pte routine best used when scanning
1432251881Speter * the pv lists.  This eliminates many coarse-grained
1433251881Speter * invltlb calls.  Note that many of the pv list
1434251881Speter * scans are across different pmaps.  It is very wasteful
1435251881Speter * to do an entire invltlb for checking a single mapping.
1436251881Speter *
1437251881Speter * If the given pmap is not the current pmap, pvh_global_lock
1438251881Speter * must be held and curthread pinned to a CPU.
1439251881Speter */
1440251881Speterstatic pt_entry_t *
1441251881Speterpmap_pte_quick(pmap_t pmap, vm_offset_t va)
1442251881Speter{
1443251881Speter	pd_entry_t newpf;
1444251881Speter	pd_entry_t *pde;
1445251881Speter
1446251881Speter	pde = pmap_pde(pmap, va);
1447251881Speter	if (*pde & PG_PS)
1448251881Speter		return (pde);
1449251881Speter	if (*pde != 0) {
1450251881Speter		/* are we current address space or kernel? */
1451251881Speter		if (pmap_is_current(pmap))
1452251881Speter			return (vtopte(va));
1453251881Speter		rw_assert(&pvh_global_lock, RA_WLOCKED);
1454251881Speter		KASSERT(curthread->td_pinned > 0, ("curthread not pinned"));
1455251881Speter		newpf = *pde & PG_FRAME;
1456251881Speter		if ((*PMAP1 & PG_FRAME) != newpf) {
1457251881Speter			*PMAP1 = newpf | PG_RW | PG_V | PG_A | PG_M;
1458251881Speter#ifdef SMP
1459251881Speter			PMAP1cpu = PCPU_GET(cpuid);
1460251881Speter#endif
1461251881Speter			invlcaddr(PADDR1);
1462251881Speter			PMAP1changed++;
1463251881Speter		} else
1464251881Speter#ifdef SMP
1465251881Speter		if (PMAP1cpu != PCPU_GET(cpuid)) {
1466251881Speter			PMAP1cpu = PCPU_GET(cpuid);
1467251881Speter			invlcaddr(PADDR1);
1468251881Speter			PMAP1changedcpu++;
1469251881Speter		} else
1470251881Speter#endif
1471251881Speter			PMAP1unchanged++;
1472251881Speter		return (PADDR1 + (i386_btop(va) & (NPTEPG - 1)));
1473251881Speter	}
1474251881Speter	return (0);
1475251881Speter}
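
/*
 * Editorial sketch of the calling convention assumed by pmap_pte_quick()
 * when the given pmap is not the current one (illustrative only):
 *
 *	rw_wlock(&pvh_global_lock);
 *	sched_pin();
 *	PMAP_LOCK(pmap);
 *	pte = pmap_pte_quick(pmap, va);
 *	...
 *	PMAP_UNLOCK(pmap);
 *	sched_unpin();
 *	rw_wunlock(&pvh_global_lock);
 */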
1476251881Speter
1477251881Speter/*
1478251881Speter *	Routine:	pmap_extract
1479251881Speter *	Function:
1480251881Speter *		Extract the physical page address associated
1481251881Speter *		with the given map/virtual_address pair.
1482251881Speter */
1483251881Spetervm_paddr_t
1484251881Speterpmap_extract(pmap_t pmap, vm_offset_t va)
1485251881Speter{
1486251881Speter	vm_paddr_t rtval;
1487251881Speter	pt_entry_t *pte;
1488251881Speter	pd_entry_t pde;
1489251881Speter
1490251881Speter	rtval = 0;
1491251881Speter	PMAP_LOCK(pmap);
1492251881Speter	pde = pmap->pm_pdir[va >> PDRSHIFT];
1493251881Speter	if (pde != 0) {
1494251881Speter		if ((pde & PG_PS) != 0)
1495251881Speter			rtval = (pde & PG_PS_FRAME) | (va & PDRMASK);
1496251881Speter		else {
1497251881Speter			pte = pmap_pte(pmap, va);
1498251881Speter			rtval = (*pte & PG_FRAME) | (va & PAGE_MASK);
1499251881Speter			pmap_pte_release(pte);
1500251881Speter		}
1501251881Speter	}
1502251881Speter	PMAP_UNLOCK(pmap);
1503251881Speter	return (rtval);
1504251881Speter}
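
/*
 * Usage sketch (editorial illustration only): translating a virtual address
 * to its physical address, whether it is backed by a 4KB page or a 2- or
 * 4MB superpage, is simply
 *
 *	pa = pmap_extract(pmap, va);
 *
 * where a return value of 0 means that no mapping exists at va.
 */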
1505251881Speter
1506251881Speter/*
1507251881Speter *	Routine:	pmap_extract_and_hold
1508251881Speter *	Function:
1509251881Speter *		Atomically extract and hold the physical page
1510251881Speter *		with the given pmap and virtual address pair
1511251881Speter *		if that mapping permits the given protection.
1512251881Speter */
1513251881Spetervm_page_t
1514251881Speterpmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
1515251881Speter{
1516251881Speter	pd_entry_t pde;
1517251881Speter	pt_entry_t pte, *ptep;
1518251881Speter	vm_page_t m;
1519251881Speter	vm_paddr_t pa;
1520251881Speter
1521251881Speter	pa = 0;
1522251881Speter	m = NULL;
1523251881Speter	PMAP_LOCK(pmap);
1524251881Speterretry:
1525251881Speter	pde = *pmap_pde(pmap, va);
1526251881Speter	if (pde != 0) {
1527251881Speter		if (pde & PG_PS) {
1528251881Speter			if ((pde & PG_RW) || (prot & VM_PROT_WRITE) == 0) {
1529251881Speter				if (vm_page_pa_tryrelock(pmap, (pde &
1530251881Speter				    PG_PS_FRAME) | (va & PDRMASK), &pa))
1531251881Speter					goto retry;
1532251881Speter				m = PHYS_TO_VM_PAGE((pde & PG_PS_FRAME) |
1533251881Speter				    (va & PDRMASK));
1534251881Speter				vm_page_hold(m);
1535251881Speter			}
1536251881Speter		} else {
1537251881Speter			ptep = pmap_pte(pmap, va);
1538251881Speter			pte = *ptep;
1539251881Speter			pmap_pte_release(ptep);
1540251881Speter			if (pte != 0 &&
1541251881Speter			    ((pte & PG_RW) || (prot & VM_PROT_WRITE) == 0)) {
1542251881Speter				if (vm_page_pa_tryrelock(pmap, pte & PG_FRAME,
1543251881Speter				    &pa))
1544251881Speter					goto retry;
1545251881Speter				m = PHYS_TO_VM_PAGE(pte & PG_FRAME);
1546251881Speter				vm_page_hold(m);
1547251881Speter			}
1548251881Speter		}
1549251881Speter	}
1550251881Speter	PA_UNLOCK_COND(pa);
1551251881Speter	PMAP_UNLOCK(pmap);
1552251881Speter	return (m);
1553251881Speter}
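
/*
 * Editorial note on the retry loop above: vm_page_pa_tryrelock() attempts to
 * trade the currently held page lock for the one covering the new physical
 * address; if it had to drop the pmap lock to do so, it returns non-zero and
 * the lookup is restarted, because the mapping may have changed while the
 * lock was released.
 */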
1554251881Speter
1555251881Speter/***************************************************
1556251881Speter * Low level mapping routines.....
1557251881Speter ***************************************************/
1558251881Speter
1559251881Speter/*
1560251881Speter * Add a wired page to the kva.
1561251881Speter * Note: not SMP coherent.
1562251881Speter *
1563251881Speter * This function may be used before pmap_bootstrap() is called.
1564251881Speter */
1565251881SpeterPMAP_INLINE void
1566251881Speterpmap_kenter(vm_offset_t va, vm_paddr_t pa)
1567251881Speter{
1568251881Speter	pt_entry_t *pte;
1569251881Speter
1570251881Speter	pte = vtopte(va);
1571251881Speter	pte_store(pte, pa | PG_RW | PG_V | pgeflag);
1572251881Speter}
1573251881Speter
1574251881Speterstatic __inline void
1575251881Speterpmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode)
1576251881Speter{
1577251881Speter	pt_entry_t *pte;
1578251881Speter
1579251881Speter	pte = vtopte(va);
1580251881Speter	pte_store(pte, pa | PG_RW | PG_V | pgeflag | pmap_cache_bits(mode, 0));
1581251881Speter}
1582251881Speter
1583251881Speter/*
1584251881Speter * Remove a page from the kernel pagetables.
1585251881Speter * Note: not SMP coherent.
1586251881Speter *
1587251881Speter * This function may be used before pmap_bootstrap() is called.
1588251881Speter */
1589251881SpeterPMAP_INLINE void
1590251881Speterpmap_kremove(vm_offset_t va)
1591251881Speter{
1592251881Speter	pt_entry_t *pte;
1593251881Speter
1594251881Speter	pte = vtopte(va);
1595251881Speter	pte_clear(pte);
1596251881Speter}
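
/*
 * Editorial illustration: because pmap_kenter() and pmap_kremove() are not
 * SMP coherent, a caller that temporarily maps a physical page this way is
 * expected to perform its own TLB shootdown, e.g.
 *
 *	pmap_kenter(va, pa);
 *	pmap_invalidate_page(kernel_pmap, va);
 *	...
 *	pmap_kremove(va);
 *	pmap_invalidate_page(kernel_pmap, va);
 */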
1597251881Speter
1598251881Speter/*
1599251881Speter *	Used to map a range of physical addresses into kernel
1600251881Speter *	virtual address space.
1601251881Speter *
1602251881Speter *	The value passed in '*virt' is a suggested virtual address for
1603251881Speter *	the mapping. Architectures which can support a direct-mapped
1604251881Speter *	physical to virtual region can return the appropriate address
1605251881Speter *	within that region, leaving '*virt' unchanged. Other
1606251881Speter *	architectures should map the pages starting at '*virt' and
1607251881Speter *	update '*virt' with the first usable address after the mapped
1608251881Speter *	region.
1609251881Speter */
1610251881Spetervm_offset_t
1611251881Speterpmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
1612251881Speter{
1613251881Speter	vm_offset_t va, sva;
1614251881Speter	vm_paddr_t superpage_offset;
1615251881Speter	pd_entry_t newpde;
1616251881Speter
1617251881Speter	va = *virt;
1618251881Speter	/*
1619251881Speter	 * Does the physical address range's size and alignment permit at
1620251881Speter	 * least one superpage mapping to be created?
1621251881Speter	 */
1622251881Speter	superpage_offset = start & PDRMASK;
1623251881Speter	if ((end - start) - ((NBPDR - superpage_offset) & PDRMASK) >= NBPDR) {
1624251881Speter		/*
1625251881Speter		 * Increase the starting virtual address so that its alignment
1626251881Speter		 * does not preclude the use of superpage mappings.
1627251881Speter		 */
1628251881Speter		if ((va & PDRMASK) < superpage_offset)
1629251881Speter			va = (va & ~PDRMASK) + superpage_offset;
1630251881Speter		else if ((va & PDRMASK) > superpage_offset)
1631251881Speter			va = ((va + PDRMASK) & ~PDRMASK) + superpage_offset;
1632251881Speter	}
1633251881Speter	sva = va;
1634251881Speter	while (start < end) {
1635251881Speter		if ((start & PDRMASK) == 0 && end - start >= NBPDR &&
1636251881Speter		    pseflag) {
1637251881Speter			KASSERT((va & PDRMASK) == 0,
1638251881Speter			    ("pmap_map: misaligned va %#x", va));
1639251881Speter			newpde = start | PG_PS | pgeflag | PG_RW | PG_V;
1640251881Speter			pmap_kenter_pde(va, newpde);
1641251881Speter			va += NBPDR;
1642251881Speter			start += NBPDR;
1643251881Speter		} else {
1644251881Speter			pmap_kenter(va, start);
1645251881Speter			va += PAGE_SIZE;
1646251881Speter			start += PAGE_SIZE;
1647251881Speter		}
1648251881Speter	}
1649251881Speter	pmap_invalidate_range(kernel_pmap, sva, va);
1650251881Speter	*virt = va;
1651251881Speter	return (sva);
1652251881Speter}
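
/*
 * Editorial worked example of the alignment fix-up above (illustrative
 * values): if start = 0x00c01000, then superpage_offset = 0x1000.  A
 * suggested va of 0xc0000000 has (va & PDRMASK) == 0, which is less than
 * superpage_offset, so va is advanced to 0xc0001000.  From then on start and
 * va share the same offset within a superpage, and once the loop reaches a
 * superpage boundary it can switch from 4KB mappings to PG_PS mappings.
 */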
1653251881Speter
1654251881Speter
1655251881Speter/*
1656251881Speter * Add a list of wired pages to the kva.
1657251881Speter * This routine is only used for temporary
1658251881Speter * kernel mappings that do not need to have
1659251881Speter * page modification or references recorded.
1660251881Speter * Note that old mappings are simply written
1661251881Speter * over.  The page *must* be wired.
1662251881Speter * Note: SMP coherent.  Uses a ranged shootdown IPI.
1663251881Speter */
1664251881Spetervoid
1665251881Speterpmap_qenter(vm_offset_t sva, vm_page_t *ma, int count)
1666251881Speter{
1667251881Speter	pt_entry_t *endpte, oldpte, pa, *pte;
1668251881Speter	vm_page_t m;
1669251881Speter
1670251881Speter	oldpte = 0;
1671251881Speter	pte = vtopte(sva);
1672251881Speter	endpte = pte + count;
1673251881Speter	while (pte < endpte) {
1674251881Speter		m = *ma++;
1675251881Speter		pa = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(m->md.pat_mode, 0);
1676251881Speter		if ((*pte & (PG_FRAME | PG_PTE_CACHE)) != pa) {
1677251881Speter			oldpte |= *pte;
1678251881Speter			pte_store(pte, pa | pgeflag | PG_RW | PG_V);
1679251881Speter		}
1680251881Speter		pte++;
1681251881Speter	}
1682251881Speter	if (__predict_false((oldpte & PG_V) != 0))
1683251881Speter		pmap_invalidate_range(kernel_pmap, sva, sva + count *
1684251881Speter		    PAGE_SIZE);
1685251881Speter}
1686251881Speter
1687251881Speter/*
1688251881Speter * This routine tears out page mappings from the
1689251881Speter * kernel -- it is meant only for temporary mappings.
1690251881Speter * Note: SMP coherent.  Uses a ranged shootdown IPI.
1691251881Speter */
1692251881Spetervoid
1693251881Speterpmap_qremove(vm_offset_t sva, int count)
1694251881Speter{
1695251881Speter	vm_offset_t va;
1696251881Speter
1697251881Speter	va = sva;
1698251881Speter	while (count-- > 0) {
1699251881Speter		pmap_kremove(va);
1700251881Speter		va += PAGE_SIZE;
1701251881Speter	}
1702251881Speter	pmap_invalidate_range(kernel_pmap, sva, va);
1703251881Speter}
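
/*
 * Editorial illustration: a typical temporary-mapping sequence built on the
 * two routines above; both are SMP coherent and perform any needed ranged
 * shootdown themselves:
 *
 *	pmap_qenter(sva, ma, npages);
 *	... access the pages through sva ...
 *	pmap_qremove(sva, npages);
 */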
1704251881Speter
1705251881Speter/***************************************************
1706251881Speter * Page table page management routines.....
1707251881Speter ***************************************************/
1708251881Speterstatic __inline void
1709251881Speterpmap_free_zero_pages(struct spglist *free)
1710251881Speter{
1711251881Speter	vm_page_t m;
1712251881Speter
1713251881Speter	while ((m = SLIST_FIRST(free)) != NULL) {
1714251881Speter		SLIST_REMOVE_HEAD(free, plinks.s.ss);
1715251881Speter		/* Preserve the page's PG_ZERO setting. */
1716251881Speter		vm_page_free_toq(m);
1717251881Speter	}
1718251881Speter}
1719251881Speter
1720251881Speter/*
1721251881Speter * Schedule the specified unused page table page to be freed.  Specifically,
1722251881Speter * add the page to the specified list of pages that will be released to the
1723251881Speter * physical memory manager after the TLB has been updated.
1724251881Speter */
1725251881Speterstatic __inline void
1726251881Speterpmap_add_delayed_free_list(vm_page_t m, struct spglist *free,
1727251881Speter    boolean_t set_PG_ZERO)
1728251881Speter{
1729251881Speter
1730251881Speter	if (set_PG_ZERO)
1731251881Speter		m->flags |= PG_ZERO;
1732251881Speter	else
1733251881Speter		m->flags &= ~PG_ZERO;
1734251881Speter	SLIST_INSERT_HEAD(free, m, plinks.s.ss);
1735251881Speter}
1736251881Speter
1737251881Speter/*
1738251881Speter * Inserts the specified page table page into the specified pmap's collection
1739251881Speter * of idle page table pages.  Each of a pmap's page table pages is responsible
1740251881Speter * for mapping a distinct range of virtual addresses.  The pmap's collection is
1741251881Speter * ordered by this virtual address range.
1742251881Speter */
1743251881Speterstatic __inline int
1744251881Speterpmap_insert_pt_page(pmap_t pmap, vm_page_t mpte)
1745251881Speter{
1746251881Speter
1747251881Speter	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1748251881Speter	return (vm_radix_insert(&pmap->pm_root, mpte));
1749251881Speter}
1750251881Speter
1751251881Speter/*
1752251881Speter * Removes the page table page mapping the specified virtual address from the
1753251881Speter * specified pmap's collection of idle page table pages, and returns it.
1754251881Speter * Returns NULL if there is no page table page corresponding to the
1755251881Speter * specified virtual address.
1756251881Speter */
1757251881Speterstatic __inline vm_page_t
1758251881Speterpmap_remove_pt_page(pmap_t pmap, vm_offset_t va)
1759251881Speter{
1760251881Speter
1761251881Speter	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1762251881Speter	return (vm_radix_remove(&pmap->pm_root, va >> PDRSHIFT));
1763251881Speter}
1764251881Speter
1765251881Speter/*
1766251881Speter * Decrements a page table page's wire count, which is used to record the
1767251881Speter * number of valid page table entries within the page.  If the wire count
1768251881Speter * drops to zero, then the page table page is unmapped.  Returns TRUE if the
1769251881Speter * page table page was unmapped and FALSE otherwise.
1770251881Speter */
1771251881Speterstatic inline boolean_t
1772251881Speterpmap_unwire_ptp(pmap_t pmap, vm_page_t m, struct spglist *free)
1773251881Speter{
1774251881Speter
1775251881Speter	--m->wire_count;
1776251881Speter	if (m->wire_count == 0) {
1777251881Speter		_pmap_unwire_ptp(pmap, m, free);
1778251881Speter		return (TRUE);
1779251881Speter	} else
1780251881Speter		return (FALSE);
1781251881Speter}
1782251881Speter
1783251881Speterstatic void
1784251881Speter_pmap_unwire_ptp(pmap_t pmap, vm_page_t m, struct spglist *free)
1785251881Speter{
1786251881Speter	vm_offset_t pteva;
1787251881Speter
1788251881Speter	/*
1789251881Speter	 * unmap the page table page
1790251881Speter	 */
1791251881Speter	pmap->pm_pdir[m->pindex] = 0;
1792251881Speter	--pmap->pm_stats.resident_count;
1793251881Speter
1794251881Speter	/*
1795251881Speter	 * This is a release store so that the ordinary store unmapping
1796251881Speter	 * the page table page is globally performed before TLB shoot-
1797251881Speter	 * down is begun.
1798251881Speter	 */
1799251881Speter	atomic_subtract_rel_int(&vm_cnt.v_wire_count, 1);
1800251881Speter
1801251881Speter	/*
1802251881Speter	 * Do an invltlb to make the invalidated mapping
1803251881Speter	 * take effect immediately.
1804251881Speter	 */
1805251881Speter	pteva = VM_MAXUSER_ADDRESS + i386_ptob(m->pindex);
1806251881Speter	pmap_invalidate_page(pmap, pteva);
1807251881Speter
1808251881Speter	/*
1809251881Speter	 * Put page on a list so that it is released after
1810251881Speter	 * *ALL* TLB shootdown is done
1811251881Speter	 */
1812251881Speter	pmap_add_delayed_free_list(m, free, TRUE);
1813251881Speter}
1814251881Speter
1815251881Speter/*
1816251881Speter * After removing a page table entry, this routine is used to
1817251881Speter * conditionally free the page, and manage the hold/wire counts.
1818251881Speter */
1819251881Speterstatic int
1820251881Speterpmap_unuse_pt(pmap_t pmap, vm_offset_t va, struct spglist *free)
1821251881Speter{
1822251881Speter	pd_entry_t ptepde;
1823251881Speter	vm_page_t mpte;
1824251881Speter
1825251881Speter	if (va >= VM_MAXUSER_ADDRESS)
1826251881Speter		return (0);
1827251881Speter	ptepde = *pmap_pde(pmap, va);
1828251881Speter	mpte = PHYS_TO_VM_PAGE(ptepde & PG_FRAME);
1829251881Speter	return (pmap_unwire_ptp(pmap, mpte, free));
1830251881Speter}
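
/*
 * Editorial illustration of the delayed-free protocol used above: page table
 * pages released by pmap_unuse_pt() are only queued on the caller's list;
 * they are returned to the page allocator after the TLB has been brought up
 * to date, e.g.
 *
 *	SLIST_INIT(&free);
 *	pmap_unuse_pt(pmap, va, &free);
 *	pmap_invalidate_page(pmap, va);
 *	pmap_free_zero_pages(&free);
 */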
1831251881Speter
1832251881Speter/*
1833251881Speter * Initialize the pmap for the swapper process.
1834251881Speter */
1835251881Spetervoid
1836251881Speterpmap_pinit0(pmap_t pmap)
1837251881Speter{
1838251881Speter
1839251881Speter	PMAP_LOCK_INIT(pmap);
1840251881Speter	/*
1841251881Speter	 * Since the page table directory is shared with the kernel pmap,
1842251881Speter	 * which is already included in the list "allpmaps", this pmap does
1843251881Speter	 * not need to be inserted into that list.
1844251881Speter	 */
1845251881Speter	pmap->pm_pdir = (pd_entry_t *)(KERNBASE + (vm_offset_t)IdlePTD);
1846251881Speter#if defined(PAE) || defined(PAE_TABLES)
1847251881Speter	pmap->pm_pdpt = (pdpt_entry_t *)(KERNBASE + (vm_offset_t)IdlePDPT);
1848251881Speter#endif
1849251881Speter	pmap->pm_root.rt_root = 0;
1850251881Speter	CPU_ZERO(&pmap->pm_active);
1851251881Speter	PCPU_SET(curpmap, pmap);
1852251881Speter	TAILQ_INIT(&pmap->pm_pvchunk);
1853251881Speter	bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
1854251881Speter}
1855251881Speter
1856251881Speter/*
1857251881Speter * Initialize a preallocated and zeroed pmap structure,
1858251881Speter * such as one in a vmspace structure.
1859251881Speter */
1860251881Speterint
1861251881Speterpmap_pinit(pmap_t pmap)
1862251881Speter{
1863251881Speter	vm_page_t m, ptdpg[NPGPTD];
1864251881Speter	vm_paddr_t pa;
1865251881Speter	int i;
1866251881Speter
1867251881Speter	/*
1868251881Speter	 * No need to allocate page table space yet but we do need a valid
1869251881Speter	 * page directory table.
1870251881Speter	 */
1871251881Speter	if (pmap->pm_pdir == NULL) {
1872251881Speter		pmap->pm_pdir = (pd_entry_t *)kva_alloc(NBPTD);
1873251881Speter		if (pmap->pm_pdir == NULL)
1874251881Speter			return (0);
1875251881Speter#if defined(PAE) || defined(PAE_TABLES)
1876251881Speter		pmap->pm_pdpt = uma_zalloc(pdptzone, M_WAITOK | M_ZERO);
1877251881Speter		KASSERT(((vm_offset_t)pmap->pm_pdpt &
1878251881Speter		    ((NPGPTD * sizeof(pdpt_entry_t)) - 1)) == 0,
1879251881Speter		    ("pmap_pinit: pdpt misaligned"));
1880251881Speter		KASSERT(pmap_kextract((vm_offset_t)pmap->pm_pdpt) < (4ULL<<30),
1881251881Speter		    ("pmap_pinit: pdpt above 4g"));
1882251881Speter#endif
1883251881Speter		pmap->pm_root.rt_root = 0;
1884251881Speter	}
1885251881Speter	KASSERT(vm_radix_is_empty(&pmap->pm_root),
1886251881Speter	    ("pmap_pinit: pmap has reserved page table page(s)"));
1887251881Speter
1888251881Speter	/*
1889251881Speter	 * allocate the page directory page(s)
1890251881Speter	 */
1891251881Speter	for (i = 0; i < NPGPTD;) {
1892251881Speter		m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
1893251881Speter		    VM_ALLOC_WIRED | VM_ALLOC_ZERO);
1894251881Speter		if (m == NULL)
1895251881Speter			VM_WAIT;
1896251881Speter		else {
1897251881Speter			ptdpg[i++] = m;
1898251881Speter		}
1899251881Speter	}
1900251881Speter
1901251881Speter	pmap_qenter((vm_offset_t)pmap->pm_pdir, ptdpg, NPGPTD);
1902251881Speter
1903251881Speter	for (i = 0; i < NPGPTD; i++)
1904251881Speter		if ((ptdpg[i]->flags & PG_ZERO) == 0)
1905251881Speter			pagezero(pmap->pm_pdir + (i * NPDEPG));
1906251881Speter
1907251881Speter	mtx_lock_spin(&allpmaps_lock);
1908251881Speter	LIST_INSERT_HEAD(&allpmaps, pmap, pm_list);
1909251881Speter	/* Copy the kernel page table directory entries. */
1910251881Speter	bcopy(PTD + KPTDI, pmap->pm_pdir + KPTDI, nkpt * sizeof(pd_entry_t));
1911251881Speter	mtx_unlock_spin(&allpmaps_lock);
1912251881Speter
1913251881Speter	/* install self-referential address mapping entry(s) */
1914251881Speter	for (i = 0; i < NPGPTD; i++) {
1915251881Speter		pa = VM_PAGE_TO_PHYS(ptdpg[i]);
1916251881Speter		pmap->pm_pdir[PTDPTDI + i] = pa | PG_V | PG_RW | PG_A | PG_M;
1917251881Speter#if defined(PAE) || defined(PAE_TABLES)
1918251881Speter		pmap->pm_pdpt[i] = pa | PG_V;
1919251881Speter#endif
1920251881Speter	}
1921251881Speter
1922251881Speter	CPU_ZERO(&pmap->pm_active);
1923251881Speter	TAILQ_INIT(&pmap->pm_pvchunk);
1924251881Speter	bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
1925251881Speter
1926251881Speter	return (1);
1927251881Speter}
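
/*
 * Editorial note: NPGPTD is 4 with PAE (or PAE_TABLES) and 1 otherwise, so
 * the loop above installs either four self-referential entries at
 * PTDPTDI..PTDPTDI + 3 or a single one.  Either way, once the pmap is
 * current its page table pages appear at the recursive mapping, which is
 * what allows vtopte(va) to reach any of its PTEs directly.
 */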
1928251881Speter
1929251881Speter/*
1930251881Speter * this routine is called if the page table page is not
1931251881Speter * mapped correctly.
1932251881Speter */
1933251881Speterstatic vm_page_t
1934251881Speter_pmap_allocpte(pmap_t pmap, u_int ptepindex, u_int flags)
1935251881Speter{
1936251881Speter	vm_paddr_t ptepa;
1937251881Speter	vm_page_t m;
1938251881Speter
1939251881Speter	/*
1940251881Speter	 * Allocate a page table page.
1941251881Speter	 */
1942251881Speter	if ((m = vm_page_alloc(NULL, ptepindex, VM_ALLOC_NOOBJ |
1943251881Speter	    VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
1944251881Speter		if ((flags & PMAP_ENTER_NOSLEEP) == 0) {
1945251881Speter			PMAP_UNLOCK(pmap);
1946251881Speter			rw_wunlock(&pvh_global_lock);
1947251881Speter			VM_WAIT;
1948251881Speter			rw_wlock(&pvh_global_lock);
1949251881Speter			PMAP_LOCK(pmap);
1950251881Speter		}
1951251881Speter
1952251881Speter		/*
1953251881Speter		 * Indicate the need to retry.  While waiting, the page table
1954251881Speter		 * page may have been allocated.
1955251881Speter		 */
1956251881Speter		return (NULL);
1957251881Speter	}
1958251881Speter	if ((m->flags & PG_ZERO) == 0)
1959251881Speter		pmap_zero_page(m);
1960251881Speter
1961251881Speter	/*
1962251881Speter	 * Map the pagetable page into the process address space, if
1963251881Speter	 * it isn't already there.
1964251881Speter	 */
1965251881Speter
1966251881Speter	pmap->pm_stats.resident_count++;
1967251881Speter
1968251881Speter	ptepa = VM_PAGE_TO_PHYS(m);
1969251881Speter	pmap->pm_pdir[ptepindex] =
1970251881Speter		(pd_entry_t) (ptepa | PG_U | PG_RW | PG_V | PG_A | PG_M);
1971251881Speter
1972251881Speter	return (m);
1973251881Speter}
1974251881Speter
1975251881Speterstatic vm_page_t
1976251881Speterpmap_allocpte(pmap_t pmap, vm_offset_t va, u_int flags)
1977251881Speter{
1978251881Speter	u_int ptepindex;
1979251881Speter	pd_entry_t ptepa;
1980251881Speter	vm_page_t m;
1981251881Speter
1982251881Speter	/*
1983251881Speter	 * Calculate pagetable page index
1984251881Speter	 */
1985251881Speter	ptepindex = va >> PDRSHIFT;
1986251881Speterretry:
1987251881Speter	/*
1988251881Speter	 * Get the page directory entry
1989251881Speter	 */
1990251881Speter	ptepa = pmap->pm_pdir[ptepindex];
1991251881Speter
1992251881Speter	/*
1993251881Speter	 * This supports switching from a 4MB page to a
1994251881Speter	 * normal 4K page.
1995251881Speter	 */
1996251881Speter	if (ptepa & PG_PS) {
1997251881Speter		(void)pmap_demote_pde(pmap, &pmap->pm_pdir[ptepindex], va);
1998251881Speter		ptepa = pmap->pm_pdir[ptepindex];
1999251881Speter	}
2000251881Speter
2001251881Speter	/*
2002251881Speter	 * If the page table page is mapped, we just increment the
2003251881Speter	 * hold count, and activate it.
2004251881Speter	 */
2005251881Speter	if (ptepa) {
2006251881Speter		m = PHYS_TO_VM_PAGE(ptepa & PG_FRAME);
2007251881Speter		m->wire_count++;
2008251881Speter	} else {
2009251881Speter		/*
2010251881Speter		 * Here if the pte page isn't mapped, or if it has
2011251881Speter		 * been deallocated.
2012251881Speter		 */
2013251881Speter		m = _pmap_allocpte(pmap, ptepindex, flags);
2014251881Speter		if (m == NULL && (flags & PMAP_ENTER_NOSLEEP) == 0)
2015251881Speter			goto retry;
2016251881Speter	}
2017251881Speter	return (m);
2018251881Speter}
2019251881Speter
2020251881Speter
2021251881Speter/***************************************************
2022251881Speter * Pmap allocation/deallocation routines.
2023251881Speter ***************************************************/
2024251881Speter
2025251881Speter/*
2026251881Speter * Release any resources held by the given physical map.
2027251881Speter * Called when a pmap initialized by pmap_pinit is being released.
2028251881Speter * Should only be called if the map contains no valid mappings.
2029251881Speter */
2030251881Spetervoid
2031251881Speterpmap_release(pmap_t pmap)
2032251881Speter{
2033251881Speter	vm_page_t m, ptdpg[NPGPTD];
2034251881Speter	int i;
2035251881Speter
2036251881Speter	KASSERT(pmap->pm_stats.resident_count == 0,
2037251881Speter	    ("pmap_release: pmap resident count %ld != 0",
2038251881Speter	    pmap->pm_stats.resident_count));
2039251881Speter	KASSERT(vm_radix_is_empty(&pmap->pm_root),
2040251881Speter	    ("pmap_release: pmap has reserved page table page(s)"));
2041251881Speter	KASSERT(CPU_EMPTY(&pmap->pm_active),
2042251881Speter	    ("releasing active pmap %p", pmap));
2043251881Speter
2044251881Speter	mtx_lock_spin(&allpmaps_lock);
2045251881Speter	LIST_REMOVE(pmap, pm_list);
2046251881Speter	mtx_unlock_spin(&allpmaps_lock);
2047251881Speter
2048251881Speter	for (i = 0; i < NPGPTD; i++)
2049251881Speter		ptdpg[i] = PHYS_TO_VM_PAGE(pmap->pm_pdir[PTDPTDI + i] &
2050251881Speter		    PG_FRAME);
2051251881Speter
2052251881Speter	bzero(pmap->pm_pdir + PTDPTDI, (nkpt + NPGPTD) *
2053251881Speter	    sizeof(*pmap->pm_pdir));
2054251881Speter
2055251881Speter	pmap_qremove((vm_offset_t)pmap->pm_pdir, NPGPTD);
2056251881Speter
2057251881Speter	for (i = 0; i < NPGPTD; i++) {
2058251881Speter		m = ptdpg[i];
2059251881Speter#if defined(PAE) || defined(PAE_TABLES)
2060251881Speter		KASSERT(VM_PAGE_TO_PHYS(m) == (pmap->pm_pdpt[i] & PG_FRAME),
2061251881Speter		    ("pmap_release: got wrong ptd page"));
2062251881Speter#endif
2063251881Speter		m->wire_count--;
2064251881Speter		atomic_subtract_int(&vm_cnt.v_wire_count, 1);
2065251881Speter		vm_page_free_zero(m);
2066251881Speter	}
2067251881Speter}
2068251881Speter
2069251881Speterstatic int
2070251881Speterkvm_size(SYSCTL_HANDLER_ARGS)
2071251881Speter{
2072251881Speter	unsigned long ksize = VM_MAX_KERNEL_ADDRESS - KERNBASE;
2073251881Speter
2074251881Speter	return (sysctl_handle_long(oidp, &ksize, 0, req));
2075251881Speter}
2076251881SpeterSYSCTL_PROC(_vm, OID_AUTO, kvm_size, CTLTYPE_LONG|CTLFLAG_RD,
2077251881Speter    0, 0, kvm_size, "IU", "Size of KVM");
2078251881Speter
2079251881Speterstatic int
2080251881Speterkvm_free(SYSCTL_HANDLER_ARGS)
2081251881Speter{
2082251881Speter	unsigned long kfree = VM_MAX_KERNEL_ADDRESS - kernel_vm_end;
2083251881Speter
2084251881Speter	return (sysctl_handle_long(oidp, &kfree, 0, req));
2085251881Speter}
2086251881SpeterSYSCTL_PROC(_vm, OID_AUTO, kvm_free, CTLTYPE_LONG|CTLFLAG_RD,
2087251881Speter    0, 0, kvm_free, "IU", "Amount of KVM free");
2088251881Speter
2089251881Speter/*
2090251881Speter * grow the number of kernel page table entries, if needed
2091251881Speter */
2092251881Spetervoid
2093251881Speterpmap_growkernel(vm_offset_t addr)
2094251881Speter{
2095251881Speter	vm_paddr_t ptppaddr;
2096251881Speter	vm_page_t nkpg;
2097251881Speter	pd_entry_t newpdir;
2098251881Speter
2099251881Speter	mtx_assert(&kernel_map->system_mtx, MA_OWNED);
2100251881Speter	addr = roundup2(addr, NBPDR);
2101251881Speter	if (addr - 1 >= kernel_map->max_offset)
2102251881Speter		addr = kernel_map->max_offset;
2103251881Speter	while (kernel_vm_end < addr) {
2104251881Speter		if (pdir_pde(PTD, kernel_vm_end)) {
2105251881Speter			kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK;
2106251881Speter			if (kernel_vm_end - 1 >= kernel_map->max_offset) {
2107251881Speter				kernel_vm_end = kernel_map->max_offset;
2108251881Speter				break;
2109251881Speter			}
2110251881Speter			continue;
2111251881Speter		}
2112251881Speter
2113251881Speter		nkpg = vm_page_alloc(NULL, kernel_vm_end >> PDRSHIFT,
2114251881Speter		    VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
2115251881Speter		    VM_ALLOC_ZERO);
2116251881Speter		if (nkpg == NULL)
2117251881Speter			panic("pmap_growkernel: no memory to grow kernel");
2118251881Speter
2119251881Speter		nkpt++;
2120251881Speter
2121251881Speter		if ((nkpg->flags & PG_ZERO) == 0)
2122251881Speter			pmap_zero_page(nkpg);
2123251881Speter		ptppaddr = VM_PAGE_TO_PHYS(nkpg);
2124251881Speter		newpdir = (pd_entry_t) (ptppaddr | PG_V | PG_RW | PG_A | PG_M);
2125251881Speter		pdir_pde(KPTD, kernel_vm_end) = pgeflag | newpdir;
2126251881Speter
2127251881Speter		pmap_kenter_pde(kernel_vm_end, newpdir);
2128251881Speter		kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK;
2129251881Speter		if (kernel_vm_end - 1 >= kernel_map->max_offset) {
2130251881Speter			kernel_vm_end = kernel_map->max_offset;
2131251881Speter			break;
2132251881Speter		}
2133251881Speter	}
2134251881Speter}
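
/*
 * Editorial note on the rounding above: addr is rounded up to the next
 * page-directory boundary, so the kernel page table always grows by whole
 * page table pages, each covering NBPDR (2 or 4MB, depending on PAE) of KVA.
 */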
2135251881Speter
2136251881Speter
2137251881Speter/***************************************************
2138251881Speter * page management routines.
2139251881Speter ***************************************************/
2140251881Speter
2141251881SpeterCTASSERT(sizeof(struct pv_chunk) == PAGE_SIZE);
2142251881SpeterCTASSERT(_NPCM == 11);
2143251881SpeterCTASSERT(_NPCPV == 336);
2144251881Speter
2145251881Speterstatic __inline struct pv_chunk *
2146251881Speterpv_to_chunk(pv_entry_t pv)
2147251881Speter{
2148251881Speter
2149251881Speter	return ((struct pv_chunk *)((uintptr_t)pv & ~(uintptr_t)PAGE_MASK));
2150251881Speter}
2151251881Speter
2152251881Speter#define PV_PMAP(pv) (pv_to_chunk(pv)->pc_pmap)
2153251881Speter
2154251881Speter#define	PC_FREE0_9	0xfffffffful	/* Free values for index 0 through 9 */
2155251881Speter#define	PC_FREE10	0x0000fffful	/* Free values for index 10 */
2156251881Speter
2157251881Speterstatic const uint32_t pc_freemask[_NPCM] = {
2158251881Speter	PC_FREE0_9, PC_FREE0_9, PC_FREE0_9,
2159251881Speter	PC_FREE0_9, PC_FREE0_9, PC_FREE0_9,
2160251881Speter	PC_FREE0_9, PC_FREE0_9, PC_FREE0_9,
2161251881Speter	PC_FREE0_9, PC_FREE10
2162251881Speter};
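
/*
 * Editorial worked example of the free-mask layout above: a pv_chunk holds
 * _NPCPV (336) pv entries tracked by _NPCM (11) 32-bit masks, ten full words
 * plus a final 16-bit word (10 * 32 + 16 = 336).  Allocating entry i clears
 * bit (i % 32) in word (i / 32); freeing it sets the bit again, so a chunk
 * whose masks all equal pc_freemask[] is completely free and may be unmapped
 * and released.
 */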
2163251881Speter
2164251881SpeterSYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_count, CTLFLAG_RD, &pv_entry_count, 0,
2165251881Speter	"Current number of pv entries");
2166251881Speter
2167251881Speter#ifdef PV_STATS
2168251881Speterstatic int pc_chunk_count, pc_chunk_allocs, pc_chunk_frees, pc_chunk_tryfail;
2169251881Speter
2170251881SpeterSYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_count, CTLFLAG_RD, &pc_chunk_count, 0,
2171251881Speter	"Current number of pv entry chunks");
2172251881SpeterSYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_allocs, CTLFLAG_RD, &pc_chunk_allocs, 0,
2173251881Speter	"Current number of pv entry chunks allocated");
2174251881SpeterSYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_frees, CTLFLAG_RD, &pc_chunk_frees, 0,
2175251881Speter	"Current number of pv entry chunks frees");
2176251881SpeterSYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_tryfail, CTLFLAG_RD, &pc_chunk_tryfail, 0,
2177251881Speter	"Number of times tried to get a chunk page but failed.");
2178251881Speter
2179251881Speterstatic long pv_entry_frees, pv_entry_allocs;
2180251881Speterstatic int pv_entry_spare;
2181251881Speter
2182251881SpeterSYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_frees, CTLFLAG_RD, &pv_entry_frees, 0,
2183251881Speter	"Current number of pv entry frees");
2184251881SpeterSYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_allocs, CTLFLAG_RD, &pv_entry_allocs, 0,
2185251881Speter	"Current number of pv entry allocs");
2186251881SpeterSYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_spare, CTLFLAG_RD, &pv_entry_spare, 0,
2187251881Speter	"Current number of spare pv entries");
2188251881Speter#endif
2189251881Speter
2190251881Speter/*
2191251881Speter * We are in a serious low memory condition.  Resort to
2192251881Speter * drastic measures to free some pages so we can allocate
2193251881Speter * another pv entry chunk.
2194251881Speter */
2195251881Speterstatic vm_page_t
2196251881Speterpmap_pv_reclaim(pmap_t locked_pmap)
2197251881Speter{
2198251881Speter	struct pch newtail;
2199251881Speter	struct pv_chunk *pc;
2200251881Speter	struct md_page *pvh;
2201251881Speter	pd_entry_t *pde;
2202251881Speter	pmap_t pmap;
2203251881Speter	pt_entry_t *pte, tpte;
2204251881Speter	pv_entry_t pv;
2205251881Speter	vm_offset_t va;
2206251881Speter	vm_page_t m, m_pc;
2207251881Speter	struct spglist free;
2208251881Speter	uint32_t inuse;
2209251881Speter	int bit, field, freed;
2210251881Speter
2211251881Speter	PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED);
2212251881Speter	pmap = NULL;
2213251881Speter	m_pc = NULL;
2214251881Speter	SLIST_INIT(&free);
2215251881Speter	TAILQ_INIT(&newtail);
2216251881Speter	while ((pc = TAILQ_FIRST(&pv_chunks)) != NULL && (pv_vafree == 0 ||
2217251881Speter	    SLIST_EMPTY(&free))) {
2218251881Speter		TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
2219251881Speter		if (pmap != pc->pc_pmap) {
2220251881Speter			if (pmap != NULL) {
2221251881Speter				pmap_invalidate_all(pmap);
2222251881Speter				if (pmap != locked_pmap)
2223251881Speter					PMAP_UNLOCK(pmap);
2224251881Speter			}
2225251881Speter			pmap = pc->pc_pmap;
2226251881Speter			/* Avoid deadlock and lock recursion. */
2227251881Speter			if (pmap > locked_pmap)
2228251881Speter				PMAP_LOCK(pmap);
2229251881Speter			else if (pmap != locked_pmap && !PMAP_TRYLOCK(pmap)) {
2230251881Speter				pmap = NULL;
2231251881Speter				TAILQ_INSERT_TAIL(&newtail, pc, pc_lru);
2232251881Speter				continue;
2233251881Speter			}
2234251881Speter		}
2235251881Speter
2236251881Speter		/*
2237251881Speter		 * Destroy every non-wired, 4 KB page mapping in the chunk.
2238251881Speter		 */
2239251881Speter		freed = 0;
2240251881Speter		for (field = 0; field < _NPCM; field++) {
2241251881Speter			for (inuse = ~pc->pc_map[field] & pc_freemask[field];
2242251881Speter			    inuse != 0; inuse &= ~(1UL << bit)) {
2243251881Speter				bit = bsfl(inuse);
2244251881Speter				pv = &pc->pc_pventry[field * 32 + bit];
2245251881Speter				va = pv->pv_va;
2246251881Speter				pde = pmap_pde(pmap, va);
2247251881Speter				if ((*pde & PG_PS) != 0)
2248251881Speter					continue;
2249251881Speter				pte = pmap_pte(pmap, va);
2250251881Speter				tpte = *pte;
2251251881Speter				if ((tpte & PG_W) == 0)
2252251881Speter					tpte = pte_load_clear(pte);
2253251881Speter				pmap_pte_release(pte);
2254251881Speter				if ((tpte & PG_W) != 0)
2255251881Speter					continue;
2256251881Speter				KASSERT(tpte != 0,
2257251881Speter				    ("pmap_pv_reclaim: pmap %p va %x zero pte",
2258251881Speter				    pmap, va));
2259251881Speter				if ((tpte & PG_G) != 0)
2260251881Speter					pmap_invalidate_page(pmap, va);
2261251881Speter				m = PHYS_TO_VM_PAGE(tpte & PG_FRAME);
2262251881Speter				if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
2263251881Speter					vm_page_dirty(m);
2264251881Speter				if ((tpte & PG_A) != 0)
2265251881Speter					vm_page_aflag_set(m, PGA_REFERENCED);
2266251881Speter				TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
2267251881Speter				if (TAILQ_EMPTY(&m->md.pv_list) &&
2268251881Speter				    (m->flags & PG_FICTITIOUS) == 0) {
2269251881Speter					pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
2270251881Speter					if (TAILQ_EMPTY(&pvh->pv_list)) {
2271251881Speter						vm_page_aflag_clear(m,
2272251881Speter						    PGA_WRITEABLE);
2273251881Speter					}
2274251881Speter				}
2275251881Speter				pc->pc_map[field] |= 1UL << bit;
2276251881Speter				pmap_unuse_pt(pmap, va, &free);
2277251881Speter				freed++;
2278251881Speter			}
2279251881Speter		}
2280251881Speter		if (freed == 0) {
2281251881Speter			TAILQ_INSERT_TAIL(&newtail, pc, pc_lru);
2282251881Speter			continue;
2283251881Speter		}
2284251881Speter		/* Every freed mapping is for a 4 KB page. */
2285251881Speter		pmap->pm_stats.resident_count -= freed;
2286251881Speter		PV_STAT(pv_entry_frees += freed);
2287251881Speter		PV_STAT(pv_entry_spare += freed);
2288251881Speter		pv_entry_count -= freed;
2289251881Speter		TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
2290251881Speter		for (field = 0; field < _NPCM; field++)
2291251881Speter			if (pc->pc_map[field] != pc_freemask[field]) {
2292251881Speter				TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc,
2293251881Speter				    pc_list);
2294251881Speter				TAILQ_INSERT_TAIL(&newtail, pc, pc_lru);
2295251881Speter
2296251881Speter				/*
2297251881Speter				 * One freed pv entry in locked_pmap is
2298251881Speter				 * sufficient.
2299251881Speter				 */
2300251881Speter				if (pmap == locked_pmap)
2301251881Speter					goto out;
2302251881Speter				break;
2303251881Speter			}
2304251881Speter		if (field == _NPCM) {
2305251881Speter			PV_STAT(pv_entry_spare -= _NPCPV);
2306251881Speter			PV_STAT(pc_chunk_count--);
2307			PV_STAT(pc_chunk_frees++);
2308			/* Entire chunk is free; return it. */
2309			m_pc = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)pc));
2310			pmap_qremove((vm_offset_t)pc, 1);
2311			pmap_ptelist_free(&pv_vafree, (vm_offset_t)pc);
2312			break;
2313		}
2314	}
2315out:
2316	TAILQ_CONCAT(&pv_chunks, &newtail, pc_lru);
2317	if (pmap != NULL) {
2318		pmap_invalidate_all(pmap);
2319		if (pmap != locked_pmap)
2320			PMAP_UNLOCK(pmap);
2321	}
2322	if (m_pc == NULL && pv_vafree != 0 && SLIST_EMPTY(&free)) {
2323		m_pc = SLIST_FIRST(&free);
2324		SLIST_REMOVE_HEAD(&free, plinks.s.ss);
2325		/* Recycle a freed page table page. */
2326		m_pc->wire_count = 1;
2327		atomic_add_int(&vm_cnt.v_wire_count, 1);
2328	}
2329	pmap_free_zero_pages(&free);
2330	return (m_pc);
2331}
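
/*
 * Editorial note on the lock ordering used above: pmap locks are taken in
 * ascending address order.  A pmap whose address is greater than
 * locked_pmap's is locked unconditionally, while a lower-addressed pmap is
 * only try-locked; this keeps concurrent reclaims from deadlocking against
 * each other.
 */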
2332
2333/*
2334 * free the pv_entry back to the free list
2335 */
2336static void
2337free_pv_entry(pmap_t pmap, pv_entry_t pv)
2338{
2339	struct pv_chunk *pc;
2340	int idx, field, bit;
2341
2342	rw_assert(&pvh_global_lock, RA_WLOCKED);
2343	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2344	PV_STAT(pv_entry_frees++);
2345	PV_STAT(pv_entry_spare++);
2346	pv_entry_count--;
2347	pc = pv_to_chunk(pv);
2348	idx = pv - &pc->pc_pventry[0];
2349	field = idx / 32;
2350	bit = idx % 32;
2351	pc->pc_map[field] |= 1ul << bit;
2352	for (idx = 0; idx < _NPCM; idx++)
2353		if (pc->pc_map[idx] != pc_freemask[idx]) {
2354			/*
2355			 * 98% of the time, pc is already at the head of the
2356			 * list.  If it isn't already, move it to the head.
2357			 */
2358			if (__predict_false(TAILQ_FIRST(&pmap->pm_pvchunk) !=
2359			    pc)) {
2360				TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
2361				TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc,
2362				    pc_list);
2363			}
2364			return;
2365		}
2366	TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
2367	free_pv_chunk(pc);
2368}
2369
2370static void
2371free_pv_chunk(struct pv_chunk *pc)
2372{
2373	vm_page_t m;
2374
2375 	TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
2376	PV_STAT(pv_entry_spare -= _NPCPV);
2377	PV_STAT(pc_chunk_count--);
2378	PV_STAT(pc_chunk_frees++);
2379	/* entire chunk is free, return it */
2380	m = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)pc));
2381	pmap_qremove((vm_offset_t)pc, 1);
2382	vm_page_unwire(m, PQ_NONE);
2383	vm_page_free(m);
2384	pmap_ptelist_free(&pv_vafree, (vm_offset_t)pc);
2385}
2386
2387/*
2388 * get a new pv_entry, allocating a block from the system
2389 * when needed.
2390 */
2391static pv_entry_t
2392get_pv_entry(pmap_t pmap, boolean_t try)
2393{
2394	static const struct timeval printinterval = { 60, 0 };
2395	static struct timeval lastprint;
2396	int bit, field;
2397	pv_entry_t pv;
2398	struct pv_chunk *pc;
2399	vm_page_t m;
2400
2401	rw_assert(&pvh_global_lock, RA_WLOCKED);
2402	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2403	PV_STAT(pv_entry_allocs++);
2404	pv_entry_count++;
2405	if (pv_entry_count > pv_entry_high_water)
2406		if (ratecheck(&lastprint, &printinterval))
2407			printf("Approaching the limit on PV entries, consider "
2408			    "increasing either the vm.pmap.shpgperproc or the "
2409			    "vm.pmap.pv_entry_max tunable.\n");
2410retry:
2411	pc = TAILQ_FIRST(&pmap->pm_pvchunk);
2412	if (pc != NULL) {
2413		for (field = 0; field < _NPCM; field++) {
2414			if (pc->pc_map[field]) {
2415				bit = bsfl(pc->pc_map[field]);
2416				break;
2417			}
2418		}
2419		if (field < _NPCM) {
2420			pv = &pc->pc_pventry[field * 32 + bit];
2421			pc->pc_map[field] &= ~(1ul << bit);
2422			/* If this was the last item, move it to tail */
2423			for (field = 0; field < _NPCM; field++)
2424				if (pc->pc_map[field] != 0) {
2425					PV_STAT(pv_entry_spare--);
2426					return (pv);	/* not full, return */
2427				}
2428			TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
2429			TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list);
2430			PV_STAT(pv_entry_spare--);
2431			return (pv);
2432		}
2433	}
2434	/*
2435	 * Access to the ptelist "pv_vafree" is synchronized by the pvh
2436	 * global lock.  If "pv_vafree" is currently non-empty, it will
2437	 * remain non-empty until pmap_ptelist_alloc() completes.
2438	 */
2439	if (pv_vafree == 0 || (m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL |
2440	    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {
2441		if (try) {
2442			pv_entry_count--;
2443			PV_STAT(pc_chunk_tryfail++);
2444			return (NULL);
2445		}
2446		m = pmap_pv_reclaim(pmap);
2447		if (m == NULL)
2448			goto retry;
2449	}
2450	PV_STAT(pc_chunk_count++);
2451	PV_STAT(pc_chunk_allocs++);
2452	pc = (struct pv_chunk *)pmap_ptelist_alloc(&pv_vafree);
2453	pmap_qenter((vm_offset_t)pc, &m, 1);
2454	pc->pc_pmap = pmap;
2455	pc->pc_map[0] = pc_freemask[0] & ~1ul;	/* preallocated bit 0 */
2456	for (field = 1; field < _NPCM; field++)
2457		pc->pc_map[field] = pc_freemask[field];
2458	TAILQ_INSERT_TAIL(&pv_chunks, pc, pc_lru);
2459	pv = &pc->pc_pventry[0];
2460	TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
2461	PV_STAT(pv_entry_spare += _NPCPV - 1);
2462	return (pv);
2463}
2464
2465static __inline pv_entry_t
2466pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
2467{
2468	pv_entry_t pv;
2469
2470	rw_assert(&pvh_global_lock, RA_WLOCKED);
2471	TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
2472		if (pmap == PV_PMAP(pv) && va == pv->pv_va) {
2473			TAILQ_REMOVE(&pvh->pv_list, pv, pv_next);
2474			break;
2475		}
2476	}
2477	return (pv);
2478}
2479
2480static void
2481pmap_pv_demote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa)
2482{
2483	struct md_page *pvh;
2484	pv_entry_t pv;
2485	vm_offset_t va_last;
2486	vm_page_t m;
2487
2488	rw_assert(&pvh_global_lock, RA_WLOCKED);
2489	KASSERT((pa & PDRMASK) == 0,
2490	    ("pmap_pv_demote_pde: pa is not 4mpage aligned"));
2491
2492	/*
2493	 * Transfer the 4mpage's pv entry for this mapping to the first
2494	 * page's pv list.
2495	 */
2496	pvh = pa_to_pvh(pa);
2497	va = trunc_4mpage(va);
2498	pv = pmap_pvh_remove(pvh, pmap, va);
2499	KASSERT(pv != NULL, ("pmap_pv_demote_pde: pv not found"));
2500	m = PHYS_TO_VM_PAGE(pa);
2501	TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
2502	/* Instantiate the remaining NPTEPG - 1 pv entries. */
2503	va_last = va + NBPDR - PAGE_SIZE;
2504	do {
2505		m++;
2506		KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2507		    ("pmap_pv_demote_pde: page %p is not managed", m));
2508		va += PAGE_SIZE;
2509		pmap_insert_entry(pmap, va, m);
2510	} while (va < va_last);
2511}
2512
2513static void
2514pmap_pv_promote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa)
2515{
2516	struct md_page *pvh;
2517	pv_entry_t pv;
2518	vm_offset_t va_last;
2519	vm_page_t m;
2520
2521	rw_assert(&pvh_global_lock, RA_WLOCKED);
2522	KASSERT((pa & PDRMASK) == 0,
2523	    ("pmap_pv_promote_pde: pa is not 4mpage aligned"));
2524
2525	/*
2526	 * Transfer the first page's pv entry for this mapping to the
2527	 * 4mpage's pv list.  Aside from avoiding the cost of a call
2528	 * to get_pv_entry(), a transfer avoids the possibility that
2529	 * get_pv_entry() calls pmap_collect() and that pmap_collect()
2530	 * removes one of the mappings that is being promoted.
2531	 */
2532	m = PHYS_TO_VM_PAGE(pa);
2533	va = trunc_4mpage(va);
2534	pv = pmap_pvh_remove(&m->md, pmap, va);
2535	KASSERT(pv != NULL, ("pmap_pv_promote_pde: pv not found"));
2536	pvh = pa_to_pvh(pa);
2537	TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next);
2538	/* Free the remaining NPTEPG - 1 pv entries. */
2539	va_last = va + NBPDR - PAGE_SIZE;
2540	do {
2541		m++;
2542		va += PAGE_SIZE;
2543		pmap_pvh_free(&m->md, pmap, va);
2544	} while (va < va_last);
2545}
2546
2547static void
2548pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
2549{
2550	pv_entry_t pv;
2551
2552	pv = pmap_pvh_remove(pvh, pmap, va);
2553	KASSERT(pv != NULL, ("pmap_pvh_free: pv not found"));
2554	free_pv_entry(pmap, pv);
2555}
2556
2557static void
2558pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va)
2559{
2560	struct md_page *pvh;
2561
2562	rw_assert(&pvh_global_lock, RA_WLOCKED);
2563	pmap_pvh_free(&m->md, pmap, va);
2564	if (TAILQ_EMPTY(&m->md.pv_list) && (m->flags & PG_FICTITIOUS) == 0) {
2565		pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
2566		if (TAILQ_EMPTY(&pvh->pv_list))
2567			vm_page_aflag_clear(m, PGA_WRITEABLE);
2568	}
2569}
2570
2571/*
2572 * Create a pv entry for page at pa for
2573 * (pmap, va).
2574 */
2575static void
2576pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m)
2577{
2578	pv_entry_t pv;
2579
2580	rw_assert(&pvh_global_lock, RA_WLOCKED);
2581	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2582	pv = get_pv_entry(pmap, FALSE);
2583	pv->pv_va = va;
2584	TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
2585}
2586
2587/*
2588 * Conditionally create a pv entry.
2589 */
2590static boolean_t
2591pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m)
2592{
2593	pv_entry_t pv;
2594
2595	rw_assert(&pvh_global_lock, RA_WLOCKED);
2596	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2597	if (pv_entry_count < pv_entry_high_water &&
2598	    (pv = get_pv_entry(pmap, TRUE)) != NULL) {
2599		pv->pv_va = va;
2600		TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
2601		return (TRUE);
2602	} else
2603		return (FALSE);
2604}
2605
2606/*
2607 * Create the pv entries for each of the pages within a superpage.
2608 */
2609static boolean_t
2610pmap_pv_insert_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa)
2611{
2612	struct md_page *pvh;
2613	pv_entry_t pv;
2614
2615	rw_assert(&pvh_global_lock, RA_WLOCKED);
2616	if (pv_entry_count < pv_entry_high_water &&
2617	    (pv = get_pv_entry(pmap, TRUE)) != NULL) {
2618		pv->pv_va = va;
2619		pvh = pa_to_pvh(pa);
2620		TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next);
2621		return (TRUE);
2622	} else
2623		return (FALSE);
2624}
2625
2626/*
2627 * Fills a page table page with mappings to consecutive physical pages.
2628 */
2629static void
2630pmap_fill_ptp(pt_entry_t *firstpte, pt_entry_t newpte)
2631{
2632	pt_entry_t *pte;
2633
2634	for (pte = firstpte; pte < firstpte + NPTEPG; pte++) {
2635		*pte = newpte;
2636		newpte += PAGE_SIZE;
2637	}
2638}
2639
2640/*
2641 * Tries to demote a 2- or 4MB page mapping.  If demotion fails, the
2642 * 2- or 4MB page mapping is invalidated.
2643 */
2644static boolean_t
2645pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va)
2646{
2647	pd_entry_t newpde, oldpde;
2648	pt_entry_t *firstpte, newpte;
2649	vm_paddr_t mptepa;
2650	vm_page_t mpte;
2651	struct spglist free;
2652	vm_offset_t sva;
2653
2654	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2655	oldpde = *pde;
2656	KASSERT((oldpde & (PG_PS | PG_V)) == (PG_PS | PG_V),
2657	    ("pmap_demote_pde: oldpde is missing PG_PS and/or PG_V"));
2658	if ((oldpde & PG_A) == 0 || (mpte = pmap_remove_pt_page(pmap, va)) ==
2659	    NULL) {
2660		KASSERT((oldpde & PG_W) == 0,
2661		    ("pmap_demote_pde: page table page for a wired mapping"
2662		    " is missing"));
2663
2664		/*
2665		 * Invalidate the 2- or 4MB page mapping and return
2666		 * "failure" if the mapping was never accessed or the
2667		 * allocation of the new page table page fails.
2668		 */
2669		if ((oldpde & PG_A) == 0 || (mpte = vm_page_alloc(NULL,
2670		    va >> PDRSHIFT, VM_ALLOC_NOOBJ | VM_ALLOC_NORMAL |
2671		    VM_ALLOC_WIRED)) == NULL) {
2672			SLIST_INIT(&free);
2673			sva = trunc_4mpage(va);
2674			pmap_remove_pde(pmap, pde, sva, &free);
2675			if ((oldpde & PG_G) == 0)
2676				pmap_invalidate_pde_page(pmap, sva, oldpde);
2677			pmap_free_zero_pages(&free);
2678			CTR2(KTR_PMAP, "pmap_demote_pde: failure for va %#x"
2679			    " in pmap %p", va, pmap);
2680			return (FALSE);
2681		}
2682		if (va < VM_MAXUSER_ADDRESS)
2683			pmap->pm_stats.resident_count++;
2684	}
2685	mptepa = VM_PAGE_TO_PHYS(mpte);
2686
2687	/*
2688	 * If the page mapping is in the kernel's address space, then the
2689	 * KPTmap can provide access to the page table page.  Otherwise,
2690	 * temporarily map the page table page (mpte) into the kernel's
2691	 * address space at either PADDR1 or PADDR2.
2692	 */
2693	if (va >= KERNBASE)
2694		firstpte = &KPTmap[i386_btop(trunc_4mpage(va))];
2695	else if (curthread->td_pinned > 0 && rw_wowned(&pvh_global_lock)) {
2696		if ((*PMAP1 & PG_FRAME) != mptepa) {
2697			*PMAP1 = mptepa | PG_RW | PG_V | PG_A | PG_M;
2698#ifdef SMP
2699			PMAP1cpu = PCPU_GET(cpuid);
2700#endif
2701			invlcaddr(PADDR1);
2702			PMAP1changed++;
2703		} else
2704#ifdef SMP
2705		if (PMAP1cpu != PCPU_GET(cpuid)) {
2706			PMAP1cpu = PCPU_GET(cpuid);
2707			invlcaddr(PADDR1);
2708			PMAP1changedcpu++;
2709		} else
2710#endif
2711			PMAP1unchanged++;
2712		firstpte = PADDR1;
2713	} else {
2714		mtx_lock(&PMAP2mutex);
2715		if ((*PMAP2 & PG_FRAME) != mptepa) {
2716			*PMAP2 = mptepa | PG_RW | PG_V | PG_A | PG_M;
2717			pmap_invalidate_page(kernel_pmap, (vm_offset_t)PADDR2);
2718		}
2719		firstpte = PADDR2;
2720	}
2721	newpde = mptepa | PG_M | PG_A | (oldpde & PG_U) | PG_RW | PG_V;
2722	KASSERT((oldpde & PG_A) != 0,
2723	    ("pmap_demote_pde: oldpde is missing PG_A"));
2724	KASSERT((oldpde & (PG_M | PG_RW)) != PG_RW,
2725	    ("pmap_demote_pde: oldpde is missing PG_M"));
2726	newpte = oldpde & ~PG_PS;
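	/*
	 * Editorial clarification: the PAT selection bit occupies a different
	 * position in a PDE (PG_PDE_PAT, bit 12) than in a PTE (PG_PTE_PAT,
	 * bit 7), so when the superpage attributes are reused for the 4KB
	 * PTEs the bit must be relocated, which the XOR below performs.
	 */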
2727	if ((newpte & PG_PDE_PAT) != 0)
2728		newpte ^= PG_PDE_PAT | PG_PTE_PAT;
2729
2730	/*
2731	 * If the page table page is new, initialize it.
2732	 */
2733	if (mpte->wire_count == 1) {
2734		mpte->wire_count = NPTEPG;
2735		pmap_fill_ptp(firstpte, newpte);
2736	}
2737	KASSERT((*firstpte & PG_FRAME) == (newpte & PG_FRAME),
2738	    ("pmap_demote_pde: firstpte and newpte map different physical"
2739	    " addresses"));
2740
2741	/*
2742	 * If the mapping has changed attributes, update the page table
2743	 * entries.
2744	 */
2745	if ((*firstpte & PG_PTE_PROMOTE) != (newpte & PG_PTE_PROMOTE))
2746		pmap_fill_ptp(firstpte, newpte);
2747
2748	/*
2749	 * Demote the mapping.  This pmap is locked.  The old PDE has
2750	 * PG_A set.  If the old PDE has PG_RW set, it also has PG_M
2751	 * set.  Thus, there is no danger of a race with another
2752	 * processor changing the setting of PG_A and/or PG_M between
2753	 * the read above and the store below.
2754	 */
2755	if (workaround_erratum383)
2756		pmap_update_pde(pmap, va, pde, newpde);
2757	else if (pmap == kernel_pmap)
2758		pmap_kenter_pde(va, newpde);
2759	else
2760		pde_store(pde, newpde);
2761	if (firstpte == PADDR2)
2762		mtx_unlock(&PMAP2mutex);
2763
2764	/*
2765	 * Invalidate the recursive mapping of the page table page.
2766	 */
2767	pmap_invalidate_page(pmap, (vm_offset_t)vtopte(va));
2768
2769	/*
2770	 * Demote the pv entry.  This depends on the earlier demotion
2771	 * of the mapping.  Specifically, the (re)creation of a per-
2772	 * page pv entry might trigger the execution of pmap_collect(),
2773	 * which might reclaim a newly (re)created per-page pv entry
2774	 * and destroy the associated mapping.  In order to destroy
2775	 * the mapping, the PDE must have already changed from mapping
2776	 * the 2mpage to referencing the page table page.
2777	 */
2778	if ((oldpde & PG_MANAGED) != 0)
2779		pmap_pv_demote_pde(pmap, va, oldpde & PG_PS_FRAME);
2780
2781	pmap_pde_demotions++;
2782	CTR2(KTR_PMAP, "pmap_demote_pde: success for va %#x"
2783	    " in pmap %p", va, pmap);
2784	return (TRUE);
2785}
2786
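/*
 * Illustrative sketch (not from the original source): the PAT relocation
 * step performed by pmap_demote_pde() above.  In a 2/4MB PDE the PAT
 * selector sits at bit 12, while in a 4KB PTE it sits at bit 7 (where a PDE
 * keeps PG_PS), so a superpage entry with PAT set must have that bit moved
 * when it becomes the PTE template, and XORing with both masks does exactly
 * that.  The X_-prefixed names are stand-ins for the kernel's PG_*
 * constants; this is a user-space sketch and the #if 0 keeps it out of any
 * build.
 */
#if 0
#include <assert.h>
#include <stdint.h>

#define	X_PG_PS      0x0080u		/* bit 7: page-size flag in a PDE */
#define	X_PG_PTE_PAT 0x0080u		/* bit 7: PAT selector in a 4KB PTE */
#define	X_PG_PDE_PAT 0x1000u		/* bit 12: PAT selector in a 2/4MB PDE */

/* Isolate the PAT handling of the demotion: superpage PDE -> PTE template. */
static uint32_t
demote_pat_bits(uint32_t pde)
{
	uint32_t pte;

	pte = pde & ~X_PG_PS;		/* bit 7 will mean PAT, not PS */
	if ((pte & X_PG_PDE_PAT) != 0)
		pte ^= X_PG_PDE_PAT | X_PG_PTE_PAT;	/* move bit 12 to bit 7 */
	return (pte);
}

int
main(void)
{
	/* Superpage with PAT set: the template gets bit 7, bit 12 is cleared. */
	assert(demote_pat_bits(X_PG_PS | X_PG_PDE_PAT) == X_PG_PTE_PAT);
	/* Superpage with PAT clear: bits 7 and 12 stay clear in the template. */
	assert(demote_pat_bits(X_PG_PS) == 0);
	return (0);
}
#endif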
2787/*
2788 * Removes a 2- or 4MB page mapping from the kernel pmap.
2789 */
2790static void
2791pmap_remove_kernel_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va)
2792{
2793	pd_entry_t newpde;
2794	vm_paddr_t mptepa;
2795	vm_page_t mpte;
2796
2797	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2798	mpte = pmap_remove_pt_page(pmap, va);
2799	if (mpte == NULL)
2800		panic("pmap_remove_kernel_pde: Missing pt page.");
2801
2802	mptepa = VM_PAGE_TO_PHYS(mpte);
2803	newpde = mptepa | PG_M | PG_A | PG_RW | PG_V;
2804
2805	/*
2806	 * Initialize the page table page.
2807	 */
2808	pagezero((void *)&KPTmap[i386_btop(trunc_4mpage(va))]);
2809
2810	/*
2811	 * Remove the mapping.
2812	 */
2813	if (workaround_erratum383)
2814		pmap_update_pde(pmap, va, pde, newpde);
2815	else
2816		pmap_kenter_pde(va, newpde);
2817
2818	/*
2819	 * Invalidate the recursive mapping of the page table page.
2820	 */
2821	pmap_invalidate_page(pmap, (vm_offset_t)vtopte(va));
2822}
2823
2824/*
2825 * pmap_remove_pde: unmap a 2- or 4MB superpage mapping in a process
2826 */
2827static void
2828pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva,
2829    struct spglist *free)
2830{
2831	struct md_page *pvh;
2832	pd_entry_t oldpde;
2833	vm_offset_t eva, va;
2834	vm_page_t m, mpte;
2835
2836	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2837	KASSERT((sva & PDRMASK) == 0,
2838	    ("pmap_remove_pde: sva is not 4mpage aligned"));
2839	oldpde = pte_load_clear(pdq);
2840	if (oldpde & PG_W)
2841		pmap->pm_stats.wired_count -= NBPDR / PAGE_SIZE;
2842
2843	/*
2844	 * Machines that don't support invlpg also don't support
2845	 * PG_G.
2846	 */
2847	if ((oldpde & PG_G) != 0)
2848		pmap_invalidate_pde_page(kernel_pmap, sva, oldpde);
2849
2850	pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE;
2851	if (oldpde & PG_MANAGED) {
2852		pvh = pa_to_pvh(oldpde & PG_PS_FRAME);
2853		pmap_pvh_free(pvh, pmap, sva);
2854		eva = sva + NBPDR;
2855		for (va = sva, m = PHYS_TO_VM_PAGE(oldpde & PG_PS_FRAME);
2856		    va < eva; va += PAGE_SIZE, m++) {
2857			if ((oldpde & (PG_M | PG_RW)) == (PG_M | PG_RW))
2858				vm_page_dirty(m);
2859			if (oldpde & PG_A)
2860				vm_page_aflag_set(m, PGA_REFERENCED);
2861			if (TAILQ_EMPTY(&m->md.pv_list) &&
2862			    TAILQ_EMPTY(&pvh->pv_list))
2863				vm_page_aflag_clear(m, PGA_WRITEABLE);
2864		}
2865	}
2866	if (pmap == kernel_pmap) {
2867		pmap_remove_kernel_pde(pmap, pdq, sva);
2868	} else {
2869		mpte = pmap_remove_pt_page(pmap, sva);
2870		if (mpte != NULL) {
2871			pmap->pm_stats.resident_count--;
2872			KASSERT(mpte->wire_count == NPTEPG,
2873			    ("pmap_remove_pde: pte page wire count error"));
2874			mpte->wire_count = 0;
2875			pmap_add_delayed_free_list(mpte, free, FALSE);
2876			atomic_subtract_int(&vm_cnt.v_wire_count, 1);
2877		}
2878	}
2879}
2880
2881/*
2882 * pmap_remove_pte: unmap a single 4KB page in a process
2883 */
2884static int
2885pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t va,
2886    struct spglist *free)
2887{
2888	pt_entry_t oldpte;
2889	vm_page_t m;
2890
2891	rw_assert(&pvh_global_lock, RA_WLOCKED);
2892	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2893	oldpte = pte_load_clear(ptq);
2894	KASSERT(oldpte != 0,
2895	    ("pmap_remove_pte: pmap %p va %x zero pte", pmap, va));
2896	if (oldpte & PG_W)
2897		pmap->pm_stats.wired_count -= 1;
2898	/*
2899	 * Machines that don't support invlpg also don't support
2900	 * PG_G.
2901	 */
2902	if (oldpte & PG_G)
2903		pmap_invalidate_page(kernel_pmap, va);
2904	pmap->pm_stats.resident_count -= 1;
2905	if (oldpte & PG_MANAGED) {
2906		m = PHYS_TO_VM_PAGE(oldpte & PG_FRAME);
2907		if ((oldpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
2908			vm_page_dirty(m);
2909		if (oldpte & PG_A)
2910			vm_page_aflag_set(m, PGA_REFERENCED);
2911		pmap_remove_entry(pmap, m, va);
2912	}
2913	return (pmap_unuse_pt(pmap, va, free));
2914}
2915
2916/*
2917 * Remove a single page from a process address space
2918 */
2919static void
2920pmap_remove_page(pmap_t pmap, vm_offset_t va, struct spglist *free)
2921{
2922	pt_entry_t *pte;
2923
2924	rw_assert(&pvh_global_lock, RA_WLOCKED);
2925	KASSERT(curthread->td_pinned > 0, ("curthread not pinned"));
2926	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2927	if ((pte = pmap_pte_quick(pmap, va)) == NULL || *pte == 0)
2928		return;
2929	pmap_remove_pte(pmap, pte, va, free);
2930	pmap_invalidate_page(pmap, va);
2931}
2932
2933/*
2934 *	Remove the given range of addresses from the specified map.
2935 *
2936 *	It is assumed that the start and end are properly
2937 *	rounded to the page size.
2938 */
2939void
2940pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
2941{
2942	vm_offset_t pdnxt;
2943	pd_entry_t ptpaddr;
2944	pt_entry_t *pte;
2945	struct spglist free;
2946	int anyvalid;
2947
2948	/*
2949	 * Perform an unsynchronized read.  This is, however, safe.
2950	 */
2951	if (pmap->pm_stats.resident_count == 0)
2952		return;
2953
2954	anyvalid = 0;
2955	SLIST_INIT(&free);
2956
2957	rw_wlock(&pvh_global_lock);
2958	sched_pin();
2959	PMAP_LOCK(pmap);
2960
2961	/*
2962	 * Special handling for removing a single page: a very
2963	 * common operation for which some code can easily be
2964	 * short-circuited.
2965	 */
2966	if ((sva + PAGE_SIZE == eva) &&
2967	    ((pmap->pm_pdir[(sva >> PDRSHIFT)] & PG_PS) == 0)) {
2968		pmap_remove_page(pmap, sva, &free);
2969		goto out;
2970	}
2971
2972	for (; sva < eva; sva = pdnxt) {
2973		u_int pdirindex;
2974
2975		/*
2976		 * Calculate index for next page table.
2977		 */
2978		pdnxt = (sva + NBPDR) & ~PDRMASK;
2979		if (pdnxt < sva)
2980			pdnxt = eva;
2981		if (pmap->pm_stats.resident_count == 0)
2982			break;
2983
2984		pdirindex = sva >> PDRSHIFT;
2985		ptpaddr = pmap->pm_pdir[pdirindex];
2986
2987		/*
2988		 * Weed out invalid mappings. Note: we assume that the page
2989		 * directory table is always allocated and resides in kernel virtual memory.
2990		 */
2991		if (ptpaddr == 0)
2992			continue;
2993
2994		/*
2995		 * Check for large page.
2996		 */
2997		if ((ptpaddr & PG_PS) != 0) {
2998			/*
2999			 * Are we removing the entire large page?  If not,
3000			 * demote the mapping and fall through.
3001			 */
3002			if (sva + NBPDR == pdnxt && eva >= pdnxt) {
3003				/*
3004				 * The TLB entry for a PG_G mapping is
3005				 * invalidated by pmap_remove_pde().
3006				 */
3007				if ((ptpaddr & PG_G) == 0)
3008					anyvalid = 1;
3009				pmap_remove_pde(pmap,
3010				    &pmap->pm_pdir[pdirindex], sva, &free);
3011				continue;
3012			} else if (!pmap_demote_pde(pmap,
3013			    &pmap->pm_pdir[pdirindex], sva)) {
3014				/* The large page mapping was destroyed. */
3015				continue;
3016			}
3017		}
3018
3019		/*
3020		 * Limit our scan to either the end of the va represented
3021		 * by the current page table page, or to the end of the
3022		 * range being removed.
3023		 */
3024		if (pdnxt > eva)
3025			pdnxt = eva;
3026
3027		for (pte = pmap_pte_quick(pmap, sva); sva != pdnxt; pte++,
3028		    sva += PAGE_SIZE) {
3029			if (*pte == 0)
3030				continue;
3031
3032			/*
3033			 * The TLB entry for a PG_G mapping is invalidated
3034			 * by pmap_remove_pte().
3035			 */
3036			if ((*pte & PG_G) == 0)
3037				anyvalid = 1;
3038			if (pmap_remove_pte(pmap, pte, sva, &free))
3039				break;
3040		}
3041	}
3042out:
3043	sched_unpin();
3044	if (anyvalid)
3045		pmap_invalidate_all(pmap);
3046	rw_wunlock(&pvh_global_lock);
3047	PMAP_UNLOCK(pmap);
3048	pmap_free_zero_pages(&free);
3049}
3050
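/*
 * Illustrative sketch (not from the original source): the "pdnxt"
 * computation used by pmap_remove() and the other range walkers above.
 * Each iteration advances to the next 4MB (one PDE's worth) boundary, and
 * the "pdnxt < sva" test catches the unsigned wrap that occurs when sva is
 * within 4MB of the top of the address space.  X_NBPDR/X_PDRMASK are
 * stand-ins for the kernel's non-PAE constants; the #if 0 keeps this
 * user-space sketch out of any build.
 */
#if 0
#include <assert.h>
#include <stdint.h>

#define	X_NBPDR   (1u << 22)		/* bytes mapped by one PDE (4MB) */
#define	X_PDRMASK (X_NBPDR - 1)

static uint32_t
next_pde_boundary(uint32_t sva, uint32_t eva)
{
	uint32_t pdnxt;

	pdnxt = (sva + X_NBPDR) & ~X_PDRMASK;
	if (pdnxt < sva)		/* wrapped past the top of the address space */
		pdnxt = eva;
	return (pdnxt);
}

int
main(void)
{
	assert(next_pde_boundary(0x00000000u, 0xffffffffu) == 0x00400000u);
	assert(next_pde_boundary(0x00123000u, 0xffffffffu) == 0x00400000u);
	/* Close to 4GB the addition wraps, so the scan stops at eva. */
	assert(next_pde_boundary(0xffc01000u, 0xffffffffu) == 0xffffffffu);
	return (0);
}
#endif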
3051/*
3052 *	Routine:	pmap_remove_all
3053 *	Function:
3054 *		Removes this physical page from
3055 *		all physical maps in which it resides.
3056 *		Reflects back modify bits to the pager.
3057 *
3058 *	Notes:
3059 *		Original versions of this routine were very
3060 *		inefficient because they iteratively called
3061 *		pmap_remove (slow...)
3062 */
3063
3064void
3065pmap_remove_all(vm_page_t m)
3066{
3067	struct md_page *pvh;
3068	pv_entry_t pv;
3069	pmap_t pmap;
3070	pt_entry_t *pte, tpte;
3071	pd_entry_t *pde;
3072	vm_offset_t va;
3073	struct spglist free;
3074
3075	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3076	    ("pmap_remove_all: page %p is not managed", m));
3077	SLIST_INIT(&free);
3078	rw_wlock(&pvh_global_lock);
3079	sched_pin();
3080	if ((m->flags & PG_FICTITIOUS) != 0)
3081		goto small_mappings;
3082	pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
3083	while ((pv = TAILQ_FIRST(&pvh->pv_list)) != NULL) {
3084		va = pv->pv_va;
3085		pmap = PV_PMAP(pv);
3086		PMAP_LOCK(pmap);
3087		pde = pmap_pde(pmap, va);
3088		(void)pmap_demote_pde(pmap, pde, va);
3089		PMAP_UNLOCK(pmap);
3090	}
3091small_mappings:
3092	while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
3093		pmap = PV_PMAP(pv);
3094		PMAP_LOCK(pmap);
3095		pmap->pm_stats.resident_count--;
3096		pde = pmap_pde(pmap, pv->pv_va);
3097		KASSERT((*pde & PG_PS) == 0, ("pmap_remove_all: found"
3098		    " a 4mpage in page %p's pv list", m));
3099		pte = pmap_pte_quick(pmap, pv->pv_va);
3100		tpte = pte_load_clear(pte);
3101		KASSERT(tpte != 0, ("pmap_remove_all: pmap %p va %x zero pte",
3102		    pmap, pv->pv_va));
3103		if (tpte & PG_W)
3104			pmap->pm_stats.wired_count--;
3105		if (tpte & PG_A)
3106			vm_page_aflag_set(m, PGA_REFERENCED);
3107
3108		/*
3109		 * Update the vm_page_t clean and reference bits.
3110		 */
3111		if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
3112			vm_page_dirty(m);
3113		pmap_unuse_pt(pmap, pv->pv_va, &free);
3114		pmap_invalidate_page(pmap, pv->pv_va);
3115		TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
3116		free_pv_entry(pmap, pv);
3117		PMAP_UNLOCK(pmap);
3118	}
3119	vm_page_aflag_clear(m, PGA_WRITEABLE);
3120	sched_unpin();
3121	rw_wunlock(&pvh_global_lock);
3122	pmap_free_zero_pages(&free);
3123}
3124
3125/*
3126 * pmap_protect_pde: apply the requested protection to a 4mpage in a process
3127 */
3128static boolean_t
3129pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva, vm_prot_t prot)
3130{
3131	pd_entry_t newpde, oldpde;
3132	vm_offset_t eva, va;
3133	vm_page_t m;
3134	boolean_t anychanged;
3135
3136	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3137	KASSERT((sva & PDRMASK) == 0,
3138	    ("pmap_protect_pde: sva is not 4mpage aligned"));
3139	anychanged = FALSE;
3140retry:
3141	oldpde = newpde = *pde;
3142	if ((oldpde & (PG_MANAGED | PG_M | PG_RW)) ==
3143	    (PG_MANAGED | PG_M | PG_RW)) {
3144		eva = sva + NBPDR;
3145		for (va = sva, m = PHYS_TO_VM_PAGE(oldpde & PG_PS_FRAME);
3146		    va < eva; va += PAGE_SIZE, m++)
3147			vm_page_dirty(m);
3148	}
3149	if ((prot & VM_PROT_WRITE) == 0)
3150		newpde &= ~(PG_RW | PG_M);
3151#if defined(PAE) || defined(PAE_TABLES)
3152	if ((prot & VM_PROT_EXECUTE) == 0)
3153		newpde |= pg_nx;
3154#endif
3155	if (newpde != oldpde) {
3156		/*
3157		 * As an optimization to future operations on this PDE, clear
3158		 * PG_PROMOTED.  The impending invalidation will remove any
3159		 * lingering 4KB page mappings from the TLB.
3160		 */
3161		if (!pde_cmpset(pde, oldpde, newpde & ~PG_PROMOTED))
3162			goto retry;
3163		if ((oldpde & PG_G) != 0)
3164			pmap_invalidate_pde_page(kernel_pmap, sva, oldpde);
3165		else
3166			anychanged = TRUE;
3167	}
3168	return (anychanged);
3169}
3170
3171/*
3172 *	Set the physical protection on the
3173 *	specified range of this map as requested.
3174 */
3175void
3176pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
3177{
3178	vm_offset_t pdnxt;
3179	pd_entry_t ptpaddr;
3180	pt_entry_t *pte;
3181	boolean_t anychanged, pv_lists_locked;
3182
3183	KASSERT((prot & ~VM_PROT_ALL) == 0, ("invalid prot %x", prot));
3184	if (prot == VM_PROT_NONE) {
3185		pmap_remove(pmap, sva, eva);
3186		return;
3187	}
3188
3189#if defined(PAE) || defined(PAE_TABLES)
3190	if ((prot & (VM_PROT_WRITE|VM_PROT_EXECUTE)) ==
3191	    (VM_PROT_WRITE|VM_PROT_EXECUTE))
3192		return;
3193#else
3194	if (prot & VM_PROT_WRITE)
3195		return;
3196#endif
3197
3198	if (pmap_is_current(pmap))
3199		pv_lists_locked = FALSE;
3200	else {
3201		pv_lists_locked = TRUE;
3202resume:
3203		rw_wlock(&pvh_global_lock);
3204		sched_pin();
3205	}
3206	anychanged = FALSE;
3207
3208	PMAP_LOCK(pmap);
3209	for (; sva < eva; sva = pdnxt) {
3210		pt_entry_t obits, pbits;
3211		u_int pdirindex;
3212
3213		pdnxt = (sva + NBPDR) & ~PDRMASK;
3214		if (pdnxt < sva)
3215			pdnxt = eva;
3216
3217		pdirindex = sva >> PDRSHIFT;
3218		ptpaddr = pmap->pm_pdir[pdirindex];
3219
3220		/*
3221		 * Weed out invalid mappings. Note: we assume that the page
3222		 * directory table is always allocated and resides in kernel virtual memory.
3223		 */
3224		if (ptpaddr == 0)
3225			continue;
3226
3227		/*
3228		 * Check for large page.
3229		 */
3230		if ((ptpaddr & PG_PS) != 0) {
3231			/*
3232			 * Are we protecting the entire large page?  If not,
3233			 * demote the mapping and fall through.
3234			 */
3235			if (sva + NBPDR == pdnxt && eva >= pdnxt) {
3236				/*
3237				 * The TLB entry for a PG_G mapping is
3238				 * invalidated by pmap_protect_pde().
3239				 */
3240				if (pmap_protect_pde(pmap,
3241				    &pmap->pm_pdir[pdirindex], sva, prot))
3242					anychanged = TRUE;
3243				continue;
3244			} else {
3245				if (!pv_lists_locked) {
3246					pv_lists_locked = TRUE;
3247					if (!rw_try_wlock(&pvh_global_lock)) {
3248						if (anychanged)
3249							pmap_invalidate_all(
3250							    pmap);
3251						PMAP_UNLOCK(pmap);
3252						goto resume;
3253					}
3254					sched_pin();
3255				}
3256				if (!pmap_demote_pde(pmap,
3257				    &pmap->pm_pdir[pdirindex], sva)) {
3258					/*
3259					 * The large page mapping was
3260					 * destroyed.
3261					 */
3262					continue;
3263				}
3264			}
3265		}
3266
3267		if (pdnxt > eva)
3268			pdnxt = eva;
3269
3270		for (pte = pmap_pte_quick(pmap, sva); sva != pdnxt; pte++,
3271		    sva += PAGE_SIZE) {
3272			vm_page_t m;
3273
3274retry:
3275			/*
3276			 * Regardless of whether a pte is 32 or 64 bits in
3277			 * size, PG_RW, PG_A, and PG_M are among the least
3278			 * significant 32 bits.
3279			 */
3280			obits = pbits = *pte;
3281			if ((pbits & PG_V) == 0)
3282				continue;
3283
3284			if ((prot & VM_PROT_WRITE) == 0) {
3285				if ((pbits & (PG_MANAGED | PG_M | PG_RW)) ==
3286				    (PG_MANAGED | PG_M | PG_RW)) {
3287					m = PHYS_TO_VM_PAGE(pbits & PG_FRAME);
3288					vm_page_dirty(m);
3289				}
3290				pbits &= ~(PG_RW | PG_M);
3291			}
3292#if defined(PAE) || defined(PAE_TABLES)
3293			if ((prot & VM_PROT_EXECUTE) == 0)
3294				pbits |= pg_nx;
3295#endif
3296
3297			if (pbits != obits) {
3298#if defined(PAE) || defined(PAE_TABLES)
3299				if (!atomic_cmpset_64(pte, obits, pbits))
3300					goto retry;
3301#else
3302				if (!atomic_cmpset_int((u_int *)pte, obits,
3303				    pbits))
3304					goto retry;
3305#endif
3306				if (obits & PG_G)
3307					pmap_invalidate_page(pmap, sva);
3308				else
3309					anychanged = TRUE;
3310			}
3311		}
3312	}
3313	if (anychanged)
3314		pmap_invalidate_all(pmap);
3315	if (pv_lists_locked) {
3316		sched_unpin();
3317		rw_wunlock(&pvh_global_lock);
3318	}
3319	PMAP_UNLOCK(pmap);
3320}
3321
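/*
 * Illustrative sketch (not from the original source): the obits/pbits
 * compare-and-swap retry loop that pmap_protect() uses when downgrading a
 * PTE.  The MMU may set PG_A/PG_M concurrently, so the update must be a
 * read-modify-write that retries if the entry changed underneath us.  C11
 * atomics stand in for the kernel's atomic_cmpset_int(); X_PG_RW and X_PG_M
 * use the architectural bit positions.  The #if 0 keeps this user-space
 * sketch out of any build.
 */
#if 0
#include <assert.h>
#include <stdatomic.h>
#include <stdint.h>

#define	X_PG_RW 0x002u
#define	X_PG_M  0x040u

/* Clear PG_RW and PG_M in a PTE that other CPUs may be updating. */
static uint32_t
pte_clear_rw(_Atomic uint32_t *pte)
{
	uint32_t obits, pbits;

	do {
		obits = atomic_load(pte);
		pbits = obits & ~(X_PG_RW | X_PG_M);
	} while (!atomic_compare_exchange_weak(pte, &obits, pbits));
	return (obits);			/* the value that was displaced */
}

int
main(void)
{
	_Atomic uint32_t pte = 0x12345000u | X_PG_RW | X_PG_M | 0x1u;

	(void)pte_clear_rw(&pte);
	assert((atomic_load(&pte) & (X_PG_RW | X_PG_M)) == 0);
	assert((atomic_load(&pte) & 0x1u) != 0);	/* PG_V untouched */
	return (0);
}
#endif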
3322/*
3323 * Tries to promote the 512 or 1024 contiguous 4KB page mappings that are
3324 * within a single page table page (PTP) to a single 2- or 4MB page mapping.
3325 * For promotion to occur, two conditions must be met: (1) the 4KB page
3326 * mappings must map aligned, contiguous physical memory and (2) the 4KB page
3327 * mappings must have identical characteristics.
3328 *
3329 * Managed (PG_MANAGED) mappings within the kernel address space are not
3330 * promoted.  The reason is that kernel PDEs are replicated in each pmap but
3331 * pmap_clear_ptes() and pmap_ts_referenced() only read the PDE from the kernel
3332 * pmap.
3333 */
3334static void
3335pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va)
3336{
3337	pd_entry_t newpde;
3338	pt_entry_t *firstpte, oldpte, pa, *pte;
3339	vm_offset_t oldpteva;
3340	vm_page_t mpte;
3341
3342	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3343
3344	/*
3345	 * Examine the first PTE in the specified PTP.  Abort if this PTE is
3346	 * either invalid, unused, or does not map the first 4KB physical page
3347	 * within a 2- or 4MB page.
3348	 */
3349	firstpte = pmap_pte_quick(pmap, trunc_4mpage(va));
3350setpde:
3351	newpde = *firstpte;
3352	if ((newpde & ((PG_FRAME & PDRMASK) | PG_A | PG_V)) != (PG_A | PG_V)) {
3353		pmap_pde_p_failures++;
3354		CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#x"
3355		    " in pmap %p", va, pmap);
3356		return;
3357	}
3358	if ((*firstpte & PG_MANAGED) != 0 && pmap == kernel_pmap) {
3359		pmap_pde_p_failures++;
3360		CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#x"
3361		    " in pmap %p", va, pmap);
3362		return;
3363	}
3364	if ((newpde & (PG_M | PG_RW)) == PG_RW) {
3365		/*
3366		 * When PG_M is already clear, PG_RW can be cleared without
3367		 * a TLB invalidation.
3368		 */
3369		if (!atomic_cmpset_int((u_int *)firstpte, newpde, newpde &
3370		    ~PG_RW))
3371			goto setpde;
3372		newpde &= ~PG_RW;
3373	}
3374
3375	/*
3376	 * Examine each of the other PTEs in the specified PTP.  Abort if this
3377	 * PTE maps an unexpected 4KB physical page or does not have identical
3378	 * characteristics to the first PTE.
3379	 */
3380	pa = (newpde & (PG_PS_FRAME | PG_A | PG_V)) + NBPDR - PAGE_SIZE;
3381	for (pte = firstpte + NPTEPG - 1; pte > firstpte; pte--) {
3382setpte:
3383		oldpte = *pte;
3384		if ((oldpte & (PG_FRAME | PG_A | PG_V)) != pa) {
3385			pmap_pde_p_failures++;
3386			CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#x"
3387			    " in pmap %p", va, pmap);
3388			return;
3389		}
3390		if ((oldpte & (PG_M | PG_RW)) == PG_RW) {
3391			/*
3392			 * When PG_M is already clear, PG_RW can be cleared
3393			 * without a TLB invalidation.
3394			 */
3395			if (!atomic_cmpset_int((u_int *)pte, oldpte,
3396			    oldpte & ~PG_RW))
3397				goto setpte;
3398			oldpte &= ~PG_RW;
3399			oldpteva = (oldpte & PG_FRAME & PDRMASK) |
3400			    (va & ~PDRMASK);
3401			CTR2(KTR_PMAP, "pmap_promote_pde: protect for va %#x"
3402			    " in pmap %p", oldpteva, pmap);
3403		}
3404		if ((oldpte & PG_PTE_PROMOTE) != (newpde & PG_PTE_PROMOTE)) {
3405			pmap_pde_p_failures++;
3406			CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#x"
3407			    " in pmap %p", va, pmap);
3408			return;
3409		}
3410		pa -= PAGE_SIZE;
3411	}
3412
3413	/*
3414	 * Save the page table page in its current state until the PDE
3415	 * mapping the superpage is demoted by pmap_demote_pde() or
3416	 * destroyed by pmap_remove_pde().
3417	 */
3418	mpte = PHYS_TO_VM_PAGE(*pde & PG_FRAME);
3419	KASSERT(mpte >= vm_page_array &&
3420	    mpte < &vm_page_array[vm_page_array_size],
3421	    ("pmap_promote_pde: page table page is out of range"));
3422	KASSERT(mpte->pindex == va >> PDRSHIFT,
3423	    ("pmap_promote_pde: page table page's pindex is wrong"));
3424	if (pmap_insert_pt_page(pmap, mpte)) {
3425		pmap_pde_p_failures++;
3426		CTR2(KTR_PMAP,
3427		    "pmap_promote_pde: failure for va %#x in pmap %p", va,
3428		    pmap);
3429		return;
3430	}
3431
3432	/*
3433	 * Promote the pv entries.
3434	 */
3435	if ((newpde & PG_MANAGED) != 0)
3436		pmap_pv_promote_pde(pmap, va, newpde & PG_PS_FRAME);
3437
3438	/*
3439	 * Propagate the PAT index to its proper position.
3440	 */
3441	if ((newpde & PG_PTE_PAT) != 0)
3442		newpde ^= PG_PDE_PAT | PG_PTE_PAT;
3443
3444	/*
3445	 * Map the superpage.
3446	 */
3447	if (workaround_erratum383)
3448		pmap_update_pde(pmap, va, pde, PG_PS | newpde);
3449	else if (pmap == kernel_pmap)
3450		pmap_kenter_pde(va, PG_PROMOTED | PG_PS | newpde);
3451	else
3452		pde_store(pde, PG_PROMOTED | PG_PS | newpde);
3453
3454	pmap_pde_promotions++;
3455	CTR2(KTR_PMAP, "pmap_promote_pde: success for va %#x"
3456	    " in pmap %p", va, pmap);
3457}
3458
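/*
 * Illustrative sketch (not from the original source): the promotion
 * precondition that pmap_promote_pde() checks above -- all 1024 PTEs in the
 * page table page must map one 4MB-aligned, physically contiguous region
 * with identical attributes.  This is a simplification: it compares all low
 * attribute bits, whereas the kernel compares only the PG_PTE_PROMOTE
 * subset and walks the PTEs top-down.  X_-prefixed names are stand-ins for
 * the kernel's non-PAE constants; the #if 0 keeps this user-space sketch
 * out of any build.
 */
#if 0
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define	X_PAGE_SIZE 4096u
#define	X_NPTEPG    1024u
#define	X_PG_FRAME  0xfffff000u
#define	X_PDRMASK   ((X_NPTEPG * X_PAGE_SIZE) - 1)
#define	X_ATTRS     0x00000fffu

static int
promotable(const uint32_t pte[X_NPTEPG])
{
	uint32_t attrs, pa;
	size_t i;

	pa = pte[0] & X_PG_FRAME;
	attrs = pte[0] & X_ATTRS;
	if ((pa & X_PDRMASK) != 0)	/* first frame is not 4MB aligned */
		return (0);
	for (i = 1; i < X_NPTEPG; i++) {
		if ((pte[i] & X_PG_FRAME) != pa + i * X_PAGE_SIZE)
			return (0);	/* not physically contiguous */
		if ((pte[i] & X_ATTRS) != attrs)
			return (0);	/* mismatched attributes */
	}
	return (1);
}

int
main(void)
{
	static uint32_t pte[X_NPTEPG];
	size_t i;

	for (i = 0; i < X_NPTEPG; i++)
		pte[i] = 0x00800000u + i * X_PAGE_SIZE + 0x67u;
	assert(promotable(pte));
	pte[7] += X_PAGE_SIZE;		/* break the contiguity */
	assert(!promotable(pte));
	return (0);
}
#endif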
3459/*
3460 *	Insert the given physical page (p) at
3461 *	the specified virtual address (v) in the
3462 *	target physical map with the protection requested.
3463 *
3464 *	If specified, the page will be wired down, meaning
3465 *	that the related pte cannot be reclaimed.
3466 *
3467 *	NB:  This is the only routine which MAY NOT lazy-evaluate
3468 *	or lose information.  That is, this routine must actually
3469 *	insert this page into the given map NOW.
3470 */
3471int
3472pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
3473    u_int flags, int8_t psind)
3474{
3475	pd_entry_t *pde;
3476	pt_entry_t *pte;
3477	pt_entry_t newpte, origpte;
3478	pv_entry_t pv;
3479	vm_paddr_t opa, pa;
3480	vm_page_t mpte, om;
3481	boolean_t invlva, wired;
3482
3483	va = trunc_page(va);
3484	mpte = NULL;
3485	wired = (flags & PMAP_ENTER_WIRED) != 0;
3486
3487	KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig"));
3488	KASSERT(va < UPT_MIN_ADDRESS || va >= UPT_MAX_ADDRESS,
3489	    ("pmap_enter: invalid to pmap_enter page table pages (va: 0x%x)",
3490	    va));
3491	if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m))
3492		VM_OBJECT_ASSERT_LOCKED(m->object);
3493
3494	rw_wlock(&pvh_global_lock);
3495	PMAP_LOCK(pmap);
3496	sched_pin();
3497
3498	pde = pmap_pde(pmap, va);
3499	if (va < VM_MAXUSER_ADDRESS) {
3500		/*
3501		 * va is for UVA.
3502		 * In the case that a page table page is not resident,
3503		 * we are creating it here.  pmap_allocpte() handles
3504		 * demotion.
3505		 */
3506		mpte = pmap_allocpte(pmap, va, flags);
3507		if (mpte == NULL) {
3508			KASSERT((flags & PMAP_ENTER_NOSLEEP) != 0,
3509			    ("pmap_allocpte failed with sleep allowed"));
3510			sched_unpin();
3511			rw_wunlock(&pvh_global_lock);
3512			PMAP_UNLOCK(pmap);
3513			return (KERN_RESOURCE_SHORTAGE);
3514		}
3515	} else {
3516		/*
3517		 * va is for KVA, so pmap_demote_pde() will never fail
3518		 * to install a page table page.  PG_V is also
3519		 * asserted by pmap_demote_pde().
3520		 */
3521		KASSERT(pde != NULL && (*pde & PG_V) != 0,
3522		    ("KVA %#x invalid pde pdir %#jx", va,
3523		    (uintmax_t)pmap->pm_pdir[PTDPTDI]));
3524		if ((*pde & PG_PS) != 0)
3525			pmap_demote_pde(pmap, pde, va);
3526	}
3527	pte = pmap_pte_quick(pmap, va);
3528
3529	/*
3530	 * Page Directory table entry is not valid, which should not
3531	 * happen.  We should have either allocated the page table
3532	 * page or demoted the existing mapping above.
3533	 */
3534	if (pte == NULL) {
3535		panic("pmap_enter: invalid page directory pdir=%#jx, va=%#x",
3536		    (uintmax_t)pmap->pm_pdir[PTDPTDI], va);
3537	}
3538
3539	pa = VM_PAGE_TO_PHYS(m);
3540	om = NULL;
3541	origpte = *pte;
3542	opa = origpte & PG_FRAME;
3543
3544	/*
3545	 * Mapping has not changed, must be protection or wiring change.
3546	 */
3547	if (origpte && (opa == pa)) {
3548		/*
3549		 * Wiring change, just update stats. We don't worry about
3550		 * wiring PT pages as they remain resident as long as there
3551		 * are valid mappings in them. Hence, if a user page is wired,
3552		 * the PT page will be also.
3553		 */
3554		if (wired && ((origpte & PG_W) == 0))
3555			pmap->pm_stats.wired_count++;
3556		else if (!wired && (origpte & PG_W))
3557			pmap->pm_stats.wired_count--;
3558
3559		/*
3560		 * Remove extra pte reference
3561		 */
3562		if (mpte)
3563			mpte->wire_count--;
3564
3565		if (origpte & PG_MANAGED) {
3566			om = m;
3567			pa |= PG_MANAGED;
3568		}
3569		goto validate;
3570	}
3571
3572	pv = NULL;
3573
3574	/*
3575	 * Mapping has changed, invalidate old range and fall through to
3576	 * handle validating new mapping.
3577	 */
3578	if (opa) {
3579		if (origpte & PG_W)
3580			pmap->pm_stats.wired_count--;
3581		if (origpte & PG_MANAGED) {
3582			om = PHYS_TO_VM_PAGE(opa);
3583			pv = pmap_pvh_remove(&om->md, pmap, va);
3584		}
3585		if (mpte != NULL) {
3586			mpte->wire_count--;
3587			KASSERT(mpte->wire_count > 0,
3588			    ("pmap_enter: missing reference to page table page,"
3589			     " va: 0x%x", va));
3590		}
3591	} else
3592		pmap->pm_stats.resident_count++;
3593
3594	/*
3595	 * Enter on the PV list if part of our managed memory.
3596	 */
3597	if ((m->oflags & VPO_UNMANAGED) == 0) {
3598		KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva,
3599		    ("pmap_enter: managed mapping within the clean submap"));
3600		if (pv == NULL)
3601			pv = get_pv_entry(pmap, FALSE);
3602		pv->pv_va = va;
3603		TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
3604		pa |= PG_MANAGED;
3605	} else if (pv != NULL)
3606		free_pv_entry(pmap, pv);
3607
3608	/*
3609	 * Increment counters
3610	 */
3611	if (wired)
3612		pmap->pm_stats.wired_count++;
3613
3614validate:
3615	/*
3616	 * Now validate mapping with desired protection/wiring.
3617	 */
3618	newpte = (pt_entry_t)(pa | pmap_cache_bits(m->md.pat_mode, 0) | PG_V);
3619	if ((prot & VM_PROT_WRITE) != 0) {
3620		newpte |= PG_RW;
3621		if ((newpte & PG_MANAGED) != 0)
3622			vm_page_aflag_set(m, PGA_WRITEABLE);
3623	}
3624#if defined(PAE) || defined(PAE_TABLES)
3625	if ((prot & VM_PROT_EXECUTE) == 0)
3626		newpte |= pg_nx;
3627#endif
3628	if (wired)
3629		newpte |= PG_W;
3630	if (va < VM_MAXUSER_ADDRESS)
3631		newpte |= PG_U;
3632	if (pmap == kernel_pmap)
3633		newpte |= pgeflag;
3634
3635	/*
3636	 * if the mapping or permission bits are different, we need
3637	 * to update the pte.
3638	 */
3639	if ((origpte & ~(PG_M|PG_A)) != newpte) {
3640		newpte |= PG_A;
3641		if ((flags & VM_PROT_WRITE) != 0)
3642			newpte |= PG_M;
3643		if (origpte & PG_V) {
3644			invlva = FALSE;
3645			origpte = pte_load_store(pte, newpte);
3646			if (origpte & PG_A) {
3647				if (origpte & PG_MANAGED)
3648					vm_page_aflag_set(om, PGA_REFERENCED);
3649				if (opa != VM_PAGE_TO_PHYS(m))
3650					invlva = TRUE;
3651#if defined(PAE) || defined(PAE_TABLES)
3652				if ((origpte & PG_NX) == 0 &&
3653				    (newpte & PG_NX) != 0)
3654					invlva = TRUE;
3655#endif
3656			}
3657			if ((origpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
3658				if ((origpte & PG_MANAGED) != 0)
3659					vm_page_dirty(om);
3660				if ((prot & VM_PROT_WRITE) == 0)
3661					invlva = TRUE;
3662			}
3663			if ((origpte & PG_MANAGED) != 0 &&
3664			    TAILQ_EMPTY(&om->md.pv_list) &&
3665			    ((om->flags & PG_FICTITIOUS) != 0 ||
3666			    TAILQ_EMPTY(&pa_to_pvh(opa)->pv_list)))
3667				vm_page_aflag_clear(om, PGA_WRITEABLE);
3668			if (invlva)
3669				pmap_invalidate_page(pmap, va);
3670		} else
3671			pte_store(pte, newpte);
3672	}
3673
3674	/*
3675	 * If both the page table page and the reservation are fully
3676	 * populated, then attempt promotion.
3677	 */
3678	if ((mpte == NULL || mpte->wire_count == NPTEPG) &&
3679	    pg_ps_enabled && (m->flags & PG_FICTITIOUS) == 0 &&
3680	    vm_reserv_level_iffullpop(m) == 0)
3681		pmap_promote_pde(pmap, pde, va);
3682
3683	sched_unpin();
3684	rw_wunlock(&pvh_global_lock);
3685	PMAP_UNLOCK(pmap);
3686	return (KERN_SUCCESS);
3687}
3688
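/*
 * Illustrative sketch (not from the original source): the "invlva" decision
 * made above when pmap_enter() replaces a valid PTE.  A stale TLB entry
 * only matters if the old mapping could have been cached (PG_A set) and the
 * physical frame changed, or if a dirty, writable mapping is losing write
 * permission; the PAE-only PG_NX case is omitted here.  The X_-prefixed
 * constants use the architectural bit positions; the #if 0 keeps this
 * user-space sketch out of any build.
 */
#if 0
#include <assert.h>
#include <stdint.h>

#define	X_PG_RW    0x002u
#define	X_PG_A     0x020u
#define	X_PG_M     0x040u
#define	X_PG_FRAME 0xfffff000u

static int
needs_invalidation(uint32_t origpte, uint32_t newpte, int new_is_writable)
{
	if ((origpte & X_PG_A) != 0 &&
	    (origpte & X_PG_FRAME) != (newpte & X_PG_FRAME))
		return (1);		/* accessed mapping moved to a new frame */
	if ((origpte & (X_PG_M | X_PG_RW)) == (X_PG_M | X_PG_RW) &&
	    !new_is_writable)
		return (1);		/* dirty mapping losing write permission */
	return (0);
}

int
main(void)
{
	/* Same frame, still writable: the old translation remains usable. */
	assert(!needs_invalidation(0x00001067u, 0x00001067u, 1));
	/* Accessed mapping rebound to a different frame: must invalidate. */
	assert(needs_invalidation(0x00001027u, 0x00002027u, 1));
	/* Dirty, writable mapping becoming read-only: must invalidate. */
	assert(needs_invalidation(0x00001067u, 0x00001065u, 0));
	return (0);
}
#endif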
3689/*
3690 * Tries to create a 2- or 4MB page mapping.  Returns TRUE if successful and
3691 * FALSE otherwise.  Fails if (1) a page table page cannot be allocated without
3692 * blocking, (2) a mapping already exists at the specified virtual address, or
3693 * (3) a pv entry cannot be allocated without reclaiming another pv entry.
3694 */
3695static boolean_t
3696pmap_enter_pde(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
3697{
3698	pd_entry_t *pde, newpde;
3699
3700	rw_assert(&pvh_global_lock, RA_WLOCKED);
3701	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3702	pde = pmap_pde(pmap, va);
3703	if (*pde != 0) {
3704		CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx"
3705		    " in pmap %p", va, pmap);
3706		return (FALSE);
3707	}
3708	newpde = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(m->md.pat_mode, 1) |
3709	    PG_PS | PG_V;
3710	if ((m->oflags & VPO_UNMANAGED) == 0) {
3711		newpde |= PG_MANAGED;
3712
3713		/*
3714		 * Abort this mapping if its PV entry could not be created.
3715		 */
3716		if (!pmap_pv_insert_pde(pmap, va, VM_PAGE_TO_PHYS(m))) {
3717			CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx"
3718			    " in pmap %p", va, pmap);
3719			return (FALSE);
3720		}
3721	}
3722#if defined(PAE) || defined(PAE_TABLES)
3723	if ((prot & VM_PROT_EXECUTE) == 0)
3724		newpde |= pg_nx;
3725#endif
3726	if (va < VM_MAXUSER_ADDRESS)
3727		newpde |= PG_U;
3728
3729	/*
3730	 * Increment counters.
3731	 */
3732	pmap->pm_stats.resident_count += NBPDR / PAGE_SIZE;
3733
3734	/*
3735	 * Map the superpage.  (This is not a promoted mapping; there will not
3736	 * be any lingering 4KB page mappings in the TLB.)
3737	 */
3738	pde_store(pde, newpde);
3739
3740	pmap_pde_mappings++;
3741	CTR2(KTR_PMAP, "pmap_enter_pde: success for va %#lx"
3742	    " in pmap %p", va, pmap);
3743	return (TRUE);
3744}
3745
3746/*
3747 * Maps a sequence of resident pages belonging to the same object.
3748 * The sequence begins with the given page m_start.  This page is
3749 * mapped at the given virtual address start.  Each subsequent page is
3750 * mapped at a virtual address that is offset from start by the same
3751 * amount as the page is offset from m_start within the object.  The
3752 * last page in the sequence is the page with the largest offset from
3753 * m_start that can be mapped at a virtual address less than the given
3754 * virtual address end.  Not every virtual page between start and end
3755 * is mapped; only those for which a resident page exists with the
3756 * corresponding offset from m_start are mapped.
3757 */
3758void
3759pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
3760    vm_page_t m_start, vm_prot_t prot)
3761{
3762	vm_offset_t va;
3763	vm_page_t m, mpte;
3764	vm_pindex_t diff, psize;
3765
3766	VM_OBJECT_ASSERT_LOCKED(m_start->object);
3767
3768	psize = atop(end - start);
3769	mpte = NULL;
3770	m = m_start;
3771	rw_wlock(&pvh_global_lock);
3772	PMAP_LOCK(pmap);
3773	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
3774		va = start + ptoa(diff);
3775		if ((va & PDRMASK) == 0 && va + NBPDR <= end &&
3776		    m->psind == 1 && pg_ps_enabled &&
3777		    pmap_enter_pde(pmap, va, m, prot))
3778			m = &m[NBPDR / PAGE_SIZE - 1];
3779		else
3780			mpte = pmap_enter_quick_locked(pmap, va, m, prot,
3781			    mpte);
3782		m = TAILQ_NEXT(m, listq);
3783	}
3784	rw_wunlock(&pvh_global_lock);
3785	PMAP_UNLOCK(pmap);
3786}
3787
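/*
 * Illustrative sketch (not from the original source): the superpage test in
 * pmap_enter_object() above.  A page at offset "diff" (in pages) from
 * m_start can be entered with a single PDE only when its va is 4MB aligned
 * and the whole superpage fits below "end"; the kernel additionally
 * requires m->psind == 1 and pg_ps_enabled.  X_-prefixed names are
 * stand-ins for the non-PAE kernel constants; the #if 0 keeps this
 * user-space sketch out of any build.
 */
#if 0
#include <assert.h>
#include <stdint.h>

#define	X_PAGE_SHIFT 12
#define	X_NBPDR      (1u << 22)
#define	X_PDRMASK    (X_NBPDR - 1)

static int
superpage_candidate(uint32_t start, uint32_t end, uint32_t diff)
{
	uint32_t va = start + (diff << X_PAGE_SHIFT);

	return ((va & X_PDRMASK) == 0 && va + X_NBPDR <= end);
}

int
main(void)
{
	/* A 12MB range on a 4MB boundary: offsets 0, 1024 and 2048 qualify. */
	assert(superpage_candidate(0x20000000u, 0x20c00000u, 0));
	assert(superpage_candidate(0x20000000u, 0x20c00000u, 1024));
	assert(!superpage_candidate(0x20000000u, 0x20c00000u, 1));
	/* A tail that does not reach the next boundary falls back to 4KB pages. */
	assert(!superpage_candidate(0x20000000u, 0x20a00000u, 2048));
	return (0);
}
#endif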
3788/*
3789 * This code makes some *MAJOR* assumptions:
3790 * 1. Current pmap & pmap exists.
3791 * 2. Not wired.
3792 * 3. Read access.
3793 * 4. No page table pages.
3794 * but it is *MUCH* faster than pmap_enter...
3795 */
3796
3797void
3798pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
3799{
3800
3801	rw_wlock(&pvh_global_lock);
3802	PMAP_LOCK(pmap);
3803	(void)pmap_enter_quick_locked(pmap, va, m, prot, NULL);
3804	rw_wunlock(&pvh_global_lock);
3805	PMAP_UNLOCK(pmap);
3806}
3807
3808static vm_page_t
3809pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
3810    vm_prot_t prot, vm_page_t mpte)
3811{
3812	pt_entry_t *pte;
3813	vm_paddr_t pa;
3814	struct spglist free;
3815
3816	KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
3817	    (m->oflags & VPO_UNMANAGED) != 0,
3818	    ("pmap_enter_quick_locked: managed mapping within the clean submap"));
3819	rw_assert(&pvh_global_lock, RA_WLOCKED);
3820	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3821
3822	/*
3823	 * In the case that a page table page is not
3824	 * resident, we are creating it here.
3825	 */
3826	if (va < VM_MAXUSER_ADDRESS) {
3827		u_int ptepindex;
3828		pd_entry_t ptepa;
3829
3830		/*
3831		 * Calculate pagetable page index
3832		 */
3833		ptepindex = va >> PDRSHIFT;
3834		if (mpte && (mpte->pindex == ptepindex)) {
3835			mpte->wire_count++;
3836		} else {
3837			/*
3838			 * Get the page directory entry
3839			 */
3840			ptepa = pmap->pm_pdir[ptepindex];
3841
3842			/*
3843			 * If the page table page is mapped, we just increment
3844			 * the hold count, and activate it.
3845			 */
3846			if (ptepa) {
3847				if (ptepa & PG_PS)
3848					return (NULL);
3849				mpte = PHYS_TO_VM_PAGE(ptepa & PG_FRAME);
3850				mpte->wire_count++;
3851			} else {
3852				mpte = _pmap_allocpte(pmap, ptepindex,
3853				    PMAP_ENTER_NOSLEEP);
3854				if (mpte == NULL)
3855					return (mpte);
3856			}
3857		}
3858	} else {
3859		mpte = NULL;
3860	}
3861
3862	/*
3863	 * This call to vtopte makes the assumption that we are
3864	 * entering the page into the current pmap.  In order to support
3865	 * quick entry into any pmap, one would likely use pmap_pte_quick.
3866	 * But that isn't as quick as vtopte.
3867	 */
3868	pte = vtopte(va);
3869	if (*pte) {
3870		if (mpte != NULL) {
3871			mpte->wire_count--;
3872			mpte = NULL;
3873		}
3874		return (mpte);
3875	}
3876
3877	/*
3878	 * Enter on the PV list if part of our managed memory.
3879	 */
3880	if ((m->oflags & VPO_UNMANAGED) == 0 &&
3881	    !pmap_try_insert_pv_entry(pmap, va, m)) {
3882		if (mpte != NULL) {
3883			SLIST_INIT(&free);
3884			if (pmap_unwire_ptp(pmap, mpte, &free)) {
3885				pmap_invalidate_page(pmap, va);
3886				pmap_free_zero_pages(&free);
3887			}
3888
3889			mpte = NULL;
3890		}
3891		return (mpte);
3892	}
3893
3894	/*
3895	 * Increment counters
3896	 */
3897	pmap->pm_stats.resident_count++;
3898
3899	pa = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(m->md.pat_mode, 0);
3900#if defined(PAE) || defined(PAE_TABLES)
3901	if ((prot & VM_PROT_EXECUTE) == 0)
3902		pa |= pg_nx;
3903#endif
3904
3905	/*
3906	 * Now validate mapping with RO protection
3907	 */
3908	if ((m->oflags & VPO_UNMANAGED) != 0)
3909		pte_store(pte, pa | PG_V | PG_U);
3910	else
3911		pte_store(pte, pa | PG_V | PG_U | PG_MANAGED);
3912	return (mpte);
3913}
3914
3915/*
3916 * Make a temporary mapping for a physical address.  This is only intended
3917 * to be used for panic dumps.
3918 */
3919void *
3920pmap_kenter_temporary(vm_paddr_t pa, int i)
3921{
3922	vm_offset_t va;
3923
3924	va = (vm_offset_t)crashdumpmap + (i * PAGE_SIZE);
3925	pmap_kenter(va, pa);
3926	invlpg(va);
3927	return ((void *)crashdumpmap);
3928}
3929
3930/*
3931 * This code maps large physical mmap regions into the
3932 * processor address space.  Note that some shortcuts
3933 * are taken, but the code works.
3934 */
3935void
3936pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
3937    vm_pindex_t pindex, vm_size_t size)
3938{
3939	pd_entry_t *pde;
3940	vm_paddr_t pa, ptepa;
3941	vm_page_t p;
3942	int pat_mode;
3943
3944	VM_OBJECT_ASSERT_WLOCKED(object);
3945	KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
3946	    ("pmap_object_init_pt: non-device object"));
3947	if (pseflag &&
3948	    (addr & (NBPDR - 1)) == 0 && (size & (NBPDR - 1)) == 0) {
3949		if (!vm_object_populate(object, pindex, pindex + atop(size)))
3950			return;
3951		p = vm_page_lookup(object, pindex);
3952		KASSERT(p->valid == VM_PAGE_BITS_ALL,
3953		    ("pmap_object_init_pt: invalid page %p", p));
3954		pat_mode = p->md.pat_mode;
3955
3956		/*
3957		 * Abort the mapping if the first page is not physically
3958		 * aligned to a 2/4MB page boundary.
3959		 */
3960		ptepa = VM_PAGE_TO_PHYS(p);
3961		if (ptepa & (NBPDR - 1))
3962			return;
3963
3964		/*
3965		 * Skip the first page.  Abort the mapping if the rest of
3966		 * the pages are not physically contiguous or have differing
3967		 * memory attributes.
3968		 */
3969		p = TAILQ_NEXT(p, listq);
3970		for (pa = ptepa + PAGE_SIZE; pa < ptepa + size;
3971		    pa += PAGE_SIZE) {
3972			KASSERT(p->valid == VM_PAGE_BITS_ALL,
3973			    ("pmap_object_init_pt: invalid page %p", p));
3974			if (pa != VM_PAGE_TO_PHYS(p) ||
3975			    pat_mode != p->md.pat_mode)
3976				return;
3977			p = TAILQ_NEXT(p, listq);
3978		}
3979
3980		/*
3981		 * Map using 2/4MB pages.  Since "ptepa" is 2/4M aligned and
3982		 * "size" is a multiple of 2/4M, adding the PAT setting to
3983		 * "pa" will not affect the termination of this loop.
3984		 */
3985		PMAP_LOCK(pmap);
3986		for (pa = ptepa | pmap_cache_bits(pat_mode, 1); pa < ptepa +
3987		    size; pa += NBPDR) {
3988			pde = pmap_pde(pmap, addr);
3989			if (*pde == 0) {
3990				pde_store(pde, pa | PG_PS | PG_M | PG_A |
3991				    PG_U | PG_RW | PG_V);
3992				pmap->pm_stats.resident_count += NBPDR /
3993				    PAGE_SIZE;
3994				pmap_pde_mappings++;
3995			}
3996			/* Else continue on if the PDE is already valid. */
3997			addr += NBPDR;
3998		}
3999		PMAP_UNLOCK(pmap);
4000	}
4001}
4002
4003/*
4004 *	Clear the wired attribute from the mappings for the specified range of
4005 *	addresses in the given pmap.  Every valid mapping within that range
4006 *	must have the wired attribute set.  In contrast, invalid mappings
4007 *	cannot have the wired attribute set, so they are ignored.
4008 *
4009 *	The wired attribute of the page table entry is not a hardware feature,
4010 *	so there is no need to invalidate any TLB entries.
4011 */
4012void
4013pmap_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
4014{
4015	vm_offset_t pdnxt;
4016	pd_entry_t *pde;
4017	pt_entry_t *pte;
4018	boolean_t pv_lists_locked;
4019
4020	if (pmap_is_current(pmap))
4021		pv_lists_locked = FALSE;
4022	else {
4023		pv_lists_locked = TRUE;
4024resume:
4025		rw_wlock(&pvh_global_lock);
4026		sched_pin();
4027	}
4028	PMAP_LOCK(pmap);
4029	for (; sva < eva; sva = pdnxt) {
4030		pdnxt = (sva + NBPDR) & ~PDRMASK;
4031		if (pdnxt < sva)
4032			pdnxt = eva;
4033		pde = pmap_pde(pmap, sva);
4034		if ((*pde & PG_V) == 0)
4035			continue;
4036		if ((*pde & PG_PS) != 0) {
4037			if ((*pde & PG_W) == 0)
4038				panic("pmap_unwire: pde %#jx is missing PG_W",
4039				    (uintmax_t)*pde);
4040
4041			/*
4042			 * Are we unwiring the entire large page?  If not,
4043			 * demote the mapping and fall through.
4044			 */
4045			if (sva + NBPDR == pdnxt && eva >= pdnxt) {
4046				/*
4047				 * Regardless of whether a pde (or pte) is 32
4048				 * or 64 bits in size, PG_W is among the least
4049				 * significant 32 bits.
4050				 */
4051				atomic_clear_int((u_int *)pde, PG_W);
4052				pmap->pm_stats.wired_count -= NBPDR /
4053				    PAGE_SIZE;
4054				continue;
4055			} else {
4056				if (!pv_lists_locked) {
4057					pv_lists_locked = TRUE;
4058					if (!rw_try_wlock(&pvh_global_lock)) {
4059						PMAP_UNLOCK(pmap);
4060						/* Repeat sva. */
4061						goto resume;
4062					}
4063					sched_pin();
4064				}
4065				if (!pmap_demote_pde(pmap, pde, sva))
4066					panic("pmap_unwire: demotion failed");
4067			}
4068		}
4069		if (pdnxt > eva)
4070			pdnxt = eva;
4071		for (pte = pmap_pte_quick(pmap, sva); sva != pdnxt; pte++,
4072		    sva += PAGE_SIZE) {
4073			if ((*pte & PG_V) == 0)
4074				continue;
4075			if ((*pte & PG_W) == 0)
4076				panic("pmap_unwire: pte %#jx is missing PG_W",
4077				    (uintmax_t)*pte);
4078
4079			/*
4080			 * PG_W must be cleared atomically.  Although the pmap
4081			 * lock synchronizes access to PG_W, another processor
4082			 * could be setting PG_M and/or PG_A concurrently.
4083			 *
4084			 * PG_W is among the least significant 32 bits.
4085			 */
4086			atomic_clear_int((u_int *)pte, PG_W);
4087			pmap->pm_stats.wired_count--;
4088		}
4089	}
4090	if (pv_lists_locked) {
4091		sched_unpin();
4092		rw_wunlock(&pvh_global_lock);
4093	}
4094	PMAP_UNLOCK(pmap);
4095}
4096
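/*
 * Illustrative sketch (not from the original source): why pmap_unwire() can
 * clear PG_W with a 32-bit atomic even under PAE.  A PAE PTE is 64 bits
 * wide, but PG_W lives in the low word, so an atomic and-clear on that word
 * alone is sufficient and avoids a 64-bit cmpxchg8b loop.  The layout
 * assumes little-endian i386 (low word first), and X_PG_W is an assumed
 * stand-in value, not necessarily the kernel's definition.  The #if 0 keeps
 * this user-space sketch out of any build.
 */
#if 0
#include <assert.h>
#include <stdatomic.h>
#include <stdint.h>

#define	X_PG_W 0x200u			/* software "wired" bit, low word */

/* The two 32-bit words of one 64-bit PAE PTE; [0] is the low word on i386. */
static _Atomic uint32_t xpte[2];

static void
pte_unwire(_Atomic uint32_t *low_word)
{
	/* PG_W is among the least significant 32 bits, so this suffices. */
	atomic_fetch_and(low_word, ~X_PG_W);
}

int
main(void)
{
	atomic_init(&xpte[0], 0x00456000u | X_PG_W | 0x1u);	/* frame + W + V */
	atomic_init(&xpte[1], 0x80000001u);		/* NX + upper frame bits */
	pte_unwire(&xpte[0]);
	assert((atomic_load(&xpte[0]) & X_PG_W) == 0);	/* wired bit cleared */
	assert((atomic_load(&xpte[0]) & 0x1u) != 0);	/* PG_V untouched */
	assert(atomic_load(&xpte[1]) == 0x80000001u);	/* high word untouched */
	return (0);
}
#endif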
4097
4098/*
4099 *	Copy the range specified by src_addr/len
4100 *	from the source map to the range dst_addr/len
4101 *	in the destination map.
4102 *
4103 *	This routine is only advisory and need not do anything.
4104 */
4105
4106void
4107pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
4108    vm_offset_t src_addr)
4109{
4110	struct spglist free;
4111	vm_offset_t addr;
4112	vm_offset_t end_addr = src_addr + len;
4113	vm_offset_t pdnxt;
4114
4115	if (dst_addr != src_addr)
4116		return;
4117
4118	if (!pmap_is_current(src_pmap))
4119		return;
4120
4121	rw_wlock(&pvh_global_lock);
4122	if (dst_pmap < src_pmap) {
4123		PMAP_LOCK(dst_pmap);
4124		PMAP_LOCK(src_pmap);
4125	} else {
4126		PMAP_LOCK(src_pmap);
4127		PMAP_LOCK(dst_pmap);
4128	}
4129	sched_pin();
4130	for (addr = src_addr; addr < end_addr; addr = pdnxt) {
4131		pt_entry_t *src_pte, *dst_pte;
4132		vm_page_t dstmpte, srcmpte;
4133		pd_entry_t srcptepaddr;
4134		u_int ptepindex;
4135
4136		KASSERT(addr < UPT_MIN_ADDRESS,
4137		    ("pmap_copy: invalid to pmap_copy page tables"));
4138
4139		pdnxt = (addr + NBPDR) & ~PDRMASK;
4140		if (pdnxt < addr)
4141			pdnxt = end_addr;
4142		ptepindex = addr >> PDRSHIFT;
4143
4144		srcptepaddr = src_pmap->pm_pdir[ptepindex];
4145		if (srcptepaddr == 0)
4146			continue;
4147
4148		if (srcptepaddr & PG_PS) {
4149			if ((addr & PDRMASK) != 0 || addr + NBPDR > end_addr)
4150				continue;
4151			if (dst_pmap->pm_pdir[ptepindex] == 0 &&
4152			    ((srcptepaddr & PG_MANAGED) == 0 ||
4153			    pmap_pv_insert_pde(dst_pmap, addr, srcptepaddr &
4154			    PG_PS_FRAME))) {
4155				dst_pmap->pm_pdir[ptepindex] = srcptepaddr &
4156				    ~PG_W;
4157				dst_pmap->pm_stats.resident_count +=
4158				    NBPDR / PAGE_SIZE;
4159				pmap_pde_mappings++;
4160			}
4161			continue;
4162		}
4163
4164		srcmpte = PHYS_TO_VM_PAGE(srcptepaddr & PG_FRAME);
4165		KASSERT(srcmpte->wire_count > 0,
4166		    ("pmap_copy: source page table page is unused"));
4167
4168		if (pdnxt > end_addr)
4169			pdnxt = end_addr;
4170
4171		src_pte = vtopte(addr);
4172		while (addr < pdnxt) {
4173			pt_entry_t ptetemp;
4174			ptetemp = *src_pte;
4175			/*
4176			 * We only copy mappings of managed pages.
4177			 */
4178			if ((ptetemp & PG_MANAGED) != 0) {
4179				dstmpte = pmap_allocpte(dst_pmap, addr,
4180				    PMAP_ENTER_NOSLEEP);
4181				if (dstmpte == NULL)
4182					goto out;
4183				dst_pte = pmap_pte_quick(dst_pmap, addr);
4184				if (*dst_pte == 0 &&
4185				    pmap_try_insert_pv_entry(dst_pmap, addr,
4186				    PHYS_TO_VM_PAGE(ptetemp & PG_FRAME))) {
4187					/*
4188					 * Clear the wired, modified, and
4189					 * accessed (referenced) bits
4190					 * during the copy.
4191					 */
4192					*dst_pte = ptetemp & ~(PG_W | PG_M |
4193					    PG_A);
4194					dst_pmap->pm_stats.resident_count++;
4195	 			} else {
4196					SLIST_INIT(&free);
4197					if (pmap_unwire_ptp(dst_pmap, dstmpte,
4198					    &free)) {
4199						pmap_invalidate_page(dst_pmap,
4200						    addr);
4201						pmap_free_zero_pages(&free);
4202					}
4203					goto out;
4204				}
4205				if (dstmpte->wire_count >= srcmpte->wire_count)
4206					break;
4207			}
4208			addr += PAGE_SIZE;
4209			src_pte++;
4210		}
4211	}
4212out:
4213	sched_unpin();
4214	rw_wunlock(&pvh_global_lock);
4215	PMAP_UNLOCK(src_pmap);
4216	PMAP_UNLOCK(dst_pmap);
4217}
4218
4219static __inline void
4220pagezero(void *page)
4221{
4222#if defined(I686_CPU)
4223	if (cpu_class == CPUCLASS_686) {
4224		if (cpu_feature & CPUID_SSE2)
4225			sse2_pagezero(page);
4226		else
4227			i686_pagezero(page);
4228	} else
4229#endif
4230		bzero(page, PAGE_SIZE);
4231}
4232
4233/*
4234 *	pmap_zero_page zeros the specified hardware page by mapping
4235 *	the page into KVM and using bzero to clear its contents.
4236 */
4237void
4238pmap_zero_page(vm_page_t m)
4239{
4240	pt_entry_t *cmap_pte2;
4241	struct pcpu *pc;
4242
4243	sched_pin();
4244	pc = get_pcpu();
4245	cmap_pte2 = pc->pc_cmap_pte2;
4246	mtx_lock(&pc->pc_cmap_lock);
4247	if (*cmap_pte2)
4248		panic("pmap_zero_page: CMAP2 busy");
4249	*cmap_pte2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M |
4250	    pmap_cache_bits(m->md.pat_mode, 0);
4251	invlcaddr(pc->pc_cmap_addr2);
4252	pagezero(pc->pc_cmap_addr2);
4253	*cmap_pte2 = 0;
4254
4255	/*
4256	 * Unpin the thread before releasing the lock.  Otherwise the thread
4257	 * could be rescheduled while still bound to the current CPU, only
4258	 * to unpin itself immediately upon resuming execution.
4259	 */
4260	sched_unpin();
4261	mtx_unlock(&pc->pc_cmap_lock);
4262}
4263
4264/*
4265 *	pmap_zero_page_area zeros the specified hardware page by mapping
4266 *	the page into KVM and using bzero to clear its contents.
4267 *
4268 *	off and size may not cover an area beyond a single hardware page.
4269 */
4270void
4271pmap_zero_page_area(vm_page_t m, int off, int size)
4272{
4273	pt_entry_t *cmap_pte2;
4274	struct pcpu *pc;
4275
4276	sched_pin();
4277	pc = get_pcpu();
4278	cmap_pte2 = pc->pc_cmap_pte2;
4279	mtx_lock(&pc->pc_cmap_lock);
4280	if (*cmap_pte2)
4281		panic("pmap_zero_page_area: CMAP2 busy");
4282	*cmap_pte2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M |
4283	    pmap_cache_bits(m->md.pat_mode, 0);
4284	invlcaddr(pc->pc_cmap_addr2);
4285	if (off == 0 && size == PAGE_SIZE)
4286		pagezero(pc->pc_cmap_addr2);
4287	else
4288		bzero(pc->pc_cmap_addr2 + off, size);
4289	*cmap_pte2 = 0;
4290	sched_unpin();
4291	mtx_unlock(&pc->pc_cmap_lock);
4292}
4293
4294/*
4295 *	pmap_zero_page_idle zeros the specified hardware page by mapping
4296 *	the page into KVM and using bzero to clear its contents.  This
4297 *	is intended to be called from the vm_pagezero process only and
4298 *	outside of Giant.
4299 */
4300void
4301pmap_zero_page_idle(vm_page_t m)
4302{
4303
4304	if (*CMAP3)
4305		panic("pmap_zero_page_idle: CMAP3 busy");
4306	sched_pin();
4307	*CMAP3 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M |
4308	    pmap_cache_bits(m->md.pat_mode, 0);
4309	invlcaddr(CADDR3);
4310	pagezero(CADDR3);
4311	*CMAP3 = 0;
4312	sched_unpin();
4313}
4314
4315/*
4316 *	pmap_copy_page copies the specified (machine independent)
4317 *	page by mapping the page into virtual memory and using
4318 *	bcopy to copy the page, one machine dependent page at a
4319 *	time.
4320 */
4321void
4322pmap_copy_page(vm_page_t src, vm_page_t dst)
4323{
4324	pt_entry_t *cmap_pte1, *cmap_pte2;
4325	struct pcpu *pc;
4326
4327	sched_pin();
4328	pc = get_pcpu();
4329	cmap_pte1 = pc->pc_cmap_pte1;
4330	cmap_pte2 = pc->pc_cmap_pte2;
4331	mtx_lock(&pc->pc_cmap_lock);
4332	if (*cmap_pte1)
4333		panic("pmap_copy_page: CMAP1 busy");
4334	if (*cmap_pte2)
4335		panic("pmap_copy_page: CMAP2 busy");
4336	*cmap_pte1 = PG_V | VM_PAGE_TO_PHYS(src) | PG_A |
4337	    pmap_cache_bits(src->md.pat_mode, 0);
4338	invlcaddr(pc->pc_cmap_addr1);
4339	*cmap_pte2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(dst) | PG_A | PG_M |
4340	    pmap_cache_bits(dst->md.pat_mode, 0);
4341	invlcaddr(pc->pc_cmap_addr2);
4342	bcopy(pc->pc_cmap_addr1, pc->pc_cmap_addr2, PAGE_SIZE);
4343	*cmap_pte1 = 0;
4344	*cmap_pte2 = 0;
4345	sched_unpin();
4346	mtx_unlock(&pc->pc_cmap_lock);
4347}
4348
4349int unmapped_buf_allowed = 1;
4350
4351void
4352pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
4353    vm_offset_t b_offset, int xfersize)
4354{
4355	vm_page_t a_pg, b_pg;
4356	char *a_cp, *b_cp;
4357	vm_offset_t a_pg_offset, b_pg_offset;
4358	pt_entry_t *cmap_pte1, *cmap_pte2;
4359	struct pcpu *pc;
4360	int cnt;
4361
4362	sched_pin();
4363	pc = get_pcpu();
4364	cmap_pte1 = pc->pc_cmap_pte1;
4365	cmap_pte2 = pc->pc_cmap_pte2;
4366	mtx_lock(&pc->pc_cmap_lock);
4367	if (*cmap_pte1 != 0)
4368		panic("pmap_copy_pages: CMAP1 busy");
4369	if (*cmap_pte2 != 0)
4370		panic("pmap_copy_pages: CMAP2 busy");
4371	while (xfersize > 0) {
4372		a_pg = ma[a_offset >> PAGE_SHIFT];
4373		a_pg_offset = a_offset & PAGE_MASK;
4374		cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
4375		b_pg = mb[b_offset >> PAGE_SHIFT];
4376		b_pg_offset = b_offset & PAGE_MASK;
4377		cnt = min(cnt, PAGE_SIZE - b_pg_offset);
4378		*cmap_pte1 = PG_V | VM_PAGE_TO_PHYS(a_pg) | PG_A |
4379		    pmap_cache_bits(a_pg->md.pat_mode, 0);
4380		invlcaddr(pc->pc_cmap_addr1);
4381		*cmap_pte2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(b_pg) | PG_A |
4382		    PG_M | pmap_cache_bits(b_pg->md.pat_mode, 0);
4383		invlcaddr(pc->pc_cmap_addr2);
4384		a_cp = pc->pc_cmap_addr1 + a_pg_offset;
4385		b_cp = pc->pc_cmap_addr2 + b_pg_offset;
4386		bcopy(a_cp, b_cp, cnt);
4387		a_offset += cnt;
4388		b_offset += cnt;
4389		xfersize -= cnt;
4390	}
4391	*cmap_pte1 = 0;
4392	*cmap_pte2 = 0;
4393	sched_unpin();
4394	mtx_unlock(&pc->pc_cmap_lock);
4395}
4396
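/*
 * Illustrative sketch (not from the original source): the chunking used by
 * pmap_copy_pages() above.  Because only one source page and one
 * destination page are mapped through the CMAP windows at a time, each copy
 * step is clamped so that it never crosses a page boundary on either side.
 * X_-prefixed names are stand-ins; the #if 0 keeps this user-space sketch
 * out of any build.
 */
#if 0
#include <assert.h>
#include <stddef.h>
#include <string.h>

#define	X_PAGE_SIZE 4096u
#define	X_PAGE_MASK (X_PAGE_SIZE - 1)

static size_t
xmin(size_t a, size_t b)
{
	return (a < b ? a : b);
}

static void
copy_chunked(char *dst, size_t d_off, const char *src, size_t s_off, size_t len)
{
	size_t cnt;

	while (len > 0) {
		cnt = xmin(len, X_PAGE_SIZE - (s_off & X_PAGE_MASK));
		cnt = xmin(cnt, X_PAGE_SIZE - (d_off & X_PAGE_MASK));
		memcpy(dst + d_off, src + s_off, cnt);	/* one page on each side */
		s_off += cnt;
		d_off += cnt;
		len -= cnt;
	}
}

int
main(void)
{
	static char src[3 * X_PAGE_SIZE], dst[3 * X_PAGE_SIZE];
	size_t i;

	for (i = 0; i < sizeof(src); i++)
		src[i] = (char)(i % 251);
	copy_chunked(dst, 100, src, 4000, 8000);	/* misaligned on both sides */
	assert(memcmp(dst + 100, src + 4000, 8000) == 0);
	return (0);
}
#endif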
4397/*
4398 * Returns true if the pmap's pv is one of the first
4399 * 16 pvs linked to from this page.  This count may
4400 * be changed upwards or downwards in the future; it
4401 * is only necessary that true be returned for a small
4402 * subset of pmaps for proper page aging.
4403 */
4404boolean_t
4405pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
4406{
4407	struct md_page *pvh;
4408	pv_entry_t pv;
4409	int loops = 0;
4410	boolean_t rv;
4411
4412	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
4413	    ("pmap_page_exists_quick: page %p is not managed", m));
4414	rv = FALSE;
4415	rw_wlock(&pvh_global_lock);
4416	TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
4417		if (PV_PMAP(pv) == pmap) {
4418			rv = TRUE;
4419			break;
4420		}
4421		loops++;
4422		if (loops >= 16)
4423			break;
4424	}
4425	if (!rv && loops < 16 && (m->flags & PG_FICTITIOUS) == 0) {
4426		pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
4427		TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
4428			if (PV_PMAP(pv) == pmap) {
4429				rv = TRUE;
4430				break;
4431			}
4432			loops++;
4433			if (loops >= 16)
4434				break;
4435		}
4436	}
4437	rw_wunlock(&pvh_global_lock);
4438	return (rv);
4439}
4440
4441/*
4442 *	pmap_page_wired_mappings:
4443 *
4444 *	Return the number of managed mappings to the given physical page
4445 *	that are wired.
4446 */
4447int
4448pmap_page_wired_mappings(vm_page_t m)
4449{
4450	int count;
4451
4452	count = 0;
4453	if ((m->oflags & VPO_UNMANAGED) != 0)
4454		return (count);
4455	rw_wlock(&pvh_global_lock);
4456	count = pmap_pvh_wired_mappings(&m->md, count);
4457	if ((m->flags & PG_FICTITIOUS) == 0) {
4458	    count = pmap_pvh_wired_mappings(pa_to_pvh(VM_PAGE_TO_PHYS(m)),
4459	        count);
4460	}
4461	rw_wunlock(&pvh_global_lock);
4462	return (count);
4463}
4464
4465/*
4466 *	pmap_pvh_wired_mappings:
4467 *
4468 *	Return the updated number "count" of managed mappings that are wired.
4469 */
4470static int
4471pmap_pvh_wired_mappings(struct md_page *pvh, int count)
4472{
4473	pmap_t pmap;
4474	pt_entry_t *pte;
4475	pv_entry_t pv;
4476
4477	rw_assert(&pvh_global_lock, RA_WLOCKED);
4478	sched_pin();
4479	TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
4480		pmap = PV_PMAP(pv);
4481		PMAP_LOCK(pmap);
4482		pte = pmap_pte_quick(pmap, pv->pv_va);
4483		if ((*pte & PG_W) != 0)
4484			count++;
4485		PMAP_UNLOCK(pmap);
4486	}
4487	sched_unpin();
4488	return (count);
4489}
4490
4491/*
4492 * Returns TRUE if the given page is mapped individually or as part of
4493 * a 4mpage.  Otherwise, returns FALSE.
4494 */
4495boolean_t
4496pmap_page_is_mapped(vm_page_t m)
4497{
4498	boolean_t rv;
4499
4500	if ((m->oflags & VPO_UNMANAGED) != 0)
4501		return (FALSE);
4502	rw_wlock(&pvh_global_lock);
4503	rv = !TAILQ_EMPTY(&m->md.pv_list) ||
4504	    ((m->flags & PG_FICTITIOUS) == 0 &&
4505	    !TAILQ_EMPTY(&pa_to_pvh(VM_PAGE_TO_PHYS(m))->pv_list));
4506	rw_wunlock(&pvh_global_lock);
4507	return (rv);
4508}
4509
4510/*
4511 * Remove all pages from the specified address space;
4512 * this aids process exit speeds.  Also, this code is
4513 * special-cased for the current process only, but
4514 * can have the more generic (and slightly slower)
4515 * mode enabled.  This is much faster than pmap_remove
4516 * in the case of running down an entire address space.
4517 */
4518void
4519pmap_remove_pages(pmap_t pmap)
4520{
4521	pt_entry_t *pte, tpte;
4522	vm_page_t m, mpte, mt;
4523	pv_entry_t pv;
4524	struct md_page *pvh;
4525	struct pv_chunk *pc, *npc;
4526	struct spglist free;
4527	int field, idx;
4528	int32_t bit;
4529	uint32_t inuse, bitmask;
4530	int allfree;
4531
4532	if (pmap != PCPU_GET(curpmap)) {
4533		printf("warning: pmap_remove_pages called with non-current pmap\n");
4534		return;
4535	}
4536	SLIST_INIT(&free);
4537	rw_wlock(&pvh_global_lock);
4538	PMAP_LOCK(pmap);
4539	sched_pin();
4540	TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) {
4541		KASSERT(pc->pc_pmap == pmap, ("Wrong pmap %p %p", pmap,
4542		    pc->pc_pmap));
4543		allfree = 1;
4544		for (field = 0; field < _NPCM; field++) {
4545			inuse = ~pc->pc_map[field] & pc_freemask[field];
4546			while (inuse != 0) {
4547				bit = bsfl(inuse);
4548				bitmask = 1UL << bit;
4549				idx = field * 32 + bit;
4550				pv = &pc->pc_pventry[idx];
4551				inuse &= ~bitmask;
4552
4553				pte = pmap_pde(pmap, pv->pv_va);
4554				tpte = *pte;
4555				if ((tpte & PG_PS) == 0) {
4556					pte = vtopte(pv->pv_va);
4557					tpte = *pte & ~PG_PTE_PAT;
4558				}
4559
4560				if (tpte == 0) {
4561					printf(
4562					    "TPTE at %p  IS ZERO @ VA %08x\n",
4563					    pte, pv->pv_va);
4564					panic("bad pte");
4565				}
4566
4567/*
4568 * We cannot remove wired pages from a process' mapping at this time
4569 */
4570				if (tpte & PG_W) {
4571					allfree = 0;
4572					continue;
4573				}
4574
4575				m = PHYS_TO_VM_PAGE(tpte & PG_FRAME);
4576				KASSERT(m->phys_addr == (tpte & PG_FRAME),
4577				    ("vm_page_t %p phys_addr mismatch %016jx %016jx",
4578				    m, (uintmax_t)m->phys_addr,
4579				    (uintmax_t)tpte));
4580
4581				KASSERT((m->flags & PG_FICTITIOUS) != 0 ||
4582				    m < &vm_page_array[vm_page_array_size],
4583				    ("pmap_remove_pages: bad tpte %#jx",
4584				    (uintmax_t)tpte));
4585
4586				pte_clear(pte);
4587
4588				/*
4589				 * Update the vm_page_t clean/reference bits.
4590				 */
4591				if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
4592					if ((tpte & PG_PS) != 0) {
4593						for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++)
4594							vm_page_dirty(mt);
4595					} else
4596						vm_page_dirty(m);
4597				}
4598
4599				/* Mark free */
4600				PV_STAT(pv_entry_frees++);
4601				PV_STAT(pv_entry_spare++);
4602				pv_entry_count--;
4603				pc->pc_map[field] |= bitmask;
4604				if ((tpte & PG_PS) != 0) {
4605					pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE;
4606					pvh = pa_to_pvh(tpte & PG_PS_FRAME);
4607					TAILQ_REMOVE(&pvh->pv_list, pv, pv_next);
4608					if (TAILQ_EMPTY(&pvh->pv_list)) {
4609						for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++)
4610							if (TAILQ_EMPTY(&mt->md.pv_list))
4611								vm_page_aflag_clear(mt, PGA_WRITEABLE);
4612					}
4613					mpte = pmap_remove_pt_page(pmap, pv->pv_va);
4614					if (mpte != NULL) {
4615						pmap->pm_stats.resident_count--;
4616						KASSERT(mpte->wire_count == NPTEPG,
4617						    ("pmap_remove_pages: pte page wire count error"));
4618						mpte->wire_count = 0;
4619						pmap_add_delayed_free_list(mpte, &free, FALSE);
4620						atomic_subtract_int(&vm_cnt.v_wire_count, 1);
4621					}
4622				} else {
4623					pmap->pm_stats.resident_count--;
4624					TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
4625					if (TAILQ_EMPTY(&m->md.pv_list) &&
4626					    (m->flags & PG_FICTITIOUS) == 0) {
4627						pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
4628						if (TAILQ_EMPTY(&pvh->pv_list))
4629							vm_page_aflag_clear(m, PGA_WRITEABLE);
4630					}
4631					pmap_unuse_pt(pmap, pv->pv_va, &free);
4632				}
4633			}
4634		}
4635		if (allfree) {
4636			TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
4637			free_pv_chunk(pc);
4638		}
4639	}
4640	sched_unpin();
4641	pmap_invalidate_all(pmap);
4642	rw_wunlock(&pvh_global_lock);
4643	PMAP_UNLOCK(pmap);
4644	pmap_free_zero_pages(&free);
4645}
4646
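/*
 * Illustrative sketch (not from the original source): the bitmap walk that
 * pmap_remove_pages() performs on each pv chunk above.  A clear bit in
 * pc_map means the slot is in use, so the loop inverts the word, masks it
 * with the valid-slot mask, and repeatedly peels off the lowest set bit.
 * ffs() stands in for the kernel's bsfl() (it is 1-based, hence the -1);
 * the #if 0 keeps this user-space sketch out of any build.
 */
#if 0
#include <assert.h>
#include <stdint.h>
#include <strings.h>

static int
count_inuse(uint32_t pc_map, uint32_t freemask)
{
	uint32_t bitmask, inuse;
	int bit, n;

	n = 0;
	inuse = ~pc_map & freemask;
	while (inuse != 0) {
		bit = ffs((int)inuse) - 1;	/* index of the lowest set bit */
		bitmask = 1u << bit;
		inuse &= ~bitmask;
		n++;			/* ...the real loop frees pc_pventry[bit]... */
	}
	return (n);
}

int
main(void)
{
	/* All 32 slots valid; slots 0, 5 and 30 allocated (their bits are clear). */
	assert(count_inuse(~((1u << 0) | (1u << 5) | (1u << 30)), 0xffffffffu) == 3);
	assert(count_inuse(0xffffffffu, 0xffffffffu) == 0);	/* chunk fully free */
	return (0);
}
#endif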
4647/*
4648 *	pmap_is_modified:
4649 *
4650 *	Return whether or not the specified physical page was modified
4651 *	in any physical maps.
4652 */
4653boolean_t
4654pmap_is_modified(vm_page_t m)
4655{
4656	boolean_t rv;
4657
4658	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
4659	    ("pmap_is_modified: page %p is not managed", m));
4660
4661	/*
4662	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
4663	 * concurrently set while the object is locked.  Thus, if PGA_WRITEABLE
4664	 * is clear, no PTEs can have PG_M set.
4665	 */
4666	VM_OBJECT_ASSERT_WLOCKED(m->object);
4667	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
4668		return (FALSE);
4669	rw_wlock(&pvh_global_lock);
4670	rv = pmap_is_modified_pvh(&m->md) ||
4671	    ((m->flags & PG_FICTITIOUS) == 0 &&
4672	    pmap_is_modified_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m))));
4673	rw_wunlock(&pvh_global_lock);
4674	return (rv);
4675}
4676
4677/*
4678 * Returns TRUE if any of the given mappings were used to modify
4679 * physical memory.  Otherwise, returns FALSE.  Both page and 2mpage
4680 * mappings are supported.
4681 */
4682static boolean_t
4683pmap_is_modified_pvh(struct md_page *pvh)
4684{
4685	pv_entry_t pv;
4686	pt_entry_t *pte;
4687	pmap_t pmap;
4688	boolean_t rv;
4689
4690	rw_assert(&pvh_global_lock, RA_WLOCKED);
4691	rv = FALSE;
4692	sched_pin();
4693	TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
4694		pmap = PV_PMAP(pv);
4695		PMAP_LOCK(pmap);
4696		pte = pmap_pte_quick(pmap, pv->pv_va);
4697		rv = (*pte & (PG_M | PG_RW)) == (PG_M | PG_RW);
4698		PMAP_UNLOCK(pmap);
4699		if (rv)
4700			break;
4701	}
4702	sched_unpin();
4703	return (rv);
4704}
4705
4706/*
4707 *	pmap_is_prefaultable:
4708 *
4709 *	Return whether or not the specified virtual address is eligible
4710 *	for prefault.
4711 */
4712boolean_t
4713pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
4714{
4715	pd_entry_t *pde;
4716	pt_entry_t *pte;
4717	boolean_t rv;
4718
4719	rv = FALSE;
4720	PMAP_LOCK(pmap);
4721	pde = pmap_pde(pmap, addr);
4722	if (*pde != 0 && (*pde & PG_PS) == 0) {
4723		pte = vtopte(addr);
4724		rv = *pte == 0;
4725	}
4726	PMAP_UNLOCK(pmap);
4727	return (rv);
4728}
4729
4730/*
4731 *	pmap_is_referenced:
4732 *
4733 *	Return whether or not the specified physical page was referenced
4734 *	in any physical maps.
4735 */
4736boolean_t
4737pmap_is_referenced(vm_page_t m)
4738{
4739	boolean_t rv;
4740
4741	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
4742	    ("pmap_is_referenced: page %p is not managed", m));
4743	rw_wlock(&pvh_global_lock);
4744	rv = pmap_is_referenced_pvh(&m->md) ||
4745	    ((m->flags & PG_FICTITIOUS) == 0 &&
4746	    pmap_is_referenced_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m))));
4747	rw_wunlock(&pvh_global_lock);
4748	return (rv);
4749}
4750
4751/*
4752 * Returns TRUE if any of the given mappings were referenced and FALSE
4753 * otherwise.  Both page and 4mpage mappings are supported.
4754 */
4755static boolean_t
4756pmap_is_referenced_pvh(struct md_page *pvh)
4757{
4758	pv_entry_t pv;
4759	pt_entry_t *pte;
4760	pmap_t pmap;
4761	boolean_t rv;
4762
4763	rw_assert(&pvh_global_lock, RA_WLOCKED);
4764	rv = FALSE;
4765	sched_pin();
4766	TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
4767		pmap = PV_PMAP(pv);
4768		PMAP_LOCK(pmap);
4769		pte = pmap_pte_quick(pmap, pv->pv_va);
4770		rv = (*pte & (PG_A | PG_V)) == (PG_A | PG_V);
4771		PMAP_UNLOCK(pmap);
4772		if (rv)
4773			break;
4774	}
4775	sched_unpin();
4776	return (rv);
4777}
4778
4779/*
4780 * Clear the write and modified bits in each of the given page's mappings.
4781 */
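/*
 * Illustrative context, not part of the original file: the VM system
 * typically calls pmap_remove_write() before paging a dirty page out, so
 * that any later store to the page faults and re-dirties it rather than
 * modifying it silently while the pageout is in flight.
 */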
4782void
4783pmap_remove_write(vm_page_t m)
4784{
4785	struct md_page *pvh;
4786	pv_entry_t next_pv, pv;
4787	pmap_t pmap;
4788	pd_entry_t *pde;
4789	pt_entry_t oldpte, *pte;
4790	vm_offset_t va;
4791
4792	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
4793	    ("pmap_remove_write: page %p is not managed", m));
4794
4795	/*
4796	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
4797	 * set by another thread while the object is locked.  Thus,
4798	 * if PGA_WRITEABLE is clear, no page table entries need updating.
4799	 */
4800	VM_OBJECT_ASSERT_WLOCKED(m->object);
4801	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
4802		return;
4803	rw_wlock(&pvh_global_lock);
4804	sched_pin();
4805	if ((m->flags & PG_FICTITIOUS) != 0)
4806		goto small_mappings;
4807	pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
4808	TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) {
4809		va = pv->pv_va;
4810		pmap = PV_PMAP(pv);
4811		PMAP_LOCK(pmap);
4812		pde = pmap_pde(pmap, va);
4813		if ((*pde & PG_RW) != 0)
4814			(void)pmap_demote_pde(pmap, pde, va);
4815		PMAP_UNLOCK(pmap);
4816	}
4817small_mappings:
4818	TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
4819		pmap = PV_PMAP(pv);
4820		PMAP_LOCK(pmap);
4821		pde = pmap_pde(pmap, pv->pv_va);
4822		KASSERT((*pde & PG_PS) == 0, ("pmap_remove_write: found"
4823		    " a 4mpage in page %p's pv list", m));
4824		pte = pmap_pte_quick(pmap, pv->pv_va);
4825retry:
4826		oldpte = *pte;
4827		if ((oldpte & PG_RW) != 0) {
4828			/*
4829			 * Regardless of whether a pte is 32 or 64 bits
4830			 * in size, PG_RW and PG_M are among the least
4831			 * significant 32 bits.
4832			 */
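			/*
			 * Editorial note: under PAE the upper 32 bits of the
			 * PTE hold only high physical-address bits (and NX),
			 * which this update never changes, so a 32-bit
			 * compare-and-set on the low word is enough to detect
			 * a racing hardware update of PG_M or PG_A.
			 */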
4833			if (!atomic_cmpset_int((u_int *)pte, oldpte,
4834			    oldpte & ~(PG_RW | PG_M)))
4835				goto retry;
4836			if ((oldpte & PG_M) != 0)
4837				vm_page_dirty(m);
4838			pmap_invalidate_page(pmap, pv->pv_va);
4839		}
4840		PMAP_UNLOCK(pmap);
4841	}
4842	vm_page_aflag_clear(m, PGA_WRITEABLE);
4843	sched_unpin();
4844	rw_wunlock(&pvh_global_lock);
4845}
4846
4847#define	PMAP_TS_REFERENCED_MAX	5
4848
4849/*
4850 *	pmap_ts_referenced:
4851 *
4852 *	Return a count of reference bits for a page, clearing those bits.
4853 *	It is not necessary for every reference bit to be cleared, but it
4854 *	is necessary that 0 only be returned when there are truly no
4855 *	reference bits set.
4856 *
4857 *	XXX: The exact number of bits to check and clear is a matter that
4858 *	should be tested and standardized at some point in the future for
4859 *	optimal aging of shared pages.
4860 *
4861 *	As an optimization, update the page's dirty field if a modified bit is
4862 *	found while counting reference bits.  This opportunistic update can be
4863 *	performed at low cost and can eliminate the need for some future calls
4864 *	to pmap_is_modified().  However, since this function stops after
4865 *	finding PMAP_TS_REFERENCED_MAX reference bits, it may not detect some
4866 *	dirty pages.  Those dirty pages will only be detected by a future call
4867 *	to pmap_is_modified().
4868 */
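/*
 * Illustrative use, not part of the original file: the page daemon is the
 * usual caller and, roughly, folds the result into a page's activity count,
 * e.g.
 *
 *	act_delta = pmap_ts_referenced(m);
 *
 * so capping the scan at PMAP_TS_REFERENCED_MAX bounds the work done per
 * page at the cost of an approximate count.
 */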
4869int
4870pmap_ts_referenced(vm_page_t m)
4871{
4872	struct md_page *pvh;
4873	pv_entry_t pv, pvf;
4874	pmap_t pmap;
4875	pd_entry_t *pde;
4876	pt_entry_t *pte;
4877	vm_paddr_t pa;
4878	int rtval = 0;
4879
4880	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
4881	    ("pmap_ts_referenced: page %p is not managed", m));
4882	pa = VM_PAGE_TO_PHYS(m);
4883	pvh = pa_to_pvh(pa);
4884	rw_wlock(&pvh_global_lock);
4885	sched_pin();
4886	if ((m->flags & PG_FICTITIOUS) != 0 ||
4887	    (pvf = TAILQ_FIRST(&pvh->pv_list)) == NULL)
4888		goto small_mappings;
4889	pv = pvf;
4890	do {
4891		pmap = PV_PMAP(pv);
4892		PMAP_LOCK(pmap);
4893		pde = pmap_pde(pmap, pv->pv_va);
4894		if ((*pde & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
4895			/*
4896			 * Although "*pde" maps a 2/4MB page, this function
4897			 * is called at a 4KB page granularity, so only the
4898			 * 4KB page under test is updated.
4899			 */
4900			vm_page_dirty(m);
4901		}
4902		if ((*pde & PG_A) != 0) {
4903			/*
4904			 * Since this reference bit is shared by either 1024
4905			 * or 512 4KB pages, it should not be cleared every
4906			 * time it is tested.  Apply a simple "hash" function
4907			 * on the physical page number, the virtual superpage
4908			 * number, and the pmap address to select one 4KB page
4909			 * out of the 1024 or 512 on which testing the
4910			 * reference bit will result in clearing that bit.
4911			 * This function is designed to avoid the selection of
4912			 * the same 4KB page for every 2- or 4MB page mapping.
4913			 *
4914			 * On demotion, a mapping that hasn't been referenced
4915			 * is simply destroyed.  To avoid the possibility of a
4916			 * subsequent page fault on a demoted wired mapping,
4917			 * always leave its reference bit set.  Moreover,
4918			 * since the superpage is wired, the current state of
4919			 * its reference bit won't affect page replacement.
4920			 */
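			/*
			 * Worked example (editorial): with NPTEPG == 1024
			 * (non-PAE), the test below clears PG_A only when the
			 * low 10 bits of
			 *	(pa >> PAGE_SHIFT) ^ (pv_va >> PDRSHIFT) ^ pmap
			 * are all zero and the mapping is not wired, i.e. for
			 * roughly one 4KB page in 1024 within the superpage.
			 */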
4921			if ((((pa >> PAGE_SHIFT) ^ (pv->pv_va >> PDRSHIFT) ^
4922			    (uintptr_t)pmap) & (NPTEPG - 1)) == 0 &&
4923			    (*pde & PG_W) == 0) {
4924				atomic_clear_int((u_int *)pde, PG_A);
4925				pmap_invalidate_page(pmap, pv->pv_va);
4926			}
4927			rtval++;
4928		}
4929		PMAP_UNLOCK(pmap);
4930		/* Rotate the PV list if it has more than one entry. */
4931		if (TAILQ_NEXT(pv, pv_next) != NULL) {
4932			TAILQ_REMOVE(&pvh->pv_list, pv, pv_next);
4933			TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next);
4934		}
4935		if (rtval >= PMAP_TS_REFERENCED_MAX)
4936			goto out;
4937	} while ((pv = TAILQ_FIRST(&pvh->pv_list)) != pvf);
4938small_mappings:
4939	if ((pvf = TAILQ_FIRST(&m->md.pv_list)) == NULL)
4940		goto out;
4941	pv = pvf;
4942	do {
4943		pmap = PV_PMAP(pv);
4944		PMAP_LOCK(pmap);
4945		pde = pmap_pde(pmap, pv->pv_va);
4946		KASSERT((*pde & PG_PS) == 0,
4947		    ("pmap_ts_referenced: found a 4mpage in page %p's pv list",
4948		    m));
4949		pte = pmap_pte_quick(pmap, pv->pv_va);
4950		if ((*pte & (PG_M | PG_RW)) == (PG_M | PG_RW))
4951			vm_page_dirty(m);
4952		if ((*pte & PG_A) != 0) {
4953			atomic_clear_int((u_int *)pte, PG_A);
4954			pmap_invalidate_page(pmap, pv->pv_va);
4955			rtval++;
4956		}
4957		PMAP_UNLOCK(pmap);
4958		/* Rotate the PV list if it has more than one entry. */
4959		if (TAILQ_NEXT(pv, pv_next) != NULL) {
4960			TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
4961			TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
4962		}
4963	} while ((pv = TAILQ_FIRST(&m->md.pv_list)) != pvf && rtval <
4964	    PMAP_TS_REFERENCED_MAX);
4965out:
4966	sched_unpin();
4967	rw_wunlock(&pvh_global_lock);
4968	return (rtval);
4969}
4970
4971/*
4972 *	Apply the given advice to the specified range of addresses within the
4973 *	given pmap.  Depending on the advice, clear the referenced and/or
4974 *	modified flags in each mapping and set the mapped page's dirty field.
4975 */
4976void
4977pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int advice)
4978{
4979	pd_entry_t oldpde, *pde;
4980	pt_entry_t *pte;
4981	vm_offset_t va, pdnxt;
4982	vm_page_t m;
4983	boolean_t anychanged, pv_lists_locked;
4984
4985	if (advice != MADV_DONTNEED && advice != MADV_FREE)
4986		return;
4987	if (pmap_is_current(pmap))
4988		pv_lists_locked = FALSE;
4989	else {
4990		pv_lists_locked = TRUE;
4991resume:
4992		rw_wlock(&pvh_global_lock);
4993		sched_pin();
4994	}
4995	anychanged = FALSE;
4996	PMAP_LOCK(pmap);
4997	for (; sva < eva; sva = pdnxt) {
4998		pdnxt = (sva + NBPDR) & ~PDRMASK;
4999		if (pdnxt < sva)
5000			pdnxt = eva;
5001		pde = pmap_pde(pmap, sva);
5002		oldpde = *pde;
5003		if ((oldpde & PG_V) == 0)
5004			continue;
5005		else if ((oldpde & PG_PS) != 0) {
5006			if ((oldpde & PG_MANAGED) == 0)
5007				continue;
5008			if (!pv_lists_locked) {
5009				pv_lists_locked = TRUE;
5010				if (!rw_try_wlock(&pvh_global_lock)) {
5011					if (anychanged)
5012						pmap_invalidate_all(pmap);
5013					PMAP_UNLOCK(pmap);
5014					goto resume;
5015				}
5016				sched_pin();
5017			}
5018			if (!pmap_demote_pde(pmap, pde, sva)) {
5019				/*
5020				 * The large page mapping was destroyed.
5021				 */
5022				continue;
5023			}
5024
5025			/*
5026			 * Unless the page mappings are wired, remove the
5027			 * mapping to a single page so that a subsequent
5028			 * access may repromote.  Since the underlying page
5029			 * table page is fully populated, this removal never
5030			 * frees a page table page.
5031			 */
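			/*
			 * Editorial note: "repromote" means that a later
			 * pmap_enter() in this range may recreate the 2/4MB
			 * mapping through pmap_promote_pde() once every 4KB
			 * entry in the page table page is valid again.
			 */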
5032			if ((oldpde & PG_W) == 0) {
5033				pte = pmap_pte_quick(pmap, sva);
5034				KASSERT((*pte & PG_V) != 0,
5035				    ("pmap_advise: invalid PTE"));
5036				pmap_remove_pte(pmap, pte, sva, NULL);
5037				anychanged = TRUE;
5038			}
5039		}
5040		if (pdnxt > eva)
5041			pdnxt = eva;
5042		va = pdnxt;
5043		for (pte = pmap_pte_quick(pmap, sva); sva != pdnxt; pte++,
5044		    sva += PAGE_SIZE) {
5045			if ((*pte & (PG_MANAGED | PG_V)) != (PG_MANAGED | PG_V))
5046				goto maybe_invlrng;
5047			else if ((*pte & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
5048				if (advice == MADV_DONTNEED) {
5049					/*
5050					 * Future calls to pmap_is_modified()
5051					 * can be avoided by making the page
5052					 * dirty now.
5053					 */
5054					m = PHYS_TO_VM_PAGE(*pte & PG_FRAME);
5055					vm_page_dirty(m);
5056				}
5057				atomic_clear_int((u_int *)pte, PG_M | PG_A);
5058			} else if ((*pte & PG_A) != 0)
5059				atomic_clear_int((u_int *)pte, PG_A);
5060			else
5061				goto maybe_invlrng;
5062			if ((*pte & PG_G) != 0) {
5063				if (va == pdnxt)
5064					va = sva;
5065			} else
5066				anychanged = TRUE;
5067			continue;
5068maybe_invlrng:
5069			if (va != pdnxt) {
5070				pmap_invalidate_range(pmap, va, sva);
5071				va = pdnxt;
5072			}
5073		}
5074		if (va != pdnxt)
5075			pmap_invalidate_range(pmap, va, sva);
5076	}
5077	if (anychanged)
5078		pmap_invalidate_all(pmap);
5079	if (pv_lists_locked) {
5080		sched_unpin();
5081		rw_wunlock(&pvh_global_lock);
5082	}
5083	PMAP_UNLOCK(pmap);
5084}
5085
5086/*
5087 *	Clear the modify bits on the specified physical page.
5088 */
5089void
5090pmap_clear_modify(vm_page_t m)
5091{
5092	struct md_page *pvh;
5093	pv_entry_t next_pv, pv;
5094	pmap_t pmap;
5095	pd_entry_t oldpde, *pde;
5096	pt_entry_t oldpte, *pte;
5097	vm_offset_t va;
5098
5099	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
5100	    ("pmap_clear_modify: page %p is not managed", m));
5101	VM_OBJECT_ASSERT_WLOCKED(m->object);
5102	KASSERT(!vm_page_xbusied(m),
5103	    ("pmap_clear_modify: page %p is exclusive busied", m));
5104
5105	/*
5106	 * If the page is not PGA_WRITEABLE, then no PTEs can have PG_M set.
5107	 * If the object containing the page is locked and the page is not
5108	 * exclusive busied, then PGA_WRITEABLE cannot be concurrently set.
5109	 */
5110	if ((m->aflags & PGA_WRITEABLE) == 0)
5111		return;
5112	rw_wlock(&pvh_global_lock);
5113	sched_pin();
5114	if ((m->flags & PG_FICTITIOUS) != 0)
5115		goto small_mappings;
5116	pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
5117	TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) {
5118		va = pv->pv_va;
5119		pmap = PV_PMAP(pv);
5120		PMAP_LOCK(pmap);
5121		pde = pmap_pde(pmap, va);
5122		oldpde = *pde;
5123		if ((oldpde & PG_RW) != 0) {
5124			if (pmap_demote_pde(pmap, pde, va)) {
5125				if ((oldpde & PG_W) == 0) {
5126					/*
5127					 * Write protect the mapping to a
5128					 * single page so that a subsequent
5129					 * write access may repromote.
5130					 */
5131					va += VM_PAGE_TO_PHYS(m) - (oldpde &
5132					    PG_PS_FRAME);
5133					pte = pmap_pte_quick(pmap, va);
5134					oldpte = *pte;
5135					if ((oldpte & PG_V) != 0) {
5136						/*
5137						 * Regardless of whether a pte is 32 or 64 bits
5138						 * in size, PG_RW and PG_M are among the least
5139						 * significant 32 bits.
5140						 */
5141						while (!atomic_cmpset_int((u_int *)pte,
5142						    oldpte,
5143						    oldpte & ~(PG_M | PG_RW)))
5144							oldpte = *pte;
5145						vm_page_dirty(m);
5146						pmap_invalidate_page(pmap, va);
5147					}
5148				}
5149			}
5150		}
5151		PMAP_UNLOCK(pmap);
5152	}
5153small_mappings:
5154	TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
5155		pmap = PV_PMAP(pv);
5156		PMAP_LOCK(pmap);
5157		pde = pmap_pde(pmap, pv->pv_va);
5158		KASSERT((*pde & PG_PS) == 0, ("pmap_clear_modify: found"
5159		    " a 4mpage in page %p's pv list", m));
5160		pte = pmap_pte_quick(pmap, pv->pv_va);
5161		if ((*pte & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
5162			/*
5163			 * Regardless of whether a pte is 32 or 64 bits
5164			 * in size, PG_M is among the least significant
5165			 * 32 bits.
5166			 */
5167			atomic_clear_int((u_int *)pte, PG_M);
5168			pmap_invalidate_page(pmap, pv->pv_va);
5169		}
5170		PMAP_UNLOCK(pmap);
5171	}
5172	sched_unpin();
5173	rw_wunlock(&pvh_global_lock);
5174}
5175
5176/*
5177 * Miscellaneous support routines follow
5178 */
5179
5180/* Adjust the cache mode for a 4KB page mapped via a PTE. */
5181static __inline void
5182pmap_pte_attr(pt_entry_t *pte, int cache_bits)
5183{
5184	u_int opte, npte;
5185
5186	/*
5187	 * The cache mode bits are all in the low 32-bits of the
5188	 * PTE, so we can just spin on updating the low 32-bits.
5189	 */
5190	do {
5191		opte = *(u_int *)pte;
5192		npte = opte & ~PG_PTE_CACHE;
5193		npte |= cache_bits;
5194	} while (npte != opte && !atomic_cmpset_int((u_int *)pte, opte, npte));
5195}
5196
5197/* Adjust the cache mode for a 2/4MB page mapped via a PDE. */
5198static __inline void
5199pmap_pde_attr(pd_entry_t *pde, int cache_bits)
5200{
5201	u_int opde, npde;
5202
5203	/*
5204	 * The cache mode bits are all in the low 32-bits of the
5205	 * PDE, so we can just spin on updating the low 32-bits.
5206	 */
5207	do {
5208		opde = *(u_int *)pde;
5209		npde = opde & ~PG_PDE_CACHE;
5210		npde |= cache_bits;
5211	} while (npde != opde && !atomic_cmpset_int((u_int *)pde, opde, npde));
5212}
5213
5214/*
5215 * Map a set of physical memory pages into the kernel virtual
5216 * address space. Return a pointer to where it is mapped. This
5217 * routine is intended to be used for mapping device memory,
5218 * NOT real memory.
5219 */
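/*
 * Illustrative use, not part of the original file: a driver would map a
 * device register window with something like
 *
 *	regs = pmap_mapdev_attr(bar_pa, bar_size, PAT_UNCACHEABLE);
 *
 * where "regs", "bar_pa", and "bar_size" are hypothetical names;
 * pmap_mapdev() below is shorthand for exactly this attribute.
 */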
5220void *
5221pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, int mode)
5222{
5223	struct pmap_preinit_mapping *ppim;
5224	vm_offset_t va, offset;
5225	vm_size_t tmpsize;
5226	int i;
5227
5228	offset = pa & PAGE_MASK;
5229	size = round_page(offset + size);
5230	pa = pa & PG_FRAME;
5231
5232	if (pa < KERNLOAD && pa + size <= KERNLOAD)
5233		va = KERNBASE + pa;
5234	else if (!pmap_initialized) {
5235		va = 0;
5236		for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) {
5237			ppim = pmap_preinit_mapping + i;
5238			if (ppim->va == 0) {
5239				ppim->pa = pa;
5240				ppim->sz = size;
5241				ppim->mode = mode;
5242				ppim->va = virtual_avail;
5243				virtual_avail += size;
5244				va = ppim->va;
5245				break;
5246			}
5247		}
5248		if (va == 0)
5249			panic("%s: too many preinit mappings", __func__);
5250	} else {
5251		/*
5252		 * If we have a preinit mapping, re-use it.
5253		 */
5254		for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) {
5255			ppim = pmap_preinit_mapping + i;
5256			if (ppim->pa == pa && ppim->sz == size &&
5257			    ppim->mode == mode)
5258				return ((void *)(ppim->va + offset));
5259		}
5260		va = kva_alloc(size);
5261		if (va == 0)
5262			panic("%s: Couldn't allocate KVA", __func__);
5263	}
5264	for (tmpsize = 0; tmpsize < size; tmpsize += PAGE_SIZE)
5265		pmap_kenter_attr(va + tmpsize, pa + tmpsize, mode);
5266	pmap_invalidate_range(kernel_pmap, va, va + tmpsize);
5267	pmap_invalidate_cache_range(va, va + size, FALSE);
5268	return ((void *)(va + offset));
5269}
5270
5271void *
5272pmap_mapdev(vm_paddr_t pa, vm_size_t size)
5273{
5274
5275	return (pmap_mapdev_attr(pa, size, PAT_UNCACHEABLE));
5276}
5277
5278void *
5279pmap_mapbios(vm_paddr_t pa, vm_size_t size)
5280{
5281
5282	return (pmap_mapdev_attr(pa, size, PAT_WRITE_BACK));
5283}
5284
5285void
5286pmap_unmapdev(vm_offset_t va, vm_size_t size)
5287{
5288	struct pmap_preinit_mapping *ppim;
5289	vm_offset_t offset;
5290	int i;
5291
5292	if (va >= KERNBASE && va + size <= KERNBASE + KERNLOAD)
5293		return;
5294	offset = va & PAGE_MASK;
5295	size = round_page(offset + size);
5296	va = trunc_page(va);
5297	for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) {
5298		ppim = pmap_preinit_mapping + i;
5299		if (ppim->va == va && ppim->sz == size) {
5300			if (pmap_initialized)
5301				return;
5302			ppim->pa = 0;
5303			ppim->va = 0;
5304			ppim->sz = 0;
5305			ppim->mode = 0;
5306			if (va + size == virtual_avail)
5307				virtual_avail = va;
5308			return;
5309		}
5310	}
5311	if (pmap_initialized)
5312		kva_free(va, size);
5313}
5314
5315/*
5316 * Sets the memory attribute for the specified page.
5317 */
5318void
5319pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
5320{
5321
5322	m->md.pat_mode = ma;
5323	if ((m->flags & PG_FICTITIOUS) != 0)
5324		return;
5325
5326	/*
5327	 * If "m" is a normal page, flush it from the cache.
5328	 * See pmap_invalidate_cache_range().
5329	 *
5330	 * First, try to find an existing mapping of the page by sf
5331	 * buffer.  sf_buf_invalidate_cache() modifies the mapping and
5332	 * flushes the cache.
5333	 */
5334	if (sf_buf_invalidate_cache(m))
5335		return;
5336
5337	/*
5338	 * If the page is not mapped by an sf buffer and the CPU does not
5339	 * support self-snoop, map the page transiently and perform the
5340	 * invalidation.  In the worst case, the whole cache is flushed by
5341	 * pmap_invalidate_cache_range().
5342	 */
5343	if ((cpu_feature & CPUID_SS) == 0)
5344		pmap_flush_page(m);
5345}
5346
5347static void
5348pmap_flush_page(vm_page_t m)
5349{
5350	pt_entry_t *cmap_pte2;
5351	struct pcpu *pc;
5352	vm_offset_t sva, eva;
5353	bool useclflushopt;
5354
5355	useclflushopt = (cpu_stdext_feature & CPUID_STDEXT_CLFLUSHOPT) != 0;
5356	if (useclflushopt || (cpu_feature & CPUID_CLFSH) != 0) {
5357		sched_pin();
5358		pc = get_pcpu();
5359		cmap_pte2 = pc->pc_cmap_pte2;
5360		mtx_lock(&pc->pc_cmap_lock);
5361		if (*cmap_pte2)
5362			panic("pmap_flush_page: CMAP2 busy");
5363		*cmap_pte2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) |
5364		    PG_A | PG_M | pmap_cache_bits(m->md.pat_mode, 0);
5365		invlcaddr(pc->pc_cmap_addr2);
5366		sva = (vm_offset_t)pc->pc_cmap_addr2;
5367		eva = sva + PAGE_SIZE;
5368
5369		/*
5370		 * Use mfence or sfence despite the ordering implied by
5371		 * mtx_{un,}lock() because clflush on non-Intel CPUs
5372		 * and clflushopt are not guaranteed to be ordered by
5373		 * any other instruction.
5374		 */
5375		if (useclflushopt)
5376			sfence();
5377		else if (cpu_vendor_id != CPU_VENDOR_INTEL)
5378			mfence();
5379		for (; sva < eva; sva += cpu_clflush_line_size) {
5380			if (useclflushopt)
5381				clflushopt(sva);
5382			else
5383				clflush(sva);
5384		}
5385		if (useclflushopt)
5386			sfence();
5387		else if (cpu_vendor_id != CPU_VENDOR_INTEL)
5388			mfence();
5389		*cmap_pte2 = 0;
5390		sched_unpin();
5391		mtx_unlock(&pc->pc_cmap_lock);
5392	} else
5393		pmap_invalidate_cache();
5394}
5395
5396/*
5397 * Changes the specified virtual address range's memory type to that given by
5398 * the parameter "mode".  The specified virtual address range must be
5399 * completely contained within the kernel map.
5400 *
5401 * Returns zero if the change completed successfully, and either EINVAL or
5402 * ENOMEM if the change failed.  Specifically, EINVAL is returned if some part
5403 * of the virtual address range was not mapped, and ENOMEM is returned if
5404 * there was insufficient memory available to complete the change.
5405 */
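/*
 * Illustrative use, not part of the original file: a caller that mapped a
 * frame buffer with pmap_mapdev() might switch it to write-combining with
 *
 *	error = pmap_change_attr(fb_va, fb_size, PAT_WRITE_COMBINING);
 *
 * where "fb_va" and "fb_size" are hypothetical names, and must be prepared
 * for ENOMEM, since demoting a 2/4MB kernel mapping can require allocating
 * a page table page.
 */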
5406int
5407pmap_change_attr(vm_offset_t va, vm_size_t size, int mode)
5408{
5409	vm_offset_t base, offset, tmpva;
5410	pd_entry_t *pde;
5411	pt_entry_t *pte;
5412	int cache_bits_pte, cache_bits_pde;
5413	boolean_t changed;
5414
5415	base = trunc_page(va);
5416	offset = va & PAGE_MASK;
5417	size = round_page(offset + size);
5418
5419	/*
5420	 * Only supported on kernel virtual addresses above the recursive map.
5421	 */
5422	if (base < VM_MIN_KERNEL_ADDRESS)
5423		return (EINVAL);
5424
5425	cache_bits_pde = pmap_cache_bits(mode, 1);
5426	cache_bits_pte = pmap_cache_bits(mode, 0);
5427	changed = FALSE;
5428
5429	/*
5430	 * Pages that aren't mapped aren't supported.  Also break down
5431	 * 2/4MB pages into 4KB pages if required.
5432	 */
5433	PMAP_LOCK(kernel_pmap);
5434	for (tmpva = base; tmpva < base + size; ) {
5435		pde = pmap_pde(kernel_pmap, tmpva);
5436		if (*pde == 0) {
5437			PMAP_UNLOCK(kernel_pmap);
5438			return (EINVAL);
5439		}
5440		if (*pde & PG_PS) {
5441			/*
5442			 * If the current 2/4MB page already has
5443			 * the required memory type, then we need not
5444			 * demote this page.  Just increment tmpva to
5445			 * the next 2/4MB page frame.
5446			 */
5447			if ((*pde & PG_PDE_CACHE) == cache_bits_pde) {
5448				tmpva = trunc_4mpage(tmpva) + NBPDR;
5449				continue;
5450			}
5451
5452			/*
5453			 * If the current offset aligns with a 2/4MB
5454			 * page frame and there is at least 2/4MB left
5455			 * within the range, then we need not break
5456			 * down this page into 4KB pages.
5457			 */
5458			if ((tmpva & PDRMASK) == 0 &&
5459			    tmpva + PDRMASK < base + size) {
5460				tmpva += NBPDR;
5461				continue;
5462			}
5463			if (!pmap_demote_pde(kernel_pmap, pde, tmpva)) {
5464				PMAP_UNLOCK(kernel_pmap);
5465				return (ENOMEM);
5466			}
5467		}
5468		pte = vtopte(tmpva);
5469		if (*pte == 0) {
5470			PMAP_UNLOCK(kernel_pmap);
5471			return (EINVAL);
5472		}
5473		tmpva += PAGE_SIZE;
5474	}
5475	PMAP_UNLOCK(kernel_pmap);
5476
5477	/*
5478	 * Ok, all the pages exist, so run through them updating their
5479	 * cache mode if required.
5480	 */
5481	for (tmpva = base; tmpva < base + size; ) {
5482		pde = pmap_pde(kernel_pmap, tmpva);
5483		if (*pde & PG_PS) {
5484			if ((*pde & PG_PDE_CACHE) != cache_bits_pde) {
5485				pmap_pde_attr(pde, cache_bits_pde);
5486				changed = TRUE;
5487			}
5488			tmpva = trunc_4mpage(tmpva) + NBPDR;
5489		} else {
5490			pte = vtopte(tmpva);
5491			if ((*pte & PG_PTE_CACHE) != cache_bits_pte) {
5492				pmap_pte_attr(pte, cache_bits_pte);
5493				changed = TRUE;
5494			}
5495			tmpva += PAGE_SIZE;
5496		}
5497	}
5498
5499	/*
5500	 * Flush the CPU caches so that no data remains cached with an
5501	 * attribute that conflicts with the new memory type.
5502	 */
5503	if (changed) {
5504		pmap_invalidate_range(kernel_pmap, base, tmpva);
5505		pmap_invalidate_cache_range(base, tmpva, FALSE);
5506	}
5507	return (0);
5508}
5509
5510/*
5511 * Perform the pmap work for mincore(2).
5512 */
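/*
 * Editorial note: the returned flags feed the mincore(2) system call;
 * MINCORE_SUPER reports that the address is backed by a 2/4MB mapping, and
 * "*locked_pa" implements the page-lock handoff (vm_page_pa_tryrelock())
 * that lets the caller inspect the underlying vm_page safely.
 */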
5513int
5514pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
5515{
5516	pd_entry_t *pdep;
5517	pt_entry_t *ptep, pte;
5518	vm_paddr_t pa;
5519	int val;
5520
5521	PMAP_LOCK(pmap);
5522retry:
5523	pdep = pmap_pde(pmap, addr);
5524	if (*pdep != 0) {
5525		if (*pdep & PG_PS) {
5526			pte = *pdep;
5527			/* Compute the physical address of the 4KB page. */
5528			pa = ((*pdep & PG_PS_FRAME) | (addr & PDRMASK)) &
5529			    PG_FRAME;
5530			val = MINCORE_SUPER;
5531		} else {
5532			ptep = pmap_pte(pmap, addr);
5533			pte = *ptep;
5534			pmap_pte_release(ptep);
5535			pa = pte & PG_FRAME;
5536			val = 0;
5537		}
5538	} else {
5539		pte = 0;
5540		pa = 0;
5541		val = 0;
5542	}
5543	if ((pte & PG_V) != 0) {
5544		val |= MINCORE_INCORE;
5545		if ((pte & (PG_M | PG_RW)) == (PG_M | PG_RW))
5546			val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
5547		if ((pte & PG_A) != 0)
5548			val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
5549	}
5550	if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) !=
5551	    (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) &&
5552	    (pte & (PG_MANAGED | PG_V)) == (PG_MANAGED | PG_V)) {
5553		/* Ensure that "PHYS_TO_VM_PAGE(pa)->object" doesn't change. */
5554		if (vm_page_pa_tryrelock(pmap, pa, locked_pa))
5555			goto retry;
5556	} else
5557		PA_UNLOCK_COND(*locked_pa);
5558	PMAP_UNLOCK(pmap);
5559	return (val);
5560}
5561
5562void
5563pmap_activate(struct thread *td)
5564{
5565	pmap_t	pmap, oldpmap;
5566	u_int	cpuid;
5567	u_int32_t  cr3;
5568
5569	critical_enter();
5570	pmap = vmspace_pmap(td->td_proc->p_vmspace);
5571	oldpmap = PCPU_GET(curpmap);
5572	cpuid = PCPU_GET(cpuid);
5573#if defined(SMP)
5574	CPU_CLR_ATOMIC(cpuid, &oldpmap->pm_active);
5575	CPU_SET_ATOMIC(cpuid, &pmap->pm_active);
5576#else
5577	CPU_CLR(cpuid, &oldpmap->pm_active);
5578	CPU_SET(cpuid, &pmap->pm_active);
5579#endif
5580#if defined(PAE) || defined(PAE_TABLES)
5581	cr3 = vtophys(pmap->pm_pdpt);
5582#else
5583	cr3 = vtophys(pmap->pm_pdir);
5584#endif
5585	/*
5586	 * pmap_activate is for the current thread on the current cpu
5587	 */
5588	td->td_pcb->pcb_cr3 = cr3;
5589	load_cr3(cr3);
5590	PCPU_SET(curpmap, pmap);
5591	critical_exit();
5592}
5593
5594void
5595pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
5596{
5597}
5598
5599/*
5600 *	Increase the starting virtual address of the given mapping if a
5601 *	different alignment might result in more superpage mappings.
5602 */
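/*
 * Worked example (editorial): with 4MB superpages (PDRMASK == 0x3fffff), an
 * object offset of 0x00123000 gives superpage_offset == 0x123000 below, so a
 * request at *addr == 0x20000000 is moved to 0x20123000.  The virtual
 * address and the object offset then occupy the same position within a
 * superpage, which later promotion by pmap_promote_pde() depends on when the
 * backing physical pages are suitably aligned.
 */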
5603void
5604pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
5605    vm_offset_t *addr, vm_size_t size)
5606{
5607	vm_offset_t superpage_offset;
5608
5609	if (size < NBPDR)
5610		return;
5611	if (object != NULL && (object->flags & OBJ_COLORED) != 0)
5612		offset += ptoa(object->pg_color);
5613	superpage_offset = offset & PDRMASK;
5614	if (size - ((NBPDR - superpage_offset) & PDRMASK) < NBPDR ||
5615	    (*addr & PDRMASK) == superpage_offset)
5616		return;
5617	if ((*addr & PDRMASK) < superpage_offset)
5618		*addr = (*addr & ~PDRMASK) + superpage_offset;
5619	else
5620		*addr = ((*addr + PDRMASK) & ~PDRMASK) + superpage_offset;
5621}
5622
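/*
 * Illustrative use, not part of the original file: callers pair these
 * per-CPU quick mappings around a short copy, e.g.
 *
 *	va = pmap_quick_enter_page(m);
 *	bcopy(buf, (void *)va, PAGE_SIZE);
 *	pmap_quick_remove_page(va);
 *
 * and must not sleep in between, because the mapping and the critical
 * section below are per-CPU.
 */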
5623vm_offset_t
5624pmap_quick_enter_page(vm_page_t m)
5625{
5626	vm_offset_t qaddr;
5627	pt_entry_t *pte;
5628
5629	critical_enter();
5630	qaddr = PCPU_GET(qmap_addr);
5631	pte = vtopte(qaddr);
5632
5633	KASSERT(*pte == 0, ("pmap_quick_enter_page: PTE busy"));
5634	*pte = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M |
5635	    pmap_cache_bits(pmap_page_get_memattr(m), 0);
5636	invlpg(qaddr);
5637
5638	return (qaddr);
5639}
5640
5641void
5642pmap_quick_remove_page(vm_offset_t addr)
5643{
5644	vm_offset_t qaddr;
5645	pt_entry_t *pte;
5646
5647	qaddr = PCPU_GET(qmap_addr);
5648	pte = vtopte(qaddr);
5649
5650	KASSERT(*pte != 0, ("pmap_quick_remove_page: PTE not in use"));
5651	KASSERT(addr == qaddr, ("pmap_quick_remove_page: invalid address"));
5652
5653	*pte = 0;
5654	critical_exit();
5655}
5656
5657#if defined(PMAP_DEBUG)
5658int
pmap_pid_dump(int pid)
5659{
5660	pmap_t pmap;
5661	struct proc *p;
5662	int npte = 0;
5663	int index;
5664
5665	sx_slock(&allproc_lock);
5666	FOREACH_PROC_IN_SYSTEM(p) {
5667		if (p->p_pid != pid)
5668			continue;
5669
5670		if (p->p_vmspace) {
5671			int i,j;
5672			index = 0;
5673			pmap = vmspace_pmap(p->p_vmspace);
5674			for (i = 0; i < NPDEPTD; i++) {
5675				pd_entry_t *pde;
5676				pt_entry_t *pte;
5677				vm_offset_t base = i << PDRSHIFT;
5678
5679				pde = &pmap->pm_pdir[i];
5680				if (pde && pmap_pde_v(pde)) {
5681					for (j = 0; j < NPTEPG; j++) {
5682						vm_offset_t va = base + (j << PAGE_SHIFT);
5683						if (va >= (vm_offset_t) VM_MIN_KERNEL_ADDRESS) {
5684							if (index) {
5685								index = 0;
5686								printf("\n");
5687							}
5688							sx_sunlock(&allproc_lock);
5689							return (npte);
5690						}
5691						pte = pmap_pte(pmap, va);
5692						if (pte && pmap_pte_v(pte)) {
5693							pt_entry_t pa;
5694							vm_page_t m;
5695							pa = *pte;
5696							m = PHYS_TO_VM_PAGE(pa & PG_FRAME);
5697							printf("va: 0x%x, pte: 0x%jx, h: %d, w: %d, f: 0x%x",
5698								va, (uintmax_t)pa, m->hold_count, m->wire_count, m->flags);
5699							npte++;
5700							index++;
5701							if (index >= 2) {
5702								index = 0;
5703								printf("\n");
5704							} else {
5705								printf(" ");
5706							}
5707						}
5708					}
5709				}
5710			}
5711		}
5712	}
5713	sx_sunlock(&allproc_lock);
5714	return (npte);
5715}
5716#endif
5717